diff --git a/drivers/staging/qca-wifi-host-cmn/README.txt b/drivers/staging/qca-wifi-host-cmn/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4554fc99fa8a3e1ff5082839c63ef7a0971af5e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/README.txt @@ -0,0 +1 @@ + This is CNSS WLAN Host Driver for products starting from iHelium diff --git a/drivers/staging/qca-wifi-host-cmn/VERSION.txt b/drivers/staging/qca-wifi-host-cmn/VERSION.txt new file mode 100644 index 0000000000000000000000000000000000000000..56884950dded0c1b8a9c3b4825519205aa88e8a6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/VERSION.txt @@ -0,0 +1,2 @@ +Current Component wlan-cmn.driver.lnx.1.0 version 5.1.1.17I +Matches Component wlan-cld3.driver.lnx.1.1 version 5.1.0.22C diff --git a/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_converged.h b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_converged.h new file mode 100644 index 0000000000000000000000000000000000000000..87eecad0f2854a7b74f47ec5059efd2c9813dd2e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_converged.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains centralized definitions of converged configuration. + */ + +#ifndef __CFG_CONVERGED_H +#define __CFG_CONVERGED_H + +#include +#include "cfg_dp.h" +#include "cfg_hif.h" +#include +#ifdef WLAN_SUPPORT_GREEN_AP +#include "cfg_green_ap_params.h" +#else +#define CFG_GREEN_AP_ALL +#endif +#include + +#define CFG_CONVERGED_ALL \ + CFG_SCAN_ALL \ + CFG_DP \ + CFG_EXTSCAN_ALL \ + CFG_GREEN_AP_ALL \ + CFG_SPECTRAL_ALL \ + CFG_HIF + +#endif /* __CFG_CONVERGED_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_define.h b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_define.h new file mode 100644 index 0000000000000000000000000000000000000000..375bce92154bc10b02ea8ecec5f2189068aff1b3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_define.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: APIs and macros for defining configuration. + */ + +#ifndef __CFG_DEFINE_H +#define __CFG_DEFINE_H + +enum cfg_fallback_behavior { + CFG_VALUE_OR_CLAMP, + CFG_VALUE_OR_DEFAULT, +}; + +#define rm_parens(...) __VA_ARGS__ +#define __CFG(id, is_ini, mtype, args...) \ + __CFG_##is_ini##_##mtype(id, mtype, args) +#define _CFG(id, args) __CFG(id, args) +#define CFG(id) _CFG(__##id, rm_parens id) + +#define __CFG_INI_INT(args...) __CFG_INI(args) +#define __CFG_INI_UINT(args...) __CFG_INI(args) +#define __CFG_INI_BOOL(args...) __CFG_INI(args) +#define __CFG_INI_STRING(args...) __CFG_INI(args) +#define __CFG_INI_MAC(args...) __CFG_INI(args) +#define __CFG_INI_IPV4(args...) __CFG_INI(args) +#define __CFG_INI_IPV6(args...) __CFG_INI(args) +#define __CFG_INI(args...) (args) + +#define __CFG_NON_INI_INT(args...) __CFG_NON_INI(args) +#define __CFG_NON_INI_UINT(args...) __CFG_NON_INI(args) +#define __CFG_NON_INI_BOOL(args...) __CFG_NON_INI(args) +#define __CFG_NON_INI_STRING(args...) __CFG_NON_INI(args) +#define __CFG_NON_INI_MAC(args...) __CFG_NON_INI(args) +#define __CFG_NON_INI_IPV4(args...) __CFG_NON_INI(args) +#define __CFG_NON_INI_IPV6(args...) __CFG_NON_INI(args) +#define __CFG_NON_INI(args...) + +/* configuration available in ini */ +#define CFG_INI_INT(name, min, max, def, fallback, desc) \ + (INI, INT, int32_t, name, min, max, fallback, desc, def) +#define CFG_INI_UINT(name, min, max, def, fallback, desc) \ + (INI, UINT, uint32_t, name, min, max, fallback, desc, def) +#define CFG_INI_BOOL(name, def, desc) \ + (INI, BOOL, bool, name, false, true, -1, desc, def) +#define CFG_INI_STRING(name, min_len, max_len, def, desc) \ + (INI, STRING, char *, name, min_len, max_len, -1, desc, def) +#define CFG_INI_MAC(name, def, desc) \ + (INI, MAC, struct qdf_mac_addr, name, -1, -1, -1, desc, def) +#define CFG_INI_IPV4(name, def, desc) \ + (INI, IPV4, struct qdf_ipv4_addr, name, -1, -1, -1, desc, def) +#define CFG_INI_IPV6(name, def, desc) \ + (INI, IPV6, struct qdf_ipv6_addr, name, -1, -1, -1, desc, def) + +/* configuration *not* available in ini */ +#define CFG_INT(name, min, max, def, fallback, desc) \ + (NON_INI, INT, int32_t, name, min, max, fallback, desc, def) +#define CFG_UINT(name, min, max, def, fallback, desc) \ + (NON_INI, UINT, uint32_t, name, min, max, fallback, desc, def) +#define CFG_BOOL(name, def, desc) \ + (NON_INI, BOOL, bool, name, false, true, false, desc, def) +#define CFG_STRING(name, min_len, max_len, def, desc) \ + (NON_INI, STRING, char *, name, min_len, max_len, -1, desc, def) +#define CFG_MAC(name, def, desc) \ + (NON_INI, MAC, struct qdf_mac_addr, name, -1, -1, -1, desc, def) +#define CFG_IPV4(name, def, desc) \ + (NON_INI, IPV4, struct qdf_ipv4_addr, name, -1, -1, -1, desc, def) +#define CFG_IPV6(name, def, desc) \ + (NON_INI, IPV6, struct qdf_ipv6_addr, name, -1, -1, -1, desc, def) + +/* utility macros/functions */ +#ifdef CONFIG_AP_PLATFORM +#define PLATFORM_VALUE(non_ap_value, ap_value) ap_value +#else +#define PLATFORM_VALUE(non_ap_value, ap_value) non_ap_value +#endif + +#endif /* __CFG_DEFINE_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_dispatcher.h b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_dispatcher.h new file mode 100644 index 0000000000000000000000000000000000000000..c87672cc15398a566b632f58b6567ec9cea36c2b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_dispatcher.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Dispatcher related handler APIs for the configuration component + */ +#ifndef __CFG_DISPATCHER_H_ +#define __CFG_DISPATCHER_H_ + +#include + +/** + * cfg_dispatcher_init() - Configuration component global init handler + * + * Return: QDF_STATUS + */ +QDF_STATUS cfg_dispatcher_init(void); + +/** + * cfg_dispatcher_deinit() - Configuration component global deinit handler + * + * Return: QDF_STATUS + */ +QDF_STATUS cfg_dispatcher_deinit(void); + +#endif /* __CFG_DISPATCHER_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..3c4439a5c53fafd32a9cd5d635685d31fdf6f478 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_ucfg_api.h @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: UCFG APIs for the configuration component. + * + * Logically, configuration exists at the psoc level. This means, each psoc can + * have its own custom configuration, and calls to lookup configuration take a + * psoc parameter for reference. E.g. + * + * int32_t value = cfg_get(psoc, WLAN_SOME_INTEGER_CONFIG_ID); + * + * Configuration is cascading, and lookups happen in this order: + * + * 1) use psoc value, if configured + * 2) use global value, if configured + * 3) fallback to the default value for the configuration item + * + * This means a psoc configuration is a specialization of the global + * configuration, and does not need to explicitly set the same values if they + * would match the global config. + * + * In order to load and parse the global config, call cfg_parse(). In order to + * load and parse psoc configs, call cfg_psoc_parse(). cfg_parse() MUST be + * called before cfg_psoc_parse(), as global configuration will be consulted + * during the psoc parsing process. + * + * There are two basic lifecycles supported: + * + * 1) The type and number of psocs is *not* known at load time + * + * // driver is loading + * cfg_parse("/path/to/config"); + * + * ... + * + * // a psoc has just been created + * cfg_psoc_parse(psoc, "/path/to/psoc/config"); + * + * ... + * + * // driver is unloading + * cfg_release(); + * + * 2) The type and number of psocs *is* known at load time + * + * // driver is loading + * cfg_parse("/path/to/config"); + * + * ... + * + * // for each psoc + * cfg_psoc_parse(psoc, "/path/to/psoc/config"); + * + * // no further psocs will be created after this point + * cfg_release(); + * + * ... + * + * // driver is unloaded later + * + * Each configuration store is reference counted to reduce memory footprint, and + * the configuration component itself will hold one ref count on the global + * config store. All psocs for which psoc-specific configurations have *not* + * been provided will reference the global config store. Psocs for which psoc- + * specific configurations *have* been provded will check for existings stores + * with a matching path to use, before parsing the specified configuration file. + * + * If, at some point in time, it is known that no further psocs will ever be + * created, a call to cfg_release() will release the global ref count held by + * the configuration component. For systems which specify psoc-specific configs + * for all psocs, this will release the unnecessary memory used by the global + * config store. Otherwise, calling cfg_release() at unload time will ensure + * the global config store is properly freed. + */ + +#ifndef __CFG_UCFG_H +#define __CFG_UCFG_H + +#include "cfg_all.h" +#include "cfg_define.h" +#include "i_cfg.h" +#include "qdf_status.h" +#include "qdf_str.h" +#include "qdf_types.h" +#include "wlan_objmgr_psoc_obj.h" + +/** + * cfg_parse() - parse an ini file, and populate the global config storei + * @path: The full file path of the ini file to parse + * + * Note: A matching cfg_release() call is required to release allocated + * resources. + * + * The *.ini file format is a simple format consiting of a list of key/value + * pairs, separated by an '=' character. e.g. + * + * gConfigItem1=some string value + * gConfigItem2=0xabc + * + * Comments are also supported, initiated with the '#' character: + * + * # This is a comment. It will be ignored by the *.ini parser + * gConfigItem3=aa:bb:cc:dd:ee:ff # this is also a comment + * + * Several datatypes are natively supported: + * + * gInt=-123 # bin (0b), octal (0o), hex (0x), and decimal supported + * gUint=123 # a non-negative integer value + * gBool=y # (1, Y, y) -> true; (0, N, n) -> false + * gString=any string # strings are useful for representing complex types + * gMacAddr=aa:bb:cc:dd:ee:ff # colons are optional, upper and lower case + * gIpv4Addr=127.0.0.1 # uses typical dot-decimal notation + * gIpv6Addr=::1 # typical notation, supporting zero-compression + * + * Return: QDF_STATUS + */ +QDF_STATUS cfg_parse(const char *path); + +/** + * cfg_release() - release the global configuration store + * + * This API releases the configuration component's reference to the global + * config store. + * + * See also: this file's DOC section. + * + * Return: None + */ +void cfg_release(void); + +/** + * cfg_psoc_parse() - specialize the config store for @psoc by parsing @path + * @psoc: The psoc whose config store should be specialized + * @path: The full file path of the ini file to parse + * + * See also: cfg_parse(), and this file's DOC section. + * + * Return: QDF_STATUS + */ +QDF_STATUS cfg_psoc_parse(struct wlan_objmgr_psoc *psoc, const char *path); + +/** + * cfg_parse_to_psoc_store() - Parse file @path and update psoc ini store + * @psoc: The psoc whose config store should be updated + * @path: The full file path of the ini file to parse + * + * Return: QDF_STATUS + */ +QDF_STATUS cfg_parse_to_psoc_store(struct wlan_objmgr_psoc *psoc, + const char *path); + +/** + * cfg_parse_to_global_store() Parse file @path and update global ini store + * @path: The full file path of the ini file to parse + * + * Return: QDF_STATUS + */ +QDF_STATUS cfg_parse_to_global_store(const char *path); + +/** + * cfg_ucfg_store_print() prints the cfg ini/non ini logs + * @psoc: psoc + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_cfg_store_print(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_cfg_ini_config_print() prints the cfg ini/non ini to buffer + * @psoc: psoc + * @buf: cache to save ini config + * @plen: the pointer to length + * @buflen: total buf length + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_cfg_ini_config_print(struct wlan_objmgr_psoc *psoc, + uint8_t *buf, ssize_t *plen, + ssize_t buflen); + +/** + * cfg_get() - lookup the configured value for @id from @psoc + * @psoc: The psoc from which to lookup the configured value + * @id: The id of the configured value to lookup + * + * E.g. + * + * int32_t value = cfg_get(psoc, WLAN_SOME_INTEGER_CONFIG_ID); + * + * Return: The configured value + */ +#define cfg_get(psoc, id) __cfg_get(psoc, __##id) + +/* Configuration Access APIs */ +#define __do_call(op, args...) op(args) +#define do_call(op, args) __do_call(op, rm_parens args) + +#define cfg_id(id) #id + +#define __cfg_mtype(ini, mtype, ctype, name, min, max, fallback, desc, def...) \ + mtype +#define cfg_mtype(id) do_call(__cfg_mtype, id) + +#define __cfg_type(ini, mtype, ctype, name, min, max, fallback, desc, def...) \ + ctype +#define cfg_type(id) do_call(__cfg_type, id) + +#define __cfg_name(ini, mtype, ctype, name, min, max, fallback, desc, def...) \ + name +#define cfg_name(id) do_call(__cfg_name, id) + +#define __cfg_min(ini, mtype, ctype, name, min, max, fallback, desc, def...) \ + min +#define cfg_min(id) do_call(__cfg_min, id) + +#define __cfg_max(ini, mtype, ctype, name, min, max, fallback, desc, def...) \ + max +#define cfg_max(id) do_call(__cfg_max, id) + +#define __cfg_fb(ini, mtype, ctype, name, min, max, fallback, desc, def...) \ + fallback +#define cfg_fallback(id) do_call(__cfg_fb, id) + +#define __cfg_desc(ini, mtype, ctype, name, min, max, fallback, desc, def...) \ + desc +#define cfg_description(id) do_call(__cfg_desc, id) + +#define __cfg_def(ini, mtype, ctype, name, min, max, fallback, desc, def...) \ + def +#define cfg_default(id) do_call(__cfg_def, id) + +#define __cfg_str(id...) #id +#define cfg_str(id) #id __cfg_str(id) + +/* validate APIs */ +static inline bool +cfg_string_in_range(const char *value, qdf_size_t min_len, qdf_size_t max_len) +{ + qdf_size_t len = qdf_str_len(value); + + return len >= min_len && len <= max_len; +} + +#define __cfg_INT_in_range(value, min, max) (value >= min && value <= max) +#define __cfg_UINT_in_range(value, min, max) (value >= min && value <= max) +#define __cfg_STRING_in_range(value, min_len, max_len) \ + cfg_string_in_range(value, min_len, max_len) + +#define __cfg_in_range(id, value, mtype) \ + __cfg_ ## mtype ## _in_range(value, cfg_min(id), cfg_max(id)) + +/* this may look redundant, but forces @mtype to be expanded */ +#define __cfg_in_range_type(id, value, mtype) \ + __cfg_in_range(id, value, mtype) + +#define cfg_in_range(id, value) __cfg_in_range_type(id, value, cfg_mtype(id)) + +/* Value-or-Default APIs */ +#define __cfg_value_or_default(id, value, def) \ + (cfg_in_range(id, value) ? value : def) + +#define cfg_value_or_default(id, value) \ + __cfg_value_or_default(id, value, cfg_default(id)) + +/* Value-or-Clamped APIs */ +#define __cfg_clamp(val, min, max) (val < min ? min : (val > max ? max : val)) +#define cfg_clamp(id, value) __cfg_clamp(value, cfg_min(id), cfg_max(id)) + +#endif /* __CFG_UCFG_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/cfg/inc/i_cfg.h b/drivers/staging/qca-wifi-host-cmn/cfg/inc/i_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..986ba246be297d2da901e784de1666d813f0d169 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/cfg/inc/i_cfg.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Internal APIs for the configuration component. + */ + +#ifndef __I_CFG_H +#define __I_CFG_H + +#include "cfg_define.h" +#include "qdf_trace.h" +#include "qdf_types.h" +#include "wlan_objmgr_psoc_obj.h" + +#define cfg_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_CONFIG, params) +#define cfg_info(params...) QDF_TRACE_INFO(QDF_MODULE_ID_CONFIG, params) +#define cfg_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_CONFIG, params) +#define cfg_enter() QDF_TRACE_ENTER(QDF_MODULE_ID_CONFIG, "enter") +#define cfg_exit() QDF_TRACE_EXIT(QDF_MODULE_ID_CONFIG, "exit") + +#define cfg_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_CONFIG, params) +#define cfg_warn_rl(params...) QDF_TRACE_WARN_RL(QDF_MODULE_ID_CONFIG, params) +#define cfg_info_rl(params...) QDF_TRACE_INFO_RL(QDF_MODULE_ID_CONFIG, params) +#define cfg_debug_rl(params...) QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_CONFIG, params) + +#define cfg_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_CONFIG, params) +#define cfg_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_CONFIG, params) +#define cfg_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_CONFIG, params) +#define cfg_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_CONFIG, params) + +/* define global config values structure */ + +#undef __CFG_INI_STRING +#define __CFG_INI_STRING(id, mtype, ctype, name, min, max, fallback, desc, \ + def...) \ + const char id##_internal[(max) + 1]; +#undef __CFG_INI +#define __CFG_INI(id, mtype, ctype, name, min, max, fallback, desc, def...) \ + const ctype id##_internal; + +struct cfg_values { + /* e.g. const int32_t __CFG_SCAN_DWELL_TIME_internal; */ + CFG_ALL +}; + +#undef __CFG_INI_STRING +#define __CFG_INI_STRING(args...) __CFG_INI(args) +#undef __CFG_INI +#define __CFG_INI(args...) (args) + +struct cfg_values *cfg_psoc_get_values(struct wlan_objmgr_psoc *psoc); + +#define __cfg_get(psoc, id) (cfg_psoc_get_values( \ + (struct wlan_objmgr_psoc *)psoc)->id##_internal) + +#endif /* __I_CFG_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/cfg/inc/i_cfg_objmgr.h b/drivers/staging/qca-wifi-host-cmn/cfg/inc/i_cfg_objmgr.h new file mode 100644 index 0000000000000000000000000000000000000000..330321b5bf0395f42bdca0ca71d7809ab1cd268a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/cfg/inc/i_cfg_objmgr.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains various object manager related wrappers and helpers + */ + +#ifndef __CFG_OBJMGR_H +#define __CFG_OBJMGR_H + +#include "wlan_cmn.h" +#include "wlan_objmgr_global_obj.h" +#include "wlan_objmgr_psoc_obj.h" + +/* Private Data */ + +#define cfg_psoc_get_priv(psoc) \ + wlan_objmgr_psoc_get_comp_private_obj((psoc), WLAN_UMAC_COMP_CONFIG) +#define cfg_psoc_set_priv(psoc, priv) \ + wlan_objmgr_psoc_component_obj_attach((psoc), WLAN_UMAC_COMP_CONFIG, \ + (priv), QDF_STATUS_SUCCESS) +#define cfg_psoc_unset_priv(psoc, priv) \ + wlan_objmgr_psoc_component_obj_detach((psoc), WLAN_UMAC_COMP_CONFIG, \ + (priv)) + +/* event registration */ + +#define cfg_psoc_register_create(callback) \ + wlan_objmgr_register_psoc_create_handler(WLAN_UMAC_COMP_CONFIG, \ + (callback), NULL) +#define cfg_psoc_register_destroy(callback) \ + wlan_objmgr_register_psoc_destroy_handler(WLAN_UMAC_COMP_CONFIG, \ + (callback), NULL) + +/* event de-registration */ + +#define cfg_psoc_unregister_create(callback) \ + wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_CONFIG, \ + (callback), NULL) +#define cfg_psoc_unregister_destroy(callback) \ + wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_CONFIG, \ + (callback), NULL) + +#endif /* __CFG_OBJMGR_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/cfg/src/cfg.c b/drivers/staging/qca-wifi-host-cmn/cfg/src/cfg.c new file mode 100644 index 0000000000000000000000000000000000000000..297c3706a8228a6ed69bce8b649dcb8935b6e389 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/cfg/src/cfg.c @@ -0,0 +1,867 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "cfg_all.h" +#include "cfg_define.h" +#include "cfg_dispatcher.h" +#include "cfg_ucfg_api.h" +#include "i_cfg.h" +#include "i_cfg_objmgr.h" +#include "qdf_atomic.h" +#include "qdf_list.h" +#include "qdf_mem.h" +#include "qdf_module.h" +#include "qdf_parse.h" +#include "qdf_status.h" +#include "qdf_str.h" +#include "qdf_trace.h" +#include "qdf_types.h" +#include "wlan_objmgr_psoc_obj.h" + +/** + * struct cfg_value_store - backing store for an ini file + * @path: file path of the ini file + * @node: internal list node for keeping track of all the allocated stores + * @users: number of references on the store + * @values: a values struct containing the parsed values from the ini file + */ +struct cfg_value_store { + char *path; + qdf_list_node_t node; + qdf_atomic_t users; + struct cfg_values values; +}; + +/* define/populate dynamic metadata lookup table */ + +/** + * struct cfg_meta - configuration item metadata for dynamic lookup during parse + * @name: name of the config item used in the ini file (i.e. "gScanDwellTime") + * @item_handler: parsing callback based on the type of the config item + * @min: minimum value for use in bounds checking (min_len for strings) + * @max: maximum value for use in bounds checking (max_len for strings) + * @fallback: the fallback behavior to use when configured values are invalid + */ +struct cfg_meta { + const char *name; + const uint32_t field_offset; + void (*const item_handler)(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *value); + const int32_t min; + const int32_t max; + const enum cfg_fallback_behavior fallback; +}; + +/* ini item handler functions */ + +#define cfg_value_ptr(store, meta) \ + ((void *)&(store)->values + (meta)->field_offset) + +static __attribute__((unused)) void +cfg_int_item_handler(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *str_value) +{ + QDF_STATUS status; + int32_t *store_value = cfg_value_ptr(store, meta); + int32_t value; + + status = qdf_int32_parse(str_value, &value); + if (QDF_IS_STATUS_ERROR(status)) { + cfg_err("%s=%s - Invalid format (status %d); Using default %d", + meta->name, str_value, status, *store_value); + return; + } + + QDF_BUG(meta->min <= meta->max); + if (meta->min > meta->max) { + cfg_err("Invalid config item meta for %s", meta->name); + return; + } + + if (value >= meta->min && value <= meta->max) { + *store_value = value; + return; + } + + switch (meta->fallback) { + default: + QDF_DEBUG_PANIC("Unknown fallback method %d for cfg item '%s'", + meta->fallback, meta->name); + /* fall through */ + case CFG_VALUE_OR_DEFAULT: + /* store already contains default */ + break; + case CFG_VALUE_OR_CLAMP: + *store_value = __cfg_clamp(value, meta->min, meta->max); + break; + } + + cfg_err("%s=%d - Out of range [%d, %d]; Using %d", + meta->name, value, meta->min, meta->max, *store_value); +} + +static __attribute__((unused)) void +cfg_uint_item_handler(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *str_value) +{ + QDF_STATUS status; + uint32_t *store_value = cfg_value_ptr(store, meta); + uint32_t value; + uint32_t min; + uint32_t max; + + /** + * Since meta min and max are of type int32_t + * We need explicit type casting to avoid + * implicit wrap around for uint32_t type cfg data. + */ + min = (uint32_t)meta->min; + max = (uint32_t)meta->max; + + status = qdf_uint32_parse(str_value, &value); + if (QDF_IS_STATUS_ERROR(status)) { + cfg_err("%s=%s - Invalid format (status %d); Using default %u", + meta->name, str_value, status, *store_value); + return; + } + + QDF_BUG(min <= max); + if (min > max) { + cfg_err("Invalid config item meta for %s", meta->name); + return; + } + + if (value >= min && value <= max) { + *store_value = value; + return; + } + + switch (meta->fallback) { + default: + QDF_DEBUG_PANIC("Unknown fallback method %d for cfg item '%s'", + meta->fallback, meta->name); + /* fall through */ + case CFG_VALUE_OR_DEFAULT: + /* store already contains default */ + break; + case CFG_VALUE_OR_CLAMP: + *store_value = __cfg_clamp(value, min, max); + break; + } + + cfg_err("%s=%u - Out of range [%d, %d]; Using %u", + meta->name, value, min, max, *store_value); +} + +static __attribute__((unused)) void +cfg_bool_item_handler(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *str_value) +{ + QDF_STATUS status; + bool *store_value = cfg_value_ptr(store, meta); + + status = qdf_bool_parse(str_value, store_value); + if (QDF_IS_STATUS_SUCCESS(status)) + return; + + cfg_err("%s=%s - Invalid format (status %d); Using default '%s'", + meta->name, str_value, status, *store_value ? "true" : "false"); +} + +static __attribute__((unused)) void +cfg_string_item_handler(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *str_value) +{ + char *store_value = cfg_value_ptr(store, meta); + qdf_size_t len; + + QDF_BUG(meta->min >= 0); + QDF_BUG(meta->min <= meta->max); + if (meta->min < 0 || meta->min > meta->max) { + cfg_err("Invalid config item meta for %s", meta->name); + return; + } + + /* ensure min length */ + len = qdf_str_nlen(str_value, meta->min); + if (len < meta->min) { + cfg_err("%s=%s - Too short; Using default '%s'", + meta->name, str_value, store_value); + return; + } + + /* check max length */ + len += qdf_str_nlen(str_value + meta->min, meta->max - meta->min + 1); + if (len > meta->max) { + cfg_err("%s=%s - Too long; Using default '%s'", + meta->name, str_value, store_value); + return; + } + + qdf_str_lcopy(store_value, str_value, meta->max + 1); +} + +static __attribute__((unused)) void +cfg_mac_item_handler(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *str_value) +{ + QDF_STATUS status; + struct qdf_mac_addr *store_value = cfg_value_ptr(store, meta); + + status = qdf_mac_parse(str_value, store_value); + if (QDF_IS_STATUS_SUCCESS(status)) + return; + + cfg_err("%s=%s - Invalid format (status %d); Using default " + QDF_MAC_ADDR_FMT, meta->name, str_value, status, + QDF_MAC_ADDR_REF(store_value->bytes)); +} + +static __attribute__((unused)) void +cfg_ipv4_item_handler(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *str_value) +{ + QDF_STATUS status; + struct qdf_ipv4_addr *store_value = cfg_value_ptr(store, meta); + + status = qdf_ipv4_parse(str_value, store_value); + if (QDF_IS_STATUS_SUCCESS(status)) + return; + + cfg_err("%s=%s - Invalid format (status %d); Using default " + QDF_IPV4_ADDR_STR, meta->name, str_value, status, + QDF_IPV4_ADDR_ARRAY(store_value->bytes)); +} + +static __attribute__((unused)) void +cfg_ipv6_item_handler(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *str_value) +{ + QDF_STATUS status; + struct qdf_ipv6_addr *store_value = cfg_value_ptr(store, meta); + + status = qdf_ipv6_parse(str_value, store_value); + if (QDF_IS_STATUS_SUCCESS(status)) + return; + + cfg_err("%s=%s - Invalid format (status %d); Using default " + QDF_IPV6_ADDR_STR, meta->name, str_value, status, + QDF_IPV6_ADDR_ARRAY(store_value->bytes)); +} + +/* populate metadata lookup table */ +#undef __CFG_INI +#define __CFG_INI(_id, _mtype, _ctype, _name, _min, _max, _fallback, ...) \ +{ \ + .name = _name, \ + .field_offset = qdf_offsetof(struct cfg_values, _id##_internal), \ + .item_handler = cfg_ ## _mtype ## _item_handler, \ + .min = _min, \ + .max = _max, \ + .fallback = _fallback, \ +}, + +#define cfg_INT_item_handler cfg_int_item_handler +#define cfg_UINT_item_handler cfg_uint_item_handler +#define cfg_BOOL_item_handler cfg_bool_item_handler +#define cfg_STRING_item_handler cfg_string_item_handler +#define cfg_MAC_item_handler cfg_mac_item_handler +#define cfg_IPV4_item_handler cfg_ipv4_item_handler +#define cfg_IPV6_item_handler cfg_ipv6_item_handler + +static const struct cfg_meta cfg_meta_lookup_table[] = { + CFG_ALL +}; + +/* default store initializer */ + +static void cfg_store_set_defaults(struct cfg_value_store *store) +{ +#undef __CFG_INI +#define __CFG_INI(id, mtype, ctype, name, min, max, fallback, desc, def...) \ + ctype id = def; + + CFG_ALL + +#undef __CFG_INI_STRING +#define __CFG_INI_STRING(id, mtype, ctype, name, min_len, max_len, ...) \ + qdf_str_lcopy((char *)&store->values.id##_internal, id, (max_len) + 1); + +#undef __CFG_INI +#define __CFG_INI(id, mtype, ctype, name, min, max, fallback, desc, def...) \ + *(ctype *)&store->values.id##_internal = id; + + CFG_ALL +} + +static const struct cfg_meta *cfg_lookup_meta(const char *name) +{ + int i; + + QDF_BUG(name); + if (!name) + return NULL; + + /* linear search for now; optimize in the future if needed */ + for (i = 0; i < QDF_ARRAY_SIZE(cfg_meta_lookup_table); i++) { + const struct cfg_meta *meta = &cfg_meta_lookup_table[i]; + + if (qdf_str_eq(name, meta->name)) + return meta; + } + + return NULL; +} + +static QDF_STATUS +cfg_ini_item_handler(void *context, const char *key, const char *value) +{ + struct cfg_value_store *store = context; + const struct cfg_meta *meta; + + meta = cfg_lookup_meta(key); + if (!meta) { + /* TODO: promote to 'err' or 'warn' once legacy is ported */ + cfg_debug("Unknown config item '%s'", key); + return QDF_STATUS_SUCCESS; + } + + QDF_BUG(meta->item_handler); + if (!meta->item_handler) + return QDF_STATUS_SUCCESS; + + meta->item_handler(store, meta, value); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS cfg_ini_section_handler(void *context, const char *name) +{ + cfg_err("Unexpected section '%s'. Sections are not supported.", name); + + return QDF_STATUS_SUCCESS; +} + +#define cfg_assert_success(expr) \ +do { \ + QDF_STATUS __assert_status = (expr); \ + QDF_BUG(QDF_IS_STATUS_SUCCESS(__assert_status)); \ +} while (0) + +static bool __cfg_is_init; +static struct cfg_value_store *__cfg_global_store; +static qdf_list_t __cfg_stores_list; +static qdf_spinlock_t __cfg_stores_lock; + +struct cfg_psoc_ctx { + struct cfg_value_store *store; +}; + +static QDF_STATUS +cfg_store_alloc(const char *path, struct cfg_value_store **out_store) +{ + QDF_STATUS status; + struct cfg_value_store *store; + + cfg_enter(); + + store = qdf_mem_malloc(sizeof(*store)); + if (!store) + return QDF_STATUS_E_NOMEM; + + status = qdf_str_dup(&store->path, path); + if (QDF_IS_STATUS_ERROR(status)) + goto free_store; + + status = qdf_atomic_init(&store->users); + if (QDF_IS_STATUS_ERROR(status)) + goto free_path; + qdf_atomic_inc(&store->users); + + qdf_spin_lock_bh(&__cfg_stores_lock); + status = qdf_list_insert_back(&__cfg_stores_list, &store->node); + qdf_spin_unlock_bh(&__cfg_stores_lock); + if (QDF_IS_STATUS_ERROR(status)) + goto free_path; + + *out_store = store; + + return QDF_STATUS_SUCCESS; + +free_path: + qdf_mem_free(store->path); + +free_store: + qdf_mem_free(store); + + return status; +} + +static void cfg_store_free(struct cfg_value_store *store) +{ + QDF_STATUS status; + + cfg_enter(); + + qdf_spin_lock_bh(&__cfg_stores_lock); + status = qdf_list_remove_node(&__cfg_stores_list, &store->node); + qdf_spin_unlock_bh(&__cfg_stores_lock); + if (QDF_IS_STATUS_ERROR(status)) + QDF_DEBUG_PANIC("Failed config store list removal; status:%d", + status); + + qdf_mem_free(store->path); + qdf_mem_free(store); +} + +static QDF_STATUS +cfg_store_get(const char *path, struct cfg_value_store **out_store) +{ + QDF_STATUS status; + qdf_list_node_t *node; + + *out_store = NULL; + + qdf_spin_lock_bh(&__cfg_stores_lock); + status = qdf_list_peek_front(&__cfg_stores_list, &node); + while (QDF_IS_STATUS_SUCCESS(status)) { + struct cfg_value_store *store = + qdf_container_of(node, struct cfg_value_store, node); + + if (qdf_str_eq(path, store->path)) { + qdf_atomic_inc(&store->users); + *out_store = store; + break; + } + + status = qdf_list_peek_next(&__cfg_stores_list, node, &node); + } + qdf_spin_unlock_bh(&__cfg_stores_lock); + + return status; +} + +static void cfg_store_put(struct cfg_value_store *store) +{ + if (qdf_atomic_dec_and_test(&store->users)) + cfg_store_free(store); +} + +static struct cfg_psoc_ctx *cfg_psoc_get_ctx(struct wlan_objmgr_psoc *psoc) +{ + struct cfg_psoc_ctx *psoc_ctx; + + psoc_ctx = cfg_psoc_get_priv(psoc); + QDF_BUG(psoc_ctx); + + return psoc_ctx; +} + +struct cfg_values *cfg_psoc_get_values(struct wlan_objmgr_psoc *psoc) +{ + return &cfg_psoc_get_ctx(psoc)->store->values; +} +qdf_export_symbol(cfg_psoc_get_values); + +static QDF_STATUS +cfg_ini_parse_to_store(const char *path, struct cfg_value_store *store) +{ + QDF_STATUS status; + + status = qdf_ini_parse(path, store, cfg_ini_item_handler, + cfg_ini_section_handler); + if (QDF_IS_STATUS_ERROR(status)) + cfg_err("Failed to parse *.ini file @ %s; status:%d", + path, status); + + return status; +} + +QDF_STATUS cfg_parse_to_psoc_store(struct wlan_objmgr_psoc *psoc, + const char *path) +{ + return cfg_ini_parse_to_store(path, cfg_psoc_get_ctx(psoc)->store); +} + +qdf_export_symbol(cfg_parse_to_psoc_store); + +QDF_STATUS cfg_parse_to_global_store(const char *path) +{ + if (!__cfg_global_store) { + cfg_err("Global INI store is not valid"); + return QDF_STATUS_E_NOMEM; + } + + return cfg_ini_parse_to_store(path, __cfg_global_store); +} + +qdf_export_symbol(cfg_parse_to_global_store); + + +static QDF_STATUS +cfg_store_print(struct wlan_objmgr_psoc *psoc) +{ + struct cfg_value_store *store; + struct cfg_psoc_ctx *psoc_ctx; + + cfg_enter(); + + psoc_ctx = cfg_psoc_get_ctx(psoc); + if (!psoc_ctx) + return QDF_STATUS_E_FAILURE; + + store = psoc_ctx->store; + if (!store) + return QDF_STATUS_E_FAILURE; + +#undef __CFG_INI_MAC +#define __CFG_INI_MAC(id, mtype, ctype, name, desc, def...) \ + cfg_nofl_debug("%s "QDF_MAC_ADDR_FMT, name, \ + QDF_MAC_ADDR_REF((&store->values.id##_internal)->bytes)); + +#undef __CFG_INI_IPV4 +#define __CFG_INI_IPV4(id, mtype, ctype, name, desc, def...) \ + cfg_nofl_debug("%s %pI4", name, (&store->values.id##_internal)->bytes); + +#undef __CFG_INI_IPV6 +#define __CFG_INI_IPV6(id, mtype, ctype, name, desc, def...) \ + cfg_nofl_debug("%s %pI6c", name, (&store->values.id##_internal)->bytes); + +#undef __CFG_INI +#define __CFG_INI(id, mtype, ctype, name, min, max, fallback, desc, def...) \ + cfg_nofl_debug("%s %u", name, *(ctype *)&store->values.id##_internal); + +#undef __CFG_INI_STRING +#define __CFG_INI_STRING(id, mtype, ctype, name, min_len, max_len, ...) \ + cfg_nofl_debug("%s %s", name, (char *)&store->values.id##_internal); + + CFG_ALL + +#undef __CFG_INI_MAC +#undef __CFG_INI_IPV4 +#undef __CFG_INI_IPV6 +#undef __CFG_INI +#undef __CFG_INI_STRING + + cfg_exit(); + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +cfg_ini_config_print(struct wlan_objmgr_psoc *psoc, uint8_t *buf, + ssize_t *plen, ssize_t buflen) +{ + struct cfg_value_store *store; + struct cfg_psoc_ctx *psoc_ctx; + ssize_t len; + ssize_t total_len = buflen; + + cfg_enter(); + + psoc_ctx = cfg_psoc_get_ctx(psoc); + if (!psoc_ctx) + return QDF_STATUS_E_FAILURE; + + store = psoc_ctx->store; + if (!store) + return QDF_STATUS_E_FAILURE; + +#undef __CFG_INI_MAC +#define __CFG_INI_MAC(id, mtype, ctype, name, desc, def...) \ + do { \ + len = qdf_scnprintf(buf, buflen, "%s "QDF_MAC_ADDR_FMT"\n", name, \ + QDF_MAC_ADDR_REF((&store->values.id##_internal)->bytes)); \ + buf += len; \ + buflen -= len; \ + } while (0); + +#undef __CFG_INI_IPV4 +#define __CFG_INI_IPV4(id, mtype, ctype, name, desc, def...) \ + do { \ + len = qdf_scnprintf(buf, buflen, "%s %pI4\n", name, \ + (&store->values.id##_internal)->bytes); \ + buf += len; \ + buflen -= len; \ + } while (0); + +#undef __CFG_INI_IPV6 +#define __CFG_INI_IPV6(id, mtype, ctype, name, desc, def...) \ + do { \ + len = qdf_scnprintf(buf, buflen, "%s %pI6c\n", name, \ + (&store->values.id##_internal)->bytes); \ + buf += len; \ + buflen -= len; \ + } while (0); + +#undef __CFG_INI +#define __CFG_INI(id, mtype, ctype, name, min, max, fallback, desc, def...) \ + do { \ + len = qdf_scnprintf(buf, buflen, "%s %u\n", name, \ + *(ctype *)&store->values.id##_internal); \ + buf += len; \ + buflen -= len; \ + } while (0); + +#undef __CFG_INI_STRING +#define __CFG_INI_STRING(id, mtype, ctype, name, min_len, max_len, ...) \ + do { \ + len = qdf_scnprintf(buf, buflen, "%s %s\n", name, \ + (char *)&store->values.id##_internal); \ + buf += len; \ + buflen -= len; \ + } while (0); + + CFG_ALL + +#undef __CFG_INI_MAC +#undef __CFG_INI_IPV4 +#undef __CFG_INI_IPV6 +#undef __CFG_INI +#undef __CFG_INI_STRING + + *plen = total_len - buflen; + cfg_exit(); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_cfg_store_print(struct wlan_objmgr_psoc *psoc) +{ + return cfg_store_print(psoc); +} + +QDF_STATUS ucfg_cfg_ini_config_print(struct wlan_objmgr_psoc *psoc, + uint8_t *buf, ssize_t *plen, + ssize_t buflen) +{ + return cfg_ini_config_print(psoc, buf, plen, buflen); +} + +static QDF_STATUS +cfg_on_psoc_create(struct wlan_objmgr_psoc *psoc, void *context) +{ + QDF_STATUS status; + struct cfg_psoc_ctx *psoc_ctx; + + cfg_enter(); + + QDF_BUG(__cfg_global_store); + if (!__cfg_global_store) + return QDF_STATUS_E_FAILURE; + + psoc_ctx = qdf_mem_malloc(sizeof(*psoc_ctx)); + if (!psoc_ctx) + return QDF_STATUS_E_NOMEM; + + qdf_atomic_inc(&__cfg_global_store->users); + psoc_ctx->store = __cfg_global_store; + + status = cfg_psoc_set_priv(psoc, psoc_ctx); + if (QDF_IS_STATUS_ERROR(status)) + goto put_store; + + return QDF_STATUS_SUCCESS; + +put_store: + cfg_store_put(__cfg_global_store); + qdf_mem_free(psoc_ctx); + + return status; +} + +static QDF_STATUS +cfg_on_psoc_destroy(struct wlan_objmgr_psoc *psoc, void *context) +{ + QDF_STATUS status; + struct cfg_psoc_ctx *psoc_ctx; + + cfg_enter(); + + psoc_ctx = cfg_psoc_get_ctx(psoc); + status = cfg_psoc_unset_priv(psoc, psoc_ctx); + + cfg_store_put(psoc_ctx->store); + qdf_mem_free(psoc_ctx); + + return status; +} + +QDF_STATUS cfg_dispatcher_init(void) +{ + QDF_STATUS status; + + cfg_enter(); + + QDF_BUG(!__cfg_is_init); + if (__cfg_is_init) + return QDF_STATUS_E_INVAL; + + qdf_list_create(&__cfg_stores_list, 0); + qdf_spinlock_create(&__cfg_stores_lock); + + status = cfg_psoc_register_create(cfg_on_psoc_create); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = cfg_psoc_register_destroy(cfg_on_psoc_destroy); + if (QDF_IS_STATUS_ERROR(status)) + goto unreg_create; + + __cfg_is_init = true; + + return QDF_STATUS_SUCCESS; + +unreg_create: + cfg_assert_success(cfg_psoc_unregister_create(cfg_on_psoc_create)); + + return status; +} + +QDF_STATUS cfg_dispatcher_deinit(void) +{ + cfg_enter(); + + QDF_BUG(__cfg_is_init); + if (!__cfg_is_init) + return QDF_STATUS_E_INVAL; + + __cfg_is_init = false; + + cfg_assert_success(cfg_psoc_unregister_create(cfg_on_psoc_create)); + cfg_assert_success(cfg_psoc_unregister_destroy(cfg_on_psoc_destroy)); + + qdf_spin_lock_bh(&__cfg_stores_lock); + QDF_BUG(qdf_list_empty(&__cfg_stores_list)); + qdf_spin_unlock_bh(&__cfg_stores_lock); + + qdf_spinlock_destroy(&__cfg_stores_lock); + qdf_list_destroy(&__cfg_stores_list); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS cfg_parse(const char *path) +{ + QDF_STATUS status; + struct cfg_value_store *store; + + cfg_enter(); + + QDF_BUG(!__cfg_global_store); + if (__cfg_global_store) + return QDF_STATUS_E_INVAL; + + status = cfg_store_alloc(path, &store); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + cfg_store_set_defaults(store); + + status = cfg_ini_parse_to_store(path, store); + if (QDF_IS_STATUS_ERROR(status)) + goto free_store; + + __cfg_global_store = store; + + return QDF_STATUS_SUCCESS; + +free_store: + cfg_store_free(store); + + return status; +} + +void cfg_release(void) +{ + cfg_enter(); + + QDF_BUG(__cfg_global_store); + if (!__cfg_global_store) + return; + + cfg_store_put(__cfg_global_store); + __cfg_global_store = NULL; +} + +QDF_STATUS cfg_psoc_parse(struct wlan_objmgr_psoc *psoc, const char *path) +{ + QDF_STATUS status; + struct cfg_value_store *store; + struct cfg_psoc_ctx *psoc_ctx; + + cfg_enter(); + + QDF_BUG(__cfg_global_store); + if (!__cfg_global_store) + return QDF_STATUS_E_INVAL; + + QDF_BUG(__cfg_is_init); + if (!__cfg_is_init) + return QDF_STATUS_E_INVAL; + + QDF_BUG(psoc); + if (!psoc) + return QDF_STATUS_E_INVAL; + + QDF_BUG(path); + if (!path) + return QDF_STATUS_E_INVAL; + + psoc_ctx = cfg_psoc_get_ctx(psoc); + + QDF_BUG(psoc_ctx->store == __cfg_global_store); + if (psoc_ctx->store != __cfg_global_store) + return QDF_STATUS_SUCCESS; + + /* check if @path has been parsed before */ + status = cfg_store_get(path, &store); + if (QDF_IS_STATUS_ERROR(status)) { + status = cfg_store_alloc(path, &store); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + /* inherit global configuration */ + qdf_mem_copy(&store->values, &__cfg_global_store->values, + sizeof(store->values)); + + status = cfg_ini_parse_to_store(path, store); + if (QDF_IS_STATUS_ERROR(status)) + goto put_store; + } + + psoc_ctx->store = store; + cfg_store_put(__cfg_global_store); + + return QDF_STATUS_SUCCESS; + +put_store: + cfg_store_put(store); + + return status; +} + +qdf_export_symbol(cfg_psoc_parse); + diff --git a/drivers/staging/qca-wifi-host-cmn/dp/cmn_dp_api/dp_cal_client_api.c b/drivers/staging/qca-wifi-host-cmn/dp/cmn_dp_api/dp_cal_client_api.c new file mode 100644 index 0000000000000000000000000000000000000000..08c8e55f42e2dee8b21d1d605b5e02c35e5b9eb8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/cmn_dp_api/dp_cal_client_api.c @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "dp_cal_client_api.h" +#include "qdf_module.h" + +/* dp_cal_client_attach - function to attach cal client timer + * @cal_client_ctx: cal client timer context + * @pdev: pdev handle + * @osdev: device pointer + * @dp_iterate_peer_list : function pointer to iterate and update peer stats + * + * return: void + */ +void dp_cal_client_attach(struct cdp_cal_client **cal_client_ctx, + struct cdp_pdev *pdev, + qdf_device_t osdev, + void (*dp_iterate_peer_list)(struct cdp_pdev *)) +{ + struct cal_client *cal_cl; + + *cal_client_ctx = qdf_mem_malloc(sizeof(struct cal_client)); + + if (!(*cal_client_ctx)) + return; + + cal_cl = (struct cal_client *)(*cal_client_ctx); + cal_cl->iterate_update_peer_list = dp_iterate_peer_list; + cal_cl->pdev_hdl = pdev; + + qdf_timer_init(osdev, &cal_cl->cal_client_timer, + dp_cal_client_stats_timer_fn, *cal_client_ctx, + QDF_TIMER_TYPE_WAKE_APPS); +} + +qdf_export_symbol(dp_cal_client_attach); + +/* dp_cal_client_detach - detach cal client timer + * @cal_client_ctx: cal client timer context + * + * return: void + */ +void dp_cal_client_detach(struct cdp_cal_client **cal_client_ctx) +{ + struct cal_client *cal_cl; + + if (*cal_client_ctx) { + cal_cl = (struct cal_client *)*cal_client_ctx; + + qdf_timer_stop(&cal_cl->cal_client_timer); + qdf_timer_free(&cal_cl->cal_client_timer); + qdf_mem_free(cal_cl); + *cal_client_ctx = NULL; + } +} + +qdf_export_symbol(dp_cal_client_detach); + +/* dp_cal_client_timer_start- api to start cal client timer + * @ctx: cal client timer ctx + * + * return: void + */ +void dp_cal_client_timer_start(void *ctx) +{ + struct cal_client *cal_cl; + + if (ctx) { + cal_cl = (struct cal_client *)ctx; + qdf_timer_start(&cal_cl->cal_client_timer, DP_CAL_CLIENT_TIME); + } +} + +qdf_export_symbol(dp_cal_client_timer_start); + +/* dp_cal_client_timer_stop- api to stop cal client timer + * @ctx: cal client timer ctx + * + * return: void + */ +void dp_cal_client_timer_stop(void *ctx) +{ + struct cal_client *cal_cl; + + if (ctx) { + cal_cl = (struct cal_client *)ctx; + qdf_timer_sync_cancel(&cal_cl->cal_client_timer); + qdf_timer_stop(&cal_cl->cal_client_timer); + } +} + +qdf_export_symbol(dp_cal_client_timer_stop); + +/* dp_cal_client_stats_timer_fn- function called on timer interval + * @ctx: cal client timer ctx + * + * return: void + */ +void dp_cal_client_stats_timer_fn(void *ctx) +{ + struct cal_client *cal_cl = (struct cal_client *)ctx; + + if (!cal_cl) + return; + + cal_cl->iterate_update_peer_list(cal_cl->pdev_hdl); + qdf_timer_mod(&cal_cl->cal_client_timer, DP_CAL_CLIENT_TIME); +} + +qdf_export_symbol(dp_cal_client_stats_timer_fn); + +/*dp_cal_client_update_peer_stats - update peer stats in peer + * @peer_stats: cdp peer stats pointer + * + * return: void + */ +void dp_cal_client_update_peer_stats(struct cdp_peer_stats *peer_stats) +{ + uint32_t temp_rx_bytes = peer_stats->rx.to_stack.bytes; + uint32_t temp_rx_data = peer_stats->rx.to_stack.num; + uint32_t temp_tx_bytes = peer_stats->tx.tx_success.bytes; + uint32_t temp_tx_data = peer_stats->tx.tx_success.num; + uint32_t temp_tx_ucast_pkts = peer_stats->tx.ucast.num; + + peer_stats->rx.rx_byte_rate = temp_rx_bytes - + peer_stats->rx.rx_bytes_success_last; + peer_stats->rx.rx_data_rate = temp_rx_data - + peer_stats->rx.rx_data_success_last; + peer_stats->tx.tx_byte_rate = temp_tx_bytes - + peer_stats->tx.tx_bytes_success_last; + peer_stats->tx.tx_data_rate = temp_tx_data - + peer_stats->tx.tx_data_success_last; + peer_stats->tx.tx_data_ucast_rate = temp_tx_ucast_pkts - + peer_stats->tx.tx_data_ucast_last; + + /* Check tx and rx packets in last one second, and increment + * inactive time for peer + */ + if (peer_stats->tx.tx_data_rate || peer_stats->rx.rx_data_rate) + peer_stats->tx.inactive_time = 0; + else + peer_stats->tx.inactive_time++; + + peer_stats->rx.rx_bytes_success_last = temp_rx_bytes; + peer_stats->rx.rx_data_success_last = temp_rx_data; + peer_stats->tx.tx_bytes_success_last = temp_tx_bytes; + peer_stats->tx.tx_data_success_last = temp_tx_data; + peer_stats->tx.tx_data_ucast_last = temp_tx_ucast_pkts; + + if (peer_stats->tx.tx_data_ucast_rate) { + if (peer_stats->tx.tx_data_ucast_rate > + peer_stats->tx.tx_data_rate) + peer_stats->tx.last_per = + ((peer_stats->tx.tx_data_ucast_rate - + peer_stats->tx.tx_data_rate) * 100) / + peer_stats->tx.tx_data_ucast_rate; + else + peer_stats->tx.last_per = 0; + } + +} + +qdf_export_symbol(dp_cal_client_update_peer_stats); + diff --git a/drivers/staging/qca-wifi-host-cmn/dp/cmn_dp_api/dp_ratetable.c b/drivers/staging/qca-wifi-host-cmn/dp/cmn_dp_api/dp_ratetable.c new file mode 100644 index 0000000000000000000000000000000000000000..78bc187a1b6d2aeff924329e9987b9fe6a52f6f1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/cmn_dp_api/dp_ratetable.c @@ -0,0 +1,3302 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "dp_ratetable.h" +#include "qdf_module.h" +#include "cdp_txrx_mon_struct.h" + +enum { + MODE_11A = 0, /* 11a Mode */ + MODE_11G = 1, /* 11b/g Mode */ + MODE_11B = 2, /* 11b Mode */ + MODE_11GONLY = 3, /* 11g only Mode */ + MODE_11NA_HT20 = 4, /* 11a HT20 mode */ + MODE_11NG_HT20 = 5, /* 11g HT20 mode */ + MODE_11NA_HT40 = 6, /* 11a HT40 mode */ + MODE_11NG_HT40 = 7, /* 11g HT40 mode */ + MODE_11AC_VHT20 = 8, + MODE_11AC_VHT40 = 9, + MODE_11AC_VHT80 = 10, + MODE_11AC_VHT20_2G = 11, + MODE_11AC_VHT40_2G = 12, + MODE_11AC_VHT80_2G = 13, + MODE_11AC_VHT80_80 = 14, + MODE_11AC_VHT160 = 15, + MODE_11AX_HE20 = 16, + MODE_11AX_HE40 = 17, + MODE_11AX_HE80 = 18, + MODE_11AX_HE80_80 = 19, + MODE_11AX_HE160 = 20, + MODE_11AX_HE20_2G = 21, + MODE_11AX_HE40_2G = 22, + MODE_11AX_HE80_2G = 23, + /* MODE_UNKNOWN should not be used within the host / target interface. + * Thus, it is permissible for ODE_UNKNOWN to be conditionally-defined, + * taking different values when compiling for different targets. + */ + MODE_UNKNOWN, + MODE_UNKNOWN_NO_160MHZ_SUPPORT = 14, /* not needed? */ + MODE_UNKNOWN_160MHZ_SUPPORT = MODE_UNKNOWN, /* not needed? */ +} DP_PHY_MODE; + +/* The following would span more than one octet + * when 160MHz BW defined for VHT + * Also it's important to maintain the ordering of this enum + * else it would break other rate adapation functions + */ + +enum DP_CMN_RATECODE_PREAM_TYPE { + DP_CMN_RATECODE_PREAM_OFDM, + DP_CMN_RATECODE_PREAM_CCK, + DP_CMN_RATECODE_PREAM_HT, + DP_CMN_RATECODE_PREAM_VHT, + DP_CMN_RATECODE_PREAM_HE, + DP_CMN_RATECODE_PREAM_COUNT, +}; + +/* + * @validmodemask : bit mask where 1 indicates the rate is valid for that mode + * @DP_CMN_MODULATION : modulation CCK/OFDM/MCS + * @propmask : bit mask of rate property. NSS/STBC/TXBF/LDPC + * @ratekbps : Rate in Kbits per second + * @ratebpdsgi : Rate in kbits per second if HT SGI is enabled + * @ratekbpsdgi : Rate in kbits per second if 1.6us GI is enabled + * @ratekbpsqgi : Rate in kbits per second if 3.2us GI is enabled + * @ratekbpsdcm : Rate in kbits per second if DCM is applied + * @userratekabps : User rate in KBits per second + * @dot11rate : Value that goes into supported rates info element of MLME + * @ratecode : rate that goes into hw descriptors + */ +const struct DP_CMN_RATE_TABLE { + struct { + uint32_t validmodemask; + enum DP_CMN_MODULATION_TYPE phy; + uint32_t ratekbps; + uint32_t ratekbpssgi; + uint32_t ratekbpsdgi; + uint32_t ratekbpsqgi; + uint32_t ratekbpsdcm; + uint32_t userratekbps; + uint16_t ratecode; + } info[DP_RATE_TABLE_SIZE]; +} DP_CMN_RATE_TABLE; + +/*Use the highest bit to indicate the invalid bcc rates accorss + *different PHYMODE + */ +#define INVALID_BCC_RATE BIT(MODE_UNKNOWN) + +#define CCK_MODE_VALID_MASK ((1 << MODE_11G) | (1 << MODE_11B) | \ + (1 << MODE_11NG_HT20) | (1 << MODE_11NG_HT40) | \ + (1 << MODE_11AC_VHT40_2G) | (1 << MODE_11AC_VHT20_2G) |\ + (1 << MODE_11AC_VHT80_2G)) + +#define OFDM_MODE_VALID_MASK ((1 << MODE_11A) | (1 << MODE_11G) | \ + (1 << MODE_11GONLY) | (1 << MODE_11NA_HT20) | \ + (1 << MODE_11NG_HT20) \ + | (1 << MODE_11NA_HT40) | (1 << MODE_11NG_HT40) \ + | (1 << MODE_11AC_VHT40) | (1 << MODE_11AC_VHT20) | \ + (1 << MODE_11AC_VHT80) \ + | (1 << MODE_11AC_VHT40_2G) | (1 << MODE_11AC_VHT20_2G) | \ + (1 << MODE_11AC_VHT80_2G) \ + | (1 << MODE_11AC_VHT160) | (1 << MODE_11AC_VHT80_80)) + +#define HT20_MODE_VALID_MASK ((1 << MODE_11NA_HT20) | \ + (1 << MODE_11NG_HT20) \ + | (1 << MODE_11NA_HT40) | (1 << MODE_11NG_HT40) \ + | (1 << MODE_11AC_VHT40) | (1 << MODE_11AC_VHT20) | \ + (1 << MODE_11AC_VHT80) \ + | (1 << MODE_11AC_VHT40_2G) | (1 << MODE_11AC_VHT20_2G) | \ + (1 << MODE_11AC_VHT80_2G) \ + | (1 << MODE_11AC_VHT160) | (1 << MODE_11AC_VHT80_80)) + +#define HT40_MODE_VALID_MASK ((1 << MODE_11NA_HT40) | \ + (1 << MODE_11NG_HT40) \ + | (1 << MODE_11AC_VHT40) | (1 << MODE_11AC_VHT80) \ + | (1 << MODE_11AC_VHT40_2G) | (1 << MODE_11AC_VHT80_2G) \ + | (1 << MODE_11AC_VHT160) | (1 << MODE_11AC_VHT80_80)) + +#define VHT20_MODE_VALID_MASK ((1 << MODE_11AC_VHT20) | \ + (1 << MODE_11AC_VHT40) | (1 << MODE_11AC_VHT80) | \ + (1 << MODE_11AC_VHT40_2G) | (1 << MODE_11AC_VHT20_2G) | \ + (1 << MODE_11AC_VHT80_2G) | \ + (1 << MODE_11AC_VHT160) | (1 << MODE_11AC_VHT80_80)) + +#define VHT40_MODE_VALID_MASK ((1 << MODE_11AC_VHT40) | \ + (1 << MODE_11AC_VHT80) | \ + (1 << MODE_11AC_VHT40_2G) | (1 << MODE_11AC_VHT80_2G) | \ + (1 << MODE_11AC_VHT160) | (1 << MODE_11AC_VHT80_80)) + +#define VHT80_MODE_VALID_MASK ((1 << MODE_11AC_VHT80) | \ + (1 << MODE_11AC_VHT80_2G) | \ + (1 << MODE_11AC_VHT160) | (1 << MODE_11AC_VHT80_80)) + +#define VHT160_MODE_VALID_MASK ((1 << MODE_11AC_VHT160) | \ + (1 << MODE_11AC_VHT80_80)) + +#define VHT20_LDPC_ONLY_MASKS (VHT20_MODE_VALID_MASK | INVALID_BCC_RATE) +#define VHT40_LDPC_ONLY_MASKS (VHT40_MODE_VALID_MASK | INVALID_BCC_RATE) +#define VHT80_LDPC_ONLY_MASKS (VHT80_MODE_VALID_MASK | INVALID_BCC_RATE) +#define VHT160_LDPC_ONLY_MASKS (VHT160_MODE_VALID_MASK | INVALID_BCC_RATE) + +#define VHT_INVALID_MCS (0xFF) +#define VHT_INVALID_RATES_MASK 0 + +#define HE20_MODE_VALID_MASK ((1 << MODE_11AX_HE20) |\ + (1 << MODE_11AX_HE40) | \ + (1 << MODE_11AX_HE80) | (1 << MODE_11AX_HE20_2G) | \ + (1 << MODE_11AX_HE40_2G) | \ + (1 << MODE_11AX_HE80_2G) | (1 << MODE_11AX_HE80_80) | \ + (1 << MODE_11AX_HE160)) + +#define HE40_MODE_VALID_MASK ((1 << MODE_11AX_HE40) | \ + (1 << MODE_11AX_HE80) | (1 << MODE_11AX_HE40_2G) | \ + (1 << MODE_11AX_HE80_2G) | (1 << MODE_11AX_HE80_80) | \ + (1 << MODE_11AX_HE160)) + +#define HE80_MODE_VALID_MASK ((1 << MODE_11AX_HE80) | \ + (1 << MODE_11AX_HE80_2G) | \ + (1 << MODE_11AX_HE80_80) | (1 << MODE_11AX_HE160)) + +#define HE160_MODE_VALID_MASK ((1 << MODE_11AX_HE80_80) | \ + (1 << MODE_11AX_HE160)) + +#define HE20_LDPC_ONLY_MASKS (HE20_MODE_VALID_MASK | INVALID_BCC_RATE) +#define HE40_LDPC_ONLY_MASKS (HE40_MODE_VALID_MASK | INVALID_BCC_RATE) +#define HE80_LDPC_ONLY_MASKS (HE80_MODE_VALID_MASK | INVALID_BCC_RATE) +#define HE160_LDPC_ONLY_MASKS (HE160_MODE_VALID_MASK | INVALID_BCC_RATE) + +#define HE_INVALID_RATES_MASK 0 + +static const struct DP_CMN_RATE_TABLE dp_11abgnratetable = { + { + /* When number of spatial strams > 4 or 11AX support is enabled */ + + /* 0 11 Mb */ { CCK_MODE_VALID_MASK, DP_CMN_MOD_IEEE80211_T_CCK, + 11000, 11000, 0, 0, 0, 11000, + 0x100 }, + /* 1 5.5 Mb */ { CCK_MODE_VALID_MASK, DP_CMN_MOD_IEEE80211_T_CCK, + 5500, 5500, 0, 0, 0, 5500, 0x101 + }, + /* 2 2 Mb */ { CCK_MODE_VALID_MASK, DP_CMN_MOD_IEEE80211_T_CCK, + 2000, 2000, 0, 0, 0, 2000, 0x102 + }, + /* 3 1 Mb */ { CCK_MODE_VALID_MASK, DP_CMN_MOD_IEEE80211_T_CCK, + 1000, 1000, 0, 0, 0, 1000, 0x103 + }, + /* 4 48 Mb */ { OFDM_MODE_VALID_MASK, DP_CMN_MOD_IEEE80211_T_OFDM, + 48000, 48000, 0, 0, 0, 48000, + 0x000 }, + /* 5 24 Mb */ { OFDM_MODE_VALID_MASK, DP_CMN_MOD_IEEE80211_T_OFDM, + 24000, 24000, 0, 0, 0, 24000, + 0x001 }, + /* 6 12 Mb */ { OFDM_MODE_VALID_MASK, DP_CMN_MOD_IEEE80211_T_OFDM, + 12000, 12000, 0, 0, 0, 12000, + 0x002 }, + /* 7 6 Mb */ { OFDM_MODE_VALID_MASK, DP_CMN_MOD_IEEE80211_T_OFDM, + 6000, 6000, 0, 0, 0, 6000, + 0x003 }, + /* 8 54 Mb */ { OFDM_MODE_VALID_MASK, DP_CMN_MOD_IEEE80211_T_OFDM, + 54000, 54000, 0, 0, 0, 54000, + 0x004 }, + /* 9 36 Mb */ { OFDM_MODE_VALID_MASK, DP_CMN_MOD_IEEE80211_T_OFDM, + 36000, 36000, 0, 0, 0, 36000, + 0x005 }, + /* 10 18 Mb */ { OFDM_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_OFDM, + 18000, 18000, 0, 0, 0, 18000, + 0x006 }, + /* 11 9 Mb */ { OFDM_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_OFDM, + 9000, 9000, 0, 0, 0, 9000, + 0x007}, + + /* 12 MCS-00 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 6500, + 7200, 0, 0, 0, 6500, 0x200 }, + /* 13 MCS-01 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 13000, + 14400, 0, 0, 0, 13000, 0x201 }, + /* 14 MCS-02 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 19500, + 21700, 0, 0, 0, 19500, 0x202 }, + /* 15 MCS-03 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 26000, + 28900, 0, 0, 0, 26000, 0x203 }, + /* 16 MCS-04 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 39000, + 43300, 0, 0, 0, 39000, 0x204 }, + /* 17 MCS-05 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 52000, + 57800, 0, 0, 0, 52000, 0x205 }, + /* 18 MCS-06 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 58500, + 65000, 0, 0, 0, 58500, 0x206 }, + /* 19 MCS-07 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 65000, + 72200, 0, 0, 0, 65000, 0x207 }, + /* When number of spatial streams > 1 */ + /* 20 MCS-00 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 13000, + 14400, 0, 0, 0, 13000, 0x220 }, + /* 21 MCS-01 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 26000, + 28900, 0, 0, 0, 26000, 0x221 }, + /* 22 MCS-02 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 39000, + 43300, 0, 0, 0, 39000, 0x222 }, + /* 23 MCS-03 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 52000, + 57800, 0, 0, 0, 52000, 0x223 }, + /* 24 MCS-04 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 78000, + 86700, 0, 0, 0, 78000, 0x224 }, + /* 25 MCS-05 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 104000, + 115600, 0, 0, 0, 104000, 0x225 }, + /* 26 MCS-06 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 117000, + 130000, 0, 0, 0, 117000, 0x226 }, + /* 27 MCS-07 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 130000, + 144000, 0, 0, 0, 130000, 0x227 }, + /* When number of spatial streams > 2 */ + /* 28 MCS-00 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 19500, + 21700, 0, 0, 0, 19500, 0x240 }, + /* 29 MCS-01 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 39000, + 43300, 0, 0, 0, 39000, 0x241 }, + /* 30 MCS-02 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 58500, + 65000, 0, 0, 0, 58500, 0x242 }, + /* 31 MCS-03 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 78000, + 86700, 0, 0, 0, 78000, 0x243 }, + /* 32 MCS-04 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 117000, + 130000, 0, 0, 0, 117000, 0x244 }, + /* 33 MCS-05 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 156000, + 173300, 0, 0, 0, 156000, 0x245 }, + /* 34 MCS-06 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 175500, + 195000, 0, 0, 0, 175500, 0x246 }, + /* 35 MCS-07 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 195000, + 216700, 0, 0, 0, 195000, 0x247 }, + /* When number of spatial streams > 3 */ + /* 36 MCS-00 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 26000, + 28900, 0, 0, 0, 26000, 0x260 }, + /* 37 MCS-01 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 52000, + 57800, 0, 0, 0, 52000, 0x261 }, + /* 38 MCS-02 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 78000, + 86700, 0, 0, 0, 78000, 0x262 }, + /* 39 MCS-03 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 104000, + 115600, 0, 0, 0, 104000, 0x263 }, + /* 40 MCS-04 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 156000, + 173300, 0, 0, 0, 156000, 0x264 }, + /* 41 MCS-05 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 208000, + 231100, 0, 0, 0, 208000, 0x265 }, + /* 42 MCS-06 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 234000, + 260000, 0, 0, 0, 234000, 0x266 }, + /* 43 MCS-07 */ { HT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_20, 260000, + 288900, 0, 0, 0, 260000, 0x267 }, + + /* 11n HT40 rates */ + /* 44 MCS-00 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 13500, + 15000, 0, 0, 0, 13500, 0x200 }, + /* 45 MCS-01 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 27000, + 30000, 0, 0, 0, 27000, 0x201 }, + /* 46 MCS-02 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 40500, + 45000, 0, 0, 0, 40500, 0x202 }, + /* 47 MCS-03 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 54000, + 60000, 0, 0, 0, 54000, 0x203 }, + /* 48 MCS-04 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 81500, + 90000, 0, 0, 0, 81500, 0x204 }, + /* 49 MCS-05 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 108000, + 120000, 0, 0, 0, 108000, 0x205 }, + /* 50 MCS-06 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 121500, + 135000, 0, 0, 0, 121500, 0x206 }, + /* 51 MCS-07 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 135000, + 150000, 0, 0, 0, 135000, 0x207 }, + /* When number of spatial streams > 1 */ + /* 52 MCS-00 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 27000, + 30000, 0, 0, 0, 27000, 0x220 }, + /* 53 MCS-01 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 54000, + 60000, 0, 0, 0, 54000, 0x221 }, + /* 54 MCS-02 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 81000, + 90000, 0, 0, 0, 81000, 0x222 }, + /* 55 MCS-03 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 108000, + 120000, 0, 0, 0, 108000, 0x223 }, + /* 56 MCS-04 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 162000, + 180000, 0, 0, 0, 162000, 0x224 }, + /* 57 MCS-05 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 216000, + 240000, 0, 0, 0, 216000, 0x225 }, + /* 58 MCS-06 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 243000, + 270000, 0, 0, 0, 243000, 0x226 }, + /* 59 MCS-07 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 270000, + 300000, 0, 0, 0, 270000, 0x227 }, + /* When number of spatial streams > 2 */ + /* 60 MCS-00 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 40500, + 45000, 0, 0, 0, 40500, 0x240 }, + /* 61 MCS-01 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 81000, + 90000, 0, 0, 0, 81000, 0x241 }, + /* 62 MCS-02 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 121500, + 135000, 0, 0, 0, 121500, 0x242 }, + /* 63 MCS-03 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 162000, + 180000, 0, 0, 0, 162000, 0x243 }, + /* 64 MCS-04 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 243000, + 270000, 0, 0, 0, 243000, 0x244 }, + /* 65 MCS-05 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 324000, + 360000, 0, 0, 0, 324000, 0x245 }, + /* 66 MCS-06 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 364500, + 405000, 0, 0, 0, 364500, 0x246 }, + /* 67 MCS-07 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 405000, + 450000, 0, 0, 0, 405000, 0x247 }, + /* When number of spatial streams > 3 */ + /* 68 MCS-00 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 54000, + 60000, 0, 0, 0, 54000, 0x260 }, + /* 69 MCS-01 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 108000, + 120000, 0, 0, 0, 108000, 0x261 }, + /* 70 MCS-02 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 162000, + 180000, 0, 0, 0, 162000, 0x262 }, + /* 71 MCS-03 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 216000, + 240000, 0, 0, 0, 216000, 0x263 }, + /* 72 MCS-04 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 324000, + 360000, 0, 0, 0, 324000, 0x264 }, + /* 73 MCS-05 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 432000, + 480000, 0, 0, 0, 432000, 0x265 }, + /* 74 MCS-06 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 486000, + 540000, 0, 0, 0, 486000, 0x266 }, + /* 75 MCS-07 */ { HT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HT_40, 540000, + 600000, 0, 0, 0, 540000, 0x267 }, + + /* 11ac VHT20 rates */ + /* 76 MCS-00 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 6500, + 7200, 0, 0, 0, 6500, 0x300 }, + /* 77 MCS-01 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 13000, + 14400, 0, 0, 0, 13000, 0x301 }, + /* 78 MCS-02 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 19500, + 21700, 0, 0, 0, 19500, 0x302 }, + /* 79 MCS-03 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 26000, + 28900, 0, 0, 0, 26000, 0x303 }, + /* 80 MCS-04 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 39000, + 43300, 0, 0, 0, 39000, 0x304 }, + /* 81 MCS-05 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 52000, + 57800, 0, 0, 0, 52000, 0x305 }, + /* 82 MCS-06 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 58500, + 65000, 0, 0, 0, 58500, 0x306 }, + /* 83 MCS-07 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 65000, + 72200, 0, 0, 0, 65000, 0x307 }, + /* 84 MCS-08 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 78000, + 86700, 0, 0, 0, 78000, 0x308 }, + /* 85 MCS-09 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 86500, + 96000, 0, 0, 0, 86500, 0x309 }, + /* When we support very hight throughput MCS */ + /* 86 MCS-10 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, + 97500, 108300, 0, 0, 0, + 97500, 0x30a}, + /* 87 MCS-11 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, + 108300, 120400, 0, 0, 0, + 108300, 0x30b}, + + /* When number of spatial streams > 1 */ + /* 88 MCS-00 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 13000, + 14400, 0, 0, 0, 13000, 0x320 }, + /* 89 MCS-01 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 26000, + 28900, 0, 0, 0, 26000, 0x321 }, + /* 90 MCS-02 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 39000, + 43300, 0, 0, 0, 39000, 0x322 }, + /* 91 MCS-03 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 52000, + 57800, 0, 0, 0, 52000, 0x323 }, + /* 92 MCS-04 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 78000, + 86700, 0, 0, 0, 78000, 0x324 }, + /* 93 MCS-05 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 104000, + 115600, 0, 0, 0, 104000, 0x325 }, + /* 94 MCS-06 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 117000, + 130000, 0, 0, 0, 117000, 0x326 }, + /* 95 MCS-07 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 130000, + 144400, 0, 0, 0, 130000, 0x327 }, + /* 96 MCS-08 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 156000, + 173300, 0, 0, 0, 156000, 0x328 }, + /* 97 MCS-09 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 173000, + 192000, 0, 0, 0, 173000, 0x329 }, + /* 98 MCS-10 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, + 195000, 216700, 0, 0, 0, + 195000, 0x32a }, + /* 99 MCS-11 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, + 216700, 240700, 0, 0, 0, + 216700, 0x32b }, + + /* when number of spatial streams > 2 */ + /* 100 MCS-00 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 19500, + 21700, 0, 0, 0, 19500, 0x340 }, + /* 101 MCS-01 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 39000, + 43300, 0, 0, 0, 39000, 0x341 }, + /* 102 MCS-02 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 58500, + 65000, 0, 0, 0, 58500, 0x342 }, + /* 103 MCS-03 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 78000, + 86700, 0, 0, 0, 78000, 0x343 }, + /* 104 MCS-04 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 117000, + 130000, 0, 0, 0, 117000, 0x344 }, + /* 105 MCS-05 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 156000, + 173300, 0, 0, 0, 156000, 0x345 }, + /* 106 MCS-06 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 175500, + 195000, 0, 0, 0, 175500, 0x346 }, + /* 107 MCS-07 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 195000, + 216700, 0, 0, 0, 195000, 0x347 }, + /* 108 MCS-08 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 234000, + 260000, 0, 0, 0, 234000, 0x348 }, + /* 109 MCS-09 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 260000, + 288900, 0, 0, 0, 260000, 0x349 }, + /* 110 MCS-10 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, + 292500, 325000, 0, 0, 0, + 292500, 0x34a}, + /* 111 MCS-11 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, + 325000, 361100, 0, 0, 0, + 325000, 0x34b}, + + /* when number of spatial streams > 3 */ + /* 112 MCS-00 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 26000, + 28900, 0, 0, 0, 26000, 0x360 }, + /* 113 MCS-01 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 52000, + 57800, 0, 0, 0, 52000, 0x361 }, + /* 114 MCS-02 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 78000, + 86700, 0, 0, 0, 78000, 0x362 }, + /* 115 MCS-03 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 104000, + 115600, 0, 0, 0, 104000, 0x363 }, + /* 116 MCS-04 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 156000, + 173300, 0, 0, 0, 156000, 0x364 }, + /* 117 MCS-05 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 208000, + 231100, 0, 0, 0, 208000, 0x365 }, + /* 118 MCS-06 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 234000, + 260000, 0, 0, 0, 234000, 0x366 }, + /* 119 MCS-07 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 260000, + 288900, 0, 0, 0, 260000, 0x367 }, + /* 120 MCS-08 */ { VHT20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_20, 312000, + 346700, 0, 0, 0, 312000, 0x368 }, + /* 121 MCS-09 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 344000, + 378400, 0, 0, 0, 344000, 0x369 }, + /* 122 MCS-10 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, + 390000, 433300, 0, 0, 0, 390000, + 0x36a}, + /* 123 MCS-11 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 433300, + 481500, 0, 0, 0, 433300, 0x36b}, + + /* when number of spatial streams > 4 */ + /* 124 MCS-00 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 32500, + 36100, 0, 0, 0, 32500, 0x380 }, + /* 125 MCS-01 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 65000, + 72200, 0, 0, 0, 65000, 0x381 }, + /* 126 MCS-02 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 97500, + 108300, 0, 0, 0, 97500, 0x382 }, + /* 127 MCS-03 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 130000, + 144400, 0, 0, 0, 130000, 0x383 }, + /* 128 MCS-04 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 195000, + 216700, 0, 0, 0, 195000, 0x384 }, + /* 129 MCS-05 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 260000, + 288900, 0, 0, 0, 260000, 0x385 }, + /* 130 MCS-06 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 292500, + 325000, 0, 0, 0, 292500, 0x386 }, + /* 131 MCS-07 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 325000, + 361100, 0, 0, 0, 325000, 0x387 }, + /* 132 MCS-08 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 390000, + 433300, 0, 0, 0, 390000, 0x388 }, + /* 133 MCS-09 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 433300, + 481500, 0, 0, 0, 433300, 0x389 }, + /* 134 MCS-10 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 487500, + 541700, 0, 0, 0, 487500, 0x38a }, + /* 135 MCS-11 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 541700, + 601900, 0, 0, 0, 541700, 0x38b }, + + /* When number of spatial streams > 5 */ + /* 136 MCS-00 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 39000, + 43300, 0, 0, 0, 39000, 0x3a0 }, + /* 137 MCS-01 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 78000, + 86700, 0, 0, 0, 78000, 0x3a1 }, + /* 138 MCS-02 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 117000, + 130000, 0, 0, 0, 117000, 0x3a2 }, + /* 139 MCS-03 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 156000, + 173300, 0, 0, 0, 156000, 0x3a3 }, + /* 140 MCS-04 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 234000, + 260000, 0, 0, 0, 234000, 0x3a4 }, + /* 141 MCS-05 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 312000, + 346700, 0, 0, 0, 312000, 0x3a5 }, + /* 142 MCS-06 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 351000, + 390000, 0, 0, 0, 351000, 0x3a6 }, + /* 143 MCS-07 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 390000, + 433300, 0, 0, 0, 390000, 0x3a7 }, + /* 144 MCS-08 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 468000, + 520000, 0, 0, 0, 468000, 0x3a8 }, + /* 145 MCS-09 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 520000, + 577800, 0, 0, 0, 520000, 0x3a9 }, + /* 146 MCS-10 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, + 585000, 650000, 0, 0, 0, + 585000, 0x3aa }, + /* 147 MCS-11 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, + 650000, 722200, 0, 0, + 0, 650000, 0x3ab }, + + /* when number of spatial streams > 6 */ + /* 148 MCS-00 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 45500, + 50600, 0, 0, 0, 45500, 0x3c0 }, + /* 149 MCS-01 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 91000, + 101100, 0, 0, 0, 91000, 0x3c1 }, + /* 150 MCS-02 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 136500, + 151700, 0, 0, 0, 136500, 0x3c2 }, + /* 151 MCS-03 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 182000, + 202200, 0, 0, 0, 182000, 0x3c3 }, + /* 152 MCS-04 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 273000, + 303300, 0, 0, 0, 273000, 0x3c4 }, + /* 153 MCS-05 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 364000, + 404400, 0, 0, 0, 364000, 0x3c5 }, + /* 154 MCS-06 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 409500, + 455000, 0, 0, 0, 409500, 0x3c6 }, + /* 155 MCS-07 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 455000, + 505600, 0, 0, 0, 455000, 0x3c7 }, + /* 156 MCS-08 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 546000, + 606700, 0, 0, 0, 546000, 0x3c8 }, + /* 157 MCS-09 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 606700, + 674100, 0, 0, 0, 606700, 0x3c9 }, + /* 158 MCS-10 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 682500, + 758300, 0, 0, 0, 682500, 0x3ca }, + /* 159 MCS-11 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 758300, + 842600, 0, 0, 0, 758300, 0x3cb }, + + /* when number of spatial streams > 7 */ + /* 160 MCS-00 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 52000, + 57800, 0, 0, 0, 52000, 0x3e0 }, + /* 161 MCS-01 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 104000, + 115600, 0, 0, 0, 104000, 0x3e1 }, + /* 162 MCS-02 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 156000, + 173300, 0, 0, 0, 156000, 0x3e2 }, + /* 163 MCS-03 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 208000, + 231100, 0, 0, 0, 208000, 0x3e3 }, + /* 164 MCS-04 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 312000, + 346700, 0, 0, 0, 312000, 0x3e4 }, + /* 165 MCS-05 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 416000, + 462200, 0, 0, 0, 416000, 0x3e5 }, + /* 166 MCS-06 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 468000, + 520000, 0, 0, 0, 468000, 0x3e6 }, + /* 167 MCS-07 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 520000, + 577800, 0, 0, 0, 520000, 0x3e7 }, + /* 168 MCS-08 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 624000, + 693300, 0, 0, 0, 624000, 0x3e8 }, + /* 169 MCS-09 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 693300, + 770400, 0, 0, 0, 693300, 0x3e9 }, + /* 170 MCS-10 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 780000, + 866700, 0, 0, 0, 780000, 0x3ea }, + /* 171 MCS-11 */ { VHT20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_20, 866700, + 963000, 0, 0, 0, 866700, 0x3eb }, + + /* 11ac VHT40 rates */ + /* 172 MCS-00 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 13500, + 15000, 0, 0, 0, 13500, 0x300 }, + /* 173 MCS-01 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 27000, + 30000, 0, 0, 0, 27000, 0x301 }, + /* 174 MCS-02 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 40500, + 45000, 0, 0, 0, 40500, 0x302 }, + /* 175 MCS-03 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 54000, + 60000, 0, 0, 0, 54000, 0x303 }, + /* 176 MCS-04 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 81000, + 90000, 0, 0, 0, 81000, 0x304 }, + /* 177 MCS-05 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 108000, + 120000, 0, 0, 0, 108000, 0x305 }, + /* 178 MCS-06 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 121500, + 135000, 0, 0, 0, 121500, 0x306 }, + /* 179 MCS-07 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 135000, + 150000, 0, 0, 0, 135000, 0x307 }, + /* 180 MCS-08 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 162000, + 180000, 0, 0, 0, 162000, 0x308 }, + /* 181 MCS-09 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 180000, + 200000, 0, 0, 0, 180000, 0x309 }, + /* 182 MCS-10 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 202500, + 225000, 0, 0, 0, 202500, 0x30a }, + /* 183 MCS-11 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 225000, + 250000, 0, 0, 0, 225000, 0x30b }, + + /* when number of spatial streams > 1 */ + /* 184 MCS-00 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 27000, + 30000, 0, 0, 0, 27000, 0x320 }, + /* 185 MCS-01 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 54000, + 60000, 0, 0, 0, 54000, 0x321 }, + /* 186 MCS-02 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 81000, + 90000, 0, 0, 0, 81000, 0x322 }, + /* 187 MCS-03 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 108000, + 120000, 0, 0, 0, 108000, 0x323 }, + /* 188 MCS-04 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 162000, + 180000, 0, 0, 0, 162000, 0x324 }, + /* 189 MCS-05 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 216000, + 240000, 0, 0, 0, 216000, 0x325 }, + /* 190 MCS-06 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 243000, + 270000, 0, 0, 0, 243000, 0x326 }, + /* 191 MCS-07 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 270000, + 300000, 0, 0, 0, 270000, 0x327 }, + /* 192 MCS-08 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 324000, + 360000, 0, 0, 0, 324000, 0x328 }, + /* 193 MCS-09 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 360000, + 400000, 0, 0, 0, 360000, 0x329 }, + /* 194 MCS-10 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 405000, + 450000, 0, 0, 0, 405000, 0x32a }, + /* 195 MCS-11 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 450000, + 500000, 0, 0, 0, 450000, 0x32b }, + + /* When number of spatial streams > 2 use below rate*/ + /* 196 MCS-00 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 40500, + 45000, 0, 0, 0, 40500, 0x340 }, + /* 197 MCS-01 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 81000, + 90000, 0, 0, 0, 81000, 0x341 }, + /* 198 MCS-02 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 121500, + 135000, 0, 0, 0, 121500, 0x342 }, + /* 199 MCS-03 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 162000, + 180000, 0, 0, 0, 162000, 0x343 }, + /* 200 MCS-04 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 243000, + 270000, 0, 0, 0, 243000, 0x344 }, + /* 201 MCS-05 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 324000, + 360000, 0, 0, 0, 324000, 0x345 }, + /* 202 MCS-06 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 364500, + 405000, 0, 0, 0, 364500, 0x346 }, + /* 203 MCS-07 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 405000, + 450000, 0, 0, 0, 405000, 0x347 }, + /* 204 MCS-08 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 486000, + 540000, 0, 0, 0, 486000, 0x348 }, + /* 205 MCS-09 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 540000, + 600000, 0, 0, 0, 540000, 0x349 }, + /* 206 MCS-10 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 607500, + 675000, 0, 0, 0, 607500, 0x34a}, + /* 207 MCS-11 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 675000, + 750000, 0, 0, 0, 675000, 0x34b}, + + /* When number of spatial streams > 3 use below rates */ + /* 208 MCS-00 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 54000, + 60000, 0, 0, 0, 54000, 0x360}, + /* 209 MCS-01 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 108000, + 120000, 0, 0, 0, 108000, 0x361}, + /* 210 MCS-02 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 162000, + 180000, 0, 0, 0, 162000, 0x362}, + /* 211 MCS-03 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 216000, + 240000, 0, 0, 0, 216000, 0x363}, + /* 212 MCS-04 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 324000, + 260000, 0, 0, 0, 324000, 0x364}, + /* 213 MCS-05 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 432000, + 480000, 0, 0, 0, 432000, 0x365}, + /* 214 MCS-06 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 486000, + 540000, 0, 0, 0, 486000, 0x366}, + /* 215 MCS-07 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 540000, + 600000, 0, 0, 0, 540000, 0x367}, + /* 216 MCS-08 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 648000, + 720000, 0, 0, 0, 648000, 0x368}, + /* 217 MCS-09 */ { VHT40_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_40, 720000, + 800000, 0, 0, 0, 720000, 0x369}, + /* 218 MCS-10 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 810000, + 900000, 0, 0, 0, 810000, 0x36a }, + /* 219 MCS-11 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 900000, + 1000000, 0, 0, 0, 900000, 0x36b }, + + /* when number of spatial streams > 4 use below rates */ + /* 220 MCS-00 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 67500, + 75000, 0, 0, 0, 67500, 0x380 }, + /* 221 MCS-01 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 135000, + 150000, 0, 0, 0, 135000, 0x381 }, + /* 222 MCS-02 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 202500, + 225000, 0, 0, 0, 202500, 0x382 }, + /* 223 MCS-03 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 270000, + 300000, 0, 0, 0, 270000, 0x383 }, + /* 224 MCS-04 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 405000, + 450000, 0, 0, 0, 405000, 0x384 }, + /* 225 MCS-05 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 540000, + 600000, 0, 0, 0, 540000, 0x385 }, + /* 226 MCS-06 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 607500, + 675000, 0, 0, 0, 607500, 0x386 }, + /* 227 MCS-07 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, 675000, + 750000, 0, 0, 0, 675000, 0x387 }, + /* 228 MCS-08 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 810000, 900000, 0, 0, 0, 810000, + 0x388 }, + /* 229 MCS-09 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 900000, 1000000, 0, 0, 0, 900000, + 0x389 }, + /* 230 MCS-10 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 1012500, 1125000, 0, 0, 0, 1012500, + 0x38a }, + /* 231 MCS-11 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 1125000, 1250000, 0, 0, 0, 1125000, + 0x38b }, + + /* when number of spatial streams > 5 use below rates*/ + /* 232 MCS-00 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 81000, 90000, 0, 0, 0, 81000, 0x3a0 + }, + /* 233 MCS-01 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 162000, 180000, 0, 0, 0, 162000, + 0x3a1 }, + /* 234 MCS-02 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 243000, 270000, 0, 0, 0, 243000, + 0x3a2 }, + /* 235 MCS-03 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 324000, 360000, 0, 0, 0, 324000, + 0x3a3 }, + /* 236 MCS-04 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 486000, 540000, 0, 0, 0, 486000, + 0x3a4 }, + /* 237 MCS-05 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 648000, 720000, 0, 0, 0, 648000, + 0x3a5 }, + /* 238 MCS-06 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 729000, 815000, 0, 0, 0, 729000, + 0x3a6 }, + /* 239 MCS-07 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 810000, 900000, 0, 0, 0, 810000, + 0x3a7 }, + /* 240 MCS-08 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 972000, 1080000, 0, 0, 0, 972000, + 0x3a8 }, + /* 241 MCS-09 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 1080000, 1200000, 0, 0, 0, 1080000, + 0x3a9 }, + /* 242 MCS-10 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 1215000, 1350000, 0, 0, 0, 1215000, + 0x3aa }, + /* 243 MCS-11 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 1350000, 1500000, 0, 0, 0, 1350000, + 0x3ab }, + + /* when number of spatial streams > 6 use below rates */ + /* 244 MCS-00 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 94500, 105000, 0, 0, 0, 94500, 0x3c0 + }, + /* 245 MCS-01 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 189000, 210000, 0, 0, 0, 189000, + 0x3c1 }, + /* 246 MCS-02 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 283500, 315000, 0, 0, 0, 283500, + 0x3c2 }, + /* 247 MCS-03 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 378000, 420000, 0, 0, 0, 378000, + 0x3c3 }, + /* 248 MCS-04 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 567000, 630000, 0, 0, 0, 567000, + 0x3c4 }, + /* 249 MCS-05 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 756000, 840000, 0, 0, 0, 756000, + 0x3c5 }, + /* 250 MCS-06 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 850500, 945000, 0, 0, 0, 850500, + 0x3c6 }, + /* 251 MCS-07 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 945000, 1050000, 0, 0, 0, 945000, + 0x3c7 }, + /* 252 MCS-08 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 1134000, 1260000, 0, 0, 0, 1134000, + 0x3c8 }, + /* 253 MCS-09 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 1260000, 1400000, 0, 0, 0, 1260000, + 0x3c9 }, + /* 254 MCS-10 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 1417500, 1575000, 0, 0, 0, 1417500, + 0x3ca }, + /* 255 MCS-11 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 1575000, 1750000, 0, 0, 0, 1575000, + 0x3cb }, + + /* when number of spatial streams > 7 use below rates */ + /* 256 MCS-00 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 108000, 120000, 0, 0, 0, 108000, + 0x3e0 }, + /* 257 MCS-01 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 216000, 240000, 0, 0, 0, 216000, + 0x3e1 }, + /* 258 MCS-02 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 324000, 360000, 0, 0, 0, 324000, + 0x3e2 }, + /* 259 MCS-03 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 432000, 480000, 0, 0, 0, 432000, + 0x3e3 }, + /* 260 MCS-04 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 648000, 720000, 0, 0, 0, 648000, + 0x3e4 }, + /* 261 MCS-05 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 864000, 960000, 0, 0, 0, 864000, + 0x3e5 }, + /* 262 MCS-06 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 972000, 1080000, 0, 0, 0, 972000, + 0x3e6 }, + /* 263 MCS-07 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 1080000, 1200000, 0, 0, 0, 1080000, + 0x3e7 }, + /* 264 MCS-08 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 1296000, 1440000, 0, 0, 0, 1296000, + 0x3e8 }, + /* 265 MCS-09 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 1440000, 1600000, 0, 0, 0, 1440000, + 0x3e9 }, + /* 266 MCS-10 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 1620000, 1800000, 0, 0, 0, 1620000, + 0x3ea }, + /* 267 MCS-11 */ { VHT40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_40, + 1800000, 2000000, 0, 0, 0, 1800000, + 0x3eb }, + + /* 11ac VHT80 rates + */ + /* 268 MCS-00 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 29300, 32500, 0, 0, 0, 29300, + 0x300}, + /* 269 MCS-01 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 58500, 65000, 0, 0, 0, 58500, + 0x301}, + /* 270 MCS-02 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 87800, 97500, 0, 0, 0, 87800, + 0x302}, + /* 271 MCS-03 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 117000, 130000, 0, 0, 0, 117000, + 0x303}, + /* 272 MCS-04 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 175500, 195000, 0, 0, 0, 175500, + 0x304}, + /* 273 MCS-05 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 234000, 260000, 0, 0, 0, 234000, + 0x305}, + /* 274 MCS-06 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 263300, 292500, 0, 0, 0, 263300, + 0x306}, + /* 275 MCS-07 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 292500, 325000, 0, 0, 0, 292500, + 0x307}, + /* 276 MCS-08 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 351000, 390000, 0, 0, 0, 351000, + 0x308}, + /* 277 MCS-09 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 390000, 433300, 0, 0, 0, 390000, + 0x309}, + /* 278 MCS-10 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 438800, 487500, 0, 0, 0, 438800, + 0x30a}, + /* 279 MCS-11 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 487500, 541700, 0, 0, 0, 487500, + 0x30b}, + + /* When number of spatial streams > 1 use below rates*/ + /* 280 MCS-00 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 58500, 65000, 0, 0, 0, 58500, + 0x320}, + /* 281 MCS-01 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 117000, 130000, 0, 0, 0, 117000, + 0x321}, + /* 282 MCS-02 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 175500, 195000, 0, 0, 0, 175500, + 0x322}, + /* 283 MCS-03 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 234000, 260000, 0, 0, 0, 234000, + 0x323}, + /* 284 MCS-04 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 351000, 390000, 0, 0, 0, 351000, + 0x324}, + /* 285 MCS-05 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 468000, 520000, 0, 0, 0, 468000, + 0x325}, + /* 286 MCS-06 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 526500, 585000, 0, 0, 0, 526500, + 0x326}, + /* 287 MCS-07 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 585000, 650000, 0, 0, 0, 585000, + 0x327}, + /* 288 MCS-08 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 702000, 780000, 0, 0, 0, 702000, + 0x328}, + /* 289 MCS-09 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 780000, 866700, 0, 0, 0, 780000, + 0x329}, + /* 290 MCS-10 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 877500, 975000, 0, 0, 0, 877500, + 0x32a}, + /* 291 MCS-11 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 975000, 1083300, 0, 0, 0, 975000, + 0x32b}, + + /* When number of spatial streams > 2 use below rates */ + /* 292 MCS-00 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 87800, 97500, 0, 0, 0, 87800, 0x340 + }, + /* 293 MCS-01 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 175500, 195000, 0, 0, 0, 175500, + 0x341 }, + /* 294 MCS-02 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 263300, 292500, 0, 0, 0, 263300, + 0x342 }, + /* 295 MCS-03 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 351000, 390000, 0, 0, 0, 351000, + 0x343 }, + /* 296 MCS-04 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 526500, 585000, 0, 0, 0, 526500, + 0x344 }, + /* 297 MCS-05 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 702000, 780000, 0, 0, 0, 702000, + 0x345 }, + /* 298 MCS-06 */ { VHT_INVALID_RATES_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 789800, 877500, 0, 0, 0, 789800, + 0x346 }, + /* 299 MCS-07 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 877500, 975000, 0, 0, 0, 877500, + 0x347 }, + /* 300 MCS-08 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1053000, 1170000, 0, 0, 0, 1053000, + 0x348 }, + /* 301 MCS-09 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1170000, 1300000, 0, 0, 0, 1170000, + 0x349 }, + /* 302 MCS-10 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1316300, 1462500, 0, 0, 0, 1316300, + 0x34a }, + /* 303 MCS-11 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1462500, 1625000, 0, 0, 0, 1462500, + 0x34b }, + /* When number of spatial streams > 3 use below rates */ + /* 304 MCS-00 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 117000, 130000, 0, 0, 0, 117000, + 0x360 }, + /* 305 MCS-01 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 234000, 260000, 0, 0, 0, 234000, + 0x361 }, + /* 306 MCS-02 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 351000, 390000, 0, 0, 0, 351000, + 0x362 }, + /* 307 MCS-03 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 468000, 520000, 0, 0, 0, 468000, + 0x363 }, + /* 308 MCS-04 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 702000, 780000, 0, 0, 0, 702000, + 0x364 }, + /* 309 MCS-05 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 936000, 1040000, 0, 0, 0, 936000, + 0x365 }, + /* 310 MCS-06 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1053000, 1170000, 0, 0, 0, 1053000, + 0x366 }, + /* 311 MCS-07 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1170000, 1300000, 0, 0, 0, 1170000, + 0x367 }, + /* 312 MCS-08 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1404000, 1560000, 0, 0, 0, 1404000, + 0x368 }, + /* 313 MCS-09 */ { VHT80_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1560000, 1733000, 0, 0, 0, 1560000, + 0x369 }, + /* 314 MCS-08 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1755000, 1950000, 0, 0, 0, 1755000, + 0x36a }, + /* 315 MCS-09 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1950000, 2166700, 0, 0, 0, 1950000, + 0x36b }, + /* When number of spatial streams > 4 use below rates */ + /* 316 MCS-00 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 146300, 162500, 0, 0, 0, 146300, + 0x380 }, + /* 317 MCS-01 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 292500, 325000, 0, 0, 0, 292500, + 0x381 }, + /* 318 MCS-02 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 438800, 487500, 0, 0, 0, 438800, + 0x382 }, + /* 319 MCS-03 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 585000, 650000, 0, 0, 0, 585000, + 0x383 }, + /* 320 MCS-04 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 877500, 975000, 0, 0, 0, 877500, + 0x384 }, + /* 321 MCS-05 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1170000, 1300000, 0, 0, 0, 1170000, + 0x385 }, + /* 322 MCS-06 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1316300, 1462500, 0, 0, 0, 1316300, + 0x386 }, + /* 323 MCS-07 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1462500, 1625000, 0, 0, 0, 1462500, + 0x387 }, + /* 324 MCS-08 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1755000, 1950000, 0, 0, 0, 1755000, + 0x388 }, + /* 325 MCS-09 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1950000, 2166700, 0, 0, 0, 1950000, + 0x389 }, + /* 326 MCS-10 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 2193800, 2437500, 0, 0, 0, 2193800, + 0x38a }, + /* 327 MCS-11 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 2437500, 2708300, 0, 0, 0, 2437500, + 0x38b }, + /* When number of spatial streams > 5 use below rates */ + /* 328 MCS-00 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 175500, 195000, 0, 0, 0, 175500, + 0x3a0 }, + /* 329 MCS-01 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 351000, 390000, 0, 0, 0, 351000, + 0x3a1 }, + /* 330 MCS-02 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 526500, 585500, 0, 0, 0, 526500, + 0x3a2 }, + /* 331 MCS-03 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 702000, 780000, 0, 0, 0, 702000, + 0x3a3 }, + /* 332 MCS-04 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1053000, 1170000, 0, 0, 0, 1053000, + 0x3a4 }, + /* 333 MCS-05 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1404000, 1560000, 0, 0, 0, 1404000, + 0x3a5 }, + /* 334 MCS-06 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1579500, 1755000, 0, 0, 0, 1579500, + 0x3a6 }, + /* 335 MCS-07 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1755000, 1950000, 0, 0, 0, 1755000, + 0x3a7 }, + /* 336 MCS-08 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 2106000, 2340000, 0, 0, 0, 2106000, + 0x3a8 }, + /* 337 MCS-09 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 2340000, 2600000, 0, 0, 0, 2340000, + 0x3a9 }, + /* 338 MCS-10 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 2632500, 2925000, 0, 0, 0, 2632500, + 0x3aa }, + /* 339 MCS-11 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 2925000, 3250000, 0, 0, 0, 2925000, + 0x3ab }, + /* When number of spatial streams > 6 use below rates*/ + /* 340 MCS-00 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 204800, 227500, 0, 0, 0, 204800, + 0x3c0 }, + /* 341 MCS-01 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 409500, 455000, 0, 0, 0, 409500, + 0x3c1 }, + /* 342 MCS-02 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 614300, 682500, 0, 0, 0, 614300, + 0x3c2 }, + /* 343 MCS-03 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 819000, 910000, 0, 0, 0, 819000, + 0x3c3 }, + /* 344 MCS-04 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1288500, 1365000, 0, 0, 0, 1288500, + 0x3c4 }, + /* 345 MCS-05 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1638000, 1820000, 0, 0, 0, 1638000, + 0x3c5 }, + /* 346 MCS-06 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1842800, 2047500, 0, 0, 0, 1842800, + 0x3c6 }, + /* 347 MCS-07 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 2047500, 2275000, 0, 0, 0, 2047500, + 0x3c7 }, + /* 348 MCS-08 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 2457000, 2730000, 0, 0, 0, 2457000, + 0x3c8 }, + /* 349 MCS-09 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 2730000, 3033300, 0, 0, 0, 2730000, + 0x3c9 }, + /* 350 MCS-10 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 3071300, 3412500, 0, 0, 0, 3071300, + 0x3ca }, + /* 351 MCS-11 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 3412500, 3791700, 0, 0, 0, 3412500, + 0x3cb }, + /* When number of spatial streams > 7 use below rates*/ + /* 352 MCS-00 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 234000, 260000, 0, 0, 0, 234000, + 0x3e0 }, + /* 353 MCS-01 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 468000, 520000, 0, 0, 0, 468000, + 0x3e1}, + /* 354 MCS-02 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 702000, 780000, 0, 0, 0, 702000, + 0x3e2}, + /* 355 MCS-03 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 936000, 1040000, 0, 0, 0, 936000, + 0x3e3}, + /* 356 MCS-04 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1404000, 1560000, 0, 0, 0, 1404000, + 0x3e4}, + /* 357 MCS-05 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 1872000, 2080000, 0, 0, 0, 1872000, + 0x3e5}, + /* 358 MCS-06 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 2106000, 2340000, 0, 0, 0, 2106000, + 0x3e6}, + /* 359 MCS-07 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 2340000, 2600000, 0, 0, 0, 2340000, + 0x3e7}, + /* 360 MCS-08 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 2808000, 3120000, 0, 0, 0, 2808000, + 0x3e8}, + /* 361 MCS-09 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 3120000, 3466700, 0, 0, 0, 3120000, + 0x3e9}, + /* 362 MCS-10 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 3510000, 3900000, 0, 0, 0, 3510000, + 0x3ea}, + /* 363 MCS-11 */ { VHT80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_80, + 3900000, 4333300, 0, 0, 0, 3900000, + 0x3eb}, + + /* 11ac VHT160 rates + */ + /* 364 MCS-00 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 58500, 65000, 0, 0, 0, 58500, + 0x300}, + /* 365 MCS-01 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 117000, 130000, 0, 0, 0, 117000, + 0x301}, + /* 366 MCS-02 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 175500, 195000, 0, 0, 0, 175500, + 0x302}, + /* 367 MCS-03 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 234000, 260000, 0, 0, 0, 234000, + 0x303}, + /* 368 MCS-04 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 351000, 390000, 0, 0, 0, 351000, + 0x304}, + /* 369 MCS-05 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 468000, 520000, 0, 0, 0, 468000, + 0x305}, + /* 370 MCS-06 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 526500, 585000, 0, 0, 0, 526500, + 0x306}, + /* 371 MCS-07 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 585000, 650000, 0, 0, 0, 585000, + 0x307}, + /* 372 MCS-08 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 702000, 780000, 0, 0, 0, 702000, + 0x308}, + /* 373 MCS-09 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 780000, 866700, 0, 0, 0, 780000, + 0x309}, + /* 374 MCS-10 */ { VHT160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 877500, 975000, 0, 0, 0, 877500, + 0x30a }, + /* 375 MCS-11 */ { VHT160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 975000, 1083300, 0, 0, 0, 975000, + 0x30b }, + /* If maximum number of spatial streams supported + * at 160MHZ > 1 use below rates + */ + /* 376 MCS-00 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 117000, 130000, 0, 0, 0, 117000, + 0x320}, + /* 377 MCS-01 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 234000, 260000, 0, 0, 0, 234000, + 0x321}, + /* 378 MCS-02 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 351000, 390000, 0, 0, 0, 351000, + 0x322}, + /* 379 MCS-03 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 468000, 520000, 0, 0, 0, 468000, + 0x323}, + /* 380 MCS-04 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 702000, 780000, 0, 0, 0, 702000, + 0x324}, + /* 381 MCS-05 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 936000, 1040000, 0, 0, 0, 936000, + 0x325}, + /* 382 MCS-06 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 1053000, 1170000, 0, 0, 0, 1053000, + 0x326}, + /* 383 MCS-07 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 1170000, 1300000, 0, 0, 0, 1170000, + 0x327}, + /* 384 MCS-08 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 1404000, 1560000, 0, 0, 0, 1404000, + 0x328}, + /* 385 MCS-09 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 1560000, 1733300, 0, 0, 0, 1560000, + 0x329}, + /* 386 MCS-10 */ { VHT160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 1755000, 1950000, 0, 0, 0, 1755000, + 0x32a}, + /* 387 MCS-11 */ { VHT160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 1950000, 2166700, 0, 0, 0, 1950000, + 0x32b}, + /* If maximum number of spatial streams supported + * at 160MHZ > 2 use below rates + */ + /* 388 MCS-00 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 175500, 195000, 0, 0, 0, 175500, + 0x340 }, + /* 389 MCS-01 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 351000, 390000, 0, 0, 0, 351000, + 0x341 }, + /* 390 MCS-02 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 526500, 585000, 0, 0, 0, 526500, + 0x342 }, + /* 391 MCS-03 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 702000, 780000, 0, 0, 0, 702000, + 0x343 }, + /* 392 MCS-04 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 1053000, 1170000, 0, 0, 0, 1053000, + 0x344 }, + /* 393 MCS-05 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 1404000, 1560000, 0, 0, 0, 1404000, + 0x345 }, + /* 394 MCS-06 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 1579500, 1755000, 0, 0, 0, 1579500, + 0x346 }, + /* 395 MCS-07 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 1755000, 1755000, 0, 0, 0, 1755000, + 0x347 }, + /* 396 MCS-08 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 2106000, 2340000, 0, 0, 0, 2106000, + 0x348 }, + /* 397 MCS-09 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 2340000, 2600000, 0, 0, 0, 2340000, + 0x349 }, + /* 398 MCS-10 */ { VHT160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 2632500, 2925000, 0, 0, 0, 2632500, + 0x34a }, + /* 399 MCS-11 */ { VHT160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 2925000, 3250000, 0, 0, 0, 2925000, + 0x34b }, + /* If maximum number of spatial streams supported + * at 160MHZ > 3 use below rates + */ + /* 400 MCS-00 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 234000, 260000, 0, 0, 0, 234000, + 0x360 }, + /* 401 MCS-01 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 468000, 520000, 0, 0, 0, 468000, + 0x361 }, + /* 402 MCS-02 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 702000, 780000, 0, 0, 0, 702000, + 0x362 }, + /* 403 MCS-03 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 936000, 1040000, 0, 0, 0, 936000, + 0x363 }, + /* 404 MCS-04 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 1404000, 1560000, 0, 0, 0, 1404000, + 0x364 }, + /* 405 MCS-05 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 1872000, 2080000, 0, 0, 0, 1872000, + 0x365 }, + /* 406 MCS-06 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 2106000, 2340000, 0, 0, 0, 2106000, + 0x366 }, + /* 407 MCS-07 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 2340000, 2600000, 0, 0, 0, 2340000, + 0x367 }, + /* 408 MCS-08 */ { VHT160_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 2808000, 3120000, 0, 0, 0, 2808000, + 0x368 }, + /* 409 MCS-09 */ { VHT160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 3120000, 3466700, 0, 0, 0, 3120000, + 0x369 }, + /* 410 MCS-10 */ { VHT160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 3510000, 3900000, 0, 0, 0, 3510000, + 0x36a }, + /* 411 MCS-11 */ { VHT160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_VHT_160, + 3900000, 4333300, 0, 0, 0, 3900000, + 0x36b }, + + /* 11ax RU242 rates + */ + /* 412 MCS-00 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 8600, 8900, 8100, 7300, 4300, 8600, 0x400}, + /* 413 MCS-01 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 17200, 17700, 16300, 14600, 8600, 17200, + 0x401}, + /* 414 MCS-02 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 25800, 26600, 24400, 21900, 0, 25800, + 0x402}, + /* 415 MCS-03 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 34400, 35500, 32500, 29300, 17700, 34400, + 0x403}, + /* 416 MCS-04 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 51600, 53200, 48800, 43900, 25800, 51600, + 0x404}, + /* 417 MCS-05 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 68800, 70900, 65000, 58500, 0, 68800, + 0x405}, + /* 418 MCS-06 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 77400, 79800, 73100, 65800, 0, 77400, + 0x406}, + /* 419 MCS-07 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 86000, 88600, 81300, 73100, 0, 86000, + 0x407}, + /* 420 MCS-08 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 103200, 106400, 97500, 87800, 0, 103200, + 0x408}, + /* 421 MCS-09 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 114700, 118200, 108300, 97500, 0, 114700, + 0x409}, + /* 422 MCS-10 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 129000, 133000, 121900, 109700, 0, 129000, + 0x40a}, + /* 423 MCS-11 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 143400, 147700, 135400, 121900, 0, 143400, + 0x40b}, + /* When number spatial streams > 1 use below rates */ + /* 424 MCS-00 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 17200, 17700, 16300, 14600, 8600, 17200, + 0x420}, + /* 425 MCS-01 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 34400, 35500, 32500, 29300, 17700, 34400, + 0x421}, + /* 426 MCS-02 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 51600, 53200, 48800, 43900, 0, 51600, + 0x422}, + /* 427 MCS-03 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 68800, 70900, 65000, 58500, 34400, 68800, + 0x423}, + /* 428 MCS-04 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 103200, 106400, 97500, 87800, 51600, 103200, + 0x424}, + /* 429 MCS-05 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 137600, 141800, 130000, 117000, 0, 137600, + 0x425}, + /* 430 MCS-06 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 154900, 159500, 146300, 131600, 0, 154900, + 0x426}, + /* 431 MCS-07 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 172100, 177300, 162500, 146300, 0, 172100, + 0x427}, + /* 432 MCS-08 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 206500, 212700, 195000, 175500, 0, 206500, + 0x428}, + /* 433 MCS-09 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 229400, 236400, 216700, 195000, 0, 229400, + 0x429}, + /* 434 MCS-10 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 258100, 265900, 243800, 219400, 0, 258100, + 0x42a}, + /* 435 MCS-11 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 286800, 295500, 270800, 243800, 0, 286800, + 0x42b}, + + /* When number of spatial streams > 2 + * use below rates + */ + /* 436 MCS-00 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 25800, 26600, 24400, 21900, 12900, 25800, + 0x440}, + /* 437 MCS-01 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 51600, 53200, 48800, 43900, 25800, 51600, + 0x441}, + /* 438 MCS-02 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 77400, 79800, 73100, 65800, 0, 77400, + 0x442}, + /* 439 MCS-03 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 103200, 106400, 97500, 87800, 51600, 103200, + 0x443}, + /* 440 MCS-04 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 154900, 159500, 146300, 131600, 77400, 154900, + 0x444}, + /* 441 MCS-05 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 206500, 212700, 195000, 175500, 0, 206500, + 0x445}, + /* 442 MCS-06 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 232300, 239300, 219400, 197400, 0, 232300, + 0x446}, + /* 443 MCS-07 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 258100, 265900, 243800, 219400, 0, 258100, + 0x447}, + /* 444 MCS-08 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 309700, 319100, 292500, 263300, 0, 309700, + 0x448}, + /* 445 MCS-09 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 344100, 354500, 325000, 292500, 0, 344100, + 0x449}, + /* 446 MCS-10 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 387100, 398900, 365600, 329100, 0, 387100, + 0x44a}, + /* 447 MCS-11 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 430100, 443200, 406300, 365600, 0, 430100, + 0x44b}, + + /* When number of spatial streams > 3 + * use below rates + */ + /* 448 MCS-00 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 34400, 35500, 32500, 29300, 17700, 34400, + 0x460}, + /* 449 MCS-01 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 68800, 70900, 65000, 58500, 34400, 68800, + 0x461}, + /* 450 MCS-02 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 103200, 106400, 97500, 87800, 0, 103200, + 0x462}, + /* 451 MCS-03 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 137600, 141800, 130000, 117000, 68800, 137600, + 0x463}, + /* 452 MCS-04 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 206500, 212700, 195000, 175500, 103200, 206500, + 0x464}, + /* 453 MCS-05 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 275300, 283600, 260000, 234000, 0, 275300, + 0x465}, + /* 454 MCS-06 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 309700, 319100, 292500, 263300, 0, 309700, + 0x466}, + /* 455 MCS-07 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 344100, 354500, 325000, 292500, 0, 344100, + 0x467}, + /* 456 MCS-08 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 412900, 425500, 390000, 351000, 0, 412900, + 0x468}, + /* 457 MCS-09 */ { HE20_MODE_VALID_MASK, + DP_CMN_MOD_IEEE80211_T_HE_20, + 455800, 472700, 433300, 390000, 0, 455800, + 0x469}, + /* 458 MCS-10 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 516200, 531800, 487500, 438800, 0, 516200, + 0x46a}, + /* 459 MCS-11 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 573500, 590900, 541700, 487500, 0, 573500, + 0x46b}, + + /* When number of spatial streams > 4 + * use below rates + */ + /* 460 MCS-00 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 43000, 43300, 40600, 36600, 21500, 43000, + 0x480}, + /* 461 MCS-01 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 86000, 88600, 81300, 73100, 43000, 86000, + 0x481}, + /* 462 MCS-02 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 129000, 133000, 121900, 109700, 0, 129000, + 0x482}, + /* 463 MCS-03 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 172100, 177300, 162500, 146300, 86000, 172100, + 0x483}, + /* 464 MCS-04 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 258100, 265900, 243800, 219400, 129000, 258100, + 0x484}, + /* 465 MCS-05 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 344100, 354500, 325000, 292500, 0, 344100, + 0x485}, + /* 466 MCS-06 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 387100, 398900, 365600, 329100, 0, 387100, + 0x486}, + /* 467 MCS-07 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 430100, 443200, 406300, 365600, 0, 430100, + 0x487}, + /* 468 MCS-08 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 516200, 531800, 487500, 438800, 0, 516200, + 0x488}, + /* 469 MCS-09 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 573500, 590900, 541700, 487500, 0, 573500, + 0x489}, + /* 470 MCS-10 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 645200, 664800, 609400, 548400, 0, 645200, + 0x48a}, + /* 471 MCS-11 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 716900, 738600, 677100, 609400, 0, 716900, + 0x48b}, + + /* When number of spatial streams > 5 + * use below rates + */ + /* 472 MCS-00 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 51600, 53200, 48800, 43900, 25800, 51600, + 0x4a0}, + /* 473 MCS-01 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 103200, 106400, 97500, 87800, 51600, 103200, + 0x4a1}, + /* 474 MCS-02 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 154900, 159500, 146300, 131600, 0, 154900, + 0x4a2}, + /* 475 MCS-03 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 206500, 212700, 195000, 175500, 103200, 206500, + 0x4a3}, + /* 476 MCS-04 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 309700, 319100, 292500, 263300, 154900, 309700, + 0x4a4}, + /* 477 MCS-05 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 412900, 425500, 390000, 351000, 0, 412900, + 0x4a5}, + /* 478 MCS-06 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 464600, 478600, 438000, 394900, 0, 464600, + 0x4a6}, + /* 479 MCS-07 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 516200, 531800, 487500, 438800, 0, 516200, + 0x4a7}, + /* 480 MCS-08 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 619400, 638200, 585000, 526500, 0, 619400, + 0x4a8}, + /* 481 MCS-09 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 688200, 709100, 650000, 585000, 0, 688200, + 0x4a9}, + /* 482 MCS-10 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 774300, 797700, 731300, 658100, 0, 774300, + 0x4aa}, + /* 483 MCS-11 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 860300, 886400, 812500, 731300, 0, 860300, + 0x4ab}, + + /* When number of spatial streams > 6 + * use below rates + */ + /* 484 MCS-00 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 60200, 62000, 56900, 51200, 30100, 60200, + 0x4c0}, + /* 485 MCS-01 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 120400, 124100, 113800, 102400, 60200, 120400, + 0x4c1}, + /* 486 MCS-02 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 180700, 186100, 170600, 153600, 0, 180700, + 0x4c2}, + /* 487 MCS-03 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 240900, 248200, 227500, 204800, 120400, 240900, + 0x4c3}, + /* 488 MCS-04 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 361300, 372300, 341300, 307100, 180700, 361300, + 0x4c4}, + /* 489 MCS-05 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 481800, 496400, 455000, 409500, 0, 481800, + 0x4c5}, + /* 490 MCS-06 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 542000, 558400, 511900, 460700, 0, 542000, + 0x4c6}, + /* 491 MCS-07 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 602200, 620500, 568800, 511900, 0, 602200, + 0x4c7}, + /* 492 MCS-08 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 722600, 744500, 682500, 614300, 0, 722600, + 0x4c8}, + /* 493 MCS-09 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 802900, 827300, 758300, 682500, 0, 802900, + 0x4c9}, + /* 494 MCS-10 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 903300, 930700, 853100, 767800, 0, 903300, + 0x4ca}, + /* 495 MCS-11 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 1003700, 1034100, 947900, 853100, 0, 1003700, + 0x4cb}, + + /* When number of spatial streams > 7 + * use below rates + */ + /* 496 MCS-00 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 68800, 70900, 65000, 58500, 34400, 68800, + 0x4e0}, + /* 497 MCS-01 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 137600, 141800, 130000, 117000, 68800, 137600, + 0x4e1}, + /* 498 MCS-02 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 206500, 212700, 195000, 175500, 0, 206500, + 0x4e2}, + /* 499 MCS-03 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 275300, 283600, 260000, 234000, 137600, 275300, + 0x4e3}, + /* 500 MCS-04 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 412900, 425500, 390000, 351000, 206500, 412900, + 0x4e4}, + /* 501 MCS-05 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 550600, 567300, 520000, 468000, 0, 550600, + 0x4e5}, + /* 502 MCS-06 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 619400, 638200, 585000, 526500, 0, 619400, + 0x4e6}, + /* 503 MCS-07 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 688200, 709100, 650000, 585000, 0, 688200, + 0x4e7}, + /* 504 MCS-08 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 825900, 850900, 780000, 702000, 0, 825900, + 0x4e8}, + /* 505 MCS-09 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 917600, 945500, 866700, 780000, 0, 917600, + 0x4e9}, + /* 506 MCS-10 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 1032400, 1063600, 975000, 877500, 0, 1032400, + 0x4ea}, + /* 507 MCS-11 */ { HE20_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_20, + 1147100, 1181800, 1083300, 975000, 0, 1147100, + 0x4eb}, + + /* 11ax RU484 rates + */ + /* 508 MCS-00 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 17200, 17700, 16300, 14600, 8600, 17200, 0x400 + }, + /* 509 MCS-01 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 34400, 35500, 32500, 29300, 17700, 34400, 0x401 + }, + /* 510 MCS-02 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 51600, 53200, 48800, 43900, 25800, 51600, 0x402 + }, + /* 511 MCS-03 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 68800, 70900, 65000, 58500, 0, 68800, 0x403 + }, + /* 512 MCS-04 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 103200, 106400, 97500, 87800, 0, 103200, + 0x404 }, + /* 513 MCS-05 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 137600, 141800, 130000, 117000, 0, 137600, + 0x405 }, + /* 514 MCS-06 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 154900, 159500, 146300, 131600, 0, 154900, + 0x406 }, + /* 515 MCS-07 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 172100, 177300, 162500, 146300, 0, 172100, + 0x407 }, + /* 516 MCS-08 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 206500, 212700, 195000, 175500, 0, 206500, + 0x408 }, + /* 517 MCS-09 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 229400, 236400, 216700, 195000, 0, 229400, + 0x409 }, + /* 518 MCS-10 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 258100, 265900, 243800, 219400, 0, 258100, + 0x40a }, + /* 519 MCS-11 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 286800, 295500, 270800, 243800, 0, 286800, + 0x40b }, + /* When number of spatial streams > 1 + * use below rates + */ + /* 520 MCS-00 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 34400, 35500, 32500, 29300, 17700, 34400, 0x420 + }, + /* 521 MCS-01 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 68800, 70900, 65000, 58500, 34400, 68800, 0x421 + }, + /* 522 MCS-02 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 103200, 106400, 97500, 87800, 0, 103200, + 0x422 }, + /* 523 MCS-03 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 137600, 141800, 130000, 117000, 68800, 137600, + 0x423 }, + /* 524 MCS-04 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 206500, 212700, 195000, 175500, 103200, 206500, + 0x424 }, + /* 525 MCS-05 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 275300, 283600, 260000, 234000, 0, 275300, + 0x425 }, + /* 526 MCS-06 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 309700, 319100, 292500, 263300, 0, 309700, + 0x426 }, + /* 527 MCS-07 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 344100, 354500, 325000, 292500, 0, 344100, + 0x427 }, + /* 528 MCS-08 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 412900, 425500, 390000, 351000, 0, 412900, + 0x428 }, + /* 529 MCS-09 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 455800, 472700, 433300, 390000, 0, 455800, + 0x429 }, + /* 530 MCS-10 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 516200, 531800, 487500, 438800, 0, 516200, + 0x42a }, + /* 531 MCS-11 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 573500, 590900, 541700, 487500, 0, 573500, + 0x42b }, + + /* When number of spatial streams > 2 + * use below rates + */ + /* 532 MCS-00 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 51600, 53200, 48800, 43900, 25800, 51600, 0x440 + }, + /* 533 MCS-01 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 103200, 106400, 97500, 87800, 51600, 103200, + 0x441 }, + /* 534 MCS-02 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 154900, 159500, 146300, 131600, 0, 154900, + 0x442 }, + /* 535 MCS-03 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 206500, 212700, 195000, 175500, 103200, 206500, + 0x443 }, + /* 536 MCS-04 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 309700, 319100, 292500, 263300, 154900, 309700, + 0x444 }, + /* 537 MCS-05 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 412900, 425500, 390000, 351000, 0, 412900, + 0x445 }, + /* 538 MCS-06 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 464600, 478600, 438000, 394900, 0, 464600, + 0x446 }, + /* 539 MCS-07 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 516200, 531800, 487500, 438800, 0, 516200, + 0x447 }, + /* 540 MCS-08 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 619400, 638200, 585000, 526500, 0, 619400, + 0x448 }, + /* 541 MCS-09 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 688200, 709100, 650000, 585000, 0, 688200, + 0x449 }, + /* 542 MCS-10 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 774300, 797700, 731300, 658100, 0, 774300, + 0x44a }, + /* 543 MCS-11 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 860300, 886400, 812500, 731300, 0, 860300, + 0x44b }, + + /* When number of spatial streams > 3 + * use below rates + */ + /* 544 MCS-00 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 68800, 70900, 65000, 58500, 34400, 68800, 0x460 + }, + /* 545 MCS-01 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 137600, 141800, 130000, 117000, 68800, 137600, + 0x461 }, + /* 546 MCS-02 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 206500, 212700, 195000, 175500, 0, 206500, + 0x462 }, + /* 547 MCS-03 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 275300, 283600, 260000, 234000, 137600, 275300, + 0x463 }, + /* 548 MCS-04 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 412900, 425500, 390000, 351000, 206500, 412900, + 0x464 }, + /* 549 MCS-05 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 550600, 567300, 520000, 468000, 0, 550600, + 0x465 }, + /* 550 MCS-06 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 619400, 638200, 585000, 526500, 0, 619400, + 0x466 }, + /* 551 MCS-07 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 688200, 709100, 650000, 585000, 0, 688200, + 0x467 }, + /* 552 MCS-08 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 825900, 850900, 780000, 702000, 0, 825900, + 0x468 }, + /* 553 MCS-09 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 917600, 945500, 866700, 780000, 0, 917600, + 0x469 }, + /* 554 MCS-10 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1032400, 1063600, 975000, 877500, 0, 1032400, + 0x46a }, + /* 555 MCS-11 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1147100, 1181800, 1083300, 975000, 0, 1147100, + 0x46b }, + + /* When number of spatial streams > 4 + * use below rates + */ + /* 556 MCS-00 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 86000, 88600, 81300, 73100, 43000, 86000, 0x480 + }, + /* 557 MCS-01 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 172100, 177300, 162500, 146300, 86000, 172100, + 0x481 }, + /* 558 MCS-02 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 258100, 265900, 243800, 219400, 0, 258100, + 0x482 }, + /* 559 MCS-03 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 344100, 354500, 325000, 292500, 172100, 344100, + 0x483 }, + /* 560 MCS-04 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 516200, 531800, 487500, 438800, 258100, 516200, + 0x484 }, + /* 561 MCS-05 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 688200, 709100, 650000, 585000, 0, 688200, + 0x485 }, + /* 562 MCS-06 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 774300, 797700, 731300, 658100, 0, 774300, + 0x486 }, + /* 563 MCS-07 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 860300, 886400, 812500, 731300, 0, 860300, + 0x487 }, + /* 564 MCS-08 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1032400, 1063600, 975000, 877500, 0, 1032400, + 0x488 }, + /* 565 MCS-09 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1147100, 1181800, 1083300, 975000, 0, 1147100, + 0x489 }, + /* 566 MCS-10 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1290400, 1329500, 1218800, 1096900, 0, 1290400, + 0x48a }, + /* 567 MCS-11 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1433800, 1477300, 1354200, 1218800, 0, 1433800, + 0x48b }, + + /* When number of spatial streams > 5 + * use below rates + */ + /* 568 MCS-00 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 103200, 106400, 97500, 87800, 51600, 103200, + 0x4a0 }, + /* 569 MCS-01 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 206500, 212700, 195000, 175500, 103200, 206500, + 0x4a1 }, + /* 570 MCS-02 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 309700, 319100, 292500, 263300, 0, 309700, + 0x4a2 }, + /* 571 MCS-03 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 412900, 425500, 390000, 351000, 206500, 412900, + 0x4a3 }, + /* 572 MCS-04 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 619400, 638200, 585000, 526500, 309700, 619400, + 0x4a4 }, + /* 573 MCS-05 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 825900, 850900, 780000, 702000, 0, 825900, + 0x4a5 }, + /* 574 MCS-06 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 929100, 957300, 877500, 789800, 0, 929100, + 0x4a6 }, + /* 575 MCS-07 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1032400, 1063600, 975000, 877500, 0, 1032400, + 0x4a7 }, + /* 576 MCS-08 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1238800, 1276400, 1170000, 1053000, 0, 1238800, + 0x4a8 }, + /* 577 MCS-09 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1376500, 1418200, 1300000, 1170000, 0, 1376500, + 0x4a9 }, + /* 578 MCS-10 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1548500, 1595500, 1462500, 1316300, 0, 1548500, + 0x4aa }, + /* 579 MCS-11 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1720600, 1772700, 1625000, 1462500, 0, 1720600, + 0x4ab }, + /* When number spatial streams > 6 + * use below rates + */ + /* 580 MCS-00 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 120400, 124100, 113800, 102400, 60200, 120400, + 0x4c0 }, + /* 581 MCS-01 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 240900, 248200, 227500, 204800, 120400, 240900, + 0x4c1 }, + /* 582 MCS-02 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 361300, 372300, 341300, 307100, 180600, 361300, + 0x4c2 }, + /* 583 MCS-03 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 481800, 496400, 455000, 409500, 0, 481800, + 0x4c3 }, + /* 584 MCS-04 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 722600, 744500, 682500, 614300, 0, 722600, + 0x4c4 }, + /* 585 MCS-05 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 963500, 992700, 910000, 819000, 0, 963500, + 0x4c5 }, + /* 586 MCS-06 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1084000, 1116800, 1023800, 921400, 0, 1084000, + 0x4c6 }, + /* 587 MCS-07 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1204400, 1240900, 1137500, 1023800, 0, 1204400, + 0x4c7 }, + /* 588 MCS-08 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1445300, 1489100, 1365000, 1228500, 0, 1445300, + 0x4c8 }, + /* 589 MCS-09 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1605900, 1654500, 1516700, 1365000, 0, 1605900, + 0x4c9 }, + /* 590 MCS-10 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1806600, 1861400, 1706300, 1535600, 0, 1806600, + 0x4ca }, + /* 591 MCS-11 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 2007400, 2068200, 1895800, 1706300, 0, 2007400, + 0x4cb }, + + /* When number of spatial streams > 7 + * use below rates + */ + /* 592 MCS-00 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 137600, 141800, 130000, 117000, 68800, 137600, + 0x4e0 }, + /* 593 MCS-01 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 275300, 283600, 260000, 234000, 137600, 275300, + 0x4e1 }, + /* 594 MCS-02 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 412900, 425500, 390000, 351000, 206500, 412900, + 0x4e2 }, + /* 595 MCS-03 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 550600, 567300, 520000, 468000, 0, 550600, + 0x4e3 }, + /* 596 MCS-04 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 825900, 850900, 780000, 702000, 0, 825900, + 0x4e4 }, + /* 597 MCS-05 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1101200, 1134500, 1040000, 936000, 0, 1101200, + 0x4e5 }, + /* 598 MCS-06 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1238800, 1276400, 1170000, 1053000, 0, 1238800, + 0x4e6 }, + /* 599 MCS-07 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1376500, 1418200, 1300000, 1170000, 0, 1376500, + 0x4e7 }, + /* 600 MCS-08 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1651800, 1701800, 1560000, 1404000, 0, 1651800, + 0x4e8 }, + /* 601 MCS-09 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 1835300, 1890900, 1733300, 1560000, 0, 1835300, + 0x4e9 }, + /* 602 MCS-10 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 2064700, 2127300, 1950000, 1755000, 0, 2064700, + 0x4ea }, + /* 603 MCS-11 */ { HE40_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_40, + 2294100, 2363600, 2166700, 1950000, 0, 2294100, + 0x4eb }, + + /* 11ax RU996 rates + */ + /* 604 MCS-00 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 36000, 37100, 34000, 30600, 18000, 36000, 0x400 + }, + /* 605 MCS-01 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 72100, 74200, 68100, 61300, 36000, 72100, 0x401 + }, + /* 606 MCS-02 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 108100, 111400, 102100, 91900, 0, 108100, + 0x402 }, + /* 607 MCS-03 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 144100, 148500, 136100, 122500, 72100, 144100, + 0x403 }, + /* 608 MCS-04 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 216200, 222700, 204200, 183800, 108100, 216200, + 0x404 }, + /* 609 MCS-05 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 288200, 297000, 272200, 245000, 0, 288200, + 0x405 }, + /* 610 MCS-06 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 324300, 334100, 306300, 275600, 0, 324300, + 0x406 }, + /* 611 MCS-07 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 360300, 371200, 340300, 306300, 0, 360300, + 0x407 }, + /* 612 MCS-08 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 432400, 445500, 408300, 367500, 0, 432400, + 0x408 }, + /* 613 MCS-09 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 480400, 494900, 453700, 408300, 0, 480400, + 0x409 }, + /* 614 MCS-10 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 540400, 556800, 510400, 459400, 0, 540400, + 0x40a }, + /* 615 MCS-11 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 600500, 618700, 567100, 510400, 0, 600500, + 0x40b }, + /* When number spatial streams > 1 + * use below rates + */ + /* 616 MCS-00 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 72100, 74200, 68100, 61300, 36000, 72100, 0x420 + }, + /* 617 MCS-01 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 144100, 148500, 136100, 122500, 72100, 144100, + 0x421 }, + /* 618 MCS-02 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 216200, 222700, 204200, 183800, 0, 216200, + 0x422 }, + /* 619 MCS-03 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 288200, 297000, 272200, 245000, 144100, 288200, + 0x423 }, + /* 620 MCS-04 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 432400, 445500, 408300, 367500, 216200, 432400, + 0x424 }, + /* 621 MCS-05 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 576500, 593900, 544400, 490000, 0, 576500, + 0x425 }, + /* 622 MCS-06 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 648500, 668200, 612500, 551300, 0, 648500, + 0x426 }, + /* 623 MCS-07 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 720600, 742400, 680600, 612500, 0, 720600, + 0x427 }, + /* 624 MCS-08 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 864700, 890900, 816700, 735000, 0, 864700, + 0x428 }, + /* 625 MCS-09 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 960800, 989900, 907400, 816700, 0, 960800, + 0x429 }, + /* 626 MCS-10 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1080900, 1113600, 1020800, 918800, 0, 1080900, + 0x42a }, + /* 627 MCS-11 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1201000, 1237400, 1134300, 1020800, 0, 1201000, + 0x42b }, + + /* When number of spatial streams > 2 + * use below rates + */ + /* 628 MCS-00 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 108100, 111400, 102100, 91900, 54000, 108100, + 0x440 }, + /* 629 MCS-01 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 216200, 222700, 204200, 183800, 108100, 216200, + 0x441 }, + /* 630 MCS-02 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 324300, 334100, 306300, 275600, 0, 324300, + 0x442 }, + /* 631 MCS-03 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 432400, 445500, 408300, 367500, 0, 432400, + 0x443 }, + /* 632 MCS-04 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 648500, 668200, 612500, 551300, 0, 648500, + 0x444 }, + /* 633 MCS-05 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 864700, 890900, 816700, 735000, 0, 864700, + 0x445 }, + /* 634 MCS-06 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 972800, 1002300, 918800, 826900, 0, 972800, + 0x446 }, + /* 635 MCS-07 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1080900, 1113600, 1020800, 918800, 0, 1080900, + 0x447 }, + /* 636 MCS-08 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1297100, 1336400, 1225000, 1102500, 0, 1297100, + 0x448 }, + /* 637 MCS-09 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1441200, 1484800, 1361100, 1225000, 0, 1441200, + 0x449 }, + /* 638 MCS-10 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1621300, 1670500, 1531300, 1378100, 0, 1621300, + 0x44a }, + /* 639 MCS-11 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1801500, 1856100, 1701400, 1531300, 0, 1801500, + 0x44b }, + + /* When number of spatial streams > 3 + * use below rates + */ + /* 640 MCS-00 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 144100, 148500, 136100, 122500, 72100, 144100, + 0x460 }, + /* 641 MCS-01 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 288200, 297000, 272200, 245000, 144100, 288200, + 0x461 }, + /* 642 MCS-02 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 432400, 445500, 408300, 367500, 0, 432400, + 0x462 }, + /* 643 MCS-03 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 576500, 593900, 544400, 490000, 288200, 576500, + 0x463 }, + /* 644 MCS-04 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 864700, 890900, 816700, 735000, 432400, 864700, + 0x464 }, + /* 645 MCS-05 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1152900, 1187900, 1088900, 980000, 0, 1152900, + 0x465 }, + /* 646 MCS-06 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1297100, 1336400, 1225000, 1102500, 0, 1297100, + 0x466 }, + /* 647 MCS-07 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1441200, 1484800, 1361100, 1225000, 0, 1441200, + 0x467 }, + /* 648 MCS-08 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1729400, 1781800, 1633300, 1470000, 0, 1729400, + 0x468 }, + /* 649 MCS-09 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1921600, 1979800, 1814800, 1633300, 0, 1921600, + 0x469 }, + /* 650 MCS-10 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 2161800, 2227300, 2041700, 1837500, 0, 2161800, + 0x46a }, + /* 651 MCS-11 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 2402000, 2474700, 2268500, 2041700, 0, 2402000, + 0x46b }, + + /* When number spatial streams > 4 + * use below rates + */ + /* 652 MCS-00 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 180100, 185600, 170100, 153100, 90100, 180100, + 0x480 }, + /* 653 MCS-01 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 360300, 371200, 340300, 306300, 180100, 360300, + 0x481 }, + /* 654 MCS-02 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 540400, 556800, 510400, 459400, 0, 540400, + 0x482 }, + /* 655 MCS-03 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 720600, 742400, 680600, 612500, 0, 720600, + 0x483 }, + /* 656 MCS-04 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1080900, 1113600, 1020800, 918800, 0, 1080900, + 0x484 }, + /* 657 MCS-05 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1441200, 1484800, 1361100, 1225000, 0, 1441200, + 0x485 }, + /* 658 MCS-06 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1621300, 1670500, 1531300, 1378100, 0, 1621300, + 0x486 }, + /* 659 MCS-07 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1801500, 1856100, 1701400, 1531300, 0, 1801500, + 0x487 }, + /* 660 MCS-08 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 2161800, 2227300, 2041700, 1837500, 0, 2161800, + 0x488 }, + /* 661 MCS-09 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 2402000, 2474700, 2268500, 2041700, 0, 2402000, + 0x489 }, + /* 662 MCS-10 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 2702200, 2784100, 2552100, 2296900, 0, 2702200, + 0x48a }, + /* 663 MCS-11 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 3002500, 3093400, 2835600, 2552100, 0, 3002500, + 0x48b }, + + /* When number of spatial streams > 5 + * use below rates + */ + /* 664 MCS-00 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 216200, 222700, 204200, 183800, 108100, 216200, + 0x4a0 }, + /* 665 MCS-01 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 432400, 445500, 408300, 367500, 216200, 432400, + 0x4a1 }, + /* 666 MCS-02 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 648500, 668200, 612500, 551300, 0, 648500, + 0x4a2 }, + /* 667 MCS-03 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 864700, 890900, 816700, 735000, 432400, 864700, + 0x4a3 }, + /* 668 MCS-04 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1297100, 1336400, 1225000, 1102500, 648500, 1297100, + 0x4a4 }, + /* 669 MCS-05 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1729400, 1781800, 1633300, 1470000, 0, 1729400, + 0x4a5 }, + /* 670 MCS-06 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1945600, 2004500, 1837500, 1653800, 0, 1945600, + 0x4a6 }, + /* 671 MCS-07 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 2161800, 2227300, 2041700, 1837500, 0, 2161800, + 0x4a7 }, + /* 672 MCS-08 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 2594100, 2672700, 2450000, 2205000, 0, 2594100, + 0x4a8 }, + /* 673 MCS-09 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 2882400, 2969700, 2722200, 2450000, 0, 2882400, + 0x4a9 }, + /* 674 MCS-10 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 3242600, 3340900, 3062500, 2756300, 0, 3242600, + 0x4aa }, + /* 675 MCS-11 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 3602900, 3712100, 3402800, 3062500, 0, 3602900, + 0x4ab }, + + /* When number of spatial streams > 6 + * use below rates + */ + /* 676 MCS-00 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 252200, 259800, 238200, 214400, 129900, 252200, + 0x4c0 }, + /* 677 MCS-01 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 504400, 519700, 476400, 428800, 252200, 504400, + 0x4c1 }, + /* 678 MCS-02 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 756600, 779500, 714600, 643100, 0, 756600, + 0x4c2 }, + /* 679 MCS-03 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1008800, 1039400, 952800, 857500, 504400, 1008800, + 0x4c3 }, + /* 680 MCS-04 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1513200, 1559100, 1429200, 1286300, 756600, 1513200, + 0x4c4 }, + /* 681 MCS-05 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 2017600, 2078800, 1905600, 1715000, 0, 2017600, + 0x4c5 }, + /* 682 MCS-06 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 2269900, 2338600, 2143800, 1929400, 0, 2269900, + 0x4c6 }, + /* 683 MCS-07 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 2522100, 2598500, 2381900, 2143800, 0, 2522100, + 0x4c7 }, + /* 684 MCS-08 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 3026500, 3118200, 2858300, 2572500, 0, 3026500, + 0x4c8 }, + /* 685 MCS-09 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 3362700, 3464600, 3175900, 2858300, 0, 3362700, + 0x4c9 }, + /* 686 MCS-10 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 3783100, 3897700, 3572900, 3215600, 0, 3783100, + 0x4ca }, + /* 687 MCS-11 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 4203400, 4330800, 3969900, 3572900, 0, 4203400, + 0x4cb }, + + /* When number of spatial streams > 7 + * use below rates + */ + /* 688 MCS-00 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 288200, 297000, 272200, 245000, 144100, 288200, + 0x4e0 }, + /* 689 MCS-01 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 576500, 593900, 544400, 490000, 288200, 576500, + 0x4e1 }, + /* 690 MCS-02 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 864700, 890900, 816700, 735000, 0, 864700, + 0x4e2 }, + /* 691 MCS-03 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1152900, 1187900, 1088900, 980000, 576500, 1152900, + 0x4e3 }, + /* 692 MCS-04 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 1729400, 1781800, 1633300, 1470000, 864700, 1729400, + 0x4e4 }, + /* 693 MCS-05 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 2305900, 2375800, 2177800, 1960000, 0, 2305900, + 0x4e5 }, + /* 694 MCS-06 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 2594100, 2672700, 2450000, 2205000, 0, 2594100, + 0x4e6 }, + /* 695 MCS-07 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 2882400, 2969700, 2722200, 2450000, 0, 2882400, + 0x4e7 }, + /* 696 MCS-08 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 3458800, 3563600, 3266700, 2940000, 0, 3458800, + 0x4e8 }, + /* 697 MCS-09 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 3843100, 3959600, 3629600, 3266700, 0, 3843100, + 0x4e9 }, + /* 698 MCS-10 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 4323500, 4454500, 4083300, 3675000, 0, 4323500, + 0x4ea }, + /* 699 MCS-11 */ { HE80_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_80, + 4803900, 4949500, 4537000, 4083300, 0, 4803900, + 0x4eb }, + + /* 11ax RU996x2 rates + */ + /* 700 MCS-00 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 72100, 74200, 68100, 61300, 36000, 72100, + 0x400}, + /* 701 MCS-01 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 144100, 148500, 136100, 122500, 72100, 144100, + 0x401}, + /* 702 MCS-02 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 216200, 222700, 204200, 183800, 0, 216200, + 0x402}, + /* 703 MCS-03 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 288200, 297000, 272200, 245000, 144100, 288200, + 0x403}, + /* 704 MCS-04 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 432400, 445500, 408300, 367500, 216200, 432400, + 0x404}, + /* 705 MCS-05 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 576500, 593900, 544400, 490000, 0, 576500, + 0x405}, + /* 706 MCS-06 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 648500, 668200, 612500, 551300, 0, 648500, + 0x406}, + /* 707 MCS-07 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 720600, 742400, 680600, 612500, 0, 720600, + 0x407}, + /* 708 MCS-08 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 864700, 890900, 816700, 735000, 0, 864700, + 0x408}, + /* 709 MCS-09 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 960800, 989900, 907400, 816700, 0, 960800, + 0x409}, + /* 710 MCS-10 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 1080900, 1113600, 1020800, 918800, 0, 1080900, + 0x40a}, + /* 711 MCS-11 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 1201000, 1237400, 1134300, 1020800, 0, 1201000, + 0x40b}, + /* When maximum spatial streams supported at 160MHZ > 1 + * use below rates + */ + /* 712 MCS-00 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 144100, 148500, 136100, 122500, 72100, 144100, + 0x420}, + /* 713 MCS-01 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 288200, 297000, 272200, 245000, 144100, 288200, + 0x421}, + /* 714 MCS-02 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 432400, 445500, 408300, 367500, 0, 432400, + 0x422}, + /* 715 MCS-03 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 576500, 593900, 544400, 490000, 288200, 576500, + 0x423}, + /* 716 MCS-04 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 864700, 890900, 816700, 735000, 432400, 864700, + 0x424}, + /* 717 MCS-05 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 1152900, 1187900, 1088900, 980000, 0, 1152900, + 0x425}, + /* 718 MCS-06 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 1297100, 1336400, 1225000, 1102500, 0, 1297100, + 0x426}, + /* 719 MCS-07 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 1441200, 1484800, 1361100, 1225000, 0, 1441200, + 0x427}, + /* 720 MCS-08 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 1729400, 1781800, 1633300, 1470000, 0, 1729400, + 0x428}, + /* 721 MCS-09 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 1921600, 1979800, 1814800, 1633300, 0, 1921600, + 0x429}, + /* 722 MCS-10 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 2161800, 2227300, 2041700, 1837500, 0, 2161800, + 0x42a}, + /* 723 MCS-11 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 2402000, 2474700, 2268500, 2041700, 0, 2402000, + 0x42b}, + + /* When maximum spatial streams supported at 160MHZ > 2 + * use below rates + */ + /* 724 MCS-00 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 216200, 222700, 204200, 183800, 108100, 216200, + 0x440}, + /* 725 MCS-01 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 432400, 445500, 408300, 367500, 216200, 432400, + 0x441}, + /* 726 MCS-02 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 648500, 668200, 612500, 551300, 0, 648500, + 0x442}, + /* 727 MCS-03 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 864700, 890900, 816700, 735000, 432400, 864700, + 0x443}, + /* 728 MCS-04 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 1297100, 1336400, 1225000, 1102500, 648500, 1297100, + 0x444}, + /* 729 MCS-05 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 1729400, 1781800, 1633300, 1470000, 0, 1729400, + 0x445}, + /* 730 MCS-06 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 1945600, 2004500, 1837500, 1653800, 0, 1945600, + 0x446}, + /* 731 MCS-07 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 2161800, 2227300, 2041700, 1837500, 0, 2161800, + 0x447}, + /* 732 MCS-08 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 2594100, 2672700, 2450000, 2205000, 0, 2594100, + 0x448}, + /* 733 MCS-09 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 2882400, 2969700, 2722200, 2450000, 0, 2882400, + 0x449}, + /* 734 MCS-10 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 3242600, 3340900, 3062500, 2756300, 0, 3242600, + 0x44a}, + /* 735 MCS-11 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 3602900, 3712100, 3402800, 3062500, 0, 3602900, + 0x44b}, + + /* When maximum spatial streams supported at 160MHZ > 3 + * use below rates + */ + /* 736 MCS-00 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 288200, 297000, 272200, 245000, 144100, 288200, + 0x460}, + /* 737 MCS-01 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 576500, 593900, 544400, 490000, 288200, 576500, + 0x461}, + /* 738 MCS-02 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 864700, 890900, 816700, 735000, 0, 864700, + 0x462}, + /* 739 MCS-03 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 1152900, 1187900, 1088900, 980000, 576500, 1152900, + 0x463}, + /* 740 MCS-04 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 1729400, 1781800, 1633300, 1470000, 864700, 1729400, + 0x464}, + /* 741 MCS-05 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 2305900, 2375800, 2177800, 1960000, 0, 2305900, + 0x465}, + /* 742 MCS-06 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 2594100, 2672700, 2450000, 2205000, 0, 2594100, + 0x466}, + /* 743 MCS-07 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 2882400, 2969700, 2722200, 2450000, 0, 2882400, + 0x467}, + /* 744 MCS-08 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 3458800, 3563600, 3266700, 2940000, 0, 3458800, + 0x468}, + /* 745 MCS-09 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 3843100, 3959600, 3629600, 3266700, 0, 3843100, + 0x469}, + /* 746 MCS-10 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 4323500, 4454500, 4083300, 3675000, 0, 4323500, + 0x46a}, + /* 747 MCS-11 */ { HE160_LDPC_ONLY_MASKS, + DP_CMN_MOD_IEEE80211_T_HE_160, + 4803900, 4949500, 4537000, 4083300, 0, 4803900, + 0x46b} + }, +}; + +static const uint16_t _rc_idx[DP_CMN_MOD_IEEE80211_T_MAX_PHY] = { + CCK_RATE_TABLE_INDEX, + OFDM_RATE_TABLE_INDEX, + HT_20_RATE_TABLE_INDEX, + HT_40_RATE_TABLE_INDEX, + VHT_20_RATE_TABLE_INDEX, + VHT_40_RATE_TABLE_INDEX, + VHT_80_RATE_TABLE_INDEX, + VHT_160_RATE_TABLE_INDEX, + HE_20_RATE_TABLE_INDEX, + HE_40_RATE_TABLE_INDEX, + HE_80_RATE_TABLE_INDEX, + HE_160_RATE_TABLE_INDEX, +}; + +/* + * dp_getmodulation - return rate modulation given code spatial width + * @pream_type - preamble type + * @width - bandwidth + * + * return - modulation type + */ +enum DP_CMN_MODULATION_TYPE dp_getmodulation( + uint16_t pream_type, + uint8_t width) +{ + static const enum DP_CMN_MODULATION_TYPE _vht_bw_mod[] = { + DP_CMN_MOD_IEEE80211_T_VHT_20, + DP_CMN_MOD_IEEE80211_T_VHT_40, + DP_CMN_MOD_IEEE80211_T_VHT_80, + DP_CMN_MOD_IEEE80211_T_VHT_160 + }; + + static const enum DP_CMN_MODULATION_TYPE _he_bw_mod[] = { + DP_CMN_MOD_IEEE80211_T_HE_20, + DP_CMN_MOD_IEEE80211_T_HE_40, + DP_CMN_MOD_IEEE80211_T_HE_80, + DP_CMN_MOD_IEEE80211_T_HE_160 + }; + + enum DP_CMN_MODULATION_TYPE modulation; + + CMN_DP_ASSERT(width < CMN_BW_CNT); + + switch (pream_type) { + case DP_CMN_RATECODE_PREAM_HT: + if (width) + modulation = DP_CMN_MOD_IEEE80211_T_HT_40; + else + modulation = DP_CMN_MOD_IEEE80211_T_HT_20; + break; + + case DP_CMN_RATECODE_PREAM_CCK: + modulation = DP_CMN_MOD_IEEE80211_T_CCK; + break; + + case DP_CMN_RATECODE_PREAM_VHT: + modulation = _vht_bw_mod[width]; + break; + + case DP_CMN_RATECODE_PREAM_HE: + modulation = _he_bw_mod[width]; + break; + + default: + modulation = DP_CMN_MOD_IEEE80211_T_OFDM; + break; + } + + return modulation; +} + +/* dp_getrateindex - calculate ratekbps + * @mcs - MCS index + * @nss - NSS 1...8 + * preamble - preamble + * @bw - Transmission Bandwidth + * @rix: rate index to be populated + * @ratecode: ratecode + * + * return - rate in kbps + */ +uint32_t +dp_getrateindex(uint32_t gi, uint16_t mcs, uint8_t nss, uint8_t preamble, + uint8_t bw, uint32_t *rix, uint16_t *ratecode) +{ + uint32_t ratekbps = 0, res = RT_INVALID_INDEX; /* represents failure */ + uint16_t rc; + enum DP_CMN_MODULATION_TYPE mod; + + /* For error case, where idx exceeds bountry limit */ + *ratecode = 0; + mod = dp_getmodulation(preamble, bw); + rc = mcs; + + /* get the base of corresponding rate table entry */ + res = _rc_idx[mod]; + + switch (preamble) { + case DP_CMN_RATECODE_PREAM_HE: + res += rc + nss * NUM_HE_MCS; + break; + + case DP_CMN_RATECODE_PREAM_VHT: + res += rc + nss * NUM_VHT_MCS; + break; + + case DP_CMN_RATECODE_PREAM_HT: + res += rc + nss * NUM_HT_MCS; + break; + + case DP_CMN_RATECODE_PREAM_CCK: + rc &= ~HW_RATECODE_CCK_SHORT_PREAM_MASK; + res += rc; + break; + + case DP_CMN_RATECODE_PREAM_OFDM: + res += rc; + break; + + default: + break; + } + if (res >= DP_RATE_TABLE_SIZE) + goto done; + + if (!gi) { + ratekbps = dp_11abgnratetable.info[res].userratekbps; + } else { + switch (gi) { + case CDP_SGI_0_4_US: + ratekbps = dp_11abgnratetable.info[res].ratekbpssgi; + break; + case CDP_SGI_1_6_US: + ratekbps = dp_11abgnratetable.info[res].ratekbpsdgi; + break; + case CDP_SGI_3_2_US: + ratekbps = dp_11abgnratetable.info[res].ratekbpsqgi; + break; + } + } + *ratecode = dp_11abgnratetable.info[res].ratecode; +done: + *rix = res; + + return ratekbps; +} + +qdf_export_symbol(dp_getrateindex); + +/* dp_rate_idx_to_kbps - get rate kbps from index + * @rate_idx - rate index + * @gintval - guard interval + * + * return - rate index in kbps with help of ratetable + */ +int dp_rate_idx_to_kbps(uint8_t rate_idx, uint8_t gintval) +{ + if (rate_idx >= DP_RATE_TABLE_SIZE) + return 0; + + if (!gintval) + return RT_GET_RAW_KBPS(&dp_11abgnratetable, rate_idx); + else + return RT_GET_SGI_KBPS(&dp_11abgnratetable, rate_idx); + return 0; +} + +qdf_export_symbol(dp_rate_idx_to_kbps); diff --git a/drivers/staging/qca-wifi-host-cmn/dp/cmn_dp_api/dp_ratetable.h b/drivers/staging/qca-wifi-host-cmn/dp/cmn_dp_api/dp_ratetable.h new file mode 100644 index 0000000000000000000000000000000000000000..b22527a48590d14a58762d8299ed431a6a7b488e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/cmn_dp_api/dp_ratetable.h @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_RATES_H_ +#define _DP_RATES_H_ + +#define CMN_DP_ASSERT(__bool) + +/* + *Band Width Types + */ +enum CMN_BW_TYPES { + CMN_BW_20MHZ, + CMN_BW_40MHZ, + CMN_BW_80MHZ, + CMN_BW_160MHZ, + CMN_BW_CNT, + CMN_BW_IDLE = 0xFF, /*default BW state */ +}; + +#define NUM_SPATIAL_STREAMS 8 +#define SUPPORT_11AX 1 +#define MAX_SPATIAL_STREAMS_SUPPORTED_AT_160MHZ 4 +#define VHT_EXTRA_MCS_SUPPORT +#define CONFIG_160MHZ_SUPPORT 1 +#define NUM_HT_MCS 8 +#define NUM_VHT_MCS 12 + +#define NUM_HE_MCS 12 + +#define NUM_SPATIAL_STREAM 4 +#define NUM_SPATIAL_STREAMS 8 +#define WHAL_160MHZ_SUPPORT 1 +#define MAX_SPATIAL_STREAMS_SUPPORTED_AT_160MHZ 4 +#define RT_GET_RT(_rt) ((const struct DP_CMN_RATE_TABLE *)(_rt)) +#define RT_GET_INFO(_rt, _index) RT_GET_RT(_rt)->info[(_index)] +#define RT_GET_RAW_KBPS(_rt, _index) \ + (RT_GET_INFO(_rt, (_index)).ratekbps) +#define RT_GET_SGI_KBPS(_rt, _index) \ + (RT_GET_INFO(_rt, (_index)).ratekbpssgi) + +#define HW_RATECODE_CCK_SHORT_PREAM_MASK 0x4 +#define RT_INVALID_INDEX (0xff) +/* pow2 to optimize out * and / */ +#define DP_ATH_RATE_EP_MULTIPLIER BIT(7) +#define DP_ATH_EP_MUL(a, b) ((a) * (b)) +#define DP_ATH_RATE_LPF_LEN 10 /* Low pass filter length + * for averaging rates + */ +#define DUMMY_MARKER 0 +#define DP_ATH_RATE_IN(c) (DP_ATH_EP_MUL((c), DP_ATH_RATE_EP_MULTIPLIER)) + +static inline int dp_ath_rate_lpf(uint64_t _d, int _e) +{ + _e = DP_ATH_RATE_IN((_e)); + return (((_d) != DUMMY_MARKER) ? ((((_d) << 3) + (_e) - (_d)) >> 3) : + (_e)); +} + +static inline int dp_ath_rate_out(uint64_t _i) +{ + int _mul = DP_ATH_RATE_EP_MULTIPLIER; + + return (((_i) != DUMMY_MARKER) ? + ((((_i) % (_mul)) >= ((_mul) / 2)) ? + ((_i) + ((_mul) - 1)) / (_mul) : (_i) / (_mul)) : + DUMMY_MARKER); +} + +#define RXDESC_GET_DATA_LEN(rx_desc) \ + (txrx_pdev->htt_pdev->ar_rx_ops->msdu_desc_msdu_length(rx_desc)) +#define ASSEMBLE_HW_RATECODE(_rate, _nss, _pream) \ + (((_pream) << 6) | ((_nss) << 4) | (_rate)) +#define GET_HW_RATECODE_PREAM(_rcode) (((_rcode) >> 6) & 0x3) +#define GET_HW_RATECODE_NSS(_rcode) (((_rcode) >> 4) & 0x3) +#define GET_HW_RATECODE_RATE(_rcode) (((_rcode) >> 0) & 0xF) + +#define VHT_INVALID_MCS (0xFF) /* Certain MCSs are not valid in VHT mode */ +#define VHT_INVALID_BCC_RATE 0 +#define NUM_HT_SPATIAL_STREAM 4 + +#define NUM_HT_RIX_PER_BW (NUM_HT_MCS * NUM_HT_SPATIAL_STREAM) +#define NUM_VHT_RIX_PER_BW (NUM_VHT_MCS * NUM_SPATIAL_STREAMS) +#define NUM_HE_RIX_PER_BW (NUM_HE_MCS * NUM_SPATIAL_STREAMS) + +#define NUM_VHT_RIX_FOR_160MHZ (NUM_VHT_MCS * \ + MAX_SPATIAL_STREAMS_SUPPORTED_AT_160MHZ) +#define NUM_HE_RIX_FOR_160MHZ (NUM_HE_MCS * \ + MAX_SPATIAL_STREAMS_SUPPORTED_AT_160MHZ) + +#define CCK_RATE_TABLE_INDEX 0 +#define CCK_RATE_11M_INDEX 0 +#define CCK_FALLBACK_MIN_RATE 0x3 /** 1 Mbps */ +#define CCK_FALLBACK_MAX_RATE 0x2 /** 2 Mbps */ + +#define OFDM_RATE_TABLE_INDEX 4 +#define OFDMA_RATE_54M_INDEX 8 + +#define HT_20_RATE_TABLE_INDEX 12 +#define HT_40_RATE_TABLE_INDEX (HT_20_RATE_TABLE_INDEX + NUM_HT_RIX_PER_BW) + +#define VHT_20_RATE_TABLE_INDEX (HT_40_RATE_TABLE_INDEX + NUM_HT_RIX_PER_BW) +#define VHT_40_RATE_TABLE_INDEX (VHT_20_RATE_TABLE_INDEX + NUM_VHT_RIX_PER_BW) +#define VHT_80_RATE_TABLE_INDEX (VHT_40_RATE_TABLE_INDEX + NUM_VHT_RIX_PER_BW) + +#define VHT_160_RATE_TABLE_INDEX (VHT_80_RATE_TABLE_INDEX + NUM_VHT_RIX_PER_BW) +#define VHT_LAST_RIX_PLUS_ONE (VHT_160_RATE_TABLE_INDEX + \ + NUM_VHT_RIX_FOR_160MHZ) + +#define HE_20_RATE_TABLE_INDEX VHT_LAST_RIX_PLUS_ONE +#define HE_40_RATE_TABLE_INDEX (HE_20_RATE_TABLE_INDEX + NUM_HE_RIX_PER_BW) +#define HE_80_RATE_TABLE_INDEX (HE_40_RATE_TABLE_INDEX + NUM_HE_RIX_PER_BW) + +#define HE_160_RATE_TABLE_INDEX (HE_80_RATE_TABLE_INDEX + NUM_HE_RIX_PER_BW) +#define DP_RATE_TABLE_SIZE (HE_160_RATE_TABLE_INDEX + NUM_HE_RIX_FOR_160MHZ) + +/* The following would span more than one octet + * when 160MHz BW defined for VHT + * Also it's important to maintain the ordering of + * this enum else it would break other rate adapation functions. + */ +enum DP_CMN_MODULATION_TYPE { + DP_CMN_MOD_IEEE80211_T_DS, /* direct sequence spread spectrum */ + DP_CMN_MOD_IEEE80211_T_OFDM, /* frequency division multiplexing */ + DP_CMN_MOD_IEEE80211_T_HT_20, + DP_CMN_MOD_IEEE80211_T_HT_40, + DP_CMN_MOD_IEEE80211_T_VHT_20, + DP_CMN_MOD_IEEE80211_T_VHT_40, + DP_CMN_MOD_IEEE80211_T_VHT_80, + DP_CMN_MOD_IEEE80211_T_VHT_160, + DP_CMN_MOD_IEEE80211_T_HE_20, /* 11AX support enabled */ + DP_CMN_MOD_IEEE80211_T_HE_40, + DP_CMN_MOD_IEEE80211_T_HE_80, + DP_CMN_MOD_IEEE80211_T_HE_160, + DP_CMN_MOD_IEEE80211_T_MAX_PHY +}; + +/* more common nomenclature */ +#define DP_CMN_MOD_IEEE80211_T_CCK DP_CMN_MOD_IEEE80211_T_DS + +enum HW_RATECODE_PREAM_TYPE { + HW_RATECODE_PREAM_OFDM, + HW_RATECODE_PREAM_CCK, + HW_RATECODE_PREAM_HT, + HW_RATECODE_PREAM_VHT, +}; + +enum DP_CMN_MODULATION_TYPE dp_getmodulation( + uint16_t pream_type, + uint8_t width); + +uint32_t +dp_getrateindex(uint32_t gi, uint16_t mcs, uint8_t nss, uint8_t preamble, + uint8_t bw, uint32_t *rix, uint16_t *ratecode); + +int dp_rate_idx_to_kbps(uint8_t rate_idx, uint8_t gintval); + +#endif /*_DP_RATES_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_bus.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_bus.h new file mode 100644 index 0000000000000000000000000000000000000000..99ad08adba05585639b168261518958f649e3b22 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_bus.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2016-2017, 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_bus.h + * @brief Define the host data path bus related functions + */ +#ifndef _CDP_TXRX_BUS_H_ +#define _CDP_TXRX_BUS_H_ + +/** + * cdp_bus_suspend() - suspend bus + * @soc: data path soc handle + * @pdev_id: id of dp pdev handle + * + * suspend bus + * + * return QDF_STATUS_SUCCESS suspend is not implemented or suspend done + */ +static inline QDF_STATUS cdp_bus_suspend(ol_txrx_soc_handle soc, + uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->bus_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->bus_ops->bus_suspend) + return soc->ops->bus_ops->bus_suspend(soc, pdev_id); + return QDF_STATUS_E_NOSUPPORT; +} + +/** + * cdp_bus_resume() - resume bus + * @soc: data path soc handle + * @pdev_id: id of dp pdev handle + * + * resume bus + * + * return QDF_STATUS_SUCCESS resume is not implemented or suspend done + */ +static inline QDF_STATUS cdp_bus_resume(ol_txrx_soc_handle soc, + uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->bus_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->bus_ops->bus_resume) + return soc->ops->bus_ops->bus_resume(soc, pdev_id); + return QDF_STATUS_E_NOSUPPORT; +} + +/** + * cdp_process_wow_ack() - Process wow ack response + * @soc: data path soc handle + * @pdev_id: id of dp pdev handle + * + * Do any required data path operations for target wow ack + * suspend response. + * + * Return: None + */ +static inline void cdp_process_wow_ack_rsp(ol_txrx_soc_handle soc, + uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->bus_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->bus_ops->process_wow_ack_rsp) + return soc->ops->bus_ops->process_wow_ack_rsp(soc, pdev_id); +} + +/** + * cdp_process_target_suspend_req() - Process target suspend request + * @soc: data path soc handle + * @pdev_id: id of dp pdev handle + * + * Complete the datapath specific work before target suspend + * + * Return: None + */ +static inline void cdp_process_target_suspend_req(ol_txrx_soc_handle soc, + uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->bus_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->bus_ops->process_target_suspend_req) + return soc->ops->bus_ops->process_target_suspend_req(soc, + pdev_id); +} +#endif /* _CDP_TXRX_BUS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cfg.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..4c88b815d30fd12aaf97043943db00c987d9627e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cfg.h @@ -0,0 +1,418 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_cfg.h + * @brief Define the host data path configuration API functions + */ +#ifndef _CDP_TXRX_CFG_H_ +#define _CDP_TXRX_CFG_H_ +#include "cdp_txrx_handle.h" +/** + * cdp_cfg_set_rx_fwd_disabled() - enable/disable rx forwarding + * @soc - data path soc handle + * @pdev - data path device instance + * @disable_rx_fwd - enable or disable rx forwarding + * + * enable/disable rx forwarding + * + * return NONE + */ +static inline void +cdp_cfg_set_rx_fwd_disabled(ol_txrx_soc_handle soc, struct cdp_cfg *cfg_pdev, + uint8_t disable_rx_fwd) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_cfg_rx_fwd_disabled) + return; + + soc->ops->cfg_ops->set_cfg_rx_fwd_disabled(cfg_pdev, + disable_rx_fwd); +} + +/** + * cdp_cfg_set_packet_log_enabled() - enable/disable packet log + * @soc - data path soc handle + * @pdev - data path device instance + * @val - enable or disable packet log + * + * packet log enable or disable + * + * return NONE + */ +static inline void +cdp_cfg_set_packet_log_enabled(ol_txrx_soc_handle soc, + struct cdp_cfg *cfg_pdev, uint8_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_cfg_packet_log_enabled) + return; + + soc->ops->cfg_ops->set_cfg_packet_log_enabled(cfg_pdev, + val); +} + +/** + * cdp_cfg_attach() - attach config module + * @soc - data path soc handle + * @osdev - os instance + * @cfg_param - configuration parameter should be propagated + * + * Allocate configuration module instance, and propagate configuration values + * + * return soc configuration module instance + */ +static inline struct cdp_cfg +*cdp_cfg_attach(ol_txrx_soc_handle soc, + qdf_device_t osdev, void *cfg_param) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->cfg_attach) + return NULL; + + return soc->ops->cfg_ops->cfg_attach(osdev, cfg_param); +} + +/** + * cdp_cfg_vdev_rx_set_intrabss_fwd() - enable/disable intra bass forwarding + * @soc - data path soc handle + * @vdev_id - virtual interface id + * @val - enable or disable intra bss forwarding + * + * ap isolate, do not forward intra bss traffic + * + * return NONE + */ +static inline void +cdp_cfg_vdev_rx_set_intrabss_fwd(ol_txrx_soc_handle soc, + uint8_t vdev_id, bool val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->vdev_rx_set_intrabss_fwd) + return; + + soc->ops->cfg_ops->vdev_rx_set_intrabss_fwd(soc, vdev_id, val); +} + +/** + * cdp_cfg_is_rx_fwd_disabled() - get vdev rx forward + * @soc - data path soc handle + * @vdev - virtual interface instance + * + * Return rx forward feature enable status + * + * return 1 enabled + * 0 disabled + */ +static inline uint8_t +cdp_cfg_is_rx_fwd_disabled(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->is_rx_fwd_disabled) + return 0; + + return soc->ops->cfg_ops->is_rx_fwd_disabled(vdev); + +} + +/** + * cdp_cfg_tx_set_is_mgmt_over_wmi_enabled() - mgmt tx over wmi enable/disable + * @soc - data path soc handle + * @value - feature enable or disable + * + * Enable or disable management packet TX over WMI feature + * + * return None + */ +static inline void +cdp_cfg_tx_set_is_mgmt_over_wmi_enabled(ol_txrx_soc_handle soc, + uint8_t value) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->tx_set_is_mgmt_over_wmi_enabled) + return; + + soc->ops->cfg_ops->tx_set_is_mgmt_over_wmi_enabled(value); +} + +/** + * cdp_cfg_is_high_latency() - query data path is in high or low latency + * @soc - data path soc handle + * @pdev - data path device instance + * + * query data path is in high or low latency + * + * return 1 high latency data path, usb or sdio + * 0 low latency data path + */ +static inline int +cdp_cfg_is_high_latency(ol_txrx_soc_handle soc, struct cdp_cfg *cfg_pdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->is_high_latency) + return 0; + + return soc->ops->cfg_ops->is_high_latency(cfg_pdev); +} + +/** + * cdp_cfg_set_flow_control_parameters() - set flow control params + * @soc - data path soc handle + * @cfg - dp config module instance + * @param - parameters should set + * + * set flow control params + * + * return None + */ +static inline void +cdp_cfg_set_flow_control_parameters(ol_txrx_soc_handle soc, + struct cdp_cfg *cfg_pdev, void *param) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_flow_control_parameters) + return; + + soc->ops->cfg_ops->set_flow_control_parameters(cfg_pdev, + param); +} + +/** + * cdp_cfg_set_flow_steering - Set Rx flow steering config based on CFG ini + * config. + * + * @pdev - handle to the physical device + * @val - 0 - disable, 1 - enable + * + * Return: None + */ +static inline void cdp_cfg_set_flow_steering(ol_txrx_soc_handle soc, + struct cdp_cfg *cfg_pdev, uint8_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_flow_steering) + return; + + soc->ops->cfg_ops->set_flow_steering(cfg_pdev, val); +} + +static inline void cdp_cfg_get_max_peer_id(ol_txrx_soc_handle soc, + struct cdp_cfg *cfg_pdev) +{ +} + +/** + * cdp_cfg_set_ptp_rx_opt_enabled() - enable/disable ptp rx timestamping + * @soc - data path soc handle + * @pdev - data path device instance + * @val - enable or disable packet log + * + * ptp rx timestamping enable or disable + * + * return NONE + */ +static inline void +cdp_cfg_set_ptp_rx_opt_enabled(ol_txrx_soc_handle soc, + struct cdp_cfg *cfg_pdev, uint8_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_ptp_rx_opt_enabled) + return; + + soc->ops->cfg_ops->set_ptp_rx_opt_enabled(cfg_pdev, val); +} + +/** + * cdp_cfg_set_new_htt_msg_format() - set htt h2t msg feature + * @soc - datapath soc handle + * @val - enable or disable new htt h2t msg feature + * + * Enable whether htt h2t message length includes htc header length + * + * return NONE + */ +static inline void +cdp_cfg_set_new_htt_msg_format(ol_txrx_soc_handle soc, + uint8_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_new_htt_msg_format) + return; + + soc->ops->cfg_ops->set_new_htt_msg_format(val); +} + +/** + * cdp_cfg_set_peer_unmap_conf_support() - set peer unmap conf feature + * @soc - datapath soc handle + * @val - enable or disable peer unmap conf feature + * + * Set if peer unmap confirmation feature is supported by both FW and in INI + * + * return NONE + */ +static inline void +cdp_cfg_set_peer_unmap_conf_support(ol_txrx_soc_handle soc, bool val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_peer_unmap_conf_support) + return; + + soc->ops->cfg_ops->set_peer_unmap_conf_support(val); +} + +/** + * cdp_cfg_get_peer_unmap_conf_support() - check peer unmap conf feature + * @soc - datapath soc handle + * + * Check if peer unmap confirmation feature is enabled + * + * return true is peer unmap confirmation feature is enabled else false + */ +static inline bool +cdp_cfg_get_peer_unmap_conf_support(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return false; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->get_peer_unmap_conf_support) + return false; + + return soc->ops->cfg_ops->get_peer_unmap_conf_support(); +} + +static inline void +cdp_cfg_set_tx_compl_tsf64(ol_txrx_soc_handle soc, + uint8_t val) +{ + if (!soc || !soc->ops) { + dp_debug("invalid instance"); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_tx_compl_tsf64) + return; + + soc->ops->cfg_ops->set_tx_compl_tsf64(val); +} + +static inline bool +cdp_cfg_get_tx_compl_tsf64(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + dp_debug("invalid instance"); + return false; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->get_tx_compl_tsf64) + return false; + + return soc->ops->cfg_ops->get_tx_compl_tsf64(); +} + +#endif /* _CDP_TXRX_CFG_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn.h new file mode 100644 index 0000000000000000000000000000000000000000..f9b1f601edb0f72f20e59b2b89b919e55d0be581 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn.h @@ -0,0 +1,2565 @@ +/* + * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_cmn.h + * @brief Define the host data path converged API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_CMN_H_ +#define _CDP_TXRX_CMN_H_ + +#include "qdf_types.h" +#include "qdf_nbuf.h" +#include "cdp_txrx_ops.h" +#include "cdp_txrx_handle.h" +#include "cdp_txrx_cmn_struct.h" + +#ifdef ENABLE_VERBOSE_DEBUG +extern bool is_dp_verbose_debug_enabled; +#endif + +/****************************************************************************** + * + * Common Data Path Header File + * + *****************************************************************************/ +#define dp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP, params) +#define dp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP, params) +#define dp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP, params) +#define dp_info(params...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP, ## params) +#define dp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP, params) + +#ifdef DP_PRINT_NO_CONSOLE +#define dp_err_log(params...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP, ## params) +#define dp_info_rl(params...) \ + __QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP, ## params) +#else +#define dp_err_log(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP, params) +#define dp_info_rl(params...) QDF_TRACE_INFO_RL(QDF_MODULE_ID_DP, params) +#endif /* DP_PRINT_NO_CONSOLE */ + +#ifdef ENABLE_VERBOSE_DEBUG +/** + * @enum verbose_debug_module: + * if INI "enable_verbose_debug" has to set following bit positions to enable + * respective module's excessive logging, + * + * @hif_verbose_debug_mask: 1st bit [0th index] is for HIF module + * @hal_verbose_debug_mask: 2nd bit [1st index] is for HAL module + * @dp_verbose_debug_mask: 3rd bit [2nd index] is for DP module + */ +enum verbose_debug_module { + hif_vebose_debug_mask = 1 << 0, + hal_verbose_debug_mask = 1 << 1, + dp_verbose_debug_mask = 1 << 2, +}; + +#define dp_verbose_debug(params...) \ + if (unlikely(is_dp_verbose_debug_enabled)) \ + do {\ + QDF_TRACE_DEBUG(QDF_MODULE_ID_DP, params); \ + } while (0) +#else +#define dp_verbose_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP, params) +#endif + +#define dp_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_DP, params) +#define dp_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_DP, params) +#define dp_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_DP, params) +#define dp_nofl_info(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_DP, params) +#define dp_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_DP, params) + +#define dp_alert_rl(params...) QDF_TRACE_FATAL_RL(QDF_MODULE_ID_DP, params) +#define dp_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, params) +#define dp_warn_rl(params...) QDF_TRACE_WARN_RL(QDF_MODULE_ID_DP, params) +#define dp_debug_rl(params...) QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, params) + +/** + * @enum vdev_host_stats_id: + * host stats update from CDP have to set one of the following stats ID + * + * @DP_VDEV_STATS_PKT_CNT_ONLY: update Tx packet count only + * @DP_VDEV_STATS_TX_ME: update Tx ingress stats + */ +enum { + DP_VDEV_STATS_PKT_CNT_ONLY, + DP_VDEV_STATS_TX_ME, +}; + +static inline QDF_STATUS +cdp_soc_attach_target(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_INVAL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_soc_attach_target) + return QDF_STATUS_SUCCESS; + + return soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc); + +} + +static inline QDF_STATUS +cdp_vdev_attach(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint8_t *vdev_mac_addr, uint8_t vdev_id, + enum wlan_op_mode op_mode, enum wlan_op_subtype subtype) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_vdev_attach) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->txrx_vdev_attach(soc, pdev_id, + vdev_mac_addr, vdev_id, + op_mode, subtype); +} + +#ifdef DP_FLOW_CTL +/** + * cdp_flow_pool_map() - Create flow pool for vdev + * @soc: data path soc handle + * @pdev_id: id of dp pdev handle + * @vdev_id: vdev_id corresponding to vdev start + * + * Create per vdev flow pool. + * + * return none + */ +static inline QDF_STATUS cdp_flow_pool_map(ol_txrx_soc_handle soc, + uint8_t pdev_id, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_INVAL; + } + + if (!soc->ops->flowctl_ops || + !soc->ops->flowctl_ops->flow_pool_map_handler) + return QDF_STATUS_E_INVAL; + + return soc->ops->flowctl_ops->flow_pool_map_handler(soc, pdev_id, + vdev_id); +} + +/** + * cdp_flow_pool_unmap() - Delete flow pool + * @soc: data path soc handle + * @pdev_id: id of dp pdev handle + * @vdev_id: vdev_id corresponding to vdev start + * + * Delete flow pool + * + * return none + */ +static inline void cdp_flow_pool_unmap(ol_txrx_soc_handle soc, + uint8_t pdev_id, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->flowctl_ops || + !soc->ops->flowctl_ops->flow_pool_unmap_handler) + return; + + return soc->ops->flowctl_ops->flow_pool_unmap_handler(soc, pdev_id, + vdev_id); +} +#endif + +static inline QDF_STATUS +cdp_vdev_detach(ol_txrx_soc_handle soc, uint8_t vdev_id, + ol_txrx_vdev_delete_cb callback, void *cb_context) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_vdev_detach) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->txrx_vdev_detach(soc, vdev_id, + callback, cb_context); +} + +static inline int +cdp_pdev_attach_target(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_pdev_attach_target) + return 0; + + return soc->ops->cmn_drv_ops->txrx_pdev_attach_target(soc, pdev_id); +} + +static inline QDF_STATUS cdp_pdev_attach + (ol_txrx_soc_handle soc, HTC_HANDLE htc_pdev, qdf_device_t osdev, + uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_pdev_attach) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->txrx_pdev_attach(soc, htc_pdev, osdev, + pdev_id); +} + +/** + * cdp_pdev_post_attach() - attach the data SW state + * @soc: datapath soc handle + * @pdev_id: the data physical device id being removed + * + * This function is used when the WLAN driver is being loaded to + * attach the host data component within the driver. + * + * Return: 0 for success or error code + */ +static inline int cdp_pdev_post_attach(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_pdev_post_attach) + return 0; + + return soc->ops->cmn_drv_ops->txrx_pdev_post_attach(soc, pdev_id); +} + +/** + * cdp_pdev_pre_detach() - detach the data SW state + * @soc: datapath soc handle + * @pdev_id: the data physical device id being removed + * @force: delete the pdev (and its vdevs and peers) even if + * there are outstanding references by the target to the vdevs + * and peers within the pdev + * + * This function is used when the WLAN driver is being removed to + * detach the host data component within the driver. + * + * Return: None + */ +static inline void +cdp_pdev_pre_detach(ol_txrx_soc_handle soc, uint8_t pdev_id, int force) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_pdev_pre_detach) + return; + + soc->ops->cmn_drv_ops->txrx_pdev_pre_detach(soc, pdev_id, force); +} + +static inline QDF_STATUS +cdp_pdev_detach(ol_txrx_soc_handle soc, uint8_t pdev_id, int force) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_pdev_detach) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->txrx_pdev_detach(soc, pdev_id, force); +} + +static inline void +cdp_pdev_deinit(ol_txrx_soc_handle soc, uint8_t pdev_id, int force) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_pdev_deinit) + return; + + soc->ops->cmn_drv_ops->txrx_pdev_deinit(soc, pdev_id, force); +} + +static inline QDF_STATUS cdp_peer_create + (ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t *peer_mac_addr) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_create) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->txrx_peer_create(soc, vdev_id, + peer_mac_addr); +} + +static inline void cdp_peer_setup + (ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_setup) + return; + + soc->ops->cmn_drv_ops->txrx_peer_setup(soc, vdev_id, + peer_mac); +} + +/* + * cdp_cp_peer_del_response - Call the peer delete response handler + * @soc: Datapath SOC handle + * @vdev_id: id of virtual device object + * @peer_mac_addr: Mac address of the peer + * + * Return: void + */ +static inline QDF_STATUS cdp_cp_peer_del_response + (ol_txrx_soc_handle soc, + uint8_t vdev_id, + uint8_t *peer_mac_addr) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_cp_peer_del_response) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->txrx_cp_peer_del_response(soc, + vdev_id, + peer_mac_addr); +} +/** + * cdp_peer_get_ast_info_by_soc() - search the soc AST hash table + * and return ast entry information + * of first ast entry found in the + * table with given mac address + * + * @soc - data path soc handle + * @ast_mac_addr - AST entry mac address + * @ast_entry_info - ast entry information + * + * return - true if ast entry found with ast_mac_addr + * false if ast entry not found + */ +static inline bool cdp_peer_get_ast_info_by_soc + (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, + struct cdp_ast_entry_info *ast_entry_info) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return false; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_get_ast_info_by_soc) + return false; + + return soc->ops->cmn_drv_ops->txrx_peer_get_ast_info_by_soc + (soc, ast_mac_addr, + ast_entry_info); +} + +/** + * cdp_peer_get_ast_info_by_pdev() - search the soc AST hash table + * and return ast entry information + * if mac address and pdev_id matches + * + * @soc - data path soc handle + * @ast_mac_addr - AST entry mac address + * @pdev_id - pdev_id + * @ast_entry_info - ast entry information + * + * return - true if ast entry found with ast_mac_addr + * false if ast entry not found + */ +static inline bool cdp_peer_get_ast_info_by_pdev + (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, + uint8_t pdev_id, + struct cdp_ast_entry_info *ast_entry_info) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return false; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_get_ast_info_by_pdev) + return false; + + return soc->ops->cmn_drv_ops->txrx_peer_get_ast_info_by_pdev + (soc, + ast_mac_addr, + pdev_id, + ast_entry_info); +} + +/** + * cdp_peer_ast_delete_by_soc() - delete the ast entry from soc AST hash table + * with given mac address + * + * @soc - data path soc handle + * @ast_mac_addr - AST entry mac address + * @callback - callback function to called on ast delete response from FW + * @cookie - argument to be passed to callback + * + * return - QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete + * is sent + * QDF_STATUS_E_INVAL false if ast entry not found + */ +static inline QDF_STATUS cdp_peer_ast_delete_by_soc + (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, + txrx_ast_free_cb callback, + void *cookie) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_INVAL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_ast_delete_by_soc) + return QDF_STATUS_E_INVAL; + + return soc->ops->cmn_drv_ops->txrx_peer_ast_delete_by_soc + (soc, + ast_mac_addr, + callback, + cookie); +} + +/** + * cdp_peer_ast_delete_by_pdev() - delete the ast entry from soc AST hash table + * if mac address and pdev_id matches + * + * @soc - data path soc handle + * @ast_mac_addr - AST entry mac address + * @pdev_id - pdev id + * @callback - callback function to called on ast delete response from FW + * @cookie - argument to be passed to callback + * + * return - QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete + * is sent + * QDF_STATUS_E_INVAL false if ast entry not found + */ +static inline QDF_STATUS cdp_peer_ast_delete_by_pdev + (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, + uint8_t pdev_id, txrx_ast_free_cb callback, + void *cookie) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_INVAL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_ast_delete_by_pdev) + return QDF_STATUS_E_INVAL; + + return soc->ops->cmn_drv_ops->txrx_peer_ast_delete_by_pdev + (soc, + ast_mac_addr, + pdev_id, + callback, + cookie); +} + +static inline int cdp_peer_add_ast + (ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac, + uint8_t *mac_addr, + enum cdp_txrx_ast_entry_type type, uint32_t flags) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_add_ast) + return 0; + + return soc->ops->cmn_drv_ops->txrx_peer_add_ast(soc, + vdev_id, + peer_mac, + mac_addr, + type, + flags); +} + +static inline QDF_STATUS cdp_peer_reset_ast + (ol_txrx_soc_handle soc, uint8_t *wds_macaddr, uint8_t *peer_macaddr, + uint8_t vdev_id) +{ + + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_reset_ast) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->txrx_peer_reset_ast(soc, wds_macaddr, + peer_macaddr, vdev_id); +} + +static inline QDF_STATUS cdp_peer_reset_ast_table + (ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table(soc, vdev_id); +} + +static inline void cdp_peer_flush_ast_table + (ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table) + return; + + soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table(soc); +} + +static inline int cdp_peer_update_ast + (ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac, + uint8_t *wds_macaddr, uint32_t flags) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_update_ast) + return 0; + + + return soc->ops->cmn_drv_ops->txrx_peer_update_ast(soc, + vdev_id, + peer_mac, + wds_macaddr, + flags); +} + +static inline void cdp_peer_teardown + (ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_teardown) + return; + + soc->ops->cmn_drv_ops->txrx_peer_teardown(soc, vdev_id, peer_mac); +} + +static inline void +cdp_peer_delete(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t *peer_mac, uint32_t bitmap) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_delete) + return; + + soc->ops->cmn_drv_ops->txrx_peer_delete(soc, vdev_id, peer_mac, bitmap); +} + +/** + * cdp_peer_detach_sync() - peer detach sync callback + * @soc: datapath soc handle + * @vdev_id: virtual device/interface id + * @peer_mac: peer mac address + * @peer_unmap_sync: peer unmap sync cb. + * @bitmap: bitmap indicating special handling of request. + * + * Return: None + */ +static inline void +cdp_peer_delete_sync(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac, + QDF_STATUS(*delete_cb)( + uint8_t vdev_id, + uint32_t peerid_cnt, + uint16_t *peerid_list), + uint32_t bitmap) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_delete_sync) + return; + + soc->ops->cmn_drv_ops->txrx_peer_delete_sync(soc, vdev_id, peer_mac, + delete_cb, + bitmap); +} + +static inline int +cdp_set_monitor_mode(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t smart_monitor) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_set_monitor_mode) + return 0; + + return soc->ops->cmn_drv_ops->txrx_set_monitor_mode(soc, vdev_id, + smart_monitor); +} + +static inline QDF_STATUS +cdp_set_curchan(ol_txrx_soc_handle soc, + uint8_t pdev_id, + uint32_t chan_mhz) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_set_curchan) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->txrx_set_curchan(soc, pdev_id, chan_mhz); +} + +static inline QDF_STATUS +cdp_set_privacy_filters(ol_txrx_soc_handle soc, uint8_t vdev_id, + void *filter, uint32_t num) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_set_privacy_filters) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->txrx_set_privacy_filters(soc, vdev_id, + filter, num); +} + +static inline int +cdp_set_monitor_filter(ol_txrx_soc_handle soc, uint8_t pdev_id, + struct cdp_monitor_filter *filter_val) +{ + if (soc->ops->mon_ops->txrx_set_advance_monitor_filter) + return soc->ops->mon_ops->txrx_set_advance_monitor_filter(soc, + pdev_id, + filter_val); + return 0; +} + + +/****************************************************************************** + * Data Interface (B Interface) + *****************************************************************************/ +static inline void +cdp_vdev_register(ol_txrx_soc_handle soc, uint8_t vdev_id, + ol_osif_vdev_handle osif_vdev, + struct ol_txrx_ops *txrx_ops) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_vdev_register) + return; + + soc->ops->cmn_drv_ops->txrx_vdev_register(soc, vdev_id, + osif_vdev, txrx_ops); +} + +static inline int +cdp_mgmt_send(ol_txrx_soc_handle soc, uint8_t vdev_id, + qdf_nbuf_t tx_mgmt_frm, uint8_t type) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_mgmt_send) + return 0; + + return soc->ops->cmn_drv_ops->txrx_mgmt_send(soc, vdev_id, + tx_mgmt_frm, type); +} + +static inline int +cdp_mgmt_send_ext(ol_txrx_soc_handle soc, uint8_t vdev_id, + qdf_nbuf_t tx_mgmt_frm, uint8_t type, + uint8_t use_6mbps, uint16_t chanfreq) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_mgmt_send_ext) + return 0; + + return soc->ops->cmn_drv_ops->txrx_mgmt_send_ext + (soc, vdev_id, tx_mgmt_frm, type, use_6mbps, chanfreq); +} + + +static inline QDF_STATUS +cdp_mgmt_tx_cb_set(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint8_t type, ol_txrx_mgmt_tx_cb download_cb, + ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set + (soc, pdev_id, type, download_cb, ota_ack_cb, ctxt); +} + +/** + * cdp_peer_unmap_sync_cb_set() - set peer unmap sync callback + * @soc: datapath soc handle + * @pdev_id: physical device instance id + * @peer_unmap_sync: peer unmap sync callback + * + * Return: None + */ +static inline void +cdp_peer_unmap_sync_cb_set(ol_txrx_soc_handle soc, + uint8_t pdev_id, + QDF_STATUS(*unmap_resp_cb)( + uint8_t vdev_id, + uint32_t peerid_cnt, + uint16_t *peerid_list)) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_unmap_sync_cb_set) + return; + + soc->ops->cmn_drv_ops->txrx_peer_unmap_sync_cb_set(soc, pdev_id, + unmap_resp_cb); +} + +/* + * cdp_data_tx_cb_set(): set the callback for non standard tx + * @soc - datapath soc handle + * @vdev_id - virtual device/interface id + * @callback - callback function + * @ctxt: callback context + * + */ +static inline void +cdp_data_tx_cb_set(ol_txrx_soc_handle soc, uint8_t vdev_id, + ol_txrx_data_tx_cb callback, void *ctxt) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_data_tx_cb_set) + return; + + soc->ops->cmn_drv_ops->txrx_data_tx_cb_set(soc, vdev_id, + callback, ctxt); +} + +/****************************************************************************** + * Statistics and Debugging Interface (C Interface) + *****************************************************************************/ +/** + * External Device physical address types + * + * Currently, both MAC and IPA uController use the same size addresses + * and descriptors are exchanged between these two depending on the mode. + * + * Rationale: qdf_dma_addr_t is the type used internally on the host for DMA + * operations. However, external device physical address sizes + * may be different from host-specific physical address sizes. + * This calls for the following definitions for target devices + * (MAC, IPA uc). + */ +#if HTT_PADDR64 +typedef uint64_t target_paddr_t; +#else +typedef uint32_t target_paddr_t; +#endif /*HTT_PADDR64 */ + +static inline int +cdp_aggr_cfg(ol_txrx_soc_handle soc, uint8_t vdev_id, + int max_subfrms_ampdu, + int max_subfrms_amsdu) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_aggr_cfg) + return 0; + + return soc->ops->cmn_drv_ops->txrx_aggr_cfg(soc, vdev_id, + max_subfrms_ampdu, max_subfrms_amsdu); +} + +static inline int +cdp_fw_stats_get(ol_txrx_soc_handle soc, uint8_t vdev_id, + struct ol_txrx_stats_req *req, bool per_vdev, + bool response_expected) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_fw_stats_get) + return 0; + + return soc->ops->cmn_drv_ops->txrx_fw_stats_get(soc, vdev_id, req, + per_vdev, response_expected); +} + +static inline int +cdp_debug(ol_txrx_soc_handle soc, uint8_t vdev_id, int debug_specs) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_debug) + return 0; + + return soc->ops->cmn_drv_ops->txrx_debug(soc, vdev_id, debug_specs); +} + +static inline QDF_STATUS +cdp_fw_stats_cfg(ol_txrx_soc_handle soc, + uint8_t vdev_id, uint8_t cfg_stats_type, uint32_t cfg_val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_fw_stats_cfg) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->txrx_fw_stats_cfg(soc, vdev_id, + cfg_stats_type, cfg_val); +} + +static inline void cdp_print_level_set(ol_txrx_soc_handle soc, unsigned level) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_print_level_set) + return; + + soc->ops->cmn_drv_ops->txrx_print_level_set(level); +} + +/* + * cdp_get_vdev_mac_addr() – Detach txrx peer + * @soc_hdl: Datapath soc handle + * @vdev_id: virtual device/interface id + * + * Return: MAC address on success, NULL on failure. + * + */ +static inline uint8_t * +cdp_get_vdev_mac_addr(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr) + return NULL; + + return soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr(soc, vdev_id); + +} + +/** + * cdp_get_os_rx_handles_from_vdev() - Return os rx handles for a vdev + * @soc: ol_txrx_soc_handle handle + * @vdev_id: vdev id for which os rx handles are needed + * @stack_fn_p: pointer to stack function pointer + * @osif_handle_p: pointer to ol_osif_vdev_handle + * + * Return: void + */ +static inline +void cdp_get_os_rx_handles_from_vdev(ol_txrx_soc_handle soc, + uint8_t vdev_id, + ol_txrx_rx_fp *stack_fn_p, + ol_osif_vdev_handle *osif_handle_p) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_get_os_rx_handles_from_vdev) + return; + + soc->ops->cmn_drv_ops->txrx_get_os_rx_handles_from_vdev(soc, vdev_id, + stack_fn_p, + osif_handle_p); +} + +/** + * cdp_get_ctrl_pdev_from_vdev() - Return control pdev of vdev + * @soc: datapath soc handle + * @vdev_id: virtual device/interface id + * + * Return: Handle to control pdev + */ +static inline struct cdp_cfg * +cdp_get_ctrl_pdev_from_vdev(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev) + return NULL; + + return soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev(soc, + vdev_id); +} + +/* + * cdp_get_mon_vdev_from_pdev() - Get vdev handle of monitor mode + * @soc: datapath soc handle + * @pdev_id: physical device instance id + * + * Return: virtual interface id + */ +static inline uint8_t +cdp_get_mon_vdev_from_pdev(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return -EINVAL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev) + return -EINVAL; + + return soc->ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev(soc, pdev_id); +} + +static inline void +cdp_soc_detach(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_soc_detach) + return; + + soc->ops->cmn_drv_ops->txrx_soc_detach(soc); +} + +/** + * cdp_soc_init() - Initialize txrx SOC + * @soc: ol_txrx_soc_handle handle + * @devid: Device ID + * @hif_handle: Opaque HIF handle + * @psoc: Opaque Objmgr handle + * @htc_handle: Opaque HTC handle + * @qdf_dev: QDF device + * @dp_ol_if_ops: Offload Operations + * + * Return: DP SOC handle on success, NULL on failure + */ +static inline ol_txrx_soc_handle +cdp_soc_init(ol_txrx_soc_handle soc, u_int16_t devid, + void *hif_handle, + struct cdp_ctrl_objmgr_psoc *psoc, + HTC_HANDLE htc_handle, qdf_device_t qdf_dev, + struct ol_if_ops *dp_ol_if_ops) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_soc_init) + return NULL; + + return soc->ops->cmn_drv_ops->txrx_soc_init(soc, psoc, + hif_handle, + htc_handle, qdf_dev, + dp_ol_if_ops, devid); +} + +/** + * cdp_soc_deinit() - Deinitialize txrx SOC + * @soc: Opaque DP SOC handle + * + * Return: None + */ +static inline void +cdp_soc_deinit(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_soc_deinit) + return; + + soc->ops->cmn_drv_ops->txrx_soc_deinit(soc); +} + +/** + * cdp_tso_soc_attach() - TSO attach function + * @soc: ol_txrx_soc_handle handle + * + * Reserve TSO descriptor buffers + * + * Return: QDF_STATUS_SUCCESS on Success or + * QDF_STATUS_E_FAILURE on failure + */ +static inline QDF_STATUS +cdp_tso_soc_attach(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_tso_soc_attach) + return 0; + + return soc->ops->cmn_drv_ops->txrx_tso_soc_attach(soc); +} + +/** + * cdp_tso_soc_detach() - TSO detach function + * @soc: ol_txrx_soc_handle handle + * + * Release TSO descriptor buffers + * + * Return: QDF_STATUS_SUCCESS on Success or + * QDF_STATUS_E_FAILURE on failure + */ +static inline QDF_STATUS +cdp_tso_soc_detach(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_tso_soc_detach) + return 0; + + return soc->ops->cmn_drv_ops->txrx_tso_soc_detach(soc); +} + +/** + * cdp_addba_resp_tx_completion() - Indicate addba response tx + * completion to dp to change tid state. + * @soc: soc handle + * @peer_mac: mac address of peer handle + * @vdev_id: id of vdev handle + * @tid: tid + * @status: Tx completion status + * + * Return: success/failure of tid update + */ +static inline int cdp_addba_resp_tx_completion(ol_txrx_soc_handle soc, + uint8_t *peer_mac, + uint16_t vdev_id, + uint8_t tid, int status) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->addba_resp_tx_completion) + return 0; + + return soc->ops->cmn_drv_ops->addba_resp_tx_completion(soc, peer_mac, + vdev_id, tid, status); +} + +static inline int cdp_addba_requestprocess(ol_txrx_soc_handle soc, + uint8_t *peer_mac, uint16_t vdev_id, uint8_t dialogtoken, uint16_t tid, + uint16_t batimeout, uint16_t buffersize, uint16_t startseqnum) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->addba_requestprocess) + return 0; + + return soc->ops->cmn_drv_ops->addba_requestprocess(soc, peer_mac, + vdev_id, dialogtoken, tid, batimeout, buffersize, + startseqnum); +} + +static inline QDF_STATUS +cdp_addba_responsesetup(ol_txrx_soc_handle soc, + uint8_t *peer_mac, uint16_t vdev_id, + uint8_t tid, uint8_t *dialogtoken, + uint16_t *statuscode, uint16_t *buffersize, + uint16_t *batimeout) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->addba_responsesetup) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->addba_responsesetup(soc, peer_mac, + vdev_id, tid, dialogtoken, statuscode, buffersize, + batimeout); +} + +static inline int cdp_delba_process(ol_txrx_soc_handle soc, uint8_t *peer_mac, + uint16_t vdev_id, int tid, + uint16_t reasoncode) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->delba_process) + return 0; + + return soc->ops->cmn_drv_ops->delba_process(soc, peer_mac, + vdev_id, tid, reasoncode); +} + +/** + * cdp_delba_tx_completion() - Handle delba tx completion + * to update stats and retry transmission if failed. + * @soc: soc handle + * @peer_mac: peer mac address + * @vdev_id: id of vdev handle + * @tid: Tid number + * @status: Tx completion status + * + * Return: 0 on Success, 1 on failure + */ + +static inline int cdp_delba_tx_completion(ol_txrx_soc_handle soc, + uint8_t *peer_mac, + uint16_t vdev_id, + uint8_t tid, int status) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->delba_tx_completion) + return 0; + + return soc->ops->cmn_drv_ops->delba_tx_completion(soc, peer_mac, + vdev_id, + tid, status); +} + +static inline QDF_STATUS +cdp_set_addbaresponse(ol_txrx_soc_handle soc, + uint8_t *peer_mac, uint16_t vdev_id, int tid, + uint16_t statuscode) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_addba_response) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->set_addba_response(soc, peer_mac, vdev_id, + tid, statuscode); +} + +/** + * cdp_set_vdev_dscp_tid_map(): function to set DSCP-tid map in the vap + * @soc : soc handle + * @vdev_id: id of vdev handle + * @map_id: id of the tid map + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_set_vdev_dscp_tid_map(ol_txrx_soc_handle soc, + uint8_t vdev_id, uint8_t map_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map(soc, vdev_id, + map_id); +} + +#ifdef QCA_MULTIPASS_SUPPORT +/** + * cdp_set_vlan_groupkey(): function to set vlan ID - group key map in the vap + * @soc : soc handle + * @vdev_id: id of vdev handle + * @vlan_id: vlan id + * @group_key: corresponding group key to vlan ID + * + * Return: void + */ +static inline +QDF_STATUS cdp_set_vlan_groupkey(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint16_t vlan_id, uint16_t group_key) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_vlan_groupkey) + return 0; + + return soc->ops->cmn_drv_ops->set_vlan_groupkey(soc, vdev_id, vlan_id, + group_key); +} +#endif + +/** + * cdp_ath_get_total_per(): function to get hw retries + * @soc : soc handle + * @pdev_id: id of pdev handle + * + * Return: get hw retries + */ +static inline +int cdp_ath_get_total_per(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_get_total_per) + return 0; + + return soc->ops->cmn_drv_ops->txrx_get_total_per(soc, pdev_id); +} + +/** + * cdp_set_pdev_dscp_tid_map(): function to change tid values in DSCP-tid map + * @pdev_id: id of pdev handle + * @map_id: id of the tid map + * @tos: index value in map that needs to be changed + * @tid: tid value passed by user + * + * Return: void + */ +static inline void cdp_set_pdev_dscp_tid_map(ol_txrx_soc_handle soc, + uint8_t pdev_id, uint8_t map_id, uint8_t tos, uint8_t tid) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map) + return; + + soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map(soc, pdev_id, + map_id, tos, tid); +} + +/** + * cdp_flush_cache_rx_queue() - flush cache rx queue frame + * + * Return: None + */ +static inline void cdp_flush_cache_rx_queue(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->flush_cache_rx_queue) + return; + soc->ops->cmn_drv_ops->flush_cache_rx_queue(); +} + +/** + * cdp_txrx_stats_request(): function to map to host and firmware statistics + * @soc: soc handle + * @vdev_id: virtual device ID + * @req: stats request container + * + * return: status + */ +static inline +int cdp_txrx_stats_request(ol_txrx_soc_handle soc, uint8_t vdev_id, + struct cdp_txrx_stats_req *req) +{ + if (!soc || !soc->ops || !soc->ops->cmn_drv_ops || !req) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_ASSERT(0); + return 0; + } + + if (soc->ops->cmn_drv_ops->txrx_stats_request) + return soc->ops->cmn_drv_ops->txrx_stats_request(soc, vdev_id, + req); + + return 0; +} + +/** + * cdp_txrx_intr_attach(): function to attach and configure interrupt + * @soc: soc handle + */ +static inline QDF_STATUS cdp_txrx_intr_attach(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_intr_attach) + return 0; + + return soc->ops->cmn_drv_ops->txrx_intr_attach(soc); +} + +/** + * cdp_txrx_intr_detach(): function to detach interrupt + * @soc: soc handle + */ +static inline void cdp_txrx_intr_detach(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_intr_detach) + return; + + soc->ops->cmn_drv_ops->txrx_intr_detach(soc); +} + +/** + * cdp_display_stats(): function to map to dump stats + * @soc: soc handle + * @value: statistics option + */ +static inline QDF_STATUS +cdp_display_stats(ol_txrx_soc_handle soc, uint16_t value, + enum qdf_stats_verbosity_level level) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->display_stats) + return 0; + + return soc->ops->cmn_drv_ops->display_stats(soc, value, level); +} + + +/** + * cdp_set_pn_check(): function to set pn check + * @soc: soc handle + * @vdev_id: id of virtual device + * @peer_mac: mac address of peer + * @sec_type: security type + * @rx_pn: receive pn + */ +static inline int cdp_set_pn_check(ol_txrx_soc_handle soc, + uint8_t vdev_id, uint8_t *peer_mac, + enum cdp_sec_type sec_type, uint32_t *rx_pn) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_pn_check) + return 0; + + soc->ops->cmn_drv_ops->set_pn_check(soc, vdev_id, peer_mac, + sec_type, rx_pn); + return 0; +} + +/** + * cdp_set_key_sec_type(): function to set sec mode of key + * @soc: soc handle + * @vdev_id: id of virtual device + * @peer_mac: mac address of peer + * @sec_type: security type + * #is_unicast: ucast or mcast + */ +static inline int cdp_set_key_sec_type(ol_txrx_soc_handle soc, + uint8_t vdev_id, + uint8_t *peer_mac, + enum cdp_sec_type sec_type, + bool is_unicast) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_key_sec_type) + return 0; + + soc->ops->cmn_drv_ops->set_key_sec_type(soc, vdev_id, + peer_mac, sec_type, is_unicast); + return 0; +} + +static inline QDF_STATUS +cdp_set_key(ol_txrx_soc_handle soc, + uint8_t vdev_id, + uint8_t *mac, + bool is_unicast, uint32_t *key) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->set_key) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->set_key(soc, vdev_id, mac, + is_unicast, key); +} + +/** + * cdp_update_config_parameters(): function to propagate configuration + * parameters to datapath + * @soc: opaque soc handle + * @cfg: configuration handle + * + * Return: status: 0 - Success, non-zero: Failure + */ +static inline +QDF_STATUS cdp_update_config_parameters(ol_txrx_soc_handle soc, + struct cdp_config_params *cfg) +{ + struct cdp_soc *psoc = (struct cdp_soc *)soc; + + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->update_config_parameters) + return QDF_STATUS_SUCCESS; + + return soc->ops->cmn_drv_ops->update_config_parameters(psoc, + cfg); +} + +/** + * cdp_pdev_get_dp_txrx_handle() - get advanced dp handle from pdev + * @soc: opaque soc handle + * @pdev_id: id of data path pdev handle + * + * Return: opaque dp handle + */ +static inline void * +cdp_pdev_get_dp_txrx_handle(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (soc->ops->cmn_drv_ops->get_dp_txrx_handle) + return soc->ops->cmn_drv_ops->get_dp_txrx_handle(soc, pdev_id); + + return 0; +} + +/** + * cdp_pdev_set_dp_txrx_handle() - set advanced dp handle in pdev + * @soc: opaque soc handle + * @pdev_id: id of data path pdev handle + * @dp_hdl: opaque pointer for dp_txrx_handle + * + * Return: void + */ +static inline void +cdp_pdev_set_dp_txrx_handle(ol_txrx_soc_handle soc, uint8_t pdev_id, + void *dp_hdl) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_dp_txrx_handle) + return; + + soc->ops->cmn_drv_ops->set_dp_txrx_handle(soc, pdev_id, dp_hdl); +} + +/** + * cdp_vdev_get_dp_ext_txrx_handle() - get extended dp handle from vdev + * @soc: opaque soc handle + * @vdev_id: vdev id + * + * Return: opaque dp handle + */ +static inline void * +cdp_vdev_get_dp_ext_txrx_handle(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (soc->ops->cmn_drv_ops->get_vdev_dp_ext_txrx_handle) + return soc->ops->cmn_drv_ops->get_vdev_dp_ext_txrx_handle( + soc, vdev_id); + + return 0; +} + +/** + * cdp_vdev_set_dp_ext_txrx_handle() - set extended dp handle in vdev + * @soc: opaque soc handle + * @vdev_id: vdev id + * @size: size of the advance dp handle + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_vdev_set_dp_ext_txrx_handle(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint16_t size) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_vdev_dp_ext_txrx_handle) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->set_vdev_dp_ext_txrx_handle(soc, + vdev_id, + size); +} + +/* + * cdp_soc_get_dp_txrx_handle() - get extended dp handle from soc + * @soc: opaque soc handle + * + * Return: opaque extended dp handle + */ +static inline void * +cdp_soc_get_dp_txrx_handle(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return NULL; + } + + if (soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle) + return soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle( + (struct cdp_soc *) soc); + + return NULL; +} + +/** + * cdp_soc_set_dp_txrx_handle() - set advanced dp handle in soc + * @soc: opaque soc handle + * @dp_hdl: opaque pointer for dp_txrx_handle + * + * Return: void + */ +static inline void +cdp_soc_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *dp_handle) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle) + return; + + soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle((struct cdp_soc *)soc, + dp_handle); +} + +/** + * cdp_soc_handle_mode_change() - Update pdev_id to lmac_id mapping + * @soc: opaque soc handle + * @pdev_id: id of data path pdev handle + * @lmac_id: lmac id + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_soc_handle_mode_change(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint32_t lmac_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->map_pdev_to_lmac) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->handle_mode_change(soc, pdev_id, + lmac_id); +} + +/** + * cdp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping + * @soc: opaque soc handle + * @pdev_id: id of data path pdev handle + * @lmac_id: lmac id + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_soc_map_pdev_to_lmac(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint32_t lmac_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->map_pdev_to_lmac) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->map_pdev_to_lmac(soc, pdev_id, + lmac_id); +} + +/** + * cdp_txrx_set_pdev_status_down() - set pdev down/up status + * @soc: soc opaque handle + * @pdev_id: id of data path pdev handle + * @is_pdev_down: pdev down/up status + * + * return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_txrx_set_pdev_status_down(ol_txrx_soc_handle soc, + uint8_t pdev_id, + bool is_pdev_down) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_pdev_status_down) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->set_pdev_status_down(soc, pdev_id, + is_pdev_down); +} + +/** + * cdp_tx_send() - enqueue frame for transmission + * @soc: soc opaque handle + * @vdev_id: id of VAP device + * @nbuf: nbuf to be enqueued + * + * This API is used by Extended Datapath modules to enqueue frame for + * transmission + * + * Return: void + */ +static inline void +cdp_tx_send(ol_txrx_soc_handle soc, uint8_t vdev_id, qdf_nbuf_t nbuf) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->tx_send) + return; + + soc->ops->cmn_drv_ops->tx_send(soc, vdev_id, nbuf); +} + +/** + * cdp_set_pdev_pcp_tid_map() - set pdev pcp-tid-map + * @soc: opaque soc handle + * @pdev_id: id of data path pdev handle + * @pcp: pcp value + * @tid: tid value + * + * This API is used to configure the pcp-to-tid mapping for a pdev. + * + * Return: QDF_STATUS_SUCCESS if value set successfully + * QDF_STATUS_E_INVAL false if error + */ +static inline +QDF_STATUS cdp_set_pdev_pcp_tid_map(ol_txrx_soc_handle soc, + uint8_t pdev_id, + uint32_t pcp, uint32_t tid) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_pdev_pcp_tid_map) + return QDF_STATUS_E_INVAL; + + return soc->ops->cmn_drv_ops->set_pdev_pcp_tid_map(soc, pdev_id, + pcp, tid); +} + +/** + * cdp_get_peer_mac_from_peer_id() - get peer mac addr from peer id + * @soc: opaque soc handle + * @pdev_id: id of data path pdev handle + * @peer_id: data path peer id + * @peer_mac: peer_mac + * + * Return: QDF_STATUS + */ +static inline +QDF_STATUS cdp_get_peer_mac_from_peer_id(ol_txrx_soc_handle soc, + uint32_t peer_id, uint8_t *peer_mac) +{ + if (soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id) + return soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id( + soc, peer_id, peer_mac); + + return QDF_STATUS_E_INVAL; +} + +/** + * cdp_vdev_tx_lock() - acquire lock + * @soc: opaque soc handle + * @vdev: data path vdev handle + * + * Return: void + */ +static inline +void cdp_vdev_tx_lock(ol_txrx_soc_handle soc, + uint8_t vdev_id) +{ + if (soc->ops->cmn_drv_ops->txrx_vdev_tx_lock) + soc->ops->cmn_drv_ops->txrx_vdev_tx_lock(soc, vdev_id); +} + +/** + * cdp_vdev_tx_unlock() - release lock + * @soc: opaque soc handle + * @vdev_id: id of data path vdev handle + * + * Return: void + */ +static inline +void cdp_vdev_tx_unlock(ol_txrx_soc_handle soc, + uint8_t vdev_id) +{ + if (soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock) + soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock(soc, vdev_id); +} + +/** + * cdp_ath_getstats() - get updated athstats + * @soc: opaque soc handle + * @id: vdev_id/pdev_id based on type + * @stats: cdp network device stats structure + * @type: device type pdev/vdev + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ath_getstats(ol_txrx_soc_handle soc, + uint8_t id, struct cdp_dev_stats *stats, + uint8_t type) +{ + if (soc && soc->ops && soc->ops->cmn_drv_ops->txrx_ath_getstats) + return soc->ops->cmn_drv_ops->txrx_ath_getstats(soc, id, + stats, type); + + return QDF_STATUS_E_FAILURE; +} + +/** + * cdp_set_gid_flag() - set groupid flag + * @soc: opaque soc handle + * @pdev_id: id of data path pdev handle + * @mem_status: member status from grp management frame + * @user_position: user position from grp management frame + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_set_gid_flag(ol_txrx_soc_handle soc, + uint8_t pdev_id, u_int8_t *mem_status, + u_int8_t *user_position) +{ + if (soc->ops->cmn_drv_ops->txrx_set_gid_flag) + return soc->ops->cmn_drv_ops->txrx_set_gid_flag(soc, pdev_id, + mem_status, + user_position); + return QDF_STATUS_E_FAILURE; +} + +/** + * cdp_fw_supported_enh_stats_version() - returns the fw enhanced stats version + * @soc: opaque soc handle + * @pdev_id: id of data path pdev handle + * + */ +static inline +uint32_t cdp_fw_supported_enh_stats_version(ol_txrx_soc_handle soc, + uint8_t pdev_id) +{ + if (soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version) + return soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version(soc, pdev_id); + return 0; +} + +/** + * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev + * @soc: opaque soc handle + * @vdev_id: id of vdev device + * @force: number of frame in SW queue + * Return: void + */ +static inline +void cdp_if_mgmt_drain(ol_txrx_soc_handle soc, + uint8_t vdev_id, int force) +{ + if (soc->ops->cmn_drv_ops->txrx_if_mgmt_drain) + soc->ops->cmn_drv_ops->txrx_if_mgmt_drain(soc, vdev_id, force); +} + +/* cdp_peer_map_attach() - CDP API to allocate PEER map memory + * @soc: opaque soc handle + * @max_peers: number of peers created in FW + * @max_ast_index: max number of AST index supported in FW + * @peer_map_unmap_v2: flag indicates HTT peer map v2 is enabled in FW + * + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_peer_map_attach(ol_txrx_soc_handle soc, uint32_t max_peers, + uint32_t max_ast_index, bool peer_map_unmap_v2) +{ + if (soc && soc->ops && soc->ops->cmn_drv_ops && + soc->ops->cmn_drv_ops->txrx_peer_map_attach) + return soc->ops->cmn_drv_ops->txrx_peer_map_attach(soc, + max_peers, + max_ast_index, + peer_map_unmap_v2); + + return QDF_STATUS_SUCCESS; +} + +/* cdp_txrx_classify_and_update() - To classify the packet and update stats + * @soc: opaque soc handle + * @vdev: opaque dp vdev handle + * @skb: data + * @dir: rx or tx packet + * @nbuf_classify: packet classification object + * + * Return: 1 on success else return 0 + */ +static inline int +cdp_txrx_classify_and_update(ol_txrx_soc_handle soc, + uint8_t vdev_id, qdf_nbuf_t skb, + enum txrx_direction dir, + struct ol_txrx_nbuf_classify *nbuf_class) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_classify_update) + return 0; + + return soc->ops->cmn_drv_ops->txrx_classify_update(soc, vdev_id, + skb, + dir, nbuf_class); +} + +/** + * cdp_get_dp_capabilities() - get DP capabilities + * @soc: opaque soc handle + * @dp_cap: enum of DP capabilities + * + * Return: bool + */ +static inline bool +cdp_get_dp_capabilities(struct cdp_soc_t *soc, enum cdp_capabilities dp_caps) +{ + if (soc && soc->ops && soc->ops->cmn_drv_ops && + soc->ops->cmn_drv_ops->get_dp_capabilities) + return soc->ops->cmn_drv_ops->get_dp_capabilities(soc, dp_caps); + return false; +} + +#ifdef RECEIVE_OFFLOAD +/** + * cdp_register_rx_offld_flush_cb() - register LRO/GRO flush cb function pointer + * @soc - data path soc handle + * @pdev - device instance pointer + * + * register rx offload flush callback function pointer + * + * return none + */ +static inline void cdp_register_rx_offld_flush_cb(ol_txrx_soc_handle soc, + void (rx_ol_flush_cb)(void *)) +{ + if (!soc || !soc->ops || !soc->ops->rx_offld_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->rx_offld_ops->register_rx_offld_flush_cb) + return soc->ops->rx_offld_ops->register_rx_offld_flush_cb( + rx_ol_flush_cb); +} + +/** + * cdp_deregister_rx_offld_flush_cb() - deregister Rx offld flush cb function + * @soc - data path soc handle + * + * deregister rx offload flush callback function pointer + * + * return none + */ +static inline void cdp_deregister_rx_offld_flush_cb(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops || !soc->ops->rx_offld_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb) + return soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb(); +} +#endif /* RECEIVE_OFFLOAD */ + +/** + * @cdp_set_ba_timeout() - set ba aging timeout per AC + * + * @soc - pointer to the soc + * @value - timeout value in millisec + * @ac - Access category + * + * @return - void + */ +static inline void cdp_set_ba_timeout(ol_txrx_soc_handle soc, + uint8_t ac, uint32_t value) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_set_ba_aging_timeout) + return; + + soc->ops->cmn_drv_ops->txrx_set_ba_aging_timeout(soc, ac, value); +} + +/** + * @cdp_get_ba_timeout() - return ba aging timeout per AC + * + * @soc - pointer to the soc + * @ac - access category + * @value - timeout value in millisec + * + * @return - void + */ +static inline void cdp_get_ba_timeout(ol_txrx_soc_handle soc, + uint8_t ac, uint32_t *value) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_get_ba_aging_timeout) + return; + + soc->ops->cmn_drv_ops->txrx_get_ba_aging_timeout(soc, ac, value); +} + +/** + * cdp_cfg_get() - get cfg for dp enum + * + * @soc: pointer to the soc + * @cfg: cfg enum + * + * Return - cfg value + */ +static inline uint32_t cdp_cfg_get(ol_txrx_soc_handle soc, enum cdp_dp_cfg cfg) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + return 0; + } + + if (!soc->ops->cmn_drv_ops || !soc->ops->cmn_drv_ops->txrx_get_cfg) + return 0; + + return soc->ops->cmn_drv_ops->txrx_get_cfg(soc, cfg); +} + +/** + * cdp_soc_set_rate_stats_ctx() - set rate stats context in soc + * @soc: opaque soc handle + * @ctx: rate stats context + * + * Return: void + */ +static inline void +cdp_soc_set_rate_stats_ctx(ol_txrx_soc_handle soc, + void *ctx) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_rate_stats_ctx) + return; + + soc->ops->cmn_drv_ops->set_rate_stats_ctx((struct cdp_soc_t *)soc, + ctx); +} + +/** + * cdp_soc_get_rate_stats_ctx() - get rate stats context in soc + * @soc: opaque soc handle + * + * Return: void + */ +static inline void* +cdp_soc_get_rate_stats_ctx(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->get_rate_stats_ctx) + return NULL; + + return soc->ops->cmn_drv_ops->get_rate_stats_ctx(soc); +} + +/** + * cdp_peer_flush_rate_stats() - flush peer rate statistics + * @soc: opaque soc handle + * @pdev_id: id of pdev handle + * @buf: stats buffer + */ +static inline void +cdp_peer_flush_rate_stats(ol_txrx_soc_handle soc, uint8_t pdev_id, + void *buf) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_flush_rate_stats) + return; + + soc->ops->cmn_drv_ops->txrx_peer_flush_rate_stats(soc, pdev_id, buf); +} + +/** + * cdp_flush_rate_stats_request() - request flush rate statistics + * @soc: opaque soc handle + * @pdev_id: id of pdev handle + */ +static inline QDF_STATUS +cdp_flush_rate_stats_request(struct cdp_soc_t *soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_flush_rate_stats_request) + return QDF_STATUS_E_FAILURE; + + return soc->ops->cmn_drv_ops->txrx_flush_rate_stats_request(soc, + pdev_id); +} + +/** + * cdp_set_vdev_pcp_tid_map() - set vdev pcp-tid-map + * @soc: opaque soc handle + * @vdev: id of data path vdev handle + * @pcp: pcp value + * @tid: tid value + * + * This API is used to configure the pcp-to-tid mapping for a pdev. + * + * Return: QDF_STATUS_SUCCESS if value set successfully + * QDF_STATUS_E_INVAL false if error + */ +static inline +QDF_STATUS cdp_set_vdev_pcp_tid_map(ol_txrx_soc_handle soc, + uint8_t vdev_id, + uint8_t pcp, uint8_t tid) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_vdev_pcp_tid_map) + return QDF_STATUS_E_INVAL; + + return soc->ops->cmn_drv_ops->set_vdev_pcp_tid_map(soc, vdev_id, + pcp, tid); +} + +/** + * cdp_tx_send_exc() - Transmit a frame on a given vdev in exception path + * + * @soc: opaque soc handle + * @vdev_id: vdev id + * @nbuf: skb + * @tx_exc_metadata: Handle that holds exception path meta data + * + * Return: NULL on success + * nbuf when it fails to send + */ +static inline qdf_nbuf_t +cdp_tx_send_exc(ol_txrx_soc_handle soc, + uint8_t vdev_id, + qdf_nbuf_t nbuf, + struct cdp_tx_exception_metadata *tx_exc_metadata) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->tx_send_exc) + return 0; + + return soc->ops->cmn_drv_ops->tx_send_exc + (soc, vdev_id, nbuf, tx_exc_metadata); +} + +/** + * cdp_vdev_get_peer_mac_list(): function to get peer mac list of vdev + * @soc: Datapath soc handle + * @vdev_id: vdev id + * @newmac: Table of the clients mac + * @mac_cnt: No. of MACs required + * + * return: no of clients + */ +static inline uint16_t +cdp_vdev_get_peer_mac_list(ol_txrx_soc_handle soc, + uint8_t vdev_id, + uint8_t newmac[][QDF_MAC_ADDR_SIZE], + uint16_t mac_cnt) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->get_peer_mac_list) + return 0; + + return soc->ops->cmn_drv_ops->get_peer_mac_list + (soc, vdev_id, newmac, mac_cnt); +} + +/** + * cdp_rx_get_pending() - Get number of pending frames of RX threads + * @soc: opaque soc handle + * Return: number of pending frames + */ +static inline int +cdp_rx_get_pending(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ol_ops || + !soc->ol_ops->dp_rx_get_pending) + return 0; + + if (cdp_cfg_get(soc, cfg_dp_wow_check_rx_pending)) + return soc->ol_ops->dp_rx_get_pending(soc); + else + return 0; +} +#endif /* _CDP_TXRX_CMN_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn_reg.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..f99fd6b5c37e7f6ac8bff6e56da0e7f813cb3298 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn_reg.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * @file cdp_txrx_cmn.h + * @brief Define the host data path converged API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_CMN_REG_H_ +#define _CDP_TXRX_CMN_REG_H_ + +#include "hif_main.h" + +#define MOB_DRV_LEGACY_DP 0xdeed/*FIXME Add MCL device IDs */ +#define LITHIUM_DP 0xfffd/*FIXME Add Litium device ID */ +/* Use these device IDs for attach in future */ + +#if defined(DP_TXRX_SOC_ATTACH) +static inline ol_txrx_soc_handle +ol_txrx_soc_attach(void *scn_handle, struct ol_if_ops *dp_ol_if_ops) +{ + return NULL; +} +#else +ol_txrx_soc_handle +ol_txrx_soc_attach(void *scn_handle, struct ol_if_ops *dp_ol_if_ops); +#endif + +/** + * dp_soc_attach_wifi3() - Attach txrx SOC + * @ctrl_psoc: Opaque SOC handle from Ctrl plane + * @htc_handle: Opaque HTC handle + * @hif_handle: Opaque HIF handle + * @qdf_osdev: QDF device + * @ol_ops: Offload Operations + * @device_id: Device ID + * + * Return: DP SOC handle on success, NULL on failure + */ + +/** + * dp_soc_init_wifi3() - Initialize txrx SOC + * @soc: Opaque DP SOC handle + * @ctrl_psoc: Opaque SOC handle from control plane + * @hif_handle: Opaque HIF handle + * @htc_handle: Opaque HTC handle + * @qdf_osdev: QDF device + * @ol_ops: Offload Operations + * @device_id: Device ID + * + * Return: DP SOC handle on success, NULL on failure + */ +#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) +struct cdp_soc_t * +dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + struct hif_opaque_softc *hif_handle, + HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, + struct ol_if_ops *ol_ops, uint16_t device_id); +void *dp_soc_init_wifi3(struct cdp_soc_t *soc, + struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + struct hif_opaque_softc *hif_handle, + HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, + struct ol_if_ops *ol_ops, uint16_t device_id); +#else +static inline struct cdp_soc_t * +dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + struct hif_opaque_softc *hif_handle, + HTC_HANDLE htc_handle, + qdf_device_t qdf_osdev, + struct ol_if_ops *ol_ops, + uint16_t device_id) +{ + return NULL; +} + +static inline +void *dp_soc_init_wifi3(struct cdp_soc_t *soc, + struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + struct hif_opaque_softc *hif_handle, + HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, + struct ol_if_ops *ol_ops, uint16_t device_id) +{ + return NULL; +} +#endif /* QCA_WIFI_QCA8074 */ + +static inline +ol_txrx_soc_handle cdp_soc_attach(u_int16_t devid, + struct hif_opaque_softc *hif_handle, + struct cdp_ctrl_objmgr_psoc *psoc, + HTC_HANDLE htc_handle, + qdf_device_t qdf_dev, + struct ol_if_ops *dp_ol_if_ops) +{ + switch (devid) { + case LITHIUM_DP: /*FIXME Add lithium devide IDs */ + case QCA8074_DEVICE_ID: /* Hawekeye */ + case QCA8074V2_DEVICE_ID: /* Hawekeye V2*/ + case QCA6290_DEVICE_ID: + case QCN9000_DEVICE_ID: + case QCA6390_DEVICE_ID: + case QCA6490_DEVICE_ID: + case QCA6750_DEVICE_ID: + case QCA6390_EMULATION_DEVICE_ID: + case RUMIM2M_DEVICE_ID_NODE0: /*lithium emulation */ + case RUMIM2M_DEVICE_ID_NODE1: /*lithium emulation */ + case RUMIM2M_DEVICE_ID_NODE2: /*lithium emulation */ + case RUMIM2M_DEVICE_ID_NODE3: /*lithium emulation */ + case RUMIM2M_DEVICE_ID_NODE4: /*lithium emulation */ + case RUMIM2M_DEVICE_ID_NODE5: /*lithium emulation */ + return dp_soc_attach_wifi3(psoc, hif_handle, htc_handle, + qdf_dev, dp_ol_if_ops, devid); + break; + default: + return ol_txrx_soc_attach(psoc, dp_ol_if_ops); + } + return NULL; +} + +#endif /*_CDP_TXRX_CMN_REG_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn_struct.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn_struct.h new file mode 100644 index 0000000000000000000000000000000000000000..33916952c6aa4aeba98821120034186ec1e99d6f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn_struct.h @@ -0,0 +1,2372 @@ +/* + * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * @file cdp_txrx_cmn_struct.h + * @brief Define the host data path converged API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_CMN_STRUCT_H_ +#define _CDP_TXRX_CMN_STRUCT_H_ + +/** + * For WIN legacy header compilation + * Temporary add dummy definitions + * should be removed properly WIN legacy code handle + */ + +#include "htc_api.h" +#include "qdf_types.h" +#include "qdf_nbuf.h" +#include "qdf_atomic.h" +#ifdef DP_MOB_DEFS +#include +#endif +#include +#include +#ifdef WLAN_RX_PKT_CAPTURE_ENH +#include "cdp_txrx_extd_struct.h" +#endif + +#ifndef OL_TXRX_NUM_LOCAL_PEER_IDS +/* + * Each AP will occupy one ID, so it will occupy two IDs for AP-AP mode. + * Clients will be assigned max 32 IDs. + * STA(associated)/P2P DEV(self-PEER) will get one ID. + */ +#define OL_TXRX_NUM_LOCAL_PEER_IDS (32 + 1 + 1 + 1) +#endif + +#define CDP_BA_256_BIT_MAP_SIZE_DWORDS 8 +#define CDP_BA_64_BIT_MAP_SIZE_DWORDS 2 +#define CDP_RSSI_CHAIN_LEN 8 + +#define OL_TXRX_INVALID_PDEV_ID 0xff +#define OL_TXRX_INVALID_LOCAL_PEER_ID 0xffff +#define CDP_INVALID_VDEV_ID 0xff +/* Options for Dump Statistics */ +#define CDP_HDD_STATS 0 +#define CDP_TXRX_PATH_STATS 1 +#define CDP_TXRX_HIST_STATS 2 +#define CDP_TXRX_TSO_STATS 3 +#define CDP_HDD_NETIF_OPER_HISTORY 4 +#define CDP_DUMP_TX_FLOW_POOL_INFO 5 +#define CDP_TXRX_DESC_STATS 6 +#define CDP_HIF_STATS 7 +#define CDP_LRO_STATS 8 +#define CDP_NAPI_STATS 9 +#define CDP_WLAN_RX_BUF_DEBUG_STATS 10 +#define CDP_RX_RING_STATS 11 +#define CDP_DP_NAPI_STATS 12 +#define CDP_DP_RX_THREAD_STATS 13 +#define CDP_SCHEDULER_STATS 21 +#define CDP_TX_QUEUE_STATS 22 +#define CDP_BUNDLE_STATS 23 +#define CDP_CREDIT_STATS 24 +#define CDP_DISCONNECT_STATS 25 +#define CDP_DP_RX_FISA_STATS 26 + +#define WME_AC_TO_TID(_ac) ( \ + ((_ac) == WME_AC_VO) ? 6 : \ + ((_ac) == WME_AC_VI) ? 5 : \ + ((_ac) == WME_AC_BK) ? 1 : \ + 0) + +#define TID_TO_WME_AC(_tid) ( \ + (((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ + (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \ + (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \ + WME_AC_VO) + +#define CDP_MU_MAX_USERS 37 +#define CDP_MU_MAX_USER_INDEX (CDP_MU_MAX_USERS - 1) +#define CDP_INVALID_PEER 0xffff +#define CDP_INVALID_TID 31 +#define CDP_INVALID_TX_ENCAP_TYPE 6 +#define CDP_INVALID_SEC_TYPE 12 + +#define CDP_DATA_TID_MAX 8 +#define CDP_DATA_NON_QOS_TID 16 + +#define CDP_NUM_SA_BW 4 +#define CDP_PERCENT_MACRO 100 +#define CDP_NUM_KB_IN_MB 1000 +/* + * advance rx monitor filter + * */ +#define MON_FILTER_PASS 0x0001 +#define MON_FILTER_OTHER 0x0002 +#define MON_FILTER_ALL 0x0003 + +#define FILTER_MGMT_ALL 0xFFFF +#define FILTER_MGMT_ASSOC_REQ 0x0001 +#define FILTER_MGMT_ASSOC_RES 0x0002 +#define FILTER_MGMT_REASSOC_REQ 0x0004 +#define FILTER_MGMT_REASSOC_RES 0x0008 +#define FILTER_MGMT_PROBE_REQ 0x0010 +#define FILTER_MGMT_PROBE_RES 0x0020 +#define FILTER_MGMT_TIM_ADVT 0x0040 +#define FILTER_MGMT_RESERVED_7 0x0080 +#define FILTER_MGMT_BEACON 0x0100 +#define FILTER_MGMT_ATIM 0x0200 +#define FILTER_MGMT_DISASSOC 0x0400 +#define FILTER_MGMT_AUTH 0x0800 +#define FILTER_MGMT_DEAUTH 0x1000 +#define FILTER_MGMT_ACTION 0x2000 +#define FILTER_MGMT_ACT_NO_ACK 0x4000 +#define FILTER_MGMT_RESERVED_15 0x8000 + +#define FILTER_CTRL_ALL 0xFFFF +#define FILTER_CTRL_RESERVED_1 0x0001 +#define FILTER_CTRL_RESERVED_2 0x0002 +#define FILTER_CTRL_TRIGGER 0x0004 +#define FILTER_CTRL_RESERVED_4 0x0008 +#define FILTER_CTRL_BF_REP_POLL 0x0010 +#define FILTER_CTRL_VHT_NDP 0x0020 +#define FILTER_CTRL_FRAME_EXT 0x0040 +#define FILTER_CTRL_CTRLWRAP 0x0080 +#define FILTER_CTRL_BA_REQ 0x0100 +#define FILTER_CTRL_BA 0x0200 +#define FILTER_CTRL_PSPOLL 0x0400 +#define FILTER_CTRL_RTS 0x0800 +#define FILTER_CTRL_CTS 0x1000 +#define FILTER_CTRL_ACK 0x2000 +#define FILTER_CTRL_CFEND 0x4000 +#define FILTER_CTRL_CFEND_CFACK 0x8000 + +#define FILTER_DATA_ALL 0xFFFF +#define FILTER_DATA_MCAST 0x4000 +#define FILTER_DATA_UCAST 0x8000 +#define FILTER_DATA_DATA 0x0001 +#define FILTER_DATA_NULL 0x0008 + +/* + * Multiply rate by 2 to avoid float point + * and get rate in units of 500kbps + */ +#define CDP_11B_RATE_0MCS (11 * 2) +#define CDP_11B_RATE_1MCS (5.5 * 2) +#define CDP_11B_RATE_2MCS (2 * 2) +#define CDP_11B_RATE_3MCS (1 * 2) +#define CDP_11B_RATE_4MCS (11 * 2) +#define CDP_11B_RATE_5MCS (5.5 * 2) +#define CDP_11B_RATE_6MCS (2 * 2) + +#define CDP_11A_RATE_0MCS (48 * 2) +#define CDP_11A_RATE_1MCS (24 * 2) +#define CDP_11A_RATE_2MCS (12 * 2) +#define CDP_11A_RATE_3MCS (6 * 2) +#define CDP_11A_RATE_4MCS (54 * 2) +#define CDP_11A_RATE_5MCS (36 * 2) +#define CDP_11A_RATE_6MCS (18 * 2) +#define CDP_11A_RATE_7MCS (9 * 2) + +#define CDP_LEGACY_MCS0 0 +#define CDP_LEGACY_MCS1 1 +#define CDP_LEGACY_MCS2 2 +#define CDP_LEGACY_MCS3 3 +#define CDP_LEGACY_MCS4 4 +#define CDP_LEGACY_MCS5 5 +#define CDP_LEGACY_MCS6 6 +#define CDP_LEGACY_MCS7 7 + +QDF_DECLARE_EWMA(tx_lag, 1024, 8) +struct cdp_stats_cookie; + +/* + * DP configuration parameters + */ +enum cdp_cfg_param_type { + CDP_CFG_MAX_PEER_ID, + CDP_CFG_CCE_DISABLE, + CDP_CFG_NUM_PARAMS +}; + +/* + * PPDU TYPE from FW - + * @CDP_PPDU_STATS_PPDU_TYPE_SU: single user type + * @CDP_PPDU_STATS_PPDU_TYPE_MU_MIMO: multi user mu-mimo + * @CDP_PPDU_STATS_PPDU_TYPE_MU_OFDMA: multi user ofdma + * @CDP_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA: multi user mu-mimo ofdma + * @CDP_PPDU_STATS_PPDU_TYPE_UL_TRIG: ul trigger ppdu + * @CDP_PPDU_STATS_PPDU_TYPE_BURST_BCN: burst beacon + * @CDP_PPDU_STATS_PPDU_TYPE_UL_BSR_RESP: bsr respond + * @CDP_PPDU_STATS_PPDU_TYPE_UL_BSR_TRIG: bsr trigger + * @CDP_PPDU_STATS_PPDU_TYPE_UL_RESP: ul response + * @CDP_PPDU_STATS_PPDU_TYPE_UNKNOWN + */ +enum CDP_PPDU_STATS_PPDU_TYPE { + CDP_PPDU_STATS_PPDU_TYPE_SU = 0, + CDP_PPDU_STATS_PPDU_TYPE_MU_MIMO = 1, + CDP_PPDU_STATS_PPDU_TYPE_MU_OFDMA = 2, + CDP_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA = 4, + CDP_PPDU_STATS_PPDU_TYPE_UL_TRIG = 5, + CDP_PPDU_STATS_PPDU_TYPE_BURST_BCN = 6, + CDP_PPDU_STATS_PPDU_TYPE_UL_BSR_RESP = 7, + CDP_PPDU_STATS_PPDU_TYPE_UL_BSR_TRIG = 8, + CDP_PPDU_STATS_PPDU_TYPE_UL_RESP = 9, + CDP_PPDU_STATS_PPDU_TYPE_UNKNOWN = 0x1F, +}; + +/* + * htt_dbg_stats_type - + * bit positions for each stats type within a stats type bitmask + * The bitmask contains 24 bits. + */ +enum htt_cmn_dbg_stats_type { + HTT_DBG_CMN_STATS_WAL_PDEV_TXRX = 0, /* bit 0 -> 0x1 */ + HTT_DBG_CMN_STATS_RX_REORDER = 1, /* bit 1 -> 0x2 */ + HTT_DBG_CMN_STATS_RX_RATE_INFO = 2, /* bit 2 -> 0x4 */ + HTT_DBG_CMN_STATS_TX_PPDU_LOG = 3, /* bit 3 -> 0x8 */ + HTT_DBG_CMN_STATS_TX_RATE_INFO = 4, /* bit 4 -> 0x10 */ + HTT_DBG_CMN_STATS_TIDQ = 5, /* bit 5 -> 0x20 */ + HTT_DBG_CMN_STATS_TXBF_INFO = 6, /* bit 6 -> 0x40 */ + HTT_DBG_CMN_STATS_SND_INFO = 7, /* bit 7 -> 0x80 */ + HTT_DBG_CMN_STATS_ERROR_INFO = 8, /* bit 8 -> 0x100 */ + HTT_DBG_CMN_STATS_TX_SELFGEN_INFO = 9, /* bit 9 -> 0x200 */ + HTT_DBG_CMN_STATS_TX_MU_INFO = 10, /* bit 10 -> 0x400 */ + HTT_DBG_CMN_STATS_SIFS_RESP_INFO = 11, /* bit 11 -> 0x800 */ + HTT_DBG_CMN_STATS_RESET_INFO = 12, /* bit 12 -> 0x1000 */ + HTT_DBG_CMN_STATS_MAC_WDOG_INFO = 13, /* bit 13 -> 0x2000 */ + HTT_DBG_CMN_STATS_TX_DESC_INFO = 14, /* bit 14 -> 0x4000 */ + HTT_DBG_CMN_STATS_TX_FETCH_MGR_INFO = 15, /* bit 15 -> 0x8000 */ + HTT_DBG_CMN_STATS_TX_PFSCHED_INFO = 16, /* bit 16 -> 0x10000 */ + HTT_DBG_CMN_STATS_TX_PATH_STATS_INFO = 17, /* bit 17 -> 0x20000 */ + /* bits 18-23 currently reserved */ + + /* keep this last */ + HTT_DBG_CMN_NUM_STATS, + HTT_DBG_CMN_NUM_STATS_INVALID = 31, /* bit 31 -> 0x80000000 */ +}; + +/* + * cdp_host_txrx_stats: Different types of host stats + * @TXRX_HOST_STATS_INVALID: Invalid option + * @TXRX_RX_RATE_STATS: Rx rate info + * @TXRX_TX_RATE_STATS: Tx rate info + * @TXRX_TX_HOST_STATS: Print Tx stats + * @TXRX_RX_HOST_STATS: Print host Rx stats + * @TXRX_CLEAR_STATS: clear all host stats + * @TXRX_SRNG_PTR_STATS: Print SRNG pointer stats + * @TXRX_RX_MON_STATS: Print monitor mode stats + * @TXRX_REO_QUEUE_STATS: Print Per peer REO Queue Stats + * @TXRX_SOC_CFG_PARAMS: Print soc cfg params info + * @TXRX_PDEV_CFG_PARAMS: Print pdev cfg params info + * @TXRX_NAPI_STATS: Print NAPI scheduling statistics + * @TXRX_SOC_INTERRUPT_STATS: Print soc interrupt stats + * @TXRX_HAL_REG_WRITE_STATS: Hal Reg Write stats + */ +enum cdp_host_txrx_stats { + TXRX_HOST_STATS_INVALID = -1, + TXRX_CLEAR_STATS = 0, + TXRX_RX_RATE_STATS = 1, + TXRX_TX_RATE_STATS = 2, + TXRX_TX_HOST_STATS = 3, + TXRX_RX_HOST_STATS = 4, + TXRX_AST_STATS = 5, + TXRX_SRNG_PTR_STATS = 6, + TXRX_RX_MON_STATS = 7, + TXRX_REO_QUEUE_STATS = 8, + TXRX_SOC_CFG_PARAMS = 9, + TXRX_PDEV_CFG_PARAMS = 10, + TXRX_NAPI_STATS = 11, + TXRX_SOC_INTERRUPT_STATS = 12, + TXRX_SOC_FSE_STATS = 13, + TXRX_HAL_REG_WRITE_STATS = 14, + TXRX_SOC_REO_HW_DESC_DUMP = 15, + TXRX_HOST_STATS_MAX, +}; + +/* + * cdp_ppdu_ftype: PPDU Frame Type + * @CDP_PPDU_FTYPE_DATA: SU or MU Data Frame + * @CDP_PPDU_FTYPE_CTRL: Control/Management Frames + * @CDP_PPDU_FTYPE_BAR: SU or MU BAR frames +*/ +enum cdp_ppdu_ftype { + CDP_PPDU_FTYPE_CTRL, + CDP_PPDU_FTYPE_DATA, + CDP_PPDU_FTYPE_BAR, + CDP_PPDU_FTYPE_MAX +}; + + +/** + * @brief General specification of the tx frame contents + * + * @details + * for efficiency, the HTT packet type values correspond + * to the bit positions of the WAL packet type values, so the + * translation is a simple shift operation. + */ +enum htt_cmn_pkt_type { + htt_cmn_pkt_type_raw = 0, + htt_cmn_pkt_type_native_wifi = 1, + htt_cmn_pkt_type_ethernet = 2, + htt_cmn_pkt_type_mgmt = 3, + htt_cmn_pkt_type_eth2 = 4, + + /* keep this last */ + htt_cmn_pkt_num_types +}; + +/** + * @General description of HTT received packets status + * It is similar to htt_rx_status enum + * but is added as a cdp enum can be freely used in OL_IF layer + */ +enum htt_cmn_rx_status { + htt_cmn_rx_status_unknown = 0x0, + htt_cmn_rx_status_ok, + htt_cmn_rx_status_err_fcs, + htt_cmn_rx_status_err_dup, + htt_cmn_rx_status_err_replay, + htt_cmn_rx_status_inv_peer, + htt_cmn_rx_status_ctrl_mgmt_null = 0x08, + htt_cmn_rx_status_tkip_mic_err = 0x09, + htt_cmn_rx_status_decrypt_err = 0x0A, + htt_cmn_rx_status_mpdu_length_err = 0x0B, + htt_cmn_rx_status_err_misc = 0xFF +}; + + +enum cdp_host_reo_dest_ring { + cdp_host_reo_dest_ring_unknown = 0, + cdp_host_reo_dest_ring_1 = 1, + cdp_host_reo_dest_ring_2 = 2, + cdp_host_reo_dest_ring_3 = 3, + cdp_host_reo_dest_ring_4 = 4, +}; + +enum htt_cmn_t2h_en_stats_type { + /* keep this alwyas first */ + HTT_CMN_T2H_EN_STATS_TYPE_START = 0, + + /** ppdu_common_stats is the payload */ + HTT_CMN_T2H_EN_STATS_TYPE_COMMON = 1, + /** ppdu_sant_stats is the payload */ + HTT_CMN_T2H_EN_STATS_TYPE_SANT = 2, + /** ppdu_common_stats_v2 is the payload */ + HTT_CMN_T2H_EN_STATS_TYPE_COMMON_V2 = 3, + + /* Keep this last */ + HTT_CMN_T2H_EN_STATS_TYPE_END = 0x1f, +}; + +enum htt_cmn_t2h_en_stats_status { + /* Keep this first always */ + HTT_CMN_T2H_EN_STATS_STATUS_PARTIAL = 0, + HTT_CMN_T2H_EN_STATS_STATUS_PRESENT = 1, + HTT_CMN_T2H_EN_STATS_STATUS_ERROR = 2, + HTT_CMN_T2H_EN_STATS_STATUS_INVALID = 3, + + + /* keep this always last */ + HTT_CMN_T2H_EN_STATS_STATUS_SERIES_DONE = 7, +}; + +/** + * struct ol_txrx_peer_state - Peer state information + */ +enum ol_txrx_peer_state { + OL_TXRX_PEER_STATE_INVALID, + OL_TXRX_PEER_STATE_DISC, /* initial state */ + OL_TXRX_PEER_STATE_CONN, /* authentication in progress */ + OL_TXRX_PEER_STATE_AUTH, /* authentication successful */ +}; + +/** + * struct ol_txrx_ast_type - AST entry type information + */ +enum cdp_txrx_ast_entry_type { + CDP_TXRX_AST_TYPE_NONE, /* static ast entry for connected peer */ + CDP_TXRX_AST_TYPE_STATIC, /* static ast entry for connected peer */ + CDP_TXRX_AST_TYPE_SELF, /* static ast entry for self peer (STA mode) */ + CDP_TXRX_AST_TYPE_WDS, /* WDS peer ast entry type*/ + CDP_TXRX_AST_TYPE_MEC, /* Multicast echo ast entry type */ + CDP_TXRX_AST_TYPE_WDS_HM, /* HM WDS entry */ + CDP_TXRX_AST_TYPE_STA_BSS, /* BSS entry(STA mode) */ + CDP_TXRX_AST_TYPE_DA, /* AST entry based on Destination address */ + CDP_TXRX_AST_TYPE_WDS_HM_SEC, /* HM WDS entry for secondary radio */ + CDP_TXRX_AST_TYPE_MAX +}; + +/* + * cdp_ast_free_status: status passed to callback function before freeing ast + * @CDP_TXRX_AST_DELETED - AST is deleted from FW and delete response received + * @CDP_TXRX_AST_DELETE_IN_PROGRESS - AST delete command sent to FW and host + * is waiting for FW response + */ +enum cdp_ast_free_status { + CDP_TXRX_AST_DELETED, + CDP_TXRX_AST_DELETE_IN_PROGRESS, +}; + +/** + * txrx_ast_free_cb - callback registered for ast free + * @ctrl_soc: control path soc context + * @cdp_soc: DP soc context + * @cookie: cookie + * @cdp_ast_free_status: ast free status + */ +typedef void (*txrx_ast_free_cb)(struct cdp_ctrl_objmgr_psoc *ctrl_soc, + struct cdp_soc *cdp_soc, + void *cookie, + enum cdp_ast_free_status); + +/** + * struct cdp_ast_entry_info - AST entry information + * @peer_mac_addr: mac address of peer on which AST entry is added + * @type: ast entry type + * @vdev_id: vdev_id + * @pdev_id: pdev_id + * @peer_id: peer_id + * + * This structure holds the ast entry information + * + */ +struct cdp_ast_entry_info { + uint8_t peer_mac_addr[QDF_MAC_ADDR_SIZE]; + enum cdp_txrx_ast_entry_type type; + uint8_t vdev_id; + uint8_t pdev_id; + uint16_t peer_id; +}; + +#define MIC_SEQ_CTR_SIZE 6 + +enum cdp_rx_frame_type { + cdp_rx_frame_type_802_11, + cdp_rx_frame_type_802_3, +}; + +/** + * struct cdp_rx_mic_err_info - rx mic error information + * @frame_type: frame type - 0 - 802.11 frame + * - 1 - 802.3 frame + * @data: 802.11 frame + * @ta_mac_addr: transmitter mac address + * @da_mac_addr: destination mac address + * @tsc: sequence number + * @key_id: Key ID + * @multicast: flag for multicast + * @vdev_id: vdev ID + * + * This structure holds rx mic error information + * + */ +struct cdp_rx_mic_err_info { + uint8_t frame_type; + uint8_t *data; + struct qdf_mac_addr ta_mac_addr; + struct qdf_mac_addr da_mac_addr; + uint8_t tsc[MIC_SEQ_CTR_SIZE]; + uint8_t key_id; + bool multicast; + uint16_t vdev_id; +}; + +/** + * struct cdp_sec_type - security type information + */ +enum cdp_sec_type { + cdp_sec_type_none, + cdp_sec_type_wep128, + cdp_sec_type_wep104, + cdp_sec_type_wep40, + cdp_sec_type_tkip, + cdp_sec_type_tkip_nomic, + cdp_sec_type_aes_ccmp, + cdp_sec_type_wapi, + cdp_sec_type_aes_ccmp_256, + cdp_sec_type_aes_gcmp, + cdp_sec_type_aes_gcmp_256, + + /* keep this last! */ + cdp_num_sec_types +}; + +/** + * struct cdp_tx_exception_metadata - Exception path parameters + * @peer_id: Peer id of the peer + * @tid: Transmit Identifier + * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet) + * @sec_type: sec_type to be passed to HAL + * @is_tx_sniffer: Indicates if the packet has to be sniffed + * @ppdu_cookie: 16-bit ppdu cookie that has to be replayed back in completions + * + * This structure holds the parameters needed in the exception path of tx + * + */ +struct cdp_tx_exception_metadata { + uint16_t peer_id; + uint8_t tid; + uint16_t tx_encap_type; + enum cdp_sec_type sec_type; + uint8_t is_tx_sniffer; + uint16_t ppdu_cookie; +}; + +typedef struct cdp_soc_t *ol_txrx_soc_handle; + +/** + * ol_txrx_vdev_delete_cb - callback registered during vdev + * detach + */ +typedef void (*ol_txrx_vdev_delete_cb)(void *context); + +/** + * ol_txrx_peer_unmap_sync_cb - callback registered during peer detach sync + */ +typedef QDF_STATUS(*ol_txrx_peer_unmap_sync_cb)(uint8_t vdev_id, + uint32_t peer_id_cnt, + uint16_t *peer_id_list); + +/** + * ol_txrx_pkt_direction - Packet Direction + * @rx_direction: rx path packet + * @tx_direction: tx path packet + */ +enum txrx_direction { + rx_direction = 1, + tx_direction = 0, +}; + +/** + * cdp_capabilities- DP capabilities + */ +enum cdp_capabilities { + CDP_CFG_DP_TSO, + CDP_CFG_DP_LRO, + CDP_CFG_DP_SG, + CDP_CFG_DP_GRO, + CDP_CFG_DP_OL_TX_CSUM, + CDP_CFG_DP_OL_RX_CSUM, + CDP_CFG_DP_RAWMODE, + CDP_CFG_DP_PEER_FLOW_CTRL, +}; + +/** + * ol_txrx_nbuf_classify - Packet classification object + * @peer_id: unique peer identifier from fw + * @tid: traffic identifier(could be overridden) + * @pkt_tid: traffic identifier(cannot be overridden) + * @pkt_tos: ip header tos value + * @pkt_dscp: ip header dscp value + * @tos: index value in map + * @dscp: DSCP_TID map index + * @is_mcast: multicast pkt check + * @is_eap: eapol pkt check + * @is_arp: arp pkt check + * @is_tcp: tcp pkt check + * @is_dhcp: dhcp pkt check + * @is_igmp: igmp pkt check + * @is_ipv4: ip version 4 pkt check + * @is_ipv6: ip version 6 pkt check + */ +struct ol_txrx_nbuf_classify { + uint16_t peer_id; + uint8_t tid; + uint8_t pkt_tid; + uint8_t pkt_tos; + uint8_t pkt_dscp; + uint8_t tos; + uint8_t dscp; + uint8_t is_mcast; + uint8_t is_eap; + uint8_t is_arp; + uint8_t is_tcp; + uint8_t is_dhcp; + uint8_t is_igmp; + uint8_t is_ipv4; + uint8_t is_ipv6; +}; + +/** + * ol_osif_vdev_handle - paque handle for OS shim virtual device + * object + */ +struct ol_osif_vdev_t; +typedef struct ol_osif_vdev_t *ol_osif_vdev_handle; + +/** + * wlan_op_mode - Virtual device operation mode + * @wlan_op_mode_unknown: Unknown mode + * @wlan_op_mode_ap: AP mode + * @wlan_op_mode_ibss: IBSS mode + * @wlan_op_mode_sta: STA (client) mode + * @wlan_op_mode_monitor: Monitor mode + * @wlan_op_mode_ocb: OCB mode + */ +enum wlan_op_mode { + wlan_op_mode_unknown, + wlan_op_mode_ap, + wlan_op_mode_ibss, + wlan_op_mode_sta, + wlan_op_mode_monitor, + wlan_op_mode_ocb, + wlan_op_mode_ndi, +}; + +/** + * enum wlan_op_subtype - Virtual device subtype + * @wlan_op_subtype_none: Subtype not applicable + * @wlan_op_subtype_p2p_device: P2P device + * @wlan_op_subtye_p2p_cli: P2P Client + * @wlan_op_subtype_p2p_go: P2P GO + * + * This enum lists the subtypes of a particular virtual + * device. + */ +enum wlan_op_subtype { + wlan_op_subtype_none, + wlan_op_subtype_p2p_device, + wlan_op_subtype_p2p_cli, + wlan_op_subtype_p2p_go, +}; + +/** + * connectivity_stats_pkt_status - data pkt type + * @PKT_TYPE_REQ: Request packet + * @PKT_TYPE_RSP: Response packet + * @PKT_TYPE_TX_DROPPED: TX packet dropped + * @PKT_TYPE_RX_DROPPED: RX packet dropped + * @PKT_TYPE_RX_DELIVERED: RX packet delivered + * @PKT_TYPE_RX_REFUSED: RX packet refused + * @PKT_TYPE_TX_HOST_FW_SENT: TX packet FW sent + * @PKT_TYPE_TX_ACK_CNT:TC packet acked + * @PKT_TYPE_NONE: Invalid packet + */ +enum connectivity_stats_pkt_status { + PKT_TYPE_REQ, + PKT_TYPE_RSP, + PKT_TYPE_TX_DROPPED, + PKT_TYPE_RX_DROPPED, + PKT_TYPE_RX_DELIVERED, + PKT_TYPE_RX_REFUSED, + PKT_TYPE_TX_HOST_FW_SENT, + PKT_TYPE_TX_ACK_CNT, + PKT_TYPE_NONE, +}; + +/** + * ol_txrx_mgmt_tx_cb - tx management delivery notification + * callback function + */ +typedef void +(*ol_txrx_mgmt_tx_cb)(void *ctxt, qdf_nbuf_t tx_mgmt_frm, int had_error); + +/** + * ol_txrx_data_tx_cb - Function registered with the data path + * that is called when tx frames marked as "no free" are + * done being transmitted + */ +typedef void +(*ol_txrx_data_tx_cb)(void *ctxt, qdf_nbuf_t tx_frm, int had_error); + +/** + * ol_txrx_tx_fp - top-level transmit function + * @soc - dp soc handle + * @vdev_id - handle to the virtual device object + * @msdu_list - list of network buffers + */ +typedef qdf_nbuf_t (*ol_txrx_tx_fp)(struct cdp_soc_t *soc, uint8_t vdev_id, + qdf_nbuf_t msdu_list); + +/** + * ol_txrx_tx_exc_fp - top-level transmit function on exception path + * @soc - dp soc handle + * @vdev_id - handle to the virtual device object + * @msdu_list - list of network buffers + * @tx_exc_metadata - structure that holds parameters to exception path + */ +typedef qdf_nbuf_t (*ol_txrx_tx_exc_fp)(struct cdp_soc_t *soc, uint8_t vdev_id, + qdf_nbuf_t msdu_list, + struct cdp_tx_exception_metadata + *tx_exc_metadata); + +/** + * ol_txrx_completion_fp - top-level transmit function + * for tx completion + * @skb: skb data + * @osif_dev: the virtual device's OS shim object + */ +typedef void (*ol_txrx_completion_fp)(qdf_nbuf_t skb, + void *osif_dev); +/** + * ol_txrx_tx_flow_control_fp - tx flow control notification + * function from txrx to OS shim + * @osif_dev - the virtual device's OS shim object + * @tx_resume - tx os q should be resumed or not + */ +typedef void (*ol_txrx_tx_flow_control_fp)(void *osif_dev, + bool tx_resume); + +/** + * ol_txrx_tx_flow_control_is_pause_fp - is tx paused by flow control + * function from txrx to OS shim + * @osif_dev - the virtual device's OS shim object + * + * Return: true if tx is paused by flow control + */ +typedef bool (*ol_txrx_tx_flow_control_is_pause_fp)(void *osif_dev); + +/** + * ol_txrx_rx_fp - receive function to hand batches of data + * frames from txrx to OS shim + * @data_vdev - handle to the OSIF virtual device object + * @msdu_list - list of network buffers + */ +typedef QDF_STATUS(*ol_txrx_rx_fp)(void *osif_dev, qdf_nbuf_t msdu_list); + +typedef QDF_STATUS(*ol_txrx_fisa_rx_fp)(void *soc, + void *dp_vdev, + qdf_nbuf_t msdu_list); + +typedef QDF_STATUS(*ol_txrx_fisa_flush_fp)(void *soc, int ring_num); +/** + * ol_txrx_rx_flush_fp - receive function to hand batches of data + * frames from txrx to OS shim + * @osif_dev: handle to the OSIF virtual device object + * @vdev_id: vdev_if of the packets to be flushed + */ +typedef QDF_STATUS(*ol_txrx_rx_flush_fp)(void *osif_dev, uint8_t vdev_id); + +/** + * ol_txrx_rx_gro_flush_ind - function to send GRO flush indication to stack + * for a given RX Context Id. + * @osif_dev - handle to the OSIF virtual device object + * @rx_ctx_id - Rx context Id for which gro flush should happen + */ +typedef QDF_STATUS(*ol_txrx_rx_gro_flush_ind_fp)(void *osif_dev, + int rx_ctx_id); + +/** + * ol_txrx_stats_rx_fp - receive function to hand batches of data + * frames from txrx to OS shim + * @skb: skb data + * @osif_dev: the virtual device's OS shim object + * @action: data packet type + * @pkt_type: packet data type + */ +typedef void (*ol_txrx_stats_rx_fp)(struct sk_buff *skb, + void *osif_dev, enum connectivity_stats_pkt_status action, + uint8_t *pkt_type); + +/** + * ol_txrx_get_key_fp - function to gey key based on keyix and peer + * mac address + * @osif_dev - the virtual device's OS shim object + * @key_buf - pointer to store key + * @mac_addr - pointer to mac address + * @keyix - key id + */ +typedef QDF_STATUS(*ol_txrx_get_key_fp)(void *osif_dev, uint8_t *key_buf, uint8_t *mac_addr, uint8_t keyix); + +/** + * ol_txrx_rsim_rx_decap_fp - raw mode simulation function to decap the + * packets in receive path. + * @osif_dev - the virtual device's OS shim object + * @list_head - poniter to head of receive packet queue to decap + * @list_tail - poniter to tail of receive packet queue to decap + * @peer_mac - mac address of peer handler + */ +typedef QDF_STATUS(*ol_txrx_rsim_rx_decap_fp)(void *osif_dev, + qdf_nbuf_t *list_head, + qdf_nbuf_t *list_tail, + uint8_t *peer_mac); + +/* ol_txrx_rx_fp - external tx free function to read per packet stats and + * free tx buffer externally + * @netbuf - tx network buffer + */ +typedef void (*ol_txrx_tx_free_ext_fp)(qdf_nbuf_t netbuf); + +/** + * ol_txrx_rx_check_wai_fp - OSIF WAPI receive function +*/ +typedef bool (*ol_txrx_rx_check_wai_fp)(ol_osif_vdev_handle vdev, + qdf_nbuf_t mpdu_head, + qdf_nbuf_t mpdu_tail); +/** + * ol_txrx_rx_mon_fp - OSIF monitor mode receive function for single + * MPDU (802.11 format) + */ +typedef void (*ol_txrx_rx_mon_fp)(ol_osif_vdev_handle vdev, + qdf_nbuf_t mpdu, + void *rx_status); + +/** + * ol_txrx_proxy_arp_fp - proxy arp function pointer +*/ +typedef int (*ol_txrx_proxy_arp_fp)(ol_osif_vdev_handle vdev, + qdf_nbuf_t netbuf); + +/* + * ol_txrx_mcast_me_fp - function pointer for multicast enhancement + */ +typedef int (*ol_txrx_mcast_me_fp)(ol_osif_vdev_handle vdev, + qdf_nbuf_t netbuf); + +/** + * ol_txrx_stats_callback - statistics notify callback + */ +typedef void (*ol_txrx_stats_callback)(void *ctxt, + enum htt_cmn_dbg_stats_type type, + uint8_t *buf, int bytes); + +/** + * ol_txrx_pktdump_cb - callback for packet dump feature + */ +typedef void (*ol_txrx_pktdump_cb)(ol_txrx_soc_handle soc, + uint8_t pdev_id, + uint8_t vdev_id, + qdf_nbuf_t netbuf, + uint8_t status, + uint8_t type); + +/** + * ol_txrx_ops - (pointers to) the functions used for tx and rx + * data xfer + * + * There are two portions of these txrx operations. + * The rx portion is filled in by OSIF SW before calling + * ol_txrx_osif_vdev_register; inside the ol_txrx_osif_vdev_register + * the txrx SW stores a copy of these rx function pointers, to use + * as it delivers rx data frames to the OSIF SW. + * The tx portion is filled in by the txrx SW inside + * ol_txrx_osif_vdev_register; when the function call returns, + * the OSIF SW stores a copy of these tx functions to use as it + * delivers tx data frames to the txrx SW. + * + * @tx.std - the tx function pointer for standard data + * frames This function pointer is set by the txrx SW + * perform host-side transmit operations based on + * whether a HL or LL host/target interface is in use. + * @tx.flow_control_cb - the transmit flow control + * function that is registered by the + * OSIF which is called from txrx to + * indicate whether the transmit OS + * queues should be paused/resumed + * @rx.rx - the OS shim rx function to deliver rx data + * frames to. This can have different values for + * different virtual devices, e.g. so one virtual + * device's OS shim directly hands rx frames to the OS, + * but another virtual device's OS shim filters out P2P + * messages before sending the rx frames to the OS. The + * netbufs delivered to the osif_rx function are in the + * format specified by the OS to use for tx and rx + * frames (either 802.3 or native WiFi). In case RX Threads are enabled, pkts + * are given to the thread, instead of the stack via this pointer. + * @rx.stack - function to give packets to the stack. Differs from @rx.rx. + * In case RX Threads are enabled, this pointer holds the callback to give + * packets to the stack. + * @rx.rx_gro_flush - GRO flush indication to stack for a given RX CTX ID + * @rx.wai_check - the tx function pointer for WAPI frames + * @rx.mon - the OS shim rx monitor function to deliver + * monitor data to Though in practice, it is probable + * that the same function will be used for delivering + * rx monitor data for all virtual devices, in theory + * each different virtual device can have a different + * OS shim function for accepting rx monitor data. The + * netbufs delivered to the osif_rx_mon function are in + * 802.11 format. Each netbuf holds a 802.11 MPDU, not + * an 802.11 MSDU. Depending on compile-time + * configuration, each netbuf may also have a + * monitor-mode encapsulation header such as a radiotap + * header added before the MPDU contents. + * @rx.std - the OS shim rx function to deliver rx data + * @proxy_arp - proxy arp function pointer - specified by + * OS shim, stored by txrx + * @get_key - function pointer to get key of the peer with + * specific key index + */ +struct ol_txrx_ops { + /* tx function pointers - specified by txrx, stored by OS shim */ + struct { + ol_txrx_tx_fp tx; + ol_txrx_tx_exc_fp tx_exception; + ol_txrx_tx_free_ext_fp tx_free_ext; + ol_txrx_completion_fp tx_comp; + } tx; + + /* rx function pointers - specified by OS shim, stored by txrx */ + struct { + ol_txrx_rx_fp rx; + ol_txrx_rx_fp rx_stack; + ol_txrx_rx_flush_fp rx_flush; + ol_txrx_rx_gro_flush_ind_fp rx_gro_flush; + ol_txrx_rx_check_wai_fp wai_check; + ol_txrx_rx_mon_fp mon; + ol_txrx_stats_rx_fp stats_rx; + ol_txrx_rsim_rx_decap_fp rsim_rx_decap; + ol_txrx_fisa_rx_fp osif_fisa_rx; + ol_txrx_fisa_flush_fp osif_fisa_flush; + } rx; + /* proxy arp function pointer - specified by OS shim, stored by txrx */ + ol_txrx_proxy_arp_fp proxy_arp; + ol_txrx_mcast_me_fp me_convert; + + ol_txrx_get_key_fp get_key; +}; + +/** + * ol_txrx_stats_req - specifications of the requested + * statistics + */ +struct ol_txrx_stats_req { + uint32_t stats_type_upload_mask; /* which stats to upload */ + uint32_t stats_type_reset_mask; /* which stats to reset */ + + /* stats will be printed if either print element is set */ + struct { + int verbose; /* verbose stats printout */ + int concise; /* concise stats printout (takes precedence) */ + } print; /* print uploaded stats */ + + /* stats notify callback will be invoked if fp is non-NULL */ + struct { + ol_txrx_stats_callback fp; + void *ctxt; + } callback; + + /* stats will be copied into the specified buffer if buf is non-NULL */ + struct { + uint8_t *buf; + int byte_limit; /* don't copy more than this */ + } copy; + + /* + * If blocking is true, the caller will take the specified semaphore + * to wait for the stats to be uploaded, and the driver will release + * the semaphore when the stats are done being uploaded. + */ + struct { + int blocking; + /*Note: this needs to change to some qdf_* type */ + qdf_semaphore_t *sem_ptr; + } wait; +}; + + +/* DP soc struct definition */ +struct cdp_soc_t { + struct cdp_ops *ops; + struct ol_if_ops *ol_ops; +}; + +/* + * cdp_peer_param_type: different types of parameters + * to set values in peer + * @CDP_CONFIG_NAWDS: Enable nawds mode + * @CDP_CONFIG_NAC: Enable nac + */ +enum cdp_peer_param_type { + CDP_CONFIG_NAWDS, + CDP_CONFIG_NAC, +}; + +/* + * cdp_pdev_param_type: different types of parameters + * to set values in pdev + * @CDP_CONFIG_DEBUG_SNIFFER: Enable debug sniffer feature + * @CDP_CONFIG_BPR_ENABLE: Enable bcast probe feature + * @CDP_CONFIG_PRIMARY_RADIO: Configure radio as primary + * @CDP_CONFIG_ENABLE_PERPKT_TXSTATS: Enable per packet statistics + * @CDP_CONFIG_IGMPMLD_OVERRIDE: Override IGMP/MLD + * @CDP_CONFIG_IGMPMLD_TID: Configurable TID value when igmmld_override is set + * @CDP_CONFIG_ARP_DBG_CONF: Enable ARP debug + * @CDP_CONFIG_CAPTURE_LATENCY: Capture time latency + * @CDP_INGRESS_STATS: Accumulate ingress statistics + * @CDP_OSIF_DROP: Accumulate drops in OSIF layer + * @CDP_CONFIG_ENH_RX_CAPTURE: Enable enhanced RX capture + * @CDP_CONFIG_ENH_TX_CAPTURE: Enable enhanced TX capture + * @CDP_CONFIG_HMMC_TID_OVERRIDE: Enable hmmc tid override + * @CDP_CONFIG_HMMC_TID_VALUE: set hmmc tid value + * @CDP_CONFIG_TX_CAPTURE: set tx capture + * @CDP_CHAN_NOISE_FLOOR: set channel noise floor + * @CDP_CONFIG_VOW: set/get vow config + * @CDP_TIDQ_OVERRIDE: set/get tid queue override + * @CDP_TIDMAP_PRTY: set/get tid map prty + * @CDP_TX_PENDING: get tx pending + * @CDP_FILTER_NEIGH_PEERS: filter neighbour peers + * @CDP_FILTER_UCAST_DATA: filter unicast data + * @CDP_FILTER_MCAST_DATA: filter multicast data + * @CDP_FILTER_NO_DATA: filter no data + * @CDP_MONITOR_CHANNEL: monitor channel + * @CDP_MONITOR_FREQUENCY: monitor frequency + * @CDP_CONFIG_BSS_COLOR: configure bss color + */ +enum cdp_pdev_param_type { + CDP_CONFIG_DEBUG_SNIFFER, + CDP_CONFIG_BPR_ENABLE, + CDP_CONFIG_PRIMARY_RADIO, + CDP_CONFIG_ENABLE_PERPKT_TXSTATS, + CDP_CONFIG_IGMPMLD_OVERRIDE, + CDP_CONFIG_IGMPMLD_TID, + CDP_CONFIG_ARP_DBG_CONF, + CDP_CONFIG_CAPTURE_LATENCY, + CDP_INGRESS_STATS, + CDP_OSIF_DROP, + CDP_CONFIG_ENH_RX_CAPTURE, + CDP_CONFIG_ENH_TX_CAPTURE, + CDP_CONFIG_HMMC_TID_OVERRIDE, + CDP_CONFIG_HMMC_TID_VALUE, + CDP_CONFIG_TX_CAPTURE, + CDP_CHAN_NOISE_FLOOR, + CDP_CONFIG_VOW, + CDP_TIDQ_OVERRIDE, + CDP_TIDMAP_PRTY, + CDP_TX_PENDING, + CDP_FILTER_NEIGH_PEERS, + CDP_FILTER_UCAST_DATA, + CDP_FILTER_MCAST_DATA, + CDP_FILTER_NO_DATA, + CDP_MONITOR_CHANNEL, + CDP_MONITOR_FREQUENCY, + CDP_CONFIG_BSS_COLOR, +}; + +/* + * cdp_config_param_type: union of different types of parameters + * to set values into dp handles. + * + * @cdp_peer_param_nawds: Enable nawds mode + * @cdp_peer_param_nac: Enable nac + * + * @cdp_vdev_param_nawds: set nawds enable/disable + * @cdp_vdev_param_mcast_en: enable/disable multicast enhancement + * @cdp_vdev_param_wds: wds sta + * @cdp_vdev_param_mec: MEC enable flags + * @cdp_vdev_param_proxysta: proxy sta + * @cdp_vdev_param_tdls_flags: tdls link flags + * @cdp_vdev_param_ap_brdg_en: set ap_bridging enable/disable + * @cdp_vdev_param_cipher_en: set cipher type based on security + * @cdp_vdev_param_qwrap_isolation: qwrap isolation mode + * @cdp_vdev_param_tx_encap: tx encap type + * @cdp_vdev_param_rx_decap: rx decap type + * @cdp_vdev_param_mesh_rx_filter: set mesh rx filter + * @cdp_vdev_param_tidmap_prty: set tid vdev prty + * @cdp_vdev_param_tidmap_tbl_id: set tidmap table id + * @cdp_vdev_param_mesh_mode: set mesh mode + * @cdp_vdev_param_safe_mode: set safe mode + * @cdp_vdev_param_drop_unenc: set drop unencrypted flag + * + * @cdp_pdev_param_dbg_snf: Enable debug sniffer feature + * @cdp_pdev_param_bpr_enable: Enable bcast probe feature + * @cdp_pdev_param_primary_radio: Configure radio as primary + * @cdp_pdev_param_en_perpkt_txstats: Enable per packet statistics + * @cdp_pdev_param_igmpmld_override: Override IGMP/MLD + * @cdp_pdev_param_igmpmld_tid: TID value when igmmld_override is set + * @cdp_pdev_param_arp_dbg_conf: Enable ARP debug + * @cdp_pdev_param_cptr_latcy: Capture time latency + * @cdp_pdev_param_ingrs_stats: Accumulate ingress statistics + * @cdp_pdev_param_osif_drop: Accumulate drops in OSIF layer + * @cdp_pdev_param_en_rx_cap: Enable enhanced RX capture + * @cdp_pdev_param_en_tx_cap: Enable enhanced TX capture + * @cdp_pdev_param_hmmc_tid_ovrd: Enable hmmc tid override + * @cdp_pdev_param_hmmc_tid: set hmmc tid value + * @cdp_pdev_param_tx_capture: set tx capture + * @cdp_pdev_param_chn_noise_flr: set channel noise floor + * @cdp_pdev_param_cfg_vow: set/get vow config + * @cdp_pdev_param_tidq_override: set/get tid queue override + * @cdp_pdev_param_mon_freq: set monitor frequency + * @cdp_pdev_param_bss_color: configure bss color + * @cdp_pdev_param_tidmap_prty: set/get tid map prty + * @cdp_pdev_param_tx_pending: get tx pending + * @cdp_pdev_param_fltr_neigh_peers: filter neighbour peers + * @cdp_pdev_param_fltr_ucast: filter unicast data + * @cdp_pdev_param_fltr_mcast: filter multicast data + * @cdp_pdev_param_fltr_none: filter no data + * @cdp_pdev_param_monitor_chan: monitor channel + * + * @cdp_psoc_param_en_rate_stats: set rate stats enable/disable + * @cdp_psoc_param_en_nss_cfg: set nss cfg + * + * @cdp_enable_tx_checksum: Flag to specify if HW Tx checksum enabled + */ +typedef union cdp_config_param_t { + /* peer params */ + bool cdp_peer_param_nawds; + uint8_t cdp_peer_param_nac; + + /* vdev params */ + bool cdp_vdev_param_wds; + bool cdp_vdev_param_mec; + bool cdp_vdev_param_nawds; + bool cdp_vdev_param_proxysta; + bool cdp_vdev_param_tdls_flags; + bool cdp_vdev_param_ap_brdg_en; + bool cdp_vdev_param_qwrap_isolation; + bool cdp_vdev_param_update_multipass; + uint8_t cdp_vdev_param_da_war; + uint8_t cdp_vdev_param_mcast_en; + uint8_t cdp_vdev_param_tidmap_prty; + uint8_t cdp_vdev_param_tidmap_tbl_id; + uint32_t cdp_vdev_param_aging_tmr; + uint32_t cdp_vdev_param_cipher_en; + uint32_t cdp_vdev_param_tx_encap; + uint32_t cdp_vdev_param_rx_decap; + uint32_t cdp_vdev_param_mesh_rx_filter; + uint32_t cdp_vdev_param_mesh_mode; + uint32_t cdp_vdev_param_safe_mode; + uint32_t cdp_vdev_param_drop_unenc; + + /* pdev params */ + bool cdp_pdev_param_cptr_latcy; + bool cdp_pdev_param_hmmc_tid_ovrd; + bool cdp_pdev_param_fltr_neigh_peers; + bool cdp_pdev_param_cfg_vow; + bool cdp_pdev_param_fltr_mcast; + bool cdp_pdev_param_fltr_none; + bool cdp_pdev_param_fltr_ucast; + uint8_t cdp_pdev_param_primary_radio; + uint8_t cdp_pdev_param_en_rx_cap; + uint8_t cdp_pdev_param_en_tx_cap; + uint8_t cdp_pdev_param_tx_capture; + uint8_t cdp_pdev_param_hmmc_tid; + uint8_t cdp_pdev_param_tidmap_prty; + uint8_t cdp_pdev_param_igmpmld_override; + uint8_t cdp_pdev_param_igmpmld_tid; + uint8_t cdp_pdev_param_arp_dbg_conf; + uint8_t cdp_pdev_param_tidq_override; + uint8_t cdp_pdev_param_bss_color; + uint16_t cdp_pdev_param_chn_noise_flr; + qdf_freq_t cdp_pdev_param_mon_freq; + int cdp_pdev_param_dbg_snf; + int cdp_pdev_param_bpr_enable; + int cdp_pdev_param_monitor_chan; + uint32_t cdp_pdev_param_ingrs_stats; + uint32_t cdp_pdev_param_osif_drop; + uint32_t cdp_pdev_param_en_perpkt_txstats; + uint32_t cdp_pdev_param_tx_pending; + + /* psoc params */ + bool cdp_psoc_param_en_rate_stats; + int cdp_psoc_param_en_nss_cfg; + + bool cdp_enable_tx_checksum; +} cdp_config_param_type; + +/** + * cdp_rx_enh_capture_mode - Rx enhanced capture modes + * @CDP_RX_ENH_CAPTURE_DISABLED: Disable Rx enhance capture + * @CDP_RX_ENH_CAPTURE_MPDU: Enable capture of 128 bytes of each MPDU + * @CDP_RX_ENH_CAPTURE_MPDU_MSDU: Enable capture of 128 bytes of each MSDU + */ +enum cdp_rx_enh_capture_mode { + CDP_RX_ENH_CAPTURE_DISABLED = 0, + CDP_RX_ENH_CAPTURE_MPDU, + CDP_RX_ENH_CAPTURE_MPDU_MSDU, +}; + +/** + * cdp_rx_enh_capture_peer - Rx enhanced capture peer filtering + * @CDP_RX_ENH_CAPTURE_PEER_DISABLED: Disable Rx ENH capture peer filtering + * @CDP_RX_ENH_CAPTURE_PEER_ENABLED: Enable Rx ENH capture peer filtering + */ +enum cdp_rx_enh_capture_peer { + CDP_RX_ENH_CAPTURE_PEER_DISABLED = 0, + CDP_RX_ENH_CAPTURE_PEER_ENABLED, +}; + +/** + * cdp_tx_enh_capture_mode - Tx enhanced capture modes + * @CDP_TX_ENH_CAPTURE_DISABLED: Disable Tx enhance capture for all peers + * @CDP_TX_ENH_CAPTURE_ENABLE_ALL_PEERS: Enable tx capture for all peers + * @CDP_TX_ENH_CAPTURE_ENDIS_PER_PEER: Enable/disable per peer as necessary + */ +enum cdp_tx_enh_capture_mode { + CDP_TX_ENH_CAPTURE_DISABLED = 0, + CDP_TX_ENH_CAPTURE_ENABLE_ALL_PEERS, + CDP_TX_ENH_CAPTURE_ENDIS_PER_PEER, +}; + +/* + * enum cdp_pdev_bpr_param - different types of parameters + * to set value in pdev + * @CDP_BPR_DISABLE: Set bpr to disable state + * @CDP_BPR_ENABLE: set bpr to enable state + * + * Enum indicating bpr state to enable/disable. + */ +enum cdp_pdev_bpr_param { + CDP_BPR_DISABLE, + CDP_BPR_ENABLE, +}; + +/* + * cdp_vdev_param_type: different types of parameters + * to set values in vdev + * @CDP_ENABLE_NAWDS: set nawds enable/disable + * @CDP_ENABLE_MCAST_EN: enable/disable multicast enhancement + * @CDP_ENABLE_WDS: wds sta + * @CDP_ENABLE_MEC: MEC enable flags + * @CDP_ENABLE_PROXYSTA: proxy sta + * @CDP_UPDATE_TDLS_FLAGS: tdls link flags + * @CDP_ENABLE_AP_BRIDGE: set ap_bridging enable/disable + * @CDP_ENABLE_CIPHER : set cipher type based on security + * @CDP_ENABLE_QWRAP_ISOLATION: qwrap isolation mode + * @CDP_TX_ENCAP_TYPE: tx encap type + * @CDP_RX_DECAP_TYPE: rx decap type + * @CDP_MESH_RX_FILTER: set mesh rx filter + * @CDP_TID_VDEV_PRTY: set tid vdev prty + * @CDP_TIDMAP_TBL_ID: set tidmap table id + * @CDP_MESH_MODE: set mesh mode + * @CDP_SAFEMODE: set safe mode + * @CDP_DROP_UNENC: set drop unencrypted flag + */ +enum cdp_vdev_param_type { + CDP_ENABLE_NAWDS, + CDP_ENABLE_MCAST_EN, + CDP_ENABLE_WDS, + CDP_ENABLE_MEC, + CDP_ENABLE_DA_WAR, + CDP_ENABLE_PROXYSTA, + CDP_UPDATE_TDLS_FLAGS, + CDP_CFG_WDS_AGING_TIMER, + CDP_ENABLE_AP_BRIDGE, + CDP_ENABLE_CIPHER, + CDP_ENABLE_QWRAP_ISOLATION, + CDP_UPDATE_MULTIPASS, + CDP_TX_ENCAP_TYPE, + CDP_RX_DECAP_TYPE, + CDP_MESH_RX_FILTER, + CDP_TID_VDEV_PRTY, + CDP_TIDMAP_TBL_ID, +#ifdef MESH_MODE_SUPPORT + CDP_MESH_MODE, +#endif + CDP_SAFEMODE, + CDP_DROP_UNENC, + CDP_ENABLE_CSUM, +}; + +/* + * cdp_psoc_param_type: different types of parameters + * to set values in psoc + * @CDP_ENABLE_RATE_STATS: set rate stats enable/disable + * @CDP_SET_NSS_CFG: set nss cfg + */ +enum cdp_psoc_param_type { + CDP_ENABLE_RATE_STATS, + CDP_SET_NSS_CFG, +}; + +#define TXRX_FW_STATS_TXSTATS 1 +#define TXRX_FW_STATS_RXSTATS 2 +#define TXRX_FW_STATS_RX_RATE_INFO 3 +#define TXRX_FW_STATS_PHYSTATS 4 +#define TXRX_FW_STATS_PHYSTATS_CONCISE 5 +#define TXRX_FW_STATS_TX_RATE_INFO 6 +#define TXRX_FW_STATS_TID_STATE 7 +#define TXRX_FW_STATS_HOST_STATS 8 +#define TXRX_FW_STATS_CLEAR_HOST_STATS 9 +#define TXRX_FW_STATS_CE_STATS 10 +#define TXRX_FW_STATS_VOW_UMAC_COUNTER 11 +#define TXRX_FW_STATS_ME_STATS 12 +#define TXRX_FW_STATS_TXBF_INFO 13 +#define TXRX_FW_STATS_SND_INFO 14 +#define TXRX_FW_STATS_ERROR_INFO 15 +#define TXRX_FW_STATS_TX_SELFGEN_INFO 16 +#define TXRX_FW_STATS_TX_MU_INFO 17 +#define TXRX_FW_SIFS_RESP_INFO 18 +#define TXRX_FW_RESET_STATS 19 +#define TXRX_FW_MAC_WDOG_STATS 20 +#define TXRX_FW_MAC_DESC_STATS 21 +#define TXRX_FW_MAC_FETCH_MGR_STATS 22 +#define TXRX_FW_MAC_PREFETCH_MGR_STATS 23 +#define TXRX_FW_STATS_DURATION_INFO 24 +#define TXRX_FW_STATS_DURATION_INFO_RESET 25 +#define TXRX_FW_HALPHY_STATS 26 +#define TXRX_FW_COEX_STATS 27 + +#define PER_RADIO_FW_STATS_REQUEST 0 +#define PER_VDEV_FW_STATS_REQUEST 1 +/** + * enum data_stall_log_event_indicator - Module triggering data stall + * @DATA_STALL_LOG_INDICATOR_UNUSED: Unused + * @DATA_STALL_LOG_INDICATOR_HOST_DRIVER: Host driver indicates data stall + * @DATA_STALL_LOG_INDICATOR_FIRMWARE: FW indicates data stall + * @DATA_STALL_LOG_INDICATOR_FRAMEWORK: Framework indicates data stall + * + * Enum indicating the module that indicates data stall event + */ +enum data_stall_log_event_indicator { + DATA_STALL_LOG_INDICATOR_UNUSED, + DATA_STALL_LOG_INDICATOR_HOST_DRIVER, + DATA_STALL_LOG_INDICATOR_FIRMWARE, + DATA_STALL_LOG_INDICATOR_FRAMEWORK, +}; + +/** + * enum data_stall_log_event_type - data stall event type + * @DATA_STALL_LOG_NONE + * @DATA_STALL_LOG_FW_VDEV_PAUSE + * @DATA_STALL_LOG_HWSCHED_CMD_FILTER + * @DATA_STALL_LOG_HWSCHED_CMD_FLUSH + * @DATA_STALL_LOG_FW_RX_REFILL_FAILED + * @DATA_STALL_LOG_FW_RX_FCS_LEN_ERROR + * @DATA_STALL_LOG_FW_WDOG_ERRORS + * @DATA_STALL_LOG_BB_WDOG_ERROR + * @DATA_STALL_LOG_POST_TIM_NO_TXRX_ERROR + * @DATA_STALL_LOG_HOST_STA_TX_TIMEOUT + * @DATA_STALL_LOG_HOST_SOFTAP_TX_TIMEOUT + * @DATA_STALL_LOG_NUD_FAILURE + * + * Enum indicating data stall event type + */ +enum data_stall_log_event_type { + DATA_STALL_LOG_NONE, + DATA_STALL_LOG_FW_VDEV_PAUSE, + DATA_STALL_LOG_HWSCHED_CMD_FILTER, + DATA_STALL_LOG_HWSCHED_CMD_FLUSH, + DATA_STALL_LOG_FW_RX_REFILL_FAILED, + DATA_STALL_LOG_FW_RX_FCS_LEN_ERROR, + DATA_STALL_LOG_FW_WDOG_ERRORS, + DATA_STALL_LOG_BB_WDOG_ERROR, + DATA_STALL_LOG_POST_TIM_NO_TXRX_ERROR, + /* Stall events triggered by host/framework start from 0x100 onwards. */ + DATA_STALL_LOG_HOST_STA_TX_TIMEOUT = 0x100, + DATA_STALL_LOG_HOST_SOFTAP_TX_TIMEOUT, + DATA_STALL_LOG_NUD_FAILURE, +}; + +/** + * enum data_stall_log_recovery_type - data stall recovery type + * @DATA_STALL_LOG_RECOVERY_NONE, + * @DATA_STALL_LOG_RECOVERY_CONNECT_DISCONNECT, + * @DATA_STALL_LOG_RECOVERY_TRIGGER_PDR + * + * Enum indicating data stall recovery type + */ +enum data_stall_log_recovery_type { + DATA_STALL_LOG_RECOVERY_NONE = 0, + DATA_STALL_LOG_RECOVERY_CONNECT_DISCONNECT, + DATA_STALL_LOG_RECOVERY_TRIGGER_PDR, +}; + +/** + * struct data_stall_event_info - data stall info + * @indicator: Module triggering data stall + * @data_stall_type: data stall event type + * @vdev_id_bitmap: vdev_id_bitmap + * @pdev_id: pdev id + * @recovery_type: data stall recovery type + */ +struct data_stall_event_info { + uint32_t indicator; + uint32_t data_stall_type; + uint32_t vdev_id_bitmap; + uint32_t pdev_id; + uint32_t recovery_type; +}; + +typedef void (*data_stall_detect_cb)(struct data_stall_event_info *); + +/* + * enum cdp_stats - options for host and firmware + * statistics + * @CDP_TXRX_STATS_1: HTT Pdev tx stats + * @CDP_TXRX_STATS_2: HTT Pdev rx stats + * @CDP_TXRX_STATS_3: HTT Pdev Tx HW Queue stats + * @CDP_TXRX_STATS_4: HTT Pdev Tx HW Sched stats + * @CDP_TXRX_STATS_5: HTT Pdev error stats + * @CDP_TXRX_STATS_6: HTT TQM stats + * @CDP_TXRX_STATS_7: HTT TQM CMDQ stats + * @CDP_TXRX_STATS_8: HTT Tx_de_cmn thread stats + * @CDP_TXRX_STATS_9: HTT Pdev Tx rate stats + * @CDP_TXRX_STATS_10: HTT Pdev Rx rate stats + * @CDP_TXRX_STATS_11: HTT Peer stats + * @CDP_TXRX_STATS_12: HTT Tx Self Gen Info + * @CDP_TXRX_STATS_13: HTT Tx MU HWQ stats + * @CDP_TXRX_STATS_14: HTT Ring interface info stats + * @CDP_TXRX_STATS_15: HTT SRNG info stats + * @CDP_TXRX_STATS_16: HTT SFM info stats + * @CDP_TXRX_STATS_17: HTT Pdev tx mu mimo sched info + * @CDP_TXRX_STATS_18: HTT Peer list details + * @CDP_TXRX_STATS_19: Reserved + * @CDP_TXRX_STATS_20: Reset Host stats + * @CDP_TXRX_STATS_21: Host Rx rate stats + * @CDP_TXRX_STATS_22: Host Tx rate stats + * @CDP_TXRX_STATS_23: Host Tx stats + * @CDP_TXRX_STATS_24: Host Rx stats + * @CDP_TXRX_STATS_25: Host Ast stats + * @CDP_TXRX_STATS_26: Host Head/Tail Pointer stats + * @CDP_TXRX_STATS_27: Host Monitor mode stats + * @CDP_TXRX_STATS_28: Host Peer entry stats + * @CDP_TXRX_STATS_29: Host Soc config params info + * @CDP_TXRX_STATS_30: Host Pdev config params info + * @CDP_TXRX_STATS_31: Host DP Interrupt Stats + */ +enum cdp_stats { + CDP_TXRX_STATS_0 = 0, + CDP_TXRX_STATS_1, + CDP_TXRX_STATS_2, + CDP_TXRX_STATS_3, + CDP_TXRX_STATS_4, + CDP_TXRX_STATS_5, + CDP_TXRX_STATS_6, + CDP_TXRX_STATS_7, + CDP_TXRX_STATS_8, + CDP_TXRX_STATS_9, + CDP_TXRX_STATS_10, + CDP_TXRX_STATS_11, + CDP_TXRX_STATS_12, + CDP_TXRX_STATS_13, + CDP_TXRX_STATS_14, + CDP_TXRX_STATS_15, + CDP_TXRX_STATS_16, + CDP_TXRX_STATS_17, + CDP_TXRX_STATS_18, + CDP_TXRX_STATS_19, + CDP_TXRX_STATS_20, + CDP_TXRX_STATS_21, + CDP_TXRX_STATS_22, + CDP_TXRX_STATS_23, + CDP_TXRX_STATS_24, + CDP_TXRX_STATS_25, + CDP_TXRX_STATS_26, + CDP_TXRX_STATS_27, + CDP_TXRX_STATS_28, + CDP_TXRX_STATS_29, + CDP_TXRX_STATS_30, + CDP_TXRX_STATS_31, + CDP_TXRX_STATS_HTT_MAX = 256, + CDP_TXRX_MAX_STATS = 265, +}; + +/* + * Different Stat update types sent to OL_IF + * @UPDATE_PEER_STATS: update peer stats + * @UPDATE_VDEV_STATS: update vdev stats + * @UPDATE_PDE_STATS: Update pdev stats + */ +enum cdp_stat_update_type { + UPDATE_PEER_STATS = 0, + UPDATE_VDEV_STATS = 1, + UPDATE_PDEV_STATS = 2, +}; + +/* + * struct cdp_tx_sojourn_stats - Tx sojourn stats + * @ppdu_seq_id: ppdu_seq_id from tx completion + * @avg_sojourn_msdu: average sojourn msdu time + * @sum_sojourn_msdu: sum sojourn msdu time + * @num_msdu: number of msdus per ppdu + * @cookie: cookie to be used by upper layer + */ +struct cdp_tx_sojourn_stats { + uint32_t ppdu_seq_id; + qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX]; + uint32_t sum_sojourn_msdu[CDP_DATA_TID_MAX]; + uint32_t num_msdus[CDP_DATA_TID_MAX]; + struct cdp_stats_cookie *cookie; +}; + +/** + * struct cdp_delayed_tx_completion_ppdu_user - Delayed Tx PPDU completion + * per-user information + * @frame_ctrl: frame control field in 802.11 header + * @qos_ctrl: QoS control field in 802.11 header + * @mpdu_tried: number of mpdus tried + * @ltf_size: ltf_size + * @stbc: stbc + * @he_re: he_re (range extension) + * @txbf: txbf + * @bw: Transmission bandwidth + * + * + * + * + * @nss: NSS 1,2, ...8 + * @mcs: MCS index + * @preamble: preamble + * @gi: guard interval 800/400/1600/3200 ns + * @dcm: dcm + * @ldpc: ldpc + * @ru_start: RU start index + * @ru_tones: RU tones length + * @is_mcast: MCAST or UCAST + * @user_pos: user position + * @mu_group_id: mu group id + */ +struct cdp_delayed_tx_completion_ppdu_user { + uint32_t frame_ctrl:16, + qos_ctrl:16; + uint32_t mpdu_tried_ucast:16, + mpdu_tried_mcast:16; + uint32_t ltf_size:2, + stbc:1, + he_re:1, + txbf:4, + bw:4, + nss:4, + mcs:4, + preamble:4, + gi:4, + dcm:1, + ldpc:1, + delayed_ba:1; + uint16_t ru_start; + uint16_t ru_tones; + bool is_mcast; + uint32_t user_pos; + uint32_t mu_group_id; +}; + +/** + * struct cdp_tx_completion_ppdu_user - Tx PPDU completion per-user information + * @completion_status: completion status - OK/Filter/Abort/Timeout + * @tid: TID number + * @peer_id: Peer ID + * @ba_size: Block-Ack size + * @frame_ctrl: frame control field in 802.11 header + * @qos_ctrl: QoS control field in 802.11 header + * @mpdu_tried: number of mpdus tried + * @mpdu_success: number of mpdus successfully transmitted + * @long_retries: long retries + * @short_retries: short retries + * @is_ampdu: mpdu aggregate or non-aggregate? + * @success_bytes: bytes successfully transmitted + * @retry_bytes: bytes retried + * @failed_msdus: MSDUs failed transmission + * @duration: user duration in ppdu + * @ltf_size: ltf_size + * @stbc: stbc + * @he_re: he_re (range extension) + * @txbf: txbf + * @bw: Transmission bandwidth + * + * + * + * + * @nss: NSS 1,2, ...8 + * @mcs: MCS index + * @preamble: preamble + * @gi: guard interval 800/400/1600/3200 ns + * @dcm: dcm + * @ldpc: ldpc + * @delayed_ba: delayed ba bit + * @ppdu_type: SU/MU_MIMO/MU_OFDMA/MU_MIMO_OFDMA/UL_TRIG/BURST_BCN/UL_BSR_RESP/ + * UL_BSR_TRIG/UNKNOWN + * @ba_seq_no: Block Ack sequence number + * @ba_bitmap: Block Ack bitmap + * @start_seqa: Sequence number of first MPDU + * @enq_bitmap: Enqueue MPDU bitmap + * @ru_start: RU start index + * @ru_tones: RU tones length + * @is_mcast: MCAST or UCAST + * @tx_rate: Transmission Rate + * @user_pos: user position + * @mu_group_id: mu group id + * @rix: rate index + * @cookie: cookie to used by upper layer + * @is_ppdu_cookie_valid : Indicates that ppdu_cookie is valid + * @ppdu_cookie: 16-bit ppdu_cookie + * @sa_is_training: smart antenna training packets indication + * @rssi_chain: rssi chain per bandwidth + * @sa_tx_antenna: antenna in which packet is transmitted + * @sa_max_rates: smart antenna tx feedback info max rates + * @sa_goodput: smart antenna tx feedback info goodput + * @current_rate_per: Moving average per + * @last_enq_seq: last equeue sequence number + */ +struct cdp_tx_completion_ppdu_user { + uint32_t completion_status:8, + tid:8, + peer_id:16; + uint8_t mac_addr[6]; + uint16_t ba_size; + uint32_t frame_ctrl:16, + qos_ctrl:16; + uint32_t mpdu_tried_ucast:16, + mpdu_tried_mcast:16; + uint16_t mpdu_success:16; + uint16_t mpdu_failed:16; + uint32_t long_retries:4, + short_retries:4, + tx_ratecode:16, + is_ampdu:1, + ppdu_type:5; + uint32_t success_bytes; + uint32_t retry_bytes; + uint32_t failed_bytes; + uint32_t success_msdus:16, + retry_msdus:16; + uint32_t failed_msdus:16, + duration:16; + uint32_t ltf_size:2, + stbc:1, + he_re:1, + txbf:4, + bw:4, + nss:4, + mcs:4, + preamble:4, + gi:4, + dcm:1, + ldpc:1, + delayed_ba:1; + uint32_t ba_seq_no; + uint32_t ba_bitmap[CDP_BA_256_BIT_MAP_SIZE_DWORDS]; + uint32_t start_seq; + uint32_t enq_bitmap[CDP_BA_256_BIT_MAP_SIZE_DWORDS]; + uint32_t failed_bitmap[CDP_BA_256_BIT_MAP_SIZE_DWORDS]; + uint32_t num_mpdu:9, + num_msdu:16; + uint32_t tx_duration; + uint16_t ru_start; + uint16_t ru_tones; + bool is_mcast; + uint32_t tx_rate; + uint32_t tx_ratekbps; + /*ack rssi for separate chains*/ + uint32_t ack_rssi[CDP_RSSI_CHAIN_LEN]; + bool ack_rssi_valid; + uint32_t user_pos; + uint32_t mu_group_id; + uint32_t rix; + struct cdp_stats_cookie *cookie; + uint8_t is_ppdu_cookie_valid; + uint16_t ppdu_cookie; + uint8_t sa_is_training; + uint32_t rssi_chain[CDP_RSSI_CHAIN_LEN]; + uint32_t sa_tx_antenna; + /*Max rates for BW: 20MHZ, 40MHZ and 80MHZ and 160MHZ + * |---------------------------------------| + * | 16 bits | 16 bits | 16 bits | 16 bits | + * | BW-1 | BW-2 | BW-3 | BW-4 | + * | /\ \ | + * | / \ \ | + * | / \ \ | + * | / \ \ | + * | / \ \ | + * | / \ \ | + * |/ \ \ | + * |[11|8] [5|8] \ | + * | BW1 PADDED \ | + * |---------------------------------------| + */ + uint16_t sa_max_rates[CDP_NUM_SA_BW]; + uint32_t sa_goodput; + /* below field is used to calculate goodput in non-training period + * Note: As host is exposing goodput and hence current_rate_per is + * of no use. It is just for Host computation. + */ + uint32_t current_rate_per; + uint32_t last_enq_seq; +}; + +/** + * struct cdp_tx_indication_mpdu_info - Tx MPDU completion information + * @ppdu_id: PPDU id + * @duration: user duration in ppdu + * @frame_type: frame type MGMT/CTRL/DATA/BAR + * @frame_ctrl: frame control field in 802.11 header + * @qos_ctrl: QoS control field in 802.11 header + * @tid: TID number + * @num_msdu: number of msdu in MPDU + * @seq_no: Sequence number of first MPDU + * @ltf_size: ltf_size + * @stbc: stbc + * @he_re: he_re (range extension) + * @txbf: txbf + * @bw: Transmission bandwidth + * + * + * + * + * @nss: NSS 1,2, ...8 + * @mcs: MCS index + * @preamble: preamble + * @gi: guard interval 800/400/1600/3200 ns + * @resp_type: response type + * @mprot_type: medium protection type + * @rts_success: rts success + * @rts failure: rts failure + * @channel: frequency + * @channel_num: channel number + * @ack_rssi: ack rssi + * @ldpc: ldpc + * @tx_rate: Transmission Rate + * @mac_address: peer mac address + * @bss_mac_address: bss mac address + * @ppdu_start_timestamp: TSF at PPDU start + * @ppdu_end_timestamp: TSF at PPDU end + * @ba_start_seq: Block Ack sequence number + * @ba_bitmap: Block Ack bitmap + * @ppdu_cookie: 16-bit ppdu_cookie + * @long_retries: long retries + * @short_retries: short retries + * @completion_status: completion status - OK/Filter/Abort/Timeout + */ +struct cdp_tx_indication_mpdu_info { + uint32_t ppdu_id; + uint32_t tx_duration; + uint16_t frame_type; + uint16_t frame_ctrl; + uint16_t qos_ctrl; + uint8_t tid; + uint32_t num_msdu; + uint32_t seq_no; + uint32_t ltf_size:2, + he_re:1, + txbf:4, + bw:4, + nss:4, + mcs:4, + preamble:4, + gi:4; + uint32_t channel; + uint8_t channel_num; + uint32_t ack_rssi; + uint32_t ldpc; + uint32_t tx_rate; + uint8_t mac_address[QDF_MAC_ADDR_SIZE]; + uint8_t bss_mac_address[QDF_MAC_ADDR_SIZE]; + uint64_t ppdu_start_timestamp; + uint64_t ppdu_end_timestamp; + uint32_t ba_start_seq; + uint32_t ba_bitmap[CDP_BA_256_BIT_MAP_SIZE_DWORDS]; + uint16_t ppdu_cookie; + uint16_t long_retries:4, + short_retries:4, + completion_status:8; + uint16_t resp_type:4, + mprot_type:3, + rts_success:1, + rts_failure:1; +}; + +/** + * struct cdp_tx_indication_info - Tx capture information + * @mpdu_info: Tx MPDU completion information + * @mpdu_nbuf: reconstructed mpdu packet + * @ppdu_desc: tx completion ppdu + */ +struct cdp_tx_indication_info { + struct cdp_tx_indication_mpdu_info mpdu_info; + qdf_nbuf_t mpdu_nbuf; + struct cdp_tx_completion_ppdu *ppdu_desc; +}; + +/** + * struct cdp_tx_mgmt_comp_info - Tx mgmt comp info + * @ppdu_id: ppdu_id + * @is_sgen_pkt: payload recevied from wmi or htt path + * @retries_count: retries count + * @tx_tsf: 64 bit timestamp + */ +struct cdp_tx_mgmt_comp_info { + uint32_t ppdu_id; + bool is_sgen_pkt; + uint16_t retries_count; + uint64_t tx_tsf; +}; + +/** + * struct cdp_tx_completion_ppdu - Tx PPDU completion information + * @completion_status: completion status - OK/Filter/Abort/Timeout + * @ppdu_id: PPDU Id + * @ppdu_seq_id: ppdu sequence id for sojourn stats + * @vdev_id: VAP Id + * @bar_num_users: BA response user count, based on completion common TLV + * @num_users: Number of users + * @pending_retries: pending MPDUs (retries) + * @drop_reason: drop reason from flush status + * @is_flush: is_flush is set based on flush tlv + * @flow_type: tx flow type from flush status + * @queue_type: queue type from flush status + * @num_mpdu: Number of MPDUs in PPDU + * @num_msdu: Number of MSDUs in PPDU + * @frame_type: frame SU or MU + * @htt_frame_type: frame type from htt + * @frame_ctrl: frame control of 80211 header + * @channel: Channel informartion + * @resp_type: response type + * @mprot_type: medium protection type + * @rts_success: rts success + * @rts failure: rts failure + * @phymode: phy mode + * @ack_rssi: RSSI value of last ack packet (units=dB above noise floor) + * @tx_duration: PPDU airtime + * @ppdu_start_timestamp: TSF at PPDU start + * @ppdu_end_timestamp: TSF at PPDU end + * @ack_timestamp: TSF at the reception of ACK + * @delayed_ba: Delayed ba flag + * @beam_change: beam change bit in ppdu for he-information + * @bss_color: 6 bit value for full bss color + * @user: per-User stats (array of per-user structures) + * @mpdu_q: queue of mpdu in a ppdu + * @mpdus: MPDU list based on enqueue sequence bitmap + * @bar_ppdu_id: BAR ppdu_id + * @bar_tx_duration: BAR tx duration + * @bar_ppdu_start_timestamp: BAR start timestamp + * @bar_ppdu_end_timestamp: BAR end timestamp + */ +struct cdp_tx_completion_ppdu { + uint32_t ppdu_id; + uint32_t ppdu_seq_id; + uint16_t vdev_id; + uint16_t bar_num_users; + uint32_t num_users; + uint8_t last_usr_index; + uint32_t pending_retries; + uint32_t drop_reason; + uint32_t is_flush:1, + flow_type:8, + queue_type:8; + uint32_t num_mpdu:9, + num_msdu:16; + uint16_t frame_type; + uint16_t htt_frame_type; + uint16_t frame_ctrl; + uint16_t channel; + uint16_t resp_type:4, + mprot_type:3, + rts_success:1, + rts_failure:1; + uint16_t phy_mode; + uint32_t ack_rssi; + uint32_t tx_duration; + uint64_t ppdu_start_timestamp; + uint64_t ppdu_end_timestamp; + uint64_t ack_timestamp; + bool delayed_ba; + uint8_t beam_change; + uint8_t bss_color; + struct cdp_tx_completion_ppdu_user user[CDP_MU_MAX_USERS]; + qdf_nbuf_queue_t mpdu_q; + qdf_nbuf_t *mpdus; + uint32_t bar_ppdu_id; + uint32_t bar_tx_duration; + uint32_t bar_ppdu_start_timestamp; + uint32_t bar_ppdu_end_timestamp; +}; + +/** + * struct cdp_dev_stats - Network device stats structure + * @tx_packets: Tx total packets transmitted + * @tx_bytes : Tx total bytes transmitted + * @tx_errors : Tx error due to FW tx failure, Ring failure DMA etc + * @tx_dropped: Tx dropped is same as tx errors as above + * @rx_packets: Rx total packets transmitted + * @rx_bytes : Rx total bytes transmitted + * @rx_errors : Rx erros + * @rx_dropped: Rx dropped stats + */ +struct cdp_dev_stats { + uint32_t tx_packets; + uint32_t tx_bytes; + uint32_t tx_errors; + uint32_t tx_dropped; + uint32_t rx_packets; + uint32_t rx_bytes; + uint32_t rx_errors; + uint32_t rx_dropped; +}; + +/** + * struct cdp_rate_stats - Tx/Rx Rate statistics + * @bw: Indicates the BW of the upcoming transmission - + * + * + * + * + * @pkt_type: Transmit Packet Type + * @stbc: When set, STBC transmission rate was used + * @ldpc: When set, use LDPC transmission rates + * @sgi: Legacy normal GI + * Legacy short GI + * HE related GI + * HE + * @mcs: Transmit MCS Rate + * @ofdma: Set when the transmission was an OFDMA transmission + * @tones_in_ru: The number of tones in the RU used. + * @tsf: Lower 32 bits of the TSF (timestamp when ppdu transmission finished) + * @peer_id: Peer ID of the flow or MPDU queue + * @tid: TID of the flow or MPDU queue + */ +struct cdp_rate_stats { + uint32_t rate_stats_info_valid:1, + bw:2, + pkt_type:4, + stbc:1, + ldpc:1, + sgi:2, + mcs:4, + ofdma:1, + tones_in_ru:12, + resvd0:4; + uint32_t tsf; + uint16_t peer_id; + uint8_t tid; +}; + +/** + * struct cdp_tx_completion_msdu - Tx MSDU completion descriptor + * @ppdu_id: PPDU to which this MSDU belongs + * @transmit_cnt: Number of times this frame has been transmitted + * @ack_frame_rssi: RSSI of the received ACK or BA frame + * @first_msdu: Indicates this MSDU is the first MSDU in AMSDU + * @last_msdu: Indicates this MSDU is the last MSDU in AMSDU + * @msdu_part_of_amsdu : Indicates this MSDU was part of an A-MSDU in MPDU + * @extd: Extended structure containing rate statistics + */ +struct cdp_tx_completion_msdu { + uint32_t ppdu_id; + uint8_t transmit_cnt; + uint32_t ack_frame_rssi:8, + resvd0:1, + first_msdu:1, + last_msdu:1, + msdu_part_of_amsdu:1, + resvd1:20; + struct cdp_rate_stats extd; +}; + +/** + * struct cdp_rx_stats_ppdu_user -- per user RX stats + * @peer_id: Peer ID + * @vdev_id: VAP ID + * @is_ampdu: mpdu aggregate or non-aggregate? + * @mu_ul_info_valid: MU UL info valid + * @ofdma_ru_start_index: RU index number(0-73) + * @ofdma_ru_width: size of RU in units of 1(26tone)RU + * @nss: NSS 1,2, ...8 + * @mcs: MCS index + * @user_index: user ID in multi-user case + * @ast_index: ast index in multi-user case + * @tid: TID number + * @num_msdu: Number of MSDUs in PPDU + * @udp_msdu_count: Number of UDP MSDUs in PPDU + * @tcp_msdu_count: Number of TCP MSDUs in PPDU + * @other_msdu_count: Number of MSDUs other than UDP and TCP MSDUs in PPDU + * @frame_control: frame control field + * @frame_control_info_valid: frame_control valid + * @data_sequence_control_info_valid: data_sequence_control_info valid + * @first_data_seq_ctrl: Sequence control field of first data frame + * @preamble: preamble + * @ht_flag: ht flag + * @vht_flag: vht flag + * @he_re: he_re (range extension) + * @mpdu_cnt_fcs_ok: Number of MPDUs in PPDU with fcs ok + * @mpdu_cnt_fcs_err: Number of MPDUs in PPDU with fcs err + * @mpdu_fcs_ok_bitmap - MPDU with fcs ok bitmap + * @retried - number of retries + * @mac_addr: Peer MAC Address + */ +struct cdp_rx_stats_ppdu_user { + uint16_t peer_id; + uint8_t vdev_id; + bool is_ampdu; + uint32_t mu_ul_info_valid:1, + ofdma_ru_start_index:7, + ofdma_ru_width:7, + nss:4, + mcs:4; + /* user id */ + uint8_t user_index; + uint32_t ast_index; + uint32_t tid; + uint32_t num_msdu; + uint16_t tcp_msdu_count; + uint16_t udp_msdu_count; + uint16_t other_msdu_count; + uint16_t frame_control; + uint8_t frame_control_info_valid; + uint8_t data_sequence_control_info_valid; + uint16_t first_data_seq_ctrl; + uint32_t preamble_type; + uint16_t ht_flags; + uint16_t vht_flags; + uint16_t he_flags; + uint32_t mpdu_cnt_fcs_ok; + uint32_t mpdu_cnt_fcs_err; + uint32_t mpdu_fcs_ok_bitmap[QDF_MON_STATUS_MPDU_FCS_BMAP_NWORDS]; + uint32_t mpdu_ok_byte_count; + uint32_t mpdu_err_byte_count; + uint32_t retries; + uint8_t mac_addr[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct cdp_rx_indication_ppdu - Rx PPDU indication structure + * @ppdu_id: PPDU Id + * @is_ampdu: mpdu aggregate or non-aggregate? + * @num_mpdu: Number of MPDUs in PPDU + * @reserved: Reserved bits for future use + * @num_msdu: Number of MSDUs in PPDU + * @udp_msdu_count: Number of UDP MSDUs in PPDU + * @tcp_msdu_count: Number of TCP MSDUs in PPDU + * @other_msdu_count: Number of MSDUs other than UDP and TCP MSDUs in PPDU + * @duration: PPDU duration + * @tid: TID number + * @peer_id: Peer ID + * @vdev_id: VAP ID + * @mac_addr: Peer MAC Address + * @first_data_seq_ctrl: Sequence control field of first data frame + * @ltf_size: ltf_size + * @stbc: When set, STBC rate was used + * @he_re: he_re (range extension) + * @bw: Bandwidth + * + * + * + * + * @nss: NSS 1,2, ...8 + * @mcs: MCS index + * @preamble: preamble + * @gi: Legacy normal GI + * Legacy short GI + * HE related GI + * HE + * @dcm: dcm + * @ldpc: ldpc + * @ppdu_type: SU/MU_MIMO/MU_OFDMA/MU_MIMO_OFDMA/UL_TRIG/BURST_BCN/UL_BSR_RESP/ + * UL_BSR_TRIG/UNKNOWN + * @rssi: RSSI value (units = dB above noise floor) + * @timestamp: TSF at the reception of PPDU + * @length: PPDU length + * @channel: Channel informartion + * @lsig_A: L-SIG in 802.11 PHY header + * @frame_ctrl: frame control field + * @rix: rate index + * @rssi_chain: rssi chain per nss per bw + * @cookie: cookie to used by upper layer + * @user: per user stats in MU-user case + * @nf: noise floor + * @per_chain_rssi: rssi per antenna + */ +struct cdp_rx_indication_ppdu { + uint32_t ppdu_id; + uint16_t is_ampdu:1, + num_mpdu:9, + reserved:6; + uint32_t num_msdu; + uint32_t num_bytes; + uint16_t udp_msdu_count; + uint16_t tcp_msdu_count; + uint16_t other_msdu_count; + uint16_t duration; + uint32_t tid:8, + peer_id:16; + uint8_t vdev_id; + uint8_t mac_addr[6]; + uint16_t first_data_seq_ctrl; + union { + uint32_t rate_info; + struct { + uint32_t ltf_size:2, + stbc:1, + he_re:1, + bw:4, + nss:4, + mcs:4, + preamble:4, + gi:4, + dcm:1, + ldpc:1, + ppdu_type:5; + }; + } u; + uint32_t rix; + uint32_t lsig_a; + uint32_t rssi; + uint64_t timestamp; + uint32_t length; + uint8_t channel; + uint8_t beamformed; + + uint32_t rx_ratekbps; + uint32_t ppdu_rx_rate; + + uint32_t retries; + uint32_t rx_byte_count; + uint16_t rx_ratecode; + uint8_t fcs_error_mpdus; + uint16_t frame_ctrl; + int8_t rssi_chain[SS_COUNT][MAX_BW]; + struct cdp_stats_cookie *cookie; + struct cdp_rx_su_evm_info evm_info; + uint32_t rx_antenna; + uint8_t num_users; + struct cdp_rx_stats_ppdu_user user[CDP_MU_MAX_USERS]; + uint32_t nf; + uint8_t per_chain_rssi[MAX_CHAIN]; + uint8_t is_mcast_bcast; +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) + struct cdp_rx_ppdu_cfr_info cfr_info; +#endif +}; + +/** + * struct cdp_rx_indication_msdu - Rx MSDU info + * @ppdu_id: PPDU to which the MSDU belongs + * @msdu_len: Length of MSDU in bytes + * @ack_frame_rssi: RSSI of the received ACK or BA frame + * @first_msdu: Indicates this MSDU is the first MSDU in AMSDU + * @last_msdu: Indicates this MSDU is the last MSDU in AMSDU + * @msdu_part_of_amsdu : Indicates this MSDU was part of an A-MSDU in MPDU + * @extd: Extended structure containing rate statistics + */ +struct cdp_rx_indication_msdu { + uint32_t ppdu_id; + uint16_t msdu_len; + uint32_t ack_frame_rssi:8, + resvd0:1, + first_msdu:1, + last_msdu:1, + msdu_part_of_amsdu:1, + msdu_part_of_ampdu:1, + resvd1:19; + struct cdp_rate_stats extd; +}; + +/** + * struct cdp_config_params - Propagate configuration parameters to datapath + * @tso_enable: Enable/Disable TSO + * @lro_enable: Enable/Disable LRO + * @gro_enable: Enable/Disable GRO + * @flow_steering_enable: Enable/Disable Rx Hash based flow steering + * @p2p_tcp_udp_checksumoffload: Enable/Disable TCP/UDP Checksum Offload for P2P + * @nan_tcp_udp_checksumoffload: Enable/Disable TCP/UDP Checksum Offload for NAN + * @tcp_udp_checksumoffload: Enable/Disable TCP/UDP Checksum Offload + * @legacy_mode_checksumoffload_disable: Disable TCP/UDP Checksum Offload for + * legacy modes. + * @napi_enable: Enable/Disable Napi + * @ipa_enable: Flag indicating if IPA is enabled or not + * @tx_flow_stop_queue_threshold: Value to Pause tx queues + * @tx_flow_start_queue_offset: Available Tx descriptors to unpause + * tx queue + * @tx_comp_loop_pkt_limit: Max # of packets to be processed in 1 tx comp loop + * @rx_reap_loop_pkt_limit: Max # of packets to be processed in 1 rx reap loop + * @rx_hp_oos_update_limit: Max # of HP OOS (out of sync) updates + */ +struct cdp_config_params { + unsigned int tso_enable:1; + unsigned int lro_enable:1; + unsigned int gro_enable:1; + unsigned int flow_steering_enable:1; + unsigned int p2p_tcp_udp_checksumoffload:1; + unsigned int nan_tcp_udp_checksumoffload:1; + unsigned int tcp_udp_checksumoffload:1; + unsigned int legacy_mode_checksumoffload_disable:1; + unsigned int napi_enable:1; + unsigned int ipa_enable:1; + /* Set when QCA_LL_TX_FLOW_CONTROL_V2 is enabled */ + uint8_t tx_flow_stop_queue_threshold; + uint8_t tx_flow_start_queue_offset; + uint32_t tx_comp_loop_pkt_limit; + uint32_t rx_reap_loop_pkt_limit; + uint32_t rx_hp_oos_update_limit; + +}; + +/** + * cdp_txrx_stats_req: stats request wrapper + * used to pass request information to cdp layer + * @stats: type of stats requested + * @param0: opaque argument 0 to be passed to htt + * @param1: opaque argument 1 to be passed to htt + * @param2: opaque argument 2 to be passed to htt + * @param3: opaque argument 3 to be passed to htt + * @mac id: mac_id + */ +struct cdp_txrx_stats_req { + enum cdp_stats stats; + uint32_t param0; + uint32_t param1; + uint32_t param2; + uint32_t param3; + uint32_t cookie_val; + uint8_t mac_id; + char *peer_addr; +}; + +/** + * struct cdp_monitor_filter - monitor filter info + * @mode: set filter mode + * @fp_mgmt: set Filter Pass MGMT Configuration + * @fp_ctrl: set Filter Pass CTRL Configuration + * @fp_data: set Filter Pass DATA Configuration + * @mo_mgmt: set Monitor Other MGMT Configuration + * @mo_ctrl: set Monitor Other CTRL Configuration + * @mo_data: set Monitor other DATA Configuration + * + */ +struct cdp_monitor_filter { + uint16_t mode; + uint16_t fp_mgmt; + uint16_t fp_ctrl; + uint16_t fp_data; + uint16_t mo_mgmt; + uint16_t mo_ctrl; + uint16_t mo_data; +}; + +/** + * enum cdp_dp_cfg - CDP ENUMs to get to DP configation + * @cfg_dp_enable_data_stall: context passed to be used by consumer + * @cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload: get P2P checksum config + * @cfg_dp_enable_nan_ip_tcp_udp_checksum_offload: get NAN TX checksum config + * @cfg_dp_enable_ip_tcp_udp_checksum_offload: get TX checksum config for others + * @cfg_dp_tso_enable: get TSO enable config + * @cfg_dp_lro_enable: get LRO enable config + * @cfg_dp_gro_enable: get GRP enable config + * @cfg_dp_tc_based_dyn_gro_enable: get TC based dynamic gro enable config + * @cfg_dp_tc_ingress_prio: priority value to be checked for tc filters + * @cfg_dp_tx_flow_start_queue_offset: get DP TX flow start queue offset + * @cfg_dp_tx_flow_stop_queue_threshold: get DP TX flow stop queue threshold + * @cfg_dp_ipa_uc_tx_buf_size: get IPA TX buf size config + * @cfg_dp_ipa_uc_tx_partition_base: get IPA UC TX partition base config + * @cfg_dp_ipa_uc_rx_ind_ring_count: get IPA rx indication ring count config + * @cfg_dp_enable_flow_steering: get flow steerint enable config + * @cfg_dp_reorder_offload_supported: get reorder offload support config + * @cfg_dp_ce_classify_enable: get CE classify enable config + * @cfg_dp_disable_intra_bss_fwd: get intra bss fwd config + * @cfg_dp_pktlog_buffer_size: get packet log buffer size config + * @cfg_dp_wow_check_rx_pending: get wow rx pending frame check config + */ +enum cdp_dp_cfg { + cfg_dp_enable_data_stall, + cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload, + cfg_dp_enable_nan_ip_tcp_udp_checksum_offload, + cfg_dp_enable_ip_tcp_udp_checksum_offload, + /* Disable checksum offload for legacy modes */ + cfg_dp_disable_legacy_mode_csum_offload, + cfg_dp_tso_enable, + cfg_dp_lro_enable, + cfg_dp_gro_enable, + cfg_dp_sg_enable, + cfg_dp_tc_based_dyn_gro_enable, + cfg_dp_tc_ingress_prio, + cfg_dp_tx_flow_start_queue_offset, + cfg_dp_tx_flow_stop_queue_threshold, + cfg_dp_ipa_uc_tx_buf_size, + cfg_dp_ipa_uc_tx_partition_base, + cfg_dp_ipa_uc_rx_ind_ring_count, + cfg_dp_enable_flow_steering, + cfg_dp_reorder_offload_supported, + cfg_dp_ce_classify_enable, + cfg_dp_disable_intra_bss_fwd, + cfg_dp_pktlog_buffer_size, + cfg_dp_wow_check_rx_pending, +}; + +/** + * struct cdp_peer_cookie - cookie used when creating peer + * @ctx: context passed to be used by consumer + * @mac_addr: MAC address of peer + * @peer_id: peer id + * @pdev_id: pdev_id + * @cookie: cookie to be used by consumer + */ +struct cdp_peer_cookie { + struct cdp_stats_cookie *ctx; + uint8_t mac_addr[QDF_MAC_ADDR_SIZE]; + uint8_t peer_id; + uint8_t pdev_id; + uint8_t cookie; +}; + +#ifdef WLAN_SUPPORT_RX_FISA +struct cdp_flow_stats { + uint32_t aggr_count; + uint32_t curr_aggr_count; + uint32_t flush_count; + uint32_t bytes_aggregated; +}; +#else +/** + * cdp_flow_stats - Per-Flow (5-tuple) statistics + * @msdu_count: number of rx msdus matching this flow + * + * HW also includes msdu_byte_count and timestamp, which + * are not currently tracked in SW. + */ +struct cdp_flow_stats { + uint32_t msdu_count; +}; +#endif + +/** + * cdp_flow_fst_operation - RX FST operations allowed + */ +enum cdp_flow_fst_operation { + CDP_FLOW_FST_ENTRY_ADD, + CDP_FLOW_FST_ENTRY_DEL, + CDP_FLOW_FST_RX_BYPASS_ENABLE, + CDP_FLOW_FST_RX_BYPASS_DISABLE +}; + +/** + * cdp_flow_protocol_type - RX FST supported protocol types, mapped to HW spec + */ +enum cdp_flow_protocol_type { + CDP_FLOW_PROTOCOL_TYPE_TCP = 6, + CDP_FLOW_PROTOCOL_TYPE_UDP = 17, +}; + +/** + * cdp_rx_flow_tuple_info - RX flow tuple info used for addition/deletion + * @dest_ip_127_96: destination IP address bit fields 96-127 + * @dest_ip_95_64: destination IP address bit fields 64-95 + * @dest_ip_63_32: destination IP address bit fields 32-63 + * @dest_ip_31_0: destination IP address bit fields 0-31 + * @src_ip_127_96: source IP address bit fields 96-127 + * @src_ip_95_64: source IP address bit fields 64-95 + * @src_ip_63_32: source IP address bit fields 32-63 + * @src_ip_31_0: source IP address bit fields 0-31 + * @dest_port: destination port of flow + * @src_port: source port of flow + * @l4_protocol: protocol type in flow (TCP/UDP) + */ +struct cdp_rx_flow_tuple_info { +#ifdef WLAN_SUPPORT_RX_FISA + uint8_t tuple_populated; +#endif + uint32_t dest_ip_127_96; + uint32_t dest_ip_95_64; + uint32_t dest_ip_63_32; + uint32_t dest_ip_31_0; + uint32_t src_ip_127_96; + uint32_t src_ip_95_64; + uint32_t src_ip_63_32; + uint32_t src_ip_31_0; + uint16_t dest_port; + uint16_t src_port; + uint16_t l4_protocol; +}; + +/** + * cdp_rx_flow_info - RX flow info used for addition/deletion + * @is_addr_ipv4: indicates whether given IP address is IPv4/IPv6 + * @op_code: add/delete/enable/disable operation requested + * @flow_tupe_info: structure containing tuple info + * @fse_metadata: metadata to be set in RX flow + */ +struct cdp_rx_flow_info { + bool is_addr_ipv4; + enum cdp_flow_fst_operation op_code; + struct cdp_rx_flow_tuple_info flow_tuple_info; + uint16_t fse_metadata; +}; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ctrl.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..82f7ce6c943950d0f0ae9fdaf7d30443a919aa7b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ctrl.h @@ -0,0 +1,1186 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_ctrl.h + * @brief Define the host data path control API functions + * called by the host control SW and the OS interface module + */ + +#ifndef _CDP_TXRX_CTRL_H_ +#define _CDP_TXRX_CTRL_H_ +#include "cdp_txrx_handle.h" +#include "cdp_txrx_cmn_struct.h" +#include "cdp_txrx_cmn.h" +#include "cdp_txrx_ops.h" + +static inline int cdp_is_target_ar900b + (ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_is_target_ar900b) + return 0; + + return soc->ops->ctrl_ops->txrx_is_target_ar900b(soc); +} + + +/* WIN */ +static inline int +cdp_mempools_attach(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_mempools_attach) + return 0; + + return soc->ops->ctrl_ops->txrx_mempools_attach(soc); +} + + +#if defined(ATH_SUPPORT_NAC) || defined(ATH_SUPPORT_NAC_RSSI) +/** + * @brief update the neighbour peer addresses + * @details + * This defines interface function to update neighbour peers addresses + * which needs to be filtered + * + * @param soc - the pointer to soc object + * @param vdev_id - id of the pointer to vdev + * @param cmd - add/del entry into peer table + * @param macaddr - the address of neighbour peer + * @return - int + */ +static inline int +cdp_update_filter_neighbour_peers(ol_txrx_soc_handle soc, + uint8_t vdev_id, uint32_t cmd, uint8_t *macaddr) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_update_filter_neighbour_peers) + return 0; + + return soc->ops->ctrl_ops->txrx_update_filter_neighbour_peers + (soc, vdev_id, cmd, macaddr); +} +#endif /* ATH_SUPPORT_NAC || ATH_SUPPORT_NAC_RSSI*/ + +/** + * @brief set the Reo Destination ring for the pdev + * @details + * This will be used to configure the Reo Destination ring for this pdev. + * + * @param soc - pointer to the soc + * @param pdev_id - id of the data physical device object + * @param val - the Reo destination ring index (1 to 4) + * @return - QDF_STATUS + */ +static inline QDF_STATUS +cdp_set_pdev_reo_dest(ol_txrx_soc_handle soc, + uint8_t pdev_id, enum cdp_host_reo_dest_ring val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_pdev_reo_dest) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_set_pdev_reo_dest + (soc, pdev_id, val); +} + +/** + * @brief get the Reo Destination ring for the pdev + * + * @param soc - pointer to the soc + * @param pdev_id - id of physical device object + * @return - the Reo destination ring index (1 to 4), 0 if not supported. + */ +static inline enum cdp_host_reo_dest_ring +cdp_get_pdev_reo_dest(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return cdp_host_reo_dest_ring_unknown; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_get_pdev_reo_dest) + return cdp_host_reo_dest_ring_unknown; + + return soc->ops->ctrl_ops->txrx_get_pdev_reo_dest(soc, pdev_id); +} + +/* Is this similar to ol_txrx_peer_state_update() in MCL */ +/** + * @brief Update the authorize peer object at association time + * @details + * For the host-based implementation of rate-control, it + * updates the peer/node-related parameters within rate-control + * context of the peer at association. + * + * @param soc - pointer to the soc + * @param vdev_id - id of the pointer to vdev + * @param peer_mac - mac address of the node's object + * @authorize - either to authorize or unauthorize peer + * + * @return QDF_STATUS + */ +static inline QDF_STATUS +cdp_peer_authorize(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac, + u_int32_t authorize) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_peer_authorize) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_peer_authorize + (soc, vdev_id, peer_mac, authorize); +} + +static inline void cdp_tx_flush_buffers +(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->tx_flush_buffers) + return; + + soc->ops->ctrl_ops->tx_flush_buffers(soc, vdev_id); +} + +static inline QDF_STATUS cdp_txrx_get_vdev_param(ol_txrx_soc_handle soc, + uint8_t vdev_id, + enum cdp_vdev_param_type type, + cdp_config_param_type *val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_get_vdev_param) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: callback not registered:", __func__); + return QDF_STATUS_E_FAILURE; + } + + return soc->ops->ctrl_ops->txrx_get_vdev_param(soc, vdev_id, + type, val); +} + +static inline QDF_STATUS +cdp_txrx_set_vdev_param(ol_txrx_soc_handle soc, + uint8_t vdev_id, enum cdp_vdev_param_type type, + cdp_config_param_type val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_vdev_param) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "NULL vdev params callback"); + return QDF_STATUS_E_FAILURE; + } + + return soc->ops->ctrl_ops->txrx_set_vdev_param(soc, vdev_id, + type, val); +} + +static inline QDF_STATUS +cdp_txrx_set_psoc_param(ol_txrx_soc_handle soc, + enum cdp_psoc_param_type type, + cdp_config_param_type val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_psoc_param) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_set_psoc_param(soc, type, val); +} + +static inline QDF_STATUS +cdp_txrx_get_psoc_param(ol_txrx_soc_handle soc, + enum cdp_psoc_param_type type, + cdp_config_param_type *val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_get_psoc_param) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_get_psoc_param(soc, type, val); +} + +#ifdef VDEV_PEER_PROTOCOL_COUNT +/** + * cdp_set_vdev_peer_protocol_count() - set per-peer protocol count tracking + * + * @soc - pointer to the soc + * @vdev - the data virtual device object + * @enable - enable per-peer protocol count + * + * Set per-peer protocol count feature enable + * + * Return: void + */ +static inline +void cdp_set_vdev_peer_protocol_count(ol_txrx_soc_handle soc, int8_t vdev_id, + bool enable) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_enable_peer_protocol_count) + return; + + soc->ops->ctrl_ops->txrx_enable_peer_protocol_count(soc, vdev_id, + enable); +} + +/** + * cdp_set_vdev_peer_protocol_drop_mask() - set per-peer protocol drop mask + * + * @soc - pointer to the soc + * @vdev - the data virtual device object + * @drop_mask - drop_mask + * + * Set per-peer protocol drop_mask + * + * Return - void + */ +static inline +void cdp_set_vdev_peer_protocol_drop_mask(ol_txrx_soc_handle soc, + int8_t vdev_id, int drop_mask) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_peer_protocol_drop_mask) + return; + + soc->ops->ctrl_ops->txrx_set_peer_protocol_drop_mask(soc, vdev_id, + drop_mask); +} + +/** + * cdp_is_vdev_peer_protocol_count_enabled() - whether peer-protocol tracking + * enabled + * + * @soc - pointer to the soc + * @vdev - the data virtual device object + * + * Get whether peer protocol count feature enabled or not + * + * Return: whether feature enabled or not + */ +static inline +int cdp_is_vdev_peer_protocol_count_enabled(ol_txrx_soc_handle soc, + int8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_is_peer_protocol_count_enabled) + return 0; + + return soc->ops->ctrl_ops->txrx_is_peer_protocol_count_enabled(soc, + vdev_id); +} + +/** + * cdp_get_peer_protocol_drop_mask() - get per-peer protocol count drop-mask + * + * @soc - pointer to the soc + * @vdev - the data virtual device object + * + * Get peer-protocol-count drop-mask + * + * Return: peer-protocol-count drop-mask + */ +static inline +int cdp_get_peer_protocol_drop_mask(ol_txrx_soc_handle soc, int8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_get_peer_protocol_drop_mask) + return 0; + + return soc->ops->ctrl_ops->txrx_get_peer_protocol_drop_mask(soc, + vdev_id); +} + +/* + * Rx-Ingress and Tx-Egress are in the lower level DP layer + * Rx-Egress and Tx-ingress are handled in osif layer for DP + * So + * Rx-Ingress and Tx-Egress definitions are in DP layer + * Rx-Egress and Tx-ingress mask definitions are here below + */ +#define VDEV_PEER_PROTOCOL_RX_INGRESS_MASK 1 +#define VDEV_PEER_PROTOCOL_TX_INGRESS_MASK 2 +#define VDEV_PEER_PROTOCOL_RX_EGRESS_MASK 4 +#define VDEV_PEER_PROTOCOL_TX_EGRESS_MASK 8 + +#else +#define cdp_set_vdev_peer_protocol_count(soc, vdev_id, enable) +#define cdp_set_vdev_peer_protocol_drop_mask(soc, vdev_id, drop_mask) +#define cdp_is_vdev_peer_protocol_count_enabled(soc, vdev_id) 0 +#define cdp_get_peer_protocol_drop_mask(soc, vdev_id) 0 +#endif + +/** + * cdp_txrx_set_pdev_param() - set pdev parameter + * @soc: opaque soc handle + * @pdev_id: id of data path pdev handle + * @type: param type + * @val: value + * + * Return: status: 0 - Success, non-zero: Failure + */ +static inline QDF_STATUS cdp_txrx_set_pdev_param(ol_txrx_soc_handle soc, + uint8_t pdev_id, + enum cdp_pdev_param_type type, + cdp_config_param_type val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_pdev_param) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_set_pdev_param + (soc, pdev_id, type, val); +} + +/** + * cdp_txrx_set_peer_param() - set pdev parameter + * @soc: opaque soc handle + * @vdev_id: id of data path vdev handle + * @peer_mac: peer mac address + * @type: param type + * @val: value + * + * Return: status: 0 - Success, non-zero: Failure + */ +static inline QDF_STATUS cdp_txrx_set_peer_param(ol_txrx_soc_handle soc, + uint8_t vdev_id, + uint8_t *peer_mac, + enum cdp_peer_param_type type, + cdp_config_param_type val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_peer_param) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_set_peer_param + (soc, vdev_id, peer_mac, type, val); +} + +/** + * cdp_txrx_get_peer_param() - set pdev parameter + * @soc: opaque soc handle + * @vdev_id: id of data path vdev handle + * @peer_mac: peer mac address + * @type: param type + * @val: address of buffer + * + * Return: status + */ +static inline QDF_STATUS cdp_txrx_get_peer_param(ol_txrx_soc_handle soc, + uint8_t vdev_id, + uint8_t *peer_mac, + enum cdp_peer_param_type type, + cdp_config_param_type *val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_get_peer_param) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_get_peer_param + (soc, vdev_id, peer_mac, type, val); +} + +#ifdef QCA_MULTIPASS_SUPPORT +static inline void +cdp_peer_set_vlan_id(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t *peer_mac, uint8_t vlan_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_peer_set_vlan_id) + return; + + soc->ops->ctrl_ops->txrx_peer_set_vlan_id(soc, vdev_id, peer_mac, + vlan_id); +} +#endif + +/** + * cdp_txrx_get_pdev_param() - get pdev parameter + * @soc: opaque soc handle + * @pdev_id: id of data path pdev handle + * @type: param type + * @value: address of value buffer + * + * Return: status + */ +static inline QDF_STATUS cdp_txrx_get_pdev_param(ol_txrx_soc_handle soc, + uint8_t pdev_id, + enum cdp_pdev_param_type type, + cdp_config_param_type *value) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_get_pdev_param) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_get_pdev_param + (soc, pdev_id, type, value); +} + +/** + * cdp_txrx_peer_protocol_cnt() - set peer protocol count + * @soc: opaque soc handle + * @vdev: opaque vdev handle + * @nbuf: data packet + * @is_egress: whether egress or ingress + * @is_rx: whether tx or rx + * + * Return: void + */ +#ifdef VDEV_PEER_PROTOCOL_COUNT +static inline void +cdp_txrx_peer_protocol_cnt(ol_txrx_soc_handle soc, + int8_t vdev_id, + qdf_nbuf_t nbuf, + enum vdev_peer_protocol_enter_exit is_egress, + enum vdev_peer_protocol_tx_rx is_rx) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_peer_protocol_cnt) + return; + + soc->ops->ctrl_ops->txrx_peer_protocol_cnt(soc, vdev_id, nbuf, + is_egress, is_rx); +} +#else +#define cdp_txrx_peer_protocol_cnt(soc, vdev_id, nbuf, is_egress, is_rx) +#endif + +/** + * cdp_enable_peer_based_pktlog()- Set flag in peer structure + * + * @soc: pointer to the soc + * @pdev_id: id of the data physical device object + * @enable: enable or disable peer based filter based pktlog + * @peer_macaddr: Mac address of peer which needs to be + * filtered + * + * This function will set flag in peer structure if peer based filtering + * is enabled for pktlog + * + * Return: int + */ +static inline int +cdp_enable_peer_based_pktlog(ol_txrx_soc_handle soc, uint8_t pdev_id, + char *peer_macaddr, + uint8_t enable) +{ + if (!soc || !soc->ops) { + QDF_TRACE_ERROR(QDF_MODULE_ID_DP, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->enable_peer_based_pktlog) + return 0; + + return soc->ops->ctrl_ops->enable_peer_based_pktlog + (soc, pdev_id, peer_macaddr, enable); +} + +/** + * cdp_calculate_delay_stats()- get rx delay stats + * + * @soc: pointer to the soc + * @vdev_id: id of vdev handle + * @nbuf: nbuf which is passed + * + * This function will calculate rx delay statistics. + */ +static inline QDF_STATUS +cdp_calculate_delay_stats(ol_txrx_soc_handle soc, uint8_t vdev_id, + qdf_nbuf_t nbuf) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->calculate_delay_stats) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: callback not registered:", __func__); + return QDF_STATUS_E_FAILURE; + } + + return soc->ops->ctrl_ops->calculate_delay_stats(soc, vdev_id, nbuf); +} + +/** + * @brief Subscribe to a specified WDI event. + * @details + * This function adds the provided wdi_event_subscribe object to a list of + * subscribers for the specified WDI event. + * When the event in question happens, each subscriber for the event will + * have their callback function invoked. + * The order in which callback functions from multiple subscribers are + * invoked is unspecified. + * + * @param soc - pointer to the soc + * @param pdev_id - id of the data physical device object + * @param event_cb_sub - the callback and context for the event subscriber + * @param event - which event's notifications are being subscribed to + * @return - int + */ +static inline int +cdp_wdi_event_sub(ol_txrx_soc_handle soc, uint8_t pdev_id, + wdi_event_subscribe *event_cb_sub, uint32_t event) +{ + + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_wdi_event_sub) + return 0; + + return soc->ops->ctrl_ops->txrx_wdi_event_sub + (soc, pdev_id, event_cb_sub, event); +} + +/** + * @brief Unsubscribe from a specified WDI event. + * @details + * This function removes the provided event subscription object from the + * list of subscribers for its event. + * This function shall only be called if there was a successful prior call + * to event_sub() on the same wdi_event_subscribe object. + * + * @param soc - pointer to the soc + * @param pdev_id - id of the data physical device object + * @param event_cb_sub - the callback and context for the event subscriber + * @param event - which event's notifications are being subscribed to + * @return - int + */ +static inline int +cdp_wdi_event_unsub(ol_txrx_soc_handle soc, + uint8_t pdev_id, wdi_event_subscribe *event_cb_sub, + uint32_t event) +{ + + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_wdi_event_unsub) + return 0; + + return soc->ops->ctrl_ops->txrx_wdi_event_unsub + (soc, pdev_id, event_cb_sub, event); +} + +/** + * @brief Get security type from the from peer. + * @details + * This function gets the Security information from the peer handler. + * The security information is got from the rx descriptor and filled in + * to the peer handler. + * + * @param soc - pointer to the soc + * @param vdev_id - id of vdev handle + * @param peer mac - peer mac address + * @param sec_idx - mcast or ucast frame type. + * @return - int + */ +static inline int +cdp_get_sec_type(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac, + uint8_t sec_idx) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return A_ERROR; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_get_sec_type) + return A_ERROR; + + return soc->ops->ctrl_ops->txrx_get_sec_type + (soc, vdev_id, peer_mac, sec_idx); +} + +/** + * cdp_set_mgmt_tx_power(): function to set tx power for mgmt frames + * @param soc - pointer to the soc + * @vdev_id : id of vdev handle + * @subtype_index: subtype + * @tx_power: Tx power + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_set_mgmt_tx_power(ol_txrx_soc_handle soc, + uint8_t vdev_id, uint8_t subtype, uint8_t tx_power) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_update_mgmt_txpow_vdev) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_update_mgmt_txpow_vdev(soc, vdev_id, + subtype, tx_power); +} + +/** + * cdp_get_pldev() - function to get pktlog device handle + * @soc: datapath soc handle + * @pdev_id: physical device id + * + * Return: pktlog device handle or NULL + */ +static inline void * +cdp_get_pldev(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->ctrl_ops || !soc->ops->ctrl_ops->txrx_get_pldev) + return NULL; + + return soc->ops->ctrl_ops->txrx_get_pldev(soc, pdev_id); +} + +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) +/** + * cdp_cfr_filter() - Configure Host RX monitor status ring for CFR + * @soc: SOC TXRX handle + * @pdev_id: ID of the physical device object + * @enable: Enable or disable CFR + * @filter_val: Flag to select filter for monitor mode + */ +static inline void +cdp_cfr_filter(ol_txrx_soc_handle soc, + uint8_t pdev_id, + bool enable, + struct cdp_monitor_filter *filter_val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfr_ops || !soc->ops->cfr_ops->txrx_cfr_filter) + return; + + soc->ops->cfr_ops->txrx_cfr_filter(soc, pdev_id, enable, filter_val); +} + +/** + * cdp_get_cfr_rcc() - get cfr rcc config + * @soc: Datapath soc handle + * @pdev_id: id of objmgr pdev + * + * Return: true/false based on cfr mode setting + */ +static inline +bool cdp_get_cfr_rcc(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cfr_ops || !soc->ops->cfr_ops->txrx_get_cfr_rcc) + return 0; + + return soc->ops->cfr_ops->txrx_get_cfr_rcc(soc, pdev_id); +} + +/** + * cdp_set_cfr_rcc() - enable/disable cfr rcc config + * @soc: Datapath soc handle + * @pdev_id: id of objmgr pdev + * @enable: Enable/Disable cfr rcc mode + * + * Return: none + */ +static inline +void cdp_set_cfr_rcc(ol_txrx_soc_handle soc, uint8_t pdev_id, bool enable) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfr_ops || !soc->ops->cfr_ops->txrx_set_cfr_rcc) + return; + + return soc->ops->cfr_ops->txrx_set_cfr_rcc(soc, pdev_id, enable); +} + +/** + * cdp_get_cfr_dbg_stats() - Get debug statistics for CFR + * + * @soc: SOC TXRX handle + * @pdev_id: ID of the physical device object + * @buf: CFR RCC debug statistics buffer + * + * Return: None + */ +static inline void +cdp_get_cfr_dbg_stats(ol_txrx_soc_handle soc, uint8_t pdev_id, + struct cdp_cfr_rcc_stats *buf) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfr_ops || !soc->ops->cfr_ops->txrx_get_cfr_dbg_stats) + return; + + soc->ops->cfr_ops->txrx_get_cfr_dbg_stats(soc, pdev_id, buf); +} + +/** + * cdp_cfr_clr_dbg_stats() - Clear debug statistics for CFR + * + * @soc: SOC TXRX handle + * @pdev_id: ID of the physical device object + */ +static inline void +cdp_cfr_clr_dbg_stats(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfr_ops || !soc->ops->cfr_ops->txrx_clear_cfr_dbg_stats) + return; + + soc->ops->cfr_ops->txrx_clear_cfr_dbg_stats(soc, pdev_id); +} + +/** + * cdp_enable_mon_reap_timer() - enable/disable reap timer + * @soc: Datapath soc handle + * @pdev_id: id of objmgr pdev + * @enable: enable/disable reap timer of monitor status ring + * + * Return: none + */ +static inline void +cdp_enable_mon_reap_timer(ol_txrx_soc_handle soc, uint8_t pdev_id, + bool enable) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfr_ops || + !soc->ops->cfr_ops->txrx_enable_mon_reap_timer) + return; + + return soc->ops->cfr_ops->txrx_enable_mon_reap_timer(soc, pdev_id, + enable); +} +#endif + +#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH) +/** + * cdp_update_peer_pkt_capture_params() - Sets Rx & Tx Capture params for a peer + * @soc: SOC TXRX handle + * @pdev_id: id of CDP pdev pointer + * @is_rx_pkt_cap_enable: enable/disable rx pkt capture for this peer + * @is_tx_pkt_cap_enable: enable/disable tx pkt capture for this peer + * @peer_mac: MAC address of peer for which pkt_cap is to be enabled/disabled + * + * Return: Success when matching peer is found & flags are set, error otherwise + */ +static inline QDF_STATUS +cdp_update_peer_pkt_capture_params(ol_txrx_soc_handle soc, + uint8_t pdev_id, + bool is_rx_pkt_cap_enable, + bool is_tx_pkt_cap_enable, + uint8_t *peer_mac) +{ + if (!soc || !soc->ops) { + dp_err("Invalid SOC instance"); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_update_peer_pkt_capture_params) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_update_peer_pkt_capture_params + (soc, pdev_id, is_rx_pkt_cap_enable, + is_tx_pkt_cap_enable, + peer_mac); +} +#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */ + +#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG +/** + * cdp_update_pdev_rx_protocol_tag() - wrapper function to set the protocol + * tag in CDP layer from cfg layer + * @soc: SOC TXRX handle + * @pdev_id: id of CDP pdev pointer + * @protocol_mask: Bitmap for protocol for which tagging is enabled + * @protocol_type: Protocol type for which the tag should be update + * @tag: Actual tag value for the given prototype + * Return: Returns QDF_STATUS_SUCCESS/FAILURE + */ +static inline QDF_STATUS +cdp_update_pdev_rx_protocol_tag(ol_txrx_soc_handle soc, + uint8_t pdev_id, uint32_t protocol_mask, + uint16_t protocol_type, uint16_t tag) +{ + if (!soc || !soc->ops) { + dp_err("Invalid SOC instance"); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_update_pdev_rx_protocol_tag) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_update_pdev_rx_protocol_tag + (soc, pdev_id, protocol_mask, protocol_type, tag); +} + +#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS +/** + * cdp_dump_pdev_rx_protocol_tag_stats() - wrapper function to dump the protocol + tag statistics for given or all protocols + * @soc: SOC TXRX handle + * @pdev_id: id of CDP pdev pointer + * @protocol_type: Protocol type for which the tag should be update + * Return: Returns QDF_STATUS_SUCCESS/FAILURE + */ +static inline QDF_STATUS +cdp_dump_pdev_rx_protocol_tag_stats(ol_txrx_soc_handle soc, + uint8_t pdev_id, + uint16_t protocol_type) +{ + if (!soc || !soc->ops) { + dp_err("Invalid SOC instance"); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_dump_pdev_rx_protocol_tag_stats) + return QDF_STATUS_E_FAILURE; + + soc->ops->ctrl_ops->txrx_dump_pdev_rx_protocol_tag_stats(soc, pdev_id, + protocol_type); + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */ +#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */ + +#ifdef ATH_SUPPORT_NAC_RSSI +/** + * cdp_vdev_config_for_nac_rssi(): To invoke dp callback for nac rssi config + * @soc: soc pointer + * @vdev_id: id of vdev + * @nac_cmd: specfies nac_rss config action add, del, list + * @bssid: Neighbour bssid + * @client_macaddr: Non-Associated client MAC + * @chan_num: channel number to scan + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS cdp_vdev_config_for_nac_rssi(ol_txrx_soc_handle soc, + uint8_t vdev_id, enum cdp_nac_param_cmd nac_cmd, + char *bssid, char *client_macaddr, uint8_t chan_num) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_vdev_config_for_nac_rssi) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_vdev_config_for_nac_rssi(soc, vdev_id, + nac_cmd, bssid, client_macaddr, chan_num); +} + +/* + * cdp_vdev_get_neighbour_rssi(): To invoke dp callback to get rssi value of nac + * @soc: soc pointer + * @vdev_id: id of vdev + * @macaddr: Non-Associated client MAC + * @rssi: rssi + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS cdp_vdev_get_neighbour_rssi(ol_txrx_soc_handle soc, + uint8_t vdev_id, + char *macaddr, + uint8_t *rssi) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_vdev_get_neighbour_rssi) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_vdev_get_neighbour_rssi(soc, vdev_id, + macaddr, + rssi); +} +#endif + +#ifdef WLAN_SUPPORT_RX_FLOW_TAG +/** + * cdp_set_rx_flow_tag() - wrapper function to set the flow + * tag in CDP layer from cfg layer + * @soc: SOC TXRX handle + * @pdev_id: id of CDP pdev pointer + * @flow_info: Flow 5-tuple, along with tag, if any, that needs to added/deleted + * + * Return: Success when add/del operation is successful, error otherwise + */ +static inline QDF_STATUS +cdp_set_rx_flow_tag(ol_txrx_soc_handle soc, uint8_t pdev_id, + struct cdp_rx_flow_info *flow_info) +{ + if (!soc || !soc->ops) { + dp_err("Invalid SOC instance"); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_rx_flow_tag) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_set_rx_flow_tag(soc, pdev_id, + flow_info); +} + +/** + * cdp_dump_rx_flow_tag_stats() - wrapper function to dump the flow + * tag statistics for given flow + * @soc: SOC TXRX handle + * @pdev_id: id of CDP pdev + * @flow_info: Flow tuple for which we want to print the statistics + * + * Return: Success when flow is found and stats are printed, error otherwise + */ +static inline QDF_STATUS +cdp_dump_rx_flow_tag_stats(ol_txrx_soc_handle soc, uint8_t pdev_id, + struct cdp_rx_flow_info *flow_info) +{ + if (!soc || !soc->ops) { + dp_err("Invalid SOC instance"); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_dump_rx_flow_tag_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_dump_rx_flow_tag_stats(soc, + pdev_id, + flow_info); +} +#endif /* WLAN_SUPPORT_RX_FLOW_TAG */ +#endif /* _CDP_TXRX_CTRL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ctrl_def.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ctrl_def.h new file mode 100644 index 0000000000000000000000000000000000000000..1a922f6b7e6ed5dc51f9b7cf844ce93b0c13b09b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ctrl_def.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2011-2016,2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * @file cdp_txrx_ctrl.h + * @brief Define the host data path control API functions + * called by the host control SW and the OS interface module + */ + +#ifndef _CDP_TXRX_CTRL_DEF_H_ +#define _CDP_TXRX_CTRL_DEF_H_ +/* TODO: adf need to be replaced with qdf */ +/* + * Cleanups -- Might need cleanup + */ +#if !QCA_OL_TX_PDEV_LOCK && QCA_NSS_PLATFORM || \ + (defined QCA_PARTNER_PLATFORM && QCA_PARTNER_SUPPORT_FAST_TX) +#define VAP_TX_SPIN_LOCK(_x) spin_lock(_x) +#define VAP_TX_SPIN_UNLOCK(_x) spin_unlock(_x) +#else /* QCA_OL_TX_PDEV_LOCK */ +#define VAP_TX_SPIN_LOCK(_x) +#define VAP_TX_SPIN_UNLOCK(_x) +#endif /* QCA_OL_TX_PDEV_LOCK */ + +#if QCA_OL_TX_PDEV_LOCK +void ol_ll_pdev_tx_lock(void *); +void ol_ll_pdev_tx_unlock(void *); +#define OL_TX_LOCK(_x) ol_ll_pdev_tx_lock(_x) +#define OL_TX_UNLOCK(_x) ol_ll_pdev_tx_unlock(_x) + +#define OL_TX_PDEV_LOCK(_x) qdf_spin_lock_bh(_x) +#define OL_TX_PDEV_UNLOCK(_x) qdf_spin_unlock_bh(_x) +#else +#define OL_TX_PDEV_LOCK(_x) +#define OL_TX_PDEV_UNLOCK(_x) + +#define OL_TX_LOCK(_x) +#define OL_TX_UNLOCK(_x) +#endif /* QCA_OL_TX_PDEV_LOCK */ + +#if !QCA_OL_TX_PDEV_LOCK +#define OL_TX_FLOW_CTRL_LOCK(_x) qdf_spin_lock_bh(_x) +#define OL_TX_FLOW_CTRL_UNLOCK(_x) qdf_spin_unlock_bh(_x) + +#define OL_TX_DESC_LOCK(_x) qdf_spin_lock_bh(_x) +#define OL_TX_DESC_UNLOCK(_x) qdf_spin_unlock_bh(_x) + +#define OSIF_VAP_TX_LOCK(_y, _x) spin_lock(&((_x)->tx_lock)) +#define OSIF_VAP_TX_UNLOCK(_y, _x) spin_unlock(&((_x)->tx_lock)) + +#define OL_TX_PEER_LOCK(_x, _id) qdf_spin_lock_bh(&((_x)->peer_lock[_id])) +#define OL_TX_PEER_UNLOCK(_x, _id) qdf_spin_unlock_bh(&((_x)->peer_lock[_id])) + +#define OL_TX_PEER_UPDATE_LOCK(_x, _id) \ + qdf_spin_lock_bh(&((_x)->peer_lock[_id])) +#define OL_TX_PEER_UPDATE_UNLOCK(_x, _id) \ + qdf_spin_unlock_bh(&((_x)->peer_lock[_id])) + +#else +#define OSIF_VAP_TX_LOCK(_y, _x) cdp_vdev_tx_lock( \ + _y, wlan_vdev_get_id((_x)->ctrl_vdev)) +#define OSIF_VAP_TX_UNLOCK(_y, _x) cdp_vdev_tx_unlock( \ + _y, wlan_vdev_get_id((_x)->ctrl_vdev)) + +#define OL_TX_FLOW_CTRL_LOCK(_x) +#define OL_TX_FLOW_CTRL_UNLOCK(_x) + +#define OL_TX_DESC_LOCK(_x) +#define OL_TX_DESC_UNLOCK(_x) + +#define OL_TX_PEER_LOCK(_x, _id) +#define OL_TX_PEER_UNLOCK(_x, _id) + +#define OL_TX_PEER_UPDATE_LOCK(_x, _id) qdf_spin_lock_bh(&((_x)->tx_lock)) +#define OL_TX_PEER_UPDATE_UNLOCK(_x, _id) qdf_spin_unlock_bh(&((_x)->tx_lock)) + +#endif /* !QCA_OL_TX_PDEV_LOCK */ +#endif + diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_flow_ctrl_legacy.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_flow_ctrl_legacy.h new file mode 100644 index 0000000000000000000000000000000000000000..b00b905d3a9f7d8f1a9258f76f65b79bc410b463 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_flow_ctrl_legacy.h @@ -0,0 +1,321 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_flow_ctrl_legacy.h + * @brief Define the host data path legacy flow control API + * functions + */ +#ifndef _CDP_TXRX_FC_LEG_H_ +#define _CDP_TXRX_FC_LEG_H_ +#include +#include "cdp_txrx_handle.h" + +#ifdef QCA_HL_NETDEV_FLOW_CONTROL + +/** + * cdp_hl_fc_register() - Register HL flow control callback. + * @soc: data path soc handle + * @pdev_id: datapath pdev identifier + * @flowcontrol: callback function pointer to stop/start OS netdev queues + * + * Register flow control callback. + * + * Returns: 0 for success + */ +static inline int +cdp_hl_fc_register(ol_txrx_soc_handle soc, uint8_t pdev_id, + tx_pause_callback flowcontrol) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return -EINVAL; + } + + if (!soc->ops->l_flowctl_ops || + !soc->ops->l_flowctl_ops->register_tx_flow_control) + return -EINVAL; + + return soc->ops->l_flowctl_ops->register_tx_flow_control(soc, pdev_id, + flowcontrol); +} + +static inline int cdp_hl_fc_set_td_limit(ol_txrx_soc_handle soc, + uint8_t vdev_id, uint32_t chan_freq) +{ + if (!soc->ops->l_flowctl_ops->set_vdev_tx_desc_limit) + return 0; + + return soc->ops->l_flowctl_ops->set_vdev_tx_desc_limit(soc, vdev_id, + chan_freq); +} + +static inline int cdp_hl_fc_set_os_queue_status(ol_txrx_soc_handle soc, + uint8_t vdev_id, + enum netif_action_type action) +{ + if (!soc->ops->l_flowctl_ops->set_vdev_os_queue_status) + return -EINVAL; + + return soc->ops->l_flowctl_ops->set_vdev_os_queue_status(soc, + vdev_id, + action); +} +#else +static inline int +cdp_hl_fc_register(ol_txrx_soc_handle soc, uint8_t pdev_id, + tx_pause_callback flowcontrol) +{ + return 0; +} + +static inline int cdp_hl_fc_set_td_limit(ol_txrx_soc_handle soc, + uint8_t vdev_id, uint32_t chan_freq) +{ + return 0; +} + +static inline int cdp_hl_fc_set_os_queue_status(ol_txrx_soc_handle soc, + uint8_t vdev_id, + enum netif_action_type action) +{ + return 0; +} + +#endif /* QCA_HL_NETDEV_FLOW_CONTROL */ + +#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL +/** + * cdp_fc_register() - Register flow control callback function pointer + * @soc - data path soc handle + * @vdev_id - virtual interface id to register flow control + * @flowControl - callback function pointer + * @osif_fc_ctx - client context pointer + * @flow_control_is_pause: is vdev paused by flow control + * + * Register flow control callback function pointer and client context pointer + * + * return 0 success + */ +static inline int +cdp_fc_register(ol_txrx_soc_handle soc, uint8_t vdev_id, + ol_txrx_tx_flow_control_fp flowcontrol, void *osif_fc_ctx, + ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->l_flowctl_ops || + !soc->ops->l_flowctl_ops->register_tx_flow_control) + return 0; + + return soc->ops->l_flowctl_ops->register_tx_flow_control( + soc, vdev_id, flowcontrol, osif_fc_ctx, + flow_control_is_pause); +} +#else +static inline int +cdp_fc_register(ol_txrx_soc_handle soc, uint8_t vdev_id, + ol_txrx_tx_flow_control_fp flowcontrol, void *osif_fc_ctx, + ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause) +{ + return 0; +} +#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */ +/** + * cdp_fc_deregister() - remove flow control instance + * @soc - data path soc handle + * @vdev_id - virtual interface id to register flow control + * + * remove flow control instance + * + * return 0 success + */ +static inline int +cdp_fc_deregister(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->l_flowctl_ops || + !soc->ops->l_flowctl_ops->deregister_tx_flow_control_cb) + return 0; + + return soc->ops->l_flowctl_ops->deregister_tx_flow_control_cb( + soc, vdev_id); +} + +/** + * cdp_fc_get_tx_resource() - get data path resource count + * @soc: data path soc handle + * @pdev_id: datapath pdev ID + * @peer_addr: peer mac address + * @low_watermark: low resource threshold + * @high_watermark_offset: high resource threshold + * + * get data path resource count + * + * return true enough data path resource available + * false resource is not avaialbe + */ +static inline bool +cdp_fc_get_tx_resource(ol_txrx_soc_handle soc, uint8_t pdev_id, + struct qdf_mac_addr peer_addr, + unsigned int low_watermark, + unsigned int high_watermark_offset) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return false; + } + + if (!soc->ops->l_flowctl_ops || + !soc->ops->l_flowctl_ops->get_tx_resource) + return false; + + return soc->ops->l_flowctl_ops->get_tx_resource(soc, pdev_id, peer_addr, + low_watermark, + high_watermark_offset); +} + +/** + * cdp_fc_ll_set_tx_pause_q_depth() - set pause queue depth + * @soc - data path soc handle + * @vdev_id - virtual interface id to register flow control + * @pause_q_depth - pending tx queue delth + * + * set pause queue depth + * + * return 0 success + */ +static inline int +cdp_fc_ll_set_tx_pause_q_depth(ol_txrx_soc_handle soc, + uint8_t vdev_id, int pause_q_depth) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->l_flowctl_ops || + !soc->ops->l_flowctl_ops->ll_set_tx_pause_q_depth) + return 0; + + return soc->ops->l_flowctl_ops->ll_set_tx_pause_q_depth( + soc, vdev_id, pause_q_depth); + +} + +/** + * cdp_fc_vdev_flush() - flush tx queue + * @soc: data path soc handle + * @vdev_id: id of vdev + * + * flush tx queue + * + * return None + */ +static inline void +cdp_fc_vdev_flush(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->l_flowctl_ops || + !soc->ops->l_flowctl_ops->vdev_flush) + return; + + soc->ops->l_flowctl_ops->vdev_flush(soc, vdev_id); +} + +/** + * cdp_fc_vdev_pause() - pause tx scheduler on vdev + * @soc: data path soc handle + * @vdev_id: id of vdev + * @reason: pause reason + * @pause_type: type of pause + * + * pause tx scheduler on vdev + * + * return None + */ +static inline void +cdp_fc_vdev_pause(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint32_t reason, uint32_t pause_type) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->l_flowctl_ops || + !soc->ops->l_flowctl_ops->vdev_pause) + return; + + soc->ops->l_flowctl_ops->vdev_pause(soc, vdev_id, reason, pause_type); +} + +/** + * cdp_fc_vdev_unpause() - resume tx scheduler on vdev + * @soc: data path soc handle + * @vdev_id: id of vdev + * @reason: pause reason + * @pause_type: type of pause + * + * resume tx scheduler on vdev + * + * return None + */ +static inline void +cdp_fc_vdev_unpause(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint32_t reason, uint32_t pause_type) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + return; + } + + if (!soc->ops->l_flowctl_ops || + !soc->ops->l_flowctl_ops->vdev_unpause) + return; + + soc->ops->l_flowctl_ops->vdev_unpause(soc, vdev_id, reason, + pause_type); +} +#endif /* _CDP_TXRX_FC_LEG_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_flow_ctrl_v2.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_flow_ctrl_v2.h new file mode 100644 index 0000000000000000000000000000000000000000..630b8b9060116a08e9ab8d449eef8cb933fd17eb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_flow_ctrl_v2.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_flow_ctrl_v2.h + * @brief Define the host data path flow control version 2 API + * functions + */ +#ifndef _CDP_TXRX_FC_V2_H_ +#define _CDP_TXRX_FC_V2_H_ +#include + +/** + * cdp_register_pause_cb() - Register flow control callback function pointer + * @soc - data path soc handle + * @pause_cb - Pause callback intend to register + * + * Register flow control callback function pointer and client context pointer + * + * return QDF_STATUS_SUCCESS success + */ +static inline QDF_STATUS +cdp_register_pause_cb(ol_txrx_soc_handle soc, + tx_pause_callback pause_cb) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_INVAL; + } + + if (!soc->ops->flowctl_ops || + !soc->ops->flowctl_ops->register_pause_cb) + return QDF_STATUS_SUCCESS; + + return soc->ops->flowctl_ops->register_pause_cb(soc, pause_cb); + +} + +/** + * cdp_set_desc_global_pool_size() - set global device pool size + * @soc - data path soc handle + * @num_msdu_desc - descriptor pool size + * + * set global device pool size + * + * return none + */ +static inline void +cdp_set_desc_global_pool_size(ol_txrx_soc_handle soc, + uint32_t num_msdu_desc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->flowctl_ops || + !soc->ops->flowctl_ops->set_desc_global_pool_size) + return; + + soc->ops->flowctl_ops->set_desc_global_pool_size( + num_msdu_desc); +} + +/** + * cdp_dump_flow_pool_info() - dump flow pool information + * @soc - data path soc handle + * + * dump flow pool information + * + * return none + */ +static inline void +cdp_dump_flow_pool_info(struct cdp_soc_t *soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->flowctl_ops || + !soc->ops->flowctl_ops->dump_flow_pool_info) + return; + + soc->ops->flowctl_ops->dump_flow_pool_info(soc); +} + +/** + * cdp_tx_desc_thresh_reached() - Check if avail tx desc meet threshold + * @soc: data path soc handle + * @vdev_id: vdev_id corresponding to vdev start + * + * Return: true if threshold is met, false if not + */ +static inline bool +cdp_tx_desc_thresh_reached(struct cdp_soc_t *soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return false; + } + + if (!soc->ops->flowctl_ops || + !soc->ops->flowctl_ops->tx_desc_thresh_reached) + return false; + + return soc->ops->flowctl_ops->tx_desc_thresh_reached(soc, vdev_id); +} +#endif /* _CDP_TXRX_FC_V2_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_handle.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..a3e448eb3e1bea4ddc8138aaca29c0a0eedd62a6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_handle.h @@ -0,0 +1,65 @@ + +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * @file cdp_txrx_handle.h + * @brief Holds the forward structure declarations for handles + * passed from the upper layers + */ + +#ifndef CDP_TXRX_HANDLE_H +#define CDP_TXRX_HANDLE_H + +struct cdp_cfg; +struct cdp_pdev; +struct cdp_vdev; +struct cdp_peer; +struct cdp_raw_ast; +struct cdp_soc; + +/** + * cdp_ctrl_objmgr_psoc - opaque handle for UMAC psoc object + */ +struct cdp_ctrl_objmgr_psoc; + +/** + * cdp_ctrl_objmgr_pdev - opaque handle for UMAC pdev object + */ +struct cdp_ctrl_objmgr_pdev; + +/** + * cdp_ctrl_objmgr_vdev - opaque handle for UMAC vdev object + */ +struct cdp_ctrl_objmgr_vdev; + +/** + * cdp_ctrl_objmgr_peer - opaque handle for UMAC peer object + */ +struct cdp_ctrl_objmgr_peer; + +/** + * cdp_cal_client - opaque handle for cal client object + */ +struct cdp_cal_client; + +/** + * cdp_ext_vdev - opaque handle for extended vdev data path handle + */ +struct cdp_ext_vdev; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_host_stats.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_host_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..5db10b26e10e58ff608978dd1f8653a8ab40f90f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_host_stats.h @@ -0,0 +1,742 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_host_stats.h + * @brief Define the host data path stats API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_HOST_STATS_H_ +#define _CDP_TXRX_HOST_STATS_H_ +#include "cdp_txrx_handle.h" +#include +/** + * cdp_host_stats_get: cdp call to get host stats + * @soc: SOC handle + * @vdev_id: vdev id of vdev + * @req: Requirement type + * + * return: 0 for Success, Failure returns error message + */ +static inline int cdp_host_stats_get(ol_txrx_soc_handle soc, + uint8_t vdev_id, + struct ol_txrx_stats_req *req) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_host_stats_get) + return 0; + + return soc->ops->host_stats_ops->txrx_host_stats_get(soc, vdev_id, req); +} + +/** + * cdp_host_stats_get_ratekbps: cdp call to get rate in kbps + * @soc: SOC handle + * @preamb: Preamble + * @mcs: Modulation and Coding scheme index + * @htflag: Flag to identify HT or VHT + * @gintval: Gaurd Interval value + * + * return: 0 for Failure, Returns rate on Success + */ +static inline int cdp_host_stats_get_ratekbps(ol_txrx_soc_handle soc, + int preamb, int mcs, + int htflag, int gintval) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_get_ratekbps) + return 0; + + return soc->ops->host_stats_ops->txrx_get_ratekbps(preamb, + mcs, htflag, + gintval); +} + +/** + * cdp_host_stats_clr: cdp call to clear host stats + * @soc: soc handle + * @vdev_id: vdev handle id + * + * return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_host_stats_clr(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_host_stats_clr) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->txrx_host_stats_clr(soc, vdev_id); +} + +static inline QDF_STATUS +cdp_host_ce_stats(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_host_ce_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->txrx_host_ce_stats(soc, vdev_id); +} + +static inline int cdp_stats_publish + (ol_txrx_soc_handle soc, uint8_t pdev_id, + struct cdp_stats_extd *buf) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_stats_publish) + return 0; + + return soc->ops->host_stats_ops->txrx_stats_publish(soc, pdev_id, buf); +} + +/** + * @brief Enable enhanced stats functionality. + * + * @param soc - the soc object + * @param pdev_id - id of the physical device object + * @return - QDF_STATUS + */ +static inline QDF_STATUS +cdp_enable_enhanced_stats(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_enable_enhanced_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->txrx_enable_enhanced_stats + (soc, pdev_id); +} + +/** + * @brief Disable enhanced stats functionality. + * + * @param soc - the soc object + * @param pdev_id - id of the physical device object + * @return - QDF_STATUS + */ +static inline QDF_STATUS +cdp_disable_enhanced_stats(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_disable_enhanced_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->txrx_disable_enhanced_stats + (soc, pdev_id); +} + +static inline QDF_STATUS +cdp_tx_print_tso_stats(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->tx_print_tso_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->tx_print_tso_stats(soc, vdev_id); +} + +static inline QDF_STATUS +cdp_tx_rst_tso_stats(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->tx_rst_tso_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->tx_rst_tso_stats(soc, vdev_id); +} + +static inline QDF_STATUS +cdp_tx_print_sg_stats(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->tx_print_sg_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->tx_print_sg_stats(soc, vdev_id); +} + +static inline QDF_STATUS +cdp_tx_rst_sg_stats(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->tx_rst_sg_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->tx_rst_sg_stats(soc, vdev_id); +} + +static inline QDF_STATUS +cdp_print_rx_cksum_stats(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->print_rx_cksum_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->print_rx_cksum_stats(soc, vdev_id); +} + +static inline QDF_STATUS +cdp_rst_rx_cksum_stats(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->rst_rx_cksum_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->rst_rx_cksum_stats(soc, vdev_id); +} + +static inline QDF_STATUS +cdp_host_me_stats(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_host_me_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->txrx_host_me_stats(soc, vdev_id); +} + +/** + * cdp_per_peer_stats(): function to print per peer REO Queue stats + * @soc: soc handle + * @pdev: physical device + * @addr: peer address + * + * return: status + */ +static inline QDF_STATUS cdp_per_peer_stats(ol_txrx_soc_handle soc, + uint8_t *addr) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_per_peer_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->txrx_per_peer_stats(soc, addr); +} + +static inline int cdp_host_msdu_ttl_stats(ol_txrx_soc_handle soc, + uint8_t vdev_id, + struct ol_txrx_stats_req *req) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_host_msdu_ttl_stats) + return 0; + + return soc->ops->host_stats_ops->txrx_host_msdu_ttl_stats + (soc, vdev_id, req); +} + +static inline QDF_STATUS cdp_update_peer_stats(ol_txrx_soc_handle soc, + uint8_t vdev_id, uint8_t *mac, + void *stats, + uint32_t last_tx_rate_mcs, + uint32_t stats_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_update_peer_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->txrx_update_peer_stats + (soc, vdev_id, mac, stats, last_tx_rate_mcs, stats_id); +} + +static inline QDF_STATUS cdp_get_dp_fw_peer_stats(ol_txrx_soc_handle soc, + uint8_t pdev_id, + uint8_t *mac, uint32_t caps, + uint32_t copy_stats) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->get_fw_peer_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->get_fw_peer_stats + (soc, pdev_id, mac, caps, copy_stats); +} + +static inline QDF_STATUS cdp_get_dp_htt_stats(ol_txrx_soc_handle soc, + uint8_t pdev_id, + void *data, uint32_t data_len) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->get_htt_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->get_htt_stats(soc, pdev_id, data, + data_len); +} + +/** + * @brief Update pdev host stats received from firmware + * (wmi_host_pdev_stats and wmi_host_pdev_ext_stats) into dp + * + * @param soc - soc handle + * @param pdev_id - id of the physical device object + * @param data - pdev stats + * @return - QDF_STATUS + */ +static inline QDF_STATUS +cdp_update_pdev_host_stats(ol_txrx_soc_handle soc, + uint8_t pdev_id, + void *data, + uint16_t stats_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_update_pdev_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->txrx_update_pdev_stats(soc, pdev_id, + data, + stats_id); +} + +/** + * @brief Update vdev host stats + * + * @soc: soc handle + * @vdev_id: id of the virtual device object + * @data: pdev stats + * @stats_id: type of stats + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_update_vdev_host_stats(ol_txrx_soc_handle soc, + uint8_t vdev_id, + void *data, + uint16_t stats_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_update_vdev_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->txrx_update_vdev_stats(soc, vdev_id, + data, + stats_id); +} + +/** + * @brief Call to get specified peer stats + * + * @param soc - soc handle + * @param vdev_id - vdev_id of vdev object + * @param peer_mac - mac address of the peer + * @param type - enum of required stats + * @param buf - buffer to hold the value + * @return - QDF_STATUS + */ +static inline QDF_STATUS +cdp_txrx_get_peer_stats_param(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t *peer_mac, + enum cdp_peer_stats_type type, + cdp_peer_stats_param_t *buf) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_get_peer_stats_param) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->txrx_get_peer_stats_param(soc, + vdev_id, + peer_mac, + type, + buf); +} + +/** + * @brief Call to get peer stats + * + * @param soc - soc handle + * @param vdev_id - vdev_id of vdev object + * @param peer_mac - mac address of the peer + * @return - struct cdp_peer_stats + */ +static inline QDF_STATUS +cdp_host_get_peer_stats(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t *peer_mac, + struct cdp_peer_stats *peer_stats) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_get_peer_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->txrx_get_peer_stats(soc, vdev_id, + peer_mac, + peer_stats); +} + +/** + * @brief Call to reset ald stats + * + * @param soc - soc handle + * @param vdev_id - vdev_id of vdev object + * @param peer_mac - mac address of the peer + * @return - void + */ +static inline QDF_STATUS +cdp_host_reset_peer_ald_stats(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t *peer_mac) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_reset_peer_ald_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->txrx_reset_peer_ald_stats(soc, + vdev_id, + peer_mac); +} + +/** + * @brief Call to reset peer stats + * + * @param soc - soc handle + * @param vdev_id - vdev_id of vdev object + * @param peer_mac - mac address of the peer + * @return - QDF_STATUS + */ +static inline QDF_STATUS +cdp_host_reset_peer_stats(ol_txrx_soc_handle soc, + uint8_t vdev_id, uint8_t *peer_mac) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_reset_peer_stats) + return QDF_STATUS_E_FAILURE; + + return soc->ops->host_stats_ops->txrx_reset_peer_stats(soc, + vdev_id, + peer_mac); +} + +/** + * @brief Call to get vdev stats + * + * @param soc - dp soc object + * @param vdev_id - id of dp vdev object + * @param buf - buffer + * @return - int + */ +static inline int +cdp_host_get_vdev_stats(ol_txrx_soc_handle soc, + uint8_t vdev_id, + struct cdp_vdev_stats *buf, + bool is_aggregate) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_get_vdev_stats) + return 0; + + return soc->ops->host_stats_ops->txrx_get_vdev_stats(soc, vdev_id, + buf, + is_aggregate); +} + +/** + * @brief Call to update vdev stats received from firmware + * (wmi_host_vdev_stats and wmi_host_vdev_extd_stats) into dp + * + * @param data - stats data to be updated + * @param size - size of stats data + * @param stats_id - stats id + * @return - int + */ +static inline int +cdp_update_host_vdev_stats(ol_txrx_soc_handle soc, + void *data, + uint32_t size, + uint32_t stats_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_process_wmi_host_vdev_stats) + return 0; + + return soc->ops->host_stats_ops->txrx_process_wmi_host_vdev_stats + (soc, + data, + size, + stats_id); +} + +/** + * @brief Call to get vdev extd stats + * + * @param soc - soc handle + * @param vdev_id - id of dp vdev object + * @param buf - buffer + * @return - int + */ +static inline int +cdp_get_vdev_extd_stats(ol_txrx_soc_handle soc, + uint8_t vdev_id, + wmi_host_vdev_extd_stats *buf) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_get_vdev_extd_stats) + return 0; + + return soc->ops->host_stats_ops->txrx_get_vdev_extd_stats(soc, vdev_id, + buf); +} + +/** + * @brief Call to get cdp_pdev_stats + * + * @param soc - soc handle + * @param pdev_id - id of dp pdev object + * @param buf - buffer to hold cdp_pdev_stats + * @return - success/failure + */ +static inline int +cdp_host_get_pdev_stats(ol_txrx_soc_handle soc, + uint8_t pdev_id, struct cdp_pdev_stats *buf) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_get_pdev_stats) + return 0; + + return soc->ops->host_stats_ops->txrx_get_pdev_stats(soc, pdev_id, buf); +} + +/** + * @brief Call to get radio stats + * + * @param soc - soc handle + * @param pdev_id - id of dp pdev object + * @param scn_stats_user - stats buffer + * @return - int + */ +static inline int +cdp_host_get_radio_stats(ol_txrx_soc_handle soc, + uint8_t pdev_id, + void *buf) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_get_radio_stats) + return 0; + + return soc->ops->host_stats_ops->txrx_get_radio_stats(soc, pdev_id, + buf); +} +#endif /* _CDP_TXRX_HOST_STATS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ipa.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ipa.h new file mode 100644 index 0000000000000000000000000000000000000000..9073aa90466692d24d04f5a243c967b3e9b095f2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ipa.h @@ -0,0 +1,617 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_ipa.h + * @brief Define the host data path IP Acceleraor API functions + */ +#ifndef _CDP_TXRX_IPA_H_ +#define _CDP_TXRX_IPA_H_ + +#ifdef IPA_OFFLOAD +#ifdef CONFIG_IPA_WDI_UNIFIED_API +#include +#else +#include +#endif +#include +#include "cdp_txrx_handle.h" + +/** + * cdp_ipa_get_resource() - Get allocated WLAN resources for IPA data path + * @soc - data path soc handle + * @pdev_id - device instance id + * + * Get allocated WLAN resources for IPA data path + * + * return QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS +cdp_ipa_get_resource(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_get_resource) + return soc->ops->ipa_ops->ipa_get_resource(soc, pdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_set_doorbell_paddr() - give IPA db paddr to FW + * @soc - data path soc handle + * @pdev_id - device instance id + * + * give IPA db paddr to FW + * + * return QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS +cdp_ipa_set_doorbell_paddr(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_set_doorbell_paddr) + return soc->ops->ipa_ops->ipa_set_doorbell_paddr(soc, pdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_set_active() - activate/de-ctivate IPA offload path + * @soc - data path soc handle + * @pdev_id - device instance id + * @uc_active - activate or de-activate + * @is_tx - toggle tx or rx data path + * + * activate/de-ctivate IPA offload path + * + * return QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS +cdp_ipa_set_active(ol_txrx_soc_handle soc, uint8_t pdev_id, bool uc_active, + bool is_tx) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_set_active) + return soc->ops->ipa_ops->ipa_set_active(soc, pdev_id, + uc_active, is_tx); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_op_response() - event handler from FW + * @soc - data path soc handle + * @pdev_id - device instance id + * @op_msg - event contents from firmware + * + * event handler from FW + * + * return QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS +cdp_ipa_op_response(ol_txrx_soc_handle soc, uint8_t pdev_id, uint8_t *op_msg) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_op_response) + return soc->ops->ipa_ops->ipa_op_response(soc, pdev_id, op_msg); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_register_op_cb() - register event handler function pointer + * @soc - data path soc handle + * @pdev_id - device instance id + * @op_cb - event handler callback function pointer + * @usr_ctxt - user context to registered + * + * register event handler function pointer + * + * return QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS +cdp_ipa_register_op_cb(ol_txrx_soc_handle soc, uint8_t pdev_id, + ipa_uc_op_cb_type op_cb, void *usr_ctxt) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_register_op_cb) + return soc->ops->ipa_ops->ipa_register_op_cb(soc, pdev_id, + op_cb, usr_ctxt); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_get_stat() - get IPA data path stats from FW + * @soc - data path soc handle + * @pdev_id - device instance id + * + * get IPA data path stats from FW async + * + * return QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS +cdp_ipa_get_stat(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_get_stat) + return soc->ops->ipa_ops->ipa_get_stat(soc, pdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_tx_send_ipa_data_frame() - send IPA data frame + * @soc - data path soc handle + * @vdev_id: vdev id + * @skb: skb + * + * Return: skb/ NULL is for success + */ +static inline qdf_nbuf_t cdp_ipa_tx_send_data_frame(ol_txrx_soc_handle soc, + uint8_t vdev_id, + qdf_nbuf_t skb) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return skb; + } + + if (soc->ops->ipa_ops->ipa_tx_data_frame) + return soc->ops->ipa_ops->ipa_tx_data_frame(soc, vdev_id, skb); + + return skb; +} + +/** + * cdp_ipa_set_uc_tx_partition_base() - set tx packet partition base + * @soc - data path soc handle + * @cfg_pdev: physical device instance config + * @value: partition base value + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_set_uc_tx_partition_base(ol_txrx_soc_handle soc, + struct cdp_cfg *cfg_pdev, uint32_t value) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops || !cfg_pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_set_uc_tx_partition_base) + soc->ops->ipa_ops->ipa_set_uc_tx_partition_base(cfg_pdev, + value); + + return QDF_STATUS_SUCCESS; +} + +#ifdef FEATURE_METERING +/** + * cdp_ipa_uc_get_share_stats() - get Tx/Rx byte stats from FW + * @soc - data path soc handle + * @pdev_id: physical device instance number + * @value: reset stats + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_uc_get_share_stats(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint8_t value) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_uc_get_share_stats) + return soc->ops->ipa_ops->ipa_uc_get_share_stats(soc, pdev_id, + value); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_uc_set_quota() - set quota limit to FW + * @soc - data path soc handle + * @pdev_id: physical device instance number + * @value: quota limit bytes + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_uc_set_quota(ol_txrx_soc_handle soc, uint8_t pdev_id, uint64_t value) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_uc_set_quota) + return soc->ops->ipa_ops->ipa_uc_set_quota(soc, pdev_id, value); + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * cdp_ipa_enable_autonomy() - Enable autonomy RX data path + * @soc: data path soc handle + * @pdev_id: physical device instance number + * + * IPA Data path is enabled and resumed. + * All autonomy data path elements are ready to deliver packet + * All RX packet should routed to IPA_REO ring, then IPA can receive packet + * from WLAN + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_enable_autonomy(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_enable_autonomy) + return soc->ops->ipa_ops->ipa_enable_autonomy(soc, pdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_disable_autonomy() - Disable autonomy RX data path + * @soc: data path soc handle + * @pdev_id: physical device instance number + * + * IPA Data path is enabled and resumed. + * All autonomy datapath elements are ready to deliver packet + * All RX packet should routed to IPA_REO ring, then IPA can receive packet + * from WLAN + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_disable_autonomy(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + if (soc->ops->ipa_ops->ipa_disable_autonomy) + return soc->ops->ipa_ops->ipa_disable_autonomy(soc, pdev_id); + + return QDF_STATUS_SUCCESS; +} + +#ifdef CONFIG_IPA_WDI_UNIFIED_API +/** + * cdp_ipa_setup() - Setup and connect IPA pipes + * @soc: data path soc handle + * @pdev_id: handle to the device instance number + * @ipa_i2w_cb: IPA to WLAN callback + * @ipa_w2i_cb: WLAN to IPA callback + * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback + * @ipa_desc_size: IPA descriptor size + * @ipa_priv: handle to the HTT instance + * @is_rm_enabled: Is IPA RM enabled or not + * @tx_pipe_handle: pointer to Tx pipe handle + * @rx_pipe_handle: pointer to Rx pipe handle + * @is_smmu_enabled: Is SMMU enabled or not + * @sys_in: parameters to setup sys pipe in mcc mode + * @over_gsi: Is IPA using GSI + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_setup(ol_txrx_soc_handle soc, uint8_t pdev_id, void *ipa_i2w_cb, + void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb, + uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled, + uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle, + bool is_smmu_enabled, qdf_ipa_sys_connect_params_t *sys_in, + bool over_gsi) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_setup) + return soc->ops->ipa_ops->ipa_setup(soc, pdev_id, ipa_i2w_cb, + ipa_w2i_cb, + ipa_wdi_meter_notifier_cb, + ipa_desc_size, ipa_priv, + is_rm_enabled, + tx_pipe_handle, + rx_pipe_handle, + is_smmu_enabled, + sys_in, over_gsi); + + return QDF_STATUS_SUCCESS; +} +#else /* CONFIG_IPA_WDI_UNIFIED_API */ +/** + * cdp_ipa_setup() - Setup and connect IPA pipes + * @soc: data path soc handle + * @pdev_id: handle to the device instance number + * @ipa_i2w_cb: IPA to WLAN callback + * @ipa_w2i_cb: WLAN to IPA callback + * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback + * @ipa_desc_size: IPA descriptor size + * @ipa_priv: handle to the HTT instance + * @is_rm_enabled: Is IPA RM enabled or not + * @tx_pipe_handle: pointer to Tx pipe handle + * @rx_pipe_handle: pointer to Rx pipe handle + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_setup(ol_txrx_soc_handle soc, uint8_t pdev_id, void *ipa_i2w_cb, + void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb, + uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled, + uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_setup) + return soc->ops->ipa_ops->ipa_setup(soc, pdev_id, ipa_i2w_cb, + ipa_w2i_cb, + ipa_wdi_meter_notifier_cb, + ipa_desc_size, ipa_priv, + is_rm_enabled, + tx_pipe_handle, + rx_pipe_handle); + + return QDF_STATUS_SUCCESS; +} +#endif /* CONFIG_IPA_WDI_UNIFIED_API */ + +/** + * cdp_ipa_cleanup() - Disconnect IPA pipes + * @soc: data path soc handle + * @pdev_id: handle to the device instance number + * @tx_pipe_handle: Tx pipe handle + * @rx_pipe_handle: Rx pipe handle + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_cleanup(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint32_t tx_pipe_handle, uint32_t rx_pipe_handle) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_cleanup) + return soc->ops->ipa_ops->ipa_cleanup(soc, pdev_id, + tx_pipe_handle, + rx_pipe_handle); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_setup_iface() - Setup IPA header and register interface + * @soc: data path soc handle + * @ifname: Interface name + * @mac_addr: Interface MAC address + * @prod_client: IPA prod client type + * @cons_client: IPA cons client type + * @session_id: Session ID + * @is_ipv6_enabled: Is IPV6 enabled or not + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_setup_iface(ol_txrx_soc_handle soc, char *ifname, uint8_t *mac_addr, + qdf_ipa_client_type_t prod_client, + qdf_ipa_client_type_t cons_client, + uint8_t session_id, bool is_ipv6_enabled) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_setup_iface) + return soc->ops->ipa_ops->ipa_setup_iface(ifname, mac_addr, + prod_client, + cons_client, + session_id, + is_ipv6_enabled); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface + * @soc: data path soc handle + * @ifname: Interface name + * @is_ipv6_enabled: Is IPV6 enabled or not + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_cleanup_iface(ol_txrx_soc_handle soc, char *ifname, + bool is_ipv6_enabled) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_cleanup_iface) + return soc->ops->ipa_ops->ipa_cleanup_iface(ifname, + is_ipv6_enabled); + + return QDF_STATUS_SUCCESS; +} + + /** + * cdp_ipa_uc_enable_pipes() - Enable and resume traffic on Tx/Rx pipes + * @soc - data path soc handle + * @pdev_id - device instance id + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_enable_pipes(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_enable_pipes) + return soc->ops->ipa_ops->ipa_enable_pipes(soc, pdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_uc_disable_pipes() - Suspend traffic and disable Tx/Rx pipes + * @soc: data path soc handle + * @pdev_id - device instance id + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_disable_pipes(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_disable_pipes) + return soc->ops->ipa_ops->ipa_disable_pipes(soc, pdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates + * @soc: data path soc handle + * @client: WLAN Client ID + * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps) + * + * Return: 0 on success, negative errno on error + */ +static inline QDF_STATUS +cdp_ipa_set_perf_level(ol_txrx_soc_handle soc, int client, + uint32_t max_supported_bw_mbps) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_set_perf_level) + return soc->ops->ipa_ops->ipa_set_perf_level(client, + max_supported_bw_mbps); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_rx_intrabss_fwd() - Perform intra-bss fwd for IPA RX path + * + * @soc: data path soc handle + * @vdev_id: vdev id + * @nbuf: pointer to skb of ethernet packet received from IPA RX path + * @fwd_success: pointer to indicate if skb succeeded in intra-bss TX + * + * This function performs intra-bss forwarding for WDI 3.0 IPA RX path. + * + * Return: true if packet is intra-bss fwd-ed and no need to pass to + * network stack. false if packet needs to be passed to network stack. + */ +static inline bool +cdp_ipa_rx_intrabss_fwd(ol_txrx_soc_handle soc, uint8_t vdev_id, + qdf_nbuf_t nbuf, bool *fwd_success) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops || !fwd_success) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_rx_intrabss_fwd) + return soc->ops->ipa_ops->ipa_rx_intrabss_fwd(soc, vdev_id, + nbuf, + fwd_success); + + /* Fall back to pass up to stack */ + return false; +} + +#endif /* IPA_OFFLOAD */ + +#endif /* _CDP_TXRX_IPA_H_ */ + diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_me.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_me.h new file mode 100644 index 0000000000000000000000000000000000000000..010b4c383a8a4714b12560d7c2c22467ac52430e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_me.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_me.h + * @brief Define the host data path mcast enhance API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_ME_H_ +#define _CDP_TXRX_ME_H_ + +#include +/* TODO: adf need to be replaced with qdf */ +#include "cdp_txrx_handle.h" + +static inline void +cdp_tx_me_alloc_descriptor(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->me_ops || + !soc->ops->me_ops->tx_me_alloc_descriptor) + return; + + soc->ops->me_ops->tx_me_alloc_descriptor(soc, pdev_id); +} + +static inline void +cdp_tx_me_free_descriptor(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->me_ops || + !soc->ops->me_ops->tx_me_free_descriptor) + return; + + soc->ops->me_ops->tx_me_free_descriptor(soc, pdev_id); +} + +static inline uint16_t +cdp_tx_me_convert_ucast(ol_txrx_soc_handle soc, uint8_t vdev_id, + qdf_nbuf_t wbuf, u_int8_t newmac[][6], + uint8_t newmaccnt) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->me_ops || + !soc->ops->me_ops->tx_me_convert_ucast) + return 0; + + return soc->ops->me_ops->tx_me_convert_ucast + (soc, vdev_id, wbuf, newmac, newmaccnt); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_misc.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_misc.h new file mode 100644 index 0000000000000000000000000000000000000000..1db265f6ffe06860fc5b0e703104fbcdf5b83f47 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_misc.h @@ -0,0 +1,797 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_misc.h + * @brief Define the host data path miscellaneous API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_MISC_H_ +#define _CDP_TXRX_MISC_H_ + +#include "cdp_txrx_handle.h" +/** + * cdp_tx_non_std() - Allow the control-path SW to send data frames + * @soc: data path soc handle + * @vdev_id: id of vdev + * @tx_spec: what non-standard handling to apply to the tx data frames + * @msdu_list: NULL-terminated list of tx MSDUs + * + * Generally, all tx data frames come from the OS shim into the txrx layer. + * However, there are rare cases such as TDLS messaging where the UMAC + * control-path SW creates tx data frames. + * This UMAC SW can call this function to provide the tx data frames to + * the txrx layer. + * The UMAC SW can request a callback for these data frames after their + * transmission completes, by using the ol_txrx_data_tx_cb_set function + * to register a tx completion callback, and by specifying + * ol_tx_spec_no_free as the tx_spec arg when giving the frames to + * ol_tx_non_std. + * The MSDUs need to have the appropriate L2 header type (802.3 vs. 802.11), + * as specified by ol_cfg_frame_type(). + * + * Return: null - success, skb - failure + */ +static inline qdf_nbuf_t +cdp_tx_non_std(ol_txrx_soc_handle soc, uint8_t vdev_id, + enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + return NULL; + } + + if (soc->ops->misc_ops->tx_non_std) + return soc->ops->misc_ops->tx_non_std(soc, vdev_id, tx_spec, + msdu_list); + return NULL; +} + +/** + * cdp_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart + * beat timer + * @soc: data path soc handle + * @vdev_id: id of vdev + * @timer_value_sec: new heart beat timer value + * + * Return: Old timer value set in vdev. + */ +static inline uint16_t +cdp_set_ibss_vdev_heart_beat_timer(ol_txrx_soc_handle soc, + uint8_t vdev_id, uint16_t timer_value_sec) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return 0; + } + + if (soc->ops->misc_ops->set_ibss_vdev_heart_beat_timer) + return soc->ops->misc_ops->set_ibss_vdev_heart_beat_timer( + soc, vdev_id, timer_value_sec); + + return 0; +} + +/** + * cdp_set_wisa_mode() - set wisa mode + * @soc: data path soc handle + * @vdev_id: vdev_id + * @enable: enable or disable + * + * Return: QDF_STATUS_SUCCESS mode enable success + */ +static inline QDF_STATUS +cdp_set_wisa_mode(ol_txrx_soc_handle soc, uint8_t vdev_id, bool enable) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->misc_ops->set_wisa_mode) + return soc->ops->misc_ops->set_wisa_mode(soc, vdev_id, enable); + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_data_stall_cb_register() - register data stall callback + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * @cb: callback function + * + * Return: QDF_STATUS_SUCCESS register success + */ +static inline QDF_STATUS cdp_data_stall_cb_register(ol_txrx_soc_handle soc, + uint8_t pdev_id, + data_stall_detect_cb cb) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->misc_ops->txrx_data_stall_cb_register) + return soc->ops->misc_ops->txrx_data_stall_cb_register( + soc, pdev_id, cb); + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_data_stall_cb_deregister() - de-register data stall callback + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * @cb - callback function + * + * Return: QDF_STATUS_SUCCESS de-register success + */ +static inline QDF_STATUS cdp_data_stall_cb_deregister(ol_txrx_soc_handle soc, + uint8_t pdev_id, + data_stall_detect_cb cb) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->misc_ops->txrx_data_stall_cb_deregister) + return soc->ops->misc_ops->txrx_data_stall_cb_deregister( + soc, pdev_id, cb); + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_post_data_stall_event() - post data stall event + * @soc: data path soc handle + * @indicator: Module triggering data stall + * @data_stall_type: data stall event type + * @pdev_id: pdev id + * @vdev_id_bitmap: vdev id bitmap + * @recovery_type: data stall recovery type + * + * Return: None + */ +static inline void +cdp_post_data_stall_event(ol_txrx_soc_handle soc, + enum data_stall_log_event_indicator indicator, + enum data_stall_log_event_type data_stall_type, + uint32_t pdev_id, uint32_t vdev_id_bitmap, + enum data_stall_log_recovery_type recovery_type) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->misc_ops || + !soc->ops->misc_ops->txrx_post_data_stall_event) + return; + + soc->ops->misc_ops->txrx_post_data_stall_event( + soc, indicator, data_stall_type, pdev_id, + vdev_id_bitmap, recovery_type); +} + +/** + * cdp_set_wmm_param() - set wmm parameter + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * @wmm_param: wmm parameter + * + * Return: none + */ +static inline void +cdp_set_wmm_param(ol_txrx_soc_handle soc, uint8_t pdev_id, + struct ol_tx_wmm_param_t wmm_param) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->set_wmm_param) + return soc->ops->misc_ops->set_wmm_param(soc, pdev_id, + wmm_param); + + return; +} + +/** + * cdp_runtime_suspend() - suspend + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * + * Return: QDF_STATUS_SUCCESS suspend success + */ +static inline QDF_STATUS cdp_runtime_suspend(ol_txrx_soc_handle soc, + uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->misc_ops->runtime_suspend) + return soc->ops->misc_ops->runtime_suspend(soc, pdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_runtime_resume() - resume + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * + * Return: QDF_STATUS_SUCCESS suspend success + */ +static inline QDF_STATUS cdp_runtime_resume(ol_txrx_soc_handle soc, + uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->misc_ops->runtime_resume) + return soc->ops->misc_ops->runtime_resume(soc, pdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_hl_tdls_flag_reset() - tdls flag reset + * @soc: data path soc handle + * @vdev_id: id of vdev + * @flag: flag indicating to set/reset tdls + * + * Return: none + */ +static inline void +cdp_hl_tdls_flag_reset(ol_txrx_soc_handle soc, uint8_t vdev_id, bool flag) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->hl_tdls_flag_reset) + return soc->ops->misc_ops->hl_tdls_flag_reset(soc, vdev_id, + flag); + + return; +} + +/** + * cdp_get_opmode() - get vdev operation mode + * @soc: data path soc handle + * @vdev_id: id of vdev + * + * Return virtual device operational mode + * op_mode_ap, + * op_mode_ibss, + * op_mode_sta, + * op_mode_monitor, + * op_mode_ocb, + * + * return interface id + * 0 unknown interface + */ +static inline int +cdp_get_opmode(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return 0; + } + + if (soc->ops->misc_ops->get_opmode) + return soc->ops->misc_ops->get_opmode(soc, vdev_id); + + return 0; +} + +/** + * cdp_get_vdev_id() - get vdev id + * @soc - data path soc handle + * @vdev - virtual interface instance + * + * get virtual interface id + * + * return interface id + * 0 unknown interface + */ +static inline uint16_t +cdp_get_vdev_id(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return 0; + } + + if (soc->ops->misc_ops->get_vdev_id) + return soc->ops->misc_ops->get_vdev_id(vdev); + return 0; +} + +/** + * cdp_get_tx_ack_stats() - get tx ack count for vdev + * @soc - data path soc handle + * @vdev_id - vdev id + * + * return tx ack count + * 0 invalid count + */ +static inline uint32_t +cdp_get_tx_ack_stats(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return 0; + } + + if (soc->ops->misc_ops->get_tx_ack_stats) + return soc->ops->misc_ops->get_tx_ack_stats(soc, vdev_id); + + return 0; +} + +/** + * cdp_bad_peer_txctl_set_setting() - Set peer timer balance parameters + * @soc: data path soc handle + * @pdev_id: id of datapath pdev handle + * @enable: enable/disable peer balance state + * @period: balance timer period for peer + * @txq_limit: txp limit for peer + * + * Return: none + */ +static inline void +cdp_bad_peer_txctl_set_setting(ol_txrx_soc_handle soc, uint8_t pdev_id, + int enable, int period, int txq_limit) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->bad_peer_txctl_set_setting) + return soc->ops->misc_ops->bad_peer_txctl_set_setting( + soc, pdev_id, enable, period, + txq_limit); + return; +} + +/** + * cdp_bad_peer_txctl_update_threshold() - TBD + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * @level: index of the threshold configuration + * @tput_thresh: peer balance throughput threshold + * @tx_limit: peer balance tx limit threshold + * + * TBD + * + * Return: none + */ +static inline void +cdp_bad_peer_txctl_update_threshold(ol_txrx_soc_handle soc, + uint8_t pdev_id, int level, + int tput_thresh, int tx_limit) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->bad_peer_txctl_update_threshold) + return soc->ops->misc_ops->bad_peer_txctl_update_threshold( + soc, pdev_id, level, tput_thresh, tx_limit); + return; +} + +/** + * cdp_mark_first_wakeup_packet() - set flag to indicate that + * fw is compatible for marking first packet after wow wakeup + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * @value: 1 for enabled/ 0 for disabled + * + * Return: None + */ +static inline void cdp_mark_first_wakeup_packet(ol_txrx_soc_handle soc, + uint8_t pdev_id, uint8_t value) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->mark_first_wakeup_packet) + return soc->ops->misc_ops->mark_first_wakeup_packet( + soc, pdev_id, value); + return; +} + + +/** + * cds_update_mac_id() - update mac_id for vdev + * @psoc: data path soc handle + * @vdev_id: vdev id + * @mac_id: mac id + * + * Return: none + */ +static inline void cdp_update_mac_id(void *psoc, uint8_t vdev_id, + uint8_t mac_id) +{ + ol_txrx_soc_handle soc = psoc; + + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->update_mac_id) + return soc->ops->misc_ops->update_mac_id(soc, vdev_id, mac_id); + return; +} + +/** + * cdp_flush_rx_frames() - flush cached rx frames + * @soc: data path soc handle + * @pdev_id: datapath pdev identifier + * @peer_mac: peer mac address + * @drop: set flag to drop frames + * + * Return: None + */ +static inline void cdp_flush_rx_frames(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint8_t *peer_mac, bool drop) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->flush_rx_frames) + return soc->ops->misc_ops->flush_rx_frames(soc, pdev_id, + peer_mac, drop); + return; +} + +/* + * cdp_get_intra_bss_fwd_pkts_count() - to get the total tx and rx packets + * that has been forwarded from txrx layer without going to upper layers. + * @soc: Datapath soc handle + * @vdev_id: vdev id + * @fwd_tx_packets: pointer to forwarded tx packets count parameter + * @fwd_rx_packets: pointer to forwarded rx packets count parameter + * + * Return: status -> A_OK - success, A_ERROR - failure + */ +static inline A_STATUS cdp_get_intra_bss_fwd_pkts_count( + ol_txrx_soc_handle soc, uint8_t vdev_id, + uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return 0; + } + + if (soc->ops->misc_ops->get_intra_bss_fwd_pkts_count) + return soc->ops->misc_ops->get_intra_bss_fwd_pkts_count( + soc, vdev_id, fwd_tx_packets, fwd_rx_packets); + + return 0; +} + +/** + * cdp_pkt_log_init() - API to initialize packet log + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * @scn: HIF context + * + * Return: void + */ +static inline void cdp_pkt_log_init(ol_txrx_soc_handle soc, + uint8_t pdev_id, void *scn) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->pkt_log_init) + return soc->ops->misc_ops->pkt_log_init(soc, pdev_id, scn); + + return; +} + +/** + * cdp_pkt_log_con_service() - API to connect packet log service + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * @scn: HIF context + * + * Return: void + */ +static inline void cdp_pkt_log_con_service(ol_txrx_soc_handle soc, + uint8_t pdev_id, void *scn) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->pkt_log_con_service) + return soc->ops->misc_ops->pkt_log_con_service( + soc, pdev_id, scn); + + return; +} + +/** + * cdp_get_num_rx_contexts() - API to get the number of RX contexts + * @soc: soc handle + * + * Return: number of RX contexts + */ +static inline int cdp_get_num_rx_contexts(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return 0; + } + + if (soc->ops->misc_ops->get_num_rx_contexts) + return soc->ops->misc_ops->get_num_rx_contexts(soc); + + return 0; +} + +/** + * cdp_register_packetdump_cb() - API to register packetdump callback + * + * Register TX/RX callback for data packets, during connection. And per packet + * stats will be passed to user-space by @tx_cb/@rx_cb. + * + * @soc: soc handle + * @pdev_id: id of data path pdev handle + * @tx_cb: tx packet callback + * @rx_cb: rx packet callback + * + * Return: void + */ +static inline void cdp_register_packetdump_cb(ol_txrx_soc_handle soc, + uint8_t pdev_id, + ol_txrx_pktdump_cb tx_cb, + ol_txrx_pktdump_cb rx_cb) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->register_pktdump_cb) + return soc->ops->misc_ops->register_pktdump_cb( + soc, pdev_id, tx_cb, rx_cb); +} + +/** + * cdp_deregister_packetdump_cb() - API to unregister packetdump callback + * + * Deregister callback for TX/RX data packets. + * + * @soc: soc handle + * @pdev_id: id of data path pdev handle + * + * Return: void + */ +static inline void cdp_deregister_packetdump_cb(ol_txrx_soc_handle soc, + uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->unregister_pktdump_cb) + return soc->ops->misc_ops->unregister_pktdump_cb(soc, pdev_id); +} + +typedef void (*rx_mic_error_callback)(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t pdev_id, + struct cdp_rx_mic_err_info *info); + +/** + * cdp_register_rx_mic_error_ind_handler() - API to register mic error + * indication handler + * + * @soc: soc handle + * @rx_mic_cb: rx mic error indication callback + * + * Return: void + */ +static inline void +cdp_register_rx_mic_error_ind_handler(ol_txrx_soc_handle soc, + rx_mic_error_callback rx_mic_cb) +{ + if (!soc || !soc->ol_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + soc->ol_ops->rx_mic_error = rx_mic_cb; +} + +/** + * cdp_pdev_reset_driver_del_ack() - reset driver TCP delayed ack flag + * @soc: data path soc handle + * @pdev_id: pdev id + * + * Return: none + */ +static inline void cdp_pdev_reset_driver_del_ack(void *psoc, + uint8_t pdev_id) +{ + ol_txrx_soc_handle soc = psoc; + + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->pdev_reset_driver_del_ack) + return soc->ops->misc_ops->pdev_reset_driver_del_ack(soc, + pdev_id); +} + +/* + * cdp_vdev_set_driver_del_ack_enable() - set driver delayed ack enabled flag + * @soc: data path soc handle + * @vdev_id: vdev id + * @rx_packets: number of rx packets + * @time_in_ms: time in ms + * @high_th: high threshold + * @low_th: low threshold + * + * Return: none + */ +static inline void cdp_vdev_set_driver_del_ack_enable(ol_txrx_soc_handle soc, + uint8_t vdev_id, + unsigned long rx_packets, + uint32_t time_in_ms, + uint32_t high_th, + uint32_t low_th) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->vdev_set_driver_del_ack_enable) + return soc->ops->misc_ops->vdev_set_driver_del_ack_enable( + soc, vdev_id, rx_packets, time_in_ms, high_th, low_th); +} + +static inline void cdp_vdev_set_bundle_require_flag(ol_txrx_soc_handle soc, + uint8_t vdev_id, + unsigned long tx_bytes, + uint32_t time_in_ms, + uint32_t high_th, + uint32_t low_th) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->vdev_set_bundle_require_flag) + return soc->ops->misc_ops->vdev_set_bundle_require_flag( + vdev_id, tx_bytes, time_in_ms, high_th, low_th); +} + +static inline void cdp_pdev_reset_bundle_require_flag(ol_txrx_soc_handle soc, + uint8_t pdev_id) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->misc_ops->pdev_reset_bundle_require_flag) + return soc->ops->misc_ops->pdev_reset_bundle_require_flag( + soc, pdev_id); +} + +/** + * cdp_txrx_ext_stats_request(): request dp tx and rx extended stats + * @soc: soc handle + * @pdev_id: pdev id + * @req: stats request structure to fill + * + * return: status + */ +static inline QDF_STATUS +cdp_txrx_ext_stats_request(ol_txrx_soc_handle soc, uint8_t pdev_id, + struct cdp_txrx_ext_stats *req) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops || !req) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->misc_ops->txrx_ext_stats_request) + return soc->ops->misc_ops->txrx_ext_stats_request(soc, pdev_id, + req); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_request_rx_hw_stats(): request rx hw stats + * @soc: soc handle + * @vdev_id: vdev id + * + * return: none + */ +static inline QDF_STATUS +cdp_request_rx_hw_stats(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops || !soc->ops->misc_ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->misc_ops->request_rx_hw_stats) + return soc->ops->misc_ops->request_rx_hw_stats(soc, vdev_id); + + return QDF_STATUS_SUCCESS; +} +#endif /* _CDP_TXRX_MISC_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mob_def.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mob_def.h new file mode 100644 index 0000000000000000000000000000000000000000..96638a716f26275b5c0cc912090a999dcbbf29f3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mob_def.h @@ -0,0 +1,503 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __CDP_TXRX_MOB_DEF_H +#define __CDP_TXRX_MOB_DEF_H +#include +#include + +#define TX_WMM_AC_NUM 4 +#define ENABLE_DP_HIST_STATS +#define DP_RX_DISABLE_NDI_MDNS_FORWARDING + +#define OL_TXQ_PAUSE_REASON_FW (1 << 0) +#define OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED (1 << 1) +#define OL_TXQ_PAUSE_REASON_TX_ABORT (1 << 2) +#define OL_TXQ_PAUSE_REASON_VDEV_STOP (1 << 3) +#define OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION (1 << 4) + +#define OL_TXRX_INVALID_NUM_PEERS (-1) + + +/* Maximum number of station supported by data path, including BC. */ +#define WLAN_MAX_STA_COUNT (HAL_NUM_STA) + +/* The symbolic station ID return to HDD to specify the packet is bc/mc */ +#define WLAN_RX_BCMC_STA_ID (WLAN_MAX_STA_COUNT + 1) + +/* The symbolic station ID return to HDD to specify the packet is + to soft-AP itself */ +#define WLAN_RX_SAP_SELF_STA_ID (WLAN_MAX_STA_COUNT + 2) + +/* is 802.11 address multicast/broadcast? */ +#define IEEE80211_IS_MULTICAST(_a) (*(_a) & 0x01) + +#define MAX_PEERS 32 + +/* + * Bins used for reporting delay histogram: + * bin 0: 0 - 10 ms delay + * bin 1: 10 - 20 ms delay + * bin 2: 20 - 40 ms delay + * bin 3: 40 - 80 ms delay + * bin 4: 80 - 160 ms delay + * bin 5: > 160 ms delay + */ +#define QCA_TX_DELAY_HIST_REPORT_BINS 6 + +/* BA actions */ +#define IEEE80211_ACTION_BA_ADDBA_REQUEST 0 /* ADDBA request */ +#define IEEE80211_ACTION_BA_ADDBA_RESPONSE 1 /* ADDBA response */ +#define IEEE80211_ACTION_BA_DELBA 2 /* DELBA */ + +#define IEEE80211_BA_POLICY_DELAYED 0 +#define IEEE80211_BA_POLICY_IMMEDIATE 1 +#define IEEE80211_BA_AMSDU_SUPPORTED 1 + +/** + * enum netif_action_type - Type of actions on netif queues + * @WLAN_STOP_ALL_NETIF_QUEUE: stop all netif queues + * @WLAN_START_ALL_NETIF_QUEUE: start all netif queues + * @WLAN_WAKE_ALL_NETIF_QUEUE: wake all netif queues + * @WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER: stop all queues and off carrier + * @WLAN_START_ALL_NETIF_QUEUE_N_CARRIER: start all queues and on carrier + * @WLAN_NETIF_TX_DISABLE: disable tx + * @WLAN_NETIF_TX_DISABLE_N_CARRIER: disable tx and off carrier + * @WLAN_NETIF_CARRIER_ON: on carrier + * @WLAN_NETIF_CARRIER_OFF: off carrier + * @WLAN_NETIF_PRIORITY_QUEUE_ON: start priority netif queues + * @WLAN_NETIF_PRIORITY_QUEUE_OFF: stop priority netif queues + * @WLAN_WAKE_NON_PRIORITY_QUEUE: wake non priority netif queues + * @WLAN_STOP_NON_PRIORITY_QUEUE: stop non priority netif queues + */ +enum netif_action_type { + WLAN_NETIF_ACTION_TYPE_NONE = 0, + WLAN_STOP_ALL_NETIF_QUEUE = 1, + WLAN_START_ALL_NETIF_QUEUE = 2, + WLAN_WAKE_ALL_NETIF_QUEUE = 3, + WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER = 4, + WLAN_START_ALL_NETIF_QUEUE_N_CARRIER = 5, + WLAN_NETIF_TX_DISABLE = 6, + WLAN_NETIF_TX_DISABLE_N_CARRIER = 7, + WLAN_NETIF_CARRIER_ON = 8, + WLAN_NETIF_CARRIER_OFF = 9, + WLAN_NETIF_PRIORITY_QUEUE_ON = 10, + WLAN_NETIF_PRIORITY_QUEUE_OFF = 11, + WLAN_NETIF_VO_QUEUE_ON = 12, + WLAN_NETIF_VO_QUEUE_OFF = 13, + WLAN_NETIF_VI_QUEUE_ON = 14, + WLAN_NETIF_VI_QUEUE_OFF = 15, + WLAN_NETIF_BE_BK_QUEUE_OFF = 16, + WLAN_WAKE_NON_PRIORITY_QUEUE = 17, + WLAN_STOP_NON_PRIORITY_QUEUE = 18, + WLAN_NETIF_ACTION_TYPE_MAX, +}; + +/** + * enum netif_reason_type - reason for netif queue action + * @WLAN_CONTROL_PATH: action from control path + * @WLAN_DATA_FLOW_CONTROL: because of flow control + * @WLAN_FW_PAUSE: because of firmware pause + * @WLAN_TX_ABORT: because of tx abort + * @WLAN_VDEV_STOP: because of vdev stop + * @WLAN_PEER_UNAUTHORISED: because of peer is unauthorised + * @WLAN_THERMAL_MITIGATION: because of thermal mitigation + */ +enum netif_reason_type { + WLAN_CONTROL_PATH = 1, + WLAN_DATA_FLOW_CONTROL, + WLAN_FW_PAUSE, + WLAN_TX_ABORT, + WLAN_VDEV_STOP, + WLAN_PEER_UNAUTHORISED, + WLAN_THERMAL_MITIGATION, + WLAN_DATA_FLOW_CONTROL_PRIORITY, + WLAN_REASON_TYPE_MAX, +}; + +enum ol_rx_err_type { + OL_RX_ERR_DEFRAG_MIC, + OL_RX_ERR_PN, + OL_RX_ERR_UNKNOWN_PEER, + OL_RX_ERR_MALFORMED, + OL_RX_ERR_TKIP_MIC, + OL_RX_ERR_DECRYPT, + OL_RX_ERR_MPDU_LENGTH, + OL_RX_ERR_ENCRYPT_REQUIRED, + OL_RX_ERR_DUP, + OL_RX_ERR_UNKNOWN, + OL_RX_ERR_FCS, + OL_RX_ERR_PRIVACY, + OL_RX_ERR_NONE_FRAG, + OL_RX_ERR_NONE = 0xFF +}; + +enum throttle_level { + THROTTLE_LEVEL_0, + THROTTLE_LEVEL_1, + THROTTLE_LEVEL_2, + THROTTLE_LEVEL_3, + /* Invalid */ + THROTTLE_LEVEL_MAX, +}; + +enum { + OL_TX_WMM_AC_BE, + OL_TX_WMM_AC_BK, + OL_TX_WMM_AC_VI, + OL_TX_WMM_AC_VO, + OL_TX_NUM_WMM_AC +}; + +/** + * @enum ol_tx_spec + * @brief indicate what non-standard transmission actions to apply + * @details + * Indicate one or more of the following: + * - The tx frame already has a complete 802.11 header. + * Thus, skip 802.3/native-WiFi to 802.11 header encapsulation and + * A-MSDU aggregation. + * - The tx frame should not be aggregated (A-MPDU or A-MSDU) + * - The tx frame is already encrypted - don't attempt encryption. + * - The tx frame is a segment of a TCP jumbo frame. + * - This tx frame should not be unmapped and freed by the txrx layer + * after transmission, but instead given to a registered tx completion + * callback. + * More than one of these specification can apply, though typically + * only a single specification is applied to a tx frame. + * A compound specification can be created, as a bit-OR of these + * specifications. + */ +enum ol_tx_spec { + OL_TX_SPEC_STD = 0x0, /* do regular processing */ + OL_TX_SPEC_RAW = 0x1, /* skip encap + A-MSDU aggr */ + OL_TX_SPEC_NO_AGGR = 0x2, /* skip encap + all aggr */ + OL_TX_SPEC_NO_ENCRYPT = 0x4, /* skip encap + encrypt */ + OL_TX_SPEC_TSO = 0x8, /* TCP segmented */ + OL_TX_SPEC_NWIFI_NO_ENCRYPT = 0x10, /* skip encrypt for nwifi */ + OL_TX_SPEC_NO_FREE = 0x20, /* give to cb rather than free */ +}; + +/** + * @enum peer_debug_id_type: debug ids to track peer get_ref and release_ref + * @brief Unique peer debug IDs to track the callers. Each new usage can add to + * this enum list to create a new "PEER_DEBUG_ID_". + * @PEER_DEBUG_ID_OL_INTERNAL: debug id for OL internal usage + * @PEER_DEBUG_ID_WMA_PKT_DROP: debug id for wma_is_pkt_drop_candidate API + * @PEER_DEBUG_ID_WMA_ADDBA_REQ: debug id for ADDBA request + * @PEER_DEBUG_ID_WMA_DELBA_REQ: debug id for DELBA request + * @PEER_DEBUG_ID_LIM_SEND_ADDBA_RESP: debug id for send ADDBA response + * @PEER_DEBUG_ID_OL_RX_THREAD: debug id for rx thread + * @PEER_DEBUG_ID_WMA_CCMP_REPLAY_ATTACK: debug id for CCMP replay + * @PEER_DEBUG_ID_WMA_DEL_BSS:debug id for remove BSS + * @PEER_DEBUG_ID_WMA_VDEV_STOP_RESP:debug id for vdev stop response handler + * @PEER_DEBUG_ID_OL_PEER_MAP:debug id for peer map/unmap + * @PEER_DEBUG_ID_OL_PEER_ATTACH: debug id for peer attach/detach + * @PEER_DEBUG_ID_OL_TXQ_VDEV_FL: debug id for vdev flush + * @PEER_DEBUG_ID_OL_HASH_ERS:debug id for peer find hash erase + * @PEER_DEBUG_ID_MAX: debug id MAX + */ +enum peer_debug_id_type { + PEER_DEBUG_ID_OL_INTERNAL, + PEER_DEBUG_ID_WMA_PKT_DROP, + PEER_DEBUG_ID_WMA_ADDBA_REQ, + PEER_DEBUG_ID_WMA_DELBA_REQ, + PEER_DEBUG_ID_LIM_SEND_ADDBA_RESP, + PEER_DEBUG_ID_OL_RX_THREAD, + PEER_DEBUG_ID_WMA_CCMP_REPLAY_ATTACK, + PEER_DEBUG_ID_WMA_DEL_BSS, + PEER_DEBUG_ID_WMA_VDEV_STOP_RESP, + PEER_DEBUG_ID_OL_PEER_MAP, + PEER_DEBUG_ID_OL_PEER_ATTACH, + PEER_DEBUG_ID_OL_TXQ_VDEV_FL, + PEER_DEBUG_ID_OL_HASH_ERS, + PEER_DEBUG_ID_OL_UNMAP_TIMER_WORK, + PEER_DEBUG_ID_MAX +}; + +/** + * struct ol_txrx_desc_type - txrx descriptor type + * @is_qos_enabled: is station qos enabled + * @is_wapi_supported: is station wapi supported + * @peer_addr: peer mac address + */ +struct ol_txrx_desc_type { + uint8_t is_qos_enabled; + uint8_t is_wapi_supported; + struct qdf_mac_addr peer_addr; +}; + +/** + * struct ol_tx_sched_wrr_ac_specs_t - the wrr ac specs params structure + * @wrr_skip_weight: map to ol_tx_sched_wrr_adv_category_info_t.specs. + * wrr_skip_weight + * @credit_threshold: map to ol_tx_sched_wrr_adv_category_info_t.specs. + * credit_threshold + * @send_limit: map to ol_tx_sched_wrr_adv_category_info_t.specs. + * send_limit + * @credit_reserve: map to ol_tx_sched_wrr_adv_category_info_t.specs. + * credit_reserve + * @discard_weight: map to ol_tx_sched_wrr_adv_category_info_t.specs. + * discard_weight + * + * This structure is for wrr ac specs params set from user, it will update + * its content corresponding to the ol_tx_sched_wrr_adv_category_info_t.specs. + */ +struct ol_tx_sched_wrr_ac_specs_t { + int wrr_skip_weight; + uint32_t credit_threshold; + uint16_t send_limit; + int credit_reserve; + int discard_weight; +}; + +/** + * struct txrx_pdev_cfg_param_t - configuration information + * passed to the data path + */ +struct txrx_pdev_cfg_param_t { + uint8_t is_full_reorder_offload; + /* IPA Micro controller data path offload enable flag */ + uint8_t is_uc_offload_enabled; + /* IPA Micro controller data path offload TX buffer count */ + uint32_t uc_tx_buffer_count; + /* IPA Micro controller data path offload TX buffer size */ + uint32_t uc_tx_buffer_size; + /* IPA Micro controller data path offload RX indication ring count */ + uint32_t uc_rx_indication_ring_count; + /* IPA Micro controller data path offload TX partition base */ + uint32_t uc_tx_partition_base; + /* IP, TCP and UDP checksum offload */ + bool ip_tcp_udp_checksum_offload; + /* IP, TCP and UDP checksum offload for NAN Mode */ + bool nan_ip_tcp_udp_checksum_offload; + /* IP, TCP and UDP checksum offload for P2P Mode*/ + bool p2p_ip_tcp_udp_checksum_offload; + /* Checksum offload override flag for Legcay modes */ + bool legacy_mode_csum_disable; + /* Rx processing in thread from TXRX */ + bool enable_rxthread; + /* CE classification enabled through INI */ + bool ce_classify_enabled; +#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL) + /* Threshold to stop queue in percentage */ + uint32_t tx_flow_stop_queue_th; + /* Start queue offset in percentage */ + uint32_t tx_flow_start_queue_offset; +#endif + +#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK + /* enable the tcp delay ack feature in the driver */ + bool del_ack_enable; + /* timeout if no more tcp ack frames, unit is ms */ + uint16_t del_ack_timer_value; + /* the maximum number of replaced tcp ack frames */ + uint16_t del_ack_pkt_count; +#endif + + struct ol_tx_sched_wrr_ac_specs_t ac_specs[TX_WMM_AC_NUM]; + bool gro_enable; + bool tc_based_dyn_gro; + uint32_t tc_ingress_prio; + bool tso_enable; + bool lro_enable; + bool sg_enable; + bool enable_data_stall_detection; + bool enable_flow_steering; + bool disable_intra_bss_fwd; + +#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE + uint16_t bundle_timer_value; + uint16_t bundle_size; +#endif + uint8_t pktlog_buffer_size; +}; + +#ifdef IPA_OFFLOAD +/** + * ol_txrx_ipa_resources - Resources needed for IPA + */ +struct ol_txrx_ipa_resources { + qdf_shared_mem_t *ce_sr; + uint32_t ce_sr_ring_size; + qdf_dma_addr_t ce_reg_paddr; + + qdf_shared_mem_t *tx_comp_ring; + uint32_t tx_num_alloc_buffer; + + qdf_shared_mem_t *rx_rdy_ring; + qdf_shared_mem_t *rx_proc_done_idx; + + qdf_shared_mem_t *rx2_rdy_ring; + qdf_shared_mem_t *rx2_proc_done_idx; + + /* IPA UC doorbell registers paddr */ + qdf_dma_addr_t tx_comp_doorbell_dmaaddr; + qdf_dma_addr_t rx_ready_doorbell_dmaaddr; + + uint32_t tx_pipe_handle; + uint32_t rx_pipe_handle; +}; +#endif + +struct ol_txrx_ocb_chan_info { + uint32_t chan_freq; + uint16_t disable_rx_stats_hdr:1; +}; + +/** + * ol_mic_error_info - carries the information associated with + * a MIC error + * @vdev_id: virtual device ID + * @key_id: Key ID + * @pn: packet number + * @sa: source address + * @da: destination address + * @ta: transmitter address + */ +struct ol_mic_error_info { + uint8_t vdev_id; + uint32_t key_id; + uint64_t pn; + uint8_t sa[QDF_MAC_ADDR_SIZE]; + uint8_t da[QDF_MAC_ADDR_SIZE]; + uint8_t ta[QDF_MAC_ADDR_SIZE]; +}; + +/** + * ol_error_info - carries the information associated with an + * error indicated by the firmware + * @mic_err: MIC error information + */ +struct ol_error_info { + union { + struct ol_mic_error_info mic_err; + } u; +}; + + +/** + * struct ol_txrx_ocb_set_chan - txrx OCB channel info + * @ocb_channel_count: Channel count + * @ocb_channel_info: OCB channel info + */ +struct ol_txrx_ocb_set_chan { + uint32_t ocb_channel_count; + struct ol_txrx_ocb_chan_info *ocb_channel_info; +}; + +/** + * @brief Parameter type to pass WMM setting to ol_txrx_set_wmm_param + * @details + * The struct is used to specify informaiton to update TX WMM scheduler. + */ +struct ol_tx_ac_param_t { + uint32_t aifs; + uint32_t cwmin; + uint32_t cwmax; +}; + +struct ol_tx_wmm_param_t { + struct ol_tx_ac_param_t ac[OL_TX_NUM_WMM_AC]; +}; + +struct ieee80211_ba_parameterset { +#if _BYTE_ORDER == _BIG_ENDIAN + uint16_t buffersize:10, /* B6-15 buffer size */ + tid:4, /* B2-5 TID */ + bapolicy:1, /* B1 block ack policy */ + amsdusupported:1; /* B0 amsdu supported */ +#else + uint16_t amsdusupported:1, /* B0 amsdu supported */ + bapolicy:1, /* B1 block ack policy */ + tid:4, /* B2-5 TID */ + buffersize:10; /* B6-15 buffer size */ +#endif +} __packed; + +struct ieee80211_ba_seqctrl { +#if _BYTE_ORDER == _BIG_ENDIAN + uint16_t startseqnum:12, /* B4-15 starting sequence number */ + fragnum:4; /* B0-3 fragment number */ +#else + uint16_t fragnum:4, /* B0-3 fragment number */ + startseqnum:12; /* B4-15 starting sequence number */ +#endif +} __packed; + +struct ieee80211_delba_parameterset { +#if _BYTE_ORDER == _BIG_ENDIAN + uint16_t tid:4, /* B12-15 tid */ + initiator:1, /* B11 initiator */ + reserved0:11; /* B0-10 reserved */ +#else + uint16_t reserved0:11, /* B0-10 reserved */ + initiator:1, /* B11 initiator */ + tid:4; /* B12-15 tid */ +#endif +} __packed; + +/** + * ol_txrx_vdev_peer_remove_cb - wma_remove_peer callback + */ +typedef void (*ol_txrx_vdev_peer_remove_cb)(void *handle, uint8_t *bssid, + uint8_t vdev_id, void *peer); + +/** + * @typedef tx_pause_callback + * @brief OSIF function registered with the data path + */ +typedef void (*tx_pause_callback)(uint8_t vdev_id, + enum netif_action_type action, + enum netif_reason_type reason); + +typedef void (*ipa_uc_op_cb_type)(uint8_t *op_msg, + void *osif_ctxt); + +/** + * struct ol_rx_inv_peer_params - rx invalid peer data parameters + * @vdev_id: Virtual device ID + * @ra: RX data receiver MAC address + * @ta: RX data transmitter MAC address + */ +struct ol_rx_inv_peer_params { + uint8_t vdev_id; + uint8_t ra[QDF_MAC_ADDR_SIZE]; + uint8_t ta[QDF_MAC_ADDR_SIZE]; +}; + +/** + * cdp_txrx_ext_stats: dp extended stats + * tx_msdu_enqueue: tx msdu queued to hw + * tx_msdu_overflow: tx msdu overflow + * rx_mpdu_received: rx mpdu processed by hw + * rx_mpdu_delivered: rx mpdu received from hw + * rx_mpdu_error: rx mpdu error count + * rx_mpdu_missed: rx mpdu missed by hw + */ +struct cdp_txrx_ext_stats { + uint32_t tx_msdu_enqueue; + uint32_t tx_msdu_overflow; + uint32_t rx_mpdu_received; + uint32_t rx_mpdu_delivered; + uint32_t rx_mpdu_error; + uint32_t rx_mpdu_missed; +}; + +#endif /* __CDP_TXRX_MOB_DEF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mon.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mon.h new file mode 100644 index 0000000000000000000000000000000000000000..a11bb95846bfb2eacebc7601d7b52800909dd6ae --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mon.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_mon.h + * @brief Define the monitor mode API functions + * called by the host control SW and the OS interface module + */ + +#ifndef _CDP_TXRX_MON_H_ +#define _CDP_TXRX_MON_H_ +#include "cdp_txrx_handle.h" + +static inline QDF_STATUS cdp_reset_monitor_mode(ol_txrx_soc_handle soc, + uint8_t pdev_id, + u_int8_t smart_monitor) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->mon_ops || + !soc->ops->mon_ops->txrx_reset_monitor_mode) + return 0; + + return soc->ops->mon_ops->txrx_reset_monitor_mode(soc, pdev_id, + smart_monitor); +} + +/** + * cdp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture + * @soc: Datapath SOC handle + * @pdev_id: id of datapath PDEV handle + * @nbuf: Management frame buffer + */ +static inline QDF_STATUS +cdp_deliver_tx_mgmt(ol_txrx_soc_handle soc, uint8_t pdev_id, + qdf_nbuf_t nbuf) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->mon_ops || + !soc->ops->mon_ops->txrx_deliver_tx_mgmt) + return QDF_STATUS_E_FAILURE; + + return soc->ops->mon_ops->txrx_deliver_tx_mgmt(soc, pdev_id, nbuf); +} + +#ifdef WLAN_FEATURE_PKT_CAPTURE +static inline void +cdp_pktcapture_record_channel( + ol_txrx_soc_handle soc, + uint8_t pdev_id, + int chan_num) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->pktcapture_ops || + !soc->ops->pktcapture_ops->txrx_pktcapture_record_channel) + return; + + soc->ops->pktcapture_ops->txrx_pktcapture_record_channel(soc, + pdev_id, + chan_num); +} + +static inline void +cdp_set_packet_capture_mode(ol_txrx_soc_handle soc, + uint8_t pdev_id, + uint8_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->pktcapture_ops || + !soc->ops->pktcapture_ops->txrx_pktcapture_set_mode) + return; + + soc->ops->pktcapture_ops->txrx_pktcapture_set_mode(soc, pdev_id, val); +} + +static inline uint8_t +cdp_get_packet_capture_mode(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->pktcapture_ops || + !soc->ops->pktcapture_ops->txrx_pktcapture_get_mode) + return 0; + + return soc->ops->pktcapture_ops->txrx_pktcapture_get_mode(soc, + pdev_id); +} + +static inline QDF_STATUS +cdp_register_pktcapture_cb( + ol_txrx_soc_handle soc, uint8_t pdev_id, void *ctx, + QDF_STATUS(txrx_pktcapture_cb)(void *, qdf_nbuf_t)) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (!soc->ops->pktcapture_ops || + !soc->ops->pktcapture_ops->txrx_pktcapture_cb_register) + return QDF_STATUS_E_INVAL; + + return soc->ops->pktcapture_ops->txrx_pktcapture_cb_register( + soc, + pdev_id, + ctx, + txrx_pktcapture_cb); +} + +static inline QDF_STATUS +cdp_deregister_pktcapture_cb(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (!soc->ops->pktcapture_ops || + !soc->ops->pktcapture_ops->txrx_pktcapture_cb_deregister) + return QDF_STATUS_E_INVAL; + + return soc->ops->pktcapture_ops->txrx_pktcapture_cb_deregister(soc, + pdev_id); +} + +static inline QDF_STATUS +cdp_pktcapture_mgmtpkt_process( + ol_txrx_soc_handle soc, + uint8_t pdev_id, + struct mon_rx_status *txrx_status, + qdf_nbuf_t nbuf, + uint8_t status) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (!soc->ops->pktcapture_ops || + !soc->ops->pktcapture_ops->txrx_pktcapture_mgmtpkt_process) + return QDF_STATUS_E_INVAL; + + return soc->ops->pktcapture_ops->txrx_pktcapture_mgmtpkt_process( + soc, + pdev_id, + txrx_status, + nbuf, + status); +} +#else +static inline uint8_t +cdp_get_packet_capture_mode(ol_txrx_soc_handle soc, uint8_t pdev_id) +{ + return 0; +} + +static inline void +cdp_pktcapture_record_channel(ol_txrx_soc_handle soc, + uint8_t pdev_id, + int chan_num) +{ +} +#endif /* WLAN_FEATURE_PKT_CAPTURE */ + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mon_struct.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mon_struct.h new file mode 100644 index 0000000000000000000000000000000000000000..edb12c95eda99f0ee16572541f92f32ae13be2aa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mon_struct.h @@ -0,0 +1,244 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * @file cdp_txrx_mon_struct.h + * @brief Define the monitor mode API structure + * shared by data path and the OS interface module + */ + +#ifndef _CDP_TXRX_MON_STRUCT_H_ +#define _CDP_TXRX_MON_STRUCT_H_ +/* XXX not really a mode; there are really multiple PHY's */ +enum cdp_mon_phymode { + /* autoselect */ + CDP_IEEE80211_MODE_AUTO = 0, + /* 5GHz, OFDM */ + CDP_IEEE80211_MODE_11A = 1, + /* 2GHz, CCK */ + CDP_IEEE80211_MODE_11B = 2, + /* 2GHz, OFDM */ + CDP_IEEE80211_MODE_11G = 3, + /* 2GHz, GFSK */ + CDP_IEEE80211_MODE_FH = 4, + /* 5GHz, OFDM, 2x clock dynamic turbo */ + CDP_IEEE80211_MODE_TURBO_A = 5, + /* 2GHz, OFDM, 2x clock dynamic turbo */ + CDP_IEEE80211_MODE_TURBO_G = 6, + /* 5Ghz, HT20 */ + CDP_IEEE80211_MODE_11NA_HT20 = 7, + /* 2Ghz, HT20 */ + CDP_IEEE80211_MODE_11NG_HT20 = 8, + /* 5Ghz, HT40 (ext ch +1) */ + CDP_IEEE80211_MODE_11NA_HT40PLUS = 9, + /* 5Ghz, HT40 (ext ch -1) */ + CDP_IEEE80211_MODE_11NA_HT40MINUS = 10, + /* 2Ghz, HT40 (ext ch +1) */ + CDP_IEEE80211_MODE_11NG_HT40PLUS = 11, + /* 2Ghz, HT40 (ext ch -1) */ + CDP_IEEE80211_MODE_11NG_HT40MINUS = 12, + /* 2Ghz, Auto HT40 */ + CDP_IEEE80211_MODE_11NG_HT40 = 13, + /* 5Ghz, Auto HT40 */ + CDP_IEEE80211_MODE_11NA_HT40 = 14, + /* 5Ghz, VHT20 */ + CDP_IEEE80211_MODE_11AC_VHT20 = 15, + /* 5Ghz, VHT40 (Ext ch +1) */ + CDP_IEEE80211_MODE_11AC_VHT40PLUS = 16, + /* 5Ghz VHT40 (Ext ch -1) */ + CDP_IEEE80211_MODE_11AC_VHT40MINUS = 17, + /* 5Ghz, VHT40 */ + CDP_IEEE80211_MODE_11AC_VHT40 = 18, + /* 5Ghz, VHT80 */ + CDP_IEEE80211_MODE_11AC_VHT80 = 19, + /* 5Ghz, VHT160 */ + CDP_IEEE80211_MODE_11AC_VHT160 = 20, + /* 5Ghz, VHT80_80 */ + CDP_IEEE80211_MODE_11AC_VHT80_80 = 21, +}; + +enum { + CDP_PKT_TYPE_OFDM = 0, + CDP_PKT_TYPE_CCK, + CDP_PKT_TYPE_HT, + CDP_PKT_TYPE_VHT, + CDP_PKT_TYPE_HE, +}; + +enum { + CDP_SGI_0_8_US = 0, + CDP_SGI_0_4_US, + CDP_SGI_1_6_US, + CDP_SGI_3_2_US, +}; + +enum { + CDP_RX_TYPE_SU = 0, + CDP_RX_TYPE_MU_MIMO, + CDP_RX_TYPE_MU_OFDMA, + CDP_RX_TYPE_MU_OFDMA_MIMO, +}; + +enum { + CDP_FULL_RX_BW_20 = 0, + CDP_FULL_RX_BW_40, + CDP_FULL_RX_BW_80, + CDP_FULL_RX_BW_160, +}; + +struct cdp_mon_status { + /* bss color value 1-63 used for update on ppdu_desc bsscolor */ + uint8_t bsscolor; + int rs_numchains; + int rs_flags; +#define IEEE80211_RX_FCS_ERROR 0x01 +#define IEEE80211_RX_MIC_ERROR 0x02 +#define IEEE80211_RX_DECRYPT_ERROR 0x04 +/* holes in flags here between, ATH_RX_XXXX to IEEE80211_RX_XXX */ +#define IEEE80211_RX_KEYMISS 0x200 + int rs_rssi; /* RSSI (noise floor ajusted) */ + int rs_abs_rssi; /* absolute RSSI */ + int rs_datarate; /* data rate received */ + int rs_rateieee; + int rs_ratephy1; + int rs_ratephy2; + int rs_ratephy3; + +/* Keep the same as ATH_MAX_ANTENNA */ +#define IEEE80211_MAX_ANTENNA 3 + /* RSSI (noise floor ajusted) */ + u_int8_t rs_rssictl[IEEE80211_MAX_ANTENNA]; + /* RSSI (noise floor ajusted) */ + u_int8_t rs_rssiextn[IEEE80211_MAX_ANTENNA]; + /* rs_rssi is valid or not */ + u_int8_t rs_isvalidrssi; + + enum cdp_mon_phymode rs_phymode; + int rs_freq; + + union { + u_int8_t data[8]; + u_int64_t tsf; + } rs_tstamp; + + /* + * Detail channel structure of recv frame. + * It could be NULL if not available + */ + + +#ifdef ATH_SUPPORT_AOW + u_int16_t rs_rxseq; /* WLAN Sequence number */ +#endif +#ifdef ATH_VOW_EXT_STATS + /* Lower 16 bits holds the udp checksum offset in the data pkt */ + u_int32_t vow_extstats_offset; + /* Higher 16 bits contains offset in the data pkt at which vow + * ext stats are embedded + */ +#endif + u_int8_t rs_isaggr; + u_int8_t rs_isapsd; + int16_t rs_noisefloor; + u_int16_t rs_channel; +#ifdef ATH_SUPPORT_TxBF + u_int32_t rs_rpttstamp; /* txbf report time stamp*/ +#endif + + /* The following counts are meant to assist in stats calculation. + * These variables are incremented only in specific situations, and + * should not be relied upon for any purpose other than the original + * stats related purpose they have been introduced for. + */ + + u_int16_t rs_cryptodecapcount; /* Crypto bytes decapped/demic'ed. */ + u_int8_t rs_padspace; /* No. of padding bytes present after + header in wbuf. */ + u_int8_t rs_qosdecapcount; /* QoS/HTC bytes decapped. */ + + /* End of stats calculation related counts. */ + + /* + * uint8_t rs_lsig[IEEE80211_LSIG_LEN]; + * uint8_t rs_htsig[IEEE80211_HTSIG_LEN]; + * uint8_t rs_servicebytes[IEEE80211_SB_LEN]; + * uint8_t rs_fcs_error; + */ + + /* cdp convergence monitor mode status */ + union { + u_int8_t cdp_data[8]; + u_int64_t cdp_tsf; + } cdp_rs_tstamp; + + uint8_t cdp_rs_pream_type; + uint32_t cdp_rs_user_rssi; + uint8_t cdp_rs_stbc; + uint8_t cdp_rs_sgi; + uint32_t cdf_rs_rate_mcs; + uint32_t cdp_rs_reception_type; + uint32_t cdp_rs_bw; + uint32_t cdp_rs_nss; + uint8_t cdp_rs_fcs_err; + +}; + +enum { + CDP_MON_PPDU_START = 0, + CDP_MON_PPDU_END, +}; + +#define MAX_PPDU_ID_HIST 128 + +/** + * struct cdp_pdev_mon_stats + * @status_ppdu_state: state on PPDU start and end + * @status_ppdu_start: status ring PPDU start TLV count + * @status_ppdu_end: status ring PPDU end TLV count + * @status_ppdu_compl: status ring matching start and end count on PPDU + * @status_ppdu_start_mis: status ring missing start TLV count on PPDU + * @status_ppdu_end_mis: status ring missing end TLV count on PPDU + * @status_ppdu_done: status ring PPDU done TLV count + * @dest_ppdu_done: destination ring PPDU count + * @dest_mpdu_done: destination ring MPDU count + * @dup_mon_linkdesc_cnt: duplicate link descriptor indications from HW + * @dup_mon_buf_cnt: duplicate buffer indications from HW + * @tlv_tag_status_err: status not correct in the tlv tag + */ +struct cdp_pdev_mon_stats { +#ifndef REMOVE_MON_DBG_STATS + uint32_t status_ppdu_state; + uint32_t status_ppdu_start; + uint32_t status_ppdu_end; + uint32_t status_ppdu_compl; + uint32_t status_ppdu_start_mis; + uint32_t status_ppdu_end_mis; +#endif + uint32_t status_ppdu_done; + uint32_t dest_ppdu_done; + uint32_t dest_mpdu_done; + uint32_t dest_mpdu_drop; + uint32_t dup_mon_linkdesc_cnt; + uint32_t dup_mon_buf_cnt; + uint32_t stat_ring_ppdu_id_hist[MAX_PPDU_ID_HIST]; + uint32_t dest_ring_ppdu_id_hist[MAX_PPDU_ID_HIST]; + uint32_t ppdu_id_hist_idx; + uint32_t mon_rx_dest_stuck; + uint32_t tlv_tag_status_err; +}; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ocb.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ocb.h new file mode 100644 index 0000000000000000000000000000000000000000..abab53af194eea28df204fd2c65d589d4e173008 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ocb.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _CDP_TXRX_OCB_H_ +#define _CDP_TXRX_OCB_H_ +#include +#include "cdp_txrx_handle.h" +/** + * cdp_set_ocb_chan_info() - set OCB channel info to vdev. + * @soc - data path soc handle + * @vdev_id: vdev_id corresponding to vdev start + * @ocb_set_chan: OCB channel information to be set in vdev. + * + * Return: NONE + */ +static inline void +cdp_set_ocb_chan_info(ol_txrx_soc_handle soc, uint8_t vdev_id, + struct ol_txrx_ocb_set_chan ocb_set_chan) +{ + if (!soc || !soc->ops || !soc->ops->ocb_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->ocb_ops->set_ocb_chan_info) + soc->ops->ocb_ops->set_ocb_chan_info(soc, vdev_id, + ocb_set_chan); + +} +/** + * cdp_get_ocb_chan_info() - return handle to vdev ocb_channel_info + * @soc - data path soc handle + * @vdev_id: vdev_id corresponding to vdev start + * + * Return: handle to struct ol_txrx_ocb_chan_info + */ +static inline struct ol_txrx_ocb_chan_info * +cdp_get_ocb_chan_info(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops || !soc->ops->ocb_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return NULL; + } + + if (soc->ops->ocb_ops->get_ocb_chan_info) + return soc->ops->ocb_ops->get_ocb_chan_info(soc, vdev_id); + + return NULL; +} +#endif /* _CDP_TXRX_OCB_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ops.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..243f1a87d503f67d664a120b7bad045878825412 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ops.h @@ -0,0 +1,1693 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * @file cdp_txrx_ops.h + * @brief Define the host data path converged API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_CMN_OPS_H_ +#define _CDP_TXRX_CMN_OPS_H_ + +#include +#include +#include "cdp_txrx_handle.h" +#include +#include "wlan_objmgr_psoc_obj.h" +#include +#include + +#ifdef IPA_OFFLOAD +#ifdef CONFIG_IPA_WDI_UNIFIED_API +#include +#else +#include +#endif +#endif + +/** + * bitmap values to indicate special handling of peer_delete + */ +#define CDP_PEER_DELETE_NO_SPECIAL 0 +#define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1 + +struct hif_opaque_softc; + +/* same as ieee80211_nac_param */ +enum cdp_nac_param_cmd { + /* IEEE80211_NAC_PARAM_ADD */ + CDP_NAC_PARAM_ADD = 1, + /* IEEE80211_NAC_PARAM_DEL */ + CDP_NAC_PARAM_DEL, + /* IEEE80211_NAC_PARAM_LIST */ + CDP_NAC_PARAM_LIST, +}; + +/** + * enum vdev_peer_protocol_enter_exit - whether ingress or egress + * @CDP_VDEV_PEER_PROTOCOL_IS_INGRESS: ingress + * @CDP_VDEV_PEER_PROTOCOL_IS_EGRESS: egress + * + * whether ingress or egress + */ +enum vdev_peer_protocol_enter_exit { + CDP_VDEV_PEER_PROTOCOL_IS_INGRESS, + CDP_VDEV_PEER_PROTOCOL_IS_EGRESS +}; + +/** + * enum vdev_peer_protocol_tx_rx - whether tx or rx + * @CDP_VDEV_PEER_PROTOCOL_IS_TX: tx + * @CDP_VDEV_PEER_PROTOCOL_IS_RX: rx + * + * whether tx or rx + */ +enum vdev_peer_protocol_tx_rx { + CDP_VDEV_PEER_PROTOCOL_IS_TX, + CDP_VDEV_PEER_PROTOCOL_IS_RX +}; + +/****************************************************************************** + * + * Control Interface (A Interface) + * + *****************************************************************************/ + +struct cdp_cmn_ops { + + QDF_STATUS (*txrx_soc_attach_target)(ol_txrx_soc_handle soc); + + int (*txrx_pdev_attach_target)(ol_txrx_soc_handle soc, uint8_t pdev_id); + + QDF_STATUS (*txrx_vdev_attach) + (struct cdp_soc_t *soc, uint8_t pdev_id, uint8_t *mac, + uint8_t vdev_id, enum wlan_op_mode op_mode, + enum wlan_op_subtype subtype); + + QDF_STATUS + (*txrx_vdev_detach)(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, + ol_txrx_vdev_delete_cb callback, + void *cb_context); + + QDF_STATUS (*txrx_pdev_attach) + (ol_txrx_soc_handle soc, HTC_HANDLE htc_pdev, + qdf_device_t osdev, uint8_t pdev_id); + + int (*txrx_pdev_post_attach)(struct cdp_soc_t *soc, uint8_t pdev_id); + + void + (*txrx_pdev_pre_detach)(struct cdp_soc_t *soc, uint8_t pdev_id, + int force); + + QDF_STATUS + (*txrx_pdev_detach)(struct cdp_soc_t *psoc, uint8_t pdev_id, + int force); + + /** + * txrx_pdev_deinit() - Deinitialize pdev and dp ring memory + * @soc: soc dp handle + * @pdev_id: id of Dp pdev handle + * @force: Force deinit or not + * + * Return: QDF_STATUS + */ + QDF_STATUS + (*txrx_pdev_deinit)(struct cdp_soc_t *soc, uint8_t pdev_id, + int force); + + QDF_STATUS + (*txrx_peer_create) + (ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t *peer_mac_addr); + + QDF_STATUS + (*txrx_peer_setup)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + uint8_t *peer_mac); + + QDF_STATUS + (*txrx_cp_peer_del_response) + (ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t *peer_mac_addr); + + QDF_STATUS + (*txrx_peer_teardown) + (struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *peer_mac); + + int (*txrx_peer_add_ast) + (ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac, + uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, + uint32_t flags); + + int (*txrx_peer_update_ast) + (ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac, + uint8_t *mac_addr, uint32_t flags); + + bool (*txrx_peer_get_ast_info_by_soc) + (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, + struct cdp_ast_entry_info *ast_entry_info); + + bool (*txrx_peer_get_ast_info_by_pdev) + (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, + uint8_t pdev_id, + struct cdp_ast_entry_info *ast_entry_info); + + QDF_STATUS (*txrx_peer_ast_delete_by_soc) + (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, + txrx_ast_free_cb callback, + void *cookie); + + QDF_STATUS (*txrx_peer_ast_delete_by_pdev) + (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr, + uint8_t pdev_id, + txrx_ast_free_cb callback, + void *cookie); + + QDF_STATUS + (*txrx_peer_delete)(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac, uint32_t bitmap); + + QDF_STATUS (*txrx_set_monitor_mode)(struct cdp_soc_t *soc, + uint8_t vdev_id, + uint8_t smart_monitor); + void (*txrx_peer_delete_sync)(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac, + QDF_STATUS(*delete_cb)( + uint8_t vdev_id, + uint32_t peerid_cnt, + uint16_t *peerid_list), + uint32_t bitmap); + + void (*txrx_peer_unmap_sync_cb_set)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + ol_txrx_peer_unmap_sync_cb + peer_unmap_sync); + + QDF_STATUS + (*txrx_get_peer_mac_from_peer_id) + (struct cdp_soc_t *cdp_soc, + uint32_t peer_id, uint8_t *peer_mac); + + void + (*txrx_vdev_tx_lock)(struct cdp_soc_t *soc, uint8_t vdev_id); + + void + (*txrx_vdev_tx_unlock)(struct cdp_soc_t *soc, uint8_t vdev_id); + + QDF_STATUS + (*txrx_ath_getstats)(struct cdp_soc_t *soc, uint8_t id, + struct cdp_dev_stats *stats, uint8_t type); + + QDF_STATUS + (*txrx_set_gid_flag)(struct cdp_soc_t *soc, uint8_t pdev_id, + u_int8_t *mem_status, + u_int8_t *user_position); + + uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_soc_t *soc, + uint8_t pdev_id); + + QDF_STATUS + (*txrx_if_mgmt_drain)(struct cdp_soc_t *soc, uint8_t pdev_id, + int force); + + QDF_STATUS + (*txrx_set_curchan)(struct cdp_soc_t *soc, uint8_t pdev_id, + uint32_t chan_mhz); + + QDF_STATUS + (*txrx_set_privacy_filters) + (struct cdp_soc_t *soc, uint8_t vdev_id, void *filter, + uint32_t num); + + uint32_t (*txrx_get_cfg)(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg); + + /******************************************************************** + * Data Interface (B Interface) + ********************************************************************/ + + QDF_STATUS + (*txrx_vdev_register)(struct cdp_soc_t *soc, uint8_t vdev_id, + ol_osif_vdev_handle osif_vdev, + struct ol_txrx_ops *txrx_ops); + + int (*txrx_mgmt_send)(struct cdp_soc_t *soc, uint8_t vdev_id, + qdf_nbuf_t tx_mgmt_frm, uint8_t type); + + int (*txrx_mgmt_send_ext)(struct cdp_soc_t *soc, uint8_t vdev_id, + qdf_nbuf_t tx_mgmt_frm, uint8_t type, + uint8_t use_6mbps, uint16_t chanfreq); + + /** + * ol_txrx_mgmt_tx_cb - tx management delivery notification + * callback function + */ + + QDF_STATUS + (*txrx_mgmt_tx_cb_set)(struct cdp_soc_t *soc, uint8_t pdev_id, + uint8_t type, + ol_txrx_mgmt_tx_cb download_cb, + ol_txrx_mgmt_tx_cb ota_ack_cb, + void *ctxt); + + /** + * ol_txrx_data_tx_cb - Function registered with the data path + * that is called when tx frames marked as "no free" are + * done being transmitted + */ + + void (*txrx_data_tx_cb_set)(struct cdp_soc_t *soc, uint8_t vdev_id, + ol_txrx_data_tx_cb callback, void *ctxt); + + qdf_nbuf_t (*tx_send_exc) + (ol_txrx_soc_handle soc, uint8_t vdev_id, qdf_nbuf_t msdu_list, + struct cdp_tx_exception_metadata *tx_exc_metadata); + + /******************************************************************* + * Statistics and Debugging Interface (C Interface) + ********************************************************************/ + + int (*txrx_aggr_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id, + int max_subfrms_ampdu, + int max_subfrms_amsdu); + + A_STATUS + (*txrx_fw_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id, + struct ol_txrx_stats_req *req, + bool per_vdev, bool response_expected); + + int (*txrx_debug)(struct cdp_soc_t *soc, uint8_t vdev_id, + int debug_specs); + + QDF_STATUS + (*txrx_fw_stats_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t cfg_stats_type, uint32_t cfg_val); + + void (*txrx_print_level_set)(unsigned level); + + /** + * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev + * @soc: datapath soc handle + * @vdev_id: vdev id + * + * Return: vdev mac address + */ + uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_soc_t *soc, + uint8_t vdev_id); + + /** + * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev + * @soc: datapath soc handle + * @vdev_id: vdev id + * + * Return: Handle to control pdev + */ + struct cdp_cfg *(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_soc_t *soc, + uint8_t vdev_id); + + /** + * txrx_get_mon_vdev_from_pdev() - Return monitor mode vdev + * @soc: datapath soc handle + * @pdev: pdev id + * + * Return: vdev_id + */ + uint8_t (*txrx_get_mon_vdev_from_pdev)(struct cdp_soc_t *soc, + uint8_t pdev_id); + + void (*txrx_soc_detach)(struct cdp_soc_t *soc); + + /** + * txrx_soc_deinit() - Deinitialize dp soc and dp ring memory + * @soc: Opaque Dp handle + * + * Return None + */ + void (*txrx_soc_deinit)(struct cdp_soc_t *soc); + + /** + * txrx_soc_init() - Initialize dp soc and dp ring memory + * @soc: Opaque Dp handle + * @ctrl_psoc: Opaque Cp handle + * @htchdl: Opaque htc handle + * @hifhdl: Opaque hif handle + * + * Return: None + */ + void *(*txrx_soc_init)(struct cdp_soc_t *soc, + struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + struct hif_opaque_softc *hif_handle, + HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, + struct ol_if_ops *ol_ops, uint16_t device_id); + + /** + * txrx_tso_soc_attach() - TSO attach handler triggered during + * dynamic tso activation + * @soc: Opaque Dp handle + * + * Return: QDF status + */ + QDF_STATUS (*txrx_tso_soc_attach)(struct cdp_soc_t *soc); + + /** + * txrx_tso_soc_detach() - TSO detach handler triggered during + * dynamic tso de-activation + * @soc: Opaque Dp handle + * + * Return: QDF status + */ + QDF_STATUS (*txrx_tso_soc_detach)(struct cdp_soc_t *soc); + int (*addba_resp_tx_completion)(struct cdp_soc_t *cdp_soc, + uint8_t *peer_mac, + uint16_t vdev_id, uint8_t tid, + int status); + + int (*addba_requestprocess)(struct cdp_soc_t *cdp_soc, + uint8_t *peer_mac, + uint16_t vdev_id, + uint8_t dialogtoken, + uint16_t tid, uint16_t batimeout, + uint16_t buffersize, + uint16_t startseqnum); + + QDF_STATUS + (*addba_responsesetup)(struct cdp_soc_t *cdp_soc, + uint8_t *peer_mac, + uint16_t vdev_id, uint8_t tid, + uint8_t *dialogtoken, uint16_t *statuscode, + uint16_t *buffersize, uint16_t *batimeout); + + int (*delba_process)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, + uint16_t vdev_id, int tid, uint16_t reasoncode); + + /** + * delba_tx_completion() - Indicate delba tx status + * @cdp_soc: soc handle + * @peer_mac: Peer mac address + * @vdev_id: vdev id + * @tid: Tid number + * @status: Tx completion status + * + * Return: 0 on Success, 1 on failure + */ + int (*delba_tx_completion)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, + uint16_t vdev_id, + uint8_t tid, int status); + + QDF_STATUS + (*set_addba_response)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, + uint16_t vdev_id, uint8_t tid, + uint16_t statuscode); + + QDF_STATUS + (*set_vdev_dscp_tid_map)(struct cdp_soc_t *soc_handle, + uint8_t vdev_id, uint8_t map_id); + int (*txrx_get_total_per)(struct cdp_soc_t *soc, uint8_t pdev_id); + + void (*flush_cache_rx_queue)(void); + + QDF_STATUS (*set_pdev_dscp_tid_map)(struct cdp_soc_t *soc_handle, + uint8_t pdev_id, + uint8_t map_id, + uint8_t tos, uint8_t tid); + + QDF_STATUS (*txrx_stats_request)(struct cdp_soc_t *soc_handle, + uint8_t vdev_id, + struct cdp_txrx_stats_req *req); + + QDF_STATUS (*display_stats)(struct cdp_soc_t *psoc, uint16_t value, + enum qdf_stats_verbosity_level level); + + QDF_STATUS (*txrx_intr_attach)(struct cdp_soc_t *soc_handle); + void (*txrx_intr_detach)(struct cdp_soc_t *soc_handle); + QDF_STATUS (*set_pn_check)(struct cdp_soc_t *soc_handle, + uint8_t vdev_id, uint8_t *peermac, + enum cdp_sec_type sec_type, + uint32_t *rx_pn); + + QDF_STATUS(*set_key_sec_type)(struct cdp_soc_t *soc_handle, + uint8_t vdev_id, uint8_t *peermac, + enum cdp_sec_type sec_type, + bool is_unicast); + + QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc, + struct cdp_config_params *params); + + void *(*get_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id); + void (*set_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id, + void *dp_hdl); + + void *(*get_vdev_dp_ext_txrx_handle)(struct cdp_soc_t *soc, + uint8_t vdev_id); + QDF_STATUS (*set_vdev_dp_ext_txrx_handle)(struct cdp_soc_t *soc, + uint8_t vdev_id, + uint16_t size); + + void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle); + void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle, + void *dp_txrx_handle); + + QDF_STATUS (*map_pdev_to_lmac)(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint32_t lmac_id); + + QDF_STATUS (*handle_mode_change)(ol_txrx_soc_handle soc, + uint8_t pdev_id, uint32_t lmac_id); + + QDF_STATUS (*set_pdev_status_down)(struct cdp_soc_t *soc_handle, + uint8_t pdev_id, bool is_pdev_down); + + QDF_STATUS (*txrx_peer_reset_ast) + (ol_txrx_soc_handle soc, uint8_t *ast_macaddr, + uint8_t *peer_macaddr, uint8_t vdev_id); + + QDF_STATUS (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc, + uint8_t vdev_id); + + void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc); + void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle, + uint8_t ac, uint32_t value); + void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle, + uint8_t ac, uint32_t *value); + + QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc, + uint32_t num_peers, + uint32_t max_ast_index, + bool peer_map_unmap_v2); + + ol_txrx_tx_fp tx_send; + /** + * txrx_get_os_rx_handles_from_vdev() - Return function, osif vdev + * to deliver pkt to stack. + * @soc: datapath soc handle + * @vdev: vdev id + * @stack_fn: pointer to - function pointer to deliver RX pkt to stack + * @osif_vdev: pointer to - osif vdev to deliver RX packet to. + */ + void (*txrx_get_os_rx_handles_from_vdev) + (ol_txrx_soc_handle soc, + uint8_t vdev_id, + ol_txrx_rx_fp *stack_fn, + ol_osif_vdev_handle *osif_vdev); + + void (*set_rate_stats_ctx)(struct cdp_soc_t *soc, + void *ctx); + + int (*txrx_classify_update) + (struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t skb, + enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class); + + bool (*get_dp_capabilities)(struct cdp_soc_t *soc, + enum cdp_capabilities dp_caps); + void* (*get_rate_stats_ctx)(struct cdp_soc_t *soc); + QDF_STATUS (*txrx_peer_flush_rate_stats)(struct cdp_soc_t *soc, + uint8_t pdev_id, + void *buf); + + QDF_STATUS (*txrx_flush_rate_stats_request)(struct cdp_soc_t *soc, + uint8_t pdev_id); + QDF_STATUS (*set_pdev_pcp_tid_map)(struct cdp_soc_t *soc, + uint8_t pdev_id, + uint8_t pcp, uint8_t tid); + QDF_STATUS (*set_vdev_pcp_tid_map)(struct cdp_soc_t *soc, + uint8_t vdev_id, + uint8_t pcp, uint8_t tid); +#ifdef QCA_MULTIPASS_SUPPORT + QDF_STATUS (*set_vlan_groupkey)(struct cdp_soc_t *soc, uint8_t vdev_id, + uint16_t vlan_id, uint16_t group_key); +#endif + + uint16_t (*get_peer_mac_list) + (ol_txrx_soc_handle soc, uint8_t vdev_id, + u_int8_t newmac[][QDF_MAC_ADDR_SIZE], uint16_t mac_cnt); +}; + +struct cdp_ctrl_ops { + + int + (*txrx_mempools_attach)(ol_txrx_soc_handle dp_soc); + int + (*txrx_update_filter_neighbour_peers)( + struct cdp_soc_t *soc, uint8_t vdev_id, + uint32_t cmd, uint8_t *macaddr); + + /* Is this similar to ol_txrx_peer_state_update() in MCL */ + /** + * @brief Update the authorize peer object at association time + * @details + * For the host-based implementation of rate-control, it + * updates the peer/node-related parameters within rate-control + * context of the peer at association. + * + * @param soc_hdl - pointer to the soc object + * @param vdev_id - id of the virtual object + * @param peer_mac - mac address of the node's object + * @authorize - either to authorize or unauthorize peer + * + * @return QDF_STATUS + */ + QDF_STATUS + (*txrx_peer_authorize)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + uint8_t *peer_mac, + u_int32_t authorize); + + void (*tx_flush_buffers)(struct cdp_soc_t *soc, uint8_t vdev_id); + + int (*txrx_is_target_ar900b)(struct cdp_soc_t *soc_hdl); + + QDF_STATUS + (*txrx_set_vdev_param)(struct cdp_soc_t *soc, uint8_t vdev_id, + enum cdp_vdev_param_type param, + cdp_config_param_type val); + + /** + * @brief Set the reo dest ring num of the radio + * @details + * Set the reo destination ring no on which we will receive + * pkts for this radio. + * + * @txrx_soc - soc handle + * @param pdev_id - id of physical device + * @return the reo destination ring number + * @param reo_dest_ring_num - value ranges between 1 - 4 + */ + QDF_STATUS (*txrx_set_pdev_reo_dest)( + struct cdp_soc_t *txrx_soc, + uint8_t pdev_id, + enum cdp_host_reo_dest_ring reo_dest_ring_num); + + /** + * @brief Get the reo dest ring num of the radio + * @details + * Get the reo destination ring no on which we will receive + * pkts for this radio. + * + * @txrx_soc - soc handle + * @param pdev_id - id of physical device + * @return the reo destination ring number + */ + enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)( + struct cdp_soc_t *txrx_soc, + uint8_t pdev_id); + + int (*txrx_wdi_event_sub)(struct cdp_soc_t *soc, uint8_t pdev_id, + wdi_event_subscribe *event_cb_sub, + uint32_t event); + + int (*txrx_wdi_event_unsub)(struct cdp_soc_t *soc, uint8_t pdev_id, + wdi_event_subscribe *event_cb_sub, + uint32_t event); + + int (*txrx_get_sec_type)(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t *peer_mac, uint8_t sec_idx); + + QDF_STATUS + (*txrx_update_mgmt_txpow_vdev)(struct cdp_soc_t *soc, + uint8_t vdev_id, + uint8_t subtype, uint8_t tx_power); + + /** + * txrx_set_pdev_param() - callback to set pdev parameter + * @soc: opaque soc handle + * @pdev_id:id of data path pdev handle + * @val: value of pdev_tx_capture + * + * Return: status: 0 - Success, non-zero: Failure + */ + QDF_STATUS (*txrx_set_pdev_param)(struct cdp_soc_t *soc, + uint8_t pdev_id, + enum cdp_pdev_param_type type, + cdp_config_param_type val); + + QDF_STATUS (*txrx_get_pdev_param)(struct cdp_soc_t *soc, + uint8_t pdev_id, + enum cdp_pdev_param_type type, + cdp_config_param_type *val); + + QDF_STATUS (*txrx_set_peer_param)(struct cdp_soc_t *soc, + uint8_t vdev_id, uint8_t *peer_mac, + enum cdp_peer_param_type param, + cdp_config_param_type val); + + QDF_STATUS (*txrx_get_peer_param)(struct cdp_soc_t *soc, + uint8_t vdev_id, uint8_t *peer_mac, + enum cdp_peer_param_type param, + cdp_config_param_type *val); + + void * (*txrx_get_pldev)(struct cdp_soc_t *soc, uint8_t pdev_id); +#ifdef VDEV_PEER_PROTOCOL_COUNT + void (*txrx_peer_protocol_cnt)(struct cdp_soc_t *soc, + int8_t vdev_id, + qdf_nbuf_t nbuf, + bool is_egress, + bool is_rx); +#endif +#ifdef ATH_SUPPORT_NAC_RSSI + QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_soc_t *cdp_soc, + uint8_t vdev_id, + enum cdp_nac_param_cmd cmd, + char *bssid, + char *client_macaddr, + uint8_t chan_num); + + QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_soc_t *cdp_soc, + uint8_t vdev_id, + char *macaddr, + uint8_t *rssi); +#endif + QDF_STATUS + (*set_key)(struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *mac, + bool is_unicast, uint32_t *key); + + QDF_STATUS (*txrx_get_vdev_param)(struct cdp_soc_t *soc, + uint8_t vdev_id, + enum cdp_vdev_param_type param, + cdp_config_param_type *val); + int (*enable_peer_based_pktlog)(struct cdp_soc_t *cdp_soc, + uint8_t pdev_id, + uint8_t *macaddr, uint8_t enb_dsb); + + QDF_STATUS + (*calculate_delay_stats)(struct cdp_soc_t *cdp_soc, + uint8_t vdev_id, qdf_nbuf_t nbuf); +#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG + QDF_STATUS (*txrx_update_pdev_rx_protocol_tag)( + struct cdp_soc_t *soc, uint8_t pdev_id, + uint32_t protocol_mask, uint16_t protocol_type, + uint16_t tag); +#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS + void (*txrx_dump_pdev_rx_protocol_tag_stats)( + struct cdp_soc_t *soc, uint8_t pdev_id, + uint16_t protocol_type); +#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */ +#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */ +#ifdef WLAN_SUPPORT_RX_FLOW_TAG + QDF_STATUS (*txrx_set_rx_flow_tag)( + struct cdp_soc_t *cdp_soc, uint8_t pdev_id, + struct cdp_rx_flow_info *flow_info); + QDF_STATUS (*txrx_dump_rx_flow_tag_stats)( + struct cdp_soc_t *cdp_soc, uint8_t pdev_id, + struct cdp_rx_flow_info *flow_info); +#endif /* WLAN_SUPPORT_RX_FLOW_TAG */ +#ifdef QCA_MULTIPASS_SUPPORT + void (*txrx_peer_set_vlan_id)(struct cdp_soc_t *cdp_soc, + uint8_t vdev_id, uint8_t *peer_mac, + uint16_t vlan_id); +#endif +#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH) + QDF_STATUS (*txrx_update_peer_pkt_capture_params)( + ol_txrx_soc_handle soc, uint8_t pdev_id, + bool is_rx_pkt_cap_enable, bool is_tx_pkt_cap_enable, + uint8_t *peer_mac); +#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */ + QDF_STATUS + (*txrx_set_psoc_param)(struct cdp_soc_t *soc, + enum cdp_psoc_param_type param, + cdp_config_param_type val); + + QDF_STATUS (*txrx_get_psoc_param)(ol_txrx_soc_handle soc, + enum cdp_psoc_param_type type, + cdp_config_param_type *val); +#ifdef VDEV_PEER_PROTOCOL_COUNT + /* + * Enable per-peer protocol counters + */ + void (*txrx_enable_peer_protocol_count)(struct cdp_soc_t *soc, + int8_t vdev_id, bool enable); + void (*txrx_set_peer_protocol_drop_mask)(struct cdp_soc_t *soc, + int8_t vdev_id, int mask); + int (*txrx_is_peer_protocol_count_enabled)(struct cdp_soc_t *soc, + int8_t vdev_id); + int (*txrx_get_peer_protocol_drop_mask)(struct cdp_soc_t *soc, + int8_t vdev_id); + +#endif +}; + +struct cdp_me_ops { + + void (*tx_me_alloc_descriptor)(struct cdp_soc_t *soc, + uint8_t pdev_id); + + void (*tx_me_free_descriptor)(struct cdp_soc_t *soc, uint8_t pdev_id); + + uint16_t (*tx_me_convert_ucast)(struct cdp_soc_t *soc, uint8_t vdev_id, + qdf_nbuf_t wbuf, u_int8_t newmac[][6], + uint8_t newmaccnt); +}; + +struct cdp_mon_ops { + + QDF_STATUS (*txrx_reset_monitor_mode) + (ol_txrx_soc_handle soc, uint8_t pdev_id, u_int8_t smart_monitor); + + QDF_STATUS (*txrx_deliver_tx_mgmt) + (struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf); + + /* HK advance monitor filter support */ + QDF_STATUS (*txrx_set_advance_monitor_filter) + (struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + struct cdp_monitor_filter *filter_val); +}; + +#ifdef WLAN_FEATURE_PKT_CAPTURE +struct cdp_pktcapture_ops { + void (*txrx_pktcapture_set_mode) + (struct cdp_soc_t *soc, + uint8_t pdev_id, + uint8_t mode); + + uint8_t (*txrx_pktcapture_get_mode) + (struct cdp_soc_t *soc, + uint8_t pdev_id); + + QDF_STATUS (*txrx_pktcapture_cb_register) + (struct cdp_soc_t *soc, + uint8_t pdev_id, + void *context, + QDF_STATUS(cb)(void *, qdf_nbuf_t)); + + QDF_STATUS (*txrx_pktcapture_cb_deregister) + (struct cdp_soc_t *soc, + uint8_t pdev_id); + + QDF_STATUS (*txrx_pktcapture_mgmtpkt_process) + (struct cdp_soc_t *soc, + uint8_t pdev_id, + struct mon_rx_status *txrx_status, + qdf_nbuf_t nbuf, uint8_t status); + + void (*txrx_pktcapture_record_channel) + (struct cdp_soc_t *soc, + uint8_t pdev_id, + int chan_no); +}; +#endif /* #ifdef WLAN_FEATURE_PKT_CAPTURE */ + +struct cdp_host_stats_ops { + int (*txrx_host_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id, + struct ol_txrx_stats_req *req); + + QDF_STATUS (*txrx_host_stats_clr)(struct cdp_soc_t *soc, + uint8_t vdev_id); + + QDF_STATUS + (*txrx_host_ce_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); + + int (*txrx_stats_publish)(struct cdp_soc_t *soc, uint8_t pdev_id, + struct cdp_stats_extd *buf); + /** + * @brief Enable enhanced stats functionality. + * + * @param soc - the soc handle + * @param pdev_id - pdev_id of pdev + * @return - QDF_STATUS + */ + QDF_STATUS (*txrx_enable_enhanced_stats)(struct cdp_soc_t *soc, + uint8_t pdev_id); + + /** + * @brief Disable enhanced stats functionality. + * + * @param soc - the soc handle + * @param pdev_id - pdev_id of pdev + * @return - QDF_STATUS + */ + QDF_STATUS (*txrx_disable_enhanced_stats)(struct cdp_soc_t *soc, + uint8_t pdev_id); + + QDF_STATUS + (*tx_print_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); + + QDF_STATUS + (*tx_rst_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); + + QDF_STATUS + (*tx_print_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); + + QDF_STATUS + (*tx_rst_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); + + QDF_STATUS + (*print_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); + + QDF_STATUS + (*rst_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); + + QDF_STATUS + (*txrx_host_me_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); + + QDF_STATUS + (*txrx_per_peer_stats)(struct cdp_soc_t *soc, uint8_t *addr); + + int (*txrx_host_msdu_ttl_stats)(struct cdp_soc_t *soc, uint8_t vdev_id, + struct ol_txrx_stats_req *req); + + int (*ol_txrx_update_peer_stats)(struct cdp_soc_t *soc, + uint8_t pdev_id, + uint8_t *addr, void *stats, + uint32_t last_tx_rate_mcs, + uint32_t stats_id); + + QDF_STATUS + (*get_fw_peer_stats)(struct cdp_soc_t *soc, uint8_t pdev_id, + uint8_t *addr, + uint32_t cap, uint32_t copy_stats); + + QDF_STATUS + (*get_htt_stats)(struct cdp_soc_t *soc, uint8_t pdev_id, + void *data, + uint32_t data_len); + QDF_STATUS + (*txrx_update_pdev_stats)(struct cdp_soc_t *soc, + uint8_t pdev_id, void *data, + uint16_t stats_id); + QDF_STATUS + (*txrx_get_peer_stats_param)(struct cdp_soc_t *soc, + uint8_t vdev_id, + uint8_t *peer_mac, + enum cdp_peer_stats_type type, + cdp_peer_stats_param_t *buf); + QDF_STATUS + (*txrx_get_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac, + struct cdp_peer_stats *peer_stats); + QDF_STATUS + (*txrx_reset_peer_ald_stats)(struct cdp_soc_t *soc, + uint8_t vdev_id, + uint8_t *peer_mac); + QDF_STATUS + (*txrx_reset_peer_stats)(struct cdp_soc_t *soc, + uint8_t vdev_id, uint8_t *peer_mac); + int + (*txrx_get_vdev_stats)(struct cdp_soc_t *soc, uint8_t vdev_id, + void *buf, bool is_aggregate); + int + (*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc, + void *data, uint32_t len, + uint32_t stats_id); + int + (*txrx_get_vdev_extd_stats)(struct cdp_soc_t *soc, + uint8_t vdev_id, + wmi_host_vdev_extd_stats *buffer); + QDF_STATUS + (*txrx_update_vdev_stats)(struct cdp_soc_t *soc, + uint8_t vdev_id, void *buf, + uint16_t stats_id); + int + (*txrx_get_radio_stats)(struct cdp_soc_t *soc, uint8_t pdev_id, + void *buf); + QDF_STATUS + (*txrx_get_pdev_stats)(struct cdp_soc_t *soc, uint8_t pdev_id, + struct cdp_pdev_stats *buf); + int + (*txrx_get_ratekbps)(int preamb, int mcs, + int htflag, int gintval); + + QDF_STATUS + (*txrx_update_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac, void *stats, + uint32_t last_tx_rate_mcs, + uint32_t stats_id); +}; + +struct cdp_wds_ops { + QDF_STATUS + (*txrx_set_wds_rx_policy)(struct cdp_soc_t *soc, uint8_t vdev_id, + u_int32_t val); + QDF_STATUS + (*txrx_wds_peer_tx_policy_update)(struct cdp_soc_t *soc, + uint8_t vdev_id, uint8_t *peer_mac, + int wds_tx_ucast, int wds_tx_mcast); + int (*vdev_set_wds)(struct cdp_soc_t *soc, uint8_t vdev_id, + uint32_t val); +}; + +struct cdp_raw_ops { + int (*txrx_get_nwifi_mode)(struct cdp_soc_t *soc, uint8_t vdev_id); + + QDF_STATUS + (*rsim_get_astentry)(struct cdp_soc_t *soc, uint8_t vdev_id, + qdf_nbuf_t *pnbuf, struct cdp_raw_ast *raw_ast); +}; + +#ifdef PEER_FLOW_CONTROL +struct cdp_pflow_ops { + uint32_t (*pflow_update_pdev_params)(struct cdp_soc_t *soc, + uint8_t pdev_id, + enum _ol_ath_param_t, + uint32_t, void *); +}; +#endif /* PEER_FLOW_CONTROL */ + +#define LRO_IPV4_SEED_ARR_SZ 5 +#define LRO_IPV6_SEED_ARR_SZ 11 + +/** + * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters + * @lro_enable: indicates whether rx_offld is enabled + * @tcp_flag: If the TCP flags from the packet do not match + * the values in this field after masking with TCP flags mask + * below, packet is not rx_offld eligible + * @tcp_flag_mask: field for comparing the TCP values provided + * above with the TCP flags field in the received packet + * @toeplitz_hash_ipv4: contains seed needed to compute the flow id + * 5-tuple toeplitz hash for ipv4 packets + * @toeplitz_hash_ipv6: contains seed needed to compute the flow id + * 5-tuple toeplitz hash for ipv6 packets + */ +struct cdp_lro_hash_config { + uint32_t lro_enable; + uint32_t tcp_flag:9, + tcp_flag_mask:9; + uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ]; + uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ]; +}; + +struct ol_if_ops { + void + (*peer_set_default_routing)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + uint8_t pdev_id, uint8_t *peer_macaddr, + uint8_t vdev_id, + bool hash_based, uint8_t ring_num); + QDF_STATUS + (*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + uint8_t pdev_id, + uint8_t vdev_id, uint8_t *peer_mac, + qdf_dma_addr_t hw_qdesc, int tid, + uint16_t queue_num, + uint8_t ba_window_size_valid, + uint16_t ba_window_size); + QDF_STATUS + (*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + uint8_t pdev_id, + uint8_t vdev_id, uint8_t *peer_macaddr, + uint32_t tid_mask); + int (*peer_unref_delete)(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t pdev_id, + uint8_t *peer_mac, + uint8_t *vdev_mac, enum wlan_op_mode opmode); + bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc); + int (*peer_add_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc, + uint8_t vdev_id, + uint8_t *peer_macaddr, + const uint8_t *dest_macaddr, + uint8_t *next_node_mac, + uint32_t flags, + uint8_t type); + int (*peer_update_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc, + uint8_t vdev_id, + uint8_t *dest_macaddr, + uint8_t *peer_macaddr, + uint32_t flags); + void (*peer_del_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc, + uint8_t vdev_id, + uint8_t *wds_macaddr, + uint8_t type); + QDF_STATUS + (*lro_hash_config)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t pdev_id, + struct cdp_lro_hash_config *rx_offld_hash); + + void (*update_dp_stats)(void *soc, void *stats, uint16_t id, + uint8_t type); +#ifdef FEATURE_NAC_RSSI + uint8_t (*rx_invalid_peer)(struct cdp_ctrl_objmgr_psoc *soc, + uint8_t pdev_id, void *msg); +#else + uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh); +#endif + + int (*peer_map_event)(struct cdp_ctrl_objmgr_psoc *psoc, + uint16_t peer_id, uint16_t hw_peer_id, + uint8_t vdev_id, uint8_t *peer_mac_addr, + enum cdp_txrx_ast_entry_type peer_type, + uint32_t tx_ast_hashidx); + int (*peer_unmap_event)(struct cdp_ctrl_objmgr_psoc *psoc, + uint16_t peer_id, + uint8_t vdev_id); + + int (*get_dp_cfg_param)(struct cdp_ctrl_objmgr_psoc *psoc, + enum cdp_cfg_param_type param_num); + + void (*rx_mic_error)(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t pdev_id, + struct cdp_rx_mic_err_info *info); + + bool (*rx_frag_tkip_demic)(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t vdev_id, uint8_t *peer_mac_addr, + qdf_nbuf_t nbuf, + uint16_t hdr_space); + + uint8_t (*freq_to_channel)(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t vdev_id, uint16_t freq); + +#ifdef ATH_SUPPORT_NAC_RSSI + int (*config_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t pdev_id, + u_int8_t vdev_id, + enum cdp_nac_param_cmd cmd, char *bssid, + char *client_macaddr, uint8_t chan_num); + + int + (*config_bssid_in_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t pdev_id, u_int8_t vdev_id, + enum cdp_nac_param_cmd cmd, + char *bssid, char *client_mac); +#endif + int (*peer_sta_kickout)(struct cdp_ctrl_objmgr_psoc *psoc, + uint16_t pdev_id, uint8_t *peer_macaddr); + + /** + * send_delba() - Send delba to peer + * @psoc: Objmgr soc handle + * @vdev_id: dp vdev id + * @peer_macaddr: Peer mac addr + * @tid: Tid number + * + * Return: 0 for success, non-zero for failure + */ + int (*send_delba)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t vdev_id, + uint8_t *peer_macaddr, uint8_t tid, + uint8_t reason_code); + + int + (*peer_delete_multiple_wds_entries)(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t vdev_id, + uint8_t *dest_macaddr, + uint8_t *peer_macaddr, + uint32_t flags); + int + (*pdev_update_lmac_n_target_pdev_id)(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t *pdev_id, + uint8_t *lmac_id, + uint8_t *target_pdev_id); + bool (*is_roam_inprogress)(uint32_t vdev_id); + enum QDF_GLOBAL_MODE (*get_con_mode)(void); +#ifdef QCA_PEER_MULTIQ_SUPPORT + int (*peer_ast_flowid_map)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle, + uint16_t peer_id, uint8_t vdev_id, + uint8_t *peer_mac_addr); +#endif +#ifdef DP_MEM_PRE_ALLOC + void *(*dp_prealloc_get_context)(uint32_t ctxt_type); + + QDF_STATUS(*dp_prealloc_put_context)(uint32_t ctxt_type, void *vaddr); + void *(*dp_prealloc_get_consistent)(uint32_t *size, + void **base_vaddr_unaligned, + qdf_dma_addr_t *paddr_unaligned, + qdf_dma_addr_t *paddr_aligned, + uint32_t align, + uint32_t ring_type); + void (*dp_prealloc_put_consistent)(qdf_size_t size, + void *vaddr_unligned, + qdf_dma_addr_t paddr); + void (*dp_get_multi_pages)(uint32_t desc_type, + size_t element_size, + uint16_t element_num, + struct qdf_mem_multi_page_t *pages, + bool cacheable); + void (*dp_put_multi_pages)(uint32_t desc_type, + struct qdf_mem_multi_page_t *pages); +#endif + int (*dp_rx_get_pending)(ol_txrx_soc_handle soc); + /* TODO: Add any other control path calls required to OL_IF/WMA layer */ +}; + + +#ifdef DP_PEER_EXTENDED_API +/** + * struct cdp_misc_ops - mcl ops not classified + * @set_ibss_vdev_heart_beat_timer: Update ibss vdev heart beat timer + * @set_wmm_param: set wmm parameters + * @bad_peer_txctl_set_setting: configure bad peer tx limit setting + * @bad_peer_txctl_update_threshold: configure bad peer tx threshold limit + * @hl_tdls_flag_reset: reset tdls flag for vdev + * @tx_non_std: Allow the control-path SW to send data frames + * @get_vdev_id: get vdev id + * @set_wisa_mode: set wisa mode for a vdev + * @txrx_data_stall_cb_register: register data stall callback + * @txrx_data_stall_cb_deregister: deregister data stall callback + * @txrx_post_data_stall_event: post data stall event + * @runtime_suspend: ensure TXRX is ready to runtime suspend + * @runtime_resume: ensure TXRX is ready to runtime resume + * @get_opmode: get operation mode of vdev + * @mark_first_wakeup_packet: set flag to indicate that fw is compatible for + marking first packet after wow wakeup + * @update_mac_id: update mac_id for vdev + * @flush_rx_frames: flush rx frames on the queue + * @get_intra_bss_fwd_pkts_count: to get the total tx and rx packets that + has been forwarded from txrx layer + without going to upper layers + * @pkt_log_init: handler to initialize packet log + * @pkt_log_con_service: handler to connect packet log service + * @get_num_rx_contexts: handler to get number of RX contexts + * @register_packetdump_cb: register callback for different pktlog + * @unregister_packetdump_cb: unregister callback for different pktlog + * @pdev_reset_driver_del_ack: reset driver delayed ack enabled flag + * @vdev_set_driver_del_ack_enable: set driver delayed ack enabled flag + * + * Function pointers for miscellaneous soc/pdev/vdev related operations. + */ +struct cdp_misc_ops { + uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + uint16_t timer_value_sec); + void (*set_wmm_param)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + struct ol_tx_wmm_param_t wmm_param); + void (*bad_peer_txctl_set_setting)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, int enable, + int period, int txq_limit); + void (*bad_peer_txctl_update_threshold)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + int level, int tput_thresh, + int tx_limit); + void (*hl_tdls_flag_reset)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, bool flag); + qdf_nbuf_t (*tx_non_std)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list); + uint16_t (*get_vdev_id)(struct cdp_vdev *vdev); + uint32_t (*get_tx_ack_stats)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id); + QDF_STATUS (*set_wisa_mode)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, bool enable); + QDF_STATUS (*txrx_data_stall_cb_register)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + data_stall_detect_cb cb); + QDF_STATUS (*txrx_data_stall_cb_deregister)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + data_stall_detect_cb cb); + void (*txrx_post_data_stall_event)( + struct cdp_soc_t *soc_hdl, + enum data_stall_log_event_indicator indicator, + enum data_stall_log_event_type data_stall_type, + uint32_t pdev_id, uint32_t vdev_id_bitmap, + enum data_stall_log_recovery_type recovery_type); + QDF_STATUS (*runtime_suspend)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); + QDF_STATUS (*runtime_resume)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); + int (*get_opmode)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id); + void (*mark_first_wakeup_packet)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, uint8_t value); + void (*update_mac_id)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + uint8_t mac_id); + void (*flush_rx_frames)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + void *peer, bool drop); + A_STATUS(*get_intra_bss_fwd_pkts_count)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + uint64_t *fwd_tx_packets, + uint64_t *fwd_rx_packets); + void (*pkt_log_init)(struct cdp_soc_t *soc_hdl, uint8_t pdev, + void *scn); + void (*pkt_log_con_service)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, void *scn); + int (*get_num_rx_contexts)(struct cdp_soc_t *soc_hdl); + void (*register_pktdump_cb)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + ol_txrx_pktdump_cb tx_cb, + ol_txrx_pktdump_cb rx_cb); + void (*unregister_pktdump_cb)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); + void (*pdev_reset_driver_del_ack)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); + void (*vdev_set_driver_del_ack_enable)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + unsigned long rx_packets, + uint32_t time_in_ms, + uint32_t high_th, + uint32_t low_th); + void (*vdev_set_bundle_require_flag)(uint8_t vdev_id, + unsigned long tx_bytes, + uint32_t time_in_ms, + uint32_t high_th, + uint32_t low_th); + void (*pdev_reset_bundle_require_flag)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); + QDF_STATUS (*txrx_ext_stats_request)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + struct cdp_txrx_ext_stats *req); + QDF_STATUS (*request_rx_hw_stats)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id); +}; + +/** + * struct cdp_ocb_ops - mcl ocb ops + * @set_ocb_chan_info: set OCB channel info + * @get_ocb_chan_info: get OCB channel info + * + * Function pointers for operations related to OCB. + */ +struct cdp_ocb_ops { + void (*set_ocb_chan_info)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + struct ol_txrx_ocb_set_chan ocb_set_chan); + struct ol_txrx_ocb_chan_info *(*get_ocb_chan_info)( + struct cdp_soc_t *soc_hdl, uint8_t vdev_id); +}; + +/** + * struct cdp_peer_ops - mcl peer related ops + * @register_peer: + * @clear_peer: + * @find_peer_exist + * @find_peer_exist_on_vdev + * @find_peer_exist_on_other_vdev + * @peer_state_update: + * @get_vdevid: + * @register_ocb_peer: + * @peer_get_peer_mac_addr: + * @get_peer_state: + * @update_ibss_add_peer_num_of_vdev: + * @copy_mac_addr_raw: + * @add_last_real_peer: + * @is_vdev_restore_last_peer: + * @update_last_real_peer: + */ +struct cdp_peer_ops { + QDF_STATUS (*register_peer)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + struct ol_txrx_desc_type *sta_desc); + QDF_STATUS (*clear_peer)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + struct qdf_mac_addr peer_addr); + bool (*find_peer_exist)(struct cdp_soc_t *soc, uint8_t pdev_id, + uint8_t *peer_addr); + bool (*find_peer_exist_on_vdev)(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_addr); + bool (*find_peer_exist_on_other_vdev)(struct cdp_soc_t *soc, + uint8_t vdev_id, + uint8_t *peer_addr, + uint16_t max_bssid); + QDF_STATUS (*peer_state_update)(struct cdp_soc_t *soc, + uint8_t *peer_addr, + enum ol_txrx_peer_state state); + QDF_STATUS (*get_vdevid)(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, + uint8_t *vdev_id); + struct cdp_vdev * (*get_vdev_by_peer_addr)(struct cdp_pdev *pdev, + struct qdf_mac_addr peer_addr); + QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr); + uint8_t * (*peer_get_peer_mac_addr)(void *peer); + int (*get_peer_state)(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac); + struct cdp_vdev * (*get_vdev_for_peer)(void *peer); + int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_soc_t *soc, + uint8_t vdev_id, + int16_t peer_num_delta); + void (*remove_peers_for_vdev)(struct cdp_vdev *vdev, + ol_txrx_vdev_peer_remove_cb callback, + void *callback_context, bool remove_last_peer); + void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev, + ol_txrx_vdev_peer_remove_cb callback, + void *callback_context); + void (*copy_mac_addr_raw)(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *bss_addr); + void (*add_last_real_peer)(struct cdp_soc_t *soc, uint8_t pdev_id, + uint8_t vdev_id); + bool (*is_vdev_restore_last_peer)(struct cdp_soc_t *soc, + uint8_t vdev_id, + uint8_t *peer_mac); + void (*update_last_real_peer)(struct cdp_soc_t *soc, uint8_t pdev_id, + uint8_t vdev_id, bool restore_last_peer); + void (*peer_detach_force_delete)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, uint8_t *peer_addr); + void (*set_tdls_offchan_enabled)(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac, bool val); + void (*set_peer_as_tdls_peer)(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac, bool val); + void (*peer_flush_frags)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, uint8_t *peer_mac); +}; + +/** + * struct cdp_mob_stats_ops - mcl mob stats ops + * @clear_stats: handler to clear ol txrx stats + * @stats: handler to update ol txrx stats + */ +struct cdp_mob_stats_ops { + QDF_STATUS(*clear_stats)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, uint8_t bitmap); + int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len); +}; + +/** + * struct cdp_pmf_ops - mcl protected management frame ops + * @get_pn_info: handler to get pn info from peer + * + * Function pointers for pmf related operations. + */ +struct cdp_pmf_ops { + void (*get_pn_info)(struct cdp_soc_t *soc, uint8_t *peer_mac, + uint8_t vdev_id, uint8_t **last_pn_valid, + uint64_t **last_pn, uint32_t **rmf_pn_replays); +}; +#endif + + +#ifdef DP_FLOW_CTL +/** + * struct cdp_cfg_ops - mcl configuration ops + * @set_cfg_rx_fwd_disabled: set rx_fwd_disabled flag + * @set_cfg_packet_log_enabled: set is_packet_log_enabled flag + * @cfg_attach: hardcode the configuration parameters + * @vdev_rx_set_intrabss_fwd: set disable_intrabss_fwd flag + * @is_rx_fwd_disabled: get the rx_fwd_disabled flag, + * 1 enabled, 0 disabled. + * @tx_set_is_mgmt_over_wmi_enabled: set is_mgmt_over_wmi_enabled flag to + * indicate that mgmt over wmi is enabled + * or not, + * 1 for enabled, 0 for disable + * @is_high_latency: get device is high or low latency device, + * 1 high latency bus, 0 low latency bus + * @set_flow_control_parameters: set flow control parameters + * @set_flow_steering: set flow_steering_enabled flag + * @set_ptp_rx_opt_enabled: set is_ptp_rx_opt_enabled flag + * @set_new_htt_msg_format: set new_htt_msg_format flag + * @set_peer_unmap_conf_support: set enable_peer_unmap_conf_support flag + * @get_peer_unmap_conf_support: get enable_peer_unmap_conf_support flag + * @set_tx_compl_tsf64: set enable_tx_compl_tsf64 flag, + * 1 enabled, 0 disabled. + * @get_tx_compl_tsf64: get enable_tx_compl_tsf64 flag, + * 1 enabled, 0 disabled. + */ +struct cdp_cfg_ops { + void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev, + uint8_t disable_rx_fwd); + void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev, + uint8_t val); + struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param); + void (*vdev_rx_set_intrabss_fwd)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, bool val); + uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev); + void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value); + int (*is_high_latency)(struct cdp_cfg *cfg_pdev); + void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev, + void *param); + void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val); + void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val); + void (*set_new_htt_msg_format)(uint8_t val); + void (*set_peer_unmap_conf_support)(bool val); + bool (*get_peer_unmap_conf_support)(void); + void (*set_tx_compl_tsf64)(bool val); + bool (*get_tx_compl_tsf64)(void); +}; + +/** + * struct cdp_flowctl_ops - mcl flow control + * @flow_pool_map_handler: handler to map flow_id and pool descriptors + * @flow_pool_unmap_handler: handler to unmap flow_id and pool descriptors + * @register_pause_cb: handler to register tx pause callback + * @set_desc_global_pool_size: handler to set global pool size + * @dump_flow_pool_info: handler to dump global and flow pool info + * @tx_desc_thresh_reached: handler to set tx desc threshold + * + * Function pointers for operations related to flow control + */ +struct cdp_flowctl_ops { + QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc, + uint8_t pdev_id, + uint8_t vdev_id); + void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc, + uint8_t pdev_id, + uint8_t vdev_id); + QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc, + tx_pause_callback); + void (*set_desc_global_pool_size)(uint32_t num_msdu_desc); + + void (*dump_flow_pool_info)(struct cdp_soc_t *soc_hdl); + + bool (*tx_desc_thresh_reached)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id); +}; + +/** + * struct cdp_lflowctl_ops - mcl legacy flow control ops + * @register_tx_flow_control: Register tx flow control callback + * @set_vdev_tx_desc_limit: Set tx descriptor limit for a vdev + * @set_vdev_os_queue_status: Set vdev queue status + * @deregister_tx_flow_control_cb: Deregister tx flow control callback + * @flow_control_cb: Call osif flow control callback + * @get_tx_resource: Get tx resources and comapre with watermark + * @ll_set_tx_pause_q_depth: set pause queue depth + * @vdev_flush: Flush all packets on a particular vdev + * @vdev_pause: Pause a particular vdev + * @vdev_unpause: Unpause a particular vdev + * + * Function pointers for operations related to flow control + */ +struct cdp_lflowctl_ops { +#ifdef QCA_HL_NETDEV_FLOW_CONTROL + int (*register_tx_flow_control)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + tx_pause_callback flowcontrol); + int (*set_vdev_tx_desc_limit)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, uint32_t chan_freq); + int (*set_vdev_os_queue_status)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + enum netif_action_type action); +#else + int (*register_tx_flow_control)( + struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx, + ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause); +#endif /* QCA_HL_NETDEV_FLOW_CONTROL */ + int (*deregister_tx_flow_control_cb)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id); + void (*flow_control_cb)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + bool tx_resume); + bool (*get_tx_resource)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + struct qdf_mac_addr peer_addr, + unsigned int low_watermark, + unsigned int high_watermark_offset); + int (*ll_set_tx_pause_q_depth)(struct cdp_soc_t *soc, uint8_t vdev_id, + int pause_q_depth); + void (*vdev_flush)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id); + void (*vdev_pause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + uint32_t reason, uint32_t pause_type); + void (*vdev_unpause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + uint32_t reason, uint32_t pause_type); +}; + +/** + * struct cdp_throttle_ops - mcl throttle ops + * @throttle_init_period: handler to initialize tx throttle time + * @throttle_set_level: handler to set tx throttle level + */ +struct cdp_throttle_ops { + void (*throttle_init_period)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, int period, + uint8_t *dutycycle_level); + void (*throttle_set_level)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, int level); +}; +#endif + +#ifdef IPA_OFFLOAD +/** + * struct cdp_ipa_ops - mcl ipa data path ops + * @ipa_get_resource: + * @ipa_set_doorbell_paddr: + * @ipa_set_active: + * @ipa_op_response: + * @ipa_register_op_cb: + * @ipa_get_stat: + * @ipa_tx_data_frame: + */ +struct cdp_ipa_ops { + QDF_STATUS (*ipa_get_resource)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); + QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); + QDF_STATUS (*ipa_set_active)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + bool uc_active, bool is_tx); + QDF_STATUS (*ipa_op_response)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, uint8_t *op_msg); + QDF_STATUS (*ipa_register_op_cb)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + void (*ipa_uc_op_cb_type) + (uint8_t *op_msg, void *osif_ctxt), + void *usr_ctxt); + QDF_STATUS (*ipa_get_stat)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); + qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, qdf_nbuf_t skb); + void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev, + uint32_t value); +#ifdef FEATURE_METERING + QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + uint8_t reset_stats); + QDF_STATUS (*ipa_uc_set_quota)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, uint64_t quota_bytes); +#endif + QDF_STATUS (*ipa_enable_autonomy)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); + QDF_STATUS (*ipa_disable_autonomy)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); +#ifdef CONFIG_IPA_WDI_UNIFIED_API + QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + void *ipa_i2w_cb, void *ipa_w2i_cb, + void *ipa_wdi_meter_notifier_cb, + uint32_t ipa_desc_size, void *ipa_priv, + bool is_rm_enabled, uint32_t *tx_pipe_handle, + uint32_t *rx_pipe_handle, bool is_smmu_enabled, + qdf_ipa_sys_connect_params_t *sys_in, + bool over_gsi); +#else /* CONFIG_IPA_WDI_UNIFIED_API */ + QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + void *ipa_i2w_cb, void *ipa_w2i_cb, + void *ipa_wdi_meter_notifier_cb, + uint32_t ipa_desc_size, void *ipa_priv, + bool is_rm_enabled, uint32_t *tx_pipe_handle, + uint32_t *rx_pipe_handle); +#endif /* CONFIG_IPA_WDI_UNIFIED_API */ + QDF_STATUS (*ipa_cleanup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + uint32_t tx_pipe_handle, + uint32_t rx_pipe_handle); + QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr, + qdf_ipa_client_type_t prod_client, + qdf_ipa_client_type_t cons_client, + uint8_t session_id, bool is_ipv6_enabled); + QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled); + QDF_STATUS (*ipa_enable_pipes)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); + QDF_STATUS (*ipa_disable_pipes)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); + QDF_STATUS (*ipa_set_perf_level)(int client, + uint32_t max_supported_bw_mbps); + bool (*ipa_rx_intrabss_fwd)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + qdf_nbuf_t nbuf, bool *fwd_success); +}; +#endif + +#ifdef DP_POWER_SAVE +/** + * struct cdp_tx_delay_ops - mcl tx delay ops + * @tx_delay: handler to get tx packet delay + * @tx_delay_hist: handler to get tx packet delay histogram + * @tx_packet_count: handler to get tx packet count + * @tx_set_compute_interval: update compute interval period for TSM stats + * + * Function pointer for operations related to tx delay. + */ +struct cdp_tx_delay_ops { + void (*tx_delay)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + uint32_t *queue_delay_microsec, + uint32_t *tx_delay_microsec, int category); + void (*tx_delay_hist)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + uint16_t *bin_values, int category); + void (*tx_packet_count)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + uint16_t *out_packet_count, + uint16_t *out_packet_loss_count, int category); + void (*tx_set_compute_interval)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, uint32_t interval); +}; + +/** + * struct cdp_bus_ops - mcl bus suspend/resume ops + * @bus_suspend: handler for bus suspend + * @bus_resume: handler for bus resume + * @process_wow_ack_rsp: handler for wow ack response + * @process_target_suspend_req: handler for target suspend request + */ +struct cdp_bus_ops { + QDF_STATUS (*bus_suspend)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); + QDF_STATUS (*bus_resume)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); + void (*process_wow_ack_rsp)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); + void (*process_target_suspend_req)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); +}; +#endif + +#ifdef RECEIVE_OFFLOAD +/** + * struct cdp_rx_offld_ops - mcl host receive offload ops + * @register_rx_offld_flush_cb: + * @deregister_rx_offld_flush_cb: + */ +struct cdp_rx_offld_ops { + void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *)); + void (*deregister_rx_offld_flush_cb)(void); +}; +#endif + +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) +/** + * struct cdp_cfr_ops - host cfr ops + * @txrx_cfr_filter: Handler to configure host rx monitor status ring + * @txrx_get_cfr_rcc: Handler to get CFR mode + * @txrx_set_cfr_rcc: Handler to enable/disable CFR mode + * @txrx_get_cfr_dbg_stats: Handler to get debug statistics for CFR mode + * @txrx_clear_cfr_dbg_stats: Handler to clear debug statistics for CFR mode + * @txrx_enable_mon_reap_timer: Enable/Disable reap timer of monitor status ring + */ +struct cdp_cfr_ops { + void (*txrx_cfr_filter)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + bool enable, + struct cdp_monitor_filter *filter_val); + bool (*txrx_get_cfr_rcc)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); + void (*txrx_set_cfr_rcc)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + bool enable); + void (*txrx_get_cfr_dbg_stats)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + struct cdp_cfr_rcc_stats *buf); + void (*txrx_clear_cfr_dbg_stats)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); + void (*txrx_enable_mon_reap_timer)(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + bool enable); +}; +#endif + +struct cdp_ops { + struct cdp_cmn_ops *cmn_drv_ops; + struct cdp_ctrl_ops *ctrl_ops; + struct cdp_me_ops *me_ops; + struct cdp_mon_ops *mon_ops; + struct cdp_host_stats_ops *host_stats_ops; + struct cdp_wds_ops *wds_ops; + struct cdp_raw_ops *raw_ops; + struct cdp_pflow_ops *pflow_ops; +#ifdef DP_PEER_EXTENDED_API + struct cdp_misc_ops *misc_ops; + struct cdp_peer_ops *peer_ops; + struct cdp_ocb_ops *ocb_ops; + struct cdp_mob_stats_ops *mob_stats_ops; + struct cdp_pmf_ops *pmf_ops; +#endif +#ifdef DP_FLOW_CTL + struct cdp_cfg_ops *cfg_ops; + struct cdp_flowctl_ops *flowctl_ops; + struct cdp_lflowctl_ops *l_flowctl_ops; + struct cdp_throttle_ops *throttle_ops; +#endif +#ifdef DP_POWER_SAVE + struct cdp_bus_ops *bus_ops; + struct cdp_tx_delay_ops *delay_ops; +#endif +#ifdef IPA_OFFLOAD + struct cdp_ipa_ops *ipa_ops; +#endif +#ifdef RECEIVE_OFFLOAD + struct cdp_rx_offld_ops *rx_offld_ops; +#endif +#ifdef WLAN_FEATURE_PKT_CAPTURE + struct cdp_pktcapture_ops *pktcapture_ops; +#endif +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) + struct cdp_cfr_ops *cfr_ops; +#endif + +}; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_peer_ops.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_peer_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2a394392029b78ca56f297e18268c655337c29ea --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_peer_ops.h @@ -0,0 +1,592 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_peer.h + * @brief Define the host data path peer API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_PEER_H_ +#define _CDP_TXRX_PEER_H_ +#include +#include "cdp_txrx_handle.h" + +/** + * cdp_peer_register() - Register peer into physical device + * @soc - data path soc handle + * @pdev_id - data path device instance id + * @sta_desc - peer description + * + * Register peer into physical device + * + * Return: QDF_STATUS_SUCCESS registration success + * QDF_STATUS_E_NOSUPPORT not support this feature + */ +static inline QDF_STATUS +cdp_peer_register(ol_txrx_soc_handle soc, uint8_t pdev_id, + struct ol_txrx_desc_type *sta_desc) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->peer_ops->register_peer) + return soc->ops->peer_ops->register_peer(soc, pdev_id, + sta_desc); + + return QDF_STATUS_E_NOSUPPORT; +} + +/** + * cdp_clear_peer() - remove peer from physical device + * @soc - data path soc handle + * @pdev_id - data path device instance id + * @peer_addr - peer mac address + * + * remove peer from physical device + * + * Return: QDF_STATUS_SUCCESS registration success + * QDF_STATUS_E_NOSUPPORT not support this feature + */ +static inline QDF_STATUS +cdp_clear_peer(ol_txrx_soc_handle soc, uint8_t pdev_id, + struct qdf_mac_addr peer_addr) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->peer_ops->clear_peer) + return soc->ops->peer_ops->clear_peer(soc, pdev_id, peer_addr); + + return QDF_STATUS_E_NOSUPPORT; +} + +/** + * cdp_peer_register_ocb_peer() - register ocb peer from physical device + * @soc - data path soc handle + * @cds_ctx - cds void context + * @mac_addr - mac address for ocb self peer + * + * register ocb peer from physical device + * + * Return: QDF_STATUS_SUCCESS registration success + * QDF_STATUS_E_NOSUPPORT not support this feature + */ +static inline QDF_STATUS +cdp_peer_register_ocb_peer(ol_txrx_soc_handle soc, + uint8_t *mac_addr) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->peer_ops->register_ocb_peer) + return soc->ops->peer_ops->register_ocb_peer(mac_addr); + + return QDF_STATUS_E_NOSUPPORT; +} + +/** + * cdp_find_peer_exist - Find if peer already exists + * @soc - data path soc handle + * @pdev_id - data path device instance id + * @peer_addr - peer mac address + * + * Return: true or false + */ +static inline bool +cdp_find_peer_exist(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint8_t *peer_addr) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return false; + } + + if (soc->ops->peer_ops->find_peer_exist) + return soc->ops->peer_ops->find_peer_exist(soc, pdev_id, + peer_addr); + + return false; +} + +/** + * cdp_find_peer_exist_on_vdev - Find if duplicate peer exists + * on the given vdev + * @soc - data path soc handle + * @vdev_id - data path virtual interface id + * @peer_addr - peer mac address + * + * Return: true or false + */ +static inline bool +cdp_find_peer_exist_on_vdev(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t *peer_addr) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return false; + } + + if (soc->ops->peer_ops->find_peer_exist_on_vdev) + return soc->ops->peer_ops->find_peer_exist_on_vdev(soc, vdev_id, + peer_addr); + + return false; +} + +/** + * cdp_find_peer_exist_on_other_vdev - Find if duplicate peer exists + * on other than the given vdev + * @soc - data path soc handle + * @vdev_id - data path virtual interface id + * @peer_addr - peer mac address + * @max_bssid - max number of bssids + * + * Return: true or false + */ +static inline bool +cdp_find_peer_exist_on_other_vdev(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t *peer_addr, uint16_t max_bssid) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return false; + } + + if (soc->ops->peer_ops->find_peer_exist_on_other_vdev) + return soc->ops->peer_ops->find_peer_exist_on_other_vdev( + soc, vdev_id, + peer_addr, + max_bssid); + + return false; +} + +/** + * cdp_peer_state_update() - update peer local state + * @soc - data path soc handle + * @peer_addr - peer mac address + * @state - new peer local state + * + * update peer local state + * + * Return: QDF_STATUS_SUCCESS registration success + * QDF_STATUS_E_NOSUPPORT not support this feature + */ +static inline QDF_STATUS +cdp_peer_state_update(ol_txrx_soc_handle soc, uint8_t *peer_addr, + enum ol_txrx_peer_state state) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->peer_ops->peer_state_update) + return soc->ops->peer_ops->peer_state_update(soc, peer_addr, + state); + + return QDF_STATUS_E_NOSUPPORT; +} + +/** + * cdp_peer_state_get() - Get local peer state + * @soc - data path soc handle + * @vdev_id - virtual interface id + * @peer_mac - peer mac addr + * + * Get local peer state + * + * Return: peer status + */ +static inline int +cdp_peer_state_get(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return 0; + } + + if (soc->ops->peer_ops->get_peer_state) + return soc->ops->peer_ops->get_peer_state(soc, vdev_id, + peer_mac); + + return 0; +} + +/** + * cdp_peer_get_vdevid() - Get virtual interface id which peer registered + * @soc - data path soc handle + * @peer_mac - peer mac address + * @vdev_id - virtual interface id which peer registered + * + * Get virtual interface id which peer registered + * + * Return: QDF_STATUS_SUCCESS registration success + * QDF_STATUS_E_NOSUPPORT not support this feature + */ +static inline QDF_STATUS +cdp_peer_get_vdevid(ol_txrx_soc_handle soc, + uint8_t *peer_mac, uint8_t *vdev_id) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->peer_ops->get_vdevid) + return soc->ops->peer_ops->get_vdevid(soc, peer_mac, vdev_id); + + return QDF_STATUS_E_NOSUPPORT; +} + +/** + * cdp_peer_get_vdev_by_sta_id() - Get vdev instance by local peer id + * @soc - data path soc handle + * @pdev - data path device instance + * @peer_addr - peer mac address + * + * Get virtual interface id by local peer id + * + * Return: Virtual interface instance + * NULL in case cannot find + */ +static inline struct cdp_vdev +*cdp_peer_get_vdev_by_peer_addr(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + struct qdf_mac_addr peer_addr) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return NULL; + } + + if (soc->ops->peer_ops->get_vdev_by_peer_addr) + return soc->ops->peer_ops->get_vdev_by_peer_addr(pdev, + peer_addr); + + return NULL; +} + +/** + * cdp_peer_get_peer_mac_addr() - Get peer mac address + * @soc - data path soc handle + * @peer - peer instance + * + * Get peer mac address + * + * Return: peer mac address pointer + * NULL in case cannot find + */ +static inline uint8_t +*cdp_peer_get_peer_mac_addr(ol_txrx_soc_handle soc, void *peer) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return NULL; + } + + if (soc->ops->peer_ops->peer_get_peer_mac_addr) + return soc->ops->peer_ops->peer_get_peer_mac_addr(peer); + + return NULL; +} + +/** + * cdp_peer_update_ibss_add_peer_num_of_vdev() - update number of peer + * @soc - data path soc handle + * @vdev_id - virtual interface instance id + * @peer_num_delta - number of peer should be updated + * + * update number of peer + * + * Return: updated number of peer + * 0 fail + */ +static inline int16_t +cdp_peer_update_ibss_add_peer_num_of_vdev(ol_txrx_soc_handle soc, + uint8_t vdev_id, + int16_t peer_num_delta) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return 0; + } + + if (soc->ops->peer_ops->update_ibss_add_peer_num_of_vdev) + return soc->ops->peer_ops->update_ibss_add_peer_num_of_vdev( + soc, vdev_id, + peer_num_delta); + + return 0; +} + +/** + * cdp_peer_copy_mac_addr_raw() - copy peer mac address + * @soc - data path soc handle + * @vdev_id - virtual interface instance id + * @bss_addr - mac address should be copied + * + * copy peer mac address + * + * Return: none + */ +static inline void +cdp_peer_copy_mac_addr_raw(ol_txrx_soc_handle soc, + uint8_t vdev_id, uint8_t *bss_addr) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->peer_ops->copy_mac_addr_raw) + return soc->ops->peer_ops->copy_mac_addr_raw(soc, vdev_id, + bss_addr); + + return; +} + +/** + * cdp_peer_add_last_real_peer() - Add peer with last peer marking + * @soc - data path soc handle + * @pdev_id - data path device instance id + * @vdev_id - virtual interface instance id + * + * copy peer mac address + * + * Return: none + */ +static inline void +cdp_peer_add_last_real_peer(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint8_t vdev_id) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->peer_ops->add_last_real_peer) + return soc->ops->peer_ops->add_last_real_peer(soc, pdev_id, + vdev_id); + return; +} + +/** + * cdp_peer_is_vdev_restore_last_peer() - restore last peer + * @soc - data path soc handle + * @vdev_id - virtual interface id + * @peer_mac - peer mac address + * + * restore last peer + * + * Return: true, restore success + * fasle, restore fail + */ +static inline bool +cdp_peer_is_vdev_restore_last_peer(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t *peer_mac) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return false; + } + + if (soc->ops->peer_ops->is_vdev_restore_last_peer) + return soc->ops->peer_ops->is_vdev_restore_last_peer(soc, + vdev_id, + peer_mac); + + return false; +} + +/** + * cdp_peer_update_last_real_peer() - update last real peer + * @soc - data path soc handle + * @pdev_id - data path device instance id + * @vdev_id - virtual interface id + * @restore_last_peer - restore last peer or not + * + * update last real peer + * + * Return: none + */ +static inline void +cdp_peer_update_last_real_peer(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint8_t vdev_id, bool restore_last_peer) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->peer_ops->update_last_real_peer) + return soc->ops->peer_ops->update_last_real_peer( + soc, pdev_id, vdev_id, + restore_last_peer); + + return; +} + +/** + * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object + * @peer - the object to detach + * + * Detach a peer and force the peer object to be removed. It is called during + * roaming scenario when the firmware has already deleted a peer. + * Peer object is freed immediately to avoid duplicate peers during roam sync + * indication processing. + * + * Return: None + */ +static inline void cdp_peer_detach_force_delete(ol_txrx_soc_handle soc, + uint8_t vdev_id, + uint8_t *peer_mac) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->peer_ops->peer_detach_force_delete) + return soc->ops->peer_ops->peer_detach_force_delete(soc, + vdev_id, + peer_mac); + + return; +} + +/** + * is_cdp_peer_detach_force_delete_supported() - To check if force delete + * operation is supported + * @soc: pointer to SOC handle + * + * Some of the platforms support force delete operation and some of them + * don't. This API returns true if API which handles force delete operation + * is registered and false otherwise. + * + * Return: true if API which handles force delete operation is registered + * false in all other cases + */ +static inline bool +is_cdp_peer_detach_force_delete_supported(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return false; + } + + if (soc->ops->peer_ops->peer_detach_force_delete) + return true; + + return false; +} + +/* + * cdp_peer_set_peer_as_tdls() - To set peer as tdls peer + * @soc: pointer to SOC handle + * @vdev_id: virtual interface id + * @peer_mac: peer mac address + * @var: true or false + * + * Return: void + */ +static inline void +cdp_peer_set_peer_as_tdls(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t *peer_mac, bool val) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->peer_ops->set_peer_as_tdls_peer) + soc->ops->peer_ops->set_peer_as_tdls_peer(soc, vdev_id, + peer_mac, val); +} + +/** + * cdp_peer_set_tdls_offchan_enabled() - Set tdls offchan operation as enabled + * @soc: pointer to SOC handle + * @vdev_id: virtual interface id + * @peer_mac: peer mac address + * @val: true or false + * + * update tdls_offchan_enabled + * + * Return: none + */ +static inline void +cdp_peer_set_tdls_offchan_enabled(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint8_t *peer_mac, bool val) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->peer_ops->set_tdls_offchan_enabled) + soc->ops->peer_ops->set_tdls_offchan_enabled(soc, vdev_id, + peer_mac, val); +} + +/** + * cdp_peer_flush_frags() - Flush frags on peer + * @soc - data path soc handle + * @vdev_id - virtual interface id + * @peer_mac - peer mac addr + * + * Return: None + */ +static inline void +cdp_peer_flush_frags(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->peer_ops->peer_flush_frags) + soc->ops->peer_ops->peer_flush_frags(soc, vdev_id, peer_mac); +} +#endif /* _CDP_TXRX_PEER_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_pflow.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_pflow.h new file mode 100644 index 0000000000000000000000000000000000000000..ecb2bddc91d8ae195a40f8a9f7ecb134e04f26df --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_pflow.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_pflow.h + * @brief Define the host data path peer flow API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_PFLOW_H_ +#define _CDP_TXRX_PFLOW_H_ + +#include +#include "cdp_txrx_ops.h" +#include "cdp_txrx_handle.h" + +static inline uint32_t cdp_pflow_update_pdev_params + (ol_txrx_soc_handle soc, uint8_t pdev_id, + enum _ol_ath_param_t param, uint32_t val, void *ctx) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->pflow_ops || + !soc->ops->pflow_ops->pflow_update_pdev_params) + return 0; + + return soc->ops->pflow_ops->pflow_update_pdev_params + (soc, pdev_id, param, val, ctx); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_pmf.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_pmf.h new file mode 100644 index 0000000000000000000000000000000000000000..c869eac2b70cc445d703cc5eb03aa2242e6257cc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_pmf.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2016, 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _CDP_TXRX_PMF_H_ +#define _CDP_TXRX_PMF_H_ + +/** + * cdp_get_pn_info() - Returns pn info from peer + * @soc - data path soc handle + * @peer_mac: peer mac address + * @vdev_id: virtual device/interface id + * @last_pn_valid: return last_rmf_pn_valid value from peer. + * @last_pn: return last_rmf_pn value from peer. + * @rmf_pn_replays: return rmf_pn_replays value from peer. + * + * Return: NONE + */ +static inline void +cdp_get_pn_info(ol_txrx_soc_handle soc, uint8_t *peer_mac, uint8_t vdev_id, + uint8_t **last_pn_valid, uint64_t **last_pn, + uint32_t **rmf_pn_replays) +{ + if (!soc || !soc->ops || !soc->ops->pmf_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->pmf_ops->get_pn_info) + return soc->ops->pmf_ops->get_pn_info(soc, peer_mac, vdev_id, + last_pn_valid, + last_pn, rmf_pn_replays); + + return; +} +#endif /* _CDP_TXRX_PMF_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_raw.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_raw.h new file mode 100644 index 0000000000000000000000000000000000000000..0eb40e6a0efc3640f44519299271a9a9b557e3c6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_raw.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_raw.h + * @brief Define the host data path raw mode API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_RAW_H_ +#define _CDP_TXRX_RAW_H_ + +#include "cdp_txrx_handle.h" +#include "cdp_txrx_ops.h" +/* TODO: adf need to be replaced with qdf */ +static inline int cdp_get_nwifi_mode(ol_txrx_soc_handle soc, + uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->raw_ops || + !soc->ops->raw_ops->txrx_get_nwifi_mode) + return 0; + + return soc->ops->raw_ops->txrx_get_nwifi_mode(soc, vdev_id); +} + +/** + * @brief finds the ast entry for the packet + * @details: Finds the ast entry i.e 4th address for the packet based on the + * details in the netbuf. + * + * @param soc - soc handle + * @param vdev_id - id of the data virtual device object + * @param pnbuf - pointer to nbuf + * @param raw_ast - pointer to fill ast information + * + * @return - 0 on success, -1 on error, 1 if more nbufs need to be consumed. + */ + +static inline QDF_STATUS +cdp_rawsim_get_astentry(ol_txrx_soc_handle soc, uint8_t vdev_id, + qdf_nbuf_t *pnbuf, struct cdp_raw_ast *raw_ast) +{ + + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->raw_ops || + !soc->ops->raw_ops->rsim_get_astentry) + return QDF_STATUS_E_FAILURE; + + return soc->ops->raw_ops->rsim_get_astentry(soc, vdev_id, + pnbuf, raw_ast); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_stats.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..c912591248ba4b336523918ea5fe74599e6bfd31 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_stats.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2016-2017,2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_stats.h + * @brief Define the host data path statistics API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_STATS_H_ +#define _CDP_TXRX_STATS_H_ +#include + +static inline QDF_STATUS +cdp_clear_stats(ol_txrx_soc_handle soc, uint8_t pdev_id, uint8_t bitmap) +{ + + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_INVAL; + } + + if (!soc->ops->mob_stats_ops || + !soc->ops->mob_stats_ops->clear_stats) + return QDF_STATUS_E_INVAL; + + return soc->ops->mob_stats_ops->clear_stats(soc, pdev_id, bitmap); +} + +static inline int +cdp_stats(ol_txrx_soc_handle soc, uint8_t vdev_id, char *buffer, + unsigned int buf_len) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->mob_stats_ops || + !soc->ops->mob_stats_ops->stats) + return 0; + + return soc->ops->mob_stats_ops->stats(vdev_id, buffer, buf_len); +} + +#endif /* _CDP_TXRX_STATS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_stats_struct.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_stats_struct.h new file mode 100644 index 0000000000000000000000000000000000000000..f07bcd3608c03e7fbeb16713d05cd9cf49475c84 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_stats_struct.h @@ -0,0 +1,2122 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_stats_struct.h + * @brief Define the host data path stats API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_STATS_STRUCT_H_ +#define _CDP_TXRX_STATS_STRUCT_H_ + +#include + +#define TXRX_STATS_LEVEL_OFF 0 +#define TXRX_STATS_LEVEL_BASIC 1 +#define TXRX_STATS_LEVEL_FULL 2 + +#define BSS_CHAN_INFO_READ 1 +#define BSS_CHAN_INFO_READ_AND_CLEAR 2 + +#define TX_FRAME_TYPE_DATA 0 +#define TX_FRAME_TYPE_MGMT 1 +#define TX_FRAME_TYPE_BEACON 2 + +#ifndef TXRX_STATS_LEVEL +#define TXRX_STATS_LEVEL TXRX_STATS_LEVEL_BASIC +#endif + +/* 1 additional MCS is for invalid values */ +#define MAX_MCS (12 + 1) +#define MAX_MCS_11A 8 +#define MAX_MCS_11B 7 +#define MAX_MCS_11AC 12 +/* 1 additional GI is for invalid values */ +#define MAX_GI (4 + 1) +#define SS_COUNT 8 +#define MAX_BW 7 +#define MAX_RECEPTION_TYPES 4 + +#define MAX_TRANSMIT_TYPES 9 + +#define MAX_USER_POS 8 +#define MAX_MU_GROUP_ID 64 +#define MAX_RU_LOCATIONS 6 +#define RU_26 1 +#define RU_52 2 +#define RU_106 4 +#define RU_242 9 +#define RU_484 18 +#define RU_996 37 + +/* WME stream classes */ +#define WME_AC_BE 0 /* best effort */ +#define WME_AC_BK 1 /* background */ +#define WME_AC_VI 2 /* video */ +#define WME_AC_VO 3 /* voice */ +#define WME_AC_MAX 4 /* MAX AC Value */ + +#define CDP_MAX_RX_RINGS 4 /* max rx rings */ +#define CDP_MAX_TX_COMP_RINGS 3 /* max tx completion rings */ +#define CDP_MAX_TX_TQM_STATUS 9 /* max tx tqm completion status */ +#define CDP_MAX_TX_HTT_STATUS 7 /* max tx htt completion status */ + +/* TID level VoW stats macros + * to add and get stats + */ +#define PFLOW_TXRX_TIDQ_STATS_ADD(_peer, _tid, _var, _val) \ + (((_peer)->tidq_stats[_tid]).stats[_var]) += _val +#define PFLOW_TXRX_TIDQ_STATS_GET(_peer, _tid, _var, _val) \ + ((_peer)->tidq_stats[_tid].stats[_var]) +/* + * Video only stats + */ +#define PFLOW_CTRL_PDEV_VIDEO_STATS_SET(_pdev, _var, _val) \ + (((_pdev)->vow.vistats[_var]).value) = _val +#define PFLOW_CTRL_PDEV_VIDEO_STATS_GET(_pdev, _var) \ + ((_pdev)->vow.vistats[_var].value) +#define PFLOW_CTRL_PDEV_VIDEO_STATS_ADD(_pdev, _var, _val) \ + (((_pdev)->vow.vistats[_var]).value) += _val +/* + * video delay stats + */ +#define PFLOW_CTRL_PDEV_DELAY_VIDEO_STATS_SET(_pdev, _var, _val) \ + (((_pdev)->vow.delaystats[_var]).value) = _val +#define PFLOW_CTRL_PDEV_DELAY_VIDEO_STATS_GET(_pdev, _var) \ + ((_pdev)->vow.delaystats[_var].value) +#define PFLOW_CTRL_PDEV_DELAY_VIDEO_STATS_ADD(_pdev, _var, _val) \ + (((_pdev)->vow.delaystats[_var]).value) += _val +/* + * Number of TLVs sent by FW. Needs to reflect + * HTT_PPDU_STATS_MAX_TAG declared in FW + */ +#define CDP_PPDU_STATS_MAX_TAG 14 +#define CDP_MAX_DATA_TIDS 9 + +#define CDP_WDI_NUM_EVENTS WDI_NUM_EVENTS + +#define CDP_FCTL_RETRY 0x0800 +#define CDP_FC_IS_RETRY_SET(_fc) \ + ((_fc) & qdf_cpu_to_le16(CDP_FCTL_RETRY)) + +#define INVALID_RSSI 255 + +#define CDP_RSSI_MULTIPLIER BIT(8) +#define CDP_RSSI_MUL(x, mul) ((x) * (mul)) +#define CDP_RSSI_RND(x, mul) ((((x) % (mul)) >= ((mul) / 2)) ?\ + ((x) + ((mul) - 1)) / (mul) : (x) / (mul)) + +#define CDP_RSSI_OUT(x) (CDP_RSSI_RND((x), CDP_RSSI_MULTIPLIER)) +#define CDP_RSSI_IN(x) (CDP_RSSI_MUL((x), CDP_RSSI_MULTIPLIER)) +#define CDP_RSSI_AVG(x, y) ((((x) << 2) + (y) - (x)) >> 2) + +#define CDP_RSSI_UPDATE_AVG(x, y) x = CDP_RSSI_AVG((x), CDP_RSSI_IN((y))) + +/*Max SU EVM count */ +#define DP_RX_MAX_SU_EVM_COUNT 32 + +#define WDI_EVENT_BASE 0x100 + +#define CDP_TXRX_RATECODE_MCS_MASK 0xF +#define CDP_TXRX_RATECODE_NSS_MASK 0x3 +#define CDP_TXRX_RATECODE_NSS_LSB 4 +#define CDP_TXRX_RATECODE_PREM_MASK 0x3 +#define CDP_TXRX_RATECODE_PREM_LSB 6 + +/* Below BW_GAIN should be added to the SNR value of every ppdu based on the + * bandwidth. This table is obtained from HALPHY. + * BW BW_Gain + * 20 0 + * 40 3dBm + * 80 6dBm + * 160/80P80 9dBm + */ + +#define PKT_BW_GAIN_20MHZ 0 +#define PKT_BW_GAIN_40MHZ 3 +#define PKT_BW_GAIN_80MHZ 6 +#define PKT_BW_GAIN_160MHZ 9 + +/* + * cdp_tx_transmit_type: Transmit type index + * SU: SU Transmit type index + * MU_MIMO: MU_MIMO Transmit type index + * MU_OFDMA: MU_OFDMA Transmit type index + * MU_MIMO_OFDMA: MU MIMO OFDMA Transmit type index + */ +enum cdp_tx_transmit_type { + SU = 0, + MU_MIMO, + MU_OFDMA, + MU_MIMO_OFDMA, +}; + +/* + * cdp_ru_index: Different RU index + * + * RU_26_INDEX : 26-tone Resource Unit index + * RU_52_INDEX : 52-tone Resource Unit index + * RU_106_INDEX: 106-tone Resource Unit index + * RU_242_INDEX: 242-tone Resource Unit index + * RU_484_INDEX: 484-tone Resource Unit index + * RU_996_INDEX: 996-tone Resource Unit index + */ +enum cdp_ru_index { + RU_26_INDEX = 0, + RU_52_INDEX, + RU_106_INDEX, + RU_242_INDEX, + RU_484_INDEX, + RU_996_INDEX, +}; + +#ifdef FEATURE_TSO_STATS +/* Number of TSO Packet Statistics captured */ +#define CDP_MAX_TSO_PACKETS 5 +/* Information for Number of Segments for a TSO Packet captured */ +#define CDP_MAX_TSO_SEGMENTS 2 +/* Information for Number of Fragments for a TSO Segment captured */ +#define CDP_MAX_TSO_FRAGMENTS 6 +#endif /* FEATURE_TSO_STATS */ + +/* Different Packet Types */ +enum cdp_packet_type { + DOT11_A = 0, + DOT11_B = 1, + DOT11_N = 2, + DOT11_AC = 3, + DOT11_AX = 4, + DOT11_MAX = 5, +}; + +/* + * cdp_mu_packet_type: MU Rx type index + * RX_TYPE_MU_MIMO: MU MIMO Rx type index + * RX_TYPE_MU_OFDMA: MU OFDMA Rx type index + * MU_MIMO_OFDMA: MU Rx MAX type index + */ +enum cdp_mu_packet_type { + RX_TYPE_MU_MIMO = 0, + RX_TYPE_MU_OFDMA = 1, + RX_TYPE_MU_MAX = 2, +}; + +enum WDI_EVENT { + WDI_EVENT_TX_STATUS = WDI_EVENT_BASE, + WDI_EVENT_OFFLOAD_ALL, + WDI_EVENT_RX_DESC_REMOTE, + WDI_EVENT_RX_PEER_INVALID, + WDI_EVENT_DBG_PRINT, /* NEED to integrate pktlog changes*/ + WDI_EVENT_RX_CBF_REMOTE, + WDI_EVENT_RATE_FIND, + WDI_EVENT_RATE_UPDATE, + WDI_EVENT_SW_EVENT, + WDI_EVENT_RX_DESC, + WDI_EVENT_LITE_T2H, + WDI_EVENT_LITE_RX, + WDI_EVENT_RX_PPDU_DESC, + WDI_EVENT_TX_PPDU_DESC, + WDI_EVENT_TX_MSDU_DESC, + WDI_EVENT_TX_DATA, + WDI_EVENT_RX_DATA, + WDI_EVENT_TX_MGMT_CTRL, + WDI_EVENT_HTT_STATS, + WDI_EVENT_TX_BEACON, + WDI_EVENT_PEER_STATS, + WDI_EVENT_TX_SOJOURN_STAT, + WDI_EVENT_UPDATE_DP_STATS, + WDI_EVENT_RX_MGMT_CTRL, + WDI_EVENT_PEER_CREATE, + WDI_EVENT_PEER_DESTROY, + WDI_EVENT_PEER_FLUSH_RATE_STATS, + WDI_EVENT_FLUSH_RATE_STATS_REQ, + WDI_EVENT_RX_MPDU, + /* End of new event items */ + WDI_EVENT_LAST +}; + +#define WDI_NUM_EVENTS WDI_EVENT_LAST - WDI_EVENT_BASE + +struct cdp_stats_extd { +}; + +/* TID level Tx/Rx stats + * + */ +enum cdp_txrx_tidq_stats { + /* Tx Counters */ + TX_MSDU_TOTAL_LINUX_SUBSYSTEM, + TX_MSDU_TOTAL_FROM_OSIF, + TX_MSDU_TX_COMP_PKT_CNT, + /* Rx Counters */ + RX_MSDU_TOTAL_FROM_FW, + RX_MSDU_MCAST_FROM_FW, + RX_TID_MISMATCH_FROM_FW, + RX_MSDU_MISC_PKTS, + RX_MSDU_IS_ARP, + RX_MSDU_IS_EAP, + RX_MSDU_IS_DHCP, + RX_AGGREGATE_10, + RX_AGGREGATE_20, + RX_AGGREGATE_30, + RX_AGGREGATE_40, + RX_AGGREGATE_50, + RX_AGGREGATE_60, + RX_AGGREGATE_MORE, + RX_AMSDU_1, + RX_AMSDU_2, + RX_AMSDU_3, + RX_AMSDU_4, + RX_AMSDU_MORE, + RX_MSDU_CHAINED_FROM_FW, + RX_MSDU_REORDER_FAILED_FROM_FW, + RX_MSDU_REORDER_FLUSHED_FROM_FW, + RX_MSDU_DISCARD_FROM_FW, + RX_MSDU_DUPLICATE_FROM_FW, + RX_MSDU_DELIVERED_TO_STACK, + TIDQ_STATS_MAX, +}; + +struct cdp_tidq_stats { + uint32_t stats[TIDQ_STATS_MAX]; +}; + +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) +/** + * struct cdp_rx_ppdu_cfr_info - struct for storing ppdu info extracted from HW + * TLVs, this will be used for CFR correlation + * + * @bb_captured_channel : Set by RXPCU when MACRX_FREEZE_CAPTURE_CHANNEL TLV is + * sent to PHY, SW checks it to correlate current PPDU TLVs with uploaded + * channel information. + * + * @bb_captured_timeout : Set by RxPCU to indicate channel capture condition is + * met, but MACRX_FREEZE_CAPTURE_CHANNEL is not sent to PHY due to AST delay, + * which means the rx_frame_falling edge to FREEZE TLV ready time exceeds + * the threshold time defined by RXPCU register FREEZE_TLV_DELAY_CNT_THRESH. + * Bb_captured_reason is still valid in this case. + * + * @bb_captured_reason : Copy capture_reason of MACRX_FREEZE_CAPTURE_CHANNEL + * TLV to here for FW usage. Valid when bb_captured_channel or + * bb_captured_timeout is set. + * + * + * + * + * + * + * + * + * @rx_location_info_valid: Indicates whether CFR DMA address in the PPDU TLV + * is valid + * + * + * + * + * @chan_capture_status : capture status reported by ucode + * a. CAPTURE_IDLE: FW has disabled "REPETITIVE_CHE_CAPTURE_CTRL" + * b. CAPTURE_BUSY: previous PPDU’s channel capture upload DMA ongoing. (Note + * that this upload is triggered after receiving freeze_channel_capture TLV + * after last PPDU is rx) + * c. CAPTURE_ACTIVE: channel capture is enabled and no previous channel + * capture ongoing + * d. CAPTURE_NO_BUFFER: next buffer in IPC ring not available + * + * @rtt_che_buffer_pointer_high8 : The high 8 bits of the 40 bits pointer to + * external RTT channel information buffer + * + * @rtt_che_buffer_pointer_low32 : The low 32 bits of the 40 bits pointer to + * external RTT channel information buffer + * + */ + +struct cdp_rx_ppdu_cfr_info { + bool bb_captured_channel; + bool bb_captured_timeout; + uint8_t bb_captured_reason; + bool rx_location_info_valid; + uint8_t chan_capture_status; + uint8_t rtt_che_buffer_pointer_high8; + uint32_t rtt_che_buffer_pointer_low32; +}; +#endif +/* + * struct cdp_rx_su_evm_info: Rx evm info + * @number_of_symbols: number of symbols + * @nss_count: number of spatial streams + * @pilot_count: number of pilot count + */ +struct cdp_rx_su_evm_info { + uint16_t number_of_symbols; + uint8_t nss_count; + uint8_t pilot_count; + uint32_t pilot_evm[DP_RX_MAX_SU_EVM_COUNT]; +}; + +/* + * cdp_delay_stats_mode: Different types of delay statistics + * + * @CDP_DELAY_STATS_SW_ENQ: Stack to hw enqueue delay + * @CDP_DELAY_STATS_TX_INTERFRAME: Interframe delay at radio entry point + * @CDP_DELAY_STATS_FW_HW_TRANSMIT: Hw enqueue to tx completion delay + * @CDP_DELAY_STATS_REAP_STACK: Delay in ring reap to indicating network stack + * @CDP_DELAY_STATS_RX_INTERFRAME: Rx inteframe delay + * @CDP_DELAY_STATS_MODE_MAX: Maximum delay mode + */ +enum cdp_delay_stats_mode { + CDP_DELAY_STATS_SW_ENQ, + CDP_DELAY_STATS_TX_INTERFRAME, + CDP_DELAY_STATS_FW_HW_TRANSMIT, + CDP_DELAY_STATS_REAP_STACK, + CDP_DELAY_STATS_RX_INTERFRAME, + CDP_DELAY_STATS_MODE_MAX, +}; + +/* + * cdp_delay_bucket_index + * Index to be used for all delay stats + */ +enum cdp_delay_bucket_index { + CDP_DELAY_BUCKET_0, + CDP_DELAY_BUCKET_1, + CDP_DELAY_BUCKET_2, + CDP_DELAY_BUCKET_3, + CDP_DELAY_BUCKET_4, + CDP_DELAY_BUCKET_5, + CDP_DELAY_BUCKET_6, + CDP_DELAY_BUCKET_7, + CDP_DELAY_BUCKET_8, + CDP_DELAY_BUCKET_9, + CDP_DELAY_BUCKET_10, + CDP_DELAY_BUCKET_11, + CDP_DELAY_BUCKET_12, + CDP_DELAY_BUCKET_MAX, +}; + +/* + * struct cdp_tx_host_drop - packet drop due to following reasons. + */ +enum cdp_tx_sw_drop { + TX_DESC_ERR, + TX_HAL_RING_ACCESS_ERR, + TX_DMA_MAP_ERR, + TX_HW_ENQUEUE, + TX_SW_ENQUEUE, + TX_MAX_DROP, +}; + +/* + * struct cdp_rx_host_drop - packet drop due to following reasons. + */ +enum cdp_rx_sw_drop { + INTRABSS_DROP, + MSDU_DONE_FAILURE, + INVALID_PEER_VDEV, + POLICY_CHECK_DROP, + MEC_DROP, + NAWDS_MCAST_DROP, + MESH_FILTER_DROP, + ENQUEUE_DROP, + RX_MAX_DROP, +}; + +/* + * struct cdp_delay_stats + * @delay_bucket: division of buckets as per latency + * @min_delay: minimum delay + * @max_delay: maximum delay + * @avg_delay: average delay + */ +struct cdp_delay_stats { + uint64_t delay_bucket[CDP_DELAY_BUCKET_MAX]; + uint32_t min_delay; + uint32_t max_delay; + uint32_t avg_delay; +}; + +/* + * struct cdp_tid_tx_stats + * @swq_delay: delay between wifi driver entry point and enqueue to HW in tx + * @hwtx_delay: delay between wifi driver exit (enqueue to HW) and tx completion + * @intfrm_delay: interframe delay + * @success_cnt: total successful transmit count + * @comp_fail_cnt: firmware drop found in tx completion path + * @swdrop_cnt: software drop in tx path + * @tqm_status_cnt: TQM completion status count + * @htt_status_cnt: HTT completion status count + */ +struct cdp_tid_tx_stats { + struct cdp_delay_stats swq_delay; + struct cdp_delay_stats hwtx_delay; + struct cdp_delay_stats intfrm_delay; + uint64_t success_cnt; + uint64_t comp_fail_cnt; + uint64_t swdrop_cnt[TX_MAX_DROP]; + uint64_t tqm_status_cnt[CDP_MAX_TX_TQM_STATUS]; + uint64_t htt_status_cnt[CDP_MAX_TX_HTT_STATUS]; +}; + +/* + * struct cdp_tid_tx_stats + * @to_stack_delay: Time taken between ring reap to indication to network stack + * @intfrm_delay: Interframe rx delay + * @delivered_cnt: Total packets indicated to stack + * @intrabss_cnt: Rx total intraBSS frames + * @msdu_cnt: number of msdu received from HW + * @mcast_msdu_cnt: Num Mcast Msdus received from HW in Rx + * @bcast_msdu_cnt: Num Bcast Msdus received from HW in Rx + * @fail_cnt: Rx deliver drop counters + */ +struct cdp_tid_rx_stats { + struct cdp_delay_stats to_stack_delay; + struct cdp_delay_stats intfrm_delay; + uint64_t delivered_to_stack; + uint64_t intrabss_cnt; + uint64_t msdu_cnt; + uint64_t mcast_msdu_cnt; + uint64_t bcast_msdu_cnt; + uint64_t fail_cnt[RX_MAX_DROP]; +}; + +/* + * struct cdp_tid_stats + * @ingress_stack: Total packets received from linux stack + * @osif_drop: drops in osif layer + * @tid_tx_stats: transmit counters per tid + * @tid_rx_stats: receive counters per tid + */ +struct cdp_tid_stats { + uint64_t ingress_stack; + uint64_t osif_drop; + struct cdp_tid_tx_stats tid_tx_stats[CDP_MAX_TX_COMP_RINGS] + [CDP_MAX_DATA_TIDS]; + struct cdp_tid_rx_stats tid_rx_stats[CDP_MAX_RX_RINGS] + [CDP_MAX_DATA_TIDS]; +}; + +/* struct cdp_pkt_info - packet info + * @num: no of packets + * @bytes: total no of bytes + */ +struct cdp_pkt_info { + uint32_t num; + uint64_t bytes; +}; + +/* struct cdp_pkt_type - packet type + * @mcs_count: Counter array for each MCS index + */ +struct cdp_pkt_type { + uint32_t mcs_count[MAX_MCS]; +}; + +/* + * struct cdp_rx_mu - Rx MU Stats + * @ppdu_nss[SS_COUNT]: Packet Count in spatial streams + * @mpdu_cnt_fcs_ok: Rx success mpdu count + * @mpdu_cnt_fcs_err: Rx fail mpdu count + * @cdp_pkt_type: counter array for each MCS index + */ +struct cdp_rx_mu { + uint32_t ppdu_nss[SS_COUNT]; + uint32_t mpdu_cnt_fcs_ok; + uint32_t mpdu_cnt_fcs_err; + struct cdp_pkt_type ppdu; +}; + +/* struct cdp_tx_pkt_info - tx packet info + * num_msdu - successful msdu + * num_mpdu - successful mpdu from compltn common + * mpdu_tried - mpdu tried + * + * tx packet info counter field for mpdu success/tried and msdu + */ +struct cdp_tx_pkt_info { + uint32_t num_msdu; + uint32_t num_mpdu; + uint32_t mpdu_tried; +}; + +#ifdef FEATURE_TSO_STATS +/** + * struct cdp_tso_seg_histogram - Segment histogram for TCP Packets + * @segs_1: packets with single segments + * @segs_2_5: packets with 2-5 segments + * @segs_6_10: packets with 6-10 segments + * @segs_11_15: packets with 11-15 segments + * @segs_16_20: packets with 16-20 segments + * @segs_20_plus: packets with 20 plus segments + */ +struct cdp_tso_seg_histogram { + uint64_t segs_1; + uint64_t segs_2_5; + uint64_t segs_6_10; + uint64_t segs_11_15; + uint64_t segs_16_20; + uint64_t segs_20_plus; +}; + +/** + * struct cdp_tso_packet_info - Stats for TSO segments within a TSO packet + * @tso_seg: TSO Segment information + * @num_seg: Number of segments + * @tso_packet_len: Size of the tso packet + * @tso_seg_idx: segment number + */ +struct cdp_tso_packet_info { + struct qdf_tso_seg_t tso_seg[CDP_MAX_TSO_SEGMENTS]; + uint8_t num_seg; + size_t tso_packet_len; + uint32_t tso_seg_idx; +}; + +/** + * struct cdp_tso_info - stats for tso packets + * @tso_packet_info: TSO packet information + */ +struct cdp_tso_info { + struct cdp_tso_packet_info tso_packet_info[CDP_MAX_TSO_PACKETS]; +}; +#endif /* FEATURE_TSO_STATS */ + +/** + * struct cdp_tso_stats - TSO stats information + * @num_tso_pkts: Total number of TSO Packets + * @tso_comp: Total tso packet completions + * @dropped_host: TSO packets dropped by host + * @tso_no_mem_dropped: TSO packets dropped by host due to descriptor + unavailablity + * @dropped_target: TSO packets_dropped by target + * @tso_info: Per TSO packet counters + * @seg_histogram: TSO histogram stats + */ +struct cdp_tso_stats { + struct cdp_pkt_info num_tso_pkts; + uint32_t tso_comp; + struct cdp_pkt_info dropped_host; + struct cdp_pkt_info tso_no_mem_dropped; + uint32_t dropped_target; +#ifdef FEATURE_TSO_STATS + struct cdp_tso_info tso_info; + struct cdp_tso_seg_histogram seg_histogram; +#endif /* FEATURE_TSO_STATS */ +}; + +#define CDP_PEER_STATS_START 0 + +enum cdp_peer_stats_type { + cdp_peer_stats_min = CDP_PEER_STATS_START, + + /* Tx types */ + cdp_peer_tx_ucast = cdp_peer_stats_min, + cdp_peer_tx_mcast, + cdp_peer_tx_rate, + cdp_peer_tx_last_tx_rate, + cdp_peer_tx_inactive_time, + cdp_peer_tx_ratecode, + cdp_peer_tx_flags, + cdp_peer_tx_power, + + /* Rx types */ + cdp_peer_rx_rate, + cdp_peer_rx_last_rx_rate, + cdp_peer_rx_ratecode, + cdp_peer_rx_ucast, + cdp_peer_rx_flags, + cdp_peer_rx_avg_rssi, + cdp_peer_stats_max, +}; + +/* + * The max size of cdp_peer_stats_param_t is limited to 16 bytes. + * If the buffer size is exceeding this size limit, + * dp_txrx_get_peer_stats is to be used instead. + */ +typedef union cdp_peer_stats_buf { + /* Tx types */ + struct cdp_pkt_info tx_ucast; + struct cdp_pkt_info tx_mcast; + uint32_t tx_rate; + uint32_t last_tx_rate; + uint32_t tx_inactive_time; + uint32_t tx_flags; + uint32_t tx_power; + uint16_t tx_ratecode; + + /* Rx types */ + struct cdp_pkt_info rx_ucast; + uint32_t rx_rate; + uint32_t last_rx_rate; + uint32_t rx_ratecode; + uint32_t rx_flags; + uint32_t rx_avg_rssi; +} cdp_peer_stats_param_t; /* Max union size 16 bytes */ + +/** + * enum cdp_protocol_trace - Protocols supported by per-peer protocol trace + * @CDP_TRACE_ICMP: ICMP packets + * @CDP_TRACE_EAP: EAPOL packets + * @CDP_TRACE_ARP: ARP packets + * + * Enumeration of all protocols supported by per-peer protocol trace feature + */ +enum cdp_protocol_trace { + CDP_TRACE_ICMP, + CDP_TRACE_EAP, + CDP_TRACE_ARP, + CDP_TRACE_MAX +}; + +/** + * struct protocol_trace_count - type of count on per-peer protocol trace + * @egress_cnt: how many packets go out of host driver + * @ingress_cnt: how many packets come into the host driver + * + * Type of count on per-peer protocol trace + */ +struct protocol_trace_count { + uint16_t egress_cnt; + uint16_t ingress_cnt; +}; +/* struct cdp_tx_stats - tx stats + * @cdp_pkt_info comp_pkt: Pkt Info for which completions were received + * @cdp_pkt_info ucast: Unicast Packet Count + * @cdp_pkt_info mcast: Multicast Packet Count + * @cdp_pkt_info bcast: Broadcast Packet Count + * @cdp_pkt_info nawds_mcast: NAWDS Multicast Packet Count + * @cdp_pkt_info tx_success: Successful Tx Packets + * @nawds_mcast_drop: NAWDS Multicast Drop Count + * @protocol_trace_cnt: per-peer protocol counter + * @tx_failed: Total Tx failure + * @ofdma: Total Packets as ofdma + * @stbc: Packets in STBC + * @ldpc: Packets in LDPC + * @retries: Packet retries + * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation + * @amsdu_cnt: Number of MSDUs part of AMSDU + * @tx_rate: Tx Rate + * @last_tx_rate: Last tx rate for unicast packets + * @last_tx_rate_mcs: Tx rate mcs for unicast packets + * @mcast_last_tx_rate: Last tx rate for multicast packets + * @mcast_last_tx_rate_mcs: Last tx rate mcs for multicast + * @last_per: Tx Per + * @rnd_avg_tx_rate: Rounded average tx rate + * @avg_tx_rate: Average TX rate + * @last_ack_rssi: RSSI of last acked packet + * @tx_bytes_success_last: last Tx success bytes + * @tx_data_success_last: last Tx success data + * @tx_byte_rate: Bytes Trasmitted in last one sec + * @tx_data_rate: Data Transmitted in last one sec + * @sgi_count[MAX_GI]: SGI count + * @nss[SS_COUNT]: Packet count for different num_spatial_stream values + * @bw[MAX_BW]: Packet Count for different bandwidths + * @wme_ac_type[WME_AC_MAX]: Wireless Multimedia type Count + * @excess_retries_per_ac[WME_AC_MAX]: Wireless Multimedia type Count + * @dot11_tx_pkts: dot11 tx packets + * @fw_rem: Discarded by firmware + * @fw_rem_notx: firmware_discard_untransmitted + * @fw_rem_tx: firmware_discard_transmitted + * @age_out: aged out in mpdu/msdu queues + * @fw_reason1: discarded by firmware reason 1 + * @fw_reason2: discarded by firmware reason 2 + * @fw_reason3: discarded by firmware reason 3 + * @mcs_count: MCS Count + * @an_tx_cnt: ald tx count + * @an_tx_rates_used: ald rx rate used + * @an_tx_bytes: ald tx bytes + * @ald_txcount: ald tx count + * @ald_lastper: ald last PER + * @ald_max4msframelen: ald max frame len + * @an_tx_ratecount: ald tx ratecount + * @ald_retries: ald retries + * @ald_ac_nobufs: #buffer overflows per node per AC + * @ald_ac_excretries: #pkts dropped after excessive retries per node per AC + * @rssi_chain: rssi chain + * @inactive_time: inactive time in secs + * @tx_flags: tx flags + * @tx_power: Tx power latest + * @is_tx_no_ack: no ack received + * @tx_ratecode: Tx rate code of last frame + * @is_tx_nodefkey: tx failed 'cuz no defkey + * @is_tx_noheadroom: tx failed 'cuz no space + * @is_crypto_enmicfail: + * @is_tx_nonode: tx failed for no node + * @is_tx_unknownmgt: tx of unknown mgt frame + * @is_tx_badcipher: tx failed 'cuz key type + * @ampdu_cnt: completion of aggregation + * @non_ampdu_cnt: tx completion not aggregated + * @failed_retry_count: packets failed due to retry above 802.11 retry limit + * @retry_count: packets successfully send after one or more retry + * @multiple_retry_count: packets successfully sent after more than one retry + * @transmit_type: pkt info for tx transmit type + * @mu_group_id: mumimo mu group id + * @ru_start: RU start index + * @ru_tones: RU tones size + * @ru_loc: pkt info for RU location 26/ 52/ 106/ 242/ 484 counter + * @num_ppdu_cookie_valid : Number of comp received with valid ppdu cookie + */ +struct cdp_tx_stats { + struct cdp_pkt_info comp_pkt; + struct cdp_pkt_info ucast; + struct cdp_pkt_info mcast; + struct cdp_pkt_info bcast; + struct cdp_pkt_info nawds_mcast; +#ifdef VDEV_PEER_PROTOCOL_COUNT + struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX]; +#endif + struct cdp_pkt_info tx_success; + uint32_t nawds_mcast_drop; + uint32_t tx_failed; + uint32_t ofdma; + uint32_t stbc; + uint32_t ldpc; + uint32_t retries; + uint32_t non_amsdu_cnt; + uint32_t amsdu_cnt; + uint32_t tx_rate; + uint32_t last_tx_rate; + uint32_t last_tx_rate_mcs; + uint32_t mcast_last_tx_rate; + uint32_t mcast_last_tx_rate_mcs; + uint32_t last_per; + uint64_t rnd_avg_tx_rate; + uint64_t avg_tx_rate; + uint32_t last_ack_rssi; + uint32_t tx_bytes_success_last; + uint32_t tx_data_success_last; + uint32_t tx_byte_rate; + uint32_t tx_data_rate; + uint32_t tx_data_ucast_last; + uint32_t tx_data_ucast_rate; + struct cdp_pkt_type pkt_type[DOT11_MAX]; + uint32_t sgi_count[MAX_GI]; + + uint32_t nss[SS_COUNT]; + + uint32_t bw[MAX_BW]; + + uint32_t wme_ac_type[WME_AC_MAX]; + + uint32_t excess_retries_per_ac[WME_AC_MAX]; + struct cdp_pkt_info dot11_tx_pkts; + + struct { + struct cdp_pkt_info fw_rem; + uint32_t fw_rem_notx; + uint32_t fw_rem_tx; + uint32_t age_out; + uint32_t fw_reason1; + uint32_t fw_reason2; + uint32_t fw_reason3; + } dropped; + + + uint32_t fw_tx_cnt; + uint32_t fw_tx_bytes; + uint32_t fw_txcount; + uint32_t fw_max4msframelen; + uint32_t fw_ratecount; + + uint32_t ac_nobufs[WME_AC_MAX]; + uint32_t rssi_chain[WME_AC_MAX]; + uint32_t inactive_time; + + uint32_t tx_flags; + uint32_t tx_power; + + /* MSDUs which the target sent but couldn't get an ack for */ + struct cdp_pkt_info is_tx_no_ack; + uint16_t tx_ratecode; + + /*add for peer and upadted from ppdu*/ + uint32_t ampdu_cnt; + uint32_t non_ampdu_cnt; + uint32_t failed_retry_count; + uint32_t retry_count; + uint32_t multiple_retry_count; + uint32_t last_tx_rate_used; + + struct cdp_tx_pkt_info transmit_type[MAX_TRANSMIT_TYPES]; + uint32_t mu_group_id[MAX_MU_GROUP_ID]; + uint32_t ru_start; + uint32_t ru_tones; + struct cdp_tx_pkt_info ru_loc[MAX_RU_LOCATIONS]; + + uint32_t num_ppdu_cookie_valid; + uint32_t no_ack_count[QDF_PROTO_SUBTYPE_MAX]; +}; + +/* struct cdp_rx_stats - rx Level Stats + * @to_stack: Total packets sent up the stack + * @rcvd_reo[CDP_MAX_RX_RINGS]: Packets received on the reo ring + * @unicast: Total unicast packets + * @multicast: Total multicast packets + * @bcast: Broadcast Packet Count + * @raw: Raw Pakets received + * @nawds_mcast_drop: Total multicast packets + * @mec_drop: Total MEC packets dropped + * @pkts: Intra BSS packets received + * @fail: Intra BSS packets failed + * @mdns_no_fwd: Intra BSS MDNS packets not forwarded + * @protocol_trace_cnt: per-peer protocol counters + * @mic_err: Rx MIC errors CCMP + * @decrypt_err: Rx Decryption Errors CRC + * @fcserr: rx MIC check failed (CCMP) + * @wme_ac_type[WME_AC_MAX]: Wireless Multimedia type Count + * @reception_type[MAX_RECEPTION_TYPES]: Reception type os packets + * @mcs_count[MAX_MCS]: mcs count + * @sgi_count[MAX_GI]: sgi count + * @nss[SS_COUNT]: packet count in spatiel Streams + * @ppdu_nss[SS_COUNT]: PPDU packet count in spatial streams + * @mpdu_cnt_fcs_ok: SU Rx success mpdu count + * @mpdu_cnt_fcs_err: SU Rx fail mpdu count + * @su_ax_ppdu_cnt: SU Rx packet count + * @ppdu_cnt[MAX_RECEPTION_TYPES]: PPDU packet count in reception type + * @rx_mu[RX_TYPE_MU_MAX]: Rx MU stats + * @bw[MAX_BW]: Packet Count in different bandwidths + * @non_ampdu_cnt: Number of MSDUs with no MPDU level aggregation + * @ampdu_cnt: Number of MSDUs part of AMSPU + * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation + * @amsdu_cnt: Number of MSDUs part of AMSDU + * @bar_recv_cnt: Number of bar received + * @avg_rssi: Average rssi + * @rx_rate: Rx rate + * @last_rx_rate: Previous rx rate + * @rnd_avg_rx_rate: Rounded average rx rate + * @avg_rx_rate: Average Rx rate + * @dot11_rx_pkts: dot11 rx packets + * @rx_bytes_last: last Rx success bytes + * @rx_data_last: last rx success data + * @rx_byte_rate: bytes received in last one sec + * @rx_data_rate: data received in last one sec + * @rx_retries: retries of packet in rx + * @rx_mpdus: mpdu in rx + * @rx_ppdus: ppdu in rx + * @is_rx_tooshort: tooshort + * @is_rx_decap: rx decap + * @rx_ccmpmic: rx MIC check failed (CCMP) + * @rx_tkipmic: rx MIC check failed (TKIP) + * @rx_tkipicv: rx ICV check failed (TKIP) + * @rx_wpimic: rx MIC check failed (WPI) + * @rx_wepfail: rx wep processing failed + * @rx_aggr: aggregation on rx + * @rx_discard: packets discard in rx + * @rx_ratecode: Rx rate code of last frame + * @rx_flags: rx flags + * @rx_rssi_measured_time: Time at which rssi is measured + * @rssi: RSSI of received signal + * @last_rssi: Previous rssi + * @multipass_rx_pkt_drop: Dropped multipass rx pkt + * @rx_mpdu_cnt: rx mpdu count per MCS rate + */ +struct cdp_rx_stats { + struct cdp_pkt_info to_stack; + struct cdp_pkt_info rcvd_reo[CDP_MAX_RX_RINGS]; + struct cdp_pkt_info unicast; + struct cdp_pkt_info multicast; + struct cdp_pkt_info bcast; + struct cdp_pkt_info raw; + uint32_t nawds_mcast_drop; + struct cdp_pkt_info mec_drop; + struct { + struct cdp_pkt_info pkts; + struct cdp_pkt_info fail; + uint32_t mdns_no_fwd; + } intra_bss; +#ifdef VDEV_PEER_PROTOCOL_COUNT + struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX]; +#endif + + struct { + uint32_t mic_err; + uint32_t decrypt_err; + uint32_t fcserr; + } err; + + uint32_t wme_ac_type[WME_AC_MAX]; + uint32_t reception_type[MAX_RECEPTION_TYPES]; + struct cdp_pkt_type pkt_type[DOT11_MAX]; + uint32_t sgi_count[MAX_GI]; + uint32_t nss[SS_COUNT]; + uint32_t ppdu_nss[SS_COUNT]; + uint32_t mpdu_cnt_fcs_ok; + uint32_t mpdu_cnt_fcs_err; + struct cdp_pkt_type su_ax_ppdu_cnt; + uint32_t ppdu_cnt[MAX_RECEPTION_TYPES]; + struct cdp_rx_mu rx_mu[RX_TYPE_MU_MAX]; + uint32_t bw[MAX_BW]; + uint32_t non_ampdu_cnt; + uint32_t ampdu_cnt; + uint32_t non_amsdu_cnt; + uint32_t amsdu_cnt; + uint32_t bar_recv_cnt; + uint32_t avg_rssi; + uint32_t rx_rate; + uint32_t last_rx_rate; + uint32_t rnd_avg_rx_rate; + uint32_t avg_rx_rate; + struct cdp_pkt_info dot11_rx_pkts; + + uint32_t rx_bytes_success_last; + uint32_t rx_data_success_last; + uint32_t rx_byte_rate; + uint32_t rx_data_rate; + + uint32_t rx_retries; + uint32_t rx_mpdus; + uint32_t rx_ppdus; + + /*add for peer updated for ppdu*/ + uint32_t rx_aggr; + uint32_t rx_discard; + uint32_t rx_ratecode; + uint32_t rx_flags; + uint32_t rx_rssi_measured_time; + uint8_t rssi; + uint8_t last_rssi; + uint32_t multipass_rx_pkt_drop; + uint32_t rx_mpdu_cnt[MAX_MCS]; +}; + +/* struct cdp_tx_ingress_stats - Tx ingress Stats + * @rcvd: Total packets received for transmission + * @processed: Tx packets processed + * @inspect_pkts: Total packets passed to inspect handler + * @nawds_mcast: NAWDS Multicast Packet Count + * @bcast: Number of broadcast packets + * @raw_pkt: Total Raw packets + * @dma_map_error: DMA map error + * @num_seg: No of segments in TSO packets + * @tso_pkt:total no of TSO packets + * @non_tso_pkts: non - TSO packets + * @dropped_host: TSO packets dropped by host + * @dropped_target:TSO packets dropped by target + * @sg_pkt: Total scatter gather packets + * @non_sg_pkts: non SG packets + * @dropped_host: SG packets dropped by host + * @dropped_target: SG packets dropped by target + * @dma_map_error: Dma map error + * @mcast_pkt: total no of multicast conversion packets + * @dropped_map_error: packets dropped due to map error + * @dropped_self_mac: packets dropped due to self Mac address + * @dropped_send_fail: Packets dropped due to send fail + * @ucast: total unicast packets transmitted + * @fail_seg_alloc: Segment allocation failure + * @clone_fail: NBUF clone failure + * @dropped_pkt: Total scatter gather packets + * @desc_na: Desc Not Available + * @ring_full: ring full + * @enqueue_fail: hw enqueue fail + * @dma_error: dma fail + * @res_full: Resource Full: Congestion Control + * @exception_fw: packets sent to fw + * @completion_fw: packets completions received from fw + * @cce_classified:Number of packets classified by CCE + * @cce_classified_raw:Number of raw packets classified by CCE + * @sniffer_rcvd: Number of packets received with ppdu cookie + */ +struct cdp_tx_ingress_stats { + struct cdp_pkt_info rcvd; + struct cdp_pkt_info processed; + struct cdp_pkt_info reinject_pkts; + struct cdp_pkt_info inspect_pkts; + struct cdp_pkt_info nawds_mcast; + struct cdp_pkt_info bcast; + + struct { + struct cdp_pkt_info raw_pkt; + uint32_t dma_map_error; + uint32_t invalid_raw_pkt_datatype; + } raw; + + /* Scatter Gather packet info */ + struct { + struct cdp_pkt_info sg_pkt; + struct cdp_pkt_info non_sg_pkts; + struct cdp_pkt_info dropped_host; + uint32_t dropped_target; + uint32_t dma_map_error; + } sg; + + /* Multicast Enhancement packets info */ + struct { + struct cdp_pkt_info mcast_pkt; + uint32_t dropped_map_error; + uint32_t dropped_self_mac; + uint32_t dropped_send_fail; + uint32_t ucast; + uint32_t fail_seg_alloc; + uint32_t clone_fail; + } mcast_en; + + /* Packets dropped on the Tx side */ + struct { + struct cdp_pkt_info dropped_pkt; + struct cdp_pkt_info desc_na; + uint32_t ring_full; + uint32_t enqueue_fail; + uint32_t dma_error; + uint32_t res_full; + /* headroom insufficient */ + uint32_t headroom_insufficient; + } dropped; + + /* Mesh packets info */ + struct { + uint32_t exception_fw; + uint32_t completion_fw; + } mesh; + + uint32_t cce_classified; + uint32_t cce_classified_raw; + struct cdp_pkt_info sniffer_rcvd; + struct cdp_tso_stats tso_stats; +}; + +/* struct cdp_vdev_stats - vdev stats structure + * @tx_i: ingress tx stats + * @tx: cdp tx stats + * @rx: cdp rx stats + * @tso_stats: tso stats + */ +struct cdp_vdev_stats { + struct cdp_tx_ingress_stats tx_i; + struct cdp_tx_stats tx; + struct cdp_rx_stats rx; + struct cdp_tso_stats tso_stats; +}; + +/* struct cdp_peer_stats - peer stats structure + * @tx: cdp tx stats + * @rx: cdp rx stats + */ +struct cdp_peer_stats { + /* CDP Tx Stats */ + struct cdp_tx_stats tx; + /* CDP Rx Stats */ + struct cdp_rx_stats rx; +}; + +/* struct cdp_interface_peer_stats - interface structure for txrx peer stats + * @peer_mac: peer mac address + * @vdev_id : vdev_id for the peer + * @last_peer_tx_rate: peer tx rate for last transmission + * @peer_tx_rate: tx rate for current transmission + * @peer_rssi: current rssi value of peer + * @tx_packet_count: tx packet count + * @rx_packet_count: rx packet count + * @tx_byte_count: tx byte count + * @rx_byte_count: rx byte count + * @per: per error rate + * @ack_rssi: RSSI of the last ack received + * @rssi_changed: denotes rssi is changed + */ +struct cdp_interface_peer_stats { + uint8_t peer_mac[QDF_MAC_ADDR_SIZE]; + uint8_t vdev_id; + uint8_t rssi_changed; + uint32_t last_peer_tx_rate; + uint32_t peer_tx_rate; + uint32_t peer_rssi; + uint32_t tx_packet_count; + uint32_t rx_packet_count; + uint32_t tx_byte_count; + uint32_t rx_byte_count; + uint32_t per; + uint32_t ack_rssi; +}; + +/* Tx completions per interrupt */ +struct cdp_hist_tx_comp { + uint32_t pkts_1; + uint32_t pkts_2_20; + uint32_t pkts_21_40; + uint32_t pkts_41_60; + uint32_t pkts_61_80; + uint32_t pkts_81_100; + uint32_t pkts_101_200; + uint32_t pkts_201_plus; +}; + +/* Rx ring descriptors reaped per interrupt */ +struct cdp_hist_rx_ind { + uint32_t pkts_1; + uint32_t pkts_2_20; + uint32_t pkts_21_40; + uint32_t pkts_41_60; + uint32_t pkts_61_80; + uint32_t pkts_81_100; + uint32_t pkts_101_200; + uint32_t pkts_201_plus; +}; + +struct cdp_htt_tlv_hdr { + /* BIT [11 : 0] :- tag + * BIT [23 : 12] :- length + * BIT [31 : 24] :- reserved + */ + uint32_t tag__length; +}; + +#define HTT_STATS_SUBTYPE_MAX 16 + +struct cdp_htt_rx_pdev_fw_stats_tlv { + struct cdp_htt_tlv_hdr tlv_hdr; + + /* BIT [ 7 : 0] :- mac_id + * BIT [31 : 8] :- reserved + */ + uint32_t mac_id__word; + /* Num PPDU status processed from HW */ + uint32_t ppdu_recvd; + /* Num MPDU across PPDUs with FCS ok */ + uint32_t mpdu_cnt_fcs_ok; + /* Num MPDU across PPDUs with FCS err */ + uint32_t mpdu_cnt_fcs_err; + /* Num MSDU across PPDUs */ + uint32_t tcp_msdu_cnt; + /* Num MSDU across PPDUs */ + uint32_t tcp_ack_msdu_cnt; + /* Num MSDU across PPDUs */ + uint32_t udp_msdu_cnt; + /* Num MSDU across PPDUs */ + uint32_t other_msdu_cnt; + /* Num MPDU on FW ring indicated */ + uint32_t fw_ring_mpdu_ind; + /* Num MGMT MPDU given to protocol */ + uint32_t fw_ring_mgmt_subtype[HTT_STATS_SUBTYPE_MAX]; + /* Num ctrl MPDU given to protocol */ + uint32_t fw_ring_ctrl_subtype[HTT_STATS_SUBTYPE_MAX]; + /* Num mcast data packet received */ + uint32_t fw_ring_mcast_data_msdu; + /* Num broadcast data packet received */ + uint32_t fw_ring_bcast_data_msdu; + /* Num unicat data packet received */ + uint32_t fw_ring_ucast_data_msdu; + /* Num null data packet received */ + uint32_t fw_ring_null_data_msdu; + /* Num MPDU on FW ring dropped */ + uint32_t fw_ring_mpdu_drop; + + /* Num buf indication to offload */ + uint32_t ofld_local_data_ind_cnt; + /* Num buf recycle from offload */ + uint32_t ofld_local_data_buf_recycle_cnt; + /* Num buf indication to data_rx */ + uint32_t drx_local_data_ind_cnt; + /* Num buf recycle from data_rx */ + uint32_t drx_local_data_buf_recycle_cnt; + /* Num buf indication to protocol */ + uint32_t local_nondata_ind_cnt; + /* Num buf recycle from protocol */ + uint32_t local_nondata_buf_recycle_cnt; + + /* Num buf fed */ + uint32_t fw_status_buf_ring_refill_cnt; + /* Num ring empty encountered */ + uint32_t fw_status_buf_ring_empty_cnt; + /* Num buf fed */ + uint32_t fw_pkt_buf_ring_refill_cnt; + /* Num ring empty encountered */ + uint32_t fw_pkt_buf_ring_empty_cnt; + /* Num buf fed */ + uint32_t fw_link_buf_ring_refill_cnt; + /* Num ring empty encountered */ + uint32_t fw_link_buf_ring_empty_cnt; + + /* Num buf fed */ + uint32_t host_pkt_buf_ring_refill_cnt; + /* Num ring empty encountered */ + uint32_t host_pkt_buf_ring_empty_cnt; + /* Num buf fed */ + uint32_t mon_pkt_buf_ring_refill_cnt; + /* Num ring empty encountered */ + uint32_t mon_pkt_buf_ring_empty_cnt; + /* Num buf fed */ + uint32_t mon_status_buf_ring_refill_cnt; + /* Num ring empty encountered */ + uint32_t mon_status_buf_ring_empty_cnt; + /* Num buf fed */ + uint32_t mon_desc_buf_ring_refill_cnt; + /* Num ring empty encountered */ + uint32_t mon_desc_buf_ring_empty_cnt; + /* Num buf fed */ + uint32_t mon_dest_ring_update_cnt; + /* Num ring full encountered */ + uint32_t mon_dest_ring_full_cnt; + + /* Num rx suspend is attempted */ + uint32_t rx_suspend_cnt; + /* Num rx suspend failed */ + uint32_t rx_suspend_fail_cnt; + /* Num rx resume attempted */ + uint32_t rx_resume_cnt; + /* Num rx resume failed */ + uint32_t rx_resume_fail_cnt; + /* Num rx ring switch */ + uint32_t rx_ring_switch_cnt; + /* Num rx ring restore */ + uint32_t rx_ring_restore_cnt; + /* Num rx flush issued */ + uint32_t rx_flush_cnt; +}; + +/* == TX PDEV STATS == */ +struct cdp_htt_tx_pdev_stats_cmn_tlv { + struct cdp_htt_tlv_hdr tlv_hdr; + + /* BIT [ 7 : 0] :- mac_id + * BIT [31 : 8] :- reserved + */ + uint32_t mac_id__word; + /* Num queued to HW */ + uint32_t hw_queued; + /* Num PPDU reaped from HW */ + uint32_t hw_reaped; + /* Num underruns */ + uint32_t underrun; + /* Num HW Paused counter. */ + uint32_t hw_paused; + /* Num HW flush counter. */ + uint32_t hw_flush; + /* Num HW filtered counter. */ + uint32_t hw_filt; + /* Num PPDUs cleaned up in TX abort */ + uint32_t tx_abort; + /* Num MPDUs requed by SW */ + uint32_t mpdu_requed; + /* excessive retries */ + uint32_t tx_xretry; + /* Last used data hw rate code */ + uint32_t data_rc; + /* frames dropped due to excessive sw retries */ + uint32_t mpdu_dropped_xretry; + /* illegal rate phy errors */ + uint32_t illgl_rate_phy_err; + /* wal pdev continuous xretry */ + uint32_t cont_xretry; + /* wal pdev continuous xretry */ + uint32_t tx_timeout; + /* wal pdev resets */ + uint32_t pdev_resets; + /* PhY/BB underrun */ + uint32_t phy_underrun; + /* MPDU is more than txop limit */ + uint32_t txop_ovf; + /* Number of Sequences posted */ + uint32_t seq_posted; + /* Number of Sequences failed queueing */ + uint32_t seq_failed_queueing; + /* Number of Sequences completed */ + uint32_t seq_completed; + /* Number of Sequences restarted */ + uint32_t seq_restarted; + /* Number of MU Sequences posted */ + uint32_t mu_seq_posted; + /* Number of time HW ring is paused between seq switch within ISR */ + uint32_t seq_switch_hw_paused; + /* Number of times seq continuation in DSR */ + uint32_t next_seq_posted_dsr; + /* Number of times seq continuation in ISR */ + uint32_t seq_posted_isr; + /* Number of seq_ctrl cached. */ + uint32_t seq_ctrl_cached; + /* Number of MPDUs successfully transmitted */ + uint32_t mpdu_count_tqm; + /* Number of MSDUs successfully transmitted */ + uint32_t msdu_count_tqm; + /* Number of MPDUs dropped */ + uint32_t mpdu_removed_tqm; + /* Number of MSDUs dropped */ + uint32_t msdu_removed_tqm; + /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT (Reset,channel change) */ + uint32_t mpdus_sw_flush; + /* Num MPDUs filtered by HW, all filter condition (TTL expired) */ + uint32_t mpdus_hw_filter; + /* Num MPDUs truncated by PDG (TXOP, TBTT, PPDU_duration based on rate, dyn_bw) */ + uint32_t mpdus_truncated; + /* Num MPDUs that was tried but didn't receive ACK or BA */ + uint32_t mpdus_ack_failed; + /* Num MPDUs that was dropped due to expiry (MSDU TTL). */ + uint32_t mpdus_expired; + /* Num MPDUs that was retried within seq_ctrl (MGMT/LEGACY) */ + uint32_t mpdus_seq_hw_retry; + /* Num of TQM acked cmds processed */ + uint32_t ack_tlv_proc; + /* coex_abort_mpdu_cnt valid. */ + uint32_t coex_abort_mpdu_cnt_valid; + /* coex_abort_mpdu_cnt from TX FES stats. */ + uint32_t coex_abort_mpdu_cnt; + /* Number of total PPDUs(DATA, MGMT, excludes selfgen) tried over the air (OTA) */ + uint32_t num_total_ppdus_tried_ota; + /* Number of data PPDUs tried over the air (OTA) */ + uint32_t num_data_ppdus_tried_ota; + /* Num Local control/mgmt frames (MSDUs) queued */ + uint32_t local_ctrl_mgmt_enqued; + /* local_ctrl_mgmt_freed: + * Num Local control/mgmt frames (MSDUs) done + * It includes all local ctrl/mgmt completions + * (acked, no ack, flush, TTL, etc) + */ + uint32_t local_ctrl_mgmt_freed; + /* Num Local data frames (MSDUs) queued */ + uint32_t local_data_enqued; + /* local_data_freed: + * Num Local data frames (MSDUs) done + * It includes all local data completions + * (acked, no ack, flush, TTL, etc) + */ + uint32_t local_data_freed; + + /* Num MPDUs tried by SW */ + uint32_t mpdu_tried; + /* Num of waiting seq posted in isr completion handler */ + uint32_t isr_wait_seq_posted; + uint32_t tx_active_dur_us_low; + uint32_t tx_active_dur_us_high; +}; + +struct cdp_htt_tx_pdev_stats_urrn_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + uint32_t urrn_stats[1]; /* HTT_TX_PDEV_MAX_URRN_STATS */ +}; + +/* NOTE: Variable length TLV, use length spec to infer array size */ +struct cdp_htt_tx_pdev_stats_flush_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + uint32_t flush_errs[1]; /* HTT_TX_PDEV_MAX_FLUSH_REASON_STATS */ +}; + +/* NOTE: Variable length TLV, use length spec to infer array size */ +struct cdp_htt_tx_pdev_stats_sifs_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + uint32_t sifs_status[1]; /* HTT_TX_PDEV_MAX_SIFS_BURST_STATS */ +}; + +/* NOTE: Variable length TLV, use length spec to infer array size */ +struct cdp_htt_tx_pdev_stats_phy_err_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + uint32_t phy_errs[1]; /* HTT_TX_PDEV_MAX_PHY_ERR_STATS */ +}; + +/* == RX PDEV/SOC STATS == */ +/* HTT_STATS_RX_SOC_FW_STATS_TAG */ +struct cdp_htt_rx_soc_fw_stats_tlv { + struct cdp_htt_tlv_hdr tlv_hdr; + /* Num Packets received on REO FW ring */ + uint32_t fw_reo_ring_data_msdu; + /* Num bc/mc packets indicated from fw to host */ + uint32_t fw_to_host_data_msdu_bcmc; + /* Num unicast packets indicated from fw to host */ + uint32_t fw_to_host_data_msdu_uc; + /* Num remote buf recycle from offload */ + uint32_t ofld_remote_data_buf_recycle_cnt; + /* Num remote free buf given to offload */ + uint32_t ofld_remote_free_buf_indication_cnt; +}; + +struct cdp_htt_rx_soc_fw_refill_ring_num_refill_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + /* Num total buf refilled from refill ring */ + uint32_t refill_ring_num_refill[1]; /* HTT_RX_STATS_REFILL_MAX_RING */ +}; + +struct cdp_htt_rx_pdev_fw_ring_mpdu_err_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + /* Num error MPDU for each RxDMA error type */ + uint32_t fw_ring_mpdu_err[1]; /* HTT_RX_STATS_RXDMA_MAX_ERR */ +}; + +struct cdp_htt_rx_pdev_fw_mpdu_drop_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + /* Num MPDU dropped */ + uint32_t fw_mpdu_drop[1]; /* HTT_RX_STATS_FW_DROP_REASON_MAX */ +}; + +#define HTT_STATS_PHY_ERR_MAX 43 + +struct cdp_htt_rx_pdev_fw_stats_phy_err_tlv { + struct cdp_htt_tlv_hdr tlv_hdr; + + /* BIT [ 7 : 0] :- mac_id + * BIT [31 : 8] :- reserved + */ + uint32_t mac_id__word; + /* Num of phy err */ + uint32_t total_phy_err_cnt; + /* Counts of different types of phy errs + * The mapping of PHY error types to phy_err array elements is HW dependent. + * The only currently-supported mapping is shown below: + * + * 0 phyrx_err_phy_off Reception aborted due to receiving a PHY_OFF TLV + * 1 phyrx_err_synth_off + * 2 phyrx_err_ofdma_timing + * 3 phyrx_err_ofdma_signal_parity + * 4 phyrx_err_ofdma_rate_illegal + * 5 phyrx_err_ofdma_length_illegal + * 6 phyrx_err_ofdma_restart + * 7 phyrx_err_ofdma_service + * 8 phyrx_err_ppdu_ofdma_power_drop + * 9 phyrx_err_cck_blokker + * 10 phyrx_err_cck_timing + * 11 phyrx_err_cck_header_crc + * 12 phyrx_err_cck_rate_illegal + * 13 phyrx_err_cck_length_illegal + * 14 phyrx_err_cck_restart + * 15 phyrx_err_cck_service + * 16 phyrx_err_cck_power_drop + * 17 phyrx_err_ht_crc_err + * 18 phyrx_err_ht_length_illegal + * 19 phyrx_err_ht_rate_illegal + * 20 phyrx_err_ht_zlf + * 21 phyrx_err_false_radar_ext + * 22 phyrx_err_green_field + * 23 phyrx_err_bw_gt_dyn_bw + * 24 phyrx_err_leg_ht_mismatch + * 25 phyrx_err_vht_crc_error + * 26 phyrx_err_vht_siga_unsupported + * 27 phyrx_err_vht_lsig_len_invalid + * 28 phyrx_err_vht_ndp_or_zlf + * 29 phyrx_err_vht_nsym_lt_zero + * 30 phyrx_err_vht_rx_extra_symbol_mismatch + * 31 phyrx_err_vht_rx_skip_group_id0 + * 32 phyrx_err_vht_rx_skip_group_id1to62 + * 33 phyrx_err_vht_rx_skip_group_id63 + * 34 phyrx_err_ofdm_ldpc_decoder_disabled + * 35 phyrx_err_defer_nap + * 36 phyrx_err_fdomain_timeout + * 37 phyrx_err_lsig_rel_check + * 38 phyrx_err_bt_collision + * 39 phyrx_err_unsupported_mu_feedback + * 40 phyrx_err_ppdu_tx_interrupt_rx + * 41 phyrx_err_unsupported_cbf + * 42 phyrx_err_other + */ + uint32_t phy_err[HTT_STATS_PHY_ERR_MAX]; +}; + +struct cdp_htt_rx_soc_fw_refill_ring_empty_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + /* Num ring empty encountered */ + uint32_t refill_ring_empty_cnt[1]; /* HTT_RX_STATS_REFILL_MAX_RING */ +}; + +struct cdp_htt_tx_pdev_stats { + struct cdp_htt_tx_pdev_stats_cmn_tlv cmn_tlv; + struct cdp_htt_tx_pdev_stats_urrn_tlv_v underrun_tlv; + struct cdp_htt_tx_pdev_stats_sifs_tlv_v sifs_tlv; + struct cdp_htt_tx_pdev_stats_flush_tlv_v flush_tlv; + struct cdp_htt_tx_pdev_stats_phy_err_tlv_v phy_err_tlv; +}; + +struct cdp_htt_rx_soc_stats_t { + struct cdp_htt_rx_soc_fw_stats_tlv fw_tlv; + struct cdp_htt_rx_soc_fw_refill_ring_empty_tlv_v fw_refill_ring_empty_tlv; + struct cdp_htt_rx_soc_fw_refill_ring_num_refill_tlv_v fw_refill_ring_num_refill_tlv; +}; + +struct cdp_htt_rx_pdev_stats { + struct cdp_htt_rx_soc_stats_t soc_stats; + struct cdp_htt_rx_pdev_fw_stats_tlv fw_stats_tlv; + struct cdp_htt_rx_pdev_fw_ring_mpdu_err_tlv_v fw_ring_mpdu_err_tlv; + struct cdp_htt_rx_pdev_fw_mpdu_drop_tlv_v fw_ring_mpdu_drop; + struct cdp_htt_rx_pdev_fw_stats_phy_err_tlv fw_stats_phy_err_tlv; +}; + +#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG +/* Since protocol type enumeration value is passed as CCE metadata + * to firmware, add a constant offset before passing it to firmware + */ +#define RX_PROTOCOL_TAG_START_OFFSET 128 +/* This should align with packet type enumerations in ieee80211_ioctl.h + * and wmi_unified_param.h files + */ +#define RX_PROTOCOL_TAG_MAX 24 +/* Macro that should be used to dump the statistics counter for all + * protocol types + */ +#define RX_PROTOCOL_TAG_ALL 0xff +#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */ + +#define OFDMA_NUM_RU_SIZE 7 + +#define OFDMA_NUM_USERS 37 + +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) +/* + * mac_freeze_capture_reason - capture reason counters + * @FREEZE_REASON_TM: When m_directed_ftm is enabled, this CFR data is + * captured for a Timing Measurement (TM) frame. + * @FREEZE_REASON_FTM: When m_directed_ftm is enabled, this CFR data is + * captured for a Fine Timing Measurement (FTM) frame. + * @FREEZE_REASON_ACK_RESP_TO_TM_FTM: When m_all_ftm_ack is enabled, this CFR + * data is captured for an ACK received for the FTM/TM frame sent to a station. + * @FREEZE_REASON_TA_RA_TYPE_FILTER: When m_ta_ra_filter is enabled, this CFR + * data is captured for a PPDU received,since the CFR TA_RA filter is met. + * @FREEZE_REASON_NDPA_NDP: When m_ndpa_ndp_directed(or)m_ndpa_ndp_all is + * enabled, this CFR data is captured for an NDP frame received. + * @FREEZE_REASON_ALL_PACKET: When m_all_packet is enabled, this CFR data is + * captured for an incoming PPDU. + */ +enum mac_freeze_capture_reason { + FREEZE_REASON_TM = 0, + FREEZE_REASON_FTM, + FREEZE_REASON_ACK_RESP_TO_TM_FTM, + FREEZE_REASON_TA_RA_TYPE_FILTER, + FREEZE_REASON_NDPA_NDP, + FREEZE_REASON_ALL_PACKET, + FREEZE_REASON_MAX, +}; + +/* + * chan_capture_status: capture status counters + * @CAPTURE_IDLE: CFR data is not captured, since VCSR setting for CFR/RCC is + * not enabled. + * @CAPTURE_BUSY: CFR data is not available, since previous channel + * upload is in progress + * @CAPTURE_ACTIVE: CFR data is captured in HW registers + * @CAPTURE_NO_BUFFER: CFR data is not captured, since no buffer is available + * in IPC ring to DMA CFR data + */ +enum chan_capture_status { + CAPTURE_IDLE = 0, + CAPTURE_BUSY, + CAPTURE_ACTIVE, + CAPTURE_NO_BUFFER, + CAPTURE_MAX, +}; + +/* struct cdp_cfr_rcc_stats - CFR RCC debug statistics + * @bb_captured_channel_cnt: No. of PPDUs for which MAC sent Freeze TLV to PHY + * @bb_captured_timeout_cnt: No. of PPDUs for which CFR filter criteria matched + * but MAC did not send Freeze TLV to PHY as time exceeded freeze tlv delay + * count threshold + * @rx_loc_info_valid_cnt: No. of PPDUs for which PHY could find a valid buffer + * in ucode IPC ring + * @chan_capture_status[]: capture status counters + * [0] - No. of PPDUs with capture status CAPTURE_IDLE + * [1] - No. of PPDUs with capture status CAPTURE_BUSY + * [2] - No. of PPDUs with capture status CAPTURE_ACTIVE + * [3] - No. of PPDUs with capture status CAPTURE_NO_BUFFER + * @reason_cnt[]: capture reason counters + * [0] - No. PPDUs filtered due to freeze_reason_TM + * [1] - No. PPDUs filtered due to freeze_reason_FTM + * [2] - No. PPDUs filtered due to freeze_reason_ACK_resp_to_TM_FTM + * [3] - No. PPDUs filtered due to freeze_reason_TA_RA_TYPE_FILTER + * [4] - No. PPDUs filtered due to freeze_reason_NDPA_NDP + * [5] - No. PPDUs filtered due to freeze_reason_ALL_PACKET + */ +struct cdp_cfr_rcc_stats { + uint64_t bb_captured_channel_cnt; + uint64_t bb_captured_timeout_cnt; + uint64_t rx_loc_info_valid_cnt; + uint64_t chan_capture_status[CAPTURE_MAX]; + uint64_t reason_cnt[FREEZE_REASON_MAX]; +}; +#else +struct cdp_cfr_rcc_stats { +}; +#endif +/* struct cdp_pdev_stats - pdev stats + * @msdu_not_done: packets dropped because msdu done bit not set + * @mec:Multicast Echo check + * @mesh_filter: Mesh Filtered packets + * @mon_rx_drop: packets dropped on monitor vap + * @wifi_parse: rxdma errors due to wifi parse error + * @mon_radiotap_update_err: not enough space to update radiotap + * @pkts: total packets replenished + * @rxdma_err: rxdma errors for replenished + * @nbuf_alloc_fail: nbuf alloc failed + * @map_err: Mapping failure + * @x86_fail: x86 failures + * @low_thresh_intrs: low threshold interrupts + * @rx_raw_pkts: Rx Raw Packets + * @mesh_mem_alloc: Mesh Rx Stats Alloc fail + * @tso_desc_cnt: TSO descriptors + * @sg_desc_cnt: SG Descriptors + * @vlan_tag_stp_cnt: Vlan tagged Stp packets in wifi parse error + * @desc_alloc_fail: desc alloc failed errors + * @ip_csum_err: ip checksum errors + * @tcp_udp_csum_err: tcp/udp checksum errors + * @buf_freelist: buffers added back in freelist + * @tx_i: Tx Ingress stats + * @tx:CDP Tx Stats + * @rx: CDP Rx Stats + * @tx_comp_histogram: Number of Tx completions per interrupt + * @rx_ind_histogram: Number of Rx ring descriptors reaped per interrupt + * @ppdu_stats_counter: ppdu stats counter + * @cdp_delayed_ba_not_recev: counter for delayed ba not received + * @htt_tx_pdev_stats: htt pdev stats for tx + * @htt_rx_pdev_stats: htt pdev stats for rx + * @data_rx_ru_size: UL ofdma data ru size counter array + * @nondata_rx_ru_size: UL ofdma non data ru size counter array + * @data_rx_ppdu: data rx ppdu counter + * @data_user: data user counter array + */ +struct cdp_pdev_stats { + struct { + uint32_t msdu_not_done; + uint32_t mec; + uint32_t mesh_filter; + uint32_t wifi_parse; + /* Monitor mode related */ + uint32_t mon_rx_drop; + uint32_t mon_radiotap_update_err; + } dropped; + + struct { + struct cdp_pkt_info pkts; + uint32_t rxdma_err; + uint32_t nbuf_alloc_fail; + uint32_t map_err; + uint32_t x86_fail; + uint32_t low_thresh_intrs; + } replenish; + + uint32_t rx_raw_pkts; + uint32_t mesh_mem_alloc; + uint32_t tso_desc_cnt; + uint32_t sg_desc_cnt; + uint32_t vlan_tag_stp_cnt; + + /* Rx errors */ + struct { + uint32_t desc_alloc_fail; + uint32_t ip_csum_err; + uint32_t tcp_udp_csum_err; + uint32_t rxdma_error; + uint32_t reo_error; + } err; + + uint32_t buf_freelist; + struct cdp_tx_ingress_stats tx_i; + struct cdp_tx_stats tx; + struct cdp_rx_stats rx; + struct cdp_hist_tx_comp tx_comp_histogram; + struct cdp_hist_rx_ind rx_ind_histogram; + uint64_t ppdu_stats_counter[CDP_PPDU_STATS_MAX_TAG]; + uint32_t cdp_delayed_ba_not_recev; + + struct cdp_htt_tx_pdev_stats htt_tx_pdev_stats; + struct cdp_htt_rx_pdev_stats htt_rx_pdev_stats; + + /* Received wdi messages from fw */ + uint32_t wdi_event[CDP_WDI_NUM_EVENTS]; + struct cdp_tid_stats tid_stats; + + /* numbers of data/nondata per RU sizes */ + struct { + uint32_t data_rx_ru_size[OFDMA_NUM_RU_SIZE]; + uint32_t nondata_rx_ru_size[OFDMA_NUM_RU_SIZE]; + uint32_t data_rx_ppdu; + uint32_t data_users[OFDMA_NUM_USERS]; + } ul_ofdma; + + struct cdp_tso_stats tso_stats; + struct cdp_cfr_rcc_stats rcc; + uint32_t peer_unauth_rx_pkt_drop; +}; + +#ifdef QCA_ENH_V3_STATS_SUPPORT +/* + * Enumeration of PDEV Configuration parameter + */ +enum _ol_ath_param_t { + OL_ATH_PARAM_TXCHAINMASK = 1, + OL_ATH_PARAM_RXCHAINMASK = 2, + OL_ATH_PARAM_AMPDU = 6, + OL_ATH_PARAM_AMPDU_LIMIT = 7, + OL_ATH_PARAM_AMPDU_SUBFRAMES = 8, + OL_ATH_PARAM_TXPOWER_LIMIT2G = 12, + OL_ATH_PARAM_TXPOWER_LIMIT5G = 13, + OL_ATH_PARAM_LDPC = 32, + OL_ATH_PARAM_VOW_EXT_STATS = 45, + OL_ATH_PARAM_DYN_TX_CHAINMASK = 73, + OL_ATH_PARAM_BURST_ENABLE = 77, + OL_ATH_PARAM_BURST_DUR = 78, + OL_ATH_PARAM_BCN_BURST = 80, + OL_ATH_PARAM_DCS = 82, +#if UMAC_SUPPORT_PERIODIC_PERFSTATS + OL_ATH_PARAM_PRDPERFSTAT_THRPUT_ENAB = 83, + OL_ATH_PARAM_PRDPERFSTAT_THRPUT_WIN = 84, + OL_ATH_PARAM_PRDPERFSTAT_THRPUT = 85, + OL_ATH_PARAM_PRDPERFSTAT_PER_ENAB = 86, + OL_ATH_PARAM_PRDPERFSTAT_PER_WIN = 87, + OL_ATH_PARAM_PRDPERFSTAT_PER = 88, +#endif + /* UMAC_SUPPORT_PERIODIC_PERFSTATS */ + OL_ATH_PARAM_TOTAL_PER = 89, + /* set manual rate for rts frame */ + OL_ATH_PARAM_RTS_CTS_RATE = 92, + /* co channel interference threshold level */ + OL_ATH_PARAM_DCS_COCH_THR = 93, + /* transmit error threshold */ + OL_ATH_PARAM_DCS_TXERR_THR = 94, + /* phy error threshold */ + OL_ATH_PARAM_DCS_PHYERR_THR = 95, + /* + * The IOCTL number is 114, it is made 114, inorder to make the IOCTL + * number same as Direct-attach IOCTL. + * Please, don't change number. This IOCTL gets the Interface code path + * it should be either DIRECT-ATTACH or OFF-LOAD. + */ + OL_ATH_PARAM_GET_IF_ID = 114, + /* Enable Acs back Ground Channel selection Scan timer in AP mode*/ + OL_ATH_PARAM_ACS_ENABLE_BK_SCANTIMEREN = 118, + /* ACS scan timer value in Seconds */ + OL_ATH_PARAM_ACS_SCANTIME = 119, + /* Negligence Delta RSSI between two channel */ + OL_ATH_PARAM_ACS_RSSIVAR = 120, + /* Negligence Delta Channel load between two channel*/ + OL_ATH_PARAM_ACS_CHLOADVAR = 121, + /* Enable Limited OBSS check */ + OL_ATH_PARAM_ACS_LIMITEDOBSS = 122, + /* Acs control flag for Scan timer */ + OL_ATH_PARAM_ACS_CTRLFLAG = 123, + /* Acs Run time Debug level*/ + OL_ATH_PARAM_ACS_DEBUGTRACE = 124, + OL_ATH_PARAM_SET_FW_HANG_ID = 137, + /* Radio type 1:11ac 0:11abgn */ + OL_ATH_PARAM_RADIO_TYPE = 138, + OL_ATH_PARAM_IGMPMLD_OVERRIDE, /* IGMP/MLD packet override */ + OL_ATH_PARAM_IGMPMLD_TID, /* IGMP/MLD packet TID no */ + OL_ATH_PARAM_ARPDHCP_AC_OVERRIDE, + OL_ATH_PARAM_NON_AGG_SW_RETRY_TH, + OL_ATH_PARAM_AGG_SW_RETRY_TH, + /* Dont change this number it as per sync with DA + Blocking certian channel from ic channel list */ + OL_ATH_PARAM_DISABLE_DFS = 144, + OL_ATH_PARAM_ENABLE_AMSDU = 145, + OL_ATH_PARAM_ENABLE_AMPDU = 146, + OL_ATH_PARAM_STA_KICKOUT_TH, + OL_ATH_PARAM_WLAN_PROF_ENABLE, + OL_ATH_PARAM_LTR_ENABLE, + OL_ATH_PARAM_LTR_AC_LATENCY_BE = 150, + OL_ATH_PARAM_LTR_AC_LATENCY_BK, + OL_ATH_PARAM_LTR_AC_LATENCY_VI, + OL_ATH_PARAM_LTR_AC_LATENCY_VO, + OL_ATH_PARAM_LTR_AC_LATENCY_TIMEOUT, + OL_ATH_PARAM_LTR_TX_ACTIVITY_TIMEOUT = 155, + OL_ATH_PARAM_LTR_SLEEP_OVERRIDE, + OL_ATH_PARAM_LTR_RX_OVERRIDE, + OL_ATH_PARAM_L1SS_ENABLE, + OL_ATH_PARAM_DSLEEP_ENABLE, + /* radar error threshold */ + OL_ATH_PARAM_DCS_RADAR_ERR_THR = 160, + /* Tx channel utilization due to AP's tx and rx */ + OL_ATH_PARAM_DCS_USERMAX_CU_THR, + /* interference detection threshold */ + OL_ATH_PARAM_DCS_INTR_DETECT_THR, + /* sampling window, default 10secs */ + OL_ATH_PARAM_DCS_SAMPLE_WINDOW, + /* debug logs enable/disable */ + OL_ATH_PARAM_DCS_DEBUG, + OL_ATH_PARAM_ANI_ENABLE = 165, + OL_ATH_PARAM_ANI_POLL_PERIOD, + OL_ATH_PARAM_ANI_LISTEN_PERIOD, + OL_ATH_PARAM_ANI_OFDM_LEVEL, + OL_ATH_PARAM_ANI_CCK_LEVEL, + OL_ATH_PARAM_DSCP_TID_MAP = 170, + OL_ATH_PARAM_TXPOWER_SCALE, + /* Phy error penalty */ + OL_ATH_PARAM_DCS_PHYERR_PENALTY, +#if ATH_SUPPORT_DSCP_OVERRIDE + /* set/get TID for sending HMMC packets */ + OL_ATH_PARAM_HMMC_DSCP_TID_MAP, + /* set/get DSCP mapping override */ + OL_ATH_PARAM_DSCP_OVERRIDE, + /* set/get HMMC-DSCP mapping override */ + OL_ATH_PARAM_HMMC_DSCP_OVERRIDE = 175, +#endif +#if ATH_RX_LOOPLIMIT_TIMER + OL_ATH_PARAM_LOOPLIMIT_NUM, +#endif + OL_ATH_PARAM_ANTENNA_GAIN_2G, + OL_ATH_PARAM_ANTENNA_GAIN_5G, + OL_ATH_PARAM_RX_FILTER, +#if ATH_SUPPORT_HYFI_ENHANCEMENTS + OL_ATH_PARAM_BUFF_THRESH = 180, + OL_ATH_PARAM_BLK_REPORT_FLOOD, + OL_ATH_PARAM_DROP_STA_QUERY, +#endif + OL_ATH_PARAM_QBOOST, + OL_ATH_PARAM_SIFS_FRMTYPE, + OL_ATH_PARAM_SIFS_UAPSD = 185, + OL_ATH_PARAM_FW_RECOVERY_ID, + OL_ATH_PARAM_RESET_OL_STATS, + OL_ATH_PARAM_AGGR_BURST, + /* Number of deauth sent in consecutive rx_peer_invalid */ + OL_ATH_PARAM_DEAUTH_COUNT, + OL_ATH_PARAM_BLOCK_INTERBSS = 190, + /* Firmware reset control for Bmiss / timeout / reset */ + OL_ATH_PARAM_FW_DISABLE_RESET, + OL_ATH_PARAM_MSDU_TTL, + OL_ATH_PARAM_PPDU_DURATION, + OL_ATH_PARAM_SET_TXBF_SND_PERIOD, + OL_ATH_PARAM_ALLOW_PROMISC = 195, + OL_ATH_PARAM_BURST_MODE, + OL_ATH_PARAM_DYN_GROUPING, + OL_ATH_PARAM_DPD_ENABLE, + OL_ATH_PARAM_DBGLOG_RATELIM, + /* firmware should intimate us about ps state change for node */ + OL_ATH_PARAM_PS_STATE_CHANGE = 200, + OL_ATH_PARAM_MCAST_BCAST_ECHO, + /* OBSS RSSI threshold for 20/40 coexistence */ + OL_ATH_PARAM_OBSS_RSSI_THRESHOLD, + /* Link/node RX RSSI threshold for 20/40 coexistence */ + OL_ATH_PARAM_OBSS_RX_RSSI_THRESHOLD, +#if ATH_CHANNEL_BLOCKING + OL_ATH_PARAM_ACS_BLOCK_MODE = 205, +#endif + OL_ATH_PARAM_ACS_TX_POWER_OPTION, + /* + * Default Antenna Polarization MSB 8 bits (24:31) specifying + * enable/disable ; LSB 24 bits (0:23) antenna mask value + */ + OL_ATH_PARAM_ANT_POLARIZATION, + /* rate limit mute type error prints */ + OL_ATH_PARAM_PRINT_RATE_LIMIT, + OL_ATH_PARAM_PDEV_RESET, /* Reset FW PDEV*/ + /* Do not crash host when target assert happened*/ + OL_ATH_PARAM_FW_DUMP_NO_HOST_CRASH = 210, + /* Consider OBSS non-erp to change to long slot*/ + OL_ATH_PARAM_CONSIDER_OBSS_NON_ERP_LONG_SLOT = 211, + OL_ATH_PARAM_STATS_FC, + OL_ATH_PARAM_QFLUSHINTERVAL, + OL_ATH_PARAM_TOTAL_Q_SIZE, + OL_ATH_PARAM_TOTAL_Q_SIZE_RANGE0, + OL_ATH_PARAM_TOTAL_Q_SIZE_RANGE1, + OL_ATH_PARAM_TOTAL_Q_SIZE_RANGE2, + OL_ATH_PARAM_TOTAL_Q_SIZE_RANGE3, + OL_ATH_PARAM_MIN_THRESHOLD, + OL_ATH_PARAM_MAX_Q_LIMIT, + OL_ATH_PARAM_MIN_Q_LIMIT, + OL_ATH_PARAM_CONG_CTRL_TIMER_INTV, + OL_ATH_PARAM_STATS_TIMER_INTV, + OL_ATH_PARAM_ROTTING_TIMER_INTV, + OL_ATH_PARAM_LATENCY_PROFILE, + OL_ATH_PARAM_HOSTQ_DUMP, + OL_ATH_PARAM_TIDQ_MAP, + OL_ATH_PARAM_DBG_ARP_SRC_ADDR, /* ARP DEBUG source address*/ + OL_ATH_PARAM_DBG_ARP_DST_ADDR, /* ARP DEBUG destination address*/ + OL_ATH_PARAM_ARP_DBG_CONF, /* ARP debug configuration */ + OL_ATH_PARAM_DISABLE_STA_VAP_AMSDU, /* Disable AMSDU for station vap */ +#if ATH_SUPPORT_DFS && ATH_SUPPORT_STA_DFS + OL_ATH_PARAM_STADFS_ENABLE = 300, /* STA DFS is enabled or not */ +#endif +#if QCA_AIRTIME_FAIRNESS + OL_ATH_PARAM_ATF_STRICT_SCHED = 301, + OL_ATH_PARAM_ATF_GROUP_POLICY = 302, +#endif +#if DBDC_REPEATER_SUPPORT + OL_ATH_PARAM_PRIMARY_RADIO, + OL_ATH_PARAM_DBDC_ENABLE, +#endif + OL_ATH_PARAM_TXPOWER_DBSCALE, + OL_ATH_PARAM_CTL_POWER_SCALE, +#if QCA_AIRTIME_FAIRNESS + OL_ATH_PARAM_ATF_OBSS_SCHED = 307, + OL_ATH_PARAM_ATF_OBSS_SCALE = 308, +#endif + OL_ATH_PARAM_PHY_OFDM_ERR = 309, + OL_ATH_PARAM_PHY_CCK_ERR = 310, + OL_ATH_PARAM_FCS_ERR = 311, + OL_ATH_PARAM_CHAN_UTIL = 312, +#if DBDC_REPEATER_SUPPORT + OL_ATH_PARAM_CLIENT_MCAST, +#endif + OL_ATH_PARAM_EMIWAR_80P80 = 314, + OL_ATH_PARAM_BATCHMODE = 315, + OL_ATH_PARAM_PACK_AGGR_DELAY = 316, +#if UMAC_SUPPORT_ACFG + OL_ATH_PARAM_DIAG_ENABLE = 317, +#endif +#if ATH_SUPPORT_VAP_QOS + OL_ATH_PARAM_VAP_QOS = 318, +#endif + OL_ATH_PARAM_CHAN_STATS_TH = 319, + /* Passive scan is enabled or disabled */ + OL_ATH_PARAM_PASSIVE_SCAN_ENABLE = 320, + OL_ATH_MIN_RSSI_ENABLE = 321, + OL_ATH_MIN_RSSI = 322, + OL_ATH_PARAM_ACS_2G_ALLCHAN = 323, +#if DBDC_REPEATER_SUPPORT + OL_ATH_PARAM_DELAY_STAVAP_UP = 324, +#endif + /* It is used to set the channel switch options */ + OL_ATH_PARAM_CHANSWITCH_OPTIONS = 327, + OL_ATH_BTCOEX_ENABLE = 328, + OL_ATH_BTCOEX_WL_PRIORITY = 329, + OL_ATH_PARAM_TID_OVERRIDE_QUEUE_MAPPING = 330, + OL_ATH_PARAM_CAL_VER_CHECK = 331, + OL_ATH_PARAM_NO_VLAN = 332, + OL_ATH_PARAM_CCA_THRESHOLD = 333, + OL_ATH_PARAM_ATF_LOGGING = 334, + OL_ATH_PARAM_STRICT_DOTH = 335, + OL_ATH_PARAM_DISCONNECTION_TIMEOUT = 336, + OL_ATH_PARAM_RECONFIGURATION_TIMEOUT = 337, + OL_ATH_PARAM_CHANNEL_SWITCH_COUNT = 338, + OL_ATH_PARAM_ALWAYS_PRIMARY = 339, + OL_ATH_PARAM_FAST_LANE = 340, + OL_ATH_GET_BTCOEX_DUTY_CYCLE = 341, + OL_ATH_PARAM_SECONDARY_OFFSET_IE = 342, + OL_ATH_PARAM_WIDE_BAND_SUB_ELEMENT = 343, + OL_ATH_PARAM_PREFERRED_UPLINK = 344, + OL_ATH_PARAM_PRECAC_ENABLE = 345, + OL_ATH_PARAM_PRECAC_TIMEOUT = 346, + OL_ATH_COEX_VER_CFG = 347, + OL_ATH_PARAM_DUMP_TARGET = 348, + OL_ATH_PARAM_PDEV_TO_REO_DEST = 349, + OL_ATH_PARAM_DUMP_CHAINMASK_TABLES = 350, + OL_ATH_PARAM_DUMP_OBJECTS = 351, + OL_ATH_PARAM_ACS_SRLOADVAR = 352, + OL_ATH_PARAM_MGMT_RSSI_THRESHOLD = 353, + OL_ATH_PARAM_EXT_NSS_CAPABLE = 354, + OL_ATH_PARAM_MGMT_PDEV_STATS_TIMER = 355, + OL_ATH_PARAM_TXACKTIMEOUT = 356, + OL_ATH_PARAM_ICM_ACTIVE = 357, + OL_ATH_PARAM_NOMINAL_NOISEFLOOR = 358, + OL_ATH_PARAM_CHAN_INFO = 359, + OL_ATH_PARAM_ACS_RANK = 360, + OL_ATH_PARAM_TXCHAINSOFT = 361, + OL_ATH_PARAM_WIDE_BAND_SCAN = 362, + OL_ATH_PARAM_CCK_TX_ENABLE = 363, + OL_ATH_PARAM_PAPI_ENABLE = 364, + OL_ATH_PARAM_ISOLATION = 365, + OL_ATH_PARAM_MAX_CLIENTS_PER_RADIO = 366, +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) + OL_ATH_PARAM_DFS_HOST_WAIT_TIMEOUT = 367, +#endif + OL_ATH_PARAM_NF_THRESH = 368, +#ifdef OL_ATH_SMART_LOGGING + OL_ATH_PARAM_SMARTLOG_ENABLE = 369, + OL_ATH_PARAM_SMARTLOG_FATAL_EVENT = 370, + OL_ATH_PARAM_SMARTLOG_SKB_SZ = 371, + OL_ATH_PARAM_SMARTLOG_P1PINGFAIL = 372, +#endif /* OL_ATH_SMART_LOGGING */ +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT + OL_ATH_PARAM_PRECAC_INTER_CHANNEL = 373, + OL_ATH_PARAM_PRECAC_CHAN_STATE = 374, +#endif + OL_ATH_PARAM_DBR_RING_STATUS = 375, +#ifdef QCN_ESP_IE + OL_ATH_PARAM_ESP_PERIODICITY = 376, + OL_ATH_PARAM_ESP_AIRTIME = 377, + OL_ATH_PARAM_ESP_PPDU_DURATION = 378, + OL_ATH_PARAM_ESP_BA_WINDOW = 379, +#endif /* QCN_ESP_IE */ + + OL_ATH_PARAM_CBS = 380, + OL_ATH_PARAM_DCS_SIM = 381, + OL_ATH_PARAM_CBS_DWELL_SPLIT_TIME = 382, + OL_ATH_PARAM_CBS_DWELL_REST_TIME = 383, + OL_ATH_PARAM_CBS_WAIT_TIME = 384, + OL_ATH_PARAM_CBS_REST_TIME = 385, + OL_ATH_PARAM_CBS_CSA = 386, + OL_ATH_PARAM_TWICE_ANTENNA_GAIN = 387, + OL_ATH_PARAM_ACTIVITY_FACTOR = 388, + OL_ATH_PARAM_CHAN_AP_RX_UTIL = 389, + OL_ATH_PARAM_CHAN_FREE = 390, + OL_ATH_PARAM_CHAN_AP_TX_UTIL = 391, + OL_ATH_PARAM_CHAN_OBSS_RX_UTIL = 392, + OL_ATH_PARAM_CHAN_NON_WIFI = 393, +#if PEER_FLOW_CONTROL + OL_ATH_PARAM_VIDEO_STATS_FC = 394, + OL_ATH_PARAM_VIDEO_DELAY_STATS_FC = 395, +#endif + OL_ATH_PARAM_ENABLE_PEER_RETRY_STATS = 396, + OL_ATH_PARAM_HE_UL_TRIG_INT = 397, + OL_ATH_PARAM_DFS_NOL_SUBCHANNEL_MARKING = 398, + /* + * Get the band that is tuned for low, high, + * full band freq range or it's 2g + */ + OL_ATH_PARAM_BAND_INFO = 399, + OL_ATH_PARAM_BW_REDUCE = 400, + /* Enable/disable Spatial Reuse */ + OL_ATH_PARAM_HE_SR = 401, + OL_ATH_PARAM_HE_UL_PPDU_DURATION = 402, + OL_ATH_PARAM_HE_UL_RU_ALLOCATION = 403, + OL_ATH_PARAM_PERIODIC_CFR_CAPTURE = 404, + OL_ATH_PARAM_FLUSH_PEER_RATE_STATS = 405, + OL_ATH_PARAM_DCS_RE_ENABLE_TIMER = 406, + /* Enable/disable Rx lite monitor mode */ + OL_ATH_PARAM_RX_MON_LITE = 407, + /* wifi down indication used in MBSS feature */ + OL_ATH_PARAM_WIFI_DOWN_IND = 408, + OL_ATH_PARAM_TX_CAPTURE = 409, + /* Enable fw dump collectin if wmi disconnects */ + OL_ATH_PARAM_WMI_DIS_DUMP = 410, + OL_ATH_PARAM_ACS_CHAN_GRADE_ALGO = 411, + OL_ATH_PARAM_ACS_CHAN_EFFICIENCY_VAR = 412, + OL_ATH_PARAM_ACS_NEAR_RANGE_WEIGHTAGE = 413, + OL_ATH_PARAM_ACS_MID_RANGE_WEIGHTAGE = 414, + OL_ATH_PARAM_ACS_FAR_RANGE_WEIGHTAGE = 415, + /* Set SELF AP OBSS_PD_THRESHOLD value */ + OL_ATH_PARAM_SET_CMD_OBSS_PD_THRESHOLD = 416, + /* Enable/Disable/Set MGMT_TTL in milliseconds. */ + OL_ATH_PARAM_MGMT_TTL = 417, + /* Enable/Disable/Set PROBE_RESP_TTL in milliseconds */ + OL_ATH_PARAM_PROBE_RESP_TTL = 418, + /* Set global MU PPDU duration for DL (usec units) */ + OL_ATH_PARAM_MU_PPDU_DURATION = 419, + /* Set TBTT_CTRL_CFG */ + OL_ATH_PARAM_TBTT_CTRL = 420, + /* Enable/disable AP OBSS_PD_THRESHOLD */ + OL_ATH_PARAM_SET_CMD_OBSS_PD_THRESHOLD_ENABLE = 421, + /* Get baseline radio level channel width */ + OL_ATH_PARAM_RCHWIDTH = 422, + /* Whether external ACS request is in progress */ + OL_ATH_EXT_ACS_REQUEST_IN_PROGRESS = 423, + /* set/get hw mode */ + OL_ATH_PARAM_HW_MODE = 424, +#if DBDC_REPEATER_SUPPORT + /* same ssid feature global disable */ + OL_ATH_PARAM_SAME_SSID_DISABLE = 425, +#endif + /* get MBSS enable flag */ + OL_ATH_PARAM_MBSS_EN = 426, + /* UNII-1 and UNII-2A channel coexistance */ + OL_ATH_PARAM_CHAN_COEX = 427, + /* Out of Band Advertisement feature */ + OL_ATH_PARAM_OOB_ENABLE = 428, + /* set/get opmode-notification timer for hw-mode switch */ + OL_ATH_PARAM_HW_MODE_SWITCH_OMN_TIMER = 429, + /* enable opmode-notification when doing hw-mode switch */ + OL_ATH_PARAM_HW_MODE_SWITCH_OMN_ENABLE = 430, + /* set primary interface for hw-mode switch */ + OL_ATH_PARAM_HW_MODE_SWITCH_PRIMARY_IF = 431, + /* Number of vdevs configured per PSOC */ + OL_ATH_PARAM_GET_PSOC_NUM_VDEVS = 432, + /* Number of peers configured per PSOC */ + OL_ATH_PARAM_GET_PSOC_NUM_PEERS = 433, + /* Number of vdevs configured per PDEV */ + OL_ATH_PARAM_GET_PDEV_NUM_VDEVS = 434, + /* Number of peers configured per PDEV */ + OL_ATH_PARAM_GET_PDEV_NUM_PEERS = 435, + /* Number of monitor vdevs configured per PDEV */ + OL_ATH_PARAM_GET_PDEV_NUM_MONITOR_VDEVS = 436, +#ifdef CE_TASKLET_DEBUG_ENABLE + /* Enable/disable CE stats print */ + OL_ATH_PARAM_ENABLE_CE_LATENCY_STATS = 437, +#endif +}; +#endif +/* Bitmasks for stats that can block */ +#define EXT_TXRX_FW_STATS 0x0001 +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_tx_delay.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_tx_delay.h new file mode 100644 index 0000000000000000000000000000000000000000..044052dd7d14ad3471991b5d69b754a2c271c3ae --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_tx_delay.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_tx_delay.h + * @brief Define the host data path histogram API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_COMPUTE_TX_DELAY_H_ +#define _CDP_TXRX_COMPUTE_TX_DELAY_H_ +#include "cdp_txrx_handle.h" +/** + * cdp_tx_delay() - get tx packet delay + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * @queue_delay_microsec: tx packet delay within queue, usec + * @tx_delay_microsec: tx packet delay, usec + * @category: packet category + * + * Return: NONE + */ +static inline void +cdp_tx_delay(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint32_t *queue_delay_microsec, uint32_t *tx_delay_microsec, + int category) +{ + if (!soc || !soc->ops || !soc->ops->delay_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->delay_ops->tx_delay) + return soc->ops->delay_ops->tx_delay(soc, pdev_id, + queue_delay_microsec, tx_delay_microsec, category); + return; +} + +/** + * cdp_tx_delay_hist() - get tx packet delay histogram + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * @bin_values: bin + * @category: packet category + * + * Return: NONE + */ +static inline void +cdp_tx_delay_hist(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint16_t *bin_values, int category) +{ + if (!soc || !soc->ops || !soc->ops->delay_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->delay_ops->tx_delay_hist) + return soc->ops->delay_ops->tx_delay_hist(soc, pdev_id, + bin_values, category); + return; +} + +/** + * cdp_tx_packet_count() - get tx packet count + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * @out_packet_loss_count: packet loss count + * @category: packet category + * + * Return: NONE + */ +static inline void +cdp_tx_packet_count(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint16_t *out_packet_count, uint16_t *out_packet_loss_count, + int category) +{ + if (!soc || !soc->ops || !soc->ops->delay_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->delay_ops->tx_packet_count) + return soc->ops->delay_ops->tx_packet_count(soc, pdev_id, + out_packet_count, out_packet_loss_count, category); + return; +} + +/** + * cdp_tx_set_compute_interval() - set tx packet stat compute interval + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * @interval: compute interval + * + * Return: NONE + */ +static inline void +cdp_tx_set_compute_interval(ol_txrx_soc_handle soc, uint8_t pdev_id, + uint32_t interval) +{ + if (!soc || !soc->ops || !soc->ops->delay_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->delay_ops->tx_set_compute_interval) + return soc->ops->delay_ops->tx_set_compute_interval(soc, + pdev_id, + interval); + return; +} +#endif /* _CDP_TXRX_COMPUTE_TX_DELAY_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_tx_throttle.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_tx_throttle.h new file mode 100644 index 0000000000000000000000000000000000000000..a17d1237bc06b23043e4c004096959c40b7138e9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_tx_throttle.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_tx_throttle.h + * @brief Define the host data path transmit throttle API + * functions called by the host control SW and the OS interface + * module + */ +#ifndef _CDP_TXRX_TX_THROTTLE_H_ +#define _CDP_TXRX_TX_THROTTLE_H_ +#include +#include "cdp_txrx_handle.h" +/** + * cdp_throttle_init_period() - init tx throttle period + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * @period: throttle period + * @dutycycle_level: duty cycle level + * + * Return: NONE + */ +static inline void +cdp_throttle_init_period(ol_txrx_soc_handle soc, uint8_t pdev_id, + int period, uint8_t *dutycycle_level) +{ + if (!soc || !soc->ops || !soc->ops->throttle_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->throttle_ops->throttle_init_period) + return soc->ops->throttle_ops->throttle_init_period( + soc, pdev_id, period, dutycycle_level); + return; +} + +/** + * cdp_throttle_init_period() - init tx throttle period + * @soc: data path soc handle + * @pdev_id: id of data path pdev handle + * @level: throttle level + * + * Return: NONE + */ +static inline void +cdp_throttle_set_level(ol_txrx_soc_handle soc, uint8_t pdev_id, int level) +{ + if (!soc || !soc->ops || !soc->ops->throttle_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->throttle_ops->throttle_set_level) + return soc->ops->throttle_ops->throttle_set_level(soc, pdev_id, + level); + return; +} + +#endif /* _CDP_TXRX_TX_THROTTLE_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_wds.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_wds.h new file mode 100644 index 0000000000000000000000000000000000000000..24c253f03540c6eb82dfdfecdd789dd438c2f325 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_wds.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_wds.h + * @brief Define the host data path WDS API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_WDS_H_ +#define _CDP_TXRX_WDS_H_ +#include "cdp_txrx_handle.h" +/** + * @brief set the wds rx filter policy of the device + * @details + * This flag sets the wds rx policy on the vdev. Rx frames not compliant + * with the policy will be dropped. + * + * @param vdev_id - id of the data virtual device object + * @param val - the wds rx policy bitmask + * @return - QDF_STATUS + */ +static inline QDF_STATUS +cdp_set_wds_rx_policy(ol_txrx_soc_handle soc, + uint8_t vdev_id, + u_int32_t val) +{ + if (!soc || !soc->ops || !soc->ops->wds_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->wds_ops->txrx_set_wds_rx_policy) + soc->ops->wds_ops->txrx_set_wds_rx_policy(soc, vdev_id, val); + return QDF_STATUS_SUCCESS; +} + +/** + * @brief set the wds rx filter policy of the device + * @details + * This flag sets the wds rx policy on the vdev. Rx frames not compliant + * with the policy will be dropped. + * + * @param psoc - psoc object + * @param vdev_id - id of the data virtual device object + * @param peer_mac - peer mac address + * @param val - the wds rx policy bitmask + * @return - QDF_STATUS + */ +static inline QDF_STATUS +cdp_set_wds_tx_policy_update(ol_txrx_soc_handle soc, + uint8_t vdev_id, uint8_t *peer_mac, + int wds_tx_ucast, int wds_tx_mcast) +{ + if (!soc || !soc->ops || !soc->ops->wds_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->wds_ops->txrx_wds_peer_tx_policy_update) + soc->ops->wds_ops->txrx_wds_peer_tx_policy_update( + soc, vdev_id, peer_mac, wds_tx_ucast, + wds_tx_mcast); + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_vdev_set_wds() - Set/unset wds_enable flag in vdev + * @soc - data path soc handle + * @vdev_id - id of data path vap handle + * @val - value to be set in wds_en flag + * + * This flag enables WDS source port learning feature on a vdev + * + * return 1 on success + */ +static inline int +cdp_vdev_set_wds(ol_txrx_soc_handle soc, uint8_t vdev_id, uint32_t val) +{ + if (soc->ops->wds_ops->vdev_set_wds) + return soc->ops->wds_ops->vdev_set_wds(soc, vdev_id, val); + return 0; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_cal_client_api.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_cal_client_api.h new file mode 100644 index 0000000000000000000000000000000000000000..d7c8815bff8f647472996f550f715c2daf04b2be --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_cal_client_api.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cal_client_api.h + * @brief: define timer to update DP stats + */ +#ifndef _DP_CAL_CLIENT_H_ +#define _DP_CAL_CLIENT_H_ + +#include +#include +#include +#include + +/*timer will run every 1 sec*/ +#define DP_CAL_CLIENT_TIME 1000 + +struct cal_client { + qdf_timer_t cal_client_timer; + void (*iterate_update_peer_list)(struct cdp_pdev *ctx); + struct cdp_pdev *pdev_hdl; +}; + +void dp_cal_client_attach(struct cdp_cal_client **cal_client_ctx, + struct cdp_pdev *pdev, qdf_device_t osdev, + void (*iterate_peer_list)(struct cdp_pdev *)); +void dp_cal_client_detach(struct cdp_cal_client **cal_client_ctx); +void dp_cal_client_timer_start(void *ctx); +void dp_cal_client_timer_stop(void *ctx); +void dp_cal_client_stats_timer_fn(void *pdev_hdl); +void dp_cal_client_update_peer_stats(struct cdp_peer_stats *peer_stats); + +#ifndef ATH_SUPPORT_EXT_STAT +void dp_cal_client_attach(struct cdp_cal_client **cal_client_ctx, + struct cdp_pdev *pdev, qdf_device_t osdev, + void (*iterate_peer_list)(struct cdp_pdev *)) +{ +} + +void dp_cal_client_detach(struct cdp_cal_client **cal_client_ctx) +{ +} + +void dp_cal_client_timer_start(void *ctx) +{ +} + +void dp_cal_client_timer_stop(void *ctx) +{ +} + +void dp_cal_client_stats_timer_fn(void *pdev_hdl) +{ +} + +void dp_cal_client_update_peer_stats(struct cdp_peer_stats *peer_stats) +{ +} +#endif + +#endif /*_DP_CAL_CLIENT_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c new file mode 100644 index 0000000000000000000000000000000000000000..55cd09fa630c6ea7529cbac8421f85d41035f128 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c @@ -0,0 +1,5048 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include "dp_peer.h" +#include "dp_types.h" +#include "dp_internal.h" +#include "dp_rx_mon.h" +#include "htt_stats.h" +#include "htt_ppdu_stats.h" +#include "dp_htt.h" +#include "dp_rx.h" +#include "qdf_mem.h" /* qdf_mem_malloc,free */ +#include "cdp_txrx_cmn_struct.h" + +#ifdef FEATURE_PERPKT_INFO +#include "dp_ratetable.h" +#endif + +#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE + +#define HTT_HTC_PKT_POOL_INIT_SIZE 64 + +#define HTT_MSG_BUF_SIZE(msg_bytes) \ + ((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING) + +#define HTT_PID_BIT_MASK 0x3 + +#define DP_EXT_MSG_LENGTH 2048 + +#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16 + +#define HTT_SHIFT_UPPER_TIMESTAMP 32 +#define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000 + +#define HTT_HTC_PKT_STATUS_SUCCESS \ + ((pkt->htc_pkt.Status != QDF_STATUS_E_CANCELED) && \ + (pkt->htc_pkt.Status != QDF_STATUS_E_RESOURCES)) + +/* + * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv + * bitmap for sniffer mode + * @bitmap: received bitmap + * + * Return: expected bitmap value, returns zero if doesn't match with + * either 64-bit Tx window or 256-bit window tlv bitmap + */ +int +dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap) +{ + if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64)) + return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64; + else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256)) + return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256; + + return 0; +} + +#ifdef FEATURE_PERPKT_INFO +/* + * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats. + * @peer: Datapath peer handle + * @ppdu: PPDU Descriptor + * + * Return: None + * + * on Tx data frame, we may get delayed ba set + * in htt_ppdu_stats_user_common_tlv. which mean we get Block Ack(BA) after we + * request Block Ack Request(BAR). Successful msdu is received only after Block + * Ack. To populate peer stats we need successful msdu(data frame). + * So we hold the Tx data stats on delayed_ba for stats update. + */ +static inline void +dp_peer_copy_delay_stats(struct dp_peer *peer, + struct cdp_tx_completion_ppdu_user *ppdu) +{ + struct dp_pdev *pdev; + struct dp_vdev *vdev; + + if (peer->last_delayed_ba) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "BA not yet recv for prev delayed ppdu[%d]\n", + peer->last_delayed_ba_ppduid); + vdev = peer->vdev; + if (vdev) { + pdev = vdev->pdev; + pdev->stats.cdp_delayed_ba_not_recev++; + } + } + + peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size; + peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc; + peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re; + peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf; + peer->delayed_ba_ppdu_stats.bw = ppdu->bw; + peer->delayed_ba_ppdu_stats.nss = ppdu->nss; + peer->delayed_ba_ppdu_stats.preamble = ppdu->preamble; + peer->delayed_ba_ppdu_stats.gi = ppdu->gi; + peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; + peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc; + peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; + peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = ppdu->mpdu_tried_ucast; + peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = ppdu->mpdu_tried_mcast; + peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl; + peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl; + peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; + + peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start; + peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones; + peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast; + + peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos; + peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id; + + peer->last_delayed_ba = true; +} + +/* + * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats. + * @peer: Datapath peer handle + * @ppdu: PPDU Descriptor + * + * Return: None + * + * For Tx BAR, PPDU stats TLV include Block Ack info. PPDU info + * from Tx BAR frame not required to populate peer stats. + * But we need successful MPDU and MSDU to update previous + * transmitted Tx data frame. Overwrite ppdu stats with the previous + * stored ppdu stats. + */ +static void +dp_peer_copy_stats_to_bar(struct dp_peer *peer, + struct cdp_tx_completion_ppdu_user *ppdu) +{ + ppdu->ltf_size = peer->delayed_ba_ppdu_stats.ltf_size; + ppdu->stbc = peer->delayed_ba_ppdu_stats.stbc; + ppdu->he_re = peer->delayed_ba_ppdu_stats.he_re; + ppdu->txbf = peer->delayed_ba_ppdu_stats.txbf; + ppdu->bw = peer->delayed_ba_ppdu_stats.bw; + ppdu->nss = peer->delayed_ba_ppdu_stats.nss; + ppdu->preamble = peer->delayed_ba_ppdu_stats.preamble; + ppdu->gi = peer->delayed_ba_ppdu_stats.gi; + ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm; + ppdu->ldpc = peer->delayed_ba_ppdu_stats.ldpc; + ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm; + ppdu->mpdu_tried_ucast = peer->delayed_ba_ppdu_stats.mpdu_tried_ucast; + ppdu->mpdu_tried_mcast = peer->delayed_ba_ppdu_stats.mpdu_tried_mcast; + ppdu->frame_ctrl = peer->delayed_ba_ppdu_stats.frame_ctrl; + ppdu->qos_ctrl = peer->delayed_ba_ppdu_stats.qos_ctrl; + ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm; + + ppdu->ru_start = peer->delayed_ba_ppdu_stats.ru_start; + ppdu->ru_tones = peer->delayed_ba_ppdu_stats.ru_tones; + ppdu->is_mcast = peer->delayed_ba_ppdu_stats.is_mcast; + + ppdu->user_pos = peer->delayed_ba_ppdu_stats.user_pos; + ppdu->mu_group_id = peer->delayed_ba_ppdu_stats.mu_group_id; + + peer->last_delayed_ba = false; +} + +/* + * dp_tx_rate_stats_update() - Update rate per-peer statistics + * @peer: Datapath peer handle + * @ppdu: PPDU Descriptor + * + * Return: None + */ +static void +dp_tx_rate_stats_update(struct dp_peer *peer, + struct cdp_tx_completion_ppdu_user *ppdu) +{ + uint32_t ratekbps = 0; + uint64_t ppdu_tx_rate = 0; + uint32_t rix; + uint16_t ratecode = 0; + + if (!peer || !ppdu) + return; + + ratekbps = dp_getrateindex(ppdu->gi, + ppdu->mcs, + ppdu->nss, + ppdu->preamble, + ppdu->bw, + &rix, + &ratecode); + + DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps); + + if (!ratekbps) + return; + + /* Calculate goodput in non-training period + * In training period, don't do anything as + * pending pkt is send as goodput. + */ + if ((!peer->bss_peer) && (!ppdu->sa_is_training)) { + ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) * + (CDP_PERCENT_MACRO - ppdu->current_rate_per)); + } + ppdu->rix = rix; + ppdu->tx_ratekbps = ratekbps; + ppdu->tx_ratecode = ratecode; + peer->stats.tx.avg_tx_rate = + dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps); + ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate); + DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate); + + if (peer->vdev) { + /* + * In STA mode: + * We get ucast stats as BSS peer stats. + * + * In AP mode: + * We get mcast stats as BSS peer stats. + * We get ucast stats as assoc peer stats. + */ + if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) { + peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps; + peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs; + } else { + peer->vdev->stats.tx.last_tx_rate = ratekbps; + peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs; + } + } +} + +/* + * dp_tx_stats_update() - Update per-peer statistics + * @pdev: Datapath pdev handle + * @peer: Datapath peer handle + * @ppdu: PPDU Descriptor + * @ack_rssi: RSSI of last ack received + * + * Return: None + */ +static void +dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer, + struct cdp_tx_completion_ppdu_user *ppdu, + uint32_t ack_rssi) +{ + uint8_t preamble, mcs; + uint16_t num_msdu; + uint16_t num_mpdu; + uint16_t mpdu_tried; + uint16_t mpdu_failed; + + preamble = ppdu->preamble; + mcs = ppdu->mcs; + num_msdu = ppdu->num_msdu; + num_mpdu = ppdu->mpdu_success; + mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast; + mpdu_failed = mpdu_tried - num_mpdu; + + /* If the peer statistics are already processed as part of + * per-MSDU completion handler, do not process these again in per-PPDU + * indications */ + if (pdev->soc->process_tx_status) + return; + + if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) { + /* + * All failed mpdu will be retried, so incrementing + * retries mpdu based on mpdu failed. Even for + * ack failure i.e for long retries we get + * mpdu failed equal mpdu tried. + */ + DP_STATS_INC(peer, tx.retries, mpdu_failed); + DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus); + return; + } + + if (ppdu->is_ppdu_cookie_valid) + DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1); + + if (ppdu->mu_group_id <= MAX_MU_GROUP_ID && + ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) { + if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1)))) + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "mu_group_id out of bound!!\n"); + else + DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id], + (ppdu->user_pos + 1)); + } + + if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA || + ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) { + DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones); + DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start); + switch (ppdu->ru_tones) { + case RU_26: + DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu, + num_msdu); + DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu, + num_mpdu); + DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried, + mpdu_tried); + break; + case RU_52: + DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu, + num_msdu); + DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu, + num_mpdu); + DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried, + mpdu_tried); + break; + case RU_106: + DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu, + num_msdu); + DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu, + num_mpdu); + DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried, + mpdu_tried); + break; + case RU_242: + DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu, + num_msdu); + DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu, + num_mpdu); + DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried, + mpdu_tried); + break; + case RU_484: + DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu, + num_msdu); + DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu, + num_mpdu); + DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried, + mpdu_tried); + break; + case RU_996: + DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu, + num_msdu); + DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu, + num_mpdu); + DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried, + mpdu_tried); + break; + } + } + + /* + * All failed mpdu will be retried, so incrementing + * retries mpdu based on mpdu failed. Even for + * ack failure i.e for long retries we get + * mpdu failed equal mpdu tried. + */ + DP_STATS_INC(peer, tx.retries, mpdu_failed); + DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus); + + DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu, + num_msdu); + DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu, + num_mpdu); + DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried, + mpdu_tried); + + DP_STATS_INC_PKT(peer, tx.comp_pkt, + num_msdu, (ppdu->success_bytes + + ppdu->retry_bytes + ppdu->failed_bytes)); + DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate); + DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu); + DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu); + DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu); + if (ppdu->tid < CDP_DATA_TID_MAX) + DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], + num_msdu); + DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc); + DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc); + if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid) + DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi); + + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11A) && (preamble == DOT11_A))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11A) && (preamble == DOT11_N))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX))); + DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu); + DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu)); + + dp_peer_stats_notify(pdev, peer); + +#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE + dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, + &peer->stats, ppdu->peer_id, + UPDATE_PEER_STATS, pdev->pdev_id); +#endif +} +#endif + +#ifdef WLAN_TX_PKT_CAPTURE_ENH +#include "dp_tx_capture.h" +#else +static inline void +dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev, + void *data, + uint32_t ppdu_id, + uint32_t size) +{ +} +#endif + +/* + * htt_htc_pkt_alloc() - Allocate HTC packet buffer + * @htt_soc: HTT SOC handle + * + * Return: Pointer to htc packet buffer + */ +static struct dp_htt_htc_pkt * +htt_htc_pkt_alloc(struct htt_soc *soc) +{ + struct dp_htt_htc_pkt_union *pkt = NULL; + + HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); + if (soc->htt_htc_pkt_freelist) { + pkt = soc->htt_htc_pkt_freelist; + soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next; + } + HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); + + if (!pkt) + pkt = qdf_mem_malloc(sizeof(*pkt)); + return &pkt->u.pkt; /* not actually a dereference */ +} + +/* + * htt_htc_pkt_free() - Free HTC packet buffer + * @htt_soc: HTT SOC handle + */ +static void +htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt) +{ + struct dp_htt_htc_pkt_union *u_pkt = + (struct dp_htt_htc_pkt_union *)pkt; + + HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); + u_pkt->u.next = soc->htt_htc_pkt_freelist; + soc->htt_htc_pkt_freelist = u_pkt; + HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); +} + +/* + * htt_htc_pkt_pool_free() - Free HTC packet pool + * @htt_soc: HTT SOC handle + */ +static void +htt_htc_pkt_pool_free(struct htt_soc *soc) +{ + struct dp_htt_htc_pkt_union *pkt, *next; + pkt = soc->htt_htc_pkt_freelist; + while (pkt) { + next = pkt->u.next; + qdf_mem_free(pkt); + pkt = next; + } + soc->htt_htc_pkt_freelist = NULL; +} + +/* + * htt_htc_misc_pkt_list_trim() - trim misc list + * @htt_soc: HTT SOC handle + * @level: max no. of pkts in list + */ +static void +htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level) +{ + struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL; + int i = 0; + qdf_nbuf_t netbuf; + + HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); + pkt = soc->htt_htc_pkt_misclist; + while (pkt) { + next = pkt->u.next; + /* trim the out grown list*/ + if (++i > level) { + netbuf = + (qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext); + qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE); + qdf_nbuf_free(netbuf); + qdf_mem_free(pkt); + pkt = NULL; + if (prev) + prev->u.next = NULL; + } + prev = pkt; + pkt = next; + } + HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); +} + +/* + * htt_htc_misc_pkt_list_add() - Add pkt to misc list + * @htt_soc: HTT SOC handle + * @dp_htt_htc_pkt: pkt to be added to list + */ +static void +htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt) +{ + struct dp_htt_htc_pkt_union *u_pkt = + (struct dp_htt_htc_pkt_union *)pkt; + int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc, + pkt->htc_pkt.Endpoint) + + DP_HTT_HTC_PKT_MISCLIST_SIZE; + + HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); + if (soc->htt_htc_pkt_misclist) { + u_pkt->u.next = soc->htt_htc_pkt_misclist; + soc->htt_htc_pkt_misclist = u_pkt; + } else { + soc->htt_htc_pkt_misclist = u_pkt; + } + HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); + + /* only ce pipe size + tx_queue_depth could possibly be in use + * free older packets in the misclist + */ + htt_htc_misc_pkt_list_trim(soc, misclist_trim_level); +} + +/** + * DP_HTT_SEND_HTC_PKT() - Send htt packet from host + * @soc : HTT SOC handle + * @pkt: pkt to be send + * @cmd : command to be recorded in dp htt logger + * @buf : Pointer to buffer needs to be recored for above cmd + * + * Return: None + */ +static inline QDF_STATUS DP_HTT_SEND_HTC_PKT(struct htt_soc *soc, + struct dp_htt_htc_pkt *pkt, + uint8_t cmd, uint8_t *buf) +{ + QDF_STATUS status; + + htt_command_record(soc->htt_logger_handle, cmd, buf); + + status = htc_send_pkt(soc->htc_soc, &pkt->htc_pkt); + if (status == QDF_STATUS_SUCCESS && HTT_HTC_PKT_STATUS_SUCCESS) + htt_htc_misc_pkt_list_add(soc, pkt); + + return status; +} + +/* + * htt_htc_misc_pkt_pool_free() - free pkts in misc list + * @htt_soc: HTT SOC handle + */ +static void +htt_htc_misc_pkt_pool_free(struct htt_soc *soc) +{ + struct dp_htt_htc_pkt_union *pkt, *next; + qdf_nbuf_t netbuf; + + HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); + pkt = soc->htt_htc_pkt_misclist; + + while (pkt) { + next = pkt->u.next; + netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext); + qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE); + + soc->stats.htc_pkt_free++; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, + "%s: Pkt free count %d", + __func__, soc->stats.htc_pkt_free); + + qdf_nbuf_free(netbuf); + qdf_mem_free(pkt); + pkt = next; + } + soc->htt_htc_pkt_misclist = NULL; + HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); +} + +/* + * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ + * @tgt_mac_addr: Target MAC + * @buffer: Output buffer + */ +static u_int8_t * +htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer) +{ +#ifdef BIG_ENDIAN_HOST + /* + * The host endianness is opposite of the target endianness. + * To make u_int32_t elements come out correctly, the target->host + * upload has swizzled the bytes in each u_int32_t element of the + * message. + * For byte-array message fields like the MAC address, this + * upload swizzling puts the bytes in the wrong order, and needs + * to be undone. + */ + buffer[0] = tgt_mac_addr[3]; + buffer[1] = tgt_mac_addr[2]; + buffer[2] = tgt_mac_addr[1]; + buffer[3] = tgt_mac_addr[0]; + buffer[4] = tgt_mac_addr[7]; + buffer[5] = tgt_mac_addr[6]; + return buffer; +#else + /* + * The host endianness matches the target endianness - + * we can use the mac addr directly from the message buffer. + */ + return tgt_mac_addr; +#endif +} + +/* + * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer + * @soc: SOC handle + * @status: Completion status + * @netbuf: HTT buffer + */ +static void +dp_htt_h2t_send_complete_free_netbuf( + void *soc, A_STATUS status, qdf_nbuf_t netbuf) +{ + qdf_nbuf_free(netbuf); +} + +/* + * dp_htt_h2t_send_complete() - H2T completion handler + * @context: Opaque context (HTT SOC handle) + * @htc_pkt: HTC packet + */ +static void +dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt) +{ + void (*send_complete_part2)( + void *soc, QDF_STATUS status, qdf_nbuf_t msdu); + struct htt_soc *soc = (struct htt_soc *) context; + struct dp_htt_htc_pkt *htt_pkt; + qdf_nbuf_t netbuf; + + send_complete_part2 = htc_pkt->pPktContext; + + htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt); + + /* process (free or keep) the netbuf that held the message */ + netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext; + /* + * adf sendcomplete is required for windows only + */ + /* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */ + if (send_complete_part2) { + send_complete_part2( + htt_pkt->soc_ctxt, htc_pkt->Status, netbuf); + } + /* free the htt_htc_pkt / HTC_PACKET object */ + htt_htc_pkt_free(soc, htt_pkt); +} + +/* + * htt_h2t_ver_req_msg() - Send HTT version request message to target + * @htt_soc: HTT SOC handle + * + * Return: 0 on success; error code on failure + */ +static int htt_h2t_ver_req_msg(struct htt_soc *soc) +{ + struct dp_htt_htc_pkt *pkt; + qdf_nbuf_t msg; + uint32_t *msg_word; + QDF_STATUS status; + + msg = qdf_nbuf_alloc( + soc->osdev, + HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES), + /* reserve room for the HTC header */ + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); + if (!msg) + return QDF_STATUS_E_NOMEM; + + /* + * Set the length of the message. + * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added + * separately during the below call to qdf_nbuf_push_head. + * The contribution from the HTC header is added separately inside HTC. + */ + if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg", + __func__); + return QDF_STATUS_E_FAILURE; + } + + /* fill in the message contents */ + msg_word = (u_int32_t *) qdf_nbuf_data(msg); + + /* rewind beyond alignment pad to get to the HTC header reserved area */ + qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); + + *msg_word = 0; + HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ); + + pkt = htt_htc_pkt_alloc(soc); + if (!pkt) { + qdf_nbuf_free(msg); + return QDF_STATUS_E_FAILURE; + } + pkt->soc_ctxt = NULL; /* not used during send-done callback */ + + SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, + dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg), + qdf_nbuf_len(msg), soc->htc_endpoint, + HTC_TX_PACKET_TAG_RTPM_PUT_RC); + + SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); + status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ, + NULL); + + if (status != QDF_STATUS_SUCCESS) { + qdf_nbuf_free(msg); + htt_htc_pkt_free(soc, pkt); + } + + return status; +} + +/* + * htt_srng_setup() - Send SRNG setup message to target + * @htt_soc: HTT SOC handle + * @mac_id: MAC Id + * @hal_srng: Opaque HAL SRNG pointer + * @hal_ring_type: SRNG ring type + * + * Return: 0 on success; error code on failure + */ +int htt_srng_setup(struct htt_soc *soc, int mac_id, + hal_ring_handle_t hal_ring_hdl, + int hal_ring_type) +{ + struct dp_htt_htc_pkt *pkt; + qdf_nbuf_t htt_msg; + uint32_t *msg_word; + struct hal_srng_params srng_params; + qdf_dma_addr_t hp_addr, tp_addr; + uint32_t ring_entry_size = + hal_srng_get_entrysize(soc->hal_soc, hal_ring_type); + int htt_ring_type, htt_ring_id; + uint8_t *htt_logger_bufp; + int target_pdev_id; + int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id); + QDF_STATUS status; + + /* Sizes should be set in 4-byte words */ + ring_entry_size = ring_entry_size >> 2; + + htt_msg = qdf_nbuf_alloc(soc->osdev, + HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ), + /* reserve room for the HTC header */ + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); + if (!htt_msg) + goto fail0; + + hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params); + hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl); + tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl); + + switch (hal_ring_type) { + case RXDMA_BUF: +#ifdef QCA_HOST2FW_RXBUF_RING + if (srng_params.ring_id == + (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) { + htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING; + htt_ring_type = HTT_SW_TO_SW_RING; +#ifdef IPA_OFFLOAD + } else if (srng_params.ring_id == + (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) { + htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING; + htt_ring_type = HTT_SW_TO_SW_RING; +#endif +#else + if (srng_params.ring_id == + (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 + + (lmac_id * HAL_MAX_RINGS_PER_LMAC))) { + htt_ring_id = HTT_RXDMA_HOST_BUF_RING; + htt_ring_type = HTT_SW_TO_HW_RING; +#endif + } else if (srng_params.ring_id == +#ifdef IPA_OFFLOAD + (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 + +#else + (HAL_SRNG_WMAC1_SW2RXDMA1_BUF + +#endif + (lmac_id * HAL_MAX_RINGS_PER_LMAC))) { + htt_ring_id = HTT_RXDMA_HOST_BUF_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + } else { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Ring %d currently not supported", + __func__, srng_params.ring_id); + goto fail1; + } + + dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx", + hal_ring_type, srng_params.ring_id, htt_ring_id, + (uint64_t)hp_addr, + (uint64_t)tp_addr); + break; + case RXDMA_MONITOR_BUF: + htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + break; + case RXDMA_MONITOR_STATUS: + htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + break; + case RXDMA_MONITOR_DST: + htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING; + htt_ring_type = HTT_HW_TO_SW_RING; + break; + case RXDMA_MONITOR_DESC: + htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + break; + case RXDMA_DST: + htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING; + htt_ring_type = HTT_HW_TO_SW_RING; + break; + + default: + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Ring currently not supported", __func__); + goto fail1; + } + + /* + * Set the length of the message. + * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added + * separately during the below call to qdf_nbuf_push_head. + * The contribution from the HTC header is added separately inside HTC. + */ + if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to expand head for SRING_SETUP msg", + __func__); + return QDF_STATUS_E_FAILURE; + } + + msg_word = (uint32_t *)qdf_nbuf_data(htt_msg); + + /* rewind beyond alignment pad to get to the HTC header reserved area */ + qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING); + + /* word 0 */ + *msg_word = 0; + htt_logger_bufp = (uint8_t *)msg_word; + HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP); + target_pdev_id = + dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id); + + if ((htt_ring_type == HTT_SW_TO_HW_RING) || + (htt_ring_type == HTT_HW_TO_SW_RING)) + HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id); + else + HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id); + + dp_info("%s: mac_id %d", __func__, mac_id); + HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type); + /* TODO: Discuss with FW on changing this to unique ID and using + * htt_ring_type to send the type of ring + */ + HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id); + + /* word 1 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word, + srng_params.ring_base_paddr & 0xffffffff); + + /* word 2 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word, + (uint64_t)srng_params.ring_base_paddr >> 32); + + /* word 3 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size); + HTT_SRING_SETUP_RING_SIZE_SET(*msg_word, + (ring_entry_size * srng_params.num_entries)); + dp_info("%s: entry_size %d", __func__, ring_entry_size); + dp_info("%s: num_entries %d", __func__, srng_params.num_entries); + dp_info("%s: ring_size %d", __func__, + (ring_entry_size * srng_params.num_entries)); + if (htt_ring_type == HTT_SW_TO_HW_RING) + HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET( + *msg_word, 1); + HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word, + !!(srng_params.flags & HAL_SRNG_MSI_SWAP)); + HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word, + !!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP)); + HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word, + !!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP)); + + /* word 4 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word, + hp_addr & 0xffffffff); + + /* word 5 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word, + (uint64_t)hp_addr >> 32); + + /* word 6 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word, + tp_addr & 0xffffffff); + + /* word 7 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word, + (uint64_t)tp_addr >> 32); + + /* word 8 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word, + srng_params.msi_addr & 0xffffffff); + + /* word 9 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word, + (uint64_t)(srng_params.msi_addr) >> 32); + + /* word 10 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word, + srng_params.msi_data); + + /* word 11 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word, + srng_params.intr_batch_cntr_thres_entries * + ring_entry_size); + HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word, + srng_params.intr_timer_thres_us >> 3); + + /* word 12 */ + msg_word++; + *msg_word = 0; + if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) { + /* TODO: Setting low threshold to 1/8th of ring size - see + * if this needs to be configurable + */ + HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word, + srng_params.low_threshold); + } + /* "response_required" field should be set if a HTT response message is + * required after setting up the ring. + */ + pkt = htt_htc_pkt_alloc(soc); + if (!pkt) + goto fail1; + + pkt->soc_ctxt = NULL; /* not used during send-done callback */ + + SET_HTC_PACKET_INFO_TX( + &pkt->htc_pkt, + dp_htt_h2t_send_complete_free_netbuf, + qdf_nbuf_data(htt_msg), + qdf_nbuf_len(htt_msg), + soc->htc_endpoint, + HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */ + + SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg); + status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP, + htt_logger_bufp); + + if (status != QDF_STATUS_SUCCESS) { + qdf_nbuf_free(htt_msg); + htt_htc_pkt_free(soc, pkt); + } + + return status; + +fail1: + qdf_nbuf_free(htt_msg); +fail0: + return QDF_STATUS_E_FAILURE; +} + +/* + * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter + * config message to target + * @htt_soc: HTT SOC handle + * @pdev_id: WIN- PDEV Id, MCL- mac id + * @hal_srng: Opaque HAL SRNG pointer + * @hal_ring_type: SRNG ring type + * @ring_buf_size: SRNG buffer size + * @htt_tlv_filter: Rx SRNG TLV and filter setting + * Return: 0 on success; error code on failure + */ +int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id, + hal_ring_handle_t hal_ring_hdl, + int hal_ring_type, int ring_buf_size, + struct htt_rx_ring_tlv_filter *htt_tlv_filter) +{ + struct htt_soc *soc = (struct htt_soc *)htt_soc; + struct dp_htt_htc_pkt *pkt; + qdf_nbuf_t htt_msg; + uint32_t *msg_word; + struct hal_srng_params srng_params; + uint32_t htt_ring_type, htt_ring_id; + uint32_t tlv_filter; + uint8_t *htt_logger_bufp; + struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx; + uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx); + int target_pdev_id; + QDF_STATUS status; + + htt_msg = qdf_nbuf_alloc(soc->osdev, + HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ), + /* reserve room for the HTC header */ + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); + if (!htt_msg) + goto fail0; + + hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params); + + switch (hal_ring_type) { + case RXDMA_BUF: + htt_ring_id = HTT_RXDMA_HOST_BUF_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + break; + case RXDMA_MONITOR_BUF: + htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + break; + case RXDMA_MONITOR_STATUS: + htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + break; + case RXDMA_MONITOR_DST: + htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING; + htt_ring_type = HTT_HW_TO_SW_RING; + break; + case RXDMA_MONITOR_DESC: + htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + break; + case RXDMA_DST: + htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING; + htt_ring_type = HTT_HW_TO_SW_RING; + break; + + default: + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Ring currently not supported", __func__); + goto fail1; + } + + /* + * Set the length of the message. + * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added + * separately during the below call to qdf_nbuf_push_head. + * The contribution from the HTC header is added separately inside HTC. + */ + if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to expand head for RX Ring Cfg msg", + __func__); + goto fail1; /* failure */ + } + + msg_word = (uint32_t *)qdf_nbuf_data(htt_msg); + + /* rewind beyond alignment pad to get to the HTC header reserved area */ + qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING); + + /* word 0 */ + htt_logger_bufp = (uint8_t *)msg_word; + *msg_word = 0; + HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG); + + /* + * pdev_id is indexed from 0 whereas mac_id is indexed from 1 + * SW_TO_SW and SW_TO_HW rings are unaffected by this + */ + target_pdev_id = + dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id); + + if (htt_ring_type == HTT_SW_TO_SW_RING || + htt_ring_type == HTT_SW_TO_HW_RING) + HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word, + target_pdev_id); + + /* TODO: Discuss with FW on changing this to unique ID and using + * htt_ring_type to send the type of ring + */ + HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id); + + HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word, + !!(srng_params.flags & HAL_SRNG_MSI_SWAP)); + + HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word, + !!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP)); + + HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word, + htt_tlv_filter->offset_valid); + + if (mon_drop_th > 0) + HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word, + 1); + else + HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word, + 0); + + /* word 1 */ + msg_word++; + *msg_word = 0; + HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word, + ring_buf_size); + + /* word 2 */ + msg_word++; + *msg_word = 0; + + if (htt_tlv_filter->enable_fp) { + /* TYPE: MGMT */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 0000, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_ASSOC_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 0001, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_ASSOC_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 0010, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_REASSOC_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 0011, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_REASSOC_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 0100, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_PROBE_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 0101, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_PROBE_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 0110, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_TIM_ADVT) ? 1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP, + MGMT, 0111, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_RESERVED_7) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 1000, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_BEACON) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 1001, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_ATIM) ? 1 : 0); + } + + if (htt_tlv_filter->enable_md) { + /* TYPE: MGMT */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MD, MGMT, 0000, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_ASSOC_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MD, MGMT, 0001, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_ASSOC_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MD, MGMT, 0010, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_REASSOC_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MD, MGMT, 0011, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_REASSOC_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MD, MGMT, 0100, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_PROBE_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MD, MGMT, 0101, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_PROBE_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MD, MGMT, 0110, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_TIM_ADVT) ? 1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, + MGMT, 0111, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_RESERVED_7) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MD, MGMT, 1000, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_BEACON) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MD, MGMT, 1001, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_ATIM) ? 1 : 0); + } + + if (htt_tlv_filter->enable_mo) { + /* TYPE: MGMT */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 0000, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_ASSOC_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 0001, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_ASSOC_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 0010, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_REASSOC_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 0011, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_REASSOC_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 0100, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_PROBE_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 0101, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_PROBE_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 0110, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_TIM_ADVT) ? 1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO, + MGMT, 0111, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_RESERVED_7) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 1000, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_BEACON) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 1001, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_ATIM) ? 1 : 0); + } + + /* word 3 */ + msg_word++; + *msg_word = 0; + + if (htt_tlv_filter->enable_fp) { + /* TYPE: MGMT */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + FP, MGMT, 1010, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_DISASSOC) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + FP, MGMT, 1011, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_AUTH) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + FP, MGMT, 1100, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_DEAUTH) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + FP, MGMT, 1101, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_ACTION) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + FP, MGMT, 1110, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_ACT_NO_ACK) ? 1 : 0); + /* reserved*/ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP, + MGMT, 1111, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_RESERVED_15) ? 1 : 0); + } + + if (htt_tlv_filter->enable_md) { + /* TYPE: MGMT */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MD, MGMT, 1010, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_DISASSOC) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MD, MGMT, 1011, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_AUTH) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MD, MGMT, 1100, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_DEAUTH) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MD, MGMT, 1101, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_ACTION) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MD, MGMT, 1110, + (htt_tlv_filter->md_mgmt_filter & + FILTER_MGMT_ACT_NO_ACK) ? 1 : 0); + } + + if (htt_tlv_filter->enable_mo) { + /* TYPE: MGMT */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MO, MGMT, 1010, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_DISASSOC) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MO, MGMT, 1011, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_AUTH) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MO, MGMT, 1100, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_DEAUTH) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MO, MGMT, 1101, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_ACTION) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MO, MGMT, 1110, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_ACT_NO_ACK) ? 1 : 0); + /* reserved*/ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO, + MGMT, 1111, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_RESERVED_15) ? 1 : 0); + } + + /* word 4 */ + msg_word++; + *msg_word = 0; + + if (htt_tlv_filter->enable_fp) { + /* TYPE: CTRL */ + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0000, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_RESERVED_1) ? 1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0001, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_RESERVED_2) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0010, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_TRIGGER) ? 1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0011, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_RESERVED_4) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0100, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_BF_REP_POLL) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0101, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_VHT_NDP) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0110, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_FRAME_EXT) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0111, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_CTRLWRAP) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 1000, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_BA_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 1001, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_BA) ? 1 : 0); + } + + if (htt_tlv_filter->enable_md) { + /* TYPE: CTRL */ + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, + CTRL, 0000, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_RESERVED_1) ? 1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, + CTRL, 0001, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_RESERVED_2) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, + CTRL, 0010, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_TRIGGER) ? 1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, + CTRL, 0011, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_RESERVED_4) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, + CTRL, 0100, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_BF_REP_POLL) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, + CTRL, 0101, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_VHT_NDP) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, + CTRL, 0110, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_FRAME_EXT) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, + CTRL, 0111, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_CTRLWRAP) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, + CTRL, 1000, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_BA_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, + CTRL, 1001, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_BA) ? 1 : 0); + } + + if (htt_tlv_filter->enable_mo) { + /* TYPE: CTRL */ + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0000, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_RESERVED_1) ? 1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0001, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_RESERVED_2) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0010, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_TRIGGER) ? 1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0011, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_RESERVED_4) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0100, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_BF_REP_POLL) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0101, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_VHT_NDP) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0110, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_FRAME_EXT) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0111, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_CTRLWRAP) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 1000, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_BA_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 1001, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_BA) ? 1 : 0); + } + + /* word 5 */ + msg_word++; + *msg_word = 0; + if (htt_tlv_filter->enable_fp) { + /* TYPE: CTRL */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + CTRL, 1010, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_PSPOLL) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + CTRL, 1011, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_RTS) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + CTRL, 1100, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_CTS) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + CTRL, 1101, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_ACK) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + CTRL, 1110, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_CFEND) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + CTRL, 1111, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_CFEND_CFACK) ? 1 : 0); + /* TYPE: DATA */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + DATA, MCAST, + (htt_tlv_filter->fp_data_filter & + FILTER_DATA_MCAST) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + DATA, UCAST, + (htt_tlv_filter->fp_data_filter & + FILTER_DATA_UCAST) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + DATA, NULL, + (htt_tlv_filter->fp_data_filter & + FILTER_DATA_NULL) ? 1 : 0); + } + + if (htt_tlv_filter->enable_md) { + /* TYPE: CTRL */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + CTRL, 1010, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_PSPOLL) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + CTRL, 1011, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_RTS) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + CTRL, 1100, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_CTS) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + CTRL, 1101, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_ACK) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + CTRL, 1110, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_CFEND) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + CTRL, 1111, + (htt_tlv_filter->md_ctrl_filter & + FILTER_CTRL_CFEND_CFACK) ? 1 : 0); + /* TYPE: DATA */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + DATA, MCAST, + (htt_tlv_filter->md_data_filter & + FILTER_DATA_MCAST) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + DATA, UCAST, + (htt_tlv_filter->md_data_filter & + FILTER_DATA_UCAST) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + DATA, NULL, + (htt_tlv_filter->md_data_filter & + FILTER_DATA_NULL) ? 1 : 0); + } + + if (htt_tlv_filter->enable_mo) { + /* TYPE: CTRL */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + CTRL, 1010, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_PSPOLL) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + CTRL, 1011, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_RTS) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + CTRL, 1100, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_CTS) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + CTRL, 1101, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_ACK) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + CTRL, 1110, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_CFEND) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + CTRL, 1111, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_CFEND_CFACK) ? 1 : 0); + /* TYPE: DATA */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + DATA, MCAST, + (htt_tlv_filter->mo_data_filter & + FILTER_DATA_MCAST) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + DATA, UCAST, + (htt_tlv_filter->mo_data_filter & + FILTER_DATA_UCAST) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + DATA, NULL, + (htt_tlv_filter->mo_data_filter & + FILTER_DATA_NULL) ? 1 : 0); + } + + /* word 6 */ + msg_word++; + *msg_word = 0; + tlv_filter = 0; + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START, + htt_tlv_filter->mpdu_start); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START, + htt_tlv_filter->msdu_start); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET, + htt_tlv_filter->packet); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END, + htt_tlv_filter->msdu_end); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END, + htt_tlv_filter->mpdu_end); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER, + htt_tlv_filter->packet_header); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION, + htt_tlv_filter->attention); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START, + htt_tlv_filter->ppdu_start); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END, + htt_tlv_filter->ppdu_end); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS, + htt_tlv_filter->ppdu_end_user_stats); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, + PPDU_END_USER_STATS_EXT, + htt_tlv_filter->ppdu_end_user_stats_ext); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE, + htt_tlv_filter->ppdu_end_status_done); + /* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/ + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED, + htt_tlv_filter->header_per_msdu); + + HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter); + + msg_word++; + *msg_word = 0; + if (htt_tlv_filter->offset_valid) { + HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word, + htt_tlv_filter->rx_packet_offset); + HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word, + htt_tlv_filter->rx_header_offset); + + msg_word++; + *msg_word = 0; + HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word, + htt_tlv_filter->rx_mpdu_end_offset); + HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word, + htt_tlv_filter->rx_mpdu_start_offset); + + msg_word++; + *msg_word = 0; + HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word, + htt_tlv_filter->rx_msdu_end_offset); + HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word, + htt_tlv_filter->rx_msdu_start_offset); + + msg_word++; + *msg_word = 0; + HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word, + htt_tlv_filter->rx_attn_offset); + msg_word++; + *msg_word = 0; + } else { + msg_word += 4; + *msg_word = 0; + } + + if (mon_drop_th > 0) + HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word, + mon_drop_th); + + /* "response_required" field should be set if a HTT response message is + * required after setting up the ring. + */ + pkt = htt_htc_pkt_alloc(soc); + if (!pkt) + goto fail1; + + pkt->soc_ctxt = NULL; /* not used during send-done callback */ + + SET_HTC_PACKET_INFO_TX( + &pkt->htc_pkt, + dp_htt_h2t_send_complete_free_netbuf, + qdf_nbuf_data(htt_msg), + qdf_nbuf_len(htt_msg), + soc->htc_endpoint, + HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */ + + SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg); + status = DP_HTT_SEND_HTC_PKT(soc, pkt, + HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG, + htt_logger_bufp); + + if (status != QDF_STATUS_SUCCESS) { + qdf_nbuf_free(htt_msg); + htt_htc_pkt_free(soc, pkt); + } + + return status; + +fail1: + qdf_nbuf_free(htt_msg); +fail0: + return QDF_STATUS_E_FAILURE; +} + +#if defined(HTT_STATS_ENABLE) +static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats, + struct dp_soc *soc, qdf_nbuf_t htt_msg) + +{ + uint32_t pdev_id; + uint32_t *msg_word = NULL; + uint32_t msg_remain_len = 0; + + msg_word = (uint32_t *) qdf_nbuf_data(htt_msg); + + /*COOKIE MSB*/ + pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK; + + /* stats message length + 16 size of HTT header*/ + msg_remain_len = qdf_min(htt_stats->msg_len + 16, + (uint32_t)DP_EXT_MSG_LENGTH); + + dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc, + msg_word, msg_remain_len, + WDI_NO_VAL, pdev_id); + + if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) { + htt_stats->msg_len -= DP_EXT_MSG_LENGTH; + } + /* Need to be freed here as WDI handler will + * make a copy of pkt to send data to application + */ + qdf_nbuf_free(htt_msg); + return QDF_STATUS_SUCCESS; +} +#else +static inline QDF_STATUS +dp_send_htt_stat_resp(struct htt_stats_context *htt_stats, + struct dp_soc *soc, qdf_nbuf_t htt_msg) +{ + return QDF_STATUS_E_NOSUPPORT; +} +#endif +/** + * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats + * @htt_stats: htt stats info + * + * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message + * contains sub messages which are identified by a TLV header. + * In this function we will process the stream of T2H messages and read all the + * TLV contained in the message. + * + * THe following cases have been taken care of + * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer + * In this case the buffer will contain multiple tlvs. + * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer. + * Only one tlv will be contained in the HTT message and this tag + * will extend onto the next buffer. + * Case 3: When the buffer is the continuation of the previous message + * Case 4: tlv length is 0. which will indicate the end of message + * + * return: void + */ +static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats, + struct dp_soc *soc) +{ + htt_tlv_tag_t tlv_type = 0xff; + qdf_nbuf_t htt_msg = NULL; + uint32_t *msg_word; + uint8_t *tlv_buf_head = NULL; + uint8_t *tlv_buf_tail = NULL; + uint32_t msg_remain_len = 0; + uint32_t tlv_remain_len = 0; + uint32_t *tlv_start; + int cookie_val; + int cookie_msb; + int pdev_id; + bool copy_stats = false; + struct dp_pdev *pdev; + + /* Process node in the HTT message queue */ + while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg)) + != NULL) { + msg_word = (uint32_t *) qdf_nbuf_data(htt_msg); + cookie_val = *(msg_word + 1); + htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET( + *(msg_word + + HTT_T2H_EXT_STATS_TLV_START_OFFSET)); + + if (cookie_val) { + if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg) + == QDF_STATUS_SUCCESS) { + continue; + } + } + + cookie_msb = *(msg_word + 2); + pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK; + pdev = soc->pdev_list[pdev_id]; + + if (cookie_msb >> 2) { + copy_stats = true; + } + + /* read 5th word */ + msg_word = msg_word + 4; + msg_remain_len = qdf_min(htt_stats->msg_len, + (uint32_t) DP_EXT_MSG_LENGTH); + /* Keep processing the node till node length is 0 */ + while (msg_remain_len) { + /* + * if message is not a continuation of previous message + * read the tlv type and tlv length + */ + if (!tlv_buf_head) { + tlv_type = HTT_STATS_TLV_TAG_GET( + *msg_word); + tlv_remain_len = HTT_STATS_TLV_LENGTH_GET( + *msg_word); + } + + if (tlv_remain_len == 0) { + msg_remain_len = 0; + + if (tlv_buf_head) { + qdf_mem_free(tlv_buf_head); + tlv_buf_head = NULL; + tlv_buf_tail = NULL; + } + + goto error; + } + + if (!tlv_buf_head) + tlv_remain_len += HTT_TLV_HDR_LEN; + + if ((tlv_remain_len <= msg_remain_len)) { + /* Case 3 */ + if (tlv_buf_head) { + qdf_mem_copy(tlv_buf_tail, + (uint8_t *)msg_word, + tlv_remain_len); + tlv_start = (uint32_t *)tlv_buf_head; + } else { + /* Case 1 */ + tlv_start = msg_word; + } + + if (copy_stats) + dp_htt_stats_copy_tag(pdev, + tlv_type, + tlv_start); + else + dp_htt_stats_print_tag(pdev, + tlv_type, + tlv_start); + + if (tlv_type == HTT_STATS_PEER_DETAILS_TAG || + tlv_type == HTT_STATS_PEER_STATS_CMN_TAG) + dp_peer_update_inactive_time(pdev, + tlv_type, + tlv_start); + + msg_remain_len -= tlv_remain_len; + + msg_word = (uint32_t *) + (((uint8_t *)msg_word) + + tlv_remain_len); + + tlv_remain_len = 0; + + if (tlv_buf_head) { + qdf_mem_free(tlv_buf_head); + tlv_buf_head = NULL; + tlv_buf_tail = NULL; + } + + } else { /* tlv_remain_len > msg_remain_len */ + /* Case 2 & 3 */ + if (!tlv_buf_head) { + tlv_buf_head = qdf_mem_malloc( + tlv_remain_len); + + if (!tlv_buf_head) { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + "Alloc failed"); + goto error; + } + + tlv_buf_tail = tlv_buf_head; + } + + qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word, + msg_remain_len); + tlv_remain_len -= msg_remain_len; + tlv_buf_tail += msg_remain_len; + } + } + + if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) { + htt_stats->msg_len -= DP_EXT_MSG_LENGTH; + } + + qdf_nbuf_free(htt_msg); + } + return; + +error: + qdf_nbuf_free(htt_msg); + while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg)) + != NULL) + qdf_nbuf_free(htt_msg); +} + +void htt_t2h_stats_handler(void *context) +{ + struct dp_soc *soc = (struct dp_soc *)context; + struct htt_stats_context htt_stats; + uint32_t *msg_word; + qdf_nbuf_t htt_msg = NULL; + uint8_t done; + uint32_t rem_stats; + + if (!soc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "soc is NULL"); + return; + } + + if (!qdf_atomic_read(&soc->cmn_init_done)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "soc: 0x%pK, init_done: %d", soc, + qdf_atomic_read(&soc->cmn_init_done)); + return; + } + + qdf_mem_zero(&htt_stats, sizeof(htt_stats)); + qdf_nbuf_queue_init(&htt_stats.msg); + + /* pull one completed stats from soc->htt_stats_msg and process */ + qdf_spin_lock_bh(&soc->htt_stats.lock); + if (!soc->htt_stats.num_stats) { + qdf_spin_unlock_bh(&soc->htt_stats.lock); + return; + } + while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) { + msg_word = (uint32_t *) qdf_nbuf_data(htt_msg); + msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET; + done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word); + qdf_nbuf_queue_add(&htt_stats.msg, htt_msg); + /* + * Done bit signifies that this is the last T2H buffer in the + * stream of HTT EXT STATS message + */ + if (done) + break; + } + rem_stats = --soc->htt_stats.num_stats; + qdf_spin_unlock_bh(&soc->htt_stats.lock); + + /* If there are more stats to process, schedule stats work again. + * Scheduling prior to processing ht_stats to queue with early + * index + */ + if (rem_stats) + qdf_sched_work(0, &soc->htt_stats.work); + + dp_process_htt_stat_msg(&htt_stats, soc); +} + +/* + * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU, + * if a new peer id arrives in a PPDU + * pdev: DP pdev handle + * @peer_id : peer unique identifier + * @ppdu_info: per ppdu tlv structure + * + * return:user index to be populated + */ +#ifdef FEATURE_PERPKT_INFO +static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev, + uint16_t peer_id, + struct ppdu_info *ppdu_info) +{ + uint8_t user_index = 0; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + while ((user_index + 1) <= ppdu_info->last_user) { + ppdu_user_desc = &ppdu_desc->user[user_index]; + if (ppdu_user_desc->peer_id != peer_id) { + user_index++; + continue; + } else { + /* Max users possible is 8 so user array index should + * not exceed 7 + */ + qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX); + return user_index; + } + } + + ppdu_info->last_user++; + /* Max users possible is 8 so last user should not exceed 8 */ + qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS); + return ppdu_info->last_user - 1; +} + +/* + * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv + * pdev: DP pdev handle + * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev, + uint32_t *tag_buf, struct ppdu_info *ppdu_info) +{ + uint16_t frame_type; + uint16_t frame_ctrl; + uint16_t freq; + struct dp_soc *soc = NULL; + struct cdp_tx_completion_ppdu *ppdu_desc = NULL; + uint64_t ppdu_start_timestamp; + uint32_t *start_tag_buf; + + start_tag_buf = tag_buf; + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID); + ppdu_info->sched_cmdid = + HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf); + ppdu_desc->num_users = + HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf); + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE); + frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf); + ppdu_desc->htt_frame_type = frame_type; + + frame_ctrl = ppdu_desc->frame_ctrl; + + switch (frame_type) { + case HTT_STATS_FTYPE_TIDQ_DATA_SU: + case HTT_STATS_FTYPE_TIDQ_DATA_MU: + case HTT_STATS_FTYPE_SGEN_QOS_NULL: + /* + * for management packet, frame type come as DATA_SU + * need to check frame_ctrl before setting frame_type + */ + if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL) + ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL; + else + ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA; + break; + case HTT_STATS_FTYPE_SGEN_MU_BAR: + case HTT_STATS_FTYPE_SGEN_BAR: + ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR; + ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id; + break; + default: + ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL; + break; + } + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US); + ppdu_desc->tx_duration = *tag_buf; + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US); + ppdu_desc->ppdu_start_timestamp = *tag_buf; + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE); + freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf); + if (freq != ppdu_desc->channel) { + soc = pdev->soc; + ppdu_desc->channel = freq; + if (soc && soc->cdp_soc.ol_ops->freq_to_channel) + pdev->operating_channel = + soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc, + pdev->pdev_id, freq); + } + + ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf); + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM); + ppdu_desc->beam_change = + HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf); + + dp_tx_capture_htt_frame_counter(pdev, frame_type); + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US); + ppdu_start_timestamp = *tag_buf; + ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp << + HTT_SHIFT_UPPER_TIMESTAMP) & + HTT_MASK_UPPER_TIMESTAMP); + + ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp + + ppdu_desc->tx_duration; + /* Ack time stamp is same as end time stamp*/ + ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp; + + ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp + + ppdu_desc->tx_duration; + + ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp; + ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp; + ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration; + + /* Ack time stamp is same as end time stamp*/ + ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp; +} + +/* + * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common + * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_common_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint16_t peer_id; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + struct dp_peer *peer; + struct dp_vdev *vdev; + + ppdu_desc = + (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf); + + curr_user_index = + dp_get_ppdu_info_user_index(pdev, + peer_id, ppdu_info); + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + + if (peer_id == DP_SCAN_PEER_ID) { + ppdu_desc->vdev_id = + HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf); + vdev = + dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc, + ppdu_desc->vdev_id); + if (!vdev) + return; + qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw, + QDF_MAC_ADDR_SIZE); + } else { + peer = dp_peer_find_by_id(pdev->soc, peer_id); + if (!peer) + return; + qdf_mem_copy(ppdu_user_desc->mac_addr, + peer->mac_addr.raw, QDF_MAC_ADDR_SIZE); + dp_peer_unref_del_find_by_id(peer); + } + + ppdu_user_desc->peer_id = peer_id; + + tag_buf++; + + if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) { + ppdu_user_desc->delayed_ba = 1; + ppdu_desc->delayed_ba = 1; + } + + if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) { + ppdu_user_desc->is_mcast = true; + ppdu_user_desc->mpdu_tried_mcast = + HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); + ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast; + } else { + ppdu_user_desc->mpdu_tried_ucast = + HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); + } + + tag_buf++; + + ppdu_user_desc->qos_ctrl = + HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf); + ppdu_user_desc->frame_ctrl = + HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf); + ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl; + + if (ppdu_user_desc->delayed_ba) + ppdu_user_desc->mpdu_success = 0; + + tag_buf += 3; + + if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) { + ppdu_user_desc->ppdu_cookie = + HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf); + ppdu_user_desc->is_ppdu_cookie_valid = 1; + } +} + + +/** + * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv + * @pdev: DP pdev handle + * @tag_buf: T2H message buffer carrying the user rate TLV + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev, + uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint16_t peer_id; + struct dp_peer *peer; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + struct dp_vdev *vdev; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf); + + curr_user_index = + dp_get_ppdu_info_user_index(pdev, + peer_id, ppdu_info); + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + if (peer_id == DP_SCAN_PEER_ID) { + vdev = + dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc, + ppdu_desc->vdev_id); + if (!vdev) + return; + } else { + peer = dp_peer_find_by_id(pdev->soc, peer_id); + if (!peer) + return; + dp_peer_unref_del_find_by_id(peer); + } + + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->tid = + HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf); + + tag_buf += 1; + + ppdu_user_desc->user_pos = + HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf); + ppdu_user_desc->mu_group_id = + HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf); + + tag_buf += 1; + + ppdu_user_desc->ru_start = + HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf); + ppdu_user_desc->ru_tones = + (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) - + HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1; + + tag_buf += 2; + + ppdu_user_desc->ppdu_type = + HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf); + + tag_buf++; + ppdu_user_desc->tx_rate = *tag_buf; + + ppdu_user_desc->ltf_size = + HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf); + ppdu_user_desc->stbc = + HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf); + ppdu_user_desc->he_re = + HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf); + ppdu_user_desc->txbf = + HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf); + ppdu_user_desc->bw = + HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2; + ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf); + ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf); + ppdu_user_desc->preamble = + HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf); + ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf); + ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf); + ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf); +} + +/* + * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process + * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv + * pdev: DP PDEV handle + * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf = + (htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf; + + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + uint16_t peer_id; + uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + + peer_id = + HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); + + if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) + return; + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->start_seq = dp_stats_buf->start_seq; + qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, + sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS); + + dp_process_ppdu_stats_update_failed_bitmap(pdev, + (void *)ppdu_user_desc, + ppdu_info->ppdu_id, + size); +} + +/* + * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process + * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv + * soc: DP SOC handle + * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf = + (htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf; + + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + uint16_t peer_id; + uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + + peer_id = + HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); + + if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) + return; + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->start_seq = dp_stats_buf->start_seq; + qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, + sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS); + + dp_process_ppdu_stats_update_failed_bitmap(pdev, + (void *)ppdu_user_desc, + ppdu_info->ppdu_id, + size); +} + +/* + * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process + * htt_ppdu_stats_user_cmpltn_common_tlv + * soc: DP SOC handle + * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_cmpltn_common_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint16_t peer_id; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + uint8_t bw_iter; + htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf = + (htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + peer_id = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf); + + if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) + return; + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->peer_id = peer_id; + ppdu_desc->last_usr_index = curr_user_index; + + ppdu_user_desc->completion_status = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET( + *tag_buf); + + ppdu_user_desc->tid = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf); + + + tag_buf++; + if (qdf_likely(ppdu_user_desc->completion_status == + HTT_PPDU_STATS_USER_STATUS_OK)) { + ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi; + ppdu_user_desc->ack_rssi_valid = 1; + } else { + ppdu_user_desc->ack_rssi_valid = 0; + } + + tag_buf++; + + ppdu_user_desc->mpdu_success = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf); + + ppdu_user_desc->mpdu_failed = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) - + ppdu_user_desc->mpdu_success; + + tag_buf++; + + ppdu_user_desc->long_retries = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf); + + ppdu_user_desc->short_retries = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf); + ppdu_user_desc->retry_msdus = + ppdu_user_desc->long_retries + ppdu_user_desc->short_retries; + + ppdu_user_desc->is_ampdu = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf); + ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu; + + ppdu_desc->resp_type = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf); + ppdu_desc->mprot_type = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf); + ppdu_desc->rts_success = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf); + ppdu_desc->rts_failure = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf); + + /* + * increase successful mpdu counter from + * htt_ppdu_stats_user_cmpltn_common_tlv + */ + ppdu_info->mpdu_compltn_common_tlv += ppdu_user_desc->mpdu_success; + + /* + * MU BAR may send request to n users but we may received ack only from + * m users. To have count of number of users respond back, we have a + * separate counter bar_num_users per PPDU that get increment for every + * htt_ppdu_stats_user_cmpltn_common_tlv + */ + ppdu_desc->bar_num_users++; + + tag_buf++; + for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) { + ppdu_user_desc->rssi_chain[bw_iter] = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf); + tag_buf++; + } + + ppdu_user_desc->sa_tx_antenna = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf); + + tag_buf++; + ppdu_user_desc->sa_is_training = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf); + if (ppdu_user_desc->sa_is_training) { + ppdu_user_desc->sa_goodput = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf); + } + + tag_buf++; + for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) { + ppdu_user_desc->sa_max_rates[bw_iter] = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]); + } + + tag_buf += CDP_NUM_SA_BW; + ppdu_user_desc->current_rate_per = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf); +} + +/* + * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process + * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv + * pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf = + (htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + struct cdp_tx_completion_ppdu *ppdu_desc; + uint8_t curr_user_index = 0; + uint16_t peer_id; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + + peer_id = + HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); + + if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) + return; + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; + qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, + sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS); + ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32; +} + +/* + * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process + * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv + * pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf = + (htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + struct cdp_tx_completion_ppdu *ppdu_desc; + uint8_t curr_user_index = 0; + uint16_t peer_id; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + + peer_id = + HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); + + if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) + return; + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; + qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, + sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS); + ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32; +} + +/* + * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process + * htt_ppdu_stats_user_compltn_ack_ba_status_tlv + * pdev: DP PDE handle + * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint16_t peer_id; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf += 2; + peer_id = + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf); + + if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) + return; + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->peer_id = peer_id; + + tag_buf++; + /* not to update ppdu_desc->tid from this TLV */ + ppdu_user_desc->num_mpdu = + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf); + + ppdu_user_desc->num_msdu = + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf); + + ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu; + + tag_buf++; + ppdu_user_desc->start_seq = + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET( + *tag_buf); + + tag_buf++; + ppdu_user_desc->success_bytes = *tag_buf; + + /* increase successful mpdu counter */ + ppdu_info->mpdu_ack_ba_tlv += ppdu_user_desc->num_mpdu; +} + +/* + * dp_process_ppdu_stats_user_common_array_tlv: Process + * htt_ppdu_stats_user_common_array_tlv + * pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_common_array_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint32_t peer_id; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + struct htt_tx_ppdu_stats_info *dp_stats_buf; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf; + tag_buf += 3; + peer_id = + HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf); + + if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Invalid peer"); + return; + } + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + + ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes; + ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes; + + tag_buf++; + + ppdu_user_desc->success_msdus = + HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf); + ppdu_user_desc->retry_bytes = + HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf); + tag_buf++; + ppdu_user_desc->failed_msdus = + HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf); +} + +/* + * dp_process_ppdu_stats_flush_tlv: Process + * htt_ppdu_stats_flush_tlv + * @pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void +dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev, + uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + struct cdp_tx_completion_ppdu *ppdu_desc; + uint32_t peer_id; + uint8_t tid; + struct dp_peer *peer; + + ppdu_desc = (struct cdp_tx_completion_ppdu *) + qdf_nbuf_data(ppdu_info->nbuf); + ppdu_desc->is_flush = 1; + + tag_buf++; + ppdu_desc->drop_reason = *tag_buf; + + tag_buf++; + ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf); + ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf); + ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf); + + tag_buf++; + peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf); + tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf); + + ppdu_desc->user[0].peer_id = peer_id; + ppdu_desc->user[0].tid = tid; + + ppdu_desc->queue_type = + HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf); + + peer = dp_peer_find_by_id(pdev->soc, peer_id); + if (!peer) + return; + + if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) { + DP_STATS_INC(peer, + tx.excess_retries_per_ac[TID_TO_WME_AC(tid)], + ppdu_desc->num_msdu); + } + + dp_peer_unref_del_find_by_id(peer); +} + +#ifndef WLAN_TX_PKT_CAPTURE_ENH +/* + * dp_deliver_mgmt_frm: Process + * @pdev: DP PDEV handle + * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv + * + * return: void + */ +void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf) +{ + if (pdev->tx_sniffer_enable || pdev->mcopy_mode) { + dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc, + nbuf, HTT_INVALID_PEER, + WDI_NO_VAL, pdev->pdev_id); + } +} +#endif + +/* + * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process + * htt_ppdu_stats_tx_mgmtctrl_payload_tlv + * @pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv + * @length: tlv_length + * + * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller + */ +static QDF_STATUS +dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev, + qdf_nbuf_t tag_buf, + uint32_t ppdu_id) +{ + uint32_t *nbuf_ptr; + uint8_t trim_size; + size_t head_size; + struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info; + uint32_t *msg_word; + uint32_t tsf_hdr; + + if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) && + (!pdev->bpr_enable) && (!pdev->tx_capture_enabled)) + return QDF_STATUS_SUCCESS; + + /* + * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t + */ + msg_word = (uint32_t *)qdf_nbuf_data(tag_buf); + msg_word = msg_word + 2; + tsf_hdr = *msg_word; + + trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf + + HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) - + qdf_nbuf_data(tag_buf)); + + if (!qdf_nbuf_pull_head(tag_buf, trim_size)) + return QDF_STATUS_SUCCESS; + + qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) - + pdev->mgmtctrl_frm_info.mgmt_buf_len); + + if (pdev->tx_capture_enabled) { + head_size = sizeof(struct cdp_tx_mgmt_comp_info); + if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) { + qdf_err("Fail to get headroom h_sz %d h_avail %d\n", + head_size, qdf_nbuf_headroom(tag_buf)); + qdf_assert_always(0); + return QDF_STATUS_E_NOMEM; + } + ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *) + qdf_nbuf_push_head(tag_buf, head_size); + qdf_assert_always(ptr_mgmt_comp_info); + ptr_mgmt_comp_info->ppdu_id = ppdu_id; + ptr_mgmt_comp_info->is_sgen_pkt = true; + ptr_mgmt_comp_info->tx_tsf = tsf_hdr; + } else { + head_size = sizeof(ppdu_id); + nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size); + *nbuf_ptr = ppdu_id; + } + + if (pdev->bpr_enable) { + dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc, + tag_buf, HTT_INVALID_PEER, + WDI_NO_VAL, pdev->pdev_id); + } + + dp_deliver_mgmt_frm(pdev, tag_buf); + + return QDF_STATUS_E_ALREADY; +} + +/** + * dp_validate_fix_ppdu_tlv(): Function to validate the length of PPDU + * + * If the TLV length sent as part of PPDU TLV is less that expected size i.e + * size of corresponding data structure, pad the remaining bytes with zeros + * and continue processing the TLVs + * + * @pdev: DP pdev handle + * @tag_buf: TLV buffer + * @tlv_expected_size: Expected size of Tag + * @tlv_len: TLV length received from FW + * + * Return: Pointer to updated TLV + */ +static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev, + uint32_t *tag_buf, + uint16_t tlv_expected_size, + uint16_t tlv_len) +{ + uint32_t *tlv_desc = tag_buf; + + qdf_assert_always(tlv_len != 0); + + if (tlv_len < tlv_expected_size) { + qdf_mem_zero(pdev->ppdu_tlv_buf, tlv_expected_size); + qdf_mem_copy(pdev->ppdu_tlv_buf, tag_buf, tlv_len); + tlv_desc = pdev->ppdu_tlv_buf; + } + + return tlv_desc; +} + +/** + * dp_process_ppdu_tag(): Function to process the PPDU TLVs + * @pdev: DP pdev handle + * @tag_buf: TLV buffer + * @tlv_len: length of tlv + * @ppdu_info: per ppdu tlv structure + * + * return: void + */ +static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf, + uint32_t tlv_len, struct ppdu_info *ppdu_info) +{ + uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); + uint16_t tlv_expected_size; + uint32_t *tlv_desc; + + switch (tlv_type) { + case HTT_PPDU_STATS_COMMON_TLV: + tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMMON_TLV: + tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc, + ppdu_info); + break; + case HTT_PPDU_STATS_USR_RATE_TLV: + tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc, + ppdu_info); + break; + case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV: + tlv_expected_size = + sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv( + pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV: + tlv_expected_size = + sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv( + pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV: + tlv_expected_size = + sizeof(htt_ppdu_stats_user_cmpltn_common_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_cmpltn_common_tlv( + pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV: + tlv_expected_size = + sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( + pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV: + tlv_expected_size = + sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( + pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV: + tlv_expected_size = + sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv( + pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV: + tlv_expected_size = + sizeof(htt_ppdu_stats_usr_common_array_tlv_v); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_common_array_tlv( + pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV: + tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc, + ppdu_info); + break; + default: + break; + } +} + +/** + * dp_ppdu_desc_user_stats_update(): Function to update TX user stats + * @pdev: DP pdev handle + * @ppdu_info: per PPDU TLV descriptor + * + * return: void + */ +void +dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev, + struct ppdu_info *ppdu_info) +{ + struct cdp_tx_completion_ppdu *ppdu_desc = NULL; + struct dp_peer *peer = NULL; + uint32_t tlv_bitmap_expected; + uint32_t tlv_bitmap_default; + uint16_t i; + uint32_t num_users; + + ppdu_desc = (struct cdp_tx_completion_ppdu *) + qdf_nbuf_data(ppdu_info->nbuf); + + ppdu_desc->num_users = ppdu_info->last_user; + ppdu_desc->ppdu_id = ppdu_info->ppdu_id; + + tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP; + if (pdev->tx_sniffer_enable || pdev->mcopy_mode || + pdev->tx_capture_enabled) { + if (ppdu_info->is_ampdu) + tlv_bitmap_expected = + dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap( + ppdu_info->tlv_bitmap); + } + + tlv_bitmap_default = tlv_bitmap_expected; + + if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { + num_users = ppdu_desc->bar_num_users; + ppdu_desc->num_users = ppdu_desc->bar_num_users; + } else { + num_users = ppdu_desc->num_users; + } + + for (i = 0; i < num_users; i++) { + ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu; + ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu; + + peer = dp_peer_find_by_id(pdev->soc, + ppdu_desc->user[i].peer_id); + /** + * This check is to make sure peer is not deleted + * after processing the TLVs. + */ + if (!peer) + continue; + + ppdu_desc->user[i].cookie = (void *)peer->wlanstats_ctx; + + /* + * different frame like DATA, BAR or CTRL has different + * tlv bitmap expected. Apart from ACK_BA_STATUS TLV, we + * receive other tlv in-order/sequential from fw. + * Since ACK_BA_STATUS TLV come from Hardware it is + * asynchronous So we need to depend on some tlv to confirm + * all tlv is received for a ppdu. + * So we depend on both HTT_PPDU_STATS_COMMON_TLV and + * ACK_BA_STATUS_TLV. for failure packet we won't get + * ACK_BA_STATUS_TLV. + */ + if (!(ppdu_info->tlv_bitmap & + (1 << HTT_PPDU_STATS_COMMON_TLV)) || + (!(ppdu_info->tlv_bitmap & + (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) && + (ppdu_desc->user[i].completion_status == + HTT_PPDU_STATS_USER_STATUS_OK))) { + dp_peer_unref_del_find_by_id(peer); + continue; + } + + /** + * Update tx stats for data frames having Qos as well as + * non-Qos data tid + */ + + if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX || + (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) || + (ppdu_desc->htt_frame_type == + HTT_STATS_FTYPE_SGEN_QOS_NULL)) && + (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) { + + dp_tx_stats_update(pdev, peer, + &ppdu_desc->user[i], + ppdu_desc->ack_rssi); + dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]); + } + + dp_peer_unref_del_find_by_id(peer); + tlv_bitmap_expected = tlv_bitmap_default; + } +} + +#ifndef WLAN_TX_PKT_CAPTURE_ENH + +/** + * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor + * to upper layer + * @pdev: DP pdev handle + * @ppdu_info: per PPDU TLV descriptor + * + * return: void + */ +static +void dp_ppdu_desc_deliver(struct dp_pdev *pdev, + struct ppdu_info *ppdu_info) +{ + struct cdp_tx_completion_ppdu *ppdu_desc = NULL; + qdf_nbuf_t nbuf; + + ppdu_desc = (struct cdp_tx_completion_ppdu *) + qdf_nbuf_data(ppdu_info->nbuf); + + dp_ppdu_desc_user_stats_update(pdev, ppdu_info); + + /* + * Remove from the list + */ + TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem); + nbuf = ppdu_info->nbuf; + pdev->list_depth--; + qdf_mem_free(ppdu_info); + + qdf_assert_always(nbuf); + + ppdu_desc = (struct cdp_tx_completion_ppdu *) + qdf_nbuf_data(nbuf); + + /** + * Deliver PPDU stats only for valid (acked) data frames if + * sniffer mode is not enabled. + * If sniffer mode is enabled, PPDU stats for all frames + * including mgmt/control frames should be delivered to upper layer + */ + if (pdev->tx_sniffer_enable || pdev->mcopy_mode) { + dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc, + nbuf, HTT_INVALID_PEER, + WDI_NO_VAL, pdev->pdev_id); + } else { + if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 && + ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) { + + dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, + pdev->soc, nbuf, HTT_INVALID_PEER, + WDI_NO_VAL, pdev->pdev_id); + } else + qdf_nbuf_free(nbuf); + } + return; +} + +#endif + +/** + * dp_get_ppdu_desc(): Function to allocate new PPDU status + * desc for new ppdu id + * @pdev: DP pdev handle + * @ppdu_id: PPDU unique identifier + * @tlv_type: TLV type received + * + * return: ppdu_info per ppdu tlv structure + */ +static +struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id, + uint8_t tlv_type) +{ + struct ppdu_info *ppdu_info = NULL; + + /* + * Find ppdu_id node exists or not + */ + TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) { + + if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) { + break; + } + } + + if (ppdu_info) { + if (ppdu_info->tlv_bitmap & (1 << tlv_type)) { + /** + * if we get tlv_type that is already been processed + * for ppdu, that means we got a new ppdu with same + * ppdu id. Hence Flush the older ppdu + * for MUMIMO and OFDMA, In a PPDU we have + * multiple user with same tlv types. tlv bitmap is + * used to check whether SU or MU_MIMO/OFDMA + */ + if (!(ppdu_info->tlv_bitmap & + (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) + return ppdu_info; + + /** + * apart from ACK BA STATUS TLV rest all comes in order + * so if tlv type not ACK BA STATUS TLV we can deliver + * ppdu_info + */ + if (tlv_type == + HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) + return ppdu_info; + + dp_ppdu_desc_deliver(pdev, ppdu_info); + } else { + return ppdu_info; + } + } + + /** + * Flush the head ppdu descriptor if ppdu desc list reaches max + * threshold + */ + if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) { + ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list); + dp_ppdu_desc_deliver(pdev, ppdu_info); + } + + /* + * Allocate new ppdu_info node + */ + ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info)); + if (!ppdu_info) + return NULL; + + ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, + sizeof(struct cdp_tx_completion_ppdu), 0, 4, + TRUE); + if (!ppdu_info->nbuf) { + qdf_mem_free(ppdu_info); + return NULL; + } + + ppdu_info->ppdu_desc = + (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), + sizeof(struct cdp_tx_completion_ppdu)); + + if (qdf_nbuf_put_tail(ppdu_info->nbuf, + sizeof(struct cdp_tx_completion_ppdu)) == NULL) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "No tailroom for HTT PPDU"); + qdf_nbuf_free(ppdu_info->nbuf); + ppdu_info->nbuf = NULL; + ppdu_info->last_user = 0; + qdf_mem_free(ppdu_info); + return NULL; + } + + /** + * No lock is needed because all PPDU TLVs are processed in + * same context and this list is updated in same context + */ + TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info, + ppdu_info_list_elem); + pdev->list_depth++; + return ppdu_info; +} + +/** + * dp_htt_process_tlv(): Function to process each PPDU TLVs + * @pdev: DP pdev handle + * @htt_t2h_msg: HTT target to host message + * + * return: ppdu_info per ppdu tlv structure + */ + +static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev, + qdf_nbuf_t htt_t2h_msg) +{ + uint32_t length; + uint32_t ppdu_id; + uint8_t tlv_type; + uint32_t tlv_length, tlv_bitmap_expected; + uint8_t *tlv_buf; + struct ppdu_info *ppdu_info = NULL; + struct cdp_tx_completion_ppdu *ppdu_desc = NULL; + struct dp_peer *peer; + uint32_t i = 0; + + uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg); + + length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word); + + msg_word = msg_word + 1; + ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word); + + + msg_word = msg_word + 3; + while (length > 0) { + tlv_buf = (uint8_t *)msg_word; + tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word); + tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word); + if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG)) + pdev->stats.ppdu_stats_counter[tlv_type]++; + + if (tlv_length == 0) + break; + + tlv_length += HTT_TLV_HDR_LEN; + + /** + * Not allocating separate ppdu descriptor for MGMT Payload + * TLV as this is sent as separate WDI indication and it + * doesn't contain any ppdu information + */ + if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) { + pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf; + pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id; + pdev->mgmtctrl_frm_info.mgmt_buf_len = + HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET + (*(msg_word + 1)); + msg_word = + (uint32_t *)((uint8_t *)tlv_buf + tlv_length); + length -= (tlv_length); + continue; + } + + ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type); + if (!ppdu_info) + return NULL; + ppdu_info->ppdu_desc->bss_color = + pdev->rx_mon_recv_status.bsscolor; + + ppdu_info->ppdu_id = ppdu_id; + ppdu_info->tlv_bitmap |= (1 << tlv_type); + + dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info); + + /** + * Increment pdev level tlv count to monitor + * missing TLVs + */ + pdev->tlv_count++; + ppdu_info->last_tlv_cnt = pdev->tlv_count; + msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length); + length -= (tlv_length); + } + + if (!ppdu_info) + return NULL; + + pdev->last_ppdu_id = ppdu_id; + + tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP; + + if (pdev->tx_sniffer_enable || pdev->mcopy_mode || + pdev->tx_capture_enabled) { + if (ppdu_info->is_ampdu) + tlv_bitmap_expected = + dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap( + ppdu_info->tlv_bitmap); + } + + ppdu_desc = ppdu_info->ppdu_desc; + + if (!ppdu_desc) + return NULL; + + if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status != + HTT_PPDU_STATS_USER_STATUS_OK) { + tlv_bitmap_expected = tlv_bitmap_expected & 0xFF; + } + + if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA && + (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV)) && + ppdu_desc->delayed_ba) { + for (i = 0; i < ppdu_desc->num_users; i++) { + uint32_t ppdu_id; + + ppdu_id = ppdu_desc->ppdu_id; + peer = dp_peer_find_by_id(pdev->soc, + ppdu_desc->user[i].peer_id); + /** + * This check is to make sure peer is not deleted + * after processing the TLVs. + */ + if (!peer) + continue; + + /** + * save delayed ba user info + */ + if (ppdu_desc->user[i].delayed_ba) { + dp_peer_copy_delay_stats(peer, + &ppdu_desc->user[i]); + peer->last_delayed_ba_ppduid = ppdu_id; + } + dp_peer_unref_del_find_by_id(peer); + } + } + + /* + * when frame type is BAR and STATS_COMMON_TLV is set + * copy the store peer delayed info to BAR status + */ + if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR && + (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV))) { + for (i = 0; i < ppdu_desc->bar_num_users; i++) { + peer = dp_peer_find_by_id(pdev->soc, + ppdu_desc->user[i].peer_id); + /** + * This check is to make sure peer is not deleted + * after processing the TLVs. + */ + if (!peer) + continue; + + if (peer->last_delayed_ba) { + dp_peer_copy_stats_to_bar(peer, + &ppdu_desc->user[i]); + ppdu_desc->bar_ppdu_id = ppdu_desc->ppdu_id; + ppdu_desc->ppdu_id = + peer->last_delayed_ba_ppduid; + } + dp_peer_unref_del_find_by_id(peer); + } + } + + /* + * for frame type DATA and BAR, we update stats based on MSDU, + * successful msdu and mpdu are populate from ACK BA STATUS TLV + * which comes out of order. successful mpdu also populated from + * COMPLTN COMMON TLV which comes in order. for every ppdu_info + * we store successful mpdu from both tlv and compare before delivering + * to make sure we received ACK BA STATUS TLV. For some self generated + * frame we won't get ack ba status tlv so no need to wait for + * ack ba status tlv. + */ + if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL && + ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) { + /* + * successful mpdu count should match with both tlv + */ + if (ppdu_info->mpdu_compltn_common_tlv != + ppdu_info->mpdu_ack_ba_tlv) + return NULL; + } + + /** + * Once all the TLVs for a given PPDU has been processed, + * return PPDU status to be delivered to higher layer. + * tlv_bitmap_expected can't be available for different frame type. + * But STATS COMMON TLV is the last TLV from the FW for a ppdu. + * apart from ACK BA TLV, FW sends other TLV in sequential order. + * flush tlv comes separate. + */ + if ((ppdu_info->tlv_bitmap != 0 && + (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV))) || + (ppdu_info->tlv_bitmap & + (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) + return ppdu_info; + + return NULL; +} +#endif /* FEATURE_PERPKT_INFO */ + +/** + * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW + * @soc: DP SOC handle + * @pdev_id: pdev id + * @htt_t2h_msg: HTT message nbuf + * + * return:void + */ +#if defined(WDI_EVENT_ENABLE) +#ifdef FEATURE_PERPKT_INFO +static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, + uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) +{ + struct dp_pdev *pdev = soc->pdev_list[pdev_id]; + struct ppdu_info *ppdu_info = NULL; + bool free_buf = true; + + if (!pdev) + return true; + + if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && + !pdev->mcopy_mode && !pdev->bpr_enable) + return free_buf; + + ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg); + + if (pdev->mgmtctrl_frm_info.mgmt_buf) { + if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv + (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) != + QDF_STATUS_SUCCESS) + free_buf = false; + } + + if (ppdu_info) + dp_ppdu_desc_deliver(pdev, ppdu_info); + + pdev->mgmtctrl_frm_info.mgmt_buf = NULL; + pdev->mgmtctrl_frm_info.mgmt_buf_len = 0; + pdev->mgmtctrl_frm_info.ppdu_id = 0; + + return free_buf; +} +#else +static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, + uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) +{ + return true; +} +#endif +#endif + +/** + * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats + * @soc: DP SOC handle + * @htt_t2h_msg: HTT message nbuf + * + * return:void + */ +static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc, + qdf_nbuf_t htt_t2h_msg) +{ + uint8_t done; + qdf_nbuf_t msg_copy; + uint32_t *msg_word; + + msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg); + msg_word = msg_word + 3; + done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word); + + /* + * HTT EXT stats response comes as stream of TLVs which span over + * multiple T2H messages. + * The first message will carry length of the response. + * For rest of the messages length will be zero. + * + * Clone the T2H message buffer and store it in a list to process + * it later. + * + * The original T2H message buffers gets freed in the T2H HTT event + * handler + */ + msg_copy = qdf_nbuf_clone(htt_t2h_msg); + + if (!msg_copy) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, + "T2H messge clone failed for HTT EXT STATS"); + goto error; + } + + qdf_spin_lock_bh(&soc->htt_stats.lock); + qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy); + /* + * Done bit signifies that this is the last T2H buffer in the stream of + * HTT EXT STATS message + */ + if (done) { + soc->htt_stats.num_stats++; + qdf_sched_work(0, &soc->htt_stats.work); + } + qdf_spin_unlock_bh(&soc->htt_stats.lock); + + return; + +error: + qdf_spin_lock_bh(&soc->htt_stats.lock); + while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) + != NULL) { + qdf_nbuf_free(msg_copy); + } + soc->htt_stats.num_stats = 0; + qdf_spin_unlock_bh(&soc->htt_stats.lock); + return; + +} + +/* + * htt_soc_attach_target() - SOC level HTT setup + * @htt_soc: HTT SOC handle + * + * Return: 0 on success; error code on failure + */ +int htt_soc_attach_target(struct htt_soc *htt_soc) +{ + struct htt_soc *soc = (struct htt_soc *)htt_soc; + + return htt_h2t_ver_req_msg(soc); +} + +void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc) +{ + htt_soc->htc_soc = htc_soc; +} + +HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc) +{ + return htt_soc->htc_soc; +} + +struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle) +{ + int i; + int j; + int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long); + struct htt_soc *htt_soc = NULL; + + htt_soc = qdf_mem_malloc(sizeof(*htt_soc)); + if (!htt_soc) { + dp_err("HTT attach failed"); + return NULL; + } + + for (i = 0; i < MAX_PDEV_CNT; i++) { + htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size); + if (!htt_soc->pdevid_tt[i].umac_ttt) + break; + qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1); + htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size); + if (!htt_soc->pdevid_tt[i].lmac_ttt) { + qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt); + break; + } + qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1); + } + if (i != MAX_PDEV_CNT) { + for (j = 0; j < i; j++) { + qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt); + qdf_mem_free(htt_soc->pdevid_tt[i].lmac_ttt); + } + return NULL; + } + + htt_soc->dp_soc = soc; + htt_soc->htc_soc = htc_handle; + HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex); + + return htt_soc; +} + +#if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG) +/* + * dp_ppdu_stats_ind_handler() - PPDU stats msg handler + * @htt_soc: HTT SOC handle + * @msg_word: Pointer to payload + * @htt_t2h_msg: HTT msg nbuf + * + * Return: True if buffer should be freed by caller. + */ +static bool +dp_ppdu_stats_ind_handler(struct htt_soc *soc, + uint32_t *msg_word, + qdf_nbuf_t htt_t2h_msg) +{ + u_int8_t pdev_id; + u_int8_t target_pdev_id; + bool free_buf; + qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE); + target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word); + pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc, + target_pdev_id); + free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id, + htt_t2h_msg); + dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc, + htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL, + pdev_id); + return free_buf; +} +#else +static bool +dp_ppdu_stats_ind_handler(struct htt_soc *soc, + uint32_t *msg_word, + qdf_nbuf_t htt_t2h_msg) +{ + return true; +} +#endif + +#if defined(WDI_EVENT_ENABLE) && \ + !defined(REMOVE_PKT_LOG) +/* + * dp_pktlog_msg_handler() - Pktlog msg handler + * @htt_soc: HTT SOC handle + * @msg_word: Pointer to payload + * + * Return: None + */ +static void +dp_pktlog_msg_handler(struct htt_soc *soc, + uint32_t *msg_word) +{ + uint8_t pdev_id; + uint8_t target_pdev_id; + uint32_t *pl_hdr; + + target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word); + pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc, + target_pdev_id); + pl_hdr = (msg_word + 1); + dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc, + pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL, + pdev_id); +} +#else +static void +dp_pktlog_msg_handler(struct htt_soc *soc, + uint32_t *msg_word) +{ +} +#endif + +/* + * time_allow_print() - time allow print + * @htt_ring_tt: ringi_id array of timestamps + * @ring_id: ring_id (index) + * + * Return: 1 for successfully saving timestamp in array + * and 0 for timestamp falling within 2 seconds after last one + */ +static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id) +{ + unsigned long tstamp; + unsigned long delta; + + tstamp = qdf_get_system_timestamp(); + + if (!htt_ring_tt) + return 0; //unable to print backpressure messages + + if (htt_ring_tt[ring_id] == -1) { + htt_ring_tt[ring_id] = tstamp; + return 1; + } + delta = tstamp - htt_ring_tt[ring_id]; + if (delta >= 2000) { + htt_ring_tt[ring_id] = tstamp; + return 1; + } + + return 0; +} + +static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type, + u_int8_t pdev_id, u_int8_t ring_id, + u_int16_t hp_idx, u_int16_t tp_idx, + u_int32_t bkp_time, char *ring_stype) +{ + dp_alert("msg_type: %d pdev_id: %d ring_type: %s ", + msg_type, pdev_id, ring_stype); + dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ", + ring_id, hp_idx, tp_idx, bkp_time); +} + +/* + * dp_htt_bkp_event_alert() - htt backpressure event alert + * @msg_word: htt packet context + * @htt_soc: HTT SOC handle + * + * Return: after attempting to print stats + */ +static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc) +{ + u_int8_t ring_type; + u_int8_t pdev_id; + uint8_t target_pdev_id; + u_int8_t ring_id; + u_int16_t hp_idx; + u_int16_t tp_idx; + u_int32_t bkp_time; + enum htt_t2h_msg_type msg_type; + struct dp_soc *dpsoc; + struct dp_pdev *pdev; + struct dp_htt_timestamp *radio_tt; + + if (!soc) + return; + + dpsoc = (struct dp_soc *)soc->dp_soc; + msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word); + ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word); + target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word); + pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc, + target_pdev_id); + pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id]; + ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word); + hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1)); + tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1)); + bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2)); + radio_tt = &soc->pdevid_tt[pdev_id]; + + switch (ring_type) { + case HTT_SW_RING_TYPE_UMAC: + if (!time_allow_print(radio_tt->umac_ttt, ring_id)) + return; + dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx, + bkp_time, "HTT_SW_RING_TYPE_UMAC"); + break; + case HTT_SW_RING_TYPE_LMAC: + if (!time_allow_print(radio_tt->lmac_ttt, ring_id)) + return; + dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx, + bkp_time, "HTT_SW_RING_TYPE_LMAC"); + break; + default: + dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx, + bkp_time, "UNKNOWN"); + break; + } + + dp_print_ring_stats(pdev); + dp_print_napi_stats(pdev->soc); +} + +/* + * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler + * @context: Opaque context (HTT SOC handle) + * @pkt: HTC packet + */ +static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt) +{ + struct htt_soc *soc = (struct htt_soc *) context; + qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext; + u_int32_t *msg_word; + enum htt_t2h_msg_type msg_type; + bool free_buf = true; + + /* check for successful message reception */ + if (pkt->Status != QDF_STATUS_SUCCESS) { + if (pkt->Status != QDF_STATUS_E_CANCELED) + soc->stats.htc_err_cnt++; + + qdf_nbuf_free(htt_t2h_msg); + return; + } + + /* TODO: Check if we should pop the HTC/HTT header alignment padding */ + + msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg); + msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word); + htt_event_record(soc->htt_logger_handle, + msg_type, (uint8_t *)msg_word); + switch (msg_type) { + case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND: + { + dp_htt_bkp_event_alert(msg_word, soc); + break; + } + case HTT_T2H_MSG_TYPE_PEER_MAP: + { + u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE]; + u_int8_t *peer_mac_addr; + u_int16_t peer_id; + u_int16_t hw_peer_id; + u_int8_t vdev_id; + u_int8_t is_wds; + struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc; + + peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word); + hw_peer_id = + HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2)); + vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word); + peer_mac_addr = htt_t2h_mac_addr_deswizzle( + (u_int8_t *) (msg_word+1), + &mac_addr_deswizzle_buf[0]); + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_INFO, + "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n", + peer_id, vdev_id); + + /* + * check if peer already exists for this peer_id, if so + * this peer map event is in response for a wds peer add + * wmi command sent during wds source port learning. + * in this case just add the ast entry to the existing + * peer ast_list. + */ + is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]); + dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id, + vdev_id, peer_mac_addr, 0, + is_wds); + break; + } + case HTT_T2H_MSG_TYPE_PEER_UNMAP: + { + u_int16_t peer_id; + u_int8_t vdev_id; + u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0}; + peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word); + vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word); + + dp_rx_peer_unmap_handler(soc->dp_soc, peer_id, + vdev_id, mac_addr, 0); + break; + } + case HTT_T2H_MSG_TYPE_SEC_IND: + { + u_int16_t peer_id; + enum cdp_sec_type sec_type; + int is_unicast; + + peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word); + sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word); + is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word); + /* point to the first part of the Michael key */ + msg_word++; + dp_rx_sec_ind_handler( + soc->dp_soc, peer_id, sec_type, is_unicast, + msg_word, msg_word + 2); + break; + } + + case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: + { + free_buf = dp_ppdu_stats_ind_handler(soc, msg_word, + htt_t2h_msg); + break; + } + + case HTT_T2H_MSG_TYPE_PKTLOG: + { + dp_pktlog_msg_handler(soc, msg_word); + break; + } + + case HTT_T2H_MSG_TYPE_VERSION_CONF: + { + /* + * HTC maintains runtime pm count for H2T messages that + * have a response msg from FW. This count ensures that + * in the case FW does not sent out the response or host + * did not process this indication runtime_put happens + * properly in the cleanup path. + */ + if (htc_dec_return_runtime_cnt(soc->htc_soc) >= 0) + htc_pm_runtime_put(soc->htc_soc); + else + soc->stats.htt_ver_req_put_skip++; + soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word); + soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, + "target uses HTT version %d.%d; host uses %d.%d", + soc->tgt_ver.major, soc->tgt_ver.minor, + HTT_CURRENT_VERSION_MAJOR, + HTT_CURRENT_VERSION_MINOR); + if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_WARN, + "*** Incompatible host/target HTT versions!"); + } + /* abort if the target is incompatible with the host */ + qdf_assert(soc->tgt_ver.major == + HTT_CURRENT_VERSION_MAJOR); + if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_INFO_LOW, + "*** Warning: host/target HTT versions" + " are different, though compatible!"); + } + break; + } + case HTT_T2H_MSG_TYPE_RX_ADDBA: + { + uint16_t peer_id; + uint8_t tid; + uint8_t win_sz; + uint16_t status; + struct dp_peer *peer; + + /* + * Update REO Queue Desc with new values + */ + peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word); + tid = HTT_RX_ADDBA_TID_GET(*msg_word); + win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word); + peer = dp_peer_find_by_id(soc->dp_soc, peer_id); + + /* + * Window size needs to be incremented by 1 + * since fw needs to represent a value of 256 + * using just 8 bits + */ + if (peer) { + status = dp_addba_requestprocess_wifi3( + (struct cdp_soc_t *)soc->dp_soc, + peer->mac_addr.raw, peer->vdev->vdev_id, + 0, tid, 0, win_sz + 1, 0xffff); + + /* + * If PEER_LOCK_REF_PROTECT enbled dec ref + * which is inc by dp_peer_find_by_id + */ + dp_peer_unref_del_find_by_id(peer); + + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_INFO, + FL("PeerID %d BAW %d TID %d stat %d"), + peer_id, win_sz, tid, status); + + } else { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + FL("Peer not found peer id %d"), + peer_id); + } + break; + } + case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: + { + dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg); + break; + } + case HTT_T2H_MSG_TYPE_PEER_MAP_V2: + { + u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE]; + u_int8_t *peer_mac_addr; + u_int16_t peer_id; + u_int16_t hw_peer_id; + u_int8_t vdev_id; + bool is_wds; + u_int16_t ast_hash; + struct dp_ast_flow_override_info ast_flow_info; + + qdf_mem_set(&ast_flow_info, 0, + sizeof(struct dp_ast_flow_override_info)); + + peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word); + hw_peer_id = + HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2)); + vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word); + peer_mac_addr = + htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1), + &mac_addr_deswizzle_buf[0]); + is_wds = + HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3)); + ast_hash = + HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3)); + /* + * Update 4 ast_index per peer, ast valid mask + * and TID flow valid mask. + * AST valid mask is 3 bit field corresponds to + * ast_index[3:1]. ast_index 0 is always valid. + */ + ast_flow_info.ast_valid_mask = + HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3)); + ast_flow_info.ast_idx[0] = hw_peer_id; + ast_flow_info.ast_flow_mask[0] = + HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4)); + ast_flow_info.ast_idx[1] = + HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4)); + ast_flow_info.ast_flow_mask[1] = + HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4)); + ast_flow_info.ast_idx[2] = + HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5)); + ast_flow_info.ast_flow_mask[2] = + HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4)); + ast_flow_info.ast_idx[3] = + HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6)); + ast_flow_info.ast_flow_mask[3] = + HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4)); + /* + * TID valid mask is applicable only + * for HI and LOW priority flows. + * tid_valid_mas is 8 bit field corresponds + * to TID[7:0] + */ + ast_flow_info.tid_valid_low_pri_mask = + HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5)); + ast_flow_info.tid_valid_hi_pri_mask = + HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5)); + + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_INFO, + "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n", + peer_id, vdev_id); + + dp_rx_peer_map_handler(soc->dp_soc, peer_id, + hw_peer_id, vdev_id, + peer_mac_addr, ast_hash, + is_wds); + + /* + * Update ast indexes for flow override support + * Applicable only for non wds peers + */ + dp_peer_ast_index_flow_queue_map_create( + soc->dp_soc, is_wds, + peer_id, peer_mac_addr, + &ast_flow_info); + + break; + } + case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2: + { + u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE]; + u_int8_t *mac_addr; + u_int16_t peer_id; + u_int8_t vdev_id; + u_int8_t is_wds; + + peer_id = + HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word); + vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word); + mac_addr = + htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1), + &mac_addr_deswizzle_buf[0]); + is_wds = + HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2)); + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_INFO, + "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n", + peer_id, vdev_id); + + dp_rx_peer_unmap_handler(soc->dp_soc, peer_id, + vdev_id, mac_addr, + is_wds); + break; + } + case HTT_T2H_MSG_TYPE_RX_DELBA: + { + uint16_t peer_id; + uint8_t tid; + uint8_t win_sz; + QDF_STATUS status; + + peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word); + tid = HTT_RX_DELBA_TID_GET(*msg_word); + win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word); + + status = dp_rx_delba_ind_handler( + soc->dp_soc, + peer_id, tid, win_sz); + + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_INFO, + FL("DELBA PeerID %d BAW %d TID %d stat %d"), + peer_id, win_sz, tid, status); + break; + } + default: + break; + }; + + /* Free the indication buffer */ + if (free_buf) + qdf_nbuf_free(htt_t2h_msg); +} + +/* + * dp_htt_h2t_full() - Send full handler (called from HTC) + * @context: Opaque context (HTT SOC handle) + * @pkt: HTC packet + * + * Return: enum htc_send_full_action + */ +static enum htc_send_full_action +dp_htt_h2t_full(void *context, HTC_PACKET *pkt) +{ + return HTC_SEND_FULL_KEEP; +} + +/* + * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages + * @context: Opaque context (HTT SOC handle) + * @nbuf: nbuf containing T2H message + * @pipe_id: HIF pipe ID + * + * Return: QDF_STATUS + * + * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which + * will be used for packet log and other high-priority HTT messages. Proper + * HTC connection to be added later once required FW changes are available + */ +static QDF_STATUS +dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id) +{ + QDF_STATUS rc = QDF_STATUS_SUCCESS; + HTC_PACKET htc_pkt; + + qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE); + qdf_mem_zero(&htc_pkt, sizeof(htc_pkt)); + htc_pkt.Status = QDF_STATUS_SUCCESS; + htc_pkt.pPktContext = (void *)nbuf; + dp_htt_t2h_msg_handler(context, &htc_pkt); + + return rc; +} + +/* + * htt_htc_soc_attach() - Register SOC level HTT instance with HTC + * @htt_soc: HTT SOC handle + * + * Return: QDF_STATUS + */ +static QDF_STATUS +htt_htc_soc_attach(struct htt_soc *soc) +{ + struct htc_service_connect_req connect; + struct htc_service_connect_resp response; + QDF_STATUS status; + struct dp_soc *dpsoc = soc->dp_soc; + + qdf_mem_zero(&connect, sizeof(connect)); + qdf_mem_zero(&response, sizeof(response)); + + connect.pMetaData = NULL; + connect.MetaDataLength = 0; + connect.EpCallbacks.pContext = soc; + connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete; + connect.EpCallbacks.EpTxCompleteMultiple = NULL; + connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler; + + /* rx buffers currently are provided by HIF, not by EpRecvRefill */ + connect.EpCallbacks.EpRecvRefill = NULL; + + /* N/A, fill is done by HIF */ + connect.EpCallbacks.RecvRefillWaterMark = 1; + + connect.EpCallbacks.EpSendFull = dp_htt_h2t_full; + /* + * Specify how deep to let a queue get before htc_send_pkt will + * call the EpSendFull function due to excessive send queue depth. + */ + connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH; + + /* disable flow control for HTT data message service */ + connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL; + + /* connect to control service */ + connect.service_id = HTT_DATA_MSG_SVC; + + status = htc_connect_service(soc->htc_soc, &connect, &response); + + if (status != QDF_STATUS_SUCCESS) + return status; + + soc->htc_endpoint = response.Endpoint; + + hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint); + + htt_interface_logging_init(&soc->htt_logger_handle); + dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc, + dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE); + + return QDF_STATUS_SUCCESS; /* success */ +} + +/* + * htt_soc_initialize() - SOC level HTT initialization + * @htt_soc: Opaque htt SOC handle + * @ctrl_psoc: Opaque ctrl SOC handle + * @htc_soc: SOC level HTC handle + * @hal_soc: Opaque HAL SOC handle + * @osdev: QDF device + * + * Return: HTT handle on success; NULL on failure + */ +void * +htt_soc_initialize(struct htt_soc *htt_soc, + struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + HTC_HANDLE htc_soc, + hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev) +{ + struct htt_soc *soc = (struct htt_soc *)htt_soc; + + soc->osdev = osdev; + soc->ctrl_psoc = ctrl_psoc; + soc->htc_soc = htc_soc; + soc->hal_soc = hal_soc_hdl; + + if (htt_htc_soc_attach(soc)) + goto fail2; + + return soc; + +fail2: + return NULL; +} + +void htt_soc_htc_dealloc(struct htt_soc *htt_handle) +{ + htt_interface_logging_deinit(htt_handle->htt_logger_handle); + htt_htc_misc_pkt_pool_free(htt_handle); + htt_htc_pkt_pool_free(htt_handle); +} + +/* + * htt_soc_htc_prealloc() - HTC memory prealloc + * @htt_soc: SOC level HTT handle + * + * Return: QDF_STATUS_SUCCESS on Success or + * QDF_STATUS_E_NOMEM on allocation failure + */ +QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc) +{ + int i; + + soc->htt_htc_pkt_freelist = NULL; + /* pre-allocate some HTC_PACKET objects */ + for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) { + struct dp_htt_htc_pkt_union *pkt; + pkt = qdf_mem_malloc(sizeof(*pkt)); + if (!pkt) + return QDF_STATUS_E_NOMEM; + + htt_htc_pkt_free(soc, &pkt->u.pkt); + } + return QDF_STATUS_SUCCESS; +} + +/* + * htt_soc_detach() - Free SOC level HTT handle + * @htt_hdl: HTT SOC handle + */ +void htt_soc_detach(struct htt_soc *htt_hdl) +{ + int i; + struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl; + + for (i = 0; i < MAX_PDEV_CNT; i++) { + qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt); + qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt); + } + + HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex); + qdf_mem_free(htt_handle); + +} + +/** + * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW + * @pdev: DP PDEV handle + * @stats_type_upload_mask: stats type requested by user + * @config_param_0: extra configuration parameters + * @config_param_1: extra configuration parameters + * @config_param_2: extra configuration parameters + * @config_param_3: extra configuration parameters + * @mac_id: mac number + * + * return: QDF STATUS + */ +QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev, + uint32_t stats_type_upload_mask, uint32_t config_param_0, + uint32_t config_param_1, uint32_t config_param_2, + uint32_t config_param_3, int cookie_val, int cookie_msb, + uint8_t mac_id) +{ + struct htt_soc *soc = pdev->soc->htt_handle; + struct dp_htt_htc_pkt *pkt; + qdf_nbuf_t msg; + uint32_t *msg_word; + uint8_t pdev_mask = 0; + uint8_t *htt_logger_bufp; + int mac_for_pdev; + int target_pdev_id; + QDF_STATUS status; + + msg = qdf_nbuf_alloc( + soc->osdev, + HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ), + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); + + if (!msg) + return QDF_STATUS_E_NOMEM; + + /*TODO:Add support for SOC stats + * Bit 0: SOC Stats + * Bit 1: Pdev stats for pdev id 0 + * Bit 2: Pdev stats for pdev id 1 + * Bit 3: Pdev stats for pdev id 2 + */ + mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id); + target_pdev_id = + dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, mac_for_pdev); + + pdev_mask = 1 << target_pdev_id; + /* + * Set the length of the message. + * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added + * separately during the below call to qdf_nbuf_push_head. + * The contribution from the HTC header is added separately inside HTC. + */ + if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Failed to expand head for HTT_EXT_STATS"); + qdf_nbuf_free(msg); + return QDF_STATUS_E_FAILURE; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n" + "config_param_1 %u\n config_param_2 %u\n" + "config_param_4 %u\n -------------", + __func__, __LINE__, cookie_val, config_param_0, + config_param_1, config_param_2, config_param_3); + + msg_word = (uint32_t *) qdf_nbuf_data(msg); + + qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); + htt_logger_bufp = (uint8_t *)msg_word; + *msg_word = 0; + HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ); + HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask); + HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask); + + /* word 1 */ + msg_word++; + *msg_word = 0; + HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0); + + /* word 2 */ + msg_word++; + *msg_word = 0; + HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1); + + /* word 3 */ + msg_word++; + *msg_word = 0; + HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2); + + /* word 4 */ + msg_word++; + *msg_word = 0; + HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3); + + HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0); + + /* word 5 */ + msg_word++; + + /* word 6 */ + msg_word++; + *msg_word = 0; + HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val); + + /* word 7 */ + msg_word++; + *msg_word = 0; + /*Using last 2 bits for pdev_id */ + cookie_msb = ((cookie_msb << 2) | pdev->pdev_id); + HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb); + + pkt = htt_htc_pkt_alloc(soc); + if (!pkt) { + qdf_nbuf_free(msg); + return QDF_STATUS_E_NOMEM; + } + + pkt->soc_ctxt = NULL; /* not used during send-done callback */ + + SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, + dp_htt_h2t_send_complete_free_netbuf, + qdf_nbuf_data(msg), qdf_nbuf_len(msg), + soc->htc_endpoint, + /* tag for FW response msg not guaranteed */ + HTC_TX_PACKET_TAG_RUNTIME_PUT); + + SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); + status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ, + htt_logger_bufp); + + if (status != QDF_STATUS_SUCCESS) { + qdf_nbuf_free(msg); + htt_htc_pkt_free(soc, pkt); + } + + return status; +} + +/* This macro will revert once proper HTT header will define for + * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in htt.h file + * */ +#if defined(WDI_EVENT_ENABLE) +/** + * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW + * @pdev: DP PDEV handle + * @stats_type_upload_mask: stats type requested by user + * @mac_id: Mac id number + * + * return: QDF STATUS + */ +QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, + uint32_t stats_type_upload_mask, uint8_t mac_id) +{ + struct htt_soc *soc = pdev->soc->htt_handle; + struct dp_htt_htc_pkt *pkt; + qdf_nbuf_t msg; + uint32_t *msg_word; + uint8_t pdev_mask; + QDF_STATUS status; + + msg = qdf_nbuf_alloc( + soc->osdev, + HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ), + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true); + + if (!msg) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer"); + qdf_assert(0); + return QDF_STATUS_E_NOMEM; + } + + /*TODO:Add support for SOC stats + * Bit 0: SOC Stats + * Bit 1: Pdev stats for pdev id 0 + * Bit 2: Pdev stats for pdev id 1 + * Bit 3: Pdev stats for pdev id 2 + */ + pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc, + mac_id); + + /* + * Set the length of the message. + * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added + * separately during the below call to qdf_nbuf_push_head. + * The contribution from the HTC header is added separately inside HTC. + */ + if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Failed to expand head for HTT_CFG_STATS"); + qdf_nbuf_free(msg); + return QDF_STATUS_E_FAILURE; + } + + msg_word = (uint32_t *) qdf_nbuf_data(msg); + + qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); + *msg_word = 0; + HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG); + HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask); + HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word, + stats_type_upload_mask); + + pkt = htt_htc_pkt_alloc(soc); + if (!pkt) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Fail to allocate dp_htt_htc_pkt buffer"); + qdf_assert(0); + qdf_nbuf_free(msg); + return QDF_STATUS_E_NOMEM; + } + + pkt->soc_ctxt = NULL; /* not used during send-done callback */ + + SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, + dp_htt_h2t_send_complete_free_netbuf, + qdf_nbuf_data(msg), qdf_nbuf_len(msg), + soc->htc_endpoint, + /* tag for no FW response msg */ + HTC_TX_PACKET_TAG_RUNTIME_PUT); + + SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); + status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG, + (uint8_t *)msg_word); + + if (status != QDF_STATUS_SUCCESS) { + qdf_nbuf_free(msg); + htt_htc_pkt_free(soc, pkt); + } + + return status; +} +#endif + +void +dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type, + uint32_t *tag_buf) +{ + switch (tag_type) { + case HTT_STATS_PEER_DETAILS_TAG: + { + htt_peer_details_tlv *dp_stats_buf = + (htt_peer_details_tlv *)tag_buf; + + pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id; + } + break; + case HTT_STATS_PEER_STATS_CMN_TAG: + { + htt_peer_stats_cmn_tlv *dp_stats_buf = + (htt_peer_stats_cmn_tlv *)tag_buf; + + struct dp_peer *peer = dp_peer_find_by_id(pdev->soc, + pdev->fw_stats_peer_id); + + if (peer && !peer->bss_peer) { + peer->stats.tx.inactive_time = + dp_stats_buf->inactive_time; + qdf_event_set(&pdev->fw_peer_stats_event); + } + if (peer) + dp_peer_unref_del_find_by_id(peer); + } + break; + default: + qdf_err("Invalid tag_type"); + } +} + +/** + * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW + * @pdev: DP pdev handle + * @fse_setup_info: FST setup parameters + * + * Return: Success when HTT message is sent, error on failure + */ +QDF_STATUS +dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev, + struct dp_htt_rx_flow_fst_setup *fse_setup_info) +{ + struct htt_soc *soc = pdev->soc->htt_handle; + struct dp_htt_htc_pkt *pkt; + qdf_nbuf_t msg; + u_int32_t *msg_word; + struct htt_h2t_msg_rx_fse_setup_t *fse_setup; + uint8_t *htt_logger_bufp; + u_int32_t *key; + QDF_STATUS status; + + msg = qdf_nbuf_alloc( + soc->osdev, + HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)), + /* reserve room for the HTC header */ + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); + + if (!msg) + return QDF_STATUS_E_NOMEM; + + /* + * Set the length of the message. + * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added + * separately during the below call to qdf_nbuf_push_head. + * The contribution from the HTC header is added separately inside HTC. + */ + if (!qdf_nbuf_put_tail(msg, + sizeof(struct htt_h2t_msg_rx_fse_setup_t))) { + qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg"); + return QDF_STATUS_E_FAILURE; + } + + /* fill in the message contents */ + msg_word = (u_int32_t *)qdf_nbuf_data(msg); + + memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t)); + /* rewind beyond alignment pad to get to the HTC header reserved area */ + qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); + htt_logger_bufp = (uint8_t *)msg_word; + + *msg_word = 0; + HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG); + + fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word; + + HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id); + + msg_word++; + HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries); + HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search); + HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word, + fse_setup_info->ip_da_sa_prefix); + + msg_word++; + HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word, + fse_setup_info->base_addr_lo); + msg_word++; + HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word, + fse_setup_info->base_addr_hi); + + key = (u_int32_t *)fse_setup_info->hash_key; + fse_setup->toeplitz31_0 = *key++; + fse_setup->toeplitz63_32 = *key++; + fse_setup->toeplitz95_64 = *key++; + fse_setup->toeplitz127_96 = *key++; + fse_setup->toeplitz159_128 = *key++; + fse_setup->toeplitz191_160 = *key++; + fse_setup->toeplitz223_192 = *key++; + fse_setup->toeplitz255_224 = *key++; + fse_setup->toeplitz287_256 = *key++; + fse_setup->toeplitz314_288 = *key; + + msg_word++; + HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0); + msg_word++; + HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32); + msg_word++; + HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64); + msg_word++; + HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96); + msg_word++; + HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128); + msg_word++; + HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160); + msg_word++; + HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192); + msg_word++; + HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224); + msg_word++; + HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256); + msg_word++; + HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word, + fse_setup->toeplitz314_288); + + pkt = htt_htc_pkt_alloc(soc); + if (!pkt) { + qdf_err("Fail to allocate dp_htt_htc_pkt buffer"); + qdf_assert(0); + qdf_nbuf_free(msg); + return QDF_STATUS_E_RESOURCES; /* failure */ + } + + pkt->soc_ctxt = NULL; /* not used during send-done callback */ + + SET_HTC_PACKET_INFO_TX( + &pkt->htc_pkt, + dp_htt_h2t_send_complete_free_netbuf, + qdf_nbuf_data(msg), + qdf_nbuf_len(msg), + soc->htc_endpoint, + HTC_TX_PACKET_TAG_RUNTIME_PUT); + + SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); + + status = DP_HTT_SEND_HTC_PKT(soc, pkt, + HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG, + htt_logger_bufp); + + if (status == QDF_STATUS_SUCCESS) { + dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u", + fse_setup_info->pdev_id); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG, + (void *)fse_setup_info->hash_key, + fse_setup_info->hash_key_len); + } else { + qdf_nbuf_free(msg); + htt_htc_pkt_free(soc, pkt); + } + + return status; +} + +/** + * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to + * add/del a flow in HW + * @pdev: DP pdev handle + * @fse_op_info: Flow entry parameters + * + * Return: Success when HTT message is sent, error on failure + */ +QDF_STATUS +dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev, + struct dp_htt_rx_flow_fst_operation *fse_op_info) +{ + struct htt_soc *soc = pdev->soc->htt_handle; + struct dp_htt_htc_pkt *pkt; + qdf_nbuf_t msg; + u_int32_t *msg_word; + struct htt_h2t_msg_rx_fse_operation_t *fse_operation; + uint8_t *htt_logger_bufp; + QDF_STATUS status; + + msg = qdf_nbuf_alloc( + soc->osdev, + HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)), + /* reserve room for the HTC header */ + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); + if (!msg) + return QDF_STATUS_E_NOMEM; + + /* + * Set the length of the message. + * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added + * separately during the below call to qdf_nbuf_push_head. + * The contribution from the HTC header is added separately inside HTC. + */ + if (!qdf_nbuf_put_tail(msg, + sizeof(struct htt_h2t_msg_rx_fse_operation_t))) { + qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg"); + qdf_nbuf_free(msg); + return QDF_STATUS_E_FAILURE; + } + + /* fill in the message contents */ + msg_word = (u_int32_t *)qdf_nbuf_data(msg); + + memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t)); + /* rewind beyond alignment pad to get to the HTC header reserved area */ + qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); + htt_logger_bufp = (uint8_t *)msg_word; + + *msg_word = 0; + HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG); + + fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word; + + HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id); + msg_word++; + HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false); + if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) { + HTT_RX_FSE_OPERATION_SET(*msg_word, + HTT_RX_FSE_CACHE_INVALIDATE_ENTRY); + msg_word++; + HTT_RX_FSE_OPERATION_IP_ADDR_SET( + *msg_word, + qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0)); + msg_word++; + HTT_RX_FSE_OPERATION_IP_ADDR_SET( + *msg_word, + qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32)); + msg_word++; + HTT_RX_FSE_OPERATION_IP_ADDR_SET( + *msg_word, + qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64)); + msg_word++; + HTT_RX_FSE_OPERATION_IP_ADDR_SET( + *msg_word, + qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96)); + msg_word++; + HTT_RX_FSE_OPERATION_IP_ADDR_SET( + *msg_word, + qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0)); + msg_word++; + HTT_RX_FSE_OPERATION_IP_ADDR_SET( + *msg_word, + qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32)); + msg_word++; + HTT_RX_FSE_OPERATION_IP_ADDR_SET( + *msg_word, + qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64)); + msg_word++; + HTT_RX_FSE_OPERATION_IP_ADDR_SET( + *msg_word, + qdf_htonl( + fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96)); + msg_word++; + HTT_RX_FSE_SOURCEPORT_SET( + *msg_word, + fse_op_info->rx_flow->flow_tuple_info.src_port); + HTT_RX_FSE_DESTPORT_SET( + *msg_word, + fse_op_info->rx_flow->flow_tuple_info.dest_port); + msg_word++; + HTT_RX_FSE_L4_PROTO_SET( + *msg_word, + fse_op_info->rx_flow->flow_tuple_info.l4_protocol); + } else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) { + HTT_RX_FSE_OPERATION_SET(*msg_word, + HTT_RX_FSE_CACHE_INVALIDATE_FULL); + } else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) { + HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE); + } else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) { + HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE); + } + + pkt = htt_htc_pkt_alloc(soc); + if (!pkt) { + qdf_err("Fail to allocate dp_htt_htc_pkt buffer"); + qdf_assert(0); + qdf_nbuf_free(msg); + return QDF_STATUS_E_RESOURCES; /* failure */ + } + + pkt->soc_ctxt = NULL; /* not used during send-done callback */ + + SET_HTC_PACKET_INFO_TX( + &pkt->htc_pkt, + dp_htt_h2t_send_complete_free_netbuf, + qdf_nbuf_data(msg), + qdf_nbuf_len(msg), + soc->htc_endpoint, + HTC_TX_PACKET_TAG_RUNTIME_PUT); + + SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); + + status = DP_HTT_SEND_HTC_PKT(soc, pkt, + HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG, + htt_logger_bufp); + + if (status == QDF_STATUS_SUCCESS) { + dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u", + fse_op_info->pdev_id); + } else { + qdf_nbuf_free(msg); + htt_htc_pkt_free(soc, pkt); + } + + return status; +} + +/** + * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA + * @pdev: DP pdev handle + * @fse_op_info: Flow entry parameters + * + * Return: Success when HTT message is sent, error on failure + */ +QDF_STATUS +dp_htt_rx_fisa_config(struct dp_pdev *pdev, + struct dp_htt_rx_fisa_cfg *fisa_config) +{ + struct htt_soc *soc = pdev->soc->htt_handle; + struct dp_htt_htc_pkt *pkt; + qdf_nbuf_t msg; + u_int32_t *msg_word; + struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config; + uint8_t *htt_logger_bufp; + uint32_t len; + QDF_STATUS status; + + len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t)); + + msg = qdf_nbuf_alloc(soc->osdev, + len, + /* reserve room for the HTC header */ + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, + 4, + TRUE); + if (!msg) + return QDF_STATUS_E_NOMEM; + + /* + * Set the length of the message. + * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added + * separately during the below call to qdf_nbuf_push_head. + * The contribution from the HTC header is added separately inside HTC. + */ + if (!qdf_nbuf_put_tail(msg, + sizeof(struct htt_h2t_msg_type_fisa_config_t))) { + qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg"); + qdf_nbuf_free(msg); + return QDF_STATUS_E_FAILURE; + } + + /* fill in the message contents */ + msg_word = (u_int32_t *)qdf_nbuf_data(msg); + + memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t)); + /* rewind beyond alignment pad to get to the HTC header reserved area */ + qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); + htt_logger_bufp = (uint8_t *)msg_word; + + *msg_word = 0; + HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG); + + htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word; + + HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id); + + msg_word++; + HTT_RX_FISA_CONFIG_FISA_ENABLE_SET(*msg_word, 1); + HTT_RX_FISA_CONFIG_IPSEC_SKIP_SEARCH_SET(*msg_word, 1); + HTT_RX_FISA_CONFIG_NON_TCP_SKIP_SEARCH_SET(*msg_word, 0); + HTT_RX_FISA_CONFIG_ADD_IPV4_FIXED_HDR_LEN_SET(*msg_word, 0); + HTT_RX_FISA_CONFIG_ADD_IPV6_FIXED_HDR_LEN_SET(*msg_word, 0); + HTT_RX_FISA_CONFIG_ADD_TCP_FIXED_HDR_LEN_SET(*msg_word, 0); + HTT_RX_FISA_CONFIG_ADD_UDP_HDR_LEN_SET(*msg_word, 0); + HTT_RX_FISA_CONFIG_CHKSUM_CUM_IP_LEN_EN_SET(*msg_word, 1); + HTT_RX_FISA_CONFIG_DISABLE_TID_CHECK_SET(*msg_word, 1); + HTT_RX_FISA_CONFIG_DISABLE_TA_CHECK_SET(*msg_word, 1); + HTT_RX_FISA_CONFIG_DISABLE_QOS_CHECK_SET(*msg_word, 1); + HTT_RX_FISA_CONFIG_DISABLE_RAW_CHECK_SET(*msg_word, 1); + HTT_RX_FISA_CONFIG_DISABLE_DECRYPT_ERR_CHECK_SET(*msg_word, 1); + HTT_RX_FISA_CONFIG_DISABLE_MSDU_DROP_CHECK_SET(*msg_word, 1); + HTT_RX_FISA_CONFIG_FISA_AGGR_LIMIT_SET(*msg_word, 0xf); + + msg_word++; + htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout; + + pkt = htt_htc_pkt_alloc(soc); + if (!pkt) { + qdf_err("Fail to allocate dp_htt_htc_pkt buffer"); + qdf_assert(0); + qdf_nbuf_free(msg); + return QDF_STATUS_E_RESOURCES; /* failure */ + } + + pkt->soc_ctxt = NULL; /* not used during send-done callback */ + + SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, + dp_htt_h2t_send_complete_free_netbuf, + qdf_nbuf_data(msg), + qdf_nbuf_len(msg), + soc->htc_endpoint, + HTC_TX_PACKET_TAG_RUNTIME_PUT); + + SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); + + status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG, + htt_logger_bufp); + + if (status == QDF_STATUS_SUCCESS) { + dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u", + fisa_config->pdev_id); + } else { + qdf_nbuf_free(msg); + htt_htc_pkt_free(soc, pkt); + } + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.h new file mode 100644 index 0000000000000000000000000000000000000000..6843904df22655e9a5e9c72e3a7991dd6e8f2754 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.h @@ -0,0 +1,458 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_HTT_H_ +#define _DP_HTT_H_ + +#include +#include +#include +#include + +#include "cdp_txrx_cmn_struct.h" +#include "dp_types.h" +#ifdef HTT_LOGGER +#include "dp_htt_logger.h" +#else +struct htt_logger; +static inline +void htt_interface_logging_init(struct htt_logger **htt_logger_handle) +{ +} + +static inline +void htt_interface_logging_deinit(struct htt_logger *htt_logger_handle) +{ +} + +static inline +int htt_command_record(struct htt_logger *h, uint8_t msg_type, + uint8_t *msg_data) +{ + return 0; +} + +static inline +int htt_event_record(struct htt_logger *h, uint8_t msg_type, + uint8_t *msg_data) +{ + return 0; +} + +static inline +int htt_wbm_event_record(struct htt_logger *h, uint8_t tx_status, + uint8_t *msg_data) +{ + return 0; +} + +#endif + +#define HTT_TX_MUTEX_TYPE qdf_spinlock_t + +#define HTT_TX_MUTEX_INIT(_mutex) \ + qdf_spinlock_create(_mutex) + +#define HTT_TX_MUTEX_ACQUIRE(_mutex) \ + qdf_spin_lock_bh(_mutex) + +#define HTT_TX_MUTEX_RELEASE(_mutex) \ + qdf_spin_unlock_bh(_mutex) + +#define HTT_TX_MUTEX_DESTROY(_mutex) \ + qdf_spinlock_destroy(_mutex) + +#define DP_HTT_MAX_SEND_QUEUE_DEPTH 64 + +#ifndef HTT_MAC_ADDR_LEN +#define HTT_MAC_ADDR_LEN 6 +#endif + +#define HTT_FRAMECTRL_TYPE_MASK 0x0C +#define HTT_GET_FRAME_CTRL_TYPE(_val) \ + (((_val) & HTT_FRAMECTRL_TYPE_MASK) >> 2) +#define FRAME_CTRL_TYPE_MGMT 0x0 +#define FRAME_CTRL_TYPE_CTRL 0x1 +#define FRAME_CTRL_TYPE_DATA 0x2 +#define FRAME_CTRL_TYPE_RESV 0x3 + +#define HTT_FRAMECTRL_DATATYPE 0x08 +#define HTT_PPDU_DESC_MAX_DEPTH 16 +#define DP_SCAN_PEER_ID 0xFFFF + +#define HTT_RX_DELBA_WIN_SIZE_M 0x0000FC00 +#define HTT_RX_DELBA_WIN_SIZE_S 10 + +#define HTT_RX_DELBA_WIN_SIZE_GET(word) \ + (((word) & HTT_RX_DELBA_WIN_SIZE_M) >> HTT_RX_DELBA_WIN_SIZE_S) + +/* + * Set the base misclist size to HTT copy engine source ring size + * to guarantee that a packet on the misclist wont be freed while it + * is sitting in the copy engine. + */ +#define DP_HTT_HTC_PKT_MISCLIST_SIZE 2048 +#define HTT_T2H_MAX_MSG_SIZE 2048 + +#define HTT_T2H_EXT_STATS_TLV_START_OFFSET 3 + +/* + * Below offset are based on htt_ppdu_stats_common_tlv + * defined in htt_ppdu_stats.h + */ +#define HTT_PPDU_STATS_COMMON_TLV_TLV_HDR_OFFSET 0 +#define HTT_PPDU_STATS_COMMON_TLV_PPDU_ID_OFFSET 1 +#define HTT_PPDU_STATS_COMMON_TLV_RING_ID_SCH_CMD_ID_OFFSET 2 +#define HTT_PPDU_STATS_COMMON_TLV_QTYPE_FRM_TYPE_OFFSET 3 +#define HTT_PPDU_STATS_COMMON_TLV_CHAIN_MASK_OFFSET 4 +#define HTT_PPDU_STATS_COMMON_TLV_FES_DUR_US_OFFSET 5 +#define HTT_PPDU_STATS_COMMON_TLV_SCH_EVAL_START_TSTMP_L32_US_OFFSET 6 +#define HTT_PPDU_STATS_COMMON_TLV_SCH_END_TSTMP_US_OFFSET 7 +#define HTT_PPDU_STATS_COMMON_TLV_START_TSTMP_L32_US_OFFSET 8 +#define HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_PHY_MODE_OFFSET 9 +#define HTT_PPDU_STATS_COMMON_TLV_CCA_DELTA_TIME_US_OFFSET 10 +#define HTT_PPDU_STATS_COMMON_TLV_RXFRM_DELTA_TIME_US_OFFSET 11 +#define HTT_PPDU_STATS_COMMON_TLV_TXFRM_DELTA_TIME_US_OFFSET 12 +#define HTT_PPDU_STATS_COMMON_TLV_RESV_NUM_UL_BEAM_OFFSET 13 +#define HTT_PPDU_STATS_COMMON_TLV_START_TSTMP_U32_US_OFFSET 14 + +/* get index for field in htt_ppdu_stats_common_tlv */ +#define HTT_GET_STATS_CMN_INDEX(index) \ + HTT_PPDU_STATS_COMMON_TLV_##index##_OFFSET + +struct dp_htt_htc_pkt { + void *soc_ctxt; + qdf_dma_addr_t nbuf_paddr; + HTC_PACKET htc_pkt; +}; + +struct dp_htt_htc_pkt_union { + union { + struct dp_htt_htc_pkt pkt; + struct dp_htt_htc_pkt_union *next; + } u; +}; + +struct dp_htt_timestamp { + long *umac_ttt; + long *lmac_ttt; +}; + +struct htt_soc { + struct cdp_ctrl_objmgr_psoc *ctrl_psoc; + struct dp_soc *dp_soc; + hal_soc_handle_t hal_soc; + struct dp_htt_timestamp pdevid_tt[MAX_PDEV_CNT]; + /* htt_logger handle */ + struct htt_logger *htt_logger_handle; + HTC_HANDLE htc_soc; + qdf_device_t osdev; + HTC_ENDPOINT_ID htc_endpoint; + struct dp_htt_htc_pkt_union *htt_htc_pkt_freelist; + struct dp_htt_htc_pkt_union *htt_htc_pkt_misclist; + struct { + u_int8_t major; + u_int8_t minor; + } tgt_ver; + struct { + u_int8_t major; + u_int8_t minor; + } wifi_ip_ver; + + struct { + int htc_err_cnt; + int htc_pkt_free; + /* rtpm put skip count for ver req msg */ + int htt_ver_req_put_skip; + } stats; + + HTT_TX_MUTEX_TYPE htt_tx_mutex; +}; + +/** + * struct htt_rx_ring_tlv_filter - Rx ring TLV filter + * enable/disable. + * @mpdu_start: enable/disable MPDU start TLV + * @msdu_start: enable/disable MSDU start TLV + * @packet: enable/disable PACKET TLV + * @msdu_end: enable/disable MSDU end TLV + * @mpdu_end: enable/disable MPDU end TLV + * @packet_header: enable/disable PACKET header TLV + * @attention: enable/disable ATTENTION TLV + * @ppdu_start: enable/disable PPDU start TLV + * @ppdu_end: enable/disable PPDU end TLV + * @ppdu_end_user_stats: enable/disable PPDU user stats TLV + * @ppdu_end_user_stats_ext: enable/disable PPDU user stats ext TLV + * @ppdu_end_status_done: enable/disable PPDU end status done TLV + * @enable_fp: enable/disable FP packet + * @enable_md: enable/disable MD packet + * @enable_mo: enable/disable MO packet + * @enable_mgmt: enable/disable MGMT packet + * @enable_ctrl: enable/disable CTRL packet + * @enable_data: enable/disable DATA packet + * @offset_valid: Flag to indicate if below offsets are valid + * @rx_packet_offset: Offset of packet payload + * @rx_header_offset: Offset of rx_header tlv + * @rx_mpdu_end_offset: Offset of rx_mpdu_end tlv + * @rx_mpdu_start_offset: Offset of rx_mpdu_start tlv + * @rx_msdu_end_offset: Offset of rx_msdu_end tlv + * @rx_msdu_start_offset: Offset of rx_msdu_start tlv + * @rx_attn_offset: Offset of rx_attention tlv + * + * NOTE: Do not change the layout of this structure + */ +struct htt_rx_ring_tlv_filter { + u_int32_t mpdu_start:1, + msdu_start:1, + packet:1, + msdu_end:1, + mpdu_end:1, + packet_header:1, + attention:1, + ppdu_start:1, + ppdu_end:1, + ppdu_end_user_stats:1, + ppdu_end_user_stats_ext:1, + ppdu_end_status_done:1, + header_per_msdu:1, + enable_fp:1, + enable_md:1, + enable_mo:1; + u_int32_t fp_mgmt_filter:16, + mo_mgmt_filter:16; + u_int32_t fp_ctrl_filter:16, + mo_ctrl_filter:16; + u_int32_t fp_data_filter:16, + mo_data_filter:16; + u_int16_t md_data_filter; + u_int16_t md_mgmt_filter; + u_int16_t md_ctrl_filter; + bool offset_valid; + uint16_t rx_packet_offset; + uint16_t rx_header_offset; + uint16_t rx_mpdu_end_offset; + uint16_t rx_mpdu_start_offset; + uint16_t rx_msdu_end_offset; + uint16_t rx_msdu_start_offset; + uint16_t rx_attn_offset; +}; + +/** + * struct dp_htt_rx_flow_fst_setup - Rx FST setup message + * @pdev_id: DP Pdev identifier + * @max_entries: Size of Rx FST in number of entries + * @max_search: Number of collisions allowed + * @base_addr_lo: lower 32-bit physical address + * @base_addr_hi: upper 32-bit physical address + * @ip_da_sa_prefix: IPv4 prefix to map to IPv6 address scheme + * @hash_key_len: Rx FST hash key size + * @hash_key: Rx FST Toeplitz hash key + */ +struct dp_htt_rx_flow_fst_setup { + uint8_t pdev_id; + uint32_t max_entries; + uint32_t max_search; + uint32_t base_addr_lo; + uint32_t base_addr_hi; + uint32_t ip_da_sa_prefix; + uint32_t hash_key_len; + uint8_t *hash_key; +}; + +/** + * enum dp_htt_flow_fst_operation - FST related operations allowed + * @DP_HTT_FST_CACHE_OP_NONE: Cache no-op + * @DP_HTT_FST_CACHE_INVALIDATE_ENTRY: Invalidate single cache entry + * @DP_HTT_FST_CACHE_INVALIDATE_FULL: Invalidate entire cache + * @DP_HTT_FST_ENABLE: Bypass FST is enabled + * @DP_HTT_FST_DISABLE: Disable bypass FST + */ +enum dp_htt_flow_fst_operation { + DP_HTT_FST_CACHE_OP_NONE, + DP_HTT_FST_CACHE_INVALIDATE_ENTRY, + DP_HTT_FST_CACHE_INVALIDATE_FULL, + DP_HTT_FST_ENABLE, + DP_HTT_FST_DISABLE +}; + +/** + * struct dp_htt_rx_flow_fst_setup - Rx FST setup message + * @pdev_id: DP Pdev identifier + * @op_code: FST operation to be performed by FW/HW + * @rx_flow: Rx Flow information on which operation is to be performed + */ +struct dp_htt_rx_flow_fst_operation { + uint8_t pdev_id; + enum dp_htt_flow_fst_operation op_code; + struct cdp_rx_flow_info *rx_flow; +}; + +/** + * struct dp_htt_rx_fisa_config - Rx fisa config + * @pdev_id: DP Pdev identifier + * @fisa_timeout: fisa aggregation timeout + */ +struct dp_htt_rx_fisa_cfg { + uint8_t pdev_id; + uint32_t fisa_timeout; +}; + +QDF_STATUS dp_htt_rx_fisa_config(struct dp_pdev *pdev, + struct dp_htt_rx_fisa_cfg *fisa_config); + +/* + * htt_soc_initialize() - SOC level HTT initialization + * @htt_soc: Opaque htt SOC handle + * @ctrl_psoc: Opaque ctrl SOC handle + * @htc_soc: SOC level HTC handle + * @hal_soc: Opaque HAL SOC handle + * @osdev: QDF device + * + * Return: HTT handle on success; NULL on failure + */ +void * +htt_soc_initialize(struct htt_soc *htt_soc, + struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + HTC_HANDLE htc_soc, + hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev); + +/* + * htt_soc_attach() - attach DP and HTT SOC + * @soc: DP SOC handle + * @htc_hdl: HTC handle + * + * Return: htt_soc handle on Success, NULL on Failure + */ +struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_hdl); + +/* + * htt_set_htc_handle_() - set HTC handle + * @htt_hdl: HTT handle/SOC + * @htc_soc: HTC handle + * + * Return: None + */ +void htt_set_htc_handle(struct htt_soc *htt_hdl, HTC_HANDLE htc_soc); + +/* + * htt_get_htc_handle_() - set HTC handle + * @htt_hdl: HTT handle/SOC + * + * Return: HTC_HANDLE + */ +HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_hdl); + +/* + * htt_soc_htc_dealloc() - HTC memory de-alloc + * @htt_soc: SOC level HTT handle + * + * Return: None + */ +void htt_soc_htc_dealloc(struct htt_soc *htt_handle); + +/* + * htt_soc_htc_prealloc() - HTC memory prealloc + * @htt_soc: SOC level HTT handle + * + * Return: QDF_STATUS_SUCCESS on success or + * QDF_STATUS_E_NO_MEM on allocation failure + */ +QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *htt_soc); + +void htt_soc_detach(struct htt_soc *soc); + +int htt_srng_setup(struct htt_soc *htt_soc, int pdev_id, + hal_ring_handle_t hal_ring_hdl, + int hal_ring_type); + +int htt_soc_attach_target(struct htt_soc *htt_soc); + +/* + * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter + * config message to target + * @htt_soc: HTT SOC handle + * @pdev_id: PDEV Id + * @hal_srng: Opaque HAL SRNG pointer + * @hal_ring_type: SRNG ring type + * @ring_buf_size: SRNG buffer size + * @htt_tlv_filter: Rx SRNG TLV and filter setting + * + * Return: 0 on success; error code on failure + */ +int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id, + hal_ring_handle_t hal_ring_hdl, + int hal_ring_type, int ring_buf_size, + struct htt_rx_ring_tlv_filter *htt_tlv_filter); + +/* + * htt_t2h_stats_handler() - target to host stats work handler + * @context: context (dp soc context) + * + * Return: void + */ +void htt_t2h_stats_handler(void *context); + +/** + * struct htt_stats_context - htt stats information + * @soc: Size of each descriptor in the pool + * @msg: T2H Ext stats message queue + * @msg_len: T2H Ext stats message length + */ +struct htt_stats_context { + struct dp_soc *soc; + qdf_nbuf_queue_t msg; + uint32_t msg_len; +}; + +int +dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap); + +/** + * dp_ppdu_desc_user_stats_update(): Function to update TX user stats + * @pdev: DP pdev handle + * @ppdu_info: per PPDU TLV descriptor + * + * return: void + */ +void +dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev, + struct ppdu_info *ppdu_info); + +/** + * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW + * @pdev: DP pdev handle + * @fse_setup_info: FST setup parameters + * + * Return: Success when HTT message is sent, error on failure + */ +QDF_STATUS +dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev, + struct dp_htt_rx_flow_fst_setup *setup_info); + +/** + * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to + * add/del a flow in HW + * @pdev: DP pdev handle + * @fse_op_info: Flow entry parameters + * + * Return: Success when HTT message is sent, error on failure + */ +QDF_STATUS +dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev, + struct dp_htt_rx_flow_fst_operation *op_info); +#endif /* _DP_HTT_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_internal.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..69e821a5e4b658095752e3426248bbeee186d2be --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_internal.h @@ -0,0 +1,2317 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_INTERNAL_H_ +#define _DP_INTERNAL_H_ + +#include "dp_types.h" + +#define RX_BUFFER_SIZE_PKTLOG_LITE 1024 +/* Alignment for consistent memory for DP rings*/ +#define DP_RING_BASE_ALIGN 8 + + +#define DP_RSSI_INVAL 0x80 +#define DP_RSSI_AVG_WEIGHT 2 +/* + * Formula to derive avg_rssi is taken from wifi2.o firmware + */ +#define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \ + (((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \ + + ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT))) + +/* Macro For NYSM value received in VHT TLV */ +#define VHT_SGI_NYSM 3 + +/* PPDU STATS CFG */ +#define DP_PPDU_STATS_CFG_ALL 0xFFFF + +/* PPDU stats mask sent to FW to enable enhanced stats */ +#define DP_PPDU_STATS_CFG_ENH_STATS 0xE67 +/* PPDU stats mask sent to FW to support debug sniffer feature */ +#define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF +/* PPDU stats mask sent to FW to support BPR feature*/ +#define DP_PPDU_STATS_CFG_BPR 0x2000 +/* PPDU stats mask sent to FW to support BPR and enhanced stats feature */ +#define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \ + DP_PPDU_STATS_CFG_ENH_STATS) +/* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */ +#define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \ + DP_PPDU_TXLITE_STATS_BITMASK_CFG) + +/** + * Bitmap of HTT PPDU TLV types for Default mode + */ +#define HTT_PPDU_DEFAULT_TLV_BITMAP \ + (1 << HTT_PPDU_STATS_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ + (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) + +/** + * Bitmap of HTT PPDU delayed ba TLV types for Default mode + */ +#define HTT_PPDU_DELAYED_BA_TLV_BITMAP \ + (1 << HTT_PPDU_STATS_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_RATE_TLV) + +/** + * Bitmap of HTT PPDU TLV types for Delayed BA + */ +#define HTT_PPDU_STATUS_TLV_BITMAP \ + (1 << HTT_PPDU_STATS_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) + +/** + * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64 + */ +#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \ + ((1 << HTT_PPDU_STATS_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ + (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \ + (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV)) + +/** + * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256 + */ +#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \ + ((1 << HTT_PPDU_STATS_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ + (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \ + (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV)) + +#ifdef WLAN_TX_PKT_CAPTURE_ENH +extern uint8_t +dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX]; +#endif + +#define DP_MAX_TIMER_EXEC_TIME_TICKS \ + (QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100 * 20) + +/** + * enum timer_yield_status - yield status code used in monitor mode timer. + * @DP_TIMER_NO_YIELD: do not yield + * @DP_TIMER_WORK_DONE: yield because work is done + * @DP_TIMER_WORK_EXHAUST: yield because work quota is exhausted + * @DP_TIMER_TIME_EXHAUST: yield due to time slot exhausted + */ +enum timer_yield_status { + DP_TIMER_NO_YIELD, + DP_TIMER_WORK_DONE, + DP_TIMER_WORK_EXHAUST, + DP_TIMER_TIME_EXHAUST, +}; + +#if DP_PRINT_ENABLE +#include /* va_list */ +#include /* qdf_vprint */ +#include + +enum { + /* FATAL_ERR - print only irrecoverable error messages */ + DP_PRINT_LEVEL_FATAL_ERR, + + /* ERR - include non-fatal err messages */ + DP_PRINT_LEVEL_ERR, + + /* WARN - include warnings */ + DP_PRINT_LEVEL_WARN, + + /* INFO1 - include fundamental, infrequent events */ + DP_PRINT_LEVEL_INFO1, + + /* INFO2 - include non-fundamental but infrequent events */ + DP_PRINT_LEVEL_INFO2, +}; + +#define dp_print(level, fmt, ...) do { \ + if (level <= g_txrx_print_level) \ + qdf_print(fmt, ## __VA_ARGS__); \ +while (0) +#define DP_PRINT(level, fmt, ...) do { \ + dp_print(level, "DP: " fmt, ## __VA_ARGS__); \ +while (0) +#else +#define DP_PRINT(level, fmt, ...) +#endif /* DP_PRINT_ENABLE */ + +#define DP_TRACE(LVL, fmt, args ...) \ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \ + fmt, ## args) + +#ifdef DP_PRINT_NO_CONSOLE +/* Stat prints should not go to console or kernel logs.*/ +#define DP_PRINT_STATS(fmt, args ...)\ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, \ + fmt, ## args) +#else +#define DP_PRINT_STATS(fmt, args ...)\ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,\ + fmt, ## args) +#endif +#define DP_STATS_INIT(_handle) \ + qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats)) + +#define DP_STATS_CLR(_handle) \ + qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats)) + +#ifndef DISABLE_DP_STATS +#define DP_STATS_INC(_handle, _field, _delta) \ +{ \ + if (likely(_handle)) \ + _handle->stats._field += _delta; \ +} + +#define DP_STATS_INCC(_handle, _field, _delta, _cond) \ +{ \ + if (_cond && likely(_handle)) \ + _handle->stats._field += _delta; \ +} + +#define DP_STATS_DEC(_handle, _field, _delta) \ +{ \ + if (likely(_handle)) \ + _handle->stats._field -= _delta; \ +} + +#define DP_STATS_UPD(_handle, _field, _delta) \ +{ \ + if (likely(_handle)) \ + _handle->stats._field = _delta; \ +} + +#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \ +{ \ + DP_STATS_INC(_handle, _field.num, _count); \ + DP_STATS_INC(_handle, _field.bytes, _bytes) \ +} + +#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \ +{ \ + DP_STATS_INCC(_handle, _field.num, _count, _cond); \ + DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \ +} + +#define DP_STATS_AGGR(_handle_a, _handle_b, _field) \ +{ \ + _handle_a->stats._field += _handle_b->stats._field; \ +} + +#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \ +{ \ + DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \ + DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\ +} + +#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \ +{ \ + _handle_a->stats._field = _handle_b->stats._field; \ +} + +#else +#define DP_STATS_INC(_handle, _field, _delta) +#define DP_STATS_INCC(_handle, _field, _delta, _cond) +#define DP_STATS_DEC(_handle, _field, _delta) +#define DP_STATS_UPD(_handle, _field, _delta) +#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) +#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) +#define DP_STATS_AGGR(_handle_a, _handle_b, _field) +#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) +#endif + +#ifdef ENABLE_DP_HIST_STATS +#define DP_HIST_INIT() \ + uint32_t num_of_packets[MAX_PDEV_CNT] = {0}; + +#define DP_HIST_PACKET_COUNT_INC(_pdev_id) \ +{ \ + ++num_of_packets[_pdev_id]; \ +} + +#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ + do { \ + if (_p_cntrs == 1) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_1, 1); \ + } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_2_20, 1); \ + } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_21_40, 1); \ + } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_41_60, 1); \ + } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_61_80, 1); \ + } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_81_100, 1); \ + } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_101_200, 1); \ + } else if (_p_cntrs > 200) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_201_plus, 1); \ + } \ + } while (0) + +#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ + do { \ + if (_p_cntrs == 1) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_1, 1); \ + } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_2_20, 1); \ + } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_21_40, 1); \ + } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_41_60, 1); \ + } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_61_80, 1); \ + } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_81_100, 1); \ + } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_101_200, 1); \ + } else if (_p_cntrs > 200) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_201_plus, 1); \ + } \ + } while (0) + +#define DP_TX_HIST_STATS_PER_PDEV() \ + do { \ + uint8_t hist_stats = 0; \ + for (hist_stats = 0; hist_stats < soc->pdev_count; \ + hist_stats++) { \ + DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ + num_of_packets[hist_stats]); \ + } \ + } while (0) + + +#define DP_RX_HIST_STATS_PER_PDEV() \ + do { \ + uint8_t hist_stats = 0; \ + for (hist_stats = 0; hist_stats < soc->pdev_count; \ + hist_stats++) { \ + DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ + num_of_packets[hist_stats]); \ + } \ + } while (0) + +#else +#define DP_HIST_INIT() +#define DP_HIST_PACKET_COUNT_INC(_pdev_id) +#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) +#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) +#define DP_RX_HIST_STATS_PER_PDEV() +#define DP_TX_HIST_STATS_PER_PDEV() +#endif /* DISABLE_DP_STATS */ + +#ifdef FEATURE_TSO_STATS +/** + * dp_init_tso_stats() - Clear tso stats + * @pdev: pdev handle + * + * Return: None + */ +static inline +void dp_init_tso_stats(struct dp_pdev *pdev) +{ + if (pdev) { + qdf_mem_zero(&((pdev)->stats.tso_stats), + sizeof((pdev)->stats.tso_stats)); + qdf_atomic_init(&pdev->tso_idx); + } +} + +/** + * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram + * @pdev: pdev handle + * @_p_cntrs: number of tso segments for a tso packet + * + * Return: None + */ +void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, + uint8_t _p_cntrs); + +/** + * dp_tso_segment_update() - Collect tso segment information + * @pdev: pdev handle + * @stats_idx: tso packet number + * @idx: tso segment number + * @seg: tso segment + * + * Return: None + */ +void dp_tso_segment_update(struct dp_pdev *pdev, + uint32_t stats_idx, + uint8_t idx, + struct qdf_tso_seg_t seg); + +/** + * dp_tso_packet_update() - TSO Packet information + * @pdev: pdev handle + * @stats_idx: tso packet number + * @msdu: nbuf handle + * @num_segs: tso segments + * + * Return: None + */ +void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, + qdf_nbuf_t msdu, uint16_t num_segs); + +/** + * dp_tso_segment_stats_update() - TSO Segment stats + * @pdev: pdev handle + * @stats_seg: tso segment list + * @stats_idx: tso packet number + * + * Return: None + */ +void dp_tso_segment_stats_update(struct dp_pdev *pdev, + struct qdf_tso_seg_elem_t *stats_seg, + uint32_t stats_idx); + +/** + * dp_print_tso_stats() - dump tso statistics + * @soc:soc handle + * @level: verbosity level + * + * Return: None + */ +void dp_print_tso_stats(struct dp_soc *soc, + enum qdf_stats_verbosity_level level); + +/** + * dp_txrx_clear_tso_stats() - clear tso stats + * @soc: soc handle + * + * Return: None + */ +void dp_txrx_clear_tso_stats(struct dp_soc *soc); +#else +static inline +void dp_init_tso_stats(struct dp_pdev *pdev) +{ +} + +static inline +void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, + uint8_t _p_cntrs) +{ +} + +static inline +void dp_tso_segment_update(struct dp_pdev *pdev, + uint32_t stats_idx, + uint32_t idx, + struct qdf_tso_seg_t seg) +{ +} + +static inline +void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, + qdf_nbuf_t msdu, uint16_t num_segs) +{ +} + +static inline +void dp_tso_segment_stats_update(struct dp_pdev *pdev, + struct qdf_tso_seg_elem_t *stats_seg, + uint32_t stats_idx) +{ +} + +static inline +void dp_print_tso_stats(struct dp_soc *soc, + enum qdf_stats_verbosity_level level) +{ +} + +static inline +void dp_txrx_clear_tso_stats(struct dp_soc *soc) +{ +} +#endif /* FEATURE_TSO_STATS */ + +#define DP_HTT_T2H_HP_PIPE 5 +static inline void dp_update_pdev_stats(struct dp_pdev *tgtobj, + struct cdp_vdev_stats *srcobj) +{ + uint8_t i; + uint8_t pream_type; + + for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { + for (i = 0; i < MAX_MCS; i++) { + tgtobj->stats.tx.pkt_type[pream_type]. + mcs_count[i] += + srcobj->tx.pkt_type[pream_type]. + mcs_count[i]; + tgtobj->stats.rx.pkt_type[pream_type]. + mcs_count[i] += + srcobj->rx.pkt_type[pream_type]. + mcs_count[i]; + } + } + + for (i = 0; i < MAX_BW; i++) { + tgtobj->stats.tx.bw[i] += srcobj->tx.bw[i]; + tgtobj->stats.rx.bw[i] += srcobj->rx.bw[i]; + } + + for (i = 0; i < SS_COUNT; i++) { + tgtobj->stats.tx.nss[i] += srcobj->tx.nss[i]; + tgtobj->stats.rx.nss[i] += srcobj->rx.nss[i]; + } + + for (i = 0; i < WME_AC_MAX; i++) { + tgtobj->stats.tx.wme_ac_type[i] += + srcobj->tx.wme_ac_type[i]; + tgtobj->stats.rx.wme_ac_type[i] += + srcobj->rx.wme_ac_type[i]; + tgtobj->stats.tx.excess_retries_per_ac[i] += + srcobj->tx.excess_retries_per_ac[i]; + } + + for (i = 0; i < MAX_GI; i++) { + tgtobj->stats.tx.sgi_count[i] += + srcobj->tx.sgi_count[i]; + tgtobj->stats.rx.sgi_count[i] += + srcobj->rx.sgi_count[i]; + } + + for (i = 0; i < MAX_RECEPTION_TYPES; i++) + tgtobj->stats.rx.reception_type[i] += + srcobj->rx.reception_type[i]; + + tgtobj->stats.tx.comp_pkt.bytes += srcobj->tx.comp_pkt.bytes; + tgtobj->stats.tx.comp_pkt.num += srcobj->tx.comp_pkt.num; + tgtobj->stats.tx.ucast.num += srcobj->tx.ucast.num; + tgtobj->stats.tx.ucast.bytes += srcobj->tx.ucast.bytes; + tgtobj->stats.tx.mcast.num += srcobj->tx.mcast.num; + tgtobj->stats.tx.mcast.bytes += srcobj->tx.mcast.bytes; + tgtobj->stats.tx.bcast.num += srcobj->tx.bcast.num; + tgtobj->stats.tx.bcast.bytes += srcobj->tx.bcast.bytes; + tgtobj->stats.tx.tx_success.num += srcobj->tx.tx_success.num; + tgtobj->stats.tx.tx_success.bytes += + srcobj->tx.tx_success.bytes; + tgtobj->stats.tx.nawds_mcast.num += + srcobj->tx.nawds_mcast.num; + tgtobj->stats.tx.nawds_mcast.bytes += + srcobj->tx.nawds_mcast.bytes; + tgtobj->stats.tx.nawds_mcast_drop += + srcobj->tx.nawds_mcast_drop; + tgtobj->stats.tx.num_ppdu_cookie_valid += + srcobj->tx.num_ppdu_cookie_valid; + tgtobj->stats.tx.tx_failed += srcobj->tx.tx_failed; + tgtobj->stats.tx.ofdma += srcobj->tx.ofdma; + tgtobj->stats.tx.stbc += srcobj->tx.stbc; + tgtobj->stats.tx.ldpc += srcobj->tx.ldpc; + tgtobj->stats.tx.retries += srcobj->tx.retries; + tgtobj->stats.tx.non_amsdu_cnt += srcobj->tx.non_amsdu_cnt; + tgtobj->stats.tx.amsdu_cnt += srcobj->tx.amsdu_cnt; + tgtobj->stats.tx.non_ampdu_cnt += srcobj->tx.non_ampdu_cnt; + tgtobj->stats.tx.ampdu_cnt += srcobj->tx.ampdu_cnt; + tgtobj->stats.tx.dropped.fw_rem.num += srcobj->tx.dropped.fw_rem.num; + tgtobj->stats.tx.dropped.fw_rem.bytes += + srcobj->tx.dropped.fw_rem.bytes; + tgtobj->stats.tx.dropped.fw_rem_tx += + srcobj->tx.dropped.fw_rem_tx; + tgtobj->stats.tx.dropped.fw_rem_notx += + srcobj->tx.dropped.fw_rem_notx; + tgtobj->stats.tx.dropped.fw_reason1 += + srcobj->tx.dropped.fw_reason1; + tgtobj->stats.tx.dropped.fw_reason2 += + srcobj->tx.dropped.fw_reason2; + tgtobj->stats.tx.dropped.fw_reason3 += + srcobj->tx.dropped.fw_reason3; + tgtobj->stats.tx.dropped.age_out += srcobj->tx.dropped.age_out; + tgtobj->stats.rx.err.mic_err += srcobj->rx.err.mic_err; + if (srcobj->rx.rssi != 0) + tgtobj->stats.rx.rssi = srcobj->rx.rssi; + tgtobj->stats.rx.rx_rate = srcobj->rx.rx_rate; + tgtobj->stats.rx.err.decrypt_err += srcobj->rx.err.decrypt_err; + tgtobj->stats.rx.non_ampdu_cnt += srcobj->rx.non_ampdu_cnt; + tgtobj->stats.rx.amsdu_cnt += srcobj->rx.ampdu_cnt; + tgtobj->stats.rx.non_amsdu_cnt += srcobj->rx.non_amsdu_cnt; + tgtobj->stats.rx.amsdu_cnt += srcobj->rx.amsdu_cnt; + tgtobj->stats.rx.nawds_mcast_drop += srcobj->rx.nawds_mcast_drop; + tgtobj->stats.rx.to_stack.num += srcobj->rx.to_stack.num; + tgtobj->stats.rx.to_stack.bytes += srcobj->rx.to_stack.bytes; + + for (i = 0; i < CDP_MAX_RX_RINGS; i++) { + tgtobj->stats.rx.rcvd_reo[i].num += + srcobj->rx.rcvd_reo[i].num; + tgtobj->stats.rx.rcvd_reo[i].bytes += + srcobj->rx.rcvd_reo[i].bytes; + } + + srcobj->rx.unicast.num = + srcobj->rx.to_stack.num - + (srcobj->rx.multicast.num); + srcobj->rx.unicast.bytes = + srcobj->rx.to_stack.bytes - + (srcobj->rx.multicast.bytes); + + tgtobj->stats.rx.unicast.num += srcobj->rx.unicast.num; + tgtobj->stats.rx.unicast.bytes += srcobj->rx.unicast.bytes; + tgtobj->stats.rx.multicast.num += srcobj->rx.multicast.num; + tgtobj->stats.rx.multicast.bytes += srcobj->rx.multicast.bytes; + tgtobj->stats.rx.bcast.num += srcobj->rx.bcast.num; + tgtobj->stats.rx.bcast.bytes += srcobj->rx.bcast.bytes; + tgtobj->stats.rx.raw.num += srcobj->rx.raw.num; + tgtobj->stats.rx.raw.bytes += srcobj->rx.raw.bytes; + tgtobj->stats.rx.intra_bss.pkts.num += + srcobj->rx.intra_bss.pkts.num; + tgtobj->stats.rx.intra_bss.pkts.bytes += + srcobj->rx.intra_bss.pkts.bytes; + tgtobj->stats.rx.intra_bss.fail.num += + srcobj->rx.intra_bss.fail.num; + tgtobj->stats.rx.intra_bss.fail.bytes += + srcobj->rx.intra_bss.fail.bytes; + + tgtobj->stats.tx.last_ack_rssi = + srcobj->tx.last_ack_rssi; + tgtobj->stats.rx.mec_drop.num += srcobj->rx.mec_drop.num; + tgtobj->stats.rx.mec_drop.bytes += srcobj->rx.mec_drop.bytes; + tgtobj->stats.rx.multipass_rx_pkt_drop += + srcobj->rx.multipass_rx_pkt_drop; +} + +static inline void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj, + struct dp_vdev *srcobj) +{ + DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.nawds_mcast); + + DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.rcvd); + DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.processed); + DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.reinject_pkts); + DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.inspect_pkts); + DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.raw.raw_pkt); + DP_STATS_AGGR(tgtobj, srcobj, tx_i.raw.dma_map_error); + DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_host.num); + DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_target); + DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sg.sg_pkt); + DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.mcast_en.mcast_pkt); + DP_STATS_AGGR(tgtobj, srcobj, + tx_i.mcast_en.dropped_map_error); + DP_STATS_AGGR(tgtobj, srcobj, + tx_i.mcast_en.dropped_self_mac); + DP_STATS_AGGR(tgtobj, srcobj, + tx_i.mcast_en.dropped_send_fail); + DP_STATS_AGGR(tgtobj, srcobj, tx_i.mcast_en.ucast); + DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.dma_error); + DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.ring_full); + DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.enqueue_fail); + DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.desc_na.num); + DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.res_full); + DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.headroom_insufficient); + DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified); + DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified_raw); + DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sniffer_rcvd); + DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.exception_fw); + DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.completion_fw); + + tgtobj->stats.tx_i.dropped.dropped_pkt.num = + tgtobj->stats.tx_i.dropped.dma_error + + tgtobj->stats.tx_i.dropped.ring_full + + tgtobj->stats.tx_i.dropped.enqueue_fail + + tgtobj->stats.tx_i.dropped.desc_na.num + + tgtobj->stats.tx_i.dropped.res_full; + +} + +static inline void dp_update_vdev_stats(struct cdp_vdev_stats *tgtobj, + struct dp_peer *srcobj) +{ + uint8_t i; + uint8_t pream_type; + + for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { + for (i = 0; i < MAX_MCS; i++) { + tgtobj->tx.pkt_type[pream_type]. + mcs_count[i] += + srcobj->stats.tx.pkt_type[pream_type]. + mcs_count[i]; + tgtobj->rx.pkt_type[pream_type]. + mcs_count[i] += + srcobj->stats.rx.pkt_type[pream_type]. + mcs_count[i]; + } + } + + for (i = 0; i < MAX_BW; i++) { + tgtobj->tx.bw[i] += srcobj->stats.tx.bw[i]; + tgtobj->rx.bw[i] += srcobj->stats.rx.bw[i]; + } + + for (i = 0; i < SS_COUNT; i++) { + tgtobj->tx.nss[i] += srcobj->stats.tx.nss[i]; + tgtobj->rx.nss[i] += srcobj->stats.rx.nss[i]; + } + + for (i = 0; i < WME_AC_MAX; i++) { + tgtobj->tx.wme_ac_type[i] += + srcobj->stats.tx.wme_ac_type[i]; + tgtobj->rx.wme_ac_type[i] += + srcobj->stats.rx.wme_ac_type[i]; + tgtobj->tx.excess_retries_per_ac[i] += + srcobj->stats.tx.excess_retries_per_ac[i]; + } + + for (i = 0; i < MAX_GI; i++) { + tgtobj->tx.sgi_count[i] += + srcobj->stats.tx.sgi_count[i]; + tgtobj->rx.sgi_count[i] += + srcobj->stats.rx.sgi_count[i]; + } + + for (i = 0; i < MAX_RECEPTION_TYPES; i++) + tgtobj->rx.reception_type[i] += + srcobj->stats.rx.reception_type[i]; + + tgtobj->tx.comp_pkt.bytes += srcobj->stats.tx.comp_pkt.bytes; + tgtobj->tx.comp_pkt.num += srcobj->stats.tx.comp_pkt.num; + tgtobj->tx.ucast.num += srcobj->stats.tx.ucast.num; + tgtobj->tx.ucast.bytes += srcobj->stats.tx.ucast.bytes; + tgtobj->tx.mcast.num += srcobj->stats.tx.mcast.num; + tgtobj->tx.mcast.bytes += srcobj->stats.tx.mcast.bytes; + tgtobj->tx.bcast.num += srcobj->stats.tx.bcast.num; + tgtobj->tx.bcast.bytes += srcobj->stats.tx.bcast.bytes; + tgtobj->tx.tx_success.num += srcobj->stats.tx.tx_success.num; + tgtobj->tx.tx_success.bytes += + srcobj->stats.tx.tx_success.bytes; + tgtobj->tx.nawds_mcast.num += + srcobj->stats.tx.nawds_mcast.num; + tgtobj->tx.nawds_mcast.bytes += + srcobj->stats.tx.nawds_mcast.bytes; + tgtobj->tx.nawds_mcast_drop += + srcobj->stats.tx.nawds_mcast_drop; + tgtobj->tx.num_ppdu_cookie_valid += + srcobj->stats.tx.num_ppdu_cookie_valid; + tgtobj->tx.tx_failed += srcobj->stats.tx.tx_failed; + tgtobj->tx.ofdma += srcobj->stats.tx.ofdma; + tgtobj->tx.stbc += srcobj->stats.tx.stbc; + tgtobj->tx.ldpc += srcobj->stats.tx.ldpc; + tgtobj->tx.retries += srcobj->stats.tx.retries; + tgtobj->tx.non_amsdu_cnt += srcobj->stats.tx.non_amsdu_cnt; + tgtobj->tx.amsdu_cnt += srcobj->stats.tx.amsdu_cnt; + tgtobj->tx.non_ampdu_cnt += srcobj->stats.tx.non_ampdu_cnt; + tgtobj->tx.ampdu_cnt += srcobj->stats.tx.ampdu_cnt; + tgtobj->tx.dropped.fw_rem.num += srcobj->stats.tx.dropped.fw_rem.num; + tgtobj->tx.dropped.fw_rem.bytes += + srcobj->stats.tx.dropped.fw_rem.bytes; + tgtobj->tx.dropped.fw_rem_tx += + srcobj->stats.tx.dropped.fw_rem_tx; + tgtobj->tx.dropped.fw_rem_notx += + srcobj->stats.tx.dropped.fw_rem_notx; + tgtobj->tx.dropped.fw_reason1 += + srcobj->stats.tx.dropped.fw_reason1; + tgtobj->tx.dropped.fw_reason2 += + srcobj->stats.tx.dropped.fw_reason2; + tgtobj->tx.dropped.fw_reason3 += + srcobj->stats.tx.dropped.fw_reason3; + tgtobj->tx.dropped.age_out += srcobj->stats.tx.dropped.age_out; + tgtobj->rx.err.mic_err += srcobj->stats.rx.err.mic_err; + if (srcobj->stats.rx.rssi != 0) + tgtobj->rx.rssi = srcobj->stats.rx.rssi; + tgtobj->rx.rx_rate = srcobj->stats.rx.rx_rate; + tgtobj->rx.err.decrypt_err += srcobj->stats.rx.err.decrypt_err; + tgtobj->rx.non_ampdu_cnt += srcobj->stats.rx.non_ampdu_cnt; + tgtobj->rx.amsdu_cnt += srcobj->stats.rx.ampdu_cnt; + tgtobj->rx.non_amsdu_cnt += srcobj->stats.rx.non_amsdu_cnt; + tgtobj->rx.amsdu_cnt += srcobj->stats.rx.amsdu_cnt; + tgtobj->rx.nawds_mcast_drop += srcobj->stats.rx.nawds_mcast_drop; + tgtobj->rx.to_stack.num += srcobj->stats.rx.to_stack.num; + tgtobj->rx.to_stack.bytes += srcobj->stats.rx.to_stack.bytes; + + for (i = 0; i < CDP_MAX_RX_RINGS; i++) { + tgtobj->rx.rcvd_reo[i].num += + srcobj->stats.rx.rcvd_reo[i].num; + tgtobj->rx.rcvd_reo[i].bytes += + srcobj->stats.rx.rcvd_reo[i].bytes; + } + + srcobj->stats.rx.unicast.num = + srcobj->stats.rx.to_stack.num - + srcobj->stats.rx.multicast.num; + srcobj->stats.rx.unicast.bytes = + srcobj->stats.rx.to_stack.bytes - + srcobj->stats.rx.multicast.bytes; + + tgtobj->rx.unicast.num += srcobj->stats.rx.unicast.num; + tgtobj->rx.unicast.bytes += srcobj->stats.rx.unicast.bytes; + tgtobj->rx.multicast.num += srcobj->stats.rx.multicast.num; + tgtobj->rx.multicast.bytes += srcobj->stats.rx.multicast.bytes; + tgtobj->rx.bcast.num += srcobj->stats.rx.bcast.num; + tgtobj->rx.bcast.bytes += srcobj->stats.rx.bcast.bytes; + tgtobj->rx.raw.num += srcobj->stats.rx.raw.num; + tgtobj->rx.raw.bytes += srcobj->stats.rx.raw.bytes; + tgtobj->rx.intra_bss.pkts.num += + srcobj->stats.rx.intra_bss.pkts.num; + tgtobj->rx.intra_bss.pkts.bytes += + srcobj->stats.rx.intra_bss.pkts.bytes; + tgtobj->rx.intra_bss.fail.num += + srcobj->stats.rx.intra_bss.fail.num; + tgtobj->rx.intra_bss.fail.bytes += + srcobj->stats.rx.intra_bss.fail.bytes; + tgtobj->tx.last_ack_rssi = + srcobj->stats.tx.last_ack_rssi; + tgtobj->rx.mec_drop.num += srcobj->stats.rx.mec_drop.num; + tgtobj->rx.mec_drop.bytes += srcobj->stats.rx.mec_drop.bytes; + tgtobj->rx.multipass_rx_pkt_drop += + srcobj->stats.rx.multipass_rx_pkt_drop; +} + +#define DP_UPDATE_STATS(_tgtobj, _srcobj) \ + do { \ + uint8_t i; \ + uint8_t pream_type; \ + for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ + for (i = 0; i < MAX_MCS; i++) { \ + DP_STATS_AGGR(_tgtobj, _srcobj, \ + tx.pkt_type[pream_type].mcs_count[i]); \ + DP_STATS_AGGR(_tgtobj, _srcobj, \ + rx.pkt_type[pream_type].mcs_count[i]); \ + } \ + } \ + \ + for (i = 0; i < MAX_BW; i++) { \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \ + } \ + \ + for (i = 0; i < SS_COUNT; i++) { \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \ + } \ + for (i = 0; i < WME_AC_MAX; i++) { \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \ + \ + } \ + \ + for (i = 0; i < MAX_GI; i++) { \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \ + } \ + \ + for (i = 0; i < MAX_RECEPTION_TYPES; i++) \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \ + \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \ + \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \ + if (_srcobj->stats.rx.rssi != 0) \ + DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rssi); \ + DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \ + \ + for (i = 0; i < CDP_MAX_RX_RINGS; i++) \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \ + \ + _srcobj->stats.rx.unicast.num = \ + _srcobj->stats.rx.to_stack.num - \ + _srcobj->stats.rx.multicast.num; \ + _srcobj->stats.rx.unicast.bytes = \ + _srcobj->stats.rx.to_stack.bytes - \ + _srcobj->stats.rx.multicast.bytes; \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \ + \ + _tgtobj->stats.tx.last_ack_rssi = \ + _srcobj->stats.tx.last_ack_rssi; \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \ + } while (0) + +extern int dp_peer_find_attach(struct dp_soc *soc); +extern void dp_peer_find_detach(struct dp_soc *soc); +extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer); +extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer); +extern void dp_peer_find_hash_erase(struct dp_soc *soc); + +/* + * dp_peer_ppdu_delayed_ba_init() Initialize ppdu in peer + * @peer: Datapath peer + * + * return: void + */ +void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer); + +/* + * dp_peer_ppdu_delayed_ba_cleanup() free ppdu allocated in peer + * @peer: Datapath peer + * + * return: void + */ +void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer); + +extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer); +void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer); +void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, + bool reuse); +void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, + bool reuse); +void dp_peer_unref_delete(struct dp_peer *peer); +extern void *dp_find_peer_by_addr(struct cdp_pdev *dev, + uint8_t *peer_mac_addr); +extern struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc, + uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id); + +#ifdef DP_PEER_EXTENDED_API +/** + * dp_register_peer() - Register peer into physical device + * @soc_hdl - data path soc handle + * @pdev_id - device instance id + * @sta_desc - peer description + * + * Register peer into physical device + * + * Return: QDF_STATUS_SUCCESS registration success + * QDF_STATUS_E_FAULT peer not found + */ +QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + struct ol_txrx_desc_type *sta_desc); + +/** + * dp_clear_peer() - remove peer from physical device + * @soc_hdl - data path soc handle + * @pdev_id - device instance id + * @peer_addr - peer mac address + * + * remove peer from physical device + * + * Return: QDF_STATUS_SUCCESS registration success + * QDF_STATUS_E_FAULT peer not found + */ +QDF_STATUS dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + struct qdf_mac_addr peer_addr); + +/* + * dp_find_peer_exist - find peer if already exists + * @soc: datapath soc handle + * @pdev_id: physical device instance id + * @peer_mac_addr: peer mac address + * + * Return: true or false + */ +bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + uint8_t *peer_addr); + +/* + * dp_find_peer_exist_on_vdev - find if peer exists on the given vdev + * @soc: datapath soc handle + * @vdev_id: vdev instance id + * @peer_mac_addr: peer mac address + * + * Return: true or false + */ +bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + uint8_t *peer_addr); + +/* + * dp_find_peer_exist_on_other_vdev - find if peer exists + * on other than the given vdev + * @soc: datapath soc handle + * @vdev_id: vdev instance id + * @peer_mac_addr: peer mac address + * @max_bssid: max number of bssids + * + * Return: true or false + */ +bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, uint8_t *peer_addr, + uint16_t max_bssid); + +void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle, + struct cdp_vdev *vdev, + uint8_t *peer_addr); + +/** + * dp_peer_state_update() - update peer local state + * @pdev - data path device instance + * @peer_addr - peer mac address + * @state - new peer local state + * + * update peer local state + * + * Return: QDF_STATUS_SUCCESS registration success + */ +QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac, + enum ol_txrx_peer_state state); + +/** + * dp_get_vdevid() - Get virtual interface id which peer registered + * @soc - datapath soc handle + * @peer_mac - peer mac address + * @vdev_id - virtual interface id which peer registered + * + * Get virtual interface id which peer registered + * + * Return: QDF_STATUS_SUCCESS registration success + */ +QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, + uint8_t *vdev_id); +struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle, + struct qdf_mac_addr peer_addr); +struct cdp_vdev *dp_get_vdev_for_peer(void *peer); +uint8_t *dp_peer_get_peer_mac_addr(void *peer); + +/** + * dp_get_peer_state() - Get local peer state + * @soc - datapath soc handle + * @vdev_id - vdev id + * @peer_mac - peer mac addr + * + * Get local peer state + * + * Return: peer status + */ +int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac); +void dp_local_peer_id_pool_init(struct dp_pdev *pdev); +void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer); +void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer); +#else +/** + * dp_get_vdevid() - Get virtual interface id which peer registered + * @soc - datapath soc handle + * @peer_mac - peer mac address + * @vdev_id - virtual interface id which peer registered + * + * Get virtual interface id which peer registered + * + * Return: QDF_STATUS_SUCCESS registration success + */ +static inline +QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, + uint8_t *vdev_id) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev) +{ +} + +static inline +void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer) +{ +} + +static inline +void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer) +{ +} +#endif +int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, + uint8_t *peer_mac, uint16_t vdev_id, + uint8_t tid, + int status); +int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc, + uint8_t *peer_mac, uint16_t vdev_id, + uint8_t dialogtoken, uint16_t tid, + uint16_t batimeout, + uint16_t buffersize, + uint16_t startseqnum); +QDF_STATUS dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, + uint8_t *peer_mac, uint16_t vdev_id, + uint8_t tid, uint8_t *dialogtoken, + uint16_t *statuscode, + uint16_t *buffersize, + uint16_t *batimeout); +QDF_STATUS dp_set_addba_response(struct cdp_soc_t *cdp_soc, + uint8_t *peer_mac, + uint16_t vdev_id, uint8_t tid, + uint16_t statuscode); +int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, + uint16_t vdev_id, int tid, + uint16_t reasoncode); +/* + * dp_delba_tx_completion_wifi3() - Handle delba tx completion + * + * @cdp_soc: soc handle + * @vdev_id: id of the vdev handle + * @peer_mac: peer mac address + * @tid: Tid number + * @status: Tx completion status + * Indicate status of delba Tx to DP for stats update and retry + * delba if tx failed. + * + */ +int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, + uint16_t vdev_id, uint8_t tid, + int status); +extern QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid, + uint32_t ba_window_size, + uint32_t start_seq); + +extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, + enum hal_reo_cmd_type type, struct hal_reo_cmd_params *params, + void (*callback_fn), void *data); + +extern void dp_reo_cmdlist_destroy(struct dp_soc *soc); + +/** + * dp_reo_status_ring_handler - Handler for REO Status ring + * @int_ctx: pointer to DP interrupt context + * @soc: DP Soc handle + * + * Returns: Number of descriptors reaped + */ +uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx, + struct dp_soc *soc); +void dp_aggregate_vdev_stats(struct dp_vdev *vdev, + struct cdp_vdev_stats *vdev_stats); +void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status); +void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status); +uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id, + qdf_nbuf_t nbuf, + uint8_t newmac[][QDF_MAC_ADDR_SIZE], + uint8_t new_mac_cnt); +void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); + +void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); +QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev, + uint32_t stats_type_upload_mask, uint32_t config_param_0, + uint32_t config_param_1, uint32_t config_param_2, + uint32_t config_param_3, int cookie, int cookie_msb, + uint8_t mac_id); +void dp_htt_stats_print_tag(struct dp_pdev *pdev, + uint8_t tag_type, uint32_t *tag_buf); +void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf); +/** + * dp_rxtid_stats_cmd_cb - function pointer for peer + * rx tid stats cmd call_back + */ +typedef void (*dp_rxtid_stats_cmd_cb)(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status); +int dp_peer_rxtid_stats(struct dp_peer *peer, + dp_rxtid_stats_cmd_cb dp_stats_cmd_cb, + void *cb_ctxt); +QDF_STATUS +dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac, enum cdp_sec_type sec_type, + uint32_t *rx_pn); + +QDF_STATUS +dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac, enum cdp_sec_type sec_type, + bool is_unicast); + +void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id); + +QDF_STATUS +dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac, + bool is_unicast, uint32_t *key); + +/** + * dp_check_pdev_exists() - Validate pdev before use + * @soc - dp soc handle + * @data - pdev handle + * + * Return: 0 - success/invalid - failure + */ +bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data); + +/** + * dp_update_delay_stats() - Update delay statistics in structure + * and fill min, max and avg delay + * @pdev: pdev handle + * @delay: delay in ms + * @tid: tid value + * @mode: type of tx delay mode + * @ring id: ring number + * + * Return: none + */ +void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay, + uint8_t tid, uint8_t mode, uint8_t ring_id); + +/** + * dp_print_ring_stats(): Print tail and head pointer + * @pdev: DP_PDEV handle + * + * Return:void + */ +void dp_print_ring_stats(struct dp_pdev *pdev); + +/** + * dp_print_pdev_cfg_params() - Print the pdev cfg parameters + * @pdev_handle: DP pdev handle + * + * Return - void + */ +void dp_print_pdev_cfg_params(struct dp_pdev *pdev); + +/** + * dp_print_soc_cfg_params()- Dump soc wlan config parameters + * @soc_handle: Soc handle + * + * Return: void + */ +void dp_print_soc_cfg_params(struct dp_soc *soc); + +/** + * dp_srng_get_str_from_ring_type() - Return string name for a ring + * @ring_type: Ring + * + * Return: char const pointer + */ +const +char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type); + +/* + * dp_txrx_path_stats() - Function to display dump stats + * @soc - soc handle + * + * return: none + */ +void dp_txrx_path_stats(struct dp_soc *soc); + +/* + * dp_print_per_ring_stats(): Packet count per ring + * @soc - soc handle + * + * Return - None + */ +void dp_print_per_ring_stats(struct dp_soc *soc); + +/** + * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level + * @pdev: DP PDEV handle + * + * return: void + */ +void dp_aggregate_pdev_stats(struct dp_pdev *pdev); + +/** + * dp_print_rx_rates(): Print Rx rate stats + * @vdev: DP_VDEV handle + * + * Return:void + */ +void dp_print_rx_rates(struct dp_vdev *vdev); + +/** + * dp_print_tx_rates(): Print tx rates + * @vdev: DP_VDEV handle + * + * Return:void + */ +void dp_print_tx_rates(struct dp_vdev *vdev); + +/** + * dp_print_peer_stats():print peer stats + * @peer: DP_PEER handle + * + * return void + */ +void dp_print_peer_stats(struct dp_peer *peer); + +/** + * dp_print_pdev_tx_stats(): Print Pdev level TX stats + * @pdev: DP_PDEV Handle + * + * Return:void + */ +void +dp_print_pdev_tx_stats(struct dp_pdev *pdev); + +/** + * dp_print_pdev_rx_stats(): Print Pdev level RX stats + * @pdev: DP_PDEV Handle + * + * Return: void + */ +void +dp_print_pdev_rx_stats(struct dp_pdev *pdev); + +/** + * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats + * @pdev: DP_PDEV Handle + * + * Return: void + */ +void +dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev); + +/** + * dp_print_soc_tx_stats(): Print SOC level stats + * @soc DP_SOC Handle + * + * Return: void + */ +void dp_print_soc_tx_stats(struct dp_soc *soc); + +/** + * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc + * @soc: dp_soc handle + * + * Return: None + */ +void dp_print_soc_interrupt_stats(struct dp_soc *soc); + +/** + * dp_print_soc_rx_stats: Print SOC level Rx stats + * @soc: DP_SOC Handle + * + * Return:void + */ +void dp_print_soc_rx_stats(struct dp_soc *soc); + +/** + * dp_get_mac_id_for_pdev() - Return mac corresponding to pdev for mac + * + * @mac_id: MAC id + * @pdev_id: pdev_id corresponding to pdev, 0 for MCL + * + * Single pdev using both MACs will operate on both MAC rings, + * which is the case for MCL. + * For WIN each PDEV will operate one ring, so index is zero. + * + */ +static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id) +{ + if (mac_id && pdev_id) { + qdf_print("Both mac_id and pdev_id cannot be non zero"); + QDF_BUG(0); + return 0; + } + return (mac_id + pdev_id); +} + +/** + * dp_get_lmac_id_for_pdev_id() - Return lmac id corresponding to host pdev id + * @soc: soc pointer + * @mac_id: MAC id + * @pdev_id: pdev_id corresponding to pdev, 0 for MCL + * + * For MCL, Single pdev using both MACs will operate on both MAC rings. + * + * For WIN, each PDEV will operate one ring. + * + */ +static inline int +dp_get_lmac_id_for_pdev_id + (struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id) +{ + if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { + if (mac_id && pdev_id) { + qdf_print("Both mac_id and pdev_id cannot be non zero"); + QDF_BUG(0); + return 0; + } + return (mac_id + pdev_id); + } + + return soc->pdev_list[pdev_id]->lmac_id; +} + +/** + * dp_get_pdev_for_lmac_id() - Return pdev pointer corresponding to lmac id + * @soc: soc pointer + * @lmac_id: LMAC id + * + * For MCL, Single pdev exists + * + * For WIN, each PDEV will operate one ring. + * + */ +static inline struct dp_pdev * + dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id) +{ + int i = 0; + + if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { + i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id); + qdf_assert_always(i < MAX_PDEV_CNT); + + return soc->pdev_list[i]; + } + + /* Typically for MCL as there only 1 PDEV*/ + return soc->pdev_list[0]; +} + +/** + * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding + * to host pdev id + * @soc: soc pointer + * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL + * + * returns target pdev_id for host pdev id. For WIN, this is derived through + * a two step process: + * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change + * during mode switch) + * 2. Get target pdev_id (set up during WMI ready) from lmac_id + * + * For MCL, return the offset-1 translated mac_id + */ +static inline int +dp_get_target_pdev_id_for_host_pdev_id + (struct dp_soc *soc, uint32_t mac_for_pdev) +{ + struct dp_pdev *pdev; + + if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) + return DP_SW2HW_MACID(mac_for_pdev); + + pdev = soc->pdev_list[mac_for_pdev]; + + /*non-MCL case, get original target_pdev mapping*/ + return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id); +} + +/** + * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev corresponding + * to target pdev id + * @soc: soc pointer + * @pdev_id: pdev_id corresponding to target pdev + * + * returns host pdev_id for target pdev id. For WIN, this is derived through + * a two step process: + * 1. Get lmac_id corresponding to target pdev_id + * 2. Get host pdev_id (set up during WMI ready) from lmac_id + * + * For MCL, return the 0-offset pdev_id + */ +static inline int +dp_get_host_pdev_id_for_target_pdev_id + (struct dp_soc *soc, uint32_t pdev_id) +{ + struct dp_pdev *pdev; + int lmac_id; + + if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) + return DP_HW2SW_MACID(pdev_id); + + /*non-MCL case, get original target_lmac mapping from target pdev*/ + lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, + DP_HW2SW_MACID(pdev_id)); + + /*Get host pdev from lmac*/ + pdev = dp_get_pdev_for_lmac_id(soc, lmac_id); + + return pdev->pdev_id; +} + +/* + * dp_get_mac_id_for_mac() - Return mac corresponding WIN and MCL mac_ids + * + * @soc: handle to DP soc + * @mac_id: MAC id + * + * Single pdev using both MACs will operate on both MAC rings, + * which is the case for MCL. + * For WIN each PDEV will operate one ring, so index is zero. + * + */ +static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id) +{ + /* + * Single pdev using both MACs will operate on both MAC rings, + * which is the case for MCL. + */ + if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) + return mac_id; + + /* For WIN each PDEV will operate one ring, so index is zero. */ + return 0; +} + +bool dp_is_soc_reinit(struct dp_soc *soc); + +/* + * dp_is_subtype_data() - check if the frame subtype is data + * + * @frame_ctrl: Frame control field + * + * check the frame control field and verify if the packet + * is a data packet. + * + * Return: true or false + */ +static inline bool dp_is_subtype_data(uint16_t frame_ctrl) +{ + if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) == + QDF_IEEE80211_FC0_TYPE_DATA) && + (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == + QDF_IEEE80211_FC0_SUBTYPE_DATA) || + ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == + QDF_IEEE80211_FC0_SUBTYPE_QOS))) { + return true; + } + + return false; +} + +#ifdef WDI_EVENT_ENABLE +QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, + uint32_t stats_type_upload_mask, + uint8_t mac_id); + +int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, + wdi_event_subscribe *event_cb_sub_handle, + uint32_t event); + +int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, + wdi_event_subscribe *event_cb_sub_handle, + uint32_t event); + +void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc, + void *data, u_int16_t peer_id, + int status, u_int8_t pdev_id); + +int dp_wdi_event_attach(struct dp_pdev *txrx_pdev); +int dp_wdi_event_detach(struct dp_pdev *txrx_pdev); +int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, + bool enable); + +/** + * dp_get_pldev() - function to get pktlog device handle + * @soc_hdl: datapath soc handle + * @pdev_id: physical device id + * + * Return: pktlog device handle or NULL + */ +void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); +void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn); + +static inline void +dp_hif_update_pipe_callback(struct dp_soc *dp_soc, + void *cb_context, + QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), + uint8_t pipe_id) +{ + struct hif_msg_callbacks hif_pipe_callbacks; + + /* TODO: Temporary change to bypass HTC connection for this new + * HIF pipe, which will be used for packet log and other high- + * priority HTT messages. Proper HTC connection to be added + * later once required FW changes are available + */ + hif_pipe_callbacks.rxCompletionHandler = callback; + hif_pipe_callbacks.Context = cb_context; + hif_update_pipe_callback(dp_soc->hif_handle, + DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks); +} + +QDF_STATUS dp_peer_stats_notify(struct dp_pdev *pdev, struct dp_peer *peer); + +#else +static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, + wdi_event_subscribe *event_cb_sub_handle, + uint32_t event) +{ + return 0; +} + +static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, + wdi_event_subscribe *event_cb_sub_handle, + uint32_t event) +{ + return 0; +} + +static inline +void dp_wdi_event_handler(enum WDI_EVENT event, + struct dp_soc *soc, + void *data, u_int16_t peer_id, + int status, u_int8_t pdev_id) +{ +} + +static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev) +{ + return 0; +} + +static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev) +{ + return 0; +} + +static inline int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, + bool enable) +{ + return 0; +} +static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, + uint32_t stats_type_upload_mask, uint8_t mac_id) +{ + return 0; +} + +static inline void +dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn) +{ +} + +static inline void +dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context, + QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), + uint8_t pipe_id) +{ +} + +static inline QDF_STATUS dp_peer_stats_notify(struct dp_pdev *pdev, + struct dp_peer *peer) +{ + return QDF_STATUS_SUCCESS; +} + +#endif /* CONFIG_WIN */ + +#ifdef VDEV_PEER_PROTOCOL_COUNT +/** + * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters + * @vdev: VDEV DP object + * @nbuf: data packet + * @peer: Peer DP object + * @is_egress: whether egress or ingress + * @is_rx: whether rx or tx + * + * This function updates the per-peer protocol counters + * Return: void + */ +void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev, + qdf_nbuf_t nbuf, + struct dp_peer *peer, + bool is_egress, + bool is_rx); + +/** + * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters + * @soc: SOC DP object + * @vdev_id: vdev_id + * @nbuf: data packet + * @is_egress: whether egress or ingress + * @is_rx: whether rx or tx + * + * This function updates the per-peer protocol counters + * Return: void + */ + +void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc, + int8_t vdev_id, + qdf_nbuf_t nbuf, + bool is_egress, + bool is_rx); + +#else +#define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, peer, \ + is_egress, is_rx) +#endif + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl); +int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool, + bool force); +#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ + +#ifdef PEER_PROTECTED_ACCESS +/** + * dp_peer_unref_del_find_by_id() - dec ref and del peer if ref count is + * taken by dp_peer_find_by_id + * @peer: peer context + * + * Return: none + */ +static inline void dp_peer_unref_del_find_by_id(struct dp_peer *peer) +{ + dp_peer_unref_delete(peer); +} +#else +static inline void dp_peer_unref_del_find_by_id(struct dp_peer *peer) +{ +} +#endif + +#ifdef WLAN_FEATURE_DP_EVENT_HISTORY +/** + * dp_srng_access_start() - Wrapper function to log access start of a hal ring + * @int_ctx: pointer to DP interrupt context + * @soc: DP Soc handle + * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced + * + * Return: 0 on success; error on failure + */ +int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc, + hal_ring_handle_t hal_ring_hdl); + +/** + * dp_srng_access_end() - Wrapper function to log access end of a hal ring + * @int_ctx: pointer to DP interrupt context + * @soc: DP Soc handle + * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced + * + * Return: void + */ +void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc, + hal_ring_handle_t hal_ring_hdl); + +#else + +static inline int dp_srng_access_start(struct dp_intr *int_ctx, + struct dp_soc *dp_soc, + hal_ring_handle_t hal_ring_hdl) +{ + hal_soc_handle_t hal_soc = dp_soc->hal_soc; + + return hal_srng_access_start(hal_soc, hal_ring_hdl); +} + +static inline void dp_srng_access_end(struct dp_intr *int_ctx, + struct dp_soc *dp_soc, + hal_ring_handle_t hal_ring_hdl) +{ + hal_soc_handle_t hal_soc = dp_soc->hal_soc; + + return hal_srng_access_end(hal_soc, hal_ring_hdl); +} +#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ + +#ifdef QCA_ENH_V3_STATS_SUPPORT +/** + * dp_pdev_print_delay_stats(): Print pdev level delay stats + * @pdev: DP_PDEV handle + * + * Return:void + */ +void dp_pdev_print_delay_stats(struct dp_pdev *pdev); + +/** + * dp_pdev_print_tid_stats(): Print pdev level tid stats + * @pdev: DP_PDEV handle + * + * Return:void + */ +void dp_pdev_print_tid_stats(struct dp_pdev *pdev); +#endif /* CONFIG_WIN */ + +void dp_soc_set_txrx_ring_map(struct dp_soc *soc); + +#ifndef WLAN_TX_PKT_CAPTURE_ENH +/** + * dp_tx_ppdu_stats_attach - Initialize Tx PPDU stats and enhanced capture + * @pdev: DP PDEV + * + * Return: none + */ +static inline void dp_tx_ppdu_stats_attach(struct dp_pdev *pdev) +{ +} + +/** + * dp_tx_ppdu_stats_detach - Cleanup Tx PPDU stats and enhanced capture + * @pdev: DP PDEV + * + * Return: none + */ +static inline void dp_tx_ppdu_stats_detach(struct dp_pdev *pdev) +{ +} + +/** + * dp_tx_ppdu_stats_process - Deferred PPDU stats handler + * @context: Opaque work context (PDEV) + * + * Return: none + */ +static inline void dp_tx_ppdu_stats_process(void *context) +{ +} + +/** + * dp_tx_add_to_comp_queue() - add completion msdu to queue + * @soc: DP Soc handle + * @tx_desc: software Tx descriptor + * @ts : Tx completion status from HAL/HTT descriptor + * @peer: DP peer + * + * Return: none + */ +static inline +QDF_STATUS dp_tx_add_to_comp_queue(struct dp_soc *soc, + struct dp_tx_desc_s *desc, + struct hal_tx_completion_status *ts, + struct dp_peer *peer) +{ + return QDF_STATUS_E_FAILURE; +} + +/* + * dp_tx_capture_htt_frame_counter: increment counter for htt_frame_type + * pdev: DP pdev handle + * htt_frame_type: htt frame type received from fw + * + * return: void + */ +static inline +void dp_tx_capture_htt_frame_counter(struct dp_pdev *pdev, + uint32_t htt_frame_type) +{ +} + +/* + * dp_tx_cature_stats: print tx capture stats + * @pdev: DP PDEV handle + * + * return: void + */ +static inline +void dp_print_pdev_tx_capture_stats(struct dp_pdev *pdev) +{ +} + +#endif + +#ifdef FEATURE_PERPKT_INFO +void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf); +#else +static inline +void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf) +{ +} +#endif + +/** + * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev + * @vdev: DP vdev handle + * + * Return: struct cdp_vdev pointer + */ +static inline +struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev) +{ + return (struct cdp_vdev *)vdev; +} + +/** + * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev + * @pdev: DP pdev handle + * + * Return: struct cdp_pdev pointer + */ +static inline +struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev) +{ + return (struct cdp_pdev *)pdev; +} + +/** + * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc + * @psoc: DP psoc handle + * + * Return: struct cdp_soc pointer + */ +static inline +struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc) +{ + return (struct cdp_soc *)psoc; +} + +/** + * dp_soc_to_cdp_soc_t() - typecast dp psoc to + * ol txrx soc handle + * @psoc: DP psoc handle + * + * Return: struct cdp_soc_t pointer + */ +static inline +struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc) +{ + return (struct cdp_soc_t *)psoc; +} + +/** + * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to + * dp soc handle + * @psoc: CDP psoc handle + * + * Return: struct dp_soc pointer + */ +static inline +struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc) +{ + return (struct dp_soc *)psoc; +} + +#if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA) +/** + * dp_rx_flow_update_fse_stats() - Update a flow's statistics + * @pdev: pdev handle + * @flow_id: flow index (truncated hash) in the Rx FST + * + * Return: Success when flow statistcs is updated, error on failure + */ +QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev, + struct cdp_rx_flow_info *rx_flow_info, + struct cdp_flow_stats *stats); + +/** + * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table + * @pdev: pdev handle + * @rx_flow_info: DP flow parameters + * + * Return: Success when flow is deleted, error on failure + */ +QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev, + struct cdp_rx_flow_info *rx_flow_info); + +/** + * dp_rx_flow_add_entry() - Add a flow entry to flow search table + * @pdev: DP pdev instance + * @rx_flow_info: DP flow paramaters + * + * Return: Success when flow is added, no-memory or already exists on error + */ +QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev, + struct cdp_rx_flow_info *rx_flow_info); + +/** + * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters + * @soc: SoC handle + * @pdev: Pdev handle + * + * Return: Handle to flow search table entry + */ +QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev); + +/** + * dp_rx_fst_detach() - De-initialize Rx FST + * @soc: SoC handle + * @pdev: Pdev handle + * + * Return: None + */ +void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev); + +/** + * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach + * @soc: SoC handle + * @pdev: Pdev handle + * + * Return: Success when fst parameters are programmed in FW, error otherwise + */ +QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc, + struct dp_pdev *pdev); +#else /* !((WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)) */ + +/** + * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters + * @soc: SoC handle + * @pdev: Pdev handle + * + * Return: Handle to flow search table entry + */ +static inline +QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +/** + * dp_rx_fst_detach() - De-initialize Rx FST + * @soc: SoC handle + * @pdev: Pdev handle + * + * Return: None + */ +static inline +void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev) +{ +} +#endif + +/** + * dp_get_vdev_from_soc_vdev_id_wifi3() - Returns vdev object given the vdev id + * @soc: core DP soc context + * @vdev_id: vdev id from vdev object can be retrieved + * + * Return: struct dp_vdev*: Pointer to DP vdev object + */ +static inline struct dp_vdev * +dp_get_vdev_from_soc_vdev_id_wifi3(struct dp_soc *soc, + uint8_t vdev_id) +{ + if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT)) + return NULL; + + return soc->vdev_id_map[vdev_id]; +} + +/** + * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id + * @soc: core DP soc context + * @pdev_id: pdev id from pdev object can be retrieved + * + * Return: struct dp_pdev*: Pointer to DP pdev object + */ +static inline struct dp_pdev * +dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc, + uint8_t pdev_id) +{ + if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT)) + return NULL; + + return soc->pdev_list[pdev_id]; +} + +/* + * dp_rx_tid_update_wifi3() – Update receive TID state + * @peer: Datapath peer handle + * @tid: TID + * @ba_window_size: BlockAck window size + * @start_seq: Starting sequence number + * + * Return: QDF_STATUS code + */ +QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t + ba_window_size, uint32_t start_seq); + +/** + * dp_get_peer_mac_list(): function to get peer mac list of vdev + * @soc: Datapath soc handle + * @vdev_id: vdev id + * @newmac: Table of the clients mac + * @mac_cnt: No. of MACs required + * + * return: no of clients + */ +uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id, + u_int8_t newmac[][QDF_MAC_ADDR_SIZE], + u_int16_t mac_cnt); +/* + * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported + * @soc: DP SoC context + * @max_mac_rings: No of MAC rings + * + * Return: None + */ +void dp_is_hw_dbs_enable(struct dp_soc *soc, + int *max_mac_rings); + + +#if defined(WLAN_SUPPORT_RX_FISA) +void dp_rx_dump_fisa_table(struct dp_soc *soc); +#endif /* WLAN_SUPPORT_RX_FISA */ + +/** + * dp_rx_skip_tlvs() - Skip TLVs len + L2 hdr_offset, save in nbuf->cb + * @nbuf: nbuf cb to be updated + * @l2_hdr_offset: l2_hdr_offset + * + * Return: None + */ +void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding); + +#ifdef MAX_ALLOC_PAGE_SIZE +/** + * dp_set_page_size() - Set the max page size for hw link desc. + * For MCL the page size is set to OS defined value and for WIN + * the page size is set to the max_alloc_size cfg ini + * param. + * This is to ensure that WIN gets contiguous memory allocations + * as per requirement. + * @pages: link desc page handle + * @max_alloc_size: max_alloc_size + * + * Return: None + */ +static inline +void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages, + uint32_t max_alloc_size) +{ + pages->page_size = qdf_page_size; +} + +#else +static inline +void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages, + uint32_t max_alloc_size) +{ + pages->page_size = max_alloc_size; +} +#endif /* MAX_ALLOC_PAGE_SIZE */ + +/** + * dp_history_get_next_index() - get the next entry to record an entry + * in the history. + * @curr_idx: Current index where the last entry is written. + * @max_entries: Max number of entries in the history + * + * This function assumes that the max number os entries is a power of 2. + * + * Returns: The index where the next entry is to be written. + */ +static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx, + uint32_t max_entries) +{ + uint32_t idx = qdf_atomic_inc_return(curr_idx); + + return idx & (max_entries - 1); +} + +#ifdef DP_MEM_PRE_ALLOC +/** + * dp_context_alloc_mem() - allocate memory for DP context + * @soc: datapath soc handle + * @ctxt_type: DP context type + * @ctxt_size: DP context size + * + * Return: DP context address + */ +void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, + size_t ctxt_size); + +/** + * dp_context_free_mem() - Free memory of DP context + * @soc: datapath soc handle + * @ctxt_type: DP context type + * @vaddr: Address of context memory + * + * Return: None + */ +void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, + void *vaddr); + +/** + * dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages + * @soc: datapath soc handle + * @desc_type: memory request source type + * @pages: multi page information storage + * @element_size: each element size + * @element_num: total number of elements should be allocated + * @memctxt: memory context + * @cacheable: coherent memory or cacheable memory + * + * This function is a wrapper for memory allocation over multiple + * pages, if dp prealloc method is registered, then will try prealloc + * firstly. if prealloc failed, fall back to regular way over + * qdf_mem_multi_pages_alloc(). + * + * Return: None + */ +void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc, + enum dp_desc_type desc_type, + struct qdf_mem_multi_page_t *pages, + size_t element_size, + uint16_t element_num, + qdf_dma_context_t memctxt, + bool cacheable); + +/** + * dp_desc_multi_pages_mem_free() - free multiple pages memory + * @soc: datapath soc handle + * @desc_type: memory request source type + * @pages: multi page information storage + * @memctxt: memory context + * @cacheable: coherent memory or cacheable memory + * + * This function is a wrapper for multiple pages memory free, + * if memory is got from prealloc pool, put it back to pool. + * otherwise free by qdf_mem_multi_pages_free(). + * + * Return: None + */ +void dp_desc_multi_pages_mem_free(struct dp_soc *soc, + enum dp_desc_type desc_type, + struct qdf_mem_multi_page_t *pages, + qdf_dma_context_t memctxt, + bool cacheable); + +#else +static inline +void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, + size_t ctxt_size) +{ + return qdf_mem_malloc(ctxt_size); +} + +static inline +void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, + void *vaddr) +{ + qdf_mem_free(vaddr); +} + +static inline +void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc, + enum dp_desc_type desc_type, + struct qdf_mem_multi_page_t *pages, + size_t element_size, + uint16_t element_num, + qdf_dma_context_t memctxt, + bool cacheable) +{ + qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size, + element_num, memctxt, cacheable); +} + +static inline +void dp_desc_multi_pages_mem_free(struct dp_soc *soc, + enum dp_desc_type desc_type, + struct qdf_mem_multi_page_t *pages, + qdf_dma_context_t memctxt, + bool cacheable) +{ + qdf_mem_multi_pages_free(soc->osdev, pages, + memctxt, cacheable); +} + +#endif +#ifdef FEATURE_RUNTIME_PM +/** + * dp_runtime_get() - Get dp runtime refcount + * @soc: Datapath soc handle + * + * Get dp runtime refcount by increment of an atomic variable, which can block + * dp runtime resume to wait to flush pending tx by runtime suspend. + * + * Return: Current refcount + */ +static inline int32_t dp_runtime_get(struct dp_soc *soc) +{ + return qdf_atomic_inc_return(&soc->dp_runtime_refcount); +} + +/** + * dp_runtime_put() - Return dp runtime refcount + * @soc: Datapath soc handle + * + * Return dp runtime refcount by decrement of an atomic variable, allow dp + * runtime resume finish. + * + * Return: Current refcount + */ +static inline int32_t dp_runtime_put(struct dp_soc *soc) +{ + return qdf_atomic_dec_return(&soc->dp_runtime_refcount); +} + +/** + * dp_runtime_get_refcount() - Get dp runtime refcount + * @soc: Datapath soc handle + * + * Get dp runtime refcount by returning an atomic variable + * + * Return: Current refcount + */ +static inline int32_t dp_runtime_get_refcount(struct dp_soc *soc) +{ + return qdf_atomic_read(&soc->dp_runtime_refcount); +} + +/** + * dp_runtime_init() - Init dp runtime refcount when dp soc init + * @soc: Datapath soc handle + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS dp_runtime_init(struct dp_soc *soc) +{ + return qdf_atomic_init(&soc->dp_runtime_refcount); +} +#else +static inline int32_t dp_runtime_get(struct dp_soc *soc) +{ + return 0; +} + +static inline int32_t dp_runtime_put(struct dp_soc *soc) +{ + return 0; +} + +static inline QDF_STATUS dp_runtime_init(struct dp_soc *soc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * dp_peer_flush_frags() - Flush all fragments for a particular + * peer + * @soc_hdl - data path soc handle + * @vdev_id - vdev id + * @peer_addr - peer mac address + * + * Return: None + */ +void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + uint8_t *peer_mac); +#endif /* #ifndef _DP_INTERNAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c new file mode 100644 index 0000000000000000000000000000000000000000..2f6ecb3510760081fe47b4691c98c73091d34b3c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c @@ -0,0 +1,2013 @@ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifdef IPA_OFFLOAD + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dp_types.h" +#include "dp_htt.h" +#include "dp_tx.h" +#include "dp_rx.h" +#include "dp_ipa.h" + +/* Ring index for WBM2SW2 release ring */ +#define IPA_TX_COMP_RING_IDX HAL_IPA_TX_COMP_RING_IDX + +/* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */ +#define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT (2048) + +/* WAR for IPA_OFFLOAD case. In some cases, its observed that WBM tries to + * release a buffer into WBM2SW RELEASE ring for IPA, and the ring is full. + * This causes back pressure, resulting in a FW crash. + * By leaving some entries with no buffer attached, WBM will be able to write + * to the ring, and from dumps we can figure out the buffer which is causing + * this issue. + */ +#define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16 +/** + *struct dp_ipa_reo_remap_record - history for dp ipa reo remaps + * @ix0_reg: reo destination ring IX0 value + * @ix2_reg: reo destination ring IX2 value + * @ix3_reg: reo destination ring IX3 value + */ +struct dp_ipa_reo_remap_record { + uint64_t timestamp; + uint32_t ix0_reg; + uint32_t ix2_reg; + uint32_t ix3_reg; +}; + +#define REO_REMAP_HISTORY_SIZE 32 + +struct dp_ipa_reo_remap_record dp_ipa_reo_remap_history[REO_REMAP_HISTORY_SIZE]; + +static qdf_atomic_t dp_ipa_reo_remap_history_index; +static int dp_ipa_reo_remap_record_index_next(qdf_atomic_t *index) +{ + int next = qdf_atomic_inc_return(index); + + if (next == REO_REMAP_HISTORY_SIZE) + qdf_atomic_sub(REO_REMAP_HISTORY_SIZE, index); + + return next % REO_REMAP_HISTORY_SIZE; +} + +/** + * dp_ipa_reo_remap_history_add() - Record dp ipa reo remap values + * @ix0_val: reo destination ring IX0 value + * @ix2_val: reo destination ring IX2 value + * @ix3_val: reo destination ring IX3 value + * + * Return: None + */ +static void dp_ipa_reo_remap_history_add(uint32_t ix0_val, uint32_t ix2_val, + uint32_t ix3_val) +{ + int idx = dp_ipa_reo_remap_record_index_next( + &dp_ipa_reo_remap_history_index); + struct dp_ipa_reo_remap_record *record = &dp_ipa_reo_remap_history[idx]; + + record->timestamp = qdf_get_log_timestamp(); + record->ix0_reg = ix0_val; + record->ix2_reg = ix2_val; + record->ix3_reg = ix3_val; +} + +static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc, + qdf_nbuf_t nbuf, + bool create) +{ + qdf_mem_info_t mem_map_table = {0}; + + qdf_update_mem_map_table(soc->osdev, &mem_map_table, + qdf_nbuf_get_frag_paddr(nbuf, 0), + skb_end_pointer(nbuf) - nbuf->data); + + if (create) + qdf_ipa_wdi_create_smmu_mapping(1, &mem_map_table); + else + qdf_ipa_wdi_release_smmu_mapping(1, &mem_map_table); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc, + qdf_nbuf_t nbuf, + bool create) +{ + struct dp_pdev *pdev; + int i; + + for (i = 0; i < soc->pdev_count; i++) { + pdev = soc->pdev_list[i]; + if (pdev && pdev->monitor_configured) + return QDF_STATUS_SUCCESS; + } + + if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) || + !qdf_mem_smmu_s1_enabled(soc->osdev)) + return QDF_STATUS_SUCCESS; + + if (!qdf_atomic_read(&soc->ipa_pipes_enabled)) + return QDF_STATUS_SUCCESS; + + return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create); +} + +#ifdef RX_DESC_MULTI_PAGE_ALLOC +static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc, + struct dp_pdev *pdev, + bool create) +{ + struct rx_desc_pool *rx_pool; + uint8_t pdev_id; + uint32_t num_desc, page_id, offset, i; + uint16_t num_desc_per_page; + union dp_rx_desc_list_elem_t *rx_desc_elem; + struct dp_rx_desc *rx_desc; + qdf_nbuf_t nbuf; + + if (!qdf_mem_smmu_s1_enabled(soc->osdev)) + return QDF_STATUS_SUCCESS; + + pdev_id = pdev->pdev_id; + rx_pool = &soc->rx_desc_buf[pdev_id]; + + qdf_spin_lock_bh(&rx_pool->lock); + num_desc = rx_pool->pool_size; + num_desc_per_page = rx_pool->desc_pages.num_element_per_page; + for (i = 0; i < num_desc; i++) { + page_id = i / num_desc_per_page; + offset = i % num_desc_per_page; + if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages))) + break; + rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool); + rx_desc = &rx_desc_elem->rx_desc; + if ((!(rx_desc->in_use)) || rx_desc->unmapped) + continue; + nbuf = rx_desc->nbuf; + + __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create); + } + qdf_spin_unlock_bh(&rx_pool->lock); + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc, + struct dp_pdev *pdev, + bool create) +{ + struct rx_desc_pool *rx_pool; + uint8_t pdev_id; + qdf_nbuf_t nbuf; + int i; + + if (!qdf_mem_smmu_s1_enabled(soc->osdev)) + return QDF_STATUS_SUCCESS; + + pdev_id = pdev->pdev_id; + rx_pool = &soc->rx_desc_buf[pdev_id]; + + qdf_spin_lock_bh(&rx_pool->lock); + for (i = 0; i < rx_pool->pool_size; i++) { + if ((!(rx_pool->array[i].rx_desc.in_use)) || + rx_pool->array[i].rx_desc.unmapped) + continue; + + nbuf = rx_pool->array[i].rx_desc.nbuf; + + __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create); + } + qdf_spin_unlock_bh(&rx_pool->lock); + + return QDF_STATUS_SUCCESS; +} +#endif /* RX_DESC_MULTI_PAGE_ALLOC */ + +/** + * dp_tx_ipa_uc_detach - Free autonomy TX resources + * @soc: data path instance + * @pdev: core txrx pdev context + * + * Free allocated TX buffers with WBM SRNG + * + * Return: none + */ +static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + int idx; + qdf_nbuf_t nbuf; + struct dp_ipa_resources *ipa_res; + + for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) { + nbuf = (qdf_nbuf_t) + soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx]; + if (!nbuf) + continue; + + if (qdf_mem_smmu_s1_enabled(soc->osdev)) + __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, false); + + qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL); + qdf_nbuf_free(nbuf); + soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] = + (void *)NULL; + } + + qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned); + soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL; + + ipa_res = &pdev->ipa_resource; + iounmap(ipa_res->tx_comp_doorbell_vaddr); + + qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable); + qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable); +} + +/** + * dp_rx_ipa_uc_detach - free autonomy RX resources + * @soc: data path instance + * @pdev: core txrx pdev context + * + * This function will detach DP RX into main device context + * will free DP Rx resources. + * + * Return: none + */ +static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; + + qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable); + qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable); +} + +int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) + return QDF_STATUS_SUCCESS; + + /* TX resource detach */ + dp_tx_ipa_uc_detach(soc, pdev); + + /* RX resource detach */ + dp_rx_ipa_uc_detach(soc, pdev); + + return QDF_STATUS_SUCCESS; /* success */ +} + +/** + * dp_tx_ipa_uc_attach - Allocate autonomy TX resources + * @soc: data path instance + * @pdev: Physical device handle + * + * Allocate TX buffer from non-cacheable memory + * Attache allocated TX buffers with WBM SRNG + * + * Return: int + */ +static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + uint32_t tx_buffer_count; + uint32_t ring_base_align = 8; + qdf_dma_addr_t buffer_paddr; + struct hal_srng *wbm_srng = (struct hal_srng *) + soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; + struct hal_srng_params srng_params; + uint32_t paddr_lo; + uint32_t paddr_hi; + void *ring_entry; + int num_entries; + qdf_nbuf_t nbuf; + int retval = QDF_STATUS_SUCCESS; + int max_alloc_count = 0; + + /* + * Uncomment when dp_ops_cfg.cfg_attach is implemented + * unsigned int uc_tx_buf_sz = + * dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev); + */ + unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT; + unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1; + + hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng), + &srng_params); + num_entries = srng_params.num_entries; + + max_alloc_count = + num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES; + if (max_alloc_count <= 0) { + dp_err("incorrect value for buffer count %u", max_alloc_count); + return -EINVAL; + } + + dp_info("requested %d buffers to be posted to wbm ring", + max_alloc_count); + + soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = + qdf_mem_malloc(num_entries * + sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned)); + if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) { + dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail"); + return -ENOMEM; + } + + hal_srng_access_start_unlocked(soc->hal_soc, + hal_srng_to_hal_ring_handle(wbm_srng)); + + /* + * Allocate Tx buffers as many as possible. + * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty + * Populate Tx buffers into WBM2IPA ring + * This initial buffer population will simulate H/W as source ring, + * and update HP + */ + for (tx_buffer_count = 0; + tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) { + nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE); + if (!nbuf) + break; + + ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc, + hal_srng_to_hal_ring_handle(wbm_srng)); + if (!ring_entry) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: Failed to get WBM ring entry", + __func__); + qdf_nbuf_free(nbuf); + break; + } + + qdf_nbuf_map_single(soc->osdev, nbuf, + QDF_DMA_BIDIRECTIONAL); + buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); + + paddr_lo = ((uint64_t)buffer_paddr & 0x00000000ffffffff); + paddr_hi = ((uint64_t)buffer_paddr & 0x0000001f00000000) >> 32; + HAL_RXDMA_PADDR_LO_SET(ring_entry, paddr_lo); + HAL_RXDMA_PADDR_HI_SET(ring_entry, paddr_hi); + HAL_RXDMA_MANAGER_SET(ring_entry, (IPA_TCL_DATA_RING_IDX + + HAL_WBM_SW0_BM_ID)); + + soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count] + = (void *)nbuf; + + if (qdf_mem_smmu_s1_enabled(soc->osdev)) + __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, true); + } + + hal_srng_access_end_unlocked(soc->hal_soc, + hal_srng_to_hal_ring_handle(wbm_srng)); + + soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count; + + if (tx_buffer_count) { + dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count); + } else { + dp_err("No IPA WDI TX buffer allocated!"); + qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned); + soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL; + retval = -ENOMEM; + } + + return retval; +} + +/** + * dp_rx_ipa_uc_attach - Allocate autonomy RX resources + * @soc: data path instance + * @pdev: core txrx pdev context + * + * This function will attach a DP RX instance into the main + * device (SOC) context. + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + int error; + + if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) + return QDF_STATUS_SUCCESS; + + /* TX resource attach */ + error = dp_tx_ipa_uc_attach(soc, pdev); + if (error) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: DP IPA UC TX attach fail code %d", + __func__, error); + return error; + } + + /* RX resource attach */ + error = dp_rx_ipa_uc_attach(soc, pdev); + if (error) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: DP IPA UC RX attach fail code %d", + __func__, error); + dp_tx_ipa_uc_detach(soc, pdev); + return error; + } + + return QDF_STATUS_SUCCESS; /* success */ +} + +/* + * dp_ipa_ring_resource_setup() - setup IPA ring resources + * @soc: data path SoC handle + * + * Return: none + */ +int dp_ipa_ring_resource_setup(struct dp_soc *soc, + struct dp_pdev *pdev) +{ + struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc; + struct hal_srng *hal_srng; + struct hal_srng_params srng_params; + qdf_dma_addr_t hp_addr; + unsigned long addr_offset, dev_base_paddr; + uint32_t ix0; + + if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) + return QDF_STATUS_SUCCESS; + + /* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */ + hal_srng = (struct hal_srng *) + soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng; + hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), + hal_srng_to_hal_ring_handle(hal_srng), + &srng_params); + + soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr = + srng_params.ring_base_paddr; + soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr = + srng_params.ring_base_vaddr; + soc->ipa_uc_tx_rsc.ipa_tcl_ring_size = + (srng_params.num_entries * srng_params.entry_size) << 2; + /* + * For the register backed memory addresses, use the scn->mem_pa to + * calculate the physical address of the shadow registers + */ + dev_base_paddr = + (unsigned long) + ((struct hif_softc *)(hal_soc->hif_handle))->mem_pa; + addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) - + (unsigned long)(hal_soc->dev_base_addr); + soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr = + (qdf_dma_addr_t)(addr_offset + dev_base_paddr); + + dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", + (unsigned int)addr_offset, + (unsigned int)dev_base_paddr, + (unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr), + (void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr, + (void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr, + srng_params.num_entries, + soc->ipa_uc_tx_rsc.ipa_tcl_ring_size); + + /* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */ + hal_srng = (struct hal_srng *) + soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; + hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), + hal_srng_to_hal_ring_handle(hal_srng), + &srng_params); + + soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr = + srng_params.ring_base_paddr; + soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr = + srng_params.ring_base_vaddr; + soc->ipa_uc_tx_rsc.ipa_wbm_ring_size = + (srng_params.num_entries * srng_params.entry_size) << 2; + soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr = + hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc), + hal_srng_to_hal_ring_handle(hal_srng)); + addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) - + (unsigned long)(hal_soc->dev_base_addr); + soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr = + (qdf_dma_addr_t)(addr_offset + dev_base_paddr); + + dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)", + (unsigned int)addr_offset, + (unsigned int)dev_base_paddr, + (unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr), + (void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr, + (void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr, + srng_params.num_entries, + soc->ipa_uc_tx_rsc.ipa_wbm_ring_size); + + /* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */ + hal_srng = (struct hal_srng *) + soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng; + hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), + hal_srng_to_hal_ring_handle(hal_srng), + &srng_params); + + soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr = + srng_params.ring_base_paddr; + soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr = + srng_params.ring_base_vaddr; + soc->ipa_uc_rx_rsc.ipa_reo_ring_size = + (srng_params.num_entries * srng_params.entry_size) << 2; + addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) - + (unsigned long)(hal_soc->dev_base_addr); + soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr = + (qdf_dma_addr_t)(addr_offset + dev_base_paddr); + + dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", + (unsigned int)addr_offset, + (unsigned int)dev_base_paddr, + (unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr), + (void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr, + (void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr, + srng_params.num_entries, + soc->ipa_uc_rx_rsc.ipa_reo_ring_size); + + hal_srng = (struct hal_srng *) + pdev->rx_refill_buf_ring2.hal_srng; + hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc), + hal_srng_to_hal_ring_handle(hal_srng), + &srng_params); + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr = + srng_params.ring_base_paddr; + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr = + srng_params.ring_base_vaddr; + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size = + (srng_params.num_entries * srng_params.entry_size) << 2; + hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc), + hal_srng_to_hal_ring_handle(hal_srng)); + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr = + qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr); + + dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)", + (unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr), + (void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr, + (void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr, + srng_params.num_entries, + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size); + + /* + * Set DEST_RING_MAPPING_4 to SW2 as default value for + * DESTINATION_RING_CTRL_IX_0. + */ + ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) | + HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) | + HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) | + HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) | + HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) | + HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) | + HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) | + HAL_REO_REMAP_IX0(REO_REMAP_FW, 7); + + hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, NULL, NULL); + + return 0; +} + +static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev, + qdf_shared_mem_t *shared_mem, + void *cpu_addr, + qdf_dma_addr_t dma_addr, + uint32_t size) +{ + qdf_dma_addr_t paddr; + int ret; + + shared_mem->vaddr = cpu_addr; + qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size); + *qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr; + + paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr); + qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr); + + ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable, + shared_mem->vaddr, dma_addr, size); + if (ret) { + dp_err("Unable to get DMA sgtable"); + return QDF_STATUS_E_NOMEM; + } + + qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + struct dp_ipa_resources *ipa_res; + + if (!pdev) { + dp_err("%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + ipa_res = &pdev->ipa_resource; + if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) + return QDF_STATUS_SUCCESS; + + ipa_res->tx_num_alloc_buffer = + (uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; + + dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring, + soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr, + soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr, + soc->ipa_uc_tx_rsc.ipa_tcl_ring_size); + + dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring, + soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr, + soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr, + soc->ipa_uc_tx_rsc.ipa_wbm_ring_size); + + dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring, + soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr, + soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr, + soc->ipa_uc_rx_rsc.ipa_reo_ring_size); + + dp_ipa_get_shared_mem_info( + soc->osdev, &ipa_res->rx_refill_ring, + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr, + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr, + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size); + + if (!qdf_mem_get_dma_addr(soc->osdev, + &ipa_res->tx_comp_ring.mem_info) || + !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info)) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc, + struct dp_ipa_resources *ipa_res) +{ + struct hal_srng *wbm_srng = (struct hal_srng *) + soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; + + hal_srng_dst_set_hp_paddr_confirm(wbm_srng, + ipa_res->tx_comp_doorbell_paddr); + + dp_info("paddr %pK vaddr %pK", + (void *)ipa_res->tx_comp_doorbell_paddr, + (void *)ipa_res->tx_comp_doorbell_vaddr); +} + +#ifdef IPA_SET_RESET_TX_DB_PA +#define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res) +#else +#define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res) \ + dp_ipa_set_tx_doorbell_paddr(soc, ipa_res) +#endif + +QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + struct dp_ipa_resources *ipa_res; + struct hal_srng *reo_srng = (struct hal_srng *) + soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng; + uint32_t tx_comp_doorbell_dmaaddr; + uint32_t rx_ready_doorbell_dmaaddr; + int ret = 0; + + if (!pdev) { + dp_err("%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + ipa_res = &pdev->ipa_resource; + if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) + return QDF_STATUS_SUCCESS; + + ipa_res->tx_comp_doorbell_vaddr = + ioremap(ipa_res->tx_comp_doorbell_paddr, 4); + + if (qdf_mem_smmu_s1_enabled(soc->osdev)) { + ret = pld_smmu_map(soc->osdev->dev, + ipa_res->tx_comp_doorbell_paddr, + &tx_comp_doorbell_dmaaddr, + sizeof(uint32_t)); + ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr; + qdf_assert_always(!ret); + + ret = pld_smmu_map(soc->osdev->dev, + ipa_res->rx_ready_doorbell_paddr, + &rx_ready_doorbell_dmaaddr, + sizeof(uint32_t)); + ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr; + qdf_assert_always(!ret); + } + + DP_IPA_SET_TX_DB_PADDR(soc, ipa_res); + + /* + * For RX, REO module on Napier/Hastings does reordering on incoming + * Ethernet packets and writes one or more descriptors to REO2IPA Rx + * ring.It then updates the ring’s Write/Head ptr and rings a doorbell + * to IPA. + * Set the doorbell addr for the REO ring. + */ + hal_srng_dst_set_hp_paddr_confirm(reo_srng, + ipa_res->rx_ready_doorbell_paddr); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + uint8_t *op_msg) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (!pdev) { + dp_err("%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx)) + return QDF_STATUS_SUCCESS; + + if (pdev->ipa_uc_op_cb) { + pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt); + } else { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: IPA callback function is not registered", __func__); + qdf_mem_free(op_msg); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + ipa_uc_op_cb_type op_cb, + void *usr_ctxt) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (!pdev) { + dp_err("%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx)) + return QDF_STATUS_SUCCESS; + + pdev->ipa_uc_op_cb = op_cb; + pdev->usr_ctxt = usr_ctxt; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) +{ + /* TBD */ + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_send_ipa_data_frame() - send IPA data frame + * @soc_hdl: datapath soc handle + * @vdev_id: id of the virtual device + * @skb: skb to transmit + * + * Return: skb/ NULL is for success + */ +qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + qdf_nbuf_t skb) +{ + qdf_nbuf_t ret; + + /* Terminate the (single-element) list of tx frames */ + qdf_nbuf_set_next(skb, NULL); + ret = dp_tx_send(soc_hdl, vdev_id, skb); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to tx", __func__); + return ret; + } + + return NULL; +} + +QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + uint32_t ix0; + uint32_t ix2; + + if (!pdev) { + dp_err("%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) + return QDF_STATUS_SUCCESS; + + if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle))) + return QDF_STATUS_E_AGAIN; + + /* Call HAL API to remap REO rings to REO2IPA ring */ + ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) | + HAL_REO_REMAP_IX0(REO_REMAP_SW4, 1) | + HAL_REO_REMAP_IX0(REO_REMAP_SW1, 2) | + HAL_REO_REMAP_IX0(REO_REMAP_SW4, 3) | + HAL_REO_REMAP_IX0(REO_REMAP_SW4, 4) | + HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) | + HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) | + HAL_REO_REMAP_IX0(REO_REMAP_FW, 7); + + if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) { + ix2 = HAL_REO_REMAP_IX2(REO_REMAP_SW4, 16) | + HAL_REO_REMAP_IX2(REO_REMAP_SW4, 17) | + HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) | + HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19) | + HAL_REO_REMAP_IX2(REO_REMAP_SW4, 20) | + HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) | + HAL_REO_REMAP_IX2(REO_REMAP_SW4, 22) | + HAL_REO_REMAP_IX2(REO_REMAP_SW4, 23); + + hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, + &ix2, &ix2); + dp_ipa_reo_remap_history_add(ix0, ix2, ix2); + } else { + hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, + NULL, NULL); + dp_ipa_reo_remap_history_add(ix0, 0, 0); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + uint32_t ix0; + uint32_t ix2; + uint32_t ix3; + + if (!pdev) { + dp_err("%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) + return QDF_STATUS_SUCCESS; + + if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle))) + return QDF_STATUS_E_AGAIN; + + /* Call HAL API to remap REO rings to REO2IPA ring */ + ix0 = HAL_REO_REMAP_IX0(REO_REMAP_TCL, 0) | + HAL_REO_REMAP_IX0(REO_REMAP_SW1, 1) | + HAL_REO_REMAP_IX0(REO_REMAP_SW2, 2) | + HAL_REO_REMAP_IX0(REO_REMAP_SW3, 3) | + HAL_REO_REMAP_IX0(REO_REMAP_SW2, 4) | + HAL_REO_REMAP_IX0(REO_REMAP_RELEASE, 5) | + HAL_REO_REMAP_IX0(REO_REMAP_FW, 6) | + HAL_REO_REMAP_IX0(REO_REMAP_FW, 7); + + if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) { + dp_reo_remap_config(soc, &ix2, &ix3); + + hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, + &ix2, &ix3); + dp_ipa_reo_remap_history_add(ix0, ix2, ix3); + } else { + hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, + NULL, NULL); + dp_ipa_reo_remap_history_add(ix0, 0, 0); + } + + return QDF_STATUS_SUCCESS; +} + +/* This should be configurable per H/W configuration enable status */ +#define L3_HEADER_PADDING 2 + +#ifdef CONFIG_IPA_WDI_UNIFIED_API + +#ifndef QCA_LL_TX_FLOW_CONTROL_V2 +static inline void dp_setup_mcc_sys_pipes( + qdf_ipa_sys_connect_params_t *sys_in, + qdf_ipa_wdi_conn_in_params_t *pipe_in) +{ + /* Setup MCC sys pipe */ + QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = + DP_IPA_MAX_IFACE; + for (int i = 0; i < DP_IPA_MAX_IFACE; i++) + memcpy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[i], + &sys_in[i], sizeof(qdf_ipa_sys_connect_params_t)); +} +#else +static inline void dp_setup_mcc_sys_pipes( + qdf_ipa_sys_connect_params_t *sys_in, + qdf_ipa_wdi_conn_in_params_t *pipe_in) +{ + QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0; +} +#endif + +static void dp_ipa_wdi_tx_params(struct dp_soc *soc, + struct dp_ipa_resources *ipa_res, + qdf_ipa_wdi_pipe_setup_info_t *tx, + bool over_gsi) +{ + struct tcl_data_cmd *tcl_desc_ptr; + uint8_t *desc_addr; + uint32_t desc_size; + + if (over_gsi) + QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS; + else + QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS; + + QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) = + qdf_mem_get_dma_addr(soc->osdev, + &ipa_res->tx_comp_ring.mem_info); + QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) = + qdf_mem_get_dma_size(soc->osdev, + &ipa_res->tx_comp_ring.mem_info); + + /* WBM Tail Pointer Address */ + QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) = + soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr; + QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true; + + QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) = + qdf_mem_get_dma_addr(soc->osdev, + &ipa_res->tx_ring.mem_info); + QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = + qdf_mem_get_dma_size(soc->osdev, + &ipa_res->tx_ring.mem_info); + + /* TCL Head Pointer Address */ + QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) = + soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr; + QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true; + + QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) = + ipa_res->tx_num_alloc_buffer; + + QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0; + + /* Preprogram TCL descriptor */ + desc_addr = + (uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx); + desc_size = sizeof(struct tcl_data_cmd); + HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size); + tcl_desc_ptr = (struct tcl_data_cmd *) + (QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1); + tcl_desc_ptr->buf_addr_info.return_buffer_manager = + HAL_RX_BUF_RBM_SW2_BM; + tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */ + tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET; + tcl_desc_ptr->packet_offset = 2; /* padding for alignment */ +} + +static void dp_ipa_wdi_rx_params(struct dp_soc *soc, + struct dp_ipa_resources *ipa_res, + qdf_ipa_wdi_pipe_setup_info_t *rx, + bool over_gsi) +{ + if (over_gsi) + QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = + IPA_CLIENT_WLAN2_PROD; + else + QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = + IPA_CLIENT_WLAN1_PROD; + + QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) = + qdf_mem_get_dma_addr(soc->osdev, + &ipa_res->rx_rdy_ring.mem_info); + QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) = + qdf_mem_get_dma_size(soc->osdev, + &ipa_res->rx_rdy_ring.mem_info); + + /* REO Tail Pointer Address */ + QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) = + soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr; + QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true; + + QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) = + qdf_mem_get_dma_addr(soc->osdev, + &ipa_res->rx_refill_ring.mem_info); + QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) = + qdf_mem_get_dma_size(soc->osdev, + &ipa_res->rx_refill_ring.mem_info); + + /* FW Head Pointer Address */ + QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) = + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr; + QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false; + + QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = + RX_PKT_TLVS_LEN + L3_HEADER_PADDING; +} + +static void +dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc, + struct dp_ipa_resources *ipa_res, + qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu, + bool over_gsi) +{ + struct tcl_data_cmd *tcl_desc_ptr; + uint8_t *desc_addr; + uint32_t desc_size; + + if (over_gsi) + QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = + IPA_CLIENT_WLAN2_CONS; + else + QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = + IPA_CLIENT_WLAN1_CONS; + + qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu), + &ipa_res->tx_comp_ring.sgtable, + sizeof(sgtable_t)); + QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) = + qdf_mem_get_dma_size(soc->osdev, + &ipa_res->tx_comp_ring.mem_info); + /* WBM Tail Pointer Address */ + QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) = + soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr; + QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true; + + qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu), + &ipa_res->tx_ring.sgtable, + sizeof(sgtable_t)); + QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) = + qdf_mem_get_dma_size(soc->osdev, + &ipa_res->tx_ring.mem_info); + /* TCL Head Pointer Address */ + QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) = + soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr; + QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true; + + QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) = + ipa_res->tx_num_alloc_buffer; + QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0; + + /* Preprogram TCL descriptor */ + desc_addr = (uint8_t *)QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE( + tx_smmu); + desc_size = sizeof(struct tcl_data_cmd); + HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size); + tcl_desc_ptr = (struct tcl_data_cmd *) + (QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(tx_smmu) + 1); + tcl_desc_ptr->buf_addr_info.return_buffer_manager = + HAL_RX_BUF_RBM_SW2_BM; + tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */ + tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET; + tcl_desc_ptr->packet_offset = 2; /* padding for alignment */ +} + +static void +dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc, + struct dp_ipa_resources *ipa_res, + qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu, + bool over_gsi) +{ + if (over_gsi) + QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = + IPA_CLIENT_WLAN2_PROD; + else + QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) = + IPA_CLIENT_WLAN1_PROD; + + qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu), + &ipa_res->rx_rdy_ring.sgtable, + sizeof(sgtable_t)); + QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) = + qdf_mem_get_dma_size(soc->osdev, + &ipa_res->rx_rdy_ring.mem_info); + /* REO Tail Pointer Address */ + QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) = + soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr; + QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true; + + qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu), + &ipa_res->rx_refill_ring.sgtable, + sizeof(sgtable_t)); + QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) = + qdf_mem_get_dma_size(soc->osdev, + &ipa_res->rx_refill_ring.mem_info); + + /* FW Head Pointer Address */ + QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) = + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr; + QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false; + + QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) = + RX_PKT_TLVS_LEN + L3_HEADER_PADDING; +} + +QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + void *ipa_i2w_cb, void *ipa_w2i_cb, + void *ipa_wdi_meter_notifier_cb, + uint32_t ipa_desc_size, void *ipa_priv, + bool is_rm_enabled, uint32_t *tx_pipe_handle, + uint32_t *rx_pipe_handle, bool is_smmu_enabled, + qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + struct dp_ipa_resources *ipa_res; + qdf_ipa_ep_cfg_t *tx_cfg; + qdf_ipa_ep_cfg_t *rx_cfg; + qdf_ipa_wdi_pipe_setup_info_t *tx = NULL; + qdf_ipa_wdi_pipe_setup_info_t *rx = NULL; + qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu; + qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu; + qdf_ipa_wdi_conn_in_params_t pipe_in; + qdf_ipa_wdi_conn_out_params_t pipe_out; + int ret; + + if (!pdev) { + dp_err("%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + ipa_res = &pdev->ipa_resource; + if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) + return QDF_STATUS_SUCCESS; + + qdf_mem_zero(&pipe_in, sizeof(pipe_in)); + qdf_mem_zero(&pipe_out, sizeof(pipe_out)); + + if (is_smmu_enabled) + QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = true; + else + QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = false; + + dp_setup_mcc_sys_pipes(sys_in, &pipe_in); + + /* TX PIPE */ + if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) { + tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(&pipe_in); + tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu); + } else { + tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in); + tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx); + } + + QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT; + QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN; + QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0; + QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0; + QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0; + QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC; + QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true; + + /** + * Transfer Ring: WBM Ring + * Transfer Ring Doorbell PA: WBM Tail Pointer Address + * Event Ring: TCL ring + * Event Ring Doorbell PA: TCL Head Pointer Address + */ + if (is_smmu_enabled) + dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi); + else + dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi); + + /* RX PIPE */ + if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) { + rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(&pipe_in); + rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu); + } else { + rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in); + rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx); + } + + QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT; + QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN; + QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1; + QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0; + QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0; + QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0; + QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1; + QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC; + QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true; + + /** + * Transfer Ring: REO Ring + * Transfer Ring Doorbell PA: REO Tail Pointer Address + * Event Ring: FW ring + * Event Ring Doorbell PA: FW Head Pointer Address + */ + if (is_smmu_enabled) + dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi); + else + dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi); + + QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb; + QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv; + + /* Connect WDI IPA PIPEs */ + ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out); + + if (ret) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d", + __func__, ret); + return QDF_STATUS_E_FAILURE; + } + + /* IPA uC Doorbell registers */ + dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x", + (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out), + (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out)); + + ipa_res->tx_comp_doorbell_paddr = + QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out); + ipa_res->rx_ready_doorbell_paddr = + QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out); + + soc->ipa_first_tx_db_access = true; + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_ipa_setup_iface() - Setup IPA header and register interface + * @ifname: Interface name + * @mac_addr: Interface MAC address + * @prod_client: IPA prod client type + * @cons_client: IPA cons client type + * @session_id: Session ID + * @is_ipv6_enabled: Is IPV6 enabled or not + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr, + qdf_ipa_client_type_t prod_client, + qdf_ipa_client_type_t cons_client, + uint8_t session_id, bool is_ipv6_enabled) +{ + qdf_ipa_wdi_reg_intf_in_params_t in; + qdf_ipa_wdi_hdr_info_t hdr_info; + struct dp_ipa_uc_tx_hdr uc_tx_hdr; + struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6; + int ret = -EINVAL; + + dp_debug("Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, ifname, + QDF_MAC_ADDR_REF(mac_addr)); + qdf_mem_zero(&in, sizeof(qdf_ipa_wdi_reg_intf_in_params_t)); + qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); + qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr); + + /* IPV4 header */ + uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP); + + QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr; + QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN; + QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II; + QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) = + DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET; + + QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname; + qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]), + &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); + QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client; + QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1; + QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) = + htonl(session_id << 16); + QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000); + + /* IPV6 header */ + if (is_ipv6_enabled) { + qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr, + DP_IPA_UC_WLAN_TX_HDR_LEN); + uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6); + QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6; + qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]), + &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); + } + + dp_debug("registering for session_id: %u", session_id); + + ret = qdf_ipa_wdi_reg_intf(&in); + + if (ret) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: ipa_wdi_reg_intf: register IPA interface falied: ret=%d", + __func__, ret); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#else /* CONFIG_IPA_WDI_UNIFIED_API */ +QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + void *ipa_i2w_cb, void *ipa_w2i_cb, + void *ipa_wdi_meter_notifier_cb, + uint32_t ipa_desc_size, void *ipa_priv, + bool is_rm_enabled, uint32_t *tx_pipe_handle, + uint32_t *rx_pipe_handle) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + struct dp_ipa_resources *ipa_res; + qdf_ipa_wdi_pipe_setup_info_t *tx; + qdf_ipa_wdi_pipe_setup_info_t *rx; + qdf_ipa_wdi_conn_in_params_t pipe_in; + qdf_ipa_wdi_conn_out_params_t pipe_out; + struct tcl_data_cmd *tcl_desc_ptr; + uint8_t *desc_addr; + uint32_t desc_size; + int ret; + + if (!pdev) { + dp_err("%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + ipa_res = &pdev->ipa_resource; + if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) + return QDF_STATUS_SUCCESS; + + qdf_mem_zero(&tx, sizeof(qdf_ipa_wdi_pipe_setup_info_t)); + qdf_mem_zero(&rx, sizeof(qdf_ipa_wdi_pipe_setup_info_t)); + qdf_mem_zero(&pipe_in, sizeof(pipe_in)); + qdf_mem_zero(&pipe_out, sizeof(pipe_out)); + + /* TX PIPE */ + /** + * Transfer Ring: WBM Ring + * Transfer Ring Doorbell PA: WBM Tail Pointer Address + * Event Ring: TCL ring + * Event Ring Doorbell PA: TCL Head Pointer Address + */ + tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in); + QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT; + QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN; + QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0; + QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0; + QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0; + QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC; + QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true; + QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS; + QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) = + ipa_res->tx_comp_ring_base_paddr; + QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) = + ipa_res->tx_comp_ring_size; + /* WBM Tail Pointer Address */ + QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) = + soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr; + QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) = + ipa_res->tx_ring_base_paddr; + QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size; + /* TCL Head Pointer Address */ + QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) = + soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr; + QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) = + ipa_res->tx_num_alloc_buffer; + QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0; + + /* Preprogram TCL descriptor */ + desc_addr = + (uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx); + desc_size = sizeof(struct tcl_data_cmd); + HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size); + tcl_desc_ptr = (struct tcl_data_cmd *) + (QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1); + tcl_desc_ptr->buf_addr_info.return_buffer_manager = + HAL_RX_BUF_RBM_SW2_BM; + tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */ + tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET; + tcl_desc_ptr->packet_offset = 2; /* padding for alignment */ + + /* RX PIPE */ + /** + * Transfer Ring: REO Ring + * Transfer Ring Doorbell PA: REO Tail Pointer Address + * Event Ring: FW ring + * Event Ring Doorbell PA: FW Head Pointer Address + */ + rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in); + QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT; + QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN; + QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0; + QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0; + QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0; + QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0; + QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1; + QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC; + QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true; + QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD; + QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) = + ipa_res->rx_rdy_ring_base_paddr; + QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) = + ipa_res->rx_rdy_ring_size; + /* REO Tail Pointer Address */ + QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) = + soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr; + QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) = + ipa_res->rx_refill_ring_base_paddr; + QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) = + ipa_res->rx_refill_ring_size; + /* FW Head Pointer Address */ + QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) = + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr; + QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = RX_PKT_TLVS_LEN + + L3_HEADER_PADDING; + QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb; + QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv; + + /* Connect WDI IPA PIPE */ + ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d", + __func__, ret); + return QDF_STATUS_E_FAILURE; + } + + /* IPA uC Doorbell registers */ + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Tx DB PA=0x%x, Rx DB PA=0x%x", + __func__, + (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out), + (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out)); + + ipa_res->tx_comp_doorbell_paddr = + QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out); + ipa_res->tx_comp_doorbell_vaddr = + QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out); + ipa_res->rx_ready_doorbell_paddr = + QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out); + + soc->ipa_first_tx_db_access = true; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK", + __func__, + "transfer_ring_base_pa", + (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx), + "transfer_ring_size", + QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx), + "transfer_ring_doorbell_pa", + (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx), + "event_ring_base_pa", + (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx), + "event_ring_size", + QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx), + "event_ring_doorbell_pa", + (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx), + "num_pkt_buffers", + QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx), + "tx_comp_doorbell_paddr", + (void *)ipa_res->tx_comp_doorbell_paddr); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK", + __func__, + "transfer_ring_base_pa", + (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx), + "transfer_ring_size", + QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx), + "transfer_ring_doorbell_pa", + (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx), + "event_ring_base_pa", + (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx), + "event_ring_size", + QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx), + "event_ring_doorbell_pa", + (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx), + "num_pkt_buffers", + QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx), + "tx_comp_doorbell_paddr", + (void *)ipa_res->rx_ready_doorbell_paddr); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_ipa_setup_iface() - Setup IPA header and register interface + * @ifname: Interface name + * @mac_addr: Interface MAC address + * @prod_client: IPA prod client type + * @cons_client: IPA cons client type + * @session_id: Session ID + * @is_ipv6_enabled: Is IPV6 enabled or not + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr, + qdf_ipa_client_type_t prod_client, + qdf_ipa_client_type_t cons_client, + uint8_t session_id, bool is_ipv6_enabled) +{ + qdf_ipa_wdi_reg_intf_in_params_t in; + qdf_ipa_wdi_hdr_info_t hdr_info; + struct dp_ipa_uc_tx_hdr uc_tx_hdr; + struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6; + int ret = -EINVAL; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, + __func__, ifname, QDF_MAC_ADDR_REF(mac_addr)); + + qdf_mem_zero(&in, sizeof(qdf_ipa_wdi_reg_intf_in_params_t)); + qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); + qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr); + + /* IPV4 header */ + uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP); + + QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr; + QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN; + QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II; + QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) = + DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET; + + QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname; + qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]), + &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); + QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1; + QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) = + htonl(session_id << 16); + QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000); + + /* IPV6 header */ + if (is_ipv6_enabled) { + qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr, + DP_IPA_UC_WLAN_TX_HDR_LEN); + uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6); + QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6; + qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]), + &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t)); + } + + ret = qdf_ipa_wdi_reg_intf(&in); + if (ret) { + dp_err("ipa_wdi_reg_intf: register IPA interface falied: ret=%d", + ret); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#endif /* CONFIG_IPA_WDI_UNIFIED_API */ + +/** + * dp_ipa_cleanup() - Disconnect IPA pipes + * @soc_hdl: dp soc handle + * @pdev_id: dp pdev id + * @tx_pipe_handle: Tx pipe handle + * @rx_pipe_handle: Rx pipe handle + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + uint32_t tx_pipe_handle, uint32_t rx_pipe_handle) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct dp_ipa_resources *ipa_res; + struct dp_pdev *pdev; + int ret; + + ret = qdf_ipa_wdi_disconn_pipes(); + if (ret) { + dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d", + ret); + status = QDF_STATUS_E_FAILURE; + } + + pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + if (qdf_unlikely(!pdev)) { + dp_err_rl("Invalid pdev for pdev_id %d", pdev_id); + status = QDF_STATUS_E_FAILURE; + goto exit; + } + + if (qdf_mem_smmu_s1_enabled(soc->osdev)) { + ipa_res = &pdev->ipa_resource; + + /* unmap has to be the reverse order of smmu map */ + ret = pld_smmu_unmap(soc->osdev->dev, + ipa_res->rx_ready_doorbell_paddr, + sizeof(uint32_t)); + qdf_assert_always(!ret); + + ret = pld_smmu_unmap(soc->osdev->dev, + ipa_res->tx_comp_doorbell_paddr, + sizeof(uint32_t)); + qdf_assert_always(!ret); + } + +exit: + return status; +} + +/** + * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface + * @ifname: Interface name + * @is_ipv6_enabled: Is IPV6 enabled or not + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled) +{ + int ret; + + ret = qdf_ipa_wdi_dereg_intf(ifname); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d", + __func__, ret); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef IPA_SET_RESET_TX_DB_PA +static +QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc, + struct dp_ipa_resources *ipa_res) +{ + hal_ring_handle_t wbm_srng = + soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; + qdf_dma_addr_t hp_addr; + + if (!wbm_srng) + return QDF_STATUS_E_FAILURE; + + hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr; + + hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr); + + dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr); + + return QDF_STATUS_SUCCESS; +} + +#define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res) \ + dp_ipa_set_tx_doorbell_paddr((soc), (ipa_res)) +#define DP_IPA_RESET_TX_DB_PA(soc, ipa_res) \ + dp_ipa_reset_tx_doorbell_pa((soc), (ipa_res)) +#else +#define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res) +#define DP_IPA_RESET_TX_DB_PA(soc, ipa_res) +#endif + +QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + struct hal_srng *wbm_srng = (struct hal_srng *) + soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; + struct dp_ipa_resources *ipa_res; + QDF_STATUS result; + + if (!pdev) { + dp_err("%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + ipa_res = &pdev->ipa_resource; + + qdf_atomic_set(&soc->ipa_pipes_enabled, 1); + DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res); + dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true); + + result = qdf_ipa_wdi_enable_pipes(); + if (result) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Enable WDI PIPE fail, code %d", + __func__, result); + qdf_atomic_set(&soc->ipa_pipes_enabled, 0); + DP_IPA_RESET_TX_DB_PA(soc, ipa_res); + dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ipa_first_tx_db_access) { + hal_srng_dst_init_hp( + soc->hal_soc, wbm_srng, + ipa_res->tx_comp_doorbell_vaddr); + soc->ipa_first_tx_db_access = false; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + QDF_STATUS result; + struct dp_ipa_resources *ipa_res; + + if (!pdev) { + dp_err("%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + ipa_res = &pdev->ipa_resource; + + qdf_sleep(TX_COMP_DRAIN_WAIT_TIMEOUT_MS); + /* + * Reset the tx completion doorbell address before invoking IPA disable + * pipes API to ensure that there is no access to IPA tx doorbell + * address post disable pipes. + */ + DP_IPA_RESET_TX_DB_PA(soc, ipa_res); + + result = qdf_ipa_wdi_disable_pipes(); + if (result) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Disable WDI PIPE fail, code %d", + __func__, result); + qdf_assert_always(0); + return QDF_STATUS_E_FAILURE; + } + + qdf_atomic_set(&soc->ipa_pipes_enabled, 0); + dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false); + + return result ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS; +} + +/** + * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates + * @client: Client type + * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps) + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps) +{ + qdf_ipa_wdi_perf_profile_t profile; + QDF_STATUS result; + + profile.client = client; + profile.max_supported_bw_mbps = max_supported_bw_mbps; + + result = qdf_ipa_wdi_set_perf_profile(&profile); + if (result) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: ipa_wdi_set_perf_profile fail, code %d", + __func__, result); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_ipa_intrabss_send - send IPA RX intra-bss frames + * @pdev: pdev + * @vdev: vdev + * @nbuf: skb + * + * Return: nbuf if TX fails and NULL if TX succeeds + */ +static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev, + struct dp_vdev *vdev, + qdf_nbuf_t nbuf) +{ + struct dp_peer *vdev_peer; + uint16_t len; + + vdev_peer = vdev->vap_bss_peer; + if (qdf_unlikely(!vdev_peer)) + return nbuf; + + qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb)); + len = qdf_nbuf_len(nbuf); + + if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) { + DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.fail, 1, len); + return nbuf; + } + + DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.pkts, 1, len); + return NULL; +} + +bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + qdf_nbuf_t nbuf, bool *fwd_success) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + struct dp_pdev *pdev; + struct dp_peer *da_peer; + struct dp_peer *sa_peer; + qdf_nbuf_t nbuf_copy; + uint8_t da_is_bcmc; + struct ethhdr *eh; + + *fwd_success = false; /* set default as failure */ + + /* + * WDI 3.0 skb->cb[] info from IPA driver + * skb->cb[0] = vdev_id + * skb->cb[1].bit#1 = da_is_bcmc + */ + da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2; + + if (qdf_unlikely(!vdev)) + return false; + + pdev = vdev->pdev; + if (qdf_unlikely(!pdev)) + return false; + + /* no fwd for station mode and just pass up to stack */ + if (vdev->opmode == wlan_op_mode_sta) + return false; + + if (da_is_bcmc) { + nbuf_copy = qdf_nbuf_copy(nbuf); + if (!nbuf_copy) + return false; + + if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy)) + qdf_nbuf_free(nbuf_copy); + else + *fwd_success = true; + + /* return false to pass original pkt up to stack */ + return false; + } + + eh = (struct ethhdr *)qdf_nbuf_data(nbuf); + + if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE)) + return false; + + da_peer = dp_find_peer_by_addr_and_vdev(dp_pdev_to_cdp_pdev(pdev), + dp_vdev_to_cdp_vdev(vdev), + eh->h_dest); + + if (!da_peer) + return false; + + sa_peer = dp_find_peer_by_addr_and_vdev(dp_pdev_to_cdp_pdev(pdev), + dp_vdev_to_cdp_vdev(vdev), + eh->h_source); + + if (!sa_peer) + return false; + + /* + * In intra-bss forwarding scenario, skb is allocated by IPA driver. + * Need to add skb to internal tracking table to avoid nbuf memory + * leak check for unallocated skb. + */ + qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__); + + if (dp_ipa_intrabss_send(pdev, vdev, nbuf)) + qdf_nbuf_free(nbuf); + else + *fwd_success = true; + + return true; +} + +#ifdef MDM_PLATFORM +bool dp_ipa_is_mdm_platform(void) +{ + return true; +} +#else +bool dp_ipa_is_mdm_platform(void) +{ + return false; +} +#endif + +/** + * dp_ipa_frag_nbuf_linearize - linearize nbuf for IPA + * @soc: soc + * @nbuf: source skb + * + * Return: new nbuf if success and otherwise NULL + */ +static qdf_nbuf_t dp_ipa_frag_nbuf_linearize(struct dp_soc *soc, + qdf_nbuf_t nbuf) +{ + uint8_t *src_nbuf_data; + uint8_t *dst_nbuf_data; + qdf_nbuf_t dst_nbuf; + qdf_nbuf_t temp_nbuf = nbuf; + uint32_t nbuf_len = qdf_nbuf_len(nbuf); + bool is_nbuf_head = true; + uint32_t copy_len = 0; + + dst_nbuf = qdf_nbuf_alloc(soc->osdev, RX_DATA_BUFFER_SIZE, + RX_BUFFER_RESERVATION, + RX_DATA_BUFFER_ALIGNMENT, FALSE); + + if (!dst_nbuf) { + dp_err_rl("nbuf allocate fail"); + return NULL; + } + + if ((nbuf_len + L3_HEADER_PADDING) > RX_DATA_BUFFER_SIZE) { + qdf_nbuf_free(dst_nbuf); + dp_err_rl("nbuf is jumbo data"); + return NULL; + } + + /* prepeare to copy all data into new skb */ + dst_nbuf_data = qdf_nbuf_data(dst_nbuf); + while (temp_nbuf) { + src_nbuf_data = qdf_nbuf_data(temp_nbuf); + /* first head nbuf */ + if (is_nbuf_head) { + qdf_mem_copy(dst_nbuf_data, src_nbuf_data, + RX_PKT_TLVS_LEN); + /* leave extra 2 bytes L3_HEADER_PADDING */ + dst_nbuf_data += (RX_PKT_TLVS_LEN + L3_HEADER_PADDING); + src_nbuf_data += RX_PKT_TLVS_LEN; + copy_len = qdf_nbuf_headlen(temp_nbuf) - + RX_PKT_TLVS_LEN; + temp_nbuf = qdf_nbuf_get_ext_list(temp_nbuf); + is_nbuf_head = false; + } else { + copy_len = qdf_nbuf_len(temp_nbuf); + temp_nbuf = qdf_nbuf_queue_next(temp_nbuf); + } + qdf_mem_copy(dst_nbuf_data, src_nbuf_data, copy_len); + dst_nbuf_data += copy_len; + } + + qdf_nbuf_set_len(dst_nbuf, nbuf_len); + /* copy is done, free original nbuf */ + qdf_nbuf_free(nbuf); + + return dst_nbuf; +} + +/** + * dp_ipa_handle_rx_reo_reinject - Handle RX REO reinject skb buffer + * @soc: soc + * @nbuf: skb + * + * Return: nbuf if success and otherwise NULL + */ +qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf) +{ + + if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) + return nbuf; + + /* WLAN IPA is run-time disabled */ + if (!qdf_atomic_read(&soc->ipa_pipes_enabled)) + return nbuf; + + if (!qdf_nbuf_is_frag(nbuf)) + return nbuf; + + /* linearize skb for IPA */ + return dp_ipa_frag_nbuf_linearize(soc, nbuf); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.h new file mode 100644 index 0000000000000000000000000000000000000000..0ec21d3ce1892e432a44b9baf2f48376267d5f6c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.h @@ -0,0 +1,285 @@ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_IPA_H_ +#define _DP_IPA_H_ + +#ifdef IPA_OFFLOAD + +#define DP_IPA_MAX_IFACE 3 +#define IPA_TCL_DATA_RING_IDX 2 +#define IPA_REO_DEST_RING_IDX 3 +#define IPA_RX_REFILL_BUF_RING_IDX 2 + +/* Adding delay before disabling ipa pipes if any Tx Completions are pending */ +#define TX_COMP_DRAIN_WAIT_MS 50 +#define TX_COMP_DRAIN_WAIT_TIMEOUT_MS 200 + +/** + * struct dp_ipa_uc_tx_hdr - full tx header registered to IPA hardware + * @eth: ether II header + */ +struct dp_ipa_uc_tx_hdr { + struct ethhdr eth; +} __packed; + +/** + * struct dp_ipa_uc_rx_hdr - full rx header registered to IPA hardware + * @eth: ether II header + */ +struct dp_ipa_uc_rx_hdr { + struct ethhdr eth; +} __packed; + +#define DP_IPA_UC_WLAN_TX_HDR_LEN sizeof(struct dp_ipa_uc_tx_hdr) +#define DP_IPA_UC_WLAN_RX_HDR_LEN sizeof(struct dp_ipa_uc_rx_hdr) +#define DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET 0 + +/** + * dp_ipa_get_resource() - Client request resource information + * @soc_hdl - data path soc handle + * @pdev_id - device instance id + * + * IPA client will request IPA UC related resource information + * Resource information will be distributed to IPA module + * All of the required resources should be pre-allocated + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); + +/** + * dp_ipa_set_doorbell_paddr () - Set doorbell register physical address to SRNG + * @soc_hdl - data path soc handle + * @pdev_id - device instance id + * + * Set TX_COMP_DOORBELL register physical address to WBM Head_Ptr_MemAddr_LSB + * Set RX_READ_DOORBELL register physical address to REO Head_Ptr_MemAddr_LSB + * + * Return: none + */ +QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id); +QDF_STATUS dp_ipa_uc_set_active(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + bool uc_active, bool is_tx); + +/** + * dp_ipa_op_response() - Handle OP command response from firmware + * @soc_hdl - data path soc handle + * @pdev_id - device instance id + * @op_msg: op response message from firmware + * + * Return: none + */ +QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + uint8_t *op_msg); + +/** + * dp_ipa_register_op_cb() - Register OP handler function + * @soc_hdl - data path soc handle + * @pdev_id - device instance id + * @op_cb: handler function pointer + * + * Return: none + */ +QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + ipa_uc_op_cb_type op_cb, void *usr_ctxt); + +/** + * dp_ipa_get_stat() - Get firmware wdi status + * @soc_hdl - data path soc handle + * @pdev_id - device instance id + * + * Return: none + */ +QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); + +/** + * dp_tx_send_ipa_data_frame() - send IPA data frame + * @soc_hdl: datapath soc handle + * @vdev_id: virtual device/interface id + * @skb: skb + * + * Return: skb/ NULL is for success + */ +qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + qdf_nbuf_t skb); + +/** + * dp_ipa_enable_autonomy() – Enable autonomy RX path + * @soc_hdl - data path soc handle + * @pdev_id - device instance id + * + * Set all RX packet route to IPA REO ring + * Program Destination_Ring_Ctrl_IX_0 REO register to point IPA REO ring + * Return: none + */ +QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); + +/** + * dp_ipa_disable_autonomy() – Disable autonomy RX path + * @soc_hdl - data path soc handle + * @pdev_id - device instance id + * + * Disable RX packet routing to IPA REO + * Program Destination_Ring_Ctrl_IX_0 REO register to disable + * Return: none + */ +QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); + +#ifdef CONFIG_IPA_WDI_UNIFIED_API +/** + * dp_ipa_setup() - Setup and connect IPA pipes + * @soc_hdl - data path soc handle + * @pdev_id - device instance id + * @ipa_i2w_cb: IPA to WLAN callback + * @ipa_w2i_cb: WLAN to IPA callback + * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback + * @ipa_desc_size: IPA descriptor size + * @ipa_priv: handle to the HTT instance + * @is_rm_enabled: Is IPA RM enabled or not + * @tx_pipe_handle: pointer to Tx pipe handle + * @rx_pipe_handle: pointer to Rx pipe handle + * @is_smmu_enabled: Is SMMU enabled or not + * @sys_in: parameters to setup sys pipe in mcc mode + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + void *ipa_i2w_cb, void *ipa_w2i_cb, + void *ipa_wdi_meter_notifier_cb, + uint32_t ipa_desc_size, void *ipa_priv, + bool is_rm_enabled, uint32_t *tx_pipe_handle, + uint32_t *rx_pipe_handle, + bool is_smmu_enabled, + qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi); +#else /* CONFIG_IPA_WDI_UNIFIED_API */ +/** + * dp_ipa_setup() - Setup and connect IPA pipes + * @soc_hdl - data path soc handle + * @pdev_id - device instance id + * @ipa_i2w_cb: IPA to WLAN callback + * @ipa_w2i_cb: WLAN to IPA callback + * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback + * @ipa_desc_size: IPA descriptor size + * @ipa_priv: handle to the HTT instance + * @is_rm_enabled: Is IPA RM enabled or not + * @tx_pipe_handle: pointer to Tx pipe handle + * @rx_pipe_handle: pointer to Rx pipe handle + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + void *ipa_i2w_cb, void *ipa_w2i_cb, + void *ipa_wdi_meter_notifier_cb, + uint32_t ipa_desc_size, void *ipa_priv, + bool is_rm_enabled, uint32_t *tx_pipe_handle, + uint32_t *rx_pipe_handle); +#endif /* CONFIG_IPA_WDI_UNIFIED_API */ +QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + uint32_t tx_pipe_handle, + uint32_t rx_pipe_handle); +QDF_STATUS dp_ipa_remove_header(char *name); +int dp_ipa_add_header_info(char *ifname, uint8_t *mac_addr, + uint8_t session_id, bool is_ipv6_enabled); +int dp_ipa_register_interface(char *ifname, bool is_ipv6_enabled); +QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr, + qdf_ipa_client_type_t prod_client, + qdf_ipa_client_type_t cons_client, + uint8_t session_id, bool is_ipv6_enabled); +QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled); + +/** + * dp_ipa_uc_enable_pipes() - Enable and resume traffic on Tx/Rx pipes + * @soc_hdl - handle to the soc + * @pdev_id - pdev id number, to get the handle + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); + +/** + * dp_ipa_disable_pipes() – Suspend traffic and disable Tx/Rx pipes + * @soc_hdl - handle to the soc + * @pdev_id - pdev id number, to get the handle + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); +QDF_STATUS dp_ipa_set_perf_level(int client, + uint32_t max_supported_bw_mbps); + +/** + * dp_ipa_rx_intrabss_fwd() - Perform intra-bss fwd for IPA RX path + * + * @soc_hdl: data path soc handle + * @vdev_id: virtual device/interface id + * @nbuf: pointer to skb of ethernet packet received from IPA RX path + * @fwd_success: pointer to indicate if skb succeeded in intra-bss TX + * + * This function performs intra-bss forwarding for WDI 3.0 IPA RX path. + * + * Return: true if packet is intra-bss fwd-ed and no need to pass to + * network stack. false if packet needs to be passed to network stack. + */ +bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + qdf_nbuf_t nbuf, bool *fwd_success); +int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev); +int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev); +int dp_ipa_ring_resource_setup(struct dp_soc *soc, + struct dp_pdev *pdev); +QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc, + qdf_nbuf_t nbuf, + bool create); + +bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, + uint32_t *remap2); +bool dp_ipa_is_mdm_platform(void); + +qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf); + +#else +static inline int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static inline int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static inline int dp_ipa_ring_resource_setup(struct dp_soc *soc, + struct dp_pdev *pdev) +{ + return 0; +} + +static inline QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc, + qdf_nbuf_t nbuf, + bool create) +{ + return QDF_STATUS_SUCCESS; +} + +static inline qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, + qdf_nbuf_t nbuf) +{ + return nbuf; +} + +#endif +#endif /* _DP_IPA_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c new file mode 100644 index 0000000000000000000000000000000000000000..926543b156e86d426de96e2450a562fa665d1c8a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c @@ -0,0 +1,12190 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dp_types.h" +#include "dp_internal.h" +#include "dp_tx.h" +#include "dp_tx_desc.h" +#include "dp_rx.h" +#include "dp_rx_mon.h" +#ifdef DP_RATETABLE_SUPPORT +#include "dp_ratetable.h" +#endif +#include +#include +#include "cdp_txrx_cmn_struct.h" +#include "cdp_txrx_stats_struct.h" +#include "cdp_txrx_cmn_reg.h" +#include +#include "dp_peer.h" +#include "dp_rx_mon.h" +#include "htt_stats.h" +#include "dp_htt.h" +#ifdef WLAN_SUPPORT_RX_FISA +#include +#endif +#include "htt_ppdu_stats.h" +#include "qdf_mem.h" /* qdf_mem_malloc,free */ +#include "cfg_ucfg_api.h" +#include "dp_mon_filter.h" +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +#include "cdp_txrx_flow_ctrl_v2.h" +#else +static inline void +cdp_dump_flow_pool_info(struct cdp_soc_t *soc) +{ + return; +} +#endif +#include "dp_ipa.h" +#include "dp_cal_client_api.h" +#ifdef FEATURE_WDS +#include "dp_txrx_wds.h" +#endif +#ifdef ATH_SUPPORT_IQUE +#include "dp_txrx_me.h" +#endif +#if defined(DP_CON_MON) +#ifndef REMOVE_PKT_LOG +#include +#include +#endif +#endif + +#ifdef WLAN_FEATURE_STATS_EXT +#define INIT_RX_HW_STATS_LOCK(_soc) \ + qdf_spinlock_create(&(_soc)->rx_hw_stats_lock) +#define DEINIT_RX_HW_STATS_LOCK(_soc) \ + qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock) +#else +#define INIT_RX_HW_STATS_LOCK(_soc) /* no op */ +#define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */ +#endif + +#ifdef DP_PEER_EXTENDED_API +#define SET_PEER_REF_CNT_ONE(_peer) \ + qdf_atomic_set(&(_peer)->ref_cnt, 1) +#else +#define SET_PEER_REF_CNT_ONE(_peer) +#endif + +/* + * The max size of cdp_peer_stats_param_t is limited to 16 bytes. + * If the buffer size is exceeding this size limit, + * dp_txrx_get_peer_stats is to be used instead. + */ +QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size, + (sizeof(cdp_peer_stats_param_t) <= 16)); + +#ifdef WLAN_FEATURE_DP_EVENT_HISTORY +/* + * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS + * also should be updated accordingly + */ +QDF_COMPILE_TIME_ASSERT(num_intr_grps, + HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS); + +/* + * HIF_EVENT_HIST_MAX should always be power of 2 + */ +QDF_COMPILE_TIME_ASSERT(hif_event_history_size, + (HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0); +#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ + +/* + * If WLAN_CFG_INT_NUM_CONTEXTS is changed, + * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated + */ +QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs, + WLAN_CFG_INT_NUM_CONTEXTS_MAX >= + WLAN_CFG_INT_NUM_CONTEXTS); + +#ifdef WLAN_RX_PKT_CAPTURE_ENH +#include "dp_rx_mon_feature.h" +#else +/* + * dp_config_enh_rx_capture()- API to enable/disable enhanced rx capture + * @pdev_handle: DP_PDEV handle + * @val: user provided value + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_config_enh_rx_capture(struct dp_pdev *pdev_handle, uint8_t val) +{ + return QDF_STATUS_E_INVAL; +} +#endif /* WLAN_RX_PKT_CAPTURE_ENH */ + +#ifdef WLAN_TX_PKT_CAPTURE_ENH +#include "dp_tx_capture.h" +#else +/* + * dp_config_enh_tx_capture()- API to enable/disable enhanced tx capture + * @pdev_handle: DP_PDEV handle + * @val: user provided value + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_config_enh_tx_capture(struct dp_pdev *pdev_handle, uint8_t val) +{ + return QDF_STATUS_E_INVAL; +} +#endif + +void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle, + struct hif_opaque_softc *hif_handle); +static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force); +static struct dp_soc * +dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, HTC_HANDLE htc_handle, + qdf_device_t qdf_osdev, + struct ol_if_ops *ol_ops, uint16_t device_id); +static void dp_pktlogmod_exit(struct dp_pdev *handle); +static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + uint8_t *peer_mac_addr); +static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + uint8_t *peer_mac, uint32_t bitmap); +static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, + bool unmap_only); +#ifdef ENABLE_VERBOSE_DEBUG +bool is_dp_verbose_debug_enabled; +#endif + +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) +static void dp_cfr_filter(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + bool enable, + struct cdp_monitor_filter *filter_val); +static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); +static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + bool enable); +static inline void +dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + struct cdp_cfr_rcc_stats *cfr_rcc_stats); +static inline void +dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); +static inline void +dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + bool enable); +#endif +static inline bool +dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev); +static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, + enum hal_ring_type ring_type, + int ring_num); +#define DP_INTR_POLL_TIMER_MS 5 +/* Generic AST entry aging timer value */ +#define DP_AST_AGING_TIMER_DEFAULT_MS 1000 +#define DP_MCS_LENGTH (6*MAX_MCS) + +#define DP_CURR_FW_STATS_AVAIL 19 +#define DP_HTT_DBG_EXT_STATS_MAX 256 +#define DP_MAX_SLEEP_TIME 100 +#ifndef QCA_WIFI_3_0_EMU +#define SUSPEND_DRAIN_WAIT 500 +#else +#define SUSPEND_DRAIN_WAIT 3000 +#endif + +#ifdef IPA_OFFLOAD +/* Exclude IPA rings from the interrupt context */ +#define TX_RING_MASK_VAL 0xb +#define RX_RING_MASK_VAL 0x7 +#else +#define TX_RING_MASK_VAL 0xF +#define RX_RING_MASK_VAL 0xF +#endif + +#define STR_MAXLEN 64 + +#define RNG_ERR "SRNG setup failed for" + +/* Threshold for peer's cached buf queue beyond which frames are dropped */ +#define DP_RX_CACHED_BUFQ_THRESH 64 + +/* Budget to reap monitor status ring */ +#define DP_MON_REAP_BUDGET 1024 + +/** + * default_dscp_tid_map - Default DSCP-TID mapping + * + * DSCP TID + * 000000 0 + * 001000 1 + * 010000 2 + * 011000 3 + * 100000 4 + * 101000 5 + * 110000 6 + * 111000 7 + */ +static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, + 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 7, 7, +}; + +/** + * default_pcp_tid_map - Default PCP-TID mapping + * + * PCP TID + * 000 0 + * 001 1 + * 010 2 + * 011 3 + * 100 4 + * 101 5 + * 110 6 + * 111 7 + */ +static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +/** + * @brief Cpu to tx ring map + */ +uint8_t +dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = { + {0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2}, + {0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1}, + {0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0}, + {0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2}, + {0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3}, +#ifdef WLAN_TX_PKT_CAPTURE_ENH + {0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1} +#endif +}; + +/** + * @brief Select the type of statistics + */ +enum dp_stats_type { + STATS_FW = 0, + STATS_HOST = 1, + STATS_TYPE_MAX = 2, +}; + +/** + * @brief General Firmware statistics options + * + */ +enum dp_fw_stats { + TXRX_FW_STATS_INVALID = -1, +}; + +/** + * dp_stats_mapping_table - Firmware and Host statistics + * currently supported + */ +const int dp_stats_mapping_table[][STATS_TYPE_MAX] = { + {HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID}, + {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID}, + /* Last ENUM for HTT FW STATS */ + {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID}, + {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_AST_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS}, + {TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS}, + {TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP}, +}; + +/* MCL specific functions */ +#if defined(DP_CON_MON) +/** + * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode + * @soc: pointer to dp_soc handle + * @intr_ctx_num: interrupt context number for which mon mask is needed + * + * For MCL, monitor mode rings are being processed in timer contexts (polled). + * This function is returning 0, since in interrupt mode(softirq based RX), + * we donot want to process monitor mode rings in a softirq. + * + * So, in case packet log is enabled for SAP/STA/P2P modes, + * regular interrupt processing will not process monitor mode rings. It would be + * done in a separate timer context. + * + * Return: 0 + */ +static inline +uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num) +{ + return 0; +} + +/* + * dp_service_mon_rings()- service monitor rings + * @soc: soc dp handle + * @quota: number of ring entry that can be serviced + * + * Return: None + * + */ +static void dp_service_mon_rings(struct dp_soc *soc, uint32_t quota) +{ + int ring = 0, work_done; + struct dp_pdev *pdev = NULL; + + for (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) { + pdev = dp_get_pdev_for_lmac_id(soc, ring); + if (!pdev) + continue; + work_done = dp_mon_process(soc, ring, quota); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + FL("Reaped %d descs from Monitor rings"), + work_done); + } +} + +/* + * dp_mon_reap_timer_handler()- timer to reap monitor rings + * reqd as we are not getting ppdu end interrupts + * @arg: SoC Handle + * + * Return: + * + */ +static void dp_mon_reap_timer_handler(void *arg) +{ + struct dp_soc *soc = (struct dp_soc *)arg; + + dp_service_mon_rings(soc, QCA_NAPI_BUDGET); + + qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS); +} + +#ifndef REMOVE_PKT_LOG +/** + * dp_pkt_log_init() - API to initialize packet log + * @soc_hdl: Datapath soc handle + * @pdev_id: id of data path pdev handle + * @scn: HIF context + * + * Return: none + */ +void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *handle = + dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (!handle) { + dp_err("pdev handle is NULL"); + return; + } + + if (handle->pkt_log_init) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Packet log not initialized", __func__); + return; + } + + pktlog_sethandle(&handle->pl_dev, scn); + pktlog_set_pdev_id(handle->pl_dev, pdev_id); + pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION); + + if (pktlogmod_init(scn)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: pktlogmod_init failed", __func__); + handle->pkt_log_init = false; + } else { + handle->pkt_log_init = true; + } +} + +/** + * dp_pkt_log_con_service() - connect packet log service + * @soc_hdl: Datapath soc handle + * @pdev_id: id of data path pdev handle + * @scn: device context + * + * Return: none + */ +static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, void *scn) +{ + dp_pkt_log_init(soc_hdl, pdev_id, scn); + pktlog_htc_attach(); +} + +/** + * dp_pktlogmod_exit() - API to cleanup pktlog info + * @pdev: Pdev handle + * + * Return: none + */ +static void dp_pktlogmod_exit(struct dp_pdev *pdev) +{ + struct dp_soc *soc = pdev->soc; + struct hif_opaque_softc *scn = soc->hif_handle; + + if (!scn) { + dp_err("Invalid hif(scn) handle"); + return; + } + + /* stop mon_reap_timer if it has been started */ + if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED && + soc->reap_timer_init && (!dp_is_enable_reap_timer_non_pkt(pdev))) + qdf_timer_sync_cancel(&soc->mon_reap_timer); + + pktlogmod_exit(scn); + pdev->pkt_log_init = false; +} +#else +static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, void *scn) +{ +} + +static void dp_pktlogmod_exit(struct dp_pdev *handle) { } +#endif +/** + * dp_get_num_rx_contexts() - get number of RX contexts + * @soc_hdl: cdp opaque soc handle + * + * Return: number of RX contexts + */ +static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl) +{ + int i; + int num_rx_contexts = 0; + + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + + for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) + if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i)) + num_rx_contexts++; + + return num_rx_contexts; +} + +#else +static void dp_pktlogmod_exit(struct dp_pdev *handle) { } + +/** + * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode + * @soc: pointer to dp_soc handle + * @intr_ctx_num: interrupt context number for which mon mask is needed + * + * Return: mon mask value + */ +static inline +uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num) +{ + return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num); +} + +/* + * dp_service_lmac_rings()- timer to reap lmac rings + * @arg: SoC Handle + * + * Return: + * + */ +static void dp_service_lmac_rings(void *arg) +{ + struct dp_soc *soc = (struct dp_soc *)arg; + int ring = 0, i; + struct dp_pdev *pdev = NULL; + union dp_rx_desc_list_elem_t *desc_list = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + + /* Process LMAC interrupts */ + for (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) { + int mac_for_pdev = ring; + struct dp_srng *rx_refill_buf_ring; + + pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev); + if (!pdev) + continue; + + rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev]; + + dp_mon_process(soc, mac_for_pdev, + QCA_NAPI_BUDGET); + + for (i = 0; + i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) + dp_rxdma_err_process(&soc->intr_ctx[i], soc, + mac_for_pdev, + QCA_NAPI_BUDGET); + + if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, + mac_for_pdev)) + dp_rx_buffers_replenish(soc, mac_for_pdev, + rx_refill_buf_ring, + &soc->rx_desc_buf[mac_for_pdev], + 0, &desc_list, &tail); + } + + qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS); +} + +#endif + +static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + uint8_t *peer_mac, + uint8_t *mac_addr, + enum cdp_txrx_ast_entry_type type, + uint32_t flags) +{ + int ret = -1; + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl, + peer_mac, 0, vdev_id); + + if (!peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Peer is NULL!\n", __func__); + goto fail; + } + + ret = dp_peer_add_ast((struct dp_soc *)soc_hdl, + peer, + mac_addr, + type, + flags); +fail: + if (peer) + dp_peer_unref_delete(peer); + + return ret; +} + +static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + uint8_t *peer_mac, + uint8_t *wds_macaddr, + uint32_t flags) +{ + int status = -1; + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_ast_entry *ast_entry = NULL; + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl, + peer_mac, 0, vdev_id); + + if (!peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Peer is NULL!\n", __func__); + goto fail; + } + + qdf_spin_lock_bh(&soc->ast_lock); + ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr, + peer->vdev->pdev->pdev_id); + + if (ast_entry) { + status = dp_peer_update_ast(soc, + peer, + ast_entry, flags); + } + qdf_spin_unlock_bh(&soc->ast_lock); + +fail: + if (peer) + dp_peer_unref_delete(peer); + + return status; +} + +/* + * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry + * @soc_handle: Datapath SOC handle + * @wds_macaddr: WDS entry MAC Address + * @peer_macaddr: WDS entry MAC Address + * @vdev_id: id of vdev handle + * Return: QDF_STATUS + */ +static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl, + uint8_t *wds_macaddr, + uint8_t *peer_mac_addr, + uint8_t vdev_id) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_ast_entry *ast_entry = NULL; + struct dp_ast_entry *tmp_ast_entry; + struct dp_peer *peer; + struct dp_pdev *pdev; + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + + if (!vdev) + return QDF_STATUS_E_FAILURE; + + pdev = vdev->pdev; + + if (peer_mac_addr) { + peer = dp_peer_find_hash_find(soc, peer_mac_addr, + 0, vdev->vdev_id); + if (!peer) { + return QDF_STATUS_E_FAILURE; + } + + if (peer->delete_in_progress) { + dp_peer_unref_delete(peer); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&soc->ast_lock); + DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) { + if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) || + (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)) + dp_peer_del_ast(soc, ast_entry); + } + qdf_spin_unlock_bh(&soc->ast_lock); + dp_peer_unref_delete(peer); + + return QDF_STATUS_SUCCESS; + } else if (wds_macaddr) { + qdf_spin_lock_bh(&soc->ast_lock); + ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr, + pdev->pdev_id); + + if (ast_entry) { + if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) || + (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)) + dp_peer_del_ast(soc, ast_entry); + } + qdf_spin_unlock_bh(&soc->ast_lock); + } + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry + * @soc: Datapath SOC handle + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id) +{ + struct dp_soc *soc = (struct dp_soc *) soc_hdl; + struct dp_pdev *pdev; + struct dp_vdev *vdev; + struct dp_peer *peer; + struct dp_ast_entry *ase, *temp_ase; + int i; + + qdf_spin_lock_bh(&soc->ast_lock); + + for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) { + pdev = soc->pdev_list[i]; + qdf_spin_lock_bh(&pdev->vdev_list_lock); + DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { + DP_VDEV_ITERATE_PEER_LIST(vdev, peer) { + DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) { + if ((ase->type == + CDP_TXRX_AST_TYPE_WDS_HM) || + (ase->type == + CDP_TXRX_AST_TYPE_WDS_HM_SEC)) + dp_peer_del_ast(soc, ase); + } + } + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + } + + qdf_spin_unlock_bh(&soc->ast_lock); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry + * @soc: Datapath SOC handle + * + * Return: None + */ +static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl) +{ + struct dp_soc *soc = (struct dp_soc *) soc_hdl; + struct dp_pdev *pdev; + struct dp_vdev *vdev; + struct dp_peer *peer; + struct dp_ast_entry *ase, *temp_ase; + int i; + + qdf_spin_lock_bh(&soc->ast_lock); + + for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) { + pdev = soc->pdev_list[i]; + qdf_spin_lock_bh(&pdev->vdev_list_lock); + DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { + DP_VDEV_ITERATE_PEER_LIST(vdev, peer) { + DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) { + if ((ase->type == + CDP_TXRX_AST_TYPE_STATIC) || + (ase->type == + CDP_TXRX_AST_TYPE_SELF) || + (ase->type == + CDP_TXRX_AST_TYPE_STA_BSS)) + continue; + dp_peer_del_ast(soc, ase); + } + } + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + } + + qdf_spin_unlock_bh(&soc->ast_lock); +} + +/** + * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table + * and return ast entry information + * of first ast entry found in the + * table with given mac address + * + * @soc : data path soc handle + * @ast_mac_addr : AST entry mac address + * @ast_entry_info : ast entry information + * + * return : true if ast entry found with ast_mac_addr + * false if ast entry not found + */ +static bool dp_peer_get_ast_info_by_soc_wifi3 + (struct cdp_soc_t *soc_hdl, + uint8_t *ast_mac_addr, + struct cdp_ast_entry_info *ast_entry_info) +{ + struct dp_ast_entry *ast_entry = NULL; + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + + qdf_spin_lock_bh(&soc->ast_lock); + + ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr); + if (!ast_entry || !ast_entry->peer) { + qdf_spin_unlock_bh(&soc->ast_lock); + return false; + } + if (ast_entry->delete_in_progress && !ast_entry->callback) { + qdf_spin_unlock_bh(&soc->ast_lock); + return false; + } + ast_entry_info->type = ast_entry->type; + ast_entry_info->pdev_id = ast_entry->pdev_id; + ast_entry_info->vdev_id = ast_entry->peer->vdev->vdev_id; + ast_entry_info->peer_id = ast_entry->peer->peer_ids[0]; + qdf_mem_copy(&ast_entry_info->peer_mac_addr[0], + &ast_entry->peer->mac_addr.raw[0], + QDF_MAC_ADDR_SIZE); + qdf_spin_unlock_bh(&soc->ast_lock); + return true; +} + +/** + * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table + * and return ast entry information + * if mac address and pdev_id matches + * + * @soc : data path soc handle + * @ast_mac_addr : AST entry mac address + * @pdev_id : pdev_id + * @ast_entry_info : ast entry information + * + * return : true if ast entry found with ast_mac_addr + * false if ast entry not found + */ +static bool dp_peer_get_ast_info_by_pdevid_wifi3 + (struct cdp_soc_t *soc_hdl, + uint8_t *ast_mac_addr, + uint8_t pdev_id, + struct cdp_ast_entry_info *ast_entry_info) +{ + struct dp_ast_entry *ast_entry; + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + + qdf_spin_lock_bh(&soc->ast_lock); + + ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id); + + if (!ast_entry || !ast_entry->peer) { + qdf_spin_unlock_bh(&soc->ast_lock); + return false; + } + if (ast_entry->delete_in_progress && !ast_entry->callback) { + qdf_spin_unlock_bh(&soc->ast_lock); + return false; + } + ast_entry_info->type = ast_entry->type; + ast_entry_info->pdev_id = ast_entry->pdev_id; + ast_entry_info->vdev_id = ast_entry->peer->vdev->vdev_id; + ast_entry_info->peer_id = ast_entry->peer->peer_ids[0]; + qdf_mem_copy(&ast_entry_info->peer_mac_addr[0], + &ast_entry->peer->mac_addr.raw[0], + QDF_MAC_ADDR_SIZE); + qdf_spin_unlock_bh(&soc->ast_lock); + return true; +} + +/** + * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table + * with given mac address + * + * @soc : data path soc handle + * @ast_mac_addr : AST entry mac address + * @callback : callback function to called on ast delete response from FW + * @cookie : argument to be passed to callback + * + * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete + * is sent + * QDF_STATUS_E_INVAL false if ast entry not found + */ +static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle, + uint8_t *mac_addr, + txrx_ast_free_cb callback, + void *cookie) + +{ + struct dp_soc *soc = (struct dp_soc *)soc_handle; + struct dp_ast_entry *ast_entry = NULL; + txrx_ast_free_cb cb = NULL; + void *arg = NULL; + + qdf_spin_lock_bh(&soc->ast_lock); + ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr); + if (!ast_entry) { + qdf_spin_unlock_bh(&soc->ast_lock); + return -QDF_STATUS_E_INVAL; + } + + if (ast_entry->callback) { + cb = ast_entry->callback; + arg = ast_entry->cookie; + } + + ast_entry->callback = callback; + ast_entry->cookie = cookie; + + /* + * if delete_in_progress is set AST delete is sent to target + * and host is waiting for response should not send delete + * again + */ + if (!ast_entry->delete_in_progress) + dp_peer_del_ast(soc, ast_entry); + + qdf_spin_unlock_bh(&soc->ast_lock); + if (cb) { + cb(soc->ctrl_psoc, + dp_soc_to_cdp_soc(soc), + arg, + CDP_TXRX_AST_DELETE_IN_PROGRESS); + } + return QDF_STATUS_SUCCESS; +} + +/** + * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash + * table if mac address and pdev_id matches + * + * @soc : data path soc handle + * @ast_mac_addr : AST entry mac address + * @pdev_id : pdev id + * @callback : callback function to called on ast delete response from FW + * @cookie : argument to be passed to callback + * + * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete + * is sent + * QDF_STATUS_E_INVAL false if ast entry not found + */ + +static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle, + uint8_t *mac_addr, + uint8_t pdev_id, + txrx_ast_free_cb callback, + void *cookie) + +{ + struct dp_soc *soc = (struct dp_soc *)soc_handle; + struct dp_ast_entry *ast_entry; + txrx_ast_free_cb cb = NULL; + void *arg = NULL; + + qdf_spin_lock_bh(&soc->ast_lock); + ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id); + + if (!ast_entry) { + qdf_spin_unlock_bh(&soc->ast_lock); + return -QDF_STATUS_E_INVAL; + } + + if (ast_entry->callback) { + cb = ast_entry->callback; + arg = ast_entry->cookie; + } + + ast_entry->callback = callback; + ast_entry->cookie = cookie; + + /* + * if delete_in_progress is set AST delete is sent to target + * and host is waiting for response should not sent delete + * again + */ + if (!ast_entry->delete_in_progress) + dp_peer_del_ast(soc, ast_entry); + + qdf_spin_unlock_bh(&soc->ast_lock); + + if (cb) { + cb(soc->ctrl_psoc, + dp_soc_to_cdp_soc(soc), + arg, + CDP_TXRX_AST_DELETE_IN_PROGRESS); + } + return QDF_STATUS_SUCCESS; +} + +/** + * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs + * @ring_num: ring num of the ring being queried + * @grp_mask: the grp_mask array for the ring type in question. + * + * The grp_mask array is indexed by group number and the bit fields correspond + * to ring numbers. We are finding which interrupt group a ring belongs to. + * + * Return: the index in the grp_mask array with the ring number. + * -QDF_STATUS_E_NOENT if no entry is found + */ +static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask) +{ + int ext_group_num; + int mask = 1 << ring_num; + + for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS; + ext_group_num++) { + if (mask & grp_mask[ext_group_num]) + return ext_group_num; + } + + return -QDF_STATUS_E_NOENT; +} + +static int dp_srng_calculate_msi_group(struct dp_soc *soc, + enum hal_ring_type ring_type, + int ring_num) +{ + int *grp_mask; + + switch (ring_type) { + case WBM2SW_RELEASE: + /* dp_tx_comp_handler - soc->tx_comp_ring */ + if (ring_num < 3) + grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0]; + + /* dp_rx_wbm_err_process - soc->rx_rel_ring */ + else if (ring_num == 3) { + /* sw treats this as a separate ring type */ + grp_mask = &soc->wlan_cfg_ctx-> + int_rx_wbm_rel_ring_mask[0]; + ring_num = 0; + } else { + qdf_assert(0); + return -QDF_STATUS_E_NOENT; + } + break; + + case REO_EXCEPTION: + /* dp_rx_err_process - &soc->reo_exception_ring */ + grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0]; + break; + + case REO_DST: + /* dp_rx_process - soc->reo_dest_ring */ + grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0]; + break; + + case REO_STATUS: + /* dp_reo_status_ring_handler - soc->reo_status_ring */ + grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0]; + break; + + /* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/ + case RXDMA_MONITOR_STATUS: + /* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */ + case RXDMA_MONITOR_DST: + /* dp_mon_process */ + grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0]; + break; + case RXDMA_DST: + /* dp_rxdma_err_process */ + grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0]; + break; + + case RXDMA_BUF: + grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0]; + break; + + case RXDMA_MONITOR_BUF: + /* TODO: support low_thresh interrupt */ + return -QDF_STATUS_E_NOENT; + break; + + case TCL_DATA: + case TCL_CMD: + case REO_CMD: + case SW2WBM_RELEASE: + case WBM_IDLE_LINK: + /* normally empty SW_TO_HW rings */ + return -QDF_STATUS_E_NOENT; + break; + + case TCL_STATUS: + case REO_REINJECT: + /* misc unused rings */ + return -QDF_STATUS_E_NOENT; + break; + + case CE_SRC: + case CE_DST: + case CE_DST_STATUS: + /* CE_rings - currently handled by hif */ + default: + return -QDF_STATUS_E_NOENT; + break; + } + + return dp_srng_find_ring_in_mask(ring_num, grp_mask); +} + +static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params + *ring_params, int ring_type, int ring_num) +{ + int msi_group_number; + int msi_data_count; + int ret; + uint32_t msi_data_start, msi_irq_start, addr_low, addr_high; + + ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP", + &msi_data_count, &msi_data_start, + &msi_irq_start); + + if (ret) + return; + + msi_group_number = dp_srng_calculate_msi_group(soc, ring_type, + ring_num); + if (msi_group_number < 0) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, + FL("ring not part of an ext_group; ring_type: %d,ring_num %d"), + ring_type, ring_num); + ring_params->msi_addr = 0; + ring_params->msi_data = 0; + return; + } + + if (msi_group_number > msi_data_count) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN, + FL("2 msi_groups will share an msi; msi_group_num %d"), + msi_group_number); + + QDF_ASSERT(0); + } + + pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high); + + ring_params->msi_addr = addr_low; + ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32); + ring_params->msi_data = (msi_group_number % msi_data_count) + + msi_data_start; + ring_params->flags |= HAL_SRNG_MSI_INTR; +} + +/** + * dp_print_ast_stats() - Dump AST table contents + * @soc: Datapath soc handle + * + * return void + */ +#ifdef FEATURE_AST +void dp_print_ast_stats(struct dp_soc *soc) +{ + uint8_t i; + uint8_t num_entries = 0; + struct dp_vdev *vdev; + struct dp_pdev *pdev; + struct dp_peer *peer; + struct dp_ast_entry *ase, *tmp_ase; + char type[CDP_TXRX_AST_TYPE_MAX][10] = { + "NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS", + "DA", "HMWDS_SEC"}; + + DP_PRINT_STATS("AST Stats:"); + DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added); + DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted); + DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out); + DP_PRINT_STATS(" Entries MAP ERR = %d", soc->stats.ast.map_err); + + DP_PRINT_STATS("AST Table:"); + + qdf_spin_lock_bh(&soc->ast_lock); + for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) { + pdev = soc->pdev_list[i]; + qdf_spin_lock_bh(&pdev->vdev_list_lock); + DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { + DP_VDEV_ITERATE_PEER_LIST(vdev, peer) { + DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) { + DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT + " peer_mac_addr = "QDF_MAC_ADDR_FMT + " peer_id = %u" + " type = %s" + " next_hop = %d" + " is_active = %d" + " ast_idx = %d" + " ast_hash = %d" + " delete_in_progress = %d" + " pdev_id = %d" + " vdev_id = %d", + ++num_entries, + QDF_MAC_ADDR_REF(ase->mac_addr.raw), + QDF_MAC_ADDR_REF(ase->peer->mac_addr.raw), + ase->peer->peer_ids[0], + type[ase->type], + ase->next_hop, + ase->is_active, + ase->ast_idx, + ase->ast_hash_value, + ase->delete_in_progress, + ase->pdev_id, + vdev->vdev_id); + } + } + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + } + qdf_spin_unlock_bh(&soc->ast_lock); +} +#else +void dp_print_ast_stats(struct dp_soc *soc) +{ + DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST"); + return; +} +#endif + +/** + * dp_print_peer_table() - Dump all Peer stats + * @vdev: Datapath Vdev handle + * + * return void + */ +static void dp_print_peer_table(struct dp_vdev *vdev) +{ + struct dp_peer *peer = NULL; + + DP_PRINT_STATS("Dumping Peer Table Stats:"); + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if (!peer) { + DP_PRINT_STATS("Invalid Peer"); + return; + } + DP_PRINT_STATS(" peer_mac_addr = "QDF_MAC_ADDR_FMT + " nawds_enabled = %d" + " bss_peer = %d" + " wds_enabled = %d" + " tx_cap_enabled = %d" + " rx_cap_enabled = %d" + " delete in progress = %d" + " peer id = %d", + QDF_MAC_ADDR_REF(peer->mac_addr.raw), + peer->nawds_enabled, + peer->bss_peer, + peer->wds_enabled, + peer->tx_cap_enabled, + peer->rx_cap_enabled, + peer->delete_in_progress, + peer->peer_ids[0]); + } +} + +#ifdef WLAN_DP_PER_RING_TYPE_CONFIG +/** + * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt + * threshold values from the wlan_srng_cfg table for each ring type + * @soc: device handle + * @ring_params: per ring specific parameters + * @ring_type: Ring type + * @ring_num: Ring number for a given ring type + * + * Fill the ring params with the interrupt threshold + * configuration parameters available in the per ring type wlan_srng_cfg + * table. + * + * Return: None + */ +static void +dp_srng_configure_interrupt_thresholds(struct dp_soc *soc, + struct hal_srng_params *ring_params, + int ring_type, int ring_num, + int num_entries) +{ + if (ring_type == WBM2SW_RELEASE && (ring_num == 3)) { + ring_params->intr_timer_thres_us = + wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx); + ring_params->intr_batch_cntr_thres_entries = + wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx); + } else { + ring_params->intr_timer_thres_us = + soc->wlan_srng_cfg[ring_type].timer_threshold; + ring_params->intr_batch_cntr_thres_entries = + soc->wlan_srng_cfg[ring_type].batch_count_threshold; + } + ring_params->low_threshold = + soc->wlan_srng_cfg[ring_type].low_threshold; + + if (ring_params->low_threshold) + ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE; +} +#else +static void +dp_srng_configure_interrupt_thresholds(struct dp_soc *soc, + struct hal_srng_params *ring_params, + int ring_type, int ring_num, + int num_entries) +{ + if (ring_type == REO_DST) { + ring_params->intr_timer_thres_us = + wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx); + ring_params->intr_batch_cntr_thres_entries = + wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx); + } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) { + ring_params->intr_timer_thres_us = + wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx); + ring_params->intr_batch_cntr_thres_entries = + wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx); + } else { + ring_params->intr_timer_thres_us = + wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx); + ring_params->intr_batch_cntr_thres_entries = + wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx); + } + + /* Enable low threshold interrupts for rx buffer rings (regular and + * monitor buffer rings. + * TODO: See if this is required for any other ring + */ + if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) || + (ring_type == RXDMA_MONITOR_STATUS)) { + /* TODO: Setting low threshold to 1/8th of ring size + * see if this needs to be configurable + */ + ring_params->low_threshold = num_entries >> 3; + ring_params->intr_timer_thres_us = + wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx); + ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE; + ring_params->intr_batch_cntr_thres_entries = 0; + } +} +#endif + + +#ifdef DP_MEM_PRE_ALLOC +void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, + size_t ctxt_size) +{ + void *ctxt_mem; + + if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) { + dp_warn("dp_prealloc_get_context null!"); + goto dynamic_alloc; + } + + ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type); + + if (ctxt_mem) + goto end; + +dynamic_alloc: + dp_info("Pre-alloc of ctxt failed. Dynamic allocation"); + ctxt_mem = qdf_mem_malloc(ctxt_size); +end: + return ctxt_mem; +} + +void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type, + void *vaddr) +{ + QDF_STATUS status; + + if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) { + status = soc->cdp_soc.ol_ops->dp_prealloc_put_context( + ctxt_type, + vaddr); + } else { + dp_warn("dp_prealloc_get_context null!"); + status = QDF_STATUS_E_NOSUPPORT; + } + + if (QDF_IS_STATUS_ERROR(status)) { + dp_info("Context not pre-allocated"); + qdf_mem_free(vaddr); + } +} + +static inline +void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc, + struct dp_srng *srng, + struct hal_srng_params *ring_params, + uint32_t ring_type) +{ + void *mem; + + qdf_assert(!srng->is_mem_prealloc); + + if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) { + dp_warn("dp_prealloc_get_consistent is null!"); + goto qdf; + } + + mem = + soc->cdp_soc.ol_ops->dp_prealloc_get_consistent + (&srng->alloc_size, + &srng->base_vaddr_unaligned, + &srng->base_paddr_unaligned, + &ring_params->ring_base_paddr, + DP_RING_BASE_ALIGN, ring_type); + + if (mem) { + srng->is_mem_prealloc = true; + goto end; + } +qdf: + mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size, + &srng->base_vaddr_unaligned, + &srng->base_paddr_unaligned, + &ring_params->ring_base_paddr, + DP_RING_BASE_ALIGN); +end: + dp_info("%s memory %pK dp_srng %pK alloc_size %d num_entries %d", + srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem, + srng, srng->alloc_size, srng->num_entries); + return mem; +} + +static inline void dp_srng_mem_free_consistent(struct dp_soc *soc, + struct dp_srng *srng) +{ + if (srng->is_mem_prealloc) { + if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) { + dp_warn("dp_prealloc_put_consistent is null!"); + QDF_BUG(0); + return; + } + soc->cdp_soc.ol_ops->dp_prealloc_put_consistent + (srng->alloc_size, + srng->base_vaddr_unaligned, + srng->base_paddr_unaligned); + + } else { + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + srng->alloc_size, + srng->base_vaddr_unaligned, + srng->base_paddr_unaligned, 0); + } +} + +void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc, + enum dp_desc_type desc_type, + struct qdf_mem_multi_page_t *pages, + size_t element_size, + uint16_t element_num, + qdf_dma_context_t memctxt, + bool cacheable) +{ + if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) { + dp_warn("dp_get_multi_pages is null!"); + goto qdf; + } + + pages->num_pages = 0; + pages->is_mem_prealloc = 0; + soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type, + element_size, + element_num, + pages, + cacheable); + if (pages->num_pages) + goto end; + +qdf: + qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size, + element_num, memctxt, cacheable); +end: + dp_info("%s desc_type %d element_size %d element_num %d cacheable %d", + pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", + desc_type, (int)element_size, element_num, cacheable); +} + +void dp_desc_multi_pages_mem_free(struct dp_soc *soc, + enum dp_desc_type desc_type, + struct qdf_mem_multi_page_t *pages, + qdf_dma_context_t memctxt, + bool cacheable) +{ + if (pages->is_mem_prealloc) { + if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) { + dp_warn("dp_put_multi_pages is null!"); + QDF_BUG(0); + return; + } + + soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages); + qdf_mem_zero(pages, sizeof(*pages)); + } else { + qdf_mem_multi_pages_free(soc->osdev, pages, + memctxt, cacheable); + } +} + +#else + +static inline +void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc, + struct dp_srng *srng, + struct hal_srng_params *ring_params, + uint32_t ring_type) + +{ + return qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size, + &srng->base_vaddr_unaligned, + &srng->base_paddr_unaligned, + &ring_params->ring_base_paddr, + DP_RING_BASE_ALIGN); +} + +static inline void dp_srng_mem_free_consistent(struct dp_soc *soc, + struct dp_srng *srng) +{ + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + srng->alloc_size, + srng->base_vaddr_unaligned, + srng->base_paddr_unaligned, 0); +} + +#endif /* DP_MEM_PRE_ALLOC */ +/** + * dp_srng_setup() - Internal function to setup SRNG rings used by data path + * @soc: datapath soc handle + * @srng: srng handle + * @ring_type: ring that needs to be configured + * @mac_id: mac number + * @num_entries: Total number of entries for a given ring + * + * Return: non-zero - failure/zero - success + */ +static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng, + int ring_type, int ring_num, int mac_id, + uint32_t num_entries, bool cached) +{ + hal_soc_handle_t hal_soc = soc->hal_soc; + uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type); + struct hal_srng_params ring_params; + uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type); + + /* TODO: Currently hal layer takes care of endianness related settings. + * See if these settings need to passed from DP layer + */ + qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params)); + + num_entries = (num_entries > max_entries) ? max_entries : num_entries; + srng->hal_srng = NULL; + srng->alloc_size = num_entries * entry_size; + srng->num_entries = num_entries; + + if (!dp_is_soc_reinit(soc)) { + if (!cached) { + ring_params.ring_base_vaddr = + dp_srng_aligned_mem_alloc_consistent(soc, srng, + &ring_params, + ring_type); + } else { + ring_params.ring_base_vaddr = qdf_aligned_malloc( + &srng->alloc_size, + &srng->base_vaddr_unaligned, + &srng->base_paddr_unaligned, + &ring_params.ring_base_paddr, + DP_RING_BASE_ALIGN); + } + + if (!ring_params.ring_base_vaddr) { + dp_err("alloc failed - ring_type: %d, ring_num %d", + ring_type, ring_num); + return QDF_STATUS_E_NOMEM; + } + } + + ring_params.ring_base_paddr = (qdf_dma_addr_t)qdf_align( + (unsigned long)(srng->base_paddr_unaligned), + DP_RING_BASE_ALIGN); + + ring_params.ring_base_vaddr = (void *)( + (unsigned long)(srng->base_vaddr_unaligned) + + ((unsigned long)(ring_params.ring_base_paddr) - + (unsigned long)(srng->base_paddr_unaligned))); + + qdf_assert_always(ring_params.ring_base_vaddr); + + ring_params.num_entries = num_entries; + + dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u", + ring_type, ring_num, + (void *)ring_params.ring_base_vaddr, + (void *)ring_params.ring_base_paddr, + ring_params.num_entries); + + if (soc->intr_mode == DP_INTR_MSI) { + dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num); + dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d", + ring_type, ring_num); + + } else { + ring_params.msi_data = 0; + ring_params.msi_addr = 0; + dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d", + ring_type, ring_num); + } + + dp_srng_configure_interrupt_thresholds(soc, &ring_params, + ring_type, ring_num, + num_entries); + + if (cached) { + ring_params.flags |= HAL_SRNG_CACHED_DESC; + srng->cached = 1; + } + + srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num, + mac_id, &ring_params); + + if (!srng->hal_srng) { + if (cached) { + qdf_mem_free(srng->base_vaddr_unaligned); + } else { + dp_srng_mem_free_consistent(soc, srng); + } + } + + return 0; +} + +/* + * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path + * @soc: DP SOC handle + * @srng: source ring structure + * @ring_type: type of ring + * @ring_num: ring number + * + * Return: None + */ +static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng, + int ring_type, int ring_num) +{ + if (!srng->hal_srng) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Ring type: %d, num:%d not setup"), + ring_type, ring_num); + return; + } + + hal_srng_cleanup(soc->hal_soc, srng->hal_srng); + srng->hal_srng = NULL; +} + +/** + * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path + * Any buffers allocated and attached to ring entries are expected to be freed + * before calling this function. + */ +static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng, + int ring_type, int ring_num) +{ + if (!dp_is_soc_reinit(soc)) { + if (!srng->hal_srng && (srng->alloc_size == 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Ring type: %d, num:%d not setup"), + ring_type, ring_num); + return; + } + + if (srng->hal_srng) { + hal_srng_cleanup(soc->hal_soc, srng->hal_srng); + srng->hal_srng = NULL; + } + } + + if (srng->alloc_size && srng->base_vaddr_unaligned) { + if (!srng->cached) { + dp_srng_mem_free_consistent(soc, srng); + } else { + qdf_mem_free(srng->base_vaddr_unaligned); + } + srng->alloc_size = 0; + srng->base_vaddr_unaligned = NULL; + } + srng->hal_srng = NULL; +} + +/* TODO: Need this interface from HIF */ +void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle); + +#ifdef WLAN_FEATURE_DP_EVENT_HISTORY +int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc, + hal_ring_handle_t hal_ring_hdl) +{ + hal_soc_handle_t hal_soc = dp_soc->hal_soc; + uint32_t hp, tp; + uint8_t ring_id; + + hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp); + ring_id = hal_srng_ring_id_get(hal_ring_hdl); + + hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id, + ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START); + + return hal_srng_access_start(hal_soc, hal_ring_hdl); +} + +void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc, + hal_ring_handle_t hal_ring_hdl) +{ + hal_soc_handle_t hal_soc = dp_soc->hal_soc; + uint32_t hp, tp; + uint8_t ring_id; + + hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp); + ring_id = hal_srng_ring_id_get(hal_ring_hdl); + + hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id, + ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END); + + return hal_srng_access_end(hal_soc, hal_ring_hdl); +} +#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ + +/* + * dp_should_timer_irq_yield() - Decide if the bottom half should yield + * @soc: DP soc handle + * @work_done: work done in softirq context + * @start_time: start time for the softirq + * + * Return: enum with yield code + */ +static enum timer_yield_status +dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done, + uint64_t start_time) +{ + uint64_t cur_time = qdf_get_log_timestamp(); + + if (!work_done) + return DP_TIMER_WORK_DONE; + + if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS) + return DP_TIMER_TIME_EXHAUST; + + return DP_TIMER_NO_YIELD; +} + +/** + * dp_process_lmac_rings() - Process LMAC rings + * @int_ctx: interrupt context + * @total_budget: budget of work which can be done + * + * Return: work done + */ +static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget) +{ + struct dp_intr_stats *intr_stats = &int_ctx->intr_stats; + struct dp_soc *soc = int_ctx->soc; + uint32_t remaining_quota = total_budget; + struct dp_pdev *pdev = NULL; + uint32_t work_done = 0; + int budget = total_budget; + int ring = 0; + + /* Process LMAC interrupts */ + for (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) { + int mac_for_pdev = ring; + + pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev); + if (!pdev) + continue; + if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) { + work_done = dp_mon_process(soc, mac_for_pdev, + remaining_quota); + if (work_done) + intr_stats->num_rx_mon_ring_masks++; + budget -= work_done; + if (budget <= 0) + goto budget_done; + remaining_quota = budget; + } + + if (int_ctx->rxdma2host_ring_mask & + (1 << mac_for_pdev)) { + work_done = dp_rxdma_err_process(int_ctx, soc, + mac_for_pdev, + remaining_quota); + if (work_done) + intr_stats->num_rxdma2host_ring_masks++; + budget -= work_done; + if (budget <= 0) + goto budget_done; + remaining_quota = budget; + } + + if (int_ctx->host2rxdma_ring_mask & + (1 << mac_for_pdev)) { + union dp_rx_desc_list_elem_t *desc_list = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + struct dp_srng *rx_refill_buf_ring; + + if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) + rx_refill_buf_ring = + &soc->rx_refill_buf_ring[mac_for_pdev]; + else + rx_refill_buf_ring = + &soc->rx_refill_buf_ring[pdev->lmac_id]; + + intr_stats->num_host2rxdma_ring_masks++; + DP_STATS_INC(pdev, replenish.low_thresh_intrs, + 1); + dp_rx_buffers_replenish(soc, mac_for_pdev, + rx_refill_buf_ring, + &soc->rx_desc_buf[mac_for_pdev], + 0, &desc_list, &tail); + } + } + +budget_done: + return total_budget - budget; +} + +/* + * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts + * @dp_ctx: DP SOC handle + * @budget: Number of frames/descriptors that can be processed in one shot + * + * Return: remaining budget/quota for the soc device + */ +static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget) +{ + struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx; + struct dp_intr_stats *intr_stats = &int_ctx->intr_stats; + struct dp_soc *soc = int_ctx->soc; + int ring = 0; + uint32_t work_done = 0; + int budget = dp_budget; + uint8_t tx_mask = int_ctx->tx_ring_mask; + uint8_t rx_mask = int_ctx->rx_ring_mask; + uint8_t rx_err_mask = int_ctx->rx_err_ring_mask; + uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask; + uint8_t reo_status_mask = int_ctx->reo_status_ring_mask; + uint32_t remaining_quota = dp_budget; + + dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n", + tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask, + reo_status_mask, + int_ctx->rx_mon_ring_mask, + int_ctx->host2rxdma_ring_mask, + int_ctx->rxdma2host_ring_mask); + + /* Process Tx completion interrupts first to return back buffers */ + while (tx_mask) { + if (tx_mask & 0x1) { + work_done = dp_tx_comp_handler(int_ctx, + soc, + soc->tx_comp_ring[ring].hal_srng, + ring, remaining_quota); + + if (work_done) { + intr_stats->num_tx_ring_masks[ring]++; + dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d", + tx_mask, ring, budget, + work_done); + } + + budget -= work_done; + if (budget <= 0) + goto budget_done; + + remaining_quota = budget; + } + tx_mask = tx_mask >> 1; + ring++; + } + + /* Process REO Exception ring interrupt */ + if (rx_err_mask) { + work_done = dp_rx_err_process(int_ctx, soc, + soc->reo_exception_ring.hal_srng, + remaining_quota); + + if (work_done) { + intr_stats->num_rx_err_ring_masks++; + dp_verbose_debug("REO Exception Ring: work_done %d budget %d", + work_done, budget); + } + + budget -= work_done; + if (budget <= 0) { + goto budget_done; + } + remaining_quota = budget; + } + + /* Process Rx WBM release ring interrupt */ + if (rx_wbm_rel_mask) { + work_done = dp_rx_wbm_err_process(int_ctx, soc, + soc->rx_rel_ring.hal_srng, + remaining_quota); + + if (work_done) { + intr_stats->num_rx_wbm_rel_ring_masks++; + dp_verbose_debug("WBM Release Ring: work_done %d budget %d", + work_done, budget); + } + + budget -= work_done; + if (budget <= 0) { + goto budget_done; + } + remaining_quota = budget; + } + + /* Process Rx interrupts */ + if (rx_mask) { + for (ring = 0; ring < soc->num_reo_dest_rings; ring++) { + if (!(rx_mask & (1 << ring))) + continue; + work_done = dp_rx_process(int_ctx, + soc->reo_dest_ring[ring].hal_srng, + ring, + remaining_quota); + if (work_done) { + intr_stats->num_rx_ring_masks[ring]++; + dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d", + rx_mask, ring, + work_done, budget); + budget -= work_done; + if (budget <= 0) + goto budget_done; + remaining_quota = budget; + } + } + } + + if (reo_status_mask) { + if (dp_reo_status_ring_handler(int_ctx, soc)) + int_ctx->intr_stats.num_reo_status_ring_masks++; + } + + work_done = dp_process_lmac_rings(int_ctx, remaining_quota); + if (work_done) { + budget -= work_done; + if (budget <= 0) + goto budget_done; + remaining_quota = budget; + } + + qdf_lro_flush(int_ctx->lro_ctx); + intr_stats->num_masks++; + +budget_done: + return dp_budget - budget; +} + +/* dp_interrupt_timer()- timer poll for interrupts + * + * @arg: SoC Handle + * + * Return: + * + */ +static void dp_interrupt_timer(void *arg) +{ + struct dp_soc *soc = (struct dp_soc *) arg; + enum timer_yield_status yield = DP_TIMER_NO_YIELD; + uint32_t work_done = 0, total_work_done = 0; + int budget = 0xffff; + uint32_t remaining_quota = budget; + uint64_t start_time; + int i; + + if (!qdf_atomic_read(&soc->cmn_init_done)) + return; + + start_time = qdf_get_log_timestamp(); + + while (yield == DP_TIMER_NO_YIELD) { + for (i = 0; + i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) { + if (!soc->intr_ctx[i].rx_mon_ring_mask) + continue; + + work_done = dp_process_lmac_rings(&soc->intr_ctx[i], + remaining_quota); + if (work_done) { + budget -= work_done; + if (budget <= 0) { + yield = DP_TIMER_WORK_EXHAUST; + goto budget_done; + } + remaining_quota = budget; + total_work_done += work_done; + } + } + + yield = dp_should_timer_irq_yield(soc, total_work_done, + start_time); + total_work_done = 0; + } + +budget_done: + if (yield == DP_TIMER_WORK_EXHAUST || + yield == DP_TIMER_TIME_EXHAUST) + qdf_timer_mod(&soc->int_timer, 1); + else + qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS); +} + +/* + * dp_soc_attach_poll() - Register handlers for DP interrupts + * @txrx_soc: DP SOC handle + * + * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS†number of NAPI + * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and + * rx_monitor_ring mask to indicate the rings that are processed by the handler. + * + * Return: 0 for success, nonzero for failure. + */ +static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + int i; + + soc->intr_mode = DP_INTR_POLL; + + for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) { + soc->intr_ctx[i].dp_intr_id = i; + soc->intr_ctx[i].tx_ring_mask = + wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i); + soc->intr_ctx[i].rx_ring_mask = + wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i); + soc->intr_ctx[i].rx_mon_ring_mask = + wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i); + soc->intr_ctx[i].rx_err_ring_mask = + wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i); + soc->intr_ctx[i].rx_wbm_rel_ring_mask = + wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i); + soc->intr_ctx[i].reo_status_ring_mask = + wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i); + soc->intr_ctx[i].rxdma2host_ring_mask = + wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i); + soc->intr_ctx[i].soc = soc; + soc->intr_ctx[i].lro_ctx = qdf_lro_init(); + } + + qdf_timer_init(soc->osdev, &soc->int_timer, + dp_interrupt_timer, (void *)soc, + QDF_TIMER_TYPE_WAKE_APPS); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc + * soc: DP soc handle + * + * Set the appropriate interrupt mode flag in the soc + */ +static void dp_soc_set_interrupt_mode(struct dp_soc *soc) +{ + uint32_t msi_base_data, msi_vector_start; + int msi_vector_count, ret; + + soc->intr_mode = DP_INTR_INTEGRATED; + + if (!(soc->wlan_cfg_ctx->napi_enabled) || + (soc->cdp_soc.ol_ops->get_con_mode && + soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) { + soc->intr_mode = DP_INTR_POLL; + } else { + ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP", + &msi_vector_count, + &msi_base_data, + &msi_vector_start); + if (ret) + return; + + soc->intr_mode = DP_INTR_MSI; + } +} + +static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc); +#if defined(DP_INTR_POLL_BOTH) +/* + * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts + * @txrx_soc: DP SOC handle + * + * Call the appropriate attach function based on the mode of operation. + * This is a WAR for enabling monitor mode. + * + * Return: 0 for success. nonzero for failure. + */ +static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + + if (!(soc->wlan_cfg_ctx->napi_enabled) || + (soc->cdp_soc.ol_ops->get_con_mode && + soc->cdp_soc.ol_ops->get_con_mode() == + QDF_GLOBAL_MONITOR_MODE)) { + dp_info("Poll mode"); + return dp_soc_attach_poll(txrx_soc); + } else { + dp_info("Interrupt mode"); + return dp_soc_interrupt_attach(txrx_soc); + } +} +#else +#if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED +static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc) +{ + return dp_soc_attach_poll(txrx_soc); +} +#else +static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + + if (hif_is_polled_mode_enabled(soc->hif_handle)) + return dp_soc_attach_poll(txrx_soc); + else + return dp_soc_interrupt_attach(txrx_soc); +} +#endif +#endif + +static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc, + int intr_ctx_num, int *irq_id_map, int *num_irq_r) +{ + int j; + int num_irq = 0; + + int tx_mask = + wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num); + int rx_mask = + wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num); + int rx_mon_mask = + wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num); + int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + + soc->intr_mode = DP_INTR_INTEGRATED; + + for (j = 0; j < HIF_MAX_GRP_IRQ; j++) { + + if (tx_mask & (1 << j)) { + irq_id_map[num_irq++] = + (wbm2host_tx_completions_ring1 - j); + } + + if (rx_mask & (1 << j)) { + irq_id_map[num_irq++] = + (reo2host_destination_ring1 - j); + } + + if (rxdma2host_ring_mask & (1 << j)) { + irq_id_map[num_irq++] = + rxdma2host_destination_ring_mac1 - j; + } + + if (host2rxdma_ring_mask & (1 << j)) { + irq_id_map[num_irq++] = + host2rxdma_host_buf_ring_mac1 - j; + } + + if (host2rxdma_mon_ring_mask & (1 << j)) { + irq_id_map[num_irq++] = + host2rxdma_monitor_ring1 - j; + } + + if (rx_mon_mask & (1 << j)) { + irq_id_map[num_irq++] = + ppdu_end_interrupts_mac1 - j; + irq_id_map[num_irq++] = + rxdma2host_monitor_status_ring_mac1 - j; + } + + if (rx_wbm_rel_ring_mask & (1 << j)) + irq_id_map[num_irq++] = wbm2host_rx_release; + + if (rx_err_ring_mask & (1 << j)) + irq_id_map[num_irq++] = reo2host_exception; + + if (reo_status_ring_mask & (1 << j)) + irq_id_map[num_irq++] = reo2host_status; + + } + *num_irq_r = num_irq; +} + +static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc, + int intr_ctx_num, int *irq_id_map, int *num_irq_r, + int msi_vector_count, int msi_vector_start) +{ + int tx_mask = wlan_cfg_get_tx_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int rx_mask = wlan_cfg_get_rx_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + + unsigned int vector = + (intr_ctx_num % msi_vector_count) + msi_vector_start; + int num_irq = 0; + + soc->intr_mode = DP_INTR_MSI; + + if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask | + rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask) + irq_id_map[num_irq++] = + pld_get_msi_irq(soc->osdev->dev, vector); + + *num_irq_r = num_irq; +} + +static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num, + int *irq_id_map, int *num_irq) +{ + int msi_vector_count, ret; + uint32_t msi_base_data, msi_vector_start; + + ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP", + &msi_vector_count, + &msi_base_data, + &msi_vector_start); + if (ret) + return dp_soc_interrupt_map_calculate_integrated(soc, + intr_ctx_num, irq_id_map, num_irq); + + else + dp_soc_interrupt_map_calculate_msi(soc, + intr_ctx_num, irq_id_map, num_irq, + msi_vector_count, msi_vector_start); +} + +/* + * dp_soc_interrupt_attach() - Register handlers for DP interrupts + * @txrx_soc: DP SOC handle + * + * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS†number of NAPI + * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and + * rx_monitor_ring mask to indicate the rings that are processed by the handler. + * + * Return: 0 for success. nonzero for failure. + */ +static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + + int i = 0; + int num_irq = 0; + + for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) { + int ret = 0; + + /* Map of IRQ ids registered with one interrupt context */ + int irq_id_map[HIF_MAX_GRP_IRQ]; + + int tx_mask = + wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i); + int rx_mask = + wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i); + int rx_mon_mask = + dp_soc_get_mon_mask_for_interrupt_mode(soc, i); + int rx_err_ring_mask = + wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i); + int rx_wbm_rel_ring_mask = + wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i); + int reo_status_ring_mask = + wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i); + int rxdma2host_ring_mask = + wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i); + int host2rxdma_ring_mask = + wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i); + int host2rxdma_mon_ring_mask = + wlan_cfg_get_host2rxdma_mon_ring_mask( + soc->wlan_cfg_ctx, i); + + soc->intr_ctx[i].dp_intr_id = i; + soc->intr_ctx[i].tx_ring_mask = tx_mask; + soc->intr_ctx[i].rx_ring_mask = rx_mask; + soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask; + soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask; + soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask; + soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask; + soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask; + soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask; + soc->intr_ctx[i].host2rxdma_mon_ring_mask = + host2rxdma_mon_ring_mask; + + soc->intr_ctx[i].soc = soc; + + num_irq = 0; + + dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0], + &num_irq); + + ret = hif_register_ext_group(soc->hif_handle, + num_irq, irq_id_map, dp_service_srngs, + &soc->intr_ctx[i], "dp_intr", + HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT); + + if (ret) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("failed, ret = %d"), ret); + + return QDF_STATUS_E_FAILURE; + } + soc->intr_ctx[i].lro_ctx = qdf_lro_init(); + } + + hif_configure_ext_group_interrupts(soc->hif_handle); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts + * @txrx_soc: DP SOC handle + * + * Return: none + */ +static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + int i; + + if (soc->intr_mode == DP_INTR_POLL) { + qdf_timer_free(&soc->int_timer); + } else { + hif_deconfigure_ext_group_interrupts(soc->hif_handle); + hif_deregister_exec_group(soc->hif_handle, "dp_intr"); + } + + for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) { + soc->intr_ctx[i].tx_ring_mask = 0; + soc->intr_ctx[i].rx_ring_mask = 0; + soc->intr_ctx[i].rx_mon_ring_mask = 0; + soc->intr_ctx[i].rx_err_ring_mask = 0; + soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0; + soc->intr_ctx[i].reo_status_ring_mask = 0; + soc->intr_ctx[i].rxdma2host_ring_mask = 0; + soc->intr_ctx[i].host2rxdma_ring_mask = 0; + soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0; + + qdf_lro_deinit(soc->intr_ctx[i].lro_ctx); + } +} + +#define AVG_MAX_MPDUS_PER_TID 128 +#define AVG_TIDS_PER_CLIENT 2 +#define AVG_FLOWS_PER_TID 2 +#define AVG_MSDUS_PER_FLOW 128 +#define AVG_MSDUS_PER_MPDU 4 + +/* + * Allocate and setup link descriptor pool that will be used by HW for + * various link and queue descriptors and managed by WBM + */ +static int dp_hw_link_desc_pool_setup(struct dp_soc *soc) +{ + int link_desc_size = hal_get_link_desc_size(soc->hal_soc); + int link_desc_align = hal_get_link_desc_align(soc->hal_soc); + uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx); + uint32_t num_mpdus_per_link_desc = + hal_num_mpdus_per_link_desc(soc->hal_soc); + uint32_t num_msdus_per_link_desc = + hal_num_msdus_per_link_desc(soc->hal_soc); + uint32_t num_mpdu_links_per_queue_desc = + hal_num_mpdu_links_per_queue_desc(soc->hal_soc); + uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx); + uint32_t total_link_descs, total_mem_size; + uint32_t num_mpdu_link_descs, num_mpdu_queue_descs; + uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs; + uint32_t entry_size, num_entries; + int i; + uint32_t cookie = 0; + qdf_dma_addr_t *baseaddr = NULL; + uint32_t page_idx = 0; + struct qdf_mem_multi_page_t *pages; + struct qdf_mem_dma_page_t *dma_pages; + uint32_t offset = 0; + uint32_t count = 0; + uint32_t num_descs_per_page; + + /* Only Tx queue descriptors are allocated from common link descriptor + * pool Rx queue descriptors are not included in this because (REO queue + * extension descriptors) they are expected to be allocated contiguously + * with REO queue descriptors + */ + num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT * + AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc; + + num_mpdu_queue_descs = num_mpdu_link_descs / + num_mpdu_links_per_queue_desc; + + num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT * + AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) / + num_msdus_per_link_desc; + + num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT * + AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6; + + num_entries = num_mpdu_link_descs + num_mpdu_queue_descs + + num_tx_msdu_link_descs + num_rx_msdu_link_descs; + + /* Round up to power of 2 */ + total_link_descs = 1; + while (total_link_descs < num_entries) + total_link_descs <<= 1; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + FL("total_link_descs: %u, link_desc_size: %d"), + total_link_descs, link_desc_size); + total_mem_size = total_link_descs * link_desc_size; + + total_mem_size += link_desc_align; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + FL("total_mem_size: %d"), total_mem_size); + + pages = &soc->link_desc_pages; + dp_set_max_page_size(pages, max_alloc_size); + if (!dp_is_soc_reinit(soc)) { + dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE, + pages, + link_desc_size, + total_link_descs, + 0, false); + if (!pages->num_pages) { + dp_err("Multi page alloc fail for hw link desc pool"); + goto fail_page_alloc; + } + qdf_minidump_log( + (void *)(pages->dma_pages->page_v_addr_start), + pages->num_pages * + sizeof(struct qdf_mem_dma_page_t), + "hw_link_desc_bank"); + } + + /* Allocate and setup link descriptor idle list for HW internal use */ + entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK); + total_mem_size = entry_size * total_link_descs; + + if (total_mem_size <= max_alloc_size) { + void *desc; + + if (dp_srng_setup(soc, &soc->wbm_idle_link_ring, + WBM_IDLE_LINK, 0, 0, total_link_descs, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Link desc idle ring setup failed")); + goto fail; + } + + qdf_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned, + soc->wbm_idle_link_ring.alloc_size, + "wbm_idle_link_ring"); + + hal_srng_access_start_unlocked(soc->hal_soc, + soc->wbm_idle_link_ring.hal_srng); + page_idx = 0; count = 0; + offset = 0; + pages = &soc->link_desc_pages; + if (pages->dma_pages) + dma_pages = pages->dma_pages; + else + goto fail; + num_descs_per_page = + pages->num_element_per_page; + while ((desc = hal_srng_src_get_next( + soc->hal_soc, + soc->wbm_idle_link_ring.hal_srng)) && + (count < total_link_descs)) { + page_idx = count / num_descs_per_page; + offset = count % num_descs_per_page; + cookie = LINK_DESC_COOKIE(count, page_idx); + hal_set_link_desc_addr( + desc, cookie, + dma_pages[page_idx].page_p_addr + + (offset * link_desc_size)); + count++; + } + hal_srng_access_end_unlocked(soc->hal_soc, + soc->wbm_idle_link_ring.hal_srng); + } else { + uint32_t num_scatter_bufs; + uint32_t num_entries_per_buf; + uint32_t rem_entries; + uint8_t *scatter_buf_ptr; + uint16_t scatter_buf_num; + uint32_t buf_size = 0; + + soc->wbm_idle_scatter_buf_size = + hal_idle_list_scatter_buf_size(soc->hal_soc); + num_entries_per_buf = hal_idle_scatter_buf_num_entries( + soc->hal_soc, soc->wbm_idle_scatter_buf_size); + num_scatter_bufs = hal_idle_list_num_scatter_bufs( + soc->hal_soc, total_mem_size, + soc->wbm_idle_scatter_buf_size); + + if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("scatter bufs size out of bounds")); + goto fail; + } + + for (i = 0; i < num_scatter_bufs; i++) { + baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i]; + if (!dp_is_soc_reinit(soc)) { + buf_size = soc->wbm_idle_scatter_buf_size; + soc->wbm_idle_scatter_buf_base_vaddr[i] = + qdf_mem_alloc_consistent(soc->osdev, + soc->osdev-> + dev, + buf_size, + baseaddr); + } + if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("Scatter lst memory alloc fail")); + goto fail; + } + } + + /* Populate idle list scatter buffers with link descriptor + * pointers + */ + scatter_buf_num = 0; + scatter_buf_ptr = (uint8_t *)( + soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]); + rem_entries = num_entries_per_buf; + pages = &soc->link_desc_pages; + page_idx = 0; count = 0; + offset = 0; + num_descs_per_page = + pages->num_element_per_page; + if (pages->dma_pages) + dma_pages = pages->dma_pages; + else + goto fail; + while (count < total_link_descs) { + page_idx = count / num_descs_per_page; + offset = count % num_descs_per_page; + cookie = LINK_DESC_COOKIE(count, page_idx); + hal_set_link_desc_addr( + (void *)scatter_buf_ptr, + cookie, + dma_pages[page_idx].page_p_addr + + (offset * link_desc_size)); + rem_entries--; + if (rem_entries) { + scatter_buf_ptr += entry_size; + } else { + rem_entries = num_entries_per_buf; + scatter_buf_num++; + if (scatter_buf_num >= num_scatter_bufs) + break; + scatter_buf_ptr = + (uint8_t *) + (soc->wbm_idle_scatter_buf_base_vaddr[ + scatter_buf_num]); + } + count++; + } + /* Setup link descriptor idle list in HW */ + hal_setup_link_idle_list(soc->hal_soc, + soc->wbm_idle_scatter_buf_base_paddr, + soc->wbm_idle_scatter_buf_base_vaddr, + num_scatter_bufs, soc->wbm_idle_scatter_buf_size, + (uint32_t)(scatter_buf_ptr - + (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[ + scatter_buf_num-1])), total_link_descs); + } + return 0; + +fail: + if (soc->wbm_idle_link_ring.hal_srng) { + dp_srng_cleanup(soc, &soc->wbm_idle_link_ring, + WBM_IDLE_LINK, 0); + } + + for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) { + if (soc->wbm_idle_scatter_buf_base_vaddr[i]) { + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + soc->wbm_idle_scatter_buf_size, + soc->wbm_idle_scatter_buf_base_vaddr[i], + soc->wbm_idle_scatter_buf_base_paddr[i], 0); + soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL; + } + } + + pages = &soc->link_desc_pages; + qdf_minidump_remove( + (void *)pages->dma_pages->page_v_addr_start); + dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE, + pages, 0, false); + return QDF_STATUS_E_FAILURE; + +fail_page_alloc: + return QDF_STATUS_E_FAULT; +} + +/* + * Free link descriptor pool that was setup HW + */ +static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc) +{ + int i; + struct qdf_mem_multi_page_t *pages; + + if (soc->wbm_idle_link_ring.hal_srng) { + qdf_minidump_remove( + soc->wbm_idle_link_ring.base_vaddr_unaligned); + dp_srng_cleanup(soc, &soc->wbm_idle_link_ring, + WBM_IDLE_LINK, 0); + } + + for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) { + if (soc->wbm_idle_scatter_buf_base_vaddr[i]) { + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + soc->wbm_idle_scatter_buf_size, + soc->wbm_idle_scatter_buf_base_vaddr[i], + soc->wbm_idle_scatter_buf_base_paddr[i], 0); + soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL; + } + } + + pages = &soc->link_desc_pages; + qdf_minidump_remove( + (void *)pages->dma_pages->page_v_addr_start); + dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE, + pages, 0, false); +} + +#ifdef IPA_OFFLOAD +#define REO_DST_RING_SIZE_QCA6290 1023 +#ifndef CONFIG_WIFI_EMULATION_WIFI_3_0 +#define REO_DST_RING_SIZE_QCA8074 1023 +#define REO_DST_RING_SIZE_QCN9000 2048 +#else +#define REO_DST_RING_SIZE_QCA8074 8 +#define REO_DST_RING_SIZE_QCN9000 8 +#endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */ + +#else + +#define REO_DST_RING_SIZE_QCA6290 1024 +#ifndef CONFIG_WIFI_EMULATION_WIFI_3_0 +#define REO_DST_RING_SIZE_QCA8074 2048 +#define REO_DST_RING_SIZE_QCN9000 2048 +#else +#define REO_DST_RING_SIZE_QCA8074 8 +#define REO_DST_RING_SIZE_QCN9000 8 +#endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */ +#endif /* IPA_OFFLOAD */ + +#ifndef FEATURE_WDS +static void dp_soc_wds_attach(struct dp_soc *soc) +{ +} + +static void dp_soc_wds_detach(struct dp_soc *soc) +{ +} +#endif +/* + * dp_soc_reset_ring_map() - Reset cpu ring map + * @soc: Datapath soc handler + * + * This api resets the default cpu ring map + */ + +static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc) +{ + uint8_t i; + int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx); + + for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) { + switch (nss_config) { + case dp_nss_cfg_first_radio: + /* + * Setting Tx ring map for one nss offloaded radio + */ + soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i]; + break; + + case dp_nss_cfg_second_radio: + /* + * Setting Tx ring for two nss offloaded radios + */ + soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i]; + break; + + case dp_nss_cfg_dbdc: + /* + * Setting Tx ring map for 2 nss offloaded radios + */ + soc->tx_ring_map[i] = + dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i]; + break; + + case dp_nss_cfg_dbtc: + /* + * Setting Tx ring map for 3 nss offloaded radios + */ + soc->tx_ring_map[i] = + dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i]; + break; + + default: + dp_err("tx_ring_map failed due to invalid nss cfg"); + break; + } + } +} + +/* + * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS + * @dp_soc - DP soc handle + * @ring_type - ring type + * @ring_num - ring_num + * + * return 0 or 1 + */ +static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num) +{ + uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx); + uint8_t status = 0; + + switch (ring_type) { + case WBM2SW_RELEASE: + case REO_DST: + case RXDMA_BUF: + status = ((nss_config) & (1 << ring_num)); + break; + default: + break; + } + + return status; +} + +/* + * dp_soc_disable_mac2_intr_mask() - reset interrupt mask for WMAC2 hw rings + * @dp_soc - DP Soc handle + * + * Return: Return void + */ +static void dp_soc_disable_mac2_intr_mask(struct dp_soc *soc) +{ + int *grp_mask = NULL; + int group_number; + + grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0]; + group_number = dp_srng_find_ring_in_mask(0x2, grp_mask); + wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx, + group_number, 0x0); + + grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0]; + group_number = dp_srng_find_ring_in_mask(0x2, grp_mask); + wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx, + group_number, 0x0); + + grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0]; + group_number = dp_srng_find_ring_in_mask(0x2, grp_mask); + wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx, + group_number, 0x0); + + grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0]; + group_number = dp_srng_find_ring_in_mask(0x2, grp_mask); + wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx, + group_number, 0x0); +} + +/* + * dp_soc_reset_intr_mask() - reset interrupt mask + * @dp_soc - DP Soc handle + * + * Return: Return void + */ +static void dp_soc_reset_intr_mask(struct dp_soc *soc) +{ + uint8_t j; + int *grp_mask = NULL; + int group_number, mask, num_ring; + + /* number of tx ring */ + num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx); + + /* + * group mask for tx completion ring. + */ + grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0]; + + /* loop and reset the mask for only offloaded ring */ + for (j = 0; j < num_ring; j++) { + if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) { + continue; + } + + /* + * Group number corresponding to tx offloaded ring. + */ + group_number = dp_srng_find_ring_in_mask(j, grp_mask); + if (group_number < 0) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + FL("ring not part of any group; ring_type: %d,ring_num %d"), + WBM2SW_RELEASE, j); + return; + } + + /* reset the tx mask for offloaded ring */ + mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number); + mask &= (~(1 << j)); + + /* + * reset the interrupt mask for offloaded ring. + */ + wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask); + } + + /* number of rx rings */ + num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx); + + /* + * group mask for reo destination ring. + */ + grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0]; + + /* loop and reset the mask for only offloaded ring */ + for (j = 0; j < num_ring; j++) { + if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) { + continue; + } + + /* + * Group number corresponding to rx offloaded ring. + */ + group_number = dp_srng_find_ring_in_mask(j, grp_mask); + if (group_number < 0) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + FL("ring not part of any group; ring_type: %d,ring_num %d"), + REO_DST, j); + return; + } + + /* set the interrupt mask for offloaded ring */ + mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number); + mask &= (~(1 << j)); + + /* + * set the interrupt mask to zero for rx offloaded radio. + */ + wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask); + } + + /* + * group mask for Rx buffer refill ring + */ + grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0]; + + /* loop and reset the mask for only offloaded ring */ + for (j = 0; j < MAX_PDEV_CNT; j++) { + int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j); + + if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) { + continue; + } + + /* + * Group number corresponding to rx offloaded ring. + */ + group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask); + if (group_number < 0) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + FL("ring not part of any group; ring_type: %d,ring_num %d"), + REO_DST, lmac_id); + return; + } + + /* set the interrupt mask for offloaded ring */ + mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, + group_number); + mask &= (~(1 << lmac_id)); + + /* + * set the interrupt mask to zero for rx offloaded radio. + */ + wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx, + group_number, mask); + } +} + +#ifdef IPA_OFFLOAD +/** + * dp_reo_remap_config() - configure reo remap register value based + * nss configuration. + * based on offload_radio value below remap configuration + * get applied. + * 0 - both Radios handled by host (remap rings 1, 2, 3 & 4) + * 1 - 1st Radio handled by NSS (remap rings 2, 3 & 4) + * 2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4) + * 3 - both Radios handled by NSS (remap not required) + * 4 - IPA OFFLOAD enabled (remap rings 1,2 & 3) + * + * @remap1: output parameter indicates reo remap 1 register value + * @remap2: output parameter indicates reo remap 2 register value + * Return: bool type, true if remap is configured else false. + */ +bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2) +{ + *remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) | + HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) | + HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) | + HAL_REO_REMAP_IX2(REO_REMAP_SW1, 19) | + HAL_REO_REMAP_IX2(REO_REMAP_SW2, 20) | + HAL_REO_REMAP_IX2(REO_REMAP_SW3, 21) | + HAL_REO_REMAP_IX2(REO_REMAP_SW1, 22) | + HAL_REO_REMAP_IX2(REO_REMAP_SW2, 23); + + *remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW3, 24) | + HAL_REO_REMAP_IX3(REO_REMAP_SW1, 25) | + HAL_REO_REMAP_IX3(REO_REMAP_SW2, 26) | + HAL_REO_REMAP_IX3(REO_REMAP_SW3, 27) | + HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) | + HAL_REO_REMAP_IX3(REO_REMAP_SW2, 29) | + HAL_REO_REMAP_IX3(REO_REMAP_SW3, 30) | + HAL_REO_REMAP_IX3(REO_REMAP_SW1, 31); + + dp_debug("remap1 %x remap2 %x", *remap1, *remap2); + + return true; +} + +/** + * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA + * + * @tx_ring_num: Tx ring number + * @tx_ipa_ring_sz: Return param only updated for IPA. + * + * Return: None + */ +static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz) +{ + if (tx_ring_num == WLAN_CFG_IPA_TX_N_TXCMPL_RING) + *tx_ipa_ring_sz = WLAN_CFG_IPA_TX_RING_SIZE; +} + +/** + * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA + * + * @tx_comp_ring_num: Tx comp ring number + * @tx_comp_ipa_ring_sz: Return param only updated for IPA. + * + * Return: None + */ +static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num, + int *tx_comp_ipa_ring_sz) +{ + if (tx_comp_ring_num == WLAN_CFG_IPA_TX_N_TXCMPL_RING) + *tx_comp_ipa_ring_sz = WLAN_CFG_IPA_TX_COMP_RING_SIZE; +} +#else +static bool dp_reo_remap_config(struct dp_soc *soc, + uint32_t *remap1, + uint32_t *remap2) +{ + uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx); + uint8_t target_type; + + target_type = hal_get_target_type(soc->hal_soc); + + switch (offload_radio) { + case dp_nss_cfg_default: + *remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) | + HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) | + HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) | + HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19) | + HAL_REO_REMAP_IX2(REO_REMAP_SW1, 20) | + HAL_REO_REMAP_IX2(REO_REMAP_SW2, 21) | + HAL_REO_REMAP_IX2(REO_REMAP_SW3, 22) | + HAL_REO_REMAP_IX2(REO_REMAP_SW4, 23); + + *remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW1, 24) | + HAL_REO_REMAP_IX3(REO_REMAP_SW2, 25) | + HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) | + HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) | + HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) | + HAL_REO_REMAP_IX3(REO_REMAP_SW2, 29) | + HAL_REO_REMAP_IX3(REO_REMAP_SW3, 30) | + HAL_REO_REMAP_IX3(REO_REMAP_SW4, 31); + break; + case dp_nss_cfg_first_radio: + *remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW2, 16) | + HAL_REO_REMAP_IX2(REO_REMAP_SW3, 17) | + HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) | + HAL_REO_REMAP_IX2(REO_REMAP_SW2, 19) | + HAL_REO_REMAP_IX2(REO_REMAP_SW3, 20) | + HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) | + HAL_REO_REMAP_IX2(REO_REMAP_SW2, 22) | + HAL_REO_REMAP_IX2(REO_REMAP_SW3, 23); + + *remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW4, 24) | + HAL_REO_REMAP_IX3(REO_REMAP_SW2, 25) | + HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) | + HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) | + HAL_REO_REMAP_IX3(REO_REMAP_SW2, 28) | + HAL_REO_REMAP_IX3(REO_REMAP_SW3, 29) | + HAL_REO_REMAP_IX3(REO_REMAP_SW4, 30) | + HAL_REO_REMAP_IX3(REO_REMAP_SW2, 31); + break; + case dp_nss_cfg_second_radio: + *remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) | + HAL_REO_REMAP_IX2(REO_REMAP_SW3, 17) | + HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) | + HAL_REO_REMAP_IX2(REO_REMAP_SW1, 19) | + HAL_REO_REMAP_IX2(REO_REMAP_SW3, 20) | + HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) | + HAL_REO_REMAP_IX2(REO_REMAP_SW1, 22) | + HAL_REO_REMAP_IX2(REO_REMAP_SW3, 23); + + *remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW4, 24) | + HAL_REO_REMAP_IX3(REO_REMAP_SW1, 25) | + HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) | + HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) | + HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) | + HAL_REO_REMAP_IX3(REO_REMAP_SW3, 29) | + HAL_REO_REMAP_IX3(REO_REMAP_SW4, 30) | + HAL_REO_REMAP_IX3(REO_REMAP_SW1, 31); + + break; + case dp_nss_cfg_dbdc: + case dp_nss_cfg_dbtc: + /* return false if both or all are offloaded to NSS */ + return false; + } + + dp_debug("remap1 %x remap2 %x offload_radio %u", + *remap1, *remap2, offload_radio); + return true; +} + +static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz) +{ +} + +static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num, + int *tx_comp_ipa_ring_sz) +{ +} +#endif /* IPA_OFFLOAD */ + +/* + * dp_reo_frag_dst_set() - configure reo register to set the + * fragment destination ring + * @soc : Datapath soc + * @frag_dst_ring : output parameter to set fragment destination ring + * + * Based on offload_radio below fragment destination rings is selected + * 0 - TCL + * 1 - SW1 + * 2 - SW2 + * 3 - SW3 + * 4 - SW4 + * 5 - Release + * 6 - FW + * 7 - alternate select + * + * return: void + */ +static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring) +{ + uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx); + + switch (offload_radio) { + case dp_nss_cfg_default: + *frag_dst_ring = REO_REMAP_TCL; + break; + case dp_nss_cfg_first_radio: + /* + * This configuration is valid for single band radio which + * is also NSS offload. + */ + case dp_nss_cfg_dbdc: + case dp_nss_cfg_dbtc: + *frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT; + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_reo_frag_dst_set invalid offload radio config")); + break; + } +} + +#ifdef ENABLE_VERBOSE_DEBUG +static void dp_enable_verbose_debug(struct dp_soc *soc) +{ + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; + + soc_cfg_ctx = soc->wlan_cfg_ctx; + + if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask) + is_dp_verbose_debug_enabled = true; + + if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask) + hal_set_verbose_debug(true); + else + hal_set_verbose_debug(false); +} +#else +static void dp_enable_verbose_debug(struct dp_soc *soc) +{ +} +#endif + +#ifdef WLAN_FEATURE_STATS_EXT +static inline void dp_create_ext_stats_event(struct dp_soc *soc) +{ + qdf_event_create(&soc->rx_hw_stats_event); +} +#else +static inline void dp_create_ext_stats_event(struct dp_soc *soc) +{ +} +#endif + +/* + * dp_soc_cmn_setup() - Common SoC level initializion + * @soc: Datapath SOC handle + * + * This is an internal function used to setup common SOC data structures, + * to be called from PDEV attach after receiving HW mode capabilities from FW + */ +static int dp_soc_cmn_setup(struct dp_soc *soc) +{ + int i, cached; + struct hal_reo_params reo_params; + int tx_ring_size; + int tx_comp_ring_size; + int reo_dst_ring_size; + uint32_t entries; + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; + + if (qdf_atomic_read(&soc->cmn_init_done)) + return 0; + + if (dp_hw_link_desc_pool_setup(soc)) + goto fail1; + + soc_cfg_ctx = soc->wlan_cfg_ctx; + + dp_enable_verbose_debug(soc); + + /* Setup SRNG rings */ + /* Common rings */ + entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx); + + if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for wbm_desc_rel_ring")); + goto fail1; + } + + qdf_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned, + soc->wbm_desc_rel_ring.alloc_size, + "wbm_desc_rel_ring"); + + soc->num_tcl_data_rings = 0; + /* Tx data rings */ + if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) { + soc->num_tcl_data_rings = + wlan_cfg_num_tcl_data_rings(soc_cfg_ctx); + tx_comp_ring_size = + wlan_cfg_tx_comp_ring_size(soc_cfg_ctx); + tx_ring_size = + wlan_cfg_tx_ring_size(soc_cfg_ctx); + for (i = 0; i < soc->num_tcl_data_rings; i++) { + dp_ipa_get_tx_ring_size(i, &tx_ring_size); + + if (dp_srng_setup(soc, &soc->tcl_data_ring[i], + TCL_DATA, i, 0, tx_ring_size, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for tcl_data_ring[%d]"), i); + goto fail1; + } + + /* Disable cached desc if NSS offload is enabled */ + cached = WLAN_CFG_DST_RING_CACHED_DESC; + if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) + cached = 0; + + dp_ipa_get_tx_comp_ring_size(i, &tx_comp_ring_size); + /* + * TBD: Set IPA WBM ring size with ini IPA UC tx buffer + * count + */ + if (dp_srng_setup(soc, &soc->tx_comp_ring[i], + WBM2SW_RELEASE, i, 0, + tx_comp_ring_size, + cached)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for tx_comp_ring[%d]"), i); + goto fail1; + } + } + } else { + /* This will be incremented during per pdev ring setup */ + soc->num_tcl_data_rings = 0; + } + + if (dp_tx_soc_attach(soc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_tx_soc_attach failed")); + goto fail1; + } + + entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx); + /* TCL command and status rings */ + if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for tcl_cmd_ring")); + goto fail2; + } + + entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx); + if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for tcl_status_ring")); + goto fail2; + } + + reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx); + + /* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension + * descriptors + */ + + /* Rx data rings */ + if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) { + soc->num_reo_dest_rings = + wlan_cfg_num_reo_dest_rings(soc_cfg_ctx); + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_INFO, + FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings); + + /* Disable cached desc if NSS offload is enabled */ + cached = WLAN_CFG_DST_RING_CACHED_DESC; + if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) + cached = 0; + + for (i = 0; i < soc->num_reo_dest_rings; i++) { + if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST, + i, 0, reo_dst_ring_size, cached)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "reo_dest_ring [%d]"), i); + goto fail2; + } + } + } else { + /* This will be incremented during per pdev ring setup */ + soc->num_reo_dest_rings = 0; + } + + entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx); + /* LMAC RxDMA to SW Rings configuration */ + if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) { + + for (i = 0; i < MAX_RX_MAC_RINGS; i++) { + if (dp_srng_setup(soc, &soc->rxdma_err_dst_ring[i], + RXDMA_DST, 0, i, entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_err_dst_ring")); + goto fail2; + } + } + } + /* TBD: call dp_rx_init to setup Rx SW descriptors */ + + /* REO reinjection ring */ + entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx); + if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for reo_reinject_ring")); + goto fail2; + } + + + /* Rx release ring */ + if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0, + wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx), + 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for rx_rel_ring")); + goto fail2; + } + + + /* Rx exception ring */ + entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx); + if (dp_srng_setup(soc, &soc->reo_exception_ring, + REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for reo_exception_ring")); + goto fail2; + } + + + /* REO command and status rings */ + if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0, + wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx), + 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for reo_cmd_ring")); + goto fail2; + } + + hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng); + TAILQ_INIT(&soc->rx.reo_cmd_list); + qdf_spinlock_create(&soc->rx.reo_cmd_lock); + + if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0, + wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx), + 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for reo_status_ring")); + goto fail2; + } + + /* + * Skip registering hw ring interrupts for WMAC2 on IPQ6018 + * WMAC2 is not there in IPQ6018 platform. + */ + if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018) { + dp_soc_disable_mac2_intr_mask(soc); + } + + /* Reset the cpu ring map if radio is NSS offloaded */ + if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) { + dp_soc_reset_cpu_ring_map(soc); + dp_soc_reset_intr_mask(soc); + } + + /* Setup HW REO */ + qdf_mem_zero(&reo_params, sizeof(reo_params)); + + if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) { + + /* + * Reo ring remap is not required if both radios + * are offloaded to NSS + */ + if (!dp_reo_remap_config(soc, + &reo_params.remap1, + &reo_params.remap2)) + goto out; + + reo_params.rx_hash_enabled = true; + } + + /* setup the global rx defrag waitlist */ + TAILQ_INIT(&soc->rx.defrag.waitlist); + soc->rx.defrag.timeout_ms = + wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx); + soc->rx.defrag.next_flush_ms = 0; + soc->rx.flags.defrag_timeout_check = + wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx); + qdf_spinlock_create(&soc->rx.defrag.defrag_lock); + + dp_create_ext_stats_event(soc); +out: + /* + * set the fragment destination ring + */ + dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring); + + hal_reo_setup(soc->hal_soc, &reo_params); + + hal_reo_set_err_dst_remap(soc->hal_soc); + + qdf_atomic_set(&soc->cmn_init_done, 1); + + dp_soc_wds_attach(soc); + + qdf_nbuf_queue_init(&soc->htt_stats.msg); + return 0; +fail2: + dp_tx_soc_detach(soc); +fail1: + /* + * Cleanup will be done as part of soc_detach, which will + * be called on pdev attach failure + */ + return QDF_STATUS_E_FAILURE; +} + +/* + * dp_soc_cmn_cleanup() - Common SoC level De-initializion + * + * @soc: Datapath SOC handle + * + * This function is responsible for cleaning up DP resource of Soc + * initialled in dp_pdev_attach_wifi3-->dp_soc_cmn_setup, since + * dp_soc_detach_wifi3 could not identify some of them + * whether they have done initialized or not accurately. + * + */ +static void dp_soc_cmn_cleanup(struct dp_soc *soc) +{ + if (!dp_is_soc_reinit(soc)) { + dp_tx_soc_detach(soc); + } + + qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock); + qdf_spinlock_destroy(&soc->rx.reo_cmd_lock); +} + +static QDF_STATUS +dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, + int force); + +static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev) +{ + struct cdp_lro_hash_config lro_hash; + QDF_STATUS status; + + if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) && + !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) && + !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) { + dp_err("LRO, GRO and RX hash disabled"); + return QDF_STATUS_E_FAILURE; + } + + qdf_mem_zero(&lro_hash, sizeof(lro_hash)); + + if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) || + wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) { + lro_hash.lro_enable = 1; + lro_hash.tcp_flag = QDF_TCPHDR_ACK; + lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN | + QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG | + QDF_TCPHDR_ECE | QDF_TCPHDR_CWR; + } + + qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4, + (sizeof(lro_hash.toeplitz_hash_ipv4[0]) * + LRO_IPV4_SEED_ARR_SZ)); + qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6, + (sizeof(lro_hash.toeplitz_hash_ipv6[0]) * + LRO_IPV6_SEED_ARR_SZ)); + + qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config); + + if (!soc->cdp_soc.ol_ops->lro_hash_config) { + QDF_BUG(0); + dp_err("lro_hash_config not configured"); + return QDF_STATUS_E_FAILURE; + } + + status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc, + pdev->pdev_id, + &lro_hash); + if (!QDF_IS_STATUS_SUCCESS(status)) { + dp_err("failed to send lro_hash_config to FW %u", status); + return status; + } + + dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x", + lro_hash.lro_enable, lro_hash.tcp_flag, + lro_hash.tcp_flag_mask); + + dp_info("toeplitz_hash_ipv4:"); + qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + lro_hash.toeplitz_hash_ipv4, + (sizeof(lro_hash.toeplitz_hash_ipv4[0]) * + LRO_IPV4_SEED_ARR_SZ)); + + dp_info("toeplitz_hash_ipv6:"); + qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + lro_hash.toeplitz_hash_ipv6, + (sizeof(lro_hash.toeplitz_hash_ipv6[0]) * + LRO_IPV6_SEED_ARR_SZ)); + + return status; +} + +/* +* dp_rxdma_ring_setup() - configure the RX DMA rings +* @soc: data path SoC handle +* @pdev: Physical device handle +* +* Return: 0 - success, > 0 - failure +*/ +#ifdef QCA_HOST2FW_RXBUF_RING +static int dp_rxdma_ring_setup(struct dp_soc *soc, + struct dp_pdev *pdev) +{ + struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx; + int max_mac_rings; + int i; + int ring_size; + + pdev_cfg_ctx = pdev->wlan_cfg_ctx; + max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx); + ring_size = wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx); + + for (i = 0; i < max_mac_rings; i++) { + dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i); + if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i], + RXDMA_BUF, 1, i, ring_size, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("failed rx mac ring setup")); + return QDF_STATUS_E_FAILURE; + } + } + return QDF_STATUS_SUCCESS; +} +#else +static int dp_rxdma_ring_setup(struct dp_soc *soc, + struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps + * @pdev - DP_PDEV handle + * + * Return: void + */ +static inline void +dp_dscp_tid_map_setup(struct dp_pdev *pdev) +{ + uint8_t map_id; + struct dp_soc *soc = pdev->soc; + + if (!soc) + return; + + for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) { + qdf_mem_copy(pdev->dscp_tid_map[map_id], + default_dscp_tid_map, + sizeof(default_dscp_tid_map)); + } + + for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) { + hal_tx_set_dscp_tid_map(soc->hal_soc, + default_dscp_tid_map, + map_id); + } +} + +/** + * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps + * @pdev - DP_PDEV handle + * + * Return: void + */ +static inline void +dp_pcp_tid_map_setup(struct dp_pdev *pdev) +{ + struct dp_soc *soc = pdev->soc; + + if (!soc) + return; + + qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map, + sizeof(default_pcp_tid_map)); + hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map); +} + +#ifdef IPA_OFFLOAD +/** + * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring + * @soc: data path instance + * @pdev: core txrx pdev context + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc, + struct dp_pdev *pdev) +{ + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; + int entries; + + soc_cfg_ctx = soc->wlan_cfg_ctx; + entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx); + + /* Setup second Rx refill buffer ring */ + if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, + IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id, entries, 0) + ) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed second rx refill ring")); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring + * @soc: data path instance + * @pdev: core txrx pdev context + * + * Return: void + */ +static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc, + struct dp_pdev *pdev) +{ + dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, + IPA_RX_REFILL_BUF_RING_IDX); +} + +#else +static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc, + struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc, + struct dp_pdev *pdev) +{ +} +#endif + +#if !defined(DISABLE_MON_CONFIG) +/** + * dp_mon_rings_setup() - Initialize Monitor rings based on target + * @soc: soc handle + * @pdev: physical device handle + * + * Return: nonzero on failure and zero on success + */ +static +QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev) +{ + int mac_id = 0; + int pdev_id = pdev->pdev_id; + int entries; + struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx; + + pdev_cfg_ctx = pdev->wlan_cfg_ctx; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id); + + if (soc->wlan_cfg_ctx->rxdma1_enable) { + entries = + wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx); + if (dp_srng_setup(soc, + &soc->rxdma_mon_buf_ring[lmac_id], + RXDMA_MONITOR_BUF, 0, lmac_id, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_mon_buf_ring ")); + return QDF_STATUS_E_NOMEM; + } + + entries = + wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx); + if (dp_srng_setup(soc, + &soc->rxdma_mon_dst_ring[lmac_id], + RXDMA_MONITOR_DST, 0, lmac_id, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_mon_dst_ring")); + return QDF_STATUS_E_NOMEM; + } + + entries = + wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx); + if (dp_srng_setup(soc, + &soc->rxdma_mon_status_ring[lmac_id], + RXDMA_MONITOR_STATUS, 0, lmac_id, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_mon_status_ring")); + return QDF_STATUS_E_NOMEM; + } + + entries = + wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx); + if (dp_srng_setup(soc, + &soc->rxdma_mon_desc_ring[lmac_id], + RXDMA_MONITOR_DESC, 0, lmac_id, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_mon_desc_ring")); + return QDF_STATUS_E_NOMEM; + } + } else { + entries = + wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx); + if (dp_srng_setup(soc, + &soc->rxdma_mon_status_ring[lmac_id], + RXDMA_MONITOR_STATUS, 0, lmac_id, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_mon_status_ring")); + return QDF_STATUS_E_NOMEM; + } + } + } + + return QDF_STATUS_SUCCESS; +} +#else +static +QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/*dp_iterate_update_peer_list - update peer stats on cal client timer + * @pdev_hdl: pdev handle + */ +#ifdef ATH_SUPPORT_EXT_STAT +void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl; + struct dp_soc *soc = pdev->soc; + struct dp_vdev *vdev = NULL; + struct dp_peer *peer = NULL; + + qdf_spin_lock_bh(&soc->peer_ref_mutex); + qdf_spin_lock_bh(&pdev->vdev_list_lock); + DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { + DP_VDEV_ITERATE_PEER_LIST(vdev, peer) { + dp_cal_client_update_peer_stats(&peer->stats); + } + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + qdf_spin_unlock_bh(&soc->peer_ref_mutex); +} +#else +void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl) +{ +} +#endif + +/* + * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing + * @pdev: Datapath PDEV handle + * + * Return: QDF_STATUS_SUCCESS: Success + * QDF_STATUS_E_NOMEM: Error + */ +static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev) +{ + pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE); + + if (!pdev->ppdu_tlv_buf) { + QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail"); + return QDF_STATUS_E_NOMEM; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY +#ifndef RX_DEFRAG_DO_NOT_REINJECT +/** + * dp_soc_rx_reinject_ring_history_attach - Attach the reo reinject ring + * history. + * @soc: DP soc handle + * + * Return: None + */ +static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc) +{ + soc->rx_reinject_ring_history = dp_context_alloc_mem( + soc, DP_RX_REINJECT_RING_HIST_TYPE, rx_ring_hist_size); + if (soc->rx_reinject_ring_history) + qdf_atomic_init(&soc->rx_reinject_ring_history->index); +} +#else /* RX_DEFRAG_DO_NOT_REINJECT */ +static inline void +dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc) +{ +} +#endif /* RX_DEFRAG_DO_NOT_REINJECT */ + +/** + * dp_soc_rx_history_attach() - Attach the ring history record buffers + * @soc: DP soc structure + * + * This function allocates the memory for recording the rx ring, rx error + * ring and the reinject ring entries. There is no error returned in case + * of allocation failure since the record function checks if the history is + * initialized or not. We do not want to fail the driver load in case of + * failure to allocate memory for debug history. + * + * Returns: None + */ +static void dp_soc_rx_history_attach(struct dp_soc *soc) +{ + int i; + uint32_t rx_ring_hist_size; + uint32_t rx_err_ring_hist_size; + uint32_t rx_reinject_hist_size; + + rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]); + rx_err_ring_hist_size = sizeof(*soc->rx_err_ring_history); + rx_reinject_hist_size = sizeof(*soc->rx_reinject_ring_history); + + for (i = 0; i < MAX_REO_DEST_RINGS; i++) { + soc->rx_ring_history[i] = dp_context_alloc_mem( + soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size); + if (soc->rx_ring_history[i]) + qdf_atomic_init(&soc->rx_ring_history[i]->index); + } + + soc->rx_err_ring_history = dp_context_alloc_mem( + soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size); + if (soc->rx_err_ring_history) + qdf_atomic_init(&soc->rx_err_ring_history->index); + + dp_soc_rx_reinject_ring_history_attach(soc); +} + +static void dp_soc_rx_history_detach(struct dp_soc *soc) +{ + int i; + + for (i = 0; i < MAX_REO_DEST_RINGS; i++) + dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE, + soc->rx_ring_history[i]); + + dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE, + soc->rx_err_ring_history); + + /* + * No need for a featurized detach since qdf_mem_free takes + * care of NULL pointer. + */ + dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE, + soc->rx_reinject_ring_history); +} + +#else +static inline void dp_soc_rx_history_attach(struct dp_soc *soc) +{ +} + +static inline void dp_soc_rx_history_detach(struct dp_soc *soc) +{ +} +#endif + +/* +* dp_pdev_attach_wifi3() - attach txrx pdev +* @txrx_soc: Datapath SOC handle +* @htc_handle: HTC handle for host-target interface +* @qdf_osdev: QDF OS device +* @pdev_id: PDEV ID +* +* Return: QDF_STATUS +*/ +static inline QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc, + HTC_HANDLE htc_handle, + qdf_device_t qdf_osdev, + uint8_t pdev_id) +{ + int ring_size; + int entries; + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; + int nss_cfg; + void *sojourn_buf; + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + struct dp_pdev *pdev = NULL; + QDF_STATUS ret; + + if (dp_is_soc_reinit(soc)) { + pdev = soc->pdev_list[pdev_id]; + } else { + pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, sizeof(*pdev)); + qdf_minidump_log(pdev, sizeof(*pdev), "dp_pdev"); + } + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("DP PDEV memory allocation failed")); + ret = QDF_STATUS_E_NOMEM; + goto fail0; + } + + pdev->filter = dp_mon_filter_alloc(pdev); + if (!pdev->filter) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Memory allocation failed for monitor filters")); + qdf_mem_free(pdev); + ret = QDF_STATUS_E_NOMEM; + goto fail0; + } + + /* + * Variable to prevent double pdev deinitialization during + * radio detach execution .i.e. in the absence of any vdev. + */ + pdev->pdev_deinit = 0; + pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer)); + + if (!pdev->invalid_peer) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Invalid peer memory allocation failed")); + dp_mon_filter_dealloc(pdev); + qdf_mem_free(pdev); + ret = QDF_STATUS_E_NOMEM; + goto fail0; + } + + soc_cfg_ctx = soc->wlan_cfg_ctx; + pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc); + + if (!pdev->wlan_cfg_ctx) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("pdev cfg_attach failed")); + + qdf_mem_free(pdev->invalid_peer); + dp_mon_filter_dealloc(pdev); + qdf_mem_free(pdev); + ret = QDF_STATUS_E_FAILURE; + goto fail0; + } + + /* + * set nss pdev config based on soc config + */ + nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx); + wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx, + (nss_cfg & (1 << pdev_id))); + + pdev->soc = soc; + pdev->pdev_id = pdev_id; + soc->pdev_list[pdev_id] = pdev; + + pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id); + soc->pdev_count++; + + TAILQ_INIT(&pdev->vdev_list); + qdf_spinlock_create(&pdev->vdev_list_lock); + pdev->vdev_count = 0; + + qdf_spinlock_create(&pdev->tx_mutex); + qdf_spinlock_create(&pdev->neighbour_peer_mutex); + TAILQ_INIT(&pdev->neighbour_peers_list); + pdev->neighbour_peers_added = false; + pdev->monitor_configured = false; + pdev->enable_reap_timer_non_pkt = false; + + if (dp_soc_cmn_setup(soc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_soc_cmn_setup failed")); + ret = QDF_STATUS_E_FAILURE; + goto fail1; + } + + /* Setup per PDEV TCL rings if configured */ + if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { + ring_size = + wlan_cfg_tx_ring_size(soc_cfg_ctx); + + if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA, + pdev_id, pdev_id, ring_size, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for tcl_data_ring")); + ret = QDF_STATUS_E_FAILURE; + goto fail1; + } + + ring_size = + wlan_cfg_tx_comp_ring_size(soc_cfg_ctx); + + if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id], + WBM2SW_RELEASE, pdev_id, pdev_id, + ring_size, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for tx_comp_ring")); + ret = QDF_STATUS_E_FAILURE; + goto fail1; + } + soc->num_tcl_data_rings++; + } + + /* Tx specific init */ + if (dp_tx_pdev_attach(pdev)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_tx_pdev_attach failed")); + ret = QDF_STATUS_E_FAILURE; + goto fail1; + } + + ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx); + /* Setup per PDEV REO rings if configured */ + if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) { + if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST, + pdev_id, pdev_id, ring_size, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for reo_dest_ringn")); + ret = QDF_STATUS_E_FAILURE; + goto fail1; + } + soc->num_reo_dest_rings++; + } + + ring_size = + wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc->wlan_cfg_ctx); + + if (dp_srng_setup(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], + RXDMA_BUF, 0, pdev->lmac_id, ring_size, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed rx refill ring")); + ret = QDF_STATUS_E_FAILURE; + goto fail1; + } + + if (dp_rxdma_ring_setup(soc, pdev)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("RXDMA ring config failed")); + ret = QDF_STATUS_E_FAILURE; + goto fail1; + } + + if (dp_mon_rings_setup(soc, pdev)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("MONITOR rings setup failed")); + ret = QDF_STATUS_E_FAILURE; + goto fail1; + } + + entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx); + if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { + if (dp_srng_setup(soc, + &soc->rxdma_err_dst_ring[pdev->lmac_id], + RXDMA_DST, + 0, pdev->lmac_id, entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_err_dst_ring")); + ret = QDF_STATUS_E_FAILURE; + goto fail1; + } + } + + if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) { + ret = QDF_STATUS_E_FAILURE; + goto fail1; + } + + if (dp_ipa_ring_resource_setup(soc, pdev)) { + ret = QDF_STATUS_E_FAILURE; + goto fail1; + } + + if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_ipa_uc_attach failed")); + ret = QDF_STATUS_E_FAILURE; + goto fail1; + } + + /* Rx specific init */ + if (dp_rx_pdev_attach(pdev)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_rx_pdev_attach failed")); + ret = QDF_STATUS_E_FAILURE; + goto fail2; + } + + DP_STATS_INIT(pdev); + + /* Monitor filter init */ + pdev->mon_filter_mode = MON_FILTER_ALL; + pdev->fp_mgmt_filter = FILTER_MGMT_ALL; + pdev->fp_ctrl_filter = FILTER_CTRL_ALL; + pdev->fp_data_filter = FILTER_DATA_ALL; + pdev->mo_mgmt_filter = FILTER_MGMT_ALL; + pdev->mo_ctrl_filter = FILTER_CTRL_ALL; + pdev->mo_data_filter = FILTER_DATA_ALL; + + dp_local_peer_id_pool_init(pdev); + + dp_dscp_tid_map_setup(pdev); + dp_pcp_tid_map_setup(pdev); + + /* Rx monitor mode specific init */ + if (dp_rx_pdev_mon_attach(pdev)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "dp_rx_pdev_mon_attach failed"); + ret = QDF_STATUS_E_FAILURE; + goto fail2; + } + + if (dp_wdi_event_attach(pdev)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "dp_wdi_evet_attach failed"); + ret = QDF_STATUS_E_FAILURE; + goto wdi_attach_fail; + } + + /* set the reo destination during initialization */ + pdev->reo_dest = pdev->pdev_id + 1; + + /* + * initialize ppdu tlv list + */ + TAILQ_INIT(&pdev->ppdu_info_list); + pdev->tlv_count = 0; + pdev->list_depth = 0; + + qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats)); + + pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev, + sizeof(struct cdp_tx_sojourn_stats), 0, 4, + TRUE); + + if (pdev->sojourn_buf) { + sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf); + qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats)); + } + /* initlialize cal client timer */ + dp_cal_client_attach(&pdev->cal_client_ctx, + dp_pdev_to_cdp_pdev(pdev), + pdev->soc->osdev, + &dp_iterate_update_peer_list); + qdf_event_create(&pdev->fw_peer_stats_event); + + pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); + + dp_init_tso_stats(pdev); + + if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS) { + ret = QDF_STATUS_E_FAILURE; + goto fail1; + } + + dp_tx_ppdu_stats_attach(pdev); + + return QDF_STATUS_SUCCESS; + +wdi_attach_fail: + /* + * dp_mon_link_desc_pool_cleanup is done in dp_pdev_detach + * and hence need not to be done here. + */ + dp_rx_pdev_mon_detach(pdev); + +fail2: + dp_rx_pdev_detach(pdev); + dp_ipa_uc_detach(soc, pdev); + +fail1: + soc->pdev_count--; + if (pdev->invalid_peer) + qdf_mem_free(pdev->invalid_peer); + + if (pdev->filter) + dp_mon_filter_dealloc(pdev); + + dp_pdev_detach((struct cdp_pdev *)pdev, 0); +fail0: + return ret; +} + +/* +* dp_rxdma_ring_cleanup() - configure the RX DMA rings +* @soc: data path SoC handle +* @pdev: Physical device handle +* +* Return: void +*/ +#ifdef QCA_HOST2FW_RXBUF_RING +static void dp_rxdma_ring_cleanup(struct dp_soc *soc, + struct dp_pdev *pdev) +{ + int i; + + for (i = 0; i < MAX_RX_MAC_RINGS; i++) + dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i], + RXDMA_BUF, 1); + + if (soc->reap_timer_init) { + qdf_timer_free(&soc->mon_reap_timer); + soc->reap_timer_init = 0; + } +} +#else +static void dp_rxdma_ring_cleanup(struct dp_soc *soc, + struct dp_pdev *pdev) +{ + if (soc->lmac_timer_init) { + qdf_timer_stop(&soc->lmac_reap_timer); + qdf_timer_free(&soc->lmac_reap_timer); + soc->lmac_timer_init = 0; + } +} +#endif + +/* + * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients) + * @pdev: device object + * + * Return: void + */ +static void dp_neighbour_peers_detach(struct dp_pdev *pdev) +{ + struct dp_neighbour_peer *peer = NULL; + struct dp_neighbour_peer *temp_peer = NULL; + + TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list, + neighbour_peer_list_elem, temp_peer) { + /* delete this peer from the list */ + TAILQ_REMOVE(&pdev->neighbour_peers_list, + peer, neighbour_peer_list_elem); + qdf_mem_free(peer); + } + + qdf_spinlock_destroy(&pdev->neighbour_peer_mutex); +} + +/** +* dp_htt_ppdu_stats_detach() - detach stats resources +* @pdev: Datapath PDEV handle +* +* Return: void +*/ +static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev) +{ + struct ppdu_info *ppdu_info, *ppdu_info_next; + + TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list, + ppdu_info_list_elem, ppdu_info_next) { + if (!ppdu_info) + break; + qdf_assert_always(ppdu_info->nbuf); + qdf_nbuf_free(ppdu_info->nbuf); + qdf_mem_free(ppdu_info); + } + + if (pdev->ppdu_tlv_buf) + qdf_mem_free(pdev->ppdu_tlv_buf); + +} + +#if !defined(DISABLE_MON_CONFIG) + +static +void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev, + int mac_id) +{ + if (soc->wlan_cfg_ctx->rxdma1_enable) { + dp_srng_cleanup(soc, + &soc->rxdma_mon_buf_ring[mac_id], + RXDMA_MONITOR_BUF, 0); + + dp_srng_cleanup(soc, + &soc->rxdma_mon_dst_ring[mac_id], + RXDMA_MONITOR_DST, 0); + + dp_srng_cleanup(soc, + &soc->rxdma_mon_status_ring[mac_id], + RXDMA_MONITOR_STATUS, 0); + + dp_srng_cleanup(soc, + &soc->rxdma_mon_desc_ring[mac_id], + RXDMA_MONITOR_DESC, 0); + + dp_srng_cleanup(soc, + &soc->rxdma_err_dst_ring[mac_id], + RXDMA_DST, 0); + } else { + dp_srng_cleanup(soc, + &soc->rxdma_mon_status_ring[mac_id], + RXDMA_MONITOR_STATUS, 0); + + dp_srng_cleanup(soc, + &soc->rxdma_err_dst_ring[mac_id], + RXDMA_DST, 0); + } + +} +#else +static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev, + int mac_id) +{ +} +#endif + +/** + * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings + * + * @soc: soc handle + * @pdev: datapath physical dev handle + * @mac_id: mac number + * + * Return: None + */ +static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev, + int mac_id) +{ +} + +/** + * dp_pdev_mem_reset() - Reset txrx pdev memory + * @pdev: dp pdev handle + * + * Return: None + */ +static void dp_pdev_mem_reset(struct dp_pdev *pdev) +{ + uint16_t len = 0; + uint8_t *dp_pdev_offset = (uint8_t *)pdev; + + len = sizeof(struct dp_pdev) - + offsetof(struct dp_pdev, pdev_deinit) - + sizeof(pdev->pdev_deinit); + dp_pdev_offset = dp_pdev_offset + + offsetof(struct dp_pdev, pdev_deinit) + + sizeof(pdev->pdev_deinit); + + qdf_mem_zero(dp_pdev_offset, len); +} + +#ifdef WLAN_DP_PENDING_MEM_FLUSH +/** + * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev + * @pdev: Datapath PDEV handle + * + * This is the last chance to flush all pending dp vdevs/peers, + * some peer/vdev leak case like Non-SSR + peer unmap missing + * will be covered here. + * + * Return: None + */ +static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev) +{ + struct dp_vdev *vdev = NULL; + + while (true) { + qdf_spin_lock_bh(&pdev->vdev_list_lock); + TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { + if (vdev->delete.pending) + break; + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + + /* + * vdev will be freed when all peers get cleanup, + * dp_delete_pending_vdev will remove vdev from vdev_list + * in pdev. + */ + if (vdev) + dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0); + else + break; + } +} +#else +static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev) +{ +} +#endif + +/** + * dp_pdev_deinit() - Deinit txrx pdev + * @txrx_pdev: Datapath PDEV handle + * @force: Force deinit + * + * Return: None + */ +static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force) +{ + struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev; + struct dp_soc *soc = pdev->soc; + qdf_nbuf_t curr_nbuf, next_nbuf; + int mac_id; + + /* + * Prevent double pdev deinitialization during radio detach + * execution .i.e. in the absence of any vdev + */ + if (pdev->pdev_deinit) + return; + + pdev->pdev_deinit = 1; + + dp_wdi_event_detach(pdev); + + dp_pdev_flush_pending_vdevs(pdev); + + dp_tx_pdev_detach(pdev); + + if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { + dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id], + TCL_DATA, pdev->pdev_id); + dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id], + WBM2SW_RELEASE, pdev->pdev_id); + } + + dp_pktlogmod_exit(pdev); + + dp_rx_fst_detach(soc, pdev); + dp_rx_pdev_detach(pdev); + dp_rx_pdev_mon_detach(pdev); + dp_neighbour_peers_detach(pdev); + qdf_spinlock_destroy(&pdev->tx_mutex); + qdf_spinlock_destroy(&pdev->vdev_list_lock); + + dp_ipa_uc_detach(soc, pdev); + + dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev); + + /* Cleanup per PDEV REO rings if configured */ + if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) { + dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id], + REO_DST, pdev->pdev_id); + } + + dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], + RXDMA_BUF, 0); + + dp_rxdma_ring_cleanup(soc, pdev); + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int lmac_id = + dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id); + + dp_mon_ring_deinit(soc, pdev, lmac_id); + + dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id], + RXDMA_DST, 0); + } + + curr_nbuf = pdev->invalid_peer_head_msdu; + while (curr_nbuf) { + next_nbuf = qdf_nbuf_next(curr_nbuf); + qdf_nbuf_free(curr_nbuf); + curr_nbuf = next_nbuf; + } + pdev->invalid_peer_head_msdu = NULL; + pdev->invalid_peer_tail_msdu = NULL; + + dp_htt_ppdu_stats_detach(pdev); + + dp_tx_ppdu_stats_detach(pdev); + + qdf_nbuf_free(pdev->sojourn_buf); + qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q); + + dp_cal_client_detach(&pdev->cal_client_ctx); + + soc->pdev_count--; + + /* only do soc common cleanup when last pdev do detach */ + if (!(soc->pdev_count)) + dp_reo_cmdlist_destroy(soc); + + wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx); + if (pdev->invalid_peer) + qdf_mem_free(pdev->invalid_peer); + + /* + * Fee the monitor filter allocated and stored + */ + if (pdev->filter) + dp_mon_filter_dealloc(pdev); + + qdf_mem_free(pdev->dp_txrx_handle); + dp_pdev_mem_reset(pdev); +} + +/** + * dp_pdev_deinit_wifi3() - Deinit txrx pdev + * @psoc: Datapath psoc handle + * @pdev_id: Id of datapath PDEV handle + * @force: Force deinit + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, + int force) +{ + struct dp_soc *soc = (struct dp_soc *)psoc; + struct dp_pdev *txrx_pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc, + pdev_id); + + if (!txrx_pdev) + return QDF_STATUS_E_FAILURE; + + soc->dp_soc_reinit = TRUE; + + dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_pdev_detach() - Complete rest of pdev detach + * @txrx_pdev: Datapath PDEV handle + * @force: Force deinit + * + * Return: None + */ +static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force) +{ + struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev; + struct dp_soc *soc = pdev->soc; + struct rx_desc_pool *rx_desc_pool; + int mac_id, mac_for_pdev; + int lmac_id; + + if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { + dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id], + TCL_DATA, pdev->pdev_id); + dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id], + WBM2SW_RELEASE, pdev->pdev_id); + } + + dp_mon_link_free(pdev); + + /* Cleanup per PDEV REO rings if configured */ + if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) { + dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id], + REO_DST, pdev->pdev_id); + } + dp_rxdma_ring_cleanup(soc, pdev); + wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx); + + dp_srng_cleanup(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], + RXDMA_BUF, 0); + dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev); + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + lmac_id = + dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id); + dp_mon_ring_cleanup(soc, pdev, lmac_id); + dp_srng_cleanup(soc, &soc->rxdma_err_dst_ring[lmac_id], + RXDMA_DST, 0); + + if (dp_is_soc_reinit(soc)) { + mac_for_pdev = + dp_get_lmac_id_for_pdev_id(soc, mac_id, + pdev->pdev_id); + rx_desc_pool = &soc->rx_desc_status[mac_for_pdev]; + dp_rx_desc_pool_free(soc, rx_desc_pool); + rx_desc_pool = &soc->rx_desc_mon[mac_for_pdev]; + dp_rx_desc_pool_free(soc, rx_desc_pool); + } + } + + if (dp_is_soc_reinit(soc)) { + rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id]; + dp_rx_desc_pool_free(soc, rx_desc_pool); + } + + /* only do soc common cleanup when last pdev do detach */ + if (!(soc->pdev_count)) + dp_soc_cmn_cleanup(soc); + + soc->pdev_list[pdev->pdev_id] = NULL; + qdf_minidump_remove(pdev); + dp_context_free_mem(soc, DP_PDEV_TYPE, pdev); +} + +/* + * dp_pdev_detach_wifi3() - detach txrx pdev + * @psoc: Datapath soc handle + * @pdev_id: pdev id of pdev + * @force: Force detach + * + * Return: QDF_STATUS + */ +static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, + int force) +{ + struct dp_soc *soc = (struct dp_soc *)psoc; + struct dp_pdev *txrx_pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc, + pdev_id); + + if (!txrx_pdev) { + dp_err("Couldn't find dp pdev"); + return QDF_STATUS_E_FAILURE; + } + + if (dp_is_soc_reinit(soc)) { + dp_pdev_detach((struct cdp_pdev *)txrx_pdev, force); + } else { + dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force); + dp_pdev_detach((struct cdp_pdev *)txrx_pdev, force); + } + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist + * @soc: DP SOC handle + */ +static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc) +{ + struct reo_desc_list_node *desc; + struct dp_rx_tid *rx_tid; + + qdf_spin_lock_bh(&soc->reo_desc_freelist_lock); + while (qdf_list_remove_front(&soc->reo_desc_freelist, + (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) { + rx_tid = &desc->rx_tid; + qdf_mem_unmap_nbytes_single(soc->osdev, + rx_tid->hw_qdesc_paddr, + QDF_DMA_BIDIRECTIONAL, + rx_tid->hw_qdesc_alloc_size); + qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); + qdf_mem_free(desc); + } + qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock); + qdf_list_destroy(&soc->reo_desc_freelist); + qdf_spinlock_destroy(&soc->reo_desc_freelist_lock); +} + +/** + * dp_soc_mem_reset() - Reset Dp Soc memory + * @soc: DP handle + * + * Return: None + */ +static void dp_soc_mem_reset(struct dp_soc *soc) +{ + uint16_t len = 0; + uint8_t *dp_soc_offset = (uint8_t *)soc; + + len = sizeof(struct dp_soc) - + offsetof(struct dp_soc, dp_soc_reinit) - + sizeof(soc->dp_soc_reinit); + dp_soc_offset = dp_soc_offset + + offsetof(struct dp_soc, dp_soc_reinit) + + sizeof(soc->dp_soc_reinit); + + qdf_mem_zero(dp_soc_offset, len); +} + +/** + * dp_soc_deinit() - Deinitialize txrx SOC + * @txrx_soc: Opaque DP SOC handle + * + * Return: None + */ +static void dp_soc_deinit(void *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + int i; + + qdf_atomic_set(&soc->cmn_init_done, 0); + + for (i = 0; i < MAX_PDEV_CNT; i++) { + if (soc->pdev_list[i]) + dp_pdev_deinit((struct cdp_pdev *) + soc->pdev_list[i], 1); + } + + qdf_flush_work(&soc->htt_stats.work); + qdf_disable_work(&soc->htt_stats.work); + + /* Free pending htt stats messages */ + qdf_nbuf_queue_free(&soc->htt_stats.msg); + + dp_peer_find_detach(soc); + + /* Free the ring memories */ + /* Common rings */ + dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0); + + /* Tx data rings */ + if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { + for (i = 0; i < soc->num_tcl_data_rings; i++) { + dp_srng_deinit(soc, &soc->tcl_data_ring[i], + TCL_DATA, i); + dp_srng_deinit(soc, &soc->tx_comp_ring[i], + WBM2SW_RELEASE, i); + } + } + + /* TCL command and status rings */ + dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0); + dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0); + + /* Rx data rings */ + if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) { + soc->num_reo_dest_rings = + wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx); + for (i = 0; i < soc->num_reo_dest_rings; i++) { + /* TODO: Get number of rings and ring sizes + * from wlan_cfg + */ + dp_srng_deinit(soc, &soc->reo_dest_ring[i], + REO_DST, i); + } + } + /* REO reinjection ring */ + dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0); + + /* Rx release ring */ + dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0); + + /* Rx exception ring */ + /* TODO: Better to store ring_type and ring_num in + * dp_srng during setup + */ + dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0); + + /* REO command and status rings */ + dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0); + dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0); + + dp_soc_wds_detach(soc); + + qdf_spinlock_destroy(&soc->peer_ref_mutex); + qdf_spinlock_destroy(&soc->htt_stats.lock); + + htt_soc_htc_dealloc(soc->htt_handle); + + dp_reo_desc_freelist_destroy(soc); + + qdf_spinlock_destroy(&soc->ast_lock); + + DEINIT_RX_HW_STATS_LOCK(soc); + + dp_soc_mem_reset(soc); +} + +/** + * dp_soc_deinit_wifi3() - Deinitialize txrx SOC + * @txrx_soc: Opaque DP SOC handle + * + * Return: None + */ +static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + + soc->dp_soc_reinit = 1; + dp_soc_deinit(txrx_soc); +} + +/* + * dp_soc_detach() - Detach rest of txrx SOC + * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc. + * + * Return: None + */ +static void dp_soc_detach(struct cdp_soc_t *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + int i; + + qdf_atomic_set(&soc->cmn_init_done, 0); + + /* TBD: Call Tx and Rx cleanup functions to free buffers and + * SW descriptors + */ + + for (i = 0; i < MAX_PDEV_CNT; i++) { + if (soc->pdev_list[i]) + dp_pdev_detach((struct cdp_pdev *) + soc->pdev_list[i], 1); + } + + /* Free the ring memories */ + /* Common rings */ + qdf_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned); + dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0); + + if (dp_is_soc_reinit(soc)) { + dp_tx_soc_detach(soc); + } + + /* Tx data rings */ + if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { + for (i = 0; i < soc->num_tcl_data_rings; i++) { + dp_srng_cleanup(soc, &soc->tcl_data_ring[i], + TCL_DATA, i); + dp_srng_cleanup(soc, &soc->tx_comp_ring[i], + WBM2SW_RELEASE, i); + } + } + + /* TCL command and status rings */ + dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0); + dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0); + + /* Rx data rings */ + if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) { + soc->num_reo_dest_rings = + wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx); + for (i = 0; i < soc->num_reo_dest_rings; i++) { + /* TODO: Get number of rings and ring sizes + * from wlan_cfg + */ + dp_srng_cleanup(soc, &soc->reo_dest_ring[i], + REO_DST, i); + } + } + /* REO reinjection ring */ + dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0); + + /* Rx release ring */ + dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0); + dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3); + + /* Rx exception ring */ + /* TODO: Better to store ring_type and ring_num in + * dp_srng during setup + */ + dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0); + + /* REO command and status rings */ + dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0); + dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0); + dp_hw_link_desc_pool_cleanup(soc); + + htt_soc_detach(soc->htt_handle); + soc->dp_soc_reinit = 0; + + wlan_cfg_soc_detach(soc->wlan_cfg_ctx); + dp_soc_rx_history_detach(soc); + + qdf_minidump_remove(soc); + qdf_mem_free(soc); +} + +/* + * dp_soc_detach_wifi3() - Detach txrx SOC + * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc. + * + * Return: None + */ +static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + + if (dp_is_soc_reinit(soc)) { + dp_soc_detach(txrx_soc); + } else { + dp_soc_deinit(txrx_soc); + dp_soc_detach(txrx_soc); + } +} + +#if !defined(DISABLE_MON_CONFIG) +/** + * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings + * @soc: soc handle + * @pdev: physical device handle + * @mac_id: ring number + * @mac_for_pdev: mac_id + * + * Return: non-zero for failure, zero for success + */ +static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc, + struct dp_pdev *pdev, + int mac_id, + int mac_for_pdev) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (soc->wlan_cfg_ctx->rxdma1_enable) { + status = htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_buf_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_BUF); + + if (status != QDF_STATUS_SUCCESS) { + dp_err("Failed to send htt srng setup message for Rxdma mon buf ring"); + return status; + } + + status = htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_dst_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_DST); + + if (status != QDF_STATUS_SUCCESS) { + dp_err("Failed to send htt srng setup message for Rxdma mon dst ring"); + return status; + } + + status = htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_status_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_STATUS); + + if (status != QDF_STATUS_SUCCESS) { + dp_err("Failed to send htt srng setup message for Rxdma mon status ring"); + return status; + } + + status = htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_desc_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_DESC); + + if (status != QDF_STATUS_SUCCESS) { + dp_err("Failed to send htt srng message for Rxdma mon desc ring"); + return status; + } + } else { + status = htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_status_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_STATUS); + + if (status != QDF_STATUS_SUCCESS) { + dp_err("Failed to send htt srng setup message for Rxdma mon status ring"); + return status; + } + } + + return status; + +} +#else +static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc, + struct dp_pdev *pdev, + int mac_id, + int mac_for_pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/* + * dp_rxdma_ring_config() - configure the RX DMA rings + * + * This function is used to configure the MAC rings. + * On MCL host provides buffers in Host2FW ring + * FW refills (copies) buffers to the ring and updates + * ring_idx in register + * + * @soc: data path SoC handle + * + * Return: zero on success, non-zero on failure + */ +#ifdef QCA_HOST2FW_RXBUF_RING +static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc) +{ + int i; + QDF_STATUS status = QDF_STATUS_SUCCESS; + for (i = 0; i < MAX_PDEV_CNT; i++) { + struct dp_pdev *pdev = soc->pdev_list[i]; + + if (pdev) { + int mac_id; + bool dbs_enable = 0; + int max_mac_rings = + wlan_cfg_get_num_mac_rings + (pdev->wlan_cfg_ctx); + int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i); + + htt_srng_setup(soc->htt_handle, 0, + soc->rx_refill_buf_ring[lmac_id] + .hal_srng, + RXDMA_BUF); + + if (pdev->rx_refill_buf_ring2.hal_srng) + htt_srng_setup(soc->htt_handle, 0, + pdev->rx_refill_buf_ring2.hal_srng, + RXDMA_BUF); + + if (soc->cdp_soc.ol_ops-> + is_hw_dbs_2x2_capable) { + dbs_enable = soc->cdp_soc.ol_ops-> + is_hw_dbs_2x2_capable( + (void *)soc->ctrl_psoc); + } + + if (dbs_enable) { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + FL("DBS enabled max_mac_rings %d"), + max_mac_rings); + } else { + max_mac_rings = 1; + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + FL("DBS disabled, max_mac_rings %d"), + max_mac_rings); + } + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("pdev_id %d max_mac_rings %d"), + pdev->pdev_id, max_mac_rings); + + for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { + int mac_for_pdev = + dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + /* + * Obtain lmac id from pdev to access the LMAC + * ring in soc context + */ + lmac_id = + dp_get_lmac_id_for_pdev_id(soc, + mac_id, + pdev->pdev_id); + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + FL("mac_id %d"), mac_for_pdev); + + htt_srng_setup(soc->htt_handle, mac_for_pdev, + pdev->rx_mac_buf_ring[mac_id] + .hal_srng, + RXDMA_BUF); + htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_err_dst_ring[lmac_id] + .hal_srng, + RXDMA_DST); + + /* Configure monitor mode rings */ + status = dp_mon_htt_srng_setup(soc, pdev, + lmac_id, + mac_for_pdev); + if (status != QDF_STATUS_SUCCESS) { + dp_err("Failed to send htt monitor messages to target"); + return status; + } + + } + } + } + + /* + * Timer to reap rxdma status rings. + * Needed until we enable ppdu end interrupts + */ + qdf_timer_init(soc->osdev, &soc->mon_reap_timer, + dp_mon_reap_timer_handler, (void *)soc, + QDF_TIMER_TYPE_WAKE_APPS); + soc->reap_timer_init = 1; + return status; +} +#else +/* This is only for WIN */ +static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc) +{ + int i; + QDF_STATUS status = QDF_STATUS_SUCCESS; + int mac_for_pdev; + int lmac_id; + + for (i = 0; i < MAX_PDEV_CNT; i++) { + struct dp_pdev *pdev = soc->pdev_list[i]; + + if (!pdev) + continue; + + mac_for_pdev = i; + lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i); + + htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rx_refill_buf_ring[lmac_id]. + hal_srng, RXDMA_BUF); +#ifndef DISABLE_MON_CONFIG + + htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_buf_ring[lmac_id].hal_srng, + RXDMA_MONITOR_BUF); + htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_dst_ring[lmac_id].hal_srng, + RXDMA_MONITOR_DST); + htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_status_ring[lmac_id].hal_srng, + RXDMA_MONITOR_STATUS); + htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_desc_ring[lmac_id].hal_srng, + RXDMA_MONITOR_DESC); +#endif + htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_err_dst_ring[lmac_id].hal_srng, + RXDMA_DST); + } + + /* Configure LMAC rings in Polled mode */ + if (soc->lmac_polled_mode) { + /* + * Timer to reap lmac rings. + */ + qdf_timer_init(soc->osdev, &soc->lmac_reap_timer, + dp_service_lmac_rings, (void *)soc, + QDF_TIMER_TYPE_WAKE_APPS); + soc->lmac_timer_init = 1; + qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS); + } + return status; +} +#endif + +#ifdef NO_RX_PKT_HDR_TLV +static QDF_STATUS +dp_rxdma_ring_sel_cfg(struct dp_soc *soc) +{ + int i; + int mac_id; + struct htt_rx_ring_tlv_filter htt_tlv_filter = {0}; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + htt_tlv_filter.mpdu_start = 1; + htt_tlv_filter.msdu_start = 1; + htt_tlv_filter.mpdu_end = 1; + htt_tlv_filter.msdu_end = 1; + htt_tlv_filter.attention = 1; + htt_tlv_filter.packet = 1; + htt_tlv_filter.packet_header = 0; + + htt_tlv_filter.ppdu_start = 0; + htt_tlv_filter.ppdu_end = 0; + htt_tlv_filter.ppdu_end_user_stats = 0; + htt_tlv_filter.ppdu_end_user_stats_ext = 0; + htt_tlv_filter.ppdu_end_status_done = 0; + htt_tlv_filter.enable_fp = 1; + htt_tlv_filter.enable_md = 0; + htt_tlv_filter.enable_md = 0; + htt_tlv_filter.enable_mo = 0; + + htt_tlv_filter.fp_mgmt_filter = 0; + htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ; + htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST | + FILTER_DATA_MCAST | + FILTER_DATA_DATA); + htt_tlv_filter.mo_mgmt_filter = 0; + htt_tlv_filter.mo_ctrl_filter = 0; + htt_tlv_filter.mo_data_filter = 0; + htt_tlv_filter.md_data_filter = 0; + + htt_tlv_filter.offset_valid = true; + + htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN; + /*Not subscribing rx_pkt_header*/ + htt_tlv_filter.rx_header_offset = 0; + htt_tlv_filter.rx_mpdu_start_offset = + HAL_RX_PKT_TLV_MPDU_START_OFFSET(soc->hal_soc); + htt_tlv_filter.rx_mpdu_end_offset = + HAL_RX_PKT_TLV_MPDU_END_OFFSET(soc->hal_soc); + htt_tlv_filter.rx_msdu_start_offset = + HAL_RX_PKT_TLV_MSDU_START_OFFSET(soc->hal_soc); + htt_tlv_filter.rx_msdu_end_offset = + HAL_RX_PKT_TLV_MSDU_END_OFFSET(soc->hal_soc); + htt_tlv_filter.rx_attn_offset = + HAL_RX_PKT_TLV_ATTN_OFFSET(soc->hal_soc); + + for (i = 0; i < MAX_PDEV_CNT; i++) { + struct dp_pdev *pdev = soc->pdev_list[i]; + + if (!pdev) + continue; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = + dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id); + /* + * Obtain lmac id from pdev to access the LMAC ring + * in soc context + */ + int lmac_id = + dp_get_lmac_id_for_pdev_id(soc, mac_id, + pdev->pdev_id); + + htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, + soc->rx_refill_buf_ring[lmac_id]. + hal_srng, + RXDMA_BUF, RX_DATA_BUFFER_SIZE, + &htt_tlv_filter); + } + } + return status; +} +#else +static QDF_STATUS +dp_rxdma_ring_sel_cfg(struct dp_soc *soc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/* + * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine + * + * This function is used to configure the FSE HW block in RX OLE on a + * per pdev basis. Here, we will be programming parameters related to + * the Flow Search Table. + * + * @soc: data path SoC handle + * + * Return: zero on success, non-zero on failure + */ +#ifdef WLAN_SUPPORT_RX_FLOW_TAG +static QDF_STATUS +dp_rx_target_fst_config(struct dp_soc *soc) +{ + int i; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + for (i = 0; i < MAX_PDEV_CNT; i++) { + struct dp_pdev *pdev = soc->pdev_list[i]; + + /* Flow search is not enabled if NSS offload is enabled */ + if (pdev && + !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { + status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev); + if (status != QDF_STATUS_SUCCESS) + break; + } + } + return status; +} +#elif defined(WLAN_SUPPORT_RX_FISA) +/** + * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW + * @soc: SoC handle + * + * Return: Success + */ +static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc) +{ + /* Check if it is enabled in the INI */ + if (!soc->fisa_enable) { + dp_err("RX FISA feature is disabled"); + return QDF_STATUS_E_NOSUPPORT; + } + + return dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]); +} + +#define FISA_MAX_TIMEOUT 0xffffffff +#define FISA_DISABLE_TIMEOUT 0 +static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc) +{ + struct dp_htt_rx_fisa_cfg fisa_config; + + fisa_config.pdev_id = 0; + fisa_config.fisa_timeout = FISA_MAX_TIMEOUT; + + return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config); +} +#else /* !WLAN_SUPPORT_RX_FISA */ +static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* !WLAN_SUPPORT_RX_FISA */ + +#ifndef WLAN_SUPPORT_RX_FISA +static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc) +{ + return QDF_STATUS_SUCCESS; +} + +static void dp_rx_dump_fisa_table(struct dp_soc *soc) +{ +} +#endif /* !WLAN_SUPPORT_RX_FISA */ + +/* + * dp_soc_attach_target_wifi3() - SOC initialization in the target + * @cdp_soc: Opaque Datapath SOC handle + * + * Return: zero on success, non-zero on failure + */ +static QDF_STATUS +dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc) +{ + struct dp_soc *soc = (struct dp_soc *)cdp_soc; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + htt_soc_attach_target(soc->htt_handle); + + status = dp_rxdma_ring_config(soc); + if (status != QDF_STATUS_SUCCESS) { + dp_err("Failed to send htt srng setup messages to target"); + return status; + } + + status = dp_rxdma_ring_sel_cfg(soc); + if (status != QDF_STATUS_SUCCESS) { + dp_err("Failed to send htt ring config message to target"); + return status; + } + + status = dp_rx_target_fst_config(soc); + if (status != QDF_STATUS_SUCCESS && + status != QDF_STATUS_E_NOSUPPORT) { + dp_err("Failed to send htt fst setup config message to target"); + return status; + } + + if (status == QDF_STATUS_SUCCESS) { + status = dp_rx_fisa_config(soc); + if (status != QDF_STATUS_SUCCESS) { + dp_err("Failed to send htt FISA config message to target"); + return status; + } + } + + DP_STATS_INIT(soc); + + dp_runtime_init(soc); + + /* initialize work queue for stats processing */ + qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc); + + qdf_minidump_log(soc, sizeof(*soc), "dp_soc"); + + return QDF_STATUS_SUCCESS; +} + +/* +* dp_vdev_attach_wifi3() - attach txrx vdev +* @txrx_pdev: Datapath PDEV handle +* @vdev_mac_addr: MAC address of the virtual interface +* @vdev_id: VDEV Id +* @wlan_op_mode: VDEV operating mode +* @subtype: VDEV operating subtype +* +* Return: status +*/ +static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc, + uint8_t pdev_id, + uint8_t *vdev_mac_addr, + uint8_t vdev_id, + enum wlan_op_mode op_mode, + enum wlan_op_subtype subtype) +{ + struct dp_soc *soc = (struct dp_soc *)cdp_soc; + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev)); + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("DP PDEV is Null for pdev id %d"), pdev_id); + qdf_mem_free(vdev); + goto fail0; + } + + if (!vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("DP VDEV memory allocation failed")); + goto fail0; + } + + vdev->pdev = pdev; + vdev->vdev_id = vdev_id; + vdev->opmode = op_mode; + vdev->subtype = subtype; + vdev->osdev = soc->osdev; + + vdev->osif_rx = NULL; + vdev->osif_rsim_rx_decap = NULL; + vdev->osif_get_key = NULL; + vdev->osif_rx_mon = NULL; + vdev->osif_tx_free_ext = NULL; + vdev->osif_vdev = NULL; + + vdev->delete.pending = 0; + vdev->safemode = 0; + vdev->drop_unenc = 1; + vdev->sec_type = cdp_sec_type_none; + vdev->multipass_en = false; +#ifdef notyet + vdev->filters_num = 0; +#endif + + qdf_mem_copy( + &vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE); + + /* TODO: Initialize default HTT meta data that will be used in + * TCL descriptors for packets transmitted from this VDEV + */ + + TAILQ_INIT(&vdev->peer_list); + dp_peer_multipass_list_init(vdev); + + if ((soc->intr_mode == DP_INTR_POLL) && + wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) { + if ((pdev->vdev_count == 0) || + (wlan_op_mode_monitor == vdev->opmode)) + qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS); + } + + soc->vdev_id_map[vdev_id] = vdev; + + if (wlan_op_mode_monitor == vdev->opmode) { + pdev->monitor_vdev = vdev; + return QDF_STATUS_SUCCESS; + } + + vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx); + vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx); + vdev->dscp_tid_map_id = 0; + vdev->mcast_enhancement_en = 0; + vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx); + vdev->prev_tx_enq_tstamp = 0; + vdev->prev_rx_deliver_tstamp = 0; + + qdf_spin_lock_bh(&pdev->vdev_list_lock); + /* add this vdev into the pdev's list */ + TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem); + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + pdev->vdev_count++; + + if (wlan_op_mode_sta != vdev->opmode && + wlan_op_mode_ndi != vdev->opmode) + vdev->ap_bridge_enabled = true; + else + vdev->ap_bridge_enabled = false; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: wlan_cfg_ap_bridge_enabled %d", + __func__, vdev->ap_bridge_enabled); + + dp_tx_vdev_attach(vdev); + + if (pdev->vdev_count == 1) + dp_lro_hash_setup(soc, pdev); + + dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT")", vdev, + QDF_MAC_ADDR_REF(vdev->mac_addr.raw)); + DP_STATS_INIT(vdev); + + if (wlan_op_mode_sta == vdev->opmode) + dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id, + vdev->mac_addr.raw); + + return QDF_STATUS_SUCCESS; + +fail0: + return QDF_STATUS_E_FAILURE; +} + +/** + * dp_vdev_register_wifi3() - Register VDEV operations from osif layer + * @soc: Datapath soc handle + * @vdev_id: id of Datapath VDEV handle + * @osif_vdev: OSIF vdev handle + * @txrx_ops: Tx and Rx operations + * + * Return: DP VDEV handle on success, NULL on failure + */ +static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc, + uint8_t vdev_id, + ol_osif_vdev_handle osif_vdev, + struct ol_txrx_ops *txrx_ops) +{ + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + + if (!vdev) + return QDF_STATUS_E_FAILURE; + + vdev->osif_vdev = osif_vdev; + vdev->osif_rx = txrx_ops->rx.rx; + vdev->osif_rx_stack = txrx_ops->rx.rx_stack; + vdev->osif_rx_flush = txrx_ops->rx.rx_flush; + vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush; + vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap; + vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx; + vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush; + vdev->osif_get_key = txrx_ops->get_key; + vdev->osif_rx_mon = txrx_ops->rx.mon; + vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext; + vdev->tx_comp = txrx_ops->tx.tx_comp; +#ifdef notyet +#if ATH_SUPPORT_WAPI + vdev->osif_check_wai = txrx_ops->rx.wai_check; +#endif +#endif +#ifdef UMAC_SUPPORT_PROXY_ARP + vdev->osif_proxy_arp = txrx_ops->proxy_arp; +#endif + vdev->me_convert = txrx_ops->me_convert; + + /* TODO: Enable the following once Tx code is integrated */ + if (vdev->mesh_vdev) + txrx_ops->tx.tx = dp_tx_send_mesh; + else + txrx_ops->tx.tx = dp_tx_send; + + txrx_ops->tx.tx_exception = dp_tx_send_exception; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, + "DP Vdev Register success"); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_peer_flush_ast_entry() - Forcibily flush all AST entry of peer + * @soc: Datapath soc handle + * @peer: Datapath peer handle + * @peer_id: Peer ID + * @vdev_id: Vdev ID + * + * Return: void + */ +static void dp_peer_flush_ast_entry(struct dp_soc *soc, + struct dp_peer *peer, + uint16_t peer_id, + uint8_t vdev_id) +{ + struct dp_ast_entry *ase, *tmp_ase; + + if (soc->is_peer_map_unmap_v2) { + DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) { + dp_rx_peer_unmap_handler + (soc, peer_id, + vdev_id, + ase->mac_addr.raw, + 1); + } + } +} + +/** + * dp_vdev_flush_peers() - Forcibily Flush peers of vdev + * @vdev: Datapath VDEV handle + * @unmap_only: Flag to indicate "only unmap" + * + * Return: void + */ +static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + struct dp_pdev *pdev = vdev->pdev; + struct dp_soc *soc = pdev->soc; + struct dp_peer *peer; + uint16_t *peer_ids; + struct dp_peer **peer_array = NULL; + uint8_t i = 0, j = 0; + uint8_t m = 0, n = 0; + + peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0])); + if (!peer_ids) { + dp_err("DP alloc failure - unable to flush peers"); + return; + } + + if (!unmap_only) { + peer_array = qdf_mem_malloc( + soc->max_peers * sizeof(struct dp_peer *)); + if (!peer_array) { + qdf_mem_free(peer_ids); + dp_err("DP alloc failure - unable to flush peers"); + return; + } + } + + qdf_spin_lock_bh(&soc->peer_ref_mutex); + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if (!unmap_only && n < soc->max_peers) + peer_array[n++] = peer; + + for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) + if (peer->peer_ids[i] != HTT_INVALID_PEER) + if (j < soc->max_peers) + peer_ids[j++] = peer->peer_ids[i]; + } + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + + /* + * If peer id is invalid, need to flush the peer if + * peer valid flag is true, this is needed for NAN + SSR case. + */ + if (!unmap_only) { + for (m = 0; m < n ; m++) { + peer = peer_array[m]; + + dp_info("peer: "QDF_MAC_ADDR_FMT" is getting deleted", + QDF_MAC_ADDR_REF(peer->mac_addr.raw)); + /* only if peer valid is true */ + if (peer->valid) + dp_peer_delete_wifi3((struct cdp_soc_t *)soc, + vdev->vdev_id, + peer->mac_addr.raw, 0); + } + qdf_mem_free(peer_array); + } + + for (i = 0; i < j ; i++) { + peer = __dp_peer_find_by_id(soc, peer_ids[i]); + + if (!peer) + continue; + + dp_info("peer ref cnt %d", qdf_atomic_read(&peer->ref_cnt)); + /* + * set ref count to one to force delete the peers + * with ref count leak + */ + SET_PEER_REF_CNT_ONE(peer); + dp_info("peer: "QDF_MAC_ADDR_FMT" is getting unmap", + QDF_MAC_ADDR_REF(peer->mac_addr.raw)); + /* free AST entries of peer */ + dp_peer_flush_ast_entry(soc, peer, + peer_ids[i], + vdev->vdev_id); + + dp_rx_peer_unmap_handler(soc, peer_ids[i], + vdev->vdev_id, + peer->mac_addr.raw, 0); + } + + qdf_mem_free(peer_ids); + dp_info("Flushed peers for vdev object %pK ", vdev); +} + +/* + * dp_vdev_detach_wifi3() - Detach txrx vdev + * @cdp_soc: Datapath soc handle + * @vdev_id: VDEV Id + * @callback: Callback OL_IF on completion of detach + * @cb_context: Callback context + * + */ +static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc, + uint8_t vdev_id, + ol_txrx_vdev_delete_cb callback, + void *cb_context) +{ + struct dp_soc *soc = (struct dp_soc *)cdp_soc; + struct dp_pdev *pdev; + struct dp_neighbour_peer *peer = NULL; + struct dp_neighbour_peer *temp_peer = NULL; + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + + if (!vdev) + return QDF_STATUS_E_FAILURE; + + pdev = vdev->pdev; + + if (wlan_op_mode_sta == vdev->opmode) + dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id, + vdev->vap_self_peer->mac_addr.raw, 0); + + /* + * If Target is hung, flush all peers before detaching vdev + * this will free all references held due to missing + * unmap commands from Target + */ + if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle))) + dp_vdev_flush_peers((struct cdp_vdev *)vdev, false); + else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET) + dp_vdev_flush_peers((struct cdp_vdev *)vdev, true); + + dp_rx_vdev_detach(vdev); + /* + * move it after dp_rx_vdev_detach(), + * as the call back done in dp_rx_vdev_detach() + * still need to get vdev pointer by vdev_id. + */ + soc->vdev_id_map[vdev->vdev_id] = NULL; + /* + * Use peer_ref_mutex while accessing peer_list, in case + * a peer is in the process of being removed from the list. + */ + qdf_spin_lock_bh(&soc->peer_ref_mutex); + /* check that the vdev has no peers allocated */ + if (!TAILQ_EMPTY(&vdev->peer_list)) { + /* debug print - will be removed later */ + dp_warn("not deleting vdev object %pK ("QDF_MAC_ADDR_FMT") until deletion finishes for all its peers", + vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw)); + + if (vdev->vdev_dp_ext_handle) { + qdf_mem_free(vdev->vdev_dp_ext_handle); + vdev->vdev_dp_ext_handle = NULL; + } + /* indicate that the vdev needs to be deleted */ + vdev->delete.pending = 1; + vdev->delete.callback = callback; + vdev->delete.context = cb_context; + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + return QDF_STATUS_E_FAILURE; + } + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + + if (wlan_op_mode_monitor == vdev->opmode) + goto free_vdev; + + qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); + if (!soc->hw_nac_monitor_support) { + TAILQ_FOREACH(peer, &pdev->neighbour_peers_list, + neighbour_peer_list_elem) { + QDF_ASSERT(peer->vdev != vdev); + } + } else { + TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list, + neighbour_peer_list_elem, temp_peer) { + if (peer->vdev == vdev) { + TAILQ_REMOVE(&pdev->neighbour_peers_list, peer, + neighbour_peer_list_elem); + qdf_mem_free(peer); + } + } + } + qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); + + qdf_spin_lock_bh(&pdev->vdev_list_lock); + /* remove the vdev from its parent pdev's list */ + TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem); + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + + dp_tx_vdev_detach(vdev); +free_vdev: + if (wlan_op_mode_monitor == vdev->opmode) { + if (soc->intr_mode == DP_INTR_POLL) + qdf_timer_sync_cancel(&soc->int_timer); + pdev->monitor_vdev = NULL; + } + + if (vdev->vdev_dp_ext_handle) { + qdf_mem_free(vdev->vdev_dp_ext_handle); + vdev->vdev_dp_ext_handle = NULL; + } + + dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")", vdev, + QDF_MAC_ADDR_REF(vdev->mac_addr.raw)); + + qdf_mem_free(vdev); + + if (callback) + callback(cb_context); + + return QDF_STATUS_SUCCESS; +} + +#ifdef FEATURE_AST +/* + * dp_peer_delete_ast_entries(): Delete all AST entries for a peer + * @soc - datapath soc handle + * @peer - datapath peer handle + * + * Delete the AST entries belonging to a peer + */ +static inline void dp_peer_delete_ast_entries(struct dp_soc *soc, + struct dp_peer *peer) +{ + struct dp_ast_entry *ast_entry, *temp_ast_entry; + + dp_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry); + DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) + dp_peer_del_ast(soc, ast_entry); + + peer->self_ast_entry = NULL; +} +#else +static inline void dp_peer_delete_ast_entries(struct dp_soc *soc, + struct dp_peer *peer) +{ +} +#endif +#if ATH_SUPPORT_WRAP +static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev, + uint8_t *peer_mac_addr) +{ + struct dp_peer *peer; + + peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr, + 0, vdev->vdev_id); + if (!peer) + return NULL; + + if (peer->bss_peer) + return peer; + + dp_peer_unref_delete(peer); + return NULL; +} +#else +static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev, + uint8_t *peer_mac_addr) +{ + struct dp_peer *peer; + + peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr, + 0, vdev->vdev_id); + if (!peer) + return NULL; + + if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id)) + return peer; + + dp_peer_unref_delete(peer); + return NULL; +} +#endif + +#ifdef FEATURE_AST +static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc, + struct dp_pdev *pdev, + uint8_t *peer_mac_addr) +{ + struct dp_ast_entry *ast_entry; + + qdf_spin_lock_bh(&soc->ast_lock); + if (soc->ast_override_support) + ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr, + pdev->pdev_id); + else + ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr); + + if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress) + dp_peer_del_ast(soc, ast_entry); + + qdf_spin_unlock_bh(&soc->ast_lock); +} +#endif + +#ifdef PEER_CACHE_RX_PKTS +static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer) +{ + qdf_spinlock_create(&peer->bufq_info.bufq_lock); + peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH; + qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH); +} +#else +static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer) +{ +} +#endif + +/* + * dp_peer_create_wifi3() - attach txrx peer + * @soc_hdl: Datapath soc handle + * @vdev_id: id of vdev + * @peer_mac_addr: Peer MAC address + * + * Return: 0 on success, -1 on failure + */ +static QDF_STATUS +dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + uint8_t *peer_mac_addr) +{ + struct dp_peer *peer; + int i; + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_pdev *pdev; + struct cdp_peer_cookie peer_cookie; + enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC; + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + + if (!vdev || !peer_mac_addr) + return QDF_STATUS_E_FAILURE; + + pdev = vdev->pdev; + soc = pdev->soc; + + /* + * If a peer entry with given MAC address already exists, + * reuse the peer and reset the state of peer. + */ + peer = dp_peer_can_reuse(vdev, peer_mac_addr); + + if (peer) { + qdf_atomic_init(&peer->is_default_route_set); + dp_peer_cleanup(vdev, peer, true); + + qdf_spin_lock_bh(&soc->ast_lock); + dp_peer_delete_ast_entries(soc, peer); + peer->delete_in_progress = false; + qdf_spin_unlock_bh(&soc->ast_lock); + + if ((vdev->opmode == wlan_op_mode_sta) && + !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0], + QDF_MAC_ADDR_SIZE)) { + ast_type = CDP_TXRX_AST_TYPE_SELF; + } + dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0); + /* + * Control path maintains a node count which is incremented + * for every new peer create command. Since new peer is not being + * created and earlier reference is reused here, + * peer_unref_delete event is sent to control path to + * increment the count back. + */ + if (soc->cdp_soc.ol_ops->peer_unref_delete) { + soc->cdp_soc.ol_ops->peer_unref_delete( + soc->ctrl_psoc, + pdev->pdev_id, + peer->mac_addr.raw, vdev->mac_addr.raw, + vdev->opmode); + } + + peer->valid = 1; + dp_local_peer_id_alloc(pdev, peer); + + qdf_spinlock_create(&peer->peer_info_lock); + dp_peer_rx_bufq_resources_init(peer); + + DP_STATS_INIT(peer); + DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI); + + return QDF_STATUS_SUCCESS; + } else { + /* + * When a STA roams from RPTR AP to ROOT AP and vice versa, we + * need to remove the AST entry which was earlier added as a WDS + * entry. + * If an AST entry exists, but no peer entry exists with a given + * MAC addresses, we could deduce it as a WDS entry + */ + dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr); + } + +#ifdef notyet + peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev, + soc->mempool_ol_ath_peer); +#else + peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer)); +#endif + + if (!peer) + return QDF_STATUS_E_FAILURE; /* failure */ + + qdf_mem_zero(peer, sizeof(struct dp_peer)); + + TAILQ_INIT(&peer->ast_entry_list); + + /* store provided params */ + peer->vdev = vdev; + + if ((vdev->opmode == wlan_op_mode_sta) && + !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0], + QDF_MAC_ADDR_SIZE)) { + ast_type = CDP_TXRX_AST_TYPE_SELF; + } + dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0); + qdf_spinlock_create(&peer->peer_info_lock); + + dp_peer_rx_bufq_resources_init(peer); + + qdf_mem_copy( + &peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE); + + /* initialize the peer_id */ + for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) + peer->peer_ids[i] = HTT_INVALID_PEER; + + /* reset the ast index to flowid table */ + dp_peer_reset_flowq_map(peer); + + qdf_spin_lock_bh(&soc->peer_ref_mutex); + + qdf_atomic_init(&peer->ref_cnt); + + /* keep one reference for attach */ + qdf_atomic_inc(&peer->ref_cnt); + + /* add this peer into the vdev's list */ + if (wlan_op_mode_sta == vdev->opmode) + TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem); + else + TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem); + + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + + /* TODO: See if hash based search is required */ + dp_peer_find_hash_add(soc, peer); + + /* Initialize the peer state */ + peer->state = OL_TXRX_PEER_STATE_DISC; + + dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") ref_cnt: %d", + vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), + qdf_atomic_read(&peer->ref_cnt)); + /* + * For every peer MAp message search and set if bss_peer + */ + if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw, + QDF_MAC_ADDR_SIZE) == 0 && + (wlan_op_mode_sta != vdev->opmode)) { + dp_info("vdev bss_peer!!"); + peer->bss_peer = 1; + vdev->vap_bss_peer = peer; + } + + if (wlan_op_mode_sta == vdev->opmode && + qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw, + QDF_MAC_ADDR_SIZE) == 0) { + vdev->vap_self_peer = peer; + } + + for (i = 0; i < DP_MAX_TIDS; i++) + qdf_spinlock_create(&peer->rx_tid[i].tid_lock); + + peer->valid = 1; + dp_local_peer_id_alloc(pdev, peer); + DP_STATS_INIT(peer); + DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI); + + qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw, + QDF_MAC_ADDR_SIZE); + peer_cookie.ctx = NULL; + peer_cookie.pdev_id = pdev->pdev_id; + peer_cookie.cookie = pdev->next_peer_cookie++; +#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE + dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc, + (void *)&peer_cookie, + peer->peer_ids[0], WDI_NO_VAL, pdev->pdev_id); +#endif + if (soc->wlanstats_enabled) { + if (!peer_cookie.ctx) { + pdev->next_peer_cookie--; + qdf_err("Failed to initialize peer rate stats"); + } else { + peer->wlanstats_ctx = (struct cdp_peer_rate_stats_ctx *) + peer_cookie.ctx; + } + } + return QDF_STATUS_SUCCESS; +} + +/* + * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev + * @vdev: Datapath VDEV handle + * @reo_dest: pointer to default reo_dest ring for vdev to be populated + * @hash_based: pointer to hash value (enabled/disabled) to be populated + * + * Return: None + */ +static +void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev, + enum cdp_host_reo_dest_ring *reo_dest, + bool *hash_based) +{ + struct dp_soc *soc; + struct dp_pdev *pdev; + + pdev = vdev->pdev; + soc = pdev->soc; + /* + * hash based steering is disabled for Radios which are offloaded + * to NSS + */ + if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) + *hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx); + + /* + * Below line of code will ensure the proper reo_dest ring is chosen + * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP) + */ + *reo_dest = pdev->reo_dest; +} + +#ifdef IPA_OFFLOAD +/** + * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P + * @vdev: Virtual device + * + * Return: true if the vdev is of subtype P2P + * false if the vdev is of any other subtype + */ +static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev) +{ + if (vdev->subtype == wlan_op_subtype_p2p_device || + vdev->subtype == wlan_op_subtype_p2p_cli || + vdev->subtype == wlan_op_subtype_p2p_go) + return true; + + return false; +} + +/* + * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer + * @vdev: Datapath VDEV handle + * @reo_dest: pointer to default reo_dest ring for vdev to be populated + * @hash_based: pointer to hash value (enabled/disabled) to be populated + * + * If IPA is enabled in ini, for SAP mode, disable hash based + * steering, use default reo_dst ring for RX. Use config values for other modes. + * Return: None + */ +static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev, + enum cdp_host_reo_dest_ring *reo_dest, + bool *hash_based) +{ + struct dp_soc *soc; + struct dp_pdev *pdev; + + pdev = vdev->pdev; + soc = pdev->soc; + + dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based); + + /* For P2P-GO interfaces we do not need to change the REO + * configuration even if IPA config is enabled + */ + if (dp_is_vdev_subtype_p2p(vdev)) + return; + + /* + * If IPA is enabled, disable hash-based flow steering and set + * reo_dest_ring_4 as the REO ring to receive packets on. + * IPA is configured to reap reo_dest_ring_4. + * + * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring + * value enum value is from 1 - 4. + * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1 + */ + if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) { + if (vdev->opmode == wlan_op_mode_ap) { + *reo_dest = IPA_REO_DEST_RING_IDX + 1; + *hash_based = 0; + } else if (vdev->opmode == wlan_op_mode_sta && + dp_ipa_is_mdm_platform()) { + *reo_dest = IPA_REO_DEST_RING_IDX + 1; + } + } +} + +#else + +/* + * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer + * @vdev: Datapath VDEV handle + * @reo_dest: pointer to default reo_dest ring for vdev to be populated + * @hash_based: pointer to hash value (enabled/disabled) to be populated + * + * Use system config values for hash based steering. + * Return: None + */ + +static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev, + enum cdp_host_reo_dest_ring *reo_dest, + bool *hash_based) +{ + dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based); +} +#endif /* IPA_OFFLOAD */ + +/* + * dp_peer_setup_wifi3() - initialize the peer + * @soc_hdl: soc handle object + * @vdev_id : vdev_id of vdev object + * @peer_mac: Peer's mac address + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + uint8_t *peer_mac) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_pdev *pdev; + bool hash_based = 0; + enum cdp_host_reo_dest_ring reo_dest; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + struct dp_peer *peer = + dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id); + + if (!vdev || !peer || peer->delete_in_progress) { + status = QDF_STATUS_E_FAILURE; + goto fail; + } + + pdev = vdev->pdev; + dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based); + + dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u", + pdev->pdev_id, vdev->vdev_id, + vdev->opmode, hash_based, reo_dest); + + + /* + * There are corner cases where the AD1 = AD2 = "VAPs address" + * i.e both the devices have same MAC address. In these + * cases we want such pkts to be processed in NULL Q handler + * which is REO2TCL ring. for this reason we should + * not setup reo_queues and default route for bss_peer. + */ + if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) { + status = QDF_STATUS_E_FAILURE; + goto fail; + } + + if (soc->cdp_soc.ol_ops->peer_set_default_routing) { + /* TODO: Check the destination ring number to be passed to FW */ + soc->cdp_soc.ol_ops->peer_set_default_routing( + soc->ctrl_psoc, + peer->vdev->pdev->pdev_id, + peer->mac_addr.raw, + peer->vdev->vdev_id, hash_based, reo_dest); + } + + qdf_atomic_set(&peer->is_default_route_set, 1); + + dp_peer_rx_init(pdev, peer); + dp_peer_tx_init(pdev, peer); + + dp_peer_ppdu_delayed_ba_init(peer); + +fail: + if (peer) + dp_peer_unref_delete(peer); + return status; +} + +/* + * dp_cp_peer_del_resp_handler - Handle the peer delete response + * @soc_hdl: Datapath SOC handle + * @vdev_id: id of virtual device object + * @mac_addr: Mac address of the peer + * + * Return: QDF_STATUS + */ +static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + uint8_t *mac_addr) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_ast_entry *ast_entry = NULL; + txrx_ast_free_cb cb = NULL; + void *cookie; + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + + if (!vdev) + return QDF_STATUS_E_FAILURE; + + qdf_spin_lock_bh(&soc->ast_lock); + + if (soc->ast_override_support) + ast_entry = + dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, + vdev->pdev->pdev_id); + else + ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr); + + /* in case of qwrap we have multiple BSS peers + * with same mac address + * + * AST entry for this mac address will be created + * only for one peer hence it will be NULL here + */ + if (!ast_entry || ast_entry->peer || !ast_entry->delete_in_progress) { + qdf_spin_unlock_bh(&soc->ast_lock); + return QDF_STATUS_E_FAILURE; + } + + if (ast_entry->is_mapped) + soc->ast_table[ast_entry->ast_idx] = NULL; + + DP_STATS_INC(soc, ast.deleted, 1); + dp_peer_ast_hash_remove(soc, ast_entry); + + cb = ast_entry->callback; + cookie = ast_entry->cookie; + ast_entry->callback = NULL; + ast_entry->cookie = NULL; + + soc->num_ast_entries--; + qdf_spin_unlock_bh(&soc->ast_lock); + + if (cb) { + cb(soc->ctrl_psoc, + dp_soc_to_cdp_soc(soc), + cookie, + CDP_TXRX_AST_DELETED); + } + qdf_mem_free(ast_entry); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_set_ba_aging_timeout() - set ba aging timeout per AC + * @txrx_soc: cdp soc handle + * @ac: Access category + * @value: timeout value in millisec + * + * Return: void + */ +static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc, + uint8_t ac, uint32_t value) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + + hal_set_ba_aging_timeout(soc->hal_soc, ac, value); +} + +/* + * dp_get_ba_aging_timeout() - get ba aging timeout per AC + * @txrx_soc: cdp soc handle + * @ac: access category + * @value: timeout value in millisec + * + * Return: void + */ +static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc, + uint8_t ac, uint32_t *value) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + + hal_get_ba_aging_timeout(soc->hal_soc, ac, value); +} + +/* + * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev + * @txrx_soc: cdp soc handle + * @pdev_id: id of physical device object + * @val: reo destination ring index (1 - 4) + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id, + enum cdp_host_reo_dest_ring val) +{ + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc, + pdev_id); + + if (pdev) { + pdev->reo_dest = val; + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * dp_get_pdev_reo_dest() - get the reo destination for this pdev + * @txrx_soc: cdp soc handle + * @pdev_id: id of physical device object + * + * Return: reo destination ring index + */ +static enum cdp_host_reo_dest_ring +dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id) +{ + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc, + pdev_id); + + if (pdev) + return pdev->reo_dest; + else + return cdp_host_reo_dest_ring_unknown; +} + +#ifdef ATH_SUPPORT_NAC +/* + * dp_set_filter_neigh_peers() - set filter neighbour peers for smart mesh + * @pdev_handle: device object + * @val: value to be set + * + * Return: void + */ +static int dp_set_filter_neigh_peers(struct dp_pdev *pdev, + bool val) +{ + /* Enable/Disable smart mesh filtering. This flag will be checked + * during rx processing to check if packets are from NAC clients. + */ + pdev->filter_neighbour_peers = val; + return 0; +} +#else +static int dp_set_filter_neigh_peers(struct dp_pdev *pdev, + bool val) +{ + return 0; +} +#endif /* ATH_SUPPORT_NAC */ + +#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) +/* + * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients) + * address for smart mesh filtering + * @txrx_soc: cdp soc handle + * @vdev_id: id of virtual device object + * @cmd: Add/Del command + * @macaddr: nac client mac address + * + * Return: success/failure + */ +static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc, + uint8_t vdev_id, + uint32_t cmd, uint8_t *macaddr) +{ + struct dp_pdev *pdev; + struct dp_neighbour_peer *peer = NULL; + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + + if (!vdev || !macaddr) + goto fail0; + + pdev = vdev->pdev; + + if (!pdev) + goto fail0; + + /* Store address of NAC (neighbour peer) which will be checked + * against TA of received packets. + */ + if (cmd == DP_NAC_PARAM_ADD) { + peer = (struct dp_neighbour_peer *) qdf_mem_malloc( + sizeof(*peer)); + + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("DP neighbour peer node memory allocation failed")); + goto fail0; + } + + qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0], + macaddr, QDF_MAC_ADDR_SIZE); + peer->vdev = vdev; + + qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); + + /* add this neighbour peer into the list */ + TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer, + neighbour_peer_list_elem); + qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); + + /* first neighbour */ + if (!pdev->neighbour_peers_added) { + QDF_STATUS status = QDF_STATUS_SUCCESS; + + pdev->neighbour_peers_added = true; + dp_mon_filter_setup_smart_monitor(pdev); + status = dp_mon_filter_update(pdev); + if (status != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("smart mon filter setup failed")); + dp_mon_filter_reset_smart_monitor(pdev); + pdev->neighbour_peers_added = false; + } + } + return 1; + + } else if (cmd == DP_NAC_PARAM_DEL) { + qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); + TAILQ_FOREACH(peer, &pdev->neighbour_peers_list, + neighbour_peer_list_elem) { + if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], + macaddr, QDF_MAC_ADDR_SIZE)) { + /* delete this peer from the list */ + TAILQ_REMOVE(&pdev->neighbour_peers_list, + peer, neighbour_peer_list_elem); + qdf_mem_free(peer); + break; + } + } + /* last neighbour deleted */ + if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) { + QDF_STATUS status = QDF_STATUS_SUCCESS; + + pdev->neighbour_peers_added = false; + dp_mon_filter_reset_smart_monitor(pdev); + status = dp_mon_filter_update(pdev); + if (status != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("smart mon filter clear failed")); + } + + } + + qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); + return 1; + + } + +fail0: + return 0; +} +#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ + +/* + * dp_get_sec_type() - Get the security type + * @soc: soc handle + * @vdev_id: id of dp handle + * @peer_mac: mac of datapath PEER handle + * @sec_idx: Security id (mcast, ucast) + * + * return sec_type: Security type + */ +static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac, uint8_t sec_idx) +{ + int sec_type = 0; + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, + peer_mac, 0, vdev_id); + + if (!peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Peer is NULL!\n", __func__); + goto fail; + } + + sec_type = peer->security[sec_idx].sec_type; +fail: + if (peer) + dp_peer_unref_delete(peer); + return sec_type; +} + +/* + * dp_peer_authorize() - authorize txrx peer + * @soc: soc handle + * @vdev_id: id of dp handle + * @peer_mac: mac of datapath PEER handle + * @authorize + * + */ +static QDF_STATUS +dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + uint8_t *peer_mac, uint32_t authorize) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_peer *peer = dp_peer_find_hash_find(soc, + peer_mac, + 0, vdev_id); + + if (!peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Peer is NULL!\n", __func__); + status = QDF_STATUS_E_FAILURE; + } else { + qdf_spin_lock_bh(&soc->peer_ref_mutex); + peer->authorize = authorize ? 1 : 0; + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + } + + if (peer) + dp_peer_unref_delete(peer); + + return status; +} + +/* + * dp_vdev_reset_peer() - Update peer related member in vdev + as peer is going to free + * @vdev: datapath vdev handle + * @peer: dataptah peer handle + * + * Return: None + */ +static void dp_vdev_reset_peer(struct dp_vdev *vdev, + struct dp_peer *peer) +{ + struct dp_peer *bss_peer = NULL; + + if (!vdev) { + dp_err("vdev is NULL"); + } else { + if (vdev->vap_bss_peer == peer) { + vdev->vap_bss_peer = NULL; + qdf_mem_zero(vdev->vap_bss_peer_mac_addr, + QDF_MAC_ADDR_SIZE); + } + + if (vdev && vdev->vap_bss_peer) { + bss_peer = vdev->vap_bss_peer; + DP_UPDATE_STATS(vdev, peer); + } + } +} + +/* + * dp_peer_release_mem() - free dp peer handle memory + * @soc: dataptah soc handle + * @pdev: datapath pdev handle + * @peer: datapath peer handle + * @vdev_opmode: Vdev operation mode + * @vdev_mac_addr: Vdev Mac address + * + * Return: None + */ +static void dp_peer_release_mem(struct dp_soc *soc, + struct dp_pdev *pdev, + struct dp_peer *peer, + enum wlan_op_mode vdev_opmode, + uint8_t *vdev_mac_addr) +{ + if (soc->cdp_soc.ol_ops->peer_unref_delete) + soc->cdp_soc.ol_ops->peer_unref_delete( + soc->ctrl_psoc, + pdev->pdev_id, + peer->mac_addr.raw, vdev_mac_addr, + vdev_opmode); + + /* + * Peer AST list hast to be empty here + */ + DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list)); + + qdf_mem_free(peer); +} + +/** + * dp_delete_pending_vdev() - check and process vdev delete + * @pdev: DP specific pdev pointer + * @vdev: DP specific vdev pointer + * @vdev_id: vdev id corresponding to vdev + * + * This API does following: + * 1) It releases tx flow pools buffers as vdev is + * going down and no peers are associated. + * 2) It also detaches vdev before cleaning vdev (struct dp_vdev) memory + */ +static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev, + uint8_t vdev_id) +{ + ol_txrx_vdev_delete_cb vdev_delete_cb = NULL; + void *vdev_delete_context = NULL; + + vdev_delete_cb = vdev->delete.callback; + vdev_delete_context = vdev->delete.context; + + dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done", + vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw)); + /* all peers are gone, go ahead and delete it */ + dp_tx_flow_pool_unmap_handler(pdev, vdev_id, + FLOW_TYPE_VDEV, vdev_id); + dp_tx_vdev_detach(vdev); + + pdev->soc->vdev_id_map[vdev_id] = NULL; + + if (wlan_op_mode_monitor == vdev->opmode) { + pdev->monitor_vdev = NULL; + } else { + qdf_spin_lock_bh(&pdev->vdev_list_lock); + TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem); + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + } + + dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")", + vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw)); + qdf_mem_free(vdev); + vdev = NULL; + + if (vdev_delete_cb) + vdev_delete_cb(vdev_delete_context); +} + +/* + * dp_peer_unref_delete() - unref and delete peer + * @peer_handle: Datapath peer handle + * + */ +void dp_peer_unref_delete(struct dp_peer *peer) +{ + struct dp_vdev *vdev = peer->vdev; + struct dp_pdev *pdev = vdev->pdev; + struct dp_soc *soc = pdev->soc; + struct dp_peer *tmppeer; + int found = 0; + uint16_t peer_id; + uint16_t vdev_id; + bool vdev_delete = false; + struct cdp_peer_cookie peer_cookie; + enum wlan_op_mode vdev_opmode; + uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE]; + + /* + * Hold the lock all the way from checking if the peer ref count + * is zero until the peer references are removed from the hash + * table and vdev list (if the peer ref count is zero). + * This protects against a new HL tx operation starting to use the + * peer object just after this function concludes it's done being used. + * Furthermore, the lock needs to be held while checking whether the + * vdev's list of peers is empty, to make sure that list is not modified + * concurrently with the empty check. + */ + qdf_spin_lock_bh(&soc->peer_ref_mutex); + if (qdf_atomic_dec_and_test(&peer->ref_cnt)) { + peer_id = peer->peer_ids[0]; + vdev_id = vdev->vdev_id; + + /* + * Make sure that the reference to the peer in + * peer object map is removed + */ + if (peer_id != HTT_INVALID_PEER) + soc->peer_id_to_obj_map[peer_id] = NULL; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer, + QDF_MAC_ADDR_REF(peer->mac_addr.raw)); + + /* remove the reference to the peer from the hash table */ + dp_peer_find_hash_remove(soc, peer); + + qdf_spin_lock_bh(&soc->ast_lock); + if (peer->self_ast_entry) { + dp_peer_del_ast(soc, peer->self_ast_entry); + } + qdf_spin_unlock_bh(&soc->ast_lock); + + TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) { + if (tmppeer == peer) { + found = 1; + break; + } + } + + if (found) { + TAILQ_REMOVE(&peer->vdev->peer_list, peer, + peer_list_elem); + } else { + /*Ignoring the remove operation as peer not found*/ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "peer:%pK not found in vdev:%pK peerlist:%pK", + peer, vdev, &peer->vdev->peer_list); + } + + /* send peer destroy event to upper layer */ + qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw, + QDF_MAC_ADDR_SIZE); + peer_cookie.ctx = NULL; + peer_cookie.ctx = (struct cdp_stats_cookie *) + peer->wlanstats_ctx; +#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE + dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY, + pdev->soc, + (void *)&peer_cookie, + peer->peer_ids[0], + WDI_NO_VAL, + pdev->pdev_id); +#endif + peer->wlanstats_ctx = NULL; + + /* cleanup the peer data */ + dp_peer_cleanup(vdev, peer, false); + /* reset this peer related info in vdev */ + dp_vdev_reset_peer(vdev, peer); + /* save vdev related member in case vdev freed */ + vdev_opmode = vdev->opmode; + qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw, + QDF_MAC_ADDR_SIZE); + /* + * check whether the parent vdev is pending for deleting + * and no peers left. + */ + if (vdev->delete.pending && TAILQ_EMPTY(&vdev->peer_list)) + vdev_delete = true; + /* + * Now that there are no references to the peer, we can + * release the peer reference lock. + */ + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + + /* + * Invoke soc.ol_ops->peer_unref_delete out of + * peer_ref_mutex in case deadlock issue. + */ + dp_peer_release_mem(soc, pdev, peer, + vdev_opmode, + vdev_mac_addr); + /* + * Delete the vdev if it's waiting all peer deleted + * and it's chance now. + */ + if (vdev_delete) + dp_delete_pending_vdev(pdev, vdev, vdev_id); + + } else { + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + } +} + +#ifdef PEER_CACHE_RX_PKTS +static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer) +{ + qdf_list_destroy(&peer->bufq_info.cached_bufq); + qdf_spinlock_destroy(&peer->bufq_info.bufq_lock); +} +#else +static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer) +{ +} +#endif + +/* + * dp_peer_detach_wifi3() – Detach txrx peer + * @soc_hdl: soc handle + * @vdev_id: id of dp handle + * @peer_mac: mac of datapath PEER handle + * @bitmap: bitmap indicating special handling of request. + * + */ +static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + uint8_t *peer_mac, uint32_t bitmap) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, + 0, vdev_id); + + /* Peer can be null for monitor vap mac address */ + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid peer\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (!peer->valid) { + dp_peer_unref_delete(peer); + dp_err("Invalid peer: "QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(peer_mac)); + return QDF_STATUS_E_ALREADY; + } + + peer->valid = 0; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + FL("peer %pK ("QDF_MAC_ADDR_FMT")"), peer, + QDF_MAC_ADDR_REF(peer->mac_addr.raw)); + + dp_local_peer_id_free(peer->vdev->pdev, peer); + + /* Drop all rx packets before deleting peer */ + dp_clear_peer_internal(soc, peer); + + dp_peer_rx_bufq_resources_deinit(peer); + + qdf_spinlock_destroy(&peer->peer_info_lock); + dp_peer_multipass_list_remove(peer); + + /* + * Remove the reference added during peer_attach. + * The peer will still be left allocated until the + * PEER_UNMAP message arrives to remove the other + * reference, added by the PEER_MAP message. + */ + dp_peer_unref_delete(peer); + /* + * Remove the reference taken above + */ + dp_peer_unref_delete(peer); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer + * @soc_hdl: Datapath soc handle + * @vdev_id: virtual interface id + * + * Return: MAC address on success, NULL on failure. + * + */ +static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + + if (!vdev) + return NULL; + + return vdev->mac_addr.raw; +} + +/* + * dp_vdev_set_wds() - Enable per packet stats + * @soc: DP soc handle + * @vdev_id: id of DP VDEV handle + * @val: value + * + * Return: none + */ +static int dp_vdev_set_wds(struct cdp_soc_t *soc, uint8_t vdev_id, uint32_t val) +{ + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + + if (!vdev) + return QDF_STATUS_E_FAILURE; + + vdev->wds_enabled = val; + return QDF_STATUS_SUCCESS; +} + +/* + * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode + * @soc_hdl: datapath soc handle + * @pdev_id: physical device instance id + * + * Return: virtual interface id + */ +static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (qdf_unlikely(!pdev)) + return -EINVAL; + + return pdev->monitor_vdev->vdev_id; +} + +static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + + if (!vdev) { + dp_err("vdev for id %d is NULL", vdev_id); + return -EINVAL; + } + + return vdev->opmode; +} + +/** + * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev + * @soc_hdl: ol_txrx_soc_handle handle + * @vdev_id: vdev id for which os rx handles are needed + * @stack_fn_p: pointer to stack function pointer + * @osif_handle_p: pointer to ol_osif_vdev_handle + * + * Return: void + */ +static +void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + ol_txrx_rx_fp *stack_fn_p, + ol_osif_vdev_handle *osif_vdev_p) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + + if (!vdev) + return; + + *stack_fn_p = vdev->osif_rx_stack; + *osif_vdev_p = vdev->osif_vdev; +} + +/** + * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev + * @soc_hdl: datapath soc handle + * @vdev_id: virtual device/interface id + * + * Return: Handle to control pdev + */ +static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3( + struct cdp_soc_t *soc_hdl, + uint8_t vdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + struct dp_pdev *pdev; + + if (!vdev || !vdev->pdev) + return NULL; + + pdev = vdev->pdev; + return (struct cdp_cfg *)pdev->wlan_cfg_ctx; +} + +/** + * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer + * ring based on target + * @soc: soc handle + * @mac_for_pdev: WIN- pdev_id, MCL- mac id + * @pdev: physical device handle + * @ring_num: mac id + * @htt_tlv_filter: tlv filter + * + * Return: zero on success, non-zero on failure + */ +static inline +QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev, + struct dp_pdev *pdev, uint8_t ring_num, + struct htt_rx_ring_tlv_filter htt_tlv_filter) +{ + QDF_STATUS status; + + if (soc->wlan_cfg_ctx->rxdma1_enable) + status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_buf_ring[ring_num] + .hal_srng, + RXDMA_MONITOR_BUF, + RX_MONITOR_BUFFER_SIZE, + &htt_tlv_filter); + else + status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, + pdev->rx_mac_buf_ring[ring_num] + .hal_srng, + RXDMA_BUF, RX_DATA_BUFFER_SIZE, + &htt_tlv_filter); + + return status; +} + +static inline void +dp_pdev_disable_mcopy_code(struct dp_pdev *pdev) +{ + pdev->mcopy_mode = 0; + pdev->monitor_configured = false; + pdev->monitor_vdev = NULL; + qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q); +} + +/** + * dp_reset_monitor_mode() - Disable monitor mode + * @soc_hdl: Datapath soc handle + * @pdev_id: id of datapath PDEV handle + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + uint8_t special_monitor) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + qdf_spin_lock_bh(&pdev->mon_lock); + + pdev->monitor_vdev = NULL; + pdev->monitor_configured = false; + + /* + * Lite monitor mode, smart monitor mode and monitor + * mode uses this APIs to filter reset and mode disable + */ + if (pdev->mcopy_mode) { +#if defined(FEATURE_PERPKT_INFO) + dp_pdev_disable_mcopy_code(pdev); + dp_mon_filter_reset_mcopy_mode(pdev); +#endif /* FEATURE_PERPKT_INFO */ + } else if (special_monitor) { +#if defined(ATH_SUPPORT_NAC) + dp_mon_filter_reset_smart_monitor(pdev); +#endif /* ATH_SUPPORT_NAC */ + } else { + dp_mon_filter_reset_mon_mode(pdev); + } + + status = dp_mon_filter_update(pdev); + if (status != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Failed to reset monitor filters")); + } + + qdf_spin_unlock_bh(&pdev->mon_lock); + return QDF_STATUS_SUCCESS; +} + +/** + * dp_get_tx_pending() - read pending tx + * @pdev_handle: Datapath PDEV handle + * + * Return: outstanding tx + */ +static uint32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + return qdf_atomic_read(&pdev->num_tx_outstanding); +} + +/** + * dp_get_peer_mac_from_peer_id() - get peer mac + * @pdev_handle: Datapath PDEV handle + * @peer_id: Peer ID + * @peer_mac: MAC addr of PEER + * + * Return: QDF_STATUS + */ +static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc, + uint32_t peer_id, + uint8_t *peer_mac) +{ + struct dp_peer *peer; + + if (soc && peer_mac) { + peer = dp_peer_find_by_id((struct dp_soc *)soc, + (uint16_t)peer_id); + if (peer) { + qdf_mem_copy(peer_mac, peer->mac_addr.raw, + QDF_MAC_ADDR_SIZE); + dp_peer_unref_del_find_by_id(peer); + return QDF_STATUS_SUCCESS; + } + } + + return QDF_STATUS_E_FAILURE; +} + +/** + * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode + * @vdev_handle: Datapath VDEV handle + * @smart_monitor: Flag to denote if its smart monitor mode + * + * Return: 0 on success, not 0 on failure + */ +static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *soc, + uint8_t vdev_id, + uint8_t special_monitor) +{ + struct dp_pdev *pdev; + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!vdev) + return QDF_STATUS_E_FAILURE; + + pdev = vdev->pdev; + pdev->monitor_vdev = vdev; + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN, + "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n", + pdev, pdev->pdev_id, pdev->soc, vdev); + + /* + * do not configure monitor buf ring and filter for smart and + * lite monitor + * for smart monitor filters are added along with first NAC + * for lite monitor required configuration done through + * dp_set_pdev_param + */ + if (special_monitor) + return QDF_STATUS_SUCCESS; + + /*Check if current pdev's monitor_vdev exists */ + if (pdev->monitor_configured) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "monitor vap already created vdev=%pK\n", vdev); + return QDF_STATUS_E_RESOURCES; + } + + pdev->monitor_configured = true; + dp_mon_buf_delayed_replenish(pdev); + + dp_mon_filter_setup_mon_mode(pdev); + status = dp_mon_filter_update(pdev); + if (status != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Failed to reset monitor filters")); + dp_mon_filter_reset_mon_mode(pdev); + pdev->monitor_configured = false; + pdev->monitor_vdev = NULL; + } + + return status; +} + +/** + * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter + * @soc: soc handle + * @pdev_id: id of Datapath PDEV handle + * @filter_val: Flag to select Filter for monitor mode + * Return: 0 on success, not 0 on failure + */ +static QDF_STATUS +dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + struct cdp_monitor_filter *filter_val) +{ + /* Many monitor VAPs can exists in a system but only one can be up at + * anytime + */ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_vdev *vdev; + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + vdev = pdev->monitor_vdev; + + if (!vdev) + return QDF_STATUS_E_FAILURE; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN, + "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK", + pdev, pdev_id, soc, vdev); + + /*Check if current pdev's monitor_vdev exists */ + if (!pdev->monitor_vdev) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "vdev=%pK", vdev); + qdf_assert(vdev); + } + + /* update filter mode, type in pdev structure */ + pdev->mon_filter_mode = filter_val->mode; + pdev->fp_mgmt_filter = filter_val->fp_mgmt; + pdev->fp_ctrl_filter = filter_val->fp_ctrl; + pdev->fp_data_filter = filter_val->fp_data; + pdev->mo_mgmt_filter = filter_val->mo_mgmt; + pdev->mo_ctrl_filter = filter_val->mo_ctrl; + pdev->mo_data_filter = filter_val->mo_data; + + dp_mon_filter_setup_mon_mode(pdev); + status = dp_mon_filter_update(pdev); + if (status != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Failed to set filter for advance mon mode")); + dp_mon_filter_reset_mon_mode(pdev); + } + + return status; +} + +/** + * dp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture + * @cdp_soc : data path soc handle + * @pdev_id : pdev_id + * @nbuf: Management frame buffer + */ +static QDF_STATUS +dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf) +{ + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc, + pdev_id); + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + dp_deliver_mgmt_frm(pdev, nbuf); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_set_bsscolor() - sets bsscolor for tx capture + * @pdev: Datapath PDEV handle + * @bsscolor: new bsscolor + */ +static void +dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor) +{ + pdev->rx_mon_recv_status.bsscolor = bsscolor; +} + +/** + * dp_pdev_get_filter_ucast_data() - get DP PDEV monitor ucast filter + * @soc : data path soc handle + * @pdev_id : pdev_id + * Return: true on ucast filter flag set + */ +static bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + if ((pdev->fp_data_filter & FILTER_DATA_UCAST) || + (pdev->mo_data_filter & FILTER_DATA_UCAST)) + return true; + + return false; +} + +/** + * dp_pdev_get_filter_mcast_data() - get DP PDEV monitor mcast filter + * @pdev_handle: Datapath PDEV handle + * Return: true on mcast filter flag set + */ +static bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + if ((pdev->fp_data_filter & FILTER_DATA_MCAST) || + (pdev->mo_data_filter & FILTER_DATA_MCAST)) + return true; + + return false; +} + +/** + * dp_pdev_get_filter_non_data() - get DP PDEV monitor non_data filter + * @pdev_handle: Datapath PDEV handle + * Return: true on non data filter flag set + */ +static bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) || + (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) { + if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) || + (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) { + return true; + } + } + + return false; +} + +#ifdef MESH_MODE_SUPPORT +void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + FL("val %d"), val); + vdev->mesh_vdev = val; +} + +/* + * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter + * @vdev_hdl: virtual device object + * @val: value to be set + * + * Return: void + */ +void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + FL("val %d"), val); + vdev->mesh_rx_filter = val; +} +#endif + +#ifdef VDEV_PEER_PROTOCOL_COUNT +static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc, + int8_t vdev_id, + bool enable) +{ + struct dp_vdev *vdev; + + vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + dp_info("enable %d vdev_id %d", enable, vdev_id); + vdev->peer_protocol_count_track = enable; +} + +static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc, + int8_t vdev_id, + int drop_mask) +{ + struct dp_vdev *vdev; + + vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id); + vdev->peer_protocol_count_dropmask = drop_mask; +} + +static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc, + int8_t vdev_id) +{ + struct dp_vdev *vdev; + + vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track, + vdev_id); + return vdev->peer_protocol_count_track; +} + +static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc, + int8_t vdev_id) +{ + struct dp_vdev *vdev; + + vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask, + vdev_id); + return vdev->peer_protocol_count_dropmask; +} + +#endif + +bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data) +{ + uint8_t pdev_count; + + for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) { + if (soc->pdev_list[pdev_count] && + soc->pdev_list[pdev_count] == data) + return true; + } + return false; +} + +/** + * dp_rx_bar_stats_cb(): BAR received stats callback + * @soc: SOC handle + * @cb_ctxt: Call back context + * @reo_status: Reo status + * + * return: void + */ +void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status) +{ + struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt; + struct hal_reo_queue_status *queue_status = &(reo_status->queue_status); + + if (!dp_check_pdev_exists(soc, pdev)) { + dp_err_rl("pdev doesn't exist"); + return; + } + + if (!qdf_atomic_read(&soc->cmn_init_done)) + return; + + if (queue_status->header.status != HAL_REO_CMD_SUCCESS) { + DP_PRINT_STATS("REO stats failure %d", + queue_status->header.status); + qdf_atomic_set(&(pdev->stats_cmd_complete), 1); + return; + } + + pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt; + qdf_atomic_set(&(pdev->stats_cmd_complete), 1); + +} + +/** + * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level + * @vdev: DP VDEV handle + * + * return: void + */ +void dp_aggregate_vdev_stats(struct dp_vdev *vdev, + struct cdp_vdev_stats *vdev_stats) +{ + struct dp_peer *peer = NULL; + struct dp_soc *soc = NULL; + + if (!vdev || !vdev->pdev) + return; + + soc = vdev->pdev->soc; + + qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats)); + + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) + dp_update_vdev_stats(vdev_stats, peer); + +#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE + dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc, + vdev_stats, vdev->vdev_id, + UPDATE_VDEV_STATS, vdev->pdev->pdev_id); +#endif +} + +void dp_aggregate_pdev_stats(struct dp_pdev *pdev) +{ + struct dp_vdev *vdev = NULL; + struct dp_soc *soc; + struct cdp_vdev_stats *vdev_stats = + qdf_mem_malloc(sizeof(struct cdp_vdev_stats)); + + if (!vdev_stats) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "DP alloc failure - unable to get alloc vdev stats"); + return; + } + + qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx)); + qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx)); + qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i)); + + if (pdev->mcopy_mode) + DP_UPDATE_STATS(pdev, pdev->invalid_peer); + + soc = pdev->soc; + qdf_spin_lock_bh(&soc->peer_ref_mutex); + qdf_spin_lock_bh(&pdev->vdev_list_lock); + TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { + + dp_aggregate_vdev_stats(vdev, vdev_stats); + dp_update_pdev_stats(pdev, vdev_stats); + dp_update_pdev_ingress_stats(pdev, vdev); + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + qdf_mem_free(vdev_stats); + +#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE + dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats, + pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id); +#endif +} + +/** + * dp_vdev_getstats() - get vdev packet level stats + * @vdev_handle: Datapath VDEV handle + * @stats: cdp network device stats structure + * + * Return: QDF_STATUS + */ +static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle, + struct cdp_dev_stats *stats) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + struct dp_pdev *pdev; + struct dp_soc *soc; + struct cdp_vdev_stats *vdev_stats; + + if (!vdev) + return QDF_STATUS_E_FAILURE; + + pdev = vdev->pdev; + if (!pdev) + return QDF_STATUS_E_FAILURE; + + soc = pdev->soc; + + vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats)); + + if (!vdev_stats) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "DP alloc failure - unable to get alloc vdev stats"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&soc->peer_ref_mutex); + dp_aggregate_vdev_stats(vdev, vdev_stats); + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + + stats->tx_packets = vdev_stats->tx_i.rcvd.num; + stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes; + + stats->tx_errors = vdev_stats->tx.tx_failed + + vdev_stats->tx_i.dropped.dropped_pkt.num; + stats->tx_dropped = stats->tx_errors; + + stats->rx_packets = vdev_stats->rx.unicast.num + + vdev_stats->rx.multicast.num + + vdev_stats->rx.bcast.num; + stats->rx_bytes = vdev_stats->rx.unicast.bytes + + vdev_stats->rx.multicast.bytes + + vdev_stats->rx.bcast.bytes; + + qdf_mem_free(vdev_stats); + + return QDF_STATUS_SUCCESS; +} + + +/** + * dp_pdev_getstats() - get pdev packet level stats + * @pdev_handle: Datapath PDEV handle + * @stats: cdp network device stats structure + * + * Return: QDF_STATUS + */ +static void dp_pdev_getstats(struct cdp_pdev *pdev_handle, + struct cdp_dev_stats *stats) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + dp_aggregate_pdev_stats(pdev); + + stats->tx_packets = pdev->stats.tx_i.rcvd.num; + stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes; + + stats->tx_errors = pdev->stats.tx.tx_failed + + pdev->stats.tx_i.dropped.dropped_pkt.num; + stats->tx_dropped = stats->tx_errors; + + stats->rx_packets = pdev->stats.rx.unicast.num + + pdev->stats.rx.multicast.num + + pdev->stats.rx.bcast.num; + stats->rx_bytes = pdev->stats.rx.unicast.bytes + + pdev->stats.rx.multicast.bytes + + pdev->stats.rx.bcast.bytes; + stats->rx_errors = pdev->stats.err.desc_alloc_fail + + pdev->stats.err.ip_csum_err + + pdev->stats.err.tcp_udp_csum_err + + pdev->stats.rx.err.mic_err + + pdev->stats.rx.err.decrypt_err + + pdev->stats.err.rxdma_error + + pdev->stats.err.reo_error; + stats->rx_dropped = pdev->stats.dropped.msdu_not_done + + pdev->stats.dropped.mec + + pdev->stats.dropped.mesh_filter + + pdev->stats.dropped.wifi_parse + + pdev->stats.dropped.mon_rx_drop + + pdev->stats.dropped.mon_radiotap_update_err; +} + +/** + * dp_get_device_stats() - get interface level packet stats + * @soc: soc handle + * @id : vdev_id or pdev_id based on type + * @stats: cdp network device stats structure + * @type: device type pdev/vdev + * + * Return: QDF_STATUS + */ +static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc, uint8_t id, + struct cdp_dev_stats *stats, + uint8_t type) +{ + switch (type) { + case UPDATE_VDEV_STATS: + return dp_vdev_getstats( + (struct cdp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3( + (struct dp_soc *)soc, id), stats); + case UPDATE_PDEV_STATS: + { + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3( + (struct dp_soc *)soc, + id); + if (pdev) { + dp_pdev_getstats((struct cdp_pdev *)pdev, + stats); + return QDF_STATUS_SUCCESS; + } + } + break; + default: + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "apstats cannot be updated for this input " + "type %d", type); + break; + } + + return QDF_STATUS_E_FAILURE; +} + +const +char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type) +{ + switch (ring_type) { + case REO_DST: + return "Reo_dst"; + case REO_EXCEPTION: + return "Reo_exception"; + case REO_CMD: + return "Reo_cmd"; + case REO_REINJECT: + return "Reo_reinject"; + case REO_STATUS: + return "Reo_status"; + case WBM2SW_RELEASE: + return "wbm2sw_release"; + case TCL_DATA: + return "tcl_data"; + case TCL_CMD: + return "tcl_cmd"; + case TCL_STATUS: + return "tcl_status"; + case SW2WBM_RELEASE: + return "sw2wbm_release"; + case RXDMA_BUF: + return "Rxdma_buf"; + case RXDMA_DST: + return "Rxdma_dst"; + case RXDMA_MONITOR_BUF: + return "Rxdma_monitor_buf"; + case RXDMA_MONITOR_DESC: + return "Rxdma_monitor_desc"; + case RXDMA_MONITOR_STATUS: + return "Rxdma_monitor_status"; + default: + dp_err("Invalid ring type"); + break; + } + return "Invalid"; +} + +/* + * dp_print_napi_stats(): NAPI stats + * @soc - soc handle + */ +void dp_print_napi_stats(struct dp_soc *soc) +{ + hif_print_napi_stats(soc->hif_handle); +} + +/** + * dp_txrx_host_stats_clr(): Reinitialize the txrx stats + * @vdev: DP_VDEV handle + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +dp_txrx_host_stats_clr(struct dp_vdev *vdev) +{ + struct dp_peer *peer = NULL; + + if (!vdev || !vdev->pdev) + return QDF_STATUS_E_FAILURE; + + DP_STATS_CLR(vdev->pdev); + DP_STATS_CLR(vdev->pdev->soc); + DP_STATS_CLR(vdev); + + hif_clear_napi_stats(vdev->pdev->soc->hif_handle); + + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if (!peer) + return QDF_STATUS_E_FAILURE; + DP_STATS_CLR(peer); + +#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE + dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc, + &peer->stats, peer->peer_ids[0], + UPDATE_PEER_STATS, vdev->pdev->pdev_id); +#endif + } + +#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE + dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc, + &vdev->stats, vdev->vdev_id, + UPDATE_VDEV_STATS, vdev->pdev->pdev_id); +#endif + return QDF_STATUS_SUCCESS; +} + +/* + * dp_get_host_peer_stats()- function to print peer stats + * @soc: dp_soc handle + * @mac_addr: mac address of the peer + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, + mac_addr, 0, + DP_VDEV_ALL); + if (!peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Invalid peer\n", __func__); + status = QDF_STATUS_E_FAILURE; + goto fail; + } + + dp_print_peer_stats(peer); + dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL); +fail: + if (peer) + dp_peer_unref_delete(peer); + + return status; +} + +/** + * dp_txrx_stats_help() - Helper function for Txrx_Stats + * + * Return: None + */ +static void dp_txrx_stats_help(void) +{ + dp_info("Command: iwpriv wlan0 txrx_stats "); + dp_info("stats_option:"); + dp_info(" 1 -- HTT Tx Statistics"); + dp_info(" 2 -- HTT Rx Statistics"); + dp_info(" 3 -- HTT Tx HW Queue Statistics"); + dp_info(" 4 -- HTT Tx HW Sched Statistics"); + dp_info(" 5 -- HTT Error Statistics"); + dp_info(" 6 -- HTT TQM Statistics"); + dp_info(" 7 -- HTT TQM CMDQ Statistics"); + dp_info(" 8 -- HTT TX_DE_CMN Statistics"); + dp_info(" 9 -- HTT Tx Rate Statistics"); + dp_info(" 10 -- HTT Rx Rate Statistics"); + dp_info(" 11 -- HTT Peer Statistics"); + dp_info(" 12 -- HTT Tx SelfGen Statistics"); + dp_info(" 13 -- HTT Tx MU HWQ Statistics"); + dp_info(" 14 -- HTT RING_IF_INFO Statistics"); + dp_info(" 15 -- HTT SRNG Statistics"); + dp_info(" 16 -- HTT SFM Info Statistics"); + dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics"); + dp_info(" 18 -- HTT Peer List Details"); + dp_info(" 20 -- Clear Host Statistics"); + dp_info(" 21 -- Host Rx Rate Statistics"); + dp_info(" 22 -- Host Tx Rate Statistics"); + dp_info(" 23 -- Host Tx Statistics"); + dp_info(" 24 -- Host Rx Statistics"); + dp_info(" 25 -- Host AST Statistics"); + dp_info(" 26 -- Host SRNG PTR Statistics"); + dp_info(" 27 -- Host Mon Statistics"); + dp_info(" 28 -- Host REO Queue Statistics"); + dp_info(" 29 -- Host Soc cfg param Statistics"); + dp_info(" 30 -- Host pdev cfg param Statistics"); + dp_info(" 31 -- Host FISA stats"); + dp_info(" 32 -- Host Register Work stats"); +} + +/** + * dp_print_host_stats()- Function to print the stats aggregated at host + * @vdev_handle: DP_VDEV handle + * @type: host stats type + * + * Return: 0 on success, print error message in case of failure + */ +static int +dp_print_host_stats(struct dp_vdev *vdev, + struct cdp_txrx_stats_req *req) +{ + struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev; + enum cdp_host_txrx_stats type = + dp_stats_mapping_table[req->stats][STATS_HOST]; + + dp_aggregate_pdev_stats(pdev); + + switch (type) { + case TXRX_CLEAR_STATS: + dp_txrx_host_stats_clr(vdev); + break; + case TXRX_RX_RATE_STATS: + dp_print_rx_rates(vdev); + break; + case TXRX_TX_RATE_STATS: + dp_print_tx_rates(vdev); + break; + case TXRX_TX_HOST_STATS: + dp_print_pdev_tx_stats(pdev); + dp_print_soc_tx_stats(pdev->soc); + break; + case TXRX_RX_HOST_STATS: + dp_print_pdev_rx_stats(pdev); + dp_print_soc_rx_stats(pdev->soc); + break; + case TXRX_AST_STATS: + dp_print_ast_stats(pdev->soc); + dp_print_peer_table(vdev); + break; + case TXRX_SRNG_PTR_STATS: + dp_print_ring_stats(pdev); + break; + case TXRX_RX_MON_STATS: + dp_print_pdev_rx_mon_stats(pdev); + break; + case TXRX_REO_QUEUE_STATS: + dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc, + req->peer_addr); + break; + case TXRX_SOC_CFG_PARAMS: + dp_print_soc_cfg_params(pdev->soc); + break; + case TXRX_PDEV_CFG_PARAMS: + dp_print_pdev_cfg_params(pdev); + break; + case TXRX_NAPI_STATS: + dp_print_napi_stats(pdev->soc); + break; + case TXRX_SOC_INTERRUPT_STATS: + dp_print_soc_interrupt_stats(pdev->soc); + break; + case TXRX_SOC_FSE_STATS: + dp_rx_dump_fisa_table(pdev->soc); + break; + case TXRX_HAL_REG_WRITE_STATS: + hal_dump_reg_write_stats(pdev->soc->hal_soc); + hal_dump_reg_write_srng_stats(pdev->soc->hal_soc); + break; + case TXRX_SOC_REO_HW_DESC_DUMP: + dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc, + vdev->vdev_id); + break; + default: + dp_info("Wrong Input For TxRx Host Stats"); + dp_txrx_stats_help(); + break; + } + return 0; +} + +/* + * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer + * modes are enabled or not. + * @dp_pdev: dp pdev handle. + * + * Return: bool + */ +static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev) +{ + if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && + !pdev->mcopy_mode) + return true; + else + return false; +} + +/* + *dp_set_bpr_enable() - API to enable/disable bpr feature + *@pdev_handle: DP_PDEV handle. + *@val: Provided value. + * + *Return: 0 for success. nonzero for failure. + */ +static QDF_STATUS +dp_set_bpr_enable(struct dp_pdev *pdev, int val) +{ + switch (val) { + case CDP_BPR_DISABLE: + pdev->bpr_enable = CDP_BPR_DISABLE; + if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en && + !pdev->tx_sniffer_enable && !pdev->mcopy_mode) { + dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id); + } else if (pdev->enhanced_stats_en && + !pdev->tx_sniffer_enable && !pdev->mcopy_mode && + !pdev->pktlog_ppdu_stats) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_ENH_STATS, + pdev->pdev_id); + } + break; + case CDP_BPR_ENABLE: + pdev->bpr_enable = CDP_BPR_ENABLE; + if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && + !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_BPR, + pdev->pdev_id); + } else if (pdev->enhanced_stats_en && + !pdev->tx_sniffer_enable && !pdev->mcopy_mode && + !pdev->pktlog_ppdu_stats) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_BPR_ENH, + pdev->pdev_id); + } else if (pdev->pktlog_ppdu_stats) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_BPR_PKTLOG, + pdev->pdev_id); + } + break; + default: + break; + } + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_pdev_tid_stats_ingress_inc + * @pdev: pdev handle + * @val: increase in value + * + * Return: void + */ +static void +dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val) +{ + pdev->stats.tid_stats.ingress_stack += val; +} + +/* + * dp_pdev_tid_stats_osif_drop + * @pdev: pdev handle + * @val: increase in value + * + * Return: void + */ +static void +dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val) +{ + pdev->stats.tid_stats.osif_drop += val; +} + + +/* + * dp_config_debug_sniffer()- API to enable/disable debug sniffer + * @pdev: DP_PDEV handle + * @val: user provided value + * + * Return: 0 for success. nonzero for failure. + */ +static QDF_STATUS +dp_config_debug_sniffer(struct dp_pdev *pdev, int val) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + /* + * Note: The mirror copy mode cannot co-exist with any other + * monitor modes. Hence disabling the filter for this mode will + * reset the monitor destination ring filters. + */ + if (pdev->mcopy_mode) { +#ifdef FEATURE_PERPKT_INFO + dp_pdev_disable_mcopy_code(pdev); + dp_mon_filter_reset_mcopy_mode(pdev); + status = dp_mon_filter_update(pdev); + if (status != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Failed to reset AM copy mode filters")); + } +#endif /* FEATURE_PERPKT_INFO */ + } + + switch (val) { + case 0: + pdev->tx_sniffer_enable = 0; + pdev->monitor_configured = false; + + /* + * We don't need to reset the Rx monitor status ring or call + * the API dp_ppdu_ring_reset() if all debug sniffer mode is + * disabled. The Rx monitor status ring will be disabled when + * the last mode using the monitor status ring get disabled. + */ + if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en && + !pdev->bpr_enable) { + dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id); + } else if (pdev->enhanced_stats_en && !pdev->bpr_enable) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id); + } else if (!pdev->enhanced_stats_en && pdev->bpr_enable) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_BPR_ENH, + pdev->pdev_id); + } else { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_BPR, + pdev->pdev_id); + } + break; + + case 1: + pdev->tx_sniffer_enable = 1; + pdev->monitor_configured = false; + + if (!pdev->pktlog_ppdu_stats) + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id); + break; + case 2: + if (pdev->monitor_vdev) { + status = QDF_STATUS_E_RESOURCES; + break; + } + +#ifdef FEATURE_PERPKT_INFO + pdev->mcopy_mode = 1; + pdev->tx_sniffer_enable = 0; + pdev->monitor_configured = true; + + /* + * Setup the M copy mode filter. + */ + dp_mon_filter_setup_mcopy_mode(pdev); + status = dp_mon_filter_update(pdev); + if (status != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Failed to set M_copy mode filters")); + dp_mon_filter_reset_mcopy_mode(pdev); + dp_pdev_disable_mcopy_code(pdev); + return status; + } + + if (!pdev->pktlog_ppdu_stats) + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id); +#endif /* FEATURE_PERPKT_INFO */ + break; + + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid value"); + break; + } + return status; +} + +#ifdef FEATURE_PERPKT_INFO +/* + * dp_enable_enhanced_stats()- API to enable enhanced statistcs + * @soc_handle: DP_SOC handle + * @pdev_id: id of DP_PDEV handle + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id) +{ + struct dp_pdev *pdev = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + if (pdev->enhanced_stats_en == 0) + dp_cal_client_timer_start(pdev->cal_client_ctx); + + pdev->enhanced_stats_en = 1; + + dp_mon_filter_setup_enhanced_stats(pdev); + status = dp_mon_filter_update(pdev); + if (status != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Failed to set enhanced mode filters")); + dp_mon_filter_reset_enhanced_stats(pdev); + dp_cal_client_timer_stop(pdev->cal_client_ctx); + pdev->enhanced_stats_en = 0; + return QDF_STATUS_E_FAILURE; + } + + if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) { + dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id); + } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_BPR_ENH, + pdev->pdev_id); + } + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_disable_enhanced_stats()- API to disable enhanced statistcs + * + * @param soc - the soc handle + * @param pdev_id - pdev_id of pdev + * @return - QDF_STATUS + */ +static QDF_STATUS +dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id) +{ + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + if (pdev->enhanced_stats_en == 1) + dp_cal_client_timer_stop(pdev->cal_client_ctx); + + pdev->enhanced_stats_en = 0; + + if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) { + dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id); + } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_BPR, + pdev->pdev_id); + } + + dp_mon_filter_reset_enhanced_stats(pdev); + if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Failed to reset enhanced mode filters")); + } + + return QDF_STATUS_SUCCESS; +} +#endif /* FEATURE_PERPKT_INFO */ + +/* + * dp_get_fw_peer_stats()- function to print peer stats + * @soc: soc handle + * @pdev_id : id of the pdev handle + * @mac_addr: mac address of the peer + * @cap: Type of htt stats requested + * @is_wait: if set, wait on completion from firmware response + * + * Currently Supporting only MAC ID based requests Only + * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY + * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM + * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id, + uint8_t *mac_addr, + uint32_t cap, uint32_t is_wait) +{ + int i; + uint32_t config_param0 = 0; + uint32_t config_param1 = 0; + uint32_t config_param2 = 0; + uint32_t config_param3 = 0; + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1); + config_param0 |= (1 << (cap + 1)); + + for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) { + config_param1 |= (1 << i); + } + + config_param2 |= (mac_addr[0] & 0x000000ff); + config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00); + config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000); + config_param2 |= ((mac_addr[3] << 24) & 0xff000000); + + config_param3 |= (mac_addr[4] & 0x000000ff); + config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00); + + if (is_wait) { + qdf_event_reset(&pdev->fw_peer_stats_event); + dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO, + config_param0, config_param1, + config_param2, config_param3, + 0, 1, 0); + qdf_wait_single_event(&pdev->fw_peer_stats_event, + DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC); + } else { + dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO, + config_param0, config_param1, + config_param2, config_param3, + 0, 0, 0); + } + + return QDF_STATUS_SUCCESS; + +} + +/* This struct definition will be removed from here + * once it get added in FW headers*/ +struct httstats_cmd_req { + uint32_t config_param0; + uint32_t config_param1; + uint32_t config_param2; + uint32_t config_param3; + int cookie; + u_int8_t stats_id; +}; + +/* + * dp_get_htt_stats: function to process the httstas request + * @soc: DP soc handle + * @pdev_id: id of pdev handle + * @data: pointer to request data + * @data_len: length for request data + * + * return: QDF_STATUS + */ +static QDF_STATUS +dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data, + uint32_t data_len) +{ + struct httstats_cmd_req *req = (struct httstats_cmd_req *)data; + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req)); + dp_h2t_ext_stats_msg_send(pdev, req->stats_id, + req->config_param0, req->config_param1, + req->config_param2, req->config_param3, + req->cookie, 0, 0); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev + * @pdev: DP_PDEV handle + * @prio: tidmap priority value passed by the user + * + * Return: QDF_STATUS_SUCCESS on success + */ +static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev, + uint8_t prio) +{ + struct dp_soc *soc = pdev->soc; + + soc->tidmap_prty = prio; + + hal_tx_set_tidmap_prty(soc->hal_soc, prio); + return QDF_STATUS_SUCCESS; +} + +/* + * dp_get_peer_param: function to get parameters in peer + * @cdp_soc: DP soc handle + * @vdev_id: id of vdev handle + * @peer_mac: peer mac address + * @param: parameter type to be set + * @val : address of buffer + * + * Return: val + */ +static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, + uint8_t *peer_mac, + enum cdp_peer_param_type param, + cdp_config_param_type *val) +{ + return QDF_STATUS_SUCCESS; +} + +/* + * dp_set_peer_param: function to set parameters in peer + * @cdp_soc: DP soc handle + * @vdev_id: id of vdev handle + * @peer_mac: peer mac address + * @param: parameter type to be set + * @val: value of parameter to be set + * + * Return: 0 for success. nonzero for failure. + */ +static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, + uint8_t *peer_mac, + enum cdp_peer_param_type param, + cdp_config_param_type val) +{ + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc, + peer_mac, 0, vdev_id); + + if (!peer || peer->delete_in_progress) + goto fail; + + switch (param) { + case CDP_CONFIG_NAWDS: + peer->nawds_enabled = val.cdp_peer_param_nawds; + break; + case CDP_CONFIG_NAC: + peer->nac = !!(val.cdp_peer_param_nac); + break; + default: + break; + } + +fail: + if (peer) + dp_peer_unref_delete(peer); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_get_pdev_param: function to get parameters from pdev + * @cdp_soc: DP soc handle + * @pdev_id: id of pdev handle + * @param: parameter type to be get + * @value : buffer for value + * + * Return: status + */ +static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, + enum cdp_pdev_param_type param, + cdp_config_param_type *val) +{ + struct cdp_pdev *pdev = (struct cdp_pdev *) + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc, + pdev_id); + if (!pdev) + return QDF_STATUS_E_FAILURE; + + switch (param) { + case CDP_CONFIG_VOW: + val->cdp_pdev_param_cfg_vow = + ((struct dp_pdev *)pdev)->delay_stats_flag; + break; + case CDP_TX_PENDING: + val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev); + break; + case CDP_FILTER_MCAST_DATA: + val->cdp_pdev_param_fltr_mcast = + dp_pdev_get_filter_mcast_data(pdev); + break; + case CDP_FILTER_NO_DATA: + val->cdp_pdev_param_fltr_none = + dp_pdev_get_filter_non_data(pdev); + break; + case CDP_FILTER_UCAST_DATA: + val->cdp_pdev_param_fltr_ucast = + dp_pdev_get_filter_ucast_data(pdev); + break; + default: + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_set_pdev_param: function to set parameters in pdev + * @cdp_soc: DP soc handle + * @pdev_id: id of pdev handle + * @param: parameter type to be set + * @val: value of parameter to be set + * + * Return: 0 for success. nonzero for failure. + */ +static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, + enum cdp_pdev_param_type param, + cdp_config_param_type val) +{ + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc, + pdev_id); + if (!pdev) + return QDF_STATUS_E_FAILURE; + + switch (param) { + case CDP_CONFIG_TX_CAPTURE: + return dp_config_debug_sniffer(pdev, + val.cdp_pdev_param_tx_capture); + case CDP_CONFIG_DEBUG_SNIFFER: + return dp_config_debug_sniffer(pdev, + val.cdp_pdev_param_dbg_snf); + case CDP_CONFIG_BPR_ENABLE: + return dp_set_bpr_enable(pdev, val.cdp_pdev_param_bpr_enable); + case CDP_CONFIG_PRIMARY_RADIO: + pdev->is_primary = val.cdp_pdev_param_primary_radio; + break; + case CDP_CONFIG_CAPTURE_LATENCY: + pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy; + break; + case CDP_INGRESS_STATS: + dp_pdev_tid_stats_ingress_inc(pdev, + val.cdp_pdev_param_ingrs_stats); + break; + case CDP_OSIF_DROP: + dp_pdev_tid_stats_osif_drop(pdev, + val.cdp_pdev_param_osif_drop); + break; + case CDP_CONFIG_ENH_RX_CAPTURE: + return dp_config_enh_rx_capture(pdev, + val.cdp_pdev_param_en_rx_cap); + case CDP_CONFIG_ENH_TX_CAPTURE: + return dp_config_enh_tx_capture(pdev, + val.cdp_pdev_param_en_tx_cap); + case CDP_CONFIG_HMMC_TID_OVERRIDE: + pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd; + break; + case CDP_CONFIG_HMMC_TID_VALUE: + pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid; + break; + case CDP_CHAN_NOISE_FLOOR: + pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr; + break; + case CDP_TIDMAP_PRTY: + dp_set_pdev_tidmap_prty_wifi3(pdev, + val.cdp_pdev_param_tidmap_prty); + break; + case CDP_FILTER_NEIGH_PEERS: + dp_set_filter_neigh_peers(pdev, + val.cdp_pdev_param_fltr_neigh_peers); + break; + case CDP_MONITOR_CHANNEL: + pdev->mon_chan_num = val.cdp_pdev_param_monitor_chan; + break; + case CDP_MONITOR_FREQUENCY: + pdev->mon_chan_freq = val.cdp_pdev_param_mon_freq; + break; + case CDP_CONFIG_BSS_COLOR: + dp_mon_set_bsscolor(pdev, val.cdp_pdev_param_bss_color); + break; + default: + return QDF_STATUS_E_INVAL; + } + return QDF_STATUS_SUCCESS; +} + +/* + * dp_calculate_delay_stats: function to get rx delay stats + * @cdp_soc: DP soc handle + * @vdev_id: id of DP vdev handle + * @nbuf: skb + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, + qdf_nbuf_t nbuf) +{ + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)cdp_soc, + vdev_id); + if (vdev) { + dp_rx_compute_delay(vdev, nbuf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_get_vdev_param: function to get parameters from vdev + * @cdp_soc : DP soc handle + * @vdev_id: id of DP vdev handle + * @param: parameter type to get value + * @val: buffer address + * + * return: status + */ +static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, + enum cdp_vdev_param_type param, + cdp_config_param_type *val) +{ + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)cdp_soc, + vdev_id); + if (!vdev) + return QDF_STATUS_E_FAILURE; + + switch (param) { + case CDP_ENABLE_WDS: + val->cdp_vdev_param_wds = vdev->wds_enabled; + break; + case CDP_ENABLE_MEC: + val->cdp_vdev_param_mec = vdev->mec_enabled; + break; + case CDP_ENABLE_DA_WAR: + val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled; + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "param value %d is wrong\n", + param); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_set_vdev_param: function to set parameters in vdev + * @cdp_soc : DP soc handle + * @vdev_id: id of DP vdev handle + * @param: parameter type to get value + * @val: value + * + * return: QDF_STATUS + */ +static QDF_STATUS +dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, + enum cdp_vdev_param_type param, cdp_config_param_type val) +{ + struct dp_soc *dsoc = (struct dp_soc *)cdp_soc; + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3(dsoc, vdev_id); + uint32_t var = 0; + + if (!vdev) + return QDF_STATUS_E_FAILURE; + + switch (param) { + case CDP_ENABLE_WDS: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "wds_enable %d for vdev(%pK) id(%d)\n", + val.cdp_vdev_param_wds, vdev, vdev->vdev_id); + vdev->wds_enabled = val.cdp_vdev_param_wds; + break; + case CDP_ENABLE_MEC: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "mec_enable %d for vdev(%pK) id(%d)\n", + val.cdp_vdev_param_mec, vdev, vdev->vdev_id); + vdev->mec_enabled = val.cdp_vdev_param_mec; + break; + case CDP_ENABLE_DA_WAR: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "da_war_enable %d for vdev(%pK) id(%d)\n", + val.cdp_vdev_param_da_war, vdev, vdev->vdev_id); + vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war; + dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *) + vdev->pdev->soc)); + break; + case CDP_ENABLE_NAWDS: + vdev->nawds_enabled = val.cdp_vdev_param_nawds; + break; + case CDP_ENABLE_MCAST_EN: + vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en; + break; + case CDP_ENABLE_PROXYSTA: + vdev->proxysta_vdev = val.cdp_vdev_param_proxysta; + break; + case CDP_UPDATE_TDLS_FLAGS: + vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags; + break; + case CDP_CFG_WDS_AGING_TIMER: + var = val.cdp_vdev_param_aging_tmr; + if (!var) + qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer); + else if (var != vdev->wds_aging_timer_val) + qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var); + + vdev->wds_aging_timer_val = var; + break; + case CDP_ENABLE_AP_BRIDGE: + if (wlan_op_mode_sta != vdev->opmode) + vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en; + else + vdev->ap_bridge_enabled = false; + break; + case CDP_ENABLE_CIPHER: + vdev->sec_type = val.cdp_vdev_param_cipher_en; + break; + case CDP_ENABLE_QWRAP_ISOLATION: + vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation; + break; + case CDP_UPDATE_MULTIPASS: + vdev->multipass_en = val.cdp_vdev_param_update_multipass; + break; + case CDP_TX_ENCAP_TYPE: + vdev->tx_encap_type = val.cdp_vdev_param_tx_encap; + break; + case CDP_RX_DECAP_TYPE: + vdev->rx_decap_type = val.cdp_vdev_param_rx_decap; + break; + case CDP_TID_VDEV_PRTY: + vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty; + break; + case CDP_TIDMAP_TBL_ID: + vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id; + break; +#ifdef MESH_MODE_SUPPORT + case CDP_MESH_RX_FILTER: + dp_peer_set_mesh_rx_filter((struct cdp_vdev *)vdev, + val.cdp_vdev_param_mesh_rx_filter); + break; + case CDP_MESH_MODE: + dp_peer_set_mesh_mode((struct cdp_vdev *)vdev, + val.cdp_vdev_param_mesh_mode); + break; +#endif + case CDP_ENABLE_CSUM: + dp_info("vdev_id %d enable Checksum %d", vdev_id, + val.cdp_enable_tx_checksum); + vdev->csum_enabled = val.cdp_enable_tx_checksum; + break; + default: + break; + } + + dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_set_psoc_param: function to set parameters in psoc + * @cdp_soc : DP soc handle + * @param: parameter type to be set + * @val: value of parameter to be set + * + * return: QDF_STATUS + */ +static QDF_STATUS +dp_set_psoc_param(struct cdp_soc_t *cdp_soc, + enum cdp_psoc_param_type param, cdp_config_param_type val) +{ + struct dp_soc *soc = (struct dp_soc *)cdp_soc; + struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx; + + switch (param) { + case CDP_ENABLE_RATE_STATS: + soc->wlanstats_enabled = val.cdp_psoc_param_en_rate_stats; + break; + case CDP_SET_NSS_CFG: + wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, + val.cdp_psoc_param_en_nss_cfg); + /* + * TODO: masked out based on the per offloaded radio + */ + switch (val.cdp_psoc_param_en_nss_cfg) { + case dp_nss_cfg_default: + break; + case dp_nss_cfg_first_radio: + /* + * This configuration is valid for single band radio which + * is also NSS offload. + */ + case dp_nss_cfg_dbdc: + case dp_nss_cfg_dbtc: + wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0); + wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0); + wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0); + wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0); + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid offload config %d", + val.cdp_psoc_param_en_nss_cfg); + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + FL("nss-wifi<0> nss config is enabled")); + break; + + default: + break; + } + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_get_psoc_param: function to get parameters in soc + * @cdp_soc : DP soc handle + * @param: parameter type to be set + * @val: address of buffer + * + * return: status + */ +static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc, + enum cdp_psoc_param_type param, + cdp_config_param_type *val) +{ + return QDF_STATUS_SUCCESS; +} + +/** + * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer + * @soc: DP_SOC handle + * @pdev_id: id of DP_PDEV handle + * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode + * @is_tx_pkt_cap_enable: enable/disable Tx packet capture in monitor mode + * @peer_mac: MAC address for which the above need to be enabled/disabled + * + * Return: Success if Rx & Tx capture is enabled for peer, false otherwise + */ +QDF_STATUS +dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc, + uint8_t pdev_id, + bool is_rx_pkt_cap_enable, + bool is_tx_pkt_cap_enable, + uint8_t *peer_mac) +{ + struct dp_peer *peer; + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + peer = (struct dp_peer *)dp_find_peer_by_addr((struct cdp_pdev *)pdev, + peer_mac); + if (!peer) { + dp_err("Invalid Peer"); + return QDF_STATUS_E_FAILURE; + } + + dp_peer_set_rx_capture_enabled(peer, is_rx_pkt_cap_enable); + dp_peer_set_tx_capture_enabled(peer, is_tx_pkt_cap_enable); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev + * @soc: DP_SOC handle + * @vdev_id: id of DP_VDEV handle + * @map_id:ID of map that needs to be updated + * + * Return: QDF_STATUS + */ +static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle soc, + uint8_t vdev_id, + uint8_t map_id) +{ + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + if (vdev) { + vdev->dscp_tid_map_id = map_id; + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +#ifdef DP_RATETABLE_SUPPORT +static int dp_txrx_get_ratekbps(int preamb, int mcs, + int htflag, int gintval) +{ + uint32_t rix; + uint16_t ratecode; + + return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1, + (uint8_t)preamb, 1, &rix, &ratecode); +} +#else +static int dp_txrx_get_ratekbps(int preamb, int mcs, + int htflag, int gintval) +{ + return 0; +} +#endif + +/* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats + * @soc: DP soc handle + * @pdev_id: id of DP pdev handle + * @pdev_stats: buffer to copy to + * + * return : status success/failure + */ +static QDF_STATUS +dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id, + struct cdp_pdev_stats *pdev_stats) +{ + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + if (!pdev) + return QDF_STATUS_E_FAILURE; + + dp_aggregate_pdev_stats(pdev); + + qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats)); + return QDF_STATUS_SUCCESS; +} + +/* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP + * @vdev: DP vdev handle + * @buf: buffer containing specific stats structure + * + * Returns: void + */ +static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev, + void *buf) +{ + struct cdp_tx_ingress_stats *host_stats = NULL; + + if (!buf) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid host stats buf"); + return; + } + host_stats = (struct cdp_tx_ingress_stats *)buf; + + DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, + host_stats->mcast_en.mcast_pkt.num, + host_stats->mcast_en.mcast_pkt.bytes); + DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, + host_stats->mcast_en.dropped_map_error); + DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac, + host_stats->mcast_en.dropped_self_mac); + DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail, + host_stats->mcast_en.dropped_send_fail); + DP_STATS_INC(vdev, tx_i.mcast_en.ucast, + host_stats->mcast_en.ucast); + DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, + host_stats->mcast_en.fail_seg_alloc); + DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, + host_stats->mcast_en.clone_fail); +} + +/* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP + * @soc: DP soc handle + * @vdev_id: id of DP vdev handle + * @buf: buffer containing specific stats structure + * @stats_id: stats type + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc, + uint8_t vdev_id, + void *buf, + uint16_t stats_id) +{ + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + if (!vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid vdev handle"); + return QDF_STATUS_E_FAILURE; + } + + switch (stats_id) { + case DP_VDEV_STATS_PKT_CNT_ONLY: + break; + case DP_VDEV_STATS_TX_ME: + dp_txrx_update_vdev_me_stats(vdev, buf); + break; + default: + qdf_info("Invalid stats_id %d", stats_id); + break; + } + + return QDF_STATUS_SUCCESS; +} + +/* dp_txrx_get_peer_stats - will return cdp_peer_stats + * @soc: soc handle + * @vdev_id: id of vdev handle + * @peer_mac: mac of DP_PEER handle + * @peer_stats: buffer to copy to + * return : status success/failure + */ +static QDF_STATUS +dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac, struct cdp_peer_stats *peer_stats) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, + peer_mac, 0, vdev_id); + + if (!peer || peer->delete_in_progress) { + status = QDF_STATUS_E_FAILURE; + } else + qdf_mem_copy(peer_stats, &peer->stats, + sizeof(struct cdp_peer_stats)); + + if (peer) + dp_peer_unref_delete(peer); + + return status; +} + +/* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats + * @param soc - soc handle + * @param vdev_id - vdev_id of vdev object + * @param peer_mac - mac address of the peer + * @param type - enum of required stats + * @param buf - buffer to hold the value + * return : status success/failure + */ +static QDF_STATUS +dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac, enum cdp_peer_stats_type type, + cdp_peer_stats_param_t *buf) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, + peer_mac, 0, vdev_id); + + if (!peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid Peer for Mac "QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(peer_mac)); + ret = QDF_STATUS_E_FAILURE; + } else if (type < cdp_peer_stats_max) { + switch (type) { + case cdp_peer_tx_ucast: + buf->tx_ucast = peer->stats.tx.ucast; + break; + case cdp_peer_tx_mcast: + buf->tx_mcast = peer->stats.tx.mcast; + break; + case cdp_peer_tx_rate: + buf->tx_rate = peer->stats.tx.tx_rate; + break; + case cdp_peer_tx_last_tx_rate: + buf->last_tx_rate = peer->stats.tx.last_tx_rate; + break; + case cdp_peer_tx_inactive_time: + buf->tx_inactive_time = peer->stats.tx.inactive_time; + break; + case cdp_peer_tx_ratecode: + buf->tx_ratecode = peer->stats.tx.tx_ratecode; + break; + case cdp_peer_tx_flags: + buf->tx_flags = peer->stats.tx.tx_flags; + break; + case cdp_peer_tx_power: + buf->tx_power = peer->stats.tx.tx_power; + break; + case cdp_peer_rx_rate: + buf->rx_rate = peer->stats.rx.rx_rate; + break; + case cdp_peer_rx_last_rx_rate: + buf->last_rx_rate = peer->stats.rx.last_rx_rate; + break; + case cdp_peer_rx_ratecode: + buf->rx_ratecode = peer->stats.rx.rx_ratecode; + break; + case cdp_peer_rx_ucast: + buf->rx_ucast = peer->stats.rx.unicast; + break; + case cdp_peer_rx_flags: + buf->rx_flags = peer->stats.rx.rx_flags; + break; + case cdp_peer_rx_avg_rssi: + buf->rx_avg_rssi = peer->stats.rx.avg_rssi; + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid value"); + ret = QDF_STATUS_E_FAILURE; + break; + } + } else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid value"); + ret = QDF_STATUS_E_FAILURE; + } + + if (peer) + dp_peer_unref_delete(peer); + + return ret; +} + +/* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer + * @soc: soc handle + * @vdev_id: id of vdev handle + * @peer_mac: mac of DP_PEER handle + * + * return : QDF_STATUS + */ +static QDF_STATUS +dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, + peer_mac, 0, vdev_id); + + if (!peer || peer->delete_in_progress) { + status = QDF_STATUS_E_FAILURE; + goto fail; + } + + qdf_mem_zero(&peer->stats, sizeof(peer->stats)); + +fail: + if (peer) + dp_peer_unref_delete(peer); + + return status; +} + +/* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats + * @vdev_handle: DP_VDEV handle + * @buf: buffer for vdev stats + * + * return : int + */ +static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc, uint8_t vdev_id, + void *buf, bool is_aggregate) +{ + struct cdp_vdev_stats *vdev_stats; + struct dp_pdev *pdev; + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + + if (!vdev) + return 1; + + pdev = vdev->pdev; + if (!pdev) + return 1; + + vdev_stats = (struct cdp_vdev_stats *)buf; + + if (is_aggregate) { + qdf_spin_lock_bh(&((struct dp_soc *)soc)->peer_ref_mutex); + dp_aggregate_vdev_stats(vdev, buf); + qdf_spin_unlock_bh(&((struct dp_soc *)soc)->peer_ref_mutex); + } else { + qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats)); + } + + return 0; +} + +/* + * dp_get_total_per(): get total per + * @soc: DP soc handle + * @pdev_id: id of DP_PDEV handle + * + * Return: % error rate using retries per packet and success packets + */ +static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id) +{ + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + + if (!pdev) + return 0; + + dp_aggregate_pdev_stats(pdev); + if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0) + return 0; + return ((pdev->stats.tx.retries * 100) / + ((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries))); +} + +/* + * dp_txrx_stats_publish(): publish pdev stats into a buffer + * @soc: DP soc handle + * @pdev_id: id of DP_PDEV handle + * @buf: to hold pdev_stats + * + * Return: int + */ +static int +dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id, + struct cdp_stats_extd *buf) +{ + struct cdp_txrx_stats_req req = {0,}; + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + + if (!pdev) + return TXRX_STATS_LEVEL_OFF; + + dp_aggregate_pdev_stats(pdev); + req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX; + req.cookie_val = 1; + dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0, + req.param1, req.param2, req.param3, 0, + req.cookie_val, 0); + + msleep(DP_MAX_SLEEP_TIME); + + req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX; + req.cookie_val = 1; + dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0, + req.param1, req.param2, req.param3, 0, + req.cookie_val, 0); + + msleep(DP_MAX_SLEEP_TIME); + qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats)); + + return TXRX_STATS_LEVEL; +} + +/** + * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev + * @soc: soc handle + * @pdev_id: id of DP_PDEV handle + * @map_id: ID of map that needs to be updated + * @tos: index value in map + * @tid: tid value passed by the user + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle, + uint8_t pdev_id, + uint8_t map_id, + uint8_t tos, uint8_t tid) +{ + uint8_t dscp; + struct dp_soc *soc = (struct dp_soc *)soc_handle; + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK; + pdev->dscp_tid_map[map_id][dscp] = tid; + + if (map_id < soc->num_hw_dscp_tid_map) + hal_tx_update_dscp_tid(soc->hal_soc, tid, + map_id, dscp); + else + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_fw_stats_process(): Process TxRX FW stats request + * @vdev_handle: DP VDEV handle + * @req: stats request + * + * return: int + */ +static int dp_fw_stats_process(struct dp_vdev *vdev, + struct cdp_txrx_stats_req *req) +{ + struct dp_pdev *pdev = NULL; + uint32_t stats = req->stats; + uint8_t mac_id = req->mac_id; + + if (!vdev) { + DP_TRACE(NONE, "VDEV not found"); + return 1; + } + pdev = vdev->pdev; + + /* + * For HTT_DBG_EXT_STATS_RESET command, FW need to config + * from param0 to param3 according to below rule: + * + * PARAM: + * - config_param0 : start_offset (stats type) + * - config_param1 : stats bmask from start offset + * - config_param2 : stats bmask from start offset + 32 + * - config_param3 : stats bmask from start offset + 64 + */ + if (req->stats == CDP_TXRX_STATS_0) { + req->param0 = HTT_DBG_EXT_STATS_PDEV_TX; + req->param1 = 0xFFFFFFFF; + req->param2 = 0xFFFFFFFF; + req->param3 = 0xFFFFFFFF; + } else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) { + req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id); + } + + return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0, + req->param1, req->param2, req->param3, + 0, 0, mac_id); +} + +/** + * dp_txrx_stats_request - function to map to firmware and host stats + * @soc: soc handle + * @vdev_id: virtual device ID + * @req: stats request + * + * Return: QDF_STATUS + */ +static +QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle, + uint8_t vdev_id, + struct cdp_txrx_stats_req *req) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle); + int host_stats; + int fw_stats; + enum cdp_stats stats; + int num_stats; + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, + vdev_id); + + if (!vdev || !req) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid vdev/req instance"); + return QDF_STATUS_E_INVAL; + } + + if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) { + dp_err("Invalid mac id request"); + return QDF_STATUS_E_INVAL; + } + + stats = req->stats; + if (stats >= CDP_TXRX_MAX_STATS) + return QDF_STATUS_E_INVAL; + + /* + * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available + * has to be updated if new FW HTT stats added + */ + if (stats > CDP_TXRX_STATS_HTT_MAX) + stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX; + + num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table); + + if (stats >= num_stats) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Invalid stats option: %d", __func__, stats); + return QDF_STATUS_E_INVAL; + } + + req->stats = stats; + fw_stats = dp_stats_mapping_table[stats][STATS_FW]; + host_stats = dp_stats_mapping_table[stats][STATS_HOST]; + + dp_info("stats: %u fw_stats_type: %d host_stats: %d", + stats, fw_stats, host_stats); + + if (fw_stats != TXRX_FW_STATS_INVALID) { + /* update request with FW stats type */ + req->stats = fw_stats; + return dp_fw_stats_process(vdev, req); + } + + if ((host_stats != TXRX_HOST_STATS_INVALID) && + (host_stats <= TXRX_HOST_STATS_MAX)) + return dp_print_host_stats(vdev, req); + else + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "Wrong Input for TxRx Stats"); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_txrx_dump_stats() - Dump statistics + * @value - Statistics option + */ +static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value, + enum qdf_stats_verbosity_level level) +{ + struct dp_soc *soc = + (struct dp_soc *)psoc; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!soc) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: soc is NULL", __func__); + return QDF_STATUS_E_INVAL; + } + + switch (value) { + case CDP_TXRX_PATH_STATS: + dp_txrx_path_stats(soc); + dp_print_soc_interrupt_stats(soc); + hal_dump_reg_write_stats(soc->hal_soc); + break; + + case CDP_RX_RING_STATS: + dp_print_per_ring_stats(soc); + break; + + case CDP_TXRX_TSO_STATS: + dp_print_tso_stats(soc, level); + break; + + case CDP_DUMP_TX_FLOW_POOL_INFO: + if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH) + cdp_dump_flow_pool_info((struct cdp_soc_t *)soc); + break; + + case CDP_DP_NAPI_STATS: + dp_print_napi_stats(soc); + break; + + case CDP_TXRX_DESC_STATS: + /* TODO: NOT IMPLEMENTED */ + break; + + case CDP_DP_RX_FISA_STATS: + dp_rx_dump_fisa_stats(soc); + break; + + default: + status = QDF_STATUS_E_INVAL; + break; + } + + return status; + +} + +/** + * dp_txrx_clear_dump_stats() - clear dumpStats + * @soc- soc handle + * @value - stats option + * + * Return: 0 - Success, non-zero - failure + */ +static +QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + uint8_t value) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!soc) { + dp_err("%s: soc is NULL", __func__); + return QDF_STATUS_E_INVAL; + } + + switch (value) { + case CDP_TXRX_TSO_STATS: + dp_txrx_clear_tso_stats(soc); + break; + + default: + status = QDF_STATUS_E_INVAL; + break; + } + + return status; +} + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +/** + * dp_update_flow_control_parameters() - API to store datapath + * config parameters + * @soc: soc handle + * @cfg: ini parameter handle + * + * Return: void + */ +static inline +void dp_update_flow_control_parameters(struct dp_soc *soc, + struct cdp_config_params *params) +{ + soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold = + params->tx_flow_stop_queue_threshold; + soc->wlan_cfg_ctx->tx_flow_start_queue_offset = + params->tx_flow_start_queue_offset; +} +#else +static inline +void dp_update_flow_control_parameters(struct dp_soc *soc, + struct cdp_config_params *params) +{ +} +#endif + +#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT +/* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */ +#define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024 + +/* Max packet limit for RX REAP Loop (dp_rx_process) */ +#define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024 + +static +void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc, + struct cdp_config_params *params) +{ + soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = + params->tx_comp_loop_pkt_limit; + + if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX) + soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true; + else + soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false; + + soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = + params->rx_reap_loop_pkt_limit; + + if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX) + soc->wlan_cfg_ctx->rx_enable_eol_data_check = true; + else + soc->wlan_cfg_ctx->rx_enable_eol_data_check = false; + + soc->wlan_cfg_ctx->rx_hp_oos_update_limit = + params->rx_hp_oos_update_limit; + + dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u", + soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit, + soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check, + soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit, + soc->wlan_cfg_ctx->rx_enable_eol_data_check, + soc->wlan_cfg_ctx->rx_hp_oos_update_limit); +} +#else +static inline +void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc, + struct cdp_config_params *params) +{ } +#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */ + +/** + * dp_update_config_parameters() - API to store datapath + * config parameters + * @soc: soc handle + * @cfg: ini parameter handle + * + * Return: status + */ +static +QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc, + struct cdp_config_params *params) +{ + struct dp_soc *soc = (struct dp_soc *)psoc; + + if (!(soc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Invalid handle", __func__); + return QDF_STATUS_E_INVAL; + } + + soc->wlan_cfg_ctx->tso_enabled = params->tso_enable; + soc->wlan_cfg_ctx->lro_enabled = params->lro_enable; + soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable; + soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload = + params->p2p_tcp_udp_checksumoffload; + soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload = + params->nan_tcp_udp_checksumoffload; + soc->wlan_cfg_ctx->tcp_udp_checksumoffload = + params->tcp_udp_checksumoffload; + soc->wlan_cfg_ctx->napi_enabled = params->napi_enable; + soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable; + soc->wlan_cfg_ctx->gro_enabled = params->gro_enable; + + dp_update_rx_soft_irq_limit_params(soc, params); + dp_update_flow_control_parameters(soc, params); + + return QDF_STATUS_SUCCESS; +} + +static struct cdp_wds_ops dp_ops_wds = { + .vdev_set_wds = dp_vdev_set_wds, +#ifdef WDS_VENDOR_EXTENSION + .txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy, + .txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update, +#endif +}; + +/* + * dp_txrx_data_tx_cb_set(): set the callback for non standard tx + * @soc_hdl - datapath soc handle + * @vdev_id - virtual interface id + * @callback - callback function + * @ctxt: callback context + * + */ +static void +dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + ol_txrx_data_tx_cb callback, void *ctxt) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + + if (!vdev) + return; + + vdev->tx_non_std_data_callback.func = callback; + vdev->tx_non_std_data_callback.ctxt = ctxt; +} + +/** + * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev + * @soc: datapath soc handle + * @pdev_id: id of datapath pdev handle + * + * Return: opaque pointer to dp txrx handle + */ +static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id) +{ + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + if (qdf_unlikely(!pdev)) + return NULL; + + return pdev->dp_txrx_handle; +} + +/** + * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev + * @soc: datapath soc handle + * @pdev_id: id of datapath pdev handle + * @dp_txrx_hdl: opaque pointer for dp_txrx_handle + * + * Return: void + */ +static void +dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id, + void *dp_txrx_hdl) +{ + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + + if (!pdev) + return; + + pdev->dp_txrx_handle = dp_txrx_hdl; +} + +/** + * dp_vdev_get_dp_ext_handle() - get dp handle from vdev + * @soc: datapath soc handle + * @vdev_id: vdev id + * + * Return: opaque pointer to dp txrx handle + */ +static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + + if (!vdev) + return NULL; + + return vdev->vdev_dp_ext_handle; +} + +/** + * dp_vdev_set_dp_ext_handle() - set dp handle in vdev + * @soc: datapath soc handle + * @vdev_id: vdev id + * @size: size of advance dp handle + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc, uint8_t vdev_id, + uint16_t size) +{ + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + void *dp_ext_handle; + + if (!vdev) + return QDF_STATUS_E_FAILURE; + + dp_ext_handle = qdf_mem_malloc(size); + + if (!dp_ext_handle) + return QDF_STATUS_E_FAILURE; + + vdev->vdev_dp_ext_handle = dp_ext_handle; + return QDF_STATUS_SUCCESS; +} + +/** + * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc + * @soc_handle: datapath soc handle + * + * Return: opaque pointer to external dp (non-core DP) + */ +static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle) +{ + struct dp_soc *soc = (struct dp_soc *)soc_handle; + + return soc->external_txrx_handle; +} + +/** + * dp_soc_set_dp_txrx_handle() - set external dp handle in soc + * @soc_handle: datapath soc handle + * @txrx_handle: opaque pointer to external dp (non-core DP) + * + * Return: void + */ +static void +dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle) +{ + struct dp_soc *soc = (struct dp_soc *)soc_handle; + + soc->external_txrx_handle = txrx_handle; +} + +/** + * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping + * @soc_hdl: datapath soc handle + * @pdev_id: id of the datapath pdev handle + * @lmac_id: lmac id + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_soc_map_pdev_to_lmac + (struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + uint32_t lmac_id) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + + wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx, + pdev_id, + lmac_id); + + /*Set host PDEV ID for lmac_id*/ + wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx, + pdev_id, + lmac_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping + * @soc_hdl: datapath soc handle + * @pdev_id: id of the datapath pdev handle + * @lmac_id: lmac id + * + * In the event of a dynamic mode change, update the pdev to lmac mapping + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_soc_handle_pdev_mode_change + (struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + uint32_t lmac_id) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_vdev *vdev = NULL; + uint8_t hw_pdev_id, mac_id; + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, + pdev_id); + int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx); + + if (qdf_unlikely(!pdev)) + return QDF_STATUS_E_FAILURE; + + pdev->lmac_id = lmac_id; + dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id); + + /*Set host PDEV ID for lmac_id*/ + wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx, + pdev->pdev_id, + lmac_id); + + hw_pdev_id = + dp_get_target_pdev_id_for_host_pdev_id(soc, + pdev->pdev_id); + + /* + * When NSS offload is enabled, send pdev_id->lmac_id + * and pdev_id to hw_pdev_id to NSS FW + */ + if (nss_config) { + mac_id = pdev->lmac_id; + if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id) + soc->cdp_soc.ol_ops-> + pdev_update_lmac_n_target_pdev_id( + soc->ctrl_psoc, + &pdev_id, &mac_id, &hw_pdev_id); + } + + qdf_spin_lock_bh(&pdev->vdev_list_lock); + TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { + HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, + hw_pdev_id); + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_soc_set_pdev_status_down() - set pdev down/up status + * @soc: datapath soc handle + * @pdev_id: id of datapath pdev handle + * @is_pdev_down: pdev down/up status + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id, + bool is_pdev_down) +{ + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + if (!pdev) + return QDF_STATUS_E_FAILURE; + + pdev->is_pdev_down = is_pdev_down; + return QDF_STATUS_SUCCESS; +} + +/** + * dp_get_cfg_capabilities() - get dp capabilities + * @soc_handle: datapath soc handle + * @dp_caps: enum for dp capabilities + * + * Return: bool to determine if dp caps is enabled + */ +static bool +dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle, + enum cdp_capabilities dp_caps) +{ + struct dp_soc *soc = (struct dp_soc *)soc_handle; + + return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps); +} + +#ifdef FEATURE_AST +static QDF_STATUS +dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + uint8_t *peer_mac) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct dp_peer *peer = + dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id); + + /* Peer can be null for monitor vap mac address */ + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid peer\n", __func__); + return QDF_STATUS_E_FAILURE; + } + /* + * For BSS peer, new peer is not created on alloc_node if the + * peer with same address already exists , instead refcnt is + * increased for existing peer. Correspondingly in delete path, + * only refcnt is decreased; and peer is only deleted , when all + * references are deleted. So delete_in_progress should not be set + * for bss_peer, unless only 3 reference remains (peer map reference, + * peer hash table reference and above local reference). + */ + if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 3)) { + status = QDF_STATUS_E_FAILURE; + goto fail; + } + + qdf_spin_lock_bh(&soc->ast_lock); + peer->delete_in_progress = true; + dp_peer_delete_ast_entries(soc, peer); + qdf_spin_unlock_bh(&soc->ast_lock); + +fail: + if (peer) + dp_peer_unref_delete(peer); + return status; +} +#endif + +#ifdef ATH_SUPPORT_NAC_RSSI +/** + * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC + * @soc_hdl: DP soc handle + * @vdev_id: id of DP vdev handle + * @mac_addr: neighbour mac + * @rssi: rssi value + * + * Return: 0 for success. nonzero for failure. + */ +static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc, + uint8_t vdev_id, + char *mac_addr, + uint8_t *rssi) +{ + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + struct dp_pdev *pdev; + struct dp_neighbour_peer *peer = NULL; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!vdev) + return status; + + pdev = vdev->pdev; + *rssi = 0; + qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); + TAILQ_FOREACH(peer, &pdev->neighbour_peers_list, + neighbour_peer_list_elem) { + if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], + mac_addr, QDF_MAC_ADDR_SIZE) == 0) { + *rssi = peer->rssi; + status = QDF_STATUS_SUCCESS; + break; + } + } + qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); + return status; +} + +static QDF_STATUS +dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc, + uint8_t vdev_id, + enum cdp_nac_param_cmd cmd, char *bssid, + char *client_macaddr, + uint8_t chan_num) +{ + struct dp_soc *soc = (struct dp_soc *)cdp_soc; + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3(soc, + vdev_id); + struct dp_pdev *pdev; + + if (!vdev) + return QDF_STATUS_E_FAILURE; + + pdev = (struct dp_pdev *)vdev->pdev; + pdev->nac_rssi_filtering = 1; + /* Store address of NAC (neighbour peer) which will be checked + * against TA of received packets. + */ + + if (cmd == CDP_NAC_PARAM_ADD) { + dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id, + DP_NAC_PARAM_ADD, + (uint8_t *)client_macaddr); + } else if (cmd == CDP_NAC_PARAM_DEL) { + dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id, + DP_NAC_PARAM_DEL, + (uint8_t *)client_macaddr); + } + + if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi) + soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi + (soc->ctrl_psoc, pdev->pdev_id, + vdev->vdev_id, cmd, bssid, client_macaddr); + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering + * for pktlog + * @soc: cdp_soc handle + * @pdev_id: id of dp pdev handle + * @mac_addr: Peer mac address + * @enb_dsb: Enable or disable peer based filtering + * + * Return: QDF_STATUS + */ +static int +dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id, + uint8_t *mac_addr, uint8_t enb_dsb) +{ + struct dp_peer *peer; + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + + if (!pdev) { + dp_err("Invalid Pdev for pdev_id %d", pdev_id); + return QDF_STATUS_E_FAILURE; + } + + peer = (struct dp_peer *)dp_find_peer_by_addr((struct cdp_pdev *)pdev, + mac_addr); + + if (!peer) { + dp_err("Invalid Peer"); + return QDF_STATUS_E_FAILURE; + } + + peer->peer_based_pktlog_filter = enb_dsb; + pdev->dp_peer_based_pktlog = enb_dsb; + + return QDF_STATUS_SUCCESS; +} + +#ifndef WLAN_SUPPORT_RX_TAG_STATISTICS +/** + * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for + * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol) + * @soc: cdp_soc handle + * @pdev_id: id of cdp_pdev handle + * @protocol_type: protocol type for which stats should be displayed + * + * Return: none + */ +static inline void +dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t *soc, uint8_t pdev_id, + uint16_t protocol_type) +{ +} +#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */ + +#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG +/** + * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be + * applied to the desired protocol type packets + * @soc: soc handle + * @pdev_id: id of cdp_pdev handle + * @enable_rx_protocol_tag - bitmask that indicates what protocol types + * are enabled for tagging. zero indicates disable feature, non-zero indicates + * enable feature + * @protocol_type: new protocol type for which the tag is being added + * @tag: user configured tag for the new protocol + * + * Return: Success + */ +static inline QDF_STATUS +dp_update_pdev_rx_protocol_tag(struct cdp_soc_t *soc, uint8_t pdev_id, + uint32_t enable_rx_protocol_tag, + uint16_t protocol_type, + uint16_t tag) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */ + +#ifndef WLAN_SUPPORT_RX_FLOW_TAG +/** + * dp_set_rx_flow_tag - add/delete a flow + * @soc: soc handle + * @pdev_id: id of cdp_pdev handle + * @flow_info: flow tuple that is to be added to/deleted from flow search table + * + * Return: Success + */ +static inline QDF_STATUS +dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, + struct cdp_rx_flow_info *flow_info) +{ + return QDF_STATUS_SUCCESS; +} +/** + * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for + * given flow 5-tuple + * @cdp_soc: soc handle + * @pdev_id: id of cdp_pdev handle + * @flow_info: flow 5-tuple for which stats should be displayed + * + * Return: Success + */ +static inline QDF_STATUS +dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, + struct cdp_rx_flow_info *flow_info) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUPPORT_RX_FLOW_TAG */ + +static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl, + uint32_t max_peers, + uint32_t max_ast_index, + bool peer_map_unmap_v2) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + + soc->max_peers = max_peers; + + qdf_print ("%s max_peers %u, max_ast_index: %u\n", + __func__, max_peers, max_ast_index); + wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index); + + if (dp_peer_find_attach(soc)) + return QDF_STATUS_E_FAILURE; + + soc->is_peer_map_unmap_v2 = peer_map_unmap_v2; + + return QDF_STATUS_SUCCESS; +} + +static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle, + void *stats_ctx) +{ + struct dp_soc *soc = (struct dp_soc *)soc_handle; + + soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx; +} + +#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE +static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_vdev *vdev = NULL; + struct dp_peer *peer = NULL; + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + qdf_spin_lock_bh(&soc->peer_ref_mutex); + qdf_spin_lock_bh(&pdev->vdev_list_lock); + TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if (peer && !peer->bss_peer) + dp_wdi_event_handler( + WDI_EVENT_FLUSH_RATE_STATS_REQ, + soc, peer->wlanstats_ctx, + peer->peer_ids[0], + WDI_NO_VAL, pdev_id); + } + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + + return QDF_STATUS_SUCCESS; +} +#else +static inline QDF_STATUS +dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE +static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc, + uint8_t pdev_id, + void *buf) +{ + dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS, + (struct dp_soc *)soc, buf, HTT_INVALID_PEER, + WDI_NO_VAL, pdev_id); + return QDF_STATUS_SUCCESS; +} +#else +static inline QDF_STATUS +dp_peer_flush_rate_stats(struct cdp_soc_t *soc, + uint8_t pdev_id, + void *buf) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle) +{ + struct dp_soc *soc = (struct dp_soc *)soc_handle; + + return soc->rate_stats_ctx; +} + +/* + * dp_get_cfg() - get dp cfg + * @soc: cdp soc handle + * @cfg: cfg enum + * + * Return: cfg value + */ +static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg) +{ + struct dp_soc *dpsoc = (struct dp_soc *)soc; + uint32_t value = 0; + + switch (cfg) { + case cfg_dp_enable_data_stall: + value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection; + break; + case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload: + value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload; + break; + case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload: + value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload; + break; + case cfg_dp_enable_ip_tcp_udp_checksum_offload: + value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload; + break; + case cfg_dp_disable_legacy_mode_csum_offload: + value = dpsoc->wlan_cfg_ctx-> + legacy_mode_checksumoffload_disable; + break; + case cfg_dp_tso_enable: + value = dpsoc->wlan_cfg_ctx->tso_enabled; + break; + case cfg_dp_lro_enable: + value = dpsoc->wlan_cfg_ctx->lro_enabled; + break; + case cfg_dp_gro_enable: + value = dpsoc->wlan_cfg_ctx->gro_enabled; + break; + case cfg_dp_sg_enable: + value = dpsoc->wlan_cfg_ctx->sg_enabled; + break; + case cfg_dp_tc_based_dyn_gro_enable: + value = dpsoc->wlan_cfg_ctx->tc_based_dynamic_gro; + break; + case cfg_dp_tc_ingress_prio: + value = dpsoc->wlan_cfg_ctx->tc_ingress_prio; + break; + case cfg_dp_tx_flow_start_queue_offset: + value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset; + break; + case cfg_dp_tx_flow_stop_queue_threshold: + value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold; + break; + case cfg_dp_disable_intra_bss_fwd: + value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd; + break; + case cfg_dp_pktlog_buffer_size: + value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size; + break; + case cfg_dp_wow_check_rx_pending: + value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable; + break; + default: + value = 0; + } + + return value; +} + +#ifdef PEER_FLOW_CONTROL +/** + * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params + * @soc_handle: datapath soc handle + * @pdev_id: id of datapath pdev handle + * @param: ol ath params + * @value: value of the flag + * @buff: Buffer to be passed + * + * Implemented this function same as legacy function. In legacy code, single + * function is used to display stats and update pdev params. + * + * Return: 0 for success. nonzero for failure. + */ +static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle, + uint8_t pdev_id, + enum _ol_ath_param_t param, + uint32_t value, void *buff) +{ + struct dp_soc *soc = (struct dp_soc *)soc_handle; + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + + if (qdf_unlikely(!pdev)) + return 1; + + soc = pdev->soc; + if (!soc) + return 1; + + switch (param) { +#ifdef QCA_ENH_V3_STATS_SUPPORT + case OL_ATH_PARAM_VIDEO_DELAY_STATS_FC: + if (value) + pdev->delay_stats_flag = true; + else + pdev->delay_stats_flag = false; + break; + case OL_ATH_PARAM_VIDEO_STATS_FC: + qdf_print("------- TID Stats ------\n"); + dp_pdev_print_tid_stats(pdev); + qdf_print("------ Delay Stats ------\n"); + dp_pdev_print_delay_stats(pdev); + break; +#endif + case OL_ATH_PARAM_TOTAL_Q_SIZE: + { + uint32_t tx_min, tx_max; + + tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx); + tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); + + if (!buff) { + if ((value >= tx_min) && (value <= tx_max)) { + pdev->num_tx_allowed = value; + } else { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_INFO, + "Failed to update num_tx_allowed, Q_min = %d Q_max = %d", + tx_min, tx_max); + break; + } + } else { + *(int *)buff = pdev->num_tx_allowed; + } + } + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: not handled param %d ", __func__, param); + break; + } + + return 0; +} +#endif + +/** + * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev + * @psoc: dp soc handle + * @pdev_id: id of DP_PDEV handle + * @pcp: pcp value + * @tid: tid value passed by the user + * + * Return: QDF_STATUS_SUCCESS on success + */ +static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc, + uint8_t pdev_id, + uint8_t pcp, uint8_t tid) +{ + struct dp_soc *soc = (struct dp_soc *)psoc; + + soc->pcp_tid_map[pcp] = tid; + + hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid); + return QDF_STATUS_SUCCESS; +} + +/** + * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev + * @soc: DP soc handle + * @vdev_id: id of DP_VDEV handle + * @pcp: pcp value + * @tid: tid value passed by the user + * + * Return: QDF_STATUS_SUCCESS on success + */ +static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc, + uint8_t vdev_id, + uint8_t pcp, uint8_t tid) +{ + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + + if (!vdev) + return QDF_STATUS_E_FAILURE; + + vdev->pcp_tid_map[pcp] = tid; + + return QDF_STATUS_SUCCESS; +} + +static struct cdp_cmn_ops dp_ops_cmn = { + .txrx_soc_attach_target = dp_soc_attach_target_wifi3, + .txrx_vdev_attach = dp_vdev_attach_wifi3, + .txrx_vdev_detach = dp_vdev_detach_wifi3, + .txrx_pdev_attach = dp_pdev_attach_wifi3, + .txrx_pdev_detach = dp_pdev_detach_wifi3, + .txrx_pdev_deinit = dp_pdev_deinit_wifi3, + .txrx_peer_create = dp_peer_create_wifi3, + .txrx_peer_setup = dp_peer_setup_wifi3, +#ifdef FEATURE_AST + .txrx_peer_teardown = dp_peer_teardown_wifi3, +#else + .txrx_peer_teardown = NULL, +#endif + .txrx_peer_add_ast = dp_peer_add_ast_wifi3, + .txrx_peer_update_ast = dp_peer_update_ast_wifi3, + .txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3, + .txrx_peer_get_ast_info_by_pdev = + dp_peer_get_ast_info_by_pdevid_wifi3, + .txrx_peer_ast_delete_by_soc = + dp_peer_ast_entry_del_by_soc, + .txrx_peer_ast_delete_by_pdev = + dp_peer_ast_entry_del_by_pdev, + .txrx_peer_delete = dp_peer_delete_wifi3, + .txrx_vdev_register = dp_vdev_register_wifi3, + .txrx_soc_detach = dp_soc_detach_wifi3, + .txrx_soc_deinit = dp_soc_deinit_wifi3, + .txrx_soc_init = dp_soc_init_wifi3, + .txrx_tso_soc_attach = dp_tso_soc_attach, + .txrx_tso_soc_detach = dp_tso_soc_detach, + .txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3, + .txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3, + .txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3, + .txrx_ath_getstats = dp_get_device_stats, + .addba_requestprocess = dp_addba_requestprocess_wifi3, + .addba_responsesetup = dp_addba_responsesetup_wifi3, + .addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3, + .delba_process = dp_delba_process_wifi3, + .set_addba_response = dp_set_addba_response, + .flush_cache_rx_queue = NULL, + /* TODO: get API's for dscp-tid need to be added*/ + .set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3, + .set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3, + .txrx_get_total_per = dp_get_total_per, + .txrx_stats_request = dp_txrx_stats_request, + .txrx_set_monitor_mode = dp_vdev_set_monitor_mode, + .txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id, + .display_stats = dp_txrx_dump_stats, + .txrx_intr_attach = dp_soc_interrupt_attach_wrapper, + .txrx_intr_detach = dp_soc_interrupt_detach, + .set_pn_check = dp_set_pn_check_wifi3, + .set_key_sec_type = dp_set_key_sec_type_wifi3, + .update_config_parameters = dp_update_config_parameters, + /* TODO: Add other functions */ + .txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set, + .get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle, + .set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle, + .get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle, + .set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle, + .get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle, + .set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle, + .map_pdev_to_lmac = dp_soc_map_pdev_to_lmac, + .handle_mode_change = dp_soc_handle_pdev_mode_change, + .set_pdev_status_down = dp_soc_set_pdev_status_down, + .txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout, + .txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout, + .tx_send = dp_tx_send, + .txrx_peer_reset_ast = dp_wds_reset_ast_wifi3, + .txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3, + .txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3, + .txrx_peer_map_attach = dp_peer_map_attach_wifi3, + .txrx_get_os_rx_handles_from_vdev = + dp_get_os_rx_handles_from_vdev_wifi3, + .delba_tx_completion = dp_delba_tx_completion_wifi3, + .get_dp_capabilities = dp_get_cfg_capabilities, + .txrx_get_cfg = dp_get_cfg, + .set_rate_stats_ctx = dp_soc_set_rate_stats_ctx, + .get_rate_stats_ctx = dp_soc_get_rate_stats_ctx, + .txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats, + .txrx_flush_rate_stats_request = dp_flush_rate_stats_req, + + .set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3, + .set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3, + + .txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler, +#ifdef QCA_MULTIPASS_SUPPORT + .set_vlan_groupkey = dp_set_vlan_groupkey, +#endif + .get_peer_mac_list = dp_get_peer_mac_list, + .tx_send_exc = dp_tx_send_exception, +}; + +static struct cdp_ctrl_ops dp_ops_ctrl = { + .txrx_peer_authorize = dp_peer_authorize, +#ifdef VDEV_PEER_PROTOCOL_COUNT + .txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count, + .txrx_set_peer_protocol_drop_mask = + dp_enable_vdev_peer_protocol_drop_mask, + .txrx_is_peer_protocol_count_enabled = + dp_is_vdev_peer_protocol_count_enabled, + .txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask, +#endif + .txrx_set_vdev_param = dp_set_vdev_param, + .txrx_set_psoc_param = dp_set_psoc_param, + .txrx_get_psoc_param = dp_get_psoc_param, + .txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest, + .txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest, +#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) + .txrx_update_filter_neighbour_peers = + dp_update_filter_neighbour_peers, +#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ + .txrx_get_sec_type = dp_get_sec_type, + .txrx_wdi_event_sub = dp_wdi_event_sub, + .txrx_wdi_event_unsub = dp_wdi_event_unsub, +#ifdef WDI_EVENT_ENABLE + .txrx_get_pldev = dp_get_pldev, +#endif + .txrx_set_pdev_param = dp_set_pdev_param, + .txrx_get_pdev_param = dp_get_pdev_param, + .txrx_set_peer_param = dp_set_peer_param, + .txrx_get_peer_param = dp_get_peer_param, +#ifdef VDEV_PEER_PROTOCOL_COUNT + .txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt, +#endif +#ifdef ATH_SUPPORT_NAC_RSSI + .txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi, + .txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi, +#endif + .set_key = dp_set_michael_key, + .txrx_get_vdev_param = dp_get_vdev_param, + .enable_peer_based_pktlog = dp_enable_peer_based_pktlog, + .calculate_delay_stats = dp_calculate_delay_stats, +#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG + .txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag, +#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS + .txrx_dump_pdev_rx_protocol_tag_stats = + dp_dump_pdev_rx_protocol_tag_stats, +#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */ +#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */ +#ifdef WLAN_SUPPORT_RX_FLOW_TAG + .txrx_set_rx_flow_tag = dp_set_rx_flow_tag, + .txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats, +#endif /* WLAN_SUPPORT_RX_FLOW_TAG */ +#ifdef QCA_MULTIPASS_SUPPORT + .txrx_peer_set_vlan_id = dp_peer_set_vlan_id, +#endif /*QCA_MULTIPASS_SUPPORT*/ +#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH) + .txrx_update_peer_pkt_capture_params = + dp_peer_update_pkt_capture_params, +#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */ +}; + +static struct cdp_me_ops dp_ops_me = { +#ifdef ATH_SUPPORT_IQUE + .tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor, + .tx_me_free_descriptor = dp_tx_me_free_descriptor, + .tx_me_convert_ucast = dp_tx_me_send_convert_ucast, +#endif +}; + +static struct cdp_mon_ops dp_ops_mon = { + .txrx_reset_monitor_mode = dp_reset_monitor_mode, + /* Added support for HK advance filter */ + .txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter, + .txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt, +}; + +static struct cdp_host_stats_ops dp_ops_host_stats = { + .txrx_per_peer_stats = dp_get_host_peer_stats, + .get_fw_peer_stats = dp_get_fw_peer_stats, + .get_htt_stats = dp_get_htt_stats, +#ifdef FEATURE_PERPKT_INFO + .txrx_enable_enhanced_stats = dp_enable_enhanced_stats, + .txrx_disable_enhanced_stats = dp_disable_enhanced_stats, +#endif /* FEATURE_PERPKT_INFO */ + .txrx_stats_publish = dp_txrx_stats_publish, + .txrx_get_vdev_stats = dp_txrx_get_vdev_stats, + .txrx_get_peer_stats = dp_txrx_get_peer_stats, + .txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param, + .txrx_reset_peer_stats = dp_txrx_reset_peer_stats, + .txrx_get_pdev_stats = dp_txrx_get_pdev_stats, + .txrx_get_ratekbps = dp_txrx_get_ratekbps, + .txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats, + /* TODO */ +}; + +static struct cdp_raw_ops dp_ops_raw = { + /* TODO */ +}; + +#ifdef PEER_FLOW_CONTROL +static struct cdp_pflow_ops dp_ops_pflow = { + dp_tx_flow_ctrl_configure_pdev, +}; +#endif /* CONFIG_WIN */ + +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) +static struct cdp_cfr_ops dp_ops_cfr = { + .txrx_cfr_filter = dp_cfr_filter, + .txrx_get_cfr_rcc = dp_get_cfr_rcc, + .txrx_set_cfr_rcc = dp_set_cfr_rcc, + .txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats, + .txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats, + .txrx_enable_mon_reap_timer = dp_enable_mon_reap_timer, +}; +#endif + +#ifdef FEATURE_RUNTIME_PM +/** + * dp_flush_ring_hptp() - Update ring shadow + * register HP/TP address when runtime + * resume + * @opaque_soc: DP soc context + * + * Return: None + */ +static +void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng) +{ + if (hal_srng && hal_srng_get_clear_event(hal_srng, + HAL_SRNG_FLUSH_EVENT)) { + /* Acquire the lock */ + hal_srng_access_start(soc->hal_soc, hal_srng); + + hal_srng_access_end(soc->hal_soc, hal_srng); + + hal_srng_set_flush_last_ts(hal_srng); + dp_debug("flushed"); + } +} + +/** + * dp_runtime_suspend() - ensure DP is ready to runtime suspend + * @soc_hdl: Datapath soc handle + * @pdev_id: id of data path pdev handle + * + * DP is ready to runtime suspend if there are no pending TX packets. + * + * Return: QDF_STATUS + */ +static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev; + uint8_t i; + + pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + if (!pdev) { + dp_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + /* Abort if there are any pending TX packets */ + if (dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev)) > 0) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + FL("Abort suspend due to pending TX packets")); + + /* perform a force flush if tx is pending */ + for (i = 0; i < soc->num_tcl_data_rings; i++) { +#ifdef IPA_OFFLOAD + if (i == IPA_TCL_DATA_RING_IDX) + continue; +#endif + hal_srng_set_event(soc->tcl_data_ring[i].hal_srng, + HAL_SRNG_FLUSH_EVENT); + dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng); + } + + return QDF_STATUS_E_AGAIN; + } + + if (dp_runtime_get_refcount(soc)) { + dp_info("refcount: %d", dp_runtime_get_refcount(soc)); + + return QDF_STATUS_E_AGAIN; + } + + if (soc->intr_mode == DP_INTR_POLL) + qdf_timer_stop(&soc->int_timer); + + return QDF_STATUS_SUCCESS; +} + +#define DP_FLUSH_WAIT_CNT 10 +#define DP_RUNTIME_SUSPEND_WAIT_MS 10 +/** + * dp_runtime_resume() - ensure DP is ready to runtime resume + * @soc_hdl: Datapath soc handle + * @pdev_id: id of data path pdev handle + * + * Resume DP for runtime PM. + * + * Return: QDF_STATUS + */ +static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + int i, suspend_wait = 0; + + if (soc->intr_mode == DP_INTR_POLL) + qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS); + + /* + * Wait until dp runtime refcount becomes zero or time out, then flush + * pending tx for runtime suspend. + */ + while (dp_runtime_get_refcount(soc) && + suspend_wait < DP_FLUSH_WAIT_CNT) { + qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS); + suspend_wait++; + } + + for (i = 0; i < MAX_TCL_DATA_RINGS; i++) { + dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng); + } + + dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng); + + return QDF_STATUS_SUCCESS; +} +#endif /* FEATURE_RUNTIME_PM */ + +/** + * dp_tx_get_success_ack_stats() - get tx success completion count + * @soc_hdl: Datapath soc handle + * @vdevid: vdev identifier + * + * Return: tx success ack count + */ +static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct cdp_vdev_stats *vdev_stats = NULL; + uint32_t tx_success; + struct dp_vdev *vdev = + (struct dp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3(soc, + vdev_id); + + if (!vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Invalid vdev id %d"), vdev_id); + return 0; + } + + vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats)); + if (!vdev_stats) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "DP alloc failure - unable to get alloc vdev stats"); + return 0; + } + + qdf_spin_lock_bh(&soc->peer_ref_mutex); + dp_aggregate_vdev_stats(vdev, vdev_stats); + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + + tx_success = vdev_stats->tx.tx_success.num; + qdf_mem_free(vdev_stats); + + return tx_success; +} + +#ifdef WLAN_SUPPORT_DATA_STALL +/** + * dp_register_data_stall_detect_cb() - register data stall callback + * @soc_hdl: Datapath soc handle + * @pdev_id: id of data path pdev handle + * @data_stall_detect_callback: data stall callback function + * + * Return: QDF_STATUS Enumeration + */ +static +QDF_STATUS dp_register_data_stall_detect_cb( + struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + data_stall_detect_cb data_stall_detect_callback) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev; + + pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + if (!pdev) { + dp_err("pdev NULL!"); + return QDF_STATUS_E_INVAL; + } + + pdev->data_stall_detect_callback = data_stall_detect_callback; + return QDF_STATUS_SUCCESS; +} + +/** + * dp_deregister_data_stall_detect_cb() - de-register data stall callback + * @soc_hdl: Datapath soc handle + * @pdev_id: id of data path pdev handle + * @data_stall_detect_callback: data stall callback function + * + * Return: QDF_STATUS Enumeration + */ +static +QDF_STATUS dp_deregister_data_stall_detect_cb( + struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + data_stall_detect_cb data_stall_detect_callback) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev; + + pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + if (!pdev) { + dp_err("pdev NULL!"); + return QDF_STATUS_E_INVAL; + } + + pdev->data_stall_detect_callback = NULL; + return QDF_STATUS_SUCCESS; +} + +/** + * dp_txrx_post_data_stall_event() - post data stall event + * @soc_hdl: Datapath soc handle + * @indicator: Module triggering data stall + * @data_stall_type: data stall event type + * @pdev_id: pdev id + * @vdev_id_bitmap: vdev id bitmap + * @recovery_type: data stall recovery type + * + * Return: None + */ +static void +dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl, + enum data_stall_log_event_indicator indicator, + enum data_stall_log_event_type data_stall_type, + uint32_t pdev_id, uint32_t vdev_id_bitmap, + enum data_stall_log_recovery_type recovery_type) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct data_stall_event_info data_stall_info; + struct dp_pdev *pdev; + + pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + if (!pdev) { + dp_err("pdev NULL!"); + return; + } + + if (!pdev->data_stall_detect_callback) { + dp_err("data stall cb not registered!"); + return; + } + + dp_info("data_stall_type: %x pdev_id: %d", + data_stall_type, pdev_id); + + data_stall_info.indicator = indicator; + data_stall_info.data_stall_type = data_stall_type; + data_stall_info.vdev_id_bitmap = vdev_id_bitmap; + data_stall_info.pdev_id = pdev_id; + data_stall_info.recovery_type = recovery_type; + + pdev->data_stall_detect_callback(&data_stall_info); +} +#endif /* WLAN_SUPPORT_DATA_STALL */ + +#ifdef DP_PEER_EXTENDED_API +/** + * dp_peer_get_ref_find_by_addr - get peer with addr by ref count inc + * @dev: physical device instance + * @peer_mac_addr: peer mac address + * @debug_id: to track enum peer access + * + * Return: peer instance pointer + */ +static void * +dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr, + enum peer_debug_id_type debug_id) +{ + struct dp_pdev *pdev = (struct dp_pdev *)dev; + struct dp_peer *peer; + + peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL); + + if (!peer) + return NULL; + + if (peer->delete_in_progress) { + dp_err("Peer deletion in progress"); + dp_peer_unref_delete(peer); + return NULL; + } + + dp_info_rl("peer %pK mac: "QDF_MAC_ADDR_FMT, peer, + QDF_MAC_ADDR_REF(peer->mac_addr.raw)); + + return peer; +} +#endif /* DP_PEER_EXTENDED_API */ + +#ifdef WLAN_FEATURE_STATS_EXT +/* rx hw stats event wait timeout in ms */ +#define DP_REO_STATUS_STATS_TIMEOUT 1500 +/** + * dp_txrx_ext_stats_request - request dp txrx extended stats request + * @soc_hdl: soc handle + * @pdev_id: pdev id + * @req: stats request + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + struct cdp_txrx_ext_stats *req) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (!pdev) { + dp_err("pdev is null"); + return QDF_STATUS_E_INVAL; + } + + dp_aggregate_pdev_stats(pdev); + + req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num; + req->tx_msdu_overflow = pdev->stats.tx_i.dropped.ring_full; + req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received; + req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received; + req->rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed; + /* only count error source from RXDMA */ + req->rx_mpdu_error = pdev->stats.err.rxdma_error; + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_rx_hw_stats_cb - request rx hw stats response callback + * @soc: soc handle + * @cb_ctxt: callback context + * @reo_status: reo command response status + * + * Return: None + */ +static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status) +{ + struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt; + struct hal_reo_queue_status *queue_status = &reo_status->queue_status; + bool is_query_timeout; + + qdf_spin_lock_bh(&soc->rx_hw_stats_lock); + is_query_timeout = rx_hw_stats->is_query_timeout; + /* free the cb_ctxt if all pending tid stats query is received */ + if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) { + if (!is_query_timeout) { + qdf_event_set(&soc->rx_hw_stats_event); + soc->is_last_stats_ctx_init = false; + } + + qdf_mem_free(rx_hw_stats); + } + + if (queue_status->header.status != HAL_REO_CMD_SUCCESS) { + dp_info("REO stats failure %d", + queue_status->header.status); + qdf_spin_unlock_bh(&soc->rx_hw_stats_lock); + return; + } + + if (!is_query_timeout) { + soc->ext_stats.rx_mpdu_received += + queue_status->mpdu_frms_cnt; + soc->ext_stats.rx_mpdu_missed += + queue_status->hole_cnt; + } + qdf_spin_unlock_bh(&soc->rx_hw_stats_lock); +} + +/** + * dp_request_rx_hw_stats - request rx hardware stats + * @soc_hdl: soc handle + * @vdev_id: vdev id + * + * Return: None + */ +static QDF_STATUS +dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + struct dp_peer *peer; + QDF_STATUS status; + struct dp_req_rx_hw_stats_t *rx_hw_stats; + int rx_stats_sent_cnt = 0; + uint32_t last_rx_mpdu_received; + uint32_t last_rx_mpdu_missed; + + if (!vdev) { + dp_err("vdev is null for vdev_id: %u", vdev_id); + return QDF_STATUS_E_INVAL; + } + + peer = dp_peer_get_ref_find_by_addr((struct cdp_pdev *)vdev->pdev, + vdev->vap_bss_peer_mac_addr, 0); + + if (!peer) { + dp_err("Peer is NULL"); + return QDF_STATUS_E_INVAL; + } + + rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats)); + + if (!rx_hw_stats) { + dp_err("malloc failed for hw stats structure"); + dp_peer_unref_delete(peer); + return QDF_STATUS_E_NOMEM; + } + + qdf_event_reset(&soc->rx_hw_stats_event); + qdf_spin_lock_bh(&soc->rx_hw_stats_lock); + /* save the last soc cumulative stats and reset it to 0 */ + last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received; + last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed; + soc->ext_stats.rx_mpdu_received = 0; + soc->ext_stats.rx_mpdu_missed = 0; + + rx_stats_sent_cnt = + dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats); + if (!rx_stats_sent_cnt) { + dp_err("no tid stats sent successfully"); + qdf_mem_free(rx_hw_stats); + qdf_spin_unlock_bh(&soc->rx_hw_stats_lock); + dp_peer_unref_delete(peer); + return QDF_STATUS_E_INVAL; + } + qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt, + rx_stats_sent_cnt); + rx_hw_stats->is_query_timeout = false; + soc->is_last_stats_ctx_init = true; + qdf_spin_unlock_bh(&soc->rx_hw_stats_lock); + + status = qdf_wait_single_event(&soc->rx_hw_stats_event, + DP_REO_STATUS_STATS_TIMEOUT); + + qdf_spin_lock_bh(&soc->rx_hw_stats_lock); + if (status != QDF_STATUS_SUCCESS) { + dp_info("rx hw stats event timeout"); + if (soc->is_last_stats_ctx_init) + rx_hw_stats->is_query_timeout = true; + /** + * If query timeout happened, use the last saved stats + * for this time query. + */ + soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received; + soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed; + } + qdf_spin_unlock_bh(&soc->rx_hw_stats_lock); + dp_peer_unref_delete(peer); + + return status; +} +#endif /* WLAN_FEATURE_STATS_EXT */ + +#ifdef DP_PEER_EXTENDED_API +static struct cdp_misc_ops dp_ops_misc = { +#ifdef FEATURE_WLAN_TDLS + .tx_non_std = dp_tx_non_std, +#endif /* FEATURE_WLAN_TDLS */ + .get_opmode = dp_get_opmode, +#ifdef FEATURE_RUNTIME_PM + .runtime_suspend = dp_runtime_suspend, + .runtime_resume = dp_runtime_resume, +#endif /* FEATURE_RUNTIME_PM */ + .pkt_log_init = dp_pkt_log_init, + .pkt_log_con_service = dp_pkt_log_con_service, + .get_num_rx_contexts = dp_get_num_rx_contexts, + .get_tx_ack_stats = dp_tx_get_success_ack_stats, +#ifdef WLAN_SUPPORT_DATA_STALL + .txrx_data_stall_cb_register = dp_register_data_stall_detect_cb, + .txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb, + .txrx_post_data_stall_event = dp_txrx_post_data_stall_event, +#endif + +#ifdef WLAN_FEATURE_STATS_EXT + .txrx_ext_stats_request = dp_txrx_ext_stats_request, + .request_rx_hw_stats = dp_request_rx_hw_stats, +#endif /* WLAN_FEATURE_STATS_EXT */ +}; +#endif + +#ifdef DP_FLOW_CTL +static struct cdp_flowctl_ops dp_ops_flowctl = { + /* WIFI 3.0 DP implement as required. */ +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 + .flow_pool_map_handler = dp_tx_flow_pool_map, + .flow_pool_unmap_handler = dp_tx_flow_pool_unmap, + .register_pause_cb = dp_txrx_register_pause_cb, + .dump_flow_pool_info = dp_tx_dump_flow_pool_info, + .tx_desc_thresh_reached = dp_tx_desc_thresh_reached, +#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ +}; + +static struct cdp_lflowctl_ops dp_ops_l_flowctl = { + /* WIFI 3.0 DP NOT IMPLEMENTED YET */ +}; +#endif + +#ifdef IPA_OFFLOAD +static struct cdp_ipa_ops dp_ops_ipa = { + .ipa_get_resource = dp_ipa_get_resource, + .ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr, + .ipa_op_response = dp_ipa_op_response, + .ipa_register_op_cb = dp_ipa_register_op_cb, + .ipa_get_stat = dp_ipa_get_stat, + .ipa_tx_data_frame = dp_tx_send_ipa_data_frame, + .ipa_enable_autonomy = dp_ipa_enable_autonomy, + .ipa_disable_autonomy = dp_ipa_disable_autonomy, + .ipa_setup = dp_ipa_setup, + .ipa_cleanup = dp_ipa_cleanup, + .ipa_setup_iface = dp_ipa_setup_iface, + .ipa_cleanup_iface = dp_ipa_cleanup_iface, + .ipa_enable_pipes = dp_ipa_enable_pipes, + .ipa_disable_pipes = dp_ipa_disable_pipes, + .ipa_set_perf_level = dp_ipa_set_perf_level, + .ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd +}; +#endif + +#ifdef DP_POWER_SAVE +static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + int timeout = SUSPEND_DRAIN_WAIT; + int drain_wait_delay = 50; /* 50 ms */ + + if (qdf_unlikely(!pdev)) { + dp_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + /* Abort if there are any pending TX packets */ + while (dp_get_tx_pending((struct cdp_pdev *)pdev) > 0) { + qdf_sleep(drain_wait_delay); + if (timeout <= 0) { + dp_err("TX frames are pending, abort suspend"); + return QDF_STATUS_E_TIMEOUT; + } + timeout = timeout - drain_wait_delay; + } + + if (soc->intr_mode == DP_INTR_POLL) + qdf_timer_stop(&soc->int_timer); + + /* Stop monitor reap timer and reap any pending frames in ring */ + if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) || + dp_is_enable_reap_timer_non_pkt(pdev)) && + soc->reap_timer_init) { + qdf_timer_sync_cancel(&soc->mon_reap_timer); + dp_service_mon_rings(soc, DP_MON_REAP_BUDGET); + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (qdf_unlikely(!pdev)) { + dp_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (soc->intr_mode == DP_INTR_POLL) + qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS); + + /* Start monitor reap timer */ + if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) || + dp_is_enable_reap_timer_non_pkt(pdev)) && + soc->reap_timer_init) + qdf_timer_mod(&soc->mon_reap_timer, + DP_INTR_POLL_TIMER_MS); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_process_wow_ack_rsp() - process wow ack response + * @soc_hdl: datapath soc handle + * @pdev_id: data path pdev handle id + * + * Return: none + */ +static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (qdf_unlikely(!pdev)) { + dp_err("pdev is NULL"); + return; + } + + /* + * As part of wow enable FW disables the mon status ring and in wow ack + * response from FW reap mon status ring to make sure no packets pending + * in the ring. + */ + if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) || + dp_is_enable_reap_timer_non_pkt(pdev)) && + soc->reap_timer_init) { + dp_service_mon_rings(soc, DP_MON_REAP_BUDGET); + } +} + +/** + * dp_process_target_suspend_req() - process target suspend request + * @soc_hdl: datapath soc handle + * @pdev_id: data path pdev handle id + * + * Return: none + */ +static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (qdf_unlikely(!pdev)) { + dp_err("pdev is NULL"); + return; + } + + /* Stop monitor reap timer and reap any pending frames in ring */ + if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) || + dp_is_enable_reap_timer_non_pkt(pdev)) && + soc->reap_timer_init) { + qdf_timer_sync_cancel(&soc->mon_reap_timer); + dp_service_mon_rings(soc, DP_MON_REAP_BUDGET); + } +} + +static struct cdp_bus_ops dp_ops_bus = { + .bus_suspend = dp_bus_suspend, + .bus_resume = dp_bus_resume, + .process_wow_ack_rsp = dp_process_wow_ack_rsp, + .process_target_suspend_req = dp_process_target_suspend_req +}; +#endif + +#ifdef DP_FLOW_CTL +static struct cdp_throttle_ops dp_ops_throttle = { + /* WIFI 3.0 DP NOT IMPLEMENTED YET */ +}; + +static struct cdp_cfg_ops dp_ops_cfg = { + /* WIFI 3.0 DP NOT IMPLEMENTED YET */ +}; +#endif + +#ifdef DP_PEER_EXTENDED_API +static struct cdp_ocb_ops dp_ops_ocb = { + /* WIFI 3.0 DP NOT IMPLEMENTED YET */ +}; + +static struct cdp_mob_stats_ops dp_ops_mob_stats = { + .clear_stats = dp_txrx_clear_dump_stats, +}; + +static struct cdp_peer_ops dp_ops_peer = { + .register_peer = dp_register_peer, + .clear_peer = dp_clear_peer, + .find_peer_exist = dp_find_peer_exist, + .find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev, + .find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev, + .peer_state_update = dp_peer_state_update, + .get_vdevid = dp_get_vdevid, + .get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr, + .peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr, + .get_peer_state = dp_get_peer_state, + .peer_flush_frags = dp_peer_flush_frags, +}; +#endif + +static struct cdp_ops dp_txrx_ops = { + .cmn_drv_ops = &dp_ops_cmn, + .ctrl_ops = &dp_ops_ctrl, + .me_ops = &dp_ops_me, + .mon_ops = &dp_ops_mon, + .host_stats_ops = &dp_ops_host_stats, + .wds_ops = &dp_ops_wds, + .raw_ops = &dp_ops_raw, +#ifdef PEER_FLOW_CONTROL + .pflow_ops = &dp_ops_pflow, +#endif /* PEER_FLOW_CONTROL */ +#ifdef DP_PEER_EXTENDED_API + .misc_ops = &dp_ops_misc, + .ocb_ops = &dp_ops_ocb, + .peer_ops = &dp_ops_peer, + .mob_stats_ops = &dp_ops_mob_stats, +#endif +#ifdef DP_FLOW_CTL + .cfg_ops = &dp_ops_cfg, + .flowctl_ops = &dp_ops_flowctl, + .l_flowctl_ops = &dp_ops_l_flowctl, + .throttle_ops = &dp_ops_throttle, +#endif +#ifdef IPA_OFFLOAD + .ipa_ops = &dp_ops_ipa, +#endif +#ifdef DP_POWER_SAVE + .bus_ops = &dp_ops_bus, +#endif +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) + .cfr_ops = &dp_ops_cfr, +#endif +}; + +/* + * dp_soc_set_txrx_ring_map() + * @dp_soc: DP handler for soc + * + * Return: Void + */ +void dp_soc_set_txrx_ring_map(struct dp_soc *soc) +{ + uint32_t i; + for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) { + soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i]; + } +} + +#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) + +#ifndef QCA_MEM_ATTACH_ON_WIFI3 + +/** + * dp_soc_attach_wifi3() - Attach txrx SOC + * @ctrl_psoc: Opaque SOC handle from control plane + * @htc_handle: Opaque HTC handle + * @hif_handle: Opaque HIF handle + * @qdf_osdev: QDF device + * @ol_ops: Offload Operations + * @device_id: Device ID + * + * Return: DP SOC handle on success, NULL on failure + */ +struct cdp_soc_t * +dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + struct hif_opaque_softc *hif_handle, + HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, + struct ol_if_ops *ol_ops, uint16_t device_id) +{ + struct dp_soc *dp_soc = NULL; + + dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev, + ol_ops, device_id); + if (!dp_soc) + return NULL; + + if (!dp_soc_init(dp_soc, htc_handle, hif_handle)) + return NULL; + + return dp_soc_to_cdp_soc_t(dp_soc); +} +#else + +/** + * dp_soc_attach_wifi3() - Attach txrx SOC + * @ctrl_psoc: Opaque SOC handle from control plane + * @htc_handle: Opaque HTC handle + * @hif_handle: Opaque HIF handle + * @qdf_osdev: QDF device + * @ol_ops: Offload Operations + * @device_id: Device ID + * + * Return: DP SOC handle on success, NULL on failure + */ +struct cdp_soc_t * +dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + struct hif_opaque_softc *hif_handle, + HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, + struct ol_if_ops *ol_ops, uint16_t device_id) +{ + struct dp_soc *dp_soc = NULL; + + dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev, + ol_ops, device_id); + return dp_soc_to_cdp_soc_t(dp_soc); +} + +#endif + +/** + * dp_soc_attach() - Attach txrx SOC + * @ctrl_psoc: Opaque SOC handle from control plane + * @htc_handle: Opaque HTC handle + * @qdf_osdev: QDF device + * @ol_ops: Offload Operations + * @device_id: Device ID + * + * Return: DP SOC handle on success, NULL on failure + */ +static struct dp_soc * +dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, + struct ol_if_ops *ol_ops, uint16_t device_id) +{ + int int_ctx; + struct dp_soc *soc = NULL; + struct htt_soc *htt_soc; + + soc = qdf_mem_malloc(sizeof(*soc)); + + if (!soc) { + dp_err("DP SOC memory allocation failed"); + goto fail0; + } + + int_ctx = 0; + soc->device_id = device_id; + soc->cdp_soc.ops = &dp_txrx_ops; + soc->cdp_soc.ol_ops = ol_ops; + soc->ctrl_psoc = ctrl_psoc; + soc->osdev = qdf_osdev; + soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS; + + dp_soc_rx_history_attach(soc); + wlan_set_srng_cfg(&soc->wlan_srng_cfg); + qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map)); + + soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc); + if (!soc->wlan_cfg_ctx) { + dp_err("wlan_cfg_ctx failed\n"); + goto fail1; + } + + dp_soc_set_interrupt_mode(soc); + + htt_soc = htt_soc_attach(soc, htc_handle); + + if (!htt_soc) + goto fail1; + + soc->htt_handle = htt_soc; + + if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS) + goto fail2; + + return soc; +fail2: + htt_soc_detach(htt_soc); +fail1: + dp_soc_rx_history_detach(soc); + qdf_mem_free(soc); +fail0: + return NULL; +} + +/** + * dp_soc_init() - Initialize txrx SOC + * @dp_soc: Opaque DP SOC handle + * @htc_handle: Opaque HTC handle + * @hif_handle: Opaque HIF handle + * + * Return: DP SOC handle on success, NULL on failure + */ +void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle, + struct hif_opaque_softc *hif_handle) +{ + int target_type; + struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle; + bool is_monitor_mode = false; + + htt_set_htc_handle(htt_soc, htc_handle); + soc->hif_handle = hif_handle; + + soc->hal_soc = hif_get_hal_handle(soc->hif_handle); + if (!soc->hal_soc) + return NULL; + + htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, + htt_get_htc_handle(htt_soc), + soc->hal_soc, soc->osdev); + target_type = hal_get_target_type(soc->hal_soc); + switch (target_type) { + case TARGET_TYPE_QCA6290: + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCA6290); + soc->ast_override_support = 1; + soc->da_war_enabled = false; + break; +#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \ + defined(QCA_WIFI_QCA6750) + case TARGET_TYPE_QCA6390: + case TARGET_TYPE_QCA6490: + case TARGET_TYPE_QCA6750: + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCA6290); + wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true); + soc->ast_override_support = 1; + if (soc->cdp_soc.ol_ops->get_con_mode && + soc->cdp_soc.ol_ops->get_con_mode() == + QDF_GLOBAL_MONITOR_MODE) { + int int_ctx; + + for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) { + soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0; + soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0; + } + } + soc->wlan_cfg_ctx->rxdma1_enable = 0; + break; +#endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6750 */ + + case TARGET_TYPE_QCA8074: + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCA8074); + wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true); + soc->da_war_enabled = true; + soc->is_rx_fse_full_cache_invalidate_war_enabled = true; + break; + case TARGET_TYPE_QCA8074V2: + case TARGET_TYPE_QCA6018: + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCA8074); + wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false); + soc->hw_nac_monitor_support = 1; + soc->ast_override_support = 1; + soc->per_tid_basize_max_tid = 8; + soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS; + soc->da_war_enabled = false; + soc->is_rx_fse_full_cache_invalidate_war_enabled = true; + break; + case TARGET_TYPE_QCN9000: + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCN9000); + soc->ast_override_support = 1; + soc->da_war_enabled = false; + wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false); + soc->hw_nac_monitor_support = 1; + soc->per_tid_basize_max_tid = 8; + soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS; + soc->lmac_polled_mode = 1; + break; + default: + qdf_print("%s: Unknown tgt type %d\n", __func__, target_type); + qdf_assert_always(0); + break; + } + + dp_soc_set_interrupt_mode(soc); + if (soc->cdp_soc.ol_ops->get_con_mode && + soc->cdp_soc.ol_ops->get_con_mode() == + QDF_GLOBAL_MONITOR_MODE) + is_monitor_mode = true; + + wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, soc->intr_mode, + is_monitor_mode); + wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx, + cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH)); + soc->cce_disable = false; + + qdf_atomic_init(&soc->num_tx_outstanding); + soc->num_tx_allowed = + wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx); + + if (soc->cdp_soc.ol_ops->get_dp_cfg_param) { + int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc, + CDP_CFG_MAX_PEER_ID); + + if (ret != -EINVAL) { + wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret); + } + + ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc, + CDP_CFG_CCE_DISABLE); + if (ret == 1) + soc->cce_disable = true; + } + + qdf_spinlock_create(&soc->peer_ref_mutex); + qdf_spinlock_create(&soc->ast_lock); + + qdf_spinlock_create(&soc->reo_desc_freelist_lock); + qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE); + INIT_RX_HW_STATS_LOCK(soc); + + /* fill the tx/rx cpu ring map*/ + dp_soc_set_txrx_ring_map(soc); + + qdf_spinlock_create(&soc->htt_stats.lock); + /* initialize work queue for stats processing */ + qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc); + + return soc; + +} + +/** + * dp_soc_init_wifi3() - Initialize txrx SOC + * @soc: Opaque DP SOC handle + * @ctrl_psoc: Opaque SOC handle from control plane(Unused) + * @hif_handle: Opaque HIF handle + * @htc_handle: Opaque HTC handle + * @qdf_osdev: QDF device (Unused) + * @ol_ops: Offload Operations (Unused) + * @device_id: Device ID (Unused) + * + * Return: DP SOC handle on success, NULL on failure + */ +void *dp_soc_init_wifi3(struct cdp_soc_t *soc, + struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + struct hif_opaque_softc *hif_handle, + HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, + struct ol_if_ops *ol_ops, uint16_t device_id) +{ + return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle); +} + +#endif + +/* + * dp_get_pdev_for_mac_id() - Return pdev for mac_id + * + * @soc: handle to DP soc + * @mac_id: MAC id + * + * Return: Return pdev corresponding to MAC + */ +void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id) +{ + if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) + return soc->pdev_list[mac_id]; + + /* Typically for MCL as there only 1 PDEV*/ + return soc->pdev_list[0]; +} + +/* + * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported + * @soc: DP SoC context + * @max_mac_rings: No of MAC rings + * + * Return: None + */ +void dp_is_hw_dbs_enable(struct dp_soc *soc, + int *max_mac_rings) +{ + bool dbs_enable = false; + if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable) + dbs_enable = soc->cdp_soc.ol_ops-> + is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc); + + *max_mac_rings = (dbs_enable)?(*max_mac_rings):1; +} + +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) +/* + * dp_cfr_filter() - Configure HOST RX monitor status ring for CFR + * @soc_hdl: Datapath soc handle + * @pdev_id: id of data path pdev handle + * @enable: Enable/Disable CFR + * @filter_val: Flag to select Filter for monitor mode + */ +static void dp_cfr_filter(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + bool enable, + struct cdp_monitor_filter *filter_val) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = NULL; + struct htt_rx_ring_tlv_filter htt_tlv_filter = {0}; + int max_mac_rings; + uint8_t mac_id = 0; + + pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + if (!pdev) { + dp_err("pdev is NULL"); + return; + } + + if (pdev->monitor_vdev) { + dp_info("No action is needed since monitor mode is enabled\n"); + return; + } + soc = pdev->soc; + pdev->cfr_rcc_mode = false; + max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx); + dp_is_hw_dbs_enable(soc, &max_mac_rings); + + dp_debug("Max_mac_rings %d", max_mac_rings); + dp_info("enable : %d, mode: 0x%x", enable, filter_val->mode); + + if (enable) { + pdev->cfr_rcc_mode = true; + + htt_tlv_filter.ppdu_start = 1; + htt_tlv_filter.ppdu_end = 1; + htt_tlv_filter.ppdu_end_user_stats = 1; + htt_tlv_filter.ppdu_end_user_stats_ext = 1; + htt_tlv_filter.ppdu_end_status_done = 1; + htt_tlv_filter.mpdu_start = 1; + htt_tlv_filter.offset_valid = false; + + htt_tlv_filter.enable_fp = + (filter_val->mode & MON_FILTER_PASS) ? 1 : 0; + htt_tlv_filter.enable_md = 0; + htt_tlv_filter.enable_mo = + (filter_val->mode & MON_FILTER_OTHER) ? 1 : 0; + htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt; + htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl; + htt_tlv_filter.fp_data_filter = filter_val->fp_data; + htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt; + htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl; + htt_tlv_filter.mo_data_filter = filter_val->mo_data; + } + + for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { + int mac_for_pdev = + dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + + htt_h2t_rx_ring_cfg(soc->htt_handle, + mac_for_pdev, + soc->rxdma_mon_status_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_STATUS, + RX_DATA_BUFFER_SIZE, + &htt_tlv_filter); + } +} + +/** + * dp_get_cfr_rcc() - get cfr rcc config + * @soc_hdl: Datapath soc handle + * @pdev_id: id of objmgr pdev + * + * Return: true/false based on cfr mode setting + */ +static +bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = NULL; + + pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + if (!pdev) { + dp_err("pdev is NULL"); + return false; + } + + return pdev->cfr_rcc_mode; +} + +/** + * dp_set_cfr_rcc() - enable/disable cfr rcc config + * @soc_hdl: Datapath soc handle + * @pdev_id: id of objmgr pdev + * @enable: Enable/Disable cfr rcc mode + * + * Return: none + */ +static +void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = NULL; + + pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + if (!pdev) { + dp_err("pdev is NULL"); + return; + } + + pdev->cfr_rcc_mode = enable; +} + +/* + * dp_get_cfr_dbg_stats - Get the debug statistics for CFR + * @soc_hdl: Datapath soc handle + * @pdev_id: id of data path pdev handle + * @cfr_rcc_stats: CFR RCC debug statistics buffer + * + * Return: none + */ +static inline void +dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + struct cdp_cfr_rcc_stats *cfr_rcc_stats) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (!pdev) { + dp_err("Invalid pdev"); + return; + } + + qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc, + sizeof(struct cdp_cfr_rcc_stats)); +} + +/* + * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR + * @soc_hdl: Datapath soc handle + * @pdev_id: id of data path pdev handle + * + * Return: none + */ +static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (!pdev) { + dp_err("dp pdev is NULL"); + return; + } + + qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc)); +} + +/* + * dp_enable_mon_reap_timer() - enable/disable reap timer + * @soc_hdl: Datapath soc handle + * @pdev_id: id of objmgr pdev + * @enable: Enable/Disable reap timer of monitor status ring + * + * Return: none + */ +static void +dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + bool enable) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = NULL; + + pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + if (!pdev) { + dp_err("pdev is NULL"); + return; + } + + pdev->enable_reap_timer_non_pkt = enable; + if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) { + dp_debug("pktlog enabled %d", pdev->rx_pktlog_mode); + return; + } + + if (!soc->reap_timer_init) { + dp_err("reap timer not init"); + return; + } + + if (enable) + qdf_timer_mod(&soc->mon_reap_timer, + DP_INTR_POLL_TIMER_MS); + else + qdf_timer_sync_cancel(&soc->mon_reap_timer); +} +#endif + +/* + * dp_is_enable_reap_timer_non_pkt() - check if mon reap timer is + * enabled by non-pkt log or not + * @pdev: point to dp pdev + * + * Return: true if mon reap timer is enabled by non-pkt log + */ +static bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev) +{ + if (!pdev) { + dp_err("null pdev"); + return false; + } + + return pdev->enable_reap_timer_non_pkt; +} + +/* +* dp_is_soc_reinit() - Check if soc reinit is true +* @soc: DP SoC context +* +* Return: true or false +*/ +bool dp_is_soc_reinit(struct dp_soc *soc) +{ + return soc->dp_soc_reinit; +} + +/* +* dp_set_pktlog_wifi3() - attach txrx vdev +* @pdev: Datapath PDEV handle +* @event: which event's notifications are being subscribed to +* @enable: WDI event subscribe or not. (True or False) +* +* Return: Success, NULL on failure +*/ +#ifdef WDI_EVENT_ENABLE +int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, + bool enable) +{ + struct dp_soc *soc = NULL; + int max_mac_rings = wlan_cfg_get_num_mac_rings + (pdev->wlan_cfg_ctx); + uint8_t mac_id = 0; + + soc = pdev->soc; + dp_is_hw_dbs_enable(soc, &max_mac_rings); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + FL("Max_mac_rings %d "), + max_mac_rings); + + if (enable) { + switch (event) { + case WDI_EVENT_RX_DESC: + if (pdev->monitor_vdev) { + /* Nothing needs to be done if monitor mode is + * enabled + */ + return 0; + } + + if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) { + pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL; + dp_mon_filter_setup_rx_pkt_log_full(pdev); + if (dp_mon_filter_update(pdev) != + QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("Pktlog full filters set failed")); + dp_mon_filter_reset_rx_pkt_log_full(pdev); + pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED; + return 0; + } + + if (soc->reap_timer_init && + (!dp_is_enable_reap_timer_non_pkt(pdev))) + qdf_timer_mod(&soc->mon_reap_timer, + DP_INTR_POLL_TIMER_MS); + } + break; + + case WDI_EVENT_LITE_RX: + if (pdev->monitor_vdev) { + /* Nothing needs to be done if monitor mode is + * enabled + */ + return 0; + } + if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) { + pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE; + + /* + * Set the packet log lite mode filter. + */ + dp_mon_filter_setup_rx_pkt_log_lite(pdev); + if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("Pktlog lite filters set failed")); + dp_mon_filter_reset_rx_pkt_log_lite(pdev); + pdev->rx_pktlog_mode = + DP_RX_PKTLOG_DISABLED; + return 0; + } + + if (soc->reap_timer_init && + (!dp_is_enable_reap_timer_non_pkt(pdev))) + qdf_timer_mod(&soc->mon_reap_timer, + DP_INTR_POLL_TIMER_MS); + } + break; + + case WDI_EVENT_LITE_T2H: + if (pdev->monitor_vdev) { + /* Nothing needs to be done if monitor mode is + * enabled + */ + return 0; + } + + for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev( + mac_id, pdev->pdev_id); + + pdev->pktlog_ppdu_stats = true; + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_TXLITE_STATS_BITMASK_CFG, + mac_for_pdev); + } + break; + + default: + /* Nothing needs to be done for other pktlog types */ + break; + } + } else { + switch (event) { + case WDI_EVENT_RX_DESC: + case WDI_EVENT_LITE_RX: + if (pdev->monitor_vdev) { + /* Nothing needs to be done if monitor mode is + * enabled + */ + return 0; + } + if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) { + pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED; + dp_mon_filter_reset_rx_pkt_log_full(pdev); + if (dp_mon_filter_update(pdev) != + QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("Pktlog filters reset failed")); + return 0; + } + + dp_mon_filter_reset_rx_pkt_log_lite(pdev); + if (dp_mon_filter_update(pdev) != + QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("Pktlog filters reset failed")); + return 0; + } + + if (soc->reap_timer_init && + (!dp_is_enable_reap_timer_non_pkt(pdev))) + qdf_timer_stop(&soc->mon_reap_timer); + } + break; + case WDI_EVENT_LITE_T2H: + if (pdev->monitor_vdev) { + /* Nothing needs to be done if monitor mode is + * enabled + */ + return 0; + } + /* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW + * passing value 0. Once these macros will define in htt + * header file will use proper macros + */ + for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { + int mac_for_pdev = + dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + + pdev->pktlog_ppdu_stats = false; + if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) { + dp_h2t_cfg_stats_msg_send(pdev, 0, + mac_for_pdev); + } else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) { + dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER, + mac_for_pdev); + } else if (pdev->enhanced_stats_en) { + dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, + mac_for_pdev); + } + } + + break; + default: + /* Nothing needs to be done for other pktlog types */ + break; + } + } + return 0; +} +#endif + +/** + * dp_bucket_index() - Return index from array + * + * @delay: delay measured + * @array: array used to index corresponding delay + * + * Return: index + */ +static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array) +{ + uint8_t i = CDP_DELAY_BUCKET_0; + + for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) { + if (delay >= array[i] && delay <= array[i + 1]) + return i; + } + + return (CDP_DELAY_BUCKET_MAX - 1); +} + +/** + * dp_fill_delay_buckets() - Fill delay statistics bucket for each + * type of delay + * + * @pdev: pdev handle + * @delay: delay in ms + * @tid: tid value + * @mode: type of tx delay mode + * @ring_id: ring number + * Return: pointer to cdp_delay_stats structure + */ +static struct cdp_delay_stats * +dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay, + uint8_t tid, uint8_t mode, uint8_t ring_id) +{ + uint8_t delay_index = 0; + struct cdp_tid_tx_stats *tstats = + &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid]; + struct cdp_tid_rx_stats *rstats = + &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; + /* + * cdp_fw_to_hw_delay_range + * Fw to hw delay ranges in milliseconds + */ + uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = { + 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500}; + + /* + * cdp_sw_enq_delay_range + * Software enqueue delay ranges in milliseconds + */ + uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; + + /* + * cdp_intfrm_delay_range + * Interframe delay ranges in milliseconds + */ + uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = { + 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60}; + + /* + * Update delay stats in proper bucket + */ + switch (mode) { + /* Software Enqueue delay ranges */ + case CDP_DELAY_STATS_SW_ENQ: + + delay_index = dp_bucket_index(delay, cdp_sw_enq_delay); + tstats->swq_delay.delay_bucket[delay_index]++; + return &tstats->swq_delay; + + /* Tx Completion delay ranges */ + case CDP_DELAY_STATS_FW_HW_TRANSMIT: + + delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay); + tstats->hwtx_delay.delay_bucket[delay_index]++; + return &tstats->hwtx_delay; + + /* Interframe tx delay ranges */ + case CDP_DELAY_STATS_TX_INTERFRAME: + + delay_index = dp_bucket_index(delay, cdp_intfrm_delay); + tstats->intfrm_delay.delay_bucket[delay_index]++; + return &tstats->intfrm_delay; + + /* Interframe rx delay ranges */ + case CDP_DELAY_STATS_RX_INTERFRAME: + + delay_index = dp_bucket_index(delay, cdp_intfrm_delay); + rstats->intfrm_delay.delay_bucket[delay_index]++; + return &rstats->intfrm_delay; + + /* Ring reap to indication to network stack */ + case CDP_DELAY_STATS_REAP_STACK: + + delay_index = dp_bucket_index(delay, cdp_intfrm_delay); + rstats->to_stack_delay.delay_bucket[delay_index]++; + return &rstats->to_stack_delay; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s Incorrect delay mode: %d", __func__, mode); + } + + return NULL; +} + +/** + * dp_update_delay_stats() - Update delay statistics in structure + * and fill min, max and avg delay + * + * @pdev: pdev handle + * @delay: delay in ms + * @tid: tid value + * @mode: type of tx delay mode + * @ring id: ring number + * Return: none + */ +void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay, + uint8_t tid, uint8_t mode, uint8_t ring_id) +{ + struct cdp_delay_stats *dstats = NULL; + + /* + * Delay ranges are different for different delay modes + * Get the correct index to update delay bucket + */ + dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id); + if (qdf_unlikely(!dstats)) + return; + + if (delay != 0) { + /* + * Compute minimum,average and maximum + * delay + */ + if (delay < dstats->min_delay) + dstats->min_delay = delay; + + if (delay > dstats->max_delay) + dstats->max_delay = delay; + + /* + * Average over delay measured till now + */ + if (!dstats->avg_delay) + dstats->avg_delay = delay; + else + dstats->avg_delay = ((delay + dstats->avg_delay) / 2); + } +} + +/** + * dp_get_peer_mac_list(): function to get peer mac list of vdev + * @soc: Datapath soc handle + * @vdev_id: vdev id + * @newmac: Table of the clients mac + * @mac_cnt: No. of MACs required + * + * return: no of clients + */ +uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id, + u_int8_t newmac[][QDF_MAC_ADDR_SIZE], + u_int16_t mac_cnt) +{ + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + struct dp_soc *dp_soc = (struct dp_soc *)soc; + struct dp_peer *peer; + uint16_t new_mac_cnt = 0; + + if (!vdev) + return new_mac_cnt; + + qdf_spin_lock_bh(&dp_soc->peer_ref_mutex); + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if (peer->bss_peer) + continue; + if (new_mac_cnt < mac_cnt) { + WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw); + new_mac_cnt++; + } + } + qdf_spin_unlock_bh(&dp_soc->peer_ref_mutex); + return new_mac_cnt; +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_mon_filter.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_mon_filter.c new file mode 100644 index 0000000000000000000000000000000000000000..cfd9619be4c033726173b01f40b58628271e70d4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_mon_filter.c @@ -0,0 +1,1182 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include "dp_types.h" +#include "dp_internal.h" +#include "dp_htt.h" +#include "dp_mon_filter.h" + +/** + * dp_mon_filter_mode_type_to_str + * Monitor Filter mode to string + */ +static int8_t *dp_mon_filter_mode_type_to_str[DP_MON_FILTER_MAX_MODE] = { +#ifdef FEATURE_PERPKT_INFO + "DP MON FILTER ENHACHED STATS MODE", + "DP MON FILTER MCOPY MODE", +#endif /* FEATURE_PERPKT_INFO */ +#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) + "DP MON FILTER SMART MONITOR MODE", +#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ + "DP_MON FILTER MONITOR MODE", +#ifdef WLAN_RX_PKT_CAPTURE_ENH + "DP MON FILTER RX CAPTURE MODE", +#endif /* WLAN_RX_PKT_CAPTURE_ENH */ +#ifdef WDI_EVENT_ENABLE + "DP MON FILTER PKT LOG FULL MODE", + "DP MON FILTER PKT LOG LITE_MODE", +#endif /* WDI_EVENT_ENABLE */ +}; + +/** + * dp_mon_filter_show_filter() - Show the set filters + * @pdev: DP pdev handle + * @mode: The filter modes + * @tlv_filter: tlv filter + */ +static void dp_mon_filter_show_filter(struct dp_pdev *pdev, + enum dp_mon_filter_mode mode, + struct dp_mon_filter *filter) +{ + struct htt_rx_ring_tlv_filter *tlv_filter = &filter->tlv_filter; + + DP_MON_FILTER_PRINT("[%s]: Valid: %d", + dp_mon_filter_mode_type_to_str[mode], + filter->valid); + DP_MON_FILTER_PRINT("mpdu_start: %d", tlv_filter->mpdu_start); + DP_MON_FILTER_PRINT("msdu_start: %d", tlv_filter->msdu_start); + DP_MON_FILTER_PRINT("packet: %d", tlv_filter->packet); + DP_MON_FILTER_PRINT("msdu_end: %d", tlv_filter->msdu_end); + DP_MON_FILTER_PRINT("mpdu_end: %d", tlv_filter->mpdu_end); + DP_MON_FILTER_PRINT("packet_header: %d", + tlv_filter->packet_header); + DP_MON_FILTER_PRINT("attention: %d", tlv_filter->attention); + DP_MON_FILTER_PRINT("ppdu_start: %d", tlv_filter->ppdu_start); + DP_MON_FILTER_PRINT("ppdu_end: %d", tlv_filter->ppdu_end); + DP_MON_FILTER_PRINT("ppdu_end_user_stats: %d", + tlv_filter->ppdu_end_user_stats); + DP_MON_FILTER_PRINT("ppdu_end_user_stats_ext: %d", + tlv_filter->ppdu_end_user_stats_ext); + DP_MON_FILTER_PRINT("ppdu_end_status_done: %d", + tlv_filter->ppdu_end_status_done); + DP_MON_FILTER_PRINT("header_per_msdu: %d", tlv_filter->header_per_msdu); + DP_MON_FILTER_PRINT("enable_fp: %d", tlv_filter->enable_fp); + DP_MON_FILTER_PRINT("enable_md: %d", tlv_filter->enable_md); + DP_MON_FILTER_PRINT("enable_mo: %d", tlv_filter->enable_mo); + DP_MON_FILTER_PRINT("fp_mgmt_filter: 0x%x", tlv_filter->fp_mgmt_filter); + DP_MON_FILTER_PRINT("mo_mgmt_filter: 0x%x", tlv_filter->mo_mgmt_filter); + DP_MON_FILTER_PRINT("fp_ctrl_filter: 0x%x", tlv_filter->fp_ctrl_filter); + DP_MON_FILTER_PRINT("mo_ctrl_filter: 0x%x", tlv_filter->mo_ctrl_filter); + DP_MON_FILTER_PRINT("fp_data_filter: 0x%x", tlv_filter->fp_data_filter); + DP_MON_FILTER_PRINT("mo_data_filter: 0x%x", tlv_filter->mo_data_filter); + DP_MON_FILTER_PRINT("md_data_filter: 0x%x", tlv_filter->md_data_filter); + DP_MON_FILTER_PRINT("md_mgmt_filter: 0x%x", tlv_filter->md_mgmt_filter); + DP_MON_FILTER_PRINT("md_ctrl_filter: 0x%x", tlv_filter->md_ctrl_filter); +} + +/** + * dp_mon_ht2_rx_ring_cfg() - Send the tlv config to fw for a srng_type + * based on target + * @soc: DP soc handle + * @pdev: DP pdev handle + * @srng_type: The srng type for which filter wll be set + * @tlv_filter: tlv filter + */ +static QDF_STATUS +dp_mon_ht2_rx_ring_cfg(struct dp_soc *soc, + struct dp_pdev *pdev, + enum dp_mon_filter_srng_type srng_type, + struct htt_rx_ring_tlv_filter *tlv_filter) +{ + int mac_id; + int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx); + QDF_STATUS status = QDF_STATUS_SUCCESS; + + /* + * Overwrite the max_mac_rings for the status rings. + */ + if (srng_type == DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS) + dp_is_hw_dbs_enable(soc, &max_mac_rings); + + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_INFO, + FL("srng type %d Max_mac_rings %d "), + srng_type, + max_mac_rings); + + /* + * Loop through all MACs per radio and set the filter to the individual + * macs. For MCL + */ + for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { + int mac_for_pdev = + dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id); + int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id); + int hal_ring_type, ring_buf_size; + hal_ring_handle_t hal_ring_hdl; + + switch (srng_type) { + case DP_MON_FILTER_SRNG_TYPE_RXDMA_BUF: + hal_ring_hdl = pdev->rx_mac_buf_ring[lmac_id].hal_srng; + hal_ring_type = RXDMA_BUF; + ring_buf_size = RX_DATA_BUFFER_SIZE; + break; + + case DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS: + /* + * If two back to back HTT msg sending happened in + * short time, the second HTT msg source SRNG HP + * writing has chance to fail, this has been confirmed + * by HST HW. + * for monitor mode, here is the last HTT msg for sending. + * if the 2nd HTT msg for monitor status ring sending failed, + * HW won't provide anything into 2nd monitor status ring. + * as a WAR, add some delay before 2nd HTT msg start sending, + * > 2us is required per HST HW, delay 100 us for safe. + */ + if (mac_id) + qdf_udelay(100); + + hal_ring_hdl = + soc->rxdma_mon_status_ring[lmac_id].hal_srng; + hal_ring_type = RXDMA_MONITOR_STATUS; + ring_buf_size = RX_DATA_BUFFER_SIZE; + break; + + case DP_MON_FILTER_SRNG_TYPE_RXDMA_MON_BUF: + hal_ring_hdl = + soc->rxdma_mon_buf_ring[lmac_id].hal_srng; + hal_ring_type = RXDMA_MONITOR_BUF; + ring_buf_size = RX_DATA_BUFFER_SIZE; + break; + + default: + return QDF_STATUS_E_FAILURE; + } + + status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, + hal_ring_hdl, hal_ring_type, + ring_buf_size, + tlv_filter); + if (status != QDF_STATUS_SUCCESS) + return status; + } + + return status; +} + +/** + * dp_mon_filter_ht2_setup() - Setup the filter for the Target setup + * @soc: DP soc handle + * @pdev: DP pdev handle + * @srng_type: The srng type for which filter wll be set + * @tlv_filter: tlv filter + */ +static void dp_mon_filter_ht2_setup(struct dp_soc *soc, struct dp_pdev *pdev, + enum dp_mon_filter_srng_type srng_type, + struct dp_mon_filter *filter) +{ + int32_t current_mode = 0; + struct htt_rx_ring_tlv_filter *tlv_filter = &filter->tlv_filter; + + /* + * Loop through all the modes. + */ + for (current_mode = 0; current_mode < DP_MON_FILTER_MAX_MODE; + current_mode++) { + struct dp_mon_filter *mon_filter = + &pdev->filter[current_mode][srng_type]; + uint32_t src_filter = 0, dst_filter = 0; + + /* + * Check if the correct mode is enabled or not. + */ + if (!mon_filter->valid) + continue; + + filter->valid = true; + + /* + * Set the super bit fields + */ + src_filter = + DP_MON_FILTER_GET(&mon_filter->tlv_filter, FILTER_TLV); + dst_filter = DP_MON_FILTER_GET(tlv_filter, FILTER_TLV); + dst_filter |= src_filter; + DP_MON_FILTER_SET(tlv_filter, FILTER_TLV, dst_filter); + + /* + * Set the filter management filter. + */ + src_filter = DP_MON_FILTER_GET(&mon_filter->tlv_filter, + FILTER_FP_MGMT); + dst_filter = DP_MON_FILTER_GET(tlv_filter, FILTER_FP_MGMT); + dst_filter |= src_filter; + DP_MON_FILTER_SET(tlv_filter, FILTER_FP_MGMT, dst_filter); + + /* + * Set the monitor other management filter. + */ + src_filter = DP_MON_FILTER_GET(&mon_filter->tlv_filter, + FILTER_MO_MGMT); + dst_filter = DP_MON_FILTER_GET(tlv_filter, FILTER_MO_MGMT); + dst_filter |= src_filter; + DP_MON_FILTER_SET(tlv_filter, FILTER_MO_MGMT, dst_filter); + + /* + * Set the filter pass control filter. + */ + src_filter = DP_MON_FILTER_GET(&mon_filter->tlv_filter, + FILTER_FP_CTRL); + dst_filter = DP_MON_FILTER_GET(tlv_filter, FILTER_FP_CTRL); + dst_filter |= src_filter; + DP_MON_FILTER_SET(tlv_filter, FILTER_FP_CTRL, dst_filter); + + /* + * Set the monitor other control filter. + */ + src_filter = DP_MON_FILTER_GET(&mon_filter->tlv_filter, + FILTER_MO_CTRL); + dst_filter = DP_MON_FILTER_GET(tlv_filter, FILTER_MO_CTRL); + dst_filter |= src_filter; + DP_MON_FILTER_SET(tlv_filter, FILTER_MO_CTRL, dst_filter); + + /* + * Set the filter pass data filter. + */ + src_filter = DP_MON_FILTER_GET(&mon_filter->tlv_filter, + FILTER_FP_DATA); + dst_filter = DP_MON_FILTER_GET(tlv_filter, + FILTER_FP_DATA); + dst_filter |= src_filter; + DP_MON_FILTER_SET(tlv_filter, + FILTER_FP_DATA, dst_filter); + + /* + * Set the monitor other data filter. + */ + src_filter = DP_MON_FILTER_GET(&mon_filter->tlv_filter, + FILTER_MO_DATA); + dst_filter = DP_MON_FILTER_GET(tlv_filter, FILTER_MO_DATA); + dst_filter |= src_filter; + DP_MON_FILTER_SET(tlv_filter, FILTER_MO_DATA, dst_filter); + + /* + * Set the monitor direct data filter. + */ + src_filter = DP_MON_FILTER_GET(&mon_filter->tlv_filter, + FILTER_MD_DATA); + dst_filter = DP_MON_FILTER_GET(tlv_filter, + FILTER_MD_DATA); + dst_filter |= src_filter; + DP_MON_FILTER_SET(tlv_filter, + FILTER_MD_DATA, dst_filter); + + /* + * Set the monitor direct management filter. + */ + src_filter = DP_MON_FILTER_GET(&mon_filter->tlv_filter, + FILTER_MD_MGMT); + dst_filter = DP_MON_FILTER_GET(tlv_filter, FILTER_MD_MGMT); + dst_filter |= src_filter; + DP_MON_FILTER_SET(tlv_filter, FILTER_MD_MGMT, dst_filter); + + /* + * Set the monitor direct management filter. + */ + src_filter = DP_MON_FILTER_GET(&mon_filter->tlv_filter, + FILTER_MD_CTRL); + dst_filter = DP_MON_FILTER_GET(tlv_filter, FILTER_MD_CTRL); + dst_filter |= src_filter; + DP_MON_FILTER_SET(tlv_filter, FILTER_MD_CTRL, dst_filter); + } + + dp_mon_filter_show_filter(pdev, 0, filter); +} + +/** + * dp_mon_filter_reset_mon_srng() + * @soc: DP SoC handle + * @pdev: DP pdev handle + * @mon_srng_type: Monitor srng type + */ +static void +dp_mon_filter_reset_mon_srng(struct dp_soc *soc, struct dp_pdev *pdev, + enum dp_mon_filter_srng_type mon_srng_type) +{ + struct htt_rx_ring_tlv_filter tlv_filter = {0}; + + if (dp_mon_ht2_rx_ring_cfg(soc, pdev, mon_srng_type, + &tlv_filter) != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Monitor destinatin ring filter setting failed")); + } +} + +#if defined(FEATURE_PERPKT_INFO) || defined(ATH_SUPPORT_NAC_RSSI) \ + || defined(ATH_SUPPORT_NAC) || defined(WLAN_RX_PKT_CAPTURE_ENH) +/** + * dp_mon_filter_check_co_exist() - Check the co-existing of the + * enabled modes. + * @pdev: DP pdev handle + * + * Return: QDF_STATUS + */ +static QDF_STATUS dp_mon_filter_check_co_exist(struct dp_pdev *pdev) +{ + /* + * Check if the Rx Enhanced capture mode, monitor mode, + * smart_monitor_mode and mcopy mode can co-exist together. + */ + if ((pdev->rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED) && + ((pdev->neighbour_peers_added && pdev->monitor_vdev) || + pdev->mcopy_mode)) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Rx Capture mode can't exist with modes:\n" + "Smart Monitor Mode:%d\n" + "M_Copy Mode:%d"), + pdev->neighbour_peers_added, + pdev->mcopy_mode); + return QDF_STATUS_E_FAILURE; + } + + /* + * Check if the monitor mode cannot co-exist with any other mode. + */ + if ((pdev->monitor_vdev && pdev->monitor_configured) && + (pdev->mcopy_mode || pdev->neighbour_peers_added)) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Monitor mode can't exist with modes\n" + "M_Copy Mode:%d\n" + "Smart Monitor Mode:%d"), + pdev->mcopy_mode, + pdev->neighbour_peers_added); + return QDF_STATUS_E_FAILURE; + } + + /* + * Check if the smart monitor mode can co-exist with any other mode + */ + if (pdev->neighbour_peers_added && + ((pdev->mcopy_mode) || pdev->monitor_configured)) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Smart Monitor mode can't exist with modes\n" + "M_Copy Mode:%d\n" + "Monitor Mode:%d"), + pdev->mcopy_mode, + pdev->monitor_configured); + return QDF_STATUS_E_FAILURE; + } + + /* + * Check if the m_copy, monitor mode and the smart_monitor_mode + * can co-exist togther. + */ + if (pdev->mcopy_mode && + (pdev->monitor_vdev || pdev->neighbour_peers_added)) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("mcopy mode can't exist with modes\n" + "Monitor Mode:%d\n" + "Smart Monitor Mode:%d"), + pdev->monitor_vdev, + pdev->neighbour_peers_added); + return QDF_STATUS_E_FAILURE; + } + + /* + * Check if the Rx packet log lite or full can co-exist with + * the enable modes. + */ + if ((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) && + (pdev->monitor_vdev || pdev->monitor_configured)) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Rx pktlog full/lite can't exist with modes\n" + "Monitor Mode:%d"), + pdev->monitor_configured); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS dp_mon_filter_check_co_exist(struct dp_pdev *pdev) +{ + /* + * Check if the Rx packet log lite or full can co-exist with + * the enable modes. + */ + if ((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) && + (pdev->monitor_vdev || pdev->monitor_configured)) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Rx pktlog full/lite can't exist with modes\n" + "Monitor Mode:%d"), + pdev->monitor_configured); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * dp_mon_filter_set_mon_cmn() - Setp the common mon filters + * @pdev: DP pdev handle + * @filter: DP mon filter + * + * Return: QDF_STATUS + */ +static void dp_mon_filter_set_mon_cmn(struct dp_pdev *pdev, + struct dp_mon_filter *filter) +{ + filter->tlv_filter.mpdu_start = 1; + filter->tlv_filter.msdu_start = 1; + filter->tlv_filter.packet = 1; + filter->tlv_filter.msdu_end = 1; + filter->tlv_filter.mpdu_end = 1; + filter->tlv_filter.packet_header = 1; + filter->tlv_filter.attention = 1; + filter->tlv_filter.ppdu_start = 0; + filter->tlv_filter.ppdu_end = 0; + filter->tlv_filter.ppdu_end_user_stats = 0; + filter->tlv_filter.ppdu_end_user_stats_ext = 0; + filter->tlv_filter.ppdu_end_status_done = 0; + filter->tlv_filter.header_per_msdu = 1; + filter->tlv_filter.enable_fp = + (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0; + filter->tlv_filter.enable_mo = + (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0; + + filter->tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter; + filter->tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter; + filter->tlv_filter.fp_data_filter = pdev->fp_data_filter; + filter->tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter; + filter->tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter; + filter->tlv_filter.mo_data_filter = pdev->mo_data_filter; + filter->tlv_filter.offset_valid = false; +} + +/** + * dp_mon_filter_set_status_cmn() - Setp the common status filters + * @pdev: DP pdev handle + * @filter: Dp mon filters + * + * Return: QDF_STATUS + */ +static void dp_mon_filter_set_status_cmn(struct dp_pdev *pdev, + struct dp_mon_filter *filter) +{ + filter->tlv_filter.mpdu_start = 1; + filter->tlv_filter.msdu_start = 0; + filter->tlv_filter.packet = 0; + filter->tlv_filter.msdu_end = 0; + filter->tlv_filter.mpdu_end = 0; + filter->tlv_filter.attention = 0; + filter->tlv_filter.ppdu_start = 1; + filter->tlv_filter.ppdu_end = 1; + filter->tlv_filter.ppdu_end_user_stats = 1; + filter->tlv_filter.ppdu_end_user_stats_ext = 1; + filter->tlv_filter.ppdu_end_status_done = 1; + filter->tlv_filter.enable_fp = 1; + filter->tlv_filter.enable_md = 0; + filter->tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL; + filter->tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL; + filter->tlv_filter.fp_data_filter = FILTER_DATA_ALL; + filter->tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL; + filter->tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL; + filter->tlv_filter.mo_data_filter = FILTER_DATA_ALL; + filter->tlv_filter.offset_valid = false; +} + +#ifdef FEATURE_PERPKT_INFO +/** + * dp_mon_filter_setup_enhanced_stats() - Setup the enhanced stats filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_setup_enhanced_stats(struct dp_pdev *pdev) +{ + struct dp_mon_filter filter = {0}; + enum dp_mon_filter_mode mode = DP_MON_FILTER_ENHACHED_STATS_MODE; + enum dp_mon_filter_srng_type srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + /* Enabled the filter */ + filter.valid = true; + + dp_mon_filter_set_status_cmn(pdev, &filter); + dp_mon_filter_show_filter(pdev, mode, &filter); + pdev->filter[mode][srng_type] = filter; +} + +/** + * dp_mon_filter_reset_enhanced_stats() - Reset the enhanced stats filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_reset_enhanced_stats(struct dp_pdev *pdev) +{ + struct dp_mon_filter filter = {0}; + enum dp_mon_filter_mode mode = DP_MON_FILTER_ENHACHED_STATS_MODE; + enum dp_mon_filter_srng_type srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + pdev->filter[mode][srng_type] = filter; +} + +/** + * dp_mon_filter_setup_mcopy_mode() - Setup the m_copy mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_setup_mcopy_mode(struct dp_pdev *pdev) +{ + struct dp_mon_filter filter = {0}; + struct dp_soc *soc = NULL; + enum dp_mon_filter_mode mode = DP_MON_FILTER_MCOPY_MODE; + enum dp_mon_filter_srng_type srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + soc = pdev->soc; + if (!soc) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Soc Context is null")); + return; + } + + /* Enabled the filter */ + filter.valid = true; + dp_mon_filter_set_mon_cmn(pdev, &filter); + + filter.tlv_filter.fp_data_filter = 0; + filter.tlv_filter.mo_data_filter = 0; + + dp_mon_filter_show_filter(pdev, mode, &filter); + + srng_type = ((soc->wlan_cfg_ctx->rxdma1_enable) ? + DP_MON_FILTER_SRNG_TYPE_RXDMA_MON_BUF : + DP_MON_FILTER_SRNG_TYPE_RXDMA_BUF); + pdev->filter[mode][srng_type] = filter; + + /* Clear the filter as the same filter will be used to set the + * monitor status ring + */ + qdf_mem_zero(&(filter), sizeof(struct dp_mon_filter)); + + /* Enabled the filter */ + filter.valid = true; + dp_mon_filter_set_status_cmn(pdev, &filter); + + /* Setup the filter */ + filter.tlv_filter.enable_mo = 1; + filter.tlv_filter.packet_header = 1; + dp_mon_filter_show_filter(pdev, mode, &filter); + + srng_type = DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + pdev->filter[mode][srng_type] = filter; +} + +/** + * dp_mon_filter_reset_mcopy_mode() - Reset the m_copy mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_reset_mcopy_mode(struct dp_pdev *pdev) +{ + struct dp_mon_filter filter = {0}; + struct dp_soc *soc = NULL; + enum dp_mon_filter_mode mode = DP_MON_FILTER_MCOPY_MODE; + enum dp_mon_filter_srng_type srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + soc = pdev->soc; + if (!soc) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Soc Context is null")); + return; + } + + srng_type = ((soc->wlan_cfg_ctx->rxdma1_enable) ? + DP_MON_FILTER_SRNG_TYPE_RXDMA_MON_BUF : + DP_MON_FILTER_SRNG_TYPE_RXDMA_BUF); + pdev->filter[mode][srng_type] = filter; + + srng_type = DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + pdev->filter[mode][srng_type] = filter; +} +#endif /* FEATURE_PERPKT_INFO */ + +#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) +/** + * dp_mon_filter_setup_smart_monitor() - Setup the smart monitor mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_setup_smart_monitor(struct dp_pdev *pdev) +{ + struct dp_mon_filter filter = {0}; + struct dp_soc *soc = NULL; + enum dp_mon_filter_mode mode = DP_MON_FILTER_SMART_MONITOR_MODE; + enum dp_mon_filter_srng_type srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + soc = pdev->soc; + if (!soc) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Soc Context is null")); + return; + } + + /* Enabled the filter */ + filter.valid = true; + dp_mon_filter_set_status_cmn(pdev, &filter); + + if (soc->hw_nac_monitor_support) { + filter.tlv_filter.enable_md = 1; + filter.tlv_filter.packet_header = 1; + filter.tlv_filter.md_data_filter = FILTER_DATA_ALL; + } + + dp_mon_filter_show_filter(pdev, mode, &filter); + pdev->filter[mode][srng_type] = filter; +} + +/** + * dp_mon_filter_reset_smart_monitor() - Reset the smart monitor mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_reset_smart_monitor(struct dp_pdev *pdev) +{ + struct dp_mon_filter filter = {0}; + enum dp_mon_filter_mode mode = DP_MON_FILTER_SMART_MONITOR_MODE; + enum dp_mon_filter_srng_type srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + pdev->filter[mode][srng_type] = filter; +} +#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ + +#ifdef WLAN_RX_PKT_CAPTURE_ENH +/** + * dp_mon_filter_setup_rx_enh_capture() - Setup the Rx capture mode filters + * @pdev: DP pdev handle + */ +void dp_mon_filter_setup_rx_enh_capture(struct dp_pdev *pdev) +{ + struct dp_mon_filter filter = {0}; + struct dp_soc *soc = NULL; + enum dp_mon_filter_mode mode = DP_MON_FILTER_RX_CAPTURE_MODE; + enum dp_mon_filter_srng_type srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + soc = pdev->soc; + if (!soc) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Soc Context is null")); + return; + } + + /* Enabled the filter */ + filter.valid = true; + dp_mon_filter_set_mon_cmn(pdev, &filter); + + filter.tlv_filter.fp_mgmt_filter = 0; + filter.tlv_filter.fp_ctrl_filter = 0; + filter.tlv_filter.fp_data_filter = 0; + filter.tlv_filter.mo_mgmt_filter = 0; + filter.tlv_filter.mo_ctrl_filter = 0; + filter.tlv_filter.mo_data_filter = 0; + + dp_mon_filter_show_filter(pdev, mode, &filter); + + srng_type = ((soc->wlan_cfg_ctx->rxdma1_enable) ? + DP_MON_FILTER_SRNG_TYPE_RXDMA_MON_BUF : + DP_MON_FILTER_SRNG_TYPE_RXDMA_BUF); + pdev->filter[mode][srng_type] = filter; + + /* Clear the filter as the same filter will be used to set the + * monitor status ring + */ + qdf_mem_zero(&(filter), sizeof(struct dp_mon_filter)); + + /* Enabled the filter */ + filter.valid = true; + dp_mon_filter_set_status_cmn(pdev, &filter); + + /* Setup the filter */ + filter.tlv_filter.mpdu_end = 1; + filter.tlv_filter.enable_mo = 1; + filter.tlv_filter.packet_header = 1; + + if (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) { + filter.tlv_filter.header_per_msdu = 0; + filter.tlv_filter.enable_mo = 0; + } else if (pdev->rx_enh_capture_mode == + CDP_RX_ENH_CAPTURE_MPDU_MSDU) { + bool is_rx_mon_proto_flow_tag_enabled = + wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx); + filter.tlv_filter.header_per_msdu = 1; + filter.tlv_filter.enable_mo = 0; + if (pdev->is_rx_enh_capture_trailer_enabled || + is_rx_mon_proto_flow_tag_enabled) + filter.tlv_filter.msdu_end = 1; + } + + dp_mon_filter_show_filter(pdev, mode, &filter); + + srng_type = DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + pdev->filter[mode][srng_type] = filter; +} + +/** + * dp_mon_filter_reset_rx_enh_capture() - Reset the Rx capture mode filters + * @pdev: DP pdev handle + */ +void dp_mon_filter_reset_rx_enh_capture(struct dp_pdev *pdev) +{ + struct dp_mon_filter filter = {0}; + struct dp_soc *soc = NULL; + enum dp_mon_filter_mode mode = DP_MON_FILTER_RX_CAPTURE_MODE; + enum dp_mon_filter_srng_type srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + soc = pdev->soc; + if (!soc) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Soc Context is null")); + return; + } + + srng_type = ((soc->wlan_cfg_ctx->rxdma1_enable) ? + DP_MON_FILTER_SRNG_TYPE_RXDMA_MON_BUF : + DP_MON_FILTER_SRNG_TYPE_RXDMA_BUF); + pdev->filter[mode][srng_type] = filter; + + srng_type = DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + pdev->filter[mode][srng_type] = filter; +} +#endif /* WLAN_RX_PKT_CAPTURE_ENH */ + +/** + * dp_mon_filter_setup_mon_mode() - Setup the Rx monitor mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_setup_mon_mode(struct dp_pdev *pdev) +{ + struct dp_mon_filter filter = {0}; + struct dp_soc *soc = NULL; + enum dp_mon_filter_mode mode = DP_MON_FILTER_MONITOR_MODE; + enum dp_mon_filter_srng_type srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + soc = pdev->soc; + if (!soc) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Soc Context is null")); + return; + } + + filter.valid = true; + dp_mon_filter_set_mon_cmn(pdev, &filter); + dp_mon_filter_show_filter(pdev, mode, &filter); + + srng_type = ((soc->wlan_cfg_ctx->rxdma1_enable) ? + DP_MON_FILTER_SRNG_TYPE_RXDMA_MON_BUF : + DP_MON_FILTER_SRNG_TYPE_RXDMA_BUF); + pdev->filter[mode][srng_type] = filter; + + /* Clear the filter as the same filter will be used to set the + * monitor status ring + */ + qdf_mem_zero(&(filter), sizeof(struct dp_mon_filter)); + + /* Enabled the filter */ + filter.valid = true; + dp_mon_filter_set_status_cmn(pdev, &filter); + filter.tlv_filter.enable_mo = 1; + + dp_mon_filter_show_filter(pdev, mode, &filter); + + /* Store the above filter */ + srng_type = DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + pdev->filter[mode][srng_type] = filter; +} + +/** + * dp_mon_filter_reset_mon_mode() - Reset the Rx monitor mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_reset_mon_mode(struct dp_pdev *pdev) +{ + struct dp_mon_filter filter = {0}; + struct dp_soc *soc = NULL; + enum dp_mon_filter_mode mode = DP_MON_FILTER_MONITOR_MODE; + enum dp_mon_filter_srng_type srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + soc = pdev->soc; + if (!soc) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Soc Context is null")); + return; + } + + srng_type = ((soc->wlan_cfg_ctx->rxdma1_enable) ? + DP_MON_FILTER_SRNG_TYPE_RXDMA_MON_BUF : + DP_MON_FILTER_SRNG_TYPE_RXDMA_BUF); + pdev->filter[mode][srng_type] = filter; + + srng_type = DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + pdev->filter[mode][srng_type] = filter; +} + +#ifdef WDI_EVENT_ENABLE +/** + * dp_mon_filter_setup_rx_pkt_log_full() - Setup the Rx pktlog full mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_setup_rx_pkt_log_full(struct dp_pdev *pdev) +{ + struct dp_mon_filter filter = {0}; + enum dp_mon_filter_mode mode = DP_MON_FILTER_PKT_LOG_FULL_MODE; + enum dp_mon_filter_srng_type srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + /* Enabled the filter */ + filter.valid = true; + dp_mon_filter_set_status_cmn(pdev, &filter); + + /* Setup the filter */ + filter.tlv_filter.packet_header = 1; + filter.tlv_filter.msdu_start = 1; + filter.tlv_filter.msdu_end = 1; + filter.tlv_filter.mpdu_end = 1; + filter.tlv_filter.attention = 1; + + dp_mon_filter_show_filter(pdev, mode, &filter); + pdev->filter[mode][srng_type] = filter; +} + +/** + * dp_mon_filter_reset_rx_pkt_log_full() - Reset the Rx pktlog full mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_reset_rx_pkt_log_full(struct dp_pdev *pdev) +{ + struct dp_mon_filter filter = {0}; + enum dp_mon_filter_mode mode = DP_MON_FILTER_PKT_LOG_FULL_MODE; + enum dp_mon_filter_srng_type srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + pdev->filter[mode][srng_type] = filter; +} + +/** + * dp_mon_filter_setup_rx_pkt_log_lite() - Setup the Rx pktlog lite mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_setup_rx_pkt_log_lite(struct dp_pdev *pdev) +{ + struct dp_mon_filter filter = {0}; + enum dp_mon_filter_mode mode = DP_MON_FILTER_PKT_LOG_LITE_MODE; + enum dp_mon_filter_srng_type srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + /* Enabled the filter */ + filter.valid = true; + dp_mon_filter_set_status_cmn(pdev, &filter); + + dp_mon_filter_show_filter(pdev, mode, &filter); + pdev->filter[mode][srng_type] = filter; +} + +/** + * dp_mon_filter_reset_rx_pkt_log_lite() - Reset the Rx pktlog lite mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_reset_rx_pkt_log_lite(struct dp_pdev *pdev) +{ + struct dp_mon_filter filter = {0}; + enum dp_mon_filter_mode mode = DP_MON_FILTER_PKT_LOG_LITE_MODE; + enum dp_mon_filter_srng_type srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS; + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + pdev->filter[mode][srng_type] = filter; +} +#endif /* WDI_EVENT_ENABLE */ + +/** + * dp_mon_filter_update() - Setup the monitor filter setting for a srng + * type + * @pdev: DP pdev handle + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_mon_filter_update(struct dp_pdev *pdev) +{ + struct dp_soc *soc; + bool mon_mode_set = false; + struct dp_mon_filter filter = {0}; + enum dp_mon_filter_srng_type mon_srng_type = + DP_MON_FILTER_SRNG_TYPE_RXDMA_MON_BUF; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return QDF_STATUS_E_FAILURE; + } + + soc = pdev->soc; + if (!soc) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Soc Context is null")); + return QDF_STATUS_E_FAILURE; + } + + status = dp_mon_filter_check_co_exist(pdev); + if (status != QDF_STATUS_SUCCESS) + return status; + + /* + * Setup the filters for the monitor destination ring. + */ + if (!soc->wlan_cfg_ctx->rxdma1_enable) + mon_srng_type = DP_MON_FILTER_SRNG_TYPE_RXDMA_BUF; + + /* + * Setup the filters for the monitor mode. + */ + qdf_mem_zero(&(filter), sizeof(filter)); + dp_mon_filter_ht2_setup(soc, pdev, mon_srng_type, &filter); + + mon_mode_set = filter.valid; + if (mon_mode_set) { + status = dp_mon_ht2_rx_ring_cfg(soc, pdev, + mon_srng_type, + &filter.tlv_filter); + } else { + /* + * For WIN case the monitor buffer ring is used and it does need + * reset when monitor mode gets disabled. + */ + if (soc->wlan_cfg_ctx->rxdma1_enable) { + status = dp_mon_ht2_rx_ring_cfg(soc, pdev, + mon_srng_type, + &filter.tlv_filter); + } + } + + if (status != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Monitor destination ring filter setting failed")); + return QDF_STATUS_E_FAILURE; + } + + /* + * Setup the filters for the status ring. + */ + qdf_mem_zero(&(filter), sizeof(filter)); + dp_mon_filter_ht2_setup(soc, pdev, + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS, + &filter); + + /* + * Reset the monitor filters if the all the modes for the status rings + * are disabled. This is done to prevent the HW backpressure from the + * monitor destination ring in case the status ring filters + * are not enabled. + */ + if (!filter.valid && mon_mode_set) + dp_mon_filter_reset_mon_srng(soc, pdev, mon_srng_type); + + if (dp_mon_ht2_rx_ring_cfg(soc, pdev, + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS, + &filter.tlv_filter) != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Monitor status ring filter setting failed")); + dp_mon_filter_reset_mon_srng(soc, pdev, mon_srng_type); + return QDF_STATUS_E_FAILURE; + } + + return status; +} + +/** + * dp_mon_filter_dealloc() - Deallocate the filter objects to be stored in + * the radio object. + * @pdev: DP pdev handle + */ +void dp_mon_filter_dealloc(struct dp_pdev *pdev) +{ + enum dp_mon_filter_mode mode; + struct dp_mon_filter **mon_filter = NULL; + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return; + } + + mon_filter = pdev->filter; + + /* + * Check if the monitor filters are already allocated to the pdev. + */ + if (!mon_filter) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Found NULL memmory for the Monitor filter")); + return; + } + + /* + * Iterate through the every mode and free the filter object. + */ + for (mode = 0; mode < DP_MON_FILTER_MAX_MODE; mode++) { + if (!mon_filter[mode]) { + continue; + } + + qdf_mem_free(mon_filter[mode]); + mon_filter[mode] = NULL; + } + + qdf_mem_free(mon_filter); + pdev->filter = NULL; +} + +/** + * dp_mon_filter_alloc() - Allocate the filter objects to be stored in + * the radio object. + * @pdev: DP pdev handle + */ +struct dp_mon_filter **dp_mon_filter_alloc(struct dp_pdev *pdev) +{ + struct dp_mon_filter **mon_filter = NULL; + enum dp_mon_filter_mode mode; + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("pdev Context is null")); + return NULL; + } + + mon_filter = (struct dp_mon_filter **)qdf_mem_malloc( + (sizeof(struct dp_mon_filter *) * + DP_MON_FILTER_MAX_MODE)); + if (!mon_filter) { + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_ERROR, + FL("Monitor filter mem allocation failed")); + return NULL; + } + + qdf_mem_zero(mon_filter, + sizeof(struct dp_mon_filter *) * DP_MON_FILTER_MAX_MODE); + + /* + * Allocate the memory for filters for different srngs for each modes. + */ + for (mode = 0; mode < DP_MON_FILTER_MAX_MODE; mode++) { + mon_filter[mode] = qdf_mem_malloc(sizeof(struct dp_mon_filter) * + DP_MON_FILTER_SRNG_TYPE_MAX); + /* Assign the mon_filter to the pdev->filter such + * that the dp_mon_filter_dealloc() can free up the filters. */ + if (!mon_filter[mode]) { + pdev->filter = mon_filter; + goto fail; + } + } + + return mon_filter; +fail: + dp_mon_filter_dealloc(pdev); + return NULL; +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_mon_filter.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_mon_filter.h new file mode 100644 index 0000000000000000000000000000000000000000..711185f28b424457655d71670686ab9c57b3ed31 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_mon_filter.h @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_MON_FILTER_H_ +#define _DP_MON_FILTER_H_ + +/** + * Accessor Macros to access the software + * defined HTT filter htt_rx_ring_tlv_filter. + */ +#define DP_MON_FILTER_TLV_OFFSET 0x00000000 +#define DP_MON_FILTER_TLV_MASK 0xffffffff +#define DP_MON_FILTER_TLV_LSB 0 + +#define DP_MON_FILTER_FP_MGMT_OFFSET 0x00000004 +#define DP_MON_FILTER_FP_MGMT_MASK 0x0000ffff +#define DP_MON_FILTER_FP_MGMT_LSB 0 + +#define DP_MON_FILTER_MO_MGMT_OFFSET 0x00000004 +#define DP_MON_FILTER_MO_MGMT_MASK 0xffff0000 +#define DP_MON_FILTER_MO_MGMT_LSB 16 + +#define DP_MON_FILTER_FP_CTRL_OFFSET 0x00000008 +#define DP_MON_FILTER_FP_CTRL_MASK 0x0000ffff +#define DP_MON_FILTER_FP_CTRL_LSB 0 + +#define DP_MON_FILTER_MO_CTRL_OFFSET 0x00000008 +#define DP_MON_FILTER_MO_CTRL_MASK 0xffff0000 +#define DP_MON_FILTER_MO_CTRL_LSB 16 + +#define DP_MON_FILTER_FP_DATA_OFFSET 0x0000000c +#define DP_MON_FILTER_FP_DATA_MASK 0x0000ffff +#define DP_MON_FILTER_FP_DATA_LSB 0 + +#define DP_MON_FILTER_MO_DATA_OFFSET 0x0000000c +#define DP_MON_FILTER_MO_DATA_MASK 0xffff0000 +#define DP_MON_FILTER_MO_DATA_LSB 16 + +#define DP_MON_FILTER_MD_DATA_OFFSET 0x00000010 +#define DP_MON_FILTER_MD_DATA_MASK 0x0000ffff +#define DP_MON_FILTER_MD_DATA_LSB 0 + +#define DP_MON_FILTER_MD_MGMT_OFFSET 0x00000010 +#define DP_MON_FILTER_MD_MGMT_MASK 0xffff0000 +#define DP_MON_FILTER_MD_MGMT_LSB 16 + +#define DP_MON_FILTER_MD_CTRL_OFFSET 0x00000014 +#define DP_MON_FILTER_MD_CTRL_MASK 0x0000ffff +#define DP_MON_FILTER_MD_CTRL_LSB 0 + +#define DP_MON_FILTER_GET(src, field) \ + ((*((uint32_t *)((uint8_t *)(src) + DP_MON_ ## field ## _OFFSET)) & \ + (DP_MON_ ## field ## _MASK)) >> DP_MON_ ## field ## _LSB) \ + +#define DP_MON_FILTER_SET(dst, field, value) \ +do { \ + uint32_t *val = \ + ((uint32_t *)((uint8_t *)(dst) + DP_MON_ ## field ## _OFFSET)); \ + *val &= ~(DP_MON_ ## field ## _MASK); \ + *val |= ((value) << DP_MON_ ## field ## _LSB); \ +} while (0) + +#define DP_MON_FILTER_PRINT(fmt, args ...) \ + QDF_TRACE(QDF_MODULE_ID_MON_FILTER, QDF_TRACE_LEVEL_DEBUG, \ + fmt, ## args) +/** + * struct dp_mon_filter - Monitor TLV filter + * @valid: enable/disable TLV filter + * @tlv_filter: Rx ring TLV filter + */ +struct dp_mon_filter { + bool valid; + struct htt_rx_ring_tlv_filter tlv_filter; +}; + +/** + * enum dp_mon_filter_mode - Different modes for SRNG filters + * @DP_MON_FILTER_ENHACHED_STATS_MODE: PPDU enhanced stats mode + * @DP_MON_FILTER_SMART_MONITOR_MODE: Smart monitor mode + * @DP_MON_FILTER_MCOPY_MODE: AM copy mode + * @DP_MON_FILTER_MONITOR_MODE: Monitor mode + * @DP_MON_FILTER_RX_CAPTURE_MODE: Rx Capture mode + * @DP_MON_FILTER_PKT_LOG_FULL_MODE: Packet log full mode + * @DP_MON_FILTER_PKT_LOG_LITE_MODE: Packet log lite mode + */ +enum dp_mon_filter_mode { +#ifdef FEATURE_PERPKT_INFO + DP_MON_FILTER_ENHACHED_STATS_MODE, + DP_MON_FILTER_MCOPY_MODE, +#endif /* FEATURE_PERPKT_INFO */ +#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) + DP_MON_FILTER_SMART_MONITOR_MODE, +#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ + DP_MON_FILTER_MONITOR_MODE, +#ifdef WLAN_RX_PKT_CAPTURE_ENH + DP_MON_FILTER_RX_CAPTURE_MODE, +#endif /* WLAN_RX_PKT_CAPTURE_ENH */ + +#ifdef WDI_EVENT_ENABLE + DP_MON_FILTER_PKT_LOG_FULL_MODE, + DP_MON_FILTER_PKT_LOG_LITE_MODE, +#endif /* WDI_EVENT_ENABLE */ + DP_MON_FILTER_MAX_MODE +}; + +/** + * enum dp_mon_filter_srng_type - Srng types dynamic mode filter + * settings. + * @DP_MON_FILTER_SRNG_TYPE_RXDMA_BUF: RXDMA srng type + * @DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS: RxDMA monitor status srng + * @DP_MON_FILTER_SRNG_TYPE_RXDMA_MON_BUF: RxDMA destination srng + * @DP_MON_FILTER_SRNG_TYPE_MAX: Srng max type + */ +enum dp_mon_filter_srng_type { + DP_MON_FILTER_SRNG_TYPE_RXDMA_BUF, + DP_MON_FILTER_SRNG_TYPE_RXDMA_MONITOR_STATUS, + DP_MON_FILTER_SRNG_TYPE_RXDMA_MON_BUF, + DP_MON_FILTER_SRNG_TYPE_MAX +}; + +/** + * enum dp_mon_filter_action - Action for storing the filters + * into the radio structure. + * @DP_MON_FILTER_CLEAR - Clears the filter for a mode + * @DP_MON_FILTER_SET - Set the filtes for a mode + */ +enum dp_mon_filter_action { + DP_MON_FILTER_CLEAR, + DP_MON_FILTER_SET, +}; + +#ifdef FEATURE_PERPKT_INFO +/** + * dp_mon_filter_setup_enhanced_stats() - Setup the enhanced stats filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_setup_enhanced_stats(struct dp_pdev *pdev); + +/*** + * dp_mon_filter_reset_enhanced_stats() - Reset the enhanced stats filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_reset_enhanced_stats(struct dp_pdev *pdev); + +/** + * dp_mon_filter_setup_mcopy_mode() - Setup the m_copy mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_setup_mcopy_mode(struct dp_pdev *pdev); + +/** + * dp_mon_filter_reset_mcopy_mode() - Reset the m_copy mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_reset_mcopy_mode(struct dp_pdev *pdev); +#endif /* FEATURE_PERPKT_INFO */ + +#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) +/** + * dp_mon_filter_setup_smart_monitor() - Setup the smart monitor mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_setup_smart_monitor(struct dp_pdev *pdev); + +/** + * dp_mon_filter_reset_smart_monitor() - Reset the smart monitor mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_reset_smart_monitor(struct dp_pdev *pdev); +#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ + +#ifdef WLAN_RX_PKT_CAPTURE_ENH +/** + * dp_mon_filter_setup_rx_enh_capture() - Setup the Rx capture mode filters + * @pdev: DP pdev handle + */ +void dp_mon_filter_setup_rx_enh_capture(struct dp_pdev *pdev); + +/** + * dp_mon_filter_reset_rx_enh_capture() - Reset the Rx capture mode filters + * @pdev: DP pdev handle + */ +void dp_mon_filter_reset_rx_enh_capture(struct dp_pdev *pdev); +#endif /* WLAN_RX_PKT_CAPTURE_ENH */ + +/** + * dp_mon_filter_setup_mon_mode() - Setup the Rx monitor mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_setup_mon_mode(struct dp_pdev *pdev); + +/** + * dp_mon_filter_reset_mon_mode() - Reset the Rx monitor mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_reset_mon_mode(struct dp_pdev *pdev); + +#ifdef WDI_EVENT_ENABLE +/** + * dp_mon_filter_setup_rx_pkt_log_full() - Setup the Rx pktlog full mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_setup_rx_pkt_log_full(struct dp_pdev *pdev); + +/** + * dp_mon_filter_reset_rx_pkt_log_full() - Reset the Rx pktlog full mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_reset_rx_pkt_log_full(struct dp_pdev *pdev); + +/** + * dp_mon_filter_setup_rx_pkt_log_lite() - Setup the Rx pktlog lite mode filter + * in the radio object. + * @pdev: DP pdev handle + */ +void dp_mon_filter_setup_rx_pkt_log_lite(struct dp_pdev *pdev); + +/** + * dp_mon_filter_reset_rx_pkt_log_lite() - Reset the Rx pktlog lite mode filter + * @pdev: DP pdev handle + */ +void dp_mon_filter_reset_rx_pkt_log_lite(struct dp_pdev *pdev); +#endif /* WDI_EVENT_ENABLE */ + +/** + * dp_mon_filter_update() - Setup the monitor filter setting for a srng + * type + * @pdev: DP pdev handle + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_mon_filter_update(struct dp_pdev *pdev); + +/** + * dp_mon_filter_dealloc() - Deallocate the filter objects to be stored in + * the radio object. + * @pdev: DP pdev handle + */ +void dp_mon_filter_dealloc(struct dp_pdev *pdev); + +/** + * dp_mon_filter_alloc() - Allocate the filter objects to be stored in + * the radio object. + * @pdev: DP pdev handle + */ +struct dp_mon_filter **dp_mon_filter_alloc(struct dp_pdev *pdev); +#endif /* #ifndef _DP_MON_FILTER_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c new file mode 100644 index 0000000000000000000000000000000000000000..9cc238688bd8eb270ec54cea220039dd3e5af6b8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c @@ -0,0 +1,3979 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include "dp_htt.h" +#include "dp_types.h" +#include "dp_internal.h" +#include "dp_peer.h" +#include "dp_rx_defrag.h" +#include "dp_rx.h" +#include +#include +#include +#include + +#ifdef WLAN_TX_PKT_CAPTURE_ENH +#include "dp_tx_capture.h" +#endif + +#ifdef FEATURE_WDS +static inline bool +dp_peer_ast_free_in_unmap_supported(struct dp_peer *peer, + struct dp_ast_entry *ast_entry) +{ + /* if peer map v2 is enabled we are not freeing ast entry + * here and it is supposed to be freed in unmap event (after + * we receive delete confirmation from target) + * + * if peer_id is invalid we did not get the peer map event + * for the peer free ast entry from here only in this case + */ + + if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) && + (ast_entry->type != CDP_TXRX_AST_TYPE_SELF)) + return true; + + return false; +} +#else +static inline bool +dp_peer_ast_free_in_unmap_supported(struct dp_peer *peer, + struct dp_ast_entry *ast_entry) +{ + return false; +} +#endif + +static inline void +dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params, + uint8_t valid) +{ + params->u.upd_queue_params.update_svld = 1; + params->u.upd_queue_params.svld = valid; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Setting SSN valid bit to %d", + __func__, valid); +} + +static inline int dp_peer_find_mac_addr_cmp( + union dp_align_mac_addr *mac_addr1, + union dp_align_mac_addr *mac_addr2) +{ + /* + * Intentionally use & rather than &&. + * because the operands are binary rather than generic boolean, + * the functionality is equivalent. + * Using && has the advantage of short-circuited evaluation, + * but using & has the advantage of no conditional branching, + * which is a more significant benefit. + */ + return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd) + & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef)); +} + +static int dp_peer_ast_table_attach(struct dp_soc *soc) +{ + uint32_t max_ast_index; + + max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx); + /* allocate ast_table for ast entry to ast_index map */ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "\n<=== cfg max ast idx %d ====>", max_ast_index); + soc->ast_table = qdf_mem_malloc(max_ast_index * + sizeof(struct dp_ast_entry *)); + if (!soc->ast_table) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: ast_table memory allocation failed", __func__); + return QDF_STATUS_E_NOMEM; + } + return 0; /* success */ +} + +static int dp_peer_find_map_attach(struct dp_soc *soc) +{ + uint32_t max_peers, peer_map_size; + + max_peers = soc->max_peers; + /* allocate the peer ID -> peer object map */ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "\n<=== cfg max peer id %d ====>", max_peers); + peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]); + soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size); + if (!soc->peer_id_to_obj_map) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: peer map memory allocation failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + /* + * The peer_id_to_obj_map doesn't really need to be initialized, + * since elements are only used after they have been individually + * initialized. + * However, it is convenient for debugging to have all elements + * that are not in use set to 0. + */ + qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size); + return 0; /* success */ +} + +static int dp_log2_ceil(unsigned int value) +{ + unsigned int tmp = value; + int log2 = -1; + + while (tmp) { + log2++; + tmp >>= 1; + } + if (1 << log2 != value) + log2++; + return log2; +} + +static int dp_peer_find_add_id_to_obj( + struct dp_peer *peer, + uint16_t peer_id) +{ + int i; + + for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) { + if (peer->peer_ids[i] == HTT_INVALID_PEER) { + peer->peer_ids[i] = peer_id; + return 0; /* success */ + } + } + return QDF_STATUS_E_FAILURE; /* failure */ +} + +#define DP_PEER_HASH_LOAD_MULT 2 +#define DP_PEER_HASH_LOAD_SHIFT 0 + +#define DP_AST_HASH_LOAD_MULT 2 +#define DP_AST_HASH_LOAD_SHIFT 0 + +static int dp_peer_find_hash_attach(struct dp_soc *soc) +{ + int i, hash_elems, log2; + + /* allocate the peer MAC address -> peer object hash table */ + hash_elems = soc->max_peers; + hash_elems *= DP_PEER_HASH_LOAD_MULT; + hash_elems >>= DP_PEER_HASH_LOAD_SHIFT; + log2 = dp_log2_ceil(hash_elems); + hash_elems = 1 << log2; + + soc->peer_hash.mask = hash_elems - 1; + soc->peer_hash.idx_bits = log2; + /* allocate an array of TAILQ peer object lists */ + soc->peer_hash.bins = qdf_mem_malloc( + hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer))); + if (!soc->peer_hash.bins) + return QDF_STATUS_E_NOMEM; + + for (i = 0; i < hash_elems; i++) + TAILQ_INIT(&soc->peer_hash.bins[i]); + + return 0; +} + +static void dp_peer_find_hash_detach(struct dp_soc *soc) +{ + if (soc->peer_hash.bins) { + qdf_mem_free(soc->peer_hash.bins); + soc->peer_hash.bins = NULL; + } +} + +static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc, + union dp_align_mac_addr *mac_addr) +{ + unsigned index; + + index = + mac_addr->align2.bytes_ab ^ + mac_addr->align2.bytes_cd ^ + mac_addr->align2.bytes_ef; + index ^= index >> soc->peer_hash.idx_bits; + index &= soc->peer_hash.mask; + return index; +} + + +void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer) +{ + unsigned index; + + index = dp_peer_find_hash_index(soc, &peer->mac_addr); + qdf_spin_lock_bh(&soc->peer_ref_mutex); + /* + * It is important to add the new peer at the tail of the peer list + * with the bin index. Together with having the hash_find function + * search from head to tail, this ensures that if two entries with + * the same MAC address are stored, the one added first will be + * found first. + */ + TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem); + qdf_spin_unlock_bh(&soc->peer_ref_mutex); +} + +#ifdef FEATURE_AST +/* + * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table + * @soc: SoC handle + * + * Return: None + */ +static int dp_peer_ast_hash_attach(struct dp_soc *soc) +{ + int i, hash_elems, log2; + unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx); + + hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >> + DP_AST_HASH_LOAD_SHIFT); + + log2 = dp_log2_ceil(hash_elems); + hash_elems = 1 << log2; + + soc->ast_hash.mask = hash_elems - 1; + soc->ast_hash.idx_bits = log2; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "ast hash_elems: %d, max_ast_idx: %d", + hash_elems, max_ast_idx); + + /* allocate an array of TAILQ peer object lists */ + soc->ast_hash.bins = qdf_mem_malloc( + hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, + dp_ast_entry))); + + if (!soc->ast_hash.bins) + return QDF_STATUS_E_NOMEM; + + for (i = 0; i < hash_elems; i++) + TAILQ_INIT(&soc->ast_hash.bins[i]); + + return 0; +} + +/* + * dp_peer_ast_cleanup() - cleanup the references + * @soc: SoC handle + * @ast: ast entry + * + * Return: None + */ +static inline void dp_peer_ast_cleanup(struct dp_soc *soc, + struct dp_ast_entry *ast) +{ + txrx_ast_free_cb cb = ast->callback; + void *cookie = ast->cookie; + + dp_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK", + QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie); + + /* Call the callbacks to free up the cookie */ + if (cb) { + ast->callback = NULL; + ast->cookie = NULL; + cb(soc->ctrl_psoc, + dp_soc_to_cdp_soc(soc), + cookie, + CDP_TXRX_AST_DELETE_IN_PROGRESS); + } +} + +/* + * dp_peer_ast_hash_detach() - Free AST Hash table + * @soc: SoC handle + * + * Return: None + */ +static void dp_peer_ast_hash_detach(struct dp_soc *soc) +{ + unsigned int index; + struct dp_ast_entry *ast, *ast_next; + + if (!soc->ast_hash.mask) + return; + + if (!soc->ast_hash.bins) + return; + + dp_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries); + + qdf_spin_lock_bh(&soc->ast_lock); + for (index = 0; index <= soc->ast_hash.mask; index++) { + if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) { + TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index], + hash_list_elem, ast_next) { + TAILQ_REMOVE(&soc->ast_hash.bins[index], ast, + hash_list_elem); + dp_peer_ast_cleanup(soc, ast); + qdf_mem_free(ast); + } + } + } + qdf_spin_unlock_bh(&soc->ast_lock); + + qdf_mem_free(soc->ast_hash.bins); + soc->ast_hash.bins = NULL; +} + +/* + * dp_peer_ast_hash_index() - Compute the AST hash from MAC address + * @soc: SoC handle + * + * Return: AST hash + */ +static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc, + union dp_align_mac_addr *mac_addr) +{ + uint32_t index; + + index = + mac_addr->align2.bytes_ab ^ + mac_addr->align2.bytes_cd ^ + mac_addr->align2.bytes_ef; + index ^= index >> soc->ast_hash.idx_bits; + index &= soc->ast_hash.mask; + return index; +} + +/* + * dp_peer_ast_hash_add() - Add AST entry into hash table + * @soc: SoC handle + * + * This function adds the AST entry into SoC AST hash table + * It assumes caller has taken the ast lock to protect the access to this table + * + * Return: None + */ +static inline void dp_peer_ast_hash_add(struct dp_soc *soc, + struct dp_ast_entry *ase) +{ + uint32_t index; + + index = dp_peer_ast_hash_index(soc, &ase->mac_addr); + TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem); +} + +/* + * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table + * @soc: SoC handle + * + * This function removes the AST entry from soc AST hash table + * It assumes caller has taken the ast lock to protect the access to this table + * + * Return: None + */ +void dp_peer_ast_hash_remove(struct dp_soc *soc, + struct dp_ast_entry *ase) +{ + unsigned index; + struct dp_ast_entry *tmpase; + int found = 0; + + index = dp_peer_ast_hash_index(soc, &ase->mac_addr); + /* Check if tail is not empty before delete*/ + QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index])); + + dp_debug("ast_idx: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT, + ase->ast_idx, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw)); + + TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) { + if (tmpase == ase) { + found = 1; + break; + } + } + + QDF_ASSERT(found); + TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem); +} + +/* + * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list + * @soc: SoC handle + * @peer: peer handle + * @ast_mac_addr: mac address + * + * It assumes caller has taken the ast lock to protect the access to ast list + * + * Return: AST entry + */ +struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc, + struct dp_peer *peer, + uint8_t *ast_mac_addr) +{ + struct dp_ast_entry *ast_entry = NULL; + union dp_align_mac_addr *mac_addr = + (union dp_align_mac_addr *)ast_mac_addr; + + TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) { + if (!dp_peer_find_mac_addr_cmp(mac_addr, + &ast_entry->mac_addr)) { + return ast_entry; + } + } + + return NULL; +} + +/* + * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address + * @soc: SoC handle + * + * It assumes caller has taken the ast lock to protect the access to + * AST hash table + * + * Return: AST entry + */ +struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc, + uint8_t *ast_mac_addr, + uint8_t pdev_id) +{ + union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; + uint32_t index; + struct dp_ast_entry *ase; + + qdf_mem_copy(&local_mac_addr_aligned.raw[0], + ast_mac_addr, QDF_MAC_ADDR_SIZE); + mac_addr = &local_mac_addr_aligned; + + index = dp_peer_ast_hash_index(soc, mac_addr); + TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) { + if ((pdev_id == ase->pdev_id) && + !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) { + return ase; + } + } + + return NULL; +} + +/* + * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address + * @soc: SoC handle + * + * It assumes caller has taken the ast lock to protect the access to + * AST hash table + * + * Return: AST entry + */ +struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc, + uint8_t *ast_mac_addr) +{ + union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; + unsigned index; + struct dp_ast_entry *ase; + + qdf_mem_copy(&local_mac_addr_aligned.raw[0], + ast_mac_addr, QDF_MAC_ADDR_SIZE); + mac_addr = &local_mac_addr_aligned; + + index = dp_peer_ast_hash_index(soc, mac_addr); + TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) { + if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) { + return ase; + } + } + + return NULL; +} + +/* + * dp_peer_map_ast() - Map the ast entry with HW AST Index + * @soc: SoC handle + * @peer: peer to which ast node belongs + * @mac_addr: MAC address of ast node + * @hw_peer_id: HW AST Index returned by target in peer map event + * @vdev_id: vdev id for VAP to which the peer belongs to + * @ast_hash: ast hash value in HW + * + * Return: None + */ +static inline void dp_peer_map_ast(struct dp_soc *soc, + struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id, + uint8_t vdev_id, uint16_t ast_hash) +{ + struct dp_ast_entry *ast_entry = NULL; + enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC; + + if (!peer) { + return; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: peer %pK ID %d vid %d mac "QDF_MAC_ADDR_FMT, + __func__, peer, hw_peer_id, vdev_id, + QDF_MAC_ADDR_REF(mac_addr)); + + qdf_spin_lock_bh(&soc->ast_lock); + + ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr); + + if (ast_entry) { + ast_entry->ast_idx = hw_peer_id; + soc->ast_table[hw_peer_id] = ast_entry; + ast_entry->is_active = TRUE; + peer_type = ast_entry->type; + ast_entry->ast_hash_value = ast_hash; + ast_entry->is_mapped = TRUE; + } + + if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) { + if (soc->cdp_soc.ol_ops->peer_map_event) { + soc->cdp_soc.ol_ops->peer_map_event( + soc->ctrl_psoc, peer->peer_ids[0], + hw_peer_id, vdev_id, + mac_addr, peer_type, ast_hash); + } + } else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "AST entry not found"); + } + + qdf_spin_unlock_bh(&soc->ast_lock); + return; +} + +void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + struct cdp_soc *dp_soc, + void *cookie, + enum cdp_ast_free_status status) +{ + struct dp_ast_free_cb_params *param = + (struct dp_ast_free_cb_params *)cookie; + struct dp_soc *soc = (struct dp_soc *)dp_soc; + struct dp_peer *peer = NULL; + + if (status != CDP_TXRX_AST_DELETED) { + qdf_mem_free(cookie); + return; + } + + peer = dp_peer_find_hash_find(soc, ¶m->peer_mac_addr.raw[0], + 0, param->vdev_id); + if (peer) { + dp_peer_add_ast(soc, peer, + ¶m->mac_addr.raw[0], + param->type, + param->flags); + dp_peer_unref_delete(peer); + } + qdf_mem_free(cookie); +} + +/* + * dp_peer_add_ast() - Allocate and add AST entry into peer list + * @soc: SoC handle + * @peer: peer to which ast node belongs + * @mac_addr: MAC address of ast node + * @is_self: Is this base AST entry with peer mac address + * + * This API is used by WDS source port learning function to + * add a new AST entry into peer AST list + * + * Return: 0 if new entry is allocated, + * -1 if entry add failed + */ +int dp_peer_add_ast(struct dp_soc *soc, + struct dp_peer *peer, + uint8_t *mac_addr, + enum cdp_txrx_ast_entry_type type, + uint32_t flags) +{ + struct dp_ast_entry *ast_entry = NULL; + struct dp_vdev *vdev = NULL, *tmp_vdev = NULL; + struct dp_pdev *pdev = NULL; + uint8_t next_node_mac[6]; + int ret = -1; + txrx_ast_free_cb cb = NULL; + void *cookie = NULL; + struct dp_peer *tmp_peer = NULL; + bool is_peer_found = false; + + vdev = peer->vdev; + if (!vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Peers vdev is NULL")); + QDF_ASSERT(0); + return ret; + } + + pdev = vdev->pdev; + + tmp_peer = dp_peer_find_hash_find(soc, mac_addr, 0, + DP_VDEV_ALL); + if (tmp_peer) { + tmp_vdev = tmp_peer->vdev; + if (!tmp_vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Peers vdev is NULL")); + QDF_ASSERT(0); + dp_peer_unref_delete(tmp_peer); + return ret; + } + if (tmp_vdev->pdev->pdev_id == pdev->pdev_id) + is_peer_found = true; + + dp_peer_unref_delete(tmp_peer); + } + + qdf_spin_lock_bh(&soc->ast_lock); + if (peer->delete_in_progress) { + qdf_spin_unlock_bh(&soc->ast_lock); + return ret; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: pdevid: %u vdev: %u ast_entry->type: %d flags: 0x%x peer_mac: "QDF_MAC_ADDR_FMT" peer: %pK mac "QDF_MAC_ADDR_FMT, + __func__, pdev->pdev_id, vdev->vdev_id, type, flags, + QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer, + QDF_MAC_ADDR_REF(mac_addr)); + + + /* fw supports only 2 times the max_peers ast entries */ + if (soc->num_ast_entries >= + wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) { + qdf_spin_unlock_bh(&soc->ast_lock); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Max ast entries reached")); + return ret; + } + + /* If AST entry already exists , just return from here + * ast entry with same mac address can exist on different radios + * if ast_override support is enabled use search by pdev in this + * case + */ + if (soc->ast_override_support) { + ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, + pdev->pdev_id); + if (ast_entry) { + if ((type == CDP_TXRX_AST_TYPE_MEC) && + (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)) + ast_entry->is_active = TRUE; + + qdf_spin_unlock_bh(&soc->ast_lock); + return 0; + } + if (is_peer_found) { + /* During WDS to static roaming, peer is added + * to the list before static AST entry create. + * So, allow AST entry for STATIC type + * even if peer is present + */ + if (type != CDP_TXRX_AST_TYPE_STATIC) { + qdf_spin_unlock_bh(&soc->ast_lock); + return 0; + } + } + } else { + /* For HWMWDS_SEC entries can be added for same mac address + * do not check for existing entry + */ + if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC) + goto add_ast_entry; + + ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr); + + if (ast_entry) { + if ((type == CDP_TXRX_AST_TYPE_MEC) && + (ast_entry->type == CDP_TXRX_AST_TYPE_MEC)) + ast_entry->is_active = TRUE; + + if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) && + !ast_entry->delete_in_progress) { + qdf_spin_unlock_bh(&soc->ast_lock); + return 0; + } + + /* Add for HMWDS entry we cannot be ignored if there + * is AST entry with same mac address + * + * if ast entry exists with the requested mac address + * send a delete command and register callback which + * can take care of adding HMWDS ast enty on delete + * confirmation from target + */ + if (type == CDP_TXRX_AST_TYPE_WDS_HM) { + struct dp_ast_free_cb_params *param = NULL; + + if (ast_entry->type == + CDP_TXRX_AST_TYPE_WDS_HM_SEC) + goto add_ast_entry; + + /* save existing callback */ + if (ast_entry->callback) { + cb = ast_entry->callback; + cookie = ast_entry->cookie; + } + + param = qdf_mem_malloc(sizeof(*param)); + if (!param) { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + "Allocation failed"); + qdf_spin_unlock_bh(&soc->ast_lock); + return ret; + } + + qdf_mem_copy(¶m->mac_addr.raw[0], mac_addr, + QDF_MAC_ADDR_SIZE); + qdf_mem_copy(¶m->peer_mac_addr.raw[0], + &peer->mac_addr.raw[0], + QDF_MAC_ADDR_SIZE); + param->type = type; + param->flags = flags; + param->vdev_id = vdev->vdev_id; + ast_entry->callback = dp_peer_free_hmwds_cb; + ast_entry->pdev_id = vdev->pdev->pdev_id; + ast_entry->type = type; + ast_entry->cookie = (void *)param; + if (!ast_entry->delete_in_progress) + dp_peer_del_ast(soc, ast_entry); + } + + /* Modify an already existing AST entry from type + * WDS to MEC on promption. This serves as a fix when + * backbone of interfaces are interchanged wherein + * wds entr becomes its own MEC. The entry should be + * replaced only when the ast_entry peer matches the + * peer received in mec event. This additional check + * is needed in wds repeater cases where a multicast + * packet from station to the root via the repeater + * should not remove the wds entry. + */ + else if ((type == CDP_TXRX_AST_TYPE_MEC) && + (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) && + (ast_entry->peer == peer)) { + ast_entry->is_active = FALSE; + dp_peer_del_ast(soc, ast_entry); + } + qdf_spin_unlock_bh(&soc->ast_lock); + + /* Call the saved callback*/ + if (cb) { + cb(soc->ctrl_psoc, + dp_soc_to_cdp_soc(soc), + cookie, + CDP_TXRX_AST_DELETE_IN_PROGRESS); + } + return 0; + } + } + +add_ast_entry: + ast_entry = (struct dp_ast_entry *) + qdf_mem_malloc(sizeof(struct dp_ast_entry)); + + if (!ast_entry) { + qdf_spin_unlock_bh(&soc->ast_lock); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("fail to allocate ast_entry")); + QDF_ASSERT(0); + return ret; + } + + qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE); + ast_entry->pdev_id = vdev->pdev->pdev_id; + ast_entry->is_mapped = false; + ast_entry->delete_in_progress = false; + + switch (type) { + case CDP_TXRX_AST_TYPE_STATIC: + peer->self_ast_entry = ast_entry; + ast_entry->type = CDP_TXRX_AST_TYPE_STATIC; + if (peer->vdev->opmode == wlan_op_mode_sta) + ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS; + break; + case CDP_TXRX_AST_TYPE_SELF: + peer->self_ast_entry = ast_entry; + ast_entry->type = CDP_TXRX_AST_TYPE_SELF; + break; + case CDP_TXRX_AST_TYPE_WDS: + ast_entry->next_hop = 1; + ast_entry->type = CDP_TXRX_AST_TYPE_WDS; + break; + case CDP_TXRX_AST_TYPE_WDS_HM: + ast_entry->next_hop = 1; + ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM; + break; + case CDP_TXRX_AST_TYPE_WDS_HM_SEC: + ast_entry->next_hop = 1; + ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC; + break; + case CDP_TXRX_AST_TYPE_MEC: + ast_entry->next_hop = 1; + ast_entry->type = CDP_TXRX_AST_TYPE_MEC; + break; + case CDP_TXRX_AST_TYPE_DA: + peer = peer->vdev->vap_bss_peer; + ast_entry->next_hop = 1; + ast_entry->type = CDP_TXRX_AST_TYPE_DA; + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Incorrect AST entry type")); + } + + ast_entry->is_active = TRUE; + DP_STATS_INC(soc, ast.added, 1); + soc->num_ast_entries++; + dp_peer_ast_hash_add(soc, ast_entry); + + ast_entry->peer = peer; + + if (type == CDP_TXRX_AST_TYPE_MEC) + qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6); + else + qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6); + + TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem); + + if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) && + (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) && + (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) && + (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) { + if (QDF_STATUS_SUCCESS == + soc->cdp_soc.ol_ops->peer_add_wds_entry( + soc->ctrl_psoc, + peer->vdev->vdev_id, + peer->mac_addr.raw, + mac_addr, + next_node_mac, + flags, + ast_entry->type)) { + qdf_spin_unlock_bh(&soc->ast_lock); + return 0; + } + } + + qdf_spin_unlock_bh(&soc->ast_lock); + return ret; +} + +/* + * dp_peer_free_ast_entry() - Free up the ast entry memory + * @soc: SoC handle + * @ast_entry: Address search entry + * + * This API is used to free up the memory associated with + * AST entry. + * + * Return: None + */ +void dp_peer_free_ast_entry(struct dp_soc *soc, + struct dp_ast_entry *ast_entry) +{ + /* + * NOTE: Ensure that call to this API is done + * after soc->ast_lock is taken + */ + dp_debug("type: %d ast_idx: %u mac_addr: " QDF_MAC_ADDR_FMT, + ast_entry->type, ast_entry->ast_idx, + QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw)); + + ast_entry->callback = NULL; + ast_entry->cookie = NULL; + + DP_STATS_INC(soc, ast.deleted, 1); + dp_peer_ast_hash_remove(soc, ast_entry); + dp_peer_ast_cleanup(soc, ast_entry); + qdf_mem_free(ast_entry); + soc->num_ast_entries--; +} + +/* + * dp_peer_unlink_ast_entry() - Free up the ast entry memory + * @soc: SoC handle + * @ast_entry: Address search entry + * + * This API is used to remove/unlink AST entry from the peer list + * and hash list. + * + * Return: None + */ +void dp_peer_unlink_ast_entry(struct dp_soc *soc, + struct dp_ast_entry *ast_entry) +{ + /* + * NOTE: Ensure that call to this API is done + * after soc->ast_lock is taken + */ + struct dp_peer *peer = ast_entry->peer; + + TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem); + + if (ast_entry == peer->self_ast_entry) + peer->self_ast_entry = NULL; + + /* + * release the reference only if it is mapped + * to ast_table + */ + if (ast_entry->is_mapped) + soc->ast_table[ast_entry->ast_idx] = NULL; + + ast_entry->peer = NULL; +} + +/* + * dp_peer_del_ast() - Delete and free AST entry + * @soc: SoC handle + * @ast_entry: AST entry of the node + * + * This function removes the AST entry from peer and soc tables + * It assumes caller has taken the ast lock to protect the access to these + * tables + * + * Return: None + */ +void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry) +{ + struct dp_peer *peer; + + if (!ast_entry) + return; + + if (ast_entry->delete_in_progress) + return; + + dp_debug("call by %ps: ast_idx: %u mac_addr: " QDF_MAC_ADDR_FMT, + (void *)_RET_IP_, ast_entry->ast_idx, + QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw)); + + ast_entry->delete_in_progress = true; + + peer = ast_entry->peer; + dp_peer_ast_send_wds_del(soc, ast_entry); + + /* Remove SELF and STATIC entries in teardown itself */ + if (!ast_entry->next_hop) + dp_peer_unlink_ast_entry(soc, ast_entry); + + if (ast_entry->is_mapped) + soc->ast_table[ast_entry->ast_idx] = NULL; + + /* if peer map v2 is enabled we are not freeing ast entry + * here and it is supposed to be freed in unmap event (after + * we receive delete confirmation from target) + * + * if peer_id is invalid we did not get the peer map event + * for the peer free ast entry from here only in this case + */ + if (dp_peer_ast_free_in_unmap_supported(peer, ast_entry)) + return; + + /* for WDS secondary entry ast_entry->next_hop would be set so + * unlinking has to be done explicitly here. + * As this entry is not a mapped entry unmap notification from + * FW wil not come. Hence unlinkling is done right here. + */ + if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC) + dp_peer_unlink_ast_entry(soc, ast_entry); + + dp_peer_free_ast_entry(soc, ast_entry); +} + +/* + * dp_peer_update_ast() - Delete and free AST entry + * @soc: SoC handle + * @peer: peer to which ast node belongs + * @ast_entry: AST entry of the node + * @flags: wds or hmwds + * + * This function update the AST entry to the roamed peer and soc tables + * It assumes caller has taken the ast lock to protect the access to these + * tables + * + * Return: 0 if ast entry is updated successfully + * -1 failure + */ +int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer, + struct dp_ast_entry *ast_entry, uint32_t flags) +{ + int ret = -1; + struct dp_peer *old_peer; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: "QDF_MAC_ADDR_FMT" peer_mac: "QDF_MAC_ADDR_FMT"\n", + __func__, ast_entry->type, peer->vdev->pdev->pdev_id, + peer->vdev->vdev_id, flags, + QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw), + QDF_MAC_ADDR_REF(peer->mac_addr.raw)); + + /* Do not send AST update in below cases + * 1) Ast entry delete has already triggered + * 2) Peer delete is already triggered + * 3) We did not get the HTT map for create event + */ + if (ast_entry->delete_in_progress || peer->delete_in_progress || + !ast_entry->is_mapped) + return ret; + + if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) || + (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) || + (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) || + (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)) + return 0; + + /* + * Avoids flood of WMI update messages sent to FW for same peer. + */ + if (qdf_unlikely(ast_entry->peer == peer) && + (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) && + (ast_entry->peer->vdev == peer->vdev) && + (ast_entry->is_active)) + return 0; + + old_peer = ast_entry->peer; + TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem); + + ast_entry->peer = peer; + ast_entry->type = CDP_TXRX_AST_TYPE_WDS; + ast_entry->pdev_id = peer->vdev->pdev->pdev_id; + ast_entry->is_active = TRUE; + TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem); + + ret = soc->cdp_soc.ol_ops->peer_update_wds_entry( + soc->ctrl_psoc, + peer->vdev->vdev_id, + ast_entry->mac_addr.raw, + peer->mac_addr.raw, + flags); + + return ret; +} + +/* + * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry + * @soc: SoC handle + * @ast_entry: AST entry of the node + * + * This function gets the pdev_id from the ast entry. + * + * Return: (uint8_t) pdev_id + */ +uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc, + struct dp_ast_entry *ast_entry) +{ + return ast_entry->pdev_id; +} + +/* + * dp_peer_ast_get_next_hop() - get next_hop from the ast entry + * @soc: SoC handle + * @ast_entry: AST entry of the node + * + * This function gets the next hop from the ast entry. + * + * Return: (uint8_t) next_hop + */ +uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc, + struct dp_ast_entry *ast_entry) +{ + return ast_entry->next_hop; +} + +/* + * dp_peer_ast_set_type() - set type from the ast entry + * @soc: SoC handle + * @ast_entry: AST entry of the node + * + * This function sets the type in the ast entry. + * + * Return: + */ +void dp_peer_ast_set_type(struct dp_soc *soc, + struct dp_ast_entry *ast_entry, + enum cdp_txrx_ast_entry_type type) +{ + ast_entry->type = type; +} + +#else +int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer, + uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, + uint32_t flags) +{ + return 1; +} + +void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry) +{ +} + +int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer, + struct dp_ast_entry *ast_entry, uint32_t flags) +{ + return 1; +} + +struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc, + uint8_t *ast_mac_addr) +{ + return NULL; +} + +struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc, + uint8_t *ast_mac_addr, + uint8_t pdev_id) +{ + return NULL; +} + +static int dp_peer_ast_hash_attach(struct dp_soc *soc) +{ + return 0; +} + +static inline void dp_peer_map_ast(struct dp_soc *soc, + struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id, + uint8_t vdev_id, uint16_t ast_hash) +{ + return; +} + +static void dp_peer_ast_hash_detach(struct dp_soc *soc) +{ +} + +void dp_peer_ast_set_type(struct dp_soc *soc, + struct dp_ast_entry *ast_entry, + enum cdp_txrx_ast_entry_type type) +{ +} + +uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc, + struct dp_ast_entry *ast_entry) +{ + return 0xff; +} + +uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc, + struct dp_ast_entry *ast_entry) +{ + return 0xff; +} + +int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer, + struct dp_ast_entry *ast_entry, uint32_t flags) +{ + return 1; +} + +#endif + +void dp_peer_ast_send_wds_del(struct dp_soc *soc, + struct dp_ast_entry *ast_entry) +{ + struct dp_peer *peer = ast_entry->peer; + struct cdp_soc_t *cdp_soc = &soc->cdp_soc; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE, + "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_mac: "QDF_MAC_ADDR_FMT"\n", + __func__, ast_entry->type, peer->vdev->pdev->pdev_id, + peer->vdev->vdev_id, + QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw), + ast_entry->next_hop, + QDF_MAC_ADDR_REF(ast_entry->peer->mac_addr.raw)); + + if (ast_entry->next_hop) { + cdp_soc->ol_ops->peer_del_wds_entry(soc->ctrl_psoc, + peer->vdev->vdev_id, + ast_entry->mac_addr.raw, + ast_entry->type); + } + +} + +/** + * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete + * @soc: soc handle + * @peer: peer handle + * @mac_addr: mac address of the AST entry to searc and delete + * + * find the ast entry from the peer list using the mac address and free + * the entry. + * + * Return: SUCCESS or NOENT + */ +static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc, + struct dp_peer *peer, + uint8_t *mac_addr) +{ + struct dp_ast_entry *ast_entry; + void *cookie = NULL; + txrx_ast_free_cb cb = NULL; + + /* + * release the reference only if it is mapped + * to ast_table + */ + + qdf_spin_lock_bh(&soc->ast_lock); + + ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr); + if (!ast_entry) { + qdf_spin_unlock_bh(&soc->ast_lock); + return QDF_STATUS_E_NOENT; + } else if (ast_entry->is_mapped) { + soc->ast_table[ast_entry->ast_idx] = NULL; + } + + cb = ast_entry->callback; + cookie = ast_entry->cookie; + + + dp_peer_unlink_ast_entry(soc, ast_entry); + dp_peer_free_ast_entry(soc, ast_entry); + + qdf_spin_unlock_bh(&soc->ast_lock); + + if (cb) { + cb(soc->ctrl_psoc, + dp_soc_to_cdp_soc(soc), + cookie, + CDP_TXRX_AST_DELETED); + } + + return QDF_STATUS_SUCCESS; +} + +struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc, + uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id) +{ + union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; + unsigned index; + struct dp_peer *peer; + + if (mac_addr_is_aligned) { + mac_addr = (union dp_align_mac_addr *) peer_mac_addr; + } else { + qdf_mem_copy( + &local_mac_addr_aligned.raw[0], + peer_mac_addr, QDF_MAC_ADDR_SIZE); + mac_addr = &local_mac_addr_aligned; + } + index = dp_peer_find_hash_index(soc, mac_addr); + qdf_spin_lock_bh(&soc->peer_ref_mutex); + TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) { + if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 && + ((peer->vdev->vdev_id == vdev_id) || + (vdev_id == DP_VDEV_ALL))) { + /* found it - increment the ref count before releasing + * the lock + */ + qdf_atomic_inc(&peer->ref_cnt); + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + return peer; + } + } + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + return NULL; /* failure */ +} + +void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer) +{ + unsigned index; + struct dp_peer *tmppeer = NULL; + int found = 0; + + index = dp_peer_find_hash_index(soc, &peer->mac_addr); + /* Check if tail is not empty before delete*/ + QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index])); + /* + * DO NOT take the peer_ref_mutex lock here - it needs to be taken + * by the caller. + * The caller needs to hold the lock from the time the peer object's + * reference count is decremented and tested up through the time the + * reference to the peer object is removed from the hash table, by + * this function. + * Holding the lock only while removing the peer object reference + * from the hash table keeps the hash table consistent, but does not + * protect against a new HL tx context starting to use the peer object + * if it looks up the peer object from its MAC address just after the + * peer ref count is decremented to zero, but just before the peer + * object reference is removed from the hash table. + */ + TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) { + if (tmppeer == peer) { + found = 1; + break; + } + } + QDF_ASSERT(found); + TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem); +} + +void dp_peer_find_hash_erase(struct dp_soc *soc) +{ + int i; + + /* + * Not really necessary to take peer_ref_mutex lock - by this point, + * it's known that the soc is no longer in use. + */ + for (i = 0; i <= soc->peer_hash.mask; i++) { + if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) { + struct dp_peer *peer, *peer_next; + + /* + * TAILQ_FOREACH_SAFE must be used here to avoid any + * memory access violation after peer is freed + */ + TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i], + hash_list_elem, peer_next) { + /* + * Don't remove the peer from the hash table - + * that would modify the list we are currently + * traversing, and it's not necessary anyway. + */ + /* + * Artificially adjust the peer's ref count to + * 1, so it will get deleted by + * dp_peer_unref_delete. + */ + /* set to zero */ + qdf_atomic_init(&peer->ref_cnt); + /* incr to one */ + qdf_atomic_inc(&peer->ref_cnt); + dp_peer_unref_delete(peer); + } + } + } +} + +static void dp_peer_ast_table_detach(struct dp_soc *soc) +{ + if (soc->ast_table) { + qdf_mem_free(soc->ast_table); + soc->ast_table = NULL; + } +} + +static void dp_peer_find_map_detach(struct dp_soc *soc) +{ + if (soc->peer_id_to_obj_map) { + qdf_mem_free(soc->peer_id_to_obj_map); + soc->peer_id_to_obj_map = NULL; + } +} + +int dp_peer_find_attach(struct dp_soc *soc) +{ + if (dp_peer_find_map_attach(soc)) + return 1; + + if (dp_peer_find_hash_attach(soc)) { + dp_peer_find_map_detach(soc); + return 1; + } + + if (dp_peer_ast_table_attach(soc)) { + dp_peer_find_hash_detach(soc); + dp_peer_find_map_detach(soc); + return 1; + } + + if (dp_peer_ast_hash_attach(soc)) { + dp_peer_ast_table_detach(soc); + dp_peer_find_hash_detach(soc); + dp_peer_find_map_detach(soc); + return 1; + } + + return 0; /* success */ +} + +void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status) +{ + struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt; + struct hal_reo_queue_status *queue_status = &(reo_status->queue_status); + + if (queue_status->header.status == HAL_REO_CMD_DRAIN) + return; + + if (queue_status->header.status != HAL_REO_CMD_SUCCESS) { + DP_PRINT_STATS("REO stats failure %d for TID %d\n", + queue_status->header.status, rx_tid->tid); + return; + } + + DP_PRINT_STATS("REO queue stats (TID: %d):\n" + "ssn: %d\n" + "curr_idx : %d\n" + "pn_31_0 : %08x\n" + "pn_63_32 : %08x\n" + "pn_95_64 : %08x\n" + "pn_127_96 : %08x\n" + "last_rx_enq_tstamp : %08x\n" + "last_rx_deq_tstamp : %08x\n" + "rx_bitmap_31_0 : %08x\n" + "rx_bitmap_63_32 : %08x\n" + "rx_bitmap_95_64 : %08x\n" + "rx_bitmap_127_96 : %08x\n" + "rx_bitmap_159_128 : %08x\n" + "rx_bitmap_191_160 : %08x\n" + "rx_bitmap_223_192 : %08x\n" + "rx_bitmap_255_224 : %08x\n", + rx_tid->tid, + queue_status->ssn, queue_status->curr_idx, + queue_status->pn_31_0, queue_status->pn_63_32, + queue_status->pn_95_64, queue_status->pn_127_96, + queue_status->last_rx_enq_tstamp, + queue_status->last_rx_deq_tstamp, + queue_status->rx_bitmap_31_0, + queue_status->rx_bitmap_63_32, + queue_status->rx_bitmap_95_64, + queue_status->rx_bitmap_127_96, + queue_status->rx_bitmap_159_128, + queue_status->rx_bitmap_191_160, + queue_status->rx_bitmap_223_192, + queue_status->rx_bitmap_255_224); + + DP_PRINT_STATS( + "curr_mpdu_cnt : %d\n" + "curr_msdu_cnt : %d\n" + "fwd_timeout_cnt : %d\n" + "fwd_bar_cnt : %d\n" + "dup_cnt : %d\n" + "frms_in_order_cnt : %d\n" + "bar_rcvd_cnt : %d\n" + "mpdu_frms_cnt : %d\n" + "msdu_frms_cnt : %d\n" + "total_byte_cnt : %d\n" + "late_recv_mpdu_cnt : %d\n" + "win_jump_2k : %d\n" + "hole_cnt : %d\n", + queue_status->curr_mpdu_cnt, + queue_status->curr_msdu_cnt, + queue_status->fwd_timeout_cnt, + queue_status->fwd_bar_cnt, + queue_status->dup_cnt, + queue_status->frms_in_order_cnt, + queue_status->bar_rcvd_cnt, + queue_status->mpdu_frms_cnt, + queue_status->msdu_frms_cnt, + queue_status->total_cnt, + queue_status->late_recv_mpdu_cnt, + queue_status->win_jump_2k, + queue_status->hole_cnt); + + DP_PRINT_STATS("Addba Req : %d\n" + "Addba Resp : %d\n" + "Addba Resp success : %d\n" + "Addba Resp failed : %d\n" + "Delba Req received : %d\n" + "Delba Tx success : %d\n" + "Delba Tx Fail : %d\n" + "BA window size : %d\n" + "Pn size : %d\n", + rx_tid->num_of_addba_req, + rx_tid->num_of_addba_resp, + rx_tid->num_addba_rsp_success, + rx_tid->num_addba_rsp_failed, + rx_tid->num_of_delba_req, + rx_tid->delba_tx_success_cnt, + rx_tid->delba_tx_fail_cnt, + rx_tid->ba_win_size, + rx_tid->pn_size); +} + +static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc, + uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id, + uint8_t vdev_id) +{ + struct dp_peer *peer; + + QDF_ASSERT(peer_id <= soc->max_peers); + /* check if there's already a peer object with this MAC address */ + peer = dp_peer_find_hash_find(soc, peer_mac_addr, + 0 /* is aligned */, vdev_id); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: peer %pK ID %d vid %d mac "QDF_MAC_ADDR_FMT, + __func__, peer, peer_id, vdev_id, + QDF_MAC_ADDR_REF(peer_mac_addr)); + + if (peer) { + /* peer's ref count was already incremented by + * peer_find_hash_find + */ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: ref_cnt: %d", __func__, + qdf_atomic_read(&peer->ref_cnt)); + if (!soc->peer_id_to_obj_map[peer_id]) + soc->peer_id_to_obj_map[peer_id] = peer; + else { + /* Peer map event came for peer_id which + * is already mapped, this is not expected + */ + QDF_ASSERT(0); + } + + if (dp_peer_find_add_id_to_obj(peer, peer_id)) { + /* TBDXXX: assert for now */ + QDF_ASSERT(0); + } + + return peer; + } + + return NULL; +} + +/** + * dp_rx_peer_map_handler() - handle peer map event from firmware + * @soc_handle - genereic soc handle + * @peeri_id - peer_id from firmware + * @hw_peer_id - ast index for this peer + * @vdev_id - vdev ID + * @peer_mac_addr - mac address of the peer + * @ast_hash - ast hash value + * @is_wds - flag to indicate peer map event for WDS ast entry + * + * associate the peer_id that firmware provided with peer entry + * and update the ast table in the host with the hw_peer_id. + * + * Return: none + */ + +void +dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id, + uint16_t hw_peer_id, uint8_t vdev_id, + uint8_t *peer_mac_addr, uint16_t ast_hash, + uint8_t is_wds) +{ + struct dp_peer *peer = NULL; + enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC; + + dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d", + soc, peer_id, hw_peer_id, + QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id); + + /* Peer map event for WDS ast entry get the peer from + * obj map + */ + if (is_wds) { + peer = soc->peer_id_to_obj_map[peer_id]; + /* + * In certain cases like Auth attack on a repeater + * can result in the number of ast_entries falling + * in the same hash bucket to exceed the max_skid + * length supported by HW in root AP. In these cases + * the FW will return the hw_peer_id (ast_index) as + * 0xffff indicating HW could not add the entry in + * its table. Host has to delete the entry from its + * table in these cases. + */ + if (hw_peer_id == HTT_INVALID_PEER) { + DP_STATS_INC(soc, ast.map_err, 1); + if (!dp_peer_ast_free_entry_by_mac(soc, + peer, + peer_mac_addr)) + return; + + dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u", + peer, peer->peer_ids[0], + QDF_MAC_ADDR_REF(peer->mac_addr.raw), + QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id, + is_wds); + + return; + } + + } else { + /* + * It's the responsibility of the CP and FW to ensure + * that peer is created successfully. Ideally DP should + * not hit the below condition for directly assocaited + * peers. + */ + if ((hw_peer_id < 0) || + (hw_peer_id >= + wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "invalid hw_peer_id: %d", hw_peer_id); + qdf_assert_always(0); + } + + peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id, + hw_peer_id, vdev_id); + + if (peer) { + if (wlan_op_mode_sta == peer->vdev->opmode && + qdf_mem_cmp(peer->mac_addr.raw, + peer->vdev->mac_addr.raw, + QDF_MAC_ADDR_SIZE) != 0) { + dp_info("STA vdev bss_peer!!!!"); + peer->bss_peer = 1; + peer->vdev->vap_bss_peer = peer; + qdf_mem_copy(peer->vdev->vap_bss_peer_mac_addr, + peer->mac_addr.raw, + QDF_MAC_ADDR_SIZE); + } + + if (peer->vdev->opmode == wlan_op_mode_sta) { + peer->vdev->bss_ast_hash = ast_hash; + peer->vdev->bss_ast_idx = hw_peer_id; + } + + /* Add ast entry incase self ast entry is + * deleted due to DP CP sync issue + * + * self_ast_entry is modified in peer create + * and peer unmap path which cannot run in + * parllel with peer map, no lock need before + * referring it + */ + if (!peer->self_ast_entry) { + dp_info("Add self ast from map "QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(peer_mac_addr)); + dp_peer_add_ast(soc, peer, + peer_mac_addr, + type, 0); + } + + } + } + dp_peer_map_ast(soc, peer, peer_mac_addr, + hw_peer_id, vdev_id, ast_hash); +} + +/** + * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware + * @soc_handle - genereic soc handle + * @peeri_id - peer_id from firmware + * @vdev_id - vdev ID + * @mac_addr - mac address of the peer or wds entry + * @is_wds - flag to indicate peer map event for WDS ast entry + * + * Return: none + */ +void +dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id, + uint8_t vdev_id, uint8_t *mac_addr, + uint8_t is_wds) +{ + struct dp_peer *peer; + uint8_t i; + + peer = __dp_peer_find_by_id(soc, peer_id); + + /* + * Currently peer IDs are assigned for vdevs as well as peers. + * If the peer ID is for a vdev, then the peer pointer stored + * in peer_id_to_obj_map will be NULL. + */ + if (!peer) { + dp_err("Received unmap event for invalid peer_id %u", peer_id); + return; + } + + /* If V2 Peer map messages are enabled AST entry has to be freed here + */ + if (is_wds) { + if (!dp_peer_ast_free_entry_by_mac(soc, peer, mac_addr)) + return; + + dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u", + peer, peer->peer_ids[0], + QDF_MAC_ADDR_REF(peer->mac_addr.raw), + QDF_MAC_ADDR_REF(mac_addr), vdev_id, + is_wds); + + return; + } + + dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK", + soc, peer_id, peer); + + soc->peer_id_to_obj_map[peer_id] = NULL; + for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) { + if (peer->peer_ids[i] == peer_id) { + peer->peer_ids[i] = HTT_INVALID_PEER; + break; + } + } + + /* + * Reset ast flow mapping table + */ + dp_peer_reset_flowq_map(peer); + + if (soc->cdp_soc.ol_ops->peer_unmap_event) { + soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc, + peer_id, vdev_id); + } + + /* + * Remove a reference to the peer. + * If there are no more references, delete the peer object. + */ + dp_peer_unref_delete(peer); +} + +void +dp_peer_find_detach(struct dp_soc *soc) +{ + dp_peer_find_map_detach(soc); + dp_peer_find_hash_detach(soc); + dp_peer_ast_hash_detach(soc); + dp_peer_ast_table_detach(soc); +} + +static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status) +{ + struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt; + + if ((reo_status->rx_queue_status.header.status != + HAL_REO_CMD_SUCCESS) && + (reo_status->rx_queue_status.header.status != + HAL_REO_CMD_DRAIN)) { + /* Should not happen normally. Just print error for now */ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Rx tid HW desc update failed(%d): tid %d", + __func__, + reo_status->rx_queue_status.header.status, + rx_tid->tid); + } +} + +/* + * dp_find_peer_by_addr - find peer instance by mac address + * @dev: physical device instance + * @peer_mac_addr: peer mac address + * + * Return: peer instance pointer + */ +void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr) +{ + struct dp_pdev *pdev = (struct dp_pdev *)dev; + struct dp_peer *peer; + + peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL); + + if (!peer) + return NULL; + + dp_verbose_debug("peer %pK mac: "QDF_MAC_ADDR_FMT, peer, + QDF_MAC_ADDR_REF(peer->mac_addr.raw)); + + /* ref_cnt is incremented inside dp_peer_find_hash_find(). + * Decrement it here. + */ + dp_peer_unref_delete(peer); + + return peer; +} + +static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer) +{ + struct ol_if_ops *ol_ops = NULL; + bool is_roaming = false; + uint8_t vdev_id = -1; + struct cdp_soc_t *soc; + + if (!peer) { + dp_info("Peer is NULL. No roaming possible"); + return false; + } + + soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc); + ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops; + + if (ol_ops && ol_ops->is_roam_inprogress) { + dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id); + is_roaming = ol_ops->is_roam_inprogress(vdev_id); + } + + dp_info("peer: "QDF_MAC_ADDR_FMT", vdev_id: %d, is_roaming: %d", + QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming); + + return is_roaming; +} + +QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t + ba_window_size, uint32_t start_seq) +{ + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + struct dp_soc *soc = peer->vdev->pdev->soc; + struct hal_reo_cmd_params params; + + qdf_mem_zero(¶ms, sizeof(params)); + + params.std.need_status = 1; + params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff; + params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + params.u.upd_queue_params.update_ba_window_size = 1; + params.u.upd_queue_params.ba_window_size = ba_window_size; + + if (start_seq < IEEE80211_SEQ_MAX) { + params.u.upd_queue_params.update_ssn = 1; + params.u.upd_queue_params.ssn = start_seq; + } else { + dp_set_ssn_valid_flag(¶ms, 0); + } + + if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, ¶ms, + dp_rx_tid_update_cb, rx_tid)) { + dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE"); + DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1); + } + + rx_tid->ba_win_size = ba_window_size; + + if (dp_get_peer_vdev_roaming_in_progress(peer)) + return QDF_STATUS_E_PERM; + + if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) + soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup( + soc->ctrl_psoc, peer->vdev->pdev->pdev_id, + peer->vdev->vdev_id, peer->mac_addr.raw, + rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_reo_desc_free() - Callback free reo descriptor memory after + * HW cache flush + * + * @soc: DP SOC handle + * @cb_ctxt: Callback context + * @reo_status: REO command status + */ +static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status) +{ + struct reo_desc_list_node *freedesc = + (struct reo_desc_list_node *)cb_ctxt; + struct dp_rx_tid *rx_tid = &freedesc->rx_tid; + unsigned long curr_ts = qdf_get_system_timestamp(); + + if ((reo_status->fl_cache_status.header.status != + HAL_REO_CMD_SUCCESS) && + (reo_status->fl_cache_status.header.status != + HAL_REO_CMD_DRAIN)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Rx tid HW desc flush failed(%d): tid %d", + __func__, + reo_status->rx_queue_status.header.status, + freedesc->rx_tid.tid); + } + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + "%s:%lu hw_qdesc_paddr: %pK, tid:%d", __func__, + curr_ts, + (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid); + qdf_mem_unmap_nbytes_single(soc->osdev, + rx_tid->hw_qdesc_paddr, + QDF_DMA_BIDIRECTIONAL, + rx_tid->hw_qdesc_alloc_size); + qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); + qdf_mem_free(freedesc); +} + +#if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) +/* Hawkeye emulation requires bus address to be >= 0x50000000 */ +static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr) +{ + if (dma_addr < 0x50000000) + return QDF_STATUS_E_FAILURE; + else + return QDF_STATUS_SUCCESS; +} +#else +static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr) +{ + return QDF_STATUS_SUCCESS; +} +#endif + + +/* + * dp_rx_tid_setup_wifi3() – Setup receive TID state + * @peer: Datapath peer handle + * @tid: TID + * @ba_window_size: BlockAck window size + * @start_seq: Starting sequence number + * + * Return: QDF_STATUS code + */ +QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid, + uint32_t ba_window_size, uint32_t start_seq) +{ + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + struct dp_vdev *vdev = peer->vdev; + struct dp_soc *soc = vdev->pdev->soc; + uint32_t hw_qdesc_size; + uint32_t hw_qdesc_align; + int hal_pn_type; + void *hw_qdesc_vaddr; + uint32_t alloc_tries = 0; + QDF_STATUS err = QDF_STATUS_SUCCESS; + + if (peer->delete_in_progress || + !qdf_atomic_read(&peer->is_default_route_set)) + return QDF_STATUS_E_FAILURE; + + rx_tid->ba_win_size = ba_window_size; + if (rx_tid->hw_qdesc_vaddr_unaligned) + return dp_rx_tid_update_wifi3(peer, tid, ba_window_size, + start_seq); + rx_tid->delba_tx_status = 0; + rx_tid->ppdu_id_2k = 0; + rx_tid->num_of_addba_req = 0; + rx_tid->num_of_delba_req = 0; + rx_tid->num_of_addba_resp = 0; + rx_tid->num_addba_rsp_failed = 0; + rx_tid->num_addba_rsp_success = 0; + rx_tid->delba_tx_success_cnt = 0; + rx_tid->delba_tx_fail_cnt = 0; + rx_tid->statuscode = 0; + + /* TODO: Allocating HW queue descriptors based on max BA window size + * for all QOS TIDs so that same descriptor can be used later when + * ADDBA request is recevied. This should be changed to allocate HW + * queue descriptors based on BA window size being negotiated (0 for + * non BA cases), and reallocate when BA window size changes and also + * send WMI message to FW to change the REO queue descriptor in Rx + * peer entry as part of dp_rx_tid_update. + */ + if (tid != DP_NON_QOS_TID) + hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, + HAL_RX_MAX_BA_WINDOW, tid); + else + hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, + ba_window_size, tid); + + hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc); + /* To avoid unnecessary extra allocation for alignment, try allocating + * exact size and see if we already have aligned address. + */ + rx_tid->hw_qdesc_alloc_size = hw_qdesc_size; + +try_desc_alloc: + rx_tid->hw_qdesc_vaddr_unaligned = + qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size); + + if (!rx_tid->hw_qdesc_vaddr_unaligned) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Rx tid HW desc alloc failed: tid %d", + __func__, tid); + return QDF_STATUS_E_NOMEM; + } + + if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) % + hw_qdesc_align) { + /* Address allocated above is not alinged. Allocate extra + * memory for alignment + */ + qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); + rx_tid->hw_qdesc_vaddr_unaligned = + qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size + + hw_qdesc_align - 1); + + if (!rx_tid->hw_qdesc_vaddr_unaligned) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Rx tid HW desc alloc failed: tid %d", + __func__, tid); + return QDF_STATUS_E_NOMEM; + } + + hw_qdesc_vaddr = (void *)qdf_align((unsigned long) + rx_tid->hw_qdesc_vaddr_unaligned, + hw_qdesc_align); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Total Size %d Aligned Addr %pK", + __func__, rx_tid->hw_qdesc_alloc_size, + hw_qdesc_vaddr); + + } else { + hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned; + } + rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr; + + /* TODO: Ensure that sec_type is set before ADDBA is received. + * Currently this is set based on htt indication + * HTT_T2H_MSG_TYPE_SEC_IND from target + */ + switch (peer->security[dp_sec_ucast].sec_type) { + case cdp_sec_type_tkip_nomic: + case cdp_sec_type_aes_ccmp: + case cdp_sec_type_aes_ccmp_256: + case cdp_sec_type_aes_gcmp: + case cdp_sec_type_aes_gcmp_256: + hal_pn_type = HAL_PN_WPA; + break; + case cdp_sec_type_wapi: + if (vdev->opmode == wlan_op_mode_ap) + hal_pn_type = HAL_PN_WAPI_EVEN; + else + hal_pn_type = HAL_PN_WAPI_UNEVEN; + break; + default: + hal_pn_type = HAL_PN_NONE; + break; + } + + hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq, + hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type); + + qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr, + QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size, + &(rx_tid->hw_qdesc_paddr)); + + if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) != + QDF_STATUS_SUCCESS) { + if (alloc_tries++ < 10) { + qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); + rx_tid->hw_qdesc_vaddr_unaligned = NULL; + goto try_desc_alloc; + } else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Rx tid HW desc alloc failed (lowmem): tid %d", + __func__, tid); + err = QDF_STATUS_E_NOMEM; + goto error; + } + } + + if (dp_get_peer_vdev_roaming_in_progress(peer)) { + err = QDF_STATUS_E_PERM; + goto error; + } + + if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) { + if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup( + soc->ctrl_psoc, + peer->vdev->pdev->pdev_id, + peer->vdev->vdev_id, + peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid, + 1, ba_window_size)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to send reo queue setup to FW - tid %d\n", + __func__, tid); + err = QDF_STATUS_E_FAILURE; + goto error; + } + } + return 0; +error: + if (rx_tid->hw_qdesc_vaddr_unaligned) { + if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) == + QDF_STATUS_SUCCESS) + qdf_mem_unmap_nbytes_single( + soc->osdev, + rx_tid->hw_qdesc_paddr, + QDF_DMA_BIDIRECTIONAL, + rx_tid->hw_qdesc_alloc_size); + qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); + rx_tid->hw_qdesc_vaddr_unaligned = NULL; + } + return err; +} + +#ifdef REO_DESC_DEFER_FREE +/* + * dp_reo_desc_clean_up() - If cmd to flush base desc fails add + * desc back to freelist and defer the deletion + * + * @soc: DP SOC handle + * @desc: Base descriptor to be freed + * @reo_status: REO command status + */ +static void dp_reo_desc_clean_up(struct dp_soc *soc, + struct reo_desc_list_node *desc, + union hal_reo_status *reo_status) +{ + desc->free_ts = qdf_get_system_timestamp(); + DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1); + qdf_list_insert_back(&soc->reo_desc_freelist, + (qdf_list_node_t *)desc); +} + +/* + * dp_reo_limit_clean_batch_sz() - Limit number REO CMD queued to cmd + * ring in aviod of REO hang + * + * @list_size: REO desc list size to be cleaned + */ +static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size) +{ + unsigned long curr_ts = qdf_get_system_timestamp(); + + if ((*list_size) > REO_DESC_FREELIST_SIZE) { + dp_err_log("%lu:freedesc number %d in freelist", + curr_ts, *list_size); + /* limit the batch queue size */ + *list_size = REO_DESC_FREELIST_SIZE; + } +} +#else +/* + * dp_reo_desc_clean_up() - If send cmd to REO inorder to flush + * cache fails free the base REO desc anyway + * + * @soc: DP SOC handle + * @desc: Base descriptor to be freed + * @reo_status: REO command status + */ +static void dp_reo_desc_clean_up(struct dp_soc *soc, + struct reo_desc_list_node *desc, + union hal_reo_status *reo_status) +{ + if (reo_status) { + qdf_mem_zero(reo_status, sizeof(*reo_status)); + reo_status->fl_cache_status.header.status = 0; + dp_reo_desc_free(soc, (void *)desc, reo_status); + } +} + +/* + * dp_reo_limit_clean_batch_sz() - Limit number REO CMD queued to cmd + * ring in aviod of REO hang + * + * @list_size: REO desc list size to be cleaned + */ +static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size) +{ +} +#endif + +/* + * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE + * cmd and re-insert desc into free list if send fails. + * + * @soc: DP SOC handle + * @desc: desc with resend update cmd flag set + * @rx_tid: Desc RX tid associated with update cmd for resetting + * valid field to 0 in h/w + * + * Return: QDF status + */ +static QDF_STATUS +dp_resend_update_reo_cmd(struct dp_soc *soc, + struct reo_desc_list_node *desc, + struct dp_rx_tid *rx_tid) +{ + struct hal_reo_cmd_params params; + + qdf_mem_zero(¶ms, sizeof(params)); + params.std.need_status = 1; + params.std.addr_lo = + rx_tid->hw_qdesc_paddr & 0xffffffff; + params.std.addr_hi = + (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + params.u.upd_queue_params.update_vld = 1; + params.u.upd_queue_params.vld = 0; + desc->resend_update_reo_cmd = false; + /* + * If the cmd send fails then set resend_update_reo_cmd flag + * and insert the desc at the end of the free list to retry. + */ + if (dp_reo_send_cmd(soc, + CMD_UPDATE_RX_REO_QUEUE, + ¶ms, + dp_rx_tid_delete_cb, + (void *)desc) + != QDF_STATUS_SUCCESS) { + desc->resend_update_reo_cmd = true; + desc->free_ts = qdf_get_system_timestamp(); + qdf_list_insert_back(&soc->reo_desc_freelist, + (qdf_list_node_t *)desc); + dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE"); + DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache + * after deleting the entries (ie., setting valid=0) + * + * @soc: DP SOC handle + * @cb_ctxt: Callback context + * @reo_status: REO command status + */ +void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status) +{ + struct reo_desc_list_node *freedesc = + (struct reo_desc_list_node *)cb_ctxt; + uint32_t list_size; + struct reo_desc_list_node *desc; + unsigned long curr_ts = qdf_get_system_timestamp(); + uint32_t desc_size, tot_desc_size; + struct hal_reo_cmd_params params; + bool flush_failure = false; + + if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) { + qdf_mem_zero(reo_status, sizeof(*reo_status)); + reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN; + dp_reo_desc_free(soc, (void *)freedesc, reo_status); + DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1); + return; + } else if (reo_status->rx_queue_status.header.status != + HAL_REO_CMD_SUCCESS) { + /* Should not happen normally. Just print error for now */ + dp_info_rl("%s: Rx tid HW desc deletion failed(%d): tid %d", + __func__, + reo_status->rx_queue_status.header.status, + freedesc->rx_tid.tid); + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, + "%s: rx_tid: %d status: %d", __func__, + freedesc->rx_tid.tid, + reo_status->rx_queue_status.header.status); + + qdf_spin_lock_bh(&soc->reo_desc_freelist_lock); + freedesc->free_ts = curr_ts; + qdf_list_insert_back_size(&soc->reo_desc_freelist, + (qdf_list_node_t *)freedesc, &list_size); + + /* MCL path add the desc back to reo_desc_freelist when REO FLUSH + * failed. it may cause the number of REO queue pending in free + * list is even larger than REO_CMD_RING max size and lead REO CMD + * flood then cause REO HW in an unexpected condition. So it's + * needed to limit the number REO cmds in a batch operation. + */ + dp_reo_limit_clean_batch_sz(&list_size); + + while ((qdf_list_peek_front(&soc->reo_desc_freelist, + (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) && + ((list_size >= REO_DESC_FREELIST_SIZE) || + (curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) || + (desc->resend_update_reo_cmd && list_size))) { + struct dp_rx_tid *rx_tid; + + qdf_list_remove_front(&soc->reo_desc_freelist, + (qdf_list_node_t **)&desc); + list_size--; + rx_tid = &desc->rx_tid; + + /* First process descs with resend_update_reo_cmd set */ + if (desc->resend_update_reo_cmd) { + if (dp_resend_update_reo_cmd(soc, desc, rx_tid) != + QDF_STATUS_SUCCESS) + break; + else + continue; + } + + /* Flush and invalidate REO descriptor from HW cache: Base and + * extension descriptors should be flushed separately */ + if (desc->pending_ext_desc_size) + tot_desc_size = desc->pending_ext_desc_size; + else + tot_desc_size = rx_tid->hw_qdesc_alloc_size; + /* Get base descriptor size by passing non-qos TID */ + desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0, + DP_NON_QOS_TID); + + /* Flush reo extension descriptors */ + while ((tot_desc_size -= desc_size) > 0) { + qdf_mem_zero(¶ms, sizeof(params)); + params.std.addr_lo = + ((uint64_t)(rx_tid->hw_qdesc_paddr) + + tot_desc_size) & 0xffffffff; + params.std.addr_hi = + (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + + if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc, + CMD_FLUSH_CACHE, + ¶ms, + NULL, + NULL)) { + dp_info_rl("fail to send CMD_CACHE_FLUSH:" + "tid %d desc %pK", rx_tid->tid, + (void *)(rx_tid->hw_qdesc_paddr)); + desc->pending_ext_desc_size = tot_desc_size + + desc_size; + dp_reo_desc_clean_up(soc, desc, reo_status); + flush_failure = true; + break; + } + } + + if (flush_failure) + break; + else + desc->pending_ext_desc_size = desc_size; + + /* Flush base descriptor */ + qdf_mem_zero(¶ms, sizeof(params)); + params.std.need_status = 1; + params.std.addr_lo = + (uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff; + params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + + if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc, + CMD_FLUSH_CACHE, + ¶ms, + dp_reo_desc_free, + (void *)desc)) { + union hal_reo_status reo_status; + /* + * If dp_reo_send_cmd return failure, related TID queue desc + * should be unmapped. Also locally reo_desc, together with + * TID queue desc also need to be freed accordingly. + * + * Here invoke desc_free function directly to do clean up. + * + * In case of MCL path add the desc back to the free + * desc list and defer deletion. + */ + dp_info_rl("%s: fail to send REO cmd to flush cache: tid %d", + __func__, rx_tid->tid); + dp_reo_desc_clean_up(soc, desc, &reo_status); + DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1); + break; + } + } + qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock); +} + +/* + * dp_rx_tid_delete_wifi3() – Delete receive TID queue + * @peer: Datapath peer handle + * @tid: TID + * + * Return: 0 on success, error code on failure + */ +static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid) +{ + struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]); + struct dp_soc *soc = peer->vdev->pdev->soc; + struct hal_reo_cmd_params params; + struct reo_desc_list_node *freedesc = + qdf_mem_malloc(sizeof(*freedesc)); + + if (!freedesc) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: malloc failed for freedesc: tid %d", + __func__, tid); + return -ENOMEM; + } + + freedesc->rx_tid = *rx_tid; + freedesc->resend_update_reo_cmd = false; + + qdf_mem_zero(¶ms, sizeof(params)); + + params.std.need_status = 1; + params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff; + params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + params.u.upd_queue_params.update_vld = 1; + params.u.upd_queue_params.vld = 0; + + if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, ¶ms, + dp_rx_tid_delete_cb, (void *)freedesc) + != QDF_STATUS_SUCCESS) { + /* Defer the clean up to the call back context */ + qdf_spin_lock_bh(&soc->reo_desc_freelist_lock); + freedesc->free_ts = qdf_get_system_timestamp(); + freedesc->resend_update_reo_cmd = true; + qdf_list_insert_front(&soc->reo_desc_freelist, + (qdf_list_node_t *)freedesc); + DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1); + qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock); + dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE"); + } + + rx_tid->hw_qdesc_vaddr_unaligned = NULL; + rx_tid->hw_qdesc_alloc_size = 0; + rx_tid->hw_qdesc_paddr = 0; + + return 0; +} + +#ifdef DP_LFR +static void dp_peer_setup_remaining_tids(struct dp_peer *peer) +{ + int tid; + + for (tid = 1; tid < DP_MAX_TIDS-1; tid++) { + dp_rx_tid_setup_wifi3(peer, tid, 1, 0); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "Setting up TID %d for peer %pK peer->local_id %d", + tid, peer, peer->local_id); + } +} +#else +static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}; +#endif + +#ifndef WLAN_TX_PKT_CAPTURE_ENH +/* + * dp_peer_tid_queue_init() – Initialize ppdu stats queue per TID + * @peer: Datapath peer + * + */ +static inline void dp_peer_tid_queue_init(struct dp_peer *peer) +{ +} + +/* + * dp_peer_tid_queue_cleanup() – remove ppdu stats queue per TID + * @peer: Datapath peer + * + */ +static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer) +{ +} + +/* + * dp_peer_update_80211_hdr() – dp peer update 80211 hdr + * @vdev: Datapath vdev + * @peer: Datapath peer + * + */ +static inline void +dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer) +{ +} +#endif + +/* + * dp_peer_tx_init() – Initialize receive TID state + * @pdev: Datapath pdev + * @peer: Datapath peer + * + */ +void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer) +{ + dp_peer_tid_queue_init(peer); + dp_peer_update_80211_hdr(peer->vdev, peer); +} + +/* + * dp_peer_tx_cleanup() – Deinitialize receive TID state + * @vdev: Datapath vdev + * @peer: Datapath peer + * + */ +static inline void +dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer) +{ + dp_peer_tid_queue_cleanup(peer); +} + +/* + * dp_peer_rx_init() – Initialize receive TID state + * @pdev: Datapath pdev + * @peer: Datapath peer + * + */ +void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer) +{ + int tid; + struct dp_rx_tid *rx_tid; + for (tid = 0; tid < DP_MAX_TIDS; tid++) { + rx_tid = &peer->rx_tid[tid]; + rx_tid->array = &rx_tid->base; + rx_tid->base.head = rx_tid->base.tail = NULL; + rx_tid->tid = tid; + rx_tid->defrag_timeout_ms = 0; + rx_tid->ba_win_size = 0; + rx_tid->ba_status = DP_RX_BA_INACTIVE; + + rx_tid->defrag_waitlist_elem.tqe_next = NULL; + rx_tid->defrag_waitlist_elem.tqe_prev = NULL; + } + + peer->active_ba_session_cnt = 0; + peer->hw_buffer_size = 0; + peer->kill_256_sessions = 0; + + /* Setup default (non-qos) rx tid queue */ + dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0); + + /* Setup rx tid queue for TID 0. + * Other queues will be setup on receiving first packet, which will cause + * NULL REO queue error + */ + dp_rx_tid_setup_wifi3(peer, 0, 1, 0); + + /* + * Setup the rest of TID's to handle LFR + */ + dp_peer_setup_remaining_tids(peer); + + /* + * Set security defaults: no PN check, no security. The target may + * send a HTT SEC_IND message to overwrite these defaults. + */ + peer->security[dp_sec_ucast].sec_type = + peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none; +} + +/* + * dp_peer_rx_cleanup() – Cleanup receive TID state + * @vdev: Datapath vdev + * @peer: Datapath peer + * @reuse: Peer reference reuse + * + */ +void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse) +{ + int tid; + uint32_t tid_delete_mask = 0; + + dp_info("Remove tids for peer: %pK", peer); + for (tid = 0; tid < DP_MAX_TIDS; tid++) { + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + + qdf_spin_lock_bh(&rx_tid->tid_lock); + if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) { + /* Cleanup defrag related resource */ + dp_rx_defrag_waitlist_remove(peer, tid); + dp_rx_reorder_flush_frag(peer, tid); + } + + if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) { + dp_rx_tid_delete_wifi3(peer, tid); + + tid_delete_mask |= (1 << tid); + } + qdf_spin_unlock_bh(&rx_tid->tid_lock); + } +#ifdef notyet /* See if FW can remove queues as part of peer cleanup */ + if (soc->ol_ops->peer_rx_reorder_queue_remove) { + soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc, + peer->vdev->pdev->pdev_id, + peer->vdev->vdev_id, peer->mac_addr.raw, + tid_delete_mask); + } +#endif + if (!reuse) + for (tid = 0; tid < DP_MAX_TIDS; tid++) + qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock); +} + +#ifdef FEATURE_PERPKT_INFO +/* + * dp_peer_ppdu_delayed_ba_init() Initialize ppdu in peer + * @peer: Datapath peer + * + * return: void + */ +void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer) +{ + qdf_mem_zero(&peer->delayed_ba_ppdu_stats, + sizeof(struct cdp_delayed_tx_completion_ppdu_user)); + peer->last_delayed_ba = false; + peer->last_delayed_ba_ppduid = 0; +} +#else +/* + * dp_peer_ppdu_delayed_ba_init() Initialize ppdu in peer + * @peer: Datapath peer + * + * return: void + */ +void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer) +{ +} +#endif + +/* + * dp_peer_cleanup() – Cleanup peer information + * @vdev: Datapath vdev + * @peer: Datapath peer + * @reuse: Peer reference reuse + * + */ +void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse) +{ + dp_peer_tx_cleanup(vdev, peer); + + /* cleanup the Rx reorder queues for this peer */ + dp_peer_rx_cleanup(vdev, peer, reuse); +} + +/* dp_teardown_256_ba_session() - Teardown sessions using 256 + * window size when a request with + * 64 window size is received. + * This is done as a WAR since HW can + * have only one setting per peer (64 or 256). + * For HKv2, we use per tid buffersize setting + * for 0 to per_tid_basize_max_tid. For tid + * more than per_tid_basize_max_tid we use HKv1 + * method. + * @peer: Datapath peer + * + * Return: void + */ +static void dp_teardown_256_ba_sessions(struct dp_peer *peer) +{ + uint8_t delba_rcode = 0; + int tid; + struct dp_rx_tid *rx_tid = NULL; + + tid = peer->vdev->pdev->soc->per_tid_basize_max_tid; + for (; tid < DP_MAX_TIDS; tid++) { + rx_tid = &peer->rx_tid[tid]; + qdf_spin_lock_bh(&rx_tid->tid_lock); + + if (rx_tid->ba_win_size <= 64) { + qdf_spin_unlock_bh(&rx_tid->tid_lock); + continue; + } else { + if (rx_tid->ba_status == DP_RX_BA_ACTIVE || + rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { + /* send delba */ + if (!rx_tid->delba_tx_status) { + rx_tid->delba_tx_retry++; + rx_tid->delba_tx_status = 1; + rx_tid->delba_rcode = + IEEE80211_REASON_QOS_SETUP_REQUIRED; + delba_rcode = rx_tid->delba_rcode; + + qdf_spin_unlock_bh(&rx_tid->tid_lock); + if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba) + peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba( + peer->vdev->pdev->soc->ctrl_psoc, + peer->vdev->vdev_id, + peer->mac_addr.raw, + tid, delba_rcode); + } else { + qdf_spin_unlock_bh(&rx_tid->tid_lock); + } + } else { + qdf_spin_unlock_bh(&rx_tid->tid_lock); + } + } + } +} + +/* +* dp_rx_addba_resp_tx_completion_wifi3() – Update Rx Tid State +* +* @soc: Datapath soc handle +* @peer_mac: Datapath peer mac address +* @vdev_id: id of atapath vdev +* @tid: TID number +* @status: tx completion status +* Return: 0 on success, error code on failure +*/ +int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, + uint8_t *peer_mac, + uint16_t vdev_id, + uint8_t tid, int status) +{ + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc, + peer_mac, 0, vdev_id); + struct dp_rx_tid *rx_tid = NULL; + + if (!peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Peer is NULL!\n", __func__); + goto fail; + } + rx_tid = &peer->rx_tid[tid]; + qdf_spin_lock_bh(&rx_tid->tid_lock); + if (status) { + rx_tid->num_addba_rsp_failed++; + dp_rx_tid_update_wifi3(peer, tid, 1, + IEEE80211_SEQ_MAX); + rx_tid->ba_status = DP_RX_BA_INACTIVE; + qdf_spin_unlock_bh(&rx_tid->tid_lock); + dp_err("RxTid- %d addba rsp tx completion failed", tid); + + goto success; + } + + rx_tid->num_addba_rsp_success++; + if (rx_tid->ba_status == DP_RX_BA_INACTIVE) { + qdf_spin_unlock_bh(&rx_tid->tid_lock); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS", + __func__, tid); + goto fail; + } + + if (!qdf_atomic_read(&peer->is_default_route_set)) { + qdf_spin_unlock_bh(&rx_tid->tid_lock); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: default route is not set for peer: "QDF_MAC_ADDR_FMT, + __func__, QDF_MAC_ADDR_REF(peer->mac_addr.raw)); + goto fail; + } + + if (dp_rx_tid_update_wifi3(peer, tid, + rx_tid->ba_win_size, + rx_tid->startseqnum)) { + dp_err("%s: failed update REO SSN", __func__); + } + + dp_info("%s: tid %u window_size %u start_seq_num %u", + __func__, tid, rx_tid->ba_win_size, + rx_tid->startseqnum); + + /* First Session */ + if (peer->active_ba_session_cnt == 0) { + if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256) + peer->hw_buffer_size = 256; + else + peer->hw_buffer_size = 64; + } + + rx_tid->ba_status = DP_RX_BA_ACTIVE; + + peer->active_ba_session_cnt++; + + qdf_spin_unlock_bh(&rx_tid->tid_lock); + + /* Kill any session having 256 buffer size + * when 64 buffer size request is received. + * Also, latch on to 64 as new buffer size. + */ + if (peer->kill_256_sessions) { + dp_teardown_256_ba_sessions(peer); + peer->kill_256_sessions = 0; + } + +success: + dp_peer_unref_delete(peer); + return QDF_STATUS_SUCCESS; + +fail: + if (peer) + dp_peer_unref_delete(peer); + + return QDF_STATUS_E_FAILURE; +} + +/* +* dp_rx_addba_responsesetup_wifi3() – Process ADDBA request from peer +* +* @soc: Datapath soc handle +* @peer_mac: Datapath peer mac address +* @vdev_id: id of atapath vdev +* @tid: TID number +* @dialogtoken: output dialogtoken +* @statuscode: output dialogtoken +* @buffersize: Output BA window size +* @batimeout: Output BA timeout +*/ +QDF_STATUS +dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, + uint16_t vdev_id, uint8_t tid, + uint8_t *dialogtoken, uint16_t *statuscode, + uint16_t *buffersize, uint16_t *batimeout) +{ + struct dp_rx_tid *rx_tid = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc, + peer_mac, 0, vdev_id); + + if (!peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Peer is NULL!\n", __func__); + status = QDF_STATUS_E_FAILURE; + goto fail; + } + rx_tid = &peer->rx_tid[tid]; + qdf_spin_lock_bh(&rx_tid->tid_lock); + rx_tid->num_of_addba_resp++; + /* setup ADDBA response parameters */ + *dialogtoken = rx_tid->dialogtoken; + *statuscode = rx_tid->statuscode; + *buffersize = rx_tid->ba_win_size; + *batimeout = 0; + qdf_spin_unlock_bh(&rx_tid->tid_lock); + +fail: + if (peer) + dp_peer_unref_delete(peer); + + return status; +} + +/* dp_check_ba_buffersize() - Check buffer size in request + * and latch onto this size based on + * size used in first active session. + * @peer: Datapath peer + * @tid: Tid + * @buffersize: Block ack window size + * + * Return: void + */ +static void dp_check_ba_buffersize(struct dp_peer *peer, + uint16_t tid, + uint16_t buffersize) +{ + struct dp_rx_tid *rx_tid = NULL; + + rx_tid = &peer->rx_tid[tid]; + if (peer->vdev->pdev->soc->per_tid_basize_max_tid && + tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) { + rx_tid->ba_win_size = buffersize; + return; + } else { + if (peer->active_ba_session_cnt == 0) { + rx_tid->ba_win_size = buffersize; + } else { + if (peer->hw_buffer_size == 64) { + if (buffersize <= 64) + rx_tid->ba_win_size = buffersize; + else + rx_tid->ba_win_size = peer->hw_buffer_size; + } else if (peer->hw_buffer_size == 256) { + if (buffersize > 64) { + rx_tid->ba_win_size = buffersize; + } else { + rx_tid->ba_win_size = buffersize; + peer->hw_buffer_size = 64; + peer->kill_256_sessions = 1; + } + } + } + } +} + +#define DP_RX_BA_SESSION_DISABLE 1 + +/* + * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer + * + * @soc: Datapath soc handle + * @peer_mac: Datapath peer mac address + * @vdev_id: id of atapath vdev + * @dialogtoken: dialogtoken from ADDBA frame + * @tid: TID number + * @batimeout: BA timeout + * @buffersize: BA window size + * @startseqnum: Start seq. number received in BA sequence control + * + * Return: 0 on success, error code on failure + */ +int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc, + uint8_t *peer_mac, + uint16_t vdev_id, + uint8_t dialogtoken, + uint16_t tid, uint16_t batimeout, + uint16_t buffersize, + uint16_t startseqnum) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct dp_rx_tid *rx_tid = NULL; + struct dp_soc *soc = (struct dp_soc *)cdp_soc; + struct dp_peer *peer = dp_peer_find_hash_find(soc, + peer_mac, 0, vdev_id); + + if (!peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Peer is NULL!\n", __func__); + status = QDF_STATUS_E_FAILURE; + goto fail; + } + rx_tid = &peer->rx_tid[tid]; + qdf_spin_lock_bh(&rx_tid->tid_lock); + rx_tid->num_of_addba_req++; + if ((rx_tid->ba_status == DP_RX_BA_ACTIVE && + rx_tid->hw_qdesc_vaddr_unaligned)) { + dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); + rx_tid->ba_status = DP_RX_BA_INACTIVE; + peer->active_ba_session_cnt--; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Rx Tid- %d hw qdesc is already setup", + __func__, tid); + } + + if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { + qdf_spin_unlock_bh(&rx_tid->tid_lock); + status = QDF_STATUS_E_FAILURE; + goto fail; + } + + if (wlan_cfg_is_dp_force_rx_64_ba(soc->wlan_cfg_ctx)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "force use BA64 scheme"); + buffersize = qdf_min((uint16_t)64, buffersize); + } + + if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s disable BA session", + __func__); + + buffersize = 1; + } else if (rx_tid->rx_ba_win_size_override) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s override BA win to %d", __func__, + rx_tid->rx_ba_win_size_override); + + buffersize = rx_tid->rx_ba_win_size_override; + } else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s restore BA win %d based on addba req", + __func__, buffersize); + } + + dp_check_ba_buffersize(peer, tid, buffersize); + + if (dp_rx_tid_setup_wifi3(peer, tid, + rx_tid->ba_win_size, startseqnum)) { + rx_tid->ba_status = DP_RX_BA_INACTIVE; + qdf_spin_unlock_bh(&rx_tid->tid_lock); + status = QDF_STATUS_E_FAILURE; + goto fail; + } + rx_tid->ba_status = DP_RX_BA_IN_PROGRESS; + + rx_tid->dialogtoken = dialogtoken; + rx_tid->startseqnum = startseqnum; + + if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS) + rx_tid->statuscode = rx_tid->userstatuscode; + else + rx_tid->statuscode = IEEE80211_STATUS_SUCCESS; + + if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) + rx_tid->statuscode = IEEE80211_STATUS_REFUSED; + + qdf_spin_unlock_bh(&rx_tid->tid_lock); + +fail: + if (peer) + dp_peer_unref_delete(peer); + + return status; +} + +/* +* dp_set_addba_response() – Set a user defined ADDBA response status code +* +* @soc: Datapath soc handle +* @peer_mac: Datapath peer mac address +* @vdev_id: id of atapath vdev +* @tid: TID number +* @statuscode: response status code to be set +*/ +QDF_STATUS +dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, + uint16_t vdev_id, uint8_t tid, uint16_t statuscode) +{ + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc, + peer_mac, 0, vdev_id); + struct dp_rx_tid *rx_tid; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Peer is NULL!\n", __func__); + status = QDF_STATUS_E_FAILURE; + goto fail; + } + + rx_tid = &peer->rx_tid[tid]; + qdf_spin_lock_bh(&rx_tid->tid_lock); + rx_tid->userstatuscode = statuscode; + qdf_spin_unlock_bh(&rx_tid->tid_lock); +fail: + if (peer) + dp_peer_unref_delete(peer); + + return status; +} + +/* +* dp_rx_delba_process_wifi3() – Process DELBA from peer +* @soc: Datapath soc handle +* @peer_mac: Datapath peer mac address +* @vdev_id: id of atapath vdev +* @tid: TID number +* @reasoncode: Reason code received in DELBA frame +* +* Return: 0 on success, error code on failure +*/ +int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, + uint16_t vdev_id, int tid, uint16_t reasoncode) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct dp_rx_tid *rx_tid; + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc, + peer_mac, 0, vdev_id); + + if (!peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Peer is NULL!\n", __func__); + status = QDF_STATUS_E_FAILURE; + goto fail; + } + rx_tid = &peer->rx_tid[tid]; + qdf_spin_lock_bh(&rx_tid->tid_lock); + if (rx_tid->ba_status == DP_RX_BA_INACTIVE || + rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { + qdf_spin_unlock_bh(&rx_tid->tid_lock); + status = QDF_STATUS_E_FAILURE; + goto fail; + } + /* TODO: See if we can delete the existing REO queue descriptor and + * replace with a new one without queue extenstion descript to save + * memory + */ + rx_tid->delba_rcode = reasoncode; + rx_tid->num_of_delba_req++; + dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); + + rx_tid->ba_status = DP_RX_BA_INACTIVE; + peer->active_ba_session_cnt--; + qdf_spin_unlock_bh(&rx_tid->tid_lock); +fail: + if (peer) + dp_peer_unref_delete(peer); + + return status; +} + +/* + * dp_rx_delba_tx_completion_wifi3() – Send Delba Request + * + * @soc: Datapath soc handle + * @peer_mac: Datapath peer mac address + * @vdev_id: id of atapath vdev + * @tid: TID number + * @status: tx completion status + * Return: 0 on success, error code on failure + */ + +int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, + uint16_t vdev_id, + uint8_t tid, int status) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + struct dp_rx_tid *rx_tid = NULL; + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc, + peer_mac, 0, vdev_id); + + if (!peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Peer is NULL!", __func__); + ret = QDF_STATUS_E_FAILURE; + goto end; + } + rx_tid = &peer->rx_tid[tid]; + qdf_spin_lock_bh(&rx_tid->tid_lock); + if (status) { + rx_tid->delba_tx_fail_cnt++; + if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) { + rx_tid->delba_tx_retry = 0; + rx_tid->delba_tx_status = 0; + qdf_spin_unlock_bh(&rx_tid->tid_lock); + } else { + rx_tid->delba_tx_retry++; + rx_tid->delba_tx_status = 1; + qdf_spin_unlock_bh(&rx_tid->tid_lock); + if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba) + peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba( + peer->vdev->pdev->soc->ctrl_psoc, + peer->vdev->vdev_id, + peer->mac_addr.raw, tid, + rx_tid->delba_rcode); + } + goto end; + } else { + rx_tid->delba_tx_success_cnt++; + rx_tid->delba_tx_retry = 0; + rx_tid->delba_tx_status = 0; + } + if (rx_tid->ba_status == DP_RX_BA_ACTIVE) { + dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); + rx_tid->ba_status = DP_RX_BA_INACTIVE; + peer->active_ba_session_cnt--; + } + if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) { + dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); + rx_tid->ba_status = DP_RX_BA_INACTIVE; + } + qdf_spin_unlock_bh(&rx_tid->tid_lock); + +end: + if (peer) + dp_peer_unref_delete(peer); + + return ret; +} + +/** + * dp_set_pn_check_wifi3() - enable PN check in REO for security + * @soc: Datapath soc handle + * @peer_mac: Datapath peer mac address + * @vdev_id: id of atapath vdev + * @vdev: Datapath vdev + * @pdev - data path device instance + * @sec_type - security type + * @rx_pn - Receive pn starting number + * + */ + +QDF_STATUS +dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac, enum cdp_sec_type sec_type, + uint32_t *rx_pn) +{ + struct dp_pdev *pdev; + int i; + uint8_t pn_size; + struct hal_reo_cmd_params params; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, + peer_mac, 0, vdev_id); + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + + if (!vdev || !peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Peer is NULL!\n", __func__); + status = QDF_STATUS_E_FAILURE; + goto fail; + } + + pdev = vdev->pdev; + qdf_mem_zero(¶ms, sizeof(params)); + + params.std.need_status = 1; + params.u.upd_queue_params.update_pn_valid = 1; + params.u.upd_queue_params.update_pn_size = 1; + params.u.upd_queue_params.update_pn = 1; + params.u.upd_queue_params.update_pn_check_needed = 1; + params.u.upd_queue_params.update_svld = 1; + params.u.upd_queue_params.svld = 0; + + switch (sec_type) { + case cdp_sec_type_tkip_nomic: + case cdp_sec_type_aes_ccmp: + case cdp_sec_type_aes_ccmp_256: + case cdp_sec_type_aes_gcmp: + case cdp_sec_type_aes_gcmp_256: + params.u.upd_queue_params.pn_check_needed = 1; + params.u.upd_queue_params.pn_size = 48; + pn_size = 48; + break; + case cdp_sec_type_wapi: + params.u.upd_queue_params.pn_check_needed = 1; + params.u.upd_queue_params.pn_size = 128; + pn_size = 128; + if (vdev->opmode == wlan_op_mode_ap) { + params.u.upd_queue_params.pn_even = 1; + params.u.upd_queue_params.update_pn_even = 1; + } else { + params.u.upd_queue_params.pn_uneven = 1; + params.u.upd_queue_params.update_pn_uneven = 1; + } + break; + default: + params.u.upd_queue_params.pn_check_needed = 0; + pn_size = 0; + break; + } + + + for (i = 0; i < DP_MAX_TIDS; i++) { + struct dp_rx_tid *rx_tid = &peer->rx_tid[i]; + qdf_spin_lock_bh(&rx_tid->tid_lock); + if (rx_tid->hw_qdesc_vaddr_unaligned) { + params.std.addr_lo = + rx_tid->hw_qdesc_paddr & 0xffffffff; + params.std.addr_hi = + (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + + if (pn_size) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_INFO_HIGH, + "%s PN set for TID:%d pn:%x:%x:%x:%x", + __func__, i, rx_pn[3], rx_pn[2], + rx_pn[1], rx_pn[0]); + params.u.upd_queue_params.update_pn_valid = 1; + params.u.upd_queue_params.pn_31_0 = rx_pn[0]; + params.u.upd_queue_params.pn_63_32 = rx_pn[1]; + params.u.upd_queue_params.pn_95_64 = rx_pn[2]; + params.u.upd_queue_params.pn_127_96 = rx_pn[3]; + } + rx_tid->pn_size = pn_size; + if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc), + CMD_UPDATE_RX_REO_QUEUE, + ¶ms, dp_rx_tid_update_cb, + rx_tid)) { + dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE" + "tid %d desc %pK", rx_tid->tid, + (void *)(rx_tid->hw_qdesc_paddr)); + DP_STATS_INC(cdp_soc_t_to_dp_soc(soc), + rx.err.reo_cmd_send_fail, 1); + } + } else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + "PN Check not setup for TID :%d ", i); + } + qdf_spin_unlock_bh(&rx_tid->tid_lock); + } +fail: + if (peer) + dp_peer_unref_delete(peer); + + return status; +} + + +/** + * dp_set_key_sec_type_wifi3() - set security mode of key + * @soc: Datapath soc handle + * @peer_mac: Datapath peer mac address + * @vdev_id: id of atapath vdev + * @vdev: Datapath vdev + * @pdev - data path device instance + * @sec_type - security type + * #is_unicast - key type + * + */ + +QDF_STATUS +dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, + uint8_t *peer_mac, enum cdp_sec_type sec_type, + bool is_unicast) +{ + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, + peer_mac, 0, vdev_id); + QDF_STATUS status = QDF_STATUS_SUCCESS; + int sec_index; + + if (!peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Peer is NULL!\n", __func__); + status = QDF_STATUS_E_FAILURE; + goto fail; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + "key sec spec for peer %pK "QDF_MAC_ADDR_FMT": %s key of type %d", + peer, + QDF_MAC_ADDR_REF(peer->mac_addr.raw), + is_unicast ? "ucast" : "mcast", + sec_type); + + sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast; + peer->security[sec_index].sec_type = sec_type; + +fail: + if (peer) + dp_peer_unref_delete(peer); + + return status; +} + +void +dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id, + enum cdp_sec_type sec_type, int is_unicast, + u_int32_t *michael_key, + u_int32_t *rx_pn) +{ + struct dp_peer *peer; + int sec_index; + + peer = dp_peer_find_by_id(soc, peer_id); + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Couldn't find peer from ID %d - skipping security inits", + peer_id); + return; + } + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + "sec spec for peer %pK "QDF_MAC_ADDR_FMT": %s key of type %d", + peer, + QDF_MAC_ADDR_REF(peer->mac_addr.raw), + is_unicast ? "ucast" : "mcast", + sec_type); + sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast; + peer->security[sec_index].sec_type = sec_type; +#ifdef notyet /* TODO: See if this is required for defrag support */ + /* michael key only valid for TKIP, but for simplicity, + * copy it anyway + */ + qdf_mem_copy( + &peer->security[sec_index].michael_key[0], + michael_key, + sizeof(peer->security[sec_index].michael_key)); +#ifdef BIG_ENDIAN_HOST + OL_IF_SWAPBO(peer->security[sec_index].michael_key[0], + sizeof(peer->security[sec_index].michael_key)); +#endif /* BIG_ENDIAN_HOST */ +#endif + +#ifdef notyet /* TODO: Check if this is required for wifi3.0 */ + if (sec_type != cdp_sec_type_wapi) { + qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS); + } else { + for (i = 0; i < DP_MAX_TIDS; i++) { + /* + * Setting PN valid bit for WAPI sec_type, + * since WAPI PN has to be started with predefined value + */ + peer->tids_last_pn_valid[i] = 1; + qdf_mem_copy( + (u_int8_t *) &peer->tids_last_pn[i], + (u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t)); + peer->tids_last_pn[i].pn128[1] = + qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]); + peer->tids_last_pn[i].pn128[0] = + qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]); + } + } +#endif + /* TODO: Update HW TID queue with PN check parameters (pn type for + * all security types and last pn for WAPI) once REO command API + * is available + */ + + dp_peer_unref_del_find_by_id(peer); +} + +QDF_STATUS +dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id, + uint8_t tid, uint16_t win_sz) +{ + struct dp_soc *soc = (struct dp_soc *)soc_handle; + struct dp_peer *peer; + struct dp_rx_tid *rx_tid; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + peer = dp_peer_find_by_id(soc, peer_id); + + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Couldn't find peer from ID %d", + peer_id); + return QDF_STATUS_E_FAILURE; + } + + qdf_assert_always(tid < DP_MAX_TIDS); + + rx_tid = &peer->rx_tid[tid]; + + if (rx_tid->hw_qdesc_vaddr_unaligned) { + if (!rx_tid->delba_tx_status) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: PEER_ID: %d TID: %d, BA win: %d ", + __func__, peer_id, tid, win_sz); + + qdf_spin_lock_bh(&rx_tid->tid_lock); + + rx_tid->delba_tx_status = 1; + + rx_tid->rx_ba_win_size_override = + qdf_min((uint16_t)63, win_sz); + + rx_tid->delba_rcode = + IEEE80211_REASON_QOS_SETUP_REQUIRED; + + qdf_spin_unlock_bh(&rx_tid->tid_lock); + + if (soc->cdp_soc.ol_ops->send_delba) + soc->cdp_soc.ol_ops->send_delba( + peer->vdev->pdev->soc->ctrl_psoc, + peer->vdev->vdev_id, + peer->mac_addr.raw, + tid, + rx_tid->delba_rcode); + } + } else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "BA session is not setup for TID:%d ", tid); + status = QDF_STATUS_E_FAILURE; + } + + dp_peer_unref_del_find_by_id(peer); + + return status; +} + +#ifdef DP_PEER_EXTENDED_API +QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + struct ol_txrx_desc_type *sta_desc) +{ + struct dp_peer *peer; + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (!pdev) + return QDF_STATUS_E_FAULT; + + peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, + sta_desc->peer_addr.bytes); + + if (!peer) + return QDF_STATUS_E_FAULT; + + qdf_spin_lock_bh(&peer->peer_info_lock); + peer->state = OL_TXRX_PEER_STATE_CONN; + qdf_spin_unlock_bh(&peer->peer_info_lock); + + dp_rx_flush_rx_cached(peer, false); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + struct qdf_mac_addr peer_addr) +{ + struct dp_peer *peer; + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (!pdev) + return QDF_STATUS_E_FAULT; + + peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes); + if (!peer || !peer->valid) + return QDF_STATUS_E_FAULT; + + dp_clear_peer_internal(soc, peer); + return QDF_STATUS_SUCCESS; +} + +/** + * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev + * @pdev - data path device instance + * @vdev - virtual interface instance + * @peer_addr - peer mac address + * + * Find peer by peer mac address within vdev + * + * Return: peer instance void pointer + * NULL cannot find target peer + */ +void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle, + struct cdp_vdev *vdev_handle, + uint8_t *peer_addr) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + struct dp_peer *peer; + + peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, DP_VDEV_ALL); + + if (!peer) + return NULL; + + if (peer->vdev != vdev) { + dp_peer_unref_delete(peer); + return NULL; + } + + /* ref_cnt is incremented inside dp_peer_find_hash_find(). + * Decrement it here. + */ + dp_peer_unref_delete(peer); + + return peer; +} + +QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, + enum ol_txrx_peer_state state) +{ + struct dp_peer *peer; + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + + peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL); + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Failed to find peer for: ["QDF_MAC_ADDR_FMT"]", + QDF_MAC_ADDR_REF(peer_mac)); + return QDF_STATUS_E_FAILURE; + } + peer->state = state; + + peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0; + + dp_info("peer %pK state %d", peer, peer->state); + /* ref_cnt is incremented inside dp_peer_find_hash_find(). + * Decrement it here. + */ + dp_peer_unref_delete(peer); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, + uint8_t *vdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_peer *peer = + dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL); + + if (!peer) + return QDF_STATUS_E_FAILURE; + + dp_info("peer %pK vdev %pK vdev id %d", + peer, peer->vdev, peer->vdev->vdev_id); + *vdev_id = peer->vdev->vdev_id; + /* ref_cnt is incremented inside dp_peer_find_hash_find(). + * Decrement it here. + */ + dp_peer_unref_delete(peer); + + return QDF_STATUS_SUCCESS; +} + +struct cdp_vdev * +dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle, + struct qdf_mac_addr peer_addr) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + struct dp_peer *peer = NULL; + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + "PDEV not found for peer_addr: "QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(peer_addr.bytes)); + return NULL; + } + + peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes); + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, + "PDEV not found for peer_addr: "QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(peer_addr.bytes)); + return NULL; + } + + return (struct cdp_vdev *)peer->vdev; +} + +/** + * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs + * @peer - peer instance + * + * Get virtual interface instance which peer belongs + * + * Return: virtual interface instance pointer + * NULL in case cannot find + */ +struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle) +{ + struct dp_peer *peer = peer_handle; + + DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev); + return (struct cdp_vdev *)peer->vdev; +} + +/** + * dp_peer_get_peer_mac_addr() - Get peer mac address + * @peer - peer instance + * + * Get peer mac address + * + * Return: peer mac address pointer + * NULL in case cannot find + */ +uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle) +{ + struct dp_peer *peer = peer_handle; + uint8_t *mac; + + mac = peer->mac_addr.raw; + dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x", + peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + return peer->mac_addr.raw; +} + +int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + uint8_t *peer_mac) +{ + enum ol_txrx_peer_state peer_state; + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, 0, + vdev_id); + + if (!peer) + return QDF_STATUS_E_FAILURE; + + DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state); + peer_state = peer->state; + dp_peer_unref_delete(peer); + + return peer_state; +} + +/** + * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device + * @pdev - data path device instance + * + * local peer id pool alloc for physical device + * + * Return: none + */ +void dp_local_peer_id_pool_init(struct dp_pdev *pdev) +{ + int i; + + /* point the freelist to the first ID */ + pdev->local_peer_ids.freelist = 0; + + /* link each ID to the next one */ + for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) { + pdev->local_peer_ids.pool[i] = i + 1; + pdev->local_peer_ids.map[i] = NULL; + } + + /* link the last ID to itself, to mark the end of the list */ + i = OL_TXRX_NUM_LOCAL_PEER_IDS; + pdev->local_peer_ids.pool[i] = i; + + qdf_spinlock_create(&pdev->local_peer_ids.lock); + DP_TRACE(INFO, "Peer pool init"); +} + +/** + * dp_local_peer_id_alloc() - allocate local peer id + * @pdev - data path device instance + * @peer - new peer instance + * + * allocate local peer id + * + * Return: none + */ +void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer) +{ + int i; + + qdf_spin_lock_bh(&pdev->local_peer_ids.lock); + i = pdev->local_peer_ids.freelist; + if (pdev->local_peer_ids.pool[i] == i) { + /* the list is empty, except for the list-end marker */ + peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID; + } else { + /* take the head ID and advance the freelist */ + peer->local_id = i; + pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i]; + pdev->local_peer_ids.map[i] = peer; + } + qdf_spin_unlock_bh(&pdev->local_peer_ids.lock); + dp_info("peer %pK, local id %d", peer, peer->local_id); +} + +/** + * dp_local_peer_id_free() - remove local peer id + * @pdev - data path device instance + * @peer - peer instance should be removed + * + * remove local peer id + * + * Return: none + */ +void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer) +{ + int i = peer->local_id; + if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) || + (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) { + return; + } + + /* put this ID on the head of the freelist */ + qdf_spin_lock_bh(&pdev->local_peer_ids.lock); + pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist; + pdev->local_peer_ids.freelist = i; + pdev->local_peer_ids.map[i] = NULL; + qdf_spin_unlock_bh(&pdev->local_peer_ids.lock); +} + +bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, uint8_t *peer_addr) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + + if (!vdev) + return false; + + return !!dp_find_peer_by_addr_and_vdev( + dp_pdev_to_cdp_pdev(vdev->pdev), + dp_vdev_to_cdp_vdev(vdev), + peer_addr); +} + +bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, uint8_t *peer_addr, + uint16_t max_bssid) +{ + int i; + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_vdev *vdev; + + for (i = 0; i < max_bssid; i++) { + vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, i); + /* Need to check vdevs other than the vdev_id */ + if (vdev_id == i || !vdev) + continue; + if (dp_find_peer_by_addr_and_vdev( + dp_pdev_to_cdp_pdev(vdev->pdev), + dp_vdev_to_cdp_vdev(vdev), + peer_addr)) { + dp_err("%s: Duplicate peer "QDF_MAC_ADDR_FMT" already exist on vdev %d", + __func__, QDF_MAC_ADDR_REF(peer_addr), i); + return true; + } + } + + return false; +} + +bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + uint8_t *peer_addr) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (!pdev) + return false; + + return !!dp_find_peer_by_addr(dp_pdev_to_cdp_pdev(pdev), peer_addr); +} +#endif + +/** + * dp_peer_rxtid_stats: Retried Rx TID (REO queue) stats from HW + * @peer: DP peer handle + * @dp_stats_cmd_cb: REO command callback function + * @cb_ctxt: Callback context + * + * Return: count of tid stats cmd send succeeded + */ +int dp_peer_rxtid_stats(struct dp_peer *peer, + dp_rxtid_stats_cmd_cb dp_stats_cmd_cb, + void *cb_ctxt) +{ + struct dp_soc *soc = peer->vdev->pdev->soc; + struct hal_reo_cmd_params params; + int i; + int stats_cmd_sent_cnt = 0; + QDF_STATUS status; + + if (!dp_stats_cmd_cb) + return stats_cmd_sent_cnt; + + qdf_mem_zero(¶ms, sizeof(params)); + for (i = 0; i < DP_MAX_TIDS; i++) { + struct dp_rx_tid *rx_tid = &peer->rx_tid[i]; + if (rx_tid->hw_qdesc_vaddr_unaligned) { + params.std.need_status = 1; + params.std.addr_lo = + rx_tid->hw_qdesc_paddr & 0xffffffff; + params.std.addr_hi = + (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + + if (cb_ctxt) { + status = dp_reo_send_cmd( + soc, CMD_GET_QUEUE_STATS, + ¶ms, dp_stats_cmd_cb, + cb_ctxt); + } else { + status = dp_reo_send_cmd( + soc, CMD_GET_QUEUE_STATS, + ¶ms, dp_stats_cmd_cb, + rx_tid); + } + + if (QDF_IS_STATUS_SUCCESS(status)) + stats_cmd_sent_cnt++; + + /* Flush REO descriptor from HW cache to update stats + * in descriptor memory. This is to help debugging */ + qdf_mem_zero(¶ms, sizeof(params)); + params.std.need_status = 0; + params.std.addr_lo = + rx_tid->hw_qdesc_paddr & 0xffffffff; + params.std.addr_hi = + (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + params.u.fl_cache_params.flush_no_inval = 1; + dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, ¶ms, NULL, + NULL); + } + } + + return stats_cmd_sent_cnt; +} + +QDF_STATUS +dp_set_michael_key(struct cdp_soc_t *soc, + uint8_t vdev_id, + uint8_t *peer_mac, + bool is_unicast, uint32_t *key) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint8_t sec_index = is_unicast ? 1 : 0; + struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc, + peer_mac, 0, vdev_id); + + if (!peer || peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "peer not found "); + status = QDF_STATUS_E_FAILURE; + goto fail; + } + + qdf_mem_copy(&peer->security[sec_index].michael_key[0], + key, IEEE80211_WEP_MICLEN); + +fail: + if (peer) + dp_peer_unref_delete(peer); + + return status; +} + +bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id) +{ + struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id); + + if (peer) { + /* + * Decrement the peer ref which is taken as part of + * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled + */ + dp_peer_unref_del_find_by_id(peer); + + return true; + } + + return false; +} + +void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + uint8_t *peer_mac) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, 0, + vdev_id); + struct dp_rx_tid *rx_tid; + uint8_t tid; + + if (!peer) + return; + + dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(peer->mac_addr.raw)); + + for (tid = 0; tid < DP_MAX_TIDS; tid++) { + rx_tid = &peer->rx_tid[tid]; + + qdf_spin_lock_bh(&rx_tid->tid_lock); + dp_rx_defrag_waitlist_remove(peer, tid); + dp_rx_reorder_flush_frag(peer, tid); + qdf_spin_unlock_bh(&rx_tid->tid_lock); + } + + dp_peer_unref_delete(peer); +} + +#ifdef DUMP_REO_QUEUE_INFO_IN_DDR +void dp_dump_rx_reo_queue_info( + struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status) +{ + struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt; + + if (!rx_tid) + return; + + if (reo_status->fl_cache_status.header.status != + HAL_REO_CMD_SUCCESS) { + dp_err_rl("Rx tid REO HW desc flush failed(%d)", + reo_status->rx_queue_status.header.status); + return; + } + qdf_spin_lock_bh(&rx_tid->tid_lock); + hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned); + qdf_spin_unlock_bh(&rx_tid->tid_lock); +} + +void dp_send_cache_flush_for_rx_tid( + struct dp_soc *soc, struct dp_peer *peer) +{ + int i; + struct dp_rx_tid *rx_tid; + struct hal_reo_cmd_params params; + + if (!peer) { + dp_err_rl("Peer is NULL"); + return; + } + + for (i = 0; i < DP_MAX_TIDS; i++) { + rx_tid = &peer->rx_tid[i]; + if (!rx_tid) + continue; + qdf_spin_lock_bh(&rx_tid->tid_lock); + if (rx_tid->hw_qdesc_vaddr_aligned) { + qdf_mem_zero(¶ms, sizeof(params)); + params.std.need_status = 1; + params.std.addr_lo = + rx_tid->hw_qdesc_paddr & 0xffffffff; + params.std.addr_hi = + (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + params.u.fl_cache_params.flush_no_inval = 0; + if (QDF_STATUS_SUCCESS != + dp_reo_send_cmd( + soc, CMD_FLUSH_CACHE, + ¶ms, dp_dump_rx_reo_queue_info, + (void *)rx_tid)) { + dp_err_rl("cache flush send failed tid %d", + rx_tid->tid); + qdf_spin_unlock_bh(&rx_tid->tid_lock); + break; + } + } + qdf_spin_unlock_bh(&rx_tid->tid_lock); + } +} + +void dp_get_rx_reo_queue_info( + struct cdp_soc_t *soc_hdl, uint8_t vdev_id) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + struct dp_peer *peer = NULL; + + if (!vdev) { + dp_err_rl("vdev is null for vdev_id: %u", vdev_id); + return; + } + + peer = vdev->vap_bss_peer; + + if (!peer) { + dp_err_rl("Peer is NULL"); + return; + } + dp_send_cache_flush_for_rx_tid(soc, peer); +} +#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.h new file mode 100644 index 0000000000000000000000000000000000000000..4992673a76aa2c710da317ff746be7a01845362f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.h @@ -0,0 +1,397 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef _DP_PEER_H_ +#define _DP_PEER_H_ + +#include +#include +#include "dp_types.h" + +#ifdef DUMP_REO_QUEUE_INFO_IN_DDR +#include "hal_reo.h" +#endif + +#define DP_INVALID_PEER_ID 0xffff + +#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000 +/** + * __dp_peer_find_by_id() - Returns peer object given the peer id + * + * @soc : core DP soc context + * @peer_id : peer id from peer object can be retrieved + * + * Return: struct dp_peer*: Pointer to DP peer object + */ +static inline struct dp_peer * +__dp_peer_find_by_id(struct dp_soc *soc, + uint16_t peer_id) +{ + struct dp_peer *peer; + + /* TODO: Hold lock */ + peer = (peer_id >= soc->max_peers) ? NULL : + soc->peer_id_to_obj_map[peer_id]; + + return peer; +} + +#ifdef PEER_PROTECTED_ACCESS +/** + * dp_peer_find_by_id() - Returns peer object given the peer id + * if delete_in_progress in not set for peer + * + * @soc : core DP soc context + * @peer_id : peer id from peer object can be retrieved + * + * Return: struct dp_peer*: Pointer to DP peer object + */ +static inline +struct dp_peer *dp_peer_find_by_id(struct dp_soc *soc, + uint16_t peer_id) +{ + struct dp_peer *peer; + + qdf_spin_lock_bh(&soc->peer_ref_mutex); + peer = __dp_peer_find_by_id(soc, peer_id); + if (!peer || (peer && peer->delete_in_progress)) { + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + return NULL; + } + qdf_atomic_inc(&peer->ref_cnt); + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + + return peer; +} +#else +static inline struct dp_peer * +dp_peer_find_by_id(struct dp_soc *soc, + uint16_t peer_id) +{ + struct dp_peer *peer; + + peer = __dp_peer_find_by_id (soc, peer_id); + if (peer && peer->delete_in_progress) { + return NULL; + } + + return peer; +} +#endif /* PEER_LOCK_REF_PROTECT */ + +#ifdef PEER_CACHE_RX_PKTS +/** + * dp_rx_flush_rx_cached() - flush cached rx frames + * @peer: peer + * @drop: set flag to drop frames + * + * Return: None + */ +void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop); +#else +static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop) +{ +} +#endif + +static inline void +dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer) +{ + qdf_spin_lock_bh(&peer->peer_info_lock); + peer->state = OL_TXRX_PEER_STATE_DISC; + qdf_spin_unlock_bh(&peer->peer_info_lock); + + dp_rx_flush_rx_cached(peer, true); +} + +void dp_print_ast_stats(struct dp_soc *soc); +void dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id, + uint16_t hw_peer_id, uint8_t vdev_id, + uint8_t *peer_mac_addr, uint16_t ast_hash, + uint8_t is_wds); +void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id, + uint8_t vdev_id, uint8_t *peer_mac_addr, + uint8_t is_wds); +void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id, + enum cdp_sec_type sec_type, int is_unicast, + u_int32_t *michael_key, u_int32_t *rx_pn); + +QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id, + uint8_t tid, uint16_t win_sz); + +uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle, + uint16_t peer_id, uint8_t *peer_mac); + +int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer, + uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, + uint32_t flags); + +void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry); + +void dp_peer_ast_unmap_handler(struct dp_soc *soc, + struct dp_ast_entry *ast_entry); + +int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer, + struct dp_ast_entry *ast_entry, uint32_t flags); + +struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc, + uint8_t *ast_mac_addr, + uint8_t pdev_id); + +struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc, + uint8_t *ast_mac_addr); + +struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc, + struct dp_peer *peer, + uint8_t *ast_mac_addr); + +uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc, + struct dp_ast_entry *ast_entry); + + +uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc, + struct dp_ast_entry *ast_entry); + +void dp_peer_ast_set_type(struct dp_soc *soc, + struct dp_ast_entry *ast_entry, + enum cdp_txrx_ast_entry_type type); + +void dp_peer_ast_send_wds_del(struct dp_soc *soc, + struct dp_ast_entry *ast_entry); + +void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + struct cdp_soc *dp_soc, + void *cookie, + enum cdp_ast_free_status status); + +void dp_peer_ast_hash_remove(struct dp_soc *soc, + struct dp_ast_entry *ase); + +void dp_peer_free_ast_entry(struct dp_soc *soc, + struct dp_ast_entry *ast_entry); + +void dp_peer_unlink_ast_entry(struct dp_soc *soc, + struct dp_ast_entry *ast_entry); + +/* + * dp_peer_find_by_id_exist - check if peer exists for given id + * @soc: core DP soc context + * @peer_id: peer id from peer object can be retrieved + * + * Return: true if peer exists of false otherwise + */ +bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id); + +#define DP_AST_ASSERT(_condition) \ + do { \ + if (!(_condition)) { \ + dp_print_ast_stats(soc);\ + QDF_BUG(_condition); \ + } \ + } while (0) + +/** + * dp_peer_update_inactive_time - Update inactive time for peer + * @pdev: pdev object + * @tag_type: htt_tlv_tag type + * #tag_buf: buf message + */ +void +dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type, + uint32_t *tag_buf); + +#ifndef QCA_MULTIPASS_SUPPORT +/** + * dp_peer_set_vlan_id: set vlan_id for this peer + * @cdp_soc: soc handle + * @vdev_id: id of vdev object + * @peer_mac: mac address + * @vlan_id: vlan id for peer + * + * return: void + */ +static inline +void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc, + uint8_t vdev_id, uint8_t *peer_mac, + uint16_t vlan_id) +{ +} + +/** + * dp_set_vlan_groupkey: set vlan map for vdev + * @soc: pointer to soc + * @vdev_id: id of vdev handle + * @vlan_id: vlan_id + * @group_key: group key for vlan + * + * return: set success/failure + */ +static inline +QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id, + uint16_t vlan_id, uint16_t group_key) +{ + return QDF_STATUS_SUCCESS; +} + +/** + * dp_peer_multipass_list_init: initialize multipass peer list + * @vdev: pointer to vdev + * + * return: void + */ +static inline +void dp_peer_multipass_list_init(struct dp_vdev *vdev) +{ +} + +/** + * dp_peer_multipass_list_remove: remove peer from special peer list + * @peer: peer handle + * + * return: void + */ +static inline +void dp_peer_multipass_list_remove(struct dp_peer *peer) +{ +} +#else +void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc, + uint8_t vdev_id, uint8_t *peer_mac, + uint16_t vlan_id); +QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id, + uint16_t vlan_id, uint16_t group_key); +void dp_peer_multipass_list_init(struct dp_vdev *vdev); +void dp_peer_multipass_list_remove(struct dp_peer *peer); +#endif + + +#ifndef QCA_PEER_MULTIQ_SUPPORT +/** + * dp_peer_reset_flowq_map() - reset peer flowq map table + * @peer - dp peer handle + * + * Return: none + */ +static inline +void dp_peer_reset_flowq_map(struct dp_peer *peer) +{ +} + +/** + * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map + * @soc - genereic soc handle + * @is_wds - flag to indicate if peer is wds + * @peer_id - peer_id from htt peer map message + * @peer_mac_addr - mac address of the peer + * @ast_info - ast flow override information from peer map + * + * Return: none + */ +static inline +void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl, + bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr, + struct dp_ast_flow_override_info *ast_info) +{ +} +#else +void dp_peer_reset_flowq_map(struct dp_peer *peer); +void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl, + bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr, + struct dp_ast_flow_override_info *ast_info); +#endif + +/** + * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer + * @soc: DP SOC handle + * @pdev_id: id of DP pdev handle + * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode + * @is_tx_pkt_cap_enable: enable/disable Tx packet capture in monitor mode + * @peer_mac: MAC address for which the above need to be enabled/disabled + * + * Return: Success if Rx & Tx capture is enabled for peer, false otherwise + */ +QDF_STATUS +dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc, + uint8_t pdev_id, + bool is_rx_pkt_cap_enable, + bool is_tx_pkt_cap_enable, + uint8_t *peer_mac); + +/* + * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache + * after deleting the entries (ie., setting valid=0) + * + * @soc: DP SOC handle + * @cb_ctxt: Callback context + * @reo_status: REO command status + */ +void dp_rx_tid_delete_cb(struct dp_soc *soc, + void *cb_ctxt, + union hal_reo_status *reo_status); + +#ifdef DUMP_REO_QUEUE_INFO_IN_DDR +/** + * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid + * @soc : dp_soc handle + * @peer: peer + * + * This function is used to send cache flush cmd to reo and + * to register the callback to handle the dumping of the reo + * queue stas from DDR + * + * Return: none + */ +void dp_send_cache_flush_for_rx_tid( + struct dp_soc *soc, struct dp_peer *peer); + +/** + * dp_get_rx_reo_queue_info() - Handler to get rx tid info + * @soc : cdp_soc_t handle + * @vdev_id: vdev id + * + * Handler to get rx tid info from DDR after h/w cache is + * invalidated first using the cache flush cmd. + * + * Return: none + */ +void dp_get_rx_reo_queue_info( + struct cdp_soc_t *soc_hdl, uint8_t vdev_id); + +/** + * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats + * @soc : dp_soc handle + * @cb_ctxt - callback context + * @reo_status: vdev id + * + * This is the callback function registered after sending the reo cmd + * to flush the h/w cache and invalidate it. In the callback the reo + * queue desc info is dumped from DDR. + * + * Return: none + */ +void dp_dump_rx_reo_queue_info( + struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status); + +#else /* DUMP_REO_QUEUE_INFO_IN_DDR */ + +static inline void dp_get_rx_reo_queue_info( + struct cdp_soc_t *soc_hdl, uint8_t vdev_id) +{ +} +#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */ +#endif /* _DP_PEER_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_reo.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_reo.c new file mode 100644 index 0000000000000000000000000000000000000000..ba1db7f13653478391d821ad0186ceac58efb8ee --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_reo.c @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "dp_types.h" +#include "hal_reo.h" +#include "dp_internal.h" +#include + +#ifdef WLAN_FEATURE_DP_EVENT_HISTORY +/** + * dp_reo_cmd_srng_event_record() - Record reo cmds posted + * to the reo cmd ring + * @soc: dp soc handle + * @type: reo cmd type + * @post_status: command error status + * + * Return: None + */ +static +void dp_reo_cmd_srng_event_record(struct dp_soc *soc, + enum hal_reo_cmd_type type, + int post_status) +{ + struct reo_cmd_event_history *cmd_event_history = + &soc->stats.cmd_event_history; + struct reo_cmd_event_record *record = cmd_event_history->cmd_record; + int record_index; + + record_index = (qdf_atomic_inc_return(&cmd_event_history->index)) & + (REO_CMD_EVENT_HIST_MAX - 1); + + record[record_index].cmd_type = type; + record[record_index].cmd_return_status = post_status; + record[record_index].timestamp = qdf_get_log_timestamp(); +} +#else +static inline +void dp_reo_cmd_srng_event_record(struct dp_soc *soc, + enum hal_reo_cmd_type type, + int post_status) +{ +} +#endif /*WLAN_FEATURE_DP_EVENT_HISTORY */ + +QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type, + struct hal_reo_cmd_params *params, + void (*callback_fn), void *data) +{ + struct dp_reo_cmd_info *reo_cmd; + int num; + + switch (type) { + case CMD_GET_QUEUE_STATS: + num = hal_reo_cmd_queue_stats(soc->reo_cmd_ring.hal_srng, + soc->hal_soc, params); + break; + case CMD_FLUSH_QUEUE: + num = hal_reo_cmd_flush_queue(soc->reo_cmd_ring.hal_srng, + soc->hal_soc, params); + break; + case CMD_FLUSH_CACHE: + num = hal_reo_cmd_flush_cache(soc->reo_cmd_ring.hal_srng, + soc->hal_soc, params); + break; + case CMD_UNBLOCK_CACHE: + num = hal_reo_cmd_unblock_cache(soc->reo_cmd_ring.hal_srng, + soc->hal_soc, params); + break; + case CMD_FLUSH_TIMEOUT_LIST: + num = hal_reo_cmd_flush_timeout_list(soc->reo_cmd_ring.hal_srng, + soc->hal_soc, params); + break; + case CMD_UPDATE_RX_REO_QUEUE: + num = hal_reo_cmd_update_rx_queue(soc->reo_cmd_ring.hal_srng, + soc->hal_soc, params); + break; + default: + dp_err_log("Invalid REO command type: %d", type); + return QDF_STATUS_E_INVAL; + }; + + dp_reo_cmd_srng_event_record(soc, type, num); + + if (num < 0) { + return QDF_STATUS_E_FAILURE; + } + + if (callback_fn) { + reo_cmd = qdf_mem_malloc(sizeof(*reo_cmd)); + if (!reo_cmd) { + dp_err_log("alloc failed for REO cmd:%d!!", + type); + return QDF_STATUS_E_NOMEM; + } + + reo_cmd->cmd = num; + reo_cmd->cmd_type = type; + reo_cmd->handler = callback_fn; + reo_cmd->data = data; + qdf_spin_lock_bh(&soc->rx.reo_cmd_lock); + TAILQ_INSERT_TAIL(&soc->rx.reo_cmd_list, reo_cmd, + reo_cmd_list_elem); + qdf_spin_unlock_bh(&soc->rx.reo_cmd_lock); + } + + return QDF_STATUS_SUCCESS; +} + +uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx, struct dp_soc *soc) +{ + uint32_t *reo_desc; + struct dp_reo_cmd_info *reo_cmd = NULL; + union hal_reo_status reo_status; + int num; + int processed_count = 0; + + if (dp_srng_access_start(int_ctx, soc, soc->reo_status_ring.hal_srng)) { + return processed_count; + } + reo_desc = hal_srng_dst_get_next(soc->hal_soc, + soc->reo_status_ring.hal_srng); + + while (reo_desc) { + uint16_t tlv = HAL_GET_TLV(reo_desc); + processed_count++; + + switch (tlv) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + hal_reo_queue_stats_status(reo_desc, + &reo_status.queue_status, + soc->hal_soc); + num = reo_status.queue_status.header.cmd_num; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + hal_reo_flush_queue_status(reo_desc, + &reo_status.fl_queue_status, + soc->hal_soc); + num = reo_status.fl_queue_status.header.cmd_num; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + hal_reo_flush_cache_status(reo_desc, + &reo_status.fl_cache_status, + soc->hal_soc); + num = reo_status.fl_cache_status.header.cmd_num; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + hal_reo_unblock_cache_status(reo_desc, soc->hal_soc, + &reo_status.unblk_cache_status); + num = reo_status.unblk_cache_status.header.cmd_num; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + hal_reo_flush_timeout_list_status(reo_desc, + &reo_status.fl_timeout_status, + soc->hal_soc); + num = reo_status.fl_timeout_status.header.cmd_num; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + hal_reo_desc_thres_reached_status(reo_desc, + &reo_status.thres_status, + soc->hal_soc); + num = reo_status.thres_status.header.cmd_num; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + hal_reo_rx_update_queue_status(reo_desc, + &reo_status.rx_queue_status, + soc->hal_soc); + num = reo_status.rx_queue_status.header.cmd_num; + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN, + "%s, no handler for TLV:%d", __func__, tlv); + goto next; + } /* switch */ + + qdf_spin_lock_bh(&soc->rx.reo_cmd_lock); + TAILQ_FOREACH(reo_cmd, &soc->rx.reo_cmd_list, + reo_cmd_list_elem) { + if (reo_cmd->cmd == num) { + TAILQ_REMOVE(&soc->rx.reo_cmd_list, reo_cmd, + reo_cmd_list_elem); + break; + } + } + qdf_spin_unlock_bh(&soc->rx.reo_cmd_lock); + + if (reo_cmd) { + reo_cmd->handler(soc, reo_cmd->data, + &reo_status); + qdf_mem_free(reo_cmd); + } + +next: + reo_desc = hal_srng_dst_get_next(soc, + soc->reo_status_ring.hal_srng); + } /* while */ + + dp_srng_access_end(int_ctx, soc, soc->reo_status_ring.hal_srng); + return processed_count; +} + +/** + * dp_reo_cmdlist_destroy - Free REO commands in the queue + * @soc: DP SoC hanle + * + */ +void dp_reo_cmdlist_destroy(struct dp_soc *soc) +{ + struct dp_reo_cmd_info *reo_cmd = NULL; + struct dp_reo_cmd_info *tmp_cmd = NULL; + union hal_reo_status reo_status; + + reo_status.queue_status.header.status = + HAL_REO_CMD_DRAIN; + + qdf_spin_lock_bh(&soc->rx.reo_cmd_lock); + TAILQ_FOREACH_SAFE(reo_cmd, &soc->rx.reo_cmd_list, + reo_cmd_list_elem, tmp_cmd) { + TAILQ_REMOVE(&soc->rx.reo_cmd_list, reo_cmd, + reo_cmd_list_elem); + reo_cmd->handler(soc, reo_cmd->data, &reo_status); + qdf_mem_free(reo_cmd); + } + qdf_spin_unlock_bh(&soc->rx.reo_cmd_lock); +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c new file mode 100644 index 0000000000000000000000000000000000000000..065a5e2ac34083d3dfae9e18128fd1e969a6e30c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c @@ -0,0 +1,3142 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hal_hw_headers.h" +#include "dp_types.h" +#include "dp_rx.h" +#include "dp_peer.h" +#include "hal_rx.h" +#include "hal_api.h" +#include "qdf_nbuf.h" +#ifdef MESH_MODE_SUPPORT +#include "if_meta_hdr.h" +#endif +#include "dp_internal.h" +#include "dp_rx_mon.h" +#include "dp_ipa.h" +#ifdef FEATURE_WDS +#include "dp_txrx_wds.h" +#endif + +#ifdef ATH_RX_PRI_SAVE +#define DP_RX_TID_SAVE(_nbuf, _tid) \ + (qdf_nbuf_set_priority(_nbuf, _tid)) +#else +#define DP_RX_TID_SAVE(_nbuf, _tid) +#endif + +#ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING +static inline +bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf) +{ + if (ta_peer->vdev->opmode == wlan_op_mode_ndi && + qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) { + DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1); + return false; + } + return true; +} +#else +static inline +bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf) +{ + return true; +} +#endif +static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev) +{ + return vdev->ap_bridge_enabled; +} + +#ifdef DUP_RX_DESC_WAR +void dp_rx_dump_info_and_assert(struct dp_soc *soc, + hal_ring_handle_t hal_ring, + hal_ring_desc_t ring_desc, + struct dp_rx_desc *rx_desc) +{ + void *hal_soc = soc->hal_soc; + + hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc); + dp_rx_desc_dump(rx_desc); +} +#else +void dp_rx_dump_info_and_assert(struct dp_soc *soc, + hal_ring_handle_t hal_ring_hdl, + hal_ring_desc_t ring_desc, + struct dp_rx_desc *rx_desc) +{ + hal_soc_handle_t hal_soc = soc->hal_soc; + + dp_rx_desc_dump(rx_desc); + hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc); + hal_srng_dump_ring(hal_soc, hal_ring_hdl); + qdf_assert_always(0); +} +#endif + +#ifdef RX_DESC_SANITY_WAR +static inline +QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, + hal_ring_handle_t hal_ring_hdl, + hal_ring_desc_t ring_desc, + struct dp_rx_desc *rx_desc) +{ + uint8_t return_buffer_manager; + + if (qdf_unlikely(!rx_desc)) { + /* + * This is an unlikely case where the cookie obtained + * from the ring_desc is invalid and hence we are not + * able to find the corresponding rx_desc + */ + goto fail; + } + + return_buffer_manager = hal_rx_ret_buf_manager_get(ring_desc); + if (qdf_unlikely(!(return_buffer_manager == HAL_RX_BUF_RBM_SW1_BM || + return_buffer_manager == HAL_RX_BUF_RBM_SW3_BM))) { + goto fail; + } + + return QDF_STATUS_SUCCESS; + +fail: + DP_STATS_INC(soc, rx.err.invalid_cookie, 1); + dp_err("Ring Desc:"); + hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, + ring_desc); + return QDF_STATUS_E_NULL_VALUE; + +} +#else +static inline +QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, + hal_ring_handle_t hal_ring_hdl, + hal_ring_desc_t ring_desc, + struct dp_rx_desc *rx_desc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/* + * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs + * called during dp rx initialization + * and at the end of dp_rx_process. + * + * @soc: core txrx main context + * @mac_id: mac_id which is one of 3 mac_ids + * @dp_rxdma_srng: dp rxdma circular ring + * @rx_desc_pool: Pointer to free Rx descriptor pool + * @num_req_buffers: number of buffer to be replenished + * @desc_list: list of descs if called from dp_rx_process + * or NULL during dp rx initialization or out of buffer + * interrupt. + * @tail: tail of descs list + * @func_name: name of the caller function + * Return: return success or failure + */ +QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, + struct dp_srng *dp_rxdma_srng, + struct rx_desc_pool *rx_desc_pool, + uint32_t num_req_buffers, + union dp_rx_desc_list_elem_t **desc_list, + union dp_rx_desc_list_elem_t **tail, + const char *func_name) +{ + uint32_t num_alloc_desc; + uint16_t num_desc_to_free = 0; + struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); + uint32_t num_entries_avail; + uint32_t count; + int sync_hw_ptr = 1; + qdf_dma_addr_t paddr; + qdf_nbuf_t rx_netbuf; + void *rxdma_ring_entry; + union dp_rx_desc_list_elem_t *next; + QDF_STATUS ret; + uint16_t buf_size = rx_desc_pool->buf_size; + uint8_t buf_alignment = rx_desc_pool->buf_alignment; + + void *rxdma_srng; + + rxdma_srng = dp_rxdma_srng->hal_srng; + + if (!rxdma_srng) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "rxdma srng not initialized"); + DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); + return QDF_STATUS_E_FAILURE; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "requested %d buffers for replenish", num_req_buffers); + + hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); + num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, + rxdma_srng, + sync_hw_ptr); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "no of available entries in rxdma ring: %d", + num_entries_avail); + + if (!(*desc_list) && (num_entries_avail > + ((dp_rxdma_srng->num_entries * 3) / 4))) { + num_req_buffers = num_entries_avail; + } else if (num_entries_avail < num_req_buffers) { + num_desc_to_free = num_req_buffers - num_entries_avail; + num_req_buffers = num_entries_avail; + } + + if (qdf_unlikely(!num_req_buffers)) { + num_desc_to_free = num_req_buffers; + hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); + goto free_descs; + } + + /* + * if desc_list is NULL, allocate the descs from freelist + */ + if (!(*desc_list)) { + num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id, + rx_desc_pool, + num_req_buffers, + desc_list, + tail); + + if (!num_alloc_desc) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "no free rx_descs in freelist"); + DP_STATS_INC(dp_pdev, err.desc_alloc_fail, + num_req_buffers); + hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); + return QDF_STATUS_E_NOMEM; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%d rx desc allocated", num_alloc_desc); + num_req_buffers = num_alloc_desc; + } + + + count = 0; + + while (count < num_req_buffers) { + rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev, + buf_size, + RX_BUFFER_RESERVATION, + buf_alignment, + FALSE); + + if (qdf_unlikely(!rx_netbuf)) { + DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); + break; + } + + ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf, + QDF_DMA_FROM_DEVICE); + if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { + qdf_nbuf_free(rx_netbuf); + DP_STATS_INC(dp_pdev, replenish.map_err, 1); + continue; + } + + paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0); + + dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, rx_netbuf, true); + /* + * check if the physical address of nbuf->data is + * less then 0x50000000 then free the nbuf and try + * allocating new nbuf. We can try for 100 times. + * this is a temp WAR till we fix it properly. + */ + ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, rx_desc_pool); + if (ret == QDF_STATUS_E_FAILURE) { + DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); + break; + } + + count++; + + rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc, + rxdma_srng); + qdf_assert_always(rxdma_ring_entry); + + next = (*desc_list)->next; + + dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf); + + /* rx_desc.in_use should be zero at this time*/ + qdf_assert_always((*desc_list)->rx_desc.in_use == 0); + + (*desc_list)->rx_desc.in_use = 1; + (*desc_list)->rx_desc.in_err_state = 0; + dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc, + func_name, RX_DESC_REPLENISHED); + dp_verbose_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d", + rx_netbuf, qdf_nbuf_data(rx_netbuf), + (unsigned long long)paddr, + (*desc_list)->rx_desc.cookie); + + hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr, + (*desc_list)->rx_desc.cookie, + rx_desc_pool->owner); + + *desc_list = next; + + } + + hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); + + dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u", + count, num_desc_to_free); + + /* No need to count the number of bytes received during replenish. + * Therefore set replenish.pkts.bytes as 0. + */ + DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0); + +free_descs: + DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free); + /* + * add any available free desc back to the free list + */ + if (*desc_list) + dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail, + mac_id, rx_desc_pool); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_deliver_raw() - process RAW mode pkts and hand over the + * pkts to RAW mode simulation to + * decapsulate the pkt. + * + * @vdev: vdev on which RAW mode is enabled + * @nbuf_list: list of RAW pkts to process + * @peer: peer object from which the pkt is rx + * + * Return: void + */ +void +dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list, + struct dp_peer *peer) +{ + qdf_nbuf_t deliver_list_head = NULL; + qdf_nbuf_t deliver_list_tail = NULL; + qdf_nbuf_t nbuf; + + nbuf = nbuf_list; + while (nbuf) { + qdf_nbuf_t next = qdf_nbuf_next(nbuf); + + DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf); + + DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); + DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf)); + /* + * reset the chfrag_start and chfrag_end bits in nbuf cb + * as this is a non-amsdu pkt and RAW mode simulation expects + * these bit s to be 0 for non-amsdu pkt. + */ + if (qdf_nbuf_is_rx_chfrag_start(nbuf) && + qdf_nbuf_is_rx_chfrag_end(nbuf)) { + qdf_nbuf_set_rx_chfrag_start(nbuf, 0); + qdf_nbuf_set_rx_chfrag_end(nbuf, 0); + } + + nbuf = next; + } + + vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head, + &deliver_list_tail, peer->mac_addr.raw); + + vdev->osif_rx(vdev->osif_vdev, deliver_list_head); +} + + +#ifdef DP_LFR +/* + * In case of LFR, data of a new peer might be sent up + * even before peer is added. + */ +static inline struct dp_vdev * +dp_get_vdev_from_peer(struct dp_soc *soc, + uint16_t peer_id, + struct dp_peer *peer, + struct hal_rx_mpdu_desc_info mpdu_desc_info) +{ + struct dp_vdev *vdev; + uint8_t vdev_id; + + if (unlikely(!peer)) { + if (peer_id != HTT_INVALID_PEER) { + vdev_id = DP_PEER_METADATA_VDEV_ID_GET( + mpdu_desc_info.peer_meta_data); + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + FL("PeerID %d not found use vdevID %d"), + peer_id, vdev_id); + vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, + vdev_id); + } else { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + FL("Invalid PeerID %d"), + peer_id); + return NULL; + } + } else { + vdev = peer->vdev; + } + return vdev; +} +#else +static inline struct dp_vdev * +dp_get_vdev_from_peer(struct dp_soc *soc, + uint16_t peer_id, + struct dp_peer *peer, + struct hal_rx_mpdu_desc_info mpdu_desc_info) +{ + if (unlikely(!peer)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + FL("Peer not found for peerID %d"), + peer_id); + return NULL; + } else { + return peer->vdev; + } +} +#endif + +#ifndef FEATURE_WDS +static void +dp_rx_da_learn(struct dp_soc *soc, + uint8_t *rx_tlv_hdr, + struct dp_peer *ta_peer, + qdf_nbuf_t nbuf) +{ +} +#endif +/* + * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic + * + * @soc: core txrx main context + * @ta_peer : source peer entry + * @rx_tlv_hdr : start address of rx tlvs + * @nbuf : nbuf that has to be intrabss forwarded + * + * Return: bool: true if it is forwarded else false + */ +static bool +dp_rx_intrabss_fwd(struct dp_soc *soc, + struct dp_peer *ta_peer, + uint8_t *rx_tlv_hdr, + qdf_nbuf_t nbuf, + struct hal_rx_msdu_metadata msdu_metadata) +{ + uint16_t len; + uint8_t is_frag; + struct dp_peer *da_peer; + struct dp_ast_entry *ast_entry; + qdf_nbuf_t nbuf_copy; + uint8_t tid = qdf_nbuf_get_tid_val(nbuf); + uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); + struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats. + tid_stats.tid_rx_stats[ring_id][tid]; + + /* check if the destination peer is available in peer table + * and also check if the source peer and destination peer + * belong to the same vap and destination peer is not bss peer. + */ + + if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) { + + ast_entry = soc->ast_table[msdu_metadata.da_idx]; + if (!ast_entry) + return false; + + if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) { + ast_entry->is_active = TRUE; + return false; + } + + da_peer = ast_entry->peer; + + if (!da_peer) + return false; + /* TA peer cannot be same as peer(DA) on which AST is present + * this indicates a change in topology and that AST entries + * are yet to be updated. + */ + if (da_peer == ta_peer) + return false; + + if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) { + len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); + is_frag = qdf_nbuf_is_frag(nbuf); + memset(nbuf->cb, 0x0, sizeof(nbuf->cb)); + + /* linearize the nbuf just before we send to + * dp_tx_send() + */ + if (qdf_unlikely(is_frag)) { + if (qdf_nbuf_linearize(nbuf) == -ENOMEM) + return false; + + nbuf = qdf_nbuf_unshare(nbuf); + if (!nbuf) { + DP_STATS_INC_PKT(ta_peer, + rx.intra_bss.fail, + 1, + len); + /* return true even though the pkt is + * not forwarded. Basically skb_unshare + * failed and we want to continue with + * next nbuf. + */ + tid_stats->fail_cnt[INTRABSS_DROP]++; + return true; + } + } + + if (!dp_tx_send((struct cdp_soc_t *)soc, + ta_peer->vdev->vdev_id, nbuf)) { + DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, + len); + return true; + } else { + DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, + len); + tid_stats->fail_cnt[INTRABSS_DROP]++; + return false; + } + } + } + /* if it is a broadcast pkt (eg: ARP) and it is not its own + * source, then clone the pkt and send the cloned pkt for + * intra BSS forwarding and original pkt up the network stack + * Note: how do we handle multicast pkts. do we forward + * all multicast pkts as is or let a higher layer module + * like igmpsnoop decide whether to forward or not with + * Mcast enhancement. + */ + else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) && + !ta_peer->bss_peer))) { + if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf)) + goto end; + + nbuf_copy = qdf_nbuf_copy(nbuf); + if (!nbuf_copy) + goto end; + + len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); + memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb)); + + /* Set cb->ftype to intrabss FWD */ + qdf_nbuf_set_tx_ftype(nbuf_copy, CB_FTYPE_INTRABSS_FWD); + if (dp_tx_send((struct cdp_soc_t *)soc, + ta_peer->vdev->vdev_id, nbuf_copy)) { + DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len); + tid_stats->fail_cnt[INTRABSS_DROP]++; + qdf_nbuf_free(nbuf_copy); + } else { + DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len); + tid_stats->intrabss_cnt++; + } + } + +end: + /* return false as we have to still send the original pkt + * up the stack + */ + return false; +} + +#ifdef MESH_MODE_SUPPORT + +/** + * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats + * + * @vdev: DP Virtual device handle + * @nbuf: Buffer pointer + * @rx_tlv_hdr: start of rx tlv header + * @peer: pointer to peer + * + * This function allocated memory for mesh receive stats and fill the + * required stats. Stores the memory address in skb cb. + * + * Return: void + */ + +void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, struct dp_peer *peer) +{ + struct mesh_recv_hdr_s *rx_info = NULL; + uint32_t pkt_type; + uint32_t nss; + uint32_t rate_mcs; + uint32_t bw; + + /* fill recv mesh stats */ + rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s)); + + /* upper layers are resposible to free this memory */ + + if (!rx_info) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Memory allocation failed for mesh rx stats"); + DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1); + return; + } + + rx_info->rs_flags = MESH_RXHDR_VER1; + if (qdf_nbuf_is_rx_chfrag_start(nbuf)) + rx_info->rs_flags |= MESH_RX_FIRST_MSDU; + + if (qdf_nbuf_is_rx_chfrag_end(nbuf)) + rx_info->rs_flags |= MESH_RX_LAST_MSDU; + + if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) { + rx_info->rs_flags |= MESH_RX_DECRYPTED; + rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr); + if (vdev->osif_get_key) + vdev->osif_get_key(vdev->osif_vdev, + &rx_info->rs_decryptkey[0], + &peer->mac_addr.raw[0], + rx_info->rs_keyix); + } + + rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr); + rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr); + pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr); + rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr); + bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr); + nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr); + rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) | + (bw << 24); + + qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED, + FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"), + rx_info->rs_flags, + rx_info->rs_rssi, + rx_info->rs_channel, + rx_info->rs_ratephy1, + rx_info->rs_keyix); + +} + +/** + * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets + * + * @vdev: DP Virtual device handle + * @nbuf: Buffer pointer + * @rx_tlv_hdr: start of rx tlv header + * + * This checks if the received packet is matching any filter out + * catogery and and drop the packet if it matches. + * + * Return: status(0 indicates drop, 1 indicate to no drop) + */ + +QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr) +{ + union dp_align_mac_addr mac_addr; + struct dp_soc *soc = vdev->pdev->soc; + + if (qdf_unlikely(vdev->mesh_rx_filter)) { + if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS) + if (hal_rx_mpdu_get_fr_ds(soc->hal_soc, + rx_tlv_hdr)) + return QDF_STATUS_SUCCESS; + + if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS) + if (hal_rx_mpdu_get_to_ds(soc->hal_soc, + rx_tlv_hdr)) + return QDF_STATUS_SUCCESS; + + if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS) + if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc, + rx_tlv_hdr) && + !hal_rx_mpdu_get_to_ds(soc->hal_soc, + rx_tlv_hdr)) + return QDF_STATUS_SUCCESS; + + if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) { + if (hal_rx_mpdu_get_addr1(soc->hal_soc, + rx_tlv_hdr, + &mac_addr.raw[0])) + return QDF_STATUS_E_FAILURE; + + if (!qdf_mem_cmp(&mac_addr.raw[0], + &vdev->mac_addr.raw[0], + QDF_MAC_ADDR_SIZE)) + return QDF_STATUS_SUCCESS; + } + + if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) { + if (hal_rx_mpdu_get_addr2(soc->hal_soc, + rx_tlv_hdr, + &mac_addr.raw[0])) + return QDF_STATUS_E_FAILURE; + + if (!qdf_mem_cmp(&mac_addr.raw[0], + &vdev->mac_addr.raw[0], + QDF_MAC_ADDR_SIZE)) + return QDF_STATUS_SUCCESS; + } + } + + return QDF_STATUS_E_FAILURE; +} + +#else +void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, struct dp_peer *peer) +{ +} + +QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr) +{ + return QDF_STATUS_E_FAILURE; +} + +#endif + +#ifdef FEATURE_NAC_RSSI +/** + * dp_rx_nac_filter(): Function to perform filtering of non-associated + * clients + * @pdev: DP pdev handle + * @rx_pkt_hdr: Rx packet Header + * + * return: dp_vdev* + */ +static +struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev, + uint8_t *rx_pkt_hdr) +{ + struct ieee80211_frame *wh; + struct dp_neighbour_peer *peer = NULL; + + wh = (struct ieee80211_frame *)rx_pkt_hdr; + + if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS) + return NULL; + + qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); + TAILQ_FOREACH(peer, &pdev->neighbour_peers_list, + neighbour_peer_list_elem) { + if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], + wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) { + QDF_TRACE( + QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"), + peer->neighbour_peers_macaddr.raw[0], + peer->neighbour_peers_macaddr.raw[1], + peer->neighbour_peers_macaddr.raw[2], + peer->neighbour_peers_macaddr.raw[3], + peer->neighbour_peers_macaddr.raw[4], + peer->neighbour_peers_macaddr.raw[5]); + + qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); + + return pdev->monitor_vdev; + } + } + qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); + + return NULL; +} + +/** + * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac + * @soc: DP SOC handle + * @mpdu: mpdu for which peer is invalid + * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and + * pool_id has same mapping) + * + * return: integer type + */ +uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu, + uint8_t mac_id) +{ + struct dp_invalid_peer_msg msg; + struct dp_vdev *vdev = NULL; + struct dp_pdev *pdev = NULL; + struct ieee80211_frame *wh; + qdf_nbuf_t curr_nbuf, next_nbuf; + uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu); + uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr); + + rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr); + + if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "Drop decapped frames"); + goto free; + } + + wh = (struct ieee80211_frame *)rx_pkt_hdr; + + if (!DP_FRAME_IS_DATA(wh)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "NAWDS valid only for data frames"); + goto free; + } + + if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid nbuf length"); + goto free; + } + + pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + + if (!pdev || qdf_unlikely(pdev->is_pdev_down)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "PDEV %s", !pdev ? "not found" : "down"); + goto free; + } + + if (pdev->filter_neighbour_peers) { + /* Next Hop scenario not yet handle */ + vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr); + if (vdev) { + dp_rx_mon_deliver(soc, pdev->pdev_id, + pdev->invalid_peer_head_msdu, + pdev->invalid_peer_tail_msdu); + + pdev->invalid_peer_head_msdu = NULL; + pdev->invalid_peer_tail_msdu = NULL; + + return 0; + } + } + + TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { + + if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, + QDF_MAC_ADDR_SIZE) == 0) { + goto out; + } + } + + if (!vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "VDEV not found"); + goto free; + } + +out: + msg.wh = wh; + qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN); + msg.nbuf = mpdu; + msg.vdev_id = vdev->vdev_id; + if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) + pdev->soc->cdp_soc.ol_ops->rx_invalid_peer( + (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc, + pdev->pdev_id, &msg); + +free: + /* Drop and free packet */ + curr_nbuf = mpdu; + while (curr_nbuf) { + next_nbuf = qdf_nbuf_next(curr_nbuf); + qdf_nbuf_free(curr_nbuf); + curr_nbuf = next_nbuf; + } + + return 0; +} + +/** + * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler + * @soc: DP SOC handle + * @mpdu: mpdu for which peer is invalid + * @mpdu_done: if an mpdu is completed + * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and + * pool_id has same mapping) + * + * return: integer type + */ +void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, + qdf_nbuf_t mpdu, bool mpdu_done, + uint8_t mac_id) +{ + /* Only trigger the process when mpdu is completed */ + if (mpdu_done) + dp_rx_process_invalid_peer(soc, mpdu, mac_id); +} +#else +uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu, + uint8_t mac_id) +{ + qdf_nbuf_t curr_nbuf, next_nbuf; + struct dp_pdev *pdev; + struct dp_vdev *vdev = NULL; + struct ieee80211_frame *wh; + uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu); + uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr); + + wh = (struct ieee80211_frame *)rx_pkt_hdr; + + if (!DP_FRAME_IS_DATA(wh)) { + QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, + "only for data frames"); + goto free; + } + + if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid nbuf length"); + goto free; + } + + pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + "PDEV not found"); + goto free; + } + + qdf_spin_lock_bh(&pdev->vdev_list_lock); + DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { + if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, + QDF_MAC_ADDR_SIZE) == 0) { + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + goto out; + } + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + + if (!vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "VDEV not found"); + goto free; + } + +out: + if (soc->cdp_soc.ol_ops->rx_invalid_peer) + soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh); +free: + /* reset the head and tail pointers */ + pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + if (pdev) { + pdev->invalid_peer_head_msdu = NULL; + pdev->invalid_peer_tail_msdu = NULL; + } + + /* Drop and free packet */ + curr_nbuf = mpdu; + while (curr_nbuf) { + next_nbuf = qdf_nbuf_next(curr_nbuf); + qdf_nbuf_free(curr_nbuf); + curr_nbuf = next_nbuf; + } + + /* Reset the head and tail pointers */ + pdev = dp_get_pdev_for_mac_id(soc, mac_id); + if (pdev) { + pdev->invalid_peer_head_msdu = NULL; + pdev->invalid_peer_tail_msdu = NULL; + } + + return 0; +} + +void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, + qdf_nbuf_t mpdu, bool mpdu_done, + uint8_t mac_id) +{ + /* Process the nbuf */ + dp_rx_process_invalid_peer(soc, mpdu, mac_id); +} +#endif + +#ifdef RECEIVE_OFFLOAD +/** + * dp_rx_print_offload_info() - Print offload info from RX TLV + * @soc: dp soc handle + * @rx_tlv: RX TLV for which offload information is to be printed + * + * Return: None + */ +static void dp_rx_print_offload_info(struct dp_soc *soc, uint8_t *rx_tlv) +{ + dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------"); + dp_verbose_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv)); + dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv)); + dp_verbose_debug("chksum 0x%x", hal_rx_tlv_get_tcp_chksum(soc->hal_soc, + rx_tlv)); + dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv)); + dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv)); + dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv)); + dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv)); + dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv)); + dp_verbose_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv)); + dp_verbose_debug("---------------------------------------------------------"); +} + +/** + * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb + * @soc: DP SOC handle + * @rx_tlv: RX TLV received for the msdu + * @msdu: msdu for which GRO info needs to be filled + * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets + * + * Return: None + */ +static +void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, + qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) +{ + if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) + return; + + /* Filling up RX offload info only for TCP packets */ + if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv)) + return; + + *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1; + + QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = + HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv); + QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = + HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv); + QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) = + hal_rx_tlv_get_tcp_chksum(soc->hal_soc, + rx_tlv); + QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = + HAL_RX_TLV_GET_TCP_SEQ(rx_tlv); + QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = + HAL_RX_TLV_GET_TCP_ACK(rx_tlv); + QDF_NBUF_CB_RX_TCP_WIN(msdu) = + HAL_RX_TLV_GET_TCP_WIN(rx_tlv); + QDF_NBUF_CB_RX_TCP_PROTO(msdu) = + HAL_RX_TLV_GET_TCP_PROTO(rx_tlv); + QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = + HAL_RX_TLV_GET_IPV6(rx_tlv); + QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = + HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv); + QDF_NBUF_CB_RX_FLOW_ID(msdu) = + HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv); + + dp_rx_print_offload_info(soc, rx_tlv); +} +#else +static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, + qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) +{ +} +#endif /* RECEIVE_OFFLOAD */ + +/** + * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf. + * + * @nbuf: pointer to msdu. + * @mpdu_len: mpdu length + * @l3_pad_len: L3 padding length by HW + * + * Return: returns true if nbuf is last msdu of mpdu else retuns false. + */ +static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, + uint16_t *mpdu_len, + uint32_t l3_pad_len) +{ + bool last_nbuf; + uint32_t pkt_hdr_size; + + pkt_hdr_size = RX_PKT_TLVS_LEN + l3_pad_len; + + if ((*mpdu_len + pkt_hdr_size) > RX_DATA_BUFFER_SIZE) { + qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE); + last_nbuf = false; + *mpdu_len -= (RX_DATA_BUFFER_SIZE - pkt_hdr_size); + } else { + qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size)); + last_nbuf = true; + *mpdu_len = 0; + } + + return last_nbuf; +} + +/** + * dp_get_l3_hdr_pad_len() - get L3 header padding length. + * + * @soc: DP soc handle + * @nbuf: pointer to msdu. + * + * Return: returns padding length in bytes. + */ +static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc, + qdf_nbuf_t nbuf) +{ + uint32_t l3_hdr_pad = 0; + uint8_t *rx_tlv_hdr; + struct hal_rx_msdu_metadata msdu_metadata; + + while (nbuf) { + if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) { + /* scattered msdu end with continuation is 0 */ + rx_tlv_hdr = qdf_nbuf_data(nbuf); + hal_rx_msdu_metadata_get(soc->hal_soc, + rx_tlv_hdr, + &msdu_metadata); + l3_hdr_pad = msdu_metadata.l3_hdr_pad; + break; + } + nbuf = nbuf->next; + } + + return l3_hdr_pad; +} + +/** + * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across + * multiple nbufs. + * @soc: DP SOC handle + * @nbuf: pointer to the first msdu of an amsdu. + * + * This function implements the creation of RX frag_list for cases + * where an MSDU is spread across multiple nbufs. + * + * Return: returns the head nbuf which contains complete frag_list. + */ +qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf) +{ + qdf_nbuf_t parent, frag_list, next = NULL; + uint16_t frag_list_len = 0; + uint16_t mpdu_len; + bool last_nbuf; + uint32_t l3_hdr_pad_offset = 0; + + /* + * Use msdu len got from REO entry descriptor instead since + * there is case the RX PKT TLV is corrupted while msdu_len + * from REO descriptor is right for non-raw RX scatter msdu. + */ + mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); + + /* + * this is a case where the complete msdu fits in one single nbuf. + * in this case HW sets both start and end bit and we only need to + * reset these bits for RAW mode simulator to decap the pkt + */ + if (qdf_nbuf_is_rx_chfrag_start(nbuf) && + qdf_nbuf_is_rx_chfrag_end(nbuf)) { + qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN); + qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN); + return nbuf; + } + + l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf); + + /* + * This is a case where we have multiple msdus (A-MSDU) spread across + * multiple nbufs. here we create a fraglist out of these nbufs. + * + * the moment we encounter a nbuf with continuation bit set we + * know for sure we have an MSDU which is spread across multiple + * nbufs. We loop through and reap nbufs till we reach last nbuf. + */ + parent = nbuf; + frag_list = nbuf->next; + nbuf = nbuf->next; + + /* + * set the start bit in the first nbuf we encounter with continuation + * bit set. This has the proper mpdu length set as it is the first + * msdu of the mpdu. this becomes the parent nbuf and the subsequent + * nbufs will form the frag_list of the parent nbuf. + */ + qdf_nbuf_set_rx_chfrag_start(parent, 1); + /* + * L3 header padding is only needed for the 1st buffer + * in a scattered msdu + */ + last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len, + l3_hdr_pad_offset); + + /* + * HW issue: MSDU cont bit is set but reported MPDU length can fit + * in to single buffer + * + * Increment error stats and avoid SG list creation + */ + if (last_nbuf) { + qdf_nbuf_pull_head(parent, + RX_PKT_TLVS_LEN + l3_hdr_pad_offset); + return parent; + } + + /* + * this is where we set the length of the fragments which are + * associated to the parent nbuf. We iterate through the frag_list + * till we hit the last_nbuf of the list. + */ + do { + last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len, 0); + qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN); + frag_list_len += qdf_nbuf_len(nbuf); + + if (last_nbuf) { + next = nbuf->next; + nbuf->next = NULL; + break; + } + + nbuf = nbuf->next; + } while (!last_nbuf); + + qdf_nbuf_set_rx_chfrag_start(nbuf, 0); + qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len); + parent->next = next; + + qdf_nbuf_pull_head(parent, + RX_PKT_TLVS_LEN + l3_hdr_pad_offset); + return parent; +} + +/** + * dp_rx_compute_delay() - Compute and fill in all timestamps + * to pass in correct fields + * + * @vdev: pdev handle + * @tx_desc: tx descriptor + * @tid: tid value + * Return: none + */ +void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf) +{ + uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); + int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get()); + uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf); + uint8_t tid = qdf_nbuf_get_tid_val(nbuf); + uint32_t interframe_delay = + (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp); + + dp_update_delay_stats(vdev->pdev, to_stack, tid, + CDP_DELAY_STATS_REAP_STACK, ring_id); + /* + * Update interframe delay stats calculated at deliver_data_ol point. + * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so + * interframe delay will not be calculate correctly for 1st frame. + * On the other side, this will help in avoiding extra per packet check + * of vdev->prev_rx_deliver_tstamp. + */ + dp_update_delay_stats(vdev->pdev, interframe_delay, tid, + CDP_DELAY_STATS_RX_INTERFRAME, ring_id); + vdev->prev_rx_deliver_tstamp = current_ts; +} + +/** + * dp_rx_drop_nbuf_list() - drop an nbuf list + * @pdev: dp pdev reference + * @buf_list: buffer list to be dropepd + * + * Return: int (number of bufs dropped) + */ +static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev, + qdf_nbuf_t buf_list) +{ + struct cdp_tid_rx_stats *stats = NULL; + uint8_t tid = 0, ring_id = 0; + int num_dropped = 0; + qdf_nbuf_t buf, next_buf; + + buf = buf_list; + while (buf) { + ring_id = QDF_NBUF_CB_RX_CTX_ID(buf); + next_buf = qdf_nbuf_queue_next(buf); + tid = qdf_nbuf_get_tid_val(buf); + if (qdf_likely(pdev)) { + stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; + stats->fail_cnt[INVALID_PEER_VDEV]++; + stats->delivered_to_stack--; + } + qdf_nbuf_free(buf); + buf = next_buf; + num_dropped++; + } + + return num_dropped; +} + +#ifdef PEER_CACHE_RX_PKTS +/** + * dp_rx_flush_rx_cached() - flush cached rx frames + * @peer: peer + * @drop: flag to drop frames or forward to net stack + * + * Return: None + */ +void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop) +{ + struct dp_peer_cached_bufq *bufqi; + struct dp_rx_cached_buf *cache_buf = NULL; + ol_txrx_rx_fp data_rx = NULL; + int num_buff_elem; + QDF_STATUS status; + + if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) { + qdf_atomic_dec(&peer->flush_in_progress); + return; + } + + qdf_spin_lock_bh(&peer->peer_info_lock); + if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx) + data_rx = peer->vdev->osif_rx; + else + drop = true; + qdf_spin_unlock_bh(&peer->peer_info_lock); + + bufqi = &peer->bufq_info; + + qdf_spin_lock_bh(&bufqi->bufq_lock); + qdf_list_remove_front(&bufqi->cached_bufq, + (qdf_list_node_t **)&cache_buf); + while (cache_buf) { + num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST( + cache_buf->buf); + bufqi->entries -= num_buff_elem; + qdf_spin_unlock_bh(&bufqi->bufq_lock); + if (drop) { + bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, + cache_buf->buf); + } else { + /* Flush the cached frames to OSIF DEV */ + status = data_rx(peer->vdev->osif_vdev, cache_buf->buf); + if (status != QDF_STATUS_SUCCESS) + bufqi->dropped = dp_rx_drop_nbuf_list( + peer->vdev->pdev, + cache_buf->buf); + } + qdf_mem_free(cache_buf); + cache_buf = NULL; + qdf_spin_lock_bh(&bufqi->bufq_lock); + qdf_list_remove_front(&bufqi->cached_bufq, + (qdf_list_node_t **)&cache_buf); + } + qdf_spin_unlock_bh(&bufqi->bufq_lock); + qdf_atomic_dec(&peer->flush_in_progress); +} + +/** + * dp_rx_enqueue_rx() - cache rx frames + * @peer: peer + * @rx_buf_list: cache buffer list + * + * Return: None + */ +static QDF_STATUS +dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list) +{ + struct dp_rx_cached_buf *cache_buf; + struct dp_peer_cached_bufq *bufqi = &peer->bufq_info; + int num_buff_elem; + + dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries, + bufqi->dropped); + if (!peer->valid) { + bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, + rx_buf_list); + return QDF_STATUS_E_INVAL; + } + + qdf_spin_lock_bh(&bufqi->bufq_lock); + if (bufqi->entries >= bufqi->thresh) { + bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, + rx_buf_list); + qdf_spin_unlock_bh(&bufqi->bufq_lock); + return QDF_STATUS_E_RESOURCES; + } + qdf_spin_unlock_bh(&bufqi->bufq_lock); + + num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list); + + cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf)); + if (!cache_buf) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Failed to allocate buf to cache rx frames"); + bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev, + rx_buf_list); + return QDF_STATUS_E_NOMEM; + } + + cache_buf->buf = rx_buf_list; + + qdf_spin_lock_bh(&bufqi->bufq_lock); + qdf_list_insert_back(&bufqi->cached_bufq, + &cache_buf->node); + bufqi->entries += num_buff_elem; + qdf_spin_unlock_bh(&bufqi->bufq_lock); + + return QDF_STATUS_SUCCESS; +} + +static inline +bool dp_rx_is_peer_cache_bufq_supported(void) +{ + return true; +} +#else +static inline +bool dp_rx_is_peer_cache_bufq_supported(void) +{ + return false; +} + +static inline QDF_STATUS +dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifndef DELIVERY_TO_STACK_STATUS_CHECK +/** + * dp_rx_check_delivery_to_stack() - Deliver pkts to network + * using the appropriate call back functions. + * @soc: soc + * @vdev: vdev + * @peer: peer + * @nbuf_head: skb list head + * @nbuf_tail: skb list tail + * + * Return: None + */ +static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, + struct dp_vdev *vdev, + struct dp_peer *peer, + qdf_nbuf_t nbuf_head) +{ + /* Function pointer initialized only when FISA is enabled */ + if (vdev->osif_fisa_rx) + /* on failure send it via regular path */ + vdev->osif_fisa_rx(soc, vdev, nbuf_head); + else + vdev->osif_rx(vdev->osif_vdev, nbuf_head); +} + +#else +/** + * dp_rx_check_delivery_to_stack() - Deliver pkts to network + * using the appropriate call back functions. + * @soc: soc + * @vdev: vdev + * @peer: peer + * @nbuf_head: skb list head + * @nbuf_tail: skb list tail + * + * Check the return status of the call back function and drop + * the packets if the return status indicates a failure. + * + * Return: None + */ +static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, + struct dp_vdev *vdev, + struct dp_peer *peer, + qdf_nbuf_t nbuf_head) +{ + int num_nbuf = 0; + QDF_STATUS ret_val = QDF_STATUS_E_FAILURE; + + /* Function pointer initialized only when FISA is enabled */ + if (vdev->osif_fisa_rx) + /* on failure send it via regular path */ + ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head); + else if (vdev->osif_rx) + ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head); + + if (!QDF_IS_STATUS_SUCCESS(ret_val)) { + num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); + DP_STATS_INC(soc, rx.err.rejected, num_nbuf); + if (peer) + DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf); + } +} +#endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */ + +void dp_rx_deliver_to_stack(struct dp_soc *soc, + struct dp_vdev *vdev, + struct dp_peer *peer, + qdf_nbuf_t nbuf_head, + qdf_nbuf_t nbuf_tail) +{ + int num_nbuf = 0; + + if (qdf_unlikely(!vdev || vdev->delete.pending)) { + num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head); + /* + * This is a special case where vdev is invalid, + * so we cannot know the pdev to which this packet + * belonged. Hence we update the soc rx error stats. + */ + DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf); + return; + } + + /* + * highly unlikely to have a vdev without a registered rx + * callback function. if so let us free the nbuf_list. + */ + if (qdf_unlikely(!vdev->osif_rx)) { + if (peer && dp_rx_is_peer_cache_bufq_supported()) { + dp_rx_enqueue_rx(peer, nbuf_head); + } else { + num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, + nbuf_head); + DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf); + } + return; + } + + if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) || + (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) { + vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head, + &nbuf_tail, peer->mac_addr.raw); + } + + dp_rx_check_delivery_to_stack(soc, vdev, peer, nbuf_head); +} + +/** + * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware. + * @nbuf: pointer to the first msdu of an amsdu. + * @rx_tlv_hdr: pointer to the start of RX TLV headers. + * + * The ipsumed field of the skb is set based on whether HW validated the + * IP/TCP/UDP checksum. + * + * Return: void + */ +static inline void dp_rx_cksum_offload(struct dp_pdev *pdev, + qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr) +{ + qdf_nbuf_rx_cksum_t cksum = {0}; + bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr); + bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr); + + if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) { + cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY; + qdf_nbuf_set_rx_cksum(nbuf, &cksum); + } else { + DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err); + DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er); + } +} + +#ifdef VDEV_PEER_PROTOCOL_COUNT +#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) \ +{ \ + qdf_nbuf_t nbuf_local; \ + struct dp_peer *peer_local; \ + struct dp_vdev *vdev_local = vdev_hdl; \ + do { \ + if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \ + break; \ + nbuf_local = nbuf; \ + peer_local = peer; \ + if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \ + break; \ + else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \ + break; \ + dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \ + (nbuf_local), \ + (peer_local), 0, 1); \ + } while (0); \ +} +#else +#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) +#endif + +/** + * dp_rx_msdu_stats_update() - update per msdu stats. + * @soc: core txrx main context + * @nbuf: pointer to the first msdu of an amsdu. + * @rx_tlv_hdr: pointer to the start of RX TLV headers. + * @peer: pointer to the peer object. + * @ring_id: reo dest ring number on which pkt is reaped. + * @tid_stats: per tid rx stats. + * + * update all the per msdu stats for that nbuf. + * Return: void + */ +static void dp_rx_msdu_stats_update(struct dp_soc *soc, + qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, + struct dp_peer *peer, + uint8_t ring_id, + struct cdp_tid_rx_stats *tid_stats) +{ + bool is_ampdu, is_not_amsdu; + uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; + struct dp_vdev *vdev = peer->vdev; + qdf_ether_header_t *eh; + uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); + + dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, peer); + is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & + qdf_nbuf_is_rx_chfrag_end(nbuf); + + DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len); + DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu); + DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu); + DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf)); + + tid_stats->msdu_cnt++; + if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) && + (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { + eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); + DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len); + tid_stats->mcast_msdu_cnt++; + if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { + DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len); + tid_stats->bcast_msdu_cnt++; + } + } + + /* + * currently we can return from here as we have similar stats + * updated at per ppdu level instead of msdu level + */ + if (!soc->process_rx_status) + return; + + is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr); + DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu); + DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu)); + + sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr); + mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr); + tid = qdf_nbuf_get_tid_val(nbuf); + bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr); + reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc, + rx_tlv_hdr); + nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); + pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr); + + DP_STATS_INCC(peer, rx.rx_mpdu_cnt[mcs], 1, + ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf))); + DP_STATS_INCC(peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1, + ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf))); + DP_STATS_INC(peer, rx.bw[bw], 1); + /* + * only if nss > 0 and pkt_type is 11N/AC/AX, + * then increase index [nss - 1] in array counter. + */ + if (nss > 0 && (pkt_type == DOT11_N || + pkt_type == DOT11_AC || + pkt_type == DOT11_AX)) + DP_STATS_INC(peer, rx.nss[nss - 1], 1); + + DP_STATS_INC(peer, rx.sgi_count[sgi], 1); + DP_STATS_INCC(peer, rx.err.mic_err, 1, + hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr)); + DP_STATS_INCC(peer, rx.err.decrypt_err, 1, + hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr)); + + DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1); + DP_STATS_INC(peer, rx.reception_type[reception_type], 1); + + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs < MAX_MCS) && (pkt_type == DOT11_AX))); + + if ((soc->process_rx_status) && + hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) { +#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE + if (!vdev->pdev) + return; + + dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc, + &peer->stats, peer->peer_ids[0], + UPDATE_PEER_STATS, + vdev->pdev->pdev_id); +#endif + + } +} + +static inline bool is_sa_da_idx_valid(struct dp_soc *soc, + uint8_t *rx_tlv_hdr, + qdf_nbuf_t nbuf, + struct hal_rx_msdu_metadata msdu_info) +{ + if ((qdf_nbuf_is_sa_valid(nbuf) && + (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) || + (!qdf_nbuf_is_da_mcbc(nbuf) && + qdf_nbuf_is_da_valid(nbuf) && + (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)))) + return false; + + return true; +} + +#ifndef WDS_VENDOR_EXTENSION +int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, + struct dp_vdev *vdev, + struct dp_peer *peer) +{ + return 1; +} +#endif + +#ifdef RX_DESC_DEBUG_CHECK +/** + * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr + * corruption + * + * @ring_desc: REO ring descriptor + * @rx_desc: Rx descriptor + * + * Return: NONE + */ +static inline +QDF_STATUS dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc, + struct dp_rx_desc *rx_desc) +{ + struct hal_buf_info hbi; + + hal_rx_reo_buf_paddr_get(ring_desc, &hbi); + /* Sanity check for possible buffer paddr corruption */ + if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr)) + return QDF_STATUS_SUCCESS; + + return QDF_STATUS_E_FAILURE; +} +#else +static inline +QDF_STATUS dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc, + struct dp_rx_desc *rx_desc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT +static inline +bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped) +{ + bool limit_hit = false; + struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx; + + limit_hit = + (num_reaped >= cfg->rx_reap_loop_pkt_limit) ? true : false; + + if (limit_hit) + DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1) + + return limit_hit; +} + +static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc) +{ + return soc->wlan_cfg_ctx->rx_enable_eol_data_check; +} + +#else +static inline +bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped) +{ + return false; +} + +static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc) +{ + return false; +} + +#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */ + +#ifdef DP_RX_PKT_NO_PEER_DELIVER +/** + * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if + * no corresbonding peer found + * @soc: core txrx main context + * @nbuf: pkt skb pointer + * + * This function will try to deliver some RX special frames to stack + * even there is no peer matched found. for instance, LFR case, some + * eapol data will be sent to host before peer_map done. + * + * Return: None + */ +static +void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) +{ + uint16_t peer_id; + uint8_t vdev_id; + struct dp_vdev *vdev; + uint32_t l2_hdr_offset = 0; + uint16_t msdu_len = 0; + uint32_t pkt_len = 0; + uint8_t *rx_tlv_hdr; + uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP | + FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP; + + peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); + if (peer_id > soc->max_peers) + goto deliver_fail; + + vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); + vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + if (!vdev || vdev->delete.pending || !vdev->osif_rx) + goto deliver_fail; + + if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) + goto deliver_fail; + + rx_tlv_hdr = qdf_nbuf_data(nbuf); + l2_hdr_offset = + hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); + + msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); + pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN; + QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; + + qdf_nbuf_set_pktlen(nbuf, pkt_len); + qdf_nbuf_pull_head(nbuf, + RX_PKT_TLVS_LEN + + l2_hdr_offset); + + if (dp_rx_is_special_frame(nbuf, frame_mask)) { + qdf_nbuf_set_exc_frame(nbuf, 1); + if (QDF_STATUS_SUCCESS != + vdev->osif_rx(vdev->osif_vdev, nbuf)) + goto deliver_fail; + DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1); + return; + } + +deliver_fail: + DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, + QDF_NBUF_CB_RX_PKT_LEN(nbuf)); + qdf_nbuf_free(nbuf); +} +#else +static inline +void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) +{ + DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, + QDF_NBUF_CB_RX_PKT_LEN(nbuf)); + qdf_nbuf_free(nbuf); +} +#endif + +/** + * dp_rx_srng_get_num_pending() - get number of pending entries + * @hal_soc: hal soc opaque pointer + * @hal_ring: opaque pointer to the HAL Rx Ring + * @num_entries: number of entries in the hal_ring. + * @near_full: pointer to a boolean. This is set if ring is near full. + * + * The function returns the number of entries in a destination ring which are + * yet to be reaped. The function also checks if the ring is near full. + * If more than half of the ring needs to be reaped, the ring is considered + * approaching full. + * The function useses hal_srng_dst_num_valid_locked to get the number of valid + * entries. It should not be called within a SRNG lock. HW pointer value is + * synced into cached_hp. + * + * Return: Number of pending entries if any + */ +static +uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc, + hal_ring_handle_t hal_ring_hdl, + uint32_t num_entries, + bool *near_full) +{ + uint32_t num_pending = 0; + + num_pending = hal_srng_dst_num_valid_locked(hal_soc, + hal_ring_hdl, + true); + + if (num_entries && (num_pending >= num_entries >> 1)) + *near_full = true; + else + *near_full = false; + + return num_pending; +} + +#ifdef WLAN_SUPPORT_RX_FISA +void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding) +{ + QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding; + qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN); +} + +/** + * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb + * @nbuf: pkt skb pointer + * @l3_padding: l3 padding + * + * Return: None + */ +static inline +void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding) +{ + QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding; +} +#else +void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding) +{ + qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN); +} + +static inline +void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding) +{ +} +#endif + +#ifdef DP_RX_DROP_RAW_FRM +/** + * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop + * @nbuf: pkt skb pointer + * + * Return: true - raw frame, dropped + * false - not raw frame, do nothing + */ +static inline +bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) +{ + if (qdf_nbuf_is_raw_frame(nbuf)) { + qdf_nbuf_free(nbuf); + return true; + } + + return false; +} +#else +static inline +bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) +{ + return false; +} +#endif + +#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY +/** + * dp_rx_ring_record_entry() - Record an entry into the rx ring history. + * @soc: Datapath soc structure + * @ring_num: REO ring number + * @ring_desc: REO ring descriptor + * + * Returns: None + */ +static inline void +dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, + hal_ring_desc_t ring_desc) +{ + struct dp_buf_info_record *record; + uint8_t rbm; + struct hal_buf_info hbi; + uint32_t idx; + + if (qdf_unlikely(!soc->rx_ring_history[ring_num])) + return; + + hal_rx_reo_buf_paddr_get(ring_desc, &hbi); + rbm = hal_rx_ret_buf_manager_get(ring_desc); + + idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index, + DP_RX_HIST_MAX); + + /* No NULL check needed for record since its an array */ + record = &soc->rx_ring_history[ring_num]->entry[idx]; + + record->timestamp = qdf_get_log_timestamp(); + record->hbi.paddr = hbi.paddr; + record->hbi.sw_cookie = hbi.sw_cookie; + record->hbi.rbm = rbm; +} +#else +static inline void +dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, + hal_ring_desc_t ring_desc) +{ +} +#endif + +#ifdef DISABLE_EAPOL_INTRABSS_FWD +/* + * dp_rx_intrabss_fwd_wrapper() - Wrapper API for intrabss fwd. For EAPOL + * pkt with DA not equal to vdev mac addr, fwd is not allowed. + * @soc: core txrx main context + * @ta_peer: source peer entry + * @rx_tlv_hdr: start address of rx tlvs + * @nbuf: nbuf that has to be intrabss forwarded + * @msdu_metadata: msdu metadata + * + * Return: true if it is forwarded else false + */ +static inline +bool dp_rx_intrabss_fwd_wrapper(struct dp_soc *soc, struct dp_peer *ta_peer, + uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, + struct hal_rx_msdu_metadata msdu_metadata) +{ + if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) && + qdf_mem_cmp(qdf_nbuf_data(nbuf) + + QDF_NBUF_DEST_MAC_OFFSET, + ta_peer->vdev->mac_addr.raw, + QDF_MAC_ADDR_SIZE))) { + qdf_nbuf_free(nbuf); + DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1); + return true; + } + + return dp_rx_intrabss_fwd(soc, ta_peer, rx_tlv_hdr, nbuf, + msdu_metadata); + +} +#define DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata) \ + dp_rx_intrabss_fwd_wrapper(soc, peer, rx_tlv_hdr, nbuf, \ + msdu_metadata) +#else +#define DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata) \ + dp_rx_intrabss_fwd(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata) +#endif + +/** + * dp_rx_process() - Brain of the Rx processing functionality + * Called from the bottom half (tasklet/NET_RX_SOFTIRQ) + * @int_ctx: per interrupt context + * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced + * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring. + * @quota: No. of units (packets) that can be serviced in one shot. + * + * This function implements the core of Rx functionality. This is + * expected to handle only non-error frames. + * + * Return: uint32_t: No. of elements processed + */ +uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl, + uint8_t reo_ring_num, uint32_t quota) +{ + hal_ring_desc_t ring_desc; + hal_soc_handle_t hal_soc; + struct dp_rx_desc *rx_desc = NULL; + qdf_nbuf_t nbuf, next; + bool near_full; + union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT]; + union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT]; + uint32_t num_pending; + uint32_t rx_bufs_used = 0, rx_buf_cookie; + uint16_t msdu_len = 0; + uint16_t peer_id; + uint8_t vdev_id; + struct dp_peer *peer; + struct dp_vdev *vdev; + uint32_t pkt_len = 0; + struct hal_rx_mpdu_desc_info mpdu_desc_info; + struct hal_rx_msdu_desc_info msdu_desc_info; + enum hal_reo_error_status error; + uint32_t peer_mdata; + uint8_t *rx_tlv_hdr; + uint32_t rx_bufs_reaped[MAX_PDEV_CNT]; + uint8_t mac_id = 0; + struct dp_pdev *rx_pdev; + struct dp_srng *dp_rxdma_srng; + struct rx_desc_pool *rx_desc_pool; + struct dp_soc *soc = int_ctx->soc; + uint8_t ring_id = 0; + uint8_t core_id = 0; + struct cdp_tid_rx_stats *tid_stats; + qdf_nbuf_t nbuf_head; + qdf_nbuf_t nbuf_tail; + qdf_nbuf_t deliver_list_head; + qdf_nbuf_t deliver_list_tail; + uint32_t num_rx_bufs_reaped = 0; + uint32_t intr_id; + struct hif_opaque_softc *scn; + int32_t tid = 0; + bool is_prev_msdu_last = true; + uint32_t num_entries_avail = 0; + uint32_t rx_ol_pkt_cnt = 0; + uint32_t num_entries = 0; + struct hal_rx_msdu_metadata msdu_metadata; + QDF_STATUS status; + + DP_HIST_INIT(); + + qdf_assert_always(soc && hal_ring_hdl); + hal_soc = soc->hal_soc; + qdf_assert_always(hal_soc); + + scn = soc->hif_handle; + hif_pm_runtime_mark_dp_rx_busy(scn); + intr_id = int_ctx->dp_intr_id; + num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl); + +more_data: + /* reset local variables here to be re-used in the function */ + nbuf_head = NULL; + nbuf_tail = NULL; + deliver_list_head = NULL; + deliver_list_tail = NULL; + peer = NULL; + vdev = NULL; + num_rx_bufs_reaped = 0; + + qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped)); + qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info)); + qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info)); + qdf_mem_zero(head, sizeof(head)); + qdf_mem_zero(tail, sizeof(tail)); + + if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { + + /* + * Need API to convert from hal_ring pointer to + * Ring Type / Ring Id combo + */ + DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("HAL RING Access Failed -- %pK"), hal_ring_hdl); + goto done; + } + + /* + * start reaping the buffers from reo ring and queue + * them in per vdev queue. + * Process the received pkts in a different per vdev loop. + */ + while (qdf_likely(quota && + (ring_desc = hal_srng_dst_peek(hal_soc, + hal_ring_hdl)))) { + + error = HAL_RX_ERROR_STATUS_GET(ring_desc); + ring_id = hal_srng_ring_id_get(hal_ring_hdl); + + if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("HAL RING 0x%pK:error %d"), hal_ring_hdl, error); + DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1); + /* Don't know how to deal with this -- assert */ + qdf_assert(0); + } + + dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc); + rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc); + status = dp_rx_cookie_check_and_invalidate(ring_desc); + if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { + DP_STATS_INC(soc, rx.err.stale_cookie, 1); + break; + } + + rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie); + status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl, + ring_desc, rx_desc); + if (QDF_IS_STATUS_ERROR(status)) { + if (qdf_unlikely(rx_desc && rx_desc->nbuf)) { + qdf_assert_always(rx_desc->unmapped); + dp_ipa_handle_rx_buf_smmu_mapping(soc, + rx_desc->nbuf, + false); + qdf_nbuf_unmap_single(soc->osdev, + rx_desc->nbuf, + QDF_DMA_FROM_DEVICE); + rx_desc->unmapped = 1; + qdf_nbuf_free(rx_desc->nbuf); + dp_rx_add_to_free_desc_list( + &head[rx_desc->pool_id], + &tail[rx_desc->pool_id], + rx_desc); + } + hal_srng_dst_get_next(hal_soc, hal_ring_hdl); + continue; + } + + /* + * this is a unlikely scenario where the host is reaping + * a descriptor which it already reaped just a while ago + * but is yet to replenish it back to HW. + * In this case host will dump the last 128 descriptors + * including the software descriptor rx_desc and assert. + */ + + if (qdf_unlikely(!rx_desc->in_use)) { + DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1); + dp_info_rl("Reaping rx_desc not in use!"); + dp_rx_dump_info_and_assert(soc, hal_ring_hdl, + ring_desc, rx_desc); + /* ignore duplicate RX desc and continue to process */ + /* Pop out the descriptor */ + hal_srng_dst_get_next(hal_soc, hal_ring_hdl); + continue; + } + + status = dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc); + if (QDF_IS_STATUS_ERROR(status)) { + DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); + rx_desc->in_err_state = 1; + hal_srng_dst_get_next(hal_soc, hal_ring_hdl); + continue; + } + + if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) { + dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie); + DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1); + dp_rx_dump_info_and_assert(soc, hal_ring_hdl, + ring_desc, rx_desc); + } + + /* Get MPDU DESC info */ + hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info); + + /* Get MSDU DESC info */ + hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info); + + if (qdf_unlikely(msdu_desc_info.msdu_flags & + HAL_MSDU_F_MSDU_CONTINUATION)) { + /* previous msdu has end bit set, so current one is + * the new MPDU + */ + if (is_prev_msdu_last) { + /* Get number of entries available in HW ring */ + num_entries_avail = + hal_srng_dst_num_valid(hal_soc, + hal_ring_hdl, 1); + + /* For new MPDU check if we can read complete + * MPDU by comparing the number of buffers + * available and number of buffers needed to + * reap this MPDU + */ + if (((msdu_desc_info.msdu_len / + (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN) + + 1)) > num_entries_avail) { + DP_STATS_INC( + soc, + rx.msdu_scatter_wait_break, + 1); + break; + } + is_prev_msdu_last = false; + } + + } + + /* + * move unmap after scattered msdu waiting break logic + * in case double skb unmap happened. + */ + dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf, false); + qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf, + QDF_DMA_FROM_DEVICE); + rx_desc->unmapped = 1; + + core_id = smp_processor_id(); + DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1); + + if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT) + qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1); + + if (qdf_unlikely(mpdu_desc_info.mpdu_flags & + HAL_MPDU_F_RAW_AMPDU)) + qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1); + + if (!is_prev_msdu_last && + msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) + is_prev_msdu_last = true; + + /* Pop out the descriptor*/ + hal_srng_dst_get_next(hal_soc, hal_ring_hdl); + + rx_bufs_reaped[rx_desc->pool_id]++; + peer_mdata = mpdu_desc_info.peer_meta_data; + QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) = + DP_PEER_METADATA_PEER_ID_GET(peer_mdata); + QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) = + DP_PEER_METADATA_VDEV_ID_GET(peer_mdata); + + /* + * save msdu flags first, last and continuation msdu in + * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and + * length to nbuf->cb. This ensures the info required for + * per pkt processing is always in the same cache line. + * This helps in improving throughput for smaller pkt + * sizes. + */ + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU) + qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) + qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) + qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC) + qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID) + qdf_nbuf_set_da_valid(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID) + qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1); + + qdf_nbuf_set_tid_val(rx_desc->nbuf, + HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc)); + qdf_nbuf_set_rx_reo_dest_ind( + rx_desc->nbuf, + HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc)); + + QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len; + + QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num; + + DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf); + + /* + * if continuation bit is set then we have MSDU spread + * across multiple buffers, let us not decrement quota + * till we reap all buffers of that MSDU. + */ + if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf))) + quota -= 1; + + dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], + &tail[rx_desc->pool_id], + rx_desc); + + num_rx_bufs_reaped++; + /* + * only if complete msdu is received for scatter case, + * then allow break. + */ + if (is_prev_msdu_last && + dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped)) + break; + } +done: + dp_srng_access_end(int_ctx, soc, hal_ring_hdl); + + for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { + /* + * continue with next mac_id if no pkts were reaped + * from that pool + */ + if (!rx_bufs_reaped[mac_id]) + continue; + + dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; + + rx_desc_pool = &soc->rx_desc_buf[mac_id]; + + dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, + rx_desc_pool, rx_bufs_reaped[mac_id], + &head[mac_id], &tail[mac_id]); + } + + dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]); + /* Peer can be NULL is case of LFR */ + if (qdf_likely(peer)) + vdev = NULL; + + /* + * BIG loop where each nbuf is dequeued from global queue, + * processed and queued back on a per vdev basis. These nbufs + * are sent to stack as and when we run out of nbufs + * or a new nbuf dequeued from global queue has a different + * vdev when compared to previous nbuf. + */ + nbuf = nbuf_head; + while (nbuf) { + next = nbuf->next; + if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) { + nbuf = next; + DP_STATS_INC(soc, rx.err.raw_frm_drop, 1); + continue; + } + + rx_tlv_hdr = qdf_nbuf_data(nbuf); + vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); + + if (deliver_list_head && vdev && (vdev->vdev_id != vdev_id)) { + dp_rx_deliver_to_stack(soc, vdev, peer, + deliver_list_head, + deliver_list_tail); + deliver_list_head = NULL; + deliver_list_tail = NULL; + } + + /* Get TID from struct cb->tid_val, save to tid */ + if (qdf_nbuf_is_rx_chfrag_start(nbuf)) { + tid = qdf_nbuf_get_tid_val(nbuf); + if (tid >= CDP_MAX_DATA_TIDS) { + DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1); + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + } + + peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); + + if (qdf_unlikely(!peer)) { + peer = dp_peer_find_by_id(soc, peer_id); + } else if (peer && peer->peer_ids[0] != peer_id) { + dp_peer_unref_del_find_by_id(peer); + peer = dp_peer_find_by_id(soc, peer_id); + } + + if (peer) { + QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false; + qdf_dp_trace_set_track(nbuf, QDF_RX); + QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1; + QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) = + QDF_NBUF_RX_PKT_DATA_TRACK; + } + + rx_bufs_used++; + + if (qdf_likely(peer)) { + vdev = peer->vdev; + } else { + nbuf->next = NULL; + dp_rx_deliver_to_stack_no_peer(soc, nbuf); + nbuf = next; + continue; + } + + if (qdf_unlikely(!vdev)) { + qdf_nbuf_free(nbuf); + nbuf = next; + DP_STATS_INC(soc, rx.err.invalid_vdev, 1); + continue; + } + + rx_pdev = vdev->pdev; + DP_RX_TID_SAVE(nbuf, tid); + if (qdf_unlikely(rx_pdev->delay_stats_flag)) + qdf_nbuf_set_timestamp(nbuf); + + ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); + tid_stats = + &rx_pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; + + /* + * Check if DMA completed -- msdu_done is the last bit + * to be written + */ + if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(nbuf))) { + if (qdf_unlikely(!hal_rx_attn_msdu_done_get( + rx_tlv_hdr))) { + dp_err_rl("MSDU DONE failure"); + DP_STATS_INC(soc, rx.err.msdu_done_fail, 1); + hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr, + QDF_TRACE_LEVEL_INFO); + tid_stats->fail_cnt[MSDU_DONE_FAILURE]++; + qdf_assert(0); + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } else if (qdf_unlikely(hal_rx_attn_msdu_len_err_get( + rx_tlv_hdr))) { + DP_STATS_INC(soc, rx.err.msdu_len_err, 1); + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + } + + DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id); + /* + * First IF condition: + * 802.11 Fragmented pkts are reinjected to REO + * HW block as SG pkts and for these pkts we only + * need to pull the RX TLVS header length. + * Second IF condition: + * The below condition happens when an MSDU is spread + * across multiple buffers. This can happen in two cases + * 1. The nbuf size is smaller then the received msdu. + * ex: we have set the nbuf size to 2048 during + * nbuf_alloc. but we received an msdu which is + * 2304 bytes in size then this msdu is spread + * across 2 nbufs. + * + * 2. AMSDUs when RAW mode is enabled. + * ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread + * across 1st nbuf and 2nd nbuf and last MSDU is + * spread across 2nd nbuf and 3rd nbuf. + * + * for these scenarios let us create a skb frag_list and + * append these buffers till the last MSDU of the AMSDU + * Third condition: + * This is the most likely case, we receive 802.3 pkts + * decapsulated by HW, here we need to set the pkt length. + */ + hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata); + if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { + bool is_mcbc, is_sa_vld, is_da_vld; + + is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, + rx_tlv_hdr); + is_sa_vld = + hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, + rx_tlv_hdr); + is_da_vld = + hal_rx_msdu_end_da_is_valid_get(soc->hal_soc, + rx_tlv_hdr); + + qdf_nbuf_set_da_mcbc(nbuf, is_mcbc); + qdf_nbuf_set_da_valid(nbuf, is_da_vld); + qdf_nbuf_set_sa_valid(nbuf, is_sa_vld); + + qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN); + } else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) { + msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); + nbuf = dp_rx_sg_create(soc, nbuf); + next = nbuf->next; + + if (qdf_nbuf_is_raw_frame(nbuf)) { + DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); + DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len); + } else { + qdf_nbuf_free(nbuf); + DP_STATS_INC(soc, rx.err.scatter_msdu, 1); + dp_info_rl("scatter msdu len %d, dropped", + msdu_len); + nbuf = next; + continue; + } + } else { + + msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); + pkt_len = msdu_len + + msdu_metadata.l3_hdr_pad + + RX_PKT_TLVS_LEN; + + qdf_nbuf_set_pktlen(nbuf, pkt_len); + dp_rx_skip_tlvs(nbuf, msdu_metadata.l3_hdr_pad); + } + + /* + * process frame for mulitpass phrase processing + */ + if (qdf_unlikely(vdev->multipass_en)) { + if (dp_rx_multipass_process(peer, nbuf, tid) == false) { + DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1); + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + } + + if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("Policy Check Drop pkt")); + tid_stats->fail_cnt[POLICY_CHECK_DROP]++; + /* Drop & free packet */ + qdf_nbuf_free(nbuf); + /* Statistics */ + nbuf = next; + continue; + } + + if (qdf_unlikely(peer && (peer->nawds_enabled) && + (qdf_nbuf_is_da_mcbc(nbuf)) && + (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc, + rx_tlv_hdr) == + false))) { + tid_stats->fail_cnt[NAWDS_MCAST_DROP]++; + DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + /* + * Drop non-EAPOL frames from unauthorized peer. + */ + if (qdf_likely(peer) && qdf_unlikely(!peer->authorize)) { + bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) || + qdf_nbuf_is_ipv4_wapi_pkt(nbuf); + + if (!is_eapol) { + DP_STATS_INC(soc, + rx.err.peer_unauth_rx_pkt_drop, + 1); + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + } + + /* + * Drop non-EAPOL frames from unauthorized peer. + */ + if (qdf_likely(peer) && qdf_unlikely(!peer->authorize)) { + bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) || + qdf_nbuf_is_ipv4_wapi_pkt(nbuf); + + if (!is_eapol) { + DP_STATS_INC(soc, + rx.err.peer_unauth_rx_pkt_drop, + 1); + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + } + + if (soc->process_rx_status) + dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr); + + /* Update the protocol tag in SKB based on CCE metadata */ + dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, + reo_ring_num, false, true); + + /* Update the flow tag in SKB based on FSE metadata */ + dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); + + dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, + ring_id, tid_stats); + + if (qdf_unlikely(vdev->mesh_vdev)) { + if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) + == QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_INFO_MED, + FL("mesh pkt filtered")); + tid_stats->fail_cnt[MESH_FILTER_DROP]++; + DP_STATS_INC(vdev->pdev, dropped.mesh_filter, + 1); + + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); + } + + if (qdf_likely(vdev->rx_decap_type == + htt_cmn_pkt_type_ethernet) && + qdf_likely(!vdev->mesh_vdev)) { + /* WDS Destination Address Learning */ + dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf); + + /* Due to HW issue, sometimes we see that the sa_idx + * and da_idx are invalid with sa_valid and da_valid + * bits set + * + * in this case we also see that value of + * sa_sw_peer_id is set as 0 + * + * Drop the packet if sa_idx and da_idx OOB or + * sa_sw_peerid is 0 + */ + if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf, + msdu_metadata)) { + qdf_nbuf_free(nbuf); + nbuf = next; + DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1); + continue; + } + /* WDS Source Port Learning */ + if (qdf_likely(vdev->wds_enabled)) + dp_rx_wds_srcport_learn(soc, + rx_tlv_hdr, + peer, + nbuf, + msdu_metadata); + + /* Intrabss-fwd */ + if (dp_rx_check_ap_bridge(vdev)) + if (DP_RX_INTRABSS_FWD(soc, + peer, + rx_tlv_hdr, + nbuf, + msdu_metadata)) { + nbuf = next; + tid_stats->intrabss_cnt++; + continue; /* Get next desc */ + } + } + + dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt); + + DP_RX_LIST_APPEND(deliver_list_head, + deliver_list_tail, + nbuf); + DP_STATS_INC_PKT(peer, rx.to_stack, 1, + QDF_NBUF_CB_RX_PKT_LEN(nbuf)); + + tid_stats->delivered_to_stack++; + nbuf = next; + } + + if (qdf_likely(deliver_list_head)) { + if (qdf_likely(peer)) + dp_rx_deliver_to_stack(soc, vdev, peer, + deliver_list_head, + deliver_list_tail); + else { + nbuf = deliver_list_head; + while (nbuf) { + next = nbuf->next; + nbuf->next = NULL; + dp_rx_deliver_to_stack_no_peer(soc, nbuf); + nbuf = next; + } + } + } + + if (qdf_likely(peer)) + dp_peer_unref_del_find_by_id(peer); + + if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) { + if (quota) { + num_pending = + dp_rx_srng_get_num_pending(hal_soc, + hal_ring_hdl, + num_entries, + &near_full); + if (num_pending) { + DP_STATS_INC(soc, rx.hp_oos2, 1); + + if (!hif_exec_should_yield(scn, intr_id)) + goto more_data; + + if (qdf_unlikely(near_full)) { + DP_STATS_INC(soc, rx.near_full, 1); + goto more_data; + } + } + } + + if (vdev && vdev->osif_fisa_flush) + vdev->osif_fisa_flush(soc, reo_ring_num); + + if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) { + vdev->osif_gro_flush(vdev->osif_vdev, + reo_ring_num); + } + } + + /* Update histogram statistics by looping through pdev's */ + DP_RX_HIST_STATS_PER_PDEV(); + + return rx_bufs_used; /* Assume no scale factor for now */ +} + +QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev) +{ + QDF_STATUS ret; + + if (vdev->osif_rx_flush) { + ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id); + if (!QDF_IS_STATUS_SUCCESS(ret)) { + dp_err("Failed to flush rx pkts for vdev %d\n", + vdev->vdev_id); + return ret; + } + } + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_rx_pdev_detach() - detach dp rx + * @pdev: core txrx pdev context + * + * This function will detach DP RX into main device context + * will free DP Rx resources. + * + * Return: void + */ +void +dp_rx_pdev_detach(struct dp_pdev *pdev) +{ + uint8_t mac_for_pdev = pdev->lmac_id; + struct dp_soc *soc = pdev->soc; + struct rx_desc_pool *rx_desc_pool; + + rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; + + if (rx_desc_pool->pool_size != 0) { + if (!dp_is_soc_reinit(soc)) + dp_rx_desc_nbuf_and_pool_free(soc, mac_for_pdev, + rx_desc_pool); + else + dp_rx_desc_nbuf_free(soc, rx_desc_pool); + } + + return; +} + +static QDF_STATUS +dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, qdf_nbuf_t *nbuf, + struct dp_pdev *dp_pdev, + struct rx_desc_pool *rx_desc_pool) +{ + qdf_dma_addr_t paddr; + QDF_STATUS ret = QDF_STATUS_E_FAILURE; + + *nbuf = qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size, + RX_BUFFER_RESERVATION, + rx_desc_pool->buf_alignment, FALSE); + if (!(*nbuf)) { + dp_err("nbuf alloc failed"); + DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); + return ret; + } + + ret = qdf_nbuf_map_single(dp_soc->osdev, *nbuf, + QDF_DMA_FROM_DEVICE); + if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { + qdf_nbuf_free(*nbuf); + dp_err("nbuf map failed"); + DP_STATS_INC(dp_pdev, replenish.map_err, 1); + return ret; + } + + paddr = qdf_nbuf_get_frag_paddr(*nbuf, 0); + + ret = check_x86_paddr(dp_soc, nbuf, &paddr, rx_desc_pool); + if (ret == QDF_STATUS_E_FAILURE) { + qdf_nbuf_unmap_single(dp_soc->osdev, *nbuf, + QDF_DMA_FROM_DEVICE); + qdf_nbuf_free(*nbuf); + dp_err("nbuf check x86 failed"); + DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); + return ret; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, + struct dp_srng *dp_rxdma_srng, + struct rx_desc_pool *rx_desc_pool, + uint32_t num_req_buffers) +{ + struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); + hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng; + union dp_rx_desc_list_elem_t *next; + void *rxdma_ring_entry; + qdf_dma_addr_t paddr; + qdf_nbuf_t *rx_nbuf_arr; + uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0; + uint32_t buffer_index, nbuf_ptrs_per_page; + qdf_nbuf_t nbuf; + QDF_STATUS ret; + int page_idx, total_pages; + union dp_rx_desc_list_elem_t *desc_list = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + + if (qdf_unlikely(!rxdma_srng)) { + DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); + return QDF_STATUS_E_FAILURE; + } + + dp_debug("requested %u RX buffers for driver attach", num_req_buffers); + + nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool, + num_req_buffers, &desc_list, &tail); + if (!nr_descs) { + dp_err("no free rx_descs in freelist"); + DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers); + return QDF_STATUS_E_NOMEM; + } + + dp_debug("got %u RX descs for driver attach", nr_descs); + + /* + * Try to allocate pointers to the nbuf one page at a time. + * Take pointers that can fit in one page of memory and + * iterate through the total descriptors that need to be + * allocated in order of pages. Reuse the pointers that + * have been allocated to fit in one page across each + * iteration to index into the nbuf. + */ + total_pages = (nr_descs * sizeof(*rx_nbuf_arr)) / PAGE_SIZE; + + /* + * Add an extra page to store the remainder if any + */ + if ((nr_descs * sizeof(*rx_nbuf_arr)) % PAGE_SIZE) + total_pages++; + rx_nbuf_arr = qdf_mem_malloc(PAGE_SIZE); + if (!rx_nbuf_arr) { + dp_err("failed to allocate nbuf array"); + DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); + QDF_BUG(0); + return QDF_STATUS_E_NOMEM; + } + nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*rx_nbuf_arr); + + for (page_idx = 0; page_idx < total_pages; page_idx++) { + qdf_mem_zero(rx_nbuf_arr, PAGE_SIZE); + + for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) { + /* + * The last page of buffer pointers may not be required + * completely based on the number of descriptors. Below + * check will ensure we are allocating only the + * required number of descriptors. + */ + if (nr_nbuf_total >= nr_descs) + break; + ret = dp_pdev_nbuf_alloc_and_map(dp_soc, + &rx_nbuf_arr[nr_nbuf], + dp_pdev, rx_desc_pool); + if (QDF_IS_STATUS_ERROR(ret)) + break; + + nr_nbuf_total++; + } + + hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); + + for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) { + rxdma_ring_entry = + hal_srng_src_get_next(dp_soc->hal_soc, + rxdma_srng); + qdf_assert_always(rxdma_ring_entry); + + next = desc_list->next; + nbuf = rx_nbuf_arr[buffer_index]; + paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); + + dp_rx_desc_prep(&desc_list->rx_desc, nbuf); + desc_list->rx_desc.in_use = 1; + dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc); + dp_rx_desc_update_dbg_info(&desc_list->rx_desc, + __func__, + RX_DESC_REPLENISHED); + + hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr, + desc_list->rx_desc.cookie, + rx_desc_pool->owner); + + dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, nbuf, true); + + desc_list = next; + } + + hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); + } + + dp_info("filled %u RX buffers for driver attach", nr_nbuf_total); + qdf_mem_free(rx_nbuf_arr); + + if (!nr_nbuf_total) { + dp_err("No nbuf's allocated"); + QDF_BUG(0); + return QDF_STATUS_E_RESOURCES; + } + + /* No need to count the number of bytes received during replenish. + * Therefore set replenish.pkts.bytes as 0. + */ + DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_rx_attach() - attach DP RX + * @pdev: core txrx pdev context + * + * This function will attach a DP RX instance into the main + * device (SOC) context. Will allocate dp rx resource and + * initialize resources. + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS +dp_rx_pdev_attach(struct dp_pdev *pdev) +{ + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + uint32_t rxdma_entries; + uint32_t rx_sw_desc_weight; + struct dp_srng *dp_rxdma_srng; + struct rx_desc_pool *rx_desc_pool; + QDF_STATUS ret_val; + int mac_for_pdev; + + if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "nss-wifi<4> skip Rx refil %d", pdev_id); + return QDF_STATUS_SUCCESS; + } + + pdev = soc->pdev_list[pdev_id]; + mac_for_pdev = pdev->lmac_id; + dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; + + rxdma_entries = dp_rxdma_srng->num_entries; + + soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; + + rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; + rx_sw_desc_weight = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx); + + rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE; + dp_rx_desc_pool_alloc(soc, mac_for_pdev, + rx_sw_desc_weight * rxdma_entries, + rx_desc_pool); + + rx_desc_pool->owner = DP_WBM2SW_RBM; + rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; + rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; + + /* For Rx buffers, WBM release ring is SW RING 3,for all pdev's */ + + ret_val = dp_rx_fst_attach(soc, pdev); + if ((ret_val != QDF_STATUS_SUCCESS) && + (ret_val != QDF_STATUS_E_NOSUPPORT)) { + QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR, + "RX Flow Search Table attach failed: pdev %d err %d", + pdev_id, ret_val); + return ret_val; + } + + return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng, + rx_desc_pool, rxdma_entries - 1); +} + +/* + * dp_rx_nbuf_prepare() - prepare RX nbuf + * @soc: core txrx main context + * @pdev: core txrx pdev context + * + * This function alloc & map nbuf for RX dma usage, retry it if failed + * until retry times reaches max threshold or succeeded. + * + * Return: qdf_nbuf_t pointer if succeeded, NULL if failed. + */ +qdf_nbuf_t +dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev) +{ + uint8_t *buf; + int32_t nbuf_retry_count; + QDF_STATUS ret; + qdf_nbuf_t nbuf = NULL; + + for (nbuf_retry_count = 0; nbuf_retry_count < + QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD; + nbuf_retry_count++) { + /* Allocate a new skb */ + nbuf = qdf_nbuf_alloc(soc->osdev, + RX_DATA_BUFFER_SIZE, + RX_BUFFER_RESERVATION, + RX_DATA_BUFFER_ALIGNMENT, + FALSE); + + if (!nbuf) { + DP_STATS_INC(pdev, + replenish.nbuf_alloc_fail, 1); + continue; + } + + buf = qdf_nbuf_data(nbuf); + + memset(buf, 0, RX_DATA_BUFFER_SIZE); + + ret = qdf_nbuf_map_single(soc->osdev, nbuf, + QDF_DMA_FROM_DEVICE); + + /* nbuf map failed */ + if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { + qdf_nbuf_free(nbuf); + DP_STATS_INC(pdev, replenish.map_err, 1); + continue; + } + /* qdf_nbuf alloc and map succeeded */ + break; + } + + /* qdf_nbuf still alloc or map failed */ + if (qdf_unlikely(nbuf_retry_count >= + QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD)) + return NULL; + + return nbuf; +} + +#ifdef DP_RX_SPECIAL_FRAME_NEED +bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer, + qdf_nbuf_t nbuf, uint32_t frame_mask, + uint8_t *rx_tlv_hdr) +{ + uint32_t l2_hdr_offset = 0; + uint16_t msdu_len = 0; + uint32_t skip_len; + + l2_hdr_offset = + hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr); + + if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { + skip_len = l2_hdr_offset; + } else { + msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); + skip_len = l2_hdr_offset + RX_PKT_TLVS_LEN; + qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len); + } + + QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1; + dp_rx_set_hdr_pad(nbuf, l2_hdr_offset); + qdf_nbuf_pull_head(nbuf, skip_len); + + if (dp_rx_is_special_frame(nbuf, frame_mask)) { + qdf_nbuf_set_exc_frame(nbuf, 1); + dp_rx_deliver_to_stack(soc, peer->vdev, peer, + nbuf, NULL); + return true; + } + + return false; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h new file mode 100644 index 0000000000000000000000000000000000000000..9e30da08facefb99d9fef1242fa2aee3c9325c61 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h @@ -0,0 +1,1349 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_RX_H +#define _DP_RX_H + +#include "hal_rx.h" +#include "dp_tx.h" +#include "dp_peer.h" +#include "dp_internal.h" + +#ifdef RXDMA_OPTIMIZATION +#ifndef RX_DATA_BUFFER_ALIGNMENT +#define RX_DATA_BUFFER_ALIGNMENT 128 +#endif +#ifndef RX_MONITOR_BUFFER_ALIGNMENT +#define RX_MONITOR_BUFFER_ALIGNMENT 128 +#endif +#else /* RXDMA_OPTIMIZATION */ +#define RX_DATA_BUFFER_ALIGNMENT 4 +#define RX_MONITOR_BUFFER_ALIGNMENT 4 +#endif /* RXDMA_OPTIMIZATION */ + +#ifdef QCA_HOST2FW_RXBUF_RING +#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM +/* RBM value used for re-injecting defragmented packets into REO */ +#define DP_DEFRAG_RBM HAL_RX_BUF_RBM_SW3_BM +#else +#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM +#define DP_DEFRAG_RBM DP_WBM2SW_RBM +#endif /* QCA_HOST2FW_RXBUF_RING */ + +#define RX_BUFFER_RESERVATION 0 + +#define DP_PEER_METADATA_PEER_ID_MASK 0x0000ffff +#define DP_PEER_METADATA_PEER_ID_SHIFT 0 +#define DP_PEER_METADATA_VDEV_ID_MASK 0x003f0000 +#define DP_PEER_METADATA_VDEV_ID_SHIFT 16 + +#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata) \ + (((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK) \ + >> DP_PEER_METADATA_PEER_ID_SHIFT) + +#define DP_PEER_METADATA_VDEV_ID_GET(_peer_metadata) \ + (((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK) \ + >> DP_PEER_METADATA_VDEV_ID_SHIFT) + +#define DP_RX_DESC_MAGIC 0xdec0de + +/** + * enum dp_rx_desc_state + * + * @RX_DESC_REPLENISH: rx desc replenished + * @RX_DESC_FREELIST: rx desc in freelist + */ +enum dp_rx_desc_state { + RX_DESC_REPLENISHED, + RX_DESC_IN_FREELIST, +}; + +/** + * struct dp_rx_desc_dbg_info + * + * @freelist_caller: name of the function that put the + * the rx desc in freelist + * @freelist_ts: timestamp when the rx desc is put in + * a freelist + * @replenish_caller: name of the function that last + * replenished the rx desc + * @replenish_ts: last replenish timestamp + */ +struct dp_rx_desc_dbg_info { + char freelist_caller[QDF_MEM_FUNC_NAME_SIZE]; + uint64_t freelist_ts; + char replenish_caller[QDF_MEM_FUNC_NAME_SIZE]; + uint64_t replenish_ts; +}; + +/** + * struct dp_rx_desc + * + * @nbuf : VA of the "skb" posted + * @rx_buf_start : VA of the original Rx buffer, before + * movement of any skb->data pointer + * @cookie : index into the sw array which holds + * the sw Rx descriptors + * Cookie space is 21 bits: + * lower 18 bits -- index + * upper 3 bits -- pool_id + * @pool_id : pool Id for which this allocated. + * Can only be used if there is no flow + * steering + * @in_use rx_desc is in use + * @unmapped used to mark rx_desc an unmapped if the corresponding + * nbuf is already unmapped + * @in_err_state : Nbuf sanity failed for this descriptor. + */ +struct dp_rx_desc { + qdf_nbuf_t nbuf; + uint8_t *rx_buf_start; + uint32_t cookie; + uint8_t pool_id; +#ifdef RX_DESC_DEBUG_CHECK + uint32_t magic; + struct dp_rx_desc_dbg_info *dbg_info; +#endif + uint8_t in_use:1, + unmapped:1, + in_err_state:1; +}; + +/* RX Descriptor Multi Page memory alloc related */ +#define DP_RX_DESC_OFFSET_NUM_BITS 8 +#define DP_RX_DESC_PAGE_ID_NUM_BITS 8 +#define DP_RX_DESC_POOL_ID_NUM_BITS 4 + +#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS +#define DP_RX_DESC_POOL_ID_SHIFT \ + (DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS) +#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \ + (((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT) +#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK \ + (((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \ + DP_RX_DESC_PAGE_ID_SHIFT) +#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \ + ((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1) +#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie) \ + (((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >> \ + DP_RX_DESC_POOL_ID_SHIFT) +#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie) \ + (((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >> \ + DP_RX_DESC_PAGE_ID_SHIFT) +#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie) \ + ((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK) + +#define RX_DESC_COOKIE_INDEX_SHIFT 0 +#define RX_DESC_COOKIE_INDEX_MASK 0x3ffff /* 18 bits */ +#define RX_DESC_COOKIE_POOL_ID_SHIFT 18 +#define RX_DESC_COOKIE_POOL_ID_MASK 0x1c0000 + +#define DP_RX_DESC_COOKIE_MAX \ + (RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK) + +#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie) \ + (((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >> \ + RX_DESC_COOKIE_POOL_ID_SHIFT) + +#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie) \ + (((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >> \ + RX_DESC_COOKIE_INDEX_SHIFT) + +#define FRAME_MASK_IPV4_ARP 1 +#define FRAME_MASK_IPV4_DHCP 2 +#define FRAME_MASK_IPV4_EAPOL 4 +#define FRAME_MASK_IPV6_DHCP 8 + +#define dp_rx_add_to_free_desc_list(head, tail, new) \ + __dp_rx_add_to_free_desc_list(head, tail, new, __func__) + +#define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \ + num_buffers, desc_list, tail) \ + __dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \ + num_buffers, desc_list, tail, __func__) + +#ifdef DP_RX_SPECIAL_FRAME_NEED +/** + * dp_rx_is_special_frame() - check is RX frame special needed + * + * @nbuf: RX skb pointer + * @frame_mask: the mask for speical frame needed + * + * Check is RX frame wanted matched with mask + * + * Return: true - special frame needed, false - no + */ +static inline +bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask) +{ + if (((frame_mask & FRAME_MASK_IPV4_ARP) && + qdf_nbuf_is_ipv4_arp_pkt(nbuf)) || + ((frame_mask & FRAME_MASK_IPV4_DHCP) && + qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) || + ((frame_mask & FRAME_MASK_IPV4_EAPOL) && + qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) || + ((frame_mask & FRAME_MASK_IPV6_DHCP) && + qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))) + return true; + + return false; +} + +/** + * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack + * if matches mask + * + * @soc: Datapath soc handler + * @peer: pointer to DP peer + * @nbuf: pointer to the skb of RX frame + * @frame_mask: the mask for speical frame needed + * @rx_tlv_hdr: start of rx tlv header + * + * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and + * single nbuf is expected. + * + * return: true - nbuf has been delivered to stack, false - not. + */ +bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer, + qdf_nbuf_t nbuf, uint32_t frame_mask, + uint8_t *rx_tlv_hdr); +#else +static inline +bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask) +{ + return false; +} + +static inline +bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer, + qdf_nbuf_t nbuf, uint32_t frame_mask, + uint8_t *rx_tlv_hdr) +{ + return false; +} +#endif + +/* DOC: Offset to obtain LLC hdr + * + * In the case of Wifi parse error + * to reach LLC header from beginning + * of VLAN tag we need to skip 8 bytes. + * Vlan_tag(4)+length(2)+length added + * by HW(2) = 8 bytes. + */ +#define DP_SKIP_VLAN 8 + +/** + * struct dp_rx_cached_buf - rx cached buffer + * @list: linked list node + * @buf: skb buffer + */ +struct dp_rx_cached_buf { + qdf_list_node_t node; + qdf_nbuf_t buf; +}; + +/* + *dp_rx_xor_block() - xor block of data + *@b: destination data block + *@a: source data block + *@len: length of the data to process + * + *Returns: None + */ +static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len) +{ + qdf_size_t i; + + for (i = 0; i < len; i++) + b[i] ^= a[i]; +} + +/* + *dp_rx_rotl() - rotate the bits left + *@val: unsigned integer input value + *@bits: number of bits + * + *Returns: Integer with left rotated by number of 'bits' + */ +static inline uint32_t dp_rx_rotl(uint32_t val, int bits) +{ + return (val << bits) | (val >> (32 - bits)); +} + +/* + *dp_rx_rotr() - rotate the bits right + *@val: unsigned integer input value + *@bits: number of bits + * + *Returns: Integer with right rotated by number of 'bits' + */ +static inline uint32_t dp_rx_rotr(uint32_t val, int bits) +{ + return (val >> bits) | (val << (32 - bits)); +} + +/* + * dp_set_rx_queue() - set queue_mapping in skb + * @nbuf: skb + * @queue_id: rx queue_id + * + * Return: void + */ +#ifdef QCA_OL_RX_MULTIQ_SUPPORT +static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id) +{ + qdf_nbuf_record_rx_queue(nbuf, queue_id); + return; +} +#else +static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id) +{ +} +#endif + +/* + *dp_rx_xswap() - swap the bits left + *@val: unsigned integer input value + * + *Returns: Integer with bits swapped + */ +static inline uint32_t dp_rx_xswap(uint32_t val) +{ + return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8); +} + +/* + *dp_rx_get_le32_split() - get little endian 32 bits split + *@b0: byte 0 + *@b1: byte 1 + *@b2: byte 2 + *@b3: byte 3 + * + *Returns: Integer with split little endian 32 bits + */ +static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2, + uint8_t b3) +{ + return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24); +} + +/* + *dp_rx_get_le32() - get little endian 32 bits + *@b0: byte 0 + *@b1: byte 1 + *@b2: byte 2 + *@b3: byte 3 + * + *Returns: Integer with little endian 32 bits + */ +static inline uint32_t dp_rx_get_le32(const uint8_t *p) +{ + return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]); +} + +/* + * dp_rx_put_le32() - put little endian 32 bits + * @p: destination char array + * @v: source 32-bit integer + * + * Returns: None + */ +static inline void dp_rx_put_le32(uint8_t *p, uint32_t v) +{ + p[0] = (v) & 0xff; + p[1] = (v >> 8) & 0xff; + p[2] = (v >> 16) & 0xff; + p[3] = (v >> 24) & 0xff; +} + +/* Extract michal mic block of data */ +#define dp_rx_michael_block(l, r) \ + do { \ + r ^= dp_rx_rotl(l, 17); \ + l += r; \ + r ^= dp_rx_xswap(l); \ + l += r; \ + r ^= dp_rx_rotl(l, 3); \ + l += r; \ + r ^= dp_rx_rotr(l, 2); \ + l += r; \ + } while (0) + +/** + * struct dp_rx_desc_list_elem_t + * + * @next : Next pointer to form free list + * @rx_desc : DP Rx descriptor + */ +union dp_rx_desc_list_elem_t { + union dp_rx_desc_list_elem_t *next; + struct dp_rx_desc rx_desc; +}; + +#ifdef RX_DESC_MULTI_PAGE_ALLOC +/** + * dp_rx_desc_find() - find dp rx descriptor from page ID and offset + * @page_id: Page ID + * @offset: Offset of the descriptor element + * + * Return: RX descriptor element + */ +union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset, + struct rx_desc_pool *rx_pool); + +static inline +struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc, + struct rx_desc_pool *pool, + uint32_t cookie) +{ + uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie); + uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie); + uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie); + struct rx_desc_pool *rx_desc_pool; + union dp_rx_desc_list_elem_t *rx_desc_elem; + + if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS)) + return NULL; + + rx_desc_pool = &pool[pool_id]; + rx_desc_elem = (union dp_rx_desc_list_elem_t *) + (rx_desc_pool->desc_pages.cacheable_pages[page_id] + + rx_desc_pool->elem_size * offset); + + return &rx_desc_elem->rx_desc; +} + +/** + * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of + * the Rx descriptor on Rx DMA source ring buffer + * @soc: core txrx main context + * @cookie: cookie used to lookup virtual address + * + * Return: Pointer to the Rx descriptor + */ +static inline +struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, + uint32_t cookie) +{ + return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie); +} + +/** + * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of + * the Rx descriptor on monitor ring buffer + * @soc: core txrx main context + * @cookie: cookie used to lookup virtual address + * + * Return: Pointer to the Rx descriptor + */ +static inline +struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, + uint32_t cookie) +{ + return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie); +} + +/** + * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of + * the Rx descriptor on monitor status ring buffer + * @soc: core txrx main context + * @cookie: cookie used to lookup virtual address + * + * Return: Pointer to the Rx descriptor + */ +static inline +struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, + uint32_t cookie) +{ + return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie); +} +#else +/** + * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of + * the Rx descriptor on Rx DMA source ring buffer + * @soc: core txrx main context + * @cookie: cookie used to lookup virtual address + * + * Return: void *: Virtual Address of the Rx descriptor + */ +static inline +void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie) +{ + uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); + uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie); + struct rx_desc_pool *rx_desc_pool; + + if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS)) + return NULL; + + rx_desc_pool = &soc->rx_desc_buf[pool_id]; + + if (qdf_unlikely(index >= rx_desc_pool->pool_size)) + return NULL; + + return &(soc->rx_desc_buf[pool_id].array[index].rx_desc); +} + +/** + * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of + * the Rx descriptor on monitor ring buffer + * @soc: core txrx main context + * @cookie: cookie used to lookup virtual address + * + * Return: void *: Virtual Address of the Rx descriptor + */ +static inline +void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie) +{ + uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); + uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie); + /* TODO */ + /* Add sanity for pool_id & index */ + return &(soc->rx_desc_mon[pool_id].array[index].rx_desc); +} + +/** + * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of + * the Rx descriptor on monitor status ring buffer + * @soc: core txrx main context + * @cookie: cookie used to lookup virtual address + * + * Return: void *: Virtual Address of the Rx descriptor + */ +static inline +void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie) +{ + uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); + uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie); + /* TODO */ + /* Add sanity for pool_id & index */ + return &(soc->rx_desc_status[pool_id].array[index].rx_desc); +} +#endif /* RX_DESC_MULTI_PAGE_ALLOC */ + +#ifdef DP_RX_DESC_COOKIE_INVALIDATE +static inline QDF_STATUS +dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc) +{ + if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc))) + return QDF_STATUS_E_FAILURE; + + HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc); + return QDF_STATUS_SUCCESS; +} +#else +static inline QDF_STATUS +dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc, + union dp_rx_desc_list_elem_t **local_desc_list, + union dp_rx_desc_list_elem_t **tail, + uint16_t pool_id, + struct rx_desc_pool *rx_desc_pool); + +uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id, + struct rx_desc_pool *rx_desc_pool, + uint16_t num_descs, + union dp_rx_desc_list_elem_t **desc_list, + union dp_rx_desc_list_elem_t **tail); + + +QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev); + +void dp_rx_pdev_detach(struct dp_pdev *pdev); + +void dp_print_napi_stats(struct dp_soc *soc); + +/** + * dp_rx_vdev_detach() - detach vdev from dp rx + * @vdev: virtual device instance + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev); + +uint32_t +dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl, + uint8_t reo_ring_num, + uint32_t quota); + +/** + * dp_rx_err_process() - Processes error frames routed to REO error ring + * @int_ctx: pointer to DP interrupt context + * @soc: core txrx main context + * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced + * @quota: No. of units (packets) that can be serviced in one shot. + * + * This function implements error processing and top level demultiplexer + * for all the frames routed to REO error ring. + * + * Return: uint32_t: No. of elements processed + */ +uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, + hal_ring_handle_t hal_ring_hdl, uint32_t quota); + +/** + * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring + * @int_ctx: pointer to DP interrupt context + * @soc: core txrx main context + * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced + * @quota: No. of units (packets) that can be serviced in one shot. + * + * This function implements error processing and top level demultiplexer + * for all the frames routed to WBM2HOST sw release ring. + * + * Return: uint32_t: No. of elements processed + */ +uint32_t +dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, + hal_ring_handle_t hal_ring_hdl, uint32_t quota); + +/** + * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across + * multiple nbufs. + * @soc: core txrx main context + * @nbuf: pointer to the first msdu of an amsdu. + * + * This function implements the creation of RX frag_list for cases + * where an MSDU is spread across multiple nbufs. + * + * Return: returns the head nbuf which contains complete frag_list. + */ +qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf); + +/* + * dp_rx_desc_pool_alloc() - create a pool of software rx_descs + * at the time of dp rx initialization + * + * @soc: core txrx main context + * @pool_id: pool_id which is one of 3 mac_ids + * @pool_size: number of Rx descriptor in the pool + * @rx_desc_pool: rx descriptor pool pointer + * + * Return: QDF status + */ +QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id, + uint32_t pool_size, struct rx_desc_pool *pool); + +/* + * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during + * de-initialization of wifi module. + * + * @soc: core txrx main context + * @pool_id: pool_id which is one of 3 mac_ids + * @rx_desc_pool: rx descriptor pool pointer + * + * Return: None + */ +void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id, + struct rx_desc_pool *rx_desc_pool); + +/* + * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during + * de-initialization of wifi module. + * + * @soc: core txrx main context + * @pool_id: pool_id which is one of 3 mac_ids + * @rx_desc_pool: rx descriptor pool pointer + * + * Return: None + */ +void dp_rx_desc_nbuf_free(struct dp_soc *soc, + struct rx_desc_pool *rx_desc_pool); + +/* + * dp_rx_desc_pool_free() - free the sw rx desc array called during + * de-initialization of wifi module. + * + * @soc: core txrx main context + * @rx_desc_pool: rx descriptor pool pointer + * + * Return: None + */ +void dp_rx_desc_pool_free(struct dp_soc *soc, + struct rx_desc_pool *rx_desc_pool); + +void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list, + struct dp_peer *peer); + +#ifdef RX_DESC_DEBUG_CHECK +/** + * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc + * @rx_desc: rx descriptor + * @ring_paddr: paddr obatined from the ring + * + * Returns: QDF_STATUS + */ +static inline +bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc, + uint64_t ring_paddr) +{ + return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)); +} + +/* + * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug + * structure + * @rx_desc: rx descriptor pointer + * + * Return: None + */ +static inline +void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc) +{ + rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info)); +} + +/* + * dp_rx_desc_free_dbg_info() - Free rx descriptor debug + * structure memory + * @rx_desc: rx descriptor pointer + * + * Return: None + */ +static inline +void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc) +{ + qdf_mem_free(rx_desc->dbg_info); +} + +/* + * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info + * structure memory + * @rx_desc: rx descriptor pointer + * + * Return: None + */ +static +void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc, + const char *func_name, uint8_t flag) +{ + struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info; + + if (!info) + return; + + if (flag == RX_DESC_REPLENISHED) { + qdf_str_lcopy(info->replenish_caller, func_name, + QDF_MEM_FUNC_NAME_SIZE); + info->replenish_ts = qdf_get_log_timestamp(); + } else { + qdf_str_lcopy(info->freelist_caller, func_name, + QDF_MEM_FUNC_NAME_SIZE); + info->freelist_ts = qdf_get_log_timestamp(); + } +} +#else + +static inline +bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc, + uint64_t ring_paddr) +{ + return true; +} + +static inline +void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc) +{ +} + +static inline +void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc) +{ +} + +static inline +void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc, + const char *func_name, uint8_t flag) +{ +} +#endif /* RX_DESC_DEBUG_CHECK */ + +/** + * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list + * + * @head: pointer to the head of local free list + * @tail: pointer to the tail of local free list + * @new: new descriptor that is added to the free list + * @func_name: caller func name + * + * Return: void: + */ +static inline +void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head, + union dp_rx_desc_list_elem_t **tail, + struct dp_rx_desc *new, const char *func_name) +{ + qdf_assert(head && new); + + new->nbuf = NULL; + new->in_use = 0; + + ((union dp_rx_desc_list_elem_t *)new)->next = *head; + *head = (union dp_rx_desc_list_elem_t *)new; + /* reset tail if head->next is NULL */ + if (!*tail || !(*head)->next) + *tail = *head; + + dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST); +} + +uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, + uint8_t mac_id); +void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, + qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id); +void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, struct dp_peer *peer); +void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, + uint16_t peer_id, uint8_t tid); + + +#define DP_RX_LIST_APPEND(head, tail, elem) \ + do { \ + if (!(head)) { \ + (head) = (elem); \ + QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\ + } else { \ + qdf_nbuf_set_next((tail), (elem)); \ + QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++; \ + } \ + (tail) = (elem); \ + qdf_nbuf_set_next((tail), NULL); \ + } while (0) + +/*for qcn9000 emulation the pcie is complete phy and no address restrictions*/ +#if !defined(BUILD_X86) || defined(QCA_WIFI_QCN9000) +static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf, + qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool) +{ + return QDF_STATUS_SUCCESS; +} +#else +#define MAX_RETRY 100 +static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf, + qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool) +{ + uint32_t nbuf_retry = 0; + int32_t ret; + const uint32_t x86_phy_addr = 0x50000000; + /* + * in M2M emulation platforms (x86) the memory below 0x50000000 + * is reserved for target use, so any memory allocated in this + * region should not be used by host + */ + do { + if (qdf_likely(*paddr > x86_phy_addr)) + return QDF_STATUS_SUCCESS; + else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "phy addr %pK exceeded 0x50000000 trying again", + paddr); + + nbuf_retry++; + if ((*rx_netbuf)) { + qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf, + QDF_DMA_FROM_DEVICE); + /* Not freeing buffer intentionally. + * Observed that same buffer is getting + * re-allocated resulting in longer load time + * WMI init timeout. + * This buffer is anyway not useful so skip it. + **/ + } + + *rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev, + rx_desc_pool->buf_size, + RX_BUFFER_RESERVATION, + rx_desc_pool->buf_alignment, + FALSE); + + if (qdf_unlikely(!(*rx_netbuf))) + return QDF_STATUS_E_FAILURE; + + ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf, + QDF_DMA_FROM_DEVICE); + + if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) { + qdf_nbuf_free(*rx_netbuf); + *rx_netbuf = NULL; + continue; + } + + *paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0); + } + } while (nbuf_retry < MAX_RETRY); + + if ((*rx_netbuf)) { + qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf, + QDF_DMA_FROM_DEVICE); + qdf_nbuf_free(*rx_netbuf); + } + + return QDF_STATUS_E_FAILURE; +} +#endif + +/** + * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of + * the MSDU Link Descriptor + * @soc: core txrx main context + * @buf_info: buf_info includes cookie that is used to lookup + * virtual address of link descriptor after deriving the page id + * and the offset or index of the desc on the associatde page. + * + * This is the VA of the link descriptor, that HAL layer later uses to + * retrieve the list of MSDU's for a given MPDU. + * + * Return: void *: Virtual Address of the Rx descriptor + */ +static inline +void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc, + struct hal_buf_info *buf_info) +{ + void *link_desc_va; + struct qdf_mem_multi_page_t *pages; + uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie); + + pages = &soc->link_desc_pages; + if (!pages) + return NULL; + if (qdf_unlikely(page_id >= pages->num_pages)) + return NULL; + link_desc_va = pages->dma_pages[page_id].page_v_addr_start + + (buf_info->paddr - pages->dma_pages[page_id].page_p_addr); + return link_desc_va; +} + +/** + * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of + * the MSDU Link Descriptor + * @pdev: core txrx pdev context + * @buf_info: buf_info includes cookie that used to lookup virtual address of + * link descriptor. Normally this is just an index into a per pdev array. + * + * This is the VA of the link descriptor in monitor mode destination ring, + * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU. + * + * Return: void *: Virtual Address of the Rx descriptor + */ +static inline +void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev, + struct hal_buf_info *buf_info, + int mac_id) +{ + void *link_desc_va; + + /* TODO */ + /* Add sanity for cookie */ + + link_desc_va = + pdev->soc->mon_link_desc_banks[mac_id][buf_info->sw_cookie] + .base_vaddr + + (buf_info->paddr - + pdev->soc->mon_link_desc_banks[mac_id][buf_info->sw_cookie] + .base_paddr); + + return link_desc_va; +} + +/** + * dp_rx_defrag_concat() - Concatenate the fragments + * + * @dst: destination pointer to the buffer + * @src: source pointer from where the fragment payload is to be copied + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src) +{ + /* + * Inside qdf_nbuf_cat, if it is necessary to reallocate dst + * to provide space for src, the headroom portion is copied from + * the original dst buffer to the larger new dst buffer. + * (This is needed, because the headroom of the dst buffer + * contains the rx desc.) + */ + if (!qdf_nbuf_cat(dst, src)) { + /* + * qdf_nbuf_cat does not free the src memory. + * Free src nbuf before returning + * For failure case the caller takes of freeing the nbuf + */ + qdf_nbuf_free(src); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_DEFRAG_ERROR; +} + +#ifndef FEATURE_WDS +static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void +dp_rx_wds_srcport_learn(struct dp_soc *soc, + uint8_t *rx_tlv_hdr, + struct dp_peer *ta_peer, + qdf_nbuf_t nbuf, + struct hal_rx_msdu_metadata msdu_metadata) +{ +} +#endif + +/* + * dp_rx_desc_dump() - dump the sw rx descriptor + * + * @rx_desc: sw rx descriptor + */ +static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc) +{ + dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d", + rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id, + rx_desc->in_use, rx_desc->unmapped); +} + +/* + * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet. + * In qwrap mode, packets originated from + * any vdev should not loopback and + * should be dropped. + * @vdev: vdev on which rx packet is received + * @nbuf: rx pkt + * + */ +#if ATH_SUPPORT_WRAP +static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev, + qdf_nbuf_t nbuf) +{ + struct dp_vdev *psta_vdev; + struct dp_pdev *pdev = vdev->pdev; + uint8_t *data = qdf_nbuf_data(nbuf); + + if (qdf_unlikely(vdev->proxysta_vdev)) { + /* In qwrap isolation mode, allow loopback packets as all + * packets go to RootAP and Loopback on the mpsta. + */ + if (vdev->isolation_vdev) + return false; + TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) { + if (qdf_unlikely(psta_vdev->proxysta_vdev && + !qdf_mem_cmp(psta_vdev->mac_addr.raw, + &data[QDF_MAC_ADDR_SIZE], + QDF_MAC_ADDR_SIZE))) { + /* Drop packet if source address is equal to + * any of the vdev addresses. + */ + return true; + } + } + } + return false; +} +#else +static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev, + qdf_nbuf_t nbuf) +{ + return false; +} +#endif + +#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\ + defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\ + defined(WLAN_SUPPORT_RX_FLOW_TAG) +#include "dp_rx_tag.h" +#endif + +#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG +/** + * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV + * and set the corresponding tag in QDF packet + * @soc: core txrx main context + * @vdev: vdev on which the packet is received + * @nbuf: QDF pkt buffer on which the protocol tag should be set + * @rx_tlv_hdr: rBbase address where the RX TLVs starts + * @ring_index: REO ring number, not used for error & monitor ring + * @is_reo_exception: flag to indicate if rx from REO ring or exception ring + * @is_update_stats: flag to indicate whether to update stats or not + * Return: void + */ +static inline void +dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev, + qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, + uint16_t ring_index, + bool is_reo_exception, bool is_update_stats) +{ +} +#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */ + +#ifndef WLAN_SUPPORT_RX_FLOW_TAG +/** + * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV + * and set the corresponding tag in QDF packet + * @soc: core txrx main context + * @vdev: vdev on which the packet is received + * @nbuf: QDF pkt buffer on which the protocol tag should be set + * @rx_tlv_hdr: base address where the RX TLVs starts + * @is_update_stats: flag to indicate whether to update stats or not + * + * Return: void + */ +static inline void +dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev, + qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats) +{ +} +#endif /* WLAN_SUPPORT_RX_FLOW_TAG */ + +#if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\ + !defined(WLAN_SUPPORT_RX_FLOW_TAG) +/** + * dp_rx_mon_update_protocol_flow_tag() - Performs necessary checks for monitor + * mode and then tags appropriate packets + * @soc: core txrx main context + * @vdev: pdev on which packet is received + * @msdu: QDF packet buffer on which the protocol tag should be set + * @rx_desc: base address where the RX TLVs start + * Return: void + */ +static inline +void dp_rx_mon_update_protocol_flow_tag(struct dp_soc *soc, + struct dp_pdev *dp_pdev, + qdf_nbuf_t msdu, void *rx_desc) +{ +} +#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG || WLAN_SUPPORT_RX_FLOW_TAG */ + +/* + * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs + * called during dp rx initialization + * and at the end of dp_rx_process. + * + * @soc: core txrx main context + * @mac_id: mac_id which is one of 3 mac_ids + * @dp_rxdma_srng: dp rxdma circular ring + * @rx_desc_pool: Pointer to free Rx descriptor pool + * @num_req_buffers: number of buffer to be replenished + * @desc_list: list of descs if called from dp_rx_process + * or NULL during dp rx initialization or out of buffer + * interrupt. + * @tail: tail of descs list + * @func_name: name of the caller function + * Return: return success or failure + */ +QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, + struct dp_srng *dp_rxdma_srng, + struct rx_desc_pool *rx_desc_pool, + uint32_t num_req_buffers, + union dp_rx_desc_list_elem_t **desc_list, + union dp_rx_desc_list_elem_t **tail, + const char *func_name); + +/* + * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs + * called during dp rx initialization + * + * @soc: core txrx main context + * @mac_id: mac_id which is one of 3 mac_ids + * @dp_rxdma_srng: dp rxdma circular ring + * @rx_desc_pool: Pointer to free Rx descriptor pool + * @num_req_buffers: number of buffer to be replenished + * + * Return: return success or failure + */ +QDF_STATUS +dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, + struct dp_srng *dp_rxdma_srng, + struct rx_desc_pool *rx_desc_pool, + uint32_t num_req_buffers); + +/** + * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW + * (WBM), following error handling + * + * @soc: core DP main context + * @buf_addr_info: opaque pointer to the REO error ring descriptor + * @buf_addr_info: void pointer to the buffer_addr_info + * @bm_action: put to idle_list or release to msdu_list + * + * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS + */ +QDF_STATUS +dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc, + uint8_t bm_action); + +/** + * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to + * (WBM) by address + * + * @soc: core DP main context + * @link_desc_addr: link descriptor addr + * + * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS + */ +QDF_STATUS +dp_rx_link_desc_return_by_addr(struct dp_soc *soc, + hal_buff_addrinfo_t link_desc_addr, + uint8_t bm_action); + +/** + * dp_rxdma_err_process() - RxDMA error processing functionality + * @soc: core txrx main contex + * @mac_id: mac id which is one of 3 mac_ids + * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced + * @quota: No. of units (packets) that can be serviced in one shot. + * + * Return: num of buffers processed + */ +uint32_t +dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, + uint32_t mac_id, uint32_t quota); + +void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, struct dp_peer *peer); +QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr); + +int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev, + struct dp_peer *peer); + +qdf_nbuf_t +dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev); + +/* + * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info + * + * @soc: core txrx main context + * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced + * @ring_desc: opaque pointer to the RX ring descriptor + * @rx_desc: host rs descriptor + * + * Return: void + */ +void dp_rx_dump_info_and_assert(struct dp_soc *soc, + hal_ring_handle_t hal_ring_hdl, + hal_ring_desc_t ring_desc, + struct dp_rx_desc *rx_desc); + +void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf); +#ifdef RX_DESC_DEBUG_CHECK +/** + * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc + * @rx_desc: rx descriptor pointer + * + * Return: true, if magic is correct, else false. + */ +static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) +{ + if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC)) + return false; + + rx_desc->magic = 0; + return true; +} + +/** + * dp_rx_desc_prep() - prepare rx desc + * @rx_desc: rx descriptor pointer to be prepared + * @nbuf: nbuf to be associated with rx_desc + * + * Note: assumption is that we are associating a nbuf which is mapped + * + * Return: none + */ +static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf) +{ + rx_desc->magic = DP_RX_DESC_MAGIC; + rx_desc->nbuf = nbuf; + rx_desc->unmapped = 0; +} + +#else + +static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) +{ + return true; +} + +static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf) +{ + rx_desc->nbuf = nbuf; + rx_desc->unmapped = 0; +} +#endif /* RX_DESC_DEBUG_CHECK */ + +void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, struct dp_peer *peer, + uint8_t err_code, uint8_t mac_id); + +#ifndef QCA_MULTIPASS_SUPPORT +static inline +bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid) +{ + return false; +} +#else +bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, + uint8_t tid); +#endif + +#ifndef WLAN_RX_PKT_CAPTURE_ENH +static inline +void dp_peer_set_rx_capture_enabled(struct dp_peer *peer_handle, bool value) +{ +} +#endif + +/** + * dp_rx_deliver_to_stack() - deliver pkts to network stack + * Caller to hold peer refcount and check for valid peer + * @soc: soc + * @vdev: vdev + * @peer: peer + * @nbuf_head: skb list head + * @nbuf_tail: skb list tail + * + * Return: None + */ +void dp_rx_deliver_to_stack(struct dp_soc *soc, + struct dp_vdev *vdev, + struct dp_peer *peer, + qdf_nbuf_t nbuf_head, + qdf_nbuf_t nbuf_tail); + +/* + * dp_rx_link_desc_refill_duplicate_check() - check if link desc duplicate + to refill + * @soc: DP SOC handle + * @buf_info: the last link desc buf info + * @ring_buf_info: current buf address pointor including link desc + * + * return: none. + */ +void dp_rx_link_desc_refill_duplicate_check( + struct dp_soc *soc, + struct hal_buf_info *buf_info, + hal_buff_addrinfo_t ring_buf_info); +#endif /* _DP_RX_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.c new file mode 100644 index 0000000000000000000000000000000000000000..e807a6d00ee3bba9df329ac94fac0acabb8b3f3e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.c @@ -0,0 +1,2068 @@ +/* + * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hal_hw_headers.h" +#include "dp_types.h" +#include "dp_rx.h" +#include "dp_peer.h" +#include "hal_api.h" +#include "qdf_trace.h" +#include "qdf_nbuf.h" +#include "dp_internal.h" +#include "dp_rx_defrag.h" +#include /* LLC_SNAP_HDR_LEN */ +#include "dp_rx_defrag.h" +#include "dp_ipa.h" + +const struct dp_rx_defrag_cipher dp_f_ccmp = { + "AES-CCM", + IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN, + IEEE80211_WEP_MICLEN, + 0, +}; + +const struct dp_rx_defrag_cipher dp_f_tkip = { + "TKIP", + IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN, + IEEE80211_WEP_CRCLEN, + IEEE80211_WEP_MICLEN, +}; + +const struct dp_rx_defrag_cipher dp_f_wep = { + "WEP", + IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN, + IEEE80211_WEP_CRCLEN, + 0, +}; + +/* + * dp_rx_defrag_frames_free(): Free fragment chain + * @frames: Fragment chain + * + * Iterates through the fragment chain and frees them + * Returns: None + */ +static void dp_rx_defrag_frames_free(qdf_nbuf_t frames) +{ + qdf_nbuf_t next, frag = frames; + + while (frag) { + next = qdf_nbuf_next(frag); + qdf_nbuf_free(frag); + frag = next; + } +} + +/* + * dp_rx_clear_saved_desc_info(): Clears descriptor info + * @peer: Pointer to the peer data structure + * @tid: Transmit ID (TID) + * + * Saves MPDU descriptor info and MSDU link pointer from REO + * ring descriptor. The cache is created per peer, per TID + * + * Returns: None + */ +static void dp_rx_clear_saved_desc_info(struct dp_peer *peer, unsigned tid) +{ + if (peer->rx_tid[tid].dst_ring_desc) + qdf_mem_free(peer->rx_tid[tid].dst_ring_desc); + + peer->rx_tid[tid].dst_ring_desc = NULL; + peer->rx_tid[tid].head_frag_desc = NULL; +} + +static void dp_rx_return_head_frag_desc(struct dp_peer *peer, + unsigned int tid) +{ + struct dp_soc *soc; + struct dp_pdev *pdev; + struct dp_srng *dp_rxdma_srng; + struct rx_desc_pool *rx_desc_pool; + union dp_rx_desc_list_elem_t *head = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + uint8_t pool_id; + + pdev = peer->vdev->pdev; + soc = pdev->soc; + + if (peer->rx_tid[tid].head_frag_desc) { + pool_id = peer->rx_tid[tid].head_frag_desc->pool_id; + dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id]; + rx_desc_pool = &soc->rx_desc_buf[pool_id]; + + dp_rx_add_to_free_desc_list(&head, &tail, + peer->rx_tid[tid].head_frag_desc); + dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool, + 1, &head, &tail); + } + + if (peer->rx_tid[tid].dst_ring_desc) { + if (dp_rx_link_desc_return(soc, + peer->rx_tid[tid].dst_ring_desc, + HAL_BM_ACTION_PUT_IN_IDLE_LIST) != + QDF_STATUS_SUCCESS) + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to return link desc", __func__); + } +} + +/* + * dp_rx_reorder_flush_frag(): Flush the frag list + * @peer: Pointer to the peer data structure + * @tid: Transmit ID (TID) + * + * Flush the per-TID frag list + * + * Returns: None + */ +void dp_rx_reorder_flush_frag(struct dp_peer *peer, + unsigned int tid) +{ + dp_info_rl("Flushing TID %d", tid); + + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: NULL peer", __func__); + return; + } + + dp_rx_return_head_frag_desc(peer, tid); + dp_rx_defrag_cleanup(peer, tid); +} + +/* + * dp_rx_defrag_waitlist_flush(): Flush SOC defrag wait list + * @soc: DP SOC + * + * Flush fragments of all waitlisted TID's + * + * Returns: None + */ +void dp_rx_defrag_waitlist_flush(struct dp_soc *soc) +{ + struct dp_rx_tid *rx_reorder = NULL; + struct dp_rx_tid *tmp; + uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()); + TAILQ_HEAD(, dp_rx_tid) temp_list; + + TAILQ_INIT(&temp_list); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + FL("Current time %u"), now_ms); + + qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock); + TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist, + defrag_waitlist_elem, tmp) { + uint32_t tid; + + if (rx_reorder->defrag_timeout_ms > now_ms) + break; + + tid = rx_reorder->tid; + if (tid >= DP_MAX_TIDS) { + qdf_assert(0); + continue; + } + + TAILQ_REMOVE(&soc->rx.defrag.waitlist, rx_reorder, + defrag_waitlist_elem); + DP_STATS_DEC(soc, rx.rx_frag_wait, 1); + + /* Move to temp list and clean-up later */ + TAILQ_INSERT_TAIL(&temp_list, rx_reorder, + defrag_waitlist_elem); + } + if (rx_reorder) { + soc->rx.defrag.next_flush_ms = + rx_reorder->defrag_timeout_ms; + } else { + soc->rx.defrag.next_flush_ms = + now_ms + soc->rx.defrag.timeout_ms; + } + + qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock); + + TAILQ_FOREACH_SAFE(rx_reorder, &temp_list, + defrag_waitlist_elem, tmp) { + struct dp_peer *peer, *temp_peer = NULL; + + qdf_spin_lock_bh(&rx_reorder->tid_lock); + TAILQ_REMOVE(&temp_list, rx_reorder, + defrag_waitlist_elem); + /* get address of current peer */ + peer = + container_of(rx_reorder, struct dp_peer, + rx_tid[rx_reorder->tid]); + qdf_spin_unlock_bh(&rx_reorder->tid_lock); + + temp_peer = dp_peer_find_by_id(soc, peer->peer_ids[0]); + if (temp_peer == peer) { + qdf_spin_lock_bh(&rx_reorder->tid_lock); + dp_rx_reorder_flush_frag(peer, rx_reorder->tid); + qdf_spin_unlock_bh(&rx_reorder->tid_lock); + } + + if (temp_peer) + dp_peer_unref_del_find_by_id(temp_peer); + + } +} + +/* + * dp_rx_defrag_waitlist_add(): Update per-PDEV defrag wait list + * @peer: Pointer to the peer data structure + * @tid: Transmit ID (TID) + * + * Appends per-tid fragments to global fragment wait list + * + * Returns: None + */ +static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid) +{ + struct dp_soc *psoc = peer->vdev->pdev->soc; + struct dp_rx_tid *rx_reorder = &peer->rx_tid[tid]; + + dp_debug("Adding TID %u to waitlist for peer %pK at MAC address "QDF_MAC_ADDR_FMT, + tid, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw)); + + /* TODO: use LIST macros instead of TAIL macros */ + qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock); + if (TAILQ_EMPTY(&psoc->rx.defrag.waitlist)) + psoc->rx.defrag.next_flush_ms = rx_reorder->defrag_timeout_ms; + TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, rx_reorder, + defrag_waitlist_elem); + DP_STATS_INC(psoc, rx.rx_frag_wait, 1); + qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock); +} + +/* + * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist + * @peer: Pointer to the peer data structure + * @tid: Transmit ID (TID) + * + * Remove fragments from waitlist + * + * Returns: None + */ +void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid) +{ + struct dp_pdev *pdev = peer->vdev->pdev; + struct dp_soc *soc = pdev->soc; + struct dp_rx_tid *rx_reorder; + struct dp_rx_tid *tmp; + + dp_debug("Removing TID %u to waitlist for peer %pK at MAC address "QDF_MAC_ADDR_FMT, + tid, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw)); + + if (tid >= DP_MAX_TIDS) { + dp_err("TID out of bounds: %d", tid); + qdf_assert_always(0); + } + + qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock); + TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist, + defrag_waitlist_elem, tmp) { + struct dp_peer *peer_on_waitlist; + + /* get address of current peer */ + peer_on_waitlist = + container_of(rx_reorder, struct dp_peer, + rx_tid[rx_reorder->tid]); + + /* Ensure it is TID for same peer */ + if (peer_on_waitlist == peer && rx_reorder->tid == tid) { + TAILQ_REMOVE(&soc->rx.defrag.waitlist, + rx_reorder, defrag_waitlist_elem); + DP_STATS_DEC(soc, rx.rx_frag_wait, 1); + } + } + qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock); +} + +/* + * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list + * @peer: Pointer to the peer data structure + * @tid: Transmit ID (TID) + * @head_addr: Pointer to head list + * @tail_addr: Pointer to tail list + * @frag: Incoming fragment + * @all_frag_present: Flag to indicate whether all fragments are received + * + * Build a per-tid, per-sequence fragment list. + * + * Returns: Success, if inserted + */ +static QDF_STATUS dp_rx_defrag_fraglist_insert(struct dp_peer *peer, unsigned tid, + qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr, qdf_nbuf_t frag, + uint8_t *all_frag_present) +{ + qdf_nbuf_t next; + qdf_nbuf_t prev = NULL; + qdf_nbuf_t cur; + uint16_t head_fragno, cur_fragno, next_fragno; + uint8_t last_morefrag = 1, count = 0; + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + uint8_t *rx_desc_info; + + + qdf_assert(frag); + qdf_assert(head_addr); + qdf_assert(tail_addr); + + *all_frag_present = 0; + rx_desc_info = qdf_nbuf_data(frag); + cur_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info); + + dp_debug("cur_fragno %d\n", cur_fragno); + /* If this is the first fragment */ + if (!(*head_addr)) { + *head_addr = *tail_addr = frag; + qdf_nbuf_set_next(*tail_addr, NULL); + rx_tid->curr_frag_num = cur_fragno; + + goto insert_done; + } + + /* In sequence fragment */ + if (cur_fragno > rx_tid->curr_frag_num) { + qdf_nbuf_set_next(*tail_addr, frag); + *tail_addr = frag; + qdf_nbuf_set_next(*tail_addr, NULL); + rx_tid->curr_frag_num = cur_fragno; + } else { + /* Out of sequence fragment */ + cur = *head_addr; + rx_desc_info = qdf_nbuf_data(cur); + head_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info); + + if (cur_fragno == head_fragno) { + qdf_nbuf_free(frag); + goto insert_fail; + } else if (head_fragno > cur_fragno) { + qdf_nbuf_set_next(frag, cur); + cur = frag; + *head_addr = frag; /* head pointer to be updated */ + } else { + while ((cur_fragno > head_fragno) && cur) { + prev = cur; + cur = qdf_nbuf_next(cur); + if (cur) { + rx_desc_info = qdf_nbuf_data(cur); + head_fragno = + dp_rx_frag_get_mpdu_frag_number( + rx_desc_info); + } + } + + if (cur_fragno == head_fragno) { + qdf_nbuf_free(frag); + goto insert_fail; + } + + qdf_nbuf_set_next(prev, frag); + qdf_nbuf_set_next(frag, cur); + } + } + + next = qdf_nbuf_next(*head_addr); + + rx_desc_info = qdf_nbuf_data(*tail_addr); + last_morefrag = dp_rx_frag_get_more_frag_bit(rx_desc_info); + + /* TODO: optimize the loop */ + if (!last_morefrag) { + /* Check if all fragments are present */ + do { + rx_desc_info = qdf_nbuf_data(next); + next_fragno = + dp_rx_frag_get_mpdu_frag_number(rx_desc_info); + count++; + + if (next_fragno != count) + break; + + next = qdf_nbuf_next(next); + } while (next); + + if (!next) { + *all_frag_present = 1; + return QDF_STATUS_SUCCESS; + } else { + /* revisit */ + } + } + +insert_done: + return QDF_STATUS_SUCCESS; + +insert_fail: + return QDF_STATUS_E_FAILURE; +} + + +/* + * dp_rx_defrag_tkip_decap(): decap tkip encrypted fragment + * @msdu: Pointer to the fragment + * @hdrlen: 802.11 header length (mostly useful in 4 addr frames) + * + * decap tkip encrypted fragment + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag_tkip_decap(qdf_nbuf_t msdu, uint16_t hdrlen) +{ + uint8_t *ivp, *orig_hdr; + int rx_desc_len = SIZE_OF_DATA_RX_TLV; + + /* start of 802.11 header info */ + orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len); + + /* TKIP header is located post 802.11 header */ + ivp = orig_hdr + hdrlen; + if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "IEEE80211_WEP_EXTIV is missing in TKIP fragment"); + return QDF_STATUS_E_DEFRAG_ERROR; + } + + qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment + * @nbuf: Pointer to the fragment buffer + * @hdrlen: 802.11 header length (mostly useful in 4 addr frames) + * + * Remove MIC information from CCMP fragment + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag_ccmp_demic(qdf_nbuf_t nbuf, uint16_t hdrlen) +{ + uint8_t *ivp, *orig_hdr; + int rx_desc_len = SIZE_OF_DATA_RX_TLV; + + /* start of the 802.11 header */ + orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len); + + /* CCMP header is located after 802.11 header */ + ivp = orig_hdr + hdrlen; + if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) + return QDF_STATUS_E_DEFRAG_ERROR; + + qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment + * @nbuf: Pointer to the fragment + * @hdrlen: length of the header information + * + * decap CCMP encrypted fragment + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag_ccmp_decap(qdf_nbuf_t nbuf, uint16_t hdrlen) +{ + uint8_t *ivp, *origHdr; + int rx_desc_len = SIZE_OF_DATA_RX_TLV; + + origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len); + ivp = origHdr + hdrlen; + + if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) + return QDF_STATUS_E_DEFRAG_ERROR; + + /* Let's pull the header later */ + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment + * @msdu: Pointer to the fragment + * @hdrlen: length of the header information + * + * decap WEP encrypted fragment + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag_wep_decap(qdf_nbuf_t msdu, uint16_t hdrlen) +{ + uint8_t *origHdr; + int rx_desc_len = SIZE_OF_DATA_RX_TLV; + + origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len); + qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen); + + qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment + * @soc: soc handle + * @nbuf: Pointer to the fragment + * + * Calculate the header size of the received fragment + * + * Returns: header size (uint16_t) + */ +static uint16_t dp_rx_defrag_hdrsize(struct dp_soc *soc, qdf_nbuf_t nbuf) +{ + uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf); + uint16_t size = sizeof(struct ieee80211_frame); + uint16_t fc = 0; + uint32_t to_ds, fr_ds; + uint8_t frm_ctrl_valid; + uint16_t frm_ctrl_field; + + to_ds = hal_rx_mpdu_get_to_ds(soc->hal_soc, rx_tlv_hdr); + fr_ds = hal_rx_mpdu_get_fr_ds(soc->hal_soc, rx_tlv_hdr); + frm_ctrl_valid = + hal_rx_get_mpdu_frame_control_valid(soc->hal_soc, + rx_tlv_hdr); + frm_ctrl_field = hal_rx_get_frame_ctrl_field(rx_tlv_hdr); + + if (to_ds && fr_ds) + size += QDF_MAC_ADDR_SIZE; + + if (frm_ctrl_valid) { + fc = frm_ctrl_field; + + /* use 1-st byte for validation */ + if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) { + size += sizeof(uint16_t); + /* use 2-nd byte for validation */ + if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER) + size += sizeof(struct ieee80211_htc); + } + } + + return size; +} + +/* + * dp_rx_defrag_michdr(): Calculate a pseudo MIC header + * @wh0: Pointer to the wireless header of the fragment + * @hdr: Array to hold the pseudo header + * + * Calculate a pseudo MIC header + * + * Returns: None + */ +static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0, + uint8_t hdr[]) +{ + const struct ieee80211_frame_addr4 *wh = + (const struct ieee80211_frame_addr4 *)wh0; + + switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) { + case IEEE80211_FC1_DIR_NODS: + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */ + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE, + wh->i_addr2); + break; + case IEEE80211_FC1_DIR_TODS: + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */ + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE, + wh->i_addr2); + break; + case IEEE80211_FC1_DIR_FROMDS: + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */ + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE, + wh->i_addr3); + break; + case IEEE80211_FC1_DIR_DSTODS: + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */ + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE, + wh->i_addr4); + break; + } + + /* + * Bit 7 is QDF_IEEE80211_FC0_SUBTYPE_QOS for data frame, but + * it could also be set for deauth, disassoc, action, etc. for + * a mgt type frame. It comes into picture for MFP. + */ + if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) { + if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == + IEEE80211_FC1_DIR_DSTODS) { + const struct ieee80211_qosframe_addr4 *qwh = + (const struct ieee80211_qosframe_addr4 *)wh; + hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID; + } else { + const struct ieee80211_qosframe *qwh = + (const struct ieee80211_qosframe *)wh; + hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID; + } + } else { + hdr[12] = 0; + } + + hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ +} + +/* + * dp_rx_defrag_mic(): Calculate MIC header + * @key: Pointer to the key + * @wbuf: fragment buffer + * @off: Offset + * @data_len: Data length + * @mic: Array to hold MIC + * + * Calculate a pseudo MIC header + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag_mic(const uint8_t *key, qdf_nbuf_t wbuf, + uint16_t off, uint16_t data_len, uint8_t mic[]) +{ + uint8_t hdr[16] = { 0, }; + uint32_t l, r; + const uint8_t *data; + uint32_t space; + int rx_desc_len = SIZE_OF_DATA_RX_TLV; + + dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf) + + rx_desc_len), hdr); + + l = dp_rx_get_le32(key); + r = dp_rx_get_le32(key + 4); + + /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */ + l ^= dp_rx_get_le32(hdr); + dp_rx_michael_block(l, r); + l ^= dp_rx_get_le32(&hdr[4]); + dp_rx_michael_block(l, r); + l ^= dp_rx_get_le32(&hdr[8]); + dp_rx_michael_block(l, r); + l ^= dp_rx_get_le32(&hdr[12]); + dp_rx_michael_block(l, r); + + /* first buffer has special handling */ + data = (uint8_t *)qdf_nbuf_data(wbuf) + off; + space = qdf_nbuf_len(wbuf) - off; + + for (;; ) { + if (space > data_len) + space = data_len; + + /* collect 32-bit blocks from current buffer */ + while (space >= sizeof(uint32_t)) { + l ^= dp_rx_get_le32(data); + dp_rx_michael_block(l, r); + data += sizeof(uint32_t); + space -= sizeof(uint32_t); + data_len -= sizeof(uint32_t); + } + if (data_len < sizeof(uint32_t)) + break; + + wbuf = qdf_nbuf_next(wbuf); + if (!wbuf) + return QDF_STATUS_E_DEFRAG_ERROR; + + if (space != 0) { + const uint8_t *data_next; + /* + * Block straddles buffers, split references. + */ + data_next = + (uint8_t *)qdf_nbuf_data(wbuf) + off; + if ((qdf_nbuf_len(wbuf)) < + sizeof(uint32_t) - space) { + return QDF_STATUS_E_DEFRAG_ERROR; + } + switch (space) { + case 1: + l ^= dp_rx_get_le32_split(data[0], + data_next[0], data_next[1], + data_next[2]); + data = data_next + 3; + space = (qdf_nbuf_len(wbuf) - off) - 3; + break; + case 2: + l ^= dp_rx_get_le32_split(data[0], data[1], + data_next[0], data_next[1]); + data = data_next + 2; + space = (qdf_nbuf_len(wbuf) - off) - 2; + break; + case 3: + l ^= dp_rx_get_le32_split(data[0], data[1], + data[2], data_next[0]); + data = data_next + 1; + space = (qdf_nbuf_len(wbuf) - off) - 1; + break; + } + dp_rx_michael_block(l, r); + data_len -= sizeof(uint32_t); + } else { + /* + * Setup for next buffer. + */ + data = (uint8_t *)qdf_nbuf_data(wbuf) + off; + space = qdf_nbuf_len(wbuf) - off; + } + } + /* Last block and padding (0x5a, 4..7 x 0) */ + switch (data_len) { + case 0: + l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0); + break; + case 1: + l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0); + break; + case 2: + l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0); + break; + case 3: + l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a); + break; + } + dp_rx_michael_block(l, r); + dp_rx_michael_block(l, r); + dp_rx_put_le32(mic, l); + dp_rx_put_le32(mic + 4, r); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_defrag_tkip_demic(): Remove MIC header from the TKIP frame + * @key: Pointer to the key + * @msdu: fragment buffer + * @hdrlen: Length of the header information + * + * Remove MIC information from the TKIP frame + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag_tkip_demic(const uint8_t *key, + qdf_nbuf_t msdu, uint16_t hdrlen) +{ + QDF_STATUS status; + uint32_t pktlen = 0; + uint8_t mic[IEEE80211_WEP_MICLEN]; + uint8_t mic0[IEEE80211_WEP_MICLEN]; + qdf_nbuf_t prev = NULL, next; + + next = msdu; + while (next) { + pktlen += (qdf_nbuf_len(next) - hdrlen); + prev = next; + dp_debug("%s pktlen %u", __func__, + (uint32_t)(qdf_nbuf_len(next) - hdrlen)); + next = qdf_nbuf_next(next); + } + + if (!prev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s Defrag chaining failed !\n", __func__); + return QDF_STATUS_E_DEFRAG_ERROR; + } + + qdf_nbuf_copy_bits(prev, qdf_nbuf_len(prev) - dp_f_tkip.ic_miclen, + dp_f_tkip.ic_miclen, (caddr_t)mic0); + qdf_nbuf_trim_tail(prev, dp_f_tkip.ic_miclen); + pktlen -= dp_f_tkip.ic_miclen; + + status = dp_rx_defrag_mic(key, msdu, hdrlen, + pktlen, mic); + + if (QDF_IS_STATUS_ERROR(status)) + return status; + + if (qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen)) + return QDF_STATUS_E_DEFRAG_ERROR; + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_frag_pull_hdr(): Pulls the RXTLV & the 802.11 headers + * @nbuf: buffer pointer + * @hdrsize: size of the header to be pulled + * + * Pull the RXTLV & the 802.11 headers + * + * Returns: None + */ +static void dp_rx_frag_pull_hdr(qdf_nbuf_t nbuf, uint16_t hdrsize) +{ + struct rx_pkt_tlvs *rx_pkt_tlv = + (struct rx_pkt_tlvs *)qdf_nbuf_data(nbuf); + struct rx_mpdu_info *rx_mpdu_info_details = + &rx_pkt_tlv->mpdu_start_tlv.rx_mpdu_start.rx_mpdu_info_details; + + dp_debug("pn_31_0 0x%x pn_63_32 0x%x pn_95_64 0x%x pn_127_96 0x%x\n", + rx_mpdu_info_details->pn_31_0, rx_mpdu_info_details->pn_63_32, + rx_mpdu_info_details->pn_95_64, + rx_mpdu_info_details->pn_127_96); + + qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN + hdrsize); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: final pktlen %d .11len %d", + __func__, (uint32_t)qdf_nbuf_len(nbuf), hdrsize); +} + +/* + * dp_rx_defrag_pn_check(): Check the PN of current fragmented with prev PN + * @msdu: msdu to get the current PN + * @cur_pn128: PN extracted from current msdu + * @prev_pn128: Prev PN + * + * Returns: 0 on success, non zero on failure + */ +static int dp_rx_defrag_pn_check(qdf_nbuf_t msdu, + uint64_t *cur_pn128, uint64_t *prev_pn128) +{ + struct rx_pkt_tlvs *rx_pkt_tlv = + (struct rx_pkt_tlvs *)qdf_nbuf_data(msdu); + struct rx_mpdu_info *rx_mpdu_info_details = + &rx_pkt_tlv->mpdu_start_tlv.rx_mpdu_start.rx_mpdu_info_details; + int out_of_order = 0; + + cur_pn128[0] = rx_mpdu_info_details->pn_31_0; + cur_pn128[0] |= + ((uint64_t)rx_mpdu_info_details->pn_63_32 << 32); + cur_pn128[1] = rx_mpdu_info_details->pn_95_64; + cur_pn128[1] |= + ((uint64_t)rx_mpdu_info_details->pn_127_96 << 32); + + if (cur_pn128[1] == prev_pn128[1]) + out_of_order = (cur_pn128[0] - prev_pn128[0] != 1); + else + out_of_order = (cur_pn128[1] - prev_pn128[1] != 1); + + return out_of_order; +} + +/* + * dp_rx_construct_fraglist(): Construct a nbuf fraglist + * @peer: Pointer to the peer + * @head: Pointer to list of fragments + * @hdrsize: Size of the header to be pulled + * + * Construct a nbuf fraglist + * + * Returns: None + */ +static int +dp_rx_construct_fraglist(struct dp_peer *peer, int tid, qdf_nbuf_t head, + uint16_t hdrsize) +{ + qdf_nbuf_t msdu = qdf_nbuf_next(head); + qdf_nbuf_t rx_nbuf = msdu; + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + uint32_t len = 0; + uint64_t cur_pn128[2] = {0, 0}, prev_pn128[2]; + int out_of_order = 0; + int index; + int needs_pn_check = 0; + + prev_pn128[0] = rx_tid->pn128[0]; + prev_pn128[1] = rx_tid->pn128[1]; + + index = hal_rx_msdu_is_wlan_mcast(msdu) ? dp_sec_mcast : dp_sec_ucast; + if (qdf_likely(peer->security[index].sec_type != cdp_sec_type_none)) + needs_pn_check = 1; + + while (msdu) { + if (qdf_likely(needs_pn_check)) + out_of_order = dp_rx_defrag_pn_check(msdu, + &cur_pn128[0], + &prev_pn128[0]); + + if (qdf_unlikely(out_of_order)) { + dp_info_rl("cur_pn128[0] 0x%llx cur_pn128[1] 0x%llx prev_pn128[0] 0x%llx prev_pn128[1] 0x%llx", + cur_pn128[0], cur_pn128[1], + prev_pn128[0], prev_pn128[1]); + return QDF_STATUS_E_FAILURE; + } + + prev_pn128[0] = cur_pn128[0]; + prev_pn128[1] = cur_pn128[1]; + + /* + * Broadcast and multicast frames should never be fragmented. + * Iterating through all msdus and dropping fragments if even + * one of them has mcast/bcast destination address. + */ + if (hal_rx_msdu_is_wlan_mcast(msdu)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Dropping multicast/broadcast fragments"); + return QDF_STATUS_E_FAILURE; + } + + dp_rx_frag_pull_hdr(msdu, hdrsize); + len += qdf_nbuf_len(msdu); + msdu = qdf_nbuf_next(msdu); + } + + qdf_nbuf_append_ext_list(head, rx_nbuf, len); + qdf_nbuf_set_next(head, NULL); + qdf_nbuf_set_is_frag(head, 1); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: head len %d ext len %d data len %d ", + __func__, + (uint32_t)qdf_nbuf_len(head), + (uint32_t)qdf_nbuf_len(rx_nbuf), + (uint32_t)(head->data_len)); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_rx_defrag_err() - rx err handler + * @pdev: handle to pdev object + * @vdev_id: vdev id + * @peer_mac_addr: peer mac address + * @tid: TID + * @tsf32: TSF + * @err_type: error type + * @rx_frame: rx frame + * @pn: PN Number + * @key_id: key id + * + * This function handles rx error and send MIC error notification + * + * Return: None + */ +static void dp_rx_defrag_err(struct dp_vdev *vdev, qdf_nbuf_t nbuf) +{ + struct ol_if_ops *tops = NULL; + struct dp_pdev *pdev = vdev->pdev; + int rx_desc_len = SIZE_OF_DATA_RX_TLV; + uint8_t *orig_hdr; + struct ieee80211_frame *wh; + struct cdp_rx_mic_err_info mic_failure_info; + + orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len); + wh = (struct ieee80211_frame *)orig_hdr; + + qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.da_mac_addr, + (struct qdf_mac_addr *)&wh->i_addr1); + qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.ta_mac_addr, + (struct qdf_mac_addr *)&wh->i_addr2); + mic_failure_info.key_id = 0; + mic_failure_info.multicast = + IEEE80211_IS_MULTICAST(wh->i_addr1); + qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE); + mic_failure_info.frame_type = cdp_rx_frame_type_802_11; + mic_failure_info.data = (uint8_t *)wh; + mic_failure_info.vdev_id = vdev->vdev_id; + + tops = pdev->soc->cdp_soc.ol_ops; + if (tops->rx_mic_error) + tops->rx_mic_error(pdev->soc->ctrl_psoc, pdev->pdev_id, + &mic_failure_info); +} + + +/* + * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3 + * @soc: dp soc handle + * @nbuf: Pointer to the fragment buffer + * @hdrsize: Size of headers + * + * Transcap the fragment from 802.11 to 802.3 + * + * Returns: None + */ +static void +dp_rx_defrag_nwifi_to_8023(struct dp_soc *soc, struct dp_peer *peer, int tid, + qdf_nbuf_t nbuf, uint16_t hdrsize) +{ + struct llc_snap_hdr_t *llchdr; + struct ethernet_hdr_t *eth_hdr; + uint8_t ether_type[2]; + uint16_t fc = 0; + union dp_align_mac_addr mac_addr; + uint8_t *rx_desc_info = qdf_mem_malloc(RX_PKT_TLVS_LEN); + struct rx_pkt_tlvs *rx_pkt_tlv = + (struct rx_pkt_tlvs *)qdf_nbuf_data(nbuf); + struct rx_mpdu_info *rx_mpdu_info_details = + &rx_pkt_tlv->mpdu_start_tlv.rx_mpdu_start.rx_mpdu_info_details; + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + + dp_debug("head_nbuf pn_31_0 0x%x pn_63_32 0x%x pn_95_64 0x%x pn_127_96 0x%x\n", + rx_mpdu_info_details->pn_31_0, rx_mpdu_info_details->pn_63_32, + rx_mpdu_info_details->pn_95_64, + rx_mpdu_info_details->pn_127_96); + + rx_tid->pn128[0] = rx_mpdu_info_details->pn_31_0; + rx_tid->pn128[0] |= ((uint64_t)rx_mpdu_info_details->pn_63_32 << 32); + rx_tid->pn128[1] = rx_mpdu_info_details->pn_95_64; + rx_tid->pn128[1] |= ((uint64_t)rx_mpdu_info_details->pn_127_96 << 32); + + if (!rx_desc_info) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Memory alloc failed ! ", __func__); + QDF_ASSERT(0); + return; + } + + qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), RX_PKT_TLVS_LEN); + + llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) + + RX_PKT_TLVS_LEN + hdrsize); + qdf_mem_copy(ether_type, llchdr->ethertype, 2); + + qdf_nbuf_pull_head(nbuf, (RX_PKT_TLVS_LEN + hdrsize + + sizeof(struct llc_snap_hdr_t) - + sizeof(struct ethernet_hdr_t))); + + eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf)); + + if (hal_rx_get_mpdu_frame_control_valid(soc->hal_soc, + rx_desc_info)) + fc = hal_rx_get_frame_ctrl_field(rx_desc_info); + + dp_debug("%s: frame control type: 0x%x", __func__, fc); + + switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) { + case IEEE80211_FC1_DIR_NODS: + hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], + QDF_MAC_ADDR_SIZE); + hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], + QDF_MAC_ADDR_SIZE); + break; + case IEEE80211_FC1_DIR_TODS: + hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], + QDF_MAC_ADDR_SIZE); + hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], + QDF_MAC_ADDR_SIZE); + break; + case IEEE80211_FC1_DIR_FROMDS: + hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], + QDF_MAC_ADDR_SIZE); + hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], + QDF_MAC_ADDR_SIZE); + break; + + case IEEE80211_FC1_DIR_DSTODS: + hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], + QDF_MAC_ADDR_SIZE); + hal_rx_mpdu_get_addr4(soc->hal_soc, rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], + QDF_MAC_ADDR_SIZE); + break; + + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Unknown frame control type: 0x%x", __func__, fc); + } + + qdf_mem_copy(eth_hdr->ethertype, ether_type, + sizeof(ether_type)); + + qdf_nbuf_push_head(nbuf, RX_PKT_TLVS_LEN); + qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, RX_PKT_TLVS_LEN); + qdf_mem_free(rx_desc_info); +} + +#ifdef RX_DEFRAG_DO_NOT_REINJECT +/* + * dp_rx_defrag_deliver(): Deliver defrag packet to stack + * @peer: Pointer to the peer + * @tid: Transmit Identifier + * @head: Nbuf to be delivered + * + * Returns: None + */ +static inline void dp_rx_defrag_deliver(struct dp_peer *peer, + unsigned int tid, + qdf_nbuf_t head) +{ + struct dp_vdev *vdev = peer->vdev; + struct dp_soc *soc = vdev->pdev->soc; + qdf_nbuf_t deliver_list_head = NULL; + qdf_nbuf_t deliver_list_tail = NULL; + uint8_t *rx_tlv_hdr; + + rx_tlv_hdr = qdf_nbuf_data(head); + + QDF_NBUF_CB_RX_VDEV_ID(head) = vdev->vdev_id; + qdf_nbuf_set_tid_val(head, tid); + qdf_nbuf_pull_head(head, RX_PKT_TLVS_LEN); + + DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, + head); + dp_rx_deliver_to_stack(soc, vdev, peer, deliver_list_head, + deliver_list_tail); +} + +/* + * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO + * @peer: Pointer to the peer + * @tid: Transmit Identifier + * @head: Buffer to be reinjected back + * + * Reinject the fragment chain back into REO + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer, + unsigned int tid, qdf_nbuf_t head) +{ + struct dp_rx_reorder_array_elem *rx_reorder_array_elem; + + rx_reorder_array_elem = peer->rx_tid[tid].array; + + dp_rx_defrag_deliver(peer, tid, head); + rx_reorder_array_elem->head = NULL; + rx_reorder_array_elem->tail = NULL; + dp_rx_return_head_frag_desc(peer, tid); + + return QDF_STATUS_SUCCESS; +} +#else + +#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY +/** + * dp_rx_reinject_ring_record_entry() - Record reinject ring history + * @soc: Datapath soc structure + * @paddr: paddr of the buffer reinjected to SW2REO ring + * @sw_cookie: SW cookie of the buffer reinjected to SW2REO ring + * @rbm: Return buffer manager of the buffer reinjected to SW2REO ring + * + * Returns: None + */ +static inline void +dp_rx_reinject_ring_record_entry(struct dp_soc *soc, uint64_t paddr, + uint32_t sw_cookie, uint8_t rbm) +{ + struct dp_buf_info_record *record; + uint32_t idx; + + if (qdf_unlikely(!soc->rx_reinject_ring_history)) + return; + + idx = dp_history_get_next_index(&soc->rx_reinject_ring_history->index, + DP_RX_REINJECT_HIST_MAX); + + /* No NULL check needed for record since its an array */ + record = &soc->rx_reinject_ring_history->entry[idx]; + + record->timestamp = qdf_get_log_timestamp(); + record->hbi.paddr = paddr; + record->hbi.sw_cookie = sw_cookie; + record->hbi.rbm = rbm; +} +#else +static inline void +dp_rx_reinject_ring_record_entry(struct dp_soc *soc, uint64_t paddr, + uint32_t sw_cookie, uint8_t rbm) +{ +} +#endif + +/* + * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO + * @peer: Pointer to the peer + * @tid: Transmit Identifier + * @head: Buffer to be reinjected back + * + * Reinject the fragment chain back into REO + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer, + unsigned int tid, qdf_nbuf_t head) +{ + struct dp_pdev *pdev = peer->vdev->pdev; + struct dp_soc *soc = pdev->soc; + struct hal_buf_info buf_info; + void *link_desc_va; + void *msdu0, *msdu_desc_info; + void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr; + void *dst_mpdu_desc_info, *dst_qdesc_addr; + qdf_dma_addr_t paddr; + uint32_t nbuf_len, seq_no, dst_ind; + uint32_t *mpdu_wrd; + uint32_t ret, cookie; + hal_ring_desc_t dst_ring_desc = + peer->rx_tid[tid].dst_ring_desc; + hal_ring_handle_t hal_srng = soc->reo_reinject_ring.hal_srng; + struct dp_rx_desc *rx_desc = peer->rx_tid[tid].head_frag_desc; + struct dp_rx_reorder_array_elem *rx_reorder_array_elem = + peer->rx_tid[tid].array; + qdf_nbuf_t nbuf_head; + struct rx_desc_pool *rx_desc_pool = NULL; + void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(dst_ring_desc); + + /* do duplicate link desc address check */ + dp_rx_link_desc_refill_duplicate_check( + soc, + &soc->last_op_info.reo_reinject_link_desc, + buf_addr_info); + + nbuf_head = dp_ipa_handle_rx_reo_reinject(soc, head); + if (qdf_unlikely(!nbuf_head)) { + dp_err_rl("IPA RX REO reinject failed"); + return QDF_STATUS_E_FAILURE; + } + + /* update new allocated skb in case IPA is enabled */ + if (nbuf_head != head) { + head = nbuf_head; + rx_desc->nbuf = head; + rx_reorder_array_elem->head = head; + } + + ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng); + if (!ent_ring_desc) { + dp_err_rl("HAL src ring next entry NULL"); + return QDF_STATUS_E_FAILURE; + } + + hal_rx_reo_buf_paddr_get(dst_ring_desc, &buf_info); + + link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); + + qdf_assert_always(link_desc_va); + + msdu0 = hal_rx_msdu0_buffer_addr_lsb(soc->hal_soc, link_desc_va); + nbuf_len = qdf_nbuf_len(head) - RX_PKT_TLVS_LEN; + + HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW); + HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE, + UNI_DESC_BUF_TYPE_RX_MSDU_LINK); + + /* msdu reconfig */ + msdu_desc_info = hal_rx_msdu_desc_info_ptr_get(soc->hal_soc, msdu0); + + dst_ind = hal_rx_msdu_reo_dst_ind_get(soc->hal_soc, link_desc_va); + + qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info)); + + HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, + FIRST_MSDU_IN_MPDU_FLAG, 1); + HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, + LAST_MSDU_IN_MPDU_FLAG, 1); + HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, + MSDU_CONTINUATION, 0x0); + HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, + REO_DESTINATION_INDICATION, dst_ind); + HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, + MSDU_LENGTH, nbuf_len); + HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, + SA_IS_VALID, 1); + HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, + DA_IS_VALID, 1); + + /* change RX TLV's */ + hal_rx_msdu_start_msdu_len_set( + qdf_nbuf_data(head), nbuf_len); + + cookie = HAL_RX_BUF_COOKIE_GET(msdu0); + + /* map the nbuf before reinject it into HW */ + ret = qdf_nbuf_map_single(soc->osdev, head, + QDF_DMA_FROM_DEVICE); + if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: nbuf map failed !", __func__); + return QDF_STATUS_E_FAILURE; + } + + /* + * As part of rx frag handler bufffer was unmapped and rx desc + * unmapped is set to 1. So again for defrag reinject frame reset + * it back to 0. + */ + rx_desc->unmapped = 0; + + dp_ipa_handle_rx_buf_smmu_mapping(soc, head, true); + + paddr = qdf_nbuf_get_frag_paddr(head, 0); + rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id]; + + ret = check_x86_paddr(soc, &head, &paddr, rx_desc_pool); + + if (ret == QDF_STATUS_E_FAILURE) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: x86 check failed !", __func__); + return QDF_STATUS_E_FAILURE; + } + + hal_rxdma_buff_addr_info_set(msdu0, paddr, cookie, DP_DEFRAG_RBM); + + /* Lets fill entrance ring now !!! */ + if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "HAL RING Access For REO entrance SRNG Failed: %pK", + hal_srng); + + return QDF_STATUS_E_FAILURE; + } + + dp_rx_reinject_ring_record_entry(soc, paddr, cookie, DP_DEFRAG_RBM); + paddr = (uint64_t)buf_info.paddr; + /* buf addr */ + hal_rxdma_buff_addr_info_set(ent_ring_desc, paddr, + buf_info.sw_cookie, + HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); + /* mpdu desc info */ + ent_mpdu_desc_info = hal_ent_mpdu_desc_info(soc->hal_soc, + ent_ring_desc); + dst_mpdu_desc_info = hal_dst_mpdu_desc_info(soc->hal_soc, + dst_ring_desc); + + qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info, + sizeof(struct rx_mpdu_desc_info)); + qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t)); + + mpdu_wrd = (uint32_t *)dst_mpdu_desc_info; + seq_no = HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_wrd); + + HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, + MSDU_COUNT, 0x1); + HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, + MPDU_SEQUENCE_NUMBER, seq_no); + /* unset frag bit */ + HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, + FRAGMENT_FLAG, 0x0); + /* set sa/da valid bits */ + HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, + SA_IS_VALID, 0x1); + HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, + DA_IS_VALID, 0x1); + HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, + RAW_MPDU, 0x0); + + /* qdesc addr */ + ent_qdesc_addr = (uint8_t *)ent_ring_desc + + REO_ENTRANCE_RING_4_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET; + + dst_qdesc_addr = (uint8_t *)dst_ring_desc + + REO_DESTINATION_RING_6_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET; + + qdf_mem_copy(ent_qdesc_addr, dst_qdesc_addr, 8); + + HAL_RX_FLD_SET(ent_ring_desc, REO_ENTRANCE_RING_5, + REO_DESTINATION_INDICATION, dst_ind); + + hal_srng_access_end(soc->hal_soc, hal_srng); + + DP_STATS_INC(soc, rx.reo_reinject, 1); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: reinjection done !", __func__); + return QDF_STATUS_SUCCESS; +} +#endif + +/* + * dp_rx_defrag(): Defragment the fragment chain + * @peer: Pointer to the peer + * @tid: Transmit Identifier + * @frag_list_head: Pointer to head list + * @frag_list_tail: Pointer to tail list + * + * Defragment the fragment chain + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid, + qdf_nbuf_t frag_list_head, qdf_nbuf_t frag_list_tail) +{ + qdf_nbuf_t tmp_next, prev; + qdf_nbuf_t cur = frag_list_head, msdu; + uint32_t index, tkip_demic = 0; + uint16_t hdr_space; + uint8_t key[DEFRAG_IEEE80211_KEY_LEN]; + struct dp_vdev *vdev = peer->vdev; + struct dp_soc *soc = vdev->pdev->soc; + uint8_t status = 0; + + hdr_space = dp_rx_defrag_hdrsize(soc, cur); + index = hal_rx_msdu_is_wlan_mcast(cur) ? + dp_sec_mcast : dp_sec_ucast; + + /* Remove FCS from all fragments */ + while (cur) { + tmp_next = qdf_nbuf_next(cur); + qdf_nbuf_set_next(cur, NULL); + qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN); + prev = cur; + qdf_nbuf_set_next(cur, tmp_next); + cur = tmp_next; + } + cur = frag_list_head; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: index %d Security type: %d", __func__, + index, peer->security[index].sec_type); + + switch (peer->security[index].sec_type) { + case cdp_sec_type_tkip: + tkip_demic = 1; + + case cdp_sec_type_tkip_nomic: + while (cur) { + tmp_next = qdf_nbuf_next(cur); + if (dp_rx_defrag_tkip_decap(cur, hdr_space)) { + + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + "dp_rx_defrag: TKIP decap failed"); + + return QDF_STATUS_E_DEFRAG_ERROR; + } + cur = tmp_next; + } + + /* If success, increment header to be stripped later */ + hdr_space += dp_f_tkip.ic_header; + break; + + case cdp_sec_type_aes_ccmp: + while (cur) { + tmp_next = qdf_nbuf_next(cur); + if (dp_rx_defrag_ccmp_demic(cur, hdr_space)) { + + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + "dp_rx_defrag: CCMP demic failed"); + + return QDF_STATUS_E_DEFRAG_ERROR; + } + if (dp_rx_defrag_ccmp_decap(cur, hdr_space)) { + + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + "dp_rx_defrag: CCMP decap failed"); + + return QDF_STATUS_E_DEFRAG_ERROR; + } + cur = tmp_next; + } + + /* If success, increment header to be stripped later */ + hdr_space += dp_f_ccmp.ic_header; + break; + + case cdp_sec_type_wep40: + case cdp_sec_type_wep104: + case cdp_sec_type_wep128: + while (cur) { + tmp_next = qdf_nbuf_next(cur); + if (dp_rx_defrag_wep_decap(cur, hdr_space)) { + + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + "dp_rx_defrag: WEP decap failed"); + + return QDF_STATUS_E_DEFRAG_ERROR; + } + cur = tmp_next; + } + + /* If success, increment header to be stripped later */ + hdr_space += dp_f_wep.ic_header; + break; + default: + break; + } + + if (tkip_demic) { + msdu = frag_list_head; + qdf_mem_copy(key, + &peer->security[index].michael_key[0], + IEEE80211_WEP_MICLEN); + status = dp_rx_defrag_tkip_demic(key, msdu, + RX_PKT_TLVS_LEN + + hdr_space); + + if (status) { + dp_rx_defrag_err(vdev, frag_list_head); + + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + "%s: TKIP demic failed status %d", + __func__, status); + + return QDF_STATUS_E_DEFRAG_ERROR; + } + } + + /* Convert the header to 802.3 header */ + dp_rx_defrag_nwifi_to_8023(soc, peer, tid, frag_list_head, hdr_space); + if (dp_rx_construct_fraglist(peer, tid, frag_list_head, hdr_space)) + return QDF_STATUS_E_DEFRAG_ERROR; + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_defrag_cleanup(): Clean up activities + * @peer: Pointer to the peer + * @tid: Transmit Identifier + * + * Returns: None + */ +void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid) +{ + struct dp_rx_reorder_array_elem *rx_reorder_array_elem = + peer->rx_tid[tid].array; + + if (rx_reorder_array_elem) { + /* Free up nbufs */ + dp_rx_defrag_frames_free(rx_reorder_array_elem->head); + rx_reorder_array_elem->head = NULL; + rx_reorder_array_elem->tail = NULL; + } else { + dp_info("Cleanup self peer %pK and TID %u at MAC address "QDF_MAC_ADDR_FMT, + peer, tid, QDF_MAC_ADDR_REF(peer->mac_addr.raw)); + } + + /* Free up saved ring descriptors */ + dp_rx_clear_saved_desc_info(peer, tid); + + peer->rx_tid[tid].defrag_timeout_ms = 0; + peer->rx_tid[tid].curr_frag_num = 0; + peer->rx_tid[tid].curr_seq_num = 0; +} + +/* + * dp_rx_defrag_save_info_from_ring_desc(): Save info from REO ring descriptor + * @ring_desc: Pointer to the dst ring descriptor + * @peer: Pointer to the peer + * @tid: Transmit Identifier + * + * Returns: None + */ +static QDF_STATUS +dp_rx_defrag_save_info_from_ring_desc(hal_ring_desc_t ring_desc, + struct dp_rx_desc *rx_desc, + struct dp_peer *peer, + unsigned int tid) +{ + void *dst_ring_desc = qdf_mem_malloc( + sizeof(struct reo_destination_ring)); + + if (!dst_ring_desc) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Memory alloc failed !", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_NOMEM; + } + + qdf_mem_copy(dst_ring_desc, ring_desc, + sizeof(struct reo_destination_ring)); + + peer->rx_tid[tid].dst_ring_desc = dst_ring_desc; + peer->rx_tid[tid].head_frag_desc = rx_desc; + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_defrag_store_fragment(): Store incoming fragments + * @soc: Pointer to the SOC data structure + * @ring_desc: Pointer to the ring descriptor + * @mpdu_desc_info: MPDU descriptor info + * @tid: Traffic Identifier + * @rx_desc: Pointer to rx descriptor + * @rx_bfs: Number of bfs consumed + * + * Returns: QDF_STATUS + */ +static QDF_STATUS +dp_rx_defrag_store_fragment(struct dp_soc *soc, + hal_ring_desc_t ring_desc, + union dp_rx_desc_list_elem_t **head, + union dp_rx_desc_list_elem_t **tail, + struct hal_rx_mpdu_desc_info *mpdu_desc_info, + unsigned int tid, struct dp_rx_desc *rx_desc, + uint32_t *rx_bfs) +{ + struct dp_rx_reorder_array_elem *rx_reorder_array_elem; + struct dp_pdev *pdev; + struct dp_peer *peer = NULL; + uint16_t peer_id; + uint8_t fragno, more_frag, all_frag_present = 0; + uint16_t rxseq = mpdu_desc_info->mpdu_seq; + QDF_STATUS status; + struct dp_rx_tid *rx_tid; + uint8_t mpdu_sequence_control_valid; + uint8_t mpdu_frame_control_valid; + qdf_nbuf_t frag = rx_desc->nbuf; + uint32_t msdu_len; + + if (qdf_nbuf_len(frag) > 0) { + dp_info("Dropping unexpected packet with skb_len: %d," + "data len: %d, cookie: %d", + (uint32_t)qdf_nbuf_len(frag), frag->data_len, + rx_desc->cookie); + DP_STATS_INC(soc, rx.rx_frag_err_len_error, 1); + goto discard_frag; + } + + msdu_len = hal_rx_msdu_start_msdu_len_get(rx_desc->rx_buf_start); + + qdf_nbuf_set_pktlen(frag, (msdu_len + RX_PKT_TLVS_LEN)); + qdf_nbuf_append_ext_list(frag, NULL, 0); + + /* Check if the packet is from a valid peer */ + peer_id = DP_PEER_METADATA_PEER_ID_GET( + mpdu_desc_info->peer_meta_data); + peer = dp_peer_find_by_id(soc, peer_id); + + if (!peer) { + /* We should not receive anything from unknown peer + * however, that might happen while we are in the monitor mode. + * We don't need to handle that here + */ + dp_info_rl("Unknown peer with peer_id %d, dropping fragment", + peer_id); + DP_STATS_INC(soc, rx.rx_frag_err_no_peer, 1); + goto discard_frag; + } + + if (tid >= DP_MAX_TIDS) { + dp_info("TID out of bounds: %d", tid); + qdf_assert_always(0); + goto discard_frag; + } + + pdev = peer->vdev->pdev; + rx_tid = &peer->rx_tid[tid]; + + mpdu_sequence_control_valid = + hal_rx_get_mpdu_sequence_control_valid(soc->hal_soc, + rx_desc->rx_buf_start); + + /* Invalid MPDU sequence control field, MPDU is of no use */ + if (!mpdu_sequence_control_valid) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Invalid MPDU seq control field, dropping MPDU"); + + qdf_assert(0); + goto discard_frag; + } + + mpdu_frame_control_valid = + hal_rx_get_mpdu_frame_control_valid(soc->hal_soc, + rx_desc->rx_buf_start); + + /* Invalid frame control field */ + if (!mpdu_frame_control_valid) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Invalid frame control field, dropping MPDU"); + + qdf_assert(0); + goto discard_frag; + } + + /* Current mpdu sequence */ + more_frag = dp_rx_frag_get_more_frag_bit(rx_desc->rx_buf_start); + + /* HW does not populate the fragment number as of now + * need to get from the 802.11 header + */ + fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc->rx_buf_start); + + rx_reorder_array_elem = peer->rx_tid[tid].array; + if (!rx_reorder_array_elem) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Rcvd Fragmented pkt before peer_tid is setup"); + goto discard_frag; + } + + /* + * !more_frag: no more fragments to be delivered + * !frag_no: packet is not fragmented + * !rx_reorder_array_elem->head: no saved fragments so far + */ + if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) { + /* We should not get into this situation here. + * It means an unfragmented packet with fragment flag + * is delivered over the REO exception ring. + * Typically it follows normal rx path. + */ + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Rcvd unfragmented pkt on REO Err srng, dropping"); + + qdf_assert(0); + goto discard_frag; + } + + /* Check if the fragment is for the same sequence or a different one */ + dp_debug("rx_tid %d", tid); + if (rx_reorder_array_elem->head) { + dp_debug("rxseq %d\n", rxseq); + if (rxseq != rx_tid->curr_seq_num) { + + dp_debug("mismatch cur_seq %d rxseq %d\n", + rx_tid->curr_seq_num, rxseq); + /* Drop stored fragments if out of sequence + * fragment is received + */ + dp_rx_reorder_flush_frag(peer, tid); + + DP_STATS_INC(soc, rx.rx_frag_oor, 1); + + dp_debug("cur rxseq %d\n", rxseq); + /* + * The sequence number for this fragment becomes the + * new sequence number to be processed + */ + rx_tid->curr_seq_num = rxseq; + } + } else { + dp_debug("cur rxseq %d\n", rxseq); + /* Start of a new sequence */ + dp_rx_defrag_cleanup(peer, tid); + rx_tid->curr_seq_num = rxseq; + /* store PN number also */ + } + + /* + * If the earlier sequence was dropped, this will be the fresh start. + * Else, continue with next fragment in a given sequence + */ + status = dp_rx_defrag_fraglist_insert(peer, tid, &rx_reorder_array_elem->head, + &rx_reorder_array_elem->tail, frag, + &all_frag_present); + + /* + * Currently, we can have only 6 MSDUs per-MPDU, if the current + * packet sequence has more than 6 MSDUs for some reason, we will + * have to use the next MSDU link descriptor and chain them together + * before reinjection + */ + if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) && + (rx_reorder_array_elem->head == frag)) { + + qdf_assert_always(ring_desc); + status = dp_rx_defrag_save_info_from_ring_desc(ring_desc, + rx_desc, peer, tid); + + if (status != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Unable to store ring desc !", __func__); + goto discard_frag; + } + } else { + dp_rx_add_to_free_desc_list(head, tail, rx_desc); + (*rx_bfs)++; + + /* Return the non-head link desc */ + if (ring_desc && + dp_rx_link_desc_return(soc, ring_desc, + HAL_BM_ACTION_PUT_IN_IDLE_LIST) != + QDF_STATUS_SUCCESS) + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to return link desc", __func__); + + } + + if (pdev->soc->rx.flags.defrag_timeout_check) + dp_rx_defrag_waitlist_remove(peer, tid); + + /* Yet to receive more fragments for this sequence number */ + if (!all_frag_present) { + uint32_t now_ms = + qdf_system_ticks_to_msecs(qdf_system_ticks()); + + peer->rx_tid[tid].defrag_timeout_ms = + now_ms + pdev->soc->rx.defrag.timeout_ms; + + dp_rx_defrag_waitlist_add(peer, tid); + dp_peer_unref_del_find_by_id(peer); + + return QDF_STATUS_SUCCESS; + } + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "All fragments received for sequence: %d", rxseq); + + /* Process the fragments */ + status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head, + rx_reorder_array_elem->tail); + if (QDF_IS_STATUS_ERROR(status)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Fragment processing failed"); + + dp_rx_add_to_free_desc_list(head, tail, + peer->rx_tid[tid].head_frag_desc); + (*rx_bfs)++; + + if (dp_rx_link_desc_return(soc, + peer->rx_tid[tid].dst_ring_desc, + HAL_BM_ACTION_PUT_IN_IDLE_LIST) != + QDF_STATUS_SUCCESS) + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to return link desc", + __func__); + dp_rx_defrag_cleanup(peer, tid); + goto end; + } + + /* Re-inject the fragments back to REO for further processing */ + status = dp_rx_defrag_reo_reinject(peer, tid, + rx_reorder_array_elem->head); + if (QDF_IS_STATUS_SUCCESS(status)) { + rx_reorder_array_elem->head = NULL; + rx_reorder_array_elem->tail = NULL; + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "Fragmented sequence successfully reinjected"); + } else { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Fragmented sequence reinjection failed"); + dp_rx_return_head_frag_desc(peer, tid); + } + + dp_rx_defrag_cleanup(peer, tid); + + dp_peer_unref_del_find_by_id(peer); + + return QDF_STATUS_SUCCESS; + +discard_frag: + qdf_nbuf_free(frag); + dp_rx_add_to_free_desc_list(head, tail, rx_desc); + if (dp_rx_link_desc_return(soc, ring_desc, + HAL_BM_ACTION_PUT_IN_IDLE_LIST) != + QDF_STATUS_SUCCESS) + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to return link desc", __func__); + (*rx_bfs)++; + +end: + if (peer) + dp_peer_unref_del_find_by_id(peer); + + DP_STATS_INC(soc, rx.rx_frag_err, 1); + return QDF_STATUS_E_DEFRAG_ERROR; +} + +/** + * dp_rx_frag_handle() - Handles fragmented Rx frames + * + * @soc: core txrx main context + * @ring_desc: opaque pointer to the REO error ring descriptor + * @mpdu_desc_info: MPDU descriptor information from ring descriptor + * @head: head of the local descriptor free-list + * @tail: tail of the local descriptor free-list + * @quota: No. of units (packets) that can be serviced in one shot. + * + * This function implements RX 802.11 fragmentation handling + * The handling is mostly same as legacy fragmentation handling. + * If required, this function can re-inject the frames back to + * REO ring (with proper setting to by-pass fragmentation check + * but use duplicate detection / re-ordering and routing these frames + * to a different core. + * + * Return: uint32_t: No. of elements processed + */ +uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc, + struct hal_rx_mpdu_desc_info *mpdu_desc_info, + struct dp_rx_desc *rx_desc, + uint8_t *mac_id, + uint32_t quota) +{ + uint32_t rx_bufs_used = 0; + qdf_nbuf_t msdu = NULL; + uint32_t tid; + uint32_t rx_bfs = 0; + struct dp_pdev *pdev; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + qdf_assert(soc); + qdf_assert(mpdu_desc_info); + qdf_assert(rx_desc); + + dp_debug("Number of MSDUs to process, num_msdus: %d", + mpdu_desc_info->msdu_count); + + + if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Not sufficient MSDUs to process"); + return rx_bufs_used; + } + + /* all buffers in MSDU link belong to same pdev */ + pdev = soc->pdev_list[rx_desc->pool_id]; + *mac_id = rx_desc->pool_id; + + if (rx_desc->unmapped) + return rx_bufs_used; + + msdu = rx_desc->nbuf; + + dp_ipa_handle_rx_buf_smmu_mapping(soc, msdu, false); + qdf_nbuf_unmap_single(soc->osdev, msdu, QDF_DMA_FROM_DEVICE); + rx_desc->unmapped = 1; + + rx_desc->rx_buf_start = qdf_nbuf_data(msdu); + + tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_desc->rx_buf_start); + + /* Process fragment-by-fragment */ + status = dp_rx_defrag_store_fragment(soc, ring_desc, + &pdev->free_list_head, + &pdev->free_list_tail, + mpdu_desc_info, + tid, rx_desc, &rx_bfs); + + if (rx_bfs) + rx_bufs_used += rx_bfs; + + if (!QDF_IS_STATUS_SUCCESS(status)) + dp_info_rl("Rx Defrag err seq#:0x%x msdu_count:%d flags:%d", + mpdu_desc_info->mpdu_seq, + mpdu_desc_info->msdu_count, + mpdu_desc_info->mpdu_flags); + + return rx_bufs_used; +} + +QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc, + struct dp_peer *peer, uint16_t tid, + uint16_t rxseq, qdf_nbuf_t nbuf) +{ + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + struct dp_rx_reorder_array_elem *rx_reorder_array_elem; + uint8_t all_frag_present; + uint32_t msdu_len; + QDF_STATUS status; + + rx_reorder_array_elem = peer->rx_tid[tid].array; + + /* + * HW may fill in unexpected peer_id in RX PKT TLV, + * if this peer_id related peer is valid by coincidence, + * but actually this peer won't do dp_peer_rx_init(like SAP vdev + * self peer), then invalid access to rx_reorder_array_elem happened. + */ + if (!rx_reorder_array_elem) { + dp_verbose_debug( + "peer id:%d mac: "QDF_MAC_ADDR_FMT" drop rx frame!", + peer->peer_ids[0], + QDF_MAC_ADDR_REF(peer->mac_addr.raw)); + DP_STATS_INC(soc, rx.err.defrag_peer_uninit, 1); + qdf_nbuf_free(nbuf); + goto fail; + } + + if (rx_reorder_array_elem->head && + rxseq != rx_tid->curr_seq_num) { + /* Drop stored fragments if out of sequence + * fragment is received + */ + dp_rx_reorder_flush_frag(peer, tid); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: No list found for TID %d Seq# %d", + __func__, tid, rxseq); + qdf_nbuf_free(nbuf); + goto fail; + } + + msdu_len = hal_rx_msdu_start_msdu_len_get(qdf_nbuf_data(nbuf)); + + qdf_nbuf_set_pktlen(nbuf, (msdu_len + RX_PKT_TLVS_LEN)); + + status = dp_rx_defrag_fraglist_insert(peer, tid, + &rx_reorder_array_elem->head, + &rx_reorder_array_elem->tail, nbuf, + &all_frag_present); + + if (QDF_IS_STATUS_ERROR(status)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s Fragment insert failed", __func__); + + goto fail; + } + + if (soc->rx.flags.defrag_timeout_check) + dp_rx_defrag_waitlist_remove(peer, tid); + + if (!all_frag_present) { + uint32_t now_ms = + qdf_system_ticks_to_msecs(qdf_system_ticks()); + + peer->rx_tid[tid].defrag_timeout_ms = + now_ms + soc->rx.defrag.timeout_ms; + + dp_rx_defrag_waitlist_add(peer, tid); + + return QDF_STATUS_SUCCESS; + } + + status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head, + rx_reorder_array_elem->tail); + + if (QDF_IS_STATUS_ERROR(status)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s Fragment processing failed", __func__); + + dp_rx_return_head_frag_desc(peer, tid); + dp_rx_defrag_cleanup(peer, tid); + + goto fail; + } + + /* Re-inject the fragments back to REO for further processing */ + status = dp_rx_defrag_reo_reinject(peer, tid, + rx_reorder_array_elem->head); + if (QDF_IS_STATUS_SUCCESS(status)) { + rx_reorder_array_elem->head = NULL; + rx_reorder_array_elem->tail = NULL; + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, + "%s: Frag seq successfully reinjected", + __func__); + } else { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Frag seq reinjection failed", __func__); + dp_rx_return_head_frag_desc(peer, tid); + } + + dp_rx_defrag_cleanup(peer, tid); + return QDF_STATUS_SUCCESS; + +fail: + return QDF_STATUS_E_DEFRAG_ERROR; +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.h new file mode 100644 index 0000000000000000000000000000000000000000..02d2b1df7c480751312a3c94b9acf63164c0da21 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_RX_DEFRAG_H +#define _DP_RX_DEFRAG_H + +#include "hal_rx.h" + +#define DEFRAG_IEEE80211_KEY_LEN 8 +#define DEFRAG_IEEE80211_FCS_LEN 4 + +#define DP_RX_DEFRAG_IEEE80211_ADDR_COPY(dst, src) \ + qdf_mem_copy(dst, src, QDF_MAC_ADDR_SIZE) + +#define DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(wh) \ + (((wh) & \ + (IEEE80211_FC0_TYPE_MASK | QDF_IEEE80211_FC0_SUBTYPE_QOS)) == \ + (IEEE80211_FC0_TYPE_DATA | QDF_IEEE80211_FC0_SUBTYPE_QOS)) + +#define UNI_DESC_OWNER_SW 0x1 +#define UNI_DESC_BUF_TYPE_RX_MSDU_LINK 0x6 +/** + * struct dp_rx_defrag_cipher: structure to indicate cipher header + * @ic_name: Name + * @ic_header: header length + * @ic_trailer: trail length + * @ic_miclen: MIC length + */ +struct dp_rx_defrag_cipher { + const char *ic_name; + uint16_t ic_header; + uint8_t ic_trailer; + uint8_t ic_miclen; +}; + +uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc, + struct hal_rx_mpdu_desc_info *mpdu_desc_info, + struct dp_rx_desc *rx_desc, + uint8_t *mac_id, + uint32_t quota); + +/* + * dp_rx_frag_get_mac_hdr() - Return pointer to the mac hdr + * @rx_desc_info: Pointer to the pkt_tlvs in the + * nbuf (pkt_tlvs->mac_hdr->data) + * + * It is inefficient to peek into the packet for received + * frames but these APIs are required to get to some of + * 802.11 fields that hardware does not populate in the + * rx meta data. + * + * Returns: pointer to ieee80211_frame + */ +static inline +struct ieee80211_frame *dp_rx_frag_get_mac_hdr(uint8_t *rx_desc_info) +{ + int rx_desc_len = SIZE_OF_DATA_RX_TLV; + return (struct ieee80211_frame *)(rx_desc_info + rx_desc_len); +} + +/* + * dp_rx_frag_get_mpdu_seq_number() - Get mpdu sequence number + * @rx_desc_info: Pointer to the pkt_tlvs in the + * nbuf (pkt_tlvs->mac_hdr->data) + * + * Returns: uint16_t, rx sequence number + */ +static inline +uint16_t dp_rx_frag_get_mpdu_seq_number(uint8_t *rx_desc_info) +{ + struct ieee80211_frame *mac_hdr; + mac_hdr = dp_rx_frag_get_mac_hdr(rx_desc_info); + + return qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) >> + IEEE80211_SEQ_SEQ_SHIFT; +} + +/* + * dp_rx_frag_get_mpdu_frag_number() - Get mpdu fragment number + * @rx_desc_info: Pointer to the pkt_tlvs in the + * nbuf (pkt_tlvs->mac_hdr->data) + * + * Returns: uint8_t, receive fragment number + */ +static inline +uint8_t dp_rx_frag_get_mpdu_frag_number(uint8_t *rx_desc_info) +{ + struct ieee80211_frame *mac_hdr; + mac_hdr = dp_rx_frag_get_mac_hdr(rx_desc_info); + + return qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) & + IEEE80211_SEQ_FRAG_MASK; +} + +/* + * dp_rx_frag_get_more_frag_bit() - Get more fragment bit + * @rx_desc_info: Pointer to the pkt_tlvs in the + * nbuf (pkt_tlvs->mac_hdr->data) + * + * Returns: uint8_t, get more fragment bit + */ +static inline +uint8_t dp_rx_frag_get_more_frag_bit(uint8_t *rx_desc_info) +{ + struct ieee80211_frame *mac_hdr; + mac_hdr = dp_rx_frag_get_mac_hdr(rx_desc_info); + + return (mac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG) >> 2; +} + +static inline +uint8_t dp_rx_get_pkt_dir(uint8_t *rx_desc_info) +{ + struct ieee80211_frame *mac_hdr; + mac_hdr = dp_rx_frag_get_mac_hdr(rx_desc_info); + + return mac_hdr->i_fc[1] & IEEE80211_FC1_DIR_MASK; +} + +void dp_rx_defrag_waitlist_flush(struct dp_soc *soc); +void dp_rx_reorder_flush_frag(struct dp_peer *peer, + unsigned int tid); +void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid); +void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid); + +QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc, + struct dp_peer *peer, uint16_t tid, + uint16_t rxseq, qdf_nbuf_t nbuf); +#endif /* _DP_RX_DEFRAG_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_desc.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_desc.c new file mode 100644 index 0000000000000000000000000000000000000000..e80121e85612c0e1079bf22e870514ef88fae4b6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_desc.c @@ -0,0 +1,340 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "dp_types.h" +#include "dp_rx.h" +#include "dp_ipa.h" + +#ifdef RX_DESC_MULTI_PAGE_ALLOC +A_COMPILE_TIME_ASSERT(cookie_size_check, + PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <= + 1 << DP_RX_DESC_PAGE_ID_SHIFT); + +QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id, + uint32_t num_elem, + struct rx_desc_pool *rx_desc_pool) +{ + uint32_t id, page_id, offset, desc_size, num_desc_per_page; + uint32_t count = 0; + union dp_rx_desc_list_elem_t *rx_desc_elem; + + desc_size = sizeof(*rx_desc_elem); + rx_desc_pool->elem_size = desc_size; + if (!dp_is_soc_reinit(soc)) { + dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type, + &rx_desc_pool->desc_pages, + desc_size, num_elem, 0, true); + if (!rx_desc_pool->desc_pages.num_pages) { + qdf_err("Multi page alloc fail,size=%d, elem=%d", + desc_size, num_elem); + return QDF_STATUS_E_NOMEM; + } + } + + num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page; + rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *) + *rx_desc_pool->desc_pages.cacheable_pages; + if (qdf_mem_multi_page_link(soc->osdev, + &rx_desc_pool->desc_pages, + desc_size, num_elem, true)) { + qdf_err("overflow num link,size=%d, elem=%d", + desc_size, num_elem); + goto free_rx_desc_pool; + } + /* Initialize the lock */ + qdf_spinlock_create(&rx_desc_pool->lock); + qdf_spin_lock_bh(&rx_desc_pool->lock); + rx_desc_pool->pool_size = num_elem; + + rx_desc_elem = rx_desc_pool->freelist; + while (rx_desc_elem) { + page_id = count / num_desc_per_page; + offset = count % num_desc_per_page; + /* + * Below cookie size is from REO destination ring + * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie + * cookie size = 21 bits + * 8 bits - offset + * 8 bits - page ID + * 4 bits - pool ID + */ + id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) | + (page_id << DP_RX_DESC_PAGE_ID_SHIFT) | + offset); + rx_desc_elem->rx_desc.cookie = id; + rx_desc_elem->rx_desc.pool_id = pool_id; + rx_desc_elem->rx_desc.in_use = 0; + rx_desc_elem = rx_desc_elem->next; + count++; + } + qdf_spin_unlock_bh(&rx_desc_pool->lock); + return QDF_STATUS_SUCCESS; + +free_rx_desc_pool: + dp_rx_desc_pool_free(soc, rx_desc_pool); + + return QDF_STATUS_E_FAULT; +} + +union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset, + struct rx_desc_pool *rx_desc_pool) +{ + return rx_desc_pool->desc_pages.cacheable_pages[page_id] + + rx_desc_pool->elem_size * offset; +} + +static QDF_STATUS __dp_rx_desc_nbuf_free(struct dp_soc *soc, + struct rx_desc_pool *rx_desc_pool) +{ + uint32_t i, num_desc, page_id, offset, num_desc_per_page; + union dp_rx_desc_list_elem_t *rx_desc_elem; + struct dp_rx_desc *rx_desc; + qdf_nbuf_t nbuf; + + if (qdf_unlikely(!(rx_desc_pool-> + desc_pages.cacheable_pages))) { + qdf_err("No pages found on this desc pool"); + return QDF_STATUS_E_INVAL; + } + num_desc = rx_desc_pool->pool_size; + num_desc_per_page = + rx_desc_pool->desc_pages.num_element_per_page; + for (i = 0; i < num_desc; i++) { + page_id = i / num_desc_per_page; + offset = i % num_desc_per_page; + rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool); + rx_desc = &rx_desc_elem->rx_desc; + dp_rx_desc_free_dbg_info(rx_desc); + if (rx_desc->in_use) { + nbuf = rx_desc->nbuf; + if (!rx_desc->unmapped) { + dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, + false); + qdf_nbuf_unmap_single(soc->osdev, nbuf, + QDF_DMA_BIDIRECTIONAL); + } + qdf_nbuf_free(nbuf); + } + } + + return QDF_STATUS_SUCCESS; +} + +void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id, + struct rx_desc_pool *rx_desc_pool) +{ + QDF_STATUS qdf_status; + + qdf_spin_lock_bh(&rx_desc_pool->lock); + qdf_status = __dp_rx_desc_nbuf_free(soc, rx_desc_pool); + if (QDF_IS_STATUS_SUCCESS(qdf_status)) + dp_rx_desc_pool_free(soc, rx_desc_pool); + qdf_spin_unlock_bh(&rx_desc_pool->lock); + + qdf_spinlock_destroy(&rx_desc_pool->lock); +} + +void dp_rx_desc_nbuf_free(struct dp_soc *soc, + struct rx_desc_pool *rx_desc_pool) +{ + qdf_spin_lock_bh(&rx_desc_pool->lock); + __dp_rx_desc_nbuf_free(soc, rx_desc_pool); + qdf_spin_unlock_bh(&rx_desc_pool->lock); + + qdf_spinlock_destroy(&rx_desc_pool->lock); +} + +void dp_rx_desc_pool_free(struct dp_soc *soc, + struct rx_desc_pool *rx_desc_pool) +{ + if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) + return; + dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type, + &rx_desc_pool->desc_pages, 0, true); +} +#else +QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id, + uint32_t pool_size, struct rx_desc_pool *rx_desc_pool) +{ + uint32_t i; + + if (!dp_is_soc_reinit(soc)) { + rx_desc_pool->array = + qdf_mem_malloc(pool_size * + sizeof(union dp_rx_desc_list_elem_t)); + + if (!(rx_desc_pool->array)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s: RX Desc Pool[%d] allocation failed", + __func__, pool_id); + return QDF_STATUS_E_NOMEM; + } + } + + /* Initialize the lock */ + qdf_spinlock_create(&rx_desc_pool->lock); + + qdf_spin_lock_bh(&rx_desc_pool->lock); + rx_desc_pool->pool_size = pool_size; + + /* link SW rx descs into a freelist */ + rx_desc_pool->freelist = &rx_desc_pool->array[0]; + for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) { + if (i == rx_desc_pool->pool_size - 1) + rx_desc_pool->array[i].next = NULL; + else + rx_desc_pool->array[i].next = + &rx_desc_pool->array[i + 1]; + rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18); + rx_desc_pool->array[i].rx_desc.pool_id = pool_id; + rx_desc_pool->array[i].rx_desc.in_use = 0; + } + + qdf_spin_unlock_bh(&rx_desc_pool->lock); + return QDF_STATUS_SUCCESS; +} + +void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id, + struct rx_desc_pool *rx_desc_pool) +{ + qdf_nbuf_t nbuf; + int i; + + qdf_spin_lock_bh(&rx_desc_pool->lock); + for (i = 0; i < rx_desc_pool->pool_size; i++) { + if (rx_desc_pool->array[i].rx_desc.in_use) { + nbuf = rx_desc_pool->array[i].rx_desc.nbuf; + + if (!(rx_desc_pool->array[i].rx_desc.unmapped)) { + dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, + false); + + qdf_nbuf_unmap_single(soc->osdev, nbuf, + QDF_DMA_FROM_DEVICE); + } + qdf_nbuf_free(nbuf); + } + } + qdf_mem_free(rx_desc_pool->array); + qdf_spin_unlock_bh(&rx_desc_pool->lock); + qdf_spinlock_destroy(&rx_desc_pool->lock); +} + +void dp_rx_desc_nbuf_free(struct dp_soc *soc, + struct rx_desc_pool *rx_desc_pool) +{ + qdf_nbuf_t nbuf; + int i; + + qdf_spin_lock_bh(&rx_desc_pool->lock); + for (i = 0; i < rx_desc_pool->pool_size; i++) { + if (rx_desc_pool->array[i].rx_desc.in_use) { + nbuf = rx_desc_pool->array[i].rx_desc.nbuf; + + if (!(rx_desc_pool->array[i].rx_desc.unmapped)) { + dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, + false); + + qdf_nbuf_unmap_single(soc->osdev, nbuf, + QDF_DMA_FROM_DEVICE); + } + + qdf_nbuf_free(nbuf); + } + } + qdf_spin_unlock_bh(&rx_desc_pool->lock); + qdf_spinlock_destroy(&rx_desc_pool->lock); +} + +void dp_rx_desc_pool_free(struct dp_soc *soc, + struct rx_desc_pool *rx_desc_pool) +{ + qdf_mem_free(rx_desc_pool->array); +} +#endif /* RX_DESC_MULTI_PAGE_ALLOC */ +/* + * dp_rx_get_free_desc_list() - provide a list of descriptors from + * the free rx desc pool. + * + * @soc: core txrx main context + * @pool_id: pool_id which is one of 3 mac_ids + * @rx_desc_pool: rx descriptor pool pointer + * @num_descs: number of descs requested from freelist + * @desc_list: attach the descs to this list (output parameter) + * @tail: attach the point to last desc of free list (output parameter) + * + * Return: number of descs allocated from free list. + */ +uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id, + struct rx_desc_pool *rx_desc_pool, + uint16_t num_descs, + union dp_rx_desc_list_elem_t **desc_list, + union dp_rx_desc_list_elem_t **tail) +{ + uint16_t count; + + qdf_spin_lock_bh(&rx_desc_pool->lock); + + *desc_list = *tail = rx_desc_pool->freelist; + + for (count = 0; count < num_descs; count++) { + + if (qdf_unlikely(!rx_desc_pool->freelist)) { + qdf_spin_unlock_bh(&rx_desc_pool->lock); + return count; + } + *tail = rx_desc_pool->freelist; + rx_desc_pool->freelist = rx_desc_pool->freelist->next; + } + (*tail)->next = NULL; + qdf_spin_unlock_bh(&rx_desc_pool->lock); + return count; +} + +/* + * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to + * freelist. + * + * @soc: core txrx main context + * @local_desc_list: local desc list provided by the caller + * @tail: attach the point to last desc of local desc list + * @pool_id: pool_id which is one of 3 mac_ids + * @rx_desc_pool: rx descriptor pool pointer + */ +void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc, + union dp_rx_desc_list_elem_t **local_desc_list, + union dp_rx_desc_list_elem_t **tail, + uint16_t pool_id, + struct rx_desc_pool *rx_desc_pool) +{ + union dp_rx_desc_list_elem_t *temp_list = NULL; + + qdf_spin_lock_bh(&rx_desc_pool->lock); + + + temp_list = rx_desc_pool->freelist; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK", + temp_list, *local_desc_list, *tail, (*tail)->next); + rx_desc_pool->freelist = *local_desc_list; + (*tail)->next = temp_list; + *tail = NULL; + *local_desc_list = NULL; + + qdf_spin_unlock_bh(&rx_desc_pool->lock); +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c new file mode 100644 index 0000000000000000000000000000000000000000..2d7d2c296174a71074687716490a580dfec54dbc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c @@ -0,0 +1,2556 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hal_hw_headers.h" +#include "dp_types.h" +#include "dp_rx.h" +#include "dp_peer.h" +#include "dp_internal.h" +#include "hal_api.h" +#include "qdf_trace.h" +#include "qdf_nbuf.h" +#include "dp_rx_defrag.h" +#include "dp_ipa.h" +#ifdef FEATURE_WDS +#include "dp_txrx_wds.h" +#endif +#include /* LLC_SNAP_HDR_LEN */ +#include "qdf_net_types.h" + +/* Max buffer in invalid peer SG list*/ +#define DP_MAX_INVALID_BUFFERS 10 + +/** + * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop + * back on same vap or a different vap. + * + * @soc: core DP main context + * @peer: dp peer handler + * @rx_tlv_hdr: start of the rx TLV header + * @nbuf: pkt buffer + * + * Return: bool (true if it is a looped back pkt else false) + * + */ +static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc, + struct dp_peer *peer, + uint8_t *rx_tlv_hdr, + qdf_nbuf_t nbuf) +{ + struct dp_vdev *vdev = peer->vdev; + struct dp_ast_entry *ase = NULL; + uint16_t sa_idx = 0; + uint8_t *data; + + /* + * Multicast Echo Check is required only if vdev is STA and + * received pkt is a multicast/broadcast pkt. otherwise + * skip the MEC check. + */ + if (vdev->opmode != wlan_op_mode_sta) + return false; + + if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr)) + return false; + + data = qdf_nbuf_data(nbuf); + /* + * if the received pkts src mac addr matches with vdev + * mac address then drop the pkt as it is looped back + */ + if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE], + vdev->mac_addr.raw, + QDF_MAC_ADDR_SIZE))) + return true; + + /* + * In case of qwrap isolation mode, donot drop loopback packets. + * In isolation mode, all packets from the wired stations need to go + * to rootap and loop back to reach the wireless stations and + * vice-versa. + */ + if (qdf_unlikely(vdev->isolation_vdev)) + return false; + + /* if the received pkts src mac addr matches with the + * wired PCs MAC addr which is behind the STA or with + * wireless STAs MAC addr which are behind the Repeater, + * then drop the pkt as it is looped back + */ + qdf_spin_lock_bh(&soc->ast_lock); + if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) { + sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr); + + if ((sa_idx < 0) || + (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) { + qdf_spin_unlock_bh(&soc->ast_lock); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "invalid sa_idx: %d", sa_idx); + qdf_assert_always(0); + } + + ase = soc->ast_table[sa_idx]; + if (!ase) { + /* We do not get a peer map event for STA and without + * this event we don't know what is STA's sa_idx. + * For this reason the AST is still not associated to + * any index postion in ast_table. + * In these kind of scenarios where sa is valid but + * ast is not in ast_table, we use the below API to get + * AST entry for STA's own mac_address. + */ + ase = dp_peer_ast_list_find(soc, peer, + &data[QDF_MAC_ADDR_SIZE]); + if (ase) { + ase->ast_idx = sa_idx; + soc->ast_table[sa_idx] = ase; + ase->is_mapped = TRUE; + } + } + } else { + ase = dp_peer_ast_hash_find_by_pdevid(soc, + &data[QDF_MAC_ADDR_SIZE], + vdev->pdev->pdev_id); + } + + if (ase) { + + if (ase->pdev_id != vdev->pdev->pdev_id) { + qdf_spin_unlock_bh(&soc->ast_lock); + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_INFO, + "Detected DBDC Root AP "QDF_MAC_ADDR_FMT", %d %d", + QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]), + vdev->pdev->pdev_id, + ase->pdev_id); + return false; + } + + if ((ase->type == CDP_TXRX_AST_TYPE_MEC) || + (ase->peer != peer)) { + qdf_spin_unlock_bh(&soc->ast_lock); + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_INFO, + "received pkt with same src mac "QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE])); + + return true; + } + } + qdf_spin_unlock_bh(&soc->ast_lock); + return false; +} + +void dp_rx_link_desc_refill_duplicate_check( + struct dp_soc *soc, + struct hal_buf_info *buf_info, + hal_buff_addrinfo_t ring_buf_info) +{ + struct hal_buf_info current_link_desc_buf_info = { 0 }; + + /* do duplicate link desc address check */ + hal_rx_buffer_addr_info_get_paddr(ring_buf_info, + ¤t_link_desc_buf_info); + if (qdf_unlikely(current_link_desc_buf_info.paddr == + buf_info->paddr)) { + dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x", + current_link_desc_buf_info.paddr, + current_link_desc_buf_info.sw_cookie); + DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1); + } + *buf_info = current_link_desc_buf_info; +} + +/** + * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to + * (WBM) by address + * + * @soc: core DP main context + * @link_desc_addr: link descriptor addr + * + * Return: QDF_STATUS + */ +QDF_STATUS +dp_rx_link_desc_return_by_addr(struct dp_soc *soc, + hal_buff_addrinfo_t link_desc_addr, + uint8_t bm_action) +{ + struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring; + hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng; + hal_soc_handle_t hal_soc = soc->hal_soc; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + void *src_srng_desc; + + if (!wbm_rel_srng) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "WBM RELEASE RING not initialized"); + return status; + } + + /* do duplicate link desc address check */ + dp_rx_link_desc_refill_duplicate_check( + soc, + &soc->last_op_info.wbm_rel_link_desc, + link_desc_addr); + + if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) { + + /* TODO */ + /* + * Need API to convert from hal_ring pointer to + * Ring Type / Ring Id combo + */ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("HAL RING Access For WBM Release SRNG Failed - %pK"), + wbm_rel_srng); + DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); + goto done; + } + src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng); + if (qdf_likely(src_srng_desc)) { + /* Return link descriptor through WBM ring (SW2WBM)*/ + hal_rx_msdu_link_desc_set(hal_soc, + src_srng_desc, link_desc_addr, bm_action); + status = QDF_STATUS_SUCCESS; + } else { + struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng; + + DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1); + + dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)", + srng->ring_id, + soc->stats.rx.err.hal_ring_access_full_fail); + dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x", + *srng->u.src_ring.hp_addr, + srng->u.src_ring.reap_hp, + *srng->u.src_ring.tp_addr, + srng->u.src_ring.cached_tp); + QDF_BUG(0); + } +done: + hal_srng_access_end(hal_soc, wbm_rel_srng); + return status; + +} + +/** + * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW + * (WBM), following error handling + * + * @soc: core DP main context + * @ring_desc: opaque pointer to the REO error ring descriptor + * + * Return: QDF_STATUS + */ +QDF_STATUS +dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc, + uint8_t bm_action) +{ + void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc); + + return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action); +} + +/** + * dp_rx_msdus_drop() - Drops all MSDU's per MPDU + * + * @soc: core txrx main context + * @ring_desc: opaque pointer to the REO error ring descriptor + * @mpdu_desc_info: MPDU descriptor information from ring descriptor + * @head: head of the local descriptor free-list + * @tail: tail of the local descriptor free-list + * @quota: No. of units (packets) that can be serviced in one shot. + * + * This function is used to drop all MSDU in an MPDU + * + * Return: uint32_t: No. of elements processed + */ +static uint32_t +dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc, + struct hal_rx_mpdu_desc_info *mpdu_desc_info, + uint8_t *mac_id, + uint32_t quota) +{ + uint32_t rx_bufs_used = 0; + void *link_desc_va; + struct hal_buf_info buf_info; + struct dp_pdev *pdev; + struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ + int i; + uint8_t *rx_tlv_hdr; + uint32_t tid; + + hal_rx_reo_buf_paddr_get(ring_desc, &buf_info); + + link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); + + /* No UNMAP required -- this is "malloc_consistent" memory */ + hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, + &mpdu_desc_info->msdu_count); + + for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) { + struct dp_rx_desc *rx_desc = + dp_rx_cookie_2_va_rxdma_buf(soc, + msdu_list.sw_cookie[i]); + + qdf_assert_always(rx_desc); + + /* all buffers from a MSDU link link belong to same pdev */ + *mac_id = rx_desc->pool_id; + pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); + + if (!dp_rx_desc_check_magic(rx_desc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Invalid rx_desc cookie=%d"), + msdu_list.sw_cookie[i]); + return rx_bufs_used; + } + + dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf, false); + qdf_nbuf_unmap_single(soc->osdev, + rx_desc->nbuf, QDF_DMA_FROM_DEVICE); + + rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf); + + rx_bufs_used++; + tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, + rx_desc->rx_buf_start); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Packet received with PN error for tid :%d", tid); + + rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf); + if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr)) + hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr); + + /* Just free the buffers */ + qdf_nbuf_free(rx_desc->nbuf); + + dp_rx_add_to_free_desc_list(&pdev->free_list_head, + &pdev->free_list_tail, rx_desc); + } + + /* Return link descriptor through WBM ring (SW2WBM)*/ + dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST); + + return rx_bufs_used; +} + +/** + * dp_rx_pn_error_handle() - Handles PN check errors + * + * @soc: core txrx main context + * @ring_desc: opaque pointer to the REO error ring descriptor + * @mpdu_desc_info: MPDU descriptor information from ring descriptor + * @head: head of the local descriptor free-list + * @tail: tail of the local descriptor free-list + * @quota: No. of units (packets) that can be serviced in one shot. + * + * This function implements PN error handling + * If the peer is configured to ignore the PN check errors + * or if DP feels, that this frame is still OK, the frame can be + * re-injected back to REO to use some of the other features + * of REO e.g. duplicate detection/routing to other cores + * + * Return: uint32_t: No. of elements processed + */ +static uint32_t +dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc, + struct hal_rx_mpdu_desc_info *mpdu_desc_info, + uint8_t *mac_id, + uint32_t quota) +{ + uint16_t peer_id; + uint32_t rx_bufs_used = 0; + struct dp_peer *peer; + bool peer_pn_policy = false; + + peer_id = DP_PEER_METADATA_PEER_ID_GET( + mpdu_desc_info->peer_meta_data); + + + peer = dp_peer_find_by_id(soc, peer_id); + + if (qdf_likely(peer)) { + /* + * TODO: Check for peer specific policies & set peer_pn_policy + */ + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "discard rx due to PN error for peer %pK "QDF_MAC_ADDR_FMT, + peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw)); + + dp_peer_unref_del_find_by_id(peer); + } + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Packet received with PN error"); + + /* No peer PN policy -- definitely drop */ + if (!peer_pn_policy) + rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc, + mpdu_desc_info, + mac_id, quota); + + return rx_bufs_used; +} + +/** + * dp_rx_oor_handle() - Handles the msdu which is OOR error + * + * @soc: core txrx main context + * @nbuf: pointer to msdu skb + * @peer_id: dp peer ID + * @rx_tlv_hdr: start of rx tlv header + * + * This function process the msdu delivered from REO2TCL + * ring with error type OOR + * + * Return: None + */ +static void +dp_rx_oor_handle(struct dp_soc *soc, + qdf_nbuf_t nbuf, + uint16_t peer_id, + uint8_t *rx_tlv_hdr) +{ + uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP | + FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP; + struct dp_peer *peer = NULL; + + peer = dp_peer_find_by_id(soc, peer_id); + if (!peer) { + dp_info_rl("peer not found"); + goto free_nbuf; + } + + if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask, + rx_tlv_hdr)) { + DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1); + dp_peer_unref_del_find_by_id(peer); + return; + } + +free_nbuf: + if (peer) + dp_peer_unref_del_find_by_id(peer); + + DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1); + qdf_nbuf_free(nbuf); +} + +/** + * dp_rx_reo_err_entry_process() - Handles for REO error entry processing + * + * @soc: core txrx main context + * @ring_desc: opaque pointer to the REO error ring descriptor + * @mpdu_desc_info: pointer to mpdu level description info + * @link_desc_va: pointer to msdu_link_desc virtual address + * @err_code: reo erro code fetched from ring entry + * + * Function to handle msdus fetched from msdu link desc, currently + * only support 2K jump, OOR error. + * + * Return: msdu count processed. + */ +static uint32_t +dp_rx_reo_err_entry_process(struct dp_soc *soc, + void *ring_desc, + struct hal_rx_mpdu_desc_info *mpdu_desc_info, + void *link_desc_va, + enum hal_reo_error_code err_code) +{ + uint32_t rx_bufs_used = 0; + struct dp_pdev *pdev; + int i; + uint8_t *rx_tlv_hdr_first; + uint8_t *rx_tlv_hdr_last; + uint32_t tid = DP_MAX_TIDS; + uint16_t peer_id; + struct dp_rx_desc *rx_desc; + qdf_nbuf_t nbuf; + struct hal_buf_info buf_info; + struct hal_rx_msdu_list msdu_list; + uint16_t num_msdus; + struct buffer_addr_info cur_link_desc_addr_info = { 0 }; + struct buffer_addr_info next_link_desc_addr_info = { 0 }; + /* First field in REO Dst ring Desc is buffer_addr_info */ + void *buf_addr_info = ring_desc; + qdf_nbuf_t head_nbuf = NULL; + qdf_nbuf_t tail_nbuf = NULL; + uint16_t msdu_processed = 0; + + peer_id = DP_PEER_METADATA_PEER_ID_GET( + mpdu_desc_info->peer_meta_data); + +more_msdu_link_desc: + hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, + &num_msdus); + for (i = 0; i < num_msdus; i++) { + rx_desc = dp_rx_cookie_2_va_rxdma_buf( + soc, + msdu_list.sw_cookie[i]); + + qdf_assert_always(rx_desc); + + /* all buffers from a MSDU link belong to same pdev */ + pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); + + nbuf = rx_desc->nbuf; + dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, false); + qdf_nbuf_unmap_single(soc->osdev, + nbuf, QDF_DMA_FROM_DEVICE); + + QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len; + rx_bufs_used++; + dp_rx_add_to_free_desc_list(&pdev->free_list_head, + &pdev->free_list_tail, rx_desc); + + DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf); + + if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags & + HAL_MSDU_F_MSDU_CONTINUATION)) + continue; + + rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf); + rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf); + + if (qdf_unlikely(head_nbuf != tail_nbuf)) { + nbuf = dp_rx_sg_create(soc, head_nbuf); + qdf_nbuf_set_is_frag(nbuf, 1); + DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1); + } + + switch (err_code) { + case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: + /* + * only first msdu, mpdu start description tlv valid? + * and use it for following msdu. + */ + if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, + rx_tlv_hdr_last)) + tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, + rx_tlv_hdr_first); + + dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last, + peer_id, tid); + break; + + case HAL_REO_ERR_REGULAR_FRAME_OOR: + dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last); + break; + default: + dp_err_rl("Non-support error code %d", err_code); + qdf_nbuf_free(nbuf); + } + + msdu_processed++; + head_nbuf = NULL; + tail_nbuf = NULL; + } + + if (msdu_processed < mpdu_desc_info->msdu_count) { + hal_rx_get_next_msdu_link_desc_buf_addr_info( + link_desc_va, + &next_link_desc_addr_info); + + if (hal_rx_is_buf_addr_info_valid( + &next_link_desc_addr_info)) { + dp_rx_link_desc_return_by_addr( + soc, + buf_addr_info, + HAL_BM_ACTION_PUT_IN_IDLE_LIST); + + hal_rx_buffer_addr_info_get_paddr( + &next_link_desc_addr_info, + &buf_info); + link_desc_va = + dp_rx_cookie_2_link_desc_va(soc, &buf_info); + cur_link_desc_addr_info = next_link_desc_addr_info; + buf_addr_info = &cur_link_desc_addr_info; + + goto more_msdu_link_desc; + } + } + + dp_rx_link_desc_return_by_addr(soc, buf_addr_info, + HAL_BM_ACTION_PUT_IN_IDLE_LIST); + if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count)) + DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1); + + return rx_bufs_used; +} + +#ifdef DP_INVALID_PEER_ASSERT +#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \ + do { \ + qdf_assert_always(!(head)); \ + qdf_assert_always(!(tail)); \ + } while (0) +#else +#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */ +#endif + +/** + * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu + * to pdev invalid peer list + * + * @soc: core DP main context + * @nbuf: Buffer pointer + * @rx_tlv_hdr: start of rx tlv header + * @mac_id: mac id + * + * Return: bool: true for last msdu of mpdu + */ +static bool +dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, uint8_t mac_id) +{ + bool mpdu_done = false; + qdf_nbuf_t curr_nbuf = NULL; + qdf_nbuf_t tmp_nbuf = NULL; + + /* TODO: Currently only single radio is supported, hence + * pdev hard coded to '0' index + */ + struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + + /* if invalid peer SG list has max values free the buffers in list + * and treat current buffer as start of list + * + * current logic to detect the last buffer from attn_tlv is not reliable + * in OFDMA UL scenario hence add max buffers check to avoid list pile + * up + */ + if (!dp_pdev->first_nbuf || + (dp_pdev->invalid_peer_head_msdu && + QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST + (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) { + qdf_nbuf_set_rx_chfrag_start(nbuf, 1); + dp_pdev->ppdu_id = hal_rx_hw_desc_get_ppduid_get(soc->hal_soc, + rx_tlv_hdr); + dp_pdev->first_nbuf = true; + + /* If the new nbuf received is the first msdu of the + * amsdu and there are msdus in the invalid peer msdu + * list, then let us free all the msdus of the invalid + * peer msdu list. + * This scenario can happen when we start receiving + * new a-msdu even before the previous a-msdu is completely + * received. + */ + curr_nbuf = dp_pdev->invalid_peer_head_msdu; + while (curr_nbuf) { + tmp_nbuf = curr_nbuf->next; + qdf_nbuf_free(curr_nbuf); + curr_nbuf = tmp_nbuf; + } + + dp_pdev->invalid_peer_head_msdu = NULL; + dp_pdev->invalid_peer_tail_msdu = NULL; + hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr, + &(dp_pdev->ppdu_info.rx_status)); + + } + + if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) && + hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { + qdf_nbuf_set_rx_chfrag_end(nbuf, 1); + qdf_assert_always(dp_pdev->first_nbuf == true); + dp_pdev->first_nbuf = false; + mpdu_done = true; + } + + /* + * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu + * should be NULL here, add the checking for debugging purpose + * in case some corner case. + */ + DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu, + dp_pdev->invalid_peer_tail_msdu); + DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu, + dp_pdev->invalid_peer_tail_msdu, + nbuf); + + return mpdu_done; +} + +static +void dp_rx_err_handle_bar(struct dp_soc *soc, + struct dp_peer *peer, + qdf_nbuf_t nbuf) +{ + uint8_t *rx_tlv_hdr; + unsigned char type, subtype; + uint16_t start_seq_num; + uint32_t tid; + struct ieee80211_frame_bar *bar; + + /* + * 1. Is this a BAR frame. If not Discard it. + * 2. If it is, get the peer id, tid, ssn + * 2a Do a tid update + */ + + rx_tlv_hdr = qdf_nbuf_data(nbuf); + bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + SIZE_OF_DATA_RX_TLV); + + type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK; + subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; + + if (!(type == IEEE80211_FC0_TYPE_CTL && + subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) { + dp_err_rl("Not a BAR frame!"); + return; + } + + tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr); + qdf_assert_always(tid < DP_MAX_TIDS); + + start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT; + + dp_info_rl("tid %u window_size %u start_seq_num %u", + tid, peer->rx_tid[tid].ba_win_size, start_seq_num); + + dp_rx_tid_update_wifi3(peer, tid, + peer->rx_tid[tid].ba_win_size, + start_seq_num); +} + +/** + * dp_rx_bar_frame_handle() - Function to handle err BAR frames + * @soc: core DP main context + * @ring_desc: Hal ring desc + * @rx_desc: dp rx desc + * @mpdu_desc_info: mpdu desc info + * + * Handle the error BAR frames received. Ensure the SOC level + * stats are updated based on the REO error code. The BAR frames + * are further processed by updating the Rx tids with the start + * sequence number (SSN) and BA window size. Desc is returned + * to the free desc list + * + * Return: none + */ +static void +dp_rx_bar_frame_handle(struct dp_soc *soc, + hal_ring_desc_t ring_desc, + struct dp_rx_desc *rx_desc, + struct hal_rx_mpdu_desc_info *mpdu_desc_info) +{ + qdf_nbuf_t nbuf; + struct dp_pdev *pdev; + struct dp_peer *peer; + struct rx_desc_pool *rx_desc_pool; + uint16_t peer_id; + uint8_t *rx_tlv_hdr; + uint32_t tid; + uint8_t reo_err_code; + + nbuf = rx_desc->nbuf; + rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; + dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, + false); + qdf_nbuf_unmap_single(soc->osdev, nbuf, + QDF_DMA_FROM_DEVICE); + rx_desc->unmapped = 1; + rx_tlv_hdr = qdf_nbuf_data(nbuf); + peer_id = + hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, + rx_tlv_hdr); + peer = dp_peer_find_by_id(soc, peer_id); + tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, + rx_tlv_hdr); + pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id); + + if (!peer) + goto next; + + reo_err_code = HAL_RX_REO_ERROR_GET(ring_desc); + dp_info("BAR frame: peer = "QDF_MAC_ADDR_FMT + " peer_id = %d" + " tid = %u" + " SSN = %d" + " error code = %d", + QDF_MAC_ADDR_REF(peer->mac_addr.raw), + peer_id, + tid, + mpdu_desc_info->mpdu_seq, + reo_err_code); + + switch (reo_err_code) { + case HAL_REO_ERR_BAR_FRAME_2K_JUMP: + DP_STATS_INC(soc, + rx.err.reo_error[reo_err_code], 1); + case HAL_REO_ERR_BAR_FRAME_OOR: + dp_rx_err_handle_bar(soc, peer, nbuf); + DP_STATS_INC(soc, + rx.err.reo_error[reo_err_code], 1); + break; + default: + DP_STATS_INC(soc, rx.bar_frame, 1); + } + + dp_peer_unref_del_find_by_id(peer); +next: + dp_rx_link_desc_return(soc, ring_desc, + HAL_BM_ACTION_PUT_IN_IDLE_LIST); + dp_rx_add_to_free_desc_list(&pdev->free_list_head, + &pdev->free_list_tail, + rx_desc); + qdf_nbuf_free(nbuf); +} + +/** + * dp_2k_jump_handle() - Function to handle 2k jump exception + * on WBM ring + * + * @soc: core DP main context + * @nbuf: buffer pointer + * @rx_tlv_hdr: start of rx tlv header + * @peer_id: peer id of first msdu + * @tid: Tid for which exception occurred + * + * This function handles 2k jump violations arising out + * of receiving aggregates in non BA case. This typically + * may happen if aggregates are received on a QOS enabled TID + * while Rx window size is still initialized to value of 2. Or + * it may also happen if negotiated window size is 1 but peer + * sends aggregates. + * + */ + +void +dp_2k_jump_handle(struct dp_soc *soc, + qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, + uint16_t peer_id, + uint8_t tid) +{ + struct dp_peer *peer = NULL; + struct dp_rx_tid *rx_tid = NULL; + uint32_t frame_mask = FRAME_MASK_IPV4_ARP; + + peer = dp_peer_find_by_id(soc, peer_id); + if (!peer) { + dp_info_rl("peer not found"); + goto free_nbuf; + } + + if (tid >= DP_MAX_TIDS) { + dp_info_rl("invalid tid"); + goto nbuf_deliver; + } + + rx_tid = &peer->rx_tid[tid]; + qdf_spin_lock_bh(&rx_tid->tid_lock); + + /* only if BA session is active, allow send Delba */ + if (rx_tid->ba_status != DP_RX_BA_ACTIVE) { + qdf_spin_unlock_bh(&rx_tid->tid_lock); + goto nbuf_deliver; + } + + if (!rx_tid->delba_tx_status) { + rx_tid->delba_tx_retry++; + rx_tid->delba_tx_status = 1; + rx_tid->delba_rcode = + IEEE80211_REASON_QOS_SETUP_REQUIRED; + qdf_spin_unlock_bh(&rx_tid->tid_lock); + if (soc->cdp_soc.ol_ops->send_delba) { + DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent, 1); + soc->cdp_soc.ol_ops->send_delba( + peer->vdev->pdev->soc->ctrl_psoc, + peer->vdev->vdev_id, + peer->mac_addr.raw, + tid, + rx_tid->delba_rcode); + } + } else { + qdf_spin_unlock_bh(&rx_tid->tid_lock); + } + +nbuf_deliver: + if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask, + rx_tlv_hdr)) { + DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1); + dp_peer_unref_del_find_by_id(peer); + return; + } + +free_nbuf: + if (peer) + dp_peer_unref_del_find_by_id(peer); + + DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1); + qdf_nbuf_free(nbuf); +} + +#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \ + defined(QCA_WIFI_QCA6750) +/** + * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception + * @soc: pointer to dp_soc struct + * @pool_id: Pool id to find dp_pdev + * @rx_tlv_hdr: TLV header of received packet + * @nbuf: SKB + * + * In certain types of packets if peer_id is not correct then + * driver may not be able find. Try finding peer by addr_2 of + * received MPDU. If you find the peer then most likely sw_peer_id & + * ast_idx is corrupted. + * + * Return: True if you find the peer by addr_2 of received MPDU else false + */ +static bool +dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc, + uint8_t pool_id, + uint8_t *rx_tlv_hdr, + qdf_nbuf_t nbuf) +{ + struct dp_peer *peer = NULL; + uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr); + struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id); + struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr; + + /* + * WAR- In certain types of packets if peer_id is not correct then + * driver may not be able find. Try finding peer by addr_2 of + * received MPDU + */ + if (wh) + peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, + wh->i_addr2); + if (peer) { + dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted"); + hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, + QDF_TRACE_LEVEL_DEBUG); + DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id, + 1, qdf_nbuf_len(nbuf)); + qdf_nbuf_free(nbuf); + + return true; + } + return false; +} + +/** + * dp_rx_check_pkt_len() - Check for pktlen validity + * @soc: DP SOC context + * @pkt_len: computed length of the pkt from caller in bytes + * + * Return: true if pktlen > RX_BUFFER_SIZE, else return false + * + */ +static inline +bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len) +{ + if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) { + DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len, + 1, pkt_len); + return true; + } else { + return false; + } +} + +#else +static inline bool +dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc, + uint8_t pool_id, + uint8_t *rx_tlv_hdr, + qdf_nbuf_t nbuf) +{ + return false; +} + +static inline +bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len) +{ + return false; +} + +#endif + +/** + * dp_rx_null_q_desc_handle() - Function to handle NULL Queue + * descriptor violation on either a + * REO or WBM ring + * + * @soc: core DP main context + * @nbuf: buffer pointer + * @rx_tlv_hdr: start of rx tlv header + * @pool_id: mac id + * @peer: peer handle + * + * This function handles NULL queue descriptor violations arising out + * a missing REO queue for a given peer or a given TID. This typically + * may happen if a packet is received on a QOS enabled TID before the + * ADDBA negotiation for that TID, when the TID queue is setup. Or + * it may also happen for MC/BC frames if they are not routed to the + * non-QOS TID queue, in the absence of any other default TID queue. + * This error can show up both in a REO destination or WBM release ring. + * + * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code + * if nbuf could not be handled or dropped. + */ +static QDF_STATUS +dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, uint8_t pool_id, + struct dp_peer *peer) +{ + uint32_t pkt_len; + uint16_t msdu_len; + struct dp_vdev *vdev; + uint8_t tid; + qdf_ether_header_t *eh; + struct hal_rx_msdu_metadata msdu_metadata; + uint16_t sa_idx = 0; + + qdf_nbuf_set_rx_chfrag_start(nbuf, + hal_rx_msdu_end_first_msdu_get(soc->hal_soc, + rx_tlv_hdr)); + qdf_nbuf_set_rx_chfrag_end(nbuf, + hal_rx_msdu_end_last_msdu_get(soc->hal_soc, + rx_tlv_hdr)); + qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, + rx_tlv_hdr)); + qdf_nbuf_set_da_valid(nbuf, + hal_rx_msdu_end_da_is_valid_get(soc->hal_soc, + rx_tlv_hdr)); + qdf_nbuf_set_sa_valid(nbuf, + hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, + rx_tlv_hdr)); + + hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata); + msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr); + pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + RX_PKT_TLVS_LEN; + + if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) { + if (dp_rx_check_pkt_len(soc, pkt_len)) + goto drop_nbuf; + + /* Set length in nbuf */ + qdf_nbuf_set_pktlen( + nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE)); + qdf_assert_always(nbuf->data == rx_tlv_hdr); + } + + /* + * Check if DMA completed -- msdu_done is the last bit + * to be written + */ + if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { + + dp_err_rl("MSDU DONE failure"); + hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, + QDF_TRACE_LEVEL_INFO); + qdf_assert(0); + } + + if (!peer && + dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id, + rx_tlv_hdr, nbuf)) + return QDF_STATUS_E_FAILURE; + + if (!peer) { + bool mpdu_done = false; + struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id); + + dp_err_rl("peer is NULL"); + DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, + qdf_nbuf_len(nbuf)); + + mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id); + /* Trigger invalid peer handler wrapper */ + dp_rx_process_invalid_peer_wrapper(soc, + pdev->invalid_peer_head_msdu, + mpdu_done, pool_id); + + if (mpdu_done) { + pdev->invalid_peer_head_msdu = NULL; + pdev->invalid_peer_tail_msdu = NULL; + } + + return QDF_STATUS_E_FAILURE; + } + + vdev = peer->vdev; + if (!vdev) { + dp_err_rl("Null vdev!"); + DP_STATS_INC(soc, rx.err.invalid_vdev, 1); + goto drop_nbuf; + } + + /* + * Advance the packet start pointer by total size of + * pre-header TLV's + */ + if (qdf_nbuf_is_frag(nbuf)) + qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN); + else + qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad + + RX_PKT_TLVS_LEN)); + + dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1); + + if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) { + sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr); + + if ((sa_idx < 0) || + (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) { + DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1); + goto drop_nbuf; + } + } + + if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) { + /* this is a looped back MCBC pkt, drop it */ + DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf)); + goto drop_nbuf; + } + + /* + * In qwrap mode if the received packet matches with any of the vdev + * mac addresses, drop it. Donot receive multicast packets originated + * from any proxysta. + */ + if (check_qwrap_multicast_loopback(vdev, nbuf)) { + DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf)); + goto drop_nbuf; + } + + + if (qdf_unlikely((peer->nawds_enabled == true) && + hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, + rx_tlv_hdr))) { + dp_err_rl("free buffer for multicast packet"); + DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); + goto drop_nbuf; + } + + if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) { + dp_err_rl("mcast Policy Check Drop pkt"); + goto drop_nbuf; + } + /* WDS Source Port Learning */ + if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet && + vdev->wds_enabled)) + dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf, + msdu_metadata); + + if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) { + tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr); + if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) + dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX); + /* IEEE80211_SEQ_MAX indicates invalid start_seq */ + } + + eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); + + if (peer && !peer->authorize) { + bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) || + qdf_nbuf_is_ipv4_wapi_pkt(nbuf); + + bool is_not_match = qdf_mem_cmp(eh->ether_dhost, + &vdev->mac_addr.raw[0], + QDF_MAC_ADDR_SIZE); + + if (!is_eapol) + goto drop_nbuf; + else if(is_not_match) + goto drop_nbuf; + } + + if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { + qdf_nbuf_set_next(nbuf, NULL); + dp_rx_deliver_raw(vdev, nbuf, peer); + } else { + qdf_nbuf_set_next(nbuf, NULL); + DP_STATS_INC_PKT(peer, rx.to_stack, 1, + qdf_nbuf_len(nbuf)); + + /* + * Update the protocol tag in SKB based on + * CCE metadata + */ + dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, + EXCEPTION_DEST_RING_ID, + true, true); + + /* Update the flow tag in SKB based on FSE metadata */ + dp_rx_update_flow_tag(soc, vdev, nbuf, + rx_tlv_hdr, true); + + if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get( + soc->hal_soc, rx_tlv_hdr) && + (vdev->rx_decap_type == + htt_cmn_pkt_type_ethernet))) { + eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); + DP_STATS_INC_PKT(peer, rx.multicast, 1, + qdf_nbuf_len(nbuf)); + + if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) + DP_STATS_INC_PKT(peer, rx.bcast, 1, + qdf_nbuf_len(nbuf)); + } + + qdf_nbuf_set_exc_frame(nbuf, 1); + dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL); + } + return QDF_STATUS_SUCCESS; + +drop_nbuf: + qdf_nbuf_free(nbuf); + return QDF_STATUS_E_FAILURE; +} + +/** + * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err + * frames to OS or wifi parse errors. + * @soc: core DP main context + * @nbuf: buffer pointer + * @rx_tlv_hdr: start of rx tlv header + * @peer: peer reference + * @err_code: rxdma err code + * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and + * pool_id has same mapping) + * + * Return: None + */ +void +dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, struct dp_peer *peer, + uint8_t err_code, uint8_t mac_id) +{ + uint32_t pkt_len, l2_hdr_offset; + uint16_t msdu_len; + struct dp_vdev *vdev; + qdf_ether_header_t *eh; + bool is_broadcast; + + /* + * Check if DMA completed -- msdu_done is the last bit + * to be written + */ + if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) { + + dp_err_rl("MSDU DONE failure"); + + hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr, + QDF_TRACE_LEVEL_INFO); + qdf_assert(0); + } + + l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, + rx_tlv_hdr); + msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr); + pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN; + + if (dp_rx_check_pkt_len(soc, pkt_len)) { + /* Drop & free packet */ + qdf_nbuf_free(nbuf); + return; + } + /* Set length in nbuf */ + qdf_nbuf_set_pktlen(nbuf, pkt_len); + + qdf_nbuf_set_next(nbuf, NULL); + + qdf_nbuf_set_rx_chfrag_start(nbuf, 1); + qdf_nbuf_set_rx_chfrag_end(nbuf, 1); + + if (!peer) { + QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL"); + DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, + qdf_nbuf_len(nbuf)); + /* Trigger invalid peer handler wrapper */ + dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id); + return; + } + + vdev = peer->vdev; + if (!vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("INVALID vdev %pK OR osif_rx"), vdev); + /* Drop & free packet */ + qdf_nbuf_free(nbuf); + DP_STATS_INC(soc, rx.err.invalid_vdev, 1); + return; + } + + /* + * Advance the packet start pointer by total size of + * pre-header TLV's + */ + dp_rx_skip_tlvs(nbuf, l2_hdr_offset); + + if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) { + uint8_t *pkt_type; + + pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE); + if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) { + if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) == + htons(QDF_LLC_STP)) { + DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1); + goto process_mesh; + } else { + goto process_rx; + } + } + } + if (vdev->rx_decap_type == htt_cmn_pkt_type_raw) + goto process_mesh; + + /* + * WAPI cert AP sends rekey frames as unencrypted. + * Thus RXDMA will report unencrypted frame error. + * To pass WAPI cert case, SW needs to pass unencrypted + * rekey frame to stack. + */ + if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) { + goto process_rx; + } + /* + * In dynamic WEP case rekey frames are not encrypted + * similar to WAPI. Allow EAPOL when 8021+wep is enabled and + * key install is already done + */ + if ((vdev->sec_type == cdp_sec_type_wep104) && + (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))) + goto process_rx; + +process_mesh: + + if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) { + qdf_nbuf_free(nbuf); + DP_STATS_INC(soc, rx.err.invalid_vdev, 1); + return; + } + + if (vdev->mesh_vdev) { + if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) + == QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED, + FL("mesh pkt filtered")); + DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1); + + qdf_nbuf_free(nbuf); + return; + } + dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); + } +process_rx: + if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, + rx_tlv_hdr) && + (vdev->rx_decap_type == + htt_cmn_pkt_type_ethernet))) { + eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); + is_broadcast = (QDF_IS_ADDR_BROADCAST + (eh->ether_dhost)) ? 1 : 0 ; + DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf)); + if (is_broadcast) { + DP_STATS_INC_PKT(peer, rx.bcast, 1, + qdf_nbuf_len(nbuf)); + } + } + + if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) { + dp_rx_deliver_raw(vdev, nbuf, peer); + } else { + /* Update the protocol tag in SKB based on CCE metadata */ + dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, + EXCEPTION_DEST_RING_ID, true, true); + /* Update the flow tag in SKB based on FSE metadata */ + dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); + DP_STATS_INC(peer, rx.to_stack.num, 1); + qdf_nbuf_set_exc_frame(nbuf, 1); + dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL); + } + + return; +} + +/** + * dp_rx_process_mic_error(): Function to pass mic error indication to umac + * @soc: core DP main context + * @nbuf: buffer pointer + * @rx_tlv_hdr: start of rx tlv header + * @peer: peer handle + * + * return: void + */ +void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, struct dp_peer *peer) +{ + struct dp_vdev *vdev = NULL; + struct dp_pdev *pdev = NULL; + struct ol_if_ops *tops = NULL; + uint16_t rx_seq, fragno; + uint8_t is_raw; + unsigned int tid; + QDF_STATUS status; + struct cdp_rx_mic_err_info mic_failure_info; + + if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc, + rx_tlv_hdr)) + return; + + if (!peer) { + dp_info_rl("peer not found"); + goto fail; + } + + vdev = peer->vdev; + if (!vdev) { + dp_info_rl("VDEV not found"); + goto fail; + } + + pdev = vdev->pdev; + if (!pdev) { + dp_info_rl("PDEV not found"); + goto fail; + } + + is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf)); + if (is_raw) { + fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf)); + /* Can get only last fragment */ + if (fragno) { + tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, + qdf_nbuf_data(nbuf)); + rx_seq = hal_rx_get_rx_sequence(soc->hal_soc, + qdf_nbuf_data(nbuf)); + + status = dp_rx_defrag_add_last_frag(soc, peer, + tid, rx_seq, nbuf); + dp_info_rl("Frag pkt seq# %d frag# %d consumed " + "status %d !", rx_seq, fragno, status); + return; + } + } + + if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf), + &mic_failure_info.da_mac_addr.bytes[0])) { + dp_err_rl("Failed to get da_mac_addr"); + goto fail; + } + + if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf), + &mic_failure_info.ta_mac_addr.bytes[0])) { + dp_err_rl("Failed to get ta_mac_addr"); + goto fail; + } + + mic_failure_info.key_id = 0; + mic_failure_info.multicast = + IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes); + qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE); + mic_failure_info.frame_type = cdp_rx_frame_type_802_11; + mic_failure_info.data = NULL; + mic_failure_info.vdev_id = vdev->vdev_id; + + tops = pdev->soc->cdp_soc.ol_ops; + if (tops->rx_mic_error) + tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id, + &mic_failure_info); + +fail: + qdf_nbuf_free(nbuf); + return; +} + +#ifdef DP_RX_DESC_COOKIE_INVALIDATE +/** + * dp_rx_link_cookie_check() - Validate link desc cookie + * @ring_desc: ring descriptor + * + * Return: qdf status + */ +static inline QDF_STATUS +dp_rx_link_cookie_check(hal_ring_desc_t ring_desc) +{ + if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc))) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie + * @ring_desc: ring descriptor + * + * Return: None + */ +static inline void +dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc) +{ + HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc); +} +#else +static inline QDF_STATUS +dp_rx_link_cookie_check(hal_ring_desc_t ring_desc) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void +dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc) +{ +} +#endif + +#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY +/** + * dp_rx_err_ring_record_entry() - Record rx err ring history + * @soc: Datapath soc structure + * @paddr: paddr of the buffer in RX err ring + * @sw_cookie: SW cookie of the buffer in RX err ring + * @rbm: Return buffer manager of the buffer in RX err ring + * + * Returns: None + */ +static inline void +dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr, + uint32_t sw_cookie, uint8_t rbm) +{ + struct dp_buf_info_record *record; + uint32_t idx; + + if (qdf_unlikely(!soc->rx_err_ring_history)) + return; + + idx = dp_history_get_next_index(&soc->rx_err_ring_history->index, + DP_RX_ERR_HIST_MAX); + + /* No NULL check needed for record since its an array */ + record = &soc->rx_err_ring_history->entry[idx]; + + record->timestamp = qdf_get_log_timestamp(); + record->hbi.paddr = paddr; + record->hbi.sw_cookie = sw_cookie; + record->hbi.rbm = rbm; +} +#else +static inline void +dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr, + uint32_t sw_cookie, uint8_t rbm) +{ +} +#endif + +uint32_t +dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, + hal_ring_handle_t hal_ring_hdl, uint32_t quota) +{ + hal_ring_desc_t ring_desc; + hal_soc_handle_t hal_soc; + uint32_t count = 0; + uint32_t rx_bufs_used = 0; + uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; + uint8_t mac_id = 0; + uint8_t buf_type; + uint8_t error, rbm; + struct hal_rx_mpdu_desc_info mpdu_desc_info; + struct hal_buf_info hbi; + struct dp_pdev *dp_pdev; + struct dp_srng *dp_rxdma_srng; + struct rx_desc_pool *rx_desc_pool; + uint32_t cookie = 0; + void *link_desc_va; + struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ + uint16_t num_msdus; + struct dp_rx_desc *rx_desc = NULL; + QDF_STATUS status; + bool ret; + + /* Debug -- Remove later */ + qdf_assert(soc && hal_ring_hdl); + + hal_soc = soc->hal_soc; + + /* Debug -- Remove later */ + qdf_assert(hal_soc); + + if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { + + /* TODO */ + /* + * Need API to convert from hal_ring pointer to + * Ring Type / Ring Id combo + */ + DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("HAL RING Access Failed -- %pK"), hal_ring_hdl); + goto done; + } + + while (qdf_likely(quota-- && (ring_desc = + hal_srng_dst_peek(hal_soc, + hal_ring_hdl)))) { + + DP_STATS_INC(soc, rx.err_ring_pkts, 1); + + error = HAL_RX_ERROR_STATUS_GET(ring_desc); + + buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc); + + /* Get the MPDU DESC info */ + hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info); + + if (mpdu_desc_info.msdu_count == 0) + goto next_entry; + + /* + * For REO error ring, expect only MSDU LINK DESC + */ + qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE); + + cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc); + /* + * check for the magic number in the sw cookie + */ + qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) & + LINK_DESC_ID_START); + + status = dp_rx_link_cookie_check(ring_desc); + if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { + DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1); + break; + } + + /* + * Check if the buffer is to be processed on this processor + */ + rbm = hal_rx_ret_buf_manager_get(ring_desc); + + hal_rx_reo_buf_paddr_get(ring_desc, &hbi); + link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi); + hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, + &num_msdus); + dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0], + msdu_list.sw_cookie[0], + msdu_list.rbm[0]); + if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) && + (msdu_list.rbm[0] != + HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST) && + (msdu_list.rbm[0] != DP_DEFRAG_RBM))) { + /* TODO */ + /* Call appropriate handler */ + if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) { + DP_STATS_INC(soc, rx.err.invalid_rbm, 1); + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("Invalid RBM %d"), + msdu_list.rbm[0]); + } + + /* Return link descriptor through WBM ring (SW2WBM)*/ + dp_rx_link_desc_return(soc, ring_desc, + HAL_BM_ACTION_RELEASE_MSDU_LIST); + goto next_entry; + } + + rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, + msdu_list.sw_cookie[0]); + qdf_assert_always(rx_desc); + + mac_id = rx_desc->pool_id; + + if (mpdu_desc_info.bar_frame) { + qdf_assert_always(mpdu_desc_info.msdu_count == 1); + + dp_rx_bar_frame_handle(soc, + ring_desc, + rx_desc, + &mpdu_desc_info); + + rx_bufs_reaped[mac_id] += 1; + goto next_entry; + } + + dp_info("Got pkt with REO ERROR: %d", error); + + if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) { + /* + * We only handle one msdu per link desc for fragmented + * case. We drop the msdus and release the link desc + * back if there are more than one msdu in link desc. + */ + if (qdf_unlikely(num_msdus > 1)) { + count = dp_rx_msdus_drop(soc, ring_desc, + &mpdu_desc_info, + &mac_id, quota); + rx_bufs_reaped[mac_id] += count; + goto next_entry; + } + + /* + * this is a unlikely scenario where the host is reaping + * a descriptor which it already reaped just a while ago + * but is yet to replenish it back to HW. + * In this case host will dump the last 128 descriptors + * including the software descriptor rx_desc and assert. + */ + + if (qdf_unlikely(!rx_desc->in_use)) { + DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1); + dp_info_rl("Reaping rx_desc not in use!"); + dp_rx_dump_info_and_assert(soc, hal_ring_hdl, + ring_desc, rx_desc); + /* ignore duplicate RX desc and continue */ + /* Pop out the descriptor */ + goto next_entry; + } + + ret = dp_rx_desc_paddr_sanity_check(rx_desc, + msdu_list.paddr[0]); + if (!ret) { + DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); + rx_desc->in_err_state = 1; + goto next_entry; + } + + count = dp_rx_frag_handle(soc, + ring_desc, &mpdu_desc_info, + rx_desc, &mac_id, quota); + + rx_bufs_reaped[mac_id] += count; + DP_STATS_INC(soc, rx.rx_frags, 1); + goto next_entry; + } + + /* + * Expect REO errors to be handled after this point + */ + qdf_assert_always(error == HAL_REO_ERROR_DETECTED); + + if (hal_rx_reo_is_pn_error(ring_desc)) { + /* TOD0 */ + DP_STATS_INC(soc, + rx.err. + reo_error[HAL_REO_ERR_PN_CHECK_FAILED], + 1); + /* increment @pdev level */ + dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + if (dp_pdev) + DP_STATS_INC(dp_pdev, err.reo_error, 1); + count = dp_rx_pn_error_handle(soc, + ring_desc, + &mpdu_desc_info, &mac_id, + quota); + + rx_bufs_reaped[mac_id] += count; + goto next_entry; + } + + if (hal_rx_reo_is_2k_jump(ring_desc)) { + /* TOD0 */ + DP_STATS_INC(soc, + rx.err. + reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP], + 1); + /* increment @pdev level */ + dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + if (dp_pdev) + DP_STATS_INC(dp_pdev, err.reo_error, 1); + + count = dp_rx_reo_err_entry_process( + soc, + ring_desc, + &mpdu_desc_info, + link_desc_va, + HAL_REO_ERR_REGULAR_FRAME_2K_JUMP); + + rx_bufs_reaped[mac_id] += count; + goto next_entry; + } + + if (hal_rx_reo_is_oor_error(ring_desc)) { + DP_STATS_INC( + soc, + rx.err. + reo_error[HAL_REO_ERR_REGULAR_FRAME_OOR], + 1); + /* increment @pdev level */ + dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + if (dp_pdev) + DP_STATS_INC(dp_pdev, err.reo_error, 1); + count = dp_rx_reo_err_entry_process( + soc, + ring_desc, + &mpdu_desc_info, + link_desc_va, + HAL_REO_ERR_REGULAR_FRAME_OOR); + + rx_bufs_reaped[mac_id] += count; + goto next_entry; + } + /* Assert if unexpected error type */ + qdf_assert_always(0); +next_entry: + dp_rx_link_cookie_invalidate(ring_desc); + hal_srng_dst_get_next(hal_soc, hal_ring_hdl); + } + +done: + dp_srng_access_end(int_ctx, soc, hal_ring_hdl); + + if (soc->rx.flags.defrag_timeout_check) { + uint32_t now_ms = + qdf_system_ticks_to_msecs(qdf_system_ticks()); + + if (now_ms >= soc->rx.defrag.next_flush_ms) + dp_rx_defrag_waitlist_flush(soc); + } + + for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { + if (rx_bufs_reaped[mac_id]) { + dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; + rx_desc_pool = &soc->rx_desc_buf[mac_id]; + + dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, + rx_desc_pool, + rx_bufs_reaped[mac_id], + &dp_pdev->free_list_head, + &dp_pdev->free_list_tail); + rx_bufs_used += rx_bufs_reaped[mac_id]; + } + } + + return rx_bufs_used; /* Assume no scale factor for now */ +} + +#ifdef DROP_RXDMA_DECRYPT_ERR +/** + * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled + * + * Return: true if rxdma decrypt err frames are handled and false otheriwse + */ +static inline bool dp_handle_rxdma_decrypt_err(void) +{ + return false; +} +#else +static inline bool dp_handle_rxdma_decrypt_err(void) +{ + return true; +} +#endif + +uint32_t +dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, + hal_ring_handle_t hal_ring_hdl, uint32_t quota) +{ + hal_ring_desc_t ring_desc; + hal_soc_handle_t hal_soc; + struct dp_rx_desc *rx_desc; + union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL }; + union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL }; + uint32_t rx_bufs_used = 0; + uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; + uint8_t buf_type, rbm; + uint32_t rx_buf_cookie; + uint8_t mac_id; + struct dp_pdev *dp_pdev; + struct dp_srng *dp_rxdma_srng; + struct rx_desc_pool *rx_desc_pool; + uint8_t *rx_tlv_hdr; + qdf_nbuf_t nbuf_head = NULL; + qdf_nbuf_t nbuf_tail = NULL; + qdf_nbuf_t nbuf, next; + struct hal_wbm_err_desc_info wbm_err_info = { 0 }; + uint8_t pool_id; + uint8_t tid = 0; + + /* Debug -- Remove later */ + qdf_assert(soc && hal_ring_hdl); + + hal_soc = soc->hal_soc; + + /* Debug -- Remove later */ + qdf_assert(hal_soc); + + if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { + + /* TODO */ + /* + * Need API to convert from hal_ring pointer to + * Ring Type / Ring Id combo + */ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("HAL RING Access Failed -- %pK"), hal_ring_hdl); + goto done; + } + + while (qdf_likely(quota-- && (ring_desc = + hal_srng_dst_get_next(hal_soc, + hal_ring_hdl)))) { + + /* XXX */ + buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc); + + /* + * For WBM ring, expect only MSDU buffers + */ + qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF); + + qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc) + == HAL_RX_WBM_ERR_SRC_RXDMA) || + (HAL_RX_WBM_ERR_SRC_GET(ring_desc) + == HAL_RX_WBM_ERR_SRC_REO)); + + /* + * Check if the buffer is to be processed on this processor + */ + rbm = hal_rx_ret_buf_manager_get(ring_desc); + + if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) { + /* TODO */ + /* Call appropriate handler */ + DP_STATS_INC(soc, rx.err.invalid_rbm, 1); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Invalid RBM %d"), rbm); + continue; + } + + rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc); + + rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie); + qdf_assert_always(rx_desc); + + if (!dp_rx_desc_check_magic(rx_desc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Invalid rx_desc cookie=%d"), + rx_buf_cookie); + continue; + } + + /* + * this is a unlikely scenario where the host is reaping + * a descriptor which it already reaped just a while ago + * but is yet to replenish it back to HW. + * In this case host will dump the last 128 descriptors + * including the software descriptor rx_desc and assert. + */ + if (qdf_unlikely(!rx_desc->in_use)) { + DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1); + dp_rx_dump_info_and_assert(soc, hal_ring_hdl, + ring_desc, rx_desc); + } + + nbuf = rx_desc->nbuf; + dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, false); + qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE); + + /* + * save the wbm desc info in nbuf TLV. We will need this + * info when we do the actual nbuf processing + */ + hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc); + wbm_err_info.pool_id = rx_desc->pool_id; + hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf), + &wbm_err_info); + + rx_bufs_reaped[rx_desc->pool_id]++; + + DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf); + dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], + &tail[rx_desc->pool_id], + rx_desc); + } +done: + dp_srng_access_end(int_ctx, soc, hal_ring_hdl); + + for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { + if (rx_bufs_reaped[mac_id]) { + dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; + rx_desc_pool = &soc->rx_desc_buf[mac_id]; + + dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, + rx_desc_pool, rx_bufs_reaped[mac_id], + &head[mac_id], &tail[mac_id]); + rx_bufs_used += rx_bufs_reaped[mac_id]; + } + } + + nbuf = nbuf_head; + while (nbuf) { + struct dp_peer *peer; + uint16_t peer_id; + uint8_t err_code; + uint8_t *tlv_hdr; + rx_tlv_hdr = qdf_nbuf_data(nbuf); + + /* + * retrieve the wbm desc info from nbuf TLV, so we can + * handle error cases appropriately + */ + hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info); + + peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, + rx_tlv_hdr); + peer = dp_peer_find_by_id(soc, peer_id); + + if (!peer) + dp_info_rl("peer is null peer_id%u err_src%u err_rsn%u", + peer_id, wbm_err_info.wbm_err_src, + wbm_err_info.reo_psh_rsn); + + /* Set queue_mapping in nbuf to 0 */ + dp_set_rx_queue(nbuf, 0); + + next = nbuf->next; + + if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) { + if (wbm_err_info.reo_psh_rsn + == HAL_RX_WBM_REO_PSH_RSN_ERROR) { + + DP_STATS_INC(soc, + rx.err.reo_error + [wbm_err_info.reo_err_code], 1); + /* increment @pdev level */ + pool_id = wbm_err_info.pool_id; + dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id); + if (dp_pdev) + DP_STATS_INC(dp_pdev, err.reo_error, + 1); + + switch (wbm_err_info.reo_err_code) { + /* + * Handling for packets which have NULL REO + * queue descriptor + */ + case HAL_REO_ERR_QUEUE_DESC_ADDR_0: + pool_id = wbm_err_info.pool_id; + dp_rx_null_q_desc_handle(soc, nbuf, + rx_tlv_hdr, + pool_id, peer); + nbuf = next; + if (peer) + dp_peer_unref_del_find_by_id( + peer); + continue; + /* TODO */ + /* Add per error code accounting */ + case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: + pool_id = wbm_err_info.pool_id; + + if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, + rx_tlv_hdr)) { + peer_id = + hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc, + rx_tlv_hdr); + tid = + hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr); + } + QDF_NBUF_CB_RX_PKT_LEN(nbuf) = + hal_rx_msdu_start_msdu_len_get( + rx_tlv_hdr); + nbuf->next = NULL; + dp_2k_jump_handle(soc, nbuf, + rx_tlv_hdr, + peer_id, tid); + nbuf = next; + if (peer) + dp_peer_unref_del_find_by_id( + peer); + continue; + case HAL_REO_ERR_BAR_FRAME_2K_JUMP: + case HAL_REO_ERR_BAR_FRAME_OOR: + if (peer) + dp_rx_err_handle_bar(soc, + peer, + nbuf); + qdf_nbuf_free(nbuf); + break; + + default: + dp_info_rl("Got pkt with REO ERROR: %d", + wbm_err_info.reo_err_code); + break; + } + } + } else if (wbm_err_info.wbm_err_src == + HAL_RX_WBM_ERR_SRC_RXDMA) { + if (wbm_err_info.rxdma_psh_rsn + == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { + DP_STATS_INC(soc, + rx.err.rxdma_error + [wbm_err_info.rxdma_err_code], 1); + /* increment @pdev level */ + pool_id = wbm_err_info.pool_id; + dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id); + if (dp_pdev) + DP_STATS_INC(dp_pdev, + err.rxdma_error, 1); + + switch (wbm_err_info.rxdma_err_code) { + case HAL_RXDMA_ERR_UNENCRYPTED: + + case HAL_RXDMA_ERR_WIFI_PARSE: + pool_id = wbm_err_info.pool_id; + dp_rx_process_rxdma_err(soc, nbuf, + rx_tlv_hdr, + peer, + wbm_err_info. + rxdma_err_code, + pool_id); + nbuf = next; + if (peer) + dp_peer_unref_del_find_by_id(peer); + continue; + + case HAL_RXDMA_ERR_TKIP_MIC: + dp_rx_process_mic_error(soc, nbuf, + rx_tlv_hdr, + peer); + nbuf = next; + if (peer) { + DP_STATS_INC(peer, rx.err.mic_err, 1); + dp_peer_unref_del_find_by_id( + peer); + } + continue; + + case HAL_RXDMA_ERR_DECRYPT: + if (!dp_handle_rxdma_decrypt_err()) { + if (peer) + DP_STATS_INC(peer, + rx.err.decrypt_err, 1); + break; + } + + pool_id = wbm_err_info.pool_id; + err_code = wbm_err_info.rxdma_err_code; + tlv_hdr = rx_tlv_hdr; + dp_rx_process_rxdma_err(soc, nbuf, + tlv_hdr, peer, + err_code, + pool_id); + nbuf = next; + if (peer) { + DP_STATS_INC(peer, rx.err. + decrypt_err, 1); + dp_peer_unref_del_find_by_id( + peer); + } + continue; + + default: + dp_err_rl("RXDMA error %d", + wbm_err_info.rxdma_err_code); + } + } + } else { + /* Should not come here */ + qdf_assert(0); + } + + if (peer) + dp_peer_unref_del_find_by_id(peer); + + hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr, + QDF_TRACE_LEVEL_DEBUG); + qdf_nbuf_free(nbuf); + nbuf = next; + } + return rx_bufs_used; /* Assume no scale factor for now */ +} + +/** + * dup_desc_dbg() - dump and assert if duplicate rx desc found + * + * @soc: core DP main context + * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info + * @rx_desc: void pointer to rx descriptor + * + * Return: void + */ +static void dup_desc_dbg(struct dp_soc *soc, + hal_rxdma_desc_t rxdma_dst_ring_desc, + void *rx_desc) +{ + DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1); + dp_rx_dump_info_and_assert( + soc, + soc->rx_rel_ring.hal_srng, + hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc), + rx_desc); +} + +/** + * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs + * + * @soc: core DP main context + * @mac_id: mac id which is one of 3 mac_ids + * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info + * @head: head of descs list to be freed + * @tail: tail of decs list to be freed + + * Return: number of msdu in MPDU to be popped + */ +static inline uint32_t +dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, + hal_rxdma_desc_t rxdma_dst_ring_desc, + union dp_rx_desc_list_elem_t **head, + union dp_rx_desc_list_elem_t **tail) +{ + void *rx_msdu_link_desc; + qdf_nbuf_t msdu; + qdf_nbuf_t last; + struct hal_rx_msdu_list msdu_list; + uint16_t num_msdus; + struct hal_buf_info buf_info; + uint32_t rx_bufs_used = 0; + uint32_t msdu_cnt; + uint32_t i; + uint8_t push_reason; + uint8_t rxdma_error_code = 0; + uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST; + struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS]; + hal_rxdma_desc_t ring_desc; + + msdu = 0; + + last = NULL; + + hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info, + &msdu_cnt); + + push_reason = + hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc); + if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { + rxdma_error_code = + hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc); + } + + do { + rx_msdu_link_desc = + dp_rx_cookie_2_link_desc_va(soc, &buf_info); + + qdf_assert_always(rx_msdu_link_desc); + + hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc, + &msdu_list, &num_msdus); + + if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) { + /* if the msdus belongs to NSS offloaded radio && + * the rbm is not SW1_BM then return the msdu_link + * descriptor without freeing the msdus (nbufs). let + * these buffers be given to NSS completion ring for + * NSS to free them. + * else iterate through the msdu link desc list and + * free each msdu in the list. + */ + if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM && + wlan_cfg_get_dp_pdev_nss_enabled( + pdev->wlan_cfg_ctx)) + bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST; + else { + for (i = 0; i < num_msdus; i++) { + struct dp_rx_desc *rx_desc = + dp_rx_cookie_2_va_rxdma_buf(soc, + msdu_list.sw_cookie[i]); + qdf_assert_always(rx_desc); + msdu = rx_desc->nbuf; + /* + * this is a unlikely scenario + * where the host is reaping + * a descriptor which + * it already reaped just a while ago + * but is yet to replenish + * it back to HW. + * In this case host will dump + * the last 128 descriptors + * including the software descriptor + * rx_desc and assert. + */ + ring_desc = rxdma_dst_ring_desc; + if (qdf_unlikely(!rx_desc->in_use)) { + dup_desc_dbg(soc, + ring_desc, + rx_desc); + continue; + } + + dp_ipa_handle_rx_buf_smmu_mapping(soc, + msdu, + false); + qdf_nbuf_unmap_single(soc->osdev, msdu, + QDF_DMA_FROM_DEVICE); + + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] msdu_nbuf=%pK ", + __func__, __LINE__, msdu); + + qdf_nbuf_free(msdu); + rx_bufs_used++; + dp_rx_add_to_free_desc_list(head, + tail, rx_desc); + } + } + } else { + rxdma_error_code = HAL_RXDMA_ERR_WAR; + } + + /* + * Store the current link buffer into to the local structure + * to be used for release purpose. + */ + hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr, + buf_info.sw_cookie, buf_info.rbm); + + hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info); + dp_rx_link_desc_return_by_addr(soc, + (hal_buff_addrinfo_t) + rx_link_buf_info, + bm_action); + } while (buf_info.paddr); + + DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1); + if (pdev) + DP_STATS_INC(pdev, err.rxdma_error, 1); + + if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Packet received with Decrypt error"); + } + + return rx_bufs_used; +} + +uint32_t +dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, + uint32_t mac_id, uint32_t quota) +{ + struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + hal_rxdma_desc_t rxdma_dst_ring_desc; + hal_soc_handle_t hal_soc; + void *err_dst_srng; + union dp_rx_desc_list_elem_t *head = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + struct dp_srng *dp_rxdma_srng; + struct rx_desc_pool *rx_desc_pool; + uint32_t work_done = 0; + uint32_t rx_bufs_used = 0; + + if (!pdev) + return 0; + + err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng; + + if (!err_dst_srng) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s %d : HAL Monitor Destination Ring Init \ + Failed -- %pK", + __func__, __LINE__, err_dst_srng); + return 0; + } + + hal_soc = soc->hal_soc; + + qdf_assert(hal_soc); + + if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s %d : HAL Monitor Destination Ring Init \ + Failed -- %pK", + __func__, __LINE__, err_dst_srng); + return 0; + } + + while (qdf_likely(quota-- && (rxdma_dst_ring_desc = + hal_srng_dst_get_next(hal_soc, err_dst_srng)))) { + + rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id, + rxdma_dst_ring_desc, + &head, &tail); + } + + dp_srng_access_end(int_ctx, soc, err_dst_srng); + + if (rx_bufs_used) { + if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) + dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; + else + dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id]; + rx_desc_pool = &soc->rx_desc_buf[mac_id]; + + dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, + rx_desc_pool, rx_bufs_used, &head, &tail); + + work_done += rx_bufs_used; + } + + return work_done; +} + +static inline uint32_t +dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, + hal_rxdma_desc_t rxdma_dst_ring_desc, + union dp_rx_desc_list_elem_t **head, + union dp_rx_desc_list_elem_t **tail) +{ + void *rx_msdu_link_desc; + qdf_nbuf_t msdu; + qdf_nbuf_t last; + struct hal_rx_msdu_list msdu_list; + uint16_t num_msdus; + struct hal_buf_info buf_info; + uint32_t rx_bufs_used = 0, msdu_cnt, i; + uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS]; + + msdu = 0; + + last = NULL; + + hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info, + &msdu_cnt); + + do { + rx_msdu_link_desc = + dp_rx_cookie_2_link_desc_va(soc, &buf_info); + + if (!rx_msdu_link_desc) { + DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1); + break; + } + + hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc, + &msdu_list, &num_msdus); + + if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) { + for (i = 0; i < num_msdus; i++) { + struct dp_rx_desc *rx_desc = + dp_rx_cookie_2_va_rxdma_buf( + soc, + msdu_list.sw_cookie[i]); + qdf_assert_always(rx_desc); + msdu = rx_desc->nbuf; + + qdf_nbuf_unmap_single(soc->osdev, msdu, + QDF_DMA_FROM_DEVICE); + + qdf_nbuf_free(msdu); + rx_bufs_used++; + dp_rx_add_to_free_desc_list(head, + tail, rx_desc); + } + } + + /* + * Store the current link buffer into to the local structure + * to be used for release purpose. + */ + hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr, + buf_info.sw_cookie, buf_info.rbm); + + hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info); + dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t) + rx_link_buf_info, + HAL_BM_ACTION_PUT_IN_IDLE_LIST); + } while (buf_info.paddr); + + return rx_bufs_used; +} + +/* + * + * dp_handle_wbm_internal_error() - handles wbm_internal_error case + * + * @soc: core DP main context + * @hal_desc: hal descriptor + * @buf_type: indicates if the buffer is of type link disc or msdu + * Return: None + * + * wbm_internal_error is seen in following scenarios : + * + * 1. Null pointers detected in WBM_RELEASE_RING descriptors + * 2. Null pointers detected during delinking process + * + * Some null pointer cases: + * + * a. MSDU buffer pointer is NULL + * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag + * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL + */ +void +dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc, + uint32_t buf_type) +{ + struct hal_buf_info buf_info = {0}; + struct dp_pdev *dp_pdev; + struct dp_rx_desc *rx_desc = NULL; + uint32_t rx_buf_cookie; + uint32_t rx_bufs_reaped = 0; + union dp_rx_desc_list_elem_t *head = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + uint8_t pool_id; + + hal_rx_reo_buf_paddr_get(hal_desc, &buf_info); + + if (!buf_info.paddr) { + DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1); + return; + } + + rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(hal_desc); + pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(rx_buf_cookie); + + if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) { + DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1); + rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie); + + if (rx_desc && rx_desc->nbuf) { + qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf, + QDF_DMA_FROM_DEVICE); + + rx_desc->unmapped = 1; + + qdf_nbuf_free(rx_desc->nbuf); + dp_rx_add_to_free_desc_list(&head, + &tail, + rx_desc); + + rx_bufs_reaped++; + } + } else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) { + rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id, + hal_desc, + &head, &tail); + } + + if (rx_bufs_reaped) { + struct rx_desc_pool *rx_desc_pool; + struct dp_srng *dp_rxdma_srng; + + DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1); + dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id); + dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id]; + rx_desc_pool = &soc->rx_desc_buf[pool_id]; + + dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng, + rx_desc_pool, + rx_bufs_reaped, + &head, &tail); + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon.h new file mode 100644 index 0000000000000000000000000000000000000000..80eda34f67425be62faf80f6817ae52a6fc6e430 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_RX_MON_H_ +#define _DP_RX_MON_H_ + +/** +* dp_rx_mon_dest_process() - Brain of the Rx processing functionality +* Called from the bottom half (tasklet/NET_RX_SOFTIRQ) +* @soc: core txrx main context 164 +* @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced +* @quota: No. of units (packets) that can be serviced in one shot. +* +* This function implements the core of Rx functionality. This is +* expected to handle only non-error frames. +* +* Return: uint32_t: No. of elements processed +*/ +void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, + uint32_t quota); + +QDF_STATUS dp_rx_pdev_mon_attach(struct dp_pdev *pdev); +QDF_STATUS dp_rx_pdev_mon_detach(struct dp_pdev *pdev); +QDF_STATUS dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int mac_id); +QDF_STATUS dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id); + +/** + * dp_reset_monitor_mode() - Disable monitor mode + * @pdev_handle: Datapath PDEV handle + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + uint8_t smart_monitor); + +/** + * dp_mon_link_free() - free monitor link desc pool + * @pdev: core txrx pdev context + * + * This function will release DP link desc pool for monitor mode from + * main device context. + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS dp_mon_link_free(struct dp_pdev *pdev); + + +uint32_t dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota); +QDF_STATUS dp_rx_mon_deliver(struct dp_soc *soc, uint32_t mac_id, + qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu); +/* + * dp_rx_mon_deliver_non_std() - deliver frames for non standard path + * @soc: core txrx main contex + * @mac_id: MAC ID + * + * This function delivers the radio tap and dummy MSDU + * into user layer application for preamble only PPDU. + * + * Return: Operation status + */ +QDF_STATUS dp_rx_mon_deliver_non_std(struct dp_soc *soc, uint32_t mac_id); + +/** + * dp_rxdma_err_process() - RxDMA error processing functionality + * @soc: core txrx main contex + * @mac_id: mac id which is one of 3 mac_ids + * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced + * @quota: No. of units (packets) that can be serviced in one shot. + * + * Return: num of buffers processed + */ +uint32_t dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, + uint32_t mac_id, uint32_t quota); + +/** + * dp_mon_buf_delayed_replenish() - Helper routine to replenish monitor dest buf + * @pdev: DP pdev object + * + * Return: None + */ +void dp_mon_buf_delayed_replenish(struct dp_pdev *pdev); +#ifndef REMOVE_MON_DBG_STATS +/* + * dp_rx_mon_update_dbg_ppdu_stats() - Update status ring TLV count + * @ppdu_info: HAL RX PPDU info retrieved from status ring TLV + * @rx_mon_stats: monitor mode status/destination ring PPDU and MPDU count + * + * Update status ring PPDU start and end count. Keep track TLV state on + * PPDU start and end to find out if start and end is matching. Keep + * track missing PPDU start and end count. Keep track matching PPDU + * start and end count. + * + * Return: None + */ +static inline void +dp_rx_mon_update_dbg_ppdu_stats(struct hal_rx_ppdu_info *ppdu_info, + struct cdp_pdev_mon_stats *rx_mon_stats) +{ + if (ppdu_info->rx_state == + HAL_RX_MON_PPDU_START) { + rx_mon_stats->status_ppdu_start++; + if (rx_mon_stats->status_ppdu_state + != CDP_MON_PPDU_END) + rx_mon_stats->status_ppdu_end_mis++; + rx_mon_stats->status_ppdu_state + = CDP_MON_PPDU_START; + } else if (ppdu_info->rx_state == + HAL_RX_MON_PPDU_END) { + rx_mon_stats->status_ppdu_end++; + if (rx_mon_stats->status_ppdu_state + != CDP_MON_PPDU_START) + rx_mon_stats->status_ppdu_start_mis++; + else + rx_mon_stats->status_ppdu_compl++; + rx_mon_stats->status_ppdu_state + = CDP_MON_PPDU_END; + } +} + +/* + * dp_rx_mon_init_dbg_ppdu_stats() - initialization for monitor mode stats + * @ppdu_info: HAL RX PPDU info retrieved from status ring TLV + * @rx_mon_stats: monitor mode status/destination ring PPDU and MPDU count + * + * Return: None + */ +static inline void +dp_rx_mon_init_dbg_ppdu_stats(struct hal_rx_ppdu_info *ppdu_info, + struct cdp_pdev_mon_stats *rx_mon_stats) +{ + ppdu_info->rx_state = HAL_RX_MON_PPDU_END; + rx_mon_stats->status_ppdu_state + = CDP_MON_PPDU_END; +} + +#else +static inline void +dp_rx_mon_update_dbg_ppdu_stats(struct hal_rx_ppdu_info *ppdu_info, + struct cdp_pdev_mon_stats *rx_mon_stats) +{ +} + +static inline void +dp_rx_mon_init_dbg_ppdu_stats(struct hal_rx_ppdu_info *ppdu_info, + struct cdp_pdev_mon_stats *rx_mon_stats) +{ +} + +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon_dest.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon_dest.c new file mode 100644 index 0000000000000000000000000000000000000000..07a9760e4ee3b1e95cca6c842ff8067eb92a8645 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon_dest.c @@ -0,0 +1,1742 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hal_hw_headers.h" +#include "dp_types.h" +#include "dp_rx.h" +#include "dp_peer.h" +#include "hal_rx.h" +#include "hal_api.h" +#include "qdf_trace.h" +#include "qdf_nbuf.h" +#include "hal_api_mon.h" +#include "dp_rx_mon.h" +#include "wlan_cfg.h" +#include "dp_internal.h" + +/* The maxinum buffer length allocated for radio tap */ +#define MAX_MONITOR_HEADER (512) +/* + * PPDU id is from 0 to 64k-1. PPDU id read from status ring and PPDU id + * read from destination ring shall track each other. If the distance of + * two ppdu id is less than 20000. It is assume no wrap around. Otherwise, + * It is assume wrap around. + */ +#define NOT_PPDU_ID_WRAP_AROUND 20000 +/* + * The destination ring processing is stuck if the destrination is not + * moving while status ring moves 16 ppdu. the destination ring processing + * skips this destination ring ppdu as walkaround + */ +#define MON_DEST_RING_STUCK_MAX_CNT 16 + +/** + * dp_rx_mon_link_desc_return() - Return a MPDU link descriptor to HW + * (WBM), following error handling + * + * @dp_pdev: core txrx pdev context + * @buf_addr_info: void pointer to monitor link descriptor buf addr info + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev, + hal_buff_addrinfo_t buf_addr_info, int mac_id) +{ + struct dp_srng *dp_srng; + hal_ring_handle_t hal_ring_hdl; + hal_soc_handle_t hal_soc; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + void *src_srng_desc; + + hal_soc = dp_pdev->soc->hal_soc; + + dp_srng = &dp_pdev->soc->rxdma_mon_desc_ring[mac_id]; + hal_ring_hdl = dp_srng->hal_srng; + + qdf_assert(hal_ring_hdl); + + if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring_hdl))) { + + /* TODO */ + /* + * Need API to convert from hal_ring pointer to + * Ring Type / Ring Id combo + */ + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s %d : \ + HAL RING Access For WBM Release SRNG Failed -- %pK", + __func__, __LINE__, hal_ring_hdl); + goto done; + } + + src_srng_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + + if (qdf_likely(src_srng_desc)) { + /* Return link descriptor through WBM ring (SW2WBM)*/ + hal_rx_mon_msdu_link_desc_set(hal_soc, + src_srng_desc, buf_addr_info); + status = QDF_STATUS_SUCCESS; + } else { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s %d -- Monitor Link Desc WBM Release Ring Full", + __func__, __LINE__); + } +done: + hal_srng_access_end(hal_soc, hal_ring_hdl); + return status; +} + +/** + * dp_mon_adjust_frag_len() - MPDU and MSDU may spread across + * multiple nbufs. This function + * is to return data length in + * fragmented buffer + * + * @total_len: pointer to remaining data length. + * @frag_len: pointer to data length in this fragment. +*/ +static inline void dp_mon_adjust_frag_len(uint32_t *total_len, +uint32_t *frag_len) +{ + if (*total_len >= (RX_MONITOR_BUFFER_SIZE - RX_PKT_TLVS_LEN)) { + *frag_len = RX_MONITOR_BUFFER_SIZE - RX_PKT_TLVS_LEN; + *total_len -= *frag_len; + } else { + *frag_len = *total_len; + *total_len = 0; + } +} + +/** + * dp_rx_cookie_2_mon_link_desc() - Retrieve Link descriptor based on target + * @pdev: core physical device context + * @hal_buf_info: structure holding the buffer info + * mac_id: mac number + * + * Return: link descriptor address + */ +static inline +void *dp_rx_cookie_2_mon_link_desc(struct dp_pdev *pdev, + struct hal_buf_info buf_info, + uint8_t mac_id) +{ + if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) + return dp_rx_cookie_2_mon_link_desc_va(pdev, &buf_info, + mac_id); + + return dp_rx_cookie_2_link_desc_va(pdev->soc, &buf_info); +} + +/** + * dp_rx_monitor_link_desc_return() - Return Link descriptor based on target + * @pdev: core physical device context + * @p_last_buf_addr_info: MPDU Link descriptor + * mac_id: mac number + * + * Return: QDF_STATUS + */ +static inline +QDF_STATUS dp_rx_monitor_link_desc_return(struct dp_pdev *pdev, + hal_buff_addrinfo_t + p_last_buf_addr_info, + uint8_t mac_id, uint8_t bm_action) +{ + if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) + return dp_rx_mon_link_desc_return(pdev, p_last_buf_addr_info, + mac_id); + + return dp_rx_link_desc_return_by_addr(pdev->soc, p_last_buf_addr_info, + bm_action); +} + +/** + * dp_rxdma_get_mon_dst_ring() - Return the pointer to rxdma_err_dst_ring + * or mon_dst_ring based on the target + * @pdev: core physical device context + * @mac_for_pdev: mac_id number + * + * Return: ring address + */ +static inline +void *dp_rxdma_get_mon_dst_ring(struct dp_pdev *pdev, + uint8_t mac_for_pdev) +{ + if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) + return pdev->soc->rxdma_mon_dst_ring[mac_for_pdev].hal_srng; + + return pdev->soc->rxdma_err_dst_ring[mac_for_pdev].hal_srng; +} + +/** + * dp_rxdma_get_mon_buf_ring() - Return monitor buf ring address + * based on target + * @pdev: core physical device context + * @mac_for_pdev: mac id number + * + * Return: ring address + */ +static inline +struct dp_srng *dp_rxdma_get_mon_buf_ring(struct dp_pdev *pdev, + uint8_t mac_for_pdev) +{ + if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) + return &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev]; + + /* For MCL there is only 1 rx refill ring */ + return &pdev->soc->rx_refill_buf_ring[0]; +} + +/** + * dp_rx_get_mon_desc_pool() - Return monitor descriptor pool + * based on target + * @soc: soc handle + * @mac_id: mac id number + * @pdev_id: pdev id number + * + * Return: descriptor pool address + */ +static inline +struct rx_desc_pool *dp_rx_get_mon_desc_pool(struct dp_soc *soc, + uint8_t mac_id, + uint8_t pdev_id) +{ + if (soc->wlan_cfg_ctx->rxdma1_enable) + return &soc->rx_desc_mon[mac_id]; + + return &soc->rx_desc_buf[pdev_id]; +} + +/** + * dp_rx_get_mon_desc() - Return Rx descriptor based on target + * @soc: soc handle + * @cookie: cookie value + * + * Return: Rx descriptor + */ +static inline +struct dp_rx_desc *dp_rx_get_mon_desc(struct dp_soc *soc, + uint32_t cookie) +{ + if (soc->wlan_cfg_ctx->rxdma1_enable) + return dp_rx_cookie_2_va_mon_buf(soc, cookie); + + return dp_rx_cookie_2_va_rxdma_buf(soc, cookie); +} + +/** + * dp_rx_mon_mpdu_pop() - Return a MPDU link descriptor to HW + * (WBM), following error handling + * + * @soc: core DP main context + * @mac_id: mac id which is one of 3 mac_ids + * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info + * @head_msdu: head of msdu to be popped + * @tail_msdu: tail of msdu to be popped + * @npackets: number of packet to be popped + * @ppdu_id: ppdu id of processing ppdu + * @head: head of descs list to be freed + * @tail: tail of decs list to be freed + * + * Return: number of msdu in MPDU to be popped + */ +static inline uint32_t +dp_rx_mon_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, + hal_rxdma_desc_t rxdma_dst_ring_desc, qdf_nbuf_t *head_msdu, + qdf_nbuf_t *tail_msdu, uint32_t *npackets, uint32_t *ppdu_id, + union dp_rx_desc_list_elem_t **head, + union dp_rx_desc_list_elem_t **tail) +{ + struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + void *rx_desc_tlv; + void *rx_msdu_link_desc; + qdf_nbuf_t msdu; + qdf_nbuf_t last; + struct hal_rx_msdu_list msdu_list; + uint16_t num_msdus; + uint32_t rx_buf_size, rx_pkt_offset; + struct hal_buf_info buf_info; + uint32_t rx_bufs_used = 0; + uint32_t msdu_ppdu_id, msdu_cnt; + uint8_t *data; + uint32_t i; + uint32_t total_frag_len = 0, frag_len = 0; + bool is_frag, is_first_msdu; + bool drop_mpdu = false; + uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST; + uint64_t nbuf_paddr = 0; + uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS]; + + msdu = 0; + + last = NULL; + + hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info, &msdu_cnt); + + if ((hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc) == + HAL_RX_WBM_RXDMA_PSH_RSN_ERROR)) { + uint8_t rxdma_err = + hal_rx_reo_ent_rxdma_error_code_get( + rxdma_dst_ring_desc); + if (qdf_unlikely((rxdma_err == HAL_RXDMA_ERR_FLUSH_REQUEST) || + (rxdma_err == HAL_RXDMA_ERR_MPDU_LENGTH) || + (rxdma_err == HAL_RXDMA_ERR_OVERFLOW))) { + drop_mpdu = true; + dp_pdev->rx_mon_stats.dest_mpdu_drop++; + } + } + + is_frag = false; + is_first_msdu = true; + + do { + /* WAR for duplicate link descriptors received from HW */ + if (qdf_unlikely(dp_pdev->mon_last_linkdesc_paddr == + buf_info.paddr)) { + dp_pdev->rx_mon_stats.dup_mon_linkdesc_cnt++; + return rx_bufs_used; + } + + rx_msdu_link_desc = + dp_rx_cookie_2_mon_link_desc(dp_pdev, + buf_info, mac_id); + + qdf_assert_always(rx_msdu_link_desc); + + hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc, + &msdu_list, &num_msdus); + + for (i = 0; i < num_msdus; i++) { + uint32_t l2_hdr_offset; + struct dp_rx_desc *rx_desc = NULL; + + rx_desc = dp_rx_get_mon_desc(soc, + msdu_list.sw_cookie[i]); + + qdf_assert_always(rx_desc); + msdu = rx_desc->nbuf; + + if (msdu) + nbuf_paddr = qdf_nbuf_get_frag_paddr(msdu, 0); + /* WAR for duplicate buffers received from HW */ + if (qdf_unlikely(dp_pdev->mon_last_buf_cookie == + msdu_list.sw_cookie[i] || + !msdu || + msdu_list.paddr[i] != nbuf_paddr || + !rx_desc->in_use)) { + /* Skip duplicate buffer and drop subsequent + * buffers in this MPDU + */ + drop_mpdu = true; + dp_pdev->rx_mon_stats.dup_mon_buf_cnt++; + dp_pdev->mon_last_linkdesc_paddr = + buf_info.paddr; + continue; + } + + if (rx_desc->unmapped == 0) { + qdf_nbuf_unmap_single(soc->osdev, msdu, + QDF_DMA_FROM_DEVICE); + rx_desc->unmapped = 1; + } + + if (drop_mpdu) { + dp_pdev->mon_last_linkdesc_paddr = + buf_info.paddr; + qdf_nbuf_free(msdu); + msdu = NULL; + goto next_msdu; + } + + data = qdf_nbuf_data(msdu); + + rx_desc_tlv = HAL_RX_MON_DEST_GET_DESC(data); + + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + "[%s] i=%d, ppdu_id=%x, num_msdus = %u", + __func__, i, *ppdu_id, num_msdus); + + if (is_first_msdu) { + if (!HAL_RX_HW_DESC_MPDU_VALID( + rx_desc_tlv)) { + drop_mpdu = true; + qdf_nbuf_free(msdu); + msdu = NULL; + dp_pdev->mon_last_linkdesc_paddr = + buf_info.paddr; + goto next_msdu; + } + + msdu_ppdu_id = hal_rx_hw_desc_get_ppduid_get( + soc->hal_soc, + rx_desc_tlv); + is_first_msdu = false; + + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + "[%s] msdu_ppdu_id=%x", + __func__, msdu_ppdu_id); + + if (*ppdu_id > msdu_ppdu_id) + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] ppdu_id=%d " + "msdu_ppdu_id=%d", + __func__, __LINE__, *ppdu_id, + msdu_ppdu_id); + + if ((*ppdu_id < msdu_ppdu_id) && ( + (msdu_ppdu_id - *ppdu_id) < + NOT_PPDU_ID_WRAP_AROUND)) { + *ppdu_id = msdu_ppdu_id; + return rx_bufs_used; + } else if ((*ppdu_id > msdu_ppdu_id) && ( + (*ppdu_id - msdu_ppdu_id) > + NOT_PPDU_ID_WRAP_AROUND)) { + *ppdu_id = msdu_ppdu_id; + return rx_bufs_used; + } + dp_pdev->mon_last_linkdesc_paddr = + buf_info.paddr; + } + + if (hal_rx_desc_is_first_msdu(soc->hal_soc, + rx_desc_tlv)) + hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, + rx_desc_tlv, + &(dp_pdev->ppdu_info.rx_status)); + + + if (msdu_list.msdu_info[i].msdu_flags & + HAL_MSDU_F_MSDU_CONTINUATION) { + if (!is_frag) { + total_frag_len = + msdu_list.msdu_info[i].msdu_len; + is_frag = true; + } + dp_mon_adjust_frag_len( + &total_frag_len, &frag_len); + } else { + if (is_frag) { + dp_mon_adjust_frag_len( + &total_frag_len, &frag_len); + } else { + frag_len = + msdu_list.msdu_info[i].msdu_len; + } + is_frag = false; + msdu_cnt--; + } + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s total_len %u frag_len %u flags %u", + __func__, total_frag_len, frag_len, + msdu_list.msdu_info[i].msdu_flags); + + rx_pkt_offset = SIZE_OF_MONITOR_TLV; + /* + * HW structures call this L3 header padding + * -- even though this is actually the offset + * from the buffer beginning where the L2 + * header begins. + */ + l2_hdr_offset = + hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, data); + + rx_buf_size = rx_pkt_offset + l2_hdr_offset + + frag_len; + + qdf_nbuf_set_pktlen(msdu, rx_buf_size); +#if 0 + /* Disble it.see packet on msdu done set to 0 */ + /* + * Check if DMA completed -- msdu_done is the + * last bit to be written + */ + if (!hal_rx_attn_msdu_done_get(rx_desc_tlv)) { + + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + "%s:%d: Pkt Desc", + __func__, __LINE__); + + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + rx_desc_tlv, 128); + + qdf_assert_always(0); + } +#endif + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + "%s: rx_pkt_offset=%d, l2_hdr_offset=%d, msdu_len=%d, addr=%pK skb->len %u", + __func__, rx_pkt_offset, l2_hdr_offset, + msdu_list.msdu_info[i].msdu_len, + qdf_nbuf_data(msdu), + (uint32_t)qdf_nbuf_len(msdu)); + + if (head_msdu && !*head_msdu) { + *head_msdu = msdu; + } else { + if (last) + qdf_nbuf_set_next(last, msdu); + } + + last = msdu; +next_msdu: + dp_pdev->mon_last_buf_cookie = msdu_list.sw_cookie[i]; + rx_bufs_used++; + dp_rx_add_to_free_desc_list(head, + tail, rx_desc); + } + + /* + * Store the current link buffer into to the local + * structure to be used for release purpose. + */ + hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr, + buf_info.sw_cookie, buf_info.rbm); + + hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info); + if (dp_rx_monitor_link_desc_return(dp_pdev, + (hal_buff_addrinfo_t) + rx_link_buf_info, + mac_id, + bm_action) + != QDF_STATUS_SUCCESS) + dp_err_rl("monitor link desc return failed"); + } while (buf_info.paddr && msdu_cnt); + + if (last) + qdf_nbuf_set_next(last, NULL); + + *tail_msdu = msdu; + + return rx_bufs_used; + +} + +static inline +void dp_rx_msdus_set_payload(struct dp_soc *soc, qdf_nbuf_t msdu) +{ + uint8_t *data; + uint32_t rx_pkt_offset, l2_hdr_offset; + + data = qdf_nbuf_data(msdu); + rx_pkt_offset = SIZE_OF_MONITOR_TLV; + l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, data); + qdf_nbuf_pull_head(msdu, rx_pkt_offset + l2_hdr_offset); +} + +static inline +qdf_nbuf_t dp_rx_mon_restitch_mpdu_from_msdus(struct dp_soc *soc, + uint32_t mac_id, qdf_nbuf_t head_msdu, qdf_nbuf_t last_msdu, + struct cdp_mon_status *rx_status) +{ + qdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list; + uint32_t decap_format, wifi_hdr_len, sec_hdr_len, msdu_llc_len, + mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir, + is_amsdu, is_first_frag, amsdu_pad; + void *rx_desc; + char *hdr_desc; + unsigned char *dest; + struct ieee80211_frame *wh; + struct ieee80211_qoscntl *qos; + struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + head_frag_list = NULL; + mpdu_buf = NULL; + + /* The nbuf has been pulled just beyond the status and points to the + * payload + */ + if (!head_msdu) + goto mpdu_stitch_fail; + + msdu_orig = head_msdu; + + rx_desc = qdf_nbuf_data(msdu_orig); + + if (HAL_RX_DESC_GET_MPDU_LENGTH_ERR(rx_desc)) { + /* It looks like there is some issue on MPDU len err */ + /* Need further investigate if drop the packet */ + DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1); + return NULL; + } + + rx_desc = qdf_nbuf_data(last_msdu); + + rx_status->cdp_rs_fcs_err = HAL_RX_DESC_GET_MPDU_FCS_ERR(rx_desc); + dp_pdev->ppdu_info.rx_status.rs_fcs_err = + HAL_RX_DESC_GET_MPDU_FCS_ERR(rx_desc); + + /* Fill out the rx_status from the PPDU start and end fields */ + /* HAL_RX_GET_PPDU_STATUS(soc, mac_id, rx_status); */ + + rx_desc = qdf_nbuf_data(head_msdu); + + decap_format = HAL_RX_DESC_GET_DECAP_FORMAT(rx_desc); + + /* Easy case - The MSDU status indicates that this is a non-decapped + * packet in RAW mode. + */ + if (decap_format == HAL_HW_RX_DECAP_FORMAT_RAW) { + /* Note that this path might suffer from headroom unavailabilty + * - but the RX status is usually enough + */ + + dp_rx_msdus_set_payload(soc, head_msdu); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] decap format raw head %pK head->next %pK last_msdu %pK last_msdu->next %pK", + __func__, __LINE__, head_msdu, head_msdu->next, + last_msdu, last_msdu->next); + + mpdu_buf = head_msdu; + + prev_buf = mpdu_buf; + + frag_list_sum_len = 0; + msdu = qdf_nbuf_next(head_msdu); + is_first_frag = 1; + + while (msdu) { + + dp_rx_msdus_set_payload(soc, msdu); + + if (is_first_frag) { + is_first_frag = 0; + head_frag_list = msdu; + } + + frag_list_sum_len += qdf_nbuf_len(msdu); + + /* Maintain the linking of the cloned MSDUS */ + qdf_nbuf_set_next_ext(prev_buf, msdu); + + /* Move to the next */ + prev_buf = msdu; + msdu = qdf_nbuf_next(msdu); + } + + qdf_nbuf_trim_tail(prev_buf, HAL_RX_FCS_LEN); + + /* If there were more fragments to this RAW frame */ + if (head_frag_list) { + if (frag_list_sum_len < + sizeof(struct ieee80211_frame_min_one)) { + DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1); + return NULL; + } + frag_list_sum_len -= HAL_RX_FCS_LEN; + qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list, + frag_list_sum_len); + qdf_nbuf_set_next(mpdu_buf, NULL); + } + + goto mpdu_stitch_done; + } + + /* Decap mode: + * Calculate the amount of header in decapped packet to knock off based + * on the decap type and the corresponding number of raw bytes to copy + * status header + */ + rx_desc = qdf_nbuf_data(head_msdu); + + hdr_desc = HAL_RX_DESC_GET_80211_HDR(rx_desc); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] decap format not raw", + __func__, __LINE__); + + + /* Base size */ + wifi_hdr_len = sizeof(struct ieee80211_frame); + wh = (struct ieee80211_frame *)hdr_desc; + + dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; + + if (dir == IEEE80211_FC1_DIR_DSTODS) + wifi_hdr_len += 6; + + is_amsdu = 0; + if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) { + qos = (struct ieee80211_qoscntl *) + (hdr_desc + wifi_hdr_len); + wifi_hdr_len += 2; + + is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU); + } + + /*Calculate security header length based on 'Protected' + * and 'EXT_IV' flag + * */ + if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + char *iv = (char *)wh + wifi_hdr_len; + + if (iv[3] & KEY_EXTIV) + sec_hdr_len = 8; + else + sec_hdr_len = 4; + } else { + sec_hdr_len = 0; + } + wifi_hdr_len += sec_hdr_len; + + /* MSDU related stuff LLC - AMSDU subframe header etc */ + msdu_llc_len = is_amsdu ? (14 + 8) : 8; + + mpdu_buf_len = wifi_hdr_len + msdu_llc_len; + + /* "Decap" header to remove from MSDU buffer */ + decap_hdr_pull_bytes = 14; + + /* Allocate a new nbuf for holding the 802.11 header retrieved from the + * status of the now decapped first msdu. Leave enough headroom for + * accomodating any radio-tap /prism like PHY header + */ + mpdu_buf = qdf_nbuf_alloc(soc->osdev, + MAX_MONITOR_HEADER + mpdu_buf_len, + MAX_MONITOR_HEADER, 4, FALSE); + + if (!mpdu_buf) + goto mpdu_stitch_done; + + /* Copy the MPDU related header and enc headers into the first buffer + * - Note that there can be a 2 byte pad between heaader and enc header + */ + + prev_buf = mpdu_buf; + dest = qdf_nbuf_put_tail(prev_buf, wifi_hdr_len); + if (!dest) + goto mpdu_stitch_fail; + + qdf_mem_copy(dest, hdr_desc, wifi_hdr_len); + hdr_desc += wifi_hdr_len; + +#if 0 + dest = qdf_nbuf_put_tail(prev_buf, sec_hdr_len); + adf_os_mem_copy(dest, hdr_desc, sec_hdr_len); + hdr_desc += sec_hdr_len; +#endif + + /* The first LLC len is copied into the MPDU buffer */ + frag_list_sum_len = 0; + + msdu_orig = head_msdu; + is_first_frag = 1; + amsdu_pad = 0; + + while (msdu_orig) { + + /* TODO: intra AMSDU padding - do we need it ??? */ + + msdu = msdu_orig; + + if (is_first_frag) { + head_frag_list = msdu; + } else { + /* Reload the hdr ptr only on non-first MSDUs */ + rx_desc = qdf_nbuf_data(msdu_orig); + hdr_desc = HAL_RX_DESC_GET_80211_HDR(rx_desc); + } + + /* Copy this buffers MSDU related status into the prev buffer */ + + if (is_first_frag) { + is_first_frag = 0; + } + + /* Update protocol and flow tag for MSDU */ + dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev, + msdu_orig, rx_desc); + + dest = qdf_nbuf_put_tail(prev_buf, + msdu_llc_len + amsdu_pad); + + if (!dest) + goto mpdu_stitch_fail; + + dest += amsdu_pad; + qdf_mem_copy(dest, hdr_desc, msdu_llc_len); + + dp_rx_msdus_set_payload(soc, msdu); + + /* Push the MSDU buffer beyond the decap header */ + qdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes); + frag_list_sum_len += msdu_llc_len + qdf_nbuf_len(msdu) + + amsdu_pad; + + /* Set up intra-AMSDU pad to be added to start of next buffer - + * AMSDU pad is 4 byte pad on AMSDU subframe */ + amsdu_pad = (msdu_llc_len + qdf_nbuf_len(msdu)) & 0x3; + amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0; + + /* TODO FIXME How do we handle MSDUs that have fraglist - Should + * probably iterate all the frags cloning them along the way and + * and also updating the prev_buf pointer + */ + + /* Move to the next */ + prev_buf = msdu; + msdu_orig = qdf_nbuf_next(msdu_orig); + + } + +#if 0 + /* Add in the trailer section - encryption trailer + FCS */ + qdf_nbuf_put_tail(prev_buf, HAL_RX_FCS_LEN); + frag_list_sum_len += HAL_RX_FCS_LEN; +#endif + + frag_list_sum_len -= msdu_llc_len; + + /* TODO: Convert this to suitable adf routines */ + qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list, + frag_list_sum_len); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s %d mpdu_buf %pK mpdu_buf->len %u", + __func__, __LINE__, + mpdu_buf, mpdu_buf->len); + +mpdu_stitch_done: + /* Check if this buffer contains the PPDU end status for TSF */ + /* Need revist this code to see where we can get tsf timestamp */ +#if 0 + /* PPDU end TLV will be retrieved from monitor status ring */ + last_mpdu = + (*(((u_int32_t *)&rx_desc->attention)) & + RX_ATTENTION_0_LAST_MPDU_MASK) >> + RX_ATTENTION_0_LAST_MPDU_LSB; + + if (last_mpdu) + rx_status->rs_tstamp.tsf = rx_desc->ppdu_end.tsf_timestamp; + +#endif + return mpdu_buf; + +mpdu_stitch_fail: + if ((mpdu_buf) && (decap_format != HAL_HW_RX_DECAP_FORMAT_RAW)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s mpdu_stitch_fail mpdu_buf %pK", + __func__, mpdu_buf); + /* Free the head buffer */ + qdf_nbuf_free(mpdu_buf); + } + return NULL; +} + +/** + * dp_send_mgmt_packet_to_stack(): send indicataion to upper layers + * + * @soc: soc handle + * @nbuf: Mgmt packet + * @pdev: pdev handle + * + * Return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_INVAL in error + */ +#ifdef FEATURE_PERPKT_INFO +static inline QDF_STATUS dp_send_mgmt_packet_to_stack(struct dp_soc *soc, + qdf_nbuf_t nbuf, + struct dp_pdev *pdev) +{ + uint32_t *nbuf_data; + struct ieee80211_frame *wh; + + if (!nbuf) + return QDF_STATUS_E_INVAL; + + /*check if this is not a mgmt packet*/ + wh = (struct ieee80211_frame *)qdf_nbuf_data(nbuf); + if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != + IEEE80211_FC0_TYPE_MGT) && + ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != + IEEE80211_FC0_TYPE_CTL)) { + qdf_nbuf_free(nbuf); + return QDF_STATUS_E_INVAL; + } + nbuf_data = (uint32_t *)qdf_nbuf_push_head(nbuf, 4); + if (!nbuf_data) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("No headroom")); + qdf_nbuf_free(nbuf); + return QDF_STATUS_E_INVAL; + } + *nbuf_data = pdev->ppdu_info.com_info.ppdu_id; + + dp_wdi_event_handler(WDI_EVENT_RX_MGMT_CTRL, soc, nbuf, + HTT_INVALID_PEER, + WDI_NO_VAL, pdev->pdev_id); + return QDF_STATUS_SUCCESS; +} +#else +static inline QDF_STATUS dp_send_mgmt_packet_to_stack(struct dp_soc *soc, + qdf_nbuf_t nbuf, + struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * dp_rx_extract_radiotap_info(): Extract and populate information in + * struct mon_rx_status type + * @rx_status: Receive status + * @mon_rx_status: Monitor mode status + * + * Returns: None + */ +static inline +void dp_rx_extract_radiotap_info(struct cdp_mon_status *rx_status, + struct mon_rx_status *rx_mon_status) +{ + rx_mon_status->tsft = rx_status->cdp_rs_tstamp.cdp_tsf; + rx_mon_status->chan_freq = rx_status->rs_freq; + rx_mon_status->chan_num = rx_status->rs_channel; + rx_mon_status->chan_flags = rx_status->rs_flags; + rx_mon_status->rate = rx_status->rs_datarate; + /* TODO: rx_mon_status->ant_signal_db */ + /* TODO: rx_mon_status->nr_ant */ + rx_mon_status->mcs = rx_status->cdf_rs_rate_mcs; + rx_mon_status->is_stbc = rx_status->cdp_rs_stbc; + rx_mon_status->sgi = rx_status->cdp_rs_sgi; + /* TODO: rx_mon_status->ldpc */ + /* TODO: rx_mon_status->beamformed */ + /* TODO: rx_mon_status->vht_flags */ + /* TODO: rx_mon_status->vht_flag_values1 */ +} + +/* + * dp_rx_mon_deliver(): function to deliver packets to stack + * @soc: DP soc + * @mac_id: MAC ID + * @head_msdu: head of msdu list + * @tail_msdu: tail of msdu list + * + * Return: status: 0 - Success, non-zero: Failure + */ +QDF_STATUS dp_rx_mon_deliver(struct dp_soc *soc, uint32_t mac_id, + qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu) +{ + struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + struct cdp_mon_status *rs = &pdev->rx_mon_recv_status; + qdf_nbuf_t mon_skb, skb_next; + qdf_nbuf_t mon_mpdu = NULL; + + if (!pdev->monitor_vdev && !pdev->mcopy_mode) + goto mon_deliver_fail; + + /* restitch mon MPDU for delivery via monitor interface */ + mon_mpdu = dp_rx_mon_restitch_mpdu_from_msdus(soc, mac_id, head_msdu, + tail_msdu, rs); + + /* monitor vap cannot be present when mcopy is enabled + * hence same skb can be consumed + */ + if (pdev->mcopy_mode) + return dp_send_mgmt_packet_to_stack(soc, mon_mpdu, pdev); + + if (mon_mpdu && pdev->monitor_vdev && pdev->monitor_vdev->osif_vdev && + pdev->monitor_vdev->osif_rx_mon) { + pdev->ppdu_info.rx_status.ppdu_id = + pdev->ppdu_info.com_info.ppdu_id; + pdev->ppdu_info.rx_status.device_id = soc->device_id; + pdev->ppdu_info.rx_status.chan_noise_floor = + pdev->chan_noise_floor; + + if (!qdf_nbuf_update_radiotap(&pdev->ppdu_info.rx_status, + mon_mpdu, + qdf_nbuf_headroom(mon_mpdu))) { + DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1); + goto mon_deliver_fail; + } + + pdev->monitor_vdev->osif_rx_mon(pdev->monitor_vdev->osif_vdev, + mon_mpdu, + &pdev->ppdu_info.rx_status); + } else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] mon_mpdu=%pK monitor_vdev %pK osif_vdev %pK" + , __func__, __LINE__, mon_mpdu, pdev->monitor_vdev, + (pdev->monitor_vdev ? pdev->monitor_vdev->osif_vdev + : NULL)); + goto mon_deliver_fail; + } + + return QDF_STATUS_SUCCESS; + +mon_deliver_fail: + mon_skb = head_msdu; + while (mon_skb) { + skb_next = qdf_nbuf_next(mon_skb); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] mon_skb=%pK len %u", __func__, + __LINE__, mon_skb, mon_skb->len); + + qdf_nbuf_free(mon_skb); + mon_skb = skb_next; + } + return QDF_STATUS_E_INVAL; +} + +/** +* dp_rx_mon_deliver_non_std() +* @soc: core txrx main contex +* @mac_id: MAC ID +* +* This function delivers the radio tap and dummy MSDU +* into user layer application for preamble only PPDU. +* +* Return: QDF_STATUS +*/ +QDF_STATUS dp_rx_mon_deliver_non_std(struct dp_soc *soc, + uint32_t mac_id) +{ + struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + ol_txrx_rx_mon_fp osif_rx_mon; + qdf_nbuf_t dummy_msdu; + + /* Sanity checking */ + if ((!pdev->monitor_vdev) || (!pdev->monitor_vdev->osif_rx_mon)) + goto mon_deliver_non_std_fail; + + /* Generate a dummy skb_buff */ + osif_rx_mon = pdev->monitor_vdev->osif_rx_mon; + dummy_msdu = qdf_nbuf_alloc(soc->osdev, MAX_MONITOR_HEADER, + MAX_MONITOR_HEADER, 4, FALSE); + if (!dummy_msdu) + goto allocate_dummy_msdu_fail; + + qdf_nbuf_set_pktlen(dummy_msdu, 0); + qdf_nbuf_set_next(dummy_msdu, NULL); + + pdev->ppdu_info.rx_status.ppdu_id = + pdev->ppdu_info.com_info.ppdu_id; + + /* Apply the radio header to this dummy skb */ + if (!qdf_nbuf_update_radiotap(&pdev->ppdu_info.rx_status, dummy_msdu, + qdf_nbuf_headroom(dummy_msdu))) { + DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1); + qdf_nbuf_free(dummy_msdu); + goto mon_deliver_non_std_fail; + } + + /* deliver to the user layer application */ + osif_rx_mon(pdev->monitor_vdev->osif_vdev, + dummy_msdu, NULL); + + /* Clear rx_status*/ + qdf_mem_zero(&pdev->ppdu_info.rx_status, + sizeof(pdev->ppdu_info.rx_status)); + pdev->mon_ppdu_status = DP_PPDU_STATUS_START; + + return QDF_STATUS_SUCCESS; + +allocate_dummy_msdu_fail: + QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, "[%s][%d] mon_skb=%pK ", + __func__, __LINE__, dummy_msdu); + +mon_deliver_non_std_fail: + return QDF_STATUS_E_INVAL; +} + +/** +* dp_rx_mon_dest_process() - Brain of the Rx processing functionality +* Called from the bottom half (tasklet/NET_RX_SOFTIRQ) +* @soc: core txrx main contex +* @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced +* @quota: No. of units (packets) that can be serviced in one shot. +* +* This function implements the core of Rx functionality. This is +* expected to handle only non-error frames. +* +* Return: none +*/ +void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) +{ + struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + uint8_t pdev_id; + hal_rxdma_desc_t rxdma_dst_ring_desc; + hal_soc_handle_t hal_soc; + void *mon_dst_srng; + union dp_rx_desc_list_elem_t *head = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + uint32_t ppdu_id; + uint32_t rx_bufs_used; + uint32_t mpdu_rx_bufs_used; + int mac_for_pdev = mac_id; + struct cdp_pdev_mon_stats *rx_mon_stats; + + mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_for_pdev); + + if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s %d : HAL Monitor Destination Ring Init Failed -- %pK", + __func__, __LINE__, mon_dst_srng); + return; + } + + hal_soc = soc->hal_soc; + + qdf_assert((hal_soc && pdev)); + + qdf_spin_lock_bh(&pdev->mon_lock); + + if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dst_srng))) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s %d : HAL Monitor Destination Ring access Failed -- %pK", + __func__, __LINE__, mon_dst_srng); + return; + } + + pdev_id = pdev->pdev_id; + ppdu_id = pdev->ppdu_info.com_info.ppdu_id; + rx_bufs_used = 0; + rx_mon_stats = &pdev->rx_mon_stats; + + while (qdf_likely(rxdma_dst_ring_desc = + hal_srng_dst_peek(hal_soc, mon_dst_srng))) { + qdf_nbuf_t head_msdu, tail_msdu; + uint32_t npackets; + head_msdu = (qdf_nbuf_t) NULL; + tail_msdu = (qdf_nbuf_t) NULL; + + mpdu_rx_bufs_used = + dp_rx_mon_mpdu_pop(soc, mac_id, + rxdma_dst_ring_desc, + &head_msdu, &tail_msdu, + &npackets, &ppdu_id, + &head, &tail); + + rx_bufs_used += mpdu_rx_bufs_used; + + if (mpdu_rx_bufs_used) + pdev->mon_dest_ring_stuck_cnt = 0; + else + pdev->mon_dest_ring_stuck_cnt++; + + if (pdev->mon_dest_ring_stuck_cnt > + MON_DEST_RING_STUCK_MAX_CNT) { + dp_info("destination ring stuck"); + dp_info("ppdu_id status=%d dest=%d", + pdev->ppdu_info.com_info.ppdu_id, ppdu_id); + rx_mon_stats->mon_rx_dest_stuck++; + pdev->ppdu_info.com_info.ppdu_id = ppdu_id; + continue; + } + + if (ppdu_id != pdev->ppdu_info.com_info.ppdu_id) { + rx_mon_stats->stat_ring_ppdu_id_hist[ + rx_mon_stats->ppdu_id_hist_idx] = + pdev->ppdu_info.com_info.ppdu_id; + rx_mon_stats->dest_ring_ppdu_id_hist[ + rx_mon_stats->ppdu_id_hist_idx] = ppdu_id; + rx_mon_stats->ppdu_id_hist_idx = + (rx_mon_stats->ppdu_id_hist_idx + 1) & + (MAX_PPDU_ID_HIST - 1); + pdev->mon_ppdu_status = DP_PPDU_STATUS_START; + qdf_mem_zero(&(pdev->ppdu_info.rx_status), + sizeof(pdev->ppdu_info.rx_status)); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s %d ppdu_id %x != ppdu_info.com_info .ppdu_id %x", + __func__, __LINE__, + ppdu_id, pdev->ppdu_info.com_info.ppdu_id); + break; + } + + if (qdf_likely((head_msdu) && (tail_msdu))) { + rx_mon_stats->dest_mpdu_done++; + dp_rx_mon_deliver(soc, mac_id, head_msdu, tail_msdu); + } + + rxdma_dst_ring_desc = hal_srng_dst_get_next(hal_soc, + mon_dst_srng); + } + hal_srng_access_end(hal_soc, mon_dst_srng); + + qdf_spin_unlock_bh(&pdev->mon_lock); + + if (rx_bufs_used) { + rx_mon_stats->dest_ppdu_done++; + dp_rx_buffers_replenish(soc, mac_id, + dp_rxdma_get_mon_buf_ring(pdev, + mac_for_pdev), + dp_rx_get_mon_desc_pool(soc, mac_id, + pdev_id), + rx_bufs_used, &head, &tail); + } +} + +#ifndef DISABLE_MON_CONFIG +#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \ + !defined(QCA_WIFI_QCA6750) +/** + * dp_rx_pdev_mon_buf_attach() - Allocate the monitor descriptor pool + * + * @pdev: physical device handle + * @mac_id: mac id + * + * Return: QDF_STATUS + */ +#define MON_BUF_MIN_ALLOC_ENTRIES 128 +static QDF_STATUS +dp_rx_pdev_mon_buf_attach(struct dp_pdev *pdev, int mac_id) { + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + struct dp_srng *mon_buf_ring; + uint32_t num_entries; + struct rx_desc_pool *rx_desc_pool; + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint32_t rx_desc_pool_size, replenish_size; + + mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id]; + + num_entries = mon_buf_ring->num_entries; + + rx_desc_pool = &soc->rx_desc_mon[mac_id]; + + dp_debug("Mon RX Desc Pool[%d] entries=%u", + pdev_id, num_entries); + + rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx) * num_entries; + status = dp_rx_desc_pool_alloc(soc, mac_id, rx_desc_pool_size, + rx_desc_pool); + if (!QDF_IS_STATUS_SUCCESS(status)) + return status; + + rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM; + rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE; + rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT; + + replenish_size = ((num_entries - 1) < MON_BUF_MIN_ALLOC_ENTRIES) ? + (num_entries - 1) : MON_BUF_MIN_ALLOC_ENTRIES; + status = dp_pdev_rx_buffers_attach(soc, mac_id, mon_buf_ring, + rx_desc_pool, replenish_size); + + return status; +} + +static QDF_STATUS +dp_rx_pdev_mon_buf_detach(struct dp_pdev *pdev, int mac_id) +{ + struct dp_soc *soc = pdev->soc; + struct rx_desc_pool *rx_desc_pool; + + rx_desc_pool = &soc->rx_desc_mon[mac_id]; + if (rx_desc_pool->pool_size != 0) { + if (!dp_is_soc_reinit(soc)) + dp_rx_desc_nbuf_and_pool_free(soc, mac_id, + rx_desc_pool); + else + dp_rx_desc_nbuf_free(soc, rx_desc_pool); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_mon_link_desc_pool_setup(): Allocate and setup link descriptor pool + * that will be used by HW for various link + * and queue descriptorsand managed by WBM + * + * @soc: soc handle + * @mac_id: mac id + * + * Return: QDF_STATUS + */ +static +QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id) +{ + int link_desc_size = hal_get_link_desc_size(soc->hal_soc); + int link_desc_align = hal_get_link_desc_align(soc->hal_soc); + uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx); + uint32_t total_link_descs, total_mem_size; + uint32_t num_link_desc_banks; + uint32_t last_bank_size = 0; + uint32_t entry_size, num_entries; + void *mon_desc_srng; + uint32_t num_replenish_buf; + struct dp_srng *dp_srng; + int i; + qdf_dma_addr_t *baseaddr = NULL; + + dp_srng = &soc->rxdma_mon_desc_ring[mac_id]; + + num_entries = dp_srng->alloc_size/hal_srng_get_entrysize( + soc->hal_soc, RXDMA_MONITOR_DESC); + + /* Round up to power of 2 */ + total_link_descs = 1; + while (total_link_descs < num_entries) + total_link_descs <<= 1; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, + "%s: total_link_descs: %u, link_desc_size: %d", + __func__, total_link_descs, link_desc_size); + + total_mem_size = total_link_descs * link_desc_size; + + total_mem_size += link_desc_align; + + if (total_mem_size <= max_alloc_size) { + num_link_desc_banks = 0; + last_bank_size = total_mem_size; + } else { + num_link_desc_banks = (total_mem_size) / + (max_alloc_size - link_desc_align); + last_bank_size = total_mem_size % + (max_alloc_size - link_desc_align); + } + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN, + "%s: total_mem_size: %d, num_link_desc_banks: %u", + __func__, total_mem_size, num_link_desc_banks); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN, + "%s: max_alloc_size: %d last_bank_size: %d", + __func__, max_alloc_size, last_bank_size); + + for (i = 0; i < num_link_desc_banks; i++) { + baseaddr = &soc->mon_link_desc_banks[mac_id][i]. + base_paddr_unaligned; + if (!dp_is_soc_reinit(soc)) { + soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned = + qdf_mem_alloc_consistent(soc->osdev, + soc->osdev->dev, + max_alloc_size, + baseaddr); + + if (!soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned) { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + "%s: Link desc mem alloc failed", + __func__); + goto fail; + } + } + soc->mon_link_desc_banks[mac_id][i].size = max_alloc_size; + + soc->mon_link_desc_banks[mac_id][i].base_vaddr = + (void *)((unsigned long) + (soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned) + + ((unsigned long) + (soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned) % + link_desc_align)); + + soc->mon_link_desc_banks[mac_id][i].base_paddr = + (unsigned long) + (soc->mon_link_desc_banks[mac_id][i]. + base_paddr_unaligned) + + ((unsigned long) + (soc->mon_link_desc_banks[mac_id][i].base_vaddr) - + (unsigned long) + (soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned)); + } + + if (last_bank_size) { + /* Allocate last bank in case total memory required is not exact + * multiple of max_alloc_size + */ + baseaddr = &soc->mon_link_desc_banks[mac_id][i]. + base_paddr_unaligned; + if (!dp_is_soc_reinit(soc)) { + soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned = + qdf_mem_alloc_consistent(soc->osdev, + soc->osdev->dev, + last_bank_size, + baseaddr); + + if (!soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned) { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + "%s: alloc fail:mon link desc pool", + __func__); + goto fail; + } + } + soc->mon_link_desc_banks[mac_id][i].size = + last_bank_size; + + soc->mon_link_desc_banks[mac_id][i].base_vaddr = + (void *)((unsigned long) + (soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned) + + ((unsigned long) + (soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned) % + link_desc_align)); + + soc->mon_link_desc_banks[mac_id][i].base_paddr = + (unsigned long) + (soc->mon_link_desc_banks[mac_id][i]. + base_paddr_unaligned) + + ((unsigned long) + (soc->mon_link_desc_banks[mac_id][i].base_vaddr) - + (unsigned long) + (soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned)); + } + + /* Allocate and setup link descriptor idle list for HW internal use */ + entry_size = hal_srng_get_entrysize(soc->hal_soc, RXDMA_MONITOR_DESC); + total_mem_size = entry_size * total_link_descs; + + mon_desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng; + + num_replenish_buf = 0; + + if (total_mem_size <= max_alloc_size) { + void *desc; + + + for (i = 0; + i < MAX_MON_LINK_DESC_BANKS && + soc->mon_link_desc_banks[mac_id][i].base_paddr; + i++) { + uint32_t num_entries = + (soc->mon_link_desc_banks[mac_id][i].size - + (unsigned long) + (soc->mon_link_desc_banks[mac_id][i].base_vaddr) - + (unsigned long) + (soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned)) / link_desc_size; + unsigned long paddr = + (unsigned long) + (soc->mon_link_desc_banks[mac_id][i].base_paddr); + unsigned long vaddr = + (unsigned long) + (soc->mon_link_desc_banks[mac_id][i].base_vaddr); + + hal_srng_access_start_unlocked(soc->hal_soc, + mon_desc_srng); + + while (num_entries && (desc = + hal_srng_src_get_next(soc->hal_soc, + mon_desc_srng))) { + + hal_set_link_desc_addr(desc, i, paddr); + num_entries--; + num_replenish_buf++; + paddr += link_desc_size; + vaddr += link_desc_size; + } + hal_srng_access_end_unlocked(soc->hal_soc, + mon_desc_srng); + } + } else { + qdf_assert(0); + } + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN, + "%s: successfully replenished %d buffer", + __func__, num_replenish_buf); + + return QDF_STATUS_SUCCESS; + +fail: + for (i = 0; i < MAX_MON_LINK_DESC_BANKS; i++) { + if (soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned) { + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + soc->mon_link_desc_banks[mac_id][i]. + size, + soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned, + soc->mon_link_desc_banks[mac_id][i]. + base_paddr_unaligned, 0); + soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned = NULL; + } + } + return QDF_STATUS_E_FAILURE; +} + +/* + * Free link descriptor pool that was setup HW + */ +static +void dp_mon_link_desc_pool_cleanup(struct dp_soc *soc, uint32_t mac_id) +{ + int i; + + for (i = 0; i < MAX_MON_LINK_DESC_BANKS; i++) { + if (soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned) { + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + soc->mon_link_desc_banks[mac_id][i]. + size, + soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned, + soc->mon_link_desc_banks[mac_id][i]. + base_paddr_unaligned, 0); + soc->mon_link_desc_banks[mac_id][i]. + base_vaddr_unaligned = NULL; + } + } +} + +/** + * dp_mon_buf_delayed_replenish() - Helper routine to replenish monitor dest buf + * @pdev: DP pdev object + * + * Return: None + */ +void dp_mon_buf_delayed_replenish(struct dp_pdev *pdev) +{ + struct dp_soc *soc; + uint32_t mac_for_pdev; + union dp_rx_desc_list_elem_t *tail = NULL; + union dp_rx_desc_list_elem_t *desc_list = NULL; + uint32_t num_entries; + uint32_t id; + + soc = pdev->soc; + num_entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev->wlan_cfg_ctx); + + for (id = 0; id < NUM_RXDMA_RINGS_PER_PDEV; id++) { + /* + * Get mac_for_pdev appropriately for both MCL & WIN, + * since MCL have multiple mon buf rings and WIN just + * has one mon buffer ring mapped per pdev, below API + * helps identify accurate buffer_ring for both cases + * + */ + mac_for_pdev = + dp_get_lmac_id_for_pdev_id(soc, id, pdev->pdev_id); + + dp_rx_buffers_replenish(soc, mac_for_pdev, + dp_rxdma_get_mon_buf_ring(pdev, + mac_for_pdev), + dp_rx_get_mon_desc_pool(soc, + mac_for_pdev, + pdev->pdev_id), + num_entries, &desc_list, &tail); + } +} +#else +static +QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +dp_rx_pdev_mon_buf_attach(struct dp_pdev *pdev, int mac_id) +{ + return QDF_STATUS_SUCCESS; +} + +static +void dp_mon_link_desc_pool_cleanup(struct dp_soc *soc, uint32_t mac_id) +{ +} + +static QDF_STATUS +dp_rx_pdev_mon_buf_detach(struct dp_pdev *pdev, int mac_id) +{ + return QDF_STATUS_SUCCESS; +} + +void dp_mon_buf_delayed_replenish(struct dp_pdev *pdev) +{} +#endif + +/** + * dp_rx_pdev_mon_cmn_detach() - detach dp rx for monitor mode + * @pdev: core txrx pdev context + * @mac_id: mac_id for which deinit is to be done + * + * This function will free DP Rx resources for + * monitor mode + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +static QDF_STATUS +dp_rx_pdev_mon_cmn_detach(struct dp_pdev *pdev, int mac_id) { + struct dp_soc *soc = pdev->soc; + uint8_t pdev_id = pdev->pdev_id; + int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id); + + dp_mon_link_desc_pool_cleanup(soc, mac_for_pdev); + dp_rx_pdev_mon_status_detach(pdev, mac_for_pdev); + dp_rx_pdev_mon_buf_detach(pdev, mac_for_pdev); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_rx_pdev_mon_cmn_attach() - attach DP RX for monitor mode + * @pdev: core txrx pdev context + * @mac_id: mac_id for which init is to be done + * + * This function Will allocate dp rx resource and + * initialize resources for monitor mode. + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +static QDF_STATUS +dp_rx_pdev_mon_cmn_attach(struct dp_pdev *pdev, int mac_id) { + struct dp_soc *soc = pdev->soc; + uint8_t pdev_id = pdev->pdev_id; + int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id); + QDF_STATUS status; + + status = dp_rx_pdev_mon_buf_attach(pdev, mac_for_pdev); + if (!QDF_IS_STATUS_SUCCESS(status)) { + dp_err("%s: dp_rx_pdev_mon_buf_attach() failed\n", __func__); + goto fail; + } + + status = dp_rx_pdev_mon_status_attach(pdev, mac_for_pdev); + if (!QDF_IS_STATUS_SUCCESS(status)) { + dp_err("%s: dp_rx_pdev_mon_status_attach() failed", __func__); + goto mon_buf_detach; + } + + status = dp_mon_link_desc_pool_setup(soc, mac_for_pdev); + if (!QDF_IS_STATUS_SUCCESS(status)) { + dp_err("%s: dp_mon_link_desc_pool_setup() failed", __func__); + goto mon_status_detach; + } + + return status; + +mon_status_detach: + dp_rx_pdev_mon_status_detach(pdev, mac_for_pdev); + +mon_buf_detach: + dp_rx_pdev_mon_buf_detach(pdev, mac_for_pdev); + +fail: + return status; +} + +/** + * dp_rx_pdev_mon_attach() - attach DP RX for monitor mode + * @pdev: core txrx pdev context + * + * This function will attach a DP RX for monitor mode instance into + * the main device (SOC) context. Will allocate dp rx resource and + * initialize resources. + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS +dp_rx_pdev_mon_attach(struct dp_pdev *pdev) { + QDF_STATUS status; + uint8_t pdev_id = pdev->pdev_id; + int mac_id; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN, + "%s: pdev attach id=%d", __func__, pdev_id); + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + status = dp_rx_pdev_mon_cmn_attach(pdev, mac_id); + if (!QDF_IS_STATUS_SUCCESS(status)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + "%s: dp_rx_pdev_mon_cmn_attach(%d) failed\n", + __func__, mac_id); + goto fail; + } + } + pdev->mon_last_linkdesc_paddr = 0; + pdev->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; + qdf_spinlock_create(&pdev->mon_lock); + return QDF_STATUS_SUCCESS; + +fail: + for (mac_id = mac_id - 1; mac_id >= 0; mac_id--) + dp_rx_pdev_mon_cmn_detach(pdev, mac_id); + + return status; +} + +QDF_STATUS +dp_mon_link_free(struct dp_pdev *pdev) { + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + int mac_id; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, + mac_id, pdev_id); + + dp_mon_link_desc_pool_cleanup(soc, mac_for_pdev); + } + + return QDF_STATUS_SUCCESS; +} +/** + * dp_rx_pdev_mon_detach() - detach dp rx for monitor mode + * @pdev: core txrx pdev context + * + * This function will detach DP RX for monitor mode from + * main device context. will free DP Rx resources for + * monitor mode + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS +dp_rx_pdev_mon_detach(struct dp_pdev *pdev) { + uint8_t pdev_id = pdev->pdev_id; + int mac_id; + + qdf_spinlock_destroy(&pdev->mon_lock); + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, + mac_id, pdev_id); + + dp_rx_pdev_mon_status_detach(pdev, mac_for_pdev); + dp_rx_pdev_mon_buf_detach(pdev, mac_for_pdev); + } + + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS +dp_rx_pdev_mon_attach(struct dp_pdev *pdev) { + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +dp_rx_pdev_mon_detach(struct dp_pdev *pdev) { + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +dp_mon_link_free(struct dp_pdev *pdev) { + return QDF_STATUS_SUCCESS; +} + +void dp_mon_buf_delayed_replenish(struct dp_pdev *pdev) +{} +#endif /* DISABLE_MON_CONFIG */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon_status.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon_status.c new file mode 100644 index 0000000000000000000000000000000000000000..251b6d4a0d60193fec1056b22845b62c5cd27a56 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon_status.c @@ -0,0 +1,2063 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "hal_hw_headers.h" +#include "dp_types.h" +#include "dp_rx.h" +#include "dp_peer.h" +#include "hal_rx.h" +#include "hal_api.h" +#include "qdf_trace.h" +#include "qdf_nbuf.h" +#include "hal_api_mon.h" +#include "dp_rx_mon.h" +#include "dp_internal.h" +#include "qdf_mem.h" /* qdf_mem_malloc,free */ + +#include "htt.h" + +#ifdef FEATURE_PERPKT_INFO +#include "dp_ratetable.h" +#endif + +static inline void +dp_rx_populate_cfr_non_assoc_sta(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + struct cdp_rx_indication_ppdu *cdp_rx_ppdu); + +#ifdef WLAN_RX_PKT_CAPTURE_ENH +#include "dp_rx_mon_feature.h" +#else +static QDF_STATUS +dp_rx_handle_enh_capture(struct dp_soc *soc, struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info) +{ + return QDF_STATUS_SUCCESS; +} + +static void +dp_rx_mon_enh_capture_process(struct dp_pdev *pdev, uint32_t tlv_status, + qdf_nbuf_t status_nbuf, + struct hal_rx_ppdu_info *ppdu_info, + bool *nbuf_used) +{ +} +#endif + +#ifdef WLAN_TX_PKT_CAPTURE_ENH +#include "dp_rx_mon_feature.h" +#else +static QDF_STATUS +dp_send_ack_frame_to_stack(struct dp_soc *soc, + struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef FEATURE_PERPKT_INFO +static inline void +dp_rx_populate_rx_rssi_chain(struct hal_rx_ppdu_info *ppdu_info, + struct cdp_rx_indication_ppdu *cdp_rx_ppdu) +{ + uint8_t chain, bw; + int8_t rssi; + + for (chain = 0; chain < SS_COUNT; chain++) { + for (bw = 0; bw < MAX_BW; bw++) { + rssi = ppdu_info->rx_status.rssi_chain[chain][bw]; + if (rssi != DP_RSSI_INVAL) + cdp_rx_ppdu->rssi_chain[chain][bw] = rssi; + else + cdp_rx_ppdu->rssi_chain[chain][bw] = 0; + } + } +} + +/* + * dp_rx_populate_su_evm_details() - Populate su evm info + * @ppdu_info: ppdu info structure from ppdu ring + * @cdp_rx_ppdu: rx ppdu indication structure + */ +static inline void +dp_rx_populate_su_evm_details(struct hal_rx_ppdu_info *ppdu_info, + struct cdp_rx_indication_ppdu *cdp_rx_ppdu) +{ + uint8_t pilot_evm; + uint8_t nss_count; + uint8_t pilot_count; + + nss_count = ppdu_info->evm_info.nss_count; + pilot_count = ppdu_info->evm_info.pilot_count; + + if ((nss_count * pilot_count) > DP_RX_MAX_SU_EVM_COUNT) { + qdf_err("pilot evm count is more than expected"); + return; + } + cdp_rx_ppdu->evm_info.pilot_count = pilot_count; + cdp_rx_ppdu->evm_info.nss_count = nss_count; + + /* Populate evm for pilot_evm = nss_count*pilot_count */ + for (pilot_evm = 0; pilot_evm < nss_count * pilot_count; pilot_evm++) { + cdp_rx_ppdu->evm_info.pilot_evm[pilot_evm] = + ppdu_info->evm_info.pilot_evm[pilot_evm]; + } +} + +/** + * dp_rx_inc_rusize_cnt() - increment pdev stats based on RU size + * @pdev: pdev ctx + * @rx_user_status: mon rx user status + * + * Return: bool + */ +static inline bool +dp_rx_inc_rusize_cnt(struct dp_pdev *pdev, + struct mon_rx_user_status *rx_user_status) +{ + uint32_t ru_size; + bool is_data; + + ru_size = rx_user_status->ofdma_ru_size; + + if (dp_is_subtype_data(rx_user_status->frame_control)) { + DP_STATS_INC(pdev, + ul_ofdma.data_rx_ru_size[ru_size], 1); + is_data = true; + } else { + DP_STATS_INC(pdev, + ul_ofdma.nondata_rx_ru_size[ru_size], 1); + is_data = false; + } + + return is_data; +} + +/** + * dp_rx_populate_cdp_indication_ppdu_user() - Populate per user cdp indication + * @pdev: pdev ctx + * @ppdu_info: ppdu info structure from ppdu ring + * @cdp_rx_ppdu: Rx PPDU indication structure + * + * Return: none + */ +static inline void +dp_rx_populate_cdp_indication_ppdu_user(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + struct cdp_rx_indication_ppdu + *cdp_rx_ppdu) +{ + struct dp_peer *peer; + struct dp_soc *soc = pdev->soc; + struct dp_ast_entry *ast_entry; + uint32_t ast_index; + int i; + struct mon_rx_user_status *rx_user_status; + struct cdp_rx_stats_ppdu_user *rx_stats_peruser; + int ru_size; + bool is_data = false; + uint32_t num_users; + + num_users = ppdu_info->com_info.num_users; + for (i = 0; i < num_users; i++) { + if (i > OFDMA_NUM_USERS) + return; + + rx_user_status = &ppdu_info->rx_user_status[i]; + rx_stats_peruser = &cdp_rx_ppdu->user[i]; + + ast_index = rx_user_status->ast_index; + if (ast_index >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) { + rx_stats_peruser->peer_id = HTT_INVALID_PEER; + continue; + } + + ast_entry = soc->ast_table[ast_index]; + if (!ast_entry) { + rx_stats_peruser->peer_id = HTT_INVALID_PEER; + continue; + } + + peer = ast_entry->peer; + if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) { + rx_stats_peruser->peer_id = HTT_INVALID_PEER; + continue; + } + + rx_stats_peruser->first_data_seq_ctrl = + rx_user_status->first_data_seq_ctrl; + + rx_stats_peruser->frame_control_info_valid = + rx_user_status->frame_control_info_valid; + rx_stats_peruser->frame_control = + rx_user_status->frame_control; + + rx_stats_peruser->tcp_msdu_count = + rx_user_status->tcp_msdu_count; + rx_stats_peruser->udp_msdu_count = + rx_user_status->udp_msdu_count; + rx_stats_peruser->other_msdu_count = + rx_user_status->other_msdu_count; + + rx_stats_peruser->num_msdu = + rx_stats_peruser->tcp_msdu_count + + rx_stats_peruser->udp_msdu_count + + rx_stats_peruser->other_msdu_count; + + rx_stats_peruser->preamble_type = + rx_user_status->preamble_type; + rx_stats_peruser->mpdu_cnt_fcs_ok = + rx_user_status->mpdu_cnt_fcs_ok; + rx_stats_peruser->mpdu_cnt_fcs_err = + rx_user_status->mpdu_cnt_fcs_err; + qdf_mem_copy(&rx_stats_peruser->mpdu_fcs_ok_bitmap, + &rx_user_status->mpdu_fcs_ok_bitmap, + HAL_RX_NUM_WORDS_PER_PPDU_BITMAP * + sizeof(rx_user_status->mpdu_fcs_ok_bitmap[0])); + rx_stats_peruser->mpdu_ok_byte_count = + rx_user_status->mpdu_ok_byte_count; + rx_stats_peruser->mpdu_err_byte_count = + rx_user_status->mpdu_err_byte_count; + + cdp_rx_ppdu->num_mpdu += rx_user_status->mpdu_cnt_fcs_ok; + cdp_rx_ppdu->num_msdu += rx_stats_peruser->num_msdu; + rx_stats_peruser->retries = + CDP_FC_IS_RETRY_SET(rx_stats_peruser->frame_control) ? + rx_stats_peruser->mpdu_cnt_fcs_ok : 0; + + if (rx_stats_peruser->mpdu_cnt_fcs_ok > 1) + rx_stats_peruser->is_ampdu = 1; + else + rx_stats_peruser->is_ampdu = 0; + + rx_stats_peruser->tid = ppdu_info->rx_status.tid; + + qdf_mem_copy(rx_stats_peruser->mac_addr, + peer->mac_addr.raw, QDF_MAC_ADDR_SIZE); + rx_stats_peruser->peer_id = peer->peer_ids[0]; + cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id; + rx_stats_peruser->vdev_id = peer->vdev->vdev_id; + rx_stats_peruser->mu_ul_info_valid = 0; + + if (cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA || + cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_MIMO) { + if (rx_user_status->mu_ul_info_valid) { + rx_stats_peruser->nss = rx_user_status->nss; + rx_stats_peruser->mcs = rx_user_status->mcs; + rx_stats_peruser->mu_ul_info_valid = + rx_user_status->mu_ul_info_valid; + rx_stats_peruser->ofdma_ru_start_index = + rx_user_status->ofdma_ru_start_index; + rx_stats_peruser->ofdma_ru_width = + rx_user_status->ofdma_ru_width; + rx_stats_peruser->user_index = i; + ru_size = rx_user_status->ofdma_ru_size; + /* + * max RU size will be equal to + * HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2 + */ + if (ru_size >= OFDMA_NUM_RU_SIZE) { + dp_err("invalid ru_size %d\n", + ru_size); + return; + } + is_data = dp_rx_inc_rusize_cnt(pdev, + rx_user_status); + } + if (is_data) { + /* counter to get number of MU OFDMA */ + pdev->stats.ul_ofdma.data_rx_ppdu++; + pdev->stats.ul_ofdma.data_users[num_users]++; + } + } + } +} + +/** + * dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure + * @pdev: pdev ctx + * @ppdu_info: ppdu info structure from ppdu ring + * @cdp_rx_ppdu: Rx PPDU indication structure + * + * Return: none + */ +static inline void +dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + struct cdp_rx_indication_ppdu *cdp_rx_ppdu) +{ + struct dp_peer *peer; + struct dp_soc *soc = pdev->soc; + struct dp_ast_entry *ast_entry; + uint32_t ast_index; + uint32_t i; + + cdp_rx_ppdu->first_data_seq_ctrl = + ppdu_info->rx_status.first_data_seq_ctrl; + cdp_rx_ppdu->frame_ctrl = + ppdu_info->rx_status.frame_control; + cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count; + cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count; + cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count; + cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type; + /* num mpdu is consolidated and added together in num user loop */ + cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok; + /* num msdu is consolidated and added together in num user loop */ + cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count + + cdp_rx_ppdu->udp_msdu_count + + cdp_rx_ppdu->other_msdu_count); + + cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ? + ppdu_info->com_info.mpdu_cnt_fcs_ok : 0; + + if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1) + cdp_rx_ppdu->is_ampdu = 1; + else + cdp_rx_ppdu->is_ampdu = 0; + cdp_rx_ppdu->tid = ppdu_info->rx_status.tid; + + + ast_index = ppdu_info->rx_status.ast_index; + if (ast_index >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) { + cdp_rx_ppdu->peer_id = HTT_INVALID_PEER; + cdp_rx_ppdu->num_users = 0; + goto end; + } + + ast_entry = soc->ast_table[ast_index]; + if (!ast_entry) { + cdp_rx_ppdu->peer_id = HTT_INVALID_PEER; + cdp_rx_ppdu->num_users = 0; + goto end; + } + peer = ast_entry->peer; + if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) { + cdp_rx_ppdu->peer_id = HTT_INVALID_PEER; + cdp_rx_ppdu->num_users = 0; + goto end; + } + + qdf_mem_copy(cdp_rx_ppdu->mac_addr, + peer->mac_addr.raw, QDF_MAC_ADDR_SIZE); + cdp_rx_ppdu->peer_id = peer->peer_ids[0]; + cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id; + + cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id; + cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len; + cdp_rx_ppdu->duration = ppdu_info->rx_status.duration; + cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw; + cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss; + cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs; + if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) && + (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC)) + cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US; + else + cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi; + cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc; + cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type; + cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >> + QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3; + cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb; + cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft; + cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num; + cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed; + cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len; + cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate; + cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size; + + dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu); + dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu); + cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna; + + cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor; + for (i = 0; i < MAX_CHAIN; i++) + cdp_rx_ppdu->per_chain_rssi[i] = ppdu_info->rx_status.rssi[i]; + + cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast; + + cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users; + + cdp_rx_ppdu->num_mpdu = 0; + cdp_rx_ppdu->num_msdu = 0; + + dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu); + + return; +end: + dp_rx_populate_cfr_non_assoc_sta(pdev, ppdu_info, cdp_rx_ppdu); +} +#else +static inline void +dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + struct cdp_rx_indication_ppdu *cdp_rx_ppdu) +{ +} +#endif +/** + * dp_rx_stats_update() - Update per-peer statistics + * @soc: Datapath SOC handle + * @peer: Datapath peer handle + * @ppdu: PPDU Descriptor + * + * Return: None + */ +#ifdef FEATURE_PERPKT_INFO +static inline void dp_rx_rate_stats_update(struct dp_peer *peer, + struct cdp_rx_indication_ppdu *ppdu, + uint32_t user) +{ + uint32_t ratekbps = 0; + uint32_t ppdu_rx_rate = 0; + uint32_t nss = 0; + uint32_t rix; + uint16_t ratecode; + struct cdp_rx_stats_ppdu_user *ppdu_user; + + if (!peer || !ppdu) + return; + + ppdu_user = &ppdu->user[user]; + + if (ppdu_user->nss == 0) + nss = 0; + else + nss = ppdu_user->nss - 1; + + ratekbps = dp_getrateindex(ppdu->u.gi, + ppdu_user->mcs, + nss, + ppdu->u.preamble, + ppdu->u.bw, + &rix, + &ratecode); + + if (!ratekbps) + return; + + ppdu->rix = rix; + DP_STATS_UPD(peer, rx.last_rx_rate, ratekbps); + dp_ath_rate_lpf(peer->stats.rx.avg_rx_rate, ratekbps); + ppdu_rx_rate = dp_ath_rate_out(peer->stats.rx.avg_rx_rate); + DP_STATS_UPD(peer, rx.rnd_avg_rx_rate, ppdu_rx_rate); + ppdu->rx_ratekbps = ratekbps; + ppdu->rx_ratecode = ratecode; + + if (peer->vdev) + peer->vdev->stats.rx.last_rx_rate = ratekbps; +} + +static void dp_rx_stats_update(struct dp_pdev *pdev, + struct cdp_rx_indication_ppdu *ppdu) +{ + struct dp_soc *soc = NULL; + uint8_t mcs, preamble, ac = 0, nss, ppdu_type; + uint16_t num_msdu; + uint8_t pkt_bw_offset; + struct dp_peer *peer; + struct cdp_rx_stats_ppdu_user *ppdu_user; + uint32_t i; + enum cdp_mu_packet_type mu_pkt_type; + + if (pdev) + soc = pdev->soc; + else + return; + + if (!soc || soc->process_rx_status) + return; + + preamble = ppdu->u.preamble; + ppdu_type = ppdu->u.ppdu_type; + + for (i = 0; i < ppdu->num_users; i++) { + ppdu_user = &ppdu->user[i]; + peer = dp_peer_find_by_id(soc, ppdu_user->peer_id); + + if (!peer) + peer = pdev->invalid_peer; + + ppdu->cookie = (void *)peer->wlanstats_ctx; + + if (ppdu_type == HAL_RX_TYPE_SU) { + mcs = ppdu->u.mcs; + nss = ppdu->u.nss; + } else { + mcs = ppdu_user->mcs; + nss = ppdu_user->nss; + } + + num_msdu = ppdu_user->num_msdu; + switch (ppdu->u.bw) { + case CMN_BW_20MHZ: + pkt_bw_offset = PKT_BW_GAIN_20MHZ; + break; + case CMN_BW_40MHZ: + pkt_bw_offset = PKT_BW_GAIN_40MHZ; + break; + case CMN_BW_80MHZ: + pkt_bw_offset = PKT_BW_GAIN_80MHZ; + break; + case CMN_BW_160MHZ: + pkt_bw_offset = PKT_BW_GAIN_160MHZ; + break; + default: + pkt_bw_offset = 0; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "Invalid BW index = %d", ppdu->u.bw); + } + + DP_STATS_UPD(peer, rx.rssi, (ppdu->rssi + pkt_bw_offset)); + + if (peer->stats.rx.avg_rssi == INVALID_RSSI) + peer->stats.rx.avg_rssi = + CDP_RSSI_IN(peer->stats.rx.rssi); + else + CDP_RSSI_UPDATE_AVG(peer->stats.rx.avg_rssi, + peer->stats.rx.rssi); + + if ((preamble == DOT11_A) || (preamble == DOT11_B)) + nss = 1; + + if (ppdu_type == HAL_RX_TYPE_SU) { + if (nss) { + DP_STATS_INC(peer, rx.nss[nss - 1], num_msdu); + DP_STATS_INC(peer, rx.ppdu_nss[nss - 1], 1); + } + + DP_STATS_INC(peer, rx.mpdu_cnt_fcs_ok, + ppdu_user->mpdu_cnt_fcs_ok); + DP_STATS_INC(peer, rx.mpdu_cnt_fcs_err, + ppdu_user->mpdu_cnt_fcs_err); + } + + if (ppdu_type >= HAL_RX_TYPE_MU_MIMO && + ppdu_type <= HAL_RX_TYPE_MU_OFDMA) { + if (ppdu_type == HAL_RX_TYPE_MU_MIMO) + mu_pkt_type = RX_TYPE_MU_MIMO; + else + mu_pkt_type = RX_TYPE_MU_OFDMA; + + if (nss) { + DP_STATS_INC(peer, rx.nss[nss - 1], num_msdu); + DP_STATS_INC(peer, + rx.rx_mu[mu_pkt_type].ppdu_nss[nss - 1], + 1); + } + + DP_STATS_INC(peer, + rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_ok, + ppdu_user->mpdu_cnt_fcs_ok); + DP_STATS_INC(peer, + rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_err, + ppdu_user->mpdu_cnt_fcs_err); + } + + DP_STATS_INC(peer, rx.sgi_count[ppdu->u.gi], num_msdu); + DP_STATS_INC(peer, rx.bw[ppdu->u.bw], num_msdu); + DP_STATS_INC(peer, rx.reception_type[ppdu->u.ppdu_type], + num_msdu); + DP_STATS_INC(peer, rx.ppdu_cnt[ppdu->u.ppdu_type], 1); + DP_STATS_INCC(peer, rx.ampdu_cnt, num_msdu, + ppdu_user->is_ampdu); + DP_STATS_INCC(peer, rx.non_ampdu_cnt, num_msdu, + !(ppdu_user->is_ampdu)); + DP_STATS_UPD(peer, rx.rx_rate, mcs); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, + ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11A) && (preamble == DOT11_A))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, + ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11B) && (preamble == DOT11_B))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, + ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11A) && (preamble == DOT11_N))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, + ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, + ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX))); + DP_STATS_INCC(peer, + rx.su_ax_ppdu_cnt.mcs_count[MAX_MCS - 1], 1, + ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX) && + (ppdu_type == HAL_RX_TYPE_SU))); + DP_STATS_INCC(peer, + rx.su_ax_ppdu_cnt.mcs_count[mcs], 1, + ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX) && + (ppdu_type == HAL_RX_TYPE_SU))); + DP_STATS_INCC(peer, + rx.rx_mu[RX_TYPE_MU_OFDMA].ppdu.mcs_count[MAX_MCS - 1], + 1, ((mcs >= (MAX_MCS - 1)) && + (preamble == DOT11_AX) && + (ppdu_type == HAL_RX_TYPE_MU_OFDMA))); + DP_STATS_INCC(peer, + rx.rx_mu[RX_TYPE_MU_OFDMA].ppdu.mcs_count[mcs], + 1, ((mcs < (MAX_MCS - 1)) && + (preamble == DOT11_AX) && + (ppdu_type == HAL_RX_TYPE_MU_OFDMA))); + DP_STATS_INCC(peer, + rx.rx_mu[RX_TYPE_MU_MIMO].ppdu.mcs_count[MAX_MCS - 1], + 1, ((mcs >= (MAX_MCS - 1)) && + (preamble == DOT11_AX) && + (ppdu_type == HAL_RX_TYPE_MU_MIMO))); + DP_STATS_INCC(peer, + rx.rx_mu[RX_TYPE_MU_MIMO].ppdu.mcs_count[mcs], + 1, ((mcs < (MAX_MCS - 1)) && + (preamble == DOT11_AX) && + (ppdu_type == HAL_RX_TYPE_MU_MIMO))); + + /* + * If invalid TID, it could be a non-qos frame, hence do not + * update any AC counters + */ + ac = TID_TO_WME_AC(ppdu_user->tid); + + if (ppdu->tid != HAL_TID_INVALID) + DP_STATS_INC(peer, rx.wme_ac_type[ac], num_msdu); + dp_peer_stats_notify(pdev, peer); + DP_STATS_UPD(peer, rx.last_rssi, ppdu->rssi); + + if (peer == pdev->invalid_peer) + continue; + + if (dp_is_subtype_data(ppdu->frame_ctrl)) + dp_rx_rate_stats_update(peer, ppdu, i); + +#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE + dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, + &peer->stats, ppdu->peer_id, + UPDATE_PEER_STATS, pdev->pdev_id); +#endif + dp_peer_unref_del_find_by_id(peer); + } +} +#endif + +/* + * dp_rx_get_fcs_ok_msdu() - get ppdu status buffer containing fcs_ok msdu + * @pdev: pdev object + * @ppdu_info: ppdu info object + * + * Return: nbuf + */ + +static inline qdf_nbuf_t +dp_rx_get_fcs_ok_msdu(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info) +{ + uint16_t mpdu_fcs_ok; + qdf_nbuf_t status_nbuf = NULL; + unsigned long *fcs_ok_bitmap; + + if (qdf_unlikely(qdf_nbuf_is_queue_empty(&pdev->rx_ppdu_buf_q))) + return NULL; + + /* Obtain fcs_ok passed index from bitmap + * this index is used to get fcs passed first msdu payload + */ + + fcs_ok_bitmap = + (unsigned long *)&ppdu_info->com_info.mpdu_fcs_ok_bitmap[0]; + mpdu_fcs_ok = qdf_find_first_bit(fcs_ok_bitmap, + HAL_RX_MAX_MPDU); + + if (qdf_unlikely(mpdu_fcs_ok >= HAL_RX_MAX_MPDU)) + goto end; + + if (qdf_unlikely(!ppdu_info->ppdu_msdu_info[mpdu_fcs_ok].nbuf)) + goto end; + + /* Get status buffer by indexing mpdu_fcs_ok index + * containing first msdu payload with fcs passed + * and clone the buffer + */ + status_nbuf = ppdu_info->ppdu_msdu_info[mpdu_fcs_ok].nbuf; + ppdu_info->ppdu_msdu_info[mpdu_fcs_ok].nbuf = NULL; + + /* Take ref of status nbuf as this nbuf is to be + * freeed by upper layer. + */ + qdf_nbuf_ref(status_nbuf); + ppdu_info->fcs_ok_msdu_info.first_msdu_payload = + ppdu_info->ppdu_msdu_info[mpdu_fcs_ok].first_msdu_payload; + ppdu_info->fcs_ok_msdu_info.payload_len = + ppdu_info->ppdu_msdu_info[mpdu_fcs_ok].payload_len; + + +end: + /* Free the ppdu status buffer queue */ + qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q); + + qdf_mem_zero(&ppdu_info->ppdu_msdu_info, + (ppdu_info->com_info.mpdu_cnt_fcs_ok + + ppdu_info->com_info.mpdu_cnt_fcs_err) + * sizeof(struct hal_rx_msdu_payload_info)); + return status_nbuf; +} + +static inline void +dp_rx_handle_ppdu_status_buf(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + qdf_nbuf_t status_nbuf) +{ + qdf_nbuf_t dropnbuf; + + if (qdf_nbuf_queue_len(&pdev->rx_ppdu_buf_q) > + HAL_RX_MAX_MPDU) { + dropnbuf = qdf_nbuf_queue_remove(&pdev->rx_ppdu_buf_q); + qdf_nbuf_free(dropnbuf); + } + qdf_nbuf_queue_add(&pdev->rx_ppdu_buf_q, status_nbuf); +} +/** + * dp_rx_handle_mcopy_mode() - Allocate and deliver first MSDU payload + * @soc: core txrx main context + * @pdev: pdev strcuture + * @ppdu_info: structure for rx ppdu ring + * + * Return: QDF_STATUS_SUCCESS - If nbuf to be freed by caller + * QDF_STATUS_E_ALREADY - If nbuf not to be freed by caller + */ +#ifdef FEATURE_PERPKT_INFO +static inline QDF_STATUS +dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf) +{ + uint8_t size = 0; + struct ieee80211_frame *wh; + uint32_t *nbuf_data; + + if (!ppdu_info->fcs_ok_msdu_info.first_msdu_payload) + return QDF_STATUS_SUCCESS; + + if (pdev->m_copy_id.rx_ppdu_id == ppdu_info->com_info.ppdu_id) + return QDF_STATUS_SUCCESS; + + pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id; + + wh = (struct ieee80211_frame *) + (ppdu_info->fcs_ok_msdu_info.first_msdu_payload + 4); + + size = (ppdu_info->fcs_ok_msdu_info.first_msdu_payload - + qdf_nbuf_data(nbuf)); + + if (qdf_nbuf_pull_head(nbuf, size) == NULL) + return QDF_STATUS_SUCCESS; + + if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == + IEEE80211_FC0_TYPE_MGT) || + ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == + IEEE80211_FC0_TYPE_CTL)) { + return QDF_STATUS_SUCCESS; + } + + ppdu_info->fcs_ok_msdu_info.first_msdu_payload = NULL; + nbuf_data = (uint32_t *)qdf_nbuf_data(nbuf); + *nbuf_data = pdev->ppdu_info.com_info.ppdu_id; + /* only retain RX MSDU payload in the skb */ + qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) - + ppdu_info->fcs_ok_msdu_info.payload_len); + dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc, + nbuf, HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id); + return QDF_STATUS_E_ALREADY; +} +#else +static inline QDF_STATUS +dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef FEATURE_PERPKT_INFO +static inline void +dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + uint32_t tlv_status, + qdf_nbuf_t status_nbuf) +{ + QDF_STATUS mcopy_status; + + if (qdf_unlikely(!ppdu_info->com_info.mpdu_cnt)) { + qdf_nbuf_free(status_nbuf); + return; + } + /* Add buffers to queue until we receive + * HAL_TLV_STATUS_PPDU_DONE + */ + dp_rx_handle_ppdu_status_buf(pdev, ppdu_info, status_nbuf); + + /* If tlv_status is PPDU_DONE, process rx_ppdu_buf_q + * and devliver fcs_ok msdu buffer + */ + if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) { + if (qdf_unlikely(ppdu_info->com_info.mpdu_cnt != + (ppdu_info->com_info.mpdu_cnt_fcs_ok + + ppdu_info->com_info.mpdu_cnt_fcs_err))) { + qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q); + return; + } + /* Get rx ppdu status buffer having fcs ok msdu */ + status_nbuf = dp_rx_get_fcs_ok_msdu(pdev, ppdu_info); + if (status_nbuf) { + mcopy_status = dp_rx_handle_mcopy_mode(soc, pdev, + ppdu_info, + status_nbuf); + if (mcopy_status == QDF_STATUS_SUCCESS) + qdf_nbuf_free(status_nbuf); + } + } +} +#else +static inline void +dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + uint32_t tlv_status, + qdf_nbuf_t status_nbuf) +{ +} +#endif + +/** + * dp_rx_handle_smart_mesh_mode() - Deliver header for smart mesh + * @soc: Datapath SOC handle + * @pdev: Datapath PDEV handle + * @ppdu_info: Structure for rx ppdu info + * @nbuf: Qdf nbuf abstraction for linux skb + * + * Return: 0 on success, 1 on failure + */ +static inline int +dp_rx_handle_smart_mesh_mode(struct dp_soc *soc, struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + qdf_nbuf_t nbuf) +{ + uint8_t size = 0; + + if (!pdev->monitor_vdev) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "[%s]:[%d] Monitor vdev is NULL !!", + __func__, __LINE__); + return 1; + } + if (!ppdu_info->msdu_info.first_msdu_payload) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "[%s]:[%d] First msdu payload not present", + __func__, __LINE__); + return 1; + } + + /* Adding 4 bytes to get to start of 802.11 frame after phy_ppdu_id */ + size = (ppdu_info->msdu_info.first_msdu_payload - + qdf_nbuf_data(nbuf)) + 4; + ppdu_info->msdu_info.first_msdu_payload = NULL; + + if (qdf_nbuf_pull_head(nbuf, size) == NULL) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "[%s]:[%d] No header present", + __func__, __LINE__); + return 1; + } + + /* Only retain RX MSDU payload in the skb */ + qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) - + ppdu_info->msdu_info.payload_len); + if (!qdf_nbuf_update_radiotap(&pdev->ppdu_info.rx_status, nbuf, + qdf_nbuf_headroom(nbuf))) { + DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1); + return 1; + } + + pdev->monitor_vdev->osif_rx_mon(pdev->monitor_vdev->osif_vdev, + nbuf, NULL); + pdev->ppdu_info.rx_status.monitor_direct_used = 0; + return 0; +} + +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) +/* + * dp_rx_mon_handle_cfr_mu_info() - Gather macaddr and ast_index of peer(s) in + * the PPDU received, this will be used for correlation of CFR data captured + * for an UL-MU-PPDU + * @pdev: pdev ctx + * @ppdu_info: pointer to ppdu info structure populated from ppdu status TLVs + * @cdp_rx_ppdu: Rx PPDU indication structure + * + * Return: none + */ +static inline void +dp_rx_mon_handle_cfr_mu_info(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + struct cdp_rx_indication_ppdu *cdp_rx_ppdu) +{ + struct dp_peer *peer; + struct dp_soc *soc = pdev->soc; + struct dp_ast_entry *ast_entry; + struct mon_rx_user_status *rx_user_status; + struct cdp_rx_stats_ppdu_user *rx_stats_peruser; + uint32_t num_users; + int user_id; + uint32_t ast_index; + + qdf_spin_lock_bh(&soc->ast_lock); + + num_users = ppdu_info->com_info.num_users; + for (user_id = 0; user_id < num_users; user_id++) { + if (user_id > OFDMA_NUM_USERS) { + qdf_spin_unlock_bh(&soc->ast_lock); + return; + } + + rx_user_status = &ppdu_info->rx_user_status[user_id]; + rx_stats_peruser = &cdp_rx_ppdu->user[user_id]; + ast_index = rx_user_status->ast_index; + + if (ast_index >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) { + rx_stats_peruser->peer_id = HTT_INVALID_PEER; + continue; + } + + ast_entry = soc->ast_table[ast_index]; + if (!ast_entry) { + rx_stats_peruser->peer_id = HTT_INVALID_PEER; + continue; + } + + peer = ast_entry->peer; + if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) { + rx_stats_peruser->peer_id = HTT_INVALID_PEER; + continue; + } + + qdf_mem_copy(rx_stats_peruser->mac_addr, + peer->mac_addr.raw, QDF_MAC_ADDR_SIZE); + } + + qdf_spin_unlock_bh(&soc->ast_lock); +} + +/* + * dp_rx_mon_populate_cfr_ppdu_info() - Populate cdp ppdu info from hal ppdu + * info + * @pdev: pdev ctx + * @ppdu_info: ppdu info structure from ppdu ring + * @cdp_rx_ppdu : Rx PPDU indication structure + * + * Return: none + */ +static inline void +dp_rx_mon_populate_cfr_ppdu_info(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + struct cdp_rx_indication_ppdu *cdp_rx_ppdu) +{ + int chain; + + cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id; + cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft; + cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type; + cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users; + + for (chain = 0; chain < MAX_CHAIN; chain++) + cdp_rx_ppdu->per_chain_rssi[chain] = + ppdu_info->rx_status.rssi[chain]; + dp_rx_mon_handle_cfr_mu_info(pdev, ppdu_info, cdp_rx_ppdu); +} + +/** + * dp_cfr_rcc_mode_status() - Return status of cfr rcc mode + * @pdev: pdev ctx + * + * Return: True or False + */ + +static inline bool +dp_cfr_rcc_mode_status(struct dp_pdev *pdev) +{ + return pdev->cfr_rcc_mode; +} + +/* + * dp_rx_mon_populate_cfr_info() - Populate cdp ppdu info from hal cfr info + * @pdev: pdev ctx + * @ppdu_info: ppdu info structure from ppdu ring + * @cdp_rx_ppdu: Rx PPDU indication structure + * + * Return: none + */ +static inline void +dp_rx_mon_populate_cfr_info(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + struct cdp_rx_indication_ppdu *cdp_rx_ppdu) +{ + struct cdp_rx_ppdu_cfr_info *cfr_info; + + if (!dp_cfr_rcc_mode_status(pdev)) + return; + + cfr_info = &cdp_rx_ppdu->cfr_info; + + cfr_info->bb_captured_channel + = ppdu_info->cfr_info.bb_captured_channel; + cfr_info->bb_captured_timeout + = ppdu_info->cfr_info.bb_captured_timeout; + cfr_info->bb_captured_reason + = ppdu_info->cfr_info.bb_captured_reason; + cfr_info->rx_location_info_valid + = ppdu_info->cfr_info.rx_location_info_valid; + cfr_info->chan_capture_status + = ppdu_info->cfr_info.chan_capture_status; + cfr_info->rtt_che_buffer_pointer_high8 + = ppdu_info->cfr_info.rtt_che_buffer_pointer_high8; + cfr_info->rtt_che_buffer_pointer_low32 + = ppdu_info->cfr_info.rtt_che_buffer_pointer_low32; +} + +/** + * dp_update_cfr_dbg_stats() - Increment RCC debug statistics + * @pdev: pdev structure + * @ppdu_info: structure for rx ppdu ring + * + * Return: none + */ +static inline void +dp_update_cfr_dbg_stats(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info) +{ + struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info; + + DP_STATS_INC(pdev, + rcc.chan_capture_status[cfr->chan_capture_status], 1); + if (cfr->rx_location_info_valid) { + DP_STATS_INC(pdev, rcc.rx_loc_info_valid_cnt, 1); + if (cfr->bb_captured_channel) { + DP_STATS_INC(pdev, rcc.bb_captured_channel_cnt, 1); + DP_STATS_INC(pdev, + rcc.reason_cnt[cfr->bb_captured_reason], + 1); + } else if (cfr->bb_captured_timeout) { + DP_STATS_INC(pdev, rcc.bb_captured_timeout_cnt, 1); + DP_STATS_INC(pdev, + rcc.reason_cnt[cfr->bb_captured_reason], + 1); + } + } +} + +/* + * dp_rx_handle_cfr() - Gather cfr info from hal ppdu info + * @soc: core txrx main context + * @pdev: pdev ctx + * @ppdu_info: ppdu info structure from ppdu ring + * + * Return: none + */ +static inline void +dp_rx_handle_cfr(struct dp_soc *soc, struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info) +{ + qdf_nbuf_t ppdu_nbuf; + struct cdp_rx_indication_ppdu *cdp_rx_ppdu; + + dp_update_cfr_dbg_stats(pdev, ppdu_info); + if (!ppdu_info->cfr_info.bb_captured_channel) + return; + + ppdu_nbuf = qdf_nbuf_alloc(soc->osdev, + sizeof(struct cdp_rx_indication_ppdu), + 0, + 0, + FALSE); + if (ppdu_nbuf) { + cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data; + + dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu); + dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu); + qdf_nbuf_put_tail(ppdu_nbuf, + sizeof(struct cdp_rx_indication_ppdu)); + dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc, + ppdu_nbuf, HTT_INVALID_PEER, + WDI_NO_VAL, pdev->pdev_id); + } +} + +/** + * dp_rx_populate_cfr_non_assoc_sta() - Populate cfr ppdu info for PPDUs from + * non-associated stations + * @pdev: pdev ctx + * @ppdu_info: ppdu info structure from ppdu ring + * @cdp_rx_ppdu: Rx PPDU indication structure + * + * Return: none + */ +static inline void +dp_rx_populate_cfr_non_assoc_sta(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + struct cdp_rx_indication_ppdu *cdp_rx_ppdu) +{ + if (!dp_cfr_rcc_mode_status(pdev)) + return; + + if (ppdu_info->cfr_info.bb_captured_channel) + dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu); +} + +/** + * dp_bb_captured_chan_status() - Get the bb_captured_channel status + * @ppdu_info: structure for rx ppdu ring + * + * Return: Success/ Failure + */ + +static inline QDF_STATUS +dp_bb_captured_chan_status(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info; + + if (dp_cfr_rcc_mode_status(pdev)) { + if (cfr->bb_captured_channel) + status = QDF_STATUS_SUCCESS; + } + + return status; +} +#else +static inline void +dp_rx_mon_handle_cfr_mu_info(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + struct cdp_rx_indication_ppdu *cdp_rx_ppdu) +{ +} + +static inline void +dp_rx_mon_populate_cfr_ppdu_info(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + struct cdp_rx_indication_ppdu *cdp_rx_ppdu) +{ +} + +static inline void +dp_rx_mon_populate_cfr_info(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + struct cdp_rx_indication_ppdu *cdp_rx_ppdu) +{ +} + +static inline void +dp_rx_handle_cfr(struct dp_soc *soc, struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info) +{ +} + +static inline void +dp_rx_populate_cfr_non_assoc_sta(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + struct cdp_rx_indication_ppdu *cdp_rx_ppdu) +{ +} + +static inline void +dp_update_cfr_dbg_stats(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info) +{ +} + +static inline QDF_STATUS +dp_bb_captured_chan_status(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static inline bool +dp_cfr_rcc_mode_status(struct dp_pdev *pdev) +{ + return false; +} +#endif + +/** +* dp_rx_handle_ppdu_stats() - Allocate and deliver ppdu stats to cdp layer +* @soc: core txrx main context +* @pdev: pdev strcuture +* @ppdu_info: structure for rx ppdu ring +* +* Return: none +*/ +#ifdef FEATURE_PERPKT_INFO +static inline void +dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info) +{ + qdf_nbuf_t ppdu_nbuf; + struct cdp_rx_indication_ppdu *cdp_rx_ppdu; + + /* + * Do not allocate if fcs error, + * ast idx invalid / fctl invalid + * + * In CFR RCC mode - PPDU status TLVs of error pkts are also needed + */ + if (ppdu_info->com_info.mpdu_cnt_fcs_ok == 0) + return; + + if (ppdu_info->nac_info.fc_valid && + ppdu_info->nac_info.to_ds_flag && + ppdu_info->nac_info.mac_addr2_valid) { + struct dp_neighbour_peer *peer = NULL; + uint8_t rssi = ppdu_info->rx_status.rssi_comb; + + qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); + if (pdev->neighbour_peers_added) { + TAILQ_FOREACH(peer, &pdev->neighbour_peers_list, + neighbour_peer_list_elem) { + if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr, + &ppdu_info->nac_info.mac_addr2, + QDF_MAC_ADDR_SIZE)) { + peer->rssi = rssi; + break; + } + } + } + qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); + } + + /* need not generate wdi event when mcopy, cfr rcc mode and + * enhanced stats are not enabled + */ + if (!pdev->mcopy_mode && !pdev->enhanced_stats_en && + !dp_cfr_rcc_mode_status(pdev)) + return; + + if (dp_cfr_rcc_mode_status(pdev)) + dp_update_cfr_dbg_stats(pdev, ppdu_info); + + if (!ppdu_info->rx_status.frame_control_info_valid || + (ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)) { + if (!(pdev->mcopy_mode || + (dp_bb_captured_chan_status(pdev, ppdu_info) == + QDF_STATUS_SUCCESS))) + return; + } + + ppdu_nbuf = qdf_nbuf_alloc(soc->osdev, + sizeof(struct cdp_rx_indication_ppdu), + 0, 0, FALSE); + if (ppdu_nbuf) { + cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data; + + dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu); + dp_rx_populate_cdp_indication_ppdu(pdev, + ppdu_info, cdp_rx_ppdu); + qdf_nbuf_put_tail(ppdu_nbuf, + sizeof(struct cdp_rx_indication_ppdu)); + dp_rx_stats_update(pdev, cdp_rx_ppdu); + + if (cdp_rx_ppdu->peer_id != HTT_INVALID_PEER) { + dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, + soc, ppdu_nbuf, + cdp_rx_ppdu->peer_id, + WDI_NO_VAL, pdev->pdev_id); + } else if (pdev->mcopy_mode || dp_cfr_rcc_mode_status(pdev)) { + dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc, + ppdu_nbuf, HTT_INVALID_PEER, + WDI_NO_VAL, pdev->pdev_id); + } else { + qdf_nbuf_free(ppdu_nbuf); + } + } +} +#else +static inline void +dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info) +{ +} +#endif + +/** +* dp_rx_process_peer_based_pktlog() - Process Rx pktlog if peer based +* filtering enabled +* @soc: core txrx main context +* @ppdu_info: Structure for rx ppdu info +* @status_nbuf: Qdf nbuf abstraction for linux skb +* @mac_id: mac_id/pdev_id correspondinggly for MCL and WIN +* +* Return: none +*/ +static inline void +dp_rx_process_peer_based_pktlog(struct dp_soc *soc, + struct hal_rx_ppdu_info *ppdu_info, + qdf_nbuf_t status_nbuf, uint32_t mac_id) +{ + struct dp_peer *peer; + struct dp_ast_entry *ast_entry; + uint32_t ast_index; + + ast_index = ppdu_info->rx_status.ast_index; + if (ast_index < wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) { + ast_entry = soc->ast_table[ast_index]; + if (ast_entry) { + peer = ast_entry->peer; + if (peer && (peer->peer_ids[0] != HTT_INVALID_PEER)) { + if (peer->peer_based_pktlog_filter) { + dp_wdi_event_handler( + WDI_EVENT_RX_DESC, soc, + status_nbuf, + peer->peer_ids[0], + WDI_NO_VAL, mac_id); + } + } + } + } +} + +#if defined(HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_M) +static inline void +dp_rx_ul_ofdma_ru_size_to_width( + uint32_t ru_size, + uint32_t *ru_width) +{ + uint32_t width; + + width = 0; + switch (ru_size) { + case HTT_UL_OFDMA_V0_RU_SIZE_RU_26: + width = 1; + break; + case HTT_UL_OFDMA_V0_RU_SIZE_RU_52: + width = 2; + break; + case HTT_UL_OFDMA_V0_RU_SIZE_RU_106: + width = 4; + break; + case HTT_UL_OFDMA_V0_RU_SIZE_RU_242: + width = 9; + break; + case HTT_UL_OFDMA_V0_RU_SIZE_RU_484: + width = 18; + break; + case HTT_UL_OFDMA_V0_RU_SIZE_RU_996: + width = 37; + break; + case HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2: + width = 74; + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "RU size to width convert err"); + break; + } + *ru_width = width; +} + +static inline void +dp_rx_mon_handle_mu_ul_info(struct hal_rx_ppdu_info *ppdu_info) +{ + struct mon_rx_user_status *mon_rx_user_status; + uint32_t num_users; + uint32_t i; + uint32_t mu_ul_user_v0_word0; + uint32_t mu_ul_user_v0_word1; + uint32_t ru_width; + uint32_t ru_size; + + if (!(ppdu_info->rx_status.reception_type == HAL_RX_TYPE_MU_OFDMA || + ppdu_info->rx_status.reception_type == HAL_RX_TYPE_MU_MIMO)) + return; + + num_users = ppdu_info->com_info.num_users; + if (num_users > HAL_MAX_UL_MU_USERS) + num_users = HAL_MAX_UL_MU_USERS; + for (i = 0; i < num_users; i++) { + mon_rx_user_status = &ppdu_info->rx_user_status[i]; + mu_ul_user_v0_word0 = + mon_rx_user_status->mu_ul_user_v0_word0; + mu_ul_user_v0_word1 = + mon_rx_user_status->mu_ul_user_v0_word1; + + if (HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_GET( + mu_ul_user_v0_word0) && + !HTT_UL_OFDMA_USER_INFO_V0_W0_VER_GET( + mu_ul_user_v0_word0)) { + mon_rx_user_status->mcs = + HTT_UL_OFDMA_USER_INFO_V0_W1_MCS_GET( + mu_ul_user_v0_word1); + mon_rx_user_status->nss = + HTT_UL_OFDMA_USER_INFO_V0_W1_NSS_GET( + mu_ul_user_v0_word1) + 1; + + mon_rx_user_status->mu_ul_info_valid = 1; + mon_rx_user_status->ofdma_ru_start_index = + HTT_UL_OFDMA_USER_INFO_V0_W1_RU_START_GET( + mu_ul_user_v0_word1); + + ru_size = + HTT_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE_GET( + mu_ul_user_v0_word1); + dp_rx_ul_ofdma_ru_size_to_width(ru_size, &ru_width); + mon_rx_user_status->ofdma_ru_width = ru_width; + mon_rx_user_status->ofdma_ru_size = ru_size; + } + } +} +#else +static inline void +dp_rx_mon_handle_mu_ul_info(struct hal_rx_ppdu_info *ppdu_info) +{ +} +#endif + +/** +* dp_rx_mon_status_process_tlv() - Process status TLV in status +* buffer on Rx status Queue posted by status SRNG processing. +* @soc: core txrx main context +* @mac_id: mac_id which is one of 3 mac_ids _ring +* +* Return: none +*/ +static inline void +dp_rx_mon_status_process_tlv(struct dp_soc *soc, uint32_t mac_id, + uint32_t quota) +{ + struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + struct hal_rx_ppdu_info *ppdu_info; + qdf_nbuf_t status_nbuf; + uint8_t *rx_tlv; + uint8_t *rx_tlv_start; + uint32_t tlv_status = HAL_TLV_STATUS_BUF_DONE; + QDF_STATUS enh_log_status = QDF_STATUS_SUCCESS; + struct cdp_pdev_mon_stats *rx_mon_stats; + int smart_mesh_status; + enum WDI_EVENT pktlog_mode = WDI_NO_VAL; + bool nbuf_used; + uint32_t rx_enh_capture_mode; + + + ppdu_info = &pdev->ppdu_info; + rx_mon_stats = &pdev->rx_mon_stats; + + if (pdev->mon_ppdu_status != DP_PPDU_STATUS_START) + return; + + rx_enh_capture_mode = pdev->rx_enh_capture_mode; + + while (!qdf_nbuf_is_queue_empty(&pdev->rx_status_q)) { + + status_nbuf = qdf_nbuf_queue_remove(&pdev->rx_status_q); + + rx_tlv = qdf_nbuf_data(status_nbuf); + rx_tlv_start = rx_tlv; + nbuf_used = false; + + if ((pdev->monitor_vdev) || (pdev->enhanced_stats_en) || + (pdev->mcopy_mode) || (dp_cfr_rcc_mode_status(pdev)) || + (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) { + do { + tlv_status = hal_rx_status_get_tlv_info(rx_tlv, + ppdu_info, pdev->soc->hal_soc, + status_nbuf); + + dp_rx_mon_update_dbg_ppdu_stats(ppdu_info, + rx_mon_stats); + + dp_rx_mon_enh_capture_process(pdev, tlv_status, + status_nbuf, ppdu_info, + &nbuf_used); + + rx_tlv = hal_rx_status_get_next_tlv(rx_tlv); + + if ((rx_tlv - rx_tlv_start) >= + RX_DATA_BUFFER_SIZE) + break; + + } while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) || + (tlv_status == HAL_TLV_STATUS_HEADER) || + (tlv_status == HAL_TLV_STATUS_MPDU_END) || + (tlv_status == HAL_TLV_STATUS_MSDU_END)); + } + if (pdev->dp_peer_based_pktlog) { + dp_rx_process_peer_based_pktlog(soc, ppdu_info, + status_nbuf, mac_id); + } else { + if (pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL) + pktlog_mode = WDI_EVENT_RX_DESC; + else if (pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE) + pktlog_mode = WDI_EVENT_LITE_RX; + + if (pktlog_mode != WDI_NO_VAL) + dp_wdi_event_handler(pktlog_mode, soc, + status_nbuf, + HTT_INVALID_PEER, + WDI_NO_VAL, mac_id); + } + + /* smart monitor vap and m_copy cannot co-exist */ + if (ppdu_info->rx_status.monitor_direct_used && pdev->neighbour_peers_added + && pdev->monitor_vdev) { + smart_mesh_status = dp_rx_handle_smart_mesh_mode(soc, + pdev, ppdu_info, status_nbuf); + if (smart_mesh_status) + qdf_nbuf_free(status_nbuf); + } else if (qdf_unlikely(pdev->mcopy_mode)) { + dp_rx_process_mcopy_mode(soc, pdev, + ppdu_info, tlv_status, + status_nbuf); + } else if (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED) { + if (!nbuf_used) + qdf_nbuf_free(status_nbuf); + + if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) + enh_log_status = + dp_rx_handle_enh_capture(soc, + pdev, ppdu_info); + } else { + qdf_nbuf_free(status_nbuf); + } + + if (tlv_status == HAL_TLV_STATUS_PPDU_NON_STD_DONE) { + dp_rx_mon_deliver_non_std(soc, mac_id); + } else if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) { + rx_mon_stats->status_ppdu_done++; + dp_rx_mon_handle_mu_ul_info(ppdu_info); + + if (pdev->tx_capture_enabled + != CDP_TX_ENH_CAPTURE_DISABLED) + dp_send_ack_frame_to_stack(soc, pdev, + ppdu_info); + + if (pdev->enhanced_stats_en || + pdev->mcopy_mode || pdev->neighbour_peers_added) + dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info); + else if (dp_cfr_rcc_mode_status(pdev)) + dp_rx_handle_cfr(soc, pdev, ppdu_info); + + pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE; + + /* + * if chan_num is not fetched correctly from ppdu RX TLV, + * get it from pdev saved. + */ + if (qdf_unlikely(pdev->ppdu_info.rx_status.chan_num == 0)) + pdev->ppdu_info.rx_status.chan_num = pdev->mon_chan_num; + /* + * if chan_freq is not fetched correctly from ppdu RX TLV, + * get it from pdev saved. + */ + if (qdf_unlikely(pdev->ppdu_info.rx_status.chan_freq == 0)) { + pdev->ppdu_info.rx_status.chan_freq = + pdev->mon_chan_freq; + } + + dp_rx_mon_dest_process(soc, mac_id, quota); + pdev->mon_ppdu_status = DP_PPDU_STATUS_START; + } + } + return; +} + +/* + * dp_rx_mon_status_srng_process() - Process monitor status ring + * post the status ring buffer to Rx status Queue for later + * processing when status ring is filled with status TLV. + * Allocate a new buffer to status ring if the filled buffer + * is posted. + * + * @soc: core txrx main context + * @mac_id: mac_id which is one of 3 mac_ids + * @quota: No. of ring entry that can be serviced in one shot. + + * Return: uint32_t: No. of ring entry that is processed. + */ +static inline uint32_t +dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id, + uint32_t quota) +{ + struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id); + hal_soc_handle_t hal_soc; + void *mon_status_srng; + void *rxdma_mon_status_ring_entry; + QDF_STATUS status; + uint32_t work_done = 0; + + mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng; + + qdf_assert(mon_status_srng); + if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) { + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s %d : HAL Monitor Status Ring Init Failed -- %pK", + __func__, __LINE__, mon_status_srng); + return work_done; + } + + hal_soc = soc->hal_soc; + + qdf_assert(hal_soc); + + if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng))) + goto done; + + /* mon_status_ring_desc => WBM_BUFFER_RING STRUCT => + * BUFFER_ADDR_INFO STRUCT + */ + while (qdf_likely((rxdma_mon_status_ring_entry = + hal_srng_src_peek(hal_soc, mon_status_srng)) + && quota--)) { + uint32_t rx_buf_cookie; + qdf_nbuf_t status_nbuf; + struct dp_rx_desc *rx_desc; + uint8_t *status_buf; + qdf_dma_addr_t paddr; + uint64_t buf_addr; + + buf_addr = + (HAL_RX_BUFFER_ADDR_31_0_GET( + rxdma_mon_status_ring_entry) | + ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET( + rxdma_mon_status_ring_entry)) << 32)); + + if (qdf_likely(buf_addr)) { + + rx_buf_cookie = + HAL_RX_BUF_COOKIE_GET( + rxdma_mon_status_ring_entry); + rx_desc = dp_rx_cookie_2_va_mon_status(soc, + rx_buf_cookie); + + qdf_assert(rx_desc); + + status_nbuf = rx_desc->nbuf; + + qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf, + QDF_DMA_FROM_DEVICE); + + status_buf = qdf_nbuf_data(status_nbuf); + + status = hal_get_rx_status_done(status_buf); + + if (status != QDF_STATUS_SUCCESS) { + uint32_t hp, tp; + hal_get_sw_hptp(hal_soc, mon_status_srng, + &tp, &hp); + dp_info_rl("tlv tag status error hp:%u, tp:%u", + hp, tp); + pdev->rx_mon_stats.tlv_tag_status_err++; + /* WAR for missing status: Skip status entry */ + hal_srng_src_get_next(hal_soc, mon_status_srng); + continue; + } + qdf_nbuf_set_pktlen(status_nbuf, RX_DATA_BUFFER_SIZE); + + qdf_nbuf_unmap_single(soc->osdev, status_nbuf, + QDF_DMA_FROM_DEVICE); + + /* Put the status_nbuf to queue */ + qdf_nbuf_queue_add(&pdev->rx_status_q, status_nbuf); + + } else { + union dp_rx_desc_list_elem_t *desc_list = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + struct rx_desc_pool *rx_desc_pool; + uint32_t num_alloc_desc; + + rx_desc_pool = &soc->rx_desc_status[mac_id]; + + num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id, + rx_desc_pool, + 1, + &desc_list, + &tail); + /* + * No free descriptors available + */ + if (qdf_unlikely(num_alloc_desc == 0)) { + work_done++; + break; + } + + rx_desc = &desc_list->rx_desc; + } + + status_nbuf = dp_rx_nbuf_prepare(soc, pdev); + + /* + * qdf_nbuf alloc or map failed, + * free the dp rx desc to free list, + * fill in NULL dma address at current HP entry, + * keep HP in mon_status_ring unchanged, + * wait next time dp_rx_mon_status_srng_process + * to fill in buffer at current HP. + */ + if (qdf_unlikely(!status_nbuf)) { + union dp_rx_desc_list_elem_t *desc_list = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + struct rx_desc_pool *rx_desc_pool; + + rx_desc_pool = &soc->rx_desc_status[mac_id]; + + dp_info_rl("fail to allocate or map qdf_nbuf"); + dp_rx_add_to_free_desc_list(&desc_list, + &tail, rx_desc); + dp_rx_add_desc_list_to_free_list(soc, &desc_list, + &tail, mac_id, rx_desc_pool); + + hal_rxdma_buff_addr_info_set( + rxdma_mon_status_ring_entry, + 0, 0, HAL_RX_BUF_RBM_SW3_BM); + work_done++; + break; + } + + paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0); + + rx_desc->nbuf = status_nbuf; + rx_desc->in_use = 1; + + hal_rxdma_buff_addr_info_set(rxdma_mon_status_ring_entry, + paddr, rx_desc->cookie, HAL_RX_BUF_RBM_SW3_BM); + + hal_srng_src_get_next(hal_soc, mon_status_srng); + work_done++; + } +done: + + hal_srng_access_end(hal_soc, mon_status_srng); + + return work_done; + +} +/* + * dp_rx_mon_status_process() - Process monitor status ring and + * TLV in status ring. + * + * @soc: core txrx main context + * @mac_id: mac_id which is one of 3 mac_ids + * @quota: No. of ring entry that can be serviced in one shot. + + * Return: uint32_t: No. of ring entry that is processed. + */ +static inline uint32_t +dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) { + uint32_t work_done; + + work_done = dp_rx_mon_status_srng_process(soc, mac_id, quota); + quota -= work_done; + dp_rx_mon_status_process_tlv(soc, mac_id, quota); + + return work_done; +} + +#ifndef DISABLE_MON_CONFIG +/** + * dp_mon_process() - Main monitor mode processing roution. + * This call monitor status ring process then monitor + * destination ring process. + * Called from the bottom half (tasklet/NET_RX_SOFTIRQ) + * @soc: core txrx main context + * @mac_id: mac_id which is one of 3 mac_ids + * @quota: No. of status ring entry that can be serviced in one shot. + + * Return: uint32_t: No. of ring entry that is processed. + */ +uint32_t +dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) { + return dp_rx_mon_status_process(soc, mac_id, quota); +} +#else +uint32_t +dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) { + return 0; +} +#endif + +/** + * dp_rx_pdev_mon_status_detach() - detach dp rx for status ring + * @pdev: core txrx pdev context + * @mac_id: mac_id/pdev_id correspondinggly for MCL and WIN + * + * This function will detach DP RX status ring from + * main device context. will free DP Rx resources for + * status ring + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS +dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id) +{ + struct dp_soc *soc = pdev->soc; + struct rx_desc_pool *rx_desc_pool; + + rx_desc_pool = &soc->rx_desc_status[mac_id]; + if (rx_desc_pool->pool_size != 0) { + if (!dp_is_soc_reinit(soc)) + dp_rx_desc_nbuf_and_pool_free(soc, mac_id, + rx_desc_pool); + else + dp_rx_desc_nbuf_free(soc, rx_desc_pool); + } + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_buffers_replenish() - replenish monitor status ring with + * rx nbufs called during dp rx + * monitor status ring initialization + * + * @soc: core txrx main context + * @mac_id: mac_id which is one of 3 mac_ids + * @dp_rxdma_srng: dp monitor status circular ring + * @rx_desc_pool; Pointer to Rx descriptor pool + * @num_req_buffers: number of buffer to be replenished + * @desc_list: list of descs if called from dp rx monitor status + * process or NULL during dp rx initialization or + * out of buffer interrupt + * @tail: tail of descs list + * @owner: who owns the nbuf (host, NSS etc...) + * Return: return success or failure + */ +static inline +QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc, + uint32_t mac_id, + struct dp_srng *dp_rxdma_srng, + struct rx_desc_pool *rx_desc_pool, + uint32_t num_req_buffers, + union dp_rx_desc_list_elem_t **desc_list, + union dp_rx_desc_list_elem_t **tail, + uint8_t owner) +{ + uint32_t num_alloc_desc; + uint16_t num_desc_to_free = 0; + uint32_t num_entries_avail; + uint32_t count = 0; + int sync_hw_ptr = 1; + qdf_dma_addr_t paddr; + qdf_nbuf_t rx_netbuf; + void *rxdma_ring_entry; + union dp_rx_desc_list_elem_t *next; + void *rxdma_srng; + struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id); + + rxdma_srng = dp_rxdma_srng->hal_srng; + + qdf_assert(rxdma_srng); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] requested %d buffers for replenish", + __func__, __LINE__, num_req_buffers); + + /* + * if desc_list is NULL, allocate the descs from freelist + */ + if (!(*desc_list)) { + + num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id, + rx_desc_pool, + num_req_buffers, + desc_list, + tail); + + if (!num_alloc_desc) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "[%s][%d] no free rx_descs in freelist", + __func__, __LINE__); + return QDF_STATUS_E_NOMEM; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] %d rx desc allocated", __func__, __LINE__, + num_alloc_desc); + + num_req_buffers = num_alloc_desc; + } + + hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); + num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, + rxdma_srng, sync_hw_ptr); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] no of available entries in rxdma ring: %d", + __func__, __LINE__, num_entries_avail); + + if (num_entries_avail < num_req_buffers) { + num_desc_to_free = num_req_buffers - num_entries_avail; + num_req_buffers = num_entries_avail; + } + + while (count < num_req_buffers) { + rx_netbuf = dp_rx_nbuf_prepare(dp_soc, dp_pdev); + + /* + * qdf_nbuf alloc or map failed, + * keep HP in mon_status_ring unchanged, + * wait dp_rx_mon_status_srng_process + * to fill in buffer at current HP. + */ + if (qdf_unlikely(!rx_netbuf)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: qdf_nbuf allocate or map fail, count %d", + __func__, count); + break; + } + + paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0); + + next = (*desc_list)->next; + rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc, + rxdma_srng); + + if (qdf_unlikely(!rxdma_ring_entry)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "[%s][%d] rxdma_ring_entry is NULL, count - %d", + __func__, __LINE__, count); + qdf_nbuf_unmap_single(dp_soc->osdev, rx_netbuf, + QDF_DMA_FROM_DEVICE); + qdf_nbuf_free(rx_netbuf); + break; + } + + (*desc_list)->rx_desc.nbuf = rx_netbuf; + (*desc_list)->rx_desc.in_use = 1; + count++; + + hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr, + (*desc_list)->rx_desc.cookie, owner); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] rx_desc=%pK, cookie=%d, nbuf=%pK, \ + paddr=%pK", + __func__, __LINE__, &(*desc_list)->rx_desc, + (*desc_list)->rx_desc.cookie, rx_netbuf, + (void *)paddr); + + *desc_list = next; + } + + hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "successfully replenished %d buffers", num_req_buffers); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%d rx desc added back to free list", num_desc_to_free); + + /* + * add any available free desc back to the free list + */ + if (*desc_list) { + dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail, + mac_id, rx_desc_pool); + } + + return QDF_STATUS_SUCCESS; +} +/** + * dp_rx_pdev_mon_status_attach() - attach DP RX monitor status ring + * @pdev: core txrx pdev context + * @ring_id: ring number + * This function will attach a DP RX monitor status ring into pDEV + * and replenish monitor status ring with buffer. + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS +dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int ring_id) { + struct dp_soc *soc = pdev->soc; + union dp_rx_desc_list_elem_t *desc_list = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + struct dp_srng *mon_status_ring; + uint32_t num_entries; + uint32_t i; + struct rx_desc_pool *rx_desc_pool; + QDF_STATUS status; + + mon_status_ring = &soc->rxdma_mon_status_ring[ring_id]; + + num_entries = mon_status_ring->num_entries; + + rx_desc_pool = &soc->rx_desc_status[ring_id]; + + dp_info("Mon RX Status Pool[%d] entries=%d", + ring_id, num_entries); + + rx_desc_pool->desc_type = DP_RX_DESC_STATUS_TYPE; + status = dp_rx_desc_pool_alloc(soc, ring_id, num_entries + 1, + rx_desc_pool); + if (!QDF_IS_STATUS_SUCCESS(status)) + return status; + + rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; + rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; + + dp_debug("Mon RX Status Buffers Replenish ring_id=%d", ring_id); + + status = dp_rx_mon_status_buffers_replenish(soc, ring_id, + mon_status_ring, + rx_desc_pool, + num_entries, + &desc_list, &tail, + HAL_RX_BUF_RBM_SW3_BM); + + if (!QDF_IS_STATUS_SUCCESS(status)) + return status; + + qdf_nbuf_queue_init(&pdev->rx_status_q); + qdf_nbuf_queue_init(&pdev->rx_ppdu_buf_q); + + pdev->mon_ppdu_status = DP_PPDU_STATUS_START; + + qdf_mem_zero(&(pdev->ppdu_info.rx_status), + sizeof(pdev->ppdu_info.rx_status)); + + qdf_mem_zero(&pdev->rx_mon_stats, + sizeof(pdev->rx_mon_stats)); + + dp_rx_mon_init_dbg_ppdu_stats(&pdev->ppdu_info, + &pdev->rx_mon_stats); + + for (i = 0; i < MAX_MU_USERS; i++) { + qdf_nbuf_queue_init(&pdev->mpdu_q[i]); + pdev->is_mpdu_hdr[i] = true; + } + qdf_mem_zero(pdev->msdu_list, sizeof(pdev->msdu_list[MAX_MU_USERS])); + + pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_stats.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..094e1214f2587ca00633fb4218432e8c24df1445 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_stats.c @@ -0,0 +1,6297 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "qdf_types.h" +#include "dp_peer.h" +#include "dp_types.h" +#include "dp_internal.h" +#include "htt_stats.h" +#include "htt_ppdu_stats.h" +#include "dp_htt.h" + +#define DP_MAX_STRING_LEN 500 + +#define DP_HTT_HW_INTR_NAME_LEN HTT_STATS_MAX_HW_INTR_NAME_LEN +#define DP_HTT_HW_MODULE_NAME_LEN HTT_STATS_MAX_HW_MODULE_NAME_LEN +#define DP_HTT_COUNTER_NAME_LEN HTT_MAX_COUNTER_NAME +#define DP_HTT_LOW_WM_HIT_COUNT_LEN HTT_STATS_LOW_WM_BINS +#define DP_HTT_HIGH_WM_HIT_COUNT_LEN HTT_STATS_HIGH_WM_BINS +#define DP_HTT_TX_MCS_LEN HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS +#define DP_HTT_TX_SU_MCS_LEN HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS +#define DP_HTT_TX_MU_MCS_LEN HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS +#define DP_HTT_TX_NSS_LEN HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS +#define DP_HTT_TX_BW_LEN HTT_TX_PDEV_STATS_NUM_BW_COUNTERS +#define DP_HTT_TX_PREAM_LEN HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES +#define DP_HTT_TX_PDEV_GI_LEN HTT_TX_PDEV_STATS_NUM_GI_COUNTERS +#define DP_HTT_TX_DCM_LEN HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS +#define DP_HTT_RX_MCS_LEN HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS +#define DP_HTT_RX_NSS_LEN HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS +#define DP_HTT_RX_DCM_LEN HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS +#define DP_HTT_RX_BW_LEN HTT_RX_PDEV_STATS_NUM_BW_COUNTERS +#define DP_HTT_RX_PREAM_LEN HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES +#define DP_HTT_RSSI_CHAIN_LEN HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS +#define DP_HTT_RX_GI_LEN HTT_RX_PDEV_STATS_NUM_GI_COUNTERS +#define DP_HTT_FW_RING_MGMT_SUBTYPE_LEN HTT_STATS_SUBTYPE_MAX +#define DP_HTT_FW_RING_CTRL_SUBTYPE_LEN HTT_STATS_SUBTYPE_MAX +#define DP_HTT_FW_RING_MPDU_ERR_LEN HTT_RX_STATS_RXDMA_MAX_ERR +#define DP_HTT_TID_NAME_LEN MAX_HTT_TID_NAME +#define DP_HTT_PEER_NUM_SS HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS +#define DP_HTT_PDEV_TX_GI_LEN HTT_TX_PDEV_STATS_NUM_GI_COUNTERS + +#define DP_MAX_INT_CONTEXTS_STRING_LENGTH (6 * WLAN_CFG_INT_NUM_CONTEXTS) +#define DP_NSS_LENGTH (6 * SS_COUNT) +#define DP_MU_GROUP_LENGTH (6 * DP_MU_GROUP_SHOW) +#define DP_MU_GROUP_SHOW 16 +#define DP_MAX_MCS_STRING_LEN 34 +#define DP_RXDMA_ERR_LENGTH (6 * HAL_RXDMA_ERR_MAX) +#define DP_REO_ERR_LENGTH (6 * HAL_REO_ERR_MAX) +#define STATS_PROC_TIMEOUT (HZ / 1000) + +#define MCS_VALID 1 +#define MCS_INVALID 0 + +/* + * struct dp_rate_debug + * + * @mcs_type: print string for a given mcs + * @valid: valid mcs rate? + */ +struct dp_rate_debug { + char mcs_type[DP_MAX_MCS_STRING_LEN]; + uint8_t valid; +}; + +static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = { + { + {"OFDM 48 Mbps", MCS_VALID}, + {"OFDM 24 Mbps", MCS_VALID}, + {"OFDM 12 Mbps", MCS_VALID}, + {"OFDM 6 Mbps ", MCS_VALID}, + {"OFDM 54 Mbps", MCS_VALID}, + {"OFDM 36 Mbps", MCS_VALID}, + {"OFDM 18 Mbps", MCS_VALID}, + {"OFDM 9 Mbps ", MCS_VALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_VALID}, + }, + { + {"CCK 11 Mbps Long ", MCS_VALID}, + {"CCK 5.5 Mbps Long ", MCS_VALID}, + {"CCK 2 Mbps Long ", MCS_VALID}, + {"CCK 1 Mbps Long ", MCS_VALID}, + {"CCK 11 Mbps Short ", MCS_VALID}, + {"CCK 5.5 Mbps Short", MCS_VALID}, + {"CCK 2 Mbps Short ", MCS_VALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_VALID}, + }, + { + {"HT MCS 0 (BPSK 1/2) ", MCS_VALID}, + {"HT MCS 1 (QPSK 1/2) ", MCS_VALID}, + {"HT MCS 2 (QPSK 3/4) ", MCS_VALID}, + {"HT MCS 3 (16-QAM 1/2)", MCS_VALID}, + {"HT MCS 4 (16-QAM 3/4)", MCS_VALID}, + {"HT MCS 5 (64-QAM 2/3)", MCS_VALID}, + {"HT MCS 6 (64-QAM 3/4)", MCS_VALID}, + {"HT MCS 7 (64-QAM 5/6)", MCS_VALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_VALID}, + }, + { + {"VHT MCS 0 (BPSK 1/2) ", MCS_VALID}, + {"VHT MCS 1 (QPSK 1/2) ", MCS_VALID}, + {"VHT MCS 2 (QPSK 3/4) ", MCS_VALID}, + {"VHT MCS 3 (16-QAM 1/2) ", MCS_VALID}, + {"VHT MCS 4 (16-QAM 3/4) ", MCS_VALID}, + {"VHT MCS 5 (64-QAM 2/3) ", MCS_VALID}, + {"VHT MCS 6 (64-QAM 3/4) ", MCS_VALID}, + {"VHT MCS 7 (64-QAM 5/6) ", MCS_VALID}, + {"VHT MCS 8 (256-QAM 3/4) ", MCS_VALID}, + {"VHT MCS 9 (256-QAM 5/6) ", MCS_VALID}, + {"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID}, + {"VHT MCS 11 (1024-QAM 5/6)", MCS_VALID}, + {"INVALID ", MCS_VALID}, + }, + { + {"HE MCS 0 (BPSK 1/2) ", MCS_VALID}, + {"HE MCS 1 (QPSK 1/2) ", MCS_VALID}, + {"HE MCS 2 (QPSK 3/4) ", MCS_VALID}, + {"HE MCS 3 (16-QAM 1/2) ", MCS_VALID}, + {"HE MCS 4 (16-QAM 3/4) ", MCS_VALID}, + {"HE MCS 5 (64-QAM 2/3) ", MCS_VALID}, + {"HE MCS 6 (64-QAM 3/4) ", MCS_VALID}, + {"HE MCS 7 (64-QAM 5/6) ", MCS_VALID}, + {"HE MCS 8 (256-QAM 3/4) ", MCS_VALID}, + {"HE MCS 9 (256-QAM 5/6) ", MCS_VALID}, + {"HE MCS 10 (1024-QAM 3/4)", MCS_VALID}, + {"HE MCS 11 (1024-QAM 5/6)", MCS_VALID}, + {"INVALID ", MCS_VALID}, + } +}; + +static const struct dp_rate_debug dp_ppdu_rate_string[DOT11_MAX][MAX_MCS] = { + { + {"HE MCS 0 (BPSK 1/2) ", MCS_VALID}, + {"HE MCS 1 (QPSK 1/2) ", MCS_VALID}, + {"HE MCS 2 (QPSK 3/4) ", MCS_VALID}, + {"HE MCS 3 (16-QAM 1/2) ", MCS_VALID}, + {"HE MCS 4 (16-QAM 3/4) ", MCS_VALID}, + {"HE MCS 5 (64-QAM 2/3) ", MCS_VALID}, + {"HE MCS 6 (64-QAM 3/4) ", MCS_VALID}, + {"HE MCS 7 (64-QAM 5/6) ", MCS_VALID}, + {"HE MCS 8 (256-QAM 3/4) ", MCS_VALID}, + {"HE MCS 9 (256-QAM 5/6) ", MCS_VALID}, + {"HE MCS 10 (1024-QAM 3/4)", MCS_VALID}, + {"HE MCS 11 (1024-QAM 5/6)", MCS_VALID}, + {"INVALID ", MCS_VALID}, + } +}; + +static const struct dp_rate_debug dp_mu_rate_string[RX_TYPE_MU_MAX][MAX_MCS] = { + { + {"HE MU-MIMO MCS 0 (BPSK 1/2) ", MCS_VALID}, + {"HE MU-MIMO MCS 1 (QPSK 1/2) ", MCS_VALID}, + {"HE MU-MIMO MCS 2 (QPSK 3/4) ", MCS_VALID}, + {"HE MU-MIMO MCS 3 (16-QAM 1/2) ", MCS_VALID}, + {"HE MU-MIMO MCS 4 (16-QAM 3/4) ", MCS_VALID}, + {"HE MU-MIMO MCS 5 (64-QAM 2/3) ", MCS_VALID}, + {"HE MU-MIMO MCS 6 (64-QAM 3/4) ", MCS_VALID}, + {"HE MU-MIMO MCS 7 (64-QAM 5/6) ", MCS_VALID}, + {"HE MU-MIMO MCS 8 (256-QAM 3/4) ", MCS_VALID}, + {"HE MU-MIMO MCS 9 (256-QAM 5/6) ", MCS_VALID}, + {"HE MU-MIMO MCS 10 (1024-QAM 3/4)", MCS_VALID}, + {"HE MU-MIMO MCS 11 (1024-QAM 5/6)", MCS_VALID}, + {"INVALID ", MCS_VALID}, + }, + { + {"HE OFDMA MCS 0 (BPSK 1/2) ", MCS_VALID}, + {"HE OFDMA MCS 1 (QPSK 1/2) ", MCS_VALID}, + {"HE OFDMA MCS 2 (QPSK 3/4) ", MCS_VALID}, + {"HE OFDMA MCS 3 (16-QAM 1/2) ", MCS_VALID}, + {"HE OFDMA MCS 4 (16-QAM 3/4) ", MCS_VALID}, + {"HE OFDMA MCS 5 (64-QAM 2/3) ", MCS_VALID}, + {"HE OFDMA MCS 6 (64-QAM 3/4) ", MCS_VALID}, + {"HE OFDMA MCS 7 (64-QAM 5/6) ", MCS_VALID}, + {"HE OFDMA MCS 8 (256-QAM 3/4) ", MCS_VALID}, + {"HE OFDMA MCS 9 (256-QAM 5/6) ", MCS_VALID}, + {"HE OFDMA MCS 10 (1024-QAM 3/4)", MCS_VALID}, + {"HE OFDMA MCS 11 (1024-QAM 5/6)", MCS_VALID}, + {"INVALID ", MCS_VALID}, + }, +}; + +const char *mu_reception_mode[RX_TYPE_MU_MAX] = { + "MU MIMO", "MU OFDMA" +}; + +#ifdef QCA_ENH_V3_STATS_SUPPORT +const char *fw_to_hw_delay_bucket[CDP_DELAY_BUCKET_MAX + 1] = { + "0 to 10 ms", "11 to 20 ms", + "21 to 30 ms", "31 to 40 ms", + "41 to 50 ms", "51 to 60 ms", + "61 to 70 ms", "71 to 80 ms", + "81 to 90 ms", "91 to 100 ms", + "101 to 250 ms", "251 to 500 ms", "500+ ms" +}; + +const char *sw_enq_delay_bucket[CDP_DELAY_BUCKET_MAX + 1] = { + "0 to 1 ms", "1 to 2 ms", + "2 to 3 ms", "3 to 4 ms", + "4 to 5 ms", "5 to 6 ms", + "6 to 7 ms", "7 to 8 ms", + "8 to 9 ms", "9 to 10 ms", + "10 to 11 ms", "11 to 12 ms", "12+ ms" +}; + +const char *intfrm_delay_bucket[CDP_DELAY_BUCKET_MAX + 1] = { + "0 to 5 ms", "6 to 10 ms", + "11 to 15 ms", "16 to 20 ms", + "21 to 25 ms", "26 to 30 ms", + "31 to 35 ms", "36 to 40 ms", + "41 to 45 ms", "46 to 50 ms", + "51 to 55 ms", "56 to 60 ms", "60+ ms" +}; +#endif + +#ifdef WLAN_TX_PKT_CAPTURE_ENH +#include "dp_tx_capture.h" +#endif + +#define TID_COUNTER_STATS 1 /* Success/drop stats type */ +#define TID_DELAY_STATS 2 /* Delay stats type */ + +/* + * dp_print_stats_string_tlv: display htt_stats_string_tlv + * @tag_buf: buffer containing the tlv htt_stats_string_tlv + * + * return:void + */ +static inline void dp_print_stats_string_tlv(uint32_t *tag_buf) +{ + htt_stats_string_tlv *dp_stats_buf = + (htt_stats_string_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *data = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!data) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + DP_PRINT_STATS("HTT_STATS_STRING_TLV:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&data[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->data[i]); + } + DP_PRINT_STATS("data = %s\n", data); + qdf_mem_free(data); +} + +/* + * dp_print_tx_pdev_stats_cmn_tlv: display htt_tx_pdev_stats_cmn_tlv + * @tag_buf: buffer containing the tlv htt_tx_pdev_stats_cmn_tlv + * + * return:void + */ +static inline void dp_print_tx_pdev_stats_cmn_tlv(uint32_t *tag_buf) +{ + htt_tx_pdev_stats_cmn_tlv *dp_stats_buf = + (htt_tx_pdev_stats_cmn_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_PDEV_STATS_CMN_TLV:"); + DP_PRINT_STATS("mac_id__word = %u", + dp_stats_buf->mac_id__word); + DP_PRINT_STATS("hw_queued = %u", + dp_stats_buf->hw_queued); + DP_PRINT_STATS("hw_reaped = %u", + dp_stats_buf->hw_reaped); + DP_PRINT_STATS("underrun = %u", + dp_stats_buf->underrun); + DP_PRINT_STATS("hw_paused = %u", + dp_stats_buf->hw_paused); + DP_PRINT_STATS("hw_flush = %u", + dp_stats_buf->hw_flush); + DP_PRINT_STATS("hw_filt = %u", + dp_stats_buf->hw_filt); + DP_PRINT_STATS("tx_abort = %u", + dp_stats_buf->tx_abort); + DP_PRINT_STATS("mpdu_requeued = %u", + dp_stats_buf->mpdu_requed); + DP_PRINT_STATS("tx_xretry = %u", + dp_stats_buf->tx_xretry); + DP_PRINT_STATS("data_rc = %u", + dp_stats_buf->data_rc); + DP_PRINT_STATS("mpdu_dropped_xretry = %u", + dp_stats_buf->mpdu_dropped_xretry); + DP_PRINT_STATS("illegal_rate_phy_err = %u", + dp_stats_buf->illgl_rate_phy_err); + DP_PRINT_STATS("cont_xretry = %u", + dp_stats_buf->cont_xretry); + DP_PRINT_STATS("tx_timeout = %u", + dp_stats_buf->tx_timeout); + DP_PRINT_STATS("pdev_resets = %u", + dp_stats_buf->pdev_resets); + DP_PRINT_STATS("phy_underrun = %u", + dp_stats_buf->phy_underrun); + DP_PRINT_STATS("txop_ovf = %u", + dp_stats_buf->txop_ovf); + DP_PRINT_STATS("seq_posted = %u", + dp_stats_buf->seq_posted); + DP_PRINT_STATS("seq_failed_queueing = %u", + dp_stats_buf->seq_failed_queueing); + DP_PRINT_STATS("seq_completed = %u", + dp_stats_buf->seq_completed); + DP_PRINT_STATS("seq_restarted = %u", + dp_stats_buf->seq_restarted); + DP_PRINT_STATS("mu_seq_posted = %u", + dp_stats_buf->mu_seq_posted); + DP_PRINT_STATS("seq_switch_hw_paused = %u", + dp_stats_buf->seq_switch_hw_paused); + DP_PRINT_STATS("next_seq_posted_dsr = %u", + dp_stats_buf->next_seq_posted_dsr); + DP_PRINT_STATS("seq_posted_isr = %u", + dp_stats_buf->seq_posted_isr); + DP_PRINT_STATS("seq_ctrl_cached = %u", + dp_stats_buf->seq_ctrl_cached); + DP_PRINT_STATS("mpdu_count_tqm = %u", + dp_stats_buf->mpdu_count_tqm); + DP_PRINT_STATS("msdu_count_tqm = %u", + dp_stats_buf->msdu_count_tqm); + DP_PRINT_STATS("mpdu_removed_tqm = %u", + dp_stats_buf->mpdu_removed_tqm); + DP_PRINT_STATS("msdu_removed_tqm = %u", + dp_stats_buf->msdu_removed_tqm); + DP_PRINT_STATS("mpdus_sw_flush = %u", + dp_stats_buf->mpdus_sw_flush); + DP_PRINT_STATS("mpdus_hw_filter = %u", + dp_stats_buf->mpdus_hw_filter); + DP_PRINT_STATS("mpdus_truncated = %u", + dp_stats_buf->mpdus_truncated); + DP_PRINT_STATS("mpdus_ack_failed = %u", + dp_stats_buf->mpdus_ack_failed); + DP_PRINT_STATS("mpdus_expired = %u", + dp_stats_buf->mpdus_expired); + DP_PRINT_STATS("mpdus_seq_hw_retry = %u", + dp_stats_buf->mpdus_seq_hw_retry); + DP_PRINT_STATS("ack_tlv_proc = %u", + dp_stats_buf->ack_tlv_proc); + DP_PRINT_STATS("coex_abort_mpdu_cnt_valid = %u", + dp_stats_buf->coex_abort_mpdu_cnt_valid); + DP_PRINT_STATS("coex_abort_mpdu_cnt = %u\n", + dp_stats_buf->coex_abort_mpdu_cnt); +} + +/* + * dp_print_tx_pdev_stats_urrn_tlv_v: display htt_tx_pdev_stats_urrn_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_pdev_stats_urrn_tlv_v + * + * return:void + */ +static inline void dp_print_tx_pdev_stats_urrn_tlv_v(uint32_t *tag_buf) +{ + htt_tx_pdev_stats_urrn_tlv_v *dp_stats_buf = + (htt_tx_pdev_stats_urrn_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *urrn_stats = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!urrn_stats) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_PDEV_MAX_URRN_STATS); + DP_PRINT_STATS("HTT_TX_PDEV_STATS_URRN_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&urrn_stats[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->urrn_stats[i]); + } + DP_PRINT_STATS("urrn_stats = %s\n", urrn_stats); + qdf_mem_free(urrn_stats); +} + +/* + * dp_print_tx_pdev_stats_flush_tlv_v: display htt_tx_pdev_stats_flush_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_pdev_stats_flush_tlv_v + * + * return:void + */ +static inline void dp_print_tx_pdev_stats_flush_tlv_v(uint32_t *tag_buf) +{ + htt_tx_pdev_stats_flush_tlv_v *dp_stats_buf = + (htt_tx_pdev_stats_flush_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *flush_errs = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!flush_errs) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, + (uint32_t)HTT_TX_PDEV_MAX_FLUSH_REASON_STATS); + + DP_PRINT_STATS("HTT_TX_PDEV_STATS_FLUSH_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&flush_errs[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->flush_errs[i]); + } + DP_PRINT_STATS("flush_errs = %s\n", flush_errs); + qdf_mem_free(flush_errs); +} + +/* + * dp_print_tx_pdev_stats_sifs_tlv_v: display htt_tx_pdev_stats_sifs_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_pdev_stats_sifs_tlv_v + * + * return:void + */ +static inline void dp_print_tx_pdev_stats_sifs_tlv_v(uint32_t *tag_buf) +{ + htt_tx_pdev_stats_sifs_tlv_v *dp_stats_buf = + (htt_tx_pdev_stats_sifs_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *sifs_status = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!sifs_status) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_PDEV_MAX_SIFS_BURST_STATS); + + DP_PRINT_STATS("HTT_TX_PDEV_STATS_SIFS_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&sifs_status[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->sifs_status[i]); + } + DP_PRINT_STATS("sifs_status = %s\n", sifs_status); + qdf_mem_free(sifs_status); +} + +/* + * dp_print_tx_pdev_stats_phy_err_tlv_v: display htt_tx_pdev_stats_phy_err_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_pdev_stats_phy_err_tlv_v + * + * return:void + */ +static inline void dp_print_tx_pdev_stats_phy_err_tlv_v(uint32_t *tag_buf) +{ + htt_tx_pdev_stats_phy_err_tlv_v *dp_stats_buf = + (htt_tx_pdev_stats_phy_err_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *phy_errs = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!phy_errs) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_PDEV_MAX_PHY_ERR_STATS); + + DP_PRINT_STATS("HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&phy_errs[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->phy_errs[i]); + } + DP_PRINT_STATS("phy_errs = %s\n", phy_errs); + qdf_mem_free(phy_errs); +} + +/* + * dp_print_hw_stats_intr_misc_tlv: display htt_hw_stats_intr_misc_tlv + * @tag_buf: buffer containing the tlv htt_hw_stats_intr_misc_tlv + * + * return:void + */ +static inline void dp_print_hw_stats_intr_misc_tlv(uint32_t *tag_buf) +{ + htt_hw_stats_intr_misc_tlv *dp_stats_buf = + (htt_hw_stats_intr_misc_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *hw_intr_name = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!hw_intr_name) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + DP_PRINT_STATS("HTT_HW_STATS_INTR_MISC_TLV:"); + for (i = 0; i < DP_HTT_HW_INTR_NAME_LEN; i++) { + index += qdf_snprint(&hw_intr_name[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->hw_intr_name[i]); + } + DP_PRINT_STATS("hw_intr_name = %s ", hw_intr_name); + DP_PRINT_STATS("mask = %u", + dp_stats_buf->mask); + DP_PRINT_STATS("count = %u\n", + dp_stats_buf->count); + qdf_mem_free(hw_intr_name); +} + +/* + * dp_print_hw_stats_wd_timeout_tlv: display htt_hw_stats_wd_timeout_tlv + * @tag_buf: buffer containing the tlv htt_hw_stats_wd_timeout_tlv + * + * return:void + */ +static inline void dp_print_hw_stats_wd_timeout_tlv(uint32_t *tag_buf) +{ + htt_hw_stats_wd_timeout_tlv *dp_stats_buf = + (htt_hw_stats_wd_timeout_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *hw_module_name = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!hw_module_name) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + DP_PRINT_STATS("HTT_HW_STATS_WD_TIMEOUT_TLV:"); + for (i = 0; i < DP_HTT_HW_MODULE_NAME_LEN; i++) { + index += qdf_snprint(&hw_module_name[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->hw_module_name[i]); + } + DP_PRINT_STATS("hw_module_name = %s ", hw_module_name); + DP_PRINT_STATS("count = %u", + dp_stats_buf->count); + qdf_mem_free(hw_module_name); +} + +/* + * dp_print_hw_stats_pdev_errs_tlv: display htt_hw_stats_pdev_errs_tlv + * @tag_buf: buffer containing the tlv htt_hw_stats_pdev_errs_tlv + * + * return:void + */ +static inline void dp_print_hw_stats_pdev_errs_tlv(uint32_t *tag_buf) +{ + htt_hw_stats_pdev_errs_tlv *dp_stats_buf = + (htt_hw_stats_pdev_errs_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_HW_STATS_PDEV_ERRS_TLV:"); + DP_PRINT_STATS("mac_id__word = %u", + dp_stats_buf->mac_id__word); + DP_PRINT_STATS("tx_abort = %u", + dp_stats_buf->tx_abort); + DP_PRINT_STATS("tx_abort_fail_count = %u", + dp_stats_buf->tx_abort_fail_count); + DP_PRINT_STATS("rx_abort = %u", + dp_stats_buf->rx_abort); + DP_PRINT_STATS("rx_abort_fail_count = %u", + dp_stats_buf->rx_abort_fail_count); + DP_PRINT_STATS("warm_reset = %u", + dp_stats_buf->warm_reset); + DP_PRINT_STATS("cold_reset = %u", + dp_stats_buf->cold_reset); + DP_PRINT_STATS("tx_flush = %u", + dp_stats_buf->tx_flush); + DP_PRINT_STATS("tx_glb_reset = %u", + dp_stats_buf->tx_glb_reset); + DP_PRINT_STATS("tx_txq_reset = %u", + dp_stats_buf->tx_txq_reset); + DP_PRINT_STATS("rx_timeout_reset = %u\n", + dp_stats_buf->rx_timeout_reset); +} + +/* + * dp_print_msdu_flow_stats_tlv: display htt_msdu_flow_stats_tlv + * @tag_buf: buffer containing the tlv htt_msdu_flow_stats_tlv + * + * return:void + */ +static inline void dp_print_msdu_flow_stats_tlv(uint32_t *tag_buf) +{ + htt_msdu_flow_stats_tlv *dp_stats_buf = + (htt_msdu_flow_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_MSDU_FLOW_STATS_TLV:"); + DP_PRINT_STATS("last_update_timestamp = %u", + dp_stats_buf->last_update_timestamp); + DP_PRINT_STATS("last_add_timestamp = %u", + dp_stats_buf->last_add_timestamp); + DP_PRINT_STATS("last_remove_timestamp = %u", + dp_stats_buf->last_remove_timestamp); + DP_PRINT_STATS("total_processed_msdu_count = %u", + dp_stats_buf->total_processed_msdu_count); + DP_PRINT_STATS("cur_msdu_count_in_flowq = %u", + dp_stats_buf->cur_msdu_count_in_flowq); + DP_PRINT_STATS("sw_peer_id = %u", + dp_stats_buf->sw_peer_id); + DP_PRINT_STATS("tx_flow_no__tid_num__drop_rule = %u\n", + dp_stats_buf->tx_flow_no__tid_num__drop_rule); +} + +/* + * dp_print_tx_tid_stats_tlv: display htt_tx_tid_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_tid_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_tid_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_tid_stats_tlv *dp_stats_buf = + (htt_tx_tid_stats_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *tid_name = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!tid_name) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + DP_PRINT_STATS("HTT_TX_TID_STATS_TLV:"); + for (i = 0; i < DP_HTT_TID_NAME_LEN; i++) { + index += qdf_snprint(&tid_name[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tid_name[i]); + } + DP_PRINT_STATS("tid_name = %s ", tid_name); + DP_PRINT_STATS("sw_peer_id__tid_num = %u", + dp_stats_buf->sw_peer_id__tid_num); + DP_PRINT_STATS("num_sched_pending__num_ppdu_in_hwq = %u", + dp_stats_buf->num_sched_pending__num_ppdu_in_hwq); + DP_PRINT_STATS("tid_flags = %u", + dp_stats_buf->tid_flags); + DP_PRINT_STATS("hw_queued = %u", + dp_stats_buf->hw_queued); + DP_PRINT_STATS("hw_reaped = %u", + dp_stats_buf->hw_reaped); + DP_PRINT_STATS("mpdus_hw_filter = %u", + dp_stats_buf->mpdus_hw_filter); + DP_PRINT_STATS("qdepth_bytes = %u", + dp_stats_buf->qdepth_bytes); + DP_PRINT_STATS("qdepth_num_msdu = %u", + dp_stats_buf->qdepth_num_msdu); + DP_PRINT_STATS("qdepth_num_mpdu = %u", + dp_stats_buf->qdepth_num_mpdu); + DP_PRINT_STATS("last_scheduled_tsmp = %u", + dp_stats_buf->last_scheduled_tsmp); + DP_PRINT_STATS("pause_module_id = %u", + dp_stats_buf->pause_module_id); + DP_PRINT_STATS("block_module_id = %u\n", + dp_stats_buf->block_module_id); + DP_PRINT_STATS("tid_tx_airtime = %u\n", + dp_stats_buf->tid_tx_airtime); + qdf_mem_free(tid_name); +} + +/* + * dp_print_tx_tid_stats_v1_tlv: display htt_tx_tid_stats_v1_tlv + * @tag_buf: buffer containing the tlv htt_tx_tid_stats_v1_tlv + * + * return:void + */ +static inline void dp_print_tx_tid_stats_v1_tlv(uint32_t *tag_buf) +{ + htt_tx_tid_stats_v1_tlv *dp_stats_buf = + (htt_tx_tid_stats_v1_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *tid_name = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!tid_name) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + DP_PRINT_STATS("HTT_TX_TID_STATS_V1_TLV:"); + for (i = 0; i < DP_HTT_TID_NAME_LEN; i++) { + index += qdf_snprint(&tid_name[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tid_name[i]); + } + DP_PRINT_STATS("tid_name = %s ", tid_name); + DP_PRINT_STATS("sw_peer_id__tid_num = %u", + dp_stats_buf->sw_peer_id__tid_num); + DP_PRINT_STATS("num_sched_pending__num_ppdu_in_hwq = %u", + dp_stats_buf->num_sched_pending__num_ppdu_in_hwq); + DP_PRINT_STATS("tid_flags = %u", + dp_stats_buf->tid_flags); + DP_PRINT_STATS("max_qdepth_bytes = %u", + dp_stats_buf->max_qdepth_bytes); + DP_PRINT_STATS("max_qdepth_n_msdus = %u", + dp_stats_buf->max_qdepth_n_msdus); + DP_PRINT_STATS("rsvd = %u", + dp_stats_buf->rsvd); + DP_PRINT_STATS("qdepth_bytes = %u", + dp_stats_buf->qdepth_bytes); + DP_PRINT_STATS("qdepth_num_msdu = %u", + dp_stats_buf->qdepth_num_msdu); + DP_PRINT_STATS("qdepth_num_mpdu = %u", + dp_stats_buf->qdepth_num_mpdu); + DP_PRINT_STATS("last_scheduled_tsmp = %u", + dp_stats_buf->last_scheduled_tsmp); + DP_PRINT_STATS("pause_module_id = %u", + dp_stats_buf->pause_module_id); + DP_PRINT_STATS("block_module_id = %u\n", + dp_stats_buf->block_module_id); + DP_PRINT_STATS("tid_tx_airtime = %u\n", + dp_stats_buf->tid_tx_airtime); + qdf_mem_free(tid_name); +} + +/* + * dp_print_rx_tid_stats_tlv: display htt_rx_tid_stats_tlv + * @tag_buf: buffer containing the tlv htt_rx_tid_stats_tlv + * + * return:void + */ +static inline void dp_print_rx_tid_stats_tlv(uint32_t *tag_buf) +{ + htt_rx_tid_stats_tlv *dp_stats_buf = + (htt_rx_tid_stats_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *tid_name = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!tid_name) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + DP_PRINT_STATS("HTT_RX_TID_STATS_TLV:"); + DP_PRINT_STATS("sw_peer_id__tid_num = %u", + dp_stats_buf->sw_peer_id__tid_num); + for (i = 0; i < DP_HTT_TID_NAME_LEN; i++) { + index += qdf_snprint(&tid_name[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tid_name[i]); + } + DP_PRINT_STATS("tid_name = %s ", tid_name); + DP_PRINT_STATS("dup_in_reorder = %u", + dp_stats_buf->dup_in_reorder); + DP_PRINT_STATS("dup_past_outside_window = %u", + dp_stats_buf->dup_past_outside_window); + DP_PRINT_STATS("dup_past_within_window = %u", + dp_stats_buf->dup_past_within_window); + DP_PRINT_STATS("rxdesc_err_decrypt = %u\n", + dp_stats_buf->rxdesc_err_decrypt); + qdf_mem_free(tid_name); +} + +/* + * dp_print_counter_tlv: display htt_counter_tlv + * @tag_buf: buffer containing the tlv htt_counter_tlv + * + * return:void + */ +static inline void dp_print_counter_tlv(uint32_t *tag_buf) +{ + htt_counter_tlv *dp_stats_buf = + (htt_counter_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *counter_name = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!counter_name) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + DP_PRINT_STATS("HTT_COUNTER_TLV:"); + for (i = 0; i < DP_HTT_COUNTER_NAME_LEN; i++) { + index += qdf_snprint(&counter_name[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->counter_name[i]); + } + DP_PRINT_STATS("counter_name = %s ", counter_name); + DP_PRINT_STATS("count = %u\n", + dp_stats_buf->count); + qdf_mem_free(counter_name); +} + +/* + * dp_print_peer_stats_cmn_tlv: display htt_peer_stats_cmn_tlv + * @tag_buf: buffer containing the tlv htt_peer_stats_cmn_tlv + * + * return:void + */ +static inline void dp_print_peer_stats_cmn_tlv(uint32_t *tag_buf) +{ + htt_peer_stats_cmn_tlv *dp_stats_buf = + (htt_peer_stats_cmn_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_PEER_STATS_CMN_TLV:"); + DP_PRINT_STATS("ppdu_cnt = %u", + dp_stats_buf->ppdu_cnt); + DP_PRINT_STATS("mpdu_cnt = %u", + dp_stats_buf->mpdu_cnt); + DP_PRINT_STATS("msdu_cnt = %u", + dp_stats_buf->msdu_cnt); + DP_PRINT_STATS("pause_bitmap = %u", + dp_stats_buf->pause_bitmap); + DP_PRINT_STATS("block_bitmap = %u", + dp_stats_buf->block_bitmap); + DP_PRINT_STATS("current_timestamp = %u\n", + dp_stats_buf->current_timestamp); + DP_PRINT_STATS("inactive_time = %u", + dp_stats_buf->inactive_time); +} + +/* + * dp_print_peer_details_tlv: display htt_peer_details_tlv + * @tag_buf: buffer containing the tlv htt_peer_details_tlv + * + * return:void + */ +static inline void dp_print_peer_details_tlv(uint32_t *tag_buf) +{ + htt_peer_details_tlv *dp_stats_buf = + (htt_peer_details_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_PEER_DETAILS_TLV:"); + DP_PRINT_STATS("peer_type = %u", + dp_stats_buf->peer_type); + DP_PRINT_STATS("sw_peer_id = %u", + dp_stats_buf->sw_peer_id); + DP_PRINT_STATS("vdev_pdev_ast_idx = %u", + dp_stats_buf->vdev_pdev_ast_idx); + DP_PRINT_STATS("mac_addr(upper 4 bytes) = %u", + dp_stats_buf->mac_addr.mac_addr31to0); + DP_PRINT_STATS("mac_addr(lower 2 bytes) = %u", + dp_stats_buf->mac_addr.mac_addr47to32); + DP_PRINT_STATS("peer_flags = %u", + dp_stats_buf->peer_flags); + DP_PRINT_STATS("qpeer_flags = %u\n", + dp_stats_buf->qpeer_flags); +} + +/* + * dp_print_tx_peer_rate_stats_tlv: display htt_tx_peer_rate_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_peer_rate_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_peer_rate_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_peer_rate_stats_tlv *dp_stats_buf = + (htt_tx_peer_rate_stats_tlv *)tag_buf; + uint8_t i, j; + uint16_t index = 0; + char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS]; + char *str_buf = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!str_buf) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + for (i = 0; i < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; i++) + tx_gi[i] = (char *)qdf_mem_malloc(DP_MAX_STRING_LEN); + + DP_PRINT_STATS("HTT_TX_PEER_RATE_STATS_TLV:"); + DP_PRINT_STATS("tx_ldpc = %u", + dp_stats_buf->tx_ldpc); + DP_PRINT_STATS("rts_cnt = %u", + dp_stats_buf->rts_cnt); + DP_PRINT_STATS("ack_rssi = %u", + dp_stats_buf->ack_rssi); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_TX_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tx_mcs[i]); + } + DP_PRINT_STATS("tx_mcs = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_TX_SU_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tx_su_mcs[i]); + } + DP_PRINT_STATS("tx_su_mcs = %s ", str_buf); + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_TX_MU_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tx_mu_mcs[i]); + } + DP_PRINT_STATS("tx_mu_mcs = %s ", str_buf); + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_TX_NSS_LEN; i++) { + /* 0 stands for NSS 1, 1 stands for NSS 2, etc. */ + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", (i + 1), + dp_stats_buf->tx_nss[i]); + } + DP_PRINT_STATS("tx_nss = %s ", str_buf); + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_TX_BW_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tx_bw[i]); + } + DP_PRINT_STATS("tx_bw = %s ", str_buf); + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tx_stbc[i]); + } + DP_PRINT_STATS("tx_stbc = %s ", str_buf); + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + + for (i = 0; i < DP_HTT_TX_PREAM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tx_pream[i]); + } + DP_PRINT_STATS("tx_pream = %s ", str_buf); + + for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) { + index = 0; + for (i = 0; i < HTT_TX_PEER_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&tx_gi[j][index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->tx_gi[j][i]); + } + DP_PRINT_STATS("tx_gi[%u] = %s ", j, tx_gi[j]); + } + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_TX_DCM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tx_dcm[i]); + } + DP_PRINT_STATS("tx_dcm = %s\n", str_buf); + for (i = 0; i < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; i++) + qdf_mem_free(tx_gi[i]); + + qdf_mem_free(str_buf); +} + +/* + * dp_print_rx_peer_rate_stats_tlv: display htt_rx_peer_rate_stats_tlv + * @tag_buf: buffer containing the tlv htt_rx_peer_rate_stats_tlv + * + * return:void + */ +static inline void dp_print_rx_peer_rate_stats_tlv(uint32_t *tag_buf) +{ + htt_rx_peer_rate_stats_tlv *dp_stats_buf = + (htt_rx_peer_rate_stats_tlv *)tag_buf; + uint8_t i, j; + uint16_t index = 0; + char *rssi_chain[DP_HTT_PEER_NUM_SS]; + char *rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS]; + char *str_buf = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!str_buf) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + for (i = 0; i < DP_HTT_PEER_NUM_SS; i++) + rssi_chain[i] = qdf_mem_malloc(DP_MAX_STRING_LEN); + for (i = 0; i < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; i++) + rx_gi[i] = qdf_mem_malloc(DP_MAX_STRING_LEN); + + DP_PRINT_STATS("HTT_RX_PEER_RATE_STATS_TLV:"); + DP_PRINT_STATS("nsts = %u", + dp_stats_buf->nsts); + DP_PRINT_STATS("rx_ldpc = %u", + dp_stats_buf->rx_ldpc); + DP_PRINT_STATS("rts_cnt = %u", + dp_stats_buf->rts_cnt); + DP_PRINT_STATS("rssi_mgmt = %u", + dp_stats_buf->rssi_mgmt); + DP_PRINT_STATS("rssi_data = %u", + dp_stats_buf->rssi_data); + DP_PRINT_STATS("rssi_comb = %u", + dp_stats_buf->rssi_comb); + + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_RX_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->rx_mcs[i]); + } + DP_PRINT_STATS("rx_mcs = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_RX_NSS_LEN; i++) { + /* 0 stands for NSS 1, 1 stands for NSS 2, etc. */ + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", (i + 1), + dp_stats_buf->rx_nss[i]); + } + DP_PRINT_STATS("rx_nss = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_RX_DCM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->rx_dcm[i]); + } + DP_PRINT_STATS("rx_dcm = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->rx_stbc[i]); + } + DP_PRINT_STATS("rx_stbc = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_RX_BW_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->rx_bw[i]); + } + DP_PRINT_STATS("rx_bw = %s ", str_buf); + + for (j = 0; j < DP_HTT_PEER_NUM_SS; j++) { + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + index = 0; + for (i = 0; i < HTT_RX_PEER_STATS_NUM_BW_COUNTERS; i++) { + index += qdf_snprint(&rssi_chain[j][index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->rssi_chain[j][i]); + } + DP_PRINT_STATS("rssi_chain[%u] = %s ", j, rssi_chain[j]); + } + + for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) { + index = 0; + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&rx_gi[j][index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->rx_gi[j][i]); + } + DP_PRINT_STATS("rx_gi[%u] = %s ", j, rx_gi[j]); + } + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_RX_PREAM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->rx_pream[i]); + } + DP_PRINT_STATS("rx_pream = %s\n", str_buf); + + for (i = 0; i < DP_HTT_PEER_NUM_SS; i++) + qdf_mem_free(rssi_chain[i]); + for (i = 0; i < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; i++) + qdf_mem_free(rx_gi[i]); + + qdf_mem_free(str_buf); +} + +/* + * dp_print_tx_hwq_mu_mimo_sch_stats_tlv: display htt_tx_hwq_mu_mimo_sch_stats + * @tag_buf: buffer containing the tlv htt_tx_hwq_mu_mimo_sch_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_hwq_mu_mimo_sch_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_hwq_mu_mimo_sch_stats_tlv *dp_stats_buf = + (htt_tx_hwq_mu_mimo_sch_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:"); + DP_PRINT_STATS("mu_mimo_sch_posted = %u", + dp_stats_buf->mu_mimo_sch_posted); + DP_PRINT_STATS("mu_mimo_sch_failed = %u", + dp_stats_buf->mu_mimo_sch_failed); + DP_PRINT_STATS("mu_mimo_ppdu_posted = %u\n", + dp_stats_buf->mu_mimo_ppdu_posted); +} + +/* + * dp_print_tx_hwq_mu_mimo_mpdu_stats_tlv: display htt_tx_hwq_mu_mimo_mpdu_stats + * @tag_buf: buffer containing the tlv htt_tx_hwq_mu_mimo_mpdu_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_hwq_mu_mimo_mpdu_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_hwq_mu_mimo_mpdu_stats_tlv *dp_stats_buf = + (htt_tx_hwq_mu_mimo_mpdu_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:"); + DP_PRINT_STATS("mu_mimo_mpdus_queued_usr = %u", + dp_stats_buf->mu_mimo_mpdus_queued_usr); + DP_PRINT_STATS("mu_mimo_mpdus_tried_usr = %u", + dp_stats_buf->mu_mimo_mpdus_tried_usr); + DP_PRINT_STATS("mu_mimo_mpdus_failed_usr = %u", + dp_stats_buf->mu_mimo_mpdus_failed_usr); + DP_PRINT_STATS("mu_mimo_mpdus_requeued_usr = %u", + dp_stats_buf->mu_mimo_mpdus_requeued_usr); + DP_PRINT_STATS("mu_mimo_err_no_ba_usr = %u", + dp_stats_buf->mu_mimo_err_no_ba_usr); + DP_PRINT_STATS("mu_mimo_mpdu_underrun_usr = %u", + dp_stats_buf->mu_mimo_mpdu_underrun_usr); + DP_PRINT_STATS("mu_mimo_ampdu_underrun_usr = %u\n", + dp_stats_buf->mu_mimo_ampdu_underrun_usr); +} + +/* + * dp_print_tx_hwq_mu_mimo_cmn_stats_tlv: display htt_tx_hwq_mu_mimo_cmn_stats + * @tag_buf: buffer containing the tlv htt_tx_hwq_mu_mimo_cmn_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_hwq_mu_mimo_cmn_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_hwq_mu_mimo_cmn_stats_tlv *dp_stats_buf = + (htt_tx_hwq_mu_mimo_cmn_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:"); + DP_PRINT_STATS("mac_id__hwq_id__word = %u\n", + dp_stats_buf->mac_id__hwq_id__word); +} + +/* + * dp_print_tx_hwq_stats_cmn_tlv: display htt_tx_hwq_stats_cmn_tlv + * @tag_buf: buffer containing the tlv htt_tx_hwq_stats_cmn_tlv + * + * return:void + */ +static inline void dp_print_tx_hwq_stats_cmn_tlv(uint32_t *tag_buf) +{ + htt_tx_hwq_stats_cmn_tlv *dp_stats_buf = + (htt_tx_hwq_stats_cmn_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_HWQ_STATS_CMN_TLV:"); + DP_PRINT_STATS("mac_id__hwq_id__word = %u", + dp_stats_buf->mac_id__hwq_id__word); + DP_PRINT_STATS("xretry = %u", + dp_stats_buf->xretry); + DP_PRINT_STATS("underrun_cnt = %u", + dp_stats_buf->underrun_cnt); + DP_PRINT_STATS("flush_cnt = %u", + dp_stats_buf->flush_cnt); + DP_PRINT_STATS("filt_cnt = %u", + dp_stats_buf->filt_cnt); + DP_PRINT_STATS("null_mpdu_bmap = %u", + dp_stats_buf->null_mpdu_bmap); + DP_PRINT_STATS("user_ack_failure = %u", + dp_stats_buf->user_ack_failure); + DP_PRINT_STATS("ack_tlv_proc = %u", + dp_stats_buf->ack_tlv_proc); + DP_PRINT_STATS("sched_id_proc = %u", + dp_stats_buf->sched_id_proc); + DP_PRINT_STATS("null_mpdu_tx_count = %u", + dp_stats_buf->null_mpdu_tx_count); + DP_PRINT_STATS("mpdu_bmap_not_recvd = %u", + dp_stats_buf->mpdu_bmap_not_recvd); + DP_PRINT_STATS("num_bar = %u", + dp_stats_buf->num_bar); + DP_PRINT_STATS("rts = %u", + dp_stats_buf->rts); + DP_PRINT_STATS("cts2self = %u", + dp_stats_buf->cts2self); + DP_PRINT_STATS("qos_null = %u", + dp_stats_buf->qos_null); + DP_PRINT_STATS("mpdu_tried_cnt = %u", + dp_stats_buf->mpdu_tried_cnt); + DP_PRINT_STATS("mpdu_queued_cnt = %u", + dp_stats_buf->mpdu_queued_cnt); + DP_PRINT_STATS("mpdu_ack_fail_cnt = %u", + dp_stats_buf->mpdu_ack_fail_cnt); + DP_PRINT_STATS("mpdu_filt_cnt = %u", + dp_stats_buf->mpdu_filt_cnt); + DP_PRINT_STATS("false_mpdu_ack_count = %u\n", + dp_stats_buf->false_mpdu_ack_count); +} + +/* + * dp_print_tx_hwq_difs_latency_stats_tlv_v: display + * htt_tx_hwq_difs_latency_stats_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_hwq_difs_latency_stats_tlv_v + * + *return:void + */ +static inline void dp_print_tx_hwq_difs_latency_stats_tlv_v(uint32_t *tag_buf) +{ + htt_tx_hwq_difs_latency_stats_tlv_v *dp_stats_buf = + (htt_tx_hwq_difs_latency_stats_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *difs_latency_hist = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!difs_latency_hist) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, + (uint32_t)HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS); + + DP_PRINT_STATS("HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:"); + DP_PRINT_STATS("hist_intvl = %u", + dp_stats_buf->hist_intvl); + + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&difs_latency_hist[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->difs_latency_hist[i]); + } + DP_PRINT_STATS("difs_latency_hist = %s\n", difs_latency_hist); + qdf_mem_free(difs_latency_hist); +} + +/* + * dp_print_tx_hwq_cmd_result_stats_tlv_v: display htt_tx_hwq_cmd_result_stats + * @tag_buf: buffer containing the tlv htt_tx_hwq_cmd_result_stats_tlv_v + * + * return:void + */ +static inline void dp_print_tx_hwq_cmd_result_stats_tlv_v(uint32_t *tag_buf) +{ + htt_tx_hwq_cmd_result_stats_tlv_v *dp_stats_buf = + (htt_tx_hwq_cmd_result_stats_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *cmd_result = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!cmd_result) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_HWQ_MAX_CMD_RESULT_STATS); + + DP_PRINT_STATS("HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&cmd_result[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->cmd_result[i]); + } + DP_PRINT_STATS("cmd_result = %s ", cmd_result); + qdf_mem_free(cmd_result); +} + +/* + * dp_print_tx_hwq_cmd_stall_stats_tlv_v: display htt_tx_hwq_cmd_stall_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_hwq_cmd_stall_stats_tlv_v + * + * return:void + */ +static inline void dp_print_tx_hwq_cmd_stall_stats_tlv_v(uint32_t *tag_buf) +{ + htt_tx_hwq_cmd_stall_stats_tlv_v *dp_stats_buf = + (htt_tx_hwq_cmd_stall_stats_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *cmd_stall_status = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!cmd_stall_status) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_HWQ_MAX_CMD_STALL_STATS); + + DP_PRINT_STATS("HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&cmd_stall_status[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->cmd_stall_status[i]); + } + DP_PRINT_STATS("cmd_stall_status = %s\n", cmd_stall_status); + qdf_mem_free(cmd_stall_status); +} + +/* + * dp_print_tx_hwq_fes_result_stats_tlv_v: display htt_tx_hwq_fes_result_stats + * @tag_buf: buffer containing the tlv htt_tx_hwq_fes_result_stats_tlv_v + * + * return:void + */ +static inline void dp_print_tx_hwq_fes_result_stats_tlv_v(uint32_t *tag_buf) +{ + htt_tx_hwq_fes_result_stats_tlv_v *dp_stats_buf = + (htt_tx_hwq_fes_result_stats_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *fes_result = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!fes_result) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_HWQ_MAX_FES_RESULT_STATS); + + DP_PRINT_STATS("HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&fes_result[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->fes_result[i]); + } + DP_PRINT_STATS("fes_result = %s ", fes_result); + qdf_mem_free(fes_result); +} + +/* + * dp_print_tx_selfgen_cmn_stats_tlv: display htt_tx_selfgen_cmn_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_selfgen_cmn_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_selfgen_cmn_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_selfgen_cmn_stats_tlv *dp_stats_buf = + (htt_tx_selfgen_cmn_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_SELFGEN_CMN_STATS_TLV:"); + DP_PRINT_STATS("mac_id__word = %u", + dp_stats_buf->mac_id__word); + DP_PRINT_STATS("su_bar = %u", + dp_stats_buf->su_bar); + DP_PRINT_STATS("rts = %u", + dp_stats_buf->rts); + DP_PRINT_STATS("cts2self = %u", + dp_stats_buf->cts2self); + DP_PRINT_STATS("qos_null = %u", + dp_stats_buf->qos_null); + DP_PRINT_STATS("delayed_bar_1 = %u", + dp_stats_buf->delayed_bar_1); + DP_PRINT_STATS("delayed_bar_2 = %u", + dp_stats_buf->delayed_bar_2); + DP_PRINT_STATS("delayed_bar_3 = %u", + dp_stats_buf->delayed_bar_3); + DP_PRINT_STATS("delayed_bar_4 = %u", + dp_stats_buf->delayed_bar_4); + DP_PRINT_STATS("delayed_bar_5 = %u", + dp_stats_buf->delayed_bar_5); + DP_PRINT_STATS("delayed_bar_6 = %u", + dp_stats_buf->delayed_bar_6); + DP_PRINT_STATS("delayed_bar_7 = %u\n", + dp_stats_buf->delayed_bar_7); +} + +/* + * dp_print_tx_selfgen_ac_stats_tlv: display htt_tx_selfgen_ac_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_selfgen_ac_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_selfgen_ac_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_selfgen_ac_stats_tlv *dp_stats_buf = + (htt_tx_selfgen_ac_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_SELFGEN_AC_STATS_TLV:"); + DP_PRINT_STATS("ac_su_ndpa = %u", + dp_stats_buf->ac_su_ndpa); + DP_PRINT_STATS("ac_su_ndp = %u", + dp_stats_buf->ac_su_ndp); + DP_PRINT_STATS("ac_mu_mimo_ndpa = %u", + dp_stats_buf->ac_mu_mimo_ndpa); + DP_PRINT_STATS("ac_mu_mimo_ndp = %u", + dp_stats_buf->ac_mu_mimo_ndp); + DP_PRINT_STATS("ac_mu_mimo_brpoll_1 = %u", + dp_stats_buf->ac_mu_mimo_brpoll_1); + DP_PRINT_STATS("ac_mu_mimo_brpoll_2 = %u", + dp_stats_buf->ac_mu_mimo_brpoll_2); + DP_PRINT_STATS("ac_mu_mimo_brpoll_3 = %u\n", + dp_stats_buf->ac_mu_mimo_brpoll_3); +} + +/* + * dp_print_tx_selfgen_ax_stats_tlv: display htt_tx_selfgen_ax_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_selfgen_ax_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_selfgen_ax_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_selfgen_ax_stats_tlv *dp_stats_buf = + (htt_tx_selfgen_ax_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_SELFGEN_AX_STATS_TLV:"); + DP_PRINT_STATS("ax_su_ndpa = %u", + dp_stats_buf->ax_su_ndpa); + DP_PRINT_STATS("ax_su_ndp = %u", + dp_stats_buf->ax_su_ndp); + DP_PRINT_STATS("ax_mu_mimo_ndpa = %u", + dp_stats_buf->ax_mu_mimo_ndpa); + DP_PRINT_STATS("ax_mu_mimo_ndp = %u", + dp_stats_buf->ax_mu_mimo_ndp); + DP_PRINT_STATS("ax_mu_mimo_brpoll_1 = %u", + dp_stats_buf->ax_mu_mimo_brpoll_1); + DP_PRINT_STATS("ax_mu_mimo_brpoll_2 = %u", + dp_stats_buf->ax_mu_mimo_brpoll_2); + DP_PRINT_STATS("ax_mu_mimo_brpoll_3 = %u", + dp_stats_buf->ax_mu_mimo_brpoll_3); + DP_PRINT_STATS("ax_mu_mimo_brpoll_4 = %u", + dp_stats_buf->ax_mu_mimo_brpoll_4); + DP_PRINT_STATS("ax_mu_mimo_brpoll_5 = %u", + dp_stats_buf->ax_mu_mimo_brpoll_5); + DP_PRINT_STATS("ax_mu_mimo_brpoll_6 = %u", + dp_stats_buf->ax_mu_mimo_brpoll_6); + DP_PRINT_STATS("ax_mu_mimo_brpoll_7 = %u", + dp_stats_buf->ax_mu_mimo_brpoll_7); + DP_PRINT_STATS("ax_basic_trigger = %u", + dp_stats_buf->ax_basic_trigger); + DP_PRINT_STATS("ax_bsr_trigger = %u", + dp_stats_buf->ax_bsr_trigger); + DP_PRINT_STATS("ax_mu_bar_trigger = %u", + dp_stats_buf->ax_mu_bar_trigger); + DP_PRINT_STATS("ax_mu_rts_trigger = %u\n", + dp_stats_buf->ax_mu_rts_trigger); +} + +/* + * dp_print_tx_selfgen_ac_err_stats_tlv: display htt_tx_selfgen_ac_err_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_selfgen_ac_err_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_selfgen_ac_err_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_selfgen_ac_err_stats_tlv *dp_stats_buf = + (htt_tx_selfgen_ac_err_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_SELFGEN_AC_ERR_STATS_TLV:"); + DP_PRINT_STATS("ac_su_ndp_err = %u", + dp_stats_buf->ac_su_ndp_err); + DP_PRINT_STATS("ac_su_ndpa_err = %u", + dp_stats_buf->ac_su_ndpa_err); + DP_PRINT_STATS("ac_mu_mimo_ndpa_err = %u", + dp_stats_buf->ac_mu_mimo_ndpa_err); + DP_PRINT_STATS("ac_mu_mimo_ndp_err = %u", + dp_stats_buf->ac_mu_mimo_ndp_err); + DP_PRINT_STATS("ac_mu_mimo_brp1_err = %u", + dp_stats_buf->ac_mu_mimo_brp1_err); + DP_PRINT_STATS("ac_mu_mimo_brp2_err = %u", + dp_stats_buf->ac_mu_mimo_brp2_err); + DP_PRINT_STATS("ac_mu_mimo_brp3_err = %u\n", + dp_stats_buf->ac_mu_mimo_brp3_err); +} + +/* + * dp_print_tx_selfgen_ax_err_stats_tlv: display htt_tx_selfgen_ax_err_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_selfgen_ax_err_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_selfgen_ax_err_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_selfgen_ax_err_stats_tlv *dp_stats_buf = + (htt_tx_selfgen_ax_err_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_SELFGEN_AX_ERR_STATS_TLV:"); + DP_PRINT_STATS("ax_su_ndp_err = %u", + dp_stats_buf->ax_su_ndp_err); + DP_PRINT_STATS("ax_su_ndpa_err = %u", + dp_stats_buf->ax_su_ndpa_err); + DP_PRINT_STATS("ax_mu_mimo_ndpa_err = %u", + dp_stats_buf->ax_mu_mimo_ndpa_err); + DP_PRINT_STATS("ax_mu_mimo_ndp_err = %u", + dp_stats_buf->ax_mu_mimo_ndp_err); + DP_PRINT_STATS("ax_mu_mimo_brp1_err = %u", + dp_stats_buf->ax_mu_mimo_brp1_err); + DP_PRINT_STATS("ax_mu_mimo_brp2_err = %u", + dp_stats_buf->ax_mu_mimo_brp2_err); + DP_PRINT_STATS("ax_mu_mimo_brp3_err = %u", + dp_stats_buf->ax_mu_mimo_brp3_err); + DP_PRINT_STATS("ax_mu_mimo_brp4_err = %u", + dp_stats_buf->ax_mu_mimo_brp4_err); + DP_PRINT_STATS("ax_mu_mimo_brp5_err = %u", + dp_stats_buf->ax_mu_mimo_brp5_err); + DP_PRINT_STATS("ax_mu_mimo_brp6_err = %u", + dp_stats_buf->ax_mu_mimo_brp6_err); + DP_PRINT_STATS("ax_mu_mimo_brp7_err = %u", + dp_stats_buf->ax_mu_mimo_brp7_err); + DP_PRINT_STATS("ax_basic_trigger_err = %u", + dp_stats_buf->ax_basic_trigger_err); + DP_PRINT_STATS("ax_bsr_trigger_err = %u", + dp_stats_buf->ax_bsr_trigger_err); + DP_PRINT_STATS("ax_mu_bar_trigger_err = %u", + dp_stats_buf->ax_mu_bar_trigger_err); + DP_PRINT_STATS("ax_mu_rts_trigger_err = %u\n", + dp_stats_buf->ax_mu_rts_trigger_err); +} + +/* + * dp_print_tx_pdev_mu_mimo_sch_stats_tlv: display htt_tx_pdev_mu_mimo_sch_stats + * @tag_buf: buffer containing the tlv htt_tx_pdev_mu_mimo_sch_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_pdev_mu_mimo_sch_stats_tlv(uint32_t *tag_buf) +{ + uint8_t i; + htt_tx_pdev_mu_mimo_sch_stats_tlv *dp_stats_buf = + (htt_tx_pdev_mu_mimo_sch_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:"); + DP_PRINT_STATS("mu_mimo_sch_posted = %u", + dp_stats_buf->mu_mimo_sch_posted); + DP_PRINT_STATS("mu_mimo_sch_failed = %u", + dp_stats_buf->mu_mimo_sch_failed); + DP_PRINT_STATS("mu_mimo_ppdu_posted = %u\n", + dp_stats_buf->mu_mimo_ppdu_posted); + + DP_PRINT_STATS("11ac MU_MIMO SCH STATS:"); + + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) { + DP_PRINT_STATS("ac_mu_mimo_sch_nusers_%u = %u", i, + dp_stats_buf->ac_mu_mimo_sch_nusers[i]); + } + + DP_PRINT_STATS("\n11ax MU_MIMO SCH STATS:"); + + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) { + DP_PRINT_STATS("ax_mu_mimo_sch_nusers_%u = %u", i, + dp_stats_buf->ax_mu_mimo_sch_nusers[i]); + } + + DP_PRINT_STATS("\n11ax OFDMA SCH STATS:"); + + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) { + DP_PRINT_STATS("ax_ofdma_sch_nusers_%u = %u", i, + dp_stats_buf->ax_ofdma_sch_nusers[i]); + } +} + +/* + * dp_print_tx_pdev_mu_mimo_mpdu_stats_tlv: display + * htt_tx_pdev_mu_mimo_mpdu_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_pdev_mu_mimo_mpdu_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_pdev_mu_mimo_mpdu_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_pdev_mpdu_stats_tlv *dp_stats_buf = + (htt_tx_pdev_mpdu_stats_tlv *)tag_buf; + + if (dp_stats_buf->tx_sched_mode == + HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) { + if (!dp_stats_buf->user_index) + DP_PRINT_STATS( + "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n"); + + if (dp_stats_buf->user_index < + HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS) { + DP_PRINT_STATS( + "ac_mu_mimo_mpdus_queued_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdus_queued_usr); + DP_PRINT_STATS( + "ac_mu_mimo_mpdus_tried_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdus_tried_usr); + DP_PRINT_STATS( + "ac_mu_mimo_mpdus_failed_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdus_failed_usr); + DP_PRINT_STATS( + "ac_mu_mimo_mpdus_requeued_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdus_requeued_usr); + DP_PRINT_STATS( + "ac_mu_mimo_err_no_ba_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->err_no_ba_usr); + DP_PRINT_STATS( + "ac_mu_mimo_mpdu_underrun_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdu_underrun_usr); + DP_PRINT_STATS( + "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n", + dp_stats_buf->user_index, + dp_stats_buf->ampdu_underrun_usr); + } + } + + if (dp_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) { + if (!dp_stats_buf->user_index) + DP_PRINT_STATS( + "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n"); + + if (dp_stats_buf->user_index < + HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS) { + DP_PRINT_STATS( + "ax_mu_mimo_mpdus_queued_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdus_queued_usr); + DP_PRINT_STATS( + "ax_mu_mimo_mpdus_tried_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdus_tried_usr); + DP_PRINT_STATS( + "ax_mu_mimo_mpdus_failed_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdus_failed_usr); + DP_PRINT_STATS( + "ax_mu_mimo_mpdus_requeued_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdus_requeued_usr); + DP_PRINT_STATS( + "ax_mu_mimo_err_no_ba_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->err_no_ba_usr); + DP_PRINT_STATS( + "ax_mu_mimo_mpdu_underrun_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdu_underrun_usr); + DP_PRINT_STATS( + "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n", + dp_stats_buf->user_index, + dp_stats_buf->ampdu_underrun_usr); + } + } + + if (dp_stats_buf->tx_sched_mode == + HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) { + if (!dp_stats_buf->user_index) + DP_PRINT_STATS( + "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n"); + + if (dp_stats_buf->user_index < + HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS) { + DP_PRINT_STATS( + "ax_mu_ofdma_mpdus_queued_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdus_queued_usr); + DP_PRINT_STATS( + "ax_mu_ofdma_mpdus_tried_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdus_tried_usr); + DP_PRINT_STATS( + "ax_mu_ofdma_mpdus_failed_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdus_failed_usr); + DP_PRINT_STATS( + "ax_mu_ofdma_mpdus_requeued_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdus_requeued_usr); + DP_PRINT_STATS( + "ax_mu_ofdma_err_no_ba_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->err_no_ba_usr); + DP_PRINT_STATS( + "ax_mu_ofdma_mpdu_underrun_usr_%u = %u", + dp_stats_buf->user_index, + dp_stats_buf->mpdu_underrun_usr); + DP_PRINT_STATS( + "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n", + dp_stats_buf->user_index, + dp_stats_buf->ampdu_underrun_usr); + } + } +} + +/* + * dp_print_sched_txq_cmd_posted_tlv_v: display htt_sched_txq_cmd_posted_tlv_v + * @tag_buf: buffer containing the tlv htt_sched_txq_cmd_posted_tlv_v + * + * return:void + */ +static inline void dp_print_sched_txq_cmd_posted_tlv_v(uint32_t *tag_buf) +{ + htt_sched_txq_cmd_posted_tlv_v *dp_stats_buf = + (htt_sched_txq_cmd_posted_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *sched_cmd_posted = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!sched_cmd_posted) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_PDEV_SCHED_TX_MODE_MAX); + + DP_PRINT_STATS("HTT_SCHED_TXQ_CMD_POSTED_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&sched_cmd_posted[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->sched_cmd_posted[i]); + } + DP_PRINT_STATS("sched_cmd_posted = %s\n", sched_cmd_posted); + qdf_mem_free(sched_cmd_posted); +} + +/* + * dp_print_sched_txq_cmd_reaped_tlv_v: display htt_sched_txq_cmd_reaped_tlv_v + * @tag_buf: buffer containing the tlv htt_sched_txq_cmd_reaped_tlv_v + * + * return:void + */ +static inline void dp_print_sched_txq_cmd_reaped_tlv_v(uint32_t *tag_buf) +{ + htt_sched_txq_cmd_reaped_tlv_v *dp_stats_buf = + (htt_sched_txq_cmd_reaped_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *sched_cmd_reaped = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!sched_cmd_reaped) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_PDEV_SCHED_TX_MODE_MAX); + + DP_PRINT_STATS("HTT_SCHED_TXQ_CMD_REAPED_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&sched_cmd_reaped[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->sched_cmd_reaped[i]); + } + DP_PRINT_STATS("sched_cmd_reaped = %s\n", sched_cmd_reaped); + qdf_mem_free(sched_cmd_reaped); +} + +/* + * dp_print_tx_pdev_stats_sched_per_txq_tlv: display + * htt_tx_pdev_stats_sched_per_txq_tlv + * @tag_buf: buffer containing the tlv htt_tx_pdev_stats_sched_per_txq_tlv + * + * return:void + */ +static inline void dp_print_tx_pdev_stats_sched_per_txq_tlv(uint32_t *tag_buf) +{ + htt_tx_pdev_stats_sched_per_txq_tlv *dp_stats_buf = + (htt_tx_pdev_stats_sched_per_txq_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:"); + DP_PRINT_STATS("mac_id__txq_id__word = %u", + dp_stats_buf->mac_id__txq_id__word); + DP_PRINT_STATS("sched_policy = %u", + dp_stats_buf->sched_policy); + DP_PRINT_STATS("last_sched_cmd_posted_timestamp = %u", + dp_stats_buf->last_sched_cmd_posted_timestamp); + DP_PRINT_STATS("last_sched_cmd_compl_timestamp = %u", + dp_stats_buf->last_sched_cmd_compl_timestamp); + DP_PRINT_STATS("sched_2_tac_lwm_count = %u", + dp_stats_buf->sched_2_tac_lwm_count); + DP_PRINT_STATS("sched_2_tac_ring_full = %u", + dp_stats_buf->sched_2_tac_ring_full); + DP_PRINT_STATS("sched_cmd_post_failure = %u", + dp_stats_buf->sched_cmd_post_failure); + DP_PRINT_STATS("num_active_tids = %u", + dp_stats_buf->num_active_tids); + DP_PRINT_STATS("num_ps_schedules = %u", + dp_stats_buf->num_ps_schedules); + DP_PRINT_STATS("sched_cmds_pending = %u", + dp_stats_buf->sched_cmds_pending); + DP_PRINT_STATS("num_tid_register = %u", + dp_stats_buf->num_tid_register); + DP_PRINT_STATS("num_tid_unregister = %u", + dp_stats_buf->num_tid_unregister); + DP_PRINT_STATS("num_qstats_queried = %u", + dp_stats_buf->num_qstats_queried); + DP_PRINT_STATS("qstats_update_pending = %u", + dp_stats_buf->qstats_update_pending); + DP_PRINT_STATS("last_qstats_query_timestamp = %u", + dp_stats_buf->last_qstats_query_timestamp); + DP_PRINT_STATS("num_tqm_cmdq_full = %u", + dp_stats_buf->num_tqm_cmdq_full); + DP_PRINT_STATS("num_de_sched_algo_trigger = %u", + dp_stats_buf->num_de_sched_algo_trigger); + DP_PRINT_STATS("num_rt_sched_algo_trigger = %u", + dp_stats_buf->num_rt_sched_algo_trigger); + DP_PRINT_STATS("num_tqm_sched_algo_trigger = %u", + dp_stats_buf->num_tqm_sched_algo_trigger); + DP_PRINT_STATS("notify_sched = %u\n", + dp_stats_buf->notify_sched); +} + +/* + * dp_print_stats_tx_sched_cmn_tlv: display htt_stats_tx_sched_cmn_tlv + * @tag_buf: buffer containing the tlv htt_stats_tx_sched_cmn_tlv + * + * return:void + */ +static inline void dp_print_stats_tx_sched_cmn_tlv(uint32_t *tag_buf) +{ + htt_stats_tx_sched_cmn_tlv *dp_stats_buf = + (htt_stats_tx_sched_cmn_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_STATS_TX_SCHED_CMN_TLV:"); + DP_PRINT_STATS("mac_id__word = %u", + dp_stats_buf->mac_id__word); + DP_PRINT_STATS("current_timestamp = %u\n", + dp_stats_buf->current_timestamp); +} + +/* + * dp_print_tx_tqm_gen_mpdu_stats_tlv_v: display htt_tx_tqm_gen_mpdu_stats_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_tqm_gen_mpdu_stats_tlv_v + * + * return:void + */ +static inline void dp_print_tx_tqm_gen_mpdu_stats_tlv_v(uint32_t *tag_buf) +{ + htt_tx_tqm_gen_mpdu_stats_tlv_v *dp_stats_buf = + (htt_tx_tqm_gen_mpdu_stats_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *gen_mpdu_end_reason = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!gen_mpdu_end_reason) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, + (uint32_t)HTT_TX_TQM_MAX_GEN_MPDU_END_REASON); + + DP_PRINT_STATS("HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&gen_mpdu_end_reason[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->gen_mpdu_end_reason[i]); + } + DP_PRINT_STATS("gen_mpdu_end_reason = %s\n", gen_mpdu_end_reason); + qdf_mem_free(gen_mpdu_end_reason); +} + +/* + * dp_print_tx_tqm_list_mpdu_stats_tlv_v: display htt_tx_tqm_list_mpdu_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_tqm_list_mpdu_stats_tlv_v + * + * return:void + */ +static inline void dp_print_tx_tqm_list_mpdu_stats_tlv_v(uint32_t *tag_buf) +{ + htt_tx_tqm_list_mpdu_stats_tlv_v *dp_stats_buf = + (htt_tx_tqm_list_mpdu_stats_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *list_mpdu_end_reason = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!list_mpdu_end_reason) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, + (uint32_t)HTT_TX_TQM_MAX_LIST_MPDU_END_REASON); + + DP_PRINT_STATS("HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&list_mpdu_end_reason[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->list_mpdu_end_reason[i]); + } + DP_PRINT_STATS("list_mpdu_end_reason = %s\n", + list_mpdu_end_reason); + qdf_mem_free(list_mpdu_end_reason); +} + +/* + * dp_print_tx_tqm_list_mpdu_cnt_tlv_v: display htt_tx_tqm_list_mpdu_cnt_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_tqm_list_mpdu_cnt_tlv_v + * + * return:void + */ +static inline void dp_print_tx_tqm_list_mpdu_cnt_tlv_v(uint32_t *tag_buf) +{ + htt_tx_tqm_list_mpdu_cnt_tlv_v *dp_stats_buf = + (htt_tx_tqm_list_mpdu_cnt_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *list_mpdu_cnt_hist = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!list_mpdu_cnt_hist) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, + (uint32_t)HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS); + + DP_PRINT_STATS("HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&list_mpdu_cnt_hist[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->list_mpdu_cnt_hist[i]); + } + DP_PRINT_STATS("list_mpdu_cnt_hist = %s\n", list_mpdu_cnt_hist); + qdf_mem_free(list_mpdu_cnt_hist); +} + +/* + * dp_print_tx_tqm_pdev_stats_tlv_v: display htt_tx_tqm_pdev_stats_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_tqm_pdev_stats_tlv_v + * + * return:void + */ +static inline void dp_print_tx_tqm_pdev_stats_tlv_v(uint32_t *tag_buf) +{ + htt_tx_tqm_pdev_stats_tlv_v *dp_stats_buf = + (htt_tx_tqm_pdev_stats_tlv_v *)tag_buf; + + DP_PRINT_STATS("HTT_TX_TQM_PDEV_STATS_TLV_V:"); + DP_PRINT_STATS("msdu_count = %u", + dp_stats_buf->msdu_count); + DP_PRINT_STATS("mpdu_count = %u", + dp_stats_buf->mpdu_count); + DP_PRINT_STATS("remove_msdu = %u", + dp_stats_buf->remove_msdu); + DP_PRINT_STATS("remove_mpdu = %u", + dp_stats_buf->remove_mpdu); + DP_PRINT_STATS("remove_msdu_ttl = %u", + dp_stats_buf->remove_msdu_ttl); + DP_PRINT_STATS("send_bar = %u", + dp_stats_buf->send_bar); + DP_PRINT_STATS("bar_sync = %u", + dp_stats_buf->bar_sync); + DP_PRINT_STATS("notify_mpdu = %u", + dp_stats_buf->notify_mpdu); + DP_PRINT_STATS("sync_cmd = %u", + dp_stats_buf->sync_cmd); + DP_PRINT_STATS("write_cmd = %u", + dp_stats_buf->write_cmd); + DP_PRINT_STATS("hwsch_trigger = %u", + dp_stats_buf->hwsch_trigger); + DP_PRINT_STATS("ack_tlv_proc = %u", + dp_stats_buf->ack_tlv_proc); + DP_PRINT_STATS("gen_mpdu_cmd = %u", + dp_stats_buf->gen_mpdu_cmd); + DP_PRINT_STATS("gen_list_cmd = %u", + dp_stats_buf->gen_list_cmd); + DP_PRINT_STATS("remove_mpdu_cmd = %u", + dp_stats_buf->remove_mpdu_cmd); + DP_PRINT_STATS("remove_mpdu_tried_cmd = %u", + dp_stats_buf->remove_mpdu_tried_cmd); + DP_PRINT_STATS("mpdu_queue_stats_cmd = %u", + dp_stats_buf->mpdu_queue_stats_cmd); + DP_PRINT_STATS("mpdu_head_info_cmd = %u", + dp_stats_buf->mpdu_head_info_cmd); + DP_PRINT_STATS("msdu_flow_stats_cmd = %u", + dp_stats_buf->msdu_flow_stats_cmd); + DP_PRINT_STATS("remove_msdu_cmd = %u", + dp_stats_buf->remove_msdu_cmd); + DP_PRINT_STATS("remove_msdu_ttl_cmd = %u", + dp_stats_buf->remove_msdu_ttl_cmd); + DP_PRINT_STATS("flush_cache_cmd = %u", + dp_stats_buf->flush_cache_cmd); + DP_PRINT_STATS("update_mpduq_cmd = %u", + dp_stats_buf->update_mpduq_cmd); + DP_PRINT_STATS("enqueue = %u", + dp_stats_buf->enqueue); + DP_PRINT_STATS("enqueue_notify = %u", + dp_stats_buf->enqueue_notify); + DP_PRINT_STATS("notify_mpdu_at_head = %u", + dp_stats_buf->notify_mpdu_at_head); + DP_PRINT_STATS("notify_mpdu_state_valid = %u\n", + dp_stats_buf->notify_mpdu_state_valid); +} + +/* + * dp_print_tx_tqm_cmn_stats_tlv: display htt_tx_tqm_cmn_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_tqm_cmn_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_tqm_cmn_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_tqm_cmn_stats_tlv *dp_stats_buf = + (htt_tx_tqm_cmn_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_TQM_CMN_STATS_TLV:"); + DP_PRINT_STATS("mac_id__word = %u", + dp_stats_buf->mac_id__word); + DP_PRINT_STATS("max_cmdq_id = %u", + dp_stats_buf->max_cmdq_id); + DP_PRINT_STATS("list_mpdu_cnt_hist_intvl = %u", + dp_stats_buf->list_mpdu_cnt_hist_intvl); + DP_PRINT_STATS("add_msdu = %u", + dp_stats_buf->add_msdu); + DP_PRINT_STATS("q_empty = %u", + dp_stats_buf->q_empty); + DP_PRINT_STATS("q_not_empty = %u", + dp_stats_buf->q_not_empty); + DP_PRINT_STATS("drop_notification = %u", + dp_stats_buf->drop_notification); + DP_PRINT_STATS("desc_threshold = %u\n", + dp_stats_buf->desc_threshold); +} + +/* + * dp_print_tx_tqm_error_stats_tlv: display htt_tx_tqm_error_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_tqm_error_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_tqm_error_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_tqm_error_stats_tlv *dp_stats_buf = + (htt_tx_tqm_error_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_TQM_ERROR_STATS_TLV:"); + DP_PRINT_STATS("q_empty_failure = %u", + dp_stats_buf->q_empty_failure); + DP_PRINT_STATS("q_not_empty_failure = %u", + dp_stats_buf->q_not_empty_failure); + DP_PRINT_STATS("add_msdu_failure = %u\n", + dp_stats_buf->add_msdu_failure); +} + +/* + * dp_print_tx_tqm_cmdq_status_tlv: display htt_tx_tqm_cmdq_status_tlv + * @tag_buf: buffer containing the tlv htt_tx_tqm_cmdq_status_tlv + * + * return:void + */ +static inline void dp_print_tx_tqm_cmdq_status_tlv(uint32_t *tag_buf) +{ + htt_tx_tqm_cmdq_status_tlv *dp_stats_buf = + (htt_tx_tqm_cmdq_status_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_TQM_CMDQ_STATUS_TLV:"); + DP_PRINT_STATS("mac_id__cmdq_id__word = %u", + dp_stats_buf->mac_id__cmdq_id__word); + DP_PRINT_STATS("sync_cmd = %u", + dp_stats_buf->sync_cmd); + DP_PRINT_STATS("write_cmd = %u", + dp_stats_buf->write_cmd); + DP_PRINT_STATS("gen_mpdu_cmd = %u", + dp_stats_buf->gen_mpdu_cmd); + DP_PRINT_STATS("mpdu_queue_stats_cmd = %u", + dp_stats_buf->mpdu_queue_stats_cmd); + DP_PRINT_STATS("mpdu_head_info_cmd = %u", + dp_stats_buf->mpdu_head_info_cmd); + DP_PRINT_STATS("msdu_flow_stats_cmd = %u", + dp_stats_buf->msdu_flow_stats_cmd); + DP_PRINT_STATS("remove_mpdu_cmd = %u", + dp_stats_buf->remove_mpdu_cmd); + DP_PRINT_STATS("remove_msdu_cmd = %u", + dp_stats_buf->remove_msdu_cmd); + DP_PRINT_STATS("flush_cache_cmd = %u", + dp_stats_buf->flush_cache_cmd); + DP_PRINT_STATS("update_mpduq_cmd = %u", + dp_stats_buf->update_mpduq_cmd); + DP_PRINT_STATS("update_msduq_cmd = %u\n", + dp_stats_buf->update_msduq_cmd); +} + +/* + * dp_print_tx_de_eapol_packets_stats_tlv: display htt_tx_de_eapol_packets_stats + * @tag_buf: buffer containing the tlv htt_tx_de_eapol_packets_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_de_eapol_packets_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_eapol_packets_stats_tlv *dp_stats_buf = + (htt_tx_de_eapol_packets_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:"); + DP_PRINT_STATS("m1_packets = %u", + dp_stats_buf->m1_packets); + DP_PRINT_STATS("m2_packets = %u", + dp_stats_buf->m2_packets); + DP_PRINT_STATS("m3_packets = %u", + dp_stats_buf->m3_packets); + DP_PRINT_STATS("m4_packets = %u", + dp_stats_buf->m4_packets); + DP_PRINT_STATS("g1_packets = %u", + dp_stats_buf->g1_packets); + DP_PRINT_STATS("g2_packets = %u\n", + dp_stats_buf->g2_packets); +} + +/* + * dp_print_tx_de_classify_failed_stats_tlv: display + * htt_tx_de_classify_failed_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_de_classify_failed_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_de_classify_failed_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_classify_failed_stats_tlv *dp_stats_buf = + (htt_tx_de_classify_failed_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:"); + DP_PRINT_STATS("ap_bss_peer_not_found = %u", + dp_stats_buf->ap_bss_peer_not_found); + DP_PRINT_STATS("ap_bcast_mcast_no_peer = %u", + dp_stats_buf->ap_bcast_mcast_no_peer); + DP_PRINT_STATS("sta_delete_in_progress = %u", + dp_stats_buf->sta_delete_in_progress); + DP_PRINT_STATS("ibss_no_bss_peer = %u", + dp_stats_buf->ibss_no_bss_peer); + DP_PRINT_STATS("invaild_vdev_type = %u", + dp_stats_buf->invaild_vdev_type); + DP_PRINT_STATS("invalid_ast_peer_entry = %u", + dp_stats_buf->invalid_ast_peer_entry); + DP_PRINT_STATS("peer_entry_invalid = %u", + dp_stats_buf->peer_entry_invalid); + DP_PRINT_STATS("ethertype_not_ip = %u", + dp_stats_buf->ethertype_not_ip); + DP_PRINT_STATS("eapol_lookup_failed = %u", + dp_stats_buf->eapol_lookup_failed); + DP_PRINT_STATS("qpeer_not_allow_data = %u", + dp_stats_buf->qpeer_not_allow_data); + DP_PRINT_STATS("fse_tid_override = %u\n", + dp_stats_buf->fse_tid_override); +} + +/* + * dp_print_tx_de_classify_stats_tlv: display htt_tx_de_classify_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_de_classify_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_de_classify_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_classify_stats_tlv *dp_stats_buf = + (htt_tx_de_classify_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_DE_CLASSIFY_STATS_TLV:"); + DP_PRINT_STATS("arp_packets = %u", + dp_stats_buf->arp_packets); + DP_PRINT_STATS("igmp_packets = %u", + dp_stats_buf->igmp_packets); + DP_PRINT_STATS("dhcp_packets = %u", + dp_stats_buf->dhcp_packets); + DP_PRINT_STATS("host_inspected = %u", + dp_stats_buf->host_inspected); + DP_PRINT_STATS("htt_included = %u", + dp_stats_buf->htt_included); + DP_PRINT_STATS("htt_valid_mcs = %u", + dp_stats_buf->htt_valid_mcs); + DP_PRINT_STATS("htt_valid_nss = %u", + dp_stats_buf->htt_valid_nss); + DP_PRINT_STATS("htt_valid_preamble_type = %u", + dp_stats_buf->htt_valid_preamble_type); + DP_PRINT_STATS("htt_valid_chainmask = %u", + dp_stats_buf->htt_valid_chainmask); + DP_PRINT_STATS("htt_valid_guard_interval = %u", + dp_stats_buf->htt_valid_guard_interval); + DP_PRINT_STATS("htt_valid_retries = %u", + dp_stats_buf->htt_valid_retries); + DP_PRINT_STATS("htt_valid_bw_info = %u", + dp_stats_buf->htt_valid_bw_info); + DP_PRINT_STATS("htt_valid_power = %u", + dp_stats_buf->htt_valid_power); + DP_PRINT_STATS("htt_valid_key_flags = %u", + dp_stats_buf->htt_valid_key_flags); + DP_PRINT_STATS("htt_valid_no_encryption = %u", + dp_stats_buf->htt_valid_no_encryption); + DP_PRINT_STATS("fse_entry_count = %u", + dp_stats_buf->fse_entry_count); + DP_PRINT_STATS("fse_priority_be = %u", + dp_stats_buf->fse_priority_be); + DP_PRINT_STATS("fse_priority_high = %u", + dp_stats_buf->fse_priority_high); + DP_PRINT_STATS("fse_priority_low = %u", + dp_stats_buf->fse_priority_low); + DP_PRINT_STATS("fse_traffic_ptrn_be = %u", + dp_stats_buf->fse_traffic_ptrn_be); + DP_PRINT_STATS("fse_traffic_ptrn_over_sub = %u", + dp_stats_buf->fse_traffic_ptrn_over_sub); + DP_PRINT_STATS("fse_traffic_ptrn_bursty = %u", + dp_stats_buf->fse_traffic_ptrn_bursty); + DP_PRINT_STATS("fse_traffic_ptrn_interactive = %u", + dp_stats_buf->fse_traffic_ptrn_interactive); + DP_PRINT_STATS("fse_traffic_ptrn_periodic = %u", + dp_stats_buf->fse_traffic_ptrn_periodic); + DP_PRINT_STATS("fse_hwqueue_alloc = %u", + dp_stats_buf->fse_hwqueue_alloc); + DP_PRINT_STATS("fse_hwqueue_created = %u", + dp_stats_buf->fse_hwqueue_created); + DP_PRINT_STATS("fse_hwqueue_send_to_host = %u", + dp_stats_buf->fse_hwqueue_send_to_host); + DP_PRINT_STATS("mcast_entry = %u", + dp_stats_buf->mcast_entry); + DP_PRINT_STATS("bcast_entry = %u\n", + dp_stats_buf->bcast_entry); +} + +/* + * dp_print_tx_de_classify_status_stats_tlv: display + * htt_tx_de_classify_status_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_de_classify_status_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_de_classify_status_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_classify_status_stats_tlv *dp_stats_buf = + (htt_tx_de_classify_status_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:"); + DP_PRINT_STATS("eok = %u", + dp_stats_buf->eok); + DP_PRINT_STATS("classify_done = %u", + dp_stats_buf->classify_done); + DP_PRINT_STATS("lookup_failed = %u", + dp_stats_buf->lookup_failed); + DP_PRINT_STATS("send_host_dhcp = %u", + dp_stats_buf->send_host_dhcp); + DP_PRINT_STATS("send_host_mcast = %u", + dp_stats_buf->send_host_mcast); + DP_PRINT_STATS("send_host_unknown_dest = %u", + dp_stats_buf->send_host_unknown_dest); + DP_PRINT_STATS("send_host = %u", + dp_stats_buf->send_host); + DP_PRINT_STATS("status_invalid = %u\n", + dp_stats_buf->status_invalid); +} + +/* + * dp_print_tx_de_enqueue_packets_stats_tlv: display + * htt_tx_de_enqueue_packets_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_de_enqueue_packets_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_de_enqueue_packets_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_enqueue_packets_stats_tlv *dp_stats_buf = + (htt_tx_de_enqueue_packets_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:"); + DP_PRINT_STATS("enqueued_pkts = %u", + dp_stats_buf->enqueued_pkts); + DP_PRINT_STATS("to_tqm = %u", + dp_stats_buf->to_tqm); + DP_PRINT_STATS("to_tqm_bypass = %u\n", + dp_stats_buf->to_tqm_bypass); +} + +/* + * dp_print_tx_de_enqueue_discard_stats_tlv: display + * htt_tx_de_enqueue_discard_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_de_enqueue_discard_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_de_enqueue_discard_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_enqueue_discard_stats_tlv *dp_stats_buf = + (htt_tx_de_enqueue_discard_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:"); + DP_PRINT_STATS("discarded_pkts = %u", + dp_stats_buf->discarded_pkts); + DP_PRINT_STATS("local_frames = %u", + dp_stats_buf->local_frames); + DP_PRINT_STATS("is_ext_msdu = %u\n", + dp_stats_buf->is_ext_msdu); +} + +/* + * dp_print_tx_de_compl_stats_tlv: display htt_tx_de_compl_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_de_compl_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_de_compl_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_compl_stats_tlv *dp_stats_buf = + (htt_tx_de_compl_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_DE_COMPL_STATS_TLV:"); + DP_PRINT_STATS("tcl_dummy_frame = %u", + dp_stats_buf->tcl_dummy_frame); + DP_PRINT_STATS("tqm_dummy_frame = %u", + dp_stats_buf->tqm_dummy_frame); + DP_PRINT_STATS("tqm_notify_frame = %u", + dp_stats_buf->tqm_notify_frame); + DP_PRINT_STATS("fw2wbm_enq = %u", + dp_stats_buf->fw2wbm_enq); + DP_PRINT_STATS("tqm_bypass_frame = %u\n", + dp_stats_buf->tqm_bypass_frame); +} + +/* + * dp_print_tx_de_cmn_stats_tlv: display htt_tx_de_cmn_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_de_cmn_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_de_cmn_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_cmn_stats_tlv *dp_stats_buf = + (htt_tx_de_cmn_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_TX_DE_CMN_STATS_TLV:"); + DP_PRINT_STATS("mac_id__word = %u", + dp_stats_buf->mac_id__word); + DP_PRINT_STATS("tcl2fw_entry_count = %u", + dp_stats_buf->tcl2fw_entry_count); + DP_PRINT_STATS("not_to_fw = %u", + dp_stats_buf->not_to_fw); + DP_PRINT_STATS("invalid_pdev_vdev_peer = %u", + dp_stats_buf->invalid_pdev_vdev_peer); + DP_PRINT_STATS("tcl_res_invalid_addrx = %u", + dp_stats_buf->tcl_res_invalid_addrx); + DP_PRINT_STATS("wbm2fw_entry_count = %u", + dp_stats_buf->wbm2fw_entry_count); + DP_PRINT_STATS("invalid_pdev = %u\n", + dp_stats_buf->invalid_pdev); +} + +/* + * dp_print_ring_if_stats_tlv: display htt_ring_if_stats_tlv + * @tag_buf: buffer containing the tlv htt_ring_if_stats_tlv + * + * return:void + */ +static inline void dp_print_ring_if_stats_tlv(uint32_t *tag_buf) +{ + htt_ring_if_stats_tlv *dp_stats_buf = + (htt_ring_if_stats_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *wm_hit_count = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!wm_hit_count) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + DP_PRINT_STATS("HTT_RING_IF_STATS_TLV:"); + DP_PRINT_STATS("base_addr = %u", + dp_stats_buf->base_addr); + DP_PRINT_STATS("elem_size = %u", + dp_stats_buf->elem_size); + DP_PRINT_STATS("num_elems__prefetch_tail_idx = %u", + dp_stats_buf->num_elems__prefetch_tail_idx); + DP_PRINT_STATS("head_idx__tail_idx = %u", + dp_stats_buf->head_idx__tail_idx); + DP_PRINT_STATS("shadow_head_idx__shadow_tail_idx = %u", + dp_stats_buf->shadow_head_idx__shadow_tail_idx); + DP_PRINT_STATS("num_tail_incr = %u", + dp_stats_buf->num_tail_incr); + DP_PRINT_STATS("lwm_thresh__hwm_thresh = %u", + dp_stats_buf->lwm_thresh__hwm_thresh); + DP_PRINT_STATS("overrun_hit_count = %u", + dp_stats_buf->overrun_hit_count); + DP_PRINT_STATS("underrun_hit_count = %u", + dp_stats_buf->underrun_hit_count); + DP_PRINT_STATS("prod_blockwait_count = %u", + dp_stats_buf->prod_blockwait_count); + DP_PRINT_STATS("cons_blockwait_count = %u", + dp_stats_buf->cons_blockwait_count); + + for (i = 0; i < DP_HTT_LOW_WM_HIT_COUNT_LEN; i++) { + index += qdf_snprint(&wm_hit_count[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->low_wm_hit_count[i]); + } + DP_PRINT_STATS("low_wm_hit_count = %s ", wm_hit_count); + + qdf_mem_zero(wm_hit_count, DP_MAX_STRING_LEN); + + index = 0; + for (i = 0; i < DP_HTT_HIGH_WM_HIT_COUNT_LEN; i++) { + index += qdf_snprint(&wm_hit_count[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->high_wm_hit_count[i]); + } + DP_PRINT_STATS("high_wm_hit_count = %s\n", wm_hit_count); +} + +/* + * dp_print_ring_if_cmn_tlv: display htt_ring_if_cmn_tlv + * @tag_buf: buffer containing the tlv htt_ring_if_cmn_tlv + * + * return:void + */ +static inline void dp_print_ring_if_cmn_tlv(uint32_t *tag_buf) +{ + htt_ring_if_cmn_tlv *dp_stats_buf = + (htt_ring_if_cmn_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_RING_IF_CMN_TLV:"); + DP_PRINT_STATS("mac_id__word = %u", + dp_stats_buf->mac_id__word); + DP_PRINT_STATS("num_records = %u\n", + dp_stats_buf->num_records); +} + +/* + * dp_print_sfm_client_user_tlv_v: display htt_sfm_client_user_tlv_v + * @tag_buf: buffer containing the tlv htt_sfm_client_user_tlv_v + * + * return:void + */ +static inline void dp_print_sfm_client_user_tlv_v(uint32_t *tag_buf) +{ + htt_sfm_client_user_tlv_v *dp_stats_buf = + (htt_sfm_client_user_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *dwords_used_by_user_n = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!dwords_used_by_user_n) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + DP_PRINT_STATS("HTT_SFM_CLIENT_USER_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&dwords_used_by_user_n[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->dwords_used_by_user_n[i]); + } + DP_PRINT_STATS("dwords_used_by_user_n = %s\n", + dwords_used_by_user_n); + qdf_mem_free(dwords_used_by_user_n); +} + +/* + * dp_print_sfm_client_tlv: display htt_sfm_client_tlv + * @tag_buf: buffer containing the tlv htt_sfm_client_tlv + * + * return:void + */ +static inline void dp_print_sfm_client_tlv(uint32_t *tag_buf) +{ + htt_sfm_client_tlv *dp_stats_buf = + (htt_sfm_client_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_SFM_CLIENT_TLV:"); + DP_PRINT_STATS("client_id = %u", + dp_stats_buf->client_id); + DP_PRINT_STATS("buf_min = %u", + dp_stats_buf->buf_min); + DP_PRINT_STATS("buf_max = %u", + dp_stats_buf->buf_max); + DP_PRINT_STATS("buf_busy = %u", + dp_stats_buf->buf_busy); + DP_PRINT_STATS("buf_alloc = %u", + dp_stats_buf->buf_alloc); + DP_PRINT_STATS("buf_avail = %u", + dp_stats_buf->buf_avail); + DP_PRINT_STATS("num_users = %u\n", + dp_stats_buf->num_users); +} + +/* + * dp_print_sfm_cmn_tlv: display htt_sfm_cmn_tlv + * @tag_buf: buffer containing the tlv htt_sfm_cmn_tlv + * + * return:void + */ +static inline void dp_print_sfm_cmn_tlv(uint32_t *tag_buf) +{ + htt_sfm_cmn_tlv *dp_stats_buf = + (htt_sfm_cmn_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_SFM_CMN_TLV:"); + DP_PRINT_STATS("mac_id__word = %u", + dp_stats_buf->mac_id__word); + DP_PRINT_STATS("buf_total = %u", + dp_stats_buf->buf_total); + DP_PRINT_STATS("mem_empty = %u", + dp_stats_buf->mem_empty); + DP_PRINT_STATS("deallocate_bufs = %u", + dp_stats_buf->deallocate_bufs); + DP_PRINT_STATS("num_records = %u\n", + dp_stats_buf->num_records); +} + +/* + * dp_print_sring_stats_tlv: display htt_sring_stats_tlv + * @tag_buf: buffer containing the tlv htt_sring_stats_tlv + * + * return:void + */ +static inline void dp_print_sring_stats_tlv(uint32_t *tag_buf) +{ + htt_sring_stats_tlv *dp_stats_buf = + (htt_sring_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_SRING_STATS_TLV:"); + DP_PRINT_STATS("mac_id__ring_id__arena__ep = %u", + dp_stats_buf->mac_id__ring_id__arena__ep); + DP_PRINT_STATS("base_addr_lsb = %u", + dp_stats_buf->base_addr_lsb); + DP_PRINT_STATS("base_addr_msb = %u", + dp_stats_buf->base_addr_msb); + DP_PRINT_STATS("ring_size = %u", + dp_stats_buf->ring_size); + DP_PRINT_STATS("elem_size = %u", + dp_stats_buf->elem_size); + DP_PRINT_STATS("num_avail_words__num_valid_words = %u", + dp_stats_buf->num_avail_words__num_valid_words); + DP_PRINT_STATS("head_ptr__tail_ptr = %u", + dp_stats_buf->head_ptr__tail_ptr); + DP_PRINT_STATS("consumer_empty__producer_full = %u", + dp_stats_buf->consumer_empty__producer_full); + DP_PRINT_STATS("prefetch_count__internal_tail_ptr = %u\n", + dp_stats_buf->prefetch_count__internal_tail_ptr); +} + +/* + * dp_print_sring_cmn_tlv: display htt_sring_cmn_tlv + * @tag_buf: buffer containing the tlv htt_sring_cmn_tlv + * + * return:void + */ +static inline void dp_print_sring_cmn_tlv(uint32_t *tag_buf) +{ + htt_sring_cmn_tlv *dp_stats_buf = + (htt_sring_cmn_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_SRING_CMN_TLV:"); + DP_PRINT_STATS("num_records = %u\n", + dp_stats_buf->num_records); +} + +/* + * dp_print_tx_pdev_rate_stats_tlv: display htt_tx_pdev_rate_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_pdev_rate_stats_tlv + * + * return:void + */ +static void dp_print_tx_pdev_rate_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_pdev_rate_stats_tlv *dp_stats_buf = + (htt_tx_pdev_rate_stats_tlv *)tag_buf; + uint8_t i, j; + uint16_t index = 0; + char *tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]; + char *ac_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]; + char *ax_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]; + char *ofdma_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]; + char *str_buf = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!str_buf) { + dp_err("Output buffer not allocated"); + return; + } + + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; i++) { + tx_gi[i] = (char *)qdf_mem_malloc(DP_MAX_STRING_LEN); + if (!tx_gi[i]) { + dp_err("Unable to allocate buffer for tx_gi"); + goto fail1; + } + ac_mu_mimo_tx_gi[i] = (char *)qdf_mem_malloc(DP_MAX_STRING_LEN); + if (!ac_mu_mimo_tx_gi[i]) { + dp_err("Unable to allocate buffer for ac_mu_mimo_tx_gi"); + goto fail2; + } + ax_mu_mimo_tx_gi[i] = (char *)qdf_mem_malloc(DP_MAX_STRING_LEN); + if (!ax_mu_mimo_tx_gi[i]) { + dp_err("Unable to allocate buffer for ax_mu_mimo_tx_gi"); + goto fail3; + } + ofdma_tx_gi[i] = (char *)qdf_mem_malloc(DP_MAX_STRING_LEN); + if (!ofdma_tx_gi[i]) { + dp_err("Unable to allocate buffer for ofdma_tx_gi"); + goto fail4; + } + } + + DP_PRINT_STATS("HTT_TX_PDEV_RATE_STATS_TLV:"); + DP_PRINT_STATS("mac_id__word = %u", + dp_stats_buf->mac_id__word); + DP_PRINT_STATS("tx_ldpc = %u", + dp_stats_buf->tx_ldpc); + DP_PRINT_STATS("rts_cnt = %u", + dp_stats_buf->rts_cnt); + DP_PRINT_STATS("rts_success = %u", + dp_stats_buf->rts_success); + + DP_PRINT_STATS("ack_rssi = %u", + dp_stats_buf->ack_rssi); + + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_TX_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tx_mcs[i]); + } + DP_PRINT_STATS("tx_mcs = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_TX_SU_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tx_su_mcs[i]); + } + DP_PRINT_STATS("tx_su_mcs = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_TX_MU_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tx_mu_mcs[i]); + } + DP_PRINT_STATS("tx_mu_mcs = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_TX_NSS_LEN; i++) { + /* 0 stands for NSS 1, 1 stands for NSS 2, etc. */ + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", (i + 1), + dp_stats_buf->tx_nss[i]); + } + DP_PRINT_STATS("tx_nss = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_TX_BW_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tx_bw[i]); + } + DP_PRINT_STATS("tx_bw = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tx_stbc[i]); + } + DP_PRINT_STATS("tx_stbc = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_TX_PREAM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tx_pream[i]); + } + DP_PRINT_STATS("tx_pream = %s ", str_buf); + + for (j = 0; j < DP_HTT_PDEV_TX_GI_LEN; j++) { + index = 0; + qdf_mem_zero(tx_gi[j], DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&tx_gi[j][index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->tx_gi[j][i]); + } + DP_PRINT_STATS("tx_gi[%u] = %s ", j, tx_gi[j]); + } + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_TX_DCM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->tx_dcm[i]); + } + DP_PRINT_STATS("tx_dcm = %s\n", str_buf); + + DP_PRINT_STATS("rts_success = %u", + dp_stats_buf->rts_success); + DP_PRINT_STATS("ac_mu_mimo_tx_ldpc = %u", + dp_stats_buf->ac_mu_mimo_tx_ldpc); + DP_PRINT_STATS("ax_mu_mimo_tx_ldpc = %u", + dp_stats_buf->ax_mu_mimo_tx_ldpc); + DP_PRINT_STATS("ofdma_tx_ldpc = %u", + dp_stats_buf->ofdma_tx_ldpc); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->tx_legacy_cck_rate[i]); + } + DP_PRINT_STATS("tx_legacy_cck_rate = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->tx_legacy_ofdm_rate[i]); + } + DP_PRINT_STATS("tx_legacy_ofdm_rate = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_LTF; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->tx_he_ltf[i]); + } + DP_PRINT_STATS("tx_he_ltf = %s ", str_buf); + + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->ofdma_tx_mcs[i]); + } + DP_PRINT_STATS("ofdma_tx_mcs = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->ac_mu_mimo_tx_mcs[i]); + } + DP_PRINT_STATS("ac_mu_mimo_tx_mcs = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->ax_mu_mimo_tx_mcs[i]); + } + DP_PRINT_STATS("ax_mu_mimo_tx_mcs = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->ofdma_tx_mcs[i]); + } + DP_PRINT_STATS("ofdma_tx_mcs = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->ac_mu_mimo_tx_nss[i]); + } + DP_PRINT_STATS("ac_mu_mimo_tx_nss = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->ax_mu_mimo_tx_nss[i]); + } + DP_PRINT_STATS("ax_mu_mimo_tx_nss = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->ofdma_tx_nss[i]); + } + DP_PRINT_STATS("ofdma_tx_nss = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_BW_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->ac_mu_mimo_tx_bw[i]); + } + DP_PRINT_STATS("ac_mu_mimo_tx_bw = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_BW_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->ax_mu_mimo_tx_bw[i]); + } + DP_PRINT_STATS("ax_mu_mimo_tx_bw = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_BW_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->ofdma_tx_bw[i]); + } + + DP_PRINT_STATS("ofdma_tx_bw = %s ", str_buf); + + for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { + index = 0; + qdf_mem_zero(ac_mu_mimo_tx_gi[j], DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&ac_mu_mimo_tx_gi[j][index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf-> + ac_mu_mimo_tx_gi[j][i]); + } + DP_PRINT_STATS("ac_mu_mimo_tx_gi[%u] = %s ", + j, ac_mu_mimo_tx_gi[j]); + } + + for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { + index = 0; + qdf_mem_zero(ax_mu_mimo_tx_gi[j], DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&ax_mu_mimo_tx_gi[j][index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->ax_mu_mimo_tx_gi[j][i]); + } + DP_PRINT_STATS("ax_mu_mimo_tx_gi[%u] = %s ", + j, ax_mu_mimo_tx_gi[j]); + } + + for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { + index = 0; + qdf_mem_zero(ofdma_tx_gi[j], DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&ofdma_tx_gi[j][index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->ofdma_tx_gi[j][i]); + } + DP_PRINT_STATS("ofdma_tx_gi[%u] = %s ", + j, ofdma_tx_gi[j]); + } + + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; i++) + qdf_mem_free(ofdma_tx_gi[i]); + +fail4: + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; i++) + qdf_mem_free(ax_mu_mimo_tx_gi[i]); +fail3: + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; i++) + qdf_mem_free(ac_mu_mimo_tx_gi[i]); +fail2: + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; i++) + qdf_mem_free(tx_gi[i]); + +fail1: + qdf_mem_free(str_buf); +} + +/* + * dp_print_rx_pdev_rate_stats_tlv: display htt_rx_pdev_rate_stats_tlv + * @tag_buf: buffer containing the tlv htt_rx_pdev_rate_stats_tlv + * + * return:void + */ +static void dp_print_rx_pdev_rate_stats_tlv(struct dp_pdev *pdev, + uint32_t *tag_buf) +{ + htt_rx_pdev_rate_stats_tlv *dp_stats_buf = + (htt_rx_pdev_rate_stats_tlv *)tag_buf; + uint8_t i, j; + uint16_t index = 0; + char *rssi_chain[DP_HTT_RSSI_CHAIN_LEN]; + char *rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]; + char *str_buf = qdf_mem_malloc(DP_MAX_STRING_LEN); + char *ul_ofdma_rx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]; + + if (!str_buf) { + dp_err("Output buffer not allocated"); + return; + } + + for (i = 0; i < DP_HTT_RSSI_CHAIN_LEN; i++) { + rssi_chain[i] = qdf_mem_malloc(DP_MAX_STRING_LEN); + if (!rssi_chain[i]) { + dp_err("Unable to allocate buffer for rssi_chain"); + goto fail1; + } + } + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; i++) { + rx_gi[i] = qdf_mem_malloc(DP_MAX_STRING_LEN); + if (!rx_gi[i]) { + dp_err("Unable to allocate buffer for rx_gi"); + goto fail2; + } + } + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; i++) { + ul_ofdma_rx_gi[i] = qdf_mem_malloc(DP_MAX_STRING_LEN); + if (!ul_ofdma_rx_gi[i]) { + dp_err("Unable to allocate buffer for ul_ofdma_rx_gi"); + goto fail3; + } + } + + DP_PRINT_STATS("ul_ofdma_data_rx_ppdu = %d", + pdev->stats.ul_ofdma.data_rx_ppdu); + + for (i = 0; i < OFDMA_NUM_USERS; i++) { + DP_PRINT_STATS("ul_ofdma data %d user = %d", + i, pdev->stats.ul_ofdma.data_users[i]); + } + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < OFDMA_NUM_RU_SIZE; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + pdev->stats.ul_ofdma.data_rx_ru_size[i]); + } + DP_PRINT_STATS("ul_ofdma_data_rx_ru_size= %s", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < OFDMA_NUM_RU_SIZE; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + pdev->stats.ul_ofdma.nondata_rx_ru_size[i]); + } + DP_PRINT_STATS("ul_ofdma_nondata_rx_ru_size= %s", str_buf); + + DP_PRINT_STATS("HTT_RX_PDEV_RATE_STATS_TLV:"); + DP_PRINT_STATS("mac_id__word = %u", + dp_stats_buf->mac_id__word); + DP_PRINT_STATS("nsts = %u", + dp_stats_buf->nsts); + DP_PRINT_STATS("rx_ldpc = %u", + dp_stats_buf->rx_ldpc); + DP_PRINT_STATS("rts_cnt = %u", + dp_stats_buf->rts_cnt); + DP_PRINT_STATS("rssi_mgmt = %u", + dp_stats_buf->rssi_mgmt); + DP_PRINT_STATS("rssi_data = %u", + dp_stats_buf->rssi_data); + DP_PRINT_STATS("rssi_comb = %u", + dp_stats_buf->rssi_comb); + DP_PRINT_STATS("rssi_in_dbm = %d", + dp_stats_buf->rssi_in_dbm); + DP_PRINT_STATS("rx_11ax_su_ext = %u", + dp_stats_buf->rx_11ax_su_ext); + DP_PRINT_STATS("rx_11ac_mumimo = %u", + dp_stats_buf->rx_11ac_mumimo); + DP_PRINT_STATS("rx_11ax_mumimo = %u", + dp_stats_buf->rx_11ax_mumimo); + DP_PRINT_STATS("rx_11ax_ofdma = %u", + dp_stats_buf->rx_11ax_ofdma); + DP_PRINT_STATS("txbf = %u", + dp_stats_buf->txbf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_RX_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->rx_mcs[i]); + } + DP_PRINT_STATS("rx_mcs = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_RX_NSS_LEN; i++) { + /* 0 stands for NSS 1, 1 stands for NSS 2, etc. */ + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", (i + 1), + dp_stats_buf->rx_nss[i]); + } + DP_PRINT_STATS("rx_nss = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_RX_DCM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->rx_dcm[i]); + } + DP_PRINT_STATS("rx_dcm = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->rx_stbc[i]); + } + DP_PRINT_STATS("rx_stbc = %s ", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_RX_BW_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->rx_bw[i]); + } + DP_PRINT_STATS("rx_bw = %s ", str_buf); + + for (j = 0; j < DP_HTT_RSSI_CHAIN_LEN; j++) { + index = 0; + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++) { + index += qdf_snprint(&rssi_chain[j][index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->rssi_chain[j][i]); + } + DP_PRINT_STATS("rssi_chain[%u] = %s ", j, rssi_chain[j]); + } + + for (j = 0; j < DP_HTT_RX_GI_LEN; j++) { + index = 0; + qdf_mem_zero(rx_gi[j], DP_MAX_STRING_LEN); + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&rx_gi[j][index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->rx_gi[j][i]); + } + DP_PRINT_STATS("rx_gi[%u] = %s ", j, rx_gi[j]); + } + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < DP_HTT_RX_PREAM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, + dp_stats_buf->rx_pream[i]); + } + DP_PRINT_STATS("rx_pream = %s", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, + dp_stats_buf->rx_legacy_cck_rate[i]); + } + DP_PRINT_STATS("rx_legacy_cck_rate = %s", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, + dp_stats_buf->rx_legacy_ofdm_rate[i]); + } + DP_PRINT_STATS("rx_legacy_ofdm_rate = %s", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->ul_ofdma_rx_mcs[i]); + } + DP_PRINT_STATS("ul_ofdma_rx_mcs = %s", str_buf); + + DP_PRINT_STATS("rx_11ax_ul_ofdma = %u", + dp_stats_buf->rx_11ax_ul_ofdma); + + for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { + index = 0; + qdf_mem_zero(ul_ofdma_rx_gi[j], DP_MAX_STRING_LEN); + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&ul_ofdma_rx_gi[j][index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf-> + ul_ofdma_rx_gi[j][i]); + } + DP_PRINT_STATS("ul_ofdma_rx_gi[%u] = %s ", + j, ul_ofdma_rx_gi[j]); + } + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->ul_ofdma_rx_nss[i]); + } + DP_PRINT_STATS("ul_ofdma_rx_nss = %s", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_BW_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->ul_ofdma_rx_bw[i]); + } + DP_PRINT_STATS("ul_ofdma_rx_bw = %s", str_buf); + DP_PRINT_STATS("ul_ofdma_rx_stbc = %u", + dp_stats_buf->ul_ofdma_rx_stbc); + DP_PRINT_STATS("ul_ofdma_rx_ldpc = %u", + dp_stats_buf->ul_ofdma_rx_ldpc); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->rx_ulofdma_non_data_ppdu[i]); + } + DP_PRINT_STATS("rx_ulofdma_non_data_ppdu = %s", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->rx_ulofdma_data_ppdu[i]); + } + DP_PRINT_STATS("rx_ulofdma_data_ppdu = %s", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->rx_ulofdma_mpdu_ok[i]); + } + DP_PRINT_STATS("rx_ulofdma_mpdu_ok = %s", str_buf); + + index = 0; + qdf_mem_zero(str_buf, DP_MAX_STRING_LEN); + for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", + i, dp_stats_buf->rx_ulofdma_mpdu_fail[i]); + } + DP_PRINT_STATS("rx_ulofdma_mpdu_fail = %s", str_buf); + + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; i++) + qdf_mem_free(ul_ofdma_rx_gi[i]); + +fail3: + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; i++) + qdf_mem_free(rx_gi[i]); +fail2: + for (i = 0; i < DP_HTT_RSSI_CHAIN_LEN; i++) + qdf_mem_free(rssi_chain[i]); +fail1: + qdf_mem_free(str_buf); + +} + +/* + * dp_print_rx_soc_fw_stats_tlv: display htt_rx_soc_fw_stats_tlv + * @tag_buf: buffer containing the tlv htt_rx_soc_fw_stats_tlv + * + * return:void + */ +static inline void dp_print_rx_soc_fw_stats_tlv(uint32_t *tag_buf) +{ + htt_rx_soc_fw_stats_tlv *dp_stats_buf = + (htt_rx_soc_fw_stats_tlv *)tag_buf; + + DP_PRINT_STATS("HTT_RX_SOC_FW_STATS_TLV:"); + DP_PRINT_STATS("fw_reo_ring_data_msdu = %u", + dp_stats_buf->fw_reo_ring_data_msdu); + DP_PRINT_STATS("fw_to_host_data_msdu_bcmc = %u", + dp_stats_buf->fw_to_host_data_msdu_bcmc); + DP_PRINT_STATS("fw_to_host_data_msdu_uc = %u", + dp_stats_buf->fw_to_host_data_msdu_uc); + DP_PRINT_STATS("ofld_remote_data_buf_recycle_cnt = %u", + dp_stats_buf->ofld_remote_data_buf_recycle_cnt); + DP_PRINT_STATS("ofld_remote_free_buf_indication_cnt = %u", + dp_stats_buf->ofld_remote_free_buf_indication_cnt); + DP_PRINT_STATS("ofld_buf_to_host_data_msdu_uc = %u ", + dp_stats_buf->ofld_buf_to_host_data_msdu_uc); + DP_PRINT_STATS("reo_fw_ring_to_host_data_msdu_uc = %u ", + dp_stats_buf->reo_fw_ring_to_host_data_msdu_uc); + DP_PRINT_STATS("wbm_sw_ring_reap = %u ", + dp_stats_buf->wbm_sw_ring_reap); + DP_PRINT_STATS("wbm_forward_to_host_cnt = %u ", + dp_stats_buf->wbm_forward_to_host_cnt); + DP_PRINT_STATS("wbm_target_recycle_cnt = %u ", + dp_stats_buf->wbm_target_recycle_cnt); + DP_PRINT_STATS("target_refill_ring_recycle_cnt = %u", + dp_stats_buf->target_refill_ring_recycle_cnt); + +} + +/* + * dp_print_rx_soc_fw_refill_ring_empty_tlv_v: display + * htt_rx_soc_fw_refill_ring_empty_tlv_v + * @tag_buf: buffer containing the tlv htt_rx_soc_fw_refill_ring_empty_tlv_v + * + * return:void + */ +static inline void dp_print_rx_soc_fw_refill_ring_empty_tlv_v(uint32_t *tag_buf) +{ + htt_rx_soc_fw_refill_ring_empty_tlv_v *dp_stats_buf = + (htt_rx_soc_fw_refill_ring_empty_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *refill_ring_empty_cnt = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!refill_ring_empty_cnt) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_RX_STATS_REFILL_MAX_RING); + + DP_PRINT_STATS("HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&refill_ring_empty_cnt[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->refill_ring_empty_cnt[i]); + } + DP_PRINT_STATS("refill_ring_empty_cnt = %s\n", + refill_ring_empty_cnt); + qdf_mem_free(refill_ring_empty_cnt); +} + +/* + * dp_print_rx_soc_fw_refill_ring_num_refill_tlv_v: display + * htt_rx_soc_fw_refill_ring_num_refill_tlv_v + * @tag_buf: buffer containing the tlv htt_rx_soc_fw_refill_ring_num_refill_tlv + * + * return:void + */ +static inline void dp_print_rx_soc_fw_refill_ring_num_refill_tlv_v( + uint32_t *tag_buf) +{ + htt_rx_soc_fw_refill_ring_num_refill_tlv_v *dp_stats_buf = + (htt_rx_soc_fw_refill_ring_num_refill_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *refill_ring_num_refill = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!refill_ring_num_refill) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_PDEV_MAX_URRN_STATS); + + DP_PRINT_STATS("HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&refill_ring_num_refill[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->refill_ring_num_refill[i]); + } + DP_PRINT_STATS("refill_ring_num_refill = %s\n", + refill_ring_num_refill); + qdf_mem_free(refill_ring_num_refill); +} + +/* + * dp_print_rx_pdev_fw_stats_tlv: display htt_rx_pdev_fw_stats_tlv + * @tag_buf: buffer containing the tlv htt_rx_pdev_fw_stats_tlv + * + * return:void + */ +static inline void dp_print_rx_pdev_fw_stats_tlv(uint32_t *tag_buf) +{ + htt_rx_pdev_fw_stats_tlv *dp_stats_buf = + (htt_rx_pdev_fw_stats_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char fw_ring_mgmt_subtype[DP_MAX_STRING_LEN]; + char fw_ring_ctrl_subtype[DP_MAX_STRING_LEN]; + + DP_PRINT_STATS("HTT_RX_PDEV_FW_STATS_TLV:"); + DP_PRINT_STATS("mac_id__word = %u", + dp_stats_buf->mac_id__word); + DP_PRINT_STATS("ppdu_recvd = %u", + dp_stats_buf->ppdu_recvd); + DP_PRINT_STATS("mpdu_cnt_fcs_ok = %u", + dp_stats_buf->mpdu_cnt_fcs_ok); + DP_PRINT_STATS("mpdu_cnt_fcs_err = %u", + dp_stats_buf->mpdu_cnt_fcs_err); + DP_PRINT_STATS("tcp_msdu_cnt = %u", + dp_stats_buf->tcp_msdu_cnt); + DP_PRINT_STATS("tcp_ack_msdu_cnt = %u", + dp_stats_buf->tcp_ack_msdu_cnt); + DP_PRINT_STATS("udp_msdu_cnt = %u", + dp_stats_buf->udp_msdu_cnt); + DP_PRINT_STATS("other_msdu_cnt = %u", + dp_stats_buf->other_msdu_cnt); + DP_PRINT_STATS("fw_ring_mpdu_ind = %u", + dp_stats_buf->fw_ring_mpdu_ind); + + for (i = 0; i < DP_HTT_FW_RING_MGMT_SUBTYPE_LEN; i++) { + index += qdf_snprint(&fw_ring_mgmt_subtype[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->fw_ring_mgmt_subtype[i]); + } + DP_PRINT_STATS("fw_ring_mgmt_subtype = %s ", fw_ring_mgmt_subtype); + + index = 0; + for (i = 0; i < DP_HTT_FW_RING_CTRL_SUBTYPE_LEN; i++) { + index += qdf_snprint(&fw_ring_ctrl_subtype[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->fw_ring_ctrl_subtype[i]); + } + DP_PRINT_STATS("fw_ring_ctrl_subtype = %s ", fw_ring_ctrl_subtype); + DP_PRINT_STATS("fw_ring_mcast_data_msdu = %u", + dp_stats_buf->fw_ring_mcast_data_msdu); + DP_PRINT_STATS("fw_ring_bcast_data_msdu = %u", + dp_stats_buf->fw_ring_bcast_data_msdu); + DP_PRINT_STATS("fw_ring_ucast_data_msdu = %u", + dp_stats_buf->fw_ring_ucast_data_msdu); + DP_PRINT_STATS("fw_ring_null_data_msdu = %u", + dp_stats_buf->fw_ring_null_data_msdu); + DP_PRINT_STATS("fw_ring_mpdu_drop = %u", + dp_stats_buf->fw_ring_mpdu_drop); + DP_PRINT_STATS("ofld_local_data_ind_cnt = %u", + dp_stats_buf->ofld_local_data_ind_cnt); + DP_PRINT_STATS("ofld_local_data_buf_recycle_cnt = %u", + dp_stats_buf->ofld_local_data_buf_recycle_cnt); + DP_PRINT_STATS("drx_local_data_ind_cnt = %u", + dp_stats_buf->drx_local_data_ind_cnt); + DP_PRINT_STATS("drx_local_data_buf_recycle_cnt = %u", + dp_stats_buf->drx_local_data_buf_recycle_cnt); + DP_PRINT_STATS("local_nondata_ind_cnt = %u", + dp_stats_buf->local_nondata_ind_cnt); + DP_PRINT_STATS("local_nondata_buf_recycle_cnt = %u", + dp_stats_buf->local_nondata_buf_recycle_cnt); + DP_PRINT_STATS("fw_status_buf_ring_refill_cnt = %u", + dp_stats_buf->fw_status_buf_ring_refill_cnt); + DP_PRINT_STATS("fw_status_buf_ring_empty_cnt = %u", + dp_stats_buf->fw_status_buf_ring_empty_cnt); + DP_PRINT_STATS("fw_pkt_buf_ring_refill_cnt = %u", + dp_stats_buf->fw_pkt_buf_ring_refill_cnt); + DP_PRINT_STATS("fw_pkt_buf_ring_empty_cnt = %u", + dp_stats_buf->fw_pkt_buf_ring_empty_cnt); + DP_PRINT_STATS("fw_link_buf_ring_refill_cnt = %u", + dp_stats_buf->fw_link_buf_ring_refill_cnt); + DP_PRINT_STATS("fw_link_buf_ring_empty_cnt = %u", + dp_stats_buf->fw_link_buf_ring_empty_cnt); + DP_PRINT_STATS("host_pkt_buf_ring_refill_cnt = %u", + dp_stats_buf->host_pkt_buf_ring_refill_cnt); + DP_PRINT_STATS("host_pkt_buf_ring_empty_cnt = %u", + dp_stats_buf->host_pkt_buf_ring_empty_cnt); + DP_PRINT_STATS("mon_pkt_buf_ring_refill_cnt = %u", + dp_stats_buf->mon_pkt_buf_ring_refill_cnt); + DP_PRINT_STATS("mon_pkt_buf_ring_empty_cnt = %u", + dp_stats_buf->mon_pkt_buf_ring_empty_cnt); + DP_PRINT_STATS("mon_status_buf_ring_refill_cnt = %u", + dp_stats_buf->mon_status_buf_ring_refill_cnt); + DP_PRINT_STATS("mon_status_buf_ring_empty_cnt = %u", + dp_stats_buf->mon_status_buf_ring_empty_cnt); + DP_PRINT_STATS("mon_desc_buf_ring_refill_cnt = %u", + dp_stats_buf->mon_desc_buf_ring_refill_cnt); + DP_PRINT_STATS("mon_desc_buf_ring_empty_cnt = %u", + dp_stats_buf->mon_desc_buf_ring_empty_cnt); + DP_PRINT_STATS("mon_dest_ring_update_cnt = %u", + dp_stats_buf->mon_dest_ring_update_cnt); + DP_PRINT_STATS("mon_dest_ring_full_cnt = %u", + dp_stats_buf->mon_dest_ring_full_cnt); + DP_PRINT_STATS("rx_suspend_cnt = %u", + dp_stats_buf->rx_suspend_cnt); + DP_PRINT_STATS("rx_suspend_fail_cnt = %u", + dp_stats_buf->rx_suspend_fail_cnt); + DP_PRINT_STATS("rx_resume_cnt = %u", + dp_stats_buf->rx_resume_cnt); + DP_PRINT_STATS("rx_resume_fail_cnt = %u", + dp_stats_buf->rx_resume_fail_cnt); + DP_PRINT_STATS("rx_ring_switch_cnt = %u", + dp_stats_buf->rx_ring_switch_cnt); + DP_PRINT_STATS("rx_ring_restore_cnt = %u", + dp_stats_buf->rx_ring_restore_cnt); + DP_PRINT_STATS("rx_flush_cnt = %u\n", + dp_stats_buf->rx_flush_cnt); +} + +/* + * dp_print_rx_pdev_fw_ring_mpdu_err_tlv_v: display + * htt_rx_pdev_fw_ring_mpdu_err_tlv_v + * @tag_buf: buffer containing the tlv htt_rx_pdev_fw_ring_mpdu_err_tlv_v + * + * return:void + */ +static inline void dp_print_rx_pdev_fw_ring_mpdu_err_tlv_v(uint32_t *tag_buf) +{ + htt_rx_pdev_fw_ring_mpdu_err_tlv_v *dp_stats_buf = + (htt_rx_pdev_fw_ring_mpdu_err_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *fw_ring_mpdu_err = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!fw_ring_mpdu_err) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + DP_PRINT_STATS("HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:"); + for (i = 0; i < DP_HTT_FW_RING_MPDU_ERR_LEN; i++) { + index += qdf_snprint(&fw_ring_mpdu_err[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->fw_ring_mpdu_err[i]); + } + DP_PRINT_STATS("fw_ring_mpdu_err = %s\n", fw_ring_mpdu_err); + qdf_mem_free(fw_ring_mpdu_err); +} + +/* + * dp_print_rx_pdev_fw_mpdu_drop_tlv_v: display htt_rx_pdev_fw_mpdu_drop_tlv_v + * @tag_buf: buffer containing the tlv htt_rx_pdev_fw_mpdu_drop_tlv_v + * + * return:void + */ +static inline void dp_print_rx_pdev_fw_mpdu_drop_tlv_v(uint32_t *tag_buf) +{ + htt_rx_pdev_fw_mpdu_drop_tlv_v *dp_stats_buf = + (htt_rx_pdev_fw_mpdu_drop_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *fw_mpdu_drop = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!fw_mpdu_drop) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_RX_STATS_FW_DROP_REASON_MAX); + + DP_PRINT_STATS("HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&fw_mpdu_drop[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->fw_mpdu_drop[i]); + } + DP_PRINT_STATS("fw_mpdu_drop = %s\n", fw_mpdu_drop); + qdf_mem_free(fw_mpdu_drop); +} + +/* + * dp_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv() - Accounts for rxdma error + * packets + * + * tag_buf - Buffer + * Return - NULL + */ +static inline void dp_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv(uint32_t *tag_buf) +{ + htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v *dp_stats_buf = + (htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v *)tag_buf; + + uint8_t i; + uint16_t index = 0; + char rxdma_err_cnt[DP_MAX_STRING_LEN]; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + + tag_len = qdf_min(tag_len, (uint32_t)HTT_RX_RXDMA_MAX_ERR_CODE); + + DP_PRINT_STATS("HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V"); + + for (i = 0; i < tag_len; i++) { + index += snprintf(&rxdma_err_cnt[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->rxdma_err[i]); + } + + DP_PRINT_STATS("rxdma_err = %s\n", rxdma_err_cnt); +} + +/* + * dp_print_rx_soc_fw_refill_ring_num_reo_err_tlv() - Accounts for reo error + * packets + * + * tag_buf - Buffer + * Return - NULL + */ +static inline void dp_print_rx_soc_fw_refill_ring_num_reo_err_tlv(uint32_t *tag_buf) +{ + htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v *dp_stats_buf = + (htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v *)tag_buf; + + uint8_t i; + uint16_t index = 0; + char reo_err_cnt[DP_MAX_STRING_LEN]; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + + tag_len = qdf_min(tag_len, (uint32_t)HTT_RX_REO_MAX_ERR_CODE); + + DP_PRINT_STATS("HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V"); + + for (i = 0; i < tag_len; i++) { + index += snprintf(&reo_err_cnt[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, + dp_stats_buf->reo_err[i]); + } + + DP_PRINT_STATS("reo_err = %s\n", reo_err_cnt); +} + +/* + * dp_print_rx_reo_debug_stats_tlv() - REO Statistics + * + * tag_buf - Buffer + * Return - NULL + */ +static inline void dp_print_rx_reo_debug_stats_tlv(uint32_t *tag_buf) +{ + htt_rx_reo_resource_stats_tlv_v *dp_stats_buf = + (htt_rx_reo_resource_stats_tlv_v *)tag_buf; + + DP_PRINT_STATS("HTT_RX_REO_RESOURCE_STATS_TLV"); + + DP_PRINT_STATS("sample_id: %u ", + dp_stats_buf->sample_id); + DP_PRINT_STATS("total_max: %u ", + dp_stats_buf->total_max); + DP_PRINT_STATS("total_avg: %u ", + dp_stats_buf->total_avg); + DP_PRINT_STATS("total_sample: %u ", + dp_stats_buf->total_sample); + DP_PRINT_STATS("non_zeros_avg: %u ", + dp_stats_buf->non_zeros_avg); + DP_PRINT_STATS("non_zeros_sample: %u ", + dp_stats_buf->non_zeros_sample); + DP_PRINT_STATS("last_non_zeros_max: %u ", + dp_stats_buf->last_non_zeros_max); + DP_PRINT_STATS("last_non_zeros_min: %u ", + dp_stats_buf->last_non_zeros_min); + DP_PRINT_STATS("last_non_zeros_avg: %u ", + dp_stats_buf->last_non_zeros_avg); + DP_PRINT_STATS("last_non_zeros_sample: %u\n ", + dp_stats_buf->last_non_zeros_sample); +} + +/* + * dp_print_rx_pdev_fw_stats_phy_err_tlv() - Accounts for phy errors + * + * tag_buf - Buffer + * Return - NULL + */ +static inline void dp_print_rx_pdev_fw_stats_phy_err_tlv(uint32_t *tag_buf) +{ + htt_rx_pdev_fw_stats_phy_err_tlv *dp_stats_buf = + (htt_rx_pdev_fw_stats_phy_err_tlv *)tag_buf; + + uint8_t i = 0; + uint16_t index = 0; + char phy_errs[DP_MAX_STRING_LEN]; + + DP_PRINT_STATS("HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV"); + + DP_PRINT_STATS("mac_id_word: %u", + dp_stats_buf->mac_id__word); + DP_PRINT_STATS("total_phy_err_cnt: %u", + dp_stats_buf->total_phy_err_cnt); + + for (i = 0; i < HTT_STATS_PHY_ERR_MAX; i++) { + index += snprintf(&phy_errs[index], + DP_MAX_STRING_LEN - index, + " %u:%u,", i, dp_stats_buf->phy_err[i]); + } + + DP_PRINT_STATS("phy_errs: %s\n", phy_errs); +} + +/* + * dp_htt_stats_print_tag: function to select the tag type and + * print the corresponding tag structure + * @pdev: pdev pointer + * @tag_type: tag type that is to be printed + * @tag_buf: pointer to the tag structure + * + * return: void + */ +void dp_htt_stats_print_tag(struct dp_pdev *pdev, + uint8_t tag_type, uint32_t *tag_buf) +{ + switch (tag_type) { + case HTT_STATS_TX_PDEV_CMN_TAG: + dp_print_tx_pdev_stats_cmn_tlv(tag_buf); + break; + case HTT_STATS_TX_PDEV_UNDERRUN_TAG: + dp_print_tx_pdev_stats_urrn_tlv_v(tag_buf); + break; + case HTT_STATS_TX_PDEV_SIFS_TAG: + dp_print_tx_pdev_stats_sifs_tlv_v(tag_buf); + break; + case HTT_STATS_TX_PDEV_FLUSH_TAG: + dp_print_tx_pdev_stats_flush_tlv_v(tag_buf); + break; + + case HTT_STATS_TX_PDEV_PHY_ERR_TAG: + dp_print_tx_pdev_stats_phy_err_tlv_v(tag_buf); + break; + + case HTT_STATS_STRING_TAG: + dp_print_stats_string_tlv(tag_buf); + break; + + case HTT_STATS_TX_HWQ_CMN_TAG: + dp_print_tx_hwq_stats_cmn_tlv(tag_buf); + break; + + case HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG: + dp_print_tx_hwq_difs_latency_stats_tlv_v(tag_buf); + break; + + case HTT_STATS_TX_HWQ_CMD_RESULT_TAG: + dp_print_tx_hwq_cmd_result_stats_tlv_v(tag_buf); + break; + + case HTT_STATS_TX_HWQ_CMD_STALL_TAG: + dp_print_tx_hwq_cmd_stall_stats_tlv_v(tag_buf); + break; + + case HTT_STATS_TX_HWQ_FES_STATUS_TAG: + dp_print_tx_hwq_fes_result_stats_tlv_v(tag_buf); + break; + + case HTT_STATS_TX_TQM_GEN_MPDU_TAG: + dp_print_tx_tqm_gen_mpdu_stats_tlv_v(tag_buf); + break; + + case HTT_STATS_TX_TQM_LIST_MPDU_TAG: + dp_print_tx_tqm_list_mpdu_stats_tlv_v(tag_buf); + break; + + case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG: + dp_print_tx_tqm_list_mpdu_cnt_tlv_v(tag_buf); + break; + + case HTT_STATS_TX_TQM_CMN_TAG: + dp_print_tx_tqm_cmn_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_TQM_PDEV_TAG: + dp_print_tx_tqm_pdev_stats_tlv_v(tag_buf); + break; + + case HTT_STATS_TX_TQM_CMDQ_STATUS_TAG: + dp_print_tx_tqm_cmdq_status_tlv(tag_buf); + break; + + case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG: + dp_print_tx_de_eapol_packets_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG: + dp_print_tx_de_classify_failed_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG: + dp_print_tx_de_classify_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG: + dp_print_tx_de_classify_status_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG: + dp_print_tx_de_enqueue_packets_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG: + dp_print_tx_de_enqueue_discard_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_DE_CMN_TAG: + dp_print_tx_de_cmn_stats_tlv(tag_buf); + break; + + case HTT_STATS_RING_IF_TAG: + dp_print_ring_if_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG: + dp_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf); + break; + + case HTT_STATS_SFM_CMN_TAG: + dp_print_sfm_cmn_tlv(tag_buf); + break; + + case HTT_STATS_SRING_STATS_TAG: + dp_print_sring_stats_tlv(tag_buf); + break; + + case HTT_STATS_RX_PDEV_FW_STATS_TAG: + dp_print_rx_pdev_fw_stats_tlv(tag_buf); + break; + + case HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG: + dp_print_rx_pdev_fw_ring_mpdu_err_tlv_v(tag_buf); + break; + + case HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG: + dp_print_rx_pdev_fw_mpdu_drop_tlv_v(tag_buf); + break; + + case HTT_STATS_RX_SOC_FW_STATS_TAG: + dp_print_rx_soc_fw_stats_tlv(tag_buf); + break; + + case HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG: + dp_print_rx_soc_fw_refill_ring_empty_tlv_v(tag_buf); + break; + + case HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG: + dp_print_rx_soc_fw_refill_ring_num_refill_tlv_v( + tag_buf); + break; + + case HTT_STATS_TX_PDEV_RATE_STATS_TAG: + dp_print_tx_pdev_rate_stats_tlv(tag_buf); + break; + + case HTT_STATS_RX_PDEV_RATE_STATS_TAG: + dp_print_rx_pdev_rate_stats_tlv(pdev, tag_buf); + break; + + case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG: + dp_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf); + break; + + case HTT_STATS_TX_SCHED_CMN_TAG: + dp_print_stats_tx_sched_cmn_tlv(tag_buf); + break; + + case HTT_STATS_TX_PDEV_MPDU_STATS_TAG: + dp_print_tx_pdev_mu_mimo_mpdu_stats_tlv(tag_buf); + break; + + case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG: + dp_print_sched_txq_cmd_posted_tlv_v(tag_buf); + break; + + case HTT_STATS_RING_IF_CMN_TAG: + dp_print_ring_if_cmn_tlv(tag_buf); + break; + + case HTT_STATS_SFM_CLIENT_USER_TAG: + dp_print_sfm_client_user_tlv_v(tag_buf); + break; + + case HTT_STATS_SFM_CLIENT_TAG: + dp_print_sfm_client_tlv(tag_buf); + break; + + case HTT_STATS_TX_TQM_ERROR_STATS_TAG: + dp_print_tx_tqm_error_stats_tlv(tag_buf); + break; + + case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG: + dp_print_sched_txq_cmd_reaped_tlv_v(tag_buf); + break; + + case HTT_STATS_SRING_CMN_TAG: + dp_print_sring_cmn_tlv(tag_buf); + break; + + case HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG: + dp_print_tx_selfgen_ac_err_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_SELFGEN_CMN_STATS_TAG: + dp_print_tx_selfgen_cmn_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_SELFGEN_AC_STATS_TAG: + dp_print_tx_selfgen_ac_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_SELFGEN_AX_STATS_TAG: + dp_print_tx_selfgen_ax_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG: + dp_print_tx_selfgen_ax_err_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG: + dp_print_tx_hwq_mu_mimo_sch_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG: + dp_print_tx_hwq_mu_mimo_mpdu_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG: + dp_print_tx_hwq_mu_mimo_cmn_stats_tlv(tag_buf); + break; + + case HTT_STATS_HW_INTR_MISC_TAG: + dp_print_hw_stats_intr_misc_tlv(tag_buf); + break; + + case HTT_STATS_HW_WD_TIMEOUT_TAG: + dp_print_hw_stats_wd_timeout_tlv(tag_buf); + break; + + case HTT_STATS_HW_PDEV_ERRS_TAG: + dp_print_hw_stats_pdev_errs_tlv(tag_buf); + break; + + case HTT_STATS_COUNTER_NAME_TAG: + dp_print_counter_tlv(tag_buf); + break; + + case HTT_STATS_TX_TID_DETAILS_TAG: + dp_print_tx_tid_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_TID_DETAILS_V1_TAG: + dp_print_tx_tid_stats_v1_tlv(tag_buf); + break; + + case HTT_STATS_RX_TID_DETAILS_TAG: + dp_print_rx_tid_stats_tlv(tag_buf); + break; + + case HTT_STATS_PEER_STATS_CMN_TAG: + dp_print_peer_stats_cmn_tlv(tag_buf); + break; + + case HTT_STATS_PEER_DETAILS_TAG: + dp_print_peer_details_tlv(tag_buf); + break; + + case HTT_STATS_PEER_MSDU_FLOWQ_TAG: + dp_print_msdu_flow_stats_tlv(tag_buf); + break; + + case HTT_STATS_PEER_TX_RATE_STATS_TAG: + dp_print_tx_peer_rate_stats_tlv(tag_buf); + break; + + case HTT_STATS_PEER_RX_RATE_STATS_TAG: + dp_print_rx_peer_rate_stats_tlv(tag_buf); + break; + + case HTT_STATS_TX_DE_COMPL_STATS_TAG: + dp_print_tx_de_compl_stats_tlv(tag_buf); + break; + + case HTT_STATS_RX_REFILL_RXDMA_ERR_TAG: + dp_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv(tag_buf); + break; + + case HTT_STATS_RX_REFILL_REO_ERR_TAG: + dp_print_rx_soc_fw_refill_ring_num_reo_err_tlv(tag_buf); + break; + + case HTT_STATS_RX_REO_RESOURCE_STATS_TAG: + dp_print_rx_reo_debug_stats_tlv(tag_buf); + break; + + case HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG: + dp_print_rx_pdev_fw_stats_phy_err_tlv(tag_buf); + break; + + default: + break; + } +} + +/* + * dp_htt_stats_copy_tag: function to select the tag type and + * copy the corresponding tag structure + * @pdev: DP_PDEV handle + * @tag_type: tag type that is to be printed + * @tag_buf: pointer to the tag structure + * + * return: void + */ +void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf) +{ + void *dest_ptr = NULL; + uint32_t size = 0; + + switch (tag_type) { + case HTT_STATS_TX_PDEV_CMN_TAG: + dest_ptr = &pdev->stats.htt_tx_pdev_stats.cmn_tlv; + size = sizeof(htt_tx_pdev_stats_cmn_tlv); + break; + case HTT_STATS_TX_PDEV_UNDERRUN_TAG: + dest_ptr = &pdev->stats.htt_tx_pdev_stats.underrun_tlv; + size = sizeof(htt_tx_pdev_stats_urrn_tlv_v); + break; + case HTT_STATS_TX_PDEV_SIFS_TAG: + dest_ptr = &pdev->stats.htt_tx_pdev_stats.sifs_tlv; + size = sizeof(htt_tx_pdev_stats_sifs_tlv_v); + break; + case HTT_STATS_TX_PDEV_FLUSH_TAG: + dest_ptr = &pdev->stats.htt_tx_pdev_stats.flush_tlv; + size = sizeof(htt_tx_pdev_stats_flush_tlv_v); + break; + case HTT_STATS_TX_PDEV_PHY_ERR_TAG: + dest_ptr = &pdev->stats.htt_tx_pdev_stats.phy_err_tlv; + size = sizeof(htt_tx_pdev_stats_phy_err_tlv_v); + break; + case HTT_STATS_RX_PDEV_FW_STATS_TAG: + dest_ptr = &pdev->stats.htt_rx_pdev_stats.fw_stats_tlv; + size = sizeof(htt_rx_pdev_fw_stats_tlv); + break; + case HTT_STATS_RX_SOC_FW_STATS_TAG: + dest_ptr = &pdev->stats.htt_rx_pdev_stats.soc_stats.fw_tlv; + size = sizeof(htt_rx_soc_fw_stats_tlv); + break; + case HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG: + dest_ptr = &pdev->stats.htt_rx_pdev_stats.soc_stats.fw_refill_ring_empty_tlv; + size = sizeof(htt_rx_soc_fw_refill_ring_empty_tlv_v); + break; + case HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG: + dest_ptr = &pdev->stats.htt_rx_pdev_stats.soc_stats.fw_refill_ring_num_refill_tlv; + size = sizeof(htt_rx_soc_fw_refill_ring_num_refill_tlv_v); + break; + case HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG: + dest_ptr = &pdev->stats.htt_rx_pdev_stats.fw_ring_mpdu_err_tlv; + size = sizeof(htt_rx_pdev_fw_ring_mpdu_err_tlv_v); + break; + case HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG: + dest_ptr = &pdev->stats.htt_rx_pdev_stats.fw_ring_mpdu_drop; + size = sizeof(htt_rx_pdev_fw_mpdu_drop_tlv_v); + break; + default: + break; + } + + if (dest_ptr) + qdf_mem_copy(dest_ptr, tag_buf, size); +} + +#ifdef VDEV_PEER_PROTOCOL_COUNT +#ifdef VDEV_PEER_PROTOCOL_COUNT_TESTING +static QDF_STATUS dp_peer_stats_update_protocol_test_cnt(struct dp_vdev *vdev, + bool is_egress, + bool is_rx) +{ + int mask; + + if (is_egress) + if (is_rx) + mask = VDEV_PEER_PROTOCOL_RX_EGRESS_MASK; + else + mask = VDEV_PEER_PROTOCOL_TX_EGRESS_MASK; + else + if (is_rx) + mask = VDEV_PEER_PROTOCOL_RX_INGRESS_MASK; + else + mask = VDEV_PEER_PROTOCOL_TX_INGRESS_MASK; + + if (qdf_unlikely(vdev->peer_protocol_count_dropmask & mask)) { + dp_info("drop mask set %x", vdev->peer_protocol_count_dropmask); + return QDF_STATUS_SUCCESS; + } + return QDF_STATUS_E_FAILURE; +} + +#else +static QDF_STATUS dp_peer_stats_update_protocol_test_cnt(struct dp_vdev *vdev, + bool is_egress, + bool is_rx) +{ + return QDF_STATUS_E_FAILURE; +} +#endif + +void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev, + qdf_nbuf_t nbuf, + struct dp_peer *peer, + bool is_egress, + bool is_rx) +{ + struct cdp_peer_stats *peer_stats; + struct protocol_trace_count *protocol_trace_cnt; + enum cdp_protocol_trace prot; + struct dp_soc *soc; + struct ether_header *eh; + char *mac; + bool new_peer_ref = false; + + if (qdf_likely(!vdev->peer_protocol_count_track)) + return; + if (qdf_unlikely(dp_peer_stats_update_protocol_test_cnt(vdev, + is_egress, + is_rx) == + QDF_STATUS_SUCCESS)) + return; + + soc = vdev->pdev->soc; + eh = (struct ether_header *)qdf_nbuf_data(nbuf); + if (is_rx) + mac = eh->ether_shost; + else + mac = eh->ether_dhost; + + if (!peer) { + peer = dp_peer_find_hash_find(soc, mac, 0, vdev->vdev_id); + new_peer_ref = true; + if (!peer) + return; + } + peer_stats = &peer->stats; + + if (qdf_nbuf_is_icmp_pkt(nbuf) == true) + prot = CDP_TRACE_ICMP; + else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf) == true) + prot = CDP_TRACE_ARP; + else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf) == true) + prot = CDP_TRACE_EAP; + else + goto dp_vdev_peer_stats_update_protocol_cnt_free_peer; + + if (is_rx) + protocol_trace_cnt = peer_stats->rx.protocol_trace_cnt; + else + protocol_trace_cnt = peer_stats->tx.protocol_trace_cnt; + + if (is_egress) + protocol_trace_cnt[prot].egress_cnt++; + else + protocol_trace_cnt[prot].ingress_cnt++; +dp_vdev_peer_stats_update_protocol_cnt_free_peer: + if (new_peer_ref) + dp_peer_unref_delete(peer); +} + +void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc, + int8_t vdev_id, + qdf_nbuf_t nbuf, + bool is_egress, + bool is_rx) +{ + struct dp_vdev *vdev; + + vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + if (qdf_likely(!vdev->peer_protocol_count_track)) + return; + dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, is_egress, + is_rx); +} +#endif + +#ifdef WDI_EVENT_ENABLE +QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer) +{ + struct cdp_interface_peer_stats peer_stats_intf; + struct cdp_peer_stats *peer_stats = &peer->stats; + + if (!peer->vdev) + return QDF_STATUS_E_FAULT; + + qdf_mem_zero(&peer_stats_intf, sizeof(peer_stats_intf)); + if (peer_stats->rx.last_rssi != peer_stats->rx.rssi) + peer_stats_intf.rssi_changed = true; + + if ((peer_stats->rx.rssi && peer_stats_intf.rssi_changed) || + (peer_stats->tx.tx_rate && + peer_stats->tx.tx_rate != peer_stats->tx.last_tx_rate)) { + qdf_mem_copy(peer_stats_intf.peer_mac, peer->mac_addr.raw, + QDF_MAC_ADDR_SIZE); + peer_stats_intf.vdev_id = peer->vdev->vdev_id; + peer_stats_intf.last_peer_tx_rate = peer_stats->tx.last_tx_rate; + peer_stats_intf.peer_tx_rate = peer_stats->tx.tx_rate; + peer_stats_intf.peer_rssi = peer_stats->rx.rssi; + peer_stats_intf.tx_packet_count = peer_stats->tx.ucast.num; + peer_stats_intf.rx_packet_count = peer_stats->rx.to_stack.num; + peer_stats_intf.tx_byte_count = peer_stats->tx.tx_success.bytes; + peer_stats_intf.rx_byte_count = peer_stats->rx.to_stack.bytes; + peer_stats_intf.per = peer_stats->tx.last_per; + peer_stats_intf.ack_rssi = peer_stats->tx.last_ack_rssi; + dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc, + (void *)&peer_stats_intf, 0, + WDI_NO_VAL, dp_pdev->pdev_id); + } + + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef QCA_ENH_V3_STATS_SUPPORT +/** + * dp_vow_str_fw_to_hw_delay() - Return string for a delay + * @index: Index of delay + * + * Return: char const pointer + */ +static inline const char *dp_vow_str_fw_to_hw_delay(uint8_t index) +{ + if (index > CDP_DELAY_BUCKET_MAX) { + return "Invalid index"; + } + return fw_to_hw_delay_bucket[index]; +} + +/** + * dp_vow_str_sw_enq_delay() - Return string for a delay + * @index: Index of delay + * + * Return: char const pointer + */ +static inline const char *dp_vow_str_sw_enq_delay(uint8_t index) +{ + if (index > CDP_DELAY_BUCKET_MAX) { + return "Invalid index"; + } + return sw_enq_delay_bucket[index]; +} + +/** + * dp_vow_str_intfrm_delay() - Return string for a delay + * @index: Index of delay + * + * Return: char const pointer + */ +static inline const char *dp_vow_str_intfrm_delay(uint8_t index) +{ + if (index > CDP_DELAY_BUCKET_MAX) { + return "Invalid index"; + } + return intfrm_delay_bucket[index]; +} + +/** + * dp_accumulate_delay_stats() - Update delay stats members + * @total: Update stats total structure + * @per_ring: per ring structures from where stats need to be accumulated + * + * Return: void + */ +static void +dp_accumulate_delay_stats(struct cdp_delay_stats *total, + struct cdp_delay_stats *per_ring) +{ + uint8_t index; + + for (index = 0; index < CDP_DELAY_BUCKET_MAX; index++) + total->delay_bucket[index] += per_ring->delay_bucket[index]; + total->min_delay = QDF_MIN(total->min_delay, per_ring->min_delay); + total->max_delay = QDF_MAX(total->max_delay, per_ring->max_delay); + total->avg_delay = (total->avg_delay + per_ring->avg_delay) / 2; +} + +/** + * dp_accumulate_tid_stats() - Accumulate TID stats from each ring + * @pdev: pdev handle + * @tid: traffic ID + * @total_tx: fill this tx structure to get stats from all wbm rings + * @total_rx: fill this rx structure to get stats from all reo rings + * @type: delay stats or regular frame counters + * + * Return: void + */ +static void +dp_accumulate_tid_stats(struct dp_pdev *pdev, uint8_t tid, + struct cdp_tid_tx_stats *total_tx, + struct cdp_tid_rx_stats *total_rx, uint8_t type) +{ + uint8_t ring_id = 0, drop = 0, tqm_status_idx = 0, htt_status_idx = 0; + struct cdp_tid_stats *tid_stats = &pdev->stats.tid_stats; + struct cdp_tid_tx_stats *per_ring_tx = NULL; + struct cdp_tid_rx_stats *per_ring_rx = NULL; + + if (wlan_cfg_get_dp_soc_nss_cfg(pdev->soc->wlan_cfg_ctx)) { + qdf_mem_copy(total_tx, &tid_stats->tid_tx_stats[0][tid], + sizeof(struct cdp_tid_tx_stats)); + qdf_mem_copy(total_rx, &tid_stats->tid_rx_stats[0][tid], + sizeof(struct cdp_tid_rx_stats)); + return; + } else { + qdf_mem_zero(total_tx, sizeof(struct cdp_tid_tx_stats)); + qdf_mem_zero(total_rx, sizeof(struct cdp_tid_rx_stats)); + } + + switch (type) { + case TID_COUNTER_STATS: + { + for (ring_id = 0; ring_id < CDP_MAX_TX_COMP_RINGS; ring_id++) { + per_ring_tx = &tid_stats->tid_tx_stats[ring_id][tid]; + total_tx->success_cnt += per_ring_tx->success_cnt; + for (tqm_status_idx = 0; tqm_status_idx < CDP_MAX_TX_TQM_STATUS; tqm_status_idx++) { + total_tx->tqm_status_cnt[tqm_status_idx] += + per_ring_tx->tqm_status_cnt[tqm_status_idx]; + } + + for (htt_status_idx = 0; htt_status_idx < CDP_MAX_TX_HTT_STATUS; htt_status_idx++) { + total_tx->htt_status_cnt[htt_status_idx] += + per_ring_tx->htt_status_cnt[htt_status_idx]; + } + + for (drop = 0; drop < TX_MAX_DROP; drop++) + total_tx->swdrop_cnt[drop] += + per_ring_tx->swdrop_cnt[drop]; + } + for (ring_id = 0; ring_id < CDP_MAX_RX_RINGS; ring_id++) { + per_ring_rx = &tid_stats->tid_rx_stats[ring_id][tid]; + total_rx->delivered_to_stack += + per_ring_rx->delivered_to_stack; + total_rx->intrabss_cnt += per_ring_rx->intrabss_cnt; + total_rx->msdu_cnt += per_ring_rx->msdu_cnt; + total_rx->mcast_msdu_cnt += per_ring_rx->mcast_msdu_cnt; + total_rx->bcast_msdu_cnt += per_ring_rx->bcast_msdu_cnt; + for (drop = 0; drop < RX_MAX_DROP; drop++) + total_rx->fail_cnt[drop] += + per_ring_rx->fail_cnt[drop]; + } + break; + } + + case TID_DELAY_STATS: + { + for (ring_id = 0; ring_id < CDP_MAX_TX_COMP_RINGS; ring_id++) { + per_ring_tx = &tid_stats->tid_tx_stats[ring_id][tid]; + dp_accumulate_delay_stats(&total_tx->swq_delay, + &per_ring_tx->swq_delay); + dp_accumulate_delay_stats(&total_tx->hwtx_delay, + &per_ring_tx->hwtx_delay); + dp_accumulate_delay_stats(&total_tx->intfrm_delay, + &per_ring_tx->intfrm_delay); + } + for (ring_id = 0; ring_id < CDP_MAX_RX_RINGS; ring_id++) { + per_ring_rx = &tid_stats->tid_rx_stats[ring_id][tid]; + dp_accumulate_delay_stats(&total_rx->intfrm_delay, + &per_ring_rx->intfrm_delay); + dp_accumulate_delay_stats(&total_rx->to_stack_delay, + &per_ring_rx->to_stack_delay); + } + break; + } + + default: + qdf_err("Invalid stats type"); + break; + } +} + +void dp_pdev_print_tid_stats(struct dp_pdev *pdev) +{ + struct cdp_tid_tx_stats total_tx; + struct cdp_tid_rx_stats total_rx; + uint8_t tid, tqm_status_idx, htt_status_idx; + + DP_PRINT_STATS("Packets received in hardstart: %llu ", + pdev->stats.tid_stats.ingress_stack); + DP_PRINT_STATS("Packets dropped in osif layer: %llu ", + pdev->stats.tid_stats.osif_drop); + DP_PRINT_STATS("Per TID Video Stats:\n"); + + for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) { + dp_accumulate_tid_stats(pdev, tid, &total_tx, &total_rx, + TID_COUNTER_STATS); + DP_PRINT_STATS("----TID: %d----", tid); + DP_PRINT_STATS("Tx TQM Success Count: %llu", + total_tx.tqm_status_cnt[HAL_TX_TQM_RR_FRAME_ACKED]); + DP_PRINT_STATS("Tx HTT Success Count: %llu", + total_tx.htt_status_cnt[HTT_TX_FW2WBM_TX_STATUS_OK]); + for (tqm_status_idx = 1; tqm_status_idx < CDP_MAX_TX_TQM_STATUS; tqm_status_idx++) { + if (total_tx.tqm_status_cnt[tqm_status_idx]) { + DP_PRINT_STATS("Tx TQM Drop Count[%d]: %llu", + tqm_status_idx, total_tx.tqm_status_cnt[tqm_status_idx]); + } + } + + for (htt_status_idx = 1; htt_status_idx < CDP_MAX_TX_HTT_STATUS; htt_status_idx++) { + if (total_tx.htt_status_cnt[htt_status_idx]) { + DP_PRINT_STATS("Tx HTT Drop Count[%d]: %llu", + htt_status_idx, total_tx.htt_status_cnt[htt_status_idx]); + } + } + + DP_PRINT_STATS("Tx Hardware Drop Count: %llu", + total_tx.swdrop_cnt[TX_HW_ENQUEUE]); + DP_PRINT_STATS("Tx Software Drop Count: %llu", + total_tx.swdrop_cnt[TX_SW_ENQUEUE]); + DP_PRINT_STATS("Tx Descriptor Error Count: %llu", + total_tx.swdrop_cnt[TX_DESC_ERR]); + DP_PRINT_STATS("Tx HAL Ring Error Count: %llu", + total_tx.swdrop_cnt[TX_HAL_RING_ACCESS_ERR]); + DP_PRINT_STATS("Tx Dma Map Error Count: %llu", + total_tx.swdrop_cnt[TX_DMA_MAP_ERR]); + DP_PRINT_STATS("Rx Delievered Count: %llu", + total_rx.delivered_to_stack); + DP_PRINT_STATS("Rx Software Enqueue Drop Count: %llu", + total_rx.fail_cnt[ENQUEUE_DROP]); + DP_PRINT_STATS("Rx Intrabss Drop Count: %llu", + total_rx.fail_cnt[INTRABSS_DROP]); + DP_PRINT_STATS("Rx Msdu Done Failure Count: %llu", + total_rx.fail_cnt[MSDU_DONE_FAILURE]); + DP_PRINT_STATS("Rx Invalid Peer Count: %llu", + total_rx.fail_cnt[INVALID_PEER_VDEV]); + DP_PRINT_STATS("Rx Policy Check Drop Count: %llu", + total_rx.fail_cnt[POLICY_CHECK_DROP]); + DP_PRINT_STATS("Rx Mec Drop Count: %llu", + total_rx.fail_cnt[MEC_DROP]); + DP_PRINT_STATS("Rx Nawds Mcast Drop Count: %llu", + total_rx.fail_cnt[NAWDS_MCAST_DROP]); + DP_PRINT_STATS("Rx Mesh Filter Drop Count: %llu", + total_rx.fail_cnt[MESH_FILTER_DROP]); + DP_PRINT_STATS("Rx Intra Bss Deliver Count: %llu", + total_rx.intrabss_cnt); + DP_PRINT_STATS("Rx MSDU Count: %llu", total_rx.msdu_cnt); + DP_PRINT_STATS("Rx Multicast MSDU Count: %llu", + total_rx.mcast_msdu_cnt); + DP_PRINT_STATS("Rx Broadcast MSDU Count: %llu\n", + total_rx.bcast_msdu_cnt); + } +} + +void dp_pdev_print_delay_stats(struct dp_pdev *pdev) +{ + struct dp_soc *soc = pdev->soc; + struct cdp_tid_tx_stats total_tx; + struct cdp_tid_rx_stats total_rx; + struct cdp_tid_stats *tid_stats; + + uint8_t tid, index; + uint64_t count = 0; + + if (!soc) + return; + + tid = 0; + index = 0; + tid_stats = &pdev->stats.tid_stats; + + DP_PRINT_STATS("Per TID Delay Non-Zero Stats:\n"); + for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) { + dp_accumulate_tid_stats(pdev, tid, &total_tx, &total_rx, + TID_DELAY_STATS); + DP_PRINT_STATS("----TID: %d----", tid); + + DP_PRINT_STATS("Software Enqueue Delay:"); + for (index = 0; index < CDP_DELAY_BUCKET_MAX; index++) { + count = total_tx.swq_delay.delay_bucket[index]; + if (count) { + DP_PRINT_STATS("%s: Packets = %llu", + dp_vow_str_sw_enq_delay(index), + count); + } + } + + DP_PRINT_STATS("Min = %u", total_tx.swq_delay.min_delay); + DP_PRINT_STATS("Max = %u", total_tx.swq_delay.max_delay); + DP_PRINT_STATS("Avg = %u\n", total_tx.swq_delay.avg_delay); + + DP_PRINT_STATS("Hardware Transmission Delay:"); + for (index = 0; index < CDP_DELAY_BUCKET_MAX; index++) { + count = total_tx.hwtx_delay.delay_bucket[index]; + if (count) { + DP_PRINT_STATS("%s: Packets = %llu", + dp_vow_str_fw_to_hw_delay(index), + count); + } + } + DP_PRINT_STATS("Min = %u", total_tx.hwtx_delay.min_delay); + DP_PRINT_STATS("Max = %u", total_tx.hwtx_delay.max_delay); + DP_PRINT_STATS("Avg = %u\n", total_tx.hwtx_delay.avg_delay); + + DP_PRINT_STATS("Tx Interframe Delay:"); + for (index = 0; index < CDP_DELAY_BUCKET_MAX; index++) { + count = total_tx.intfrm_delay.delay_bucket[index]; + if (count) { + DP_PRINT_STATS("%s: Packets = %llu", + dp_vow_str_intfrm_delay(index), + count); + } + } + DP_PRINT_STATS("Min = %u", total_tx.intfrm_delay.min_delay); + DP_PRINT_STATS("Max = %u", total_tx.intfrm_delay.max_delay); + DP_PRINT_STATS("Avg = %u\n", total_tx.intfrm_delay.avg_delay); + + DP_PRINT_STATS("Rx Interframe Delay:"); + for (index = 0; index < CDP_DELAY_BUCKET_MAX; index++) { + count = total_rx.intfrm_delay.delay_bucket[index]; + if (count) { + DP_PRINT_STATS("%s: Packets = %llu", + dp_vow_str_intfrm_delay(index), + count); + } + } + DP_PRINT_STATS("Min = %u", total_rx.intfrm_delay.min_delay); + DP_PRINT_STATS("Max = %u", total_rx.intfrm_delay.max_delay); + DP_PRINT_STATS("Avg = %u\n", total_rx.intfrm_delay.avg_delay); + + DP_PRINT_STATS("Rx Reap to Stack Delay:"); + for (index = 0; index < CDP_DELAY_BUCKET_MAX; index++) { + count = total_rx.to_stack_delay.delay_bucket[index]; + if (count) { + DP_PRINT_STATS("%s: Packets = %llu", + dp_vow_str_intfrm_delay(index), + count); + } + } + + DP_PRINT_STATS("Min = %u", total_rx.to_stack_delay.min_delay); + DP_PRINT_STATS("Max = %u", total_rx.to_stack_delay.max_delay); + DP_PRINT_STATS("Avg = %u\n", total_rx.to_stack_delay.avg_delay); + } +} +#endif + +void dp_print_soc_cfg_params(struct dp_soc *soc) +{ + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; + uint8_t index = 0, i = 0; + char ring_mask[DP_MAX_INT_CONTEXTS_STRING_LENGTH]; + int num_of_int_contexts; + + if (!soc) { + dp_err("Context is null"); + return; + } + + soc_cfg_ctx = soc->wlan_cfg_ctx; + + if (!soc_cfg_ctx) { + dp_err("Context is null"); + return; + } + + num_of_int_contexts = + wlan_cfg_get_num_contexts(soc_cfg_ctx); + + DP_PRINT_STATS("No. of interrupt contexts: %u", + soc_cfg_ctx->num_int_ctxts); + DP_PRINT_STATS("Max clients: %u", + soc_cfg_ctx->max_clients); + DP_PRINT_STATS("Max alloc size: %u ", + soc_cfg_ctx->max_alloc_size); + DP_PRINT_STATS("Per pdev tx ring: %u ", + soc_cfg_ctx->per_pdev_tx_ring); + DP_PRINT_STATS("Num tcl data rings: %u ", + soc_cfg_ctx->num_tcl_data_rings); + DP_PRINT_STATS("Per pdev rx ring: %u ", + soc_cfg_ctx->per_pdev_rx_ring); + DP_PRINT_STATS("Per pdev lmac ring: %u ", + soc_cfg_ctx->per_pdev_lmac_ring); + DP_PRINT_STATS("Num of reo dest rings: %u ", + soc_cfg_ctx->num_reo_dest_rings); + DP_PRINT_STATS("Num tx desc pool: %u ", + soc_cfg_ctx->num_tx_desc_pool); + DP_PRINT_STATS("Num tx ext desc pool: %u ", + soc_cfg_ctx->num_tx_ext_desc_pool); + DP_PRINT_STATS("Num tx desc: %u ", + soc_cfg_ctx->num_tx_desc); + DP_PRINT_STATS("Num tx ext desc: %u ", + soc_cfg_ctx->num_tx_ext_desc); + DP_PRINT_STATS("Htt packet type: %u ", + soc_cfg_ctx->htt_packet_type); + DP_PRINT_STATS("Max peer_ids: %u ", + soc_cfg_ctx->max_peer_id); + DP_PRINT_STATS("Tx ring size: %u ", + soc_cfg_ctx->tx_ring_size); + DP_PRINT_STATS("Tx comp ring size: %u ", + soc_cfg_ctx->tx_comp_ring_size); + DP_PRINT_STATS("Tx comp ring size nss: %u ", + soc_cfg_ctx->tx_comp_ring_size_nss); + DP_PRINT_STATS("Int batch threshold tx: %u ", + soc_cfg_ctx->int_batch_threshold_tx); + DP_PRINT_STATS("Int timer threshold tx: %u ", + soc_cfg_ctx->int_timer_threshold_tx); + DP_PRINT_STATS("Int batch threshold rx: %u ", + soc_cfg_ctx->int_batch_threshold_rx); + DP_PRINT_STATS("Int timer threshold rx: %u ", + soc_cfg_ctx->int_timer_threshold_rx); + DP_PRINT_STATS("Int batch threshold other: %u ", + soc_cfg_ctx->int_batch_threshold_other); + DP_PRINT_STATS("Int timer threshold other: %u ", + soc_cfg_ctx->int_timer_threshold_other); + + for (i = 0; i < num_of_int_contexts; i++) { + index += qdf_snprint(&ring_mask[index], + DP_MAX_INT_CONTEXTS_STRING_LENGTH - index, + " %d", + soc_cfg_ctx->int_tx_ring_mask[i]); + } + + DP_PRINT_STATS("Tx ring mask (0-%d):%s", + num_of_int_contexts, ring_mask); + + index = 0; + for (i = 0; i < num_of_int_contexts; i++) { + index += qdf_snprint(&ring_mask[index], + DP_MAX_INT_CONTEXTS_STRING_LENGTH - index, + " %d", + soc_cfg_ctx->int_rx_ring_mask[i]); + } + + DP_PRINT_STATS("Rx ring mask (0-%d):%s", + num_of_int_contexts, ring_mask); + + index = 0; + for (i = 0; i < num_of_int_contexts; i++) { + index += qdf_snprint(&ring_mask[index], + DP_MAX_INT_CONTEXTS_STRING_LENGTH - index, + " %d", + soc_cfg_ctx->int_rx_mon_ring_mask[i]); + } + + DP_PRINT_STATS("Rx mon ring mask (0-%d):%s", + num_of_int_contexts, ring_mask); + + index = 0; + for (i = 0; i < num_of_int_contexts; i++) { + index += qdf_snprint(&ring_mask[index], + DP_MAX_INT_CONTEXTS_STRING_LENGTH - index, + " %d", + soc_cfg_ctx->int_rx_err_ring_mask[i]); + } + + DP_PRINT_STATS("Rx err ring mask (0-%d):%s", + num_of_int_contexts, ring_mask); + + index = 0; + for (i = 0; i < num_of_int_contexts; i++) { + index += qdf_snprint(&ring_mask[index], + DP_MAX_INT_CONTEXTS_STRING_LENGTH - index, + " %d", + soc_cfg_ctx->int_rx_wbm_rel_ring_mask[i]); + } + + DP_PRINT_STATS("Rx wbm rel ring mask (0-%d):%s", + num_of_int_contexts, ring_mask); + + index = 0; + for (i = 0; i < num_of_int_contexts; i++) { + index += qdf_snprint(&ring_mask[index], + DP_MAX_INT_CONTEXTS_STRING_LENGTH - index, + " %d", + soc_cfg_ctx->int_reo_status_ring_mask[i]); + } + + DP_PRINT_STATS("Reo ring mask (0-%d):%s", + num_of_int_contexts, ring_mask); + + index = 0; + for (i = 0; i < num_of_int_contexts; i++) { + index += qdf_snprint(&ring_mask[index], + DP_MAX_INT_CONTEXTS_STRING_LENGTH - index, + " %d", + soc_cfg_ctx->int_rxdma2host_ring_mask[i]); + } + + DP_PRINT_STATS("Rxdma2host ring mask (0-%d):%s", + num_of_int_contexts, ring_mask); + + index = 0; + for (i = 0; i < num_of_int_contexts; i++) { + index += qdf_snprint(&ring_mask[index], + DP_MAX_INT_CONTEXTS_STRING_LENGTH - index, + " %d", + soc_cfg_ctx->int_host2rxdma_ring_mask[i]); + } + + DP_PRINT_STATS("Host2rxdma ring mask (0-%d):%s", + num_of_int_contexts, ring_mask); + + DP_PRINT_STATS("Rx hash: %u ", + soc_cfg_ctx->rx_hash); + DP_PRINT_STATS("Tso enabled: %u ", + soc_cfg_ctx->tso_enabled); + DP_PRINT_STATS("Lro enabled: %u ", + soc_cfg_ctx->lro_enabled); + DP_PRINT_STATS("Sg enabled: %u ", + soc_cfg_ctx->sg_enabled); + DP_PRINT_STATS("Gro enabled: %u ", + soc_cfg_ctx->gro_enabled); + DP_PRINT_STATS("TC based dynamic GRO: %u ", + soc_cfg_ctx->tc_based_dynamic_gro); + DP_PRINT_STATS("TC ingress prio: %u ", + soc_cfg_ctx->tc_ingress_prio); + DP_PRINT_STATS("rawmode enabled: %u ", + soc_cfg_ctx->rawmode_enabled); + DP_PRINT_STATS("peer flow ctrl enabled: %u ", + soc_cfg_ctx->peer_flow_ctrl_enabled); + DP_PRINT_STATS("napi enabled: %u ", + soc_cfg_ctx->napi_enabled); + DP_PRINT_STATS("P2P Tcp Udp checksum offload: %u ", + soc_cfg_ctx->p2p_tcp_udp_checksumoffload); + DP_PRINT_STATS("NAN Tcp Udp checksum offload: %u ", + soc_cfg_ctx->nan_tcp_udp_checksumoffload); + DP_PRINT_STATS("Tcp Udp checksum offload: %u ", + soc_cfg_ctx->tcp_udp_checksumoffload); + DP_PRINT_STATS("Defrag timeout check: %u ", + soc_cfg_ctx->defrag_timeout_check); + DP_PRINT_STATS("Rx defrag min timeout: %u ", + soc_cfg_ctx->rx_defrag_min_timeout); + DP_PRINT_STATS("WBM release ring: %u ", + soc_cfg_ctx->wbm_release_ring); + DP_PRINT_STATS("TCL CMD ring: %u ", + soc_cfg_ctx->tcl_cmd_ring); + DP_PRINT_STATS("TCL Status ring: %u ", + soc_cfg_ctx->tcl_status_ring); + DP_PRINT_STATS("REO Reinject ring: %u ", + soc_cfg_ctx->reo_reinject_ring); + DP_PRINT_STATS("RX release ring: %u ", + soc_cfg_ctx->rx_release_ring); + DP_PRINT_STATS("REO Exception ring: %u ", + soc_cfg_ctx->reo_exception_ring); + DP_PRINT_STATS("REO CMD ring: %u ", + soc_cfg_ctx->reo_cmd_ring); + DP_PRINT_STATS("REO STATUS ring: %u ", + soc_cfg_ctx->reo_status_ring); + DP_PRINT_STATS("RXDMA refill ring: %u ", + soc_cfg_ctx->rxdma_refill_ring); + DP_PRINT_STATS("TX_desc limit_0: %u ", + soc_cfg_ctx->tx_desc_limit_0); + DP_PRINT_STATS("TX_desc limit_1: %u ", + soc_cfg_ctx->tx_desc_limit_1); + DP_PRINT_STATS("TX_desc limit_2: %u ", + soc_cfg_ctx->tx_desc_limit_2); + DP_PRINT_STATS("TX device limit: %u ", + soc_cfg_ctx->tx_device_limit); + DP_PRINT_STATS("TX sw internode queue: %u ", + soc_cfg_ctx->tx_sw_internode_queue); + DP_PRINT_STATS("RXDMA err dst ring: %u ", + soc_cfg_ctx->rxdma_err_dst_ring); + DP_PRINT_STATS("RX Flow Tag Enabled: %u ", + soc_cfg_ctx->is_rx_flow_tag_enabled); + DP_PRINT_STATS("RX Flow Search Table Size (# of entries): %u ", + soc_cfg_ctx->rx_flow_search_table_size); + DP_PRINT_STATS("RX Flow Search Table Per PDev : %u ", + soc_cfg_ctx->is_rx_flow_search_table_per_pdev); +} + +void +dp_print_pdev_cfg_params(struct dp_pdev *pdev) +{ + struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx; + + if (!pdev) { + dp_err("Context is null"); + return; + } + + pdev_cfg_ctx = pdev->wlan_cfg_ctx; + + if (!pdev_cfg_ctx) { + dp_err("Context is null"); + return; + } + + DP_PRINT_STATS("Rx dma buf ring size: %d ", + pdev_cfg_ctx->rx_dma_buf_ring_size); + DP_PRINT_STATS("DMA Mon buf ring size: %d ", + pdev_cfg_ctx->dma_mon_buf_ring_size); + DP_PRINT_STATS("DMA Mon dest ring size: %d ", + pdev_cfg_ctx->dma_mon_dest_ring_size); + DP_PRINT_STATS("DMA Mon status ring size: %d ", + pdev_cfg_ctx->dma_mon_status_ring_size); + DP_PRINT_STATS("Rxdma monitor desc ring: %d", + pdev_cfg_ctx->rxdma_monitor_desc_ring); + DP_PRINT_STATS("Num mac rings: %d ", + pdev_cfg_ctx->num_mac_rings); +} + +/** + * dp_print_ring_stat_from_hal(): Print hal level ring stats + * @soc: DP_SOC handle + * @srng: DP_SRNG handle + * @ring_name: SRNG name + * @ring_type: srng src/dst ring + * + * Return: void + */ +static void +dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng, + enum hal_ring_type ring_type) +{ + uint32_t tailp; + uint32_t headp; + int32_t hw_headp = -1; + int32_t hw_tailp = -1; + const char *ring_name; + struct hal_soc *hal_soc; + + if (soc && srng && srng->hal_srng) { + hal_soc = (struct hal_soc *)soc->hal_soc; + ring_name = dp_srng_get_str_from_hal_ring_type(ring_type); + + hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &tailp, &headp); + + DP_PRINT_STATS("%s:SW:Head pointer = %d Tail Pointer = %d\n", + ring_name, headp, tailp); + + hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_headp, + &hw_tailp, ring_type); + + DP_PRINT_STATS("%s:HW:Head pointer = %d Tail Pointer = %d\n", + ring_name, hw_headp, hw_tailp); + } +} + +#ifdef FEATURE_TSO_STATS +/** + * dp_print_tso_seg_stats - tso segment stats + * @pdev: pdev handle + * @id: tso packet id + * + * Return: None + */ +static void dp_print_tso_seg_stats(struct dp_pdev *pdev, uint32_t id) +{ + uint8_t num_seg; + uint32_t segid; + + /* TSO LEVEL 2 - SEGMENT INFO */ + num_seg = pdev->stats.tso_stats.tso_info.tso_packet_info[id].num_seg; + for (segid = 0; segid < CDP_MAX_TSO_SEGMENTS && segid < num_seg; segid++) { + DP_PRINT_STATS( + "Segment id:[%u] fragments: %u | Segment Length %u | TCP Seq no.: %u | ip_id: %u", + segid, + pdev->stats.tso_stats.tso_info.tso_packet_info[id] + .tso_seg[segid].num_frags, + pdev->stats.tso_stats.tso_info.tso_packet_info[id] + .tso_seg[segid].total_len, + pdev->stats.tso_stats.tso_info.tso_packet_info[id] + .tso_seg[segid].tso_flags.tcp_seq_num, + pdev->stats.tso_stats.tso_info.tso_packet_info[id] + .tso_seg[segid].tso_flags.ip_id); + DP_PRINT_STATS( + "fin: %u syn: %u rst: %u psh: %u ack: %u urg: %u ece: %u cwr: %u ns: %u", + pdev->stats.tso_stats.tso_info.tso_packet_info[id] + .tso_seg[segid].tso_flags.fin, + pdev->stats.tso_stats.tso_info.tso_packet_info[id] + .tso_seg[segid].tso_flags.syn, + pdev->stats.tso_stats.tso_info.tso_packet_info[id] + .tso_seg[segid].tso_flags.rst, + pdev->stats.tso_stats.tso_info.tso_packet_info[id] + .tso_seg[segid].tso_flags.psh, + pdev->stats.tso_stats.tso_info.tso_packet_info[id] + .tso_seg[segid].tso_flags.ack, + pdev->stats.tso_stats.tso_info.tso_packet_info[id] + .tso_seg[segid].tso_flags.urg, + pdev->stats.tso_stats.tso_info.tso_packet_info[id] + .tso_seg[segid].tso_flags.ece, + pdev->stats.tso_stats.tso_info.tso_packet_info[id] + .tso_seg[segid].tso_flags.cwr, + pdev->stats.tso_stats.tso_info.tso_packet_info[id] + .tso_seg[segid].tso_flags.ns); + } +} +#else +static inline +void dp_print_tso_seg_stats(struct dp_pdev *pdev, uint32_t id) +{ +} +#endif /* FEATURE_TSO_STATS */ + +/** + * dp_print_mon_ring_stats_from_hal() - Print stat for monitor rings based + * on target + * @pdev: physical device handle + * @mac_id: mac id + * + * Return: void + */ +static inline +void dp_print_mon_ring_stat_from_hal(struct dp_pdev *pdev, uint8_t mac_id) +{ + if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) { + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->rxdma_mon_buf_ring[mac_id], + RXDMA_MONITOR_BUF); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->rxdma_mon_dst_ring[mac_id], + RXDMA_MONITOR_DST); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->rxdma_mon_desc_ring[mac_id], + RXDMA_MONITOR_DESC); + } + + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->rxdma_mon_status_ring[mac_id], + RXDMA_MONITOR_STATUS); +} + +void +dp_print_ring_stats(struct dp_pdev *pdev) +{ + uint32_t i; + int mac_id; + int lmac_id; + + if (hif_pm_runtime_get_sync(pdev->soc->hif_handle, + RTPM_ID_DP_PRINT_RING_STATS)) + return; + + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->reo_exception_ring, + REO_EXCEPTION); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->reo_reinject_ring, + REO_REINJECT); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->reo_cmd_ring, + REO_CMD); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->reo_status_ring, + REO_STATUS); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->rx_rel_ring, + WBM2SW_RELEASE); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->tcl_cmd_ring, + TCL_CMD); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->tcl_status_ring, + TCL_STATUS); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->wbm_desc_rel_ring, + SW2WBM_RELEASE); + for (i = 0; i < MAX_REO_DEST_RINGS; i++) + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->reo_dest_ring[i], + REO_DST); + + for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->tcl_data_ring[i], + TCL_DATA); + for (i = 0; i < MAX_TCL_DATA_RINGS; i++) + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->tx_comp_ring[i], + WBM2SW_RELEASE); + + lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->rx_refill_buf_ring[lmac_id], + RXDMA_BUF); + + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->rx_refill_buf_ring2, + RXDMA_BUF); + + for (i = 0; i < MAX_RX_MAC_RINGS; i++) + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->rx_mac_buf_ring[i], + RXDMA_BUF); + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, + mac_id, pdev->pdev_id); + + dp_print_mon_ring_stat_from_hal(pdev, lmac_id); + } + + for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) { + lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, + i, pdev->pdev_id); + + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->rxdma_err_dst_ring + [lmac_id], + RXDMA_DST); + } + hif_pm_runtime_put(pdev->soc->hif_handle, + RTPM_ID_DP_PRINT_RING_STATS); +} + +/** + * dp_print_common_rates_info(): Print common rate for tx or rx + * @pkt_type_array: rate type array contains rate info + * + * Return:void + */ +static inline void +dp_print_common_rates_info(struct cdp_pkt_type *pkt_type_array) +{ + uint8_t mcs, pkt_type; + + DP_PRINT_STATS("MSDU Count"); + for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) { + for (mcs = 0; mcs < MAX_MCS; mcs++) { + if (!dp_rate_string[pkt_type][mcs].valid) + continue; + + DP_PRINT_STATS(" %s = %d", + dp_rate_string[pkt_type][mcs].mcs_type, + pkt_type_array[pkt_type].mcs_count[mcs]); + } + + DP_PRINT_STATS("\n"); + } +} + +/** + * dp_print_common_ppdu_rates_info(): Print common rate for tx or rx + * @pkt_type_array: rate type array contains rate info + * + * Return:void + */ +static inline void +dp_print_common_ppdu_rates_info(struct cdp_pkt_type *pkt_type_array) +{ + uint8_t mcs; + + DP_PRINT_STATS("PPDU Count"); + for (mcs = 0; mcs < MAX_MCS; mcs++) { + if (!dp_ppdu_rate_string[0][mcs].valid) + continue; + + DP_PRINT_STATS(" %s = %d", + dp_ppdu_rate_string[0][mcs].mcs_type, + pkt_type_array->mcs_count[mcs]); + } + + DP_PRINT_STATS("\n"); +} + +/** + * dp_print_mu_ppdu_rates_info(): Print mu rate for tx or rx + * @rx_mu: rx MU stats array + * + * Return:void + */ +static inline void +dp_print_mu_ppdu_rates_info(struct cdp_rx_mu *rx_mu) +{ + uint8_t mcs, pkt_type; + + DP_PRINT_STATS("PPDU Count"); + for (pkt_type = 0; pkt_type < RX_TYPE_MU_MAX; pkt_type++) { + for (mcs = 0; mcs < MAX_MCS; mcs++) { + if (!dp_mu_rate_string[pkt_type][mcs].valid) + continue; + + DP_PRINT_STATS(" %s = %d", + dp_mu_rate_string[pkt_type][mcs].mcs_type, + rx_mu[pkt_type].ppdu.mcs_count[mcs]); + } + + DP_PRINT_STATS("\n"); + } +} + +void dp_print_rx_rates(struct dp_vdev *vdev) +{ + struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev; + uint8_t i; + uint8_t index = 0; + char nss[DP_NSS_LENGTH]; + + DP_PRINT_STATS("Rx Rate Info:\n"); + dp_print_common_rates_info(pdev->stats.rx.pkt_type); + + index = 0; + for (i = 0; i < SS_COUNT; i++) { + index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index, + " %d", pdev->stats.rx.nss[i]); + } + DP_PRINT_STATS("NSS(1-8) = %s", + nss); + + DP_PRINT_STATS("SGI = 0.8us %d 0.4us %d 1.6us %d 3.2us %d", + pdev->stats.rx.sgi_count[0], + pdev->stats.rx.sgi_count[1], + pdev->stats.rx.sgi_count[2], + pdev->stats.rx.sgi_count[3]); + DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d", + pdev->stats.rx.bw[0], pdev->stats.rx.bw[1], + pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]); + DP_PRINT_STATS("Reception Type =" + "SU: %d MU_MIMO:%d MU_OFDMA:%d MU_OFDMA_MIMO:%d", + pdev->stats.rx.reception_type[0], + pdev->stats.rx.reception_type[1], + pdev->stats.rx.reception_type[2], + pdev->stats.rx.reception_type[3]); + DP_PRINT_STATS("Aggregation:\n"); + DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d", + pdev->stats.rx.ampdu_cnt); + DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d", + pdev->stats.rx.non_ampdu_cnt); + DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d", + pdev->stats.rx.amsdu_cnt); + DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d", + pdev->stats.rx.non_amsdu_cnt); +} + +void dp_print_tx_rates(struct dp_vdev *vdev) +{ + struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev; + + DP_PRINT_STATS("Tx Rate Info:\n"); + dp_print_common_rates_info(pdev->stats.tx.pkt_type); + + DP_PRINT_STATS("SGI = 0.8us %d 0.4us %d 1.6us %d 3.2us %d", + pdev->stats.tx.sgi_count[0], + pdev->stats.tx.sgi_count[1], + pdev->stats.tx.sgi_count[2], + pdev->stats.tx.sgi_count[3]); + + DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d", + pdev->stats.tx.bw[0], pdev->stats.tx.bw[1], + pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]); + + DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma); + DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc); + DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc); + DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries); + DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi); + + DP_PRINT_STATS("Aggregation:\n"); + DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d", + pdev->stats.tx.ampdu_cnt); + DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d", + pdev->stats.tx.non_ampdu_cnt); + DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d", + pdev->stats.tx.amsdu_cnt); + DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d", + pdev->stats.tx.non_amsdu_cnt); +} + +/** + * dp_print_nss(): Print nss count + * @nss: printable nss count array + * @pnss: nss count array + * @ss_count: number of nss + * + * Return:void + */ +static void dp_print_nss(char *nss, uint32_t *pnss, uint32_t ss_count) +{ + uint32_t index; + uint8_t i; + + index = 0; + for (i = 0; i < ss_count; i++) { + index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index, + " %d", *(pnss + i)); + } +} + +void dp_print_peer_stats(struct dp_peer *peer) +{ + uint8_t i; + uint32_t index; + uint32_t j; + char nss[DP_NSS_LENGTH]; + char mu_group_id[DP_MU_GROUP_LENGTH]; + struct dp_pdev *pdev; + uint32_t *pnss; + enum cdp_mu_packet_type rx_mu_type; + struct cdp_rx_mu *rx_mu; + + pdev = peer->vdev->pdev; + + DP_PRINT_STATS("Node Tx Stats:\n"); + DP_PRINT_STATS("Total Packet Completions = %d", + peer->stats.tx.comp_pkt.num); + DP_PRINT_STATS("Total Bytes Completions = %llu", + peer->stats.tx.comp_pkt.bytes); + DP_PRINT_STATS("Success Packets = %d", + peer->stats.tx.tx_success.num); + DP_PRINT_STATS("Success Bytes = %llu", + peer->stats.tx.tx_success.bytes); + DP_PRINT_STATS("Unicast Success Packets = %d", + peer->stats.tx.ucast.num); + DP_PRINT_STATS("Unicast Success Bytes = %llu", + peer->stats.tx.ucast.bytes); + DP_PRINT_STATS("Multicast Success Packets = %d", + peer->stats.tx.mcast.num); + DP_PRINT_STATS("Multicast Success Bytes = %llu", + peer->stats.tx.mcast.bytes); + DP_PRINT_STATS("Broadcast Success Packets = %d", + peer->stats.tx.bcast.num); + DP_PRINT_STATS("Broadcast Success Bytes = %llu", + peer->stats.tx.bcast.bytes); + DP_PRINT_STATS("Packets Failed = %d", + peer->stats.tx.tx_failed); + DP_PRINT_STATS("Packets In OFDMA = %d", + peer->stats.tx.ofdma); + DP_PRINT_STATS("Packets In STBC = %d", + peer->stats.tx.stbc); + DP_PRINT_STATS("Packets In LDPC = %d", + peer->stats.tx.ldpc); + DP_PRINT_STATS("Packet Retries = %d", + peer->stats.tx.retries); + DP_PRINT_STATS("MSDU's Part of AMSDU = %d", + peer->stats.tx.amsdu_cnt); + DP_PRINT_STATS("Msdu's As Part of Ampdu = %d", + peer->stats.tx.non_ampdu_cnt); + DP_PRINT_STATS("Msdu's As Ampdu = %d", + peer->stats.tx.ampdu_cnt); + DP_PRINT_STATS("Last Packet RSSI = %d", + peer->stats.tx.last_ack_rssi); + DP_PRINT_STATS("Dropped At FW: Removed Pkts = %u", + peer->stats.tx.dropped.fw_rem.num); + if (pdev && !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { + DP_PRINT_STATS("Dropped At FW: Removed bytes = %llu", + peer->stats.tx.dropped.fw_rem.bytes); + } + DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d", + peer->stats.tx.dropped.fw_rem_tx); + DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d", + peer->stats.tx.dropped.fw_rem_notx); + DP_PRINT_STATS("Dropped : Age Out = %d", + peer->stats.tx.dropped.age_out); + DP_PRINT_STATS("NAWDS : "); + DP_PRINT_STATS("Nawds multicast Drop Tx Packet = %d", + peer->stats.tx.nawds_mcast_drop); + DP_PRINT_STATS(" Nawds multicast Tx Packet Count = %d", + peer->stats.tx.nawds_mcast.num); + DP_PRINT_STATS(" Nawds multicast Tx Packet Bytes = %llu", + peer->stats.tx.nawds_mcast.bytes); + + DP_PRINT_STATS("Rate Info:"); + dp_print_common_rates_info(peer->stats.tx.pkt_type); + + DP_PRINT_STATS("SGI = 0.8us %d 0.4us %d 1.6us %d 3.2us %d", + peer->stats.tx.sgi_count[0], + peer->stats.tx.sgi_count[1], + peer->stats.tx.sgi_count[2], + peer->stats.tx.sgi_count[3]); + DP_PRINT_STATS("Excess Retries per AC "); + DP_PRINT_STATS(" Best effort = %d", + peer->stats.tx.excess_retries_per_ac[0]); + DP_PRINT_STATS(" Background= %d", + peer->stats.tx.excess_retries_per_ac[1]); + DP_PRINT_STATS(" Video = %d", + peer->stats.tx.excess_retries_per_ac[2]); + DP_PRINT_STATS(" Voice = %d", + peer->stats.tx.excess_retries_per_ac[3]); + DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n", + peer->stats.tx.bw[0], peer->stats.tx.bw[1], + peer->stats.tx.bw[2], peer->stats.tx.bw[3]); + + pnss = &peer->stats.tx.nss[0]; + dp_print_nss(nss, pnss, SS_COUNT); + + DP_PRINT_STATS("NSS(1-8) = %s", nss); + + DP_PRINT_STATS("Transmit Type :"); + DP_PRINT_STATS("SU %d, MU_MIMO %d, MU_OFDMA %d, MU_MIMO_OFDMA %d", + peer->stats.tx.transmit_type[SU].num_msdu, + peer->stats.tx.transmit_type[MU_MIMO].num_msdu, + peer->stats.tx.transmit_type[MU_OFDMA].num_msdu, + peer->stats.tx.transmit_type[MU_MIMO_OFDMA].num_msdu); + + for (i = 0; i < MAX_MU_GROUP_ID;) { + index = 0; + for (j = 0; j < DP_MU_GROUP_SHOW && i < MAX_MU_GROUP_ID; + j++) { + index += qdf_snprint(&mu_group_id[index], + DP_MU_GROUP_LENGTH - index, + " %d", + peer->stats.tx.mu_group_id[i]); + i++; + } + + DP_PRINT_STATS("User position list for GID %02d->%d: [%s]", + i - DP_MU_GROUP_SHOW, i - 1, mu_group_id); + } + + DP_PRINT_STATS("Last Packet RU index [%d], Size [%d]", + peer->stats.tx.ru_start, peer->stats.tx.ru_tones); + DP_PRINT_STATS("RU Locations RU[26 52 106 242 484 996]:"); + DP_PRINT_STATS("RU_26: %d", + peer->stats.tx.ru_loc[RU_26_INDEX].num_msdu); + DP_PRINT_STATS("RU 52: %d", + peer->stats.tx.ru_loc[RU_52_INDEX].num_msdu); + DP_PRINT_STATS("RU 106: %d", + peer->stats.tx.ru_loc[RU_106_INDEX].num_msdu); + DP_PRINT_STATS("RU 242: %d", + peer->stats.tx.ru_loc[RU_242_INDEX].num_msdu); + DP_PRINT_STATS("RU 484: %d", + peer->stats.tx.ru_loc[RU_484_INDEX].num_msdu); + DP_PRINT_STATS("RU 996: %d", + peer->stats.tx.ru_loc[RU_996_INDEX].num_msdu); + + DP_PRINT_STATS("Aggregation:"); + DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d", + peer->stats.tx.amsdu_cnt); + DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d", + peer->stats.tx.non_amsdu_cnt); + + DP_PRINT_STATS("Bytes and Packets transmitted in last one sec:"); + DP_PRINT_STATS(" Bytes transmitted in last sec: %d", + peer->stats.tx.tx_byte_rate); + DP_PRINT_STATS(" Data transmitted in last sec: %d", + peer->stats.tx.tx_data_rate); + + DP_PRINT_STATS("Node Rx Stats:"); + DP_PRINT_STATS("Packets Sent To Stack = %d", + peer->stats.rx.to_stack.num); + DP_PRINT_STATS("Bytes Sent To Stack = %llu", + peer->stats.rx.to_stack.bytes); + for (i = 0; i < CDP_MAX_RX_RINGS; i++) { + DP_PRINT_STATS("Ring Id = %d", i); + DP_PRINT_STATS(" Packets Received = %d", + peer->stats.rx.rcvd_reo[i].num); + DP_PRINT_STATS(" Bytes Received = %llu", + peer->stats.rx.rcvd_reo[i].bytes); + } + DP_PRINT_STATS("Multicast Packets Received = %d", + peer->stats.rx.multicast.num); + DP_PRINT_STATS("Multicast Bytes Received = %llu", + peer->stats.rx.multicast.bytes); + DP_PRINT_STATS("Broadcast Packets Received = %d", + peer->stats.rx.bcast.num); + DP_PRINT_STATS("Broadcast Bytes Received = %llu", + peer->stats.rx.bcast.bytes); + DP_PRINT_STATS("Intra BSS Packets Received = %d", + peer->stats.rx.intra_bss.pkts.num); + DP_PRINT_STATS("Intra BSS Bytes Received = %llu", + peer->stats.rx.intra_bss.pkts.bytes); + DP_PRINT_STATS("Raw Packets Received = %d", + peer->stats.rx.raw.num); + DP_PRINT_STATS("Raw Bytes Received = %llu", + peer->stats.rx.raw.bytes); + DP_PRINT_STATS("Errors: MIC Errors = %d", + peer->stats.rx.err.mic_err); + DP_PRINT_STATS("Erros: Decryption Errors = %d", + peer->stats.rx.err.decrypt_err); + DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d", + peer->stats.rx.non_ampdu_cnt); + DP_PRINT_STATS("Msdu's Recived As Ampdu = %d", + peer->stats.rx.ampdu_cnt); + DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d", + peer->stats.rx.non_amsdu_cnt); + DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d", + peer->stats.rx.amsdu_cnt); + DP_PRINT_STATS("NAWDS : "); + DP_PRINT_STATS(" Nawds multicast Drop Rx Packet = %d", + peer->stats.rx.nawds_mcast_drop); + DP_PRINT_STATS("SGI = 0.8us %d 0.4us %d 1.6us %d 3.2us %d", + peer->stats.rx.sgi_count[0], + peer->stats.rx.sgi_count[1], + peer->stats.rx.sgi_count[2], + peer->stats.rx.sgi_count[3]); + DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d", + peer->stats.rx.bw[0], peer->stats.rx.bw[1], + peer->stats.rx.bw[2], peer->stats.rx.bw[3]); + DP_PRINT_STATS("MSDU Reception Type"); + DP_PRINT_STATS("SU %d MU_MIMO %d MU_OFDMA %d MU_OFDMA_MIMO %d", + peer->stats.rx.reception_type[0], + peer->stats.rx.reception_type[1], + peer->stats.rx.reception_type[2], + peer->stats.rx.reception_type[3]); + DP_PRINT_STATS("PPDU Reception Type"); + DP_PRINT_STATS("SU %d MU_MIMO %d MU_OFDMA %d MU_OFDMA_MIMO %d", + peer->stats.rx.ppdu_cnt[0], + peer->stats.rx.ppdu_cnt[1], + peer->stats.rx.ppdu_cnt[2], + peer->stats.rx.ppdu_cnt[3]); + + dp_print_common_rates_info(peer->stats.rx.pkt_type); + dp_print_common_ppdu_rates_info(&peer->stats.rx.su_ax_ppdu_cnt); + dp_print_mu_ppdu_rates_info(&peer->stats.rx.rx_mu[0]); + + pnss = &peer->stats.rx.nss[0]; + dp_print_nss(nss, pnss, SS_COUNT); + DP_PRINT_STATS("MSDU Count"); + DP_PRINT_STATS(" NSS(1-8) = %s", nss); + + DP_PRINT_STATS("reception mode SU"); + pnss = &peer->stats.rx.ppdu_nss[0]; + dp_print_nss(nss, pnss, SS_COUNT); + + DP_PRINT_STATS(" PPDU Count"); + DP_PRINT_STATS(" NSS(1-8) = %s", nss); + + DP_PRINT_STATS(" MPDU OK = %d, MPDU Fail = %d", + peer->stats.rx.mpdu_cnt_fcs_ok, + peer->stats.rx.mpdu_cnt_fcs_err); + + for (rx_mu_type = 0; rx_mu_type < RX_TYPE_MU_MAX; rx_mu_type++) { + DP_PRINT_STATS("reception mode %s", + mu_reception_mode[rx_mu_type]); + rx_mu = &peer->stats.rx.rx_mu[rx_mu_type]; + + pnss = &rx_mu->ppdu_nss[0]; + dp_print_nss(nss, pnss, SS_COUNT); + DP_PRINT_STATS(" PPDU Count"); + DP_PRINT_STATS(" NSS(1-8) = %s", nss); + + DP_PRINT_STATS(" MPDU OK = %d, MPDU Fail = %d", + rx_mu->mpdu_cnt_fcs_ok, + rx_mu->mpdu_cnt_fcs_err); + } + + DP_PRINT_STATS("Aggregation:"); + DP_PRINT_STATS(" Msdu's Part of Ampdu = %d", + peer->stats.rx.ampdu_cnt); + DP_PRINT_STATS(" Msdu's With No Mpdu Level Aggregation = %d", + peer->stats.rx.non_ampdu_cnt); + DP_PRINT_STATS(" Msdu's Part of Amsdu = %d", + peer->stats.rx.amsdu_cnt); + DP_PRINT_STATS(" Msdu's With No Msdu Level Aggregation = %d", + peer->stats.rx.non_amsdu_cnt); + + DP_PRINT_STATS("Bytes and Packets received in last one sec:"); + DP_PRINT_STATS(" Bytes received in last sec: %d", + peer->stats.rx.rx_byte_rate); + DP_PRINT_STATS(" Data received in last sec: %d", + peer->stats.rx.rx_data_rate); + DP_PRINT_STATS("Multipass Rx Packet Drop = %d", + peer->stats.rx.multipass_rx_pkt_drop); +} + +void dp_print_per_ring_stats(struct dp_soc *soc) +{ + uint8_t ring; + uint16_t core; + uint64_t total_packets; + + DP_PRINT_STATS("Reo packets per ring:"); + for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) { + total_packets = 0; + DP_PRINT_STATS("Packets on ring %u:", ring); + for (core = 0; core < num_possible_cpus(); core++) { + if (!soc->stats.rx.ring_packets[core][ring]) + continue; + DP_PRINT_STATS("Packets arriving on core %u: %llu", + core, + soc->stats.rx.ring_packets[core][ring]); + total_packets += soc->stats.rx.ring_packets[core][ring]; + } + DP_PRINT_STATS("Total packets on ring %u: %llu", + ring, total_packets); + } +} + +void dp_txrx_path_stats(struct dp_soc *soc) +{ + uint8_t error_code; + uint8_t loop_pdev; + struct dp_pdev *pdev; + uint8_t i; + + if (!soc) { + dp_err("%s: Invalid access", __func__); + return; + } + + for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) { + pdev = soc->pdev_list[loop_pdev]; + dp_aggregate_pdev_stats(pdev); + DP_PRINT_STATS("Tx path Statistics:"); + DP_PRINT_STATS("from stack: %u msdus (%llu bytes)", + pdev->stats.tx_i.rcvd.num, + pdev->stats.tx_i.rcvd.bytes); + DP_PRINT_STATS("processed from host: %u msdus (%llu bytes)", + pdev->stats.tx_i.processed.num, + pdev->stats.tx_i.processed.bytes); + DP_PRINT_STATS("successfully transmitted: %u msdus (%llu bytes)", + pdev->stats.tx.tx_success.num, + pdev->stats.tx.tx_success.bytes); + + DP_PRINT_STATS("Dropped in host:"); + DP_PRINT_STATS("Total packets dropped: %u,", + pdev->stats.tx_i.dropped.dropped_pkt.num); + DP_PRINT_STATS("Descriptor not available: %u", + pdev->stats.tx_i.dropped.desc_na.num); + DP_PRINT_STATS("Ring full: %u", + pdev->stats.tx_i.dropped.ring_full); + DP_PRINT_STATS("Enqueue fail: %u", + pdev->stats.tx_i.dropped.enqueue_fail); + DP_PRINT_STATS("DMA Error: %u", + pdev->stats.tx_i.dropped.dma_error); + + DP_PRINT_STATS("Dropped in hardware:"); + DP_PRINT_STATS("total packets dropped: %u", + pdev->stats.tx.tx_failed); + DP_PRINT_STATS("mpdu age out: %u", + pdev->stats.tx.dropped.age_out); + DP_PRINT_STATS("firmware removed packets: %u (%llu bytes)", + pdev->stats.tx.dropped.fw_rem.num, + pdev->stats.tx.dropped.fw_rem.bytes); + DP_PRINT_STATS("firmware removed tx: %u", + pdev->stats.tx.dropped.fw_rem_tx); + DP_PRINT_STATS("firmware removed notx %u", + pdev->stats.tx.dropped.fw_rem_notx); + DP_PRINT_STATS("Invalid peer on tx path: %u", + pdev->soc->stats.tx.tx_invalid_peer.num); + + DP_PRINT_STATS("Tx packets sent per interrupt:"); + DP_PRINT_STATS("Single Packet: %u", + pdev->stats.tx_comp_histogram.pkts_1); + DP_PRINT_STATS("2-20 Packets: %u", + pdev->stats.tx_comp_histogram.pkts_2_20); + DP_PRINT_STATS("21-40 Packets: %u", + pdev->stats.tx_comp_histogram.pkts_21_40); + DP_PRINT_STATS("41-60 Packets: %u", + pdev->stats.tx_comp_histogram.pkts_41_60); + DP_PRINT_STATS("61-80 Packets: %u", + pdev->stats.tx_comp_histogram.pkts_61_80); + DP_PRINT_STATS("81-100 Packets: %u", + pdev->stats.tx_comp_histogram.pkts_81_100); + DP_PRINT_STATS("101-200 Packets: %u", + pdev->stats.tx_comp_histogram.pkts_101_200); + DP_PRINT_STATS(" 201+ Packets: %u", + pdev->stats.tx_comp_histogram.pkts_201_plus); + + DP_PRINT_STATS("Rx path statistics"); + + DP_PRINT_STATS("delivered %u msdus ( %llu bytes),", + pdev->stats.rx.to_stack.num, + pdev->stats.rx.to_stack.bytes); + for (i = 0; i < CDP_MAX_RX_RINGS; i++) { + if (!pdev->stats.rx.rcvd_reo[i].num) + continue; + DP_PRINT_STATS( + "received on reo[%d] %u msdus( %llu bytes),", + i, pdev->stats.rx.rcvd_reo[i].num, + pdev->stats.rx.rcvd_reo[i].bytes); + } + DP_PRINT_STATS("intra-bss packets %u msdus ( %llu bytes),", + pdev->stats.rx.intra_bss.pkts.num, + pdev->stats.rx.intra_bss.pkts.bytes); + DP_PRINT_STATS("intra-bss fails %u msdus ( %llu bytes),", + pdev->stats.rx.intra_bss.fail.num, + pdev->stats.rx.intra_bss.fail.bytes); + DP_PRINT_STATS("intra-bss no mdns fwds %u msdus", + pdev->stats.rx.intra_bss.mdns_no_fwd); + DP_PRINT_STATS("intra-bss EAPOL drops: %u", + soc->stats.rx.err.intrabss_eapol_drop); + + DP_PRINT_STATS("raw packets %u msdus ( %llu bytes),", + pdev->stats.rx.raw.num, + pdev->stats.rx.raw.bytes); + DP_PRINT_STATS("mic errors %u", + pdev->stats.rx.err.mic_err); + DP_PRINT_STATS("Invalid peer on rx path: %u", + pdev->soc->stats.rx.err.rx_invalid_peer.num); + DP_PRINT_STATS("sw_peer_id invalid %u", + pdev->soc->stats.rx.err.rx_invalid_peer_id.num); + DP_PRINT_STATS("packet_len invalid %u", + pdev->soc->stats.rx.err.rx_invalid_pkt_len.num); + DP_PRINT_STATS("sa or da idx invalid %u", + pdev->soc->stats.rx.err.invalid_sa_da_idx); + DP_PRINT_STATS("defrag peer uninit %u", + pdev->soc->stats.rx.err.defrag_peer_uninit); + DP_PRINT_STATS("pkts delivered no peer %u", + pdev->soc->stats.rx.err.pkt_delivered_no_peer); + DP_PRINT_STATS("RX invalid cookie: %d", + soc->stats.rx.err.invalid_cookie); + DP_PRINT_STATS("RX stale cookie: %d", + soc->stats.rx.err.stale_cookie); + DP_PRINT_STATS("2k jump delba sent: %u", + pdev->soc->stats.rx.err.rx_2k_jump_delba_sent); + DP_PRINT_STATS("2k jump msdu to stack: %u", + pdev->soc->stats.rx.err.rx_2k_jump_to_stack); + DP_PRINT_STATS("2k jump msdu drop: %u", + pdev->soc->stats.rx.err.rx_2k_jump_drop); + DP_PRINT_STATS("REO err oor msdu to stack %u", + pdev->soc->stats.rx.err.reo_err_oor_to_stack); + DP_PRINT_STATS("REO err oor msdu drop: %u", + pdev->soc->stats.rx.err.reo_err_oor_drop); + DP_PRINT_STATS("Rx err msdu rejected: %d", + soc->stats.rx.err.rejected); + DP_PRINT_STATS("Rx raw frame dropped: %d", + soc->stats.rx.err.raw_frm_drop); + DP_PRINT_STATS("Rx stale link desc cookie: %d", + pdev->soc->stats.rx.err.invalid_link_cookie); + DP_PRINT_STATS("Rx nbuf sanity fails: %d", + pdev->soc->stats.rx.err.nbuf_sanity_fail); + DP_PRINT_STATS("Rx refill duplicate link desc: %d", + pdev->soc->stats.rx.err.dup_refill_link_desc); + + DP_PRINT_STATS("Reo Statistics"); + DP_PRINT_STATS("near_full: %u ", soc->stats.rx.near_full); + DP_PRINT_STATS("rbm error: %u msdus", + pdev->soc->stats.rx.err.invalid_rbm); + DP_PRINT_STATS("hal ring access fail: %u msdus", + pdev->soc->stats.rx.err.hal_ring_access_fail); + + DP_PRINT_STATS("hal ring access full fail: %u msdus", + pdev->soc->stats.rx.err.hal_ring_access_full_fail); + + DP_PRINT_STATS("Rx BAR frames:%d", soc->stats.rx.bar_frame); + + for (error_code = 0; error_code < HAL_REO_ERR_MAX; + error_code++) { + if (!pdev->soc->stats.rx.err.reo_error[error_code]) + continue; + DP_PRINT_STATS("Reo error number (%u): %u msdus", + error_code, + pdev->soc->stats.rx.err + .reo_error[error_code]); + } + + for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX; + error_code++) { + if (!pdev->soc->stats.rx.err.rxdma_error[error_code]) + continue; + DP_PRINT_STATS("Rxdma error number (%u): %u msdus", + error_code, + pdev->soc->stats.rx.err + .rxdma_error[error_code]); + } + + DP_PRINT_STATS("Rx packets reaped per interrupt:"); + DP_PRINT_STATS("Single Packet: %u", + pdev->stats.rx_ind_histogram.pkts_1); + DP_PRINT_STATS("2-20 Packets: %u", + pdev->stats.rx_ind_histogram.pkts_2_20); + DP_PRINT_STATS("21-40 Packets: %u", + pdev->stats.rx_ind_histogram.pkts_21_40); + DP_PRINT_STATS("41-60 Packets: %u", + pdev->stats.rx_ind_histogram.pkts_41_60); + DP_PRINT_STATS("61-80 Packets: %u", + pdev->stats.rx_ind_histogram.pkts_61_80); + DP_PRINT_STATS("81-100 Packets: %u", + pdev->stats.rx_ind_histogram.pkts_81_100); + DP_PRINT_STATS("101-200 Packets: %u", + pdev->stats.rx_ind_histogram.pkts_101_200); + DP_PRINT_STATS(" 201+ Packets: %u", + pdev->stats.rx_ind_histogram.pkts_201_plus); + + DP_PRINT_STATS("%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u", + __func__, + pdev->soc->wlan_cfg_ctx + ->tso_enabled, + pdev->soc->wlan_cfg_ctx + ->lro_enabled, + pdev->soc->wlan_cfg_ctx + ->rx_hash, + pdev->soc->wlan_cfg_ctx + ->napi_enabled); +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 + DP_PRINT_STATS("%s: Tx flow stop queue: %u tx flow start queue offset: %u", + __func__, + pdev->soc->wlan_cfg_ctx + ->tx_flow_stop_queue_threshold, + pdev->soc->wlan_cfg_ctx + ->tx_flow_start_queue_offset); +#endif + } +} + +/* + * dp_aggregate_pdev_ctrl_frames_stats()- function to agreegate peer stats + * Current scope is bar received count + * + * @pdev_handle: DP_PDEV handle + * + * Return: void + */ +static void +dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev) +{ + struct dp_vdev *vdev; + struct dp_peer *peer; + uint32_t waitcnt; + + TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if (!peer) { + dp_err("DP Invalid Peer refernce"); + return; + } + + if (peer->delete_in_progress) { + dp_err("DP Peer deletion in progress"); + continue; + } + qdf_atomic_inc(&peer->ref_cnt); + waitcnt = 0; + dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev); + while (!(qdf_atomic_read(&pdev->stats_cmd_complete)) && + waitcnt < 10) { + schedule_timeout_interruptible( + STATS_PROC_TIMEOUT); + waitcnt++; + } + qdf_atomic_set(&pdev->stats_cmd_complete, 0); + dp_peer_unref_delete(peer); + } + } +} + +void +dp_print_pdev_tx_stats(struct dp_pdev *pdev) +{ + uint8_t i = 0, index = 0; + + DP_PRINT_STATS("PDEV Tx Stats:\n"); + DP_PRINT_STATS("Received From Stack:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.rcvd.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.rcvd.bytes); + DP_PRINT_STATS("Processed:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.processed.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.processed.bytes); + DP_PRINT_STATS("Total Completions:"); + DP_PRINT_STATS(" Packets = %u", + pdev->stats.tx.comp_pkt.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx.comp_pkt.bytes); + DP_PRINT_STATS("Successful Completions:"); + DP_PRINT_STATS(" Packets = %u", + pdev->stats.tx.tx_success.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx.tx_success.bytes); + DP_PRINT_STATS("Dropped:"); + DP_PRINT_STATS(" Total = %d", + pdev->stats.tx_i.dropped.dropped_pkt.num); + DP_PRINT_STATS(" Dma_map_error = %d", + pdev->stats.tx_i.dropped.dma_error); + DP_PRINT_STATS(" Ring Full = %d", + pdev->stats.tx_i.dropped.ring_full); + DP_PRINT_STATS(" Descriptor Not available = %d", + pdev->stats.tx_i.dropped.desc_na.num); + DP_PRINT_STATS(" HW enqueue failed= %d", + pdev->stats.tx_i.dropped.enqueue_fail); + DP_PRINT_STATS(" Resources Full = %d", + pdev->stats.tx_i.dropped.res_full); + DP_PRINT_STATS(" FW removed Pkts = %u", + pdev->stats.tx.dropped.fw_rem.num); + DP_PRINT_STATS(" FW removed bytes= %llu", + pdev->stats.tx.dropped.fw_rem.bytes); + DP_PRINT_STATS(" FW removed transmitted = %d", + pdev->stats.tx.dropped.fw_rem_tx); + DP_PRINT_STATS(" FW removed untransmitted = %d", + pdev->stats.tx.dropped.fw_rem_notx); + DP_PRINT_STATS(" FW removed untransmitted fw_reason1 = %d", + pdev->stats.tx.dropped.fw_reason1); + DP_PRINT_STATS(" FW removed untransmitted fw_reason2 = %d", + pdev->stats.tx.dropped.fw_reason2); + DP_PRINT_STATS(" FW removed untransmitted fw_reason3 = %d", + pdev->stats.tx.dropped.fw_reason3); + DP_PRINT_STATS(" Aged Out from msdu/mpdu queues = %d", + pdev->stats.tx.dropped.age_out); + DP_PRINT_STATS(" headroom insufficient = %d", + pdev->stats.tx_i.dropped.headroom_insufficient); + DP_PRINT_STATS("Multicast:"); + DP_PRINT_STATS(" Packets: %u", + pdev->stats.tx.mcast.num); + DP_PRINT_STATS(" Bytes: %llu", + pdev->stats.tx.mcast.bytes); + DP_PRINT_STATS("Scatter Gather:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.sg.sg_pkt.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.sg.sg_pkt.bytes); + DP_PRINT_STATS(" Dropped By Host = %d", + pdev->stats.tx_i.sg.dropped_host.num); + DP_PRINT_STATS(" Dropped By Target = %d", + pdev->stats.tx_i.sg.dropped_target); + DP_PRINT_STATS("Mcast Enhancement:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.mcast_en.mcast_pkt.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.mcast_en.mcast_pkt.bytes); + DP_PRINT_STATS(" Dropped: Map Errors = %d", + pdev->stats.tx_i.mcast_en.dropped_map_error); + DP_PRINT_STATS(" Dropped: Self Mac = %d", + pdev->stats.tx_i.mcast_en.dropped_self_mac); + DP_PRINT_STATS(" Dropped: Send Fail = %d", + pdev->stats.tx_i.mcast_en.dropped_send_fail); + DP_PRINT_STATS(" Unicast sent = %d", + pdev->stats.tx_i.mcast_en.ucast); + DP_PRINT_STATS("Raw:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.raw.raw_pkt.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.raw.raw_pkt.bytes); + DP_PRINT_STATS(" DMA map error = %d", + pdev->stats.tx_i.raw.dma_map_error); + DP_PRINT_STATS(" RAW pkt type[!data] error = %d", + pdev->stats.tx_i.raw.invalid_raw_pkt_datatype); + DP_PRINT_STATS("Reinjected:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.reinject_pkts.num); + DP_PRINT_STATS(" Bytes = %llu\n", + pdev->stats.tx_i.reinject_pkts.bytes); + DP_PRINT_STATS("Inspected:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.inspect_pkts.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.inspect_pkts.bytes); + DP_PRINT_STATS("Nawds Multicast:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.nawds_mcast.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.nawds_mcast.bytes); + DP_PRINT_STATS("CCE Classified:"); + DP_PRINT_STATS(" CCE Classified Packets: %u", + pdev->stats.tx_i.cce_classified); + DP_PRINT_STATS(" RAW CCE Classified Packets: %u", + pdev->stats.tx_i.cce_classified_raw); + DP_PRINT_STATS("Mesh stats:"); + DP_PRINT_STATS(" frames to firmware: %u", + pdev->stats.tx_i.mesh.exception_fw); + DP_PRINT_STATS(" completions from fw: %u", + pdev->stats.tx_i.mesh.completion_fw); + DP_PRINT_STATS("PPDU stats counter"); + for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) { + DP_PRINT_STATS(" Tag[%d] = %llu", index, + pdev->stats.ppdu_stats_counter[index]); + } + DP_PRINT_STATS("BA not received for delayed_ba: %d", + pdev->stats.cdp_delayed_ba_not_recev); + DP_PRINT_STATS("tx_ppdu_proc: %llu\n", + pdev->tx_ppdu_proc); + + for (i = 0; i < CDP_WDI_NUM_EVENTS; i++) { + if (!pdev->stats.wdi_event[i]) + DP_PRINT_STATS("Wdi msgs received from fw[%d]:%d", + i, pdev->stats.wdi_event[i]); + } + + dp_print_pdev_tx_capture_stats(pdev); +} + +void +dp_print_pdev_rx_stats(struct dp_pdev *pdev) +{ + DP_PRINT_STATS("PDEV Rx Stats:\n"); + DP_PRINT_STATS("Received From HW (Per Rx Ring):"); + DP_PRINT_STATS(" Packets = %d %d %d %d", + pdev->stats.rx.rcvd_reo[0].num, + pdev->stats.rx.rcvd_reo[1].num, + pdev->stats.rx.rcvd_reo[2].num, + pdev->stats.rx.rcvd_reo[3].num); + DP_PRINT_STATS(" Bytes = %llu %llu %llu %llu", + pdev->stats.rx.rcvd_reo[0].bytes, + pdev->stats.rx.rcvd_reo[1].bytes, + pdev->stats.rx.rcvd_reo[2].bytes, + pdev->stats.rx.rcvd_reo[3].bytes); + DP_PRINT_STATS("Replenished:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.replenish.pkts.num); + DP_PRINT_STATS(" Buffers Added To Freelist = %d", + pdev->stats.buf_freelist); + DP_PRINT_STATS(" Low threshold intr = %d", + pdev->stats.replenish.low_thresh_intrs); + DP_PRINT_STATS("Dropped:"); + DP_PRINT_STATS(" msdu_not_done = %d", + pdev->stats.dropped.msdu_not_done); + DP_PRINT_STATS(" wifi parse = %d", + pdev->stats.dropped.wifi_parse); + DP_PRINT_STATS(" mon_rx_drop = %d", + pdev->stats.dropped.mon_rx_drop); + DP_PRINT_STATS(" mon_radiotap_update_err = %d", + pdev->stats.dropped.mon_radiotap_update_err); + DP_PRINT_STATS(" mec_drop = %d", + pdev->stats.rx.mec_drop.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.rx.mec_drop.bytes); + DP_PRINT_STATS("Sent To Stack:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.rx.to_stack.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.rx.to_stack.bytes); + DP_PRINT_STATS(" vlan_tag_stp_cnt = %d", + pdev->stats.vlan_tag_stp_cnt); + DP_PRINT_STATS("Multicast/Broadcast:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.rx.multicast.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.rx.multicast.bytes); + DP_PRINT_STATS("Errors:"); + DP_PRINT_STATS(" Rxdma Ring Un-inititalized = %d", + pdev->stats.replenish.rxdma_err); + DP_PRINT_STATS(" Desc Alloc Failed: = %d", + pdev->stats.err.desc_alloc_fail); + DP_PRINT_STATS(" IP checksum error = %d", + pdev->stats.err.ip_csum_err); + DP_PRINT_STATS(" TCP/UDP checksum error = %d", + pdev->stats.err.tcp_udp_csum_err); + + /* Get bar_recv_cnt */ + dp_aggregate_pdev_ctrl_frames_stats(pdev); + DP_PRINT_STATS("BAR Received Count: = %d", + pdev->stats.rx.bar_recv_cnt); +} + +void +dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev) +{ + struct cdp_pdev_mon_stats *rx_mon_stats; + uint32_t *stat_ring_ppdu_ids; + uint32_t *dest_ring_ppdu_ids; + int i, idx; + + rx_mon_stats = &pdev->rx_mon_stats; + + DP_PRINT_STATS("PDEV Rx Monitor Stats:\n"); + + DP_PRINT_STATS("status_ppdu_compl_cnt = %d", + rx_mon_stats->status_ppdu_compl); + DP_PRINT_STATS("status_ppdu_start_cnt = %d", + rx_mon_stats->status_ppdu_start); + DP_PRINT_STATS("status_ppdu_end_cnt = %d", + rx_mon_stats->status_ppdu_end); + DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d", + rx_mon_stats->status_ppdu_start_mis); + DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d", + rx_mon_stats->status_ppdu_end_mis); + DP_PRINT_STATS("status_ppdu_done_cnt = %d", + rx_mon_stats->status_ppdu_done); + DP_PRINT_STATS("dest_ppdu_done_cnt = %d", + rx_mon_stats->dest_ppdu_done); + DP_PRINT_STATS("dest_mpdu_done_cnt = %d", + rx_mon_stats->dest_mpdu_done); + DP_PRINT_STATS("tlv_tag_status_err_cnt = %u", + rx_mon_stats->tlv_tag_status_err); + DP_PRINT_STATS("dest_mpdu_drop_cnt = %d", + rx_mon_stats->dest_mpdu_drop); + DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d", + rx_mon_stats->dup_mon_linkdesc_cnt); + DP_PRINT_STATS("dup_mon_buf_cnt = %d", + rx_mon_stats->dup_mon_buf_cnt); + stat_ring_ppdu_ids = + (uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST); + dest_ring_ppdu_ids = + (uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST); + + if (!stat_ring_ppdu_ids || !dest_ring_ppdu_ids) + DP_PRINT_STATS("Unable to allocate ppdu id hist mem\n"); + + qdf_spin_lock_bh(&pdev->mon_lock); + idx = rx_mon_stats->ppdu_id_hist_idx; + qdf_mem_copy(stat_ring_ppdu_ids, + rx_mon_stats->stat_ring_ppdu_id_hist, + sizeof(uint32_t) * MAX_PPDU_ID_HIST); + qdf_mem_copy(dest_ring_ppdu_ids, + rx_mon_stats->dest_ring_ppdu_id_hist, + sizeof(uint32_t) * MAX_PPDU_ID_HIST); + qdf_spin_unlock_bh(&pdev->mon_lock); + + DP_PRINT_STATS("PPDU Id history:"); + DP_PRINT_STATS("stat_ring_ppdu_ids\t dest_ring_ppdu_ids"); + for (i = 0; i < MAX_PPDU_ID_HIST; i++) { + idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1); + DP_PRINT_STATS("%*u\t%*u", 16, + rx_mon_stats->stat_ring_ppdu_id_hist[idx], 16, + rx_mon_stats->dest_ring_ppdu_id_hist[idx]); + } + qdf_mem_free(stat_ring_ppdu_ids); + qdf_mem_free(dest_ring_ppdu_ids); + DP_PRINT_STATS("mon_rx_dest_stuck = %d", + rx_mon_stats->mon_rx_dest_stuck); +} + +void +dp_print_soc_tx_stats(struct dp_soc *soc) +{ + uint8_t desc_pool_id; + + soc->stats.tx.desc_in_use = 0; + + DP_PRINT_STATS("SOC Tx Stats:\n"); + + for (desc_pool_id = 0; + desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); + desc_pool_id++) + soc->stats.tx.desc_in_use += + soc->tx_desc[desc_pool_id].num_allocated; + + DP_PRINT_STATS("Tx Descriptors In Use = %d", + soc->stats.tx.desc_in_use); + DP_PRINT_STATS("Tx Invalid peer:"); + DP_PRINT_STATS(" Packets = %d", + soc->stats.tx.tx_invalid_peer.num); + DP_PRINT_STATS(" Bytes = %llu", + soc->stats.tx.tx_invalid_peer.bytes); + DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d", + soc->stats.tx.tcl_ring_full[0], + soc->stats.tx.tcl_ring_full[1], + soc->stats.tx.tcl_ring_full[2]); + DP_PRINT_STATS("Tx invalid completion release = %d", + soc->stats.tx.invalid_release_source); + DP_PRINT_STATS("Tx comp wbm internal error = %d : [%d %d %d %d]", + soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_ALL], + soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], + soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], + soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], + soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED]); + DP_PRINT_STATS("Tx comp non wbm internal error = %d", + soc->stats.tx.non_wbm_internal_err); + DP_PRINT_STATS("Tx comp loop pkt limit hit = %d", + soc->stats.tx.tx_comp_loop_pkt_limit_hit); + DP_PRINT_STATS("Tx comp HP out of sync2 = %d", + soc->stats.tx.hp_oos2); +} + +void dp_print_soc_interrupt_stats(struct dp_soc *soc) +{ + int i = 0; + struct dp_intr_stats *intr_stats; + + DP_PRINT_STATS("INT: Total |txComps|reo[0] |reo[1] |reo[2] |reo[3] |mon |rx_err | wbm |reo_sta|rxdm2hst|hst2rxdm|"); + for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) { + intr_stats = &soc->intr_ctx[i].intr_stats; + DP_PRINT_STATS("%3u[%3d]: %7u %7u %7u %7u %7u %7u %7u %7u %7u %7u %8u %8u", + i, + hif_get_int_ctx_irq_num(soc->hif_handle, i), + intr_stats->num_masks, + intr_stats->num_tx_ring_masks[0], + intr_stats->num_rx_ring_masks[0], + intr_stats->num_rx_ring_masks[1], + intr_stats->num_rx_ring_masks[2], + intr_stats->num_rx_ring_masks[3], + intr_stats->num_rx_mon_ring_masks, + intr_stats->num_rx_err_ring_masks, + intr_stats->num_rx_wbm_rel_ring_masks, + intr_stats->num_reo_status_ring_masks, + intr_stats->num_rxdma2host_ring_masks, + intr_stats->num_host2rxdma_ring_masks); + } +} + +void +dp_print_soc_rx_stats(struct dp_soc *soc) +{ + uint32_t i; + char reo_error[DP_REO_ERR_LENGTH]; + char rxdma_error[DP_RXDMA_ERR_LENGTH]; + uint8_t index = 0; + + DP_PRINT_STATS("No of AST Entries = %d", soc->num_ast_entries); + DP_PRINT_STATS("SOC Rx Stats:\n"); + DP_PRINT_STATS("Fragmented packets: %u", + soc->stats.rx.rx_frags); + DP_PRINT_STATS("Reo reinjected packets: %u", + soc->stats.rx.reo_reinject); + DP_PRINT_STATS("Errors:\n"); + DP_PRINT_STATS("Rx Decrypt Errors = %d", + (soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] + + soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC])); + DP_PRINT_STATS("Invalid RBM = %d", + soc->stats.rx.err.invalid_rbm); + DP_PRINT_STATS("Invalid Vdev = %d", + soc->stats.rx.err.invalid_vdev); + DP_PRINT_STATS("Invalid sa_idx or da_idx = %d", + soc->stats.rx.err.invalid_sa_da_idx); + DP_PRINT_STATS("Defrag peer uninit = %d", + soc->stats.rx.err.defrag_peer_uninit); + DP_PRINT_STATS("Pkts delivered no peer = %d", + soc->stats.rx.err.pkt_delivered_no_peer); + DP_PRINT_STATS("Pkts drop due to no peer auth :%d", + soc->stats.rx.err.peer_unauth_rx_pkt_drop); + DP_PRINT_STATS("Invalid Pdev = %d", + soc->stats.rx.err.invalid_pdev); + DP_PRINT_STATS("Invalid Peer = %d", + soc->stats.rx.err.rx_invalid_peer.num); + DP_PRINT_STATS("HAL Ring Access Fail = %d", + soc->stats.rx.err.hal_ring_access_fail); + DP_PRINT_STATS("HAL Ring Access Full Fail = %d", + soc->stats.rx.err.hal_ring_access_full_fail); + DP_PRINT_STATS("MSDU Done failures = %d", + soc->stats.rx.err.msdu_done_fail); + DP_PRINT_STATS("RX frags: %d", soc->stats.rx.rx_frags); + DP_PRINT_STATS("RX frag wait: %d", soc->stats.rx.rx_frag_wait); + DP_PRINT_STATS("RX frag err: %d", soc->stats.rx.rx_frag_err); + DP_PRINT_STATS("RX frag OOR: %d", soc->stats.rx.rx_frag_oor); + + DP_PRINT_STATS("RX HP out_of_sync: %d", soc->stats.rx.hp_oos2); + DP_PRINT_STATS("RX Ring Near Full: %d", soc->stats.rx.near_full); + + DP_PRINT_STATS("RX Reap Loop Pkt Limit Hit: %d", + soc->stats.rx.reap_loop_pkt_limit_hit); + DP_PRINT_STATS("RX DESC invalid magic: %u", + soc->stats.rx.err.rx_desc_invalid_magic); + DP_PRINT_STATS("RX DUP DESC: %d", + soc->stats.rx.err.hal_reo_dest_dup); + DP_PRINT_STATS("RX REL DUP DESC: %d", + soc->stats.rx.err.hal_wbm_rel_dup); + + DP_PRINT_STATS("RXDMA ERR DUP DESC: %d", + soc->stats.rx.err.hal_rxdma_err_dup); + + DP_PRINT_STATS("RX scatter msdu: %d", + soc->stats.rx.err.scatter_msdu); + + DP_PRINT_STATS("RX invalid cookie: %d", + soc->stats.rx.err.invalid_cookie); + + DP_PRINT_STATS("RX stale cookie: %d", + soc->stats.rx.err.stale_cookie); + + DP_PRINT_STATS("RX wait completed msdu break: %d", + soc->stats.rx.msdu_scatter_wait_break); + + DP_PRINT_STATS("2k jump delba sent: %d", + soc->stats.rx.err.rx_2k_jump_delba_sent); + + DP_PRINT_STATS("2k jump msdu to stack: %d", + soc->stats.rx.err.rx_2k_jump_to_stack); + + DP_PRINT_STATS("2k jump msdu drop: %d", + soc->stats.rx.err.rx_2k_jump_drop); + + DP_PRINT_STATS("REO err oor msdu to stack %d", + soc->stats.rx.err.reo_err_oor_to_stack); + + DP_PRINT_STATS("REO err oor msdu drop: %d", + soc->stats.rx.err.reo_err_oor_drop); + + DP_PRINT_STATS("Rx err msdu rejected: %d", + soc->stats.rx.err.rejected); + + DP_PRINT_STATS("Rx stale link desc cookie: %d", + soc->stats.rx.err.invalid_link_cookie); + + DP_PRINT_STATS("Rx nbuf sanity fail: %d", + soc->stats.rx.err.nbuf_sanity_fail); + + for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) { + index += qdf_snprint(&rxdma_error[index], + DP_RXDMA_ERR_LENGTH - index, + " %d", soc->stats.rx.err.rxdma_error[i]); + } + DP_PRINT_STATS("RXDMA Error (0-31):%s", rxdma_error); + + index = 0; + for (i = 0; i < HAL_REO_ERR_MAX; i++) { + index += qdf_snprint(&reo_error[index], + DP_REO_ERR_LENGTH - index, + " %d", soc->stats.rx.err.reo_error[i]); + } + DP_PRINT_STATS("REO Error(0-14):%s", reo_error); + DP_PRINT_STATS("REO CMD SEND FAIL: %d", + soc->stats.rx.err.reo_cmd_send_fail); + + DP_PRINT_STATS("Rx BAR frames:%d", soc->stats.rx.bar_frame); + DP_PRINT_STATS("Rx invalid TID count:%d", + soc->stats.rx.err.rx_invalid_tid_err); +} + +#ifdef FEATURE_TSO_STATS +void dp_print_tso_stats(struct dp_soc *soc, + enum qdf_stats_verbosity_level level) +{ + uint8_t loop_pdev; + uint32_t id; + struct dp_pdev *pdev; + + for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) { + pdev = soc->pdev_list[loop_pdev]; + DP_PRINT_STATS("TSO Statistics\n"); + DP_PRINT_STATS( + "From stack: %d | Successful completions: %d | TSO Packets: %d | TSO Completions: %d", + pdev->stats.tx_i.rcvd.num, + pdev->stats.tx.tx_success.num, + pdev->stats.tso_stats.num_tso_pkts.num, + pdev->stats.tso_stats.tso_comp); + + for (id = 0; id < CDP_MAX_TSO_PACKETS; id++) { + /* TSO LEVEL 1 - PACKET INFO */ + DP_PRINT_STATS( + "Packet_Id:[%u]: Packet Length %zu | No. of segments: %u", + id, + pdev->stats.tso_stats.tso_info + .tso_packet_info[id].tso_packet_len, + pdev->stats.tso_stats.tso_info + .tso_packet_info[id].num_seg); + /* TSO LEVEL 2 */ + if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH) + dp_print_tso_seg_stats(pdev, id); + } + + DP_PRINT_STATS( + "TSO Histogram: Single: %llu | 2-5 segs: %llu | 6-10: %llu segs | 11-15 segs: %llu | 16-20 segs: %llu | 20+ segs: %llu", + pdev->stats.tso_stats.seg_histogram.segs_1, + pdev->stats.tso_stats.seg_histogram.segs_2_5, + pdev->stats.tso_stats.seg_histogram.segs_6_10, + pdev->stats.tso_stats.seg_histogram.segs_11_15, + pdev->stats.tso_stats.seg_histogram.segs_16_20, + pdev->stats.tso_stats.seg_histogram.segs_20_plus); + } +} + +void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, + uint8_t _p_cntrs) +{ + if (_p_cntrs == 1) { + DP_STATS_INC(pdev, + tso_stats.seg_histogram.segs_1, 1); + } else if (_p_cntrs >= 2 && _p_cntrs <= 5) { + DP_STATS_INC(pdev, + tso_stats.seg_histogram.segs_2_5, 1); + } else if (_p_cntrs > 5 && _p_cntrs <= 10) { + DP_STATS_INC(pdev, + tso_stats.seg_histogram.segs_6_10, 1); + } else if (_p_cntrs > 10 && _p_cntrs <= 15) { + DP_STATS_INC(pdev, + tso_stats.seg_histogram.segs_11_15, 1); + } else if (_p_cntrs > 15 && _p_cntrs <= 20) { + DP_STATS_INC(pdev, + tso_stats.seg_histogram.segs_16_20, 1); + } else if (_p_cntrs > 20) { + DP_STATS_INC(pdev, + tso_stats.seg_histogram.segs_20_plus, 1); + } +} + +void dp_tso_segment_update(struct dp_pdev *pdev, + uint32_t stats_idx, + uint8_t idx, + struct qdf_tso_seg_t seg) +{ + DP_STATS_UPD(pdev, tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_seg[idx].num_frags, + seg.num_frags); + DP_STATS_UPD(pdev, tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_seg[idx].total_len, + seg.total_len); + + DP_STATS_UPD(pdev, tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_seg[idx].tso_flags.tso_enable, + seg.tso_flags.tso_enable); + + DP_STATS_UPD(pdev, tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_seg[idx].tso_flags.fin, + seg.tso_flags.fin); + DP_STATS_UPD(pdev, tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_seg[idx].tso_flags.syn, + seg.tso_flags.syn); + DP_STATS_UPD(pdev, tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_seg[idx].tso_flags.rst, + seg.tso_flags.rst); + DP_STATS_UPD(pdev, tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_seg[idx].tso_flags.psh, + seg.tso_flags.psh); + DP_STATS_UPD(pdev, tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_seg[idx].tso_flags.ack, + seg.tso_flags.ack); + DP_STATS_UPD(pdev, tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_seg[idx].tso_flags.urg, + seg.tso_flags.urg); + DP_STATS_UPD(pdev, tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_seg[idx].tso_flags.ece, + seg.tso_flags.ece); + DP_STATS_UPD(pdev, tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_seg[idx].tso_flags.cwr, + seg.tso_flags.cwr); + DP_STATS_UPD(pdev, tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_seg[idx].tso_flags.ns, + seg.tso_flags.ns); + DP_STATS_UPD(pdev, tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_seg[idx].tso_flags.tcp_seq_num, + seg.tso_flags.tcp_seq_num); + DP_STATS_UPD(pdev, tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_seg[idx].tso_flags.ip_id, + seg.tso_flags.ip_id); +} + +void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, + qdf_nbuf_t msdu, uint16_t num_segs) +{ + DP_STATS_UPD(pdev, + tso_stats.tso_info.tso_packet_info[stats_idx] + .num_seg, + num_segs); + + DP_STATS_UPD(pdev, + tso_stats.tso_info.tso_packet_info[stats_idx] + .tso_packet_len, + qdf_nbuf_get_tcp_payload_len(msdu)); +} + +void dp_tso_segment_stats_update(struct dp_pdev *pdev, + struct qdf_tso_seg_elem_t *stats_seg, + uint32_t stats_idx) +{ + uint8_t tso_seg_idx = 0; + + while (stats_seg && (tso_seg_idx < CDP_MAX_TSO_SEGMENTS)) { + dp_tso_segment_update(pdev, stats_idx, + tso_seg_idx, + stats_seg->seg); + ++tso_seg_idx; + stats_seg = stats_seg->next; + } +} + +void dp_txrx_clear_tso_stats(struct dp_soc *soc) +{ + uint8_t loop_pdev; + struct dp_pdev *pdev; + + for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) { + pdev = soc->pdev_list[loop_pdev]; + dp_init_tso_stats(pdev); + } +} +#endif /* FEATURE_TSO_STATS */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c new file mode 100644 index 0000000000000000000000000000000000000000..b29c353632849cc79f8d893b795586ac782ee19a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c @@ -0,0 +1,4508 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "htt.h" +#include "dp_htt.h" +#include "hal_hw_headers.h" +#include "dp_tx.h" +#include "dp_tx_desc.h" +#include "dp_peer.h" +#include "dp_types.h" +#include "hal_tx.h" +#include "qdf_mem.h" +#include "qdf_nbuf.h" +#include "qdf_net_types.h" +#include +#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO) +#include "if_meta_hdr.h" +#endif +#include "enet.h" +#include "dp_internal.h" +#ifdef FEATURE_WDS +#include "dp_txrx_wds.h" +#endif +#ifdef ATH_SUPPORT_IQUE +#include "dp_txrx_me.h" +#endif + + +/* TODO Add support in TSO */ +#define DP_DESC_NUM_FRAG(x) 0 + +/* disable TQM_BYPASS */ +#define TQM_BYPASS_WAR 0 + +/* invalid peer id for reinject*/ +#define DP_INVALID_PEER 0XFFFE + +/*mapping between hal encrypt type and cdp_sec_type*/ +#define MAX_CDP_SEC_TYPE 12 +static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = { + HAL_TX_ENCRYPT_TYPE_NO_CIPHER, + HAL_TX_ENCRYPT_TYPE_WEP_128, + HAL_TX_ENCRYPT_TYPE_WEP_104, + HAL_TX_ENCRYPT_TYPE_WEP_40, + HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC, + HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC, + HAL_TX_ENCRYPT_TYPE_AES_CCMP_128, + HAL_TX_ENCRYPT_TYPE_WAPI, + HAL_TX_ENCRYPT_TYPE_AES_CCMP_256, + HAL_TX_ENCRYPT_TYPE_AES_GCMP_128, + HAL_TX_ENCRYPT_TYPE_AES_GCMP_256, + HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4}; + +#ifdef QCA_TX_LIMIT_CHECK +/** + * dp_tx_limit_check - Check if allocated tx descriptors reached + * soc max limit and pdev max limit + * @vdev: DP vdev handle + * + * Return: true if allocated tx descriptors reached max configured value, else + * false + */ +static inline bool +dp_tx_limit_check(struct dp_vdev *vdev) +{ + struct dp_pdev *pdev = vdev->pdev; + struct dp_soc *soc = pdev->soc; + + if (qdf_atomic_read(&soc->num_tx_outstanding) >= + soc->num_tx_allowed) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: queued packets are more than max tx, drop the frame", + __func__); + DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1); + return true; + } + + if (qdf_atomic_read(&pdev->num_tx_outstanding) >= + pdev->num_tx_allowed) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: queued packets are more than max tx, drop the frame", + __func__); + DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1); + return true; + } + return false; +} + +/** + * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc + * @vdev: DP pdev handle + * + * Return: void + */ +static inline void +dp_tx_outstanding_inc(struct dp_pdev *pdev) +{ + struct dp_soc *soc = pdev->soc; + + qdf_atomic_inc(&pdev->num_tx_outstanding); + qdf_atomic_inc(&soc->num_tx_outstanding); +} + +/** + * dp_tx_outstanding__dec - Decrement outstanding tx desc values on pdev and soc + * @vdev: DP pdev handle + * + * Return: void + */ +static inline void +dp_tx_outstanding_dec(struct dp_pdev *pdev) +{ + struct dp_soc *soc = pdev->soc; + + qdf_atomic_dec(&pdev->num_tx_outstanding); + qdf_atomic_dec(&soc->num_tx_outstanding); +} + +#else //QCA_TX_LIMIT_CHECK +static inline bool +dp_tx_limit_check(struct dp_vdev *vdev) +{ + return false; +} + +static inline void +dp_tx_outstanding_inc(struct dp_pdev *pdev) +{ + qdf_atomic_inc(&pdev->num_tx_outstanding); +} + +static inline void +dp_tx_outstanding_dec(struct dp_pdev *pdev) +{ + qdf_atomic_dec(&pdev->num_tx_outstanding); +} +#endif //QCA_TX_LIMIT_CHECK + +#if defined(FEATURE_TSO) +/** + * dp_tx_tso_unmap_segment() - Unmap TSO segment + * + * @soc - core txrx main context + * @seg_desc - tso segment descriptor + * @num_seg_desc - tso number segment descriptor + */ +static void dp_tx_tso_unmap_segment( + struct dp_soc *soc, + struct qdf_tso_seg_elem_t *seg_desc, + struct qdf_tso_num_seg_elem_t *num_seg_desc) +{ + TSO_DEBUG("%s: Unmap the tso segment", __func__); + if (qdf_unlikely(!seg_desc)) { + DP_TRACE(ERROR, "%s %d TSO desc is NULL!", + __func__, __LINE__); + qdf_assert(0); + } else if (qdf_unlikely(!num_seg_desc)) { + DP_TRACE(ERROR, "%s %d TSO num desc is NULL!", + __func__, __LINE__); + qdf_assert(0); + } else { + bool is_last_seg; + /* no tso segment left to do dma unmap */ + if (num_seg_desc->num_seg.tso_cmn_num_seg < 1) + return; + + is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ? + true : false; + qdf_nbuf_unmap_tso_segment(soc->osdev, + seg_desc, is_last_seg); + num_seg_desc->num_seg.tso_cmn_num_seg--; + } +} + +/** + * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg + * back to the freelist + * + * @soc - soc device handle + * @tx_desc - Tx software descriptor + */ +static void dp_tx_tso_desc_release(struct dp_soc *soc, + struct dp_tx_desc_s *tx_desc) +{ + TSO_DEBUG("%s: Free the tso descriptor", __func__); + if (qdf_unlikely(!tx_desc->tso_desc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s %d TSO desc is NULL!", + __func__, __LINE__); + qdf_assert(0); + } else if (qdf_unlikely(!tx_desc->tso_num_desc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s %d TSO num desc is NULL!", + __func__, __LINE__); + qdf_assert(0); + } else { + struct qdf_tso_num_seg_elem_t *tso_num_desc = + (struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc; + + /* Add the tso num segment into the free list */ + if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) { + dp_tso_num_seg_free(soc, tx_desc->pool_id, + tx_desc->tso_num_desc); + tx_desc->tso_num_desc = NULL; + DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1); + } + + /* Add the tso segment into the free list*/ + dp_tx_tso_desc_free(soc, + tx_desc->pool_id, tx_desc->tso_desc); + tx_desc->tso_desc = NULL; + } +} +#else +static void dp_tx_tso_unmap_segment( + struct dp_soc *soc, + struct qdf_tso_seg_elem_t *seg_desc, + struct qdf_tso_num_seg_elem_t *num_seg_desc) + +{ +} + +static void dp_tx_tso_desc_release(struct dp_soc *soc, + struct dp_tx_desc_s *tx_desc) +{ +} +#endif +/** + * dp_tx_desc_release() - Release Tx Descriptor + * @tx_desc : Tx Descriptor + * @desc_pool_id: Descriptor Pool ID + * + * Deallocate all resources attached to Tx descriptor and free the Tx + * descriptor. + * + * Return: + */ +static void +dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id) +{ + struct dp_pdev *pdev = tx_desc->pdev; + struct dp_soc *soc; + uint8_t comp_status = 0; + + qdf_assert(pdev); + + soc = pdev->soc; + + if (tx_desc->frm_type == dp_tx_frm_tso) + dp_tx_tso_desc_release(soc, tx_desc); + + if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) + dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id); + + if (tx_desc->flags & DP_TX_DESC_FLAG_ME) + dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer); + + dp_tx_outstanding_dec(pdev); + + if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) + qdf_atomic_dec(&pdev->num_tx_exception); + + if (HAL_TX_COMP_RELEASE_SOURCE_TQM == + hal_tx_comp_get_buffer_source(&tx_desc->comp)) + comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp, + soc->hal_soc); + else + comp_status = HAL_TX_COMP_RELEASE_REASON_FW; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "Tx Completion Release desc %d status %d outstanding %d", + tx_desc->id, comp_status, + qdf_atomic_read(&pdev->num_tx_outstanding)); + + dp_tx_desc_free(soc, tx_desc, desc_pool_id); + return; +} + +/** + * dp_tx_htt_metadata_prepare() - Prepare HTT metadata for special frames + * @vdev: DP vdev Handle + * @nbuf: skb + * @msdu_info: msdu_info required to create HTT metadata + * + * Prepares and fills HTT metadata in the frame pre-header for special frames + * that should be transmitted using varying transmit parameters. + * There are 2 VDEV modes that currently needs this special metadata - + * 1) Mesh Mode + * 2) DSRC Mode + * + * Return: HTT metadata size + * + */ +static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info) +{ + uint32_t *meta_data = msdu_info->meta_data; + struct htt_tx_msdu_desc_ext2_t *desc_ext = + (struct htt_tx_msdu_desc_ext2_t *) meta_data; + + uint8_t htt_desc_size; + + /* Size rounded of multiple of 8 bytes */ + uint8_t htt_desc_size_aligned; + + uint8_t *hdr = NULL; + + /* + * Metadata - HTT MSDU Extension header + */ + htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t); + htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7; + + if (vdev->mesh_vdev || msdu_info->is_tx_sniffer || + HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info-> + meta_data[0])) { + if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < + htt_desc_size_aligned)) { + nbuf = qdf_nbuf_realloc_headroom(nbuf, + htt_desc_size_aligned); + if (!nbuf) { + /* + * qdf_nbuf_realloc_headroom won't do skb_clone + * as skb_realloc_headroom does. so, no free is + * needed here. + */ + DP_STATS_INC(vdev, + tx_i.dropped.headroom_insufficient, + 1); + qdf_print(" %s[%d] skb_realloc_headroom failed", + __func__, __LINE__); + return 0; + } + } + /* Fill and add HTT metaheader */ + hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned); + if (!hdr) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Error in filling HTT metadata"); + + return 0; + } + qdf_mem_copy(hdr, desc_ext, htt_desc_size); + + } else if (vdev->opmode == wlan_op_mode_ocb) { + /* Todo - Add support for DSRC */ + } + + return htt_desc_size_aligned; +} + +/** + * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO + * @tso_seg: TSO segment to process + * @ext_desc: Pointer to MSDU extension descriptor + * + * Return: void + */ +#if defined(FEATURE_TSO) +static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg, + void *ext_desc) +{ + uint8_t num_frag; + uint32_t tso_flags; + + /* + * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN), + * tcp_flag_mask + * + * Checksum enable flags are set in TCL descriptor and not in Extension + * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor) + */ + tso_flags = *(uint32_t *) &tso_seg->tso_flags; + + hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags); + + hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len, + tso_seg->tso_flags.ip_len); + + hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num); + hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id); + + + for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) { + uint32_t lo = 0; + uint32_t hi = 0; + + qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) && + (tso_seg->tso_frags[num_frag].length)); + + qdf_dmaaddr_to_32s( + tso_seg->tso_frags[num_frag].paddr, &lo, &hi); + hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi, + tso_seg->tso_frags[num_frag].length); + } + + return; +} +#else +static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg, + void *ext_desc) +{ + return; +} +#endif + +#if defined(FEATURE_TSO) +/** + * dp_tx_free_tso_seg_list() - Loop through the tso segments + * allocated and free them + * + * @soc: soc handle + * @free_seg: list of tso segments + * @msdu_info: msdu descriptor + * + * Return - void + */ +static void dp_tx_free_tso_seg_list( + struct dp_soc *soc, + struct qdf_tso_seg_elem_t *free_seg, + struct dp_tx_msdu_info_s *msdu_info) +{ + struct qdf_tso_seg_elem_t *next_seg; + + while (free_seg) { + next_seg = free_seg->next; + dp_tx_tso_desc_free(soc, + msdu_info->tx_queue.desc_pool_id, + free_seg); + free_seg = next_seg; + } +} + +/** + * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments + * allocated and free them + * + * @soc: soc handle + * @free_num_seg: list of tso number segments + * @msdu_info: msdu descriptor + * Return - void + */ +static void dp_tx_free_tso_num_seg_list( + struct dp_soc *soc, + struct qdf_tso_num_seg_elem_t *free_num_seg, + struct dp_tx_msdu_info_s *msdu_info) +{ + struct qdf_tso_num_seg_elem_t *next_num_seg; + + while (free_num_seg) { + next_num_seg = free_num_seg->next; + dp_tso_num_seg_free(soc, + msdu_info->tx_queue.desc_pool_id, + free_num_seg); + free_num_seg = next_num_seg; + } +} + +/** + * dp_tx_unmap_tso_seg_list() - Loop through the tso segments + * do dma unmap for each segment + * + * @soc: soc handle + * @free_seg: list of tso segments + * @num_seg_desc: tso number segment descriptor + * + * Return - void + */ +static void dp_tx_unmap_tso_seg_list( + struct dp_soc *soc, + struct qdf_tso_seg_elem_t *free_seg, + struct qdf_tso_num_seg_elem_t *num_seg_desc) +{ + struct qdf_tso_seg_elem_t *next_seg; + + if (qdf_unlikely(!num_seg_desc)) { + DP_TRACE(ERROR, "TSO number seg desc is NULL!"); + return; + } + + while (free_seg) { + next_seg = free_seg->next; + dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc); + free_seg = next_seg; + } +} + +#ifdef FEATURE_TSO_STATS +/** + * dp_tso_get_stats_idx: Retrieve the tso packet id + * @pdev - pdev handle + * + * Return: id + */ +static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev) +{ + uint32_t stats_idx; + + stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx)) + % CDP_MAX_TSO_PACKETS); + return stats_idx; +} +#else +static int dp_tso_get_stats_idx(struct dp_pdev *pdev) +{ + return 0; +} +#endif /* FEATURE_TSO_STATS */ + +/** + * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any, + * free the tso segments descriptor and + * tso num segments descriptor + * + * @soc: soc handle + * @msdu_info: msdu descriptor + * @tso_seg_unmap: flag to show if dma unmap is necessary + * + * Return - void + */ +static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc, + struct dp_tx_msdu_info_s *msdu_info, + bool tso_seg_unmap) +{ + struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info; + struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list; + struct qdf_tso_num_seg_elem_t *tso_num_desc = + tso_info->tso_num_seg_list; + + /* do dma unmap for each segment */ + if (tso_seg_unmap) + dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc); + + /* free all tso number segment descriptor though looks only have 1 */ + dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info); + + /* free all tso segment descriptor */ + dp_tx_free_tso_seg_list(soc, free_seg, msdu_info); +} + +/** + * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info + * @vdev: virtual device handle + * @msdu: network buffer + * @msdu_info: meta data associated with the msdu + * + * Return: QDF_STATUS_SUCCESS success + */ +static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev, + qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info) +{ + struct qdf_tso_seg_elem_t *tso_seg; + int num_seg = qdf_nbuf_get_tso_num_seg(msdu); + struct dp_soc *soc = vdev->pdev->soc; + struct dp_pdev *pdev = vdev->pdev; + struct qdf_tso_info_t *tso_info; + struct qdf_tso_num_seg_elem_t *tso_num_seg; + tso_info = &msdu_info->u.tso_info; + tso_info->curr_seg = NULL; + tso_info->tso_seg_list = NULL; + tso_info->num_segs = num_seg; + msdu_info->frm_type = dp_tx_frm_tso; + tso_info->tso_num_seg_list = NULL; + + TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg); + + while (num_seg) { + tso_seg = dp_tx_tso_desc_alloc( + soc, msdu_info->tx_queue.desc_pool_id); + if (tso_seg) { + tso_seg->next = tso_info->tso_seg_list; + tso_info->tso_seg_list = tso_seg; + num_seg--; + } else { + dp_err_rl("Failed to alloc tso seg desc"); + DP_STATS_INC_PKT(vdev->pdev, + tso_stats.tso_no_mem_dropped, 1, + qdf_nbuf_len(msdu)); + dp_tx_free_remaining_tso_desc(soc, msdu_info, false); + + return QDF_STATUS_E_NOMEM; + } + } + + TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg); + + tso_num_seg = dp_tso_num_seg_alloc(soc, + msdu_info->tx_queue.desc_pool_id); + + if (tso_num_seg) { + tso_num_seg->next = tso_info->tso_num_seg_list; + tso_info->tso_num_seg_list = tso_num_seg; + } else { + DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc", + __func__); + dp_tx_free_remaining_tso_desc(soc, msdu_info, false); + + return QDF_STATUS_E_NOMEM; + } + + msdu_info->num_seg = + qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info); + + TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__, + msdu_info->num_seg); + + if (!(msdu_info->num_seg)) { + /* + * Free allocated TSO seg desc and number seg desc, + * do unmap for segments if dma map has done. + */ + DP_TRACE(ERROR, "%s: Failed to get tso info", __func__); + dp_tx_free_remaining_tso_desc(soc, msdu_info, true); + + return QDF_STATUS_E_INVAL; + } + + tso_info->curr_seg = tso_info->tso_seg_list; + + tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev); + dp_tso_packet_update(pdev, tso_info->msdu_stats_idx, + msdu, msdu_info->num_seg); + dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list, + tso_info->msdu_stats_idx); + dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg); + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev, + qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info) +{ + return QDF_STATUS_E_NOMEM; +} +#endif + +QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check, + (DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >= + sizeof(struct htt_tx_msdu_desc_ext2_t))); + +/** + * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor + * @vdev: DP Vdev handle + * @msdu_info: MSDU info to be setup in MSDU extension descriptor + * @desc_pool_id: Descriptor Pool ID + * + * Return: + */ +static +struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev, + struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id) +{ + uint8_t i; + uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA]; + struct dp_tx_seg_info_s *seg_info; + struct dp_tx_ext_desc_elem_s *msdu_ext_desc; + struct dp_soc *soc = vdev->pdev->soc; + + /* Allocate an extension descriptor */ + msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id); + qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA); + + if (!msdu_ext_desc) { + DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1); + return NULL; + } + + if (msdu_info->exception_fw && + qdf_unlikely(vdev->mesh_vdev)) { + qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES], + &msdu_info->meta_data[0], + sizeof(struct htt_tx_msdu_desc_ext2_t)); + qdf_atomic_inc(&vdev->pdev->num_tx_exception); + msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID; + } + + switch (msdu_info->frm_type) { + case dp_tx_frm_sg: + case dp_tx_frm_me: + case dp_tx_frm_raw: + seg_info = msdu_info->u.sg_info.curr_seg; + /* Update the buffer pointers in MSDU Extension Descriptor */ + for (i = 0; i < seg_info->frag_cnt; i++) { + hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i, + seg_info->frags[i].paddr_lo, + seg_info->frags[i].paddr_hi, + seg_info->frags[i].len); + } + + break; + + case dp_tx_frm_tso: + dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg, + &cached_ext_desc[0]); + break; + + + default: + break; + } + + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA); + + hal_tx_ext_desc_sync(&cached_ext_desc[0], + msdu_ext_desc->vaddr); + + return msdu_ext_desc; +} + +/** + * dp_tx_trace_pkt() - Trace TX packet at DP layer + * + * @skb: skb to be traced + * @msdu_id: msdu_id of the packet + * @vdev_id: vdev_id of the packet + * + * Return: None + */ +static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id, + uint8_t vdev_id) +{ + QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK; + QDF_NBUF_CB_TX_DP_TRACE(skb) = 1; + DPTRACE(qdf_dp_trace_ptr(skb, + QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, + qdf_nbuf_data_addr(skb), + sizeof(qdf_nbuf_data(skb)), + msdu_id, vdev_id)); + + qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID); + + DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID, + QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD, + msdu_id, QDF_TX)); +} + +/** + * dp_tx_desc_prepare_single - Allocate and prepare Tx descriptor + * @vdev: DP vdev handle + * @nbuf: skb + * @desc_pool_id: Descriptor pool ID + * @meta_data: Metadata to the fw + * @tx_exc_metadata: Handle that holds exception path metadata + * Allocate and prepare Tx descriptor with msdu information. + * + * Return: Pointer to Tx Descriptor on success, + * NULL on failure + */ +static +struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev, + qdf_nbuf_t nbuf, uint8_t desc_pool_id, + struct dp_tx_msdu_info_s *msdu_info, + struct cdp_tx_exception_metadata *tx_exc_metadata) +{ + uint8_t align_pad; + uint8_t is_exception = 0; + uint8_t htt_hdr_size; + qdf_ether_header_t *eh; + struct dp_tx_desc_s *tx_desc; + struct dp_pdev *pdev = vdev->pdev; + struct dp_soc *soc = pdev->soc; + + if (dp_tx_limit_check(vdev)) + return NULL; + + /* Allocate software Tx descriptor */ + tx_desc = dp_tx_desc_alloc(soc, desc_pool_id); + if (qdf_unlikely(!tx_desc)) { + DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1); + return NULL; + } + + dp_tx_outstanding_inc(pdev); + + /* Initialize the SW tx descriptor */ + tx_desc->nbuf = nbuf; + tx_desc->frm_type = dp_tx_frm_std; + tx_desc->tx_encap_type = ((tx_exc_metadata && + (tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ? + tx_exc_metadata->tx_encap_type : vdev->tx_encap_type); + tx_desc->vdev = vdev; + tx_desc->pdev = pdev; + tx_desc->msdu_ext_desc = NULL; + tx_desc->pkt_offset = 0; + + dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id); + + if (qdf_unlikely(vdev->multipass_en)) { + if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info)) + goto failure; + } + + /* + * For special modes (vdev_type == ocb or mesh), data frames should be + * transmitted using varying transmit parameters (tx spec) which include + * transmit rate, power, priority, channel, channel bandwidth , nss etc. + * These are filled in HTT MSDU descriptor and sent in frame pre-header. + * These frames are sent as exception packets to firmware. + * + * HW requirement is that metadata should always point to a + * 8-byte aligned address. So we add alignment pad to start of buffer. + * HTT Metadata should be ensured to be multiple of 8-bytes, + * to get 8-byte aligned start address along with align_pad added + * + * |-----------------------------| + * | | + * |-----------------------------| <-----Buffer Pointer Address given + * | | ^ in HW descriptor (aligned) + * | HTT Metadata | | + * | | | + * | | | Packet Offset given in descriptor + * | | | + * |-----------------------------| | + * | Alignment Pad | v + * |-----------------------------| <----- Actual buffer start address + * | SKB Data | (Unaligned) + * | | + * | | + * | | + * | | + * | | + * |-----------------------------| + */ + if (qdf_unlikely((msdu_info->exception_fw)) || + (vdev->opmode == wlan_op_mode_ocb) || + (tx_exc_metadata && + tx_exc_metadata->is_tx_sniffer)) { + align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7; + + if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) { + DP_STATS_INC(vdev, + tx_i.dropped.headroom_insufficient, 1); + goto failure; + } + + if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "qdf_nbuf_push_head failed"); + goto failure; + } + + htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf, + msdu_info); + if (htt_hdr_size == 0) + goto failure; + tx_desc->pkt_offset = align_pad + htt_hdr_size; + tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; + is_exception = 1; + } + + if (qdf_unlikely(QDF_STATUS_SUCCESS != + qdf_nbuf_map(soc->osdev, nbuf, + QDF_DMA_TO_DEVICE))) { + /* Handle failure */ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "qdf_nbuf_map failed"); + DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1); + goto failure; + } + + if (qdf_unlikely(vdev->nawds_enabled)) { + eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); + if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) { + tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; + is_exception = 1; + } + } + +#if !TQM_BYPASS_WAR + if (is_exception || tx_exc_metadata) +#endif + { + /* Temporary WAR due to TQM VP issues */ + tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; + qdf_atomic_inc(&pdev->num_tx_exception); + } + + return tx_desc; + +failure: + dp_tx_desc_release(tx_desc, desc_pool_id); + return NULL; +} + +/** + * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame + * @vdev: DP vdev handle + * @nbuf: skb + * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor + * @desc_pool_id : Descriptor Pool ID + * + * Allocate and prepare Tx descriptor with msdu and fragment descritor + * information. For frames wth fragments, allocate and prepare + * an MSDU extension descriptor + * + * Return: Pointer to Tx Descriptor on success, + * NULL on failure + */ +static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev, + qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info, + uint8_t desc_pool_id) +{ + struct dp_tx_desc_s *tx_desc; + struct dp_tx_ext_desc_elem_s *msdu_ext_desc; + struct dp_pdev *pdev = vdev->pdev; + struct dp_soc *soc = pdev->soc; + + if (dp_tx_limit_check(vdev)) + return NULL; + + /* Allocate software Tx descriptor */ + tx_desc = dp_tx_desc_alloc(soc, desc_pool_id); + if (!tx_desc) { + DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1); + return NULL; + } + + dp_tx_outstanding_inc(pdev); + + /* Initialize the SW tx descriptor */ + tx_desc->nbuf = nbuf; + tx_desc->frm_type = msdu_info->frm_type; + tx_desc->tx_encap_type = vdev->tx_encap_type; + tx_desc->vdev = vdev; + tx_desc->pdev = pdev; + tx_desc->pkt_offset = 0; + tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg; + tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list; + + dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id); + + /* Handle scattered frames - TSO/SG/ME */ + /* Allocate and prepare an extension descriptor for scattered frames */ + msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id); + if (!msdu_ext_desc) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s Tx Extension Descriptor Alloc Fail", + __func__); + goto failure; + } + +#if TQM_BYPASS_WAR + /* Temporary WAR due to TQM VP issues */ + tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; + qdf_atomic_inc(&pdev->num_tx_exception); +#endif + if (qdf_unlikely(msdu_info->exception_fw)) + tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; + + tx_desc->msdu_ext_desc = msdu_ext_desc; + tx_desc->flags |= DP_TX_DESC_FLAG_FRAG; + + return tx_desc; +failure: + dp_tx_desc_release(tx_desc, desc_pool_id); + return NULL; +} + +/** + * dp_tx_prepare_raw() - Prepare RAW packet TX + * @vdev: DP vdev handle + * @nbuf: buffer pointer + * @seg_info: Pointer to Segment info Descriptor to be prepared + * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension + * descriptor + * + * Return: + */ +static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info) +{ + qdf_nbuf_t curr_nbuf = NULL; + uint16_t total_len = 0; + qdf_dma_addr_t paddr; + int32_t i; + int32_t mapped_buf_num = 0; + + struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info; + qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data; + + DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf)); + + /* Continue only if frames are of DATA type */ + if (!DP_FRAME_IS_DATA(qos_wh)) { + DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "Pkt. recd is of not data type"); + goto error; + } + /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */ + if (vdev->raw_mode_war && + (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) && + (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU)) + qos_wh->i_fc[1] |= IEEE80211_FC1_WEP; + + for (curr_nbuf = nbuf, i = 0; curr_nbuf; + curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) { + + if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf, + QDF_DMA_TO_DEVICE)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s dma map error ", __func__); + DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1); + mapped_buf_num = i; + goto error; + } + + paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0); + seg_info->frags[i].paddr_lo = paddr; + seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32); + seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf); + seg_info->frags[i].vaddr = (void *) curr_nbuf; + total_len += qdf_nbuf_len(curr_nbuf); + } + + seg_info->frag_cnt = i; + seg_info->total_len = total_len; + seg_info->next = NULL; + + sg_info->curr_seg = seg_info; + + msdu_info->frm_type = dp_tx_frm_raw; + msdu_info->num_seg = 1; + + return nbuf; + +error: + i = 0; + while (nbuf) { + curr_nbuf = nbuf; + if (i < mapped_buf_num) { + qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE); + i++; + } + nbuf = qdf_nbuf_next(nbuf); + qdf_nbuf_free(curr_nbuf); + } + return NULL; + +} + +/** + * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame. + * @soc: DP soc handle + * @nbuf: Buffer pointer + * + * unmap the chain of nbufs that belong to this RAW frame. + * + * Return: None + */ +static void dp_tx_raw_prepare_unset(struct dp_soc *soc, + qdf_nbuf_t nbuf) +{ + qdf_nbuf_t cur_nbuf = nbuf; + + do { + qdf_nbuf_unmap(soc->osdev, cur_nbuf, QDF_DMA_TO_DEVICE); + cur_nbuf = qdf_nbuf_next(cur_nbuf); + } while (cur_nbuf); +} + +#ifdef VDEV_PEER_PROTOCOL_COUNT +#define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, nbuf) \ +{ \ + qdf_nbuf_t nbuf_local; \ + struct dp_vdev *vdev_local = vdev_hdl; \ + do { \ + if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \ + break; \ + nbuf_local = nbuf; \ + if (qdf_unlikely(((vdev_local)->tx_encap_type) == \ + htt_cmn_pkt_type_raw)) \ + break; \ + else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local)))) \ + break; \ + else if (qdf_nbuf_is_tso((nbuf_local))) \ + break; \ + dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \ + (nbuf_local), \ + NULL, 1, 0); \ + } while (0); \ +} +#else +#define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, skb) +#endif + +#ifdef FEATURE_RUNTIME_PM +/** + * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end + * @soc: Datapath soc handle + * @hal_ring_hdl: HAL ring handle + * + * Wrapper for HAL ring access end for data transmission for + * FEATURE_RUNTIME_PM + * + * Returns: none + */ +static inline void +dp_tx_ring_access_end_wrapper(struct dp_soc *soc, + hal_ring_handle_t hal_ring_hdl) +{ + int ret; + + ret = hif_pm_runtime_get(soc->hif_handle, + RTPM_ID_DW_TX_HW_ENQUEUE); + switch (ret) { + case 0: + hal_srng_access_end(soc->hal_soc, hal_ring_hdl); + hif_pm_runtime_put(soc->hif_handle, + RTPM_ID_DW_TX_HW_ENQUEUE); + break; + /* + * If hif_pm_runtime_get returns -EBUSY or -EINPROGRESS, + * take the dp runtime refcount using dp_runtime_get, + * check link state,if up, write TX ring HP, else just set flush event. + * In dp_runtime_resume, wait until dp runtime refcount becomes + * zero or time out, then flush pending tx. + */ + case -EBUSY: + case -EINPROGRESS: + dp_runtime_get(soc); + if (hif_pm_get_link_state(soc->hif_handle) == + HIF_PM_LINK_STATE_UP) { + hal_srng_access_end(soc->hal_soc, hal_ring_hdl); + } else { + hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl); + hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); + hal_srng_inc_flush_cnt(hal_ring_hdl); + } + dp_runtime_put(soc); + break; + default: + dp_runtime_get(soc); + hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl); + hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); + hal_srng_inc_flush_cnt(hal_ring_hdl); + dp_runtime_put(soc); + } +} +#else +static inline void +dp_tx_ring_access_end_wrapper(struct dp_soc *soc, + hal_ring_handle_t hal_ring_hdl) +{ + hal_srng_access_end(soc->hal_soc, hal_ring_hdl); +} +#endif + +/** + * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit + * @soc: DP Soc Handle + * @vdev: DP vdev handle + * @tx_desc: Tx Descriptor Handle + * @tid: TID from HLOS for overriding default DSCP-TID mapping + * @fw_metadata: Metadata to send to Target Firmware along with frame + * @ring_id: Ring ID of H/W ring to which we enqueue the packet + * @tx_exc_metadata: Handle that holds exception path meta data + * + * Gets the next free TCL HW DMA descriptor and sets up required parameters + * from software Tx descriptor + * + * Return: + */ +static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev, + struct dp_tx_desc_s *tx_desc, uint8_t tid, + uint16_t fw_metadata, uint8_t ring_id, + struct cdp_tx_exception_metadata + *tx_exc_metadata) +{ + uint8_t type; + uint16_t length; + void *hal_tx_desc, *hal_tx_desc_cached; + qdf_dma_addr_t dma_addr; + uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES]; + + enum cdp_sec_type sec_type = ((tx_exc_metadata && + tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ? + tx_exc_metadata->sec_type : vdev->sec_type); + + /* Return Buffer Manager ID */ + uint8_t bm_id = ring_id; + hal_ring_handle_t hal_ring_hdl = soc->tcl_data_ring[ring_id].hal_srng; + + hal_tx_desc_cached = (void *) cached_desc; + qdf_mem_zero(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES); + + if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) { + length = HAL_TX_EXT_DESC_WITH_META_DATA; + type = HAL_TX_BUF_TYPE_EXT_DESC; + dma_addr = tx_desc->msdu_ext_desc->paddr; + + if (tx_desc->msdu_ext_desc->flags & + DP_TX_EXT_DESC_FLAG_METADATA_VALID) + length = HAL_TX_EXT_DESC_WITH_META_DATA; + else + length = HAL_TX_EXTENSION_DESC_LEN_BYTES; + } else { + length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset; + type = HAL_TX_BUF_TYPE_BUFFER; + dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf); + } + + qdf_assert_always(dma_addr); + + hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata); + hal_tx_desc_set_buf_addr(hal_tx_desc_cached, + dma_addr, bm_id, tx_desc->id, + type, soc->hal_soc); + + if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) + return QDF_STATUS_E_RESOURCES; + + hal_tx_desc_set_buf_length(hal_tx_desc_cached, length); + hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset); + hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type); + hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached, + vdev->pdev->lmac_id); + hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached, + vdev->search_type); + hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached, + vdev->bss_ast_idx); + hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached, + vdev->dscp_tid_map_id); + hal_tx_desc_set_encrypt_type(hal_tx_desc_cached, + sec_type_map[sec_type]); + hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached, + (vdev->bss_ast_hash & 0xF)); + + dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u", + length, type, (uint64_t)dma_addr, + tx_desc->pkt_offset, tx_desc->id); + + if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) + hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1); + + hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached, + vdev->hal_desc_addr_search_flags); + + /* verify checksum offload configuration*/ + if (vdev->csum_enabled && + ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP) + || qdf_nbuf_is_tso(tx_desc->nbuf))) { + hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1); + hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1); + } + + if (tid != HTT_TX_EXT_TID_INVALID) + hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid); + + if (tx_desc->flags & DP_TX_DESC_FLAG_MESH) + hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1); + + + tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get()); + /* Sync cached descriptor with HW */ + hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl); + + if (!hal_tx_desc) { + dp_verbose_debug("TCL ring full ring_id:%d", ring_id); + DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1); + DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1); + return QDF_STATUS_E_RESOURCES; + } + + tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX; + dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf); + + hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc); + DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length); + + return QDF_STATUS_SUCCESS; +} + + +/** + * dp_cce_classify() - Classify the frame based on CCE rules + * @vdev: DP vdev handle + * @nbuf: skb + * + * Classify frames based on CCE rules + * Return: bool( true if classified, + * else false) + */ +static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf) +{ + qdf_ether_header_t *eh = NULL; + uint16_t ether_type; + qdf_llc_t *llcHdr; + qdf_nbuf_t nbuf_clone = NULL; + qdf_dot3_qosframe_t *qos_wh = NULL; + + /* for mesh packets don't do any classification */ + if (qdf_unlikely(vdev->mesh_vdev)) + return false; + + if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { + eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); + ether_type = eh->ether_type; + llcHdr = (qdf_llc_t *)(nbuf->data + + sizeof(qdf_ether_header_t)); + } else { + qos_wh = (qdf_dot3_qosframe_t *) nbuf->data; + /* For encrypted packets don't do any classification */ + if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP)) + return false; + + if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) { + if (qdf_unlikely( + qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS && + qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) { + + ether_type = *(uint16_t *)(nbuf->data + + QDF_IEEE80211_4ADDR_HDR_LEN + + sizeof(qdf_llc_t) + - sizeof(ether_type)); + llcHdr = (qdf_llc_t *)(nbuf->data + + QDF_IEEE80211_4ADDR_HDR_LEN); + } else { + ether_type = *(uint16_t *)(nbuf->data + + QDF_IEEE80211_3ADDR_HDR_LEN + + sizeof(qdf_llc_t) + - sizeof(ether_type)); + llcHdr = (qdf_llc_t *)(nbuf->data + + QDF_IEEE80211_3ADDR_HDR_LEN); + } + + if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr) + && (ether_type == + qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) { + + DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1); + return true; + } + } + + return false; + } + + if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) { + ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE + + sizeof(*llcHdr)); + nbuf_clone = qdf_nbuf_clone(nbuf); + if (qdf_unlikely(nbuf_clone)) { + qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr)); + + if (ether_type == htons(ETHERTYPE_VLAN)) { + qdf_nbuf_pull_head(nbuf_clone, + sizeof(qdf_net_vlanhdr_t)); + } + } + } else { + if (ether_type == htons(ETHERTYPE_VLAN)) { + nbuf_clone = qdf_nbuf_clone(nbuf); + if (qdf_unlikely(nbuf_clone)) { + qdf_nbuf_pull_head(nbuf_clone, + sizeof(qdf_net_vlanhdr_t)); + } + } + } + + if (qdf_unlikely(nbuf_clone)) + nbuf = nbuf_clone; + + + if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) + || qdf_nbuf_is_ipv4_arp_pkt(nbuf) + || qdf_nbuf_is_ipv4_wapi_pkt(nbuf) + || qdf_nbuf_is_ipv4_tdls_pkt(nbuf) + || (qdf_nbuf_is_ipv4_pkt(nbuf) + && qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) + || (qdf_nbuf_is_ipv6_pkt(nbuf) && + qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) { + if (qdf_unlikely(nbuf_clone)) + qdf_nbuf_free(nbuf_clone); + return true; + } + + if (qdf_unlikely(nbuf_clone)) + qdf_nbuf_free(nbuf_clone); + + return false; +} + +/** + * dp_tx_get_tid() - Obtain TID to be used for this frame + * @vdev: DP vdev handle + * @nbuf: skb + * + * Extract the DSCP or PCP information from frame and map into TID value. + * + * Return: void + */ +static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info) +{ + uint8_t tos = 0, dscp_tid_override = 0; + uint8_t *hdr_ptr, *L3datap; + uint8_t is_mcast = 0; + qdf_ether_header_t *eh = NULL; + qdf_ethervlan_header_t *evh = NULL; + uint16_t ether_type; + qdf_llc_t *llcHdr; + struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev; + + DP_TX_TID_OVERRIDE(msdu_info, nbuf); + if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { + eh = (qdf_ether_header_t *)nbuf->data; + hdr_ptr = (uint8_t *)(eh->ether_dhost); + L3datap = hdr_ptr + sizeof(qdf_ether_header_t); + } else { + qdf_dot3_qosframe_t *qos_wh = + (qdf_dot3_qosframe_t *) nbuf->data; + msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ? + qos_wh->i_qos[0] & DP_QOS_TID : 0; + return; + } + + is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr); + ether_type = eh->ether_type; + + llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t)); + /* + * Check if packet is dot3 or eth2 type. + */ + if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) { + ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE + + sizeof(*llcHdr)); + + if (ether_type == htons(ETHERTYPE_VLAN)) { + L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) + + sizeof(*llcHdr); + ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE + + sizeof(*llcHdr) + + sizeof(qdf_net_vlanhdr_t)); + } else { + L3datap = hdr_ptr + sizeof(qdf_ether_header_t) + + sizeof(*llcHdr); + } + } else { + if (ether_type == htons(ETHERTYPE_VLAN)) { + evh = (qdf_ethervlan_header_t *) eh; + ether_type = evh->ether_type; + L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t); + } + } + + /* + * Find priority from IP TOS DSCP field + */ + if (qdf_nbuf_is_ipv4_pkt(nbuf)) { + qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap; + if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) { + /* Only for unicast frames */ + if (!is_mcast) { + /* send it on VO queue */ + msdu_info->tid = DP_VO_TID; + } + } else { + /* + * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7 + * from TOS byte. + */ + tos = ip->ip_tos; + dscp_tid_override = 1; + + } + } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) { + /* TODO + * use flowlabel + *igmpmld cases to be handled in phase 2 + */ + unsigned long ver_pri_flowlabel; + unsigned long pri; + ver_pri_flowlabel = *(unsigned long *) L3datap; + pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >> + DP_IPV6_PRIORITY_SHIFT; + tos = pri; + dscp_tid_override = 1; + } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) + msdu_info->tid = DP_VO_TID; + else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) { + /* Only for unicast frames */ + if (!is_mcast) { + /* send ucast arp on VO queue */ + msdu_info->tid = DP_VO_TID; + } + } + + /* + * Assign all MCAST packets to BE + */ + if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { + if (is_mcast) { + tos = 0; + dscp_tid_override = 1; + } + } + + if (dscp_tid_override == 1) { + tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK; + msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos]; + } + + if (msdu_info->tid >= CDP_MAX_DATA_TIDS) + msdu_info->tid = CDP_MAX_DATA_TIDS - 1; + + return; +} + +/** + * dp_tx_classify_tid() - Obtain TID to be used for this frame + * @vdev: DP vdev handle + * @nbuf: skb + * + * Software based TID classification is required when more than 2 DSCP-TID + * mapping tables are needed. + * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2. + * + * Return: void + */ +static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info) +{ + struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev; + + DP_TX_TID_OVERRIDE(msdu_info, nbuf); + + if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map) + return; + + /* for mesh packets don't do any classification */ + if (qdf_unlikely(vdev->mesh_vdev)) + return; + + dp_tx_get_tid(vdev, nbuf, msdu_info); +} + +#ifdef FEATURE_WLAN_TDLS +/** + * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame + * @tx_desc: TX descriptor + * + * Return: None + */ +static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc) +{ + if (tx_desc->vdev) { + if (tx_desc->vdev->is_tdls_frame) { + tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME; + tx_desc->vdev->is_tdls_frame = false; + } + } +} + +/** + * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer + * @soc: DP soc handdle + * @tx_desc: TX descriptor + * @vdev: datapath vdev handle + * + * Return: None + */ +static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc, + struct dp_tx_desc_s *tx_desc, + struct dp_vdev *vdev) +{ + struct hal_tx_completion_status ts = {0}; + qdf_nbuf_t nbuf = tx_desc->nbuf; + + if (qdf_unlikely(!vdev)) { + dp_err_rl("vdev is null!"); + goto error; + } + + hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc); + if (vdev->tx_non_std_data_callback.func) { + qdf_nbuf_set_next(nbuf, NULL); + vdev->tx_non_std_data_callback.func( + vdev->tx_non_std_data_callback.ctxt, + nbuf, ts.status); + return; + } else { + dp_err_rl("callback func is null"); + } + +error: + qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE); + qdf_nbuf_free(nbuf); +} +#else +static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc) +{ +} + +static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc, + struct dp_tx_desc_s *tx_desc, + struct dp_vdev *vdev) +{ +} +#endif + +/** + * dp_tx_frame_is_drop() - checks if the packet is loopback + * @vdev: DP vdev handle + * @nbuf: skb + * + * Return: 1 if frame needs to be dropped else 0 + */ +int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac) +{ + struct dp_pdev *pdev = NULL; + struct dp_ast_entry *src_ast_entry = NULL; + struct dp_ast_entry *dst_ast_entry = NULL; + struct dp_soc *soc = NULL; + + qdf_assert(vdev); + pdev = vdev->pdev; + qdf_assert(pdev); + soc = pdev->soc; + + dst_ast_entry = dp_peer_ast_hash_find_by_pdevid + (soc, dstmac, vdev->pdev->pdev_id); + + src_ast_entry = dp_peer_ast_hash_find_by_pdevid + (soc, srcmac, vdev->pdev->pdev_id); + if (dst_ast_entry && src_ast_entry) { + if (dst_ast_entry->peer->peer_ids[0] == + src_ast_entry->peer->peer_ids[0]) + return 1; + } + + return 0; +} + +/** + * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL + * @vdev: DP vdev handle + * @nbuf: skb + * @tid: TID from HLOS for overriding default DSCP-TID mapping + * @meta_data: Metadata to the fw + * @tx_q: Tx queue to be used for this Tx frame + * @peer_id: peer_id of the peer in case of NAWDS frames + * @tx_exc_metadata: Handle that holds exception path metadata + * + * Return: NULL on success, + * nbuf when it fails to send + */ +qdf_nbuf_t +dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id, + struct cdp_tx_exception_metadata *tx_exc_metadata) +{ + struct dp_pdev *pdev = vdev->pdev; + struct dp_soc *soc = pdev->soc; + struct dp_tx_desc_s *tx_desc; + QDF_STATUS status; + struct dp_tx_queue *tx_q = &(msdu_info->tx_queue); + hal_ring_handle_t hal_ring_hdl = + soc->tcl_data_ring[tx_q->ring_id].hal_srng; + uint16_t htt_tcl_metadata = 0; + uint8_t tid = msdu_info->tid; + struct cdp_tid_tx_stats *tid_stats = NULL; + + /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */ + tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id, + msdu_info, tx_exc_metadata); + if (!tx_desc) { + dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d", + vdev, tx_q->desc_pool_id); + dp_tx_get_tid(vdev, nbuf, msdu_info); + tid_stats = &pdev->stats.tid_stats. + tid_tx_stats[tx_q->ring_id][msdu_info->tid]; + tid_stats->swdrop_cnt[TX_DESC_ERR]++; + return nbuf; + } + + if (qdf_unlikely(soc->cce_disable)) { + if (dp_cce_classify(vdev, nbuf) == true) { + DP_STATS_INC(vdev, tx_i.cce_classified, 1); + tid = DP_VO_TID; + tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; + } + } + + dp_tx_update_tdls_flags(tx_desc); + + if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl))) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s %d : HAL RING Access Failed -- %pK", + __func__, __LINE__, hal_ring_hdl); + dp_tx_get_tid(vdev, nbuf, msdu_info); + tid_stats = &pdev->stats.tid_stats. + tid_tx_stats[tx_q->ring_id][tid]; + tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++; + DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1); + dp_tx_desc_release(tx_desc, tx_q->desc_pool_id); + qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE); + goto fail_return; + } + + if (qdf_unlikely(peer_id == DP_INVALID_PEER)) { + htt_tcl_metadata = vdev->htt_tcl_metadata; + HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1); + } else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) { + HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata, + HTT_TCL_METADATA_TYPE_PEER_BASED); + HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata, + peer_id); + } else + htt_tcl_metadata = vdev->htt_tcl_metadata; + + + if (msdu_info->exception_fw) { + HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1); + } + + /* Enqueue the Tx MSDU descriptor to HW for transmit */ + status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid, + htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata); + + if (status != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s Tx_hw_enqueue Fail tx_desc %pK queue %d", + __func__, tx_desc, tx_q->ring_id); + dp_tx_get_tid(vdev, nbuf, msdu_info); + tid_stats = &pdev->stats.tid_stats. + tid_tx_stats[tx_q->ring_id][tid]; + tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++; + dp_tx_desc_release(tx_desc, tx_q->desc_pool_id); + qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE); + goto fail_return; + } + + nbuf = NULL; + +fail_return: + dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl); + + return nbuf; +} + +/** + * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor + * @soc: Soc handle + * @desc: software Tx descriptor to be processed + * + * Return: none + */ +static inline void dp_tx_comp_free_buf(struct dp_soc *soc, + struct dp_tx_desc_s *desc) +{ + struct dp_vdev *vdev = desc->vdev; + qdf_nbuf_t nbuf = desc->nbuf; + + /* nbuf already freed in vdev detach path */ + if (!nbuf) + return; + + /* If it is TDLS mgmt, don't unmap or free the frame */ + if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) + return dp_non_std_tx_comp_free_buff(soc, desc, vdev); + + /* 0 : MSDU buffer, 1 : MLE */ + if (desc->msdu_ext_desc) { + /* TSO free */ + if (hal_tx_ext_desc_get_tso_enable( + desc->msdu_ext_desc->vaddr)) { + /* unmap eash TSO seg before free the nbuf */ + dp_tx_tso_unmap_segment(soc, desc->tso_desc, + desc->tso_num_desc); + qdf_nbuf_free(nbuf); + return; + } + } + + qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE); + + if (qdf_unlikely(!vdev)) { + qdf_nbuf_free(nbuf); + return; + } + + if (qdf_likely(!vdev->mesh_vdev)) + qdf_nbuf_free(nbuf); + else { + if (desc->flags & DP_TX_DESC_FLAG_TO_FW) { + qdf_nbuf_free(nbuf); + DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1); + } else + vdev->osif_tx_free_ext((nbuf)); + } +} +/** + * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs + * @vdev: DP vdev handle + * @nbuf: skb + * @msdu_info: MSDU info to be setup in MSDU extension descriptor + * + * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL + * + * Return: NULL on success, + * nbuf when it fails to send + */ +#if QDF_LOCK_STATS +noinline +#else +#endif +qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info) +{ + uint32_t i; + struct dp_pdev *pdev = vdev->pdev; + struct dp_soc *soc = pdev->soc; + struct dp_tx_desc_s *tx_desc; + bool is_cce_classified = false; + QDF_STATUS status; + uint16_t htt_tcl_metadata = 0; + struct dp_tx_queue *tx_q = &msdu_info->tx_queue; + hal_ring_handle_t hal_ring_hdl = + soc->tcl_data_ring[tx_q->ring_id].hal_srng; + struct cdp_tid_tx_stats *tid_stats = NULL; + + if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl))) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s %d : HAL RING Access Failed -- %pK", + __func__, __LINE__, hal_ring_hdl); + dp_tx_get_tid(vdev, nbuf, msdu_info); + tid_stats = &pdev->stats.tid_stats. + tid_tx_stats[tx_q->ring_id][msdu_info->tid]; + tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++; + DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1); + return nbuf; + } + + if (qdf_unlikely(soc->cce_disable)) { + is_cce_classified = dp_cce_classify(vdev, nbuf); + if (is_cce_classified) { + DP_STATS_INC(vdev, tx_i.cce_classified, 1); + msdu_info->tid = DP_VO_TID; + } + } + + if (msdu_info->frm_type == dp_tx_frm_me) + nbuf = msdu_info->u.sg_info.curr_seg->nbuf; + + i = 0; + /* Print statement to track i and num_seg */ + /* + * For each segment (maps to 1 MSDU) , prepare software and hardware + * descriptors using information in msdu_info + */ + while (i < msdu_info->num_seg) { + /* + * Setup Tx descriptor for an MSDU, and MSDU extension + * descriptor + */ + tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info, + tx_q->desc_pool_id); + + if (!tx_desc) { + if (msdu_info->frm_type == dp_tx_frm_me) { + dp_tx_me_free_buf(pdev, + (void *)(msdu_info->u.sg_info + .curr_seg->frags[0].vaddr)); + i++; + continue; + } + + if (msdu_info->frm_type == dp_tx_frm_tso) { + dp_tx_tso_unmap_segment(soc, + msdu_info->u.tso_info. + curr_seg, + msdu_info->u.tso_info. + tso_num_seg_list); + + if (msdu_info->u.tso_info.curr_seg->next) { + msdu_info->u.tso_info.curr_seg = + msdu_info->u.tso_info.curr_seg->next; + i++; + continue; + } + } + + goto done; + } + + if (msdu_info->frm_type == dp_tx_frm_me) { + tx_desc->me_buffer = + msdu_info->u.sg_info.curr_seg->frags[0].vaddr; + tx_desc->flags |= DP_TX_DESC_FLAG_ME; + } + + if (is_cce_classified) + tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; + + htt_tcl_metadata = vdev->htt_tcl_metadata; + if (msdu_info->exception_fw) { + HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1); + } + + /* + * For frames with multiple segments (TSO, ME), jump to next + * segment. + */ + if (msdu_info->frm_type == dp_tx_frm_tso) { + if (msdu_info->u.tso_info.curr_seg->next) { + msdu_info->u.tso_info.curr_seg = + msdu_info->u.tso_info.curr_seg->next; + + /* + * If this is a jumbo nbuf, then increment the + * number of nbuf users for each additional + * segment of the msdu. This will ensure that + * the skb is freed only after receiving tx + * completion for all segments of an nbuf + */ + qdf_nbuf_inc_users(nbuf); + + /* Check with MCL if this is needed */ + /* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; + */ + } + } + + /* + * Enqueue the Tx MSDU descriptor to HW for transmit + */ + status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid, + htt_tcl_metadata, tx_q->ring_id, NULL); + + if (status != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s Tx_hw_enqueue Fail tx_desc %pK queue %d", + __func__, tx_desc, tx_q->ring_id); + + dp_tx_get_tid(vdev, nbuf, msdu_info); + tid_stats = &pdev->stats.tid_stats. + tid_tx_stats[tx_q->ring_id][msdu_info->tid]; + tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++; + + dp_tx_desc_release(tx_desc, tx_q->desc_pool_id); + if (msdu_info->frm_type == dp_tx_frm_me) { + i++; + continue; + } + + /* + * For TSO frames, the nbuf users increment done for + * the current segment has to be reverted, since the + * hw enqueue for this segment failed + */ + if (msdu_info->frm_type == dp_tx_frm_tso && + msdu_info->u.tso_info.curr_seg) { + /* + * unmap and free current, + * retransmit remaining segments + */ + dp_tx_comp_free_buf(soc, tx_desc); + i++; + continue; + } + + goto done; + } + + /* + * TODO + * if tso_info structure can be modified to have curr_seg + * as first element, following 2 blocks of code (for TSO and SG) + * can be combined into 1 + */ + + /* + * For Multicast-Unicast converted packets, + * each converted frame (for a client) is represented as + * 1 segment + */ + if ((msdu_info->frm_type == dp_tx_frm_sg) || + (msdu_info->frm_type == dp_tx_frm_me)) { + if (msdu_info->u.sg_info.curr_seg->next) { + msdu_info->u.sg_info.curr_seg = + msdu_info->u.sg_info.curr_seg->next; + nbuf = msdu_info->u.sg_info.curr_seg->nbuf; + } + } + i++; + } + + nbuf = NULL; + +done: + if (hif_pm_runtime_get(soc->hif_handle, + RTPM_ID_DW_TX_HW_ENQUEUE) == 0) { + hal_srng_access_end(soc->hal_soc, hal_ring_hdl); + hif_pm_runtime_put(soc->hif_handle, + RTPM_ID_DW_TX_HW_ENQUEUE); + } else { + hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl); + hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); + hal_srng_inc_flush_cnt(hal_ring_hdl); + } + + return nbuf; +} + +/** + * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info + * for SG frames + * @vdev: DP vdev handle + * @nbuf: skb + * @seg_info: Pointer to Segment info Descriptor to be prepared + * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc. + * + * Return: NULL on success, + * nbuf when it fails to send + */ +static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info) +{ + uint32_t cur_frag, nr_frags; + qdf_dma_addr_t paddr; + struct dp_tx_sg_info_s *sg_info; + + sg_info = &msdu_info->u.sg_info; + nr_frags = qdf_nbuf_get_nr_frags(nbuf); + + if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf, + QDF_DMA_TO_DEVICE)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "dma map error"); + DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1); + + qdf_nbuf_free(nbuf); + return NULL; + } + + paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); + seg_info->frags[0].paddr_lo = paddr; + seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32; + seg_info->frags[0].len = qdf_nbuf_headlen(nbuf); + seg_info->frags[0].vaddr = (void *) nbuf; + + for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) { + if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev, + nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "frag dma map error"); + DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1); + qdf_nbuf_free(nbuf); + return NULL; + } + + paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); + seg_info->frags[cur_frag + 1].paddr_lo = paddr; + seg_info->frags[cur_frag + 1].paddr_hi = + ((uint64_t) paddr) >> 32; + seg_info->frags[cur_frag + 1].len = + qdf_nbuf_get_frag_size(nbuf, cur_frag); + } + + seg_info->frag_cnt = (cur_frag + 1); + seg_info->total_len = qdf_nbuf_len(nbuf); + seg_info->next = NULL; + + sg_info->curr_seg = seg_info; + + msdu_info->frm_type = dp_tx_frm_sg; + msdu_info->num_seg = 1; + + return nbuf; +} + +/** + * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info + * @vdev: DP vdev handle + * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc. + * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions + * + * Return: NULL on failure, + * nbuf when extracted successfully + */ +static +void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev, + struct dp_tx_msdu_info_s *msdu_info, + uint16_t ppdu_cookie) +{ + struct htt_tx_msdu_desc_ext2_t *meta_data = + (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0]; + + qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t)); + + HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET + (msdu_info->meta_data[5], 1); + HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET + (msdu_info->meta_data[5], 1); + HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET + (msdu_info->meta_data[6], ppdu_cookie); + + msdu_info->exception_fw = 1; + msdu_info->is_tx_sniffer = 1; +} + +#ifdef MESH_MODE_SUPPORT + +/** + * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf + and prepare msdu_info for mesh frames. + * @vdev: DP vdev handle + * @nbuf: skb + * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc. + * + * Return: NULL on failure, + * nbuf when extracted successfully + */ +static +qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info) +{ + struct meta_hdr_s *mhdr; + struct htt_tx_msdu_desc_ext2_t *meta_data = + (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0]; + + mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf); + + if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) { + msdu_info->exception_fw = 0; + goto remove_meta_hdr; + } + + msdu_info->exception_fw = 1; + + qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t)); + + meta_data->host_tx_desc_pool = 1; + meta_data->update_peer_cache = 1; + meta_data->learning_frame = 1; + + if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) { + meta_data->power = mhdr->power; + + meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs; + meta_data->nss_mask = 1 << mhdr->rate_info[0].nss; + meta_data->pream_type = mhdr->rate_info[0].preamble_type; + meta_data->retry_limit = mhdr->rate_info[0].max_tries; + + meta_data->dyn_bw = 1; + + meta_data->valid_pwr = 1; + meta_data->valid_mcs_mask = 1; + meta_data->valid_nss_mask = 1; + meta_data->valid_preamble_type = 1; + meta_data->valid_retries = 1; + meta_data->valid_bw_info = 1; + } + + if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) { + meta_data->encrypt_type = 0; + meta_data->valid_encrypt_type = 1; + meta_data->learning_frame = 0; + } + + meta_data->valid_key_flags = 1; + meta_data->key_flags = (mhdr->keyix & 0x3); + +remove_meta_hdr: + if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "qdf_nbuf_pull_head failed"); + qdf_nbuf_free(nbuf); + return NULL; + } + + msdu_info->tid = qdf_nbuf_get_priority(nbuf); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + "%s , Meta hdr %0x %0x %0x %0x %0x %0x" + " tid %d to_fw %d", + __func__, msdu_info->meta_data[0], + msdu_info->meta_data[1], + msdu_info->meta_data[2], + msdu_info->meta_data[3], + msdu_info->meta_data[4], + msdu_info->meta_data[5], + msdu_info->tid, msdu_info->exception_fw); + + return nbuf; +} +#else +static +qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info) +{ + return nbuf; +} + +#endif + +/** + * dp_check_exc_metadata() - Checks if parameters are valid + * @tx_exc - holds all exception path parameters + * + * Returns true when all the parameters are valid else false + * + */ +static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc) +{ + bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != + HTT_INVALID_TID); + bool invalid_encap_type = + (tx_exc->tx_encap_type > htt_cmn_pkt_num_types && + tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE); + bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types && + tx_exc->sec_type != CDP_INVALID_SEC_TYPE); + bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 && + tx_exc->ppdu_cookie == 0); + + if (invalid_tid || invalid_encap_type || invalid_sec_type || + invalid_cookie) { + return false; + } + + return true; +} + +/** + * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path + * @soc: DP soc handle + * @vdev_id: id of DP vdev handle + * @nbuf: skb + * @tx_exc_metadata: Handle that holds exception path meta data + * + * Entry point for Core Tx layer (DP_TX) invoked from + * hard_start_xmit in OSIF/HDD to transmit frames through fw + * + * Return: NULL on success, + * nbuf when it fails to send + */ +qdf_nbuf_t +dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf, + struct cdp_tx_exception_metadata *tx_exc_metadata) +{ + qdf_ether_header_t *eh = NULL; + struct dp_tx_msdu_info_s msdu_info; + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + + if (qdf_unlikely(!vdev)) + goto fail; + + qdf_mem_zero(&msdu_info, sizeof(msdu_info)); + + if (!tx_exc_metadata) + goto fail; + + msdu_info.tid = tx_exc_metadata->tid; + + eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); + dp_verbose_debug("skb "QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(nbuf->data)); + + DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf)); + + if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid parameters in exception path"); + goto fail; + } + + /* Basic sanity checks for unsupported packets */ + + /* MESH mode */ + if (qdf_unlikely(vdev->mesh_vdev)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Mesh mode is not supported in exception path"); + goto fail; + } + + /* TSO or SG */ + if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) || + qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "TSO and SG are not supported in exception path"); + + goto fail; + } + + /* RAW */ + if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Raw frame is not supported in exception path"); + goto fail; + } + + + /* Mcast enhancement*/ + if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) { + if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) && + !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW"); + } + } + + if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) { + DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1, + qdf_nbuf_len(nbuf)); + + dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info, + tx_exc_metadata->ppdu_cookie); + } + + /* + * Get HW Queue to use for this frame. + * TCL supports upto 4 DMA rings, out of which 3 rings are + * dedicated for data and 1 for command. + * "queue_id" maps to one hardware ring. + * With each ring, we also associate a unique Tx descriptor pool + * to minimize lock contention for these resources. + */ + dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); + + /* Single linear frame */ + /* + * If nbuf is a simple linear frame, use send_single function to + * prepare direct-buffer type TCL descriptor and enqueue to TCL + * SRNG. There is no need to setup a MSDU extension descriptor. + */ + nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, + tx_exc_metadata->peer_id, tx_exc_metadata); + + return nbuf; + +fail: + dp_verbose_debug("pkt send failed"); + return nbuf; +} + +/** + * dp_tx_send_mesh() - Transmit mesh frame on a given VAP + * @soc: DP soc handle + * @vdev_id: DP vdev handle + * @nbuf: skb + * + * Entry point for Core Tx layer (DP_TX) invoked from + * hard_start_xmit in OSIF/HDD + * + * Return: NULL on success, + * nbuf when it fails to send + */ +#ifdef MESH_MODE_SUPPORT +qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id, + qdf_nbuf_t nbuf) +{ + struct meta_hdr_s *mhdr; + qdf_nbuf_t nbuf_mesh = NULL; + qdf_nbuf_t nbuf_clone = NULL; + struct dp_vdev *vdev; + uint8_t no_enc_frame = 0; + + nbuf_mesh = qdf_nbuf_unshare(nbuf); + if (!nbuf_mesh) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "qdf_nbuf_unshare failed"); + return nbuf; + } + + vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + if (!vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "vdev is NULL for vdev_id %d", vdev_id); + return nbuf; + } + + nbuf = nbuf_mesh; + + mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf); + + if ((vdev->sec_type != cdp_sec_type_none) && + (mhdr->flags & METAHDR_FLAG_NOENCRYPT)) + no_enc_frame = 1; + + if (mhdr->flags & METAHDR_FLAG_NOQOS) + qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST); + + if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) && + !no_enc_frame) { + nbuf_clone = qdf_nbuf_clone(nbuf); + if (!nbuf_clone) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "qdf_nbuf_clone failed"); + return nbuf; + } + qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO); + } + + if (nbuf_clone) { + if (!dp_tx_send(soc, vdev_id, nbuf_clone)) { + DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1); + } else { + qdf_nbuf_free(nbuf_clone); + } + } + + if (no_enc_frame) + qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO); + else + qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID); + + nbuf = dp_tx_send(soc, vdev_id, nbuf); + if ((!nbuf) && no_enc_frame) { + DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1); + } + + return nbuf; +} + +#else + +qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id, + qdf_nbuf_t nbuf) +{ + return dp_tx_send(soc, vdev_id, nbuf); +} + +#endif + +/** + * dp_tx_send() - Transmit a frame on a given VAP + * @soc: DP soc handle + * @vdev_id: id of DP vdev handle + * @nbuf: skb + * + * Entry point for Core Tx layer (DP_TX) invoked from + * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding + * cases + * + * Return: NULL on success, + * nbuf when it fails to send + */ +qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf) +{ + qdf_ether_header_t *eh = NULL; + struct dp_tx_msdu_info_s msdu_info; + struct dp_tx_seg_info_s seg_info; + uint16_t peer_id = HTT_INVALID_PEER; + qdf_nbuf_t nbuf_mesh = NULL; + struct dp_vdev *vdev = + dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, + vdev_id); + + if (qdf_unlikely(!vdev)) + return nbuf; + + qdf_mem_zero(&msdu_info, sizeof(msdu_info)); + qdf_mem_zero(&seg_info, sizeof(seg_info)); + + eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); + + dp_verbose_debug("skb "QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(nbuf->data)); + + /* + * Set Default Host TID value to invalid TID + * (TID override disabled) + */ + msdu_info.tid = HTT_TX_EXT_TID_INVALID; + DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf)); + + if (qdf_unlikely(vdev->mesh_vdev)) { + nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf, + &msdu_info); + if (!nbuf_mesh) { + dp_verbose_debug("Extracting mesh metadata failed"); + return nbuf; + } + nbuf = nbuf_mesh; + } + + /* + * Get HW Queue to use for this frame. + * TCL supports upto 4 DMA rings, out of which 3 rings are + * dedicated for data and 1 for command. + * "queue_id" maps to one hardware ring. + * With each ring, we also associate a unique Tx descriptor pool + * to minimize lock contention for these resources. + */ + dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); + + /* + * TCL H/W supports 2 DSCP-TID mapping tables. + * Table 1 - Default DSCP-TID mapping table + * Table 2 - 1 DSCP-TID override table + * + * If we need a different DSCP-TID mapping for this vap, + * call tid_classify to extract DSCP/ToS from frame and + * map to a TID and store in msdu_info. This is later used + * to fill in TCL Input descriptor (per-packet TID override). + */ + dp_tx_classify_tid(vdev, nbuf, &msdu_info); + + /* + * Classify the frame and call corresponding + * "prepare" function which extracts the segment (TSO) + * and fragmentation information (for TSO , SG, ME, or Raw) + * into MSDU_INFO structure which is later used to fill + * SW and HW descriptors. + */ + if (qdf_nbuf_is_tso(nbuf)) { + dp_verbose_debug("TSO frame %pK", vdev); + DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1, + qdf_nbuf_len(nbuf)); + + if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) { + DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1, + qdf_nbuf_len(nbuf)); + return nbuf; + } + + goto send_multiple; + } + + /* SG */ + if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) { + nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info); + + if (!nbuf) + return NULL; + + dp_verbose_debug("non-TSO SG frame %pK", vdev); + + DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1, + qdf_nbuf_len(nbuf)); + + goto send_multiple; + } + +#ifdef ATH_SUPPORT_IQUE + /* Mcast to Ucast Conversion*/ + if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) { + eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); + if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) && + !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) { + dp_verbose_debug("Mcast frm for ME %pK", vdev); + + DP_STATS_INC_PKT(vdev, + tx_i.mcast_en.mcast_pkt, 1, + qdf_nbuf_len(nbuf)); + if (dp_tx_prepare_send_me(vdev, nbuf) == + QDF_STATUS_SUCCESS) { + return NULL; + } + } + } +#endif + + /* RAW */ + if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) { + nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info); + if (!nbuf) + return NULL; + + dp_verbose_debug("Raw frame %pK", vdev); + + goto send_multiple; + + } + + /* Single linear frame */ + /* + * If nbuf is a simple linear frame, use send_single function to + * prepare direct-buffer type TCL descriptor and enqueue to TCL + * SRNG. There is no need to setup a MSDU extension descriptor. + */ + nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL); + + return nbuf; + +send_multiple: + nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info); + + if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw)) + dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf); + + return nbuf; +} + +/** + * dp_tx_reinject_handler() - Tx Reinject Handler + * @tx_desc: software descriptor head pointer + * @status : Tx completion status from HTT descriptor + * + * This function reinjects frames back to Target. + * Todo - Host queue needs to be added + * + * Return: none + */ +static +void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status) +{ + struct dp_vdev *vdev; + struct dp_peer *peer = NULL; + uint32_t peer_id = HTT_INVALID_PEER; + qdf_nbuf_t nbuf = tx_desc->nbuf; + qdf_nbuf_t nbuf_copy = NULL; + struct dp_tx_msdu_info_s msdu_info; + struct dp_peer *sa_peer = NULL; + struct dp_ast_entry *ast_entry = NULL; + struct dp_soc *soc = NULL; + qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); +#ifdef WDS_VENDOR_EXTENSION + int is_mcast = 0, is_ucast = 0; + int num_peers_3addr = 0; + qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf)); + struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf)); +#endif + + vdev = tx_desc->vdev; + soc = vdev->pdev->soc; + + qdf_assert(vdev); + + qdf_mem_zero(&msdu_info, sizeof(msdu_info)); + + dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s Tx reinject path", __func__); + + DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1, + qdf_nbuf_len(tx_desc->nbuf)); + + qdf_spin_lock_bh(&(soc->ast_lock)); + + ast_entry = dp_peer_ast_hash_find_by_pdevid + (soc, + (uint8_t *)(eh->ether_shost), + vdev->pdev->pdev_id); + + if (ast_entry) + sa_peer = ast_entry->peer; + + qdf_spin_unlock_bh(&(soc->ast_lock)); + +#ifdef WDS_VENDOR_EXTENSION + if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { + is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0; + } else { + is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0; + } + is_ucast = !is_mcast; + + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if (peer->bss_peer) + continue; + + /* Detect wds peers that use 3-addr framing for mcast. + * if there are any, the bss_peer is used to send the + * the mcast frame using 3-addr format. all wds enabled + * peers that use 4-addr framing for mcast frames will + * be duplicated and sent as 4-addr frames below. + */ + if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) { + num_peers_3addr = 1; + break; + } + } +#endif + + if (qdf_unlikely(vdev->mesh_vdev)) { + DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf); + } else { + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if ((peer->peer_ids[0] != HTT_INVALID_PEER) && +#ifdef WDS_VENDOR_EXTENSION + /* + * . if 3-addr STA, then send on BSS Peer + * . if Peer WDS enabled and accept 4-addr mcast, + * send mcast on that peer only + * . if Peer WDS enabled and accept 4-addr ucast, + * send ucast on that peer only + */ + ((peer->bss_peer && num_peers_3addr && is_mcast) || + (peer->wds_enabled && + ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) || + (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) { +#else + ((peer->bss_peer && + !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) || + peer->nawds_enabled)) { +#endif + peer_id = DP_INVALID_PEER; + + if (peer->nawds_enabled) { + peer_id = peer->peer_ids[0]; + if (sa_peer == peer) { + QDF_TRACE( + QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + " %s: multicast packet", + __func__); + DP_STATS_INC(peer, + tx.nawds_mcast_drop, 1); + continue; + } + } + + nbuf_copy = qdf_nbuf_copy(nbuf); + + if (!nbuf_copy) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + FL("nbuf copy failed")); + break; + } + + nbuf_copy = dp_tx_send_msdu_single(vdev, + nbuf_copy, + &msdu_info, + peer_id, + NULL); + + if (nbuf_copy) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + FL("pkt send failed")); + qdf_nbuf_free(nbuf_copy); + } else { + if (peer_id != DP_INVALID_PEER) + DP_STATS_INC_PKT(peer, + tx.nawds_mcast, + 1, qdf_nbuf_len(nbuf)); + } + } + } + } + + if (vdev->nawds_enabled) { + peer_id = DP_INVALID_PEER; + + DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast, + 1, qdf_nbuf_len(nbuf)); + + nbuf = dp_tx_send_msdu_single(vdev, + nbuf, + &msdu_info, + peer_id, NULL); + + if (nbuf) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + FL("pkt send failed")); + qdf_nbuf_free(nbuf); + } + } else + qdf_nbuf_free(nbuf); + + dp_tx_desc_release(tx_desc, tx_desc->pool_id); +} + +/** + * dp_tx_inspect_handler() - Tx Inspect Handler + * @tx_desc: software descriptor head pointer + * @status : Tx completion status from HTT descriptor + * + * Handles Tx frames sent back to Host for inspection + * (ProxyARP) + * + * Return: none + */ +static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status) +{ + + struct dp_soc *soc; + struct dp_pdev *pdev = tx_desc->pdev; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s Tx inspect path", + __func__); + + qdf_assert(pdev); + + soc = pdev->soc; + + DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1, + qdf_nbuf_len(tx_desc->nbuf)); + + DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf); + dp_tx_desc_release(tx_desc, tx_desc->pool_id); +} + +#ifdef FEATURE_PERPKT_INFO +/** + * dp_get_completion_indication_for_stack() - send completion to stack + * @soc : dp_soc handle + * @pdev: dp_pdev handle + * @peer: dp peer handle + * @ts: transmit completion status structure + * @netbuf: Buffer pointer for free + * + * This function is used for indication whether buffer needs to be + * sent to stack for freeing or not +*/ +QDF_STATUS +dp_get_completion_indication_for_stack(struct dp_soc *soc, + struct dp_pdev *pdev, + struct dp_peer *peer, + struct hal_tx_completion_status *ts, + qdf_nbuf_t netbuf, + uint64_t time_latency) +{ + struct tx_capture_hdr *ppdu_hdr; + uint16_t peer_id = ts->peer_id; + uint32_t ppdu_id = ts->ppdu_id; + uint8_t first_msdu = ts->first_msdu; + uint8_t last_msdu = ts->last_msdu; + + if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode && + !pdev->latency_capture_enable)) + return QDF_STATUS_E_NOSUPPORT; + + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Peer Invalid")); + return QDF_STATUS_E_INVAL; + } + + if (pdev->mcopy_mode) { + if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) && + (pdev->m_copy_id.tx_peer_id == peer_id)) { + return QDF_STATUS_E_INVAL; + } + + pdev->m_copy_id.tx_ppdu_id = ppdu_id; + pdev->m_copy_id.tx_peer_id = peer_id; + } + + if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("No headroom")); + return QDF_STATUS_E_NOMEM; + } + + ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf); + qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw, + QDF_MAC_ADDR_SIZE); + qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw, + QDF_MAC_ADDR_SIZE); + ppdu_hdr->ppdu_id = ppdu_id; + ppdu_hdr->peer_id = peer_id; + ppdu_hdr->first_msdu = first_msdu; + ppdu_hdr->last_msdu = last_msdu; + if (qdf_unlikely(pdev->latency_capture_enable)) { + ppdu_hdr->tsf = ts->tsf; + ppdu_hdr->time_latency = time_latency; + } + + return QDF_STATUS_SUCCESS; +} + + +/** + * dp_send_completion_to_stack() - send completion to stack + * @soc : dp_soc handle + * @pdev: dp_pdev handle + * @peer_id: peer_id of the peer for which completion came + * @ppdu_id: ppdu_id + * @netbuf: Buffer pointer for free + * + * This function is used to send completion to stack + * to free buffer +*/ +void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev, + uint16_t peer_id, uint32_t ppdu_id, + qdf_nbuf_t netbuf) +{ + dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc, + netbuf, peer_id, + WDI_NO_VAL, pdev->pdev_id); +} +#else +static QDF_STATUS +dp_get_completion_indication_for_stack(struct dp_soc *soc, + struct dp_pdev *pdev, + struct dp_peer *peer, + struct hal_tx_completion_status *ts, + qdf_nbuf_t netbuf, + uint64_t time_latency) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static void +dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev, + uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf) +{ +} +#endif + +#ifdef MESH_MODE_SUPPORT +/** + * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats + * in mesh meta header + * @tx_desc: software descriptor head pointer + * @ts: pointer to tx completion stats + * Return: none + */ +static +void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc, + struct hal_tx_completion_status *ts) +{ + struct meta_hdr_s *mhdr; + qdf_nbuf_t netbuf = tx_desc->nbuf; + + if (!tx_desc->msdu_ext_desc) { + if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "netbuf %pK offset %d", + netbuf, tx_desc->pkt_offset); + return; + } + } + if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "netbuf %pK offset %lu", netbuf, + sizeof(struct meta_hdr_s)); + return; + } + + mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf); + mhdr->rssi = ts->ack_frame_rssi; + mhdr->channel = tx_desc->pdev->operating_channel; +} + +#else +static +void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc, + struct hal_tx_completion_status *ts) +{ +} + +#endif + +/** + * dp_tx_compute_delay() - Compute and fill in all timestamps + * to pass in correct fields + * + * @vdev: pdev handle + * @tx_desc: tx descriptor + * @tid: tid value + * @ring_id: TCL or WBM ring number for transmit path + * Return: none + */ +static void dp_tx_compute_delay(struct dp_vdev *vdev, + struct dp_tx_desc_s *tx_desc, + uint8_t tid, uint8_t ring_id) +{ + int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue; + uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay; + + if (qdf_likely(!vdev->pdev->delay_stats_flag)) + return; + + current_timestamp = qdf_ktime_to_ms(qdf_ktime_get()); + timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf); + timestamp_hw_enqueue = tx_desc->timestamp; + sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress); + fwhw_transmit_delay = (uint32_t)(current_timestamp - + timestamp_hw_enqueue); + interframe_delay = (uint32_t)(timestamp_ingress - + vdev->prev_tx_enq_tstamp); + + /* + * Delay in software enqueue + */ + dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid, + CDP_DELAY_STATS_SW_ENQ, ring_id); + /* + * Delay between packet enqueued to HW and Tx completion + */ + dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid, + CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id); + + /* + * Update interframe delay stats calculated at hardstart receive point. + * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so + * interframe delay will not be calculate correctly for 1st frame. + * On the other side, this will help in avoiding extra per packet check + * of !vdev->prev_tx_enq_tstamp. + */ + dp_update_delay_stats(vdev->pdev, interframe_delay, tid, + CDP_DELAY_STATS_TX_INTERFRAME, ring_id); + vdev->prev_tx_enq_tstamp = timestamp_ingress; +} + +#ifdef DISABLE_DP_STATS +static +inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer) +{ +} +#else +static +inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer) +{ + enum qdf_proto_subtype subtype = QDF_PROTO_INVALID; + + DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype)); + if (subtype != QDF_PROTO_INVALID) + DP_STATS_INC(peer, tx.no_ack_count[subtype], 1); +} +#endif + +/** + * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications + * per wbm ring + * + * @tx_desc: software descriptor head pointer + * @ts: Tx completion status + * @peer: peer handle + * @ring_id: ring number + * + * Return: None + */ +static inline void +dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc, + struct hal_tx_completion_status *ts, + struct dp_peer *peer, uint8_t ring_id) +{ + struct dp_pdev *pdev = peer->vdev->pdev; + struct dp_soc *soc = NULL; + uint8_t mcs, pkt_type; + uint8_t tid = ts->tid; + uint32_t length; + struct cdp_tid_tx_stats *tid_stats; + + if (!pdev) + return; + + if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) + tid = CDP_MAX_DATA_TIDS - 1; + + tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid]; + soc = pdev->soc; + + mcs = ts->mcs; + pkt_type = ts->pkt_type; + + if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) { + dp_err("Release source is not from TQM"); + return; + } + + length = qdf_nbuf_len(tx_desc->nbuf); + DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length); + + if (qdf_unlikely(pdev->delay_stats_flag)) + dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id); + DP_STATS_INCC(peer, tx.dropped.age_out, 1, + (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED)); + + DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length, + (ts->status == HAL_TX_TQM_RR_REM_CMD_REM)); + + DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1, + (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX)); + + DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1, + (ts->status == HAL_TX_TQM_RR_REM_CMD_TX)); + + DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1, + (ts->status == HAL_TX_TQM_RR_FW_REASON1)); + + DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1, + (ts->status == HAL_TX_TQM_RR_FW_REASON2)); + + DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1, + (ts->status == HAL_TX_TQM_RR_FW_REASON3)); + + /* + * tx_failed is ideally supposed to be updated from HTT ppdu completion + * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there + * are no completions for failed cases. Hence updating tx_failed from + * data path. Please note that if tx_failed is fixed to be from ppdu, + * then this has to be removed + */ + peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num + + peer->stats.tx.dropped.fw_rem_notx + + peer->stats.tx.dropped.fw_rem_tx + + peer->stats.tx.dropped.age_out + + peer->stats.tx.dropped.fw_reason1 + + peer->stats.tx.dropped.fw_reason2 + + peer->stats.tx.dropped.fw_reason3; + + if (ts->status < CDP_MAX_TX_TQM_STATUS) { + tid_stats->tqm_status_cnt[ts->status]++; + } + + if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) { + dp_update_no_ack_stats(tx_desc->nbuf, peer); + return; + } + + DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma); + + DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu); + DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu); + + /* + * Following Rate Statistics are updated from HTT PPDU events from FW. + * Return from here if HTT PPDU events are enabled. + */ + if (!(soc->process_tx_status)) + return; + + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX))); + + DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1); + DP_STATS_INC(peer, tx.bw[ts->bw], 1); + DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi); + DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1); + DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc); + DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc); + DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1); + +#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE + dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, + &peer->stats, ts->peer_id, + UPDATE_PEER_STATS, pdev->pdev_id); +#endif +} + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +/** + * dp_tx_flow_pool_lock() - take flow pool lock + * @soc: core txrx main context + * @tx_desc: tx desc + * + * Return: None + */ +static inline +void dp_tx_flow_pool_lock(struct dp_soc *soc, + struct dp_tx_desc_s *tx_desc) +{ + struct dp_tx_desc_pool_s *pool; + uint8_t desc_pool_id; + + desc_pool_id = tx_desc->pool_id; + pool = &soc->tx_desc[desc_pool_id]; + + qdf_spin_lock_bh(&pool->flow_pool_lock); +} + +/** + * dp_tx_flow_pool_unlock() - release flow pool lock + * @soc: core txrx main context + * @tx_desc: tx desc + * + * Return: None + */ +static inline +void dp_tx_flow_pool_unlock(struct dp_soc *soc, + struct dp_tx_desc_s *tx_desc) +{ + struct dp_tx_desc_pool_s *pool; + uint8_t desc_pool_id; + + desc_pool_id = tx_desc->pool_id; + pool = &soc->tx_desc[desc_pool_id]; + + qdf_spin_unlock_bh(&pool->flow_pool_lock); +} +#else +static inline +void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) +{ +} + +static inline +void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) +{ +} +#endif + +/** + * dp_tx_notify_completion() - Notify tx completion for this desc + * @soc: core txrx main context + * @tx_desc: tx desc + * @netbuf: buffer + * + * Return: none + */ +static inline void dp_tx_notify_completion(struct dp_soc *soc, + struct dp_tx_desc_s *tx_desc, + qdf_nbuf_t netbuf) +{ + void *osif_dev; + ol_txrx_completion_fp tx_compl_cbk = NULL; + + qdf_assert(tx_desc); + + dp_tx_flow_pool_lock(soc, tx_desc); + + if (!tx_desc->vdev || + !tx_desc->vdev->osif_vdev) { + dp_tx_flow_pool_unlock(soc, tx_desc); + return; + } + + osif_dev = tx_desc->vdev->osif_vdev; + tx_compl_cbk = tx_desc->vdev->tx_comp; + dp_tx_flow_pool_unlock(soc, tx_desc); + + if (tx_compl_cbk) + tx_compl_cbk(netbuf, osif_dev); +} + +/** dp_tx_sojourn_stats_process() - Collect sojourn stats + * @pdev: pdev handle + * @tid: tid value + * @txdesc_ts: timestamp from txdesc + * @ppdu_id: ppdu id + * + * Return: none + */ +#ifdef FEATURE_PERPKT_INFO +static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev, + struct dp_peer *peer, + uint8_t tid, + uint64_t txdesc_ts, + uint32_t ppdu_id) +{ + uint64_t delta_ms; + struct cdp_tx_sojourn_stats *sojourn_stats; + + if (qdf_unlikely(pdev->enhanced_stats_en == 0)) + return; + + if (qdf_unlikely(tid == HTT_INVALID_TID || + tid >= CDP_DATA_TID_MAX)) + return; + + if (qdf_unlikely(!pdev->sojourn_buf)) + return; + + sojourn_stats = (struct cdp_tx_sojourn_stats *) + qdf_nbuf_data(pdev->sojourn_buf); + + sojourn_stats->cookie = (void *)peer->wlanstats_ctx; + + delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) - + txdesc_ts; + qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid], + delta_ms); + sojourn_stats->sum_sojourn_msdu[tid] = delta_ms; + sojourn_stats->num_msdus[tid] = 1; + sojourn_stats->avg_sojourn_msdu[tid].internal = + peer->avg_sojourn_msdu[tid].internal; + dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc, + pdev->sojourn_buf, HTT_INVALID_PEER, + WDI_NO_VAL, pdev->pdev_id); + sojourn_stats->sum_sojourn_msdu[tid] = 0; + sojourn_stats->num_msdus[tid] = 0; + sojourn_stats->avg_sojourn_msdu[tid].internal = 0; +} +#else +static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev, + struct dp_peer *peer, + uint8_t tid, + uint64_t txdesc_ts, + uint32_t ppdu_id) +{ +} +#endif + +/** + * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf + * @soc: DP Soc handle + * @tx_desc: software Tx descriptor + * @ts : Tx completion status from HAL/HTT descriptor + * + * Return: none + */ +static inline void +dp_tx_comp_process_desc(struct dp_soc *soc, + struct dp_tx_desc_s *desc, + struct hal_tx_completion_status *ts, + struct dp_peer *peer) +{ + uint64_t time_latency = 0; + /* + * m_copy/tx_capture modes are not supported for + * scatter gather packets + */ + if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) { + time_latency = (qdf_ktime_to_ms(qdf_ktime_get()) - + desc->timestamp); + } + if (!(desc->msdu_ext_desc)) { + if (QDF_STATUS_SUCCESS == + dp_tx_add_to_comp_queue(soc, desc, ts, peer)) { + return; + } + + if (QDF_STATUS_SUCCESS == + dp_get_completion_indication_for_stack(soc, + desc->pdev, + peer, ts, + desc->nbuf, + time_latency)) { + qdf_nbuf_unmap(soc->osdev, desc->nbuf, + QDF_DMA_TO_DEVICE); + dp_send_completion_to_stack(soc, + desc->pdev, + ts->peer_id, + ts->ppdu_id, + desc->nbuf); + return; + } + } + + dp_tx_comp_free_buf(soc, desc); +} + +/** + * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info + * @soc: DP soc handle + * @tx_desc: software descriptor head pointer + * @ts: Tx completion status + * @peer: peer handle + * @ring_id: ring number + * + * Return: none + */ +static inline +void dp_tx_comp_process_tx_status(struct dp_soc *soc, + struct dp_tx_desc_s *tx_desc, + struct hal_tx_completion_status *ts, + struct dp_peer *peer, uint8_t ring_id) +{ + uint32_t length; + qdf_ether_header_t *eh; + struct dp_vdev *vdev = tx_desc->vdev; + qdf_nbuf_t nbuf = tx_desc->nbuf; + + if (!vdev || !nbuf) { + dp_info_rl("invalid tx descriptor. vdev or nbuf NULL"); + goto out; + } + + eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); + length = qdf_nbuf_len(nbuf); + + DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf, + QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, + qdf_nbuf_data_addr(nbuf), + sizeof(qdf_nbuf_data(nbuf)), + tx_desc->id, + ts->status)); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "-------------------- \n" + "Tx Completion Stats: \n" + "-------------------- \n" + "ack_frame_rssi = %d \n" + "first_msdu = %d \n" + "last_msdu = %d \n" + "msdu_part_of_amsdu = %d \n" + "rate_stats valid = %d \n" + "bw = %d \n" + "pkt_type = %d \n" + "stbc = %d \n" + "ldpc = %d \n" + "sgi = %d \n" + "mcs = %d \n" + "ofdma = %d \n" + "tones_in_ru = %d \n" + "tsf = %d \n" + "ppdu_id = %d \n" + "transmit_cnt = %d \n" + "tid = %d \n" + "peer_id = %d\n", + ts->ack_frame_rssi, ts->first_msdu, + ts->last_msdu, ts->msdu_part_of_amsdu, + ts->valid, ts->bw, ts->pkt_type, ts->stbc, + ts->ldpc, ts->sgi, ts->mcs, ts->ofdma, + ts->tones_in_ru, ts->tsf, ts->ppdu_id, + ts->transmit_cnt, ts->tid, ts->peer_id); + + /* Update SoC level stats */ + DP_STATS_INCC(soc, tx.dropped_fw_removed, 1, + (ts->status == HAL_TX_TQM_RR_REM_CMD_REM)); + + if (!peer) { + dp_err_rl("peer is null or deletion in progress"); + DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length); + goto out; + } + + /* Update per-packet stats for mesh mode */ + if (qdf_unlikely(vdev->mesh_vdev) && + !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) + dp_tx_comp_fill_tx_completion_stats(tx_desc, ts); + + /* Update peer level stats */ + if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) { + if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) { + DP_STATS_INC_PKT(peer, tx.mcast, 1, length); + + if ((peer->vdev->tx_encap_type == + htt_cmn_pkt_type_ethernet) && + QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { + DP_STATS_INC_PKT(peer, tx.bcast, 1, length); + } + } + } else { + DP_STATS_INC_PKT(peer, tx.ucast, 1, length); + if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) + DP_STATS_INC_PKT(peer, tx.tx_success, 1, length); + } + + dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id); + +#ifdef QCA_SUPPORT_RDK_STATS + if (soc->wlanstats_enabled) + dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid, + tx_desc->timestamp, + ts->ppdu_id); +#endif + +out: + return; +} + +/** + * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler + * @soc: core txrx main context + * @comp_head: software descriptor head pointer + * @ring_id: ring number + * + * This function will process batch of descriptors reaped by dp_tx_comp_handler + * and release the software descriptors after processing is complete + * + * Return: none + */ +static void +dp_tx_comp_process_desc_list(struct dp_soc *soc, + struct dp_tx_desc_s *comp_head, uint8_t ring_id) +{ + struct dp_tx_desc_s *desc; + struct dp_tx_desc_s *next; + struct hal_tx_completion_status ts = {0}; + struct dp_peer *peer; + qdf_nbuf_t netbuf; + + desc = comp_head; + + while (desc) { + hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc); + peer = dp_peer_find_by_id(soc, ts.peer_id); + dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id); + + netbuf = desc->nbuf; + /* check tx complete notification */ + if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf)) + dp_tx_notify_completion(soc, desc, netbuf); + + dp_tx_comp_process_desc(soc, desc, &ts, peer); + + if (peer) + dp_peer_unref_del_find_by_id(peer); + + next = desc->next; + + dp_tx_desc_release(desc, desc->pool_id); + desc = next; + } + +} + +/** + * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler + * @tx_desc: software descriptor head pointer + * @status : Tx completion status from HTT descriptor + * @ring_id: ring number + * + * This function will process HTT Tx indication messages from Target + * + * Return: none + */ +static +void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status, + uint8_t ring_id) +{ + uint8_t tx_status; + struct dp_pdev *pdev; + struct dp_vdev *vdev; + struct dp_soc *soc; + struct hal_tx_completion_status ts = {0}; + uint32_t *htt_desc = (uint32_t *)status; + struct dp_peer *peer; + struct cdp_tid_tx_stats *tid_stats = NULL; + struct htt_soc *htt_handle; + + qdf_assert(tx_desc->pdev); + + pdev = tx_desc->pdev; + vdev = tx_desc->vdev; + soc = pdev->soc; + + if (!vdev) + return; + tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]); + htt_handle = (struct htt_soc *)soc->htt_handle; + htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status); + + switch (tx_status) { + case HTT_TX_FW2WBM_TX_STATUS_OK: + case HTT_TX_FW2WBM_TX_STATUS_DROP: + case HTT_TX_FW2WBM_TX_STATUS_TTL: + { + uint8_t tid; + if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) { + ts.peer_id = + HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET( + htt_desc[2]); + ts.tid = + HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET( + htt_desc[2]); + } else { + ts.peer_id = HTT_INVALID_PEER; + ts.tid = HTT_INVALID_TID; + } + ts.ppdu_id = + HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET( + htt_desc[1]); + ts.ack_frame_rssi = + HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET( + htt_desc[1]); + + ts.first_msdu = 1; + ts.last_msdu = 1; + tid = ts.tid; + if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) + tid = CDP_MAX_DATA_TIDS - 1; + + tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid]; + + if (qdf_unlikely(pdev->delay_stats_flag)) + dp_tx_compute_delay(vdev, tx_desc, tid, ring_id); + if (tx_status < CDP_MAX_TX_HTT_STATUS) { + tid_stats->htt_status_cnt[tx_status]++; + } + + peer = dp_peer_find_by_id(soc, ts.peer_id); + + if (qdf_likely(peer)) + dp_peer_unref_del_find_by_id(peer); + + dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id); + dp_tx_comp_process_desc(soc, tx_desc, &ts, peer); + dp_tx_desc_release(tx_desc, tx_desc->pool_id); + + break; + } + case HTT_TX_FW2WBM_TX_STATUS_REINJECT: + { + dp_tx_reinject_handler(tx_desc, status); + break; + } + case HTT_TX_FW2WBM_TX_STATUS_INSPECT: + { + dp_tx_inspect_handler(tx_desc, status); + break; + } + case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY: + { + dp_tx_mec_handler(vdev, status); + break; + } + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s Invalid HTT tx_status %d\n", + __func__, tx_status); + break; + } +} + +#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT +static inline +bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped) +{ + bool limit_hit = false; + struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx; + + limit_hit = + (num_reaped >= cfg->tx_comp_loop_pkt_limit) ? true : false; + + if (limit_hit) + DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1); + + return limit_hit; +} + +static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc) +{ + return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check; +} +#else +static inline +bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped) +{ + return false; +} + +static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc) +{ + return false; +} +#endif + +uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc, + hal_ring_handle_t hal_ring_hdl, uint8_t ring_id, + uint32_t quota) +{ + void *tx_comp_hal_desc; + uint8_t buffer_src; + uint8_t pool_id; + uint32_t tx_desc_id; + struct dp_tx_desc_s *tx_desc = NULL; + struct dp_tx_desc_s *head_desc = NULL; + struct dp_tx_desc_s *tail_desc = NULL; + uint32_t num_processed = 0; + uint32_t count; + bool force_break = false; + + DP_HIST_INIT(); + +more_data: + /* Re-initialize local variables to be re-used */ + head_desc = NULL; + tail_desc = NULL; + count = 0; + + if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) { + dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl); + return 0; + } + + /* Find head descriptor from completion ring */ + while (qdf_likely(tx_comp_hal_desc = + hal_srng_dst_get_next(soc->hal_soc, hal_ring_hdl))) { + + buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc); + + /* If this buffer was not released by TQM or FW, then it is not + * Tx completion indication, assert */ + if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) && + (buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) { + uint8_t wbm_internal_error; + + dp_err_rl( + "Tx comp release_src != TQM | FW but from %d", + buffer_src); + hal_dump_comp_desc(tx_comp_hal_desc); + DP_STATS_INC(soc, tx.invalid_release_source, 1); + + /* When WBM sees NULL buffer_addr_info in any of + * ingress rings it sends an error indication, + * with wbm_internal_error=1, to a specific ring. + * The WBM2SW ring used to indicate these errors is + * fixed in HW, and that ring is being used as Tx + * completion ring. These errors are not related to + * Tx completions, and should just be ignored + */ + wbm_internal_error = hal_get_wbm_internal_error( + soc->hal_soc, + tx_comp_hal_desc); + + if (wbm_internal_error) { + dp_err_rl("Tx comp wbm_internal_error!!"); + DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1); + + if (HAL_TX_COMP_RELEASE_SOURCE_REO == + buffer_src) + dp_handle_wbm_internal_error( + soc, + tx_comp_hal_desc, + hal_tx_comp_get_buffer_type( + tx_comp_hal_desc)); + + } else { + dp_err_rl("Tx comp wbm_internal_error false"); + DP_STATS_INC(soc, tx.non_wbm_internal_err, 1); + } + continue; + } + + /* Get descriptor id */ + tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc); + pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >> + DP_TX_DESC_ID_POOL_OS; + + /* Find Tx descriptor */ + tx_desc = dp_tx_desc_find(soc, pool_id, + (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >> + DP_TX_DESC_ID_PAGE_OS, + (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >> + DP_TX_DESC_ID_OFFSET_OS); + + /* + * If the descriptor is already freed in vdev_detach, + * continue to next descriptor + */ + if (!tx_desc->vdev && !tx_desc->flags) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_INFO, + "Descriptor freed in vdev_detach %d", + tx_desc_id); + + num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK); + count++; + continue; + } + + if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_INFO, + "pdev in down state %d", + tx_desc_id); + + num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK); + count++; + + dp_tx_comp_free_buf(soc, tx_desc); + dp_tx_desc_release(tx_desc, tx_desc->pool_id); + continue; + } + + /* + * If the release source is FW, process the HTT status + */ + if (qdf_unlikely(buffer_src == + HAL_TX_COMP_RELEASE_SOURCE_FW)) { + uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN]; + hal_tx_comp_get_htt_desc(tx_comp_hal_desc, + htt_tx_status); + dp_tx_process_htt_completion(tx_desc, + htt_tx_status, ring_id); + } else { + /* Pool id is not matching. Error */ + if (tx_desc->pool_id != pool_id) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_FATAL, + "Tx Comp pool id %d not matched %d", + pool_id, tx_desc->pool_id); + + qdf_assert_always(0); + } + + if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) || + !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_FATAL, + "Txdesc invalid, flgs = %x,id = %d", + tx_desc->flags, tx_desc_id); + qdf_assert_always(0); + } + + /* First ring descriptor on the cycle */ + if (!head_desc) { + head_desc = tx_desc; + tail_desc = tx_desc; + } + + tail_desc->next = tx_desc; + tx_desc->next = NULL; + tail_desc = tx_desc; + + DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id); + + /* Collect hw completion contents */ + hal_tx_comp_desc_sync(tx_comp_hal_desc, + &tx_desc->comp, 1); + + } + + num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK); + + /* + * Processed packet count is more than given quota + * stop to processing + */ + if (num_processed >= quota) { + force_break = true; + break; + } + + count++; + + if (dp_tx_comp_loop_pkt_limit_hit(soc, count)) + break; + } + + dp_srng_access_end(int_ctx, soc, hal_ring_hdl); + + /* Process the reaped descriptors */ + if (head_desc) + dp_tx_comp_process_desc_list(soc, head_desc, ring_id); + + if (dp_tx_comp_enable_eol_data_check(soc)) { + if (!force_break && + hal_srng_dst_peek_sync_locked(soc->hal_soc, + hal_ring_hdl)) { + DP_STATS_INC(soc, tx.hp_oos2, 1); + if (!hif_exec_should_yield(soc->hif_handle, + int_ctx->dp_intr_id)) + goto more_data; + } + } + DP_TX_HIST_STATS_PER_PDEV(); + + return num_processed; +} + +#ifdef FEATURE_WLAN_TDLS +qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + + if (!vdev) { + dp_err("vdev handle for id %d is NULL", vdev_id); + return NULL; + } + + if (tx_spec & OL_TX_SPEC_NO_FREE) + vdev->is_tdls_frame = true; + + return dp_tx_send(soc_hdl, vdev_id, msdu_list); +} +#endif + +static void dp_tx_vdev_update_feature_flags(struct dp_vdev *vdev) +{ + struct wlan_cfg_dp_soc_ctxt *cfg; + + struct dp_soc *soc; + + soc = vdev->pdev->soc; + if (!soc) + return; + + cfg = soc->wlan_cfg_ctx; + if (!cfg) + return; + + if (vdev->opmode == wlan_op_mode_ndi) + vdev->csum_enabled = wlan_cfg_get_nan_checksum_offload(cfg); + else if ((vdev->subtype == wlan_op_subtype_p2p_device) || + (vdev->subtype == wlan_op_subtype_p2p_cli) || + (vdev->subtype == wlan_op_subtype_p2p_go)) + vdev->csum_enabled = wlan_cfg_get_p2p_checksum_offload(cfg); + else + vdev->csum_enabled = wlan_cfg_get_checksum_offload(cfg); +} + +/** + * dp_tx_vdev_attach() - attach vdev to dp tx + * @vdev: virtual device instance + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev) +{ + int pdev_id; + /* + * Fill HTT TCL Metadata with Vdev ID and MAC ID + */ + HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata, + HTT_TCL_METADATA_TYPE_VDEV_BASED); + + HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata, + vdev->vdev_id); + + pdev_id = + dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc, + vdev->pdev->pdev_id); + HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id); + + /* + * Set HTT Extension Valid bit to 0 by default + */ + HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0); + + dp_tx_vdev_update_search_flags(vdev); + + dp_tx_vdev_update_feature_flags(vdev); + + return QDF_STATUS_SUCCESS; +} + +#ifndef FEATURE_WDS +static inline bool dp_tx_da_search_override(struct dp_vdev *vdev) +{ + return false; +} +#endif + +/** + * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode + * @vdev: virtual device instance + * + * Return: void + * + */ +void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev) +{ + struct dp_soc *soc = vdev->pdev->soc; + + /* + * Enable both AddrY (SA based search) and AddrX (Da based search) + * for TDLS link + * + * Enable AddrY (SA based search) only for non-WDS STA and + * ProxySTA VAP (in HKv1) modes. + * + * In all other VAP modes, only DA based search should be + * enabled + */ + if (vdev->opmode == wlan_op_mode_sta && + vdev->tdls_link_connected) + vdev->hal_desc_addr_search_flags = + (HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN); + else if ((vdev->opmode == wlan_op_mode_sta) && + !dp_tx_da_search_override(vdev)) + vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN; + else + vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN; + + /* Set search type only when peer map v2 messaging is enabled + * as we will have the search index (AST hash) only when v2 is + * enabled + */ + if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta) + vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH; + else + vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT; +} + +static inline bool +dp_is_tx_desc_flush_match(struct dp_pdev *pdev, + struct dp_vdev *vdev, + struct dp_tx_desc_s *tx_desc) +{ + if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED))) + return false; + + /* + * if vdev is given, then only check whether desc + * vdev match. if vdev is NULL, then check whether + * desc pdev match. + */ + return vdev ? (tx_desc->vdev == vdev) : (tx_desc->pdev == pdev); +} + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +/** + * dp_tx_desc_flush() - release resources associated + * to TX Desc + * + * @dp_pdev: Handle to DP pdev structure + * @vdev: virtual device instance + * NULL: no specific Vdev is required and check all allcated TX desc + * on this pdev. + * Non-NULL: only check the allocated TX Desc associated to this Vdev. + * + * @force_free: + * true: flush the TX desc. + * false: only reset the Vdev in each allocated TX desc + * that associated to current Vdev. + * + * This function will go through the TX desc pool to flush + * the outstanding TX data or reset Vdev to NULL in associated TX + * Desc. + */ +void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev, + bool force_free) +{ + uint8_t i; + uint32_t j; + uint32_t num_desc, page_id, offset; + uint16_t num_desc_per_page; + struct dp_soc *soc = pdev->soc; + struct dp_tx_desc_s *tx_desc = NULL; + struct dp_tx_desc_pool_s *tx_desc_pool = NULL; + + if (!vdev && !force_free) { + dp_err("Reset TX desc vdev, Vdev param is required!"); + return; + } + + for (i = 0; i < MAX_TXDESC_POOLS; i++) { + tx_desc_pool = &soc->tx_desc[i]; + if (!(tx_desc_pool->pool_size) || + IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) || + !(tx_desc_pool->desc_pages.cacheable_pages)) + continue; + + /* + * Add flow pool lock protection in case pool is freed + * due to all tx_desc is recycled when handle TX completion. + * this is not necessary when do force flush as: + * a. double lock will happen if dp_tx_desc_release is + * also trying to acquire it. + * b. dp interrupt has been disabled before do force TX desc + * flush in dp_pdev_deinit(). + */ + if (!force_free) + qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock); + num_desc = tx_desc_pool->pool_size; + num_desc_per_page = + tx_desc_pool->desc_pages.num_element_per_page; + for (j = 0; j < num_desc; j++) { + page_id = j / num_desc_per_page; + offset = j % num_desc_per_page; + + if (qdf_unlikely(!(tx_desc_pool-> + desc_pages.cacheable_pages))) + break; + + tx_desc = dp_tx_desc_find(soc, i, page_id, offset); + + if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) { + /* + * Free TX desc if force free is + * required, otherwise only reset vdev + * in this TX desc. + */ + if (force_free) { + dp_tx_comp_free_buf(soc, tx_desc); + dp_tx_desc_release(tx_desc, i); + } else { + tx_desc->vdev = NULL; + } + } + } + if (!force_free) + qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock); + } +} +#else /* QCA_LL_TX_FLOW_CONTROL_V2! */ +/** + * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc + * + * @soc: Handle to DP soc structure + * @tx_desc: pointer of one TX desc + * @desc_pool_id: TX Desc pool id + */ +static inline void +dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, + uint8_t desc_pool_id) +{ + TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock); + + tx_desc->vdev = NULL; + + TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); +} + +void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev, + bool force_free) +{ + uint8_t i, num_pool; + uint32_t j; + uint32_t num_desc, page_id, offset; + uint16_t num_desc_per_page; + struct dp_soc *soc = pdev->soc; + struct dp_tx_desc_s *tx_desc = NULL; + struct dp_tx_desc_pool_s *tx_desc_pool = NULL; + + if (!vdev && !force_free) { + dp_err("Reset TX desc vdev, Vdev param is required!"); + return; + } + + num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); + num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); + + for (i = 0; i < num_pool; i++) { + tx_desc_pool = &soc->tx_desc[i]; + if (!tx_desc_pool->desc_pages.cacheable_pages) + continue; + + num_desc_per_page = + tx_desc_pool->desc_pages.num_element_per_page; + for (j = 0; j < num_desc; j++) { + page_id = j / num_desc_per_page; + offset = j % num_desc_per_page; + tx_desc = dp_tx_desc_find(soc, i, page_id, offset); + + if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) { + if (force_free) { + dp_tx_comp_free_buf(soc, tx_desc); + dp_tx_desc_release(tx_desc, i); + } else { + dp_tx_desc_reset_vdev(soc, tx_desc, + i); + } + } + } + } +} +#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ + +/** + * dp_tx_vdev_detach() - detach vdev from dp tx + * @vdev: virtual device instance + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev) +{ + struct dp_pdev *pdev = vdev->pdev; + + /* Reset TX desc associated to this Vdev as NULL */ + dp_tx_desc_flush(pdev, vdev, false); + dp_tx_vdev_multipass_deinit(vdev); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_pdev_attach() - attach pdev to dp tx + * @pdev: physical device instance + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev) +{ + struct dp_soc *soc = pdev->soc; + + /* Initialize Flow control counters */ + qdf_atomic_init(&pdev->num_tx_exception); + qdf_atomic_init(&pdev->num_tx_outstanding); + + if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { + /* Initialize descriptors in TCL Ring */ + hal_tx_init_data_ring(soc->hal_soc, + soc->tcl_data_ring[pdev->pdev_id].hal_srng); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_pdev_detach() - detach pdev from dp tx + * @pdev: physical device instance + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev) +{ + /* flush TX outstanding data per pdev */ + dp_tx_desc_flush(pdev, NULL, true); + dp_tx_me_exit(pdev); + return QDF_STATUS_SUCCESS; +} + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +/* Pools will be allocated dynamically */ +static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool, + int num_desc) +{ + uint8_t i; + + for (i = 0; i < num_pool; i++) { + qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock); + soc->tx_desc[i].status = FLOW_POOL_INACTIVE; + } + + return 0; +} + +static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool) +{ + uint8_t i; + + for (i = 0; i < num_pool; i++) + qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock); +} +#else /* QCA_LL_TX_FLOW_CONTROL_V2! */ +static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool, + int num_desc) +{ + uint8_t i; + + /* Allocate software Tx descriptor pools */ + for (i = 0; i < num_pool; i++) { + if (dp_tx_desc_pool_alloc(soc, i, num_desc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s Tx Desc Pool alloc %d failed %pK", + __func__, i, soc); + return ENOMEM; + } + } + return 0; +} + +static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool) +{ + uint8_t i; + + for (i = 0; i < num_pool; i++) { + qdf_assert_always(!soc->tx_desc[i].num_allocated); + if (dp_tx_desc_pool_free(soc, i)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s Tx Desc Pool Free failed", __func__); + } + } +} + +#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ + +#ifndef QCA_MEM_ATTACH_ON_WIFI3 +/** + * dp_tso_attach_wifi3() - TSO attach handler + * @txrx_soc: Opaque Dp handle + * + * Reserve TSO descriptor buffers + * + * Return: QDF_STATUS_E_FAILURE on failure or + * QDF_STATUS_SUCCESS on success + */ +static +QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc) +{ + return dp_tso_soc_attach(txrx_soc); +} + +/** + * dp_tso_detach_wifi3() - TSO Detach handler + * @txrx_soc: Opaque Dp handle + * + * Deallocate TSO descriptor buffers + * + * Return: QDF_STATUS_E_FAILURE on failure or + * QDF_STATUS_SUCCESS on success + */ +static +QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc) +{ + return dp_tso_soc_detach(txrx_soc); +} +#else +static +QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc) +{ + return QDF_STATUS_SUCCESS; +} + +static +QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + uint8_t i; + uint8_t num_pool; + uint32_t num_desc; + + num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); + num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); + + for (i = 0; i < num_pool; i++) + dp_tx_tso_desc_pool_free(soc, i); + + dp_info("%s TSO Desc Pool %d Free descs = %d", + __func__, num_pool, num_desc); + + for (i = 0; i < num_pool; i++) + dp_tx_tso_num_seg_pool_free(soc, i); + + dp_info("%s TSO Num of seg Desc Pool %d Free descs = %d", + __func__, num_pool, num_desc); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tso_attach() - TSO attach handler + * @txrx_soc: Opaque Dp handle + * + * Reserve TSO descriptor buffers + * + * Return: QDF_STATUS_E_FAILURE on failure or + * QDF_STATUS_SUCCESS on success + */ +QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + uint8_t i; + uint8_t num_pool; + uint32_t num_desc; + + num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); + num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); + + for (i = 0; i < num_pool; i++) { + if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) { + dp_err("TSO Desc Pool alloc %d failed %pK", + i, soc); + + return QDF_STATUS_E_FAILURE; + } + } + + dp_info("%s TSO Desc Alloc %d, descs = %d", + __func__, num_pool, num_desc); + + for (i = 0; i < num_pool; i++) { + if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) { + dp_err("TSO Num of seg Pool alloc %d failed %pK", + i, soc); + + return QDF_STATUS_E_FAILURE; + } + } + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_soc_detach() - detach soc from dp tx + * @soc: core txrx main context + * + * This function will detach dp tx into main device context + * will free dp tx resource and initialize resources + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc) +{ + uint8_t num_pool; + uint16_t num_desc; + uint16_t num_ext_desc; + uint8_t i; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); + num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); + num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); + + dp_tx_flow_control_deinit(soc); + dp_tx_delete_static_pools(soc, num_pool); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s Tx Desc Pool Free num_pool = %d, descs = %d", + __func__, num_pool, num_desc); + + for (i = 0; i < num_pool; i++) { + if (dp_tx_ext_desc_pool_free(soc, i)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s Tx Ext Desc Pool Free failed", + __func__); + return QDF_STATUS_E_RESOURCES; + } + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s MSDU Ext Desc Pool %d Free descs = %d", + __func__, num_pool, num_ext_desc); + + status = dp_tso_detach_wifi3(soc); + if (status != QDF_STATUS_SUCCESS) + return status; + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_soc_attach() - attach soc to dp tx + * @soc: core txrx main context + * + * This function will attach dp tx into main device context + * will allocate dp tx resource and initialize resources + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc) +{ + uint8_t i; + uint8_t num_pool; + uint32_t num_desc; + uint32_t num_ext_desc; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); + num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); + num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s Tx Desc Alloc num_pool = %d, descs = %d", + __func__, num_pool, num_desc); + + if ((num_pool > MAX_TXDESC_POOLS) || + (num_desc > WLAN_CFG_NUM_TX_DESC_MAX)) + goto fail; + + if (dp_tx_alloc_static_pools(soc, num_pool, num_desc)) + goto fail; + + dp_tx_flow_control_init(soc); + + /* Allocate extension tx descriptor pools */ + for (i = 0; i < num_pool; i++) { + if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "MSDU Ext Desc Pool alloc %d failed %pK", + i, soc); + + goto fail; + } + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s MSDU Ext Desc Alloc %d, descs = %d", + __func__, num_pool, num_ext_desc); + + status = dp_tso_attach_wifi3((void *)soc); + if (status != QDF_STATUS_SUCCESS) + goto fail; + + + /* Initialize descriptors in TCL Rings */ + if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { + for (i = 0; i < soc->num_tcl_data_rings; i++) { + hal_tx_init_data_ring(soc->hal_soc, + soc->tcl_data_ring[i].hal_srng); + } + } + + /* + * todo - Add a runtime config option to enable this. + */ + /* + * Due to multiple issues on NPR EMU, enable it selectively + * only for NPR EMU, should be removed, once NPR platforms + * are stable. + */ + soc->process_tx_status = CONFIG_PROCESS_TX_STATUS; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s HAL Tx init Success", __func__); + + return QDF_STATUS_SUCCESS; + +fail: + /* Detach will take care of freeing only allocated resources */ + dp_tx_soc_detach(soc); + return QDF_STATUS_E_RESOURCES; +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..8df5d35ee4952b6c2c8cee130c247b30b71aee8c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h @@ -0,0 +1,371 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __DP_TX_H +#define __DP_TX_H + +#include +#include +#include "dp_types.h" + + +#define DP_TX_MAX_NUM_FRAGS 6 + +#define DP_TX_DESC_FLAG_ALLOCATED 0x1 +#define DP_TX_DESC_FLAG_TO_FW 0x2 +#define DP_TX_DESC_FLAG_FRAG 0x4 +#define DP_TX_DESC_FLAG_RAW 0x8 +#define DP_TX_DESC_FLAG_MESH 0x10 +#define DP_TX_DESC_FLAG_QUEUED_TX 0x20 +#define DP_TX_DESC_FLAG_COMPLETED_TX 0x40 +#define DP_TX_DESC_FLAG_ME 0x80 +#define DP_TX_DESC_FLAG_TDLS_FRAME 0x100 + +#define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1 + +#define DP_TX_FREE_SINGLE_BUF(soc, buf) \ +do { \ + qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE); \ + qdf_nbuf_free(buf); \ +} while (0) + +#define OCB_HEADER_VERSION 1 + +#ifdef TX_PER_PDEV_DESC_POOL +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id) +#else /* QCA_LL_TX_FLOW_CONTROL_V2 */ +#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id) +#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ + #define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id) +#else + #ifdef TX_PER_VDEV_DESC_POOL + #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id) + #define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id) + #endif /* TX_PER_VDEV_DESC_POOL */ +#endif /* TX_PER_PDEV_DESC_POOL */ +#define DP_TX_QUEUE_MASK 0x3 +#define DP_TX_MSDU_INFO_META_DATA_DWORDS 7 + + +/** + * struct dp_tx_frag_info_s + * @vaddr: hlos vritual address for buffer + * @paddr_lo: physical address lower 32bits + * @paddr_hi: physical address higher bits + * @len: length of the buffer + */ +struct dp_tx_frag_info_s { + uint8_t *vaddr; + uint32_t paddr_lo; + uint16_t paddr_hi; + uint16_t len; +}; + +/** + * struct dp_tx_seg_info_s - Segmentation Descriptor + * @nbuf: NBUF pointer if segment corresponds to separate nbuf + * @frag_cnt: Fragment count in this segment + * @total_len: Total length of segment + * @frags: per-Fragment information + * @next: pointer to next MSDU segment + */ +struct dp_tx_seg_info_s { + qdf_nbuf_t nbuf; + uint16_t frag_cnt; + uint16_t total_len; + struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS]; + struct dp_tx_seg_info_s *next; +}; + +/** + * struct dp_tx_sg_info_s - Scatter Gather Descriptor + * @num_segs: Number of segments (TSO/ME) in the frame + * @total_len: Total length of the frame + * @curr_seg: Points to current segment descriptor to be processed. Chain of + * descriptors for SG frames/multicast-unicast converted packets. + * + * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to + * carry fragmentation information + * Raw Frames will be handed over to driver as an SKB chain with MPDU boundaries + * indicated through flags in SKB CB (first_msdu and last_msdu). This will be + * converted into set of skb sg (nr_frags) structures. + */ +struct dp_tx_sg_info_s { + uint32_t num_segs; + uint32_t total_len; + struct dp_tx_seg_info_s *curr_seg; +}; + +/** + * struct dp_tx_queue - Tx queue + * @desc_pool_id: Descriptor Pool to be used for the tx queue + * @ring_id: TCL descriptor ring ID corresponding to the tx queue + * + * Tx queue contains information of the software (Descriptor pool) + * and hardware resources (TCL ring id) to be used for a particular + * transmit queue (obtained from skb_queue_mapping in case of linux) + */ +struct dp_tx_queue { + uint8_t desc_pool_id; + uint8_t ring_id; +}; + +/** + * struct dp_tx_msdu_info_s - MSDU Descriptor + * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement + * @tx_queue: Tx queue on which this MSDU should be transmitted + * @num_seg: Number of segments (TSO) + * @tid: TID (override) that is sent from HLOS + * @u.tso_info: TSO information for TSO frame types + * (chain of the TSO segments, number of segments) + * @u.sg_info: Scatter Gather information for non-TSO SG frames + * @meta_data: Mesh meta header information + * @exception_fw: Duplicate frame to be sent to firmware + * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions + * @ix_tx_sniffer: Indicates if the packet has to be sniffed + * + * This structure holds the complete MSDU information needed to program the + * Hardware TCL and MSDU extension descriptors for different frame types + * + */ +struct dp_tx_msdu_info_s { + enum dp_tx_frm_type frm_type; + struct dp_tx_queue tx_queue; + uint32_t num_seg; + uint8_t tid; + union { + struct qdf_tso_info_t tso_info; + struct dp_tx_sg_info_s sg_info; + } u; + uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS]; + uint8_t exception_fw; + uint16_t ppdu_cookie; + uint8_t is_tx_sniffer; +}; + +QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev); +QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev); +void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev); + +QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc); +QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc); + +/** + * dp_tso_attach() - TSO Attach handler + * @txrx_soc: Opaque Dp handle + * + * Reserve TSO descriptor buffers + * + * Return: QDF_STATUS_E_FAILURE on failure or + * QDF_STATUS_SUCCESS on success + */ +QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc); + +/** + * dp_tso_detach() - TSO Detach handler + * @txrx_soc: Opaque Dp handle + * + * Deallocate TSO descriptor buffers + * + * Return: QDF_STATUS_E_FAILURE on failure or + * QDF_STATUS_SUCCESS on success + */ +QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc); + +QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev); +QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev); + +qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf); + +qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id, + qdf_nbuf_t nbuf, + struct cdp_tx_exception_metadata *tx_exc); +qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id, + qdf_nbuf_t nbuf); +qdf_nbuf_t +dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id, + struct cdp_tx_exception_metadata *tx_exc_metadata); + +#if QDF_LOCK_STATS +noinline qdf_nbuf_t +dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info); +#else +qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info); +#endif +#ifdef FEATURE_WLAN_TDLS +/** + * dp_tx_non_std() - Allow the control-path SW to send data frames + * @soc_hdl: Datapath soc handle + * @vdev_id: id of vdev + * @tx_spec: what non-standard handling to apply to the tx data frames + * @msdu_list: NULL-terminated list of tx MSDUs + * + * Return: NULL on success, + * nbuf when it fails to send + */ +qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, + enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list); +#endif +int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac); + +/** + * dp_tx_comp_handler() - Tx completion handler + * @int_ctx: pointer to DP interrupt context + * @soc: core txrx main context + * @hal_srng: Opaque HAL SRNG pointer + * @ring_id: completion ring id + * @quota: No. of packets/descriptors that can be serviced in one loop + * + * This function will collect hardware release ring element contents and + * handle descriptor contents. Based on contents, free packet or handle error + * conditions + * + * Return: Number of TX completions processed + */ +uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc, + hal_ring_handle_t hal_srng, uint8_t ring_id, + uint32_t quota); + +QDF_STATUS +dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf); + +#ifndef FEATURE_WDS +static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status) +{ + return; +} +#endif + +#ifndef ATH_SUPPORT_IQUE +static inline void dp_tx_me_exit(struct dp_pdev *pdev) +{ + return; +} +#endif + +#ifndef QCA_MULTIPASS_SUPPORT +static inline +bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev, + qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info) +{ + return true; +} + +static inline +void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev) +{ +} + +#else +bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev, + qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info); + +void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev); +#endif + +/** + * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame + * @vdev: DP Virtual device handle + * @nbuf: Buffer pointer + * @queue: queue ids container for nbuf + * + * TX packet queue has 2 instances, software descriptors id and dma ring id + * Based on tx feature and hardware configuration queue id combination could be + * different. + * For example - + * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id + * With no XPS,lock based resource protection, Descriptor pool ids are different + * for each vdev, dma ring id will be same as single pdev id + * + * Return: None + */ +#ifdef QCA_OL_TX_MULTIQ_SUPPORT +static inline void dp_tx_get_queue(struct dp_vdev *vdev, + qdf_nbuf_t nbuf, struct dp_tx_queue *queue) +{ + uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & + DP_TX_QUEUE_MASK; + + queue->desc_pool_id = queue_offset; + queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset]; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s, pool_id:%d ring_id: %d", + __func__, queue->desc_pool_id, queue->ring_id); +} +#else /* QCA_OL_TX_MULTIQ_SUPPORT */ +static inline void dp_tx_get_queue(struct dp_vdev *vdev, + qdf_nbuf_t nbuf, struct dp_tx_queue *queue) +{ + /* get flow id */ + queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev); + queue->ring_id = DP_TX_GET_RING_ID(vdev); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s, pool_id:%d ring_id: %d", + __func__, queue->desc_pool_id, queue->ring_id); +} +#endif +#ifdef FEATURE_PERPKT_INFO +QDF_STATUS +dp_get_completion_indication_for_stack(struct dp_soc *soc, + struct dp_pdev *pdev, + struct dp_peer *peer, + struct hal_tx_completion_status *ts, + qdf_nbuf_t netbuf, + uint64_t time_latency); + +void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev, + uint16_t peer_id, uint32_t ppdu_id, + qdf_nbuf_t netbuf); +#endif + +void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl); + +#ifdef ATH_TX_PRI_OVERRIDE +#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \ + ((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf)) +#else +#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) +#endif + +void +dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc, + uint32_t buf_type); + +/* TODO TX_FEATURE_NOT_YET */ +static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc) +{ + return; +} +/* TODO TX_FEATURE_NOT_YET */ + +#ifndef WLAN_TX_PKT_CAPTURE_ENH +static inline +void dp_peer_set_tx_capture_enabled(struct dp_peer *peer_handle, bool value) +{ +} +#endif +void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev, + bool force_free); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c new file mode 100644 index 0000000000000000000000000000000000000000..0e0fdb6e04cd89c01234cc9ad2b6d547e72bed87 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c @@ -0,0 +1,504 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hal_hw_headers.h" +#include "dp_types.h" +#include "dp_tx_desc.h" + +#ifndef DESC_PARTITION +#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a) +#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \ +do { \ + uint8_t sig_bit; \ + soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \ + /* Calculate page divider to find page number */ \ + sig_bit = 0; \ + while (num_desc_per_page) { \ + sig_bit++; \ + num_desc_per_page = num_desc_per_page >> 1; \ + } \ + soc->tx_desc[pool_id].page_divider = (sig_bit - 1); \ +} while (0) +#else +#define DP_TX_DESC_SIZE(a) a +#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {} +#endif /* DESC_PARTITION */ + +/** + * dp_tx_desc_pool_counter_initialize() - Initialize counters + * @tx_desc_pool Handle to DP tx_desc_pool structure + * @num_elem Number of descriptor elements per pool + * + * Return: None + */ +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +static void +dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool, + uint16_t num_elem) +{ +} +#else +static void +dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool, + uint16_t num_elem) +{ + tx_desc_pool->num_free = num_elem; + tx_desc_pool->num_allocated = 0; +} +#endif + +/** + * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s) + * @soc Handle to DP SoC structure + * @num_pool Number of pools to allocate + * @num_elem Number of descriptor elements per pool + * + * This function allocates memory for SW tx descriptors + * (used within host for tx data path). + * The number of tx descriptors required will be large + * since based on number of clients (1024 clients x 3 radios), + * outstanding MSDUs stored in TQM queues and LMAC queues will be significantly + * large. + * + * To avoid allocating a large contiguous memory, it uses multi_page_alloc qdf + * function to allocate memory + * in multiple pages. It then iterates through the memory allocated across pages + * and links each descriptor + * to next descriptor, taking care of page boundaries. + * + * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are allocated, + * one for each ring; + * This minimizes lock contention when hard_start_xmit is called + * from multiple CPUs. + * Alternately, multiple pools can be used for multiple VDEVs for VDEV level + * flow control. + * + * Return: Status code. 0 for success. + */ +QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem) +{ + uint32_t id, count, page_id, offset, pool_id_32; + uint16_t num_desc_per_page; + struct dp_tx_desc_s *tx_desc_elem; + uint32_t desc_size; + struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]); + + desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem)); + tx_desc_pool->elem_size = desc_size; + if (!dp_is_soc_reinit(soc)) + dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE, + &tx_desc_pool->desc_pages, + desc_size, num_elem, + 0, true); + if (!tx_desc_pool->desc_pages.num_pages) { + dp_err("Multi page alloc fail, tx desc"); + goto fail_exit; + } + + num_desc_per_page = + tx_desc_pool->desc_pages.num_element_per_page; + tx_desc_pool->freelist = (struct dp_tx_desc_s *) + *tx_desc_pool->desc_pages.cacheable_pages; + if (qdf_mem_multi_page_link(soc->osdev, + &tx_desc_pool->desc_pages, + desc_size, num_elem, true)) { + dp_err("invalid tx desc allocation - overflow num link"); + goto free_tx_desc; + } + + /* Set unique IDs for each Tx descriptor */ + tx_desc_elem = tx_desc_pool->freelist; + count = 0; + pool_id_32 = (uint32_t)pool_id; + while (tx_desc_elem) { + page_id = count / num_desc_per_page; + offset = count % num_desc_per_page; + id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) | + (page_id << DP_TX_DESC_ID_PAGE_OS) | offset); + + tx_desc_elem->id = id; + tx_desc_elem->pool_id = pool_id; + tx_desc_elem = tx_desc_elem->next; + count++; + } + + dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem); + TX_DESC_LOCK_CREATE(&tx_desc_pool->lock); + return QDF_STATUS_SUCCESS; + +free_tx_desc: + dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE, + &tx_desc_pool->desc_pages, 0, true); + +fail_exit: + return QDF_STATUS_E_FAULT; +} + +/** + * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx Descriptors + * + * @soc Handle to DP SoC structure + * @pool_id + * + * Return: + */ +QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) +{ + struct dp_tx_desc_pool_s *tx_desc_pool = + &((soc)->tx_desc[(pool_id)]); + + dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE, + &tx_desc_pool->desc_pages, + 0, true); + TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock); + TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool); + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_ext_desc_pool_alloc() - Allocate tx ext descriptor pool + * @soc Handle to DP SoC structure + * @pool_id + * + * Return: NONE + */ +QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem) +{ + uint16_t num_page; + uint32_t count; + struct dp_tx_ext_desc_elem_s *c_elem, *p_elem; + struct qdf_mem_dma_page_t *page_info; + struct qdf_mem_multi_page_t *pages; + QDF_STATUS status; + qdf_dma_context_t memctx = 0; + + /* Coherent tx extension descriptor alloc */ + soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA; + soc->tx_ext_desc[pool_id].elem_count = num_elem; + memctx = qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx); + if (!dp_is_soc_reinit(soc)) { + dp_desc_multi_pages_mem_alloc(soc, + DP_TX_EXT_DESC_TYPE, + &soc->tx_ext_desc[pool_id]. + desc_pages, + soc->tx_ext_desc[pool_id].elem_size, + soc->tx_ext_desc[pool_id].elem_count, + memctx, false); + } + if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "ext desc page alloc fail"); + status = QDF_STATUS_E_NOMEM; + goto fail_exit; + } + + num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages; + /* + * Cacheable ext descriptor link alloc + * This structure also large size already + * single element is 24bytes, 2K elements are 48Kbytes + * Have to alloc multi page cacheable memory + */ + soc->tx_ext_desc[pool_id].link_elem_size = + sizeof(struct dp_tx_ext_desc_elem_s); + if (!dp_is_soc_reinit(soc)) { + dp_desc_multi_pages_mem_alloc(soc, + DP_TX_EXT_DESC_LINK_TYPE, + &soc->tx_ext_desc[pool_id].desc_link_pages, + soc->tx_ext_desc[pool_id].link_elem_size, + soc->tx_ext_desc[pool_id].elem_count, + 0, true); + } + if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "ext link desc page alloc fail"); + status = QDF_STATUS_E_NOMEM; + goto free_ext_desc_page; + } + + /* link tx descriptors into a freelist */ + soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *) + *soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages; + if (qdf_mem_multi_page_link(soc->osdev, + &soc->tx_ext_desc[pool_id].desc_link_pages, + soc->tx_ext_desc[pool_id].link_elem_size, + soc->tx_ext_desc[pool_id].elem_count, true)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "ext link desc page linking fail"); + status = QDF_STATUS_E_FAULT; + goto free_ext_link_desc_page; + } + + /* Assign coherent memory pointer into linked free list */ + pages = &soc->tx_ext_desc[pool_id].desc_pages; + page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages; + c_elem = soc->tx_ext_desc[pool_id].freelist; + p_elem = c_elem; + for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) { + if (!(count % pages->num_element_per_page)) { + /** + * First element for new page, + * should point next page + */ + if (!pages->dma_pages->page_v_addr_start) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + "link over flow"); + status = QDF_STATUS_E_FAULT; + goto free_ext_link_desc_page; + } + c_elem->vaddr = (void *)page_info->page_v_addr_start; + c_elem->paddr = page_info->page_p_addr; + page_info++; + } else { + c_elem->vaddr = (void *)(p_elem->vaddr + + soc->tx_ext_desc[pool_id].elem_size); + c_elem->paddr = (p_elem->paddr + + soc->tx_ext_desc[pool_id].elem_size); + } + p_elem = c_elem; + c_elem = c_elem->next; + if (!c_elem) + break; + } + + soc->tx_ext_desc[pool_id].num_free = num_elem; + qdf_spinlock_create(&soc->tx_ext_desc[pool_id].lock); + return QDF_STATUS_SUCCESS; + +free_ext_link_desc_page: + dp_desc_multi_pages_mem_free(soc, DP_TX_EXT_DESC_LINK_TYPE, + &soc->tx_ext_desc[pool_id].desc_link_pages, 0, true); + +free_ext_desc_page: + dp_desc_multi_pages_mem_free(soc, DP_TX_EXT_DESC_TYPE, + &soc->tx_ext_desc[pool_id].desc_pages, + qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx), + false); + +fail_exit: + return status; + +} + +/** + * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool + * @soc: Handle to DP SoC structure + * @pool_id: extension descriptor pool id + * + * Return: NONE + */ +QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) +{ + dp_desc_multi_pages_mem_free(soc, DP_TX_EXT_DESC_LINK_TYPE, + &soc->tx_ext_desc[pool_id].desc_link_pages, 0, true); + + dp_desc_multi_pages_mem_free(soc, DP_TX_EXT_DESC_TYPE, + &soc->tx_ext_desc[pool_id].desc_pages, + qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx), + false); + + qdf_spinlock_destroy(&soc->tx_ext_desc[pool_id].lock); + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool + * @soc: Handle to DP SoC structure + * @pool_id: tso descriptor pool id + * @num_elem: number of element + * + * Return: QDF_STATUS_SUCCESS + */ +#if defined(FEATURE_TSO) +QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem) +{ + struct dp_tx_tso_seg_pool_s *tso_desc_pool; + uint32_t desc_size; + + tso_desc_pool = &soc->tx_tso_desc[pool_id]; + tso_desc_pool->num_free = 0; + desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t)); + if (!dp_is_soc_reinit(soc)) + dp_desc_multi_pages_mem_alloc(soc, + DP_TX_TSO_DESC_TYPE, + &tso_desc_pool->desc_pages, + desc_size, + num_elem, 0, true); + + if (!tso_desc_pool->desc_pages.num_pages) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("Alloc Failed %pK pool_id %d"), + soc, pool_id); + return QDF_STATUS_E_NOMEM; + } + + tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *) + *tso_desc_pool->desc_pages.cacheable_pages; + tso_desc_pool->num_free = num_elem; + if (qdf_mem_multi_page_link(soc->osdev, + &tso_desc_pool->desc_pages, + desc_size, + num_elem, true)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "invalid tso desc allocation - overflow num link"); + goto free_tso_desc; + } + TSO_DEBUG("Number of free descriptors: %u\n", tso_desc_pool->num_free); + tso_desc_pool->pool_size = num_elem; + qdf_spinlock_create(&tso_desc_pool->lock); + + return QDF_STATUS_SUCCESS; + +free_tso_desc: + dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE, + &tso_desc_pool->desc_pages, 0, true); + + return QDF_STATUS_E_FAULT; +} + +/** + * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool + * @soc: Handle to DP SoC structure + * @pool_id: extension descriptor pool id + * + * Return: NONE + */ +void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) +{ + struct dp_tx_tso_seg_pool_s *tso_desc_pool; + + tso_desc_pool = &soc->tx_tso_desc[pool_id]; + + qdf_spin_lock_bh(&tso_desc_pool->lock); + + dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE, + &tso_desc_pool->desc_pages, 0, true); + tso_desc_pool->freelist = NULL; + tso_desc_pool->num_free = 0; + tso_desc_pool->pool_size = 0; + qdf_spin_unlock_bh(&tso_desc_pool->lock); + qdf_spinlock_destroy(&tso_desc_pool->lock); + return; +} +/** + * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that tracks the + * fragments in each tso segment + * + * @soc: handle to dp soc structure + * @pool_id: descriptor pool id + * @num_elem: total number of descriptors to be allocated + */ +QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem) +{ + struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool; + uint32_t desc_size; + + tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id]; + tso_num_seg_pool->num_free = 0; + desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t)); + if (!dp_is_soc_reinit(soc)) + dp_desc_multi_pages_mem_alloc(soc, + DP_TX_TSO_NUM_SEG_TYPE, + &tso_num_seg_pool->desc_pages, + desc_size, + num_elem, 0, true); + if (!tso_num_seg_pool->desc_pages.num_pages) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("Alloc Failed %pK pool_id %d"), + soc, pool_id); + return QDF_STATUS_E_NOMEM; + } + + if (qdf_mem_multi_page_link(soc->osdev, + &tso_num_seg_pool->desc_pages, + desc_size, + num_elem, true)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "invalid tso desc allocation - overflow num link"); + goto fail; + } + + tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *) + *tso_num_seg_pool->desc_pages.cacheable_pages; + tso_num_seg_pool->num_free = num_elem; + tso_num_seg_pool->num_seg_pool_size = num_elem; + + qdf_spinlock_create(&tso_num_seg_pool->lock); + + return QDF_STATUS_SUCCESS; + +fail: + dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE, + &tso_num_seg_pool->desc_pages, 0, true); + + return QDF_STATUS_E_NOMEM; +} + +/** + * dp_tx_tso_num_seg_pool_free() - free pool of descriptors that tracks + * the fragments in tso segment + * + * + * @soc: handle to dp soc structure + * @pool_id: descriptor pool_id + */ +void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id) +{ + struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool; + + tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id]; + qdf_spin_lock_bh(&tso_num_seg_pool->lock); + + dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE, + &tso_num_seg_pool->desc_pages, 0, true); + tso_num_seg_pool->freelist = NULL; + tso_num_seg_pool->num_free = 0; + tso_num_seg_pool->num_seg_pool_size = 0; + qdf_spin_unlock_bh(&tso_num_seg_pool->lock); + qdf_spinlock_destroy(&tso_num_seg_pool->lock); + return; +} + +#else +QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem) +{ + return QDF_STATUS_SUCCESS; +} + +void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) +{ + return; +} + +QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem) +{ + return QDF_STATUS_SUCCESS; +} + +void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id) +{ + return; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h new file mode 100644 index 0000000000000000000000000000000000000000..07ed3741f60ff9956b311559fb56711875385381 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h @@ -0,0 +1,951 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef DP_TX_DESC_H +#define DP_TX_DESC_H + +#include "dp_types.h" +#include "dp_tx.h" +#include "dp_internal.h" + +/** + * 21 bits cookie + * 2 bits pool id 0 ~ 3, + * 10 bits page id 0 ~ 1023 + * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32) + */ +/* ???Ring ID needed??? */ +#define DP_TX_DESC_ID_POOL_MASK 0x018000 +#define DP_TX_DESC_ID_POOL_OS 15 +#define DP_TX_DESC_ID_PAGE_MASK 0x007FE0 +#define DP_TX_DESC_ID_PAGE_OS 5 +#define DP_TX_DESC_ID_OFFSET_MASK 0x00001F +#define DP_TX_DESC_ID_OFFSET_OS 0 + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +#define TX_DESC_LOCK_CREATE(lock) +#define TX_DESC_LOCK_DESTROY(lock) +#define TX_DESC_LOCK_LOCK(lock) +#define TX_DESC_LOCK_UNLOCK(lock) +#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \ + ((pool)->status == FLOW_POOL_INACTIVE) +#ifdef QCA_AC_BASED_FLOW_CONTROL +#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \ + dp_tx_flow_pool_member_clean(_tx_desc_pool) + +#else /* !QCA_AC_BASED_FLOW_CONTROL */ +#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \ +do { \ + (_tx_desc_pool)->elem_size = 0; \ + (_tx_desc_pool)->freelist = NULL; \ + (_tx_desc_pool)->pool_size = 0; \ + (_tx_desc_pool)->avail_desc = 0; \ + (_tx_desc_pool)->start_th = 0; \ + (_tx_desc_pool)->stop_th = 0; \ + (_tx_desc_pool)->status = FLOW_POOL_INACTIVE; \ +} while (0) +#endif /* QCA_AC_BASED_FLOW_CONTROL */ +#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */ +#define TX_DESC_LOCK_CREATE(lock) qdf_spinlock_create(lock) +#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock) +#define TX_DESC_LOCK_LOCK(lock) qdf_spin_lock_bh(lock) +#define TX_DESC_LOCK_UNLOCK(lock) qdf_spin_unlock_bh(lock) +#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false) +#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \ +do { \ + (_tx_desc_pool)->elem_size = 0; \ + (_tx_desc_pool)->num_allocated = 0; \ + (_tx_desc_pool)->freelist = NULL; \ + (_tx_desc_pool)->elem_count = 0; \ + (_tx_desc_pool)->num_free = 0; \ +} while (0) +#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ +#define MAX_POOL_BUFF_COUNT 10000 + +QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem); +QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id); +QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem); +QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id); +QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem); +void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id); +QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem); +void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id); + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +void dp_tx_flow_control_init(struct dp_soc *); +void dp_tx_flow_control_deinit(struct dp_soc *); + +QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc, + tx_pause_callback pause_cb); +QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id, + uint8_t vdev_id); +void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id, + uint8_t vdev_id); +void dp_tx_clear_flow_pool_stats(struct dp_soc *soc); +struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc, + uint8_t flow_pool_id, uint16_t flow_pool_size); + +QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id, + uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size); +void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id, + uint8_t flow_type, uint8_t flow_pool_id); + +/** + * dp_tx_get_desc_flow_pool() - get descriptor from flow pool + * @pool: flow pool + * + * Caller needs to take lock and do sanity checks. + * + * Return: tx descriptor + */ +static inline +struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool) +{ + struct dp_tx_desc_s *tx_desc = pool->freelist; + + pool->freelist = pool->freelist->next; + pool->avail_desc--; + return tx_desc; +} + +/** + * ol_tx_put_desc_flow_pool() - put descriptor to flow pool freelist + * @pool: flow pool + * @tx_desc: tx descriptor + * + * Caller needs to take lock and do sanity checks. + * + * Return: none + */ +static inline +void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool, + struct dp_tx_desc_s *tx_desc) +{ + tx_desc->next = pool->freelist; + pool->freelist = tx_desc; + pool->avail_desc++; +} + +#ifdef QCA_AC_BASED_FLOW_CONTROL + +/** + * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool + * + * @pool: flow pool + * + * Return: None + */ +static inline void +dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool) +{ + pool->elem_size = 0; + pool->freelist = NULL; + pool->pool_size = 0; + pool->avail_desc = 0; + qdf_mem_zero(pool->start_th, FL_TH_MAX); + qdf_mem_zero(pool->stop_th, FL_TH_MAX); + pool->status = FLOW_POOL_INACTIVE; +} + +/** + * dp_tx_is_threshold_reached() - Check if current avail desc meet threshold + * + * @pool: flow pool + * @avail_desc: available descriptor number + * + * Return: true if threshold is met, false if not + */ +static inline bool +dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc) +{ + if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK])) + return true; + else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI])) + return true; + else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO])) + return true; + else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI])) + return true; + else + return false; +} + +/** + * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool + * + * @soc: Handle to DP SoC structure + * @desc_pool_id: ID of the flow control fool + * + * Return: TX descriptor allocated or NULL + */ +static inline struct dp_tx_desc_s * +dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id) +{ + struct dp_tx_desc_s *tx_desc = NULL; + struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id]; + bool is_pause = false; + enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE; + enum dp_fl_ctrl_threshold level = DP_TH_BE_BK; + + if (qdf_likely(pool)) { + qdf_spin_lock_bh(&pool->flow_pool_lock); + if (qdf_likely(pool->avail_desc)) { + tx_desc = dp_tx_get_desc_flow_pool(pool); + tx_desc->pool_id = desc_pool_id; + tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED; + is_pause = dp_tx_is_threshold_reached(pool, + pool->avail_desc); + + if (qdf_unlikely(is_pause)) { + switch (pool->status) { + case FLOW_POOL_ACTIVE_UNPAUSED: + /* pause network BE\BK queue */ + act = WLAN_NETIF_BE_BK_QUEUE_OFF; + level = DP_TH_BE_BK; + pool->status = FLOW_POOL_BE_BK_PAUSED; + break; + case FLOW_POOL_BE_BK_PAUSED: + /* pause network VI queue */ + act = WLAN_NETIF_VI_QUEUE_OFF; + level = DP_TH_VI; + pool->status = FLOW_POOL_VI_PAUSED; + break; + case FLOW_POOL_VI_PAUSED: + /* pause network VO queue */ + act = WLAN_NETIF_VO_QUEUE_OFF; + level = DP_TH_VO; + pool->status = FLOW_POOL_VO_PAUSED; + break; + case FLOW_POOL_VO_PAUSED: + /* pause network HI PRI queue */ + act = WLAN_NETIF_PRIORITY_QUEUE_OFF; + level = DP_TH_HI; + pool->status = FLOW_POOL_ACTIVE_PAUSED; + break; + case FLOW_POOL_ACTIVE_PAUSED: + act = WLAN_NETIF_ACTION_TYPE_NONE; + break; + default: + dp_err_rl("pool status is %d!", + pool->status); + break; + } + + if (act != WLAN_NETIF_ACTION_TYPE_NONE) { + pool->latest_pause_time[level] = + qdf_get_system_timestamp(); + soc->pause_cb(desc_pool_id, + act, + WLAN_DATA_FLOW_CONTROL); + } + } + } else { + pool->pkt_drop_no_desc++; + } + qdf_spin_unlock_bh(&pool->flow_pool_lock); + } else { + soc->pool_stats.pkt_drop_no_pool++; + } + + return tx_desc; +} + +/** + * dp_tx_desc_free() - Fee a tx descriptor and attach it to free list + * + * @soc: Handle to DP SoC structure + * @tx_desc: the tx descriptor to be freed + * @desc_pool_id: ID of the flow control fool + * + * Return: None + */ +static inline void +dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, + uint8_t desc_pool_id) +{ + struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id]; + qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur; + enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE; + + qdf_spin_lock_bh(&pool->flow_pool_lock); + tx_desc->vdev = NULL; + tx_desc->nbuf = NULL; + tx_desc->flags = 0; + dp_tx_put_desc_flow_pool(pool, tx_desc); + switch (pool->status) { + case FLOW_POOL_ACTIVE_PAUSED: + if (pool->avail_desc > pool->start_th[DP_TH_HI]) { + act = WLAN_NETIF_PRIORITY_QUEUE_ON; + pool->status = FLOW_POOL_VO_PAUSED; + + /* Update maxinum pause duration for HI queue */ + pause_dur = unpause_time - + pool->latest_pause_time[DP_TH_HI]; + if (pool->max_pause_time[DP_TH_HI] < pause_dur) + pool->max_pause_time[DP_TH_HI] = pause_dur; + } + break; + case FLOW_POOL_VO_PAUSED: + if (pool->avail_desc > pool->start_th[DP_TH_VO]) { + act = WLAN_NETIF_VO_QUEUE_ON; + pool->status = FLOW_POOL_VI_PAUSED; + + /* Update maxinum pause duration for VO queue */ + pause_dur = unpause_time - + pool->latest_pause_time[DP_TH_VO]; + if (pool->max_pause_time[DP_TH_VO] < pause_dur) + pool->max_pause_time[DP_TH_VO] = pause_dur; + } + break; + case FLOW_POOL_VI_PAUSED: + if (pool->avail_desc > pool->start_th[DP_TH_VI]) { + act = WLAN_NETIF_VI_QUEUE_ON; + pool->status = FLOW_POOL_BE_BK_PAUSED; + + /* Update maxinum pause duration for VI queue */ + pause_dur = unpause_time - + pool->latest_pause_time[DP_TH_VI]; + if (pool->max_pause_time[DP_TH_VI] < pause_dur) + pool->max_pause_time[DP_TH_VI] = pause_dur; + } + break; + case FLOW_POOL_BE_BK_PAUSED: + if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) { + act = WLAN_WAKE_NON_PRIORITY_QUEUE; + pool->status = FLOW_POOL_ACTIVE_UNPAUSED; + + /* Update maxinum pause duration for BE_BK queue */ + pause_dur = unpause_time - + pool->latest_pause_time[DP_TH_BE_BK]; + if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur) + pool->max_pause_time[DP_TH_BE_BK] = pause_dur; + } + break; + case FLOW_POOL_INVALID: + if (pool->avail_desc == pool->pool_size) { + dp_tx_desc_pool_free(soc, desc_pool_id); + qdf_spin_unlock_bh(&pool->flow_pool_lock); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s %d pool is freed!!", + __func__, __LINE__); + return; + } + break; + + case FLOW_POOL_ACTIVE_UNPAUSED: + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s %d pool is INACTIVE State!!", + __func__, __LINE__); + break; + }; + + if (act != WLAN_WAKE_ALL_NETIF_QUEUE) + soc->pause_cb(pool->flow_pool_id, + act, WLAN_DATA_FLOW_CONTROL); + qdf_spin_unlock_bh(&pool->flow_pool_lock); +} +#else /* QCA_AC_BASED_FLOW_CONTROL */ + +static inline bool +dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc) +{ + if (qdf_unlikely(avail_desc < pool->stop_th)) + return true; + else + return false; +} + +/** + * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool + * + * @soc Handle to DP SoC structure + * @pool_id + * + * Return: + */ +static inline struct dp_tx_desc_s * +dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id) +{ + struct dp_tx_desc_s *tx_desc = NULL; + struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id]; + + if (pool) { + qdf_spin_lock_bh(&pool->flow_pool_lock); + if (pool->status <= FLOW_POOL_ACTIVE_PAUSED && + pool->avail_desc) { + tx_desc = dp_tx_get_desc_flow_pool(pool); + tx_desc->pool_id = desc_pool_id; + tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED; + if (qdf_unlikely(pool->avail_desc < pool->stop_th)) { + pool->status = FLOW_POOL_ACTIVE_PAUSED; + qdf_spin_unlock_bh(&pool->flow_pool_lock); + /* pause network queues */ + soc->pause_cb(desc_pool_id, + WLAN_STOP_ALL_NETIF_QUEUE, + WLAN_DATA_FLOW_CONTROL); + } else { + qdf_spin_unlock_bh(&pool->flow_pool_lock); + } + + /* + * If one packet is going to be sent, PM usage count + * needs to be incremented by one to prevent future + * runtime suspend. This should be tied with the + * success of allocating one descriptor. It will be + * decremented after the packet has been sent. + */ + hif_pm_runtime_get_noresume( + soc->hif_handle, + RTPM_ID_DP_TX_DESC_ALLOC_FREE); + } else { + pool->pkt_drop_no_desc++; + qdf_spin_unlock_bh(&pool->flow_pool_lock); + } + } else { + soc->pool_stats.pkt_drop_no_pool++; + } + + + return tx_desc; +} + +/** + * dp_tx_desc_free() - Fee a tx descriptor and attach it to free list + * + * @soc Handle to DP SoC structure + * @pool_id + * @tx_desc + * + * Return: None + */ +static inline void +dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, + uint8_t desc_pool_id) +{ + struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id]; + + qdf_spin_lock_bh(&pool->flow_pool_lock); + tx_desc->vdev = NULL; + tx_desc->nbuf = NULL; + tx_desc->flags = 0; + dp_tx_put_desc_flow_pool(pool, tx_desc); + switch (pool->status) { + case FLOW_POOL_ACTIVE_PAUSED: + if (pool->avail_desc > pool->start_th) { + soc->pause_cb(pool->flow_pool_id, + WLAN_WAKE_ALL_NETIF_QUEUE, + WLAN_DATA_FLOW_CONTROL); + pool->status = FLOW_POOL_ACTIVE_UNPAUSED; + } + break; + case FLOW_POOL_INVALID: + if (pool->avail_desc == pool->pool_size) { + dp_tx_desc_pool_free(soc, desc_pool_id); + qdf_spin_unlock_bh(&pool->flow_pool_lock); + qdf_print("%s %d pool is freed!!", + __func__, __LINE__); + goto out; + } + break; + + case FLOW_POOL_ACTIVE_UNPAUSED: + break; + default: + qdf_print("%s %d pool is INACTIVE State!!", + __func__, __LINE__); + break; + }; + + qdf_spin_unlock_bh(&pool->flow_pool_lock); + +out: + /** + * Decrement PM usage count if the packet has been sent. This + * should be tied with the success of freeing one descriptor. + */ + hif_pm_runtime_put(soc->hif_handle, + RTPM_ID_DP_TX_DESC_ALLOC_FREE); +} + +#endif /* QCA_AC_BASED_FLOW_CONTROL */ + +static inline bool +dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, + vdev_id); + struct dp_tx_desc_pool_s *pool; + + if (!vdev) + return false; + + pool = vdev->pool; + + return dp_tx_is_threshold_reached(pool, pool->avail_desc); +} +#else /* QCA_LL_TX_FLOW_CONTROL_V2 */ + +static inline void dp_tx_flow_control_init(struct dp_soc *handle) +{ +} + +static inline void dp_tx_flow_control_deinit(struct dp_soc *handle) +{ +} + +static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, + uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id, + uint16_t flow_pool_size) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, + uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id) +{ +} + +/** + * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool + * + * @param soc Handle to DP SoC structure + * @param pool_id + * + * Return: + */ +static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc, + uint8_t desc_pool_id) +{ + struct dp_tx_desc_s *tx_desc = NULL; + + TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock); + + tx_desc = soc->tx_desc[desc_pool_id].freelist; + + /* Pool is exhausted */ + if (!tx_desc) { + TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); + return NULL; + } + + soc->tx_desc[desc_pool_id].freelist = + soc->tx_desc[desc_pool_id].freelist->next; + soc->tx_desc[desc_pool_id].num_allocated++; + soc->tx_desc[desc_pool_id].num_free--; + + tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED; + + TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); + + return tx_desc; +} + +/** + * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors + * from given pool + * @soc: Handle to DP SoC structure + * @pool_id: pool id should pick up + * @num_requested: number of required descriptor + * + * allocate multiple tx descriptor and make a link + * + * Return: h_desc first descriptor pointer + */ +static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple( + struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested) +{ + struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL; + uint8_t count; + + TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock); + + if ((num_requested == 0) || + (soc->tx_desc[desc_pool_id].num_free < num_requested)) { + TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s, No Free Desc: Available(%d) num_requested(%d)", + __func__, soc->tx_desc[desc_pool_id].num_free, + num_requested); + return NULL; + } + + h_desc = soc->tx_desc[desc_pool_id].freelist; + + /* h_desc should never be NULL since num_free > requested */ + qdf_assert_always(h_desc); + + c_desc = h_desc; + for (count = 0; count < (num_requested - 1); count++) { + c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED; + c_desc = c_desc->next; + } + soc->tx_desc[desc_pool_id].num_free -= count; + soc->tx_desc[desc_pool_id].num_allocated += count; + soc->tx_desc[desc_pool_id].freelist = c_desc->next; + c_desc->next = NULL; + + TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); + return h_desc; +} + +/** + * dp_tx_desc_free() - Fee a tx descriptor and attach it to free list + * + * @soc Handle to DP SoC structure + * @pool_id + * @tx_desc + */ +static inline void +dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, + uint8_t desc_pool_id) +{ + TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock); + + tx_desc->vdev = NULL; + tx_desc->nbuf = NULL; + tx_desc->flags = 0; + tx_desc->next = soc->tx_desc[desc_pool_id].freelist; + soc->tx_desc[desc_pool_id].freelist = tx_desc; + soc->tx_desc[desc_pool_id].num_allocated--; + soc->tx_desc[desc_pool_id].num_free++; + + + TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); +} +#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ + +#ifdef QCA_DP_TX_DESC_ID_CHECK +/** + * dp_tx_is_desc_id_valid() - check is the tx desc id valid + * + * @soc Handle to DP SoC structure + * @tx_desc_id + * + * Return: true or false + */ +static inline bool +dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id) +{ + uint8_t pool_id; + uint16_t page_id, offset; + struct dp_tx_desc_pool_s *pool; + + pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >> + DP_TX_DESC_ID_POOL_OS; + /* Pool ID is out of limit */ + if (pool_id > wlan_cfg_get_num_tx_desc_pool( + soc->wlan_cfg_ctx)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_FATAL, + "%s:Tx Comp pool id %d not valid", + __func__, + pool_id); + goto warn_exit; + } + + pool = &soc->tx_desc[pool_id]; + /* the pool is freed */ + if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_FATAL, + "%s:the pool %d has been freed", + __func__, + pool_id); + goto warn_exit; + } + + page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >> + DP_TX_DESC_ID_PAGE_OS; + /* the page id is out of limit */ + if (page_id >= pool->desc_pages.num_pages) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_FATAL, + "%s:the page id %d invalid, pool id %d, num_page %d", + __func__, + page_id, + pool_id, + pool->desc_pages.num_pages); + goto warn_exit; + } + + offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >> + DP_TX_DESC_ID_OFFSET_OS; + /* the offset is out of limit */ + if (offset >= pool->desc_pages.num_element_per_page) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_FATAL, + "%s:offset %d invalid, pool%d,num_elem_per_page %d", + __func__, + offset, + pool_id, + pool->desc_pages.num_element_per_page); + goto warn_exit; + } + + return true; + +warn_exit: + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_FATAL, + "%s:Tx desc id 0x%x not valid", + __func__, + tx_desc_id); + qdf_assert_always(0); + return false; +} + +#else +static inline bool +dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id) +{ + return true; +} +#endif /* QCA_DP_TX_DESC_ID_CHECK */ + +/** + * dp_tx_desc_find() - find dp tx descriptor from cokie + * @soc - handle for the device sending the data + * @tx_desc_id - the ID of the descriptor in question + * @return the descriptor object that has the specified ID + * + * Use a tx descriptor ID to find the corresponding descriptor object. + * + */ +static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc, + uint8_t pool_id, uint16_t page_id, uint16_t offset) +{ + struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]); + + return tx_desc_pool->desc_pages.cacheable_pages[page_id] + + tx_desc_pool->elem_size * offset; +} + +/** + * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool + * @soc: handle for the device sending the data + * @pool_id: target pool id + * + * Return: None + */ +static inline +struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc, + uint8_t desc_pool_id) +{ + struct dp_tx_ext_desc_elem_s *c_elem; + + qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock); + if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) { + qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock); + return NULL; + } + c_elem = soc->tx_ext_desc[desc_pool_id].freelist; + soc->tx_ext_desc[desc_pool_id].freelist = + soc->tx_ext_desc[desc_pool_id].freelist->next; + soc->tx_ext_desc[desc_pool_id].num_free--; + qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock); + return c_elem; +} + +/** + * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool + * @soc: handle for the device sending the data + * @pool_id: target pool id + * @elem: ext descriptor pointer should release + * + * Return: None + */ +static inline void dp_tx_ext_desc_free(struct dp_soc *soc, + struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id) +{ + qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock); + elem->next = soc->tx_ext_desc[desc_pool_id].freelist; + soc->tx_ext_desc[desc_pool_id].freelist = elem; + soc->tx_ext_desc[desc_pool_id].num_free++; + qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock); + return; +} + +/** + * dp_tx_ext_desc_free_multiple() - Fee multiple tx extension descriptor and + * attach it to free list + * @soc: Handle to DP SoC structure + * @desc_pool_id: pool id should pick up + * @elem: tx descriptor should be freed + * @num_free: number of descriptors should be freed + * + * Return: none + */ +static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc, + struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id, + uint8_t num_free) +{ + struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem; + uint8_t freed = num_free; + + /* caller should always guarantee atleast list of num_free nodes */ + qdf_assert_always(elem); + + head = elem; + c_elem = head; + tail = head; + while (c_elem && freed) { + tail = c_elem; + c_elem = c_elem->next; + freed--; + } + + /* caller should always guarantee atleast list of num_free nodes */ + qdf_assert_always(tail); + + qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock); + tail->next = soc->tx_ext_desc[desc_pool_id].freelist; + soc->tx_ext_desc[desc_pool_id].freelist = head; + soc->tx_ext_desc[desc_pool_id].num_free += num_free; + qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock); + + return; +} + +#if defined(FEATURE_TSO) +/** + * dp_tx_tso_desc_alloc() - function to allocate a TSO segment + * @soc: device soc instance + * @pool_id: pool id should pick up tso descriptor + * + * Allocates a TSO segment element from the free list held in + * the soc + * + * Return: tso_seg, tso segment memory pointer + */ +static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc( + struct dp_soc *soc, uint8_t pool_id) +{ + struct qdf_tso_seg_elem_t *tso_seg = NULL; + + qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock); + if (soc->tx_tso_desc[pool_id].freelist) { + soc->tx_tso_desc[pool_id].num_free--; + tso_seg = soc->tx_tso_desc[pool_id].freelist; + soc->tx_tso_desc[pool_id].freelist = + soc->tx_tso_desc[pool_id].freelist->next; + } + qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock); + + return tso_seg; +} + +/** + * dp_tx_tso_desc_free() - function to free a TSO segment + * @soc: device soc instance + * @pool_id: pool id should pick up tso descriptor + * @tso_seg: tso segment memory pointer + * + * Returns a TSO segment element to the free list held in the + * HTT pdev + * + * Return: none + */ +static inline void dp_tx_tso_desc_free(struct dp_soc *soc, + uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg) +{ + qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock); + tso_seg->next = soc->tx_tso_desc[pool_id].freelist; + soc->tx_tso_desc[pool_id].freelist = tso_seg; + soc->tx_tso_desc[pool_id].num_free++; + qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock); +} + +static inline +struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc, + uint8_t pool_id) +{ + struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL; + + qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock); + if (soc->tx_tso_num_seg[pool_id].freelist) { + soc->tx_tso_num_seg[pool_id].num_free--; + tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist; + soc->tx_tso_num_seg[pool_id].freelist = + soc->tx_tso_num_seg[pool_id].freelist->next; + } + qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock); + + return tso_num_seg; +} + +static inline +void dp_tso_num_seg_free(struct dp_soc *soc, + uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg) +{ + qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock); + tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist; + soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg; + soc->tx_tso_num_seg[pool_id].num_free++; + qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock); +} +#endif + +/* + * dp_tx_me_alloc_buf() Alloc descriptor from me pool + * @pdev DP_PDEV handle for datapath + * + * Return:dp_tx_me_buf_t(buf) + */ +static inline struct dp_tx_me_buf_t* +dp_tx_me_alloc_buf(struct dp_pdev *pdev) +{ + struct dp_tx_me_buf_t *buf = NULL; + qdf_spin_lock_bh(&pdev->tx_mutex); + if (pdev->me_buf.freelist) { + buf = pdev->me_buf.freelist; + pdev->me_buf.freelist = pdev->me_buf.freelist->next; + pdev->me_buf.buf_in_use++; + } else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Error allocating memory in pool"); + qdf_spin_unlock_bh(&pdev->tx_mutex); + return NULL; + } + qdf_spin_unlock_bh(&pdev->tx_mutex); + return buf; +} + +/* + * dp_tx_me_free_buf() - Free me descriptor and add it to pool + * @pdev: DP_PDEV handle for datapath + * @buf : Allocated ME BUF + * + * Return:void + */ +static inline void +dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf) +{ + qdf_spin_lock_bh(&pdev->tx_mutex); + buf->next = pdev->me_buf.freelist; + pdev->me_buf.freelist = buf; + pdev->me_buf.buf_in_use--; + qdf_spin_unlock_bh(&pdev->tx_mutex); +} +#endif /* DP_TX_DESC_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_flow_control.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_flow_control.c new file mode 100644 index 0000000000000000000000000000000000000000..4e5a5a620dc4090e90aa528232156826b72fc2d9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_flow_control.c @@ -0,0 +1,610 @@ +/* + * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +/* OS abstraction libraries */ +#include /* qdf_nbuf_t, etc. */ +#include /* qdf_atomic_read, etc. */ +#include /* qdf_unlikely */ +#include "dp_types.h" +#include "dp_tx_desc.h" + +#include +#include "dp_internal.h" +#define INVALID_FLOW_ID 0xFF +#define MAX_INVALID_BIN 3 + +#ifdef QCA_AC_BASED_FLOW_CONTROL +/** + * dp_tx_initialize_threshold() - Threshold of flow Pool initialization + * @pool: flow_pool + * @stop_threshold: stop threshold of certian AC + * @start_threshold: start threshold of certian AC + * @flow_pool_size: flow pool size + * + * Return: none + */ +static inline void +dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool, + uint32_t start_threshold, + uint32_t stop_threshold, + uint16_t flow_pool_size) +{ + /* BE_BK threshold is same as previous threahold */ + pool->start_th[DP_TH_BE_BK] = (start_threshold + * flow_pool_size) / 100; + pool->stop_th[DP_TH_BE_BK] = (stop_threshold + * flow_pool_size) / 100; + + /* Update VI threshold based on BE_BK threashold */ + pool->start_th[DP_TH_VI] = (pool->start_th[DP_TH_BE_BK] + * FL_TH_VI_PERCENTAGE) / 100; + pool->stop_th[DP_TH_VI] = (pool->stop_th[DP_TH_BE_BK] + * FL_TH_VI_PERCENTAGE) / 100; + + /* Update VO threshold based on BE_BK threashold */ + pool->start_th[DP_TH_VO] = (pool->start_th[DP_TH_BE_BK] + * FL_TH_VO_PERCENTAGE) / 100; + pool->stop_th[DP_TH_VO] = (pool->stop_th[DP_TH_BE_BK] + * FL_TH_VO_PERCENTAGE) / 100; + + /* Update High Priority threshold based on BE_BK threashold */ + pool->start_th[DP_TH_HI] = (pool->start_th[DP_TH_BE_BK] + * FL_TH_HI_PERCENTAGE) / 100; + pool->stop_th[DP_TH_HI] = (pool->stop_th[DP_TH_BE_BK] + * FL_TH_HI_PERCENTAGE) / 100; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: tx flow control threshold is set, pool size is %d", + __func__, flow_pool_size); +} + +/** + * dp_tx_flow_pool_reattach() - Reattach flow_pool + * @pool: flow_pool + * + * Return: none + */ +static inline void +dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool) +{ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: flow pool already allocated, attached %d times", + __func__, pool->pool_create_cnt); + + if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) + pool->status = FLOW_POOL_ACTIVE_UNPAUSED; + else if (pool->avail_desc <= pool->start_th[DP_TH_BE_BK] && + pool->avail_desc > pool->start_th[DP_TH_VI]) + pool->status = FLOW_POOL_BE_BK_PAUSED; + else if (pool->avail_desc <= pool->start_th[DP_TH_VI] && + pool->avail_desc > pool->start_th[DP_TH_VO]) + pool->status = FLOW_POOL_VI_PAUSED; + else if (pool->avail_desc <= pool->start_th[DP_TH_VO] && + pool->avail_desc > pool->start_th[DP_TH_HI]) + pool->status = FLOW_POOL_VO_PAUSED; + else + pool->status = FLOW_POOL_ACTIVE_PAUSED; + + pool->pool_create_cnt++; +} + +/** + * dp_tx_flow_pool_dump_threshold() - Dump threshold of the flow_pool + * @pool: flow_pool + * + * Return: none + */ +static inline void +dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool) +{ + int i; + + for (i = 0; i < FL_TH_MAX; i++) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Level %d :: Start threshold %d :: Stop threshold %d", + i, pool->start_th[i], pool->stop_th[i]); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Level %d :: Maximun pause time %lu ms", + i, pool->max_pause_time[i]); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Level %d :: Latest pause timestamp %lu", + i, pool->latest_pause_time[i]); + } +} + +#else +static inline void +dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool, + uint32_t start_threshold, + uint32_t stop_threshold, + uint16_t flow_pool_size) + +{ + /* INI is in percentage so divide by 100 */ + pool->start_th = (start_threshold * flow_pool_size) / 100; + pool->stop_th = (stop_threshold * flow_pool_size) / 100; +} + +static inline void +dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool) +{ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: flow pool already allocated, attached %d times", + __func__, pool->pool_create_cnt); + if (pool->avail_desc > pool->start_th) + pool->status = FLOW_POOL_ACTIVE_UNPAUSED; + else + pool->status = FLOW_POOL_ACTIVE_PAUSED; + + pool->pool_create_cnt++; +} + +static inline void +dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool) +{ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Start threshold %d :: Stop threshold %d", + pool->start_th, pool->stop_th); +} + +#endif + +/** + * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info + * + * @ctx: Handle to struct dp_soc. + * + * Return: none + */ +void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats; + struct dp_tx_desc_pool_s *pool = NULL; + struct dp_tx_desc_pool_s tmp_pool; + int i; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "No of pool map received %d", pool_stats->pool_map_count); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "No of pool unmap received %d", pool_stats->pool_unmap_count); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Pkt dropped due to unavailablity of pool %d", + pool_stats->pkt_drop_no_pool); + + /* + * Nested spin lock. + * Always take in below order. + * flow_pool_array_lock -> flow_pool_lock + */ + qdf_spin_lock_bh(&soc->flow_pool_array_lock); + for (i = 0; i < MAX_TXDESC_POOLS; i++) { + pool = &soc->tx_desc[i]; + if (pool->status > FLOW_POOL_INVALID) + continue; + qdf_spin_lock_bh(&pool->flow_pool_lock); + qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool)); + qdf_spin_unlock_bh(&pool->flow_pool_lock); + qdf_spin_unlock_bh(&soc->flow_pool_array_lock); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n"); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Flow_pool_id %d :: status %d", + tmp_pool.flow_pool_id, tmp_pool.status); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Total %d :: Available %d", + tmp_pool.pool_size, tmp_pool.avail_desc); + dp_tx_flow_pool_dump_threshold(&tmp_pool); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Member flow_id %d :: flow_type %d", + tmp_pool.flow_pool_id, tmp_pool.flow_type); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Pkt dropped due to unavailablity of descriptors %d", + tmp_pool.pkt_drop_no_desc); + qdf_spin_lock_bh(&soc->flow_pool_array_lock); + } + qdf_spin_unlock_bh(&soc->flow_pool_array_lock); +} + +/** + * dp_tx_clear_flow_pool_stats() - clear flow pool statistics + * + * @soc: Handle to struct dp_soc. + * + * Return: None + */ +void dp_tx_clear_flow_pool_stats(struct dp_soc *soc) +{ + + if (!soc) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: soc is null", __func__); + return; + } + qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats)); +} + +/** + * dp_tx_create_flow_pool() - create flow pool + * @soc: Handle to struct dp_soc + * @flow_pool_id: flow pool id + * @flow_pool_size: flow pool size + * + * Return: flow_pool pointer / NULL for error + */ +struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc, + uint8_t flow_pool_id, uint16_t flow_pool_size) +{ + struct dp_tx_desc_pool_s *pool; + uint32_t stop_threshold; + uint32_t start_threshold; + + if (flow_pool_id >= MAX_TXDESC_POOLS) { + dp_err("invalid flow_pool_id %d", flow_pool_id); + return NULL; + } + pool = &soc->tx_desc[flow_pool_id]; + qdf_spin_lock_bh(&pool->flow_pool_lock); + if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) { + dp_tx_flow_pool_reattach(pool); + qdf_spin_unlock_bh(&pool->flow_pool_lock); + dp_err("cannot alloc desc, status=%d, create_cnt=%d", + pool->status, pool->pool_create_cnt); + return pool; + } + + if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) { + qdf_spin_unlock_bh(&pool->flow_pool_lock); + return NULL; + } + + stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx); + start_threshold = stop_threshold + + wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx); + + pool->flow_pool_id = flow_pool_id; + pool->pool_size = flow_pool_size; + pool->avail_desc = flow_pool_size; + pool->status = FLOW_POOL_ACTIVE_UNPAUSED; + dp_tx_initialize_threshold(pool, start_threshold, stop_threshold, + flow_pool_size); + pool->pool_create_cnt++; + + qdf_spin_unlock_bh(&pool->flow_pool_lock); + + return pool; +} + +/** + * dp_tx_delete_flow_pool() - delete flow pool + * @soc: Handle to struct dp_soc + * @pool: flow pool pointer + * @force: free pool forcefully + * + * Delete flow_pool if all tx descriptors are available. + * Otherwise put it in FLOW_POOL_INVALID state. + * If force is set then pull all available descriptors to + * global pool. + * + * Return: 0 for success or error + */ +int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool, + bool force) +{ + struct dp_vdev *vdev; + + if (!soc || !pool) { + dp_err("pool or soc is NULL"); + QDF_ASSERT(0); + return ENOMEM; + } + + dp_info("pool create_cnt=%d, avail_desc=%d, size=%d, status=%d", + pool->pool_create_cnt, pool->avail_desc, + pool->pool_size, pool->status); + qdf_spin_lock_bh(&pool->flow_pool_lock); + if (!pool->pool_create_cnt) { + qdf_spin_unlock_bh(&pool->flow_pool_lock); + dp_err("flow pool either not created or alread deleted"); + return -ENOENT; + } + pool->pool_create_cnt--; + if (pool->pool_create_cnt) { + qdf_spin_unlock_bh(&pool->flow_pool_lock); + dp_err("pool is still attached, pending detach %d", + pool->pool_create_cnt); + return -EAGAIN; + } + + if (pool->avail_desc < pool->pool_size) { + pool->status = FLOW_POOL_INVALID; + qdf_spin_unlock_bh(&pool->flow_pool_lock); + /* Reset TX desc associated to this Vdev as NULL */ + vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, + pool->flow_pool_id); + if (vdev) + dp_tx_desc_flush(vdev->pdev, vdev, false); + dp_err("avail desc less than pool size"); + return -EAGAIN; + } + + /* We have all the descriptors for the pool, we can delete the pool */ + dp_tx_desc_pool_free(soc, pool->flow_pool_id); + qdf_spin_unlock_bh(&pool->flow_pool_lock); + return 0; +} + +/** + * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev + * @pdev: Handle to struct dp_pdev + * @pool: flow_pool + * @vdev_id: flow_id /vdev_id + * + * Return: none + */ +static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev, + struct dp_tx_desc_pool_s *pool, uint8_t vdev_id) +{ + struct dp_vdev *vdev; + struct dp_soc *soc = pdev->soc; + + vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + if (!vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: invalid vdev_id %d", + __func__, vdev_id); + return; + } + + vdev->pool = pool; + qdf_spin_lock_bh(&pool->flow_pool_lock); + pool->pool_owner_ctx = soc; + pool->flow_pool_id = vdev_id; + qdf_spin_unlock_bh(&pool->flow_pool_lock); +} + +/** + * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev + * @pdev: Handle to struct dp_pdev + * @pool: flow_pool + * @vdev_id: flow_id /vdev_id + * + * Return: none + */ +static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev, + struct dp_tx_desc_pool_s *pool, uint8_t vdev_id) +{ + struct dp_vdev *vdev; + struct dp_soc *soc = pdev->soc; + + vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id); + if (!vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: invalid vdev_id %d", + __func__, vdev_id); + return; + } + + vdev->pool = NULL; +} + +/** + * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors + * @pdev: Handle to struct dp_pdev + * @flow_id: flow id + * @flow_type: flow type + * @flow_pool_id: pool id + * @flow_pool_size: pool size + * + * Process below target to host message + * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP + * + * Return: none + */ +QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id, + uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size) +{ + struct dp_soc *soc = pdev->soc; + struct dp_tx_desc_pool_s *pool; + enum htt_flow_type type = flow_type; + + + dp_info("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d", + flow_id, flow_type, flow_pool_id, flow_pool_size); + + if (qdf_unlikely(!soc)) { + dp_err("soc is NULL"); + return QDF_STATUS_E_FAULT; + } + soc->pool_stats.pool_map_count++; + + pool = dp_tx_create_flow_pool(soc, flow_pool_id, + flow_pool_size); + if (!pool) { + dp_err("creation of flow_pool %d size %d failed", + flow_pool_id, flow_pool_size); + return QDF_STATUS_E_RESOURCES; + } + + switch (type) { + + case FLOW_TYPE_VDEV: + dp_tx_flow_pool_vdev_map(pdev, pool, flow_id); + break; + default: + dp_err("flow type %d not supported", type); + break; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors + * @pdev: Handle to struct dp_pdev + * @flow_id: flow id + * @flow_type: flow type + * @flow_pool_id: pool id + * + * Process below target to host message + * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP + * + * Return: none + */ +void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id, + uint8_t flow_type, uint8_t flow_pool_id) +{ + struct dp_soc *soc = pdev->soc; + struct dp_tx_desc_pool_s *pool; + enum htt_flow_type type = flow_type; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: flow_id %d flow_type %d flow_pool_id %d", + __func__, flow_id, flow_type, flow_pool_id); + + if (qdf_unlikely(!pdev)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: pdev is NULL", __func__); + return; + } + soc->pool_stats.pool_unmap_count++; + + pool = &soc->tx_desc[flow_pool_id]; + if (!pool) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: flow_pool not available flow_pool_id %d", + __func__, type); + return; + } + + switch (type) { + + case FLOW_TYPE_VDEV: + dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id); + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: flow type %d not supported !!!", + __func__, type); + return; + } + + /* only delete if all descriptors are available */ + dp_tx_delete_flow_pool(soc, pool, false); +} + +/** + * dp_tx_flow_control_init() - Initialize tx flow control + * @tx_desc_pool: Handle to flow_pool + * + * Return: none + */ +void dp_tx_flow_control_init(struct dp_soc *soc) +{ + qdf_spinlock_create(&soc->flow_pool_array_lock); +} + +/** + * dp_tx_desc_pool_dealloc() - De-allocate tx desc pool + * @tx_desc_pool: Handle to flow_pool + * + * Return: none + */ +static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc) +{ + struct dp_tx_desc_pool_s *tx_desc_pool; + int i; + + for (i = 0; i < MAX_TXDESC_POOLS; i++) { + tx_desc_pool = &((soc)->tx_desc[i]); + if (!tx_desc_pool->desc_pages.num_pages) + continue; + + if (dp_tx_desc_pool_free(soc, i) != QDF_STATUS_SUCCESS) + dp_err("Tx Desc Pool:%d Free failed", i); + } +} + +/** + * dp_tx_flow_control_deinit() - Deregister fw based tx flow control + * @tx_desc_pool: Handle to flow_pool + * + * Return: none + */ +void dp_tx_flow_control_deinit(struct dp_soc *soc) +{ + dp_tx_desc_pool_dealloc(soc); + + qdf_spinlock_destroy(&soc->flow_pool_array_lock); +} + +/** + * dp_txrx_register_pause_cb() - Register pause callback + * @ctx: Handle to struct dp_soc + * @pause_cb: Tx pause_cb + * + * Return: none + */ +QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle, + tx_pause_callback pause_cb) +{ + struct dp_soc *soc = (struct dp_soc *)handle; + + if (!soc || !pause_cb) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("soc or pause_cb is NULL")); + return QDF_STATUS_E_INVAL; + } + soc->pause_cb = pause_cb; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, uint8_t pdev_id, + uint8_t vdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle); + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + int tx_ring_size = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); + + if (!pdev) { + dp_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + return dp_tx_flow_pool_map_handler(pdev, vdev_id, FLOW_TYPE_VDEV, + vdev_id, tx_ring_size); +} + +void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id, + uint8_t vdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle); + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (!pdev) { + dp_err("pdev is NULL"); + return; + } + + return dp_tx_flow_pool_unmap_handler(pdev, vdev_id, + FLOW_TYPE_VDEV, vdev_id); +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h new file mode 100644 index 0000000000000000000000000000000000000000..3297d5ec5cdc7af5a21abdd9b9f2ec9f2ba4051c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h @@ -0,0 +1,2560 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_TYPES_H_ +#define _DP_TYPES_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#ifdef DP_MOB_DEFS +#include +#endif +#include /* WDI subscriber event list */ + +#include "hal_hw_headers.h" +#include +#include +#include "wlan_cfg.h" +#include "hal_rx.h" +#include +#include +#include "hal_rx.h" +//#include "hal_rx_flow.h" + +#define MAX_BW 7 +#define MAX_RETRIES 4 +#define MAX_RECEPTION_TYPES 4 + +#ifndef REMOVE_PKT_LOG +#include +#endif + +#ifdef WLAN_TX_PKT_CAPTURE_ENH +#include "dp_tx_capture.h" +#endif + +#define REPT_MU_MIMO 1 +#define REPT_MU_OFDMA_MIMO 3 +#define DP_VO_TID 6 + /** MAX TID MAPS AVAILABLE PER PDEV */ +#define DP_MAX_TID_MAPS 16 +/** pad DSCP_TID_MAP_MAX with 6 to fix oob issue */ +#define DSCP_TID_MAP_MAX (64 + 6) +#define DP_IP_DSCP_SHIFT 2 +#define DP_IP_DSCP_MASK 0x3f +#define DP_FC0_SUBTYPE_QOS 0x80 +#define DP_QOS_TID 0x0f +#define DP_IPV6_PRIORITY_SHIFT 20 +#define MAX_MON_LINK_DESC_BANKS 2 +#define DP_VDEV_ALL 0xff + +#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1) +#define MAX_PDEV_CNT 1 +#else +#define MAX_PDEV_CNT 3 +#endif + +/* Max no. of VDEV per PSOC */ +#ifdef WLAN_PSOC_MAX_VDEVS +#define MAX_VDEV_CNT WLAN_PSOC_MAX_VDEVS +#else +#define MAX_VDEV_CNT 51 +#endif + +#define MAX_TXDESC_POOLS 4 +#define MAX_RXDESC_POOLS 4 +#define MAX_REO_DEST_RINGS 4 +#define EXCEPTION_DEST_RING_ID 0 +#define MAX_TCL_DATA_RINGS 4 +#define MAX_IDLE_SCATTER_BUFS 16 +#define DP_MAX_IRQ_PER_CONTEXT 12 +#define DEFAULT_HW_PEER_ID 0xffff + +#define WBM_INT_ERROR_ALL 0 +#define WBM_INT_ERROR_REO_NULL_BUFFER 1 +#define WBM_INT_ERROR_REO_NULL_LINK_DESC 2 +#define WBM_INT_ERROR_REO_NULL_MSDU_BUFF 3 +#define WBM_INT_ERROR_REO_BUFF_REAPED 4 +#define MAX_WBM_INT_ERROR_REASONS 5 + +#define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS +/* Maximum retries for Delba per tid per peer */ +#define DP_MAX_DELBA_RETRY 3 + +#define PCP_TID_MAP_MAX 8 +#define MAX_MU_USERS 37 + +#define REO_CMD_EVENT_HIST_MAX 64 + +enum rx_pktlog_mode { + DP_RX_PKTLOG_DISABLED = 0, + DP_RX_PKTLOG_FULL, + DP_RX_PKTLOG_LITE, +}; + +struct msdu_list { + qdf_nbuf_t head; + qdf_nbuf_t tail; + uint32 sum_len; +}; + +struct dp_soc_cmn; +struct dp_pdev; +struct dp_vdev; +struct dp_tx_desc_s; +struct dp_soc; +union dp_rx_desc_list_elem_t; +struct cdp_peer_rate_stats_ctx; +struct cdp_soc_rate_stats_ctx; +struct dp_rx_fst; +struct dp_mon_filter; + +#define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \ + TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem) + +#define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \ + TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem) + +#define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \ + TAILQ_FOREACH_SAFE((_ase), &peer->ast_entry_list, ase_list_elem, (_temp_ase)) + +#define DP_MUTEX_TYPE qdf_spinlock_t + +#define DP_FRAME_IS_MULTICAST(_a) (*(_a) & 0x01) +#define DP_FRAME_IS_IPV4_MULTICAST(_a) (*(_a) == 0x01) + +#define DP_FRAME_IS_IPV6_MULTICAST(_a) \ + ((_a)[0] == 0x33 && \ + (_a)[1] == 0x33) + +#define DP_FRAME_IS_BROADCAST(_a) \ + ((_a)[0] == 0xff && \ + (_a)[1] == 0xff && \ + (_a)[2] == 0xff && \ + (_a)[3] == 0xff && \ + (_a)[4] == 0xff && \ + (_a)[5] == 0xff) +#define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \ + (_llc)->llc_ssap == 0xaa && \ + (_llc)->llc_un.type_snap.control == 0x3) +#define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600) +#define DP_FRAME_FC0_TYPE_MASK 0x0c +#define DP_FRAME_FC0_TYPE_DATA 0x08 +#define DP_FRAME_IS_DATA(_frame) \ + (((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA) + +/** + * macros to convert hw mac id to sw mac id: + * mac ids used by hardware start from a value of 1 while + * those in host software start from a value of 0. Use the + * macros below to convert between mac ids used by software and + * hardware + */ +#define DP_SW2HW_MACID(id) ((id) + 1) +#define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0) + +/** + * Number of Tx Queues + * enum and macro to define how many threshold levels is used + * for the AC based flow control + */ +#ifdef QCA_AC_BASED_FLOW_CONTROL +enum dp_fl_ctrl_threshold { + DP_TH_BE_BK = 0, + DP_TH_VI, + DP_TH_VO, + DP_TH_HI, +}; + +#define FL_TH_MAX (4) +#define FL_TH_VI_PERCENTAGE (80) +#define FL_TH_VO_PERCENTAGE (60) +#define FL_TH_HI_PERCENTAGE (40) +#endif + +/** + * enum dp_intr_mode + * @DP_INTR_INTEGRATED: Line interrupts + * @DP_INTR_MSI: MSI interrupts + * @DP_INTR_POLL: Polling + */ +enum dp_intr_mode { + DP_INTR_INTEGRATED = 0, + DP_INTR_MSI, + DP_INTR_POLL, +}; + +/** + * enum dp_tx_frm_type + * @dp_tx_frm_std: Regular frame, no added header fragments + * @dp_tx_frm_tso: TSO segment, with a modified IP header added + * @dp_tx_frm_sg: SG segment + * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added + * @dp_tx_frm_me: Multicast to Unicast Converted frame + * @dp_tx_frm_raw: Raw Frame + */ +enum dp_tx_frm_type { + dp_tx_frm_std = 0, + dp_tx_frm_tso, + dp_tx_frm_sg, + dp_tx_frm_audio, + dp_tx_frm_me, + dp_tx_frm_raw, +}; + +/** + * enum dp_ast_type + * @dp_ast_type_wds: WDS peer AST type + * @dp_ast_type_static: static ast entry type + * @dp_ast_type_mec: Multicast echo ast entry type + */ +enum dp_ast_type { + dp_ast_type_wds = 0, + dp_ast_type_static, + dp_ast_type_mec, +}; + +/** + * enum dp_nss_cfg + * @dp_nss_cfg_default: No radios are offloaded + * @dp_nss_cfg_first_radio: First radio offloaded + * @dp_nss_cfg_second_radio: Second radio offloaded + * @dp_nss_cfg_dbdc: Dual radios offloaded + * @dp_nss_cfg_dbtc: Three radios offloaded + */ +enum dp_nss_cfg { + dp_nss_cfg_default = 0x0, + dp_nss_cfg_first_radio = 0x1, + dp_nss_cfg_second_radio = 0x2, + dp_nss_cfg_dbdc = 0x3, + dp_nss_cfg_dbtc = 0x7, + dp_nss_cfg_max +}; + +#ifdef WLAN_TX_PKT_CAPTURE_ENH +#define DP_CPU_RING_MAP_1 1 +#endif + +/** + * dp_cpu_ring_map_type - dp tx cpu ring map + * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded + * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded + * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded + * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded + * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded + * @DP_SINGLE_TX_RING_MAP: to avoid out of order all cpu mapped to single ring + * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val + */ +enum dp_cpu_ring_map_types { + DP_NSS_DEFAULT_MAP, + DP_NSS_FIRST_RADIO_OFFLOADED_MAP, + DP_NSS_SECOND_RADIO_OFFLOADED_MAP, + DP_NSS_DBDC_OFFLOADED_MAP, + DP_NSS_DBTC_OFFLOADED_MAP, +#ifdef WLAN_TX_PKT_CAPTURE_ENH + DP_SINGLE_TX_RING_MAP, +#endif + DP_NSS_CPU_RING_MAP_MAX +}; + +/** + * enum dp_ctxt - context type + * @DP_PDEV_TYPE: PDEV context + * @DP_RX_RING_HIST_TYPE: Datapath rx ring history + * @DP_RX_ERR_RING_HIST_TYPE: Datapath rx error ring history + * @DP_RX_REINJECT_RING_HIST_TYPE: Datapath reinject ring history + */ +enum dp_ctxt_type { + DP_PDEV_TYPE, + DP_RX_RING_HIST_TYPE, + DP_RX_ERR_RING_HIST_TYPE, + DP_RX_REINJECT_RING_HIST_TYPE, +}; + +/** + * enum dp_desc_type - source type for multiple pages allocation + * @DP_TX_DESC_TYPE: DP SW TX descriptor + * @DP_TX_EXT_DESC_TYPE: DP TX msdu extension descriptor + * @DP_TX_EXT_DESC_LINK_TYPE: DP link descriptor for msdu ext_desc + * @DP_TX_TSO_DESC_TYPE: DP TX TSO descriptor + * @DP_TX_TSO_NUM_SEG_TYPE: DP TX number of segments + * @DP_RX_DESC_BUF_TYPE: DP RX SW descriptor + * @DP_RX_DESC_STATUS_TYPE: DP RX SW descriptor for monitor status + * @DP_HW_LINK_DESC_TYPE: DP HW link descriptor + */ +enum dp_desc_type { + DP_TX_DESC_TYPE, + DP_TX_EXT_DESC_TYPE, + DP_TX_EXT_DESC_LINK_TYPE, + DP_TX_TSO_DESC_TYPE, + DP_TX_TSO_NUM_SEG_TYPE, + DP_RX_DESC_BUF_TYPE, + DP_RX_DESC_STATUS_TYPE, + DP_HW_LINK_DESC_TYPE, +}; + +/** + * struct rx_desc_pool + * @pool_size: number of RX descriptor in the pool + * @elem_size: Element size + * @desc_pages: Multi page descriptors + * @array: pointer to array of RX descriptor + * @freelist: pointer to free RX descriptor link list + * @lock: Protection for the RX descriptor pool + * @owner: owner for nbuf + * @buf_size: Buffer size + * @buf_alignment: Buffer alignment + * @desc_type: type of desc this pool serves + */ +struct rx_desc_pool { + uint32_t pool_size; +#ifdef RX_DESC_MULTI_PAGE_ALLOC + uint16_t elem_size; + struct qdf_mem_multi_page_t desc_pages; +#else + union dp_rx_desc_list_elem_t *array; +#endif + union dp_rx_desc_list_elem_t *freelist; + qdf_spinlock_t lock; + uint8_t owner; + uint16_t buf_size; + uint8_t buf_alignment; + enum dp_desc_type desc_type; +}; + +/** + * struct dp_tx_ext_desc_elem_s + * @next: next extension descriptor pointer + * @vaddr: hlos virtual address pointer + * @paddr: physical address pointer for descriptor + * @flags: mark features for extension descriptor + */ +struct dp_tx_ext_desc_elem_s { + struct dp_tx_ext_desc_elem_s *next; + void *vaddr; + qdf_dma_addr_t paddr; + uint16_t flags; +}; + +/** + * struct dp_tx_ext_desc_s - Tx Extension Descriptor Pool + * @elem_count: Number of descriptors in the pool + * @elem_size: Size of each descriptor + * @num_free: Number of free descriptors + * @msdu_ext_desc: MSDU extension descriptor + * @desc_pages: multiple page allocation information for actual descriptors + * @link_elem_size: size of the link descriptor in cacheable memory used for + * chaining the extension descriptors + * @desc_link_pages: multiple page allocation information for link descriptors + */ +struct dp_tx_ext_desc_pool_s { + uint16_t elem_count; + int elem_size; + uint16_t num_free; + struct qdf_mem_multi_page_t desc_pages; + int link_elem_size; + struct qdf_mem_multi_page_t desc_link_pages; + struct dp_tx_ext_desc_elem_s *freelist; + qdf_spinlock_t lock; + qdf_dma_mem_context(memctx); +}; + +/** + * struct dp_tx_desc_s - Tx Descriptor + * @next: Next in the chain of descriptors in freelist or in the completion list + * @nbuf: Buffer Address + * @msdu_ext_desc: MSDU extension descriptor + * @id: Descriptor ID + * @vdev: vdev over which the packet was transmitted + * @pdev: Handle to pdev + * @pool_id: Pool ID - used when releasing the descriptor + * @flags: Flags to track the state of descriptor and special frame handling + * @comp: Pool ID - used when releasing the descriptor + * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet). + * This is maintained in descriptor to allow more efficient + * processing in completion event processing code. + * This field is filled in with the htt_pkt_type enum. + * @frm_type: Frame Type - ToDo check if this is redundant + * @pkt_offset: Offset from which the actual packet data starts + * @me_buffer: Pointer to ME buffer - store this so that it can be freed on + * Tx completion of ME packet + * @pool: handle to flow_pool this descriptor belongs to. + */ +struct dp_tx_desc_s { + struct dp_tx_desc_s *next; + qdf_nbuf_t nbuf; + struct dp_tx_ext_desc_elem_s *msdu_ext_desc; + uint32_t id; + struct dp_vdev *vdev; + struct dp_pdev *pdev; + uint8_t pool_id; + uint16_t flags; + struct hal_tx_desc_comp_s comp; + uint16_t tx_encap_type; + uint8_t frm_type; + uint8_t pkt_offset; + void *me_buffer; + void *tso_desc; + void *tso_num_desc; + uint64_t timestamp; +}; + +/** + * enum flow_pool_status - flow pool status + * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors) + * and network queues are unpaused + * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors) + * and network queues are paused + * @FLOW_POOL_INVALID: pool is invalid (put descriptor) + * @FLOW_POOL_INACTIVE: pool is inactive (pool is free) + */ +enum flow_pool_status { + FLOW_POOL_ACTIVE_UNPAUSED = 0, + FLOW_POOL_ACTIVE_PAUSED = 1, + FLOW_POOL_BE_BK_PAUSED = 2, + FLOW_POOL_VI_PAUSED = 3, + FLOW_POOL_VO_PAUSED = 4, + FLOW_POOL_INVALID = 5, + FLOW_POOL_INACTIVE = 6, +}; + +/** + * struct dp_tx_tso_seg_pool_s + * @pool_size: total number of pool elements + * @num_free: free element count + * @freelist: first free element pointer + * @desc_pages: multiple page allocation information for actual descriptors + * @lock: lock for accessing the pool + */ +struct dp_tx_tso_seg_pool_s { + uint16_t pool_size; + uint16_t num_free; + struct qdf_tso_seg_elem_t *freelist; + struct qdf_mem_multi_page_t desc_pages; + qdf_spinlock_t lock; +}; + +/** + * struct dp_tx_tso_num_seg_pool_s { + * @num_seg_pool_size: total number of pool elements + * @num_free: free element count + * @freelist: first free element pointer + * @desc_pages: multiple page allocation information for actual descriptors + * @lock: lock for accessing the pool + */ + +struct dp_tx_tso_num_seg_pool_s { + uint16_t num_seg_pool_size; + uint16_t num_free; + struct qdf_tso_num_seg_elem_t *freelist; + struct qdf_mem_multi_page_t desc_pages; + /*tso mutex */ + qdf_spinlock_t lock; +}; + +/** + * struct dp_tx_desc_pool_s - Tx Descriptor pool information + * @elem_size: Size of each descriptor in the pool + * @pool_size: Total number of descriptors in the pool + * @num_free: Number of free descriptors + * @num_allocated: Number of used descriptors + * @freelist: Chain of free descriptors + * @desc_pages: multiple page allocation information for actual descriptors + * @num_invalid_bin: Deleted pool with pending Tx completions. + * @flow_pool_array_lock: Lock when operating on flow_pool_array. + * @flow_pool_array: List of allocated flow pools + * @lock- Lock for descriptor allocation/free from/to the pool + */ +struct dp_tx_desc_pool_s { + uint16_t elem_size; + uint32_t num_allocated; + struct dp_tx_desc_s *freelist; + struct qdf_mem_multi_page_t desc_pages; +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 + uint16_t pool_size; + uint8_t flow_pool_id; + uint8_t num_invalid_bin; + uint16_t avail_desc; + enum flow_pool_status status; + enum htt_flow_type flow_type; +#ifdef QCA_AC_BASED_FLOW_CONTROL + uint16_t stop_th[FL_TH_MAX]; + uint16_t start_th[FL_TH_MAX]; + qdf_time_t max_pause_time[FL_TH_MAX]; + qdf_time_t latest_pause_time[FL_TH_MAX]; +#else + uint16_t stop_th; + uint16_t start_th; +#endif + uint16_t pkt_drop_no_desc; + qdf_spinlock_t flow_pool_lock; + uint8_t pool_create_cnt; + void *pool_owner_ctx; +#else + uint16_t elem_count; + uint32_t num_free; + qdf_spinlock_t lock; +#endif +}; + +/** + * struct dp_txrx_pool_stats - flow pool related statistics + * @pool_map_count: flow pool map received + * @pool_unmap_count: flow pool unmap received + * @pkt_drop_no_pool: packets dropped due to unavailablity of pool + */ +struct dp_txrx_pool_stats { + uint16_t pool_map_count; + uint16_t pool_unmap_count; + uint16_t pkt_drop_no_pool; +}; + +struct dp_srng { + hal_ring_handle_t hal_srng; + void *base_vaddr_unaligned; + qdf_dma_addr_t base_paddr_unaligned; + uint32_t alloc_size; + uint8_t cached; + int irq; + uint32_t num_entries; +#ifdef DP_MEM_PRE_ALLOC + uint8_t is_mem_prealloc; +#endif +}; + +struct dp_rx_reorder_array_elem { + qdf_nbuf_t head; + qdf_nbuf_t tail; +}; + +#define DP_RX_BA_INACTIVE 0 +#define DP_RX_BA_ACTIVE 1 +#define DP_RX_BA_IN_PROGRESS 2 +struct dp_reo_cmd_info { + uint16_t cmd; + enum hal_reo_cmd_type cmd_type; + void *data; + void (*handler)(struct dp_soc *, void *, union hal_reo_status *); + TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem; +}; + +/* Rx TID */ +struct dp_rx_tid { + /* TID */ + int tid; + + /* Num of addba requests */ + uint32_t num_of_addba_req; + + /* Num of addba responses */ + uint32_t num_of_addba_resp; + + /* Num of delba requests */ + uint32_t num_of_delba_req; + + /* Num of addba responses successful */ + uint32_t num_addba_rsp_success; + + /* Num of addba responses failed */ + uint32_t num_addba_rsp_failed; + + /* pn size */ + uint8_t pn_size; + /* REO TID queue descriptors */ + void *hw_qdesc_vaddr_unaligned; + void *hw_qdesc_vaddr_aligned; + qdf_dma_addr_t hw_qdesc_paddr_unaligned; + qdf_dma_addr_t hw_qdesc_paddr; + uint32_t hw_qdesc_alloc_size; + + /* RX ADDBA session state */ + int ba_status; + + /* RX BA window size */ + uint16_t ba_win_size; + + /* Starting sequence number in Addba request */ + uint16_t startseqnum; + + /* TODO: Check the following while adding defragmentation support */ + struct dp_rx_reorder_array_elem *array; + /* base - single rx reorder element used for non-aggr cases */ + struct dp_rx_reorder_array_elem base; + + /* only used for defrag right now */ + TAILQ_ENTRY(dp_rx_tid) defrag_waitlist_elem; + + /* Store dst desc for reinjection */ + hal_ring_desc_t dst_ring_desc; + struct dp_rx_desc *head_frag_desc; + + /* rx_tid lock */ + qdf_spinlock_t tid_lock; + + /* Sequence and fragments that are being processed currently */ + uint32_t curr_seq_num; + uint32_t curr_frag_num; + + /* head PN number */ + uint64_t pn128[2]; + + uint32_t defrag_timeout_ms; + uint16_t dialogtoken; + uint16_t statuscode; + /* user defined ADDBA response status code */ + uint16_t userstatuscode; + + /* Store ppdu_id when 2k exception is received */ + uint32_t ppdu_id_2k; + + /* Delba Tx completion status */ + uint8_t delba_tx_status; + + /* Delba Tx retry count */ + uint8_t delba_tx_retry; + + /* Delba stats */ + uint32_t delba_tx_success_cnt; + uint32_t delba_tx_fail_cnt; + + /* Delba reason code for retries */ + uint8_t delba_rcode; + + /* Coex Override preserved windows size 1 based */ + uint16_t rx_ba_win_size_override; +}; + +/** + * struct dp_intr_stats - DP Interrupt Stats for an interrupt context + * @num_tx_ring_masks: interrupts with tx_ring_mask set + * @num_rx_ring_masks: interrupts with rx_ring_mask set + * @num_rx_mon_ring_masks: interrupts with rx_mon_ring_mask set + * @num_rx_err_ring_masks: interrupts with rx_err_ring_mask set + * @num_rx_wbm_rel_ring_masks: interrupts with rx_wbm_rel_ring_mask set + * @num_reo_status_ring_masks: interrupts with reo_status_ring_mask set + * @num_rxdma2host_ring_masks: interrupts with rxdma2host_ring_mask set + * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set + * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set + * @num_masks: total number of times the interrupt was received + * + * Counter for individual masks are incremented only if there are any packets + * on that ring. + */ +struct dp_intr_stats { + uint32_t num_tx_ring_masks[MAX_TCL_DATA_RINGS]; + uint32_t num_rx_ring_masks[MAX_REO_DEST_RINGS]; + uint32_t num_rx_mon_ring_masks; + uint32_t num_rx_err_ring_masks; + uint32_t num_rx_wbm_rel_ring_masks; + uint32_t num_reo_status_ring_masks; + uint32_t num_rxdma2host_ring_masks; + uint32_t num_host2rxdma_ring_masks; + uint32_t num_masks; +}; + +/* per interrupt context */ +struct dp_intr { + uint8_t tx_ring_mask; /* WBM Tx completion rings (0-2) + associated with this napi context */ + uint8_t rx_ring_mask; /* Rx REO rings (0-3) associated + with this interrupt context */ + uint8_t rx_mon_ring_mask; /* Rx monitor ring mask (0-2) */ + uint8_t rx_err_ring_mask; /* REO Exception Ring */ + uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */ + uint8_t reo_status_ring_mask; /* REO command response ring */ + uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */ + uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */ + /* Host to RXDMA monitor buffer ring */ + uint8_t host2rxdma_mon_ring_mask; + struct dp_soc *soc; /* Reference to SoC structure , + to get DMA ring handles */ + qdf_lro_ctx_t lro_ctx; + uint8_t dp_intr_id; + + /* Interrupt Stats for individual masks */ + struct dp_intr_stats intr_stats; +}; + +#define REO_DESC_FREELIST_SIZE 64 +#define REO_DESC_FREE_DEFER_MS 1000 +struct reo_desc_list_node { + qdf_list_node_t node; + unsigned long free_ts; + struct dp_rx_tid rx_tid; + bool resend_update_reo_cmd; + uint32_t pending_ext_desc_size; +}; + +#ifdef WLAN_FEATURE_DP_EVENT_HISTORY +/** + * struct reo_cmd_event_record: Elements to record for each reo command + * @cmd_type: reo command type + * @cmd_return_status: reo command post status + * @timestamp: record timestamp for the reo command + */ +struct reo_cmd_event_record { + enum hal_reo_cmd_type cmd_type; + uint8_t cmd_return_status; + uint32_t timestamp; +}; + +/** + * struct reo_cmd_event_history: Account for reo cmd events + * @index: record number + * @cmd_record: list of records + */ +struct reo_cmd_event_history { + qdf_atomic_t index; + struct reo_cmd_event_record cmd_record[REO_CMD_EVENT_HIST_MAX]; +}; +#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ + +/* SoC level data path statistics */ +struct dp_soc_stats { + struct { + uint32_t added; + uint32_t deleted; + uint32_t aged_out; + uint32_t map_err; + } ast; + + /* SOC level TX stats */ + struct { + /* packets dropped on tx because of no peer */ + struct cdp_pkt_info tx_invalid_peer; + /* descriptors in each tcl ring */ + uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS]; + /* Descriptors in use at soc */ + uint32_t desc_in_use; + /* tqm_release_reason == FW removed */ + uint32_t dropped_fw_removed; + /* tx completion release_src != TQM or FW */ + uint32_t invalid_release_source; + /* tx completion wbm_internal_error */ + uint32_t wbm_internal_error[MAX_WBM_INT_ERROR_REASONS]; + /* tx completion non_wbm_internal_error */ + uint32_t non_wbm_internal_err; + /* TX Comp loop packet limit hit */ + uint32_t tx_comp_loop_pkt_limit_hit; + /* Head pointer Out of sync at the end of dp_tx_comp_handler */ + uint32_t hp_oos2; + } tx; + + /* SOC level RX stats */ + struct { + /* Rx errors */ + /* Total Packets in Rx Error ring */ + uint32_t err_ring_pkts; + /* No of Fragments */ + uint32_t rx_frags; + /* No of incomplete fragments in waitlist */ + uint32_t rx_frag_wait; + /* Fragments dropped due to errors */ + uint32_t rx_frag_err; + /* Fragments received OOR causing sequence num mismatch */ + uint32_t rx_frag_oor; + /* Fragments dropped due to len errors in skb */ + uint32_t rx_frag_err_len_error; + /* Fragments dropped due to no peer found */ + uint32_t rx_frag_err_no_peer; + /* No of reinjected packets */ + uint32_t reo_reinject; + /* Reap loop packet limit hit */ + uint32_t reap_loop_pkt_limit_hit; + /* Head pointer Out of sync at the end of dp_rx_process */ + uint32_t hp_oos2; + /* Rx ring near full */ + uint32_t near_full; + /* Break ring reaping as not all scattered msdu received */ + uint32_t msdu_scatter_wait_break; + /* Number of bar frames received */ + uint32_t bar_frame; + + struct { + /* Invalid RBM error count */ + uint32_t invalid_rbm; + /* Invalid VDEV Error count */ + uint32_t invalid_vdev; + /* Invalid PDEV error count */ + uint32_t invalid_pdev; + + /* Packets delivered to stack that no related peer */ + uint32_t pkt_delivered_no_peer; + /* Defrag peer uninit error count */ + uint32_t defrag_peer_uninit; + /* Invalid sa_idx or da_idx*/ + uint32_t invalid_sa_da_idx; + /* MSDU DONE failures */ + uint32_t msdu_done_fail; + /* Invalid PEER Error count */ + struct cdp_pkt_info rx_invalid_peer; + /* Invalid PEER ID count */ + struct cdp_pkt_info rx_invalid_peer_id; + /* Invalid packet length */ + struct cdp_pkt_info rx_invalid_pkt_len; + /* HAL ring access Fail error count */ + uint32_t hal_ring_access_fail; + /* HAL ring access full Fail error count */ + uint32_t hal_ring_access_full_fail; + /* RX DMA error count */ + uint32_t rxdma_error[HAL_RXDMA_ERR_MAX]; + /* RX REO DEST Desc Invalid Magic count */ + uint32_t rx_desc_invalid_magic; + /* REO Error count */ + uint32_t reo_error[HAL_REO_ERR_MAX]; + /* HAL REO ERR Count */ + uint32_t hal_reo_error[MAX_REO_DEST_RINGS]; + /* HAL REO DEST Duplicate count */ + uint32_t hal_reo_dest_dup; + /* HAL WBM RELEASE Duplicate count */ + uint32_t hal_wbm_rel_dup; + /* HAL RXDMA error Duplicate count */ + uint32_t hal_rxdma_err_dup; + /* REO cmd send fail/requeue count */ + uint32_t reo_cmd_send_fail; + /* REO cmd send drain count */ + uint32_t reo_cmd_send_drain; + /* RX msdu drop count due to scatter */ + uint32_t scatter_msdu; + /* RX msdu drop count due to invalid cookie */ + uint32_t invalid_cookie; + /* Count of stale cookie read in RX path */ + uint32_t stale_cookie; + /* Delba sent count due to RX 2k jump */ + uint32_t rx_2k_jump_delba_sent; + /* RX 2k jump msdu indicated to stack count */ + uint32_t rx_2k_jump_to_stack; + /* RX 2k jump msdu dropped count */ + uint32_t rx_2k_jump_drop; + /* REO OOR msdu drop count */ + uint32_t reo_err_oor_drop; + /* REO OOR msdu indicated to stack count */ + uint32_t reo_err_oor_to_stack; + /* REO OOR scattered msdu count */ + uint32_t reo_err_oor_sg_count; + /* RX msdu rejected count on delivery to vdev stack_fn*/ + uint32_t rejected; + /* Incorrect msdu count in MPDU desc info */ + uint32_t msdu_count_mismatch; + /* RX raw frame dropped count */ + uint32_t raw_frm_drop; + /* Stale link desc cookie count*/ + uint32_t invalid_link_cookie; + /* Nbuf sanity failure */ + uint32_t nbuf_sanity_fail; + /* Duplicate link desc refilled */ + uint32_t dup_refill_link_desc; + /* Non Eapol pkt drop cnt due to peer not authorized */ + uint32_t peer_unauth_rx_pkt_drop; + /* EAPOL drop count in intrabss scenario */ + uint32_t intrabss_eapol_drop; + /* Non Eapol pkt drop cnt due to peer not authorized */ + uint32_t peer_unauth_rx_pkt_drop; + /* MSDU len err count */ + uint32_t msdu_len_err; + /* Rx invalid tid count */ + uint32_t rx_invalid_tid_err; + } err; + + /* packet count per core - per ring */ + uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS]; + } rx; + +#ifdef WLAN_FEATURE_DP_EVENT_HISTORY + struct reo_cmd_event_history cmd_event_history; +#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ +}; + +union dp_align_mac_addr { + uint8_t raw[QDF_MAC_ADDR_SIZE]; + struct { + uint16_t bytes_ab; + uint16_t bytes_cd; + uint16_t bytes_ef; + } align2; + struct { + uint32_t bytes_abcd; + uint16_t bytes_ef; + } align4; + struct __attribute__((__packed__)) { + uint16_t bytes_ab; + uint32_t bytes_cdef; + } align4_2; +}; + +/** + * struct dp_ast_free_cb_params - HMWDS free callback cookie + * @mac_addr: ast mac address + * @peer_mac_addr: mac address of peer + * @type: ast entry type + * @vdev_id: vdev_id + * @flags: ast flags + */ +struct dp_ast_free_cb_params { + union dp_align_mac_addr mac_addr; + union dp_align_mac_addr peer_mac_addr; + enum cdp_txrx_ast_entry_type type; + uint8_t vdev_id; + uint32_t flags; +}; + +/* + * dp_ast_entry + * + * @ast_idx: Hardware AST Index + * @mac_addr: MAC Address for this AST entry + * @peer: Next Hop peer (for non-WDS nodes, this will be point to + * associated peer with this MAC address) + * @next_hop: Set to 1 if this is for a WDS node + * @is_active: flag to indicate active data traffic on this node + * (used for aging out/expiry) + * @ase_list_elem: node in peer AST list + * @is_bss: flag to indicate if entry corresponds to bss peer + * @is_mapped: flag to indicate that we have mapped the AST entry + * in ast_table + * @pdev_id: pdev ID + * @vdev_id: vdev ID + * @ast_hash_value: hast value in HW + * @ref_cnt: reference count + * @type: flag to indicate type of the entry(static/WDS/MEC) + * @delete_in_progress: Flag to indicate that delete commands send to FW + * and host is waiting for response from FW + * @callback: ast free/unmap callback + * @cookie: argument to callback + * @hash_list_elem: node in soc AST hash list (mac address used as hash) + */ +struct dp_ast_entry { + uint16_t ast_idx; + union dp_align_mac_addr mac_addr; + struct dp_peer *peer; + bool next_hop; + bool is_active; + bool is_mapped; + uint8_t pdev_id; + uint16_t ast_hash_value; + qdf_atomic_t ref_cnt; + enum cdp_txrx_ast_entry_type type; + bool delete_in_progress; + txrx_ast_free_cb callback; + void *cookie; + TAILQ_ENTRY(dp_ast_entry) ase_list_elem; + TAILQ_ENTRY(dp_ast_entry) hash_list_elem; +}; + +/* SOC level htt stats */ +struct htt_t2h_stats { + /* lock to protect htt_stats_msg update */ + qdf_spinlock_t lock; + + /* work queue to process htt stats */ + qdf_work_t work; + + /* T2H Ext stats message queue */ + qdf_nbuf_queue_t msg; + + /* number of completed stats in htt_stats_msg */ + uint32_t num_stats; +}; + +/* + * The logic for get current index of these history is dependent on this + * value being power of 2. + */ +#define DP_RX_HIST_MAX 2048 +#define DP_RX_ERR_HIST_MAX 4096 +#define DP_RX_REINJECT_HIST_MAX 1024 + +QDF_COMPILE_TIME_ASSERT(rx_history_size, + (DP_RX_HIST_MAX & + (DP_RX_HIST_MAX - 1)) == 0); +QDF_COMPILE_TIME_ASSERT(rx_err_history_size, + (DP_RX_ERR_HIST_MAX & + (DP_RX_ERR_HIST_MAX - 1)) == 0); +QDF_COMPILE_TIME_ASSERT(rx_reinject_history_size, + (DP_RX_REINJECT_HIST_MAX & + (DP_RX_REINJECT_HIST_MAX - 1)) == 0); + +/** + * struct dp_buf_info_record - ring buffer info + * @hbi: HW ring buffer info + * @timestamp: timestamp when this entry was recorded + */ +struct dp_buf_info_record { + struct hal_buf_info hbi; + uint64_t timestamp; +}; + +/* struct dp_rx_history - rx ring hisotry + * @index: Index where the last entry is written + * @entry: history entries + */ +struct dp_rx_history { + qdf_atomic_t index; + struct dp_buf_info_record entry[DP_RX_HIST_MAX]; +}; + +/* struct dp_rx_err_history - rx err ring hisotry + * @index: Index where the last entry is written + * @entry: history entries + */ +struct dp_rx_err_history { + qdf_atomic_t index; + struct dp_buf_info_record entry[DP_RX_ERR_HIST_MAX]; +}; + +/* struct dp_rx_reinject_history - rx reinject ring hisotry + * @index: Index where the last entry is written + * @entry: history entries + */ +struct dp_rx_reinject_history { + qdf_atomic_t index; + struct dp_buf_info_record entry[DP_RX_REINJECT_HIST_MAX]; +}; + +/* structure to record recent operation related variable */ +struct dp_last_op_info { + /* last link desc buf info through WBM release ring */ + struct hal_buf_info wbm_rel_link_desc; + /* last link desc buf info through REO reinject ring */ + struct hal_buf_info reo_reinject_link_desc; +}; + +/* SOC level structure for data path */ +struct dp_soc { + /** + * re-use memory section starts + */ + + /* Common base structure - Should be the first member */ + struct cdp_soc_t cdp_soc; + + /* SoC Obj */ + struct cdp_ctrl_objmgr_psoc *ctrl_psoc; + + /* OS device abstraction */ + qdf_device_t osdev; + + /* WLAN config context */ + struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx; + + /* HTT handle for host-fw interaction */ + struct htt_soc *htt_handle; + + /* Commint init done */ + qdf_atomic_t cmn_init_done; + + /* Opaque hif handle */ + struct hif_opaque_softc *hif_handle; + + /* PDEVs on this SOC */ + struct dp_pdev *pdev_list[MAX_PDEV_CNT]; + + /* Ring used to replenish rx buffers (maybe to the firmware of MAC) */ + struct dp_srng rx_refill_buf_ring[MAX_PDEV_CNT]; + + struct dp_srng rxdma_mon_desc_ring[MAX_NUM_LMAC_HW]; + + /* RXDMA error destination ring */ + struct dp_srng rxdma_err_dst_ring[MAX_NUM_LMAC_HW]; + + /* Link descriptor memory banks */ + struct { + void *base_vaddr_unaligned; + void *base_vaddr; + qdf_dma_addr_t base_paddr_unaligned; + qdf_dma_addr_t base_paddr; + uint32_t size; + } mon_link_desc_banks[MAX_NUM_LMAC_HW][MAX_MON_LINK_DESC_BANKS]; + + /* RXDMA monitor buffer replenish ring */ + struct dp_srng rxdma_mon_buf_ring[MAX_NUM_LMAC_HW]; + + /* RXDMA monitor destination ring */ + struct dp_srng rxdma_mon_dst_ring[MAX_NUM_LMAC_HW]; + + /* RXDMA monitor status ring. TBD: Check format of this ring */ + struct dp_srng rxdma_mon_status_ring[MAX_NUM_LMAC_HW]; + + /* Number of PDEVs */ + uint8_t pdev_count; + + /*cce disable*/ + bool cce_disable; + + /*ast override support in HW*/ + bool ast_override_support; + + /*number of hw dscp tid map*/ + uint8_t num_hw_dscp_tid_map; + + /* HAL SOC handle */ + hal_soc_handle_t hal_soc; + + /* Device ID coming from Bus sub-system */ + uint32_t device_id; + + /* Link descriptor pages */ + struct qdf_mem_multi_page_t link_desc_pages; + + /* Link descriptor Idle list for HW internal use (SRNG mode) */ + struct dp_srng wbm_idle_link_ring; + + /* Link descriptor Idle list for HW internal use (scatter buffer mode) + */ + qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS]; + void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS]; + + /* Tx SW descriptor pool */ + struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS]; + + /* Tx MSDU Extension descriptor pool */ + struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS]; + + /* Tx TSO descriptor pool */ + struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS]; + + /* Tx TSO Num of segments pool */ + struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS]; + + /* REO destination rings */ + struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS]; + + /* REO exception ring - See if should combine this with reo_dest_ring */ + struct dp_srng reo_exception_ring; + + /* REO reinjection ring */ + struct dp_srng reo_reinject_ring; + + /* REO command ring */ + struct dp_srng reo_cmd_ring; + + /* REO command status ring */ + struct dp_srng reo_status_ring; + + /* WBM Rx release ring */ + struct dp_srng rx_rel_ring; + + /* TCL data ring */ + struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS]; + + /* Number of TCL data rings */ + uint8_t num_tcl_data_rings; + + /* TCL command ring */ + struct dp_srng tcl_cmd_ring; + + /* TCL command status ring */ + struct dp_srng tcl_status_ring; + + /* WBM Tx completion rings */ + struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS]; + + /* Common WBM link descriptor release ring (SW to WBM) */ + struct dp_srng wbm_desc_rel_ring; + + /* DP Interrupts */ + struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS]; + + /* Rx SW descriptor pool for RXDMA monitor buffer */ + struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS]; + + /* Rx SW descriptor pool for RXDMA status buffer */ + struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS]; + + /* Rx SW descriptor pool for RXDMA buffer */ + struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS]; + + /* Number of REO destination rings */ + uint8_t num_reo_dest_rings; + + struct dp_rx_history *rx_ring_history[MAX_REO_DEST_RINGS]; + struct dp_rx_err_history *rx_err_ring_history; + struct dp_rx_reinject_history *rx_reinject_ring_history; + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 + /* lock to control access to soc TX descriptors */ + qdf_spinlock_t flow_pool_array_lock; + + /* pause callback to pause TX queues as per flow control */ + tx_pause_callback pause_cb; + + /* flow pool related statistics */ + struct dp_txrx_pool_stats pool_stats; +#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ + + /* + * Re-use memory section ends. reuse memory indicator. + * Everything above this variable "dp_soc_reinit" is retained across + * WiFi up/down for AP use-cases. + * Everything below this variable "dp_soc_reinit" is reset during + * dp_soc_deinit. + */ + bool dp_soc_reinit; + + uint32_t wbm_idle_scatter_buf_size; + + /* VDEVs on this SOC */ + struct dp_vdev *vdev_id_map[MAX_VDEV_CNT]; + + /* Tx H/W queues lock */ + qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES]; + + /* Tx ring map for interrupt processing */ + uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS]; + + /* Rx ring map for interrupt processing */ + uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS]; + + /* peer ID to peer object map (array of pointers to peer objects) */ + struct dp_peer **peer_id_to_obj_map; + + struct { + unsigned mask; + unsigned idx_bits; + TAILQ_HEAD(, dp_peer) * bins; + } peer_hash; + + /* rx defrag state – TBD: do we need this per radio? */ + struct { + struct { + TAILQ_HEAD(, dp_rx_tid) waitlist; + uint32_t timeout_ms; + uint32_t next_flush_ms; + qdf_spinlock_t defrag_lock; + } defrag; + struct { + int defrag_timeout_check; + int dup_check; + } flags; + TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list; + qdf_spinlock_t reo_cmd_lock; + } rx; + + /* optional rx processing function */ + void (*rx_opt_proc)( + struct dp_vdev *vdev, + struct dp_peer *peer, + unsigned tid, + qdf_nbuf_t msdu_list); + + /* pool addr for mcast enhance buff */ + struct { + int size; + uint32_t paddr; + uint32_t *vaddr; + struct dp_tx_me_buf_t *freelist; + int buf_in_use; + qdf_dma_mem_context(memctx); + } me_buf; + + /** + * peer ref mutex: + * 1. Protect peer object lookups until the returned peer object's + * reference count is incremented. + * 2. Provide mutex when accessing peer object lookup structures. + */ + DP_MUTEX_TYPE peer_ref_mutex; + + /* maximum value for peer_id */ + uint32_t max_peers; + + /* SoC level data path statistics */ + struct dp_soc_stats stats; + + /* Enable processing of Tx completion status words */ + bool process_tx_status; + bool process_rx_status; + struct dp_ast_entry **ast_table; + struct { + unsigned mask; + unsigned idx_bits; + TAILQ_HEAD(, dp_ast_entry) * bins; + } ast_hash; + + qdf_spinlock_t ast_lock; + /*Timer for AST entry ageout maintainance */ + qdf_timer_t ast_aging_timer; + + /*Timer counter for WDS AST entry ageout*/ + uint8_t wds_ast_aging_timer_cnt; + + /*interrupt timer*/ + qdf_timer_t mon_reap_timer; + uint8_t reap_timer_init; + qdf_timer_t lmac_reap_timer; + uint8_t lmac_timer_init; + qdf_timer_t int_timer; + uint8_t intr_mode; + uint8_t lmac_polled_mode; + + qdf_list_t reo_desc_freelist; + qdf_spinlock_t reo_desc_freelist_lock; + + /* htt stats */ + struct htt_t2h_stats htt_stats; + + void *external_txrx_handle; /* External data path handle */ +#ifdef IPA_OFFLOAD + /* IPA uC datapath offload Wlan Tx resources */ + struct { + /* Resource info to be passed to IPA */ + qdf_dma_addr_t ipa_tcl_ring_base_paddr; + void *ipa_tcl_ring_base_vaddr; + uint32_t ipa_tcl_ring_size; + qdf_dma_addr_t ipa_tcl_hp_paddr; + uint32_t alloc_tx_buf_cnt; + + qdf_dma_addr_t ipa_wbm_ring_base_paddr; + void *ipa_wbm_ring_base_vaddr; + uint32_t ipa_wbm_ring_size; + qdf_dma_addr_t ipa_wbm_tp_paddr; + /* WBM2SW HP shadow paddr */ + qdf_dma_addr_t ipa_wbm_hp_shadow_paddr; + + /* TX buffers populated into the WBM ring */ + void **tx_buf_pool_vaddr_unaligned; + qdf_dma_addr_t *tx_buf_pool_paddr_unaligned; + } ipa_uc_tx_rsc; + + /* IPA uC datapath offload Wlan Rx resources */ + struct { + /* Resource info to be passed to IPA */ + qdf_dma_addr_t ipa_reo_ring_base_paddr; + void *ipa_reo_ring_base_vaddr; + uint32_t ipa_reo_ring_size; + qdf_dma_addr_t ipa_reo_tp_paddr; + + /* Resource info to be passed to firmware and IPA */ + qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr; + void *ipa_rx_refill_buf_ring_base_vaddr; + uint32_t ipa_rx_refill_buf_ring_size; + qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr; + } ipa_uc_rx_rsc; + + qdf_atomic_t ipa_pipes_enabled; + bool ipa_first_tx_db_access; +#endif + +#ifdef WLAN_FEATURE_STATS_EXT + struct { + uint32_t rx_mpdu_received; + uint32_t rx_mpdu_missed; + } ext_stats; + qdf_event_t rx_hw_stats_event; + qdf_spinlock_t rx_hw_stats_lock; + bool is_last_stats_ctx_init; +#endif /* WLAN_FEATURE_STATS_EXT */ + + /* Smart monitor capability for HKv2 */ + uint8_t hw_nac_monitor_support; + /* Flag to indicate if HTT v2 is enabled*/ + bool is_peer_map_unmap_v2; + /* Per peer per Tid ba window size support */ + uint8_t per_tid_basize_max_tid; + /* Soc level flag to enable da_war */ + uint8_t da_war_enabled; + /* number of active ast entries */ + uint32_t num_ast_entries; + /* rdk rate statistics context at soc level*/ + struct cdp_soc_rate_stats_ctx *rate_stats_ctx; + /* rdk rate statistics control flag */ + bool wlanstats_enabled; + + /* 8021p PCP-TID map values */ + uint8_t pcp_tid_map[PCP_TID_MAP_MAX]; + /* TID map priority value */ + uint8_t tidmap_prty; + /* Pointer to global per ring type specific configuration table */ + struct wlan_srng_cfg *wlan_srng_cfg; + /* Num Tx outstanding on device */ + qdf_atomic_t num_tx_outstanding; + /* Num Tx allowed */ + uint32_t num_tx_allowed; + + /** + * Flag to indicate whether WAR to address single cache entry + * invalidation bug is enabled or not + */ + bool is_rx_fse_full_cache_invalidate_war_enabled; +#if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA) + /** + * Pointer to DP RX Flow FST at SOC level if + * is_rx_flow_search_table_per_pdev is false + * TBD: rx_fst[num_macs] if we decide to have per mac FST + */ + struct dp_rx_fst *rx_fst; +#ifdef WLAN_SUPPORT_RX_FISA + uint8_t fisa_enable; + + /** + * Params used for controlling the fisa aggregation dynamically + */ + struct { + qdf_atomic_t skip_fisa; + uint8_t fisa_force_flush[MAX_REO_DEST_RINGS]; + } skip_fisa_param; +#endif +#endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */ + /* Save recent operation related variable */ + struct dp_last_op_info last_op_info; +#ifdef FEATURE_RUNTIME_PM + /* Dp runtime refcount */ + qdf_atomic_t dp_runtime_refcount; +#endif +}; + +#ifdef IPA_OFFLOAD +/** + * dp_ipa_resources - Resources needed for IPA + */ +struct dp_ipa_resources { + qdf_shared_mem_t tx_ring; + uint32_t tx_num_alloc_buffer; + + qdf_shared_mem_t tx_comp_ring; + qdf_shared_mem_t rx_rdy_ring; + qdf_shared_mem_t rx_refill_ring; + + /* IPA UC doorbell registers paddr */ + qdf_dma_addr_t tx_comp_doorbell_paddr; + uint32_t *tx_comp_doorbell_vaddr; + qdf_dma_addr_t rx_ready_doorbell_paddr; +}; +#endif + +#define MAX_RX_MAC_RINGS 2 +/* Same as NAC_MAX_CLENT */ +#define DP_NAC_MAX_CLIENT 24 + +/* + * 24 bits cookie size + * 10 bits page id 0 ~ 1023 for MCL + * 3 bits page id 0 ~ 7 for WIN + * WBM Idle List Desc size = 128, + * Num descs per page = 4096/128 = 32 for MCL + * Num descs per page = 2MB/128 = 16384 for WIN + */ +/* + * Macros to setup link descriptor cookies - for link descriptors, we just + * need first 3 bits to store bank/page ID for WIN. The + * remaining bytes will be used to set a unique ID, which will + * be useful in debugging + */ +#ifdef MAX_ALLOC_PAGE_SIZE +#define LINK_DESC_PAGE_ID_MASK 0x007FE0 +#define LINK_DESC_ID_SHIFT 5 +#define LINK_DESC_COOKIE(_desc_id, _page_id) \ + ((((_page_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_desc_id)) +#define LINK_DESC_COOKIE_PAGE_ID(_cookie) \ + (((_cookie) & LINK_DESC_PAGE_ID_MASK) >> LINK_DESC_ID_SHIFT) +#else +#define LINK_DESC_PAGE_ID_MASK 0x7 +#define LINK_DESC_ID_SHIFT 3 +#define LINK_DESC_COOKIE(_desc_id, _page_id) \ + ((((_desc_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_page_id)) +#define LINK_DESC_COOKIE_PAGE_ID(_cookie) \ + ((_cookie) & LINK_DESC_PAGE_ID_MASK) +#endif +#define LINK_DESC_ID_START 0x8000 + +/* same as ieee80211_nac_param */ +enum dp_nac_param_cmd { + /* IEEE80211_NAC_PARAM_ADD */ + DP_NAC_PARAM_ADD = 1, + /* IEEE80211_NAC_PARAM_DEL */ + DP_NAC_PARAM_DEL, + /* IEEE80211_NAC_PARAM_LIST */ + DP_NAC_PARAM_LIST, +}; + +/** + * struct dp_neighbour_peer - neighbour peer list type for smart mesh + * @neighbour_peers_macaddr: neighbour peer's mac address + * @neighbour_peer_list_elem: neighbour peer list TAILQ element + * @ast_entry: ast_entry for neighbour peer + * @rssi: rssi value + */ +struct dp_neighbour_peer { + /* MAC address of neighbour's peer */ + union dp_align_mac_addr neighbour_peers_macaddr; + struct dp_vdev *vdev; + struct dp_ast_entry *ast_entry; + uint8_t rssi; + /* node in the list of neighbour's peer */ + TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem; +}; + +#ifdef WLAN_TX_PKT_CAPTURE_ENH +#define WLAN_TX_PKT_CAPTURE_ENH 1 +#define DP_TX_PPDU_PROC_THRESHOLD 8 +#define DP_TX_PPDU_PROC_TIMEOUT 10 +#endif + +/** + * struct ppdu_info - PPDU Status info descriptor + * @ppdu_id - Unique ppduid assigned by firmware for every tx packet + * @sched_cmdid - schedule command id, which will be same in a burst + * @max_ppdu_id - wrap around for ppdu id + * @last_tlv_cnt - Keep track for missing ppdu tlvs + * @last_user - last ppdu processed for user + * @is_ampdu - set if Ampdu aggregate + * @nbuf - ppdu descriptor payload + * @ppdu_desc - ppdu descriptor + * @ppdu_info_list_elem - linked list of ppdu tlvs + * @ppdu_info_queue_elem - Singly linked list (queue) of ppdu tlvs + * @mpdu_compltn_common_tlv - Successful MPDU counter from COMPLTN COMMON tlv + * @mpdu_ack_ba_tlv - Successful MPDU from ACK BA tlv + */ +struct ppdu_info { + uint32_t ppdu_id; + uint32_t sched_cmdid; + uint32_t max_ppdu_id; + uint16_t tlv_bitmap; + uint16_t last_tlv_cnt; + uint16_t last_user:8, + is_ampdu:1; + qdf_nbuf_t nbuf; + struct cdp_tx_completion_ppdu *ppdu_desc; +#ifdef WLAN_TX_PKT_CAPTURE_ENH + union { + TAILQ_ENTRY(ppdu_info) ppdu_info_dlist_elem; + STAILQ_ENTRY(ppdu_info) ppdu_info_slist_elem; + } ulist; +#define ppdu_info_list_elem ulist.ppdu_info_dlist_elem +#define ppdu_info_queue_elem ulist.ppdu_info_slist_elem +#else + TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem; +#endif + uint16_t mpdu_compltn_common_tlv; + uint16_t mpdu_ack_ba_tlv; +}; + +/** + * struct msdu_completion_info - wbm msdu completion info + * @ppdu_id - Unique ppduid assigned by firmware for every tx packet + * @peer_id - peer_id + * @tid - tid which used during transmit + * @first_msdu - first msdu indication + * @last_msdu - last msdu indication + * @msdu_part_of_amsdu - msdu part of amsdu + * @transmit_cnt - retried count + * @status - transmit status + * @tsf - timestamp which it transmitted + */ +struct msdu_completion_info { + uint32_t ppdu_id; + uint16_t peer_id; + uint8_t tid; + uint8_t first_msdu:1, + last_msdu:1, + msdu_part_of_amsdu:1; + uint8_t transmit_cnt; + uint8_t status; + uint32_t tsf; +}; + +#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG +struct rx_protocol_tag_map { + /* This is the user configured tag for the said protocol type */ + uint16_t tag; +}; + +#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS +struct rx_protocol_tag_stats { + uint32_t tag_ctr; +}; +#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */ + +#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */ + +#ifndef WLAN_TX_PKT_CAPTURE_ENH +struct dp_pdev_tx_capture { +}; + +struct dp_peer_tx_capture { +}; +#endif +#ifdef WLAN_RX_PKT_CAPTURE_ENH +/* Template data to be set for Enhanced RX Monitor packets */ +#define RX_MON_CAP_ENH_TRAILER 0xdeadc0dedeadda7a + +/** + * struct dp_rx_mon_enh_trailer_data - Data structure to set a known pattern + * at end of each MSDU in monitor-lite mode + * @reserved1: reserved for future use + * @reserved2: reserved for future use + * @flow_tag: flow tag value read from skb->cb + * @protocol_tag: protocol tag value read from skb->cb + */ +struct dp_rx_mon_enh_trailer_data { + uint16_t reserved1; + uint16_t reserved2; + uint16_t flow_tag; + uint16_t protocol_tag; +}; +#endif /* WLAN_RX_PKT_CAPTURE_ENH */ + +/* PDEV level structure for data path */ +struct dp_pdev { + /** + * Re-use Memory Section Starts + */ + + /* PDEV Id */ + int pdev_id; + + /* LMAC Id */ + int lmac_id; + + /* TXRX SOC handle */ + struct dp_soc *soc; + + /* Stuck count on monitor destination ring MPDU process */ + uint32_t mon_dest_ring_stuck_cnt; + + /* + * re-use memory section ends + * reuse memory/deinit indicator + * + * DO NOT CHANGE NAME OR MOVE THIS VARIABLE + */ + bool pdev_deinit; + + /* pdev status down or up required to handle dynamic hw + * mode switch between DBS and DBS_SBS. + * 1 = down + * 0 = up + */ + bool is_pdev_down; + + /* Second ring used to replenish rx buffers */ + struct dp_srng rx_refill_buf_ring2; + + /* Empty ring used by firmware to post rx buffers to the MAC */ + struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS]; + + /* wlan_cfg pdev ctxt*/ + struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx; + + /** + * TODO: See if we need a ring map here for LMAC rings. + * 1. Monitor rings are currently planning to be processed on receiving + * PPDU end interrupts and hence wont need ring based interrupts. + * 2. Rx buffer rings will be replenished during REO destination + * processing and doesn't require regular interrupt handling - we will + * only handle low water mark interrupts which is not expected + * frequently + */ + + /* VDEV list */ + TAILQ_HEAD(, dp_vdev) vdev_list; + + /* vdev list lock */ + qdf_spinlock_t vdev_list_lock; + + /* Number of vdevs this device have */ + uint16_t vdev_count; + + /* PDEV transmit lock */ + qdf_spinlock_t tx_lock; + +#ifndef REMOVE_PKT_LOG + bool pkt_log_init; + /* Pktlog pdev */ + struct pktlog_dev_t *pl_dev; +#endif /* #ifndef REMOVE_PKT_LOG */ + + /* Monitor mode interface and status storage */ + struct dp_vdev *monitor_vdev; + + /* Monitor mode operation channel */ + int mon_chan_num; + + /* Monitor mode operation frequency */ + qdf_freq_t mon_chan_freq; + + /* monitor mode lock */ + qdf_spinlock_t mon_lock; + + /*tx_mutex for me*/ + DP_MUTEX_TYPE tx_mutex; + + /* monitor */ + bool monitor_configured; + + /* Smart Mesh */ + bool filter_neighbour_peers; + + /*flag to indicate neighbour_peers_list not empty */ + bool neighbour_peers_added; + /* smart mesh mutex */ + qdf_spinlock_t neighbour_peer_mutex; + /* Neighnour peer list */ + TAILQ_HEAD(, dp_neighbour_peer) neighbour_peers_list; + /* msdu chain head & tail */ + qdf_nbuf_t invalid_peer_head_msdu; + qdf_nbuf_t invalid_peer_tail_msdu; + + /* Band steering */ + /* TBD */ + + /* PDEV level data path statistics */ + struct cdp_pdev_stats stats; + + /* Global RX decap mode for the device */ + enum htt_pkt_type rx_decap_mode; + + /* Enhanced Stats is enabled */ + bool enhanced_stats_en; + + /* advance filter mode and type*/ + uint8_t mon_filter_mode; + uint16_t fp_mgmt_filter; + uint16_t fp_ctrl_filter; + uint16_t fp_data_filter; + uint16_t mo_mgmt_filter; + uint16_t mo_ctrl_filter; + uint16_t mo_data_filter; + uint16_t md_data_filter; + + qdf_atomic_t num_tx_outstanding; + + qdf_atomic_t num_tx_exception; + + /* MCL specific local peer handle */ + struct { + uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1]; + uint8_t freelist; + qdf_spinlock_t lock; + struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS]; + } local_peer_ids; + + /* dscp_tid_map_*/ + uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX]; + + struct hal_rx_ppdu_info ppdu_info; + + /* operating channel */ + uint8_t operating_channel; + + qdf_nbuf_queue_t rx_status_q; + uint32_t mon_ppdu_status; + struct cdp_mon_status rx_mon_recv_status; + /* monitor mode status/destination ring PPDU and MPDU count */ + struct cdp_pdev_mon_stats rx_mon_stats; + /* to track duplicate link descriptor indications by HW for a WAR */ + uint64_t mon_last_linkdesc_paddr; + /* to track duplicate buffer indications by HW for a WAR */ + uint32_t mon_last_buf_cookie; + /* 128 bytes mpdu header queue per user for ppdu */ + qdf_nbuf_queue_t mpdu_q[MAX_MU_USERS]; + /* is this a mpdu header TLV and not msdu header TLV */ + bool is_mpdu_hdr[MAX_MU_USERS]; + /* per user 128 bytes msdu header list for MPDU */ + struct msdu_list msdu_list[MAX_MU_USERS]; + /* RX enhanced capture mode */ + uint8_t rx_enh_capture_mode; + /* Rx per peer enhanced capture mode */ + bool rx_enh_capture_peer; + struct dp_vdev *rx_enh_monitor_vdev; + /* RX enhanced capture trailer enable/disable flag */ + bool is_rx_enh_capture_trailer_enabled; +#ifdef WLAN_RX_PKT_CAPTURE_ENH + /* RX per MPDU/PPDU information */ + struct cdp_rx_indication_mpdu mpdu_ind; +#endif + /* pool addr for mcast enhance buff */ + struct { + int size; + uint32_t paddr; + char *vaddr; + struct dp_tx_me_buf_t *freelist; + int buf_in_use; + qdf_dma_mem_context(memctx); + } me_buf; + + bool hmmc_tid_override_en; + uint8_t hmmc_tid; + + /* Number of VAPs with mcast enhancement enabled */ + qdf_atomic_t mc_num_vap_attached; + + qdf_atomic_t stats_cmd_complete; + +#ifdef IPA_OFFLOAD + ipa_uc_op_cb_type ipa_uc_op_cb; + void *usr_ctxt; + struct dp_ipa_resources ipa_resource; +#endif + + /* TBD */ + + /* map this pdev to a particular Reo Destination ring */ + enum cdp_host_reo_dest_ring reo_dest; + + /* Packet log mode */ + uint8_t rx_pktlog_mode; + + /* WDI event handlers */ + struct wdi_event_subscribe_t **wdi_event_list; + + /* ppdu_id of last received HTT TX stats */ + uint32_t last_ppdu_id; + struct { + uint8_t last_user; + qdf_nbuf_t buf; + } tx_ppdu_info; + + bool tx_sniffer_enable; + /* mirror copy mode */ + bool mcopy_mode; + bool cfr_rcc_mode; + bool enable_reap_timer_non_pkt; + bool bpr_enable; + + /* enable time latency check for tx completion */ + bool latency_capture_enable; + + /* enable calculation of delay stats*/ + bool delay_stats_flag; + struct { + uint16_t tx_ppdu_id; + uint16_t tx_peer_id; + uint16_t rx_ppdu_id; + } m_copy_id; + + /* To check if PPDU Tx stats are enabled for Pktlog */ + bool pktlog_ppdu_stats; + + void *dp_txrx_handle; /* Advanced data path handle */ + +#ifdef ATH_SUPPORT_NAC_RSSI + bool nac_rssi_filtering; +#endif + /* list of ppdu tlvs */ + TAILQ_HEAD(, ppdu_info) ppdu_info_list; + uint32_t tlv_count; + uint32_t list_depth; + uint32_t ppdu_id; + bool first_nbuf; + struct { + qdf_nbuf_t last_nbuf; /*Ptr to mgmt last buf */ + uint8_t *mgmt_buf; /* Ptr to mgmt. payload in HTT ppdu stats */ + uint32_t mgmt_buf_len; /* Len of mgmt. payload in ppdu stats */ + uint32_t ppdu_id; + } mgmtctrl_frm_info; + + /* Current noise-floor reading for the pdev channel */ + int16_t chan_noise_floor; + + /* + * For multiradio device, this flag indicates if + * this radio is primary or secondary. + * + * For HK 1.0, this is used for WAR for the AST issue. + * HK 1.x mandates creation of only 1 AST entry with same MAC address + * across 2 radios. is_primary indicates the radio on which DP should + * install HW AST entry if there is a request to add 2 AST entries + * with same MAC address across 2 radios + */ + uint8_t is_primary; + /* Context of cal client timer */ + struct cdp_cal_client *cal_client_ctx; + struct cdp_tx_sojourn_stats sojourn_stats; + qdf_nbuf_t sojourn_buf; + + /* peer pointer for collecting invalid peer stats */ + struct dp_peer *invalid_peer; + + union dp_rx_desc_list_elem_t *free_list_head; + union dp_rx_desc_list_elem_t *free_list_tail; + /* Pdev level flag to check peer based pktlog enabled or + * disabled + */ + uint8_t dp_peer_based_pktlog; + + /* Cached peer_id from htt_peer_details_tlv */ + uint16_t fw_stats_peer_id; + + /* qdf_event for fw_peer_stats */ + qdf_event_t fw_peer_stats_event; + + /* User configured max number of tx buffers */ + uint32_t num_tx_allowed; + + /* unique cookie required for peer session */ + uint32_t next_peer_cookie; + + /* + * Run time enabled when the first protocol tag is added, + * run time disabled when the last protocol tag is deleted + */ + bool is_rx_protocol_tagging_enabled; + +#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG + /* + * The protocol type is used as array index to save + * user provided tag info + */ + struct rx_protocol_tag_map rx_proto_tag_map[RX_PROTOCOL_TAG_MAX]; + +#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS + /* + * Track msdus received from each reo ring separately to avoid + * simultaneous writes from different core + */ + struct rx_protocol_tag_stats + reo_proto_tag_stats[MAX_REO_DEST_RINGS][RX_PROTOCOL_TAG_MAX]; + /* Track msdus received from expection ring separately */ + struct rx_protocol_tag_stats + rx_err_proto_tag_stats[RX_PROTOCOL_TAG_MAX]; +#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */ +#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */ + + /* tx packet capture enhancement */ + enum cdp_tx_enh_capture_mode tx_capture_enabled; + struct dp_pdev_tx_capture tx_capture; + /* stats counter for tx ppdu processed */ + uint64_t tx_ppdu_proc; + + uint32_t *ppdu_tlv_buf; /* Buffer to hold HTT ppdu stats TLVs*/ + + /* nbuf queue to maintain rx ppdu status buffer + * belonging to one ppdu + */ + qdf_nbuf_queue_t rx_ppdu_buf_q; +#ifdef WLAN_SUPPORT_RX_FLOW_TAG + /** + * Pointer to DP Flow FST at SOC level if + * is_rx_flow_search_table_per_pdev is true + */ + struct dp_rx_fst *rx_fst; +#endif /* WLAN_SUPPORT_RX_FLOW_TAG */ + +#ifdef FEATURE_TSO_STATS + /* TSO Id to index into TSO packet information */ + qdf_atomic_t tso_idx; +#endif /* FEATURE_TSO_STATS */ + +#ifdef WLAN_SUPPORT_DATA_STALL + data_stall_detect_cb data_stall_detect_callback; +#endif /* WLAN_SUPPORT_DATA_STALL */ + + struct dp_mon_filter **filter; /* Monitor Filter pointer */ +}; + +struct dp_peer; + +/* VDEV structure for data path state */ +struct dp_vdev { + /* OS device abstraction */ + qdf_device_t osdev; + /* physical device that is the parent of this virtual device */ + struct dp_pdev *pdev; + + /* HW TX Checksum Enabled Flag */ + uint8_t csum_enabled; + + /* Handle to the OS shim SW's virtual device */ + ol_osif_vdev_handle osif_vdev; + + /* vdev_id - ID used to specify a particular vdev to the target */ + uint8_t vdev_id; + + /* MAC address */ + union dp_align_mac_addr mac_addr; + + /* node in the pdev's list of vdevs */ + TAILQ_ENTRY(dp_vdev) vdev_list_elem; + + /* dp_peer list */ + TAILQ_HEAD(, dp_peer) peer_list; + + /* RX call back function to flush GRO packets*/ + ol_txrx_rx_gro_flush_ind_fp osif_gro_flush; + /* default RX call back function called by dp */ + ol_txrx_rx_fp osif_rx; + /* callback to deliver rx frames to the OS */ + ol_txrx_rx_fp osif_rx_stack; + /* Callback to handle rx fisa frames */ + ol_txrx_fisa_rx_fp osif_fisa_rx; + ol_txrx_fisa_flush_fp osif_fisa_flush; + + /* call back function to flush out queued rx packets*/ + ol_txrx_rx_flush_fp osif_rx_flush; + ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap; + ol_txrx_get_key_fp osif_get_key; + ol_txrx_tx_free_ext_fp osif_tx_free_ext; + +#ifdef notyet + /* callback to check if the msdu is an WAI (WAPI) frame */ + ol_rx_check_wai_fp osif_check_wai; +#endif + + /* proxy arp function */ + ol_txrx_proxy_arp_fp osif_proxy_arp; + + /* callback to hand rx monitor 802.11 MPDU to the OS shim */ + ol_txrx_rx_mon_fp osif_rx_mon; + + ol_txrx_mcast_me_fp me_convert; + + /* completion function used by this vdev*/ + ol_txrx_completion_fp tx_comp; + + /* deferred vdev deletion state */ + struct { + /* VDEV delete pending */ + int pending; + /* + * callback and a context argument to provide a + * notification for when the vdev is deleted. + */ + ol_txrx_vdev_delete_cb callback; + void *context; + } delete; + + /* tx data delivery notification callback function */ + struct { + ol_txrx_data_tx_cb func; + void *ctxt; + } tx_non_std_data_callback; + + + /* safe mode control to bypass the encrypt and decipher process*/ + uint32_t safemode; + + /* rx filter related */ + uint32_t drop_unenc; +#ifdef notyet + privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS]; + uint32_t filters_num; +#endif + /* TDLS Link status */ + bool tdls_link_connected; + bool is_tdls_frame; + + + /* VDEV operating mode */ + enum wlan_op_mode opmode; + + /* VDEV subtype */ + enum wlan_op_subtype subtype; + + /* Tx encapsulation type for this VAP */ + enum htt_cmn_pkt_type tx_encap_type; + /* Rx Decapsulation type for this VAP */ + enum htt_cmn_pkt_type rx_decap_type; + + /* BSS peer */ + struct dp_peer *vap_bss_peer; + + /* WDS enabled */ + bool wds_enabled; + + /* MEC enabled */ + bool mec_enabled; + + /* WDS Aging timer period */ + uint32_t wds_aging_timer_val; + + /* NAWDS enabled */ + bool nawds_enabled; + + /* Default HTT meta data for this VDEV */ + /* TBD: check alignment constraints */ + uint16_t htt_tcl_metadata; + + /* Mesh mode vdev */ + uint32_t mesh_vdev; + + /* Mesh mode rx filter setting */ + uint32_t mesh_rx_filter; + + /* DSCP-TID mapping table ID */ + uint8_t dscp_tid_map_id; + + /* Multicast enhancement enabled */ + uint8_t mcast_enhancement_en; + + /* per vdev rx nbuf queue */ + qdf_nbuf_queue_t rxq; + + uint8_t tx_ring_id; + struct dp_tx_desc_pool_s *tx_desc; + struct dp_tx_ext_desc_pool_s *tx_ext_desc; + + /* VDEV Stats */ + struct cdp_vdev_stats stats; + + /* Is this a proxySTA VAP */ + bool proxysta_vdev; + /* Is isolation mode enabled */ + bool isolation_vdev; + + /* Address search flags to be configured in HAL descriptor */ + uint8_t hal_desc_addr_search_flags; +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 + struct dp_tx_desc_pool_s *pool; +#endif + /* AP BRIDGE enabled */ + bool ap_bridge_enabled; + + enum cdp_sec_type sec_type; + + /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */ + bool raw_mode_war; + + /* Address search type to be set in TX descriptor */ + uint8_t search_type; + + /* AST hash value for BSS peer in HW valid for STA VAP*/ + uint16_t bss_ast_hash; + + /* AST hash index for BSS peer in HW valid for STA VAP*/ + uint16_t bss_ast_idx; + + /* Capture timestamp of previous tx packet enqueued */ + uint64_t prev_tx_enq_tstamp; + + /* Capture timestamp of previous rx packet delivered */ + uint64_t prev_rx_deliver_tstamp; + + /* 8021p PCP-TID mapping table ID */ + uint8_t tidmap_tbl_id; + + /* 8021p PCP-TID map values */ + uint8_t pcp_tid_map[PCP_TID_MAP_MAX]; + + /* TIDmap priority */ + uint8_t tidmap_prty; + /* Self Peer in STA mode */ + struct dp_peer *vap_self_peer; + + bool multipass_en; +#ifdef QCA_MULTIPASS_SUPPORT + uint16_t *iv_vlan_map; + + /* dp_peer special list */ + TAILQ_HEAD(, dp_peer) mpass_peer_list; + DP_MUTEX_TYPE mpass_peer_mutex; +#endif + /* Extended data path handle */ + struct cdp_ext_vdev *vdev_dp_ext_handle; +#ifdef VDEV_PEER_PROTOCOL_COUNT + /* + * Rx-Ingress and Tx-Egress are in the lower level DP layer + * Rx-Egress and Tx-ingress are handled in osif layer for DP + * So + * Rx-Egress and Tx-ingress mask definitions are in OSIF layer + * Rx-Ingress and Tx-Egress definitions are here below + */ +#define VDEV_PEER_PROTOCOL_RX_INGRESS_MASK 1 +#define VDEV_PEER_PROTOCOL_TX_INGRESS_MASK 2 +#define VDEV_PEER_PROTOCOL_RX_EGRESS_MASK 4 +#define VDEV_PEER_PROTOCOL_TX_EGRESS_MASK 8 + bool peer_protocol_count_track; + int peer_protocol_count_dropmask; +#endif + + /* vap bss peer mac addr */ + uint8_t vap_bss_peer_mac_addr[QDF_MAC_ADDR_SIZE]; + +#ifdef WLAN_SUPPORT_RX_FISA + /** + * Params used for controlling the fisa aggregation dynamically + */ + uint8_t fisa_disallowed[MAX_REO_DEST_RINGS]; + uint8_t fisa_force_flushed[MAX_REO_DEST_RINGS]; +#endif +}; + + +enum { + dp_sec_mcast = 0, + dp_sec_ucast +}; + +#ifdef WDS_VENDOR_EXTENSION +typedef struct { + uint8_t wds_tx_mcast_4addr:1, + wds_tx_ucast_4addr:1, + wds_rx_filter:1, /* enforce rx filter */ + wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames */ + wds_rx_mcast_4addr:1; /* when set, accept 4addr multicast frames */ + +} dp_ecm_policy; +#endif + +/* + * struct dp_peer_cached_bufq - cached_bufq to enqueue rx packets + * @cached_bufq: nbuff list to enqueue rx packets + * @bufq_lock: spinlock for nbuff list access + * @thres: maximum threshold for number of rx buff to enqueue + * @entries: number of entries + * @dropped: number of packets dropped + */ +struct dp_peer_cached_bufq { + qdf_list_t cached_bufq; + qdf_spinlock_t bufq_lock; + uint32_t thresh; + uint32_t entries; + uint32_t dropped; +}; + +/** + * enum dp_peer_ast_flowq + * @DP_PEER_AST_FLOWQ_HI_PRIO: Hi Priority flow queue + * @DP_PEER_AST_FLOWQ_LOW_PRIO: Low priority flow queue + * @DP_PEER_AST_FLOWQ_UDP: flow queue type is UDP + * @DP_PEER_AST_FLOWQ_NON_UDP: flow queue type is Non UDP + */ +enum dp_peer_ast_flowq { + DP_PEER_AST_FLOWQ_HI_PRIO, + DP_PEER_AST_FLOWQ_LOW_PRIO, + DP_PEER_AST_FLOWQ_UDP, + DP_PEER_AST_FLOWQ_NON_UDP, + DP_PEER_AST_FLOWQ_MAX, +}; + +/* + * struct dp_ast_flow_override_info - ast override info + * @ast_index - ast indexes in peer map message + * @ast_valid_mask - ast valid mask for each ast index + * @ast_flow_mask - ast flow mask for each ast index + * @tid_valid_low_pri_mask - per tid mask for low priority flow + * @tid_valid_hi_pri_mask - per tid mask for hi priority flow + */ +struct dp_ast_flow_override_info { + uint16_t ast_idx[DP_PEER_AST_FLOWQ_MAX]; + uint8_t ast_valid_mask; + uint8_t ast_flow_mask[DP_PEER_AST_FLOWQ_MAX]; + uint8_t tid_valid_low_pri_mask; + uint8_t tid_valid_hi_pri_mask; +}; + +/* + * struct dp_peer_ast_params - ast parameters for a msdu flow-queue + * @ast_index - ast index populated by FW + * @is_valid - ast flow valid mask + * @valid_tid_mask - per tid mask for this ast index + * @flowQ - flow queue id associated with this ast index + */ +struct dp_peer_ast_params { + uint16_t ast_idx; + uint8_t is_valid; + uint8_t valid_tid_mask; + uint8_t flowQ; +}; + +/* Peer structure for data path state */ +struct dp_peer { + /* VDEV to which this peer is associated */ + struct dp_vdev *vdev; + + struct dp_ast_entry *self_ast_entry; + + qdf_atomic_t ref_cnt; + + /* TODO: See if multiple peer IDs are required in wifi3.0 */ + /* peer ID(s) for this peer */ + uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER]; + + union dp_align_mac_addr mac_addr; + + /* node in the vdev's list of peers */ + TAILQ_ENTRY(dp_peer) peer_list_elem; + /* node in the hash table bin's list of peers */ + TAILQ_ENTRY(dp_peer) hash_list_elem; + + /* TID structures */ + struct dp_rx_tid rx_tid[DP_MAX_TIDS]; + struct dp_peer_tx_capture tx_capture; + + + /* TBD: No transmit TID state required? */ + + struct { + enum cdp_sec_type sec_type; + u_int32_t michael_key[2]; /* relevant for TKIP */ + } security[2]; /* 0 -> multicast, 1 -> unicast */ + + /* NAWDS Flag and Bss Peer bit */ + uint8_t nawds_enabled:1, /* NAWDS flag */ + bss_peer:1, /* set for bss peer */ + wds_enabled:1, /* WDS peer */ + authorize:1, /* Set when authorized */ + nac:1, /* NAC Peer*/ + tx_cap_enabled:1, /* Peer's tx-capture is enabled */ + rx_cap_enabled:1, /* Peer's rx-capture is enabled */ + valid:1; /* valid bit */ + + /* MCL specific peer local id */ + uint16_t local_id; + enum ol_txrx_peer_state state; + qdf_spinlock_t peer_info_lock; + + /* Peer Stats */ + struct cdp_peer_stats stats; + + TAILQ_HEAD(, dp_ast_entry) ast_entry_list; + /* TBD */ + +#ifdef WDS_VENDOR_EXTENSION + dp_ecm_policy wds_ecm; +#endif + bool delete_in_progress; + + /* Active Block ack sessions */ + uint16_t active_ba_session_cnt; + + /* Current HW buffersize setting */ + uint16_t hw_buffer_size; + + /* + * Flag to check if sessions with 256 buffersize + * should be terminated. + */ + uint8_t kill_256_sessions; + qdf_atomic_t is_default_route_set; + /* Peer level flag to check peer based pktlog enabled or + * disabled + */ + uint8_t peer_based_pktlog_filter; + + /* rdk statistics context */ + struct cdp_peer_rate_stats_ctx *wlanstats_ctx; + /* average sojourn time */ + qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX]; + +#ifdef QCA_MULTIPASS_SUPPORT + /* node in the special peer list element */ + TAILQ_ENTRY(dp_peer) mpass_peer_list_elem; + /* vlan id for key */ + uint16_t vlan_id; +#endif + +#ifdef PEER_CACHE_RX_PKTS + qdf_atomic_t flush_in_progress; + struct dp_peer_cached_bufq bufq_info; +#endif +#ifdef FEATURE_PERPKT_INFO + /* delayed ba ppdu stats handling */ + struct cdp_delayed_tx_completion_ppdu_user delayed_ba_ppdu_stats; + /* delayed ba flag */ + bool last_delayed_ba; + /* delayed ba ppdu id */ + uint32_t last_delayed_ba_ppduid; +#endif +#ifdef QCA_PEER_MULTIQ_SUPPORT + struct dp_peer_ast_params peer_ast_flowq_idx[DP_PEER_AST_FLOWQ_MAX]; +#endif +}; + +/* + * dp_invalid_peer_msg + * @nbuf: data buffer + * @wh: 802.11 header + * @vdev_id: id of vdev + */ +struct dp_invalid_peer_msg { + qdf_nbuf_t nbuf; + struct ieee80211_frame *wh; + uint8_t vdev_id; +}; + +/* + * dp_tx_me_buf_t: ME buffer + * next: pointer to next buffer + * data: Destination Mac address + */ +struct dp_tx_me_buf_t { + /* Note: ME buf pool initialization logic expects next pointer to + * be the first element. Dont add anything before next */ + struct dp_tx_me_buf_t *next; + uint8_t data[QDF_MAC_ADDR_SIZE]; +}; + +#if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA) +struct hal_rx_fst; + +#ifdef WLAN_SUPPORT_RX_FLOW_TAG +struct dp_rx_fse { + /* HAL Rx Flow Search Entry which matches HW definition */ + void *hal_rx_fse; + /* Toeplitz hash value */ + uint32_t flow_hash; + /* Flow index, equivalent to hash value truncated to FST size */ + uint32_t flow_id; + /* Stats tracking for this flow */ + struct cdp_flow_stats stats; + /* Flag indicating whether flow is IPv4 address tuple */ + uint8_t is_ipv4_addr_entry; + /* Flag indicating whether flow is valid */ + uint8_t is_valid; +}; + +struct dp_rx_fst { + /* Software (DP) FST */ + uint8_t *base; + /* Pointer to HAL FST */ + struct hal_rx_fst *hal_rx_fst; + /* Base physical address of HAL RX HW FST */ + uint64_t hal_rx_fst_base_paddr; + /* Maximum number of flows FSE supports */ + uint16_t max_entries; + /* Num entries in flow table */ + uint16_t num_entries; + /* SKID Length */ + uint16_t max_skid_length; + /* Hash mask to obtain legitimate hash entry */ + uint32_t hash_mask; + /* Timer for bundling of flows */ + qdf_timer_t cache_invalidate_timer; + /** + * Flag which tracks whether cache update + * is needed on timer expiry + */ + qdf_atomic_t is_cache_update_pending; + /* Flag to indicate completion of FSE setup in HW/FW */ + bool fse_setup_done; +}; + +#define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_rx_fse) +#elif WLAN_SUPPORT_RX_FISA + +struct dp_fisa_stats { + /* flow index invalid from RX HW TLV */ + uint32_t invalid_flow_index; +}; + +enum fisa_aggr_ret { + FISA_AGGR_DONE, + FISA_AGGR_NOT_ELIGIBLE, + FISA_FLUSH_FLOW +}; + +struct dp_fisa_rx_sw_ft { + /* HAL Rx Flow Search Entry which matches HW definition */ + void *hw_fse; + /* Toeplitz hash value */ + uint32_t flow_hash; + /* Flow index, equivalent to hash value truncated to FST size */ + uint32_t flow_id; + /* Stats tracking for this flow */ + struct cdp_flow_stats stats; + /* Flag indicating whether flow is IPv4 address tuple */ + uint8_t is_ipv4_addr_entry; + /* Flag indicating whether flow is valid */ + uint8_t is_valid; + uint8_t is_populated; + uint8_t is_flow_udp; + uint8_t is_flow_tcp; + qdf_nbuf_t head_skb; + uint16_t cumulative_l4_checksum; + uint16_t adjusted_cumulative_ip_length; + uint16_t cur_aggr; + uint16_t napi_flush_cumulative_l4_checksum; + uint16_t napi_flush_cumulative_ip_length; + qdf_nbuf_t last_skb; + uint32_t head_skb_ip_hdr_offset; + uint32_t head_skb_l4_hdr_offset; + struct cdp_rx_flow_tuple_info rx_flow_tuple_info; + uint8_t napi_id; + struct dp_vdev *vdev; + uint64_t bytes_aggregated; + uint32_t flush_count; + uint32_t aggr_count; + uint8_t do_not_aggregate; + uint16_t hal_cumultive_ip_len; + struct dp_soc *soc_hdl; + /* last aggregate count fetched from RX PKT TLV */ + uint32_t last_hal_aggr_count; + uint32_t cur_aggr_gso_size; + struct udphdr *head_skb_udp_hdr; + uint32_t reo_dest_indication; +}; + +#define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_fisa_rx_sw_ft) +#define MAX_FSE_CACHE_FL_HST 10 +/** + * struct fse_cache_flush_history - Debug history cache flush + * @timestamp: Entry update timestamp + * @flows_added: Number of flows added for this flush + * @flows_deleted: Number of flows deleted for this flush + */ +struct fse_cache_flush_history { + uint64_t timestamp; + uint32_t flows_added; + uint32_t flows_deleted; +}; + +struct dp_rx_fst { + /* Software (DP) FST */ + uint8_t *base; + /* Pointer to HAL FST */ + struct hal_rx_fst *hal_rx_fst; + /* Base physical address of HAL RX HW FST */ + uint64_t hal_rx_fst_base_paddr; + /* Maximum number of flows FSE supports */ + uint16_t max_entries; + /* Num entries in flow table */ + uint16_t num_entries; + /* SKID Length */ + uint16_t max_skid_length; + /* Hash mask to obtain legitimate hash entry */ + uint32_t hash_mask; + /* Lock for adding/deleting entries of FST */ + qdf_spinlock_t dp_rx_fst_lock; + uint32_t add_flow_count; + uint32_t del_flow_count; + uint32_t hash_collision_cnt; + struct dp_soc *soc_hdl; + qdf_atomic_t fse_cache_flush_posted; + qdf_timer_t fse_cache_flush_timer; + struct fse_cache_flush_history cache_fl_rec[MAX_FSE_CACHE_FL_HST]; + /* FISA DP stats */ + struct dp_fisa_stats stats; +}; + +#endif /* WLAN_SUPPORT_RX_FISA */ +#endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */ + +#ifdef WLAN_FEATURE_STATS_EXT +/* + * dp_req_rx_hw_stats_t: RX peer HW stats query structure + * @pending_tid_query_cnt: pending tid stats count which waits for REO status + * @is_query_timeout: flag to show is stats query timeout + */ +struct dp_req_rx_hw_stats_t { + qdf_atomic_t pending_tid_stats_cnt; + bool is_query_timeout; +}; +#endif + +#endif /* _DP_TYPES_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_wdi_event.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_wdi_event.c new file mode 100644 index 0000000000000000000000000000000000000000..697a570ac53069c4fb35ca806227151aac2a1434 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_wdi_event.c @@ -0,0 +1,314 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + +#include "dp_internal.h" +#include "qdf_mem.h" /* qdf_mem_malloc,free */ + +#ifdef WDI_EVENT_ENABLE +void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (!pdev) + return NULL; + + return pdev->pl_dev; +} +/* + * dp_wdi_event_next_sub() - Return handle for Next WDI event + * @wdi_sub: WDI Event handle + * + * Return handle for next WDI event in list + * + * Return: Next WDI event to be subscribe + */ +static inline wdi_event_subscribe * +dp_wdi_event_next_sub(wdi_event_subscribe *wdi_sub) +{ + if (!wdi_sub) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid subscriber in %s", __func__); + return NULL; + } + return wdi_sub->priv.next; +} + + +/* + * dp_wdi_event_del_subs() -Delete Event subscription + * @wdi_sub: WDI Event handle + * @event_index: Event index from list + * + * This API will delete subscribed event from list + * Return: None + */ +static inline void +dp_wdi_event_del_subs(wdi_event_subscribe *wdi_sub, int event_index) +{ + /* Subscribers should take care of deletion */ +} + + +/* + * dp_wdi_event_iter_sub() - Iterate through all WDI event in the list + * and pass WDI event to callback function + * @pdev: DP pdev handle + * @event_index: Event index in list + * @wdi_event: WDI event handle + * @data: pointer to data + * @peer_id: peer id number + * @status: HTT rx status + * + * + * Return: None + */ +static inline void +dp_wdi_event_iter_sub( + struct dp_pdev *pdev, + uint32_t event_index, + wdi_event_subscribe *wdi_sub, + void *data, + uint16_t peer_id, + int status) +{ + enum WDI_EVENT event = event_index + WDI_EVENT_BASE; + + if (wdi_sub) { + do { + wdi_sub->callback(wdi_sub->context, event, data, + peer_id, status); + } while ((wdi_sub = dp_wdi_event_next_sub(wdi_sub))); + } +} + + +/* + * dp_wdi_event_handler() - Event handler for WDI event + * @event: wdi event number + * @soc: soc handle + * @data: pointer to data + * @peer_id: peer id number + * @status: HTT rx status + * @pdev_id: id of pdev + * + * It will be called to register WDI event + * + * Return: None + */ +void +dp_wdi_event_handler( + enum WDI_EVENT event, + struct dp_soc *soc, + void *data, + uint16_t peer_id, + int status, uint8_t pdev_id) +{ + uint32_t event_index; + wdi_event_subscribe *wdi_sub; + struct dp_pdev *txrx_pdev; + struct dp_soc *soc_t = (struct dp_soc *)soc; + txrx_pdev = dp_get_pdev_for_mac_id(soc_t, pdev_id); + + if (!event) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid WDI event in %s", __func__); + return; + } + if (!txrx_pdev || txrx_pdev->pdev_deinit) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid pdev in WDI event handler"); + return; + } + + /* + * There can be NULL data, so no validation for the data + * Subscribers must do the sanity based on the requirements + */ + event_index = event - WDI_EVENT_BASE; + + DP_STATS_INC(txrx_pdev, wdi_event[event_index], 1); + wdi_sub = txrx_pdev->wdi_event_list[event_index]; + + /* Find the subscriber */ + dp_wdi_event_iter_sub(txrx_pdev, event_index, wdi_sub, data, + peer_id, status); +} + + +/* + * dp_wdi_event_sub() - Subscribe WDI event + * @soc: soc handle + * @pdev_id: id of pdev + * @event_cb_sub_handle: subcribe evnet handle + * @event: Event to be subscribe + * + * Return: 0 for success. nonzero for failure. + */ +int +dp_wdi_event_sub( + struct cdp_soc_t *soc, uint8_t pdev_id, + wdi_event_subscribe *event_cb_sub_handle, + uint32_t event) +{ + uint32_t event_index; + wdi_event_subscribe *wdi_sub; + struct dp_pdev *txrx_pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + wdi_event_subscribe *event_cb_sub = + (wdi_event_subscribe *) event_cb_sub_handle; + + if (!txrx_pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid txrx_pdev in %s", __func__); + return -EINVAL; + } + if (!event_cb_sub) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid callback in %s", __func__); + return -EINVAL; + } + if ((!event) || (event >= WDI_EVENT_LAST) || (event < WDI_EVENT_BASE)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid event in %s", __func__); + return -EINVAL; + } + dp_set_pktlog_wifi3(txrx_pdev, event, true); + event_index = event - WDI_EVENT_BASE; + wdi_sub = txrx_pdev->wdi_event_list[event_index]; + + /* + * Check if it is the first subscriber of the event + */ + if (!wdi_sub) { + wdi_sub = event_cb_sub; + wdi_sub->priv.next = NULL; + wdi_sub->priv.prev = NULL; + txrx_pdev->wdi_event_list[event_index] = wdi_sub; + return 0; + } + event_cb_sub->priv.next = wdi_sub; + event_cb_sub->priv.prev = NULL; + wdi_sub->priv.prev = event_cb_sub; + txrx_pdev->wdi_event_list[event_index] = event_cb_sub; + return 0; + +} + +/* + * dp_wdi_event_unsub() - WDI event unsubscribe + * @soc: soc handle + * @pdev_id: id of pdev + * @event_cb_sub_handle: subscribed event handle + * @event: Event to be unsubscribe + * + * + * Return: 0 for success. nonzero for failure. + */ +int +dp_wdi_event_unsub( + struct cdp_soc_t *soc, uint8_t pdev_id, + wdi_event_subscribe *event_cb_sub_handle, + uint32_t event) +{ + uint32_t event_index = event - WDI_EVENT_BASE; + struct dp_pdev *txrx_pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + wdi_event_subscribe *event_cb_sub = + (wdi_event_subscribe *) event_cb_sub_handle; + + if (!txrx_pdev || !event_cb_sub) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid callback or pdev in %s", __func__); + return -EINVAL; + } + + dp_set_pktlog_wifi3(txrx_pdev, event, false); + + if (!event_cb_sub->priv.prev) { + txrx_pdev->wdi_event_list[event_index] = event_cb_sub->priv.next; + } else { + event_cb_sub->priv.prev->priv.next = event_cb_sub->priv.next; + } + if (event_cb_sub->priv.next) { + event_cb_sub->priv.next->priv.prev = event_cb_sub->priv.prev; + } + + return 0; +} + + +/* + * dp_wdi_event_attach() - Attach wdi event + * @txrx_pdev: DP pdev handle + * + * Return: 0 for success. nonzero for failure. + */ +int +dp_wdi_event_attach(struct dp_pdev *txrx_pdev) +{ + if (!txrx_pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid device in %s\nWDI event attach failed", + __func__); + return -EINVAL; + } + /* Separate subscriber list for each event */ + txrx_pdev->wdi_event_list = (wdi_event_subscribe **) + qdf_mem_malloc( + sizeof(wdi_event_subscribe *) * WDI_NUM_EVENTS); + if (!txrx_pdev->wdi_event_list) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Insufficient memory for the WDI event lists"); + return -EINVAL; + } + return 0; +} + + +/* + * dp_wdi_event_detach() - Detach WDI event + * @txrx_pdev: DP pdev handle + * + * Return: 0 for success. nonzero for failure. + */ +int +dp_wdi_event_detach(struct dp_pdev *txrx_pdev) +{ + int i; + wdi_event_subscribe *wdi_sub; + if (!txrx_pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid device in %s\nWDI attach failed", __func__); + return -EINVAL; + } + if (!txrx_pdev->wdi_event_list) { + return -EINVAL; + } + for (i = 0; i < WDI_NUM_EVENTS; i++) { + wdi_sub = txrx_pdev->wdi_event_list[i]; + /* Delete all the subscribers */ + dp_wdi_event_del_subs(wdi_sub, i); + } + qdf_mem_free(txrx_pdev->wdi_event_list); + return 0; +} +#endif /* CONFIG_WIN */ diff --git a/drivers/staging/qca-wifi-host-cmn/ftm/core/src/wlan_ftm_svc.c b/drivers/staging/qca-wifi-host-cmn/ftm/core/src/wlan_ftm_svc.c new file mode 100644 index 0000000000000000000000000000000000000000..fc8f0de8cc604ba2d63abbbc10134c9b74d26b34 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/ftm/core/src/wlan_ftm_svc.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This implementation of init/deint functions for FTM services. + */ + +#include "wlan_ftm_svc_i.h" +#include +#include + +static inline struct wlan_lmac_if_ftm_tx_ops * +wlan_psoc_get_ftm_txops(struct wlan_objmgr_psoc *psoc) +{ + return &((psoc->soc_cb.tx_ops.ftm_tx_ops)); +} + +static QDF_STATUS +ftm_pdev_obj_init(struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj) +{ + ftm_pdev_obj->data = qdf_mem_malloc(FTM_CMD_MAX_BUF_LENGTH); + if (!ftm_pdev_obj->data) + return QDF_STATUS_E_NOMEM; + + ftm_pdev_obj->length = 0; + + ftm_pdev_obj->cmd_type = WIFI_FTM_CMD_UNKNOWN; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_ftm_pdev_obj_create_notification(struct wlan_objmgr_pdev *pdev, + void *arg_list) +{ + QDF_STATUS status; + struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj; + + ftm_pdev_obj = qdf_mem_malloc(sizeof(*ftm_pdev_obj)); + + if (!ftm_pdev_obj) + return QDF_STATUS_E_NOMEM; + + ftm_pdev_obj->pdev = pdev; + status = ftm_pdev_obj_init(ftm_pdev_obj); + + if (QDF_IS_STATUS_ERROR(status)) { + ftm_err("ftm pdev obj init failed"); + qdf_mem_free(ftm_pdev_obj); + return status; + } + + status = wlan_objmgr_pdev_component_obj_attach(pdev, + WLAN_UMAC_COMP_FTM, + ftm_pdev_obj, + QDF_STATUS_SUCCESS); + + if (QDF_IS_STATUS_ERROR(status)) { + ftm_err("ftm pdev obj attach failed"); + qdf_mem_free(ftm_pdev_obj); + return status; + } + + return status; +} + +static QDF_STATUS +ftm_pdev_obj_deinit(struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj) +{ + if (ftm_pdev_obj->data) { + qdf_mem_free(ftm_pdev_obj->data); + + ftm_pdev_obj->data = NULL; + ftm_pdev_obj->length = 0; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_ftm_pdev_obj_destroy_notification(struct wlan_objmgr_pdev *pdev, + void *arg_list) +{ + QDF_STATUS status; + struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj = + wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_FTM); + + if (!ftm_pdev_obj) { + ftm_err("invalid wifi ftm obj"); + return QDF_STATUS_E_FAULT; + } + + status = wlan_objmgr_pdev_component_obj_detach(pdev, WLAN_UMAC_COMP_FTM, + ftm_pdev_obj); + + status = ftm_pdev_obj_deinit(ftm_pdev_obj); + ftm_pdev_obj->pdev = NULL; + + qdf_mem_free(ftm_pdev_obj); + + return status; +} + +QDF_STATUS +wlan_ftm_testmode_attach(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_ftm_tx_ops *ftm_tx_ops; + + ftm_tx_ops = wlan_psoc_get_ftm_txops(psoc); + + if (ftm_tx_ops->ftm_attach) + return ftm_tx_ops->ftm_attach(psoc); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_ftm_testmode_detach(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_ftm_tx_ops *ftm_tx_ops; + + ftm_tx_ops = wlan_psoc_get_ftm_txops(psoc); + + if (ftm_tx_ops->ftm_detach) + return ftm_tx_ops->ftm_detach(psoc); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_ftm_cmd_send(struct wlan_objmgr_pdev *pdev, uint8_t *buf, + uint32_t len, uint8_t pdev_id) +{ + struct wlan_lmac_if_ftm_tx_ops *ftm_tx_ops; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) + return QDF_STATUS_E_NOENT; + + ftm_tx_ops = wlan_psoc_get_ftm_txops(psoc); + + if (ftm_tx_ops->ftm_cmd_send) + return ftm_tx_ops->ftm_cmd_send(pdev, buf, len, pdev_id); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/ftm/core/src/wlan_ftm_svc_i.h b/drivers/staging/qca-wifi-host-cmn/ftm/core/src/wlan_ftm_svc_i.h new file mode 100644 index 0000000000000000000000000000000000000000..bd8be913e46ea783577924e0a68d81c7f38d7737 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/ftm/core/src/wlan_ftm_svc_i.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declare the ftm service data structure and apis + */ +#ifndef _WLAN_FTM_SVC_H_ +#define _WLAN_FTM_SVC_H_ + +#include +#include +#include + +/** + * struct ftm_seg_hdr_info - the segment header for the event from FW + * @len: length of the segment header + * @msgref: message reference + * @segment_info: segment information + * @pad: padding + * + */ +struct ftm_seg_hdr_info { + uint32_t len; + uint32_t msgref; + uint32_t segment_info; + uint32_t pad; +}; + +/** + * wlan_ftm_pdev_obj_create_notification() - ftm pdev create handler + * @pdev: pdev pointer + * @arg_list: argument list + * + * return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wlan_ftm_pdev_obj_create_notification(struct wlan_objmgr_pdev *pdev, + void *arg_list); + +/** + * wlan_ftm_pdev_obj_destroy_notification() - ftm pdev destroy handler + * @pdev: pdev pointer + * @arg_list: argument list + * + * return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wlan_ftm_pdev_obj_destroy_notification(struct wlan_objmgr_pdev *pdev, + void *arg_list); + +/** + * wlan_ftm_cmd_send() - send ftm command to target_if layer + * @pdev: pdev pointer + * @buf: data buffer + * @len: event length + * + * return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wlan_ftm_cmd_send(struct wlan_objmgr_pdev *pdev, uint8_t *buf, + uint32_t len, uint8_t pdev_id); + +/** + * wlan_ftm_testmode_attach() - Attach FTM UTF handle + * @psoc: psoc pointer + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_ftm_testmode_attach(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_ftm_testmode_detach() - Attach FTM UTF handle + * @psoc: psoc pointer + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_ftm_testmode_detach(struct wlan_objmgr_psoc *psoc); +#endif /* _WLAN_FTM_SVC_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/inc/wlan_ftm_init_deinit_api.h b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/inc/wlan_ftm_init_deinit_api.h new file mode 100644 index 0000000000000000000000000000000000000000..1c1c312d4c7b2648860c04ced1dc2d260a7d03d7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/inc/wlan_ftm_init_deinit_api.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declare the ftm service data structure and apis + */ +#ifndef _WLAN_FTM_UCFG_API_H_ +#define _WLAN_FTM_UCFG_API_H_ + +#include +#include +#include + +#ifdef QCA_WIFI_FTM +/** + * dispatcher_ftm_init() - FTM testmode initialization API + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS dispatcher_ftm_init(void); + +/** + * dispatcher_ftm_deinit() - FTM testmode deinitialization API + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS dispatcher_ftm_deinit(void); + +/** + * dispatcher_ftm_psoc_open() - FTM module open API + * @psoc: psoc object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS dispatcher_ftm_psoc_open(struct wlan_objmgr_psoc *psoc); + +/** + * dispatcher_ftm_psoc_close() - FTM module close API + * @psoc: psoc object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS dispatcher_ftm_psoc_close(struct wlan_objmgr_psoc *psoc); + +#else +static inline QDF_STATUS dispatcher_ftm_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS dispatcher_ftm_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +dispatcher_ftm_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +dispatcher_ftm_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif /* _WLAN_FTM_UCFG_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/inc/wlan_ftm_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/inc/wlan_ftm_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..c2da3816104d62dbe542123944dcacea3031a126 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/inc/wlan_ftm_ucfg_api.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declare the ftm service data structure and apis + */ +#ifndef _WLAN_FTM_UCFG_API_H_ +#define _WLAN_FTM_UCFG_API_H_ + +#include +#include +#include + +#define FTM_DEBUG 0 + +#if FTM_DEBUG +#define ftm_alert(params...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_FTM, params) +#define ftm_err(params...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_FTM, params) +#define ftm_warn(params...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_FTM, params) +#define ftm_notice(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_FTM, params) +#define ftm_debug(params...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_FTM, params) + +#define ftm_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_FTM, params) +#define ftm_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_FTM, params) +#define ftm_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_FTM, params) +#define ftm_nofl_notice(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_FTM, params) +#define ftm_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_FTM, params) + +#else +#define ftm_alert(params...) +#define ftm_err(params...) +#define ftm_warn(params...) +#define ftm_notice(params...) +#define ftm_debug(params...) + +#define ftm_nofl_alert(params...) +#define ftm_nofl_err(params...) +#define ftm_nofl_warn(params...) +#define ftm_nofl_notice(params...) +#define ftm_nofl_debug(params...) +#endif + +#define FTM_IOCTL_UNIFIED_UTF_CMD 0x1000 +#define FTM_IOCTL_UNIFIED_UTF_RSP 0x1001 +#define FTM_CMD_MAX_BUF_LENGTH 2048 + +/** + * enum wifi_ftm_cmd_type - the enumeration of the command source per pdev + * @WIFI_FTM_CMD_IOCTL: command from ioctl on the pdev + * @WIFI_FTM_CMD_NL80211: command from nl80211 on the pdev + * + */ +enum wifi_ftm_pdev_cmd_type { + WIFI_FTM_CMD_IOCTL = 1, + WIFI_FTM_CMD_NL80211, + + /* command should be added above */ + WIFI_FTM_CMD_UNKNOWN, +}; + +/** + * struct wifi_ftm_pdev_priv_obj - wifi ftm pdev utf event info + * @pdev: pointer to pdev + * @data: data ptr + * @current_seq: curent squence + * @expected_seq: expected sequence + * @length: length + * @offset: offset + * @cmd_type: command type from either ioctl or nl80211 + */ +struct wifi_ftm_pdev_priv_obj { + struct wlan_objmgr_pdev *pdev; + uint8_t *data; + uint8_t current_seq; + uint8_t expected_seq; + qdf_size_t length; + qdf_size_t offset; + enum wifi_ftm_pdev_cmd_type cmd_type; +}; + +/** + * wlan_ftm_testmode_cmd() - handle FTM testmode command + * @pdev: pdev pointer + * @data: data + * @len: data length + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS ucfg_wlan_ftm_testmode_cmd(struct wlan_objmgr_pdev *pdev, + uint8_t *data, uint32_t len); + +/** + * wlan_ftm_testmode_rsp() - handle FTM testmode command + * @pdev: pdev pointer + * @data: data + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS ucfg_wlan_ftm_testmode_rsp(struct wlan_objmgr_pdev *pdev, + uint8_t *data); + +/** + * wlan_ftm_process_utf_event() - process ftm UTF event + * @scn_handle: scn handle + * @event: event buffer + * @len: event length + * + * return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wlan_ftm_process_utf_event(struct wlan_objmgr_pdev *pdev, + uint8_t *event_buf, uint32_t len); +#endif /* _WLAN_FTM_UCFG_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/src/wlan_ftm_init_deinit.c b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/src/wlan_ftm_init_deinit.c new file mode 100644 index 0000000000000000000000000000000000000000..47364e07284dde2cae184265b2e9716160019a75 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/src/wlan_ftm_init_deinit.c @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This implementation of init/deint functions for FTM services. + */ + +#include +#include +#include +#include "../../core/src/wlan_ftm_svc_i.h" +#include +#include + +QDF_STATUS dispatcher_ftm_init(void) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_register_pdev_create_handler(WLAN_UMAC_COMP_FTM, + wlan_ftm_pdev_obj_create_notification, NULL); + + if (QDF_IS_STATUS_ERROR(status)) + goto err_pdev_create; + + status = wlan_objmgr_register_pdev_destroy_handler(WLAN_UMAC_COMP_FTM, + wlan_ftm_pdev_obj_destroy_notification, NULL); + + if (QDF_IS_STATUS_ERROR(status)) + goto err_pdev_delete; + + return QDF_STATUS_SUCCESS; + +err_pdev_delete: + wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_FTM, + wlan_ftm_pdev_obj_create_notification, NULL); +err_pdev_create: + return status; +} + +QDF_STATUS dispatcher_ftm_deinit(void) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_FTM, + wlan_ftm_pdev_obj_create_notification, NULL); + + if (QDF_IS_STATUS_ERROR(status)) + return QDF_STATUS_E_FAILURE; + + status = wlan_objmgr_unregister_pdev_destroy_handler(WLAN_UMAC_COMP_FTM, + wlan_ftm_pdev_obj_destroy_notification, NULL); + + if (QDF_IS_STATUS_ERROR(status)) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS dispatcher_ftm_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + /* calling the wmi event handler registration */ + return wlan_ftm_testmode_attach(psoc); +} + +QDF_STATUS dispatcher_ftm_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + /* calling the wmi event handler de-registration */ + return wlan_ftm_testmode_detach(psoc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/src/wlan_ftm_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/src/wlan_ftm_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..10449058558d418d568624c48f1658ffc3ac7562 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/src/wlan_ftm_ucfg_api.c @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This implementation of init/deint functions for FTM services. + */ + +#include +#include +#include "../../core/src/wlan_ftm_svc_i.h" +#include +#include + +QDF_STATUS ucfg_wlan_ftm_testmode_cmd(struct wlan_objmgr_pdev *pdev, + uint8_t *data, uint32_t len) +{ + struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj; + uint8_t pdev_id; + + ftm_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_FTM); + if (!ftm_pdev_obj) { + ftm_err("Failed to get ftm pdev component"); + return QDF_STATUS_E_FAILURE; + } + + ftm_pdev_obj->length = 0; + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + return wlan_ftm_cmd_send(pdev, data, len, pdev_id); +} + +QDF_STATUS +wlan_ftm_process_utf_event(struct wlan_objmgr_pdev *pdev, + uint8_t *event_buf, uint32_t len) +{ + struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj; + uint32_t utf_datalen; + uint8_t *utf_data; + struct ftm_seg_hdr_info seghdr_info; + u_int8_t total_segments, current_seq; + + ftm_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_FTM); + if (!ftm_pdev_obj) { + ftm_err("Failed to get ftm pdev component"); + return QDF_STATUS_E_FAILURE; + } + + utf_data = event_buf; + seghdr_info = *(struct ftm_seg_hdr_info *)(event_buf); + ftm_pdev_obj->current_seq = (seghdr_info.segment_info & 0xF); + + current_seq = (seghdr_info.segment_info & 0xF); + total_segments = (seghdr_info.segment_info >> 4) & 0xF; + + utf_datalen = len - sizeof(seghdr_info); + + if (current_seq == 0) { + ftm_pdev_obj->expected_seq = 0; + ftm_pdev_obj->offset = 0; + } else { + if (ftm_pdev_obj->expected_seq != current_seq) { + ftm_debug("seq mismatch exp Seq %d got seq %d\n", + ftm_pdev_obj->expected_seq, current_seq); + } + } + + if ((len > FTM_CMD_MAX_BUF_LENGTH) || + (ftm_pdev_obj->offset > (FTM_CMD_MAX_BUF_LENGTH - utf_datalen))) { + ftm_err("Invalid utf data len :%d", len); + return QDF_STATUS_E_FAILURE; + } + qdf_mem_copy(&ftm_pdev_obj->data[ftm_pdev_obj->offset], + &utf_data[sizeof(seghdr_info)], utf_datalen); + + ftm_pdev_obj->offset = ftm_pdev_obj->offset + utf_datalen; + ftm_pdev_obj->expected_seq++; + + if (ftm_pdev_obj->expected_seq == total_segments) { + if (ftm_pdev_obj->offset != seghdr_info.len) { + ftm_debug("len mismatch len %zu total len %d\n", + ftm_pdev_obj->offset, seghdr_info.len); + } + + ftm_pdev_obj->length = ftm_pdev_obj->offset; + + /** + * If the repsonse is for a command from FTM daemon, + * send this repsonse data to cfg80211 + */ + if (ftm_pdev_obj->cmd_type == WIFI_FTM_CMD_NL80211) { + if (wlan_cfg80211_ftm_rx_event(pdev, ftm_pdev_obj->data, + ftm_pdev_obj->length) != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + ftm_pdev_obj->cmd_type = WIFI_FTM_CMD_UNKNOWN; + } + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef QCA_WIFI_FTM_IOCTL +QDF_STATUS ucfg_wlan_ftm_testmode_rsp(struct wlan_objmgr_pdev *pdev, + uint8_t *data) +{ + struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj; + uint32_t *len; + + ftm_pdev_obj = + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_FTM); + if (!ftm_pdev_obj) { + ftm_err("Failed to get ftm pdev component"); + return QDF_STATUS_E_FAILURE; + } + + if (ftm_pdev_obj->length) { + len = (uint32_t *)data; + *len = ftm_pdev_obj->length; + qdf_mem_copy((data + 4), ftm_pdev_obj->data, + ftm_pdev_obj->length); + + ftm_pdev_obj->length = 0; + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/global_lmac_if/inc/wlan_global_lmac_if_api.h b/drivers/staging/qca-wifi-host-cmn/global_lmac_if/inc/wlan_global_lmac_if_api.h new file mode 100644 index 0000000000000000000000000000000000000000..aadfe7def89c2a041fa39164cc36e6a08d7c2ad5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/global_lmac_if/inc/wlan_global_lmac_if_api.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_GLOBAL_LMAC_IF_API_H_ +#define _WLAN_GLOBAL_LMAC_IF_API_H_ + +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_psoc_obj.h" + +/** + * wlan_global_lmac_if_open() - global lmac_if open + * @psoc: psoc context + * + * Opens up lmac_if southbound layer. This function calls OL,DA and UMAC + * modules to register respective tx and rx callbacks. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_global_lmac_if_open(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_global_lmac_if_rx_ops_register() - UMAC rx handler register + * @rx_ops: Pointer to rx_ops structure to be populated + * + * Register umac RX callabacks which will be called by DA/OL/WMA/WMI + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_global_lmac_if_rx_ops_register + (struct wlan_lmac_if_rx_ops *rx_ops); + +/** + * wlan_global_lmac_if_close() - Close global lmac_if + * @psoc: psoc context + * + * Deregister global lmac_if TX and RX handlers + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_global_lmac_if_close(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_global_lmac_if_set_txops_registration_cb() -tx + * registration callback assignment + * @dev_type: Dev type can be either Direct attach or Offload + * @handler: handler to be called for LMAC tx ops registration + * + * API to assign appropriate tx registration callback handler based on the + * device type(Offload or Direct attach) + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_global_lmac_if_set_txops_registration_cb(WLAN_DEV_TYPE dev_type, + QDF_STATUS (*handler)(struct wlan_lmac_if_tx_ops *)); + +#ifdef WLAN_CONV_SPECTRAL_ENABLE +/** + * wlan_lmac_if_sptrl_set_rx_ops_register_cb ()- Spectral LMAC Rx ops + * registration callback assignment + * @handler: Handler to be called for spectral LMAC rx ops registration + * + * API to assign appropriate Spectral LMAC rx ops registration callback handler + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_lmac_if_sptrl_set_rx_ops_register_cb(void (*handler) + (struct wlan_lmac_if_rx_ops *)); +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ +#endif /* _WLAN_LMAC_IF_API_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/global_lmac_if/src/wlan_global_lmac_if.c b/drivers/staging/qca-wifi-host-cmn/global_lmac_if/src/wlan_global_lmac_if.c new file mode 100644 index 0000000000000000000000000000000000000000..f304973bf93d07dbe5cc4bca8c794e6f56c8fba1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/global_lmac_if/src/wlan_global_lmac_if.c @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_mem.h" +#include "qdf_module.h" +#include "wlan_lmac_if_def.h" +#include "wlan_lmac_if_api.h" +#include "wlan_global_lmac_if_api.h" +#ifdef WLAN_CONV_SPECTRAL_ENABLE +#include +#endif +#include + +/* Function pointer to call DA/OL specific tx_ops registration function */ +QDF_STATUS (*wlan_global_lmac_if_tx_ops_register[MAX_DEV_TYPE]) + (struct wlan_lmac_if_tx_ops *tx_ops); + +/* + * spectral scan is built as separate .ko for WIN where + * MCL it is part of wlan.ko so the registration of +.* rx ops to global lmac if layer is different between WIN + * and MCL + */ +#ifdef WLAN_CONV_SPECTRAL_ENABLE +/** + * wlan_spectral_register_rx_ops() - Register spectral component RX OPS + * @rx_ops: lmac if receive ops + * + * Return: None + */ +#ifdef SPECTRAL_MODULIZED_ENABLE +/* Function pointer for spectral rx_ops registration function */ +void (*wlan_lmac_if_sptrl_rx_ops)(struct wlan_lmac_if_rx_ops *rx_ops); + +QDF_STATUS wlan_lmac_if_sptrl_set_rx_ops_register_cb(void (*handler) + (struct wlan_lmac_if_rx_ops *)) +{ + wlan_lmac_if_sptrl_rx_ops = handler; + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(wlan_lmac_if_sptrl_set_rx_ops_register_cb); + +static void wlan_spectral_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ + wlan_lmac_if_sptrl_rx_ops(rx_ops); +} +#else +static void wlan_spectral_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ + wlan_lmac_if_sptrl_register_rx_ops(rx_ops); +} +#endif /* SPECTRAL_MODULIZED_ENABLE */ +#else +/** + * wlan_spectral_register_rx_ops() - Dummy api to register spectral RX OPS + * @rx_ops: lmac if receive ops + * + * Return: None + */ +static void wlan_spectral_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif /*WLAN_CONV_SPECTRAL_ENABLE*/ + +/** + * wlan_global_lmac_if_rx_ops_register() - Global lmac_if + * rx handler register + * @rx_ops: Pointer to rx_ops structure to be populated + * + * Register lmac_if RX callabacks which will be called by DA/OL/WMA/WMI + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS +wlan_global_lmac_if_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + /* + * Component specific public api's to be called to register + * respective callbacks + * Ex: rx_ops->fp = function; + */ + if (!rx_ops) { + qdf_print("%s: lmac if rx ops pointer is NULL", __func__); + return QDF_STATUS_E_INVAL; + } + /* Registeration for UMAC componets */ + wlan_lmac_if_umac_rx_ops_register(rx_ops); + + /* spectral rx_ops registration*/ + wlan_spectral_register_rx_ops(rx_ops); + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_global_lmac_if_open() - global lmac_if open + * @psoc: psoc context + * + * Opens up lmac_if southbound layer. This function calls OL,DA and UMAC + * modules to register respective tx and rx callbacks. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_global_lmac_if_open(struct wlan_objmgr_psoc *psoc) +{ + WLAN_DEV_TYPE dev_type; + + dev_type = psoc->soc_nif.phy_type; + + if (dev_type == WLAN_DEV_DA || dev_type == WLAN_DEV_OL) { + wlan_global_lmac_if_tx_ops_register[dev_type] + (&psoc->soc_cb.tx_ops); + } else { + /* Control should ideally not reach here */ + qdf_print("Invalid device type"); + return QDF_STATUS_E_INVAL; + } + + /* Function call to register rx-ops handlers */ + wlan_global_lmac_if_rx_ops_register(&psoc->soc_cb.rx_ops); + + target_if_wake_lock_init(psoc); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_global_lmac_if_open); + +/** + * wlan_global_lmac_if_close() - Close global lmac_if + * @psoc: psoc context + * + * Deregister lmac_if TX and RX handlers + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_global_lmac_if_close(struct wlan_objmgr_psoc *psoc) +{ + target_if_wake_lock_deinit(psoc); + qdf_mem_zero(&psoc->soc_cb.tx_ops, sizeof(psoc->soc_cb.tx_ops)); + qdf_mem_zero(&psoc->soc_cb.rx_ops, sizeof(psoc->soc_cb.rx_ops)); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_global_lmac_if_close); + +/** + * wlan_global_lmac_if_set_txops_registration_cb() - tx + * registration callback assignment + * @dev_type: Dev type can be either Direct attach or Offload + * @handler: handler to be called for LMAC tx ops registration + * + * API to assign appropriate tx registration callback handler based on the + * device type(Offload or Direct attach) + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_global_lmac_if_set_txops_registration_cb(WLAN_DEV_TYPE dev_type, + QDF_STATUS (*handler)(struct wlan_lmac_if_tx_ops *)) +{ + wlan_global_lmac_if_tx_ops_register[dev_type] = handler; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_global_lmac_if_set_txops_registration_cb); diff --git a/drivers/staging/qca-wifi-host-cmn/gpio/core/inc/wlan_gpio_api.h b/drivers/staging/qca-wifi-host-cmn/gpio/core/inc/wlan_gpio_api.h new file mode 100644 index 0000000000000000000000000000000000000000..5989812fb2001d6a0cb935ecc568e0c541f785d3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/gpio/core/inc/wlan_gpio_api.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_gpio_api.h + * + * This header file provide API declarations required for gpio cfg + * that called by other components + */ + +#ifndef __WLAN_GPIO_CFG_API_H__ +#define __WLAN_GPIO_CFG_API_H__ + +#include + +#ifdef WLAN_FEATURE_GPIO_CFG + +/** + * wlan_gpio_init() - API to init component + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_gpio_init(void); + +/** + * wlan_gpio_deinit() - API to deinit component + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_gpio_deinit(void); + +#else +static inline +QDF_STATUS wlan_gpio_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline +QDF_STATUS wlan_gpio_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_FEATURE_GPIO_CFG */ +#endif /*__WLAN_GPIO_CFG_API_H__*/ + diff --git a/drivers/staging/qca-wifi-host-cmn/gpio/core/inc/wlan_gpio_priv_api.h b/drivers/staging/qca-wifi-host-cmn/gpio/core/inc/wlan_gpio_priv_api.h new file mode 100644 index 0000000000000000000000000000000000000000..4ea4ead5727a43afb85337b8aa24f4e87e509fa0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/gpio/core/inc/wlan_gpio_priv_api.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_gpio_priv_api.h + * + * This header file provide API declarations required for gpio cfg + * that called internally + */ + +#ifndef __WLAN_GPIO_CFG_PRIV_API_H__ +#define __WLAN_GPIO_CFG_PRIV_API_H__ + +#include +#include +#include + +#define gpio_debug(args ...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_GPIO, ## args) +#define gpio_err(args ...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_GPIO, ## args) + +/** + * struct gpio_psoc_priv_obj - psoc private object + * @lock: qdf spin lock + * @soc: pointer to psoc object + */ +struct gpio_psoc_priv_obj { + qdf_spinlock_t lock; + struct wlan_objmgr_psoc *soc; +}; + +/** + * gpio_get_psoc_priv_obj() - get priv object from psoc object + * @psoc: pointer to psoc object + * + * Return: pointer to gpio psoc private object + */ +static inline +struct gpio_psoc_priv_obj * +gpio_get_psoc_priv_obj(struct wlan_objmgr_psoc *psoc) +{ + struct gpio_psoc_priv_obj *obj; + + if (!psoc) + return NULL; + obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_GPIO); + + return obj; +} + +/** + * wlan_psoc_get_gpio_txops() - get TX ops from the private object + * @psoc: pointer to psoc object + * + * Return: pointer to TX op callback + */ + +static inline struct wlan_lmac_if_gpio_tx_ops * +wlan_psoc_get_gpio_txops(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_tx_ops *tx_ops; + + tx_ops = wlan_psoc_get_lmac_if_txops(psoc); + if (!tx_ops) { + gpio_err("tx_ops is NULL"); + return NULL; + } + + return &tx_ops->gpio_ops; +} +#endif /*__WLAN_GPIO_CFG_PRIV_API_H__*/ diff --git a/drivers/staging/qca-wifi-host-cmn/gpio/core/src/wlan_gpio_api.c b/drivers/staging/qca-wifi-host-cmn/gpio/core/src/wlan_gpio_api.c new file mode 100644 index 0000000000000000000000000000000000000000..8525368d67aae9ad12b1395c8bb90d144a32e391 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/gpio/core/src/wlan_gpio_api.c @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_gpio_api.c + */ +#include +#include +#include + +/** + * gpio_psoc_obj_created_notification() - PSOC obj create callback + * @psoc: PSOC object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization to + * get notified when the object is created. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +static QDF_STATUS +gpio_psoc_obj_created_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct gpio_psoc_priv_obj *gpio_obj; + + gpio_obj = qdf_mem_malloc(sizeof(*gpio_obj)); + if (!gpio_obj) + return QDF_STATUS_E_NOMEM; + + qdf_spinlock_create(&gpio_obj->lock); + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_GPIO, + gpio_obj, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + gpio_err("obj attach with psoc failed"); + goto gpio_psoc_attach_failed; + } + + return QDF_STATUS_SUCCESS; + +gpio_psoc_attach_failed: + qdf_spinlock_destroy(&gpio_obj->lock); + qdf_mem_free(gpio_obj); + return status; +} + +/** + * gpio_psoc_obj_destroyed_notification() - obj delete callback + * @psoc: PSOC object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization to + * get notified when the object is deleted. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +static QDF_STATUS +gpio_psoc_obj_destroyed_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct gpio_psoc_priv_obj *gpio_obj; + + gpio_obj = gpio_get_psoc_priv_obj(psoc); + + if (!gpio_obj) { + gpio_err("gpio_obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + status = wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_GPIO, + gpio_obj); + if (QDF_IS_STATUS_ERROR(status)) + gpio_err("gpio_obj detach failed"); + + qdf_spinlock_destroy(&gpio_obj->lock); + qdf_mem_free(gpio_obj); + + return status; +} + +QDF_STATUS wlan_gpio_init(void) +{ + QDF_STATUS status; + + /* register psoc create handler functions. */ + status = wlan_objmgr_register_psoc_create_handler( + WLAN_UMAC_COMP_GPIO, + gpio_psoc_obj_created_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + gpio_err("register create handler failed"); + return status; + } + + /* register psoc delete handler functions. */ + status = wlan_objmgr_register_psoc_destroy_handler( + WLAN_UMAC_COMP_GPIO, + gpio_psoc_obj_destroyed_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + gpio_err("register destroy handler failed"); + goto fail_delete_psoc; + } + + return status; + +fail_delete_psoc: + wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_GPIO, + gpio_psoc_obj_created_notification, + NULL); + return status; +} + +QDF_STATUS wlan_gpio_deinit(void) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS, status; + + /* unregister psoc delete handler functions. */ + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_GPIO, + gpio_psoc_obj_destroyed_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + gpio_err("unregister destroy handler failed"); + ret = status; + } + + /* unregister psoc create handler functions. */ + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_GPIO, + gpio_psoc_obj_created_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + gpio_err("unregister create handler failed"); + ret = status; + } + + return ret; +} diff --git a/drivers/staging/qca-wifi-host-cmn/gpio/dispatcher/inc/wlan_gpio_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/gpio/dispatcher/inc/wlan_gpio_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..0b706f516f2c07e7cd911561991e025876eb0019 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/gpio/dispatcher/inc/wlan_gpio_tgt_api.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_gpio_tgt_api.h + * + * This header file provide with API declarations to interface with Southbound + */ +#ifndef __WLAN_GPIO_CFG_TGT_API_H__ +#define __WLAN_GPIO_CFG_TGT_API_H__ + +#ifdef WLAN_FEATURE_GPIO_CFG +#include +#include +struct wlan_objmgr_psoc; + +/** + * tgt_set_gpio_config_req(): API to set GPIO configuration to lmac + * @psoc: the pointer to psoc object manager + * @param: the pointer to gpio cfg info + * + * Return: status of operation + */ +QDF_STATUS +tgt_set_gpio_config_req(struct wlan_objmgr_psoc *psoc, + struct gpio_config_params *param); + +/** + * tgt_set_gpio_output_req(): API to set GPIO output info to lmac + * @psoc: the pointer to psoc object manager + * @param: the pointer to gpio output info + * + * Return: status of operation + */ + +QDF_STATUS +tgt_set_gpio_output_req(struct wlan_objmgr_psoc *psoc, + struct gpio_output_params *param); + +/** + * tgt_if_gpio_config() - API to send gpio config request + * @psoc: pointer to psoc object + * @gpio_num: gpio pin number + * @input: enable/disable the gpio pin + * @pull_type: gpio pull type + * @intr_mode: gpio interrupt mode + * @mux_config_val: gpio MUX value + * @drive: gpio drive + * @init_enable: gpio init_enable + * + * Return: status of operation + */ +QDF_STATUS tgt_gpio_config(struct wlan_objmgr_psoc *psoc, uint32_t gpio_num, + uint32_t input, uint32_t pull_type, + uint32_t intr_mode, uint32_t mux_config_val, + uint32_t drive, uint32_t init_enable); +/** + * tgt_if_gpio_output() - API to send gpio output request + * @psoc: pointer to psoc object + * @gpio_num: gpio pin number + * @set: enable/disable the gpio pin + * + * Return: status of operation + */ +QDF_STATUS tgt_gpio_output(struct wlan_objmgr_psoc *psoc, uint32_t gpio_num, + uint32_t set); +#else +static QDF_STATUS tgt_gpio_config(struct wlan_objmgr_psoc *psoc, + uint32_t gpio_num, uint32_t input, + uint32_t pull_type, uint32_t intr_mode) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS tgt_gpio_output(struct wlan_objmgr_psoc *psoc, + uint32_t gpio_num, uint32_t set) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_FEATURE_GPIO_CFG */ +#endif /* __WLAN_GPIO_TGT_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/gpio/dispatcher/inc/wlan_gpio_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/gpio/dispatcher/inc/wlan_gpio_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..933825f8a1a64e0414e15d16d2530668fb41b4c7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/gpio/dispatcher/inc/wlan_gpio_ucfg_api.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_gpio_ucfg_api.h + * + * This header file maintain API declaration required for northbound interaction + */ + +#ifndef __WLAN_GPIO_CFG_UCFG_API_H__ +#define __WLAN_GPIO_CFG_UCFG_API_H__ + +#include +#include +struct wlan_objmgr_psoc; + +#ifdef WLAN_FEATURE_GPIO_CFG + +/** + * ucfg_set_gpio_config() - API to set gpio config + * @psoc: the pointer of psoc object + * @param: the pointer of gpio configuration info + * + * Return:QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS ucfg_set_gpio_config(struct wlan_objmgr_psoc *psoc, + struct gpio_config_params *param); + +/** + * ucfg_set_gpio_output() - API to set gpio output + * @psoc: the pointer of psoc object + * @param: the pointer of gpio output info + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS ucfg_set_gpio_output(struct wlan_objmgr_psoc *psoc, + struct gpio_output_params *param); +#endif /* WLAN_FEATURE_GPIO_CFG */ +#endif /* __WLAN_GPIO_CFG_UCFG_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/gpio/dispatcher/src/wlan_gpio_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/gpio/dispatcher/src/wlan_gpio_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..293cfdf0bb24dcd875827ef1deee794c7ef29497 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/gpio/dispatcher/src/wlan_gpio_tgt_api.c @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC:wlan_gpio_tgt_api.c + * + * This file provide API definitions to update gpio configuration from interface + */ +#include +#include +#include +#include + +QDF_STATUS tgt_set_gpio_config_req(struct wlan_objmgr_psoc *psoc, + struct gpio_config_params *param) +{ + struct wlan_lmac_if_gpio_tx_ops *gpio_tx_ops; + + if (!psoc) { + gpio_err("NULL psoc"); + return QDF_STATUS_E_NULL_VALUE; + } + gpio_tx_ops = wlan_psoc_get_gpio_txops(psoc); + if (!gpio_tx_ops) + return QDF_STATUS_E_NULL_VALUE; + + return gpio_tx_ops->set_gpio_config(psoc, param); +} + +QDF_STATUS tgt_set_gpio_output_req(struct wlan_objmgr_psoc *psoc, + struct gpio_output_params *param) +{ + struct wlan_lmac_if_gpio_tx_ops *gpio_tx_ops; + + if (!psoc) { + gpio_err("NULL psoc"); + return QDF_STATUS_E_NULL_VALUE; + } + + gpio_tx_ops = wlan_psoc_get_gpio_txops(psoc); + if (!gpio_tx_ops) + return QDF_STATUS_E_NULL_VALUE; + + return gpio_tx_ops->set_gpio_output(psoc, param); +} + +QDF_STATUS tgt_gpio_config(struct wlan_objmgr_psoc *psoc, uint32_t gpio_num, + uint32_t input, uint32_t pull_type, + uint32_t intr_mode, uint32_t mux_config_val, + uint32_t drive, uint32_t init_enable) +{ + struct gpio_config_params param; + + if (!psoc) { + gpio_err("psoc_obj is null"); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_set(¶m, sizeof(param), 0); + param.pin_pull_type = pull_type; + param.pin_num = gpio_num; + param.pin_dir = input; + param.pin_intr_mode = intr_mode; + param.mux_config_val = mux_config_val; + param.drive = drive; + param.init_enable = init_enable; + + return tgt_set_gpio_config_req(psoc, ¶m); +} + +qdf_export_symbol(tgt_gpio_config); + +static bool tgt_gpio_disabled(struct wlan_objmgr_psoc *psoc) +{ + uint32_t target_type = 0; + struct wlan_lmac_if_target_tx_ops *target_type_tx_ops; + struct wlan_lmac_if_tx_ops *tx_ops; + + tx_ops = wlan_psoc_get_lmac_if_txops(psoc); + if (!tx_ops) { + gpio_err("tx_ops is NULL"); + return false; + } + target_type_tx_ops = &tx_ops->target_tx_ops; + + if (target_type_tx_ops->tgt_get_tgt_type) + target_type = target_type_tx_ops->tgt_get_tgt_type(psoc); + + if ((target_type == TARGET_TYPE_QCA8074) || + (target_type == TARGET_TYPE_QCN6122) || + (target_type == TARGET_TYPE_QCA8074V2) || + (target_type == TARGET_TYPE_QCA5018) || + (target_type == TARGET_TYPE_QCA6018)) { + return true; + } + + return false; +} + +QDF_STATUS tgt_gpio_output(struct wlan_objmgr_psoc *psoc, uint32_t gpio_num, + uint32_t set) +{ + struct gpio_output_params param; + + if (!psoc) { + gpio_err("psoc_obj is null"); + return QDF_STATUS_E_INVAL; + } + + if (tgt_gpio_disabled(psoc)) + return QDF_STATUS_E_INVAL; + + qdf_mem_set(¶m, sizeof(param), 0); + param.pin_num = gpio_num; + param.pin_set = set; + + return tgt_set_gpio_output_req(psoc, ¶m); +} + +qdf_export_symbol(tgt_gpio_output); diff --git a/drivers/staging/qca-wifi-host-cmn/gpio/dispatcher/src/wlan_gpio_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/gpio/dispatcher/src/wlan_gpio_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..648658a7fbe331cff4f6d83be71d6bb611ea6890 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/gpio/dispatcher/src/wlan_gpio_ucfg_api.c @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains gpio north bound interface definitions + */ +#include +#include +#include "qdf_module.h" + +QDF_STATUS +ucfg_set_gpio_config(struct wlan_objmgr_psoc *psoc, + struct gpio_config_params *param) +{ + return tgt_set_gpio_config_req(psoc, param); +} +qdf_export_symbol(ucfg_set_gpio_config); + +QDF_STATUS +ucfg_set_gpio_output(struct wlan_objmgr_psoc *psoc, + struct gpio_output_params *param) +{ + return tgt_set_gpio_output_req(psoc, param); +} +qdf_export_symbol(ucfg_set_gpio_output); diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/README b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/README new file mode 100644 index 0000000000000000000000000000000000000000..e966035035f71ac2fabc623ea9bb9a7d7be55ecd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/README @@ -0,0 +1,17 @@ +The below qca6290 and qca8074 folder has c files defining functions +and structures to hold target specific definitions to handle differences between +chips with respect to hal. + +Please ensure changes get applied to all platform specific files if the fixes +are generic and applicable to all the folders. + +qca6290 + hal_6290_rx.c - rx related target specific function + hal_6290_srng_table.c - holds hw srng table and hal hw reg offsets array + hal_6290_tx.c - tx related target specific function +qca8074 + hal_8074_rx.c - rx related target specific function + hal_8074_srng_table.c - holds hw srng table and hal hw reg offsets array + hal_8074_tx.c - tx related target specific function + + diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h new file mode 100644 index 0000000000000000000000000000000000000000..649852dcac214a381a563ae4171dcf4ebc743cc5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h @@ -0,0 +1,2394 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HAL_API_H_ +#define _HAL_API_H_ + +#include "qdf_types.h" +#include "qdf_util.h" +#include "qdf_atomic.h" +#include "hal_internal.h" +#include "hif.h" +#include "hif_io32.h" +#include "qdf_platform.h" + +#ifdef DUMP_REO_QUEUE_INFO_IN_DDR +#include "hal_hw_headers.h" +#endif + +/* Ring index for WBM2SW2 release ring */ +#define HAL_IPA_TX_COMP_RING_IDX 2 + +/* calculate the register address offset from bar0 of shadow register x */ +#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \ + defined(QCA_WIFI_QCA6750) +#define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x000008FC +#define SHADOW_REGISTER_END_ADDRESS_OFFSET \ + ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS))) +#define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x))) +#elif defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCN9000) +#define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00003024 +#define SHADOW_REGISTER_END_ADDRESS_OFFSET \ + ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS))) +#define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x))) +#else +#define SHADOW_REGISTER(x) 0 +#endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6750 */ + +#define MAX_UNWINDOWED_ADDRESS 0x80000 +#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \ + defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6750) +#define WINDOW_ENABLE_BIT 0x40000000 +#else +#define WINDOW_ENABLE_BIT 0x80000000 +#endif +#define WINDOW_REG_ADDRESS 0x310C +#define WINDOW_SHIFT 19 +#define WINDOW_VALUE_MASK 0x3F +#define WINDOW_START MAX_UNWINDOWED_ADDRESS +#define WINDOW_RANGE_MASK 0x7FFFF +/* + * BAR + 4K is always accessible, any access outside this + * space requires force wake procedure. + * OFFSET = 4K - 32 bytes = 0xFE0 + */ +#define MAPPED_REF_OFF 0xFE0 + +/** + * hal_ring_desc - opaque handle for DP ring descriptor + */ +struct hal_ring_desc; +typedef struct hal_ring_desc *hal_ring_desc_t; + +/** + * hal_link_desc - opaque handle for DP link descriptor + */ +struct hal_link_desc; +typedef struct hal_link_desc *hal_link_desc_t; + +/** + * hal_rxdma_desc - opaque handle for DP rxdma dst ring descriptor + */ +struct hal_rxdma_desc; +typedef struct hal_rxdma_desc *hal_rxdma_desc_t; + +/** + * hal_buff_addrinfo - opaque handle for DP buffer address info + */ +struct hal_buff_addrinfo; +typedef struct hal_buff_addrinfo *hal_buff_addrinfo_t; + +#ifdef ENABLE_VERBOSE_DEBUG +static inline void +hal_set_verbose_debug(bool flag) +{ + is_hal_verbose_debug_enabled = flag; +} +#endif + +#ifdef ENABLE_HAL_SOC_STATS +#define HAL_STATS_INC(_handle, _field, _delta) \ +{ \ + if (likely(_handle)) \ + _handle->stats._field += _delta; \ +} +#else +#define HAL_STATS_INC(_handle, _field, _delta) +#endif + +#ifdef ENABLE_HAL_REG_WR_HISTORY +#define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \ + hal_reg_wr_fail_history_add(hal_soc, offset, wr_val, rd_val) + +void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc, + uint32_t offset, + uint32_t wr_val, + uint32_t rd_val); + +static inline int hal_history_get_next_index(qdf_atomic_t *table_index, + int array_size) +{ + int record_index = qdf_atomic_inc_return(table_index); + + return record_index & (array_size - 1); +} +#else +#define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \ + hal_err("write failed at reg offset 0x%x, write 0x%x read 0x%x\n", \ + offset, \ + wr_val, \ + rd_val) +#endif + +/** + * hal_reg_write_result_check() - check register writing result + * @hal_soc: HAL soc handle + * @offset: register offset to read + * @exp_val: the expected value of register + * @ret_confirm: result confirm flag + * + * Return: none + */ +static inline void hal_reg_write_result_check(struct hal_soc *hal_soc, + uint32_t offset, + uint32_t exp_val) +{ + uint32_t value; + + value = qdf_ioread32(hal_soc->dev_base_addr + offset); + if (exp_val != value) { + HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, exp_val, value); + HAL_STATS_INC(hal_soc, reg_write_fail, 1); + } +} + +#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) +static inline void hal_lock_reg_access(struct hal_soc *soc, + unsigned long *flags) +{ + qdf_spin_lock_irqsave(&soc->register_access_lock); +} + +static inline void hal_unlock_reg_access(struct hal_soc *soc, + unsigned long *flags) +{ + qdf_spin_unlock_irqrestore(&soc->register_access_lock); +} +#else +static inline void hal_lock_reg_access(struct hal_soc *soc, + unsigned long *flags) +{ + pld_lock_reg_window(soc->qdf_dev->dev, flags); +} + +static inline void hal_unlock_reg_access(struct hal_soc *soc, + unsigned long *flags) +{ + pld_unlock_reg_window(soc->qdf_dev->dev, flags); +} +#endif + +#ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE +/** + * hal_select_window_confirm() - write remap window register and + check writing result + * + */ +static inline void hal_select_window_confirm(struct hal_soc *hal_soc, + uint32_t offset) +{ + uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK; + + qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS, + WINDOW_ENABLE_BIT | window); + hal_soc->register_window = window; + + hal_reg_write_result_check(hal_soc, WINDOW_REG_ADDRESS, + WINDOW_ENABLE_BIT | window); +} +#else +static inline void hal_select_window_confirm(struct hal_soc *hal_soc, + uint32_t offset) +{ + uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK; + + if (window != hal_soc->register_window) { + qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS, + WINDOW_ENABLE_BIT | window); + hal_soc->register_window = window; + + hal_reg_write_result_check( + hal_soc, + WINDOW_REG_ADDRESS, + WINDOW_ENABLE_BIT | window); + } +} +#endif + +static inline qdf_iomem_t hal_get_window_address(struct hal_soc *hal_soc, + qdf_iomem_t addr) +{ + return hal_soc->ops->hal_get_window_address(hal_soc, addr); +} + +/** + * hal_write32_mb() - Access registers to update configuration + * @hal_soc: hal soc handle + * @offset: offset address from the BAR + * @value: value to write + * + * Return: None + * + * Description: Register address space is split below: + * SHADOW REGION UNWINDOWED REGION WINDOWED REGION + * |--------------------|-------------------|------------------| + * BAR NO FORCE WAKE BAR+4K FORCE WAKE BAR+512K FORCE WAKE + * + * 1. Any access to the shadow region, doesn't need force wake + * and windowing logic to access. + * 2. Any access beyond BAR + 4K: + * If init_phase enabled, no force wake is needed and access + * should be based on windowed or unwindowed access. + * If init_phase disabled, force wake is needed and access + * should be based on windowed or unwindowed access. + * + * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1 + * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS + * note3: WINDOW_VALUE_MASK = big enough that trying to write past + * that window would be a bug + */ +#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \ + !defined(QCA_WIFI_QCA6750) +static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset, + uint32_t value) +{ + unsigned long flags; + qdf_iomem_t new_addr; + + if (!hal_soc->use_register_windowing || + offset < MAX_UNWINDOWED_ADDRESS) { + qdf_iowrite32(hal_soc->dev_base_addr + offset, value); + } else if (hal_soc->static_window_map) { + new_addr = hal_get_window_address(hal_soc, + hal_soc->dev_base_addr + offset); + qdf_iowrite32(new_addr, value); + } else { + hal_lock_reg_access(hal_soc, &flags); + hal_select_window_confirm(hal_soc, offset); + qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START + + (offset & WINDOW_RANGE_MASK), value); + hal_unlock_reg_access(hal_soc, &flags); + } +} + +/** + * hal_write_address_32_mb - write a value to a register + * + */ +static inline +void hal_write_address_32_mb(struct hal_soc *hal_soc, + qdf_iomem_t addr, uint32_t value) +{ + uint32_t offset; + qdf_iomem_t new_addr; + + if (!hal_soc->use_register_windowing) + return qdf_iowrite32(addr, value); + + offset = addr - hal_soc->dev_base_addr; + if (hal_soc->static_window_map) { + new_addr = hal_get_window_address(hal_soc, addr); + return qdf_iowrite32(new_addr, value); + } + hal_write32_mb(hal_soc, offset, value); +} + +#define hal_write32_mb_confirm(_hal_soc, _offset, _value) \ + hal_write32_mb(_hal_soc, _offset, _value) +#else +static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset, + uint32_t value) +{ + int ret; + unsigned long flags; + + /* Region < BAR + 4K can be directly accessed */ + if (offset < MAPPED_REF_OFF) { + qdf_iowrite32(hal_soc->dev_base_addr + offset, value); + return; + } + + /* Region greater than BAR + 4K */ + if (!hal_soc->init_phase) { + ret = hif_force_wake_request(hal_soc->hif_handle); + if (ret) { + hal_err("Wake up request failed"); + qdf_check_state_before_panic(); + return; + } + } + + if (!hal_soc->use_register_windowing || + offset < MAX_UNWINDOWED_ADDRESS) { + qdf_iowrite32(hal_soc->dev_base_addr + offset, value); + } else { + hal_lock_reg_access(hal_soc, &flags); + hal_select_window_confirm(hal_soc, offset); + qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START + + (offset & WINDOW_RANGE_MASK), value); + hal_unlock_reg_access(hal_soc, &flags); + } + + if (!hal_soc->init_phase) { + ret = hif_force_wake_release(hal_soc->hif_handle); + if (ret) { + hal_err("Wake up release failed"); + qdf_check_state_before_panic(); + return; + } + } +} + +/** + * hal_write32_mb_confirm() - write register and check wirting result + * + */ +static inline void hal_write32_mb_confirm(struct hal_soc *hal_soc, + uint32_t offset, + uint32_t value) +{ + int ret; + unsigned long flags; + + /* Region < BAR + 4K can be directly accessed */ + if (offset < MAPPED_REF_OFF) { + qdf_iowrite32(hal_soc->dev_base_addr + offset, value); + return; + } + + /* Region greater than BAR + 4K */ + if (!hal_soc->init_phase) { + ret = hif_force_wake_request(hal_soc->hif_handle); + if (ret) { + hal_err("Wake up request failed"); + qdf_check_state_before_panic(); + return; + } + } + + if (!hal_soc->use_register_windowing || + offset < MAX_UNWINDOWED_ADDRESS) { + qdf_iowrite32(hal_soc->dev_base_addr + offset, value); + hal_reg_write_result_check(hal_soc, offset, + value); + } else { + hal_lock_reg_access(hal_soc, &flags); + hal_select_window_confirm(hal_soc, offset); + qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START + + (offset & WINDOW_RANGE_MASK), value); + + hal_reg_write_result_check( + hal_soc, + WINDOW_START + (offset & WINDOW_RANGE_MASK), + value); + hal_unlock_reg_access(hal_soc, &flags); + } + + if (!hal_soc->init_phase) { + ret = hif_force_wake_release(hal_soc->hif_handle); + if (ret) { + hal_err("Wake up release failed"); + qdf_check_state_before_panic(); + return; + } + } +} + +/** + * hal_write_address_32_mb - write a value to a register + * + */ +static inline +void hal_write_address_32_mb(struct hal_soc *hal_soc, + qdf_iomem_t addr, uint32_t value, bool wr_confirm) +{ + uint32_t offset; + + if (!hal_soc->use_register_windowing) + return qdf_iowrite32(addr, value); + + offset = addr - hal_soc->dev_base_addr; + + if (qdf_unlikely(wr_confirm)) + hal_write32_mb_confirm(hal_soc, offset, value); + else + hal_write32_mb(hal_soc, offset, value); +} +#endif + + +#ifdef DP_HAL_MULTIWINDOW_DIRECT_ACCESS +static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc, + struct hal_srng *srng, + void __iomem *addr, + uint32_t value) +{ + qdf_iowrite32(addr, value); +} +#elif defined(FEATURE_HAL_DELAYED_REG_WRITE) +static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc, + struct hal_srng *srng, + void __iomem *addr, + uint32_t value) +{ + hal_delayed_reg_write(hal_soc, srng, addr, value); +} +#else +static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc, + struct hal_srng *srng, + void __iomem *addr, + uint32_t value) +{ + hal_write_address_32_mb(hal_soc, addr, value, false); +} +#endif + +#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \ + !defined(QCA_WIFI_QCA6750) +/** + * hal_read32_mb() - Access registers to read configuration + * @hal_soc: hal soc handle + * @offset: offset address from the BAR + * @value: value to write + * + * Description: Register address space is split below: + * SHADOW REGION UNWINDOWED REGION WINDOWED REGION + * |--------------------|-------------------|------------------| + * BAR NO FORCE WAKE BAR+4K FORCE WAKE BAR+512K FORCE WAKE + * + * 1. Any access to the shadow region, doesn't need force wake + * and windowing logic to access. + * 2. Any access beyond BAR + 4K: + * If init_phase enabled, no force wake is needed and access + * should be based on windowed or unwindowed access. + * If init_phase disabled, force wake is needed and access + * should be based on windowed or unwindowed access. + * + * Return: < 0 for failure/>= 0 for success + */ +static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset) +{ + uint32_t ret; + unsigned long flags; + qdf_iomem_t new_addr; + + if (!hal_soc->use_register_windowing || + offset < MAX_UNWINDOWED_ADDRESS) { + return qdf_ioread32(hal_soc->dev_base_addr + offset); + } else if (hal_soc->static_window_map) { + new_addr = hal_get_window_address(hal_soc, hal_soc->dev_base_addr + offset); + return qdf_ioread32(new_addr); + } + + hal_lock_reg_access(hal_soc, &flags); + hal_select_window_confirm(hal_soc, offset); + ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START + + (offset & WINDOW_RANGE_MASK)); + hal_unlock_reg_access(hal_soc, &flags); + + return ret; +} +#else +static +uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset) +{ + uint32_t ret; + unsigned long flags; + + /* Region < BAR + 4K can be directly accessed */ + if (offset < MAPPED_REF_OFF) + return qdf_ioread32(hal_soc->dev_base_addr + offset); + + if ((!hal_soc->init_phase) && + hif_force_wake_request(hal_soc->hif_handle)) { + hal_err("Wake up request failed"); + qdf_check_state_before_panic(); + return 0; + } + + if (!hal_soc->use_register_windowing || + offset < MAX_UNWINDOWED_ADDRESS) { + ret = qdf_ioread32(hal_soc->dev_base_addr + offset); + } else { + hal_lock_reg_access(hal_soc, &flags); + hal_select_window_confirm(hal_soc, offset); + ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START + + (offset & WINDOW_RANGE_MASK)); + hal_unlock_reg_access(hal_soc, &flags); + } + + if ((!hal_soc->init_phase) && + hif_force_wake_release(hal_soc->hif_handle)) { + hal_err("Wake up release failed"); + qdf_check_state_before_panic(); + return 0; + } + + return ret; +} +#endif + +#ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE +/* To check shadow config index range between 0..31 */ +#define HAL_SHADOW_REG_INDEX_LOW 32 +/* To check shadow config index range between 32..39 */ +#define HAL_SHADOW_REG_INDEX_HIGH 40 +/* Dirty bit reg offsets corresponding to shadow config index */ +#define HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET 0x30C8 +#define HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET 0x30C4 +/* PCIE_PCIE_TOP base addr offset */ +#define HAL_PCIE_PCIE_TOP_WRAPPER 0x01E00000 +/* Max retry attempts to read the dirty bit reg */ +#ifdef HAL_CONFIG_SLUB_DEBUG_ON +#define HAL_SHADOW_DIRTY_BIT_POLL_MAX 10000 +#else +#define HAL_SHADOW_DIRTY_BIT_POLL_MAX 2000 +#endif +/* Delay in usecs for polling dirty bit reg */ +#define HAL_SHADOW_DIRTY_BIT_POLL_DELAY 5 + +/** + * hal_poll_dirty_bit_reg() - Poll dirty register bit to confirm + * write was successful + * @hal_soc: hal soc handle + * @shadow_config_index: index of shadow reg used to confirm + * write + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS hal_poll_dirty_bit_reg(struct hal_soc *hal, + int shadow_config_index) +{ + uint32_t read_value = 0; + int retry_cnt = 0; + uint32_t reg_offset = 0; + + if (shadow_config_index > 0 && + shadow_config_index < HAL_SHADOW_REG_INDEX_LOW) { + reg_offset = + HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET; + } else if (shadow_config_index >= HAL_SHADOW_REG_INDEX_LOW && + shadow_config_index < HAL_SHADOW_REG_INDEX_HIGH) { + reg_offset = + HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET; + } else { + hal_err("Invalid shadow_config_index = %d", + shadow_config_index); + return QDF_STATUS_E_INVAL; + } + while (retry_cnt < HAL_SHADOW_DIRTY_BIT_POLL_MAX) { + read_value = hal_read32_mb( + hal, HAL_PCIE_PCIE_TOP_WRAPPER + reg_offset); + /* Check if dirty bit corresponding to shadow_index is set */ + if (read_value & BIT(shadow_config_index)) { + /* Dirty reg bit not reset */ + qdf_udelay(HAL_SHADOW_DIRTY_BIT_POLL_DELAY); + retry_cnt++; + } else { + hal_debug("Shadow write: offset 0x%x read val 0x%x", + reg_offset, read_value); + return QDF_STATUS_SUCCESS; + } + } + return QDF_STATUS_E_TIMEOUT; +} + +/** + * hal_write32_mb_shadow_confirm() - write to shadow reg and + * poll dirty register bit to confirm write + * @hal_soc: hal soc handle + * @reg_offset: target reg offset address from BAR + * @value: value to write + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS hal_write32_mb_shadow_confirm( + struct hal_soc *hal, + uint32_t reg_offset, + uint32_t value) +{ + int i; + QDF_STATUS ret; + uint32_t shadow_reg_offset; + uint32_t read_value; + int shadow_config_index; + bool is_reg_offset_present = false; + + for (i = 0; i < MAX_GENERIC_SHADOW_REG; i++) { + /* Found the shadow config for the reg_offset */ + struct shadow_reg_config *hal_shadow_reg_list = + &hal->list_shadow_reg_config[i]; + if (hal_shadow_reg_list->target_register == + reg_offset) { + shadow_config_index = + hal_shadow_reg_list->shadow_config_index; + shadow_reg_offset = + SHADOW_REGISTER(shadow_config_index); + hal_write32_mb_confirm( + hal, shadow_reg_offset, value); + is_reg_offset_present = true; + break; + } + ret = QDF_STATUS_E_FAILURE; + } + if (is_reg_offset_present) { + ret = hal_poll_dirty_bit_reg(hal, shadow_config_index); + read_value = hal_read32_mb(hal, reg_offset); + hal_info("Shadow retry:reg 0x%x val 0x%x readval 0x%x ret %d", + reg_offset, value, read_value, ret); + if (QDF_IS_STATUS_ERROR(ret)) { + HAL_STATS_INC(hal, shadow_reg_write_fail, 1); + return ret; + } + HAL_STATS_INC(hal, shadow_reg_write_succ, 1); + } + return ret; +} + +#else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */ + +static inline QDF_STATUS hal_write32_mb_shadow_confirm( + struct hal_soc *hal_soc, + uint32_t offset, + uint32_t value) +{ + return QDF_STATUS_SUCCESS; +} + +#endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */ + +/* Max times allowed for register writing retry */ +#define HAL_REG_WRITE_RETRY_MAX 5 +/* Delay milliseconds for each time retry */ +#define HAL_REG_WRITE_RETRY_DELAY 1 + +/** + * hal_write32_mb_confirm_retry() - write register with confirming and + do retry/recovery if writing failed + * @hal_soc: hal soc handle + * @offset: offset address from the BAR + * @value: value to write + * @recovery: is recovery needed or not. + * + * Write the register value with confirming and read it back, if + * read back value is not as expected, do retry for writing, if + * retry hit max times allowed but still fail, check if recovery + * needed. + * + * Return: None + */ +static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc, + uint32_t offset, + uint32_t value, + bool recovery) +{ + uint8_t retry_cnt = 0; + uint32_t read_value; + QDF_STATUS ret; + + while (retry_cnt <= HAL_REG_WRITE_RETRY_MAX) { + hal_write32_mb_confirm(hal_soc, offset, value); + read_value = hal_read32_mb(hal_soc, offset); + if (qdf_likely(read_value == value)) + break; + + /* write failed, do retry */ + hal_warn("Retry reg offset 0x%x, value 0x%x, read value 0x%x", + offset, value, read_value); + qdf_mdelay(HAL_REG_WRITE_RETRY_DELAY); + retry_cnt++; + } + + if (retry_cnt > HAL_REG_WRITE_RETRY_MAX) { + ret = hal_write32_mb_shadow_confirm(hal_soc, offset, value); + if (QDF_IS_STATUS_ERROR(ret) && recovery) + qdf_trigger_self_recovery( + NULL, QDF_HAL_REG_WRITE_FAILURE); + } +} + +#ifdef FEATURE_HAL_DELAYED_REG_WRITE +/** + * hal_dump_reg_write_srng_stats() - dump SRNG reg write stats + * @hal_soc: HAL soc handle + * + * Return: none + */ +void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl); + +/** + * hal_dump_reg_write_stats() - dump reg write stats + * @hal_soc: HAL soc handle + * + * Return: none + */ +void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl); + +/** + * hal_get_reg_write_pending_work() - get the number of entries + * pending in the workqueue to be processed. + * @hal_soc: HAL soc handle + * + * Returns: the number of entries pending to be processed + */ +int hal_get_reg_write_pending_work(void *hal_soc); + +#else +static inline void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl) +{ +} + +static inline void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl) +{ +} + +static inline int hal_get_reg_write_pending_work(void *hal_soc) +{ + return 0; +} +#endif + +/** + * hal_read_address_32_mb() - Read 32-bit value from the register + * @soc: soc handle + * @addr: register address to read + * + * Return: 32-bit value + */ +static inline +uint32_t hal_read_address_32_mb(struct hal_soc *soc, + qdf_iomem_t addr) +{ + uint32_t offset; + uint32_t ret; + qdf_iomem_t new_addr; + + if (!soc->use_register_windowing) + return qdf_ioread32(addr); + + offset = addr - soc->dev_base_addr; + if (soc->static_window_map) { + new_addr = hal_get_window_address(soc, addr); + return qdf_ioread32(new_addr); + } + + ret = hal_read32_mb(soc, offset); + return ret; +} + +/** + * hal_attach - Initialize HAL layer + * @hif_handle: Opaque HIF handle + * @qdf_dev: QDF device + * + * Return: Opaque HAL SOC handle + * NULL on failure (if given ring is not available) + * + * This function should be called as part of HIF initialization (for accessing + * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle() + */ +void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev); + +/** + * hal_detach - Detach HAL layer + * @hal_soc: HAL SOC handle + * + * This function should be called as part of HIF detach + * + */ +extern void hal_detach(void *hal_soc); + +/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */ +enum hal_ring_type { + REO_DST = 0, + REO_EXCEPTION = 1, + REO_REINJECT = 2, + REO_CMD = 3, + REO_STATUS = 4, + TCL_DATA = 5, + TCL_CMD = 6, + TCL_STATUS = 7, + CE_SRC = 8, + CE_DST = 9, + CE_DST_STATUS = 10, + WBM_IDLE_LINK = 11, + SW2WBM_RELEASE = 12, + WBM2SW_RELEASE = 13, + RXDMA_BUF = 14, + RXDMA_DST = 15, + RXDMA_MONITOR_BUF = 16, + RXDMA_MONITOR_STATUS = 17, + RXDMA_MONITOR_DST = 18, + RXDMA_MONITOR_DESC = 19, + DIR_BUF_RX_DMA_SRC = 20, +#ifdef WLAN_FEATURE_CIF_CFR + WIFI_POS_SRC, +#endif + MAX_RING_TYPES +}; + +#define HAL_SRNG_LMAC_RING 0x80000000 +/* SRNG flags passed in hal_srng_params.flags */ +#define HAL_SRNG_MSI_SWAP 0x00000008 +#define HAL_SRNG_RING_PTR_SWAP 0x00000010 +#define HAL_SRNG_DATA_TLV_SWAP 0x00000020 +#define HAL_SRNG_LOW_THRES_INTR_ENABLE 0x00010000 +#define HAL_SRNG_MSI_INTR 0x00020000 +#define HAL_SRNG_CACHED_DESC 0x00040000 + +#ifdef QCA_WIFI_QCA6490 +#define HAL_SRNG_PREFETCH_TIMER 1 +#else +#define HAL_SRNG_PREFETCH_TIMER 0 +#endif + +#define PN_SIZE_24 0 +#define PN_SIZE_48 1 +#define PN_SIZE_128 2 + +#ifdef FORCE_WAKE +/** + * hal_set_init_phase() - Indicate initialization of + * datapath rings + * @soc: hal_soc handle + * @init_phase: flag to indicate datapath rings + * initialization status + * + * Return: None + */ +void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase); +#else +static inline +void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase) +{ +} +#endif /* FORCE_WAKE */ + +/** + * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be + * used by callers for calculating the size of memory to be allocated before + * calling hal_srng_setup to setup the ring + * + * @hal_soc: Opaque HAL SOC handle + * @ring_type: one of the types from hal_ring_type + * + */ +extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type); + +/** + * hal_srng_max_entries - Returns maximum possible number of ring entries + * @hal_soc: Opaque HAL SOC handle + * @ring_type: one of the types from hal_ring_type + * + * Return: Maximum number of entries for the given ring_type + */ +uint32_t hal_srng_max_entries(void *hal_soc, int ring_type); + +/** + * hal_srng_dump - Dump ring status + * @srng: hal srng pointer + */ +void hal_srng_dump(struct hal_srng *srng); + +/** + * hal_srng_get_dir - Returns the direction of the ring + * @hal_soc: Opaque HAL SOC handle + * @ring_type: one of the types from hal_ring_type + * + * Return: Ring direction + */ +enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type); + +/* HAL memory information */ +struct hal_mem_info { + /* dev base virutal addr */ + void *dev_base_addr; + /* dev base physical addr */ + void *dev_base_paddr; + /* Remote virtual pointer memory for HW/FW updates */ + void *shadow_rdptr_mem_vaddr; + /* Remote physical pointer memory for HW/FW updates */ + void *shadow_rdptr_mem_paddr; + /* Shared memory for ring pointer updates from host to FW */ + void *shadow_wrptr_mem_vaddr; + /* Shared physical memory for ring pointer updates from host to FW */ + void *shadow_wrptr_mem_paddr; +}; + +/* SRNG parameters to be passed to hal_srng_setup */ +struct hal_srng_params { + /* Physical base address of the ring */ + qdf_dma_addr_t ring_base_paddr; + /* Virtual base address of the ring */ + void *ring_base_vaddr; + /* Number of entries in ring */ + uint32_t num_entries; + /* max transfer length */ + uint16_t max_buffer_length; + /* MSI Address */ + qdf_dma_addr_t msi_addr; + /* MSI data */ + uint32_t msi_data; + /* Interrupt timer threshold – in micro seconds */ + uint32_t intr_timer_thres_us; + /* Interrupt batch counter threshold – in number of ring entries */ + uint32_t intr_batch_cntr_thres_entries; + /* Low threshold – in number of ring entries + * (valid for src rings only) + */ + uint32_t low_threshold; + /* Misc flags */ + uint32_t flags; + /* Unique ring id */ + uint8_t ring_id; + /* Source or Destination ring */ + enum hal_srng_dir ring_dir; + /* Size of ring entry */ + uint32_t entry_size; + /* hw register base address */ + void *hwreg_base[MAX_SRNG_REG_GROUPS]; + /* prefetch timer config - in micro seconds */ + uint32_t prefetch_timer; +}; + +/* hal_construct_srng_shadow_regs() - initialize the shadow + * registers for srngs + * @hal_soc: hal handle + * + * Return: QDF_STATUS_OK on success + */ +QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc); + +/* hal_set_one_shadow_config() - add a config for the specified ring + * @hal_soc: hal handle + * @ring_type: ring type + * @ring_num: ring num + * + * The ring type and ring num uniquely specify the ring. After this call, + * the hp/tp will be added as the next entry int the shadow register + * configuration table. The hal code will use the shadow register address + * in place of the hp/tp address. + * + * This function is exposed, so that the CE module can skip configuring shadow + * registers for unused ring and rings assigned to the firmware. + * + * Return: QDF_STATUS_OK on success + */ +QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type, + int ring_num); +/** + * hal_get_shadow_config() - retrieve the config table + * @hal_soc: hal handle + * @shadow_config: will point to the table after + * @num_shadow_registers_configured: will contain the number of valid entries + */ +extern void hal_get_shadow_config(void *hal_soc, + struct pld_shadow_reg_v2_cfg **shadow_config, + int *num_shadow_registers_configured); +/** + * hal_srng_setup - Initialize HW SRNG ring. + * + * @hal_soc: Opaque HAL SOC handle + * @ring_type: one of the types from hal_ring_type + * @ring_num: Ring number if there are multiple rings of + * same type (staring from 0) + * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings + * @ring_params: SRNG ring params in hal_srng_params structure. + + * Callers are expected to allocate contiguous ring memory of size + * 'num_entries * entry_size' bytes and pass the physical and virtual base + * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params + * structure. Ring base address should be 8 byte aligned and size of each ring + * entry should be queried using the API hal_srng_get_entrysize + * + * Return: Opaque pointer to ring on success + * NULL on failure (if given ring is not available) + */ +extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num, + int mac_id, struct hal_srng_params *ring_params); + +/* Remapping ids of REO rings */ +#define REO_REMAP_TCL 0 +#define REO_REMAP_SW1 1 +#define REO_REMAP_SW2 2 +#define REO_REMAP_SW3 3 +#define REO_REMAP_SW4 4 +#define REO_REMAP_RELEASE 5 +#define REO_REMAP_FW 6 +#define REO_REMAP_UNUSED 7 + +/* + * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_0 + * to map destination to rings + */ +#define HAL_REO_ERR_REMAP_IX0(_VALUE, _OFFSET) \ + ((_VALUE) << \ + (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0_ERROR_ ## \ + DESTINATION_RING_ ## _OFFSET ## _SHFT)) + +/* + * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_1 + * to map destination to rings + */ +#define HAL_REO_ERR_REMAP_IX1(_VALUE, _OFFSET) \ + ((_VALUE) << \ + (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_1_ERROR_ ## \ + DESTINATION_RING_ ## _OFFSET ## _SHFT)) + +/* + * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0 + * to map destination to rings + */ +#define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \ + ((_VALUE) << \ + (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \ + _OFFSET ## _SHFT)) + +/* + * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1 + * to map destination to rings + */ +#define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \ + ((_VALUE) << \ + (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \ + _OFFSET ## _SHFT)) + +/* + * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3 + * to map destination to rings + */ +#define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \ + ((_VALUE) << \ + (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \ + _OFFSET ## _SHFT)) + +/** + * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX + * @hal_soc_hdl: HAL SOC handle + * @read: boolean value to indicate if read or write + * @ix0: pointer to store IX0 reg value + * @ix1: pointer to store IX1 reg value + * @ix2: pointer to store IX2 reg value + * @ix3: pointer to store IX3 reg value + */ +void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read, + uint32_t *ix0, uint32_t *ix1, + uint32_t *ix2, uint32_t *ix3); + +/** + * hal_srng_set_hp_paddr_confirm() - Set physical address to dest SRNG head + * pointer and confirm that write went through by reading back the value + * @sring: sring pointer + * @paddr: physical address + */ +extern void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *sring, + uint64_t paddr); + +/** + * hal_srng_dst_init_hp() - Initilaize head pointer with cached head pointer + * @hal_soc: hal_soc handle + * @srng: sring pointer + * @vaddr: virtual address + */ +void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc, + struct hal_srng *srng, + uint32_t *vaddr); + +/** + * hal_srng_cleanup - Deinitialize HW SRNG ring. + * @hal_soc: Opaque HAL SOC handle + * @hal_srng: Opaque HAL SRNG pointer + */ +void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl); + +static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + return !!srng->initialized; +} + +/** + * hal_srng_dst_peek - Check if there are any entries in the ring (peek) + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Destination ring pointer + * + * Caller takes responsibility for any locking needs. + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline +void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) + return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]); + + return NULL; +} + +/** + * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use + * hal_srng_access_start if locked access is required + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Ring pointer (Source or Destination ring) + * + * Return: 0 on success; error on failire + */ +static inline int +hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl; + uint32_t *desc; + + if (srng->ring_dir == HAL_SRNG_SRC_RING) + srng->u.src_ring.cached_tp = + *(volatile uint32_t *)(srng->u.src_ring.tp_addr); + else { + srng->u.dst_ring.cached_hp = + *(volatile uint32_t *)(srng->u.dst_ring.hp_addr); + + if (srng->flags & HAL_SRNG_CACHED_DESC) { + desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl); + if (qdf_likely(desc)) { + qdf_mem_dma_cache_sync(soc->qdf_dev, + qdf_mem_virt_to_phys + (desc), + QDF_DMA_FROM_DEVICE, + (srng->entry_size * + sizeof(uint32_t))); + qdf_prefetch(desc); + } + } + } + + return 0; +} + +/** + * hal_srng_access_start - Start (locked) ring access + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Ring pointer (Source or Destination ring) + * + * Return: 0 on success; error on failire + */ +static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + if (qdf_unlikely(!hal_ring_hdl)) { + qdf_print("Error: Invalid hal_ring\n"); + return -EINVAL; + } + + SRNG_LOCK(&(srng->lock)); + + return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl); +} + +/** + * hal_srng_dst_get_next - Get next entry from a destination ring and move + * cached tail pointer + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Destination ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline +void *hal_srng_dst_get_next(void *hal_soc, + hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + struct hal_soc *soc = (struct hal_soc *)hal_soc; + uint32_t *desc; + uint32_t *desc_next; + uint32_t tp; + + if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) { + desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]); + /* TODO: Using % is expensive, but we have to do this since + * size of some SRNG rings is not power of 2 (due to descriptor + * sizes). Need to create separate API for rings used + * per-packet, with sizes power of 2 (TCL2SW, REO2SW, + * SW2RXDMA and CE rings) + */ + srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) % + srng->ring_size; + + if (srng->flags & HAL_SRNG_CACHED_DESC) { + tp = srng->u.dst_ring.tp; + desc_next = &srng->ring_base_vaddr[tp]; + qdf_mem_dma_cache_sync(soc->qdf_dev, + qdf_mem_virt_to_phys(desc_next), + QDF_DMA_FROM_DEVICE, + (srng->entry_size * + sizeof(uint32_t))); + qdf_prefetch(desc_next); + } + + return (void *)desc; + } + + return NULL; +} + +/** + * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move + * cached head pointer + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Destination ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline void * +hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + uint32_t *desc; + /* TODO: Using % is expensive, but we have to do this since + * size of some SRNG rings is not power of 2 (due to descriptor + * sizes). Need to create separate API for rings used + * per-packet, with sizes power of 2 (TCL2SW, REO2SW, + * SW2RXDMA and CE rings) + */ + uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) % + srng->ring_size; + + if (next_hp != srng->u.dst_ring.tp) { + desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]); + srng->u.dst_ring.cached_hp = next_hp; + return (void *)desc; + } + + return NULL; +} + +/** + * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek) + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Destination ring pointer + * + * Sync cached head pointer with HW. + * Caller takes responsibility for any locking needs. + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline +void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + srng->u.dst_ring.cached_hp = + *(volatile uint32_t *)(srng->u.dst_ring.hp_addr); + + if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) + return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp])); + + return NULL; +} + +/** + * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Destination ring pointer + * + * Sync cached head pointer with HW. + * This function takes up SRNG_LOCK. Should not be called with SRNG lock held. + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline +void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + void *ring_desc_ptr = NULL; + + if (qdf_unlikely(!hal_ring_hdl)) { + qdf_print("Error: Invalid hal_ring\n"); + return NULL; + } + + SRNG_LOCK(&srng->lock); + + ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl); + + SRNG_UNLOCK(&srng->lock); + + return ring_desc_ptr; +} + +/** + * hal_srng_dst_num_valid - Returns number of valid entries (to be processed + * by SW) in destination ring + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Destination ring pointer + * @sync_hw_ptr: Sync cached head pointer with HW + * + */ +static inline +uint32_t hal_srng_dst_num_valid(void *hal_soc, + hal_ring_handle_t hal_ring_hdl, + int sync_hw_ptr) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + uint32_t hp; + uint32_t tp = srng->u.dst_ring.tp; + + if (sync_hw_ptr) { + hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr); + srng->u.dst_ring.cached_hp = hp; + } else { + hp = srng->u.dst_ring.cached_hp; + } + + if (hp >= tp) + return (hp - tp) / srng->entry_size; + else + return (srng->ring_size - tp + hp) / srng->entry_size; +} + +/** + * hal_srng_dst_num_valid_locked - Returns num valid entries to be processed + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Destination ring pointer + * @sync_hw_ptr: Sync cached head pointer with HW + * + * Returns number of valid entries to be processed by the host driver. The + * function takes up SRNG lock. + * + * Return: Number of valid destination entries + */ +static inline uint32_t +hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc, + hal_ring_handle_t hal_ring_hdl, + int sync_hw_ptr) +{ + uint32_t num_valid; + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + SRNG_LOCK(&srng->lock); + num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr); + SRNG_UNLOCK(&srng->lock); + + return num_valid; +} + +/** + * hal_srng_src_reap_next - Reap next entry from a source ring and move reap + * pointer. This can be used to release any buffers associated with completed + * ring entries. Note that this should not be used for posting new descriptor + * entries. Posting of new entries should be done only using + * hal_srng_src_get_next_reaped when this function is used for reaping. + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Source ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline void * +hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + uint32_t *desc; + + /* TODO: Using % is expensive, but we have to do this since + * size of some SRNG rings is not power of 2 (due to descriptor + * sizes). Need to create separate API for rings used + * per-packet, with sizes power of 2 (TCL2SW, REO2SW, + * SW2RXDMA and CE rings) + */ + uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) % + srng->ring_size; + + if (next_reap_hp != srng->u.src_ring.cached_tp) { + desc = &(srng->ring_base_vaddr[next_reap_hp]); + srng->u.src_ring.reap_hp = next_reap_hp; + return (void *)desc; + } + + return NULL; +} + +/** + * hal_srng_src_get_next_reaped - Get next entry from a source ring that is + * already reaped using hal_srng_src_reap_next, for posting new entries to + * the ring + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Source ring pointer + * + * Return: Opaque pointer for next (reaped) source ring entry; NULL on failire + */ +static inline void * +hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + uint32_t *desc; + + if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) { + desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]); + srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) % + srng->ring_size; + + return (void *)desc; + } + + return NULL; +} + +/** + * hal_srng_src_pending_reap_next - Reap next entry from a source ring and + * move reap pointer. This API is used in detach path to release any buffers + * associated with ring entries which are pending reap. + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Source ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline void * +hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + uint32_t *desc; + + uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) % + srng->ring_size; + + if (next_reap_hp != srng->u.src_ring.hp) { + desc = &(srng->ring_base_vaddr[next_reap_hp]); + srng->u.src_ring.reap_hp = next_reap_hp; + return (void *)desc; + } + + return NULL; +} + +/** + * hal_srng_src_done_val - + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Source ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline uint32_t +hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + /* TODO: Using % is expensive, but we have to do this since + * size of some SRNG rings is not power of 2 (due to descriptor + * sizes). Need to create separate API for rings used + * per-packet, with sizes power of 2 (TCL2SW, REO2SW, + * SW2RXDMA and CE rings) + */ + uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) % + srng->ring_size; + + if (next_reap_hp == srng->u.src_ring.cached_tp) + return 0; + + if (srng->u.src_ring.cached_tp > next_reap_hp) + return (srng->u.src_ring.cached_tp - next_reap_hp) / + srng->entry_size; + else + return ((srng->ring_size - next_reap_hp) + + srng->u.src_ring.cached_tp) / srng->entry_size; +} + +/** + * hal_get_entrysize_from_srng() - Retrieve ring entry size + * @hal_ring_hdl: Source ring pointer + * + * Return: uint8_t + */ +static inline +uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + return srng->entry_size; +} + +/** + * hal_get_sw_hptp - Get SW head and tail pointer location for any ring + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Source ring pointer + * @tailp: Tail Pointer + * @headp: Head Pointer + * + * Return: Update tail pointer and head pointer in arguments. + */ +static inline +void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl, + uint32_t *tailp, uint32_t *headp) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + if (srng->ring_dir == HAL_SRNG_SRC_RING) { + *headp = srng->u.src_ring.hp; + *tailp = *srng->u.src_ring.tp_addr; + } else { + *tailp = srng->u.dst_ring.tp; + *headp = *srng->u.dst_ring.hp_addr; + } +} + +/** + * hal_srng_src_get_next - Get next entry from a source ring and move cached tail pointer + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Source ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline +void *hal_srng_src_get_next(void *hal_soc, + hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + uint32_t *desc; + /* TODO: Using % is expensive, but we have to do this since + * size of some SRNG rings is not power of 2 (due to descriptor + * sizes). Need to create separate API for rings used + * per-packet, with sizes power of 2 (TCL2SW, REO2SW, + * SW2RXDMA and CE rings) + */ + uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) % + srng->ring_size; + + if (next_hp != srng->u.src_ring.cached_tp) { + desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]); + srng->u.src_ring.hp = next_hp; + /* TODO: Since reap function is not used by all rings, we can + * remove the following update of reap_hp in this function + * if we can ensure that only hal_srng_src_get_next_reaped + * is used for the rings requiring reap functionality + */ + srng->u.src_ring.reap_hp = next_hp; + return (void *)desc; + } + + return NULL; +} + +/** + * hal_srng_src_peek - Get next entry from a ring without moving head pointer. + * hal_srng_src_get_next should be called subsequently to move the head pointer + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Source ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline +void *hal_srng_src_peek(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + uint32_t *desc; + + /* TODO: Using % is expensive, but we have to do this since + * size of some SRNG rings is not power of 2 (due to descriptor + * sizes). Need to create separate API for rings used + * per-packet, with sizes power of 2 (TCL2SW, REO2SW, + * SW2RXDMA and CE rings) + */ + if (((srng->u.src_ring.hp + srng->entry_size) % + srng->ring_size) != srng->u.src_ring.cached_tp) { + desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]); + return (void *)desc; + } + + return NULL; +} + +/** + * hal_srng_src_num_avail - Returns number of available entries in src ring + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Source ring pointer + * @sync_hw_ptr: Sync cached tail pointer with HW + * + */ +static inline uint32_t +hal_srng_src_num_avail(void *hal_soc, + hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + uint32_t tp; + uint32_t hp = srng->u.src_ring.hp; + + if (sync_hw_ptr) { + tp = *(srng->u.src_ring.tp_addr); + srng->u.src_ring.cached_tp = tp; + } else { + tp = srng->u.src_ring.cached_tp; + } + + if (tp > hp) + return ((tp - hp) / srng->entry_size) - 1; + else + return ((srng->ring_size - hp + tp) / srng->entry_size) - 1; +} + +/** + * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached + * ring head/tail pointers to HW. + * This should be used only if hal_srng_access_start_unlocked to start ring + * access + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Ring pointer (Source or Destination ring) + * + * Return: 0 on success; error on failire + */ +static inline void +hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + /* TODO: See if we need a write memory barrier here */ + if (srng->flags & HAL_SRNG_LMAC_RING) { + /* For LMAC rings, ring pointer updates are done through FW and + * hence written to a shared memory location that is read by FW + */ + if (srng->ring_dir == HAL_SRNG_SRC_RING) { + *(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp; + } else { + *(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp; + } + } else { + if (srng->ring_dir == HAL_SRNG_SRC_RING) + hal_srng_write_address_32_mb(hal_soc, + srng, + srng->u.src_ring.hp_addr, + srng->u.src_ring.hp); + else + hal_srng_write_address_32_mb(hal_soc, + srng, + srng->u.dst_ring.tp_addr, + srng->u.dst_ring.tp); + } +} + +/** + * hal_srng_access_end - Unlock ring access and update cached ring head/tail + * pointers to HW + * This should be used only if hal_srng_access_start to start ring access + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Ring pointer (Source or Destination ring) + * + * Return: 0 on success; error on failire + */ +static inline void +hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + if (qdf_unlikely(!hal_ring_hdl)) { + qdf_print("Error: Invalid hal_ring\n"); + return; + } + + hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl); + SRNG_UNLOCK(&(srng->lock)); +} + +/** + * hal_srng_access_end_reap - Unlock ring access + * This should be used only if hal_srng_access_start to start ring access + * and should be used only while reaping SRC ring completions + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Ring pointer (Source or Destination ring) + * + * Return: 0 on success; error on failire + */ +static inline void +hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + SRNG_UNLOCK(&(srng->lock)); +} + +/* TODO: Check if the following definitions is available in HW headers */ +#define WBM_IDLE_SCATTER_BUF_SIZE 32704 +#define NUM_MPDUS_PER_LINK_DESC 6 +#define NUM_MSDUS_PER_LINK_DESC 7 +#define REO_QUEUE_DESC_ALIGN 128 + +#define LINK_DESC_ALIGN 128 + +#define ADDRESS_MATCH_TAG_VAL 0x5 +/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in + * of TX_MPDU_QUEUE_EXT. We are defining a common average count here + */ +#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12 + +/* TODO: Check with HW team on the scatter buffer size supported. As per WBM + * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size + * should be specified in 16 word units. But the number of bits defined for + * this field in HW header files is 5. + */ +#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8 + + +/** + * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer + * in an idle list + * + * @hal_soc: Opaque HAL SOC handle + * + */ +static inline +uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl) +{ + return WBM_IDLE_SCATTER_BUF_SIZE; +} + +/** + * hal_get_link_desc_size - Get the size of each link descriptor + * + * @hal_soc: Opaque HAL SOC handle + * + */ +static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + qdf_print("Error: Invalid ops\n"); + QDF_BUG(0); + return -EINVAL; + } + if (!hal_soc->ops->hal_get_link_desc_size) { + qdf_print("Error: Invalid function pointer\n"); + QDF_BUG(0); + return -EINVAL; + } + return hal_soc->ops->hal_get_link_desc_size(); +} + +/** + * hal_get_link_desc_align - Get the required start address alignment for + * link descriptors + * + * @hal_soc: Opaque HAL SOC handle + * + */ +static inline +uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl) +{ + return LINK_DESC_ALIGN; +} + +/** + * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold + * + * @hal_soc: Opaque HAL SOC handle + * + */ +static inline +uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl) +{ + return NUM_MPDUS_PER_LINK_DESC; +} + +/** + * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold + * + * @hal_soc: Opaque HAL SOC handle + * + */ +static inline +uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl) +{ + return NUM_MSDUS_PER_LINK_DESC; +} + +/** + * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue + * descriptor can hold + * + * @hal_soc: Opaque HAL SOC handle + * + */ +static inline +uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl) +{ + return NUM_MPDU_LINKS_PER_QUEUE_DESC; +} + +/** + * hal_idle_list_scatter_buf_num_entries - Get the number of link desc entries + * that the given buffer size + * + * @hal_soc: Opaque HAL SOC handle + * @scatter_buf_size: Size of scatter buffer + * + */ +static inline +uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl, + uint32_t scatter_buf_size) +{ + return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) / + hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK); +} + +/** + * hal_idle_list_num_scatter_bufs - Get the number of sctater buffer + * each given buffer size + * + * @hal_soc: Opaque HAL SOC handle + * @total_mem: size of memory to be scattered + * @scatter_buf_size: Size of scatter buffer + * + */ +static inline +uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl, + uint32_t total_mem, + uint32_t scatter_buf_size) +{ + uint8_t rem = (total_mem % (scatter_buf_size - + WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0; + + uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size - + WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem; + + return num_scatter_bufs; +} + +enum hal_pn_type { + HAL_PN_NONE, + HAL_PN_WPA, + HAL_PN_WAPI_EVEN, + HAL_PN_WAPI_UNEVEN, +}; + +#define HAL_RX_MAX_BA_WINDOW 256 + +/** + * hal_get_reo_qdesc_align - Get start address alignment for reo + * queue descriptors + * + * @hal_soc: Opaque HAL SOC handle + * + */ +static inline +uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl) +{ + return REO_QUEUE_DESC_ALIGN; +} + +/** + * hal_reo_qdesc_setup - Setup HW REO queue descriptor + * + * @hal_soc: Opaque HAL SOC handle + * @ba_window_size: BlockAck window size + * @start_seq: Starting sequence number + * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory + * @hw_qdesc_paddr: Physical address of REO queue descriptor memory + * @pn_type: PN type (one of the types defined in 'enum hal_pn_type') + * + */ +void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl, + int tid, uint32_t ba_window_size, + uint32_t start_seq, void *hw_qdesc_vaddr, + qdf_dma_addr_t hw_qdesc_paddr, + int pn_type); + +/** + * hal_srng_get_hp_addr - Get head pointer physical address + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Ring pointer (Source or Destination ring) + * + */ +static inline qdf_dma_addr_t +hal_srng_get_hp_addr(void *hal_soc, + hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + struct hal_soc *hal = (struct hal_soc *)hal_soc; + + if (srng->ring_dir == HAL_SRNG_SRC_RING) { + return hal->shadow_wrptr_mem_paddr + + ((unsigned long)(srng->u.src_ring.hp_addr) - + (unsigned long)(hal->shadow_wrptr_mem_vaddr)); + } else { + return hal->shadow_rdptr_mem_paddr + + ((unsigned long)(srng->u.dst_ring.hp_addr) - + (unsigned long)(hal->shadow_rdptr_mem_vaddr)); + } +} + +/** + * hal_srng_get_tp_addr - Get tail pointer physical address + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Ring pointer (Source or Destination ring) + * + */ +static inline qdf_dma_addr_t +hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + struct hal_soc *hal = (struct hal_soc *)hal_soc; + + if (srng->ring_dir == HAL_SRNG_SRC_RING) { + return hal->shadow_rdptr_mem_paddr + + ((unsigned long)(srng->u.src_ring.tp_addr) - + (unsigned long)(hal->shadow_rdptr_mem_vaddr)); + } else { + return hal->shadow_wrptr_mem_paddr + + ((unsigned long)(srng->u.dst_ring.tp_addr) - + (unsigned long)(hal->shadow_wrptr_mem_vaddr)); + } +} + +/** + * hal_srng_get_num_entries - Get total entries in the HAL Srng + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Ring pointer (Source or Destination ring) + * + * Return: total number of entries in hal ring + */ +static inline +uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + return srng->num_entries; +} + +/** + * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Ring pointer (Source or Destination ring) + * @ring_params: SRNG parameters will be returned through this structure + */ +void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl, + struct hal_srng_params *ring_params); + +/** + * hal_mem_info - Retrieve hal memory base address + * + * @hal_soc: Opaque HAL SOC handle + * @mem: pointer to structure to be updated with hal mem info + */ +void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem); + +/** + * hal_get_target_type - Return target type + * + * @hal_soc: Opaque HAL SOC handle + */ +uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl); + +/** + * hal_get_ba_aging_timeout - Retrieve BA aging timeout + * + * @hal_soc: Opaque HAL SOC handle + * @ac: Access category + * @value: timeout duration in millisec + */ +void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac, + uint32_t *value); +/** + * hal_set_aging_timeout - Set BA aging timeout + * + * @hal_soc: Opaque HAL SOC handle + * @ac: Access category in millisec + * @value: timeout duration value + */ +void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac, + uint32_t value); +/** + * hal_srng_dst_hw_init - Private function to initialize SRNG + * destination ring HW + * @hal_soc: HAL SOC handle + * @srng: SRNG ring pointer + */ +static inline void hal_srng_dst_hw_init(struct hal_soc *hal, + struct hal_srng *srng) +{ + hal->ops->hal_srng_dst_hw_init(hal, srng); +} + +/** + * hal_srng_src_hw_init - Private function to initialize SRNG + * source ring HW + * @hal_soc: HAL SOC handle + * @srng: SRNG ring pointer + */ +static inline void hal_srng_src_hw_init(struct hal_soc *hal, + struct hal_srng *srng) +{ + hal->ops->hal_srng_src_hw_init(hal, srng); +} + +/** + * hal_get_hw_hptp() - Get HW head and tail pointer value for any ring + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Source ring pointer + * @headp: Head Pointer + * @tailp: Tail Pointer + * @ring_type: Ring + * + * Return: Update tail pointer and head pointer in arguments. + */ +static inline +void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl, + uint32_t *headp, uint32_t *tailp, + uint8_t ring_type) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl, + headp, tailp, ring_type); +} + +/** + * hal_reo_setup - Initialize HW REO block + * + * @hal_soc: Opaque HAL SOC handle + * @reo_params: parameters needed by HAL for REO config + */ +static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl, + void *reoparams) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_reo_setup(hal_soc, reoparams); +} + +/** + * hal_setup_link_idle_list - Setup scattered idle list using the + * buffer list provided + * + * @hal_soc: Opaque HAL SOC handle + * @scatter_bufs_base_paddr: Array of physical base addresses + * @scatter_bufs_base_vaddr: Array of virtual base addresses + * @num_scatter_bufs: Number of scatter buffers in the above lists + * @scatter_buf_size: Size of each scatter buffer + * @last_buf_end_offset: Offset to the last entry + * @num_entries: Total entries of all scatter bufs + * + */ +static inline +void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl, + qdf_dma_addr_t scatter_bufs_base_paddr[], + void *scatter_bufs_base_vaddr[], + uint32_t num_scatter_bufs, + uint32_t scatter_buf_size, + uint32_t last_buf_end_offset, + uint32_t num_entries) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr, + scatter_bufs_base_vaddr, num_scatter_bufs, + scatter_buf_size, last_buf_end_offset, + num_entries); + +} + +#ifdef DUMP_REO_QUEUE_INFO_IN_DDR +/** + * hal_dump_rx_reo_queue_desc() - Dump reo queue descriptor fields + * @hw_qdesc_vaddr_aligned: Pointer to hw reo queue desc virtual addr + * + * Use the virtual addr pointer to reo h/w queue desc to read + * the values from ddr and log them. + * + * Return: none + */ +static inline void hal_dump_rx_reo_queue_desc( + void *hw_qdesc_vaddr_aligned) +{ + struct rx_reo_queue *hw_qdesc = + (struct rx_reo_queue *)hw_qdesc_vaddr_aligned; + + if (!hw_qdesc) + return; + + hal_info("receive_queue_number %u vld %u window_jump_2k %u" + " hole_count %u ba_window_size %u ignore_ampdu_flag %u" + " svld %u ssn %u current_index %u" + " disable_duplicate_detection %u soft_reorder_enable %u" + " chk_2k_mode %u oor_mode %u mpdu_frames_processed_count %u" + " msdu_frames_processed_count %u total_processed_byte_count %u" + " late_receive_mpdu_count %u seq_2k_error_detected_flag %u" + " pn_error_detected_flag %u current_mpdu_count %u" + " current_msdu_count %u timeout_count %u" + " forward_due_to_bar_count %u duplicate_count %u" + " frames_in_order_count %u bar_received_count %u" + " pn_check_needed %u pn_shall_be_even %u" + " pn_shall_be_uneven %u pn_size %u", + hw_qdesc->receive_queue_number, + hw_qdesc->vld, + hw_qdesc->window_jump_2k, + hw_qdesc->hole_count, + hw_qdesc->ba_window_size, + hw_qdesc->ignore_ampdu_flag, + hw_qdesc->svld, + hw_qdesc->ssn, + hw_qdesc->current_index, + hw_qdesc->disable_duplicate_detection, + hw_qdesc->soft_reorder_enable, + hw_qdesc->chk_2k_mode, + hw_qdesc->oor_mode, + hw_qdesc->mpdu_frames_processed_count, + hw_qdesc->msdu_frames_processed_count, + hw_qdesc->total_processed_byte_count, + hw_qdesc->late_receive_mpdu_count, + hw_qdesc->seq_2k_error_detected_flag, + hw_qdesc->pn_error_detected_flag, + hw_qdesc->current_mpdu_count, + hw_qdesc->current_msdu_count, + hw_qdesc->timeout_count, + hw_qdesc->forward_due_to_bar_count, + hw_qdesc->duplicate_count, + hw_qdesc->frames_in_order_count, + hw_qdesc->bar_received_count, + hw_qdesc->pn_check_needed, + hw_qdesc->pn_shall_be_even, + hw_qdesc->pn_shall_be_uneven, + hw_qdesc->pn_size); +} + +#else /* DUMP_REO_QUEUE_INFO_IN_DDR */ + +static inline void hal_dump_rx_reo_queue_desc( + void *hw_qdesc_vaddr_aligned) +{ +} +#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */ + +/** + * hal_srng_dump_ring_desc() - Dump ring descriptor info + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Source ring pointer + * @ring_desc: Opaque ring descriptor handle + */ +static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl, + hal_ring_desc_t ring_desc) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + ring_desc, (srng->entry_size << 2)); +} + +/** + * hal_srng_dump_ring() - Dump last 128 descs of the ring + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring_hdl: Source ring pointer + */ +static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + uint32_t *desc; + uint32_t tp, i; + + tp = srng->u.dst_ring.tp; + + for (i = 0; i < 128; i++) { + if (!tp) + tp = srng->ring_size; + + desc = &srng->ring_base_vaddr[tp - srng->entry_size]; + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + desc, (srng->entry_size << 2)); + + tp -= srng->entry_size; + } +} + +/* + * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc + * to opaque dp_ring desc type + * @ring_desc - rxdma ring desc + * + * Return: hal_rxdma_desc_t type + */ +static inline +hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc) +{ + return (hal_ring_desc_t)ring_desc; +} + +/** + * hal_srng_set_event() - Set hal_srng event + * @hal_ring_hdl: Source ring pointer + * @event: SRNG ring event + * + * Return: None + */ +static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + qdf_atomic_set_bit(event, &srng->srng_event); +} + +/** + * hal_srng_clear_event() - Clear hal_srng event + * @hal_ring_hdl: Source ring pointer + * @event: SRNG ring event + * + * Return: None + */ +static inline +void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + qdf_atomic_clear_bit(event, &srng->srng_event); +} + +/** + * hal_srng_get_clear_event() - Clear srng event and return old value + * @hal_ring_hdl: Source ring pointer + * @event: SRNG ring event + * + * Return: Return old event value + */ +static inline +int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + return qdf_atomic_test_and_clear_bit(event, &srng->srng_event); +} + +/** + * hal_srng_set_flush_last_ts() - Record last flush time stamp + * @hal_ring_hdl: Source ring pointer + * + * Return: None + */ +static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + srng->last_flush_ts = qdf_get_log_timestamp(); +} + +/** + * hal_srng_inc_flush_cnt() - Increment flush counter + * @hal_ring_hdl: Source ring pointer + * + * Return: None + */ +static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + + srng->flush_count++; +} + +/** + * hal_reo_set_err_dst_remap() - Set REO error destination ring remap + * register value. + * + * @hal_soc_hdl: Opaque HAL soc handle + * + * Return: None + */ +static inline void hal_reo_set_err_dst_remap(hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (hal_soc->ops->hal_reo_set_err_dst_remap) + hal_soc->ops->hal_reo_set_err_dst_remap(hal_soc); +} + +#ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE + +/** + * hal_set_one_target_reg_config() - Populate the target reg + * offset in hal_soc for one non srng related register at the + * given list index + * @hal_soc: hal handle + * @target_reg_offset: target register offset + * @list_index: index in hal list for shadow regs + * + * Return: none + */ +void hal_set_one_target_reg_config(struct hal_soc *hal, + uint32_t target_reg_offset, + int list_index); + +/** + * hal_set_shadow_regs() - Populate register offset for + * registers that need to be populated in list_shadow_reg_config + * in order to be sent to FW. These reg offsets will be mapped + * to shadow registers. + * @hal_soc: hal handle + * + * Return: QDF_STATUS_OK on success + */ +QDF_STATUS hal_set_shadow_regs(void *hal_soc); + +/** + * hal_construct_shadow_regs() - initialize the shadow registers + * for non-srng related register configs + * @hal_soc: hal handle + * + * Return: QDF_STATUS_OK on success + */ +QDF_STATUS hal_construct_shadow_regs(void *hal_soc); + +#else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */ +static inline void hal_set_one_target_reg_config( + struct hal_soc *hal, + uint32_t target_reg_offset, + int list_index) +{ +} + +static inline QDF_STATUS hal_set_shadow_regs(void *hal_soc) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS hal_construct_shadow_regs(void *hal_soc) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */ +#endif /* _HAL_APIH_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_api_mon.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_api_mon.h new file mode 100644 index 0000000000000000000000000000000000000000..5913d9a7ab02091660dd7dcf58bd019664524a99 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_api_mon.h @@ -0,0 +1,679 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HAL_API_MON_H_ +#define _HAL_API_MON_H_ + +#include "qdf_types.h" +#include "hal_internal.h" +#include + +#define HAL_RX_PHY_DATA_RADAR 0x01 +#define HAL_SU_MU_CODING_LDPC 0x01 + +#define HAL_RX_FCS_LEN (4) +#define KEY_EXTIV 0x20 + +#define HAL_RX_USER_TLV32_TYPE_OFFSET 0x00000000 +#define HAL_RX_USER_TLV32_TYPE_LSB 1 +#define HAL_RX_USER_TLV32_TYPE_MASK 0x000003FE + +#define HAL_RX_USER_TLV32_LEN_OFFSET 0x00000000 +#define HAL_RX_USER_TLV32_LEN_LSB 10 +#define HAL_RX_USER_TLV32_LEN_MASK 0x003FFC00 + +#define HAL_RX_USER_TLV32_USERID_OFFSET 0x00000000 +#define HAL_RX_USER_TLV32_USERID_LSB 26 +#define HAL_RX_USER_TLV32_USERID_MASK 0xFC000000 + +#define HAL_ALIGN(x, a) HAL_ALIGN_MASK(x, (a)-1) +#define HAL_ALIGN_MASK(x, mask) (typeof(x))(((uint32)(x) + (mask)) & ~(mask)) + +#define HAL_RX_TLV32_HDR_SIZE 4 + +#define HAL_RX_GET_USER_TLV32_TYPE(rx_status_tlv_ptr) \ + ((*((uint32_t *)(rx_status_tlv_ptr)) & \ + HAL_RX_USER_TLV32_TYPE_MASK) >> \ + HAL_RX_USER_TLV32_TYPE_LSB) + +#define HAL_RX_GET_USER_TLV32_LEN(rx_status_tlv_ptr) \ + ((*((uint32_t *)(rx_status_tlv_ptr)) & \ + HAL_RX_USER_TLV32_LEN_MASK) >> \ + HAL_RX_USER_TLV32_LEN_LSB) + +#define HAL_RX_GET_USER_TLV32_USERID(rx_status_tlv_ptr) \ + ((*((uint32_t *)(rx_status_tlv_ptr)) & \ + HAL_RX_USER_TLV32_USERID_MASK) >> \ + HAL_RX_USER_TLV32_USERID_LSB) + +#define HAL_TLV_STATUS_PPDU_NOT_DONE 0 +#define HAL_TLV_STATUS_PPDU_DONE 1 +#define HAL_TLV_STATUS_BUF_DONE 2 +#define HAL_TLV_STATUS_PPDU_NON_STD_DONE 3 +#define HAL_TLV_STATUS_PPDU_START 4 +#define HAL_TLV_STATUS_HEADER 5 +#define HAL_TLV_STATUS_MPDU_END 6 +#define HAL_TLV_STATUS_MSDU_START 7 +#define HAL_TLV_STATUS_MSDU_END 8 + +#define HAL_MAX_UL_MU_USERS 37 + +#define HAL_RX_PKT_TYPE_11A 0 +#define HAL_RX_PKT_TYPE_11B 1 +#define HAL_RX_PKT_TYPE_11N 2 +#define HAL_RX_PKT_TYPE_11AC 3 +#define HAL_RX_PKT_TYPE_11AX 4 + +#define HAL_RX_RECEPTION_TYPE_SU 0 +#define HAL_RX_RECEPTION_TYPE_MU_MIMO 1 +#define HAL_RX_RECEPTION_TYPE_OFDMA 2 +#define HAL_RX_RECEPTION_TYPE_MU_OFDMA 3 + +/* Multiply rate by 2 to avoid float point + * and get rate in units of 500kbps + */ +#define HAL_11B_RATE_0MCS 11*2 +#define HAL_11B_RATE_1MCS 5.5*2 +#define HAL_11B_RATE_2MCS 2*2 +#define HAL_11B_RATE_3MCS 1*2 +#define HAL_11B_RATE_4MCS 11*2 +#define HAL_11B_RATE_5MCS 5.5*2 +#define HAL_11B_RATE_6MCS 2*2 + +#define HAL_11A_RATE_0MCS 48*2 +#define HAL_11A_RATE_1MCS 24*2 +#define HAL_11A_RATE_2MCS 12*2 +#define HAL_11A_RATE_3MCS 6*2 +#define HAL_11A_RATE_4MCS 54*2 +#define HAL_11A_RATE_5MCS 36*2 +#define HAL_11A_RATE_6MCS 18*2 +#define HAL_11A_RATE_7MCS 9*2 + +#define HAL_LEGACY_MCS0 0 +#define HAL_LEGACY_MCS1 1 +#define HAL_LEGACY_MCS2 2 +#define HAL_LEGACY_MCS3 3 +#define HAL_LEGACY_MCS4 4 +#define HAL_LEGACY_MCS5 5 +#define HAL_LEGACY_MCS6 6 +#define HAL_LEGACY_MCS7 7 + +#define HE_GI_0_8 0 +#define HE_GI_0_4 1 +#define HE_GI_1_6 2 +#define HE_GI_3_2 3 + +#define HT_SGI_PRESENT 0x80 + +#define HE_LTF_1_X 0 +#define HE_LTF_2_X 1 +#define HE_LTF_4_X 2 +#define HE_LTF_UNKNOWN 3 +#define VHT_SIG_SU_NSS_MASK 0x7 +#define HT_SIG_SU_NSS_SHIFT 0x3 + +#define HAL_TID_INVALID 31 +#define HAL_AST_IDX_INVALID 0xFFFF + +#ifdef GET_MSDU_AGGREGATION +#define HAL_RX_GET_MSDU_AGGREGATION(rx_desc, rs)\ +{\ + struct rx_msdu_end *rx_msdu_end;\ + bool first_msdu, last_msdu; \ + rx_msdu_end = &rx_desc->msdu_end_tlv.rx_msdu_end;\ + first_msdu = HAL_RX_GET(rx_msdu_end, RX_MSDU_END_5, FIRST_MSDU);\ + last_msdu = HAL_RX_GET(rx_msdu_end, RX_MSDU_END_5, LAST_MSDU);\ + if (first_msdu && last_msdu)\ + rs->rs_flags &= (~IEEE80211_AMSDU_FLAG);\ + else\ + rs->rs_flags |= (IEEE80211_AMSDU_FLAG); \ +} \ + +#else +#define HAL_RX_GET_MSDU_AGGREGATION(rx_desc, rs) +#endif + +/* Max MPDUs per status buffer */ +#define HAL_RX_MAX_MPDU 256 +#define HAL_RX_NUM_WORDS_PER_PPDU_BITMAP (HAL_RX_MAX_MPDU >> 5) + +/* Max pilot count */ +#define HAL_RX_MAX_SU_EVM_COUNT 32 + +/* + * Struct hal_rx_su_evm_info - SU evm info + * @number_of_symbols: number of symbols + * @nss_count: nss count + * @pilot_count: pilot count + * @pilot_evm: Array of pilot evm values + */ +struct hal_rx_su_evm_info { + uint32_t number_of_symbols; + uint8_t nss_count; + uint8_t pilot_count; + uint32_t pilot_evm[HAL_RX_MAX_SU_EVM_COUNT]; +}; + +enum { + DP_PPDU_STATUS_START, + DP_PPDU_STATUS_DONE, +}; + +static inline +uint8_t *HAL_RX_MON_DEST_GET_DESC(uint8_t *data) +{ + return data; +} + +static inline +uint32_t HAL_RX_DESC_GET_MPDU_LENGTH_ERR(void *hw_desc_addr) +{ + struct rx_attention *rx_attn; + struct rx_mon_pkt_tlvs *rx_desc = + (struct rx_mon_pkt_tlvs *)hw_desc_addr; + + rx_attn = &rx_desc->attn_tlv.rx_attn; + + return HAL_RX_GET(rx_attn, RX_ATTENTION_1, MPDU_LENGTH_ERR); +} + +static inline +uint32_t HAL_RX_DESC_GET_MPDU_FCS_ERR(void *hw_desc_addr) +{ + struct rx_attention *rx_attn; + struct rx_mon_pkt_tlvs *rx_desc = + (struct rx_mon_pkt_tlvs *)hw_desc_addr; + + rx_attn = &rx_desc->attn_tlv.rx_attn; + + return HAL_RX_GET(rx_attn, RX_ATTENTION_1, FCS_ERR); +} + +/* + * HAL_RX_HW_DESC_MPDU_VALID() - check MPDU start TLV tag in MPDU + * start TLV of Hardware TLV descriptor + * @hw_desc_addr: Hardware desciptor address + * + * Return: bool: if TLV tag match + */ +static inline +bool HAL_RX_HW_DESC_MPDU_VALID(void *hw_desc_addr) +{ + struct rx_mon_pkt_tlvs *rx_desc = + (struct rx_mon_pkt_tlvs *)hw_desc_addr; + uint32_t tlv_tag; + + tlv_tag = HAL_RX_GET_USER_TLV32_TYPE( + &rx_desc->mpdu_start_tlv); + + return tlv_tag == WIFIRX_MPDU_START_E ? true : false; +} + + +/* TODO: Move all Rx descriptor functions to hal_rx.h to avoid duplication */ + +#define HAL_RX_BUFFER_ADDR_31_0_GET(buff_addr_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(buff_addr_info, \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_OFFSET)), \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK, \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB)) + +#define HAL_RX_REO_ENT_BUFFER_ADDR_39_32_GET(reo_ent_desc) \ + (HAL_RX_BUFFER_ADDR_39_32_GET(& \ + (((struct reo_entrance_ring *)reo_ent_desc) \ + ->reo_level_mpdu_frame_info.msdu_link_desc_addr_info))) + +#define HAL_RX_REO_ENT_BUFFER_ADDR_31_0_GET(reo_ent_desc) \ + (HAL_RX_BUFFER_ADDR_31_0_GET(& \ + (((struct reo_entrance_ring *)reo_ent_desc) \ + ->reo_level_mpdu_frame_info.msdu_link_desc_addr_info))) + +#define HAL_RX_REO_ENT_BUF_COOKIE_GET(reo_ent_desc) \ + (HAL_RX_BUF_COOKIE_GET(& \ + (((struct reo_entrance_ring *)reo_ent_desc) \ + ->reo_level_mpdu_frame_info.msdu_link_desc_addr_info))) + +/** + * hal_rx_reo_ent_buf_paddr_get: Gets the physical address and + * cookie from the REO entrance ring element + * + * @ hal_rx_desc_cookie: Opaque cookie pointer used by HAL to get to + * the current descriptor + * @ buf_info: structure to return the buffer information + * @ msdu_cnt: pointer to msdu count in MPDU + * Return: void + */ +static inline +void hal_rx_reo_ent_buf_paddr_get(hal_rxdma_desc_t rx_desc, + struct hal_buf_info *buf_info, + uint32_t *msdu_cnt +) +{ + struct reo_entrance_ring *reo_ent_ring = + (struct reo_entrance_ring *)rx_desc; + struct buffer_addr_info *buf_addr_info; + struct rx_mpdu_desc_info *rx_mpdu_desc_info_details; + uint32_t loop_cnt; + + rx_mpdu_desc_info_details = + &reo_ent_ring->reo_level_mpdu_frame_info.rx_mpdu_desc_info_details; + + *msdu_cnt = HAL_RX_GET(rx_mpdu_desc_info_details, + RX_MPDU_DESC_INFO_0, MSDU_COUNT); + + loop_cnt = HAL_RX_GET(reo_ent_ring, REO_ENTRANCE_RING_7, LOOPING_COUNT); + + buf_addr_info = + &reo_ent_ring->reo_level_mpdu_frame_info.msdu_link_desc_addr_info; + + buf_info->paddr = + (HAL_RX_BUFFER_ADDR_31_0_GET(buf_addr_info) | + ((uint64_t) + (HAL_RX_BUFFER_ADDR_39_32_GET(buf_addr_info)) << 32)); + + buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(buf_addr_info); + buf_info->rbm = HAL_RX_BUF_RBM_GET(buf_addr_info); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] ReoAddr=%pK, addrInfo=%pK, paddr=0x%llx, loopcnt=%d", + __func__, __LINE__, reo_ent_ring, buf_addr_info, + (unsigned long long)buf_info->paddr, loop_cnt); +} + +static inline +void hal_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, + struct hal_buf_info *buf_info) +{ + struct rx_msdu_link *msdu_link = + (struct rx_msdu_link *)rx_msdu_link_desc; + struct buffer_addr_info *buf_addr_info; + + buf_addr_info = &msdu_link->next_msdu_link_desc_addr_info; + + buf_info->paddr = + (HAL_RX_BUFFER_ADDR_31_0_GET(buf_addr_info) | + ((uint64_t) + (HAL_RX_BUFFER_ADDR_39_32_GET(buf_addr_info)) << 32)); + + buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(buf_addr_info); + buf_info->rbm = HAL_RX_BUF_RBM_GET(buf_addr_info); +} + +/** + * hal_rx_msdu_link_desc_set: Retrieves MSDU Link Descriptor to WBM + * + * @ soc : HAL version of the SOC pointer + * @ src_srng_desc : void pointer to the WBM Release Ring descriptor + * @ buf_addr_info : void pointer to the buffer_addr_info + * + * Return: void + */ + +static inline +void hal_rx_mon_msdu_link_desc_set(hal_soc_handle_t hal_soc_hdl, + void *src_srng_desc, + hal_buff_addrinfo_t buf_addr_info) +{ + struct buffer_addr_info *wbm_srng_buffer_addr_info = + (struct buffer_addr_info *)src_srng_desc; + uint64_t paddr; + struct buffer_addr_info *p_buffer_addr_info = + (struct buffer_addr_info *)buf_addr_info; + + paddr = + (HAL_RX_BUFFER_ADDR_31_0_GET(buf_addr_info) | + ((uint64_t) + (HAL_RX_BUFFER_ADDR_39_32_GET(buf_addr_info)) << 32)); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] src_srng_desc=%pK, buf_addr=0x%llx, cookie=0x%llx", + __func__, __LINE__, src_srng_desc, (unsigned long long)paddr, + (unsigned long long)p_buffer_addr_info->sw_buffer_cookie); + + /* Structure copy !!! */ + *wbm_srng_buffer_addr_info = + *((struct buffer_addr_info *)buf_addr_info); +} + +static inline +uint32 hal_get_rx_msdu_link_desc_size(void) +{ + return sizeof(struct rx_msdu_link); +} + +enum { + HAL_PKT_TYPE_OFDM = 0, + HAL_PKT_TYPE_CCK, + HAL_PKT_TYPE_HT, + HAL_PKT_TYPE_VHT, + HAL_PKT_TYPE_HE, +}; + +enum { + HAL_SGI_0_8_US, + HAL_SGI_0_4_US, + HAL_SGI_1_6_US, + HAL_SGI_3_2_US, +}; + +enum { + HAL_FULL_RX_BW_20, + HAL_FULL_RX_BW_40, + HAL_FULL_RX_BW_80, + HAL_FULL_RX_BW_160, +}; + +enum { + HAL_RX_TYPE_SU, + HAL_RX_TYPE_MU_MIMO, + HAL_RX_TYPE_MU_OFDMA, + HAL_RX_TYPE_MU_OFDMA_MIMO, +}; + +/** + * enum + * @HAL_RX_MON_PPDU_START: PPDU start TLV is decoded in HAL + * @HAL_RX_MON_PPDU_END: PPDU end TLV is decided in HAL + */ +enum { + HAL_RX_MON_PPDU_START = 0, + HAL_RX_MON_PPDU_END, +}; + +/* struct hal_rx_ppdu_common_info - common ppdu info + * @ppdu_id - ppdu id number + * @ppdu_timestamp - timestamp at ppdu received + * @mpdu_cnt_fcs_ok - mpdu count in ppdu with fcs ok + * @mpdu_cnt_fcs_err - mpdu count in ppdu with fcs err + * @mpdu_fcs_ok_bitmap - fcs ok mpdu count in ppdu bitmap + * @last_ppdu_id - last received ppdu id + * @mpdu_cnt - total mpdu count + * @num_users - num users + */ +struct hal_rx_ppdu_common_info { + uint32_t ppdu_id; + uint32_t ppdu_timestamp; + uint32_t mpdu_cnt_fcs_ok; + uint32_t mpdu_cnt_fcs_err; + uint32_t mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP]; + uint32_t last_ppdu_id; + uint32_t mpdu_cnt; + uint8_t num_users; +}; + +/** + * struct hal_rx_msdu_payload_info - msdu payload info + * @first_msdu_payload: pointer to first msdu payload + * @payload_len: payload len + * @nbuf: status network buffer to which msdu belongs to + */ +struct hal_rx_msdu_payload_info { + uint8_t *first_msdu_payload; + uint32_t payload_len; + qdf_nbuf_t nbuf; +}; + +/** + * struct hal_rx_nac_info - struct for neighbour info + * @fc_valid: flag indicate if it has valid frame control information + * @frame_control: frame control from each MPDU + * @to_ds_flag: flag indicate to_ds bit + * @mac_addr2_valid: flag indicate if mac_addr2 is valid + * @mac_addr2: mac address2 in wh + * @mcast_bcast: multicast/broadcast + */ +struct hal_rx_nac_info { + uint8_t fc_valid; + uint16_t frame_control; + uint8_t to_ds_flag; + uint8_t mac_addr2_valid; + uint8_t mac_addr2[QDF_MAC_ADDR_SIZE]; + uint8_t mcast_bcast; +}; + +/** + * struct hal_rx_ppdu_msdu_info - struct for msdu info from HW TLVs + * @cce_metadata: cached CCE metadata value received in the MSDU_END TLV + * @is_flow_idx_timeout: flag to indicate if flow search timeout occurred + * @is_flow_idx_invalid: flag to indicate if flow idx is valid or not + * @fse_metadata: cached FSE metadata value received in the MSDU END TLV + * @flow_idx: flow idx matched in FSE received in the MSDU END TLV + */ +struct hal_rx_ppdu_msdu_info { + uint16_t cce_metadata; + bool is_flow_idx_timeout; + bool is_flow_idx_invalid; + uint32_t fse_metadata; + uint32_t flow_idx; +}; + +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) +/** + * struct hal_rx_ppdu_cfr_user_info - struct for storing peer info extracted + * from HW TLVs, this will be used for correlating CFR data with multiple peers + * in MU PPDUs + * + * @peer_macaddr: macaddr of the peer + * @ast_index: AST index of the peer + */ +struct hal_rx_ppdu_cfr_user_info { + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t ast_index; +}; + +/** + * struct hal_rx_ppdu_cfr_info - struct for storing ppdu info extracted from HW + * TLVs, this will be used for CFR correlation + * + * @bb_captured_channel : Set by RXPCU when MACRX_FREEZE_CAPTURE_CHANNEL TLV is + * sent to PHY, SW checks it to correlate current PPDU TLVs with uploaded + * channel information. + * + * @bb_captured_timeout : Set by RxPCU to indicate channel capture condition is + * met, but MACRX_FREEZE_CAPTURE_CHANNEL is not sent to PHY due to AST delay, + * which means the rx_frame_falling edge to FREEZE TLV ready time exceeds + * the threshold time defined by RXPCU register FREEZE_TLV_DELAY_CNT_THRESH. + * Bb_captured_reason is still valid in this case. + * + * @rx_location_info_valid: Indicates whether CFR DMA address in the PPDU TLV + * is valid + * + * + * + * + * @bb_captured_reason : Copy capture_reason of MACRX_FREEZE_CAPTURE_CHANNEL + * TLV to here for FW usage. Valid when bb_captured_channel or + * bb_captured_timeout is set. + * + * + * + * + * + * + * + * + * @rtt_che_buffer_pointer_low32 : The low 32 bits of the 40 bits pointer to + * external RTT channel information buffer + * + * @rtt_che_buffer_pointer_high8 : The high 8 bits of the 40 bits pointer to + * external RTT channel information buffer + * + * @chan_capture_status : capture status reported by ucode + * a. CAPTURE_IDLE: FW has disabled "REPETITIVE_CHE_CAPTURE_CTRL" + * b. CAPTURE_BUSY: previous PPDU’s channel capture upload DMA ongoing. (Note + * that this upload is triggered after receiving freeze_channel_capture TLV + * after last PPDU is rx) + * c. CAPTURE_ACTIVE: channel capture is enabled and no previous channel + * capture ongoing + * d. CAPTURE_NO_BUFFER: next buffer in IPC ring not available + * + * @cfr_user_info: Peer mac for upto 4 MU users + */ + +struct hal_rx_ppdu_cfr_info { + bool bb_captured_channel; + bool bb_captured_timeout; + uint8_t bb_captured_reason; + bool rx_location_info_valid; + uint8_t chan_capture_status; + uint8_t rtt_che_buffer_pointer_high8; + uint32_t rtt_che_buffer_pointer_low32; + struct hal_rx_ppdu_cfr_user_info cfr_user_info[HAL_MAX_UL_MU_USERS]; +}; +#else +struct hal_rx_ppdu_cfr_info {}; +#endif + +struct mon_rx_info { + uint8_t qos_control_info_valid; + uint16_t qos_control; + uint8_t mac_addr1_valid; + uint8_t mac_addr1[QDF_MAC_ADDR_SIZE]; +}; + +struct mon_rx_user_info { + uint16_t qos_control; + uint8_t qos_control_info_valid; + uint32_t bar_frame:1; +}; + +struct hal_rx_ppdu_info { + struct hal_rx_ppdu_common_info com_info; + struct mon_rx_status rx_status; + struct mon_rx_user_status rx_user_status[HAL_MAX_UL_MU_USERS]; + struct mon_rx_info rx_info; + struct mon_rx_user_info rx_user_info[HAL_MAX_UL_MU_USERS]; + struct hal_rx_msdu_payload_info msdu_info; + struct hal_rx_msdu_payload_info fcs_ok_msdu_info; + struct hal_rx_nac_info nac_info; + /* status ring PPDU start and end state */ + uint32_t rx_state; + /* MU user id for status ring TLV */ + uint32_t user_id; + /* MPDU/MSDU truncated to 128 bytes header start addr in status skb */ + unsigned char *data; + /* MPDU/MSDU truncated to 128 bytes header real length */ + uint32_t hdr_len; + /* MPDU FCS error */ + bool fcs_err; + /* Id to indicate how to process mpdu */ + uint8_t sw_frame_group_id; + struct hal_rx_ppdu_msdu_info rx_msdu_info[HAL_MAX_UL_MU_USERS]; + /* first msdu payload for all mpdus in ppdu */ + struct hal_rx_msdu_payload_info ppdu_msdu_info[HAL_RX_MAX_MPDU]; + /* evm info */ + struct hal_rx_su_evm_info evm_info; + /** + * Will be used to store ppdu info extracted from HW TLVs, + * and for CFR correlation as well + */ + struct hal_rx_ppdu_cfr_info cfr_info; +}; + +static inline uint32_t +hal_get_rx_status_buf_size(void) { + /* RX status buffer size is hard coded for now */ + return 2048; +} + +static inline uint8_t* +hal_rx_status_get_next_tlv(uint8_t *rx_tlv) { + uint32_t tlv_len, tlv_tag; + + tlv_len = HAL_RX_GET_USER_TLV32_LEN(rx_tlv); + tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv); + + /* The actual length of PPDU_END is the combined length of many PHY + * TLVs that follow. Skip the TLV header and + * rx_rxpcu_classification_overview that follows the header to get to + * next TLV. + */ + if (tlv_tag == WIFIRX_PPDU_END_E) + tlv_len = sizeof(struct rx_rxpcu_classification_overview); + + return (uint8_t *)(((unsigned long)(rx_tlv + tlv_len + + HAL_RX_TLV32_HDR_SIZE + 3)) & (~((unsigned long)3))); +} + +/** + * hal_rx_proc_phyrx_other_receive_info_tlv() + * - process other receive info TLV + * @rx_tlv_hdr: pointer to TLV header + * @ppdu_info: pointer to ppdu_info + * + * Return: None + */ +static inline void hal_rx_proc_phyrx_other_receive_info_tlv(struct hal_soc *hal_soc, + void *rx_tlv_hdr, + struct hal_rx_ppdu_info + *ppdu_info) +{ + hal_soc->ops->hal_rx_proc_phyrx_other_receive_info_tlv(rx_tlv_hdr, + (void *)ppdu_info); +} + +/** + * hal_rx_status_get_tlv_info() - process receive info TLV + * @rx_tlv_hdr: pointer to TLV header + * @ppdu_info: pointer to ppdu_info + * @hal_soc: HAL soc handle + * @nbuf: PPDU status netowrk buffer + * + * Return: HAL_TLV_STATUS_PPDU_NOT_DONE or HAL_TLV_STATUS_PPDU_DONE from tlv + */ +static inline uint32_t +hal_rx_status_get_tlv_info(void *rx_tlv_hdr, void *ppdu_info, + hal_soc_handle_t hal_soc_hdl, + qdf_nbuf_t nbuf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_status_get_tlv_info( + rx_tlv_hdr, + ppdu_info, + hal_soc_hdl, + nbuf); +} + +static inline +uint32_t hal_get_rx_status_done_tlv_size(hal_soc_handle_t hal_soc_hdl) +{ + return HAL_RX_TLV32_HDR_SIZE; +} + +static inline QDF_STATUS +hal_get_rx_status_done(uint8_t *rx_tlv) +{ + uint32_t tlv_tag; + + tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv); + + if (tlv_tag == WIFIRX_STATUS_BUFFER_DONE_E) + return QDF_STATUS_SUCCESS; + else + return QDF_STATUS_E_EMPTY; +} + +static inline QDF_STATUS +hal_clear_rx_status_done(uint8_t *rx_tlv) +{ + *(uint32_t *)rx_tlv = 0; + return QDF_STATUS_SUCCESS; +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_flow.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_flow.h new file mode 100644 index 0000000000000000000000000000000000000000..b89146d10ab1b650f0908d0f94645df1d6e1cba6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_flow.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __HAL_FLOW_H +#define __HAL_FLOW_H + +#define HAL_SET_FLD_SM(block, field, value) \ + (((value) << (block ## _ ## field ## _LSB)) & \ + (block ## _ ## field ## _MASK)) + +#define HAL_SET_FLD_MS(block, field, value) \ + (((value) & (block ## _ ## field ## _MASK)) >> \ + (block ## _ ## field ## _LSB)) + +#define HAL_CLR_FLD(desc, block, field) \ +do { \ + uint32_t val; \ + typeof(desc) desc_ = desc; \ + val = *((uint32_t *)((uint8_t *)(desc_) + \ + HAL_OFFSET(block, field))); \ + val &= ~(block ## _ ## field ## _MASK); \ + HAL_SET_FLD(desc_, block, field) = val; \ +} while (0) + +#define HAL_GET_FLD(desc, block, field) \ + ((*((uint32_t *)((uint8_t *)(desc) + HAL_OFFSET(block, field))) & \ + (block ## _ ## field ## _MASK)) >> (block ## _ ## field ## _LSB)) + +/** + * struct hal_flow_tuple_info - Hal Flow 5-tuple + * @dest_ip_127_96: Destination IP address bits 96-127 + * @dest_ip_95_64: Destination IP address bits 64-95 + * @dest_ip_63_32: Destination IP address bits 32-63 + * @dest_ip_31_0: Destination IP address bits 0-31 + * @src_ip_127_96: Source IP address bits 96-127 + * @src_ip_95_64: Source IP address bits 64-95 + * @src_ip_63_32: Source IP address bits 32-63 + * @src_ip_31_0: Source IP address bits 0-31 + * @dest_port: Destination Port + * @src_port: Source Port + * @l4_protocol: Layer-4 protocol type (TCP/UDP) + */ +struct hal_flow_tuple_info { + uint32_t dest_ip_127_96; + uint32_t dest_ip_95_64; + uint32_t dest_ip_63_32; + uint32_t dest_ip_31_0; + uint32_t src_ip_127_96; + uint32_t src_ip_95_64; + uint32_t src_ip_63_32; + uint32_t src_ip_31_0; + uint16_t dest_port; + uint16_t src_port; + uint16_t l4_protocol; +}; + +/** + * key_bitwise_shift_left() - Bitwise left shift (in place) an array of bytes + * @key: Pointer to array to key bytes + * @len: size of array (number of key bytes) + * @shift: number of shift operations to be performed + * + * Return: + */ +static inline void +key_bitwise_shift_left(uint8_t *key, int len, int shift) +{ + int i; + int next; + + while (shift--) { + for (i = len - 1; i >= 0 ; i--) { + if (i > 0) + next = (key[i - 1] & 0x80 ? 1 : 0); + else + next = 0; + key[i] = (key[i] << 1) | next; + } + } +} + +/** + * key_reverse() - Reverse the key buffer from MSB to LSB + * @dest: pointer to the destination key + * @src: pointer to the source key which should be shifted + * @len: size of key in bytes + * + * Return: + */ +static inline void +key_reverse(uint8_t *dest, uint8_t *src, int len) +{ + int i, j; + + for (i = 0, j = len - 1; i < len; i++, j--) + dest[i] = src[j]; +} +#endif /* HAL_FLOW_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_generic_api.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_generic_api.h new file mode 100644 index 0000000000000000000000000000000000000000..3f02ab4daa9c1e65e2ed58a648311c8591aaf172 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_generic_api.h @@ -0,0 +1,2341 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef _HAL_GENERIC_API_H_ +#define _HAL_GENERIC_API_H_ + +#include + +/** + * hal_tx_comp_get_status() - TQM Release reason + * @hal_desc: completion ring Tx status + * + * This function will parse the WBM completion descriptor and populate in + * HAL structure + * + * Return: none + */ +static inline +void hal_tx_comp_get_status_generic(void *desc, + void *ts1, + struct hal_soc *hal) +{ + uint8_t rate_stats_valid = 0; + uint32_t rate_stats = 0; + struct hal_tx_completion_status *ts = + (struct hal_tx_completion_status *)ts1; + + ts->ppdu_id = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_3, + TQM_STATUS_NUMBER); + ts->ack_frame_rssi = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_4, + ACK_FRAME_RSSI); + ts->first_msdu = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_4, FIRST_MSDU); + ts->last_msdu = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_4, LAST_MSDU); + ts->msdu_part_of_amsdu = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_4, + MSDU_PART_OF_AMSDU); + + ts->peer_id = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_7, SW_PEER_ID); + ts->tid = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_7, TID); + ts->transmit_cnt = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_3, + TRANSMIT_COUNT); + + rate_stats = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_5, + TX_RATE_STATS); + + rate_stats_valid = HAL_TX_MS(TX_RATE_STATS_INFO_0, + TX_RATE_STATS_INFO_VALID, rate_stats); + + ts->valid = rate_stats_valid; + + if (rate_stats_valid) { + ts->bw = HAL_TX_MS(TX_RATE_STATS_INFO_0, TRANSMIT_BW, + rate_stats); + ts->pkt_type = HAL_TX_MS(TX_RATE_STATS_INFO_0, + TRANSMIT_PKT_TYPE, rate_stats); + ts->stbc = HAL_TX_MS(TX_RATE_STATS_INFO_0, + TRANSMIT_STBC, rate_stats); + ts->ldpc = HAL_TX_MS(TX_RATE_STATS_INFO_0, TRANSMIT_LDPC, + rate_stats); + ts->sgi = HAL_TX_MS(TX_RATE_STATS_INFO_0, TRANSMIT_SGI, + rate_stats); + ts->mcs = HAL_TX_MS(TX_RATE_STATS_INFO_0, TRANSMIT_MCS, + rate_stats); + ts->ofdma = HAL_TX_MS(TX_RATE_STATS_INFO_0, OFDMA_TRANSMISSION, + rate_stats); + ts->tones_in_ru = HAL_TX_MS(TX_RATE_STATS_INFO_0, TONES_IN_RU, + rate_stats); + } + + ts->release_src = hal_tx_comp_get_buffer_source(desc); + ts->status = hal_tx_comp_get_release_reason( + desc, + hal_soc_to_hal_soc_handle(hal)); + + ts->tsf = HAL_TX_DESC_GET(desc, UNIFIED_WBM_RELEASE_RING_6, + TX_RATE_STATS_INFO_TX_RATE_STATS); +} + +/** + * hal_tx_desc_set_buf_addr - Fill Buffer Address information in Tx Descriptor + * @desc: Handle to Tx Descriptor + * @paddr: Physical Address + * @pool_id: Return Buffer Manager ID + * @desc_id: Descriptor ID + * @type: 0 - Address points to a MSDU buffer + * 1 - Address points to MSDU extension descriptor + * + * Return: void + */ +static inline void hal_tx_desc_set_buf_addr_generic(void *desc, + dma_addr_t paddr, uint8_t pool_id, + uint32_t desc_id, uint8_t type) +{ + /* Set buffer_addr_info.buffer_addr_31_0 */ + HAL_SET_FLD(desc, UNIFIED_TCL_DATA_CMD_0, BUFFER_ADDR_INFO_BUF_ADDR_INFO) = + HAL_TX_SM(UNIFIED_BUFFER_ADDR_INFO_0, BUFFER_ADDR_31_0, paddr); + + /* Set buffer_addr_info.buffer_addr_39_32 */ + HAL_SET_FLD(desc, UNIFIED_TCL_DATA_CMD_1, + BUFFER_ADDR_INFO_BUF_ADDR_INFO) |= + HAL_TX_SM(UNIFIED_BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32, + (((uint64_t) paddr) >> 32)); + + /* Set buffer_addr_info.return_buffer_manager = pool id */ + HAL_SET_FLD(desc, UNIFIED_TCL_DATA_CMD_1, + BUFFER_ADDR_INFO_BUF_ADDR_INFO) |= + HAL_TX_SM(UNIFIED_BUFFER_ADDR_INFO_1, + RETURN_BUFFER_MANAGER, (pool_id + HAL_WBM_SW0_BM_ID)); + + /* Set buffer_addr_info.sw_buffer_cookie = desc_id */ + HAL_SET_FLD(desc, UNIFIED_TCL_DATA_CMD_1, + BUFFER_ADDR_INFO_BUF_ADDR_INFO) |= + HAL_TX_SM(UNIFIED_BUFFER_ADDR_INFO_1, SW_BUFFER_COOKIE, desc_id); + + /* Set Buffer or Ext Descriptor Type */ + HAL_SET_FLD(desc, UNIFIED_TCL_DATA_CMD_2, + BUF_OR_EXT_DESC_TYPE) |= + HAL_TX_SM(UNIFIED_TCL_DATA_CMD_2, BUF_OR_EXT_DESC_TYPE, type); +} + +#if defined(QCA_WIFI_QCA6290_11AX_MU_UL) && defined(QCA_WIFI_QCA6290_11AX) +/** + * hal_rx_handle_other_tlvs() - handle special TLVs like MU_UL + * tlv_tag: Taf of the TLVs + * rx_tlv: the pointer to the TLVs + * @ppdu_info: pointer to ppdu_info + * + * Return: true if the tlv is handled, false if not + */ +static inline bool +hal_rx_handle_other_tlvs(uint32_t tlv_tag, void *rx_tlv, + struct hal_rx_ppdu_info *ppdu_info) +{ + uint32_t value; + + switch (tlv_tag) { + case WIFIPHYRX_HE_SIG_A_MU_UL_E: + { + uint8_t *he_sig_a_mu_ul_info = + (uint8_t *)rx_tlv + + HAL_RX_OFFSET(PHYRX_HE_SIG_A_MU_UL_0, + HE_SIG_A_MU_UL_INFO_PHYRX_HE_SIG_A_MU_UL_INFO_DETAILS); + ppdu_info->rx_status.he_flags = 1; + + value = HAL_RX_GET(he_sig_a_mu_ul_info, HE_SIG_A_MU_UL_INFO_0, + FORMAT_INDICATION); + if (value == 0) { + ppdu_info->rx_status.he_data1 = + QDF_MON_STATUS_HE_TRIG_FORMAT_TYPE; + } else { + ppdu_info->rx_status.he_data1 = + QDF_MON_STATUS_HE_SU_FORMAT_TYPE; + } + + /* data1 */ + ppdu_info->rx_status.he_data1 |= + QDF_MON_STATUS_HE_BSS_COLOR_KNOWN | + QDF_MON_STATUS_HE_DL_UL_KNOWN | + QDF_MON_STATUS_HE_DATA_BW_RU_KNOWN; + + /* data2 */ + ppdu_info->rx_status.he_data2 |= + QDF_MON_STATUS_TXOP_KNOWN; + + /*data3*/ + value = HAL_RX_GET(he_sig_a_mu_ul_info, + HE_SIG_A_MU_UL_INFO_0, BSS_COLOR_ID); + ppdu_info->rx_status.he_data3 = value; + /* 1 for UL and 0 for DL */ + value = 1; + value = value << QDF_MON_STATUS_DL_UL_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + /*data4*/ + value = HAL_RX_GET(he_sig_a_mu_ul_info, HE_SIG_A_MU_UL_INFO_0, + SPATIAL_REUSE); + ppdu_info->rx_status.he_data4 = value; + + /*data5*/ + value = HAL_RX_GET(he_sig_a_mu_ul_info, + HE_SIG_A_MU_UL_INFO_0, TRANSMIT_BW); + ppdu_info->rx_status.he_data5 = value; + ppdu_info->rx_status.bw = value; + + /*data6*/ + value = HAL_RX_GET(he_sig_a_mu_ul_info, HE_SIG_A_MU_UL_INFO_1, + TXOP_DURATION); + value = value << QDF_MON_STATUS_TXOP_SHIFT; + ppdu_info->rx_status.he_data6 |= value; + return true; + } + default: + return false; + } +} +#else +static inline bool +hal_rx_handle_other_tlvs(uint32_t tlv_tag, void *rx_tlv, + struct hal_rx_ppdu_info *ppdu_info) +{ + return false; +} +#endif /* QCA_WIFI_QCA6290_11AX_MU_UL && QCA_WIFI_QCA6290_11AX */ + +#if defined(RX_PPDU_END_USER_STATS_1_OFDMA_INFO_VALID_OFFSET) && \ +defined(RX_PPDU_END_USER_STATS_22_SW_RESPONSE_REFERENCE_PTR_EXT_OFFSET) + +static inline void +hal_rx_handle_mu_ul_info( + void *rx_tlv, + struct mon_rx_user_status *mon_rx_user_status) +{ + mon_rx_user_status->mu_ul_user_v0_word0 = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_11, + SW_RESPONSE_REFERENCE_PTR); + + mon_rx_user_status->mu_ul_user_v0_word1 = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_22, + SW_RESPONSE_REFERENCE_PTR_EXT); +} + +static inline void +hal_rx_populate_byte_count(void *rx_tlv, void *ppduinfo, + struct mon_rx_user_status *mon_rx_user_status) +{ + uint32_t mpdu_ok_byte_count; + uint32_t mpdu_err_byte_count; + + mpdu_ok_byte_count = HAL_RX_GET(rx_tlv, + RX_PPDU_END_USER_STATS_17, + MPDU_OK_BYTE_COUNT); + mpdu_err_byte_count = HAL_RX_GET(rx_tlv, + RX_PPDU_END_USER_STATS_19, + MPDU_ERR_BYTE_COUNT); + + mon_rx_user_status->mpdu_ok_byte_count = mpdu_ok_byte_count; + mon_rx_user_status->mpdu_err_byte_count = mpdu_err_byte_count; +} +#else +static inline void +hal_rx_handle_mu_ul_info(void *rx_tlv, + struct mon_rx_user_status *mon_rx_user_status) +{ +} + +static inline void +hal_rx_populate_byte_count(void *rx_tlv, void *ppduinfo, + struct mon_rx_user_status *mon_rx_user_status) +{ + struct hal_rx_ppdu_info *ppdu_info = + (struct hal_rx_ppdu_info *)ppduinfo; + + /* HKV1: doesn't support mpdu byte count */ + mon_rx_user_status->mpdu_ok_byte_count = ppdu_info->rx_status.ppdu_len; + mon_rx_user_status->mpdu_err_byte_count = 0; +} +#endif + +static inline void +hal_rx_populate_mu_user_info(void *rx_tlv, void *ppduinfo, + struct mon_rx_user_status *mon_rx_user_status) +{ + struct hal_rx_ppdu_info *ppdu_info = + (struct hal_rx_ppdu_info *)ppduinfo; + + mon_rx_user_status->ast_index = ppdu_info->rx_status.ast_index; + mon_rx_user_status->tid = ppdu_info->rx_status.tid; + mon_rx_user_status->tcp_msdu_count = + ppdu_info->rx_status.tcp_msdu_count; + mon_rx_user_status->udp_msdu_count = + ppdu_info->rx_status.udp_msdu_count; + mon_rx_user_status->other_msdu_count = + ppdu_info->rx_status.other_msdu_count; + mon_rx_user_status->frame_control = ppdu_info->rx_status.frame_control; + mon_rx_user_status->frame_control_info_valid = + ppdu_info->rx_status.frame_control_info_valid; + mon_rx_user_status->data_sequence_control_info_valid = + ppdu_info->rx_status.data_sequence_control_info_valid; + mon_rx_user_status->first_data_seq_ctrl = + ppdu_info->rx_status.first_data_seq_ctrl; + mon_rx_user_status->preamble_type = ppdu_info->rx_status.preamble_type; + mon_rx_user_status->ht_flags = ppdu_info->rx_status.ht_flags; + mon_rx_user_status->rtap_flags = ppdu_info->rx_status.rtap_flags; + mon_rx_user_status->vht_flags = ppdu_info->rx_status.vht_flags; + mon_rx_user_status->he_flags = ppdu_info->rx_status.he_flags; + mon_rx_user_status->rs_flags = ppdu_info->rx_status.rs_flags; + + mon_rx_user_status->mpdu_cnt_fcs_ok = + ppdu_info->com_info.mpdu_cnt_fcs_ok; + mon_rx_user_status->mpdu_cnt_fcs_err = + ppdu_info->com_info.mpdu_cnt_fcs_err; + qdf_mem_copy(&mon_rx_user_status->mpdu_fcs_ok_bitmap, + &ppdu_info->com_info.mpdu_fcs_ok_bitmap, + HAL_RX_NUM_WORDS_PER_PPDU_BITMAP * + sizeof(ppdu_info->com_info.mpdu_fcs_ok_bitmap[0])); + + hal_rx_populate_byte_count(rx_tlv, ppdu_info, mon_rx_user_status); +} + +#ifdef WLAN_TX_PKT_CAPTURE_ENH +static inline void +hal_rx_populate_tx_capture_user_info(void *ppduinfo, + uint32_t user_id) +{ + struct hal_rx_ppdu_info *ppdu_info; + struct mon_rx_info *mon_rx_info; + struct mon_rx_user_info *mon_rx_user_info; + + ppdu_info = (struct hal_rx_ppdu_info *)ppduinfo; + mon_rx_info = &ppdu_info->rx_info; + mon_rx_user_info = &ppdu_info->rx_user_info[user_id]; + mon_rx_user_info->qos_control_info_valid = + mon_rx_info->qos_control_info_valid; + mon_rx_user_info->qos_control = mon_rx_info->qos_control; +} +#else +static inline void +hal_rx_populate_tx_capture_user_info(void *ppduinfo, + uint32_t user_id) +{ +} +#endif + +#define HAL_RX_UPDATE_RSSI_PER_CHAIN_BW(chain, word_1, word_2, \ + ppdu_info, rssi_info_tlv) \ + { \ + ppdu_info->rx_status.rssi_chain[chain][0] = \ + HAL_RX_GET(rssi_info_tlv, RECEIVE_RSSI_INFO_##word_1,\ + RSSI_PRI20_CHAIN##chain); \ + ppdu_info->rx_status.rssi_chain[chain][1] = \ + HAL_RX_GET(rssi_info_tlv, RECEIVE_RSSI_INFO_##word_1,\ + RSSI_EXT20_CHAIN##chain); \ + ppdu_info->rx_status.rssi_chain[chain][2] = \ + HAL_RX_GET(rssi_info_tlv, RECEIVE_RSSI_INFO_##word_1,\ + RSSI_EXT40_LOW20_CHAIN##chain); \ + ppdu_info->rx_status.rssi_chain[chain][3] = \ + HAL_RX_GET(rssi_info_tlv, RECEIVE_RSSI_INFO_##word_1,\ + RSSI_EXT40_HIGH20_CHAIN##chain); \ + ppdu_info->rx_status.rssi_chain[chain][4] = \ + HAL_RX_GET(rssi_info_tlv, RECEIVE_RSSI_INFO_##word_2,\ + RSSI_EXT80_LOW20_CHAIN##chain); \ + ppdu_info->rx_status.rssi_chain[chain][5] = \ + HAL_RX_GET(rssi_info_tlv, RECEIVE_RSSI_INFO_##word_2,\ + RSSI_EXT80_LOW_HIGH20_CHAIN##chain); \ + ppdu_info->rx_status.rssi_chain[chain][6] = \ + HAL_RX_GET(rssi_info_tlv, RECEIVE_RSSI_INFO_##word_2,\ + RSSI_EXT80_HIGH_LOW20_CHAIN##chain); \ + ppdu_info->rx_status.rssi_chain[chain][7] = \ + HAL_RX_GET(rssi_info_tlv, RECEIVE_RSSI_INFO_##word_2,\ + RSSI_EXT80_HIGH20_CHAIN##chain); \ + } \ + +#define HAL_RX_PPDU_UPDATE_RSSI(ppdu_info, rssi_info_tlv) \ + {HAL_RX_UPDATE_RSSI_PER_CHAIN_BW(0, 0, 1, ppdu_info, rssi_info_tlv) \ + HAL_RX_UPDATE_RSSI_PER_CHAIN_BW(1, 2, 3, ppdu_info, rssi_info_tlv) \ + HAL_RX_UPDATE_RSSI_PER_CHAIN_BW(2, 4, 5, ppdu_info, rssi_info_tlv) \ + HAL_RX_UPDATE_RSSI_PER_CHAIN_BW(3, 6, 7, ppdu_info, rssi_info_tlv) \ + HAL_RX_UPDATE_RSSI_PER_CHAIN_BW(4, 8, 9, ppdu_info, rssi_info_tlv) \ + HAL_RX_UPDATE_RSSI_PER_CHAIN_BW(5, 10, 11, ppdu_info, rssi_info_tlv) \ + HAL_RX_UPDATE_RSSI_PER_CHAIN_BW(6, 12, 13, ppdu_info, rssi_info_tlv) \ + HAL_RX_UPDATE_RSSI_PER_CHAIN_BW(7, 14, 15, ppdu_info, rssi_info_tlv)} \ + +static inline uint32_t +hal_rx_update_rssi_chain(struct hal_rx_ppdu_info *ppdu_info, + uint8_t *rssi_info_tlv) +{ + HAL_RX_PPDU_UPDATE_RSSI(ppdu_info, rssi_info_tlv) + return 0; +} + +#ifdef WLAN_TX_PKT_CAPTURE_ENH +static inline void +hal_get_qos_control(void *rx_tlv, + struct hal_rx_ppdu_info *ppdu_info) +{ + ppdu_info->rx_info.qos_control_info_valid = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_3, + QOS_CONTROL_INFO_VALID); + + if (ppdu_info->rx_info.qos_control_info_valid) + ppdu_info->rx_info.qos_control = + HAL_RX_GET(rx_tlv, + RX_PPDU_END_USER_STATS_5, + QOS_CONTROL_FIELD); +} + +static inline void +hal_get_mac_addr1(uint8_t *rx_mpdu_start, + struct hal_rx_ppdu_info *ppdu_info) +{ + if (ppdu_info->sw_frame_group_id + == HAL_MPDU_SW_FRAME_GROUP_MGMT_PROBE_REQ) { + ppdu_info->rx_info.mac_addr1_valid = + HAL_RX_GET_MAC_ADDR1_VALID(rx_mpdu_start); + + *(uint32_t *)&ppdu_info->rx_info.mac_addr1[0] = + HAL_RX_GET(rx_mpdu_start, + RX_MPDU_INFO_15, + MAC_ADDR_AD1_31_0); + } +} +#else +static inline void +hal_get_qos_control(void *rx_tlv, + struct hal_rx_ppdu_info *ppdu_info) +{ +} + +static inline void +hal_get_mac_addr1(uint8_t *rx_mpdu_start, + struct hal_rx_ppdu_info *ppdu_info) +{ +} +#endif +/** + * hal_rx_status_get_tlv_info() - process receive info TLV + * @rx_tlv_hdr: pointer to TLV header + * @ppdu_info: pointer to ppdu_info + * + * Return: HAL_TLV_STATUS_PPDU_NOT_DONE or HAL_TLV_STATUS_PPDU_DONE from tlv + */ +static inline uint32_t +hal_rx_status_get_tlv_info_generic(void *rx_tlv_hdr, void *ppduinfo, + hal_soc_handle_t hal_soc_hdl, + qdf_nbuf_t nbuf) +{ + struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl; + uint32_t tlv_tag, user_id, tlv_len, value; + uint8_t group_id = 0; + uint8_t he_dcm = 0; + uint8_t he_stbc = 0; + uint16_t he_gi = 0; + uint16_t he_ltf = 0; + void *rx_tlv; + bool unhandled = false; + struct mon_rx_user_status *mon_rx_user_status; + struct hal_rx_ppdu_info *ppdu_info = + (struct hal_rx_ppdu_info *)ppduinfo; + + tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv_hdr); + user_id = HAL_RX_GET_USER_TLV32_USERID(rx_tlv_hdr); + tlv_len = HAL_RX_GET_USER_TLV32_LEN(rx_tlv_hdr); + + rx_tlv = (uint8_t *)rx_tlv_hdr + HAL_RX_TLV32_HDR_SIZE; + + qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + rx_tlv, tlv_len); + + switch (tlv_tag) { + + case WIFIRX_PPDU_START_E: + { + struct hal_rx_ppdu_common_info *com_info = &ppdu_info->com_info; + + ppdu_info->com_info.ppdu_id = + HAL_RX_GET(rx_tlv, RX_PPDU_START_0, + PHY_PPDU_ID); + /* channel number is set in PHY meta data */ + ppdu_info->rx_status.chan_num = + (HAL_RX_GET(rx_tlv, RX_PPDU_START_1, + SW_PHY_META_DATA) & 0x0000FFFF); + ppdu_info->rx_status.chan_freq = + (HAL_RX_GET(rx_tlv, RX_PPDU_START_1, + SW_PHY_META_DATA) & 0xFFFF0000)>>16; + ppdu_info->com_info.ppdu_timestamp = + HAL_RX_GET(rx_tlv, RX_PPDU_START_2, + PPDU_START_TIMESTAMP); + ppdu_info->rx_status.ppdu_timestamp = + ppdu_info->com_info.ppdu_timestamp; + ppdu_info->rx_state = HAL_RX_MON_PPDU_START; + + /* If last ppdu_id doesn't match new ppdu_id, + * 1. reset mpdu_cnt + * 2. update last_ppdu_id with new + * 3. reset mpdu fcs bitmap + */ + if (com_info->ppdu_id != com_info->last_ppdu_id) { + com_info->mpdu_cnt = 0; + com_info->last_ppdu_id = + com_info->ppdu_id; + com_info->num_users = 0; + qdf_mem_zero(&com_info->mpdu_fcs_ok_bitmap, + HAL_RX_NUM_WORDS_PER_PPDU_BITMAP * + sizeof(com_info->mpdu_fcs_ok_bitmap[0])); + } + break; + } + + case WIFIRX_PPDU_START_USER_INFO_E: + break; + + case WIFIRX_PPDU_END_E: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] ppdu_end_e len=%d", + __func__, __LINE__, tlv_len); + /* This is followed by sub-TLVs of PPDU_END */ + ppdu_info->rx_state = HAL_RX_MON_PPDU_END; + break; + + case WIFIPHYRX_PKT_END_E: + hal_rx_get_rtt_info(hal_soc_hdl, rx_tlv, ppdu_info); + break; + + case WIFIRXPCU_PPDU_END_INFO_E: + ppdu_info->rx_status.rx_antenna = + HAL_RX_GET(rx_tlv, RXPCU_PPDU_END_INFO_2, RX_ANTENNA); + ppdu_info->rx_status.tsft = + HAL_RX_GET(rx_tlv, RXPCU_PPDU_END_INFO_1, + WB_TIMESTAMP_UPPER_32); + ppdu_info->rx_status.tsft = (ppdu_info->rx_status.tsft << 32) | + HAL_RX_GET(rx_tlv, RXPCU_PPDU_END_INFO_0, + WB_TIMESTAMP_LOWER_32); + ppdu_info->rx_status.duration = + HAL_RX_GET(rx_tlv, UNIFIED_RXPCU_PPDU_END_INFO_8, + RX_PPDU_DURATION); + hal_rx_get_bb_info(hal_soc_hdl, rx_tlv, ppdu_info); + break; + + /* + * WIFIRX_PPDU_END_USER_STATS_E comes for each user received. + * for MU, based on num users we see this tlv that many times. + */ + case WIFIRX_PPDU_END_USER_STATS_E: + { + unsigned long tid = 0; + uint16_t seq = 0; + + ppdu_info->rx_status.ast_index = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_4, + AST_INDEX); + + tid = HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_12, + RECEIVED_QOS_DATA_TID_BITMAP); + ppdu_info->rx_status.tid = qdf_find_first_bit(&tid, sizeof(tid)*8); + + if (ppdu_info->rx_status.tid == (sizeof(tid) * 8)) + ppdu_info->rx_status.tid = HAL_TID_INVALID; + + ppdu_info->rx_status.tcp_msdu_count = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_9, + TCP_MSDU_COUNT) + + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_10, + TCP_ACK_MSDU_COUNT); + ppdu_info->rx_status.udp_msdu_count = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_9, + UDP_MSDU_COUNT); + ppdu_info->rx_status.other_msdu_count = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_10, + OTHER_MSDU_COUNT); + + if (ppdu_info->sw_frame_group_id + != HAL_MPDU_SW_FRAME_GROUP_NULL_DATA) { + ppdu_info->rx_status.frame_control_info_valid = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_3, + FRAME_CONTROL_INFO_VALID); + + if (ppdu_info->rx_status.frame_control_info_valid) + ppdu_info->rx_status.frame_control = + HAL_RX_GET(rx_tlv, + RX_PPDU_END_USER_STATS_4, + FRAME_CONTROL_FIELD); + + hal_get_qos_control(rx_tlv, ppdu_info); + } + + ppdu_info->rx_status.data_sequence_control_info_valid = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_3, + DATA_SEQUENCE_CONTROL_INFO_VALID); + + seq = HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_5, + FIRST_DATA_SEQ_CTRL); + if (ppdu_info->rx_status.data_sequence_control_info_valid) + ppdu_info->rx_status.first_data_seq_ctrl = seq; + + ppdu_info->rx_status.preamble_type = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_3, + HT_CONTROL_FIELD_PKT_TYPE); + switch (ppdu_info->rx_status.preamble_type) { + case HAL_RX_PKT_TYPE_11N: + ppdu_info->rx_status.ht_flags = 1; + ppdu_info->rx_status.rtap_flags |= HT_SGI_PRESENT; + break; + case HAL_RX_PKT_TYPE_11AC: + ppdu_info->rx_status.vht_flags = 1; + break; + case HAL_RX_PKT_TYPE_11AX: + ppdu_info->rx_status.he_flags = 1; + break; + default: + break; + } + + ppdu_info->com_info.mpdu_cnt_fcs_ok = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_3, + MPDU_CNT_FCS_OK); + ppdu_info->com_info.mpdu_cnt_fcs_err = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_2, + MPDU_CNT_FCS_ERR); + if ((ppdu_info->com_info.mpdu_cnt_fcs_ok | + ppdu_info->com_info.mpdu_cnt_fcs_err) > 1) + ppdu_info->rx_status.rs_flags |= IEEE80211_AMPDU_FLAG; + else + ppdu_info->rx_status.rs_flags &= + (~IEEE80211_AMPDU_FLAG); + + ppdu_info->com_info.mpdu_fcs_ok_bitmap[0] = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_7, + FCS_OK_BITMAP_31_0); + + ppdu_info->com_info.mpdu_fcs_ok_bitmap[1] = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_8, + FCS_OK_BITMAP_63_32); + + if (user_id < HAL_MAX_UL_MU_USERS) { + mon_rx_user_status = + &ppdu_info->rx_user_status[user_id]; + + hal_rx_handle_mu_ul_info(rx_tlv, mon_rx_user_status); + + ppdu_info->com_info.num_users++; + + hal_rx_populate_mu_user_info(rx_tlv, ppdu_info, + mon_rx_user_status); + + hal_rx_populate_tx_capture_user_info(ppdu_info, + user_id); + + } + break; + } + + case WIFIRX_PPDU_END_USER_STATS_EXT_E: + ppdu_info->com_info.mpdu_fcs_ok_bitmap[2] = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_EXT_1, + FCS_OK_BITMAP_95_64); + + ppdu_info->com_info.mpdu_fcs_ok_bitmap[3] = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_EXT_2, + FCS_OK_BITMAP_127_96); + + ppdu_info->com_info.mpdu_fcs_ok_bitmap[4] = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_EXT_3, + FCS_OK_BITMAP_159_128); + + ppdu_info->com_info.mpdu_fcs_ok_bitmap[5] = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_EXT_4, + FCS_OK_BITMAP_191_160); + + ppdu_info->com_info.mpdu_fcs_ok_bitmap[6] = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_EXT_5, + FCS_OK_BITMAP_223_192); + + ppdu_info->com_info.mpdu_fcs_ok_bitmap[7] = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_EXT_6, + FCS_OK_BITMAP_255_224); + break; + + case WIFIRX_PPDU_END_STATUS_DONE_E: + return HAL_TLV_STATUS_PPDU_DONE; + + case WIFIDUMMY_E: + return HAL_TLV_STATUS_BUF_DONE; + + case WIFIPHYRX_HT_SIG_E: + { + uint8_t *ht_sig_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(UNIFIED_PHYRX_HT_SIG_0, + HT_SIG_INFO_PHYRX_HT_SIG_INFO_DETAILS); + value = HAL_RX_GET(ht_sig_info, HT_SIG_INFO_1, + FEC_CODING); + ppdu_info->rx_status.ldpc = (value == HAL_SU_MU_CODING_LDPC) ? + 1 : 0; + ppdu_info->rx_status.mcs = HAL_RX_GET(ht_sig_info, + HT_SIG_INFO_0, MCS); + ppdu_info->rx_status.ht_mcs = ppdu_info->rx_status.mcs; + ppdu_info->rx_status.bw = HAL_RX_GET(ht_sig_info, + HT_SIG_INFO_0, CBW); + ppdu_info->rx_status.sgi = HAL_RX_GET(ht_sig_info, + HT_SIG_INFO_1, SHORT_GI); + ppdu_info->rx_status.reception_type = HAL_RX_TYPE_SU; + ppdu_info->rx_status.nss = ((ppdu_info->rx_status.mcs) >> + HT_SIG_SU_NSS_SHIFT) + 1; + ppdu_info->rx_status.mcs &= ((1 << HT_SIG_SU_NSS_SHIFT) - 1); + break; + } + + case WIFIPHYRX_L_SIG_B_E: + { + uint8_t *l_sig_b_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(UNIFIED_PHYRX_L_SIG_B_0, + L_SIG_B_INFO_PHYRX_L_SIG_B_INFO_DETAILS); + + value = HAL_RX_GET(l_sig_b_info, L_SIG_B_INFO_0, RATE); + ppdu_info->rx_status.l_sig_b_info = *((uint32_t *)l_sig_b_info); + switch (value) { + case 1: + ppdu_info->rx_status.rate = HAL_11B_RATE_3MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS3; + break; + case 2: + ppdu_info->rx_status.rate = HAL_11B_RATE_2MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS2; + break; + case 3: + ppdu_info->rx_status.rate = HAL_11B_RATE_1MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS1; + break; + case 4: + ppdu_info->rx_status.rate = HAL_11B_RATE_0MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS0; + break; + case 5: + ppdu_info->rx_status.rate = HAL_11B_RATE_6MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS6; + break; + case 6: + ppdu_info->rx_status.rate = HAL_11B_RATE_5MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS5; + break; + case 7: + ppdu_info->rx_status.rate = HAL_11B_RATE_4MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS4; + break; + default: + break; + } + ppdu_info->rx_status.cck_flag = 1; + ppdu_info->rx_status.reception_type = HAL_RX_TYPE_SU; + break; + } + + case WIFIPHYRX_L_SIG_A_E: + { + uint8_t *l_sig_a_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(UNIFIED_PHYRX_L_SIG_A_0, + L_SIG_A_INFO_PHYRX_L_SIG_A_INFO_DETAILS); + + value = HAL_RX_GET(l_sig_a_info, L_SIG_A_INFO_0, RATE); + ppdu_info->rx_status.l_sig_a_info = *((uint32_t *)l_sig_a_info); + switch (value) { + case 8: + ppdu_info->rx_status.rate = HAL_11A_RATE_0MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS0; + break; + case 9: + ppdu_info->rx_status.rate = HAL_11A_RATE_1MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS1; + break; + case 10: + ppdu_info->rx_status.rate = HAL_11A_RATE_2MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS2; + break; + case 11: + ppdu_info->rx_status.rate = HAL_11A_RATE_3MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS3; + break; + case 12: + ppdu_info->rx_status.rate = HAL_11A_RATE_4MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS4; + break; + case 13: + ppdu_info->rx_status.rate = HAL_11A_RATE_5MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS5; + break; + case 14: + ppdu_info->rx_status.rate = HAL_11A_RATE_6MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS6; + break; + case 15: + ppdu_info->rx_status.rate = HAL_11A_RATE_7MCS; + ppdu_info->rx_status.mcs = HAL_LEGACY_MCS7; + break; + default: + break; + } + ppdu_info->rx_status.ofdm_flag = 1; + ppdu_info->rx_status.reception_type = HAL_RX_TYPE_SU; + break; + } + + case WIFIPHYRX_VHT_SIG_A_E: + { + uint8_t *vht_sig_a_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(UNIFIED_PHYRX_VHT_SIG_A_0, + VHT_SIG_A_INFO_PHYRX_VHT_SIG_A_INFO_DETAILS); + + value = HAL_RX_GET(vht_sig_a_info, VHT_SIG_A_INFO_1, + SU_MU_CODING); + ppdu_info->rx_status.ldpc = (value == HAL_SU_MU_CODING_LDPC) ? + 1 : 0; + group_id = HAL_RX_GET(vht_sig_a_info, VHT_SIG_A_INFO_0, GROUP_ID); + ppdu_info->rx_status.vht_flag_values5 = group_id; + ppdu_info->rx_status.mcs = HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_1, MCS); + ppdu_info->rx_status.sgi = HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_1, GI_SETTING); + + switch (hal->target_type) { + case TARGET_TYPE_QCA8074: + case TARGET_TYPE_QCA8074V2: + case TARGET_TYPE_QCA6018: + case TARGET_TYPE_QCN9000: +#ifdef QCA_WIFI_QCA6390 + case TARGET_TYPE_QCA6390: +#endif + ppdu_info->rx_status.is_stbc = + HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_0, STBC); + value = HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_0, N_STS); + value = value & VHT_SIG_SU_NSS_MASK; + if (ppdu_info->rx_status.is_stbc && (value > 0)) + value = ((value + 1) >> 1) - 1; + ppdu_info->rx_status.nss = + ((value & VHT_SIG_SU_NSS_MASK) + 1); + + break; + case TARGET_TYPE_QCA6290: +#if !defined(QCA_WIFI_QCA6290_11AX) + ppdu_info->rx_status.is_stbc = + HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_0, STBC); + value = HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_0, N_STS); + value = value & VHT_SIG_SU_NSS_MASK; + if (ppdu_info->rx_status.is_stbc && (value > 0)) + value = ((value + 1) >> 1) - 1; + ppdu_info->rx_status.nss = + ((value & VHT_SIG_SU_NSS_MASK) + 1); +#else + ppdu_info->rx_status.nss = 0; +#endif + break; + case TARGET_TYPE_QCA6490: + case TARGET_TYPE_QCA6750: + ppdu_info->rx_status.nss = 0; + break; + default: + break; + } + ppdu_info->rx_status.vht_flag_values3[0] = + (((ppdu_info->rx_status.mcs) << 4) + | ppdu_info->rx_status.nss); + ppdu_info->rx_status.bw = HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_0, BANDWIDTH); + ppdu_info->rx_status.vht_flag_values2 = + ppdu_info->rx_status.bw; + ppdu_info->rx_status.vht_flag_values4 = + HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_1, SU_MU_CODING); + + ppdu_info->rx_status.beamformed = HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_1, BEAMFORMED); + if (group_id == 0 || group_id == 63) + ppdu_info->rx_status.reception_type = HAL_RX_TYPE_SU; + else + ppdu_info->rx_status.reception_type = + HAL_RX_TYPE_MU_MIMO; + + break; + } + case WIFIPHYRX_HE_SIG_A_SU_E: + { + uint8_t *he_sig_a_su_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(UNIFIED_PHYRX_HE_SIG_A_SU_0, + HE_SIG_A_SU_INFO_PHYRX_HE_SIG_A_SU_INFO_DETAILS); + ppdu_info->rx_status.he_flags = 1; + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_0, + FORMAT_INDICATION); + if (value == 0) { + ppdu_info->rx_status.he_data1 = + QDF_MON_STATUS_HE_TRIG_FORMAT_TYPE; + } else { + ppdu_info->rx_status.he_data1 = + QDF_MON_STATUS_HE_SU_FORMAT_TYPE; + } + + /* data1 */ + ppdu_info->rx_status.he_data1 |= + QDF_MON_STATUS_HE_BSS_COLOR_KNOWN | + QDF_MON_STATUS_HE_BEAM_CHANGE_KNOWN | + QDF_MON_STATUS_HE_DL_UL_KNOWN | + QDF_MON_STATUS_HE_MCS_KNOWN | + QDF_MON_STATUS_HE_DCM_KNOWN | + QDF_MON_STATUS_HE_CODING_KNOWN | + QDF_MON_STATUS_HE_LDPC_EXTRA_SYMBOL_KNOWN | + QDF_MON_STATUS_HE_STBC_KNOWN | + QDF_MON_STATUS_HE_DATA_BW_RU_KNOWN | + QDF_MON_STATUS_HE_DOPPLER_KNOWN; + + /* data2 */ + ppdu_info->rx_status.he_data2 = + QDF_MON_STATUS_HE_GI_KNOWN; + ppdu_info->rx_status.he_data2 |= + QDF_MON_STATUS_TXBF_KNOWN | + QDF_MON_STATUS_PE_DISAMBIGUITY_KNOWN | + QDF_MON_STATUS_TXOP_KNOWN | + QDF_MON_STATUS_LTF_SYMBOLS_KNOWN | + QDF_MON_STATUS_PRE_FEC_PADDING_KNOWN | + QDF_MON_STATUS_MIDABLE_PERIODICITY_KNOWN; + + /* data3 */ + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_0, BSS_COLOR_ID); + ppdu_info->rx_status.he_data3 = value; + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_0, BEAM_CHANGE); + value = value << QDF_MON_STATUS_BEAM_CHANGE_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_0, DL_UL_FLAG); + value = value << QDF_MON_STATUS_DL_UL_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_0, TRANSMIT_MCS); + ppdu_info->rx_status.mcs = value; + value = value << QDF_MON_STATUS_TRANSMIT_MCS_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_0, DCM); + he_dcm = value; + value = value << QDF_MON_STATUS_DCM_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_1, CODING); + ppdu_info->rx_status.ldpc = (value == HAL_SU_MU_CODING_LDPC) ? + 1 : 0; + value = value << QDF_MON_STATUS_CODING_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_1, + LDPC_EXTRA_SYMBOL); + value = value << QDF_MON_STATUS_LDPC_EXTRA_SYMBOL_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_1, STBC); + he_stbc = value; + value = value << QDF_MON_STATUS_STBC_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + /* data4 */ + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_0, + SPATIAL_REUSE); + ppdu_info->rx_status.he_data4 = value; + + /* data5 */ + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_0, TRANSMIT_BW); + ppdu_info->rx_status.he_data5 = value; + ppdu_info->rx_status.bw = value; + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_0, CP_LTF_SIZE); + switch (value) { + case 0: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_1_X; + break; + case 1: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_2_X; + break; + case 2: + he_gi = HE_GI_1_6; + he_ltf = HE_LTF_2_X; + break; + case 3: + if (he_dcm && he_stbc) { + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_4_X; + } else { + he_gi = HE_GI_3_2; + he_ltf = HE_LTF_4_X; + } + break; + } + ppdu_info->rx_status.sgi = he_gi; + value = he_gi << QDF_MON_STATUS_GI_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + value = he_ltf << QDF_MON_STATUS_HE_LTF_SIZE_SHIFT; + ppdu_info->rx_status.ltf_size = he_ltf; + ppdu_info->rx_status.he_data5 |= value; + + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_0, NSTS); + value = (value << QDF_MON_STATUS_HE_LTF_SYM_SHIFT); + ppdu_info->rx_status.he_data5 |= value; + + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_1, + PACKET_EXTENSION_A_FACTOR); + value = value << QDF_MON_STATUS_PRE_FEC_PAD_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_1, TXBF); + value = value << QDF_MON_STATUS_TXBF_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_1, + PACKET_EXTENSION_PE_DISAMBIGUITY); + value = value << QDF_MON_STATUS_PE_DISAMBIGUITY_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + + /* data6 */ + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_0, NSTS); + value++; + ppdu_info->rx_status.nss = value; + ppdu_info->rx_status.he_data6 = value; + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_1, + DOPPLER_INDICATION); + value = value << QDF_MON_STATUS_DOPPLER_SHIFT; + ppdu_info->rx_status.he_data6 |= value; + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_1, + TXOP_DURATION); + value = value << QDF_MON_STATUS_TXOP_SHIFT; + ppdu_info->rx_status.he_data6 |= value; + + ppdu_info->rx_status.beamformed = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_1, TXBF); + ppdu_info->rx_status.reception_type = HAL_RX_TYPE_SU; + break; + } + case WIFIPHYRX_HE_SIG_A_MU_DL_E: + { + uint8_t *he_sig_a_mu_dl_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(UNIFIED_PHYRX_HE_SIG_A_MU_DL_0, + HE_SIG_A_MU_DL_INFO_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS); + + ppdu_info->rx_status.he_mu_flags = 1; + + /* HE Flags */ + /*data1*/ + ppdu_info->rx_status.he_data1 = + QDF_MON_STATUS_HE_MU_FORMAT_TYPE; + ppdu_info->rx_status.he_data1 |= + QDF_MON_STATUS_HE_BSS_COLOR_KNOWN | + QDF_MON_STATUS_HE_DL_UL_KNOWN | + QDF_MON_STATUS_HE_LDPC_EXTRA_SYMBOL_KNOWN | + QDF_MON_STATUS_HE_STBC_KNOWN | + QDF_MON_STATUS_HE_DATA_BW_RU_KNOWN | + QDF_MON_STATUS_HE_DOPPLER_KNOWN; + + /* data2 */ + ppdu_info->rx_status.he_data2 = + QDF_MON_STATUS_HE_GI_KNOWN; + ppdu_info->rx_status.he_data2 |= + QDF_MON_STATUS_LTF_SYMBOLS_KNOWN | + QDF_MON_STATUS_PRE_FEC_PADDING_KNOWN | + QDF_MON_STATUS_PE_DISAMBIGUITY_KNOWN | + QDF_MON_STATUS_TXOP_KNOWN | + QDF_MON_STATUS_MIDABLE_PERIODICITY_KNOWN; + + /*data3*/ + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, BSS_COLOR_ID); + ppdu_info->rx_status.he_data3 = value; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, DL_UL_FLAG); + value = value << QDF_MON_STATUS_DL_UL_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_1, + LDPC_EXTRA_SYMBOL); + value = value << QDF_MON_STATUS_LDPC_EXTRA_SYMBOL_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_1, STBC); + he_stbc = value; + value = value << QDF_MON_STATUS_STBC_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + /*data4*/ + value = HAL_RX_GET(he_sig_a_mu_dl_info, HE_SIG_A_MU_DL_INFO_0, + SPATIAL_REUSE); + ppdu_info->rx_status.he_data4 = value; + + /*data5*/ + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, TRANSMIT_BW); + ppdu_info->rx_status.he_data5 = value; + ppdu_info->rx_status.bw = value; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, CP_LTF_SIZE); + switch (value) { + case 0: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_4_X; + break; + case 1: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_2_X; + break; + case 2: + he_gi = HE_GI_1_6; + he_ltf = HE_LTF_2_X; + break; + case 3: + he_gi = HE_GI_3_2; + he_ltf = HE_LTF_4_X; + break; + } + ppdu_info->rx_status.sgi = he_gi; + value = he_gi << QDF_MON_STATUS_GI_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + + value = he_ltf << QDF_MON_STATUS_HE_LTF_SIZE_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_1, NUM_LTF_SYMBOLS); + value = (value << QDF_MON_STATUS_HE_LTF_SYM_SHIFT); + ppdu_info->rx_status.he_data5 |= value; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, HE_SIG_A_MU_DL_INFO_1, + PACKET_EXTENSION_A_FACTOR); + value = value << QDF_MON_STATUS_PRE_FEC_PAD_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + + + value = HAL_RX_GET(he_sig_a_mu_dl_info, HE_SIG_A_MU_DL_INFO_1, + PACKET_EXTENSION_PE_DISAMBIGUITY); + value = value << QDF_MON_STATUS_PE_DISAMBIGUITY_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + + /*data6*/ + value = HAL_RX_GET(he_sig_a_mu_dl_info, HE_SIG_A_MU_DL_INFO_0, + DOPPLER_INDICATION); + value = value << QDF_MON_STATUS_DOPPLER_SHIFT; + ppdu_info->rx_status.he_data6 |= value; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, HE_SIG_A_MU_DL_INFO_1, + TXOP_DURATION); + value = value << QDF_MON_STATUS_TXOP_SHIFT; + ppdu_info->rx_status.he_data6 |= value; + + /* HE-MU Flags */ + /* HE-MU-flags1 */ + ppdu_info->rx_status.he_flags1 = + QDF_MON_STATUS_SIG_B_MCS_KNOWN | + QDF_MON_STATUS_SIG_B_DCM_KNOWN | + QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_1_KNOWN | + QDF_MON_STATUS_SIG_B_SYM_NUM_KNOWN | + QDF_MON_STATUS_RU_0_KNOWN; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, MCS_OF_SIG_B); + ppdu_info->rx_status.he_flags1 |= value; + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, DCM_OF_SIG_B); + value = value << QDF_MON_STATUS_DCM_FLAG_1_SHIFT; + ppdu_info->rx_status.he_flags1 |= value; + + /* HE-MU-flags2 */ + ppdu_info->rx_status.he_flags2 = + QDF_MON_STATUS_BW_KNOWN; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, TRANSMIT_BW); + ppdu_info->rx_status.he_flags2 |= value; + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, COMP_MODE_SIG_B); + value = value << QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_2_SHIFT; + ppdu_info->rx_status.he_flags2 |= value; + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, NUM_SIG_B_SYMBOLS); + value = value - 1; + value = value << QDF_MON_STATUS_NUM_SIG_B_SYMBOLS_SHIFT; + ppdu_info->rx_status.he_flags2 |= value; + ppdu_info->rx_status.reception_type = HAL_RX_TYPE_MU_MIMO; + break; + } + case WIFIPHYRX_HE_SIG_B1_MU_E: + { + + uint8_t *he_sig_b1_mu_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(UNIFIED_PHYRX_HE_SIG_B1_MU_0, + HE_SIG_B1_MU_INFO_PHYRX_HE_SIG_B1_MU_INFO_DETAILS); + + ppdu_info->rx_status.he_sig_b_common_known |= + QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU0; + /* TODO: Check on the availability of other fields in + * sig_b_common + */ + + value = HAL_RX_GET(he_sig_b1_mu_info, + HE_SIG_B1_MU_INFO_0, RU_ALLOCATION); + ppdu_info->rx_status.he_RU[0] = value; + ppdu_info->rx_status.reception_type = HAL_RX_TYPE_MU_MIMO; + break; + } + case WIFIPHYRX_HE_SIG_B2_MU_E: + { + uint8_t *he_sig_b2_mu_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(UNIFIED_PHYRX_HE_SIG_B2_MU_0, + HE_SIG_B2_MU_INFO_PHYRX_HE_SIG_B2_MU_INFO_DETAILS); + /* + * Not all "HE" fields can be updated from + * WIFIPHYRX_HE_SIG_A_MU_DL_E TLV. Use WIFIPHYRX_HE_SIG_B2_MU_E + * to populate rest of the "HE" fields for MU scenarios. + */ + + /* HE-data1 */ + ppdu_info->rx_status.he_data1 |= + QDF_MON_STATUS_HE_MCS_KNOWN | + QDF_MON_STATUS_HE_CODING_KNOWN; + + /* HE-data2 */ + + /* HE-data3 */ + value = HAL_RX_GET(he_sig_b2_mu_info, + HE_SIG_B2_MU_INFO_0, STA_MCS); + ppdu_info->rx_status.mcs = value; + value = value << QDF_MON_STATUS_TRANSMIT_MCS_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + + value = HAL_RX_GET(he_sig_b2_mu_info, + HE_SIG_B2_MU_INFO_0, STA_CODING); + value = value << QDF_MON_STATUS_CODING_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + /* HE-data4 */ + value = HAL_RX_GET(he_sig_b2_mu_info, + HE_SIG_B2_MU_INFO_0, STA_ID); + value = value << QDF_MON_STATUS_STA_ID_SHIFT; + ppdu_info->rx_status.he_data4 |= value; + + /* HE-data5 */ + + /* HE-data6 */ + value = HAL_RX_GET(he_sig_b2_mu_info, + HE_SIG_B2_MU_INFO_0, NSTS); + /* value n indicates n+1 spatial streams */ + value++; + ppdu_info->rx_status.nss = value; + ppdu_info->rx_status.he_data6 |= value; + + break; + + } + case WIFIPHYRX_HE_SIG_B2_OFDMA_E: + { + uint8_t *he_sig_b2_ofdma_info = + (uint8_t *)rx_tlv + + HAL_RX_OFFSET(UNIFIED_PHYRX_HE_SIG_B2_OFDMA_0, + HE_SIG_B2_OFDMA_INFO_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS); + + /* + * Not all "HE" fields can be updated from + * WIFIPHYRX_HE_SIG_A_MU_DL_E TLV. Use WIFIPHYRX_HE_SIG_B2_MU_E + * to populate rest of "HE" fields for MU OFDMA scenarios. + */ + + /* HE-data1 */ + ppdu_info->rx_status.he_data1 |= + QDF_MON_STATUS_HE_MCS_KNOWN | + QDF_MON_STATUS_HE_DCM_KNOWN | + QDF_MON_STATUS_HE_CODING_KNOWN; + + /* HE-data2 */ + ppdu_info->rx_status.he_data2 |= + QDF_MON_STATUS_TXBF_KNOWN; + + /* HE-data3 */ + value = HAL_RX_GET(he_sig_b2_ofdma_info, + HE_SIG_B2_OFDMA_INFO_0, STA_MCS); + ppdu_info->rx_status.mcs = value; + value = value << QDF_MON_STATUS_TRANSMIT_MCS_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + value = HAL_RX_GET(he_sig_b2_ofdma_info, + HE_SIG_B2_OFDMA_INFO_0, STA_DCM); + he_dcm = value; + value = value << QDF_MON_STATUS_DCM_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + value = HAL_RX_GET(he_sig_b2_ofdma_info, + HE_SIG_B2_OFDMA_INFO_0, STA_CODING); + value = value << QDF_MON_STATUS_CODING_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + /* HE-data4 */ + value = HAL_RX_GET(he_sig_b2_ofdma_info, + HE_SIG_B2_OFDMA_INFO_0, STA_ID); + value = value << QDF_MON_STATUS_STA_ID_SHIFT; + ppdu_info->rx_status.he_data4 |= value; + + /* HE-data5 */ + value = HAL_RX_GET(he_sig_b2_ofdma_info, + HE_SIG_B2_OFDMA_INFO_0, TXBF); + value = value << QDF_MON_STATUS_TXBF_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + + /* HE-data6 */ + value = HAL_RX_GET(he_sig_b2_ofdma_info, + HE_SIG_B2_OFDMA_INFO_0, NSTS); + /* value n indicates n+1 spatial streams */ + value++; + ppdu_info->rx_status.nss = value; + ppdu_info->rx_status.he_data6 |= value; + ppdu_info->rx_status.reception_type = HAL_RX_TYPE_MU_OFDMA; + break; + } + case WIFIPHYRX_RSSI_LEGACY_E: + { + uint8_t reception_type; + int8_t rssi_value; + uint8_t *rssi_info_tlv = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(UNIFIED_PHYRX_RSSI_LEGACY_19, + RECEIVE_RSSI_INFO_PREAMBLE_RSSI_INFO_DETAILS); + + ppdu_info->rx_status.rssi_comb = HAL_RX_GET(rx_tlv, + PHYRX_RSSI_LEGACY_35, RSSI_COMB); + ppdu_info->rx_status.bw = hal->ops->hal_rx_get_tlv(rx_tlv); + ppdu_info->rx_status.he_re = 0; + + reception_type = HAL_RX_GET(rx_tlv, + PHYRX_RSSI_LEGACY_0, + RECEPTION_TYPE); + switch (reception_type) { + case QDF_RECEPTION_TYPE_ULOFMDA: + ppdu_info->rx_status.reception_type = + HAL_RX_TYPE_MU_OFDMA; + ppdu_info->rx_status.ulofdma_flag = 1; + ppdu_info->rx_status.he_data1 = + QDF_MON_STATUS_HE_TRIG_FORMAT_TYPE; + break; + case QDF_RECEPTION_TYPE_ULMIMO: + ppdu_info->rx_status.reception_type = + HAL_RX_TYPE_MU_MIMO; + ppdu_info->rx_status.he_data1 = + QDF_MON_STATUS_HE_MU_FORMAT_TYPE; + break; + default: + ppdu_info->rx_status.reception_type = + HAL_RX_TYPE_SU; + break; + } + hal_rx_update_rssi_chain(ppdu_info, rssi_info_tlv); + rssi_value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_0, RSSI_PRI20_CHAIN0); + ppdu_info->rx_status.rssi[0] = rssi_value; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_PRI20_CHAIN0: %d\n", rssi_value); + + rssi_value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_2, RSSI_PRI20_CHAIN1); + ppdu_info->rx_status.rssi[1] = rssi_value; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_PRI20_CHAIN1: %d\n", rssi_value); + + rssi_value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_4, RSSI_PRI20_CHAIN2); + ppdu_info->rx_status.rssi[2] = rssi_value; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_PRI20_CHAIN2: %d\n", rssi_value); + + rssi_value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_6, RSSI_PRI20_CHAIN3); + ppdu_info->rx_status.rssi[3] = rssi_value; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_PRI20_CHAIN3: %d\n", rssi_value); + + rssi_value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_8, RSSI_PRI20_CHAIN4); + ppdu_info->rx_status.rssi[4] = rssi_value; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_PRI20_CHAIN4: %d\n", rssi_value); + + rssi_value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_10, + RSSI_PRI20_CHAIN5); + ppdu_info->rx_status.rssi[5] = rssi_value; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_PRI20_CHAIN5: %d\n", rssi_value); + + rssi_value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_12, + RSSI_PRI20_CHAIN6); + ppdu_info->rx_status.rssi[6] = rssi_value; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_PRI20_CHAIN6: %d\n", rssi_value); + + rssi_value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_14, + RSSI_PRI20_CHAIN7); + ppdu_info->rx_status.rssi[7] = rssi_value; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_PRI20_CHAIN7: %d\n", rssi_value); + break; + } + case WIFIPHYRX_OTHER_RECEIVE_INFO_E: + hal_rx_proc_phyrx_other_receive_info_tlv(hal, rx_tlv_hdr, + ppdu_info); + break; + case WIFIRX_HEADER_E: + { + struct hal_rx_ppdu_common_info *com_info = &ppdu_info->com_info; + uint16_t mpdu_cnt = com_info->mpdu_cnt; + + if (mpdu_cnt >= HAL_RX_MAX_MPDU) { + hal_alert("Number of MPDUs per PPDU exceeded"); + break; + } + /* Update first_msdu_payload for every mpdu and increment + * com_info->mpdu_cnt for every WIFIRX_HEADER_E TLV + */ + ppdu_info->ppdu_msdu_info[mpdu_cnt].first_msdu_payload = + rx_tlv; + ppdu_info->ppdu_msdu_info[mpdu_cnt].payload_len = tlv_len; + ppdu_info->ppdu_msdu_info[mpdu_cnt].nbuf = nbuf; + ppdu_info->msdu_info.first_msdu_payload = rx_tlv; + ppdu_info->msdu_info.payload_len = tlv_len; + ppdu_info->user_id = user_id; + ppdu_info->hdr_len = tlv_len; + ppdu_info->data = rx_tlv; + ppdu_info->data += 4; + + /* for every RX_HEADER TLV increment mpdu_cnt */ + com_info->mpdu_cnt++; + return HAL_TLV_STATUS_HEADER; + } + case WIFIRX_MPDU_START_E: + { + uint8_t *rx_mpdu_start = + (uint8_t *)rx_tlv + HAL_RX_OFFSET(UNIFIED_RX_MPDU_START_0, + RX_MPDU_INFO_RX_MPDU_INFO_DETAILS); + uint32_t ppdu_id = + HAL_RX_GET_PPDU_ID(rx_mpdu_start); + uint8_t filter_category = 0; + + ppdu_info->nac_info.fc_valid = + HAL_RX_GET_FC_VALID(rx_mpdu_start); + + ppdu_info->nac_info.to_ds_flag = + HAL_RX_GET_TO_DS_FLAG(rx_mpdu_start); + + ppdu_info->nac_info.frame_control = + HAL_RX_GET(rx_mpdu_start, + RX_MPDU_INFO_14, + MPDU_FRAME_CONTROL_FIELD); + + ppdu_info->sw_frame_group_id = + HAL_RX_GET_SW_FRAME_GROUP_ID(rx_mpdu_start); + + if (ppdu_info->sw_frame_group_id == + HAL_MPDU_SW_FRAME_GROUP_NULL_DATA) { + ppdu_info->rx_status.frame_control_info_valid = + ppdu_info->nac_info.fc_valid; + ppdu_info->rx_status.frame_control = + ppdu_info->nac_info.frame_control; + } + + hal_get_mac_addr1(rx_mpdu_start, + ppdu_info); + + ppdu_info->nac_info.mac_addr2_valid = + HAL_RX_GET_MAC_ADDR2_VALID(rx_mpdu_start); + + *(uint16_t *)&ppdu_info->nac_info.mac_addr2[0] = + HAL_RX_GET(rx_mpdu_start, + RX_MPDU_INFO_16, + MAC_ADDR_AD2_15_0); + + *(uint32_t *)&ppdu_info->nac_info.mac_addr2[2] = + HAL_RX_GET(rx_mpdu_start, + RX_MPDU_INFO_17, + MAC_ADDR_AD2_47_16); + + if (ppdu_info->rx_status.prev_ppdu_id != ppdu_id) { + ppdu_info->rx_status.prev_ppdu_id = ppdu_id; + ppdu_info->rx_status.ppdu_len = + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_13, + MPDU_LENGTH); + } else { + ppdu_info->rx_status.ppdu_len += + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_13, + MPDU_LENGTH); + } + + filter_category = + HAL_RX_GET_FILTER_CATEGORY(rx_mpdu_start); + + if (filter_category == 0) + ppdu_info->rx_status.rxpcu_filter_pass = 1; + else if (filter_category == 1) + ppdu_info->rx_status.monitor_direct_used = 1; + + ppdu_info->nac_info.mcast_bcast = + HAL_RX_GET(rx_mpdu_start, + RX_MPDU_INFO_13, + MCAST_BCAST); + break; + } + case WIFIRX_MPDU_END_E: + ppdu_info->user_id = user_id; + ppdu_info->fcs_err = + HAL_RX_GET(rx_tlv, RX_MPDU_END_1, + FCS_ERR); + return HAL_TLV_STATUS_MPDU_END; + case WIFIRX_MSDU_END_E: + if (user_id < HAL_MAX_UL_MU_USERS) { + ppdu_info->rx_msdu_info[user_id].cce_metadata = + HAL_RX_MSDU_END_CCE_METADATA_GET(rx_tlv); + ppdu_info->rx_msdu_info[user_id].fse_metadata = + HAL_RX_MSDU_END_FSE_METADATA_GET(rx_tlv); + ppdu_info->rx_msdu_info[user_id].is_flow_idx_timeout = + HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(rx_tlv); + ppdu_info->rx_msdu_info[user_id].is_flow_idx_invalid = + HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(rx_tlv); + ppdu_info->rx_msdu_info[user_id].flow_idx = + HAL_RX_MSDU_END_FLOW_IDX_GET(rx_tlv); + } + return HAL_TLV_STATUS_MSDU_END; + case 0: + return HAL_TLV_STATUS_PPDU_DONE; + + default: + if (hal_rx_handle_other_tlvs(tlv_tag, rx_tlv, ppdu_info)) + unhandled = false; + else + unhandled = true; + break; + } + + if (!unhandled) + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s TLV type: %d, TLV len:%d %s", + __func__, tlv_tag, tlv_len, + unhandled == true ? "unhandled" : ""); + + qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + rx_tlv, tlv_len); + + return HAL_TLV_STATUS_PPDU_NOT_DONE; +} + +/** + * hal_reo_setup - Initialize HW REO block + * + * @hal_soc: Opaque HAL SOC handle + * @reo_params: parameters needed by HAL for REO config + */ +static void hal_reo_setup_generic(struct hal_soc *soc, + void *reoparams) +{ + uint32_t reg_val; + struct hal_reo_params *reo_params = (struct hal_reo_params *)reoparams; + + reg_val = HAL_REG_READ(soc, HWIO_REO_R0_GENERAL_ENABLE_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET)); + + hal_reo_config(soc, reg_val, reo_params); + /* Other ring enable bits and REO_ENABLE will be set by FW */ + + /* TODO: Setup destination ring mapping if enabled */ + + /* TODO: Error destination ring setting is left to default. + * Default setting is to send all errors to release ring. + */ + + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000); + + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000)); + + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000)); + + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + (HAL_DEFAULT_VO_REO_TIMEOUT_MS * 1000)); + + /* + * When hash based routing is enabled, routing of the rx packet + * is done based on the following value: 1 _ _ _ _ The last 4 + * bits are based on hash[3:0]. This means the possible values + * are 0x10 to 0x1f. This value is used to look-up the + * ring ID configured in Destination_Ring_Ctrl_IX_* register. + * The Destination_Ring_Ctrl_IX_2 and Destination_Ring_Ctrl_IX_3 + * registers need to be configured to set-up the 16 entries to + * map the hash values to a ring number. There are 3 bits per + * hash entry – which are mapped as follows: + * 0: TCL, 1:SW1, 2:SW2, * 3:SW3, 4:SW4, 5:Release, 6:FW(WIFI), + * 7: NOT_USED. + */ + if (reo_params->rx_hash_enabled) { + HAL_REG_WRITE(soc, + HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + reo_params->remap1); + + hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR 0x%x", + HAL_REG_READ(soc, + HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET))); + + HAL_REG_WRITE(soc, + HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + reo_params->remap2); + + hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR 0x%x", + HAL_REG_READ(soc, + HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET))); + } + + /* TODO: Check if the following registers shoould be setup by host: + * AGING_CONTROL + * HIGH_MEMORY_THRESHOLD + * GLOBAL_LINK_DESC_COUNT_THRESH_IX_0[1,2] + * GLOBAL_LINK_DESC_COUNT_CTRL + */ +} + +/** + * hal_get_hw_hptp_generic() - Get HW head and tail pointer value for any ring + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Source ring pointer + * @headp: Head Pointer + * @tailp: Tail Pointer + * @ring: Ring type + * + * Return: Update tail pointer and head pointer in arguments. + */ +static inline +void hal_get_hw_hptp_generic(struct hal_soc *hal_soc, + hal_ring_handle_t hal_ring_hdl, + uint32_t *headp, uint32_t *tailp, + uint8_t ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + struct hal_hw_srng_config *ring_config; + enum hal_ring_type ring_type = (enum hal_ring_type)ring; + + if (!hal_soc || !srng) { + QDF_TRACE(QDF_MODULE_ID_HAL, QDF_TRACE_LEVEL_ERROR, + "%s: Context is Null", __func__); + return; + } + + ring_config = HAL_SRNG_CONFIG(hal_soc, ring_type); + if (!ring_config->lmac_ring) { + if (srng->ring_dir == HAL_SRNG_SRC_RING) { + *headp = SRNG_SRC_REG_READ(srng, HP); + *tailp = SRNG_SRC_REG_READ(srng, TP); + } else { + *headp = SRNG_DST_REG_READ(srng, HP); + *tailp = SRNG_DST_REG_READ(srng, TP); + } + } +} + +/** + * hal_srng_src_hw_init - Private function to initialize SRNG + * source ring HW + * @hal_soc: HAL SOC handle + * @srng: SRNG ring pointer + */ +static inline +void hal_srng_src_hw_init_generic(struct hal_soc *hal, + struct hal_srng *srng) +{ + uint32_t reg_val = 0; + uint64_t tp_addr = 0; + + hal_debug("hw_init srng %d", srng->ring_id); + + if (srng->flags & HAL_SRNG_MSI_INTR) { + SRNG_SRC_REG_WRITE(srng, MSI1_BASE_LSB, + srng->msi_addr & 0xffffffff); + reg_val = SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB, ADDR), + (uint64_t)(srng->msi_addr) >> 32) | + SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB, + MSI1_ENABLE), 1); + SRNG_SRC_REG_WRITE(srng, MSI1_BASE_MSB, reg_val); + SRNG_SRC_REG_WRITE(srng, MSI1_DATA, srng->msi_data); + } + + SRNG_SRC_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff); + reg_val = SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_BASE_ADDR_MSB), + ((uint64_t)(srng->ring_base_paddr) >> 32)) | + SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_SIZE), + srng->entry_size * srng->num_entries); + SRNG_SRC_REG_WRITE(srng, BASE_MSB, reg_val); + + reg_val = SRNG_SM(SRNG_SRC_FLD(ID, ENTRY_SIZE), srng->entry_size); + SRNG_SRC_REG_WRITE(srng, ID, reg_val); + + /** + * Interrupt setup: + * Default interrupt mode is 'pulse'. Need to setup SW_INTERRUPT_MODE + * if level mode is required + */ + reg_val = 0; + + /* + * WAR - Hawkeye v1 has a hardware bug which requires timer value to be + * programmed in terms of 1us resolution instead of 8us resolution as + * given in MLD. + */ + if (srng->intr_timer_thres_us) { + reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0, + INTERRUPT_TIMER_THRESHOLD), + srng->intr_timer_thres_us); + /* For HK v2 this should be (srng->intr_timer_thres_us >> 3) */ + } + + if (srng->intr_batch_cntr_thres_entries) { + reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0, + BATCH_COUNTER_THRESHOLD), + srng->intr_batch_cntr_thres_entries * + srng->entry_size); + } + SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX0, reg_val); + + reg_val = 0; + if (srng->flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) { + reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX1, + LOW_THRESHOLD), srng->u.src_ring.low_threshold); + } + + SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX1, reg_val); + + /* As per HW team, TP_ADDR and HP_ADDR for Idle link ring should + * remain 0 to avoid some WBM stability issues. Remote head/tail + * pointers are not required since this ring is completely managed + * by WBM HW + */ + reg_val = 0; + if (srng->ring_id != HAL_SRNG_WBM_IDLE_LINK) { + tp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr + + ((unsigned long)(srng->u.src_ring.tp_addr) - + (unsigned long)(hal->shadow_rdptr_mem_vaddr))); + SRNG_SRC_REG_WRITE(srng, TP_ADDR_LSB, tp_addr & 0xffffffff); + SRNG_SRC_REG_WRITE(srng, TP_ADDR_MSB, tp_addr >> 32); + } else { + reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, RING_ID_DISABLE), 1); + } + + /* Initilaize head and tail pointers to indicate ring is empty */ + SRNG_SRC_REG_WRITE(srng, HP, 0); + SRNG_SRC_REG_WRITE(srng, TP, 0); + *(srng->u.src_ring.tp_addr) = 0; + + reg_val |= ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ? + SRNG_SM(SRNG_SRC_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) | + ((srng->flags & HAL_SRNG_RING_PTR_SWAP) ? + SRNG_SM(SRNG_SRC_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) | + ((srng->flags & HAL_SRNG_MSI_SWAP) ? + SRNG_SM(SRNG_SRC_FLD(MISC, MSI_SWAP_BIT), 1) : 0); + + /* Loop count is not used for SRC rings */ + reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, LOOPCNT_DISABLE), 1); + + /* + * reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, SRNG_ENABLE), 1); + * todo: update fw_api and replace with above line + * (when SRNG_ENABLE field for the MISC register is available in fw_api) + * (WCSS_UMAC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_MISC) + */ + reg_val |= 0x40; + + SRNG_SRC_REG_WRITE(srng, MISC, reg_val); +} + +/** + * hal_srng_dst_hw_init - Private function to initialize SRNG + * destination ring HW + * @hal_soc: HAL SOC handle + * @srng: SRNG ring pointer + */ +static inline +void hal_srng_dst_hw_init_generic(struct hal_soc *hal, + struct hal_srng *srng) +{ + uint32_t reg_val = 0; + uint64_t hp_addr = 0; + + hal_debug("hw_init srng %d", srng->ring_id); + + if (srng->flags & HAL_SRNG_MSI_INTR) { + SRNG_DST_REG_WRITE(srng, MSI1_BASE_LSB, + srng->msi_addr & 0xffffffff); + reg_val = SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB, ADDR), + (uint64_t)(srng->msi_addr) >> 32) | + SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB, + MSI1_ENABLE), 1); + SRNG_DST_REG_WRITE(srng, MSI1_BASE_MSB, reg_val); + SRNG_DST_REG_WRITE(srng, MSI1_DATA, srng->msi_data); + } + + SRNG_DST_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff); + reg_val = SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_BASE_ADDR_MSB), + ((uint64_t)(srng->ring_base_paddr) >> 32)) | + SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_SIZE), + srng->entry_size * srng->num_entries); + SRNG_DST_REG_WRITE(srng, BASE_MSB, reg_val); + + reg_val = SRNG_SM(SRNG_DST_FLD(ID, RING_ID), srng->ring_id) | + SRNG_SM(SRNG_DST_FLD(ID, ENTRY_SIZE), srng->entry_size); + SRNG_DST_REG_WRITE(srng, ID, reg_val); + + + /** + * Interrupt setup: + * Default interrupt mode is 'pulse'. Need to setup SW_INTERRUPT_MODE + * if level mode is required + */ + reg_val = 0; + if (srng->intr_timer_thres_us) { + reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP, + INTERRUPT_TIMER_THRESHOLD), + srng->intr_timer_thres_us >> 3); + } + + if (srng->intr_batch_cntr_thres_entries) { + reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP, + BATCH_COUNTER_THRESHOLD), + srng->intr_batch_cntr_thres_entries * + srng->entry_size); + } + + SRNG_DST_REG_WRITE(srng, PRODUCER_INT_SETUP, reg_val); + hp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr + + ((unsigned long)(srng->u.dst_ring.hp_addr) - + (unsigned long)(hal->shadow_rdptr_mem_vaddr))); + SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB, hp_addr & 0xffffffff); + SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB, hp_addr >> 32); + + /* Initilaize head and tail pointers to indicate ring is empty */ + SRNG_DST_REG_WRITE(srng, HP, 0); + SRNG_DST_REG_WRITE(srng, TP, 0); + *(srng->u.dst_ring.hp_addr) = 0; + + reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ? + SRNG_SM(SRNG_DST_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) | + ((srng->flags & HAL_SRNG_RING_PTR_SWAP) ? + SRNG_SM(SRNG_DST_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) | + ((srng->flags & HAL_SRNG_MSI_SWAP) ? + SRNG_SM(SRNG_DST_FLD(MISC, MSI_SWAP_BIT), 1) : 0); + + /* + * reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, SRNG_ENABLE), 1); + * todo: update fw_api and replace with above line + * (when SRNG_ENABLE field for the MISC register is available in fw_api) + * (WCSS_UMAC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_MISC) + */ + reg_val |= 0x40; + + SRNG_DST_REG_WRITE(srng, MISC, reg_val); + +} + +#define HAL_RX_WBM_ERR_SRC_GET(wbm_desc) (((*(((uint32_t *) wbm_desc)+ \ + (WBM_RELEASE_RING_2_RELEASE_SOURCE_MODULE_OFFSET >> 2))) & \ + WBM_RELEASE_RING_2_RELEASE_SOURCE_MODULE_MASK) >> \ + WBM_RELEASE_RING_2_RELEASE_SOURCE_MODULE_LSB) + +#define HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc) (((*(((uint32_t *) wbm_desc)+ \ + (WBM_RELEASE_RING_2_REO_PUSH_REASON_OFFSET >> 2))) & \ + WBM_RELEASE_RING_2_REO_PUSH_REASON_MASK) >> \ + WBM_RELEASE_RING_2_REO_PUSH_REASON_LSB) + +#define HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc) (((*(((uint32_t *) wbm_desc)+ \ + (WBM_RELEASE_RING_2_REO_ERROR_CODE_OFFSET >> 2))) & \ + WBM_RELEASE_RING_2_REO_ERROR_CODE_MASK) >> \ + WBM_RELEASE_RING_2_REO_ERROR_CODE_LSB) + +#define HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc) \ + (((*(((uint32_t *) wbm_desc) + \ + (WBM_RELEASE_RING_2_RXDMA_PUSH_REASON_OFFSET >> 2))) & \ + WBM_RELEASE_RING_2_RXDMA_PUSH_REASON_MASK) >> \ + WBM_RELEASE_RING_2_RXDMA_PUSH_REASON_LSB) + +#define HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc) \ + (((*(((uint32_t *) wbm_desc) + \ + (WBM_RELEASE_RING_2_RXDMA_ERROR_CODE_OFFSET >> 2))) & \ + WBM_RELEASE_RING_2_RXDMA_ERROR_CODE_MASK) >> \ + WBM_RELEASE_RING_2_RXDMA_ERROR_CODE_LSB) + +/** + * hal_rx_wbm_err_info_get_generic(): Retrieves WBM error code and reason and + * save it to hal_wbm_err_desc_info structure passed by caller + * @wbm_desc: wbm ring descriptor + * @wbm_er_info1: hal_wbm_err_desc_info structure, output parameter. + * Return: void + */ +static inline void hal_rx_wbm_err_info_get_generic(void *wbm_desc, + void *wbm_er_info1) +{ + struct hal_wbm_err_desc_info *wbm_er_info = + (struct hal_wbm_err_desc_info *)wbm_er_info1; + + wbm_er_info->wbm_err_src = HAL_RX_WBM_ERR_SRC_GET(wbm_desc); + wbm_er_info->reo_psh_rsn = HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc); + wbm_er_info->reo_err_code = HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc); + wbm_er_info->rxdma_psh_rsn = HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc); + wbm_er_info->rxdma_err_code = HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc); +} + +/** + * hal_tx_comp_get_release_reason_generic() - TQM Release reason + * @hal_desc: completion ring descriptor pointer + * + * This function will return the type of pointer - buffer or descriptor + * + * Return: buffer type + */ +static inline uint8_t hal_tx_comp_get_release_reason_generic(void *hal_desc) +{ + uint32_t comp_desc = + *(uint32_t *) (((uint8_t *) hal_desc) + + WBM_RELEASE_RING_2_TQM_RELEASE_REASON_OFFSET); + + return (comp_desc & WBM_RELEASE_RING_2_TQM_RELEASE_REASON_MASK) >> + WBM_RELEASE_RING_2_TQM_RELEASE_REASON_LSB; +} + +/** + * hal_get_wbm_internal_error_generic() - is WBM internal error + * @hal_desc: completion ring descriptor pointer + * + * This function will return 0 or 1 - is it WBM internal error or not + * + * Return: uint8_t + */ +static inline uint8_t hal_get_wbm_internal_error_generic(void *hal_desc) +{ + uint32_t comp_desc = + *(uint32_t *)(((uint8_t *)hal_desc) + + WBM_RELEASE_RING_2_WBM_INTERNAL_ERROR_OFFSET); + + return (comp_desc & WBM_RELEASE_RING_2_WBM_INTERNAL_ERROR_MASK) >> + WBM_RELEASE_RING_2_WBM_INTERNAL_ERROR_LSB; +} + +/** + * hal_rx_dump_mpdu_start_tlv_generic: dump RX mpdu_start TLV in structured + * human readable format. + * @mpdu_start: pointer the rx_attention TLV in pkt. + * @dbg_level: log level. + * + * Return: void + */ +static inline void hal_rx_dump_mpdu_start_tlv_generic(void *mpdustart, + uint8_t dbg_level) +{ + struct rx_mpdu_start *mpdu_start = (struct rx_mpdu_start *)mpdustart; + struct rx_mpdu_info *mpdu_info = + (struct rx_mpdu_info *)&mpdu_start->rx_mpdu_info_details; + + hal_verbose_debug( + "rx_mpdu_start tlv (1/5) - " + "rxpcu_mpdu_filter_in_category: %x " + "sw_frame_group_id: %x " + "ndp_frame: %x " + "phy_err: %x " + "phy_err_during_mpdu_header: %x " + "protocol_version_err: %x " + "ast_based_lookup_valid: %x " + "phy_ppdu_id: %x " + "ast_index: %x " + "sw_peer_id: %x " + "mpdu_frame_control_valid: %x " + "mpdu_duration_valid: %x " + "mac_addr_ad1_valid: %x " + "mac_addr_ad2_valid: %x " + "mac_addr_ad3_valid: %x " + "mac_addr_ad4_valid: %x " + "mpdu_sequence_control_valid: %x " + "mpdu_qos_control_valid: %x " + "mpdu_ht_control_valid: %x " + "frame_encryption_info_valid: %x ", + mpdu_info->rxpcu_mpdu_filter_in_category, + mpdu_info->sw_frame_group_id, + mpdu_info->ndp_frame, + mpdu_info->phy_err, + mpdu_info->phy_err_during_mpdu_header, + mpdu_info->protocol_version_err, + mpdu_info->ast_based_lookup_valid, + mpdu_info->phy_ppdu_id, + mpdu_info->ast_index, + mpdu_info->sw_peer_id, + mpdu_info->mpdu_frame_control_valid, + mpdu_info->mpdu_duration_valid, + mpdu_info->mac_addr_ad1_valid, + mpdu_info->mac_addr_ad2_valid, + mpdu_info->mac_addr_ad3_valid, + mpdu_info->mac_addr_ad4_valid, + mpdu_info->mpdu_sequence_control_valid, + mpdu_info->mpdu_qos_control_valid, + mpdu_info->mpdu_ht_control_valid, + mpdu_info->frame_encryption_info_valid); + + hal_verbose_debug( + "rx_mpdu_start tlv (2/5) - " + "fr_ds: %x " + "to_ds: %x " + "encrypted: %x " + "mpdu_retry: %x " + "mpdu_sequence_number: %x " + "epd_en: %x " + "all_frames_shall_be_encrypted: %x " + "encrypt_type: %x " + "mesh_sta: %x " + "bssid_hit: %x " + "bssid_number: %x " + "tid: %x " + "pn_31_0: %x " + "pn_63_32: %x " + "pn_95_64: %x " + "pn_127_96: %x " + "peer_meta_data: %x " + "rxpt_classify_info.reo_destination_indication: %x " + "rxpt_classify_info.use_flow_id_toeplitz_clfy: %x " + "rx_reo_queue_desc_addr_31_0: %x ", + mpdu_info->fr_ds, + mpdu_info->to_ds, + mpdu_info->encrypted, + mpdu_info->mpdu_retry, + mpdu_info->mpdu_sequence_number, + mpdu_info->epd_en, + mpdu_info->all_frames_shall_be_encrypted, + mpdu_info->encrypt_type, + mpdu_info->mesh_sta, + mpdu_info->bssid_hit, + mpdu_info->bssid_number, + mpdu_info->tid, + mpdu_info->pn_31_0, + mpdu_info->pn_63_32, + mpdu_info->pn_95_64, + mpdu_info->pn_127_96, + mpdu_info->peer_meta_data, + mpdu_info->rxpt_classify_info_details.reo_destination_indication, + mpdu_info->rxpt_classify_info_details.use_flow_id_toeplitz_clfy, + mpdu_info->rx_reo_queue_desc_addr_31_0); + + hal_verbose_debug( + "rx_mpdu_start tlv (3/5) - " + "rx_reo_queue_desc_addr_39_32: %x " + "receive_queue_number: %x " + "pre_delim_err_warning: %x " + "first_delim_err: %x " + "key_id_octet: %x " + "new_peer_entry: %x " + "decrypt_needed: %x " + "decap_type: %x " + "rx_insert_vlan_c_tag_padding: %x " + "rx_insert_vlan_s_tag_padding: %x " + "strip_vlan_c_tag_decap: %x " + "strip_vlan_s_tag_decap: %x " + "pre_delim_count: %x " + "ampdu_flag: %x " + "bar_frame: %x " + "mpdu_length: %x " + "first_mpdu: %x " + "mcast_bcast: %x " + "ast_index_not_found: %x " + "ast_index_timeout: %x ", + mpdu_info->rx_reo_queue_desc_addr_39_32, + mpdu_info->receive_queue_number, + mpdu_info->pre_delim_err_warning, + mpdu_info->first_delim_err, + mpdu_info->key_id_octet, + mpdu_info->new_peer_entry, + mpdu_info->decrypt_needed, + mpdu_info->decap_type, + mpdu_info->rx_insert_vlan_c_tag_padding, + mpdu_info->rx_insert_vlan_s_tag_padding, + mpdu_info->strip_vlan_c_tag_decap, + mpdu_info->strip_vlan_s_tag_decap, + mpdu_info->pre_delim_count, + mpdu_info->ampdu_flag, + mpdu_info->bar_frame, + mpdu_info->mpdu_length, + mpdu_info->first_mpdu, + mpdu_info->mcast_bcast, + mpdu_info->ast_index_not_found, + mpdu_info->ast_index_timeout); + + hal_verbose_debug( + "rx_mpdu_start tlv (4/5) - " + "power_mgmt: %x " + "non_qos: %x " + "null_data: %x " + "mgmt_type: %x " + "ctrl_type: %x " + "more_data: %x " + "eosp: %x " + "fragment_flag: %x " + "order: %x " + "u_apsd_trigger: %x " + "encrypt_required: %x " + "directed: %x " + "mpdu_frame_control_field: %x " + "mpdu_duration_field: %x " + "mac_addr_ad1_31_0: %x " + "mac_addr_ad1_47_32: %x " + "mac_addr_ad2_15_0: %x " + "mac_addr_ad2_47_16: %x " + "mac_addr_ad3_31_0: %x " + "mac_addr_ad3_47_32: %x ", + mpdu_info->power_mgmt, + mpdu_info->non_qos, + mpdu_info->null_data, + mpdu_info->mgmt_type, + mpdu_info->ctrl_type, + mpdu_info->more_data, + mpdu_info->eosp, + mpdu_info->fragment_flag, + mpdu_info->order, + mpdu_info->u_apsd_trigger, + mpdu_info->encrypt_required, + mpdu_info->directed, + mpdu_info->mpdu_frame_control_field, + mpdu_info->mpdu_duration_field, + mpdu_info->mac_addr_ad1_31_0, + mpdu_info->mac_addr_ad1_47_32, + mpdu_info->mac_addr_ad2_15_0, + mpdu_info->mac_addr_ad2_47_16, + mpdu_info->mac_addr_ad3_31_0, + mpdu_info->mac_addr_ad3_47_32); + + hal_verbose_debug( + "rx_mpdu_start tlv (5/5) - " + "mpdu_sequence_control_field: %x " + "mac_addr_ad4_31_0: %x " + "mac_addr_ad4_47_32: %x " + "mpdu_qos_control_field: %x " + "mpdu_ht_control_field: %x ", + mpdu_info->mpdu_sequence_control_field, + mpdu_info->mac_addr_ad4_31_0, + mpdu_info->mac_addr_ad4_47_32, + mpdu_info->mpdu_qos_control_field, + mpdu_info->mpdu_ht_control_field); +} + +/** + * hal_tx_desc_set_search_type - Set the search type value + * @desc: Handle to Tx Descriptor + * @search_type: search type + * 0 – Normal search + * 1 – Index based address search + * 2 – Index based flow search + * + * Return: void + */ +#ifdef TCL_DATA_CMD_2_SEARCH_TYPE_OFFSET +static void hal_tx_desc_set_search_type_generic(void *desc, + uint8_t search_type) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_2, SEARCH_TYPE) |= + HAL_TX_SM(TCL_DATA_CMD_2, SEARCH_TYPE, search_type); +} +#else +static void hal_tx_desc_set_search_type_generic(void *desc, + uint8_t search_type) +{ +} + +#endif + +/** + * hal_tx_desc_set_search_index - Set the search index value + * @desc: Handle to Tx Descriptor + * @search_index: The index that will be used for index based address or + * flow search. The field is valid when 'search_type' is + * 1 0r 2 + * + * Return: void + */ +#ifdef TCL_DATA_CMD_5_SEARCH_INDEX_OFFSET +static void hal_tx_desc_set_search_index_generic(void *desc, + uint32_t search_index) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_5, SEARCH_INDEX) |= + HAL_TX_SM(TCL_DATA_CMD_5, SEARCH_INDEX, search_index); +} +#else +static void hal_tx_desc_set_search_index_generic(void *desc, + uint32_t search_index) +{ +} +#endif + +/** + * hal_tx_desc_set_cache_set_num_generic - Set the cache-set-num value + * @desc: Handle to Tx Descriptor + * @cache_num: Cache set number that should be used to cache the index + * based search results, for address and flow search. + * This value should be equal to LSB four bits of the hash value + * of match data, in case of search index points to an entry + * which may be used in content based search also. The value can + * be anything when the entry pointed by search index will not be + * used for content based search. + * + * Return: void + */ +#ifdef TCL_DATA_CMD_5_CACHE_SET_NUM_OFFSET +static void hal_tx_desc_set_cache_set_num_generic(void *desc, + uint8_t cache_num) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_5, CACHE_SET_NUM) |= + HAL_TX_SM(TCL_DATA_CMD_5, CACHE_SET_NUM, cache_num); +} +#else +static void hal_tx_desc_set_cache_set_num_generic(void *desc, + uint8_t cache_num) +{ +} +#endif + +/** + * hal_tx_set_pcp_tid_map_generic() - Configure default PCP to TID map table + * @soc: HAL SoC context + * @map: PCP-TID mapping table + * + * PCP are mapped to 8 TID values using TID values programmed + * in one set of mapping registers PCP_TID_MAP_<0 to 6> + * The mapping register has TID mapping for 8 PCP values + * + * Return: none + */ +static void hal_tx_set_pcp_tid_map_generic(struct hal_soc *soc, uint8_t *map) +{ + uint32_t addr, value; + + addr = HWIO_TCL_R0_PCP_TID_MAP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + + value = (map[0] | + (map[1] << HWIO_TCL_R0_PCP_TID_MAP_PCP_1_SHFT) | + (map[2] << HWIO_TCL_R0_PCP_TID_MAP_PCP_2_SHFT) | + (map[3] << HWIO_TCL_R0_PCP_TID_MAP_PCP_3_SHFT) | + (map[4] << HWIO_TCL_R0_PCP_TID_MAP_PCP_4_SHFT) | + (map[5] << HWIO_TCL_R0_PCP_TID_MAP_PCP_5_SHFT) | + (map[6] << HWIO_TCL_R0_PCP_TID_MAP_PCP_6_SHFT) | + (map[7] << HWIO_TCL_R0_PCP_TID_MAP_PCP_7_SHFT)); + + HAL_REG_WRITE(soc, addr, (value & HWIO_TCL_R0_PCP_TID_MAP_RMSK)); +} + +/** + * hal_tx_update_pcp_tid_generic() - Update the pcp tid map table with + * value received from user-space + * @soc: HAL SoC context + * @pcp: pcp value + * @tid : tid value + * + * Return: void + */ +static +void hal_tx_update_pcp_tid_generic(struct hal_soc *soc, + uint8_t pcp, uint8_t tid) +{ + uint32_t addr, value, regval; + + addr = HWIO_TCL_R0_PCP_TID_MAP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + + value = (uint32_t)tid << (HAL_TX_BITS_PER_TID * pcp); + + /* Read back previous PCP TID config and update + * with new config. + */ + regval = HAL_REG_READ(soc, addr); + regval &= ~(HAL_TX_TID_BITS_MASK << (HAL_TX_BITS_PER_TID * pcp)); + regval |= value; + + HAL_REG_WRITE(soc, addr, + (regval & HWIO_TCL_R0_PCP_TID_MAP_RMSK)); +} + +/** + * hal_tx_update_tidmap_prty_generic() - Update the tid map priority + * @soc: HAL SoC context + * @val: priority value + * + * Return: void + */ +static +void hal_tx_update_tidmap_prty_generic(struct hal_soc *soc, uint8_t value) +{ + uint32_t addr; + + addr = HWIO_TCL_R0_TID_MAP_PRTY_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + + HAL_REG_WRITE(soc, addr, + (value & HWIO_TCL_R0_TID_MAP_PRTY_RMSK)); +} + +/** + * hal_rx_msdu_packet_metadata_get(): API to get the + * msdu information from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * @ hal_rx_msdu_metadata: pointer to the msdu info structure + */ +static void +hal_rx_msdu_packet_metadata_get_generic(uint8_t *buf, + void *pkt_msdu_metadata) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + struct hal_rx_msdu_metadata *msdu_metadata = + (struct hal_rx_msdu_metadata *)pkt_msdu_metadata; + + msdu_metadata->l3_hdr_pad = + HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(msdu_end); + msdu_metadata->sa_idx = HAL_RX_MSDU_END_SA_IDX_GET(msdu_end); + msdu_metadata->da_idx = HAL_RX_MSDU_END_DA_IDX_GET(msdu_end); + msdu_metadata->sa_sw_peer_id = + HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(msdu_end); +} +#endif /* _HAL_GENERIC_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_hw_headers.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_hw_headers.h new file mode 100644 index 0000000000000000000000000000000000000000..49aff583a367982b9c41c3931d3035b0cd2d88d3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_hw_headers.h @@ -0,0 +1,376 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HAL_HW_INTERNAL_H_ +#define _HAL_HW_INTERNAL_H_ +#include "qdf_types.h" +#include "qdf_lock.h" +#include "qdf_mem.h" +#include "rx_msdu_link.h" +#include "rx_reo_queue.h" +#include "rx_reo_queue_ext.h" +#include "wcss_seq_hwiobase.h" +#include "tlv_hdr.h" +#include "tlv_tag_def.h" +#include "reo_destination_ring.h" +#include "reo_reg_seq_hwioreg.h" +#include "reo_entrance_ring.h" +#include "reo_get_queue_stats.h" +#include "reo_get_queue_stats_status.h" +#include "tcl_data_cmd.h" +#include "tcl_gse_cmd.h" +#include "tcl_status_ring.h" +#include "mac_tcl_reg_seq_hwioreg.h" +#include "ce_src_desc.h" +#include "ce_stat_desc.h" +#ifdef QCA_WIFI_QCA6490 +#include "wfss_ce_channel_dst_reg_seq_hwioreg.h" +#include "wfss_ce_channel_src_reg_seq_hwioreg.h" +#else +#include "wfss_ce_reg_seq_hwioreg.h" +#endif /* QCA_WIFI_QCA6490 */ +#include "wbm_link_descriptor_ring.h" +#include "wbm_reg_seq_hwioreg.h" +#include "wbm_buffer_ring.h" +#include "wbm_release_ring.h" +#include "rx_msdu_desc_info.h" +#include "rx_mpdu_start.h" +#include "rx_mpdu_end.h" +#include "rx_msdu_start.h" +#include "rx_msdu_end.h" +#include "rx_attention.h" +#include "rx_ppdu_start.h" +#include "rx_ppdu_start_user_info.h" +#include "rx_ppdu_end_user_stats.h" +#include "rx_ppdu_end_user_stats_ext.h" +#include "rx_mpdu_desc_info.h" +#include "rxpcu_ppdu_end_info.h" +#include "phyrx_he_sig_a_su.h" +#include "phyrx_he_sig_a_mu_dl.h" +#if defined(QCA_WIFI_QCA6290_11AX_MU_UL) && defined(QCA_WIFI_QCA6290_11AX) +#include "phyrx_he_sig_a_mu_ul.h" +#endif +#include "phyrx_he_sig_b1_mu.h" +#include "phyrx_he_sig_b2_mu.h" +#include "phyrx_he_sig_b2_ofdma.h" +#include "phyrx_l_sig_a.h" +#include "phyrx_l_sig_b.h" +#include "phyrx_vht_sig_a.h" +#include "phyrx_ht_sig.h" +#include "tx_msdu_extension.h" +#include "receive_rssi_info.h" +#include "phyrx_pkt_end.h" +#include "phyrx_rssi_legacy.h" +#include "wcss_version.h" +#include "rx_msdu_link.h" +#include "hal_internal.h" + +#define HAL_SRNG_REO_EXCEPTION HAL_SRNG_REO2SW1 +#define HAL_SRNG_REO_ALTERNATE_SELECT 0x7 +#define HAL_NON_QOS_TID 16 + +/* TODO: Check if the following can be provided directly by HW headers */ +#define SRNG_LOOP_CNT_MASK REO_DESTINATION_RING_15_LOOPING_COUNT_MASK +#define SRNG_LOOP_CNT_LSB REO_DESTINATION_RING_15_LOOPING_COUNT_LSB + +/* HAL Macro to get the buffer info size */ +#define HAL_RX_BUFFINFO_NUM_DWORDS NUM_OF_DWORDS_BUFFER_ADDR_INFO + +#define HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS 100 /* milliseconds */ +#define HAL_DEFAULT_VO_REO_TIMEOUT_MS 40 /* milliseconds */ + +#define HAL_DESC_SET_FIELD(_desc, _word, _fld, _value) do { \ + ((uint32_t *)(_desc))[(_word ## _ ## _fld ## _OFFSET) >> 2] &= \ + ~(_word ## _ ## _fld ## _MASK); \ + ((uint32_t *)(_desc))[(_word ## _ ## _fld ## _OFFSET) >> 2] |= \ + ((_value) << _word ## _ ## _fld ## _LSB); \ +} while (0) + +#define HAL_SM(_reg, _fld, _val) \ + (((_val) << (_reg ## _ ## _fld ## _SHFT)) & \ + (_reg ## _ ## _fld ## _BMSK)) + +#define HAL_MS(_reg, _fld, _val) \ + (((_val) & (_reg ## _ ## _fld ## _BMSK)) >> \ + (_reg ## _ ## _fld ## _SHFT)) + +#define HAL_REG_WRITE(_soc, _reg, _value) \ + hal_write32_mb(_soc, (_reg), (_value)) + +/* Check register writing result */ +#define HAL_REG_WRITE_CONFIRM(_soc, _reg, _value) \ + hal_write32_mb_confirm(_soc, (_reg), (_value)) + +#define HAL_REG_WRITE_CONFIRM_RETRY(_soc, _reg, _value, _recovery) \ + hal_write32_mb_confirm_retry(_soc, (_reg), (_value), (_recovery)) + +#define HAL_REG_READ(_soc, _offset) \ + hal_read32_mb(_soc, (_offset)) + +#define WBM_IDLE_DESC_LIST 1 + +/** + * Common SRNG register access macros: + * The SRNG registers are distributed across various UMAC and LMAC HW blocks, + * but the register group and format is exactly same for all rings, with some + * difference between producer rings (these are 'producer rings' with respect + * to HW and referred as 'destination rings' in SW) and consumer rings (these + * are 'consumer rings' with respect to HW and + * referred as 'source rings' in SW). + * The following macros provide uniform access to all SRNG rings. + */ + +/* SRNG registers are split among two groups R0 and R2 and following + * definitions identify the group to which each register belongs to + */ +#define R0_INDEX 0 +#define R2_INDEX 1 + +#define HWREG_INDEX(_reg_group) _reg_group ## _ ## INDEX + +/* Registers in R0 group */ +#define BASE_LSB_GROUP R0 +#define BASE_MSB_GROUP R0 +#define ID_GROUP R0 +#define STATUS_GROUP R0 +#define MISC_GROUP R0 +#define HP_ADDR_LSB_GROUP R0 +#define HP_ADDR_MSB_GROUP R0 +#define PRODUCER_INT_SETUP_GROUP R0 +#define PRODUCER_INT_STATUS_GROUP R0 +#define PRODUCER_FULL_COUNTER_GROUP R0 +#define MSI1_BASE_LSB_GROUP R0 +#define MSI1_BASE_MSB_GROUP R0 +#define MSI1_DATA_GROUP R0 +#define HP_TP_SW_OFFSET_GROUP R0 +#define TP_ADDR_LSB_GROUP R0 +#define TP_ADDR_MSB_GROUP R0 +#define CONSUMER_INT_SETUP_IX0_GROUP R0 +#define CONSUMER_INT_SETUP_IX1_GROUP R0 +#define CONSUMER_INT_STATUS_GROUP R0 +#define CONSUMER_EMPTY_COUNTER_GROUP R0 +#define CONSUMER_PREFETCH_TIMER_GROUP R0 +#define CONSUMER_PREFETCH_STATUS_GROUP R0 + +/* Registers in R2 group */ +#define HP_GROUP R2 +#define TP_GROUP R2 + +/** + * Register definitions for all SRNG based rings are same, except few + * differences between source (HW consumer) and destination (HW producer) + * registers. Following macros definitions provide generic access to all + * SRNG based rings. + * For source rings, we will use the register/field definitions of SW2TCL1 + * ring defined in the HW header file mac_tcl_reg_seq_hwioreg.h. To setup + * individual fields, SRNG_SM macros should be used with fields specified + * using SRNG_SRC_FLD(, ), Register writes should be done + * using SRNG_SRC_REG_WRITE(, , ). + * Similarly for destination rings we will use definitions of REO2SW1 ring + * defined in the register reo_destination_ring.h. To setup individual + * fields SRNG_SM macros should be used with fields specified using + * SRNG_DST_FLD(, ). Register writes should be done using + * SRNG_DST_REG_WRITE(, , ). + */ + +#define SRNG_DST_REG_OFFSET(_reg, _reg_group) \ + HWIO_REO_ ## _reg_group ## _REO2SW1_RING_ ## _reg##_ADDR(0) + +#define SRNG_SRC_REG_OFFSET(_reg, _reg_group) \ + HWIO_TCL_ ## _reg_group ## _SW2TCL1_RING_ ## _reg ## _ADDR(0) + +#define _SRNG_DST_FLD(_reg_group, _reg_fld) \ + HWIO_REO_ ## _reg_group ## _REO2SW1_RING_ ## _reg_fld +#define _SRNG_SRC_FLD(_reg_group, _reg_fld) \ + HWIO_TCL_ ## _reg_group ## _SW2TCL1_RING_ ## _reg_fld + +#define _SRNG_FLD(_reg_group, _reg_fld, _dir) \ + _SRNG_ ## _dir ## _FLD(_reg_group, _reg_fld) + +#define SRNG_DST_FLD(_reg, _f) _SRNG_FLD(_reg ## _GROUP, _reg ## _ ## _f, DST) +#define SRNG_SRC_FLD(_reg, _f) _SRNG_FLD(_reg ## _GROUP, _reg ## _ ## _f, SRC) + +#define SRNG_SRC_R0_START_OFFSET SRNG_SRC_REG_OFFSET(BASE_LSB, R0) +#define SRNG_DST_R0_START_OFFSET SRNG_DST_REG_OFFSET(BASE_LSB, R0) + +#define SRNG_SRC_R2_START_OFFSET SRNG_SRC_REG_OFFSET(HP, R2) +#define SRNG_DST_R2_START_OFFSET SRNG_DST_REG_OFFSET(HP, R2) + +#define SRNG_SRC_START_OFFSET(_reg_group) \ + SRNG_SRC_ ## _reg_group ## _START_OFFSET +#define SRNG_DST_START_OFFSET(_reg_group) \ + SRNG_DST_ ## _reg_group ## _START_OFFSET +#define SRNG_REG_ADDR(_srng, _reg, _reg_group, _dir) \ + ((_srng)->hwreg_base[HWREG_INDEX(_reg_group)] + \ + ((_srng)->hal_soc->hal_hw_reg_offset[_dir ## _ ##_reg])) + +#define CALCULATE_REG_OFFSET(_dir, _reg, _reg_group) \ + (SRNG_ ## _dir ## _REG_OFFSET(_reg, _reg_group) - \ + SRNG_ ## _dir ## _START_OFFSET(_reg_group)) + +#define REG_OFFSET(_dir, _reg) \ + CALCULATE_REG_OFFSET(_dir, _reg, _reg ## _GROUP) + +#define SRNG_DST_ADDR(_srng, _reg) \ + SRNG_REG_ADDR(_srng, _reg, _reg ## _GROUP, DST) + +#define SRNG_SRC_ADDR(_srng, _reg) \ + SRNG_REG_ADDR(_srng, _reg, _reg ## _GROUP, SRC) + +#define SRNG_REG_WRITE(_srng, _reg, _value, _dir) \ + hal_write_address_32_mb(_srng->hal_soc,\ + SRNG_ ## _dir ## _ADDR(_srng, _reg), (_value), false) + +#define SRNG_REG_WRITE_CONFIRM(_srng, _reg, _value, _dir) \ + hal_write_address_32_mb(_srng->hal_soc,\ + SRNG_ ## _dir ## _ADDR(_srng, _reg), (_value), true) + +#define SRNG_REG_READ(_srng, _reg, _dir) \ + hal_read_address_32_mb(_srng->hal_soc, \ + SRNG_ ## _dir ## _ADDR(_srng, _reg)) + +#define SRNG_SRC_REG_WRITE(_srng, _reg, _value) \ + SRNG_REG_WRITE(_srng, _reg, _value, SRC) + +#define SRNG_DST_REG_WRITE(_srng, _reg, _value) \ + SRNG_REG_WRITE(_srng, _reg, _value, DST) + +#define SRNG_DST_REG_WRITE_CONFIRM(_srng, _reg, _value) \ + SRNG_REG_WRITE_CONFIRM(_srng, _reg, _value, DST) + +#define SRNG_SRC_REG_READ(_srng, _reg) \ + SRNG_REG_READ(_srng, _reg, SRC) + +#define SRNG_DST_REG_READ(_srng, _reg) \ + SRNG_REG_READ(_srng, _reg, DST) + +#define _SRNG_FM(_reg_fld) _reg_fld ## _BMSK +#define _SRNG_FS(_reg_fld) _reg_fld ## _SHFT + +#define SRNG_SM(_reg_fld, _val) \ + (((_val) << _SRNG_FS(_reg_fld)) & _SRNG_FM(_reg_fld)) + +#define SRNG_MS(_reg_fld, _val) \ + (((_val) & _SRNG_FM(_reg_fld)) >> _SRNG_FS(_reg_fld)) + +#define SRNG_MAX_SIZE_DWORDS \ + (SRNG_MS(SRNG_SRC_FLD(BASE_MSB, RING_SIZE), 0xffffffff)) + +/** + * HW ring configuration table to identify hardware ring attributes like + * register addresses, number of rings, ring entry size etc., for each type + * of SRNG ring. + * + * Currently there is just one HW ring table, but there could be multiple + * configurations in future based on HW variants from the same wifi3.0 family + * and hence need to be attached with hal_soc based on HW type + */ +#define HAL_SRNG_CONFIG(_hal_soc, _ring_type) \ + (&_hal_soc->hw_srng_table[_ring_type]) + +enum SRNG_REGISTERS { +DST_HP = 0, +DST_TP, +DST_ID, +DST_MISC, +DST_HP_ADDR_LSB, +DST_HP_ADDR_MSB, +DST_MSI1_BASE_LSB, +DST_MSI1_BASE_MSB, +DST_MSI1_DATA, +DST_BASE_LSB, +DST_BASE_MSB, +DST_PRODUCER_INT_SETUP, + +SRC_HP, +SRC_TP, +SRC_ID, +SRC_MISC, +SRC_TP_ADDR_LSB, +SRC_TP_ADDR_MSB, +SRC_MSI1_BASE_LSB, +SRC_MSI1_BASE_MSB, +SRC_MSI1_DATA, +SRC_BASE_LSB, +SRC_BASE_MSB, +SRC_CONSUMER_INT_SETUP_IX0, +SRC_CONSUMER_INT_SETUP_IX1, +}; + +/** + * hal_set_link_desc_addr - Setup link descriptor in a buffer_addr_info + * HW structure + * + * @desc: Descriptor entry (from WBM_IDLE_LINK ring) + * @cookie: SW cookie for the buffer/descriptor + * @link_desc_paddr: Physical address of link descriptor entry + * + */ +static inline void hal_set_link_desc_addr(void *desc, uint32_t cookie, + qdf_dma_addr_t link_desc_paddr) +{ + uint32_t *buf_addr = (uint32_t *)desc; + + HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_0, BUFFER_ADDR_31_0, + link_desc_paddr & 0xffffffff); + HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32, + (uint64_t)link_desc_paddr >> 32); + HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, RETURN_BUFFER_MANAGER, + WBM_IDLE_DESC_LIST); + HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, SW_BUFFER_COOKIE, + cookie); +} + +/** + * hal_get_reo_qdesc_size - Get size of reo queue descriptor + * + * @hal_soc: Opaque HAL SOC handle + * @ba_window_size: BlockAck window size + * @tid: TID number + * + */ +static inline +uint32_t hal_get_reo_qdesc_size(hal_soc_handle_t hal_soc_hdl, + uint32_t ba_window_size, int tid) +{ + /* Return descriptor size corresponding to window size of 2 since + * we set ba_window_size to 2 while setting up REO descriptors as + * a WAR to get 2k jump exception aggregates are received without + * a BA session. + */ + if (ba_window_size <= 1) { + if (tid != HAL_NON_QOS_TID) + return sizeof(struct rx_reo_queue) + + sizeof(struct rx_reo_queue_ext); + else + return sizeof(struct rx_reo_queue); + } + + if (ba_window_size <= 105) + return sizeof(struct rx_reo_queue) + + sizeof(struct rx_reo_queue_ext); + + if (ba_window_size <= 210) + return sizeof(struct rx_reo_queue) + + (2 * sizeof(struct rx_reo_queue_ext)); + + return sizeof(struct rx_reo_queue) + + (3 * sizeof(struct rx_reo_queue_ext)); +} + +#endif /* _HAL_HW_INTERNAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_internal.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..6c796e3764255800676ef9d601b749a83ed16f74 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_internal.h @@ -0,0 +1,787 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HAL_INTERNAL_H_ +#define _HAL_INTERNAL_H_ + +#include "qdf_types.h" +#include "qdf_atomic.h" +#include "qdf_lock.h" +#include "qdf_mem.h" +#include "qdf_nbuf.h" +#include "pld_common.h" +#ifdef FEATURE_HAL_DELAYED_REG_WRITE +#include "qdf_defer.h" +#endif + +#define hal_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_HAL, params) +#define hal_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_HAL, params) +#define hal_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_HAL, params) +#define hal_info(params...) QDF_TRACE_INFO(QDF_MODULE_ID_HAL, params) +#define hal_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_HAL, params) + +#define hal_alert_rl(params...) QDF_TRACE_FATAL_RL(QDF_MODULE_ID_HAL, params) +#define hal_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_HAL, params) +#define hal_warn_rl(params...) QDF_TRACE_WARN_RL(QDF_MODULE_ID_HAL, params) +#define hal_info_rl(params...) QDF_TRACE_INFO_RL(QDF_MODULE_ID_HAL, params) +#define hal_debug_rl(params...) QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_HAL, params) + +#ifdef ENABLE_VERBOSE_DEBUG +extern bool is_hal_verbose_debug_enabled; +#define hal_verbose_debug(params...) \ + if (unlikely(is_hal_verbose_debug_enabled)) \ + do {\ + QDF_TRACE_DEBUG(QDF_MODULE_ID_HAL, params); \ + } while (0) +#define hal_verbose_hex_dump(params...) \ + if (unlikely(is_hal_verbose_debug_enabled)) \ + do {\ + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_HAL, \ + QDF_TRACE_LEVEL_DEBUG, \ + params); \ + } while (0) +#else +#define hal_verbose_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_HAL, params) +#define hal_verbose_hex_dump(params...) \ + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_HAL, QDF_TRACE_LEVEL_DEBUG, \ + params) +#endif + +/* + * dp_hal_soc - opaque handle for DP HAL soc + */ +struct hal_soc_handle; +typedef struct hal_soc_handle *hal_soc_handle_t; + +/* TBD: This should be movded to shared HW header file */ +enum hal_srng_ring_id { + /* UMAC rings */ + HAL_SRNG_REO2SW1 = 0, + HAL_SRNG_REO2SW2 = 1, + HAL_SRNG_REO2SW3 = 2, + HAL_SRNG_REO2SW4 = 3, + HAL_SRNG_REO2TCL = 4, + HAL_SRNG_SW2REO = 5, + /* 6-7 unused */ + HAL_SRNG_REO_CMD = 8, + HAL_SRNG_REO_STATUS = 9, + /* 10-15 unused */ + HAL_SRNG_SW2TCL1 = 16, + HAL_SRNG_SW2TCL2 = 17, + HAL_SRNG_SW2TCL3 = 18, + HAL_SRNG_SW2TCL4 = 19, /* FW2TCL ring */ + /* 20-23 unused */ + HAL_SRNG_SW2TCL_CMD = 24, + HAL_SRNG_TCL_STATUS = 25, + /* 26-31 unused */ + HAL_SRNG_CE_0_SRC = 32, + HAL_SRNG_CE_1_SRC = 33, + HAL_SRNG_CE_2_SRC = 34, + HAL_SRNG_CE_3_SRC = 35, + HAL_SRNG_CE_4_SRC = 36, + HAL_SRNG_CE_5_SRC = 37, + HAL_SRNG_CE_6_SRC = 38, + HAL_SRNG_CE_7_SRC = 39, + HAL_SRNG_CE_8_SRC = 40, + HAL_SRNG_CE_9_SRC = 41, + HAL_SRNG_CE_10_SRC = 42, + HAL_SRNG_CE_11_SRC = 43, + /* 44-55 unused */ + HAL_SRNG_CE_0_DST = 56, + HAL_SRNG_CE_1_DST = 57, + HAL_SRNG_CE_2_DST = 58, + HAL_SRNG_CE_3_DST = 59, + HAL_SRNG_CE_4_DST = 60, + HAL_SRNG_CE_5_DST = 61, + HAL_SRNG_CE_6_DST = 62, + HAL_SRNG_CE_7_DST = 63, + HAL_SRNG_CE_8_DST = 64, + HAL_SRNG_CE_9_DST = 65, + HAL_SRNG_CE_10_DST = 66, + HAL_SRNG_CE_11_DST = 67, + /* 68-79 unused */ + HAL_SRNG_CE_0_DST_STATUS = 80, + HAL_SRNG_CE_1_DST_STATUS = 81, + HAL_SRNG_CE_2_DST_STATUS = 82, + HAL_SRNG_CE_3_DST_STATUS = 83, + HAL_SRNG_CE_4_DST_STATUS = 84, + HAL_SRNG_CE_5_DST_STATUS = 85, + HAL_SRNG_CE_6_DST_STATUS = 86, + HAL_SRNG_CE_7_DST_STATUS = 87, + HAL_SRNG_CE_8_DST_STATUS = 88, + HAL_SRNG_CE_9_DST_STATUS = 89, + HAL_SRNG_CE_10_DST_STATUS = 90, + HAL_SRNG_CE_11_DST_STATUS = 91, + /* 92-103 unused */ + HAL_SRNG_WBM_IDLE_LINK = 104, + HAL_SRNG_WBM_SW_RELEASE = 105, + HAL_SRNG_WBM2SW0_RELEASE = 106, + HAL_SRNG_WBM2SW1_RELEASE = 107, + HAL_SRNG_WBM2SW2_RELEASE = 108, + HAL_SRNG_WBM2SW3_RELEASE = 109, + /* 110-127 unused */ + HAL_SRNG_UMAC_ID_END = 127, + /* LMAC rings - The following set will be replicated for each LMAC */ + HAL_SRNG_LMAC1_ID_START = 128, + HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 = HAL_SRNG_LMAC1_ID_START, +#ifdef IPA_OFFLOAD + HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 = (HAL_SRNG_LMAC1_ID_START + 1), + HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 = (HAL_SRNG_LMAC1_ID_START + 2), + HAL_SRNG_WMAC1_SW2RXDMA1_BUF = (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 + 1), +#else + HAL_SRNG_WMAC1_SW2RXDMA1_BUF = (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 + 1), +#endif + HAL_SRNG_WMAC1_SW2RXDMA2_BUF = (HAL_SRNG_WMAC1_SW2RXDMA1_BUF + 1), + HAL_SRNG_WMAC1_SW2RXDMA0_STATBUF = (HAL_SRNG_WMAC1_SW2RXDMA2_BUF + 1), + HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF = + (HAL_SRNG_WMAC1_SW2RXDMA0_STATBUF + 1), + HAL_SRNG_WMAC1_RXDMA2SW0 = (HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF + 1), + HAL_SRNG_WMAC1_RXDMA2SW1 = (HAL_SRNG_WMAC1_RXDMA2SW0 + 1), + HAL_SRNG_WMAC1_SW2RXDMA1_DESC = (HAL_SRNG_WMAC1_RXDMA2SW1 + 1), +#ifdef WLAN_FEATURE_CIF_CFR + HAL_SRNG_WIFI_POS_SRC_DMA_RING = (HAL_SRNG_WMAC1_SW2RXDMA1_DESC + 1), + HAL_SRNG_DIR_BUF_RX_SRC_DMA_RING = (HAL_SRNG_WIFI_POS_SRC_DMA_RING + 1), +#else + HAL_SRNG_DIR_BUF_RX_SRC_DMA_RING = (HAL_SRNG_WMAC1_SW2RXDMA1_DESC + 1), +#endif + /* -142 unused */ + HAL_SRNG_LMAC1_ID_END = 143 +}; + +#define HAL_RXDMA_MAX_RING_SIZE 0xFFFF +#define HAL_MAX_LMACS 3 +#define HAL_MAX_RINGS_PER_LMAC (HAL_SRNG_LMAC1_ID_END - HAL_SRNG_LMAC1_ID_START) +#define HAL_MAX_LMAC_RINGS (HAL_MAX_LMACS * HAL_MAX_RINGS_PER_LMAC) + +#define HAL_SRNG_ID_MAX (HAL_SRNG_UMAC_ID_END + HAL_MAX_LMAC_RINGS) + +enum hal_srng_dir { + HAL_SRNG_SRC_RING, + HAL_SRNG_DST_RING +}; + +/* Lock wrappers for SRNG */ +#define hal_srng_lock_t qdf_spinlock_t +#define SRNG_LOCK_INIT(_lock) qdf_spinlock_create(_lock) +#define SRNG_LOCK(_lock) qdf_spin_lock_bh(_lock) +#define SRNG_UNLOCK(_lock) qdf_spin_unlock_bh(_lock) +#define SRNG_LOCK_DESTROY(_lock) qdf_spinlock_destroy(_lock) + +struct hal_soc; + +/** + * dp_hal_ring - opaque handle for DP HAL SRNG + */ +struct hal_ring_handle; +typedef struct hal_ring_handle *hal_ring_handle_t; + +#define MAX_SRNG_REG_GROUPS 2 + +/* Hal Srng bit mask + * HAL_SRNG_FLUSH_EVENT: SRNG HP TP flush in case of link down + */ +#define HAL_SRNG_FLUSH_EVENT BIT(0) + +#ifdef FEATURE_HAL_DELAYED_REG_WRITE + +/** + * struct hal_reg_write_q_elem - delayed register write queue element + * @srng: hal_srng queued for a delayed write + * @addr: iomem address of the register + * @enqueue_val: register value at the time of delayed write enqueue + * @dequeue_val: register value at the time of delayed write dequeue + * @valid: whether this entry is valid or not + * @enqueue_time: enqueue time (qdf_log_timestamp) + * @work_scheduled_time: work scheduled time (qdf_log_timestamp) + * @dequeue_time: dequeue time (qdf_log_timestamp) + */ +struct hal_reg_write_q_elem { + struct hal_srng *srng; + void __iomem *addr; + uint32_t enqueue_val; + uint32_t dequeue_val; + uint8_t valid; + qdf_time_t enqueue_time; + qdf_time_t work_scheduled_time; + qdf_time_t dequeue_time; +}; + +/** + * struct hal_reg_write_srng_stats - srng stats to keep track of register writes + * @enqueues: writes enqueued to delayed work + * @dequeues: writes dequeued from delayed work (not written yet) + * @coalesces: writes not enqueued since srng is already queued up + * @direct: writes not enqueued and written to register directly + * @dequeue_delay: dequeue operation be delayed + */ +struct hal_reg_write_srng_stats { + uint32_t enqueues; + uint32_t dequeues; + uint32_t coalesces; + uint32_t direct; + uint32_t dequeue_delay; +}; + +/** + * enum hal_reg_sched_delay - ENUM for write sched delay histogram + * @REG_WRITE_SCHED_DELAY_SUB_100us: index for delay < 100us + * @REG_WRITE_SCHED_DELAY_SUB_1000us: index for delay < 1000us + * @REG_WRITE_SCHED_DELAY_SUB_5000us: index for delay < 5000us + * @REG_WRITE_SCHED_DELAY_GT_5000us: index for delay >= 5000us + * @REG_WRITE_SCHED_DELAY_HIST_MAX: Max value (nnsize of histogram array) + */ +enum hal_reg_sched_delay { + REG_WRITE_SCHED_DELAY_SUB_100us, + REG_WRITE_SCHED_DELAY_SUB_1000us, + REG_WRITE_SCHED_DELAY_SUB_5000us, + REG_WRITE_SCHED_DELAY_GT_5000us, + REG_WRITE_SCHED_DELAY_HIST_MAX, +}; + +/** + * struct hal_reg_write_soc_stats - soc stats to keep track of register writes + * @enqueues: writes enqueued to delayed work + * @dequeues: writes dequeued from delayed work (not written yet) + * @coalesces: writes not enqueued since srng is already queued up + * @direct: writes not enqueud and writted to register directly + * @prevent_l1_fails: prevent l1 API failed + * @q_depth: current queue depth in delayed register write queue + * @max_q_depth: maximum queue for delayed register write queue + * @sched_delay: = kernel work sched delay + bus wakeup delay, histogram + * @dequeue_delay: dequeue operation be delayed + */ +struct hal_reg_write_soc_stats { + qdf_atomic_t enqueues; + uint32_t dequeues; + qdf_atomic_t coalesces; + qdf_atomic_t direct; + uint32_t prevent_l1_fails; + qdf_atomic_t q_depth; + uint32_t max_q_depth; + uint32_t sched_delay[REG_WRITE_SCHED_DELAY_HIST_MAX]; + uint32_t dequeue_delay; +}; +#endif + +/* Common SRNG ring structure for source and destination rings */ +struct hal_srng { + /* Unique SRNG ring ID */ + uint8_t ring_id; + + /* Ring initialization done */ + uint8_t initialized; + + /* Interrupt/MSI value assigned to this ring */ + int irq; + + /* Physical base address of the ring */ + qdf_dma_addr_t ring_base_paddr; + + /* Virtual base address of the ring */ + uint32_t *ring_base_vaddr; + + /* Number of entries in ring */ + uint32_t num_entries; + + /* Ring size */ + uint32_t ring_size; + + /* Ring size mask */ + uint32_t ring_size_mask; + + /* Size of ring entry */ + uint32_t entry_size; + + /* Interrupt timer threshold – in micro seconds */ + uint32_t intr_timer_thres_us; + + /* Interrupt batch counter threshold – in number of ring entries */ + uint32_t intr_batch_cntr_thres_entries; + + /* Applicable only for CE dest ring */ + uint32_t prefetch_timer; + + /* MSI Address */ + qdf_dma_addr_t msi_addr; + + /* MSI data */ + uint32_t msi_data; + + /* Misc flags */ + uint32_t flags; + + /* Lock for serializing ring index updates */ + hal_srng_lock_t lock; + + /* Start offset of SRNG register groups for this ring + * TBD: See if this is required - register address can be derived + * from ring ID + */ + void *hwreg_base[MAX_SRNG_REG_GROUPS]; + + /* Source or Destination ring */ + enum hal_srng_dir ring_dir; + + union { + struct { + /* SW tail pointer */ + uint32_t tp; + + /* Shadow head pointer location to be updated by HW */ + uint32_t *hp_addr; + + /* Cached head pointer */ + uint32_t cached_hp; + + /* Tail pointer location to be updated by SW – This + * will be a register address and need not be + * accessed through SW structure */ + uint32_t *tp_addr; + + /* Current SW loop cnt */ + uint32_t loop_cnt; + + /* max transfer size */ + uint16_t max_buffer_length; + } dst_ring; + + struct { + /* SW head pointer */ + uint32_t hp; + + /* SW reap head pointer */ + uint32_t reap_hp; + + /* Shadow tail pointer location to be updated by HW */ + uint32_t *tp_addr; + + /* Cached tail pointer */ + uint32_t cached_tp; + + /* Head pointer location to be updated by SW – This + * will be a register address and need not be accessed + * through SW structure */ + uint32_t *hp_addr; + + /* Low threshold – in number of ring entries */ + uint32_t low_threshold; + } src_ring; + } u; + + struct hal_soc *hal_soc; + + /* Number of times hp/tp updated in runtime resume */ + uint32_t flush_count; + /* hal srng event flag*/ + unsigned long srng_event; + /* last flushed time stamp */ + uint64_t last_flush_ts; +#ifdef FEATURE_HAL_DELAYED_REG_WRITE + /* flag to indicate whether srng is already queued for delayed write */ + uint8_t reg_write_in_progress; + /* last dequeue elem time stamp */ + qdf_time_t last_dequeue_time; + + /* srng specific delayed write stats */ + struct hal_reg_write_srng_stats wstats; +#endif +}; + +/* HW SRNG configuration table */ +struct hal_hw_srng_config { + int start_ring_id; + uint16_t max_rings; + uint16_t entry_size; + uint32_t reg_start[MAX_SRNG_REG_GROUPS]; + uint16_t reg_size[MAX_SRNG_REG_GROUPS]; + uint8_t lmac_ring; + enum hal_srng_dir ring_dir; + uint32_t max_size; +}; + +#define MAX_SHADOW_REGISTERS 36 +#define MAX_GENERIC_SHADOW_REG 5 + +/** + * struct shadow_reg_config - Hal soc structure that contains + * the list of generic shadow registers + * @target_register: target reg offset + * @shadow_config_index: shadow config index in shadow config + * list sent to FW + * @va: virtual addr of shadow reg + * + * This structure holds the generic registers that are mapped to + * the shadow region and holds the mapping of the target + * register offset to shadow config index provided to FW during + * init + */ +struct shadow_reg_config { + uint32_t target_register; + int shadow_config_index; + uint64_t va; +}; + +/* REO parameters to be passed to hal_reo_setup */ +struct hal_reo_params { + /** rx hash steering enabled or disabled */ + bool rx_hash_enabled; + /** reo remap 1 register */ + uint32_t remap1; + /** reo remap 2 register */ + uint32_t remap2; + /** fragment destination ring */ + uint8_t frag_dst_ring; + /** padding */ + uint8_t padding[3]; +}; + +struct hal_hw_txrx_ops { + + /* init and setup */ + void (*hal_srng_dst_hw_init)(struct hal_soc *hal, + struct hal_srng *srng); + void (*hal_srng_src_hw_init)(struct hal_soc *hal, + struct hal_srng *srng); + void (*hal_get_hw_hptp)(struct hal_soc *hal, + hal_ring_handle_t hal_ring_hdl, + uint32_t *headp, uint32_t *tailp, + uint8_t ring_type); + void (*hal_reo_setup)(struct hal_soc *hal_soc, void *reoparams); + void (*hal_setup_link_idle_list)( + struct hal_soc *hal_soc, + qdf_dma_addr_t scatter_bufs_base_paddr[], + void *scatter_bufs_base_vaddr[], + uint32_t num_scatter_bufs, + uint32_t scatter_buf_size, + uint32_t last_buf_end_offset, + uint32_t num_entries); + qdf_iomem_t (*hal_get_window_address)(struct hal_soc *hal_soc, + qdf_iomem_t addr); + void (*hal_reo_set_err_dst_remap)(void *hal_soc); + + /* tx */ + void (*hal_tx_desc_set_dscp_tid_table_id)(void *desc, uint8_t id); + void (*hal_tx_set_dscp_tid_map)(struct hal_soc *hal_soc, uint8_t *map, + uint8_t id); + void (*hal_tx_update_dscp_tid)(struct hal_soc *hal_soc, uint8_t tid, + uint8_t id, + uint8_t dscp); + void (*hal_tx_desc_set_lmac_id)(void *desc, uint8_t lmac_id); + void (*hal_tx_desc_set_buf_addr)(void *desc, dma_addr_t paddr, + uint8_t pool_id, uint32_t desc_id, uint8_t type); + void (*hal_tx_desc_set_search_type)(void *desc, uint8_t search_type); + void (*hal_tx_desc_set_search_index)(void *desc, uint32_t search_index); + void (*hal_tx_desc_set_cache_set_num)(void *desc, uint8_t search_index); + void (*hal_tx_comp_get_status)(void *desc, void *ts, + struct hal_soc *hal); + uint8_t (*hal_tx_comp_get_release_reason)(void *hal_desc); + uint8_t (*hal_get_wbm_internal_error)(void *hal_desc); + void (*hal_tx_desc_set_mesh_en)(void *desc, uint8_t en); + + /* rx */ + uint32_t (*hal_rx_msdu_start_nss_get)(uint8_t *); + void (*hal_rx_mon_hw_desc_get_mpdu_status)(void *hw_desc_addr, + struct mon_rx_status *rs); + uint8_t (*hal_rx_get_tlv)(void *rx_tlv); + void (*hal_rx_proc_phyrx_other_receive_info_tlv)(void *rx_tlv_hdr, + void *ppdu_info_handle); + void (*hal_rx_dump_msdu_start_tlv)(void *msdu_start, uint8_t dbg_level); + void (*hal_rx_dump_msdu_end_tlv)(void *msdu_end, + uint8_t dbg_level); + uint32_t (*hal_get_link_desc_size)(void); + uint32_t (*hal_rx_mpdu_start_tid_get)(uint8_t *buf); + uint32_t (*hal_rx_msdu_start_reception_type_get)(uint8_t *buf); + uint16_t (*hal_rx_msdu_end_da_idx_get)(uint8_t *buf); + void* (*hal_rx_msdu_desc_info_get_ptr)(void *msdu_details_ptr); + void* (*hal_rx_link_desc_msdu0_ptr)(void *msdu_link_ptr); + void (*hal_reo_status_get_header)(uint32_t *d, int b, void *h); + uint32_t (*hal_rx_status_get_tlv_info)(void *rx_tlv_hdr, + void *ppdu_info, + hal_soc_handle_t hal_soc_hdl, + qdf_nbuf_t nbuf); + void (*hal_rx_wbm_err_info_get)(void *wbm_desc, + void *wbm_er_info); + void (*hal_rx_dump_mpdu_start_tlv)(void *mpdustart, + uint8_t dbg_level); + + void (*hal_tx_set_pcp_tid_map)(struct hal_soc *hal_soc, uint8_t *map); + void (*hal_tx_update_pcp_tid_map)(struct hal_soc *hal_soc, uint8_t pcp, + uint8_t id); + void (*hal_tx_set_tidmap_prty)(struct hal_soc *hal_soc, uint8_t prio); + uint8_t (*hal_rx_get_rx_fragment_number)(uint8_t *buf); + uint8_t (*hal_rx_msdu_end_da_is_mcbc_get)(uint8_t *buf); + uint8_t (*hal_rx_msdu_end_sa_is_valid_get)(uint8_t *buf); + uint16_t (*hal_rx_msdu_end_sa_idx_get)(uint8_t *buf); + uint32_t (*hal_rx_desc_is_first_msdu)(void *hw_desc_addr); + uint32_t (*hal_rx_msdu_end_l3_hdr_padding_get)(uint8_t *buf); + uint32_t (*hal_rx_encryption_info_valid)(uint8_t *buf); + void (*hal_rx_print_pn)(uint8_t *buf); + uint8_t (*hal_rx_msdu_end_first_msdu_get)(uint8_t *buf); + uint8_t (*hal_rx_msdu_end_da_is_valid_get)(uint8_t *buf); + uint8_t (*hal_rx_msdu_end_last_msdu_get)(uint8_t *buf); + bool (*hal_rx_get_mpdu_mac_ad4_valid)(uint8_t *buf); + uint32_t (*hal_rx_mpdu_start_sw_peer_id_get)(uint8_t *buf); + uint32_t (*hal_rx_mpdu_get_to_ds)(uint8_t *buf); + uint32_t (*hal_rx_mpdu_get_fr_ds)(uint8_t *buf); + uint8_t (*hal_rx_get_mpdu_frame_control_valid)(uint8_t *buf); + QDF_STATUS + (*hal_rx_mpdu_get_addr1)(uint8_t *buf, uint8_t *mac_addr); + QDF_STATUS + (*hal_rx_mpdu_get_addr2)(uint8_t *buf, uint8_t *mac_addr); + QDF_STATUS + (*hal_rx_mpdu_get_addr3)(uint8_t *buf, uint8_t *mac_addr); + QDF_STATUS + (*hal_rx_mpdu_get_addr4)(uint8_t *buf, uint8_t *mac_addr); + uint8_t (*hal_rx_get_mpdu_sequence_control_valid)(uint8_t *buf); + bool (*hal_rx_is_unicast)(uint8_t *buf); + uint32_t (*hal_rx_tid_get)(hal_soc_handle_t hal_soc_hdl, uint8_t *buf); + uint32_t (*hal_rx_hw_desc_get_ppduid_get)(void *hw_desc_addr); + uint32_t (*hal_rx_mpdu_start_mpdu_qos_control_valid_get)(uint8_t *buf); + uint32_t (*hal_rx_msdu_end_sa_sw_peer_id_get)(uint8_t *buf); + void * (*hal_rx_msdu0_buffer_addr_lsb)(void *link_desc_addr); + void * (*hal_rx_msdu_desc_info_ptr_get)(void *msdu0); + void * (*hal_ent_mpdu_desc_info)(void *hw_addr); + void * (*hal_dst_mpdu_desc_info)(void *hw_addr); + uint8_t (*hal_rx_get_fc_valid)(uint8_t *buf); + uint8_t (*hal_rx_get_to_ds_flag)(uint8_t *buf); + uint8_t (*hal_rx_get_mac_addr2_valid)(uint8_t *buf); + uint8_t (*hal_rx_get_filter_category)(uint8_t *buf); + uint32_t (*hal_rx_get_ppdu_id)(uint8_t *buf); + void (*hal_reo_config)(struct hal_soc *soc, + uint32_t reg_val, + struct hal_reo_params *reo_params); + uint32_t (*hal_rx_msdu_flow_idx_get)(uint8_t *buf); + bool (*hal_rx_msdu_flow_idx_invalid)(uint8_t *buf); + bool (*hal_rx_msdu_flow_idx_timeout)(uint8_t *buf); + uint32_t (*hal_rx_msdu_fse_metadata_get)(uint8_t *buf); + uint16_t (*hal_rx_msdu_cce_metadata_get)(uint8_t *buf); + void + (*hal_rx_msdu_get_flow_params)( + uint8_t *buf, + bool *flow_invalid, + bool *flow_timeout, + uint32_t *flow_index); + uint16_t (*hal_rx_tlv_get_tcp_chksum)(uint8_t *buf); + uint16_t (*hal_rx_get_rx_sequence)(uint8_t *buf); + void (*hal_rx_get_bb_info)(void *rx_tlv, void *ppdu_info_handle); + void (*hal_rx_get_rtt_info)(void *rx_tlv, void *ppdu_info_handle); + void (*hal_rx_msdu_packet_metadata_get)(uint8_t *buf, + void *msdu_pkt_metadata); + uint16_t (*hal_rx_get_fisa_cumulative_l4_checksum)(uint8_t *buf); + uint16_t (*hal_rx_get_fisa_cumulative_ip_length)(uint8_t *buf); + bool (*hal_rx_get_udp_proto)(uint8_t *buf); + bool (*hal_rx_get_fisa_flow_agg_continuation)(uint8_t *buf); + uint8_t (*hal_rx_get_fisa_flow_agg_count)(uint8_t *buf); + bool (*hal_rx_get_fisa_timeout)(uint8_t *buf); + void (*hal_rx_msdu_get_reo_destination_indication)(uint8_t *buf, + uint32_t *reo_destination_indication); +}; + +/** + * struct hal_soc_stats - Hal layer stats + * @reg_write_fail: number of failed register writes + * @wstats: delayed register write stats + * @shadow_reg_write_fail: shadow reg write failure stats + * @shadow_reg_write_succ: shadow reg write success stats + * + * This structure holds all the statistics at HAL layer. + */ +struct hal_soc_stats { + uint32_t reg_write_fail; +#ifdef FEATURE_HAL_DELAYED_REG_WRITE + struct hal_reg_write_soc_stats wstats; +#endif +#ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE + uint32_t shadow_reg_write_fail; + uint32_t shadow_reg_write_succ; +#endif +}; + +#ifdef ENABLE_HAL_REG_WR_HISTORY +/* The history size should always be a power of 2 */ +#define HAL_REG_WRITE_HIST_SIZE 8 + +/** + * struct hal_reg_write_fail_entry - Record of + * register write which failed. + * @timestamp: timestamp of reg write failure + * @reg_offset: offset of register where the write failed + * @write_val: the value which was to be written + * @read_val: the value read back from the register after write + */ +struct hal_reg_write_fail_entry { + uint64_t timestamp; + uint32_t reg_offset; + uint32_t write_val; + uint32_t read_val; +}; + +/** + * struct hal_reg_write_fail_history - Hal layer history + * of all the register write failures. + * @index: index to add the new record + * @record: array of all the records in history + * + * This structure holds the history of register write + * failures at HAL layer. + */ +struct hal_reg_write_fail_history { + qdf_atomic_t index; + struct hal_reg_write_fail_entry record[HAL_REG_WRITE_HIST_SIZE]; +}; +#endif + +/** + * struct hal_soc - HAL context to be used to access SRNG APIs + * (currently used by data path and + * transport (CE) modules) + * @list_shadow_reg_config: array of generic regs mapped to + * shadow regs + * @num_generic_shadow_regs_configured: number of generic regs + * mapped to shadow regs + */ +struct hal_soc { + /* HIF handle to access HW registers */ + struct hif_opaque_softc *hif_handle; + + /* QDF device handle */ + qdf_device_t qdf_dev; + + /* Device base address */ + void *dev_base_addr; + + /* HAL internal state for all SRNG rings. + * TODO: See if this is required + */ + struct hal_srng srng_list[HAL_SRNG_ID_MAX]; + + /* Remote pointer memory for HW/FW updates */ + uint32_t *shadow_rdptr_mem_vaddr; + qdf_dma_addr_t shadow_rdptr_mem_paddr; + + /* Shared memory for ring pointer updates from host to FW */ + uint32_t *shadow_wrptr_mem_vaddr; + qdf_dma_addr_t shadow_wrptr_mem_paddr; + + /* REO blocking resource index */ + uint8_t reo_res_bitmap; + uint8_t index; + uint32_t target_type; + + /* shadow register configuration */ + struct pld_shadow_reg_v2_cfg shadow_config[MAX_SHADOW_REGISTERS]; + int num_shadow_registers_configured; + bool use_register_windowing; + uint32_t register_window; + qdf_spinlock_t register_access_lock; + + /* Static window map configuration for multiple window write*/ + bool static_window_map; + + /* srng table */ + struct hal_hw_srng_config *hw_srng_table; + int32_t *hal_hw_reg_offset; + struct hal_hw_txrx_ops *ops; + + /* Indicate srngs initialization */ + bool init_phase; + /* Hal level stats */ + struct hal_soc_stats stats; +#ifdef ENABLE_HAL_REG_WR_HISTORY + struct hal_reg_write_fail_history *reg_wr_fail_hist; +#endif +#ifdef FEATURE_HAL_DELAYED_REG_WRITE + /* queue(array) to hold register writes */ + struct hal_reg_write_q_elem *reg_write_queue; + /* delayed work to be queued into workqueue */ + qdf_work_t reg_write_work; + /* workqueue for delayed register writes */ + qdf_workqueue_t *reg_write_wq; + /* write index used by caller to enqueue delayed work */ + qdf_atomic_t write_idx; + /* read index used by worker thread to dequeue/write registers */ + uint32_t read_idx; +#endif + qdf_atomic_t active_work_cnt; +#ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE + struct shadow_reg_config + list_shadow_reg_config[MAX_GENERIC_SHADOW_REG]; + int num_generic_shadow_regs_configured; +#endif +}; + +#ifdef FEATURE_HAL_DELAYED_REG_WRITE +/** + * hal_delayed_reg_write() - delayed regiter write + * @hal_soc: HAL soc handle + * @srng: hal srng + * @addr: iomem address + * @value: value to be written + * + * Return: none + */ +void hal_delayed_reg_write(struct hal_soc *hal_soc, + struct hal_srng *srng, + void __iomem *addr, + uint32_t value); +#endif + +void hal_qca6750_attach(struct hal_soc *hal_soc); +void hal_qca6490_attach(struct hal_soc *hal_soc); +void hal_qca6390_attach(struct hal_soc *hal_soc); +void hal_qca6290_attach(struct hal_soc *hal_soc); +void hal_qca8074_attach(struct hal_soc *hal_soc); + +/* + * hal_soc_to_dp_hal_roc - API to convert hal_soc to opaque + * dp_hal_soc handle type + * @hal_soc - hal_soc type + * + * Return: hal_soc_handle_t type + */ +static inline +hal_soc_handle_t hal_soc_to_hal_soc_handle(struct hal_soc *hal_soc) +{ + return (hal_soc_handle_t)hal_soc; +} + +/* + * hal_srng_to_hal_ring_handle - API to convert hal_srng to opaque + * dp_hal_ring handle type + * @hal_srng - hal_srng type + * + * Return: hal_ring_handle_t type + */ +static inline +hal_ring_handle_t hal_srng_to_hal_ring_handle(struct hal_srng *hal_srng) +{ + return (hal_ring_handle_t)hal_srng; +} + +/* + * hal_ring_handle_to_hal_srng - API to convert dp_hal_ring to hal_srng handle + * @hal_ring - hal_ring_handle_t type + * + * Return: hal_srng pointer type + */ +static inline +struct hal_srng *hal_ring_handle_to_hal_srng(hal_ring_handle_t hal_ring) +{ + return (struct hal_srng *)hal_ring; +} +#endif /* _HAL_INTERNAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.c b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.c new file mode 100644 index 0000000000000000000000000000000000000000..6a0b155a022b4b1f7f167134c85d7bb190ce4ec8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.c @@ -0,0 +1,1412 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hal_api.h" +#include "hal_hw_headers.h" +#include "hal_reo.h" +#include "hal_tx.h" +#include "hal_rx.h" +#include "qdf_module.h" + +/* TODO: See if the following definition is available in HW headers */ +#define HAL_REO_OWNED 4 +#define HAL_REO_QUEUE_DESC 8 +#define HAL_REO_QUEUE_EXT_DESC 9 + +/* TODO: Using associated link desc counter 1 for Rx. Check with FW on + * how these counters are assigned + */ +#define HAL_RX_LINK_DESC_CNTR 1 +/* TODO: Following definition should be from HW headers */ +#define HAL_DESC_REO_OWNED 4 + +/** + * hal_uniform_desc_hdr_setup - setup reo_queue_ext descritpro + * @owner - owner info + * @buffer_type - buffer type + */ +static inline void hal_uniform_desc_hdr_setup(uint32_t *desc, uint32_t owner, + uint32_t buffer_type) +{ + HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, OWNER, + owner); + HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, BUFFER_TYPE, + buffer_type); +} + +#ifndef TID_TO_WME_AC +#define WME_AC_BE 0 /* best effort */ +#define WME_AC_BK 1 /* background */ +#define WME_AC_VI 2 /* video */ +#define WME_AC_VO 3 /* voice */ + +#define TID_TO_WME_AC(_tid) ( \ + (((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ + (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \ + (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \ + WME_AC_VO) +#endif +#define HAL_NON_QOS_TID 16 + +#ifdef HAL_DISABLE_NON_BA_2K_JUMP_ERROR +static inline uint32_t hal_update_non_ba_win_size(int tid, + uint32_t ba_window_size) +{ + return ba_window_size; +} +#else +static inline uint32_t hal_update_non_ba_win_size(int tid, + uint32_t ba_window_size) +{ + if ((ba_window_size == 1) && (tid != HAL_NON_QOS_TID)) + ba_window_size++; + + return ba_window_size; +} +#endif + +/** + * hal_reo_qdesc_setup - Setup HW REO queue descriptor + * + * @hal_soc: Opaque HAL SOC handle + * @ba_window_size: BlockAck window size + * @start_seq: Starting sequence number + * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory + * @hw_qdesc_paddr: Physical address of REO queue descriptor memory + * @tid: TID + * + */ +void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl, int tid, + uint32_t ba_window_size, + uint32_t start_seq, void *hw_qdesc_vaddr, + qdf_dma_addr_t hw_qdesc_paddr, + int pn_type) +{ + uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr; + uint32_t *reo_queue_ext_desc; + uint32_t reg_val; + uint32_t pn_enable; + uint32_t pn_size = 0; + + qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue)); + + hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED, + HAL_REO_QUEUE_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0, + RESERVED_0A, 0xDDBEEF); + + /* This a just a SW meta data and will be copied to REO destination + * descriptors indicated by hardware. + * TODO: Setting TID in this field. See if we should set something else. + */ + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1, + RECEIVE_QUEUE_NUMBER, tid); + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + VLD, 1); + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + ASSOCIATED_LINK_DESCRIPTOR_COUNTER, HAL_RX_LINK_DESC_CNTR); + + /* + * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0 + */ + + reg_val = TID_TO_WME_AC(tid); + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val); + + if (ba_window_size < 1) + ba_window_size = 1; + + /* WAR to get 2k exception in Non BA case. + * Setting window size to 2 to get 2k jump exception + * when we receive aggregates in Non BA case + */ + ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size); + + /* Set RTY bit for non-BA case. Duplicate detection is currently not + * done by HW in non-BA case if RTY bit is not set. + * TODO: This is a temporary War and should be removed once HW fix is + * made to check and discard duplicates even if RTY bit is not set. + */ + if (ba_window_size == 1) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1); + + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE, + ba_window_size - 1); + + switch (pn_type) { + case HAL_PN_WPA: + pn_enable = 1; + pn_size = PN_SIZE_48; + break; + case HAL_PN_WAPI_EVEN: + case HAL_PN_WAPI_UNEVEN: + pn_enable = 1; + pn_size = PN_SIZE_128; + break; + default: + pn_enable = 0; + break; + } + + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED, + pn_enable); + + if (pn_type == HAL_PN_WAPI_EVEN) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + PN_SHALL_BE_EVEN, 1); + else if (pn_type == HAL_PN_WAPI_UNEVEN) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + PN_SHALL_BE_UNEVEN, 1); + + /* + * TODO: Need to check if PN handling in SW needs to be enabled + * So far this is not a requirement + */ + + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE, + pn_size); + + /* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set + * based on BA window size and/or AMPDU capabilities + */ + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + IGNORE_AMPDU_FLAG, 1); + + if (start_seq <= 0xfff) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN, + start_seq); + + /* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA, + * but REO is not delivering packets if we set it to 1. Need to enable + * this once the issue is resolved + */ + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0); + + /* TODO: Check if we should set start PN for WAPI */ + +#ifdef notyet + /* Setup first queue extension if BA window size is more than 1 */ + if (ba_window_size > 1) { + reo_queue_ext_desc = + (uint32_t *)(((struct rx_reo_queue *)reo_queue_desc) + + 1); + qdf_mem_zero(reo_queue_ext_desc, + sizeof(struct rx_reo_queue_ext)); + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); + } + /* Setup second queue extension if BA window size is more than 105 */ + if (ba_window_size > 105) { + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); + qdf_mem_zero(reo_queue_ext_desc, + sizeof(struct rx_reo_queue_ext)); + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); + } + /* Setup third queue extension if BA window size is more than 210 */ + if (ba_window_size > 210) { + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); + qdf_mem_zero(reo_queue_ext_desc, + sizeof(struct rx_reo_queue_ext)); + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); + } +#else + /* TODO: HW queue descriptors are currently allocated for max BA + * window size for all QOS TIDs so that same descriptor can be used + * later when ADDBA request is recevied. This should be changed to + * allocate HW queue descriptors based on BA window size being + * negotiated (0 for non BA cases), and reallocate when BA window + * size changes and also send WMI message to FW to change the REO + * queue descriptor in Rx peer entry as part of dp_rx_tid_update. + */ + if (tid != HAL_NON_QOS_TID) { + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue *)reo_queue_desc) + 1); + qdf_mem_zero(reo_queue_ext_desc, 3 * + sizeof(struct rx_reo_queue_ext)); + /* Initialize first reo queue extension descriptor */ + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_ext_desc, + UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xADBEEF); + /* Initialize second reo queue extension descriptor */ + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_ext_desc, + UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xBDBEEF); + /* Initialize third reo queue extension descriptor */ + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_ext_desc, + UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xCDBEEF); + } +#endif +} +qdf_export_symbol(hal_reo_qdesc_setup); + +/** + * hal_get_ba_aging_timeout - Get BA Aging timeout + * + * @hal_soc: Opaque HAL SOC handle + * @ac: Access category + * @value: window size to get + */ +void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac, + uint32_t *value) +{ + struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl; + + switch (ac) { + case WME_AC_BE: + *value = HAL_REG_READ(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; + break; + case WME_AC_BK: + *value = HAL_REG_READ(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; + break; + case WME_AC_VI: + *value = HAL_REG_READ(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; + break; + case WME_AC_VO: + *value = HAL_REG_READ(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid AC: %d\n", ac); + } +} + +qdf_export_symbol(hal_get_ba_aging_timeout); + +/** + * hal_set_ba_aging_timeout - Set BA Aging timeout + * + * @hal_soc: Opaque HAL SOC handle + * @ac: Access category + * ac: 0 - Background, 1 - Best Effort, 2 - Video, 3 - Voice + * @value: Input value to set + */ +void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac, + uint32_t value) +{ + struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl; + + switch (ac) { + case WME_AC_BE: + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + value * 1000); + break; + case WME_AC_BK: + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + value * 1000); + break; + case WME_AC_VI: + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + value * 1000); + break; + case WME_AC_VO: + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + value * 1000); + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid AC: %d\n", ac); + } +} + +qdf_export_symbol(hal_set_ba_aging_timeout); + +#define BLOCK_RES_MASK 0xF +static inline uint8_t hal_find_one_bit(uint8_t x) +{ + uint8_t y = (x & (~x + 1)) & BLOCK_RES_MASK; + uint8_t pos; + + for (pos = 0; y; y >>= 1) + pos++; + + return pos-1; +} + +static inline uint8_t hal_find_zero_bit(uint8_t x) +{ + uint8_t y = (~x & (x+1)) & BLOCK_RES_MASK; + uint8_t pos; + + for (pos = 0; y; y >>= 1) + pos++; + + return pos-1; +} + +inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc, + enum hal_reo_cmd_type type, + uint32_t paddr_lo, + uint8_t paddr_hi) +{ + switch (type) { + case CMD_GET_QUEUE_STATS: + HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1, + RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo); + HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, + RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi); + break; + case CMD_FLUSH_QUEUE: + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1, + FLUSH_DESC_ADDR_31_0, paddr_lo); + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2, + FLUSH_DESC_ADDR_39_32, paddr_hi); + break; + case CMD_FLUSH_CACHE: + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1, + FLUSH_ADDR_31_0, paddr_lo); + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, + FLUSH_ADDR_39_32, paddr_hi); + break; + case CMD_UPDATE_RX_REO_QUEUE: + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1, + RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo); + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi); + break; + default: + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Invalid REO command type", __func__); + break; + } +} + +inline int hal_reo_cmd_queue_stats(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) + +{ + uint32_t *reo_desc, val; + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E, + sizeof(struct reo_get_queue_stats)); + + /* Offsets of descriptor fields defined in HW headers start from + * the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_get_queue_stats) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, + REO_STATUS_REQUIRED, cmd->std.need_status); + + hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS, + cmd->std.addr_lo, + cmd->std.addr_hi); + + HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS, + cmd->u.stats_params.clear); + + if (hif_pm_runtime_get(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD) == 0) { + hal_srng_access_end(hal_soc_hdl, hal_ring_hdl); + hif_pm_runtime_put(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD); + } else { + hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl); + hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); + hal_srng_inc_flush_cnt(hal_ring_hdl); + } + + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, + val); +} +qdf_export_symbol(hal_reo_cmd_queue_stats); + +inline int hal_reo_cmd_flush_queue(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) +{ + uint32_t *reo_desc, val; + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E, + sizeof(struct reo_flush_queue)); + + /* Offsets of descriptor fields defined in HW headers start from + * the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_flush_queue) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, + REO_STATUS_REQUIRED, cmd->std.need_status); + + hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo, + cmd->std.addr_hi); + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2, + BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH, + cmd->u.fl_queue_params.block_use_after_flush); + + if (cmd->u.fl_queue_params.block_use_after_flush) { + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2, + BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index); + } + + hal_srng_access_end(hal_soc, hal_ring_hdl); + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, + val); +} +qdf_export_symbol(hal_reo_cmd_flush_queue); + +inline int hal_reo_cmd_flush_cache(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) +{ + uint32_t *reo_desc, val; + struct hal_reo_cmd_flush_cache_params *cp; + uint8_t index = 0; + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + cp = &cmd->u.fl_cache_params; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + + /* We need a cache block resource for this operation, and REO HW has + * only 4 such blocking resources. These resources are managed using + * reo_res_bitmap, and we return failure if none is available. + */ + if (cp->block_use_after_flush) { + index = hal_find_zero_bit(hal_soc->reo_res_bitmap); + if (index > 3) { + qdf_print("%s, No blocking resource available!", + __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + hal_soc->index = index; + } + + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + hal_srng_access_end(hal_soc, hal_ring_hdl); + hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl)); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E, + sizeof(struct reo_flush_cache)); + + /* Offsets of descriptor fields defined in HW headers start from + * the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_flush_cache) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, + REO_STATUS_REQUIRED, cmd->std.need_status); + + hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo, + cmd->std.addr_hi); + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, + FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue); + + /* set it to 0 for now */ + cp->rel_block_index = 0; + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, + RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index); + + if (cp->block_use_after_flush) { + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, + CACHE_BLOCK_RESOURCE_INDEX, index); + } + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, + FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval); + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, + BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush); + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE, + cp->flush_entire_cache); + + if (hif_pm_runtime_get(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD) == 0) { + hal_srng_access_end(hal_soc_hdl, hal_ring_hdl); + hif_pm_runtime_put(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD); + } else { + hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl); + hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); + hal_srng_inc_flush_cnt(hal_ring_hdl); + } + + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, + val); +} +qdf_export_symbol(hal_reo_cmd_flush_cache); + +inline int hal_reo_cmd_unblock_cache(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) + +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t *reo_desc, val; + uint8_t index = 0; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + + if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) { + index = hal_find_one_bit(hal_soc->reo_res_bitmap); + if (index > 3) { + hal_srng_access_end(hal_soc, hal_ring_hdl); + qdf_print("%s: No blocking resource to unblock!", + __func__); + return -EBUSY; + } + } + + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E, + sizeof(struct reo_unblock_cache)); + + /* Offsets of descriptor fields defined in HW headers start from + * the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_unblock_cache) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, + REO_STATUS_REQUIRED, cmd->std.need_status); + + HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1, + UNBLOCK_TYPE, cmd->u.unblk_cache_params.type); + + if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) { + HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1, + CACHE_BLOCK_RESOURCE_INDEX, + cmd->u.unblk_cache_params.index); + } + + hal_srng_access_end(hal_soc, hal_ring_hdl); + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, + val); +} +qdf_export_symbol(hal_reo_cmd_unblock_cache); + +inline int hal_reo_cmd_flush_timeout_list(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t *reo_desc, val; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E, + sizeof(struct reo_flush_timeout_list)); + + /* Offsets of descriptor fields defined in HW headers start from + * the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_flush_timeout_list) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, + REO_STATUS_REQUIRED, cmd->std.need_status); + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST, + cmd->u.fl_tim_list_params.ac_list); + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2, + MINIMUM_RELEASE_DESC_COUNT, + cmd->u.fl_tim_list_params.min_rel_desc); + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2, + MINIMUM_FORWARD_BUF_COUNT, + cmd->u.fl_tim_list_params.min_fwd_buf); + + hal_srng_access_end(hal_soc, hal_ring_hdl); + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, + val); +} +qdf_export_symbol(hal_reo_cmd_flush_timeout_list); + +inline int hal_reo_cmd_update_rx_queue(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t *reo_desc, val; + struct hal_reo_cmd_update_queue_params *p; + + p = &cmd->u.upd_queue_params; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E, + sizeof(struct reo_update_rx_reo_queue)); + + /* Offsets of descriptor fields defined in HW headers start from + * the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_update_rx_reo_queue) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, + REO_STATUS_REQUIRED, cmd->std.need_status); + + hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE, + cmd->std.addr_lo, cmd->std.addr_hi); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD, + p->update_vld); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER, + p->update_assoc_link_desc); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_DISABLE_DUPLICATE_DETECTION, + p->update_disable_dup_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_DISABLE_DUPLICATE_DETECTION, + p->update_disable_dup_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_SOFT_REORDER_ENABLE, + p->update_soft_reorder_enab); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_AC, p->update_ac); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_BAR, p->update_bar); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_BAR, p->update_bar); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_RTY, p->update_rty); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_CHK_2K_MODE, p->update_chk_2k_mode); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_OOR_MODE, p->update_oor_mode); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_SIZE, p->update_pn_size); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_SVLD, p->update_svld); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_SSN, p->update_ssn); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_SEQ_2K_ERROR_DETECTED_FLAG, + p->update_seq_2k_err_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_VALID, p->update_pn_valid); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN, p->update_pn); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + RECEIVE_QUEUE_NUMBER, p->rx_queue_num); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + VLD, p->vld); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + ASSOCIATED_LINK_DESCRIPTOR_COUNTER, + p->assoc_link_desc); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + SOFT_REORDER_ENABLE, p->soft_reorder_enab); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + BAR, p->bar); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + CHK_2K_MODE, p->chk_2k_mode); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + RTY, p->rty); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + OOR_MODE, p->oor_mode); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + PN_CHECK_NEEDED, p->pn_check_needed); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + PN_SHALL_BE_EVEN, p->pn_even); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + PN_SHALL_BE_UNEVEN, p->pn_uneven); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + PN_HANDLING_ENABLE, p->pn_hand_enab); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + IGNORE_AMPDU_FLAG, p->ignore_ampdu); + + if (p->ba_window_size < 1) + p->ba_window_size = 1; + /* + * WAR to get 2k exception in Non BA case. + * Setting window size to 2 to get 2k jump exception + * when we receive aggregates in Non BA case + */ + if (p->ba_window_size == 1) + p->ba_window_size++; + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + BA_WINDOW_SIZE, p->ba_window_size - 1); + + if (p->pn_size == 24) + p->pn_size = PN_SIZE_24; + else if (p->pn_size == 48) + p->pn_size = PN_SIZE_48; + else if (p->pn_size == 128) + p->pn_size = PN_SIZE_128; + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + PN_SIZE, p->pn_size); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + SVLD, p->svld); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + SSN, p->ssn); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + PN_ERROR_DETECTED_FLAG, p->pn_err_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5, + PN_31_0, p->pn_31_0); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6, + PN_63_32, p->pn_63_32); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7, + PN_95_64, p->pn_95_64); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8, + PN_127_96, p->pn_127_96); + + if (hif_pm_runtime_get(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD) == 0) { + hal_srng_access_end(hal_soc_hdl, hal_ring_hdl); + hif_pm_runtime_put(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD); + } else { + hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl); + hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); + hal_srng_inc_flush_cnt(hal_ring_hdl); + } + + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, + val); +} +qdf_export_symbol(hal_reo_cmd_update_rx_queue); + +inline void +hal_reo_queue_stats_status(uint32_t *reo_desc, + struct hal_reo_queue_status *st, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t val; + + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + hal_reo_status_get_header(reo_desc, HAL_REO_QUEUE_STATS_STATUS_TLV, + &(st->header), hal_soc); + + /* SSN */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)]; + st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val); + + /* current index */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, + CURRENT_INDEX)]; + st->curr_idx = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, + CURRENT_INDEX, val); + + /* PN bits */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3, + PN_31_0)]; + st->pn_31_0 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3, + PN_31_0, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4, + PN_63_32)]; + st->pn_63_32 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4, + PN_63_32, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5, + PN_95_64)]; + st->pn_95_64 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5, + PN_95_64, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6, + PN_127_96)]; + st->pn_127_96 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6, + PN_127_96, val); + + /* timestamps */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7, + LAST_RX_ENQUEUE_TIMESTAMP)]; + st->last_rx_enq_tstamp = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7, + LAST_RX_ENQUEUE_TIMESTAMP, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8, + LAST_RX_DEQUEUE_TIMESTAMP)]; + st->last_rx_deq_tstamp = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8, + LAST_RX_DEQUEUE_TIMESTAMP, val); + + /* rx bitmap */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9, + RX_BITMAP_31_0)]; + st->rx_bitmap_31_0 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9, + RX_BITMAP_31_0, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10, + RX_BITMAP_63_32)]; + st->rx_bitmap_63_32 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10, + RX_BITMAP_63_32, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11, + RX_BITMAP_95_64)]; + st->rx_bitmap_95_64 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11, + RX_BITMAP_95_64, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12, + RX_BITMAP_127_96)]; + st->rx_bitmap_127_96 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12, + RX_BITMAP_127_96, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13, + RX_BITMAP_159_128)]; + st->rx_bitmap_159_128 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13, + RX_BITMAP_159_128, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14, + RX_BITMAP_191_160)]; + st->rx_bitmap_191_160 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14, + RX_BITMAP_191_160, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15, + RX_BITMAP_223_192)]; + st->rx_bitmap_223_192 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15, + RX_BITMAP_223_192, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16, + RX_BITMAP_255_224)]; + st->rx_bitmap_255_224 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16, + RX_BITMAP_255_224, val); + + /* various counts */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17, + CURRENT_MPDU_COUNT)]; + st->curr_mpdu_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17, + CURRENT_MPDU_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17, + CURRENT_MSDU_COUNT)]; + st->curr_msdu_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17, + CURRENT_MSDU_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18, + TIMEOUT_COUNT)]; + st->fwd_timeout_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18, + TIMEOUT_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18, + FORWARD_DUE_TO_BAR_COUNT)]; + st->fwd_bar_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18, + FORWARD_DUE_TO_BAR_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18, + DUPLICATE_COUNT)]; + st->dup_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18, + DUPLICATE_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19, + FRAMES_IN_ORDER_COUNT)]; + st->frms_in_order_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19, + FRAMES_IN_ORDER_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19, + BAR_RECEIVED_COUNT)]; + st->bar_rcvd_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19, + BAR_RECEIVED_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20, + MPDU_FRAMES_PROCESSED_COUNT)]; + st->mpdu_frms_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20, + MPDU_FRAMES_PROCESSED_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21, + MSDU_FRAMES_PROCESSED_COUNT)]; + st->msdu_frms_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21, + MSDU_FRAMES_PROCESSED_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22, + TOTAL_PROCESSED_BYTE_COUNT)]; + st->total_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22, + TOTAL_PROCESSED_BYTE_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23, + LATE_RECEIVE_MPDU_COUNT)]; + st->late_recv_mpdu_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23, + LATE_RECEIVE_MPDU_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23, + WINDOW_JUMP_2K)]; + st->win_jump_2k = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23, + WINDOW_JUMP_2K, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23, + HOLE_COUNT)]; + st->hole_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23, + HOLE_COUNT, val); +} +qdf_export_symbol(hal_reo_queue_stats_status); + +inline void +hal_reo_flush_queue_status(uint32_t *reo_desc, + struct hal_reo_flush_queue_status *st, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t val; + + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV, + &(st->header), hal_soc); + + /* error bit */ + val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED, + val); +} +qdf_export_symbol(hal_reo_flush_queue_status); + +inline void +hal_reo_flush_cache_status(uint32_t *reo_desc, + struct hal_reo_flush_cache_status *st, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t val; + + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV, + &(st->header), hal_soc); + + /* error bit */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED, + val); + + /* block error */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + BLOCK_ERROR_DETAILS)]; + st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, + BLOCK_ERROR_DETAILS, + val); + if (!st->block_error) + qdf_set_bit(hal_soc->index, + (unsigned long *)&hal_soc->reo_res_bitmap); + + /* cache flush status */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_STATUS_HIT)]; + st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_STATUS_HIT, + val); + + /* cache flush descriptor type */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)]; + st->cache_flush_status_desc_type = + HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE, + val); + + /* cache flush count */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_COUNT)]; + st->cache_flush_cnt = + HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_COUNT, + val); + +} +qdf_export_symbol(hal_reo_flush_cache_status); + +inline void hal_reo_unblock_cache_status(uint32_t *reo_desc, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_unblk_cache_status *st) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t val; + + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + hal_reo_status_get_header(reo_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV, + &st->header, hal_soc); + + /* error bit */ + val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2, + ERROR_DETECTED, + val); + + /* unblock type */ + val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2, + UNBLOCK_TYPE)]; + st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2, + UNBLOCK_TYPE, + val); + + if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX)) + qdf_clear_bit(hal_soc->index, + (unsigned long *)&hal_soc->reo_res_bitmap); +} +qdf_export_symbol(hal_reo_unblock_cache_status); + +inline void hal_reo_flush_timeout_list_status( + uint32_t *reo_desc, + struct hal_reo_flush_timeout_list_status *st, + hal_soc_handle_t hal_soc_hdl) + +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t val; + + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + hal_reo_status_get_header(reo_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV, + &(st->header), hal_soc); + + /* error bit */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2, + ERROR_DETECTED, + val); + + /* list empty */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2, + TIMOUT_LIST_EMPTY)]; + st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2, + TIMOUT_LIST_EMPTY, + val); + + /* release descriptor count */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3, + RELEASE_DESC_COUNT)]; + st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3, + RELEASE_DESC_COUNT, + val); + + /* forward buf count */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3, + FORWARD_BUF_COUNT)]; + st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3, + FORWARD_BUF_COUNT, + val); +} +qdf_export_symbol(hal_reo_flush_timeout_list_status); + +inline void hal_reo_desc_thres_reached_status( + uint32_t *reo_desc, + struct hal_reo_desc_thres_reached_status *st, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t val; + + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + hal_reo_status_get_header(reo_desc, + HAL_REO_DESC_THRES_STATUS_TLV, + &(st->header), hal_soc); + + /* threshold index */ + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2, + THRESHOLD_INDEX)]; + st->thres_index = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2, + THRESHOLD_INDEX, + val); + + /* link desc counters */ + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3, + LINK_DESCRIPTOR_COUNTER0)]; + st->link_desc_counter0 = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3, + LINK_DESCRIPTOR_COUNTER0, + val); + + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4, + LINK_DESCRIPTOR_COUNTER1)]; + st->link_desc_counter1 = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4, + LINK_DESCRIPTOR_COUNTER1, + val); + + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5, + LINK_DESCRIPTOR_COUNTER2)]; + st->link_desc_counter2 = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5, + LINK_DESCRIPTOR_COUNTER2, + val); + + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6, + LINK_DESCRIPTOR_COUNTER_SUM)]; + st->link_desc_counter_sum = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6, + LINK_DESCRIPTOR_COUNTER_SUM, + val); +} +qdf_export_symbol(hal_reo_desc_thres_reached_status); + +inline void +hal_reo_rx_update_queue_status(uint32_t *reo_desc, + struct hal_reo_update_rx_queue_status *st, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + hal_reo_status_get_header(reo_desc, + HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV, + &(st->header), hal_soc); +} +qdf_export_symbol(hal_reo_rx_update_queue_status); + +/** + * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG + * with command number + * @hal_soc: Handle to HAL SoC structure + * @hal_ring: Handle to HAL SRNG structure + * + * Return: none + */ +inline void hal_reo_init_cmd_ring(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl) +{ + int cmd_num; + uint32_t *desc_addr; + struct hal_srng_params srng_params; + uint32_t desc_size; + uint32_t num_desc; + struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl; + + hal_get_srng_params(hal_soc_hdl, hal_ring_hdl, &srng_params); + + desc_addr = (uint32_t *)(srng_params.ring_base_vaddr); + desc_addr += (sizeof(struct tlv_32_hdr) >> 2); + desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2; + num_desc = srng_params.num_entries; + cmd_num = 1; + while (num_desc) { + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0, + REO_CMD_NUMBER, cmd_num); + desc_addr += desc_size; + num_desc--; cmd_num++; + } + + soc->reo_res_bitmap = 0; +} +qdf_export_symbol(hal_reo_init_cmd_ring); diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.h new file mode 100644 index 0000000000000000000000000000000000000000..e6d4ba1fede6810aea743829afd0de0cdcff9139 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.h @@ -0,0 +1,547 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HAL_REO_H_ +#define _HAL_REO_H_ + +#include +/* HW headers */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* SW headers */ +#include "hal_api.h" + +/*--------------------------------------------------------------------------- + Preprocessor definitions and constants + ---------------------------------------------------------------------------*/ + +/* TLV values */ +#define HAL_REO_GET_QUEUE_STATS_TLV WIFIREO_GET_QUEUE_STATS_E +#define HAL_REO_FLUSH_QUEUE_TLV WIFIREO_FLUSH_QUEUE_E +#define HAL_REO_FLUSH_CACHE_TLV WIFIREO_FLUSH_CACHE_E +#define HAL_REO_UNBLOCK_CACHE_TLV WIFIREO_UNBLOCK_CACHE_E +#define HAL_REO_FLUSH_TIMEOUT_LIST_TLV WIFIREO_FLUSH_TIMEOUT_LIST_E +#define HAL_REO_RX_UPDATE_QUEUE_TLV WIFIREO_UPDATE_RX_REO_QUEUE_E + +#define HAL_REO_QUEUE_STATS_STATUS_TLV WIFIREO_GET_QUEUE_STATS_STATUS_E +#define HAL_REO_FLUSH_QUEUE_STATUS_TLV WIFIREO_FLUSH_QUEUE_STATUS_E +#define HAL_REO_FLUSH_CACHE_STATUS_TLV WIFIREO_FLUSH_CACHE_STATUS_E +#define HAL_REO_UNBLK_CACHE_STATUS_TLV WIFIREO_UNBLOCK_CACHE_STATUS_E +#define HAL_REO_TIMOUT_LIST_STATUS_TLV WIFIREO_FLUSH_TIMEOUT_LIST_STATUS_E +#define HAL_REO_DESC_THRES_STATUS_TLV \ + WIFIREO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_E +#define HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV WIFIREO_UPDATE_RX_REO_QUEUE_STATUS_E + +#define HAL_SET_FIELD(block, field, value) \ + ((value << (block ## _ ## field ## _LSB)) & \ + (block ## _ ## field ## _MASK)) + +#define HAL_GET_FIELD(block, field, value) \ + ((value & (block ## _ ## field ## _MASK)) >> \ + (block ## _ ## field ## _LSB)) + +#define HAL_SET_TLV_HDR(desc, tag, len) \ + do { \ + ((struct tlv_32_hdr *) desc)->tlv_tag = tag; \ + ((struct tlv_32_hdr *) desc)->tlv_len = len; \ + } while (0) + +#define HAL_GET_TLV(desc) (((struct tlv_32_hdr *) desc)->tlv_tag) + +#define HAL_OFFSET_DW(_block, _field) (HAL_OFFSET(_block, _field) >> 2) +/* dword offsets in REO cmd TLV */ +#define CMD_HEADER_DW_OFFSET 0 + +/** + * enum reo_unblock_cache_type: Enum for unblock type in REO unblock command + * @UNBLOCK_RES_INDEX: Unblock a block resource + * @UNBLOCK_CACHE: Unblock cache + */ +enum reo_unblock_cache_type { + UNBLOCK_RES_INDEX = 0, + UNBLOCK_CACHE = 1 +}; + +/** + * enum reo_thres_index_reg: Enum for reo descriptor usage counter for + * which threshold status is being indicated. + * @reo_desc_counter0_threshold: counter0 reached threshold + * @reo_desc_counter1_threshold: counter1 reached threshold + * @reo_desc_counter2_threshold: counter2 reached threshold + * @reo_desc_counter_sum_threshold: Total count reached threshold + */ +enum reo_thres_index_reg { + reo_desc_counter0_threshold = 0, + reo_desc_counter1_threshold = 1, + reo_desc_counter2_threshold = 2, + reo_desc_counter_sum_threshold = 3 +}; + +/** + * enum reo_cmd_exec_status: Enum for execution status of REO command + * + * @HAL_REO_CMD_SUCCESS: Command has successfully be executed + * @HAL_REO_CMD_BLOCKED: Command could not be executed as the queue or cache + * was blocked + * @HAL_REO_CMD_FAILED: Command has encountered problems when executing, like + * the queue descriptor not being valid + */ +enum reo_cmd_exec_status { + HAL_REO_CMD_SUCCESS = 0, + HAL_REO_CMD_BLOCKED = 1, + HAL_REO_CMD_FAILED = 2, + HAL_REO_CMD_RESOURCE_BLOCKED = 3, + HAL_REO_CMD_DRAIN = 0xff +}; + +/** + * enum hal_reo_cmd_type: Enum for REO command type + * @CMD_GET_QUEUE_STATS: Get REO queue status/stats + * @CMD_FLUSH_QUEUE: Flush all frames in REO queue + * @CMD_FLUSH_CACHE: Flush descriptor entries in the cache + * @CMD_UNBLOCK_CACHE: Unblock a descriptor’s address that was blocked + * earlier with a ‘REO_FLUSH_CACHE’ command + * @CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list + * @CMD_UPDATE_RX_REO_QUEUE: Update REO queue settings + */ +enum hal_reo_cmd_type { + CMD_GET_QUEUE_STATS = 0, + CMD_FLUSH_QUEUE = 1, + CMD_FLUSH_CACHE = 2, + CMD_UNBLOCK_CACHE = 3, + CMD_FLUSH_TIMEOUT_LIST = 4, + CMD_UPDATE_RX_REO_QUEUE = 5 +}; + +/** + * struct hal_reo_cmd_params_std: Standard REO command parameters + * @need_status: Status required for the command + * @addr_lo: Lower 32 bits of REO queue descriptor address + * @addr_hi: Upper 8 bits of REO queue descriptor address + */ +struct hal_reo_cmd_params_std { + bool need_status; + uint32_t addr_lo; + uint8_t addr_hi; +}; + +/** + * struct hal_reo_cmd_get_queue_stats_params: Parameters to + * CMD_GET_QUEUE_STATScommand + * @clear: Clear stats after retreiving + */ +struct hal_reo_cmd_get_queue_stats_params { + bool clear; +}; + +/** + * struct hal_reo_cmd_flush_queue_params: Parameters to CMD_FLUSH_QUEUE + * @use_after_flush: Block usage after flush till unblock command + * @index: Blocking resource to be used + */ +struct hal_reo_cmd_flush_queue_params { + bool block_use_after_flush; + uint8_t index; +}; + +/** + * struct hal_reo_cmd_flush_cache_params: Parameters to CMD_FLUSH_CACHE + * @fwd_mpdus_in_queue: Forward MPDUs before flushing descriptor + * @rel_block_index: Release blocking resource used earlier + * @cache_block_res_index: Blocking resource to be used + * @flush_no_inval: Flush without invalidatig descriptor + * @use_after_flush: Block usage after flush till unblock command + * @flush_entire_cache: Flush entire REO cache + */ +struct hal_reo_cmd_flush_cache_params { + bool fwd_mpdus_in_queue; + bool rel_block_index; + uint8_t cache_block_res_index; + bool flush_no_inval; + bool block_use_after_flush; + bool flush_entire_cache; +}; + +/** + * struct hal_reo_cmd_unblock_cache_params: Parameters to CMD_UNBLOCK_CACHE + * @type: Unblock type (enum reo_unblock_cache_type) + * @index: Blocking index to be released + */ +struct hal_reo_cmd_unblock_cache_params { + enum reo_unblock_cache_type type; + uint8_t index; +}; + +/** + * struct hal_reo_cmd_flush_timeout_list_params: Parameters to + * CMD_FLUSH_TIMEOUT_LIST + * @ac_list: AC timeout list to be flushed + * @min_rel_desc: Min. number of link descriptors to be release + * @min_fwd_buf: Min. number of buffers to be forwarded + */ +struct hal_reo_cmd_flush_timeout_list_params { + uint8_t ac_list; + uint16_t min_rel_desc; + uint16_t min_fwd_buf; +}; + +/** + * struct hal_reo_cmd_update_queue_params: Parameters to CMD_UPDATE_RX_REO_QUEUE + * @update_rx_queue_num: Update receive queue number + * @update_vld: Update valid bit + * @update_assoc_link_desc: Update associated link descriptor + * @update_disable_dup_detect: Update duplicate detection + * @update_soft_reorder_enab: Update soft reorder enable + * @update_ac: Update access category + * @update_bar: Update BAR received bit + * @update_rty: Update retry bit + * @update_chk_2k_mode: Update chk_2k_mode setting + * @update_oor_mode: Update OOR mode setting + * @update_ba_window_size: Update BA window size + * @update_pn_check_needed: Update pn_check_needed + * @update_pn_even: Update pn_even + * @update_pn_uneven: Update pn_uneven + * @update_pn_hand_enab: Update pn_handling_enable + * @update_pn_size: Update pn_size + * @update_ignore_ampdu: Update ignore_ampdu + * @update_svld: update svld + * @update_ssn: Update SSN + * @update_seq_2k_err_detect: Update seq_2k_err_detected flag + * @update_pn_err_detect: Update pn_err_detected flag + * @update_pn_valid: Update pn_valid + * @update_pn: Update PN + * @rx_queue_num: rx_queue_num to be updated + * @vld: valid bit to be updated + * @assoc_link_desc: assoc_link_desc counter + * @disable_dup_detect: disable_dup_detect to be updated + * @soft_reorder_enab: soft_reorder_enab to be updated + * @ac: AC to be updated + * @bar: BAR flag to be updated + * @rty: RTY flag to be updated + * @chk_2k_mode: check_2k_mode setting to be updated + * @oor_mode: oor_mode to be updated + * @pn_check_needed: pn_check_needed to be updated + * @pn_even: pn_even to be updated + * @pn_uneven: pn_uneven to be updated + * @pn_hand_enab: pn_handling_enable to be updated + * @ignore_ampdu: ignore_ampdu to be updated + * @ba_window_size: BA window size to be updated + * @pn_size: pn_size to be updated + * @svld: svld flag to be updated + * @ssn: SSN to be updated + * @seq_2k_err_detect: seq_2k_err_detected flag to be updated + * @pn_err_detect: pn_err_detected flag to be updated + * @pn_31_0: PN bits 31-0 + * @pn_63_32: PN bits 63-32 + * @pn_95_64: PN bits 95-64 + * @pn_127_96: PN bits 127-96 + */ +struct hal_reo_cmd_update_queue_params { + uint32_t update_rx_queue_num:1, + update_vld:1, + update_assoc_link_desc:1, + update_disable_dup_detect:1, + update_soft_reorder_enab:1, + update_ac:1, + update_bar:1, + update_rty:1, + update_chk_2k_mode:1, + update_oor_mode:1, + update_ba_window_size:1, + update_pn_check_needed:1, + update_pn_even:1, + update_pn_uneven:1, + update_pn_hand_enab:1, + update_pn_size:1, + update_ignore_ampdu:1, + update_svld:1, + update_ssn:1, + update_seq_2k_err_detect:1, + update_pn_err_detect:1, + update_pn_valid:1, + update_pn:1; + uint32_t rx_queue_num:16, + vld:1, + assoc_link_desc:2, + disable_dup_detect:1, + soft_reorder_enab:1, + ac:2, + bar:1, + rty:1, + chk_2k_mode:1, + oor_mode:1, + pn_check_needed:1, + pn_even:1, + pn_uneven:1, + pn_hand_enab:1, + ignore_ampdu:1; + uint32_t ba_window_size:9, + pn_size:8, + svld:1, + ssn:12, + seq_2k_err_detect:1, + pn_err_detect:1; + uint32_t pn_31_0:32; + uint32_t pn_63_32:32; + uint32_t pn_95_64:32; + uint32_t pn_127_96:32; +}; + +/** + * struct hal_reo_cmd_params: Common structure to pass REO command parameters + * @hal_reo_cmd_params_std: Standard parameters + * @u: Union of various REO command parameters + */ +struct hal_reo_cmd_params { + struct hal_reo_cmd_params_std std; + union { + struct hal_reo_cmd_get_queue_stats_params stats_params; + struct hal_reo_cmd_flush_queue_params fl_queue_params; + struct hal_reo_cmd_flush_cache_params fl_cache_params; + struct hal_reo_cmd_unblock_cache_params unblk_cache_params; + struct hal_reo_cmd_flush_timeout_list_params fl_tim_list_params; + struct hal_reo_cmd_update_queue_params upd_queue_params; + } u; +}; + +/** + * struct hal_reo_status_header: Common REO status header + * @cmd_num: Command number + * @exec_time: execution time + * @status: command execution status + * @tstamp: Timestamp of status updated + */ +struct hal_reo_status_header { + uint16_t cmd_num; + uint16_t exec_time; + enum reo_cmd_exec_status status; + uint32_t tstamp; +}; + +/** + * struct hal_reo_queue_status: REO queue status structure + * @header: Common REO status header + * @ssn: SSN of current BA window + * @curr_idx: last forwarded pkt + * @pn_31_0, pn_63_32, pn_95_64, pn_127_96: + * PN number bits extracted from IV field + * @last_rx_enq_tstamp: Last enqueue timestamp + * @last_rx_deq_tstamp: Last dequeue timestamp + * @rx_bitmap_31_0, rx_bitmap_63_32, rx_bitmap_95_64 + * @rx_bitmap_127_96, rx_bitmap_159_128, rx_bitmap_191_160 + * @rx_bitmap_223_192, rx_bitmap_255_224: Each bit corresonds to a frame + * held in re-order queue + * @curr_mpdu_cnt, curr_msdu_cnt: Number of MPDUs and MSDUs in the queue + * @fwd_timeout_cnt: Frames forwarded due to timeout + * @fwd_bar_cnt: Frames forwarded BAR frame + * @dup_cnt: duplicate frames detected + * @frms_in_order_cnt: Frames received in order + * @bar_rcvd_cnt: BAR frame count + * @mpdu_frms_cnt, msdu_frms_cnt, total_cnt: MPDU, MSDU, total frames + processed by REO + * @late_recv_mpdu_cnt; received after window had moved on + * @win_jump_2k: 2K jump count + * @hole_cnt: sequence hole count + */ +struct hal_reo_queue_status { + struct hal_reo_status_header header; + uint16_t ssn; + uint8_t curr_idx; + uint32_t pn_31_0, pn_63_32, pn_95_64, pn_127_96; + uint32_t last_rx_enq_tstamp, last_rx_deq_tstamp; + uint32_t rx_bitmap_31_0, rx_bitmap_63_32, rx_bitmap_95_64; + uint32_t rx_bitmap_127_96, rx_bitmap_159_128, rx_bitmap_191_160; + uint32_t rx_bitmap_223_192, rx_bitmap_255_224; + uint8_t curr_mpdu_cnt, curr_msdu_cnt; + uint8_t fwd_timeout_cnt, fwd_bar_cnt; + uint16_t dup_cnt; + uint32_t frms_in_order_cnt; + uint8_t bar_rcvd_cnt; + uint32_t mpdu_frms_cnt, msdu_frms_cnt, total_cnt; + uint16_t late_recv_mpdu_cnt; + uint8_t win_jump_2k; + uint16_t hole_cnt; +}; + +/** + * struct hal_reo_flush_queue_status: FLUSH_QUEUE status structure + * @header: Common REO status header + * @error: Error detected + */ +struct hal_reo_flush_queue_status { + struct hal_reo_status_header header; + bool error; +}; + +/** + * struct hal_reo_flush_cache_status: FLUSH_CACHE status structure + * @header: Common REO status header + * @error: Error detected + * @block_error: Blocking related error + * @cache_flush_status: Cache hit/miss + * @cache_flush_status_desc_type: type of descriptor flushed + * @cache_flush_cnt: number of lines actually flushed + */ +struct hal_reo_flush_cache_status { + struct hal_reo_status_header header; + bool error; + uint8_t block_error; + bool cache_flush_status; + uint8_t cache_flush_status_desc_type; + uint8_t cache_flush_cnt; +}; + +/** + * struct hal_reo_unblk_cache_status: UNBLOCK_CACHE status structure + * @header: Common REO status header + * @error: error detected + * unblock_type: resoure or cache + */ +struct hal_reo_unblk_cache_status { + struct hal_reo_status_header header; + bool error; + enum reo_unblock_cache_type unblock_type; +}; + +/** + * struct hal_reo_flush_timeout_list_status: FLUSH_TIMEOUT_LIST status structure + * @header: Common REO status header + * @error: error detected + * @list_empty: timeout list empty + * @rel_desc_cnt: number of link descriptors released + * @fwd_buf_cnt: number of buffers forwarded to REO destination ring + */ +struct hal_reo_flush_timeout_list_status { + struct hal_reo_status_header header; + bool error; + bool list_empty; + uint16_t rel_desc_cnt; + uint16_t fwd_buf_cnt; +}; + +/** + * struct hal_reo_desc_thres_reached_status: desc_thres_reached status structure + * @header: Common REO status header + * @thres_index: Index of descriptor threshold counter + * @link_desc_counter0, link_desc_counter1, link_desc_counter2: descriptor + * counter values + * @link_desc_counter_sum: overall descriptor count + */ +struct hal_reo_desc_thres_reached_status { + struct hal_reo_status_header header; + enum reo_thres_index_reg thres_index; + uint32_t link_desc_counter0, link_desc_counter1, link_desc_counter2; + uint32_t link_desc_counter_sum; +}; + +/** + * struct hal_reo_update_rx_queue_status: UPDATE_RX_QUEUE status structure + * @header: Common REO status header + */ +struct hal_reo_update_rx_queue_status { + struct hal_reo_status_header header; +}; + +/** + * union hal_reo_status: Union to pass REO status to callbacks + * @queue_status: Refer to struct hal_reo_queue_status + * @fl_cache_status: Refer to struct hal_reo_flush_cache_status + * @fl_queue_status: Refer to struct hal_reo_flush_queue_status + * @fl_timeout_status: Refer to struct hal_reo_flush_timeout_list_status + * @unblk_cache_status: Refer to struct hal_reo_unblk_cache_status + * @thres_status: struct hal_reo_desc_thres_reached_status + * @rx_queue_status: struct hal_reo_update_rx_queue_status + */ +union hal_reo_status { + struct hal_reo_queue_status queue_status; + struct hal_reo_flush_cache_status fl_cache_status; + struct hal_reo_flush_queue_status fl_queue_status; + struct hal_reo_flush_timeout_list_status fl_timeout_status; + struct hal_reo_unblk_cache_status unblk_cache_status; + struct hal_reo_desc_thres_reached_status thres_status; + struct hal_reo_update_rx_queue_status rx_queue_status; +}; + +/* Prototypes */ +/* REO command ring routines */ +void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc, + enum hal_reo_cmd_type type, + uint32_t paddr_lo, + uint8_t paddr_hi); +int hal_reo_cmd_queue_stats(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd); +int hal_reo_cmd_flush_queue(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd); +int hal_reo_cmd_flush_cache(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd); +int hal_reo_cmd_unblock_cache(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd); +int hal_reo_cmd_flush_timeout_list(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd); +int hal_reo_cmd_update_rx_queue(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd); + +/* REO status ring routines */ +void hal_reo_queue_stats_status(uint32_t *reo_desc, + struct hal_reo_queue_status *st, + hal_soc_handle_t hal_soc_hdl); +void hal_reo_flush_queue_status(uint32_t *reo_desc, + struct hal_reo_flush_queue_status *st, + hal_soc_handle_t hal_soc_hdl); +void hal_reo_flush_cache_status(uint32_t *reo_desc, + struct hal_reo_flush_cache_status *st, + hal_soc_handle_t hal_soc_hdl); +void hal_reo_unblock_cache_status(uint32_t *reo_desc, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_unblk_cache_status *st); +void hal_reo_flush_timeout_list_status( + uint32_t *reo_desc, + struct hal_reo_flush_timeout_list_status *st, + hal_soc_handle_t hal_soc_hdl); +void hal_reo_desc_thres_reached_status( + uint32_t *reo_desc, + struct hal_reo_desc_thres_reached_status *st, + hal_soc_handle_t hal_soc_hdl); +void hal_reo_rx_update_queue_status(uint32_t *reo_desc, + struct hal_reo_update_rx_queue_status *st, + hal_soc_handle_t hal_soc_hdl); + +void hal_reo_init_cmd_ring(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl); + +#endif /* _HAL_REO_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_rx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_rx.h new file mode 100644 index 0000000000000000000000000000000000000000..2de9b90d51f41e876ba986f76fcd0a7598af276b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_rx.h @@ -0,0 +1,3807 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HAL_RX_H_ +#define _HAL_RX_H_ + +#include + +#define HAL_RX_OFFSET(block, field) block##_##field##_OFFSET +#define HAL_RX_LSB(block, field) block##_##field##_LSB +#define HAL_RX_MASk(block, field) block##_##field##_MASK + +#define HAL_RX_GET(_ptr, block, field) \ + (((*((volatile uint32_t *)_ptr + (HAL_RX_OFFSET(block, field)>>2))) & \ + HAL_RX_MASk(block, field)) >> \ + HAL_RX_LSB(block, field)) + +/* BUFFER_SIZE = 1536 data bytes + 384 RX TLV bytes + some spare bytes */ +#ifndef RX_DATA_BUFFER_SIZE +#define RX_DATA_BUFFER_SIZE 2048 +#endif + +#ifndef RX_MONITOR_BUFFER_SIZE +#define RX_MONITOR_BUFFER_SIZE 2048 +#endif + +/* HAL_RX_NON_QOS_TID = NON_QOS_TID which is 16 */ +#define HAL_RX_NON_QOS_TID 16 + +enum { + HAL_HW_RX_DECAP_FORMAT_RAW = 0, + HAL_HW_RX_DECAP_FORMAT_NWIFI, + HAL_HW_RX_DECAP_FORMAT_ETH2, + HAL_HW_RX_DECAP_FORMAT_8023, +}; + +/** + * struct hal_wbm_err_desc_info: structure to hold wbm error codes and reasons + * + * @reo_psh_rsn: REO push reason + * @reo_err_code: REO Error code + * @rxdma_psh_rsn: RXDMA push reason + * @rxdma_err_code: RXDMA Error code + * @reserved_1: Reserved bits + * @wbm_err_src: WBM error source + * @pool_id: pool ID, indicates which rxdma pool + * @reserved_2: Reserved bits + */ +struct hal_wbm_err_desc_info { + uint16_t reo_psh_rsn:2, + reo_err_code:5, + rxdma_psh_rsn:2, + rxdma_err_code:5, + reserved_1:2; + uint8_t wbm_err_src:3, + pool_id:2, + reserved_2:3; +}; + +/** + * struct hal_rx_msdu_metadata:Structure to hold rx fast path information. + * + * @l3_hdr_pad: l3 header padding + * @reserved: Reserved bits + * @sa_sw_peer_id: sa sw peer id + * @sa_idx: sa index + * @da_idx: da index + */ +struct hal_rx_msdu_metadata { + uint32_t l3_hdr_pad:16, + sa_sw_peer_id:16; + uint32_t sa_idx:16, + da_idx:16; +}; + +/** + * enum hal_reo_error_code: Enum which encapsulates "reo_push_reason" + * + * @ HAL_REO_ERROR_DETECTED: Packets arrived because of an error detected + * @ HAL_REO_ROUTING_INSTRUCTION: Packets arrived because of REO routing + */ +enum hal_reo_error_status { + HAL_REO_ERROR_DETECTED = 0, + HAL_REO_ROUTING_INSTRUCTION = 1, +}; + +/** + * @msdu_flags: [0] first_msdu_in_mpdu + * [1] last_msdu_in_mpdu + * [2] msdu_continuation - MSDU spread across buffers + * [23] sa_is_valid - SA match in peer table + * [24] sa_idx_timeout - Timeout while searching for SA match + * [25] da_is_valid - Used to identtify intra-bss forwarding + * [26] da_is_MCBC + * [27] da_idx_timeout - Timeout while searching for DA match + * + */ +struct hal_rx_msdu_desc_info { + uint32_t msdu_flags; + uint16_t msdu_len; /* 14 bits for length */ +}; + +/** + * enum hal_rx_msdu_desc_flags: Enum for flags in MSDU_DESC_INFO + * + * @ HAL_MSDU_F_FIRST_MSDU_IN_MPDU: First MSDU in MPDU + * @ HAL_MSDU_F_LAST_MSDU_IN_MPDU: Last MSDU in MPDU + * @ HAL_MSDU_F_MSDU_CONTINUATION: MSDU continuation + * @ HAL_MSDU_F_SA_IS_VALID: Found match for SA in AST + * @ HAL_MSDU_F_SA_IDX_TIMEOUT: AST search for SA timed out + * @ HAL_MSDU_F_DA_IS_VALID: Found match for DA in AST + * @ HAL_MSDU_F_DA_IS_MCBC: DA is MC/BC address + * @ HAL_MSDU_F_DA_IDX_TIMEOUT: AST search for DA timed out + */ +enum hal_rx_msdu_desc_flags { + HAL_MSDU_F_FIRST_MSDU_IN_MPDU = (0x1 << 0), + HAL_MSDU_F_LAST_MSDU_IN_MPDU = (0x1 << 1), + HAL_MSDU_F_MSDU_CONTINUATION = (0x1 << 2), + HAL_MSDU_F_SA_IS_VALID = (0x1 << 23), + HAL_MSDU_F_SA_IDX_TIMEOUT = (0x1 << 24), + HAL_MSDU_F_DA_IS_VALID = (0x1 << 25), + HAL_MSDU_F_DA_IS_MCBC = (0x1 << 26), + HAL_MSDU_F_DA_IDX_TIMEOUT = (0x1 << 27) +}; + +/* + * @msdu_count: no. of msdus in the MPDU + * @mpdu_seq: MPDU sequence number + * @mpdu_flags [0] Fragment flag + * [1] MPDU_retry_bit + * [2] AMPDU flag + * [3] raw_ampdu + * @peer_meta_data: Upper bits containing peer id, vdev id + * @bar_frame: indicates if received frame is a bar frame + */ +struct hal_rx_mpdu_desc_info { + uint16_t msdu_count; + uint16_t mpdu_seq; /* 12 bits for length */ + uint32_t mpdu_flags; + uint32_t peer_meta_data; /* sw progamed meta-data:MAC Id & peer Id */ + uint16_t bar_frame; +}; + +/** + * enum hal_rx_mpdu_desc_flags: Enum for flags in MPDU_DESC_INFO + * + * @ HAL_MPDU_F_FRAGMENT: Fragmented MPDU (802.11 fragemtation) + * @ HAL_MPDU_F_RETRY_BIT: Retry bit is set in FC of MPDU + * @ HAL_MPDU_F_AMPDU_FLAG: MPDU received as part of A-MPDU + * @ HAL_MPDU_F_RAW_AMPDU: MPDU is a Raw MDPU + */ +enum hal_rx_mpdu_desc_flags { + HAL_MPDU_F_FRAGMENT = (0x1 << 20), + HAL_MPDU_F_RETRY_BIT = (0x1 << 21), + HAL_MPDU_F_AMPDU_FLAG = (0x1 << 22), + HAL_MPDU_F_RAW_AMPDU = (0x1 << 30) +}; + +/** + * enum hal_rx_ret_buf_manager: Enum for return_buffer_manager field in + * BUFFER_ADDR_INFO structure + * + * @ HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list + * @ HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST: Descriptor returned to WBM idle + * descriptor list + * @ HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW + * @ HAL_RX_BUF_RBM_SW0_BM: For Tx completion -- returned to host + * @ HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host + * @ HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host + * @ HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host + */ +enum hal_rx_ret_buf_manager { + HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST = 0, + HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST = 1, + HAL_RX_BUF_RBM_FW_BM = 2, + HAL_RX_BUF_RBM_SW0_BM = 3, + HAL_RX_BUF_RBM_SW1_BM = 4, + HAL_RX_BUF_RBM_SW2_BM = 5, + HAL_RX_BUF_RBM_SW3_BM = 6, +}; + +/* + * Given the offset of a field in bytes, returns uint8_t * + */ +#define _OFFSET_TO_BYTE_PTR(_ptr, _off_in_bytes) \ + (((uint8_t *)(_ptr)) + (_off_in_bytes)) + +/* + * Given the offset of a field in bytes, returns uint32_t * + */ +#define _OFFSET_TO_WORD_PTR(_ptr, _off_in_bytes) \ + (((uint32_t *)(_ptr)) + ((_off_in_bytes) >> 2)) + +#define _HAL_MS(_word, _mask, _shift) \ + (((_word) & (_mask)) >> (_shift)) + +/* + * macro to set the LSW of the nbuf data physical address + * to the rxdma ring entry + */ +#define HAL_RXDMA_PADDR_LO_SET(buff_addr_info, paddr_lo) \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_OFFSET >> 2))) = \ + (paddr_lo << BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB) & \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK) + +/* + * macro to set the LSB of MSW of the nbuf data physical address + * to the rxdma ring entry + */ +#define HAL_RXDMA_PADDR_HI_SET(buff_addr_info, paddr_hi) \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_OFFSET >> 2))) = \ + (paddr_hi << BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB) & \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK) + +#define HAL_RX_COOKIE_INVALID_MASK 0x80000000 + +/* + * macro to get the invalid bit for sw cookie + */ +#define HAL_RX_BUF_COOKIE_INVALID_GET(buff_addr_info) \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_OFFSET >> 2))) & \ + HAL_RX_COOKIE_INVALID_MASK) + +/* + * macro to set the invalid bit for sw cookie + */ +#define HAL_RX_BUF_COOKIE_INVALID_SET(buff_addr_info) \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_OFFSET >> 2))) |= \ + HAL_RX_COOKIE_INVALID_MASK) + +/* + * macro to set the cookie into the rxdma ring entry + */ +#define HAL_RXDMA_COOKIE_SET(buff_addr_info, cookie) \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_OFFSET >> 2))) &= \ + ~BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK); \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_OFFSET >> 2))) |= \ + (cookie << BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB) & \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK) + +/* + * macro to set the manager into the rxdma ring entry + */ +#define HAL_RXDMA_MANAGER_SET(buff_addr_info, manager) \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_OFFSET >> 2))) &= \ + ~BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK); \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_OFFSET >> 2))) |= \ + (manager << BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB) & \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK) + +#define HAL_RX_ERROR_STATUS_GET(reo_desc) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(reo_desc, \ + REO_DESTINATION_RING_7_REO_PUSH_REASON_OFFSET)),\ + REO_DESTINATION_RING_7_REO_PUSH_REASON_MASK, \ + REO_DESTINATION_RING_7_REO_PUSH_REASON_LSB)) + +#define HAL_RX_BUF_COOKIE_GET(buff_addr_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(buff_addr_info, \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_OFFSET)), \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK, \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB)) + +#define HAL_RX_BUFFER_ADDR_39_32_GET(buff_addr_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(buff_addr_info, \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_OFFSET)), \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK, \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB)) + +#define HAL_RX_BUFFER_ADDR_31_0_GET(buff_addr_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(buff_addr_info, \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_OFFSET)), \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK, \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB)) + +#define HAL_RX_BUF_RBM_GET(buff_addr_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(buff_addr_info, \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_OFFSET)),\ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK, \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB)) + +#define HAL_RX_LINK_COOKIE_INVALID_MASK 0x40000000 + +#define HAL_RX_BUF_LINK_COOKIE_INVALID_GET(buff_addr_info) \ + ((*(((unsigned int *)buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_OFFSET >> 2))) & \ + HAL_RX_LINK_COOKIE_INVALID_MASK) + +#define HAL_RX_BUF_LINK_COOKIE_INVALID_SET(buff_addr_info) \ + ((*(((unsigned int *)buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_OFFSET >> 2))) |= \ + HAL_RX_LINK_COOKIE_INVALID_MASK) + +#define HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(reo_desc) \ + (HAL_RX_BUF_LINK_COOKIE_INVALID_GET(& \ + (((struct reo_destination_ring *) \ + reo_desc)->buf_or_link_desc_addr_info))) + +#define HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(reo_desc) \ + (HAL_RX_BUF_LINK_COOKIE_INVALID_SET(& \ + (((struct reo_destination_ring *) \ + reo_desc)->buf_or_link_desc_addr_info))) + +/* TODO: Convert the following structure fields accesseses to offsets */ + +#define HAL_RX_REO_BUFFER_ADDR_39_32_GET(reo_desc) \ + (HAL_RX_BUFFER_ADDR_39_32_GET(& \ + (((struct reo_destination_ring *) \ + reo_desc)->buf_or_link_desc_addr_info))) + +#define HAL_RX_REO_BUFFER_ADDR_31_0_GET(reo_desc) \ + (HAL_RX_BUFFER_ADDR_31_0_GET(& \ + (((struct reo_destination_ring *) \ + reo_desc)->buf_or_link_desc_addr_info))) + +#define HAL_RX_REO_BUF_COOKIE_INVALID_GET(reo_desc) \ + (HAL_RX_BUF_COOKIE_INVALID_GET(& \ + (((struct reo_destination_ring *) \ + reo_desc)->buf_or_link_desc_addr_info))) + +#define HAL_RX_REO_BUF_COOKIE_INVALID_SET(reo_desc) \ + (HAL_RX_BUF_COOKIE_INVALID_SET(& \ + (((struct reo_destination_ring *) \ + reo_desc)->buf_or_link_desc_addr_info))) + +#define HAL_RX_REO_BUF_COOKIE_GET(reo_desc) \ + (HAL_RX_BUF_COOKIE_GET(& \ + (((struct reo_destination_ring *) \ + reo_desc)->buf_or_link_desc_addr_info))) + +#define HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_info_ptr) \ + ((mpdu_info_ptr \ + [RX_MPDU_DESC_INFO_0_MPDU_SEQUENCE_NUMBER_OFFSET >> 2] & \ + RX_MPDU_DESC_INFO_0_MPDU_SEQUENCE_NUMBER_MASK) >> \ + RX_MPDU_DESC_INFO_0_MPDU_SEQUENCE_NUMBER_LSB) + +#define HAL_RX_MPDU_DESC_PEER_META_DATA_GET(mpdu_info_ptr) \ + ((mpdu_info_ptr \ + [RX_MPDU_DESC_INFO_1_PEER_META_DATA_OFFSET >> 2] & \ + RX_MPDU_DESC_INFO_1_PEER_META_DATA_MASK) >> \ + RX_MPDU_DESC_INFO_1_PEER_META_DATA_LSB) + +#define HAL_RX_MPDU_MSDU_COUNT_GET(mpdu_info_ptr) \ + ((mpdu_info_ptr[RX_MPDU_DESC_INFO_0_MSDU_COUNT_OFFSET >> 2] & \ + RX_MPDU_DESC_INFO_0_MSDU_COUNT_MASK) >> \ + RX_MPDU_DESC_INFO_0_MSDU_COUNT_LSB) + +#define HAL_RX_MPDU_FRAGMENT_FLAG_GET(mpdu_info_ptr) \ + (mpdu_info_ptr[RX_MPDU_DESC_INFO_0_FRAGMENT_FLAG_OFFSET >> 2] & \ + RX_MPDU_DESC_INFO_0_FRAGMENT_FLAG_MASK) + +#define HAL_RX_MPDU_RETRY_BIT_GET(mpdu_info_ptr) \ + (mpdu_info_ptr[RX_MPDU_DESC_INFO_0_MPDU_RETRY_BIT_OFFSET >> 2] & \ + RX_MPDU_DESC_INFO_0_MPDU_RETRY_BIT_MASK) + +#define HAL_RX_MPDU_AMPDU_FLAG_GET(mpdu_info_ptr) \ + (mpdu_info_ptr[RX_MPDU_DESC_INFO_0_AMPDU_FLAG_OFFSET >> 2] & \ + RX_MPDU_DESC_INFO_0_AMPDU_FLAG_MASK) + +#define HAL_RX_MPDU_RAW_MPDU_GET(mpdu_info_ptr) \ + (mpdu_info_ptr[RX_MPDU_DESC_INFO_0_RAW_MPDU_OFFSET >> 2] & \ + RX_MPDU_DESC_INFO_0_RAW_MPDU_MASK) + +#define HAL_RX_MPDU_FLAGS_GET(mpdu_info_ptr) \ + (HAL_RX_MPDU_FRAGMENT_FLAG_GET(mpdu_info_ptr) | \ + HAL_RX_MPDU_RETRY_BIT_GET(mpdu_info_ptr) | \ + HAL_RX_MPDU_AMPDU_FLAG_GET(mpdu_info_ptr) | \ + HAL_RX_MPDU_RAW_MPDU_GET(mpdu_info_ptr)) + +#define HAL_RX_MPDU_BAR_FRAME_GET(mpdu_info_ptr) \ + ((mpdu_info_ptr[RX_MPDU_DESC_INFO_0_BAR_FRAME_OFFSET >> 2] & \ + RX_MPDU_DESC_INFO_0_BAR_FRAME_MASK) >> \ + RX_MPDU_DESC_INFO_0_BAR_FRAME_LSB) + + +#define HAL_RX_MSDU_PKT_LENGTH_GET(msdu_info_ptr) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_MSDU_LENGTH_OFFSET)), \ + RX_MSDU_DESC_INFO_0_MSDU_LENGTH_MASK, \ + RX_MSDU_DESC_INFO_0_MSDU_LENGTH_LSB)) + +/* + * NOTE: None of the following _GET macros need a right + * shift by the corresponding _LSB. This is because, they are + * finally taken and "OR'ed" into a single word again. + */ +#define HAL_RX_FIRST_MSDU_IN_MPDU_FLAG_SET(msdu_info_ptr, val) \ + ((*(((uint32_t *)msdu_info_ptr) + \ + (RX_MSDU_DESC_INFO_0_FIRST_MSDU_IN_MPDU_FLAG_OFFSET >> 2))) |= \ + (val << RX_MSDU_DESC_INFO_0_FIRST_MSDU_IN_MPDU_FLAG_LSB) & \ + RX_MSDU_DESC_INFO_0_FIRST_MSDU_IN_MPDU_FLAG_MASK) + +#define HAL_RX_LAST_MSDU_IN_MPDU_FLAG_SET(msdu_info_ptr, val) \ + ((*(((uint32_t *)msdu_info_ptr) + \ + (RX_MSDU_DESC_INFO_0_LAST_MSDU_IN_MPDU_FLAG_OFFSET >> 2))) |= \ + (val << RX_MSDU_DESC_INFO_0_LAST_MSDU_IN_MPDU_FLAG_LSB) & \ + RX_MSDU_DESC_INFO_0_LAST_MSDU_IN_MPDU_FLAG_MASK) + +#define HAL_RX_MSDU_CONTINUATION_FLAG_SET(msdu_info_ptr, val) \ + ((*(((uint32_t *)msdu_info_ptr) + \ + (RX_MSDU_DESC_INFO_0_MSDU_CONTINUATION_OFFSET >> 2))) |= \ + (val << RX_MSDU_DESC_INFO_0_MSDU_CONTINUATION_LSB) & \ + RX_MSDU_DESC_INFO_0_MSDU_CONTINUATION_MASK) + + +#define HAL_RX_FIRST_MSDU_IN_MPDU_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_FIRST_MSDU_IN_MPDU_FLAG_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_FIRST_MSDU_IN_MPDU_FLAG_MASK) + +#define HAL_RX_LAST_MSDU_IN_MPDU_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_LAST_MSDU_IN_MPDU_FLAG_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_LAST_MSDU_IN_MPDU_FLAG_MASK) + +#define HAL_RX_MSDU_CONTINUATION_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_MSDU_CONTINUATION_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_MSDU_CONTINUATION_MASK) + +#define HAL_RX_MSDU_REO_DST_IND_GET(msdu_info_ptr) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_REO_DESTINATION_INDICATION_OFFSET)), \ + RX_MSDU_DESC_INFO_0_REO_DESTINATION_INDICATION_MASK, \ + RX_MSDU_DESC_INFO_0_REO_DESTINATION_INDICATION_LSB)) + +#define HAL_RX_MSDU_SA_IS_VALID_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_SA_IS_VALID_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_SA_IS_VALID_MASK) + +#define HAL_RX_MSDU_SA_IDX_TIMEOUT_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_SA_IDX_TIMEOUT_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_SA_IDX_TIMEOUT_MASK) + +#define HAL_RX_MSDU_DA_IS_VALID_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_DA_IS_VALID_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_DA_IS_VALID_MASK) + +#define HAL_RX_MSDU_DA_IS_MCBC_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_DA_IS_MCBC_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_DA_IS_MCBC_MASK) + +#define HAL_RX_MSDU_DA_IDX_TIMEOUT_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_DA_IDX_TIMEOUT_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_DA_IDX_TIMEOUT_MASK) + +#define HAL_RX_REO_MSDU_REO_DST_IND_GET(reo_desc) \ + (HAL_RX_MSDU_REO_DST_IND_GET(& \ + (((struct reo_destination_ring *) \ + reo_desc)->rx_msdu_desc_info_details))) + +#define HAL_RX_MSDU_FLAGS_GET(msdu_info_ptr) \ + (HAL_RX_FIRST_MSDU_IN_MPDU_FLAG_GET(msdu_info_ptr) | \ + HAL_RX_LAST_MSDU_IN_MPDU_FLAG_GET(msdu_info_ptr) | \ + HAL_RX_MSDU_CONTINUATION_FLAG_GET(msdu_info_ptr) | \ + HAL_RX_MSDU_SA_IS_VALID_FLAG_GET(msdu_info_ptr) | \ + HAL_RX_MSDU_SA_IDX_TIMEOUT_FLAG_GET(msdu_info_ptr) | \ + HAL_RX_MSDU_DA_IS_VALID_FLAG_GET(msdu_info_ptr) | \ + HAL_RX_MSDU_DA_IS_MCBC_FLAG_GET(msdu_info_ptr) | \ + HAL_RX_MSDU_DA_IDX_TIMEOUT_FLAG_GET(msdu_info_ptr)) + +#define HAL_RX_MPDU_ENCRYPT_TYPE_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_3_ENCRYPT_TYPE_OFFSET)), \ + RX_MPDU_INFO_3_ENCRYPT_TYPE_MASK, \ + RX_MPDU_INFO_3_ENCRYPT_TYPE_LSB)) + +#define HAL_RX_FLD_SET(_ptr, _wrd, _field, _val) \ + (*(uint32_t *)(((uint8_t *)_ptr) + \ + _wrd ## _ ## _field ## _OFFSET) |= \ + ((_val << _wrd ## _ ## _field ## _LSB) & \ + _wrd ## _ ## _field ## _MASK)) + +#define HAL_RX_UNIFORM_HDR_SET(_rx_msdu_link, _field, _val) \ + HAL_RX_FLD_SET(_rx_msdu_link, UNIFORM_DESCRIPTOR_HEADER_0, \ + _field, _val) + +#define HAL_RX_MSDU_DESC_INFO_SET(_msdu_info_ptr, _field, _val) \ + HAL_RX_FLD_SET(_msdu_info_ptr, RX_MSDU_DESC_INFO_0, \ + _field, _val) + +#define HAL_RX_MPDU_DESC_INFO_SET(_mpdu_info_ptr, _field, _val) \ + HAL_RX_FLD_SET(_mpdu_info_ptr, RX_MPDU_DESC_INFO_0, \ + _field, _val) + +static inline void hal_rx_mpdu_desc_info_get(void *desc_addr, + struct hal_rx_mpdu_desc_info *mpdu_desc_info) +{ + struct reo_destination_ring *reo_dst_ring; + uint32_t *mpdu_info; + + reo_dst_ring = (struct reo_destination_ring *) desc_addr; + + mpdu_info = (uint32_t *)&reo_dst_ring->rx_mpdu_desc_info_details; + + mpdu_desc_info->msdu_count = HAL_RX_MPDU_MSDU_COUNT_GET(mpdu_info); + mpdu_desc_info->mpdu_seq = HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_info); + mpdu_desc_info->mpdu_flags = HAL_RX_MPDU_FLAGS_GET(mpdu_info); + mpdu_desc_info->peer_meta_data = + HAL_RX_MPDU_DESC_PEER_META_DATA_GET(mpdu_info); + mpdu_desc_info->bar_frame = HAL_RX_MPDU_BAR_FRAME_GET(mpdu_info); +} + +/* + * @ hal_rx_msdu_desc_info_get: Gets the flags related to MSDU desciptor. + * @ Specifically flags needed are: + * @ first_msdu_in_mpdu, last_msdu_in_mpdu, + * @ msdu_continuation, sa_is_valid, + * @ sa_idx_timeout, da_is_valid, da_idx_timeout, + * @ da_is_MCBC + * + * @ hal_rx_desc_cookie: Opaque cookie pointer used by HAL to get to the current + * @ descriptor + * @ msdu_desc_info: Holds MSDU descriptor info from HAL Rx descriptor + * @ Return: void + */ +static inline void hal_rx_msdu_desc_info_get(void *desc_addr, + struct hal_rx_msdu_desc_info *msdu_desc_info) +{ + struct reo_destination_ring *reo_dst_ring; + uint32_t *msdu_info; + + reo_dst_ring = (struct reo_destination_ring *) desc_addr; + + msdu_info = (uint32_t *)&reo_dst_ring->rx_msdu_desc_info_details; + msdu_desc_info->msdu_flags = HAL_RX_MSDU_FLAGS_GET(msdu_info); + msdu_desc_info->msdu_len = HAL_RX_MSDU_PKT_LENGTH_GET(msdu_info); +} + +/* + * hal_rxdma_buff_addr_info_set() - set the buffer_addr_info of the + * rxdma ring entry. + * @rxdma_entry: descriptor entry + * @paddr: physical address of nbuf data pointer. + * @cookie: SW cookie used as a index to SW rx desc. + * @manager: who owns the nbuf (host, NSS, etc...). + * + */ +static inline void hal_rxdma_buff_addr_info_set(void *rxdma_entry, + qdf_dma_addr_t paddr, uint32_t cookie, uint8_t manager) +{ + uint32_t paddr_lo = ((u64)paddr & 0x00000000ffffffff); + uint32_t paddr_hi = ((u64)paddr & 0xffffffff00000000) >> 32; + + HAL_RXDMA_PADDR_LO_SET(rxdma_entry, paddr_lo); + HAL_RXDMA_PADDR_HI_SET(rxdma_entry, paddr_hi); + HAL_RXDMA_COOKIE_SET(rxdma_entry, cookie); + HAL_RXDMA_MANAGER_SET(rxdma_entry, manager); +} + +/* + * Structures & Macros to obtain fields from the TLV's in the Rx packet + * pre-header. + */ + +/* + * Every Rx packet starts at an offset from the top of the buffer. + * If the host hasn't subscribed to any specific TLV, there is + * still space reserved for the following TLV's from the start of + * the buffer: + * -- RX ATTENTION + * -- RX MPDU START + * -- RX MSDU START + * -- RX MSDU END + * -- RX MPDU END + * -- RX PACKET HEADER (802.11) + * If the host subscribes to any of the TLV's above, that TLV + * if populated by the HW + */ + +#define NUM_DWORDS_TAG 1 + +/* By default the packet header TLV is 128 bytes */ +#define NUM_OF_BYTES_RX_802_11_HDR_TLV 128 +#define NUM_OF_DWORDS_RX_802_11_HDR_TLV \ + (NUM_OF_BYTES_RX_802_11_HDR_TLV >> 2) + +#define RX_PKT_OFFSET_WORDS \ + ( \ + NUM_OF_DWORDS_RX_ATTENTION + NUM_DWORDS_TAG \ + NUM_OF_DWORDS_RX_MPDU_START + NUM_DWORDS_TAG \ + NUM_OF_DWORDS_RX_MSDU_START + NUM_DWORDS_TAG \ + NUM_OF_DWORDS_RX_MSDU_END + NUM_DWORDS_TAG \ + NUM_OF_DWORDS_RX_MPDU_END + NUM_DWORDS_TAG \ + NUM_OF_DWORDS_RX_802_11_HDR_TLV + NUM_DWORDS_TAG \ + ) + +#define RX_PKT_OFFSET_BYTES \ + (RX_PKT_OFFSET_WORDS << 2) + +#define RX_PKT_HDR_TLV_LEN 120 + +/* + * Each RX descriptor TLV is preceded by 1 DWORD "tag" + */ +struct rx_attention_tlv { + uint32_t tag; + struct rx_attention rx_attn; +}; + +struct rx_mpdu_start_tlv { + uint32_t tag; + struct rx_mpdu_start rx_mpdu_start; +}; + +struct rx_msdu_start_tlv { + uint32_t tag; + struct rx_msdu_start rx_msdu_start; +}; + +struct rx_msdu_end_tlv { + uint32_t tag; + struct rx_msdu_end rx_msdu_end; +}; + +struct rx_mpdu_end_tlv { + uint32_t tag; + struct rx_mpdu_end rx_mpdu_end; +}; + +struct rx_pkt_hdr_tlv { + uint32_t tag; /* 4 B */ + uint32_t phy_ppdu_id; /* 4 B */ + char rx_pkt_hdr[RX_PKT_HDR_TLV_LEN]; /* 120 B */ +}; + + +#define RXDMA_OPTIMIZATION + +/* rx_pkt_tlvs structure should be used to process Data buffers, monitor status + * buffers, monitor destination buffers and monitor descriptor buffers. + */ +#ifdef RXDMA_OPTIMIZATION +/* + * The RX_PADDING_BYTES is required so that the TLV's don't + * spread across the 128 byte boundary + * RXDMA optimization requires: + * 1) MSDU_END & ATTENTION TLV's follow in that order + * 2) TLV's don't span across 128 byte lines + * 3) Rx Buffer is nicely aligned on the 128 byte boundary + */ +#define RX_PADDING0_BYTES 4 +#define RX_PADDING1_BYTES 16 +struct rx_pkt_tlvs { + struct rx_msdu_end_tlv msdu_end_tlv; /* 72 bytes */ + struct rx_attention_tlv attn_tlv; /* 16 bytes */ + struct rx_msdu_start_tlv msdu_start_tlv;/* 40 bytes */ + uint8_t rx_padding0[RX_PADDING0_BYTES]; /* 4 bytes */ + struct rx_mpdu_start_tlv mpdu_start_tlv;/* 96 bytes */ + struct rx_mpdu_end_tlv mpdu_end_tlv; /* 12 bytes */ + uint8_t rx_padding1[RX_PADDING1_BYTES]; /* 16 bytes */ +#ifndef NO_RX_PKT_HDR_TLV + struct rx_pkt_hdr_tlv pkt_hdr_tlv; /* 128 bytes */ +#endif +}; +#else /* RXDMA_OPTIMIZATION */ +struct rx_pkt_tlvs { + struct rx_attention_tlv attn_tlv; + struct rx_mpdu_start_tlv mpdu_start_tlv; + struct rx_msdu_start_tlv msdu_start_tlv; + struct rx_msdu_end_tlv msdu_end_tlv; + struct rx_mpdu_end_tlv mpdu_end_tlv; + struct rx_pkt_hdr_tlv pkt_hdr_tlv; +}; +#endif /* RXDMA_OPTIMIZATION */ + +/* rx_mon_pkt_tlvs structure should be used to process monitor data buffers */ +#ifdef RXDMA_OPTIMIZATION +struct rx_mon_pkt_tlvs { + struct rx_msdu_end_tlv msdu_end_tlv; /* 72 bytes */ + struct rx_attention_tlv attn_tlv; /* 16 bytes */ + struct rx_msdu_start_tlv msdu_start_tlv;/* 40 bytes */ + uint8_t rx_padding0[RX_PADDING0_BYTES]; /* 4 bytes */ + struct rx_mpdu_start_tlv mpdu_start_tlv;/* 96 bytes */ + struct rx_mpdu_end_tlv mpdu_end_tlv; /* 12 bytes */ + uint8_t rx_padding1[RX_PADDING1_BYTES]; /* 16 bytes */ + struct rx_pkt_hdr_tlv pkt_hdr_tlv; /* 128 bytes */ +}; +#else /* RXDMA_OPTIMIZATION */ +struct rx_mon_pkt_tlvs { + struct rx_attention_tlv attn_tlv; + struct rx_mpdu_start_tlv mpdu_start_tlv; + struct rx_msdu_start_tlv msdu_start_tlv; + struct rx_msdu_end_tlv msdu_end_tlv; + struct rx_mpdu_end_tlv mpdu_end_tlv; + struct rx_pkt_hdr_tlv pkt_hdr_tlv; +}; +#endif + +#define SIZE_OF_MONITOR_TLV sizeof(struct rx_mon_pkt_tlvs) +#define SIZE_OF_DATA_RX_TLV sizeof(struct rx_pkt_tlvs) + +#define RX_PKT_TLVS_LEN SIZE_OF_DATA_RX_TLV + +#ifdef NO_RX_PKT_HDR_TLV +static inline uint8_t +*hal_rx_pkt_hdr_get(uint8_t *buf) +{ + return buf + RX_PKT_TLVS_LEN; +} +#else +static inline uint8_t +*hal_rx_pkt_hdr_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + + return pkt_tlvs->pkt_hdr_tlv.rx_pkt_hdr; + +} +#endif + +#define RX_PKT_TLV_OFFSET(field) qdf_offsetof(struct rx_pkt_tlvs, field) + +#define HAL_RX_PKT_TLV_MPDU_START_OFFSET(hal_soc) \ + RX_PKT_TLV_OFFSET(mpdu_start_tlv) +#define HAL_RX_PKT_TLV_MPDU_END_OFFSET(hal_soc) RX_PKT_TLV_OFFSET(mpdu_end_tlv) +#define HAL_RX_PKT_TLV_MSDU_START_OFFSET(hal_soc) \ + RX_PKT_TLV_OFFSET(msdu_start_tlv) +#define HAL_RX_PKT_TLV_MSDU_END_OFFSET(hal_soc) RX_PKT_TLV_OFFSET(msdu_end_tlv) +#define HAL_RX_PKT_TLV_ATTN_OFFSET(hal_soc) RX_PKT_TLV_OFFSET(attn_tlv) +#define HAL_RX_PKT_TLV_PKT_HDR_OFFSET(hal_soc) RX_PKT_TLV_OFFSET(pkt_hdr_tlv) + +static inline uint8_t +*hal_rx_padding0_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + + return pkt_tlvs->rx_padding0; +} + +/* + * hal_rx_encryption_info_valid(): Returns encryption type. + * + * @hal_soc_hdl: hal soc handle + * @buf: rx_tlv_hdr of the received packet + * + * Return: encryption type + */ +static inline uint32_t +hal_rx_encryption_info_valid(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_encryption_info_valid(buf); + +} + +/* + * hal_rx_print_pn: Prints the PN of rx packet. + * @hal_soc_hdl: hal soc handle + * @buf: rx_tlv_hdr of the received packet + * + * Return: void + */ +static inline void +hal_rx_print_pn(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_rx_print_pn(buf); +} + +/* + * Get msdu_done bit from the RX_ATTENTION TLV + */ +#define HAL_RX_ATTN_MSDU_DONE_GET(_rx_attn) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \ + RX_ATTENTION_2_MSDU_DONE_OFFSET)), \ + RX_ATTENTION_2_MSDU_DONE_MASK, \ + RX_ATTENTION_2_MSDU_DONE_LSB)) + +static inline uint32_t +hal_rx_attn_msdu_done_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn; + uint32_t msdu_done; + + msdu_done = HAL_RX_ATTN_MSDU_DONE_GET(rx_attn); + + return msdu_done; +} + +#define HAL_RX_ATTN_FIRST_MPDU_GET(_rx_attn) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \ + RX_ATTENTION_1_FIRST_MPDU_OFFSET)), \ + RX_ATTENTION_1_FIRST_MPDU_MASK, \ + RX_ATTENTION_1_FIRST_MPDU_LSB)) + +/* + * hal_rx_attn_first_mpdu_get(): get fist_mpdu bit from rx attention + * @buf: pointer to rx_pkt_tlvs + * + * reutm: uint32_t(first_msdu) + */ +static inline uint32_t +hal_rx_attn_first_mpdu_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn; + uint32_t first_mpdu; + + first_mpdu = HAL_RX_ATTN_FIRST_MPDU_GET(rx_attn); + + return first_mpdu; +} + +#define HAL_RX_ATTN_TCP_UDP_CKSUM_FAIL_GET(_rx_attn) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \ + RX_ATTENTION_1_TCP_UDP_CHKSUM_FAIL_OFFSET)), \ + RX_ATTENTION_1_TCP_UDP_CHKSUM_FAIL_MASK, \ + RX_ATTENTION_1_TCP_UDP_CHKSUM_FAIL_LSB)) + +/* + * hal_rx_attn_tcp_udp_cksum_fail_get(): get tcp_udp cksum fail bit + * from rx attention + * @buf: pointer to rx_pkt_tlvs + * + * Return: tcp_udp_cksum_fail + */ +static inline bool +hal_rx_attn_tcp_udp_cksum_fail_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn; + bool tcp_udp_cksum_fail; + + tcp_udp_cksum_fail = HAL_RX_ATTN_TCP_UDP_CKSUM_FAIL_GET(rx_attn); + + return tcp_udp_cksum_fail; +} + +#define HAL_RX_ATTN_IP_CKSUM_FAIL_GET(_rx_attn) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \ + RX_ATTENTION_1_IP_CHKSUM_FAIL_OFFSET)), \ + RX_ATTENTION_1_IP_CHKSUM_FAIL_MASK, \ + RX_ATTENTION_1_IP_CHKSUM_FAIL_LSB)) + +/* + * hal_rx_attn_ip_cksum_fail_get(): get ip cksum fail bit + * from rx attention + * @buf: pointer to rx_pkt_tlvs + * + * Return: ip_cksum_fail + */ +static inline bool +hal_rx_attn_ip_cksum_fail_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn; + bool ip_cksum_fail; + + ip_cksum_fail = HAL_RX_ATTN_IP_CKSUM_FAIL_GET(rx_attn); + + return ip_cksum_fail; +} + +#define HAL_RX_ATTN_PHY_PPDU_ID_GET(_rx_attn) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \ + RX_ATTENTION_0_PHY_PPDU_ID_OFFSET)), \ + RX_ATTENTION_0_PHY_PPDU_ID_MASK, \ + RX_ATTENTION_0_PHY_PPDU_ID_LSB)) + +/* + * hal_rx_attn_phy_ppdu_id_get(): get phy_ppdu_id value + * from rx attention + * @buf: pointer to rx_pkt_tlvs + * + * Return: phy_ppdu_id + */ +static inline uint16_t +hal_rx_attn_phy_ppdu_id_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn; + uint16_t phy_ppdu_id; + + phy_ppdu_id = HAL_RX_ATTN_PHY_PPDU_ID_GET(rx_attn); + + return phy_ppdu_id; +} + +#define HAL_RX_ATTN_CCE_MATCH_GET(_rx_attn) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \ + RX_ATTENTION_1_CCE_MATCH_OFFSET)), \ + RX_ATTENTION_1_CCE_MATCH_MASK, \ + RX_ATTENTION_1_CCE_MATCH_LSB)) + +/* + * hal_rx_msdu_cce_match_get(): get CCE match bit + * from rx attention + * @buf: pointer to rx_pkt_tlvs + * Return: CCE match value + */ +static inline bool +hal_rx_msdu_cce_match_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn; + bool cce_match_val; + + cce_match_val = HAL_RX_ATTN_CCE_MATCH_GET(rx_attn); + return cce_match_val; +} + +/* + * Get peer_meta_data from RX_MPDU_INFO within RX_MPDU_START + */ +#define HAL_RX_MPDU_PEER_META_DATA_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_8_PEER_META_DATA_OFFSET)), \ + RX_MPDU_INFO_8_PEER_META_DATA_MASK, \ + RX_MPDU_INFO_8_PEER_META_DATA_LSB)) + +static inline uint32_t +hal_rx_mpdu_peer_meta_data_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + uint32_t peer_meta_data; + + peer_meta_data = HAL_RX_MPDU_PEER_META_DATA_GET(mpdu_info); + + return peer_meta_data; +} + +#define HAL_RX_MPDU_INFO_AMPDU_FLAG_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_12_AMPDU_FLAG_OFFSET)), \ + RX_MPDU_INFO_12_AMPDU_FLAG_MASK, \ + RX_MPDU_INFO_12_AMPDU_FLAG_LSB)) +/** + * hal_rx_mpdu_info_ampdu_flag_get(): get ampdu flag bit + * from rx mpdu info + * @buf: pointer to rx_pkt_tlvs + * + * Return: ampdu flag + */ +static inline bool +hal_rx_mpdu_info_ampdu_flag_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + bool ampdu_flag; + + ampdu_flag = HAL_RX_MPDU_INFO_AMPDU_FLAG_GET(mpdu_info); + + return ampdu_flag; +} + +#define HAL_RX_MPDU_PEER_META_DATA_SET(_rx_mpdu_info, peer_mdata) \ + ((*(((uint32_t *)_rx_mpdu_info) + \ + (RX_MPDU_INFO_8_PEER_META_DATA_OFFSET >> 2))) = \ + (peer_mdata << RX_MPDU_INFO_8_PEER_META_DATA_LSB) & \ + RX_MPDU_INFO_8_PEER_META_DATA_MASK) + +/* + * @ hal_rx_mpdu_peer_meta_data_set: set peer meta data in RX mpdu start tlv + * + * @ buf: rx_tlv_hdr of the received packet + * @ peer_mdata: peer meta data to be set. + * @ Return: void + */ +static inline void +hal_rx_mpdu_peer_meta_data_set(uint8_t *buf, uint32_t peer_mdata) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + HAL_RX_MPDU_PEER_META_DATA_SET(mpdu_info, peer_mdata); +} + +/** +* LRO information needed from the TLVs +*/ +#define HAL_RX_TLV_GET_LRO_ELIGIBLE(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_9_LRO_ELIGIBLE_OFFSET)), \ + RX_MSDU_END_9_LRO_ELIGIBLE_MASK, \ + RX_MSDU_END_9_LRO_ELIGIBLE_LSB)) + +#define HAL_RX_TLV_GET_TCP_ACK(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_8_TCP_ACK_NUMBER_OFFSET)), \ + RX_MSDU_END_8_TCP_ACK_NUMBER_MASK, \ + RX_MSDU_END_8_TCP_ACK_NUMBER_LSB)) + +#define HAL_RX_TLV_GET_TCP_SEQ(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_7_TCP_SEQ_NUMBER_OFFSET)), \ + RX_MSDU_END_7_TCP_SEQ_NUMBER_MASK, \ + RX_MSDU_END_7_TCP_SEQ_NUMBER_LSB)) + +#define HAL_RX_TLV_GET_TCP_WIN(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_9_WINDOW_SIZE_OFFSET)), \ + RX_MSDU_END_9_WINDOW_SIZE_MASK, \ + RX_MSDU_END_9_WINDOW_SIZE_LSB)) + +#define HAL_RX_TLV_GET_TCP_PURE_ACK(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_start_tlv.rx_msdu_start), \ + RX_MSDU_START_2_TCP_ONLY_ACK_OFFSET)), \ + RX_MSDU_START_2_TCP_ONLY_ACK_MASK, \ + RX_MSDU_START_2_TCP_ONLY_ACK_LSB)) + +#define HAL_RX_TLV_GET_TCP_PROTO(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_start_tlv.rx_msdu_start), \ + RX_MSDU_START_2_TCP_PROTO_OFFSET)), \ + RX_MSDU_START_2_TCP_PROTO_MASK, \ + RX_MSDU_START_2_TCP_PROTO_LSB)) + +#define HAL_RX_TLV_GET_UDP_PROTO(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_start_tlv.rx_msdu_start), \ + RX_MSDU_START_2_UDP_PROTO_OFFSET)), \ + RX_MSDU_START_2_UDP_PROTO_MASK, \ + RX_MSDU_START_2_UDP_PROTO_LSB)) + +#define HAL_RX_TLV_GET_IPV6(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_start_tlv.rx_msdu_start), \ + RX_MSDU_START_2_IPV6_PROTO_OFFSET)), \ + RX_MSDU_START_2_IPV6_PROTO_MASK, \ + RX_MSDU_START_2_IPV6_PROTO_LSB)) + +#define HAL_RX_TLV_GET_IP_OFFSET(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_start_tlv.rx_msdu_start), \ + RX_MSDU_START_1_L3_OFFSET_OFFSET)), \ + RX_MSDU_START_1_L3_OFFSET_MASK, \ + RX_MSDU_START_1_L3_OFFSET_LSB)) + +#define HAL_RX_TLV_GET_TCP_OFFSET(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_start_tlv.rx_msdu_start), \ + RX_MSDU_START_1_L4_OFFSET_OFFSET)), \ + RX_MSDU_START_1_L4_OFFSET_MASK, \ + RX_MSDU_START_1_L4_OFFSET_LSB)) + +#define HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_start_tlv.rx_msdu_start), \ + RX_MSDU_START_4_FLOW_ID_TOEPLITZ_OFFSET)), \ + RX_MSDU_START_4_FLOW_ID_TOEPLITZ_MASK, \ + RX_MSDU_START_4_FLOW_ID_TOEPLITZ_LSB)) + +/** + * hal_rx_msdu_end_l3_hdr_padding_get(): API to get the + * l3_header padding from rx_msdu_end TLV + * + * @buf: pointer to the start of RX PKT TLV headers + * Return: number of l3 header padding bytes + */ +static inline uint32_t +hal_rx_msdu_end_l3_hdr_padding_get(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_end_l3_hdr_padding_get(buf); +} + +/** + * hal_rx_msdu_end_sa_idx_get(): API to get the + * sa_idx from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_idx (SA AST index) + */ +static inline uint16_t +hal_rx_msdu_end_sa_idx_get(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_end_sa_idx_get(buf); +} + + /** + * hal_rx_msdu_end_sa_is_valid_get(): API to get the + * sa_is_valid bit from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_is_valid bit + */ +static inline uint8_t +hal_rx_msdu_end_sa_is_valid_get(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_end_sa_is_valid_get(buf); +} + +#define HAL_RX_MSDU_START_MSDU_LEN_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_start, \ + RX_MSDU_START_1_MSDU_LENGTH_OFFSET)), \ + RX_MSDU_START_1_MSDU_LENGTH_MASK, \ + RX_MSDU_START_1_MSDU_LENGTH_LSB)) + + /** + * hal_rx_msdu_start_msdu_len_get(): API to get the MSDU length + * from rx_msdu_start TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: msdu length + */ +static inline uint32_t +hal_rx_msdu_start_msdu_len_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t msdu_len; + + msdu_len = HAL_RX_MSDU_START_MSDU_LEN_GET(msdu_start); + + return msdu_len; +} + + /** + * hal_rx_msdu_start_msdu_len_set(): API to set the MSDU length + * from rx_msdu_start TLV + * + * @buf: pointer to the start of RX PKT TLV headers + * @len: msdu length + * + * Return: none + */ +static inline void +hal_rx_msdu_start_msdu_len_set(uint8_t *buf, uint32_t len) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + void *wrd1; + + wrd1 = (uint8_t *)msdu_start + RX_MSDU_START_1_MSDU_LENGTH_OFFSET; + *(uint32_t *)wrd1 &= (~RX_MSDU_START_1_MSDU_LENGTH_MASK); + *(uint32_t *)wrd1 |= len; +} + +#define HAL_RX_MSDU_START_BW_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\ + RX_MSDU_START_5_RECEIVE_BANDWIDTH_OFFSET)), \ + RX_MSDU_START_5_RECEIVE_BANDWIDTH_MASK, \ + RX_MSDU_START_5_RECEIVE_BANDWIDTH_LSB)) + +/* + * hal_rx_msdu_start_bw_get(): API to get the Bandwidth + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(bw) + */ +static inline uint32_t +hal_rx_msdu_start_bw_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t bw; + + bw = HAL_RX_MSDU_START_BW_GET(msdu_start); + + return bw; +} + + +#define HAL_RX_MSDU_START_FLOWID_TOEPLITZ_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_start, \ + RX_MSDU_START_4_FLOW_ID_TOEPLITZ_OFFSET)), \ + RX_MSDU_START_4_FLOW_ID_TOEPLITZ_MASK, \ + RX_MSDU_START_4_FLOW_ID_TOEPLITZ_LSB)) + + /** + * hal_rx_msdu_start_toeplitz_get: API to get the toeplitz hash + * from rx_msdu_start TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: toeplitz hash + */ +static inline uint32_t +hal_rx_msdu_start_toeplitz_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + + return HAL_RX_MSDU_START_FLOWID_TOEPLITZ_GET(msdu_start); +} + +/** + * enum hal_rx_mpdu_info_sw_frame_group_id_type: Enum for group id in MPDU_INFO + * + * @ HAL_MPDU_SW_FRAME_GROUP_NDP_FRAME: NDP frame + * @ HAL_MPDU_SW_FRAME_GROUP_MULTICAST_DATA: multicast data frame + * @ HAL_MPDU_SW_FRAME_GROUP_UNICAST_DATA: unicast data frame + * @ HAL_MPDU_SW_FRAME_GROUP_NULL_DATA: NULL data frame + * @ HAL_MPDU_SW_FRAME_GROUP_MGMT: management frame + * @ HAL_MPDU_SW_FRAME_GROUP_MGMT_PROBE_REQ: probe req frame + * @ HAL_MPDU_SW_FRAME_GROUP_CTRL: control frame + * @ HAL_MPDU_SW_FRAME_GROUP_CTRL_BAR: BAR frame + * @ HAL_MPDU_SW_FRAME_GROUP_CTRL_RTS: RTS frame + * @ HAL_MPDU_SW_FRAME_GROUP_UNSUPPORTED: unsupported + * @ HAL_MPDU_SW_FRAME_GROUP_MAX: max limit + */ +enum hal_rx_mpdu_info_sw_frame_group_id_type { + HAL_MPDU_SW_FRAME_GROUP_NDP_FRAME = 0, + HAL_MPDU_SW_FRAME_GROUP_MULTICAST_DATA, + HAL_MPDU_SW_FRAME_GROUP_UNICAST_DATA, + HAL_MPDU_SW_FRAME_GROUP_NULL_DATA, + HAL_MPDU_SW_FRAME_GROUP_MGMT, + HAL_MPDU_SW_FRAME_GROUP_MGMT_PROBE_REQ = 8, + HAL_MPDU_SW_FRAME_GROUP_MGMT_BEACON = 12, + HAL_MPDU_SW_FRAME_GROUP_CTRL = 20, + HAL_MPDU_SW_FRAME_GROUP_CTRL_BAR = 28, + HAL_MPDU_SW_FRAME_GROUP_CTRL_RTS = 31, + HAL_MPDU_SW_FRAME_GROUP_UNSUPPORTED = 36, + HAL_MPDU_SW_FRAME_GROUP_MAX = 37, +}; + +/** + * hal_rx_mpdu_start_mpdu_qos_control_valid_get(): + * Retrieve qos control valid bit from the tlv. + * @hal_soc_hdl: hal_soc handle + * @buf: pointer to rx pkt TLV. + * + * Return: qos control value. + */ +static inline uint32_t +hal_rx_mpdu_start_mpdu_qos_control_valid_get( + hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if ((!hal_soc) || (!hal_soc->ops)) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return QDF_STATUS_E_INVAL; + } + + if (hal_soc->ops->hal_rx_mpdu_start_mpdu_qos_control_valid_get) + return hal_soc->ops-> + hal_rx_mpdu_start_mpdu_qos_control_valid_get(buf); + + return QDF_STATUS_E_INVAL; +} + +/** + * hal_rx_is_unicast: check packet is unicast frame or not. + * @hal_soc_hdl: hal_soc handle + * @buf: pointer to rx pkt TLV. + * + * Return: true on unicast. + */ +static inline bool +hal_rx_is_unicast(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_is_unicast(buf); +} + +/** + * hal_rx_tid_get: get tid based on qos control valid. + * @hal_soc_hdl: hal soc handle + * @buf: pointer to rx pkt TLV. + * + * Return: tid + */ +static inline uint32_t +hal_rx_tid_get(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_tid_get(hal_soc_hdl, buf); +} + +/** + * hal_rx_mpdu_start_sw_peer_id_get() - Retrieve sw peer id + * @hal_soc_hdl: hal soc handle + * @buf: pointer to rx pkt TLV. + * + * Return: sw peer_id + */ +static inline uint32_t +hal_rx_mpdu_start_sw_peer_id_get(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_mpdu_start_sw_peer_id_get(buf); +} + +#define HAL_RX_MSDU_START_SGI_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\ + RX_MSDU_START_5_SGI_OFFSET)), \ + RX_MSDU_START_5_SGI_MASK, \ + RX_MSDU_START_5_SGI_LSB)) +/** + * hal_rx_msdu_start_msdu_sgi_get(): API to get the Short Gaurd + * Interval from rx_msdu_start TLV + * + * @buf: pointer to the start of RX PKT TLV headers + * Return: uint32_t(sgi) + */ +static inline uint32_t +hal_rx_msdu_start_sgi_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t sgi; + + sgi = HAL_RX_MSDU_START_SGI_GET(msdu_start); + + return sgi; +} + +#define HAL_RX_MSDU_START_RATE_MCS_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\ + RX_MSDU_START_5_RATE_MCS_OFFSET)), \ + RX_MSDU_START_5_RATE_MCS_MASK, \ + RX_MSDU_START_5_RATE_MCS_LSB)) +/** + * hal_rx_msdu_start_msdu_rate_mcs_get(): API to get the MCS rate + * from rx_msdu_start TLV + * + * @buf: pointer to the start of RX PKT TLV headers + * Return: uint32_t(rate_mcs) + */ +static inline uint32_t +hal_rx_msdu_start_rate_mcs_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t rate_mcs; + + rate_mcs = HAL_RX_MSDU_START_RATE_MCS_GET(msdu_start); + + return rate_mcs; +} + +#define HAL_RX_ATTN_DECRYPT_STATUS_GET(_rx_attn) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \ + RX_ATTENTION_2_DECRYPT_STATUS_CODE_OFFSET)), \ + RX_ATTENTION_2_DECRYPT_STATUS_CODE_MASK, \ + RX_ATTENTION_2_DECRYPT_STATUS_CODE_LSB)) + +/* + * hal_rx_attn_msdu_get_is_decrypted(): API to get the decrypt status of the + * packet from rx_attention + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(decryt status) + */ + +static inline uint32_t +hal_rx_attn_msdu_get_is_decrypted(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn; + uint32_t is_decrypt = 0; + uint32_t decrypt_status; + + decrypt_status = HAL_RX_ATTN_DECRYPT_STATUS_GET(rx_attn); + + if (!decrypt_status) + is_decrypt = 1; + + return is_decrypt; +} + +/* + * Get key index from RX_MSDU_END + */ +#define HAL_RX_MSDU_END_KEYID_OCTET_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_2_KEY_ID_OCTET_OFFSET)), \ + RX_MSDU_END_2_KEY_ID_OCTET_MASK, \ + RX_MSDU_END_2_KEY_ID_OCTET_LSB)) +/* + * hal_rx_msdu_get_keyid(): API to get the key id if the decrypted packet + * from rx_msdu_end + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(key id) + */ + +static inline uint32_t +hal_rx_msdu_get_keyid(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint32_t keyid_octet; + + keyid_octet = HAL_RX_MSDU_END_KEYID_OCTET_GET(msdu_end); + + return keyid_octet & 0x3; +} + +#define HAL_RX_MSDU_START_RSSI_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_start, \ + RX_MSDU_START_5_USER_RSSI_OFFSET)), \ + RX_MSDU_START_5_USER_RSSI_MASK, \ + RX_MSDU_START_5_USER_RSSI_LSB)) +/* + * hal_rx_msdu_start_get_rssi(): API to get the rssi of received pkt + * from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(rssi) + */ + +static inline uint32_t +hal_rx_msdu_start_get_rssi(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t rssi; + + rssi = HAL_RX_MSDU_START_RSSI_GET(msdu_start); + + return rssi; + +} + +#define HAL_RX_MSDU_START_FREQ_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_start, \ + RX_MSDU_START_7_SW_PHY_META_DATA_OFFSET)), \ + RX_MSDU_START_7_SW_PHY_META_DATA_MASK, \ + RX_MSDU_START_7_SW_PHY_META_DATA_LSB)) + +/* + * hal_rx_msdu_start_get_freq(): API to get the frequency of operating channel + * from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(frequency) + */ + +static inline uint32_t +hal_rx_msdu_start_get_freq(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t freq; + + freq = HAL_RX_MSDU_START_FREQ_GET(msdu_start); + + return freq; +} + + +#define HAL_RX_MSDU_START_PKT_TYPE_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_start, \ + RX_MSDU_START_5_PKT_TYPE_OFFSET)), \ + RX_MSDU_START_5_PKT_TYPE_MASK, \ + RX_MSDU_START_5_PKT_TYPE_LSB)) + +/* + * hal_rx_msdu_start_get_pkt_type(): API to get the pkt type + * from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(pkt type) + */ + +static inline uint32_t +hal_rx_msdu_start_get_pkt_type(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t pkt_type; + + pkt_type = HAL_RX_MSDU_START_PKT_TYPE_GET(msdu_start); + + return pkt_type; +} + +/* + * hal_rx_mpdu_get_tods(): API to get the tods info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(to_ds) + */ + +static inline uint32_t +hal_rx_mpdu_get_to_ds(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_mpdu_get_to_ds(buf); +} + + +/* + * hal_rx_mpdu_get_fr_ds(): API to get the from ds info + * from rx_mpdu_start + * @hal_soc_hdl: hal soc handle + * @buf: pointer to the start of RX PKT TLV header + * + * Return: uint32_t(fr_ds) + */ +static inline uint32_t +hal_rx_mpdu_get_fr_ds(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_mpdu_get_fr_ds(buf); +} + +#define HAL_RX_MPDU_AD4_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_OFFSET)), \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_MASK, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_LSB)) + +#define HAL_RX_MPDU_AD4_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_OFFSET)), \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_MASK, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_LSB)) + +/* + * hal_rx_mpdu_get_addr1(): API to check get address1 of the mpdu + * @hal_soc_hdl: hal soc handle + * @buf: pointer to the start of RX PKT TLV headera + * @mac_addr: pointer to mac address + * + * Return: success/failure + */ +static inline +QDF_STATUS hal_rx_mpdu_get_addr1(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf, uint8_t *mac_addr) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_mpdu_get_addr1(buf, mac_addr); +} + +/* + * hal_rx_mpdu_get_addr2(): API to check get address2 of the mpdu + * in the packet + * @hal_soc_hdl: hal soc handle + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * + * Return: success/failure + */ +static inline +QDF_STATUS hal_rx_mpdu_get_addr2(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf, uint8_t *mac_addr) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_mpdu_get_addr2(buf, mac_addr); +} + +/* + * hal_rx_mpdu_get_addr3(): API to get address3 of the mpdu + * in the packet + * @hal_soc_hdl: hal soc handle + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * + * Return: success/failure + */ +static inline +QDF_STATUS hal_rx_mpdu_get_addr3(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf, uint8_t *mac_addr) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_mpdu_get_addr3(buf, mac_addr); +} + +/* + * hal_rx_mpdu_get_addr4(): API to get address4 of the mpdu + * in the packet + * @hal_soc_hdl: hal_soc handle + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static inline +QDF_STATUS hal_rx_mpdu_get_addr4(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf, uint8_t *mac_addr) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_mpdu_get_addr4(buf, mac_addr); +} + + /** + * hal_rx_msdu_end_da_idx_get: API to get da_idx + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da index + */ +static inline uint16_t +hal_rx_msdu_end_da_idx_get(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_end_da_idx_get(buf); +} + +/** + * hal_rx_msdu_end_da_is_valid_get: API to check if da is valid + * from rx_msdu_end TLV + * @hal_soc_hdl: hal soc handle + * @ buf: pointer to the start of RX PKT TLV headers + * + * Return: da_is_valid + */ +static inline uint8_t +hal_rx_msdu_end_da_is_valid_get(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_end_da_is_valid_get(buf); +} + +/** + * hal_rx_msdu_end_da_is_mcbc_get: API to check if pkt is MCBC + * from rx_msdu_end TLV + * + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: da_is_mcbc + */ +static inline uint8_t +hal_rx_msdu_end_da_is_mcbc_get(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_end_da_is_mcbc_get(buf); +} + +/** + * hal_rx_msdu_end_first_msdu_get: API to get first msdu status + * from rx_msdu_end TLV + * @hal_soc_hdl: hal soc handle + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: first_msdu + */ +static inline uint8_t +hal_rx_msdu_end_first_msdu_get(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_end_first_msdu_get(buf); +} + +/** + * hal_rx_msdu_end_last_msdu_get: API to get last msdu status + * from rx_msdu_end TLV + * @hal_soc_hdl: hal soc handle + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: last_msdu + */ +static inline uint8_t +hal_rx_msdu_end_last_msdu_get(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_end_last_msdu_get(buf); +} + +/** + * hal_rx_msdu_cce_metadata_get: API to get CCE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * Return: cce_meta_data + */ +static inline uint16_t +hal_rx_msdu_cce_metadata_get(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_cce_metadata_get(buf); +} + +/******************************************************************************* + * RX ERROR APIS + ******************************************************************************/ + +#define HAL_RX_MPDU_END_DECRYPT_ERR_GET(_rx_mpdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_end),\ + RX_MPDU_END_1_RX_IN_TX_DECRYPT_BYP_OFFSET)), \ + RX_MPDU_END_1_RX_IN_TX_DECRYPT_BYP_MASK, \ + RX_MPDU_END_1_RX_IN_TX_DECRYPT_BYP_LSB)) + +/** + * hal_rx_mpdu_end_decrypt_err_get(): API to get the Decrypt ERR + * from rx_mpdu_end TLV + * + * @buf: pointer to the start of RX PKT TLV headers + * Return: uint32_t(decrypt_err) + */ +static inline uint32_t +hal_rx_mpdu_end_decrypt_err_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_end *mpdu_end = + &pkt_tlvs->mpdu_end_tlv.rx_mpdu_end; + uint32_t decrypt_err; + + decrypt_err = HAL_RX_MPDU_END_DECRYPT_ERR_GET(mpdu_end); + + return decrypt_err; +} + +#define HAL_RX_MPDU_END_MIC_ERR_GET(_rx_mpdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_end),\ + RX_MPDU_END_1_TKIP_MIC_ERR_OFFSET)), \ + RX_MPDU_END_1_TKIP_MIC_ERR_MASK, \ + RX_MPDU_END_1_TKIP_MIC_ERR_LSB)) + +/** + * hal_rx_mpdu_end_mic_err_get(): API to get the MIC ERR + * from rx_mpdu_end TLV + * + * @buf: pointer to the start of RX PKT TLV headers + * Return: uint32_t(mic_err) + */ +static inline uint32_t +hal_rx_mpdu_end_mic_err_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_end *mpdu_end = + &pkt_tlvs->mpdu_end_tlv.rx_mpdu_end; + uint32_t mic_err; + + mic_err = HAL_RX_MPDU_END_MIC_ERR_GET(mpdu_end); + + return mic_err; +} + +/******************************************************************************* + * RX REO ERROR APIS + ******************************************************************************/ + +#define HAL_RX_NUM_MSDU_DESC 6 +#define HAL_RX_MAX_SAVED_RING_DESC 16 + +/* TODO: rework the structure */ +struct hal_rx_msdu_list { + struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC]; + uint32_t sw_cookie[HAL_RX_NUM_MSDU_DESC]; + uint8_t rbm[HAL_RX_NUM_MSDU_DESC]; + /* physical address of the msdu */ + uint64_t paddr[HAL_RX_NUM_MSDU_DESC]; +}; + +struct hal_buf_info { + uint64_t paddr; + uint32_t sw_cookie; + uint8_t rbm; +}; + +/** + * hal_rx_link_desc_msdu0_ptr - Get pointer to rx_msdu details + * @msdu_link_ptr - msdu link ptr + * @hal - pointer to hal_soc + * Return - Pointer to rx_msdu_details structure + * + */ +static inline +void *hal_rx_link_desc_msdu0_ptr(void *msdu_link_ptr, + struct hal_soc *hal_soc) +{ + return hal_soc->ops->hal_rx_link_desc_msdu0_ptr(msdu_link_ptr); +} + +/** + * hal_rx_msdu_desc_info_get_ptr() - Get msdu desc info ptr + * @msdu_details_ptr - Pointer to msdu_details_ptr + * @hal - pointer to hal_soc + * Return - Pointer to rx_msdu_desc_info structure. + * + */ +static inline +void *hal_rx_msdu_desc_info_get_ptr(void *msdu_details_ptr, + struct hal_soc *hal_soc) +{ + return hal_soc->ops->hal_rx_msdu_desc_info_get_ptr(msdu_details_ptr); +} + +/* This special cookie value will be used to indicate FW allocated buffers + * received through RXDMA2SW ring for RXDMA WARs + */ +#define HAL_RX_COOKIE_SPECIAL 0x1fffff + +/** + * hal_rx_msdu_link_desc_get(): API to get the MSDU information + * from the MSDU link descriptor + * + * @msdu_link_desc: Opaque pointer used by HAL to get to the + * MSDU link descriptor (struct rx_msdu_link) + * + * @msdu_list: Return the list of MSDUs contained in this link descriptor + * + * @num_msdus: Number of MSDUs in the MPDU + * + * Return: void + */ +static inline void hal_rx_msdu_list_get(hal_soc_handle_t hal_soc_hdl, + void *msdu_link_desc, + struct hal_rx_msdu_list *msdu_list, + uint16_t *num_msdus) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct rx_msdu_details *msdu_details; + struct rx_msdu_desc_info *msdu_desc_info; + struct rx_msdu_link *msdu_link = (struct rx_msdu_link *)msdu_link_desc; + int i; + + msdu_details = hal_rx_link_desc_msdu0_ptr(msdu_link, hal_soc); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] msdu_link=%pK msdu_details=%pK", + __func__, __LINE__, msdu_link, msdu_details); + + for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { + /* num_msdus received in mpdu descriptor may be incorrect + * sometimes due to HW issue. Check msdu buffer address also + */ + if (!i && (HAL_RX_BUFFER_ADDR_31_0_GET( + &msdu_details[i].buffer_addr_info_details) == 0)) + break; + if (HAL_RX_BUFFER_ADDR_31_0_GET( + &msdu_details[i].buffer_addr_info_details) == 0) { + /* set the last msdu bit in the prev msdu_desc_info */ + msdu_desc_info = + hal_rx_msdu_desc_info_get_ptr(&msdu_details[i - 1], hal_soc); + HAL_RX_LAST_MSDU_IN_MPDU_FLAG_SET(msdu_desc_info, 1); + break; + } + msdu_desc_info = hal_rx_msdu_desc_info_get_ptr(&msdu_details[i], + hal_soc); + + /* set first MSDU bit or the last MSDU bit */ + if (!i) + HAL_RX_FIRST_MSDU_IN_MPDU_FLAG_SET(msdu_desc_info, 1); + else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) + HAL_RX_LAST_MSDU_IN_MPDU_FLAG_SET(msdu_desc_info, 1); + + msdu_list->msdu_info[i].msdu_flags = + HAL_RX_MSDU_FLAGS_GET(msdu_desc_info); + msdu_list->msdu_info[i].msdu_len = + HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info); + msdu_list->sw_cookie[i] = + HAL_RX_BUF_COOKIE_GET( + &msdu_details[i].buffer_addr_info_details); + msdu_list->rbm[i] = HAL_RX_BUF_RBM_GET( + &msdu_details[i].buffer_addr_info_details); + msdu_list->paddr[i] = HAL_RX_BUFFER_ADDR_31_0_GET( + &msdu_details[i].buffer_addr_info_details) | + (uint64_t)HAL_RX_BUFFER_ADDR_39_32_GET( + &msdu_details[i].buffer_addr_info_details) << 32; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] i=%d sw_cookie=%d", + __func__, __LINE__, i, msdu_list->sw_cookie[i]); + } + *num_msdus = i; +} + +/** + * hal_rx_msdu_reo_dst_ind_get: Gets the REO + * destination ring ID from the msdu desc info + * + * @msdu_link_desc : Opaque cookie pointer used by HAL to get to + * the current descriptor + * + * Return: dst_ind (REO destination ring ID) + */ +static inline uint32_t +hal_rx_msdu_reo_dst_ind_get(hal_soc_handle_t hal_soc_hdl, void *msdu_link_desc) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct rx_msdu_details *msdu_details; + struct rx_msdu_desc_info *msdu_desc_info; + struct rx_msdu_link *msdu_link = (struct rx_msdu_link *)msdu_link_desc; + uint32_t dst_ind; + + msdu_details = hal_rx_link_desc_msdu0_ptr(msdu_link, hal_soc); + + /* The first msdu in the link should exsist */ + msdu_desc_info = hal_rx_msdu_desc_info_get_ptr(&msdu_details[0], + hal_soc); + dst_ind = HAL_RX_MSDU_REO_DST_IND_GET(msdu_desc_info); + return dst_ind; +} + +/** + * hal_rx_reo_buf_paddr_get: Gets the physical address and + * cookie from the REO destination ring element + * + * @ hal_rx_desc_cookie: Opaque cookie pointer used by HAL to get to + * the current descriptor + * @ buf_info: structure to return the buffer information + * Return: void + */ +static inline +void hal_rx_reo_buf_paddr_get(hal_ring_desc_t rx_desc, + struct hal_buf_info *buf_info) +{ + struct reo_destination_ring *reo_ring = + (struct reo_destination_ring *)rx_desc; + + buf_info->paddr = + (HAL_RX_REO_BUFFER_ADDR_31_0_GET(reo_ring) | + ((uint64_t)(HAL_RX_REO_BUFFER_ADDR_39_32_GET(reo_ring)) << 32)); + + buf_info->sw_cookie = HAL_RX_REO_BUF_COOKIE_GET(reo_ring); +} + +/** + * enum hal_reo_error_code: Indicates that type of buffer or descriptor + * + * @ HAL_RX_MSDU_BUF_ADDR_TYPE : Reo buffer address points to the MSDU buffer + * @ HAL_RX_MSDU_LINK_DESC_TYPE: Reo buffer address points to the link + * descriptor + */ +enum hal_rx_reo_buf_type { + HAL_RX_REO_MSDU_BUF_ADDR_TYPE = 0, + HAL_RX_REO_MSDU_LINK_DESC_TYPE, +}; + +#define HAL_RX_REO_BUF_TYPE_GET(reo_desc) (((*(((uint32_t *) reo_desc)+ \ + (REO_DESTINATION_RING_7_REO_DEST_BUFFER_TYPE_OFFSET >> 2))) & \ + REO_DESTINATION_RING_7_REO_DEST_BUFFER_TYPE_MASK) >> \ + REO_DESTINATION_RING_7_REO_DEST_BUFFER_TYPE_LSB) + +#define HAL_RX_REO_QUEUE_NUMBER_GET(reo_desc) (((*(((uint32_t *)reo_desc) + \ + (REO_DESTINATION_RING_7_RECEIVE_QUEUE_NUMBER_OFFSET >> 2))) & \ + REO_DESTINATION_RING_7_RECEIVE_QUEUE_NUMBER_MASK) >> \ + REO_DESTINATION_RING_7_RECEIVE_QUEUE_NUMBER_LSB) + +/** + * enum hal_reo_error_code: Error code describing the type of error detected + * + * @ HAL_REO_ERR_QUEUE_DESC_ADDR_0 : Reo queue descriptor provided in the + * REO_ENTRANCE ring is set to 0 + * @ HAL_REO_ERR_QUEUE_DESC_INVALID: Reo queue descriptor valid bit is NOT set + * @ HAL_REO_ERR_AMPDU_IN_NON_BA : AMPDU frame received without BA session + * having been setup + * @ HAL_REO_ERR_NON_BA_DUPLICATE : Non-BA session, SN equal to SSN, + * Retry bit set: duplicate frame + * @ HAL_REO_ERR_BA_DUPLICATE : BA session, duplicate frame + * @ HAL_REO_ERR_REGULAR_FRAME_2K_JUMP : A normal (management/data frame) + * received with 2K jump in SN + * @ HAL_REO_ERR_BAR_FRAME_2K_JUMP : A bar received with 2K jump in SSN + * @ HAL_REO_ERR_REGULAR_FRAME_OOR : A normal (management/data frame) received + * with SN falling within the OOR window + * @ HAL_REO_ERR_BAR_FRAME_OOR : A bar received with SSN falling within the + * OOR window + * @ HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION : A bar received without a BA session + * @ HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN : A bar received with SSN equal to SN + * @ HAL_REO_ERR_PN_CHECK_FAILED : PN Check Failed packet + * @ HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET : Frame is forwarded as a result + * of the Seq_2k_error_detected_flag been set in the REO Queue descriptor + * @ HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET : Frame is forwarded as a result + * of the pn_error_detected_flag been set in the REO Queue descriptor + * @ HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET : Frame is forwarded as a result of + * the queue descriptor(address) being blocked as SW/FW seems to be currently + * in the process of making updates to this descriptor + */ +enum hal_reo_error_code { + HAL_REO_ERR_QUEUE_DESC_ADDR_0 = 0, + HAL_REO_ERR_QUEUE_DESC_INVALID, + HAL_REO_ERR_AMPDU_IN_NON_BA, + HAL_REO_ERR_NON_BA_DUPLICATE, + HAL_REO_ERR_BA_DUPLICATE, + HAL_REO_ERR_REGULAR_FRAME_2K_JUMP, + HAL_REO_ERR_BAR_FRAME_2K_JUMP, + HAL_REO_ERR_REGULAR_FRAME_OOR, + HAL_REO_ERR_BAR_FRAME_OOR, + HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION, + HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN, + HAL_REO_ERR_PN_CHECK_FAILED, + HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET, + HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET, + HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET, + HAL_REO_ERR_MAX +}; + +/** + * enum hal_rxdma_error_code: Code describing the type of RxDMA error detected + * + * @HAL_RXDMA_ERR_OVERFLOW: MPDU frame is not complete due to a FIFO overflow + * @ HAL_RXDMA_ERR_OVERFLOW : MPDU frame is not complete due to a FIFO + * overflow + * @ HAL_RXDMA_ERR_MPDU_LENGTH : MPDU frame is not complete due to receiving + * incomplete + * MPDU from the PHY + * @ HAL_RXDMA_ERR_FCS : FCS check on the MPDU frame failed + * @ HAL_RXDMA_ERR_DECRYPT : Decryption error + * @ HAL_RXDMA_ERR_TKIP_MIC : TKIP MIC error + * @ HAL_RXDMA_ERR_UNENCRYPTED : Received a frame that was expected to be + * encrypted but wasn’t + * @ HAL_RXDMA_ERR_MSDU_LEN : MSDU related length error + * @ HAL_RXDMA_ERR_MSDU_LIMIT : Number of MSDUs in the MPDUs exceeded + * the max allowed + * @ HAL_RXDMA_ERR_WIFI_PARSE : wifi parsing error + * @ HAL_RXDMA_ERR_AMSDU_PARSE : Amsdu parsing error + * @ HAL_RXDMA_ERR_SA_TIMEOUT : Source Address search timeout + * @ HAL_RXDMA_ERR_DA_TIMEOUT : Destination Address search timeout + * @ HAL_RXDMA_ERR_FLOW_TIMEOUT : Flow Search Timeout + * @ HAL_RXDMA_ERR_FLUSH_REQUEST : RxDMA FIFO Flush request + * @ HAL_RXDMA_ERR_WAR : RxDMA WAR dummy errors + */ +enum hal_rxdma_error_code { + HAL_RXDMA_ERR_OVERFLOW = 0, + HAL_RXDMA_ERR_MPDU_LENGTH, + HAL_RXDMA_ERR_FCS, + HAL_RXDMA_ERR_DECRYPT, + HAL_RXDMA_ERR_TKIP_MIC, + HAL_RXDMA_ERR_UNENCRYPTED, + HAL_RXDMA_ERR_MSDU_LEN, + HAL_RXDMA_ERR_MSDU_LIMIT, + HAL_RXDMA_ERR_WIFI_PARSE, + HAL_RXDMA_ERR_AMSDU_PARSE, + HAL_RXDMA_ERR_SA_TIMEOUT, + HAL_RXDMA_ERR_DA_TIMEOUT, + HAL_RXDMA_ERR_FLOW_TIMEOUT, + HAL_RXDMA_ERR_FLUSH_REQUEST, + HAL_RXDMA_ERR_WAR = 31, + HAL_RXDMA_ERR_MAX +}; + +/** + * HW BM action settings in WBM release ring + */ +#define HAL_BM_ACTION_PUT_IN_IDLE_LIST 0 +#define HAL_BM_ACTION_RELEASE_MSDU_LIST 1 + +/** + * enum hal_rx_wbm_error_source: Indicates which module initiated the + * release of this buffer or descriptor + * + * @ HAL_RX_WBM_ERR_SRC_TQM : TQM released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_RXDMA: RXDMA released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_REO: REO released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_FW: FW released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_SW: SW released this buffer or descriptor + */ +enum hal_rx_wbm_error_source { + HAL_RX_WBM_ERR_SRC_TQM = 0, + HAL_RX_WBM_ERR_SRC_RXDMA, + HAL_RX_WBM_ERR_SRC_REO, + HAL_RX_WBM_ERR_SRC_FW, + HAL_RX_WBM_ERR_SRC_SW, +}; + +/** + * enum hal_rx_wbm_buf_type: Indicates that type of buffer or descriptor + * released + * + * @ HAL_RX_WBM_ERR_SRC_TQM : TQM released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_RXDMA: RXDMA released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_REO: REO released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_FW: FW released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_SW: SW released this buffer or descriptor + */ +enum hal_rx_wbm_buf_type { + HAL_RX_WBM_BUF_TYPE_REL_BUF = 0, + HAL_RX_WBM_BUF_TYPE_MSDU_LINK_DESC, + HAL_RX_WBM_BUF_TYPE_MPDU_LINK_DESC, + HAL_RX_WBM_BUF_TYPE_MSDU_EXT_DESC, + HAL_RX_WBM_BUF_TYPE_Q_EXT_DESC, +}; + +#define HAL_RX_REO_ERROR_GET(reo_desc) (((*(((uint32_t *) reo_desc)+ \ + (REO_DESTINATION_RING_7_REO_ERROR_CODE_OFFSET >> 2))) & \ + REO_DESTINATION_RING_7_REO_ERROR_CODE_MASK) >> \ + REO_DESTINATION_RING_7_REO_ERROR_CODE_LSB) + +/** + * hal_rx_is_pn_error() - Indicate if this error was caused by a + * PN check failure + * + * @reo_desc: opaque pointer used by HAL to get the REO destination entry + * + * Return: true: error caused by PN check, false: other error + */ +static inline bool hal_rx_reo_is_pn_error(hal_ring_desc_t rx_desc) +{ + struct reo_destination_ring *reo_desc = + (struct reo_destination_ring *)rx_desc; + + return ((HAL_RX_REO_ERROR_GET(reo_desc) == + HAL_REO_ERR_PN_CHECK_FAILED) | + (HAL_RX_REO_ERROR_GET(reo_desc) == + HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET)) ? + true : false; +} + +/** + * hal_rx_is_2k_jump() - Indicate if this error was caused by a 2K jump in + * the sequence number + * + * @ring_desc: opaque pointer used by HAL to get the REO destination entry + * + * Return: true: error caused by 2K jump, false: other error + */ +static inline bool hal_rx_reo_is_2k_jump(hal_ring_desc_t rx_desc) +{ + struct reo_destination_ring *reo_desc = + (struct reo_destination_ring *)rx_desc; + + return ((HAL_RX_REO_ERROR_GET(reo_desc) == + HAL_REO_ERR_REGULAR_FRAME_2K_JUMP) | + (HAL_RX_REO_ERROR_GET(reo_desc) == + HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET)) ? + true : false; +} + +/** + * hal_rx_reo_is_oor_error() - Indicate if this error was caused by OOR + * + * @ring_desc: opaque pointer used by HAL to get the REO destination entry + * + * Return: true: error caused by OOR, false: other error + */ +static inline bool hal_rx_reo_is_oor_error(void *rx_desc) +{ + struct reo_destination_ring *reo_desc = + (struct reo_destination_ring *)rx_desc; + + return (HAL_RX_REO_ERROR_GET(reo_desc) == + HAL_REO_ERR_REGULAR_FRAME_OOR) ? true : false; +} + +#define HAL_WBM_RELEASE_RING_DESC_LEN_DWORDS (NUM_OF_DWORDS_WBM_RELEASE_RING) +/** + * hal_dump_wbm_rel_desc() - dump wbm release descriptor + * @hal_desc: hardware descriptor pointer + * + * This function will print wbm release descriptor + * + * Return: none + */ +static inline void hal_dump_wbm_rel_desc(void *src_srng_desc) +{ + uint32_t *wbm_comp = (uint32_t *)src_srng_desc; + uint32_t i; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL, + "Current Rx wbm release descriptor is"); + + for (i = 0; i < HAL_WBM_RELEASE_RING_DESC_LEN_DWORDS; i++) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL, + "DWORD[i] = 0x%x", wbm_comp[i]); + } +} + +/** + * hal_rx_msdu_link_desc_set: Retrieves MSDU Link Descriptor to WBM + * + * @ hal_soc_hdl : HAL version of the SOC pointer + * @ src_srng_desc : void pointer to the WBM Release Ring descriptor + * @ buf_addr_info : void pointer to the buffer_addr_info + * @ bm_action : put in IDLE list or release to MSDU_LIST + * + * Return: void + */ +/* look at implementation at dp_hw_link_desc_pool_setup()*/ +static inline +void hal_rx_msdu_link_desc_set(hal_soc_handle_t hal_soc_hdl, + void *src_srng_desc, + hal_buff_addrinfo_t buf_addr_info, + uint8_t bm_action) +{ + struct wbm_release_ring *wbm_rel_srng = + (struct wbm_release_ring *)src_srng_desc; + uint32_t addr_31_0; + uint8_t addr_39_32; + + /* Structure copy !!! */ + wbm_rel_srng->released_buff_or_desc_addr_info = + *((struct buffer_addr_info *)buf_addr_info); + + addr_31_0 = + wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_31_0; + addr_39_32 = + wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_39_32; + + HAL_DESC_SET_FIELD(src_srng_desc, WBM_RELEASE_RING_2, + RELEASE_SOURCE_MODULE, HAL_RX_WBM_ERR_SRC_SW); + HAL_DESC_SET_FIELD(src_srng_desc, WBM_RELEASE_RING_2, BM_ACTION, + bm_action); + HAL_DESC_SET_FIELD(src_srng_desc, WBM_RELEASE_RING_2, + BUFFER_OR_DESC_TYPE, HAL_RX_WBM_BUF_TYPE_MSDU_LINK_DESC); + + /* WBM error is indicated when any of the link descriptors given to + * WBM has a NULL address, and one those paths is the link descriptors + * released from host after processing RXDMA errors, + * or from Rx defrag path, and we want to add an assert here to ensure + * host is not releasing descriptors with NULL address. + */ + + if (qdf_unlikely(!addr_31_0 && !addr_39_32)) { + hal_dump_wbm_rel_desc(src_srng_desc); + qdf_assert_always(0); + } +} + +/* + * hal_rx_msdu_link_desc_reinject: Re-injects the MSDU link descriptor to + * REO entrance ring + * + * @ soc: HAL version of the SOC pointer + * @ pa: Physical address of the MSDU Link Descriptor + * @ cookie: SW cookie to get to the virtual address + * @ error_enabled_reo_q: Argument to determine whether this needs to go + * to the error enabled REO queue + * + * Return: void + */ +static inline void hal_rx_msdu_link_desc_reinject(struct hal_soc *soc, + uint64_t pa, uint32_t cookie, bool error_enabled_reo_q) +{ + /* TODO */ +} + +/** + * HAL_RX_BUF_ADDR_INFO_GET: Returns the address of the + * BUFFER_ADDR_INFO, give the RX descriptor + * (Assumption -- BUFFER_ADDR_INFO is the + * first field in the descriptor structure) + */ +#define HAL_RX_BUF_ADDR_INFO_GET(ring_desc) \ + ((hal_link_desc_t)(ring_desc)) + +#define HAL_RX_REO_BUF_ADDR_INFO_GET HAL_RX_BUF_ADDR_INFO_GET + +#define HAL_RX_WBM_BUF_ADDR_INFO_GET HAL_RX_BUF_ADDR_INFO_GET + +/** + * hal_rx_ret_buf_manager_get: Returns the "return_buffer_manager" + * from the BUFFER_ADDR_INFO structure + * given a REO destination ring descriptor. + * @ ring_desc: RX(REO/WBM release) destination ring descriptor + * + * Return: uint8_t (value of the return_buffer_manager) + */ +static inline +uint8_t hal_rx_ret_buf_manager_get(hal_ring_desc_t ring_desc) +{ + /* + * The following macro takes buf_addr_info as argument, + * but since buf_addr_info is the first field in ring_desc + * Hence the following call is OK + */ + return HAL_RX_BUF_RBM_GET(ring_desc); +} + + +/******************************************************************************* + * RX WBM ERROR APIS + ******************************************************************************/ + + +#define HAL_RX_WBM_BUF_TYPE_GET(wbm_desc) (((*(((uint32_t *) wbm_desc)+ \ + (WBM_RELEASE_RING_2_BUFFER_OR_DESC_TYPE_OFFSET >> 2))) & \ + WBM_RELEASE_RING_2_BUFFER_OR_DESC_TYPE_MASK) >> \ + WBM_RELEASE_RING_2_BUFFER_OR_DESC_TYPE_LSB) + +/** + * enum - hal_rx_wbm_reo_push_reason: Indicates why REO pushed + * the frame to this release ring + * + * @ HAL_RX_WBM_REO_PSH_RSN_ERROR : Reo detected an error and pushed this + * frame to this queue + * @ HAL_RX_WBM_REO_PSH_RSN_ROUTE: Reo pushed the frame to this queue per + * received routing instructions. No error within REO was detected + */ +enum hal_rx_wbm_reo_push_reason { + HAL_RX_WBM_REO_PSH_RSN_ERROR = 0, + HAL_RX_WBM_REO_PSH_RSN_ROUTE, +}; + + +/** + * enum hal_rx_wbm_rxdma_push_reason: Indicates why REO pushed the frame to + * this release ring + * + * @ HAL_RX_WBM_RXDMA_PSH_RSN_ERROR : RXDMA detected an error and pushed + * this frame to this queue + * @ HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE: RXDMA pushed the frame to this queue + * per received routing instructions. No error within RXDMA was detected + */ +enum hal_rx_wbm_rxdma_push_reason { + HAL_RX_WBM_RXDMA_PSH_RSN_ERROR = 0, + HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE, +}; + + +#define HAL_RX_WBM_FIRST_MSDU_GET(wbm_desc) \ + (((*(((uint32_t *) wbm_desc) + \ + (WBM_RELEASE_RING_4_FIRST_MSDU_OFFSET >> 2))) & \ + WBM_RELEASE_RING_4_FIRST_MSDU_MASK) >> \ + WBM_RELEASE_RING_4_FIRST_MSDU_LSB) + +#define HAL_RX_WBM_LAST_MSDU_GET(wbm_desc) \ + (((*(((uint32_t *) wbm_desc) + \ + (WBM_RELEASE_RING_4_LAST_MSDU_OFFSET >> 2))) & \ + WBM_RELEASE_RING_4_LAST_MSDU_MASK) >> \ + WBM_RELEASE_RING_4_LAST_MSDU_LSB) + +#define HAL_RX_WBM_BUF_COOKIE_GET(wbm_desc) \ + HAL_RX_BUF_COOKIE_GET(&((struct wbm_release_ring *) \ + wbm_desc)->released_buff_or_desc_addr_info) + +/** + * hal_rx_dump_rx_attention_tlv: dump RX attention TLV in structured + * humman readable format. + * @ rx_attn: pointer the rx_attention TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static inline void hal_rx_dump_rx_attention_tlv(struct rx_attention *rx_attn, + uint8_t dbg_level) +{ + hal_verbose_debug( + "rx_attention tlv (1/2) - " + "rxpcu_mpdu_filter_in_category: %x " + "sw_frame_group_id: %x " + "reserved_0: %x " + "phy_ppdu_id: %x " + "first_mpdu : %x " + "reserved_1a: %x " + "mcast_bcast: %x " + "ast_index_not_found: %x " + "ast_index_timeout: %x " + "power_mgmt: %x " + "non_qos: %x " + "null_data: %x " + "mgmt_type: %x " + "ctrl_type: %x " + "more_data: %x " + "eosp: %x " + "a_msdu_error: %x " + "fragment_flag: %x " + "order: %x " + "cce_match: %x " + "overflow_err: %x " + "msdu_length_err: %x " + "tcp_udp_chksum_fail: %x " + "ip_chksum_fail: %x " + "sa_idx_invalid: %x " + "da_idx_invalid: %x " + "reserved_1b: %x " + "rx_in_tx_decrypt_byp: %x ", + rx_attn->rxpcu_mpdu_filter_in_category, + rx_attn->sw_frame_group_id, + rx_attn->reserved_0, + rx_attn->phy_ppdu_id, + rx_attn->first_mpdu, + rx_attn->reserved_1a, + rx_attn->mcast_bcast, + rx_attn->ast_index_not_found, + rx_attn->ast_index_timeout, + rx_attn->power_mgmt, + rx_attn->non_qos, + rx_attn->null_data, + rx_attn->mgmt_type, + rx_attn->ctrl_type, + rx_attn->more_data, + rx_attn->eosp, + rx_attn->a_msdu_error, + rx_attn->fragment_flag, + rx_attn->order, + rx_attn->cce_match, + rx_attn->overflow_err, + rx_attn->msdu_length_err, + rx_attn->tcp_udp_chksum_fail, + rx_attn->ip_chksum_fail, + rx_attn->sa_idx_invalid, + rx_attn->da_idx_invalid, + rx_attn->reserved_1b, + rx_attn->rx_in_tx_decrypt_byp); + + hal_verbose_debug( + "rx_attention tlv (2/2) - " + "encrypt_required: %x " + "directed: %x " + "buffer_fragment: %x " + "mpdu_length_err: %x " + "tkip_mic_err: %x " + "decrypt_err: %x " + "unencrypted_frame_err: %x " + "fcs_err: %x " + "flow_idx_timeout: %x " + "flow_idx_invalid: %x " + "wifi_parser_error: %x " + "amsdu_parser_error: %x " + "sa_idx_timeout: %x " + "da_idx_timeout: %x " + "msdu_limit_error: %x " + "da_is_valid: %x " + "da_is_mcbc: %x " + "sa_is_valid: %x " + "decrypt_status_code: %x " + "rx_bitmap_not_updated: %x " + "reserved_2: %x " + "msdu_done: %x ", + rx_attn->encrypt_required, + rx_attn->directed, + rx_attn->buffer_fragment, + rx_attn->mpdu_length_err, + rx_attn->tkip_mic_err, + rx_attn->decrypt_err, + rx_attn->unencrypted_frame_err, + rx_attn->fcs_err, + rx_attn->flow_idx_timeout, + rx_attn->flow_idx_invalid, + rx_attn->wifi_parser_error, + rx_attn->amsdu_parser_error, + rx_attn->sa_idx_timeout, + rx_attn->da_idx_timeout, + rx_attn->msdu_limit_error, + rx_attn->da_is_valid, + rx_attn->da_is_mcbc, + rx_attn->sa_is_valid, + rx_attn->decrypt_status_code, + rx_attn->rx_bitmap_not_updated, + rx_attn->reserved_2, + rx_attn->msdu_done); +} + +static inline void hal_rx_dump_mpdu_start_tlv(struct rx_mpdu_start *mpdu_start, + uint8_t dbg_level, + struct hal_soc *hal) +{ + + hal->ops->hal_rx_dump_mpdu_start_tlv(mpdu_start, dbg_level); +} +/** + * hal_rx_dump_msdu_end_tlv: dump RX msdu_end TLV in structured + * human readable format. + * @ msdu_end: pointer the msdu_end TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static inline void hal_rx_dump_msdu_end_tlv(struct hal_soc *hal_soc, + struct rx_msdu_end *msdu_end, + uint8_t dbg_level) +{ + hal_soc->ops->hal_rx_dump_msdu_end_tlv(msdu_end, dbg_level); +} + +/** + * hal_rx_dump_mpdu_end_tlv: dump RX mpdu_end TLV in structured + * human readable format. + * @ mpdu_end: pointer the mpdu_end TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static inline void hal_rx_dump_mpdu_end_tlv(struct rx_mpdu_end *mpdu_end, + uint8_t dbg_level) +{ + hal_verbose_debug( + "rx_mpdu_end tlv - " + "rxpcu_mpdu_filter_in_category: %x " + "sw_frame_group_id: %x " + "phy_ppdu_id: %x " + "unsup_ktype_short_frame: %x " + "rx_in_tx_decrypt_byp: %x " + "overflow_err: %x " + "mpdu_length_err: %x " + "tkip_mic_err: %x " + "decrypt_err: %x " + "unencrypted_frame_err: %x " + "pn_fields_contain_valid_info: %x " + "fcs_err: %x " + "msdu_length_err: %x " + "rxdma0_destination_ring: %x " + "rxdma1_destination_ring: %x " + "decrypt_status_code: %x " + "rx_bitmap_not_updated: %x ", + mpdu_end->rxpcu_mpdu_filter_in_category, + mpdu_end->sw_frame_group_id, + mpdu_end->phy_ppdu_id, + mpdu_end->unsup_ktype_short_frame, + mpdu_end->rx_in_tx_decrypt_byp, + mpdu_end->overflow_err, + mpdu_end->mpdu_length_err, + mpdu_end->tkip_mic_err, + mpdu_end->decrypt_err, + mpdu_end->unencrypted_frame_err, + mpdu_end->pn_fields_contain_valid_info, + mpdu_end->fcs_err, + mpdu_end->msdu_length_err, + mpdu_end->rxdma0_destination_ring, + mpdu_end->rxdma1_destination_ring, + mpdu_end->decrypt_status_code, + mpdu_end->rx_bitmap_not_updated); +} + +#ifdef NO_RX_PKT_HDR_TLV +static inline void hal_rx_dump_pkt_hdr_tlv(struct rx_pkt_tlvs *pkt_tlvs, + uint8_t dbg_level) +{ +} +#else +/** + * hal_rx_dump_pkt_hdr_tlv: dump RX pkt header TLV in hex format + * @ pkt_hdr_tlv: pointer the pkt_hdr_tlv in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static inline void hal_rx_dump_pkt_hdr_tlv(struct rx_pkt_tlvs *pkt_tlvs, + uint8_t dbg_level) +{ + struct rx_pkt_hdr_tlv *pkt_hdr_tlv = &pkt_tlvs->pkt_hdr_tlv; + + hal_verbose_debug( + "\n---------------\n" + "rx_pkt_hdr_tlv \n" + "---------------\n" + "phy_ppdu_id %d ", + pkt_hdr_tlv->phy_ppdu_id); + + hal_verbose_hex_dump(pkt_hdr_tlv->rx_pkt_hdr, 128); +} +#endif + +/** + * hal_srng_ring_id_get: API to retrieve ring id from hal ring + * structure + * @hal_ring: pointer to hal_srng structure + * + * Return: ring_id + */ +static inline uint8_t hal_srng_ring_id_get(hal_ring_handle_t hal_ring_hdl) +{ + return ((struct hal_srng *)hal_ring_hdl)->ring_id; +} + +/* Rx MSDU link pointer info */ +struct hal_rx_msdu_link_ptr_info { + struct rx_msdu_link msdu_link; + struct hal_buf_info msdu_link_buf_info; +}; + +/** + * hal_rx_get_pkt_tlvs(): Function to retrieve pkt tlvs from nbuf + * + * @nbuf: Pointer to data buffer field + * Returns: pointer to rx_pkt_tlvs + */ +static inline +struct rx_pkt_tlvs *hal_rx_get_pkt_tlvs(uint8_t *rx_buf_start) +{ + return (struct rx_pkt_tlvs *)rx_buf_start; +} + +/** + * hal_rx_get_mpdu_info(): Function to retrieve mpdu info from pkt tlvs + * + * @pkt_tlvs: Pointer to pkt_tlvs + * Returns: pointer to rx_mpdu_info structure + */ +static inline +struct rx_mpdu_info *hal_rx_get_mpdu_info(struct rx_pkt_tlvs *pkt_tlvs) +{ + return &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start.rx_mpdu_info_details; +} + +#define DOT11_SEQ_FRAG_MASK 0x000f +#define DOT11_FC1_MORE_FRAG_OFFSET 0x04 + +/** + * hal_rx_get_rx_fragment_number(): Function to retrieve rx fragment number + * + * @nbuf: Network buffer + * Returns: rx fragment number + */ +static inline +uint8_t hal_rx_get_rx_fragment_number(struct hal_soc *hal_soc, + uint8_t *buf) +{ + return hal_soc->ops->hal_rx_get_rx_fragment_number(buf); +} + +#define HAL_RX_MPDU_GET_FRAME_CONTROL_FIELD(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_14_MPDU_FRAME_CONTROL_FIELD_OFFSET)), \ + RX_MPDU_INFO_14_MPDU_FRAME_CONTROL_FIELD_MASK, \ + RX_MPDU_INFO_14_MPDU_FRAME_CONTROL_FIELD_LSB)) +/** + * hal_rx_get_rx_more_frag_bit(): Function to retrieve more fragment bit + * + * @nbuf: Network buffer + * Returns: rx more fragment bit + */ +static inline +uint8_t hal_rx_get_rx_more_frag_bit(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + uint16_t frame_ctrl = 0; + + frame_ctrl = HAL_RX_MPDU_GET_FRAME_CONTROL_FIELD(rx_mpdu_info) >> + DOT11_FC1_MORE_FRAG_OFFSET; + + /* more fragment bit if at offset bit 4 */ + return frame_ctrl; +} + +/** + * hal_rx_get_frame_ctrl_field(): Function to retrieve frame control field + * + * @nbuf: Network buffer + * Returns: rx more fragment bit + * + */ +static inline +uint16_t hal_rx_get_frame_ctrl_field(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + uint16_t frame_ctrl = 0; + + frame_ctrl = HAL_RX_MPDU_GET_FRAME_CONTROL_FIELD(rx_mpdu_info); + + return frame_ctrl; +} + +/* + * hal_rx_msdu_is_wlan_mcast(): Check if the buffer is for multicast address + * + * @nbuf: Network buffer + * Returns: flag to indicate whether the nbuf has MC/BC address + */ +static inline +uint32_t hal_rx_msdu_is_wlan_mcast(qdf_nbuf_t nbuf) +{ + uint8 *buf = qdf_nbuf_data(nbuf); + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn; + + return rx_attn->mcast_bcast; +} + +/* + * hal_rx_get_mpdu_sequence_control_valid(): Get mpdu sequence control valid + * @hal_soc_hdl: hal soc handle + * @nbuf: Network buffer + * + * Return: value of sequence control valid field + */ +static inline +uint8_t hal_rx_get_mpdu_sequence_control_valid(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_get_mpdu_sequence_control_valid(buf); +} + +/* + * hal_rx_get_mpdu_frame_control_valid(): Retrieves mpdu frame control valid + * @hal_soc_hdl: hal soc handle + * @nbuf: Network buffer + * + * Returns: value of frame control valid field + */ +static inline +uint8_t hal_rx_get_mpdu_frame_control_valid(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_get_mpdu_frame_control_valid(buf); +} + +/** + * hal_rx_get_mpdu_mac_ad4_valid(): Retrieves if mpdu 4th addr is valid + * @hal_soc_hdl: hal soc handle + * @nbuf: Network buffer + * Returns: value of mpdu 4th address valid field + */ +static inline +bool hal_rx_get_mpdu_mac_ad4_valid(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_get_mpdu_mac_ad4_valid(buf); +} + +/* + * hal_rx_clear_mpdu_desc_info(): Clears mpdu_desc_info + * + * @rx_mpdu_desc_info: HAL view of rx mpdu desc info + * Returns: None + */ +static inline +void hal_rx_clear_mpdu_desc_info( + struct hal_rx_mpdu_desc_info *rx_mpdu_desc_info) +{ + qdf_mem_zero(rx_mpdu_desc_info, + sizeof(*rx_mpdu_desc_info)); +} + +/* + * hal_rx_clear_msdu_link_ptr(): Clears msdu_link_ptr + * + * @msdu_link_ptr: HAL view of msdu link ptr + * @size: number of msdu link pointers + * Returns: None + */ +static inline +void hal_rx_clear_msdu_link_ptr(struct hal_rx_msdu_link_ptr_info *msdu_link_ptr, + int size) +{ + qdf_mem_zero(msdu_link_ptr, + (sizeof(*msdu_link_ptr) * size)); +} + +/* + * hal_rx_chain_msdu_links() - Chains msdu link pointers + * @msdu_link_ptr: msdu link pointer + * @mpdu_desc_info: mpdu descriptor info + * + * Build a list of msdus using msdu link pointer. If the + * number of msdus are more, chain them together + * + * Returns: Number of processed msdus + */ +static inline +int hal_rx_chain_msdu_links(struct hal_soc *hal_soc, qdf_nbuf_t msdu, + struct hal_rx_msdu_link_ptr_info *msdu_link_ptr_info, + struct hal_rx_mpdu_desc_info *mpdu_desc_info) +{ + int j; + struct rx_msdu_link *msdu_link_ptr = + &msdu_link_ptr_info->msdu_link; + struct rx_msdu_link *prev_msdu_link_ptr = NULL; + struct rx_msdu_details *msdu_details = + hal_rx_link_desc_msdu0_ptr(msdu_link_ptr, hal_soc); + uint8_t num_msdus = mpdu_desc_info->msdu_count; + struct rx_msdu_desc_info *msdu_desc_info; + uint8_t fragno, more_frag; + uint8_t *rx_desc_info; + struct hal_rx_msdu_list msdu_list; + + for (j = 0; j < num_msdus; j++) { + msdu_desc_info = + hal_rx_msdu_desc_info_get_ptr(&msdu_details[j], + hal_soc); + msdu_list.msdu_info[j].msdu_flags = + HAL_RX_MSDU_FLAGS_GET(msdu_desc_info); + msdu_list.msdu_info[j].msdu_len = + HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info); + msdu_list.sw_cookie[j] = HAL_RX_BUF_COOKIE_GET( + &msdu_details[j].buffer_addr_info_details); + } + + /* Chain msdu links together */ + if (prev_msdu_link_ptr) { + /* 31-0 bits of the physical address */ + prev_msdu_link_ptr-> + next_msdu_link_desc_addr_info.buffer_addr_31_0 = + msdu_link_ptr_info->msdu_link_buf_info.paddr & + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK; + /* 39-32 bits of the physical address */ + prev_msdu_link_ptr-> + next_msdu_link_desc_addr_info.buffer_addr_39_32 + = ((msdu_link_ptr_info->msdu_link_buf_info.paddr + >> 32) & + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK); + prev_msdu_link_ptr-> + next_msdu_link_desc_addr_info.sw_buffer_cookie = + msdu_link_ptr_info->msdu_link_buf_info.sw_cookie; + } + + /* There is space for only 6 MSDUs in a MSDU link descriptor */ + if (num_msdus < HAL_RX_NUM_MSDU_DESC) { + /* mark first and last MSDUs */ + rx_desc_info = qdf_nbuf_data(msdu); + fragno = hal_rx_get_rx_fragment_number(hal_soc, rx_desc_info); + more_frag = hal_rx_get_rx_more_frag_bit(rx_desc_info); + + /* TODO: create skb->fragslist[] */ + + if (more_frag == 0) { + msdu_list.msdu_info[num_msdus].msdu_flags |= + RX_MSDU_DESC_INFO_0_LAST_MSDU_IN_MPDU_FLAG_MASK; + } else if (fragno == 1) { + msdu_list.msdu_info[num_msdus].msdu_flags |= + RX_MSDU_DESC_INFO_0_FIRST_MSDU_IN_MPDU_FLAG_MASK; + + msdu_list.msdu_info[num_msdus].msdu_flags |= + RX_MSDU_DESC_INFO_0_MSDU_CONTINUATION_MASK; + } + + num_msdus++; + + /* Number of MSDUs per mpdu descriptor is updated */ + mpdu_desc_info->msdu_count += num_msdus; + } else { + num_msdus = 0; + prev_msdu_link_ptr = msdu_link_ptr; + } + + return num_msdus; +} + +/* + * hal_rx_defrag_update_src_ring_desc(): updates reo src ring desc + * + * @ring_desc: HAL view of ring descriptor + * @mpdu_des_info: saved mpdu desc info + * @msdu_link_ptr: saved msdu link ptr + * + * API used explicitly for rx defrag to update ring desc with + * mpdu desc info and msdu link ptr before reinjecting the + * packet back to REO + * + * Returns: None + */ +static inline +void hal_rx_defrag_update_src_ring_desc( + hal_ring_desc_t ring_desc, + void *saved_mpdu_desc_info, + struct hal_rx_msdu_link_ptr_info *saved_msdu_link_ptr) +{ + struct reo_entrance_ring *reo_ent_ring; + struct rx_mpdu_desc_info *reo_ring_mpdu_desc_info; + struct hal_buf_info buf_info; + + reo_ent_ring = (struct reo_entrance_ring *)ring_desc; + reo_ring_mpdu_desc_info = &reo_ent_ring-> + reo_level_mpdu_frame_info.rx_mpdu_desc_info_details; + + qdf_mem_copy(&reo_ring_mpdu_desc_info, saved_mpdu_desc_info, + sizeof(*reo_ring_mpdu_desc_info)); + + /* + * TODO: Check for additional fields that need configuration in + * reo_ring_mpdu_desc_info + */ + + /* Update msdu_link_ptr in the reo entrance ring */ + hal_rx_reo_buf_paddr_get(ring_desc, &buf_info); + buf_info.paddr = saved_msdu_link_ptr->msdu_link_buf_info.paddr; + buf_info.sw_cookie = + saved_msdu_link_ptr->msdu_link_buf_info.sw_cookie; +} + +/* + * hal_rx_defrag_save_info_from_ring_desc(): Saves info from ring desc + * + * @msdu_link_desc_va: msdu link descriptor handle + * @msdu_link_ptr_info: HAL view of msdu link pointer info + * + * API used to save msdu link information along with physical + * address. The API also copues the sw cookie. + * + * Returns: None + */ +static inline +void hal_rx_defrag_save_info_from_ring_desc(void *msdu_link_desc_va, + struct hal_rx_msdu_link_ptr_info *msdu_link_ptr_info, + struct hal_buf_info *hbi) +{ + struct rx_msdu_link *msdu_link_ptr = + (struct rx_msdu_link *)msdu_link_desc_va; + + qdf_mem_copy(&msdu_link_ptr_info->msdu_link, msdu_link_ptr, + sizeof(struct rx_msdu_link)); + + msdu_link_ptr_info->msdu_link_buf_info.paddr = hbi->paddr; + msdu_link_ptr_info->msdu_link_buf_info.sw_cookie = hbi->sw_cookie; +} + +/* + * hal_rx_get_desc_len(): Returns rx descriptor length + * + * Returns the size of rx_pkt_tlvs which follows the + * data in the nbuf + * + * Returns: Length of rx descriptor + */ +static inline +uint16_t hal_rx_get_desc_len(void) +{ + return SIZE_OF_DATA_RX_TLV; +} + +/* + * hal_rx_reo_ent_rxdma_push_reason_get(): Retrieves RXDMA push reason from + * reo_entrance_ring descriptor + * + * @reo_ent_desc: reo_entrance_ring descriptor + * Returns: value of rxdma_push_reason + */ +static inline +uint8_t hal_rx_reo_ent_rxdma_push_reason_get(hal_rxdma_desc_t reo_ent_desc) +{ + return _HAL_MS((*_OFFSET_TO_WORD_PTR(reo_ent_desc, + REO_ENTRANCE_RING_6_RXDMA_PUSH_REASON_OFFSET)), + REO_ENTRANCE_RING_6_RXDMA_PUSH_REASON_MASK, + REO_ENTRANCE_RING_6_RXDMA_PUSH_REASON_LSB); +} + +/** + * hal_rx_reo_ent_rxdma_error_code_get(): Retrieves RXDMA error code from + * reo_entrance_ring descriptor + * @reo_ent_desc: reo_entrance_ring descriptor + * Return: value of rxdma_error_code + */ +static inline +uint8_t hal_rx_reo_ent_rxdma_error_code_get(hal_rxdma_desc_t reo_ent_desc) +{ + return _HAL_MS((*_OFFSET_TO_WORD_PTR(reo_ent_desc, + REO_ENTRANCE_RING_6_RXDMA_ERROR_CODE_OFFSET)), + REO_ENTRANCE_RING_6_RXDMA_ERROR_CODE_MASK, + REO_ENTRANCE_RING_6_RXDMA_ERROR_CODE_LSB); +} + +/** + * hal_rx_wbm_err_info_get(): Retrieves WBM error code and reason and + * save it to hal_wbm_err_desc_info structure passed by caller + * @wbm_desc: wbm ring descriptor + * @wbm_er_info: hal_wbm_err_desc_info structure, output parameter. + * Return: void + */ +static inline void hal_rx_wbm_err_info_get(void *wbm_desc, + struct hal_wbm_err_desc_info *wbm_er_info, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_rx_wbm_err_info_get(wbm_desc, (void *)wbm_er_info); +} + +/** + * hal_rx_wbm_err_info_set_in_tlv(): Save the wbm error codes and reason to + * the reserved bytes of rx_tlv_hdr + * @buf: start of rx_tlv_hdr + * @wbm_er_info: hal_wbm_err_desc_info structure + * Return: void + */ +static inline void hal_rx_wbm_err_info_set_in_tlv(uint8_t *buf, + struct hal_wbm_err_desc_info *wbm_er_info) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + + qdf_mem_copy(pkt_tlvs->rx_padding0, wbm_er_info, + sizeof(struct hal_wbm_err_desc_info)); +} + +/** + * hal_rx_wbm_err_info_get_from_tlv(): retrieve wbm error codes and reason from + * the reserved bytes of rx_tlv_hdr. + * @buf: start of rx_tlv_hdr + * @wbm_er_info: hal_wbm_err_desc_info structure, output parameter. + * Return: void + */ +static inline void hal_rx_wbm_err_info_get_from_tlv(uint8_t *buf, + struct hal_wbm_err_desc_info *wbm_er_info) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + + qdf_mem_copy(wbm_er_info, pkt_tlvs->rx_padding0, + sizeof(struct hal_wbm_err_desc_info)); +} + +#define HAL_RX_MSDU_START_NSS_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start), \ + RX_MSDU_START_5_NSS_OFFSET)), \ + RX_MSDU_START_5_NSS_MASK, \ + RX_MSDU_START_5_NSS_LSB)) + +/** + * hal_rx_mon_hw_desc_get_mpdu_status: Retrieve MPDU status + * + * @ hal_soc: HAL version of the SOC pointer + * @ hw_desc_addr: Start address of Rx HW TLVs + * @ rs: Status for monitor mode + * + * Return: void + */ +static inline +void hal_rx_mon_hw_desc_get_mpdu_status(hal_soc_handle_t hal_soc_hdl, + void *hw_desc_addr, + struct mon_rx_status *rs) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_rx_mon_hw_desc_get_mpdu_status(hw_desc_addr, rs); +} + +/* + * hal_rx_get_tlv(): API to get the tlv + * + * @hal_soc: HAL version of the SOC pointer + * @rx_tlv: TLV data extracted from the rx packet + * Return: uint8_t + */ +static inline uint8_t hal_rx_get_tlv(struct hal_soc *hal_soc, void *rx_tlv) +{ + return hal_soc->ops->hal_rx_get_tlv(rx_tlv); +} + +/* + * hal_rx_msdu_start_nss_get(): API to get the NSS + * Interval from rx_msdu_start + * + * @hal_soc: HAL version of the SOC pointer + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(nss) + */ +static inline +uint32_t hal_rx_msdu_start_nss_get(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_start_nss_get(buf); +} + +/** + * hal_rx_dump_msdu_start_tlv: dump RX msdu_start TLV in structured + * human readable format. + * @ msdu_start: pointer the msdu_start TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static inline void hal_rx_dump_msdu_start_tlv(struct hal_soc *hal_soc, + struct rx_msdu_start *msdu_start, + uint8_t dbg_level) +{ + hal_soc->ops->hal_rx_dump_msdu_start_tlv(msdu_start, dbg_level); +} + +/** + * hal_rx_mpdu_start_tid_get - Return tid info from the rx mpdu start + * info details + * + * @ buf - Pointer to buffer containing rx pkt tlvs. + * + * + */ +static inline uint32_t hal_rx_mpdu_start_tid_get(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_mpdu_start_tid_get(buf); +} + +/* + * hal_rx_msdu_start_reception_type_get(): API to get the reception type + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(reception_type) + */ +static inline +uint32_t hal_rx_msdu_start_reception_type_get(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_start_reception_type_get(buf); +} + +/** + * hal_rx_dump_pkt_tlvs: API to print all member elements of + * RX TLVs + * @ buf: pointer the pkt buffer. + * @ dbg_level: log level. + * + * Return: void + */ +static inline void hal_rx_dump_pkt_tlvs(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf, uint8_t dbg_level) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + struct rx_mpdu_end *mpdu_end = &pkt_tlvs->mpdu_end_tlv.rx_mpdu_end; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_rx_dump_rx_attention_tlv(rx_attn, dbg_level); + hal_rx_dump_mpdu_start_tlv(mpdu_start, dbg_level, hal_soc); + hal_rx_dump_msdu_start_tlv(hal_soc, msdu_start, dbg_level); + hal_rx_dump_mpdu_end_tlv(mpdu_end, dbg_level); + hal_rx_dump_msdu_end_tlv(hal_soc, msdu_end, dbg_level); + hal_rx_dump_pkt_hdr_tlv(pkt_tlvs, dbg_level); +} + + +/** + * hal_reo_status_get_header_generic - Process reo desc info + * @d - Pointer to reo descriptior + * @b - tlv type info + * @h - Pointer to hal_reo_status_header where info to be stored + * @hal- pointer to hal_soc structure + * Return - none. + * + */ +static inline +void hal_reo_status_get_header(uint32_t *d, int b, + void *h, struct hal_soc *hal_soc) +{ + hal_soc->ops->hal_reo_status_get_header(d, b, h); +} + +/** + * hal_rx_desc_is_first_msdu() - Check if first msdu + * + * @hal_soc_hdl: hal_soc handle + * @hw_desc_addr: hardware descriptor address + * + * Return: 0 - success/ non-zero failure + */ +static inline +uint32_t hal_rx_desc_is_first_msdu(hal_soc_handle_t hal_soc_hdl, + void *hw_desc_addr) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_desc_is_first_msdu(hw_desc_addr); +} + +static inline +uint32_t +HAL_RX_DESC_GET_DECAP_FORMAT(void *hw_desc_addr) { + struct rx_msdu_start *rx_msdu_start; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + + rx_msdu_start = &rx_desc->msdu_start_tlv.rx_msdu_start; + + return HAL_RX_GET(rx_msdu_start, RX_MSDU_START_2, DECAP_FORMAT); +} + +#ifdef NO_RX_PKT_HDR_TLV +static inline +uint8_t * +HAL_RX_DESC_GET_80211_HDR(void *hw_desc_addr) { + uint8_t *rx_pkt_hdr; + struct rx_mon_pkt_tlvs *rx_desc = + (struct rx_mon_pkt_tlvs *)hw_desc_addr; + + rx_pkt_hdr = &rx_desc->pkt_hdr_tlv.rx_pkt_hdr[0]; + + return rx_pkt_hdr; +} +#else +static inline +uint8_t * +HAL_RX_DESC_GET_80211_HDR(void *hw_desc_addr) { + uint8_t *rx_pkt_hdr; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + + rx_pkt_hdr = &rx_desc->pkt_hdr_tlv.rx_pkt_hdr[0]; + + return rx_pkt_hdr; +} +#endif + +static inline +bool HAL_IS_DECAP_FORMAT_RAW(hal_soc_handle_t hal_soc_hdl, + uint8_t *rx_tlv_hdr) +{ + uint8_t decap_format; + + if (hal_rx_desc_is_first_msdu(hal_soc_hdl, rx_tlv_hdr)) { + decap_format = HAL_RX_DESC_GET_DECAP_FORMAT(rx_tlv_hdr); + if (decap_format == HAL_HW_RX_DECAP_FORMAT_RAW) + return true; + } + + return false; +} + +/** + * hal_rx_msdu_fse_metadata_get: API to get FSE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: fse metadata value from MSDU END TLV + */ +static inline uint32_t +hal_rx_msdu_fse_metadata_get(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_fse_metadata_get(buf); +} + +/** + * hal_rx_msdu_flow_idx_get: API to get flow index + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index value from MSDU END TLV + */ +static inline uint32_t +hal_rx_msdu_flow_idx_get(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_flow_idx_get(buf); +} + +/** + * hal_rx_msdu_get_reo_destination_indication: API to get reo + * destination index from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * @reo_destination_indication: pointer to return value of + * reo_destination_indication + * + * Return: reo_destination_indication value from MSDU END TLV + */ +static inline void +hal_rx_msdu_get_reo_destination_indication(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf, + uint32_t *reo_destination_indication) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if ((!hal_soc) || (!hal_soc->ops)) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return; + } + + hal_soc->ops->hal_rx_msdu_get_reo_destination_indication(buf, + reo_destination_indication); +} + +/** + * hal_rx_msdu_flow_idx_timeout: API to get flow index timeout + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index timeout value from MSDU END TLV + */ +static inline bool +hal_rx_msdu_flow_idx_timeout(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_flow_idx_timeout(buf); +} + +/** + * hal_rx_msdu_flow_idx_invalid: API to get flow index invalid + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index invalid value from MSDU END TLV + */ +static inline bool +hal_rx_msdu_flow_idx_invalid(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_flow_idx_invalid(buf); +} + +/** + * hal_rx_hw_desc_get_ppduid_get() - Retrieve ppdu id + * @hal_soc_hdl: hal_soc handle + * @hw_desc_addr: hardware descriptor address + * + * Return: 0 - success/ non-zero failure + */ +static inline +uint32_t hal_rx_hw_desc_get_ppduid_get(hal_soc_handle_t hal_soc_hdl, + void *hw_desc_addr) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_hw_desc_get_ppduid_get(hw_desc_addr); +} + +/** + * hal_rx_msdu_end_sa_sw_peer_id_get() - get sw peer id + * @hal_soc_hdl: hal_soc handle + * @buf: rx tlv address + * + * Return: sw peer id + */ +static inline +uint32_t hal_rx_msdu_end_sa_sw_peer_id_get(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if ((!hal_soc) || (!hal_soc->ops)) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return QDF_STATUS_E_INVAL; + } + + if (hal_soc->ops->hal_rx_msdu_end_sa_sw_peer_id_get) + return hal_soc->ops->hal_rx_msdu_end_sa_sw_peer_id_get(buf); + + return QDF_STATUS_E_INVAL; +} + +static inline +void *hal_rx_msdu0_buffer_addr_lsb(hal_soc_handle_t hal_soc_hdl, + void *link_desc_addr) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu0_buffer_addr_lsb(link_desc_addr); +} + +static inline +void *hal_rx_msdu_desc_info_ptr_get(hal_soc_handle_t hal_soc_hdl, + void *msdu_addr) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_desc_info_ptr_get(msdu_addr); +} + +static inline +void *hal_ent_mpdu_desc_info(hal_soc_handle_t hal_soc_hdl, + void *hw_addr) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_ent_mpdu_desc_info(hw_addr); +} + +static inline +void *hal_dst_mpdu_desc_info(hal_soc_handle_t hal_soc_hdl, + void *hw_addr) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_dst_mpdu_desc_info(hw_addr); +} + +static inline +uint8_t hal_rx_get_fc_valid(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_get_fc_valid(buf); +} + +static inline +uint8_t hal_rx_get_to_ds_flag(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_get_to_ds_flag(buf); +} + +static inline +uint8_t hal_rx_get_mac_addr2_valid(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_get_mac_addr2_valid(buf); +} + +static inline +uint8_t hal_rx_get_filter_category(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_get_filter_category(buf); +} + +static inline +uint32_t hal_rx_get_ppdu_id(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_get_ppdu_id(buf); +} + +/** + * hal_reo_config(): Set reo config parameters + * @soc: hal soc handle + * @reg_val: value to be set + * @reo_params: reo parameters + * + * Return: void + */ +static inline +void hal_reo_config(struct hal_soc *hal_soc, + uint32_t reg_val, + struct hal_reo_params *reo_params) +{ + hal_soc->ops->hal_reo_config(hal_soc, + reg_val, + reo_params); +} + +/** + * hal_rx_msdu_get_flow_params: API to get flow index, + * flow index invalid and flow index timeout from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * @flow_invalid: pointer to return value of flow_idx_valid + * @flow_timeout: pointer to return value of flow_idx_timeout + * @flow_index: pointer to return value of flow_idx + * + * Return: none + */ +static inline void +hal_rx_msdu_get_flow_params(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf, + bool *flow_invalid, + bool *flow_timeout, + uint32_t *flow_index) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if ((!hal_soc) || (!hal_soc->ops)) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return; + } + + if (hal_soc->ops->hal_rx_msdu_get_flow_params) + hal_soc->ops-> + hal_rx_msdu_get_flow_params(buf, + flow_invalid, + flow_timeout, + flow_index); +} + +static inline +uint16_t hal_rx_tlv_get_tcp_chksum(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_tlv_get_tcp_chksum(buf); +} + +static inline +uint16_t hal_rx_get_rx_sequence(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_get_rx_sequence(buf); +} + +static inline void +hal_rx_get_bb_info(hal_soc_handle_t hal_soc_hdl, + void *rx_tlv, + void *ppdu_info) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (hal_soc->ops->hal_rx_get_bb_info) + hal_soc->ops->hal_rx_get_bb_info(rx_tlv, ppdu_info); +} + +static inline void +hal_rx_get_rtt_info(hal_soc_handle_t hal_soc_hdl, + void *rx_tlv, + void *ppdu_info) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (hal_soc->ops->hal_rx_get_rtt_info) + hal_soc->ops->hal_rx_get_rtt_info(rx_tlv, ppdu_info); +} + +/** + * hal_rx_msdu_metadata_get(): API to get the + * fast path information from rx_msdu_end TLV + * + * @ hal_soc_hdl: DP soc handle + * @ buf: pointer to the start of RX PKT TLV headers + * @ msdu_metadata: Structure to hold msdu end information + * Return: none + */ +static inline void +hal_rx_msdu_metadata_get(hal_soc_handle_t hal_soc_hdl, uint8_t *buf, + struct hal_rx_msdu_metadata *msdu_md) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_rx_msdu_packet_metadata_get(buf, msdu_md); +} + +/** + * hal_rx_get_fisa_cumulative_l4_checksum: API to get cumulative_l4_checksum + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: cumulative_l4_checksum + */ +static inline uint16_t +hal_rx_get_fisa_cumulative_l4_checksum(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return 0; + } + + if (!hal_soc->ops->hal_rx_get_fisa_cumulative_l4_checksum) + return 0; + + return hal_soc->ops->hal_rx_get_fisa_cumulative_l4_checksum(buf); +} + +/** + * hal_rx_get_fisa_cumulative_ip_length: API to get cumulative_ip_length + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: cumulative_ip_length + */ +static inline uint16_t +hal_rx_get_fisa_cumulative_ip_length(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return 0; + } + + if (hal_soc->ops->hal_rx_get_fisa_cumulative_ip_length) + return hal_soc->ops->hal_rx_get_fisa_cumulative_ip_length(buf); + + return 0; +} + +/** + * hal_rx_get_udp_proto: API to get UDP proto field + * from rx_msdu_start TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: UDP proto field value + */ +static inline bool +hal_rx_get_udp_proto(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return 0; + } + + if (hal_soc->ops->hal_rx_get_udp_proto) + return hal_soc->ops->hal_rx_get_udp_proto(buf); + + return 0; +} + +/** + * hal_rx_get_fisa_flow_agg_continuation: API to get fisa flow_agg_continuation + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow_agg_continuation bit field value + */ +static inline bool +hal_rx_get_fisa_flow_agg_continuation(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return 0; + } + + if (hal_soc->ops->hal_rx_get_fisa_flow_agg_continuation) + return hal_soc->ops->hal_rx_get_fisa_flow_agg_continuation(buf); + + return 0; +} + +/** + * hal_rx_get_fisa_flow_agg_count: API to get fisa flow_agg count from + * rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow_agg count value + */ +static inline uint8_t +hal_rx_get_fisa_flow_agg_count(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return 0; + } + + if (hal_soc->ops->hal_rx_get_fisa_flow_agg_count) + return hal_soc->ops->hal_rx_get_fisa_flow_agg_count(buf); + + return 0; +} + +/** + * hal_rx_get_fisa_timeout: API to get fisa time out from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: fisa flow_agg timeout bit value + */ +static inline bool +hal_rx_get_fisa_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return 0; + } + + if (hal_soc->ops->hal_rx_get_fisa_timeout) + return hal_soc->ops->hal_rx_get_fisa_timeout(buf); + + return 0; +} + +/** + * hal_rx_buffer_addr_info_get_paddr(): get paddr/sw_cookie from + * structure + * @buf_addr_info: pointer to structure + * @buf_info: structure to return the buffer information including + * paddr/cookie + * + * return: None + */ +static inline +void hal_rx_buffer_addr_info_get_paddr(void *buf_addr_info, + struct hal_buf_info *buf_info) +{ + buf_info->paddr = + (HAL_RX_BUFFER_ADDR_31_0_GET(buf_addr_info) | + ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(buf_addr_info)) << 32)); + + buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(buf_addr_info); +} + +/** + * hal_rx_get_next_msdu_link_desc_buf_addr_info(): get next msdu link desc + * buffer addr info + * @link_desc_va: pointer to current msdu link Desc + * @next_addr_info: buffer to save next msdu link Desc buffer addr info + * + * return: None + */ +static inline +void hal_rx_get_next_msdu_link_desc_buf_addr_info( + void *link_desc_va, + struct buffer_addr_info *next_addr_info) +{ + struct rx_msdu_link *msdu_link = link_desc_va; + + if (!msdu_link) { + qdf_mem_zero(next_addr_info, + sizeof(struct buffer_addr_info)); + return; + } + + *next_addr_info = msdu_link->next_msdu_link_desc_addr_info; +} + +/** + * hal_rx_is_buf_addr_info_valid(): check is the buf_addr_info valid + * + * @buf_addr_info: pointer to buf_addr_info structure + * + * return: true: has valid paddr, false: not. + */ +static inline +bool hal_rx_is_buf_addr_info_valid( + struct buffer_addr_info *buf_addr_info) +{ + return (HAL_RX_BUFFER_ADDR_31_0_GET(buf_addr_info) == 0) ? + false : true; +} + +#define HAL_RX_ATTN_MSDU_LEN_ERR_GET(_rx_attn) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \ + RX_ATTENTION_1_MSDU_LENGTH_ERR_OFFSET)), \ + RX_ATTENTION_1_MSDU_LENGTH_ERR_MASK, \ + RX_ATTENTION_1_MSDU_LENGTH_ERR_LSB)) + +/** + * hal_rx_attn_msdu_len_err_get(): Get msdu_len_err value from + * rx attention tlvs + * @buf: pointer to rx pkt tlvs hdr + * + * Return: msdu_len_err value + */ +static inline uint32_t +hal_rx_attn_msdu_len_err_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn; + + return HAL_RX_ATTN_MSDU_LEN_ERR_GET(rx_attn); +} +#endif /* _HAL_RX_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_rx_flow.c b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_rx_flow.c new file mode 100644 index 0000000000000000000000000000000000000000..98d0923ea48c584612df6b89e3412eb9a69b3cbb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_rx_flow.c @@ -0,0 +1,774 @@ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" +#include "dp_types.h" +#include "hal_rx_flow.h" + +#if defined(WLAN_SUPPORT_RX_FISA) +void hal_rx_dump_fse_table(struct hal_rx_fst *fst) +{ + int i = 0; + struct rx_flow_search_entry *fse = + (struct rx_flow_search_entry *)fst->base_vaddr; + + dp_info("Number flow table entries %d", fst->add_flow_count); + for (i = 0; i < fst->max_entries; i++) { + if (fse[i].valid) { + dp_info("index %d:" + " src_ip_127_96 0x%x" + " src_ip_95_640 0x%x" + " src_ip_63_32 0x%x" + " src_ip_31_0 0x%x" + " dest_ip_127_96 0x%x" + " dest_ip_95_64 0x%x" + " dest_ip_63_32 0x%x" + " dest_ip_31_0 0x%x" + " src_port 0x%x" + " dest_port 0x%x" + " l4_protocol 0x%x" + " valid 0x%x" + " reo_destination_indication 0x%x" + " msdu_drop 0x%x" + " reo_destination_handler 0x%x" + " metadata 0x%x" + " aggregation_count0x%x" + " lro_eligible 0x%x" + " msdu_count 0x%x" + " msdu_byte_count 0x%x" + " timestamp 0x%x" + " cumulative_l4_checksum 0x%x" + " cumulative_ip_length 0x%x" + " tcp_sequence_number 0x%x", + i, + fse[i].src_ip_127_96, + fse[i].src_ip_95_64, + fse[i].src_ip_63_32, + fse[i].src_ip_31_0, + fse[i].dest_ip_127_96, + fse[i].dest_ip_95_64, + fse[i].dest_ip_63_32, + fse[i].dest_ip_31_0, + fse[i].src_port, + fse[i].dest_port, + fse[i].l4_protocol, + fse[i].valid, + fse[i].reo_destination_indication, + fse[i].msdu_drop, + fse[i].reo_destination_handler, + fse[i].metadata, + fse[i].aggregation_count, + fse[i].lro_eligible, + fse[i].msdu_count, + fse[i].msdu_byte_count, + fse[i].timestamp, + fse[i].cumulative_l4_checksum, + fse[i].cumulative_ip_length, + fse[i].tcp_sequence_number); + } + } +} +#else +void hal_rx_dump_fse_table(struct hal_rx_fst *fst) +{ +} +#endif + +/** + * hal_rx_flow_setup_fse() - Setup a flow search entry in HW FST + * @fst: Pointer to the Rx Flow Search Table + * @table_offset: offset into the table where the flow is to be setup + * @flow: Flow Parameters + * + * Return: Success/Failure + */ +#ifdef WLAN_SUPPORT_RX_FLOW_TAG +void * +hal_rx_flow_setup_fse(struct hal_rx_fst *fst, uint32_t table_offset, + struct hal_rx_flow *flow) +{ + uint8_t *fse; + bool fse_valid; + + if (table_offset >= fst->max_entries) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "HAL FSE table offset %u exceeds max entries %u", + table_offset, fst->max_entries); + return NULL; + } + + fse = (uint8_t *)fst->base_vaddr + + (table_offset * HAL_RX_FST_ENTRY_SIZE); + + fse_valid = HAL_GET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID); + + if (fse_valid) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "HAL FSE %pK already valid", fse); + return NULL; + } + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_0, SRC_IP_127_96) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_0, SRC_IP_127_96, + qdf_htonl(flow->tuple_info.src_ip_127_96)); + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_1, SRC_IP_95_64) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_1, SRC_IP_95_64, + qdf_htonl(flow->tuple_info.src_ip_95_64)); + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_2, SRC_IP_63_32) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_2, SRC_IP_63_32, + qdf_htonl(flow->tuple_info.src_ip_63_32)); + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_3, SRC_IP_31_0) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_3, SRC_IP_31_0, + qdf_htonl(flow->tuple_info.src_ip_31_0)); + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_4, DEST_IP_127_96) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_4, DEST_IP_127_96, + qdf_htonl(flow->tuple_info.dest_ip_127_96)); + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_5, DEST_IP_95_64) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_5, DEST_IP_95_64, + qdf_htonl(flow->tuple_info.dest_ip_95_64)); + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_6, DEST_IP_63_32) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_6, DEST_IP_63_32, + qdf_htonl(flow->tuple_info.dest_ip_63_32)); + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_7, DEST_IP_31_0) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_7, DEST_IP_31_0, + qdf_htonl(flow->tuple_info.dest_ip_31_0)); + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, DEST_PORT); + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, DEST_PORT) |= + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_8, DEST_PORT, + (flow->tuple_info.dest_port)); + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, SRC_PORT); + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, SRC_PORT) |= + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_8, SRC_PORT, + (flow->tuple_info.src_port)); + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL); + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL) |= + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL, + flow->tuple_info.l4_protocol); + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER); + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER) |= + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER, + flow->reo_destination_handler); + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID); + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID) |= + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, VALID, 1); + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_10, METADATA); + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_10, METADATA) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_10, METADATA, + flow->fse_metadata); + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, REO_DESTINATION_INDICATION); + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, REO_DESTINATION_INDICATION) |= + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_11, + REO_DESTINATION_INDICATION, + flow->reo_destination_indication); + + /* Reset all the other fields in FSE */ + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, RESERVED_9); + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, MSDU_DROP); + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, RESERVED_11); + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, MSDU_COUNT); + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_12, MSDU_BYTE_COUNT); + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_13, TIMESTAMP); + + return fse; +} +#elif defined(WLAN_SUPPORT_RX_FISA) +/** + * hal_rx_flow_setup_fse() - Setup a flow search entry in HW FST + * @fst: Pointer to the Rx Flow Search Table + * @table_offset: offset into the table where the flow is to be setup + * @flow: Flow Parameters + * + * Flow table entry fields are updated in host byte order, little endian order. + * + * Return: Success/Failure + */ +void * +hal_rx_flow_setup_fse(struct hal_rx_fst *fst, uint32_t table_offset, + struct hal_rx_flow *flow) +{ + uint8_t *fse; + bool fse_valid; + + if (table_offset >= fst->max_entries) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "HAL FSE table offset %u exceeds max entries %u", + table_offset, fst->max_entries); + return NULL; + } + + fse = (uint8_t *)fst->base_vaddr + + (table_offset * HAL_RX_FST_ENTRY_SIZE); + + fse_valid = HAL_GET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID); + + if (fse_valid) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "HAL FSE %pK already valid", fse); + return NULL; + } + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_0, SRC_IP_127_96) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_0, SRC_IP_127_96, + (flow->tuple_info.src_ip_127_96)); + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_1, SRC_IP_95_64) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_1, SRC_IP_95_64, + (flow->tuple_info.src_ip_95_64)); + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_2, SRC_IP_63_32) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_2, SRC_IP_63_32, + (flow->tuple_info.src_ip_63_32)); + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_3, SRC_IP_31_0) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_3, SRC_IP_31_0, + (flow->tuple_info.src_ip_31_0)); + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_4, DEST_IP_127_96) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_4, DEST_IP_127_96, + (flow->tuple_info.dest_ip_127_96)); + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_5, DEST_IP_95_64) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_5, DEST_IP_95_64, + (flow->tuple_info.dest_ip_95_64)); + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_6, DEST_IP_63_32) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_6, DEST_IP_63_32, + (flow->tuple_info.dest_ip_63_32)); + + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_7, DEST_IP_31_0) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_7, DEST_IP_31_0, + (flow->tuple_info.dest_ip_31_0)); + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, DEST_PORT); + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, DEST_PORT) |= + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_8, DEST_PORT, + (flow->tuple_info.dest_port)); + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, SRC_PORT); + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, SRC_PORT) |= + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_8, SRC_PORT, + (flow->tuple_info.src_port)); + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL); + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL) |= + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL, + flow->tuple_info.l4_protocol); + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER); + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER) |= + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER, + flow->reo_destination_handler); + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID); + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID) |= + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, VALID, 1); + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_10, METADATA); + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_10, METADATA) = + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_10, METADATA, + (flow->fse_metadata)); + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_INDICATION); + HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_INDICATION) |= + HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, + REO_DESTINATION_INDICATION, + flow->reo_destination_indication); + + /* Reset all the other fields in FSE */ + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, RESERVED_9); + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, MSDU_DROP); + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, MSDU_COUNT); + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_12, MSDU_BYTE_COUNT); + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_13, TIMESTAMP); + + return fse; +} +#endif /* WLAN_SUPPORT_RX_FISA */ +qdf_export_symbol(hal_rx_flow_setup_fse); + +/** + * hal_rx_flow_delete_entry() - Delete a flow from the Rx Flow Search Table + * @fst: Pointer to the Rx Flow Search Table + * @hal_rx_fse: Pointer to the Rx Flow that is to be deleted from the FST + * + * Return: Success/Failure + */ +inline QDF_STATUS +hal_rx_flow_delete_entry(struct hal_rx_fst *fst, void *hal_rx_fse) +{ + uint8_t *fse = (uint8_t *)hal_rx_fse; + + if (!HAL_GET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID)) + return QDF_STATUS_E_NOENT; + + HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(hal_rx_flow_delete_entry); + +#ifndef WLAN_SUPPORT_RX_FISA +/** + * hal_rx_fst_key_configure() - Configure the Toeplitz key in the FST + * @fst: Pointer to the Rx Flow Search Table + * + * Return: Success/Failure + */ +static void hal_rx_fst_key_configure(struct hal_rx_fst *fst) +{ + uint8_t key_bytes[HAL_FST_HASH_KEY_SIZE_BYTES]; + + qdf_mem_copy(key_bytes, fst->key, HAL_FST_HASH_KEY_SIZE_BYTES); + + /** + * The Toeplitz algorithm as per the Microsoft spec works in a + * “big-endian†manner, using the MSBs of the key to hash the + * initial bytes of the input going on to use up the lower order bits + * of the key to hash further bytes of the input until the LSBs of the + * key are used finally. + * + * So first, rightshift 320-bit input key 5 times to get 315 MS bits + */ + key_bitwise_shift_left(key_bytes, HAL_FST_HASH_KEY_SIZE_BYTES, 5); + key_reverse(fst->shifted_key, key_bytes, HAL_FST_HASH_KEY_SIZE_BYTES); +} +#else +static void hal_rx_fst_key_configure(struct hal_rx_fst *fst) +{ +} +#endif + +/** + * hal_rx_fst_get_base() - Retrieve the virtual base address of the Rx FST + * @fst: Pointer to the Rx Flow Search Table + * + * Return: Success/Failure + */ +static inline void *hal_rx_fst_get_base(struct hal_rx_fst *fst) +{ + return fst->base_vaddr; +} + +/** + * hal_rx_fst_get_fse_size() - Retrieve the size of each entry(flow) in Rx FST + * + * Return: size of each entry/flow in Rx FST + */ +static inline uint32_t hal_rx_fst_get_fse_size(void) +{ + return HAL_RX_FST_ENTRY_SIZE; +} + +/** + * hal_rx_flow_get_tuple_info() - Retrieve the 5-tuple flow info for an entry + * @hal_fse: Pointer to the Flow in Rx FST + * @tuple_info: 5-tuple info of the flow returned to the caller + * + * Return: Success/Failure + */ +QDF_STATUS hal_rx_flow_get_tuple_info(void *hal_fse, + struct hal_flow_tuple_info *tuple_info) +{ + if (!hal_fse || !tuple_info) + return QDF_STATUS_E_INVAL; + + if (!HAL_GET_FLD(hal_fse, RX_FLOW_SEARCH_ENTRY_9, VALID)) + return QDF_STATUS_E_NOENT; + + tuple_info->src_ip_127_96 = + qdf_ntohl(HAL_GET_FLD(hal_fse, + RX_FLOW_SEARCH_ENTRY_0, + SRC_IP_127_96)); + tuple_info->src_ip_95_64 = + qdf_ntohl(HAL_GET_FLD(hal_fse, + RX_FLOW_SEARCH_ENTRY_1, + SRC_IP_95_64)); + tuple_info->src_ip_63_32 = + qdf_ntohl(HAL_GET_FLD(hal_fse, + RX_FLOW_SEARCH_ENTRY_2, + SRC_IP_63_32)); + tuple_info->src_ip_31_0 = + qdf_ntohl(HAL_GET_FLD(hal_fse, + RX_FLOW_SEARCH_ENTRY_3, + SRC_IP_31_0)); + tuple_info->dest_ip_127_96 = + qdf_ntohl(HAL_GET_FLD(hal_fse, + RX_FLOW_SEARCH_ENTRY_4, + DEST_IP_127_96)); + tuple_info->dest_ip_95_64 = + qdf_ntohl(HAL_GET_FLD(hal_fse, + RX_FLOW_SEARCH_ENTRY_5, + DEST_IP_95_64)); + tuple_info->dest_ip_63_32 = + qdf_ntohl(HAL_GET_FLD(hal_fse, + RX_FLOW_SEARCH_ENTRY_6, + DEST_IP_63_32)); + tuple_info->dest_ip_31_0 = + qdf_ntohl(HAL_GET_FLD(hal_fse, + RX_FLOW_SEARCH_ENTRY_7, + DEST_IP_31_0)); + tuple_info->dest_port = HAL_GET_FLD(hal_fse, + RX_FLOW_SEARCH_ENTRY_8, + DEST_PORT); + tuple_info->src_port = HAL_GET_FLD(hal_fse, + RX_FLOW_SEARCH_ENTRY_8, + SRC_PORT); + tuple_info->l4_protocol = HAL_GET_FLD(hal_fse, + RX_FLOW_SEARCH_ENTRY_9, + L4_PROTOCOL); + + return QDF_STATUS_SUCCESS; +} + +#ifndef WLAN_SUPPORT_RX_FISA +/** + * hal_flow_toeplitz_create_cache() - Calculate hashes for each possible + * byte value with the key taken as is + * + * @fst: FST Handle + * @key: Hash Key + * + * Return: Success/Failure + */ +static void hal_flow_toeplitz_create_cache(struct hal_rx_fst *fst) +{ + int bit; + int val; + int i; + uint8_t *key = fst->shifted_key; + + /* + * Initialise to first 32 bits of the key; shift in further key material + * through the loop + */ + uint32_t cur_key = (key[0] << 24) | (key[1] << 16) | (key[2] << 8) | + key[3]; + + for (i = 0; i < HAL_FST_HASH_KEY_SIZE_BYTES; i++) { + uint8_t new_key_byte; + uint32_t shifted_key[8]; + + if (i + 4 < HAL_FST_HASH_KEY_SIZE_BYTES) + new_key_byte = key[i + 4]; + else + new_key_byte = 0; + + shifted_key[0] = cur_key; + + for (bit = 1; bit < 8; bit++) { + /* + * For each iteration, shift out one more bit of the + * current key and shift in one more bit of the new key + * material + */ + shifted_key[bit] = cur_key << bit | + new_key_byte >> (8 - bit); + } + + for (val = 0; val < (1 << 8); val++) { + uint32_t hash = 0; + int mask; + + /* + * For each bit set in the input, XOR in + * the appropriately shifted key + */ + for (bit = 0, mask = 1 << 7; bit < 8; bit++, mask >>= 1) + if ((val & mask)) + hash ^= shifted_key[bit]; + + fst->key_cache[i][val] = hash; + } + + cur_key = cur_key << 8 | new_key_byte; + } +} +#else +static void hal_flow_toeplitz_create_cache(struct hal_rx_fst *fst) +{ +} +#endif + +/** + * hal_rx_fst_attach() - Initialize Rx flow search table in HW FST + * + * @qdf_dev: QDF device handle + * @hal_fst_base_paddr: Pointer to the physical base address of the Rx FST + * @max_entries: Max number of flows allowed in the FST + * @max_search: Number of collisions allowed in the hash-based FST + * @hash_key: Toeplitz key used for the hash FST + * + * Return: + */ +struct hal_rx_fst * +hal_rx_fst_attach(qdf_device_t qdf_dev, + uint64_t *hal_fst_base_paddr, uint16_t max_entries, + uint16_t max_search, uint8_t *hash_key) +{ + struct hal_rx_fst *fst = qdf_mem_malloc(sizeof(struct hal_rx_fst)); + + if (!fst) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("hal fst allocation failed,")); + return NULL; + } + + qdf_mem_set(fst, 0, sizeof(struct hal_rx_fst)); + + fst->key = hash_key; + fst->max_skid_length = max_search; + fst->max_entries = max_entries; + fst->hash_mask = max_entries - 1; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "HAL FST allocation %pK %d * %d\n", fst, + fst->max_entries, HAL_RX_FST_ENTRY_SIZE); + + fst->base_vaddr = (uint8_t *)qdf_mem_alloc_consistent(qdf_dev, + qdf_dev->dev, + (fst->max_entries * HAL_RX_FST_ENTRY_SIZE), + &fst->base_paddr); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, + "hal_rx_fst base address 0x%pK", (void *)fst->base_paddr); + if (!fst->base_vaddr) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("hal fst->base_vaddr allocation failed")); + qdf_mem_free(fst); + return NULL; + } + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG, + (void *)fst->key, HAL_FST_HASH_KEY_SIZE_BYTES); + + qdf_mem_set((uint8_t *)fst->base_vaddr, 0, + (fst->max_entries * HAL_RX_FST_ENTRY_SIZE)); + + hal_rx_fst_key_configure(fst); + hal_flow_toeplitz_create_cache(fst); + *hal_fst_base_paddr = (uint64_t)fst->base_paddr; + return fst; +} +qdf_export_symbol(hal_rx_fst_attach); + +/** + * hal_rx_fst_detach() - De-init the Rx flow search table from HW + * + * @rx_fst: Pointer to the Rx FST + * @qdf_dev: QDF device handle + * + * Return: + */ +void hal_rx_fst_detach(struct hal_rx_fst *rx_fst, + qdf_device_t qdf_dev) +{ + if (!rx_fst || !qdf_dev) + return; + + qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, + rx_fst->max_entries * HAL_RX_FST_ENTRY_SIZE, + rx_fst->base_vaddr, rx_fst->base_paddr, 0); + + qdf_mem_free(rx_fst); +} +qdf_export_symbol(hal_rx_fst_detach); + +#ifndef WLAN_SUPPORT_RX_FISA +/** + * hal_flow_toeplitz_hash() - Calculate Toeplitz hash by using the cached key + * + * @hal_fst: FST Handle + * @flow: Flow Parameters + * + * Return: Success/Failure + */ +uint32_t +hal_flow_toeplitz_hash(void *hal_fst, struct hal_rx_flow *flow) +{ + int i, j; + uint32_t hash = 0; + struct hal_rx_fst *fst = (struct hal_rx_fst *)hal_fst; + uint32_t input[HAL_FST_HASH_KEY_SIZE_WORDS]; + uint8_t *tuple; + + qdf_mem_zero(input, HAL_FST_HASH_KEY_SIZE_BYTES); + *(uint32_t *)&input[0] = qdf_htonl(flow->tuple_info.src_ip_127_96); + *(uint32_t *)&input[1] = qdf_htonl(flow->tuple_info.src_ip_95_64); + *(uint32_t *)&input[2] = qdf_htonl(flow->tuple_info.src_ip_63_32); + *(uint32_t *)&input[3] = qdf_htonl(flow->tuple_info.src_ip_31_0); + *(uint32_t *)&input[4] = qdf_htonl(flow->tuple_info.dest_ip_127_96); + *(uint32_t *)&input[5] = qdf_htonl(flow->tuple_info.dest_ip_95_64); + *(uint32_t *)&input[6] = qdf_htonl(flow->tuple_info.dest_ip_63_32); + *(uint32_t *)&input[7] = qdf_htonl(flow->tuple_info.dest_ip_31_0); + *(uint32_t *)&input[8] = (flow->tuple_info.dest_port << 16) | + (flow->tuple_info.src_port); + *(uint32_t *)&input[9] = flow->tuple_info.l4_protocol; + + tuple = (uint8_t *)input; + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + tuple, sizeof(input)); + for (i = 0, j = HAL_FST_HASH_DATA_SIZE - 1; + i < HAL_FST_HASH_KEY_SIZE_BYTES && j >= 0; i++, j--) { + hash ^= fst->key_cache[i][tuple[j]]; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, + "Hash value %u %u truncated hash %u\n", hash, + (hash >> 12), (hash >> 12) % (fst->max_entries)); + + hash >>= 12; + hash &= (fst->max_entries - 1); + + return hash; +} +#else +uint32_t +hal_flow_toeplitz_hash(void *hal_fst, struct hal_rx_flow *flow) +{ + return 0; +} +#endif +qdf_export_symbol(hal_flow_toeplitz_hash); + +/** + * hal_rx_get_hal_hash() - Retrieve hash index of a flow in the FST table + * + * @hal_fst: HAL Rx FST Handle + * @flow_hash: Flow hash computed from flow tuple + * + * Return: hash index truncated to the size of the hash table + */ +uint32_t hal_rx_get_hal_hash(struct hal_rx_fst *hal_fst, uint32_t flow_hash) +{ + uint32_t trunc_hash = flow_hash; + + /* Take care of hash wrap around scenario */ + if (flow_hash >= hal_fst->max_entries) + trunc_hash &= hal_fst->hash_mask; + return trunc_hash; +} +qdf_export_symbol(hal_rx_get_hal_hash); + +/** + * hal_rx_insert_flow_entry() - Add a flow into the FST table + * + * @hal_fst: HAL Rx FST Handle + * @flow_hash: Flow hash computed from flow tuple + * @flow_tuple_info: Flow tuple used to compute the hash + * @flow_index: Hash index of the flow in the table when inserted successfully + * + * Return: Success if flow is inserted into the table, error otherwise + */ +QDF_STATUS +hal_rx_insert_flow_entry(struct hal_rx_fst *fst, uint32_t flow_hash, + void *flow_tuple_info, uint32_t *flow_idx) +{ + int i; + void *hal_fse = NULL; + uint32_t hal_hash = 0; + struct hal_flow_tuple_info hal_tuple_info = { 0 }; + QDF_STATUS status; + + for (i = 0; i < fst->max_skid_length; i++) { + hal_hash = hal_rx_get_hal_hash(fst, (flow_hash + i)); + hal_fse = (uint8_t *)fst->base_vaddr + + (hal_hash * HAL_RX_FST_ENTRY_SIZE); + status = hal_rx_flow_get_tuple_info(hal_fse, &hal_tuple_info); + if (status == QDF_STATUS_E_NOENT) + break; + + /* Find the matching flow entry in HW FST */ + if (!qdf_mem_cmp(&hal_tuple_info, + flow_tuple_info, + sizeof(struct hal_flow_tuple_info))) { + dp_err("Duplicate flow entry in FST %u at skid %u ", + hal_hash, i); + return QDF_STATUS_E_EXISTS; + } + } + if (i == fst->max_skid_length) { + dp_err("Max skid length reached for hash %u", flow_hash); + return QDF_STATUS_E_RANGE; + } + *flow_idx = hal_hash; + dp_info("flow_hash = %u, skid_entry = %d, flow_addr = %pK flow_idx = %d", + flow_hash, i, hal_fse, *flow_idx); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(hal_rx_insert_flow_entry); + +/** + * hal_rx_find_flow_from_tuple() - Find a flow in the FST table + * + * @fst: HAL Rx FST Handle + * @flow_hash: Flow hash computed from flow tuple + * @flow_tuple_info: Flow tuple used to compute the hash + * @flow_index: Hash index of the flow in the table when found + * + * Return: Success if matching flow is found in the table, error otherwise + */ +QDF_STATUS +hal_rx_find_flow_from_tuple(struct hal_rx_fst *fst, uint32_t flow_hash, + void *flow_tuple_info, uint32_t *flow_idx) +{ + int i; + void *hal_fse = NULL; + uint32_t hal_hash = 0; + struct hal_flow_tuple_info hal_tuple_info = { 0 }; + QDF_STATUS status; + + for (i = 0; i < fst->max_skid_length; i++) { + hal_hash = hal_rx_get_hal_hash(fst, (flow_hash + i)); + hal_fse = (uint8_t *)fst->base_vaddr + + (hal_hash * HAL_RX_FST_ENTRY_SIZE); + status = hal_rx_flow_get_tuple_info(hal_fse, &hal_tuple_info); + if (status != QDF_STATUS_SUCCESS) + continue; + + /* Find the matching flow entry in HW FST */ + if (!qdf_mem_cmp(&hal_tuple_info, + flow_tuple_info, + sizeof(struct hal_flow_tuple_info))) { + break; + } + } + + if (i == fst->max_skid_length) { + dp_err("Max skid length reached for hash %u", flow_hash); + return QDF_STATUS_E_RANGE; + } + + *flow_idx = hal_hash; + dp_info("flow_hash = %u, skid_entry = %d, flow_addr = %pK flow_idx = %d", + flow_hash, i, hal_fse, *flow_idx); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(hal_rx_find_flow_from_tuple); diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_rx_flow.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_rx_flow.h new file mode 100644 index 0000000000000000000000000000000000000000..6101034e74be5946310620184def12858572dd3c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_rx_flow.h @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __HAL_RX_FLOW_H +#define __HAL_RX_FLOW_H + +#include "hal_flow.h" +#include "wlan_cfg.h" +#include "hal_api.h" +#include "qdf_mem.h" +#include "rx_flow_search_entry.h" + +#define HAL_FST_HASH_KEY_SIZE_BITS 315 +#define HAL_FST_HASH_KEY_SIZE_BYTES 40 +#define HAL_FST_HASH_KEY_SIZE_WORDS 10 +#define HAL_FST_HASH_DATA_SIZE 37 +#define HAL_FST_HASH_MASK 0x7ffff +#define HAL_RX_FST_ENTRY_SIZE (NUM_OF_DWORDS_RX_FLOW_SEARCH_ENTRY * 4) + +/** + * Four possible options for IP SA/DA prefix, currently use 0x0 which + * maps to type 2 in HW spec + */ +#define HAL_FST_IP_DA_SA_PFX_TYPE_IPV4_COMPATIBLE_IPV6 2 + +#define HAL_IP_DA_SA_PREFIX_IPV4_COMPATIBLE_IPV6 0x0 + +/** + * REO destination indication is a lower 4-bits of hash value + * This should match the REO destination used in Rx hash based routing. + */ +#define HAL_REO_DEST_IND_HASH_MASK 0xF + +/** + * REO destinations are valid from 16-31 for Hawkeye + * and 0-15 are not setup for SW + */ +#define HAL_REO_DEST_IND_START_OFFSET 0x10 + +/** + * struct hal_rx_flow - Rx Flow parameters to be sent to HW + * @tuple_info: Rx Flow 5-tuple (src & dest IP, src & dest ports, L4 protocol) + * @reo_destination_handler: REO destination for this flow + * @reo_destination_indication: REO indication for this flow + * @fse_metadata: Flow metadata or tag passed to HW for marking packets + */ +struct hal_rx_flow { + struct hal_flow_tuple_info tuple_info; + uint8_t reo_destination_handler; + uint8_t reo_destination_indication; + uint32_t fse_metadata; +}; + +/** + * enum hal_rx_fse_reo_destination_handler + * @HAL_RX_FSE_REO_DEST_FT: Use this entry's destination indication + * @HAL_RX_FSE_REO_DEST_ASPT: Use Address Search + Peer Table's entry + * @HAL_RX_FSE_REO_DEST_FT2: Use FT2's destination indication + * @HAL_RX_FSE_REO_DEST_CCE: Use CCE's destination indication for this entry + */ +enum hal_rx_fse_reo_destination_handler { + HAL_RX_FSE_REO_DEST_FT = 0, + HAL_RX_FSE_REO_DEST_ASPT = 1, + HAL_RX_FSE_REO_DEST_FT2 = 2, + HAL_RX_FSE_REO_DEST_CCE = 3, +}; + +/** + * struct hal_rx_fst - HAL RX Flow search table context + * @base_vaddr: Virtual Base address of HW FST + * @base_paddr: Physical Base address of HW FST + * @key: Pointer to 320-bit Key read from cfg + * @shifted_key: Pointer to left-shifted 320-bit Key used for Toeplitz Hash + * @max_entries : Max number of entries in flow searchh table + * @max_skid_length : Max search length if there is hash collision + * @hash_mask: Hash mask to apply to index into FST + * @key_cache: Toepliz Key Cache configured key + */ +struct hal_rx_fst { + uint8_t *base_vaddr; + qdf_dma_addr_t base_paddr; + uint8_t *key; +#ifndef WLAN_SUPPORT_RX_FISA + uint8_t shifted_key[HAL_FST_HASH_KEY_SIZE_BYTES]; + uint32_t key_cache[HAL_FST_HASH_KEY_SIZE_BYTES][1 << 8]; +#endif + uint16_t max_entries; + uint16_t max_skid_length; + uint16_t hash_mask; + uint32_t add_flow_count; + uint32_t del_flow_count; +}; + +/** + * hal_rx_flow_setup_fse() - Setup a flow search entry in HW FST + * @fst: Pointer to the Rx Flow Search Table + * @table_offset: offset into the table where the flow is to be setup + * @flow: Flow Parameters + * + * Return: Success/Failure + */ +void *hal_rx_flow_setup_fse(struct hal_rx_fst *fst, + uint32_t table_offset, + struct hal_rx_flow *flow); + +/** + * hal_rx_flow_delete_entry() - Delete a flow from the Rx Flow Search Table + * @fst: Pointer to the Rx Flow Search Table + * @hal_rx_fse: Pointer to the Rx Flow that is to be deleted from the FST + * + * Return: Success/Failure + */ +QDF_STATUS +hal_rx_flow_delete_entry(struct hal_rx_fst *fst, void *hal_rx_fse); + +/** + * hal_rx_flow_get_tuple_info() - Retrieve the 5-tuple flow info for an entry + * @hal_fse: Pointer to the Flow in Rx FST + * @tuple_info: 5-tuple info of the flow returned to the caller + * + * Return: Success/Failure + */ +QDF_STATUS hal_rx_flow_get_tuple_info(void *hal_fse, + struct hal_flow_tuple_info *tuple_info); + +/** + * hal_rx_fst_attach() - Initialize Rx flow search table in HW FST + * + * @qdf_dev: QDF device handle + * @hal_fst_base_paddr: Pointer to the physical base address of the Rx FST + * @max_entries: Max number of flows allowed in the FST + * @max_search: Number of collisions allowed in the hash-based FST + * @hash_key: Toeplitz key used for the hash FST + * + * Return: + */ +struct hal_rx_fst * +hal_rx_fst_attach(qdf_device_t qdf_dev, + uint64_t *hal_fst_base_paddr, uint16_t max_entries, + uint16_t max_search, uint8_t *hash_key); + +/** + * hal_rx_fst_detach() - De-init the Rx flow search table from HW + * + * @rx_fst: Pointer to the Rx FST + * @qdf_dev: QDF device handle + * + * Return: + */ +void hal_rx_fst_detach(struct hal_rx_fst *rx_fst, qdf_device_t qdf_dev); + +/** + * hal_rx_insert_flow_entry() - Add a flow into the FST table + * + * @hal_fst: HAL Rx FST Handle + * @flow_hash: Flow hash computed from flow tuple + * @flow_tuple_info: Flow tuple used to compute the hash + * @flow_index: Hash index of the flow in the table when inserted successfully + * + * Return: Success if flow is inserted into the table, error otherwise + */ +QDF_STATUS +hal_rx_insert_flow_entry(struct hal_rx_fst *fst, uint32_t flow_hash, + void *flow_tuple_info, uint32_t *flow_idx); + +/** + * hal_rx_find_flow_from_tuple() - Find a flow in the FST table + * + * @fst: HAL Rx FST Handle + * @flow_hash: Flow hash computed from flow tuple + * @flow_tuple_info: Flow tuple used to compute the hash + * @flow_index: Hash index of the flow in the table when found + * + * Return: Success if matching flow is found in the table, error otherwise + */ +QDF_STATUS +hal_rx_find_flow_from_tuple(struct hal_rx_fst *fst, uint32_t flow_hash, + void *flow_tuple_info, uint32_t *flow_idx); + +/** + * hal_rx_get_hal_hash() - Retrieve hash index of a flow in the FST table + * + * @hal_fst: HAL Rx FST Handle + * @flow_hash: Flow hash computed from flow tuple + * + * Return: hash index truncated to the size of the hash table + */ +uint32_t hal_rx_get_hal_hash(struct hal_rx_fst *hal_fst, uint32_t flow_hash); + +/** + * hal_flow_toeplitz_hash() - Calculate Toeplitz hash by using the cached key + * + * @hal_fst: FST Handle + * @flow: Flow Parameters + * + * Return: Success/Failure + */ +uint32_t +hal_flow_toeplitz_hash(void *hal_fst, struct hal_rx_flow *flow); + +void hal_rx_dump_fse_table(struct hal_rx_fst *fst); +#endif /* HAL_RX_FLOW_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_srng.c b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_srng.c new file mode 100644 index 0000000000000000000000000000000000000000..1290984f77497e3b2d8696473b3fe2865e58933a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_srng.c @@ -0,0 +1,1461 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hal_hw_headers.h" +#include "hal_api.h" +#include "target_type.h" +#include "wcss_version.h" +#include "qdf_module.h" + +#ifdef QCA_WIFI_QCA8074 +void hal_qca6290_attach(struct hal_soc *hal); +#endif +#ifdef QCA_WIFI_QCA8074 +void hal_qca8074_attach(struct hal_soc *hal); +#endif +#if defined(QCA_WIFI_QCA8074V2) || defined(QCA_WIFI_QCA6018) +void hal_qca8074v2_attach(struct hal_soc *hal); +#endif +#ifdef QCA_WIFI_QCA6390 +void hal_qca6390_attach(struct hal_soc *hal); +#endif +#ifdef QCA_WIFI_QCA6490 +void hal_qca6490_attach(struct hal_soc *hal); +#endif +#ifdef QCA_WIFI_QCN9000 +void hal_qcn9000_attach(struct hal_soc *hal); +#endif +#ifdef QCA_WIFI_QCA6750 +void hal_qca6750_attach(struct hal_soc *hal); +#endif + +#ifdef ENABLE_VERBOSE_DEBUG +bool is_hal_verbose_debug_enabled; +#endif + +#ifdef ENABLE_HAL_REG_WR_HISTORY +struct hal_reg_write_fail_history hal_reg_wr_hist; + +void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc, + uint32_t offset, + uint32_t wr_val, uint32_t rd_val) +{ + struct hal_reg_write_fail_entry *record; + int idx; + + idx = hal_history_get_next_index(&hal_soc->reg_wr_fail_hist->index, + HAL_REG_WRITE_HIST_SIZE); + + record = &hal_soc->reg_wr_fail_hist->record[idx]; + + record->timestamp = qdf_get_log_timestamp(); + record->reg_offset = offset; + record->write_val = wr_val; + record->read_val = rd_val; +} + +static void hal_reg_write_fail_history_init(struct hal_soc *hal) +{ + hal->reg_wr_fail_hist = &hal_reg_wr_hist; + + qdf_atomic_set(&hal->reg_wr_fail_hist->index, -1); +} +#else +static void hal_reg_write_fail_history_init(struct hal_soc *hal) +{ +} +#endif + +/** + * hal_get_srng_ring_id() - get the ring id of a descriped ring + * @hal: hal_soc data structure + * @ring_type: type enum describing the ring + * @ring_num: which ring of the ring type + * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings) + * + * Return: the ring id or -EINVAL if the ring does not exist. + */ +static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type, + int ring_num, int mac_id) +{ + struct hal_hw_srng_config *ring_config = + HAL_SRNG_CONFIG(hal, ring_type); + int ring_id; + + if (ring_num >= ring_config->max_rings) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, + "%s: ring_num exceeded maximum no. of supported rings", + __func__); + /* TODO: This is a programming error. Assert if this happens */ + return -EINVAL; + } + + if (ring_config->lmac_ring) { + ring_id = ring_config->start_ring_id + ring_num + + (mac_id * HAL_MAX_RINGS_PER_LMAC); + } else { + ring_id = ring_config->start_ring_id + ring_num; + } + + return ring_id; +} + +static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id) +{ + /* TODO: Should we allocate srng structures dynamically? */ + return &(hal->srng_list[ring_id]); +} + +#define HP_OFFSET_IN_REG_START 1 +#define OFFSET_FROM_HP_TO_TP 4 +static void hal_update_srng_hp_tp_address(struct hal_soc *hal_soc, + int shadow_config_index, + int ring_type, + int ring_num) +{ + struct hal_srng *srng; + int ring_id; + struct hal_hw_srng_config *ring_config = + HAL_SRNG_CONFIG(hal_soc, ring_type); + + ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0); + if (ring_id < 0) + return; + + srng = hal_get_srng(hal_soc, ring_id); + + if (ring_config->ring_dir == HAL_SRNG_DST_RING) { + srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index) + + hal_soc->dev_base_addr; + hal_debug("tp_addr=%pK dev base addr %pK index %u", + srng->u.dst_ring.tp_addr, hal_soc->dev_base_addr, + shadow_config_index); + } else { + srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index) + + hal_soc->dev_base_addr; + hal_debug("hp_addr=%pK dev base addr %pK index %u", + srng->u.src_ring.hp_addr, + hal_soc->dev_base_addr, shadow_config_index); + } + +} + +#ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE +void hal_set_one_target_reg_config(struct hal_soc *hal, + uint32_t target_reg_offset, + int list_index) +{ + int i = list_index; + + qdf_assert_always(i < MAX_GENERIC_SHADOW_REG); + hal->list_shadow_reg_config[i].target_register = + target_reg_offset; + hal->num_generic_shadow_regs_configured++; +} + +qdf_export_symbol(hal_set_one_target_reg_config); + +#define REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET 0x4 +#define MAX_REO_REMAP_SHADOW_REGS 4 +QDF_STATUS hal_set_shadow_regs(void *hal_soc) +{ + uint32_t target_reg_offset; + struct hal_soc *hal = (struct hal_soc *)hal_soc; + int i; + struct hal_hw_srng_config *srng_config = + &hal->hw_srng_table[WBM2SW_RELEASE]; + + target_reg_offset = + HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET); + + for (i = 0; i < MAX_REO_REMAP_SHADOW_REGS; i++) { + hal_set_one_target_reg_config(hal, target_reg_offset, i); + target_reg_offset += REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET; + } + + target_reg_offset = srng_config->reg_start[HP_OFFSET_IN_REG_START]; + target_reg_offset += (srng_config->reg_size[HP_OFFSET_IN_REG_START] + * HAL_IPA_TX_COMP_RING_IDX); + + hal_set_one_target_reg_config(hal, target_reg_offset, i); + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(hal_set_shadow_regs); + +QDF_STATUS hal_construct_shadow_regs(void *hal_soc) +{ + struct hal_soc *hal = (struct hal_soc *)hal_soc; + int shadow_config_index = hal->num_shadow_registers_configured; + int i; + int num_regs = hal->num_generic_shadow_regs_configured; + + for (i = 0; i < num_regs; i++) { + qdf_assert_always(shadow_config_index < MAX_SHADOW_REGISTERS); + hal->shadow_config[shadow_config_index].addr = + hal->list_shadow_reg_config[i].target_register; + hal->list_shadow_reg_config[i].shadow_config_index = + shadow_config_index; + hal->list_shadow_reg_config[i].va = + SHADOW_REGISTER(shadow_config_index) + + (uintptr_t)hal->dev_base_addr; + hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x", + hal->shadow_config[shadow_config_index].addr, + SHADOW_REGISTER(shadow_config_index), + shadow_config_index); + shadow_config_index++; + hal->num_shadow_registers_configured++; + } + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(hal_construct_shadow_regs); +#endif + +QDF_STATUS hal_set_one_shadow_config(void *hal_soc, + int ring_type, + int ring_num) +{ + uint32_t target_register; + struct hal_soc *hal = (struct hal_soc *)hal_soc; + struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type]; + int shadow_config_index = hal->num_shadow_registers_configured; + + if (shadow_config_index >= MAX_SHADOW_REGISTERS) { + QDF_ASSERT(0); + return QDF_STATUS_E_RESOURCES; + } + + hal->num_shadow_registers_configured++; + + target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START]; + target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START] + *ring_num); + + /* if the ring is a dst ring, we need to shadow the tail pointer */ + if (srng_config->ring_dir == HAL_SRNG_DST_RING) + target_register += OFFSET_FROM_HP_TO_TP; + + hal->shadow_config[shadow_config_index].addr = target_register; + + /* update hp/tp addr in the hal_soc structure*/ + hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type, + ring_num); + + hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x, ring_type %d, ring num %d", + target_register, + SHADOW_REGISTER(shadow_config_index), + shadow_config_index, + ring_type, ring_num); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(hal_set_one_shadow_config); + +QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc) +{ + int ring_type, ring_num; + struct hal_soc *hal = (struct hal_soc *)hal_soc; + + for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) { + struct hal_hw_srng_config *srng_config = + &hal->hw_srng_table[ring_type]; + + if (ring_type == CE_SRC || + ring_type == CE_DST || + ring_type == CE_DST_STATUS) + continue; + + if (srng_config->lmac_ring) + continue; + + for (ring_num = 0; ring_num < srng_config->max_rings; + ring_num++) + hal_set_one_shadow_config(hal_soc, ring_type, ring_num); + } + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(hal_construct_srng_shadow_regs); + +void hal_get_shadow_config(void *hal_soc, + struct pld_shadow_reg_v2_cfg **shadow_config, + int *num_shadow_registers_configured) +{ + struct hal_soc *hal = (struct hal_soc *)hal_soc; + + *shadow_config = hal->shadow_config; + *num_shadow_registers_configured = + hal->num_shadow_registers_configured; +} + +qdf_export_symbol(hal_get_shadow_config); + + +static bool hal_validate_shadow_register(struct hal_soc *hal, + uint32_t *destination, + uint32_t *shadow_address) +{ + unsigned int index; + uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr; + int destination_ba_offset = + ((char *)destination) - (char *)hal->dev_base_addr; + + index = shadow_address - shadow_0_offset; + + if (index >= MAX_SHADOW_REGISTERS) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: index %x out of bounds", __func__, index); + goto error; + } else if (hal->shadow_config[index].addr != destination_ba_offset) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: sanity check failure, expected %x, found %x", + __func__, destination_ba_offset, + hal->shadow_config[index].addr); + goto error; + } + return true; +error: + qdf_print("%s: baddr %pK, desination %pK, shadow_address %pK s0offset %pK index %x", + __func__, hal->dev_base_addr, destination, shadow_address, + shadow_0_offset, index); + QDF_BUG(0); + return false; +} + +static void hal_target_based_configure(struct hal_soc *hal) +{ + /** + * Indicate Initialization of srngs to avoid force wake + * as umac power collapse is not enabled yet + */ + hal->init_phase = true; + + switch (hal->target_type) { +#ifdef QCA_WIFI_QCA6290 + case TARGET_TYPE_QCA6290: + hal->use_register_windowing = true; + hal_qca6290_attach(hal); + break; +#endif +#ifdef QCA_WIFI_QCA6390 + case TARGET_TYPE_QCA6390: + hal->use_register_windowing = true; + hal_qca6390_attach(hal); + break; +#endif +#ifdef QCA_WIFI_QCA6490 + case TARGET_TYPE_QCA6490: + hal->use_register_windowing = true; + hal_qca6490_attach(hal); + hal->init_phase = false; + break; +#endif +#ifdef QCA_WIFI_QCA6750 + case TARGET_TYPE_QCA6750: + hal->use_register_windowing = true; + hal_qca6750_attach(hal); + break; +#endif +#if defined(QCA_WIFI_QCA8074) && defined(WIFI_TARGET_TYPE_3_0) + case TARGET_TYPE_QCA8074: + hal_qca8074_attach(hal); + break; +#endif + +#if defined(QCA_WIFI_QCA8074V2) + case TARGET_TYPE_QCA8074V2: + hal_qca8074v2_attach(hal); + break; +#endif + +#if defined(QCA_WIFI_QCA6018) + case TARGET_TYPE_QCA6018: + hal_qca8074v2_attach(hal); + break; +#endif + +#ifdef QCA_WIFI_QCN9000 + case TARGET_TYPE_QCN9000: + hal->use_register_windowing = true; + /* + * Static window map is enabled for qcn9000 to use 2mb bar + * size and use multiple windows to write into registers. + */ + hal->static_window_map = true; + hal_qcn9000_attach(hal); + break; +#endif + default: + break; + } +} + +uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct hif_target_info *tgt_info = + hif_get_target_info_handle(hal_soc->hif_handle); + + return tgt_info->target_type; +} + +qdf_export_symbol(hal_get_target_type); + +#ifdef FEATURE_HAL_DELAYED_REG_WRITE +#ifdef MEMORY_DEBUG +/* + * Length of the queue(array) used to hold delayed register writes. + * Must be a multiple of 2. + */ +#define HAL_REG_WRITE_QUEUE_LEN 128 +#else +#define HAL_REG_WRITE_QUEUE_LEN 32 +#endif + +/** + * hal_is_reg_write_tput_level_high() - throughput level for delayed reg writes + * @hal: hal_soc pointer + * + * Return: true if throughput is high, else false. + */ +static inline bool hal_is_reg_write_tput_level_high(struct hal_soc *hal) +{ + int bw_level = hif_get_bandwidth_level(hal->hif_handle); + + return (bw_level >= PLD_BUS_WIDTH_MEDIUM) ? true : false; +} + +/** + * hal_process_reg_write_q_elem() - process a regiter write queue element + * @hal: hal_soc pointer + * @q_elem: pointer to hal regiter write queue element + * + * Return: The value which was written to the address + */ +static uint32_t +hal_process_reg_write_q_elem(struct hal_soc *hal, + struct hal_reg_write_q_elem *q_elem) +{ + struct hal_srng *srng = q_elem->srng; + uint32_t write_val; + + SRNG_LOCK(&srng->lock); + + srng->reg_write_in_progress = false; + srng->wstats.dequeues++; + + if (srng->ring_dir == HAL_SRNG_SRC_RING) { + q_elem->dequeue_val = srng->u.src_ring.hp; + hal_write_address_32_mb(hal, + srng->u.src_ring.hp_addr, + srng->u.src_ring.hp, false); + write_val = srng->u.src_ring.hp; + } else { + q_elem->dequeue_val = srng->u.dst_ring.tp; + hal_write_address_32_mb(hal, + srng->u.dst_ring.tp_addr, + srng->u.dst_ring.tp, false); + write_val = srng->u.dst_ring.tp; + } + + q_elem->valid = 0; + srng->last_dequeue_time = q_elem->dequeue_time; + SRNG_UNLOCK(&srng->lock); + + return write_val; +} + +/** + * hal_reg_write_fill_sched_delay_hist() - fill reg write delay histogram in hal + * @hal: hal_soc pointer + * @delay: delay in us + * + * Return: None + */ +static inline void hal_reg_write_fill_sched_delay_hist(struct hal_soc *hal, + uint64_t delay_us) +{ + uint32_t *hist; + + hist = hal->stats.wstats.sched_delay; + + if (delay_us < 100) + hist[REG_WRITE_SCHED_DELAY_SUB_100us]++; + else if (delay_us < 1000) + hist[REG_WRITE_SCHED_DELAY_SUB_1000us]++; + else if (delay_us < 5000) + hist[REG_WRITE_SCHED_DELAY_SUB_5000us]++; + else + hist[REG_WRITE_SCHED_DELAY_GT_5000us]++; +} + +#ifdef SHADOW_WRITE_DELAY + +#define SHADOW_WRITE_MIN_DELTA_US 5 +#define SHADOW_WRITE_DELAY_US 50 + +/* + * Never add those srngs which are performance relate. + * The delay itself will hit performance heavily. + */ +#define IS_SRNG_MATCH(s) ((s)->ring_id == HAL_SRNG_CE_1_DST_STATUS || \ + (s)->ring_id == HAL_SRNG_CE_1_DST) + +static inline bool hal_reg_write_need_delay(struct hal_reg_write_q_elem *elem) +{ + struct hal_srng *srng = elem->srng; + struct hal_soc *hal; + qdf_time_t now; + qdf_iomem_t real_addr; + + if (qdf_unlikely(!srng)) + return false; + + hal = srng->hal_soc; + if (qdf_unlikely(!hal)) + return false; + + /* Check if it is target srng, and valid shadow reg */ + if (qdf_likely(!IS_SRNG_MATCH(srng))) + return false; + + if (srng->ring_dir == HAL_SRNG_SRC_RING) + real_addr = SRNG_SRC_ADDR(srng, HP); + else + real_addr = SRNG_DST_ADDR(srng, TP); + if (!hal_validate_shadow_register(hal, real_addr, elem->addr)) + return false; + + /* Check the time delta from last write of same srng */ + now = qdf_get_log_timestamp(); + if (qdf_log_timestamp_to_usecs(now - srng->last_dequeue_time) > + SHADOW_WRITE_MIN_DELTA_US) + return false; + + /* Delay dequeue, and record */ + qdf_udelay(SHADOW_WRITE_DELAY_US); + + srng->wstats.dequeue_delay++; + hal->stats.wstats.dequeue_delay++; + + return true; +} +#else +static inline bool hal_reg_write_need_delay(struct hal_reg_write_q_elem *elem) +{ + return false; +} +#endif + +/** + * hal_reg_write_work() - Worker to process delayed writes + * @arg: hal_soc pointer + * + * Return: None + */ +static void hal_reg_write_work(void *arg) +{ + int32_t q_depth, write_val; + struct hal_soc *hal = arg; + struct hal_reg_write_q_elem *q_elem; + uint64_t delta_us; + uint8_t ring_id; + uint32_t *addr; + + q_elem = &hal->reg_write_queue[(hal->read_idx)]; + q_elem->work_scheduled_time = qdf_get_log_timestamp(); + + /* Make sure q_elem consistent in the memory for multi-cores */ + qdf_rmb(); + if (!q_elem->valid) + return; + + q_depth = qdf_atomic_read(&hal->stats.wstats.q_depth); + if (q_depth > hal->stats.wstats.max_q_depth) + hal->stats.wstats.max_q_depth = q_depth; + + if (hif_prevent_link_low_power_states(hal->hif_handle)) { + hal->stats.wstats.prevent_l1_fails++; + return; + } + + while (true) { + qdf_rmb(); + if (!q_elem->valid) + break; + + q_elem->dequeue_time = qdf_get_log_timestamp(); + ring_id = q_elem->srng->ring_id; + addr = q_elem->addr; + delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time - + q_elem->enqueue_time); + hal_reg_write_fill_sched_delay_hist(hal, delta_us); + + hal->stats.wstats.dequeues++; + qdf_atomic_dec(&hal->stats.wstats.q_depth); + + if (hal_reg_write_need_delay(q_elem)) + hal_verbose_debug("Delay reg writer for srng 0x%x, addr 0x%pK", + q_elem->srng->ring_id, q_elem->addr); + + write_val = hal_process_reg_write_q_elem(hal, q_elem); + hal_verbose_debug("read_idx %u srng 0x%x, addr 0x%pK dequeue_val %u sched delay %llu us", + hal->read_idx, ring_id, addr, write_val, delta_us); + + qdf_atomic_dec(&hal->active_work_cnt); + hal->read_idx = (hal->read_idx + 1) & + (HAL_REG_WRITE_QUEUE_LEN - 1); + q_elem = &hal->reg_write_queue[(hal->read_idx)]; + } + + hif_allow_link_low_power_states(hal->hif_handle); +} + +/** + * hal_flush_reg_write_work() - flush all writes from regiter write queue + * @arg: hal_soc pointer + * + * Return: None + */ +static inline void hal_flush_reg_write_work(struct hal_soc *hal) +{ + qdf_cancel_work(&hal->reg_write_work); + qdf_flush_work(&hal->reg_write_work); + qdf_flush_workqueue(0, hal->reg_write_wq); +} + +/** + * hal_reg_write_enqueue() - enqueue register writes into kworker + * @hal_soc: hal_soc pointer + * @srng: srng pointer + * @addr: iomem address of regiter + * @value: value to be written to iomem address + * + * This function executes from within the SRNG LOCK + * + * Return: None + */ +static void hal_reg_write_enqueue(struct hal_soc *hal_soc, + struct hal_srng *srng, + void __iomem *addr, + uint32_t value) +{ + struct hal_reg_write_q_elem *q_elem; + uint32_t write_idx; + + if (srng->reg_write_in_progress) { + hal_verbose_debug("Already in progress srng ring id 0x%x addr 0x%pK val %u", + srng->ring_id, addr, value); + qdf_atomic_inc(&hal_soc->stats.wstats.coalesces); + srng->wstats.coalesces++; + return; + } + + write_idx = qdf_atomic_inc_return(&hal_soc->write_idx); + + write_idx = write_idx & (HAL_REG_WRITE_QUEUE_LEN - 1); + + q_elem = &hal_soc->reg_write_queue[write_idx]; + + if (q_elem->valid) { + hal_err("queue full"); + QDF_BUG(0); + return; + } + + qdf_atomic_inc(&hal_soc->stats.wstats.enqueues); + srng->wstats.enqueues++; + + qdf_atomic_inc(&hal_soc->stats.wstats.q_depth); + + q_elem->srng = srng; + q_elem->addr = addr; + q_elem->enqueue_val = value; + q_elem->enqueue_time = qdf_get_log_timestamp(); + + /* + * Before the valid flag is set to true, all the other + * fields in the q_elem needs to be updated in memory. + * Else there is a chance that the dequeuing worker thread + * might read stale entries and process incorrect srng. + */ + qdf_wmb(); + q_elem->valid = true; + + /* + * After all other fields in the q_elem has been updated + * in memory successfully, the valid flag needs to be updated + * in memory in time too. + * Else there is a chance that the dequeuing worker thread + * might read stale valid flag and the work will be bypassed + * for this round. And if there is no other work scheduled + * later, this hal register writing won't be updated any more. + */ + qdf_wmb(); + + srng->reg_write_in_progress = true; + qdf_atomic_inc(&hal_soc->active_work_cnt); + + hal_verbose_debug("write_idx %u srng ring id 0x%x addr 0x%pK val %u", + write_idx, srng->ring_id, addr, value); + + qdf_queue_work(hal_soc->qdf_dev, hal_soc->reg_write_wq, + &hal_soc->reg_write_work); +} + +void hal_delayed_reg_write(struct hal_soc *hal_soc, + struct hal_srng *srng, + void __iomem *addr, + uint32_t value) +{ + if (pld_is_device_awake(hal_soc->qdf_dev->dev) || + hal_is_reg_write_tput_level_high(hal_soc)) { + qdf_atomic_inc(&hal_soc->stats.wstats.direct); + srng->wstats.direct++; + hal_write_address_32_mb(hal_soc, addr, value, false); + } else { + hal_reg_write_enqueue(hal_soc, srng, addr, value); + } +} + +/** + * hal_delayed_reg_write_init() - Initialization function for delayed reg writes + * @hal_soc: hal_soc pointer + * + * Initialize main data structures to process register writes in a delayed + * workqueue. + * + * Return: QDF_STATUS_SUCCESS on success else a QDF error. + */ +static QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal) +{ + hal->reg_write_wq = + qdf_alloc_high_prior_ordered_workqueue("hal_register_write_wq"); + qdf_create_work(0, &hal->reg_write_work, hal_reg_write_work, hal); + hal->reg_write_queue = qdf_mem_malloc(HAL_REG_WRITE_QUEUE_LEN * + sizeof(*hal->reg_write_queue)); + if (!hal->reg_write_queue) { + hal_err("unable to allocate memory"); + QDF_BUG(0); + return QDF_STATUS_E_NOMEM; + } + + /* Initial value of indices */ + hal->read_idx = 0; + qdf_atomic_set(&hal->write_idx, -1); + return QDF_STATUS_SUCCESS; +} + +/** + * hal_delayed_reg_write_deinit() - De-Initialize delayed reg write processing + * @hal_soc: hal_soc pointer + * + * De-initialize main data structures to process register writes in a delayed + * workqueue. + * + * Return: None + */ +static void hal_delayed_reg_write_deinit(struct hal_soc *hal) +{ + hal_flush_reg_write_work(hal); + qdf_destroy_workqueue(0, hal->reg_write_wq); + qdf_mem_free(hal->reg_write_queue); +} + +static inline +char *hal_fill_reg_write_srng_stats(struct hal_srng *srng, + char *buf, qdf_size_t size) +{ + qdf_scnprintf(buf, size, "enq %u deq %u coal %u direct %u", + srng->wstats.enqueues, srng->wstats.dequeues, + srng->wstats.coalesces, srng->wstats.direct); + return buf; +} + +/* bytes for local buffer */ +#define HAL_REG_WRITE_SRNG_STATS_LEN 100 + +void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl) +{ + struct hal_srng *srng; + char buf[HAL_REG_WRITE_SRNG_STATS_LEN]; + struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl; + + srng = hal_get_srng(hal, HAL_SRNG_SW2TCL1); + hal_debug("SW2TCL1: %s", + hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf))); + + srng = hal_get_srng(hal, HAL_SRNG_WBM2SW0_RELEASE); + hal_debug("WBM2SW0: %s", + hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf))); + + srng = hal_get_srng(hal, HAL_SRNG_REO2SW1); + hal_debug("REO2SW1: %s", + hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf))); + + srng = hal_get_srng(hal, HAL_SRNG_REO2SW2); + hal_debug("REO2SW2: %s", + hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf))); + + srng = hal_get_srng(hal, HAL_SRNG_REO2SW3); + hal_debug("REO2SW3: %s", + hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf))); +} + +void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl) +{ + uint32_t *hist; + struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl; + + hist = hal->stats.wstats.sched_delay; + + hal_debug("enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u", + qdf_atomic_read(&hal->stats.wstats.enqueues), + hal->stats.wstats.dequeues, + qdf_atomic_read(&hal->stats.wstats.coalesces), + qdf_atomic_read(&hal->stats.wstats.direct), + qdf_atomic_read(&hal->stats.wstats.q_depth), + hal->stats.wstats.max_q_depth, + hist[REG_WRITE_SCHED_DELAY_SUB_100us], + hist[REG_WRITE_SCHED_DELAY_SUB_1000us], + hist[REG_WRITE_SCHED_DELAY_SUB_5000us], + hist[REG_WRITE_SCHED_DELAY_GT_5000us]); +} + +int hal_get_reg_write_pending_work(void *hal_soc) +{ + struct hal_soc *hal = (struct hal_soc *)hal_soc; + + return qdf_atomic_read(&hal->active_work_cnt); +} + +#else +static inline QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void hal_delayed_reg_write_deinit(struct hal_soc *hal) +{ +} +#endif + +/** + * hal_attach - Initialize HAL layer + * @hif_handle: Opaque HIF handle + * @qdf_dev: QDF device + * + * Return: Opaque HAL SOC handle + * NULL on failure (if given ring is not available) + * + * This function should be called as part of HIF initialization (for accessing + * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle() + * + */ +void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev) +{ + struct hal_soc *hal; + int i; + + hal = qdf_mem_malloc(sizeof(*hal)); + + if (!hal) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: hal_soc allocation failed", __func__); + goto fail0; + } + qdf_minidump_log(hal, sizeof(*hal), "hal_soc"); + hal->hif_handle = hif_handle; + hal->dev_base_addr = hif_get_dev_ba(hif_handle); + hal->qdf_dev = qdf_dev; + hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent( + qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) * + HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr)); + if (!hal->shadow_rdptr_mem_paddr) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: hal->shadow_rdptr_mem_paddr allocation failed", + __func__); + goto fail1; + } + qdf_mem_zero(hal->shadow_rdptr_mem_vaddr, + sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX); + + hal->shadow_wrptr_mem_vaddr = + (uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev, + sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS, + &(hal->shadow_wrptr_mem_paddr)); + if (!hal->shadow_wrptr_mem_vaddr) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: hal->shadow_wrptr_mem_vaddr allocation failed", + __func__); + goto fail2; + } + qdf_mem_zero(hal->shadow_wrptr_mem_vaddr, + sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS); + + for (i = 0; i < HAL_SRNG_ID_MAX; i++) { + hal->srng_list[i].initialized = 0; + hal->srng_list[i].ring_id = i; + } + + qdf_spinlock_create(&hal->register_access_lock); + hal->register_window = 0; + hal->target_type = hal_get_target_type(hal_soc_to_hal_soc_handle(hal)); + + hal_target_based_configure(hal); + + hal_reg_write_fail_history_init(hal); + + qdf_atomic_init(&hal->active_work_cnt); + hal_delayed_reg_write_init(hal); + + return (void *)hal; + +fail2: + qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, + sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX, + hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0); +fail1: + qdf_mem_free(hal); +fail0: + return NULL; +} +qdf_export_symbol(hal_attach); + +/** + * hal_mem_info - Retrieve hal memory base address + * + * @hal_soc: Opaque HAL SOC handle + * @mem: pointer to structure to be updated with hal mem info + */ +void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem) +{ + struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl; + mem->dev_base_addr = (void *)hal->dev_base_addr; + mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr; + mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr; + mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr; + mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr; + hif_read_phy_mem_base((void *)hal->hif_handle, + (qdf_dma_addr_t *)&mem->dev_base_paddr); + return; +} +qdf_export_symbol(hal_get_meminfo); + +/** + * hal_detach - Detach HAL layer + * @hal_soc: HAL SOC handle + * + * Return: Opaque HAL SOC handle + * NULL on failure (if given ring is not available) + * + * This function should be called as part of HIF initialization (for accessing + * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle() + * + */ +extern void hal_detach(void *hal_soc) +{ + struct hal_soc *hal = (struct hal_soc *)hal_soc; + + hal_delayed_reg_write_deinit(hal); + + qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev, + sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX, + hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0); + qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev, + sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS, + hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0); + qdf_minidump_remove(hal); + qdf_mem_free(hal); + + return; +} +qdf_export_symbol(hal_detach); + +/** + * hal_ce_dst_setup - Initialize CE destination ring registers + * @hal_soc: HAL SOC handle + * @srng: SRNG ring pointer + */ +static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng, + int ring_num) +{ + uint32_t reg_val = 0; + uint32_t reg_addr; + struct hal_hw_srng_config *ring_config = + HAL_SRNG_CONFIG(hal, CE_DST); + + /* set DEST_MAX_LENGTH according to ce assignment */ + reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR( + ring_config->reg_start[R0_INDEX] + + (ring_num * ring_config->reg_size[R0_INDEX])); + + reg_val = HAL_REG_READ(hal, reg_addr); + reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK; + reg_val |= srng->u.dst_ring.max_buffer_length & + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK; + HAL_REG_WRITE(hal, reg_addr, reg_val); + + if (srng->prefetch_timer) { + reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR( + ring_config->reg_start[R0_INDEX] + + (ring_num * ring_config->reg_size[R0_INDEX])); + + reg_val = HAL_REG_READ(hal, reg_addr); + reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK; + reg_val |= srng->prefetch_timer; + HAL_REG_WRITE(hal, reg_addr, reg_val); + reg_val = HAL_REG_READ(hal, reg_addr); + } + +} + +/** + * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX + * @hal: HAL SOC handle + * @read: boolean value to indicate if read or write + * @ix0: pointer to store IX0 reg value + * @ix1: pointer to store IX1 reg value + * @ix2: pointer to store IX2 reg value + * @ix3: pointer to store IX3 reg value + */ +void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read, + uint32_t *ix0, uint32_t *ix1, + uint32_t *ix2, uint32_t *ix3) +{ + uint32_t reg_offset; + struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl; + + if (read) { + if (ix0) { + reg_offset = + HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET); + *ix0 = HAL_REG_READ(hal, reg_offset); + } + + if (ix1) { + reg_offset = + HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET); + *ix1 = HAL_REG_READ(hal, reg_offset); + } + + if (ix2) { + reg_offset = + HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET); + *ix2 = HAL_REG_READ(hal, reg_offset); + } + + if (ix3) { + reg_offset = + HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET); + *ix3 = HAL_REG_READ(hal, reg_offset); + } + } else { + if (ix0) { + reg_offset = + HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET); + HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset, + *ix0, true); + } + + if (ix1) { + reg_offset = + HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET); + HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset, + *ix1, true); + } + + if (ix2) { + reg_offset = + HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET); + HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset, + *ix2, true); + } + + if (ix3) { + reg_offset = + HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET); + HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset, + *ix3, true); + } + } +} + +/** + * hal_srng_dst_set_hp_paddr_confirm() - Set physical address to dest ring head + * pointer and confirm that write went through by reading back the value + * @srng: sring pointer + * @paddr: physical address + */ +void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *srng, uint64_t paddr) +{ + SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_LSB, paddr & 0xffffffff); + SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_MSB, paddr >> 32); +} + +/** + * hal_srng_dst_init_hp() - Initialize destination ring head + * pointer + * @hal_soc: hal_soc handle + * @srng: sring pointer + * @vaddr: virtual address + */ +void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc, + struct hal_srng *srng, + uint32_t *vaddr) +{ + uint32_t reg_offset; + struct hal_soc *hal = (struct hal_soc *)hal_soc; + + if (!srng) + return; + + srng->u.dst_ring.hp_addr = vaddr; + reg_offset = SRNG_DST_ADDR(srng, HP) - hal->dev_base_addr; + HAL_REG_WRITE_CONFIRM_RETRY( + hal, reg_offset, srng->u.dst_ring.cached_hp, true); + + if (vaddr) { + *srng->u.dst_ring.hp_addr = srng->u.dst_ring.cached_hp; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "hp_addr=%pK, cached_hp=%d, hp=%d", + (void *)srng->u.dst_ring.hp_addr, + srng->u.dst_ring.cached_hp, + *srng->u.dst_ring.hp_addr); + } +} + +/** + * hal_srng_hw_init - Private function to initialize SRNG HW + * @hal_soc: HAL SOC handle + * @srng: SRNG ring pointer + */ +static inline void hal_srng_hw_init(struct hal_soc *hal, + struct hal_srng *srng) +{ + if (srng->ring_dir == HAL_SRNG_SRC_RING) + hal_srng_src_hw_init(hal, srng); + else + hal_srng_dst_hw_init(hal, srng); +} + +#ifdef CONFIG_SHADOW_V2 +#define ignore_shadow false +#define CHECK_SHADOW_REGISTERS true +#else +#define ignore_shadow true +#define CHECK_SHADOW_REGISTERS false +#endif + +/** + * hal_srng_setup - Initialize HW SRNG ring. + * @hal_soc: Opaque HAL SOC handle + * @ring_type: one of the types from hal_ring_type + * @ring_num: Ring number if there are multiple rings of same type (staring + * from 0) + * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings + * @ring_params: SRNG ring params in hal_srng_params structure. + + * Callers are expected to allocate contiguous ring memory of size + * 'num_entries * entry_size' bytes and pass the physical and virtual base + * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in + * hal_srng_params structure. Ring base address should be 8 byte aligned + * and size of each ring entry should be queried using the API + * hal_srng_get_entrysize + * + * Return: Opaque pointer to ring on success + * NULL on failure (if given ring is not available) + */ +void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num, + int mac_id, struct hal_srng_params *ring_params) +{ + int ring_id; + struct hal_soc *hal = (struct hal_soc *)hal_soc; + struct hal_srng *srng; + struct hal_hw_srng_config *ring_config = + HAL_SRNG_CONFIG(hal, ring_type); + void *dev_base_addr; + int i; + + ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id); + if (ring_id < 0) + return NULL; + + hal_verbose_debug("mac_id %d ring_id %d", mac_id, ring_id); + + srng = hal_get_srng(hal_soc, ring_id); + + if (srng->initialized) { + hal_verbose_debug("Ring (ring_type, ring_num) already initialized"); + return NULL; + } + + dev_base_addr = hal->dev_base_addr; + srng->ring_id = ring_id; + srng->ring_dir = ring_config->ring_dir; + srng->ring_base_paddr = ring_params->ring_base_paddr; + srng->ring_base_vaddr = ring_params->ring_base_vaddr; + srng->entry_size = ring_config->entry_size; + srng->num_entries = ring_params->num_entries; + srng->ring_size = srng->num_entries * srng->entry_size; + srng->ring_size_mask = srng->ring_size - 1; + srng->msi_addr = ring_params->msi_addr; + srng->msi_data = ring_params->msi_data; + srng->intr_timer_thres_us = ring_params->intr_timer_thres_us; + srng->intr_batch_cntr_thres_entries = + ring_params->intr_batch_cntr_thres_entries; + srng->prefetch_timer = ring_params->prefetch_timer; + srng->hal_soc = hal_soc; + + for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) { + srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i] + + (ring_num * ring_config->reg_size[i]); + } + + /* Zero out the entire ring memory */ + qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size * + srng->num_entries) << 2); + + srng->flags = ring_params->flags; +#ifdef BIG_ENDIAN_HOST + /* TODO: See if we should we get these flags from caller */ + srng->flags |= HAL_SRNG_DATA_TLV_SWAP; + srng->flags |= HAL_SRNG_MSI_SWAP; + srng->flags |= HAL_SRNG_RING_PTR_SWAP; +#endif + + if (srng->ring_dir == HAL_SRNG_SRC_RING) { + srng->u.src_ring.hp = 0; + srng->u.src_ring.reap_hp = srng->ring_size - + srng->entry_size; + srng->u.src_ring.tp_addr = + &(hal->shadow_rdptr_mem_vaddr[ring_id]); + srng->u.src_ring.low_threshold = + ring_params->low_threshold * srng->entry_size; + if (ring_config->lmac_ring) { + /* For LMAC rings, head pointer updates will be done + * through FW by writing to a shared memory location + */ + srng->u.src_ring.hp_addr = + &(hal->shadow_wrptr_mem_vaddr[ring_id - + HAL_SRNG_LMAC1_ID_START]); + srng->flags |= HAL_SRNG_LMAC_RING; + } else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) { + srng->u.src_ring.hp_addr = + hal_get_window_address(hal, + SRNG_SRC_ADDR(srng, HP)); + + if (CHECK_SHADOW_REGISTERS) { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + "%s: Ring (%d, %d) missing shadow config", + __func__, ring_type, ring_num); + } + } else { + hal_validate_shadow_register(hal, + SRNG_SRC_ADDR(srng, HP), + srng->u.src_ring.hp_addr); + } + } else { + /* During initialization loop count in all the descriptors + * will be set to zero, and HW will set it to 1 on completing + * descriptor update in first loop, and increments it by 1 on + * subsequent loops (loop count wraps around after reaching + * 0xffff). The 'loop_cnt' in SW ring state is the expected + * loop count in descriptors updated by HW (to be processed + * by SW). + */ + srng->u.dst_ring.loop_cnt = 1; + srng->u.dst_ring.tp = 0; + srng->u.dst_ring.hp_addr = + &(hal->shadow_rdptr_mem_vaddr[ring_id]); + if (ring_config->lmac_ring) { + /* For LMAC rings, tail pointer updates will be done + * through FW by writing to a shared memory location + */ + srng->u.dst_ring.tp_addr = + &(hal->shadow_wrptr_mem_vaddr[ring_id - + HAL_SRNG_LMAC1_ID_START]); + srng->flags |= HAL_SRNG_LMAC_RING; + } else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) { + srng->u.dst_ring.tp_addr = + hal_get_window_address(hal, + SRNG_DST_ADDR(srng, TP)); + + if (CHECK_SHADOW_REGISTERS) { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + "%s: Ring (%d, %d) missing shadow config", + __func__, ring_type, ring_num); + } + } else { + hal_validate_shadow_register(hal, + SRNG_DST_ADDR(srng, TP), + srng->u.dst_ring.tp_addr); + } + } + + if (!(ring_config->lmac_ring)) { + hal_srng_hw_init(hal, srng); + + if (ring_type == CE_DST) { + srng->u.dst_ring.max_buffer_length = ring_params->max_buffer_length; + hal_ce_dst_setup(hal, srng, ring_num); + } + } + + SRNG_LOCK_INIT(&srng->lock); + + srng->srng_event = 0; + + srng->initialized = true; + + return (void *)srng; +} +qdf_export_symbol(hal_srng_setup); + +/** + * hal_srng_cleanup - Deinitialize HW SRNG ring. + * @hal_soc: Opaque HAL SOC handle + * @hal_srng: Opaque HAL SRNG pointer + */ +void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + SRNG_LOCK_DESTROY(&srng->lock); + srng->initialized = 0; +} +qdf_export_symbol(hal_srng_cleanup); + +/** + * hal_srng_get_entrysize - Returns size of ring entry in bytes + * @hal_soc: Opaque HAL SOC handle + * @ring_type: one of the types from hal_ring_type + * + */ +uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type) +{ + struct hal_soc *hal = (struct hal_soc *)hal_soc; + struct hal_hw_srng_config *ring_config = + HAL_SRNG_CONFIG(hal, ring_type); + return ring_config->entry_size << 2; +} +qdf_export_symbol(hal_srng_get_entrysize); + +/** + * hal_srng_max_entries - Returns maximum possible number of ring entries + * @hal_soc: Opaque HAL SOC handle + * @ring_type: one of the types from hal_ring_type + * + * Return: Maximum number of entries for the given ring_type + */ +uint32_t hal_srng_max_entries(void *hal_soc, int ring_type) +{ + struct hal_soc *hal = (struct hal_soc *)hal_soc; + struct hal_hw_srng_config *ring_config = + HAL_SRNG_CONFIG(hal, ring_type); + + return ring_config->max_size / ring_config->entry_size; +} +qdf_export_symbol(hal_srng_max_entries); + +enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type) +{ + struct hal_soc *hal = (struct hal_soc *)hal_soc; + struct hal_hw_srng_config *ring_config = + HAL_SRNG_CONFIG(hal, ring_type); + + return ring_config->ring_dir; +} + +/** + * hal_srng_dump - Dump ring status + * @srng: hal srng pointer + */ +void hal_srng_dump(struct hal_srng *srng) +{ + if (srng->ring_dir == HAL_SRNG_SRC_RING) { + hal_debug("=== SRC RING %d ===", srng->ring_id); + hal_debug("hp %u, reap_hp %u, tp %u, cached tp %u", + srng->u.src_ring.hp, + srng->u.src_ring.reap_hp, + *srng->u.src_ring.tp_addr, + srng->u.src_ring.cached_tp); + } else { + hal_debug("=== DST RING %d ===", srng->ring_id); + hal_debug("tp %u, hp %u, cached tp %u, loop_cnt %u", + srng->u.dst_ring.tp, + *srng->u.dst_ring.hp_addr, + srng->u.dst_ring.cached_hp, + srng->u.dst_ring.loop_cnt); + } +} + +/** + * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Ring pointer (Source or Destination ring) + * @ring_params: SRNG parameters will be returned through this structure + */ +extern void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl, + struct hal_srng_params *ring_params) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl; + int i =0; + ring_params->ring_id = srng->ring_id; + ring_params->ring_dir = srng->ring_dir; + ring_params->entry_size = srng->entry_size; + + ring_params->ring_base_paddr = srng->ring_base_paddr; + ring_params->ring_base_vaddr = srng->ring_base_vaddr; + ring_params->num_entries = srng->num_entries; + ring_params->msi_addr = srng->msi_addr; + ring_params->msi_data = srng->msi_data; + ring_params->intr_timer_thres_us = srng->intr_timer_thres_us; + ring_params->intr_batch_cntr_thres_entries = + srng->intr_batch_cntr_thres_entries; + ring_params->low_threshold = srng->u.src_ring.low_threshold; + ring_params->flags = srng->flags; + ring_params->ring_id = srng->ring_id; + for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) + ring_params->hwreg_base[i] = srng->hwreg_base[i]; +} +qdf_export_symbol(hal_get_srng_params); + +#ifdef FORCE_WAKE +void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase) +{ + struct hal_soc *hal_soc = (struct hal_soc *)soc; + + hal_soc->init_phase = init_phase; +} +#endif /* FORCE_WAKE */ diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_tx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..b5d7020ad58563225b7e7ef97332ab6544c13d0c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_tx.h @@ -0,0 +1,1138 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(HAL_TX_H) +#define HAL_TX_H + +/*--------------------------------------------------------------------------- + Include files + ---------------------------------------------------------------------------*/ +#include "hal_api.h" +#include "wcss_version.h" + +#define WBM_RELEASE_RING_5_TX_RATE_STATS_OFFSET 0x00000014 +#define WBM_RELEASE_RING_5_TX_RATE_STATS_LSB 0 +#define WBM_RELEASE_RING_5_TX_RATE_STATS_MASK 0xffffffff + +#define HAL_WBM_RELEASE_RING_2_BUFFER_TYPE 0 +#define HAL_WBM_RELEASE_RING_2_DESC_TYPE 1 + +/*--------------------------------------------------------------------------- + Preprocessor definitions and constants + ---------------------------------------------------------------------------*/ +#define HAL_OFFSET(block, field) block ## _ ## field ## _OFFSET + +#define HAL_SET_FLD(desc, block , field) \ + (*(uint32_t *) ((uint8_t *) desc + HAL_OFFSET(block, field))) + +#define HAL_SET_FLD_OFFSET(desc, block , field, offset) \ + (*(uint32_t *) ((uint8_t *) desc + HAL_OFFSET(block, field) + (offset))) + +#define HAL_TX_DESC_SET_TLV_HDR(desc, tag, len) \ +do { \ + ((struct tlv_32_hdr *) desc)->tlv_tag = (tag); \ + ((struct tlv_32_hdr *) desc)->tlv_len = (len); \ +} while (0) + +#define HAL_TX_TCL_DATA_TAG WIFITCL_DATA_CMD_E +#define HAL_TX_TCL_CMD_TAG WIFITCL_GSE_CMD_E + +#define HAL_TX_SM(block, field, value) \ + ((value << (block ## _ ## field ## _LSB)) & \ + (block ## _ ## field ## _MASK)) + +#define HAL_TX_MS(block, field, value) \ + (((value) & (block ## _ ## field ## _MASK)) >> \ + (block ## _ ## field ## _LSB)) + +#define HAL_TX_DESC_GET(desc, block, field) \ + HAL_TX_MS(block, field, HAL_SET_FLD(desc, block, field)) + +#define HAL_TX_DESC_SUBBLOCK_GET(desc, block, sub, field) \ + HAL_TX_MS(sub, field, HAL_SET_FLD(desc, block, sub)) + +#define HAL_TX_BUF_TYPE_BUFFER 0 +#define HAL_TX_BUF_TYPE_EXT_DESC 1 + +#define HAL_TX_DESC_LEN_DWORDS (NUM_OF_DWORDS_TCL_DATA_CMD) +#define HAL_TX_DESC_LEN_BYTES (NUM_OF_DWORDS_TCL_DATA_CMD * 4) +#define HAL_TX_EXTENSION_DESC_LEN_DWORDS (NUM_OF_DWORDS_TX_MSDU_EXTENSION) +#define HAL_TX_EXTENSION_DESC_LEN_BYTES (NUM_OF_DWORDS_TX_MSDU_EXTENSION * 4) + +#define HAL_TX_COMPLETION_DESC_LEN_DWORDS (NUM_OF_DWORDS_WBM_RELEASE_RING) +#define HAL_TX_COMPLETION_DESC_LEN_BYTES (NUM_OF_DWORDS_WBM_RELEASE_RING*4) +#define HAL_TX_BITS_PER_TID 3 +#define HAL_TX_TID_BITS_MASK ((1 << HAL_TX_BITS_PER_TID) - 1) +#define HAL_TX_NUM_DSCP_PER_REGISTER 10 +#define HAL_MAX_HW_DSCP_TID_MAPS 2 +#define HAL_MAX_HW_DSCP_TID_MAPS_11AX 32 + +#define HAL_MAX_HW_DSCP_TID_V2_MAPS 48 +#define HTT_META_HEADER_LEN_BYTES 64 +#define HAL_TX_EXT_DESC_WITH_META_DATA \ + (HTT_META_HEADER_LEN_BYTES + HAL_TX_EXTENSION_DESC_LEN_BYTES) + +#define HAL_TX_NUM_PCP_PER_REGISTER 8 + +/* Length of WBM release ring without the status words */ +#define HAL_TX_COMPLETION_DESC_BASE_LEN 12 + +#define HAL_TX_COMP_RELEASE_SOURCE_TQM 0 +#define HAL_TX_COMP_RELEASE_SOURCE_REO 2 +#define HAL_TX_COMP_RELEASE_SOURCE_FW 3 + +/* Define a place-holder release reason for FW */ +#define HAL_TX_COMP_RELEASE_REASON_FW 99 + +/* + * Offset of HTT Tx Descriptor in WBM Completion + * HTT Tx Desc structure is passed from firmware to host overlayed + * on wbm_release_ring DWORDs 2,3 ,4 and 5for software based completions + * (Exception frames and TQM bypass frames) + */ +#define HAL_TX_COMP_HTT_STATUS_OFFSET 8 +#ifdef CONFIG_BERYLLIUM +#define HAL_TX_COMP_HTT_STATUS_LEN 20 +#else +#define HAL_TX_COMP_HTT_STATUS_LEN 16 +#endif + +#define HAL_TX_BUF_TYPE_BUFFER 0 +#define HAL_TX_BUF_TYPE_EXT_DESC 1 + +#define HAL_TX_EXT_DESC_BUF_OFFSET TX_MSDU_EXTENSION_6_BUF0_PTR_31_0_OFFSET +#define HAL_TX_EXT_BUF_LOW_MASK TX_MSDU_EXTENSION_6_BUF0_PTR_31_0_MASK +#define HAL_TX_EXT_BUF_HI_MASK TX_MSDU_EXTENSION_7_BUF0_PTR_39_32_MASK +#define HAL_TX_EXT_BUF_LEN_MASK TX_MSDU_EXTENSION_7_BUF0_LEN_MASK +#define HAL_TX_EXT_BUF_LEN_LSB TX_MSDU_EXTENSION_7_BUF0_LEN_LSB +#define HAL_TX_EXT_BUF_WD_SIZE 2 + +#define HAL_TX_DESC_ADDRX_EN 0x1 +#define HAL_TX_DESC_ADDRY_EN 0x2 +#define HAL_TX_DESC_DEFAULT_LMAC_ID 0x3 + +#define HAL_TX_ADDR_SEARCH_DEFAULT 0x0 +#define HAL_TX_ADDR_INDEX_SEARCH 0x1 +#define HAL_TX_FLOW_INDEX_SEARCH 0x2 + +enum hal_tx_ret_buf_manager { + HAL_WBM_SW0_BM_ID = 3, + HAL_WBM_SW1_BM_ID = 4, + HAL_WBM_SW2_BM_ID = 5, + HAL_WBM_SW3_BM_ID = 6, +}; + +/*--------------------------------------------------------------------------- + Structures + ---------------------------------------------------------------------------*/ +/** + * struct hal_tx_completion_status - HAL Tx completion descriptor contents + * @status: frame acked/failed + * @release_src: release source = TQM/FW + * @ack_frame_rssi: RSSI of the received ACK or BA frame + * @first_msdu: Indicates this MSDU is the first MSDU in AMSDU + * @last_msdu: Indicates this MSDU is the last MSDU in AMSDU + * @msdu_part_of_amsdu : Indicates this MSDU was part of an A-MSDU in MPDU + * @bw: Indicates the BW of the upcoming transmission - + * + * + * + * + * @pkt_type: Transmit Packet Type + * @stbc: When set, STBC transmission rate was used + * @ldpc: When set, use LDPC transmission rates + * @sgi: Legacy normal GI + * Legacy short GI + * HE related GI + * HE + * @mcs: Transmit MCS Rate + * @ofdma: Set when the transmission was an OFDMA transmission + * @tones_in_ru: The number of tones in the RU used. + * @tsf: Lower 32 bits of the TSF + * @ppdu_id: TSF, snapshot of this value when transmission of the + * PPDU containing the frame finished. + * @transmit_cnt: Number of times this frame has been transmitted + * @tid: TID of the flow or MPDU queue + * @peer_id: Peer ID of the flow or MPDU queue + */ +struct hal_tx_completion_status { + uint8_t status; + uint8_t release_src; + uint8_t ack_frame_rssi; + uint8_t first_msdu:1, + last_msdu:1, + msdu_part_of_amsdu:1; + uint32_t bw:2, + pkt_type:4, + stbc:1, + ldpc:1, + sgi:2, + mcs:4, + ofdma:1, + tones_in_ru:12, + valid:1; + uint32_t tsf; + uint32_t ppdu_id; + uint8_t transmit_cnt; + uint8_t tid; + uint16_t peer_id; +}; + +/** + * struct hal_tx_desc_comp_s - hal tx completion descriptor contents + * @desc: Transmit status information from descriptor + */ +struct hal_tx_desc_comp_s { + uint32_t desc[HAL_TX_COMPLETION_DESC_LEN_DWORDS]; +}; + +/* + * enum hal_tx_encrypt_type - Type of decrypt cipher used (valid only for RAW) + * @HAL_TX_ENCRYPT_TYPE_WEP_40: WEP 40-bit + * @HAL_TX_ENCRYPT_TYPE_WEP_10: WEP 10-bit + * @HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC: TKIP without MIC + * @HAL_TX_ENCRYPT_TYPE_WEP_128: WEP_128 + * @HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC: TKIP_WITH_MIC + * @HAL_TX_ENCRYPT_TYPE_WAPI: WAPI + * @HAL_TX_ENCRYPT_TYPE_AES_CCMP_128: AES_CCMP_128 + * @HAL_TX_ENCRYPT_TYPE_NO_CIPHER: NO CIPHER + * @HAL_TX_ENCRYPT_TYPE_AES_CCMP_256: AES_CCMP_256 + * @HAL_TX_ENCRYPT_TYPE_AES_GCMP_128: AES_GCMP_128 + * @HAL_TX_ENCRYPT_TYPE_AES_GCMP_256: AES_GCMP_256 + * @HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4: WAPI GCM SM4 + */ +enum hal_tx_encrypt_type { + HAL_TX_ENCRYPT_TYPE_WEP_40 = 0, + HAL_TX_ENCRYPT_TYPE_WEP_104 = 1 , + HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC = 2, + HAL_TX_ENCRYPT_TYPE_WEP_128 = 3, + HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC = 4, + HAL_TX_ENCRYPT_TYPE_WAPI = 5, + HAL_TX_ENCRYPT_TYPE_AES_CCMP_128 = 6, + HAL_TX_ENCRYPT_TYPE_NO_CIPHER = 7, + HAL_TX_ENCRYPT_TYPE_AES_CCMP_256 = 8, + HAL_TX_ENCRYPT_TYPE_AES_GCMP_128 = 9, + HAL_TX_ENCRYPT_TYPE_AES_GCMP_256 = 10, + HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4 = 11, +}; + +/* + * enum hal_tx_encap_type - Encapsulation type that HW will perform + * @HAL_TX_ENCAP_TYPE_RAW: Raw Packet Type + * @HAL_TX_ENCAP_TYPE_NWIFI: Native WiFi Type + * @HAL_TX_ENCAP_TYPE_ETHERNET: Ethernet + * @HAL_TX_ENCAP_TYPE_802_3: 802.3 Frame + */ +enum hal_tx_encap_type { + HAL_TX_ENCAP_TYPE_RAW = 0, + HAL_TX_ENCAP_TYPE_NWIFI = 1, + HAL_TX_ENCAP_TYPE_ETHERNET = 2, + HAL_TX_ENCAP_TYPE_802_3 = 3, +}; + +/** + * enum hal_tx_tqm_release_reason - TQM Release reason codes + * + * @HAL_TX_TQM_RR_FRAME_ACKED : ACK of BA for it was received + * @HAL_TX_TQM_RR_REM_CMD_REM : Remove cmd of type “Remove_mpdus†initiated + * by SW + * @HAL_TX_TQM_RR_REM_CMD_TX : Remove command of type Remove_transmitted_mpdus + * initiated by SW + * @HAL_TX_TQM_RR_REM_CMD_NOTX : Remove cmd of type Remove_untransmitted_mpdus + * initiated by SW + * @HAL_TX_TQM_RR_REM_CMD_AGED : Remove command of type “Remove_aged_mpdus†or + * “Remove_aged_msdus†initiated by SW + * @HAL_TX_TQM_RR_FW_REASON1 : Remove command where fw indicated that + * remove reason is fw_reason1 + * @HAL_TX_TQM_RR_FW_REASON2 : Remove command where fw indicated that + * remove reason is fw_reason2 + * @HAL_TX_TQM_RR_FW_REASON3 : Remove command where fw indicated that + * remove reason is fw_reason3 + * @HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE : Remove command where fw indicated that + * remove reason is remove disable queue + */ +enum hal_tx_tqm_release_reason { + HAL_TX_TQM_RR_FRAME_ACKED, + HAL_TX_TQM_RR_REM_CMD_REM, + HAL_TX_TQM_RR_REM_CMD_TX, + HAL_TX_TQM_RR_REM_CMD_NOTX, + HAL_TX_TQM_RR_REM_CMD_AGED, + HAL_TX_TQM_RR_FW_REASON1, + HAL_TX_TQM_RR_FW_REASON2, + HAL_TX_TQM_RR_FW_REASON3, + HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE, +}; + +/* enum - Table IDs for 2 DSCP-TID mapping Tables that TCL H/W supports + * @HAL_TX_DSCP_TID_MAP_TABLE_DEFAULT: Default DSCP-TID mapping table + * @HAL_TX_DSCP_TID_MAP_TABLE_OVERRIDE: DSCP-TID map override table + */ +enum hal_tx_dscp_tid_table_id { + HAL_TX_DSCP_TID_MAP_TABLE_DEFAULT, + HAL_TX_DSCP_TID_MAP_TABLE_OVERRIDE, +}; + +/*--------------------------------------------------------------------------- + Function declarations and documentation + ---------------------------------------------------------------------------*/ + +/*--------------------------------------------------------------------------- + TCL Descriptor accessor APIs + ---------------------------------------------------------------------------*/ + +/** + * hal_tx_desc_set_buf_length - Set Data length in bytes in Tx Descriptor + * @desc: Handle to Tx Descriptor + * @data_length: MSDU length in case of direct descriptor. + * Length of link extension descriptor in case of Link extension + * descriptor.Includes the length of Metadata + * Return: None + */ +static inline void hal_tx_desc_set_buf_length(void *desc, + uint16_t data_length) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_3, DATA_LENGTH) |= + HAL_TX_SM(TCL_DATA_CMD_3, DATA_LENGTH, data_length); +} + +/** + * hal_tx_desc_set_buf_offset - Sets Packet Offset field in Tx descriptor + * @desc: Handle to Tx Descriptor + * @offset: Packet offset from Metadata in case of direct buffer descriptor. + * + * Return: void + */ +static inline void hal_tx_desc_set_buf_offset(void *desc, + uint8_t offset) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_3, PACKET_OFFSET) |= + HAL_TX_SM(TCL_DATA_CMD_3, PACKET_OFFSET, offset); +} + +/** + * hal_tx_desc_set_encap_type - Set encapsulation type in Tx Descriptor + * @desc: Handle to Tx Descriptor + * @encap_type: Encapsulation that HW will perform + * + * Return: void + * + */ +static inline void hal_tx_desc_set_encap_type(void *desc, + enum hal_tx_encap_type encap_type) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_2, ENCAP_TYPE) |= + HAL_TX_SM(TCL_DATA_CMD_2, ENCAP_TYPE, encap_type); +} + +/** + * hal_tx_desc_set_encrypt_type - Sets the Encrypt Type in Tx Descriptor + * @desc: Handle to Tx Descriptor + * @type: Encrypt Type + * + * Return: void + */ +static inline void hal_tx_desc_set_encrypt_type(void *desc, + enum hal_tx_encrypt_type type) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_2, ENCRYPT_TYPE) |= + HAL_TX_SM(TCL_DATA_CMD_2, ENCRYPT_TYPE, type); +} + +/** + * hal_tx_desc_set_addr_search_flags - Enable AddrX and AddrY search flags + * @desc: Handle to Tx Descriptor + * @flags: Bit 0 - AddrY search enable, Bit 1 - AddrX search enable + * + * Return: void + */ +static inline void hal_tx_desc_set_addr_search_flags(void *desc, + uint8_t flags) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_2, ADDRX_EN) |= + HAL_TX_SM(TCL_DATA_CMD_2, ADDRX_EN, (flags & 0x1)); + + HAL_SET_FLD(desc, TCL_DATA_CMD_2, ADDRY_EN) |= + HAL_TX_SM(TCL_DATA_CMD_2, ADDRY_EN, (flags >> 1)); +} + +/** + * hal_tx_desc_set_l4_checksum_en - Set TCP/IP checksum enable flags + * Tx Descriptor for MSDU_buffer type + * @desc: Handle to Tx Descriptor + * @en: UDP/TCP over ipv4/ipv6 checksum enable flags (5 bits) + * + * Return: void + */ +static inline void hal_tx_desc_set_l4_checksum_en(void *desc, + uint8_t en) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_3, IPV4_CHECKSUM_EN) |= + (HAL_TX_SM(TCL_DATA_CMD_3, UDP_OVER_IPV4_CHECKSUM_EN, en) | + HAL_TX_SM(TCL_DATA_CMD_3, UDP_OVER_IPV6_CHECKSUM_EN, en) | + HAL_TX_SM(TCL_DATA_CMD_3, TCP_OVER_IPV4_CHECKSUM_EN, en) | + HAL_TX_SM(TCL_DATA_CMD_3, TCP_OVER_IPV6_CHECKSUM_EN, en)); +} + +/** + * hal_tx_desc_set_l3_checksum_en - Set IPv4 checksum enable flag in + * Tx Descriptor for MSDU_buffer type + * @desc: Handle to Tx Descriptor + * @checksum_en_flags: ipv4 checksum enable flags + * + * Return: void + */ +static inline void hal_tx_desc_set_l3_checksum_en(void *desc, + uint8_t en) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_3, IPV4_CHECKSUM_EN) |= + HAL_TX_SM(TCL_DATA_CMD_3, IPV4_CHECKSUM_EN, en); +} + +/** + * hal_tx_desc_set_fw_metadata- Sets the metadata that is part of TCL descriptor + * @desc:Handle to Tx Descriptor + * @metadata: Metadata to be sent to Firmware + * + * Return: void + */ +static inline void hal_tx_desc_set_fw_metadata(void *desc, + uint16_t metadata) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_2, TCL_CMD_NUMBER) |= + HAL_TX_SM(TCL_DATA_CMD_2, TCL_CMD_NUMBER, metadata); +} + +/** + * hal_tx_desc_set_to_fw - Set To_FW bit in Tx Descriptor. + * @desc:Handle to Tx Descriptor + * @to_fw: if set, Forward packet to FW along with classification result + * + * Return: void + */ +static inline void hal_tx_desc_set_to_fw(void *desc, uint8_t to_fw) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_3, TO_FW) |= + HAL_TX_SM(TCL_DATA_CMD_3, TO_FW, to_fw); +} + +/** + * hal_tx_desc_set_mesh_en - Set mesh_enable flag in Tx descriptor + * @hal_soc_hdl: hal soc handle + * @desc: Handle to Tx Descriptor + * @en: For raw WiFi frames, this indicates transmission to a mesh STA, + * enabling the interpretation of the 'Mesh Control Present' bit + * (bit 8) of QoS Control (otherwise this bit is ignored), + * For native WiFi frames, this indicates that a 'Mesh Control' field + * is present between the header and the LLC. + * + * Return: void + */ +static inline void hal_tx_desc_set_mesh_en(hal_soc_handle_t hal_soc_hdl, + void *desc, uint8_t en) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_tx_desc_set_mesh_en(desc, en); +} + +/** + * hal_tx_desc_set_hlos_tid - Set the TID value (override DSCP/PCP fields in + * frame) to be used for Tx Frame + * @desc: Handle to Tx Descriptor + * @hlos_tid: HLOS TID + * + * Return: void + */ +static inline void hal_tx_desc_set_hlos_tid(void *desc, + uint8_t hlos_tid) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_4, HLOS_TID) |= + HAL_TX_SM(TCL_DATA_CMD_4, HLOS_TID, hlos_tid); + + HAL_SET_FLD(desc, TCL_DATA_CMD_4, HLOS_TID_OVERWRITE) |= + HAL_TX_SM(TCL_DATA_CMD_4, HLOS_TID_OVERWRITE, 1); +} +/** + * hal_tx_desc_sync - Commit the descriptor to Hardware + * @hal_tx_des_cached: Cached descriptor that software maintains + * @hw_desc: Hardware descriptor to be updated + */ +static inline void hal_tx_desc_sync(void *hal_tx_desc_cached, + void *hw_desc) +{ + qdf_mem_copy((hw_desc + sizeof(struct tlv_32_hdr)), + hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES); +} + +/*--------------------------------------------------------------------------- + Tx MSDU Extension Descriptor accessor APIs + ---------------------------------------------------------------------------*/ +/** + * hal_tx_ext_desc_set_tso_enable() - Set TSO Enable Flag + * @desc: Handle to Tx MSDU Extension Descriptor + * @tso_en: bool value set to true if TSO is enabled + * + * Return: none + */ +static inline void hal_tx_ext_desc_set_tso_enable(void *desc, + uint8_t tso_en) +{ + HAL_SET_FLD(desc, TX_MSDU_EXTENSION_0, TSO_ENABLE) |= + HAL_TX_SM(TX_MSDU_EXTENSION_0, TSO_ENABLE, tso_en); +} + +/** + * hal_tx_ext_desc_set_tso_flags() - Set TSO Flags + * @desc: Handle to Tx MSDU Extension Descriptor + * @falgs: 32-bit word with all TSO flags consolidated + * + * Return: none + */ +static inline void hal_tx_ext_desc_set_tso_flags(void *desc, + uint32_t tso_flags) +{ + HAL_SET_FLD_OFFSET(desc, TX_MSDU_EXTENSION_0, TSO_ENABLE, 0) = + tso_flags; +} + +/** + * hal_tx_ext_desc_set_tcp_flags() - Enable HW Checksum offload + * @desc: Handle to Tx MSDU Extension Descriptor + * @tcp_flags: TCP flags {NS,CWR,ECE,URG,ACK,PSH, RST ,SYN,FIN} + * @mask: TCP flag mask. Tcp_flag is inserted into the header + * based on the mask, if tso is enabled + * + * Return: none + */ +static inline void hal_tx_ext_desc_set_tcp_flags(void *desc, + uint16_t tcp_flags, + uint16_t mask) +{ + HAL_SET_FLD(desc, TX_MSDU_EXTENSION_0, TCP_FLAG) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_0, TCP_FLAG, tcp_flags)) | + (HAL_TX_SM(TX_MSDU_EXTENSION_0, TCP_FLAG_MASK, mask))); +} + +/** + * hal_tx_ext_desc_set_msdu_length() - Set L2 and IP Lengths + * @desc: Handle to Tx MSDU Extension Descriptor + * @l2_len: L2 length for the msdu, if tso is enabled + * @ip_len: IP length for the msdu, if tso is enabled + * + * Return: none + */ +static inline void hal_tx_ext_desc_set_msdu_length(void *desc, + uint16_t l2_len, + uint16_t ip_len) +{ + HAL_SET_FLD(desc, TX_MSDU_EXTENSION_1, L2_LENGTH) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_1, L2_LENGTH, l2_len)) | + (HAL_TX_SM(TX_MSDU_EXTENSION_1, IP_LENGTH, ip_len))); +} + +/** + * hal_tx_ext_desc_set_tcp_seq() - Set TCP Sequence number + * @desc: Handle to Tx MSDU Extension Descriptor + * @seq_num: Tcp_seq_number for the msdu, if tso is enabled + * + * Return: none + */ +static inline void hal_tx_ext_desc_set_tcp_seq(void *desc, + uint32_t seq_num) +{ + HAL_SET_FLD(desc, TX_MSDU_EXTENSION_2, TCP_SEQ_NUMBER) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_2, TCP_SEQ_NUMBER, seq_num))); +} + + +/** + * hal_tx_ext_desc_set_ip_id() - Set IP Identification field + * @desc: Handle to Tx MSDU Extension Descriptor + * @id: IP Id field for the msdu, if tso is enabled + * + * Return: none + */ +static inline void hal_tx_ext_desc_set_ip_id(void *desc, + uint16_t id) +{ + HAL_SET_FLD(desc, TX_MSDU_EXTENSION_3, IP_IDENTIFICATION) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_3, IP_IDENTIFICATION, id))); +} +/** + * hal_tx_ext_desc_set_buffer() - Set Buffer Pointer and Length for a fragment + * @desc: Handle to Tx MSDU Extension Descriptor + * @frag_num: Fragment number (value can be 0 to 5) + * @paddr_lo: Lower 32-bit of Buffer Physical address + * @paddr_hi: Upper 32-bit of Buffer Physical address + * @length: Buffer Length + * + * Return: none + */ +static inline void hal_tx_ext_desc_set_buffer(void *desc, + uint8_t frag_num, + uint32_t paddr_lo, + uint16_t paddr_hi, + uint16_t length) +{ + HAL_SET_FLD_OFFSET(desc, TX_MSDU_EXTENSION_6, BUF0_PTR_31_0, + (frag_num << 3)) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_6, BUF0_PTR_31_0, paddr_lo))); + + HAL_SET_FLD_OFFSET(desc, TX_MSDU_EXTENSION_7, BUF0_PTR_39_32, + (frag_num << 3)) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_7, BUF0_PTR_39_32, + (paddr_hi)))); + + HAL_SET_FLD_OFFSET(desc, TX_MSDU_EXTENSION_7, BUF0_LEN, + (frag_num << 3)) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_7, BUF0_LEN, length))); +} + +/** + * hal_tx_ext_desc_set_buffer0_param() - Set Buffer 0 Pointer and Length + * @desc: Handle to Tx MSDU Extension Descriptor + * @paddr_lo: Lower 32-bit of Buffer Physical address + * @paddr_hi: Upper 32-bit of Buffer Physical address + * @length: Buffer 0 Length + * + * Return: none + */ +static inline void hal_tx_ext_desc_set_buffer0_param(void *desc, + uint32_t paddr_lo, + uint16_t paddr_hi, + uint16_t length) +{ + HAL_SET_FLD(desc, TX_MSDU_EXTENSION_6, BUF0_PTR_31_0) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_6, BUF0_PTR_31_0, paddr_lo))); + + HAL_SET_FLD(desc, TX_MSDU_EXTENSION_7, BUF0_PTR_39_32) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_7, + BUF0_PTR_39_32, paddr_hi))); + + HAL_SET_FLD(desc, TX_MSDU_EXTENSION_7, BUF0_LEN) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_7, BUF0_LEN, length))); +} + +/** + * hal_tx_ext_desc_set_buffer1_param() - Set Buffer 1 Pointer and Length + * @desc: Handle to Tx MSDU Extension Descriptor + * @paddr_lo: Lower 32-bit of Buffer Physical address + * @paddr_hi: Upper 32-bit of Buffer Physical address + * @length: Buffer 1 Length + * + * Return: none + */ +static inline void hal_tx_ext_desc_set_buffer1_param(void *desc, + uint32_t paddr_lo, + uint16_t paddr_hi, + uint16_t length) +{ + HAL_SET_FLD(desc, TX_MSDU_EXTENSION_8, BUF1_PTR_31_0) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_8, BUF1_PTR_31_0, paddr_lo))); + + HAL_SET_FLD(desc, TX_MSDU_EXTENSION_9, BUF1_PTR_39_32) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_9, + BUF1_PTR_39_32, paddr_hi))); + + HAL_SET_FLD(desc, TX_MSDU_EXTENSION_9, BUF1_LEN) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_9, BUF1_LEN, length))); +} + +/** + * hal_tx_ext_desc_set_buffer2_param() - Set Buffer 2 Pointer and Length + * @desc: Handle to Tx MSDU Extension Descriptor + * @paddr_lo: Lower 32-bit of Buffer Physical address + * @paddr_hi: Upper 32-bit of Buffer Physical address + * @length: Buffer 2 Length + * + * Return: none + */ +static inline void hal_tx_ext_desc_set_buffer2_param(void *desc, + uint32_t paddr_lo, + uint16_t paddr_hi, + uint16_t length) +{ + HAL_SET_FLD(desc, TX_MSDU_EXTENSION_10, BUF2_PTR_31_0) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_10, BUF2_PTR_31_0, + paddr_lo))); + + HAL_SET_FLD(desc, TX_MSDU_EXTENSION_11, BUF2_PTR_39_32) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_11, BUF2_PTR_39_32, + paddr_hi))); + + HAL_SET_FLD(desc, TX_MSDU_EXTENSION_11, BUF2_LEN) |= + ((HAL_TX_SM(TX_MSDU_EXTENSION_11, BUF2_LEN, length))); +} + +/** + * hal_tx_ext_desc_sync - Commit the descriptor to Hardware + * @desc_cached: Cached descriptor that software maintains + * @hw_desc: Hardware descriptor to be updated + * + * Return: none + */ +static inline void hal_tx_ext_desc_sync(uint8_t *desc_cached, + uint8_t *hw_desc) +{ + qdf_mem_copy(&hw_desc[0], &desc_cached[0], + HAL_TX_EXT_DESC_WITH_META_DATA); +} + +/** + * hal_tx_ext_desc_get_tso_enable() - Set TSO Enable Flag + * @hal_tx_ext_desc: Handle to Tx MSDU Extension Descriptor + * + * Return: tso_enable value in the descriptor + */ +static inline uint32_t hal_tx_ext_desc_get_tso_enable(void *hal_tx_ext_desc) +{ + uint32_t *desc = (uint32_t *) hal_tx_ext_desc; + return (*desc & TX_MSDU_EXTENSION_0_TSO_ENABLE_MASK) >> + TX_MSDU_EXTENSION_0_TSO_ENABLE_LSB; +} + +/*--------------------------------------------------------------------------- + WBM Descriptor accessor APIs for Tx completions + ---------------------------------------------------------------------------*/ +/** + * hal_tx_comp_get_desc_id() - Get TX descriptor id within comp descriptor + * @hal_desc: completion ring descriptor pointer + * + * This function will tx descriptor id, cookie, within hardware completion + * descriptor + * + * Return: cookie + */ +static inline uint32_t hal_tx_comp_get_desc_id(void *hal_desc) +{ + uint32_t comp_desc = + *(uint32_t *) (((uint8_t *) hal_desc) + + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_OFFSET); + + /* Cookie is placed on 2nd word */ + return (comp_desc & BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK) >> + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB; +} + +/** + * hal_tx_comp_get_paddr() - Get paddr within comp descriptor + * @hal_desc: completion ring descriptor pointer + * + * This function will get buffer physical address within hardware completion + * descriptor + * + * Return: Buffer physical address + */ +static inline qdf_dma_addr_t hal_tx_comp_get_paddr(void *hal_desc) +{ + uint32_t paddr_lo; + uint32_t paddr_hi; + + paddr_lo = *(uint32_t *) (((uint8_t *) hal_desc) + + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_OFFSET); + + paddr_hi = *(uint32_t *) (((uint8_t *) hal_desc) + + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_OFFSET); + + paddr_hi = (paddr_hi & BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK) >> + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB; + + return (qdf_dma_addr_t) (paddr_lo | (((uint64_t) paddr_hi) << 32)); +} + +/** + * hal_tx_comp_get_buffer_source() - Get buffer release source value + * @hal_desc: completion ring descriptor pointer + * + * This function will get buffer release source from Tx completion descriptor + * + * Return: buffer release source + */ +static inline uint32_t hal_tx_comp_get_buffer_source(void *hal_desc) +{ + uint32_t comp_desc = + *(uint32_t *) (((uint8_t *) hal_desc) + + WBM_RELEASE_RING_2_RELEASE_SOURCE_MODULE_OFFSET); + + return (comp_desc & WBM_RELEASE_RING_2_RELEASE_SOURCE_MODULE_MASK) >> + WBM_RELEASE_RING_2_RELEASE_SOURCE_MODULE_LSB; +} + +/** + * hal_tx_comp_get_buffer_type() - Buffer or Descriptor type + * @hal_desc: completion ring descriptor pointer + * + * This function will return the type of pointer - buffer or descriptor + * + * Return: buffer type + */ +static inline uint32_t hal_tx_comp_get_buffer_type(void *hal_desc) +{ + uint32_t comp_desc = + *(uint32_t *) (((uint8_t *) hal_desc) + + WBM_RELEASE_RING_2_BUFFER_OR_DESC_TYPE_OFFSET); + + return (comp_desc & WBM_RELEASE_RING_2_BUFFER_OR_DESC_TYPE_MASK) >> + WBM_RELEASE_RING_2_BUFFER_OR_DESC_TYPE_LSB; +} + +/** + * hal_tx_comp_get_release_reason() - TQM Release reason + * @hal_desc: completion ring descriptor pointer + * + * This function will return the type of pointer - buffer or descriptor + * + * Return: buffer type + */ +static inline +uint8_t hal_tx_comp_get_release_reason(void *hal_desc, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_tx_comp_get_release_reason(hal_desc); +} + +/** + * hal_tx_comp_desc_sync() - collect hardware descriptor contents + * @hal_desc: hardware descriptor pointer + * @comp: software descriptor pointer + * @read_status: 0 - Do not read status words from descriptors + * 1 - Enable reading of status words from descriptor + * + * This function will collect hardware release ring element contents and + * translate to software descriptor content + * + * Return: none + */ + +static inline void hal_tx_comp_desc_sync(void *hw_desc, + struct hal_tx_desc_comp_s *comp, + bool read_status) +{ + if (!read_status) + qdf_mem_copy(comp, hw_desc, HAL_TX_COMPLETION_DESC_BASE_LEN); + else + qdf_mem_copy(comp, hw_desc, HAL_TX_COMPLETION_DESC_LEN_BYTES); +} + +/** + * hal_dump_comp_desc() - dump tx completion descriptor + * @hal_desc: hardware descriptor pointer + * + * This function will print tx completion descriptor + * + * Return: none + */ +static inline void hal_dump_comp_desc(void *hw_desc) +{ + struct hal_tx_desc_comp_s *comp = + (struct hal_tx_desc_comp_s *)hw_desc; + uint32_t i; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL, + "Current tx completion descriptor is"); + + for (i = 0; i < HAL_TX_COMPLETION_DESC_LEN_DWORDS; i++) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL, + "DWORD[i] = 0x%x", comp->desc[i]); + } +} + +/** + * hal_tx_comp_get_htt_desc() - Read the HTT portion of WBM Descriptor + * @hal_desc: Hardware (WBM) descriptor pointer + * @htt_desc: Software HTT descriptor pointer + * + * This function will read the HTT structure overlaid on WBM descriptor + * into a cached software descriptor + * + */ +static inline void hal_tx_comp_get_htt_desc(void *hw_desc, uint8_t *htt_desc) +{ + uint8_t *desc = hw_desc + HAL_TX_COMP_HTT_STATUS_OFFSET; + + qdf_mem_copy(htt_desc, desc, HAL_TX_COMP_HTT_STATUS_LEN); +} + +/** + * hal_tx_init_data_ring() - Initialize all the TCL Descriptors in SRNG + * @hal_soc_hdl: Handle to HAL SoC structure + * @hal_srng: Handle to HAL SRNG structure + * + * Return: none + */ +static inline void hal_tx_init_data_ring(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl) +{ + uint8_t *desc_addr; + struct hal_srng_params srng_params; + uint32_t desc_size; + uint32_t num_desc; + + hal_get_srng_params(hal_soc_hdl, hal_ring_hdl, &srng_params); + + desc_addr = (uint8_t *)srng_params.ring_base_vaddr; + desc_size = sizeof(struct tcl_data_cmd); + num_desc = srng_params.num_entries; + + while (num_desc) { + HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, + desc_size); + desc_addr += (desc_size + sizeof(struct tlv_32_hdr)); + num_desc--; + } +} + +/** + * hal_tx_desc_set_dscp_tid_table_id() - Sets DSCP to TID conversion table ID + * @hal_soc: Handle to HAL SoC structure + * @desc: Handle to Tx Descriptor + * @id: DSCP to tid conversion table to be used for this frame + * + * Return: void + */ +static inline +void hal_tx_desc_set_dscp_tid_table_id(hal_soc_handle_t hal_soc_hdl, + void *desc, uint8_t id) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_tx_desc_set_dscp_tid_table_id(desc, id); +} + +/** + * hal_tx_set_dscp_tid_map_default() - Configure default DSCP to TID map table + * + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id: mapping table ID - 0,1 + * + * Return: void + */ +static inline void hal_tx_set_dscp_tid_map(hal_soc_handle_t hal_soc_hdl, + uint8_t *map, uint8_t id) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_tx_set_dscp_tid_map(hal_soc, map, id); +} + +/** + * hal_tx_update_dscp_tid() - Update the dscp tid map table as updated by user + * + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id : MAP ID + * @dscp: DSCP_TID map index + * + * Return: void + */ +static inline +void hal_tx_update_dscp_tid(hal_soc_handle_t hal_soc_hdl, uint8_t tid, + uint8_t id, uint8_t dscp) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_tx_update_dscp_tid(hal_soc, tid, id, dscp); +} + +/** + * hal_tx_desc_set_lmac_id - Set the lmac_id value + * @desc: Handle to Tx Descriptor + * @lmac_id: mac Id to ast matching + * b00 – mac 0 + * b01 – mac 1 + * b10 – mac 2 + * b11 – all macs (legacy HK way) + * + * Return: void + */ +static inline void hal_tx_desc_set_lmac_id(hal_soc_handle_t hal_soc_hdl, + void *desc, uint8_t lmac_id) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_tx_desc_set_lmac_id(desc, lmac_id); +} + +/** + * hal_tx_desc_set_search_type - Set the search type value + * @desc: Handle to Tx Descriptor + * @search_type: search type + * 0 – Normal search + * 1 – Index based address search + * 2 – Index based flow search + * + * Return: void + */ +static inline void hal_tx_desc_set_search_type(hal_soc_handle_t hal_soc_hdl, + void *desc, uint8_t search_type) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_tx_desc_set_search_type(desc, search_type); +} + +/** + * hal_tx_desc_set_search_index - Set the search index value + * @desc: Handle to Tx Descriptor + * @search_index: The index that will be used for index based address or + * flow search. The field is valid when 'search_type' is + * 1 0r 2 + * + * Return: void + */ +static inline void hal_tx_desc_set_search_index(hal_soc_handle_t hal_soc_hdl, + void *desc, + uint32_t search_index) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_tx_desc_set_search_index(desc, search_index); +} + +/** + * hal_tx_desc_set_cache_set_num - Set the cache-set-num value + * @desc: Handle to Tx Descriptor + * @cache_num: Cache set number that should be used to cache the index + * based search results, for address and flow search. + * This value should be equal to LSB four bits of the hash value + * of match data, in case of search index points to an entry + * which may be used in content based search also. The value can + * be anything when the entry pointed by search index will not be + * used for content based search. + * + * Return: void + */ +static inline void hal_tx_desc_set_cache_set_num(hal_soc_handle_t hal_soc_hdl, + void *desc, + uint8_t cache_num) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_tx_desc_set_cache_set_num(desc, cache_num); +} + +/** + * hal_tx_comp_get_status() - TQM Release reason + * @hal_desc: completion ring Tx status + * + * This function will parse the WBM completion descriptor and populate in + * HAL structure + * + * Return: none + */ +static inline void hal_tx_comp_get_status(void *desc, void *ts, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_tx_comp_get_status(desc, ts, hal_soc); +} + + +/** + * hal_tx_desc_set_buf_addr - Fill Buffer Address information in Tx Descriptor + * @desc: Handle to Tx Descriptor + * @paddr: Physical Address + * @pool_id: Return Buffer Manager ID + * @desc_id: Descriptor ID + * @type: 0 - Address points to a MSDU buffer + * 1 - Address points to MSDU extension descriptor + * + * Return: void + */ +static inline +void hal_tx_desc_set_buf_addr(void *desc, dma_addr_t paddr, + uint8_t pool_id, uint32_t desc_id, + uint8_t type, hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_tx_desc_set_buf_addr(desc, paddr, pool_id, + desc_id, type); + +} + +/** + * hal_tx_set_pcp_tid_map_default() - Configure default PCP to TID map table + * + * @soc: HAL SoC context + * @map: PCP-TID mapping table + * + * Return: void + */ +static inline void hal_tx_set_pcp_tid_map_default(hal_soc_handle_t hal_soc_hdl, + uint8_t *map) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_tx_set_pcp_tid_map(hal_soc, map); +} + +/** + * hal_tx_update_pcp_tid_map() - Update PCP to TID map table + * + * @soc: HAL SoC context + * @pcp: pcp value + * @tid: tid no + * + * Return: void + */ +static inline void hal_tx_update_pcp_tid_map(hal_soc_handle_t hal_soc_hdl, + uint8_t pcp, uint8_t tid) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_tx_update_pcp_tid_map(hal_soc, tid, tid); +} + +/** + * hal_tx_set_tidmap_prty() - Configure TIDmap priority + * + * @soc: HAL SoC context + * @val: priority value + * + * Return: void + */ +static inline +void hal_tx_set_tidmap_prty(hal_soc_handle_t hal_soc_hdl, uint8_t val) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_tx_set_tidmap_prty(hal_soc, val); +} + +/** + * hal_get_wbm_internal_error() - wbm internal error + * @hal_desc: completion ring descriptor pointer + * + * This function will return the type of pointer - buffer or descriptor + * + * Return: buffer type + */ +static inline +uint8_t hal_get_wbm_internal_error(hal_soc_handle_t hal_soc_hdl, void *hal_desc) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_get_wbm_internal_error(hal_desc); +} + +#endif /* HAL_TX_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_wbm.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_wbm.h new file mode 100644 index 0000000000000000000000000000000000000000..f78d1ac862df57618f199f2f49dffce6a6f9c09c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_wbm.h @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * hal_setup_link_idle_list - Setup scattered idle list using the + * buffer list provided + * + * @hal_soc: Opaque HAL SOC handle + * @scatter_bufs_base_paddr: Array of physical base addresses + * @scatter_bufs_base_vaddr: Array of virtual base addresses + * @num_scatter_bufs: Number of scatter buffers in the above lists + * @scatter_buf_size: Size of each scatter buffer + * @last_buf_end_offset: Offset to the last entry + * @num_entries: Total entries of all scatter bufs + * + */ +static void +hal_setup_link_idle_list_generic(struct hal_soc *soc, + qdf_dma_addr_t scatter_bufs_base_paddr[], + void *scatter_bufs_base_vaddr[], + uint32_t num_scatter_bufs, + uint32_t scatter_buf_size, + uint32_t last_buf_end_offset, + uint32_t num_entries) +{ + int i; + uint32_t *prev_buf_link_ptr = NULL; + uint32_t reg_scatter_buf_size, reg_tot_scatter_buf_size; + uint32_t val; + + /* Link the scatter buffers */ + for (i = 0; i < num_scatter_bufs; i++) { + if (i > 0) { + prev_buf_link_ptr[0] = + scatter_bufs_base_paddr[i] & 0xffffffff; + prev_buf_link_ptr[1] = HAL_SM( + HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB, + BASE_ADDRESS_39_32, + ((uint64_t)(scatter_bufs_base_paddr[i]) + >> 32)) | HAL_SM( + HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB, + ADDRESS_MATCH_TAG, + ADDRESS_MATCH_TAG_VAL); + } + prev_buf_link_ptr = (uint32_t *)(scatter_bufs_base_vaddr[i] + + scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE); + } + + /* TBD: Register programming partly based on MLD & the rest based on + * inputs from HW team. Not complete yet. + */ + + reg_scatter_buf_size = (scatter_buf_size - + WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)/64; + reg_tot_scatter_buf_size = ((scatter_buf_size - + WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) * num_scatter_bufs)/64; + + HAL_REG_WRITE(soc, + HWIO_WBM_R0_IDLE_LIST_CONTROL_ADDR( + SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HAL_SM(HWIO_WBM_R0_IDLE_LIST_CONTROL, SCATTER_BUFFER_SIZE, + reg_scatter_buf_size) | + HAL_SM(HWIO_WBM_R0_IDLE_LIST_CONTROL, LINK_DESC_IDLE_LIST_MODE, + 0x1)); + + HAL_REG_WRITE(soc, + HWIO_WBM_R0_IDLE_LIST_SIZE_ADDR( + SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HAL_SM(HWIO_WBM_R0_IDLE_LIST_SIZE, + SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST, + reg_tot_scatter_buf_size)); + + HAL_REG_WRITE(soc, + HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WBM_REG_OFFSET), + scatter_bufs_base_paddr[0] & 0xffffffff); + + HAL_REG_WRITE(soc, + HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB_ADDR( + SEQ_WCSS_UMAC_WBM_REG_OFFSET), + ((uint64_t)(scatter_bufs_base_paddr[0]) >> 32) & + HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB_BASE_ADDRESS_39_32_BMSK); + + HAL_REG_WRITE(soc, + HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB_ADDR( + SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB, + BASE_ADDRESS_39_32, ((uint64_t)(scatter_bufs_base_paddr[0]) + >> 32)) | + HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB, + ADDRESS_MATCH_TAG, ADDRESS_MATCH_TAG_VAL)); + + /* ADDRESS_MATCH_TAG field in the above register is expected to match + * with the upper bits of link pointer. The above write sets this field + * to zero and we are also setting the upper bits of link pointers to + * zero while setting up the link list of scatter buffers above + */ + + /* Setup head and tail pointers for the idle list */ + HAL_REG_WRITE(soc, + HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX0_ADDR( + SEQ_WCSS_UMAC_WBM_REG_OFFSET), + scatter_bufs_base_paddr[num_scatter_bufs-1] & 0xffffffff); + HAL_REG_WRITE(soc, + HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX1_ADDR( + SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX1, + BUFFER_ADDRESS_39_32, + ((uint64_t)(scatter_bufs_base_paddr[num_scatter_bufs-1]) + >> 32)) | + HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX1, + HEAD_POINTER_OFFSET, last_buf_end_offset >> 2)); + + HAL_REG_WRITE(soc, + HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX0_ADDR( + SEQ_WCSS_UMAC_WBM_REG_OFFSET), + scatter_bufs_base_paddr[0] & 0xffffffff); + + HAL_REG_WRITE(soc, + HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX0_ADDR( + SEQ_WCSS_UMAC_WBM_REG_OFFSET), + scatter_bufs_base_paddr[0] & 0xffffffff); + HAL_REG_WRITE(soc, + HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX1_ADDR( + SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX1, + BUFFER_ADDRESS_39_32, + ((uint64_t)(scatter_bufs_base_paddr[0]) >> + 32)) | HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX1, + TAIL_POINTER_OFFSET, 0)); + + HAL_REG_WRITE(soc, + HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HP_ADDR( + SEQ_WCSS_UMAC_WBM_REG_OFFSET), + 2*num_entries); + + /* Set RING_ID_DISABLE */ + val = HAL_SM(HWIO_WBM_R0_WBM_IDLE_LINK_RING_MISC, RING_ID_DISABLE, 1); + + /* + * SRNG_ENABLE bit is not available in HWK v1 (QCA8074v1). Hence + * check the presence of the bit before toggling it. + */ +#ifdef HWIO_WBM_R0_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE_BMSK + val |= HAL_SM(HWIO_WBM_R0_WBM_IDLE_LINK_RING_MISC, SRNG_ENABLE, 1); +#endif + HAL_REG_WRITE(soc, + HWIO_WBM_R0_WBM_IDLE_LINK_RING_MISC_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + val); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6290/hal_6290.c b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6290/hal_6290.c new file mode 100644 index 0000000000000000000000000000000000000000..dc22c00a99e093bd5b9cf1a8a2dce3dd30af1356 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6290/hal_6290.c @@ -0,0 +1,1541 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "qdf_types.h" +#include "qdf_util.h" +#include "qdf_types.h" +#include "qdf_lock.h" +#include "qdf_mem.h" +#include "qdf_nbuf.h" +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "hal_api.h" +#include "target_type.h" +#include "wcss_version.h" +#include "qdf_module.h" + +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_OFFSET \ + RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_OFFSET +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_MASK \ + RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_MASK +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_LSB \ + RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_LSB +#define UNIFIED_PHYRX_HT_SIG_0_HT_SIG_INFO_PHYRX_HT_SIG_INFO_DETAILS_OFFSET \ + PHYRX_HT_SIG_0_HT_SIG_INFO_PHYRX_HT_SIG_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_L_SIG_B_0_L_SIG_B_INFO_PHYRX_L_SIG_B_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_B_0_L_SIG_B_INFO_PHYRX_L_SIG_B_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_L_SIG_A_0_L_SIG_A_INFO_PHYRX_L_SIG_A_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_A_0_L_SIG_A_INFO_PHYRX_L_SIG_A_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_VHT_SIG_A_0_VHT_SIG_A_INFO_PHYRX_VHT_SIG_A_INFO_DETAILS_OFFSET \ + PHYRX_VHT_SIG_A_0_VHT_SIG_A_INFO_PHYRX_VHT_SIG_A_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_A_SU_0_HE_SIG_A_SU_INFO_PHYRX_HE_SIG_A_SU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_A_SU_0_HE_SIG_A_SU_INFO_PHYRX_HE_SIG_A_SU_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_A_MU_DL_0_HE_SIG_A_MU_DL_INFO_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_A_MU_DL_0_HE_SIG_A_MU_DL_INFO_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B1_MU_0_HE_SIG_B1_MU_INFO_PHYRX_HE_SIG_B1_MU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B1_MU_0_HE_SIG_B1_MU_INFO_PHYRX_HE_SIG_B1_MU_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B2_MU_0_HE_SIG_B2_MU_INFO_PHYRX_HE_SIG_B2_MU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B2_MU_0_HE_SIG_B2_MU_INFO_PHYRX_HE_SIG_B2_MU_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B2_OFDMA_0_HE_SIG_B2_OFDMA_INFO_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B2_OFDMA_0_HE_SIG_B2_OFDMA_INFO_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_RSSI_LEGACY_3_RECEIVE_RSSI_INFO_PRE_RSSI_INFO_DETAILS_OFFSET \ + PHYRX_RSSI_LEGACY_3_RECEIVE_RSSI_INFO_PRE_RSSI_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_RSSI_LEGACY_19_RECEIVE_RSSI_INFO_PREAMBLE_RSSI_INFO_DETAILS_OFFSET \ + PHYRX_RSSI_LEGACY_19_RECEIVE_RSSI_INFO_PREAMBLE_RSSI_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MPDU_START_0_RX_MPDU_INFO_RX_MPDU_INFO_DETAILS_OFFSET \ + RX_MPDU_START_0_RX_MPDU_INFO_RX_MPDU_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET \ + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET +#define UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET \ + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET \ + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET +#define UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC \ + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER +#define UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET \ + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET +#define UNIFIED_TCL_DATA_CMD_0_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET \ + TCL_DATA_CMD_0_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET +#define UNIFIED_TCL_DATA_CMD_1_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET \ + TCL_DATA_CMD_1_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_OFFSET \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_OFFSET +#define UNIFIED_BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB +#define UNIFIED_BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_LSB \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_LSB +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_MASK \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_MASK +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_MASK \ + WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_MASK +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_OFFSET \ + WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_OFFSET +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_LSB \ + WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_LSB + +#include "hal_6290_tx.h" +#include "hal_6290_rx.h" +#include +#include + +/** + * hal_rx_get_rx_fragment_number_6290(): Function to retrieve rx fragment number + * + * @nbuf: Network buffer + * Returns: rx fragment number + */ +static +uint8_t hal_rx_get_rx_fragment_number_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + /* Return first 4 bits as fragment number */ + return (HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info) & + DOT11_SEQ_FRAG_MASK); +} + +/** + * hal_rx_msdu_end_da_is_mcbc_get: API to check if pkt is MCBC + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_mcbc + */ +static inline uint8_t +hal_rx_msdu_end_da_is_mcbc_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_DA_IS_MCBC_GET(msdu_end); +} + +/** + * hal_rx_msdu_end_sa_is_valid_get_6290(): API to get_6290 the + * sa_is_valid bit from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_is_valid bit + */ +static uint8_t +hal_rx_msdu_end_sa_is_valid_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t sa_is_valid; + + sa_is_valid = HAL_RX_MSDU_END_SA_IS_VALID_GET(msdu_end); + + return sa_is_valid; +} + +/** + * hal_rx_msdu_end_sa_idx_get_6290(): API to get_6290 the + * sa_idx from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_idx (SA AST index) + */ +static +uint16_t hal_rx_msdu_end_sa_idx_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t sa_idx; + + sa_idx = HAL_RX_MSDU_END_SA_IDX_GET(msdu_end); + + return sa_idx; +} + +/** + * hal_rx_desc_is_first_msdu_6290() - Check if first msdu + * + * @hal_soc_hdl: hal_soc handle + * @hw_desc_addr: hardware descriptor address + * + * Return: 0 - success/ non-zero failure + */ +static uint32_t hal_rx_desc_is_first_msdu_6290(void *hw_desc_addr) +{ + struct rx_pkt_tlvs *rx_tlvs = (struct rx_pkt_tlvs *)hw_desc_addr; + struct rx_msdu_end *msdu_end = &rx_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_GET(msdu_end, RX_MSDU_END_5, FIRST_MSDU); +} + +/** + * hal_rx_msdu_end_l3_hdr_padding_get_6290(): API to get_6290 the + * l3_header padding from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: number of l3 header padding bytes + */ +static uint32_t hal_rx_msdu_end_l3_hdr_padding_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint32_t l3_header_padding; + + l3_header_padding = HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(msdu_end); + + return l3_header_padding; +} + +/* + * @ hal_rx_encryption_info_valid_6290: Returns encryption type. + * + * @ buf: rx_tlv_hdr of the received packet + * @ Return: encryption type + */ +static uint32_t hal_rx_encryption_info_valid_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + uint32_t encryption_info = HAL_RX_MPDU_ENCRYPTION_INFO_VALID(mpdu_info); + + return encryption_info; +} + +/* + * hal_rx_print_pn_6290: Prints the PN of rx packet. + * @buf: rx_tlv_hdr of the received packet + * + * Return: void + */ +static void hal_rx_print_pn_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + uint32_t pn_31_0 = HAL_RX_MPDU_PN_31_0_GET(mpdu_info); + uint32_t pn_63_32 = HAL_RX_MPDU_PN_63_32_GET(mpdu_info); + uint32_t pn_95_64 = HAL_RX_MPDU_PN_95_64_GET(mpdu_info); + uint32_t pn_127_96 = HAL_RX_MPDU_PN_127_96_GET(mpdu_info); + + hal_debug("PN number pn_127_96 0x%x pn_95_64 0x%x pn_63_32 0x%x pn_31_0 0x%x ", + pn_127_96, pn_95_64, pn_63_32, pn_31_0); +} + +/** + * hal_rx_msdu_end_first_msdu_get_6290: API to get first msdu status + * from rx_msdu_end TLV + * + * @buf: pointer to the start of RX PKT TLV headers + * Return: first_msdu + */ +static uint8_t +hal_rx_msdu_end_first_msdu_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t first_msdu; + + first_msdu = HAL_RX_MSDU_END_FIRST_MSDU_GET(msdu_end); + + return first_msdu; +} + +/** + * hal_rx_msdu_end_da_is_valid_get_6290: API to check if da is valid + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_valid + */ +static uint8_t hal_rx_msdu_end_da_is_valid_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t da_is_valid; + + da_is_valid = HAL_RX_MSDU_END_DA_IS_VALID_GET(msdu_end); + + return da_is_valid; +} + +/** + * hal_rx_msdu_end_last_msdu_get_6290: API to get last msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: last_msdu + */ +static uint8_t hal_rx_msdu_end_last_msdu_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t last_msdu; + + last_msdu = HAL_RX_MSDU_END_LAST_MSDU_GET(msdu_end); + + return last_msdu; +} + +/* + * hal_rx_get_mpdu_mac_ad4_valid_6290(): Retrieves if mpdu 4th addr is valid + * + * @nbuf: Network buffer + * Returns: value of mpdu 4th address valid field + */ +static bool hal_rx_get_mpdu_mac_ad4_valid_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + bool ad4_valid = 0; + + ad4_valid = HAL_RX_MPDU_GET_MAC_AD4_VALID(rx_mpdu_info); + + return ad4_valid; +} + +/** + * hal_rx_mpdu_start_sw_peer_id_get_6290: Retrieve sw peer_id + * @buf: network buffer + * + * Return: sw peer_id: + */ +static uint32_t hal_rx_mpdu_start_sw_peer_id_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + return HAL_RX_MPDU_INFO_SW_PEER_ID_GET( + &mpdu_start->rx_mpdu_info_details); +} + +/* + * hal_rx_mpdu_get_to_ds_6290(): API to get the tods info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(to_ds) + */ + +static uint32_t hal_rx_mpdu_get_to_ds_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + return HAL_RX_MPDU_GET_TODS(mpdu_info); +} + +/* + * hal_rx_mpdu_get_fr_ds_6290(): API to get the from ds info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(fr_ds) + */ +static uint32_t hal_rx_mpdu_get_fr_ds_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + return HAL_RX_MPDU_GET_FROMDS(mpdu_info); +} + +/* + * hal_rx_get_mpdu_frame_control_valid_6290(): Retrieves mpdu frame + * control valid + * + * @nbuf: Network buffer + * Returns: value of frame control valid field + */ +static uint8_t hal_rx_get_mpdu_frame_control_valid_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(rx_mpdu_info); +} + +/* + * hal_rx_mpdu_get_addr1_6290(): API to check get address1 of the mpdu + * + * @buf: pointer to the start of RX PKT TLV headera + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr1_6290(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr1 { + uint32_t ad1_31_0; + uint16_t ad1_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr1 *addr = (struct hal_addr1 *)mac_addr; + uint32_t mac_addr_ad1_valid; + + mac_addr_ad1_valid = HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(mpdu_info); + + if (mac_addr_ad1_valid) { + addr->ad1_31_0 = HAL_RX_MPDU_AD1_31_0_GET(mpdu_info); + addr->ad1_47_32 = HAL_RX_MPDU_AD1_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr2_6290(): API to check get address2 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr2_6290(uint8_t *buf, + uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr2 { + uint16_t ad2_15_0; + uint32_t ad2_47_16; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr2 *addr = (struct hal_addr2 *)mac_addr; + uint32_t mac_addr_ad2_valid; + + mac_addr_ad2_valid = HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(mpdu_info); + + if (mac_addr_ad2_valid) { + addr->ad2_15_0 = HAL_RX_MPDU_AD2_15_0_GET(mpdu_info); + addr->ad2_47_16 = HAL_RX_MPDU_AD2_47_16_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr3_6290(): API to get address3 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr3_6290(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr3 { + uint32_t ad3_31_0; + uint16_t ad3_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr3 *addr = (struct hal_addr3 *)mac_addr; + uint32_t mac_addr_ad3_valid; + + mac_addr_ad3_valid = HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(mpdu_info); + + if (mac_addr_ad3_valid) { + addr->ad3_31_0 = HAL_RX_MPDU_AD3_31_0_GET(mpdu_info); + addr->ad3_47_32 = HAL_RX_MPDU_AD3_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr4_6290(): API to get address4 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr4_6290(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr4 { + uint32_t ad4_31_0; + uint16_t ad4_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr4 *addr = (struct hal_addr4 *)mac_addr; + uint32_t mac_addr_ad4_valid; + + mac_addr_ad4_valid = HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(mpdu_info); + + if (mac_addr_ad4_valid) { + addr->ad4_31_0 = HAL_RX_MPDU_AD4_31_0_GET(mpdu_info); + addr->ad4_47_32 = HAL_RX_MPDU_AD4_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_get_mpdu_sequence_control_valid_6290(): Get mpdu + * sequence control valid + * + * @nbuf: Network buffer + * Returns: value of sequence control valid field + */ +static uint8_t hal_rx_get_mpdu_sequence_control_valid_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(rx_mpdu_info); +} + +/** + * hal_rx_is_unicast_6290: check packet is unicast frame or not. + * + * @ buf: pointer to rx pkt TLV. + * + * Return: true on unicast. + */ +static bool hal_rx_is_unicast_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint32_t grp_id; + uint8_t *rx_mpdu_info = (uint8_t *)&mpdu_start->rx_mpdu_info_details; + + grp_id = (_HAL_MS((*_OFFSET_TO_WORD_PTR((rx_mpdu_info), + RX_MPDU_INFO_0_SW_FRAME_GROUP_ID_OFFSET)), + RX_MPDU_INFO_0_SW_FRAME_GROUP_ID_MASK, + RX_MPDU_INFO_0_SW_FRAME_GROUP_ID_LSB)); + + return (HAL_MPDU_SW_FRAME_GROUP_UNICAST_DATA == grp_id) ? true : false; +} + +/** + * hal_rx_tid_get_6290: get tid based on qos control valid. + * @hal_soc_hdl: hal soc handle + * @ buf: pointer to rx pkt TLV. + * + * Return: tid + */ +static uint32_t hal_rx_tid_get_6290(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint8_t *rx_mpdu_info = (uint8_t *)&mpdu_start->rx_mpdu_info_details; + uint8_t qos_control_valid = + (_HAL_MS((*_OFFSET_TO_WORD_PTR((rx_mpdu_info), + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_OFFSET)), + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_MASK, + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_LSB)); + + if (qos_control_valid) + return hal_rx_mpdu_start_tid_get_6290(buf); + + return HAL_RX_NON_QOS_TID; +} + +/** + * hal_rx_hw_desc_get_ppduid_get_6290(): retrieve ppdu id + * @hw_desc_addr: hw addr + * + * Return: ppdu id + */ +static uint32_t hal_rx_hw_desc_get_ppduid_get_6290(void *hw_desc_addr) +{ + struct rx_mpdu_info *rx_mpdu_info; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + + rx_mpdu_info = + &rx_desc->mpdu_start_tlv.rx_mpdu_start.rx_mpdu_info_details; + + return HAL_RX_GET(rx_mpdu_info, RX_MPDU_INFO_0, PHY_PPDU_ID); +} + +/** + * hal_reo_status_get_header_6290 - Process reo desc info + * @d - Pointer to reo descriptior + * @b - tlv type info + * @h1 - Pointer to hal_reo_status_header where info to be stored + * + * Return - none. + * + */ +static void hal_reo_status_get_header_6290(uint32_t *d, int b, void *h1) +{ + uint32_t val1 = 0; + struct hal_reo_status_header *h = + (struct hal_reo_status_header *)h1; + + switch (b) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + val1 = + d[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UPDATE_RX_REO_QUEUE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + default: + qdf_nofl_err("ERROR: Unknown tlv\n"); + break; + } + h->cmd_num = + HAL_GET_FIELD( + UNIFORM_REO_STATUS_HEADER_0, REO_STATUS_NUMBER, + val1); + h->exec_time = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, + CMD_EXECUTION_TIME, val1); + h->status = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, + REO_CMD_EXECUTION_STATUS, val1); + switch (b) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + val1 = + d[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UPDATE_RX_REO_QUEUE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + default: + qdf_nofl_err("ERROR: Unknown tlv\n"); + break; + } + h->tstamp = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_1, TIMESTAMP, val1); +} + +/** + * hal_rx_mpdu_start_mpdu_qos_control_valid_get_6290(): + * Retrieve qos control valid bit from the tlv. + * @buf: pointer to rx pkt TLV. + * + * Return: qos control value. + */ +static inline uint32_t +hal_rx_mpdu_start_mpdu_qos_control_valid_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + return HAL_RX_MPDU_INFO_QOS_CONTROL_VALID_GET( + &mpdu_start->rx_mpdu_info_details); +} + +/** + * hal_rx_msdu_end_sa_sw_peer_id_get_6290(): API to get the + * sa_sw_peer_id from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: sa_sw_peer_id index + */ +static inline uint32_t +hal_rx_msdu_end_sa_sw_peer_id_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(msdu_end); +} + +/** + * hal_tx_desc_set_mesh_en_6290 - Set mesh_enable flag in Tx descriptor + * @desc: Handle to Tx Descriptor + * @en: For raw WiFi frames, this indicates transmission to a mesh STA, + * enabling the interpretation of the 'Mesh Control Present' bit + * (bit 8) of QoS Control (otherwise this bit is ignored), + * For native WiFi frames, this indicates that a 'Mesh Control' field + * is present between the header and the LLC. + * + * Return: void + */ +static inline +void hal_tx_desc_set_mesh_en_6290(void *desc, uint8_t en) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_4, MESH_ENABLE) |= + HAL_TX_SM(TCL_DATA_CMD_4, MESH_ENABLE, en); +} + +static +void *hal_rx_msdu0_buffer_addr_lsb_6290(void *link_desc_va) +{ + return (void *)HAL_RX_MSDU0_BUFFER_ADDR_LSB(link_desc_va); +} + +static +void *hal_rx_msdu_desc_info_ptr_get_6290(void *msdu0) +{ + return (void *)HAL_RX_MSDU_DESC_INFO_PTR_GET(msdu0); +} + +static +void *hal_ent_mpdu_desc_info_6290(void *ent_ring_desc) +{ + return (void *)HAL_ENT_MPDU_DESC_INFO(ent_ring_desc); +} + +static +void *hal_dst_mpdu_desc_info_6290(void *dst_ring_desc) +{ + return (void *)HAL_DST_MPDU_DESC_INFO(dst_ring_desc); +} + +static +uint8_t hal_rx_get_fc_valid_6290(uint8_t *buf) +{ + return HAL_RX_GET_FC_VALID(buf); +} + +static uint8_t hal_rx_get_to_ds_flag_6290(uint8_t *buf) +{ + return HAL_RX_GET_TO_DS_FLAG(buf); +} + +static uint8_t hal_rx_get_mac_addr2_valid_6290(uint8_t *buf) +{ + return HAL_RX_GET_MAC_ADDR2_VALID(buf); +} + +static uint8_t hal_rx_get_filter_category_6290(uint8_t *buf) +{ + return HAL_RX_GET_FILTER_CATEGORY(buf); +} + +static uint32_t +hal_rx_get_ppdu_id_6290(uint8_t *buf) +{ + return HAL_RX_GET_PPDU_ID(buf); +} + +/** + * hal_reo_config_6290(): Set reo config parameters + * @soc: hal soc handle + * @reg_val: value to be set + * @reo_params: reo parameters + * + * Return: void + */ +static +void hal_reo_config_6290(struct hal_soc *soc, + uint32_t reg_val, + struct hal_reo_params *reo_params) +{ + HAL_REO_R0_CONFIG(soc, reg_val, reo_params); +} + +/** + * hal_rx_msdu_desc_info_get_ptr_6290() - Get msdu desc info ptr + * @msdu_details_ptr - Pointer to msdu_details_ptr + * + * Return - Pointer to rx_msdu_desc_info structure. + * + */ +static void *hal_rx_msdu_desc_info_get_ptr_6290(void *msdu_details_ptr) +{ + return HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr); +} + +/** + * hal_rx_link_desc_msdu0_ptr_6290 - Get pointer to rx_msdu details + * @link_desc - Pointer to link desc + * + * Return - Pointer to rx_msdu_details structure + * + */ +static void *hal_rx_link_desc_msdu0_ptr_6290(void *link_desc) +{ + return HAL_RX_LINK_DESC_MSDU0_PTR(link_desc); +} + +/** + * hal_rx_msdu_flow_idx_get_6290: API to get flow index + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index value from MSDU END TLV + */ +static inline uint32_t hal_rx_msdu_flow_idx_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_GET(msdu_end); +} + +/** + * hal_rx_msdu_flow_idx_invalid_6290: API to get flow index invalid + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index invalid value from MSDU END TLV + */ +static bool hal_rx_msdu_flow_idx_invalid_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(msdu_end); +} + +/** + * hal_rx_msdu_flow_idx_timeout_6290: API to get flow index timeout + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index timeout value from MSDU END TLV + */ +static bool hal_rx_msdu_flow_idx_timeout_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(msdu_end); +} + +/** + * hal_rx_msdu_fse_metadata_get_6290: API to get FSE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: fse metadata value from MSDU END TLV + */ +static uint32_t hal_rx_msdu_fse_metadata_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FSE_METADATA_GET(msdu_end); +} + +/** + * hal_rx_msdu_cce_metadata_get_6290: API to get CCE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: cce_metadata + */ +static uint16_t +hal_rx_msdu_cce_metadata_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_CCE_METADATA_GET(msdu_end); +} + +/** + * hal_rx_msdu_get_flow_params_6290: API to get flow index, flow index invalid + * and flow index timeout from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * @flow_invalid: pointer to return value of flow_idx_valid + * @flow_timeout: pointer to return value of flow_idx_timeout + * @flow_index: pointer to return value of flow_idx + * + * Return: none + */ +static inline void +hal_rx_msdu_get_flow_params_6290(uint8_t *buf, + bool *flow_invalid, + bool *flow_timeout, + uint32_t *flow_index) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + *flow_invalid = HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(msdu_end); + *flow_timeout = HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(msdu_end); + *flow_index = HAL_RX_MSDU_END_FLOW_IDX_GET(msdu_end); +} + +/** + * hal_rx_tlv_get_tcp_chksum_6290() - API to get tcp checksum + * @buf: rx_tlv_hdr + * + * Return: tcp checksum + */ +static uint16_t +hal_rx_tlv_get_tcp_chksum_6290(uint8_t *buf) +{ + return HAL_RX_TLV_GET_TCP_CHKSUM(buf); +} + +/** + * hal_rx_get_rx_sequence_6290(): Function to retrieve rx sequence number + * @nbuf: Network buffer + * + * Return: rx sequence number + */ +static +uint16_t hal_rx_get_rx_sequence_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info); +} + +/** + * hal_get_window_address_6290(): Function to get hp/tp address + * @hal_soc: Pointer to hal_soc + * @addr: address offset of register + * + * Return: modified address offset of register + */ +static inline qdf_iomem_t hal_get_window_address_6290(struct hal_soc *hal_soc, + qdf_iomem_t addr) +{ + return addr; +} + +struct hal_hw_txrx_ops qca6290_hal_hw_txrx_ops = { + /* init and setup */ + hal_srng_dst_hw_init_generic, + hal_srng_src_hw_init_generic, + hal_get_hw_hptp_generic, + hal_reo_setup_generic, + hal_setup_link_idle_list_generic, + hal_get_window_address_6290, + NULL, + + /* tx */ + hal_tx_desc_set_dscp_tid_table_id_6290, + hal_tx_set_dscp_tid_map_6290, + hal_tx_update_dscp_tid_6290, + hal_tx_desc_set_lmac_id_6290, + hal_tx_desc_set_buf_addr_generic, + hal_tx_desc_set_search_type_generic, + hal_tx_desc_set_search_index_generic, + hal_tx_desc_set_cache_set_num_generic, + hal_tx_comp_get_status_generic, + hal_tx_comp_get_release_reason_generic, + hal_get_wbm_internal_error_generic, + hal_tx_desc_set_mesh_en_6290, + /* rx */ + hal_rx_msdu_start_nss_get_6290, + hal_rx_mon_hw_desc_get_mpdu_status_6290, + hal_rx_get_tlv_6290, + hal_rx_proc_phyrx_other_receive_info_tlv_6290, + hal_rx_dump_msdu_start_tlv_6290, + hal_rx_dump_msdu_end_tlv_6290, + hal_get_link_desc_size_6290, + hal_rx_mpdu_start_tid_get_6290, + hal_rx_msdu_start_reception_type_get_6290, + hal_rx_msdu_end_da_idx_get_6290, + hal_rx_msdu_desc_info_get_ptr_6290, + hal_rx_link_desc_msdu0_ptr_6290, + hal_reo_status_get_header_6290, + hal_rx_status_get_tlv_info_generic, + hal_rx_wbm_err_info_get_generic, + hal_rx_dump_mpdu_start_tlv_generic, + + hal_tx_set_pcp_tid_map_generic, + hal_tx_update_pcp_tid_generic, + hal_tx_update_tidmap_prty_generic, + hal_rx_get_rx_fragment_number_6290, + hal_rx_msdu_end_da_is_mcbc_get_6290, + hal_rx_msdu_end_sa_is_valid_get_6290, + hal_rx_msdu_end_sa_idx_get_6290, + hal_rx_desc_is_first_msdu_6290, + hal_rx_msdu_end_l3_hdr_padding_get_6290, + hal_rx_encryption_info_valid_6290, + hal_rx_print_pn_6290, + hal_rx_msdu_end_first_msdu_get_6290, + hal_rx_msdu_end_da_is_valid_get_6290, + hal_rx_msdu_end_last_msdu_get_6290, + hal_rx_get_mpdu_mac_ad4_valid_6290, + hal_rx_mpdu_start_sw_peer_id_get_6290, + hal_rx_mpdu_get_to_ds_6290, + hal_rx_mpdu_get_fr_ds_6290, + hal_rx_get_mpdu_frame_control_valid_6290, + hal_rx_mpdu_get_addr1_6290, + hal_rx_mpdu_get_addr2_6290, + hal_rx_mpdu_get_addr3_6290, + hal_rx_mpdu_get_addr4_6290, + hal_rx_get_mpdu_sequence_control_valid_6290, + hal_rx_is_unicast_6290, + hal_rx_tid_get_6290, + hal_rx_hw_desc_get_ppduid_get_6290, + hal_rx_mpdu_start_mpdu_qos_control_valid_get_6290, + hal_rx_msdu_end_sa_sw_peer_id_get_6290, + hal_rx_msdu0_buffer_addr_lsb_6290, + hal_rx_msdu_desc_info_ptr_get_6290, + hal_ent_mpdu_desc_info_6290, + hal_dst_mpdu_desc_info_6290, + hal_rx_get_fc_valid_6290, + hal_rx_get_to_ds_flag_6290, + hal_rx_get_mac_addr2_valid_6290, + hal_rx_get_filter_category_6290, + hal_rx_get_ppdu_id_6290, + hal_reo_config_6290, + hal_rx_msdu_flow_idx_get_6290, + hal_rx_msdu_flow_idx_invalid_6290, + hal_rx_msdu_flow_idx_timeout_6290, + hal_rx_msdu_fse_metadata_get_6290, + hal_rx_msdu_cce_metadata_get_6290, + hal_rx_msdu_get_flow_params_6290, + hal_rx_tlv_get_tcp_chksum_6290, + hal_rx_get_rx_sequence_6290, + NULL, + NULL, + /* rx - msdu end fast path info fields */ + hal_rx_msdu_packet_metadata_get_generic, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, +}; + +struct hal_hw_srng_config hw_srng_table_6290[] = { + /* TODO: max_rings can populated by querying HW capabilities */ + { /* REO_DST */ + .start_ring_id = HAL_SRNG_REO2SW1, + .max_rings = 4, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2SW1_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + .reg_size = { + HWIO_REO_R0_REO2SW2_RING_BASE_LSB_ADDR(0) - + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(0), + HWIO_REO_R2_REO2SW2_RING_HP_ADDR(0) - + HWIO_REO_R2_REO2SW1_RING_HP_ADDR(0), + }, + .max_size = HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_EXCEPTION */ + /* Designating REO2TCL ring as exception ring. This ring is + * similar to other REO2SW rings though it is named as REO2TCL. + * Any of theREO2SW rings can be used as exception ring. + */ + .start_ring_id = HAL_SRNG_REO2TCL, + .max_rings = 1, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2TCL_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2TCL_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_REINJECT */ + .start_ring_id = HAL_SRNG_SW2REO, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_SW2REO_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_SW2REO_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_CMD */ + .start_ring_id = HAL_SRNG_REO_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_REO_CMD_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_CMD_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_STATUS */ + .start_ring_id = HAL_SRNG_REO_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats_status)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_DATA */ + .start_ring_id = HAL_SRNG_SW2TCL1, + .max_rings = 3, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_data_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + .reg_size = { + HWIO_TCL_R0_SW2TCL2_RING_BASE_LSB_ADDR(0) - + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(0), + HWIO_TCL_R2_SW2TCL2_RING_HP_ADDR(0) - + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(0), + }, + .max_size = HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_CMD */ + .start_ring_id = HAL_SRNG_SW2TCL_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_gse_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL_CMD_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_STATUS */ + .start_ring_id = HAL_SRNG_TCL_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_status_ring)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_TCL_STATUS1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_SRC */ + .start_ring_id = HAL_SRNG_CE_0_SRC, + .max_rings = 12, + .entry_size = sizeof(struct ce_src_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET), + }, + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_DST */ + .start_ring_id = HAL_SRNG_CE_0_DST, + .max_rings = 12, + .entry_size = 8 >> 2, + /*TODO: entry_size above should actually be + * sizeof(struct ce_dst_desc) >> 2, but couldn't find definition + * of struct ce_dst_desc in HW header files + */ + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + }, + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_DST_STATUS */ + .start_ring_id = HAL_SRNG_CE_0_DST_STATUS, + .max_rings = 12, + .entry_size = sizeof(struct ce_stat_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + }, + /* TODO: check destination status ring registers */ + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* WBM_IDLE_LINK */ + .start_ring_id = HAL_SRNG_WBM_IDLE_LINK, + .max_rings = 1, + .entry_size = sizeof(struct wbm_link_descriptor_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM_IDLE_LINK_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* SW2WBM_RELEASE */ + .start_ring_id = HAL_SRNG_WBM_SW_RELEASE, + .max_rings = 1, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_SW_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_SW_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* WBM2SW_RELEASE */ + .start_ring_id = HAL_SRNG_WBM2SW0_RELEASE, + .max_rings = 4, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .reg_size = { + HWIO_WBM_R0_WBM2SW1_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW1_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .max_size = + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* RXDMA_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA0_BUF0, +#ifdef IPA_OFFLOAD + .max_rings = 3, +#else + .max_rings = 2, +#endif + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW0, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA2_BUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_STATUS */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW1, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_DESC */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_DESC, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* DIR_BUF_RX_DMA_SRC */ + .start_ring_id = HAL_SRNG_DIR_BUF_RX_SRC_DMA_RING, + .max_rings = 1, + .entry_size = 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, +#ifdef WLAN_FEATURE_CIF_CFR + { /* WIFI_POS_SRC */ + .start_ring_id = HAL_SRNG_WIFI_POS_SRC_DMA_RING, + .max_rings = 1, + .entry_size = sizeof(wmi_oem_dma_buf_release_entry) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, +#endif +}; + +int32_t hal_hw_reg_offset_qca6290[] = { + /* dst */ + REG_OFFSET(DST, HP), + REG_OFFSET(DST, TP), + REG_OFFSET(DST, ID), + REG_OFFSET(DST, MISC), + REG_OFFSET(DST, HP_ADDR_LSB), + REG_OFFSET(DST, HP_ADDR_MSB), + REG_OFFSET(DST, MSI1_BASE_LSB), + REG_OFFSET(DST, MSI1_BASE_MSB), + REG_OFFSET(DST, MSI1_DATA), + REG_OFFSET(DST, BASE_LSB), + REG_OFFSET(DST, BASE_MSB), + REG_OFFSET(DST, PRODUCER_INT_SETUP), + /* src */ + REG_OFFSET(SRC, HP), + REG_OFFSET(SRC, TP), + REG_OFFSET(SRC, ID), + REG_OFFSET(SRC, MISC), + REG_OFFSET(SRC, TP_ADDR_LSB), + REG_OFFSET(SRC, TP_ADDR_MSB), + REG_OFFSET(SRC, MSI1_BASE_LSB), + REG_OFFSET(SRC, MSI1_BASE_MSB), + REG_OFFSET(SRC, MSI1_DATA), + REG_OFFSET(SRC, BASE_LSB), + REG_OFFSET(SRC, BASE_MSB), + REG_OFFSET(SRC, CONSUMER_INT_SETUP_IX0), + REG_OFFSET(SRC, CONSUMER_INT_SETUP_IX1), +}; + +/** + * hal_qca6290_attach() - Attach 6290 target specific hal_soc ops, + * offset and srng table + */ +void hal_qca6290_attach(struct hal_soc *hal_soc) +{ + hal_soc->hw_srng_table = hw_srng_table_6290; + hal_soc->hal_hw_reg_offset = hal_hw_reg_offset_qca6290; + hal_soc->ops = &qca6290_hal_hw_txrx_ops; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6290/hal_6290_rx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6290/hal_6290_rx.h new file mode 100644 index 0000000000000000000000000000000000000000..9417cf728f59706fb1ddd4c4e7e25abc64e30bbc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6290/hal_6290_rx.h @@ -0,0 +1,784 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "qdf_util.h" +#include "qdf_types.h" +#include "qdf_lock.h" +#include "qdf_mem.h" +#include "qdf_nbuf.h" +#include "tcl_data_cmd.h" +#include "mac_tcl_reg_seq_hwioreg.h" +#include "phyrx_rssi_legacy.h" +#include "rx_msdu_start.h" +#include "tlv_tag_def.h" +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "cdp_txrx_mon_struct.h" +#include "qdf_trace.h" +#include "hal_rx.h" +#include "hal_tx.h" +#include "dp_types.h" +#include "hal_api_mon.h" +#include "phyrx_other_receive_info_ru_details.h" + +#define HAL_RX_MPDU_GET_SEQUENCE_NUMBER(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_MASK, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_LSB)) + +#define HAL_RX_MSDU_END_DA_IS_MCBC_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_DA_IS_MCBC_OFFSET)), \ + RX_MSDU_END_5_DA_IS_MCBC_MASK, \ + RX_MSDU_END_5_DA_IS_MCBC_LSB)) + +#define HAL_RX_MSDU_END_SA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_SA_IS_VALID_OFFSET)), \ + RX_MSDU_END_5_SA_IS_VALID_MASK, \ + RX_MSDU_END_5_SA_IS_VALID_LSB)) + +#define HAL_RX_MSDU_END_SA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_13_SA_IDX_OFFSET)), \ + RX_MSDU_END_13_SA_IDX_MASK, \ + RX_MSDU_END_13_SA_IDX_LSB)) + +#define HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_L3_HEADER_PADDING_OFFSET)), \ + RX_MSDU_END_5_L3_HEADER_PADDING_MASK, \ + RX_MSDU_END_5_L3_HEADER_PADDING_LSB)) + +#define HAL_RX_MPDU_ENCRYPTION_INFO_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_OFFSET)), \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_MASK, \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_LSB)) + +#define HAL_RX_MPDU_PN_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_4_PN_31_0_OFFSET)), \ + RX_MPDU_INFO_4_PN_31_0_MASK, \ + RX_MPDU_INFO_4_PN_31_0_LSB)) + +#define HAL_RX_MPDU_PN_63_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_5_PN_63_32_OFFSET)), \ + RX_MPDU_INFO_5_PN_63_32_MASK, \ + RX_MPDU_INFO_5_PN_63_32_LSB)) + +#define HAL_RX_MPDU_PN_95_64_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_6_PN_95_64_OFFSET)), \ + RX_MPDU_INFO_6_PN_95_64_MASK, \ + RX_MPDU_INFO_6_PN_95_64_LSB)) + +#define HAL_RX_MPDU_PN_127_96_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_7_PN_127_96_OFFSET)), \ + RX_MPDU_INFO_7_PN_127_96_MASK, \ + RX_MPDU_INFO_7_PN_127_96_LSB)) + +#define HAL_RX_MSDU_END_FIRST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_FIRST_MSDU_OFFSET)), \ + RX_MSDU_END_5_FIRST_MSDU_MASK, \ + RX_MSDU_END_5_FIRST_MSDU_LSB)) + +#define HAL_RX_MSDU_END_DA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_DA_IS_VALID_OFFSET)), \ + RX_MSDU_END_5_DA_IS_VALID_MASK, \ + RX_MSDU_END_5_DA_IS_VALID_LSB)) + +#define HAL_RX_MSDU_END_LAST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_LAST_MSDU_OFFSET)), \ + RX_MSDU_END_5_LAST_MSDU_MASK, \ + RX_MSDU_END_5_LAST_MSDU_LSB)) + +#define HAL_RX_MPDU_GET_MAC_AD4_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_LSB)) + +#define HAL_RX_MPDU_INFO_SW_PEER_ID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_1_SW_PEER_ID_OFFSET)), \ + RX_MPDU_INFO_1_SW_PEER_ID_MASK, \ + RX_MPDU_INFO_1_SW_PEER_ID_LSB)) + +#define HAL_RX_MPDU_GET_TODS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_TO_DS_OFFSET)), \ + RX_MPDU_INFO_2_TO_DS_MASK, \ + RX_MPDU_INFO_2_TO_DS_LSB)) + +#define HAL_RX_MPDU_GET_FROMDS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_FR_DS_OFFSET)), \ + RX_MPDU_INFO_2_FR_DS_MASK, \ + RX_MPDU_INFO_2_FR_DS_LSB)) + +#define HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_LSB)) + +#define HAL_RX_MPDU_AD1_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_OFFSET)), \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_MASK, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_LSB)) + +#define HAL_RX_MPDU_AD1_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_LSB)) + +#define HAL_RX_MPDU_AD2_15_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_LSB)) + +#define HAL_RX_MPDU_AD2_47_16_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_OFFSET)), \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_MASK, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_LSB)) + +#define HAL_RX_MPDU_AD3_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_OFFSET)), \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_MASK, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_LSB)) + +#define HAL_RX_MPDU_AD3_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_OFFSET)), \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_MASK, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_LSB)) + +#define HAL_RX_MPDU_AD4_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_OFFSET)), \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_MASK, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_LSB)) + +#define HAL_RX_MPDU_AD4_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_OFFSET)), \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_MASK, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_LSB)) + +#define HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_LSB)) + +#define HAL_RX_MPDU_INFO_QOS_CONTROL_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_LSB)) + +#define HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_16_SA_SW_PEER_ID_OFFSET)), \ + RX_MSDU_END_16_SA_SW_PEER_ID_MASK, \ + RX_MSDU_END_16_SA_SW_PEER_ID_LSB)) + +#define HAL_RX_MSDU0_BUFFER_ADDR_LSB(link_desc_va) \ + (uint8_t *)(link_desc_va) + \ + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET + +#define HAL_RX_MSDU_DESC_INFO_PTR_GET(msdu0) \ + (uint8_t *)(msdu0) + \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET + +#define HAL_ENT_MPDU_DESC_INFO(ent_ring_desc) \ + (uint8_t *)(ent_ring_desc) + \ + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET + +#define HAL_DST_MPDU_DESC_INFO(dst_ring_desc) \ + (uint8_t *)(dst_ring_desc) + \ + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET + +#define HAL_RX_GET_FC_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, MPDU_FRAME_CONTROL_VALID) + +#define HAL_RX_GET_TO_DS_FLAG(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, TO_DS) + +#define HAL_RX_GET_MAC_ADDR1_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, MAC_ADDR_AD1_VALID) + +#define HAL_RX_GET_MAC_ADDR2_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, MAC_ADDR_AD2_VALID) + +#define HAL_RX_GET_FILTER_CATEGORY(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_0, RXPCU_MPDU_FILTER_IN_CATEGORY) + +#define HAL_RX_GET_PPDU_ID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_0, PHY_PPDU_ID) + +#define HAL_RX_GET_SW_FRAME_GROUP_ID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_0, SW_FRAME_GROUP_ID) + +#define HAL_REO_R0_CONFIG(soc, reg_val, reo_params) \ + do { \ + (reg_val) &= \ + ~(HWIO_REO_R0_GENERAL_ENABLE_FRAGMENT_DEST_RING_BMSK |\ + HWIO_REO_R0_GENERAL_ENABLE_AGING_LIST_ENABLE_BMSK |\ + HWIO_REO_R0_GENERAL_ENABLE_AGING_FLUSH_ENABLE_BMSK);\ + (reg_val) |= \ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + FRAGMENT_DEST_RING, \ + (reo_params)->frag_dst_ring) | \ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + AGING_LIST_ENABLE, 1) |\ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + AGING_FLUSH_ENABLE, 1);\ + HAL_REG_WRITE((soc), \ + HWIO_REO_R0_GENERAL_ENABLE_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET), \ + (reg_val)); \ + } while (0) + +#define HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr) \ + ((struct rx_msdu_desc_info *) \ + _OFFSET_TO_BYTE_PTR(msdu_details_ptr, \ +UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET)) + +#define HAL_RX_LINK_DESC_MSDU0_PTR(link_desc) \ + ((struct rx_msdu_details *) \ + _OFFSET_TO_BYTE_PTR((link_desc),\ + UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET)) + +#define HAL_RX_MSDU_END_FLOW_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_14_FLOW_IDX_OFFSET)), \ + RX_MSDU_END_14_FLOW_IDX_MASK, \ + RX_MSDU_END_14_FLOW_IDX_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_FLOW_IDX_INVALID_OFFSET)), \ + RX_MSDU_END_5_FLOW_IDX_INVALID_MASK, \ + RX_MSDU_END_5_FLOW_IDX_INVALID_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_FLOW_IDX_TIMEOUT_OFFSET)), \ + RX_MSDU_END_5_FLOW_IDX_TIMEOUT_MASK, \ + RX_MSDU_END_5_FLOW_IDX_TIMEOUT_LSB)) + +#define HAL_RX_MSDU_END_FSE_METADATA_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_15_FSE_METADATA_OFFSET)), \ + RX_MSDU_END_15_FSE_METADATA_MASK, \ + RX_MSDU_END_15_FSE_METADATA_LSB)) + +#define HAL_RX_MSDU_END_CCE_METADATA_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_16_CCE_METADATA_OFFSET)), \ + RX_MSDU_END_16_CCE_METADATA_MASK, \ + RX_MSDU_END_16_CCE_METADATA_LSB)) + +#define HAL_RX_TLV_GET_TCP_CHKSUM(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_1_TCP_UDP_CHKSUM_OFFSET)), \ + RX_MSDU_END_1_TCP_UDP_CHKSUM_MASK, \ + RX_MSDU_END_1_TCP_UDP_CHKSUM_LSB)) + +#if defined(QCA_WIFI_QCA6290_11AX) +#define HAL_RX_MSDU_START_MIMO_SS_BITMAP(_rx_msdu_start)\ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\ + RX_MSDU_START_5_MIMO_SS_BITMAP_OFFSET)), \ + RX_MSDU_START_5_MIMO_SS_BITMAP_MASK, \ + RX_MSDU_START_5_MIMO_SS_BITMAP_LSB)) + +/* + * hal_rx_msdu_start_nss_get_6290(): API to get the NSS + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(nss) + */ +static uint32_t +hal_rx_msdu_start_nss_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint8_t mimo_ss_bitmap; + + mimo_ss_bitmap = HAL_RX_MSDU_START_MIMO_SS_BITMAP(msdu_start); + + return qdf_get_hweight8(mimo_ss_bitmap); +} +#else +static uint32_t +hal_rx_msdu_start_nss_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t nss; + + nss = HAL_RX_MSDU_START_NSS_GET(msdu_start); + return nss; +} +#endif + +/** + * hal_rx_mon_hw_desc_get_mpdu_status_6290(): Retrieve MPDU status + * + * @ hw_desc_addr: Start address of Rx HW TLVs + * @ rs: Status for monitor mode + * + * Return: void + */ +static void hal_rx_mon_hw_desc_get_mpdu_status_6290(void *hw_desc_addr, + struct mon_rx_status *rs) +{ + struct rx_msdu_start *rx_msdu_start; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + uint32_t reg_value; + const uint32_t sgi_hw_to_cdp[] = { + CDP_SGI_0_8_US, + CDP_SGI_0_4_US, + CDP_SGI_1_6_US, + CDP_SGI_3_2_US, + }; + + rx_msdu_start = &rx_desc->msdu_start_tlv.rx_msdu_start; + + HAL_RX_GET_MSDU_AGGREGATION(rx_desc, rs); + + rs->ant_signal_db = HAL_RX_GET(rx_msdu_start, + RX_MSDU_START_5, USER_RSSI); + rs->is_stbc = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, STBC); + + reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, SGI); + rs->sgi = sgi_hw_to_cdp[reg_value]; +#if !defined(QCA_WIFI_QCA6290_11AX) + rs->nr_ant = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, NSS); +#endif + reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, RECEPTION_TYPE); + rs->beamformed = (reg_value == HAL_RX_RECEPTION_TYPE_MU_MIMO) ? 1 : 0; + /* TODO: rs->beamformed should be set for SU beamforming also */ +} + +#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2) + +static uint32_t hal_get_link_desc_size_6290(void) +{ + return LINK_DESC_SIZE; +} + + +#ifdef QCA_WIFI_QCA6290_11AX +/* + * hal_rx_get_tlv_6290(): API to get the tlv + * + * @rx_tlv: TLV data extracted from the rx packet + * Return: uint8_t + */ +static uint8_t hal_rx_get_tlv_6290(void *rx_tlv) +{ + return HAL_RX_GET(rx_tlv, PHYRX_RSSI_LEGACY_0, RECEIVE_BANDWIDTH); +} +#else +static uint8_t hal_rx_get_tlv_6290(void *rx_tlv) +{ + return HAL_RX_GET(rx_tlv, PHYRX_RSSI_LEGACY_35, RECEIVE_BANDWIDTH); +} +#endif + +#ifdef QCA_WIFI_QCA6290_11AX +/** + * hal_rx_proc_phyrx_other_receive_info_tlv_6290() + * - process other receive info TLV + * @rx_tlv_hdr: pointer to TLV header + * @ppdu_info: pointer to ppdu_info + * + * Return: None + */ +static +void hal_rx_proc_phyrx_other_receive_info_tlv_6290(void *rx_tlv_hdr, + void *ppdu_info_handle) +{ + uint32_t tlv_tag, tlv_len; + uint32_t temp_len, other_tlv_len, other_tlv_tag; + void *rx_tlv = (uint8_t *)rx_tlv_hdr + HAL_RX_TLV32_HDR_SIZE; + void *other_tlv_hdr = NULL; + void *other_tlv = NULL; + uint32_t ru_details_channel_0; + struct hal_rx_ppdu_info *ppdu_info = + (struct hal_rx_ppdu_info *)ppdu_info_handle; + + tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv_hdr); + tlv_len = HAL_RX_GET_USER_TLV32_LEN(rx_tlv_hdr); + temp_len = 0; + + other_tlv_hdr = rx_tlv + HAL_RX_TLV32_HDR_SIZE; + + other_tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(other_tlv_hdr); + other_tlv_len = HAL_RX_GET_USER_TLV32_LEN(other_tlv_hdr); + temp_len += other_tlv_len; + other_tlv = other_tlv_hdr + HAL_RX_TLV32_HDR_SIZE; + + switch (other_tlv_tag) { + case WIFIPHYRX_OTHER_RECEIVE_INFO_RU_DETAILS_E: + ru_details_channel_0 = + HAL_RX_GET(other_tlv, + PHYRX_OTHER_RECEIVE_INFO_RU_DETAILS_0, + RU_DETAILS_CHANNEL_0); + + qdf_mem_copy(ppdu_info->rx_status.he_RU, + &ru_details_channel_0, + sizeof(ppdu_info->rx_status.he_RU)); + + if (ppdu_info->rx_status.bw >= HAL_FULL_RX_BW_20) { + ppdu_info->rx_status.he_sig_b_common_known |= + QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU0; + } + if (ppdu_info->rx_status.bw >= HAL_FULL_RX_BW_40) { + ppdu_info->rx_status.he_sig_b_common_known |= + QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU1; + } + if (ppdu_info->rx_status.bw >= HAL_FULL_RX_BW_80) { + ppdu_info->rx_status.he_sig_b_common_known |= + QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU2; + } + if (ppdu_info->rx_status.bw >= HAL_FULL_RX_BW_160) { + ppdu_info->rx_status.he_sig_b_common_known |= + QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU3; + } + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s unhandled TLV type: %d, TLV len:%d", + __func__, other_tlv_tag, other_tlv_len); + break; + } +} +#else +static +void hal_rx_proc_phyrx_other_receive_info_tlv_6290(void *rx_tlv_hdr, + void *ppdu_info_handle) +{ +} +#endif /* QCA_WIFI_QCA6290_11AX */ + +/** + * hal_rx_dump_msdu_start_tlv_6290() : dump RX msdu_start TLV in structured + * human readable format. + * @ msdu_start: pointer the msdu_start TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static void hal_rx_dump_msdu_start_tlv_6290(void *msdustart, + uint8_t dbg_level) +{ + struct rx_msdu_start *msdu_start = (struct rx_msdu_start *)msdustart; + + QDF_TRACE(QDF_MODULE_ID_DP, dbg_level, + "rx_msdu_start tlv - " + "rxpcu_mpdu_filter_in_category: %d " + "sw_frame_group_id: %d " + "phy_ppdu_id: %d " + "msdu_length: %d " + "ipsec_esp: %d " + "l3_offset: %d " + "ipsec_ah: %d " + "l4_offset: %d " + "msdu_number: %d " + "decap_format: %d " + "ipv4_proto: %d " + "ipv6_proto: %d " + "tcp_proto: %d " + "udp_proto: %d " + "ip_frag: %d " + "tcp_only_ack: %d " + "da_is_bcast_mcast: %d " + "ip4_protocol_ip6_next_header: %d " + "toeplitz_hash_2_or_4: %d " + "flow_id_toeplitz: %d " + "user_rssi: %d " + "pkt_type: %d " + "stbc: %d " + "sgi: %d " + "rate_mcs: %d " + "receive_bandwidth: %d " + "reception_type: %d " +#if !defined(QCA_WIFI_QCA6290_11AX) + "toeplitz_hash: %d " + "nss: %d " +#endif + "ppdu_start_timestamp: %d " + "sw_phy_meta_data: %d ", + msdu_start->rxpcu_mpdu_filter_in_category, + msdu_start->sw_frame_group_id, + msdu_start->phy_ppdu_id, + msdu_start->msdu_length, + msdu_start->ipsec_esp, + msdu_start->l3_offset, + msdu_start->ipsec_ah, + msdu_start->l4_offset, + msdu_start->msdu_number, + msdu_start->decap_format, + msdu_start->ipv4_proto, + msdu_start->ipv6_proto, + msdu_start->tcp_proto, + msdu_start->udp_proto, + msdu_start->ip_frag, + msdu_start->tcp_only_ack, + msdu_start->da_is_bcast_mcast, + msdu_start->ip4_protocol_ip6_next_header, + msdu_start->toeplitz_hash_2_or_4, + msdu_start->flow_id_toeplitz, + msdu_start->user_rssi, + msdu_start->pkt_type, + msdu_start->stbc, + msdu_start->sgi, + msdu_start->rate_mcs, + msdu_start->receive_bandwidth, + msdu_start->reception_type, +#if !defined(QCA_WIFI_QCA6290_11AX) + msdu_start->toeplitz_hash, + msdu_start->nss, +#endif + msdu_start->ppdu_start_timestamp, + msdu_start->sw_phy_meta_data); +} + +/** + * hal_rx_dump_msdu_end_tlv_6290: dump RX msdu_end TLV in structured + * human readable format. + * @ msdu_end: pointer the msdu_end TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static void hal_rx_dump_msdu_end_tlv_6290(void *msduend, + uint8_t dbg_level) +{ + struct rx_msdu_end *msdu_end = (struct rx_msdu_end *)msduend; + + QDF_TRACE(QDF_MODULE_ID_DP, dbg_level, + "rx_msdu_end tlv - " + "rxpcu_mpdu_filter_in_category: %d " + "sw_frame_group_id: %d " + "phy_ppdu_id: %d " + "ip_hdr_chksum: %d " + "tcp_udp_chksum: %d " + "key_id_octet: %d " + "cce_super_rule: %d " + "cce_classify_not_done_truncat: %d " + "cce_classify_not_done_cce_dis: %d " + "ext_wapi_pn_63_48: %d " + "ext_wapi_pn_95_64: %d " + "ext_wapi_pn_127_96: %d " + "reported_mpdu_length: %d " + "first_msdu: %d " + "last_msdu: %d " + "sa_idx_timeout: %d " + "da_idx_timeout: %d " + "msdu_limit_error: %d " + "flow_idx_timeout: %d " + "flow_idx_invalid: %d " + "wifi_parser_error: %d " + "amsdu_parser_error: %d " + "sa_is_valid: %d " + "da_is_valid: %d " + "da_is_mcbc: %d " + "l3_header_padding: %d " + "ipv6_options_crc: %d " + "tcp_seq_number: %d " + "tcp_ack_number: %d " + "tcp_flag: %d " + "lro_eligible: %d " + "window_size: %d " + "da_offset: %d " + "sa_offset: %d " + "da_offset_valid: %d " + "sa_offset_valid: %d " + "rule_indication_31_0: %d " + "rule_indication_63_32: %d " + "sa_idx: %d " + "da_idx: %d " + "msdu_drop: %d " + "reo_destination_indication: %d " + "flow_idx: %d " + "fse_metadata: %d " + "cce_metadata: %d " + "sa_sw_peer_id: %d ", + msdu_end->rxpcu_mpdu_filter_in_category, + msdu_end->sw_frame_group_id, + msdu_end->phy_ppdu_id, + msdu_end->ip_hdr_chksum, + msdu_end->tcp_udp_chksum, + msdu_end->key_id_octet, + msdu_end->cce_super_rule, + msdu_end->cce_classify_not_done_truncate, + msdu_end->cce_classify_not_done_cce_dis, + msdu_end->ext_wapi_pn_63_48, + msdu_end->ext_wapi_pn_95_64, + msdu_end->ext_wapi_pn_127_96, + msdu_end->reported_mpdu_length, + msdu_end->first_msdu, + msdu_end->last_msdu, + msdu_end->sa_idx_timeout, + msdu_end->da_idx_timeout, + msdu_end->msdu_limit_error, + msdu_end->flow_idx_timeout, + msdu_end->flow_idx_invalid, + msdu_end->wifi_parser_error, + msdu_end->amsdu_parser_error, + msdu_end->sa_is_valid, + msdu_end->da_is_valid, + msdu_end->da_is_mcbc, + msdu_end->l3_header_padding, + msdu_end->ipv6_options_crc, + msdu_end->tcp_seq_number, + msdu_end->tcp_ack_number, + msdu_end->tcp_flag, + msdu_end->lro_eligible, + msdu_end->window_size, + msdu_end->da_offset, + msdu_end->sa_offset, + msdu_end->da_offset_valid, + msdu_end->sa_offset_valid, + msdu_end->rule_indication_31_0, + msdu_end->rule_indication_63_32, + msdu_end->sa_idx, + msdu_end->da_idx, + msdu_end->msdu_drop, + msdu_end->reo_destination_indication, + msdu_end->flow_idx, + msdu_end->fse_metadata, + msdu_end->cce_metadata, + msdu_end->sa_sw_peer_id); +} + + +/* + * Get tid from RX_MPDU_START + */ +#define HAL_RX_MPDU_INFO_TID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_3_TID_OFFSET)), \ + RX_MPDU_INFO_3_TID_MASK, \ + RX_MPDU_INFO_3_TID_LSB)) + +static uint32_t hal_rx_mpdu_start_tid_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint32_t tid; + + tid = HAL_RX_MPDU_INFO_TID_GET(&mpdu_start->rx_mpdu_info_details); + + return tid; +} + +#define HAL_RX_MSDU_START_RECEPTION_TYPE_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start), \ + RX_MSDU_START_5_RECEPTION_TYPE_OFFSET)), \ + RX_MSDU_START_5_RECEPTION_TYPE_MASK, \ + RX_MSDU_START_5_RECEPTION_TYPE_LSB)) + +/* + * hal_rx_msdu_start_reception_type_get(): API to get the reception type + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(reception_type) + */ +static uint32_t hal_rx_msdu_start_reception_type_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t reception_type; + + reception_type = HAL_RX_MSDU_START_RECEPTION_TYPE_GET(msdu_start); + + return reception_type; +} + +#define HAL_RX_MSDU_END_DA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_13_DA_IDX_OFFSET)), \ + RX_MSDU_END_13_DA_IDX_MASK, \ + RX_MSDU_END_13_DA_IDX_LSB)) + +/** + * hal_rx_msdu_end_da_idx_get_6290: API to get da_idx + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da index + */ +static uint16_t hal_rx_msdu_end_da_idx_get_6290(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t da_idx; + + da_idx = HAL_RX_MSDU_END_DA_IDX_GET(msdu_end); + + return da_idx; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6290/hal_6290_tx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6290/hal_6290_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..2d4cc75acc8d1aa8081d3a0f5f2bb072bf5b3443 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6290/hal_6290_tx.h @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "tcl_data_cmd.h" +#include "mac_tcl_reg_seq_hwioreg.h" +#include "phyrx_rssi_legacy.h" +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "cdp_txrx_mon_struct.h" +#include "qdf_trace.h" +#include "hal_rx.h" +#include "hal_tx.h" +#include "dp_types.h" +#include "hal_api_mon.h" + +/** + * hal_tx_desc_set_dscp_tid_table_id_6290() - Sets DSCP to TID conversion + * table ID + * @desc: Handle to Tx Descriptor + * @id: DSCP to tid conversion table to be used for this frame + * + * Return: void + */ +#if defined(QCA_WIFI_QCA6290_11AX) +static void hal_tx_desc_set_dscp_tid_table_id_6290(void *desc, + uint8_t id) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_5, + DSCP_TID_TABLE_NUM) |= + HAL_TX_SM(TCL_DATA_CMD_5, + DSCP_TID_TABLE_NUM, id); +} +#else +static void hal_tx_desc_set_dscp_tid_table_id_6290(void *desc, + uint8_t id) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_3, + DSCP_TO_TID_PRIORITY_TABLE_ID) |= + HAL_TX_SM(TCL_DATA_CMD_3, + DSCP_TO_TID_PRIORITY_TABLE_ID, id); +} +#endif + + +#define DSCP_TID_TABLE_SIZE 24 +#define NUM_WORDS_PER_DSCP_TID_TABLE (DSCP_TID_TABLE_SIZE / 4) + +#if defined(QCA_WIFI_QCA6290_11AX) +/** + * hal_tx_set_dscp_tid_map_6290() - Configure default DSCP to TID map table + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id: mapping table ID - 0-31 + * + * DSCP are mapped to 8 TID values using TID values programmed + * in any of the 32 DSCP_TID_MAPS (id = 0-31). + * + * Return: none + */ +static void hal_tx_set_dscp_tid_map_6290(struct hal_soc *soc, + uint8_t *map, + uint8_t id) +{ + int i; + uint32_t addr, cmn_reg_addr; + uint32_t value = 0, regval; + uint8_t val[DSCP_TID_TABLE_SIZE], cnt = 0; + + if (id >= HAL_MAX_HW_DSCP_TID_MAPS_11AX) + return; + + cmn_reg_addr = HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + + addr = HWIO_TCL_R0_DSCP_TID_MAP_n_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET, + id * NUM_WORDS_PER_DSCP_TID_TABLE); + + /* Enable read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval |= + (1 << HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_SHFT); + + HAL_REG_WRITE(soc, cmn_reg_addr, regval); + + /* Write 8 (24 bits) DSCP-TID mappings in each interation */ + for (i = 0; i < 64; i += 8) { + value = (map[i] | + (map[i + 1] << 0x3) | + (map[i + 2] << 0x6) | + (map[i + 3] << 0x9) | + (map[i + 4] << 0xc) | + (map[i + 5] << 0xf) | + (map[i + 6] << 0x12) | + (map[i + 7] << 0x15)); + + qdf_mem_copy(&val[cnt], &value, 3); + cnt += 3; + } + + for (i = 0; i < DSCP_TID_TABLE_SIZE; i += 4) { + regval = *(uint32_t *)(val + i); + HAL_REG_WRITE(soc, addr, + (regval & HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK)); + addr += 4; + } + + /* Diasble read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval &= + ~(HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_BMSK); + + HAL_REG_WRITE(soc, cmn_reg_addr, regval); +} +#else +static void hal_tx_set_dscp_tid_map_6290(struct hal_soc *soc, + uint8_t *map, + uint8_t id) +{ + int i; + uint32_t addr; + uint32_t value; + + if (id == HAL_TX_DSCP_TID_MAP_TABLE_DEFAULT) { + addr = + HWIO_TCL_R0_DSCP_TID1_MAP_0_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + } else { + addr = + HWIO_TCL_R0_DSCP_TID2_MAP_0_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + } + + for (i = 0; i < 64; i += 10) { + value = (map[i] | + (map[i+1] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_1_SHFT) | + (map[i+2] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_2_SHFT) | + (map[i+3] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_3_SHFT) | + (map[i+4] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_4_SHFT) | + (map[i+5] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_5_SHFT) | + (map[i+6] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_6_SHFT) | + (map[i+7] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_7_SHFT) | + (map[i+8] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_8_SHFT) | + (map[i+9] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_9_SHFT)); + + HAL_REG_WRITE(soc, addr, + (value & HWIO_TCL_R0_DSCP_TID1_MAP_1_RMSK)); + + addr += 4; + } +} +#endif + +#ifdef QCA_WIFI_QCA6290_11AX +/** + * hal_tx_update_dscp_tid_6290() - Update the dscp tid map table as updated + * by the user + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id : MAP ID + * @dscp: DSCP_TID map index + * + * Return: void + */ +static void hal_tx_update_dscp_tid_6290(struct hal_soc *soc, uint8_t tid, + uint8_t id, uint8_t dscp) +{ + int index; + uint32_t addr; + uint32_t value; + uint32_t regval; + + addr = HWIO_TCL_R0_DSCP_TID_MAP_n_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET, id); + + index = dscp % HAL_TX_NUM_DSCP_PER_REGISTER; + addr += 4 * (dscp / HAL_TX_NUM_DSCP_PER_REGISTER); + value = tid << (HAL_TX_BITS_PER_TID * index); + + regval = HAL_REG_READ(soc, addr); + regval &= ~(HAL_TX_TID_BITS_MASK << (HAL_TX_BITS_PER_TID * index)); + regval |= value; + + HAL_REG_WRITE(soc, addr, (regval & HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK)); +} +#else +static void hal_tx_update_dscp_tid_6290(struct hal_soc *soc, uint8_t tid, + uint8_t id, uint8_t dscp) +{ + int index; + uint32_t addr; + uint32_t value; + uint32_t regval; + + if (id == HAL_TX_DSCP_TID_MAP_TABLE_DEFAULT) + addr = + HWIO_TCL_R0_DSCP_TID1_MAP_0_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + else + addr = + HWIO_TCL_R0_DSCP_TID2_MAP_0_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + + index = dscp % HAL_TX_NUM_DSCP_PER_REGISTER; + addr += 4 * (dscp/HAL_TX_NUM_DSCP_PER_REGISTER); + value = tid << (HAL_TX_BITS_PER_TID * index); + + /* Read back previous DSCP TID config and update + * with new config. + */ + regval = HAL_REG_READ(soc, addr); + regval &= ~(HAL_TX_TID_BITS_MASK << (HAL_TX_BITS_PER_TID * index)); + regval |= value; + + HAL_REG_WRITE(soc, addr, + (regval & HWIO_TCL_R0_DSCP_TID1_MAP_1_RMSK)); +} +#endif + +#ifdef QCA_WIFI_QCA6290_11AX +/** + * hal_tx_desc_set_lmac_id - Set the lmac_id value + * @desc: Handle to Tx Descriptor + * @lmac_id: mac Id to ast matching + * b00 – mac 0 + * b01 – mac 1 + * b10 – mac 2 + * b11 – all macs (legacy HK way) + * + * Return: void + */ +static void hal_tx_desc_set_lmac_id_6290(void *desc, uint8_t lmac_id) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_4, LMAC_ID) |= + HAL_TX_SM(TCL_DATA_CMD_4, LMAC_ID, lmac_id); +} +#else +static void hal_tx_desc_set_lmac_id_6290(void *desc, uint8_t lmac_id) +{ +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6390/hal_6390.c b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6390/hal_6390.c new file mode 100644 index 0000000000000000000000000000000000000000..af5479b2c0195e0fe240100e2643f85a3b1d01fc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6390/hal_6390.c @@ -0,0 +1,1580 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "qdf_types.h" +#include "qdf_util.h" +#include "qdf_types.h" +#include "qdf_lock.h" +#include "qdf_mem.h" +#include "qdf_nbuf.h" +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "hal_api.h" +#include "target_type.h" +#include "wcss_version.h" +#include "qdf_module.h" + +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_OFFSET \ + RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_OFFSET +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_MASK \ + RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_MASK +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_LSB \ + RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_LSB +#define UNIFIED_PHYRX_HT_SIG_0_HT_SIG_INFO_PHYRX_HT_SIG_INFO_DETAILS_OFFSET \ + PHYRX_HT_SIG_0_HT_SIG_INFO_PHYRX_HT_SIG_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_L_SIG_B_0_L_SIG_B_INFO_PHYRX_L_SIG_B_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_B_0_L_SIG_B_INFO_PHYRX_L_SIG_B_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_L_SIG_A_0_L_SIG_A_INFO_PHYRX_L_SIG_A_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_A_0_L_SIG_A_INFO_PHYRX_L_SIG_A_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_VHT_SIG_A_0_VHT_SIG_A_INFO_PHYRX_VHT_SIG_A_INFO_DETAILS_OFFSET \ + PHYRX_VHT_SIG_A_0_VHT_SIG_A_INFO_PHYRX_VHT_SIG_A_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_A_SU_0_HE_SIG_A_SU_INFO_PHYRX_HE_SIG_A_SU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_A_SU_0_HE_SIG_A_SU_INFO_PHYRX_HE_SIG_A_SU_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_A_MU_DL_0_HE_SIG_A_MU_DL_INFO_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_A_MU_DL_0_HE_SIG_A_MU_DL_INFO_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B1_MU_0_HE_SIG_B1_MU_INFO_PHYRX_HE_SIG_B1_MU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B1_MU_0_HE_SIG_B1_MU_INFO_PHYRX_HE_SIG_B1_MU_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B2_MU_0_HE_SIG_B2_MU_INFO_PHYRX_HE_SIG_B2_MU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B2_MU_0_HE_SIG_B2_MU_INFO_PHYRX_HE_SIG_B2_MU_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B2_OFDMA_0_HE_SIG_B2_OFDMA_INFO_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B2_OFDMA_0_HE_SIG_B2_OFDMA_INFO_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_RSSI_LEGACY_3_RECEIVE_RSSI_INFO_PRE_RSSI_INFO_DETAILS_OFFSET \ + PHYRX_RSSI_LEGACY_3_RECEIVE_RSSI_INFO_PRE_RSSI_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_RSSI_LEGACY_19_RECEIVE_RSSI_INFO_PREAMBLE_RSSI_INFO_DETAILS_OFFSET \ + PHYRX_RSSI_LEGACY_19_RECEIVE_RSSI_INFO_PREAMBLE_RSSI_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MPDU_START_0_RX_MPDU_INFO_RX_MPDU_INFO_DETAILS_OFFSET \ + RX_MPDU_START_0_RX_MPDU_INFO_RX_MPDU_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET \ + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET +#define UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET \ + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET \ + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET +#define UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC \ + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER +#define UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET \ + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET +#define UNIFIED_TCL_DATA_CMD_0_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET \ + TCL_DATA_CMD_0_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET +#define UNIFIED_TCL_DATA_CMD_1_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET \ + TCL_DATA_CMD_1_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_OFFSET \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_OFFSET +#define UNIFIED_BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB +#define UNIFIED_BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_LSB \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_LSB +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_MASK \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_MASK +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_MASK \ + WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_MASK +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_OFFSET \ + WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_OFFSET +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_LSB \ + WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_LSB + +#include "hal_6390_tx.h" +#include "hal_6390_rx.h" +#include +#include + +/** + * hal_rx_get_rx_fragment_number_6390(): Function to retrieve rx fragment number + * + * @nbuf: Network buffer + * Returns: rx fragment number + */ +static +uint8_t hal_rx_get_rx_fragment_number_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + /* Return first 4 bits as fragment number */ + return (HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info) & + DOT11_SEQ_FRAG_MASK); +} + +/** + * hal_rx_msdu_end_da_is_mcbc_get_6390(): API to check if pkt is MCBC + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_mcbc + */ +static uint8_t +hal_rx_msdu_end_da_is_mcbc_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_DA_IS_MCBC_GET(msdu_end); +} + +/** + * hal_rx_msdu_end_sa_is_valid_get_6390(): API to get_6390 the + * sa_is_valid bit from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_is_valid bit + */ +static uint8_t +hal_rx_msdu_end_sa_is_valid_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t sa_is_valid; + + sa_is_valid = HAL_RX_MSDU_END_SA_IS_VALID_GET(msdu_end); + + return sa_is_valid; +} + +/** + * hal_rx_msdu_end_sa_idx_get_6390(): API to get_6390 the + * sa_idx from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_idx (SA AST index) + */ +static +uint16_t hal_rx_msdu_end_sa_idx_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t sa_idx; + + sa_idx = HAL_RX_MSDU_END_SA_IDX_GET(msdu_end); + + return sa_idx; +} + +/** + * hal_rx_desc_is_first_msdu_6390() - Check if first msdu + * + * @hal_soc_hdl: hal_soc handle + * @hw_desc_addr: hardware descriptor address + * + * Return: 0 - success/ non-zero failure + */ +static uint32_t hal_rx_desc_is_first_msdu_6390(void *hw_desc_addr) +{ + struct rx_pkt_tlvs *rx_tlvs = (struct rx_pkt_tlvs *)hw_desc_addr; + struct rx_msdu_end *msdu_end = &rx_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_GET(msdu_end, RX_MSDU_END_5, FIRST_MSDU); +} + +/** + * hal_rx_msdu_end_l3_hdr_padding_get_6390(): API to get_6390 the + * l3_header padding from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: number of l3 header padding bytes + */ +static uint32_t hal_rx_msdu_end_l3_hdr_padding_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint32_t l3_header_padding; + + l3_header_padding = HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(msdu_end); + + return l3_header_padding; +} + +/* + * @ hal_rx_encryption_info_valid_6390: Returns encryption type. + * + * @ buf: rx_tlv_hdr of the received packet + * @ Return: encryption type + */ +static uint32_t hal_rx_encryption_info_valid_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + uint32_t encryption_info = HAL_RX_MPDU_ENCRYPTION_INFO_VALID(mpdu_info); + + return encryption_info; +} + +/* + * @ hal_rx_print_pn_6390: Prints the PN of rx packet. + * + * @ buf: rx_tlv_hdr of the received packet + * @ Return: void + */ +static void hal_rx_print_pn_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + uint32_t pn_31_0 = HAL_RX_MPDU_PN_31_0_GET(mpdu_info); + uint32_t pn_63_32 = HAL_RX_MPDU_PN_63_32_GET(mpdu_info); + uint32_t pn_95_64 = HAL_RX_MPDU_PN_95_64_GET(mpdu_info); + uint32_t pn_127_96 = HAL_RX_MPDU_PN_127_96_GET(mpdu_info); + + hal_debug("PN number pn_127_96 0x%x pn_95_64 0x%x pn_63_32 0x%x pn_31_0 0x%x ", + pn_127_96, pn_95_64, pn_63_32, pn_31_0); +} + +/** + * hal_rx_msdu_end_first_msduget_6390: API to get first msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: first_msdu + */ +static uint8_t hal_rx_msdu_end_first_msdu_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t first_msdu; + + first_msdu = HAL_RX_MSDU_END_FIRST_MSDU_GET(msdu_end); + + return first_msdu; +} + +/** + * hal_rx_msdu_end_da_is_valid_get_6390: API to check if da is valid + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_valid + */ +static uint8_t hal_rx_msdu_end_da_is_valid_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t da_is_valid; + + da_is_valid = HAL_RX_MSDU_END_DA_IS_VALID_GET(msdu_end); + + return da_is_valid; +} + +/** + * hal_rx_msdu_end_last_msdu_get_6390: API to get last msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: last_msdu + */ +static uint8_t hal_rx_msdu_end_last_msdu_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t last_msdu; + + last_msdu = HAL_RX_MSDU_END_LAST_MSDU_GET(msdu_end); + + return last_msdu; +} + +/* + * hal_rx_get_mpdu_mac_ad4_valid_6390(): Retrieves if mpdu 4th addr is valid + * + * @nbuf: Network buffer + * Returns: value of mpdu 4th address valid field + */ +static bool hal_rx_get_mpdu_mac_ad4_valid_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + bool ad4_valid = 0; + + ad4_valid = HAL_RX_MPDU_GET_MAC_AD4_VALID(rx_mpdu_info); + + return ad4_valid; +} + +/** + * hal_rx_mpdu_start_sw_peer_id_get_6390: Retrieve sw peer_id + * @buf: network buffer + * + * Return: sw peer_id + */ +static uint32_t hal_rx_mpdu_start_sw_peer_id_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + return HAL_RX_MPDU_INFO_SW_PEER_ID_GET( + &mpdu_start->rx_mpdu_info_details); +} + +/* + * hal_rx_mpdu_get_to_ds_6390(): API to get the tods info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(to_ds) + */ +static uint32_t hal_rx_mpdu_get_to_ds_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + return HAL_RX_MPDU_GET_TODS(mpdu_info); +} + +/* + * hal_rx_mpdu_get_fr_ds_6390(): API to get the from ds info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(fr_ds) + */ +static uint32_t hal_rx_mpdu_get_fr_ds_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + return HAL_RX_MPDU_GET_FROMDS(mpdu_info); +} + +/* + * hal_rx_get_mpdu_frame_control_valid_6390(): Retrieves mpdu + * frame control valid + * + * @nbuf: Network buffer + * Returns: value of frame control valid field + */ +static uint8_t hal_rx_get_mpdu_frame_control_valid_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(rx_mpdu_info); +} + +/* + * hal_rx_mpdu_get_addr1_6390(): API to check get address1 of the mpdu + * + * @buf: pointer to the start of RX PKT TLV headera + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr1_6390(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr1 { + uint32_t ad1_31_0; + uint16_t ad1_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr1 *addr = (struct hal_addr1 *)mac_addr; + uint32_t mac_addr_ad1_valid; + + mac_addr_ad1_valid = HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(mpdu_info); + + if (mac_addr_ad1_valid) { + addr->ad1_31_0 = HAL_RX_MPDU_AD1_31_0_GET(mpdu_info); + addr->ad1_47_32 = HAL_RX_MPDU_AD1_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr2_6390(): API to check get address2 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr2_6390(uint8_t *buf, + uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr2 { + uint16_t ad2_15_0; + uint32_t ad2_47_16; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr2 *addr = (struct hal_addr2 *)mac_addr; + uint32_t mac_addr_ad2_valid; + + mac_addr_ad2_valid = HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(mpdu_info); + + if (mac_addr_ad2_valid) { + addr->ad2_15_0 = HAL_RX_MPDU_AD2_15_0_GET(mpdu_info); + addr->ad2_47_16 = HAL_RX_MPDU_AD2_47_16_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr3_6390(): API to get address3 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr3_6390(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr3 { + uint32_t ad3_31_0; + uint16_t ad3_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr3 *addr = (struct hal_addr3 *)mac_addr; + uint32_t mac_addr_ad3_valid; + + mac_addr_ad3_valid = HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(mpdu_info); + + if (mac_addr_ad3_valid) { + addr->ad3_31_0 = HAL_RX_MPDU_AD3_31_0_GET(mpdu_info); + addr->ad3_47_32 = HAL_RX_MPDU_AD3_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr4_6390(): API to get address4 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr4_6390(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr4 { + uint32_t ad4_31_0; + uint16_t ad4_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr4 *addr = (struct hal_addr4 *)mac_addr; + uint32_t mac_addr_ad4_valid; + + mac_addr_ad4_valid = HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(mpdu_info); + + if (mac_addr_ad4_valid) { + addr->ad4_31_0 = HAL_RX_MPDU_AD4_31_0_GET(mpdu_info); + addr->ad4_47_32 = HAL_RX_MPDU_AD4_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_get_mpdu_sequence_control_valid_6390(): Get mpdu + * sequence control valid + * + * @nbuf: Network buffer + * Returns: value of sequence control valid field + */ +static uint8_t hal_rx_get_mpdu_sequence_control_valid_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(rx_mpdu_info); +} + +/** + * hal_rx_is_unicast_6390: check packet is unicast frame or not. + * + * @ buf: pointer to rx pkt TLV. + * + * Return: true on unicast. + */ +static bool hal_rx_is_unicast_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint32_t grp_id; + uint8_t *rx_mpdu_info = (uint8_t *)&mpdu_start->rx_mpdu_info_details; + + grp_id = (_HAL_MS((*_OFFSET_TO_WORD_PTR((rx_mpdu_info), + RX_MPDU_INFO_0_SW_FRAME_GROUP_ID_OFFSET)), + RX_MPDU_INFO_0_SW_FRAME_GROUP_ID_MASK, + RX_MPDU_INFO_0_SW_FRAME_GROUP_ID_LSB)); + + return (HAL_MPDU_SW_FRAME_GROUP_UNICAST_DATA == grp_id) ? true : false; +} + +/** + * hal_rx_tid_get_6390: get tid based on qos control valid. + * @hal_soc_hdl: hal soc handle + * @buf: pointer to rx pkt TLV. + * + * Return: tid + */ +static uint32_t hal_rx_tid_get_6390(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint8_t *rx_mpdu_info = (uint8_t *)&mpdu_start->rx_mpdu_info_details; + uint8_t qos_control_valid = + (_HAL_MS((*_OFFSET_TO_WORD_PTR((rx_mpdu_info), + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_OFFSET)), + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_MASK, + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_LSB)); + + if (qos_control_valid) + return hal_rx_mpdu_start_tid_get_6390(buf); + + return HAL_RX_NON_QOS_TID; +} + +/** + * hal_rx_hw_desc_get_ppduid_get_6390(): retrieve ppdu id + * @hw_desc_addr: hw addr + * + * Return: ppdu id + */ +static uint32_t hal_rx_hw_desc_get_ppduid_get_6390(void *hw_desc_addr) +{ + struct rx_mpdu_info *rx_mpdu_info; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + + rx_mpdu_info = + &rx_desc->mpdu_start_tlv.rx_mpdu_start.rx_mpdu_info_details; + + return HAL_RX_GET(rx_mpdu_info, RX_MPDU_INFO_0, PHY_PPDU_ID); +} + +/** + * hal_reo_status_get_header_6390 - Process reo desc info + * @d - Pointer to reo descriptior + * @b - tlv type info + * @h1 - Pointer to hal_reo_status_header where info to be stored + * + * Return - none. + * + */ +static void hal_reo_status_get_header_6390(uint32_t *d, int b, void *h1) +{ + uint32_t val1 = 0; + struct hal_reo_status_header *h = + (struct hal_reo_status_header *)h1; + + switch (b) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + val1 = + d[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UPDATE_RX_REO_QUEUE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + default: + qdf_nofl_err("ERROR: Unknown tlv\n"); + break; + } + h->cmd_num = + HAL_GET_FIELD( + UNIFORM_REO_STATUS_HEADER_0, REO_STATUS_NUMBER, + val1); + h->exec_time = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, + CMD_EXECUTION_TIME, val1); + h->status = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, + REO_CMD_EXECUTION_STATUS, val1); + switch (b) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + val1 = + d[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UPDATE_RX_REO_QUEUE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + default: + qdf_nofl_err("ERROR: Unknown tlv\n"); + break; + } + h->tstamp = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_1, TIMESTAMP, val1); +} + +/** + * hal_rx_mpdu_start_mpdu_qos_control_valid_get_6390(): + * Retrieve qos control valid bit from the tlv. + * @buf: pointer to rx pkt TLV. + * + * Return: qos control value. + */ +static inline uint32_t +hal_rx_mpdu_start_mpdu_qos_control_valid_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + return HAL_RX_MPDU_INFO_QOS_CONTROL_VALID_GET( + &mpdu_start->rx_mpdu_info_details); +} + +/** + * hal_rx_msdu_end_sa_sw_peer_id_get_6390(): API to get the + * sa_sw_peer_id from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: sa_sw_peer_id index + */ +static inline uint32_t +hal_rx_msdu_end_sa_sw_peer_id_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(msdu_end); +} + +/** + * hal_tx_desc_set_mesh_en_6390 - Set mesh_enable flag in Tx descriptor + * @desc: Handle to Tx Descriptor + * @en: For raw WiFi frames, this indicates transmission to a mesh STA, + * enabling the interpretation of the 'Mesh Control Present' bit + * (bit 8) of QoS Control (otherwise this bit is ignored), + * For native WiFi frames, this indicates that a 'Mesh Control' field + * is present between the header and the LLC. + * + * Return: void + */ +static inline +void hal_tx_desc_set_mesh_en_6390(void *desc, uint8_t en) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_4, MESH_ENABLE) |= + HAL_TX_SM(TCL_DATA_CMD_4, MESH_ENABLE, en); +} + +static +void *hal_rx_msdu0_buffer_addr_lsb_6390(void *link_desc_va) +{ + return (void *)HAL_RX_MSDU0_BUFFER_ADDR_LSB(link_desc_va); +} + +static +void *hal_rx_msdu_desc_info_ptr_get_6390(void *msdu0) +{ + return (void *)HAL_RX_MSDU_DESC_INFO_PTR_GET(msdu0); +} + +static +void *hal_ent_mpdu_desc_info_6390(void *ent_ring_desc) +{ + return (void *)HAL_ENT_MPDU_DESC_INFO(ent_ring_desc); +} + +static +void *hal_dst_mpdu_desc_info_6390(void *dst_ring_desc) +{ + return (void *)HAL_DST_MPDU_DESC_INFO(dst_ring_desc); +} + +static +uint8_t hal_rx_get_fc_valid_6390(uint8_t *buf) +{ + return HAL_RX_GET_FC_VALID(buf); +} + +static uint8_t hal_rx_get_to_ds_flag_6390(uint8_t *buf) +{ + return HAL_RX_GET_TO_DS_FLAG(buf); +} + +static uint8_t hal_rx_get_mac_addr2_valid_6390(uint8_t *buf) +{ + return HAL_RX_GET_MAC_ADDR2_VALID(buf); +} + +static uint8_t hal_rx_get_filter_category_6390(uint8_t *buf) +{ + return HAL_RX_GET_FILTER_CATEGORY(buf); +} + +static uint32_t +hal_rx_get_ppdu_id_6390(uint8_t *buf) +{ + return HAL_RX_GET_PPDU_ID(buf); +} + +/** + * hal_reo_config_6390(): Set reo config parameters + * @soc: hal soc handle + * @reg_val: value to be set + * @reo_params: reo parameters + * + * Return: void + */ +static +void hal_reo_config_6390(struct hal_soc *soc, + uint32_t reg_val, + struct hal_reo_params *reo_params) +{ + HAL_REO_R0_CONFIG(soc, reg_val, reo_params); +} + +/** + * hal_rx_msdu_desc_info_get_ptr_6390() - Get msdu desc info ptr + * @msdu_details_ptr - Pointer to msdu_details_ptr + * Return - Pointer to rx_msdu_desc_info structure. + * + */ +static void *hal_rx_msdu_desc_info_get_ptr_6390(void *msdu_details_ptr) +{ + return HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr); +} + +/** + * hal_rx_link_desc_msdu0_ptr_6390 - Get pointer to rx_msdu details + * @link_desc - Pointer to link desc + * Return - Pointer to rx_msdu_details structure + * + */ +static void *hal_rx_link_desc_msdu0_ptr_6390(void *link_desc) +{ + return HAL_RX_LINK_DESC_MSDU0_PTR(link_desc); +} + +/** + * hal_rx_msdu_flow_idx_get_6390: API to get flow index + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index value from MSDU END TLV + */ +static inline uint32_t hal_rx_msdu_flow_idx_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_GET(msdu_end); +} + +/** + * hal_rx_msdu_flow_idx_invalid_6390: API to get flow index invalid + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index invalid value from MSDU END TLV + */ +static bool hal_rx_msdu_flow_idx_invalid_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(msdu_end); +} + +/** + * hal_rx_msdu_flow_idx_timeout_6390: API to get flow index timeout + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index timeout value from MSDU END TLV + */ +static bool hal_rx_msdu_flow_idx_timeout_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(msdu_end); +} + +/** + * hal_rx_msdu_fse_metadata_get_6390: API to get FSE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: fse metadata value from MSDU END TLV + */ +static uint32_t hal_rx_msdu_fse_metadata_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FSE_METADATA_GET(msdu_end); +} + +/** + * hal_rx_msdu_cce_metadata_get_6390: API to get CCE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: cce metadata + */ +static uint16_t +hal_rx_msdu_cce_metadata_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_CCE_METADATA_GET(msdu_end); +} + +/** + * hal_rx_msdu_get_flow_params_6390: API to get flow index, flow index invalid + * and flow index timeout from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * @flow_invalid: pointer to return value of flow_idx_valid + * @flow_timeout: pointer to return value of flow_idx_timeout + * @flow_index: pointer to return value of flow_idx + * + * Return: none + */ +static inline void +hal_rx_msdu_get_flow_params_6390(uint8_t *buf, + bool *flow_invalid, + bool *flow_timeout, + uint32_t *flow_index) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + *flow_invalid = HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(msdu_end); + *flow_timeout = HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(msdu_end); + *flow_index = HAL_RX_MSDU_END_FLOW_IDX_GET(msdu_end); +} + +/** + * hal_rx_tlv_get_tcp_chksum_6390() - API to get tcp checksum + * @buf: rx_tlv_hdr + * + * Return: tcp checksum + */ +static uint16_t +hal_rx_tlv_get_tcp_chksum_6390(uint8_t *buf) +{ + return HAL_RX_TLV_GET_TCP_CHKSUM(buf); +} + +/** + * hal_rx_get_rx_sequence_6390(): Function to retrieve rx sequence number + * + * @nbuf: Network buffer + * Returns: rx sequence number + */ +static +uint16_t hal_rx_get_rx_sequence_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info); +} + +/** + * hal_get_window_address_6390(): Function to get hp/tp address + * @hal_soc: Pointer to hal_soc + * @addr: address offset of register + * + * Return: modified address offset of register + */ +static inline qdf_iomem_t hal_get_window_address_6390(struct hal_soc *hal_soc, + qdf_iomem_t addr) +{ + return addr; +} + +/** + * hal_reo_set_err_dst_remap_6390(): Function to set REO error destination + * ring remap register + * @hal_soc: Pointer to hal_soc + * + * Return: none. + */ +static void +hal_reo_set_err_dst_remap_6390(void *hal_soc) +{ + /* + * Set REO error 2k jump (error code 5) / OOR (error code 7) + * frame routed to REO2TCL ring. + */ + uint32_t dst_remap_ix0 = + HAL_REO_ERR_REMAP_IX0(REO_REMAP_RELEASE, 0) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_RELEASE, 1) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_RELEASE, 2) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_RELEASE, 3) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_RELEASE, 4) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_TCL, 5) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_TCL, 6) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_TCL, 7) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_TCL, 8) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_RELEASE, 9); + + HAL_REG_WRITE(hal_soc, + HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + dst_remap_ix0); + + hal_info("HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0 0x%x", + HAL_REG_READ( + hal_soc, + HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET))); +} + +struct hal_hw_txrx_ops qca6390_hal_hw_txrx_ops = { + /* init and setup */ + hal_srng_dst_hw_init_generic, + hal_srng_src_hw_init_generic, + hal_get_hw_hptp_generic, + hal_reo_setup_generic, + hal_setup_link_idle_list_generic, + hal_get_window_address_6390, + hal_reo_set_err_dst_remap_6390, + + /* tx */ + hal_tx_desc_set_dscp_tid_table_id_6390, + hal_tx_set_dscp_tid_map_6390, + hal_tx_update_dscp_tid_6390, + hal_tx_desc_set_lmac_id_6390, + hal_tx_desc_set_buf_addr_generic, + hal_tx_desc_set_search_type_generic, + hal_tx_desc_set_search_index_generic, + hal_tx_desc_set_cache_set_num_generic, + hal_tx_comp_get_status_generic, + hal_tx_comp_get_release_reason_generic, + hal_get_wbm_internal_error_generic, + hal_tx_desc_set_mesh_en_6390, + /* rx */ + hal_rx_msdu_start_nss_get_6390, + hal_rx_mon_hw_desc_get_mpdu_status_6390, + hal_rx_get_tlv_6390, + hal_rx_proc_phyrx_other_receive_info_tlv_6390, + hal_rx_dump_msdu_start_tlv_6390, + hal_rx_dump_msdu_end_tlv_6390, + hal_get_link_desc_size_6390, + hal_rx_mpdu_start_tid_get_6390, + hal_rx_msdu_start_reception_type_get_6390, + hal_rx_msdu_end_da_idx_get_6390, + hal_rx_msdu_desc_info_get_ptr_6390, + hal_rx_link_desc_msdu0_ptr_6390, + hal_reo_status_get_header_6390, + hal_rx_status_get_tlv_info_generic, + hal_rx_wbm_err_info_get_generic, + hal_rx_dump_mpdu_start_tlv_generic, + + hal_tx_set_pcp_tid_map_generic, + hal_tx_update_pcp_tid_generic, + hal_tx_update_tidmap_prty_generic, + hal_rx_get_rx_fragment_number_6390, + hal_rx_msdu_end_da_is_mcbc_get_6390, + hal_rx_msdu_end_sa_is_valid_get_6390, + hal_rx_msdu_end_sa_idx_get_6390, + hal_rx_desc_is_first_msdu_6390, + hal_rx_msdu_end_l3_hdr_padding_get_6390, + hal_rx_encryption_info_valid_6390, + hal_rx_print_pn_6390, + hal_rx_msdu_end_first_msdu_get_6390, + hal_rx_msdu_end_da_is_valid_get_6390, + hal_rx_msdu_end_last_msdu_get_6390, + hal_rx_get_mpdu_mac_ad4_valid_6390, + hal_rx_mpdu_start_sw_peer_id_get_6390, + hal_rx_mpdu_get_to_ds_6390, + hal_rx_mpdu_get_fr_ds_6390, + hal_rx_get_mpdu_frame_control_valid_6390, + hal_rx_mpdu_get_addr1_6390, + hal_rx_mpdu_get_addr2_6390, + hal_rx_mpdu_get_addr3_6390, + hal_rx_mpdu_get_addr4_6390, + hal_rx_get_mpdu_sequence_control_valid_6390, + hal_rx_is_unicast_6390, + hal_rx_tid_get_6390, + hal_rx_hw_desc_get_ppduid_get_6390, + hal_rx_mpdu_start_mpdu_qos_control_valid_get_6390, + hal_rx_msdu_end_sa_sw_peer_id_get_6390, + hal_rx_msdu0_buffer_addr_lsb_6390, + hal_rx_msdu_desc_info_ptr_get_6390, + hal_ent_mpdu_desc_info_6390, + hal_dst_mpdu_desc_info_6390, + hal_rx_get_fc_valid_6390, + hal_rx_get_to_ds_flag_6390, + hal_rx_get_mac_addr2_valid_6390, + hal_rx_get_filter_category_6390, + hal_rx_get_ppdu_id_6390, + hal_reo_config_6390, + hal_rx_msdu_flow_idx_get_6390, + hal_rx_msdu_flow_idx_invalid_6390, + hal_rx_msdu_flow_idx_timeout_6390, + hal_rx_msdu_fse_metadata_get_6390, + hal_rx_msdu_cce_metadata_get_6390, + hal_rx_msdu_get_flow_params_6390, + hal_rx_tlv_get_tcp_chksum_6390, + hal_rx_get_rx_sequence_6390, + NULL, + NULL, + /* rx - msdu end fast path info fields */ + hal_rx_msdu_packet_metadata_get_generic, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL +}; + +struct hal_hw_srng_config hw_srng_table_6390[] = { + /* TODO: max_rings can populated by querying HW capabilities */ + { /* REO_DST */ + .start_ring_id = HAL_SRNG_REO2SW1, + .max_rings = 4, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2SW1_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + .reg_size = { + HWIO_REO_R0_REO2SW2_RING_BASE_LSB_ADDR(0) - + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(0), + HWIO_REO_R2_REO2SW2_RING_HP_ADDR(0) - + HWIO_REO_R2_REO2SW1_RING_HP_ADDR(0), + }, + .max_size = + HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_EXCEPTION */ + /* Designating REO2TCL ring as exception ring. This ring is + * similar to other REO2SW rings though it is named as REO2TCL. + * Any of theREO2SW rings can be used as exception ring. + */ + .start_ring_id = HAL_SRNG_REO2TCL, + .max_rings = 1, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2TCL_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2TCL_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_REINJECT */ + .start_ring_id = HAL_SRNG_SW2REO, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_SW2REO_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_SW2REO_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_CMD */ + .start_ring_id = HAL_SRNG_REO_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_REO_CMD_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_CMD_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_STATUS */ + .start_ring_id = HAL_SRNG_REO_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats_status)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_DATA */ + .start_ring_id = HAL_SRNG_SW2TCL1, + .max_rings = 3, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_data_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + .reg_size = { + HWIO_TCL_R0_SW2TCL2_RING_BASE_LSB_ADDR(0) - + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(0), + HWIO_TCL_R2_SW2TCL2_RING_HP_ADDR(0) - + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(0), + }, + .max_size = + HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_CMD */ + .start_ring_id = HAL_SRNG_SW2TCL_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_gse_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL_CMD_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_STATUS */ + .start_ring_id = HAL_SRNG_TCL_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_status_ring)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_TCL_STATUS1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_SRC */ + .start_ring_id = HAL_SRNG_CE_0_SRC, + .max_rings = 12, + .entry_size = sizeof(struct ce_src_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET), + }, + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_DST */ + .start_ring_id = HAL_SRNG_CE_0_DST, + .max_rings = 12, + .entry_size = 8 >> 2, + /*TODO: entry_size above should actually be + * sizeof(struct ce_dst_desc) >> 2, but couldn't find definition + * of struct ce_dst_desc in HW header files + */ + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + }, + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_DST_STATUS */ + .start_ring_id = HAL_SRNG_CE_0_DST_STATUS, + .max_rings = 12, + .entry_size = sizeof(struct ce_stat_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + }, + /* TODO: check destination status ring registers */ + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* WBM_IDLE_LINK */ + .start_ring_id = HAL_SRNG_WBM_IDLE_LINK, + .max_rings = 1, + .entry_size = sizeof(struct wbm_link_descriptor_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM_IDLE_LINK_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* SW2WBM_RELEASE */ + .start_ring_id = HAL_SRNG_WBM_SW_RELEASE, + .max_rings = 1, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_SW_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_SW_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* WBM2SW_RELEASE */ + .start_ring_id = HAL_SRNG_WBM2SW0_RELEASE, + .max_rings = 4, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .reg_size = { + HWIO_WBM_R0_WBM2SW1_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW1_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .max_size = + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* RXDMA_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA0_BUF0, +#ifdef IPA_OFFLOAD + .max_rings = 3, +#else + .max_rings = 2, +#endif + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW0, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA2_BUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_STATUS */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW1, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_DESC */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_DESC, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* DIR_BUF_RX_DMA_SRC */ + .start_ring_id = HAL_SRNG_DIR_BUF_RX_SRC_DMA_RING, + .max_rings = 1, + .entry_size = 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, +#ifdef WLAN_FEATURE_CIF_CFR + { /* WIFI_POS_SRC */ + .start_ring_id = HAL_SRNG_WIFI_POS_SRC_DMA_RING, + .max_rings = 1, + .entry_size = sizeof(wmi_oem_dma_buf_release_entry) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, +#endif +}; + +int32_t hal_hw_reg_offset_qca6390[] = { + /* dst */ + REG_OFFSET(DST, HP), + REG_OFFSET(DST, TP), + REG_OFFSET(DST, ID), + REG_OFFSET(DST, MISC), + REG_OFFSET(DST, HP_ADDR_LSB), + REG_OFFSET(DST, HP_ADDR_MSB), + REG_OFFSET(DST, MSI1_BASE_LSB), + REG_OFFSET(DST, MSI1_BASE_MSB), + REG_OFFSET(DST, MSI1_DATA), + REG_OFFSET(DST, BASE_LSB), + REG_OFFSET(DST, BASE_MSB), + REG_OFFSET(DST, PRODUCER_INT_SETUP), + /* src */ + REG_OFFSET(SRC, HP), + REG_OFFSET(SRC, TP), + REG_OFFSET(SRC, ID), + REG_OFFSET(SRC, MISC), + REG_OFFSET(SRC, TP_ADDR_LSB), + REG_OFFSET(SRC, TP_ADDR_MSB), + REG_OFFSET(SRC, MSI1_BASE_LSB), + REG_OFFSET(SRC, MSI1_BASE_MSB), + REG_OFFSET(SRC, MSI1_DATA), + REG_OFFSET(SRC, BASE_LSB), + REG_OFFSET(SRC, BASE_MSB), + REG_OFFSET(SRC, CONSUMER_INT_SETUP_IX0), + REG_OFFSET(SRC, CONSUMER_INT_SETUP_IX1), +}; + +/** + * hal_qca6390_attach() - Attach 6390 target specific hal_soc ops, + * offset and srng table + */ +void hal_qca6390_attach(struct hal_soc *hal_soc) +{ + hal_soc->hw_srng_table = hw_srng_table_6390; + hal_soc->hal_hw_reg_offset = hal_hw_reg_offset_qca6390; + hal_soc->ops = &qca6390_hal_hw_txrx_ops; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6390/hal_6390_rx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6390/hal_6390_rx.h new file mode 100644 index 0000000000000000000000000000000000000000..095b9f8de03b820cad9e29bbf7d5dd900d870599 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6390/hal_6390_rx.h @@ -0,0 +1,731 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "qdf_util.h" +#include "qdf_types.h" +#include "qdf_lock.h" +#include "qdf_mem.h" +#include "qdf_nbuf.h" +#include "tcl_data_cmd.h" +#include "mac_tcl_reg_seq_hwioreg.h" +#include "phyrx_rssi_legacy.h" +#include "rx_msdu_start.h" +#include "tlv_tag_def.h" +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "cdp_txrx_mon_struct.h" +#include "qdf_trace.h" +#include "hal_rx.h" +#include "hal_tx.h" +#include "dp_types.h" +#include "hal_api_mon.h" +#include "phyrx_other_receive_info_ru_details.h" + +#define HAL_RX_MPDU_GET_SEQUENCE_NUMBER(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_MASK, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_LSB)) + +#define HAL_RX_MSDU_END_DA_IS_MCBC_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_DA_IS_MCBC_OFFSET)), \ + RX_MSDU_END_5_DA_IS_MCBC_MASK, \ + RX_MSDU_END_5_DA_IS_MCBC_LSB)) + +#define HAL_RX_MSDU_END_SA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_SA_IS_VALID_OFFSET)), \ + RX_MSDU_END_5_SA_IS_VALID_MASK, \ + RX_MSDU_END_5_SA_IS_VALID_LSB)) + +#define HAL_RX_MSDU_END_SA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_13_SA_IDX_OFFSET)), \ + RX_MSDU_END_13_SA_IDX_MASK, \ + RX_MSDU_END_13_SA_IDX_LSB)) + +#define HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_L3_HEADER_PADDING_OFFSET)), \ + RX_MSDU_END_5_L3_HEADER_PADDING_MASK, \ + RX_MSDU_END_5_L3_HEADER_PADDING_LSB)) + +#define HAL_RX_MPDU_ENCRYPTION_INFO_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_OFFSET)), \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_MASK, \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_LSB)) + +#define HAL_RX_MPDU_PN_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_4_PN_31_0_OFFSET)), \ + RX_MPDU_INFO_4_PN_31_0_MASK, \ + RX_MPDU_INFO_4_PN_31_0_LSB)) + +#define HAL_RX_MPDU_PN_63_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_5_PN_63_32_OFFSET)), \ + RX_MPDU_INFO_5_PN_63_32_MASK, \ + RX_MPDU_INFO_5_PN_63_32_LSB)) + +#define HAL_RX_MPDU_PN_95_64_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_6_PN_95_64_OFFSET)), \ + RX_MPDU_INFO_6_PN_95_64_MASK, \ + RX_MPDU_INFO_6_PN_95_64_LSB)) + +#define HAL_RX_MPDU_PN_127_96_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_7_PN_127_96_OFFSET)), \ + RX_MPDU_INFO_7_PN_127_96_MASK, \ + RX_MPDU_INFO_7_PN_127_96_LSB)) + +#define HAL_RX_MSDU_END_FIRST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_FIRST_MSDU_OFFSET)), \ + RX_MSDU_END_5_FIRST_MSDU_MASK, \ + RX_MSDU_END_5_FIRST_MSDU_LSB)) + +#define HAL_RX_MSDU_END_DA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_DA_IS_VALID_OFFSET)), \ + RX_MSDU_END_5_DA_IS_VALID_MASK, \ + RX_MSDU_END_5_DA_IS_VALID_LSB)) + +#define HAL_RX_MSDU_END_LAST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_LAST_MSDU_OFFSET)), \ + RX_MSDU_END_5_LAST_MSDU_MASK, \ + RX_MSDU_END_5_LAST_MSDU_LSB)) + +#define HAL_RX_MPDU_GET_MAC_AD4_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_LSB)) + +#define HAL_RX_MPDU_INFO_SW_PEER_ID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_1_SW_PEER_ID_OFFSET)), \ + RX_MPDU_INFO_1_SW_PEER_ID_MASK, \ + RX_MPDU_INFO_1_SW_PEER_ID_LSB)) + +#define HAL_RX_MPDU_GET_TODS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_TO_DS_OFFSET)), \ + RX_MPDU_INFO_2_TO_DS_MASK, \ + RX_MPDU_INFO_2_TO_DS_LSB)) + +#define HAL_RX_MPDU_GET_FROMDS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_FR_DS_OFFSET)), \ + RX_MPDU_INFO_2_FR_DS_MASK, \ + RX_MPDU_INFO_2_FR_DS_LSB)) + +#define HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_LSB)) + +#define HAL_RX_MPDU_AD1_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_OFFSET)), \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_MASK, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_LSB)) + +#define HAL_RX_MPDU_AD1_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_LSB)) + +#define HAL_RX_MPDU_AD2_15_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_LSB)) + +#define HAL_RX_MPDU_AD2_47_16_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_OFFSET)), \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_MASK, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_LSB)) + +#define HAL_RX_MPDU_AD3_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_OFFSET)), \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_MASK, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_LSB)) + +#define HAL_RX_MPDU_AD3_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_OFFSET)), \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_MASK, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_LSB)) + +#define HAL_RX_MPDU_AD4_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_OFFSET)), \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_MASK, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_LSB)) + +#define HAL_RX_MPDU_AD4_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_OFFSET)), \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_MASK, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_LSB)) + +#define HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_LSB)) + +#define HAL_RX_MSDU_START_MIMO_SS_BITMAP(_rx_msdu_start)\ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\ + RX_MSDU_START_5_MIMO_SS_BITMAP_OFFSET)), \ + RX_MSDU_START_5_MIMO_SS_BITMAP_MASK, \ + RX_MSDU_START_5_MIMO_SS_BITMAP_LSB)) + +#define HAL_RX_MPDU_INFO_QOS_CONTROL_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_LSB)) + +#define HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_16_SA_SW_PEER_ID_OFFSET)), \ + RX_MSDU_END_16_SA_SW_PEER_ID_MASK, \ + RX_MSDU_END_16_SA_SW_PEER_ID_LSB)) + +#define HAL_RX_MSDU0_BUFFER_ADDR_LSB(link_desc_va) \ + (uint8_t *)(link_desc_va) + \ + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET + +#define HAL_RX_MSDU_DESC_INFO_PTR_GET(msdu0) \ + (uint8_t *)(msdu0) + \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET + +#define HAL_ENT_MPDU_DESC_INFO(ent_ring_desc) \ + (uint8_t *)(ent_ring_desc) + \ + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET + +#define HAL_DST_MPDU_DESC_INFO(dst_ring_desc) \ + (uint8_t *)(dst_ring_desc) + \ + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET + +#define HAL_RX_GET_FC_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, MPDU_FRAME_CONTROL_VALID) + +#define HAL_RX_GET_TO_DS_FLAG(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, TO_DS) + +#define HAL_RX_GET_MAC_ADDR1_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, MAC_ADDR_AD1_VALID) + +#define HAL_RX_GET_MAC_ADDR2_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, MAC_ADDR_AD2_VALID) + +#define HAL_RX_GET_FILTER_CATEGORY(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_0, RXPCU_MPDU_FILTER_IN_CATEGORY) + +#define HAL_RX_GET_PPDU_ID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_0, PHY_PPDU_ID) + +#define HAL_RX_GET_SW_FRAME_GROUP_ID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_0, SW_FRAME_GROUP_ID) + +#define HAL_REO_R0_CONFIG(soc, reg_val, reo_params) \ + do { \ + reg_val &= \ + ~(HWIO_REO_R0_GENERAL_ENABLE_FRAGMENT_DEST_RING_BMSK |\ + HWIO_REO_R0_GENERAL_ENABLE_AGING_LIST_ENABLE_BMSK | \ + HWIO_REO_R0_GENERAL_ENABLE_AGING_FLUSH_ENABLE_BMSK); \ + reg_val |= \ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + FRAGMENT_DEST_RING, \ + (reo_params)->frag_dst_ring) | \ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + AGING_LIST_ENABLE, 1) |\ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + AGING_FLUSH_ENABLE, 1);\ + HAL_REG_WRITE((soc), \ + HWIO_REO_R0_GENERAL_ENABLE_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET), \ + (reg_val)); \ + reg_val = \ + HAL_REG_READ((soc), \ + HWIO_REO_R0_GENERAL_ENABLE_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET)); \ + reg_val &= \ + (~HWIO_REO_R0_GENERAL_ENABLE_BAR_DEST_RING_BMSK |\ + (REO_REMAP_TCL << HWIO_REO_R0_GENERAL_ENABLE_BAR_DEST_RING_SHFT)); \ + HAL_REG_WRITE((soc), \ + HWIO_REO_R0_GENERAL_ENABLE_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET), \ + (reg_val)); \ + } while (0) + +#define HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr) \ + ((struct rx_msdu_desc_info *) \ + _OFFSET_TO_BYTE_PTR(msdu_details_ptr, \ +UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET)) + +#define HAL_RX_LINK_DESC_MSDU0_PTR(link_desc) \ + ((struct rx_msdu_details *) \ + _OFFSET_TO_BYTE_PTR((link_desc),\ + UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET)) + +#define HAL_RX_MSDU_END_FLOW_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_14_FLOW_IDX_OFFSET)), \ + RX_MSDU_END_14_FLOW_IDX_MASK, \ + RX_MSDU_END_14_FLOW_IDX_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_FLOW_IDX_INVALID_OFFSET)), \ + RX_MSDU_END_5_FLOW_IDX_INVALID_MASK, \ + RX_MSDU_END_5_FLOW_IDX_INVALID_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_FLOW_IDX_TIMEOUT_OFFSET)), \ + RX_MSDU_END_5_FLOW_IDX_TIMEOUT_MASK, \ + RX_MSDU_END_5_FLOW_IDX_TIMEOUT_LSB)) + +#define HAL_RX_MSDU_END_FSE_METADATA_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_15_FSE_METADATA_OFFSET)), \ + RX_MSDU_END_15_FSE_METADATA_MASK, \ + RX_MSDU_END_15_FSE_METADATA_LSB)) + +#define HAL_RX_MSDU_END_CCE_METADATA_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_16_CCE_METADATA_OFFSET)), \ + RX_MSDU_END_16_CCE_METADATA_MASK, \ + RX_MSDU_END_16_CCE_METADATA_LSB)) + +#define HAL_RX_TLV_GET_TCP_CHKSUM(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_1_TCP_UDP_CHKSUM_OFFSET)), \ + RX_MSDU_END_1_TCP_UDP_CHKSUM_MASK, \ + RX_MSDU_END_1_TCP_UDP_CHKSUM_LSB)) +/* + * hal_rx_msdu_start_nss_get_6390(): API to get the NSS + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(nss) + */ +static uint32_t +hal_rx_msdu_start_nss_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint8_t mimo_ss_bitmap; + + mimo_ss_bitmap = HAL_RX_MSDU_START_MIMO_SS_BITMAP(msdu_start); + + return qdf_get_hweight8(mimo_ss_bitmap); + +} + +/** + * hal_rx_mon_hw_desc_get_mpdu_status_6390(): Retrieve MPDU status + * + * @ hw_desc_addr: Start address of Rx HW TLVs + * @ rs: Status for monitor mode + * + * Return: void + */ +static void hal_rx_mon_hw_desc_get_mpdu_status_6390(void *hw_desc_addr, + struct mon_rx_status *rs) +{ + struct rx_msdu_start *rx_msdu_start; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + uint32_t reg_value; + const uint32_t sgi_hw_to_cdp[] = { + CDP_SGI_0_8_US, + CDP_SGI_0_4_US, + CDP_SGI_1_6_US, + CDP_SGI_3_2_US, + }; + + rx_msdu_start = &rx_desc->msdu_start_tlv.rx_msdu_start; + + HAL_RX_GET_MSDU_AGGREGATION(rx_desc, rs); + + rs->ant_signal_db = HAL_RX_GET(rx_msdu_start, + RX_MSDU_START_5, USER_RSSI); + rs->is_stbc = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, STBC); + + reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, SGI); + rs->sgi = sgi_hw_to_cdp[reg_value]; + + reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, RECEPTION_TYPE); + rs->beamformed = (reg_value == HAL_RX_RECEPTION_TYPE_MU_MIMO) ? 1 : 0; + /* TODO: rs->beamformed should be set for SU beamforming also */ +} + +#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2) + +static uint32_t hal_get_link_desc_size_6390(void) +{ + return LINK_DESC_SIZE; +} + +/* + * hal_rx_get_tlv_6390(): API to get the tlv + * + * @rx_tlv: TLV data extracted from the rx packet + * Return: uint8_t + */ +static uint8_t hal_rx_get_tlv_6390(void *rx_tlv) +{ + return HAL_RX_GET(rx_tlv, PHYRX_RSSI_LEGACY_0, RECEIVE_BANDWIDTH); +} + +/** + * hal_rx_proc_phyrx_other_receive_info_tlv_6390() + * - process other receive info TLV + * @rx_tlv_hdr: pointer to TLV header + * @ppdu_info: pointer to ppdu_info + * + * Return: None + */ +static +void hal_rx_proc_phyrx_other_receive_info_tlv_6390(void *rx_tlv_hdr, + void *ppdu_info_handle) +{ + uint32_t tlv_tag, tlv_len; + uint32_t temp_len, other_tlv_len, other_tlv_tag; + void *rx_tlv = (uint8_t *)rx_tlv_hdr + HAL_RX_TLV32_HDR_SIZE; + void *other_tlv_hdr = NULL; + void *other_tlv = NULL; + + tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv_hdr); + tlv_len = HAL_RX_GET_USER_TLV32_LEN(rx_tlv_hdr); + temp_len = 0; + + other_tlv_hdr = rx_tlv + HAL_RX_TLV32_HDR_SIZE; + + other_tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(other_tlv_hdr); + other_tlv_len = HAL_RX_GET_USER_TLV32_LEN(other_tlv_hdr); + temp_len += other_tlv_len; + other_tlv = other_tlv_hdr + HAL_RX_TLV32_HDR_SIZE; + + switch (other_tlv_tag) { + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s unhandled TLV type: %d, TLV len:%d", + __func__, other_tlv_tag, other_tlv_len); + break; + } +} + +/** + * hal_rx_dump_msdu_start_tlv_6390() : dump RX msdu_start TLV in structured + * human readable format. + * @ msdu_start: pointer the msdu_start TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static void hal_rx_dump_msdu_start_tlv_6390(void *msdustart, uint8_t dbg_level) +{ + struct rx_msdu_start *msdu_start = (struct rx_msdu_start *)msdustart; + + hal_verbose_debug( + "rx_msdu_start tlv (1/2) - " + "rxpcu_mpdu_filter_in_category: %x " + "sw_frame_group_id: %x " + "phy_ppdu_id: %x " + "msdu_length: %x " + "ipsec_esp: %x " + "l3_offset: %x " + "ipsec_ah: %x " + "l4_offset: %x " + "msdu_number: %x " + "decap_format: %x " + "ipv4_proto: %x " + "ipv6_proto: %x " + "tcp_proto: %x " + "udp_proto: %x " + "ip_frag: %x " + "tcp_only_ack: %x " + "da_is_bcast_mcast: %x " + "ip4_protocol_ip6_next_header: %x " + "toeplitz_hash_2_or_4: %x " + "flow_id_toeplitz: %x " + "user_rssi: %x " + "pkt_type: %x " + "stbc: %x " + "sgi: %x " + "rate_mcs: %x " + "receive_bandwidth: %x " + "reception_type: %x " + "ppdu_start_timestamp: %u ", + msdu_start->rxpcu_mpdu_filter_in_category, + msdu_start->sw_frame_group_id, + msdu_start->phy_ppdu_id, + msdu_start->msdu_length, + msdu_start->ipsec_esp, + msdu_start->l3_offset, + msdu_start->ipsec_ah, + msdu_start->l4_offset, + msdu_start->msdu_number, + msdu_start->decap_format, + msdu_start->ipv4_proto, + msdu_start->ipv6_proto, + msdu_start->tcp_proto, + msdu_start->udp_proto, + msdu_start->ip_frag, + msdu_start->tcp_only_ack, + msdu_start->da_is_bcast_mcast, + msdu_start->ip4_protocol_ip6_next_header, + msdu_start->toeplitz_hash_2_or_4, + msdu_start->flow_id_toeplitz, + msdu_start->user_rssi, + msdu_start->pkt_type, + msdu_start->stbc, + msdu_start->sgi, + msdu_start->rate_mcs, + msdu_start->receive_bandwidth, + msdu_start->reception_type, + msdu_start->ppdu_start_timestamp); + + hal_verbose_debug( + "rx_msdu_start tlv (2/2) - " + "sw_phy_meta_data: %x ", + msdu_start->sw_phy_meta_data); +} + +/** + * hal_rx_dump_msdu_end_tlv_6390: dump RX msdu_end TLV in structured + * human readable format. + * @ msdu_end: pointer the msdu_end TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static void hal_rx_dump_msdu_end_tlv_6390(void *msduend, + uint8_t dbg_level) +{ + struct rx_msdu_end *msdu_end = (struct rx_msdu_end *)msduend; + + __QDF_TRACE_RL(dbg_level, QDF_MODULE_ID_DP, + "rx_msdu_end tlv (1/2) - " + "rxpcu_mpdu_filter_in_category: %x " + "sw_frame_group_id: %x " + "phy_ppdu_id: %x " + "ip_hdr_chksum: %x " + "tcp_udp_chksum: %x " + "key_id_octet: %x " + "cce_super_rule: %x " + "cce_classify_not_done_truncat: %x " + "cce_classify_not_done_cce_dis: %x " + "ext_wapi_pn_63_48: %x " + "ext_wapi_pn_95_64: %x " + "ext_wapi_pn_127_96: %x " + "reported_mpdu_length: %x " + "first_msdu: %x " + "last_msdu: %x " + "sa_idx_timeout: %x " + "da_idx_timeout: %x " + "msdu_limit_error: %x " + "flow_idx_timeout: %x " + "flow_idx_invalid: %x " + "wifi_parser_error: %x " + "amsdu_parser_error: %x", + msdu_end->rxpcu_mpdu_filter_in_category, + msdu_end->sw_frame_group_id, + msdu_end->phy_ppdu_id, + msdu_end->ip_hdr_chksum, + msdu_end->tcp_udp_chksum, + msdu_end->key_id_octet, + msdu_end->cce_super_rule, + msdu_end->cce_classify_not_done_truncate, + msdu_end->cce_classify_not_done_cce_dis, + msdu_end->ext_wapi_pn_63_48, + msdu_end->ext_wapi_pn_95_64, + msdu_end->ext_wapi_pn_127_96, + msdu_end->reported_mpdu_length, + msdu_end->first_msdu, + msdu_end->last_msdu, + msdu_end->sa_idx_timeout, + msdu_end->da_idx_timeout, + msdu_end->msdu_limit_error, + msdu_end->flow_idx_timeout, + msdu_end->flow_idx_invalid, + msdu_end->wifi_parser_error, + msdu_end->amsdu_parser_error); + + __QDF_TRACE_RL(dbg_level, QDF_MODULE_ID_DP, + "rx_msdu_end tlv (2/2)- " + "sa_is_valid: %x " + "da_is_valid: %x " + "da_is_mcbc: %x " + "l3_header_padding: %x " + "ipv6_options_crc: %x " + "tcp_seq_number: %x " + "tcp_ack_number: %x " + "tcp_flag: %x " + "lro_eligible: %x " + "window_size: %x " + "da_offset: %x " + "sa_offset: %x " + "da_offset_valid: %x " + "sa_offset_valid: %x " + "rule_indication_31_0: %x " + "rule_indication_63_32: %x " + "sa_idx: %x " + "da_idx: %x " + "msdu_drop: %x " + "reo_destination_indication: %x " + "flow_idx: %x " + "fse_metadata: %x " + "cce_metadata: %x " + "sa_sw_peer_id: %x ", + msdu_end->sa_is_valid, + msdu_end->da_is_valid, + msdu_end->da_is_mcbc, + msdu_end->l3_header_padding, + msdu_end->ipv6_options_crc, + msdu_end->tcp_seq_number, + msdu_end->tcp_ack_number, + msdu_end->tcp_flag, + msdu_end->lro_eligible, + msdu_end->window_size, + msdu_end->da_offset, + msdu_end->sa_offset, + msdu_end->da_offset_valid, + msdu_end->sa_offset_valid, + msdu_end->rule_indication_31_0, + msdu_end->rule_indication_63_32, + msdu_end->sa_idx, + msdu_end->da_idx_or_sw_peer_id, + msdu_end->msdu_drop, + msdu_end->reo_destination_indication, + msdu_end->flow_idx, + msdu_end->fse_metadata, + msdu_end->cce_metadata, + msdu_end->sa_sw_peer_id); +} + + +/* + * Get tid from RX_MPDU_START + */ +#define HAL_RX_MPDU_INFO_TID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_3_TID_OFFSET)), \ + RX_MPDU_INFO_3_TID_MASK, \ + RX_MPDU_INFO_3_TID_LSB)) + +static uint32_t hal_rx_mpdu_start_tid_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint32_t tid; + + tid = HAL_RX_MPDU_INFO_TID_GET(&mpdu_start->rx_mpdu_info_details); + + return tid; +} + +#define HAL_RX_MSDU_START_RECEPTION_TYPE_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start), \ + RX_MSDU_START_5_RECEPTION_TYPE_OFFSET)), \ + RX_MSDU_START_5_RECEPTION_TYPE_MASK, \ + RX_MSDU_START_5_RECEPTION_TYPE_LSB)) + +/* + * hal_rx_msdu_start_reception_type_get(): API to get the reception type + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(reception_type) + */ +static +uint32_t hal_rx_msdu_start_reception_type_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t reception_type; + + reception_type = HAL_RX_MSDU_START_RECEPTION_TYPE_GET(msdu_start); + + return reception_type; +} + +#define HAL_RX_MSDU_END_DA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_13_DA_IDX_OR_SW_PEER_ID_OFFSET)), \ + RX_MSDU_END_13_DA_IDX_OR_SW_PEER_ID_MASK, \ + RX_MSDU_END_13_DA_IDX_OR_SW_PEER_ID_LSB)) + + /** + * hal_rx_msdu_end_da_idx_get_6390: API to get da_idx + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da index + */ +static uint16_t hal_rx_msdu_end_da_idx_get_6390(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t da_idx; + + da_idx = HAL_RX_MSDU_END_DA_IDX_GET(msdu_end); + + return da_idx; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6390/hal_6390_tx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6390/hal_6390_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..ddd25873247bbfd39c847025dba587727c94fb75 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6390/hal_6390_tx.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "tcl_data_cmd.h" +#include "mac_tcl_reg_seq_hwioreg.h" +#include "phyrx_rssi_legacy.h" +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "cdp_txrx_mon_struct.h" +#include "qdf_trace.h" +#include "hal_rx.h" +#include "hal_tx.h" +#include "dp_types.h" +#include "hal_api_mon.h" + +/** + * hal_tx_desc_set_dscp_tid_table_id_6390() - Sets DSCP to TID conversion + * table ID + * @desc: Handle to Tx Descriptor + * @id: DSCP to tid conversion table to be used for this frame + * + * Return: void + */ +static void hal_tx_desc_set_dscp_tid_table_id_6390(void *desc, uint8_t id) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_5, + DSCP_TID_TABLE_NUM) |= + HAL_TX_SM(TCL_DATA_CMD_5, + DSCP_TID_TABLE_NUM, id); +} + +#define DSCP_TID_TABLE_SIZE 24 +#define NUM_WORDS_PER_DSCP_TID_TABLE (DSCP_TID_TABLE_SIZE / 4) + +/** + * hal_tx_set_dscp_tid_map_6390() - Configure default DSCP to TID map table + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id: mapping table ID - 0-31 + * + * DSCP are mapped to 8 TID values using TID values programmed + * in any of the 32 DSCP_TID_MAPS (id = 0-31). + * + * Return: none + */ +static void hal_tx_set_dscp_tid_map_6390(struct hal_soc *soc, uint8_t *map, + uint8_t id) +{ + int i; + uint32_t addr, cmn_reg_addr; + uint32_t value = 0, regval; + uint8_t val[DSCP_TID_TABLE_SIZE], cnt = 0; + + if (id >= HAL_MAX_HW_DSCP_TID_MAPS_11AX) + return; + + cmn_reg_addr = HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + + addr = HWIO_TCL_R0_DSCP_TID_MAP_n_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET, + id * NUM_WORDS_PER_DSCP_TID_TABLE); + + /* Enable read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval |= + (1 << HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_SHFT); + + HAL_REG_WRITE(soc, cmn_reg_addr, regval); + + /* Write 8 (24 bits) DSCP-TID mappings in each interation */ + for (i = 0; i < 64; i += 8) { + value = (map[i] | + (map[i + 1] << 0x3) | + (map[i + 2] << 0x6) | + (map[i + 3] << 0x9) | + (map[i + 4] << 0xc) | + (map[i + 5] << 0xf) | + (map[i + 6] << 0x12) | + (map[i + 7] << 0x15)); + + qdf_mem_copy(&val[cnt], &value, 3); + cnt += 3; + } + + for (i = 0; i < DSCP_TID_TABLE_SIZE; i += 4) { + regval = *(uint32_t *)(val + i); + HAL_REG_WRITE(soc, addr, + (regval & HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK)); + addr += 4; + } + + /* Diasble read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval &= + ~(HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_BMSK); + + HAL_REG_WRITE(soc, cmn_reg_addr, regval); +} + +/** + * hal_tx_update_dscp_tid_6390() - Update the dscp tid map table as updated + * by the user + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id : MAP ID + * @dscp: DSCP_TID map index + * + * Return: void + */ +static void hal_tx_update_dscp_tid_6390(struct hal_soc *soc, uint8_t tid, + uint8_t id, uint8_t dscp) +{ + int index; + uint32_t addr; + uint32_t value; + uint32_t regval; + + addr = HWIO_TCL_R0_DSCP_TID_MAP_n_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET, id); + + index = dscp % HAL_TX_NUM_DSCP_PER_REGISTER; + addr += 4 * (dscp / HAL_TX_NUM_DSCP_PER_REGISTER); + value = tid << (HAL_TX_BITS_PER_TID * index); + + regval = HAL_REG_READ(soc, addr); + regval &= ~(HAL_TX_TID_BITS_MASK << (HAL_TX_BITS_PER_TID * index)); + regval |= value; + + HAL_REG_WRITE(soc, addr, (regval & HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK)); +} + +/** + * hal_tx_desc_set_lmac_id - Set the lmac_id value + * @desc: Handle to Tx Descriptor + * @lmac_id: mac Id to ast matching + * b00 – mac 0 + * b01 – mac 1 + * b10 – mac 2 + * b11 – all macs (legacy HK way) + * + * Return: void + */ +static void hal_tx_desc_set_lmac_id_6390(void *desc, uint8_t lmac_id) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_4, LMAC_ID) |= + HAL_TX_SM(TCL_DATA_CMD_4, LMAC_ID, lmac_id); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6490/hal_6490.c b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6490/hal_6490.c new file mode 100644 index 0000000000000000000000000000000000000000..6124a42d9dfd725cf83a94f897118700596ea3eb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6490/hal_6490.c @@ -0,0 +1,2054 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_types.h" +#include "qdf_util.h" +#include "qdf_types.h" +#include "qdf_lock.h" +#include "qdf_mem.h" +#include "qdf_nbuf.h" +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "hal_api.h" +#include "target_type.h" +#include "wcss_version.h" +#include "qdf_module.h" + +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_OFFSET \ + RXPCU_PPDU_END_INFO_9_RX_PPDU_DURATION_OFFSET +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_MASK \ + RXPCU_PPDU_END_INFO_9_RX_PPDU_DURATION_MASK +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_LSB \ + RXPCU_PPDU_END_INFO_9_RX_PPDU_DURATION_LSB +#define UNIFIED_PHYRX_HT_SIG_0_HT_SIG_INFO_PHYRX_HT_SIG_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_B_0_PHYRX_L_SIG_B_INFO_DETAILS_RATE_OFFSET +#define UNIFIED_PHYRX_L_SIG_B_0_L_SIG_B_INFO_PHYRX_L_SIG_B_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_B_0_PHYRX_L_SIG_B_INFO_DETAILS_RATE_OFFSET +#define UNIFIED_PHYRX_L_SIG_A_0_L_SIG_A_INFO_PHYRX_L_SIG_A_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_A_0_PHYRX_L_SIG_A_INFO_DETAILS_RATE_OFFSET +#define UNIFIED_PHYRX_VHT_SIG_A_0_VHT_SIG_A_INFO_PHYRX_VHT_SIG_A_INFO_DETAILS_OFFSET \ + PHYRX_VHT_SIG_A_0_PHYRX_VHT_SIG_A_INFO_DETAILS_BANDWIDTH_OFFSET +#define UNIFIED_PHYRX_HE_SIG_A_SU_0_HE_SIG_A_SU_INFO_PHYRX_HE_SIG_A_SU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_A_SU_0_PHYRX_HE_SIG_A_SU_INFO_DETAILS_FORMAT_INDICATION_OFFSET +#define UNIFIED_PHYRX_HE_SIG_A_MU_DL_0_HE_SIG_A_MU_DL_INFO_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_A_MU_DL_0_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS_DL_UL_FLAG_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B1_MU_0_HE_SIG_B1_MU_INFO_PHYRX_HE_SIG_B1_MU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B1_MU_0_PHYRX_HE_SIG_B1_MU_INFO_DETAILS_RU_ALLOCATION_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B2_MU_0_HE_SIG_B2_MU_INFO_PHYRX_HE_SIG_B2_MU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B2_MU_0_PHYRX_HE_SIG_B2_MU_INFO_DETAILS_STA_ID_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B2_OFDMA_0_HE_SIG_B2_OFDMA_INFO_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B2_OFDMA_0_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS_STA_ID_OFFSET + +#define UNIFIED_PHYRX_RSSI_LEGACY_3_RECEIVE_RSSI_INFO_PRE_RSSI_INFO_DETAILS_OFFSET \ + PHYRX_RSSI_LEGACY_3_RECEIVE_RSSI_INFO_PRE_RSSI_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_RSSI_LEGACY_19_RECEIVE_RSSI_INFO_PREAMBLE_RSSI_INFO_DETAILS_OFFSET \ + PHYRX_RSSI_LEGACY_19_PREAMBLE_RSSI_INFO_DETAILS_RSSI_PRI20_CHAIN0_OFFSET +#define UNIFIED_RX_MPDU_START_0_RX_MPDU_INFO_RX_MPDU_INFO_DETAILS_OFFSET \ + RX_MPDU_START_0_RX_MPDU_INFO_DETAILS_RXPT_CLASSIFY_INFO_DETAILS_REO_DESTINATION_INDICATION_OFFSET +#define UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET \ + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET +#define UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET \ + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET \ + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET +#define UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC \ + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER +#define UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET \ + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET +#define UNIFIED_TCL_DATA_CMD_0_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET \ + TCL_DATA_CMD_0_BUF_ADDR_INFO_BUFFER_ADDR_31_0_OFFSET +#define UNIFIED_TCL_DATA_CMD_1_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET \ + TCL_DATA_CMD_1_BUF_ADDR_INFO_BUFFER_ADDR_39_32_OFFSET +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_OFFSET \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_OFFSET +#define UNIFIED_BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB +#define UNIFIED_BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_LSB \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_LSB +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_MASK \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_MASK +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_MASK \ + WBM_RELEASE_RING_6_TX_RATE_STATS_PPDU_TRANSMISSION_TSF_MASK +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_OFFSET \ + WBM_RELEASE_RING_6_TX_RATE_STATS_PPDU_TRANSMISSION_TSF_OFFSET +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_LSB \ + WBM_RELEASE_RING_6_TX_RATE_STATS_PPDU_TRANSMISSION_TSF_LSB + +#include "hal_6490_tx.h" +#include "hal_6490_rx.h" +#include +#include + +/* + * hal_rx_msdu_start_nss_get_6490(): API to get the NSS + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(nss) + */ +static uint32_t +hal_rx_msdu_start_nss_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint8_t mimo_ss_bitmap; + + mimo_ss_bitmap = HAL_RX_MSDU_START_MIMO_SS_BITMAP(msdu_start); + + return qdf_get_hweight8(mimo_ss_bitmap); +} + +/** + * hal_rx_mon_hw_desc_get_mpdu_status_6490(): Retrieve MPDU status + * + * @ hw_desc_addr: Start address of Rx HW TLVs + * @ rs: Status for monitor mode + * + * Return: void + */ +static void hal_rx_mon_hw_desc_get_mpdu_status_6490(void *hw_desc_addr, + struct mon_rx_status *rs) +{ + struct rx_msdu_start *rx_msdu_start; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + uint32_t reg_value; + const uint32_t sgi_hw_to_cdp[] = { + CDP_SGI_0_8_US, + CDP_SGI_0_4_US, + CDP_SGI_1_6_US, + CDP_SGI_3_2_US, + }; + + rx_msdu_start = &rx_desc->msdu_start_tlv.rx_msdu_start; + + HAL_RX_GET_MSDU_AGGREGATION(rx_desc, rs); + + rs->ant_signal_db = HAL_RX_GET(rx_msdu_start, + RX_MSDU_START_5, USER_RSSI); + rs->is_stbc = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, STBC); + + reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, SGI); + rs->sgi = sgi_hw_to_cdp[reg_value]; + + reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, RECEPTION_TYPE); + rs->beamformed = (reg_value == HAL_RX_RECEPTION_TYPE_MU_MIMO) ? 1 : 0; + /* TODO: rs->beamformed should be set for SU beamforming also */ +} + +#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2) + +static uint32_t hal_get_link_desc_size_6490(void) +{ + return LINK_DESC_SIZE; +} + +/* + * hal_rx_get_tlv_6490(): API to get the tlv + * + * @rx_tlv: TLV data extracted from the rx packet + * Return: uint8_t + */ +static uint8_t hal_rx_get_tlv_6490(void *rx_tlv) +{ + return HAL_RX_GET(rx_tlv, PHYRX_RSSI_LEGACY_0, RECEIVE_BANDWIDTH); +} + +/** + * hal_rx_proc_phyrx_other_receive_info_tlv_6490() + * - process other receive info TLV + * @rx_tlv_hdr: pointer to TLV header + * @ppdu_info: pointer to ppdu_info + * + * Return: None + */ +static +void hal_rx_proc_phyrx_other_receive_info_tlv_6490(void *rx_tlv_hdr, + void *ppdu_info_handle) +{ + uint32_t tlv_tag, tlv_len; + uint32_t temp_len, other_tlv_len, other_tlv_tag; + void *rx_tlv = (uint8_t *)rx_tlv_hdr + HAL_RX_TLV32_HDR_SIZE; + void *other_tlv_hdr = NULL; + void *other_tlv = NULL; + + tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv_hdr); + tlv_len = HAL_RX_GET_USER_TLV32_LEN(rx_tlv_hdr); + temp_len = 0; + + other_tlv_hdr = rx_tlv + HAL_RX_TLV32_HDR_SIZE; + + other_tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(other_tlv_hdr); + other_tlv_len = HAL_RX_GET_USER_TLV32_LEN(other_tlv_hdr); + temp_len += other_tlv_len; + other_tlv = other_tlv_hdr + HAL_RX_TLV32_HDR_SIZE; + + switch (other_tlv_tag) { + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s unhandled TLV type: %d, TLV len:%d", + __func__, other_tlv_tag, other_tlv_len); + break; + } +} + +/** + * hal_rx_dump_msdu_start_tlv_6490() : dump RX msdu_start TLV in structured + * human readable format. + * @ msdu_start: pointer the msdu_start TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static void hal_rx_dump_msdu_start_tlv_6490(void *msdustart, uint8_t dbg_level) +{ + struct rx_msdu_start *msdu_start = (struct rx_msdu_start *)msdustart; + + __QDF_TRACE_RL(dbg_level, QDF_MODULE_ID_DP, + "rx_msdu_start tlv (1/2) - " + "rxpcu_mpdu_filter_in_category: %x " + "sw_frame_group_id: %x " + "phy_ppdu_id: %x " + "msdu_length: %x " + "ipsec_esp: %x " + "l3_offset: %x " + "ipsec_ah: %x " + "l4_offset: %x " + "msdu_number: %x " + "decap_format: %x " + "ipv4_proto: %x " + "ipv6_proto: %x " + "tcp_proto: %x " + "udp_proto: %x " + "ip_frag: %x " + "tcp_only_ack: %x " + "da_is_bcast_mcast: %x " + "ip4_protocol_ip6_next_header: %x " + "toeplitz_hash_2_or_4: %x " + "flow_id_toeplitz: %x " + "user_rssi: %x " + "pkt_type: %x " + "stbc: %x " + "sgi: %x " + "rate_mcs: %x " + "receive_bandwidth: %x " + "reception_type: %x " + "ppdu_start_timestamp: %u ", + msdu_start->rxpcu_mpdu_filter_in_category, + msdu_start->sw_frame_group_id, + msdu_start->phy_ppdu_id, + msdu_start->msdu_length, + msdu_start->ipsec_esp, + msdu_start->l3_offset, + msdu_start->ipsec_ah, + msdu_start->l4_offset, + msdu_start->msdu_number, + msdu_start->decap_format, + msdu_start->ipv4_proto, + msdu_start->ipv6_proto, + msdu_start->tcp_proto, + msdu_start->udp_proto, + msdu_start->ip_frag, + msdu_start->tcp_only_ack, + msdu_start->da_is_bcast_mcast, + msdu_start->ip4_protocol_ip6_next_header, + msdu_start->toeplitz_hash_2_or_4, + msdu_start->flow_id_toeplitz, + msdu_start->user_rssi, + msdu_start->pkt_type, + msdu_start->stbc, + msdu_start->sgi, + msdu_start->rate_mcs, + msdu_start->receive_bandwidth, + msdu_start->reception_type, + msdu_start->ppdu_start_timestamp); + + __QDF_TRACE_RL(dbg_level, QDF_MODULE_ID_DP, + "rx_msdu_start tlv (2/2) - " + "sw_phy_meta_data: %x ", + msdu_start->sw_phy_meta_data); +} + +/** + * hal_rx_dump_msdu_end_tlv_6490: dump RX msdu_end TLV in structured + * human readable format. + * @ msdu_end: pointer the msdu_end TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static void hal_rx_dump_msdu_end_tlv_6490(void *msduend, + uint8_t dbg_level) +{ + struct rx_msdu_end *msdu_end = (struct rx_msdu_end *)msduend; + + __QDF_TRACE_RL(dbg_level, QDF_MODULE_ID_DP, + "rx_msdu_end tlv (1/3) - " + "rxpcu_mpdu_filter_in_category: %x " + "sw_frame_group_id: %x " + "phy_ppdu_id: %x " + "ip_hdr_chksum: %x " + "tcp_udp_chksum: %x " + "key_id_octet: %x " + "cce_super_rule: %x " + "cce_classify_not_done_truncat: %x " + "cce_classify_not_done_cce_dis: %x " + "ext_wapi_pn_63_48: %x " + "ext_wapi_pn_95_64: %x " + "ext_wapi_pn_127_96: %x " + "reported_mpdu_length: %x " + "first_msdu: %x " + "last_msdu: %x " + "sa_idx_timeout: %x " + "da_idx_timeout: %x " + "msdu_limit_error: %x " + "flow_idx_timeout: %x " + "flow_idx_invalid: %x " + "wifi_parser_error: %x " + "amsdu_parser_error: %x", + msdu_end->rxpcu_mpdu_filter_in_category, + msdu_end->sw_frame_group_id, + msdu_end->phy_ppdu_id, + msdu_end->ip_hdr_chksum, + msdu_end->tcp_udp_chksum, + msdu_end->key_id_octet, + msdu_end->cce_super_rule, + msdu_end->cce_classify_not_done_truncate, + msdu_end->cce_classify_not_done_cce_dis, + msdu_end->ext_wapi_pn_63_48, + msdu_end->ext_wapi_pn_95_64, + msdu_end->ext_wapi_pn_127_96, + msdu_end->reported_mpdu_length, + msdu_end->first_msdu, + msdu_end->last_msdu, + msdu_end->sa_idx_timeout, + msdu_end->da_idx_timeout, + msdu_end->msdu_limit_error, + msdu_end->flow_idx_timeout, + msdu_end->flow_idx_invalid, + msdu_end->wifi_parser_error, + msdu_end->amsdu_parser_error); + + __QDF_TRACE_RL(dbg_level, QDF_MODULE_ID_DP, + "rx_msdu_end tlv (2/3)- " + "sa_is_valid: %x " + "da_is_valid: %x " + "da_is_mcbc: %x " + "l3_header_padding: %x " + "ipv6_options_crc: %x " + "tcp_seq_number: %x " + "tcp_ack_number: %x " + "tcp_flag: %x " + "lro_eligible: %x " + "window_size: %x " + "da_offset: %x " + "sa_offset: %x " + "da_offset_valid: %x " + "sa_offset_valid: %x " + "rule_indication_31_0: %x " + "rule_indication_63_32: %x " + "sa_idx: %x " + "da_idx: %x " + "msdu_drop: %x " + "reo_destination_indication: %x " + "flow_idx: %x " + "fse_metadata: %x " + "cce_metadata: %x " + "sa_sw_peer_id: %x ", + msdu_end->sa_is_valid, + msdu_end->da_is_valid, + msdu_end->da_is_mcbc, + msdu_end->l3_header_padding, + msdu_end->ipv6_options_crc, + msdu_end->tcp_seq_number, + msdu_end->tcp_ack_number, + msdu_end->tcp_flag, + msdu_end->lro_eligible, + msdu_end->window_size, + msdu_end->da_offset, + msdu_end->sa_offset, + msdu_end->da_offset_valid, + msdu_end->sa_offset_valid, + msdu_end->rule_indication_31_0, + msdu_end->rule_indication_63_32, + msdu_end->sa_idx, + msdu_end->da_idx_or_sw_peer_id, + msdu_end->msdu_drop, + msdu_end->reo_destination_indication, + msdu_end->flow_idx, + msdu_end->fse_metadata, + msdu_end->cce_metadata, + msdu_end->sa_sw_peer_id); + __QDF_TRACE_RL(dbg_level, QDF_MODULE_ID_DP, + "rx_msdu_end tlv (3/3)" + "aggregation_count %x " + "flow_aggregation_continuation %x " + "fisa_timeout %x " + "cumulative_l4_checksum %x " + "cumulative_ip_length %x", + msdu_end->aggregation_count, + msdu_end->flow_aggregation_continuation, + msdu_end->fisa_timeout, + msdu_end->cumulative_l4_checksum, + msdu_end->cumulative_ip_length); +} + +/* + * Get tid from RX_MPDU_START + */ +#define HAL_RX_MPDU_INFO_TID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_7_TID_OFFSET)), \ + RX_MPDU_INFO_7_TID_MASK, \ + RX_MPDU_INFO_7_TID_LSB)) + +static uint32_t hal_rx_mpdu_start_tid_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint32_t tid; + + tid = HAL_RX_MPDU_INFO_TID_GET(&mpdu_start->rx_mpdu_info_details); + + return tid; +} + +#define HAL_RX_MSDU_START_RECEPTION_TYPE_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start), \ + RX_MSDU_START_5_RECEPTION_TYPE_OFFSET)), \ + RX_MSDU_START_5_RECEPTION_TYPE_MASK, \ + RX_MSDU_START_5_RECEPTION_TYPE_LSB)) + +/* + * hal_rx_msdu_start_reception_type_get(): API to get the reception type + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(reception_type) + */ +static +uint32_t hal_rx_msdu_start_reception_type_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t reception_type; + + reception_type = HAL_RX_MSDU_START_RECEPTION_TYPE_GET(msdu_start); + + return reception_type; +} + +/** + * hal_rx_msdu_end_da_idx_get_6490: API to get da_idx + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da index + */ +static uint16_t hal_rx_msdu_end_da_idx_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t da_idx; + + da_idx = HAL_RX_MSDU_END_DA_IDX_GET(msdu_end); + + return da_idx; +} +/** + * hal_rx_get_rx_fragment_number_6490(): Function to retrieve rx fragment number + * + * @nbuf: Network buffer + * Returns: rx fragment number + */ +static +uint8_t hal_rx_get_rx_fragment_number_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + /* Return first 4 bits as fragment number */ + return (HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info) & + DOT11_SEQ_FRAG_MASK); +} + +/** + * hal_rx_msdu_end_da_is_mcbc_get_6490(): API to check if pkt is MCBC + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_mcbc + */ +static uint8_t +hal_rx_msdu_end_da_is_mcbc_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_DA_IS_MCBC_GET(msdu_end); +} + +/** + * hal_rx_msdu_end_sa_is_valid_get_6490(): API to get_6490 the + * sa_is_valid bit from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_is_valid bit + */ +static uint8_t +hal_rx_msdu_end_sa_is_valid_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t sa_is_valid; + + sa_is_valid = HAL_RX_MSDU_END_SA_IS_VALID_GET(msdu_end); + + return sa_is_valid; +} + +/** + * hal_rx_msdu_end_sa_idx_get_6490(): API to get_6490 the + * sa_idx from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_idx (SA AST index) + */ +static +uint16_t hal_rx_msdu_end_sa_idx_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t sa_idx; + + sa_idx = HAL_RX_MSDU_END_SA_IDX_GET(msdu_end); + + return sa_idx; +} + +/** + * hal_rx_desc_is_first_msdu_6490() - Check if first msdu + * + * @hal_soc_hdl: hal_soc handle + * @hw_desc_addr: hardware descriptor address + * + * Return: 0 - success/ non-zero failure + */ +static uint32_t hal_rx_desc_is_first_msdu_6490(void *hw_desc_addr) +{ + struct rx_pkt_tlvs *rx_tlvs = (struct rx_pkt_tlvs *)hw_desc_addr; + struct rx_msdu_end *msdu_end = &rx_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_GET(msdu_end, RX_MSDU_END_10, FIRST_MSDU); +} + +/** + * hal_rx_msdu_end_l3_hdr_padding_get_6490(): API to get_6490 the + * l3_header padding from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: number of l3 header padding bytes + */ +static uint32_t hal_rx_msdu_end_l3_hdr_padding_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint32_t l3_header_padding; + + l3_header_padding = HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(msdu_end); + + return l3_header_padding; +} + +/* + * @ hal_rx_encryption_info_valid_6490: Returns encryption type. + * + * @ buf: rx_tlv_hdr of the received packet + * @ Return: encryption type + */ +static uint32_t hal_rx_encryption_info_valid_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + uint32_t encryption_info = HAL_RX_MPDU_ENCRYPTION_INFO_VALID(mpdu_info); + + return encryption_info; +} + +/* + * @ hal_rx_print_pn_6490: Prints the PN of rx packet. + * + * @ buf: rx_tlv_hdr of the received packet + * @ Return: void + */ +static void hal_rx_print_pn_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + uint32_t pn_31_0 = HAL_RX_MPDU_PN_31_0_GET(mpdu_info); + uint32_t pn_63_32 = HAL_RX_MPDU_PN_63_32_GET(mpdu_info); + uint32_t pn_95_64 = HAL_RX_MPDU_PN_95_64_GET(mpdu_info); + uint32_t pn_127_96 = HAL_RX_MPDU_PN_127_96_GET(mpdu_info); + + hal_debug("PN number pn_127_96 0x%x pn_95_64 0x%x pn_63_32 0x%x pn_31_0 0x%x ", + pn_127_96, pn_95_64, pn_63_32, pn_31_0); +} + +/** + * hal_rx_msdu_end_first_msdu_get_6490: API to get first msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: first_msdu + */ +static uint8_t hal_rx_msdu_end_first_msdu_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t first_msdu; + + first_msdu = HAL_RX_MSDU_END_FIRST_MSDU_GET(msdu_end); + + return first_msdu; +} + +/** + * hal_rx_msdu_end_da_is_valid_get_6490: API to check if da is valid + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_valid + */ +static uint8_t hal_rx_msdu_end_da_is_valid_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t da_is_valid; + + da_is_valid = HAL_RX_MSDU_END_DA_IS_VALID_GET(msdu_end); + + return da_is_valid; +} + +/** + * hal_rx_msdu_end_last_msdu_get_6490: API to get last msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: last_msdu + */ +static uint8_t hal_rx_msdu_end_last_msdu_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t last_msdu; + + last_msdu = HAL_RX_MSDU_END_LAST_MSDU_GET(msdu_end); + + return last_msdu; +} + +/* + * hal_rx_get_mpdu_mac_ad4_valid_6490(): Retrieves if mpdu 4th addr is valid + * + * @nbuf: Network buffer + * Returns: value of mpdu 4th address valid field + */ +static bool hal_rx_get_mpdu_mac_ad4_valid_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + bool ad4_valid = 0; + + ad4_valid = HAL_RX_MPDU_GET_MAC_AD4_VALID(rx_mpdu_info); + + return ad4_valid; +} + +/** + * hal_rx_mpdu_start_sw_peer_id_get_6490: Retrieve sw peer_id + * @buf: network buffer + * + * Return: sw peer_id + */ +static uint32_t hal_rx_mpdu_start_sw_peer_id_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + return HAL_RX_MPDU_INFO_SW_PEER_ID_GET( + &mpdu_start->rx_mpdu_info_details); +} + +/** + * hal_rx_mpdu_get_to_ds_6490(): API to get the tods info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(to_ds) + */ +static uint32_t hal_rx_mpdu_get_to_ds_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + return HAL_RX_MPDU_GET_TODS(mpdu_info); +} + +/* + * hal_rx_mpdu_get_fr_ds_6490(): API to get the from ds info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(fr_ds) + */ +static uint32_t hal_rx_mpdu_get_fr_ds_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + return HAL_RX_MPDU_GET_FROMDS(mpdu_info); +} + +/* + * hal_rx_get_mpdu_frame_control_valid_6490(): Retrieves mpdu + * frame control valid + * + * @nbuf: Network buffer + * Returns: value of frame control valid field + */ +static uint8_t hal_rx_get_mpdu_frame_control_valid_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(rx_mpdu_info); +} + +/* + * hal_rx_mpdu_get_addr1_6490(): API to check get address1 of the mpdu + * + * @buf: pointer to the start of RX PKT TLV headera + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr1_6490(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr1 { + uint32_t ad1_31_0; + uint16_t ad1_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr1 *addr = (struct hal_addr1 *)mac_addr; + uint32_t mac_addr_ad1_valid; + + mac_addr_ad1_valid = HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(mpdu_info); + + if (mac_addr_ad1_valid) { + addr->ad1_31_0 = HAL_RX_MPDU_AD1_31_0_GET(mpdu_info); + addr->ad1_47_32 = HAL_RX_MPDU_AD1_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr2_6490(): API to check get address2 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr2_6490(uint8_t *buf, + uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr2 { + uint16_t ad2_15_0; + uint32_t ad2_47_16; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr2 *addr = (struct hal_addr2 *)mac_addr; + uint32_t mac_addr_ad2_valid; + + mac_addr_ad2_valid = HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(mpdu_info); + + if (mac_addr_ad2_valid) { + addr->ad2_15_0 = HAL_RX_MPDU_AD2_15_0_GET(mpdu_info); + addr->ad2_47_16 = HAL_RX_MPDU_AD2_47_16_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr3_6490(): API to get address3 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr3_6490(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr3 { + uint32_t ad3_31_0; + uint16_t ad3_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr3 *addr = (struct hal_addr3 *)mac_addr; + uint32_t mac_addr_ad3_valid; + + mac_addr_ad3_valid = HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(mpdu_info); + + if (mac_addr_ad3_valid) { + addr->ad3_31_0 = HAL_RX_MPDU_AD3_31_0_GET(mpdu_info); + addr->ad3_47_32 = HAL_RX_MPDU_AD3_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr4_6490(): API to get address4 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr4_6490(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr4 { + uint32_t ad4_31_0; + uint16_t ad4_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr4 *addr = (struct hal_addr4 *)mac_addr; + uint32_t mac_addr_ad4_valid; + + mac_addr_ad4_valid = HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(mpdu_info); + + if (mac_addr_ad4_valid) { + addr->ad4_31_0 = HAL_RX_MPDU_AD4_31_0_GET(mpdu_info); + addr->ad4_47_32 = HAL_RX_MPDU_AD4_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_get_mpdu_sequence_control_valid_6490(): Get mpdu + * sequence control valid + * + * @nbuf: Network buffer + * Returns: value of sequence control valid field + */ +static uint8_t hal_rx_get_mpdu_sequence_control_valid_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(rx_mpdu_info); +} + +/** + * hal_rx_is_unicast_6490: check packet is unicast frame or not. + * + * @ buf: pointer to rx pkt TLV. + * + * Return: true on unicast. + */ +static bool hal_rx_is_unicast_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint32_t grp_id; + uint8_t *rx_mpdu_info = (uint8_t *)&mpdu_start->rx_mpdu_info_details; + + grp_id = (_HAL_MS((*_OFFSET_TO_WORD_PTR((rx_mpdu_info), + RX_MPDU_INFO_9_SW_FRAME_GROUP_ID_OFFSET)), + RX_MPDU_INFO_9_SW_FRAME_GROUP_ID_MASK, + RX_MPDU_INFO_9_SW_FRAME_GROUP_ID_LSB)); + + return (HAL_MPDU_SW_FRAME_GROUP_UNICAST_DATA == grp_id) ? true : false; +} + +/** + * hal_rx_tid_get_6490: get tid based on qos control valid. + * @hal_soc_hdl: hal_soc handle + * @ buf: pointer to rx pkt TLV. + * + * Return: tid + */ +static uint32_t hal_rx_tid_get_6490(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint8_t *rx_mpdu_info = (uint8_t *)&mpdu_start->rx_mpdu_info_details; + uint8_t qos_control_valid = + (_HAL_MS((*_OFFSET_TO_WORD_PTR((rx_mpdu_info), + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_OFFSET)), + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_MASK, + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_LSB)); + + if (qos_control_valid) + return hal_rx_mpdu_start_tid_get_6490(buf); + + return HAL_RX_NON_QOS_TID; +} + +/** + * hal_rx_hw_desc_get_ppduid_get_6490(): retrieve ppdu id + * @hw_desc_addr: hw addr + * + * Return: ppdu id + */ +static uint32_t hal_rx_hw_desc_get_ppduid_get_6490(void *hw_desc_addr) +{ + struct rx_mpdu_info *rx_mpdu_info; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + + rx_mpdu_info = + &rx_desc->mpdu_start_tlv.rx_mpdu_start.rx_mpdu_info_details; + + return HAL_RX_GET(rx_mpdu_info, RX_MPDU_INFO_9, PHY_PPDU_ID); +} + +/** + * hal_reo_status_get_header_6490 - Process reo desc info + * @d - Pointer to reo descriptior + * @b - tlv type info + * @h1 - Pointer to hal_reo_status_header where info to be stored + * + * Return - none. + * + */ +static void hal_reo_status_get_header_6490(uint32_t *d, int b, void *h1) +{ + uint32_t val1 = 0; + struct hal_reo_status_header *h = + (struct hal_reo_status_header *)h1; + + switch (b) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_0, + STATUS_HEADER_REO_STATUS_NUMBER)]; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_0, + STATUS_HEADER_REO_STATUS_NUMBER)]; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_0, + STATUS_HEADER_REO_STATUS_NUMBER)]; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_0, + STATUS_HEADER_REO_STATUS_NUMBER)]; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_0, + STATUS_HEADER_REO_STATUS_NUMBER)]; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + val1 = + d[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_0, + STATUS_HEADER_REO_STATUS_NUMBER)]; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UPDATE_RX_REO_QUEUE_STATUS_0, + STATUS_HEADER_REO_STATUS_NUMBER)]; + break; + default: + qdf_nofl_err("ERROR: Unknown tlv\n"); + break; + } + h->cmd_num = + HAL_GET_FIELD( + UNIFORM_REO_STATUS_HEADER_0, REO_STATUS_NUMBER, + val1); + h->exec_time = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, + CMD_EXECUTION_TIME, val1); + h->status = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, + REO_CMD_EXECUTION_STATUS, val1); + switch (b) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_1, + STATUS_HEADER_TIMESTAMP)]; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_1, + STATUS_HEADER_TIMESTAMP)]; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_1, + STATUS_HEADER_TIMESTAMP)]; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_1, + STATUS_HEADER_TIMESTAMP)]; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_1, + STATUS_HEADER_TIMESTAMP)]; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + val1 = + d[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_1, + STATUS_HEADER_TIMESTAMP)]; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UPDATE_RX_REO_QUEUE_STATUS_1, + STATUS_HEADER_TIMESTAMP)]; + break; + default: + qdf_nofl_err("ERROR: Unknown tlv\n"); + break; + } + h->tstamp = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_1, TIMESTAMP, val1); +} + +/** + * hal_tx_desc_set_mesh_en_6490 - Set mesh_enable flag in Tx descriptor + * @desc: Handle to Tx Descriptor + * @en: For raw WiFi frames, this indicates transmission to a mesh STA, + * enabling the interpretation of the 'Mesh Control Present' bit + * (bit 8) of QoS Control (otherwise this bit is ignored), + * For native WiFi frames, this indicates that a 'Mesh Control' field + * is present between the header and the LLC. + * + * Return: void + */ +static inline +void hal_tx_desc_set_mesh_en_6490(void *desc, uint8_t en) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_5, MESH_ENABLE) |= + HAL_TX_SM(TCL_DATA_CMD_5, MESH_ENABLE, en); +} + +static +void *hal_rx_msdu0_buffer_addr_lsb_6490(void *link_desc_va) +{ + return (void *)HAL_RX_MSDU0_BUFFER_ADDR_LSB(link_desc_va); +} + +static +void *hal_rx_msdu_desc_info_ptr_get_6490(void *msdu0) +{ + return (void *)HAL_RX_MSDU_DESC_INFO_PTR_GET(msdu0); +} + +static +void *hal_ent_mpdu_desc_info_6490(void *ent_ring_desc) +{ + return (void *)HAL_ENT_MPDU_DESC_INFO(ent_ring_desc); +} + +static +void *hal_dst_mpdu_desc_info_6490(void *dst_ring_desc) +{ + return (void *)HAL_DST_MPDU_DESC_INFO(dst_ring_desc); +} + +static +uint8_t hal_rx_get_fc_valid_6490(uint8_t *buf) +{ + return HAL_RX_GET_FC_VALID(buf); +} + +static uint8_t hal_rx_get_to_ds_flag_6490(uint8_t *buf) +{ + return HAL_RX_GET_TO_DS_FLAG(buf); +} + +static uint8_t hal_rx_get_mac_addr2_valid_6490(uint8_t *buf) +{ + return HAL_RX_GET_MAC_ADDR2_VALID(buf); +} + +static uint8_t hal_rx_get_filter_category_6490(uint8_t *buf) +{ + return HAL_RX_GET_FILTER_CATEGORY(buf); +} + +static uint32_t +hal_rx_get_ppdu_id_6490(uint8_t *buf) +{ + return HAL_RX_GET_PPDU_ID(buf); +} + +/** + * hal_reo_config_6490(): Set reo config parameters + * @soc: hal soc handle + * @reg_val: value to be set + * @reo_params: reo parameters + * + * Return: void + */ +static +void hal_reo_config_6490(struct hal_soc *soc, + uint32_t reg_val, + struct hal_reo_params *reo_params) +{ + HAL_REO_R0_CONFIG(soc, reg_val, reo_params); +} + +/** + * hal_rx_msdu_desc_info_get_ptr_6490() - Get msdu desc info ptr + * @msdu_details_ptr - Pointer to msdu_details_ptr + * + * Return - Pointer to rx_msdu_desc_info structure. + * + */ +static void *hal_rx_msdu_desc_info_get_ptr_6490(void *msdu_details_ptr) +{ + return HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr); +} + +/** + * hal_rx_link_desc_msdu0_ptr_6490 - Get pointer to rx_msdu details + * @link_desc - Pointer to link desc + * + * Return - Pointer to rx_msdu_details structure + * + */ +static void *hal_rx_link_desc_msdu0_ptr_6490(void *link_desc) +{ + return HAL_RX_LINK_DESC_MSDU0_PTR(link_desc); +} + +/** + * hal_rx_msdu_flow_idx_get_6490: API to get flow index + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index value from MSDU END TLV + */ +static inline uint32_t hal_rx_msdu_flow_idx_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_GET(msdu_end); +} + +/** + * hal_rx_msdu_get_reo_destination_indication_6490: API to get + * reo_destination_indication from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * @reo_destination_indication: pointer to return value of reo_destination_indication + * + * Return: none + */ +static inline void +hal_rx_msdu_get_reo_destination_indication_6490(uint8_t *buf, + uint32_t *reo_destination_indication) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + *reo_destination_indication = HAL_RX_MSDU_END_REO_DEST_IND_GET(msdu_end); +} + +/** + * hal_rx_msdu_flow_idx_invalid_6490: API to get flow index invalid + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index invalid value from MSDU END TLV + */ +static bool hal_rx_msdu_flow_idx_invalid_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(msdu_end); +} + +/** + * hal_rx_msdu_flow_idx_timeout_6490: API to get flow index timeout + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index timeout value from MSDU END TLV + */ +static bool hal_rx_msdu_flow_idx_timeout_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(msdu_end); +} + +/** + * hal_rx_msdu_fse_metadata_get_6490: API to get FSE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: fse metadata value from MSDU END TLV + */ +static uint32_t hal_rx_msdu_fse_metadata_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FSE_METADATA_GET(msdu_end); +} + +/** + * hal_rx_msdu_cce_metadata_get_6490: API to get CCE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: cce_metadata + */ +static uint16_t +hal_rx_msdu_cce_metadata_get_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_CCE_METADATA_GET(msdu_end); +} + +/** + * hal_rx_msdu_get_flow_params_6490: API to get flow index, flow index invalid + * and flow index timeout from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * @flow_invalid: pointer to return value of flow_idx_valid + * @flow_timeout: pointer to return value of flow_idx_timeout + * @flow_index: pointer to return value of flow_idx + * + * Return: none + */ +static inline void +hal_rx_msdu_get_flow_params_6490(uint8_t *buf, + bool *flow_invalid, + bool *flow_timeout, + uint32_t *flow_index) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + *flow_invalid = HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(msdu_end); + *flow_timeout = HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(msdu_end); + *flow_index = HAL_RX_MSDU_END_FLOW_IDX_GET(msdu_end); +} + +/** + * hal_rx_tlv_get_tcp_chksum_6490() - API to get tcp checksum + * @buf: rx_tlv_hdr + * + * Return: tcp checksum + */ +static uint16_t +hal_rx_tlv_get_tcp_chksum_6490(uint8_t *buf) +{ + return HAL_RX_TLV_GET_TCP_CHKSUM(buf); +} + +/** + * hal_rx_get_rx_sequence_6490(): Function to retrieve rx sequence number + * + * @nbuf: Network buffer + * Returns: rx sequence number + */ +static +uint16_t hal_rx_get_rx_sequence_6490(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info); +} + +/** + * hal_get_window_address_6490(): Function to get hp/tp address + * @hal_soc: Pointer to hal_soc + * @addr: address offset of register + * + * Return: modified address offset of register + */ +static inline qdf_iomem_t hal_get_window_address_6490(struct hal_soc *hal_soc, + qdf_iomem_t addr) +{ + return addr; +} + +/** + * hal_rx_get_fisa_cumulative_l4_checksum_6490() - Retrieve cumulative + * checksum + * @buf: buffer pointer + * + * Return: cumulative checksum + */ +static inline +uint16_t hal_rx_get_fisa_cumulative_l4_checksum_6490(uint8_t *buf) +{ + return HAL_RX_TLV_GET_FISA_CUMULATIVE_L4_CHECKSUM(buf); +} + +/** + * hal_rx_get_fisa_cumulative_ip_length_6490() - Retrieve cumulative + * ip length + * @buf: buffer pointer + * + * Return: cumulative length + */ +static inline +uint16_t hal_rx_get_fisa_cumulative_ip_length_6490(uint8_t *buf) +{ + return HAL_RX_TLV_GET_FISA_CUMULATIVE_IP_LENGTH(buf); +} + +/** + * hal_rx_get_udp_proto_6490() - Retrieve udp proto value + * @buf: buffer + * + * Return: udp proto bit + */ +static inline +bool hal_rx_get_udp_proto_6490(uint8_t *buf) +{ + return HAL_RX_TLV_GET_UDP_PROTO(buf); +} + +/** + * hal_rx_get_flow_agg_continuation_6490() - retrieve flow agg + * continuation + * @buf: buffer + * + * Return: flow agg + */ +static inline +bool hal_rx_get_flow_agg_continuation_6490(uint8_t *buf) +{ + return HAL_RX_TLV_GET_FLOW_AGGR_CONT(buf); +} + +/** + * hal_rx_get_flow_agg_count_6490()- Retrieve flow agg count + * @buf: buffer + * + * Return: flow agg count + */ +static inline +uint8_t hal_rx_get_flow_agg_count_6490(uint8_t *buf) +{ + return HAL_RX_TLV_GET_FLOW_AGGR_COUNT(buf); +} + +/** + * hal_rx_get_fisa_timeout_6490() - Retrieve fisa timeout + * @buf: buffer + * + * Return: fisa timeout + */ +static inline +bool hal_rx_get_fisa_timeout_6490(uint8_t *buf) +{ + return HAL_RX_TLV_GET_FISA_TIMEOUT(buf); +} + +/** + * hal_reo_set_err_dst_remap_6490(): Function to set REO error destination + * ring remap register + * @hal_soc: Pointer to hal_soc + * + * Return: none. + */ +static void +hal_reo_set_err_dst_remap_6490(void *hal_soc) +{ + /* + * Set REO error 2k jump (error code 5) / OOR (error code 7) + * frame routed to REO2TCL ring. + */ + uint32_t dst_remap_ix0 = + HAL_REO_ERR_REMAP_IX0(REO_REMAP_RELEASE, 0) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_RELEASE, 1) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_RELEASE, 2) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_RELEASE, 3) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_RELEASE, 4) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_TCL, 5) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_TCL, 6) | + HAL_REO_ERR_REMAP_IX0(REO_REMAP_TCL, 7); + + uint32_t dst_remap_ix1 = + HAL_REO_ERR_REMAP_IX1(REO_REMAP_RELEASE, 14) | + HAL_REO_ERR_REMAP_IX1(REO_REMAP_RELEASE, 13) | + HAL_REO_ERR_REMAP_IX1(REO_REMAP_RELEASE, 12) | + HAL_REO_ERR_REMAP_IX1(REO_REMAP_RELEASE, 11) | + HAL_REO_ERR_REMAP_IX1(REO_REMAP_RELEASE, 10) | + HAL_REO_ERR_REMAP_IX1(REO_REMAP_RELEASE, 9) | + HAL_REO_ERR_REMAP_IX1(REO_REMAP_TCL, 8); + + HAL_REG_WRITE(hal_soc, + HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + dst_remap_ix0); + + hal_info("HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0 0x%x", + HAL_REG_READ( + hal_soc, + HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET))); + + HAL_REG_WRITE(hal_soc, + HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_1_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + dst_remap_ix1); + + hal_info("HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_1 0x%x", + HAL_REG_READ( + hal_soc, + HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_1_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET))); +} + +struct hal_hw_txrx_ops qca6490_hal_hw_txrx_ops = { + /* init and setup */ + hal_srng_dst_hw_init_generic, + hal_srng_src_hw_init_generic, + hal_get_hw_hptp_generic, + hal_reo_setup_generic, + hal_setup_link_idle_list_generic, + hal_get_window_address_6490, + hal_reo_set_err_dst_remap_6490, + + /* tx */ + hal_tx_desc_set_dscp_tid_table_id_6490, + hal_tx_set_dscp_tid_map_6490, + hal_tx_update_dscp_tid_6490, + hal_tx_desc_set_lmac_id_6490, + hal_tx_desc_set_buf_addr_generic, + hal_tx_desc_set_search_type_generic, + hal_tx_desc_set_search_index_generic, + hal_tx_desc_set_cache_set_num_generic, + hal_tx_comp_get_status_generic, + hal_tx_comp_get_release_reason_generic, + hal_get_wbm_internal_error_generic, + hal_tx_desc_set_mesh_en_6490, + + /* rx */ + hal_rx_msdu_start_nss_get_6490, + hal_rx_mon_hw_desc_get_mpdu_status_6490, + hal_rx_get_tlv_6490, + hal_rx_proc_phyrx_other_receive_info_tlv_6490, + hal_rx_dump_msdu_start_tlv_6490, + hal_rx_dump_msdu_end_tlv_6490, + hal_get_link_desc_size_6490, + hal_rx_mpdu_start_tid_get_6490, + hal_rx_msdu_start_reception_type_get_6490, + hal_rx_msdu_end_da_idx_get_6490, + hal_rx_msdu_desc_info_get_ptr_6490, + hal_rx_link_desc_msdu0_ptr_6490, + hal_reo_status_get_header_6490, + hal_rx_status_get_tlv_info_generic, + hal_rx_wbm_err_info_get_generic, + hal_rx_dump_mpdu_start_tlv_generic, + + hal_tx_set_pcp_tid_map_generic, + hal_tx_update_pcp_tid_generic, + hal_tx_update_tidmap_prty_generic, + hal_rx_get_rx_fragment_number_6490, + hal_rx_msdu_end_da_is_mcbc_get_6490, + hal_rx_msdu_end_sa_is_valid_get_6490, + hal_rx_msdu_end_sa_idx_get_6490, + hal_rx_desc_is_first_msdu_6490, + hal_rx_msdu_end_l3_hdr_padding_get_6490, + hal_rx_encryption_info_valid_6490, + hal_rx_print_pn_6490, + hal_rx_msdu_end_first_msdu_get_6490, + hal_rx_msdu_end_da_is_valid_get_6490, + hal_rx_msdu_end_last_msdu_get_6490, + hal_rx_get_mpdu_mac_ad4_valid_6490, + hal_rx_mpdu_start_sw_peer_id_get_6490, + hal_rx_mpdu_get_to_ds_6490, + hal_rx_mpdu_get_fr_ds_6490, + hal_rx_get_mpdu_frame_control_valid_6490, + hal_rx_mpdu_get_addr1_6490, + hal_rx_mpdu_get_addr2_6490, + hal_rx_mpdu_get_addr3_6490, + hal_rx_mpdu_get_addr4_6490, + hal_rx_get_mpdu_sequence_control_valid_6490, + hal_rx_is_unicast_6490, + hal_rx_tid_get_6490, + hal_rx_hw_desc_get_ppduid_get_6490, + NULL, + NULL, + hal_rx_msdu0_buffer_addr_lsb_6490, + hal_rx_msdu_desc_info_ptr_get_6490, + hal_ent_mpdu_desc_info_6490, + hal_dst_mpdu_desc_info_6490, + hal_rx_get_fc_valid_6490, + hal_rx_get_to_ds_flag_6490, + hal_rx_get_mac_addr2_valid_6490, + hal_rx_get_filter_category_6490, + hal_rx_get_ppdu_id_6490, + hal_reo_config_6490, + hal_rx_msdu_flow_idx_get_6490, + hal_rx_msdu_flow_idx_invalid_6490, + hal_rx_msdu_flow_idx_timeout_6490, + hal_rx_msdu_fse_metadata_get_6490, + hal_rx_msdu_cce_metadata_get_6490, + hal_rx_msdu_get_flow_params_6490, + hal_rx_tlv_get_tcp_chksum_6490, + hal_rx_get_rx_sequence_6490, +#if defined(QCA_WIFI_QCA6490) && defined(WLAN_CFR_ENABLE) && \ + defined(WLAN_ENH_CFR_ENABLE) + hal_rx_get_bb_info_6490, + hal_rx_get_rtt_info_6490, +#else + NULL, + NULL, +#endif + /* rx - msdu end fast path info fields */ + hal_rx_msdu_packet_metadata_get_generic, + hal_rx_get_fisa_cumulative_l4_checksum_6490, + hal_rx_get_fisa_cumulative_ip_length_6490, + hal_rx_get_udp_proto_6490, + hal_rx_get_flow_agg_continuation_6490, + hal_rx_get_flow_agg_count_6490, + hal_rx_get_fisa_timeout_6490, + hal_rx_msdu_get_reo_destination_indication_6490 +}; + +struct hal_hw_srng_config hw_srng_table_6490[] = { + /* TODO: max_rings can populated by querying HW capabilities */ + { /* REO_DST */ + .start_ring_id = HAL_SRNG_REO2SW1, + .max_rings = 4, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2SW1_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + .reg_size = { + HWIO_REO_R0_REO2SW2_RING_BASE_LSB_ADDR(0) - + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(0), + HWIO_REO_R2_REO2SW2_RING_HP_ADDR(0) - + HWIO_REO_R2_REO2SW1_RING_HP_ADDR(0), + }, + .max_size = + HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_EXCEPTION */ + /* Designating REO2TCL ring as exception ring. This ring is + * similar to other REO2SW rings though it is named as REO2TCL. + * Any of theREO2SW rings can be used as exception ring. + */ + .start_ring_id = HAL_SRNG_REO2TCL, + .max_rings = 1, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2TCL_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2TCL_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_REINJECT */ + .start_ring_id = HAL_SRNG_SW2REO, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_SW2REO_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_SW2REO_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_CMD */ + .start_ring_id = HAL_SRNG_REO_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_REO_CMD_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_CMD_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_STATUS */ + .start_ring_id = HAL_SRNG_REO_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats_status)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_DATA */ + .start_ring_id = HAL_SRNG_SW2TCL1, + .max_rings = 3, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_data_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + .reg_size = { + HWIO_TCL_R0_SW2TCL2_RING_BASE_LSB_ADDR(0) - + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(0), + HWIO_TCL_R2_SW2TCL2_RING_HP_ADDR(0) - + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(0), + }, + .max_size = + HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_CMD */ + .start_ring_id = HAL_SRNG_SW2TCL_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_gse_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL_CREDIT_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL_CREDIT_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_TCL_R0_SW2TCL_CREDIT_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_SW2TCL_CREDIT_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_STATUS */ + .start_ring_id = HAL_SRNG_TCL_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_status_ring)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_TCL_STATUS1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_SRC */ + .start_ring_id = HAL_SRNG_CE_0_SRC, + .max_rings = 12, + .entry_size = sizeof(struct ce_src_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET), + }, + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_DST */ + .start_ring_id = HAL_SRNG_CE_0_DST, + .max_rings = 12, + .entry_size = 8 >> 2, + /*TODO: entry_size above should actually be + * sizeof(struct ce_dst_desc) >> 2, but couldn't find definition + * of struct ce_dst_desc in HW header files + */ + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + }, + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_DST_STATUS */ + .start_ring_id = HAL_SRNG_CE_0_DST_STATUS, + .max_rings = 12, + .entry_size = sizeof(struct ce_stat_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + }, + /* TODO: check destination status ring registers */ + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* WBM_IDLE_LINK */ + .start_ring_id = HAL_SRNG_WBM_IDLE_LINK, + .max_rings = 1, + .entry_size = sizeof(struct wbm_link_descriptor_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM_IDLE_LINK_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* SW2WBM_RELEASE */ + .start_ring_id = HAL_SRNG_WBM_SW_RELEASE, + .max_rings = 1, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_SW_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_SW_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* WBM2SW_RELEASE */ + .start_ring_id = HAL_SRNG_WBM2SW0_RELEASE, +#ifdef CONFIG_PLD_PCIE_FW_SIM + .max_rings = 5, +#else + .max_rings = 4, +#endif + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .reg_size = { + HWIO_WBM_R0_WBM2SW1_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW1_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .max_size = + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* RXDMA_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA0_BUF0, +#ifdef IPA_OFFLOAD + .max_rings = 3, +#else + .max_rings = 2, +#endif + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW0, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA2_BUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_STATUS */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW1, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_DESC */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_DESC, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* DIR_BUF_RX_DMA_SRC */ + .start_ring_id = HAL_SRNG_DIR_BUF_RX_SRC_DMA_RING, + /* + * one ring is for spectral scan + * the other is for cfr + */ + .max_rings = 2, + .entry_size = 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, +#ifdef WLAN_FEATURE_CIF_CFR + { /* WIFI_POS_SRC */ + .start_ring_id = HAL_SRNG_WIFI_POS_SRC_DMA_RING, + .max_rings = 1, + .entry_size = sizeof(wmi_oem_dma_buf_release_entry) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, +#endif +}; + +int32_t hal_hw_reg_offset_qca6490[] = { + /* dst */ + REG_OFFSET(DST, HP), + REG_OFFSET(DST, TP), + REG_OFFSET(DST, ID), + REG_OFFSET(DST, MISC), + REG_OFFSET(DST, HP_ADDR_LSB), + REG_OFFSET(DST, HP_ADDR_MSB), + REG_OFFSET(DST, MSI1_BASE_LSB), + REG_OFFSET(DST, MSI1_BASE_MSB), + REG_OFFSET(DST, MSI1_DATA), + REG_OFFSET(DST, BASE_LSB), + REG_OFFSET(DST, BASE_MSB), + REG_OFFSET(DST, PRODUCER_INT_SETUP), + /* src */ + REG_OFFSET(SRC, HP), + REG_OFFSET(SRC, TP), + REG_OFFSET(SRC, ID), + REG_OFFSET(SRC, MISC), + REG_OFFSET(SRC, TP_ADDR_LSB), + REG_OFFSET(SRC, TP_ADDR_MSB), + REG_OFFSET(SRC, MSI1_BASE_LSB), + REG_OFFSET(SRC, MSI1_BASE_MSB), + REG_OFFSET(SRC, MSI1_DATA), + REG_OFFSET(SRC, BASE_LSB), + REG_OFFSET(SRC, BASE_MSB), + REG_OFFSET(SRC, CONSUMER_INT_SETUP_IX0), + REG_OFFSET(SRC, CONSUMER_INT_SETUP_IX1), +}; + +/** + * hal_qca6490_attach() - Attach 6490 target specific hal_soc ops, + * offset and srng table + */ +void hal_qca6490_attach(struct hal_soc *hal_soc) +{ + hal_soc->hw_srng_table = hw_srng_table_6490; + hal_soc->hal_hw_reg_offset = hal_hw_reg_offset_qca6490; + hal_soc->ops = &qca6490_hal_hw_txrx_ops; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6490/hal_6490_rx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6490/hal_6490_rx.h new file mode 100644 index 0000000000000000000000000000000000000000..8859e95eb02febc828f1bbd0bb665ae9611a28af --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6490/hal_6490_rx.h @@ -0,0 +1,467 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HAL_6490_RX_H_ +#define _HAL_6490_RX_H_ +#include "qdf_util.h" +#include "qdf_types.h" +#include "qdf_lock.h" +#include "qdf_mem.h" +#include "qdf_nbuf.h" +#include "tcl_data_cmd.h" +#include "mac_tcl_reg_seq_hwioreg.h" +#include "phyrx_rssi_legacy.h" +#include "rx_msdu_start.h" +#include "tlv_tag_def.h" +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "cdp_txrx_mon_struct.h" +#include "qdf_trace.h" +#include "hal_rx.h" +#include "hal_tx.h" +#include "dp_types.h" +#include "hal_api_mon.h" +#include "phyrx_other_receive_info_ru_details.h" + +#define HAL_RX_MSDU_START_MIMO_SS_BITMAP(_rx_msdu_start)\ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\ + RX_MSDU_START_5_MIMO_SS_BITMAP_OFFSET)), \ + RX_MSDU_START_5_MIMO_SS_BITMAP_MASK, \ + RX_MSDU_START_5_MIMO_SS_BITMAP_LSB)) + +#define HAL_RX_MPDU_GET_SEQUENCE_NUMBER(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_NUMBER_OFFSET)), \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_NUMBER_MASK, \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_NUMBER_LSB)) + +#define HAL_RX_MSDU_END_DA_IS_MCBC_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_DA_IS_MCBC_OFFSET)), \ + RX_MSDU_END_10_DA_IS_MCBC_MASK, \ + RX_MSDU_END_10_DA_IS_MCBC_LSB)) + +#define HAL_RX_MSDU_END_SA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_SA_IS_VALID_OFFSET)), \ + RX_MSDU_END_10_SA_IS_VALID_MASK, \ + RX_MSDU_END_10_SA_IS_VALID_LSB)) + +#define HAL_RX_MSDU_END_SA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_11_SA_IDX_OFFSET)), \ + RX_MSDU_END_11_SA_IDX_MASK, \ + RX_MSDU_END_11_SA_IDX_LSB)) + +#define HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_L3_HEADER_PADDING_OFFSET)), \ + RX_MSDU_END_10_L3_HEADER_PADDING_MASK, \ + RX_MSDU_END_10_L3_HEADER_PADDING_LSB)) + +#define HAL_RX_MPDU_ENCRYPTION_INFO_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_FRAME_ENCRYPTION_INFO_VALID_OFFSET)), \ + RX_MPDU_INFO_11_FRAME_ENCRYPTION_INFO_VALID_MASK, \ + RX_MPDU_INFO_11_FRAME_ENCRYPTION_INFO_VALID_LSB)) + +#define HAL_RX_MPDU_PN_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_3_PN_31_0_OFFSET)), \ + RX_MPDU_INFO_3_PN_31_0_MASK, \ + RX_MPDU_INFO_3_PN_31_0_LSB)) + +#define HAL_RX_MPDU_PN_63_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_4_PN_63_32_OFFSET)), \ + RX_MPDU_INFO_4_PN_63_32_MASK, \ + RX_MPDU_INFO_4_PN_63_32_LSB)) + +#define HAL_RX_MPDU_PN_95_64_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_5_PN_95_64_OFFSET)), \ + RX_MPDU_INFO_5_PN_95_64_MASK, \ + RX_MPDU_INFO_5_PN_95_64_LSB)) + +#define HAL_RX_MPDU_PN_127_96_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_6_PN_127_96_OFFSET)), \ + RX_MPDU_INFO_6_PN_127_96_MASK, \ + RX_MPDU_INFO_6_PN_127_96_LSB)) + +#define HAL_RX_MSDU_END_FIRST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_FIRST_MSDU_OFFSET)), \ + RX_MSDU_END_10_FIRST_MSDU_MASK, \ + RX_MSDU_END_10_FIRST_MSDU_LSB)) + +#define HAL_RX_MSDU_END_DA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_DA_IS_VALID_OFFSET)), \ + RX_MSDU_END_10_DA_IS_VALID_MASK, \ + RX_MSDU_END_10_DA_IS_VALID_LSB)) + +#define HAL_RX_MSDU_END_LAST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_LAST_MSDU_OFFSET)), \ + RX_MSDU_END_10_LAST_MSDU_MASK, \ + RX_MSDU_END_10_LAST_MSDU_LSB)) + +#define HAL_RX_MPDU_GET_MAC_AD4_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_LSB)) + +#define HAL_RX_MPDU_INFO_SW_PEER_ID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_10_SW_PEER_ID_OFFSET)), \ + RX_MPDU_INFO_10_SW_PEER_ID_MASK, \ + RX_MPDU_INFO_10_SW_PEER_ID_LSB)) + +#define HAL_RX_MPDU_GET_TODS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_TO_DS_OFFSET)), \ + RX_MPDU_INFO_11_TO_DS_MASK, \ + RX_MPDU_INFO_11_TO_DS_LSB)) + +#define HAL_RX_MPDU_GET_FROMDS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_FR_DS_OFFSET)), \ + RX_MPDU_INFO_11_FR_DS_MASK, \ + RX_MPDU_INFO_11_FR_DS_LSB)) + +#define HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MPDU_FRAME_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MPDU_FRAME_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_11_MPDU_FRAME_CONTROL_VALID_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MAC_ADDR_AD1_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MAC_ADDR_AD1_VALID_MASK, \ + RX_MPDU_INFO_11_MAC_ADDR_AD1_VALID_LSB)) + +#define HAL_RX_MPDU_AD1_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_OFFSET)), \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_MASK, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_LSB)) + +#define HAL_RX_MPDU_AD1_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MAC_ADDR_AD2_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MAC_ADDR_AD2_VALID_MASK, \ + RX_MPDU_INFO_11_MAC_ADDR_AD2_VALID_LSB)) + +#define HAL_RX_MPDU_AD2_15_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_LSB)) + +#define HAL_RX_MPDU_AD2_47_16_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_OFFSET)), \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_MASK, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MAC_ADDR_AD3_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MAC_ADDR_AD3_VALID_MASK, \ + RX_MPDU_INFO_11_MAC_ADDR_AD3_VALID_LSB)) + +#define HAL_RX_MPDU_AD3_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_OFFSET)), \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_MASK, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_LSB)) + +#define HAL_RX_MPDU_AD3_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_OFFSET)), \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_MASK, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_LSB)) + +#define HAL_RX_MPDU_AD4_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_OFFSET)), \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_MASK, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_LSB)) + +#define HAL_RX_MPDU_AD4_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_OFFSET)), \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_MASK, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_LSB)) + +#define HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_CONTROL_VALID_LSB)) + +#define HAL_RX_MPDU_INFO_QOS_CONTROL_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_OFFSET)),\ + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_LSB)) + +#define HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_14_SA_SW_PEER_ID_OFFSET)), \ + RX_MSDU_END_14_SA_SW_PEER_ID_MASK, \ + RX_MSDU_END_14_SA_SW_PEER_ID_LSB)) + +#define HAL_RX_MSDU0_BUFFER_ADDR_LSB(link_desc_va) \ + (uint8_t *)(link_desc_va) + \ + RX_MSDU_LINK_8_MSDU_0_BUFFER_ADDR_INFO_DETAILS_BUFFER_ADDR_31_0_OFFSET + +#define HAL_RX_MSDU_DESC_INFO_PTR_GET(msdu0) \ + (uint8_t *)(msdu0) + \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_DETAILS_FIRST_MSDU_IN_MPDU_FLAG_OFFSET + +#define HAL_ENT_MPDU_DESC_INFO(ent_ring_desc) \ + (uint8_t *)(ent_ring_desc) + \ + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_DETAILS_MPDU_SEQUENCE_NUMBER_OFFSET + +#define HAL_DST_MPDU_DESC_INFO(dst_ring_desc) \ + (uint8_t *)(dst_ring_desc) + \ + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_DETAILS_MSDU_COUNT_OFFSET + +#define HAL_RX_GET_FC_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_11, MPDU_FRAME_CONTROL_VALID) + +#define HAL_RX_GET_TO_DS_FLAG(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_11, TO_DS) + +#define HAL_RX_GET_MAC_ADDR1_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_11, MAC_ADDR_AD1_VALID) + +#define HAL_RX_GET_MAC_ADDR2_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_11, MAC_ADDR_AD2_VALID) + +#define HAL_RX_GET_FILTER_CATEGORY(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_9, RXPCU_MPDU_FILTER_IN_CATEGORY) + +#define HAL_RX_GET_PPDU_ID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_9, PHY_PPDU_ID) + +#define HAL_RX_GET_SW_FRAME_GROUP_ID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_9, SW_FRAME_GROUP_ID) + +#define HAL_REO_R0_CONFIG(soc, reg_val, reo_params) \ + do { \ + reg_val &= \ + ~(HWIO_REO_R0_GENERAL_ENABLE_AGING_LIST_ENABLE_BMSK |\ + HWIO_REO_R0_GENERAL_ENABLE_AGING_FLUSH_ENABLE_BMSK); \ + reg_val |= \ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + AGING_LIST_ENABLE, 1) |\ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + AGING_FLUSH_ENABLE, 1);\ + HAL_REG_WRITE((soc), \ + HWIO_REO_R0_GENERAL_ENABLE_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET), \ + (reg_val)); \ + reg_val = \ + HAL_REG_READ((soc), \ + HWIO_REO_R0_MISC_CTL_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET)); \ + reg_val &= \ + ~(HWIO_REO_R0_MISC_CTL_FRAGMENT_DEST_RING_BMSK); \ + reg_val |= \ + HAL_SM(HWIO_REO_R0_MISC_CTL, \ + FRAGMENT_DEST_RING, \ + (reo_params)->frag_dst_ring); \ + HAL_REG_WRITE((soc), \ + HWIO_REO_R0_MISC_CTL_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET), \ + (reg_val)); \ + reg_val = \ + HAL_REG_READ((soc), \ + HWIO_REO_R0_GENERAL_ENABLE_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET)); \ + reg_val &= \ + (~HWIO_REO_R0_GENERAL_ENABLE_BAR_DEST_RING_BMSK |\ + (REO_REMAP_TCL << HWIO_REO_R0_GENERAL_ENABLE_BAR_DEST_RING_SHFT)); \ + HAL_REG_WRITE((soc), \ + HWIO_REO_R0_GENERAL_ENABLE_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET), \ + (reg_val)); \ + } while (0) + +#define HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr) \ + ((struct rx_msdu_desc_info *) \ + _OFFSET_TO_BYTE_PTR(msdu_details_ptr, \ +RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_DETAILS_RESERVED_0A_OFFSET)) + +#define HAL_RX_LINK_DESC_MSDU0_PTR(link_desc) \ + ((struct rx_msdu_details *) \ + _OFFSET_TO_BYTE_PTR((link_desc),\ + RX_MSDU_LINK_8_MSDU_0_BUFFER_ADDR_INFO_DETAILS_BUFFER_ADDR_31_0_OFFSET)) + +#define HAL_RX_MSDU_END_FLOW_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_12_FLOW_IDX_OFFSET)), \ + RX_MSDU_END_12_FLOW_IDX_MASK, \ + RX_MSDU_END_12_FLOW_IDX_LSB)) + +#define HAL_RX_MSDU_END_REO_DEST_IND_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_12_REO_DESTINATION_INDICATION_OFFSET)), \ + RX_MSDU_END_12_REO_DESTINATION_INDICATION_MASK, \ + RX_MSDU_END_12_REO_DESTINATION_INDICATION_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_FLOW_IDX_INVALID_OFFSET)), \ + RX_MSDU_END_10_FLOW_IDX_INVALID_MASK, \ + RX_MSDU_END_10_FLOW_IDX_INVALID_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_FLOW_IDX_TIMEOUT_OFFSET)), \ + RX_MSDU_END_10_FLOW_IDX_TIMEOUT_MASK, \ + RX_MSDU_END_10_FLOW_IDX_TIMEOUT_LSB)) + +#define HAL_RX_MSDU_END_FSE_METADATA_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_13_FSE_METADATA_OFFSET)), \ + RX_MSDU_END_13_FSE_METADATA_MASK, \ + RX_MSDU_END_13_FSE_METADATA_LSB)) + +#define HAL_RX_MSDU_END_CCE_METADATA_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_14_CCE_METADATA_OFFSET)), \ + RX_MSDU_END_14_CCE_METADATA_MASK, \ + RX_MSDU_END_14_CCE_METADATA_LSB)) + +#define HAL_RX_TLV_GET_TCP_CHKSUM(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_10_TCP_UDP_CHKSUM_OFFSET)), \ + RX_MSDU_END_10_TCP_UDP_CHKSUM_MASK, \ + RX_MSDU_END_10_TCP_UDP_CHKSUM_LSB)) + +#define HAL_RX_MSDU_END_DA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_11_DA_IDX_OR_SW_PEER_ID_OFFSET)), \ + RX_MSDU_END_11_DA_IDX_OR_SW_PEER_ID_MASK, \ + RX_MSDU_END_11_DA_IDX_OR_SW_PEER_ID_LSB)) + +#define HAL_RX_TLV_GET_FLOW_AGGR_CONT(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_17_FLOW_AGGREGATION_CONTINUATION_OFFSET)), \ + RX_MSDU_END_17_FLOW_AGGREGATION_CONTINUATION_MASK, \ + RX_MSDU_END_17_FLOW_AGGREGATION_CONTINUATION_LSB)) + +#define HAL_RX_TLV_GET_FLOW_AGGR_COUNT(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_17_AGGREGATION_COUNT_OFFSET)), \ + RX_MSDU_END_17_AGGREGATION_COUNT_MASK, \ + RX_MSDU_END_17_AGGREGATION_COUNT_LSB)) + +#define HAL_RX_TLV_GET_FISA_TIMEOUT(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_17_FISA_TIMEOUT_OFFSET)), \ + RX_MSDU_END_17_FISA_TIMEOUT_MASK, \ + RX_MSDU_END_17_FISA_TIMEOUT_LSB)) + +#define HAL_RX_TLV_GET_FISA_CUMULATIVE_L4_CHECKSUM(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_18_CUMULATIVE_L4_CHECKSUM_OFFSET)), \ + RX_MSDU_END_18_CUMULATIVE_L4_CHECKSUM_MASK, \ + RX_MSDU_END_18_CUMULATIVE_L4_CHECKSUM_LSB)) + +#define HAL_RX_TLV_GET_FISA_CUMULATIVE_IP_LENGTH(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_18_CUMULATIVE_IP_LENGTH_OFFSET)), \ + RX_MSDU_END_18_CUMULATIVE_IP_LENGTH_MASK, \ + RX_MSDU_END_18_CUMULATIVE_IP_LENGTH_LSB)) + +#if defined(QCA_WIFI_QCA6490) && defined(WLAN_CFR_ENABLE) && \ + defined(WLAN_ENH_CFR_ENABLE) +static inline +void hal_rx_get_bb_info_6490(void *rx_tlv, + void *ppdu_info_hdl) +{ + struct hal_rx_ppdu_info *ppdu_info = ppdu_info_hdl; + + ppdu_info->cfr_info.bb_captured_channel = + HAL_RX_GET(rx_tlv, RXPCU_PPDU_END_INFO_3, BB_CAPTURED_CHANNEL); + + ppdu_info->cfr_info.bb_captured_timeout = + HAL_RX_GET(rx_tlv, RXPCU_PPDU_END_INFO_3, BB_CAPTURED_TIMEOUT); + + ppdu_info->cfr_info.bb_captured_reason = + HAL_RX_GET(rx_tlv, RXPCU_PPDU_END_INFO_3, BB_CAPTURED_REASON); +} + +static inline +void hal_rx_get_rtt_info_6490(void *rx_tlv, + void *ppdu_info_hdl) +{ + struct hal_rx_ppdu_info *ppdu_info = ppdu_info_hdl; + + ppdu_info->cfr_info.rx_location_info_valid = + HAL_RX_GET(rx_tlv, PHYRX_PKT_END_13_RX_PKT_END_DETAILS, + RX_LOCATION_INFO_DETAILS_RX_LOCATION_INFO_VALID); + + ppdu_info->cfr_info.rtt_che_buffer_pointer_low32 = + HAL_RX_GET(rx_tlv, + PHYRX_PKT_END_12_RX_PKT_END_DETAILS_RX_LOCATION_INFO_DETAILS, + RTT_CHE_BUFFER_POINTER_LOW32); + + ppdu_info->cfr_info.rtt_che_buffer_pointer_high8 = + HAL_RX_GET(rx_tlv, + PHYRX_PKT_END_11_RX_PKT_END_DETAILS_RX_LOCATION_INFO_DETAILS, + RTT_CHE_BUFFER_POINTER_HIGH8); + + ppdu_info->cfr_info.chan_capture_status = + HAL_RX_GET(rx_tlv, + PHYRX_PKT_END_13_RX_PKT_END_DETAILS_RX_LOCATION_INFO_DETAILS, + RESERVED_8); +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6490/hal_6490_tx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6490/hal_6490_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..90cf14fc337297e3436d0a63518ee1057484e41a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6490/hal_6490_tx.h @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "tcl_data_cmd.h" +#include "mac_tcl_reg_seq_hwioreg.h" +#include "phyrx_rssi_legacy.h" +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "cdp_txrx_mon_struct.h" +#include "qdf_trace.h" +#include "hal_rx.h" +#include "hal_tx.h" +#include "dp_types.h" +#include "hal_api_mon.h" + +/** + * hal_tx_desc_set_dscp_tid_table_id_6490() - Sets DSCP to TID conversion + * table ID + * @desc: Handle to Tx Descriptor + * @id: DSCP to tid conversion table to be used for this frame + * + * Return: void + */ +static void hal_tx_desc_set_dscp_tid_table_id_6490(void *desc, uint8_t id) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_5, + DSCP_TID_TABLE_NUM) |= + HAL_TX_SM(TCL_DATA_CMD_5, + DSCP_TID_TABLE_NUM, id); +} + +#define DSCP_TID_TABLE_SIZE 24 +#define NUM_WORDS_PER_DSCP_TID_TABLE (DSCP_TID_TABLE_SIZE / 4) + +/** + * hal_tx_set_dscp_tid_map_6490() - Configure default DSCP to TID map table + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id: mapping table ID - 0-31 + * + * DSCP are mapped to 8 TID values using TID values programmed + * in any of the 32 DSCP_TID_MAPS (id = 0-31). + * + * Return: none + */ +static void hal_tx_set_dscp_tid_map_6490(struct hal_soc *hal_soc, uint8_t *map, + uint8_t id) +{ + int i; + uint32_t addr, cmn_reg_addr; + uint32_t value = 0, regval; + uint8_t val[DSCP_TID_TABLE_SIZE], cnt = 0; + + struct hal_soc *soc = (struct hal_soc *)hal_soc; + + if (id >= HAL_MAX_HW_DSCP_TID_MAPS_11AX) + return; + + cmn_reg_addr = HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + + addr = HWIO_TCL_R0_DSCP_TID_MAP_n_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET, + id * NUM_WORDS_PER_DSCP_TID_TABLE); + + /* Enable read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval |= + (1 << + HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_SHFT); + + HAL_REG_WRITE(soc, cmn_reg_addr, regval); + + /* Write 8 (24 bits) DSCP-TID mappings in each interation */ + for (i = 0; i < 64; i += 8) { + value = (map[i] | + (map[i + 1] << 0x3) | + (map[i + 2] << 0x6) | + (map[i + 3] << 0x9) | + (map[i + 4] << 0xc) | + (map[i + 5] << 0xf) | + (map[i + 6] << 0x12) | + (map[i + 7] << 0x15)); + + qdf_mem_copy(&val[cnt], (void *)&value, 3); + cnt += 3; + } + + for (i = 0; i < DSCP_TID_TABLE_SIZE; i += 4) { + regval = *(uint32_t *)(val + i); + HAL_REG_WRITE(soc, addr, + (regval & HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK)); + addr += 4; + } + + /* Diasble read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval &= + ~(HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_BMSK); + + HAL_REG_WRITE(soc, cmn_reg_addr, regval); +} + +/** + * hal_tx_update_dscp_tid_6490() - Update the dscp tid map table as updated + * by the user + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id : MAP ID + * @dscp: DSCP_TID map index + * + * Return: void + */ +static void hal_tx_update_dscp_tid_6490(struct hal_soc *hal_soc, uint8_t tid, + uint8_t id, uint8_t dscp) +{ + int index; + uint32_t addr; + uint32_t value; + uint32_t regval; + struct hal_soc *soc = (struct hal_soc *)hal_soc; + + addr = HWIO_TCL_R0_DSCP_TID_MAP_n_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET, id); + + index = dscp % HAL_TX_NUM_DSCP_PER_REGISTER; + addr += 4 * (dscp / HAL_TX_NUM_DSCP_PER_REGISTER); + value = tid << (HAL_TX_BITS_PER_TID * index); + + regval = HAL_REG_READ(soc, addr); + regval &= ~(HAL_TX_TID_BITS_MASK << (HAL_TX_BITS_PER_TID * index)); + regval |= value; + + HAL_REG_WRITE(soc, addr, (regval & HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK)); +} + +/** + * hal_tx_desc_set_lmac_id - Set the lmac_id value + * @desc: Handle to Tx Descriptor + * @lmac_id: mac Id to ast matching + * b00 – mac 0 + * b01 – mac 1 + * b10 – mac 2 + * b11 – all macs (legacy HK way) + * + * Return: void + */ +static void hal_tx_desc_set_lmac_id_6490(void *desc, uint8_t lmac_id) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_4, LMAC_ID) |= + HAL_TX_SM(TCL_DATA_CMD_4, LMAC_ID, lmac_id); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6750/hal_6750.c b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6750/hal_6750.c new file mode 100644 index 0000000000000000000000000000000000000000..0542cb8fbdf71548e009bbfc87d6cb95fbdb6bd2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6750/hal_6750.c @@ -0,0 +1,1835 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_types.h" +#include "qdf_util.h" +#include "qdf_types.h" +#include "qdf_lock.h" +#include "qdf_mem.h" +#include "qdf_nbuf.h" +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "hal_api.h" +#include "target_type.h" +#include "wcss_version.h" +#include "qdf_module.h" + +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_OFFSET \ + RXPCU_PPDU_END_INFO_9_RX_PPDU_DURATION_OFFSET +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_MASK \ + RXPCU_PPDU_END_INFO_9_RX_PPDU_DURATION_MASK +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_LSB \ + RXPCU_PPDU_END_INFO_9_RX_PPDU_DURATION_LSB +#define UNIFIED_PHYRX_HT_SIG_0_HT_SIG_INFO_PHYRX_HT_SIG_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_B_0_PHYRX_L_SIG_B_INFO_DETAILS_RATE_OFFSET +#define UNIFIED_PHYRX_L_SIG_B_0_L_SIG_B_INFO_PHYRX_L_SIG_B_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_B_0_PHYRX_L_SIG_B_INFO_DETAILS_RATE_OFFSET +#define UNIFIED_PHYRX_L_SIG_A_0_L_SIG_A_INFO_PHYRX_L_SIG_A_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_A_0_PHYRX_L_SIG_A_INFO_DETAILS_RATE_OFFSET +#define UNIFIED_PHYRX_VHT_SIG_A_0_VHT_SIG_A_INFO_PHYRX_VHT_SIG_A_INFO_DETAILS_OFFSET \ + PHYRX_VHT_SIG_A_0_PHYRX_VHT_SIG_A_INFO_DETAILS_BANDWIDTH_OFFSET +#define UNIFIED_PHYRX_HE_SIG_A_SU_0_HE_SIG_A_SU_INFO_PHYRX_HE_SIG_A_SU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_A_SU_0_PHYRX_HE_SIG_A_SU_INFO_DETAILS_FORMAT_INDICATION_OFFSET +#define UNIFIED_PHYRX_HE_SIG_A_MU_DL_0_HE_SIG_A_MU_DL_INFO_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_A_MU_DL_0_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS_DL_UL_FLAG_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B1_MU_0_HE_SIG_B1_MU_INFO_PHYRX_HE_SIG_B1_MU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B1_MU_0_PHYRX_HE_SIG_B1_MU_INFO_DETAILS_RU_ALLOCATION_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B2_MU_0_HE_SIG_B2_MU_INFO_PHYRX_HE_SIG_B2_MU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B2_MU_0_PHYRX_HE_SIG_B2_MU_INFO_DETAILS_STA_ID_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B2_OFDMA_0_HE_SIG_B2_OFDMA_INFO_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B2_OFDMA_0_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS_STA_ID_OFFSET + +#define UNIFIED_PHYRX_RSSI_LEGACY_3_RECEIVE_RSSI_INFO_PRE_RSSI_INFO_DETAILS_OFFSET \ + PHYRX_RSSI_LEGACY_3_RECEIVE_RSSI_INFO_PRE_RSSI_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_RSSI_LEGACY_19_RECEIVE_RSSI_INFO_PREAMBLE_RSSI_INFO_DETAILS_OFFSET \ + PHYRX_RSSI_LEGACY_19_PREAMBLE_RSSI_INFO_DETAILS_RSSI_PRI20_CHAIN0_OFFSET +#define UNIFIED_RX_MPDU_START_0_RX_MPDU_INFO_RX_MPDU_INFO_DETAILS_OFFSET \ + RX_MPDU_START_0_RX_MPDU_INFO_DETAILS_RXPT_CLASSIFY_INFO_DETAILS_REO_DESTINATION_INDICATION_OFFSET +#define UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET \ + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET +#define UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET \ + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET \ + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET +#define UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC \ + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER +#define UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET \ + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET +#define UNIFIED_TCL_DATA_CMD_0_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET \ + TCL_DATA_CMD_0_BUF_ADDR_INFO_BUFFER_ADDR_31_0_OFFSET +#define UNIFIED_TCL_DATA_CMD_1_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET \ + TCL_DATA_CMD_1_BUF_ADDR_INFO_BUFFER_ADDR_39_32_OFFSET +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_OFFSET \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_OFFSET +#define UNIFIED_BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB +#define UNIFIED_BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_LSB \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_LSB +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_MASK \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_MASK +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_MASK \ + WBM_RELEASE_RING_6_TX_RATE_STATS_PPDU_TRANSMISSION_TSF_MASK +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_OFFSET \ + WBM_RELEASE_RING_6_TX_RATE_STATS_PPDU_TRANSMISSION_TSF_OFFSET +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_LSB \ + WBM_RELEASE_RING_6_TX_RATE_STATS_PPDU_TRANSMISSION_TSF_LSB + +#include "hal_6750_tx.h" +#include "hal_6750_rx.h" +#include +#include + +/* + * hal_rx_msdu_start_nss_get_6750(): API to get the NSS + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(nss) + */ +static uint32_t +hal_rx_msdu_start_nss_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint8_t mimo_ss_bitmap; + + mimo_ss_bitmap = HAL_RX_MSDU_START_MIMO_SS_BITMAP(msdu_start); + + return qdf_get_hweight8(mimo_ss_bitmap); +} + +/** + * hal_rx_mon_hw_desc_get_mpdu_status_6750(): Retrieve MPDU status + * + * @ hw_desc_addr: Start address of Rx HW TLVs + * @ rs: Status for monitor mode + * + * Return: void + */ +static void hal_rx_mon_hw_desc_get_mpdu_status_6750(void *hw_desc_addr, + struct mon_rx_status *rs) +{ + struct rx_msdu_start *rx_msdu_start; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + uint32_t reg_value; + const uint32_t sgi_hw_to_cdp[] = { + CDP_SGI_0_8_US, + CDP_SGI_0_4_US, + CDP_SGI_1_6_US, + CDP_SGI_3_2_US, + }; + + rx_msdu_start = &rx_desc->msdu_start_tlv.rx_msdu_start; + + HAL_RX_GET_MSDU_AGGREGATION(rx_desc, rs); + + rs->ant_signal_db = HAL_RX_GET(rx_msdu_start, + RX_MSDU_START_5, USER_RSSI); + rs->is_stbc = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, STBC); + + reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, SGI); + rs->sgi = sgi_hw_to_cdp[reg_value]; + + reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, RECEPTION_TYPE); + rs->beamformed = (reg_value == HAL_RX_RECEPTION_TYPE_MU_MIMO) ? 1 : 0; + /* TODO: rs->beamformed should be set for SU beamforming also */ +} + +#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2) + +static uint32_t hal_get_link_desc_size_6750(void) +{ + return LINK_DESC_SIZE; +} + +/* + * hal_rx_get_tlv_6750(): API to get the tlv + * + * @rx_tlv: TLV data extracted from the rx packet + * Return: uint8_t + */ +static uint8_t hal_rx_get_tlv_6750(void *rx_tlv) +{ + return HAL_RX_GET(rx_tlv, PHYRX_RSSI_LEGACY_0, RECEIVE_BANDWIDTH); +} + +/** + * hal_rx_proc_phyrx_other_receive_info_tlv_6750() + * - process other receive info TLV + * @rx_tlv_hdr: pointer to TLV header + * @ppdu_info: pointer to ppdu_info + * + * Return: None + */ +static +void hal_rx_proc_phyrx_other_receive_info_tlv_6750(void *rx_tlv_hdr, + void *ppdu_info_handle) +{ + uint32_t tlv_tag, tlv_len; + uint32_t temp_len, other_tlv_len, other_tlv_tag; + void *rx_tlv = (uint8_t *)rx_tlv_hdr + HAL_RX_TLV32_HDR_SIZE; + void *other_tlv_hdr = NULL; + void *other_tlv = NULL; + + tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv_hdr); + tlv_len = HAL_RX_GET_USER_TLV32_LEN(rx_tlv_hdr); + temp_len = 0; + + other_tlv_hdr = rx_tlv + HAL_RX_TLV32_HDR_SIZE; + + other_tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(other_tlv_hdr); + other_tlv_len = HAL_RX_GET_USER_TLV32_LEN(other_tlv_hdr); + temp_len += other_tlv_len; + other_tlv = other_tlv_hdr + HAL_RX_TLV32_HDR_SIZE; + + switch (other_tlv_tag) { + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s unhandled TLV type: %d, TLV len:%d", + __func__, other_tlv_tag, other_tlv_len); + break; + } +} + +/** + * hal_rx_dump_msdu_start_tlv_6750() : dump RX msdu_start TLV in structured + * human readable format. + * @ msdu_start: pointer the msdu_start TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static void hal_rx_dump_msdu_start_tlv_6750(void *msdustart, uint8_t dbg_level) +{ + struct rx_msdu_start *msdu_start = (struct rx_msdu_start *)msdustart; + + hal_verbose_debug( + "rx_msdu_start tlv (1/2) - " + "rxpcu_mpdu_filter_in_category: %x " + "sw_frame_group_id: %x " + "phy_ppdu_id: %x " + "msdu_length: %x " + "ipsec_esp: %x " + "l3_offset: %x " + "ipsec_ah: %x " + "l4_offset: %x " + "msdu_number: %x " + "decap_format: %x " + "ipv4_proto: %x " + "ipv6_proto: %x " + "tcp_proto: %x " + "udp_proto: %x " + "ip_frag: %x " + "tcp_only_ack: %x " + "da_is_bcast_mcast: %x " + "ip4_protocol_ip6_next_header: %x " + "toeplitz_hash_2_or_4: %x " + "flow_id_toeplitz: %x " + "user_rssi: %x " + "pkt_type: %x " + "stbc: %x " + "sgi: %x " + "rate_mcs: %x " + "receive_bandwidth: %x " + "reception_type: %x " + "ppdu_start_timestamp: %u ", + msdu_start->rxpcu_mpdu_filter_in_category, + msdu_start->sw_frame_group_id, + msdu_start->phy_ppdu_id, + msdu_start->msdu_length, + msdu_start->ipsec_esp, + msdu_start->l3_offset, + msdu_start->ipsec_ah, + msdu_start->l4_offset, + msdu_start->msdu_number, + msdu_start->decap_format, + msdu_start->ipv4_proto, + msdu_start->ipv6_proto, + msdu_start->tcp_proto, + msdu_start->udp_proto, + msdu_start->ip_frag, + msdu_start->tcp_only_ack, + msdu_start->da_is_bcast_mcast, + msdu_start->ip4_protocol_ip6_next_header, + msdu_start->toeplitz_hash_2_or_4, + msdu_start->flow_id_toeplitz, + msdu_start->user_rssi, + msdu_start->pkt_type, + msdu_start->stbc, + msdu_start->sgi, + msdu_start->rate_mcs, + msdu_start->receive_bandwidth, + msdu_start->reception_type, + msdu_start->ppdu_start_timestamp); + + hal_verbose_debug( + "rx_msdu_start tlv (2/2) - " + "sw_phy_meta_data: %x ", + msdu_start->sw_phy_meta_data); +} + +/** + * hal_rx_dump_msdu_end_tlv_6750: dump RX msdu_end TLV in structured + * human readable format. + * @ msdu_end: pointer the msdu_end TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static void hal_rx_dump_msdu_end_tlv_6750(void *msduend, + uint8_t dbg_level) +{ + struct rx_msdu_end *msdu_end = (struct rx_msdu_end *)msduend; + __QDF_TRACE_RL(dbg_level, QDF_MODULE_ID_DP, + "rx_msdu_end tlv (1/2) - " + "rxpcu_mpdu_filter_in_category: %x " + "sw_frame_group_id: %x " + "phy_ppdu_id: %x " + "ip_hdr_chksum: %x " + "tcp_udp_chksum: %x " + "key_id_octet: %x " + "cce_super_rule: %x " + "cce_classify_not_done_truncat: %x " + "cce_classify_not_done_cce_dis: %x " + "reported_mpdu_length: %x " + "first_msdu: %x " + "last_msdu: %x " + "sa_idx_timeout: %x " + "da_idx_timeout: %x " + "msdu_limit_error: %x " + "flow_idx_timeout: %x " + "flow_idx_invalid: %x " + "wifi_parser_error: %x " + "amsdu_parser_error: %x", + msdu_end->rxpcu_mpdu_filter_in_category, + msdu_end->sw_frame_group_id, + msdu_end->phy_ppdu_id, + msdu_end->ip_hdr_chksum, + msdu_end->tcp_udp_chksum, + msdu_end->key_id_octet, + msdu_end->cce_super_rule, + msdu_end->cce_classify_not_done_truncate, + msdu_end->cce_classify_not_done_cce_dis, + msdu_end->reported_mpdu_length, + msdu_end->first_msdu, + msdu_end->last_msdu, + msdu_end->sa_idx_timeout, + msdu_end->da_idx_timeout, + msdu_end->msdu_limit_error, + msdu_end->flow_idx_timeout, + msdu_end->flow_idx_invalid, + msdu_end->wifi_parser_error, + msdu_end->amsdu_parser_error); + __QDF_TRACE_RL(dbg_level, QDF_MODULE_ID_DP, + "rx_msdu_end tlv (2/2)- " + "sa_is_valid: %x " + "da_is_valid: %x " + "da_is_mcbc: %x " + "l3_header_padding: %x " + "ipv6_options_crc: %x " + "tcp_seq_number: %x " + "tcp_ack_number: %x " + "tcp_flag: %x " + "lro_eligible: %x " + "window_size: %x " + "da_offset: %x " + "sa_offset: %x " + "da_offset_valid: %x " + "sa_offset_valid: %x " + "rule_indication_31_0: %x " + "rule_indication_63_32: %x " + "sa_idx: %x " + "da_idx: %x " + "msdu_drop: %x " + "reo_destination_indication: %x " + "flow_idx: %x " + "fse_metadata: %x " + "cce_metadata: %x " + "sa_sw_peer_id: %x ", + msdu_end->sa_is_valid, + msdu_end->da_is_valid, + msdu_end->da_is_mcbc, + msdu_end->l3_header_padding, + msdu_end->ipv6_options_crc, + msdu_end->tcp_seq_number, + msdu_end->tcp_ack_number, + msdu_end->tcp_flag, + msdu_end->lro_eligible, + msdu_end->window_size, + msdu_end->da_offset, + msdu_end->sa_offset, + msdu_end->da_offset_valid, + msdu_end->sa_offset_valid, + msdu_end->rule_indication_31_0, + msdu_end->rule_indication_63_32, + msdu_end->sa_idx, + msdu_end->da_idx_or_sw_peer_id, + msdu_end->msdu_drop, + msdu_end->reo_destination_indication, + msdu_end->flow_idx, + msdu_end->fse_metadata, + msdu_end->cce_metadata, + msdu_end->sa_sw_peer_id); +} + +/* + * Get tid from RX_MPDU_START + */ +#define HAL_RX_MPDU_INFO_TID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_7_TID_OFFSET)), \ + RX_MPDU_INFO_7_TID_MASK, \ + RX_MPDU_INFO_7_TID_LSB)) + +static uint32_t hal_rx_mpdu_start_tid_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint32_t tid; + + tid = HAL_RX_MPDU_INFO_TID_GET(&mpdu_start->rx_mpdu_info_details); + + return tid; +} + +#define HAL_RX_MSDU_START_RECEPTION_TYPE_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start), \ + RX_MSDU_START_5_RECEPTION_TYPE_OFFSET)), \ + RX_MSDU_START_5_RECEPTION_TYPE_MASK, \ + RX_MSDU_START_5_RECEPTION_TYPE_LSB)) + +/* + * hal_rx_msdu_start_reception_type_get(): API to get the reception type + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(reception_type) + */ +static +uint32_t hal_rx_msdu_start_reception_type_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t reception_type; + + reception_type = HAL_RX_MSDU_START_RECEPTION_TYPE_GET(msdu_start); + + return reception_type; +} + +/** + * hal_rx_msdu_end_da_idx_get_6750: API to get da_idx + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da index + */ +static uint16_t hal_rx_msdu_end_da_idx_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t da_idx; + + da_idx = HAL_RX_MSDU_END_DA_IDX_GET(msdu_end); + + return da_idx; +} + +/** + * hal_rx_get_rx_fragment_number_6750(): Function to retrieve rx fragment number + * + * @nbuf: Network buffer + * Returns: rx fragment number + */ +static +uint8_t hal_rx_get_rx_fragment_number_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + /* Return first 4 bits as fragment number */ + return (HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info) & + DOT11_SEQ_FRAG_MASK); +} + +/** + * hal_rx_msdu_end_da_is_mcbc_get_6750(): API to check if pkt is MCBC + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_mcbc + */ +static uint8_t +hal_rx_msdu_end_da_is_mcbc_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_DA_IS_MCBC_GET(msdu_end); +} + +/** + * hal_rx_msdu_end_sa_is_valid_get_6750(): API to get_6750 the + * sa_is_valid bit from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_is_valid bit + */ +static uint8_t +hal_rx_msdu_end_sa_is_valid_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t sa_is_valid; + + sa_is_valid = HAL_RX_MSDU_END_SA_IS_VALID_GET(msdu_end); + + return sa_is_valid; +} + +/** + * hal_rx_msdu_end_sa_idx_get_6750(): API to get_6750 the + * sa_idx from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_idx (SA AST index) + */ +static +uint16_t hal_rx_msdu_end_sa_idx_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t sa_idx; + + sa_idx = HAL_RX_MSDU_END_SA_IDX_GET(msdu_end); + + return sa_idx; +} + +/** + * hal_rx_desc_is_first_msdu_6750() - Check if first msdu + * + * @hal_soc_hdl: hal_soc handle + * @hw_desc_addr: hardware descriptor address + * + * Return: 0 - success/ non-zero failure + */ +static uint32_t hal_rx_desc_is_first_msdu_6750(void *hw_desc_addr) +{ + struct rx_pkt_tlvs *rx_tlvs = (struct rx_pkt_tlvs *)hw_desc_addr; + struct rx_msdu_end *msdu_end = &rx_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_GET(msdu_end, RX_MSDU_END_10, FIRST_MSDU); +} + +/** + * hal_rx_msdu_end_l3_hdr_padding_get_6750(): API to get_6750 the + * l3_header padding from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: number of l3 header padding bytes + */ +static uint32_t hal_rx_msdu_end_l3_hdr_padding_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint32_t l3_header_padding; + + l3_header_padding = HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(msdu_end); + + return l3_header_padding; +} + +/* + * @ hal_rx_encryption_info_valid_6750: Returns encryption type. + * + * @ buf: rx_tlv_hdr of the received packet + * @ Return: encryption type + */ +static uint32_t hal_rx_encryption_info_valid_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + uint32_t encryption_info = HAL_RX_MPDU_ENCRYPTION_INFO_VALID(mpdu_info); + + return encryption_info; +} + +/* + * @ hal_rx_print_pn_6750: Prints the PN of rx packet. + * + * @ buf: rx_tlv_hdr of the received packet + * @ Return: void + */ +static void hal_rx_print_pn_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + uint32_t pn_31_0 = HAL_RX_MPDU_PN_31_0_GET(mpdu_info); + uint32_t pn_63_32 = HAL_RX_MPDU_PN_63_32_GET(mpdu_info); + uint32_t pn_95_64 = HAL_RX_MPDU_PN_95_64_GET(mpdu_info); + uint32_t pn_127_96 = HAL_RX_MPDU_PN_127_96_GET(mpdu_info); + + hal_debug("PN number pn_127_96 0x%x pn_95_64 0x%x pn_63_32 0x%x pn_31_0 0x%x ", + pn_127_96, pn_95_64, pn_63_32, pn_31_0); +} + +/** + * hal_rx_msdu_end_first_msdu_get_6750: API to get first msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: first_msdu + */ +static uint8_t hal_rx_msdu_end_first_msdu_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t first_msdu; + + first_msdu = HAL_RX_MSDU_END_FIRST_MSDU_GET(msdu_end); + + return first_msdu; +} + +/** + * hal_rx_msdu_end_da_is_valid_get_6750: API to check if da is valid + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_valid + */ +static uint8_t hal_rx_msdu_end_da_is_valid_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t da_is_valid; + + da_is_valid = HAL_RX_MSDU_END_DA_IS_VALID_GET(msdu_end); + + return da_is_valid; +} + +/** + * hal_rx_msdu_end_last_msdu_get_6750: API to get last msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: last_msdu + */ +static uint8_t hal_rx_msdu_end_last_msdu_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t last_msdu; + + last_msdu = HAL_RX_MSDU_END_LAST_MSDU_GET(msdu_end); + + return last_msdu; +} + +/* + * hal_rx_get_mpdu_mac_ad4_valid_6750(): Retrieves if mpdu 4th addr is valid + * + * @nbuf: Network buffer + * Returns: value of mpdu 4th address valid field + */ +static bool hal_rx_get_mpdu_mac_ad4_valid_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + bool ad4_valid = 0; + + ad4_valid = HAL_RX_MPDU_GET_MAC_AD4_VALID(rx_mpdu_info); + + return ad4_valid; +} + +/** + * hal_rx_mpdu_start_sw_peer_id_get_6750: Retrieve sw peer_id + * @buf: network buffer + * + * Return: sw peer_id + */ +static uint32_t hal_rx_mpdu_start_sw_peer_id_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + return HAL_RX_MPDU_INFO_SW_PEER_ID_GET( + &mpdu_start->rx_mpdu_info_details); +} + +/** + * hal_rx_mpdu_get_to_ds_6750(): API to get the tods info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(to_ds) + */ +static uint32_t hal_rx_mpdu_get_to_ds_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + return HAL_RX_MPDU_GET_TODS(mpdu_info); +} + +/* + * hal_rx_mpdu_get_fr_ds_6750(): API to get the from ds info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(fr_ds) + */ +static uint32_t hal_rx_mpdu_get_fr_ds_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + return HAL_RX_MPDU_GET_FROMDS(mpdu_info); +} + +/* + * hal_rx_get_mpdu_frame_control_valid_6750(): Retrieves mpdu + * frame control valid + * + * @nbuf: Network buffer + * Returns: value of frame control valid field + */ +static uint8_t hal_rx_get_mpdu_frame_control_valid_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(rx_mpdu_info); +} + +/* + * hal_rx_mpdu_get_addr1_6750(): API to check get address1 of the mpdu + * + * @buf: pointer to the start of RX PKT TLV headera + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr1_6750(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr1 { + uint32_t ad1_31_0; + uint16_t ad1_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr1 *addr = (struct hal_addr1 *)mac_addr; + uint32_t mac_addr_ad1_valid; + + mac_addr_ad1_valid = HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(mpdu_info); + + if (mac_addr_ad1_valid) { + addr->ad1_31_0 = HAL_RX_MPDU_AD1_31_0_GET(mpdu_info); + addr->ad1_47_32 = HAL_RX_MPDU_AD1_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr2_6750(): API to check get address2 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr2_6750(uint8_t *buf, + uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr2 { + uint16_t ad2_15_0; + uint32_t ad2_47_16; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr2 *addr = (struct hal_addr2 *)mac_addr; + uint32_t mac_addr_ad2_valid; + + mac_addr_ad2_valid = HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(mpdu_info); + + if (mac_addr_ad2_valid) { + addr->ad2_15_0 = HAL_RX_MPDU_AD2_15_0_GET(mpdu_info); + addr->ad2_47_16 = HAL_RX_MPDU_AD2_47_16_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr3_6750(): API to get address3 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr3_6750(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr3 { + uint32_t ad3_31_0; + uint16_t ad3_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr3 *addr = (struct hal_addr3 *)mac_addr; + uint32_t mac_addr_ad3_valid; + + mac_addr_ad3_valid = HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(mpdu_info); + + if (mac_addr_ad3_valid) { + addr->ad3_31_0 = HAL_RX_MPDU_AD3_31_0_GET(mpdu_info); + addr->ad3_47_32 = HAL_RX_MPDU_AD3_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr4_6750(): API to get address4 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr4_6750(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr4 { + uint32_t ad4_31_0; + uint16_t ad4_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr4 *addr = (struct hal_addr4 *)mac_addr; + uint32_t mac_addr_ad4_valid; + + mac_addr_ad4_valid = HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(mpdu_info); + + if (mac_addr_ad4_valid) { + addr->ad4_31_0 = HAL_RX_MPDU_AD4_31_0_GET(mpdu_info); + addr->ad4_47_32 = HAL_RX_MPDU_AD4_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_get_mpdu_sequence_control_valid_6750(): Get mpdu + * sequence control valid + * + * @nbuf: Network buffer + * Returns: value of sequence control valid field + */ +static uint8_t hal_rx_get_mpdu_sequence_control_valid_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(rx_mpdu_info); +} + +/** + * hal_rx_is_unicast_6750: check packet is unicast frame or not. + * + * @ buf: pointer to rx pkt TLV. + * + * Return: true on unicast. + */ +static bool hal_rx_is_unicast_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint32_t grp_id; + uint8_t *rx_mpdu_info = (uint8_t *)&mpdu_start->rx_mpdu_info_details; + + grp_id = (_HAL_MS((*_OFFSET_TO_WORD_PTR((rx_mpdu_info), + RX_MPDU_INFO_9_SW_FRAME_GROUP_ID_OFFSET)), + RX_MPDU_INFO_9_SW_FRAME_GROUP_ID_MASK, + RX_MPDU_INFO_9_SW_FRAME_GROUP_ID_LSB)); + + return (HAL_MPDU_SW_FRAME_GROUP_UNICAST_DATA == grp_id) ? true : false; +} + +/** + * hal_rx_tid_get_6750: get tid based on qos control valid. + * @hal_soc_hdl: hal_soc handle + * @ buf: pointer to rx pkt TLV. + * + * Return: tid + */ +static uint32_t hal_rx_tid_get_6750(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint8_t *rx_mpdu_info = (uint8_t *)&mpdu_start->rx_mpdu_info_details; + uint8_t qos_control_valid = + (_HAL_MS((*_OFFSET_TO_WORD_PTR((rx_mpdu_info), + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_OFFSET)), + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_MASK, + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_LSB)); + + if (qos_control_valid) + return hal_rx_mpdu_start_tid_get_6750(buf); + + return HAL_RX_NON_QOS_TID; +} + +/** + * hal_rx_hw_desc_get_ppduid_get_6750(): retrieve ppdu id + * @hw_desc_addr: hw addr + * + * Return: ppdu id + */ +static uint32_t hal_rx_hw_desc_get_ppduid_get_6750(void *hw_desc_addr) +{ + struct rx_mpdu_info *rx_mpdu_info; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + + rx_mpdu_info = + &rx_desc->mpdu_start_tlv.rx_mpdu_start.rx_mpdu_info_details; + + return HAL_RX_GET(rx_mpdu_info, RX_MPDU_INFO_9, PHY_PPDU_ID); +} + +/** + * hal_reo_status_get_header_6750 - Process reo desc info + * @d - Pointer to reo descriptior + * @b - tlv type info + * @h1 - Pointer to hal_reo_status_header where info to be stored + * + * Return - none. + * + */ +static void hal_reo_status_get_header_6750(uint32_t *d, int b, void *h1) +{ + uint32_t val1 = 0; + struct hal_reo_status_header *h = + (struct hal_reo_status_header *)h1; + + switch (b) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_0, + STATUS_HEADER_REO_STATUS_NUMBER)]; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_0, + STATUS_HEADER_REO_STATUS_NUMBER)]; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_0, + STATUS_HEADER_REO_STATUS_NUMBER)]; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_0, + STATUS_HEADER_REO_STATUS_NUMBER)]; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_0, + STATUS_HEADER_REO_STATUS_NUMBER)]; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + val1 = + d[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_0, + STATUS_HEADER_REO_STATUS_NUMBER)]; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UPDATE_RX_REO_QUEUE_STATUS_0, + STATUS_HEADER_REO_STATUS_NUMBER)]; + break; + default: + qdf_nofl_err("ERROR: Unknown tlv\n"); + break; + } + h->cmd_num = + HAL_GET_FIELD( + UNIFORM_REO_STATUS_HEADER_0, REO_STATUS_NUMBER, + val1); + h->exec_time = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, + CMD_EXECUTION_TIME, val1); + h->status = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, + REO_CMD_EXECUTION_STATUS, val1); + switch (b) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_1, + STATUS_HEADER_TIMESTAMP)]; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_1, + STATUS_HEADER_TIMESTAMP)]; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_1, + STATUS_HEADER_TIMESTAMP)]; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_1, + STATUS_HEADER_TIMESTAMP)]; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_1, + STATUS_HEADER_TIMESTAMP)]; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + val1 = + d[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_1, + STATUS_HEADER_TIMESTAMP)]; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UPDATE_RX_REO_QUEUE_STATUS_1, + STATUS_HEADER_TIMESTAMP)]; + break; + default: + qdf_nofl_err("ERROR: Unknown tlv\n"); + break; + } + h->tstamp = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_1, TIMESTAMP, val1); +} + +/** + * hal_tx_desc_set_mesh_en_6750 - Set mesh_enable flag in Tx descriptor + * @desc: Handle to Tx Descriptor + * @en: For raw WiFi frames, this indicates transmission to a mesh STA, + * enabling the interpretation of the 'Mesh Control Present' bit + * (bit 8) of QoS Control (otherwise this bit is ignored), + * For native WiFi frames, this indicates that a 'Mesh Control' field + * is present between the header and the LLC. + * + * Return: void + */ +static inline +void hal_tx_desc_set_mesh_en_6750(void *desc, uint8_t en) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_5, MESH_ENABLE) |= + HAL_TX_SM(TCL_DATA_CMD_5, MESH_ENABLE, en); +} + +static +void *hal_rx_msdu0_buffer_addr_lsb_6750(void *link_desc_va) +{ + return (void *)HAL_RX_MSDU0_BUFFER_ADDR_LSB(link_desc_va); +} + +static +void *hal_rx_msdu_desc_info_ptr_get_6750(void *msdu0) +{ + return (void *)HAL_RX_MSDU_DESC_INFO_PTR_GET(msdu0); +} + +static +void *hal_ent_mpdu_desc_info_6750(void *ent_ring_desc) +{ + return (void *)HAL_ENT_MPDU_DESC_INFO(ent_ring_desc); +} + +static +void *hal_dst_mpdu_desc_info_6750(void *dst_ring_desc) +{ + return (void *)HAL_DST_MPDU_DESC_INFO(dst_ring_desc); +} + +static +uint8_t hal_rx_get_fc_valid_6750(uint8_t *buf) +{ + return HAL_RX_GET_FC_VALID(buf); +} + +static uint8_t hal_rx_get_to_ds_flag_6750(uint8_t *buf) +{ + return HAL_RX_GET_TO_DS_FLAG(buf); +} + +static uint8_t hal_rx_get_mac_addr2_valid_6750(uint8_t *buf) +{ + return HAL_RX_GET_MAC_ADDR2_VALID(buf); +} + +static uint8_t hal_rx_get_filter_category_6750(uint8_t *buf) +{ + return HAL_RX_GET_FILTER_CATEGORY(buf); +} + +static uint32_t +hal_rx_get_ppdu_id_6750(uint8_t *buf) +{ + return HAL_RX_GET_PPDU_ID(buf); +} + +/** + * hal_reo_config_6750(): Set reo config parameters + * @soc: hal soc handle + * @reg_val: value to be set + * @reo_params: reo parameters + * + * Return: void + */ +static +void hal_reo_config_6750(struct hal_soc *soc, + uint32_t reg_val, + struct hal_reo_params *reo_params) +{ + HAL_REO_R0_CONFIG(soc, reg_val, reo_params); +} + +/** + * hal_rx_msdu_desc_info_get_ptr_6750() - Get msdu desc info ptr + * @msdu_details_ptr - Pointer to msdu_details_ptr + * + * Return - Pointer to rx_msdu_desc_info structure. + * + */ +static void *hal_rx_msdu_desc_info_get_ptr_6750(void *msdu_details_ptr) +{ + return HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr); +} + +/** + * hal_rx_link_desc_msdu0_ptr_6750 - Get pointer to rx_msdu details + * @link_desc - Pointer to link desc + * + * Return - Pointer to rx_msdu_details structure + * + */ +static void *hal_rx_link_desc_msdu0_ptr_6750(void *link_desc) +{ + return HAL_RX_LINK_DESC_MSDU0_PTR(link_desc); +} + +/** + * hal_rx_msdu_flow_idx_get_6750: API to get flow index + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index value from MSDU END TLV + */ +static inline uint32_t hal_rx_msdu_flow_idx_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_GET(msdu_end); +} + +/** + * hal_rx_msdu_flow_idx_invalid_6750: API to get flow index invalid + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index invalid value from MSDU END TLV + */ +static bool hal_rx_msdu_flow_idx_invalid_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(msdu_end); +} + +/** + * hal_rx_msdu_flow_idx_timeout_6750: API to get flow index timeout + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index timeout value from MSDU END TLV + */ +static bool hal_rx_msdu_flow_idx_timeout_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(msdu_end); +} + +/** + * hal_rx_msdu_fse_metadata_get_6750: API to get FSE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: fse metadata value from MSDU END TLV + */ +static uint32_t hal_rx_msdu_fse_metadata_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FSE_METADATA_GET(msdu_end); +} + +/** + * hal_rx_msdu_cce_metadata_get_6750: API to get CCE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: cce_metadata + */ +static uint16_t +hal_rx_msdu_cce_metadata_get_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_CCE_METADATA_GET(msdu_end); +} + +/** + * hal_rx_tlv_get_tcp_chksum_6750() - API to get tcp checksum + * @buf: rx_tlv_hdr + * + * Return: tcp checksum + */ +static uint16_t +hal_rx_tlv_get_tcp_chksum_6750(uint8_t *buf) +{ + return HAL_RX_TLV_GET_TCP_CHKSUM(buf); +} + +/** + * hal_rx_get_rx_sequence_6750(): Function to retrieve rx sequence number + * + * @nbuf: Network buffer + * Returns: rx sequence number + */ +static +uint16_t hal_rx_get_rx_sequence_6750(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info); +} + +/** + * hal_get_window_address_6750(): Function to get hp/tp address + * @hal_soc: Pointer to hal_soc + * @addr: address offset of register + * + * Return: modified address offset of register + */ +static inline qdf_iomem_t hal_get_window_address_6750(struct hal_soc *hal_soc, + qdf_iomem_t addr) +{ + return addr; +} + +struct hal_hw_txrx_ops qca6750_hal_hw_txrx_ops = { + /* init and setup */ + hal_srng_dst_hw_init_generic, + hal_srng_src_hw_init_generic, + hal_get_hw_hptp_generic, + hal_reo_setup_generic, + hal_setup_link_idle_list_generic, + hal_get_window_address_6750, + NULL, + + /* tx */ + hal_tx_desc_set_dscp_tid_table_id_6750, + hal_tx_set_dscp_tid_map_6750, + hal_tx_update_dscp_tid_6750, + hal_tx_desc_set_lmac_id_6750, + hal_tx_desc_set_buf_addr_generic, + hal_tx_desc_set_search_type_generic, + hal_tx_desc_set_search_index_generic, + hal_tx_desc_set_cache_set_num_generic, + hal_tx_comp_get_status_generic, + hal_tx_comp_get_release_reason_generic, + hal_get_wbm_internal_error_generic, + hal_tx_desc_set_mesh_en_6750, + + /* rx */ + hal_rx_msdu_start_nss_get_6750, + hal_rx_mon_hw_desc_get_mpdu_status_6750, + hal_rx_get_tlv_6750, + hal_rx_proc_phyrx_other_receive_info_tlv_6750, + hal_rx_dump_msdu_start_tlv_6750, + hal_rx_dump_msdu_end_tlv_6750, + hal_get_link_desc_size_6750, + hal_rx_mpdu_start_tid_get_6750, + hal_rx_msdu_start_reception_type_get_6750, + hal_rx_msdu_end_da_idx_get_6750, + hal_rx_msdu_desc_info_get_ptr_6750, + hal_rx_link_desc_msdu0_ptr_6750, + hal_reo_status_get_header_6750, + hal_rx_status_get_tlv_info_generic, + hal_rx_wbm_err_info_get_generic, + hal_rx_dump_mpdu_start_tlv_generic, + + hal_tx_set_pcp_tid_map_generic, + hal_tx_update_pcp_tid_generic, + hal_tx_update_tidmap_prty_generic, + hal_rx_get_rx_fragment_number_6750, + hal_rx_msdu_end_da_is_mcbc_get_6750, + hal_rx_msdu_end_sa_is_valid_get_6750, + hal_rx_msdu_end_sa_idx_get_6750, + hal_rx_desc_is_first_msdu_6750, + hal_rx_msdu_end_l3_hdr_padding_get_6750, + hal_rx_encryption_info_valid_6750, + hal_rx_print_pn_6750, + hal_rx_msdu_end_first_msdu_get_6750, + hal_rx_msdu_end_da_is_valid_get_6750, + hal_rx_msdu_end_last_msdu_get_6750, + hal_rx_get_mpdu_mac_ad4_valid_6750, + hal_rx_mpdu_start_sw_peer_id_get_6750, + hal_rx_mpdu_get_to_ds_6750, + hal_rx_mpdu_get_fr_ds_6750, + hal_rx_get_mpdu_frame_control_valid_6750, + hal_rx_mpdu_get_addr1_6750, + hal_rx_mpdu_get_addr2_6750, + hal_rx_mpdu_get_addr3_6750, + hal_rx_mpdu_get_addr4_6750, + hal_rx_get_mpdu_sequence_control_valid_6750, + hal_rx_is_unicast_6750, + hal_rx_tid_get_6750, + hal_rx_hw_desc_get_ppduid_get_6750, + NULL, + NULL, + hal_rx_msdu0_buffer_addr_lsb_6750, + hal_rx_msdu_desc_info_ptr_get_6750, + hal_ent_mpdu_desc_info_6750, + hal_dst_mpdu_desc_info_6750, + hal_rx_get_fc_valid_6750, + hal_rx_get_to_ds_flag_6750, + hal_rx_get_mac_addr2_valid_6750, + hal_rx_get_filter_category_6750, + hal_rx_get_ppdu_id_6750, + hal_reo_config_6750, + hal_rx_msdu_flow_idx_get_6750, + hal_rx_msdu_flow_idx_invalid_6750, + hal_rx_msdu_flow_idx_timeout_6750, + hal_rx_msdu_fse_metadata_get_6750, + hal_rx_msdu_cce_metadata_get_6750, + NULL, + hal_rx_tlv_get_tcp_chksum_6750, + hal_rx_get_rx_sequence_6750, + NULL, + NULL, + /* rx - msdu end fast path info fields */ + hal_rx_msdu_packet_metadata_get_generic +}; + +struct hal_hw_srng_config hw_srng_table_6750[] = { + /* TODO: max_rings can populated by querying HW capabilities */ + { /* REO_DST */ + .start_ring_id = HAL_SRNG_REO2SW1, + .max_rings = 4, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2SW1_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + .reg_size = { + HWIO_REO_R0_REO2SW2_RING_BASE_LSB_ADDR(0) - + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(0), + HWIO_REO_R2_REO2SW2_RING_HP_ADDR(0) - + HWIO_REO_R2_REO2SW1_RING_HP_ADDR(0), + }, + .max_size = + HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_EXCEPTION */ + /* Designating REO2TCL ring as exception ring. This ring is + * similar to other REO2SW rings though it is named as REO2TCL. + * Any of theREO2SW rings can be used as exception ring. + */ + .start_ring_id = HAL_SRNG_REO2TCL, + .max_rings = 1, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2TCL_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2TCL_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_REINJECT */ + .start_ring_id = HAL_SRNG_SW2REO, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_SW2REO_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_SW2REO_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_CMD */ + .start_ring_id = HAL_SRNG_REO_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_REO_CMD_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_CMD_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_STATUS */ + .start_ring_id = HAL_SRNG_REO_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats_status)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_DATA */ + .start_ring_id = HAL_SRNG_SW2TCL1, + .max_rings = 3, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_data_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + .reg_size = { + HWIO_TCL_R0_SW2TCL2_RING_BASE_LSB_ADDR(0) - + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(0), + HWIO_TCL_R2_SW2TCL2_RING_HP_ADDR(0) - + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(0), + }, + .max_size = + HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_CMD */ + .start_ring_id = HAL_SRNG_SW2TCL_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_gse_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL_CREDIT_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL_CREDIT_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_TCL_R0_SW2TCL_CREDIT_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_SW2TCL_CREDIT_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_STATUS */ + .start_ring_id = HAL_SRNG_TCL_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_status_ring)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_TCL_STATUS1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_SRC */ + .start_ring_id = HAL_SRNG_CE_0_SRC, + .max_rings = 12, + .entry_size = sizeof(struct ce_src_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_SOC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_BASE_LSB_ADDR, + HWIO_SOC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R2_SRC_RING_HP_ADDR, + }, + .reg_size = { + HWIO_SOC_CE_1_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_BASE_LSB_ADDR - + HWIO_SOC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_BASE_LSB_ADDR, + HWIO_SOC_CE_1_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_BASE_LSB_ADDR - + HWIO_SOC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_BASE_LSB_ADDR, + }, + .max_size = + HWIO_SOC_CE_0_DST_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_SOC_CE_0_DST_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT + }, + { /* CE_DST */ + .start_ring_id = HAL_SRNG_CE_0_DST, + .max_rings = 12, + .entry_size = 8 >> 2, + /*TODO: entry_size above should actually be + * sizeof(struct ce_dst_desc) >> 2, but couldn't find definition + * of struct ce_dst_desc in HW header files + */ + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_SOC_CE_0_DST_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR, + HWIO_SOC_CE_0_DST_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR, + }, + .reg_size = { + HWIO_SOC_CE_1_DST_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR - + HWIO_SOC_CE_0_DST_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR, + HWIO_SOC_CE_1_DST_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR - + HWIO_SOC_CE_0_DST_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR + }, + .max_size = + HWIO_SOC_CE_0_DST_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_SOC_CE_0_DST_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT + }, + { /* CE_DST_STATUS */ + .start_ring_id = HAL_SRNG_CE_0_DST_STATUS, + .max_rings = 12, + .entry_size = sizeof(struct ce_stat_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_SOC_CE_0_DST_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_LSB_ADDR, + HWIO_SOC_CE_0_DST_WFSS_CE_CHANNEL_DST_R2_STATUS_RING_HP_ADDR, + }, + /* TODO: check destination status ring registers */ + .reg_size = { + HWIO_SOC_CE_1_DST_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR - + HWIO_SOC_CE_0_DST_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR, + HWIO_SOC_CE_1_DST_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR - + HWIO_SOC_CE_0_DST_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR + }, + .max_size = + HWIO_SOC_CE_0_DST_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_SOC_CE_0_DST_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* WBM_IDLE_LINK */ + .start_ring_id = HAL_SRNG_WBM_IDLE_LINK, + .max_rings = 1, + .entry_size = sizeof(struct wbm_link_descriptor_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM_IDLE_LINK_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* SW2WBM_RELEASE */ + .start_ring_id = HAL_SRNG_WBM_SW_RELEASE, + .max_rings = 1, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_SW_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_SW_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* WBM2SW_RELEASE */ + .start_ring_id = HAL_SRNG_WBM2SW0_RELEASE, + .max_rings = 4, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .reg_size = { + HWIO_WBM_R0_WBM2SW1_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW1_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .max_size = + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* RXDMA_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA0_BUF0, +#ifdef IPA_OFFLOAD + .max_rings = 3, +#else + .max_rings = 2, +#endif + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW0, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA2_BUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_STATUS */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW1, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_DESC */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_DESC, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* DIR_BUF_RX_DMA_SRC */ + .start_ring_id = HAL_SRNG_DIR_BUF_RX_SRC_DMA_RING, + .max_rings = 1, + .entry_size = 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, +#ifdef WLAN_FEATURE_CIF_CFR + { /* WIFI_POS_SRC */ + .start_ring_id = HAL_SRNG_WIFI_POS_SRC_DMA_RING, + .max_rings = 1, + .entry_size = sizeof(wmi_oem_dma_buf_release_entry) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, +#endif +}; + +int32_t hal_hw_reg_offset_qca6750[] = { + /* dst */ + REG_OFFSET(DST, HP), + REG_OFFSET(DST, TP), + REG_OFFSET(DST, ID), + REG_OFFSET(DST, MISC), + REG_OFFSET(DST, HP_ADDR_LSB), + REG_OFFSET(DST, HP_ADDR_MSB), + REG_OFFSET(DST, MSI1_BASE_LSB), + REG_OFFSET(DST, MSI1_BASE_MSB), + REG_OFFSET(DST, MSI1_DATA), + REG_OFFSET(DST, BASE_LSB), + REG_OFFSET(DST, BASE_MSB), + REG_OFFSET(DST, PRODUCER_INT_SETUP), + /* src */ + REG_OFFSET(SRC, HP), + REG_OFFSET(SRC, TP), + REG_OFFSET(SRC, ID), + REG_OFFSET(SRC, MISC), + REG_OFFSET(SRC, TP_ADDR_LSB), + REG_OFFSET(SRC, TP_ADDR_MSB), + REG_OFFSET(SRC, MSI1_BASE_LSB), + REG_OFFSET(SRC, MSI1_BASE_MSB), + REG_OFFSET(SRC, MSI1_DATA), + REG_OFFSET(SRC, BASE_LSB), + REG_OFFSET(SRC, BASE_MSB), + REG_OFFSET(SRC, CONSUMER_INT_SETUP_IX0), + REG_OFFSET(SRC, CONSUMER_INT_SETUP_IX1), +}; + +/** + * hal_qca6750_attach() - Attach 6750 target specific hal_soc ops, + * offset and srng table + */ +void hal_qca6750_attach(struct hal_soc *hal_soc) +{ + hal_soc->hw_srng_table = hw_srng_table_6750; + hal_soc->hal_hw_reg_offset = hal_hw_reg_offset_qca6750; + hal_soc->ops = &qca6750_hal_hw_txrx_ops; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6750/hal_6750_rx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6750/hal_6750_rx.h new file mode 100644 index 0000000000000000000000000000000000000000..10f70c5e0cc5f6113abe837eac2a378fee7274e4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6750/hal_6750_rx.h @@ -0,0 +1,365 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HAL_6750_RX_H_ +#define _HAL_6750_RX_H_ +#include "qdf_util.h" +#include "qdf_types.h" +#include "qdf_lock.h" +#include "qdf_mem.h" +#include "qdf_nbuf.h" +#include "tcl_data_cmd.h" +#include "mac_tcl_reg_seq_hwioreg.h" +#include "phyrx_rssi_legacy.h" +#include "rx_msdu_start.h" +#include "tlv_tag_def.h" +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "cdp_txrx_mon_struct.h" +#include "qdf_trace.h" +#include "hal_rx.h" +#include "hal_tx.h" +#include "dp_types.h" +#include "hal_api_mon.h" +#include "phyrx_other_receive_info_ru_details.h" + +#define HAL_RX_MSDU_START_MIMO_SS_BITMAP(_rx_msdu_start)\ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\ + RX_MSDU_START_5_MIMO_SS_BITMAP_OFFSET)), \ + RX_MSDU_START_5_MIMO_SS_BITMAP_MASK, \ + RX_MSDU_START_5_MIMO_SS_BITMAP_LSB)) + +#define HAL_RX_MPDU_GET_SEQUENCE_NUMBER(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_NUMBER_OFFSET)), \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_NUMBER_MASK, \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_NUMBER_LSB)) + +#define HAL_RX_MSDU_END_DA_IS_MCBC_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_DA_IS_MCBC_OFFSET)), \ + RX_MSDU_END_10_DA_IS_MCBC_MASK, \ + RX_MSDU_END_10_DA_IS_MCBC_LSB)) + +#define HAL_RX_MSDU_END_SA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_SA_IS_VALID_OFFSET)), \ + RX_MSDU_END_10_SA_IS_VALID_MASK, \ + RX_MSDU_END_10_SA_IS_VALID_LSB)) + +#define HAL_RX_MSDU_END_SA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_11_SA_IDX_OFFSET)), \ + RX_MSDU_END_11_SA_IDX_MASK, \ + RX_MSDU_END_11_SA_IDX_LSB)) + +#define HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_L3_HEADER_PADDING_OFFSET)), \ + RX_MSDU_END_10_L3_HEADER_PADDING_MASK, \ + RX_MSDU_END_10_L3_HEADER_PADDING_LSB)) + +#define HAL_RX_MPDU_ENCRYPTION_INFO_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_FRAME_ENCRYPTION_INFO_VALID_OFFSET)), \ + RX_MPDU_INFO_11_FRAME_ENCRYPTION_INFO_VALID_MASK, \ + RX_MPDU_INFO_11_FRAME_ENCRYPTION_INFO_VALID_LSB)) + +#define HAL_RX_MPDU_PN_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_3_PN_31_0_OFFSET)), \ + RX_MPDU_INFO_3_PN_31_0_MASK, \ + RX_MPDU_INFO_3_PN_31_0_LSB)) + +#define HAL_RX_MPDU_PN_63_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_4_PN_63_32_OFFSET)), \ + RX_MPDU_INFO_4_PN_63_32_MASK, \ + RX_MPDU_INFO_4_PN_63_32_LSB)) + +#define HAL_RX_MPDU_PN_95_64_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_5_PN_95_64_OFFSET)), \ + RX_MPDU_INFO_5_PN_95_64_MASK, \ + RX_MPDU_INFO_5_PN_95_64_LSB)) + +#define HAL_RX_MPDU_PN_127_96_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_6_PN_127_96_OFFSET)), \ + RX_MPDU_INFO_6_PN_127_96_MASK, \ + RX_MPDU_INFO_6_PN_127_96_LSB)) + +#define HAL_RX_MSDU_END_FIRST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_FIRST_MSDU_OFFSET)), \ + RX_MSDU_END_10_FIRST_MSDU_MASK, \ + RX_MSDU_END_10_FIRST_MSDU_LSB)) + +#define HAL_RX_MSDU_END_DA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_DA_IS_VALID_OFFSET)), \ + RX_MSDU_END_10_DA_IS_VALID_MASK, \ + RX_MSDU_END_10_DA_IS_VALID_LSB)) + +#define HAL_RX_MSDU_END_LAST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_LAST_MSDU_OFFSET)), \ + RX_MSDU_END_10_LAST_MSDU_MASK, \ + RX_MSDU_END_10_LAST_MSDU_LSB)) + +#define HAL_RX_MPDU_GET_MAC_AD4_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_LSB)) + +#define HAL_RX_MPDU_INFO_SW_PEER_ID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_10_SW_PEER_ID_OFFSET)), \ + RX_MPDU_INFO_10_SW_PEER_ID_MASK, \ + RX_MPDU_INFO_10_SW_PEER_ID_LSB)) + +#define HAL_RX_MPDU_GET_TODS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_TO_DS_OFFSET)), \ + RX_MPDU_INFO_11_TO_DS_MASK, \ + RX_MPDU_INFO_11_TO_DS_LSB)) + +#define HAL_RX_MPDU_GET_FROMDS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_FR_DS_OFFSET)), \ + RX_MPDU_INFO_11_FR_DS_MASK, \ + RX_MPDU_INFO_11_FR_DS_LSB)) + +#define HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MPDU_FRAME_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MPDU_FRAME_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_11_MPDU_FRAME_CONTROL_VALID_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MAC_ADDR_AD1_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MAC_ADDR_AD1_VALID_MASK, \ + RX_MPDU_INFO_11_MAC_ADDR_AD1_VALID_LSB)) + +#define HAL_RX_MPDU_AD1_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_OFFSET)), \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_MASK, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_LSB)) + +#define HAL_RX_MPDU_AD1_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MAC_ADDR_AD2_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MAC_ADDR_AD2_VALID_MASK, \ + RX_MPDU_INFO_11_MAC_ADDR_AD2_VALID_LSB)) + +#define HAL_RX_MPDU_AD2_15_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_LSB)) + +#define HAL_RX_MPDU_AD2_47_16_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_OFFSET)), \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_MASK, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MAC_ADDR_AD3_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MAC_ADDR_AD3_VALID_MASK, \ + RX_MPDU_INFO_11_MAC_ADDR_AD3_VALID_LSB)) + +#define HAL_RX_MPDU_AD3_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_OFFSET)), \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_MASK, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_LSB)) + +#define HAL_RX_MPDU_AD3_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_OFFSET)), \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_MASK, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_LSB)) + +#define HAL_RX_MPDU_AD4_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_OFFSET)), \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_MASK, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_LSB)) + +#define HAL_RX_MPDU_AD4_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_OFFSET)), \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_MASK, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_LSB)) + +#define HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_CONTROL_VALID_LSB)) + +#define HAL_RX_MPDU_INFO_QOS_CONTROL_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_OFFSET)),\ + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_LSB)) + +#define HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_14_SA_SW_PEER_ID_OFFSET)), \ + RX_MSDU_END_14_SA_SW_PEER_ID_MASK, \ + RX_MSDU_END_14_SA_SW_PEER_ID_LSB)) + +#define HAL_RX_MSDU0_BUFFER_ADDR_LSB(link_desc_va) \ + (uint8_t *)(link_desc_va) + \ + RX_MSDU_LINK_8_MSDU_0_BUFFER_ADDR_INFO_DETAILS_BUFFER_ADDR_31_0_OFFSET + +#define HAL_RX_MSDU_DESC_INFO_PTR_GET(msdu0) \ + (uint8_t *)(msdu0) + \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_DETAILS_FIRST_MSDU_IN_MPDU_FLAG_OFFSET + +#define HAL_ENT_MPDU_DESC_INFO(ent_ring_desc) \ + (uint8_t *)(ent_ring_desc) + \ + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_DETAILS_MPDU_SEQUENCE_NUMBER_OFFSET + +#define HAL_DST_MPDU_DESC_INFO(dst_ring_desc) \ + (uint8_t *)(dst_ring_desc) + \ + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_DETAILS_MSDU_COUNT_OFFSET + +#define HAL_RX_GET_FC_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_11, MPDU_FRAME_CONTROL_VALID) + +#define HAL_RX_GET_TO_DS_FLAG(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_11, TO_DS) + +#define HAL_RX_GET_MAC_ADDR1_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_11, MAC_ADDR_AD1_VALID) + +#define HAL_RX_GET_MAC_ADDR2_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_11, MAC_ADDR_AD2_VALID) + +#define HAL_RX_GET_FILTER_CATEGORY(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_9, RXPCU_MPDU_FILTER_IN_CATEGORY) + +#define HAL_RX_GET_PPDU_ID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_9, PHY_PPDU_ID) + +#define HAL_RX_GET_SW_FRAME_GROUP_ID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_9, SW_FRAME_GROUP_ID) + +#define HAL_REO_R0_CONFIG(soc, reg_val, reo_params) \ + do { \ + reg_val &= \ + ~(HWIO_REO_R0_GENERAL_ENABLE_AGING_LIST_ENABLE_BMSK |\ + HWIO_REO_R0_GENERAL_ENABLE_AGING_FLUSH_ENABLE_BMSK); \ + reg_val |= \ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + AGING_LIST_ENABLE, 1) |\ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + AGING_FLUSH_ENABLE, 1);\ + HAL_REG_WRITE((soc), \ + HWIO_REO_R0_GENERAL_ENABLE_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET), \ + (reg_val)); \ + reg_val = \ + HAL_REG_READ((soc), \ + HWIO_REO_R0_MISC_CTL_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET)); \ + reg_val &= \ + ~(HWIO_REO_R0_MISC_CTL_FRAGMENT_DEST_RING_BMSK); \ + reg_val |= \ + HAL_SM(HWIO_REO_R0_MISC_CTL, \ + FRAGMENT_DEST_RING, \ + (reo_params)->frag_dst_ring); \ + HAL_REG_WRITE((soc), \ + HWIO_REO_R0_MISC_CTL_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET), \ + (reg_val)); \ + } while (0) + +#define HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr) \ + ((struct rx_msdu_desc_info *) \ + _OFFSET_TO_BYTE_PTR(msdu_details_ptr, \ +RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_DETAILS_RESERVED_0A_OFFSET)) + +#define HAL_RX_LINK_DESC_MSDU0_PTR(link_desc) \ + ((struct rx_msdu_details *) \ + _OFFSET_TO_BYTE_PTR((link_desc),\ + RX_MSDU_LINK_8_MSDU_0_BUFFER_ADDR_INFO_DETAILS_BUFFER_ADDR_31_0_OFFSET)) + +#define HAL_RX_MSDU_END_FLOW_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_12_FLOW_IDX_OFFSET)), \ + RX_MSDU_END_12_FLOW_IDX_MASK, \ + RX_MSDU_END_12_FLOW_IDX_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_FLOW_IDX_INVALID_OFFSET)), \ + RX_MSDU_END_10_FLOW_IDX_INVALID_MASK, \ + RX_MSDU_END_10_FLOW_IDX_INVALID_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_FLOW_IDX_TIMEOUT_OFFSET)), \ + RX_MSDU_END_10_FLOW_IDX_TIMEOUT_MASK, \ + RX_MSDU_END_10_FLOW_IDX_TIMEOUT_LSB)) + +#define HAL_RX_MSDU_END_FSE_METADATA_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_13_FSE_METADATA_OFFSET)), \ + RX_MSDU_END_13_FSE_METADATA_MASK, \ + RX_MSDU_END_13_FSE_METADATA_LSB)) + +#define HAL_RX_MSDU_END_CCE_METADATA_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_14_CCE_METADATA_OFFSET)), \ + RX_MSDU_END_14_CCE_METADATA_MASK, \ + RX_MSDU_END_14_CCE_METADATA_LSB)) + +#define HAL_RX_TLV_GET_TCP_CHKSUM(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_10_TCP_UDP_CHKSUM_OFFSET)), \ + RX_MSDU_END_10_TCP_UDP_CHKSUM_MASK, \ + RX_MSDU_END_10_TCP_UDP_CHKSUM_LSB)) + +#define HAL_RX_MSDU_END_DA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_11_DA_IDX_OR_SW_PEER_ID_OFFSET)), \ + RX_MSDU_END_11_DA_IDX_OR_SW_PEER_ID_MASK, \ + RX_MSDU_END_11_DA_IDX_OR_SW_PEER_ID_LSB)) +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6750/hal_6750_tx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6750/hal_6750_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..422b775ef8e1a56ad0abc5b4652b5ac31ad7ad05 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca6750/hal_6750_tx.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HAL_6750_TX_H_ +#define _HAL_6750_TX_H_ +#include "tcl_data_cmd.h" +#include "mac_tcl_reg_seq_hwioreg.h" +#include "phyrx_rssi_legacy.h" +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "cdp_txrx_mon_struct.h" +#include "qdf_trace.h" +#include "hal_rx.h" +#include "hal_tx.h" +#include "dp_types.h" +#include "hal_api_mon.h" + +/** + * hal_tx_desc_set_dscp_tid_table_id_6750() - Sets DSCP to TID conversion + * table ID + * @desc: Handle to Tx Descriptor + * @id: DSCP to tid conversion table to be used for this frame + * + * Return: void + */ +static void hal_tx_desc_set_dscp_tid_table_id_6750(void *desc, uint8_t id) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_5, + DSCP_TID_TABLE_NUM) |= + HAL_TX_SM(TCL_DATA_CMD_5, + DSCP_TID_TABLE_NUM, id); +} + +#define DSCP_TID_TABLE_SIZE 24 +#define NUM_WORDS_PER_DSCP_TID_TABLE (DSCP_TID_TABLE_SIZE / 4) + +/** + * hal_tx_set_dscp_tid_map_6750() - Configure default DSCP to TID map table + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id: mapping table ID - 0-31 + * + * DSCP are mapped to 8 TID values using TID values programmed + * in any of the 32 DSCP_TID_MAPS (id = 0-31). + * + * Return: none + */ +static void hal_tx_set_dscp_tid_map_6750(struct hal_soc *hal_soc, uint8_t *map, + uint8_t id) +{ + int i; + uint32_t addr, cmn_reg_addr; + uint32_t value = 0, regval; + uint8_t val[DSCP_TID_TABLE_SIZE], cnt = 0; + + struct hal_soc *soc = (struct hal_soc *)hal_soc; + + if (id >= HAL_MAX_HW_DSCP_TID_MAPS_11AX) + return; + + cmn_reg_addr = HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + + addr = HWIO_TCL_R0_DSCP_TID_MAP_n_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET, + id * NUM_WORDS_PER_DSCP_TID_TABLE); + + /* Enable read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval |= + (1 << + HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_SHFT); + + HAL_REG_WRITE(soc, cmn_reg_addr, regval); + + /* Write 8 (24 bits) DSCP-TID mappings in each interation */ + for (i = 0; i < 64; i += 8) { + value = (map[i] | + (map[i + 1] << 0x3) | + (map[i + 2] << 0x6) | + (map[i + 3] << 0x9) | + (map[i + 4] << 0xc) | + (map[i + 5] << 0xf) | + (map[i + 6] << 0x12) | + (map[i + 7] << 0x15)); + + qdf_mem_copy(&val[cnt], (void *)&value, 3); + cnt += 3; + } + + for (i = 0; i < DSCP_TID_TABLE_SIZE; i += 4) { + regval = *(uint32_t *)(val + i); + HAL_REG_WRITE(soc, addr, + (regval & HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK)); + addr += 4; + } + + /* Diasble read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval &= + ~(HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_BMSK); + + HAL_REG_WRITE(soc, cmn_reg_addr, regval); +} + +/** + * hal_tx_update_dscp_tid_6750() - Update the dscp tid map table as updated + * by the user + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id : MAP ID + * @dscp: DSCP_TID map index + * + * Return: void + */ +static void hal_tx_update_dscp_tid_6750(struct hal_soc *hal_soc, uint8_t tid, + uint8_t id, uint8_t dscp) +{ + int index; + uint32_t addr; + uint32_t value; + uint32_t regval; + struct hal_soc *soc = (struct hal_soc *)hal_soc; + + addr = HWIO_TCL_R0_DSCP_TID_MAP_n_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET, id); + + index = dscp % HAL_TX_NUM_DSCP_PER_REGISTER; + addr += 4 * (dscp / HAL_TX_NUM_DSCP_PER_REGISTER); + value = tid << (HAL_TX_BITS_PER_TID * index); + + regval = HAL_REG_READ(soc, addr); + regval &= ~(HAL_TX_TID_BITS_MASK << (HAL_TX_BITS_PER_TID * index)); + regval |= value; + + HAL_REG_WRITE(soc, addr, (regval & HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK)); +} + +/** + * hal_tx_desc_set_lmac_id_6750 - Set the lmac_id value + * @desc: Handle to Tx Descriptor + * @lmac_id: mac Id to ast matching + * b00 – mac 0 + * b01 – mac 1 + * b10 – mac 2 + * b11 – all macs (legacy HK way) + * + * Return: void + */ +static void hal_tx_desc_set_lmac_id_6750(void *desc, uint8_t lmac_id) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_4, LMAC_ID) |= + HAL_TX_SM(TCL_DATA_CMD_4, LMAC_ID, lmac_id); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v1/hal_8074v1.c b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v1/hal_8074v1.c new file mode 100644 index 0000000000000000000000000000000000000000..2296029949a67cd38057f1c774977477001f1070 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v1/hal_8074v1.c @@ -0,0 +1,1540 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "hal_api.h" +#include "target_type.h" +#include "wcss_version.h" +#include "qdf_module.h" + +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_OFFSET \ + RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_OFFSET +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_MASK \ + RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_MASK +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_LSB \ + RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_LSB +#define UNIFIED_PHYRX_HT_SIG_0_HT_SIG_INFO_PHYRX_HT_SIG_INFO_DETAILS_OFFSET \ + PHYRX_HT_SIG_0_HT_SIG_INFO_PHYRX_HT_SIG_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_L_SIG_B_0_L_SIG_B_INFO_PHYRX_L_SIG_B_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_B_0_L_SIG_B_INFO_PHYRX_L_SIG_B_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_L_SIG_A_0_L_SIG_A_INFO_PHYRX_L_SIG_A_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_A_0_L_SIG_A_INFO_PHYRX_L_SIG_A_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_VHT_SIG_A_0_VHT_SIG_A_INFO_PHYRX_VHT_SIG_A_INFO_DETAILS_OFFSET \ + PHYRX_VHT_SIG_A_0_VHT_SIG_A_INFO_PHYRX_VHT_SIG_A_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_A_SU_0_HE_SIG_A_SU_INFO_PHYRX_HE_SIG_A_SU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_A_SU_0_HE_SIG_A_SU_INFO_PHYRX_HE_SIG_A_SU_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_A_MU_DL_0_HE_SIG_A_MU_DL_INFO_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_A_MU_DL_0_HE_SIG_A_MU_DL_INFO_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B1_MU_0_HE_SIG_B1_MU_INFO_PHYRX_HE_SIG_B1_MU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B1_MU_0_HE_SIG_B1_MU_INFO_PHYRX_HE_SIG_B1_MU_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B2_MU_0_HE_SIG_B2_MU_INFO_PHYRX_HE_SIG_B2_MU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B2_MU_0_HE_SIG_B2_MU_INFO_PHYRX_HE_SIG_B2_MU_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B2_OFDMA_0_HE_SIG_B2_OFDMA_INFO_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B2_OFDMA_0_HE_SIG_B2_OFDMA_INFO_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_RSSI_LEGACY_3_RECEIVE_RSSI_INFO_PRE_RSSI_INFO_DETAILS_OFFSET \ + PHYRX_RSSI_LEGACY_3_RECEIVE_RSSI_INFO_PRE_RSSI_INFO_DETAILS_OFFSET +#define UNIFIED_PHYRX_RSSI_LEGACY_19_RECEIVE_RSSI_INFO_PREAMBLE_RSSI_INFO_DETAILS_OFFSET \ + PHYRX_RSSI_LEGACY_19_RECEIVE_RSSI_INFO_PREAMBLE_RSSI_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MPDU_START_0_RX_MPDU_INFO_RX_MPDU_INFO_DETAILS_OFFSET \ + RX_MPDU_START_0_RX_MPDU_INFO_RX_MPDU_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET \ + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET +#define UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET \ + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET \ + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET +#define UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC \ + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER +#define UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSE \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET +#define UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET \ + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET +#define UNIFIED_TCL_DATA_CMD_0_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET \ + TCL_DATA_CMD_0_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET +#define UNIFIED_TCL_DATA_CMD_1_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET \ + TCL_DATA_CMD_1_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_OFFSET \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_OFFSET +#define UNIFIED_BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB +#define UNIFIED_BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_LSB \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_LSB +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_MASK \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_MASK +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_MASK \ + WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_MASK +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_OFFSET \ + WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_OFFSET +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_LSB \ + WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_LSB + +#include "hal_8074v1_tx.h" +#include "hal_8074v1_rx.h" +#include +#include + +/** + * hal_get_window_address_8074(): Function to get hp/tp address + * @hal_soc: Pointer to hal_soc + * @addr: address offset of register + * + * Return: modified address offset of register + */ +static inline qdf_iomem_t hal_get_window_address_8074(struct hal_soc *hal_soc, + qdf_iomem_t addr) +{ + return addr; +} + +/** + * hal_rx_get_rx_fragment_number_8074v1(): Function to retrieve + * rx fragment number + * + * @nbuf: Network buffer + * Returns: rx fragment number + */ +static +uint8_t hal_rx_get_rx_fragment_number_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + /* Return first 4 bits as fragment number */ + return (HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info) & + DOT11_SEQ_FRAG_MASK); +} + +/** + * hal_rx_msdu_end_da_is_mcbc_get_8074v1(): API to check if + * pkt is MCBC from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_mcbc + */ +static uint8_t +hal_rx_msdu_end_da_is_mcbc_get_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_DA_IS_MCBC_GET(msdu_end); +} + +/** + * hal_rx_msdu_end_sa_is_valid_get_8074v1(): API to get_8074v1 the + * sa_is_valid bit from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_is_valid bit + */ +static uint8_t +hal_rx_msdu_end_sa_is_valid_get_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t sa_is_valid; + + sa_is_valid = HAL_RX_MSDU_END_SA_IS_VALID_GET(msdu_end); + + return sa_is_valid; +} + +/** + * hal_rx_msdu_end_sa_idx_get_8074v1(): API to get_8074v1 the + * sa_idx from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_idx (SA AST index) + */ +static uint16_t hal_rx_msdu_end_sa_idx_get_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t sa_idx; + + sa_idx = HAL_RX_MSDU_END_SA_IDX_GET(msdu_end); + + return sa_idx; +} + +/** + * hal_rx_desc_is_first_msdu_8074v1() - Check if first msdu + * + * @hal_soc_hdl: hal_soc handle + * @hw_desc_addr: hardware descriptor address + * + * Return: 0 - success/ non-zero failure + */ +static uint32_t hal_rx_desc_is_first_msdu_8074v1(void *hw_desc_addr) +{ + struct rx_pkt_tlvs *rx_tlvs = (struct rx_pkt_tlvs *)hw_desc_addr; + struct rx_msdu_end *msdu_end = &rx_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_GET(msdu_end, RX_MSDU_END_5, FIRST_MSDU); +} + +/** + * hal_rx_msdu_end_l3_hdr_padding_get_8074v1(): API to get_8074v1 the + * l3_header padding from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: number of l3 header padding bytes + */ +static uint32_t hal_rx_msdu_end_l3_hdr_padding_get_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint32_t l3_header_padding; + + l3_header_padding = HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(msdu_end); + + return l3_header_padding; +} + +/* + * @ hal_rx_encryption_info_valid_8074v1: Returns encryption type. + * + * @ buf: rx_tlv_hdr of the received packet + * @ Return: encryption type + */ +static uint32_t hal_rx_encryption_info_valid_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + uint32_t encryption_info = HAL_RX_MPDU_ENCRYPTION_INFO_VALID(mpdu_info); + + return encryption_info; +} + +/* + * @ hal_rx_print_pn_8074v1: Prints the PN of rx packet. + * + * @ buf: rx_tlv_hdr of the received packet + * @ Return: void + */ +static void hal_rx_print_pn_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + uint32_t pn_31_0 = HAL_RX_MPDU_PN_31_0_GET(mpdu_info); + uint32_t pn_63_32 = HAL_RX_MPDU_PN_63_32_GET(mpdu_info); + uint32_t pn_95_64 = HAL_RX_MPDU_PN_95_64_GET(mpdu_info); + uint32_t pn_127_96 = HAL_RX_MPDU_PN_127_96_GET(mpdu_info); + + hal_debug("PN number pn_127_96 0x%x pn_95_64 0x%x pn_63_32 0x%x pn_31_0 0x%x ", + pn_127_96, pn_95_64, pn_63_32, pn_31_0); +} + +/** + * hal_rx_msdu_end_first_msdu_get_8074v1: API to get first msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: first_msdu + */ +static uint8_t +hal_rx_msdu_end_first_msdu_get_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t first_msdu; + + first_msdu = HAL_RX_MSDU_END_FIRST_MSDU_GET(msdu_end); + + return first_msdu; +} + +/** + * hal_rx_msdu_end_da_is_valid_get_8074v1: API to check if da is valid + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_valid + */ +static uint8_t hal_rx_msdu_end_da_is_valid_get_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t da_is_valid; + + da_is_valid = HAL_RX_MSDU_END_DA_IS_VALID_GET(msdu_end); + + return da_is_valid; +} + +/** + * hal_rx_msdu_end_last_msdu_get_8074v1: API to get last msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: last_msdu + */ +static uint8_t hal_rx_msdu_end_last_msdu_get_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t last_msdu; + + last_msdu = HAL_RX_MSDU_END_LAST_MSDU_GET(msdu_end); + + return last_msdu; +} + +/* + * hal_rx_get_mpdu_mac_ad4_valid_8074v1(): Retrieves if mpdu 4th addr is valid + * + * @nbuf: Network buffer + * Returns: value of mpdu 4th address valid field + */ +static bool hal_rx_get_mpdu_mac_ad4_valid_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + bool ad4_valid = 0; + + ad4_valid = HAL_RX_MPDU_GET_MAC_AD4_VALID(rx_mpdu_info); + + return ad4_valid; +} + +/** + * hal_rx_mpdu_start_sw_peer_id_get_8074v1: Retrieve sw peer_id + * @buf: network buffer + * + * Return: sw peer_id + */ +static uint32_t hal_rx_mpdu_start_sw_peer_id_get_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + return HAL_RX_MPDU_INFO_SW_PEER_ID_GET( + &mpdu_start->rx_mpdu_info_details); +} + +/* + * hal_rx_mpdu_get_to_ds_8074v1(): API to get the tods info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(to_ds) + */ + +static uint32_t hal_rx_mpdu_get_to_ds_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + return HAL_RX_MPDU_GET_TODS(mpdu_info); +} + +/* + * hal_rx_mpdu_get_fr_ds_8074v1(): API to get the from ds info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(fr_ds) + */ +static uint32_t hal_rx_mpdu_get_fr_ds_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + return HAL_RX_MPDU_GET_FROMDS(mpdu_info); +} + +/* + * hal_rx_get_mpdu_frame_control_valid_8074v1(): Retrieves mpdu + * frame control valid + * + * @nbuf: Network buffer + * Returns: value of frame control valid field + */ +static uint8_t hal_rx_get_mpdu_frame_control_valid_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(rx_mpdu_info); +} + +/* + * hal_rx_mpdu_get_addr1_8074v1(): API to check get address1 of the mpdu + * + * @buf: pointer to the start of RX PKT TLV headera + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr1_8074v1(uint8_t *buf, + uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr1 { + uint32_t ad1_31_0; + uint16_t ad1_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr1 *addr = (struct hal_addr1 *)mac_addr; + uint32_t mac_addr_ad1_valid; + + mac_addr_ad1_valid = HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(mpdu_info); + + if (mac_addr_ad1_valid) { + addr->ad1_31_0 = HAL_RX_MPDU_AD1_31_0_GET(mpdu_info); + addr->ad1_47_32 = HAL_RX_MPDU_AD1_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr2_8074v1(): API to check get address2 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr2_8074v1(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr2 { + uint16_t ad2_15_0; + uint32_t ad2_47_16; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr2 *addr = (struct hal_addr2 *)mac_addr; + uint32_t mac_addr_ad2_valid; + + mac_addr_ad2_valid = HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(mpdu_info); + + if (mac_addr_ad2_valid) { + addr->ad2_15_0 = HAL_RX_MPDU_AD2_15_0_GET(mpdu_info); + addr->ad2_47_16 = HAL_RX_MPDU_AD2_47_16_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr3_8074v1(): API to get address3 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr3_8074v1(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr3 { + uint32_t ad3_31_0; + uint16_t ad3_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr3 *addr = (struct hal_addr3 *)mac_addr; + uint32_t mac_addr_ad3_valid; + + mac_addr_ad3_valid = HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(mpdu_info); + + if (mac_addr_ad3_valid) { + addr->ad3_31_0 = HAL_RX_MPDU_AD3_31_0_GET(mpdu_info); + addr->ad3_47_32 = HAL_RX_MPDU_AD3_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr4_8074v1(): API to get address4 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr4_8074v1(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr4 { + uint32_t ad4_31_0; + uint16_t ad4_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr4 *addr = (struct hal_addr4 *)mac_addr; + uint32_t mac_addr_ad4_valid; + + mac_addr_ad4_valid = HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(mpdu_info); + + if (mac_addr_ad4_valid) { + addr->ad4_31_0 = HAL_RX_MPDU_AD4_31_0_GET(mpdu_info); + addr->ad4_47_32 = HAL_RX_MPDU_AD4_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_get_mpdu_sequence_control_valid_8074v1(): Get mpdu + * sequence control valid + * + * @nbuf: Network buffer + * Returns: value of sequence control valid field + */ +static uint8_t hal_rx_get_mpdu_sequence_control_valid_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(rx_mpdu_info); +} + +/** + * hal_rx_is_unicast_8074v1: check packet is unicast frame or not. + * + * @ buf: pointer to rx pkt TLV. + * + * Return: true on unicast. + */ +static bool hal_rx_is_unicast_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint32_t grp_id; + uint8_t *rx_mpdu_info = (uint8_t *)&mpdu_start->rx_mpdu_info_details; + + grp_id = (_HAL_MS((*_OFFSET_TO_WORD_PTR((rx_mpdu_info), + RX_MPDU_INFO_0_SW_FRAME_GROUP_ID_OFFSET)), + RX_MPDU_INFO_0_SW_FRAME_GROUP_ID_MASK, + RX_MPDU_INFO_0_SW_FRAME_GROUP_ID_LSB)); + + return (HAL_MPDU_SW_FRAME_GROUP_UNICAST_DATA == grp_id) ? true : false; +} + +/** + * hal_rx_tid_get_8074v1: get tid based on qos control valid. + * + * @ buf: pointer to rx pkt TLV. + * + * Return: tid + */ +static uint32_t hal_rx_tid_get_8074v1(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint8_t *rx_mpdu_info = (uint8_t *)&mpdu_start->rx_mpdu_info_details; + uint8_t qos_control_valid = + (_HAL_MS((*_OFFSET_TO_WORD_PTR((rx_mpdu_info), + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_OFFSET)), + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_MASK, + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_LSB)); + + if (qos_control_valid) + return hal_rx_mpdu_start_tid_get_8074(buf); + + return HAL_RX_NON_QOS_TID; +} + +/** + * hal_rx_hw_desc_get_ppduid_get_8074v1(): retrieve ppdu id + * @hw_desc_addr: hw addr + * + * Return: ppdu id + */ +static uint32_t hal_rx_hw_desc_get_ppduid_get_8074v1(void *hw_desc_addr) +{ + struct rx_mpdu_info *rx_mpdu_info; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + + rx_mpdu_info = + &rx_desc->mpdu_start_tlv.rx_mpdu_start.rx_mpdu_info_details; + + return HAL_RX_GET(rx_mpdu_info, RX_MPDU_INFO_0, PHY_PPDU_ID); +} + +/** + * hal_reo_status_get_header_8074v1 - Process reo desc info + * @d - Pointer to reo descriptior + * @b - tlv type info + * @h1 - Pointer to hal_reo_status_header where info to be stored + * + * Return - none. + * + */ +static void hal_reo_status_get_header_8074v1(uint32_t *d, int b, void *h1) +{ + uint32_t val1 = 0; + struct hal_reo_status_header *h = + (struct hal_reo_status_header *)h1; + + switch (b) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + val1 = + d[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UPDATE_RX_REO_QUEUE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + default: + qdf_nofl_err("ERROR: Unknown tlv\n"); + break; + } + h->cmd_num = + HAL_GET_FIELD( + UNIFORM_REO_STATUS_HEADER_0, REO_STATUS_NUMBER, + val1); + h->exec_time = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, + CMD_EXECUTION_TIME, val1); + h->status = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, + REO_CMD_EXECUTION_STATUS, val1); + switch (b) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + val1 = + d[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UPDATE_RX_REO_QUEUE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + default: + qdf_nofl_err("ERROR: Unknown tlv\n"); + break; + } + h->tstamp = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_1, TIMESTAMP, val1); +} + +/** + * hal_rx_mpdu_start_mpdu_qos_control_valid_get_8074v1(): + * Retrieve qos control valid bit from the tlv. + * @buf: pointer to rx pkt TLV. + * + * Return: qos control value. + */ +static inline uint32_t +hal_rx_mpdu_start_mpdu_qos_control_valid_get_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + return HAL_RX_MPDU_INFO_QOS_CONTROL_VALID_GET( + &mpdu_start->rx_mpdu_info_details); +} + +/** + * hal_rx_msdu_end_sa_sw_peer_id_get_8074v1(): API to get the + * sa_sw_peer_id from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: sa_sw_peer_id index + */ +static inline uint32_t +hal_rx_msdu_end_sa_sw_peer_id_get_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(msdu_end); +} + +/** + * hal_tx_desc_set_mesh_en_8074v1 - Set mesh_enable flag in Tx descriptor + * @desc: Handle to Tx Descriptor + * @en: For raw WiFi frames, this indicates transmission to a mesh STA, + * enabling the interpretation of the 'Mesh Control Present' bit + * (bit 8) of QoS Control (otherwise this bit is ignored), + * For native WiFi frames, this indicates that a 'Mesh Control' field + * is present between the header and the LLC. + * + * Return: void + */ +static inline +void hal_tx_desc_set_mesh_en_8074v1(void *desc, uint8_t en) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_4, MESH_ENABLE) |= + HAL_TX_SM(TCL_DATA_CMD_4, MESH_ENABLE, en); +} + +static +void *hal_rx_msdu0_buffer_addr_lsb_8074v1(void *link_desc_va) +{ + return (void *)HAL_RX_MSDU0_BUFFER_ADDR_LSB(link_desc_va); +} + +static +void *hal_rx_msdu_desc_info_ptr_get_8074v1(void *msdu0) +{ + return (void *)HAL_RX_MSDU_DESC_INFO_PTR_GET(msdu0); +} + +static +void *hal_ent_mpdu_desc_info_8074v1(void *ent_ring_desc) +{ + return (void *)HAL_ENT_MPDU_DESC_INFO(ent_ring_desc); +} + +static +void *hal_dst_mpdu_desc_info_8074v1(void *dst_ring_desc) +{ + return (void *)HAL_DST_MPDU_DESC_INFO(dst_ring_desc); +} + +static +uint8_t hal_rx_get_fc_valid_8074v1(uint8_t *buf) +{ + return HAL_RX_GET_FC_VALID(buf); +} + +static uint8_t hal_rx_get_to_ds_flag_8074v1(uint8_t *buf) +{ + return HAL_RX_GET_TO_DS_FLAG(buf); +} + +static uint8_t hal_rx_get_mac_addr2_valid_8074v1(uint8_t *buf) +{ + return HAL_RX_GET_MAC_ADDR2_VALID(buf); +} + +static uint8_t hal_rx_get_filter_category_8074v1(uint8_t *buf) +{ + return HAL_RX_GET_FILTER_CATEGORY(buf); +} + +static uint32_t +hal_rx_get_ppdu_id_8074v1(uint8_t *buf) +{ + return HAL_RX_GET_PPDU_ID(buf); +} + +/** + * hal_reo_config_8074v1(): Set reo config parameters + * @soc: hal soc handle + * @reg_val: value to be set + * @reo_params: reo parameters + * + * Return: void + */ +static void +hal_reo_config_8074v1(struct hal_soc *soc, + uint32_t reg_val, + struct hal_reo_params *reo_params) +{ + HAL_REO_R0_CONFIG(soc, reg_val, reo_params); +} + +/** + * hal_rx_msdu_desc_info_get_ptr_8074v1() - Get msdu desc info ptr + * @msdu_details_ptr - Pointer to msdu_details_ptr + * + * Return - Pointer to rx_msdu_desc_info structure. + * + */ +static void *hal_rx_msdu_desc_info_get_ptr_8074v1(void *msdu_details_ptr) +{ + return HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr); +} + +/** + * hal_rx_link_desc_msdu0_ptr_8074v1 - Get pointer to rx_msdu details + * @link_desc - Pointer to link desc + * + * Return - Pointer to rx_msdu_details structure + * + */ +static void *hal_rx_link_desc_msdu0_ptr_8074v1(void *link_desc) +{ + return HAL_RX_LINK_DESC_MSDU0_PTR(link_desc); +} + +/** + * hal_rx_msdu_flow_idx_get_8074v1: API to get flow index + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index value from MSDU END TLV + */ +static inline uint32_t hal_rx_msdu_flow_idx_get_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_GET(msdu_end); +} + +/** + * hal_rx_msdu_flow_idx_invalid_8074v1: API to get flow index invalid + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index invalid value from MSDU END TLV + */ +static bool hal_rx_msdu_flow_idx_invalid_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(msdu_end); +} + +/** + * hal_rx_msdu_flow_idx_timeout_8074v1: API to get flow index timeout + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index timeout value from MSDU END TLV + */ +static bool hal_rx_msdu_flow_idx_timeout_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(msdu_end); +} + +/** + * hal_rx_msdu_fse_metadata_get_8074v1: API to get FSE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: fse metadata value from MSDU END TLV + */ +static uint32_t hal_rx_msdu_fse_metadata_get_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FSE_METADATA_GET(msdu_end); +} + +/** + * hal_rx_msdu_cce_metadata_get_8074v1: API to get CCE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: cce_metadata + */ +static uint16_t +hal_rx_msdu_cce_metadata_get_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_CCE_METADATA_GET(msdu_end); +} + +/** + * hal_rx_msdu_get_flow_params_8074v1: API to get flow index, flow index invalid + * and flow index timeout from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * @flow_invalid: pointer to return value of flow_idx_valid + * @flow_timeout: pointer to return value of flow_idx_timeout + * @flow_index: pointer to return value of flow_idx + * + * Return: none + */ +static inline void +hal_rx_msdu_get_flow_params_8074v1(uint8_t *buf, + bool *flow_invalid, + bool *flow_timeout, + uint32_t *flow_index) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + *flow_invalid = HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(msdu_end); + *flow_timeout = HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(msdu_end); + *flow_index = HAL_RX_MSDU_END_FLOW_IDX_GET(msdu_end); +} + +/** + * hal_rx_tlv_get_tcp_chksum_8074v1() - API to get tcp checksum + * @buf: rx_tlv_hdr + * + * Return: tcp checksum + */ +static uint16_t +hal_rx_tlv_get_tcp_chksum_8074v1(uint8_t *buf) +{ + return HAL_RX_TLV_GET_TCP_CHKSUM(buf); +} + +/** + * hal_rx_get_rx_sequence_8074v1(): Function to retrieve rx sequence number + * + * @nbuf: Network buffer + * Returns: rx sequence number + */ +static +uint16_t hal_rx_get_rx_sequence_8074v1(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info); +} + +struct hal_hw_txrx_ops qca8074_hal_hw_txrx_ops = { + + /* init and setup */ + hal_srng_dst_hw_init_generic, + hal_srng_src_hw_init_generic, + hal_get_hw_hptp_generic, + hal_reo_setup_generic, + hal_setup_link_idle_list_generic, + hal_get_window_address_8074, + NULL, + + /* tx */ + hal_tx_desc_set_dscp_tid_table_id_8074, + hal_tx_set_dscp_tid_map_8074, + hal_tx_update_dscp_tid_8074, + hal_tx_desc_set_lmac_id_8074, + hal_tx_desc_set_buf_addr_generic, + hal_tx_desc_set_search_type_generic, + hal_tx_desc_set_search_index_generic, + hal_tx_desc_set_cache_set_num_generic, + hal_tx_comp_get_status_generic, + hal_tx_comp_get_release_reason_generic, + hal_get_wbm_internal_error_generic, + hal_tx_desc_set_mesh_en_8074v1, + /* rx */ + hal_rx_msdu_start_nss_get_8074, + hal_rx_mon_hw_desc_get_mpdu_status_8074, + hal_rx_get_tlv_8074, + hal_rx_proc_phyrx_other_receive_info_tlv_8074, + hal_rx_dump_msdu_start_tlv_8074, + hal_rx_dump_msdu_end_tlv_8074, + hal_get_link_desc_size_8074, + hal_rx_mpdu_start_tid_get_8074, + hal_rx_msdu_start_reception_type_get_8074, + hal_rx_msdu_end_da_idx_get_8074, + hal_rx_msdu_desc_info_get_ptr_8074v1, + hal_rx_link_desc_msdu0_ptr_8074v1, + hal_reo_status_get_header_8074v1, + hal_rx_status_get_tlv_info_generic, + hal_rx_wbm_err_info_get_generic, + hal_rx_dump_mpdu_start_tlv_generic, + + hal_tx_set_pcp_tid_map_generic, + hal_tx_update_pcp_tid_generic, + hal_tx_update_tidmap_prty_generic, + hal_rx_get_rx_fragment_number_8074v1, + hal_rx_msdu_end_da_is_mcbc_get_8074v1, + hal_rx_msdu_end_sa_is_valid_get_8074v1, + hal_rx_msdu_end_sa_idx_get_8074v1, + hal_rx_desc_is_first_msdu_8074v1, + hal_rx_msdu_end_l3_hdr_padding_get_8074v1, + hal_rx_encryption_info_valid_8074v1, + hal_rx_print_pn_8074v1, + hal_rx_msdu_end_first_msdu_get_8074v1, + hal_rx_msdu_end_da_is_valid_get_8074v1, + hal_rx_msdu_end_last_msdu_get_8074v1, + hal_rx_get_mpdu_mac_ad4_valid_8074v1, + hal_rx_mpdu_start_sw_peer_id_get_8074v1, + hal_rx_mpdu_get_to_ds_8074v1, + hal_rx_mpdu_get_fr_ds_8074v1, + hal_rx_get_mpdu_frame_control_valid_8074v1, + hal_rx_mpdu_get_addr1_8074v1, + hal_rx_mpdu_get_addr2_8074v1, + hal_rx_mpdu_get_addr3_8074v1, + hal_rx_mpdu_get_addr4_8074v1, + hal_rx_get_mpdu_sequence_control_valid_8074v1, + hal_rx_is_unicast_8074v1, + hal_rx_tid_get_8074v1, + hal_rx_hw_desc_get_ppduid_get_8074v1, + hal_rx_mpdu_start_mpdu_qos_control_valid_get_8074v1, + hal_rx_msdu_end_sa_sw_peer_id_get_8074v1, + hal_rx_msdu0_buffer_addr_lsb_8074v1, + hal_rx_msdu_desc_info_ptr_get_8074v1, + hal_ent_mpdu_desc_info_8074v1, + hal_dst_mpdu_desc_info_8074v1, + hal_rx_get_fc_valid_8074v1, + hal_rx_get_to_ds_flag_8074v1, + hal_rx_get_mac_addr2_valid_8074v1, + hal_rx_get_filter_category_8074v1, + hal_rx_get_ppdu_id_8074v1, + hal_reo_config_8074v1, + hal_rx_msdu_flow_idx_get_8074v1, + hal_rx_msdu_flow_idx_invalid_8074v1, + hal_rx_msdu_flow_idx_timeout_8074v1, + hal_rx_msdu_fse_metadata_get_8074v1, + hal_rx_msdu_cce_metadata_get_8074v1, + hal_rx_msdu_get_flow_params_8074v1, + hal_rx_tlv_get_tcp_chksum_8074v1, + hal_rx_get_rx_sequence_8074v1, + NULL, + NULL, + /* rx - msdu fast path info fields */ + hal_rx_msdu_packet_metadata_get_generic, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, +}; + +struct hal_hw_srng_config hw_srng_table_8074[] = { + /* TODO: max_rings can populated by querying HW capabilities */ + { /* REO_DST */ + .start_ring_id = HAL_SRNG_REO2SW1, + .max_rings = 4, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2SW1_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + .reg_size = { + HWIO_REO_R0_REO2SW2_RING_BASE_LSB_ADDR(0) - + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(0), + HWIO_REO_R2_REO2SW2_RING_HP_ADDR(0) - + HWIO_REO_R2_REO2SW1_RING_HP_ADDR(0), + }, + .max_size = + HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_EXCEPTION */ + /* Designating REO2TCL ring as exception ring. This ring is + * similar to other REO2SW rings though it is named as REO2TCL. + * Any of theREO2SW rings can be used as exception ring. + */ + .start_ring_id = HAL_SRNG_REO2TCL, + .max_rings = 1, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2TCL_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2TCL_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_REINJECT */ + .start_ring_id = HAL_SRNG_SW2REO, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_SW2REO_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_SW2REO_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_CMD */ + .start_ring_id = HAL_SRNG_REO_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_REO_CMD_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_CMD_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_STATUS */ + .start_ring_id = HAL_SRNG_REO_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats_status)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_DATA */ + .start_ring_id = HAL_SRNG_SW2TCL1, + .max_rings = 3, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_data_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + .reg_size = { + HWIO_TCL_R0_SW2TCL2_RING_BASE_LSB_ADDR(0) - + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(0), + HWIO_TCL_R2_SW2TCL2_RING_HP_ADDR(0) - + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(0), + }, + .max_size = + HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_CMD */ + .start_ring_id = HAL_SRNG_SW2TCL_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_gse_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL_CMD_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_STATUS */ + .start_ring_id = HAL_SRNG_TCL_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_status_ring)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_TCL_STATUS1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_SRC */ + .start_ring_id = HAL_SRNG_CE_0_SRC, + .max_rings = 12, + .entry_size = sizeof(struct ce_src_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET), + }, + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_DST */ + .start_ring_id = HAL_SRNG_CE_0_DST, + .max_rings = 12, + .entry_size = 8 >> 2, + /*TODO: entry_size above should actually be + * sizeof(struct ce_dst_desc) >> 2, but couldn't find definition + * of struct ce_dst_desc in HW header files + */ + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + }, + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_DST_STATUS */ + .start_ring_id = HAL_SRNG_CE_0_DST_STATUS, + .max_rings = 12, + .entry_size = sizeof(struct ce_stat_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + }, + /* TODO: check destination status ring registers */ + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* WBM_IDLE_LINK */ + .start_ring_id = HAL_SRNG_WBM_IDLE_LINK, + .max_rings = 1, + .entry_size = sizeof(struct wbm_link_descriptor_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM_IDLE_LINK_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* SW2WBM_RELEASE */ + .start_ring_id = HAL_SRNG_WBM_SW_RELEASE, + .max_rings = 1, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_SW_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_SW_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* WBM2SW_RELEASE */ + .start_ring_id = HAL_SRNG_WBM2SW0_RELEASE, + .max_rings = 4, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .reg_size = { + HWIO_WBM_R0_WBM2SW1_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW1_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .max_size = + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* RXDMA_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA0_BUF0, +#ifdef IPA_OFFLOAD + .max_rings = 3, +#else + .max_rings = 2, +#endif + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW0, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA2_BUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_STATUS */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW1, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_DESC */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_DESC, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* DIR_BUF_RX_DMA_SRC */ + .start_ring_id = HAL_SRNG_DIR_BUF_RX_SRC_DMA_RING, + .max_rings = 1, + .entry_size = 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, +#ifdef WLAN_FEATURE_CIF_CFR + { /* WIFI_POS_SRC */ + .start_ring_id = HAL_SRNG_WIFI_POS_SRC_DMA_RING, + .max_rings = 1, + .entry_size = sizeof(wmi_oem_dma_buf_release_entry) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, +#endif +}; + +int32_t hal_hw_reg_offset_qca8074[] = { + /* dst */ + REG_OFFSET(DST, HP), + REG_OFFSET(DST, TP), + REG_OFFSET(DST, ID), + REG_OFFSET(DST, MISC), + REG_OFFSET(DST, HP_ADDR_LSB), + REG_OFFSET(DST, HP_ADDR_MSB), + REG_OFFSET(DST, MSI1_BASE_LSB), + REG_OFFSET(DST, MSI1_BASE_MSB), + REG_OFFSET(DST, MSI1_DATA), + REG_OFFSET(DST, BASE_LSB), + REG_OFFSET(DST, BASE_MSB), + REG_OFFSET(DST, PRODUCER_INT_SETUP), + /* src */ + REG_OFFSET(SRC, HP), + REG_OFFSET(SRC, TP), + REG_OFFSET(SRC, ID), + REG_OFFSET(SRC, MISC), + REG_OFFSET(SRC, TP_ADDR_LSB), + REG_OFFSET(SRC, TP_ADDR_MSB), + REG_OFFSET(SRC, MSI1_BASE_LSB), + REG_OFFSET(SRC, MSI1_BASE_MSB), + REG_OFFSET(SRC, MSI1_DATA), + REG_OFFSET(SRC, BASE_LSB), + REG_OFFSET(SRC, BASE_MSB), + REG_OFFSET(SRC, CONSUMER_INT_SETUP_IX0), + REG_OFFSET(SRC, CONSUMER_INT_SETUP_IX1), +}; + +/** + * hal_qca8074_attach() - Attach 8074 target specific hal_soc ops, + * offset and srng table + */ +void hal_qca8074_attach(struct hal_soc *hal_soc) +{ + hal_soc->hw_srng_table = hw_srng_table_8074; + hal_soc->hal_hw_reg_offset = hal_hw_reg_offset_qca8074; + hal_soc->ops = &qca8074_hal_hw_txrx_ops; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v1/hal_8074v1_rx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v1/hal_8074v1_rx.h new file mode 100644 index 0000000000000000000000000000000000000000..6df3e2d79928ab6328aafa2825c7d904a13c89e9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v1/hal_8074v1_rx.h @@ -0,0 +1,675 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "cdp_txrx_mon_struct.h" +#include "qdf_trace.h" +#include "hal_rx.h" +#include "hal_tx.h" +#include "dp_types.h" +#include "hal_api_mon.h" + +#define HAL_RX_MPDU_GET_SEQUENCE_NUMBER(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_MASK, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_LSB)) + +#define HAL_RX_MSDU_END_DA_IS_MCBC_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_DA_IS_MCBC_OFFSET)), \ + RX_MSDU_END_5_DA_IS_MCBC_MASK, \ + RX_MSDU_END_5_DA_IS_MCBC_LSB)) + +#define HAL_RX_MSDU_END_SA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_SA_IS_VALID_OFFSET)), \ + RX_MSDU_END_5_SA_IS_VALID_MASK, \ + RX_MSDU_END_5_SA_IS_VALID_LSB)) + +#define HAL_RX_MSDU_END_SA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_13_SA_IDX_OFFSET)), \ + RX_MSDU_END_13_SA_IDX_MASK, \ + RX_MSDU_END_13_SA_IDX_LSB)) + +#define HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_L3_HEADER_PADDING_OFFSET)), \ + RX_MSDU_END_5_L3_HEADER_PADDING_MASK, \ + RX_MSDU_END_5_L3_HEADER_PADDING_LSB)) + +#define HAL_RX_MPDU_ENCRYPTION_INFO_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_OFFSET)), \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_MASK, \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_LSB)) + +#define HAL_RX_MPDU_PN_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_4_PN_31_0_OFFSET)), \ + RX_MPDU_INFO_4_PN_31_0_MASK, \ + RX_MPDU_INFO_4_PN_31_0_LSB)) + +#define HAL_RX_MPDU_PN_63_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_5_PN_63_32_OFFSET)), \ + RX_MPDU_INFO_5_PN_63_32_MASK, \ + RX_MPDU_INFO_5_PN_63_32_LSB)) + +#define HAL_RX_MPDU_PN_95_64_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_6_PN_95_64_OFFSET)), \ + RX_MPDU_INFO_6_PN_95_64_MASK, \ + RX_MPDU_INFO_6_PN_95_64_LSB)) + +#define HAL_RX_MPDU_PN_127_96_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_7_PN_127_96_OFFSET)), \ + RX_MPDU_INFO_7_PN_127_96_MASK, \ + RX_MPDU_INFO_7_PN_127_96_LSB)) + +#define HAL_RX_MSDU_END_FIRST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_FIRST_MSDU_OFFSET)), \ + RX_MSDU_END_5_FIRST_MSDU_MASK, \ + RX_MSDU_END_5_FIRST_MSDU_LSB)) + +#define HAL_RX_MSDU_END_DA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_DA_IS_VALID_OFFSET)), \ + RX_MSDU_END_5_DA_IS_VALID_MASK, \ + RX_MSDU_END_5_DA_IS_VALID_LSB)) + +#define HAL_RX_MSDU_END_LAST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_LAST_MSDU_OFFSET)), \ + RX_MSDU_END_5_LAST_MSDU_MASK, \ + RX_MSDU_END_5_LAST_MSDU_LSB)) + +#define HAL_RX_MPDU_GET_MAC_AD4_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_LSB)) + +#define HAL_RX_MPDU_INFO_SW_PEER_ID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_1_SW_PEER_ID_OFFSET)), \ + RX_MPDU_INFO_1_SW_PEER_ID_MASK, \ + RX_MPDU_INFO_1_SW_PEER_ID_LSB)) + +#define HAL_RX_MPDU_GET_TODS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_TO_DS_OFFSET)), \ + RX_MPDU_INFO_2_TO_DS_MASK, \ + RX_MPDU_INFO_2_TO_DS_LSB)) + +#define HAL_RX_MPDU_GET_FROMDS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_FR_DS_OFFSET)), \ + RX_MPDU_INFO_2_FR_DS_MASK, \ + RX_MPDU_INFO_2_FR_DS_LSB)) + +#define HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_LSB)) + +#define HAL_RX_MPDU_AD1_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_OFFSET)), \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_MASK, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_LSB)) + +#define HAL_RX_MPDU_AD1_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_LSB)) + +#define HAL_RX_MPDU_AD2_15_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_LSB)) + +#define HAL_RX_MPDU_AD2_47_16_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_OFFSET)), \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_MASK, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_LSB)) + +#define HAL_RX_MPDU_AD3_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_OFFSET)), \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_MASK, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_LSB)) + +#define HAL_RX_MPDU_AD3_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_OFFSET)), \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_MASK, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_LSB)) + +#define HAL_RX_MPDU_AD4_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_OFFSET)), \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_MASK, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_LSB)) + +#define HAL_RX_MPDU_AD4_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_OFFSET)), \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_MASK, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_LSB)) + +#define HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_LSB)) + +#define HAL_RX_MPDU_INFO_QOS_CONTROL_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_LSB)) + +#define HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_16_SA_SW_PEER_ID_OFFSET)), \ + RX_MSDU_END_16_SA_SW_PEER_ID_MASK, \ + RX_MSDU_END_16_SA_SW_PEER_ID_LSB)) + +#define HAL_RX_MSDU0_BUFFER_ADDR_LSB(link_desc_va) \ + (uint8_t *)(link_desc_va) + \ + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET + +#define HAL_RX_MSDU_DESC_INFO_PTR_GET(msdu0) \ + (uint8_t *)(msdu0) + \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET + +#define HAL_ENT_MPDU_DESC_INFO(ent_ring_desc) \ + (uint8_t *)(ent_ring_desc) + \ + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET + +#define HAL_DST_MPDU_DESC_INFO(dst_ring_desc) \ + (uint8_t *)(dst_ring_desc) + \ + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET + +#define HAL_RX_GET_FC_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, MPDU_FRAME_CONTROL_VALID) + +#define HAL_RX_GET_TO_DS_FLAG(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, TO_DS) + +#define HAL_RX_GET_MAC_ADDR1_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, MAC_ADDR_AD1_VALID) + +#define HAL_RX_GET_MAC_ADDR2_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, MAC_ADDR_AD2_VALID) + +#define HAL_RX_GET_FILTER_CATEGORY(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_0, RXPCU_MPDU_FILTER_IN_CATEGORY) + +#define HAL_RX_GET_PPDU_ID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_0, PHY_PPDU_ID) + +#define HAL_RX_GET_SW_FRAME_GROUP_ID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_0, SW_FRAME_GROUP_ID) + +#define HAL_REO_R0_CONFIG(soc, reg_val, reo_params) \ + do { \ + reg_val &= \ + ~(HWIO_REO_R0_GENERAL_ENABLE_FRAGMENT_DEST_RING_BMSK |\ + HWIO_REO_R0_GENERAL_ENABLE_AGING_LIST_ENABLE_BMSK | \ + HWIO_REO_R0_GENERAL_ENABLE_AGING_FLUSH_ENABLE_BMSK); \ + reg_val |= \ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + FRAGMENT_DEST_RING, \ + (reo_params)->frag_dst_ring) | \ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + AGING_LIST_ENABLE, 1) |\ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + AGING_FLUSH_ENABLE, 1);\ + HAL_REG_WRITE((soc), \ + HWIO_REO_R0_GENERAL_ENABLE_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET),\ + (reg_val)); \ + } while (0) + +#define HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr) \ + ((struct rx_msdu_desc_info *) \ + _OFFSET_TO_BYTE_PTR((msdu_details_ptr), \ +UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET)) + +#define HAL_RX_LINK_DESC_MSDU0_PTR(link_desc) \ + ((struct rx_msdu_details *) \ + _OFFSET_TO_BYTE_PTR((link_desc),\ + UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET)) + +#define HAL_RX_MSDU_END_FLOW_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_14_FLOW_IDX_OFFSET)), \ + RX_MSDU_END_14_FLOW_IDX_MASK, \ + RX_MSDU_END_14_FLOW_IDX_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_FLOW_IDX_INVALID_OFFSET)), \ + RX_MSDU_END_5_FLOW_IDX_INVALID_MASK, \ + RX_MSDU_END_5_FLOW_IDX_INVALID_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_FLOW_IDX_TIMEOUT_OFFSET)), \ + RX_MSDU_END_5_FLOW_IDX_TIMEOUT_MASK, \ + RX_MSDU_END_5_FLOW_IDX_TIMEOUT_LSB)) + +#define HAL_RX_MSDU_END_FSE_METADATA_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_15_FSE_METADATA_OFFSET)), \ + RX_MSDU_END_15_FSE_METADATA_MASK, \ + RX_MSDU_END_15_FSE_METADATA_LSB)) + +#define HAL_RX_MSDU_END_CCE_METADATA_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_16_CCE_METADATA_OFFSET)), \ + RX_MSDU_END_16_CCE_METADATA_MASK, \ + RX_MSDU_END_16_CCE_METADATA_LSB)) + +#define HAL_RX_TLV_GET_TCP_CHKSUM(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_1_TCP_UDP_CHKSUM_OFFSET)), \ + RX_MSDU_END_1_TCP_UDP_CHKSUM_MASK, \ + RX_MSDU_END_1_TCP_UDP_CHKSUM_LSB)) + +/* + * hal_rx_msdu_start_nss_get_8074(): API to get the NSS + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(nss) + */ +static uint32_t +hal_rx_msdu_start_nss_get_8074(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t nss; + + nss = HAL_RX_MSDU_START_NSS_GET(msdu_start); + return nss; +} + +/** + * hal_rx_mon_hw_desc_get_mpdu_status_8074(): Retrieve MPDU status + * + * @ hw_desc_addr: Start address of Rx HW TLVs + * @ rs: Status for monitor mode + * + * Return: void + */ +static void hal_rx_mon_hw_desc_get_mpdu_status_8074(void *hw_desc_addr, + struct mon_rx_status *rs) +{ + struct rx_msdu_start *rx_msdu_start; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + uint32_t reg_value; + const uint32_t sgi_hw_to_cdp[] = { + CDP_SGI_0_8_US, + CDP_SGI_0_4_US, + CDP_SGI_1_6_US, + CDP_SGI_3_2_US, + }; + + rx_msdu_start = &rx_desc->msdu_start_tlv.rx_msdu_start; + + HAL_RX_GET_MSDU_AGGREGATION(rx_desc, rs); + + rs->ant_signal_db = HAL_RX_GET(rx_msdu_start, + RX_MSDU_START_5, USER_RSSI); + rs->is_stbc = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, STBC); + + reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, SGI); + rs->sgi = sgi_hw_to_cdp[reg_value]; + rs->nr_ant = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, NSS); + reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, RECEPTION_TYPE); + rs->beamformed = (reg_value == HAL_RX_RECEPTION_TYPE_MU_MIMO) ? 1 : 0; + /* TODO: rs->beamformed should be set for SU beamforming also */ +} + +#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2) +static uint32_t hal_get_link_desc_size_8074(void) +{ + return LINK_DESC_SIZE; +} + +/* + * hal_rx_get_tlv_8074(): API to get the tlv + * + * @rx_tlv: TLV data extracted from the rx packet + * Return: uint8_t + */ +static uint8_t hal_rx_get_tlv_8074(void *rx_tlv) +{ + return HAL_RX_GET(rx_tlv, PHYRX_RSSI_LEGACY_35, RECEIVE_BANDWIDTH); +} + +/** + * hal_rx_proc_phyrx_other_receive_info_tlv_8074() + * -process other receive info TLV + * @rx_tlv_hdr: pointer to TLV header + * @ppdu_info: pointer to ppdu_info + * + * Return: None + */ +static +void hal_rx_proc_phyrx_other_receive_info_tlv_8074(void *rx_tlv_hdr, + void *ppdu_info) +{ +} + + +/** + * hal_rx_dump_msdu_start_tlv_8074() : dump RX msdu_start TLV in structured + * human readable format. + * @ msdu_start: pointer the msdu_start TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static void hal_rx_dump_msdu_start_tlv_8074(void *msdustart, + uint8_t dbg_level) +{ + struct rx_msdu_start *msdu_start = (struct rx_msdu_start *)msdustart; + + QDF_TRACE(QDF_MODULE_ID_DP, dbg_level, + "rx_msdu_start tlv - " + "rxpcu_mpdu_filter_in_category: %d " + "sw_frame_group_id: %d " + "phy_ppdu_id: %d " + "msdu_length: %d " + "ipsec_esp: %d " + "l3_offset: %d " + "ipsec_ah: %d " + "l4_offset: %d " + "msdu_number: %d " + "decap_format: %d " + "ipv4_proto: %d " + "ipv6_proto: %d " + "tcp_proto: %d " + "udp_proto: %d " + "ip_frag: %d " + "tcp_only_ack: %d " + "da_is_bcast_mcast: %d " + "ip4_protocol_ip6_next_header: %d " + "toeplitz_hash_2_or_4: %d " + "flow_id_toeplitz: %d " + "user_rssi: %d " + "pkt_type: %d " + "stbc: %d " + "sgi: %d " + "rate_mcs: %d " + "receive_bandwidth: %d " + "reception_type: %d " + "toeplitz_hash: %d " + "nss: %d " + "ppdu_start_timestamp: %d " + "sw_phy_meta_data: %d ", + msdu_start->rxpcu_mpdu_filter_in_category, + msdu_start->sw_frame_group_id, + msdu_start->phy_ppdu_id, + msdu_start->msdu_length, + msdu_start->ipsec_esp, + msdu_start->l3_offset, + msdu_start->ipsec_ah, + msdu_start->l4_offset, + msdu_start->msdu_number, + msdu_start->decap_format, + msdu_start->ipv4_proto, + msdu_start->ipv6_proto, + msdu_start->tcp_proto, + msdu_start->udp_proto, + msdu_start->ip_frag, + msdu_start->tcp_only_ack, + msdu_start->da_is_bcast_mcast, + msdu_start->ip4_protocol_ip6_next_header, + msdu_start->toeplitz_hash_2_or_4, + msdu_start->flow_id_toeplitz, + msdu_start->user_rssi, + msdu_start->pkt_type, + msdu_start->stbc, + msdu_start->sgi, + msdu_start->rate_mcs, + msdu_start->receive_bandwidth, + msdu_start->reception_type, + msdu_start->toeplitz_hash, + msdu_start->nss, + msdu_start->ppdu_start_timestamp, + msdu_start->sw_phy_meta_data); +} + +/** + * hal_rx_dump_msdu_end_tlv_8074: dump RX msdu_end TLV in structured + * human readable format. + * @ msdu_end: pointer the msdu_end TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static void hal_rx_dump_msdu_end_tlv_8074(void *msduend, + uint8_t dbg_level) +{ + struct rx_msdu_end *msdu_end = (struct rx_msdu_end *)msduend; + + QDF_TRACE(QDF_MODULE_ID_DP, dbg_level, + "rx_msdu_end tlv - " + "rxpcu_mpdu_filter_in_category: %d " + "sw_frame_group_id: %d " + "phy_ppdu_id: %d " + "ip_hdr_chksum: %d " + "tcp_udp_chksum: %d " + "key_id_octet: %d " + "cce_super_rule: %d " + "cce_classify_not_done_truncat: %d " + "cce_classify_not_done_cce_dis: %d " + "ext_wapi_pn_63_48: %d " + "ext_wapi_pn_95_64: %d " + "ext_wapi_pn_127_96: %d " + "reported_mpdu_length: %d " + "first_msdu: %d " + "last_msdu: %d " + "sa_idx_timeout: %d " + "da_idx_timeout: %d " + "msdu_limit_error: %d " + "flow_idx_timeout: %d " + "flow_idx_invalid: %d " + "wifi_parser_error: %d " + "amsdu_parser_error: %d " + "sa_is_valid: %d " + "da_is_valid: %d " + "da_is_mcbc: %d " + "l3_header_padding: %d " + "ipv6_options_crc: %d " + "tcp_seq_number: %d " + "tcp_ack_number: %d " + "tcp_flag: %d " + "lro_eligible: %d " + "window_size: %d " + "da_offset: %d " + "sa_offset: %d " + "da_offset_valid: %d " + "sa_offset_valid: %d " + "rule_indication_31_0: %d " + "rule_indication_63_32: %d " + "sa_idx: %d " + "da_idx: %d " + "msdu_drop: %d " + "reo_destination_indication: %d " + "flow_idx: %d " + "fse_metadata: %d " + "cce_metadata: %d " + "sa_sw_peer_id: %d ", + msdu_end->rxpcu_mpdu_filter_in_category, + msdu_end->sw_frame_group_id, + msdu_end->phy_ppdu_id, + msdu_end->ip_hdr_chksum, + msdu_end->tcp_udp_chksum, + msdu_end->key_id_octet, + msdu_end->cce_super_rule, + msdu_end->cce_classify_not_done_truncate, + msdu_end->cce_classify_not_done_cce_dis, + msdu_end->ext_wapi_pn_63_48, + msdu_end->ext_wapi_pn_95_64, + msdu_end->ext_wapi_pn_127_96, + msdu_end->reported_mpdu_length, + msdu_end->first_msdu, + msdu_end->last_msdu, + msdu_end->sa_idx_timeout, + msdu_end->da_idx_timeout, + msdu_end->msdu_limit_error, + msdu_end->flow_idx_timeout, + msdu_end->flow_idx_invalid, + msdu_end->wifi_parser_error, + msdu_end->amsdu_parser_error, + msdu_end->sa_is_valid, + msdu_end->da_is_valid, + msdu_end->da_is_mcbc, + msdu_end->l3_header_padding, + msdu_end->ipv6_options_crc, + msdu_end->tcp_seq_number, + msdu_end->tcp_ack_number, + msdu_end->tcp_flag, + msdu_end->lro_eligible, + msdu_end->window_size, + msdu_end->da_offset, + msdu_end->sa_offset, + msdu_end->da_offset_valid, + msdu_end->sa_offset_valid, + msdu_end->rule_indication_31_0, + msdu_end->rule_indication_63_32, + msdu_end->sa_idx, + msdu_end->da_idx, + msdu_end->msdu_drop, + msdu_end->reo_destination_indication, + msdu_end->flow_idx, + msdu_end->fse_metadata, + msdu_end->cce_metadata, + msdu_end->sa_sw_peer_id); +} + + +/* + * Get tid from RX_MPDU_START + */ +#define HAL_RX_MPDU_INFO_TID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_3_TID_OFFSET)), \ + RX_MPDU_INFO_3_TID_MASK, \ + RX_MPDU_INFO_3_TID_LSB)) + +static uint32_t hal_rx_mpdu_start_tid_get_8074(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint32_t tid; + + tid = HAL_RX_MPDU_INFO_TID_GET(&mpdu_start->rx_mpdu_info_details); + + return tid; +} + +#define HAL_RX_MSDU_START_RECEPTION_TYPE_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start), \ + RX_MSDU_START_5_RECEPTION_TYPE_OFFSET)), \ + RX_MSDU_START_5_RECEPTION_TYPE_MASK, \ + RX_MSDU_START_5_RECEPTION_TYPE_LSB)) + +/* + * hal_rx_msdu_start_reception_type_get(): API to get the reception type + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(reception_type) + */ +static uint32_t hal_rx_msdu_start_reception_type_get_8074(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t reception_type; + + reception_type = HAL_RX_MSDU_START_RECEPTION_TYPE_GET(msdu_start); + + return reception_type; +} + +#define HAL_RX_MSDU_END_DA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_13_DA_IDX_OFFSET)), \ + RX_MSDU_END_13_DA_IDX_MASK, \ + RX_MSDU_END_13_DA_IDX_LSB)) + + /** + * hal_rx_msdu_end_da_idx_get_8074: API to get da_idx + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da index + */ +static uint16_t hal_rx_msdu_end_da_idx_get_8074(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t da_idx; + + da_idx = HAL_RX_MSDU_END_DA_IDX_GET(msdu_end); + + return da_idx; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v1/hal_8074v1_tx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v1/hal_8074v1_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..b051e18c8bf352ea79f407cc1f15f714a2beaae2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v1/hal_8074v1_tx.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "cdp_txrx_mon_struct.h" +#include "qdf_trace.h" +#include "hal_rx.h" +#include "hal_tx.h" +#include "dp_types.h" +#include "hal_api_mon.h" + +/** + * hal_tx_desc_set_dscp_tid_table_id_8074() - Sets DSCP to TID conversion + * table ID + * @desc: Handle to Tx Descriptor + * @id: DSCP to tid conversion table to be used for this frame + * + * Return: void + */ + +static void hal_tx_desc_set_dscp_tid_table_id_8074(void *desc, uint8_t id) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_3, + DSCP_TO_TID_PRIORITY_TABLE_ID) |= + HAL_TX_SM(TCL_DATA_CMD_3, + DSCP_TO_TID_PRIORITY_TABLE_ID, id); +} + +/** + * hal_tx_set_dscp_tid_map_8074() - Configure default DSCP to TID map table + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id: mapping table ID - 0,1 + * + * DSCP are mapped to 8 TID values using TID values programmed + * in two set of mapping registers DSCP_TID1_MAP_<0 to 6> (id = 0) + * and DSCP_TID2_MAP_<0 to 6> (id = 1) + * Each mapping register has TID mapping for 10 DSCP values + * + * Return: none + */ +static void hal_tx_set_dscp_tid_map_8074(struct hal_soc *soc, uint8_t *map, + uint8_t id) +{ + int i; + uint32_t addr; + uint32_t value; + + if (id == HAL_TX_DSCP_TID_MAP_TABLE_DEFAULT) { + addr = HWIO_TCL_R0_DSCP_TID1_MAP_0_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + } else { + addr = HWIO_TCL_R0_DSCP_TID2_MAP_0_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + } + + for (i = 0; i < 64; i += 10) { + value = + (map[i] | + (map[i + 1] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_1_SHFT) | + (map[i + 2] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_2_SHFT) | + (map[i + 3] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_3_SHFT) | + (map[i + 4] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_4_SHFT) | + (map[i + 5] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_5_SHFT) | + (map[i + 6] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_6_SHFT) | + (map[i + 7] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_7_SHFT) | + (map[i + 8] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_8_SHFT) | + (map[i + 9] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_9_SHFT)); + + HAL_REG_WRITE(soc, addr, + (value & HWIO_TCL_R0_DSCP_TID1_MAP_1_RMSK)); + + addr += 4; + } +} + +/** + * hal_tx_update_dscp_tid_8074() - Update the dscp tid map table as + updated by user + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id : MAP ID + * @dscp: DSCP_TID map index + * + * Return: void + */ +static +void hal_tx_update_dscp_tid_8074(struct hal_soc *soc, uint8_t tid, + uint8_t id, uint8_t dscp) +{ + int index; + uint32_t addr; + uint32_t value; + uint32_t regval; + + if (id == HAL_TX_DSCP_TID_MAP_TABLE_DEFAULT) + addr = HWIO_TCL_R0_DSCP_TID1_MAP_0_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + else + addr = HWIO_TCL_R0_DSCP_TID2_MAP_0_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + + index = dscp % HAL_TX_NUM_DSCP_PER_REGISTER; + addr += 4 * (dscp / HAL_TX_NUM_DSCP_PER_REGISTER); + value = tid << (HAL_TX_BITS_PER_TID * index); + + /* Read back previous DSCP TID config and update + * with new config. + */ + regval = HAL_REG_READ(soc, addr); + regval &= ~(HAL_TX_TID_BITS_MASK << (HAL_TX_BITS_PER_TID * index)); + regval |= value; + + HAL_REG_WRITE(soc, addr, + (regval & HWIO_TCL_R0_DSCP_TID1_MAP_1_RMSK)); +} + +/** + * hal_tx_desc_set_lmac_id - Set the lmac_id value + * @desc: Handle to Tx Descriptor + * @lmac_id: mac Id to ast matching + * b00 – mac 0 + * b01 – mac 1 + * b10 – mac 2 + * b11 – all macs (legacy HK way) + * + * Return: void + */ +static void hal_tx_desc_set_lmac_id_8074(void *desc, uint8_t lmac_id) +{ +} diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v2/hal_8074v2.c b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v2/hal_8074v2.c new file mode 100644 index 0000000000000000000000000000000000000000..0fa5fac63d8c9b061a5626502b069f39dc2c7399 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v2/hal_8074v2.c @@ -0,0 +1,1547 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "hal_api.h" +#include "target_type.h" +#include "wcss_version.h" +#include "qdf_module.h" + +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_OFFSET \ + RXPCU_PPDU_END_INFO_9_RX_PPDU_DURATION_OFFSET +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_MASK \ + RXPCU_PPDU_END_INFO_9_RX_PPDU_DURATION_MASK +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_LSB \ + RXPCU_PPDU_END_INFO_9_RX_PPDU_DURATION_LSB +#define UNIFIED_PHYRX_HT_SIG_0_HT_SIG_INFO_PHYRX_HT_SIG_INFO_DETAILS_OFFSET \ + PHYRX_HT_SIG_0_PHYRX_HT_SIG_INFO_DETAILS_MCS_OFFSET +#define UNIFIED_PHYRX_L_SIG_B_0_L_SIG_B_INFO_PHYRX_L_SIG_B_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_B_0_PHYRX_L_SIG_B_INFO_DETAILS_RATE_OFFSET +#define UNIFIED_PHYRX_L_SIG_A_0_L_SIG_A_INFO_PHYRX_L_SIG_A_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_A_0_PHYRX_L_SIG_A_INFO_DETAILS_RATE_OFFSET +#define UNIFIED_PHYRX_VHT_SIG_A_0_VHT_SIG_A_INFO_PHYRX_VHT_SIG_A_INFO_DETAILS_OFFSET \ + PHYRX_VHT_SIG_A_0_PHYRX_VHT_SIG_A_INFO_DETAILS_BANDWIDTH_OFFSET +#define UNIFIED_PHYRX_HE_SIG_A_SU_0_HE_SIG_A_SU_INFO_PHYRX_HE_SIG_A_SU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_A_SU_0_PHYRX_HE_SIG_A_SU_INFO_DETAILS_FORMAT_INDICATION_OFFSET +#define UNIFIED_PHYRX_HE_SIG_A_MU_DL_0_HE_SIG_A_MU_DL_INFO_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_A_MU_DL_0_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS_DL_UL_FLAG_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B1_MU_0_HE_SIG_B1_MU_INFO_PHYRX_HE_SIG_B1_MU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B1_MU_0_PHYRX_HE_SIG_B1_MU_INFO_DETAILS_RU_ALLOCATION_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B2_MU_0_HE_SIG_B2_MU_INFO_PHYRX_HE_SIG_B2_MU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B2_MU_0_PHYRX_HE_SIG_B2_MU_INFO_DETAILS_STA_ID_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B2_OFDMA_0_HE_SIG_B2_OFDMA_INFO_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B2_OFDMA_0_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS_STA_ID_OFFSET +#define UNIFIED_PHYRX_RSSI_LEGACY_3_RECEIVE_RSSI_INFO_PRE_RSSI_INFO_DETAILS_OFFSET \ + PHYRX_RSSI_LEGACY_3_PRE_RSSI_INFO_DETAILS_RSSI_PRI20_CHAIN0_OFFSET +#define UNIFIED_PHYRX_RSSI_LEGACY_19_RECEIVE_RSSI_INFO_PREAMBLE_RSSI_INFO_DETAILS_OFFSET \ + PHYRX_RSSI_LEGACY_19_PREAMBLE_RSSI_INFO_DETAILS_RSSI_PRI20_CHAIN0_OFFSET +#define UNIFIED_RX_MPDU_START_0_RX_MPDU_INFO_RX_MPDU_INFO_DETAILS_OFFSET \ + RX_MPDU_START_0_RX_MPDU_INFO_DETAILS_RXPCU_MPDU_FILTER_IN_CATEGORY_OFFSET +#define UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET \ + RX_MSDU_LINK_8_MSDU_0_BUFFER_ADDR_INFO_DETAILS_BUFFER_ADDR_31_0_OFFSET +#define UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_DETAILS_FIRST_MSDU_IN_MPDU_FLAG_OFFSET +#define UNIFIED_RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET \ + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_DETAILS_MSDU_COUNT_OFFSET +#define UNIFIED_REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET \ + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_DETAILS_MSDU_COUNT_OFFSET +#define UNIFORM_REO_STATUS_HEADER_STATUS_HEADER \ + STATUS_HEADER_REO_STATUS_NUMBER +#define UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC \ + STATUS_HEADER_TIMESTAMP +#define UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_DETAILS_FIRST_MSDU_IN_MPDU_FLAG_OFFSET +#define UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET \ + RX_MSDU_LINK_8_MSDU_0_BUFFER_ADDR_INFO_DETAILS_BUFFER_ADDR_31_0_OFFSET +#define UNIFIED_TCL_DATA_CMD_0_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET \ + TCL_DATA_CMD_0_BUF_ADDR_INFO_BUFFER_ADDR_31_0_OFFSET +#define UNIFIED_TCL_DATA_CMD_1_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET \ + TCL_DATA_CMD_1_BUF_ADDR_INFO_BUFFER_ADDR_39_32_OFFSET +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_OFFSET \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_OFFSET +#define UNIFIED_BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB +#define UNIFIED_BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_LSB \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_LSB +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_MASK \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_MASK +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_MASK \ + WBM_RELEASE_RING_6_TX_RATE_STATS_PPDU_TRANSMISSION_TSF_MASK +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_OFFSET \ + WBM_RELEASE_RING_6_TX_RATE_STATS_PPDU_TRANSMISSION_TSF_OFFSET +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_LSB \ + WBM_RELEASE_RING_6_TX_RATE_STATS_PPDU_TRANSMISSION_TSF_LSB +#include "hal_8074v2_tx.h" +#include "hal_8074v2_rx.h" +#include +#include + +/** + * hal_rx_get_rx_fragment_number_8074v2(): Function to retrieve + * rx fragment number + * + * @nbuf: Network buffer + * Returns: rx fragment number + */ +static +uint8_t hal_rx_get_rx_fragment_number_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + /* Return first 4 bits as fragment number */ + return HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info) & + DOT11_SEQ_FRAG_MASK; +} + +/** + * hal_rx_msdu_end_da_is_mcbc_get_8074v2: API to check if pkt is MCBC + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_mcbc + */ +static uint8_t +hal_rx_msdu_end_da_is_mcbc_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_DA_IS_MCBC_GET(msdu_end); +} + +/** + * hal_rx_msdu_end_sa_is_valid_get_8074v2(): API to get_8074v2 the + * sa_is_valid bit from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_is_valid bit + */ +static uint8_t +hal_rx_msdu_end_sa_is_valid_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t sa_is_valid; + + sa_is_valid = HAL_RX_MSDU_END_SA_IS_VALID_GET(msdu_end); + + return sa_is_valid; +} + +/** + * hal_rx_msdu_end_sa_idx_get_8074v2(): API to get_8074v2 the + * sa_idx from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_idx (SA AST index) + */ +static uint16_t hal_rx_msdu_end_sa_idx_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t sa_idx; + + sa_idx = HAL_RX_MSDU_END_SA_IDX_GET(msdu_end); + + return sa_idx; +} + +/** + * hal_rx_desc_is_first_msdu_8074v2() - Check if first msdu + * + * @hal_soc_hdl: hal_soc handle + * @hw_desc_addr: hardware descriptor address + * + * Return: 0 - success/ non-zero failure + */ +static uint32_t hal_rx_desc_is_first_msdu_8074v2(void *hw_desc_addr) +{ + struct rx_pkt_tlvs *rx_tlvs = (struct rx_pkt_tlvs *)hw_desc_addr; + struct rx_msdu_end *msdu_end = &rx_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_GET(msdu_end, RX_MSDU_END_5, FIRST_MSDU); +} + +/** + * hal_rx_msdu_end_l3_hdr_padding_get_8074v2(): API to get_8074v2 the + * l3_header padding from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: number of l3 header padding bytes + */ +static uint32_t hal_rx_msdu_end_l3_hdr_padding_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint32_t l3_header_padding; + + l3_header_padding = HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(msdu_end); + + return l3_header_padding; +} + +/* + * @ hal_rx_encryption_info_valid_8074v2: Returns encryption type. + * + * @ buf: rx_tlv_hdr of the received packet + * @ Return: encryption type + */ +static uint32_t hal_rx_encryption_info_valid_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + uint32_t encryption_info = HAL_RX_MPDU_ENCRYPTION_INFO_VALID(mpdu_info); + + return encryption_info; +} + +/* + * @ hal_rx_print_pn_8074v2: Prints the PN of rx packet. + * + * @ buf: rx_tlv_hdr of the received packet + * @ Return: void + */ +static void hal_rx_print_pn_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + uint32_t pn_31_0 = HAL_RX_MPDU_PN_31_0_GET(mpdu_info); + uint32_t pn_63_32 = HAL_RX_MPDU_PN_63_32_GET(mpdu_info); + uint32_t pn_95_64 = HAL_RX_MPDU_PN_95_64_GET(mpdu_info); + uint32_t pn_127_96 = HAL_RX_MPDU_PN_127_96_GET(mpdu_info); + + hal_debug("PN number pn_127_96 0x%x pn_95_64 0x%x pn_63_32 0x%x pn_31_0 0x%x ", + pn_127_96, pn_95_64, pn_63_32, pn_31_0); +} + +/** + * hal_rx_msdu_end_first_msdu_get_8074v2: API to get first msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: first_msdu + */ +static uint8_t hal_rx_msdu_end_first_msdu_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t first_msdu; + + first_msdu = HAL_RX_MSDU_END_FIRST_MSDU_GET(msdu_end); + + return first_msdu; +} + +/** + * hal_rx_msdu_end_da_is_valid_get_8074v2: API to check if da is valid + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_valid + */ +static uint8_t hal_rx_msdu_end_da_is_valid_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t da_is_valid; + + da_is_valid = HAL_RX_MSDU_END_DA_IS_VALID_GET(msdu_end); + + return da_is_valid; +} + +/** + * hal_rx_msdu_end_last_msdu_get_8074v2: API to get last msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: last_msdu + */ +static uint8_t hal_rx_msdu_end_last_msdu_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t last_msdu; + + last_msdu = HAL_RX_MSDU_END_LAST_MSDU_GET(msdu_end); + + return last_msdu; +} + +/* + * hal_rx_get_mpdu_mac_ad4_valid_8074v2(): Retrieves if mpdu 4th addr is valid + * + * @nbuf: Network buffer + * Returns: value of mpdu 4th address valid field + */ +static bool hal_rx_get_mpdu_mac_ad4_valid_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + bool ad4_valid = 0; + + ad4_valid = HAL_RX_MPDU_GET_MAC_AD4_VALID(rx_mpdu_info); + + return ad4_valid; +} + +/** + * hal_rx_mpdu_start_sw_peer_id_get_8074v2: Retrieve sw peer_id + * @buf: network buffer + * + * Return: sw peer_id + */ +static uint32_t hal_rx_mpdu_start_sw_peer_id_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + return HAL_RX_MPDU_INFO_SW_PEER_ID_GET( + &mpdu_start->rx_mpdu_info_details); +} + +/* + * hal_rx_mpdu_get_to_ds_8074v2(): API to get the tods info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(to_ds) + */ +static uint32_t hal_rx_mpdu_get_to_ds_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + return HAL_RX_MPDU_GET_TODS(mpdu_info); +} + +/* + * hal_rx_mpdu_get_fr_ds_8074v2(): API to get the from ds info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(fr_ds) + */ +static uint32_t hal_rx_mpdu_get_fr_ds_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + return HAL_RX_MPDU_GET_FROMDS(mpdu_info); +} + +/* + * hal_rx_get_mpdu_frame_control_valid_8074v2(): Retrieves mpdu + * frame control valid + * + * @nbuf: Network buffer + * Returns: value of frame control valid field + */ +static uint8_t hal_rx_get_mpdu_frame_control_valid_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(rx_mpdu_info); +} + +/* + * hal_rx_mpdu_get_addr1_8074v2(): API to check get address1 of the mpdu + * + * @buf: pointer to the start of RX PKT TLV headera + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr1_8074v2(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr1 { + uint32_t ad1_31_0; + uint16_t ad1_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr1 *addr = (struct hal_addr1 *)mac_addr; + uint32_t mac_addr_ad1_valid; + + mac_addr_ad1_valid = HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(mpdu_info); + + if (mac_addr_ad1_valid) { + addr->ad1_31_0 = HAL_RX_MPDU_AD1_31_0_GET(mpdu_info); + addr->ad1_47_32 = HAL_RX_MPDU_AD1_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr2_8074v2(): API to check get address2 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr2_8074v2(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr2 { + uint16_t ad2_15_0; + uint32_t ad2_47_16; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr2 *addr = (struct hal_addr2 *)mac_addr; + uint32_t mac_addr_ad2_valid; + + mac_addr_ad2_valid = HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(mpdu_info); + + if (mac_addr_ad2_valid) { + addr->ad2_15_0 = HAL_RX_MPDU_AD2_15_0_GET(mpdu_info); + addr->ad2_47_16 = HAL_RX_MPDU_AD2_47_16_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr3_8074v2(): API to get address3 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr3_8074v2(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr3 { + uint32_t ad3_31_0; + uint16_t ad3_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr3 *addr = (struct hal_addr3 *)mac_addr; + uint32_t mac_addr_ad3_valid; + + mac_addr_ad3_valid = HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(mpdu_info); + + if (mac_addr_ad3_valid) { + addr->ad3_31_0 = HAL_RX_MPDU_AD3_31_0_GET(mpdu_info); + addr->ad3_47_32 = HAL_RX_MPDU_AD3_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr4_8074v2(): API to get address4 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr4_8074v2(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr4 { + uint32_t ad4_31_0; + uint16_t ad4_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr4 *addr = (struct hal_addr4 *)mac_addr; + uint32_t mac_addr_ad4_valid; + + mac_addr_ad4_valid = HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(mpdu_info); + + if (mac_addr_ad4_valid) { + addr->ad4_31_0 = HAL_RX_MPDU_AD4_31_0_GET(mpdu_info); + addr->ad4_47_32 = HAL_RX_MPDU_AD4_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_get_mpdu_sequence_control_valid_8074v2(): Get mpdu + * sequence control valid + * + * @nbuf: Network buffer + * Returns: value of sequence control valid field + */ +static uint8_t hal_rx_get_mpdu_sequence_control_valid_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(rx_mpdu_info); +} + +/** + * hal_rx_is_unicast_8074v2: check packet is unicast frame or not. + * + * @ buf: pointer to rx pkt TLV. + * + * Return: true on unicast. + */ +static bool hal_rx_is_unicast_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint32_t grp_id; + uint8_t *rx_mpdu_info = (uint8_t *)&mpdu_start->rx_mpdu_info_details; + + grp_id = (_HAL_MS((*_OFFSET_TO_WORD_PTR((rx_mpdu_info), + RX_MPDU_INFO_0_SW_FRAME_GROUP_ID_OFFSET)), + RX_MPDU_INFO_0_SW_FRAME_GROUP_ID_MASK, + RX_MPDU_INFO_0_SW_FRAME_GROUP_ID_LSB)); + + return (HAL_MPDU_SW_FRAME_GROUP_UNICAST_DATA == grp_id) ? true : false; +} + +/** + * hal_rx_tid_get_8074v2: get tid based on qos control valid. + * @hal_soc_hdl: hal soc handle + * @buf: pointer to rx pkt TLV. + * + * Return: tid + */ +static uint32_t hal_rx_tid_get_8074v2(hal_soc_handle_t hal_soc_hdl, + uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint8_t *rx_mpdu_info = (uint8_t *)&mpdu_start->rx_mpdu_info_details; + uint8_t qos_control_valid = + (_HAL_MS((*_OFFSET_TO_WORD_PTR((rx_mpdu_info), + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_OFFSET)), + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_MASK, + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_LSB)); + + if (qos_control_valid) + return hal_rx_mpdu_start_tid_get_8074v2(buf); + + return HAL_RX_NON_QOS_TID; +} + +/** + * hal_rx_hw_desc_get_ppduid_get_8074v2(): retrieve ppdu id + * @hw_desc_addr: hw addr + * + * Return: ppdu id + */ +static uint32_t hal_rx_hw_desc_get_ppduid_get_8074v2(void *hw_desc_addr) +{ + struct rx_mpdu_info *rx_mpdu_info; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + + rx_mpdu_info = + &rx_desc->mpdu_start_tlv.rx_mpdu_start.rx_mpdu_info_details; + + return HAL_RX_GET(rx_mpdu_info, RX_MPDU_INFO_0, PHY_PPDU_ID); +} + +/** + * hal_reo_status_get_header_8074v2 - Process reo desc info + * @d - Pointer to reo descriptior + * @b - tlv type info + * @h1 - Pointer to hal_reo_status_header where info to be stored + * + * Return - none. + * + */ +static void hal_reo_status_get_header_8074v2(uint32_t *d, int b, void *h1) +{ + uint32_t val1 = 0; + struct hal_reo_status_header *h = + (struct hal_reo_status_header *)h1; + + switch (b) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + val1 = + d[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UPDATE_RX_REO_QUEUE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + default: + qdf_nofl_err("ERROR: Unknown tlv\n"); + break; + } + h->cmd_num = + HAL_GET_FIELD( + UNIFORM_REO_STATUS_HEADER_0, REO_STATUS_NUMBER, + val1); + h->exec_time = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, + CMD_EXECUTION_TIME, val1); + h->status = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, + REO_CMD_EXECUTION_STATUS, val1); + switch (b) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + val1 = + d[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UPDATE_RX_REO_QUEUE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + default: + qdf_nofl_err("ERROR: Unknown tlv\n"); + break; + } + h->tstamp = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_1, TIMESTAMP, val1); +} + +/** + * hal_rx_mpdu_start_mpdu_qos_control_valid_get_8074v2(): + * Retrieve qos control valid bit from the tlv. + * @buf: pointer to rx pkt TLV. + * + * Return: qos control value. + */ +static inline uint32_t +hal_rx_mpdu_start_mpdu_qos_control_valid_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + return HAL_RX_MPDU_INFO_QOS_CONTROL_VALID_GET( + &mpdu_start->rx_mpdu_info_details); +} + +/** + * hal_rx_msdu_end_sa_sw_peer_id_get_8074v2(): API to get the + * sa_sw_peer_id from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: sa_sw_peer_id index + */ +static inline uint32_t +hal_rx_msdu_end_sa_sw_peer_id_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(msdu_end); +} + +/** + * hal_tx_desc_set_mesh_en_8074v2 - Set mesh_enable flag in Tx descriptor + * @desc: Handle to Tx Descriptor + * @en: For raw WiFi frames, this indicates transmission to a mesh STA, + * enabling the interpretation of the 'Mesh Control Present' bit + * (bit 8) of QoS Control (otherwise this bit is ignored), + * For native WiFi frames, this indicates that a 'Mesh Control' field + * is present between the header and the LLC. + * + * Return: void + */ +static inline +void hal_tx_desc_set_mesh_en_8074v2(void *desc, uint8_t en) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_4, MESH_ENABLE) |= + HAL_TX_SM(TCL_DATA_CMD_4, MESH_ENABLE, en); +} + +static +void *hal_rx_msdu0_buffer_addr_lsb_8074v2(void *link_desc_va) +{ + return (void *)HAL_RX_MSDU0_BUFFER_ADDR_LSB(link_desc_va); +} + +static +void *hal_rx_msdu_desc_info_ptr_get_8074v2(void *msdu0) +{ + return (void *)HAL_RX_MSDU_DESC_INFO_PTR_GET(msdu0); +} + +static +void *hal_ent_mpdu_desc_info_8074v2(void *ent_ring_desc) +{ + return (void *)HAL_ENT_MPDU_DESC_INFO(ent_ring_desc); +} + +static +void *hal_dst_mpdu_desc_info_8074v2(void *dst_ring_desc) +{ + return (void *)HAL_DST_MPDU_DESC_INFO(dst_ring_desc); +} + +static +uint8_t hal_rx_get_fc_valid_8074v2(uint8_t *buf) +{ + return HAL_RX_GET_FC_VALID(buf); +} + +static uint8_t hal_rx_get_to_ds_flag_8074v2(uint8_t *buf) +{ + return HAL_RX_GET_TO_DS_FLAG(buf); +} + +static uint8_t hal_rx_get_mac_addr2_valid_8074v2(uint8_t *buf) +{ + return HAL_RX_GET_MAC_ADDR2_VALID(buf); +} + +static uint8_t hal_rx_get_filter_category_8074v2(uint8_t *buf) +{ + return HAL_RX_GET_FILTER_CATEGORY(buf); +} + +static uint32_t +hal_rx_get_ppdu_id_8074v2(uint8_t *buf) +{ + return HAL_RX_GET_PPDU_ID(buf); +} + +/** + * hal_reo_config_8074v2(): Set reo config parameters + * @soc: hal soc handle + * @reg_val: value to be set + * @reo_params: reo parameters + * + * Return: void + */ +static void +hal_reo_config_8074v2(struct hal_soc *soc, + uint32_t reg_val, + struct hal_reo_params *reo_params) +{ + HAL_REO_R0_CONFIG(soc, reg_val, reo_params); +} + +/** + * hal_rx_msdu_desc_info_get_ptr_8074v2() - Get msdu desc info ptr + * @msdu_details_ptr - Pointer to msdu_details_ptr + * + * Return - Pointer to rx_msdu_desc_info structure. + * + */ +static void *hal_rx_msdu_desc_info_get_ptr_8074v2(void *msdu_details_ptr) +{ + return HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr); +} + +/** + * hal_rx_link_desc_msdu0_ptr_8074v2 - Get pointer to rx_msdu details + * @link_desc - Pointer to link desc + * + * Return - Pointer to rx_msdu_details structure + * + */ +static void *hal_rx_link_desc_msdu0_ptr_8074v2(void *link_desc) +{ + return HAL_RX_LINK_DESC_MSDU0_PTR(link_desc); +} + +/** + * hal_rx_msdu_flow_idx_get_8074v2: API to get flow index + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index value from MSDU END TLV + */ +static inline uint32_t hal_rx_msdu_flow_idx_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_GET(msdu_end); +} + +/** + * hal_rx_msdu_flow_idx_invalid_8074v2: API to get flow index invalid + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index invalid value from MSDU END TLV + */ +static bool hal_rx_msdu_flow_idx_invalid_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(msdu_end); +} + +/** + * hal_rx_msdu_flow_idx_timeout_8074v2: API to get flow index timeout + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index timeout value from MSDU END TLV + */ +static bool hal_rx_msdu_flow_idx_timeout_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(msdu_end); +} + +/** + * hal_rx_msdu_fse_metadata_get_8074v2: API to get FSE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: fse metadata value from MSDU END TLV + */ +static uint32_t hal_rx_msdu_fse_metadata_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FSE_METADATA_GET(msdu_end); +} + +/** + * hal_rx_msdu_cce_metadata_get_8074v2: API to get CCE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: cce_metadata + */ +static uint16_t +hal_rx_msdu_cce_metadata_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_CCE_METADATA_GET(msdu_end); +} + +/** + * hal_rx_msdu_get_flow_params_8074v2: API to get flow index, flow index invalid + * and flow index timeout from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * @flow_invalid: pointer to return value of flow_idx_valid + * @flow_timeout: pointer to return value of flow_idx_timeout + * @flow_index: pointer to return value of flow_idx + * + * Return: none + */ +static inline void +hal_rx_msdu_get_flow_params_8074v2(uint8_t *buf, + bool *flow_invalid, + bool *flow_timeout, + uint32_t *flow_index) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + *flow_invalid = HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(msdu_end); + *flow_timeout = HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(msdu_end); + *flow_index = HAL_RX_MSDU_END_FLOW_IDX_GET(msdu_end); +} + +/** + * hal_rx_tlv_get_tcp_chksum_8074v2() - API to get tcp checksum + * @buf: rx_tlv_hdr + * + * Return: tcp checksum + */ +static uint16_t +hal_rx_tlv_get_tcp_chksum_8074v2(uint8_t *buf) +{ + return HAL_RX_TLV_GET_TCP_CHKSUM(buf); +} + +/** + * hal_rx_get_rx_sequence_8074v2(): Function to retrieve rx sequence number + * + * @nbuf: Network buffer + * Returns: rx sequence number + */ +static +uint16_t hal_rx_get_rx_sequence_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info); +} + +/** + * hal_get_window_address_8074v2(): Function to get hp/tp address + * @hal_soc: Pointer to hal_soc + * @addr: address offset of register + * + * Return: modified address offset of register + */ +static inline qdf_iomem_t hal_get_window_address_8074v2(struct hal_soc *hal_soc, + qdf_iomem_t addr) +{ + return addr; +} + +struct hal_hw_txrx_ops qca8074v2_hal_hw_txrx_ops = { + + /* init and setup */ + hal_srng_dst_hw_init_generic, + hal_srng_src_hw_init_generic, + hal_get_hw_hptp_generic, + hal_reo_setup_generic, + hal_setup_link_idle_list_generic, + hal_get_window_address_8074v2, + NULL, + + /* tx */ + hal_tx_desc_set_dscp_tid_table_id_8074v2, + hal_tx_set_dscp_tid_map_8074v2, + hal_tx_update_dscp_tid_8074v2, + hal_tx_desc_set_lmac_id_8074v2, + hal_tx_desc_set_buf_addr_generic, + hal_tx_desc_set_search_type_generic, + hal_tx_desc_set_search_index_generic, + hal_tx_desc_set_cache_set_num_generic, + hal_tx_comp_get_status_generic, + hal_tx_comp_get_release_reason_generic, + hal_get_wbm_internal_error_generic, + hal_tx_desc_set_mesh_en_8074v2, + + /* rx */ + hal_rx_msdu_start_nss_get_8074v2, + hal_rx_mon_hw_desc_get_mpdu_status_8074v2, + hal_rx_get_tlv_8074v2, + hal_rx_proc_phyrx_other_receive_info_tlv_8074v2, + hal_rx_dump_msdu_start_tlv_8074v2, + hal_rx_dump_msdu_end_tlv_8074v2, + hal_get_link_desc_size_8074v2, + hal_rx_mpdu_start_tid_get_8074v2, + hal_rx_msdu_start_reception_type_get_8074v2, + hal_rx_msdu_end_da_idx_get_8074v2, + hal_rx_msdu_desc_info_get_ptr_8074v2, + hal_rx_link_desc_msdu0_ptr_8074v2, + hal_reo_status_get_header_8074v2, + hal_rx_status_get_tlv_info_generic, + hal_rx_wbm_err_info_get_generic, + hal_rx_dump_mpdu_start_tlv_generic, + + hal_tx_set_pcp_tid_map_generic, + hal_tx_update_pcp_tid_generic, + hal_tx_update_tidmap_prty_generic, + hal_rx_get_rx_fragment_number_8074v2, + hal_rx_msdu_end_da_is_mcbc_get_8074v2, + hal_rx_msdu_end_sa_is_valid_get_8074v2, + hal_rx_msdu_end_sa_idx_get_8074v2, + hal_rx_desc_is_first_msdu_8074v2, + hal_rx_msdu_end_l3_hdr_padding_get_8074v2, + hal_rx_encryption_info_valid_8074v2, + hal_rx_print_pn_8074v2, + hal_rx_msdu_end_first_msdu_get_8074v2, + hal_rx_msdu_end_da_is_valid_get_8074v2, + hal_rx_msdu_end_last_msdu_get_8074v2, + hal_rx_get_mpdu_mac_ad4_valid_8074v2, + hal_rx_mpdu_start_sw_peer_id_get_8074v2, + hal_rx_mpdu_get_to_ds_8074v2, + hal_rx_mpdu_get_fr_ds_8074v2, + hal_rx_get_mpdu_frame_control_valid_8074v2, + hal_rx_mpdu_get_addr1_8074v2, + hal_rx_mpdu_get_addr2_8074v2, + hal_rx_mpdu_get_addr3_8074v2, + hal_rx_mpdu_get_addr4_8074v2, + hal_rx_get_mpdu_sequence_control_valid_8074v2, + hal_rx_is_unicast_8074v2, + hal_rx_tid_get_8074v2, + hal_rx_hw_desc_get_ppduid_get_8074v2, + hal_rx_mpdu_start_mpdu_qos_control_valid_get_8074v2, + hal_rx_msdu_end_sa_sw_peer_id_get_8074v2, + hal_rx_msdu0_buffer_addr_lsb_8074v2, + hal_rx_msdu_desc_info_ptr_get_8074v2, + hal_ent_mpdu_desc_info_8074v2, + hal_dst_mpdu_desc_info_8074v2, + hal_rx_get_fc_valid_8074v2, + hal_rx_get_to_ds_flag_8074v2, + hal_rx_get_mac_addr2_valid_8074v2, + hal_rx_get_filter_category_8074v2, + hal_rx_get_ppdu_id_8074v2, + hal_reo_config_8074v2, + hal_rx_msdu_flow_idx_get_8074v2, + hal_rx_msdu_flow_idx_invalid_8074v2, + hal_rx_msdu_flow_idx_timeout_8074v2, + hal_rx_msdu_fse_metadata_get_8074v2, + hal_rx_msdu_cce_metadata_get_8074v2, + hal_rx_msdu_get_flow_params_8074v2, + hal_rx_tlv_get_tcp_chksum_8074v2, + hal_rx_get_rx_sequence_8074v2, +#if defined(QCA_WIFI_QCA6018) && defined(WLAN_CFR_ENABLE) && \ + defined(WLAN_ENH_CFR_ENABLE) + hal_rx_get_bb_info_8074v2, + hal_rx_get_rtt_info_8074v2, +#else + NULL, + NULL, +#endif + /* rx - msdu fast path info fields */ + hal_rx_msdu_packet_metadata_get_generic, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, +}; + +struct hal_hw_srng_config hw_srng_table_8074v2[] = { + /* TODO: max_rings can populated by querying HW capabilities */ + { /* REO_DST */ + .start_ring_id = HAL_SRNG_REO2SW1, + .max_rings = 4, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2SW1_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + .reg_size = { + HWIO_REO_R0_REO2SW2_RING_BASE_LSB_ADDR(0) - + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(0), + HWIO_REO_R2_REO2SW2_RING_HP_ADDR(0) - + HWIO_REO_R2_REO2SW1_RING_HP_ADDR(0), + }, + .max_size = + HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_EXCEPTION */ + /* Designating REO2TCL ring as exception ring. This ring is + * similar to other REO2SW rings though it is named as REO2TCL. + * Any of theREO2SW rings can be used as exception ring. + */ + .start_ring_id = HAL_SRNG_REO2TCL, + .max_rings = 1, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2TCL_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2TCL_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_REINJECT */ + .start_ring_id = HAL_SRNG_SW2REO, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_SW2REO_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_SW2REO_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_CMD */ + .start_ring_id = HAL_SRNG_REO_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_REO_CMD_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_CMD_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_STATUS */ + .start_ring_id = HAL_SRNG_REO_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats_status)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_DATA */ + .start_ring_id = HAL_SRNG_SW2TCL1, + .max_rings = 3, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_data_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + .reg_size = { + HWIO_TCL_R0_SW2TCL2_RING_BASE_LSB_ADDR(0) - + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(0), + HWIO_TCL_R2_SW2TCL2_RING_HP_ADDR(0) - + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(0), + }, + .max_size = + HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_CMD */ + .start_ring_id = HAL_SRNG_SW2TCL_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_gse_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL_CMD_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_STATUS */ + .start_ring_id = HAL_SRNG_TCL_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_status_ring)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_TCL_STATUS1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_SRC */ + .start_ring_id = HAL_SRNG_CE_0_SRC, + .max_rings = 12, + .entry_size = sizeof(struct ce_src_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET), + }, + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_DST */ + .start_ring_id = HAL_SRNG_CE_0_DST, + .max_rings = 12, + .entry_size = 8 >> 2, + /*TODO: entry_size above should actually be + * sizeof(struct ce_dst_desc) >> 2, but couldn't find definition + * of struct ce_dst_desc in HW header files + */ + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + }, + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_DST_STATUS */ + .start_ring_id = HAL_SRNG_CE_0_DST_STATUS, + .max_rings = 12, + .entry_size = sizeof(struct ce_stat_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + }, + /* TODO: check destination status ring registers */ + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* WBM_IDLE_LINK */ + .start_ring_id = HAL_SRNG_WBM_IDLE_LINK, + .max_rings = 1, + .entry_size = sizeof(struct wbm_link_descriptor_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM_IDLE_LINK_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* SW2WBM_RELEASE */ + .start_ring_id = HAL_SRNG_WBM_SW_RELEASE, + .max_rings = 1, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_SW_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_SW_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* WBM2SW_RELEASE */ + .start_ring_id = HAL_SRNG_WBM2SW0_RELEASE, + .max_rings = 4, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .reg_size = { + HWIO_WBM_R0_WBM2SW1_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW1_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .max_size = + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* RXDMA_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA0_BUF0, +#ifdef IPA_OFFLOAD + .max_rings = 3, +#else + .max_rings = 2, +#endif + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW0, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA2_BUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_STATUS */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW1, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_DESC */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_DESC, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* DIR_BUF_RX_DMA_SRC */ + .start_ring_id = HAL_SRNG_DIR_BUF_RX_SRC_DMA_RING, + /* one ring for spectral and one ring for cfr */ + .max_rings = 2, + .entry_size = 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, +#ifdef WLAN_FEATURE_CIF_CFR + { /* WIFI_POS_SRC */ + .start_ring_id = HAL_SRNG_WIFI_POS_SRC_DMA_RING, + .max_rings = 1, + .entry_size = sizeof(wmi_oem_dma_buf_release_entry) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, +#endif +}; + +int32_t hal_hw_reg_offset_qca8074v2[] = { + /* dst */ + REG_OFFSET(DST, HP), + REG_OFFSET(DST, TP), + REG_OFFSET(DST, ID), + REG_OFFSET(DST, MISC), + REG_OFFSET(DST, HP_ADDR_LSB), + REG_OFFSET(DST, HP_ADDR_MSB), + REG_OFFSET(DST, MSI1_BASE_LSB), + REG_OFFSET(DST, MSI1_BASE_MSB), + REG_OFFSET(DST, MSI1_DATA), + REG_OFFSET(DST, BASE_LSB), + REG_OFFSET(DST, BASE_MSB), + REG_OFFSET(DST, PRODUCER_INT_SETUP), + /* src */ + REG_OFFSET(SRC, HP), + REG_OFFSET(SRC, TP), + REG_OFFSET(SRC, ID), + REG_OFFSET(SRC, MISC), + REG_OFFSET(SRC, TP_ADDR_LSB), + REG_OFFSET(SRC, TP_ADDR_MSB), + REG_OFFSET(SRC, MSI1_BASE_LSB), + REG_OFFSET(SRC, MSI1_BASE_MSB), + REG_OFFSET(SRC, MSI1_DATA), + REG_OFFSET(SRC, BASE_LSB), + REG_OFFSET(SRC, BASE_MSB), + REG_OFFSET(SRC, CONSUMER_INT_SETUP_IX0), + REG_OFFSET(SRC, CONSUMER_INT_SETUP_IX1), +}; + + +/** + * hal_qca8074v2_attach() - Attach 8074v2 target specific hal_soc ops, + * offset and srng table + */ +void hal_qca8074v2_attach(struct hal_soc *hal_soc) +{ + hal_soc->hw_srng_table = hw_srng_table_8074v2; + hal_soc->hal_hw_reg_offset = hal_hw_reg_offset_qca8074v2; + hal_soc->ops = &qca8074v2_hal_hw_txrx_ops; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v2/hal_8074v2_rx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v2/hal_8074v2_rx.h new file mode 100644 index 0000000000000000000000000000000000000000..8ceae38232775f6d6a8f646515f388737d7e2a09 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v2/hal_8074v2_rx.h @@ -0,0 +1,802 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "cdp_txrx_mon_struct.h" +#include "qdf_trace.h" +#include "hal_rx.h" +#include "hal_tx.h" +#include "dp_types.h" +#include "hal_api_mon.h" +#ifndef QCA_WIFI_QCA6018 +#include "phyrx_other_receive_info_su_evm_details.h" +#endif + +#define HAL_RX_MPDU_GET_SEQUENCE_NUMBER(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_MASK, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_LSB)) + +#define HAL_RX_MSDU_END_DA_IS_MCBC_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_DA_IS_MCBC_OFFSET)), \ + RX_MSDU_END_5_DA_IS_MCBC_MASK, \ + RX_MSDU_END_5_DA_IS_MCBC_LSB)) + +#define HAL_RX_MSDU_END_SA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_SA_IS_VALID_OFFSET)), \ + RX_MSDU_END_5_SA_IS_VALID_MASK, \ + RX_MSDU_END_5_SA_IS_VALID_LSB)) + +#define HAL_RX_MSDU_END_SA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_13_SA_IDX_OFFSET)), \ + RX_MSDU_END_13_SA_IDX_MASK, \ + RX_MSDU_END_13_SA_IDX_LSB)) + +#define HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_L3_HEADER_PADDING_OFFSET)), \ + RX_MSDU_END_5_L3_HEADER_PADDING_MASK, \ + RX_MSDU_END_5_L3_HEADER_PADDING_LSB)) + +#define HAL_RX_MPDU_ENCRYPTION_INFO_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_OFFSET)), \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_MASK, \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_LSB)) + +#define HAL_RX_MPDU_PN_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_4_PN_31_0_OFFSET)), \ + RX_MPDU_INFO_4_PN_31_0_MASK, \ + RX_MPDU_INFO_4_PN_31_0_LSB)) + +#define HAL_RX_MPDU_PN_63_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_5_PN_63_32_OFFSET)), \ + RX_MPDU_INFO_5_PN_63_32_MASK, \ + RX_MPDU_INFO_5_PN_63_32_LSB)) + +#define HAL_RX_MPDU_PN_95_64_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_6_PN_95_64_OFFSET)), \ + RX_MPDU_INFO_6_PN_95_64_MASK, \ + RX_MPDU_INFO_6_PN_95_64_LSB)) + +#define HAL_RX_MPDU_PN_127_96_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_7_PN_127_96_OFFSET)), \ + RX_MPDU_INFO_7_PN_127_96_MASK, \ + RX_MPDU_INFO_7_PN_127_96_LSB)) + +#define HAL_RX_MSDU_END_FIRST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_FIRST_MSDU_OFFSET)), \ + RX_MSDU_END_5_FIRST_MSDU_MASK, \ + RX_MSDU_END_5_FIRST_MSDU_LSB)) + +#define HAL_RX_MSDU_START_MIMO_SS_BITMAP(_rx_msdu_start)\ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\ + RX_MSDU_START_5_MIMO_SS_BITMAP_OFFSET)), \ + RX_MSDU_START_5_MIMO_SS_BITMAP_MASK, \ + RX_MSDU_START_5_MIMO_SS_BITMAP_LSB)) + +#define HAL_RX_MSDU_END_DA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_DA_IS_VALID_OFFSET)), \ + RX_MSDU_END_5_DA_IS_VALID_MASK, \ + RX_MSDU_END_5_DA_IS_VALID_LSB)) + +#define HAL_RX_MSDU_END_LAST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_LAST_MSDU_OFFSET)), \ + RX_MSDU_END_5_LAST_MSDU_MASK, \ + RX_MSDU_END_5_LAST_MSDU_LSB)) + +#define HAL_RX_MPDU_GET_MAC_AD4_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_LSB)) + +#define HAL_RX_MPDU_INFO_SW_PEER_ID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_1_SW_PEER_ID_OFFSET)), \ + RX_MPDU_INFO_1_SW_PEER_ID_MASK, \ + RX_MPDU_INFO_1_SW_PEER_ID_LSB)) + +#define HAL_RX_MPDU_GET_TODS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_TO_DS_OFFSET)), \ + RX_MPDU_INFO_2_TO_DS_MASK, \ + RX_MPDU_INFO_2_TO_DS_LSB)) + +#define HAL_RX_MPDU_GET_FROMDS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_FR_DS_OFFSET)), \ + RX_MPDU_INFO_2_FR_DS_MASK, \ + RX_MPDU_INFO_2_FR_DS_LSB)) + +#define HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_LSB)) + +#define HAL_RX_MPDU_AD1_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_OFFSET)), \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_MASK, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_LSB)) + +#define HAL_RX_MPDU_AD1_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_LSB)) + +#define HAL_RX_MPDU_AD2_15_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_LSB)) + +#define HAL_RX_MPDU_AD2_47_16_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_OFFSET)), \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_MASK, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_LSB)) + +#define HAL_RX_MPDU_AD3_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_OFFSET)), \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_MASK, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_LSB)) + +#define HAL_RX_MPDU_AD3_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_OFFSET)), \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_MASK, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_LSB)) + +#define HAL_RX_MPDU_AD4_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_OFFSET)), \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_MASK, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_LSB)) + +#define HAL_RX_MPDU_AD4_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_OFFSET)), \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_MASK, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_LSB)) + +#define HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_LSB)) + +#define HAL_RX_MPDU_INFO_QOS_CONTROL_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_2_MPDU_QOS_CONTROL_VALID_LSB)) + +#define HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_16_SA_SW_PEER_ID_OFFSET)), \ + RX_MSDU_END_16_SA_SW_PEER_ID_MASK, \ + RX_MSDU_END_16_SA_SW_PEER_ID_LSB)) + +#define HAL_RX_MSDU0_BUFFER_ADDR_LSB(link_desc_va) \ + (uint8_t *)(link_desc_va) + \ + RX_MSDU_LINK_8_MSDU_0_BUFFER_ADDR_INFO_DETAILS_BUFFER_ADDR_31_0_OFFSET + +#define HAL_RX_MSDU_DESC_INFO_PTR_GET(msdu0) \ + (uint8_t *)(msdu0) + \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_DETAILS_FIRST_MSDU_IN_MPDU_FLAG_OFFSET + +#define HAL_ENT_MPDU_DESC_INFO(ent_ring_desc) \ + (uint8_t *)(ent_ring_desc) + \ + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_DETAILS_MSDU_COUNT_OFFSET + +#define HAL_DST_MPDU_DESC_INFO(dst_ring_desc) \ + (uint8_t *)(dst_ring_desc) + \ + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_DETAILS_MSDU_COUNT_OFFSET + +#define HAL_RX_GET_FC_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, MPDU_FRAME_CONTROL_VALID) + +#define HAL_RX_GET_TO_DS_FLAG(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, TO_DS) + +#define HAL_RX_GET_MAC_ADDR1_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, MAC_ADDR_AD1_VALID) + +#define HAL_RX_GET_MAC_ADDR2_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_2, MAC_ADDR_AD2_VALID) + +#define HAL_RX_GET_FILTER_CATEGORY(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_0, RXPCU_MPDU_FILTER_IN_CATEGORY) + +#define HAL_RX_GET_PPDU_ID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_0, PHY_PPDU_ID) + +#define HAL_RX_GET_SW_FRAME_GROUP_ID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_0, SW_FRAME_GROUP_ID) + +#define HAL_REO_R0_CONFIG(soc, reg_val, reo_params) \ + do { \ + reg_val &= \ + ~(HWIO_REO_R0_GENERAL_ENABLE_FRAGMENT_DEST_RING_BMSK |\ + HWIO_REO_R0_GENERAL_ENABLE_AGING_LIST_ENABLE_BMSK | \ + HWIO_REO_R0_GENERAL_ENABLE_AGING_FLUSH_ENABLE_BMSK); \ + reg_val |= \ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + FRAGMENT_DEST_RING, \ + (reo_params)->frag_dst_ring) | \ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + AGING_LIST_ENABLE, 1) |\ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + AGING_FLUSH_ENABLE, 1);\ + HAL_REG_WRITE((soc), \ + HWIO_REO_R0_GENERAL_ENABLE_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET), \ + (reg_val)); \ + } while (0) + +#define HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr) \ + ((struct rx_msdu_desc_info *) \ + _OFFSET_TO_BYTE_PTR((msdu_details_ptr), \ +UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET)) + +#define HAL_RX_LINK_DESC_MSDU0_PTR(link_desc) \ + ((struct rx_msdu_details *) \ + _OFFSET_TO_BYTE_PTR((link_desc),\ + UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET)) + +#define HAL_RX_MSDU_END_FLOW_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_14_FLOW_IDX_OFFSET)), \ + RX_MSDU_END_14_FLOW_IDX_MASK, \ + RX_MSDU_END_14_FLOW_IDX_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_FLOW_IDX_INVALID_OFFSET)), \ + RX_MSDU_END_5_FLOW_IDX_INVALID_MASK, \ + RX_MSDU_END_5_FLOW_IDX_INVALID_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_FLOW_IDX_TIMEOUT_OFFSET)), \ + RX_MSDU_END_5_FLOW_IDX_TIMEOUT_MASK, \ + RX_MSDU_END_5_FLOW_IDX_TIMEOUT_LSB)) + +#define HAL_RX_MSDU_END_FSE_METADATA_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_15_FSE_METADATA_OFFSET)), \ + RX_MSDU_END_15_FSE_METADATA_MASK, \ + RX_MSDU_END_15_FSE_METADATA_LSB)) + +#define HAL_RX_MSDU_END_CCE_METADATA_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_16_CCE_METADATA_OFFSET)), \ + RX_MSDU_END_16_CCE_METADATA_MASK, \ + RX_MSDU_END_16_CCE_METADATA_LSB)) + +#define HAL_RX_TLV_GET_TCP_CHKSUM(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_1_TCP_UDP_CHKSUM_OFFSET)), \ + RX_MSDU_END_1_TCP_UDP_CHKSUM_MASK, \ + RX_MSDU_END_1_TCP_UDP_CHKSUM_LSB)) + +/* + * hal_rx_msdu_start_nss_get_8074v2(): API to get the NSS + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(nss) + */ +static uint32_t hal_rx_msdu_start_nss_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint8_t mimo_ss_bitmap; + + mimo_ss_bitmap = HAL_RX_MSDU_START_MIMO_SS_BITMAP(msdu_start); + + return qdf_get_hweight8(mimo_ss_bitmap); +} + +/** + * hal_rx_mon_hw_desc_get_mpdu_status_8074v2(): Retrieve MPDU status + * + * @ hw_desc_addr: Start address of Rx HW TLVs + * @ rs: Status for monitor mode + * + * Return: void + */ +static void hal_rx_mon_hw_desc_get_mpdu_status_8074v2(void *hw_desc_addr, + struct mon_rx_status *rs) +{ + struct rx_msdu_start *rx_msdu_start; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + uint32_t reg_value; + const uint32_t sgi_hw_to_cdp[] = { + CDP_SGI_0_8_US, + CDP_SGI_0_4_US, + CDP_SGI_1_6_US, + CDP_SGI_3_2_US, + }; + + rx_msdu_start = &rx_desc->msdu_start_tlv.rx_msdu_start; + + HAL_RX_GET_MSDU_AGGREGATION(rx_desc, rs); + + rs->ant_signal_db = HAL_RX_GET(rx_msdu_start, + RX_MSDU_START_5, USER_RSSI); + rs->is_stbc = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, STBC); + + reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, SGI); + rs->sgi = sgi_hw_to_cdp[reg_value]; + reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, RECEPTION_TYPE); + rs->beamformed = (reg_value == HAL_RX_RECEPTION_TYPE_MU_MIMO) ? 1 : 0; + /* TODO: rs->beamformed should be set for SU beamforming also */ +} + +#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2) +static uint32_t hal_get_link_desc_size_8074v2(void) +{ + return LINK_DESC_SIZE; +} + +/* + * hal_rx_get_tlv_8074v2(): API to get the tlv + * + * @rx_tlv: TLV data extracted from the rx packet + * Return: uint8_t + */ +static uint8_t hal_rx_get_tlv_8074v2(void *rx_tlv) +{ + return HAL_RX_GET(rx_tlv, PHYRX_RSSI_LEGACY_0, RECEIVE_BANDWIDTH); +} + +#ifndef QCA_WIFI_QCA6018 +#define HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, evm, pilot) \ + (ppdu_info)->evm_info.pilot_evm[pilot] = HAL_RX_GET(rx_tlv, \ + PHYRX_OTHER_RECEIVE_INFO, \ + SU_EVM_DETAILS_##evm##_PILOT_##pilot##_EVM) + +static inline void +hal_rx_update_su_evm_info(void *rx_tlv, + void *ppdu_info_hdl) +{ + struct hal_rx_ppdu_info *ppdu_info = + (struct hal_rx_ppdu_info *)ppdu_info_hdl; + + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 1, 0); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 2, 1); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 3, 2); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 4, 3); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 5, 4); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 6, 5); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 7, 6); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 8, 7); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 9, 8); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 10, 9); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 11, 10); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 12, 11); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 13, 12); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 14, 13); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 15, 14); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 16, 15); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 17, 16); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 18, 17); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 19, 18); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 20, 19); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 21, 20); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 22, 21); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 23, 22); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 24, 23); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 25, 24); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 26, 25); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 27, 26); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 28, 27); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 29, 28); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 30, 29); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 31, 30); + HAL_RX_UPDATE_SU_EVM_INFO(rx_tlv, ppdu_info, 32, 31); +} +/** + * hal_rx_proc_phyrx_other_receive_info_tlv_8074v2() + * -process other receive info TLV + * @rx_tlv_hdr: pointer to TLV header + * @ppdu_info: pointer to ppdu_info + * + * Return: None + */ +static +void hal_rx_proc_phyrx_other_receive_info_tlv_8074v2(void *rx_tlv_hdr, + void *ppdu_info_hdl) +{ + uint16_t tlv_tag; + void *rx_tlv; + struct hal_rx_ppdu_info *ppdu_info = ppdu_info_hdl; + + /* Skip TLV_HDR for OTHER_RECEIVE_INFO and follows the + * embedded TLVs inside + */ + rx_tlv = (uint8_t *)rx_tlv_hdr + HAL_RX_TLV32_HDR_SIZE; + tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv); + + switch (tlv_tag) { + case WIFIPHYRX_OTHER_RECEIVE_INFO_SU_EVM_DETAILS_E: + + /* Skip TLV length to get TLV content */ + rx_tlv = (uint8_t *)rx_tlv + HAL_RX_TLV32_HDR_SIZE; + + ppdu_info->evm_info.number_of_symbols = HAL_RX_GET(rx_tlv, + PHYRX_OTHER_RECEIVE_INFO, + SU_EVM_DETAILS_0_NUMBER_OF_SYMBOLS); + ppdu_info->evm_info.pilot_count = HAL_RX_GET(rx_tlv, + PHYRX_OTHER_RECEIVE_INFO, + SU_EVM_DETAILS_0_PILOT_COUNT); + ppdu_info->evm_info.nss_count = HAL_RX_GET(rx_tlv, + PHYRX_OTHER_RECEIVE_INFO, + SU_EVM_DETAILS_0_NSS_COUNT); + hal_rx_update_su_evm_info(rx_tlv, ppdu_info_hdl); + break; + } +} +#else +static inline +void hal_rx_proc_phyrx_other_receive_info_tlv_8074v2(void *rx_tlv_hdr, + void *ppdu_info_hdl) +{ +} +#endif + +#if defined(QCA_WIFI_QCA6018) && defined(WLAN_CFR_ENABLE) && \ + defined(WLAN_ENH_CFR_ENABLE) +static inline +void hal_rx_get_bb_info_8074v2(void *rx_tlv, + void *ppdu_info_hdl) +{ + struct hal_rx_ppdu_info *ppdu_info = ppdu_info_hdl; + + ppdu_info->cfr_info.bb_captured_channel = + HAL_RX_GET(rx_tlv, RXPCU_PPDU_END_INFO_3, BB_CAPTURED_CHANNEL); + + ppdu_info->cfr_info.bb_captured_timeout = + HAL_RX_GET(rx_tlv, RXPCU_PPDU_END_INFO_3, BB_CAPTURED_TIMEOUT); + + ppdu_info->cfr_info.bb_captured_reason = + HAL_RX_GET(rx_tlv, RXPCU_PPDU_END_INFO_3, BB_CAPTURED_REASON); +} + +static inline +void hal_rx_get_rtt_info_8074v2(void *rx_tlv, + void *ppdu_info_hdl) +{ + struct hal_rx_ppdu_info *ppdu_info = ppdu_info_hdl; + + ppdu_info->cfr_info.rx_location_info_valid = + HAL_RX_GET(rx_tlv, PHYRX_PKT_END_13_RX_PKT_END_DETAILS, + RX_LOCATION_INFO_DETAILS_RX_LOCATION_INFO_VALID); + + ppdu_info->cfr_info.rtt_che_buffer_pointer_low32 = + HAL_RX_GET(rx_tlv, + PHYRX_PKT_END_12_RX_PKT_END_DETAILS_RX_LOCATION_INFO_DETAILS, + RTT_CHE_BUFFER_POINTER_LOW32); + + ppdu_info->cfr_info.rtt_che_buffer_pointer_high8 = + HAL_RX_GET(rx_tlv, + PHYRX_PKT_END_11_RX_PKT_END_DETAILS_RX_LOCATION_INFO_DETAILS, + RTT_CHE_BUFFER_POINTER_HIGH8); + + ppdu_info->cfr_info.chan_capture_status = + HAL_RX_GET(rx_tlv, + PHYRX_PKT_END_13_RX_PKT_END_DETAILS_RX_LOCATION_INFO_DETAILS, + RESERVED_8); +} +#endif + +/** + * hal_rx_dump_msdu_start_tlv_8074v2() : dump RX msdu_start TLV in structured + * human readable format. + * @ msdu_start: pointer the msdu_start TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static void hal_rx_dump_msdu_start_tlv_8074v2(void *msdustart, + uint8_t dbg_level) +{ + struct rx_msdu_start *msdu_start = (struct rx_msdu_start *)msdustart; + + QDF_TRACE(QDF_MODULE_ID_DP, dbg_level, + "rx_msdu_start tlv - " + "rxpcu_mpdu_filter_in_category: %d " + "sw_frame_group_id: %d " + "phy_ppdu_id: %d " + "msdu_length: %d " + "ipsec_esp: %d " + "l3_offset: %d " + "ipsec_ah: %d " + "l4_offset: %d " + "msdu_number: %d " + "decap_format: %d " + "ipv4_proto: %d " + "ipv6_proto: %d " + "tcp_proto: %d " + "udp_proto: %d " + "ip_frag: %d " + "tcp_only_ack: %d " + "da_is_bcast_mcast: %d " + "ip4_protocol_ip6_next_header: %d " + "toeplitz_hash_2_or_4: %d " + "flow_id_toeplitz: %d " + "user_rssi: %d " + "pkt_type: %d " + "stbc: %d " + "sgi: %d " + "rate_mcs: %d " + "receive_bandwidth: %d " + "reception_type: %d " + "ppdu_start_timestamp: %d " + "sw_phy_meta_data: %d ", + msdu_start->rxpcu_mpdu_filter_in_category, + msdu_start->sw_frame_group_id, + msdu_start->phy_ppdu_id, + msdu_start->msdu_length, + msdu_start->ipsec_esp, + msdu_start->l3_offset, + msdu_start->ipsec_ah, + msdu_start->l4_offset, + msdu_start->msdu_number, + msdu_start->decap_format, + msdu_start->ipv4_proto, + msdu_start->ipv6_proto, + msdu_start->tcp_proto, + msdu_start->udp_proto, + msdu_start->ip_frag, + msdu_start->tcp_only_ack, + msdu_start->da_is_bcast_mcast, + msdu_start->ip4_protocol_ip6_next_header, + msdu_start->toeplitz_hash_2_or_4, + msdu_start->flow_id_toeplitz, + msdu_start->user_rssi, + msdu_start->pkt_type, + msdu_start->stbc, + msdu_start->sgi, + msdu_start->rate_mcs, + msdu_start->receive_bandwidth, + msdu_start->reception_type, + msdu_start->ppdu_start_timestamp, + msdu_start->sw_phy_meta_data); +} + +/** + * hal_rx_dump_msdu_end_tlv_8074v2: dump RX msdu_end TLV in structured + * human readable format. + * @ msdu_end: pointer the msdu_end TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static void hal_rx_dump_msdu_end_tlv_8074v2(void *msduend, + uint8_t dbg_level) +{ + struct rx_msdu_end *msdu_end = (struct rx_msdu_end *)msduend; + + QDF_TRACE(QDF_MODULE_ID_DP, dbg_level, + "rx_msdu_end tlv - " + "rxpcu_mpdu_filter_in_category: %d " + "sw_frame_group_id: %d " + "phy_ppdu_id: %d " + "ip_hdr_chksum: %d " + "tcp_udp_chksum: %d " + "key_id_octet: %d " + "cce_super_rule: %d " + "cce_classify_not_done_truncat: %d " + "cce_classify_not_done_cce_dis: %d " + "ext_wapi_pn_63_48: %d " + "ext_wapi_pn_95_64: %d " + "ext_wapi_pn_127_96: %d " + "reported_mpdu_length: %d " + "first_msdu: %d " + "last_msdu: %d " + "sa_idx_timeout: %d " + "da_idx_timeout: %d " + "msdu_limit_error: %d " + "flow_idx_timeout: %d " + "flow_idx_invalid: %d " + "wifi_parser_error: %d " + "amsdu_parser_error: %d " + "sa_is_valid: %d " + "da_is_valid: %d " + "da_is_mcbc: %d " + "l3_header_padding: %d " + "ipv6_options_crc: %d " + "tcp_seq_number: %d " + "tcp_ack_number: %d " + "tcp_flag: %d " + "lro_eligible: %d " + "window_size: %d " + "da_offset: %d " + "sa_offset: %d " + "da_offset_valid: %d " + "sa_offset_valid: %d " + "rule_indication_31_0: %d " + "rule_indication_63_32: %d " + "sa_idx: %d " + "msdu_drop: %d " + "reo_destination_indication: %d " + "flow_idx: %d " + "fse_metadata: %d " + "cce_metadata: %d " + "sa_sw_peer_id: %d ", + msdu_end->rxpcu_mpdu_filter_in_category, + msdu_end->sw_frame_group_id, + msdu_end->phy_ppdu_id, + msdu_end->ip_hdr_chksum, + msdu_end->tcp_udp_chksum, + msdu_end->key_id_octet, + msdu_end->cce_super_rule, + msdu_end->cce_classify_not_done_truncate, + msdu_end->cce_classify_not_done_cce_dis, + msdu_end->ext_wapi_pn_63_48, + msdu_end->ext_wapi_pn_95_64, + msdu_end->ext_wapi_pn_127_96, + msdu_end->reported_mpdu_length, + msdu_end->first_msdu, + msdu_end->last_msdu, + msdu_end->sa_idx_timeout, + msdu_end->da_idx_timeout, + msdu_end->msdu_limit_error, + msdu_end->flow_idx_timeout, + msdu_end->flow_idx_invalid, + msdu_end->wifi_parser_error, + msdu_end->amsdu_parser_error, + msdu_end->sa_is_valid, + msdu_end->da_is_valid, + msdu_end->da_is_mcbc, + msdu_end->l3_header_padding, + msdu_end->ipv6_options_crc, + msdu_end->tcp_seq_number, + msdu_end->tcp_ack_number, + msdu_end->tcp_flag, + msdu_end->lro_eligible, + msdu_end->window_size, + msdu_end->da_offset, + msdu_end->sa_offset, + msdu_end->da_offset_valid, + msdu_end->sa_offset_valid, + msdu_end->rule_indication_31_0, + msdu_end->rule_indication_63_32, + msdu_end->sa_idx, + msdu_end->msdu_drop, + msdu_end->reo_destination_indication, + msdu_end->flow_idx, + msdu_end->fse_metadata, + msdu_end->cce_metadata, + msdu_end->sa_sw_peer_id); +} + + +/* + * Get tid from RX_MPDU_START + */ +#define HAL_RX_MPDU_INFO_TID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_3_TID_OFFSET)), \ + RX_MPDU_INFO_3_TID_MASK, \ + RX_MPDU_INFO_3_TID_LSB)) + +static uint32_t hal_rx_mpdu_start_tid_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint32_t tid; + + tid = HAL_RX_MPDU_INFO_TID_GET(&mpdu_start->rx_mpdu_info_details); + + return tid; +} + +#define HAL_RX_MSDU_START_RECEPTION_TYPE_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start), \ + RX_MSDU_START_5_RECEPTION_TYPE_OFFSET)), \ + RX_MSDU_START_5_RECEPTION_TYPE_MASK, \ + RX_MSDU_START_5_RECEPTION_TYPE_LSB)) + +/* + * hal_rx_msdu_start_reception_type_get(): API to get the reception type + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(reception_type) + */ +static uint32_t hal_rx_msdu_start_reception_type_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t reception_type; + + reception_type = HAL_RX_MSDU_START_RECEPTION_TYPE_GET(msdu_start); + + return reception_type; +} + +/* RX_MSDU_END_13_DA_IDX_OR_SW_PEER_ID_OFFSET */ +#define HAL_RX_MSDU_END_DA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_13_DA_IDX_OR_SW_PEER_ID_OFFSET)), \ + RX_MSDU_END_13_DA_IDX_OR_SW_PEER_ID_MASK, \ + RX_MSDU_END_13_DA_IDX_OR_SW_PEER_ID_LSB)) + /** + * hal_rx_msdu_end_da_idx_get_8074v2: API to get da_idx + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da index + */ +static uint16_t hal_rx_msdu_end_da_idx_get_8074v2(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t da_idx; + + da_idx = HAL_RX_MSDU_END_DA_IDX_GET(msdu_end); + + return da_idx; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v2/hal_8074v2_tx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v2/hal_8074v2_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..4cde5a5d1cae430c7aee13d1c2021b9520c1bd17 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qca8074v2/hal_8074v2_tx.h @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "cdp_txrx_mon_struct.h" +#include "qdf_trace.h" +#include "hal_rx.h" +#include "hal_tx.h" +#include "dp_types.h" +#include "hal_api_mon.h" + +/** + * hal_tx_desc_set_dscp_tid_table_id_8074v2() - Sets DSCP to TID conversion + * table ID + * @desc: Handle to Tx Descriptor + * @id: DSCP to tid conversion table to be used for this frame + * + * Return: void + */ + +static void hal_tx_desc_set_dscp_tid_table_id_8074v2(void *desc, uint8_t id) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_5, + DSCP_TID_TABLE_NUM) |= + HAL_TX_SM(TCL_DATA_CMD_5, + DSCP_TID_TABLE_NUM, id); +} + + +#define DSCP_TID_TABLE_SIZE 24 +#define NUM_WORDS_PER_DSCP_TID_TABLE (DSCP_TID_TABLE_SIZE / 4) +#define HAL_TX_NUM_DSCP_REGISTER_SIZE 32 +/** + * hal_tx_set_dscp_tid_map_8074v2() - Configure default DSCP to TID map table + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id: mapping table ID - 0,1 + * + * DSCP are mapped to 8 TID values using TID values programmed + * in two set of mapping registers DSCP_TID1_MAP_<0 to 6> (id = 0) + * and DSCP_TID2_MAP_<0 to 6> (id = 1) + * Each mapping register has TID mapping for 10 DSCP values + * + * Return: none + */ + +static void hal_tx_set_dscp_tid_map_8074v2(struct hal_soc *soc, + uint8_t *map, + uint8_t id) +{ + int i; + uint32_t addr, cmn_reg_addr; + uint32_t value = 0, regval; + uint8_t val[DSCP_TID_TABLE_SIZE], cnt = 0; + + if (id >= HAL_MAX_HW_DSCP_TID_V2_MAPS) + return; + + cmn_reg_addr = HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + + addr = HWIO_TCL_R0_DSCP_TID_MAP_n_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET, + id * NUM_WORDS_PER_DSCP_TID_TABLE); + + /* Enable read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval |= + (1 << HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_SHFT); + + HAL_REG_WRITE(soc, cmn_reg_addr, regval); + + /* Write 8 (24 bits) DSCP-TID mappings in each interation */ + for (i = 0; i < 64; i += 8) { + value = (map[i] | + (map[i + 1] << 0x3) | + (map[i + 2] << 0x6) | + (map[i + 3] << 0x9) | + (map[i + 4] << 0xc) | + (map[i + 5] << 0xf) | + (map[i + 6] << 0x12) | + (map[i + 7] << 0x15)); + + qdf_mem_copy(&val[cnt], &value, 3); + cnt += 3; + } + + for (i = 0; i < DSCP_TID_TABLE_SIZE; i += 4) { + regval = *(uint32_t *)(val + i); + HAL_REG_WRITE(soc, addr, + (regval & HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK)); + addr += 4; + } + + /* Diasble read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval &= + ~(HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_BMSK); + + HAL_REG_WRITE(soc, cmn_reg_addr, regval); +} + +/** + * hal_tx_update_dscp_tid_8074v2() - Update the dscp tid map table as + updated by user + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id : MAP ID + * @dscp: DSCP_TID map index + * + * Return: void + */ +static void hal_tx_update_dscp_tid_8074v2(struct hal_soc *soc, uint8_t tid, + uint8_t id, uint8_t dscp) +{ + uint32_t addr, addr1, cmn_reg_addr; + uint32_t start_value = 0, end_value = 0; + uint32_t regval; + uint8_t end_bits = 0; + uint8_t start_bits = 0; + uint32_t start_index, end_index; + + cmn_reg_addr = HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + + addr = HWIO_TCL_R0_DSCP_TID_MAP_n_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET, + id * NUM_WORDS_PER_DSCP_TID_TABLE); + + start_index = dscp * HAL_TX_BITS_PER_TID; + end_index = (start_index + (HAL_TX_BITS_PER_TID - 1)) + % HAL_TX_NUM_DSCP_REGISTER_SIZE; + start_index = start_index % HAL_TX_NUM_DSCP_REGISTER_SIZE; + addr += (4 * ((dscp * HAL_TX_BITS_PER_TID) / + HAL_TX_NUM_DSCP_REGISTER_SIZE)); + + if (end_index < start_index) { + end_bits = end_index + 1; + start_bits = HAL_TX_BITS_PER_TID - end_bits; + start_value = tid << start_index; + end_value = tid >> start_bits; + addr1 = addr + 4; + } else { + start_bits = HAL_TX_BITS_PER_TID - end_bits; + start_value = tid << start_index; + addr1 = 0; + } + + /* Enable read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval |= + (1 << HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_SHFT); + + HAL_REG_WRITE(soc, cmn_reg_addr, regval); + + regval = HAL_REG_READ(soc, addr); + + if (end_index < start_index) + regval &= (~0) >> start_bits; + else + regval &= ~(7 << start_index); + + regval |= start_value; + + HAL_REG_WRITE(soc, addr, (regval & HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK)); + + if (addr1) { + regval = HAL_REG_READ(soc, addr1); + regval &= (~0) << end_bits; + regval |= end_value; + + HAL_REG_WRITE(soc, addr1, (regval & + HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK)); + } + + /* Diasble read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval &= + ~(HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_BMSK); + HAL_REG_WRITE(soc, cmn_reg_addr, regval); +} + +/** + * hal_tx_desc_set_lmac_id - Set the lmac_id value + * @desc: Handle to Tx Descriptor + * @lmac_id: mac Id to ast matching + * b00 – mac 0 + * b01 – mac 1 + * b10 – mac 2 + * b11 – all macs (legacy HK way) + * + * Return: void + */ +static void hal_tx_desc_set_lmac_id_8074v2(void *desc, uint8_t lmac_id) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_4, LMAC_ID) |= + HAL_TX_SM(TCL_DATA_CMD_4, LMAC_ID, lmac_id); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qcn9000/hal_9000.c b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qcn9000/hal_9000.c new file mode 100644 index 0000000000000000000000000000000000000000..41272e9b08177f644015efec91fa9fd0a7ad9001 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qcn9000/hal_9000.c @@ -0,0 +1,1927 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "hal_api.h" +#include "target_type.h" +#include "wcss_version.h" +#include "qdf_module.h" +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_OFFSET \ + RXPCU_PPDU_END_INFO_9_RX_PPDU_DURATION_OFFSET +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_MASK \ + RXPCU_PPDU_END_INFO_9_RX_PPDU_DURATION_MASK +#define UNIFIED_RXPCU_PPDU_END_INFO_8_RX_PPDU_DURATION_LSB \ + RXPCU_PPDU_END_INFO_9_RX_PPDU_DURATION_LSB +#define UNIFIED_PHYRX_HT_SIG_0_HT_SIG_INFO_PHYRX_HT_SIG_INFO_DETAILS_OFFSET \ + PHYRX_HT_SIG_0_PHYRX_HT_SIG_INFO_DETAILS_MCS_OFFSET +#define UNIFIED_PHYRX_L_SIG_B_0_L_SIG_B_INFO_PHYRX_L_SIG_B_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_B_0_PHYRX_L_SIG_B_INFO_DETAILS_RATE_OFFSET +#define UNIFIED_PHYRX_L_SIG_A_0_L_SIG_A_INFO_PHYRX_L_SIG_A_INFO_DETAILS_OFFSET \ + PHYRX_L_SIG_A_0_PHYRX_L_SIG_A_INFO_DETAILS_RATE_OFFSET +#define UNIFIED_PHYRX_VHT_SIG_A_0_VHT_SIG_A_INFO_PHYRX_VHT_SIG_A_INFO_DETAILS_OFFSET \ + PHYRX_VHT_SIG_A_0_PHYRX_VHT_SIG_A_INFO_DETAILS_BANDWIDTH_OFFSET +#define UNIFIED_PHYRX_HE_SIG_A_SU_0_HE_SIG_A_SU_INFO_PHYRX_HE_SIG_A_SU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_A_SU_0_PHYRX_HE_SIG_A_SU_INFO_DETAILS_FORMAT_INDICATION_OFFSET +#define UNIFIED_PHYRX_HE_SIG_A_MU_DL_0_HE_SIG_A_MU_DL_INFO_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_A_MU_DL_0_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS_DL_UL_FLAG_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B1_MU_0_HE_SIG_B1_MU_INFO_PHYRX_HE_SIG_B1_MU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B1_MU_0_PHYRX_HE_SIG_B1_MU_INFO_DETAILS_RU_ALLOCATION_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B2_MU_0_HE_SIG_B2_MU_INFO_PHYRX_HE_SIG_B2_MU_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B2_MU_0_PHYRX_HE_SIG_B2_MU_INFO_DETAILS_STA_ID_OFFSET +#define UNIFIED_PHYRX_HE_SIG_B2_OFDMA_0_HE_SIG_B2_OFDMA_INFO_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS_OFFSET \ + PHYRX_HE_SIG_B2_OFDMA_0_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS_STA_ID_OFFSET +#define UNIFIED_PHYRX_RSSI_LEGACY_3_RECEIVE_RSSI_INFO_PRE_RSSI_INFO_DETAILS_OFFSET \ + PHYRX_RSSI_LEGACY_3_PRE_RSSI_INFO_DETAILS_RSSI_PRI20_CHAIN0_OFFSET +#define UNIFIED_PHYRX_RSSI_LEGACY_19_RECEIVE_RSSI_INFO_PREAMBLE_RSSI_INFO_DETAILS_OFFSET \ + PHYRX_RSSI_LEGACY_19_PREAMBLE_RSSI_INFO_DETAILS_RSSI_PRI20_CHAIN0_OFFSET +#define UNIFIED_RX_MPDU_START_0_RX_MPDU_INFO_RX_MPDU_INFO_DETAILS_OFFSET \ + RX_MPDU_START_9_RX_MPDU_INFO_DETAILS_RXPCU_MPDU_FILTER_IN_CATEGORY_OFFSET +#define UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET \ + RX_MSDU_LINK_8_MSDU_0_BUFFER_ADDR_INFO_DETAILS_BUFFER_ADDR_31_0_OFFSET +#define UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_DETAILS_FIRST_MSDU_IN_MPDU_FLAG_OFFSET +#define UNIFIED_RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET \ + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_DETAILS_MSDU_COUNT_OFFSET +#define UNIFIED_REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET \ + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_DETAILS_MSDU_COUNT_OFFSET +#define UNIFORM_REO_STATUS_HEADER_STATUS_HEADER \ + STATUS_HEADER_REO_STATUS_NUMBER +#define UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC \ + STATUS_HEADER_TIMESTAMP +#define UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_DETAILS_FIRST_MSDU_IN_MPDU_FLAG_OFFSET +#define UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET \ + RX_MSDU_LINK_8_MSDU_0_BUFFER_ADDR_INFO_DETAILS_BUFFER_ADDR_31_0_OFFSET +#define UNIFIED_TCL_DATA_CMD_0_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET \ + TCL_DATA_CMD_0_BUF_ADDR_INFO_BUFFER_ADDR_31_0_OFFSET +#define UNIFIED_TCL_DATA_CMD_1_BUFFER_ADDR_INFO_BUF_ADDR_INFO_OFFSET \ + TCL_DATA_CMD_1_BUF_ADDR_INFO_BUFFER_ADDR_39_32_OFFSET +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_OFFSET \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_OFFSET +#define UNIFIED_BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB +#define UNIFIED_BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK +#define UNIFIED_BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB +#define UNIFIED_BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_LSB \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_LSB +#define UNIFIED_TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_MASK \ + TCL_DATA_CMD_2_BUF_OR_EXT_DESC_TYPE_MASK +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_MASK \ + WBM_RELEASE_RING_6_TX_RATE_STATS_PPDU_TRANSMISSION_TSF_MASK +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_OFFSET \ + WBM_RELEASE_RING_6_TX_RATE_STATS_PPDU_TRANSMISSION_TSF_OFFSET +#define UNIFIED_WBM_RELEASE_RING_6_TX_RATE_STATS_INFO_TX_RATE_STATS_LSB \ + WBM_RELEASE_RING_6_TX_RATE_STATS_PPDU_TRANSMISSION_TSF_LSB + +#define CE_WINDOW_ADDRESS_9000 \ + ((CE_WFSS_CE_REG_BASE >> WINDOW_SHIFT) & WINDOW_VALUE_MASK) + +#define UMAC_WINDOW_ADDRESS_9000 \ + ((SEQ_WCSS_UMAC_OFFSET >> WINDOW_SHIFT) & WINDOW_VALUE_MASK) + +#define WINDOW_CONFIGURATION_VALUE_9000 \ + ((CE_WINDOW_ADDRESS_9000 << 6) |\ + (UMAC_WINDOW_ADDRESS_9000 << 12) | \ + WINDOW_ENABLE_BIT) + +#include +#include +#include +#include + +/** + * hal_rx_msdu_start_nss_get_9000(): API to get the NSS + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(nss) + */ +static uint32_t hal_rx_msdu_start_nss_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint8_t mimo_ss_bitmap; + + mimo_ss_bitmap = HAL_RX_MSDU_START_MIMO_SS_BITMAP(msdu_start); + + return qdf_get_hweight8(mimo_ss_bitmap); +} + +/** + * hal_rx_mon_hw_desc_get_mpdu_status_9000(): Retrieve MPDU status + * + * @ hw_desc_addr: Start address of Rx HW TLVs + * @ rs: Status for monitor mode + * + * Return: void + */ +static void hal_rx_mon_hw_desc_get_mpdu_status_9000(void *hw_desc_addr, + struct mon_rx_status *rs) +{ + struct rx_msdu_start *rx_msdu_start; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + uint32_t reg_value; + const uint32_t sgi_hw_to_cdp[] = { + CDP_SGI_0_8_US, + CDP_SGI_0_4_US, + CDP_SGI_1_6_US, + CDP_SGI_3_2_US, + }; + + rx_msdu_start = &rx_desc->msdu_start_tlv.rx_msdu_start; + + HAL_RX_GET_MSDU_AGGREGATION(rx_desc, rs); + + rs->ant_signal_db = HAL_RX_GET(rx_msdu_start, + RX_MSDU_START_5, USER_RSSI); + rs->is_stbc = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, STBC); + + reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, SGI); + rs->sgi = sgi_hw_to_cdp[reg_value]; + reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, RECEPTION_TYPE); + rs->beamformed = (reg_value == HAL_RX_RECEPTION_TYPE_MU_MIMO) ? 1 : 0; + /* TODO: rs->beamformed should be set for SU beamforming also */ +} + +#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2) +/** + * hal_get_link_desc_size_9000(): API to get the link desc size + * + * Return: uint32_t + */ +static uint32_t hal_get_link_desc_size_9000(void) +{ + return LINK_DESC_SIZE; +} + +/** + * hal_rx_get_tlv_9000(): API to get the tlv + * + * @rx_tlv: TLV data extracted from the rx packet + * Return: uint8_t + */ +static uint8_t hal_rx_get_tlv_9000(void *rx_tlv) +{ + return HAL_RX_GET(rx_tlv, PHYRX_RSSI_LEGACY_0, RECEIVE_BANDWIDTH); +} + +/** + * hal_rx_proc_phyrx_other_receive_info_tlv_9000(): API to get tlv info + * + * Return: uint32_t + */ +static inline +void hal_rx_proc_phyrx_other_receive_info_tlv_9000(void *rx_tlv_hdr, + void *ppdu_info_hdl) +{ +} + +/** + * hal_rx_dump_msdu_start_tlv_9000() : dump RX msdu_start TLV in structured + * human readable format. + * @ msdu_start: pointer the msdu_start TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static void hal_rx_dump_msdu_start_tlv_9000(void *msdustart, + uint8_t dbg_level) +{ + struct rx_msdu_start *msdu_start = (struct rx_msdu_start *)msdustart; + + QDF_TRACE(QDF_MODULE_ID_DP, dbg_level, + "rx_msdu_start tlv - " + "rxpcu_mpdu_filter_in_category: %d " + "sw_frame_group_id: %d " + "phy_ppdu_id: %d " + "msdu_length: %d " + "ipsec_esp: %d " + "l3_offset: %d " + "ipsec_ah: %d " + "l4_offset: %d " + "msdu_number: %d " + "decap_format: %d " + "ipv4_proto: %d " + "ipv6_proto: %d " + "tcp_proto: %d " + "udp_proto: %d " + "ip_frag: %d " + "tcp_only_ack: %d " + "da_is_bcast_mcast: %d " + "ip4_protocol_ip6_next_header: %d " + "toeplitz_hash_2_or_4: %d " + "flow_id_toeplitz: %d " + "user_rssi: %d " + "pkt_type: %d " + "stbc: %d " + "sgi: %d " + "rate_mcs: %d " + "receive_bandwidth: %d " + "reception_type: %d " + "ppdu_start_timestamp: %d " + "sw_phy_meta_data: %d ", + msdu_start->rxpcu_mpdu_filter_in_category, + msdu_start->sw_frame_group_id, + msdu_start->phy_ppdu_id, + msdu_start->msdu_length, + msdu_start->ipsec_esp, + msdu_start->l3_offset, + msdu_start->ipsec_ah, + msdu_start->l4_offset, + msdu_start->msdu_number, + msdu_start->decap_format, + msdu_start->ipv4_proto, + msdu_start->ipv6_proto, + msdu_start->tcp_proto, + msdu_start->udp_proto, + msdu_start->ip_frag, + msdu_start->tcp_only_ack, + msdu_start->da_is_bcast_mcast, + msdu_start->ip4_protocol_ip6_next_header, + msdu_start->toeplitz_hash_2_or_4, + msdu_start->flow_id_toeplitz, + msdu_start->user_rssi, + msdu_start->pkt_type, + msdu_start->stbc, + msdu_start->sgi, + msdu_start->rate_mcs, + msdu_start->receive_bandwidth, + msdu_start->reception_type, + msdu_start->ppdu_start_timestamp, + msdu_start->sw_phy_meta_data); +} + +/** + * hal_rx_dump_msdu_end_tlv_9000: dump RX msdu_end TLV in structured + * human readable format. + * @ msdu_end: pointer the msdu_end TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static void hal_rx_dump_msdu_end_tlv_9000(void *msduend, + uint8_t dbg_level) +{ + struct rx_msdu_end *msdu_end = (struct rx_msdu_end *)msduend; + + QDF_TRACE(QDF_MODULE_ID_DP, dbg_level, + "rx_msdu_end tlv - " + "rxpcu_mpdu_filter_in_category: %d " + "sw_frame_group_id: %d " + "phy_ppdu_id: %d " + "ip_hdr_chksum: %d " + "reported_mpdu_length: %d " + "key_id_octet: %d " + "cce_super_rule: %d " + "cce_classify_not_done_truncat: %d " + "cce_classify_not_done_cce_dis: %d " + "rule_indication_31_0: %d " + "rule_indication_63_32: %d " + "da_offset: %d " + "sa_offset: %d " + "da_offset_valid: %d " + "sa_offset_valid: %d " + "ipv6_options_crc: %d " + "tcp_seq_number: %d " + "tcp_ack_number: %d " + "tcp_flag: %d " + "lro_eligible: %d " + "window_size: %d " + "tcp_udp_chksum: %d " + "sa_idx_timeout: %d " + "da_idx_timeout: %d " + "msdu_limit_error: %d " + "flow_idx_timeout: %d " + "flow_idx_invalid: %d " + "wifi_parser_error: %d " + "amsdu_parser_error: %d " + "sa_is_valid: %d " + "da_is_valid: %d " + "da_is_mcbc: %d " + "l3_header_padding: %d " + "first_msdu: %d " + "last_msdu: %d " + "sa_idx: %d " + "msdu_drop: %d " + "reo_destination_indication: %d " + "flow_idx: %d " + "fse_metadata: %d " + "cce_metadata: %d " + "sa_sw_peer_id: %d ", + msdu_end->rxpcu_mpdu_filter_in_category, + msdu_end->sw_frame_group_id, + msdu_end->phy_ppdu_id, + msdu_end->ip_hdr_chksum, + msdu_end->reported_mpdu_length, + msdu_end->key_id_octet, + msdu_end->cce_super_rule, + msdu_end->cce_classify_not_done_truncate, + msdu_end->cce_classify_not_done_cce_dis, + msdu_end->rule_indication_31_0, + msdu_end->rule_indication_63_32, + msdu_end->da_offset, + msdu_end->sa_offset, + msdu_end->da_offset_valid, + msdu_end->sa_offset_valid, + msdu_end->ipv6_options_crc, + msdu_end->tcp_seq_number, + msdu_end->tcp_ack_number, + msdu_end->tcp_flag, + msdu_end->lro_eligible, + msdu_end->window_size, + msdu_end->tcp_udp_chksum, + msdu_end->sa_idx_timeout, + msdu_end->da_idx_timeout, + msdu_end->msdu_limit_error, + msdu_end->flow_idx_timeout, + msdu_end->flow_idx_invalid, + msdu_end->wifi_parser_error, + msdu_end->amsdu_parser_error, + msdu_end->sa_is_valid, + msdu_end->da_is_valid, + msdu_end->da_is_mcbc, + msdu_end->l3_header_padding, + msdu_end->first_msdu, + msdu_end->last_msdu, + msdu_end->sa_idx, + msdu_end->msdu_drop, + msdu_end->reo_destination_indication, + msdu_end->flow_idx, + msdu_end->fse_metadata, + msdu_end->cce_metadata, + msdu_end->sa_sw_peer_id); +} + +/** + * hal_rx_mpdu_start_tid_get_9000(): API to get tid + * from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(tid value) + */ +static uint32_t hal_rx_mpdu_start_tid_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint32_t tid; + + tid = HAL_RX_MPDU_INFO_TID_GET(&mpdu_start->rx_mpdu_info_details); + + return tid; +} + +/** + * hal_rx_msdu_start_reception_type_get(): API to get the reception type + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(reception_type) + */ +static uint32_t hal_rx_msdu_start_reception_type_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t reception_type; + + reception_type = HAL_RX_MSDU_START_RECEPTION_TYPE_GET(msdu_start); + + return reception_type; +} + + /** + * hal_rx_msdu_end_da_idx_get_9000: API to get da_idx + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da index + */ +static uint16_t hal_rx_msdu_end_da_idx_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t da_idx; + + da_idx = HAL_RX_MSDU_END_DA_IDX_GET(msdu_end); + + return da_idx; +} + +/** + * hal_rx_get_rx_fragment_number_9000(): Function to retrieve rx fragment number + * + * @nbuf: Network buffer + * Returns: rx fragment number + */ +static +uint8_t hal_rx_get_rx_fragment_number_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + /* Return first 4 bits as fragment number */ + return (HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info) & + DOT11_SEQ_FRAG_MASK); +} + +/** + * hal_rx_msdu_end_da_is_mcbc_get_9000(): API to check if pkt is MCBC + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_mcbc + */ +static uint8_t +hal_rx_msdu_end_da_is_mcbc_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_DA_IS_MCBC_GET(msdu_end); +} + +/** + * hal_rx_msdu_end_sa_is_valid_get_9000(): API to get_9000 the + * sa_is_valid bit from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_is_valid bit + */ +static uint8_t +hal_rx_msdu_end_sa_is_valid_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t sa_is_valid; + + sa_is_valid = HAL_RX_MSDU_END_SA_IS_VALID_GET(msdu_end); + + return sa_is_valid; +} + +/** + * hal_rx_msdu_end_sa_idx_get_9000(): API to get_9000 the + * sa_idx from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: sa_idx (SA AST index) + */ +static uint16_t hal_rx_msdu_end_sa_idx_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t sa_idx; + + sa_idx = HAL_RX_MSDU_END_SA_IDX_GET(msdu_end); + + return sa_idx; +} + +/** + * hal_rx_desc_is_first_msdu_9000() - Check if first msdu + * + * @hal_soc_hdl: hal_soc handle + * @hw_desc_addr: hardware descriptor address + * + * Return: 0 - success/ non-zero failure + */ +static uint32_t hal_rx_desc_is_first_msdu_9000(void *hw_desc_addr) +{ + struct rx_pkt_tlvs *rx_tlvs = (struct rx_pkt_tlvs *)hw_desc_addr; + struct rx_msdu_end *msdu_end = &rx_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_GET(msdu_end, RX_MSDU_END_10, FIRST_MSDU); +} + +/** + * hal_rx_msdu_end_l3_hdr_padding_get_9000(): API to get_9000 the + * l3_header padding from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: number of l3 header padding bytes + */ +static uint32_t hal_rx_msdu_end_l3_hdr_padding_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint32_t l3_header_padding; + + l3_header_padding = HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(msdu_end); + + return l3_header_padding; +} + +/** + * @ hal_rx_encryption_info_valid_9000: Returns encryption type. + * + * @ buf: rx_tlv_hdr of the received packet + * @ Return: encryption type + */ +inline uint32_t hal_rx_encryption_info_valid_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + uint32_t encryption_info = HAL_RX_MPDU_ENCRYPTION_INFO_VALID(mpdu_info); + + return encryption_info; +} + +/* + * @ hal_rx_print_pn_9000: Prints the PN of rx packet. + * + * @ buf: rx_tlv_hdr of the received packet + * @ Return: void + */ +static void hal_rx_print_pn_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + uint32_t pn_31_0 = HAL_RX_MPDU_PN_31_0_GET(mpdu_info); + uint32_t pn_63_32 = HAL_RX_MPDU_PN_63_32_GET(mpdu_info); + uint32_t pn_95_64 = HAL_RX_MPDU_PN_95_64_GET(mpdu_info); + uint32_t pn_127_96 = HAL_RX_MPDU_PN_127_96_GET(mpdu_info); + + hal_debug("PN number pn_127_96 0x%x pn_95_64 0x%x pn_63_32 0x%x pn_31_0 0x%x ", + pn_127_96, pn_95_64, pn_63_32, pn_31_0); +} + +/** + * hal_rx_msdu_end_first_msdu_get_9000: API to get first msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: first_msdu + */ +static uint8_t hal_rx_msdu_end_first_msdu_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t first_msdu; + + first_msdu = HAL_RX_MSDU_END_FIRST_MSDU_GET(msdu_end); + + return first_msdu; +} + +/** + * hal_rx_msdu_end_da_is_valid_get_9000: API to check if da is valid + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_valid + */ +static uint8_t hal_rx_msdu_end_da_is_valid_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t da_is_valid; + + da_is_valid = HAL_RX_MSDU_END_DA_IS_VALID_GET(msdu_end); + + return da_is_valid; +} + +/** + * hal_rx_msdu_end_last_msdu_get_9000: API to get last msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: last_msdu + */ +static uint8_t hal_rx_msdu_end_last_msdu_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t last_msdu; + + last_msdu = HAL_RX_MSDU_END_LAST_MSDU_GET(msdu_end); + + return last_msdu; +} + +/* + * hal_rx_get_mpdu_mac_ad4_valid(): Retrieves if mpdu 4th addr is valid + * + * @nbuf: Network buffer + * Returns: value of mpdu 4th address valid field + */ +inline bool hal_rx_get_mpdu_mac_ad4_valid_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + bool ad4_valid = 0; + + ad4_valid = HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(rx_mpdu_info); + + return ad4_valid; +} + +/** + * hal_rx_mpdu_start_sw_peer_id_get_9000: Retrieve sw peer_id + * @buf: network buffer + * + * Return: sw peer_id + */ +static uint32_t hal_rx_mpdu_start_sw_peer_id_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + return HAL_RX_MPDU_INFO_SW_PEER_ID_GET( + &mpdu_start->rx_mpdu_info_details); +} + +/* + * hal_rx_mpdu_get_to_ds_9000(): API to get the tods info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(to_ds) + */ +static uint32_t hal_rx_mpdu_get_to_ds_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + return HAL_RX_MPDU_GET_TODS(mpdu_info); +} + +/* + * hal_rx_mpdu_get_fr_ds_9000(): API to get the from ds info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(fr_ds) + */ +static uint32_t hal_rx_mpdu_get_fr_ds_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + + return HAL_RX_MPDU_GET_FROMDS(mpdu_info); +} + +/* + * hal_rx_get_mpdu_frame_control_valid_9000(): Retrieves mpdu + * frame control valid + * + * @nbuf: Network buffer + * Returns: value of frame control valid field + */ +static uint8_t hal_rx_get_mpdu_frame_control_valid_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(rx_mpdu_info); +} + +/* + * hal_rx_mpdu_get_addr1_9000(): API to check get address1 of the mpdu + * + * @buf: pointer to the start of RX PKT TLV headera + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr1_9000(uint8_t *buf, + uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr1 { + uint32_t ad1_31_0; + uint16_t ad1_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr1 *addr = (struct hal_addr1 *)mac_addr; + uint32_t mac_addr_ad1_valid; + + mac_addr_ad1_valid = HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(mpdu_info); + + if (mac_addr_ad1_valid) { + addr->ad1_31_0 = HAL_RX_MPDU_AD1_31_0_GET(mpdu_info); + addr->ad1_47_32 = HAL_RX_MPDU_AD1_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr2_9000(): API to check get address2 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr2_9000(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr2 { + uint16_t ad2_15_0; + uint32_t ad2_47_16; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr2 *addr = (struct hal_addr2 *)mac_addr; + uint32_t mac_addr_ad2_valid; + + mac_addr_ad2_valid = HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(mpdu_info); + + if (mac_addr_ad2_valid) { + addr->ad2_15_0 = HAL_RX_MPDU_AD2_15_0_GET(mpdu_info); + addr->ad2_47_16 = HAL_RX_MPDU_AD2_47_16_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr3_9000(): API to get address3 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr3_9000(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr3 { + uint32_t ad3_31_0; + uint16_t ad3_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr3 *addr = (struct hal_addr3 *)mac_addr; + uint32_t mac_addr_ad3_valid; + + mac_addr_ad3_valid = HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(mpdu_info); + + if (mac_addr_ad3_valid) { + addr->ad3_31_0 = HAL_RX_MPDU_AD3_31_0_GET(mpdu_info); + addr->ad3_47_32 = HAL_RX_MPDU_AD3_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr4_9000(): API to get address4 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static QDF_STATUS hal_rx_mpdu_get_addr4_9000(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr4 { + uint32_t ad4_31_0; + uint16_t ad4_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr4 *addr = (struct hal_addr4 *)mac_addr; + uint32_t mac_addr_ad4_valid; + + mac_addr_ad4_valid = HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(mpdu_info); + + if (mac_addr_ad4_valid) { + addr->ad4_31_0 = HAL_RX_MPDU_AD4_31_0_GET(mpdu_info); + addr->ad4_47_32 = HAL_RX_MPDU_AD4_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_get_mpdu_sequence_control_valid_9000(): Get mpdu + * sequence control valid + * + * @nbuf: Network buffer + * Returns: value of sequence control valid field + */ +static uint8_t hal_rx_get_mpdu_sequence_control_valid_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(rx_mpdu_info); +} + +/** + * hal_rx_is_unicast_9000: check packet is unicast frame or not. + * + * @ buf: pointer to rx pkt TLV. + * + * Return: true on unicast. + */ +static bool hal_rx_is_unicast_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint32_t grp_id; + uint8_t *rx_mpdu_info = (uint8_t *)&mpdu_start->rx_mpdu_info_details; + + grp_id = (_HAL_MS((*_OFFSET_TO_WORD_PTR((rx_mpdu_info), + RX_MPDU_INFO_9_SW_FRAME_GROUP_ID_OFFSET)), + RX_MPDU_INFO_9_SW_FRAME_GROUP_ID_MASK, + RX_MPDU_INFO_9_SW_FRAME_GROUP_ID_LSB)); + + return (HAL_MPDU_SW_FRAME_GROUP_UNICAST_DATA == grp_id) ? true : false; +} + +/** + * hal_rx_tid_get_9000: get tid based on qos control valid. + * @hal_soc_hdl: hal soc handle + * @buf: pointer to rx pkt TLV. + * + * Return: tid + */ +static uint32_t hal_rx_tid_get_9000(hal_soc_handle_t hal_soc_hdl, uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + uint8_t *rx_mpdu_info = (uint8_t *)&mpdu_start->rx_mpdu_info_details; + uint8_t qos_control_valid = + (_HAL_MS((*_OFFSET_TO_WORD_PTR((rx_mpdu_info), + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_OFFSET)), + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_MASK, + RX_MPDU_INFO_11_MPDU_QOS_CONTROL_VALID_LSB)); + + if (qos_control_valid) + return hal_rx_mpdu_start_tid_get_9000(buf); + + return HAL_RX_NON_QOS_TID; +} + +/** + * hal_rx_hw_desc_get_ppduid_get_9000(): retrieve ppdu id + * @hw_desc_addr: hw addr + * + * Return: ppdu id + */ +static uint32_t hal_rx_hw_desc_get_ppduid_get_9000(void *hw_desc_addr) +{ + struct rx_mpdu_info *rx_mpdu_info; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + + rx_mpdu_info = + &rx_desc->mpdu_start_tlv.rx_mpdu_start.rx_mpdu_info_details; + + return HAL_RX_GET(rx_mpdu_info, RX_MPDU_INFO_9, PHY_PPDU_ID); +} + +/** + * hal_reo_status_get_header_9000 - Process reo desc info + * @d - Pointer to reo descriptior + * @b - tlv type info + * @h1 - Pointer to hal_reo_status_header where info to be stored + * + * Return - none. + * + */ +static void hal_reo_status_get_header_9000(uint32_t *d, int b, void *h1) +{ + uint32_t val1 = 0; + struct hal_reo_status_header *h = + (struct hal_reo_status_header *)h1; + + switch (b) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + val1 = + d[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UPDATE_RX_REO_QUEUE_STATUS_0, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; + break; + default: + qdf_nofl_err("ERROR: Unknown tlv\n"); + break; + } + h->cmd_num = + HAL_GET_FIELD( + UNIFORM_REO_STATUS_HEADER_0, REO_STATUS_NUMBER, + val1); + h->exec_time = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, + CMD_EXECUTION_TIME, val1); + h->status = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, + REO_CMD_EXECUTION_STATUS, val1); + switch (b) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + val1 = + d[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + val1 = d[HAL_OFFSET_DW(REO_UPDATE_RX_REO_QUEUE_STATUS_1, + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER_GENERIC)]; + break; + default: + qdf_nofl_err("ERROR: Unknown tlv\n"); + break; + } + h->tstamp = + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_1, TIMESTAMP, val1); +} + +/** + * hal_rx_mpdu_start_mpdu_qos_control_valid_get_9000(): + * Retrieve qos control valid bit from the tlv. + * @buf: pointer to rx pkt TLV. + * + * Return: qos control value. + */ +static inline uint32_t +hal_rx_mpdu_start_mpdu_qos_control_valid_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + return HAL_RX_MPDU_INFO_QOS_CONTROL_VALID_GET( + &mpdu_start->rx_mpdu_info_details); +} + +/** + * hal_rx_msdu_end_sa_sw_peer_id_get_9000(): API to get the + * sa_sw_peer_id from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: sa_sw_peer_id index + */ +static inline uint32_t +hal_rx_msdu_end_sa_sw_peer_id_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(msdu_end); +} + +/** + * hal_tx_desc_set_mesh_en_9000 - Set mesh_enable flag in Tx descriptor + * @desc: Handle to Tx Descriptor + * @en: For raw WiFi frames, this indicates transmission to a mesh STA, + * enabling the interpretation of the 'Mesh Control Present' bit + * (bit 8) of QoS Control (otherwise this bit is ignored), + * For native WiFi frames, this indicates that a 'Mesh Control' field + * is present between the header and the LLC. + * + * Return: void + */ +static inline +void hal_tx_desc_set_mesh_en_9000(void *desc, uint8_t en) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_5, MESH_ENABLE) |= + HAL_TX_SM(TCL_DATA_CMD_5, MESH_ENABLE, en); +} + +static +void *hal_rx_msdu0_buffer_addr_lsb_9000(void *link_desc_va) +{ + return (void *)HAL_RX_MSDU0_BUFFER_ADDR_LSB(link_desc_va); +} + +static +void *hal_rx_msdu_desc_info_ptr_get_9000(void *msdu0) +{ + return (void *)HAL_RX_MSDU_DESC_INFO_PTR_GET(msdu0); +} + +static +void *hal_ent_mpdu_desc_info_9000(void *ent_ring_desc) +{ + return (void *)HAL_ENT_MPDU_DESC_INFO(ent_ring_desc); +} + +static +void *hal_dst_mpdu_desc_info_9000(void *dst_ring_desc) +{ + return (void *)HAL_DST_MPDU_DESC_INFO(dst_ring_desc); +} + +static +uint8_t hal_rx_get_fc_valid_9000(uint8_t *buf) +{ + return HAL_RX_GET_FC_VALID(buf); +} + +static uint8_t hal_rx_get_to_ds_flag_9000(uint8_t *buf) +{ + return HAL_RX_GET_TO_DS_FLAG(buf); +} + +static uint8_t hal_rx_get_mac_addr2_valid_9000(uint8_t *buf) +{ + return HAL_RX_GET_MAC_ADDR2_VALID(buf); +} + +static uint8_t hal_rx_get_filter_category_9000(uint8_t *buf) +{ + return HAL_RX_GET_FILTER_CATEGORY(buf); +} + +static uint32_t +hal_rx_get_ppdu_id_9000(uint8_t *buf) +{ + return HAL_RX_GET_PPDU_ID(buf); +} + +/** + * hal_reo_config_9000(): Set reo config parameters + * @soc: hal soc handle + * @reg_val: value to be set + * @reo_params: reo parameters + * + * Return: void + */ +static void +hal_reo_config_9000(struct hal_soc *soc, + uint32_t reg_val, + struct hal_reo_params *reo_params) +{ + HAL_REO_R0_CONFIG(soc, reg_val, reo_params); +} + +/** + * hal_rx_msdu_desc_info_get_ptr_9000() - Get msdu desc info ptr + * @msdu_details_ptr - Pointer to msdu_details_ptr + * + * Return - Pointer to rx_msdu_desc_info structure. + * + */ +static void *hal_rx_msdu_desc_info_get_ptr_9000(void *msdu_details_ptr) +{ + return HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr); +} + +/** + * hal_rx_link_desc_msdu0_ptr_9000 - Get pointer to rx_msdu details + * @link_desc - Pointer to link desc + * + * Return - Pointer to rx_msdu_details structure + * + */ +static void *hal_rx_link_desc_msdu0_ptr_9000(void *link_desc) +{ + return HAL_RX_LINK_DESC_MSDU0_PTR(link_desc); +} + +/** + * hal_rx_msdu_flow_idx_get_9000: API to get flow index + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index value from MSDU END TLV + */ +static inline uint32_t hal_rx_msdu_flow_idx_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_GET(msdu_end); +} + +/** + * hal_rx_msdu_flow_idx_invalid_9000: API to get flow index invalid + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index invalid value from MSDU END TLV + */ +static bool hal_rx_msdu_flow_idx_invalid_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(msdu_end); +} + +/** + * hal_rx_msdu_flow_idx_timeout_9000: API to get flow index timeout + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: flow index timeout value from MSDU END TLV + */ +static bool hal_rx_msdu_flow_idx_timeout_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(msdu_end); +} + +/** + * hal_rx_msdu_fse_metadata_get_9000: API to get FSE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: fse metadata value from MSDU END TLV + */ +static uint32_t hal_rx_msdu_fse_metadata_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_FSE_METADATA_GET(msdu_end); +} + +/** + * hal_rx_msdu_cce_metadata_get_9000: API to get CCE metadata + * from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * + * Return: cce_metadata + */ +static uint16_t +hal_rx_msdu_cce_metadata_get_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + return HAL_RX_MSDU_END_CCE_METADATA_GET(msdu_end); +} + +/** + * hal_rx_msdu_get_flow_params_9000: API to get flow index, flow index invalid + * and flow index timeout from rx_msdu_end TLV + * @buf: pointer to the start of RX PKT TLV headers + * @flow_invalid: pointer to return value of flow_idx_valid + * @flow_timeout: pointer to return value of flow_idx_timeout + * @flow_index: pointer to return value of flow_idx + * + * Return: none + */ +static inline void +hal_rx_msdu_get_flow_params_9000(uint8_t *buf, + bool *flow_invalid, + bool *flow_timeout, + uint32_t *flow_index) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + + *flow_invalid = HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(msdu_end); + *flow_timeout = HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(msdu_end); + *flow_index = HAL_RX_MSDU_END_FLOW_IDX_GET(msdu_end); +} + +/** + * hal_rx_tlv_get_tcp_chksum_9000() - API to get tcp checksum + * @buf: rx_tlv_hdr + * + * Return: tcp checksum + */ +static uint16_t +hal_rx_tlv_get_tcp_chksum_9000(uint8_t *buf) +{ + return HAL_RX_TLV_GET_TCP_CHKSUM(buf); +} + +/** + * hal_rx_get_rx_sequence_9000(): Function to retrieve rx sequence number + * + * @nbuf: Network buffer + * Returns: rx sequence number + */ +static +uint16_t hal_rx_get_rx_sequence_9000(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + + return HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info); +} + +/** + * hal_get_window_address_9000(): Function to get hp/tp address + * @hal_soc: Pointer to hal_soc + * @addr: address offset of register + * + * Return: modified address offset of register + */ +static inline qdf_iomem_t hal_get_window_address_9000(struct hal_soc *hal_soc, + qdf_iomem_t addr) +{ + uint32_t offset = addr - hal_soc->dev_base_addr; + qdf_iomem_t new_offset; + + /* + * If offset lies within DP register range, use 3rd window to write + * into DP region. + */ + if ((offset ^ SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK) { + new_offset = (hal_soc->dev_base_addr + (3 * WINDOW_START) + + (offset & WINDOW_RANGE_MASK)); + /* + * If offset lies within CE register range, use 2nd window to write + * into CE region. + */ + } else if ((offset ^ CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK) { + new_offset = (hal_soc->dev_base_addr + (2 * WINDOW_START) + + (offset & WINDOW_RANGE_MASK)); + } else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: ERROR: Accessing Wrong register\n", __func__); + qdf_assert_always(0); + return 0; + } + return new_offset; +} + +static inline void hal_write_window_register(struct hal_soc *hal_soc) +{ + /* Write value into window configuration register */ + qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS, + WINDOW_CONFIGURATION_VALUE_9000); +} + +/** + * hal_rx_msdu_packet_metadata_get_9000(): API to get the + * msdu information from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * @ hal_rx_msdu_metadata: pointer to the msdu info structure + */ +static void +hal_rx_msdu_packet_metadata_get_9000(uint8_t *buf, + void *msdu_pkt_metadata) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + struct hal_rx_msdu_metadata *msdu_metadata = + (struct hal_rx_msdu_metadata *)msdu_pkt_metadata; + + msdu_metadata->l3_hdr_pad = + HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(msdu_end); + msdu_metadata->sa_idx = HAL_RX_MSDU_END_SA_IDX_GET(msdu_end); + msdu_metadata->da_idx = HAL_RX_MSDU_END_DA_IDX_GET(msdu_end); + msdu_metadata->sa_sw_peer_id = + HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(msdu_end); +} + +struct hal_hw_txrx_ops qcn9000_hal_hw_txrx_ops = { + + /* init and setup */ + hal_srng_dst_hw_init_generic, + hal_srng_src_hw_init_generic, + hal_get_hw_hptp_generic, + hal_reo_setup_generic, + hal_setup_link_idle_list_generic, + hal_get_window_address_9000, + NULL, + + /* tx */ + hal_tx_desc_set_dscp_tid_table_id_9000, + hal_tx_set_dscp_tid_map_9000, + hal_tx_update_dscp_tid_9000, + hal_tx_desc_set_lmac_id_9000, + hal_tx_desc_set_buf_addr_generic, + hal_tx_desc_set_search_type_generic, + hal_tx_desc_set_search_index_generic, + hal_tx_desc_set_cache_set_num_generic, + hal_tx_comp_get_status_generic, + hal_tx_comp_get_release_reason_generic, + hal_get_wbm_internal_error_generic, + hal_tx_desc_set_mesh_en_9000, + + /* rx */ + hal_rx_msdu_start_nss_get_9000, + hal_rx_mon_hw_desc_get_mpdu_status_9000, + hal_rx_get_tlv_9000, + hal_rx_proc_phyrx_other_receive_info_tlv_9000, + hal_rx_dump_msdu_start_tlv_9000, + hal_rx_dump_msdu_end_tlv_9000, + hal_get_link_desc_size_9000, + hal_rx_mpdu_start_tid_get_9000, + hal_rx_msdu_start_reception_type_get_9000, + hal_rx_msdu_end_da_idx_get_9000, + hal_rx_msdu_desc_info_get_ptr_9000, + hal_rx_link_desc_msdu0_ptr_9000, + hal_reo_status_get_header_9000, + hal_rx_status_get_tlv_info_generic, + hal_rx_wbm_err_info_get_generic, + hal_rx_dump_mpdu_start_tlv_generic, + + hal_tx_set_pcp_tid_map_generic, + hal_tx_update_pcp_tid_generic, + hal_tx_update_tidmap_prty_generic, + hal_rx_get_rx_fragment_number_9000, + hal_rx_msdu_end_da_is_mcbc_get_9000, + hal_rx_msdu_end_sa_is_valid_get_9000, + hal_rx_msdu_end_sa_idx_get_9000, + hal_rx_desc_is_first_msdu_9000, + hal_rx_msdu_end_l3_hdr_padding_get_9000, + hal_rx_encryption_info_valid_9000, + hal_rx_print_pn_9000, + hal_rx_msdu_end_first_msdu_get_9000, + hal_rx_msdu_end_da_is_valid_get_9000, + hal_rx_msdu_end_last_msdu_get_9000, + hal_rx_get_mpdu_mac_ad4_valid_9000, + hal_rx_mpdu_start_sw_peer_id_get_9000, + hal_rx_mpdu_get_to_ds_9000, + hal_rx_mpdu_get_fr_ds_9000, + hal_rx_get_mpdu_frame_control_valid_9000, + hal_rx_mpdu_get_addr1_9000, + hal_rx_mpdu_get_addr2_9000, + hal_rx_mpdu_get_addr3_9000, + hal_rx_mpdu_get_addr4_9000, + hal_rx_get_mpdu_sequence_control_valid_9000, + hal_rx_is_unicast_9000, + hal_rx_tid_get_9000, + hal_rx_hw_desc_get_ppduid_get_9000, + hal_rx_mpdu_start_mpdu_qos_control_valid_get_9000, + hal_rx_msdu_end_sa_sw_peer_id_get_9000, + hal_rx_msdu0_buffer_addr_lsb_9000, + hal_rx_msdu_desc_info_ptr_get_9000, + hal_ent_mpdu_desc_info_9000, + hal_dst_mpdu_desc_info_9000, + hal_rx_get_fc_valid_9000, + hal_rx_get_to_ds_flag_9000, + hal_rx_get_mac_addr2_valid_9000, + hal_rx_get_filter_category_9000, + hal_rx_get_ppdu_id_9000, + hal_reo_config_9000, + hal_rx_msdu_flow_idx_get_9000, + hal_rx_msdu_flow_idx_invalid_9000, + hal_rx_msdu_flow_idx_timeout_9000, + hal_rx_msdu_fse_metadata_get_9000, + hal_rx_msdu_cce_metadata_get_9000, + hal_rx_msdu_get_flow_params_9000, + hal_rx_tlv_get_tcp_chksum_9000, + hal_rx_get_rx_sequence_9000, + NULL, + NULL, + /* rx - msdu fast path info fields */ + hal_rx_msdu_packet_metadata_get_9000, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, +}; + +struct hal_hw_srng_config hw_srng_table_9000[] = { + /* TODO: max_rings can populated by querying HW capabilities */ + { /* REO_DST */ + .start_ring_id = HAL_SRNG_REO2SW1, + .max_rings = 4, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2SW1_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + .reg_size = { + HWIO_REO_R0_REO2SW2_RING_BASE_LSB_ADDR(0) - + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(0), + HWIO_REO_R2_REO2SW2_RING_HP_ADDR(0) - + HWIO_REO_R2_REO2SW1_RING_HP_ADDR(0), + }, + .max_size = + HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_EXCEPTION */ + /* Designating REO2TCL ring as exception ring. This ring is + * similar to other REO2SW rings though it is named as REO2TCL. + * Any of theREO2SW rings can be used as exception ring. + */ + .start_ring_id = HAL_SRNG_REO2TCL, + .max_rings = 1, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2TCL_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2TCL_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_REINJECT */ + .start_ring_id = HAL_SRNG_SW2REO, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_SW2REO_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_SW2REO_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_CMD */ + .start_ring_id = HAL_SRNG_REO_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_REO_CMD_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_CMD_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* REO_STATUS */ + .start_ring_id = HAL_SRNG_REO_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats_status)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_DATA */ + .start_ring_id = HAL_SRNG_SW2TCL1, + .max_rings = 3, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_data_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + .reg_size = { + HWIO_TCL_R0_SW2TCL2_RING_BASE_LSB_ADDR(0) - + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(0), + HWIO_TCL_R2_SW2TCL2_RING_HP_ADDR(0) - + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(0), + }, + .max_size = + HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_CMD */ + .start_ring_id = HAL_SRNG_SW2TCL_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_gse_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL_CREDIT_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL_CREDIT_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_TCL_R0_SW2TCL_CREDIT_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_SW2TCL_CREDIT_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* TCL_STATUS */ + .start_ring_id = HAL_SRNG_TCL_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_status_ring)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_TCL_STATUS1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_SRC */ + .start_ring_id = HAL_SRNG_CE_0_SRC, + .max_rings = 12, + .entry_size = sizeof(struct ce_src_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET), + }, + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_DST */ + .start_ring_id = HAL_SRNG_CE_0_DST, + .max_rings = 12, + .entry_size = 8 >> 2, + /*TODO: entry_size above should actually be + * sizeof(struct ce_dst_desc) >> 2, but couldn't find definition + * of struct ce_dst_desc in HW header files + */ + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + }, + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* CE_DST_STATUS */ + .start_ring_id = HAL_SRNG_CE_0_DST_STATUS, + .max_rings = 12, + .entry_size = sizeof(struct ce_stat_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + }, + /* TODO: check destination status ring registers */ + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + }, + .max_size = + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* WBM_IDLE_LINK */ + .start_ring_id = HAL_SRNG_WBM_IDLE_LINK, + .max_rings = 1, + .entry_size = sizeof(struct wbm_link_descriptor_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM_IDLE_LINK_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* SW2WBM_RELEASE */ + .start_ring_id = HAL_SRNG_WBM_SW_RELEASE, + .max_rings = 1, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_SW_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_SW_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported + */ + .reg_size = {}, + .max_size = + HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* WBM2SW_RELEASE */ + .start_ring_id = HAL_SRNG_WBM2SW0_RELEASE, + .max_rings = 4, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .reg_size = { + HWIO_WBM_R0_WBM2SW1_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW1_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .max_size = + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >> + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT, + }, + { /* RXDMA_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA0_BUF0, +#ifdef IPA_OFFLOAD + .max_rings = 3, +#else + .max_rings = 2, +#endif + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW0, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA2_BUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_STATUS */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW1, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* RXDMA_MONITOR_DESC */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_DESC, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, + { /* DIR_BUF_RX_DMA_SRC */ + .start_ring_id = HAL_SRNG_DIR_BUF_RX_SRC_DMA_RING, + /* one ring for spectral and one ring for cfr */ + .max_rings = 2, + .entry_size = 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, +#ifdef WLAN_FEATURE_CIF_CFR + { /* WIFI_POS_SRC */ + .start_ring_id = HAL_SRNG_WIFI_POS_SRC_DMA_RING, + .max_rings = 1, + .entry_size = sizeof(wmi_oem_dma_buf_release_entry) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + .max_size = HAL_RXDMA_MAX_RING_SIZE, + }, +#endif +}; + +int32_t hal_hw_reg_offset_qcn9000[] = { + /* dst */ + REG_OFFSET(DST, HP), + REG_OFFSET(DST, TP), + REG_OFFSET(DST, ID), + REG_OFFSET(DST, MISC), + REG_OFFSET(DST, HP_ADDR_LSB), + REG_OFFSET(DST, HP_ADDR_MSB), + REG_OFFSET(DST, MSI1_BASE_LSB), + REG_OFFSET(DST, MSI1_BASE_MSB), + REG_OFFSET(DST, MSI1_DATA), + REG_OFFSET(DST, BASE_LSB), + REG_OFFSET(DST, BASE_MSB), + REG_OFFSET(DST, PRODUCER_INT_SETUP), + /* src */ + REG_OFFSET(SRC, HP), + REG_OFFSET(SRC, TP), + REG_OFFSET(SRC, ID), + REG_OFFSET(SRC, MISC), + REG_OFFSET(SRC, TP_ADDR_LSB), + REG_OFFSET(SRC, TP_ADDR_MSB), + REG_OFFSET(SRC, MSI1_BASE_LSB), + REG_OFFSET(SRC, MSI1_BASE_MSB), + REG_OFFSET(SRC, MSI1_DATA), + REG_OFFSET(SRC, BASE_LSB), + REG_OFFSET(SRC, BASE_MSB), + REG_OFFSET(SRC, CONSUMER_INT_SETUP_IX0), + REG_OFFSET(SRC, CONSUMER_INT_SETUP_IX1), +}; + +/** + * hal_qcn9000_attach()- Attach 9000 target specific hal_soc ops, + * offset and srng table + * Return: void + */ +void hal_qcn9000_attach(struct hal_soc *hal_soc) +{ + hal_soc->hw_srng_table = hw_srng_table_9000; + hal_soc->hal_hw_reg_offset = hal_hw_reg_offset_qcn9000; + hal_soc->ops = &qcn9000_hal_hw_txrx_ops; + if (hal_soc->static_window_map) + hal_write_window_register(hal_soc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qcn9000/hal_9000_rx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qcn9000/hal_9000_rx.h new file mode 100644 index 0000000000000000000000000000000000000000..45528561301e4d30ccaa86511db482864ee714f7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qcn9000/hal_9000_rx.h @@ -0,0 +1,368 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#define HAL_RX_MSDU0_BUFFER_ADDR_LSB(link_desc_va) \ + ((uint8_t *)(link_desc_va) + \ + RX_MSDU_LINK_8_MSDU_0_BUFFER_ADDR_INFO_DETAILS_BUFFER_ADDR_31_0_OFFSET) + +#define HAL_RX_MSDU_DESC_INFO_PTR_GET(msdu0) \ + ((uint8_t *)(msdu0) + \ + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_DETAILS_FIRST_MSDU_IN_MPDU_FLAG_OFFSET) + +#define HAL_ENT_MPDU_DESC_INFO(ent_ring_desc) \ + ((uint8_t *)(ent_ring_desc) + \ + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_DETAILS_MSDU_COUNT_OFFSET) + +#define HAL_DST_MPDU_DESC_INFO(dst_ring_desc) \ + ((uint8_t *)(dst_ring_desc) + \ + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_DETAILS_MSDU_COUNT_OFFSET) + +#define HAL_RX_GET_FC_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_11, MPDU_FRAME_CONTROL_VALID) + +#define HAL_RX_GET_TO_DS_FLAG(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_11, TO_DS) + +#define HAL_RX_GET_MAC_ADDR1_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_11, MAC_ADDR_AD1_VALID) + +#define HAL_RX_GET_MAC_ADDR2_VALID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_11, MAC_ADDR_AD2_VALID) + +#define HAL_RX_GET_FILTER_CATEGORY(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_9, RXPCU_MPDU_FILTER_IN_CATEGORY) + +#define HAL_RX_GET_PPDU_ID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_9, PHY_PPDU_ID) + +#define HAL_RX_GET_SW_FRAME_GROUP_ID(rx_mpdu_start) \ + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_9, SW_FRAME_GROUP_ID) + +#define HAL_REO_R0_CONFIG(soc, reg_val, reo_params) \ + do { \ + reg_val &= \ + ~(HWIO_REO_R0_GENERAL_ENABLE_FRAGMENT_DEST_RING_BMSK |\ + HWIO_REO_R0_GENERAL_ENABLE_AGING_LIST_ENABLE_BMSK | \ + HWIO_REO_R0_GENERAL_ENABLE_AGING_FLUSH_ENABLE_BMSK); \ + reg_val |= \ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + FRAGMENT_DEST_RING, \ + (reo_params)->frag_dst_ring) | \ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + AGING_LIST_ENABLE, 1) |\ + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, \ + AGING_FLUSH_ENABLE, 1);\ + HAL_REG_WRITE((soc), \ + HWIO_REO_R0_GENERAL_ENABLE_ADDR( \ + SEQ_WCSS_UMAC_REO_REG_OFFSET), \ + (reg_val)); \ + } while (0) + +#define HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr) \ + ((struct rx_msdu_desc_info *) \ + _OFFSET_TO_BYTE_PTR((msdu_details_ptr), \ + UNIFIED_RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET)) + +#define HAL_RX_LINK_DESC_MSDU0_PTR(link_desc) \ + ((struct rx_msdu_details *) \ + _OFFSET_TO_BYTE_PTR((link_desc),\ + UNIFIED_RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET)) + +#define HAL_RX_TLV_GET_TCP_CHKSUM(buf) \ + (_HAL_MS( \ + (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\ + msdu_end_tlv.rx_msdu_end), \ + RX_MSDU_END_10_TCP_UDP_CHKSUM_OFFSET)), \ + RX_MSDU_END_10_TCP_UDP_CHKSUM_MASK, \ + RX_MSDU_END_10_TCP_UDP_CHKSUM_LSB)) + +#define HAL_RX_MSDU_END_FIRST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_FIRST_MSDU_OFFSET)), \ + RX_MSDU_END_10_FIRST_MSDU_MASK, \ + RX_MSDU_END_10_FIRST_MSDU_LSB)) + +#define HAL_RX_MSDU_END_LAST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_LAST_MSDU_OFFSET)), \ + RX_MSDU_END_10_LAST_MSDU_MASK, \ + RX_MSDU_END_10_LAST_MSDU_LSB)) + +#define HAL_RX_MSDU_END_SA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_SA_IS_VALID_OFFSET)), \ + RX_MSDU_END_10_SA_IS_VALID_MASK, \ + RX_MSDU_END_10_SA_IS_VALID_LSB)) + +#define HAL_RX_MSDU_END_DA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_DA_IS_VALID_OFFSET)), \ + RX_MSDU_END_10_DA_IS_VALID_MASK, \ + RX_MSDU_END_10_DA_IS_VALID_LSB)) + +#define HAL_RX_MSDU_END_DA_IS_MCBC_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_DA_IS_MCBC_OFFSET)), \ + RX_MSDU_END_10_DA_IS_MCBC_MASK, \ + RX_MSDU_END_10_DA_IS_MCBC_LSB)) + +#define HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_L3_HEADER_PADDING_OFFSET)), \ + RX_MSDU_END_10_L3_HEADER_PADDING_MASK, \ + RX_MSDU_END_10_L3_HEADER_PADDING_LSB)) + +#define HAL_RX_MSDU_END_SA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_11_SA_IDX_OFFSET)), \ + RX_MSDU_END_11_SA_IDX_MASK, \ + RX_MSDU_END_11_SA_IDX_LSB)) + +#define HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_14_SA_SW_PEER_ID_OFFSET)), \ + RX_MSDU_END_14_SA_SW_PEER_ID_MASK, \ + RX_MSDU_END_14_SA_SW_PEER_ID_LSB)) + +#define HAL_RX_MSDU_END_CCE_METADATA_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_14_CCE_METADATA_OFFSET)), \ + RX_MSDU_END_14_CCE_METADATA_MASK, \ + RX_MSDU_END_14_CCE_METADATA_LSB)) + +#define HAL_RX_MSDU_END_DA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_11_DA_IDX_OR_SW_PEER_ID_OFFSET)), \ + RX_MSDU_END_11_DA_IDX_OR_SW_PEER_ID_MASK, \ + RX_MSDU_END_11_DA_IDX_OR_SW_PEER_ID_LSB)) + +#define HAL_RX_MPDU_SW_FRAME_GROUP_ID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_9_SW_FRAME_GROUP_ID_OFFSET)), \ + RX_MPDU_INFO_9_SW_FRAME_GROUP_ID_MASK, \ + RX_MPDU_INFO_9_SW_FRAME_GROUP_ID_LSB)) \ + +#define HAL_RX_MPDU_INFO_SW_PEER_ID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_10_SW_PEER_ID_OFFSET)), \ + RX_MPDU_INFO_10_SW_PEER_ID_MASK, \ + RX_MPDU_INFO_10_SW_PEER_ID_LSB)) + +#define HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MPDU_FRAME_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MPDU_FRAME_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_11_MPDU_FRAME_CONTROL_VALID_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MAC_ADDR_AD1_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MAC_ADDR_AD1_VALID_MASK, \ + RX_MPDU_INFO_11_MAC_ADDR_AD1_VALID_LSB)) + +#define HAL_RX_MPDU_AD1_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_OFFSET)), \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_MASK, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_LSB)) + +#define HAL_RX_MPDU_AD1_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MAC_ADDR_AD2_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MAC_ADDR_AD2_VALID_MASK, \ + RX_MPDU_INFO_11_MAC_ADDR_AD2_VALID_LSB)) + +#define HAL_RX_MPDU_AD2_15_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_LSB)) + +#define HAL_RX_MPDU_AD2_47_16_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_OFFSET)), \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_MASK, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MAC_ADDR_AD3_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MAC_ADDR_AD3_VALID_MASK, \ + RX_MPDU_INFO_11_MAC_ADDR_AD3_VALID_LSB)) + +#define HAL_RX_MPDU_AD3_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_OFFSET)), \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_MASK, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_LSB)) + +#define HAL_RX_MPDU_AD3_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_OFFSET)), \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_MASK, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_11_MAC_ADDR_AD4_VALID_LSB)) + +#define HAL_RX_MPDU_AD4_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_OFFSET)), \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_MASK, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_LSB)) + +#define HAL_RX_MPDU_AD4_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_OFFSET)), \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_MASK, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_LSB)) + +#define HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_CONTROL_VALID_LSB)) + +#define HAL_RX_MPDU_INFO_QOS_CONTROL_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_CONTROL_VALID_LSB)) + +#define HAL_RX_MPDU_ENCRYPTION_INFO_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_FRAME_ENCRYPTION_INFO_VALID_OFFSET)), \ + RX_MPDU_INFO_11_FRAME_ENCRYPTION_INFO_VALID_MASK, \ + RX_MPDU_INFO_11_FRAME_ENCRYPTION_INFO_VALID_LSB)) + +#define HAL_RX_MPDU_GET_FROMDS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_FR_DS_OFFSET)), \ + RX_MPDU_INFO_11_FR_DS_MASK, \ + RX_MPDU_INFO_11_FR_DS_LSB)) + +#define HAL_RX_MPDU_GET_TODS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_TO_DS_OFFSET)), \ + RX_MPDU_INFO_11_TO_DS_MASK, \ + RX_MPDU_INFO_11_TO_DS_LSB)) + +#define HAL_RX_MPDU_GET_SEQUENCE_NUMBER(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_NUMBER_OFFSET)), \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_NUMBER_MASK, \ + RX_MPDU_INFO_11_MPDU_SEQUENCE_NUMBER_LSB)) + +#define HAL_RX_MPDU_PN_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_3_PN_31_0_OFFSET)), \ + RX_MPDU_INFO_3_PN_31_0_MASK, \ + RX_MPDU_INFO_3_PN_31_0_LSB)) + +#define HAL_RX_MPDU_PN_63_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_4_PN_63_32_OFFSET)), \ + RX_MPDU_INFO_4_PN_63_32_MASK, \ + RX_MPDU_INFO_4_PN_63_32_LSB)) + +#define HAL_RX_MPDU_PN_95_64_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_5_PN_95_64_OFFSET)), \ + RX_MPDU_INFO_5_PN_95_64_MASK, \ + RX_MPDU_INFO_5_PN_95_64_LSB)) + +#define HAL_RX_MPDU_PN_127_96_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_6_PN_127_96_OFFSET)), \ + RX_MPDU_INFO_6_PN_127_96_MASK, \ + RX_MPDU_INFO_6_PN_127_96_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_FLOW_IDX_TIMEOUT_OFFSET)), \ + RX_MSDU_END_10_FLOW_IDX_TIMEOUT_MASK, \ + RX_MSDU_END_10_FLOW_IDX_TIMEOUT_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_10_FLOW_IDX_INVALID_OFFSET)), \ + RX_MSDU_END_10_FLOW_IDX_INVALID_MASK, \ + RX_MSDU_END_10_FLOW_IDX_INVALID_LSB)) + +#define HAL_RX_MSDU_END_FLOW_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_12_FLOW_IDX_OFFSET)), \ + RX_MSDU_END_12_FLOW_IDX_MASK, \ + RX_MSDU_END_12_FLOW_IDX_LSB)) + +#define HAL_RX_MSDU_END_FSE_METADATA_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_13_FSE_METADATA_OFFSET)), \ + RX_MSDU_END_13_FSE_METADATA_MASK, \ + RX_MSDU_END_13_FSE_METADATA_LSB)) + +#define HAL_RX_MPDU_GET_PHY_PPDU_ID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_9_PHY_PPDU_ID_OFFSET)), \ + RX_MPDU_INFO_9_PHY_PPDU_ID_MASK, \ + RX_MPDU_INFO_9_PHY_PPDU_ID_LSB)) \ + +#define HAL_RX_MSDU_START_MIMO_SS_BITMAP(_rx_msdu_start)\ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\ + RX_MSDU_START_5_MIMO_SS_BITMAP_OFFSET)), \ + RX_MSDU_START_5_MIMO_SS_BITMAP_MASK, \ + RX_MSDU_START_5_MIMO_SS_BITMAP_LSB)) + +#ifdef GET_MSDU_AGGREGATION +#define HAL_RX_GET_MSDU_AGGREGATION(rx_desc, rs)\ +{\ + struct rx_msdu_end *rx_msdu_end;\ + bool first_msdu, last_msdu; \ + rx_msdu_end = &rx_desc->msdu_end_tlv.rx_msdu_end;\ + first_msdu = HAL_RX_GET(rx_msdu_end, RX_MSDU_END_10, FIRST_MSDU);\ + last_msdu = HAL_RX_GET(rx_msdu_end, RX_MSDU_END_10, LAST_MSDU);\ + if (first_msdu && last_msdu)\ + rs->rs_flags &= (~IEEE80211_AMSDU_FLAG);\ + else\ + rs->rs_flags |= (IEEE80211_AMSDU_FLAG); \ +} \ + +#else +#define HAL_RX_GET_MSDU_AGGREGATION(rx_desc, rs) +#endif + +#define HAL_RX_MPDU_INFO_TID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \ + RX_MPDU_INFO_7_TID_OFFSET)), \ + RX_MPDU_INFO_7_TID_MASK, \ + RX_MPDU_INFO_7_TID_LSB)) + +#define HAL_RX_MSDU_START_RECEPTION_TYPE_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start), \ + RX_MSDU_START_5_RECEPTION_TYPE_OFFSET)), \ + RX_MSDU_START_5_RECEPTION_TYPE_MASK, \ + RX_MSDU_START_5_RECEPTION_TYPE_LSB)) diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qcn9000/hal_9000_tx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qcn9000/hal_9000_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..c6c2856df85ea40c69d69e6e225807870d2530d0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/qcn9000/hal_9000_tx.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "hal_hw_headers.h" +#include "hal_internal.h" +#include "cdp_txrx_mon_struct.h" +#include "qdf_trace.h" +#include "hal_rx.h" +#include "hal_tx.h" +#include "dp_types.h" +#include "hal_api_mon.h" + +/** + * hal_tx_desc_set_dscp_tid_table_id_9000() - Sets DSCP to TID conversion + * table ID + * @desc: Handle to Tx Descriptor + * @id: DSCP to tid conversion table to be used for this frame + * + * Return: void + */ +static void hal_tx_desc_set_dscp_tid_table_id_9000(void *desc, uint8_t id) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_5, + DSCP_TID_TABLE_NUM) |= + HAL_TX_SM(TCL_DATA_CMD_5, DSCP_TID_TABLE_NUM, id); +} + +#define DSCP_TID_TABLE_SIZE 24 +#define NUM_WORDS_PER_DSCP_TID_TABLE (DSCP_TID_TABLE_SIZE / 4) +#define HAL_TX_NUM_DSCP_REGISTER_SIZE 32 + +/** + * hal_tx_set_dscp_tid_map_9000() - Configure default DSCP to TID map table + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id: mapping table ID - 0,1 + * + * DSCP are mapped to 8 TID values using TID values programmed + * in two set of mapping registers DSCP_TID1_MAP_<0 to 6> (id = 0) + * and DSCP_TID2_MAP_<0 to 6> (id = 1) + * Each mapping register has TID mapping for 10 DSCP values + * + * Return: none + */ +static void hal_tx_set_dscp_tid_map_9000(struct hal_soc *soc, + uint8_t *map, uint8_t id) +{ + int i; + uint32_t addr, cmn_reg_addr; + uint32_t value = 0, regval; + uint8_t val[DSCP_TID_TABLE_SIZE], cnt = 0; + + if (id >= HAL_MAX_HW_DSCP_TID_V2_MAPS) + return; + + cmn_reg_addr = HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + + addr = HWIO_TCL_R0_DSCP_TID_MAP_n_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET, + id * NUM_WORDS_PER_DSCP_TID_TABLE); + + /* Enable read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval |= + (1 << HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_SHFT); + + HAL_REG_WRITE(soc, cmn_reg_addr, regval); + + /* Write 8 (24 bits) DSCP-TID mappings in each interation */ + for (i = 0; i < 64; i += 8) { + value = (map[i] | + (map[i + 1] << 0x3) | + (map[i + 2] << 0x6) | + (map[i + 3] << 0x9) | + (map[i + 4] << 0xc) | + (map[i + 5] << 0xf) | + (map[i + 6] << 0x12) | + (map[i + 7] << 0x15)); + + qdf_mem_copy(&val[cnt], &value, 3); + cnt += 3; + } + + for (i = 0; i < DSCP_TID_TABLE_SIZE; i += 4) { + regval = *(uint32_t *)(val + i); + HAL_REG_WRITE(soc, addr, + (regval & HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK)); + addr += 4; + } + + /* Diasble read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval &= + ~(HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_BMSK); + + HAL_REG_WRITE(soc, cmn_reg_addr, regval); +} + +/** + * hal_tx_update_dscp_tid_9000() - Update the dscp tid map table as + updated by user + * @soc: HAL SoC context + * @map: DSCP-TID mapping table + * @id : MAP ID + * @dscp: DSCP_TID map index + * + * Return: void + */ +static void hal_tx_update_dscp_tid_9000(struct hal_soc *soc, uint8_t tid, + uint8_t id, uint8_t dscp) +{ + uint32_t addr, addr1, cmn_reg_addr; + uint32_t start_value = 0, end_value = 0; + uint32_t regval; + uint8_t end_bits = 0; + uint8_t start_bits = 0; + uint32_t start_index, end_index; + + cmn_reg_addr = HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET); + + addr = HWIO_TCL_R0_DSCP_TID_MAP_n_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET, + id * NUM_WORDS_PER_DSCP_TID_TABLE); + + start_index = dscp * HAL_TX_BITS_PER_TID; + end_index = (start_index + (HAL_TX_BITS_PER_TID - 1)) + % HAL_TX_NUM_DSCP_REGISTER_SIZE; + start_index = start_index % HAL_TX_NUM_DSCP_REGISTER_SIZE; + addr += (4 * ((dscp * HAL_TX_BITS_PER_TID) / + HAL_TX_NUM_DSCP_REGISTER_SIZE)); + + if (end_index < start_index) { + end_bits = end_index + 1; + start_bits = HAL_TX_BITS_PER_TID - end_bits; + start_value = tid << start_index; + end_value = tid >> start_bits; + addr1 = addr + 4; + } else { + start_bits = HAL_TX_BITS_PER_TID - end_bits; + start_value = tid << start_index; + addr1 = 0; + } + + /* Enable read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval |= + (1 << HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_SHFT); + + HAL_REG_WRITE(soc, cmn_reg_addr, regval); + + regval = HAL_REG_READ(soc, addr); + + if (end_index < start_index) + regval &= (~0) >> start_bits; + else + regval &= ~(7 << start_index); + + regval |= start_value; + + HAL_REG_WRITE(soc, addr, (regval & HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK)); + + if (addr1) { + regval = HAL_REG_READ(soc, addr1); + regval &= (~0) << end_bits; + regval |= end_value; + + HAL_REG_WRITE(soc, addr1, (regval & + HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK)); + } + + /* Diasble read/write access */ + regval = HAL_REG_READ(soc, cmn_reg_addr); + regval &= + ~(HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_BMSK); + HAL_REG_WRITE(soc, cmn_reg_addr, regval); +} + +/** + * hal_tx_desc_set_lmac_id_9000 - Set the lmac_id value + * @desc: Handle to Tx Descriptor + * @lmac_id: mac Id to ast matching + * b00 – mac 0 + * b01 – mac 1 + * b10 – mac 2 + * b11 – all macs (legacy HK way) + * + * Return: void + */ +static void hal_tx_desc_set_lmac_id_9000(void *desc, uint8_t lmac_id) +{ + HAL_SET_FLD(desc, TCL_DATA_CMD_4, LMAC_ID) |= + HAL_TX_SM(TCL_DATA_CMD_4, LMAC_ID, lmac_id); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/cfg_hif.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/cfg_hif.h new file mode 100644 index 0000000000000000000000000000000000000000..41f0fd0bb77f6984e420e751a583908f9c520022 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/cfg_hif.h @@ -0,0 +1,89 @@ +/** + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _CFG_HIF_H_ +#define _CFG_HIF_H_ + +/* Min/Max/default CE status srng timer threshold */ +#define WLAN_CFG_CE_STATUS_RING_TIMER_THRESH_MIN 0 +#define WLAN_CFG_CE_STATUS_RING_TIMER_THRESH_MAX 4096 +#define WLAN_CFG_CE_STATUS_RING_TIMER_THRESH_DEFAULT 4096 + +/* Min/Max/default CE status srng batch count threshold */ +#define WLAN_CFG_CE_STATUS_RING_BATCH_COUNT_THRESH_MIN 0 +#define WLAN_CFG_CE_STATUS_RING_BATCH_COUNT_THRESH_MAX 512 +#define WLAN_CFG_CE_STATUS_RING_BATCH_COUNT_THRESH_DEFAULT 1 + +#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG +/** + * + * ce_status_ring_timer_thresh - ce status srng timer threshold + * @Min: 0 + * @Max: 4096 + * @Default: 0 + * + * This ini specifies the timer threshold for CE status srng to + * indicate the interrupt to be fired whenever the timer threshold + * runs out. + * + * Supported Feature: interrupt threshold for CE status srng + * + * Usage: Internal + * + * + */ +#define CFG_CE_STATUS_RING_TIMER_THRESHOLD \ + CFG_INI_UINT("ce_status_ring_timer_threshold", \ + WLAN_CFG_CE_STATUS_RING_TIMER_THRESH_MIN, \ + WLAN_CFG_CE_STATUS_RING_TIMER_THRESH_MAX, \ + WLAN_CFG_CE_STATUS_RING_TIMER_THRESH_DEFAULT, \ + CFG_VALUE_OR_DEFAULT, \ + "CE Status ring timer threshold") + +/** + * + * ce_status_ring_batch_count_thresh - ce status srng batch count threshold + * @Min: 0 + * @Max: 512 + * @Default: 1 + * + * This ini specifies the batch count threshold for CE status srng to + * indicate the interrupt to be fired for a given number of packets in + * the ring. + * + * Supported Feature: interrupt threshold for CE status srng + * + * Usage: Internal + * + * + */ +#define CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD \ + CFG_INI_UINT("ce_status_ring_batch_count_threshold", \ + WLAN_CFG_CE_STATUS_RING_BATCH_COUNT_THRESH_MIN, \ + WLAN_CFG_CE_STATUS_RING_BATCH_COUNT_THRESH_MAX, \ + WLAN_CFG_CE_STATUS_RING_BATCH_COUNT_THRESH_DEFAULT, \ + CFG_VALUE_OR_DEFAULT, \ + "CE Status ring batch count threshold") + +#define CFG_HIF \ + CFG(CFG_CE_STATUS_RING_TIMER_THRESHOLD) \ + CFG(CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD) +#else +#define CFG_HIF +#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */ +#endif /* _CFG_HIF_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h new file mode 100644 index 0000000000000000000000000000000000000000..d29cf3bb59efe8f91f04e7e72d3ea7c452ef3163 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h @@ -0,0 +1,1647 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HIF_H_ +#define _HIF_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* Header files */ +#include +#include "qdf_nbuf.h" +#include "qdf_lro.h" +#include "ol_if_athvar.h" +#include +#ifdef HIF_PCI +#include +#endif /* HIF_PCI */ +#ifdef HIF_USB +#include +#endif /* HIF_USB */ +#ifdef IPA_OFFLOAD +#include +#endif +#include "cfg_ucfg_api.h" +#include "qdf_dev.h" +#define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1 + +typedef void __iomem *A_target_id_t; +typedef void *hif_handle_t; + +#define HIF_TYPE_AR6002 2 +#define HIF_TYPE_AR6003 3 +#define HIF_TYPE_AR6004 5 +#define HIF_TYPE_AR9888 6 +#define HIF_TYPE_AR6320 7 +#define HIF_TYPE_AR6320V2 8 +/* For attaching Peregrine 2.0 board host_reg_tbl only */ +#define HIF_TYPE_AR9888V2 9 +#define HIF_TYPE_ADRASTEA 10 +#define HIF_TYPE_AR900B 11 +#define HIF_TYPE_QCA9984 12 +#define HIF_TYPE_IPQ4019 13 +#define HIF_TYPE_QCA9888 14 +#define HIF_TYPE_QCA8074 15 +#define HIF_TYPE_QCA6290 16 +#define HIF_TYPE_QCN7605 17 +#define HIF_TYPE_QCA6390 18 +#define HIF_TYPE_QCA8074V2 19 +#define HIF_TYPE_QCA6018 20 +#define HIF_TYPE_QCN9000 21 +#define HIF_TYPE_QCA6490 22 +#define HIF_TYPE_QCA6750 23 + +#ifdef IPA_OFFLOAD +#define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE 37 +#define DMA_COHERENT_MASK_BELOW_IPA_VER_3 32 +#endif + +/* enum hif_ic_irq - enum defining integrated chip irq numbers + * defining irq nubers that can be used by external modules like datapath + */ +enum hif_ic_irq { + host2wbm_desc_feed = 16, + host2reo_re_injection, + host2reo_command, + host2rxdma_monitor_ring3, + host2rxdma_monitor_ring2, + host2rxdma_monitor_ring1, + reo2host_exception, + wbm2host_rx_release, + reo2host_status, + reo2host_destination_ring4, + reo2host_destination_ring3, + reo2host_destination_ring2, + reo2host_destination_ring1, + rxdma2host_monitor_destination_mac3, + rxdma2host_monitor_destination_mac2, + rxdma2host_monitor_destination_mac1, + ppdu_end_interrupts_mac3, + ppdu_end_interrupts_mac2, + ppdu_end_interrupts_mac1, + rxdma2host_monitor_status_ring_mac3, + rxdma2host_monitor_status_ring_mac2, + rxdma2host_monitor_status_ring_mac1, + host2rxdma_host_buf_ring_mac3, + host2rxdma_host_buf_ring_mac2, + host2rxdma_host_buf_ring_mac1, + rxdma2host_destination_ring_mac3, + rxdma2host_destination_ring_mac2, + rxdma2host_destination_ring_mac1, + host2tcl_input_ring4, + host2tcl_input_ring3, + host2tcl_input_ring2, + host2tcl_input_ring1, + wbm2host_tx_completions_ring3, + wbm2host_tx_completions_ring2, + wbm2host_tx_completions_ring1, + tcl2host_status_ring, +}; + +struct CE_state; +#define CE_COUNT_MAX 12 +#define HIF_MAX_GRP_IRQ 16 + +#ifndef HIF_MAX_GROUP +#define HIF_MAX_GROUP 7 +#endif + +#ifndef NAPI_YIELD_BUDGET_BASED +#ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT +#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4 +#endif +#else /* NAPI_YIELD_BUDGET_BASED */ +#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2 +#endif /* NAPI_YIELD_BUDGET_BASED */ + +#define QCA_NAPI_BUDGET 64 +#define QCA_NAPI_DEF_SCALE \ + (1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT) + +#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE) +/* NOTE: "napi->scale" can be changed, + * but this does not change the number of buckets + */ +#define QCA_NAPI_NUM_BUCKETS 4 + +/** + * qca_napi_stat - stats structure for execution contexts + * @napi_schedules - number of times the schedule function is called + * @napi_polls - number of times the execution context runs + * @napi_completes - number of times that the generating interrupt is reenabled + * @napi_workdone - cumulative of all work done reported by handler + * @cpu_corrected - incremented when execution context runs on a different core + * than the one that its irq is affined to. + * @napi_budget_uses - histogram of work done per execution run + * @time_limit_reache - count of yields due to time limit threshholds + * @rxpkt_thresh_reached - count of yields due to a work limit + * @poll_time_buckets - histogram of poll times for the napi + * + */ +struct qca_napi_stat { + uint32_t napi_schedules; + uint32_t napi_polls; + uint32_t napi_completes; + uint32_t napi_workdone; + uint32_t cpu_corrected; + uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS]; + uint32_t time_limit_reached; + uint32_t rxpkt_thresh_reached; + unsigned long long napi_max_poll_time; +#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT + uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS]; +#endif +}; + + +/** + * per NAPI instance data structure + * This data structure holds stuff per NAPI instance. + * Note that, in the current implementation, though scale is + * an instance variable, it is set to the same value for all + * instances. + */ +struct qca_napi_info { + struct net_device netdev; /* dummy net_dev */ + void *hif_ctx; + struct napi_struct napi; + uint8_t scale; /* currently same on all instances */ + uint8_t id; + uint8_t cpu; + int irq; + cpumask_t cpumask; + struct qca_napi_stat stats[NR_CPUS]; +#ifdef RECEIVE_OFFLOAD + /* will only be present for data rx CE's */ + void (*offld_flush_cb)(void *); + struct napi_struct rx_thread_napi; + struct net_device rx_thread_netdev; +#endif /* RECEIVE_OFFLOAD */ + qdf_lro_ctx_t lro_ctx; +}; + +enum qca_napi_tput_state { + QCA_NAPI_TPUT_UNINITIALIZED, + QCA_NAPI_TPUT_LO, + QCA_NAPI_TPUT_HI +}; +enum qca_napi_cpu_state { + QCA_NAPI_CPU_UNINITIALIZED, + QCA_NAPI_CPU_DOWN, + QCA_NAPI_CPU_UP }; + +/** + * struct qca_napi_cpu - an entry of the napi cpu table + * @core_id: physical core id of the core + * @cluster_id: cluster this core belongs to + * @core_mask: mask to match all core of this cluster + * @thread_mask: mask for this core within the cluster + * @max_freq: maximum clock this core can be clocked at + * same for all cpus of the same core. + * @napis: bitmap of napi instances on this core + * @execs: bitmap of execution contexts on this core + * cluster_nxt: chain to link cores within the same cluster + * + * This structure represents a single entry in the napi cpu + * table. The table is part of struct qca_napi_data. + * This table is initialized by the init function, called while + * the first napi instance is being created, updated by hotplug + * notifier and when cpu affinity decisions are made (by throughput + * detection), and deleted when the last napi instance is removed. + */ +struct qca_napi_cpu { + enum qca_napi_cpu_state state; + int core_id; + int cluster_id; + cpumask_t core_mask; + cpumask_t thread_mask; + unsigned int max_freq; + uint32_t napis; + uint32_t execs; + int cluster_nxt; /* index, not pointer */ +}; + +/** + * struct qca_napi_data - collection of napi data for a single hif context + * @hif_softc: pointer to the hif context + * @lock: spinlock used in the event state machine + * @state: state variable used in the napi stat machine + * @ce_map: bit map indicating which ce's have napis running + * @exec_map: bit map of instanciated exec contexts + * @user_cpu_affin_map: CPU affinity map from INI config. + * @napi_cpu: cpu info for irq affinty + * @lilcl_head: + * @bigcl_head: + * @napi_mode: irq affinity & clock voting mode + * @cpuhp_handler: CPU hotplug event registration handle + */ +struct qca_napi_data { + struct hif_softc *hif_softc; + qdf_spinlock_t lock; + uint32_t state; + + /* bitmap of created/registered NAPI instances, indexed by pipe_id, + * not used by clients (clients use an id returned by create) + */ + uint32_t ce_map; + uint32_t exec_map; + uint32_t user_cpu_affin_mask; + struct qca_napi_info *napis[CE_COUNT_MAX]; + struct qca_napi_cpu napi_cpu[NR_CPUS]; + int lilcl_head, bigcl_head; + enum qca_napi_tput_state napi_mode; + struct qdf_cpuhp_handler *cpuhp_handler; + uint8_t flags; +}; + +/** + * struct hif_config_info - Place Holder for HIF configuration + * @enable_self_recovery: Self Recovery + * @enable_runtime_pm: Enable Runtime PM + * @runtime_pm_delay: Runtime PM Delay + * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq + * + * Structure for holding HIF ini parameters. + */ +struct hif_config_info { + bool enable_self_recovery; +#ifdef FEATURE_RUNTIME_PM + uint8_t enable_runtime_pm; + u_int32_t runtime_pm_delay; +#endif + uint64_t rx_softirq_max_yield_duration_ns; +}; + +/** + * struct hif_target_info - Target Information + * @target_version: Target Version + * @target_type: Target Type + * @target_revision: Target Revision + * @soc_version: SOC Version + * @hw_name: pointer to hardware name + * + * Structure to hold target information. + */ +struct hif_target_info { + uint32_t target_version; + uint32_t target_type; + uint32_t target_revision; + uint32_t soc_version; + char *hw_name; +}; + +struct hif_opaque_softc { +}; + +/** + * enum hif_event_type - Type of DP events to be recorded + * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event + * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event + * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event + * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event + */ +enum hif_event_type { + HIF_EVENT_IRQ_TRIGGER, + HIF_EVENT_BH_SCHED, + HIF_EVENT_SRNG_ACCESS_START, + HIF_EVENT_SRNG_ACCESS_END, + /* Do check hif_hist_skip_event_record when adding new events */ +}; + +/** + * enum hif_system_pm_state - System PM state + * HIF_SYSTEM_PM_STATE_ON: System in active state + * HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of + * system resume + * HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of + * system suspend + * HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend + */ +enum hif_system_pm_state { + HIF_SYSTEM_PM_STATE_ON, + HIF_SYSTEM_PM_STATE_BUS_RESUMING, + HIF_SYSTEM_PM_STATE_BUS_SUSPENDING, + HIF_SYSTEM_PM_STATE_BUS_SUSPENDED, +}; + +#ifdef WLAN_FEATURE_DP_EVENT_HISTORY + +/* HIF_EVENT_HIST_MAX should always be power of 2 */ +#define HIF_EVENT_HIST_MAX 512 +#define HIF_NUM_INT_CONTEXTS HIF_MAX_GROUP +#define HIF_EVENT_HIST_DISABLE_MASK 0 + +/** + * struct hif_event_record - an entry of the DP event history + * @hal_ring_id: ring id for which event is recorded + * @hp: head pointer of the ring (may not be applicable for all events) + * @tp: tail pointer of the ring (may not be applicable for all events) + * @cpu_id: cpu id on which the event occurred + * @timestamp: timestamp when event occurred + * @type: type of the event + * + * This structure represents the information stored for every datapath + * event which is logged in the history. + */ +struct hif_event_record { + uint8_t hal_ring_id; + uint32_t hp; + uint32_t tp; + int cpu_id; + uint64_t timestamp; + enum hif_event_type type; +}; + +/** + * struct hif_event_misc - history related misc info + * @last_irq_index: last irq event index in history + * @last_irq_ts: last irq timestamp + */ +struct hif_event_misc { + int32_t last_irq_index; + uint64_t last_irq_ts; +}; + +/** + * struct hif_event_history - history for one interrupt group + * @index: index to store new event + * @event: event entry + * + * This structure represents the datapath history for one + * interrupt group. + */ +struct hif_event_history { + qdf_atomic_t index; + struct hif_event_misc misc; + struct hif_event_record event[HIF_EVENT_HIST_MAX]; +}; + +/** + * hif_hist_record_event() - Record one datapath event in history + * @hif_ctx: HIF opaque context + * @event: DP event entry + * @intr_grp_id: interrupt group ID registered with hif + * + * Return: None + */ +void hif_hist_record_event(struct hif_opaque_softc *hif_ctx, + struct hif_event_record *event, + uint8_t intr_grp_id); + +/** + * hif_record_event() - Wrapper function to form and record DP event + * @hif_ctx: HIF opaque context + * @intr_grp_id: interrupt group ID registered with hif + * @hal_ring_id: ring id for which event is recorded + * @hp: head pointer index of the srng + * @tp: tail pointer index of the srng + * @type: type of the event to be logged in history + * + * Return: None + */ +static inline void hif_record_event(struct hif_opaque_softc *hif_ctx, + uint8_t intr_grp_id, + uint8_t hal_ring_id, + uint32_t hp, + uint32_t tp, + enum hif_event_type type) +{ + struct hif_event_record event; + + event.hal_ring_id = hal_ring_id; + event.hp = hp; + event.tp = tp; + event.type = type; + + return hif_hist_record_event(hif_ctx, &event, + intr_grp_id); +} + +#else + +static inline void hif_record_event(struct hif_opaque_softc *hif_ctx, + uint8_t intr_grp_id, + uint8_t hal_ring_id, + uint32_t hp, + uint32_t tp, + enum hif_event_type type) +{ +} +#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ + +/** + * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type + * + * @HIF_DEVICE_POWER_UP: HIF layer should power up interface and/or module + * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to + * minimize power + * @HIF_DEVICE_POWER_CUT: HIF layer should initiate bus-specific AND/OR + * platform-specific measures to completely power-off + * the module and associated hardware (i.e. cut power + * supplies) + */ +enum HIF_DEVICE_POWER_CHANGE_TYPE { + HIF_DEVICE_POWER_UP, + HIF_DEVICE_POWER_DOWN, + HIF_DEVICE_POWER_CUT +}; + +/** + * enum hif_enable_type: what triggered the enabling of hif + * + * @HIF_ENABLE_TYPE_PROBE: probe triggered enable + * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable + */ +enum hif_enable_type { + HIF_ENABLE_TYPE_PROBE, + HIF_ENABLE_TYPE_REINIT, + HIF_ENABLE_TYPE_MAX +}; + +/** + * enum hif_disable_type: what triggered the disabling of hif + * + * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable + * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable + * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable + * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable + */ +enum hif_disable_type { + HIF_DISABLE_TYPE_PROBE_ERROR, + HIF_DISABLE_TYPE_REINIT_ERROR, + HIF_DISABLE_TYPE_REMOVE, + HIF_DISABLE_TYPE_SHUTDOWN, + HIF_DISABLE_TYPE_MAX +}; +/** + * enum hif_device_config_opcode: configure mode + * + * @HIF_DEVICE_POWER_STATE: device power state + * @HIF_DEVICE_GET_BLOCK_SIZE: get block size + * @HIF_DEVICE_GET_ADDR: get block address + * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions + * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode + * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function + * @HIF_DEVICE_POWER_STATE_CHANGE: change power state + * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params + * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request + * @HIF_DEVICE_GET_OS_DEVICE: get OS device + * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state + * @HIF_BMI_DONE: bmi done + * @HIF_DEVICE_SET_TARGET_TYPE: set target type + * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context + * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context + */ +enum hif_device_config_opcode { + HIF_DEVICE_POWER_STATE = 0, + HIF_DEVICE_GET_BLOCK_SIZE, + HIF_DEVICE_GET_FIFO_ADDR, + HIF_DEVICE_GET_PENDING_EVENTS_FUNC, + HIF_DEVICE_GET_IRQ_PROC_MODE, + HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC, + HIF_DEVICE_POWER_STATE_CHANGE, + HIF_DEVICE_GET_IRQ_YIELD_PARAMS, + HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT, + HIF_DEVICE_GET_OS_DEVICE, + HIF_DEVICE_DEBUG_BUS_STATE, + HIF_BMI_DONE, + HIF_DEVICE_SET_TARGET_TYPE, + HIF_DEVICE_SET_HTC_CONTEXT, + HIF_DEVICE_GET_HTC_CONTEXT, +}; + +#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG +struct HID_ACCESS_LOG { + uint32_t seqnum; + bool is_write; + void *addr; + uint32_t value; +}; +#endif + +void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset, + uint32_t value); +uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset); + +#define HIF_MAX_DEVICES 1 +/** + * struct htc_callbacks - Structure for HTC Callbacks methods + * @context: context to pass to the dsrhandler + * note : rwCompletionHandler is provided the context + * passed to hif_read_write + * @rwCompletionHandler: Read / write completion handler + * @dsrHandler: DSR Handler + */ +struct htc_callbacks { + void *context; + QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status); + QDF_STATUS(*dsr_handler)(void *context); +}; + +/** + * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state + * @context: Private data context + * @set_recovery_in_progress: To Set Driver state for recovery in progress + * @is_recovery_in_progress: Query if driver state is recovery in progress + * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress + * @is_driver_unloading: Query if driver is unloading. + * @get_bandwidth_level: Query current bandwidth level for the driver + * @prealloc_get_consistent_mem_unligned: get prealloc unaligned consistent mem + * @prealloc_put_consistent_mem_unligned: put unaligned consistent mem to pool + * This Structure provides callback pointer for HIF to query hdd for driver + * states. + */ +struct hif_driver_state_callbacks { + void *context; + void (*set_recovery_in_progress)(void *context, uint8_t val); + bool (*is_recovery_in_progress)(void *context); + bool (*is_load_unload_in_progress)(void *context); + bool (*is_driver_unloading)(void *context); + bool (*is_target_ready)(void *context); + int (*get_bandwidth_level)(void *context); +#ifdef DP_MEM_PRE_ALLOC + void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size, + qdf_dma_addr_t *paddr, + uint32_t ring_type); + void (*prealloc_put_consistent_mem_unaligned)(void *vaddr); +#endif +}; + +/* This API detaches the HTC layer from the HIF device */ +void hif_detach_htc(struct hif_opaque_softc *hif_ctx); + +/****************************************************************/ +/* BMI and Diag window abstraction */ +/****************************************************************/ + +#define HIF_BMI_EXCHANGE_NO_TIMEOUT ((uint32_t)(0)) + +#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be + * handled atomically by + * DiagRead/DiagWrite + */ + +#ifdef WLAN_FEATURE_BMI +/* + * API to handle HIF-specific BMI message exchanges, this API is synchronous + * and only allowed to be called from a context that can block (sleep) + */ +QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx, + qdf_dma_addr_t cmd, qdf_dma_addr_t rsp, + uint8_t *pSendMessage, uint32_t Length, + uint8_t *pResponseMessage, + uint32_t *pResponseLength, uint32_t TimeoutMS); +void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx); +bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx); +#else /* WLAN_FEATURE_BMI */ +static inline void +hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx) +{ +} + +static inline bool +hif_needs_bmi(struct hif_opaque_softc *hif_ctx) +{ + return false; +} +#endif /* WLAN_FEATURE_BMI */ + +/* + * APIs to handle HIF specific diagnostic read accesses. These APIs are + * synchronous and only allowed to be called from a context that + * can block (sleep). They are not high performance APIs. + * + * hif_diag_read_access reads a 4 Byte aligned/length value from a + * Target register or memory word. + * + * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory. + */ +QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx, + uint32_t address, uint32_t *data); +QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address, + uint8_t *data, int nbytes); +void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx, + void *ramdump_base, uint32_t address, uint32_t size); +/* + * APIs to handle HIF specific diagnostic write accesses. These APIs are + * synchronous and only allowed to be called from a context that + * can block (sleep). + * They are not high performance APIs. + * + * hif_diag_write_access writes a 4 Byte aligned/length value to a + * Target register or memory word. + * + * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory. + */ +QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx, + uint32_t address, uint32_t data); +QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx, + uint32_t address, uint8_t *data, int nbytes); + +typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t); + +void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx); +bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx); + +/* + * Set the FASTPATH_mode_on flag in sc, for use by data path + */ +#ifdef WLAN_FEATURE_FASTPATH +void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx); +bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx); +void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret); +int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx, + fastpath_msg_handler handler, void *context); +#else +static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx, + fastpath_msg_handler handler, + void *context) +{ + return QDF_STATUS_E_FAILURE; +} +static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret) +{ + return NULL; +} + +#endif + +/* + * Enable/disable CDC max performance workaround + * For max-performace set this to 0 + * To allow SoC to enter sleep set this to 1 + */ +#define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0 + +void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr); + +/** + * @brief List of callbacks - filled in by HTC. + */ +struct hif_msg_callbacks { + void *Context; + /**< context meaningful to HTC */ + QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf, + uint32_t transferID, + uint32_t toeplitz_hash_result); + QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf, + uint8_t pipeID); + void (*txResourceAvailHandler)(void *context, uint8_t pipe); + void (*fwEventHandler)(void *context, QDF_STATUS status); + void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle); +}; + +enum hif_target_status { + TARGET_STATUS_CONNECTED = 0, /* target connected */ + TARGET_STATUS_RESET, /* target got reset */ + TARGET_STATUS_EJECT, /* target got ejected */ + TARGET_STATUS_SUSPEND /*target got suspend */ +}; + +/** + * enum hif_attribute_flags: configure hif + * + * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE + * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor + * + No pktlog CE + */ +enum hif_attribute_flags { + HIF_LOWDESC_CE_CFG = 1, + HIF_LOWDESC_CE_NO_PKTLOG_CFG +}; + +#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \ + (attr |= (v & 0x01) << 5) +#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \ + (attr |= (v & 0x03) << 6) +#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \ + (attr |= (v & 0x01) << 13) +#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \ + (attr |= (v & 0x01) << 14) +#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \ + (attr |= (v & 0x01) << 15) +#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \ + (attr |= (v & 0x0FFF) << 16) +#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \ + (attr |= (v & 0x01) << 30) + +struct hif_ul_pipe_info { + unsigned int nentries; + unsigned int nentries_mask; + unsigned int sw_index; + unsigned int write_index; /* cached copy */ + unsigned int hw_index; /* cached copy */ + void *base_addr_owner_space; /* Host address space */ + qdf_dma_addr_t base_addr_CE_space; /* CE address space */ +}; + +struct hif_dl_pipe_info { + unsigned int nentries; + unsigned int nentries_mask; + unsigned int sw_index; + unsigned int write_index; /* cached copy */ + unsigned int hw_index; /* cached copy */ + void *base_addr_owner_space; /* Host address space */ + qdf_dma_addr_t base_addr_CE_space; /* CE address space */ +}; + +struct hif_pipe_addl_info { + uint32_t pci_mem; + uint32_t ctrl_addr; + struct hif_ul_pipe_info ul_pipe; + struct hif_dl_pipe_info dl_pipe; +}; + +#ifdef CONFIG_SLUB_DEBUG_ON +#define MSG_FLUSH_NUM 16 +#else /* PERF build */ +#define MSG_FLUSH_NUM 32 +#endif /* SLUB_DEBUG_ON */ + +struct hif_bus_id; + +void hif_claim_device(struct hif_opaque_softc *hif_ctx); +QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx, + int opcode, void *config, uint32_t config_len); +void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx); +void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx); +void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC, + struct hif_msg_callbacks *callbacks); +QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx); +void hif_stop(struct hif_opaque_softc *hif_ctx); +void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx); +void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start); +void hif_trigger_dump(struct hif_opaque_softc *hif_ctx, + uint8_t cmd_id, bool start); + +QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID, + uint32_t transferID, uint32_t nbytes, + qdf_nbuf_t wbuf, uint32_t data_attr); +void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID, + int force); +void hif_shut_down_device(struct hif_opaque_softc *hif_ctx); +void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe, + uint8_t *DLPipe); +int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id, + uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled, + int *dl_is_polled); +uint16_t +hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID); +void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx); +uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset); +void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok, + bool wait_for_it); +int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx); +#ifndef HIF_PCI +static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx) +{ + return 0; +} +#else +int hif_check_soc_status(struct hif_opaque_softc *hif_ctx); +#endif +void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version, + u32 *revision, const char **target_name); + +#ifdef RECEIVE_OFFLOAD +/** + * hif_offld_flush_cb_register() - Register the offld flush callback + * @scn: HIF opaque context + * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread + * Or GRO/LRO flush when RxThread is not enabled. Called + * with corresponding context for flush. + * Return: None + */ +void hif_offld_flush_cb_register(struct hif_opaque_softc *scn, + void (offld_flush_handler)(void *ol_ctx)); + +/** + * hif_offld_flush_cb_deregister() - deRegister the offld flush callback + * @scn: HIF opaque context + * + * Return: None + */ +void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn); +#endif + +#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT +/** + * hif_exec_should_yield() - Check if hif napi context should yield + * @hif_ctx - HIF opaque context + * @grp_id - grp_id of the napi for which check needs to be done + * + * The function uses grp_id to look for NAPI and checks if NAPI needs to + * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for + * yield decision. + * + * Return: true if NAPI needs to yield, else false + */ +bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id); +#else +static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, + uint grp_id) +{ + return false; +} +#endif + +void hif_disable_isr(struct hif_opaque_softc *hif_ctx); +void hif_reset_soc(struct hif_opaque_softc *hif_ctx); +void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx, + int htc_htt_tx_endpoint); + +/** + * hif_open() - Create hif handle + * @qdf_ctx: qdf context + * @mode: Driver Mode + * @bus_type: Bus Type + * @cbk: CDS Callbacks + * @psoc: psoc object manager + * + * API to open HIF Context + * + * Return: HIF Opaque Pointer + */ +struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, + uint32_t mode, + enum qdf_bus_type bus_type, + struct hif_driver_state_callbacks *cbk, + struct wlan_objmgr_psoc *psoc); + +void hif_close(struct hif_opaque_softc *hif_ctx); +QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev, + void *bdev, const struct hif_bus_id *bid, + enum qdf_bus_type bus_type, + enum hif_enable_type type); +void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type); +#ifdef CE_TASKLET_DEBUG_ENABLE +void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, + uint8_t value); +#endif +void hif_display_stats(struct hif_opaque_softc *hif_ctx); +void hif_clear_stats(struct hif_opaque_softc *hif_ctx); + +/** + * enum wlan_rtpm_dbgid - runtime pm put/get debug id + * @RTPM_ID_RESVERD: Reserved + * @RTPM_ID_WMI: WMI sending msg, expect put happen at + * tx completion from CE level directly. + * @RTPM_ID_HTC: pkt sending by HTT_DATA_MSG_SVC, expect + * put from fw response or just in + * htc_issue_packets + * @RTPM_ID_QOS_NOTIFY: pm qos notifer + * @RTPM_ID_DP_TX_DESC_ALLOC_FREE: tx desc alloc/free + * @RTPM_ID_CE_SEND_FAST: operation in ce_send_fast, not include + * the pkt put happens outside this function + * @RTPM_ID_SUSPEND_RESUME: suspend/resume in hdd + * @RTPM_ID_DW_TX_HW_ENQUEUE: operation in functin dp_tx_hw_enqueue + * @RTPM_ID_HAL_REO_CMD: HAL_REO_CMD operation + * @RTPM_ID_DP_PRINT_RING_STATS: operation in dp_print_ring_stats + */ +/* New value added to the enum must also be reflected in function + * rtpm_string_from_dbgid() + */ +typedef enum { + RTPM_ID_RESVERD = 0, + RTPM_ID_WMI = 1, + RTPM_ID_HTC = 2, + RTPM_ID_QOS_NOTIFY = 3, + RTPM_ID_DP_TX_DESC_ALLOC_FREE = 4, + RTPM_ID_CE_SEND_FAST = 5, + RTPM_ID_SUSPEND_RESUME = 6, + RTPM_ID_DW_TX_HW_ENQUEUE = 7, + RTPM_ID_HAL_REO_CMD = 8, + RTPM_ID_DP_PRINT_RING_STATS = 9, + + RTPM_ID_MAX, +} wlan_rtpm_dbgid; + +/** + * rtpm_string_from_dbgid() - Convert dbgid to respective string + * @id - debug id + * + * Debug support function to convert dbgid to string. + * Please note to add new string in the array at index equal to + * its enum value in wlan_rtpm_dbgid. + */ +static inline char *rtpm_string_from_dbgid(wlan_rtpm_dbgid id) +{ + static const char *strings[] = { "RTPM_ID_RESVERD", + "RTPM_ID_WMI", + "RTPM_ID_HTC", + "RTPM_ID_QOS_NOTIFY", + "RTPM_ID_DP_TX_DESC_ALLOC_FREE", + "RTPM_ID_CE_SEND_FAST", + "RTPM_ID_SUSPEND_RESUME", + "RTPM_ID_DW_TX_HW_ENQUEUE", + "RTPM_ID_HAL_REO_CMD", + "RTPM_ID_DP_PRINT_RING_STATS", + "RTPM_ID_MAX"}; + + return (char *)strings[id]; +} + +/** + * enum hif_pm_link_state - hif link state + * HIF_PM_LINK_STATE_DOWN: hif link state is down + * HIF_PM_LINK_STATE_UP: hif link state is up + */ +enum hif_pm_link_state { + HIF_PM_LINK_STATE_DOWN, + HIF_PM_LINK_STATE_UP +}; + +#ifdef FEATURE_RUNTIME_PM +struct hif_pm_runtime_lock; + +void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx); +int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid); +int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid); +int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx); +int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid); +void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid); +int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid); +int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid); +void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx); +int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name); +void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx, + struct hif_pm_runtime_lock *lock); +int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc, + struct hif_pm_runtime_lock *lock); +int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc, + struct hif_pm_runtime_lock *lock); +int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc, + struct hif_pm_runtime_lock *lock, unsigned int delay); +bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx); +int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx); +void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, + int val); +void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx); +int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx); +qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx); +int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx); + +/** + * hif_pm_set_link_state() - set link state during RTPM + * @hif_sc: HIF Context + * + * Return: None + */ +void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val); + +/** + * hif_is_link_state_up() - Is link state up + * @hif_sc: HIF Context + * + * Return: 1 link is up, 0 link is down + */ +uint8_t hif_pm_get_link_state(struct hif_opaque_softc *hif_handle); +#else +struct hif_pm_runtime_lock { + const char *name; +}; +static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {} +static inline int +hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid) +{ return 0; } +static inline int +hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid) +{ return 0; } +static inline int +hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx) +{ return 0; } +static inline void +hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid) +{} + +static inline int +hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid) +{ return 0; } +static inline int +hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid) +{ return 0; } +static inline int +hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid) +{ return 0; } +static inline void +hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) {}; +static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock, + const char *name) +{ return 0; } +static inline void +hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx, + struct hif_pm_runtime_lock *lock) {} + +static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc, + struct hif_pm_runtime_lock *lock) +{ return 0; } +static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc, + struct hif_pm_runtime_lock *lock) +{ return 0; } +static inline int +hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc, + struct hif_pm_runtime_lock *lock, unsigned int delay) +{ return 0; } +static inline bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx) +{ return false; } +static inline int +hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx) +{ return 0; } +static inline void +hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, int val) +{ return; } +static inline void +hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) {}; +static inline int +hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx) +{ return 0; } +static inline qdf_time_t +hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx) +{ return 0; } +static inline int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx) +{ return 0; } +static inline +void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val) +{} + +#endif + +void hif_enable_power_management(struct hif_opaque_softc *hif_ctx, + bool is_packet_log_enabled); +void hif_disable_power_management(struct hif_opaque_softc *hif_ctx); + +void hif_vote_link_down(struct hif_opaque_softc *hif_ctx); +void hif_vote_link_up(struct hif_opaque_softc *hif_ctx); +bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx); + +#ifdef IPA_OFFLOAD +/** + * hif_get_ipa_hw_type() - get IPA hw type + * + * This API return the IPA hw type. + * + * Return: IPA hw type + */ +static inline +enum ipa_hw_type hif_get_ipa_hw_type(void) +{ + return ipa_get_hw_type(); +} + +/** + * hif_get_ipa_present() - get IPA hw status + * + * This API return the IPA hw status. + * + * Return: true if IPA is present or false otherwise + */ +static inline +bool hif_get_ipa_present(void) +{ + if (ipa_uc_reg_rdyCB(NULL) != -EPERM) + return true; + else + return false; +} +#endif +int hif_bus_resume(struct hif_opaque_softc *hif_ctx); +/** + * hif_bus_ealry_suspend() - stop non wmi tx traffic + * @context: hif context + */ +int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx); + +/** + * hif_bus_late_resume() - resume non wmi traffic + * @context: hif context + */ +int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx); +int hif_bus_suspend(struct hif_opaque_softc *hif_ctx); +int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx); +int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx); + +/** + * hif_apps_irqs_enable() - Enables all irqs from the APPS side + * @hif_ctx: an opaque HIF handle to use + * + * As opposed to the standard hif_irq_enable, this function always applies to + * the APPS side kernel interrupt handling. + * + * Return: errno + */ +int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx); + +/** + * hif_apps_irqs_disable() - Disables all irqs from the APPS side + * @hif_ctx: an opaque HIF handle to use + * + * As opposed to the standard hif_irq_disable, this function always applies to + * the APPS side kernel interrupt handling. + * + * Return: errno + */ +int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx); + +/** + * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side + * @hif_ctx: an opaque HIF handle to use + * + * As opposed to the standard hif_irq_enable, this function always applies to + * the APPS side kernel interrupt handling. + * + * Return: errno + */ +int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx); + +/** + * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side + * @hif_ctx: an opaque HIF handle to use + * + * As opposed to the standard hif_irq_disable, this function always applies to + * the APPS side kernel interrupt handling. + * + * Return: errno + */ +int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx); + +#ifdef FEATURE_RUNTIME_PM +int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx); +void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx); +int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx); +int hif_runtime_resume(struct hif_opaque_softc *hif_ctx); +void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx); +void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx); +void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx); +#endif + +int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size); +int hif_dump_registers(struct hif_opaque_softc *scn); +int ol_copy_ramdump(struct hif_opaque_softc *scn); +void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx); +void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version, + u32 *revision, const char **target_name); +enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl); +struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc * + scn); +struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx); +struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx); +enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx); +void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum + hif_target_status); +void hif_init_ini_config(struct hif_opaque_softc *hif_ctx, + struct hif_config_info *cfg); +void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls); +qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, + uint32_t transfer_id, u_int32_t len, uint32_t sendhead); +QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, + uint32_t transfer_id, u_int32_t len); +int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf, + uint32_t transfer_id, uint32_t download_len); +void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len); +void hif_ce_war_disable(void); +void hif_ce_war_enable(void); +void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num); +#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT +struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc, + struct hif_pipe_addl_info *hif_info, uint32_t pipe_number); +uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, + uint32_t pipe_num); +int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc); +#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */ + +void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled, + int rx_bundle_cnt); +int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx); + +void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib); + +void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl); + +enum hif_exec_type { + HIF_EXEC_NAPI_TYPE, + HIF_EXEC_TASKLET_TYPE, +}; + +typedef uint32_t (*ext_intr_handler)(void *, uint32_t); + +/** + * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id + * @softc: hif opaque context owning the exec context + * @id: the id of the interrupt context + * + * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID + * 'id' registered with the OS + */ +int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc, + uint8_t id); + +uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx); +void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx); +uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx, + uint32_t numirq, uint32_t irq[], ext_intr_handler handler, + void *cb_ctx, const char *context_name, + enum hif_exec_type type, uint32_t scale); + +void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx, + const char *context_name); + +void hif_update_pipe_callback(struct hif_opaque_softc *osc, + u_int8_t pipeid, + struct hif_msg_callbacks *callbacks); + +/** + * hif_print_napi_stats() - Display HIF NAPI stats + * @hif_ctx - HIF opaque context + * + * Return: None + */ +void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx); + +/* hif_clear_napi_stats() - function clears the stats of the + * latency when called. + * @hif_ctx - the HIF context to assign the callback to + * + * Return: None + */ +void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx); + +#ifdef __cplusplus +} +#endif + +#ifdef FORCE_WAKE +/** + * hif_force_wake_request() - Function to wake from power collapse + * @handle: HIF opaque handle + * + * Description: API to check if the device is awake or not before + * read/write to BAR + 4K registers. If device is awake return + * success otherwise write '1' to + * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt + * the device and does wakeup the PCI and MHI within 50ms + * and then the device writes a value to + * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the + * handshake process to let the host know the device is awake. + * + * Return: zero - success/non-zero - failure + */ +int hif_force_wake_request(struct hif_opaque_softc *handle); + +/** + * hif_force_wake_release() - API to release/reset the SOC wake register + * from interrupting the device. + * @handle: HIF opaque handle + * + * Description: API to set the + * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0' + * to release the interrupt line. + * + * Return: zero - success/non-zero - failure + */ +int hif_force_wake_release(struct hif_opaque_softc *handle); +#else +static inline +int hif_force_wake_request(struct hif_opaque_softc *handle) +{ + return 0; +} + +static inline +int hif_force_wake_release(struct hif_opaque_softc *handle) +{ + return 0; +} +#endif /* FORCE_WAKE */ + +#ifdef FEATURE_HAL_DELAYED_REG_WRITE +/** + * hif_prevent_link_low_power_states() - Prevent from going to low power states + * @hif - HIF opaque context + * + * Return: 0 on success. Error code on failure. + */ +int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif); + +/** + * hif_allow_link_low_power_states() - Allow link to go to low power states + * @hif - HIF opaque context + * + * Return: None + */ +void hif_allow_link_low_power_states(struct hif_opaque_softc *hif); + +#else + +static inline +int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif) +{ + return 0; +} + +static inline +void hif_allow_link_low_power_states(struct hif_opaque_softc *hif) +{ +} +#endif + +void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle); + +/** + * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function + * @hif_ctx - the HIF context to assign the callback to + * @callback - the callback to assign + * @priv - the private data to pass to the callback when invoked + * + * Return: None + */ +void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx, + void (*callback)(void *), + void *priv); +/* + * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked + * for defined here + */ +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) +ssize_t hif_dump_desc_trace_buf(struct device *dev, + struct device_attribute *attr, char *buf); +ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn, + const char *buf, size_t size); +ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, + const char *buf, size_t size); +ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf); +ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf); +#endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/ + +/** + * hif_set_ce_service_max_yield_time() - sets CE service max yield time + * @hif: hif context + * @ce_service_max_yield_time: CE service max yield time to set + * + * This API storess CE service max yield time in hif context based + * on ini value. + * + * Return: void + */ +void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif, + uint32_t ce_service_max_yield_time); + +/** + * hif_get_ce_service_max_yield_time() - get CE service max yield time + * @hif: hif context + * + * This API returns CE service max yield time. + * + * Return: CE service max yield time + */ +unsigned long long +hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif); + +/** + * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush + * @hif: hif context + * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set + * + * This API stores CE service max rx ind flush in hif context based + * on ini value. + * + * Return: void + */ +void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif, + uint8_t ce_service_max_rx_ind_flush); + +#ifdef OL_ATH_SMART_LOGGING +/* + * hif_log_ce_dump() - Copy all the CE DEST ring to buf + * @scn : HIF handler + * @buf_cur: Current pointer in ring buffer + * @buf_init:Start of the ring buffer + * @buf_sz: Size of the ring buffer + * @ce: Copy Engine id + * @skb_sz: Max size of the SKB buffer to be copied + * + * Calls the respective function to dump all the CE SRC/DEST ring descriptors + * and buffers pointed by them in to the given buf + * + * Return: Current pointer in ring buffer + */ +uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur, + uint8_t *buf_init, uint32_t buf_sz, + uint32_t ce, uint32_t skb_sz); +#endif /* OL_ATH_SMART_LOGGING */ + +/* + * hif_softc_to_hif_opaque_softc - API to convert hif_softc handle + * to hif_opaque_softc handle + * @hif_handle - hif_softc type + * + * Return: hif_opaque_softc type + */ +static inline struct hif_opaque_softc * +hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle) +{ + return (struct hif_opaque_softc *)hif_handle; +} + +#ifdef FORCE_WAKE +/** + * hif_srng_init_phase(): Indicate srng initialization phase + * to avoid force wake as UMAC power collapse is not yet + * enabled + * @hif_ctx: hif opaque handle + * @init_phase: initialization phase + * + * Return: None + */ +void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx, + bool init_phase); +#else +static inline +void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx, + bool init_phase) +{ +} +#endif /* FORCE_WAKE */ + +#ifdef HIF_CPU_PERF_AFFINE_MASK +/** + * hif_config_irq_set_perf_affinity_hint() - API to set affinity + * @hif_ctx: hif opaque handle + * + * This function is used to move the WLAN IRQs to perf cores in + * case of defconfig builds. + * + * Return: None + */ +void hif_config_irq_set_perf_affinity_hint( + struct hif_opaque_softc *hif_ctx); + +#else +static inline void hif_config_irq_set_perf_affinity_hint( + struct hif_opaque_softc *hif_ctx) +{ +} +#endif + +#ifdef HIF_CE_LOG_INFO +/** + * hif_log_ce_info() - API to log ce info + * @scn: hif handle + * @data: hang event data buffer + * @offset: offset at which data needs to be written + * + * Return: None + */ +void hif_log_ce_info(struct hif_softc *scn, uint8_t *data, + unsigned int *offset); +#else +static inline +void hif_log_ce_info(struct hif_softc *scn, uint8_t *data, + unsigned int *offset) +{ +} +#endif + +#ifdef SYSTEM_PM_CHECK +/** + * __hif_system_pm_set_state() - Set system pm state + * @hif: hif opaque handle + * @state: system state + * + * Return: None + */ +void __hif_system_pm_set_state(struct hif_opaque_softc *hif, + enum hif_system_pm_state state); + +/** + * hif_system_pm_set_state_on() - Set system pm state to ON + * @hif: hif opaque handle + * + * Return: None + */ +static inline +void hif_system_pm_set_state_on(struct hif_opaque_softc *hif) +{ + __hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON); +} + +/** + * hif_system_pm_set_state_resuming() - Set system pm state to resuming + * @hif: hif opaque handle + * + * Return: None + */ +static inline +void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif) +{ + __hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING); +} + +/** + * hif_system_pm_set_state_suspending() - Set system pm state to suspending + * @hif: hif opaque handle + * + * Return: None + */ +static inline +void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif) +{ + __hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING); +} + +/** + * hif_system_pm_set_state_suspended() - Set system pm state to suspended + * @hif: hif opaque handle + * + * Return: None + */ +static inline +void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif) +{ + __hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED); +} + +/** + * hif_system_pm_get_state() - Get system pm state + * @hif: hif opaque handle + * + * Return: system state + */ +int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif); + +/** + * hif_system_pm_state_check() - Check system state and trigger resume + * if required + * @hif: hif opaque handle + * + * Return: 0 if system is in on state else error code + */ +int hif_system_pm_state_check(struct hif_opaque_softc *hif); +#else +static inline +void __hif_system_pm_set_state(struct hif_opaque_softc *hif, + enum hif_system_pm_state state) +{ +} + +static inline +void hif_system_pm_set_state_on(struct hif_opaque_softc *hif) +{ +} + +static inline +void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif) +{ +} + +static inline +void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif) +{ +} + +static inline +void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif) +{ +} + +static inline +int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif) +{ + return 0; +} + +static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif) +{ + return 0; +} +#endif +#endif /* _HIF_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/hif_unit_test_suspend.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/hif_unit_test_suspend.h new file mode 100644 index 0000000000000000000000000000000000000000..cb036ecabb459b9952e9364315e83b8eab5f58de --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/hif_unit_test_suspend.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Public unit-test related APIs for triggering WoW suspend/resume while + * the application processor is still up. + */ + +#ifndef _HIF_UNIT_TEST_SUSPEND_H_ +#define _HIF_UNIT_TEST_SUSPEND_H_ + +#ifdef WLAN_SUSPEND_RESUME_TEST + +#include "qdf_status.h" +#include "hif.h" + +typedef void (*hif_ut_resume_callback)(void); + +/** + * hif_ut_apps_suspend() - Setup unit-test related suspend state. + * @opaque_scn: The HIF context to operate on + * @callback: The function to call when unit-test resume is triggered + * + * Call after a normal WoW suspend has been completed. + * + * Return: QDF_STATUS + */ +QDF_STATUS hif_ut_apps_suspend(struct hif_opaque_softc *opaque_scn, + hif_ut_resume_callback callback); + +/** + * hif_ut_apps_resume() - Cleanup unit-test related suspend state. + * @opaque_scn: The HIF context to operate on + * + * Call before doing a normal WoW resume if suspend was initiated via + * unit-test suspend. + * + * Return: QDF_STATUS + */ +QDF_STATUS hif_ut_apps_resume(struct hif_opaque_softc *opaque_scn); + +#endif /* WLAN_SUSPEND_RESUME_TEST */ + +#endif /* _HIF_UNIT_TEST_SUSPEND_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/host_reg_init.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/host_reg_init.h new file mode 100644 index 0000000000000000000000000000000000000000..f154192c9f9f958844c029997559a6713af4d3d4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/host_reg_init.h @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef HOST_REG_INIT_H +#define HOST_REG_INIT_H + +#include "reg_struct.h" +#include "targaddrs.h" + +#if defined(MY_HOST_DEF) + +#if !defined(FW_IND_HOST_READY) +#define FW_IND_HOST_READY 0 +#endif + +#if !defined(PCIE_LOCAL_BASE_ADDRESS) +#define PCIE_LOCAL_BASE_ADDRESS 0 +#define PCIE_SOC_WAKE_RESET 0 +#define PCIE_SOC_WAKE_ADDRESS 0 +#define PCIE_SOC_WAKE_V_MASK 0 +#define RTC_STATE_ADDRESS 0 +#define RTC_STATE_COLD_RESET_MASK 0 +#define RTC_STATE_V_MASK 0 +#define RTC_STATE_V_LSB 0 +#define RTC_STATE_V_ON 0 +#define SOC_GLOBAL_RESET_ADDRESS 0 +#endif + +#if !defined(CE_COUNT) +#define CE_COUNT 0 +#endif + +#if !defined(TRANSACTION_ID_MASK) +#define TRANSACTION_ID_MASK 0xfff +#endif + +static struct hostdef_s my_host_def = { + .d_INT_STATUS_ENABLE_ERROR_LSB = INT_STATUS_ENABLE_ERROR_LSB, + .d_INT_STATUS_ENABLE_ERROR_MASK = INT_STATUS_ENABLE_ERROR_MASK, + .d_INT_STATUS_ENABLE_CPU_LSB = INT_STATUS_ENABLE_CPU_LSB, + .d_INT_STATUS_ENABLE_CPU_MASK = INT_STATUS_ENABLE_CPU_MASK, + .d_INT_STATUS_ENABLE_COUNTER_LSB = INT_STATUS_ENABLE_COUNTER_LSB, + .d_INT_STATUS_ENABLE_COUNTER_MASK = INT_STATUS_ENABLE_COUNTER_MASK, + .d_INT_STATUS_ENABLE_MBOX_DATA_LSB = INT_STATUS_ENABLE_MBOX_DATA_LSB, + .d_INT_STATUS_ENABLE_MBOX_DATA_MASK = INT_STATUS_ENABLE_MBOX_DATA_MASK, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB + = ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK + = ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB + = ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK + = ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, + .d_COUNTER_INT_STATUS_ENABLE_BIT_LSB + = COUNTER_INT_STATUS_ENABLE_BIT_LSB, + .d_COUNTER_INT_STATUS_ENABLE_BIT_MASK + = COUNTER_INT_STATUS_ENABLE_BIT_MASK, + .d_INT_STATUS_ENABLE_ADDRESS = INT_STATUS_ENABLE_ADDRESS, + .d_CPU_INT_STATUS_ENABLE_BIT_LSB = CPU_INT_STATUS_ENABLE_BIT_LSB, + .d_CPU_INT_STATUS_ENABLE_BIT_MASK = CPU_INT_STATUS_ENABLE_BIT_MASK, + .d_HOST_INT_STATUS_ADDRESS = HOST_INT_STATUS_ADDRESS, + .d_CPU_INT_STATUS_ADDRESS = CPU_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_ADDRESS = ERROR_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_WAKEUP_MASK = ERROR_INT_STATUS_WAKEUP_MASK, + .d_ERROR_INT_STATUS_WAKEUP_LSB = ERROR_INT_STATUS_WAKEUP_LSB, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK + = ERROR_INT_STATUS_RX_UNDERFLOW_MASK, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB + = ERROR_INT_STATUS_RX_UNDERFLOW_LSB, + .d_ERROR_INT_STATUS_TX_OVERFLOW_MASK + = ERROR_INT_STATUS_TX_OVERFLOW_MASK, + .d_ERROR_INT_STATUS_TX_OVERFLOW_LSB = ERROR_INT_STATUS_TX_OVERFLOW_LSB, + .d_COUNT_DEC_ADDRESS = COUNT_DEC_ADDRESS, + .d_HOST_INT_STATUS_CPU_MASK = HOST_INT_STATUS_CPU_MASK, + .d_HOST_INT_STATUS_CPU_LSB = HOST_INT_STATUS_CPU_LSB, + .d_HOST_INT_STATUS_ERROR_MASK = HOST_INT_STATUS_ERROR_MASK, + .d_HOST_INT_STATUS_ERROR_LSB = HOST_INT_STATUS_ERROR_LSB, + .d_HOST_INT_STATUS_COUNTER_MASK = HOST_INT_STATUS_COUNTER_MASK, + .d_HOST_INT_STATUS_COUNTER_LSB = HOST_INT_STATUS_COUNTER_LSB, + .d_RX_LOOKAHEAD_VALID_ADDRESS = RX_LOOKAHEAD_VALID_ADDRESS, + .d_WINDOW_DATA_ADDRESS = WINDOW_DATA_ADDRESS, + .d_WINDOW_READ_ADDR_ADDRESS = WINDOW_READ_ADDR_ADDRESS, + .d_WINDOW_WRITE_ADDR_ADDRESS = WINDOW_WRITE_ADDR_ADDRESS, + .d_SOC_GLOBAL_RESET_ADDRESS = SOC_GLOBAL_RESET_ADDRESS, + .d_RTC_STATE_ADDRESS = RTC_STATE_ADDRESS, + .d_RTC_STATE_COLD_RESET_MASK = RTC_STATE_COLD_RESET_MASK, + .d_PCIE_LOCAL_BASE_ADDRESS = PCIE_LOCAL_BASE_ADDRESS, + .d_PCIE_SOC_WAKE_RESET = PCIE_SOC_WAKE_RESET, + .d_PCIE_SOC_WAKE_ADDRESS = PCIE_SOC_WAKE_ADDRESS, + .d_PCIE_SOC_WAKE_V_MASK = PCIE_SOC_WAKE_V_MASK, + .d_RTC_STATE_V_MASK = RTC_STATE_V_MASK, + .d_RTC_STATE_V_LSB = RTC_STATE_V_LSB, + .d_FW_IND_EVENT_PENDING = FW_IND_EVENT_PENDING, + .d_FW_IND_INITIALIZED = FW_IND_INITIALIZED, + .d_RTC_STATE_V_ON = RTC_STATE_V_ON, +#if defined(SDIO_3_0) + .d_HOST_INT_STATUS_MBOX_DATA_MASK = HOST_INT_STATUS_MBOX_DATA_MASK, + .d_HOST_INT_STATUS_MBOX_DATA_LSB = HOST_INT_STATUS_MBOX_DATA_LSB, +#endif + .d_FW_IND_HOST_READY = FW_IND_HOST_READY, + .d_HOST_CE_COUNT = CE_COUNT, + .d_TRANSACTION_ID_MASK = TRANSACTION_ID_MASK, +}; + +struct hostdef_s *MY_HOST_DEF = &my_host_def; +#else /* MY_HOST_DEF */ +#endif /* MY_HOST_DEF */ + + + +#if defined(MY_HOST_SHADOW_REGS) +struct host_shadow_regs_s my_host_shadow_regs = { + .d_A_LOCAL_SHADOW_REG_VALUE_0 = A_LOCAL_SHADOW_REG_VALUE_0; + .d_A_LOCAL_SHADOW_REG_VALUE_1 = A_LOCAL_SHADOW_REG_VALUE_1; + .d_A_LOCAL_SHADOW_REG_VALUE_2 = A_LOCAL_SHADOW_REG_VALUE_2; + .d_A_LOCAL_SHADOW_REG_VALUE_3 = A_LOCAL_SHADOW_REG_VALUE_3; + .d_A_LOCAL_SHADOW_REG_VALUE_4 = A_LOCAL_SHADOW_REG_VALUE_4; + .d_A_LOCAL_SHADOW_REG_VALUE_5 = A_LOCAL_SHADOW_REG_VALUE_5; + .d_A_LOCAL_SHADOW_REG_VALUE_6 = A_LOCAL_SHADOW_REG_VALUE_6; + .d_A_LOCAL_SHADOW_REG_VALUE_7 = A_LOCAL_SHADOW_REG_VALUE_7; + .d_A_LOCAL_SHADOW_REG_VALUE_8 = A_LOCAL_SHADOW_REG_VALUE_8; + .d_A_LOCAL_SHADOW_REG_VALUE_9 = A_LOCAL_SHADOW_REG_VALUE_9; + .d_A_LOCAL_SHADOW_REG_VALUE_10 = A_LOCAL_SHADOW_REG_VALUE_10; + .d_A_LOCAL_SHADOW_REG_VALUE_11 = A_LOCAL_SHADOW_REG_VALUE_11; + .d_A_LOCAL_SHADOW_REG_VALUE_12 = A_LOCAL_SHADOW_REG_VALUE_12; + .d_A_LOCAL_SHADOW_REG_VALUE_13 = A_LOCAL_SHADOW_REG_VALUE_13; + .d_A_LOCAL_SHADOW_REG_VALUE_14 = A_LOCAL_SHADOW_REG_VALUE_14; + .d_A_LOCAL_SHADOW_REG_VALUE_15 = A_LOCAL_SHADOW_REG_VALUE_15; + .d_A_LOCAL_SHADOW_REG_VALUE_16 = A_LOCAL_SHADOW_REG_VALUE_16; + .d_A_LOCAL_SHADOW_REG_VALUE_17 = A_LOCAL_SHADOW_REG_VALUE_17; + .d_A_LOCAL_SHADOW_REG_VALUE_18 = A_LOCAL_SHADOW_REG_VALUE_18; + .d_A_LOCAL_SHADOW_REG_VALUE_19 = A_LOCAL_SHADOW_REG_VALUE_19; + .d_A_LOCAL_SHADOW_REG_VALUE_20 = A_LOCAL_SHADOW_REG_VALUE_20; + .d_A_LOCAL_SHADOW_REG_VALUE_21 = A_LOCAL_SHADOW_REG_VALUE_21; + .d_A_LOCAL_SHADOW_REG_VALUE_22 = A_LOCAL_SHADOW_REG_VALUE_22; + .d_A_LOCAL_SHADOW_REG_VALUE_23 = A_LOCAL_SHADOW_REG_VALUE_23; + .d_A_LOCAL_SHADOW_REG_ADDRESS_0 = A_LOCAL_SHADOW_REG_ADDRESS_0; + .d_A_LOCAL_SHADOW_REG_ADDRESS_1 = A_LOCAL_SHADOW_REG_ADDRESS_1; + .d_A_LOCAL_SHADOW_REG_ADDRESS_2 = A_LOCAL_SHADOW_REG_ADDRESS_2; + .d_A_LOCAL_SHADOW_REG_ADDRESS_3 = A_LOCAL_SHADOW_REG_ADDRESS_3; + .d_A_LOCAL_SHADOW_REG_ADDRESS_4 = A_LOCAL_SHADOW_REG_ADDRESS_4; + .d_A_LOCAL_SHADOW_REG_ADDRESS_5 = A_LOCAL_SHADOW_REG_ADDRESS_5; + .d_A_LOCAL_SHADOW_REG_ADDRESS_6 = A_LOCAL_SHADOW_REG_ADDRESS_6; + .d_A_LOCAL_SHADOW_REG_ADDRESS_7 = A_LOCAL_SHADOW_REG_ADDRESS_7; + .d_A_LOCAL_SHADOW_REG_ADDRESS_8 = A_LOCAL_SHADOW_REG_ADDRESS_8; + .d_A_LOCAL_SHADOW_REG_ADDRESS_9 = A_LOCAL_SHADOW_REG_ADDRESS_9; + .d_A_LOCAL_SHADOW_REG_ADDRESS_10 = A_LOCAL_SHADOW_REG_ADDRESS_10; + .d_A_LOCAL_SHADOW_REG_ADDRESS_11 = A_LOCAL_SHADOW_REG_ADDRESS_11; + .d_A_LOCAL_SHADOW_REG_ADDRESS_12 = A_LOCAL_SHADOW_REG_ADDRESS_12; + .d_A_LOCAL_SHADOW_REG_ADDRESS_13 = A_LOCAL_SHADOW_REG_ADDRESS_13; + .d_A_LOCAL_SHADOW_REG_ADDRESS_14 = A_LOCAL_SHADOW_REG_ADDRESS_14; + .d_A_LOCAL_SHADOW_REG_ADDRESS_15 = A_LOCAL_SHADOW_REG_ADDRESS_15; + .d_A_LOCAL_SHADOW_REG_ADDRESS_16 = A_LOCAL_SHADOW_REG_ADDRESS_16; + .d_A_LOCAL_SHADOW_REG_ADDRESS_17 = A_LOCAL_SHADOW_REG_ADDRESS_17; + .d_A_LOCAL_SHADOW_REG_ADDRESS_18 = A_LOCAL_SHADOW_REG_ADDRESS_18; + .d_A_LOCAL_SHADOW_REG_ADDRESS_19 = A_LOCAL_SHADOW_REG_ADDRESS_19; + .d_A_LOCAL_SHADOW_REG_ADDRESS_20 = A_LOCAL_SHADOW_REG_ADDRESS_20; + .d_A_LOCAL_SHADOW_REG_ADDRESS_21 = A_LOCAL_SHADOW_REG_ADDRESS_21; + .d_A_LOCAL_SHADOW_REG_ADDRESS_22 = A_LOCAL_SHADOW_REG_ADDRESS_22; + .d_A_LOCAL_SHADOW_REG_ADDRESS_23 = A_LOCAL_SHADOW_REG_ADDRESS_23; +}; + +struct hostdef_s *MY_HOST_SHADOW_REGS = &my_host_shadow_regs; +#else /* MY_HOST_SHADOW_REGS */ +#endif /* MY_HOST_SHADOW_REGS */ +#endif /* HOST_REG_INIT_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/hostdef.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/hostdef.h new file mode 100644 index 0000000000000000000000000000000000000000..a94df8efb53a7bd210fe29942a3875ea6000f83b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/hostdef.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2013-2016,2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef HOSTDEFS_H_ +#define HOSTDEFS_H_ + +#include +#include +#include +#include "host_reg_init.h" + +extern struct hostdef_s *AR6002_HOSTdef; +extern struct hostdef_s *AR6003_HOSTdef; +extern struct hostdef_s *AR6004_HOSTdef; +extern struct hostdef_s *AR9888_HOSTdef; +extern struct hostdef_s *AR9888V2_HOSTdef; +extern struct hostdef_s *AR6320_HOSTdef; +extern struct hostdef_s *AR900B_HOSTdef; +extern struct hostdef_s *QCA9984_HOSTdef; +extern struct hostdef_s *QCA9888_HOSTdef; +extern struct hostdef_s *QCA6290_HOSTdef; +extern struct hostdef_s *QCA6390_HOSTdef; +extern struct hostdef_s *QCA6490_HOSTdef; +extern struct hostdef_s *QCA6750_HOSTdef; + +#ifdef ATH_AHB +extern struct hostdef_s *IPQ4019_HOSTdef; +#endif +extern struct hostdef_s *QCA8074_HOSTdef; +extern struct hostdef_s *QCA8074V2_HOSTDEF; +extern struct hostdef_s *QCA6018_HOSTDEF; +extern struct hostdef_s *QCN9000_HOSTDEF; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/reg_struct.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/reg_struct.h new file mode 100644 index 0000000000000000000000000000000000000000..422b00146ed9782962eca4a1173e024dbcd5e1a9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/reg_struct.h @@ -0,0 +1,665 @@ +/* + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef REG_STRUCT_H +#define REG_STRUCT_H + +#define MISSING_REGISTER 0 +#define UNSUPPORTED_REGISTER_OFFSET 0xffffffff + +/** + * is_register_supported() - return true if the register offset is valid + * @reg: register address being checked + * + * Return: true if the register offset is valid + */ +static inline bool is_register_supported(uint32_t reg) +{ + return (reg != MISSING_REGISTER) && + (reg != UNSUPPORTED_REGISTER_OFFSET); +} + +struct targetdef_s { + uint32_t d_RTC_SOC_BASE_ADDRESS; + uint32_t d_RTC_WMAC_BASE_ADDRESS; + uint32_t d_SYSTEM_SLEEP_OFFSET; + uint32_t d_WLAN_SYSTEM_SLEEP_OFFSET; + uint32_t d_WLAN_SYSTEM_SLEEP_DISABLE_LSB; + uint32_t d_WLAN_SYSTEM_SLEEP_DISABLE_MASK; + uint32_t d_CLOCK_CONTROL_OFFSET; + uint32_t d_CLOCK_CONTROL_SI0_CLK_MASK; + uint32_t d_RESET_CONTROL_OFFSET; + uint32_t d_RESET_CONTROL_MBOX_RST_MASK; + uint32_t d_RESET_CONTROL_SI0_RST_MASK; + uint32_t d_WLAN_RESET_CONTROL_OFFSET; + uint32_t d_WLAN_RESET_CONTROL_COLD_RST_MASK; + uint32_t d_WLAN_RESET_CONTROL_WARM_RST_MASK; + uint32_t d_GPIO_BASE_ADDRESS; + uint32_t d_GPIO_PIN0_OFFSET; + uint32_t d_GPIO_PIN1_OFFSET; + uint32_t d_GPIO_PIN0_CONFIG_MASK; + uint32_t d_GPIO_PIN1_CONFIG_MASK; + uint32_t d_SI_CONFIG_BIDIR_OD_DATA_LSB; + uint32_t d_SI_CONFIG_BIDIR_OD_DATA_MASK; + uint32_t d_SI_CONFIG_I2C_LSB; + uint32_t d_SI_CONFIG_I2C_MASK; + uint32_t d_SI_CONFIG_POS_SAMPLE_LSB; + uint32_t d_SI_CONFIG_POS_SAMPLE_MASK; + uint32_t d_SI_CONFIG_INACTIVE_CLK_LSB; + uint32_t d_SI_CONFIG_INACTIVE_CLK_MASK; + uint32_t d_SI_CONFIG_INACTIVE_DATA_LSB; + uint32_t d_SI_CONFIG_INACTIVE_DATA_MASK; + uint32_t d_SI_CONFIG_DIVIDER_LSB; + uint32_t d_SI_CONFIG_DIVIDER_MASK; + uint32_t d_SI_BASE_ADDRESS; + uint32_t d_SI_CONFIG_OFFSET; + uint32_t d_SI_TX_DATA0_OFFSET; + uint32_t d_SI_TX_DATA1_OFFSET; + uint32_t d_SI_RX_DATA0_OFFSET; + uint32_t d_SI_RX_DATA1_OFFSET; + uint32_t d_SI_CS_OFFSET; + uint32_t d_SI_CS_DONE_ERR_MASK; + uint32_t d_SI_CS_DONE_INT_MASK; + uint32_t d_SI_CS_START_LSB; + uint32_t d_SI_CS_START_MASK; + uint32_t d_SI_CS_RX_CNT_LSB; + uint32_t d_SI_CS_RX_CNT_MASK; + uint32_t d_SI_CS_TX_CNT_LSB; + uint32_t d_SI_CS_TX_CNT_MASK; + uint32_t d_BOARD_DATA_SZ; + uint32_t d_BOARD_EXT_DATA_SZ; + uint32_t d_MBOX_BASE_ADDRESS; + uint32_t d_LOCAL_SCRATCH_OFFSET; + uint32_t d_CPU_CLOCK_OFFSET; + uint32_t d_LPO_CAL_OFFSET; + uint32_t d_GPIO_PIN10_OFFSET; + uint32_t d_GPIO_PIN11_OFFSET; + uint32_t d_GPIO_PIN12_OFFSET; + uint32_t d_GPIO_PIN13_OFFSET; + uint32_t d_CLOCK_GPIO_OFFSET; + uint32_t d_CPU_CLOCK_STANDARD_LSB; + uint32_t d_CPU_CLOCK_STANDARD_MASK; + uint32_t d_LPO_CAL_ENABLE_LSB; + uint32_t d_LPO_CAL_ENABLE_MASK; + uint32_t d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB; + uint32_t d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK; + uint32_t d_ANALOG_INTF_BASE_ADDRESS; + uint32_t d_WLAN_MAC_BASE_ADDRESS; + uint32_t d_FW_INDICATOR_ADDRESS; + uint32_t d_FW_CPU_PLL_CONFIG; + uint32_t d_DRAM_BASE_ADDRESS; + uint32_t d_SOC_CORE_BASE_ADDRESS; + uint32_t d_CORE_CTRL_ADDRESS; + uint32_t d_CE_COUNT; + uint32_t d_MSI_NUM_REQUEST; + uint32_t d_MSI_ASSIGN_FW; + uint32_t d_MSI_ASSIGN_CE_INITIAL; + uint32_t d_PCIE_INTR_ENABLE_ADDRESS; + uint32_t d_PCIE_INTR_CLR_ADDRESS; + uint32_t d_PCIE_INTR_FIRMWARE_MASK; + uint32_t d_PCIE_INTR_CE_MASK_ALL; + uint32_t d_CORE_CTRL_CPU_INTR_MASK; + uint32_t d_WIFICMN_PCIE_BAR_REG_ADDRESS; + /* htt_rx.c */ + /* htt tx */ + uint32_t d_MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK; + uint32_t d_MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK; + uint32_t d_MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK; + uint32_t d_MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK; + uint32_t d_MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB; + uint32_t d_MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB; + uint32_t d_MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB; + uint32_t d_MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB; + /* copy_engine.c */ + uint32_t d_SR_WR_INDEX_ADDRESS; + uint32_t d_DST_WATERMARK_ADDRESS; + /* htt_rx.c */ + uint32_t d_RX_MSDU_END_4_FIRST_MSDU_MASK; + uint32_t d_RX_MSDU_END_4_FIRST_MSDU_LSB; + uint32_t d_RX_MPDU_START_0_RETRY_LSB; + uint32_t d_RX_MPDU_START_0_RETRY_MASK; + uint32_t d_RX_MPDU_START_0_SEQ_NUM_MASK; + uint32_t d_RX_MPDU_START_0_SEQ_NUM_LSB; + uint32_t d_RX_MPDU_START_2_PN_47_32_LSB; + uint32_t d_RX_MPDU_START_2_PN_47_32_MASK; + uint32_t d_RX_MPDU_START_2_TID_LSB; + uint32_t d_RX_MPDU_START_2_TID_MASK; + uint32_t d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK; + uint32_t d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB; + uint32_t d_RX_MSDU_END_1_KEY_ID_OCT_MASK; + uint32_t d_RX_MSDU_END_1_KEY_ID_OCT_LSB; + uint32_t d_RX_MSDU_END_4_LAST_MSDU_MASK; + uint32_t d_RX_MSDU_END_4_LAST_MSDU_LSB; + uint32_t d_RX_ATTENTION_0_MCAST_BCAST_MASK; + uint32_t d_RX_ATTENTION_0_MCAST_BCAST_LSB; + uint32_t d_RX_ATTENTION_0_FRAGMENT_MASK; + uint32_t d_RX_ATTENTION_0_FRAGMENT_LSB; + uint32_t d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK; + uint32_t d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK; + uint32_t d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB; + uint32_t d_RX_MSDU_START_0_MSDU_LENGTH_MASK; + uint32_t d_RX_MSDU_START_0_MSDU_LENGTH_LSB; + uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET; + uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_MASK; + uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_LSB; + uint32_t d_RX_MPDU_START_0_ENCRYPTED_MASK; + uint32_t d_RX_MPDU_START_0_ENCRYPTED_LSB; + uint32_t d_RX_ATTENTION_0_MORE_DATA_MASK; + uint32_t d_RX_ATTENTION_0_MSDU_DONE_MASK; + uint32_t d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK; + /* end */ + + /* PLL start */ + uint32_t d_EFUSE_OFFSET; + uint32_t d_EFUSE_XTAL_SEL_MSB; + uint32_t d_EFUSE_XTAL_SEL_LSB; + uint32_t d_EFUSE_XTAL_SEL_MASK; + uint32_t d_BB_PLL_CONFIG_OFFSET; + uint32_t d_BB_PLL_CONFIG_OUTDIV_MSB; + uint32_t d_BB_PLL_CONFIG_OUTDIV_LSB; + uint32_t d_BB_PLL_CONFIG_OUTDIV_MASK; + uint32_t d_BB_PLL_CONFIG_FRAC_MSB; + uint32_t d_BB_PLL_CONFIG_FRAC_LSB; + uint32_t d_BB_PLL_CONFIG_FRAC_MASK; + uint32_t d_WLAN_PLL_SETTLE_TIME_MSB; + uint32_t d_WLAN_PLL_SETTLE_TIME_LSB; + uint32_t d_WLAN_PLL_SETTLE_TIME_MASK; + uint32_t d_WLAN_PLL_SETTLE_OFFSET; + uint32_t d_WLAN_PLL_SETTLE_SW_MASK; + uint32_t d_WLAN_PLL_SETTLE_RSTMASK; + uint32_t d_WLAN_PLL_SETTLE_RESET; + uint32_t d_WLAN_PLL_CONTROL_NOPWD_MSB; + uint32_t d_WLAN_PLL_CONTROL_NOPWD_LSB; + uint32_t d_WLAN_PLL_CONTROL_NOPWD_MASK; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_MSB; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_LSB; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_MASK; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_RESET; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_MSB; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_LSB; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_MASK; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_RESET; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_MSB; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_LSB; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_MASK; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_RESET; + uint32_t d_WLAN_PLL_CONTROL_DIV_MSB; + uint32_t d_WLAN_PLL_CONTROL_DIV_LSB; + uint32_t d_WLAN_PLL_CONTROL_DIV_MASK; + uint32_t d_WLAN_PLL_CONTROL_DIV_RESET; + uint32_t d_WLAN_PLL_CONTROL_OFFSET; + uint32_t d_WLAN_PLL_CONTROL_SW_MASK; + uint32_t d_WLAN_PLL_CONTROL_RSTMASK; + uint32_t d_WLAN_PLL_CONTROL_RESET; + uint32_t d_SOC_CORE_CLK_CTRL_OFFSET; + uint32_t d_SOC_CORE_CLK_CTRL_DIV_MSB; + uint32_t d_SOC_CORE_CLK_CTRL_DIV_LSB; + uint32_t d_SOC_CORE_CLK_CTRL_DIV_MASK; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_MSB; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_LSB; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_MASK; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_RESET; + uint32_t d_RTC_SYNC_STATUS_OFFSET; + uint32_t d_SOC_CPU_CLOCK_OFFSET; + uint32_t d_SOC_CPU_CLOCK_STANDARD_MSB; + uint32_t d_SOC_CPU_CLOCK_STANDARD_LSB; + uint32_t d_SOC_CPU_CLOCK_STANDARD_MASK; + /* PLL end */ + + uint32_t d_SOC_POWER_REG_OFFSET; + uint32_t d_PCIE_INTR_CAUSE_ADDRESS; + uint32_t d_SOC_RESET_CONTROL_ADDRESS; + uint32_t d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK; + uint32_t d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB; + uint32_t d_SOC_RESET_CONTROL_CE_RST_MASK; + uint32_t d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK; + uint32_t d_CPU_INTR_ADDRESS; + uint32_t d_SOC_LF_TIMER_CONTROL0_ADDRESS; + uint32_t d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK; + uint32_t d_SOC_LF_TIMER_STATUS0_ADDRESS; + + /* chip id start */ + uint32_t d_SI_CONFIG_ERR_INT_MASK; + uint32_t d_SI_CONFIG_ERR_INT_LSB; + uint32_t d_GPIO_ENABLE_W1TS_LOW_ADDRESS; + uint32_t d_GPIO_PIN0_CONFIG_LSB; + uint32_t d_GPIO_PIN0_PAD_PULL_LSB; + uint32_t d_GPIO_PIN0_PAD_PULL_MASK; + + uint32_t d_SOC_CHIP_ID_ADDRESS; + uint32_t d_SOC_CHIP_ID_VERSION_MASK; + uint32_t d_SOC_CHIP_ID_VERSION_LSB; + uint32_t d_SOC_CHIP_ID_REVISION_MASK; + uint32_t d_SOC_CHIP_ID_REVISION_LSB; + uint32_t d_SOC_CHIP_ID_REVISION_MSB; + uint32_t d_FW_AXI_MSI_ADDR; + uint32_t d_FW_AXI_MSI_DATA; + uint32_t d_WLAN_SUBSYSTEM_CORE_ID_ADDRESS; + + /* chip id end */ + + uint32_t d_A_SOC_CORE_SCRATCH_0_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_1_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_2_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_3_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_4_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_5_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_6_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_7_ADDRESS; + uint32_t d_A_SOC_CORE_SPARE_0_REGISTER; + uint32_t d_PCIE_INTR_FIRMWARE_ROUTE_MASK; + uint32_t d_A_SOC_CORE_PCIE_INTR_CAUSE_GRP1; + uint32_t d_A_SOC_CORE_SPARE_1_REGISTER; + uint32_t d_A_SOC_CORE_PCIE_INTR_CLR_GRP1; + uint32_t d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP1; + uint32_t d_A_SOC_PCIE_PCIE_SCRATCH_0; + uint32_t d_A_SOC_PCIE_PCIE_SCRATCH_1; + uint32_t d_A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA; + uint32_t d_A_SOC_PCIE_PCIE_SCRATCH_2; + uint32_t d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK; + + uint32_t d_WLAN_DEBUG_INPUT_SEL_OFFSET; + uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_MSB; + uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_LSB; + uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_MASK; + uint32_t d_WLAN_DEBUG_CONTROL_OFFSET; + uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_MSB; + uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_LSB; + uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_MASK; + uint32_t d_WLAN_DEBUG_OUT_OFFSET; + uint32_t d_WLAN_DEBUG_OUT_DATA_MSB; + uint32_t d_WLAN_DEBUG_OUT_DATA_LSB; + uint32_t d_WLAN_DEBUG_OUT_DATA_MASK; + uint32_t d_AMBA_DEBUG_BUS_OFFSET; + uint32_t d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB; + uint32_t d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB; + uint32_t d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK; + uint32_t d_AMBA_DEBUG_BUS_SEL_MSB; + uint32_t d_AMBA_DEBUG_BUS_SEL_LSB; + uint32_t d_AMBA_DEBUG_BUS_SEL_MASK; + +#ifdef QCA_WIFI_3_0_ADRASTEA + uint32_t d_Q6_ENABLE_REGISTER_0; + uint32_t d_Q6_ENABLE_REGISTER_1; + uint32_t d_Q6_CAUSE_REGISTER_0; + uint32_t d_Q6_CAUSE_REGISTER_1; + uint32_t d_Q6_CLEAR_REGISTER_0; + uint32_t d_Q6_CLEAR_REGISTER_1; +#endif +#ifdef CONFIG_BYPASS_QMI + uint32_t d_BYPASS_QMI_TEMP_REGISTER; +#endif + uint32_t d_WIFICMN_INT_STATUS_ADDRESS; +}; + +struct hostdef_s { + uint32_t d_INT_STATUS_ENABLE_ERROR_LSB; + uint32_t d_INT_STATUS_ENABLE_ERROR_MASK; + uint32_t d_INT_STATUS_ENABLE_CPU_LSB; + uint32_t d_INT_STATUS_ENABLE_CPU_MASK; + uint32_t d_INT_STATUS_ENABLE_COUNTER_LSB; + uint32_t d_INT_STATUS_ENABLE_COUNTER_MASK; + uint32_t d_INT_STATUS_ENABLE_MBOX_DATA_LSB; + uint32_t d_INT_STATUS_ENABLE_MBOX_DATA_MASK; + uint32_t d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB; + uint32_t d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK; + uint32_t d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB; + uint32_t d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK; + uint32_t d_COUNTER_INT_STATUS_ENABLE_BIT_LSB; + uint32_t d_COUNTER_INT_STATUS_ENABLE_BIT_MASK; + uint32_t d_INT_STATUS_ENABLE_ADDRESS; + uint32_t d_CPU_INT_STATUS_ENABLE_BIT_LSB; + uint32_t d_CPU_INT_STATUS_ENABLE_BIT_MASK; + uint32_t d_HOST_INT_STATUS_ADDRESS; + uint32_t d_CPU_INT_STATUS_ADDRESS; + uint32_t d_ERROR_INT_STATUS_ADDRESS; + uint32_t d_ERROR_INT_STATUS_WAKEUP_MASK; + uint32_t d_ERROR_INT_STATUS_WAKEUP_LSB; + uint32_t d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK; + uint32_t d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB; + uint32_t d_ERROR_INT_STATUS_TX_OVERFLOW_MASK; + uint32_t d_ERROR_INT_STATUS_TX_OVERFLOW_LSB; + uint32_t d_COUNT_DEC_ADDRESS; + uint32_t d_HOST_INT_STATUS_CPU_MASK; + uint32_t d_HOST_INT_STATUS_CPU_LSB; + uint32_t d_HOST_INT_STATUS_ERROR_MASK; + uint32_t d_HOST_INT_STATUS_ERROR_LSB; + uint32_t d_HOST_INT_STATUS_COUNTER_MASK; + uint32_t d_HOST_INT_STATUS_COUNTER_LSB; + uint32_t d_RX_LOOKAHEAD_VALID_ADDRESS; + uint32_t d_WINDOW_DATA_ADDRESS; + uint32_t d_WINDOW_READ_ADDR_ADDRESS; + uint32_t d_WINDOW_WRITE_ADDR_ADDRESS; + uint32_t d_SOC_GLOBAL_RESET_ADDRESS; + uint32_t d_RTC_STATE_ADDRESS; + uint32_t d_RTC_STATE_COLD_RESET_MASK; + uint32_t d_PCIE_LOCAL_BASE_ADDRESS; + uint32_t d_PCIE_SOC_WAKE_RESET; + uint32_t d_PCIE_SOC_WAKE_ADDRESS; + uint32_t d_PCIE_SOC_WAKE_V_MASK; + uint32_t d_RTC_STATE_V_MASK; + uint32_t d_RTC_STATE_V_LSB; + uint32_t d_FW_IND_EVENT_PENDING; + uint32_t d_FW_IND_INITIALIZED; + uint32_t d_FW_IND_HELPER; + uint32_t d_RTC_STATE_V_ON; +#if defined(SDIO_3_0) + uint32_t d_HOST_INT_STATUS_MBOX_DATA_MASK; + uint32_t d_HOST_INT_STATUS_MBOX_DATA_LSB; +#endif + uint32_t d_PCIE_SOC_RDY_STATUS_ADDRESS; + uint32_t d_PCIE_SOC_RDY_STATUS_BAR_MASK; + uint32_t d_SOC_PCIE_BASE_ADDRESS; + uint32_t d_MSI_MAGIC_ADR_ADDRESS; + uint32_t d_MSI_MAGIC_ADDRESS; + uint32_t d_HOST_CE_COUNT; + uint32_t d_ENABLE_MSI; + uint32_t d_MUX_ID_MASK; + uint32_t d_TRANSACTION_ID_MASK; + uint32_t d_DESC_DATA_FLAG_MASK; + uint32_t d_A_SOC_PCIE_PCIE_BAR0_START; + uint32_t d_FW_IND_HOST_READY; +}; + +struct host_shadow_regs_s { + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_0; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_1; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_2; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_3; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_4; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_5; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_6; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_7; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_8; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_9; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_10; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_11; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_12; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_13; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_14; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_15; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_16; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_17; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_18; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_19; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_20; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_21; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_22; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_23; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_0; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_1; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_2; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_3; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_4; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_5; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_6; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_7; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_8; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_9; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_10; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_11; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_12; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_13; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_14; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_15; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_16; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_17; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_18; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_19; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_20; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_21; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_22; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_23; +}; + + +/* + * @d_DST_WR_INDEX_ADDRESS: Destination ring write index + * + * @d_SRC_WATERMARK_ADDRESS: Source ring watermark + * + * @d_SRC_WATERMARK_LOW_MASK: Bits indicating low watermark from Source ring + * watermark + * + * @d_SRC_WATERMARK_HIGH_MASK: Bits indicating high watermark from Source ring + * watermark + * + * @d_DST_WATERMARK_LOW_MASK: Bits indicating low watermark from Destination + * ring watermark + * + * @d_DST_WATERMARK_HIGH_MASK: Bits indicating high watermark from Destination + * ring watermark + * + * @d_CURRENT_SRRI_ADDRESS: Current source ring read index.The Start Offset + * will be reflected after a CE transfer is completed. + * + * @d_CURRENT_DRRI_ADDRESS: Current Destination ring read index. The Start + * Offset will be reflected after a CE transfer + * is completed. + * + * @d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK: Source ring high watermark + * Interrupt Status + * + * @d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK: Source ring low watermark + * Interrupt Status + * + * @d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK: Destination ring high watermark + * Interrupt Status + * + * @d_HOST_IS_DST_RING_LOW_WATERMARK_MASK: Source ring low watermark + * Interrupt Status + * + * @d_HOST_IS_ADDRESS: Host Interrupt Status Register + * + * @d_MISC_IS_ADDRESS: Miscellaneous Interrupt Status Register + * + * @d_HOST_IS_COPY_COMPLETE_MASK: Bits indicating Copy complete interrupt + * status from the Host Interrupt Status + * register + * + * @d_CE_WRAPPER_BASE_ADDRESS: Copy Engine Wrapper Base Address + * + * @d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS: CE Wrapper summary for interrupts + * to host + * + * @d_CE_WRAPPER_INDEX_BASE_LOW: The LSB Base address to which source and + * destination read indices are written + * + * @d_CE_WRAPPER_INDEX_BASE_HIGH: The MSB Base address to which source and + * destination read indices are written + * + * @d_HOST_IE_ADDRESS: Host Line Interrupt Enable Register + * + * @d_HOST_IE_COPY_COMPLETE_MASK: Bits indicating Copy complete interrupt + * enable from the IE register + * + * @d_SR_BA_ADDRESS: LSB of Source Ring Base Address + * + * @d_SR_BA_ADDRESS_HIGH: MSB of Source Ring Base Address + * + * @d_SR_SIZE_ADDRESS: Source Ring size - number of entries and Start Offset + * + * @d_CE_CTRL1_ADDRESS: CE Control register + * + * @d_CE_CTRL1_DMAX_LENGTH_MASK: Destination buffer Max Length used for error + * check + * + * @d_DR_BA_ADDRESS: Destination Ring Base Address Low + * + * @d_DR_BA_ADDRESS_HIGH: Destination Ring Base Address High + * + * @d_DR_SIZE_ADDRESS: Destination Ring size - number of entries Start Offset + * + * @d_CE_CMD_REGISTER: Implements commands to all CE Halt Flush + * + * @d_CE_MSI_ADDRESS: CE MSI LOW Address register + * + * @d_CE_MSI_ADDRESS_HIGH: CE MSI High Address register + * + * @d_CE_MSI_DATA: CE MSI Data Register + * + * @d_CE_MSI_ENABLE_BIT: Bit in CTRL1 register indication the MSI enable + * + * @d_MISC_IE_ADDRESS: Miscellaneous Interrupt Enable Register + * + * @d_MISC_IS_AXI_ERR_MASK: + * Bit in Misc IS indicating AXI Timeout Interrupt status + * + * @d_MISC_IS_DST_ADDR_ERR_MASK: + * Bit in Misc IS indicating Destination Address Error + * + * @d_MISC_IS_SRC_LEN_ERR_MASK: Bit in Misc IS indicating Source Zero Length + * Error Interrupt status + * + * @d_MISC_IS_DST_MAX_LEN_VIO_MASK: Bit in Misc IS indicating Destination Max + * Length Violated Interrupt status + * + * @d_MISC_IS_DST_RING_OVERFLOW_MASK: Bit in Misc IS indicating Destination + * Ring Overflow Interrupt status + * + * @d_MISC_IS_SRC_RING_OVERFLOW_MASK: Bit in Misc IS indicating Source Ring + * Overflow Interrupt status + * + * @d_SRC_WATERMARK_LOW_LSB: Source Ring Low Watermark LSB + * + * @d_SRC_WATERMARK_HIGH_LSB: Source Ring Low Watermark MSB + * + * @d_DST_WATERMARK_LOW_LSB: Destination Ring Low Watermark LSB + * + * @d_DST_WATERMARK_HIGH_LSB: Destination Ring High Watermark LSB + * + * @d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK: + * Bits in d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDR + * indicating Copy engine miscellaneous interrupt summary + * + * @d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB: + * Bits in d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDR + * indicating Host interrupts summary + * + * @d_CE_CTRL1_DMAX_LENGTH_LSB: + * LSB of Destination buffer Max Length used for error check + * + * @d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK: + * Bits indicating Source ring Byte Swap enable. + * Treats source ring memory organisation as big-endian. + * + * @d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK: + * Bits indicating Destination ring byte swap enable. + * Treats destination ring memory organisation as big-endian + * + * @d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB: + * LSB of Source ring Byte Swap enable + * + * @d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB: + * LSB of Destination ring Byte Swap enable + * + * @d_CE_WRAPPER_DEBUG_OFFSET: Offset of CE OBS BUS Select register + * + * @d_CE_WRAPPER_DEBUG_SEL_MSB: + * MSB of Control register selecting inputs for trace/debug + * + * @d_CE_WRAPPER_DEBUG_SEL_LSB: + * LSB of Control register selecting inputs for trace/debug + * + * @d_CE_WRAPPER_DEBUG_SEL_MASK: + * Bit mask for trace/debug Control register + * + * @d_CE_DEBUG_OFFSET: Offset of Copy Engine FSM Debug Status + * + * @d_CE_DEBUG_SEL_MSB: MSB of Copy Engine FSM Debug Status + * + * @d_CE_DEBUG_SEL_LSB: LSB of Copy Engine FSM Debug Status + * + * @d_CE_DEBUG_SEL_MASK: Bits indicating Copy Engine FSM Debug Status + * + */ +struct ce_reg_def { + /* copy_engine.c */ + uint32_t d_DST_WR_INDEX_ADDRESS; + uint32_t d_SRC_WATERMARK_ADDRESS; + uint32_t d_SRC_WATERMARK_LOW_MASK; + uint32_t d_SRC_WATERMARK_HIGH_MASK; + uint32_t d_DST_WATERMARK_LOW_MASK; + uint32_t d_DST_WATERMARK_HIGH_MASK; + uint32_t d_CURRENT_SRRI_ADDRESS; + uint32_t d_CURRENT_DRRI_ADDRESS; + uint32_t d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK; + uint32_t d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK; + uint32_t d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK; + uint32_t d_HOST_IS_DST_RING_LOW_WATERMARK_MASK; + uint32_t d_HOST_IS_ADDRESS; + uint32_t d_MISC_IS_ADDRESS; + uint32_t d_HOST_IS_COPY_COMPLETE_MASK; + uint32_t d_CE_WRAPPER_BASE_ADDRESS; + uint32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS; + uint32_t d_CE_DDR_ADDRESS_FOR_RRI_LOW; + uint32_t d_CE_DDR_ADDRESS_FOR_RRI_HIGH; + uint32_t d_HOST_IE_ADDRESS; + uint32_t d_HOST_IE_ADDRESS_2; + uint32_t d_HOST_IE_COPY_COMPLETE_MASK; + uint32_t d_SR_BA_ADDRESS; + uint32_t d_SR_BA_ADDRESS_HIGH; + uint32_t d_SR_SIZE_ADDRESS; + uint32_t d_CE_CTRL1_ADDRESS; + uint32_t d_CE_CTRL1_DMAX_LENGTH_MASK; + uint32_t d_DR_BA_ADDRESS; + uint32_t d_DR_BA_ADDRESS_HIGH; + uint32_t d_DR_SIZE_ADDRESS; + uint32_t d_CE_CMD_REGISTER; + uint32_t d_CE_MSI_ADDRESS; + uint32_t d_CE_MSI_ADDRESS_HIGH; + uint32_t d_CE_MSI_DATA; + uint32_t d_CE_MSI_ENABLE_BIT; + uint32_t d_MISC_IE_ADDRESS; + uint32_t d_MISC_IS_AXI_ERR_MASK; + uint32_t d_MISC_IS_DST_ADDR_ERR_MASK; + uint32_t d_MISC_IS_SRC_LEN_ERR_MASK; + uint32_t d_MISC_IS_DST_MAX_LEN_VIO_MASK; + uint32_t d_MISC_IS_DST_RING_OVERFLOW_MASK; + uint32_t d_MISC_IS_SRC_RING_OVERFLOW_MASK; + uint32_t d_SRC_WATERMARK_LOW_LSB; + uint32_t d_SRC_WATERMARK_HIGH_LSB; + uint32_t d_DST_WATERMARK_LOW_LSB; + uint32_t d_DST_WATERMARK_HIGH_LSB; + uint32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK; + uint32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB; + uint32_t d_CE_CTRL1_DMAX_LENGTH_LSB; + uint32_t d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK; + uint32_t d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK; + uint32_t d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB; + uint32_t d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB; + uint32_t d_CE_CTRL1_IDX_UPD_EN_MASK; + uint32_t d_CE_WRAPPER_DEBUG_OFFSET; + uint32_t d_CE_WRAPPER_DEBUG_SEL_MSB; + uint32_t d_CE_WRAPPER_DEBUG_SEL_LSB; + uint32_t d_CE_WRAPPER_DEBUG_SEL_MASK; + uint32_t d_CE_DEBUG_OFFSET; + uint32_t d_CE_DEBUG_SEL_MSB; + uint32_t d_CE_DEBUG_SEL_LSB; + uint32_t d_CE_DEBUG_SEL_MASK; + uint32_t d_CE0_BASE_ADDRESS; + uint32_t d_CE1_BASE_ADDRESS; + uint32_t d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES; + uint32_t d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS; + uint32_t d_HOST_IE_ADDRESS_3; + uint32_t d_HOST_IE_REG1_CE_LSB; + uint32_t d_HOST_IE_REG2_CE_LSB; + uint32_t d_HOST_IE_REG3_CE_LSB; +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable.h new file mode 100644 index 0000000000000000000000000000000000000000..fb058bcfe1ae130b407b16b7c97ef9e5d05abe44 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2015-2016, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _REGTABLE_H_ +#define _REGTABLE_H_ + +#ifdef HIF_SDIO +#include "regtable_sdio.h" +#endif + +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) +#include "reg_struct.h" +#include "regtable_pcie.h" +#endif + +#if defined(HIF_USB) +#include "regtable_usb.h" +#endif + +#if defined(HIF_IPCI) +#include "reg_struct.h" +#include "regtable_ipcie.h" +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable_ipcie.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable_ipcie.h new file mode 100644 index 0000000000000000000000000000000000000000..a19b417916a95b40d087d6fb50ec1cafaaf427ec --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable_ipcie.h @@ -0,0 +1,662 @@ +/* + * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _REGTABLE_IPCIE_H_ +#define _REGTABLE_IPCIE_H_ + +#define MISSING 0 + +#define A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK \ + (scn->targetdef->d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK) +#define A_SOC_CORE_PCIE_INTR_CAUSE_GRP1 \ + (scn->targetdef->d_A_SOC_CORE_PCIE_INTR_CAUSE_GRP1) +#define A_SOC_CORE_SPARE_1_REGISTER \ + (scn->targetdef->d_A_SOC_CORE_SPARE_1_REGISTER) +#define A_SOC_CORE_PCIE_INTR_CLR_GRP1 \ + (scn->targetdef->d_A_SOC_CORE_PCIE_INTR_CLR_GRP1) +#define A_SOC_CORE_PCIE_INTR_ENABLE_GRP1 \ + (scn->targetdef->d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP1) +#define A_SOC_PCIE_PCIE_SCRATCH_0 \ + (scn->targetdef->d_A_SOC_PCIE_PCIE_SCRATCH_0) +#define A_SOC_PCIE_PCIE_SCRATCH_1 \ + (scn->targetdef->d_A_SOC_PCIE_PCIE_SCRATCH_1) +#define A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA \ + (scn->targetdef->d_A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA) +#define A_SOC_PCIE_PCIE_SCRATCH_2 \ + (scn->targetdef->d_A_SOC_PCIE_PCIE_SCRATCH_2) +/* end Q6 iHelium emu registers */ + +#define PCIE_INTR_FIRMWARE_ROUTE_MASK \ + (scn->targetdef->d_PCIE_INTR_FIRMWARE_ROUTE_MASK) +#define A_SOC_CORE_SPARE_0_REGISTER \ + (scn->targetdef->d_A_SOC_CORE_SPARE_0_REGISTER) +#define A_SOC_CORE_SCRATCH_0_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_0_ADDRESS) +#define A_SOC_CORE_SCRATCH_1_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_1_ADDRESS) +#define A_SOC_CORE_SCRATCH_2_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_2_ADDRESS) +#define A_SOC_CORE_SCRATCH_3_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_3_ADDRESS) +#define A_SOC_CORE_SCRATCH_4_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_4_ADDRESS) +#define A_SOC_CORE_SCRATCH_5_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_5_ADDRESS) +#define A_SOC_CORE_SCRATCH_6_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_6_ADDRESS) +#define A_SOC_CORE_SCRATCH_7_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_7_ADDRESS) +#define RTC_SOC_BASE_ADDRESS (scn->targetdef->d_RTC_SOC_BASE_ADDRESS) +#define RTC_WMAC_BASE_ADDRESS (scn->targetdef->d_RTC_WMAC_BASE_ADDRESS) +#define SYSTEM_SLEEP_OFFSET (scn->targetdef->d_SYSTEM_SLEEP_OFFSET) +#define WLAN_SYSTEM_SLEEP_OFFSET \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_OFFSET) +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_LSB) +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_MASK) +#define CLOCK_CONTROL_OFFSET (scn->targetdef->d_CLOCK_CONTROL_OFFSET) +#define CLOCK_CONTROL_SI0_CLK_MASK \ + (scn->targetdef->d_CLOCK_CONTROL_SI0_CLK_MASK) +#define RESET_CONTROL_OFFSET (scn->targetdef->d_RESET_CONTROL_OFFSET) +#define RESET_CONTROL_MBOX_RST_MASK \ + (scn->targetdef->d_RESET_CONTROL_MBOX_RST_MASK) +#define RESET_CONTROL_SI0_RST_MASK \ + (scn->targetdef->d_RESET_CONTROL_SI0_RST_MASK) +#define WLAN_RESET_CONTROL_OFFSET \ + (scn->targetdef->d_WLAN_RESET_CONTROL_OFFSET) +#define WLAN_RESET_CONTROL_COLD_RST_MASK \ + (scn->targetdef->d_WLAN_RESET_CONTROL_COLD_RST_MASK) +#define WLAN_RESET_CONTROL_WARM_RST_MASK \ + (scn->targetdef->d_WLAN_RESET_CONTROL_WARM_RST_MASK) +#define GPIO_BASE_ADDRESS (scn->targetdef->d_GPIO_BASE_ADDRESS) +#define GPIO_PIN0_OFFSET (scn->targetdef->d_GPIO_PIN0_OFFSET) +#define GPIO_PIN1_OFFSET (scn->targetdef->d_GPIO_PIN1_OFFSET) +#define GPIO_PIN0_CONFIG_MASK (scn->targetdef->d_GPIO_PIN0_CONFIG_MASK) +#define GPIO_PIN1_CONFIG_MASK (scn->targetdef->d_GPIO_PIN1_CONFIG_MASK) +#define A_SOC_CORE_SCRATCH_0 (scn->targetdef->d_A_SOC_CORE_SCRATCH_0) +#define SI_CONFIG_BIDIR_OD_DATA_LSB \ + (scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_LSB) +#define SI_CONFIG_BIDIR_OD_DATA_MASK \ + (scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_MASK) +#define SI_CONFIG_I2C_LSB (scn->targetdef->d_SI_CONFIG_I2C_LSB) +#define SI_CONFIG_I2C_MASK \ + (scn->targetdef->d_SI_CONFIG_I2C_MASK) +#define SI_CONFIG_POS_SAMPLE_LSB \ + (scn->targetdef->d_SI_CONFIG_POS_SAMPLE_LSB) +#define SI_CONFIG_POS_SAMPLE_MASK \ + (scn->targetdef->d_SI_CONFIG_POS_SAMPLE_MASK) +#define SI_CONFIG_INACTIVE_CLK_LSB \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_LSB) +#define SI_CONFIG_INACTIVE_CLK_MASK \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_MASK) +#define SI_CONFIG_INACTIVE_DATA_LSB \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_LSB) +#define SI_CONFIG_INACTIVE_DATA_MASK \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_MASK) +#define SI_CONFIG_DIVIDER_LSB (scn->targetdef->d_SI_CONFIG_DIVIDER_LSB) +#define SI_CONFIG_DIVIDER_MASK (scn->targetdef->d_SI_CONFIG_DIVIDER_MASK) +#define SI_BASE_ADDRESS (scn->targetdef->d_SI_BASE_ADDRESS) +#define SI_CONFIG_OFFSET (scn->targetdef->d_SI_CONFIG_OFFSET) +#define SI_TX_DATA0_OFFSET (scn->targetdef->d_SI_TX_DATA0_OFFSET) +#define SI_TX_DATA1_OFFSET (scn->targetdef->d_SI_TX_DATA1_OFFSET) +#define SI_RX_DATA0_OFFSET (scn->targetdef->d_SI_RX_DATA0_OFFSET) +#define SI_RX_DATA1_OFFSET (scn->targetdef->d_SI_RX_DATA1_OFFSET) +#define SI_CS_OFFSET (scn->targetdef->d_SI_CS_OFFSET) +#define SI_CS_DONE_ERR_MASK (scn->targetdef->d_SI_CS_DONE_ERR_MASK) +#define SI_CS_DONE_INT_MASK (scn->targetdef->d_SI_CS_DONE_INT_MASK) +#define SI_CS_START_LSB (scn->targetdef->d_SI_CS_START_LSB) +#define SI_CS_START_MASK (scn->targetdef->d_SI_CS_START_MASK) +#define SI_CS_RX_CNT_LSB (scn->targetdef->d_SI_CS_RX_CNT_LSB) +#define SI_CS_RX_CNT_MASK (scn->targetdef->d_SI_CS_RX_CNT_MASK) +#define SI_CS_TX_CNT_LSB (scn->targetdef->d_SI_CS_TX_CNT_LSB) +#define SI_CS_TX_CNT_MASK (scn->targetdef->d_SI_CS_TX_CNT_MASK) +#define EEPROM_SZ (scn->targetdef->d_BOARD_DATA_SZ) +#define EEPROM_EXT_SZ (scn->targetdef->d_BOARD_EXT_DATA_SZ) +#define MBOX_BASE_ADDRESS (scn->targetdef->d_MBOX_BASE_ADDRESS) +#define LOCAL_SCRATCH_OFFSET (scn->targetdef->d_LOCAL_SCRATCH_OFFSET) +#define CPU_CLOCK_OFFSET (scn->targetdef->d_CPU_CLOCK_OFFSET) +#define LPO_CAL_OFFSET (scn->targetdef->d_LPO_CAL_OFFSET) +#define GPIO_PIN10_OFFSET (scn->targetdef->d_GPIO_PIN10_OFFSET) +#define GPIO_PIN11_OFFSET (scn->targetdef->d_GPIO_PIN11_OFFSET) +#define GPIO_PIN12_OFFSET (scn->targetdef->d_GPIO_PIN12_OFFSET) +#define GPIO_PIN13_OFFSET (scn->targetdef->d_GPIO_PIN13_OFFSET) +#define CLOCK_GPIO_OFFSET (scn->targetdef->d_CLOCK_GPIO_OFFSET) +#define CPU_CLOCK_STANDARD_LSB (scn->targetdef->d_CPU_CLOCK_STANDARD_LSB) +#define CPU_CLOCK_STANDARD_MASK (scn->targetdef->d_CPU_CLOCK_STANDARD_MASK) +#define LPO_CAL_ENABLE_LSB (scn->targetdef->d_LPO_CAL_ENABLE_LSB) +#define LPO_CAL_ENABLE_MASK (scn->targetdef->d_LPO_CAL_ENABLE_MASK) +#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB \ + (scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB) +#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK \ + (scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK) +#define ANALOG_INTF_BASE_ADDRESS (scn->targetdef->d_ANALOG_INTF_BASE_ADDRESS) +#define WLAN_MAC_BASE_ADDRESS (scn->targetdef->d_WLAN_MAC_BASE_ADDRESS) +#define FW_INDICATOR_ADDRESS (scn->targetdef->d_FW_INDICATOR_ADDRESS) +#define DRAM_BASE_ADDRESS (scn->targetdef->d_DRAM_BASE_ADDRESS) +#define SOC_CORE_BASE_ADDRESS (scn->targetdef->d_SOC_CORE_BASE_ADDRESS) +#define CORE_CTRL_ADDRESS (scn->targetdef->d_CORE_CTRL_ADDRESS) +#define CE_COUNT (scn->targetdef->d_CE_COUNT) +#define PCIE_INTR_ENABLE_ADDRESS (scn->targetdef->d_PCIE_INTR_ENABLE_ADDRESS) +#define PCIE_INTR_CLR_ADDRESS (scn->targetdef->d_PCIE_INTR_CLR_ADDRESS) +#define PCIE_INTR_FIRMWARE_MASK (scn->targetdef->d_PCIE_INTR_FIRMWARE_MASK) +#define PCIE_INTR_CE_MASK_ALL (scn->targetdef->d_PCIE_INTR_CE_MASK_ALL) +#define CORE_CTRL_CPU_INTR_MASK (scn->targetdef->d_CORE_CTRL_CPU_INTR_MASK) +#define PCIE_INTR_CAUSE_ADDRESS (scn->targetdef->d_PCIE_INTR_CAUSE_ADDRESS) +#define SOC_RESET_CONTROL_ADDRESS (scn->targetdef->d_SOC_RESET_CONTROL_ADDRESS) +#define HOST_GROUP0_MASK (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL | \ + A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK) +#define SOC_RESET_CONTROL_CE_RST_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_CE_RST_MASK) +#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK) +#define CPU_INTR_ADDRESS (scn->targetdef->d_CPU_INTR_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ADDRESS \ + (scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK \ + (scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK) +#define SOC_LF_TIMER_STATUS0_ADDRESS \ + (scn->targetdef->d_SOC_LF_TIMER_STATUS0_ADDRESS) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB \ + (scn->targetdef->d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) + +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_GET(x) \ + (((x) & SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) >> \ + SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_SET(x) \ + (((x) << SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) & \ + SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) + +/* hif_ipci.c */ +#define CHIP_ID_ADDRESS (scn->targetdef->d_SOC_CHIP_ID_ADDRESS) +#define SOC_CHIP_ID_REVISION_MASK (scn->targetdef->d_SOC_CHIP_ID_REVISION_MASK) +#define SOC_CHIP_ID_REVISION_LSB (scn->targetdef->d_SOC_CHIP_ID_REVISION_LSB) +#define SOC_CHIP_ID_VERSION_MASK (scn->targetdef->d_SOC_CHIP_ID_VERSION_MASK) +#define SOC_CHIP_ID_VERSION_LSB (scn->targetdef->d_SOC_CHIP_ID_VERSION_LSB) +#define CHIP_ID_REVISION_GET(x) \ + (((x) & SOC_CHIP_ID_REVISION_MASK) >> SOC_CHIP_ID_REVISION_LSB) +#define CHIP_ID_VERSION_GET(x) \ + (((x) & SOC_CHIP_ID_VERSION_MASK) >> SOC_CHIP_ID_VERSION_LSB) +/* hif_ipci.c end */ + +/* misc */ +#define SR_WR_INDEX_ADDRESS (scn->targetdef->d_SR_WR_INDEX_ADDRESS) +#define DST_WATERMARK_ADDRESS (scn->targetdef->d_DST_WATERMARK_ADDRESS) +#define SOC_POWER_REG_OFFSET (scn->targetdef->d_SOC_POWER_REG_OFFSET) +/* end */ + +/* copy_engine.c */ +/* end */ +/* PLL start */ +#define EFUSE_OFFSET (scn->targetdef->d_EFUSE_OFFSET) +#define EFUSE_XTAL_SEL_MSB (scn->targetdef->d_EFUSE_XTAL_SEL_MSB) +#define EFUSE_XTAL_SEL_LSB (scn->targetdef->d_EFUSE_XTAL_SEL_LSB) +#define EFUSE_XTAL_SEL_MASK (scn->targetdef->d_EFUSE_XTAL_SEL_MASK) +#define BB_PLL_CONFIG_OFFSET (scn->targetdef->d_BB_PLL_CONFIG_OFFSET) +#define BB_PLL_CONFIG_OUTDIV_MSB (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MSB) +#define BB_PLL_CONFIG_OUTDIV_LSB (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_LSB) +#define BB_PLL_CONFIG_OUTDIV_MASK (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MASK) +#define BB_PLL_CONFIG_FRAC_MSB (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MSB) +#define BB_PLL_CONFIG_FRAC_LSB (scn->targetdef->d_BB_PLL_CONFIG_FRAC_LSB) +#define BB_PLL_CONFIG_FRAC_MASK (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MASK) +#define WLAN_PLL_SETTLE_TIME_MSB (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MSB) +#define WLAN_PLL_SETTLE_TIME_LSB (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_LSB) +#define WLAN_PLL_SETTLE_TIME_MASK (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MASK) +#define WLAN_PLL_SETTLE_OFFSET (scn->targetdef->d_WLAN_PLL_SETTLE_OFFSET) +#define WLAN_PLL_SETTLE_SW_MASK (scn->targetdef->d_WLAN_PLL_SETTLE_SW_MASK) +#define WLAN_PLL_SETTLE_RSTMASK (scn->targetdef->d_WLAN_PLL_SETTLE_RSTMASK) +#define WLAN_PLL_SETTLE_RESET (scn->targetdef->d_WLAN_PLL_SETTLE_RESET) +#define WLAN_PLL_CONTROL_NOPWD_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MSB) +#define WLAN_PLL_CONTROL_NOPWD_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_LSB) +#define WLAN_PLL_CONTROL_NOPWD_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MASK) +#define WLAN_PLL_CONTROL_BYPASS_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MSB) +#define WLAN_PLL_CONTROL_BYPASS_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_LSB) +#define WLAN_PLL_CONTROL_BYPASS_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MASK) +#define WLAN_PLL_CONTROL_BYPASS_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_RESET) +#define WLAN_PLL_CONTROL_CLK_SEL_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MSB) +#define WLAN_PLL_CONTROL_CLK_SEL_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_LSB) +#define WLAN_PLL_CONTROL_CLK_SEL_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MASK) +#define WLAN_PLL_CONTROL_CLK_SEL_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_RESET) +#define WLAN_PLL_CONTROL_REFDIV_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MSB) +#define WLAN_PLL_CONTROL_REFDIV_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_LSB) +#define WLAN_PLL_CONTROL_REFDIV_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MASK) +#define WLAN_PLL_CONTROL_REFDIV_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_RESET) +#define WLAN_PLL_CONTROL_DIV_MSB (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MSB) +#define WLAN_PLL_CONTROL_DIV_LSB (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_LSB) +#define WLAN_PLL_CONTROL_DIV_MASK (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MASK) +#define WLAN_PLL_CONTROL_DIV_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_RESET) +#define WLAN_PLL_CONTROL_OFFSET (scn->targetdef->d_WLAN_PLL_CONTROL_OFFSET) +#define WLAN_PLL_CONTROL_SW_MASK (scn->targetdef->d_WLAN_PLL_CONTROL_SW_MASK) +#define WLAN_PLL_CONTROL_RSTMASK (scn->targetdef->d_WLAN_PLL_CONTROL_RSTMASK) +#define WLAN_PLL_CONTROL_RESET (scn->targetdef->d_WLAN_PLL_CONTROL_RESET) +#define SOC_CORE_CLK_CTRL_OFFSET (scn->targetdef->d_SOC_CORE_CLK_CTRL_OFFSET) +#define SOC_CORE_CLK_CTRL_DIV_MSB (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MSB) +#define SOC_CORE_CLK_CTRL_DIV_LSB (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_LSB) +#define SOC_CORE_CLK_CTRL_DIV_MASK \ + (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_MSB \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_LSB \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_LSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_MASK \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_RESET \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_RESET) +#define RTC_SYNC_STATUS_OFFSET (scn->targetdef->d_RTC_SYNC_STATUS_OFFSET) +#define SOC_CPU_CLOCK_OFFSET (scn->targetdef->d_SOC_CPU_CLOCK_OFFSET) +#define SOC_CPU_CLOCK_STANDARD_MSB \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MSB) +#define SOC_CPU_CLOCK_STANDARD_LSB \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_LSB) +#define SOC_CPU_CLOCK_STANDARD_MASK \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MASK) +/* PLL end */ + +#define FW_CPU_PLL_CONFIG \ + (scn->targetdef->d_FW_CPU_PLL_CONFIG) + +#define WIFICMN_PCIE_BAR_REG_ADDRESS \ + (sc->targetdef->d_WIFICMN_PCIE_BAR_REG_ADDRESS) + + /* htt tx */ +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK) +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK) +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK) +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK) +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB) +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB) +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB) +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB) + +#define CE_CMD_ADDRESS \ + (scn->targetdef->d_CE_CMD_ADDRESS) +#define CE_CMD_HALT_MASK \ + (scn->targetdef->d_CE_CMD_HALT_MASK) +#define CE_CMD_HALT_STATUS_MASK \ + (scn->targetdef->d_CE_CMD_HALT_STATUS_MASK) +#define CE_CMD_HALT_STATUS_LSB \ + (scn->targetdef->d_CE_CMD_HALT_STATUS_LSB) + +#define SI_CONFIG_ERR_INT_MASK \ + (scn->targetdef->d_SI_CONFIG_ERR_INT_MASK) +#define SI_CONFIG_ERR_INT_LSB \ + (scn->targetdef->d_SI_CONFIG_ERR_INT_LSB) +#define GPIO_ENABLE_W1TS_LOW_ADDRESS \ + (scn->targetdef->d_GPIO_ENABLE_W1TS_LOW_ADDRESS) +#define GPIO_PIN0_CONFIG_LSB \ + (scn->targetdef->d_GPIO_PIN0_CONFIG_LSB) +#define GPIO_PIN0_PAD_PULL_LSB \ + (scn->targetdef->d_GPIO_PIN0_PAD_PULL_LSB) +#define GPIO_PIN0_PAD_PULL_MASK \ + (scn->targetdef->d_GPIO_PIN0_PAD_PULL_MASK) + +#define SOC_CHIP_ID_REVISION_MSB \ + (scn->targetdef->d_SOC_CHIP_ID_REVISION_MSB) + +#define FW_AXI_MSI_ADDR \ + (scn->targetdef->d_FW_AXI_MSI_ADDR) +#define FW_AXI_MSI_DATA \ + (scn->targetdef->d_FW_AXI_MSI_DATA) +#define WLAN_SUBSYSTEM_CORE_ID_ADDRESS \ + (scn->targetdef->d_WLAN_SUBSYSTEM_CORE_ID_ADDRESS) +#define FPGA_VERSION_ADDRESS \ + (scn->targetdef->d_FPGA_VERSION_ADDRESS) + +/* SET macros */ +#define WLAN_SYSTEM_SLEEP_DISABLE_SET(x) \ + (((x) << WLAN_SYSTEM_SLEEP_DISABLE_LSB) & \ + WLAN_SYSTEM_SLEEP_DISABLE_MASK) +#define SI_CONFIG_BIDIR_OD_DATA_SET(x) \ + (((x) << SI_CONFIG_BIDIR_OD_DATA_LSB) & SI_CONFIG_BIDIR_OD_DATA_MASK) +#define SI_CONFIG_I2C_SET(x) (((x) << SI_CONFIG_I2C_LSB) & SI_CONFIG_I2C_MASK) +#define SI_CONFIG_POS_SAMPLE_SET(x) \ + (((x) << SI_CONFIG_POS_SAMPLE_LSB) & SI_CONFIG_POS_SAMPLE_MASK) +#define SI_CONFIG_INACTIVE_CLK_SET(x) \ + (((x) << SI_CONFIG_INACTIVE_CLK_LSB) & SI_CONFIG_INACTIVE_CLK_MASK) +#define SI_CONFIG_INACTIVE_DATA_SET(x) \ + (((x) << SI_CONFIG_INACTIVE_DATA_LSB) & SI_CONFIG_INACTIVE_DATA_MASK) +#define SI_CONFIG_DIVIDER_SET(x) \ + (((x) << SI_CONFIG_DIVIDER_LSB) & SI_CONFIG_DIVIDER_MASK) +#define SI_CS_START_SET(x) (((x) << SI_CS_START_LSB) & SI_CS_START_MASK) +#define SI_CS_RX_CNT_SET(x) (((x) << SI_CS_RX_CNT_LSB) & SI_CS_RX_CNT_MASK) +#define SI_CS_TX_CNT_SET(x) (((x) << SI_CS_TX_CNT_LSB) & SI_CS_TX_CNT_MASK) +#define LPO_CAL_ENABLE_SET(x) \ + (((x) << LPO_CAL_ENABLE_LSB) & LPO_CAL_ENABLE_MASK) +#define CPU_CLOCK_STANDARD_SET(x) \ + (((x) << CPU_CLOCK_STANDARD_LSB) & CPU_CLOCK_STANDARD_MASK) +#define CLOCK_GPIO_BT_CLK_OUT_EN_SET(x) \ + (((x) << CLOCK_GPIO_BT_CLK_OUT_EN_LSB) & CLOCK_GPIO_BT_CLK_OUT_EN_MASK) +/* copy_engine.c */ +/* end */ +/* PLL start */ +#define EFUSE_XTAL_SEL_GET(x) \ + (((x) & EFUSE_XTAL_SEL_MASK) >> EFUSE_XTAL_SEL_LSB) +#define EFUSE_XTAL_SEL_SET(x) \ + (((x) << EFUSE_XTAL_SEL_LSB) & EFUSE_XTAL_SEL_MASK) +#define BB_PLL_CONFIG_OUTDIV_GET(x) \ + (((x) & BB_PLL_CONFIG_OUTDIV_MASK) >> BB_PLL_CONFIG_OUTDIV_LSB) +#define BB_PLL_CONFIG_OUTDIV_SET(x) \ + (((x) << BB_PLL_CONFIG_OUTDIV_LSB) & BB_PLL_CONFIG_OUTDIV_MASK) +#define BB_PLL_CONFIG_FRAC_GET(x) \ + (((x) & BB_PLL_CONFIG_FRAC_MASK) >> BB_PLL_CONFIG_FRAC_LSB) +#define BB_PLL_CONFIG_FRAC_SET(x) \ + (((x) << BB_PLL_CONFIG_FRAC_LSB) & BB_PLL_CONFIG_FRAC_MASK) +#define WLAN_PLL_SETTLE_TIME_GET(x) \ + (((x) & WLAN_PLL_SETTLE_TIME_MASK) >> WLAN_PLL_SETTLE_TIME_LSB) +#define WLAN_PLL_SETTLE_TIME_SET(x) \ + (((x) << WLAN_PLL_SETTLE_TIME_LSB) & WLAN_PLL_SETTLE_TIME_MASK) +#define WLAN_PLL_CONTROL_NOPWD_GET(x) \ + (((x) & WLAN_PLL_CONTROL_NOPWD_MASK) >> WLAN_PLL_CONTROL_NOPWD_LSB) +#define WLAN_PLL_CONTROL_NOPWD_SET(x) \ + (((x) << WLAN_PLL_CONTROL_NOPWD_LSB) & WLAN_PLL_CONTROL_NOPWD_MASK) +#define WLAN_PLL_CONTROL_BYPASS_GET(x) \ + (((x) & WLAN_PLL_CONTROL_BYPASS_MASK) >> WLAN_PLL_CONTROL_BYPASS_LSB) +#define WLAN_PLL_CONTROL_BYPASS_SET(x) \ + (((x) << WLAN_PLL_CONTROL_BYPASS_LSB) & WLAN_PLL_CONTROL_BYPASS_MASK) +#define WLAN_PLL_CONTROL_CLK_SEL_GET(x) \ + (((x) & WLAN_PLL_CONTROL_CLK_SEL_MASK) >> WLAN_PLL_CONTROL_CLK_SEL_LSB) +#define WLAN_PLL_CONTROL_CLK_SEL_SET(x) \ + (((x) << WLAN_PLL_CONTROL_CLK_SEL_LSB) & WLAN_PLL_CONTROL_CLK_SEL_MASK) +#define WLAN_PLL_CONTROL_REFDIV_GET(x) \ + (((x) & WLAN_PLL_CONTROL_REFDIV_MASK) >> WLAN_PLL_CONTROL_REFDIV_LSB) +#define WLAN_PLL_CONTROL_REFDIV_SET(x) \ + (((x) << WLAN_PLL_CONTROL_REFDIV_LSB) & WLAN_PLL_CONTROL_REFDIV_MASK) +#define WLAN_PLL_CONTROL_DIV_GET(x) \ + (((x) & WLAN_PLL_CONTROL_DIV_MASK) >> WLAN_PLL_CONTROL_DIV_LSB) +#define WLAN_PLL_CONTROL_DIV_SET(x) \ + (((x) << WLAN_PLL_CONTROL_DIV_LSB) & WLAN_PLL_CONTROL_DIV_MASK) +#define SOC_CORE_CLK_CTRL_DIV_GET(x) \ + (((x) & SOC_CORE_CLK_CTRL_DIV_MASK) >> SOC_CORE_CLK_CTRL_DIV_LSB) +#define SOC_CORE_CLK_CTRL_DIV_SET(x) \ + (((x) << SOC_CORE_CLK_CTRL_DIV_LSB) & SOC_CORE_CLK_CTRL_DIV_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_GET(x) \ + (((x) & RTC_SYNC_STATUS_PLL_CHANGING_MASK) >> \ + RTC_SYNC_STATUS_PLL_CHANGING_LSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_SET(x) \ + (((x) << RTC_SYNC_STATUS_PLL_CHANGING_LSB) & \ + RTC_SYNC_STATUS_PLL_CHANGING_MASK) +#define SOC_CPU_CLOCK_STANDARD_GET(x) \ + (((x) & SOC_CPU_CLOCK_STANDARD_MASK) >> SOC_CPU_CLOCK_STANDARD_LSB) +#define SOC_CPU_CLOCK_STANDARD_SET(x) \ + (((x) << SOC_CPU_CLOCK_STANDARD_LSB) & SOC_CPU_CLOCK_STANDARD_MASK) +/* PLL end */ +#define WLAN_GPIO_PIN0_CONFIG_SET(x) \ + (((x) << GPIO_PIN0_CONFIG_LSB) & GPIO_PIN0_CONFIG_MASK) +#define WLAN_GPIO_PIN0_PAD_PULL_SET(x) \ + (((x) << GPIO_PIN0_PAD_PULL_LSB) & GPIO_PIN0_PAD_PULL_MASK) +#define SI_CONFIG_ERR_INT_SET(x) \ + (((x) << SI_CONFIG_ERR_INT_LSB) & SI_CONFIG_ERR_INT_MASK) + +#ifdef QCA_WIFI_3_0_ADRASTEA +#define Q6_ENABLE_REGISTER_0 \ + (scn->targetdef->d_Q6_ENABLE_REGISTER_0) +#define Q6_ENABLE_REGISTER_1 \ + (scn->targetdef->d_Q6_ENABLE_REGISTER_1) +#define Q6_CAUSE_REGISTER_0 \ + (scn->targetdef->d_Q6_CAUSE_REGISTER_0) +#define Q6_CAUSE_REGISTER_1 \ + (scn->targetdef->d_Q6_CAUSE_REGISTER_1) +#define Q6_CLEAR_REGISTER_0 \ + (scn->targetdef->d_Q6_CLEAR_REGISTER_0) +#define Q6_CLEAR_REGISTER_1 \ + (scn->targetdef->d_Q6_CLEAR_REGISTER_1) +#endif + +#ifdef CONFIG_BYPASS_QMI +#define BYPASS_QMI_TEMP_REGISTER \ + (scn->targetdef->d_BYPASS_QMI_TEMP_REGISTER) +#endif + +#define A_SOC_PCIE_PCIE_BAR0_START (scn->hostdef->d_A_SOC_PCIE_PCIE_BAR0_START) +#define DESC_DATA_FLAG_MASK (scn->hostdef->d_DESC_DATA_FLAG_MASK) +#define MUX_ID_MASK (scn->hostdef->d_MUX_ID_MASK) +#define TRANSACTION_ID_MASK (scn->hostdef->d_TRANSACTION_ID_MASK) +#define HOST_CE_COUNT (scn->hostdef->d_HOST_CE_COUNT) +#define ENABLE_MSI (scn->hostdef->d_ENABLE_MSI) +#define INT_STATUS_ENABLE_ERROR_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_ERROR_LSB) +#define INT_STATUS_ENABLE_ERROR_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_ERROR_MASK) +#define INT_STATUS_ENABLE_CPU_LSB (scn->hostdef->d_INT_STATUS_ENABLE_CPU_LSB) +#define INT_STATUS_ENABLE_CPU_MASK (scn->hostdef->d_INT_STATUS_ENABLE_CPU_MASK) +#define INT_STATUS_ENABLE_COUNTER_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_LSB) +#define INT_STATUS_ENABLE_COUNTER_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_MASK) +#define INT_STATUS_ENABLE_MBOX_DATA_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_LSB) +#define INT_STATUS_ENABLE_MBOX_DATA_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_MASK) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB \ + (scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_LSB) +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK \ + (scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_MASK) +#define INT_STATUS_ENABLE_ADDRESS \ + (scn->hostdef->d_INT_STATUS_ENABLE_ADDRESS) +#define CPU_INT_STATUS_ENABLE_BIT_LSB \ + (scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_LSB) +#define CPU_INT_STATUS_ENABLE_BIT_MASK \ + (scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_MASK) +#define HOST_INT_STATUS_ADDRESS (scn->hostdef->d_HOST_INT_STATUS_ADDRESS) +#define CPU_INT_STATUS_ADDRESS (scn->hostdef->d_CPU_INT_STATUS_ADDRESS) +#define ERROR_INT_STATUS_ADDRESS (scn->hostdef->d_ERROR_INT_STATUS_ADDRESS) +#define ERROR_INT_STATUS_WAKEUP_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_MASK) +#define ERROR_INT_STATUS_WAKEUP_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_LSB) +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK) +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB) +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_MASK) +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_LSB) +#define COUNT_DEC_ADDRESS (scn->hostdef->d_COUNT_DEC_ADDRESS) +#define HOST_INT_STATUS_CPU_MASK (scn->hostdef->d_HOST_INT_STATUS_CPU_MASK) +#define HOST_INT_STATUS_CPU_LSB (scn->hostdef->d_HOST_INT_STATUS_CPU_LSB) +#define HOST_INT_STATUS_ERROR_MASK (scn->hostdef->d_HOST_INT_STATUS_ERROR_MASK) +#define HOST_INT_STATUS_ERROR_LSB (scn->hostdef->d_HOST_INT_STATUS_ERROR_LSB) +#define HOST_INT_STATUS_COUNTER_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_COUNTER_MASK) +#define HOST_INT_STATUS_COUNTER_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_COUNTER_LSB) +#define RX_LOOKAHEAD_VALID_ADDRESS (scn->hostdef->d_RX_LOOKAHEAD_VALID_ADDRESS) +#define WINDOW_DATA_ADDRESS (scn->hostdef->d_WINDOW_DATA_ADDRESS) +#define WINDOW_READ_ADDR_ADDRESS (scn->hostdef->d_WINDOW_READ_ADDR_ADDRESS) +#define WINDOW_WRITE_ADDR_ADDRESS (scn->hostdef->d_WINDOW_WRITE_ADDR_ADDRESS) +#define SOC_GLOBAL_RESET_ADDRESS (scn->hostdef->d_SOC_GLOBAL_RESET_ADDRESS) +#define RTC_STATE_ADDRESS (scn->hostdef->d_RTC_STATE_ADDRESS) +#define RTC_STATE_COLD_RESET_MASK (scn->hostdef->d_RTC_STATE_COLD_RESET_MASK) +#define PCIE_LOCAL_BASE_ADDRESS (scn->hostdef->d_PCIE_LOCAL_BASE_ADDRESS) +#define PCIE_SOC_WAKE_RESET (scn->hostdef->d_PCIE_SOC_WAKE_RESET) +#define PCIE_SOC_WAKE_ADDRESS (scn->hostdef->d_PCIE_SOC_WAKE_ADDRESS) +#define PCIE_SOC_WAKE_V_MASK (scn->hostdef->d_PCIE_SOC_WAKE_V_MASK) +#define RTC_STATE_V_MASK (scn->hostdef->d_RTC_STATE_V_MASK) +#define RTC_STATE_V_LSB (scn->hostdef->d_RTC_STATE_V_LSB) +#define FW_IND_EVENT_PENDING (scn->hostdef->d_FW_IND_EVENT_PENDING) +#define FW_IND_INITIALIZED (scn->hostdef->d_FW_IND_INITIALIZED) +#define FW_IND_HELPER (scn->hostdef->d_FW_IND_HELPER) +#define RTC_STATE_V_ON (scn->hostdef->d_RTC_STATE_V_ON) + +#define FW_IND_HOST_READY (scn->hostdef->d_FW_IND_HOST_READY) + +#if defined(SDIO_3_0) +#define HOST_INT_STATUS_MBOX_DATA_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_MASK) +#define HOST_INT_STATUS_MBOX_DATA_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_LSB) +#endif + +#if !defined(SOC_PCIE_BASE_ADDRESS) +#define SOC_PCIE_BASE_ADDRESS 0 +#endif + +#if !defined(PCIE_SOC_RDY_STATUS_ADDRESS) +#define PCIE_SOC_RDY_STATUS_ADDRESS 0 +#define PCIE_SOC_RDY_STATUS_BAR_MASK 0 +#endif + +#if !defined(MSI_MAGIC_ADR_ADDRESS) +#define MSI_MAGIC_ADR_ADDRESS 0 +#define MSI_MAGIC_ADDRESS 0 +#endif + +/* SET/GET macros */ +#define INT_STATUS_ENABLE_ERROR_SET(x) \ + (((x) << INT_STATUS_ENABLE_ERROR_LSB) & INT_STATUS_ENABLE_ERROR_MASK) +#define INT_STATUS_ENABLE_CPU_SET(x) \ + (((x) << INT_STATUS_ENABLE_CPU_LSB) & INT_STATUS_ENABLE_CPU_MASK) +#define INT_STATUS_ENABLE_COUNTER_SET(x) \ + (((x) << INT_STATUS_ENABLE_COUNTER_LSB) & \ + INT_STATUS_ENABLE_COUNTER_MASK) +#define INT_STATUS_ENABLE_MBOX_DATA_SET(x) \ + (((x) << INT_STATUS_ENABLE_MBOX_DATA_LSB) & \ + INT_STATUS_ENABLE_MBOX_DATA_MASK) +#define CPU_INT_STATUS_ENABLE_BIT_SET(x) \ + (((x) << CPU_INT_STATUS_ENABLE_BIT_LSB) & \ + CPU_INT_STATUS_ENABLE_BIT_MASK) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(x) \ + (((x) << ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) & \ + ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(x) \ + (((x) << ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) & \ + ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) +#define COUNTER_INT_STATUS_ENABLE_BIT_SET(x) \ + (((x) << COUNTER_INT_STATUS_ENABLE_BIT_LSB) & \ + COUNTER_INT_STATUS_ENABLE_BIT_MASK) +#define ERROR_INT_STATUS_WAKEUP_GET(x) \ + (((x) & ERROR_INT_STATUS_WAKEUP_MASK) >> \ + ERROR_INT_STATUS_WAKEUP_LSB) +#define ERROR_INT_STATUS_RX_UNDERFLOW_GET(x) \ + (((x) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK) >> \ + ERROR_INT_STATUS_RX_UNDERFLOW_LSB) +#define ERROR_INT_STATUS_TX_OVERFLOW_GET(x) \ + (((x) & ERROR_INT_STATUS_TX_OVERFLOW_MASK) >> \ + ERROR_INT_STATUS_TX_OVERFLOW_LSB) +#define HOST_INT_STATUS_CPU_GET(x) \ + (((x) & HOST_INT_STATUS_CPU_MASK) >> HOST_INT_STATUS_CPU_LSB) +#define HOST_INT_STATUS_ERROR_GET(x) \ + (((x) & HOST_INT_STATUS_ERROR_MASK) >> HOST_INT_STATUS_ERROR_LSB) +#define HOST_INT_STATUS_COUNTER_GET(x) \ + (((x) & HOST_INT_STATUS_COUNTER_MASK) >> HOST_INT_STATUS_COUNTER_LSB) +#define RTC_STATE_V_GET(x) \ + (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB) +#if defined(SDIO_3_0) +#define HOST_INT_STATUS_MBOX_DATA_GET(x) \ + (((x) & HOST_INT_STATUS_MBOX_DATA_MASK) >> \ + HOST_INT_STATUS_MBOX_DATA_LSB) +#endif + +#define INVALID_REG_LOC_DUMMY_DATA 0xAA + +#define AR6320_CORE_CLK_DIV_ADDR 0x403fa8 +#define AR6320_CPU_PLL_INIT_DONE_ADDR 0x403fd0 +#define AR6320_CPU_SPEED_ADDR 0x403fa4 +#define AR6320V2_CORE_CLK_DIV_ADDR 0x403fd8 +#define AR6320V2_CPU_PLL_INIT_DONE_ADDR 0x403fd0 +#define AR6320V2_CPU_SPEED_ADDR 0x403fd4 +#define AR6320V3_CORE_CLK_DIV_ADDR 0x404028 +#define AR6320V3_CPU_PLL_INIT_DONE_ADDR 0x404020 +#define AR6320V3_CPU_SPEED_ADDR 0x404024 + +enum a_refclk_speed_t { + SOC_REFCLK_UNKNOWN = -1, /* Unsupported ref clock -- use PLL Bypass */ + SOC_REFCLK_48_MHZ = 0, + SOC_REFCLK_19_2_MHZ = 1, + SOC_REFCLK_24_MHZ = 2, + SOC_REFCLK_26_MHZ = 3, + SOC_REFCLK_37_4_MHZ = 4, + SOC_REFCLK_38_4_MHZ = 5, + SOC_REFCLK_40_MHZ = 6, + SOC_REFCLK_52_MHZ = 7, +}; + +#define A_REFCLK_UNKNOWN SOC_REFCLK_UNKNOWN +#define A_REFCLK_48_MHZ SOC_REFCLK_48_MHZ +#define A_REFCLK_19_2_MHZ SOC_REFCLK_19_2_MHZ +#define A_REFCLK_24_MHZ SOC_REFCLK_24_MHZ +#define A_REFCLK_26_MHZ SOC_REFCLK_26_MHZ +#define A_REFCLK_37_4_MHZ SOC_REFCLK_37_4_MHZ +#define A_REFCLK_38_4_MHZ SOC_REFCLK_38_4_MHZ +#define A_REFCLK_40_MHZ SOC_REFCLK_40_MHZ +#define A_REFCLK_52_MHZ SOC_REFCLK_52_MHZ + +#define TARGET_CPU_FREQ 176000000 + +struct wlan_pll_s { + uint32_t refdiv; + uint32_t div; + uint32_t rnfrac; + uint32_t outdiv; +}; + +struct cmnos_clock_s { + enum a_refclk_speed_t refclk_speed; + uint32_t refclk_hz; + uint32_t pll_settling_time; /* 50us */ + struct wlan_pll_s wlan_pll; +}; + +struct tgt_reg_section { + uint32_t start_addr; + uint32_t end_addr; +}; + +struct tgt_reg_table { + const struct tgt_reg_section *section; + uint32_t section_size; +}; + +struct hif_softc; +void hif_target_register_tbl_attach(struct hif_softc *scn, u32 target_type); +void hif_register_tbl_attach(struct hif_softc *scn, u32 hif_type); + +#endif /* _REGTABLE_IPCIE_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable_pcie.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable_pcie.h new file mode 100644 index 0000000000000000000000000000000000000000..3dc81033a922252d1426864f148a2b57c66f312b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable_pcie.h @@ -0,0 +1,665 @@ +/* + * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _REGTABLE_PCIE_H_ +#define _REGTABLE_PCIE_H_ + +#define MISSING 0 + +#define A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK \ + (scn->targetdef->d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK) +#define A_SOC_CORE_PCIE_INTR_CAUSE_GRP1 \ + (scn->targetdef->d_A_SOC_CORE_PCIE_INTR_CAUSE_GRP1) +#define A_SOC_CORE_SPARE_1_REGISTER \ + (scn->targetdef->d_A_SOC_CORE_SPARE_1_REGISTER) +#define A_SOC_CORE_PCIE_INTR_CLR_GRP1 \ + (scn->targetdef->d_A_SOC_CORE_PCIE_INTR_CLR_GRP1) +#define A_SOC_CORE_PCIE_INTR_ENABLE_GRP1 \ + (scn->targetdef->d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP1) +#define A_SOC_PCIE_PCIE_SCRATCH_0 \ + (scn->targetdef->d_A_SOC_PCIE_PCIE_SCRATCH_0) +#define A_SOC_PCIE_PCIE_SCRATCH_1 \ + (scn->targetdef->d_A_SOC_PCIE_PCIE_SCRATCH_1) +#define A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA \ + (scn->targetdef->d_A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA) +#define A_SOC_PCIE_PCIE_SCRATCH_2 \ + (scn->targetdef->d_A_SOC_PCIE_PCIE_SCRATCH_2) +/* end Q6 iHelium emu registers */ + +#define PCIE_INTR_FIRMWARE_ROUTE_MASK \ + (scn->targetdef->d_PCIE_INTR_FIRMWARE_ROUTE_MASK) +#define A_SOC_CORE_SPARE_0_REGISTER \ + (scn->targetdef->d_A_SOC_CORE_SPARE_0_REGISTER) +#define A_SOC_CORE_SCRATCH_0_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_0_ADDRESS) +#define A_SOC_CORE_SCRATCH_1_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_1_ADDRESS) +#define A_SOC_CORE_SCRATCH_2_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_2_ADDRESS) +#define A_SOC_CORE_SCRATCH_3_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_3_ADDRESS) +#define A_SOC_CORE_SCRATCH_4_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_4_ADDRESS) +#define A_SOC_CORE_SCRATCH_5_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_5_ADDRESS) +#define A_SOC_CORE_SCRATCH_6_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_6_ADDRESS) +#define A_SOC_CORE_SCRATCH_7_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_7_ADDRESS) +#define RTC_SOC_BASE_ADDRESS (scn->targetdef->d_RTC_SOC_BASE_ADDRESS) +#define RTC_WMAC_BASE_ADDRESS (scn->targetdef->d_RTC_WMAC_BASE_ADDRESS) +#define SYSTEM_SLEEP_OFFSET (scn->targetdef->d_SYSTEM_SLEEP_OFFSET) +#define WLAN_SYSTEM_SLEEP_OFFSET \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_OFFSET) +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_LSB) +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_MASK) +#define CLOCK_CONTROL_OFFSET (scn->targetdef->d_CLOCK_CONTROL_OFFSET) +#define CLOCK_CONTROL_SI0_CLK_MASK \ + (scn->targetdef->d_CLOCK_CONTROL_SI0_CLK_MASK) +#define RESET_CONTROL_OFFSET (scn->targetdef->d_RESET_CONTROL_OFFSET) +#define RESET_CONTROL_MBOX_RST_MASK \ + (scn->targetdef->d_RESET_CONTROL_MBOX_RST_MASK) +#define RESET_CONTROL_SI0_RST_MASK \ + (scn->targetdef->d_RESET_CONTROL_SI0_RST_MASK) +#define WLAN_RESET_CONTROL_OFFSET \ + (scn->targetdef->d_WLAN_RESET_CONTROL_OFFSET) +#define WLAN_RESET_CONTROL_COLD_RST_MASK \ + (scn->targetdef->d_WLAN_RESET_CONTROL_COLD_RST_MASK) +#define WLAN_RESET_CONTROL_WARM_RST_MASK \ + (scn->targetdef->d_WLAN_RESET_CONTROL_WARM_RST_MASK) +#define GPIO_BASE_ADDRESS (scn->targetdef->d_GPIO_BASE_ADDRESS) +#define GPIO_PIN0_OFFSET (scn->targetdef->d_GPIO_PIN0_OFFSET) +#define GPIO_PIN1_OFFSET (scn->targetdef->d_GPIO_PIN1_OFFSET) +#define GPIO_PIN0_CONFIG_MASK (scn->targetdef->d_GPIO_PIN0_CONFIG_MASK) +#define GPIO_PIN1_CONFIG_MASK (scn->targetdef->d_GPIO_PIN1_CONFIG_MASK) +#define A_SOC_CORE_SCRATCH_0 (scn->targetdef->d_A_SOC_CORE_SCRATCH_0) +#define SI_CONFIG_BIDIR_OD_DATA_LSB \ + (scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_LSB) +#define SI_CONFIG_BIDIR_OD_DATA_MASK \ + (scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_MASK) +#define SI_CONFIG_I2C_LSB (scn->targetdef->d_SI_CONFIG_I2C_LSB) +#define SI_CONFIG_I2C_MASK \ + (scn->targetdef->d_SI_CONFIG_I2C_MASK) +#define SI_CONFIG_POS_SAMPLE_LSB \ + (scn->targetdef->d_SI_CONFIG_POS_SAMPLE_LSB) +#define SI_CONFIG_POS_SAMPLE_MASK \ + (scn->targetdef->d_SI_CONFIG_POS_SAMPLE_MASK) +#define SI_CONFIG_INACTIVE_CLK_LSB \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_LSB) +#define SI_CONFIG_INACTIVE_CLK_MASK \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_MASK) +#define SI_CONFIG_INACTIVE_DATA_LSB \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_LSB) +#define SI_CONFIG_INACTIVE_DATA_MASK \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_MASK) +#define SI_CONFIG_DIVIDER_LSB (scn->targetdef->d_SI_CONFIG_DIVIDER_LSB) +#define SI_CONFIG_DIVIDER_MASK (scn->targetdef->d_SI_CONFIG_DIVIDER_MASK) +#define SI_BASE_ADDRESS (scn->targetdef->d_SI_BASE_ADDRESS) +#define SI_CONFIG_OFFSET (scn->targetdef->d_SI_CONFIG_OFFSET) +#define SI_TX_DATA0_OFFSET (scn->targetdef->d_SI_TX_DATA0_OFFSET) +#define SI_TX_DATA1_OFFSET (scn->targetdef->d_SI_TX_DATA1_OFFSET) +#define SI_RX_DATA0_OFFSET (scn->targetdef->d_SI_RX_DATA0_OFFSET) +#define SI_RX_DATA1_OFFSET (scn->targetdef->d_SI_RX_DATA1_OFFSET) +#define SI_CS_OFFSET (scn->targetdef->d_SI_CS_OFFSET) +#define SI_CS_DONE_ERR_MASK (scn->targetdef->d_SI_CS_DONE_ERR_MASK) +#define SI_CS_DONE_INT_MASK (scn->targetdef->d_SI_CS_DONE_INT_MASK) +#define SI_CS_START_LSB (scn->targetdef->d_SI_CS_START_LSB) +#define SI_CS_START_MASK (scn->targetdef->d_SI_CS_START_MASK) +#define SI_CS_RX_CNT_LSB (scn->targetdef->d_SI_CS_RX_CNT_LSB) +#define SI_CS_RX_CNT_MASK (scn->targetdef->d_SI_CS_RX_CNT_MASK) +#define SI_CS_TX_CNT_LSB (scn->targetdef->d_SI_CS_TX_CNT_LSB) +#define SI_CS_TX_CNT_MASK (scn->targetdef->d_SI_CS_TX_CNT_MASK) +#define EEPROM_SZ (scn->targetdef->d_BOARD_DATA_SZ) +#define EEPROM_EXT_SZ (scn->targetdef->d_BOARD_EXT_DATA_SZ) +#define MBOX_BASE_ADDRESS (scn->targetdef->d_MBOX_BASE_ADDRESS) +#define LOCAL_SCRATCH_OFFSET (scn->targetdef->d_LOCAL_SCRATCH_OFFSET) +#define CPU_CLOCK_OFFSET (scn->targetdef->d_CPU_CLOCK_OFFSET) +#define LPO_CAL_OFFSET (scn->targetdef->d_LPO_CAL_OFFSET) +#define GPIO_PIN10_OFFSET (scn->targetdef->d_GPIO_PIN10_OFFSET) +#define GPIO_PIN11_OFFSET (scn->targetdef->d_GPIO_PIN11_OFFSET) +#define GPIO_PIN12_OFFSET (scn->targetdef->d_GPIO_PIN12_OFFSET) +#define GPIO_PIN13_OFFSET (scn->targetdef->d_GPIO_PIN13_OFFSET) +#define CLOCK_GPIO_OFFSET (scn->targetdef->d_CLOCK_GPIO_OFFSET) +#define CPU_CLOCK_STANDARD_LSB (scn->targetdef->d_CPU_CLOCK_STANDARD_LSB) +#define CPU_CLOCK_STANDARD_MASK (scn->targetdef->d_CPU_CLOCK_STANDARD_MASK) +#define LPO_CAL_ENABLE_LSB (scn->targetdef->d_LPO_CAL_ENABLE_LSB) +#define LPO_CAL_ENABLE_MASK (scn->targetdef->d_LPO_CAL_ENABLE_MASK) +#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB \ + (scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB) +#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK \ + (scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK) +#define ANALOG_INTF_BASE_ADDRESS (scn->targetdef->d_ANALOG_INTF_BASE_ADDRESS) +#define WLAN_MAC_BASE_ADDRESS (scn->targetdef->d_WLAN_MAC_BASE_ADDRESS) +#define FW_INDICATOR_ADDRESS (scn->targetdef->d_FW_INDICATOR_ADDRESS) +#define DRAM_BASE_ADDRESS (scn->targetdef->d_DRAM_BASE_ADDRESS) +#define SOC_CORE_BASE_ADDRESS (scn->targetdef->d_SOC_CORE_BASE_ADDRESS) +#define CORE_CTRL_ADDRESS (scn->targetdef->d_CORE_CTRL_ADDRESS) +#define CE_COUNT (scn->targetdef->d_CE_COUNT) +#define PCIE_INTR_ENABLE_ADDRESS (scn->targetdef->d_PCIE_INTR_ENABLE_ADDRESS) +#define PCIE_INTR_CLR_ADDRESS (scn->targetdef->d_PCIE_INTR_CLR_ADDRESS) +#define PCIE_INTR_FIRMWARE_MASK (scn->targetdef->d_PCIE_INTR_FIRMWARE_MASK) +#define PCIE_INTR_CE_MASK_ALL (scn->targetdef->d_PCIE_INTR_CE_MASK_ALL) +#define CORE_CTRL_CPU_INTR_MASK (scn->targetdef->d_CORE_CTRL_CPU_INTR_MASK) +#define PCIE_INTR_CAUSE_ADDRESS (scn->targetdef->d_PCIE_INTR_CAUSE_ADDRESS) +#define SOC_RESET_CONTROL_ADDRESS (scn->targetdef->d_SOC_RESET_CONTROL_ADDRESS) +#define HOST_GROUP0_MASK (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL | \ + A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK) +#define SOC_RESET_CONTROL_CE_RST_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_CE_RST_MASK) +#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK) +#define CPU_INTR_ADDRESS (scn->targetdef->d_CPU_INTR_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ADDRESS \ + (scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK \ + (scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK) +#define SOC_LF_TIMER_STATUS0_ADDRESS \ + (scn->targetdef->d_SOC_LF_TIMER_STATUS0_ADDRESS) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB \ + (scn->targetdef->d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) + +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_GET(x) \ + (((x) & SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) >> \ + SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_SET(x) \ + (((x) << SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) & \ + SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) + +/* hif_pci.c */ +#define CHIP_ID_ADDRESS (scn->targetdef->d_SOC_CHIP_ID_ADDRESS) +#define SOC_CHIP_ID_REVISION_MASK (scn->targetdef->d_SOC_CHIP_ID_REVISION_MASK) +#define SOC_CHIP_ID_REVISION_LSB (scn->targetdef->d_SOC_CHIP_ID_REVISION_LSB) +#define SOC_CHIP_ID_VERSION_MASK (scn->targetdef->d_SOC_CHIP_ID_VERSION_MASK) +#define SOC_CHIP_ID_VERSION_LSB (scn->targetdef->d_SOC_CHIP_ID_VERSION_LSB) +#define CHIP_ID_REVISION_GET(x) \ + (((x) & SOC_CHIP_ID_REVISION_MASK) >> SOC_CHIP_ID_REVISION_LSB) +#define CHIP_ID_VERSION_GET(x) \ + (((x) & SOC_CHIP_ID_VERSION_MASK) >> SOC_CHIP_ID_VERSION_LSB) +/* hif_pci.c end */ + +/* misc */ +#define SR_WR_INDEX_ADDRESS (scn->targetdef->d_SR_WR_INDEX_ADDRESS) +#define DST_WATERMARK_ADDRESS (scn->targetdef->d_DST_WATERMARK_ADDRESS) +#define SOC_POWER_REG_OFFSET (scn->targetdef->d_SOC_POWER_REG_OFFSET) +/* end */ + +/* copy_engine.c */ +/* end */ +/* PLL start */ +#define EFUSE_OFFSET (scn->targetdef->d_EFUSE_OFFSET) +#define EFUSE_XTAL_SEL_MSB (scn->targetdef->d_EFUSE_XTAL_SEL_MSB) +#define EFUSE_XTAL_SEL_LSB (scn->targetdef->d_EFUSE_XTAL_SEL_LSB) +#define EFUSE_XTAL_SEL_MASK (scn->targetdef->d_EFUSE_XTAL_SEL_MASK) +#define BB_PLL_CONFIG_OFFSET (scn->targetdef->d_BB_PLL_CONFIG_OFFSET) +#define BB_PLL_CONFIG_OUTDIV_MSB (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MSB) +#define BB_PLL_CONFIG_OUTDIV_LSB (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_LSB) +#define BB_PLL_CONFIG_OUTDIV_MASK (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MASK) +#define BB_PLL_CONFIG_FRAC_MSB (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MSB) +#define BB_PLL_CONFIG_FRAC_LSB (scn->targetdef->d_BB_PLL_CONFIG_FRAC_LSB) +#define BB_PLL_CONFIG_FRAC_MASK (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MASK) +#define WLAN_PLL_SETTLE_TIME_MSB (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MSB) +#define WLAN_PLL_SETTLE_TIME_LSB (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_LSB) +#define WLAN_PLL_SETTLE_TIME_MASK (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MASK) +#define WLAN_PLL_SETTLE_OFFSET (scn->targetdef->d_WLAN_PLL_SETTLE_OFFSET) +#define WLAN_PLL_SETTLE_SW_MASK (scn->targetdef->d_WLAN_PLL_SETTLE_SW_MASK) +#define WLAN_PLL_SETTLE_RSTMASK (scn->targetdef->d_WLAN_PLL_SETTLE_RSTMASK) +#define WLAN_PLL_SETTLE_RESET (scn->targetdef->d_WLAN_PLL_SETTLE_RESET) +#define WLAN_PLL_CONTROL_NOPWD_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MSB) +#define WLAN_PLL_CONTROL_NOPWD_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_LSB) +#define WLAN_PLL_CONTROL_NOPWD_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MASK) +#define WLAN_PLL_CONTROL_BYPASS_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MSB) +#define WLAN_PLL_CONTROL_BYPASS_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_LSB) +#define WLAN_PLL_CONTROL_BYPASS_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MASK) +#define WLAN_PLL_CONTROL_BYPASS_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_RESET) +#define WLAN_PLL_CONTROL_CLK_SEL_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MSB) +#define WLAN_PLL_CONTROL_CLK_SEL_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_LSB) +#define WLAN_PLL_CONTROL_CLK_SEL_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MASK) +#define WLAN_PLL_CONTROL_CLK_SEL_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_RESET) +#define WLAN_PLL_CONTROL_REFDIV_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MSB) +#define WLAN_PLL_CONTROL_REFDIV_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_LSB) +#define WLAN_PLL_CONTROL_REFDIV_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MASK) +#define WLAN_PLL_CONTROL_REFDIV_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_RESET) +#define WLAN_PLL_CONTROL_DIV_MSB (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MSB) +#define WLAN_PLL_CONTROL_DIV_LSB (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_LSB) +#define WLAN_PLL_CONTROL_DIV_MASK (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MASK) +#define WLAN_PLL_CONTROL_DIV_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_RESET) +#define WLAN_PLL_CONTROL_OFFSET (scn->targetdef->d_WLAN_PLL_CONTROL_OFFSET) +#define WLAN_PLL_CONTROL_SW_MASK (scn->targetdef->d_WLAN_PLL_CONTROL_SW_MASK) +#define WLAN_PLL_CONTROL_RSTMASK (scn->targetdef->d_WLAN_PLL_CONTROL_RSTMASK) +#define WLAN_PLL_CONTROL_RESET (scn->targetdef->d_WLAN_PLL_CONTROL_RESET) +#define SOC_CORE_CLK_CTRL_OFFSET (scn->targetdef->d_SOC_CORE_CLK_CTRL_OFFSET) +#define SOC_CORE_CLK_CTRL_DIV_MSB (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MSB) +#define SOC_CORE_CLK_CTRL_DIV_LSB (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_LSB) +#define SOC_CORE_CLK_CTRL_DIV_MASK \ + (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_MSB \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_LSB \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_LSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_MASK \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_RESET \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_RESET) +#define RTC_SYNC_STATUS_OFFSET (scn->targetdef->d_RTC_SYNC_STATUS_OFFSET) +#define SOC_CPU_CLOCK_OFFSET (scn->targetdef->d_SOC_CPU_CLOCK_OFFSET) +#define SOC_CPU_CLOCK_STANDARD_MSB \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MSB) +#define SOC_CPU_CLOCK_STANDARD_LSB \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_LSB) +#define SOC_CPU_CLOCK_STANDARD_MASK \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MASK) +/* PLL end */ + +#define FW_CPU_PLL_CONFIG \ + (scn->targetdef->d_FW_CPU_PLL_CONFIG) + +#define WIFICMN_PCIE_BAR_REG_ADDRESS \ + (sc->targetdef->d_WIFICMN_PCIE_BAR_REG_ADDRESS) + + /* htt tx */ +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK) +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK) +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK) +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK) +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB) +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB) +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB) +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB) + +#define CE_CMD_ADDRESS \ + (scn->targetdef->d_CE_CMD_ADDRESS) +#define CE_CMD_HALT_MASK \ + (scn->targetdef->d_CE_CMD_HALT_MASK) +#define CE_CMD_HALT_STATUS_MASK \ + (scn->targetdef->d_CE_CMD_HALT_STATUS_MASK) +#define CE_CMD_HALT_STATUS_LSB \ + (scn->targetdef->d_CE_CMD_HALT_STATUS_LSB) + +#define SI_CONFIG_ERR_INT_MASK \ + (scn->targetdef->d_SI_CONFIG_ERR_INT_MASK) +#define SI_CONFIG_ERR_INT_LSB \ + (scn->targetdef->d_SI_CONFIG_ERR_INT_LSB) +#define GPIO_ENABLE_W1TS_LOW_ADDRESS \ + (scn->targetdef->d_GPIO_ENABLE_W1TS_LOW_ADDRESS) +#define GPIO_PIN0_CONFIG_LSB \ + (scn->targetdef->d_GPIO_PIN0_CONFIG_LSB) +#define GPIO_PIN0_PAD_PULL_LSB \ + (scn->targetdef->d_GPIO_PIN0_PAD_PULL_LSB) +#define GPIO_PIN0_PAD_PULL_MASK \ + (scn->targetdef->d_GPIO_PIN0_PAD_PULL_MASK) + +#define SOC_CHIP_ID_REVISION_MSB \ + (scn->targetdef->d_SOC_CHIP_ID_REVISION_MSB) + +#define FW_AXI_MSI_ADDR \ + (scn->targetdef->d_FW_AXI_MSI_ADDR) +#define FW_AXI_MSI_DATA \ + (scn->targetdef->d_FW_AXI_MSI_DATA) +#define WLAN_SUBSYSTEM_CORE_ID_ADDRESS \ + (scn->targetdef->d_WLAN_SUBSYSTEM_CORE_ID_ADDRESS) +#define FPGA_VERSION_ADDRESS \ + (scn->targetdef->d_FPGA_VERSION_ADDRESS) + +/* SET macros */ +#define WLAN_SYSTEM_SLEEP_DISABLE_SET(x) \ + (((x) << WLAN_SYSTEM_SLEEP_DISABLE_LSB) & \ + WLAN_SYSTEM_SLEEP_DISABLE_MASK) +#define SI_CONFIG_BIDIR_OD_DATA_SET(x) \ + (((x) << SI_CONFIG_BIDIR_OD_DATA_LSB) & SI_CONFIG_BIDIR_OD_DATA_MASK) +#define SI_CONFIG_I2C_SET(x) (((x) << SI_CONFIG_I2C_LSB) & SI_CONFIG_I2C_MASK) +#define SI_CONFIG_POS_SAMPLE_SET(x) \ + (((x) << SI_CONFIG_POS_SAMPLE_LSB) & SI_CONFIG_POS_SAMPLE_MASK) +#define SI_CONFIG_INACTIVE_CLK_SET(x) \ + (((x) << SI_CONFIG_INACTIVE_CLK_LSB) & SI_CONFIG_INACTIVE_CLK_MASK) +#define SI_CONFIG_INACTIVE_DATA_SET(x) \ + (((x) << SI_CONFIG_INACTIVE_DATA_LSB) & SI_CONFIG_INACTIVE_DATA_MASK) +#define SI_CONFIG_DIVIDER_SET(x) \ + (((x) << SI_CONFIG_DIVIDER_LSB) & SI_CONFIG_DIVIDER_MASK) +#define SI_CS_START_SET(x) (((x) << SI_CS_START_LSB) & SI_CS_START_MASK) +#define SI_CS_RX_CNT_SET(x) (((x) << SI_CS_RX_CNT_LSB) & SI_CS_RX_CNT_MASK) +#define SI_CS_TX_CNT_SET(x) (((x) << SI_CS_TX_CNT_LSB) & SI_CS_TX_CNT_MASK) +#define LPO_CAL_ENABLE_SET(x) \ + (((x) << LPO_CAL_ENABLE_LSB) & LPO_CAL_ENABLE_MASK) +#define CPU_CLOCK_STANDARD_SET(x) \ + (((x) << CPU_CLOCK_STANDARD_LSB) & CPU_CLOCK_STANDARD_MASK) +#define CLOCK_GPIO_BT_CLK_OUT_EN_SET(x) \ + (((x) << CLOCK_GPIO_BT_CLK_OUT_EN_LSB) & CLOCK_GPIO_BT_CLK_OUT_EN_MASK) +/* copy_engine.c */ +/* end */ +/* PLL start */ +#define EFUSE_XTAL_SEL_GET(x) \ + (((x) & EFUSE_XTAL_SEL_MASK) >> EFUSE_XTAL_SEL_LSB) +#define EFUSE_XTAL_SEL_SET(x) \ + (((x) << EFUSE_XTAL_SEL_LSB) & EFUSE_XTAL_SEL_MASK) +#define BB_PLL_CONFIG_OUTDIV_GET(x) \ + (((x) & BB_PLL_CONFIG_OUTDIV_MASK) >> BB_PLL_CONFIG_OUTDIV_LSB) +#define BB_PLL_CONFIG_OUTDIV_SET(x) \ + (((x) << BB_PLL_CONFIG_OUTDIV_LSB) & BB_PLL_CONFIG_OUTDIV_MASK) +#define BB_PLL_CONFIG_FRAC_GET(x) \ + (((x) & BB_PLL_CONFIG_FRAC_MASK) >> BB_PLL_CONFIG_FRAC_LSB) +#define BB_PLL_CONFIG_FRAC_SET(x) \ + (((x) << BB_PLL_CONFIG_FRAC_LSB) & BB_PLL_CONFIG_FRAC_MASK) +#define WLAN_PLL_SETTLE_TIME_GET(x) \ + (((x) & WLAN_PLL_SETTLE_TIME_MASK) >> WLAN_PLL_SETTLE_TIME_LSB) +#define WLAN_PLL_SETTLE_TIME_SET(x) \ + (((x) << WLAN_PLL_SETTLE_TIME_LSB) & WLAN_PLL_SETTLE_TIME_MASK) +#define WLAN_PLL_CONTROL_NOPWD_GET(x) \ + (((x) & WLAN_PLL_CONTROL_NOPWD_MASK) >> WLAN_PLL_CONTROL_NOPWD_LSB) +#define WLAN_PLL_CONTROL_NOPWD_SET(x) \ + (((x) << WLAN_PLL_CONTROL_NOPWD_LSB) & WLAN_PLL_CONTROL_NOPWD_MASK) +#define WLAN_PLL_CONTROL_BYPASS_GET(x) \ + (((x) & WLAN_PLL_CONTROL_BYPASS_MASK) >> WLAN_PLL_CONTROL_BYPASS_LSB) +#define WLAN_PLL_CONTROL_BYPASS_SET(x) \ + (((x) << WLAN_PLL_CONTROL_BYPASS_LSB) & WLAN_PLL_CONTROL_BYPASS_MASK) +#define WLAN_PLL_CONTROL_CLK_SEL_GET(x) \ + (((x) & WLAN_PLL_CONTROL_CLK_SEL_MASK) >> WLAN_PLL_CONTROL_CLK_SEL_LSB) +#define WLAN_PLL_CONTROL_CLK_SEL_SET(x) \ + (((x) << WLAN_PLL_CONTROL_CLK_SEL_LSB) & WLAN_PLL_CONTROL_CLK_SEL_MASK) +#define WLAN_PLL_CONTROL_REFDIV_GET(x) \ + (((x) & WLAN_PLL_CONTROL_REFDIV_MASK) >> WLAN_PLL_CONTROL_REFDIV_LSB) +#define WLAN_PLL_CONTROL_REFDIV_SET(x) \ + (((x) << WLAN_PLL_CONTROL_REFDIV_LSB) & WLAN_PLL_CONTROL_REFDIV_MASK) +#define WLAN_PLL_CONTROL_DIV_GET(x) \ + (((x) & WLAN_PLL_CONTROL_DIV_MASK) >> WLAN_PLL_CONTROL_DIV_LSB) +#define WLAN_PLL_CONTROL_DIV_SET(x) \ + (((x) << WLAN_PLL_CONTROL_DIV_LSB) & WLAN_PLL_CONTROL_DIV_MASK) +#define SOC_CORE_CLK_CTRL_DIV_GET(x) \ + (((x) & SOC_CORE_CLK_CTRL_DIV_MASK) >> SOC_CORE_CLK_CTRL_DIV_LSB) +#define SOC_CORE_CLK_CTRL_DIV_SET(x) \ + (((x) << SOC_CORE_CLK_CTRL_DIV_LSB) & SOC_CORE_CLK_CTRL_DIV_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_GET(x) \ + (((x) & RTC_SYNC_STATUS_PLL_CHANGING_MASK) >> \ + RTC_SYNC_STATUS_PLL_CHANGING_LSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_SET(x) \ + (((x) << RTC_SYNC_STATUS_PLL_CHANGING_LSB) & \ + RTC_SYNC_STATUS_PLL_CHANGING_MASK) +#define SOC_CPU_CLOCK_STANDARD_GET(x) \ + (((x) & SOC_CPU_CLOCK_STANDARD_MASK) >> SOC_CPU_CLOCK_STANDARD_LSB) +#define SOC_CPU_CLOCK_STANDARD_SET(x) \ + (((x) << SOC_CPU_CLOCK_STANDARD_LSB) & SOC_CPU_CLOCK_STANDARD_MASK) +/* PLL end */ +#define WLAN_GPIO_PIN0_CONFIG_SET(x) \ + (((x) << GPIO_PIN0_CONFIG_LSB) & GPIO_PIN0_CONFIG_MASK) +#define WLAN_GPIO_PIN0_PAD_PULL_SET(x) \ + (((x) << GPIO_PIN0_PAD_PULL_LSB) & GPIO_PIN0_PAD_PULL_MASK) +#define SI_CONFIG_ERR_INT_SET(x) \ + (((x) << SI_CONFIG_ERR_INT_LSB) & SI_CONFIG_ERR_INT_MASK) + + +#ifdef QCA_WIFI_3_0_ADRASTEA +#define Q6_ENABLE_REGISTER_0 \ + (scn->targetdef->d_Q6_ENABLE_REGISTER_0) +#define Q6_ENABLE_REGISTER_1 \ + (scn->targetdef->d_Q6_ENABLE_REGISTER_1) +#define Q6_CAUSE_REGISTER_0 \ + (scn->targetdef->d_Q6_CAUSE_REGISTER_0) +#define Q6_CAUSE_REGISTER_1 \ + (scn->targetdef->d_Q6_CAUSE_REGISTER_1) +#define Q6_CLEAR_REGISTER_0 \ + (scn->targetdef->d_Q6_CLEAR_REGISTER_0) +#define Q6_CLEAR_REGISTER_1 \ + (scn->targetdef->d_Q6_CLEAR_REGISTER_1) +#endif + +#ifdef CONFIG_BYPASS_QMI +#define BYPASS_QMI_TEMP_REGISTER \ + (scn->targetdef->d_BYPASS_QMI_TEMP_REGISTER) +#endif + +#define A_SOC_PCIE_PCIE_BAR0_START (scn->hostdef->d_A_SOC_PCIE_PCIE_BAR0_START) +#define DESC_DATA_FLAG_MASK (scn->hostdef->d_DESC_DATA_FLAG_MASK) +#define MUX_ID_MASK (scn->hostdef->d_MUX_ID_MASK) +#define TRANSACTION_ID_MASK (scn->hostdef->d_TRANSACTION_ID_MASK) +#define HOST_CE_COUNT (scn->hostdef->d_HOST_CE_COUNT) +#define ENABLE_MSI (scn->hostdef->d_ENABLE_MSI) +#define INT_STATUS_ENABLE_ERROR_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_ERROR_LSB) +#define INT_STATUS_ENABLE_ERROR_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_ERROR_MASK) +#define INT_STATUS_ENABLE_CPU_LSB (scn->hostdef->d_INT_STATUS_ENABLE_CPU_LSB) +#define INT_STATUS_ENABLE_CPU_MASK (scn->hostdef->d_INT_STATUS_ENABLE_CPU_MASK) +#define INT_STATUS_ENABLE_COUNTER_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_LSB) +#define INT_STATUS_ENABLE_COUNTER_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_MASK) +#define INT_STATUS_ENABLE_MBOX_DATA_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_LSB) +#define INT_STATUS_ENABLE_MBOX_DATA_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_MASK) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB \ + (scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_LSB) +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK \ + (scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_MASK) +#define INT_STATUS_ENABLE_ADDRESS \ + (scn->hostdef->d_INT_STATUS_ENABLE_ADDRESS) +#define CPU_INT_STATUS_ENABLE_BIT_LSB \ + (scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_LSB) +#define CPU_INT_STATUS_ENABLE_BIT_MASK \ + (scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_MASK) +#define HOST_INT_STATUS_ADDRESS (scn->hostdef->d_HOST_INT_STATUS_ADDRESS) +#define CPU_INT_STATUS_ADDRESS (scn->hostdef->d_CPU_INT_STATUS_ADDRESS) +#define ERROR_INT_STATUS_ADDRESS (scn->hostdef->d_ERROR_INT_STATUS_ADDRESS) +#define ERROR_INT_STATUS_WAKEUP_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_MASK) +#define ERROR_INT_STATUS_WAKEUP_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_LSB) +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK) +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB) +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_MASK) +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_LSB) +#define COUNT_DEC_ADDRESS (scn->hostdef->d_COUNT_DEC_ADDRESS) +#define HOST_INT_STATUS_CPU_MASK (scn->hostdef->d_HOST_INT_STATUS_CPU_MASK) +#define HOST_INT_STATUS_CPU_LSB (scn->hostdef->d_HOST_INT_STATUS_CPU_LSB) +#define HOST_INT_STATUS_ERROR_MASK (scn->hostdef->d_HOST_INT_STATUS_ERROR_MASK) +#define HOST_INT_STATUS_ERROR_LSB (scn->hostdef->d_HOST_INT_STATUS_ERROR_LSB) +#define HOST_INT_STATUS_COUNTER_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_COUNTER_MASK) +#define HOST_INT_STATUS_COUNTER_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_COUNTER_LSB) +#define RX_LOOKAHEAD_VALID_ADDRESS (scn->hostdef->d_RX_LOOKAHEAD_VALID_ADDRESS) +#define WINDOW_DATA_ADDRESS (scn->hostdef->d_WINDOW_DATA_ADDRESS) +#define WINDOW_READ_ADDR_ADDRESS (scn->hostdef->d_WINDOW_READ_ADDR_ADDRESS) +#define WINDOW_WRITE_ADDR_ADDRESS (scn->hostdef->d_WINDOW_WRITE_ADDR_ADDRESS) +#define SOC_GLOBAL_RESET_ADDRESS (scn->hostdef->d_SOC_GLOBAL_RESET_ADDRESS) +#define RTC_STATE_ADDRESS (scn->hostdef->d_RTC_STATE_ADDRESS) +#define RTC_STATE_COLD_RESET_MASK (scn->hostdef->d_RTC_STATE_COLD_RESET_MASK) +#define PCIE_LOCAL_BASE_ADDRESS (scn->hostdef->d_PCIE_LOCAL_BASE_ADDRESS) +#define PCIE_SOC_WAKE_RESET (scn->hostdef->d_PCIE_SOC_WAKE_RESET) +#define PCIE_SOC_WAKE_ADDRESS (scn->hostdef->d_PCIE_SOC_WAKE_ADDRESS) +#define PCIE_SOC_WAKE_V_MASK (scn->hostdef->d_PCIE_SOC_WAKE_V_MASK) +#define RTC_STATE_V_MASK (scn->hostdef->d_RTC_STATE_V_MASK) +#define RTC_STATE_V_LSB (scn->hostdef->d_RTC_STATE_V_LSB) +#define FW_IND_EVENT_PENDING (scn->hostdef->d_FW_IND_EVENT_PENDING) +#define FW_IND_INITIALIZED (scn->hostdef->d_FW_IND_INITIALIZED) +#define FW_IND_HELPER (scn->hostdef->d_FW_IND_HELPER) +#define RTC_STATE_V_ON (scn->hostdef->d_RTC_STATE_V_ON) + +#define FW_IND_HOST_READY (scn->hostdef->d_FW_IND_HOST_READY) + +#if defined(SDIO_3_0) +#define HOST_INT_STATUS_MBOX_DATA_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_MASK) +#define HOST_INT_STATUS_MBOX_DATA_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_LSB) +#endif + +#if !defined(SOC_PCIE_BASE_ADDRESS) +#define SOC_PCIE_BASE_ADDRESS 0 +#endif + +#if !defined(PCIE_SOC_RDY_STATUS_ADDRESS) +#define PCIE_SOC_RDY_STATUS_ADDRESS 0 +#define PCIE_SOC_RDY_STATUS_BAR_MASK 0 +#endif + +#if !defined(MSI_MAGIC_ADR_ADDRESS) +#define MSI_MAGIC_ADR_ADDRESS 0 +#define MSI_MAGIC_ADDRESS 0 +#endif + +/* SET/GET macros */ +#define INT_STATUS_ENABLE_ERROR_SET(x) \ + (((x) << INT_STATUS_ENABLE_ERROR_LSB) & INT_STATUS_ENABLE_ERROR_MASK) +#define INT_STATUS_ENABLE_CPU_SET(x) \ + (((x) << INT_STATUS_ENABLE_CPU_LSB) & INT_STATUS_ENABLE_CPU_MASK) +#define INT_STATUS_ENABLE_COUNTER_SET(x) \ + (((x) << INT_STATUS_ENABLE_COUNTER_LSB) & \ + INT_STATUS_ENABLE_COUNTER_MASK) +#define INT_STATUS_ENABLE_MBOX_DATA_SET(x) \ + (((x) << INT_STATUS_ENABLE_MBOX_DATA_LSB) & \ + INT_STATUS_ENABLE_MBOX_DATA_MASK) +#define CPU_INT_STATUS_ENABLE_BIT_SET(x) \ + (((x) << CPU_INT_STATUS_ENABLE_BIT_LSB) & \ + CPU_INT_STATUS_ENABLE_BIT_MASK) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(x) \ + (((x) << ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) & \ + ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(x) \ + (((x) << ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) & \ + ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) +#define COUNTER_INT_STATUS_ENABLE_BIT_SET(x) \ + (((x) << COUNTER_INT_STATUS_ENABLE_BIT_LSB) & \ + COUNTER_INT_STATUS_ENABLE_BIT_MASK) +#define ERROR_INT_STATUS_WAKEUP_GET(x) \ + (((x) & ERROR_INT_STATUS_WAKEUP_MASK) >> \ + ERROR_INT_STATUS_WAKEUP_LSB) +#define ERROR_INT_STATUS_RX_UNDERFLOW_GET(x) \ + (((x) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK) >> \ + ERROR_INT_STATUS_RX_UNDERFLOW_LSB) +#define ERROR_INT_STATUS_TX_OVERFLOW_GET(x) \ + (((x) & ERROR_INT_STATUS_TX_OVERFLOW_MASK) >> \ + ERROR_INT_STATUS_TX_OVERFLOW_LSB) +#define HOST_INT_STATUS_CPU_GET(x) \ + (((x) & HOST_INT_STATUS_CPU_MASK) >> HOST_INT_STATUS_CPU_LSB) +#define HOST_INT_STATUS_ERROR_GET(x) \ + (((x) & HOST_INT_STATUS_ERROR_MASK) >> HOST_INT_STATUS_ERROR_LSB) +#define HOST_INT_STATUS_COUNTER_GET(x) \ + (((x) & HOST_INT_STATUS_COUNTER_MASK) >> HOST_INT_STATUS_COUNTER_LSB) +#define RTC_STATE_V_GET(x) \ + (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB) +#if defined(SDIO_3_0) +#define HOST_INT_STATUS_MBOX_DATA_GET(x) \ + (((x) & HOST_INT_STATUS_MBOX_DATA_MASK) >> \ + HOST_INT_STATUS_MBOX_DATA_LSB) +#endif + +#define INVALID_REG_LOC_DUMMY_DATA 0xAA + +#define AR6320_CORE_CLK_DIV_ADDR 0x403fa8 +#define AR6320_CPU_PLL_INIT_DONE_ADDR 0x403fd0 +#define AR6320_CPU_SPEED_ADDR 0x403fa4 +#define AR6320V2_CORE_CLK_DIV_ADDR 0x403fd8 +#define AR6320V2_CPU_PLL_INIT_DONE_ADDR 0x403fd0 +#define AR6320V2_CPU_SPEED_ADDR 0x403fd4 +#define AR6320V3_CORE_CLK_DIV_ADDR 0x404028 +#define AR6320V3_CPU_PLL_INIT_DONE_ADDR 0x404020 +#define AR6320V3_CPU_SPEED_ADDR 0x404024 + +enum a_refclk_speed_t { + SOC_REFCLK_UNKNOWN = -1, /* Unsupported ref clock -- use PLL Bypass */ + SOC_REFCLK_48_MHZ = 0, + SOC_REFCLK_19_2_MHZ = 1, + SOC_REFCLK_24_MHZ = 2, + SOC_REFCLK_26_MHZ = 3, + SOC_REFCLK_37_4_MHZ = 4, + SOC_REFCLK_38_4_MHZ = 5, + SOC_REFCLK_40_MHZ = 6, + SOC_REFCLK_52_MHZ = 7, +}; + +#define A_REFCLK_UNKNOWN SOC_REFCLK_UNKNOWN +#define A_REFCLK_48_MHZ SOC_REFCLK_48_MHZ +#define A_REFCLK_19_2_MHZ SOC_REFCLK_19_2_MHZ +#define A_REFCLK_24_MHZ SOC_REFCLK_24_MHZ +#define A_REFCLK_26_MHZ SOC_REFCLK_26_MHZ +#define A_REFCLK_37_4_MHZ SOC_REFCLK_37_4_MHZ +#define A_REFCLK_38_4_MHZ SOC_REFCLK_38_4_MHZ +#define A_REFCLK_40_MHZ SOC_REFCLK_40_MHZ +#define A_REFCLK_52_MHZ SOC_REFCLK_52_MHZ + +#define TARGET_CPU_FREQ 176000000 + +struct wlan_pll_s { + uint32_t refdiv; + uint32_t div; + uint32_t rnfrac; + uint32_t outdiv; +}; + +struct cmnos_clock_s { + enum a_refclk_speed_t refclk_speed; + uint32_t refclk_hz; + uint32_t pll_settling_time; /* 50us */ + struct wlan_pll_s wlan_pll; +}; + +struct tgt_reg_section { + uint32_t start_addr; + uint32_t end_addr; +}; + +struct tgt_reg_table { + const struct tgt_reg_section *section; + uint32_t section_size; +}; + +struct hif_softc; +void hif_target_register_tbl_attach(struct hif_softc *scn, u32 target_type); +void hif_register_tbl_attach(struct hif_softc *scn, u32 hif_type); + +#endif /* _REGTABLE_PCIE_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/target_reg_init.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/target_reg_init.h new file mode 100644 index 0000000000000000000000000000000000000000..607a5cc1e35242c70afb59affaf4edd08c5e9726 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/target_reg_init.h @@ -0,0 +1,470 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef TARGET_REG_INIT_H +#define TARGET_REG_INIT_H +#include "reg_struct.h" +#include "targaddrs.h" +/*** WARNING : Add to the end of the TABLE! do not change the order ****/ +struct targetdef_s; + + + +#define ATH_UNSUPPORTED_REG_OFFSET UNSUPPORTED_REGISTER_OFFSET +#define ATH_SUPPORTED_BY_TARGET(reg_offset) \ + ((reg_offset) != ATH_UNSUPPORTED_REG_OFFSET) + +#if defined(MY_TARGET_DEF) + +/* Cross-platform compatibility */ +#if !defined(SOC_RESET_CONTROL_OFFSET) && defined(RESET_CONTROL_OFFSET) +#define SOC_RESET_CONTROL_OFFSET RESET_CONTROL_OFFSET +#endif + +#if !defined(CLOCK_GPIO_OFFSET) +#define CLOCK_GPIO_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0 +#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 +#endif + +#if !defined(WLAN_MAC_BASE_ADDRESS) +#define WLAN_MAC_BASE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(CE0_BASE_ADDRESS) +#define CE0_BASE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define CE1_BASE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define CE_COUNT 0 +#endif + +#if !defined(MSI_NUM_REQUEST) +#define MSI_NUM_REQUEST 0 +#define MSI_ASSIGN_FW 0 +#define MSI_ASSIGN_CE_INITIAL 0 +#endif + +#if !defined(FW_INDICATOR_ADDRESS) +#define FW_INDICATOR_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(FW_CPU_PLL_CONFIG) +#define FW_CPU_PLL_CONFIG ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(DRAM_BASE_ADDRESS) +#define DRAM_BASE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(SOC_CORE_BASE_ADDRESS) +#define SOC_CORE_BASE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(CPU_INTR_ADDRESS) +#define CPU_INTR_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(SOC_LF_TIMER_CONTROL0_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(SOC_LF_TIMER_STATUS0_ADDRESS) +#define SOC_LF_TIMER_STATUS0_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(SOC_RESET_CONTROL_ADDRESS) +#define SOC_RESET_CONTROL_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define SOC_RESET_CONTROL_CE_RST_MASK ATH_UNSUPPORTED_REG_OFFSET +#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(CORE_CTRL_ADDRESS) +#define CORE_CTRL_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define CORE_CTRL_CPU_INTR_MASK 0 +#endif + +#if !defined(PCIE_INTR_ENABLE_ADDRESS) +#define PCIE_INTR_ENABLE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define PCIE_INTR_CLR_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define PCIE_INTR_FIRMWARE_MASK ATH_UNSUPPORTED_REG_OFFSET +#define PCIE_INTR_CE_MASK_ALL ATH_UNSUPPORTED_REG_OFFSET +#define PCIE_INTR_CAUSE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(WIFICMN_PCIE_BAR_REG_ADDRESS) +#define WIFICMN_PCIE_BAR_REG_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(WIFICMN_INT_STATUS_ADDRESS) +#define WIFICMN_INT_STATUS_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(FW_AXI_MSI_ADDR) +#define FW_AXI_MSI_ADDR ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(FW_AXI_MSI_DATA) +#define FW_AXI_MSI_DATA ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(WLAN_SUBSYSTEM_CORE_ID_ADDRESS) +#define WLAN_SUBSYSTEM_CORE_ID_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(FPGA_VERSION_ADDRESS) +#define FPGA_VERSION_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(SI_CONFIG_ADDRESS) +#define SI_CONFIG_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define SI_CONFIG_BIDIR_OD_DATA_LSB 0 +#define SI_CONFIG_BIDIR_OD_DATA_MASK 0 +#define SI_CONFIG_I2C_LSB 0 +#define SI_CONFIG_I2C_MASK 0 +#define SI_CONFIG_POS_SAMPLE_LSB 0 +#define SI_CONFIG_POS_SAMPLE_MASK 0 +#define SI_CONFIG_INACTIVE_CLK_LSB 0 +#define SI_CONFIG_INACTIVE_CLK_MASK 0 +#define SI_CONFIG_INACTIVE_DATA_LSB 0 +#define SI_CONFIG_INACTIVE_DATA_MASK 0 +#define SI_CONFIG_DIVIDER_LSB 0 +#define SI_CONFIG_DIVIDER_MASK 0 +#define SI_CONFIG_OFFSET 0 +#define SI_TX_DATA0_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#define SI_TX_DATA1_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#define SI_RX_DATA0_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#define SI_RX_DATA1_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#define SI_CS_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#define SI_CS_DONE_ERR_MASK 0 +#define SI_CS_DONE_INT_MASK 0 +#define SI_CS_START_LSB 0 +#define SI_CS_START_MASK 0 +#define SI_CS_RX_CNT_LSB 0 +#define SI_CS_RX_CNT_MASK 0 +#define SI_CS_TX_CNT_LSB 0 +#define SI_CS_TX_CNT_MASK 0 +#endif + +#ifndef SI_BASE_ADDRESS +#define SI_BASE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#ifndef WLAN_GPIO_PIN10_ADDRESS +#define WLAN_GPIO_PIN10_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#ifndef WLAN_GPIO_PIN11_ADDRESS +#define WLAN_GPIO_PIN11_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#ifndef WLAN_GPIO_PIN12_ADDRESS +#define WLAN_GPIO_PIN12_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#ifndef WLAN_GPIO_PIN13_ADDRESS +#define WLAN_GPIO_PIN13_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#ifndef WIFICMN_INT_STATUS_ADDRESS +#define WIFICMN_INT_STATUS_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +static struct targetdef_s my_target_def = { + .d_RTC_SOC_BASE_ADDRESS = RTC_SOC_BASE_ADDRESS, + .d_RTC_WMAC_BASE_ADDRESS = RTC_WMAC_BASE_ADDRESS, + .d_SYSTEM_SLEEP_OFFSET = WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_OFFSET = WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_DISABLE_LSB = WLAN_SYSTEM_SLEEP_DISABLE_LSB, + .d_WLAN_SYSTEM_SLEEP_DISABLE_MASK = WLAN_SYSTEM_SLEEP_DISABLE_MASK, + .d_CLOCK_CONTROL_OFFSET = CLOCK_CONTROL_OFFSET, + .d_CLOCK_CONTROL_SI0_CLK_MASK = CLOCK_CONTROL_SI0_CLK_MASK, + .d_RESET_CONTROL_OFFSET = SOC_RESET_CONTROL_OFFSET, + .d_RESET_CONTROL_SI0_RST_MASK = RESET_CONTROL_SI0_RST_MASK, + .d_WLAN_RESET_CONTROL_OFFSET = WLAN_RESET_CONTROL_OFFSET, + .d_WLAN_RESET_CONTROL_COLD_RST_MASK = WLAN_RESET_CONTROL_COLD_RST_MASK, + .d_WLAN_RESET_CONTROL_WARM_RST_MASK = WLAN_RESET_CONTROL_WARM_RST_MASK, + .d_GPIO_BASE_ADDRESS = GPIO_BASE_ADDRESS, + .d_GPIO_PIN0_OFFSET = GPIO_PIN0_OFFSET, + .d_GPIO_PIN1_OFFSET = GPIO_PIN1_OFFSET, + .d_GPIO_PIN0_CONFIG_MASK = GPIO_PIN0_CONFIG_MASK, + .d_GPIO_PIN1_CONFIG_MASK = GPIO_PIN1_CONFIG_MASK, + .d_SI_CONFIG_BIDIR_OD_DATA_LSB = SI_CONFIG_BIDIR_OD_DATA_LSB, + .d_SI_CONFIG_BIDIR_OD_DATA_MASK = SI_CONFIG_BIDIR_OD_DATA_MASK, + .d_SI_CONFIG_I2C_LSB = SI_CONFIG_I2C_LSB, + .d_SI_CONFIG_I2C_MASK = SI_CONFIG_I2C_MASK, + .d_SI_CONFIG_POS_SAMPLE_LSB = SI_CONFIG_POS_SAMPLE_LSB, + .d_SI_CONFIG_POS_SAMPLE_MASK = SI_CONFIG_POS_SAMPLE_MASK, + .d_SI_CONFIG_INACTIVE_CLK_LSB = SI_CONFIG_INACTIVE_CLK_LSB, + .d_SI_CONFIG_INACTIVE_CLK_MASK = SI_CONFIG_INACTIVE_CLK_MASK, + .d_SI_CONFIG_INACTIVE_DATA_LSB = SI_CONFIG_INACTIVE_DATA_LSB, + .d_SI_CONFIG_INACTIVE_DATA_MASK = SI_CONFIG_INACTIVE_DATA_MASK, + .d_SI_CONFIG_DIVIDER_LSB = SI_CONFIG_DIVIDER_LSB, + .d_SI_CONFIG_DIVIDER_MASK = SI_CONFIG_DIVIDER_MASK, + .d_SI_BASE_ADDRESS = SI_BASE_ADDRESS, + .d_SI_CONFIG_OFFSET = SI_CONFIG_OFFSET, + .d_SI_TX_DATA0_OFFSET = SI_TX_DATA0_OFFSET, + .d_SI_TX_DATA1_OFFSET = SI_TX_DATA1_OFFSET, + .d_SI_RX_DATA0_OFFSET = SI_RX_DATA0_OFFSET, + .d_SI_RX_DATA1_OFFSET = SI_RX_DATA1_OFFSET, + .d_SI_CS_OFFSET = SI_CS_OFFSET, + .d_SI_CS_DONE_ERR_MASK = SI_CS_DONE_ERR_MASK, + .d_SI_CS_DONE_INT_MASK = SI_CS_DONE_INT_MASK, + .d_SI_CS_START_LSB = SI_CS_START_LSB, + .d_SI_CS_START_MASK = SI_CS_START_MASK, + .d_SI_CS_RX_CNT_LSB = SI_CS_RX_CNT_LSB, + .d_SI_CS_RX_CNT_MASK = SI_CS_RX_CNT_MASK, + .d_SI_CS_TX_CNT_LSB = SI_CS_TX_CNT_LSB, + .d_SI_CS_TX_CNT_MASK = SI_CS_TX_CNT_MASK, + .d_BOARD_DATA_SZ = MY_TARGET_BOARD_DATA_SZ, + .d_BOARD_EXT_DATA_SZ = MY_TARGET_BOARD_EXT_DATA_SZ, + .d_MBOX_BASE_ADDRESS = MBOX_BASE_ADDRESS, + .d_LOCAL_SCRATCH_OFFSET = LOCAL_SCRATCH_OFFSET, + .d_CPU_CLOCK_OFFSET = CPU_CLOCK_OFFSET, + .d_GPIO_PIN10_OFFSET = GPIO_PIN10_OFFSET, + .d_GPIO_PIN11_OFFSET = GPIO_PIN11_OFFSET, + .d_GPIO_PIN12_OFFSET = GPIO_PIN12_OFFSET, + .d_GPIO_PIN13_OFFSET = GPIO_PIN13_OFFSET, + .d_CLOCK_GPIO_OFFSET = CLOCK_GPIO_OFFSET, + .d_CPU_CLOCK_STANDARD_LSB = CPU_CLOCK_STANDARD_LSB, + .d_CPU_CLOCK_STANDARD_MASK = CPU_CLOCK_STANDARD_MASK, + .d_LPO_CAL_ENABLE_LSB = LPO_CAL_ENABLE_LSB, + .d_LPO_CAL_ENABLE_MASK = LPO_CAL_ENABLE_MASK, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB = CLOCK_GPIO_BT_CLK_OUT_EN_LSB, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK = CLOCK_GPIO_BT_CLK_OUT_EN_MASK, + .d_ANALOG_INTF_BASE_ADDRESS = ANALOG_INTF_BASE_ADDRESS, + .d_WLAN_MAC_BASE_ADDRESS = WLAN_MAC_BASE_ADDRESS, + .d_FW_INDICATOR_ADDRESS = FW_INDICATOR_ADDRESS, + .d_FW_CPU_PLL_CONFIG = FW_CPU_PLL_CONFIG, + .d_DRAM_BASE_ADDRESS = DRAM_BASE_ADDRESS, + .d_SOC_CORE_BASE_ADDRESS = SOC_CORE_BASE_ADDRESS, + .d_CORE_CTRL_ADDRESS = CORE_CTRL_ADDRESS, + .d_CE_COUNT = CE_COUNT, + .d_MSI_NUM_REQUEST = MSI_NUM_REQUEST, + .d_MSI_ASSIGN_FW = MSI_ASSIGN_FW, + .d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL, + .d_PCIE_INTR_ENABLE_ADDRESS = PCIE_INTR_ENABLE_ADDRESS, + .d_PCIE_INTR_CLR_ADDRESS = PCIE_INTR_CLR_ADDRESS, + .d_PCIE_INTR_FIRMWARE_MASK = PCIE_INTR_FIRMWARE_MASK, + .d_PCIE_INTR_CE_MASK_ALL = PCIE_INTR_CE_MASK_ALL, + .d_CORE_CTRL_CPU_INTR_MASK = CORE_CTRL_CPU_INTR_MASK, + .d_WIFICMN_PCIE_BAR_REG_ADDRESS = WIFICMN_PCIE_BAR_REG_ADDRESS, + /* htt_rx.c */ + /* htt tx */ + .d_MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK + = MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK, + .d_MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK + = MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK, + .d_MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK + = MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK, + .d_MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK + = MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK, + .d_MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB + = MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB, + .d_MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB + = MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB, + .d_MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB + = MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB, + .d_MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB + = MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB, + /* copy_engine.c */ + .d_SR_WR_INDEX_ADDRESS = SR_WR_INDEX_ADDRESS, + .d_DST_WATERMARK_ADDRESS = DST_WATERMARK_ADDRESS, + + .d_PCIE_INTR_CAUSE_ADDRESS = PCIE_INTR_CAUSE_ADDRESS, + .d_SOC_RESET_CONTROL_ADDRESS = SOC_RESET_CONTROL_ADDRESS, + .d_SOC_RESET_CONTROL_CE_RST_MASK = SOC_RESET_CONTROL_CE_RST_MASK, + .d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK + = SOC_RESET_CONTROL_CPU_WARM_RST_MASK, + .d_CPU_INTR_ADDRESS = CPU_INTR_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ADDRESS = SOC_LF_TIMER_CONTROL0_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK + = SOC_LF_TIMER_CONTROL0_ENABLE_MASK, + .d_SOC_LF_TIMER_STATUS0_ADDRESS = SOC_LF_TIMER_STATUS0_ADDRESS, + .d_SI_CONFIG_ERR_INT_MASK = SI_CONFIG_ERR_INT_MASK, + .d_SI_CONFIG_ERR_INT_LSB = SI_CONFIG_ERR_INT_LSB, + .d_GPIO_ENABLE_W1TS_LOW_ADDRESS = GPIO_ENABLE_W1TS_LOW_ADDRESS, + .d_GPIO_PIN0_CONFIG_LSB = GPIO_PIN0_CONFIG_LSB, + .d_GPIO_PIN0_PAD_PULL_LSB = GPIO_PIN0_PAD_PULL_LSB, + .d_GPIO_PIN0_PAD_PULL_MASK = GPIO_PIN0_PAD_PULL_MASK, + .d_SOC_CHIP_ID_ADDRESS = SOC_CHIP_ID_ADDRESS, + .d_SOC_CHIP_ID_REVISION_MASK = SOC_CHIP_ID_REVISION_MASK, + .d_SOC_CHIP_ID_REVISION_LSB = SOC_CHIP_ID_REVISION_LSB, + .d_SOC_CHIP_ID_REVISION_MSB = SOC_CHIP_ID_REVISION_MSB, + .d_WIFICMN_PCIE_BAR_REG_ADDRESS = WIFICMN_PCIE_BAR_REG_ADDRESS, + .d_FW_AXI_MSI_ADDR = FW_AXI_MSI_ADDR, + .d_FW_AXI_MSI_DATA = FW_AXI_MSI_DATA, + .d_WLAN_SUBSYSTEM_CORE_ID_ADDRESS = WLAN_SUBSYSTEM_CORE_ID_ADDRESS, + .d_WIFICMN_INT_STATUS_ADDRESS = WIFICMN_INT_STATUS_ADDRESS, +}; + +struct targetdef_s *MY_TARGET_DEF = &my_target_def; +#else +#endif + +#if defined(MY_CEREG_DEF) + +#if !defined(CE_DDR_ADDRESS_FOR_RRI_LOW) +#define CE_DDR_ADDRESS_FOR_RRI_LOW ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_DDR_ADDRESS_FOR_RRI_HIGH) +#define CE_DDR_ADDRESS_FOR_RRI_HIGH ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(SR_BA_ADDRESS_HIGH) +#define SR_BA_ADDRESS_HIGH ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(DR_BA_ADDRESS_HIGH) +#define DR_BA_ADDRESS_HIGH ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_CMD_REGISTER) +#define CE_CMD_REGISTER ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_MSI_ADDRESS) +#define CE_MSI_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_MSI_ADDRESS_HIGH) +#define CE_MSI_ADDRESS_HIGH ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_MSI_DATA) +#define CE_MSI_DATA ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_MSI_ENABLE_BIT) +#define CE_MSI_ENABLE_BIT ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_CTRL1_IDX_UPD_EN_MASK) +#define CE_CTRL1_IDX_UPD_EN_MASK ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_WRAPPER_DEBUG_OFFSET) +#define CE_WRAPPER_DEBUG_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_DEBUG_OFFSET) +#define CE_DEBUG_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES) +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS) +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(HOST_IE_ADDRESS_2) +#define HOST_IE_ADDRESS_2 ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(HOST_IE_ADDRESS_3) +#define HOST_IE_ADDRESS_3 ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(HOST_IE_REG1_CE_LSB) +#define HOST_IE_REG1_CE_LSB 0 +#endif +#if !defined(HOST_IE_REG2_CE_LSB) +#define HOST_IE_REG2_CE_LSB 0 +#endif +#if !defined(HOST_IE_REG3_CE_LSB) +#define HOST_IE_REG3_CE_LSB 0 +#endif + +static struct ce_reg_def my_ce_reg_def = { + /* copy_engine.c */ + .d_DST_WR_INDEX_ADDRESS = DST_WR_INDEX_ADDRESS, + .d_SRC_WATERMARK_ADDRESS = SRC_WATERMARK_ADDRESS, + .d_SRC_WATERMARK_LOW_MASK = SRC_WATERMARK_LOW_MASK, + .d_SRC_WATERMARK_HIGH_MASK = SRC_WATERMARK_HIGH_MASK, + .d_DST_WATERMARK_LOW_MASK = DST_WATERMARK_LOW_MASK, + .d_DST_WATERMARK_HIGH_MASK = DST_WATERMARK_HIGH_MASK, + .d_CURRENT_SRRI_ADDRESS = CURRENT_SRRI_ADDRESS, + .d_CURRENT_DRRI_ADDRESS = CURRENT_DRRI_ADDRESS, + .d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK + = HOST_IS_SRC_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK + = HOST_IS_SRC_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK + = HOST_IS_DST_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_DST_RING_LOW_WATERMARK_MASK + = HOST_IS_DST_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_ADDRESS = HOST_IS_ADDRESS, + .d_MISC_IS_ADDRESS = MISC_IS_ADDRESS, + .d_HOST_IS_COPY_COMPLETE_MASK = HOST_IS_COPY_COMPLETE_MASK, + .d_CE_WRAPPER_BASE_ADDRESS = CE_WRAPPER_BASE_ADDRESS, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS + = CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS, + .d_CE_DDR_ADDRESS_FOR_RRI_LOW = CE_DDR_ADDRESS_FOR_RRI_LOW, + .d_CE_DDR_ADDRESS_FOR_RRI_HIGH = CE_DDR_ADDRESS_FOR_RRI_HIGH, + .d_HOST_IE_ADDRESS = HOST_IE_ADDRESS, + .d_HOST_IE_REG1_CE_LSB = HOST_IE_REG1_CE_LSB, + .d_HOST_IE_ADDRESS_2 = HOST_IE_ADDRESS_2, + .d_HOST_IE_REG2_CE_LSB = HOST_IE_REG2_CE_LSB, + .d_HOST_IE_ADDRESS_3 = HOST_IE_ADDRESS_3, + .d_HOST_IE_REG3_CE_LSB = HOST_IE_REG3_CE_LSB, + .d_HOST_IE_COPY_COMPLETE_MASK = HOST_IE_COPY_COMPLETE_MASK, + .d_SR_BA_ADDRESS = SR_BA_ADDRESS, + .d_SR_BA_ADDRESS_HIGH = SR_BA_ADDRESS_HIGH, + .d_SR_SIZE_ADDRESS = SR_SIZE_ADDRESS, + .d_CE_CTRL1_ADDRESS = CE_CTRL1_ADDRESS, + .d_CE_CTRL1_DMAX_LENGTH_MASK = CE_CTRL1_DMAX_LENGTH_MASK, + .d_DR_BA_ADDRESS = DR_BA_ADDRESS, + .d_DR_BA_ADDRESS_HIGH = DR_BA_ADDRESS_HIGH, + .d_DR_SIZE_ADDRESS = DR_SIZE_ADDRESS, + .d_CE_CMD_REGISTER = CE_CMD_REGISTER, + .d_CE_MSI_ADDRESS = CE_MSI_ADDRESS, + .d_CE_MSI_ADDRESS_HIGH = CE_MSI_ADDRESS_HIGH, + .d_CE_MSI_DATA = CE_MSI_DATA, + .d_CE_MSI_ENABLE_BIT = CE_MSI_ENABLE_BIT, + .d_MISC_IE_ADDRESS = MISC_IE_ADDRESS, + .d_MISC_IS_AXI_ERR_MASK = MISC_IS_AXI_ERR_MASK, + .d_MISC_IS_DST_ADDR_ERR_MASK = MISC_IS_DST_ADDR_ERR_MASK, + .d_MISC_IS_SRC_LEN_ERR_MASK = MISC_IS_SRC_LEN_ERR_MASK, + .d_MISC_IS_DST_MAX_LEN_VIO_MASK = MISC_IS_DST_MAX_LEN_VIO_MASK, + .d_MISC_IS_DST_RING_OVERFLOW_MASK = MISC_IS_DST_RING_OVERFLOW_MASK, + .d_MISC_IS_SRC_RING_OVERFLOW_MASK = MISC_IS_SRC_RING_OVERFLOW_MASK, + .d_SRC_WATERMARK_LOW_LSB = SRC_WATERMARK_LOW_LSB, + .d_SRC_WATERMARK_HIGH_LSB = SRC_WATERMARK_HIGH_LSB, + .d_DST_WATERMARK_LOW_LSB = DST_WATERMARK_LOW_LSB, + .d_DST_WATERMARK_HIGH_LSB = DST_WATERMARK_HIGH_LSB, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK + = CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB + = CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB, + .d_CE_CTRL1_DMAX_LENGTH_LSB = CE_CTRL1_DMAX_LENGTH_LSB, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK + = CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK + = CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB + = CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB + = CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_IDX_UPD_EN_MASK = CE_CTRL1_IDX_UPD_EN_MASK, + .d_CE_WRAPPER_DEBUG_OFFSET = CE_WRAPPER_DEBUG_OFFSET, + .d_CE_WRAPPER_DEBUG_SEL_MSB = CE_WRAPPER_DEBUG_SEL_MSB, + .d_CE_WRAPPER_DEBUG_SEL_LSB = CE_WRAPPER_DEBUG_SEL_LSB, + .d_CE_WRAPPER_DEBUG_SEL_MASK = CE_WRAPPER_DEBUG_SEL_MASK, + .d_CE_DEBUG_OFFSET = CE_DEBUG_OFFSET, + .d_CE_DEBUG_SEL_MSB = CE_DEBUG_SEL_MSB, + .d_CE_DEBUG_SEL_LSB = CE_DEBUG_SEL_LSB, + .d_CE_DEBUG_SEL_MASK = CE_DEBUG_SEL_MASK, + .d_CE0_BASE_ADDRESS = CE0_BASE_ADDRESS, + .d_CE1_BASE_ADDRESS = CE1_BASE_ADDRESS, + .d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES + = A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES, + .d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS + = A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS +}; + +struct ce_reg_def *MY_CEREG_DEF = &my_ce_reg_def; + +#else +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/target_type.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/target_type.h new file mode 100644 index 0000000000000000000000000000000000000000..8a5fc373e1ee54985e10096da16e8e21c8d3fc72 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/target_type.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _TARGET_TYPE_H_ +#define _TARGET_TYPE_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* Header files */ +#include "bmi_msg.h" + +/* TARGET definition needs to be abstracted in fw common + * header files, below is the placeholder till WIN codebase + * moved to latest copy of fw common header files. + */ +/* For Adrastea target */ +#define TARGET_TYPE_ADRASTEA 19 +#ifndef TARGET_TYPE_QCA8074 +#define TARGET_TYPE_QCA8074 20 +#endif +#ifndef TARGET_TYPE_QCA6290 +#define TARGET_TYPE_QCA6290 21 +#endif +#ifndef TARGET_TYPE_QCN7605 +#define TARGET_TYPE_QCN7605 22 +#endif +#ifndef TARGET_TYPE_QCA6390 +#define TARGET_TYPE_QCA6390 23 +#endif +#ifndef TARGET_TYPE_QCA8074V2 +#define TARGET_TYPE_QCA8074V2 24 +#endif +/* For Cypress */ +#ifndef TARGET_TYPE_QCA6018 +#define TARGET_TYPE_QCA6018 25 +#endif +#ifndef TARGET_TYPE_QCN9000 +#define TARGET_TYPE_QCN9000 26 +#endif +/* HastingsPrime */ +#ifndef TARGET_TYPE_QCA6490 +#define TARGET_TYPE_QCA6490 27 +#endif + +/* Moselle */ +#ifndef TARGET_TYPE_QCA6750 +#define TARGET_TYPE_QCA6750 28 +#endif + +#ifndef TARGET_TYPE_QCA5018 +#define TARGET_TYPE_QCA5018 29 +#endif + +#ifndef TARGET_TYPE_QCN6122 +#define TARGET_TYPE_QCN6122 30 +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _TARGET_TYPE_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/targetdef.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/targetdef.h new file mode 100644 index 0000000000000000000000000000000000000000..8bd957216bc19d8360282ceb839733216b83a38a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/targetdef.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2013-2016,2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef TARGETDEFS_H_ +#define TARGETDEFS_H_ + +#include +#include +#include +#include "target_reg_init.h" + +extern struct targetdef_s *AR6002_TARGETdef; +extern struct targetdef_s *AR6003_TARGETdef; +extern struct targetdef_s *AR6004_TARGETdef; +extern struct targetdef_s *AR9888_TARGETdef; +extern struct targetdef_s *AR9888V2_TARGETdef; +extern struct targetdef_s *AR6320_TARGETdef; +extern struct targetdef_s *AR900B_TARGETdef; +extern struct targetdef_s *QCA9984_TARGETdef; +extern struct targetdef_s *QCA9888_TARGETdef; +extern struct targetdef_s *QCA6290_TARGETdef; +extern struct targetdef_s *QCA6390_TARGETdef; +extern struct targetdef_s *QCA6490_TARGETdef; +extern struct targetdef_s *QCA6750_TARGETdef; + +#ifdef ATH_AHB +extern struct targetdef_s *IPQ4019_TARGETdef; +#endif +extern struct targetdef_s *QCA8074_TARGETdef; +extern struct targetdef_s *QCA8074V2_TARGETDEF; +extern struct targetdef_s *QCA6018_TARGETDEF; +extern struct targetdef_s *QCN9000_TARGETDEF; + +extern struct ce_reg_def *AR6002_CE_TARGETdef; +extern struct ce_reg_def *AR6003_CE_TARGETdef; +extern struct ce_reg_def *AR6004_CE_TARGETdef; +extern struct ce_reg_def *AR9888_CE_TARGETdef; +extern struct ce_reg_def *AR9888V2_CE_TARGETdef; +extern struct ce_reg_def *AR6320_CE_TARGETdef; +extern struct ce_reg_def *AR900B_CE_TARGETdef; +extern struct ce_reg_def *QCA9984_CE_TARGETdef; +extern struct ce_reg_def *QCA9888_CE_TARGETdef; +extern struct ce_reg_def *QCA6290_CE_TARGETdef; +extern struct ce_reg_def *QCA6390_CE_TARGETdef; +extern struct ce_reg_def *QCA6490_CE_TARGETdef; +extern struct ce_reg_def *QCA6750_CE_TARGETdef; +#ifdef ATH_AHB +extern struct ce_reg_def *IPQ4019_CE_TARGETdef; +#endif +extern struct ce_reg_def *QCA8074_CE_TARGETdef; +extern struct ce_reg_def *QCA8074V2_CE_TARGETDEF; +extern struct ce_reg_def *QCA6018_CE_TARGETDEF; +extern struct ce_reg_def *QCN9000_CE_TARGETDEF; + + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/adrastea_reg_def.h b/drivers/staging/qca-wifi-host-cmn/hif/src/adrastea_reg_def.h new file mode 100644 index 0000000000000000000000000000000000000000..1e88b0581cc5d07d2fb4b92ca1dc2303b84aeb53 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/adrastea_reg_def.h @@ -0,0 +1,2927 @@ +/* + * Copyright (c) 2015-2016, 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef ADRASTEA_REG_DEF_H +#define ADRASTEA_REG_DEF_H + +/* + * Start auto-generated headers from register parser + * + * DO NOT CHANGE MANUALLY +*/ + + +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__SRC_FLUSH___S 1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW (0x00241000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__SRC_RING_BYTE_SWAP_EN___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE___M 0x00000001 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__CE_INTR_MISC_P___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2 (0x00030028) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__FORCE_WAKE_CLEAR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__EXTERNAL_INTR___POR 0x000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW (0x00244000) +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SOFT_RESET___M 0x00000001 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR___M 0x000003FF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__HALT_STATUS___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE (0x00032060) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6 (0x00030038) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__CE_INTR_TIMEOUT_P___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH (0x00032064) +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID__BITS___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY___M 0x00FFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__ERR_RESP_CLEAR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__FORCE_WAKE_CLEAR___S 1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__ECAHB_TIMEOUT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__MCIM_INT___M 0x00000010 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__WLAN2_HW2SW_GRANT___M 0x00000080 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE__SIZE___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__ECAHB_TIMEOUT___M 0x00000010 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__SRC_RING_BYTE_SWAP_EN___S 17 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS___M 0x0003FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_HIGH_WATERMARK___M 0x00000002 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX__SRC_WR_INDEX___M 0x0000FFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY___M 0x00FFF000 +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID___M 0x00FFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH__BASE_ADDR_HIGH___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4 (0x00030030) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_LEN_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__CE_INTR_TIMEOUT_P___M 0x00000100 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__ECAHB_TIMEOUT___S 4 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__DIRTY_BIT_SET___M 0x00000001 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX (0x00240040) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS (0x00240038) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__INVALID_BB_1_INTR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SR_PLL_REF_MUX_SEL___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR (0x002F1008) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_MAX_LEN_VIO___M 0x00000080 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_LOW_WATERMARK___M 0x00000010 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB__STATUS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR___M 0x00000FFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__SRC_FLUSH___M 0x00000002 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20 (0x00030070) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12 (0x00032030) +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__CLOCK_GATE_DISABLE___M 0x00000002 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET__CE_INTR_LINE_HOST_P___POR 0x000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__EXTERNAL_INTR___S 18 +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS__SELECT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX__DST_WR_INDEX___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES___M 0x00000FFF +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB (0x00032070) +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__WLAN2_HW2SW_GRANT___M 0x00000080 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_OVERFLOW___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__WLAN2_HW2SW_GRANT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR__CE_INTR_LINE_HOST_P___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__PMM_SR_XO_SETTLE_TIMEOUT___S 9 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__COPY_COMPLETE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH__BASE_ADDR_HIGH___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE__SIZE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__DST_RING_HIGH_WATERMARK___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13 (0x00032034) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3 (0x0003002C) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID (0x000300E0) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22 (0x00032058) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_OVERFLOW___M 0x00000020 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__MSI_EN___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__VALUE_REG_UPDATED_WITH_INVALID_ADDR___M 0x00000020 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE__SIZE___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__VALUE_REG_UPDATED_WITH_INVALID_ADDR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__SRC_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14 (0x00030058) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH___M 0x0000007F +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__CLOCK_GATE_DISABLE___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__IDX_UPD_EN___S 19 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB__STATUS___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1 (0x00032004) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_OVERFLOW___S 5 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_HIGH_WATER_MARK_THRESHOLD___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__BMH_INT___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS__WCSS_CORE_WAKE_SLEEP_STATE___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_LOW_WATERMARK___M 0x00000004 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__DIRTY_BIT_SET_ENABLE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET__CE_INTR_LINE_HOST_P___M 0x00000FFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14 (0x00032038) +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SR_RF_XO_MUX_SEL___M 0x00000010 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__COPY_COMPLETE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__MISC___M 0x00000FFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__PMM_SR_XO_SETTLE_TIMEOUT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY__ENABLE___M 0x00000001 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LCMH_STROBE_INTERRUPT___S 1 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__DST_RING_LOW_WATERMARK___M 0x00000010 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_LOW_WATERMARK___S 4 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS___M 0x0000001F +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23 (0x0003205C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE (0x00240034) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY (0x0024D000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15 (0x0003005C) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WLAN1_SLP_TMR_INTR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI__CURRENT_SRRI___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE (0x0024002C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_LOW_WATERMARK___S 4 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__PMM_SR_XO_SETTLE_TIMEOUT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__WLAN2_HW2SW_GRANT___S 7 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH___M 0x0000001F +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__ADDRESS_BITS_17_TO_2___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__ECAHB_TIMEOUT___M 0x00000010 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY___M 0x01FFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__PARSER_INT___POR 0x000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__COPY_COMPLETE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__ADDRESS_BITS_17_TO_2___POR 0x0000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__ILL_REG___S 24 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_OVERFLOW___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS__STATE___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0 (0x00032000) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_LOW_WATER_MARK_THRESOLD___M 0xFFFF0000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR (0x00030014) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__CE_INTR_MISC_P___M 0x00000080 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20 (0x00032050) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LMH_INT___S 3 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI__CURRENT_DRRI___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__ERR_RESP_ENABLE___S 2 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WLAN2_SLP_TMR_INTR___M 0x00008000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX__SRC_WR_INDEX___POR 0x0000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__SRC_RING_BYTE_SWAP_EN___M 0x00020000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW (0x0024B000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET (0x002F1004) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2 (0x00032008) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH__DESC_SKIP_DWORD___S 5 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__SRC_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_LOW_WATER_MARK_THRESHOLD___M 0xFFFF0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE__FORCE_WAKE_ENABLE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH__BASE_ADDR_HIGH___POR 0x00 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__FORCE_WAKE_ENABLE___M 0x00000002 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__BMH_INT___M 0x00000001 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2__DST_AXI_MAX_LEN___S 2 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH__DESC_SKIP_DWORD___M 0x00000060 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__SMH_INT___S 6 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__EXTERNAL_INTR___M 0x0FFC0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI__CURRENT_DRRI___POR 0x0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__INVALID_ADDR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__WLAN1_HW2SW_GRANT___S 6 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__PMM_WCSS_WAKEUP_IRQ_ACK___S 8 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW (0x00245000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__SRC_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22 (0x00030078) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__SMH_INT___M 0x00000040 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2___POR 0x00000005 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__HALT_STATUS___S 3 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__DIRTY_BIT_SET___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__MISC___POR 0x000 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__FORCE_WAKE_ENABLE___S 1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_OVERFLOW___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__ADDRESS_BITS_17_TO_2___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2__DST_AXI_MAX_LEN___POR 0x1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK (0x0024004C) +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__ERR_RESP___M 0x00000004 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__PMM_WCSS_WAKEUP_IRQ_ACK___M 0x00000100 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__INVALID_BB_2_INTR___S 11 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17 (0x00030064) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW (0x0024000C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__INVALID_BB_1_INTR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID__BITS___M 0x00FFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH__DESC_SKIP_DWORD___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW (0x0024A000) +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__WLAN1_HW2SW_GRANT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__INVALID_ADDR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__MCIM_INT___S 4 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2__SRC_AXI_MAX_LEN___M 0x00000003 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_OVERFLOW___M 0x00000040 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__PMM_WCSS_WAKEUP_IRQ_ACK___M 0x00000100 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS (0x00030008) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SR_PLL_REF_MUX_SEL___S 3 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__MSI_EN___M 0x00010000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__PARSER_INT___M 0x000FF800 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_LEN_ERR___S 8 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__ECAHB_TIMEOUT___M 0x00000010 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19 (0x0003204C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__DST_RING_HIGH_WATERMARK___S 3 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5 (0x00030034) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_OVERFLOW___M 0x00000020 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR__CE_INTR_LINE_HOST_P___M 0x00000FFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__HOST___M 0x00FFF000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SR_RF_XO_MUX_SEL___S 4 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__MSI_EN___S 16 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__VALUE_REG_UPDATED_WITH_INVALID_ADDR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE___M 0x000FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__PARSER_INT___M 0x000FF800 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_HIGH_WATER_MARK_THRESHOLD___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WFSS_DBG_INTR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE__SIZE___M 0x0000FFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__SW_SLP_TMR_INTR___M 0x00010000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_HIGH_WATER_MARK_THRESHOLD___POR 0x0000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__PMM_WCSS_WAKEUP_IRQ_ACK___S 8 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB__STATUS___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_MAX_LEN_VIO___M 0x00000080 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE__SIZE___POR 0x0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5 (0x00032014) +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS (0x0003000C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__DIRTY_BIT_SET_CLEAR___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH__BASE_ADDR_HIGH___M 0x0000001F +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__SRC_FLUSH___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_LOW_WATER_MARK_THRESHOLD___S 16 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9 (0x00032024) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__COPY_COMPLETE___M 0x00000001 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__WRITE_ACCESS___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE___M 0x0000001F +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__COPY_COMPLETE___M 0x00000001 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__PARSER_INT___S 11 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__HOST___POR 0x000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_OVERFLOW___S 6 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WLAN1_SLP_TMR_INTR___M 0x00004000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__ERR_RESP_CLEAR___M 0x00000004 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__WRITE_ACCESS___M 0x00020000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__VALUE_REG_UPDATED_WITH_INVALID_ADDR___S 5 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DEST_MAX_LENGTH___M 0x0000FFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__HALT_STATUS___M 0x00000008 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LCMH_WCI2_INTERRUPT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_HIGH_WATERMARK___M 0x00000008 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1___POR 0x00000080 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__DIRTY_BIT_SET_CLEAR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__PMM_SR_XO_SETTLE_TIMEOUT___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18 (0x00032048) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__NOC_WCMN_INTR___M 0x00001000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK (0x00240050) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__DIRTY_BIT_SET___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB__STATUS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__DIRTY_BIT_SET_ENABLE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__IDX_UPD_EN___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__WLAN1_HW2SW_GRANT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW (0x00243000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE__START_OFFSET___S 16 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY (0x00030080) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW (0x00248000) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__SRC_RING_LOW_WATERMARK___S 2 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16 (0x00032040) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__DST_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS___M 0x0000000F +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE__START_OFFSET___S 16 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY (0x0024C000) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__WLAN2_HW2SW_GRANT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__COPY_COMPLETE___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__INVALID_BB_2_INTR___M 0x00000800 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DST_RING_BYTE_SWAP_EN___S 18 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS (0x00032078) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB__STATUS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH__SPARE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID__BITS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS__SELECT___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_LOW_WATERMARK___S 2 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LMH_INT___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB__STATUS___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12 (0x00030050) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS__SELECT___M 0x00000007 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__PMH_INT___M 0x00000020 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__ENABLE_APSS_FULL_ACCESS___M 0x00000004 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WLAN1_SLP_TMR_INTR___S 14 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__SRC_RING_LOW_WATERMARK___M 0x00000004 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DST_RING_BYTE_SWAP_EN___M 0x00040000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__PMM_WCSS_WAKEUP_IRQ_ACK___S 8 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_LEN_ERR___M 0x00000100 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY__ENABLE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7 (0x0003003C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI__CURRENT_DRRI___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH__SPARE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_MAX_LEN_VIO___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET (0x002F0084) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET__CE_INTR_LINE_HOST_P___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_HIGH_WATER_MARK_THRESHOLD___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__READ_ACCESS___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE__SIZE___POR 0x0000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__ENABLE_APSS_FULL_ACCESS___S 2 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES__CE_INTR_LINE_HOST_P___POR 0x000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY__ENABLE___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8 (0x00030040) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS (0x00240030) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_LOW_WATERMARK___M 0x00000010 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SOFT_RESET___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15 (0x0003203C) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_HIGH_WATERMARK___M 0x00000002 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__COPY_COMPLETE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH (0x00240004) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS__STATE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__WRITE_ACCESS___S 17 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_OVERFLOW___S 6 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__PMM_SR_XO_SETTLE_TIMEOUT___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__ECAHB_TIMEOUT___S 4 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DEST_MAX_LENGTH___POR 0x0080 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_LOW_WATER_MARK_THRESHOLD___POR 0x0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__PMM_SR_XO_SETTLE_TIMEOUT___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__WLAN1_HW2SW_GRANT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_HIGH_WATERMARK___S 3 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_HIGH_WATERMARK___S 3 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__PMM_WCSS_WAKEUP_IRQ_ACK___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW (0x00240000) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13 (0x00030054) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WLAN2_SLP_TMR_INTR___S 15 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH (0x00240010) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__PARSER_INT___S 11 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__ERR_RESP_CLEAR___S 2 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4 (0x00032010) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY___M 0x00000001 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__PMM_WCSS_WAKEUP_IRQ_ACK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__SW_SLP_TMR_INTR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__AXI_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_MAX_LEN_VIO___S 7 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__DST_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_MAX_LEN_VIO___S 7 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW (0x00246000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DST_RING_BYTE_SWAP_EN___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__INVALID_BB_2_INTR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__PMM_WCSS_WAKEUP_IRQ_ACK___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY__HOST___POR 0x000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__NOC_WCMN_INTR___S 12 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__PARSER_INT___POR 0x000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__DST_FLUSH___M 0x00000004 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23 (0x0003007C) +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL___M 0x0000001F +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_OVERFLOW___M 0x00000040 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__SMH_INT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__SW_SLP_TMR_INTR___S 16 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY__HOST___S 12 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH__BASE_ADDR_HIGH___POR 0x00 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_LEN_ERR___M 0x00000100 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__VALUE_REG_UPDATED_WITH_INVALID_ADDR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH__SPARE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__SRC_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE__FORCE_WAKE_ENABLE___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX__SRC_WR_INDEX___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB__STATUS___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__ERR_RESP_ENABLE___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__ILL_REG___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2__SRC_AXI_MAX_LEN___POR 0x1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1 (0x00240018) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19 (0x0003006C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX (0x0024003C) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD___M 0x0000000F +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__VALUE_REG_UPDATED_WITH_INVALID_ADDR___S 5 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI__CURRENT_SRRI___POR 0x0000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SR_RF_XO_MUX_SEL___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2___M 0x0000000F +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__INVALID_ADDR___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET___M 0x0FFFDDFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7 (0x0003201C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__SRC_RING_LOW_WATERMARK___M 0x00000004 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__INVALID_ADDR___S 3 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2__DST_AXI_MAX_LEN___M 0x0000000C +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__MISC___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__COPY_COMPLETE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_HIGH_WATER_MARK_THRESHOLD___POR 0x0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__CLOCK_GATE_DISABLE___S 1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__DIRTY_BIT_SET_CLEAR___M 0x00000001 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__ECAHB_TIMEOUT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__FORCE_WAKE___S 1 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB__STATUS___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__PMM_SR_XO_SETTLE_TIMEOUT___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__VALUE_REG_UPDATED_WITH_INVALID_ADDR___S 5 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB__STATUS___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__VALUE_REG_UPDATED_WITH_INVALID_ADDR___M 0x00000020 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__CE_INTR_MISC_P___S 7 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_OVERFLOW___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__INVALID_ADDR___S 3 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX__DST_WR_INDEX___M 0x0000FFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LCMH_STROBE_INTERRUPT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR___RWC QCSR_REG_WO +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__PMM_SR_XO_SETTLE_TIMEOUT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES__CE_INTR_LINE_HOST_P___M 0x00000FFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_LOW_WATER_MARK_THRESOLD___S 16 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__FORCE_WAKE___M 0x00000002 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16 (0x00030060) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS (0x00030144) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE__START_OFFSET___POR 0x0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SR_PLL_REF_MUX_SEL___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17 (0x00032044) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS___M 0x000003FF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WFSS_DBG_INTR___M 0x00020000 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB__STATUS___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__ENABLE_APSS_FULL_ACCESS___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__SRC_RING_HIGH_WATERMARK___M 0x00000002 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE__START_OFFSET___M 0xFFFF0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__HALT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__SRC_RING_HIGH_WATERMARK___S 1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__CE_INTR_TIMEOUT_P___S 8 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__COPY_COMPLETE___M 0x00000001 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE__START_OFFSET___POR 0x0000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS___M 0x000FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__ECAHB_TIMEOUT___S 4 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1___M 0x000FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__FORCE_WAKE_ENABLE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB__STATUS___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__INVALID_ADDR___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS___M 0x00000007 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__WLAN1_HW2SW_GRANT___S 6 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_HIGH_WATERMARK___S 1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW (0x00242000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__WLAN2_HW2SW_GRANT___S 7 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_LOW_WATERMARK___S 4 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DEST_MAX_LENGTH___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE (0x00030010) +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__FORCE_WAKE_CLEAR___M 0x00000002 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8 (0x00032020) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET___M 0x00000FFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__IDX_UPD_EN___M 0x00080000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__BMH_INT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SOFT_RESET___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__WLAN1_HW2SW_GRANT___S 6 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0 (0x00030020) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__PMH_INT___S 5 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY__HOST___M 0x00FFF000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX__DST_WR_INDEX___POR 0x0000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_HIGH_WATER_MARK_THRESHOLD___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__ILL_REG___M 0x01000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WLAN2_SLP_TMR_INTR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11 (0x0003004C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__PMH_INT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__INVALID_BB_1_INTR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_LOW_WATER_MARK_THRESOLD___POR 0x0000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB (0x0003206C) +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__PMM_WCSS_WAKEUP_IRQ_ACK___M 0x00000100 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__DIRTY_BIT_SET_ENABLE___M 0x00000001 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX___M 0x0000FFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE (0x00240014) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS__STATE___M 0x00000007 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11 (0x0003202C) +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI (0x00240048) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS__WCSS_CORE_WAKE_SLEEP_STATE___S 3 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW (0x00249000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__DST_RING_LOW_WATERMARK___S 4 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_LEN_ERR___S 8 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__ERR_RESP_ENABLE___M 0x00000004 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LCMH_WCI2_INTERRUPT___S 2 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__HALT___M 0x00000001 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__SRC_RING_HIGH_WATERMARK___S 1 +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS__WCSS_CORE_WAKE_SLEEP_STATE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__READ_ACCESS___S 16 +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__READ_ACCESS___M 0x00010000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_HIGH_WATERMARK___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR__CE_INTR_LINE_HOST_P___POR 0x000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL (0x00030000) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI (0x00240044) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_MAX_LEN_VIO___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY__BITS___M 0x00FFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE___M 0x000003FF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD (0x00240020) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_LOW_WATERMARK___M 0x00000004 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__NOC_WCMN_INTR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__HOST___S 12 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__COPY_COMPLETE___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB__STATUS___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES (0x002F1000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9 (0x00030044) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6 (0x00032018) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21 (0x00032054) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH__BASE_ADDR_HIGH___M 0x0000001F +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY___S 12 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__HALT___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW (0x00247000) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18 (0x00030068) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__AXI_ERR___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__WLAN1_HW2SW_GRANT___M 0x00000040 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_LOW_WATERMARK___M 0x00000010 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__INVALID_ADDR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_HIGH_WATERMARK___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__ERR_RESP___S 2 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE (0x00240008) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY__BITS___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_HIGH_WATERMARK___S 3 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__WLAN1_HW2SW_GRANT___M 0x00000040 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LMH_INT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__WLAN2_HW2SW_GRANT___M 0x00000080 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1 (0x00030024) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2 (0x0024001C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES__CE_INTR_LINE_HOST_P___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__WLAN2_HW2SW_GRANT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LCMH_STROBE_INTERRUPT___M 0x00000002 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__VALUE_REG_UPDATED_WITH_INVALID_ADDR___M 0x00000020 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__MCIM_INT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__FORCE_WAKE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21 (0x00030074) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY__BITS___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__DST_FLUSH___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__AXI_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__WLAN2_HW2SW_GRANT___S 7 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_LEN_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB (0x00032074) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE__FORCE_WAKE_ENABLE___M 0x00000001 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__DST_FLUSH___S 2 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__SRC_RING_LOW_WATERMARK___S 2 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE__START_OFFSET___M 0xFFFF0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10 (0x00030048) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_OVERFLOW___S 5 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI__CURRENT_SRRI___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__SRC_RING_HIGH_WATERMARK___M 0x00000002 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__COPY_COMPLETE___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__ERR_RESP___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__COPY_COMPLETE___M 0x00000001 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__INVALID_ADDR___S 3 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__ECAHB_TIMEOUT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_LOW_WATERMARK___S 2 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2__SRC_AXI_MAX_LEN___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB (0x00032068) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WFSS_DBG_INTR___S 17 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__PMM_SR_XO_SETTLE_TIMEOUT___M 0x00000200 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__WLAN1_HW2SW_GRANT___M 0x00000040 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY (0x00030004) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LCMH_WCI2_INTERRUPT___M 0x00000004 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10 (0x00032028) +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__INVALID_ADDR___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3 (0x0003200C) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_HIGH_WATERMARK___S 1 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12__ADDRESS_REGISTER___S 0 + + +/* End auto-generated headers from register parser */ + +#define A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_INDEX_BASE_LOW 0x0024C004 +#define A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_INDEX_BASE_HIGH 0x0024C008 + +#define MISSING 0 +#define MISSING_FOR_ADRASTEA MISSING +#define ADRASTEA_PCIE_LOCAL_REG_BASE_ADDRESS 0 +#define ADRASTEA_WIFI_RTC_REG_BASE_ADDRESS 0x45000 +#define ADRASTEA_RTC_SOC_REG_BASE_ADDRESS 0x113000 +#define ADRASTEA_GPIO_ATHR_WLAN_REG_BASE_ADDRESS 0x85000 +#define ADRASTEA_SI_REG_BASE_ADDRESS 0x84000 +#define ADRASTEA_SOC_CORE_REG_BASE_ADDRESS 0x113000 +#define ADRASTEA_CE_WRAPPER_REG_CSR_BASE_ADDRESS 0xC000 +#define ADRASTEA_MAC_WIFICMN_REG_BASE_ADDRESS MISSING + +/* Base Addresses */ +#define ADRASTEA_RTC_SOC_BASE_ADDRESS 0x00000000 +#define ADRASTEA_RTC_WMAC_BASE_ADDRESS 0x00000000 +#define ADRASTEA_MAC_COEX_BASE_ADDRESS 0x0000f000 +#define ADRASTEA_BT_COEX_BASE_ADDRESS 0x00002000 +#define ADRASTEA_SOC_PCIE_BASE_ADDRESS 0x00130000 +#define ADRASTEA_SOC_CORE_BASE_ADDRESS 0x00000000 +#define ADRASTEA_WLAN_UART_BASE_ADDRESS 0x00111000 +#define ADRASTEA_WLAN_SI_BASE_ADDRESS 0x00010000 +#define ADRASTEA_WLAN_GPIO_BASE_ADDRESS 0x00000000 +#define ADRASTEA_WLAN_ANALOG_INTF_BASE_ADDRESS 0x00000000 +#define ADRASTEA_WLAN_MAC_BASE_ADDRESS 0x00000000 +#define ADRASTEA_EFUSE_BASE_ADDRESS 0x00024000 +#define ADRASTEA_FPGA_REG_BASE_ADDRESS 0x00039000 +#define ADRASTEA_WLAN_UART2_BASE_ADDRESS 0x00054c00 + +#define ADRASTEA_CE_WRAPPER_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY +#define ADRASTEA_CE0_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW +#define ADRASTEA_CE1_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW +#define ADRASTEA_CE2_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW +#define ADRASTEA_CE3_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW +#define ADRASTEA_CE4_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW +#define ADRASTEA_CE5_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW +#define ADRASTEA_CE6_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW +#define ADRASTEA_CE7_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW +#define ADRASTEA_CE8_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW +#define ADRASTEA_CE9_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW +#define ADRASTEA_CE10_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW +#define ADRASTEA_CE11_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW + +#define ADRASTEA_A_SOC_PCIE_SOC_PCIE_REG MISSING +#define ADRASTEA_DBI_BASE_ADDRESS MISSING +#define ADRASTEA_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS MISSING +#define ADRASTEA_WIFICMN_BASE_ADDRESS MISSING +#define ADRASTEA_BOARD_DATA_SZ MISSING +#define ADRASTEA_BOARD_EXT_DATA_SZ MISSING +#define ADRASTEA_A_SOC_PCIE_PCIE_BAR0_START MISSING +#define ADRASTEA_A_SOC_CORE_SCRATCH_0_ADDRESS MISSING +#define ADRASTEA_A_SOC_CORE_SPARE_0_REGISTER MISSING +#define ADRASTEA_PCIE_INTR_FIRMWARE_ROUTE_MASK MISSING +#define ADRASTEA_SCRATCH_3_ADDRESS MISSING +#define ADRASTEA_TARG_DRAM_START 0x00400000 +#define ADRASTEA_SOC_SYSTEM_SLEEP_OFFSET 0x000000c0 +#define ADRASTEA_SOC_RESET_CONTROL_OFFSET \ + (0x00000000 + ADRASTEA_RTC_SOC_REG_BASE_ADDRESS) +#define ADRASTEA_SOC_CLOCK_CONTROL_OFFSET \ + (0x00000028 + ADRASTEA_RTC_SOC_REG_BASE_ADDRESS) +#define ADRASTEA_SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001 +#define ADRASTEA_SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001 +#define ADRASTEA_WLAN_GPIO_PIN0_ADDRESS \ + (0x50 + ADRASTEA_GPIO_ATHR_WLAN_REG_BASE_ADDRESS) +#define ADRASTEA_WLAN_GPIO_PIN1_ADDRESS \ + (0x54 + ADRASTEA_GPIO_ATHR_WLAN_REG_BASE_ADDRESS) +#define ADRASTEA_WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 +#define ADRASTEA_WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 +#define ADRASTEA_SOC_CPU_CLOCK_OFFSET 0x00000020 +#define ADRASTEA_SOC_LPO_CAL_OFFSET \ + (0xe0 + ADRASTEA_RTC_SOC_REG_BASE_ADDRESS) +#define ADRASTEA_WLAN_GPIO_PIN10_ADDRESS \ + (0x78 + ADRASTEA_GPIO_ATHR_WLAN_REG_BASE_ADDRESS) +#define ADRASTEA_WLAN_GPIO_PIN11_ADDRESS \ + (0x7c + ADRASTEA_GPIO_ATHR_WLAN_REG_BASE_ADDRESS) +#define ADRASTEA_WLAN_GPIO_PIN12_ADDRESS \ + (0x80 + ADRASTEA_GPIO_ATHR_WLAN_REG_BASE_ADDRESS) +#define ADRASTEA_WLAN_GPIO_PIN13_ADDRESS \ + (0x84 + ADRASTEA_GPIO_ATHR_WLAN_REG_BASE_ADDRESS) +#define ADRASTEA_SOC_CPU_CLOCK_STANDARD_LSB 0 +#define ADRASTEA_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +#define ADRASTEA_SOC_LPO_CAL_ENABLE_LSB 20 +#define ADRASTEA_SOC_LPO_CAL_ENABLE_MASK 0x00100000 + +#define ADRASTEA_WLAN_SYSTEM_SLEEP_DISABLE_LSB 0 +#define ADRASTEA_WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 +#define ADRASTEA_WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000002 +#define ADRASTEA_WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000001 +#define ADRASTEA_SI_CONFIG_BIDIR_OD_DATA_LSB 18 +#define ADRASTEA_SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 +#define ADRASTEA_SI_CONFIG_I2C_LSB 16 +#define ADRASTEA_SI_CONFIG_I2C_MASK 0x00010000 +#define ADRASTEA_SI_CONFIG_POS_SAMPLE_LSB 7 +#define ADRASTEA_SI_CONFIG_POS_SAMPLE_MASK 0x00000080 +#define ADRASTEA_SI_CONFIG_INACTIVE_CLK_LSB 4 +#define ADRASTEA_SI_CONFIG_INACTIVE_CLK_MASK 0x00000010 +#define ADRASTEA_SI_CONFIG_INACTIVE_DATA_LSB 5 +#define ADRASTEA_SI_CONFIG_INACTIVE_DATA_MASK 0x00000020 +#define ADRASTEA_SI_CONFIG_DIVIDER_LSB 0 +#define ADRASTEA_SI_CONFIG_DIVIDER_MASK 0x0000000f +#define ADRASTEA_SI_CONFIG_OFFSET (0x00000000 + ADRASTEA_SI_REG_BASE_ADDRESS) +#define ADRASTEA_SI_TX_DATA0_OFFSET (0x00000008 + ADRASTEA_SI_REG_BASE_ADDRESS) +#define ADRASTEA_SI_TX_DATA1_OFFSET (0x0000000c + ADRASTEA_SI_REG_BASE_ADDRESS) +#define ADRASTEA_SI_RX_DATA0_OFFSET (0x00000010 + ADRASTEA_SI_REG_BASE_ADDRESS) +#define ADRASTEA_SI_RX_DATA1_OFFSET (0x00000014 + ADRASTEA_SI_REG_BASE_ADDRESS) +#define ADRASTEA_SI_CS_OFFSET (0x00000004 + ADRASTEA_SI_REG_BASE_ADDRESS) +#define ADRASTEA_SI_CS_DONE_ERR_MASK 0x00000400 +#define ADRASTEA_SI_CS_DONE_INT_MASK 0x00000200 +#define ADRASTEA_SI_CS_START_LSB 8 +#define ADRASTEA_SI_CS_START_MASK 0x00000100 +#define ADRASTEA_SI_CS_RX_CNT_LSB 4 +#define ADRASTEA_SI_CS_RX_CNT_MASK 0x000000f0 +#define ADRASTEA_SI_CS_TX_CNT_LSB 0 +#define ADRASTEA_SI_CS_TX_CNT_MASK 0x0000000f +#define ADRASTEA_CE_COUNT 12 +#define ADRASTEA_SR_WR_INDEX_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX \ + - ADRASTEA_CE0_BASE_ADDRESS) +#define ADRASTEA_DST_WATERMARK_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK \ + - ADRASTEA_CE0_BASE_ADDRESS) +#define ADRASTEA_RX_MSDU_END_4_FIRST_MSDU_LSB 14 +#define ADRASTEA_RX_MSDU_END_4_FIRST_MSDU_MASK 0x00004000 +#define ADRASTEA_RX_MPDU_START_0_SEQ_NUM_LSB 16 +#define ADRASTEA_RX_MPDU_START_0_SEQ_NUM_MASK 0x0fff0000 +#define ADRASTEA_RX_MPDU_START_2_PN_47_32_LSB 0 +#define ADRASTEA_RX_MPDU_START_2_PN_47_32_MASK 0x0000ffff +#define ADRASTEA_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB 16 +#define ADRASTEA_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK 0xffff0000 +#define ADRASTEA_RX_MSDU_END_4_LAST_MSDU_LSB 15 +#define ADRASTEA_RX_MSDU_END_4_LAST_MSDU_MASK 0x00008000 +#define ADRASTEA_RX_ATTENTION_0_MCAST_BCAST_LSB 2 +#define ADRASTEA_RX_ATTENTION_0_MCAST_BCAST_MASK 0x00000004 +#define ADRASTEA_RX_ATTENTION_0_FRAGMENT_LSB 13 +#define ADRASTEA_RX_ATTENTION_0_FRAGMENT_MASK 0x00002000 +#define ADRASTEA_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK 0x08000000 +#define ADRASTEA_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB 16 +#define ADRASTEA_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK 0x00ff0000 +#define ADRASTEA_RX_MSDU_START_0_MSDU_LENGTH_LSB 0 +#define ADRASTEA_RX_MSDU_START_0_MSDU_LENGTH_MASK 0x00003fff + +#define ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_OFFSET 0x00000008 +#define ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_LSB 8 +#define ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_MASK 0x00000300 +#define ADRASTEA_RX_MPDU_START_0_ENCRYPTED_LSB 13 +#define ADRASTEA_RX_MPDU_START_0_ENCRYPTED_MASK 0x00002000 +#define ADRASTEA_RX_ATTENTION_0_MORE_DATA_MASK 0x00000400 +#define ADRASTEA_RX_ATTENTION_0_MSDU_DONE_MASK 0x80000000 +#define ADRASTEA_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK 0x00040000 + +#define ADRASTEA_DST_WR_INDEX_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_SRC_WATERMARK_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_SRC_WATERMARK_LOW_MASK 0xffff0000 +#define ADRASTEA_SRC_WATERMARK_HIGH_MASK 0x0000ffff +#define ADRASTEA_DST_WATERMARK_LOW_MASK 0xffff0000 +#define ADRASTEA_DST_WATERMARK_HIGH_MASK 0x0000ffff + +#define ADRASTEA_CURRENT_SRRI_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_CURRENT_DRRI_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002 +#define ADRASTEA_HOST_IS_SRC_RING_LOW_WATERMARK_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__SRC_RING_LOW_WATERMARK___M + +#define ADRASTEA_HOST_IS_DST_RING_HIGH_WATERMARK_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_HIGH_WATERMARK___M + +#define ADRASTEA_HOST_IS_DST_RING_LOW_WATERMARK_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_LOW_WATERMARK___M + +#define ADRASTEA_HOST_IS_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS \ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_MISC_IS_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS \ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_HOST_IS_COPY_COMPLETE_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__COPY_COMPLETE___M + +#define ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS_OFFSET \ + (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY\ + - ADRASTEA_CE_WRAPPER_BASE_ADDRESS) + +/* + * Base address where the CE source and destination ring read + * indices are written to be viewed by host. + */ + +#define ADRASTEA_CE_DDR_ADDRESS_FOR_RRI_LOW \ + (A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_INDEX_BASE_LOW\ + - ADRASTEA_CE_WRAPPER_BASE_ADDRESS) + +#define ADRASTEA_CE_DDR_ADDRESS_FOR_RRI_HIGH \ + (A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_INDEX_BASE_HIGH - ADRASTEA_CE_WRAPPER_BASE_ADDRESS) + +#define ADRASTEA_HOST_IE_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_HOST_IE_COPY_COMPLETE_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__COPY_COMPLETE___M + +#define ADRASTEA_SR_BA_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_SR_BA_HIGH_OFFSET \ + (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH \ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_SR_SIZE_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE \ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_CE_CTRL1_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1 \ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_CE_CTRL1_DMAX_LENGTH_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DEST_MAX_LENGTH___M + +#define ADRASTEA_DR_BA_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_DR_BA_HIGH_OFFSET \ + (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_DR_SIZE_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_CE_CMD_REGISTER_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_MISC_IE_OFFSET \ + (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_MISC_IS_AXI_ERR_MASK 0x00000100 + +#define ADRASTEA_MISC_IS_DST_ADDR_ERR_MASK 0x00000200 + +#define ADRASTEA_MISC_IS_AXI_TIMEOUT_ERR \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__AXI_TIMEOUT_ERR___M + +#define ADRASTEA_MISC_IS_SRC_LEN_ERR_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_LEN_ERR___M + +#define ADRASTEA_MISC_IS_DST_MAX_LEN_VIO_MASK\ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_MAX_LEN_VIO___M + +#define ADRASTEA_MISC_IS_DST_RING_OVERFLOW_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_OVERFLOW___M + +#define ADRASTEA_MISC_IS_SRC_RING_OVERFLOW_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_OVERFLOW___M + +#define ADRASTEA_SRC_WATERMARK_LOW_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_LOW_WATER_MARK_THRESOLD___S + +#define ADRASTEA_SRC_WATERMARK_HIGH_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_HIGH_WATER_MARK_THRESHOLD___S + +#define ADRASTEA_DST_WATERMARK_LOW_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_LOW_WATER_MARK_THRESHOLD___S + +#define ADRASTEA_DST_WATERMARK_HIGH_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_HIGH_WATER_MARK_THRESHOLD___S + +#define ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY__HOST___M + +#define ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY__HOST___S + +#define ADRASTEA_CE_CTRL1_DMAX_LENGTH_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DEST_MAX_LENGTH___S + +#define ADRASTEA_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__SRC_RING_BYTE_SWAP_EN___M + +#define ADRASTEA_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DST_RING_BYTE_SWAP_EN___M + +#define ADRASTEA_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__SRC_RING_BYTE_SWAP_EN___S + +#define ADRASTEA_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DST_RING_BYTE_SWAP_EN___S + +#define ADRASTEA_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x0000004 +#define ADRASTEA_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB 2 +#define ADRASTEA_SOC_GLOBAL_RESET_ADDRESS \ + (0x0008 + ADRASTEA_PCIE_LOCAL_REG_BASE_ADDRESS) +#define ADRASTEA_RTC_STATE_ADDRESS \ + (0x0000 + ADRASTEA_PCIE_LOCAL_REG_BASE_ADDRESS) +#define ADRASTEA_RTC_STATE_COLD_RESET_MASK 0x400 + +#define ADRASTEA_PCIE_SOC_WAKE_RESET 0x00000000 +#define ADRASTEA_PCIE_SOC_WAKE_ADDRESS (ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE) +#define ADRASTEA_PCIE_SOC_WAKE_V_MASK 0x00000001 + +#define ADRASTEA_RTC_STATE_V_MASK 0x00000007 +#define ADRASTEA_RTC_STATE_V_LSB 0 +#define ADRASTEA_RTC_STATE_V_ON 5 +#define ADRASTEA_PCIE_LOCAL_BASE_ADDRESS 0x0 +#define ADRASTEA_FW_IND_EVENT_PENDING 1 +#define ADRASTEA_FW_IND_INITIALIZED 2 +#define ADRASTEA_FW_IND_HELPER 4 + +#define ADRASTEA_PCIE_INTR_FIRMWARE_MASK 0x00000000 +#define ADRASTEA_PCIE_INTR_CE0_MASK 0x00000100 +#define ADRASTEA_PCIE_INTR_CE_MASK_ALL 0x00001ffe + +#define ADRASTEA_CPU_INTR_ADDRESS 0xffffffff +#define ADRASTEA_SOC_LF_TIMER_CONTROL0_ADDRESS 0xffffffff +#define ADRASTEA_SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0xffffffff +#define ADRASTEA_SOC_LF_TIMER_STATUS0_ADDRESS 0xffffffff +#define ADRASTEA_SOC_RESET_CONTROL_ADDRESS \ + (0x00000000 + ADRASTEA_RTC_SOC_REG_BASE_ADDRESS) +#define ADRASTEA_SOC_RESET_CONTROL_CE_RST_MASK 0x0100 +#define ADRASTEA_SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040 +#define ADRASTEA_CORE_CTRL_ADDRESS (0x0000 + ADRASTEA_SOC_CORE_REG_BASE_ADDRESS) +#define ADRASTEA_CORE_CTRL_CPU_INTR_MASK 0x00002000 +#define ADRASTEA_LOCAL_SCRATCH_OFFSET 0x00000018 +#define ADRASTEA_CLOCK_GPIO_OFFSET 0xffffffff +#define ADRASTEA_CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0 +#define ADRASTEA_CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 +#define ADRASTEA_SOC_CHIP_ID_ADDRESS 0x000000f0 +#define ADRASTEA_SOC_CHIP_ID_VERSION_MASK 0xfffc0000 +#define ADRASTEA_SOC_CHIP_ID_VERSION_LSB 18 +#define ADRASTEA_SOC_CHIP_ID_REVISION_MASK 0x00000f00 +#define ADRASTEA_SOC_CHIP_ID_REVISION_LSB 8 +#define ADRASTEA_SOC_POWER_REG_OFFSET 0x0000010c + +/* Copy Engine Debug */ +#define ADRASTEA_WLAN_DEBUG_INPUT_SEL_OFFSET 0x0000010c +#define ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_MSB 3 +#define ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_LSB 0 +#define ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_MASK 0x0000000f +#define ADRASTEA_WLAN_DEBUG_CONTROL_OFFSET 0x00000108 +#define ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_MSB 0 +#define ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_LSB 0 +#define ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_MASK 0x00000001 +#define ADRASTEA_WLAN_DEBUG_OUT_OFFSET 0x00000110 +#define ADRASTEA_WLAN_DEBUG_OUT_DATA_MSB 19 +#define ADRASTEA_WLAN_DEBUG_OUT_DATA_LSB 0 +#define ADRASTEA_WLAN_DEBUG_OUT_DATA_MASK 0x000fffff +#define ADRASTEA_AMBA_DEBUG_BUS_OFFSET 0x0000011c +#define ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB 13 +#define ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB 8 +#define ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK 0x00003f00 +#define ADRASTEA_AMBA_DEBUG_BUS_SEL_MSB 4 +#define ADRASTEA_AMBA_DEBUG_BUS_SEL_LSB 0 +#define ADRASTEA_AMBA_DEBUG_BUS_SEL_MASK 0x0000001f +#define ADRASTEA_CE_WRAPPER_DEBUG_OFFSET 0x0008 +#define ADRASTEA_CE_WRAPPER_DEBUG_SEL_MSB 4 +#define ADRASTEA_CE_WRAPPER_DEBUG_SEL_LSB 0 +#define ADRASTEA_CE_WRAPPER_DEBUG_SEL_MASK 0x0000001f +#define ADRASTEA_CE_DEBUG_OFFSET 0x0054 +#define ADRASTEA_CE_DEBUG_SEL_MSB 5 +#define ADRASTEA_CE_DEBUG_SEL_LSB 0 +#define ADRASTEA_CE_DEBUG_SEL_MASK 0x0000003f +/* End */ + +/* PLL start */ +#define ADRASTEA_EFUSE_OFFSET 0x0000032c +#define ADRASTEA_EFUSE_XTAL_SEL_MSB 10 +#define ADRASTEA_EFUSE_XTAL_SEL_LSB 8 +#define ADRASTEA_EFUSE_XTAL_SEL_MASK 0x00000700 +#define ADRASTEA_BB_PLL_CONFIG_OFFSET 0x000002f4 +#define ADRASTEA_BB_PLL_CONFIG_OUTDIV_MSB 20 +#define ADRASTEA_BB_PLL_CONFIG_OUTDIV_LSB 18 +#define ADRASTEA_BB_PLL_CONFIG_OUTDIV_MASK 0x001c0000 +#define ADRASTEA_BB_PLL_CONFIG_FRAC_MSB 17 +#define ADRASTEA_BB_PLL_CONFIG_FRAC_LSB 0 +#define ADRASTEA_BB_PLL_CONFIG_FRAC_MASK 0x0003ffff +#define ADRASTEA_WLAN_PLL_SETTLE_TIME_MSB 10 +#define ADRASTEA_WLAN_PLL_SETTLE_TIME_LSB 0 +#define ADRASTEA_WLAN_PLL_SETTLE_TIME_MASK 0x000007ff +#define ADRASTEA_WLAN_PLL_SETTLE_OFFSET 0x0018 +#define ADRASTEA_WLAN_PLL_SETTLE_SW_MASK 0x000007ff +#define ADRASTEA_WLAN_PLL_SETTLE_RSTMASK 0xffffffff +#define ADRASTEA_WLAN_PLL_SETTLE_RESET 0x00000400 +#define ADRASTEA_WLAN_PLL_CONTROL_NOPWD_MSB 18 +#define ADRASTEA_WLAN_PLL_CONTROL_NOPWD_LSB 18 +#define ADRASTEA_WLAN_PLL_CONTROL_NOPWD_MASK 0x00040000 +#define ADRASTEA_WLAN_PLL_CONTROL_BYPASS_MSB 16 +#define ADRASTEA_WLAN_PLL_CONTROL_BYPASS_LSB 16 +#define ADRASTEA_WLAN_PLL_CONTROL_BYPASS_MASK 0x00010000 +#define ADRASTEA_WLAN_PLL_CONTROL_BYPASS_RESET 0x1 +#define ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_MSB 15 +#define ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_LSB 14 +#define ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_MASK 0x0000c000 +#define ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_RESET 0x0 +#define ADRASTEA_WLAN_PLL_CONTROL_REFDIV_MSB 13 +#define ADRASTEA_WLAN_PLL_CONTROL_REFDIV_LSB 10 +#define ADRASTEA_WLAN_PLL_CONTROL_REFDIV_MASK 0x00003c00 +#define ADRASTEA_WLAN_PLL_CONTROL_REFDIV_RESET 0x0 +#define ADRASTEA_WLAN_PLL_CONTROL_DIV_MSB 9 +#define ADRASTEA_WLAN_PLL_CONTROL_DIV_LSB 0 +#define ADRASTEA_WLAN_PLL_CONTROL_DIV_MASK 0x000003ff +#define ADRASTEA_WLAN_PLL_CONTROL_DIV_RESET 0x11 +#define ADRASTEA_WLAN_PLL_CONTROL_OFFSET 0x0014 +#define ADRASTEA_WLAN_PLL_CONTROL_SW_MASK 0x001fffff +#define ADRASTEA_WLAN_PLL_CONTROL_RSTMASK 0xffffffff +#define ADRASTEA_WLAN_PLL_CONTROL_RESET 0x00010011 +#define ADRASTEA_SOC_CORE_CLK_CTRL_OFFSET 0x00000114 +#define ADRASTEA_SOC_CORE_CLK_CTRL_DIV_MSB 2 +#define ADRASTEA_SOC_CORE_CLK_CTRL_DIV_LSB 0 +#define ADRASTEA_SOC_CORE_CLK_CTRL_DIV_MASK 0x00000007 +#define ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_MSB 5 +#define ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_LSB 5 +#define ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020 +#define ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_RESET 0x0 +#define ADRASTEA_RTC_SYNC_STATUS_OFFSET 0x0244 +#define ADRASTEA_SOC_CPU_CLOCK_OFFSET 0x00000020 +#define ADRASTEA_SOC_CPU_CLOCK_STANDARD_MSB 1 +#define ADRASTEA_SOC_CPU_CLOCK_STANDARD_LSB 0 +#define ADRASTEA_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +/* PLL end */ + +#define ADRASTEA_PCIE_INTR_CE_MASK(n) (ADRASTEA_PCIE_INTR_CE0_MASK << (n)) +#define ADRASTEA_DRAM_BASE_ADDRESS ADRASTEA_TARG_DRAM_START +#define ADRASTEA_FW_INDICATOR_ADDRESS \ + (ADRASTEA_WIFICMN_BASE_ADDRESS + ADRASTEA_SCRATCH_3_ADDRESS) +#define ADRASTEA_SYSTEM_SLEEP_OFFSET ADRASTEA_SOC_SYSTEM_SLEEP_OFFSET +#define ADRASTEA_WLAN_SYSTEM_SLEEP_OFFSET (0x002c + ADRASTEA_WIFI_RTC_REG_BASE_ADDRESS) +#define ADRASTEA_WLAN_RESET_CONTROL_OFFSET (0x0000 + ADRASTEA_WIFI_RTC_REG_BASE_ADDRESS) +#define ADRASTEA_CLOCK_CONTROL_OFFSET ADRASTEA_SOC_CLOCK_CONTROL_OFFSET +#define ADRASTEA_CLOCK_CONTROL_SI0_CLK_MASK \ + ADRASTEA_SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define ADRASTEA_RESET_CONTROL_MBOX_RST_MASK 0x00000004 +#define ADRASTEA_RESET_CONTROL_SI0_RST_MASK \ + ADRASTEA_SOC_RESET_CONTROL_SI0_RST_MASK +#define ADRASTEA_GPIO_BASE_ADDRESS ADRASTEA_WLAN_GPIO_BASE_ADDRESS +#define ADRASTEA_GPIO_PIN0_OFFSET ADRASTEA_WLAN_GPIO_PIN0_ADDRESS +#define ADRASTEA_GPIO_PIN1_OFFSET ADRASTEA_WLAN_GPIO_PIN1_ADDRESS +#define ADRASTEA_GPIO_PIN0_CONFIG_MASK ADRASTEA_WLAN_GPIO_PIN0_CONFIG_MASK +#define ADRASTEA_GPIO_PIN1_CONFIG_MASK ADRASTEA_WLAN_GPIO_PIN1_CONFIG_MASK +#define ADRASTEA_SI_BASE_ADDRESS 0x00000000 +#define ADRASTEA_CPU_CLOCK_OFFSET (0x20 + ADRASTEA_RTC_SOC_REG_BASE_ADDRESS) +#define ADRASTEA_LPO_CAL_OFFSET ADRASTEA_SOC_LPO_CAL_OFFSET +#define ADRASTEA_GPIO_PIN10_OFFSET ADRASTEA_WLAN_GPIO_PIN10_ADDRESS +#define ADRASTEA_GPIO_PIN11_OFFSET ADRASTEA_WLAN_GPIO_PIN11_ADDRESS +#define ADRASTEA_GPIO_PIN12_OFFSET ADRASTEA_WLAN_GPIO_PIN12_ADDRESS +#define ADRASTEA_GPIO_PIN13_OFFSET ADRASTEA_WLAN_GPIO_PIN13_ADDRESS +#define ADRASTEA_CPU_CLOCK_STANDARD_LSB 0 +#define ADRASTEA_CPU_CLOCK_STANDARD_MASK 0x1 +#define ADRASTEA_LPO_CAL_ENABLE_LSB ADRASTEA_SOC_LPO_CAL_ENABLE_LSB +#define ADRASTEA_LPO_CAL_ENABLE_MASK ADRASTEA_SOC_LPO_CAL_ENABLE_MASK +#define ADRASTEA_ANALOG_INTF_BASE_ADDRESS ADRASTEA_WLAN_ANALOG_INTF_BASE_ADDRESS +#define ADRASTEA_MBOX_BASE_ADDRESS 0x00008000 +#define ADRASTEA_INT_STATUS_ENABLE_ERROR_LSB MISSING +#define ADRASTEA_INT_STATUS_ENABLE_ERROR_MASK MISSING +#define ADRASTEA_INT_STATUS_ENABLE_CPU_LSB MISSING +#define ADRASTEA_INT_STATUS_ENABLE_CPU_MASK MISSING +#define ADRASTEA_INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define ADRASTEA_INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define ADRASTEA_INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define ADRASTEA_INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ADRASTEA_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ADRASTEA_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ADRASTEA_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ADRASTEA_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define ADRASTEA_COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define ADRASTEA_COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define ADRASTEA_INT_STATUS_ENABLE_ADDRESS MISSING +#define ADRASTEA_CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define ADRASTEA_CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define ADRASTEA_HOST_INT_STATUS_ADDRESS MISSING +#define ADRASTEA_CPU_INT_STATUS_ADDRESS MISSING +#define ADRASTEA_ERROR_INT_STATUS_ADDRESS MISSING +#define ADRASTEA_ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ADRASTEA_ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ADRASTEA_ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ADRASTEA_ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ADRASTEA_ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ADRASTEA_ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define ADRASTEA_COUNT_DEC_ADDRESS MISSING +#define ADRASTEA_HOST_INT_STATUS_CPU_MASK MISSING +#define ADRASTEA_HOST_INT_STATUS_CPU_LSB MISSING +#define ADRASTEA_HOST_INT_STATUS_ERROR_MASK MISSING +#define ADRASTEA_HOST_INT_STATUS_ERROR_LSB MISSING +#define ADRASTEA_HOST_INT_STATUS_COUNTER_MASK MISSING +#define ADRASTEA_HOST_INT_STATUS_COUNTER_LSB MISSING +#define ADRASTEA_RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define ADRASTEA_WINDOW_DATA_ADDRESS MISSING +#define ADRASTEA_WINDOW_READ_ADDR_ADDRESS MISSING +#define ADRASTEA_WINDOW_WRITE_ADDR_ADDRESS MISSING + +/* Shadow Registers - Start */ +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_0 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_1 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_2 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_3 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_4 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_5 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_6 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_7 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_8 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_9 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_10 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_11 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_12 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_13 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_14 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_15 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_16 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_17 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_18 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_19 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_20 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_21 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_22 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_23 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23 + +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_0 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_1 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_2 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_3 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_4 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_5 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_6 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_7 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_8 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_9 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_10 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_11 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_12 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_13 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_14 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_15 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_16 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_17 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_18 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_19 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_20 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_21 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_22 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_23 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23 + +/* Q6 iHelium emulation registers */ +#define ADRASTEA_A_SOC_CORE_PCIE_INTR_CAUSE_GRP1 0x00113018 +#define ADRASTEA_A_SOC_CORE_SPARE_1_REGISTER 0x00113184 +#define ADRASTEA_A_SOC_CORE_PCIE_INTR_CLR_GRP1 0x00113020 +#define ADRASTEA_A_SOC_CORE_PCIE_INTR_ENABLE_GRP1 0x00113010 +#define ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_0 0x00130040 +#define ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_1 0x00130044 + +#define ADRASTEA_HOST_ENABLE_REGISTER 0x00188000 +#define ADRASTEA_Q6_ENABLE_REGISTER_0 0x00188004 +#define ADRASTEA_Q6_ENABLE_REGISTER_1 0x00188008 +#define ADRASTEA_HOST_CAUSE_REGISTER 0x0018800c +#define ADRASTEA_Q6_CAUSE_REGISTER_0 0x00188010 +#define ADRASTEA_Q6_CAUSE_REGISTER_1 0x00188014 +#define ADRASTEA_HOST_CLEAR_REGISTER 0x00188018 +#define ADRASTEA_Q6_CLEAR_REGISTER_0 0x0018801c +#define ADRASTEA_Q6_CLEAR_REGISTER_1 0x00188020 + +#define ADRASTEA_A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA 0x08 +#define ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_2 0x0013005C +#define ADRASTEA_A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK 0x0 +/* end: Q6 iHelium emulation registers */ + +#define ADRASTEA_BYPASS_QMI_TEMP_REGISTER 0x00032064 +#define GENOA_OFFSET 0x800000 + +struct targetdef_s adrastea_targetdef = { + .d_RTC_SOC_BASE_ADDRESS = ADRASTEA_RTC_SOC_BASE_ADDRESS, + .d_RTC_WMAC_BASE_ADDRESS = ADRASTEA_RTC_WMAC_BASE_ADDRESS, + .d_SYSTEM_SLEEP_OFFSET = ADRASTEA_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_OFFSET = ADRASTEA_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_DISABLE_LSB = + ADRASTEA_WLAN_SYSTEM_SLEEP_DISABLE_LSB, + .d_WLAN_SYSTEM_SLEEP_DISABLE_MASK = + ADRASTEA_WLAN_SYSTEM_SLEEP_DISABLE_MASK, + .d_CLOCK_CONTROL_OFFSET = ADRASTEA_CLOCK_CONTROL_OFFSET, + .d_CLOCK_CONTROL_SI0_CLK_MASK = ADRASTEA_CLOCK_CONTROL_SI0_CLK_MASK, + .d_RESET_CONTROL_OFFSET = ADRASTEA_SOC_RESET_CONTROL_OFFSET, + .d_RESET_CONTROL_MBOX_RST_MASK = ADRASTEA_RESET_CONTROL_MBOX_RST_MASK, + .d_RESET_CONTROL_SI0_RST_MASK = ADRASTEA_RESET_CONTROL_SI0_RST_MASK, + .d_WLAN_RESET_CONTROL_OFFSET = ADRASTEA_WLAN_RESET_CONTROL_OFFSET, + .d_WLAN_RESET_CONTROL_COLD_RST_MASK = + ADRASTEA_WLAN_RESET_CONTROL_COLD_RST_MASK, + .d_WLAN_RESET_CONTROL_WARM_RST_MASK = + ADRASTEA_WLAN_RESET_CONTROL_WARM_RST_MASK, + .d_GPIO_BASE_ADDRESS = ADRASTEA_GPIO_BASE_ADDRESS, + .d_GPIO_PIN0_OFFSET = ADRASTEA_GPIO_PIN0_OFFSET, + .d_GPIO_PIN1_OFFSET = ADRASTEA_GPIO_PIN1_OFFSET, + .d_GPIO_PIN0_CONFIG_MASK = ADRASTEA_GPIO_PIN0_CONFIG_MASK, + .d_GPIO_PIN1_CONFIG_MASK = ADRASTEA_GPIO_PIN1_CONFIG_MASK, + .d_SI_CONFIG_BIDIR_OD_DATA_LSB = ADRASTEA_SI_CONFIG_BIDIR_OD_DATA_LSB, + .d_SI_CONFIG_BIDIR_OD_DATA_MASK = ADRASTEA_SI_CONFIG_BIDIR_OD_DATA_MASK, + .d_SI_CONFIG_I2C_LSB = ADRASTEA_SI_CONFIG_I2C_LSB, + .d_SI_CONFIG_I2C_MASK = ADRASTEA_SI_CONFIG_I2C_MASK, + .d_SI_CONFIG_POS_SAMPLE_LSB = ADRASTEA_SI_CONFIG_POS_SAMPLE_LSB, + .d_SI_CONFIG_POS_SAMPLE_MASK = ADRASTEA_SI_CONFIG_POS_SAMPLE_MASK, + .d_SI_CONFIG_INACTIVE_CLK_LSB = ADRASTEA_SI_CONFIG_INACTIVE_CLK_LSB, + .d_SI_CONFIG_INACTIVE_CLK_MASK = ADRASTEA_SI_CONFIG_INACTIVE_CLK_MASK, + .d_SI_CONFIG_INACTIVE_DATA_LSB = ADRASTEA_SI_CONFIG_INACTIVE_DATA_LSB, + .d_SI_CONFIG_INACTIVE_DATA_MASK = ADRASTEA_SI_CONFIG_INACTIVE_DATA_MASK, + .d_SI_CONFIG_DIVIDER_LSB = ADRASTEA_SI_CONFIG_DIVIDER_LSB, + .d_SI_CONFIG_DIVIDER_MASK = ADRASTEA_SI_CONFIG_DIVIDER_MASK, + .d_SI_BASE_ADDRESS = ADRASTEA_SI_BASE_ADDRESS, + .d_SI_CONFIG_OFFSET = ADRASTEA_SI_CONFIG_OFFSET, + .d_SI_TX_DATA0_OFFSET = ADRASTEA_SI_TX_DATA0_OFFSET, + .d_SI_TX_DATA1_OFFSET = ADRASTEA_SI_TX_DATA1_OFFSET, + .d_SI_RX_DATA0_OFFSET = ADRASTEA_SI_RX_DATA0_OFFSET, + .d_SI_RX_DATA1_OFFSET = ADRASTEA_SI_RX_DATA1_OFFSET, + .d_SI_CS_OFFSET = ADRASTEA_SI_CS_OFFSET, + .d_SI_CS_DONE_ERR_MASK = ADRASTEA_SI_CS_DONE_ERR_MASK, + .d_SI_CS_DONE_INT_MASK = ADRASTEA_SI_CS_DONE_INT_MASK, + .d_SI_CS_START_LSB = ADRASTEA_SI_CS_START_LSB, + .d_SI_CS_START_MASK = ADRASTEA_SI_CS_START_MASK, + .d_SI_CS_RX_CNT_LSB = ADRASTEA_SI_CS_RX_CNT_LSB, + .d_SI_CS_RX_CNT_MASK = ADRASTEA_SI_CS_RX_CNT_MASK, + .d_SI_CS_TX_CNT_LSB = ADRASTEA_SI_CS_TX_CNT_LSB, + .d_SI_CS_TX_CNT_MASK = ADRASTEA_SI_CS_TX_CNT_MASK, + .d_BOARD_DATA_SZ = ADRASTEA_BOARD_DATA_SZ, + .d_BOARD_EXT_DATA_SZ = ADRASTEA_BOARD_EXT_DATA_SZ, + .d_MBOX_BASE_ADDRESS = ADRASTEA_MBOX_BASE_ADDRESS, + .d_LOCAL_SCRATCH_OFFSET = ADRASTEA_LOCAL_SCRATCH_OFFSET, + .d_CPU_CLOCK_OFFSET = ADRASTEA_CPU_CLOCK_OFFSET, + .d_LPO_CAL_OFFSET = ADRASTEA_LPO_CAL_OFFSET, + .d_GPIO_PIN10_OFFSET = ADRASTEA_GPIO_PIN10_OFFSET, + .d_GPIO_PIN11_OFFSET = ADRASTEA_GPIO_PIN11_OFFSET, + .d_GPIO_PIN12_OFFSET = ADRASTEA_GPIO_PIN12_OFFSET, + .d_GPIO_PIN13_OFFSET = ADRASTEA_GPIO_PIN13_OFFSET, + .d_CLOCK_GPIO_OFFSET = ADRASTEA_CLOCK_GPIO_OFFSET, + .d_CPU_CLOCK_STANDARD_LSB = ADRASTEA_CPU_CLOCK_STANDARD_LSB, + .d_CPU_CLOCK_STANDARD_MASK = ADRASTEA_CPU_CLOCK_STANDARD_MASK, + .d_LPO_CAL_ENABLE_LSB = ADRASTEA_LPO_CAL_ENABLE_LSB, + .d_LPO_CAL_ENABLE_MASK = ADRASTEA_LPO_CAL_ENABLE_MASK, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB = ADRASTEA_CLOCK_GPIO_BT_CLK_OUT_EN_LSB, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK = + ADRASTEA_CLOCK_GPIO_BT_CLK_OUT_EN_MASK, + .d_ANALOG_INTF_BASE_ADDRESS = ADRASTEA_ANALOG_INTF_BASE_ADDRESS, + .d_WLAN_MAC_BASE_ADDRESS = ADRASTEA_WLAN_MAC_BASE_ADDRESS, + .d_FW_INDICATOR_ADDRESS = ADRASTEA_FW_INDICATOR_ADDRESS, + .d_DRAM_BASE_ADDRESS = ADRASTEA_DRAM_BASE_ADDRESS, + .d_SOC_CORE_BASE_ADDRESS = ADRASTEA_SOC_CORE_BASE_ADDRESS, + .d_CORE_CTRL_ADDRESS = ADRASTEA_CORE_CTRL_ADDRESS, + .d_CE_COUNT = ADRASTEA_CE_COUNT, + .d_MSI_NUM_REQUEST = MSI_NUM_REQUEST, + .d_MSI_ASSIGN_FW = MSI_ASSIGN_FW, + .d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL, + .d_PCIE_INTR_ENABLE_ADDRESS = ADRASTEA_HOST_ENABLE_REGISTER, + .d_PCIE_INTR_CLR_ADDRESS = ADRASTEA_HOST_CLEAR_REGISTER, + .d_PCIE_INTR_FIRMWARE_MASK = ADRASTEA_PCIE_INTR_FIRMWARE_MASK, + .d_PCIE_INTR_CE_MASK_ALL = ADRASTEA_PCIE_INTR_CE_MASK_ALL, + .d_CORE_CTRL_CPU_INTR_MASK = ADRASTEA_CORE_CTRL_CPU_INTR_MASK, + .d_SR_WR_INDEX_ADDRESS = ADRASTEA_SR_WR_INDEX_OFFSET, + .d_DST_WATERMARK_ADDRESS = ADRASTEA_DST_WATERMARK_OFFSET, + /* htt_rx.c */ + .d_RX_MSDU_END_4_FIRST_MSDU_MASK = + ADRASTEA_RX_MSDU_END_4_FIRST_MSDU_MASK, + .d_RX_MSDU_END_4_FIRST_MSDU_LSB = ADRASTEA_RX_MSDU_END_4_FIRST_MSDU_LSB, + .d_RX_MPDU_START_0_SEQ_NUM_MASK = ADRASTEA_RX_MPDU_START_0_SEQ_NUM_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_LSB = ADRASTEA_RX_MPDU_START_0_SEQ_NUM_LSB, + .d_RX_MPDU_START_2_PN_47_32_LSB = ADRASTEA_RX_MPDU_START_2_PN_47_32_LSB, + .d_RX_MPDU_START_2_PN_47_32_MASK = + ADRASTEA_RX_MPDU_START_2_PN_47_32_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK = + ADRASTEA_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB = + ADRASTEA_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB, + .d_RX_MSDU_END_4_LAST_MSDU_MASK = ADRASTEA_RX_MSDU_END_4_LAST_MSDU_MASK, + .d_RX_MSDU_END_4_LAST_MSDU_LSB = ADRASTEA_RX_MSDU_END_4_LAST_MSDU_LSB, + .d_RX_ATTENTION_0_MCAST_BCAST_MASK = + ADRASTEA_RX_ATTENTION_0_MCAST_BCAST_MASK, + .d_RX_ATTENTION_0_MCAST_BCAST_LSB = + ADRASTEA_RX_ATTENTION_0_MCAST_BCAST_LSB, + .d_RX_ATTENTION_0_FRAGMENT_MASK = ADRASTEA_RX_ATTENTION_0_FRAGMENT_MASK, + .d_RX_ATTENTION_0_FRAGMENT_LSB = ADRASTEA_RX_ATTENTION_0_FRAGMENT_LSB, + .d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK = + ADRASTEA_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK = + ADRASTEA_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB = + ADRASTEA_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB, + .d_RX_MSDU_START_0_MSDU_LENGTH_MASK = + ADRASTEA_RX_MSDU_START_0_MSDU_LENGTH_MASK, + .d_RX_MSDU_START_0_MSDU_LENGTH_LSB = + ADRASTEA_RX_MSDU_START_0_MSDU_LENGTH_LSB, + .d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET = + ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_OFFSET, + .d_RX_MSDU_START_2_DECAP_FORMAT_MASK = + ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_MASK, + .d_RX_MSDU_START_2_DECAP_FORMAT_LSB = + ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_LSB, + .d_RX_MPDU_START_0_ENCRYPTED_MASK = + ADRASTEA_RX_MPDU_START_0_ENCRYPTED_MASK, + .d_RX_MPDU_START_0_ENCRYPTED_LSB = + ADRASTEA_RX_MPDU_START_0_ENCRYPTED_LSB, + .d_RX_ATTENTION_0_MORE_DATA_MASK = + ADRASTEA_RX_ATTENTION_0_MORE_DATA_MASK, + .d_RX_ATTENTION_0_MSDU_DONE_MASK = + ADRASTEA_RX_ATTENTION_0_MSDU_DONE_MASK, + .d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK = + ADRASTEA_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK, + + /* PLL start */ + .d_EFUSE_OFFSET = ADRASTEA_EFUSE_OFFSET, + .d_EFUSE_XTAL_SEL_MSB = ADRASTEA_EFUSE_XTAL_SEL_MSB, + .d_EFUSE_XTAL_SEL_LSB = ADRASTEA_EFUSE_XTAL_SEL_LSB, + .d_EFUSE_XTAL_SEL_MASK = ADRASTEA_EFUSE_XTAL_SEL_MASK, + .d_BB_PLL_CONFIG_OFFSET = ADRASTEA_BB_PLL_CONFIG_OFFSET, + .d_BB_PLL_CONFIG_OUTDIV_MSB = ADRASTEA_BB_PLL_CONFIG_OUTDIV_MSB, + .d_BB_PLL_CONFIG_OUTDIV_LSB = ADRASTEA_BB_PLL_CONFIG_OUTDIV_LSB, + .d_BB_PLL_CONFIG_OUTDIV_MASK = ADRASTEA_BB_PLL_CONFIG_OUTDIV_MASK, + .d_BB_PLL_CONFIG_FRAC_MSB = ADRASTEA_BB_PLL_CONFIG_FRAC_MSB, + .d_BB_PLL_CONFIG_FRAC_LSB = ADRASTEA_BB_PLL_CONFIG_FRAC_LSB, + .d_BB_PLL_CONFIG_FRAC_MASK = ADRASTEA_BB_PLL_CONFIG_FRAC_MASK, + .d_WLAN_PLL_SETTLE_TIME_MSB = ADRASTEA_WLAN_PLL_SETTLE_TIME_MSB, + .d_WLAN_PLL_SETTLE_TIME_LSB = ADRASTEA_WLAN_PLL_SETTLE_TIME_LSB, + .d_WLAN_PLL_SETTLE_TIME_MASK = ADRASTEA_WLAN_PLL_SETTLE_TIME_MASK, + .d_WLAN_PLL_SETTLE_OFFSET = ADRASTEA_WLAN_PLL_SETTLE_OFFSET, + .d_WLAN_PLL_SETTLE_SW_MASK = ADRASTEA_WLAN_PLL_SETTLE_SW_MASK, + .d_WLAN_PLL_SETTLE_RSTMASK = ADRASTEA_WLAN_PLL_SETTLE_RSTMASK, + .d_WLAN_PLL_SETTLE_RESET = ADRASTEA_WLAN_PLL_SETTLE_RESET, + .d_WLAN_PLL_CONTROL_NOPWD_MSB = ADRASTEA_WLAN_PLL_CONTROL_NOPWD_MSB, + .d_WLAN_PLL_CONTROL_NOPWD_LSB = ADRASTEA_WLAN_PLL_CONTROL_NOPWD_LSB, + .d_WLAN_PLL_CONTROL_NOPWD_MASK = ADRASTEA_WLAN_PLL_CONTROL_NOPWD_MASK, + .d_WLAN_PLL_CONTROL_BYPASS_MSB = ADRASTEA_WLAN_PLL_CONTROL_BYPASS_MSB, + .d_WLAN_PLL_CONTROL_BYPASS_LSB = ADRASTEA_WLAN_PLL_CONTROL_BYPASS_LSB, + .d_WLAN_PLL_CONTROL_BYPASS_MASK = ADRASTEA_WLAN_PLL_CONTROL_BYPASS_MASK, + .d_WLAN_PLL_CONTROL_BYPASS_RESET = + ADRASTEA_WLAN_PLL_CONTROL_BYPASS_RESET, + .d_WLAN_PLL_CONTROL_CLK_SEL_MSB = ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_MSB, + .d_WLAN_PLL_CONTROL_CLK_SEL_LSB = ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_LSB, + .d_WLAN_PLL_CONTROL_CLK_SEL_MASK = + ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_MASK, + .d_WLAN_PLL_CONTROL_CLK_SEL_RESET = + ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_RESET, + .d_WLAN_PLL_CONTROL_REFDIV_MSB = ADRASTEA_WLAN_PLL_CONTROL_REFDIV_MSB, + .d_WLAN_PLL_CONTROL_REFDIV_LSB = ADRASTEA_WLAN_PLL_CONTROL_REFDIV_LSB, + .d_WLAN_PLL_CONTROL_REFDIV_MASK = ADRASTEA_WLAN_PLL_CONTROL_REFDIV_MASK, + .d_WLAN_PLL_CONTROL_REFDIV_RESET = + ADRASTEA_WLAN_PLL_CONTROL_REFDIV_RESET, + .d_WLAN_PLL_CONTROL_DIV_MSB = ADRASTEA_WLAN_PLL_CONTROL_DIV_MSB, + .d_WLAN_PLL_CONTROL_DIV_LSB = ADRASTEA_WLAN_PLL_CONTROL_DIV_LSB, + .d_WLAN_PLL_CONTROL_DIV_MASK = ADRASTEA_WLAN_PLL_CONTROL_DIV_MASK, + .d_WLAN_PLL_CONTROL_DIV_RESET = ADRASTEA_WLAN_PLL_CONTROL_DIV_RESET, + .d_WLAN_PLL_CONTROL_OFFSET = ADRASTEA_WLAN_PLL_CONTROL_OFFSET, + .d_WLAN_PLL_CONTROL_SW_MASK = ADRASTEA_WLAN_PLL_CONTROL_SW_MASK, + .d_WLAN_PLL_CONTROL_RSTMASK = ADRASTEA_WLAN_PLL_CONTROL_RSTMASK, + .d_WLAN_PLL_CONTROL_RESET = ADRASTEA_WLAN_PLL_CONTROL_RESET, + .d_SOC_CORE_CLK_CTRL_OFFSET = ADRASTEA_SOC_CORE_CLK_CTRL_OFFSET, + .d_SOC_CORE_CLK_CTRL_DIV_MSB = ADRASTEA_SOC_CORE_CLK_CTRL_DIV_MSB, + .d_SOC_CORE_CLK_CTRL_DIV_LSB = ADRASTEA_SOC_CORE_CLK_CTRL_DIV_LSB, + .d_SOC_CORE_CLK_CTRL_DIV_MASK = ADRASTEA_SOC_CORE_CLK_CTRL_DIV_MASK, + .d_RTC_SYNC_STATUS_PLL_CHANGING_MSB = + ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_MSB, + .d_RTC_SYNC_STATUS_PLL_CHANGING_LSB = + ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_LSB, + .d_RTC_SYNC_STATUS_PLL_CHANGING_MASK = + ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_MASK, + .d_RTC_SYNC_STATUS_PLL_CHANGING_RESET = + ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_RESET, + .d_RTC_SYNC_STATUS_OFFSET = ADRASTEA_RTC_SYNC_STATUS_OFFSET, + .d_SOC_CPU_CLOCK_OFFSET = ADRASTEA_SOC_CPU_CLOCK_OFFSET, + .d_SOC_CPU_CLOCK_STANDARD_MSB = ADRASTEA_SOC_CPU_CLOCK_STANDARD_MSB, + .d_SOC_CPU_CLOCK_STANDARD_LSB = ADRASTEA_SOC_CPU_CLOCK_STANDARD_LSB, + .d_SOC_CPU_CLOCK_STANDARD_MASK = ADRASTEA_SOC_CPU_CLOCK_STANDARD_MASK, + /* PLL end */ + .d_SOC_POWER_REG_OFFSET = ADRASTEA_SOC_POWER_REG_OFFSET, + .d_PCIE_INTR_CAUSE_ADDRESS = ADRASTEA_HOST_CAUSE_REGISTER, + .d_SOC_RESET_CONTROL_ADDRESS = ADRASTEA_SOC_RESET_CONTROL_ADDRESS, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK = + ADRASTEA_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB = + ADRASTEA_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB, + .d_SOC_RESET_CONTROL_CE_RST_MASK = + ADRASTEA_SOC_RESET_CONTROL_CE_RST_MASK, + .d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK = + ADRASTEA_SOC_RESET_CONTROL_CPU_WARM_RST_MASK, + .d_CPU_INTR_ADDRESS = ADRASTEA_CPU_INTR_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ADDRESS = + ADRASTEA_SOC_LF_TIMER_CONTROL0_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK = + ADRASTEA_SOC_LF_TIMER_CONTROL0_ENABLE_MASK, + .d_SOC_LF_TIMER_STATUS0_ADDRESS = + ADRASTEA_SOC_LF_TIMER_STATUS0_ADDRESS, + /* chip id start */ + .d_SOC_CHIP_ID_ADDRESS = ADRASTEA_SOC_CHIP_ID_ADDRESS, + .d_SOC_CHIP_ID_VERSION_MASK = ADRASTEA_SOC_CHIP_ID_VERSION_MASK, + .d_SOC_CHIP_ID_VERSION_LSB = ADRASTEA_SOC_CHIP_ID_VERSION_LSB, + .d_SOC_CHIP_ID_REVISION_MASK = ADRASTEA_SOC_CHIP_ID_REVISION_MASK, + .d_SOC_CHIP_ID_REVISION_LSB = ADRASTEA_SOC_CHIP_ID_REVISION_LSB, + /* chip id end */ + .d_A_SOC_CORE_SCRATCH_0_ADDRESS = ADRASTEA_A_SOC_CORE_SCRATCH_0_ADDRESS, + .d_A_SOC_CORE_SPARE_0_REGISTER = ADRASTEA_A_SOC_CORE_SPARE_0_REGISTER, + .d_PCIE_INTR_FIRMWARE_ROUTE_MASK = + ADRASTEA_PCIE_INTR_FIRMWARE_ROUTE_MASK, + .d_A_SOC_CORE_PCIE_INTR_CAUSE_GRP1 = + ADRASTEA_A_SOC_CORE_PCIE_INTR_CAUSE_GRP1, + .d_A_SOC_CORE_SPARE_1_REGISTER = + ADRASTEA_A_SOC_CORE_SPARE_1_REGISTER, + .d_A_SOC_CORE_PCIE_INTR_CLR_GRP1 = + ADRASTEA_A_SOC_CORE_PCIE_INTR_CLR_GRP1, + .d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP1 = + ADRASTEA_A_SOC_CORE_PCIE_INTR_ENABLE_GRP1, + .d_A_SOC_PCIE_PCIE_SCRATCH_0 = ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_0, + .d_A_SOC_PCIE_PCIE_SCRATCH_1 = ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_1, + .d_A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA = + ADRASTEA_A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA, + .d_A_SOC_PCIE_PCIE_SCRATCH_2 = ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_2, + .d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK = + ADRASTEA_A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK, + .d_WLAN_DEBUG_INPUT_SEL_OFFSET = ADRASTEA_WLAN_DEBUG_INPUT_SEL_OFFSET, + .d_WLAN_DEBUG_INPUT_SEL_SRC_MSB = ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_MSB, + .d_WLAN_DEBUG_INPUT_SEL_SRC_LSB = ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_LSB, + .d_WLAN_DEBUG_INPUT_SEL_SRC_MASK = + ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_MASK, + .d_WLAN_DEBUG_CONTROL_OFFSET = ADRASTEA_WLAN_DEBUG_CONTROL_OFFSET, + .d_WLAN_DEBUG_CONTROL_ENABLE_MSB = + ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_MSB, + .d_WLAN_DEBUG_CONTROL_ENABLE_LSB = + ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_LSB, + .d_WLAN_DEBUG_CONTROL_ENABLE_MASK = + ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_MASK, + .d_WLAN_DEBUG_OUT_OFFSET = ADRASTEA_WLAN_DEBUG_OUT_OFFSET, + .d_WLAN_DEBUG_OUT_DATA_MSB = ADRASTEA_WLAN_DEBUG_OUT_DATA_MSB, + .d_WLAN_DEBUG_OUT_DATA_LSB = ADRASTEA_WLAN_DEBUG_OUT_DATA_LSB, + .d_WLAN_DEBUG_OUT_DATA_MASK = ADRASTEA_WLAN_DEBUG_OUT_DATA_MASK, + .d_AMBA_DEBUG_BUS_OFFSET = ADRASTEA_AMBA_DEBUG_BUS_OFFSET, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB = + ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB = + ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK = + ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK, + .d_AMBA_DEBUG_BUS_SEL_MSB = ADRASTEA_AMBA_DEBUG_BUS_SEL_MSB, + .d_AMBA_DEBUG_BUS_SEL_LSB = ADRASTEA_AMBA_DEBUG_BUS_SEL_LSB, + .d_AMBA_DEBUG_BUS_SEL_MASK = ADRASTEA_AMBA_DEBUG_BUS_SEL_MASK, + +#ifdef QCA_WIFI_3_0_ADRASTEA + .d_Q6_ENABLE_REGISTER_0 = ADRASTEA_Q6_ENABLE_REGISTER_0, + .d_Q6_ENABLE_REGISTER_1 = ADRASTEA_Q6_ENABLE_REGISTER_1, + .d_Q6_CAUSE_REGISTER_0 = ADRASTEA_Q6_CAUSE_REGISTER_0, + .d_Q6_CAUSE_REGISTER_1 = ADRASTEA_Q6_CAUSE_REGISTER_1, + .d_Q6_CLEAR_REGISTER_0 = ADRASTEA_Q6_CLEAR_REGISTER_0, + .d_Q6_CLEAR_REGISTER_1 = ADRASTEA_Q6_CLEAR_REGISTER_1, +#endif + +#ifdef CONFIG_BYPASS_QMI + .d_BYPASS_QMI_TEMP_REGISTER = ADRASTEA_BYPASS_QMI_TEMP_REGISTER, +#endif +}; + +struct hostdef_s adrastea_hostdef = { + .d_INT_STATUS_ENABLE_ERROR_LSB = ADRASTEA_INT_STATUS_ENABLE_ERROR_LSB, + .d_INT_STATUS_ENABLE_ERROR_MASK = ADRASTEA_INT_STATUS_ENABLE_ERROR_MASK, + .d_INT_STATUS_ENABLE_CPU_LSB = ADRASTEA_INT_STATUS_ENABLE_CPU_LSB, + .d_INT_STATUS_ENABLE_CPU_MASK = ADRASTEA_INT_STATUS_ENABLE_CPU_MASK, + .d_INT_STATUS_ENABLE_COUNTER_LSB = + ADRASTEA_INT_STATUS_ENABLE_COUNTER_LSB, + .d_INT_STATUS_ENABLE_COUNTER_MASK = + ADRASTEA_INT_STATUS_ENABLE_COUNTER_MASK, + .d_INT_STATUS_ENABLE_MBOX_DATA_LSB = + ADRASTEA_INT_STATUS_ENABLE_MBOX_DATA_LSB, + .d_INT_STATUS_ENABLE_MBOX_DATA_MASK = + ADRASTEA_INT_STATUS_ENABLE_MBOX_DATA_MASK, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB = + ADRASTEA_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK = + ADRASTEA_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB = + ADRASTEA_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK = + ADRASTEA_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, + .d_COUNTER_INT_STATUS_ENABLE_BIT_LSB = + ADRASTEA_COUNTER_INT_STATUS_ENABLE_BIT_LSB, + .d_COUNTER_INT_STATUS_ENABLE_BIT_MASK = + ADRASTEA_COUNTER_INT_STATUS_ENABLE_BIT_MASK, + .d_INT_STATUS_ENABLE_ADDRESS = ADRASTEA_INT_STATUS_ENABLE_ADDRESS, + .d_CPU_INT_STATUS_ENABLE_BIT_LSB = + ADRASTEA_CPU_INT_STATUS_ENABLE_BIT_LSB, + .d_CPU_INT_STATUS_ENABLE_BIT_MASK = + ADRASTEA_CPU_INT_STATUS_ENABLE_BIT_MASK, + .d_HOST_INT_STATUS_ADDRESS = ADRASTEA_HOST_INT_STATUS_ADDRESS, + .d_CPU_INT_STATUS_ADDRESS = ADRASTEA_CPU_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_ADDRESS = ADRASTEA_ERROR_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_WAKEUP_MASK = ADRASTEA_ERROR_INT_STATUS_WAKEUP_MASK, + .d_ERROR_INT_STATUS_WAKEUP_LSB = ADRASTEA_ERROR_INT_STATUS_WAKEUP_LSB, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK = + ADRASTEA_ERROR_INT_STATUS_RX_UNDERFLOW_MASK, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB = + ADRASTEA_ERROR_INT_STATUS_RX_UNDERFLOW_LSB, + .d_ERROR_INT_STATUS_TX_OVERFLOW_MASK = + ADRASTEA_ERROR_INT_STATUS_TX_OVERFLOW_MASK, + .d_ERROR_INT_STATUS_TX_OVERFLOW_LSB = + ADRASTEA_ERROR_INT_STATUS_TX_OVERFLOW_LSB, + .d_COUNT_DEC_ADDRESS = ADRASTEA_COUNT_DEC_ADDRESS, + .d_HOST_INT_STATUS_CPU_MASK = ADRASTEA_HOST_INT_STATUS_CPU_MASK, + .d_HOST_INT_STATUS_CPU_LSB = ADRASTEA_HOST_INT_STATUS_CPU_LSB, + .d_HOST_INT_STATUS_ERROR_MASK = ADRASTEA_HOST_INT_STATUS_ERROR_MASK, + .d_HOST_INT_STATUS_ERROR_LSB = ADRASTEA_HOST_INT_STATUS_ERROR_LSB, + .d_HOST_INT_STATUS_COUNTER_MASK = ADRASTEA_HOST_INT_STATUS_COUNTER_MASK, + .d_HOST_INT_STATUS_COUNTER_LSB = ADRASTEA_HOST_INT_STATUS_COUNTER_LSB, + .d_RX_LOOKAHEAD_VALID_ADDRESS = ADRASTEA_RX_LOOKAHEAD_VALID_ADDRESS, + .d_WINDOW_DATA_ADDRESS = ADRASTEA_WINDOW_DATA_ADDRESS, + .d_WINDOW_READ_ADDR_ADDRESS = ADRASTEA_WINDOW_READ_ADDR_ADDRESS, + .d_WINDOW_WRITE_ADDR_ADDRESS = ADRASTEA_WINDOW_WRITE_ADDR_ADDRESS, + .d_SOC_GLOBAL_RESET_ADDRESS = ADRASTEA_SOC_GLOBAL_RESET_ADDRESS, + .d_RTC_STATE_ADDRESS = ADRASTEA_RTC_STATE_ADDRESS, + .d_RTC_STATE_COLD_RESET_MASK = ADRASTEA_RTC_STATE_COLD_RESET_MASK, + .d_PCIE_LOCAL_BASE_ADDRESS = ADRASTEA_PCIE_LOCAL_BASE_ADDRESS, + .d_PCIE_SOC_WAKE_RESET = ADRASTEA_PCIE_SOC_WAKE_RESET, + .d_PCIE_SOC_WAKE_ADDRESS = ADRASTEA_PCIE_SOC_WAKE_ADDRESS, + .d_PCIE_SOC_WAKE_V_MASK = ADRASTEA_PCIE_SOC_WAKE_V_MASK, + .d_RTC_STATE_V_MASK = ADRASTEA_RTC_STATE_V_MASK, + .d_RTC_STATE_V_LSB = ADRASTEA_RTC_STATE_V_LSB, + .d_FW_IND_EVENT_PENDING = ADRASTEA_FW_IND_EVENT_PENDING, + .d_FW_IND_INITIALIZED = ADRASTEA_FW_IND_INITIALIZED, + .d_FW_IND_HELPER = ADRASTEA_FW_IND_HELPER, + .d_RTC_STATE_V_ON = ADRASTEA_RTC_STATE_V_ON, +#if defined(SDIO_3_0) + .d_HOST_INT_STATUS_MBOX_DATA_MASK = + ADRASTEA_HOST_INT_STATUS_MBOX_DATA_MASK, + .d_HOST_INT_STATUS_MBOX_DATA_LSB = + ADRASTEA_HOST_INT_STATUS_MBOX_DATA_LSB, +#endif + .d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS, + .d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK, + .d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS, + .d_MSI_MAGIC_ADR_ADDRESS = MSI_MAGIC_ADR_ADDRESS, + .d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS, + .d_HOST_CE_COUNT = ADRASTEA_CE_COUNT, + .d_ENABLE_MSI = 0, + .d_MUX_ID_MASK = 0xf000, + .d_TRANSACTION_ID_MASK = 0x0fff, + .d_DESC_DATA_FLAG_MASK = 0x1FFFE3E0, + .d_A_SOC_PCIE_PCIE_BAR0_START = ADRASTEA_A_SOC_PCIE_PCIE_BAR0_START, +}; + + +struct ce_reg_def adrastea_ce_targetdef = { + /* copy_engine.c */ + .d_DST_WR_INDEX_ADDRESS = ADRASTEA_DST_WR_INDEX_OFFSET, + .d_SRC_WATERMARK_ADDRESS = ADRASTEA_SRC_WATERMARK_OFFSET, + .d_SRC_WATERMARK_LOW_MASK = ADRASTEA_SRC_WATERMARK_LOW_MASK, + .d_SRC_WATERMARK_HIGH_MASK = ADRASTEA_SRC_WATERMARK_HIGH_MASK, + .d_DST_WATERMARK_LOW_MASK = ADRASTEA_DST_WATERMARK_LOW_MASK, + .d_DST_WATERMARK_HIGH_MASK = ADRASTEA_DST_WATERMARK_HIGH_MASK, + .d_CURRENT_SRRI_ADDRESS = ADRASTEA_CURRENT_SRRI_OFFSET, + .d_CURRENT_DRRI_ADDRESS = ADRASTEA_CURRENT_DRRI_OFFSET, + .d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK = + ADRASTEA_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK = + ADRASTEA_HOST_IS_SRC_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK = + ADRASTEA_HOST_IS_DST_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_DST_RING_LOW_WATERMARK_MASK = + ADRASTEA_HOST_IS_DST_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_ADDRESS = ADRASTEA_HOST_IS_OFFSET, + .d_MISC_IS_ADDRESS = ADRASTEA_MISC_IS_OFFSET, + .d_HOST_IS_COPY_COMPLETE_MASK = ADRASTEA_HOST_IS_COPY_COMPLETE_MASK, + .d_CE_WRAPPER_BASE_ADDRESS = ADRASTEA_CE_WRAPPER_BASE_ADDRESS, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS = + ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS_OFFSET, + .d_CE_DDR_ADDRESS_FOR_RRI_LOW = + ADRASTEA_CE_DDR_ADDRESS_FOR_RRI_LOW, + .d_CE_DDR_ADDRESS_FOR_RRI_HIGH = + ADRASTEA_CE_DDR_ADDRESS_FOR_RRI_HIGH, + .d_HOST_IE_ADDRESS = ADRASTEA_HOST_IE_OFFSET, + .d_HOST_IE_COPY_COMPLETE_MASK = ADRASTEA_HOST_IE_COPY_COMPLETE_MASK, + .d_SR_BA_ADDRESS = ADRASTEA_SR_BA_OFFSET, + .d_SR_BA_ADDRESS_HIGH = ADRASTEA_SR_BA_HIGH_OFFSET, + .d_SR_SIZE_ADDRESS = ADRASTEA_SR_SIZE_OFFSET, + .d_CE_CTRL1_ADDRESS = ADRASTEA_CE_CTRL1_OFFSET, + .d_CE_CTRL1_DMAX_LENGTH_MASK = ADRASTEA_CE_CTRL1_DMAX_LENGTH_MASK, + .d_DR_BA_ADDRESS = ADRASTEA_DR_BA_OFFSET, + .d_DR_BA_ADDRESS_HIGH = ADRASTEA_DR_BA_HIGH_OFFSET, + .d_DR_SIZE_ADDRESS = ADRASTEA_DR_SIZE_OFFSET, + .d_CE_CMD_REGISTER = ADRASTEA_CE_CMD_REGISTER_OFFSET, + .d_CE_MSI_ADDRESS = MISSING_FOR_ADRASTEA, + .d_CE_MSI_ADDRESS_HIGH = MISSING_FOR_ADRASTEA, + .d_CE_MSI_DATA = MISSING_FOR_ADRASTEA, + .d_CE_MSI_ENABLE_BIT = MISSING_FOR_ADRASTEA, + .d_MISC_IE_ADDRESS = ADRASTEA_MISC_IE_OFFSET, + .d_MISC_IS_AXI_ERR_MASK = ADRASTEA_MISC_IS_AXI_ERR_MASK, + .d_MISC_IS_DST_ADDR_ERR_MASK = ADRASTEA_MISC_IS_DST_ADDR_ERR_MASK, + .d_MISC_IS_SRC_LEN_ERR_MASK = ADRASTEA_MISC_IS_SRC_LEN_ERR_MASK, + .d_MISC_IS_DST_MAX_LEN_VIO_MASK = ADRASTEA_MISC_IS_DST_MAX_LEN_VIO_MASK, + .d_MISC_IS_DST_RING_OVERFLOW_MASK = + ADRASTEA_MISC_IS_DST_RING_OVERFLOW_MASK, + .d_MISC_IS_SRC_RING_OVERFLOW_MASK = + ADRASTEA_MISC_IS_SRC_RING_OVERFLOW_MASK, + .d_SRC_WATERMARK_LOW_LSB = ADRASTEA_SRC_WATERMARK_LOW_LSB, + .d_SRC_WATERMARK_HIGH_LSB = ADRASTEA_SRC_WATERMARK_HIGH_LSB, + .d_DST_WATERMARK_LOW_LSB = ADRASTEA_DST_WATERMARK_LOW_LSB, + .d_DST_WATERMARK_HIGH_LSB = ADRASTEA_DST_WATERMARK_HIGH_LSB, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK = + ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB = + ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB, + .d_CE_CTRL1_DMAX_LENGTH_LSB = ADRASTEA_CE_CTRL1_DMAX_LENGTH_LSB, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK = + ADRASTEA_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK = + ADRASTEA_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB = + ADRASTEA_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB = + ADRASTEA_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_IDX_UPD_EN_MASK = + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__IDX_UPD_EN___M, + .d_CE_WRAPPER_DEBUG_OFFSET = ADRASTEA_CE_WRAPPER_DEBUG_OFFSET, + .d_CE_WRAPPER_DEBUG_SEL_MSB = ADRASTEA_CE_WRAPPER_DEBUG_SEL_MSB, + .d_CE_WRAPPER_DEBUG_SEL_LSB = ADRASTEA_CE_WRAPPER_DEBUG_SEL_LSB, + .d_CE_WRAPPER_DEBUG_SEL_MASK = ADRASTEA_CE_WRAPPER_DEBUG_SEL_MASK, + .d_CE_DEBUG_OFFSET = ADRASTEA_CE_DEBUG_OFFSET, + .d_CE_DEBUG_SEL_MSB = ADRASTEA_CE_DEBUG_SEL_MSB, + .d_CE_DEBUG_SEL_LSB = ADRASTEA_CE_DEBUG_SEL_LSB, + .d_CE_DEBUG_SEL_MASK = ADRASTEA_CE_DEBUG_SEL_MASK, + .d_CE0_BASE_ADDRESS = ADRASTEA_CE0_BASE_ADDRESS, + .d_CE1_BASE_ADDRESS = ADRASTEA_CE1_BASE_ADDRESS, + .d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES = + MISSING_FOR_ADRASTEA, + .d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS = + MISSING_FOR_ADRASTEA, +}; + + +struct host_shadow_regs_s adrastea_host_shadow_regs = { + .d_A_LOCAL_SHADOW_REG_VALUE_0 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_0, + .d_A_LOCAL_SHADOW_REG_VALUE_1 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_1, + .d_A_LOCAL_SHADOW_REG_VALUE_2 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_2, + .d_A_LOCAL_SHADOW_REG_VALUE_3 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_3, + .d_A_LOCAL_SHADOW_REG_VALUE_4 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_4, + .d_A_LOCAL_SHADOW_REG_VALUE_5 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_5, + .d_A_LOCAL_SHADOW_REG_VALUE_6 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_6, + .d_A_LOCAL_SHADOW_REG_VALUE_7 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_7, + .d_A_LOCAL_SHADOW_REG_VALUE_8 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_8, + .d_A_LOCAL_SHADOW_REG_VALUE_9 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_9, + .d_A_LOCAL_SHADOW_REG_VALUE_10 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_10, + .d_A_LOCAL_SHADOW_REG_VALUE_11 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_11, + .d_A_LOCAL_SHADOW_REG_VALUE_12 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_12, + .d_A_LOCAL_SHADOW_REG_VALUE_13 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_13, + .d_A_LOCAL_SHADOW_REG_VALUE_14 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_14, + .d_A_LOCAL_SHADOW_REG_VALUE_15 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_15, + .d_A_LOCAL_SHADOW_REG_VALUE_16 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_16, + .d_A_LOCAL_SHADOW_REG_VALUE_17 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_17, + .d_A_LOCAL_SHADOW_REG_VALUE_18 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_18, + .d_A_LOCAL_SHADOW_REG_VALUE_19 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_19, + .d_A_LOCAL_SHADOW_REG_VALUE_20 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_20, + .d_A_LOCAL_SHADOW_REG_VALUE_21 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_21, + .d_A_LOCAL_SHADOW_REG_VALUE_22 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_22, + .d_A_LOCAL_SHADOW_REG_VALUE_23 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_23, + .d_A_LOCAL_SHADOW_REG_ADDRESS_0 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_0, + .d_A_LOCAL_SHADOW_REG_ADDRESS_1 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_1, + .d_A_LOCAL_SHADOW_REG_ADDRESS_2 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_2, + .d_A_LOCAL_SHADOW_REG_ADDRESS_3 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_3, + .d_A_LOCAL_SHADOW_REG_ADDRESS_4 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_4, + .d_A_LOCAL_SHADOW_REG_ADDRESS_5 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_5, + .d_A_LOCAL_SHADOW_REG_ADDRESS_6 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_6, + .d_A_LOCAL_SHADOW_REG_ADDRESS_7 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_7, + .d_A_LOCAL_SHADOW_REG_ADDRESS_8 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_8, + .d_A_LOCAL_SHADOW_REG_ADDRESS_9 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_9, + .d_A_LOCAL_SHADOW_REG_ADDRESS_10 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_10, + .d_A_LOCAL_SHADOW_REG_ADDRESS_11 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_11, + .d_A_LOCAL_SHADOW_REG_ADDRESS_12 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_12, + .d_A_LOCAL_SHADOW_REG_ADDRESS_13 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_13, + .d_A_LOCAL_SHADOW_REG_ADDRESS_14 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_14, + .d_A_LOCAL_SHADOW_REG_ADDRESS_15 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_15, + .d_A_LOCAL_SHADOW_REG_ADDRESS_16 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_16, + .d_A_LOCAL_SHADOW_REG_ADDRESS_17 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_17, + .d_A_LOCAL_SHADOW_REG_ADDRESS_18 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_18, + .d_A_LOCAL_SHADOW_REG_ADDRESS_19 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_19, + .d_A_LOCAL_SHADOW_REG_ADDRESS_20 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_20, + .d_A_LOCAL_SHADOW_REG_ADDRESS_21 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_21, + .d_A_LOCAL_SHADOW_REG_ADDRESS_22 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_22, + .d_A_LOCAL_SHADOW_REG_ADDRESS_23 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_23 +}; + +struct targetdef_s genoa_targetdef = { + .d_RTC_SOC_BASE_ADDRESS = ADRASTEA_RTC_SOC_BASE_ADDRESS, + .d_RTC_WMAC_BASE_ADDRESS = ADRASTEA_RTC_WMAC_BASE_ADDRESS, + .d_SYSTEM_SLEEP_OFFSET = ADRASTEA_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_OFFSET = ADRASTEA_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_DISABLE_LSB = + ADRASTEA_WLAN_SYSTEM_SLEEP_DISABLE_LSB, + .d_WLAN_SYSTEM_SLEEP_DISABLE_MASK = + ADRASTEA_WLAN_SYSTEM_SLEEP_DISABLE_MASK, + .d_CLOCK_CONTROL_OFFSET = ADRASTEA_CLOCK_CONTROL_OFFSET, + .d_CLOCK_CONTROL_SI0_CLK_MASK = ADRASTEA_CLOCK_CONTROL_SI0_CLK_MASK, + .d_RESET_CONTROL_OFFSET = ADRASTEA_SOC_RESET_CONTROL_OFFSET, + .d_RESET_CONTROL_MBOX_RST_MASK = ADRASTEA_RESET_CONTROL_MBOX_RST_MASK, + .d_RESET_CONTROL_SI0_RST_MASK = ADRASTEA_RESET_CONTROL_SI0_RST_MASK, + .d_WLAN_RESET_CONTROL_OFFSET = ADRASTEA_WLAN_RESET_CONTROL_OFFSET, + .d_WLAN_RESET_CONTROL_COLD_RST_MASK = + ADRASTEA_WLAN_RESET_CONTROL_COLD_RST_MASK, + .d_WLAN_RESET_CONTROL_WARM_RST_MASK = + ADRASTEA_WLAN_RESET_CONTROL_WARM_RST_MASK, + .d_GPIO_BASE_ADDRESS = ADRASTEA_GPIO_BASE_ADDRESS, + .d_GPIO_PIN0_OFFSET = ADRASTEA_GPIO_PIN0_OFFSET, + .d_GPIO_PIN1_OFFSET = ADRASTEA_GPIO_PIN1_OFFSET, + .d_GPIO_PIN0_CONFIG_MASK = ADRASTEA_GPIO_PIN0_CONFIG_MASK, + .d_GPIO_PIN1_CONFIG_MASK = ADRASTEA_GPIO_PIN1_CONFIG_MASK, + .d_SI_CONFIG_BIDIR_OD_DATA_LSB = ADRASTEA_SI_CONFIG_BIDIR_OD_DATA_LSB, + .d_SI_CONFIG_BIDIR_OD_DATA_MASK = ADRASTEA_SI_CONFIG_BIDIR_OD_DATA_MASK, + .d_SI_CONFIG_I2C_LSB = ADRASTEA_SI_CONFIG_I2C_LSB, + .d_SI_CONFIG_I2C_MASK = ADRASTEA_SI_CONFIG_I2C_MASK, + .d_SI_CONFIG_POS_SAMPLE_LSB = ADRASTEA_SI_CONFIG_POS_SAMPLE_LSB, + .d_SI_CONFIG_POS_SAMPLE_MASK = ADRASTEA_SI_CONFIG_POS_SAMPLE_MASK, + .d_SI_CONFIG_INACTIVE_CLK_LSB = ADRASTEA_SI_CONFIG_INACTIVE_CLK_LSB, + .d_SI_CONFIG_INACTIVE_CLK_MASK = ADRASTEA_SI_CONFIG_INACTIVE_CLK_MASK, + .d_SI_CONFIG_INACTIVE_DATA_LSB = ADRASTEA_SI_CONFIG_INACTIVE_DATA_LSB, + .d_SI_CONFIG_INACTIVE_DATA_MASK = ADRASTEA_SI_CONFIG_INACTIVE_DATA_MASK, + .d_SI_CONFIG_DIVIDER_LSB = ADRASTEA_SI_CONFIG_DIVIDER_LSB, + .d_SI_CONFIG_DIVIDER_MASK = ADRASTEA_SI_CONFIG_DIVIDER_MASK, + .d_SI_BASE_ADDRESS = ADRASTEA_SI_BASE_ADDRESS, + .d_SI_CONFIG_OFFSET = ADRASTEA_SI_CONFIG_OFFSET, + .d_SI_TX_DATA0_OFFSET = ADRASTEA_SI_TX_DATA0_OFFSET, + .d_SI_TX_DATA1_OFFSET = ADRASTEA_SI_TX_DATA1_OFFSET, + .d_SI_RX_DATA0_OFFSET = ADRASTEA_SI_RX_DATA0_OFFSET, + .d_SI_RX_DATA1_OFFSET = ADRASTEA_SI_RX_DATA1_OFFSET, + .d_SI_CS_OFFSET = ADRASTEA_SI_CS_OFFSET, + .d_SI_CS_DONE_ERR_MASK = ADRASTEA_SI_CS_DONE_ERR_MASK, + .d_SI_CS_DONE_INT_MASK = ADRASTEA_SI_CS_DONE_INT_MASK, + .d_SI_CS_START_LSB = ADRASTEA_SI_CS_START_LSB, + .d_SI_CS_START_MASK = ADRASTEA_SI_CS_START_MASK, + .d_SI_CS_RX_CNT_LSB = ADRASTEA_SI_CS_RX_CNT_LSB, + .d_SI_CS_RX_CNT_MASK = ADRASTEA_SI_CS_RX_CNT_MASK, + .d_SI_CS_TX_CNT_LSB = ADRASTEA_SI_CS_TX_CNT_LSB, + .d_SI_CS_TX_CNT_MASK = ADRASTEA_SI_CS_TX_CNT_MASK, + .d_BOARD_DATA_SZ = ADRASTEA_BOARD_DATA_SZ, + .d_BOARD_EXT_DATA_SZ = ADRASTEA_BOARD_EXT_DATA_SZ, + .d_MBOX_BASE_ADDRESS = ADRASTEA_MBOX_BASE_ADDRESS, + .d_LOCAL_SCRATCH_OFFSET = ADRASTEA_LOCAL_SCRATCH_OFFSET, + .d_CPU_CLOCK_OFFSET = ADRASTEA_CPU_CLOCK_OFFSET, + .d_LPO_CAL_OFFSET = ADRASTEA_LPO_CAL_OFFSET, + .d_GPIO_PIN10_OFFSET = ADRASTEA_GPIO_PIN10_OFFSET, + .d_GPIO_PIN11_OFFSET = ADRASTEA_GPIO_PIN11_OFFSET, + .d_GPIO_PIN12_OFFSET = ADRASTEA_GPIO_PIN12_OFFSET, + .d_GPIO_PIN13_OFFSET = ADRASTEA_GPIO_PIN13_OFFSET, + .d_CLOCK_GPIO_OFFSET = ADRASTEA_CLOCK_GPIO_OFFSET, + .d_CPU_CLOCK_STANDARD_LSB = ADRASTEA_CPU_CLOCK_STANDARD_LSB, + .d_CPU_CLOCK_STANDARD_MASK = ADRASTEA_CPU_CLOCK_STANDARD_MASK, + .d_LPO_CAL_ENABLE_LSB = ADRASTEA_LPO_CAL_ENABLE_LSB, + .d_LPO_CAL_ENABLE_MASK = ADRASTEA_LPO_CAL_ENABLE_MASK, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB = ADRASTEA_CLOCK_GPIO_BT_CLK_OUT_EN_LSB, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK = + ADRASTEA_CLOCK_GPIO_BT_CLK_OUT_EN_MASK, + .d_ANALOG_INTF_BASE_ADDRESS = ADRASTEA_ANALOG_INTF_BASE_ADDRESS, + .d_WLAN_MAC_BASE_ADDRESS = ADRASTEA_WLAN_MAC_BASE_ADDRESS, + .d_FW_INDICATOR_ADDRESS = ADRASTEA_FW_INDICATOR_ADDRESS, + .d_DRAM_BASE_ADDRESS = ADRASTEA_DRAM_BASE_ADDRESS, + .d_SOC_CORE_BASE_ADDRESS = ADRASTEA_SOC_CORE_BASE_ADDRESS, + .d_CORE_CTRL_ADDRESS = ADRASTEA_CORE_CTRL_ADDRESS, + .d_CE_COUNT = ADRASTEA_CE_COUNT, + .d_MSI_NUM_REQUEST = MSI_NUM_REQUEST, + .d_MSI_ASSIGN_FW = MSI_ASSIGN_FW, + .d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL, + .d_PCIE_INTR_ENABLE_ADDRESS = ADRASTEA_HOST_ENABLE_REGISTER, + .d_PCIE_INTR_CLR_ADDRESS = ADRASTEA_HOST_CLEAR_REGISTER, + .d_PCIE_INTR_FIRMWARE_MASK = ADRASTEA_PCIE_INTR_FIRMWARE_MASK, + .d_PCIE_INTR_CE_MASK_ALL = ADRASTEA_PCIE_INTR_CE_MASK_ALL, + .d_CORE_CTRL_CPU_INTR_MASK = ADRASTEA_CORE_CTRL_CPU_INTR_MASK, + .d_SR_WR_INDEX_ADDRESS = ADRASTEA_SR_WR_INDEX_OFFSET, + .d_DST_WATERMARK_ADDRESS = ADRASTEA_DST_WATERMARK_OFFSET, + /* htt_rx.c */ + .d_RX_MSDU_END_4_FIRST_MSDU_MASK = + ADRASTEA_RX_MSDU_END_4_FIRST_MSDU_MASK, + .d_RX_MSDU_END_4_FIRST_MSDU_LSB = ADRASTEA_RX_MSDU_END_4_FIRST_MSDU_LSB, + .d_RX_MPDU_START_0_SEQ_NUM_MASK = ADRASTEA_RX_MPDU_START_0_SEQ_NUM_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_LSB = ADRASTEA_RX_MPDU_START_0_SEQ_NUM_LSB, + .d_RX_MPDU_START_2_PN_47_32_LSB = ADRASTEA_RX_MPDU_START_2_PN_47_32_LSB, + .d_RX_MPDU_START_2_PN_47_32_MASK = + ADRASTEA_RX_MPDU_START_2_PN_47_32_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK = + ADRASTEA_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB = + ADRASTEA_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB, + .d_RX_MSDU_END_4_LAST_MSDU_MASK = ADRASTEA_RX_MSDU_END_4_LAST_MSDU_MASK, + .d_RX_MSDU_END_4_LAST_MSDU_LSB = ADRASTEA_RX_MSDU_END_4_LAST_MSDU_LSB, + .d_RX_ATTENTION_0_MCAST_BCAST_MASK = + ADRASTEA_RX_ATTENTION_0_MCAST_BCAST_MASK, + .d_RX_ATTENTION_0_MCAST_BCAST_LSB = + ADRASTEA_RX_ATTENTION_0_MCAST_BCAST_LSB, + .d_RX_ATTENTION_0_FRAGMENT_MASK = ADRASTEA_RX_ATTENTION_0_FRAGMENT_MASK, + .d_RX_ATTENTION_0_FRAGMENT_LSB = ADRASTEA_RX_ATTENTION_0_FRAGMENT_LSB, + .d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK = + ADRASTEA_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK = + ADRASTEA_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB = + ADRASTEA_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB, + .d_RX_MSDU_START_0_MSDU_LENGTH_MASK = + ADRASTEA_RX_MSDU_START_0_MSDU_LENGTH_MASK, + .d_RX_MSDU_START_0_MSDU_LENGTH_LSB = + ADRASTEA_RX_MSDU_START_0_MSDU_LENGTH_LSB, + .d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET = + ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_OFFSET, + .d_RX_MSDU_START_2_DECAP_FORMAT_MASK = + ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_MASK, + .d_RX_MSDU_START_2_DECAP_FORMAT_LSB = + ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_LSB, + .d_RX_MPDU_START_0_ENCRYPTED_MASK = + ADRASTEA_RX_MPDU_START_0_ENCRYPTED_MASK, + .d_RX_MPDU_START_0_ENCRYPTED_LSB = + ADRASTEA_RX_MPDU_START_0_ENCRYPTED_LSB, + .d_RX_ATTENTION_0_MORE_DATA_MASK = + ADRASTEA_RX_ATTENTION_0_MORE_DATA_MASK, + .d_RX_ATTENTION_0_MSDU_DONE_MASK = + ADRASTEA_RX_ATTENTION_0_MSDU_DONE_MASK, + .d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK = + ADRASTEA_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK, + + /* PLL start */ + .d_EFUSE_OFFSET = ADRASTEA_EFUSE_OFFSET, + .d_EFUSE_XTAL_SEL_MSB = ADRASTEA_EFUSE_XTAL_SEL_MSB, + .d_EFUSE_XTAL_SEL_LSB = ADRASTEA_EFUSE_XTAL_SEL_LSB, + .d_EFUSE_XTAL_SEL_MASK = ADRASTEA_EFUSE_XTAL_SEL_MASK, + .d_BB_PLL_CONFIG_OFFSET = ADRASTEA_BB_PLL_CONFIG_OFFSET, + .d_BB_PLL_CONFIG_OUTDIV_MSB = ADRASTEA_BB_PLL_CONFIG_OUTDIV_MSB, + .d_BB_PLL_CONFIG_OUTDIV_LSB = ADRASTEA_BB_PLL_CONFIG_OUTDIV_LSB, + .d_BB_PLL_CONFIG_OUTDIV_MASK = ADRASTEA_BB_PLL_CONFIG_OUTDIV_MASK, + .d_BB_PLL_CONFIG_FRAC_MSB = ADRASTEA_BB_PLL_CONFIG_FRAC_MSB, + .d_BB_PLL_CONFIG_FRAC_LSB = ADRASTEA_BB_PLL_CONFIG_FRAC_LSB, + .d_BB_PLL_CONFIG_FRAC_MASK = ADRASTEA_BB_PLL_CONFIG_FRAC_MASK, + .d_WLAN_PLL_SETTLE_TIME_MSB = ADRASTEA_WLAN_PLL_SETTLE_TIME_MSB, + .d_WLAN_PLL_SETTLE_TIME_LSB = ADRASTEA_WLAN_PLL_SETTLE_TIME_LSB, + .d_WLAN_PLL_SETTLE_TIME_MASK = ADRASTEA_WLAN_PLL_SETTLE_TIME_MASK, + .d_WLAN_PLL_SETTLE_OFFSET = ADRASTEA_WLAN_PLL_SETTLE_OFFSET, + .d_WLAN_PLL_SETTLE_SW_MASK = ADRASTEA_WLAN_PLL_SETTLE_SW_MASK, + .d_WLAN_PLL_SETTLE_RSTMASK = ADRASTEA_WLAN_PLL_SETTLE_RSTMASK, + .d_WLAN_PLL_SETTLE_RESET = ADRASTEA_WLAN_PLL_SETTLE_RESET, + .d_WLAN_PLL_CONTROL_NOPWD_MSB = ADRASTEA_WLAN_PLL_CONTROL_NOPWD_MSB, + .d_WLAN_PLL_CONTROL_NOPWD_LSB = ADRASTEA_WLAN_PLL_CONTROL_NOPWD_LSB, + .d_WLAN_PLL_CONTROL_NOPWD_MASK = ADRASTEA_WLAN_PLL_CONTROL_NOPWD_MASK, + .d_WLAN_PLL_CONTROL_BYPASS_MSB = ADRASTEA_WLAN_PLL_CONTROL_BYPASS_MSB, + .d_WLAN_PLL_CONTROL_BYPASS_LSB = ADRASTEA_WLAN_PLL_CONTROL_BYPASS_LSB, + .d_WLAN_PLL_CONTROL_BYPASS_MASK = ADRASTEA_WLAN_PLL_CONTROL_BYPASS_MASK, + .d_WLAN_PLL_CONTROL_BYPASS_RESET = + ADRASTEA_WLAN_PLL_CONTROL_BYPASS_RESET, + .d_WLAN_PLL_CONTROL_CLK_SEL_MSB = ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_MSB, + .d_WLAN_PLL_CONTROL_CLK_SEL_LSB = ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_LSB, + .d_WLAN_PLL_CONTROL_CLK_SEL_MASK = + ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_MASK, + .d_WLAN_PLL_CONTROL_CLK_SEL_RESET = + ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_RESET, + .d_WLAN_PLL_CONTROL_REFDIV_MSB = ADRASTEA_WLAN_PLL_CONTROL_REFDIV_MSB, + .d_WLAN_PLL_CONTROL_REFDIV_LSB = ADRASTEA_WLAN_PLL_CONTROL_REFDIV_LSB, + .d_WLAN_PLL_CONTROL_REFDIV_MASK = ADRASTEA_WLAN_PLL_CONTROL_REFDIV_MASK, + .d_WLAN_PLL_CONTROL_REFDIV_RESET = + ADRASTEA_WLAN_PLL_CONTROL_REFDIV_RESET, + .d_WLAN_PLL_CONTROL_DIV_MSB = ADRASTEA_WLAN_PLL_CONTROL_DIV_MSB, + .d_WLAN_PLL_CONTROL_DIV_LSB = ADRASTEA_WLAN_PLL_CONTROL_DIV_LSB, + .d_WLAN_PLL_CONTROL_DIV_MASK = ADRASTEA_WLAN_PLL_CONTROL_DIV_MASK, + .d_WLAN_PLL_CONTROL_DIV_RESET = ADRASTEA_WLAN_PLL_CONTROL_DIV_RESET, + .d_WLAN_PLL_CONTROL_OFFSET = ADRASTEA_WLAN_PLL_CONTROL_OFFSET, + .d_WLAN_PLL_CONTROL_SW_MASK = ADRASTEA_WLAN_PLL_CONTROL_SW_MASK, + .d_WLAN_PLL_CONTROL_RSTMASK = ADRASTEA_WLAN_PLL_CONTROL_RSTMASK, + .d_WLAN_PLL_CONTROL_RESET = ADRASTEA_WLAN_PLL_CONTROL_RESET, + .d_SOC_CORE_CLK_CTRL_OFFSET = ADRASTEA_SOC_CORE_CLK_CTRL_OFFSET, + .d_SOC_CORE_CLK_CTRL_DIV_MSB = ADRASTEA_SOC_CORE_CLK_CTRL_DIV_MSB, + .d_SOC_CORE_CLK_CTRL_DIV_LSB = ADRASTEA_SOC_CORE_CLK_CTRL_DIV_LSB, + .d_SOC_CORE_CLK_CTRL_DIV_MASK = ADRASTEA_SOC_CORE_CLK_CTRL_DIV_MASK, + .d_RTC_SYNC_STATUS_PLL_CHANGING_MSB = + ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_MSB, + .d_RTC_SYNC_STATUS_PLL_CHANGING_LSB = + ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_LSB, + .d_RTC_SYNC_STATUS_PLL_CHANGING_MASK = + ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_MASK, + .d_RTC_SYNC_STATUS_PLL_CHANGING_RESET = + ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_RESET, + .d_RTC_SYNC_STATUS_OFFSET = ADRASTEA_RTC_SYNC_STATUS_OFFSET, + .d_SOC_CPU_CLOCK_OFFSET = ADRASTEA_SOC_CPU_CLOCK_OFFSET, + .d_SOC_CPU_CLOCK_STANDARD_MSB = ADRASTEA_SOC_CPU_CLOCK_STANDARD_MSB, + .d_SOC_CPU_CLOCK_STANDARD_LSB = ADRASTEA_SOC_CPU_CLOCK_STANDARD_LSB, + .d_SOC_CPU_CLOCK_STANDARD_MASK = ADRASTEA_SOC_CPU_CLOCK_STANDARD_MASK, + /* PLL end */ + .d_SOC_POWER_REG_OFFSET = ADRASTEA_SOC_POWER_REG_OFFSET, + .d_PCIE_INTR_CAUSE_ADDRESS = ADRASTEA_HOST_CAUSE_REGISTER, + .d_SOC_RESET_CONTROL_ADDRESS = ADRASTEA_SOC_RESET_CONTROL_ADDRESS, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK = + ADRASTEA_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB = + ADRASTEA_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB, + .d_SOC_RESET_CONTROL_CE_RST_MASK = + ADRASTEA_SOC_RESET_CONTROL_CE_RST_MASK, + .d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK = + ADRASTEA_SOC_RESET_CONTROL_CPU_WARM_RST_MASK, + .d_CPU_INTR_ADDRESS = ADRASTEA_CPU_INTR_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ADDRESS = + ADRASTEA_SOC_LF_TIMER_CONTROL0_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK = + ADRASTEA_SOC_LF_TIMER_CONTROL0_ENABLE_MASK, + /* chip id start */ + .d_SOC_CHIP_ID_ADDRESS = ADRASTEA_SOC_CHIP_ID_ADDRESS, + .d_SOC_CHIP_ID_VERSION_MASK = ADRASTEA_SOC_CHIP_ID_VERSION_MASK, + .d_SOC_CHIP_ID_VERSION_LSB = ADRASTEA_SOC_CHIP_ID_VERSION_LSB, + .d_SOC_CHIP_ID_REVISION_MASK = ADRASTEA_SOC_CHIP_ID_REVISION_MASK, + .d_SOC_CHIP_ID_REVISION_LSB = ADRASTEA_SOC_CHIP_ID_REVISION_LSB, + /* chip id end */ + .d_A_SOC_CORE_SCRATCH_0_ADDRESS = ADRASTEA_A_SOC_CORE_SCRATCH_0_ADDRESS, + .d_A_SOC_CORE_SPARE_0_REGISTER = ADRASTEA_A_SOC_CORE_SPARE_0_REGISTER, + .d_PCIE_INTR_FIRMWARE_ROUTE_MASK = + ADRASTEA_PCIE_INTR_FIRMWARE_ROUTE_MASK, + .d_A_SOC_CORE_PCIE_INTR_CAUSE_GRP1 = + ADRASTEA_A_SOC_CORE_PCIE_INTR_CAUSE_GRP1, + .d_A_SOC_CORE_SPARE_1_REGISTER = + ADRASTEA_A_SOC_CORE_SPARE_1_REGISTER, + .d_A_SOC_CORE_PCIE_INTR_CLR_GRP1 = + ADRASTEA_A_SOC_CORE_PCIE_INTR_CLR_GRP1, + .d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP1 = + ADRASTEA_A_SOC_CORE_PCIE_INTR_ENABLE_GRP1, + .d_A_SOC_PCIE_PCIE_SCRATCH_0 = ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_0, + .d_A_SOC_PCIE_PCIE_SCRATCH_1 = ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_1, + .d_A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA = + ADRASTEA_A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA, + .d_A_SOC_PCIE_PCIE_SCRATCH_2 = ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_2, + .d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK = + ADRASTEA_A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK, + .d_WLAN_DEBUG_INPUT_SEL_OFFSET = ADRASTEA_WLAN_DEBUG_INPUT_SEL_OFFSET, + .d_WLAN_DEBUG_INPUT_SEL_SRC_MSB = ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_MSB, + .d_WLAN_DEBUG_INPUT_SEL_SRC_LSB = ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_LSB, + .d_WLAN_DEBUG_INPUT_SEL_SRC_MASK = + ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_MASK, + .d_WLAN_DEBUG_CONTROL_OFFSET = ADRASTEA_WLAN_DEBUG_CONTROL_OFFSET, + .d_WLAN_DEBUG_CONTROL_ENABLE_MSB = + ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_MSB, + .d_WLAN_DEBUG_CONTROL_ENABLE_LSB = + ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_LSB, + .d_WLAN_DEBUG_CONTROL_ENABLE_MASK = + ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_MASK, + .d_WLAN_DEBUG_OUT_OFFSET = ADRASTEA_WLAN_DEBUG_OUT_OFFSET, + .d_WLAN_DEBUG_OUT_DATA_MSB = ADRASTEA_WLAN_DEBUG_OUT_DATA_MSB, + .d_WLAN_DEBUG_OUT_DATA_LSB = ADRASTEA_WLAN_DEBUG_OUT_DATA_LSB, + .d_WLAN_DEBUG_OUT_DATA_MASK = ADRASTEA_WLAN_DEBUG_OUT_DATA_MASK, + .d_AMBA_DEBUG_BUS_OFFSET = ADRASTEA_AMBA_DEBUG_BUS_OFFSET, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB = + ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB = + ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK = + ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK, + .d_AMBA_DEBUG_BUS_SEL_MSB = ADRASTEA_AMBA_DEBUG_BUS_SEL_MSB, + .d_AMBA_DEBUG_BUS_SEL_LSB = ADRASTEA_AMBA_DEBUG_BUS_SEL_LSB, + .d_AMBA_DEBUG_BUS_SEL_MASK = ADRASTEA_AMBA_DEBUG_BUS_SEL_MASK, + +#ifdef QCA_WIFI_3_0_ADRASTEA + .d_Q6_ENABLE_REGISTER_0 = ADRASTEA_Q6_ENABLE_REGISTER_0, + .d_Q6_ENABLE_REGISTER_1 = ADRASTEA_Q6_ENABLE_REGISTER_1, + .d_Q6_CAUSE_REGISTER_0 = ADRASTEA_Q6_CAUSE_REGISTER_0, + .d_Q6_CAUSE_REGISTER_1 = ADRASTEA_Q6_CAUSE_REGISTER_1, + .d_Q6_CLEAR_REGISTER_0 = ADRASTEA_Q6_CLEAR_REGISTER_0, + .d_Q6_CLEAR_REGISTER_1 = ADRASTEA_Q6_CLEAR_REGISTER_1, +#endif + +#ifdef CONFIG_BYPASS_QMI + .d_BYPASS_QMI_TEMP_REGISTER = GENOA_OFFSET + + ADRASTEA_BYPASS_QMI_TEMP_REGISTER, +#endif +}; + +struct hostdef_s genoa_hostdef = { + .d_INT_STATUS_ENABLE_ERROR_LSB = ADRASTEA_INT_STATUS_ENABLE_ERROR_LSB, + .d_INT_STATUS_ENABLE_ERROR_MASK = ADRASTEA_INT_STATUS_ENABLE_ERROR_MASK, + .d_INT_STATUS_ENABLE_CPU_LSB = ADRASTEA_INT_STATUS_ENABLE_CPU_LSB, + .d_INT_STATUS_ENABLE_CPU_MASK = ADRASTEA_INT_STATUS_ENABLE_CPU_MASK, + .d_INT_STATUS_ENABLE_COUNTER_LSB = + ADRASTEA_INT_STATUS_ENABLE_COUNTER_LSB, + .d_INT_STATUS_ENABLE_COUNTER_MASK = + ADRASTEA_INT_STATUS_ENABLE_COUNTER_MASK, + .d_INT_STATUS_ENABLE_MBOX_DATA_LSB = + ADRASTEA_INT_STATUS_ENABLE_MBOX_DATA_LSB, + .d_INT_STATUS_ENABLE_MBOX_DATA_MASK = + ADRASTEA_INT_STATUS_ENABLE_MBOX_DATA_MASK, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB = + ADRASTEA_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK = + ADRASTEA_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB = + ADRASTEA_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK = + ADRASTEA_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, + .d_COUNTER_INT_STATUS_ENABLE_BIT_LSB = + ADRASTEA_COUNTER_INT_STATUS_ENABLE_BIT_LSB, + .d_COUNTER_INT_STATUS_ENABLE_BIT_MASK = + ADRASTEA_COUNTER_INT_STATUS_ENABLE_BIT_MASK, + .d_INT_STATUS_ENABLE_ADDRESS = ADRASTEA_INT_STATUS_ENABLE_ADDRESS, + .d_CPU_INT_STATUS_ENABLE_BIT_LSB = + ADRASTEA_CPU_INT_STATUS_ENABLE_BIT_LSB, + .d_CPU_INT_STATUS_ENABLE_BIT_MASK = + ADRASTEA_CPU_INT_STATUS_ENABLE_BIT_MASK, + .d_HOST_INT_STATUS_ADDRESS = ADRASTEA_HOST_INT_STATUS_ADDRESS, + .d_CPU_INT_STATUS_ADDRESS = ADRASTEA_CPU_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_ADDRESS = ADRASTEA_ERROR_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_WAKEUP_MASK = ADRASTEA_ERROR_INT_STATUS_WAKEUP_MASK, + .d_ERROR_INT_STATUS_WAKEUP_LSB = ADRASTEA_ERROR_INT_STATUS_WAKEUP_LSB, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK = + ADRASTEA_ERROR_INT_STATUS_RX_UNDERFLOW_MASK, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB = + ADRASTEA_ERROR_INT_STATUS_RX_UNDERFLOW_LSB, + .d_ERROR_INT_STATUS_TX_OVERFLOW_MASK = + ADRASTEA_ERROR_INT_STATUS_TX_OVERFLOW_MASK, + .d_ERROR_INT_STATUS_TX_OVERFLOW_LSB = + ADRASTEA_ERROR_INT_STATUS_TX_OVERFLOW_LSB, + .d_COUNT_DEC_ADDRESS = ADRASTEA_COUNT_DEC_ADDRESS, + .d_HOST_INT_STATUS_CPU_MASK = ADRASTEA_HOST_INT_STATUS_CPU_MASK, + .d_HOST_INT_STATUS_CPU_LSB = ADRASTEA_HOST_INT_STATUS_CPU_LSB, + .d_HOST_INT_STATUS_ERROR_MASK = ADRASTEA_HOST_INT_STATUS_ERROR_MASK, + .d_HOST_INT_STATUS_ERROR_LSB = ADRASTEA_HOST_INT_STATUS_ERROR_LSB, + .d_HOST_INT_STATUS_COUNTER_MASK = ADRASTEA_HOST_INT_STATUS_COUNTER_MASK, + .d_HOST_INT_STATUS_COUNTER_LSB = ADRASTEA_HOST_INT_STATUS_COUNTER_LSB, + .d_RX_LOOKAHEAD_VALID_ADDRESS = ADRASTEA_RX_LOOKAHEAD_VALID_ADDRESS, + .d_WINDOW_DATA_ADDRESS = ADRASTEA_WINDOW_DATA_ADDRESS, + .d_WINDOW_READ_ADDR_ADDRESS = ADRASTEA_WINDOW_READ_ADDR_ADDRESS, + .d_WINDOW_WRITE_ADDR_ADDRESS = ADRASTEA_WINDOW_WRITE_ADDR_ADDRESS, + .d_SOC_GLOBAL_RESET_ADDRESS = ADRASTEA_SOC_GLOBAL_RESET_ADDRESS, + .d_RTC_STATE_ADDRESS = ADRASTEA_RTC_STATE_ADDRESS, + .d_RTC_STATE_COLD_RESET_MASK = ADRASTEA_RTC_STATE_COLD_RESET_MASK, + .d_PCIE_LOCAL_BASE_ADDRESS = ADRASTEA_PCIE_LOCAL_BASE_ADDRESS, + .d_PCIE_SOC_WAKE_RESET = ADRASTEA_PCIE_SOC_WAKE_RESET, + .d_PCIE_SOC_WAKE_ADDRESS = ADRASTEA_PCIE_SOC_WAKE_ADDRESS, + .d_PCIE_SOC_WAKE_V_MASK = ADRASTEA_PCIE_SOC_WAKE_V_MASK, + .d_RTC_STATE_V_MASK = ADRASTEA_RTC_STATE_V_MASK, + .d_RTC_STATE_V_LSB = ADRASTEA_RTC_STATE_V_LSB, + .d_FW_IND_EVENT_PENDING = ADRASTEA_FW_IND_EVENT_PENDING, + .d_FW_IND_INITIALIZED = ADRASTEA_FW_IND_INITIALIZED, + .d_FW_IND_HELPER = ADRASTEA_FW_IND_HELPER, + .d_RTC_STATE_V_ON = ADRASTEA_RTC_STATE_V_ON, +#if defined(SDIO_3_0) + .d_HOST_INT_STATUS_MBOX_DATA_MASK = + ADRASTEA_HOST_INT_STATUS_MBOX_DATA_MASK, + .d_HOST_INT_STATUS_MBOX_DATA_LSB = + ADRASTEA_HOST_INT_STATUS_MBOX_DATA_LSB, +#endif + .d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS, + .d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK, + .d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS, + .d_MSI_MAGIC_ADR_ADDRESS = MSI_MAGIC_ADR_ADDRESS, + .d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS, + .d_HOST_CE_COUNT = ADRASTEA_CE_COUNT, + .d_ENABLE_MSI = 0, + .d_MUX_ID_MASK = 0xf000, + .d_TRANSACTION_ID_MASK = 0x0fff, + .d_DESC_DATA_FLAG_MASK = 0x1FFFE3E0, + .d_A_SOC_PCIE_PCIE_BAR0_START = ADRASTEA_A_SOC_PCIE_PCIE_BAR0_START, +}; + +struct ce_reg_def genoa_ce_targetdef = { + /* copy_engine.c */ + .d_DST_WR_INDEX_ADDRESS = ADRASTEA_DST_WR_INDEX_OFFSET, + .d_SRC_WATERMARK_ADDRESS = ADRASTEA_SRC_WATERMARK_OFFSET, + .d_SRC_WATERMARK_LOW_MASK = ADRASTEA_SRC_WATERMARK_LOW_MASK, + .d_SRC_WATERMARK_HIGH_MASK = ADRASTEA_SRC_WATERMARK_HIGH_MASK, + .d_DST_WATERMARK_LOW_MASK = ADRASTEA_DST_WATERMARK_LOW_MASK, + .d_DST_WATERMARK_HIGH_MASK = ADRASTEA_DST_WATERMARK_HIGH_MASK, + .d_CURRENT_SRRI_ADDRESS = ADRASTEA_CURRENT_SRRI_OFFSET, + .d_CURRENT_DRRI_ADDRESS = ADRASTEA_CURRENT_DRRI_OFFSET, + .d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK = + ADRASTEA_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK = + ADRASTEA_HOST_IS_SRC_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK = + ADRASTEA_HOST_IS_DST_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_DST_RING_LOW_WATERMARK_MASK = + ADRASTEA_HOST_IS_DST_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_ADDRESS = ADRASTEA_HOST_IS_OFFSET, + .d_MISC_IS_ADDRESS = ADRASTEA_MISC_IS_OFFSET, + .d_HOST_IS_COPY_COMPLETE_MASK = ADRASTEA_HOST_IS_COPY_COMPLETE_MASK, + .d_CE_WRAPPER_BASE_ADDRESS = GENOA_OFFSET + + ADRASTEA_CE_WRAPPER_BASE_ADDRESS, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS = + GENOA_OFFSET + + ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS_OFFSET, + .d_CE_DDR_ADDRESS_FOR_RRI_LOW = + GENOA_OFFSET + ADRASTEA_CE_DDR_ADDRESS_FOR_RRI_LOW, + .d_CE_DDR_ADDRESS_FOR_RRI_HIGH = + GENOA_OFFSET + ADRASTEA_CE_DDR_ADDRESS_FOR_RRI_HIGH, + .d_HOST_IE_ADDRESS = ADRASTEA_HOST_IE_OFFSET, + .d_HOST_IE_COPY_COMPLETE_MASK = ADRASTEA_HOST_IE_COPY_COMPLETE_MASK, + .d_SR_BA_ADDRESS = ADRASTEA_SR_BA_OFFSET, + .d_SR_BA_ADDRESS_HIGH = ADRASTEA_SR_BA_HIGH_OFFSET, + .d_SR_SIZE_ADDRESS = ADRASTEA_SR_SIZE_OFFSET, + .d_CE_CTRL1_ADDRESS = ADRASTEA_CE_CTRL1_OFFSET, + .d_CE_CTRL1_DMAX_LENGTH_MASK = ADRASTEA_CE_CTRL1_DMAX_LENGTH_MASK, + .d_DR_BA_ADDRESS = ADRASTEA_DR_BA_OFFSET, + .d_DR_BA_ADDRESS_HIGH = ADRASTEA_DR_BA_HIGH_OFFSET, + .d_DR_SIZE_ADDRESS = ADRASTEA_DR_SIZE_OFFSET, + .d_CE_CMD_REGISTER = ADRASTEA_CE_CMD_REGISTER_OFFSET, + .d_CE_MSI_ADDRESS = MISSING_FOR_ADRASTEA, + .d_CE_MSI_ADDRESS_HIGH = MISSING_FOR_ADRASTEA, + .d_CE_MSI_DATA = MISSING_FOR_ADRASTEA, + .d_CE_MSI_ENABLE_BIT = MISSING_FOR_ADRASTEA, + .d_MISC_IE_ADDRESS = ADRASTEA_MISC_IE_OFFSET, + .d_MISC_IS_AXI_ERR_MASK = ADRASTEA_MISC_IS_AXI_ERR_MASK, + .d_MISC_IS_DST_ADDR_ERR_MASK = ADRASTEA_MISC_IS_DST_ADDR_ERR_MASK, + .d_MISC_IS_SRC_LEN_ERR_MASK = ADRASTEA_MISC_IS_SRC_LEN_ERR_MASK, + .d_MISC_IS_DST_MAX_LEN_VIO_MASK = ADRASTEA_MISC_IS_DST_MAX_LEN_VIO_MASK, + .d_MISC_IS_DST_RING_OVERFLOW_MASK = + ADRASTEA_MISC_IS_DST_RING_OVERFLOW_MASK, + .d_MISC_IS_SRC_RING_OVERFLOW_MASK = + ADRASTEA_MISC_IS_SRC_RING_OVERFLOW_MASK, + .d_SRC_WATERMARK_LOW_LSB = ADRASTEA_SRC_WATERMARK_LOW_LSB, + .d_SRC_WATERMARK_HIGH_LSB = ADRASTEA_SRC_WATERMARK_HIGH_LSB, + .d_DST_WATERMARK_LOW_LSB = ADRASTEA_DST_WATERMARK_LOW_LSB, + .d_DST_WATERMARK_HIGH_LSB = ADRASTEA_DST_WATERMARK_HIGH_LSB, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK = + ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB = + ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB, + .d_CE_CTRL1_DMAX_LENGTH_LSB = ADRASTEA_CE_CTRL1_DMAX_LENGTH_LSB, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK = + ADRASTEA_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK = + ADRASTEA_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB = + ADRASTEA_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB = + ADRASTEA_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_IDX_UPD_EN_MASK = + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__IDX_UPD_EN___M, + .d_CE_WRAPPER_DEBUG_OFFSET = ADRASTEA_CE_WRAPPER_DEBUG_OFFSET, + .d_CE_WRAPPER_DEBUG_SEL_MSB = ADRASTEA_CE_WRAPPER_DEBUG_SEL_MSB, + .d_CE_WRAPPER_DEBUG_SEL_LSB = ADRASTEA_CE_WRAPPER_DEBUG_SEL_LSB, + .d_CE_WRAPPER_DEBUG_SEL_MASK = ADRASTEA_CE_WRAPPER_DEBUG_SEL_MASK, + .d_CE_DEBUG_OFFSET = ADRASTEA_CE_DEBUG_OFFSET, + .d_CE_DEBUG_SEL_MSB = ADRASTEA_CE_DEBUG_SEL_MSB, + .d_CE_DEBUG_SEL_LSB = ADRASTEA_CE_DEBUG_SEL_LSB, + .d_CE_DEBUG_SEL_MASK = ADRASTEA_CE_DEBUG_SEL_MASK, + .d_CE0_BASE_ADDRESS = GENOA_OFFSET + ADRASTEA_CE0_BASE_ADDRESS, + .d_CE1_BASE_ADDRESS = GENOA_OFFSET + ADRASTEA_CE1_BASE_ADDRESS, + .d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES = + MISSING_FOR_ADRASTEA, + .d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS = + MISSING_FOR_ADRASTEA, +}; + +struct host_shadow_regs_s genoa_host_shadow_regs = { + .d_A_LOCAL_SHADOW_REG_VALUE_0 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_0, + .d_A_LOCAL_SHADOW_REG_VALUE_1 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_1, + .d_A_LOCAL_SHADOW_REG_VALUE_2 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_2, + .d_A_LOCAL_SHADOW_REG_VALUE_3 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_3, + .d_A_LOCAL_SHADOW_REG_VALUE_4 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_4, + .d_A_LOCAL_SHADOW_REG_VALUE_5 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_5, + .d_A_LOCAL_SHADOW_REG_VALUE_6 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_6, + .d_A_LOCAL_SHADOW_REG_VALUE_7 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_7, + .d_A_LOCAL_SHADOW_REG_VALUE_8 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_8, + .d_A_LOCAL_SHADOW_REG_VALUE_9 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_9, + .d_A_LOCAL_SHADOW_REG_VALUE_10 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_10, + .d_A_LOCAL_SHADOW_REG_VALUE_11 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_11, + .d_A_LOCAL_SHADOW_REG_VALUE_12 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_12, + .d_A_LOCAL_SHADOW_REG_VALUE_13 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_13, + .d_A_LOCAL_SHADOW_REG_VALUE_14 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_14, + .d_A_LOCAL_SHADOW_REG_VALUE_15 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_15, + .d_A_LOCAL_SHADOW_REG_VALUE_16 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_16, + .d_A_LOCAL_SHADOW_REG_VALUE_17 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_17, + .d_A_LOCAL_SHADOW_REG_VALUE_18 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_18, + .d_A_LOCAL_SHADOW_REG_VALUE_19 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_19, + .d_A_LOCAL_SHADOW_REG_VALUE_20 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_20, + .d_A_LOCAL_SHADOW_REG_VALUE_21 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_21, + .d_A_LOCAL_SHADOW_REG_VALUE_22 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_22, + .d_A_LOCAL_SHADOW_REG_VALUE_23 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_23, + .d_A_LOCAL_SHADOW_REG_ADDRESS_0 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_0, + .d_A_LOCAL_SHADOW_REG_ADDRESS_1 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_1, + .d_A_LOCAL_SHADOW_REG_ADDRESS_2 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_2, + .d_A_LOCAL_SHADOW_REG_ADDRESS_3 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_3, + .d_A_LOCAL_SHADOW_REG_ADDRESS_4 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_4, + .d_A_LOCAL_SHADOW_REG_ADDRESS_5 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_5, + .d_A_LOCAL_SHADOW_REG_ADDRESS_6 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_6, + .d_A_LOCAL_SHADOW_REG_ADDRESS_7 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_7, + .d_A_LOCAL_SHADOW_REG_ADDRESS_8 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_8, + .d_A_LOCAL_SHADOW_REG_ADDRESS_9 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_9, + .d_A_LOCAL_SHADOW_REG_ADDRESS_10 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_10, + .d_A_LOCAL_SHADOW_REG_ADDRESS_11 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_11, + .d_A_LOCAL_SHADOW_REG_ADDRESS_12 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_12, + .d_A_LOCAL_SHADOW_REG_ADDRESS_13 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_13, + .d_A_LOCAL_SHADOW_REG_ADDRESS_14 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_14, + .d_A_LOCAL_SHADOW_REG_ADDRESS_15 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_15, + .d_A_LOCAL_SHADOW_REG_ADDRESS_16 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_16, + .d_A_LOCAL_SHADOW_REG_ADDRESS_17 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_17, + .d_A_LOCAL_SHADOW_REG_ADDRESS_18 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_18, + .d_A_LOCAL_SHADOW_REG_ADDRESS_19 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_19, + .d_A_LOCAL_SHADOW_REG_ADDRESS_20 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_20, + .d_A_LOCAL_SHADOW_REG_ADDRESS_21 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_21, + .d_A_LOCAL_SHADOW_REG_ADDRESS_22 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_22, + .d_A_LOCAL_SHADOW_REG_ADDRESS_23 = + GENOA_OFFSET + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_23 +}; + +#endif /* ADRASTEA_REG_DEF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ar6004def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6004def.c new file mode 100644 index 0000000000000000000000000000000000000000..88bdcc5df2320dfbbe3a0574e4b27a1d4a5eb646 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6004def.c @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2013,2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if defined(AR6004_HEADERS_DEF) +#define AR6004 1 + +#define WLAN_HEADERS 1 +#include "common_drv.h" +#include "AR6004/hw/apb_map.h" +#include "AR6004/hw/gpio_reg.h" +#include "AR6004/hw/rtc_reg.h" +#include "AR6004/hw/si_reg.h" +#include "AR6004/hw/mbox_reg.h" +#include "AR6004/hw/mbox_wlan_host_reg.h" + +#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET +#define SCRATCH_BASE_ADDRESS MBOX_BASE_ADDRESS + +#define MY_TARGET_DEF AR6004_TARGETdef +#define MY_HOST_DEF AR6004_HOSTdef +#define MY_CEREG_DEF AR6004_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ AR6004_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ AR6004_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *AR6004_TARGETdef; +struct hostdef_s *AR6004_HOSTdef; +#endif /*AR6004_HEADERS_DEF */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320def.c new file mode 100644 index 0000000000000000000000000000000000000000..5440ecdecd9a1462abfcfaa892d23e3ef1e919b3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320def.c @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2013,2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if defined(AR6320_HEADERS_DEF) +#define AR6320 1 + +#define WLAN_HEADERS 1 +#include "common_drv.h" +#include "AR6320/hw/apb_map.h" +#include "AR6320/hw/gpio_reg.h" +#include "AR6320/hw/rtc_reg.h" +#include "AR6320/extra/hw/si_reg.h" +#include "AR6320/hw/mbox_reg.h" +#include "AR6320/extra/hw/ce_reg_csr.h" +#include "AR6320/hw/mbox_wlan_host_reg.h" +#include "soc_addrs.h" +#include "AR6320/extra/hw/soc_core_reg.h" +#include "AR6320/hw/pcie_local_reg.h" +#include "AR6320/hw/soc_pcie_reg.h" + +#ifndef SYSTEM_SLEEP_OFFSET +#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET +#endif +#ifndef WLAN_SYSTEM_SLEEP_OFFSET +#define WLAN_SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET +#endif +#ifndef WLAN_RESET_CONTROL_OFFSET +#define WLAN_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_OFFSET +#endif +#ifndef RESET_CONTROL_SI0_RST_MASK +#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK +#endif +#ifndef SI_BASE_ADDRESS +#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS +#endif +#ifndef PCIE_LOCAL_BASE_ADDRESS +/* TBDXXX: Eventually, this Base Address will be defined in HW header files */ +#define PCIE_LOCAL_BASE_ADDRESS 0x80000 +#endif +#ifndef RTC_STATE_V_ON +#define RTC_STATE_V_ON 3 +#endif + +#define MY_TARGET_DEF AR6320_TARGETdef +#define MY_HOST_DEF AR6320_HOSTdef +#define MY_CEREG_DEF AR6320_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ AR6320_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ AR6320_BOARD_EXT_DATA_SZ +#define DRAM_BASE_ADDRESS TARG_DRAM_START +#include "targetdef.h" +#include "hostdef.h" +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *AR6320_TARGETdef; +struct hostdef_s *AR6320_HOSTdef; +#endif /* AR6320_HEADERS_DEF */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320def.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320def.h new file mode 100644 index 0000000000000000000000000000000000000000..7fdfb3de2bf9747eed5f814d27f4b4dd5a3d3add --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320def.h @@ -0,0 +1,805 @@ +/* + * Copyright (c) 2011-2018, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _AR6320DEF_H_ +#define _AR6320DEF_H_ + +/* Base Addresses */ +#define AR6320_RTC_SOC_BASE_ADDRESS 0x00000000 +#define AR6320_RTC_WMAC_BASE_ADDRESS 0x00001000 +#define AR6320_MAC_COEX_BASE_ADDRESS 0x0000f000 +#define AR6320_BT_COEX_BASE_ADDRESS 0x00002000 +#define AR6320_SOC_CORE_BASE_ADDRESS 0x0003a000 +#define AR6320_WLAN_UART_BASE_ADDRESS 0x0000c000 +#define AR6320_WLAN_SI_BASE_ADDRESS 0x00010000 +#define AR6320_WLAN_GPIO_BASE_ADDRESS 0x00005000 +#define AR6320_WLAN_ANALOG_INTF_BASE_ADDRESS 0x00006000 +#define AR6320_WLAN_MAC_BASE_ADDRESS 0x00010000 +#define AR6320_EFUSE_BASE_ADDRESS 0x00024000 +#define AR6320_FPGA_REG_BASE_ADDRESS 0x00039000 +#define AR6320_WLAN_UART2_BASE_ADDRESS 0x00054c00 +#define AR6320_DBI_BASE_ADDRESS 0x0003c000 + +#define AR6320_SCRATCH_3_ADDRESS 0x0028 +#define AR6320_TARG_DRAM_START 0x00400000 +#define AR6320_SOC_SYSTEM_SLEEP_OFFSET 0x000000c0 +#define AR6320_SOC_RESET_CONTROL_OFFSET 0x00000000 +#define AR6320_SOC_CLOCK_CONTROL_OFFSET 0x00000028 +#define AR6320_SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001 +#define AR6320_SOC_RESET_CONTROL_SI0_RST_MASK 0x00000000 +#define AR6320_WLAN_GPIO_PIN0_ADDRESS 0x00000068 +#define AR6320_WLAN_GPIO_PIN1_ADDRESS 0x0000006c +#define AR6320_WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 +#define AR6320_WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 +#define AR6320_SOC_CPU_CLOCK_OFFSET 0x00000020 +#define AR6320_SOC_LPO_CAL_OFFSET 0x000000e0 +#define AR6320_WLAN_GPIO_PIN10_ADDRESS 0x00000090 +#define AR6320_WLAN_GPIO_PIN11_ADDRESS 0x00000094 +#define AR6320_WLAN_GPIO_PIN12_ADDRESS 0x00000098 +#define AR6320_WLAN_GPIO_PIN13_ADDRESS 0x0000009c +#define AR6320_SOC_CPU_CLOCK_STANDARD_LSB 0 +#define AR6320_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +#define AR6320_SOC_LPO_CAL_ENABLE_LSB 20 +#define AR6320_SOC_LPO_CAL_ENABLE_MASK 0x00100000 + +#define AR6320_WLAN_SYSTEM_SLEEP_DISABLE_LSB 0 +#define AR6320_WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 +#define AR6320_WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008 +#define AR6320_WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004 +#define AR6320_SI_CONFIG_BIDIR_OD_DATA_LSB 18 +#define AR6320_SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 +#define AR6320_SI_CONFIG_I2C_LSB 16 +#define AR6320_SI_CONFIG_I2C_MASK 0x00010000 +#define AR6320_SI_CONFIG_POS_SAMPLE_LSB 7 +#define AR6320_SI_CONFIG_POS_SAMPLE_MASK 0x00000080 +#define AR6320_SI_CONFIG_INACTIVE_CLK_LSB 4 +#define AR6320_SI_CONFIG_INACTIVE_CLK_MASK 0x00000010 +#define AR6320_SI_CONFIG_INACTIVE_DATA_LSB 5 +#define AR6320_SI_CONFIG_INACTIVE_DATA_MASK 0x00000020 +#define AR6320_SI_CONFIG_DIVIDER_LSB 0 +#define AR6320_SI_CONFIG_DIVIDER_MASK 0x0000000f +#define AR6320_SI_CONFIG_OFFSET 0x00000000 +#define AR6320_SI_TX_DATA0_OFFSET 0x00000008 +#define AR6320_SI_TX_DATA1_OFFSET 0x0000000c +#define AR6320_SI_RX_DATA0_OFFSET 0x00000010 +#define AR6320_SI_RX_DATA1_OFFSET 0x00000014 +#define AR6320_SI_CS_OFFSET 0x00000004 +#define AR6320_SI_CS_DONE_ERR_MASK 0x00000400 +#define AR6320_SI_CS_DONE_INT_MASK 0x00000200 +#define AR6320_SI_CS_START_LSB 8 +#define AR6320_SI_CS_START_MASK 0x00000100 +#define AR6320_SI_CS_RX_CNT_LSB 4 +#define AR6320_SI_CS_RX_CNT_MASK 0x000000f0 +#define AR6320_SI_CS_TX_CNT_LSB 0 +#define AR6320_SI_CS_TX_CNT_MASK 0x0000000f +#define AR6320_SR_WR_INDEX_ADDRESS 0x003c +#define AR6320_DST_WATERMARK_ADDRESS 0x0050 +#define AR6320_RX_MSDU_END_4_FIRST_MSDU_LSB 14 +#define AR6320_RX_MSDU_END_4_FIRST_MSDU_MASK 0x00004000 +#define AR6320_RX_MPDU_START_0_RETRY_LSB 14 +#define AR6320_RX_MPDU_START_0_RETRY_MASK 0x00004000 +#define AR6320_RX_MPDU_START_0_SEQ_NUM_LSB 16 +#define AR6320_RX_MPDU_START_0_SEQ_NUM_MASK 0x0fff0000 +#define AR6320_RX_MPDU_START_2_TID_LSB 28 +#define AR6320_RX_MPDU_START_2_TID_MASK 0xf0000000 +#if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \ + defined(HIF_IPCI)) +#define AR6320_SOC_PCIE_BASE_ADDRESS 0x00038000 +#define AR6320_CE_WRAPPER_BASE_ADDRESS 0x00034000 +#define AR6320_CE0_BASE_ADDRESS 0x00034400 +#define AR6320_CE1_BASE_ADDRESS 0x00034800 +#define AR6320_CE2_BASE_ADDRESS 0x00034c00 +#define AR6320_CE3_BASE_ADDRESS 0x00035000 +#define AR6320_CE4_BASE_ADDRESS 0x00035400 +#define AR6320_CE5_BASE_ADDRESS 0x00035800 +#define AR6320_CE6_BASE_ADDRESS 0x00035c00 +#define AR6320_CE7_BASE_ADDRESS 0x00036000 +#define AR6320_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x00007800 +#define AR6320_CE_COUNT 8 +#define AR6320_CE_CTRL1_ADDRESS 0x0010 +#define AR6320_CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff +#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 +#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00 +#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8 +#define AR6320_CE_CTRL1_DMAX_LENGTH_LSB 0 +#define AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000 +#define AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000 +#define AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16 +#define AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17 +#define AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000020 +#define AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB 5 +#define AR6320_PCIE_SOC_WAKE_RESET 0x00000000 +#define AR6320_PCIE_SOC_WAKE_ADDRESS 0x0004 +#define AR6320_PCIE_SOC_WAKE_V_MASK 0x00000001 +#define AR6320_MUX_ID_MASK 0x0000 +#define AR6320_TRANSACTION_ID_MASK 0x3fff +#define AR6320_PCIE_LOCAL_BASE_ADDRESS 0x80000 +#define AR6320_FW_IND_HELPER 4 +#define AR6320_PCIE_INTR_ENABLE_ADDRESS 0x0008 +#define AR6320_PCIE_INTR_CLR_ADDRESS 0x0014 +#define AR6320_PCIE_INTR_FIRMWARE_MASK 0x00000400 +#define AR6320_PCIE_INTR_CE0_MASK 0x00000800 +#define AR6320_PCIE_INTR_CE_MASK_ALL 0x0007f800 +#define AR6320_PCIE_INTR_CAUSE_ADDRESS 0x000c +#define AR6320_SOC_RESET_CONTROL_CE_RST_MASK 0x00000001 +#endif +#define AR6320_RX_MPDU_START_2_PN_47_32_LSB 0 +#define AR6320_RX_MPDU_START_2_PN_47_32_MASK 0x0000ffff +#define AR6320_RX_MSDU_END_1_KEY_ID_OCT_MASK 0x000000ff +#define AR6320_RX_MSDU_END_1_KEY_ID_OCT_LSB 0 +#define AR6320_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB 16 +#define AR6320_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK 0xffff0000 +#define AR6320_RX_MSDU_END_4_LAST_MSDU_LSB 15 +#define AR6320_RX_MSDU_END_4_LAST_MSDU_MASK 0x00008000 +#define AR6320_RX_ATTENTION_0_MCAST_BCAST_LSB 2 +#define AR6320_RX_ATTENTION_0_MCAST_BCAST_MASK 0x00000004 +#define AR6320_RX_ATTENTION_0_FRAGMENT_LSB 13 +#define AR6320_RX_ATTENTION_0_FRAGMENT_MASK 0x00002000 +#define AR6320_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK 0x08000000 +#define AR6320_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB 16 +#define AR6320_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK 0x00ff0000 +#define AR6320_RX_MSDU_START_0_MSDU_LENGTH_LSB 0 +#define AR6320_RX_MSDU_START_0_MSDU_LENGTH_MASK 0x00003fff +#define AR6320_RX_MSDU_START_2_DECAP_FORMAT_OFFSET 0x00000008 +#define AR6320_RX_MSDU_START_2_DECAP_FORMAT_LSB 8 +#define AR6320_RX_MSDU_START_2_DECAP_FORMAT_MASK 0x00000300 +#define AR6320_RX_MPDU_START_0_ENCRYPTED_LSB 13 +#define AR6320_RX_MPDU_START_0_ENCRYPTED_MASK 0x00002000 +#define AR6320_RX_ATTENTION_0_MORE_DATA_MASK 0x00000400 +#define AR6320_RX_ATTENTION_0_MSDU_DONE_MASK 0x80000000 +#define AR6320_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK 0x00040000 +#define AR6320_DST_WR_INDEX_ADDRESS 0x0040 +#define AR6320_SRC_WATERMARK_ADDRESS 0x004c +#define AR6320_SRC_WATERMARK_LOW_MASK 0xffff0000 +#define AR6320_SRC_WATERMARK_HIGH_MASK 0x0000ffff +#define AR6320_DST_WATERMARK_LOW_MASK 0xffff0000 +#define AR6320_DST_WATERMARK_HIGH_MASK 0x0000ffff +#define AR6320_CURRENT_SRRI_ADDRESS 0x0044 +#define AR6320_CURRENT_DRRI_ADDRESS 0x0048 +#define AR6320_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002 +#define AR6320_HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004 +#define AR6320_HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008 +#define AR6320_HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010 +#define AR6320_HOST_IS_ADDRESS 0x0030 +#define AR6320_HOST_IS_COPY_COMPLETE_MASK 0x00000001 +#define AR6320_HOST_IE_ADDRESS 0x002c +#define AR6320_HOST_IE_COPY_COMPLETE_MASK 0x00000001 +#define AR6320_SR_BA_ADDRESS 0x0000 +#define AR6320_SR_SIZE_ADDRESS 0x0004 +#define AR6320_DR_BA_ADDRESS 0x0008 +#define AR6320_DR_SIZE_ADDRESS 0x000c +#define AR6320_MISC_IE_ADDRESS 0x0034 +#define AR6320_MISC_IS_AXI_ERR_MASK 0x00000400 +#define AR6320_MISC_IS_DST_ADDR_ERR_MASK 0x00000200 +#define AR6320_MISC_IS_SRC_LEN_ERR_MASK 0x00000100 +#define AR6320_MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080 +#define AR6320_MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040 +#define AR6320_MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020 +#define AR6320_SRC_WATERMARK_LOW_LSB 16 +#define AR6320_SRC_WATERMARK_HIGH_LSB 0 +#define AR6320_DST_WATERMARK_LOW_LSB 16 +#define AR6320_DST_WATERMARK_HIGH_LSB 0 +#define AR6320_SOC_GLOBAL_RESET_ADDRESS 0x0008 +#define AR6320_RTC_STATE_ADDRESS 0x0000 +#define AR6320_RTC_STATE_COLD_RESET_MASK 0x00002000 +#define AR6320_RTC_STATE_V_MASK 0x00000007 +#define AR6320_RTC_STATE_V_LSB 0 +#define AR6320_RTC_STATE_V_ON 3 +#define AR6320_FW_IND_EVENT_PENDING 1 +#define AR6320_FW_IND_INITIALIZED 2 +#define AR6320_CPU_INTR_ADDRESS 0x0010 +#define AR6320_SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050 +#define AR6320_SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004 +#define AR6320_SOC_LF_TIMER_STATUS0_ADDRESS 0x00000054 +#define AR6320_SOC_RESET_CONTROL_ADDRESS 0x00000000 +#define AR6320_SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040 +#define AR6320_CORE_CTRL_ADDRESS 0x0000 +#define AR6320_CORE_CTRL_CPU_INTR_MASK 0x00002000 +#define AR6320_LOCAL_SCRATCH_OFFSET 0x000000c0 +#define AR6320_CLOCK_GPIO_OFFSET 0xffffffff +#define AR6320_CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0 +#define AR6320_CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 +#define AR6320_SOC_CHIP_ID_ADDRESS 0x000000f0 +#define AR6320_SOC_CHIP_ID_VERSION_MASK 0xfffc0000 +#define AR6320_SOC_CHIP_ID_VERSION_LSB 18 +#define AR6320_SOC_CHIP_ID_REVISION_MASK 0x00000f00 +#define AR6320_SOC_CHIP_ID_REVISION_LSB 8 +#if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \ + defined(HIF_IPCI)) +#define AR6320_SOC_POWER_REG_OFFSET 0x0000010c +/* Copy Engine Debug */ +#define AR6320_WLAN_DEBUG_INPUT_SEL_OFFSET 0x0000010c +#define AR6320_WLAN_DEBUG_INPUT_SEL_SRC_MSB 3 +#define AR6320_WLAN_DEBUG_INPUT_SEL_SRC_LSB 0 +#define AR6320_WLAN_DEBUG_INPUT_SEL_SRC_MASK 0x0000000f +#define AR6320_WLAN_DEBUG_CONTROL_OFFSET 0x00000108 +#define AR6320_WLAN_DEBUG_CONTROL_ENABLE_MSB 0 +#define AR6320_WLAN_DEBUG_CONTROL_ENABLE_LSB 0 +#define AR6320_WLAN_DEBUG_CONTROL_ENABLE_MASK 0x00000001 +#define AR6320_WLAN_DEBUG_OUT_OFFSET 0x00000110 +#define AR6320_WLAN_DEBUG_OUT_DATA_MSB 19 +#define AR6320_WLAN_DEBUG_OUT_DATA_LSB 0 +#define AR6320_WLAN_DEBUG_OUT_DATA_MASK 0x000fffff +#define AR6320_AMBA_DEBUG_BUS_OFFSET 0x0000011c +#define AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB 13 +#define AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB 8 +#define AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK 0x00003f00 +#define AR6320_AMBA_DEBUG_BUS_SEL_MSB 4 +#define AR6320_AMBA_DEBUG_BUS_SEL_LSB 0 +#define AR6320_AMBA_DEBUG_BUS_SEL_MASK 0x0000001f +#define AR6320_CE_WRAPPER_DEBUG_OFFSET 0x0008 +#define AR6320_CE_WRAPPER_DEBUG_SEL_MSB 5 +#define AR6320_CE_WRAPPER_DEBUG_SEL_LSB 0 +#define AR6320_CE_WRAPPER_DEBUG_SEL_MASK 0x0000003f +#define AR6320_CE_DEBUG_OFFSET 0x0054 +#define AR6320_CE_DEBUG_SEL_MSB 5 +#define AR6320_CE_DEBUG_SEL_LSB 0 +#define AR6320_CE_DEBUG_SEL_MASK 0x0000003f +/* End */ + +/* PLL start */ +#define AR6320_EFUSE_OFFSET 0x0000032c +#define AR6320_EFUSE_XTAL_SEL_MSB 10 +#define AR6320_EFUSE_XTAL_SEL_LSB 8 +#define AR6320_EFUSE_XTAL_SEL_MASK 0x00000700 +#define AR6320_BB_PLL_CONFIG_OFFSET 0x000002f4 +#define AR6320_BB_PLL_CONFIG_OUTDIV_MSB 20 +#define AR6320_BB_PLL_CONFIG_OUTDIV_LSB 18 +#define AR6320_BB_PLL_CONFIG_OUTDIV_MASK 0x001c0000 +#define AR6320_BB_PLL_CONFIG_FRAC_MSB 17 +#define AR6320_BB_PLL_CONFIG_FRAC_LSB 0 +#define AR6320_BB_PLL_CONFIG_FRAC_MASK 0x0003ffff +#define AR6320_WLAN_PLL_SETTLE_TIME_MSB 10 +#define AR6320_WLAN_PLL_SETTLE_TIME_LSB 0 +#define AR6320_WLAN_PLL_SETTLE_TIME_MASK 0x000007ff +#define AR6320_WLAN_PLL_SETTLE_OFFSET 0x0018 +#define AR6320_WLAN_PLL_SETTLE_SW_MASK 0x000007ff +#define AR6320_WLAN_PLL_SETTLE_RSTMASK 0xffffffff +#define AR6320_WLAN_PLL_SETTLE_RESET 0x00000400 +#define AR6320_WLAN_PLL_CONTROL_NOPWD_MSB 18 +#define AR6320_WLAN_PLL_CONTROL_NOPWD_LSB 18 +#define AR6320_WLAN_PLL_CONTROL_NOPWD_MASK 0x00040000 +#define AR6320_WLAN_PLL_CONTROL_BYPASS_MSB 16 +#define AR6320_WLAN_PLL_CONTROL_BYPASS_LSB 16 +#define AR6320_WLAN_PLL_CONTROL_BYPASS_MASK 0x00010000 +#define AR6320_WLAN_PLL_CONTROL_BYPASS_RESET 0x1 +#define AR6320_WLAN_PLL_CONTROL_CLK_SEL_MSB 15 +#define AR6320_WLAN_PLL_CONTROL_CLK_SEL_LSB 14 +#define AR6320_WLAN_PLL_CONTROL_CLK_SEL_MASK 0x0000c000 +#define AR6320_WLAN_PLL_CONTROL_CLK_SEL_RESET 0x0 +#define AR6320_WLAN_PLL_CONTROL_REFDIV_MSB 13 +#define AR6320_WLAN_PLL_CONTROL_REFDIV_LSB 10 +#define AR6320_WLAN_PLL_CONTROL_REFDIV_MASK 0x00003c00 +#define AR6320_WLAN_PLL_CONTROL_REFDIV_RESET 0x0 +#define AR6320_WLAN_PLL_CONTROL_DIV_MSB 9 +#define AR6320_WLAN_PLL_CONTROL_DIV_LSB 0 +#define AR6320_WLAN_PLL_CONTROL_DIV_MASK 0x000003ff +#define AR6320_WLAN_PLL_CONTROL_DIV_RESET 0x11 +#define AR6320_WLAN_PLL_CONTROL_OFFSET 0x0014 +#define AR6320_WLAN_PLL_CONTROL_SW_MASK 0x001fffff +#define AR6320_WLAN_PLL_CONTROL_RSTMASK 0xffffffff +#define AR6320_WLAN_PLL_CONTROL_RESET 0x00010011 +#define AR6320_SOC_CORE_CLK_CTRL_OFFSET 0x00000114 +#define AR6320_SOC_CORE_CLK_CTRL_DIV_MSB 2 +#define AR6320_SOC_CORE_CLK_CTRL_DIV_LSB 0 +#define AR6320_SOC_CORE_CLK_CTRL_DIV_MASK 0x00000007 +#define AR6320_RTC_SYNC_STATUS_PLL_CHANGING_MSB 5 +#define AR6320_RTC_SYNC_STATUS_PLL_CHANGING_LSB 5 +#define AR6320_RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020 +#define AR6320_RTC_SYNC_STATUS_PLL_CHANGING_RESET 0x0 +#define AR6320_RTC_SYNC_STATUS_OFFSET 0x0244 +#define AR6320_SOC_CPU_CLOCK_OFFSET 0x00000020 +#define AR6320_SOC_CPU_CLOCK_STANDARD_MSB 1 +#define AR6320_SOC_CPU_CLOCK_STANDARD_LSB 0 +#define AR6320_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +/* PLL end */ +#define AR6320_PCIE_INTR_CE_MASK(n) \ + (AR6320_PCIE_INTR_CE0_MASK << (n)) +#endif +#define AR6320_DRAM_BASE_ADDRESS AR6320_TARG_DRAM_START +#define AR6320_FW_INDICATOR_ADDRESS \ + (AR6320_SOC_CORE_BASE_ADDRESS + AR6320_SCRATCH_3_ADDRESS) +#define AR6320_SYSTEM_SLEEP_OFFSET AR6320_SOC_SYSTEM_SLEEP_OFFSET +#define AR6320_WLAN_SYSTEM_SLEEP_OFFSET 0x002c +#define AR6320_WLAN_RESET_CONTROL_OFFSET AR6320_SOC_RESET_CONTROL_OFFSET +#define AR6320_CLOCK_CONTROL_OFFSET AR6320_SOC_CLOCK_CONTROL_OFFSET +#define AR6320_CLOCK_CONTROL_SI0_CLK_MASK AR6320_SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define AR6320_RESET_CONTROL_MBOX_RST_MASK 0x00000004 +#define AR6320_RESET_CONTROL_SI0_RST_MASK AR6320_SOC_RESET_CONTROL_SI0_RST_MASK +#define AR6320_GPIO_BASE_ADDRESS AR6320_WLAN_GPIO_BASE_ADDRESS +#define AR6320_GPIO_PIN0_OFFSET AR6320_WLAN_GPIO_PIN0_ADDRESS +#define AR6320_GPIO_PIN1_OFFSET AR6320_WLAN_GPIO_PIN1_ADDRESS +#define AR6320_GPIO_PIN0_CONFIG_MASK AR6320_WLAN_GPIO_PIN0_CONFIG_MASK +#define AR6320_GPIO_PIN1_CONFIG_MASK AR6320_WLAN_GPIO_PIN1_CONFIG_MASK +#define AR6320_SI_BASE_ADDRESS 0x00050000 +#define AR6320_CPU_CLOCK_OFFSET AR6320_SOC_CPU_CLOCK_OFFSET +#define AR6320_LPO_CAL_OFFSET AR6320_SOC_LPO_CAL_OFFSET +#define AR6320_GPIO_PIN10_OFFSET AR6320_WLAN_GPIO_PIN10_ADDRESS +#define AR6320_GPIO_PIN11_OFFSET AR6320_WLAN_GPIO_PIN11_ADDRESS +#define AR6320_GPIO_PIN12_OFFSET AR6320_WLAN_GPIO_PIN12_ADDRESS +#define AR6320_GPIO_PIN13_OFFSET AR6320_WLAN_GPIO_PIN13_ADDRESS +#define AR6320_CPU_CLOCK_STANDARD_LSB AR6320_SOC_CPU_CLOCK_STANDARD_LSB +#define AR6320_CPU_CLOCK_STANDARD_MASK AR6320_SOC_CPU_CLOCK_STANDARD_MASK +#define AR6320_LPO_CAL_ENABLE_LSB AR6320_SOC_LPO_CAL_ENABLE_LSB +#define AR6320_LPO_CAL_ENABLE_MASK AR6320_SOC_LPO_CAL_ENABLE_MASK +#define AR6320_ANALOG_INTF_BASE_ADDRESS AR6320_WLAN_ANALOG_INTF_BASE_ADDRESS +#define AR6320_MBOX_BASE_ADDRESS 0x00008000 +#define AR6320_INT_STATUS_ENABLE_ERROR_LSB 7 +#define AR6320_INT_STATUS_ENABLE_ERROR_MASK 0x00000080 +#define AR6320_INT_STATUS_ENABLE_CPU_LSB 6 +#define AR6320_INT_STATUS_ENABLE_CPU_MASK 0x00000040 +#define AR6320_INT_STATUS_ENABLE_COUNTER_LSB 4 +#define AR6320_INT_STATUS_ENABLE_COUNTER_MASK 0x00000010 +#define AR6320_INT_STATUS_ENABLE_MBOX_DATA_LSB 0 +#define AR6320_INT_STATUS_ENABLE_MBOX_DATA_MASK 0x0000000f +#define AR6320_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 17 +#define AR6320_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00020000 +#define AR6320_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 16 +#define AR6320_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00010000 +#define AR6320_COUNTER_INT_STATUS_ENABLE_BIT_LSB 24 +#define AR6320_COUNTER_INT_STATUS_ENABLE_BIT_MASK 0xff000000 +#define AR6320_INT_STATUS_ENABLE_ADDRESS 0x0828 +#define AR6320_CPU_INT_STATUS_ENABLE_BIT_LSB 8 +#define AR6320_CPU_INT_STATUS_ENABLE_BIT_MASK 0x0000ff00 +#define AR6320_HOST_INT_STATUS_ADDRESS 0x0800 +#define AR6320_CPU_INT_STATUS_ADDRESS 0x0801 +#define AR6320_ERROR_INT_STATUS_ADDRESS 0x0802 +#define AR6320_ERROR_INT_STATUS_WAKEUP_MASK 0x00040000 +#define AR6320_ERROR_INT_STATUS_WAKEUP_LSB 18 +#define AR6320_ERROR_INT_STATUS_RX_UNDERFLOW_MASK 0x00020000 +#define AR6320_ERROR_INT_STATUS_RX_UNDERFLOW_LSB 17 +#define AR6320_ERROR_INT_STATUS_TX_OVERFLOW_MASK 0x00010000 +#define AR6320_ERROR_INT_STATUS_TX_OVERFLOW_LSB 16 +#define AR6320_COUNT_DEC_ADDRESS 0x0840 +#define AR6320_HOST_INT_STATUS_CPU_MASK 0x00000040 +#define AR6320_HOST_INT_STATUS_CPU_LSB 6 +#define AR6320_HOST_INT_STATUS_ERROR_MASK 0x00000080 +#define AR6320_HOST_INT_STATUS_ERROR_LSB 7 +#define AR6320_HOST_INT_STATUS_COUNTER_MASK 0x00000010 +#define AR6320_HOST_INT_STATUS_COUNTER_LSB 4 +#define AR6320_RX_LOOKAHEAD_VALID_ADDRESS 0x0805 +#define AR6320_WINDOW_DATA_ADDRESS 0x0874 +#define AR6320_WINDOW_READ_ADDR_ADDRESS 0x087c +#define AR6320_WINDOW_WRITE_ADDR_ADDRESS 0x0878 +#define AR6320_HOST_INT_STATUS_MBOX_DATA_MASK 0x0f +#define AR6320_HOST_INT_STATUS_MBOX_DATA_LSB 0 + +struct targetdef_s ar6320_targetdef = { + .d_RTC_SOC_BASE_ADDRESS = AR6320_RTC_SOC_BASE_ADDRESS, + .d_RTC_WMAC_BASE_ADDRESS = AR6320_RTC_WMAC_BASE_ADDRESS, + .d_SYSTEM_SLEEP_OFFSET = AR6320_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_OFFSET = AR6320_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_DISABLE_LSB = + AR6320_WLAN_SYSTEM_SLEEP_DISABLE_LSB, + .d_WLAN_SYSTEM_SLEEP_DISABLE_MASK = + AR6320_WLAN_SYSTEM_SLEEP_DISABLE_MASK, + .d_CLOCK_CONTROL_OFFSET = AR6320_CLOCK_CONTROL_OFFSET, + .d_CLOCK_CONTROL_SI0_CLK_MASK = AR6320_CLOCK_CONTROL_SI0_CLK_MASK, + .d_RESET_CONTROL_OFFSET = AR6320_SOC_RESET_CONTROL_OFFSET, + .d_RESET_CONTROL_MBOX_RST_MASK = AR6320_RESET_CONTROL_MBOX_RST_MASK, + .d_RESET_CONTROL_SI0_RST_MASK = AR6320_RESET_CONTROL_SI0_RST_MASK, + .d_WLAN_RESET_CONTROL_OFFSET = AR6320_WLAN_RESET_CONTROL_OFFSET, + .d_WLAN_RESET_CONTROL_COLD_RST_MASK = + AR6320_WLAN_RESET_CONTROL_COLD_RST_MASK, + .d_WLAN_RESET_CONTROL_WARM_RST_MASK = + AR6320_WLAN_RESET_CONTROL_WARM_RST_MASK, + .d_GPIO_BASE_ADDRESS = AR6320_GPIO_BASE_ADDRESS, + .d_GPIO_PIN0_OFFSET = AR6320_GPIO_PIN0_OFFSET, + .d_GPIO_PIN1_OFFSET = AR6320_GPIO_PIN1_OFFSET, + .d_GPIO_PIN0_CONFIG_MASK = AR6320_GPIO_PIN0_CONFIG_MASK, + .d_GPIO_PIN1_CONFIG_MASK = AR6320_GPIO_PIN1_CONFIG_MASK, + .d_SI_CONFIG_BIDIR_OD_DATA_LSB = AR6320_SI_CONFIG_BIDIR_OD_DATA_LSB, + .d_SI_CONFIG_BIDIR_OD_DATA_MASK = AR6320_SI_CONFIG_BIDIR_OD_DATA_MASK, + .d_SI_CONFIG_I2C_LSB = AR6320_SI_CONFIG_I2C_LSB, + .d_SI_CONFIG_I2C_MASK = AR6320_SI_CONFIG_I2C_MASK, + .d_SI_CONFIG_POS_SAMPLE_LSB = AR6320_SI_CONFIG_POS_SAMPLE_LSB, + .d_SI_CONFIG_POS_SAMPLE_MASK = AR6320_SI_CONFIG_POS_SAMPLE_MASK, + .d_SI_CONFIG_INACTIVE_CLK_LSB = AR6320_SI_CONFIG_INACTIVE_CLK_LSB, + .d_SI_CONFIG_INACTIVE_CLK_MASK = AR6320_SI_CONFIG_INACTIVE_CLK_MASK, + .d_SI_CONFIG_INACTIVE_DATA_LSB = AR6320_SI_CONFIG_INACTIVE_DATA_LSB, + .d_SI_CONFIG_INACTIVE_DATA_MASK = AR6320_SI_CONFIG_INACTIVE_DATA_MASK, + .d_SI_CONFIG_DIVIDER_LSB = AR6320_SI_CONFIG_DIVIDER_LSB, + .d_SI_CONFIG_DIVIDER_MASK = AR6320_SI_CONFIG_DIVIDER_MASK, + .d_SI_BASE_ADDRESS = AR6320_SI_BASE_ADDRESS, + .d_SI_CONFIG_OFFSET = AR6320_SI_CONFIG_OFFSET, + .d_SI_TX_DATA0_OFFSET = AR6320_SI_TX_DATA0_OFFSET, + .d_SI_TX_DATA1_OFFSET = AR6320_SI_TX_DATA1_OFFSET, + .d_SI_RX_DATA0_OFFSET = AR6320_SI_RX_DATA0_OFFSET, + .d_SI_RX_DATA1_OFFSET = AR6320_SI_RX_DATA1_OFFSET, + .d_SI_CS_OFFSET = AR6320_SI_CS_OFFSET, + .d_SI_CS_DONE_ERR_MASK = AR6320_SI_CS_DONE_ERR_MASK, + .d_SI_CS_DONE_INT_MASK = AR6320_SI_CS_DONE_INT_MASK, + .d_SI_CS_START_LSB = AR6320_SI_CS_START_LSB, + .d_SI_CS_START_MASK = AR6320_SI_CS_START_MASK, + .d_SI_CS_RX_CNT_LSB = AR6320_SI_CS_RX_CNT_LSB, + .d_SI_CS_RX_CNT_MASK = AR6320_SI_CS_RX_CNT_MASK, + .d_SI_CS_TX_CNT_LSB = AR6320_SI_CS_TX_CNT_LSB, + .d_SI_CS_TX_CNT_MASK = AR6320_SI_CS_TX_CNT_MASK, + .d_BOARD_DATA_SZ = AR6320_BOARD_DATA_SZ, + .d_BOARD_EXT_DATA_SZ = AR6320_BOARD_EXT_DATA_SZ, + .d_MBOX_BASE_ADDRESS = AR6320_MBOX_BASE_ADDRESS, + .d_LOCAL_SCRATCH_OFFSET = AR6320_LOCAL_SCRATCH_OFFSET, + .d_CPU_CLOCK_OFFSET = AR6320_CPU_CLOCK_OFFSET, + .d_LPO_CAL_OFFSET = AR6320_LPO_CAL_OFFSET, + .d_GPIO_PIN10_OFFSET = AR6320_GPIO_PIN10_OFFSET, + .d_GPIO_PIN11_OFFSET = AR6320_GPIO_PIN11_OFFSET, + .d_GPIO_PIN12_OFFSET = AR6320_GPIO_PIN12_OFFSET, + .d_GPIO_PIN13_OFFSET = AR6320_GPIO_PIN13_OFFSET, + .d_CLOCK_GPIO_OFFSET = AR6320_CLOCK_GPIO_OFFSET, + .d_CPU_CLOCK_STANDARD_LSB = AR6320_CPU_CLOCK_STANDARD_LSB, + .d_CPU_CLOCK_STANDARD_MASK = AR6320_CPU_CLOCK_STANDARD_MASK, + .d_LPO_CAL_ENABLE_LSB = AR6320_LPO_CAL_ENABLE_LSB, + .d_LPO_CAL_ENABLE_MASK = AR6320_LPO_CAL_ENABLE_MASK, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB = AR6320_CLOCK_GPIO_BT_CLK_OUT_EN_LSB, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK = + AR6320_CLOCK_GPIO_BT_CLK_OUT_EN_MASK, + .d_ANALOG_INTF_BASE_ADDRESS = AR6320_ANALOG_INTF_BASE_ADDRESS, + .d_WLAN_MAC_BASE_ADDRESS = AR6320_WLAN_MAC_BASE_ADDRESS, + .d_FW_INDICATOR_ADDRESS = AR6320_FW_INDICATOR_ADDRESS, + .d_DRAM_BASE_ADDRESS = AR6320_DRAM_BASE_ADDRESS, + .d_SOC_CORE_BASE_ADDRESS = AR6320_SOC_CORE_BASE_ADDRESS, + .d_CORE_CTRL_ADDRESS = AR6320_CORE_CTRL_ADDRESS, +#if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \ + defined(HIF_IPCI)) + .d_MSI_NUM_REQUEST = MSI_NUM_REQUEST, + .d_MSI_ASSIGN_FW = MSI_ASSIGN_FW, +#endif + .d_CORE_CTRL_CPU_INTR_MASK = AR6320_CORE_CTRL_CPU_INTR_MASK, + .d_SR_WR_INDEX_ADDRESS = AR6320_SR_WR_INDEX_ADDRESS, + .d_DST_WATERMARK_ADDRESS = AR6320_DST_WATERMARK_ADDRESS, + /* htt_rx.c */ + .d_RX_MSDU_END_4_FIRST_MSDU_MASK = + AR6320_RX_MSDU_END_4_FIRST_MSDU_MASK, + .d_RX_MSDU_END_4_FIRST_MSDU_LSB = AR6320_RX_MSDU_END_4_FIRST_MSDU_LSB, + .d_RX_MPDU_START_0_RETRY_LSB = AR6320_RX_MPDU_START_0_RETRY_LSB, + .d_RX_MPDU_START_0_RETRY_MASK = AR6320_RX_MPDU_START_0_RETRY_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_MASK = AR6320_RX_MPDU_START_0_SEQ_NUM_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_LSB = AR6320_RX_MPDU_START_0_SEQ_NUM_LSB, + .d_RX_MPDU_START_2_PN_47_32_LSB = AR6320_RX_MPDU_START_2_PN_47_32_LSB, + .d_RX_MPDU_START_2_PN_47_32_MASK = + AR6320_RX_MPDU_START_2_PN_47_32_MASK, + .d_RX_MPDU_START_2_TID_LSB = AR6320_RX_MPDU_START_2_TID_LSB, + .d_RX_MPDU_START_2_TID_MASK = AR6320_RX_MPDU_START_2_TID_MASK, + .d_RX_MSDU_END_1_KEY_ID_OCT_MASK = + AR6320_RX_MSDU_END_1_KEY_ID_OCT_MASK, + .d_RX_MSDU_END_1_KEY_ID_OCT_LSB = AR6320_RX_MSDU_END_1_KEY_ID_OCT_LSB, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK = + AR6320_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB = + AR6320_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB, + .d_RX_MSDU_END_4_LAST_MSDU_MASK = AR6320_RX_MSDU_END_4_LAST_MSDU_MASK, + .d_RX_MSDU_END_4_LAST_MSDU_LSB = AR6320_RX_MSDU_END_4_LAST_MSDU_LSB, + .d_RX_ATTENTION_0_MCAST_BCAST_MASK = + AR6320_RX_ATTENTION_0_MCAST_BCAST_MASK, + .d_RX_ATTENTION_0_MCAST_BCAST_LSB = + AR6320_RX_ATTENTION_0_MCAST_BCAST_LSB, + .d_RX_ATTENTION_0_FRAGMENT_MASK = AR6320_RX_ATTENTION_0_FRAGMENT_MASK, + .d_RX_ATTENTION_0_FRAGMENT_LSB = AR6320_RX_ATTENTION_0_FRAGMENT_LSB, + .d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK = + AR6320_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK = + AR6320_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB = + AR6320_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB, + .d_RX_MSDU_START_0_MSDU_LENGTH_MASK = + AR6320_RX_MSDU_START_0_MSDU_LENGTH_MASK, + .d_RX_MSDU_START_0_MSDU_LENGTH_LSB = + AR6320_RX_MSDU_START_0_MSDU_LENGTH_LSB, + .d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET = + AR6320_RX_MSDU_START_2_DECAP_FORMAT_OFFSET, + .d_RX_MSDU_START_2_DECAP_FORMAT_MASK = + AR6320_RX_MSDU_START_2_DECAP_FORMAT_MASK, + .d_RX_MSDU_START_2_DECAP_FORMAT_LSB = + AR6320_RX_MSDU_START_2_DECAP_FORMAT_LSB, + .d_RX_MPDU_START_0_ENCRYPTED_MASK = + AR6320_RX_MPDU_START_0_ENCRYPTED_MASK, + .d_RX_MPDU_START_0_ENCRYPTED_LSB = + AR6320_RX_MPDU_START_0_ENCRYPTED_LSB, + .d_RX_ATTENTION_0_MORE_DATA_MASK = + AR6320_RX_ATTENTION_0_MORE_DATA_MASK, + .d_RX_ATTENTION_0_MSDU_DONE_MASK = + AR6320_RX_ATTENTION_0_MSDU_DONE_MASK, + .d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK = + AR6320_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK, +#if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \ + defined(HIF_IPCI)) + .d_CE_COUNT = AR6320_CE_COUNT, + .d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL, + .d_PCIE_INTR_ENABLE_ADDRESS = AR6320_PCIE_INTR_ENABLE_ADDRESS, + .d_PCIE_INTR_CLR_ADDRESS = AR6320_PCIE_INTR_CLR_ADDRESS, + .d_PCIE_INTR_FIRMWARE_MASK = AR6320_PCIE_INTR_FIRMWARE_MASK, + .d_PCIE_INTR_CE_MASK_ALL = AR6320_PCIE_INTR_CE_MASK_ALL, + /* PLL start */ + .d_EFUSE_OFFSET = AR6320_EFUSE_OFFSET, + .d_EFUSE_XTAL_SEL_MSB = AR6320_EFUSE_XTAL_SEL_MSB, + .d_EFUSE_XTAL_SEL_LSB = AR6320_EFUSE_XTAL_SEL_LSB, + .d_EFUSE_XTAL_SEL_MASK = AR6320_EFUSE_XTAL_SEL_MASK, + .d_BB_PLL_CONFIG_OFFSET = AR6320_BB_PLL_CONFIG_OFFSET, + .d_BB_PLL_CONFIG_OUTDIV_MSB = AR6320_BB_PLL_CONFIG_OUTDIV_MSB, + .d_BB_PLL_CONFIG_OUTDIV_LSB = AR6320_BB_PLL_CONFIG_OUTDIV_LSB, + .d_BB_PLL_CONFIG_OUTDIV_MASK = AR6320_BB_PLL_CONFIG_OUTDIV_MASK, + .d_BB_PLL_CONFIG_FRAC_MSB = AR6320_BB_PLL_CONFIG_FRAC_MSB, + .d_BB_PLL_CONFIG_FRAC_LSB = AR6320_BB_PLL_CONFIG_FRAC_LSB, + .d_BB_PLL_CONFIG_FRAC_MASK = AR6320_BB_PLL_CONFIG_FRAC_MASK, + .d_WLAN_PLL_SETTLE_TIME_MSB = AR6320_WLAN_PLL_SETTLE_TIME_MSB, + .d_WLAN_PLL_SETTLE_TIME_LSB = AR6320_WLAN_PLL_SETTLE_TIME_LSB, + .d_WLAN_PLL_SETTLE_TIME_MASK = AR6320_WLAN_PLL_SETTLE_TIME_MASK, + .d_WLAN_PLL_SETTLE_OFFSET = AR6320_WLAN_PLL_SETTLE_OFFSET, + .d_WLAN_PLL_SETTLE_SW_MASK = AR6320_WLAN_PLL_SETTLE_SW_MASK, + .d_WLAN_PLL_SETTLE_RSTMASK = AR6320_WLAN_PLL_SETTLE_RSTMASK, + .d_WLAN_PLL_SETTLE_RESET = AR6320_WLAN_PLL_SETTLE_RESET, + .d_WLAN_PLL_CONTROL_NOPWD_MSB = AR6320_WLAN_PLL_CONTROL_NOPWD_MSB, + .d_WLAN_PLL_CONTROL_NOPWD_LSB = AR6320_WLAN_PLL_CONTROL_NOPWD_LSB, + .d_WLAN_PLL_CONTROL_NOPWD_MASK = AR6320_WLAN_PLL_CONTROL_NOPWD_MASK, + .d_WLAN_PLL_CONTROL_BYPASS_MSB = AR6320_WLAN_PLL_CONTROL_BYPASS_MSB, + .d_WLAN_PLL_CONTROL_BYPASS_LSB = AR6320_WLAN_PLL_CONTROL_BYPASS_LSB, + .d_WLAN_PLL_CONTROL_BYPASS_MASK = AR6320_WLAN_PLL_CONTROL_BYPASS_MASK, + .d_WLAN_PLL_CONTROL_BYPASS_RESET = + AR6320_WLAN_PLL_CONTROL_BYPASS_RESET, + .d_WLAN_PLL_CONTROL_CLK_SEL_MSB = AR6320_WLAN_PLL_CONTROL_CLK_SEL_MSB, + .d_WLAN_PLL_CONTROL_CLK_SEL_LSB = AR6320_WLAN_PLL_CONTROL_CLK_SEL_LSB, + .d_WLAN_PLL_CONTROL_CLK_SEL_MASK = + AR6320_WLAN_PLL_CONTROL_CLK_SEL_MASK, + .d_WLAN_PLL_CONTROL_CLK_SEL_RESET = + AR6320_WLAN_PLL_CONTROL_CLK_SEL_RESET, + .d_WLAN_PLL_CONTROL_REFDIV_MSB = AR6320_WLAN_PLL_CONTROL_REFDIV_MSB, + .d_WLAN_PLL_CONTROL_REFDIV_LSB = AR6320_WLAN_PLL_CONTROL_REFDIV_LSB, + .d_WLAN_PLL_CONTROL_REFDIV_MASK = AR6320_WLAN_PLL_CONTROL_REFDIV_MASK, + .d_WLAN_PLL_CONTROL_REFDIV_RESET = + AR6320_WLAN_PLL_CONTROL_REFDIV_RESET, + .d_WLAN_PLL_CONTROL_DIV_MSB = AR6320_WLAN_PLL_CONTROL_DIV_MSB, + .d_WLAN_PLL_CONTROL_DIV_LSB = AR6320_WLAN_PLL_CONTROL_DIV_LSB, + .d_WLAN_PLL_CONTROL_DIV_MASK = AR6320_WLAN_PLL_CONTROL_DIV_MASK, + .d_WLAN_PLL_CONTROL_DIV_RESET = AR6320_WLAN_PLL_CONTROL_DIV_RESET, + .d_WLAN_PLL_CONTROL_OFFSET = AR6320_WLAN_PLL_CONTROL_OFFSET, + .d_WLAN_PLL_CONTROL_SW_MASK = AR6320_WLAN_PLL_CONTROL_SW_MASK, + .d_WLAN_PLL_CONTROL_RSTMASK = AR6320_WLAN_PLL_CONTROL_RSTMASK, + .d_WLAN_PLL_CONTROL_RESET = AR6320_WLAN_PLL_CONTROL_RESET, + .d_SOC_CORE_CLK_CTRL_OFFSET = AR6320_SOC_CORE_CLK_CTRL_OFFSET, + .d_SOC_CORE_CLK_CTRL_DIV_MSB = AR6320_SOC_CORE_CLK_CTRL_DIV_MSB, + .d_SOC_CORE_CLK_CTRL_DIV_LSB = AR6320_SOC_CORE_CLK_CTRL_DIV_LSB, + .d_SOC_CORE_CLK_CTRL_DIV_MASK = AR6320_SOC_CORE_CLK_CTRL_DIV_MASK, + .d_RTC_SYNC_STATUS_PLL_CHANGING_MSB = + AR6320_RTC_SYNC_STATUS_PLL_CHANGING_MSB, + .d_RTC_SYNC_STATUS_PLL_CHANGING_LSB = + AR6320_RTC_SYNC_STATUS_PLL_CHANGING_LSB, + .d_RTC_SYNC_STATUS_PLL_CHANGING_MASK = + AR6320_RTC_SYNC_STATUS_PLL_CHANGING_MASK, + .d_RTC_SYNC_STATUS_PLL_CHANGING_RESET = + AR6320_RTC_SYNC_STATUS_PLL_CHANGING_RESET, + .d_RTC_SYNC_STATUS_OFFSET = AR6320_RTC_SYNC_STATUS_OFFSET, + .d_SOC_CPU_CLOCK_OFFSET = AR6320_SOC_CPU_CLOCK_OFFSET, + .d_SOC_CPU_CLOCK_STANDARD_MSB = AR6320_SOC_CPU_CLOCK_STANDARD_MSB, + .d_SOC_CPU_CLOCK_STANDARD_LSB = AR6320_SOC_CPU_CLOCK_STANDARD_LSB, + .d_SOC_CPU_CLOCK_STANDARD_MASK = AR6320_SOC_CPU_CLOCK_STANDARD_MASK, + /* PLL end */ + .d_SOC_POWER_REG_OFFSET = AR6320_SOC_POWER_REG_OFFSET, + .d_PCIE_INTR_CAUSE_ADDRESS = AR6320_PCIE_INTR_CAUSE_ADDRESS, + .d_SOC_RESET_CONTROL_ADDRESS = AR6320_SOC_RESET_CONTROL_ADDRESS, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK = + AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB = + AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB, + .d_SOC_RESET_CONTROL_CE_RST_MASK = + AR6320_SOC_RESET_CONTROL_CE_RST_MASK, + .d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK = + AR6320_SOC_RESET_CONTROL_CPU_WARM_RST_MASK, + .d_CPU_INTR_ADDRESS = AR6320_CPU_INTR_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ADDRESS = + AR6320_SOC_LF_TIMER_CONTROL0_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK = + AR6320_SOC_LF_TIMER_CONTROL0_ENABLE_MASK, + .d_SOC_LF_TIMER_STATUS0_ADDRESS = + AR6320_SOC_LF_TIMER_STATUS0_ADDRESS, + + .d_WLAN_DEBUG_INPUT_SEL_OFFSET = AR6320_WLAN_DEBUG_INPUT_SEL_OFFSET, + .d_WLAN_DEBUG_INPUT_SEL_SRC_MSB = AR6320_WLAN_DEBUG_INPUT_SEL_SRC_MSB, + .d_WLAN_DEBUG_INPUT_SEL_SRC_LSB = AR6320_WLAN_DEBUG_INPUT_SEL_SRC_LSB, + .d_WLAN_DEBUG_INPUT_SEL_SRC_MASK = + AR6320_WLAN_DEBUG_INPUT_SEL_SRC_MASK, + .d_WLAN_DEBUG_CONTROL_OFFSET = AR6320_WLAN_DEBUG_CONTROL_OFFSET, + .d_WLAN_DEBUG_CONTROL_ENABLE_MSB = + AR6320_WLAN_DEBUG_CONTROL_ENABLE_MSB, + .d_WLAN_DEBUG_CONTROL_ENABLE_LSB = + AR6320_WLAN_DEBUG_CONTROL_ENABLE_LSB, + .d_WLAN_DEBUG_CONTROL_ENABLE_MASK = + AR6320_WLAN_DEBUG_CONTROL_ENABLE_MASK, + .d_WLAN_DEBUG_OUT_OFFSET = AR6320_WLAN_DEBUG_OUT_OFFSET, + .d_WLAN_DEBUG_OUT_DATA_MSB = AR6320_WLAN_DEBUG_OUT_DATA_MSB, + .d_WLAN_DEBUG_OUT_DATA_LSB = AR6320_WLAN_DEBUG_OUT_DATA_LSB, + .d_WLAN_DEBUG_OUT_DATA_MASK = AR6320_WLAN_DEBUG_OUT_DATA_MASK, + .d_AMBA_DEBUG_BUS_OFFSET = AR6320_AMBA_DEBUG_BUS_OFFSET, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB = + AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB = + AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK = + AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK, + .d_AMBA_DEBUG_BUS_SEL_MSB = AR6320_AMBA_DEBUG_BUS_SEL_MSB, + .d_AMBA_DEBUG_BUS_SEL_LSB = AR6320_AMBA_DEBUG_BUS_SEL_LSB, + .d_AMBA_DEBUG_BUS_SEL_MASK = AR6320_AMBA_DEBUG_BUS_SEL_MASK, +#endif + /* chip id start */ + .d_SOC_CHIP_ID_ADDRESS = AR6320_SOC_CHIP_ID_ADDRESS, + .d_SOC_CHIP_ID_VERSION_MASK = AR6320_SOC_CHIP_ID_VERSION_MASK, + .d_SOC_CHIP_ID_VERSION_LSB = AR6320_SOC_CHIP_ID_VERSION_LSB, + .d_SOC_CHIP_ID_REVISION_MASK = AR6320_SOC_CHIP_ID_REVISION_MASK, + .d_SOC_CHIP_ID_REVISION_LSB = AR6320_SOC_CHIP_ID_REVISION_LSB, + /* chip id end */ +}; + +struct hostdef_s ar6320_hostdef = { + .d_INT_STATUS_ENABLE_ERROR_LSB = AR6320_INT_STATUS_ENABLE_ERROR_LSB, + .d_INT_STATUS_ENABLE_ERROR_MASK = AR6320_INT_STATUS_ENABLE_ERROR_MASK, + .d_INT_STATUS_ENABLE_CPU_LSB = AR6320_INT_STATUS_ENABLE_CPU_LSB, + .d_INT_STATUS_ENABLE_CPU_MASK = AR6320_INT_STATUS_ENABLE_CPU_MASK, + .d_INT_STATUS_ENABLE_COUNTER_LSB = + AR6320_INT_STATUS_ENABLE_COUNTER_LSB, + .d_INT_STATUS_ENABLE_COUNTER_MASK = + AR6320_INT_STATUS_ENABLE_COUNTER_MASK, + .d_INT_STATUS_ENABLE_MBOX_DATA_LSB = + AR6320_INT_STATUS_ENABLE_MBOX_DATA_LSB, + .d_INT_STATUS_ENABLE_MBOX_DATA_MASK = + AR6320_INT_STATUS_ENABLE_MBOX_DATA_MASK, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB = + AR6320_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK = + AR6320_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB = + AR6320_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK = + AR6320_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, + .d_COUNTER_INT_STATUS_ENABLE_BIT_LSB = + AR6320_COUNTER_INT_STATUS_ENABLE_BIT_LSB, + .d_COUNTER_INT_STATUS_ENABLE_BIT_MASK = + AR6320_COUNTER_INT_STATUS_ENABLE_BIT_MASK, + .d_INT_STATUS_ENABLE_ADDRESS = AR6320_INT_STATUS_ENABLE_ADDRESS, + .d_CPU_INT_STATUS_ENABLE_BIT_LSB = + AR6320_CPU_INT_STATUS_ENABLE_BIT_LSB, + .d_CPU_INT_STATUS_ENABLE_BIT_MASK = + AR6320_CPU_INT_STATUS_ENABLE_BIT_MASK, + .d_HOST_INT_STATUS_ADDRESS = AR6320_HOST_INT_STATUS_ADDRESS, + .d_CPU_INT_STATUS_ADDRESS = AR6320_CPU_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_ADDRESS = AR6320_ERROR_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_WAKEUP_MASK = AR6320_ERROR_INT_STATUS_WAKEUP_MASK, + .d_ERROR_INT_STATUS_WAKEUP_LSB = AR6320_ERROR_INT_STATUS_WAKEUP_LSB, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK = + AR6320_ERROR_INT_STATUS_RX_UNDERFLOW_MASK, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB = + AR6320_ERROR_INT_STATUS_RX_UNDERFLOW_LSB, + .d_ERROR_INT_STATUS_TX_OVERFLOW_MASK = + AR6320_ERROR_INT_STATUS_TX_OVERFLOW_MASK, + .d_ERROR_INT_STATUS_TX_OVERFLOW_LSB = + AR6320_ERROR_INT_STATUS_TX_OVERFLOW_LSB, + .d_COUNT_DEC_ADDRESS = AR6320_COUNT_DEC_ADDRESS, + .d_HOST_INT_STATUS_CPU_MASK = AR6320_HOST_INT_STATUS_CPU_MASK, + .d_HOST_INT_STATUS_CPU_LSB = AR6320_HOST_INT_STATUS_CPU_LSB, + .d_HOST_INT_STATUS_ERROR_MASK = AR6320_HOST_INT_STATUS_ERROR_MASK, + .d_HOST_INT_STATUS_ERROR_LSB = AR6320_HOST_INT_STATUS_ERROR_LSB, + .d_HOST_INT_STATUS_COUNTER_MASK = AR6320_HOST_INT_STATUS_COUNTER_MASK, + .d_HOST_INT_STATUS_COUNTER_LSB = AR6320_HOST_INT_STATUS_COUNTER_LSB, + .d_RX_LOOKAHEAD_VALID_ADDRESS = AR6320_RX_LOOKAHEAD_VALID_ADDRESS, + .d_WINDOW_DATA_ADDRESS = AR6320_WINDOW_DATA_ADDRESS, + .d_WINDOW_READ_ADDR_ADDRESS = AR6320_WINDOW_READ_ADDR_ADDRESS, + .d_WINDOW_WRITE_ADDR_ADDRESS = AR6320_WINDOW_WRITE_ADDR_ADDRESS, + .d_SOC_GLOBAL_RESET_ADDRESS = AR6320_SOC_GLOBAL_RESET_ADDRESS, + .d_RTC_STATE_ADDRESS = AR6320_RTC_STATE_ADDRESS, + .d_RTC_STATE_COLD_RESET_MASK = AR6320_RTC_STATE_COLD_RESET_MASK, +#if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \ + defined(HIF_IPCI)) + .d_PCIE_LOCAL_BASE_ADDRESS = AR6320_PCIE_LOCAL_BASE_ADDRESS, + .d_PCIE_SOC_WAKE_RESET = AR6320_PCIE_SOC_WAKE_RESET, + .d_PCIE_SOC_WAKE_ADDRESS = AR6320_PCIE_SOC_WAKE_ADDRESS, + .d_PCIE_SOC_WAKE_V_MASK = AR6320_PCIE_SOC_WAKE_V_MASK, + .d_MUX_ID_MASK = AR6320_MUX_ID_MASK, + .d_TRANSACTION_ID_MASK = AR6320_TRANSACTION_ID_MASK, + .d_FW_IND_HELPER = AR6320_FW_IND_HELPER, + .d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS, + .d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK, + .d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS, + .d_MSI_MAGIC_ADR_ADDRESS = MSI_MAGIC_ADR_ADDRESS, + .d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS, + .d_HOST_CE_COUNT = 8, + .d_ENABLE_MSI = 0, +#endif + .d_RTC_STATE_V_MASK = AR6320_RTC_STATE_V_MASK, + .d_RTC_STATE_V_LSB = AR6320_RTC_STATE_V_LSB, + .d_FW_IND_EVENT_PENDING = AR6320_FW_IND_EVENT_PENDING, + .d_FW_IND_INITIALIZED = AR6320_FW_IND_INITIALIZED, + .d_RTC_STATE_V_ON = AR6320_RTC_STATE_V_ON, +#if defined(SDIO_3_0) + .d_HOST_INT_STATUS_MBOX_DATA_MASK = + AR6320_HOST_INT_STATUS_MBOX_DATA_MASK, + .d_HOST_INT_STATUS_MBOX_DATA_LSB = + AR6320_HOST_INT_STATUS_MBOX_DATA_LSB, +#endif +}; + +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \ + defined(HIF_IPCI) +struct ce_reg_def ar6320_ce_targetdef = { + /* copy_engine.c */ + .d_DST_WR_INDEX_ADDRESS = AR6320_DST_WR_INDEX_ADDRESS, + .d_SRC_WATERMARK_ADDRESS = AR6320_SRC_WATERMARK_ADDRESS, + .d_SRC_WATERMARK_LOW_MASK = AR6320_SRC_WATERMARK_LOW_MASK, + .d_SRC_WATERMARK_HIGH_MASK = AR6320_SRC_WATERMARK_HIGH_MASK, + .d_DST_WATERMARK_LOW_MASK = AR6320_DST_WATERMARK_LOW_MASK, + .d_DST_WATERMARK_HIGH_MASK = AR6320_DST_WATERMARK_HIGH_MASK, + .d_CURRENT_SRRI_ADDRESS = AR6320_CURRENT_SRRI_ADDRESS, + .d_CURRENT_DRRI_ADDRESS = AR6320_CURRENT_DRRI_ADDRESS, + .d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK = + AR6320_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK = + AR6320_HOST_IS_SRC_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK = + AR6320_HOST_IS_DST_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_DST_RING_LOW_WATERMARK_MASK = + AR6320_HOST_IS_DST_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_ADDRESS = AR6320_HOST_IS_ADDRESS, + .d_HOST_IS_COPY_COMPLETE_MASK = AR6320_HOST_IS_COPY_COMPLETE_MASK, + .d_CE_WRAPPER_BASE_ADDRESS = AR6320_CE_WRAPPER_BASE_ADDRESS, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS = + AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS, + .d_HOST_IE_ADDRESS = AR6320_HOST_IE_ADDRESS, + .d_HOST_IE_COPY_COMPLETE_MASK = AR6320_HOST_IE_COPY_COMPLETE_MASK, + .d_SR_BA_ADDRESS = AR6320_SR_BA_ADDRESS, + .d_SR_SIZE_ADDRESS = AR6320_SR_SIZE_ADDRESS, + .d_CE_CTRL1_ADDRESS = AR6320_CE_CTRL1_ADDRESS, + .d_CE_CTRL1_DMAX_LENGTH_MASK = AR6320_CE_CTRL1_DMAX_LENGTH_MASK, + .d_DR_BA_ADDRESS = AR6320_DR_BA_ADDRESS, + .d_DR_SIZE_ADDRESS = AR6320_DR_SIZE_ADDRESS, + .d_MISC_IE_ADDRESS = AR6320_MISC_IE_ADDRESS, + .d_MISC_IS_AXI_ERR_MASK = AR6320_MISC_IS_AXI_ERR_MASK, + .d_MISC_IS_DST_ADDR_ERR_MASK = AR6320_MISC_IS_DST_ADDR_ERR_MASK, + .d_MISC_IS_SRC_LEN_ERR_MASK = AR6320_MISC_IS_SRC_LEN_ERR_MASK, + .d_MISC_IS_DST_MAX_LEN_VIO_MASK = AR6320_MISC_IS_DST_MAX_LEN_VIO_MASK, + .d_MISC_IS_DST_RING_OVERFLOW_MASK = + AR6320_MISC_IS_DST_RING_OVERFLOW_MASK, + .d_MISC_IS_SRC_RING_OVERFLOW_MASK = + AR6320_MISC_IS_SRC_RING_OVERFLOW_MASK, + .d_SRC_WATERMARK_LOW_LSB = AR6320_SRC_WATERMARK_LOW_LSB, + .d_SRC_WATERMARK_HIGH_LSB = AR6320_SRC_WATERMARK_HIGH_LSB, + .d_DST_WATERMARK_LOW_LSB = AR6320_DST_WATERMARK_LOW_LSB, + .d_DST_WATERMARK_HIGH_LSB = AR6320_DST_WATERMARK_HIGH_LSB, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK = + AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB = + AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB, + .d_CE_CTRL1_DMAX_LENGTH_LSB = AR6320_CE_CTRL1_DMAX_LENGTH_LSB, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK = + AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK = + AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB = + AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB = + AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB, + .d_CE_WRAPPER_DEBUG_OFFSET = AR6320_CE_WRAPPER_DEBUG_OFFSET, + .d_CE_WRAPPER_DEBUG_SEL_MSB = AR6320_CE_WRAPPER_DEBUG_SEL_MSB, + .d_CE_WRAPPER_DEBUG_SEL_LSB = AR6320_CE_WRAPPER_DEBUG_SEL_LSB, + .d_CE_WRAPPER_DEBUG_SEL_MASK = AR6320_CE_WRAPPER_DEBUG_SEL_MASK, + .d_CE_DEBUG_OFFSET = AR6320_CE_DEBUG_OFFSET, + .d_CE_DEBUG_SEL_MSB = AR6320_CE_DEBUG_SEL_MSB, + .d_CE_DEBUG_SEL_LSB = AR6320_CE_DEBUG_SEL_LSB, + .d_CE_DEBUG_SEL_MASK = AR6320_CE_DEBUG_SEL_MASK, + .d_CE0_BASE_ADDRESS = AR6320_CE0_BASE_ADDRESS, + .d_CE1_BASE_ADDRESS = AR6320_CE1_BASE_ADDRESS, + +}; +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320v2def.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320v2def.h new file mode 100644 index 0000000000000000000000000000000000000000..0985ae6cfff0b0abc320df889e3a422f0a3d1eb5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320v2def.h @@ -0,0 +1,829 @@ +/* + * Copyright (c) 2013-2018, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _AR6320V2DEF_H_ +#define _AR6320V2DEF_H_ + +/* Base Addresses */ +#define AR6320V2_RTC_SOC_BASE_ADDRESS 0x00000800 +#define AR6320V2_RTC_WMAC_BASE_ADDRESS 0x00001000 +#define AR6320V2_MAC_COEX_BASE_ADDRESS 0x0000f000 +#define AR6320V2_BT_COEX_BASE_ADDRESS 0x00002000 +#define AR6320V2_SOC_PCIE_BASE_ADDRESS 0x00038000 +#define AR6320V2_SOC_CORE_BASE_ADDRESS 0x0003a000 +#define AR6320V2_WLAN_UART_BASE_ADDRESS 0x0000c000 +#define AR6320V2_WLAN_SI_BASE_ADDRESS 0x00010000 +#define AR6320V2_WLAN_GPIO_BASE_ADDRESS 0x00005000 +#define AR6320V2_WLAN_ANALOG_INTF_BASE_ADDRESS 0x00006000 +#define AR6320V2_WLAN_MAC_BASE_ADDRESS 0x00010000 +#define AR6320V2_EFUSE_BASE_ADDRESS 0x00024000 +#define AR6320V2_FPGA_REG_BASE_ADDRESS 0x00039000 +#define AR6320V2_WLAN_UART2_BASE_ADDRESS 0x00054c00 +#define AR6320V2_DBI_BASE_ADDRESS 0x0003c000 + +#define AR6320V2_SCRATCH_3_ADDRESS 0x0028 +#define AR6320V2_TARG_DRAM_START 0x00400000 +#define AR6320V2_SOC_SYSTEM_SLEEP_OFFSET 0x000000c0 +#define AR6320V2_SOC_RESET_CONTROL_OFFSET 0x00000000 +#define AR6320V2_SOC_CLOCK_CONTROL_OFFSET 0x00000028 +#define AR6320V2_SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001 +#define AR6320V2_SOC_RESET_CONTROL_SI0_RST_MASK 0x00000000 +#define AR6320V2_WLAN_GPIO_PIN0_ADDRESS 0x00000068 +#define AR6320V2_WLAN_GPIO_PIN1_ADDRESS 0x0000006c +#define AR6320V2_WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 +#define AR6320V2_WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 +#define AR6320V2_SOC_CPU_CLOCK_OFFSET 0x00000020 +#define AR6320V2_SOC_LPO_CAL_OFFSET 0x000000e0 +#define AR6320V2_WLAN_GPIO_PIN10_ADDRESS 0x00000090 +#define AR6320V2_WLAN_GPIO_PIN11_ADDRESS 0x00000094 +#define AR6320V2_WLAN_GPIO_PIN12_ADDRESS 0x00000098 +#define AR6320V2_WLAN_GPIO_PIN13_ADDRESS 0x0000009c +#define AR6320V2_SOC_CPU_CLOCK_STANDARD_LSB 0 +#define AR6320V2_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +#define AR6320V2_SOC_LPO_CAL_ENABLE_LSB 20 +#define AR6320V2_SOC_LPO_CAL_ENABLE_MASK 0x00100000 + +#define AR6320V2_WLAN_SYSTEM_SLEEP_DISABLE_LSB 0 +#define AR6320V2_WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 +#define AR6320V2_WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008 +#define AR6320V2_WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004 +#define AR6320V2_SI_CONFIG_BIDIR_OD_DATA_LSB 18 +#define AR6320V2_SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 +#define AR6320V2_SI_CONFIG_I2C_LSB 16 +#define AR6320V2_SI_CONFIG_I2C_MASK 0x00010000 +#define AR6320V2_SI_CONFIG_POS_SAMPLE_LSB 7 +#define AR6320V2_SI_CONFIG_POS_SAMPLE_MASK 0x00000080 +#define AR6320V2_SI_CONFIG_INACTIVE_CLK_LSB 4 +#define AR6320V2_SI_CONFIG_INACTIVE_CLK_MASK 0x00000010 +#define AR6320V2_SI_CONFIG_INACTIVE_DATA_LSB 5 +#define AR6320V2_SI_CONFIG_INACTIVE_DATA_MASK 0x00000020 +#define AR6320V2_SI_CONFIG_DIVIDER_LSB 0 +#define AR6320V2_SI_CONFIG_DIVIDER_MASK 0x0000000f +#define AR6320V2_SI_CONFIG_OFFSET 0x00000000 +#define AR6320V2_SI_TX_DATA0_OFFSET 0x00000008 +#define AR6320V2_SI_TX_DATA1_OFFSET 0x0000000c +#define AR6320V2_SI_RX_DATA0_OFFSET 0x00000010 +#define AR6320V2_SI_RX_DATA1_OFFSET 0x00000014 +#define AR6320V2_SI_CS_OFFSET 0x00000004 +#define AR6320V2_SI_CS_DONE_ERR_MASK 0x00000400 +#define AR6320V2_SI_CS_DONE_INT_MASK 0x00000200 +#define AR6320V2_SI_CS_START_LSB 8 +#define AR6320V2_SI_CS_START_MASK 0x00000100 +#define AR6320V2_SI_CS_RX_CNT_LSB 4 +#define AR6320V2_SI_CS_RX_CNT_MASK 0x000000f0 +#define AR6320V2_SI_CS_TX_CNT_LSB 0 +#define AR6320V2_SI_CS_TX_CNT_MASK 0x0000000f +#define AR6320V2_CE_COUNT 8 +#define AR6320V2_SR_WR_INDEX_ADDRESS 0x003c +#define AR6320V2_DST_WATERMARK_ADDRESS 0x0050 +#define AR6320V2_RX_MSDU_END_4_FIRST_MSDU_LSB 14 +#define AR6320V2_RX_MSDU_END_4_FIRST_MSDU_MASK 0x00004000 +#define AR6320V2_RX_MPDU_START_0_RETRY_LSB 14 +#define AR6320V2_RX_MPDU_START_0_RETRY_MASK 0x00004000 +#define AR6320V2_RX_MPDU_START_0_SEQ_NUM_LSB 16 +#define AR6320V2_RX_MPDU_START_0_SEQ_NUM_MASK 0x0fff0000 +#define AR6320V2_RX_MPDU_START_2_PN_47_32_LSB 0 +#define AR6320V2_RX_MPDU_START_2_PN_47_32_MASK 0x0000ffff +#define AR6320V2_RX_MPDU_START_2_TID_LSB 28 +#define AR6320V2_RX_MPDU_START_2_TID_MASK 0xf0000000 +#define AR6320V2_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB 16 +#define AR6320V2_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK 0xffff0000 +#define AR6320V2_RX_MSDU_END_4_LAST_MSDU_LSB 15 +#define AR6320V2_RX_MSDU_END_4_LAST_MSDU_MASK 0x00008000 +#define AR6320V2_RX_ATTENTION_0_MCAST_BCAST_LSB 2 +#define AR6320V2_RX_ATTENTION_0_MCAST_BCAST_MASK 0x00000004 +#define AR6320V2_RX_ATTENTION_0_FRAGMENT_LSB 13 +#define AR6320V2_RX_ATTENTION_0_FRAGMENT_MASK 0x00002000 +#define AR6320V2_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK 0x08000000 +#define AR6320V2_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB 16 +#define AR6320V2_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK 0x00ff0000 +#define AR6320V2_RX_MSDU_START_0_MSDU_LENGTH_LSB 0 +#define AR6320V2_RX_MSDU_START_0_MSDU_LENGTH_MASK 0x00003fff + +#define AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_OFFSET 0x00000008 +#define AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_LSB 8 +#define AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_MASK 0x00000300 +#define AR6320V2_RX_MPDU_START_0_ENCRYPTED_LSB 13 +#define AR6320V2_RX_MPDU_START_0_ENCRYPTED_MASK 0x00002000 +#define AR6320V2_RX_ATTENTION_0_MORE_DATA_MASK 0x00000400 +#define AR6320V2_RX_ATTENTION_0_MSDU_DONE_MASK 0x80000000 +#define AR6320V2_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK 0x00040000 +#define AR6320V2_DST_WR_INDEX_ADDRESS 0x0040 +#define AR6320V2_SRC_WATERMARK_ADDRESS 0x004c +#define AR6320V2_SRC_WATERMARK_LOW_MASK 0xffff0000 +#define AR6320V2_SRC_WATERMARK_HIGH_MASK 0x0000ffff +#define AR6320V2_DST_WATERMARK_LOW_MASK 0xffff0000 +#define AR6320V2_DST_WATERMARK_HIGH_MASK 0x0000ffff +#define AR6320V2_CURRENT_SRRI_ADDRESS 0x0044 +#define AR6320V2_CURRENT_DRRI_ADDRESS 0x0048 +#define AR6320V2_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002 +#define AR6320V2_HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004 +#define AR6320V2_HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008 +#define AR6320V2_HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010 +#define AR6320V2_HOST_IS_ADDRESS 0x0030 +#define AR6320V2_HOST_IS_COPY_COMPLETE_MASK 0x00000001 +#define AR6320V2_HOST_IE_ADDRESS 0x002c +#define AR6320V2_HOST_IE_COPY_COMPLETE_MASK 0x00000001 +#define AR6320V2_SR_BA_ADDRESS 0x0000 +#define AR6320V2_SR_SIZE_ADDRESS 0x0004 +#define AR6320V2_DR_BA_ADDRESS 0x0008 +#define AR6320V2_DR_SIZE_ADDRESS 0x000c +#define AR6320V2_MISC_IE_ADDRESS 0x0034 +#define AR6320V2_MISC_IS_AXI_ERR_MASK 0x00000400 +#define AR6320V2_MISC_IS_DST_ADDR_ERR_MASK 0x00000200 +#define AR6320V2_MISC_IS_SRC_LEN_ERR_MASK 0x00000100 +#define AR6320V2_MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080 +#define AR6320V2_MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040 +#define AR6320V2_MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020 +#define AR6320V2_SRC_WATERMARK_LOW_LSB 16 +#define AR6320V2_SRC_WATERMARK_HIGH_LSB 0 +#define AR6320V2_DST_WATERMARK_LOW_LSB 16 +#define AR6320V2_DST_WATERMARK_HIGH_LSB 0 +#define AR6320V2_SOC_GLOBAL_RESET_ADDRESS 0x0008 +#define AR6320V2_RTC_STATE_ADDRESS 0x0000 +#define AR6320V2_RTC_STATE_COLD_RESET_MASK 0x00002000 +#define AR6320V2_RTC_STATE_V_MASK 0x00000007 +#define AR6320V2_RTC_STATE_V_LSB 0 +#define AR6320V2_RTC_STATE_V_ON 3 +#define AR6320V2_FW_IND_EVENT_PENDING 1 +#define AR6320V2_FW_IND_INITIALIZED 2 +#define AR6320V2_CPU_INTR_ADDRESS 0x0010 +#define AR6320V2_SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050 +#define AR6320V2_SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004 +#define AR6320V2_SOC_LF_TIMER_STATUS0_ADDRESS 0x00000054 +#define AR6320V2_SOC_RESET_CONTROL_ADDRESS 0x00000000 +#define AR6320V2_SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040 +#define AR6320V2_CORE_CTRL_ADDRESS 0x0000 +#define AR6320V2_CORE_CTRL_CPU_INTR_MASK 0x00002000 +#define AR6320V2_LOCAL_SCRATCH_OFFSET 0x000000c0 +#define AR6320V2_CLOCK_GPIO_OFFSET 0xffffffff +#define AR6320V2_CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0 +#define AR6320V2_CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 +#define AR6320V2_SOC_CHIP_ID_ADDRESS 0x000000f0 +#define AR6320V2_SOC_CHIP_ID_VERSION_MASK 0xfffc0000 +#define AR6320V2_SOC_CHIP_ID_VERSION_LSB 18 +#define AR6320V2_SOC_CHIP_ID_REVISION_MASK 0x00000f00 +#define AR6320V2_SOC_CHIP_ID_REVISION_LSB 8 +#if defined(HIF_SDIO) +#define AR6320V2_FW_IND_HELPER 4 +#endif +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \ + defined(HIF_IPCI) +#define AR6320V2_CE_WRAPPER_BASE_ADDRESS 0x00034000 +#define AR6320V2_CE0_BASE_ADDRESS 0x00034400 +#define AR6320V2_CE1_BASE_ADDRESS 0x00034800 +#define AR6320V2_CE2_BASE_ADDRESS 0x00034c00 +#define AR6320V2_CE3_BASE_ADDRESS 0x00035000 +#define AR6320V2_CE4_BASE_ADDRESS 0x00035400 +#define AR6320V2_CE5_BASE_ADDRESS 0x00035800 +#define AR6320V2_CE6_BASE_ADDRESS 0x00035c00 +#define AR6320V2_CE7_BASE_ADDRESS 0x00036000 +#define AR6320V2_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x00007800 +#define AR6320V2_CE_CTRL1_ADDRESS 0x0010 +#define AR6320V2_CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff +#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 +#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00 +#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8 +#define AR6320V2_CE_CTRL1_DMAX_LENGTH_LSB 0 +#define AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000 +#define AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000 +#define AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16 +#define AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17 +#define AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000020 +#define AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB 5 +#define AR6320V2_PCIE_SOC_WAKE_RESET 0x00000000 +#define AR6320V2_PCIE_SOC_WAKE_ADDRESS 0x0004 +#define AR6320V2_PCIE_SOC_WAKE_V_MASK 0x00000001 +#define AR6320V2_MUX_ID_MASK 0x0000 +#define AR6320V2_TRANSACTION_ID_MASK 0x3fff +#define AR6320V2_PCIE_LOCAL_BASE_ADDRESS 0x80000 +#define AR6320V2_FW_IND_HELPER 4 +#define AR6320V2_PCIE_INTR_ENABLE_ADDRESS 0x0008 +#define AR6320V2_PCIE_INTR_CLR_ADDRESS 0x0014 +#define AR6320V2_PCIE_INTR_FIRMWARE_MASK 0x00000400 +#define AR6320V2_PCIE_INTR_CE0_MASK 0x00000800 +#define AR6320V2_PCIE_INTR_CE_MASK_ALL 0x0007f800 +#define AR6320V2_PCIE_INTR_CAUSE_ADDRESS 0x000c +#define AR6320V2_SOC_RESET_CONTROL_CE_RST_MASK 0x00000001 +#define AR6320V2_SOC_POWER_REG_OFFSET 0x0000010c +/* Copy Engine Debug */ +#define AR6320V2_WLAN_DEBUG_INPUT_SEL_OFFSET 0x0000010c +#define AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_MSB 3 +#define AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_LSB 0 +#define AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_MASK 0x0000000f +#define AR6320V2_WLAN_DEBUG_CONTROL_OFFSET 0x00000108 +#define AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_MSB 0 +#define AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_LSB 0 +#define AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_MASK 0x00000001 +#define AR6320V2_WLAN_DEBUG_OUT_OFFSET 0x00000110 +#define AR6320V2_WLAN_DEBUG_OUT_DATA_MSB 19 +#define AR6320V2_WLAN_DEBUG_OUT_DATA_LSB 0 +#define AR6320V2_WLAN_DEBUG_OUT_DATA_MASK 0x000fffff +#define AR6320V2_AMBA_DEBUG_BUS_OFFSET 0x0000011c +#define AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB 13 +#define AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB 8 +#define AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK 0x00003f00 +#define AR6320V2_AMBA_DEBUG_BUS_SEL_MSB 4 +#define AR6320V2_AMBA_DEBUG_BUS_SEL_LSB 0 +#define AR6320V2_AMBA_DEBUG_BUS_SEL_MASK 0x0000001f +#define AR6320V2_CE_WRAPPER_DEBUG_OFFSET 0x0008 +#define AR6320V2_CE_WRAPPER_DEBUG_SEL_MSB 5 +#define AR6320V2_CE_WRAPPER_DEBUG_SEL_LSB 0 +#define AR6320V2_CE_WRAPPER_DEBUG_SEL_MASK 0x0000003f +#define AR6320V2_CE_DEBUG_OFFSET 0x0054 +#define AR6320V2_CE_DEBUG_SEL_MSB 5 +#define AR6320V2_CE_DEBUG_SEL_LSB 0 +#define AR6320V2_CE_DEBUG_SEL_MASK 0x0000003f +/* End */ + +/* PLL start */ +#define AR6320V2_EFUSE_OFFSET 0x0000032c +#define AR6320V2_EFUSE_XTAL_SEL_MSB 10 +#define AR6320V2_EFUSE_XTAL_SEL_LSB 8 +#define AR6320V2_EFUSE_XTAL_SEL_MASK 0x00000700 +#define AR6320V2_BB_PLL_CONFIG_OFFSET 0x000002f4 +#define AR6320V2_BB_PLL_CONFIG_OUTDIV_MSB 20 +#define AR6320V2_BB_PLL_CONFIG_OUTDIV_LSB 18 +#define AR6320V2_BB_PLL_CONFIG_OUTDIV_MASK 0x001c0000 +#define AR6320V2_BB_PLL_CONFIG_FRAC_MSB 17 +#define AR6320V2_BB_PLL_CONFIG_FRAC_LSB 0 +#define AR6320V2_BB_PLL_CONFIG_FRAC_MASK 0x0003ffff +#define AR6320V2_WLAN_PLL_SETTLE_TIME_MSB 10 +#define AR6320V2_WLAN_PLL_SETTLE_TIME_LSB 0 +#define AR6320V2_WLAN_PLL_SETTLE_TIME_MASK 0x000007ff +#define AR6320V2_WLAN_PLL_SETTLE_OFFSET 0x0018 +#define AR6320V2_WLAN_PLL_SETTLE_SW_MASK 0x000007ff +#define AR6320V2_WLAN_PLL_SETTLE_RSTMASK 0xffffffff +#define AR6320V2_WLAN_PLL_SETTLE_RESET 0x00000400 +#define AR6320V2_WLAN_PLL_CONTROL_NOPWD_MSB 18 +#define AR6320V2_WLAN_PLL_CONTROL_NOPWD_LSB 18 +#define AR6320V2_WLAN_PLL_CONTROL_NOPWD_MASK 0x00040000 +#define AR6320V2_WLAN_PLL_CONTROL_BYPASS_MSB 16 +#define AR6320V2_WLAN_PLL_CONTROL_BYPASS_LSB 16 +#define AR6320V2_WLAN_PLL_CONTROL_BYPASS_MASK 0x00010000 +#define AR6320V2_WLAN_PLL_CONTROL_BYPASS_RESET 0x1 +#define AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_MSB 15 +#define AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_LSB 14 +#define AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_MASK 0x0000c000 +#define AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_RESET 0x0 +#define AR6320V2_WLAN_PLL_CONTROL_REFDIV_MSB 13 +#define AR6320V2_WLAN_PLL_CONTROL_REFDIV_LSB 10 +#define AR6320V2_WLAN_PLL_CONTROL_REFDIV_MASK 0x00003c00 +#define AR6320V2_WLAN_PLL_CONTROL_REFDIV_RESET 0x0 +#define AR6320V2_WLAN_PLL_CONTROL_DIV_MSB 9 +#define AR6320V2_WLAN_PLL_CONTROL_DIV_LSB 0 +#define AR6320V2_WLAN_PLL_CONTROL_DIV_MASK 0x000003ff +#define AR6320V2_WLAN_PLL_CONTROL_DIV_RESET 0x11 +#define AR6320V2_WLAN_PLL_CONTROL_OFFSET 0x0014 +#define AR6320V2_WLAN_PLL_CONTROL_SW_MASK 0x001fffff +#define AR6320V2_WLAN_PLL_CONTROL_RSTMASK 0xffffffff +#define AR6320V2_WLAN_PLL_CONTROL_RESET 0x00010011 +#define AR6320V2_SOC_CORE_CLK_CTRL_OFFSET 0x00000114 +#define AR6320V2_SOC_CORE_CLK_CTRL_DIV_MSB 2 +#define AR6320V2_SOC_CORE_CLK_CTRL_DIV_LSB 0 +#define AR6320V2_SOC_CORE_CLK_CTRL_DIV_MASK 0x00000007 +#define AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_MSB 5 +#define AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_LSB 5 +#define AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020 +#define AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_RESET 0x0 +#define AR6320V2_RTC_SYNC_STATUS_OFFSET 0x0244 +#define AR6320V2_SOC_CPU_CLOCK_OFFSET 0x00000020 +#define AR6320V2_SOC_CPU_CLOCK_STANDARD_MSB 1 +#define AR6320V2_SOC_CPU_CLOCK_STANDARD_LSB 0 +#define AR6320V2_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +/* PLL end */ + +#define AR6320V2_PCIE_INTR_CE_MASK(n) \ + (AR6320V2_PCIE_INTR_CE0_MASK << (n)) +#endif +#define AR6320V2_DRAM_BASE_ADDRESS AR6320V2_TARG_DRAM_START +#define AR6320V2_FW_INDICATOR_ADDRESS \ + (AR6320V2_SOC_CORE_BASE_ADDRESS + AR6320V2_SCRATCH_3_ADDRESS) +#define AR6320V2_SYSTEM_SLEEP_OFFSET AR6320V2_SOC_SYSTEM_SLEEP_OFFSET +#define AR6320V2_WLAN_SYSTEM_SLEEP_OFFSET 0x002c +#define AR6320V2_WLAN_RESET_CONTROL_OFFSET AR6320V2_SOC_RESET_CONTROL_OFFSET +#define AR6320V2_CLOCK_CONTROL_OFFSET AR6320V2_SOC_CLOCK_CONTROL_OFFSET +#define AR6320V2_CLOCK_CONTROL_SI0_CLK_MASK \ + AR6320V2_SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define AR6320V2_RESET_CONTROL_MBOX_RST_MASK 0x00000004 +#define AR6320V2_RESET_CONTROL_SI0_RST_MASK \ + AR6320V2_SOC_RESET_CONTROL_SI0_RST_MASK +#define AR6320V2_GPIO_BASE_ADDRESS AR6320V2_WLAN_GPIO_BASE_ADDRESS +#define AR6320V2_GPIO_PIN0_OFFSET AR6320V2_WLAN_GPIO_PIN0_ADDRESS +#define AR6320V2_GPIO_PIN1_OFFSET AR6320V2_WLAN_GPIO_PIN1_ADDRESS +#define AR6320V2_GPIO_PIN0_CONFIG_MASK AR6320V2_WLAN_GPIO_PIN0_CONFIG_MASK +#define AR6320V2_GPIO_PIN1_CONFIG_MASK AR6320V2_WLAN_GPIO_PIN1_CONFIG_MASK +#define AR6320V2_SI_BASE_ADDRESS 0x00050000 +#define AR6320V2_CPU_CLOCK_OFFSET AR6320V2_SOC_CPU_CLOCK_OFFSET +#define AR6320V2_LPO_CAL_OFFSET AR6320V2_SOC_LPO_CAL_OFFSET +#define AR6320V2_GPIO_PIN10_OFFSET AR6320V2_WLAN_GPIO_PIN10_ADDRESS +#define AR6320V2_GPIO_PIN11_OFFSET AR6320V2_WLAN_GPIO_PIN11_ADDRESS +#define AR6320V2_GPIO_PIN12_OFFSET AR6320V2_WLAN_GPIO_PIN12_ADDRESS +#define AR6320V2_GPIO_PIN13_OFFSET AR6320V2_WLAN_GPIO_PIN13_ADDRESS +#define AR6320V2_CPU_CLOCK_STANDARD_LSB AR6320V2_SOC_CPU_CLOCK_STANDARD_LSB +#define AR6320V2_CPU_CLOCK_STANDARD_MASK AR6320V2_SOC_CPU_CLOCK_STANDARD_MASK +#define AR6320V2_LPO_CAL_ENABLE_LSB AR6320V2_SOC_LPO_CAL_ENABLE_LSB +#define AR6320V2_LPO_CAL_ENABLE_MASK AR6320V2_SOC_LPO_CAL_ENABLE_MASK +#define AR6320V2_ANALOG_INTF_BASE_ADDRESS \ + AR6320V2_WLAN_ANALOG_INTF_BASE_ADDRESS +#define AR6320V2_MBOX_BASE_ADDRESS 0x00008000 +#define AR6320V2_INT_STATUS_ENABLE_ERROR_LSB 7 +#define AR6320V2_INT_STATUS_ENABLE_ERROR_MASK 0x00000080 +#define AR6320V2_INT_STATUS_ENABLE_CPU_LSB 6 +#define AR6320V2_INT_STATUS_ENABLE_CPU_MASK 0x00000040 +#define AR6320V2_INT_STATUS_ENABLE_COUNTER_LSB 4 +#define AR6320V2_INT_STATUS_ENABLE_COUNTER_MASK 0x00000010 +#define AR6320V2_INT_STATUS_ENABLE_MBOX_DATA_LSB 0 +#define AR6320V2_INT_STATUS_ENABLE_MBOX_DATA_MASK 0x0000000f +#define AR6320V2_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 17 +#define AR6320V2_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00020000 +#define AR6320V2_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 16 +#define AR6320V2_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00010000 +#define AR6320V2_COUNTER_INT_STATUS_ENABLE_BIT_LSB 24 +#define AR6320V2_COUNTER_INT_STATUS_ENABLE_BIT_MASK 0xff000000 +#define AR6320V2_INT_STATUS_ENABLE_ADDRESS 0x0828 +#define AR6320V2_CPU_INT_STATUS_ENABLE_BIT_LSB 8 +#define AR6320V2_CPU_INT_STATUS_ENABLE_BIT_MASK 0x0000ff00 +#define AR6320V2_HOST_INT_STATUS_ADDRESS 0x0800 +#define AR6320V2_CPU_INT_STATUS_ADDRESS 0x0801 +#define AR6320V2_ERROR_INT_STATUS_ADDRESS 0x0802 +#define AR6320V2_ERROR_INT_STATUS_WAKEUP_MASK 0x00040000 +#define AR6320V2_ERROR_INT_STATUS_WAKEUP_LSB 18 +#define AR6320V2_ERROR_INT_STATUS_RX_UNDERFLOW_MASK 0x00020000 +#define AR6320V2_ERROR_INT_STATUS_RX_UNDERFLOW_LSB 17 +#define AR6320V2_ERROR_INT_STATUS_TX_OVERFLOW_MASK 0x00010000 +#define AR6320V2_ERROR_INT_STATUS_TX_OVERFLOW_LSB 16 +#define AR6320V2_COUNT_DEC_ADDRESS 0x0840 +#define AR6320V2_HOST_INT_STATUS_CPU_MASK 0x00000040 +#define AR6320V2_HOST_INT_STATUS_CPU_LSB 6 +#define AR6320V2_HOST_INT_STATUS_ERROR_MASK 0x00000080 +#define AR6320V2_HOST_INT_STATUS_ERROR_LSB 7 +#define AR6320V2_HOST_INT_STATUS_COUNTER_MASK 0x00000010 +#define AR6320V2_HOST_INT_STATUS_COUNTER_LSB 4 +#define AR6320V2_RX_LOOKAHEAD_VALID_ADDRESS 0x0805 +#define AR6320V2_WINDOW_DATA_ADDRESS 0x0874 +#define AR6320V2_WINDOW_READ_ADDR_ADDRESS 0x087c +#define AR6320V2_WINDOW_WRITE_ADDR_ADDRESS 0x0878 +#define AR6320V2_HOST_INT_STATUS_MBOX_DATA_MASK 0x0f +#define AR6320V2_HOST_INT_STATUS_MBOX_DATA_LSB 0 + +struct targetdef_s ar6320v2_targetdef = { + .d_RTC_SOC_BASE_ADDRESS = AR6320V2_RTC_SOC_BASE_ADDRESS, + .d_RTC_WMAC_BASE_ADDRESS = AR6320V2_RTC_WMAC_BASE_ADDRESS, + .d_SYSTEM_SLEEP_OFFSET = AR6320V2_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_OFFSET = AR6320V2_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_DISABLE_LSB = + AR6320V2_WLAN_SYSTEM_SLEEP_DISABLE_LSB, + .d_WLAN_SYSTEM_SLEEP_DISABLE_MASK = + AR6320V2_WLAN_SYSTEM_SLEEP_DISABLE_MASK, + .d_CLOCK_CONTROL_OFFSET = AR6320V2_CLOCK_CONTROL_OFFSET, + .d_CLOCK_CONTROL_SI0_CLK_MASK = AR6320V2_CLOCK_CONTROL_SI0_CLK_MASK, + .d_RESET_CONTROL_OFFSET = AR6320V2_SOC_RESET_CONTROL_OFFSET, + .d_RESET_CONTROL_MBOX_RST_MASK = AR6320V2_RESET_CONTROL_MBOX_RST_MASK, + .d_RESET_CONTROL_SI0_RST_MASK = AR6320V2_RESET_CONTROL_SI0_RST_MASK, + .d_WLAN_RESET_CONTROL_OFFSET = AR6320V2_WLAN_RESET_CONTROL_OFFSET, + .d_WLAN_RESET_CONTROL_COLD_RST_MASK = + AR6320V2_WLAN_RESET_CONTROL_COLD_RST_MASK, + .d_WLAN_RESET_CONTROL_WARM_RST_MASK = + AR6320V2_WLAN_RESET_CONTROL_WARM_RST_MASK, + .d_GPIO_BASE_ADDRESS = AR6320V2_GPIO_BASE_ADDRESS, + .d_GPIO_PIN0_OFFSET = AR6320V2_GPIO_PIN0_OFFSET, + .d_GPIO_PIN1_OFFSET = AR6320V2_GPIO_PIN1_OFFSET, + .d_GPIO_PIN0_CONFIG_MASK = AR6320V2_GPIO_PIN0_CONFIG_MASK, + .d_GPIO_PIN1_CONFIG_MASK = AR6320V2_GPIO_PIN1_CONFIG_MASK, + .d_SI_CONFIG_BIDIR_OD_DATA_LSB = AR6320V2_SI_CONFIG_BIDIR_OD_DATA_LSB, + .d_SI_CONFIG_BIDIR_OD_DATA_MASK = + AR6320V2_SI_CONFIG_BIDIR_OD_DATA_MASK, + .d_SI_CONFIG_I2C_LSB = AR6320V2_SI_CONFIG_I2C_LSB, + .d_SI_CONFIG_I2C_MASK = AR6320V2_SI_CONFIG_I2C_MASK, + .d_SI_CONFIG_POS_SAMPLE_LSB = AR6320V2_SI_CONFIG_POS_SAMPLE_LSB, + .d_SI_CONFIG_POS_SAMPLE_MASK = AR6320V2_SI_CONFIG_POS_SAMPLE_MASK, + .d_SI_CONFIG_INACTIVE_CLK_LSB = AR6320V2_SI_CONFIG_INACTIVE_CLK_LSB, + .d_SI_CONFIG_INACTIVE_CLK_MASK = AR6320V2_SI_CONFIG_INACTIVE_CLK_MASK, + .d_SI_CONFIG_INACTIVE_DATA_LSB = AR6320V2_SI_CONFIG_INACTIVE_DATA_LSB, + .d_SI_CONFIG_INACTIVE_DATA_MASK = + AR6320V2_SI_CONFIG_INACTIVE_DATA_MASK, + .d_SI_CONFIG_DIVIDER_LSB = AR6320V2_SI_CONFIG_DIVIDER_LSB, + .d_SI_CONFIG_DIVIDER_MASK = AR6320V2_SI_CONFIG_DIVIDER_MASK, + .d_SI_BASE_ADDRESS = AR6320V2_SI_BASE_ADDRESS, + .d_SI_CONFIG_OFFSET = AR6320V2_SI_CONFIG_OFFSET, + .d_SI_TX_DATA0_OFFSET = AR6320V2_SI_TX_DATA0_OFFSET, + .d_SI_TX_DATA1_OFFSET = AR6320V2_SI_TX_DATA1_OFFSET, + .d_SI_RX_DATA0_OFFSET = AR6320V2_SI_RX_DATA0_OFFSET, + .d_SI_RX_DATA1_OFFSET = AR6320V2_SI_RX_DATA1_OFFSET, + .d_SI_CS_OFFSET = AR6320V2_SI_CS_OFFSET, + .d_SI_CS_DONE_ERR_MASK = AR6320V2_SI_CS_DONE_ERR_MASK, + .d_SI_CS_DONE_INT_MASK = AR6320V2_SI_CS_DONE_INT_MASK, + .d_SI_CS_START_LSB = AR6320V2_SI_CS_START_LSB, + .d_SI_CS_START_MASK = AR6320V2_SI_CS_START_MASK, + .d_SI_CS_RX_CNT_LSB = AR6320V2_SI_CS_RX_CNT_LSB, + .d_SI_CS_RX_CNT_MASK = AR6320V2_SI_CS_RX_CNT_MASK, + .d_SI_CS_TX_CNT_LSB = AR6320V2_SI_CS_TX_CNT_LSB, + .d_SI_CS_TX_CNT_MASK = AR6320V2_SI_CS_TX_CNT_MASK, + .d_BOARD_DATA_SZ = AR6320_BOARD_DATA_SZ, + .d_BOARD_EXT_DATA_SZ = AR6320_BOARD_EXT_DATA_SZ, + .d_MBOX_BASE_ADDRESS = AR6320V2_MBOX_BASE_ADDRESS, + .d_LOCAL_SCRATCH_OFFSET = AR6320V2_LOCAL_SCRATCH_OFFSET, + .d_CPU_CLOCK_OFFSET = AR6320V2_CPU_CLOCK_OFFSET, + .d_LPO_CAL_OFFSET = AR6320V2_LPO_CAL_OFFSET, + .d_GPIO_PIN10_OFFSET = AR6320V2_GPIO_PIN10_OFFSET, + .d_GPIO_PIN11_OFFSET = AR6320V2_GPIO_PIN11_OFFSET, + .d_GPIO_PIN12_OFFSET = AR6320V2_GPIO_PIN12_OFFSET, + .d_GPIO_PIN13_OFFSET = AR6320V2_GPIO_PIN13_OFFSET, + .d_CLOCK_GPIO_OFFSET = AR6320V2_CLOCK_GPIO_OFFSET, + .d_CPU_CLOCK_STANDARD_LSB = AR6320V2_CPU_CLOCK_STANDARD_LSB, + .d_CPU_CLOCK_STANDARD_MASK = AR6320V2_CPU_CLOCK_STANDARD_MASK, + .d_LPO_CAL_ENABLE_LSB = AR6320V2_LPO_CAL_ENABLE_LSB, + .d_LPO_CAL_ENABLE_MASK = AR6320V2_LPO_CAL_ENABLE_MASK, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB = + AR6320V2_CLOCK_GPIO_BT_CLK_OUT_EN_LSB, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK = + AR6320V2_CLOCK_GPIO_BT_CLK_OUT_EN_MASK, + .d_ANALOG_INTF_BASE_ADDRESS = AR6320V2_ANALOG_INTF_BASE_ADDRESS, + .d_WLAN_MAC_BASE_ADDRESS = AR6320V2_WLAN_MAC_BASE_ADDRESS, + .d_FW_INDICATOR_ADDRESS = AR6320V2_FW_INDICATOR_ADDRESS, + .d_DRAM_BASE_ADDRESS = AR6320V2_DRAM_BASE_ADDRESS, + .d_SOC_CORE_BASE_ADDRESS = AR6320V2_SOC_CORE_BASE_ADDRESS, + .d_CORE_CTRL_ADDRESS = AR6320V2_CORE_CTRL_ADDRESS, +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \ + defined(HIF_IPCI) + .d_MSI_NUM_REQUEST = MSI_NUM_REQUEST, + .d_MSI_ASSIGN_FW = MSI_ASSIGN_FW, +#endif + .d_CORE_CTRL_CPU_INTR_MASK = AR6320V2_CORE_CTRL_CPU_INTR_MASK, + .d_SR_WR_INDEX_ADDRESS = AR6320V2_SR_WR_INDEX_ADDRESS, + .d_DST_WATERMARK_ADDRESS = AR6320V2_DST_WATERMARK_ADDRESS, + /* htt_rx.c */ + .d_RX_MSDU_END_4_FIRST_MSDU_MASK = + AR6320V2_RX_MSDU_END_4_FIRST_MSDU_MASK, + .d_RX_MSDU_END_4_FIRST_MSDU_LSB = + AR6320V2_RX_MSDU_END_4_FIRST_MSDU_LSB, + .d_RX_MPDU_START_0_RETRY_MASK = + AR6320V2_RX_MPDU_START_0_RETRY_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_MASK = + AR6320V2_RX_MPDU_START_0_SEQ_NUM_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_MASK = + AR6320V2_RX_MPDU_START_0_SEQ_NUM_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_LSB = AR6320V2_RX_MPDU_START_0_SEQ_NUM_LSB, + .d_RX_MPDU_START_2_PN_47_32_LSB = + AR6320V2_RX_MPDU_START_2_PN_47_32_LSB, + .d_RX_MPDU_START_2_PN_47_32_MASK = + AR6320V2_RX_MPDU_START_2_PN_47_32_MASK, + .d_RX_MPDU_START_2_TID_LSB = + AR6320V2_RX_MPDU_START_2_TID_LSB, + .d_RX_MPDU_START_2_TID_MASK = + AR6320V2_RX_MPDU_START_2_TID_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK = + AR6320V2_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB = + AR6320V2_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB, + .d_RX_MSDU_END_4_LAST_MSDU_MASK = + AR6320V2_RX_MSDU_END_4_LAST_MSDU_MASK, + .d_RX_MSDU_END_4_LAST_MSDU_LSB = AR6320V2_RX_MSDU_END_4_LAST_MSDU_LSB, + .d_RX_ATTENTION_0_MCAST_BCAST_MASK = + AR6320V2_RX_ATTENTION_0_MCAST_BCAST_MASK, + .d_RX_ATTENTION_0_MCAST_BCAST_LSB = + AR6320V2_RX_ATTENTION_0_MCAST_BCAST_LSB, + .d_RX_ATTENTION_0_FRAGMENT_MASK = + AR6320V2_RX_ATTENTION_0_FRAGMENT_MASK, + .d_RX_ATTENTION_0_FRAGMENT_LSB = AR6320V2_RX_ATTENTION_0_FRAGMENT_LSB, + .d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK = + AR6320V2_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK = + AR6320V2_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB = + AR6320V2_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB, + .d_RX_MSDU_START_0_MSDU_LENGTH_MASK = + AR6320V2_RX_MSDU_START_0_MSDU_LENGTH_MASK, + .d_RX_MSDU_START_0_MSDU_LENGTH_LSB = + AR6320V2_RX_MSDU_START_0_MSDU_LENGTH_LSB, + .d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET = + AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_OFFSET, + .d_RX_MSDU_START_2_DECAP_FORMAT_MASK = + AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_MASK, + .d_RX_MSDU_START_2_DECAP_FORMAT_LSB = + AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_LSB, + .d_RX_MPDU_START_0_ENCRYPTED_MASK = + AR6320V2_RX_MPDU_START_0_ENCRYPTED_MASK, + .d_RX_MPDU_START_0_ENCRYPTED_LSB = + AR6320V2_RX_MPDU_START_0_ENCRYPTED_LSB, + .d_RX_ATTENTION_0_MORE_DATA_MASK = + AR6320V2_RX_ATTENTION_0_MORE_DATA_MASK, + .d_RX_ATTENTION_0_MSDU_DONE_MASK = + AR6320V2_RX_ATTENTION_0_MSDU_DONE_MASK, + .d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK = + AR6320V2_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK, +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \ + defined(HIF_IPCI) + .d_CE_COUNT = AR6320V2_CE_COUNT, + .d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL, + .d_PCIE_INTR_ENABLE_ADDRESS = AR6320V2_PCIE_INTR_ENABLE_ADDRESS, + .d_PCIE_INTR_CLR_ADDRESS = AR6320V2_PCIE_INTR_CLR_ADDRESS, + .d_PCIE_INTR_FIRMWARE_MASK = AR6320V2_PCIE_INTR_FIRMWARE_MASK, + .d_PCIE_INTR_CE_MASK_ALL = AR6320V2_PCIE_INTR_CE_MASK_ALL, + /* PLL start */ + .d_EFUSE_OFFSET = AR6320V2_EFUSE_OFFSET, + .d_EFUSE_XTAL_SEL_MSB = AR6320V2_EFUSE_XTAL_SEL_MSB, + .d_EFUSE_XTAL_SEL_LSB = AR6320V2_EFUSE_XTAL_SEL_LSB, + .d_EFUSE_XTAL_SEL_MASK = AR6320V2_EFUSE_XTAL_SEL_MASK, + .d_BB_PLL_CONFIG_OFFSET = AR6320V2_BB_PLL_CONFIG_OFFSET, + .d_BB_PLL_CONFIG_OUTDIV_MSB = AR6320V2_BB_PLL_CONFIG_OUTDIV_MSB, + .d_BB_PLL_CONFIG_OUTDIV_LSB = AR6320V2_BB_PLL_CONFIG_OUTDIV_LSB, + .d_BB_PLL_CONFIG_OUTDIV_MASK = AR6320V2_BB_PLL_CONFIG_OUTDIV_MASK, + .d_BB_PLL_CONFIG_FRAC_MSB = AR6320V2_BB_PLL_CONFIG_FRAC_MSB, + .d_BB_PLL_CONFIG_FRAC_LSB = AR6320V2_BB_PLL_CONFIG_FRAC_LSB, + .d_BB_PLL_CONFIG_FRAC_MASK = AR6320V2_BB_PLL_CONFIG_FRAC_MASK, + .d_WLAN_PLL_SETTLE_TIME_MSB = AR6320V2_WLAN_PLL_SETTLE_TIME_MSB, + .d_WLAN_PLL_SETTLE_TIME_LSB = AR6320V2_WLAN_PLL_SETTLE_TIME_LSB, + .d_WLAN_PLL_SETTLE_TIME_MASK = AR6320V2_WLAN_PLL_SETTLE_TIME_MASK, + .d_WLAN_PLL_SETTLE_OFFSET = AR6320V2_WLAN_PLL_SETTLE_OFFSET, + .d_WLAN_PLL_SETTLE_SW_MASK = AR6320V2_WLAN_PLL_SETTLE_SW_MASK, + .d_WLAN_PLL_SETTLE_RSTMASK = AR6320V2_WLAN_PLL_SETTLE_RSTMASK, + .d_WLAN_PLL_SETTLE_RESET = AR6320V2_WLAN_PLL_SETTLE_RESET, + .d_WLAN_PLL_CONTROL_NOPWD_MSB = AR6320V2_WLAN_PLL_CONTROL_NOPWD_MSB, + .d_WLAN_PLL_CONTROL_NOPWD_LSB = AR6320V2_WLAN_PLL_CONTROL_NOPWD_LSB, + .d_WLAN_PLL_CONTROL_NOPWD_MASK = AR6320V2_WLAN_PLL_CONTROL_NOPWD_MASK, + .d_WLAN_PLL_CONTROL_BYPASS_MSB = AR6320V2_WLAN_PLL_CONTROL_BYPASS_MSB, + .d_WLAN_PLL_CONTROL_BYPASS_LSB = AR6320V2_WLAN_PLL_CONTROL_BYPASS_LSB, + .d_WLAN_PLL_CONTROL_BYPASS_MASK = + AR6320V2_WLAN_PLL_CONTROL_BYPASS_MASK, + .d_WLAN_PLL_CONTROL_BYPASS_RESET = + AR6320V2_WLAN_PLL_CONTROL_BYPASS_RESET, + .d_WLAN_PLL_CONTROL_CLK_SEL_MSB = + AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_MSB, + .d_WLAN_PLL_CONTROL_CLK_SEL_LSB = + AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_LSB, + .d_WLAN_PLL_CONTROL_CLK_SEL_MASK = + AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_MASK, + .d_WLAN_PLL_CONTROL_CLK_SEL_RESET = + AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_RESET, + .d_WLAN_PLL_CONTROL_REFDIV_MSB = AR6320V2_WLAN_PLL_CONTROL_REFDIV_MSB, + .d_WLAN_PLL_CONTROL_REFDIV_LSB = AR6320V2_WLAN_PLL_CONTROL_REFDIV_LSB, + .d_WLAN_PLL_CONTROL_REFDIV_MASK = + AR6320V2_WLAN_PLL_CONTROL_REFDIV_MASK, + .d_WLAN_PLL_CONTROL_REFDIV_RESET = + AR6320V2_WLAN_PLL_CONTROL_REFDIV_RESET, + .d_WLAN_PLL_CONTROL_DIV_MSB = AR6320V2_WLAN_PLL_CONTROL_DIV_MSB, + .d_WLAN_PLL_CONTROL_DIV_LSB = AR6320V2_WLAN_PLL_CONTROL_DIV_LSB, + .d_WLAN_PLL_CONTROL_DIV_MASK = AR6320V2_WLAN_PLL_CONTROL_DIV_MASK, + .d_WLAN_PLL_CONTROL_DIV_RESET = AR6320V2_WLAN_PLL_CONTROL_DIV_RESET, + .d_WLAN_PLL_CONTROL_OFFSET = AR6320V2_WLAN_PLL_CONTROL_OFFSET, + .d_WLAN_PLL_CONTROL_SW_MASK = AR6320V2_WLAN_PLL_CONTROL_SW_MASK, + .d_WLAN_PLL_CONTROL_RSTMASK = AR6320V2_WLAN_PLL_CONTROL_RSTMASK, + .d_WLAN_PLL_CONTROL_RESET = AR6320V2_WLAN_PLL_CONTROL_RESET, + .d_SOC_CORE_CLK_CTRL_OFFSET = AR6320V2_SOC_CORE_CLK_CTRL_OFFSET, + .d_SOC_CORE_CLK_CTRL_DIV_MSB = AR6320V2_SOC_CORE_CLK_CTRL_DIV_MSB, + .d_SOC_CORE_CLK_CTRL_DIV_LSB = AR6320V2_SOC_CORE_CLK_CTRL_DIV_LSB, + .d_SOC_CORE_CLK_CTRL_DIV_MASK = AR6320V2_SOC_CORE_CLK_CTRL_DIV_MASK, + .d_RTC_SYNC_STATUS_PLL_CHANGING_MSB = + AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_MSB, + .d_RTC_SYNC_STATUS_PLL_CHANGING_LSB = + AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_LSB, + .d_RTC_SYNC_STATUS_PLL_CHANGING_MASK = + AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_MASK, + .d_RTC_SYNC_STATUS_PLL_CHANGING_RESET = + AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_RESET, + .d_RTC_SYNC_STATUS_OFFSET = AR6320V2_RTC_SYNC_STATUS_OFFSET, + .d_SOC_CPU_CLOCK_OFFSET = AR6320V2_SOC_CPU_CLOCK_OFFSET, + .d_SOC_CPU_CLOCK_STANDARD_MSB = AR6320V2_SOC_CPU_CLOCK_STANDARD_MSB, + .d_SOC_CPU_CLOCK_STANDARD_LSB = AR6320V2_SOC_CPU_CLOCK_STANDARD_LSB, + .d_SOC_CPU_CLOCK_STANDARD_MASK = AR6320V2_SOC_CPU_CLOCK_STANDARD_MASK, + /* PLL end */ + .d_SOC_POWER_REG_OFFSET = AR6320V2_SOC_POWER_REG_OFFSET, + .d_PCIE_INTR_CAUSE_ADDRESS = AR6320V2_PCIE_INTR_CAUSE_ADDRESS, + .d_SOC_RESET_CONTROL_ADDRESS = AR6320V2_SOC_RESET_CONTROL_ADDRESS, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK = + AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB = + AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB, + .d_SOC_RESET_CONTROL_CE_RST_MASK = + AR6320V2_SOC_RESET_CONTROL_CE_RST_MASK, + .d_WLAN_DEBUG_INPUT_SEL_OFFSET = AR6320V2_WLAN_DEBUG_INPUT_SEL_OFFSET, + .d_WLAN_DEBUG_INPUT_SEL_SRC_MSB = + AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_MSB, + .d_WLAN_DEBUG_INPUT_SEL_SRC_LSB = + AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_LSB, + .d_WLAN_DEBUG_INPUT_SEL_SRC_MASK = + AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_MASK, + .d_WLAN_DEBUG_CONTROL_OFFSET = AR6320V2_WLAN_DEBUG_CONTROL_OFFSET, + .d_WLAN_DEBUG_CONTROL_ENABLE_MSB = + AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_MSB, + .d_WLAN_DEBUG_CONTROL_ENABLE_LSB = + AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_LSB, + .d_WLAN_DEBUG_CONTROL_ENABLE_MASK = + AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_MASK, + .d_WLAN_DEBUG_OUT_OFFSET = AR6320V2_WLAN_DEBUG_OUT_OFFSET, + .d_WLAN_DEBUG_OUT_DATA_MSB = AR6320V2_WLAN_DEBUG_OUT_DATA_MSB, + .d_WLAN_DEBUG_OUT_DATA_LSB = AR6320V2_WLAN_DEBUG_OUT_DATA_LSB, + .d_WLAN_DEBUG_OUT_DATA_MASK = AR6320V2_WLAN_DEBUG_OUT_DATA_MASK, + .d_AMBA_DEBUG_BUS_OFFSET = AR6320V2_AMBA_DEBUG_BUS_OFFSET, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB = + AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB = + AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK = + AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK, + .d_AMBA_DEBUG_BUS_SEL_MSB = AR6320V2_AMBA_DEBUG_BUS_SEL_MSB, + .d_AMBA_DEBUG_BUS_SEL_LSB = AR6320V2_AMBA_DEBUG_BUS_SEL_LSB, + .d_AMBA_DEBUG_BUS_SEL_MASK = AR6320V2_AMBA_DEBUG_BUS_SEL_MASK, +#endif + .d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK = + AR6320V2_SOC_RESET_CONTROL_CPU_WARM_RST_MASK, + .d_CPU_INTR_ADDRESS = AR6320V2_CPU_INTR_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ADDRESS = + AR6320V2_SOC_LF_TIMER_CONTROL0_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK = + AR6320V2_SOC_LF_TIMER_CONTROL0_ENABLE_MASK, + .d_SOC_LF_TIMER_STATUS0_ADDRESS = + AR6320V2_SOC_LF_TIMER_STATUS0_ADDRESS, + /* chip id start */ + .d_SOC_CHIP_ID_ADDRESS = AR6320V2_SOC_CHIP_ID_ADDRESS, + .d_SOC_CHIP_ID_VERSION_MASK = AR6320V2_SOC_CHIP_ID_VERSION_MASK, + .d_SOC_CHIP_ID_VERSION_LSB = AR6320V2_SOC_CHIP_ID_VERSION_LSB, + .d_SOC_CHIP_ID_REVISION_MASK = AR6320V2_SOC_CHIP_ID_REVISION_MASK, + .d_SOC_CHIP_ID_REVISION_LSB = AR6320V2_SOC_CHIP_ID_REVISION_LSB, + /* chip id end */ +}; + +struct hostdef_s ar6320v2_hostdef = { + .d_INT_STATUS_ENABLE_ERROR_LSB = AR6320V2_INT_STATUS_ENABLE_ERROR_LSB, + .d_INT_STATUS_ENABLE_ERROR_MASK = + AR6320V2_INT_STATUS_ENABLE_ERROR_MASK, + .d_INT_STATUS_ENABLE_CPU_LSB = AR6320V2_INT_STATUS_ENABLE_CPU_LSB, + .d_INT_STATUS_ENABLE_CPU_MASK = AR6320V2_INT_STATUS_ENABLE_CPU_MASK, + .d_INT_STATUS_ENABLE_COUNTER_LSB = + AR6320V2_INT_STATUS_ENABLE_COUNTER_LSB, + .d_INT_STATUS_ENABLE_COUNTER_MASK = + AR6320V2_INT_STATUS_ENABLE_COUNTER_MASK, + .d_INT_STATUS_ENABLE_MBOX_DATA_LSB = + AR6320V2_INT_STATUS_ENABLE_MBOX_DATA_LSB, + .d_INT_STATUS_ENABLE_MBOX_DATA_MASK = + AR6320V2_INT_STATUS_ENABLE_MBOX_DATA_MASK, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB = + AR6320V2_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK = + AR6320V2_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB = + AR6320V2_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK = + AR6320V2_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, + .d_COUNTER_INT_STATUS_ENABLE_BIT_LSB = + AR6320V2_COUNTER_INT_STATUS_ENABLE_BIT_LSB, + .d_COUNTER_INT_STATUS_ENABLE_BIT_MASK = + AR6320V2_COUNTER_INT_STATUS_ENABLE_BIT_MASK, + .d_INT_STATUS_ENABLE_ADDRESS = AR6320V2_INT_STATUS_ENABLE_ADDRESS, + .d_CPU_INT_STATUS_ENABLE_BIT_LSB = + AR6320V2_CPU_INT_STATUS_ENABLE_BIT_LSB, + .d_CPU_INT_STATUS_ENABLE_BIT_MASK = + AR6320V2_CPU_INT_STATUS_ENABLE_BIT_MASK, + .d_HOST_INT_STATUS_ADDRESS = AR6320V2_HOST_INT_STATUS_ADDRESS, + .d_CPU_INT_STATUS_ADDRESS = AR6320V2_CPU_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_ADDRESS = AR6320V2_ERROR_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_WAKEUP_MASK = + AR6320V2_ERROR_INT_STATUS_WAKEUP_MASK, + .d_ERROR_INT_STATUS_WAKEUP_LSB = AR6320V2_ERROR_INT_STATUS_WAKEUP_LSB, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK = + AR6320V2_ERROR_INT_STATUS_RX_UNDERFLOW_MASK, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB = + AR6320V2_ERROR_INT_STATUS_RX_UNDERFLOW_LSB, + .d_ERROR_INT_STATUS_TX_OVERFLOW_MASK = + AR6320V2_ERROR_INT_STATUS_TX_OVERFLOW_MASK, + .d_ERROR_INT_STATUS_TX_OVERFLOW_LSB = + AR6320V2_ERROR_INT_STATUS_TX_OVERFLOW_LSB, + .d_COUNT_DEC_ADDRESS = AR6320V2_COUNT_DEC_ADDRESS, + .d_HOST_INT_STATUS_CPU_MASK = AR6320V2_HOST_INT_STATUS_CPU_MASK, + .d_HOST_INT_STATUS_CPU_LSB = AR6320V2_HOST_INT_STATUS_CPU_LSB, + .d_HOST_INT_STATUS_ERROR_MASK = AR6320V2_HOST_INT_STATUS_ERROR_MASK, + .d_HOST_INT_STATUS_ERROR_LSB = AR6320V2_HOST_INT_STATUS_ERROR_LSB, + .d_HOST_INT_STATUS_COUNTER_MASK = + AR6320V2_HOST_INT_STATUS_COUNTER_MASK, + .d_HOST_INT_STATUS_COUNTER_LSB = AR6320V2_HOST_INT_STATUS_COUNTER_LSB, + .d_RX_LOOKAHEAD_VALID_ADDRESS = AR6320V2_RX_LOOKAHEAD_VALID_ADDRESS, + .d_WINDOW_DATA_ADDRESS = AR6320V2_WINDOW_DATA_ADDRESS, + .d_WINDOW_READ_ADDR_ADDRESS = AR6320V2_WINDOW_READ_ADDR_ADDRESS, + .d_WINDOW_WRITE_ADDR_ADDRESS = AR6320V2_WINDOW_WRITE_ADDR_ADDRESS, + .d_SOC_GLOBAL_RESET_ADDRESS = AR6320V2_SOC_GLOBAL_RESET_ADDRESS, + .d_RTC_STATE_ADDRESS = AR6320V2_RTC_STATE_ADDRESS, + .d_RTC_STATE_COLD_RESET_MASK = AR6320V2_RTC_STATE_COLD_RESET_MASK, + .d_RTC_STATE_V_MASK = AR6320V2_RTC_STATE_V_MASK, + .d_RTC_STATE_V_LSB = AR6320V2_RTC_STATE_V_LSB, + .d_FW_IND_EVENT_PENDING = AR6320V2_FW_IND_EVENT_PENDING, + .d_FW_IND_INITIALIZED = AR6320V2_FW_IND_INITIALIZED, + .d_RTC_STATE_V_ON = AR6320V2_RTC_STATE_V_ON, +#if defined(SDIO_3_0) + .d_HOST_INT_STATUS_MBOX_DATA_MASK = + AR6320V2_HOST_INT_STATUS_MBOX_DATA_MASK, + .d_HOST_INT_STATUS_MBOX_DATA_LSB = + AR6320V2_HOST_INT_STATUS_MBOX_DATA_LSB, +#endif +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \ + defined(HIF_IPCI) + .d_FW_IND_HELPER = AR6320V2_FW_IND_HELPER, + .d_MUX_ID_MASK = AR6320V2_MUX_ID_MASK, + .d_TRANSACTION_ID_MASK = AR6320V2_TRANSACTION_ID_MASK, + .d_PCIE_LOCAL_BASE_ADDRESS = AR6320V2_PCIE_LOCAL_BASE_ADDRESS, + .d_PCIE_SOC_WAKE_RESET = AR6320V2_PCIE_SOC_WAKE_RESET, + .d_PCIE_SOC_WAKE_ADDRESS = AR6320V2_PCIE_SOC_WAKE_ADDRESS, + .d_PCIE_SOC_WAKE_V_MASK = AR6320V2_PCIE_SOC_WAKE_V_MASK, + .d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS, + .d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK, + .d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS, + .d_MSI_MAGIC_ADR_ADDRESS = MSI_MAGIC_ADR_ADDRESS, + .d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS, + .d_HOST_CE_COUNT = 8, + .d_ENABLE_MSI = 0, +#endif +#if defined(HIF_SDIO) + .d_FW_IND_HELPER = AR6320V2_FW_IND_HELPER, +#endif +}; + +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \ + defined(HIF_IPCI) +struct ce_reg_def ar6320v2_ce_targetdef = { + /* copy_engine.c */ + .d_DST_WR_INDEX_ADDRESS = AR6320V2_DST_WR_INDEX_ADDRESS, + .d_SRC_WATERMARK_ADDRESS = AR6320V2_SRC_WATERMARK_ADDRESS, + .d_SRC_WATERMARK_LOW_MASK = AR6320V2_SRC_WATERMARK_LOW_MASK, + .d_SRC_WATERMARK_HIGH_MASK = AR6320V2_SRC_WATERMARK_HIGH_MASK, + .d_DST_WATERMARK_LOW_MASK = AR6320V2_DST_WATERMARK_LOW_MASK, + .d_DST_WATERMARK_HIGH_MASK = AR6320V2_DST_WATERMARK_HIGH_MASK, + .d_CURRENT_SRRI_ADDRESS = AR6320V2_CURRENT_SRRI_ADDRESS, + .d_CURRENT_DRRI_ADDRESS = AR6320V2_CURRENT_DRRI_ADDRESS, + .d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK = + AR6320V2_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK = + AR6320V2_HOST_IS_SRC_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK = + AR6320V2_HOST_IS_DST_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_DST_RING_LOW_WATERMARK_MASK = + AR6320V2_HOST_IS_DST_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_ADDRESS = AR6320V2_HOST_IS_ADDRESS, + .d_HOST_IS_COPY_COMPLETE_MASK = AR6320V2_HOST_IS_COPY_COMPLETE_MASK, + .d_CE_WRAPPER_BASE_ADDRESS = AR6320V2_CE_WRAPPER_BASE_ADDRESS, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS = + AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS, + .d_HOST_IE_ADDRESS = AR6320V2_HOST_IE_ADDRESS, + .d_HOST_IE_COPY_COMPLETE_MASK = AR6320V2_HOST_IE_COPY_COMPLETE_MASK, + .d_SR_BA_ADDRESS = AR6320V2_SR_BA_ADDRESS, + .d_SR_SIZE_ADDRESS = AR6320V2_SR_SIZE_ADDRESS, + .d_CE_CTRL1_ADDRESS = AR6320V2_CE_CTRL1_ADDRESS, + .d_CE_CTRL1_DMAX_LENGTH_MASK = AR6320V2_CE_CTRL1_DMAX_LENGTH_MASK, + .d_DR_BA_ADDRESS = AR6320V2_DR_BA_ADDRESS, + .d_DR_SIZE_ADDRESS = AR6320V2_DR_SIZE_ADDRESS, + .d_MISC_IE_ADDRESS = AR6320V2_MISC_IE_ADDRESS, + .d_MISC_IS_AXI_ERR_MASK = AR6320V2_MISC_IS_AXI_ERR_MASK, + .d_MISC_IS_DST_ADDR_ERR_MASK = AR6320V2_MISC_IS_DST_ADDR_ERR_MASK, + .d_MISC_IS_SRC_LEN_ERR_MASK = AR6320V2_MISC_IS_SRC_LEN_ERR_MASK, + .d_MISC_IS_DST_MAX_LEN_VIO_MASK = + AR6320V2_MISC_IS_DST_MAX_LEN_VIO_MASK, + .d_MISC_IS_DST_RING_OVERFLOW_MASK = + AR6320V2_MISC_IS_DST_RING_OVERFLOW_MASK, + .d_MISC_IS_SRC_RING_OVERFLOW_MASK = + AR6320V2_MISC_IS_SRC_RING_OVERFLOW_MASK, + .d_SRC_WATERMARK_LOW_LSB = AR6320V2_SRC_WATERMARK_LOW_LSB, + .d_SRC_WATERMARK_HIGH_LSB = AR6320V2_SRC_WATERMARK_HIGH_LSB, + .d_DST_WATERMARK_LOW_LSB = AR6320V2_DST_WATERMARK_LOW_LSB, + .d_DST_WATERMARK_HIGH_LSB = AR6320V2_DST_WATERMARK_HIGH_LSB, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK = + AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB = + AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB, + .d_CE_CTRL1_DMAX_LENGTH_LSB = AR6320V2_CE_CTRL1_DMAX_LENGTH_LSB, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK = + AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK = + AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB = + AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB = + AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB, + .d_CE_WRAPPER_DEBUG_OFFSET = AR6320V2_CE_WRAPPER_DEBUG_OFFSET, + .d_CE_WRAPPER_DEBUG_SEL_MSB = AR6320V2_CE_WRAPPER_DEBUG_SEL_MSB, + .d_CE_WRAPPER_DEBUG_SEL_LSB = AR6320V2_CE_WRAPPER_DEBUG_SEL_LSB, + .d_CE_WRAPPER_DEBUG_SEL_MASK = AR6320V2_CE_WRAPPER_DEBUG_SEL_MASK, + .d_CE_DEBUG_OFFSET = AR6320V2_CE_DEBUG_OFFSET, + .d_CE_DEBUG_SEL_MSB = AR6320V2_CE_DEBUG_SEL_MSB, + .d_CE_DEBUG_SEL_LSB = AR6320V2_CE_DEBUG_SEL_LSB, + .d_CE_DEBUG_SEL_MASK = AR6320V2_CE_DEBUG_SEL_MASK, + .d_CE0_BASE_ADDRESS = AR6320V2_CE0_BASE_ADDRESS, + .d_CE1_BASE_ADDRESS = AR6320V2_CE1_BASE_ADDRESS, + +}; +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ar900Bdef.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ar900Bdef.c new file mode 100644 index 0000000000000000000000000000000000000000..e2087d2615986286f2e2f8c97e49c25d6cac53b2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ar900Bdef.c @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2010, 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" + +#if defined(AR900B_HEADERS_DEF) +#define AR900B 1 + +#define WLAN_HEADERS 1 +#include "common_drv.h" +#include "AR900B/soc_addrs.h" +#include "AR900B/extra/hw/apb_map.h" +#include "AR900B/hw/gpio_athr_wlan_reg.h" +#ifdef WLAN_HEADERS +#include "AR900B/extra/hw/wifi_top_reg_map.h" +#include "AR900B/hw/rtc_soc_reg.h" +#endif +#include "AR900B/hw/si_reg.h" +#include "AR900B/extra/hw/pcie_local_reg.h" +#include "AR900B/hw/ce_wrapper_reg_csr.h" +/* TODO + * #include "hw/soc_core_reg.h" + * #include "hw/soc_pcie_reg.h" + * #include "hw/ce_reg_csr.h" + */ + +#include "AR900B/extra/hw/soc_core_reg.h" +#include "AR900B/hw/soc_pcie_reg.h" +#include "AR900B/extra/hw/ce_reg_csr.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Base address is defined in pcie_local_reg.h. Macros which access the + * registers include the base address in their definition. + */ +#define PCIE_LOCAL_BASE_ADDRESS 0 + +#define FW_EVENT_PENDING_ADDRESS (WIFICMN_SCRATCH_3_ADDRESS) +#define DRAM_BASE_ADDRESS TARG_DRAM_START + +/* Backwards compatibility -- TBDXXX */ + +#define MISSING 0 + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB WIFI_SYSTEM_SLEEP_DISABLE_LSB +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK WIFI_SYSTEM_SLEEP_DISABLE_MASK +#define WLAN_RESET_CONTROL_COLD_RST_MASK WIFI_RESET_CONTROL_MAC_COLD_RST_MASK +#define WLAN_RESET_CONTROL_WARM_RST_MASK WIFI_RESET_CONTROL_MAC_WARM_RST_MASK +#define SOC_CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_ADDRESS +#define SOC_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_ADDRESS +#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_ADDRESS +#define SOC_LPO_CAL_OFFSET SOC_LPO_CAL_ADDRESS +#define SOC_RESET_CONTROL_CE_RST_MASK WIFI_RESET_CONTROL_CE_RESET_MASK +#define WLAN_SYSTEM_SLEEP_OFFSET WIFI_SYSTEM_SLEEP_ADDRESS +#define WLAN_RESET_CONTROL_OFFSET WIFI_RESET_CONTROL_ADDRESS +#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET +#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK +#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS +#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS +#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS +#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK +#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK +#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS +#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS +#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS +#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS +#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS +#define SI_CONFIG_OFFSET SI_CONFIG_ADDRESS +#define SI_TX_DATA0_OFFSET SI_TX_DATA0_ADDRESS +#define SI_TX_DATA1_OFFSET SI_TX_DATA1_ADDRESS +#define SI_RX_DATA0_OFFSET SI_RX_DATA0_ADDRESS +#define SI_RX_DATA1_OFFSET SI_RX_DATA1_ADDRESS +#define SI_CS_OFFSET SI_CS_ADDRESS +#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB +#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK +#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB +#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK +#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* MAC Descriptor */ +#define RX_PPDU_END_ANTENNA_OFFSET_DWORD (RX_PPDU_END_25_RX_ANTENNA_OFFSET >> 2) +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS WLAN_GPIO_ENABLE_W1TS_LOW_ADDRESS +#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB +#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB +#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK +/* CE descriptor */ +#define CE_SRC_DESC_SIZE_DWORD 2 +#define CE_DEST_DESC_SIZE_DWORD 2 +#define CE_SRC_DESC_SRC_PTR_OFFSET_DWORD 0 +#define CE_SRC_DESC_INFO_OFFSET_DWORD 1 +#define CE_DEST_DESC_DEST_PTR_OFFSET_DWORD 0 +#define CE_DEST_DESC_INFO_OFFSET_DWORD 1 +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_SRC_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 16 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 15 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_SRC_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 0 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 16 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 20 +#endif +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_DEST_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 16 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 15 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_DEST_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 0 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 16 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 20 +#endif + +#define MY_TARGET_DEF AR900B_TARGETdef +#define MY_HOST_DEF AR900B_HOSTdef +#define MY_CEREG_DEF AR900B_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ AR900B_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ AR900B_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(AR900B_CE_TARGETdef); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *AR900B_TARGETdef; +struct hostdef_s *AR900B_HOSTdef; +#endif /*AR900B_HEADERS_DEF */ +qdf_export_symbol(AR900B_TARGETdef); +qdf_export_symbol(AR900B_HOSTdef); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ar9888def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ar9888def.c new file mode 100644 index 0000000000000000000000000000000000000000..e0266ae58a86d5edd9ed6c837d3cc3bcdb909576 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ar9888def.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2013,2016,2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" + +#if defined(AR9888_HEADERS_DEF) +#define AR9888 1 + +#define WLAN_HEADERS 1 +#include "common_drv.h" +#include "AR9888/v2/soc_addrs.h" +#include "AR9888/v2/hw/apb_athr_wlan_map.h" +#include "AR9888/v2/hw/gpio_athr_wlan_reg.h" +#include "AR9888/v2/hw/rtc_soc_reg.h" +#include "AR9888/v2/hw/rtc_wlan_reg.h" +#include "AR9888/v2/hw/si_reg.h" +#include "AR9888/v2/extra/hw/pcie_local_reg.h" + +#include "AR9888/v2/extra/hw/soc_core_reg.h" +#include "AR9888/v2/hw/soc_pcie_reg.h" +#include "AR9888/v2/extra/hw/ce_reg_csr.h" +#include "AR9888/v2/hw/ce_wrapper_reg_csr.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +/* TBDXXX: Eventually, this Base Address will be defined in HW header files */ +#define PCIE_LOCAL_BASE_ADDRESS 0x80000 + +#define FW_EVENT_PENDING_ADDRESS (SOC_CORE_BASE_ADDRESS+SCRATCH_3_ADDRESS) +#define DRAM_BASE_ADDRESS TARG_DRAM_START + +/* Backwards compatibility -- TBDXXX */ + +#define MISSING 0 + +#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET +#define WLAN_SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET +#define WLAN_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_OFFSET +#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET +#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define RESET_CONTROL_MBOX_RST_MASK MISSING +#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK +#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS +#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS +#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS +#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK +#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK +#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS +#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS +#define LOCAL_SCRATCH_OFFSET 0x18 +#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_OFFSET +#define LPO_CAL_OFFSET SOC_LPO_CAL_OFFSET +#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS +#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS +#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS +#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS +#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB +#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK +#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB +#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK +#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* MAC descriptor */ +#define RX_ATTENTION_0_PHY_DATA_TYPE_MASK MISSING +#define RX_MSDU_END_8_LRO_ELIGIBLE_MASK MISSING +#define RX_MSDU_END_8_LRO_ELIGIBLE_LSB MISSING +#define RX_MSDU_END_8_L3_HEADER_PADDING_LSB MISSING +#define RX_MSDU_END_8_L3_HEADER_PADDING_MASK MISSING +#define RX_PPDU_END_ANTENNA_OFFSET_DWORD (RX_PPDU_END_19_RX_ANTENNA_OFFSET >> 2) +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB MISSING +/* GPIO Register */ + +#define GPIO_ENABLE_W1TS_LOW_ADDRESS WLAN_GPIO_ENABLE_W1TS_LOW_ADDRESS +#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB +#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB +#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK +/* CE descriptor */ +#define CE_SRC_DESC_SIZE_DWORD 2 +#define CE_DEST_DESC_SIZE_DWORD 2 +#define CE_SRC_DESC_SRC_PTR_OFFSET_DWORD 0 +#define CE_SRC_DESC_INFO_OFFSET_DWORD 1 +#define CE_DEST_DESC_DEST_PTR_OFFSET_DWORD 0 +#define CE_DEST_DESC_INFO_OFFSET_DWORD 1 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK MISSING +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT MISSING +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK MISSING +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT MISSING +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK MISSING +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT MISSING +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK MISSING +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT MISSING +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_SRC_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 16 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 15 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0x00003FFF +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_SRC_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 0 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 16 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0xFFFC0000 +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 18 +#endif +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_DEST_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 16 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 15 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0x00003FFF +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_DEST_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 0 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 16 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0xFFFC0000 +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 18 +#endif + +#define MY_TARGET_DEF AR9888_TARGETdef +#define MY_HOST_DEF AR9888_HOSTdef +#define MY_CEREG_DEF AR9888_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ AR9888_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ AR9888_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(AR9888_CE_TARGETdef); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *AR9888_TARGETdef; +struct hostdef_s *AR9888_HOSTdef; +#endif /*AR9888_HEADERS_DEF */ +qdf_export_symbol(AR9888_TARGETdef); +qdf_export_symbol(AR9888_HOSTdef); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ar9888def.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ar9888def.h new file mode 100644 index 0000000000000000000000000000000000000000..9ce32937f455ab5ff5f59c0ec6f9ee8bc73aeffe --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ar9888def.h @@ -0,0 +1,594 @@ +/* + * Copyright (c) 2011-2016, 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _AR9888DEF_H_ +#define _AR9888DEF_H_ + +/* Base Addresses */ +#define AR9888_RTC_SOC_BASE_ADDRESS 0x00004000 +#define AR9888_RTC_WMAC_BASE_ADDRESS 0x00005000 +#define AR9888_MAC_COEX_BASE_ADDRESS 0x00006000 +#define AR9888_BT_COEX_BASE_ADDRESS 0x00007000 +#define AR9888_SOC_PCIE_BASE_ADDRESS 0x00008000 +#define AR9888_SOC_CORE_BASE_ADDRESS 0x00009000 +#define AR9888_WLAN_UART_BASE_ADDRESS 0x0000c000 +#define AR9888_WLAN_SI_BASE_ADDRESS 0x00010000 +#define AR9888_WLAN_GPIO_BASE_ADDRESS 0x00014000 +#define AR9888_WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000 +#define AR9888_WLAN_MAC_BASE_ADDRESS 0x00020000 +#define AR9888_EFUSE_BASE_ADDRESS 0x00030000 +#define AR9888_FPGA_REG_BASE_ADDRESS 0x00039000 +#define AR9888_WLAN_UART2_BASE_ADDRESS 0x00054c00 +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) +#define AR9888_CE_WRAPPER_BASE_ADDRESS 0x00057000 +#define AR9888_CE0_BASE_ADDRESS 0x00057400 +#define AR9888_CE1_BASE_ADDRESS 0x00057800 +#define AR9888_CE2_BASE_ADDRESS 0x00057c00 +#define AR9888_CE3_BASE_ADDRESS 0x00058000 +#define AR9888_CE4_BASE_ADDRESS 0x00058400 +#define AR9888_CE5_BASE_ADDRESS 0x00058800 +#define AR9888_CE6_BASE_ADDRESS 0x00058c00 +#define AR9888_CE7_BASE_ADDRESS 0x00059000 +#define AR9888_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000 +#define AR9888_CE_CTRL1_ADDRESS 0x0010 +#define AR9888_CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff +#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 +#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00 +#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8 +#define AR9888_CE_CTRL1_DMAX_LENGTH_LSB 0 +#define AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000 +#define AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000 +#define AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16 +#define AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17 +#define AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000004 +#define AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB 2 +#define AR9888_PCIE_SOC_WAKE_RESET 0x00000000 +#define AR9888_PCIE_SOC_WAKE_ADDRESS 0x0004 +#define AR9888_PCIE_SOC_WAKE_V_MASK 0x00000001 +#define AR9888_PCIE_INTR_ENABLE_ADDRESS 0x0008 +#define AR9888_PCIE_INTR_CLR_ADDRESS 0x0014 +#define AR9888_PCIE_INTR_FIRMWARE_MASK 0x00000400 +#define AR9888_PCIE_INTR_CE0_MASK 0x00000800 +#define AR9888_PCIE_INTR_CE_MASK_ALL 0x0007f800 +#define AR9888_PCIE_INTR_CAUSE_ADDRESS 0x000c +#define AR9888_MUX_ID_MASK 0x0000 +#define AR9888_TRANSACTION_ID_MASK 0x3fff +#define AR9888_PCIE_LOCAL_BASE_ADDRESS 0x80000 +#define AR9888_SOC_RESET_CONTROL_CE_RST_MASK 0x00040000 +#define AR9888_PCIE_INTR_CE_MASK(n) (AR9888_PCIE_INTR_CE0_MASK << (n)) +#endif +#define AR9888_DBI_BASE_ADDRESS 0x00060000 +#define AR9888_SCRATCH_3_ADDRESS 0x0030 +#define AR9888_TARG_DRAM_START 0x00400000 +#define AR9888_SOC_SYSTEM_SLEEP_OFFSET 0x000000c4 +#define AR9888_SOC_RESET_CONTROL_OFFSET 0x00000000 +#define AR9888_SOC_CLOCK_CONTROL_OFFSET 0x00000028 +#define AR9888_SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001 +#define AR9888_SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001 +#define AR9888_WLAN_GPIO_BASE_ADDRESS 0x00014000 +#define AR9888_WLAN_GPIO_PIN0_ADDRESS 0x00000028 +#define AR9888_WLAN_GPIO_PIN1_ADDRESS 0x0000002c +#define AR9888_WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 +#define AR9888_WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 +#define AR9888_WLAN_SI_BASE_ADDRESS 0x00010000 +#define AR9888_SOC_CPU_CLOCK_OFFSET 0x00000020 +#define AR9888_SOC_LPO_CAL_OFFSET 0x000000e0 +#define AR9888_WLAN_GPIO_PIN10_ADDRESS 0x00000050 +#define AR9888_WLAN_GPIO_PIN11_ADDRESS 0x00000054 +#define AR9888_WLAN_GPIO_PIN12_ADDRESS 0x00000058 +#define AR9888_WLAN_GPIO_PIN13_ADDRESS 0x0000005c +#define AR9888_SOC_CPU_CLOCK_STANDARD_LSB 0 +#define AR9888_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +#define AR9888_SOC_LPO_CAL_ENABLE_LSB 20 +#define AR9888_SOC_LPO_CAL_ENABLE_MASK 0x00100000 +#define AR9888_WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000 + +#define AR9888_WLAN_SYSTEM_SLEEP_DISABLE_LSB 0 +#define AR9888_WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 +#define AR9888_WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008 +#define AR9888_WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004 +#define AR9888_SI_CONFIG_BIDIR_OD_DATA_LSB 18 +#define AR9888_SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 +#define AR9888_SI_CONFIG_I2C_LSB 16 +#define AR9888_SI_CONFIG_I2C_MASK 0x00010000 +#define AR9888_SI_CONFIG_POS_SAMPLE_LSB 7 +#define AR9888_SI_CONFIG_POS_SAMPLE_MASK 0x00000080 +#define AR9888_SI_CONFIG_INACTIVE_CLK_LSB 4 +#define AR9888_SI_CONFIG_INACTIVE_CLK_MASK 0x00000010 +#define AR9888_SI_CONFIG_INACTIVE_DATA_LSB 5 +#define AR9888_SI_CONFIG_INACTIVE_DATA_MASK 0x00000020 +#define AR9888_SI_CONFIG_DIVIDER_LSB 0 +#define AR9888_SI_CONFIG_DIVIDER_MASK 0x0000000f +#define AR9888_SI_CONFIG_OFFSET 0x00000000 +#define AR9888_SI_TX_DATA0_OFFSET 0x00000008 +#define AR9888_SI_TX_DATA1_OFFSET 0x0000000c +#define AR9888_SI_RX_DATA0_OFFSET 0x00000010 +#define AR9888_SI_RX_DATA1_OFFSET 0x00000014 +#define AR9888_SI_CS_OFFSET 0x00000004 +#define AR9888_SI_CS_DONE_ERR_MASK 0x00000400 +#define AR9888_SI_CS_DONE_INT_MASK 0x00000200 +#define AR9888_SI_CS_START_LSB 8 +#define AR9888_SI_CS_START_MASK 0x00000100 +#define AR9888_SI_CS_RX_CNT_LSB 4 +#define AR9888_SI_CS_RX_CNT_MASK 0x000000f0 +#define AR9888_SI_CS_TX_CNT_LSB 0 +#define AR9888_SI_CS_TX_CNT_MASK 0x0000000f +#define AR9888_CE_COUNT 8 +#define AR9888_SR_WR_INDEX_ADDRESS 0x003c +#define AR9888_DST_WATERMARK_ADDRESS 0x0050 +#define AR9888_RX_MSDU_END_4_FIRST_MSDU_LSB 14 +#define AR9888_RX_MSDU_END_4_FIRST_MSDU_MASK 0x00004000 +#define AR9888_RX_MPDU_START_0_SEQ_NUM_LSB 16 +#define AR9888_RX_MPDU_START_0_SEQ_NUM_MASK 0x0fff0000 +#define AR9888_RX_MPDU_START_2_PN_47_32_LSB 0 +#define AR9888_RX_MPDU_START_2_PN_47_32_MASK 0x0000ffff +#define AR9888_RX_MSDU_END_1_KEY_ID_OCT_MASK 0x000000ff +#define AR9888_RX_MSDU_END_1_KEY_ID_OCT_LSB 0 +#define AR9888_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB 16 +#define AR9888_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK 0xffff0000 +#define AR9888_RX_MSDU_END_4_LAST_MSDU_LSB 15 +#define AR9888_RX_MSDU_END_4_LAST_MSDU_MASK 0x00008000 +#define AR9888_RX_ATTENTION_0_MCAST_BCAST_LSB 2 +#define AR9888_RX_ATTENTION_0_MCAST_BCAST_MASK 0x00000004 +#define AR9888_RX_ATTENTION_0_FRAGMENT_LSB 13 +#define AR9888_RX_ATTENTION_0_FRAGMENT_MASK 0x00002000 +#define AR9888_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK 0x08000000 +#define AR9888_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB 16 +#define AR9888_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK 0x00ff0000 +#define AR9888_RX_MSDU_START_0_MSDU_LENGTH_LSB 0 +#define AR9888_RX_MSDU_START_0_MSDU_LENGTH_MASK 0x00003fff +#define AR9888_RX_MSDU_START_2_DECAP_FORMAT_OFFSET 0x00000008 +#define AR9888_RX_MSDU_START_2_DECAP_FORMAT_LSB 8 +#define AR9888_RX_MSDU_START_2_DECAP_FORMAT_MASK 0x00000300 +#define AR9888_RX_MPDU_START_0_ENCRYPTED_LSB 13 +#define AR9888_RX_MPDU_START_0_ENCRYPTED_MASK 0x00002000 +#define AR9888_RX_ATTENTION_0_MORE_DATA_MASK 0x00000400 +#define AR9888_RX_ATTENTION_0_MSDU_DONE_MASK 0x80000000 +#define AR9888_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK 0x00040000 +#define AR9888_DST_WR_INDEX_ADDRESS 0x0040 +#define AR9888_SRC_WATERMARK_ADDRESS 0x004c +#define AR9888_SRC_WATERMARK_LOW_MASK 0xffff0000 +#define AR9888_SRC_WATERMARK_HIGH_MASK 0x0000ffff +#define AR9888_DST_WATERMARK_LOW_MASK 0xffff0000 +#define AR9888_DST_WATERMARK_HIGH_MASK 0x0000ffff +#define AR9888_CURRENT_SRRI_ADDRESS 0x0044 +#define AR9888_CURRENT_DRRI_ADDRESS 0x0048 +#define AR9888_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002 +#define AR9888_HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004 +#define AR9888_HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008 +#define AR9888_HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010 +#define AR9888_HOST_IS_ADDRESS 0x0030 +#define AR9888_HOST_IS_COPY_COMPLETE_MASK 0x00000001 +#define AR9888_HOST_IE_ADDRESS 0x002c +#define AR9888_HOST_IE_COPY_COMPLETE_MASK 0x00000001 +#define AR9888_SR_BA_ADDRESS 0x0000 +#define AR9888_SR_SIZE_ADDRESS 0x0004 +#define AR9888_DR_BA_ADDRESS 0x0008 +#define AR9888_DR_SIZE_ADDRESS 0x000c +#define AR9888_MISC_IE_ADDRESS 0x0034 +#define AR9888_MISC_IS_AXI_ERR_MASK 0x00000400 +#define AR9888_MISC_IS_DST_ADDR_ERR_MASK 0x00000200 +#define AR9888_MISC_IS_SRC_LEN_ERR_MASK 0x00000100 +#define AR9888_MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080 +#define AR9888_MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040 +#define AR9888_MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020 +#define AR9888_SRC_WATERMARK_LOW_LSB 16 +#define AR9888_SRC_WATERMARK_HIGH_LSB 0 +#define AR9888_DST_WATERMARK_LOW_LSB 16 +#define AR9888_DST_WATERMARK_HIGH_LSB 0 +#define AR9888_SOC_GLOBAL_RESET_ADDRESS 0x0008 +#define AR9888_RTC_STATE_ADDRESS 0x0000 +#define AR9888_RTC_STATE_COLD_RESET_MASK 0x00000400 + +#define AR9888_RTC_STATE_V_MASK 0x00000007 +#define AR9888_RTC_STATE_V_LSB 0 +#define AR9888_RTC_STATE_V_ON 3 +#define AR9888_FW_IND_EVENT_PENDING 1 +#define AR9888_FW_IND_INITIALIZED 2 +#define AR9888_CPU_INTR_ADDRESS 0x0010 +#define AR9888_SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050 +#define AR9888_SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004 +#define AR9888_SOC_LF_TIMER_STATUS0_ADDRESS 0x00000054 +#define AR9888_SOC_RESET_CONTROL_ADDRESS 0x00000000 +#define AR9888_SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040 +#define AR9888_CORE_CTRL_ADDRESS 0x0000 +#define AR9888_CORE_CTRL_CPU_INTR_MASK 0x00002000 +#define AR9888_LOCAL_SCRATCH_OFFSET 0x18 +#define AR9888_CLOCK_GPIO_OFFSET 0xffffffff +#define AR9888_CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0 +#define AR9888_CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 + +#define AR9888_FW_EVENT_PENDING_ADDRESS \ + (AR9888_SOC_CORE_BASE_ADDRESS + AR9888_SCRATCH_3_ADDRESS) +#define AR9888_DRAM_BASE_ADDRESS AR9888_TARG_DRAM_START +#define AR9888_FW_INDICATOR_ADDRESS \ + (AR9888_SOC_CORE_BASE_ADDRESS + AR9888_SCRATCH_3_ADDRESS) +#define AR9888_SYSTEM_SLEEP_OFFSET AR9888_SOC_SYSTEM_SLEEP_OFFSET +#define AR9888_WLAN_SYSTEM_SLEEP_OFFSET AR9888_SOC_SYSTEM_SLEEP_OFFSET +#define AR9888_WLAN_RESET_CONTROL_OFFSET AR9888_SOC_RESET_CONTROL_OFFSET +#define AR9888_CLOCK_CONTROL_OFFSET AR9888_SOC_CLOCK_CONTROL_OFFSET +#define AR9888_CLOCK_CONTROL_SI0_CLK_MASK AR9888_SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define AR9888_RESET_CONTROL_MBOX_RST_MASK MISSING +#define AR9888_RESET_CONTROL_SI0_RST_MASK AR9888_SOC_RESET_CONTROL_SI0_RST_MASK +#define AR9888_GPIO_BASE_ADDRESS AR9888_WLAN_GPIO_BASE_ADDRESS +#define AR9888_GPIO_PIN0_OFFSET AR9888_WLAN_GPIO_PIN0_ADDRESS +#define AR9888_GPIO_PIN1_OFFSET AR9888_WLAN_GPIO_PIN1_ADDRESS +#define AR9888_GPIO_PIN0_CONFIG_MASK AR9888_WLAN_GPIO_PIN0_CONFIG_MASK +#define AR9888_GPIO_PIN1_CONFIG_MASK AR9888_WLAN_GPIO_PIN1_CONFIG_MASK +#define AR9888_SI_BASE_ADDRESS AR9888_WLAN_SI_BASE_ADDRESS +#define AR9888_SCRATCH_BASE_ADDRESS AR9888_SOC_CORE_BASE_ADDRESS +#define AR9888_CPU_CLOCK_OFFSET AR9888_SOC_CPU_CLOCK_OFFSET +#define AR9888_LPO_CAL_OFFSET AR9888_SOC_LPO_CAL_OFFSET +#define AR9888_GPIO_PIN10_OFFSET AR9888_WLAN_GPIO_PIN10_ADDRESS +#define AR9888_GPIO_PIN11_OFFSET AR9888_WLAN_GPIO_PIN11_ADDRESS +#define AR9888_GPIO_PIN12_OFFSET AR9888_WLAN_GPIO_PIN12_ADDRESS +#define AR9888_GPIO_PIN13_OFFSET AR9888_WLAN_GPIO_PIN13_ADDRESS +#define AR9888_CPU_CLOCK_STANDARD_LSB AR9888_SOC_CPU_CLOCK_STANDARD_LSB +#define AR9888_CPU_CLOCK_STANDARD_MASK AR9888_SOC_CPU_CLOCK_STANDARD_MASK +#define AR9888_LPO_CAL_ENABLE_LSB AR9888_SOC_LPO_CAL_ENABLE_LSB +#define AR9888_LPO_CAL_ENABLE_MASK AR9888_SOC_LPO_CAL_ENABLE_MASK +#define AR9888_ANALOG_INTF_BASE_ADDRESS AR9888_WLAN_ANALOG_INTF_BASE_ADDRESS +#define AR9888_MBOX_BASE_ADDRESS MISSING +#define AR9888_INT_STATUS_ENABLE_ERROR_LSB MISSING +#define AR9888_INT_STATUS_ENABLE_ERROR_MASK MISSING +#define AR9888_INT_STATUS_ENABLE_CPU_LSB MISSING +#define AR9888_INT_STATUS_ENABLE_CPU_MASK MISSING +#define AR9888_INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define AR9888_INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define AR9888_INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define AR9888_INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define AR9888_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define AR9888_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define AR9888_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define AR9888_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define AR9888_COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define AR9888_COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define AR9888_INT_STATUS_ENABLE_ADDRESS MISSING +#define AR9888_CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define AR9888_CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define AR9888_HOST_INT_STATUS_ADDRESS MISSING +#define AR9888_CPU_INT_STATUS_ADDRESS MISSING +#define AR9888_ERROR_INT_STATUS_ADDRESS MISSING +#define AR9888_ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define AR9888_ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define AR9888_ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define AR9888_ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define AR9888_ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define AR9888_ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define AR9888_COUNT_DEC_ADDRESS MISSING +#define AR9888_HOST_INT_STATUS_CPU_MASK MISSING +#define AR9888_HOST_INT_STATUS_CPU_LSB MISSING +#define AR9888_HOST_INT_STATUS_ERROR_MASK MISSING +#define AR9888_HOST_INT_STATUS_ERROR_LSB MISSING +#define AR9888_HOST_INT_STATUS_COUNTER_MASK MISSING +#define AR9888_HOST_INT_STATUS_COUNTER_LSB MISSING +#define AR9888_RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define AR9888_WINDOW_DATA_ADDRESS MISSING +#define AR9888_WINDOW_READ_ADDR_ADDRESS MISSING +#define AR9888_WINDOW_WRITE_ADDR_ADDRESS MISSING +#define AR9888_HOST_INT_STATUS_MBOX_DATA_MASK 0x0f +#define AR9888_HOST_INT_STATUS_MBOX_DATA_LSB 0 + +struct targetdef_s ar9888_targetdef = { + .d_RTC_SOC_BASE_ADDRESS = AR9888_RTC_SOC_BASE_ADDRESS, + .d_RTC_WMAC_BASE_ADDRESS = AR9888_RTC_WMAC_BASE_ADDRESS, + .d_SYSTEM_SLEEP_OFFSET = AR9888_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_OFFSET = AR9888_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_DISABLE_LSB = + AR9888_WLAN_SYSTEM_SLEEP_DISABLE_LSB, + .d_WLAN_SYSTEM_SLEEP_DISABLE_MASK = + AR9888_WLAN_SYSTEM_SLEEP_DISABLE_MASK, + .d_CLOCK_CONTROL_OFFSET = AR9888_CLOCK_CONTROL_OFFSET, + .d_CLOCK_CONTROL_SI0_CLK_MASK = AR9888_CLOCK_CONTROL_SI0_CLK_MASK, + .d_RESET_CONTROL_OFFSET = AR9888_SOC_RESET_CONTROL_OFFSET, + .d_RESET_CONTROL_MBOX_RST_MASK = AR9888_RESET_CONTROL_MBOX_RST_MASK, + .d_RESET_CONTROL_SI0_RST_MASK = AR9888_RESET_CONTROL_SI0_RST_MASK, + .d_WLAN_RESET_CONTROL_OFFSET = AR9888_WLAN_RESET_CONTROL_OFFSET, + .d_WLAN_RESET_CONTROL_COLD_RST_MASK = + AR9888_WLAN_RESET_CONTROL_COLD_RST_MASK, + .d_WLAN_RESET_CONTROL_WARM_RST_MASK = + AR9888_WLAN_RESET_CONTROL_WARM_RST_MASK, + .d_GPIO_BASE_ADDRESS = AR9888_GPIO_BASE_ADDRESS, + .d_GPIO_PIN0_OFFSET = AR9888_GPIO_PIN0_OFFSET, + .d_GPIO_PIN1_OFFSET = AR9888_GPIO_PIN1_OFFSET, + .d_GPIO_PIN0_CONFIG_MASK = AR9888_GPIO_PIN0_CONFIG_MASK, + .d_GPIO_PIN1_CONFIG_MASK = AR9888_GPIO_PIN1_CONFIG_MASK, + .d_SI_CONFIG_BIDIR_OD_DATA_LSB = AR9888_SI_CONFIG_BIDIR_OD_DATA_LSB, + .d_SI_CONFIG_BIDIR_OD_DATA_MASK = AR9888_SI_CONFIG_BIDIR_OD_DATA_MASK, + .d_SI_CONFIG_I2C_LSB = AR9888_SI_CONFIG_I2C_LSB, + .d_SI_CONFIG_I2C_MASK = AR9888_SI_CONFIG_I2C_MASK, + .d_SI_CONFIG_POS_SAMPLE_LSB = AR9888_SI_CONFIG_POS_SAMPLE_LSB, + .d_SI_CONFIG_POS_SAMPLE_MASK = AR9888_SI_CONFIG_POS_SAMPLE_MASK, + .d_SI_CONFIG_INACTIVE_CLK_LSB = AR9888_SI_CONFIG_INACTIVE_CLK_LSB, + .d_SI_CONFIG_INACTIVE_CLK_MASK = AR9888_SI_CONFIG_INACTIVE_CLK_MASK, + .d_SI_CONFIG_INACTIVE_DATA_LSB = AR9888_SI_CONFIG_INACTIVE_DATA_LSB, + .d_SI_CONFIG_INACTIVE_DATA_MASK = AR9888_SI_CONFIG_INACTIVE_DATA_MASK, + .d_SI_CONFIG_DIVIDER_LSB = AR9888_SI_CONFIG_DIVIDER_LSB, + .d_SI_CONFIG_DIVIDER_MASK = AR9888_SI_CONFIG_DIVIDER_MASK, + .d_SI_BASE_ADDRESS = AR9888_SI_BASE_ADDRESS, + .d_SI_CONFIG_OFFSET = AR9888_SI_CONFIG_OFFSET, + .d_SI_TX_DATA0_OFFSET = AR9888_SI_TX_DATA0_OFFSET, + .d_SI_TX_DATA1_OFFSET = AR9888_SI_TX_DATA1_OFFSET, + .d_SI_RX_DATA0_OFFSET = AR9888_SI_RX_DATA0_OFFSET, + .d_SI_RX_DATA1_OFFSET = AR9888_SI_RX_DATA1_OFFSET, + .d_SI_CS_OFFSET = AR9888_SI_CS_OFFSET, + .d_SI_CS_DONE_ERR_MASK = AR9888_SI_CS_DONE_ERR_MASK, + .d_SI_CS_DONE_INT_MASK = AR9888_SI_CS_DONE_INT_MASK, + .d_SI_CS_START_LSB = AR9888_SI_CS_START_LSB, + .d_SI_CS_START_MASK = AR9888_SI_CS_START_MASK, + .d_SI_CS_RX_CNT_LSB = AR9888_SI_CS_RX_CNT_LSB, + .d_SI_CS_RX_CNT_MASK = AR9888_SI_CS_RX_CNT_MASK, + .d_SI_CS_TX_CNT_LSB = AR9888_SI_CS_TX_CNT_LSB, + .d_SI_CS_TX_CNT_MASK = AR9888_SI_CS_TX_CNT_MASK, + .d_BOARD_DATA_SZ = AR9888_BOARD_DATA_SZ, + .d_BOARD_EXT_DATA_SZ = AR9888_BOARD_EXT_DATA_SZ, + .d_MBOX_BASE_ADDRESS = AR9888_MBOX_BASE_ADDRESS, + .d_LOCAL_SCRATCH_OFFSET = AR9888_LOCAL_SCRATCH_OFFSET, + .d_CPU_CLOCK_OFFSET = AR9888_CPU_CLOCK_OFFSET, + .d_LPO_CAL_OFFSET = AR9888_LPO_CAL_OFFSET, + .d_GPIO_PIN10_OFFSET = AR9888_GPIO_PIN10_OFFSET, + .d_GPIO_PIN11_OFFSET = AR9888_GPIO_PIN11_OFFSET, + .d_GPIO_PIN12_OFFSET = AR9888_GPIO_PIN12_OFFSET, + .d_GPIO_PIN13_OFFSET = AR9888_GPIO_PIN13_OFFSET, + .d_CLOCK_GPIO_OFFSET = AR9888_CLOCK_GPIO_OFFSET, + .d_CPU_CLOCK_STANDARD_LSB = AR9888_CPU_CLOCK_STANDARD_LSB, + .d_CPU_CLOCK_STANDARD_MASK = AR9888_CPU_CLOCK_STANDARD_MASK, + .d_LPO_CAL_ENABLE_LSB = AR9888_LPO_CAL_ENABLE_LSB, + .d_LPO_CAL_ENABLE_MASK = AR9888_LPO_CAL_ENABLE_MASK, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB = AR9888_CLOCK_GPIO_BT_CLK_OUT_EN_LSB, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK = + AR9888_CLOCK_GPIO_BT_CLK_OUT_EN_MASK, + .d_ANALOG_INTF_BASE_ADDRESS = AR9888_ANALOG_INTF_BASE_ADDRESS, + .d_WLAN_MAC_BASE_ADDRESS = AR9888_WLAN_MAC_BASE_ADDRESS, + .d_FW_INDICATOR_ADDRESS = AR9888_FW_INDICATOR_ADDRESS, + .d_DRAM_BASE_ADDRESS = AR9888_DRAM_BASE_ADDRESS, + .d_SOC_CORE_BASE_ADDRESS = AR9888_SOC_CORE_BASE_ADDRESS, + .d_CORE_CTRL_ADDRESS = AR9888_CORE_CTRL_ADDRESS, +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) + .d_MSI_NUM_REQUEST = MSI_NUM_REQUEST, + .d_MSI_ASSIGN_FW = MSI_ASSIGN_FW, +#endif + .d_CORE_CTRL_CPU_INTR_MASK = AR9888_CORE_CTRL_CPU_INTR_MASK, + .d_SR_WR_INDEX_ADDRESS = AR9888_SR_WR_INDEX_ADDRESS, + .d_DST_WATERMARK_ADDRESS = AR9888_DST_WATERMARK_ADDRESS, + /* htt_rx.c */ + .d_RX_MSDU_END_4_FIRST_MSDU_MASK = + AR9888_RX_MSDU_END_4_FIRST_MSDU_MASK, + .d_RX_MSDU_END_4_FIRST_MSDU_LSB = AR9888_RX_MSDU_END_4_FIRST_MSDU_LSB, + .d_RX_MPDU_START_0_SEQ_NUM_MASK = AR9888_RX_MPDU_START_0_SEQ_NUM_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_LSB = AR9888_RX_MPDU_START_0_SEQ_NUM_LSB, + .d_RX_MPDU_START_2_PN_47_32_LSB = AR9888_RX_MPDU_START_2_PN_47_32_LSB, + .d_RX_MPDU_START_2_PN_47_32_MASK = + AR9888_RX_MPDU_START_2_PN_47_32_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK = + AR9888_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB = + AR9888_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB, + .d_RX_MSDU_END_1_KEY_ID_OCT_MASK = + AR9888_RX_MSDU_END_1_KEY_ID_OCT_MASK, + .d_RX_MSDU_END_1_KEY_ID_OCT_LSB = AR9888_RX_MSDU_END_1_KEY_ID_OCT_LSB, + .d_RX_MSDU_END_4_LAST_MSDU_MASK = AR9888_RX_MSDU_END_4_LAST_MSDU_MASK, + .d_RX_MSDU_END_4_LAST_MSDU_LSB = AR9888_RX_MSDU_END_4_LAST_MSDU_LSB, + .d_RX_ATTENTION_0_MCAST_BCAST_MASK = + AR9888_RX_ATTENTION_0_MCAST_BCAST_MASK, + .d_RX_ATTENTION_0_MCAST_BCAST_LSB = + AR9888_RX_ATTENTION_0_MCAST_BCAST_LSB, + .d_RX_ATTENTION_0_FRAGMENT_MASK = AR9888_RX_ATTENTION_0_FRAGMENT_MASK, + .d_RX_ATTENTION_0_FRAGMENT_LSB = AR9888_RX_ATTENTION_0_FRAGMENT_LSB, + .d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK = + AR9888_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK = + AR9888_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB = + AR9888_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB, + .d_RX_MSDU_START_0_MSDU_LENGTH_MASK = + AR9888_RX_MSDU_START_0_MSDU_LENGTH_MASK, + .d_RX_MSDU_START_0_MSDU_LENGTH_LSB = + AR9888_RX_MSDU_START_0_MSDU_LENGTH_LSB, + .d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET = + AR9888_RX_MSDU_START_2_DECAP_FORMAT_OFFSET, + .d_RX_MSDU_START_2_DECAP_FORMAT_MASK = + AR9888_RX_MSDU_START_2_DECAP_FORMAT_MASK, + .d_RX_MSDU_START_2_DECAP_FORMAT_LSB = + AR9888_RX_MSDU_START_2_DECAP_FORMAT_LSB, + .d_RX_MPDU_START_0_ENCRYPTED_MASK = + AR9888_RX_MPDU_START_0_ENCRYPTED_MASK, + .d_RX_MPDU_START_0_ENCRYPTED_LSB = + AR9888_RX_MPDU_START_0_ENCRYPTED_LSB, + .d_RX_ATTENTION_0_MORE_DATA_MASK = + AR9888_RX_ATTENTION_0_MORE_DATA_MASK, + .d_RX_ATTENTION_0_MSDU_DONE_MASK = + AR9888_RX_ATTENTION_0_MSDU_DONE_MASK, + .d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK = + AR9888_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK, +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) + .d_CE_COUNT = AR9888_CE_COUNT, + .d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL, + .d_PCIE_INTR_ENABLE_ADDRESS = AR9888_PCIE_INTR_ENABLE_ADDRESS, + .d_PCIE_INTR_CLR_ADDRESS = AR9888_PCIE_INTR_CLR_ADDRESS, + .d_PCIE_INTR_FIRMWARE_MASK = AR9888_PCIE_INTR_FIRMWARE_MASK, + .d_PCIE_INTR_CE_MASK_ALL = AR9888_PCIE_INTR_CE_MASK_ALL, + .d_PCIE_INTR_CAUSE_ADDRESS = AR9888_PCIE_INTR_CAUSE_ADDRESS, + .d_SOC_RESET_CONTROL_ADDRESS = AR9888_SOC_RESET_CONTROL_ADDRESS, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK = + AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB = + AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB, + .d_SOC_RESET_CONTROL_CE_RST_MASK = + AR9888_SOC_RESET_CONTROL_CE_RST_MASK, +#endif + .d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK = + AR9888_SOC_RESET_CONTROL_CPU_WARM_RST_MASK, + .d_CPU_INTR_ADDRESS = AR9888_CPU_INTR_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ADDRESS = + AR9888_SOC_LF_TIMER_CONTROL0_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK = + AR9888_SOC_LF_TIMER_CONTROL0_ENABLE_MASK, + .d_SOC_LF_TIMER_STATUS0_ADDRESS = + AR9888_SOC_LF_TIMER_STATUS0_ADDRESS, +}; + +struct hostdef_s ar9888_hostdef = { + .d_INT_STATUS_ENABLE_ERROR_LSB = AR9888_INT_STATUS_ENABLE_ERROR_LSB, + .d_INT_STATUS_ENABLE_ERROR_MASK = AR9888_INT_STATUS_ENABLE_ERROR_MASK, + .d_INT_STATUS_ENABLE_CPU_LSB = AR9888_INT_STATUS_ENABLE_CPU_LSB, + .d_INT_STATUS_ENABLE_CPU_MASK = AR9888_INT_STATUS_ENABLE_CPU_MASK, + .d_INT_STATUS_ENABLE_COUNTER_LSB = + AR9888_INT_STATUS_ENABLE_COUNTER_LSB, + .d_INT_STATUS_ENABLE_COUNTER_MASK = + AR9888_INT_STATUS_ENABLE_COUNTER_MASK, + .d_INT_STATUS_ENABLE_MBOX_DATA_LSB = + AR9888_INT_STATUS_ENABLE_MBOX_DATA_LSB, + .d_INT_STATUS_ENABLE_MBOX_DATA_MASK = + AR9888_INT_STATUS_ENABLE_MBOX_DATA_MASK, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB = + AR9888_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK = + AR9888_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB = + AR9888_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK = + AR9888_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, + .d_COUNTER_INT_STATUS_ENABLE_BIT_LSB = + AR9888_COUNTER_INT_STATUS_ENABLE_BIT_LSB, + .d_COUNTER_INT_STATUS_ENABLE_BIT_MASK = + AR9888_COUNTER_INT_STATUS_ENABLE_BIT_MASK, + .d_INT_STATUS_ENABLE_ADDRESS = AR9888_INT_STATUS_ENABLE_ADDRESS, + .d_CPU_INT_STATUS_ENABLE_BIT_LSB = + AR9888_CPU_INT_STATUS_ENABLE_BIT_LSB, + .d_CPU_INT_STATUS_ENABLE_BIT_MASK = + AR9888_CPU_INT_STATUS_ENABLE_BIT_MASK, + .d_HOST_INT_STATUS_ADDRESS = AR9888_HOST_INT_STATUS_ADDRESS, + .d_CPU_INT_STATUS_ADDRESS = AR9888_CPU_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_ADDRESS = AR9888_ERROR_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_WAKEUP_MASK = AR9888_ERROR_INT_STATUS_WAKEUP_MASK, + .d_ERROR_INT_STATUS_WAKEUP_LSB = AR9888_ERROR_INT_STATUS_WAKEUP_LSB, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK = + AR9888_ERROR_INT_STATUS_RX_UNDERFLOW_MASK, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB = + AR9888_ERROR_INT_STATUS_RX_UNDERFLOW_LSB, + .d_ERROR_INT_STATUS_TX_OVERFLOW_MASK = + AR9888_ERROR_INT_STATUS_TX_OVERFLOW_MASK, + .d_ERROR_INT_STATUS_TX_OVERFLOW_LSB = + AR9888_ERROR_INT_STATUS_TX_OVERFLOW_LSB, + .d_COUNT_DEC_ADDRESS = AR9888_COUNT_DEC_ADDRESS, + .d_HOST_INT_STATUS_CPU_MASK = AR9888_HOST_INT_STATUS_CPU_MASK, + .d_HOST_INT_STATUS_CPU_LSB = AR9888_HOST_INT_STATUS_CPU_LSB, + .d_HOST_INT_STATUS_ERROR_MASK = AR9888_HOST_INT_STATUS_ERROR_MASK, + .d_HOST_INT_STATUS_ERROR_LSB = AR9888_HOST_INT_STATUS_ERROR_LSB, + .d_HOST_INT_STATUS_COUNTER_MASK = AR9888_HOST_INT_STATUS_COUNTER_MASK, + .d_HOST_INT_STATUS_COUNTER_LSB = AR9888_HOST_INT_STATUS_COUNTER_LSB, + .d_RX_LOOKAHEAD_VALID_ADDRESS = AR9888_RX_LOOKAHEAD_VALID_ADDRESS, + .d_WINDOW_DATA_ADDRESS = AR9888_WINDOW_DATA_ADDRESS, + .d_WINDOW_READ_ADDR_ADDRESS = AR9888_WINDOW_READ_ADDR_ADDRESS, + .d_WINDOW_WRITE_ADDR_ADDRESS = AR9888_WINDOW_WRITE_ADDR_ADDRESS, + .d_SOC_GLOBAL_RESET_ADDRESS = AR9888_SOC_GLOBAL_RESET_ADDRESS, + .d_RTC_STATE_ADDRESS = AR9888_RTC_STATE_ADDRESS, + .d_RTC_STATE_COLD_RESET_MASK = AR9888_RTC_STATE_COLD_RESET_MASK, + .d_RTC_STATE_V_MASK = AR9888_RTC_STATE_V_MASK, + .d_RTC_STATE_V_LSB = AR9888_RTC_STATE_V_LSB, + .d_FW_IND_EVENT_PENDING = AR9888_FW_IND_EVENT_PENDING, + .d_FW_IND_INITIALIZED = AR9888_FW_IND_INITIALIZED, + .d_RTC_STATE_V_ON = AR9888_RTC_STATE_V_ON, +#if defined(SDIO_3_0) + .d_HOST_INT_STATUS_MBOX_DATA_MASK = + AR9888_HOST_INT_STATUS_MBOX_DATA_MASK, + .d_HOST_INT_STATUS_MBOX_DATA_LSB = + AR9888_HOST_INT_STATUS_MBOX_DATA_LSB, +#endif +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) + .d_MUX_ID_MASK = AR9888_MUX_ID_MASK, + .d_TRANSACTION_ID_MASK = AR9888_TRANSACTION_ID_MASK, + .d_PCIE_LOCAL_BASE_ADDRESS = AR9888_PCIE_LOCAL_BASE_ADDRESS, + .d_PCIE_SOC_WAKE_RESET = AR9888_PCIE_SOC_WAKE_RESET, + .d_PCIE_SOC_WAKE_ADDRESS = AR9888_PCIE_SOC_WAKE_ADDRESS, + .d_PCIE_SOC_WAKE_V_MASK = AR9888_PCIE_SOC_WAKE_V_MASK, + .d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS, + .d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK, + .d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS, + .d_MSI_MAGIC_ADR_ADDRESS = MSI_MAGIC_ADR_ADDRESS, + .d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS, + .d_HOST_CE_COUNT = 8, + .d_ENABLE_MSI = 0, +#endif +}; + +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) +struct ce_reg_def ar9888_ce_targetdef = { + /* copy_engine.c */ + .d_DST_WR_INDEX_ADDRESS = AR9888_DST_WR_INDEX_ADDRESS, + .d_SRC_WATERMARK_ADDRESS = AR9888_SRC_WATERMARK_ADDRESS, + .d_SRC_WATERMARK_LOW_MASK = AR9888_SRC_WATERMARK_LOW_MASK, + .d_SRC_WATERMARK_HIGH_MASK = AR9888_SRC_WATERMARK_HIGH_MASK, + .d_DST_WATERMARK_LOW_MASK = AR9888_DST_WATERMARK_LOW_MASK, + .d_DST_WATERMARK_HIGH_MASK = AR9888_DST_WATERMARK_HIGH_MASK, + .d_CURRENT_SRRI_ADDRESS = AR9888_CURRENT_SRRI_ADDRESS, + .d_CURRENT_DRRI_ADDRESS = AR9888_CURRENT_DRRI_ADDRESS, + .d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK = + AR9888_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK = + AR9888_HOST_IS_SRC_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK = + AR9888_HOST_IS_DST_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_DST_RING_LOW_WATERMARK_MASK = + AR9888_HOST_IS_DST_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_ADDRESS = AR9888_HOST_IS_ADDRESS, + .d_HOST_IS_COPY_COMPLETE_MASK = AR9888_HOST_IS_COPY_COMPLETE_MASK, + .d_CE_WRAPPER_BASE_ADDRESS = AR9888_CE_WRAPPER_BASE_ADDRESS, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS = + AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS, + .d_HOST_IE_ADDRESS = AR9888_HOST_IE_ADDRESS, + .d_HOST_IE_COPY_COMPLETE_MASK = AR9888_HOST_IE_COPY_COMPLETE_MASK, + .d_SR_BA_ADDRESS = AR9888_SR_BA_ADDRESS, + .d_SR_SIZE_ADDRESS = AR9888_SR_SIZE_ADDRESS, + .d_CE_CTRL1_ADDRESS = AR9888_CE_CTRL1_ADDRESS, + .d_CE_CTRL1_DMAX_LENGTH_MASK = AR9888_CE_CTRL1_DMAX_LENGTH_MASK, + .d_DR_BA_ADDRESS = AR9888_DR_BA_ADDRESS, + .d_DR_SIZE_ADDRESS = AR9888_DR_SIZE_ADDRESS, + .d_MISC_IE_ADDRESS = AR9888_MISC_IE_ADDRESS, + .d_MISC_IS_AXI_ERR_MASK = AR9888_MISC_IS_AXI_ERR_MASK, + .d_MISC_IS_DST_ADDR_ERR_MASK = AR9888_MISC_IS_DST_ADDR_ERR_MASK, + .d_MISC_IS_SRC_LEN_ERR_MASK = AR9888_MISC_IS_SRC_LEN_ERR_MASK, + .d_MISC_IS_DST_MAX_LEN_VIO_MASK = AR9888_MISC_IS_DST_MAX_LEN_VIO_MASK, + .d_MISC_IS_DST_RING_OVERFLOW_MASK = + AR9888_MISC_IS_DST_RING_OVERFLOW_MASK, + .d_MISC_IS_SRC_RING_OVERFLOW_MASK = + AR9888_MISC_IS_SRC_RING_OVERFLOW_MASK, + .d_SRC_WATERMARK_LOW_LSB = AR9888_SRC_WATERMARK_LOW_LSB, + .d_SRC_WATERMARK_HIGH_LSB = AR9888_SRC_WATERMARK_HIGH_LSB, + .d_DST_WATERMARK_LOW_LSB = AR9888_DST_WATERMARK_LOW_LSB, + .d_DST_WATERMARK_HIGH_LSB = AR9888_DST_WATERMARK_HIGH_LSB, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK = + AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB = + AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB, + .d_CE_CTRL1_DMAX_LENGTH_LSB = AR9888_CE_CTRL1_DMAX_LENGTH_LSB, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK = + AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK = + AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB = + AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB = + AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB, + .d_CE0_BASE_ADDRESS = AR9888_CE0_BASE_ADDRESS, + .d_CE1_BASE_ADDRESS = AR9888_CE1_BASE_ADDRESS, + +}; +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ath_procfs.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ath_procfs.c new file mode 100644 index 0000000000000000000000000000000000000000..8e63dc8a7da86e759651717aeb8daa33a5f07811 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ath_procfs.c @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2013-2014, 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if defined(CONFIG_ATH_PROCFS_DIAG_SUPPORT) +#include /* Specifically, a module */ +#include /* We're doing kernel work */ +#include /* We're doing kernel work */ +#include /* Necessary because we use the proc fs */ +#include /* for copy_from_user */ +#include "hif.h" +#include "hif_main.h" +#if defined(HIF_USB) +#include "if_usb.h" +#endif +#if defined(HIF_SDIO) +#include "if_sdio.h" +#endif +#include "hif_debug.h" +#include "pld_common.h" +#include "target_type.h" + +#define PROCFS_NAME "athdiagpfs" +#ifdef MULTI_IF_NAME +#define PROCFS_DIR "cld" MULTI_IF_NAME +#else +#define PROCFS_DIR "cld" +#endif + +/** + * This structure hold information about the /proc file + * + */ +static struct proc_dir_entry *proc_file, *proc_dir; + +static void *get_hif_hdl_from_file(struct file *file) +{ + struct hif_opaque_softc *scn; + + scn = (struct hif_opaque_softc *)PDE_DATA(file_inode(file)); + return (void *)scn; +} + +static ssize_t ath_procfs_diag_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + hif_handle_t hif_hdl; + int rv; + uint8_t *read_buffer = NULL; + struct hif_softc *scn; + uint32_t offset = 0, memtype = 0; + struct hif_target_info *tgt_info; + + hif_hdl = get_hif_hdl_from_file(file); + scn = HIF_GET_SOFTC(hif_hdl); + + if (scn->bus_ops.hif_addr_in_boundary(hif_hdl, (uint32_t)(*pos))) + return -EINVAL; + + read_buffer = qdf_mem_malloc(count); + if (!read_buffer) + return -ENOMEM; + + HIF_DBG("rd buff 0x%pK cnt %zu offset 0x%x buf 0x%pK", + read_buffer, count, (int)*pos, buf); + + tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(hif_hdl)); + if ((scn->bus_type == QDF_BUS_TYPE_SNOC) || + (scn->bus_type == QDF_BUS_TYPE_PCI && + ((tgt_info->target_type == TARGET_TYPE_QCA6290) || + (tgt_info->target_type == TARGET_TYPE_QCA6390) || + (tgt_info->target_type == TARGET_TYPE_QCA6490) || + (tgt_info->target_type == TARGET_TYPE_QCA8074) || + (tgt_info->target_type == TARGET_TYPE_QCA8074V2) || + (tgt_info->target_type == TARGET_TYPE_QCN9000) || + (tgt_info->target_type == TARGET_TYPE_QCA6018))) || + (scn->bus_type == QDF_BUS_TYPE_IPCI && + (tgt_info->target_type == TARGET_TYPE_QCA6750))) { + memtype = ((uint32_t)(*pos) & 0xff000000) >> 24; + offset = (uint32_t)(*pos) & 0xffffff; + HIF_DBG("%s: offset 0x%x memtype 0x%x, datalen %zu\n", + __func__, offset, memtype, count); + rv = pld_athdiag_read(scn->qdf_dev->dev, + offset, memtype, count, + (uint8_t *)read_buffer); + goto out; + } + + if ((count == 4) && ((((uint32_t) (*pos)) & 3) == 0)) { + /* reading a word? */ + rv = hif_diag_read_access(hif_hdl, (uint32_t)(*pos), + (uint32_t *)read_buffer); + } else { + rv = hif_diag_read_mem(hif_hdl, (uint32_t)(*pos), + (uint8_t *)read_buffer, count); + } + +out: + if (rv) { + qdf_mem_free(read_buffer); + return -EIO; + } + + if (copy_to_user(buf, read_buffer, count)) { + qdf_mem_free(read_buffer); + HIF_ERROR("%s: copy_to_user error in /proc/%s", + __func__, PROCFS_NAME); + return -EFAULT; + } + qdf_mem_free(read_buffer); + return count; +} + +static ssize_t ath_procfs_diag_write(struct file *file, + const char __user *buf, + size_t count, loff_t *pos) +{ + hif_handle_t hif_hdl; + int rv; + uint8_t *write_buffer = NULL; + struct hif_softc *scn; + uint32_t offset = 0, memtype = 0; + struct hif_target_info *tgt_info; + + hif_hdl = get_hif_hdl_from_file(file); + scn = HIF_GET_SOFTC(hif_hdl); + + if (scn->bus_ops.hif_addr_in_boundary(hif_hdl, (uint32_t)(*pos))) + return -EINVAL; + + write_buffer = qdf_mem_malloc(count); + if (!write_buffer) + return -ENOMEM; + + if (copy_from_user(write_buffer, buf, count)) { + qdf_mem_free(write_buffer); + HIF_ERROR("%s: copy_to_user error in /proc/%s", + __func__, PROCFS_NAME); + return -EFAULT; + } + + HIF_DBG("wr buff 0x%pK buf 0x%pK cnt %zu offset 0x%x value 0x%x", + write_buffer, buf, count, + (int)*pos, *((uint32_t *) write_buffer)); + + tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(hif_hdl)); + if ((scn->bus_type == QDF_BUS_TYPE_SNOC) || + ((scn->bus_type == QDF_BUS_TYPE_PCI) && + ((tgt_info->target_type == TARGET_TYPE_QCA6290) || + (tgt_info->target_type == TARGET_TYPE_QCA6390) || + (tgt_info->target_type == TARGET_TYPE_QCA6490) || + (tgt_info->target_type == TARGET_TYPE_QCA8074) || + (tgt_info->target_type == TARGET_TYPE_QCA8074V2) || + (tgt_info->target_type == TARGET_TYPE_QCN9000) || + (tgt_info->target_type == TARGET_TYPE_QCA6018))) || + (scn->bus_type == QDF_BUS_TYPE_IPCI && + (tgt_info->target_type == TARGET_TYPE_QCA6750))) { + memtype = ((uint32_t)(*pos) & 0xff000000) >> 24; + offset = (uint32_t)(*pos) & 0xffffff; + HIF_DBG("%s: offset 0x%x memtype 0x%x, datalen %zu\n", + __func__, offset, memtype, count); + rv = pld_athdiag_write(scn->qdf_dev->dev, + offset, memtype, count, + (uint8_t *)write_buffer); + goto out; + } + + if ((count == 4) && ((((uint32_t) (*pos)) & 3) == 0)) { + /* reading a word? */ + uint32_t value = *((uint32_t *)write_buffer); + + rv = hif_diag_write_access(hif_hdl, (uint32_t)(*pos), value); + } else { + rv = hif_diag_write_mem(hif_hdl, (uint32_t)(*pos), + (uint8_t *)write_buffer, count); + } + +out: + + qdf_mem_free(write_buffer); + if (rv == 0) + return count; + else + return -EIO; +} + +static const struct file_operations athdiag_fops = { + .read = ath_procfs_diag_read, + .write = ath_procfs_diag_write, +}; + +/* + * This function is called when the module is loaded + * + */ +int athdiag_procfs_init(void *scn) +{ + proc_dir = proc_mkdir(PROCFS_DIR, NULL); + if (!proc_dir) { + remove_proc_entry(PROCFS_DIR, NULL); + HIF_ERROR("%s: Error: Could not initialize /proc/%s", + __func__, PROCFS_DIR); + return -ENOMEM; + } + + proc_file = proc_create_data(PROCFS_NAME, 0600, proc_dir, + &athdiag_fops, (void *)scn); + if (!proc_file) { + remove_proc_entry(PROCFS_NAME, proc_dir); + HIF_ERROR("%s: Could not initialize /proc/%s", + __func__, PROCFS_NAME); + return -ENOMEM; + } + + HIF_DBG("/proc/%s/%s created", PROCFS_DIR, PROCFS_NAME); + return 0; /* everything is ok */ +} + +/* + * This function is called when the module is unloaded + * + */ +void athdiag_procfs_remove(void) +{ + if (proc_dir) { + remove_proc_entry(PROCFS_NAME, proc_dir); + HIF_DBG("/proc/%s/%s removed", PROCFS_DIR, PROCFS_NAME); + remove_proc_entry(PROCFS_DIR, NULL); + HIF_DBG("/proc/%s removed", PROCFS_DIR); + proc_dir = NULL; + } +} +#else +int athdiag_procfs_init(void *scn) +{ + return 0; +} +void athdiag_procfs_remove(void) {} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_api.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_api.h new file mode 100644 index 0000000000000000000000000000000000000000..7b6b97be4814c4a7c1d61309993177777e41f6fe --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_api.h @@ -0,0 +1,577 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __COPY_ENGINE_API_H__ +#define __COPY_ENGINE_API_H__ + +#include "pld_common.h" +#include "ce_main.h" +#include "hif_main.h" + +/* TBDXXX: Use int return values for consistency with Target */ + +/* TBDXXX: Perhaps merge Host/Target-->common */ + +/* + * Copy Engine support: low-level Target-side Copy Engine API. + * This is a hardware access layer used by code that understands + * how to use copy engines. + */ + +/* + * A "struct CE_handle *" serves as an opaque pointer-sized + * handle to a specific copy engine. + */ +struct CE_handle; + +/* + * "Send Completion" callback type for Send Completion Notification. + * + * If a Send Completion callback is registered and one or more sends + * have completed, the callback is invoked. + * + * per_ce_send_context is a context supplied by the calling layer + * (via ce_send_cb_register). It is associated with a copy engine. + * + * per_transfer_send_context is context supplied by the calling layer + * (via the "send" call). It may be different for each invocation + * of send. + * + * The buffer parameter is the first byte sent of the first buffer + * sent (if more than one buffer). + * + * nbytes is the number of bytes of that buffer that were sent. + * + * transfer_id matches the value used when the buffer or + * buf_list was sent. + * + * Implementation note: Pops 1 completed send buffer from Source ring + */ +typedef void (*ce_send_cb)(struct CE_handle *copyeng, + void *per_ce_send_context, + void *per_transfer_send_context, + qdf_dma_addr_t buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int sw_index, + unsigned int hw_index, + uint32_t toeplitz_hash_result); + +/* + * "Buffer Received" callback type for Buffer Received Notification. + * + * Implementation note: Pops 1 completed recv buffer from Dest ring + */ +typedef void (*CE_recv_cb)(struct CE_handle *copyeng, + void *per_CE_recv_context, + void *per_transfer_recv_context, + qdf_dma_addr_t buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags); + +/* + * Copy Engine Watermark callback type. + * + * Allows upper layers to be notified when watermarks are reached: + * space is available and/or running short in a source ring + * buffers are exhausted and/or abundant in a destination ring + * + * The flags parameter indicates which condition triggered this + * callback. See CE_WM_FLAG_*. + * + * Watermark APIs are provided to allow upper layers "batch" + * descriptor processing and to allow upper layers to + * throttle/unthrottle. + */ +typedef void (*CE_watermark_cb)(struct CE_handle *copyeng, + void *per_CE_wm_context, unsigned int flags); + + +#define CE_WM_FLAG_SEND_HIGH 1 +#define CE_WM_FLAG_SEND_LOW 2 +#define CE_WM_FLAG_RECV_HIGH 4 +#define CE_WM_FLAG_RECV_LOW 8 +#define CE_HTT_TX_CE 4 + + +/** + * ce_service_srng_init() - Initialization routine for CE services + * in SRNG based targets + * Return : None + */ +void ce_service_srng_init(void); + +/** + * ce_service_legacy_init() - Initialization routine for CE services + * in legacy targets + * Return : None + */ +void ce_service_legacy_init(void); + +/* A list of buffers to be gathered and sent */ +struct ce_sendlist; + +/* Copy Engine settable attributes */ +struct CE_attr; + +/*==================Send=====================================================*/ + +/* ce_send flags */ +/* disable ring's byte swap, even if the default policy is to swap */ +#define CE_SEND_FLAG_SWAP_DISABLE 1 + +/* + * Queue a source buffer to be sent to an anonymous destination buffer. + * copyeng - which copy engine to use + * buffer - address of buffer + * nbytes - number of bytes to send + * transfer_id - arbitrary ID; reflected to destination + * flags - CE_SEND_FLAG_* values + * Returns 0 on success; otherwise an error status. + * + * Note: If no flags are specified, use CE's default data swap mode. + * + * Implementation note: pushes 1 buffer to Source ring + */ +int ce_send(struct CE_handle *copyeng, + void *per_transfer_send_context, + qdf_dma_addr_t buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags, + unsigned int user_flags); + +#ifdef WLAN_FEATURE_FASTPATH +int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu, + unsigned int transfer_id, uint32_t download_len); + +#endif + +void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls); +extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl, + qdf_nbuf_t msdu, + uint32_t transfer_id, + uint32_t len, + uint32_t sendhead); + +QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, + qdf_nbuf_t msdu, + uint32_t transfer_id, + uint32_t len); +/* + * Register a Send Callback function. + * This function is called as soon as the contents of a Send + * have reached the destination, unless disable_interrupts is + * requested. In this case, the callback is invoked when the + * send status is polled, shortly after the send completes. + */ +void ce_send_cb_register(struct CE_handle *copyeng, + ce_send_cb fn_ptr, + void *per_ce_send_context, int disable_interrupts); + +/* + * Return the size of a SendList. This allows the caller to allocate + * a SendList while the SendList structure remains opaque. + */ +unsigned int ce_sendlist_sizeof(void); + +/* Initialize a sendlist */ +void ce_sendlist_init(struct ce_sendlist *sendlist); + +/* Append a simple buffer (address/length) to a sendlist. */ +int ce_sendlist_buf_add(struct ce_sendlist *sendlist, + qdf_dma_addr_t buffer, + unsigned int nbytes, + /* OR-ed with internal flags */ + uint32_t flags, + uint32_t user_flags); + +/* + * Queue a "sendlist" of buffers to be sent using gather to a single + * anonymous destination buffer + * copyeng - which copy engine to use + * sendlist - list of simple buffers to send using gather + * transfer_id - arbitrary ID; reflected to destination + * Returns 0 on success; otherwise an error status. + * + * Implementation note: Pushes multiple buffers with Gather to Source ring. + */ +int ce_sendlist_send(struct CE_handle *copyeng, + void *per_transfer_send_context, + struct ce_sendlist *sendlist, + unsigned int transfer_id); + +/*==================Recv=====================================================*/ + +/* + * Make a buffer available to receive. The buffer must be at least of a + * minimal size appropriate for this copy engine (src_sz_max attribute). + * copyeng - which copy engine to use + * per_transfer_recv_context - context passed back to caller's recv_cb + * buffer - address of buffer in CE space + * Returns 0 on success; otherwise an error status. + * + * Implementation note: Pushes a buffer to Dest ring. + */ +int ce_recv_buf_enqueue(struct CE_handle *copyeng, + void *per_transfer_recv_context, + qdf_dma_addr_t buffer); + +/* + * Register a Receive Callback function. + * This function is called as soon as data is received + * from the source. + */ +void ce_recv_cb_register(struct CE_handle *copyeng, + CE_recv_cb fn_ptr, + void *per_CE_recv_context, + int disable_interrupts); + +/*==================CE Watermark=============================================*/ + +/* + * Register a Watermark Callback function. + * This function is called as soon as a watermark level + * is crossed. A Watermark Callback function is free to + * handle received data "en masse"; but then some coordination + * is required with a registered Receive Callback function. + * [Suggestion: Either handle Receives in a Receive Callback + * or en masse in a Watermark Callback; but not both.] + */ +void ce_watermark_cb_register(struct CE_handle *copyeng, + CE_watermark_cb fn_ptr, + void *per_CE_wm_context); + +/* + * Set low/high watermarks for the send/source side of a copy engine. + * + * Typically, the destination side CPU manages watermarks for + * the receive side and the source side CPU manages watermarks + * for the send side. + * + * A low watermark of 0 is never hit (so the watermark function + * will never be called for a Low Watermark condition). + * + * A high watermark equal to nentries is never hit (so the + * watermark function will never be called for a High Watermark + * condition). + */ +void ce_send_watermarks_set(struct CE_handle *copyeng, + unsigned int low_alert_nentries, + unsigned int high_alert_nentries); + +/* Set low/high watermarks for the receive/destination side of copy engine. */ +void ce_recv_watermarks_set(struct CE_handle *copyeng, + unsigned int low_alert_nentries, + unsigned int high_alert_nentries); + +/* + * Return the number of entries that can be queued + * to a ring at an instant in time. + * + * For source ring, does not imply that destination-side + * buffers are available; merely indicates descriptor space + * in the source ring. + * + * For destination ring, does not imply that previously + * received buffers have been processed; merely indicates + * descriptor space in destination ring. + * + * Mainly for use with CE Watermark callback. + */ +unsigned int ce_send_entries_avail(struct CE_handle *copyeng); +unsigned int ce_recv_entries_avail(struct CE_handle *copyeng); + +/* recv flags */ +/* Data is byte-swapped */ +#define CE_RECV_FLAG_SWAPPED 1 + +/* + * Supply data for the next completed unprocessed receive descriptor. + * + * For use + * with CE Watermark callback, + * in a recv_cb function when processing buf_lists + * in a recv_cb function in order to mitigate recv_cb's. + * + * Implementation note: Pops buffer from Dest ring. + */ +int ce_completed_recv_next(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *flagsp); + +/* + * Supply data for the next completed unprocessed send descriptor. + * + * For use + * with CE Watermark callback + * in a send_cb function in order to mitigate send_cb's. + * + * Implementation note: Pops 1 completed send buffer from Source ring + */ +int ce_completed_send_next(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *sw_idx, + unsigned int *hw_idx, + uint32_t *toeplitz_hash_result); + +/*==================CE Engine Initialization=================================*/ + +/* Initialize an instance of a CE */ +struct CE_handle *ce_init(struct hif_softc *scn, + unsigned int CE_id, struct CE_attr *attr); + +/*==================CE Engine Shutdown=======================================*/ +/* + * Support clean shutdown by allowing the caller to revoke + * receive buffers. Target DMA must be stopped before using + * this API. + */ +QDF_STATUS +ce_revoke_recv_next(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp); + +/* + * Support clean shutdown by allowing the caller to cancel + * pending sends. Target DMA must be stopped before using + * this API. + */ +QDF_STATUS +ce_cancel_send_next(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + uint32_t *toeplitz_hash_result); + +void ce_fini(struct CE_handle *copyeng); + +/*==================CE Interrupt Handlers====================================*/ +void ce_per_engine_service_any(int irq, struct hif_softc *scn); +int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id); +void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id); + +/*===================CE cmpl interrupt Enable/Disable =======================*/ +void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn); +void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn); + +/* API to check if any of the copy engine pipes has + * pending frames for prcoessing + */ +bool ce_get_rx_pending(struct hif_softc *scn); + +/** + * war_ce_src_ring_write_idx_set() - Set write index for CE source ring + * + * Return: None + */ +void war_ce_src_ring_write_idx_set(struct hif_softc *scn, + u32 ctrl_addr, unsigned int write_index); + +/* CE_attr.flags values */ +#define CE_ATTR_NO_SNOOP 0x01 /* Use NonSnooping PCIe accesses? */ +#define CE_ATTR_BYTE_SWAP_DATA 0x02 /* Byte swap data words */ +#define CE_ATTR_SWIZZLE_DESCRIPTORS 0x04 /* Swizzle descriptors? */ +#define CE_ATTR_DISABLE_INTR 0x08 /* no interrupt on copy completion */ +#define CE_ATTR_ENABLE_POLL 0x10 /* poll for residue descriptors */ +#define CE_ATTR_DIAG 0x20 /* Diag CE */ + +/** + * struct CE_attr - Attributes of an instance of a Copy Engine + * @flags: CE_ATTR_* values + * @priority: TBD + * @src_nentries: #entries in source ring - Must be a power of 2 + * @src_sz_max: Max source send size for this CE. This is also the minimum + * size of a destination buffer + * @dest_nentries: #entries in destination ring - Must be a power of 2 + * @reserved: Future Use + */ +struct CE_attr { + unsigned int flags; + unsigned int priority; + unsigned int src_nentries; + unsigned int src_sz_max; + unsigned int dest_nentries; + void *reserved; +}; + +/* + * When using sendlist_send to transfer multiple buffer fragments, the + * transfer context of each fragment, except last one, will be filled + * with CE_SENDLIST_ITEM_CTXT. CE_completed_send will return success for + * each fragment done with send and the transfer context would be + * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the + * status of a send completion. + */ +#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef) + +/* + * This is an opaque type that is at least large enough to hold + * a sendlist. A sendlist can only be accessed through CE APIs, + * but this allows a sendlist to be allocated on the run-time + * stack. TBDXXX: un-opaque would be simpler... + */ +struct ce_sendlist { + unsigned int word[62]; +}; + +#define ATH_ISR_NOSCHED 0x0000 /* Do not schedule bottom half/DPC */ +#define ATH_ISR_SCHED 0x0001 /* Schedule the bottom half for execution */ +#define ATH_ISR_NOTMINE 0x0002 /* for shared IRQ's */ + +#ifdef IPA_OFFLOAD +void ce_ipa_get_resource(struct CE_handle *ce, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr); +#else +/** + * ce_ipa_get_resource() - get uc resource on copyengine + * @ce: copyengine context + * @ce_sr: copyengine source ring resource info + * @ce_sr_ring_size: copyengine source ring size + * @ce_reg_paddr: copyengine register physical address + * + * Copy engine should release resource to micro controller + * Micro controller needs + * - Copy engine source descriptor base address + * - Copy engine source descriptor size + * - PCI BAR address to access copy engine regiser + * + * Return: None + */ +static inline void ce_ipa_get_resource(struct CE_handle *ce, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr) +{ +} +#endif /* IPA_OFFLOAD */ + +static inline void ce_pkt_error_count_incr( + struct HIF_CE_state *_hif_state, + enum ol_ath_hif_pkt_ecodes _hif_ecode) +{ + struct hif_softc *scn = HIF_GET_SOFTC(_hif_state); + + if (_hif_ecode == HIF_PIPE_NO_RESOURCE) + (scn->pkt_stats.hif_pipe_no_resrc_count) + += 1; +} + +bool ce_check_rx_pending(struct CE_state *CE_state); +void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id); +struct ce_ops *ce_services_srng(void); +struct ce_ops *ce_services_legacy(void); +bool ce_srng_based(struct hif_softc *scn); +/* Forward declaration */ +struct CE_ring_state; + +struct ce_ops { + uint32_t (*ce_get_desc_size)(uint8_t ring_type); + int (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type, + uint32_t ce_id, struct CE_ring_state *ring, + struct CE_attr *attr); + int (*ce_send_nolock)(struct CE_handle *copyeng, + void *per_transfer_context, + qdf_dma_addr_t buffer, + uint32_t nbytes, + uint32_t transfer_id, + uint32_t flags, + uint32_t user_flags); + int (*ce_sendlist_send)(struct CE_handle *copyeng, + void *per_transfer_context, + struct ce_sendlist *sendlist, unsigned int transfer_id); + QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp); + QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng, + void **per_CE_contextp, void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, unsigned int *nbytesp, + unsigned int *transfer_idp, + uint32_t *toeplitz_hash_result); + int (*ce_recv_buf_enqueue)(struct CE_handle *copyeng, + void *per_recv_context, qdf_dma_addr_t buffer); + bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags); + int (*ce_completed_recv_next_nolock)(struct CE_state *CE_state, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *flagsp); + int (*ce_completed_send_next_nolock)(struct CE_state *CE_state, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *sw_idx, + unsigned int *hw_idx, + uint32_t *toeplitz_hash_result); + unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn, + struct CE_state *CE_state); + unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn, + struct CE_state *CE_state); + void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state, + int disable_copy_compl_intr); + void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn, + struct pld_shadow_reg_v2_cfg **shadow_config, + int *num_shadow_registers_configured); + int (*ce_get_index_info)(struct hif_softc *scn, void *ce_state, + struct ce_index *info); +}; + +int hif_ce_bus_early_suspend(struct hif_softc *scn); +int hif_ce_bus_late_resume(struct hif_softc *scn); + +/* + * ce_engine_service_reg: + * @scn: hif_context + * @CE_id: Copy engine ID + * + * Called from ce_per_engine_service and goes through the regular interrupt + * handling that does not involve the WLAN fast path feature. + * + * Returns void + */ +void ce_engine_service_reg(struct hif_softc *scn, int CE_id); + +/** + * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs + * @scn: hif_context + * @ce_id: Copy engine ID + * + * Return: void + */ +void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id); + +#endif /* __COPY_ENGINE_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_assignment.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_assignment.h new file mode 100644 index 0000000000000000000000000000000000000000..dce741cffa2531e371ca56ee5754b228bf365a92 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_assignment.h @@ -0,0 +1,1223 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Implementation of the Host-side Host InterFace (HIF) API + * for a Host/Target interconnect using Copy Engines over PCIe. + */ + +#ifndef __HIF_PCI_INTERNAL_H__ +#define __HIF_PCI_INTERNAL_H__ + +#ifndef PEER_CACHEING_HOST_ENABLE +#define PEER_CACHEING_HOST_ENABLE 0 +#endif + +#define HIF_PCI_DEBUG ATH_DEBUG_MAKE_MODULE_MASK(0) +#define HIF_PCI_IPA_UC_ASSIGNED_CE 5 + +#if defined(WLAN_DEBUG) || defined(DEBUG) +static ATH_DEBUG_MASK_DESCRIPTION g_hif_debug_description[] = { + {HIF_PCI_DEBUG, "hif_pci"}, +}; + +ATH_DEBUG_INSTANTIATE_MODULE_VAR(hif, "hif", "PCIe Host Interface", + ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_INFO, + ATH_DEBUG_DESCRIPTION_COUNT + (g_hif_debug_description), + g_hif_debug_description); +#endif + +#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG +/* globals are initialized to 0 by the compiler */; +spinlock_t pcie_access_log_lock; +unsigned int pcie_access_log_seqnum; +struct HIF_ACCESS_LOG pcie_access_log[PCIE_ACCESS_LOG_NUM]; +static void hif_target_dump_access_log(void); +#endif + +/* + * Host software's Copy Engine configuration. + * This table is derived from the CE_PCI TABLE, above. + */ +#ifdef BIG_ENDIAN_HOST +#define CE_ATTR_FLAGS CE_ATTR_BYTE_SWAP_DATA +#else +#define CE_ATTR_FLAGS 0 +#endif + +/* Maximum number of Copy Engine's supported */ +#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048 +#define CE_HTT_H2T_MSG_SRC_NENTRIES_QCA6390 256 +#define CE_HTT_H2T_MSG_SRC_NENTRIES_QCA6490 256 +#define CE_HTT_H2T_MSG_SRC_NENTRIES_AR900B 4096 +#define CE_HTT_H2T_MSG_SRC_NENTRIES_QCN7605 4096 + +#define EPPING_CE_FLAGS_POLL \ + (CE_ATTR_DISABLE_INTR|CE_ATTR_ENABLE_POLL|CE_ATTR_FLAGS) + +#define CE_ATTR_DIAG_FLAGS \ + (CE_ATTR_FLAGS | CE_ATTR_DIAG) + +#ifdef QCN7605_SUPPORT + +#define QCN7605_CE_COUNT 9 +static struct CE_attr host_ce_config_wlan_qcn7605[] = { + /* host->target HTC control and raw streams & WMI Control*/ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,}, + /* target->host HTT */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES_QCN7605, 256, 0, NULL,}, +#ifdef IPA_OFFLOAD + /* ipa_uc->target HTC control */ + { /* CE5 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 512, 0, NULL,}, +#else + /* unused */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, +#endif + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* target->host PKTLOG */ + { /* CE7 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, +#ifdef IPA_OFFLOAD + /* Target to uMC */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, +#else + /* target->host HTT*/ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, +#endif + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; + +static struct CE_pipe_config target_ce_config_wlan_qcn7605[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 4, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE3 */ 3, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 128, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* NB: 50% of src nentries, since tx has 2 frags */ +#ifdef IPA_OFFLOAD + /* ipa_uc->target */ + { /* CE5 */ 5, PIPEDIR_OUT, 512, 64, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, +#else + /* unused */ + { /* CE5 */ 5, PIPEDIR_INOUT_H2H, 0, 0, 0, 0,}, +#endif + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* Target -> host PKTLOG */ + { /* CE7 */ 7, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, +#ifdef IPA_OFFLOAD + /* CE8 used only by IPA */ + { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, +#else + /* target->host HTT*/ + { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, +#endif + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; +#endif + +#ifdef QCA_WIFI_3_0 +static struct CE_attr host_ce_config_wlan[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,}, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* ipa_uc->target HTC control */ + { /* CE5 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 512, 0, NULL,}, + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* Target to uMC */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* target->host HTT */ + { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host HTT */ + { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target -> host PKTLOG */ + { /* CE11 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, +}; + +static struct CE_pipe_config target_ce_config_wlan[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 64, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* NB: 50% of src nentries, since tx has 2 frags */ + /* ipa_uc->target */ + { /* CE5 */ 5, PIPEDIR_OUT, 1024, 64, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* CE8 used only by IPA */ + { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* CE9 target->host HTT */ + { /* CE9 */ 9, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* CE10 target->host HTT */ + { /* CE10 */ 10, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Target -> host PKTLOG */ + { /* CE11 */ 11, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, +}; + +#ifdef WLAN_FEATURE_EPPING +static struct CE_attr host_ce_config_wlan_epping_poll[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host EP-ping */ + { /* CE1 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 128, NULL,}, + /* target->host EP-ping */ + { /* CE2 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 128, NULL,}, + /* host->target EP-ping */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,}, + /* host->target EP-ping */ + { /* CE4 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,}, + /* EP-ping heartbeat */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,}, + /* unused */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, +}; + +#ifdef QCN7605_SUPPORT +static struct CE_attr host_ce_config_wlan_epping_irq[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host EP-ping */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, + /* target->host EP-ping */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, + /* host->target EP-ping */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target EP-ping */ + { /* CE4 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* EP-ping heartbeat */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 16, NULL,}, + /* unused */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, +}; + +/* + * EP-ping firmware's CE configuration + */ +static struct CE_pipe_config target_ce_config_wlan_epping[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 16, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host EP-ping */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host EP-ping */ + { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target EP-ping */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target EP-ping */ + { /* CE4 */ 4, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* EP-ping heartbeat */ + { /* CE5 */ 5, PIPEDIR_IN, 16, 2048, CE_ATTR_FLAGS, 0,}, + /* unused */ + { /* CE6 */ 6, PIPEDIR_INOUT, 0, 0, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, 0, 0,}, + /* CE8 used only by IPA */ + { /* CE8 */ 8, PIPEDIR_IN, 4, 2048, CE_ATTR_FLAGS, 0,} +}; +#else +static struct CE_attr host_ce_config_wlan_epping_irq[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host EP-ping */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,}, + /* target->host EP-ping */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,}, + /* host->target EP-ping */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,}, + /* host->target EP-ping */ + { /* CE4 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,}, + /* EP-ping heartbeat */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,}, + /* unused */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, +}; +/* + * EP-ping firmware's CE configuration + */ +static struct CE_pipe_config target_ce_config_wlan_epping[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 16, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host EP-ping */ + { /* CE1 */ 1, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host EP-ping */ + { /* CE2 */ 2, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target EP-ping */ + { /* CE3 */ 3, PIPEDIR_OUT, 128, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target EP-ping */ + { /* CE4 */ 4, PIPEDIR_OUT, 128, 2048, CE_ATTR_FLAGS, 0,}, + /* EP-ping heartbeat */ + { /* CE5 */ 5, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,}, + /* unused */ + { /* CE6 */ 6, PIPEDIR_INOUT, 0, 0, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, 0, 0,}, + /* CE8 used only by IPA */ + { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,} +}; +#endif +#endif +#else +static struct CE_attr host_ce_config_wlan[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* ipa_uc->target HTC control */ + { /* CE5 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0, + 1024, 512, 0, NULL,}, + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ (CE_ATTR_DIAG_FLAGS | CE_ATTR_DISABLE_INTR), + 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, +}; + +static struct CE_pipe_config target_ce_config_wlan[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,}, + /* target->host HTT + HTC control */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI */ + { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,}, + /* NB: 50% of src nentries, since tx has 2 frags */ + /* ipa_uc->target HTC control */ + { /* CE5 */ 5, PIPEDIR_OUT, 1024, 64, CE_ATTR_FLAGS, 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, 0, 0,}, + /* CE8 used only by IPA */ + { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,} +}; + +#ifdef WLAN_FEATURE_EPPING +static struct CE_attr host_ce_config_wlan_epping_poll[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,}, + /* target->host EP-ping */ + { /* CE1 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 128, NULL,}, + /* target->host EP-ping */ + { /* CE2 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 128, NULL,}, + /* host->target EP-ping */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,}, + /* host->target EP-ping */ + { /* CE4 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,}, + /* EP-ping heartbeat */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,}, + /* unused */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* The following CEs are not being used yet */ + { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + { /* CE11 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, +}; +static struct CE_attr host_ce_config_wlan_epping_irq[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,}, + /* target->host EP-ping */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,}, + /* target->host EP-ping */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,}, + /* host->target EP-ping */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,}, + /* host->target EP-ping */ + { /* CE4 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,}, + /* EP-ping heartbeat */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,}, + /* unused */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* The following CEs are not being used yet */ + { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + { /* CE11 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, +}; +/* + * EP-ping firmware's CE configuration + */ +static struct CE_pipe_config target_ce_config_wlan_epping[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 16, 256, CE_ATTR_FLAGS, 0,}, + /* target->host EP-ping */ + { /* CE1 */ 1, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host EP-ping */ + { /* CE2 */ 2, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target EP-ping */ + { /* CE3 */ 3, PIPEDIR_OUT, 128, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target EP-ping */ + { /* CE4 */ 4, PIPEDIR_OUT, 128, 2048, CE_ATTR_FLAGS, 0,}, + /* EP-ping heartbeat */ + { /* CE5 */ 5, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,}, + /* unused */ + { /* CE6 */ 6, PIPEDIR_INOUT, 0, 0, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, 0, 0,}, + /* CE8 used only by IPA */ + { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + { /* CE9 */ 9, PIPEDIR_IN, 0, 0, CE_ATTR_FLAGS, 0,}, + { /* CE10 */ 10, PIPEDIR_IN, 0, 0, CE_ATTR_FLAGS, 0,}, + { /* CE11 */ 11, PIPEDIR_IN, 0, 0, CE_ATTR_FLAGS, 0,}, +}; +#endif +#endif + +static struct CE_attr host_ce_config_wlan_ar9888[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL, }, + /* target->host BMI + HTC control */ + /* could be moved to share CE3 */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, }, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL, }, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL, }, + /* host->target HTT */ + { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0, + CE_HTT_H2T_MSG_SRC_NENTRIES_AR900B, 256, 0, NULL, }, +#ifdef WLAN_FEATURE_FASTPATH + /* target->host HTT messages */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, }, +#else /* WLAN_FEATURE_FASTPATH */ + /* unused */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, +#endif /* WLAN_FEATURE_FASTPATH */ + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL, }, + /* Target autonomous HIF_memcpy */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, +}; + +static struct CE_attr host_ce_config_wlan_ar900b[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL, }, + /* target->host BMI + HTC control */ + /* could be moved to share CE3 */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, }, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL, }, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL, }, + /* host->target HTT */ + { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0, + CE_HTT_H2T_MSG_SRC_NENTRIES_AR900B, 256, 0, NULL, }, +#ifdef WLAN_FEATURE_FASTPATH + /* target->host HTT messages */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, }, +#else /* WLAN_FEATURE_FASTPATH */ + /* unused */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, +#endif /* WLAN_FEATURE_FASTPATH */ + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL, }, + /* target->host pktlog */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL, }, + /* Target autonomous HIF_memcpy */ + { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, + /* Target autonomous HIF_memcpy */ + { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, + /* Target autonomous HIF_memcpy */ + { /* CE11 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, +}; + +static struct CE_attr host_lowdesc_ce_cfg_wlan_ar9888[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL, }, + /* could be moved to share CE3 */ +#ifdef WLAN_FEATURE_FASTPATH + /* target->host BMI + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 64, NULL, }, +#else + /* target->host BMI + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, }, +#endif + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL, }, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL, }, + /* host->target HTT */ + { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0, + CE_HTT_H2T_MSG_SRC_NENTRIES_AR900B, 256, 0, NULL, }, +#ifdef WLAN_FEATURE_FASTPATH + /* target->host HTT messages */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, }, +#else /* WLAN_FEATURE_FASTPATH */ + /* unused */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, +#endif /* WLAN_FEATURE_FASTPATH */ + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL, }, + /* Target autonomous HIF_memcpy */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, +}; + +static struct CE_attr host_lowdesc_ce_cfg_wlan_ar900b[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL, }, + /* could be moved to share CE3 */ +#ifdef WLAN_FEATURE_FASTPATH + /* target->host BMI + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 64, NULL, }, +#else + /* target->host BMI + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, }, +#endif + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL, }, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL, }, + /* host->target HTT */ + { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0, + CE_HTT_H2T_MSG_SRC_NENTRIES_AR900B, 256, 0, NULL, }, +#ifdef WLAN_FEATURE_FASTPATH + /* target->host HTT messages */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, }, +#else /* WLAN_FEATURE_FASTPATH */ + /* unused */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, +#endif /* WLAN_FEATURE_FASTPATH */ + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL, }, + /* target->host pktlog */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL, }, + /* Target autonomous HIF_memcpy */ + { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, + /* Target autonomous HIF_memcpy */ + { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, + /* Target autonomous HIF_memcpy */ + { /* CE11 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, +}; + +static struct CE_attr host_lowdesc_ce_cfg_wlan_ar900b_nopktlog[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL, }, + /* could be moved to share CE3 */ +#ifdef WLAN_FEATURE_FASTPATH + /* target->host BMI + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 64, NULL, }, +#else + /* target->host BMI + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, }, +#endif + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL, }, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL, }, + /* host->target HTT */ + { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0, + CE_HTT_H2T_MSG_SRC_NENTRIES_AR900B, 256, 0, NULL, }, +#ifdef WLAN_FEATURE_FASTPATH + /* target->host HTT messages */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, }, +#else /* WLAN_FEATURE_FASTPATH */ + /* unused */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, +#endif /* WLAN_FEATURE_FASTPATH */ + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL, }, + /* target->host pktlog */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, + /* Target autonomous HIF_memcpy */ + { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, + /* Target autonomous HIF_memcpy */ + { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, + /* Target autonomous HIF_memcpy */ + { /* CE11 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, }, +}; + +static struct CE_pipe_config target_ce_config_wlan_ar9888[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0, }, + /* target->host HTC control */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0, }, + /* target->host WMI */ + { /* CE2 */ 2, PIPEDIR_IN, 64, 2048, CE_ATTR_FLAGS, 0, }, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0, }, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0, }, + /* NB: 50% of src nentries, since tx has 2 frags */ +#ifdef WLAN_FEATURE_FASTPATH + /* target->host HTT */ + { /* CE5 */ 5, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0, }, +#else + /* unused */ + { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0, }, +#endif + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0, }, + /* CE7 used only by Host */ +}; + +static struct CE_pipe_config target_ce_config_wlan_ar900b[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0, }, + /* target->host HTC control */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0, }, + /* target->host WMI */ + { /* CE2 */ 2, PIPEDIR_IN, 64, 2048, CE_ATTR_FLAGS, 0, }, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0, }, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0, }, + /* NB: 50% of src nentries, since tx has 2 frags */ +#ifdef WLAN_FEATURE_FASTPATH + /* target->host HTT */ + { /* CE5 */ 5, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0, }, +#else + /* unused */ + { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0, }, +#endif + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0, }, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT, 0, 0, 0, 0, }, + { /* CE8 */ 8, PIPEDIR_IN, 64, 2048, CE_ATTR_FLAGS + /* target->host packtlog */ + | CE_ATTR_DISABLE_INTR, 0, }, +#if PEER_CACHEING_HOST_ENABLE + /* target autonomous qcache memcpy */ + { /* CE9 */ 9, PIPEDIR_INOUT, 32, 2048, CE_ATTR_FLAGS | + CE_ATTR_DISABLE_INTR, 0, }, +#endif +}; + +static struct CE_attr host_ce_config_wlan_qca8074[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* host->target WMI (mac0) */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* target -> host PKTLOG */ +#ifdef REMOVE_PKT_LOG + { /* CE5 */ 0, 0, 0, 0, 0, NULL,}, +#else + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, +#endif + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0, 0, + 0, 0, NULL,}, + /* host->target WMI (mac1) */ + { /* CE7 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* Target to uMC */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* host->target WMI (mac2) */ + { /* CE9 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* target->host HTT */ + { /* CE10 unused */ 0, 0, 0, 0, 0, NULL,}, + { /* CE11 unused */ 0, 0, 0, 0, 0, NULL,}, +}; + +#if defined(QCA_LOWMEM_CONFIG) || defined(QCA_512M_CONFIG) +#define T2H_WMI_RING_SIZE 32 +#else +#define T2H_WMI_RING_SIZE 512 +#endif +static struct CE_pipe_config target_ce_config_wlan_qca8074[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, T2H_WMI_RING_SIZE, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* NB: 50% of src nentries, since tx has 2 frags */ + /* Target -> host PKTLOG */ +#ifdef REMOVE_PKT_LOG + { /* CE5 */ 5, PIPEDIR_NONE, 0, 0, 0, 0,}, +#else + { /* CE5 */ 5, PIPEDIR_IN, 32, 2048, 0, 0,}, +#endif + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 65535, 64, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_OUT, 32, 2048, + 8192, 0,}, + /* CE8 used only by IPA */ + { /* CE8 */ 8, PIPEDIR_INOUT, 32, 65535, 112, 0,}, + /* CE9 target->host HTT */ + { /* CE9 */ 9, PIPEDIR_OUT, 32, 2048, 8192, 0,}, + /* CE10 target->host HTT */ + {/* CE10 unused */10, PIPEDIR_NONE, 0, 0, 0, 0,}, + {/* CE11 unused */11, PIPEDIR_NONE, 0, 0, 0, 0,}, +}; + +static struct CE_attr host_ce_config_wlan_qca8074_pci[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ EPPING_CE_FLAGS_POLL, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 256, NULL,}, + /* host->target WMI (mac0) */ + { /* CE3 */ EPPING_CE_FLAGS_POLL, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* target -> host PKTLOG */ +#ifdef REMOVE_PKT_LOG + { /* CE5 */ 0, 0, 0, 0, 0, NULL,}, +#else + { /* CE5 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 512, NULL,}, +#endif + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* host->target WMI (mac1) */ + { /* CE7 */ EPPING_CE_FLAGS_POLL, 0, 32, 2048, 0, NULL,}, + /* Target to uMC */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* host->target WMI (mac2) */ + { /* CE9 */ EPPING_CE_FLAGS_POLL, 0, 32, 2048, 0, NULL,}, + /* target->host HTT */ + { /* CE10 unused */ 0, 0, 0, 0, 0, NULL,}, + { /* CE11 unused */ 0, 0, 0, 0, 0, NULL,}, +}; + +static struct CE_pipe_config target_ce_config_wlan_qca8074_pci[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* NB: 50% of src nentries, since tx has 2 frags */ + /* ipa_uc->target */ + { /* CE5 */ 5, PIPEDIR_OUT, 1024, 64, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* CE8 used only by IPA */ + { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* CE9 target->host HTT */ + { /* CE9 */ 9, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* CE10 target->host HTT */ + { /* CE10 */ 10, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Target -> host PKTLOG */ + { /* CE11 */ 11, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, +}; + +static struct CE_attr host_lowdesc_ce_config_wlan_adrastea_nopktlog[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 256, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 64, NULL,}, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* ipa_uc->target HTC control */ + { /* CE5 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 512, 0, NULL,}, + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* Target to uMC */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* target->host HTT */ + { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 2048, 64, NULL,}, + /* target->host HTT */ + { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 2048, 64, NULL,}, + /* target -> host PKTLOG */ + { /* CE11 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + 0, 2048, 0, NULL,}, +}; + +static struct CE_attr host_ce_config_wlan_adrastea[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,}, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* ipa_uc->target HTC control */ + { /* CE5 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 512, 0, NULL,}, + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* Target to uMC */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* target->host HTT */ + { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host HTT */ + { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target -> host PKTLOG */ + { /* CE11 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, +}; + +static struct CE_pipe_config + target_lowdesc_ce_config_wlan_adrastea_nopktlog[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 64, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* NB: 50% of src nentries, since tx has 2 frags */ + /* ipa_uc->target */ + { /* CE5 */ 5, PIPEDIR_OUT, 1024, 64, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* CE8 used only by IPA */ + { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* CE9 target->host HTT */ + { /* CE9 */ 9, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* CE10 target->host HTT */ + { /* CE10 */ 10, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Target -> host PKTLOG */ + { /* CE11 */ 11, PIPEDIR_IN, 32, 2048, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, +}; + +static struct CE_pipe_config target_ce_config_wlan_adrastea[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 64, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* NB: 50% of src nentries, since tx has 2 frags */ + /* ipa_uc->target */ + { /* CE5 */ 5, PIPEDIR_OUT, 1024, 64, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* CE8 used only by IPA */ + { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* CE9 target->host HTT */ + { /* CE9 */ 9, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* CE10 target->host HTT */ + { /* CE10 */ 10, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Target -> host PKTLOG */ + { /* CE11 */ 11, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, +}; + +#define QCN_9000_CE_COUNT 6 +/* QCN9000 enable polling mode */ +static struct CE_attr host_ce_config_wlan_qcn9000[] = { + /* host->target HTC control and raw streams */ + {/*CE0*/ (CE_ATTR_FLAGS), 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + {/*CE1*/ (CE_ATTR_FLAGS), 0, 0, 2048, + 512, NULL,}, + /* target->host WMI */ + {/*CE2*/ (CE_ATTR_FLAGS), 0, 0, 2048, + 32, NULL,}, + /* host->target WMI */ + {/*CE3*/ (CE_ATTR_FLAGS), 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + {/*CE4*/ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* target -> host PKTLOG */ + {/*CE5*/ (CE_ATTR_FLAGS), 0, 0, 2048, + 512, NULL,}, + /* Target autonomous HIF_memcpy */ + {/*CE6*/ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* host->target WMI (mac1) */ + {/*CE7*/ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* Reserved for target */ + {/*CE8*/ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; + +static struct CE_pipe_config target_ce_config_wlan_qcn9000[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Target -> host PKTLOG */ + { /* CE5 */ 5, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_OUT, 32, 2048, + 8192, 0,}, + /* Reserved for target */ + { /* CE8 */ 8, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; + +#define QCA_6290_CE_COUNT 9 +#ifdef QCA_6290_AP_MODE +static struct CE_attr host_ce_config_wlan_qca6290[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ (CE_ATTR_FLAGS | CE_ATTR_ENABLE_POLL), 0, 0, 2048, + 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ (CE_ATTR_FLAGS | CE_ATTR_ENABLE_POLL), 0, 0, 2048, + 32, NULL,}, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* target -> host PKTLOG */ + { /* CE5 */ (CE_ATTR_FLAGS | CE_ATTR_ENABLE_POLL), 0, 0, 2048, + 512, NULL,}, + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* host->target WMI (mac1) */ + { /* CE7 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* Reserved for target */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; + +static struct CE_pipe_config target_ce_config_wlan_qca6290[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Target -> host PKTLOG */ + { /* CE5 */ 5, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_OUT, 32, 2048, + 8192, 0,}, + /* Reserved for target */ + { /* CE8 */ 8, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; +#else +static struct CE_attr host_ce_config_wlan_qca6290[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* target -> host PKTLOG */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ (CE_ATTR_DIAG_FLAGS | CE_ATTR_DISABLE_INTR), 0, + 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, + /* Reserved for target */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; + +static struct CE_pipe_config target_ce_config_wlan_qca6290[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Target -> host PKTLOG */ + { /* CE5 */ 5, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Reserved for target */ + { /* CE8 */ 8, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; +#endif + +#define QCA_6390_CE_COUNT 9 +static struct CE_attr host_ce_config_wlan_qca6390[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES_QCA6390, 256, 0, NULL,}, + /* target -> host PKTLOG */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ (CE_ATTR_DIAG_FLAGS | CE_ATTR_DISABLE_INTR), 0, + 0, DIAG_TRANSFER_LIMIT, 0, NULL,}, + /* Reserved for target */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; + +static struct CE_pipe_config target_ce_config_wlan_qca6390[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Target -> host PKTLOG */ + { /* CE5 */ 5, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Reserved for target */ + { /* CE8 */ 8, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; + +#define QCA_6490_CE_COUNT 9 +static struct CE_attr host_ce_config_wlan_qca6490[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 4096, 64, NULL,}, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES_QCA6490, 256, 0, NULL,}, + /* target -> host PKTLOG */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ (CE_ATTR_DIAG_FLAGS | CE_ATTR_DISABLE_INTR), 0, + 0, DIAG_TRANSFER_LIMIT, 0, NULL,}, + /* Reserved for target */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; + +static struct CE_pipe_config target_ce_config_wlan_qca6490[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 32, 4096, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Target -> host PKTLOG */ + { /* CE5 */ 5, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Reserved for target */ + { /* CE8 */ 8, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; + +#define QCA_6750_CE_COUNT 9 +static struct CE_attr host_ce_config_wlan_qca6750[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* target -> host PKTLOG */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ (CE_ATTR_DIAG_FLAGS | CE_ATTR_DISABLE_INTR), 0, + 0, DIAG_TRANSFER_LIMIT, 0, NULL,}, + /* Reserved for target */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; + +static struct CE_pipe_config target_ce_config_wlan_qca6750[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Target -> host PKTLOG */ + { /* CE5 */ 5, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Reserved for target */ + { /* CE8 */ 8, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; +#endif /* __HIF_PCI_INTERNAL_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_bmi.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_bmi.c new file mode 100644 index 0000000000000000000000000000000000000000..019c03ec1dbbfbb931b978a427dbba96f3afdc63 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_bmi.c @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "targcfg.h" +#include "qdf_lock.h" +#include "qdf_status.h" +#include "qdf_status.h" +#include /* qdf_atomic_read */ +#include +#include "hif_io32.h" +#include +#include "regtable.h" +#define ATH_MODULE_NAME hif +#include +#include "hif_main.h" +#include "ce_api.h" +#include "ce_bmi.h" +#include "qdf_trace.h" +#include "hif_debug.h" +#include "bmi_msg.h" +#include "qdf_module.h" + +/* Track a BMI transaction that is in progress */ +#ifndef BIT +#define BIT(n) (1 << (n)) +#endif + +enum { + BMI_REQ_SEND_DONE = BIT(0), /* the bmi tx completion */ + BMI_RESP_RECV_DONE = BIT(1), /* the bmi respond is received */ +}; + +struct BMI_transaction { + struct HIF_CE_state *hif_state; + qdf_semaphore_t bmi_transaction_sem; + uint8_t *bmi_request_host; /* Req BMI msg in Host addr space */ + qdf_dma_addr_t bmi_request_CE; /* Req BMI msg in CE addr space */ + uint32_t bmi_request_length; /* Length of BMI request */ + uint8_t *bmi_response_host; /* Rsp BMI msg in Host addr space */ + qdf_dma_addr_t bmi_response_CE; /* Rsp BMI msg in CE addr space */ + unsigned int bmi_response_length; /* Length of received response */ + unsigned int bmi_timeout_ms; + uint32_t bmi_transaction_flags; /* flags for the transcation */ +}; + +/* + * send/recv completion functions for BMI. + * NB: The "net_buf" parameter is actually just a + * straight buffer, not an sk_buff. + */ +void hif_bmi_send_done(struct CE_handle *copyeng, void *ce_context, + void *transfer_context, qdf_dma_addr_t data, + unsigned int nbytes, + unsigned int transfer_id, unsigned int sw_index, + unsigned int hw_index, uint32_t toeplitz_hash_result) +{ + struct BMI_transaction *transaction = + (struct BMI_transaction *)transfer_context; + +#ifdef BMI_RSP_POLLING + /* + * Fix EV118783, Release a semaphore after sending + * no matter whether a response is been expecting now. + */ + qdf_semaphore_release(&transaction->bmi_transaction_sem); +#else + /* + * If a response is anticipated, we'll complete the + * transaction if the response has been received. + * If no response is anticipated, complete the + * transaction now. + */ + transaction->bmi_transaction_flags |= BMI_REQ_SEND_DONE; + + /* resp is't needed or has already been received, + * never assume resp comes later then this + */ + if (!transaction->bmi_response_CE || + (transaction->bmi_transaction_flags & BMI_RESP_RECV_DONE)) { + qdf_semaphore_release(&transaction->bmi_transaction_sem); + } +#endif +} + +#ifndef BMI_RSP_POLLING +void hif_bmi_recv_data(struct CE_handle *copyeng, void *ce_context, + void *transfer_context, qdf_dma_addr_t data, + unsigned int nbytes, + unsigned int transfer_id, unsigned int flags) +{ + struct BMI_transaction *transaction = + (struct BMI_transaction *)transfer_context; + + transaction->bmi_response_length = nbytes; + transaction->bmi_transaction_flags |= BMI_RESP_RECV_DONE; + + /* when both send/recv are done, the sem can be released */ + if (transaction->bmi_transaction_flags & BMI_REQ_SEND_DONE) + qdf_semaphore_release(&transaction->bmi_transaction_sem); +} +#endif + +/* Timeout for BMI message exchange */ +#define HIF_EXCHANGE_BMI_MSG_TIMEOUT 6000 + +QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx, + qdf_dma_addr_t bmi_cmd_da, + qdf_dma_addr_t bmi_rsp_da, + uint8_t *bmi_request, + uint32_t request_length, + uint8_t *bmi_response, + uint32_t *bmi_response_lengthp, + uint32_t TimeoutMS) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + struct HIF_CE_pipe_info *send_pipe_info = + &(hif_state->pipe_info[BMI_CE_NUM_TO_TARG]); + struct CE_handle *ce_send_hdl = send_pipe_info->ce_hdl; + qdf_dma_addr_t CE_request, CE_response = 0; + struct BMI_transaction *transaction = NULL; + int status = QDF_STATUS_SUCCESS; + struct HIF_CE_pipe_info *recv_pipe_info = + &(hif_state->pipe_info[BMI_CE_NUM_TO_HOST]); + struct CE_handle *ce_recv = recv_pipe_info->ce_hdl; + unsigned int mux_id = 0; + unsigned int transaction_id = 0xffff; + unsigned int user_flags = 0; +#ifdef BMI_RSP_POLLING + qdf_dma_addr_t buf; + unsigned int completed_nbytes, id, flags; + int i; +#endif + + transaction = + (struct BMI_transaction *)qdf_mem_malloc(sizeof(*transaction)); + if (unlikely(!transaction)) + return QDF_STATUS_E_NOMEM; + + transaction_id = (mux_id & MUX_ID_MASK) | + (transaction_id & TRANSACTION_ID_MASK); +#ifdef QCA_WIFI_3_0 + user_flags &= DESC_DATA_FLAG_MASK; +#endif + A_TARGET_ACCESS_LIKELY(scn); + + /* Initialize bmi_transaction_sem to block */ + qdf_semaphore_init(&transaction->bmi_transaction_sem); + qdf_semaphore_acquire(&transaction->bmi_transaction_sem); + + transaction->hif_state = hif_state; + transaction->bmi_request_host = bmi_request; + transaction->bmi_request_length = request_length; + transaction->bmi_response_length = 0; + transaction->bmi_timeout_ms = TimeoutMS; + transaction->bmi_transaction_flags = 0; + + /* + * CE_request = dma_map_single(dev, + * (void *)bmi_request, request_length, DMA_TO_DEVICE); + */ + CE_request = bmi_cmd_da; + transaction->bmi_request_CE = CE_request; + + if (bmi_response) { + + /* + * CE_response = dma_map_single(dev, bmi_response, + * BMI_DATASZ_MAX, DMA_FROM_DEVICE); + */ + CE_response = bmi_rsp_da; + transaction->bmi_response_host = bmi_response; + transaction->bmi_response_CE = CE_response; + /* dma_cache_sync(dev, bmi_response, + * BMI_DATASZ_MAX, DMA_FROM_DEVICE); + */ + qdf_mem_dma_sync_single_for_device(scn->qdf_dev, + CE_response, + BMI_DATASZ_MAX, + DMA_FROM_DEVICE); + ce_recv_buf_enqueue(ce_recv, transaction, + transaction->bmi_response_CE); + /* NB: see HIF_BMI_recv_done */ + } else { + transaction->bmi_response_host = NULL; + transaction->bmi_response_CE = 0; + } + + /* dma_cache_sync(dev, bmi_request, request_length, DMA_TO_DEVICE); */ + qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_request, + request_length, DMA_TO_DEVICE); + + status = + ce_send(ce_send_hdl, transaction, + CE_request, request_length, + transaction_id, 0, user_flags); + ASSERT(status == QDF_STATUS_SUCCESS); + /* NB: see hif_bmi_send_done */ + + /* TBDXXX: handle timeout */ + + /* Wait for BMI request/response transaction to complete */ + /* Always just wait for BMI request here if + * BMI_RSP_POLLING is defined + */ + if (qdf_semaphore_acquire_timeout + (&transaction->bmi_transaction_sem, + HIF_EXCHANGE_BMI_MSG_TIMEOUT)) { + HIF_ERROR("%s: Fatal error, BMI transaction timeout. Please check the HW interface!!", + __func__); + qdf_mem_free(transaction); + return QDF_STATUS_E_TIMEOUT; + } + + if (bmi_response) { +#ifdef BMI_RSP_POLLING + /* Fix EV118783, do not wait a semaphore for the BMI response + * since the relative interruption may be lost. + * poll the BMI response instead. + */ + i = 0; + while (ce_completed_recv_next( + ce_recv, NULL, NULL, &buf, + &completed_nbytes, &id, + &flags) != QDF_STATUS_SUCCESS) { + if (i++ > BMI_RSP_TO_MILLISEC) { + HIF_ERROR("%s:error, can't get bmi response", + __func__); + status = QDF_STATUS_E_BUSY; + break; + } + OS_DELAY(1000); + } + + if ((status == QDF_STATUS_SUCCESS) && bmi_response_lengthp) + *bmi_response_lengthp = completed_nbytes; +#else + if ((status == QDF_STATUS_SUCCESS) && bmi_response_lengthp) { + *bmi_response_lengthp = + transaction->bmi_response_length; + } +#endif + + } + + /* dma_unmap_single(dev, transaction->bmi_request_CE, + * request_length, DMA_TO_DEVICE); + * bus_unmap_single(scn->sc_osdev, + * transaction->bmi_request_CE, + * request_length, BUS_DMA_TODEVICE); + */ + + if (status != QDF_STATUS_SUCCESS) { + qdf_dma_addr_t unused_buffer; + unsigned int unused_nbytes; + unsigned int unused_id; + unsigned int toeplitz_hash_result; + + ce_cancel_send_next(ce_send_hdl, + NULL, NULL, &unused_buffer, + &unused_nbytes, &unused_id, + &toeplitz_hash_result); + } + + A_TARGET_ACCESS_UNLIKELY(scn); + qdf_mem_free(transaction); + return status; +} +qdf_export_symbol(hif_exchange_bmi_msg); + +#ifdef BMI_RSP_POLLING +#define BMI_RSP_CB_REGISTER 0 +#else +#define BMI_RSP_CB_REGISTER 1 +#endif + +/** + * hif_register_bmi_callbacks() - register bmi callbacks + * @hif_sc: hif context + * + * Bmi phase uses different copy complete callbacks than mission mode. + */ +void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx) +{ + struct HIF_CE_pipe_info *pipe_info; + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); + + /* + * Initially, establish CE completion handlers for use with BMI. + * These are overwritten with generic handlers after we exit BMI phase. + */ + pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_TARG]; + ce_send_cb_register(pipe_info->ce_hdl, hif_bmi_send_done, pipe_info, 0); + + if (BMI_RSP_CB_REGISTER) { + pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_HOST]; + ce_recv_cb_register( + pipe_info->ce_hdl, hif_bmi_recv_data, pipe_info, 0); + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_bmi.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_bmi.h new file mode 100644 index 0000000000000000000000000000000000000000..7d330cb1a2687289e8d7f75f45aee9b2c81a1e50 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_bmi.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2015 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __CE_BMI_H__ +#define __CE_BMI_H__ + +#include /* qdf_atomic_read */ +#include "qdf_lock.h" +#include "ce_api.h" +#include "cepci.h" + +void hif_bmi_recv_data(struct CE_handle *copyeng, void *ce_context, + void *transfer_context, qdf_dma_addr_t data, + unsigned int nbytes, + unsigned int transfer_id, unsigned int flags); +void hif_bmi_send_done(struct CE_handle *copyeng, void *ce_context, + void *transfer_context, qdf_dma_addr_t data, + unsigned int nbytes, + unsigned int transfer_id, unsigned int sw_index, + unsigned int hw_index, uint32_t toeplitz_hash_result); +#endif /* __CE_BMI_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_diag.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_diag.c new file mode 100644 index 0000000000000000000000000000000000000000..1fe51dbc4f05d8be2aab0a85be00a98e4dab714b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_diag.c @@ -0,0 +1,527 @@ +/* + * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "targcfg.h" +#include "target_type.h" +#include "qdf_lock.h" +#include "qdf_status.h" +#include "qdf_status.h" +#include /* qdf_atomic_read */ +#include +#include "hif_io32.h" +#include +#include "regtable.h" +#include +#include "hif_main.h" +#include "ce_api.h" +#include "qdf_trace.h" +#include "hif_debug.h" +#include "qdf_module.h" + +void +hif_ce_dump_target_memory(struct hif_softc *scn, void *ramdump_base, + uint32_t address, uint32_t size) +{ + uint32_t loc = address; + uint32_t val = 0; + uint32_t j = 0; + u8 *temp = ramdump_base; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + while (j < size) { + val = hif_read32_mb(scn, scn->mem + loc + j); + qdf_mem_copy(temp, &val, 4); + j += 4; + temp += 4; + } + + Q_TARGET_ACCESS_END(scn); +} +/* + * TBDXXX: Should be a function call specific to each Target-type. + * This convoluted macro converts from Target CPU Virtual Address + * Space to CE Address Space. As part of this process, we + * conservatively fetch the current PCIE_BAR. MOST of the time, + * this should match the upper bits of PCI space for this device; + * but that's not guaranteed. + */ +#ifdef QCA_WIFI_3_0 +#define TARG_CPU_SPACE_TO_CE_SPACE(sc, pci_addr, addr) \ + (scn->mem_pa + addr) +#else +#define TARG_CPU_SPACE_TO_CE_SPACE(sc, pci_addr, addr) \ + (((hif_read32_mb(sc, (pci_addr) + \ + (SOC_CORE_BASE_ADDRESS|CORE_CTRL_ADDRESS)) & 0x7ff) << 21) \ + | 0x100000 | ((addr) & 0xfffff)) +#endif + +#define TARG_CPU_SPACE_TO_CE_SPACE_IPQ4019(scn, pci_addr, addr) \ + (hif_read32_mb(scn, (pci_addr) + (WIFICMN_PCIE_BAR_REG_ADDRESS)) \ + | ((addr) & 0xfffff)) + +#define TARG_CPU_SPACE_TO_CE_SPACE_AR900B(scn, pci_addr, addr) \ + (hif_read32_mb(scn, (pci_addr) + (WIFICMN_PCIE_BAR_REG_ADDRESS)) \ + | 0x100000 | ((addr) & 0xfffff)) + +#define SRAM_BASE_ADDRESS 0xc0000 +#define SRAM_END_ADDRESS 0x100000 +#define WIFI0_IPQ4019_BAR 0xa000000 +#define WIFI1_IPQ4019_BAR 0xa800000 + +/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */ +#define DIAG_ACCESS_CE_TIMEOUT_MS 10 + +/** + * get_ce_phy_addr() - get the physical address of an soc virtual address + * @sc: hif context + * @address: soc virtual address + * @target_type: target type being used. + * + * Return: soc physical address + */ +static qdf_dma_addr_t get_ce_phy_addr(struct hif_softc *sc, uint32_t address, + unsigned int target_type) +{ + qdf_dma_addr_t ce_phy_addr; + struct hif_softc *scn = sc; + unsigned int region = address & 0xfffff; + unsigned int bar = address & 0xfff00000; + unsigned int sramregion = 0; + + if ((target_type == TARGET_TYPE_IPQ4019) && + (region >= SRAM_BASE_ADDRESS && region <= SRAM_END_ADDRESS) + && (bar == WIFI0_IPQ4019_BAR || + bar == WIFI1_IPQ4019_BAR || bar == 0)) { + sramregion = 1; + } + + if ((target_type == TARGET_TYPE_IPQ4019) && sramregion == 1) { + ce_phy_addr = TARG_CPU_SPACE_TO_CE_SPACE_IPQ4019(sc, sc->mem, + address); + } else if ((target_type == TARGET_TYPE_AR900B) || + (target_type == TARGET_TYPE_QCA9984) || + (target_type == TARGET_TYPE_IPQ4019) || + (target_type == TARGET_TYPE_QCA9888)) { + ce_phy_addr = + TARG_CPU_SPACE_TO_CE_SPACE_AR900B(sc, sc->mem, address); + } else { + ce_phy_addr = + TARG_CPU_SPACE_TO_CE_SPACE(sc, sc->mem, address); + } + + return ce_phy_addr; +} + +/* + * Diagnostic read/write access is provided for startup/config/debug usage. + * Caller must guarantee proper alignment, when applicable, and single user + * at any moment. + */ + +#define FW_SRAM_ADDRESS 0x000C0000 + +QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, + uint32_t address, uint8_t *data, int nbytes) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + QDF_STATUS status = QDF_STATUS_SUCCESS; + qdf_dma_addr_t buf; + unsigned int completed_nbytes, orig_nbytes, remaining_bytes; + unsigned int id; + unsigned int flags; + struct CE_handle *ce_diag; + qdf_dma_addr_t CE_data; /* Host buffer address in CE space */ + qdf_dma_addr_t CE_data_base = 0; + void *data_buf = NULL; + int i; + unsigned int mux_id = 0; + unsigned int transaction_id = 0xffff; + qdf_dma_addr_t ce_phy_addr = address; + unsigned int toeplitz_hash_result; + unsigned int user_flags = 0; + unsigned int target_type = 0; + unsigned int boundary_addr = 0; + + ce_diag = hif_state->ce_diag; + if (!ce_diag) { + HIF_ERROR("%s: DIAG CE not present", __func__); + return QDF_STATUS_E_INVAL; + } + /* not supporting diag ce on srng based systems, therefore we know this + * isn't an srng based system */ + + transaction_id = (mux_id & MUX_ID_MASK) | + (transaction_id & TRANSACTION_ID_MASK); +#ifdef QCA_WIFI_3_0 + user_flags &= DESC_DATA_FLAG_MASK; +#endif + target_type = (hif_get_target_info_handle(hif_ctx))->target_type; + + /* This code cannot handle reads to non-memory space. Redirect to the + * register read fn but preserve the multi word read capability of + * this fn + */ + if ((target_type == TARGET_TYPE_IPQ4019) || + (target_type == TARGET_TYPE_AR900B) || + (target_type == TARGET_TYPE_QCA9984) || + (target_type == TARGET_TYPE_AR9888) || + (target_type == TARGET_TYPE_QCA9888)) + boundary_addr = FW_SRAM_ADDRESS; + else + boundary_addr = DRAM_BASE_ADDRESS; + + if (address < boundary_addr) { + + if ((address & 0x3) || ((uintptr_t) data & 0x3)) + return QDF_STATUS_E_INVAL; + + while ((nbytes >= 4) && + (QDF_STATUS_SUCCESS == (status = + hif_diag_read_access(hif_ctx, address, + (uint32_t *)data)))) { + + nbytes -= sizeof(uint32_t); + address += sizeof(uint32_t); + data += sizeof(uint32_t); + + } + + return status; + } + + A_TARGET_ACCESS_LIKELY(scn); + + /* + * Allocate a temporary bounce buffer to hold caller's data + * to be DMA'ed from Target. This guarantees + * 1) 4-byte alignment + * 2) Buffer in DMA-able space + */ + orig_nbytes = nbytes; + data_buf = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev, + orig_nbytes, &CE_data_base); + if (!data_buf) { + status = QDF_STATUS_E_NOMEM; + goto done; + } + qdf_mem_zero(data_buf, orig_nbytes); + + remaining_bytes = orig_nbytes; + CE_data = CE_data_base; + while (remaining_bytes) { + nbytes = min(remaining_bytes, DIAG_TRANSFER_LIMIT); + { + status = ce_recv_buf_enqueue(ce_diag, NULL, CE_data); + if (status != QDF_STATUS_SUCCESS) + goto done; + } + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + /* convert soc virtual address to physical address */ + ce_phy_addr = get_ce_phy_addr(scn, address, target_type); + + if (Q_TARGET_ACCESS_END(scn) < 0) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + /* Request CE to send from Target(!) + * address to Host buffer + */ + status = ce_send(ce_diag, NULL, ce_phy_addr, nbytes, + transaction_id, 0, user_flags); + if (status != QDF_STATUS_SUCCESS) + goto done; + + i = 0; + while (ce_completed_send_next(ce_diag, NULL, NULL, &buf, + &completed_nbytes, &id, NULL, NULL, + &toeplitz_hash_result) != QDF_STATUS_SUCCESS) { + qdf_mdelay(1); + if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + status = QDF_STATUS_E_BUSY; + goto done; + } + } + if (nbytes != completed_nbytes) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + if (buf != ce_phy_addr) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + i = 0; + while (ce_completed_recv_next + (ce_diag, NULL, NULL, &buf, + &completed_nbytes, &id, + &flags) != QDF_STATUS_SUCCESS) { + qdf_mdelay(1); + if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + status = QDF_STATUS_E_BUSY; + goto done; + } + } + if (nbytes != completed_nbytes) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + if (buf != CE_data) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + remaining_bytes -= nbytes; + address += nbytes; + CE_data += nbytes; + } + +done: + A_TARGET_ACCESS_UNLIKELY(scn); + + if (status == QDF_STATUS_SUCCESS) + qdf_mem_copy(data, data_buf, orig_nbytes); + else + HIF_ERROR("%s failure (0x%x)", __func__, address); + + if (data_buf) + qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, + orig_nbytes, data_buf, CE_data_base, 0); + + return status; +} +qdf_export_symbol(hif_diag_read_mem); + +/* Read 4-byte aligned data from Target memory or register */ +QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx, + uint32_t address, uint32_t *data) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (address >= DRAM_BASE_ADDRESS) { + /* Assume range doesn't cross this boundary */ + return hif_diag_read_mem(hif_ctx, address, (uint8_t *) data, + sizeof(uint32_t)); + } else { + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return QDF_STATUS_E_FAILURE; + *data = A_TARGET_READ(scn, address); + if (Q_TARGET_ACCESS_END(scn) < 0) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; + } +} + +/** + * hif_diag_write_mem() - write data into the soc memory + * @hif_ctx: hif context + * @address: soc virtual address + * @data: data to copy into the soc address + * @nbytes: number of bytes to coppy + */ +QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx, + uint32_t address, uint8_t *data, int nbytes) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + QDF_STATUS status = QDF_STATUS_SUCCESS; + qdf_dma_addr_t buf; + unsigned int completed_nbytes, orig_nbytes, remaining_bytes; + unsigned int id; + unsigned int flags; + struct CE_handle *ce_diag; + void *data_buf = NULL; + qdf_dma_addr_t CE_data; /* Host buffer address in CE space */ + qdf_dma_addr_t CE_data_base = 0; + int i; + unsigned int mux_id = 0; + unsigned int transaction_id = 0xffff; + qdf_dma_addr_t ce_phy_addr = address; + unsigned int toeplitz_hash_result; + unsigned int user_flags = 0; + unsigned int target_type = 0; + + ce_diag = hif_state->ce_diag; + if (!ce_diag) { + HIF_ERROR("%s: DIAG CE not present", __func__); + return QDF_STATUS_E_INVAL; + } + /* not supporting diag ce on srng based systems, therefore we know this + * isn't an srng based system */ + + transaction_id = (mux_id & MUX_ID_MASK) | + (transaction_id & TRANSACTION_ID_MASK); +#ifdef QCA_WIFI_3_0 + user_flags &= DESC_DATA_FLAG_MASK; +#endif + + A_TARGET_ACCESS_LIKELY(scn); + + /* + * Allocate a temporary bounce buffer to hold caller's data + * to be DMA'ed to Target. This guarantees + * 1) 4-byte alignment + * 2) Buffer in DMA-able space + */ + orig_nbytes = nbytes; + data_buf = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev, + orig_nbytes, &CE_data_base); + if (!data_buf) { + status = QDF_STATUS_E_NOMEM; + goto done; + } + + /* Copy caller's data to allocated DMA buf */ + qdf_mem_copy(data_buf, data, orig_nbytes); + qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data_base, + orig_nbytes, DMA_TO_DEVICE); + + target_type = (hif_get_target_info_handle(hif_ctx))->target_type; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + /* convert soc virtual address to physical address */ + ce_phy_addr = get_ce_phy_addr(scn, address, target_type); + + if (Q_TARGET_ACCESS_END(scn) < 0) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + remaining_bytes = orig_nbytes; + CE_data = CE_data_base; + while (remaining_bytes) { + nbytes = min(remaining_bytes, DIAG_TRANSFER_LIMIT); + + /* Set up to receive directly into Target(!) address */ + status = ce_recv_buf_enqueue(ce_diag, NULL, ce_phy_addr); + if (status != QDF_STATUS_SUCCESS) + goto done; + + /* + * Request CE to send caller-supplied data that + * was copied to bounce buffer to Target(!) address. + */ + status = ce_send(ce_diag, NULL, (qdf_dma_addr_t) CE_data, + nbytes, transaction_id, 0, user_flags); + + if (status != QDF_STATUS_SUCCESS) + goto done; + + /* poll for transfer complete */ + i = 0; + while (ce_completed_send_next(ce_diag, NULL, NULL, &buf, + &completed_nbytes, &id, + NULL, NULL, &toeplitz_hash_result) != + QDF_STATUS_SUCCESS) { + qdf_mdelay(1); + if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + status = QDF_STATUS_E_BUSY; + goto done; + } + } + + if (nbytes != completed_nbytes) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + if (buf != CE_data) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + i = 0; + while (ce_completed_recv_next + (ce_diag, NULL, NULL, &buf, + &completed_nbytes, &id, + &flags) != QDF_STATUS_SUCCESS) { + qdf_mdelay(1); + if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + status = QDF_STATUS_E_BUSY; + goto done; + } + } + + if (nbytes != completed_nbytes) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + if (buf != ce_phy_addr) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + remaining_bytes -= nbytes; + address += nbytes; + CE_data += nbytes; + } + +done: + A_TARGET_ACCESS_UNLIKELY(scn); + + if (data_buf) { + qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, + orig_nbytes, data_buf, CE_data_base, 0); + } + + if (status != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s failure (0x%llx)", __func__, + (uint64_t)ce_phy_addr); + } + + return status; +} + +/* Write 4B data to Target memory or register */ +QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx, + uint32_t address, uint32_t data) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (address >= DRAM_BASE_ADDRESS) { + /* Assume range doesn't cross this boundary */ + uint32_t data_buf = data; + + return hif_diag_write_mem(hif_ctx, address, + (uint8_t *) &data_buf, + sizeof(uint32_t)); + } else { + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return QDF_STATUS_E_FAILURE; + A_TARGET_WRITE(scn, address, data); + if (Q_TARGET_ACCESS_END(scn) < 0) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_internal.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..94b148746e26962c854d3960ad7a5eee4e781ed9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_internal.h @@ -0,0 +1,742 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __COPY_ENGINE_INTERNAL_H__ +#define __COPY_ENGINE_INTERNAL_H__ + +#include /* A_TARGET_WRITE */ + +/* Copy Engine operational state */ +enum CE_op_state { + CE_UNUSED, + CE_PAUSED, + CE_RUNNING, + CE_PENDING, +}; + +enum ol_ath_hif_ce_ecodes { + CE_RING_DELTA_FAIL = 0 +}; + +struct CE_src_desc; + +/* Copy Engine Ring internal state */ +struct CE_ring_state { + + /* Number of entries in this ring; must be power of 2 */ + unsigned int nentries; + unsigned int nentries_mask; + + /* + * For dest ring, this is the next index to be processed + * by software after it was/is received into. + * + * For src ring, this is the last descriptor that was sent + * and completion processed by software. + * + * Regardless of src or dest ring, this is an invariant + * (modulo ring size): + * write index >= read index >= sw_index + */ + unsigned int sw_index; + unsigned int write_index; /* cached copy */ + /* + * For src ring, this is the next index not yet processed by HW. + * This is a cached copy of the real HW index (read index), used + * for avoiding reading the HW index register more often than + * necessary. + * This extends the invariant: + * write index >= read index >= hw_index >= sw_index + * + * For dest ring, this is currently unused. + */ + unsigned int hw_index; /* cached copy */ + + /* Start of DMA-coherent area reserved for descriptors */ + void *base_addr_owner_space_unaligned; /* Host address space */ + qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */ + + /* + * Actual start of descriptors. + * Aligned to descriptor-size boundary. + * Points into reserved DMA-coherent area, above. + */ + void *base_addr_owner_space; /* Host address space */ + qdf_dma_addr_t base_addr_CE_space; /* CE address space */ + /* + * Start of shadow copy of descriptors, within regular memory. + * Aligned to descriptor-size boundary. + */ + char *shadow_base_unaligned; + struct CE_src_desc *shadow_base; + + unsigned int low_water_mark_nentries; + unsigned int high_water_mark_nentries; + void *srng_ctx; + void **per_transfer_context; + + /* HAL CE ring type */ + uint32_t hal_ring_type; + /* ring memory prealloc */ + uint8_t is_ring_prealloc; + + OS_DMA_MEM_CONTEXT(ce_dmacontext); /* OS Specific DMA context */ +}; + +/* Copy Engine internal state */ +struct CE_state { + struct hif_softc *scn; + unsigned int id; + unsigned int attr_flags; /* CE_ATTR_* */ + uint32_t ctrl_addr; /* relative to BAR */ + enum CE_op_state state; + +#ifdef WLAN_FEATURE_FASTPATH + fastpath_msg_handler fastpath_handler; + void *context; +#endif /* WLAN_FEATURE_FASTPATH */ + qdf_work_t oom_allocation_work; + + ce_send_cb send_cb; + void *send_context; + + CE_recv_cb recv_cb; + void *recv_context; + + /* misc_cbs - are any callbacks besides send and recv enabled? */ + uint8_t misc_cbs; + + CE_watermark_cb watermark_cb; + void *wm_context; + + /*Record the state of the copy compl interrupt */ + int disable_copy_compl_intr; + + unsigned int src_sz_max; + struct CE_ring_state *src_ring; + struct CE_ring_state *dest_ring; + struct CE_ring_state *status_ring; + atomic_t rx_pending; + + qdf_spinlock_t ce_index_lock; + /* Flag to indicate whether to break out the DPC context */ + bool force_break; + + /* time in nanoseconds to yield control of napi poll */ + unsigned long long ce_service_yield_time; + /* CE service start time in nanoseconds */ + unsigned long long ce_service_start_time; + /* Num Of Receive Buffers handled for one interrupt DPC routine */ + unsigned int receive_count; + /* epping */ + bool timer_inited; + qdf_timer_t poll_timer; + + /* datapath - for faster access, use bools instead of a bitmap */ + bool htt_tx_data; + bool htt_rx_data; + qdf_lro_ctx_t lro_data; + + void (*service)(struct hif_softc *scn, int CE_id); +}; + +/* Descriptor rings must be aligned to this boundary */ +#define CE_DESC_RING_ALIGN 8 +#define CLOCK_OVERRIDE 0x2 + +#ifdef QCA_WIFI_3_0 +#define HIF_CE_DESC_ADDR_TO_DMA(desc) \ + (qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \ + ((uint64_t)((desc)->buffer_addr_hi & 0x1F) << 32))) +#else +#define HIF_CE_DESC_ADDR_TO_DMA(desc) \ + (qdf_dma_addr_t)((desc)->buffer_addr) +#endif + +#ifdef QCA_WIFI_3_0 +struct CE_src_desc { + uint32_t buffer_addr:32; +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t gather:1, + enable_11h:1, + meta_data_low:2, /* fw_metadata_low */ + packet_result_offset:12, + toeplitz_hash_enable:1, + addr_y_search_disable:1, + addr_x_search_disable:1, + misc_int_disable:1, + target_int_disable:1, + host_int_disable:1, + dest_byte_swap:1, + byte_swap:1, + type:2, + tx_classify:1, + buffer_addr_hi:5; + uint32_t meta_data:16, /* fw_metadata_high */ + nbytes:16; /* length in register map */ +#else + uint32_t buffer_addr_hi:5, + tx_classify:1, + type:2, + byte_swap:1, /* src_byte_swap */ + dest_byte_swap:1, + host_int_disable:1, + target_int_disable:1, + misc_int_disable:1, + addr_x_search_disable:1, + addr_y_search_disable:1, + toeplitz_hash_enable:1, + packet_result_offset:12, + meta_data_low:2, /* fw_metadata_low */ + enable_11h:1, + gather:1; + uint32_t nbytes:16, /* length in register map */ + meta_data:16; /* fw_metadata_high */ +#endif + uint32_t toeplitz_hash_result:32; +}; + +struct CE_dest_desc { + uint32_t buffer_addr:32; +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t gather:1, + enable_11h:1, + meta_data_low:2, /* fw_metadata_low */ + packet_result_offset:12, + toeplitz_hash_enable:1, + addr_y_search_disable:1, + addr_x_search_disable:1, + misc_int_disable:1, + target_int_disable:1, + host_int_disable:1, + byte_swap:1, + src_byte_swap:1, + type:2, + tx_classify:1, + buffer_addr_hi:5; + uint32_t meta_data:16, /* fw_metadata_high */ + nbytes:16; /* length in register map */ +#else + uint32_t buffer_addr_hi:5, + tx_classify:1, + type:2, + src_byte_swap:1, + byte_swap:1, /* dest_byte_swap */ + host_int_disable:1, + target_int_disable:1, + misc_int_disable:1, + addr_x_search_disable:1, + addr_y_search_disable:1, + toeplitz_hash_enable:1, + packet_result_offset:12, + meta_data_low:2, /* fw_metadata_low */ + enable_11h:1, + gather:1; + uint32_t nbytes:16, /* length in register map */ + meta_data:16; /* fw_metadata_high */ +#endif + uint32_t toeplitz_hash_result:32; +}; +#else +struct CE_src_desc { + uint32_t buffer_addr; +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t meta_data:12, + target_int_disable:1, + host_int_disable:1, + byte_swap:1, + gather:1, + nbytes:16; +#else + + uint32_t nbytes:16, + gather:1, + byte_swap:1, + host_int_disable:1, + target_int_disable:1, + meta_data:12; +#endif +}; + +struct CE_dest_desc { + uint32_t buffer_addr; +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t meta_data:12, + target_int_disable:1, + host_int_disable:1, + byte_swap:1, + gather:1, + nbytes:16; +#else + uint32_t nbytes:16, + gather:1, + byte_swap:1, + host_int_disable:1, + target_int_disable:1, + meta_data:12; +#endif +}; +#endif /* QCA_WIFI_3_0 */ + +struct ce_srng_src_desc { + uint32_t buffer_addr_lo; +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t nbytes:16, + rsvd:4, + gather:1, + dest_swap:1, + byte_swap:1, + toeplitz_hash_enable:1, + buffer_addr_hi:8; + uint32_t rsvd1:16, + meta_data:16; + uint32_t loop_count:4, + ring_id:8, + rsvd3:20; +#else + uint32_t buffer_addr_hi:8, + toeplitz_hash_enable:1, + byte_swap:1, + dest_swap:1, + gather:1, + rsvd:4, + nbytes:16; + uint32_t meta_data:16, + rsvd1:16; + uint32_t rsvd3:20, + ring_id:8, + loop_count:4; +#endif +}; +struct ce_srng_dest_desc { + uint32_t buffer_addr_lo; +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t loop_count:4, + ring_id:8, + rsvd1:12, + buffer_addr_hi:8; +#else + uint32_t buffer_addr_hi:8, + rsvd1:12, + ring_id:8, + loop_count:4; +#endif +}; +struct ce_srng_dest_status_desc { +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t nbytes:16, + rsvd:4, + gather:1, + dest_swap:1, + byte_swap:1, + toeplitz_hash_enable:1, + rsvd0:8; + uint32_t rsvd1:16, + meta_data:16; +#else + uint32_t rsvd0:8, + toeplitz_hash_enable:1, + byte_swap:1, + dest_swap:1, + gather:1, + rsvd:4, + nbytes:16; + uint32_t meta_data:16, + rsvd1:16; +#endif + uint32_t toeplitz_hash; +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t loop_count:4, + ring_id:8, + rsvd3:20; +#else + uint32_t rsvd3:20, + ring_id:8, + loop_count:4; +#endif +}; + +#define CE_SENDLIST_ITEMS_MAX 12 + +/** + * union ce_desc - unified data type for ce descriptors + * + * Both src and destination descriptors follow the same format. + * They use different data structures for different access symantics. + * Here we provice a unifying data type. + */ +union ce_desc { + struct CE_src_desc src_desc; + struct CE_dest_desc dest_desc; +}; + +/** + * union ce_srng_desc - unified data type for ce srng descriptors + * @src_desc: ce srng Source ring descriptor + * @dest_desc: ce srng destination ring descriptor + * @dest_status_desc: ce srng status ring descriptor + */ +union ce_srng_desc { + struct ce_srng_src_desc src_desc; + struct ce_srng_dest_desc dest_desc; + struct ce_srng_dest_status_desc dest_status_desc; +}; + +/** + * enum hif_ce_event_type - HIF copy engine event type + * @HIF_RX_DESC_POST: event recorded before updating write index of RX ring. + * @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring. + * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update) + * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring. + * @HIF_TX_DESC_SOFTWARE_POST: event recorded when dropping a write to the write + * index in a normal tx + * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring. + * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index + * of the RX ring in fastpath + * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software + * index of the RX ring in fastpath + * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index + * of the TX ring in fastpath + * @FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: recored when dropping a write to + * the write index in fastpath + * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software + * index of the RX ring in fastpath + * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh + * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet + * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet + * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule + * @HIF_CE_REAP_ENTRY: records when we process completion outside of a bh + * @HIF_CE_REAP_EXIT: records when we process completion outside of a bh + * @NAPI_SCHEDULE: records when napi is scheduled from the irq context + * @NAPI_POLL_ENTER: records the start of the napi poll function + * @NAPI_COMPLETE: records when interrupts are reenabled + * @NAPI_POLL_EXIT: records when the napi poll function returns + * @HIF_RX_NBUF_ALLOC_FAILURE: record the packet when nbuf fails to allocate + * @HIF_RX_NBUF_MAP_FAILURE: record the packet when dma map fails + * @HIF_RX_NBUF_ENQUEUE_FAILURE: record the packet when enqueue to ce fails + * @HIF_CE_SRC_RING_BUFFER_POST: record the packet when buffer is posted to ce src ring + * @HIF_CE_DEST_RING_BUFFER_POST: record the packet when buffer is posted to ce dst ring + * @HIF_CE_DEST_RING_BUFFER_REAP: record the packet when buffer is reaped from ce dst ring + * @HIF_CE_DEST_STATUS_RING_REAP: record the packet when status ring is reaped + * @HIF_RX_DESC_PRE_NBUF_ALLOC: record the packet before nbuf allocation + * @HIF_RX_DESC_PRE_NBUF_MAP: record the packet before nbuf map + * @HIF_RX_DESC_POST_NBUF_MAP: record the packet after nbuf map + */ +enum hif_ce_event_type { + HIF_RX_DESC_POST, + HIF_RX_DESC_COMPLETION, + HIF_TX_GATHER_DESC_POST, + HIF_TX_DESC_POST, + HIF_TX_DESC_SOFTWARE_POST, + HIF_TX_DESC_COMPLETION, + FAST_RX_WRITE_INDEX_UPDATE, + FAST_RX_SOFTWARE_INDEX_UPDATE, + FAST_TX_WRITE_INDEX_UPDATE, + FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE, + FAST_TX_SOFTWARE_INDEX_UPDATE, + RESUME_WRITE_INDEX_UPDATE, + + HIF_IRQ_EVENT = 0x10, + HIF_CE_TASKLET_ENTRY, + HIF_CE_TASKLET_RESCHEDULE, + HIF_CE_TASKLET_EXIT, + HIF_CE_REAP_ENTRY, + HIF_CE_REAP_EXIT, + NAPI_SCHEDULE, + NAPI_POLL_ENTER, + NAPI_COMPLETE, + NAPI_POLL_EXIT, + + HIF_RX_NBUF_ALLOC_FAILURE = 0x20, + HIF_RX_NBUF_MAP_FAILURE, + HIF_RX_NBUF_ENQUEUE_FAILURE, + + HIF_CE_SRC_RING_BUFFER_POST, + HIF_CE_DEST_RING_BUFFER_POST, + HIF_CE_DEST_RING_BUFFER_REAP, + HIF_CE_DEST_STATUS_RING_REAP, + + HIF_RX_DESC_PRE_NBUF_ALLOC, + HIF_RX_DESC_PRE_NBUF_MAP, + HIF_RX_DESC_POST_NBUF_MAP, + + HIF_EVENT_TYPE_MAX, +}; + +void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size); +void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id); +void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id, + enum hif_ce_event_type type, + union ce_desc *descriptor, void *memory, + int index, int len); + +enum ce_sendlist_type_e { + CE_SIMPLE_BUFFER_TYPE, + /* TBDXXX: CE_RX_DESC_LIST, */ +}; + +/* + * There's a public "ce_sendlist" and a private "ce_sendlist_s". + * The former is an opaque structure with sufficient space + * to hold the latter. The latter is the actual structure + * definition and it is only used internally. The opaque version + * of the structure allows callers to allocate an instance on the + * run-time stack without knowing any of the details of the + * structure layout. + */ +struct ce_sendlist_s { + unsigned int num_items; + struct ce_sendlist_item { + enum ce_sendlist_type_e send_type; + dma_addr_t data; /* e.g. buffer or desc list */ + union { + unsigned int nbytes; /* simple buffer */ + unsigned int ndesc; /* Rx descriptor list */ + } u; + /* flags: externally-specified flags; + * OR-ed with internal flags + */ + uint32_t flags; + uint32_t user_flags; + } item[CE_SENDLIST_ITEMS_MAX]; +}; + +bool hif_ce_service_should_yield(struct hif_softc *scn, struct CE_state + *ce_state); + +#ifdef WLAN_FEATURE_FASTPATH +void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl); +void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl); +#else +static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl) +{ +} + +static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl) +{ +} +#endif + +/* which ring of a CE? */ +#define CE_RING_SRC 0 +#define CE_RING_DEST 1 +#define CE_RING_STATUS 2 + +#define CDC_WAR_MAGIC_STR 0xceef0000 +#define CDC_WAR_DATA_CE 4 + +/* Additional internal-only ce_send flags */ +#define CE_SEND_FLAG_GATHER 0x00010000 /* Use Gather */ + +/** + * hif_get_wake_ce_id() - gets the copy engine id used for waking up + * @scn: The hif context to use + * @ce_id: a pointer where the copy engine Id should be populated + * + * Return: errno + */ +int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id); + +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) + +#ifndef HIF_CE_HISTORY_MAX +#define HIF_CE_HISTORY_MAX 1024 +#endif + +#define CE_DEBUG_MAX_DATA_BUF_SIZE 64 + +/** + * struct hif_ce_desc_event - structure for detailing a ce event + * @index: location of the descriptor in the ce ring; + * @type: what the event was + * @time: when it happened + * @current_hp: holds the current ring hp value + * @current_tp: holds the current ring tp value + * @descriptor: descriptor enqueued or dequeued + * @memory: virtual address that was used + * @dma_addr: physical/iova address based on smmu status + * @dma_to_phy: physical address from iova address + * @virt_to_phy: physical address from virtual address + * @actual_data_len: length of the data + * @data: data pointed by descriptor + */ +struct hif_ce_desc_event { + int index; + enum hif_ce_event_type type; + uint64_t time; + int cpu_id; +#ifdef HELIUMPLUS + union ce_desc descriptor; +#else + uint32_t current_hp; + uint32_t current_tp; + union ce_srng_desc descriptor; +#endif + void *memory; + +#ifdef HIF_RECORD_PADDR + /* iova/pa based on smmu status */ + qdf_dma_addr_t dma_addr; + /* store pa from iova address */ + qdf_dma_addr_t dma_to_phy; + /* store pa */ + qdf_dma_addr_t virt_to_phy; +#endif /* HIF_RECORD_ADDR */ + +#ifdef HIF_CE_DEBUG_DATA_BUF + size_t actual_data_len; + uint8_t *data; +#endif /* HIF_CE_DEBUG_DATA_BUF */ +}; +#else +struct hif_ce_desc_event; +#endif /*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/ + +/** + * get_next_record_index() - get the next record index + * @table_index: atomic index variable to increment + * @array_size: array size of the circular buffer + * + * Increment the atomic index and reserve the value. + * Takes care of buffer wrap. + * Guaranteed to be thread safe as long as fewer than array_size contexts + * try to access the array. If there are more than array_size contexts + * trying to access the array, full locking of the recording process would + * be needed to have sane logging. + */ +int get_next_record_index(qdf_atomic_t *table_index, int array_size); + +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) +/** + * hif_record_ce_srng_desc_event() - Record data pointed by the CE descriptor + * @scn: structure detailing a ce event + * @ce_id: length of the data + * @type: event_type + * @descriptor: ce src/dest/status ring descriptor + * @memory: nbuf + * @index: current sw/write index + * @len: len of the buffer + * @hal_ring: ce hw ring + * + * Return: None + */ +void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id, + enum hif_ce_event_type type, + union ce_srng_desc *descriptor, + void *memory, int index, + int len, void *hal_ring); + +/** + * hif_clear_ce_desc_debug_data() - Clear the contents of hif_ce_desc_event + * upto data field before reusing it. + * + * @event: record every CE event + * + * Return: None + */ +void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event); +#else +static inline +void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id, + enum hif_ce_event_type type, + union ce_srng_desc *descriptor, + void *memory, int index, + int len, void *hal_ring) +{ +} + +static inline +void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event) +{ +} +#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */ + +#ifdef HIF_CE_DEBUG_DATA_BUF +/** + * hif_ce_desc_data_record() - Record data pointed by the CE descriptor + * @event: structure detailing a ce event + * @len: length of the data + * Return: + */ +void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len); + +QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id); +void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id); +#else +static inline +QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) +{ + return QDF_STATUS_SUCCESS; +} + +static inline +void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) { } + +static inline +void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len) +{ +} +#endif /*HIF_CE_DEBUG_DATA_BUF*/ + +#ifdef HIF_CONFIG_SLUB_DEBUG_ON +/** + * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors + * @nbytes: nbytes value being written into a send descriptor + * @ce_state: context of the copy engine + + * nbytes should be non-zero and less than max configured for the copy engine + * + * Return: none + */ +static inline void ce_validate_nbytes(uint32_t nbytes, + struct CE_state *ce_state) +{ + if (nbytes <= 0 || nbytes > ce_state->src_sz_max) + QDF_BUG(0); +} +#else +static inline void ce_validate_nbytes(uint32_t nbytes, + struct CE_state *ce_state) +{ +} +#endif /* HIF_CONFIG_SLUB_DEBUG_ON */ + +#if defined(HIF_RECORD_PADDR) +/** + * hif_ce_desc_record_rx_paddr() - record physical address for IOMMU + * IOVA addr and MMU virtual addr for Rx + * @scn: hif_softc + * @nbuf: buffer posted to fw + * + * record physical address for ce_event_type HIF_RX_DESC_POST and + * HIF_RX_DESC_COMPLETION + * + * Return: none + */ +void hif_ce_desc_record_rx_paddr(struct hif_softc *scn, + struct hif_ce_desc_event *event, + qdf_nbuf_t nbuf); +#else +static inline +void hif_ce_desc_record_rx_paddr(struct hif_softc *scn, + struct hif_ce_desc_event *event, + qdf_nbuf_t nbuf) +{ +} +#endif /* HIF_RECORD_PADDR */ +#endif /* __COPY_ENGINE_INTERNAL_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.c new file mode 100644 index 0000000000000000000000000000000000000000..ccf04ce52f13b66ed5e4e331135bd8d76d6ce90a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.c @@ -0,0 +1,4304 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "targcfg.h" +#include "qdf_lock.h" +#include "qdf_status.h" +#include "qdf_status.h" +#include /* qdf_atomic_read */ +#include +#include "hif_io32.h" +#include +#include +#include "regtable.h" +#define ATH_MODULE_NAME hif +#include +#include "hif_main.h" +#include "ce_api.h" +#include "qdf_trace.h" +#include "pld_common.h" +#include "hif_debug.h" +#include "ce_internal.h" +#include "ce_reg.h" +#include "ce_assignment.h" +#include "ce_tasklet.h" +#include "qdf_module.h" + +#define CE_POLL_TIMEOUT 10 /* ms */ + +#define AGC_DUMP 1 +#define CHANINFO_DUMP 2 +#define BB_WATCHDOG_DUMP 3 +#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG +#define PCIE_ACCESS_DUMP 4 +#endif +#include "mp_dev.h" +#ifdef HIF_CE_LOG_INFO +#include "qdf_hang_event_notifier.h" +#endif + +#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \ + defined(QCA_WIFI_QCA6018)) && !defined(QCA_WIFI_SUPPORT_SRNG) +#define QCA_WIFI_SUPPORT_SRNG +#endif + +#ifdef QCA_WIFI_SUPPORT_SRNG +#include +#endif + +/* Forward references */ +QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info); + +/* + * Fix EV118783, poll to check whether a BMI response comes + * other than waiting for the interruption which may be lost. + */ +/* #define BMI_RSP_POLLING */ +#define BMI_RSP_TO_MILLISEC 1000 + +#ifdef CONFIG_BYPASS_QMI +#define BYPASS_QMI 1 +#else +#define BYPASS_QMI 0 +#endif + +#ifdef ENABLE_10_4_FW_HDR +#if (ENABLE_10_4_FW_HDR == 1) +#define WDI_IPA_SERVICE_GROUP 5 +#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0) +#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1) +#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2) +#endif /* ENABLE_10_4_FW_HDR == 1 */ +#endif /* ENABLE_10_4_FW_HDR */ + +QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn); +static void hif_config_rri_on_ddr(struct hif_softc *scn); + +/** + * hif_target_access_log_dump() - dump access log + * + * dump access log + * + * Return: n/a + */ +#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG +static void hif_target_access_log_dump(void) +{ + hif_target_dump_access_log(); +} +#endif + + +void hif_trigger_dump(struct hif_opaque_softc *hif_ctx, + uint8_t cmd_id, bool start) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + switch (cmd_id) { + case AGC_DUMP: + if (start) + priv_start_agc(scn); + else + priv_dump_agc(scn); + break; + case CHANINFO_DUMP: + if (start) + priv_start_cap_chaninfo(scn); + else + priv_dump_chaninfo(scn); + break; + case BB_WATCHDOG_DUMP: + priv_dump_bbwatchdog(scn); + break; +#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG + case PCIE_ACCESS_DUMP: + hif_target_access_log_dump(); + break; +#endif + default: + HIF_ERROR("%s: Invalid htc dump command", __func__); + break; + } +} + +static void ce_poll_timeout(void *arg) +{ + struct CE_state *CE_state = (struct CE_state *)arg; + + if (CE_state->timer_inited) { + ce_per_engine_service(CE_state->scn, CE_state->id); + qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT); + } +} + +static unsigned int roundup_pwr2(unsigned int n) +{ + int i; + unsigned int test_pwr2; + + if (!(n & (n - 1))) + return n; /* already a power of 2 */ + + test_pwr2 = 4; + for (i = 0; i < 29; i++) { + if (test_pwr2 > n) + return test_pwr2; + test_pwr2 = test_pwr2 << 1; + } + + QDF_ASSERT(0); /* n too large */ + return 0; +} + +#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C +#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40 + +static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = { + { 0, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 3, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 4, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 5, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 7, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 1, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 2, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 7, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 8, ADRASTEA_DST_WR_INDEX_OFFSET}, +#ifdef QCA_WIFI_3_0_ADRASTEA + { 9, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 10, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 11, ADRASTEA_DST_WR_INDEX_OFFSET}, +#endif +}; + +#ifdef QCN7605_SUPPORT +static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = { + { 0, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 4, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 5, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 3, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 1, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 2, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 7, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 8, ADRASTEA_DST_WR_INDEX_OFFSET}, +}; +#endif + +#ifdef WLAN_FEATURE_EPPING +static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = { + { 0, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 3, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 4, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 7, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 1, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 2, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 5, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 7, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 8, ADRASTEA_DST_WR_INDEX_OFFSET}, +}; +#endif + +/* CE_PCI TABLE */ +/* + * NOTE: the table below is out of date, though still a useful reference. + * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual + * mapping of HTC services to HIF pipes. + */ +/* + * This authoritative table defines Copy Engine configuration and the mapping + * of services/endpoints to CEs. A subset of this information is passed to + * the Target during startup as a prerequisite to entering BMI phase. + * See: + * target_service_to_ce_map - Target-side mapping + * hif_map_service_to_pipe - Host-side mapping + * target_ce_config - Target-side configuration + * host_ce_config - Host-side configuration + ============================================================================ + Purpose | Service / Endpoint | CE | Dire | Xfer | Xfer + | | | ctio | Size | Frequency + | | | n | | + ============================================================================ + tx | HTT_DATA (downlink) | CE 0 | h->t | medium - | very frequent + descriptor | | | | O(100B) | and regular + download | | | | | + ---------------------------------------------------------------------------- + rx | HTT_DATA (uplink) | CE 1 | t->h | small - | frequent and + indication | | | | O(10B) | regular + upload | | | | | + ---------------------------------------------------------------------------- + MSDU | DATA_BK (uplink) | CE 2 | t->h | large - | rare + upload | | | | O(1000B) | (frequent + e.g. noise | | | | | during IP1.0 + packets | | | | | testing) + ---------------------------------------------------------------------------- + MSDU | DATA_BK (downlink) | CE 3 | h->t | large - | very rare + download | | | | O(1000B) | (frequent + e.g. | | | | | during IP1.0 + misdirecte | | | | | testing) + d EAPOL | | | | | + packets | | | | | + ---------------------------------------------------------------------------- + n/a | DATA_BE, DATA_VI | CE 2 | t->h | | never(?) + | DATA_VO (uplink) | | | | + ---------------------------------------------------------------------------- + n/a | DATA_BE, DATA_VI | CE 3 | h->t | | never(?) + | DATA_VO (downlink) | | | | + ---------------------------------------------------------------------------- + WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent + | | | | O(100B) | + ---------------------------------------------------------------------------- + WMI | WMI_CONTROL | CE 5 | h->t | medium - | infrequent + messages | (downlink) | | | O(100B) | + | | | | | + ---------------------------------------------------------------------------- + n/a | HTC_CTRL_RSVD, | CE 1 | t->h | | never(?) + | HTC_RAW_STREAMS | | | | + | (uplink) | | | | + ---------------------------------------------------------------------------- + n/a | HTC_CTRL_RSVD, | CE 0 | h->t | | never(?) + | HTC_RAW_STREAMS | | | | + | (downlink) | | | | + ---------------------------------------------------------------------------- + diag | none (raw CE) | CE 7 | t<>h | 4 | Diag Window + | | | | | infrequent + ============================================================================ + */ + +/* + * Map from service/endpoint to Copy Engine. + * This table is derived from the CE_PCI TABLE, above. + * It is passed to the Target at startup for use by firmware. + */ +static struct service_to_pipe target_service_to_ce_map_wlan[] = { + { + WMI_DATA_VO_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_VO_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_DATA_BK_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_BK_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_DATA_BE_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_BE_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_DATA_VI_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_VI_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_CONTROL_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_CONTROL_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + HTC_CTRL_RSVD_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 0, /* could be moved to 3 (share with WMI) */ + }, + { + HTC_CTRL_RSVD_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + HTC_RAW_STREAMS_SVC, /* not currently used */ + PIPEDIR_OUT, /* out = UL = host -> target */ + 0, + }, + { + HTC_RAW_STREAMS_SVC, /* not currently used */ + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + HTT_DATA_MSG_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 4, + }, + { + HTT_DATA_MSG_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 1, + }, + { + WDI_IPA_TX_SVC, + PIPEDIR_OUT, /* in = DL = target -> host */ + 5, + }, +#if defined(QCA_WIFI_3_0_ADRASTEA) + { + HTT_DATA2_MSG_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 9, + }, + { + HTT_DATA3_MSG_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 10, + }, + { + PACKET_LOG_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 11, + }, +#endif + /* (Additions here) */ + + { /* Must be last */ + 0, + 0, + 0, + }, +}; + +/* PIPEDIR_OUT = HOST to Target */ +/* PIPEDIR_IN = TARGET to HOST */ +#if (defined(QCA_WIFI_QCA8074)) +static struct service_to_pipe target_service_to_ce_map_qca8074[] = { + { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, + { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7}, + { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2}, + { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, }, + { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0}, + { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 }, + { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, + { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, + { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, + /* (Additions here) */ + { 0, 0, 0, }, +}; +#else +static struct service_to_pipe target_service_to_ce_map_qca8074[] = { +}; +#endif + +#if (defined(QCA_WIFI_QCA8074V2)) +static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = { + { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, + { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7}, + { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2}, + { WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9}, + { WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2}, + { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, }, + { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0}, + { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 }, + { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, + { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, + { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, + /* (Additions here) */ + { 0, 0, 0, }, +}; +#else +static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = { +}; +#endif + +#if (defined(QCA_WIFI_QCA6018)) +static struct service_to_pipe target_service_to_ce_map_qca6018[] = { + { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, + { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7}, + { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2}, + { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, }, + { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0}, + { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 }, + { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, + { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, + { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, + /* (Additions here) */ + { 0, 0, 0, }, +}; +#else +static struct service_to_pipe target_service_to_ce_map_qca6018[] = { +}; +#endif + +#if (defined(QCA_WIFI_QCN9000)) +static struct service_to_pipe target_service_to_ce_map_qcn9000[] = { + { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, + { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, }, + { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0}, + { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 }, + { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, + { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, + { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, + /* (Additions here) */ + { 0, 0, 0, }, +}; +#else +static struct service_to_pipe target_service_to_ce_map_qcn9000[] = { +}; +#endif + +/* PIPEDIR_OUT = HOST to Target */ +/* PIPEDIR_IN = TARGET to HOST */ +#ifdef QCN7605_SUPPORT +static struct service_to_pipe target_service_to_ce_map_qcn7605[] = { + { WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, }, + { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, }, + { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, }, + { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, }, + { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC, PIPEDIR_OUT, 0, }, + { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, }, + { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, }, + { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, }, + { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, + { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, + { HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, }, +#ifdef IPA_OFFLOAD + { WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, }, +#else + { HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, }, +#endif + { PACKET_LOG_SVC, PIPEDIR_IN, 7, }, + /* (Additions here) */ + { 0, 0, 0, }, +}; +#endif + +#if (defined(QCA_WIFI_QCA6290)) +#ifdef QCA_6290_AP_MODE +static struct service_to_pipe target_service_to_ce_map_qca6290[] = { + { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VO_SVC, PIPEDIR_IN , 2, }, + { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BK_SVC, PIPEDIR_IN , 2, }, + { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BE_SVC, PIPEDIR_IN , 2, }, + { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VI_SVC, PIPEDIR_IN , 2, }, + { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, + { WMI_CONTROL_SVC, PIPEDIR_IN , 2, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_IN , 2, }, + { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, + { HTT_DATA_MSG_SVC, PIPEDIR_IN , 1, }, + { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7}, + { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2}, + { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, + /* (Additions here) */ + { 0, 0, 0, }, +}; +#else +static struct service_to_pipe target_service_to_ce_map_qca6290[] = { + { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, + { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, }, + { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, + { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, + /* (Additions here) */ + { 0, 0, 0, }, +}; +#endif +#else +static struct service_to_pipe target_service_to_ce_map_qca6290[] = { +}; +#endif + +#if (defined(QCA_WIFI_QCA6390)) +static struct service_to_pipe target_service_to_ce_map_qca6390[] = { + { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, + { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, }, + { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, + { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, + { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, + /* (Additions here) */ + { 0, 0, 0, }, +}; +#else +static struct service_to_pipe target_service_to_ce_map_qca6390[] = { +}; +#endif + +static struct service_to_pipe target_service_to_ce_map_qca6490[] = { + { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, + { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, }, + { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, + { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, + { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, + /* (Additions here) */ + { 0, 0, 0, }, +}; + +#if (defined(QCA_WIFI_QCA6750)) +static struct service_to_pipe target_service_to_ce_map_qca6750[] = { + { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, + { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, }, + { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, + { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, + { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, + /* (Additions here) */ + { 0, 0, 0, }, +}; +#else +static struct service_to_pipe target_service_to_ce_map_qca6750[] = { +}; +#endif + +static struct service_to_pipe target_service_to_ce_map_ar900b[] = { + { + WMI_DATA_VO_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_VO_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_DATA_BK_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_BK_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_DATA_BE_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_BE_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_DATA_VI_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_VI_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_CONTROL_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_CONTROL_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + HTC_CTRL_RSVD_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 0, /* could be moved to 3 (share with WMI) */ + }, + { + HTC_CTRL_RSVD_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 1, + }, + { + HTC_RAW_STREAMS_SVC, /* not currently used */ + PIPEDIR_OUT, /* out = UL = host -> target */ + 0, + }, + { + HTC_RAW_STREAMS_SVC, /* not currently used */ + PIPEDIR_IN, /* in = DL = target -> host */ + 1, + }, + { + HTT_DATA_MSG_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 4, + }, +#ifdef WLAN_FEATURE_FASTPATH + { + HTT_DATA_MSG_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 5, + }, +#else /* WLAN_FEATURE_FASTPATH */ + { + HTT_DATA_MSG_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 1, + }, +#endif /* WLAN_FEATURE_FASTPATH */ + + /* (Additions here) */ + + { /* Must be last */ + 0, + 0, + 0, + }, +}; + +static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map; +static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map); + +#ifdef WLAN_FEATURE_EPPING +static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = { + {WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ + {WMI_DATA_VO_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ + {WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */ + {WMI_DATA_BK_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */ + {WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ + {WMI_DATA_BE_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ + {WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ + {WMI_DATA_VI_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ + {WMI_CONTROL_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ + {WMI_CONTROL_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ + {HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */ + {HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ + {HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */ + {HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ + {HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */ + {HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */ + {0, 0, 0,}, /* Must be last */ +}; + +void hif_select_epping_service_to_pipe_map(struct service_to_pipe + **tgt_svc_map_to_use, + uint32_t *sz_tgt_svc_map_to_use) +{ + *tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping; + *sz_tgt_svc_map_to_use = + sizeof(target_service_to_ce_map_wlan_epping); +} +#endif + +#ifdef QCN7605_SUPPORT +static inline +void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use, + uint32_t *sz_tgt_svc_map_to_use) +{ + *tgt_svc_map_to_use = target_service_to_ce_map_qcn7605; + *sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605); +} +#else +static inline +void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use, + uint32_t *sz_tgt_svc_map_to_use) +{ + HIF_ERROR("%s: QCN7605 not supported", __func__); +} +#endif + +static void hif_select_service_to_pipe_map(struct hif_softc *scn, + struct service_to_pipe **tgt_svc_map_to_use, + uint32_t *sz_tgt_svc_map_to_use) +{ + uint32_t mode = hif_get_conparam(scn); + struct hif_target_info *tgt_info = &scn->target_info; + + if (QDF_IS_EPPING_ENABLED(mode)) { + hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use, + sz_tgt_svc_map_to_use); + } else { + switch (tgt_info->target_type) { + default: + *tgt_svc_map_to_use = target_service_to_ce_map_wlan; + *sz_tgt_svc_map_to_use = + sizeof(target_service_to_ce_map_wlan); + break; + case TARGET_TYPE_QCN7605: + hif_select_ce_map_qcn7605(tgt_svc_map_to_use, + sz_tgt_svc_map_to_use); + break; + case TARGET_TYPE_AR900B: + case TARGET_TYPE_QCA9984: + case TARGET_TYPE_IPQ4019: + case TARGET_TYPE_QCA9888: + case TARGET_TYPE_AR9888: + case TARGET_TYPE_AR9888V2: + *tgt_svc_map_to_use = target_service_to_ce_map_ar900b; + *sz_tgt_svc_map_to_use = + sizeof(target_service_to_ce_map_ar900b); + break; + case TARGET_TYPE_QCA6290: + *tgt_svc_map_to_use = target_service_to_ce_map_qca6290; + *sz_tgt_svc_map_to_use = + sizeof(target_service_to_ce_map_qca6290); + break; + case TARGET_TYPE_QCA6390: + *tgt_svc_map_to_use = target_service_to_ce_map_qca6390; + *sz_tgt_svc_map_to_use = + sizeof(target_service_to_ce_map_qca6390); + break; + case TARGET_TYPE_QCA6490: + *tgt_svc_map_to_use = target_service_to_ce_map_qca6490; + *sz_tgt_svc_map_to_use = + sizeof(target_service_to_ce_map_qca6490); + break; + case TARGET_TYPE_QCA6750: + *tgt_svc_map_to_use = target_service_to_ce_map_qca6750; + *sz_tgt_svc_map_to_use = + sizeof(target_service_to_ce_map_qca6750); + break; + case TARGET_TYPE_QCA8074: + *tgt_svc_map_to_use = target_service_to_ce_map_qca8074; + *sz_tgt_svc_map_to_use = + sizeof(target_service_to_ce_map_qca8074); + break; + case TARGET_TYPE_QCA8074V2: + *tgt_svc_map_to_use = + target_service_to_ce_map_qca8074_v2; + *sz_tgt_svc_map_to_use = + sizeof(target_service_to_ce_map_qca8074_v2); + break; + case TARGET_TYPE_QCA6018: + *tgt_svc_map_to_use = + target_service_to_ce_map_qca6018; + *sz_tgt_svc_map_to_use = + sizeof(target_service_to_ce_map_qca6018); + break; + case TARGET_TYPE_QCN9000: + *tgt_svc_map_to_use = + target_service_to_ce_map_qcn9000; + *sz_tgt_svc_map_to_use = + sizeof(target_service_to_ce_map_qcn9000); + break; + } + } +} + +/** + * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly + * @ce_state : pointer to the state context of the CE + * + * Description: + * Sets htt_rx_data attribute of the state structure if the + * CE serves one of the HTT DATA services. + * + * Return: + * false (attribute set to false) + * true (attribute set to true); + */ +static bool ce_mark_datapath(struct CE_state *ce_state) +{ + struct service_to_pipe *svc_map; + uint32_t map_sz, map_len; + int i; + bool rc = false; + + if (ce_state) { + hif_select_service_to_pipe_map(ce_state->scn, &svc_map, + &map_sz); + + map_len = map_sz / sizeof(struct service_to_pipe); + for (i = 0; i < map_len; i++) { + if ((svc_map[i].pipenum == ce_state->id) && + ((svc_map[i].service_id == HTT_DATA_MSG_SVC) || + (svc_map[i].service_id == HTT_DATA2_MSG_SVC) || + (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) { + /* HTT CEs are unidirectional */ + if (svc_map[i].pipedir == PIPEDIR_IN) + ce_state->htt_rx_data = true; + else + ce_state->htt_tx_data = true; + rc = true; + } + } + } + return rc; +} + +/** + * ce_ring_test_initial_indexes() - tests the initial ce ring indexes + * @ce_id: ce in question + * @ring: ring state being examined + * @type: "src_ring" or "dest_ring" string for identifying the ring + * + * Warns on non-zero index values. + * Causes a kernel panic if the ring is not empty durring initialization. + */ +static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring, + char *type) +{ + if (ring->write_index != 0 || ring->sw_index != 0) + HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d", + ce_id, type, ring->sw_index, ring->write_index); + if (ring->write_index != ring->sw_index) + QDF_BUG(0); +} + +#ifdef IPA_OFFLOAD +/** + * ce_alloc_desc_ring() - Allocate copyengine descriptor ring + * @scn: softc instance + * @ce_id: ce in question + * @base_addr: pointer to copyengine ring base address + * @ce_ring: copyengine instance + * @nentries: number of entries should be allocated + * @desc_size: ce desc size + * + * Return: QDF_STATUS_SUCCESS - for success + */ +static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id, + qdf_dma_addr_t *base_addr, + struct CE_ring_state *ce_ring, + unsigned int nentries, uint32_t desc_size) +{ + if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) && + !ce_srng_based(scn)) { + if (!scn->ipa_ce_ring) { + scn->ipa_ce_ring = qdf_mem_shared_mem_alloc( + scn->qdf_dev, + nentries * desc_size + CE_DESC_RING_ALIGN); + if (!scn->ipa_ce_ring) { + HIF_ERROR( + "%s: Failed to allocate memory for IPA ce ring", + __func__); + return QDF_STATUS_E_NOMEM; + } + } + *base_addr = qdf_mem_get_dma_addr(scn->qdf_dev, + &scn->ipa_ce_ring->mem_info); + ce_ring->base_addr_owner_space_unaligned = + scn->ipa_ce_ring->vaddr; + } else { + ce_ring->base_addr_owner_space_unaligned = + hif_mem_alloc_consistent_unaligned + (scn, + (nentries * desc_size + + CE_DESC_RING_ALIGN), + base_addr, + ce_ring->hal_ring_type, + &ce_ring->is_ring_prealloc); + + if (!ce_ring->base_addr_owner_space_unaligned) { + HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u", + __func__, CE_id); + return QDF_STATUS_E_NOMEM; + } + } + return QDF_STATUS_SUCCESS; +} + +/** + * ce_free_desc_ring() - Frees copyengine descriptor ring + * @scn: softc instance + * @ce_id: ce in question + * @ce_ring: copyengine instance + * @desc_size: ce desc size + * + * Return: None + */ +static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id, + struct CE_ring_state *ce_ring, uint32_t desc_size) +{ + if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) && + !ce_srng_based(scn)) { + if (scn->ipa_ce_ring) { + qdf_mem_shared_mem_free(scn->qdf_dev, + scn->ipa_ce_ring); + scn->ipa_ce_ring = NULL; + } + ce_ring->base_addr_owner_space_unaligned = NULL; + } else { + hif_mem_free_consistent_unaligned + (scn, + ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN, + ce_ring->base_addr_owner_space_unaligned, + ce_ring->base_addr_CE_space, 0, + ce_ring->is_ring_prealloc); + ce_ring->base_addr_owner_space_unaligned = NULL; + } +} +#else +static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id, + qdf_dma_addr_t *base_addr, + struct CE_ring_state *ce_ring, + unsigned int nentries, uint32_t desc_size) +{ + ce_ring->base_addr_owner_space_unaligned = + hif_mem_alloc_consistent_unaligned + (scn, + (nentries * desc_size + + CE_DESC_RING_ALIGN), + base_addr, + ce_ring->hal_ring_type, + &ce_ring->is_ring_prealloc); + + if (!ce_ring->base_addr_owner_space_unaligned) { + HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u", + __func__, CE_id); + return QDF_STATUS_E_NOMEM; + } + return QDF_STATUS_SUCCESS; +} + +static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id, + struct CE_ring_state *ce_ring, uint32_t desc_size) +{ + hif_mem_free_consistent_unaligned + (scn, + ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN, + ce_ring->base_addr_owner_space_unaligned, + ce_ring->base_addr_CE_space, 0, + ce_ring->is_ring_prealloc); + ce_ring->base_addr_owner_space_unaligned = NULL; +} +#endif /* IPA_OFFLOAD */ + +/* + * TODO: Need to explore the possibility of having this as part of a + * target context instead of a global array. + */ +static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void); + +void ce_service_register_module(enum ce_target_type target_type, + struct ce_ops* (*ce_attach)(void)) +{ + if (target_type < CE_MAX_TARGET_TYPE) + ce_attach_register[target_type] = ce_attach; +} + +qdf_export_symbol(ce_service_register_module); + +/** + * ce_srng_based() - Does this target use srng + * @ce_state : pointer to the state context of the CE + * + * Description: + * returns true if the target is SRNG based + * + * Return: + * false (attribute set to false) + * true (attribute set to true); + */ +bool ce_srng_based(struct hif_softc *scn) +{ + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); + + switch (tgt_info->target_type) { + case TARGET_TYPE_QCA8074: + case TARGET_TYPE_QCA8074V2: + case TARGET_TYPE_QCA6290: + case TARGET_TYPE_QCA6390: + case TARGET_TYPE_QCA6490: + case TARGET_TYPE_QCA6750: + case TARGET_TYPE_QCA6018: + case TARGET_TYPE_QCN9000: + return true; + default: + return false; + } + return false; +} +qdf_export_symbol(ce_srng_based); + +#ifdef QCA_WIFI_SUPPORT_SRNG +static struct ce_ops *ce_services_attach(struct hif_softc *scn) +{ + struct ce_ops *ops = NULL; + + if (ce_srng_based(scn)) { + if (ce_attach_register[CE_SVC_SRNG]) + ops = ce_attach_register[CE_SVC_SRNG](); + } else if (ce_attach_register[CE_SVC_LEGACY]) { + ops = ce_attach_register[CE_SVC_LEGACY](); + } + + return ops; +} + + +#else /* QCA_LITHIUM */ +static struct ce_ops *ce_services_attach(struct hif_softc *scn) +{ + if (ce_attach_register[CE_SVC_LEGACY]) + return ce_attach_register[CE_SVC_LEGACY](); + + return NULL; +} +#endif /* QCA_LITHIUM */ + +static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn, + struct pld_shadow_reg_v2_cfg **shadow_config, + int *num_shadow_registers_configured) { + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg( + scn, shadow_config, num_shadow_registers_configured); +} + +static inline uint32_t ce_get_desc_size(struct hif_softc *scn, + uint8_t ring_type) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + return hif_state->ce_services->ce_get_desc_size(ring_type); +} + +#ifdef QCA_WIFI_SUPPORT_SRNG +static inline int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type) +{ + switch (ce_ring_type) { + case CE_RING_SRC: + return CE_SRC; + case CE_RING_DEST: + return CE_DST; + case CE_RING_STATUS: + return CE_DST_STATUS; + default: + return -EINVAL; + } +} +#else +static int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type) +{ + return 0; +} +#endif +static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state, + uint8_t ring_type, uint32_t nentries) +{ + uint32_t ce_nbytes; + char *ptr; + qdf_dma_addr_t base_addr; + struct CE_ring_state *ce_ring; + uint32_t desc_size; + struct hif_softc *scn = CE_state->scn; + + ce_nbytes = sizeof(struct CE_ring_state) + + (nentries * sizeof(void *)); + ptr = qdf_mem_malloc(ce_nbytes); + if (!ptr) + return NULL; + + ce_ring = (struct CE_ring_state *)ptr; + ptr += sizeof(struct CE_ring_state); + ce_ring->nentries = nentries; + ce_ring->nentries_mask = nentries - 1; + + ce_ring->low_water_mark_nentries = 0; + ce_ring->high_water_mark_nentries = nentries; + ce_ring->per_transfer_context = (void **)ptr; + ce_ring->hal_ring_type = ce_ring_type_to_hal_ring_type(ring_type); + + desc_size = ce_get_desc_size(scn, ring_type); + + /* Legacy platforms that do not support cache + * coherent DMA are unsupported + */ + if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr, + ce_ring, nentries, + desc_size) != + QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: ring has no DMA mem", + __func__); + qdf_mem_free(ce_ring); + return NULL; + } + ce_ring->base_addr_CE_space_unaligned = base_addr; + + /* Correctly initialize memory to 0 to + * prevent garbage data crashing system + * when download firmware + */ + qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned, + nentries * desc_size + + CE_DESC_RING_ALIGN); + + if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) { + + ce_ring->base_addr_CE_space = + (ce_ring->base_addr_CE_space_unaligned + + CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1); + + ce_ring->base_addr_owner_space = (void *) + (((size_t) ce_ring->base_addr_owner_space_unaligned + + CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1)); + } else { + ce_ring->base_addr_CE_space = + ce_ring->base_addr_CE_space_unaligned; + ce_ring->base_addr_owner_space = + ce_ring->base_addr_owner_space_unaligned; + } + + return ce_ring; +} + +static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type, + uint32_t ce_id, struct CE_ring_state *ring, + struct CE_attr *attr) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id, + ring, attr); +} + +int hif_ce_bus_early_suspend(struct hif_softc *scn) +{ + uint8_t ul_pipe, dl_pipe; + int ce_id, status, ul_is_polled, dl_is_polled; + struct CE_state *ce_state; + + status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC, + &ul_pipe, &dl_pipe, + &ul_is_polled, &dl_is_polled); + if (status) { + HIF_ERROR("%s: pipe_mapping failure", __func__); + return status; + } + + for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { + if (ce_id == ul_pipe) + continue; + if (ce_id == dl_pipe) + continue; + + ce_state = scn->ce_id_to_state[ce_id]; + qdf_spin_lock_bh(&ce_state->ce_index_lock); + if (ce_state->state == CE_RUNNING) + ce_state->state = CE_PAUSED; + qdf_spin_unlock_bh(&ce_state->ce_index_lock); + } + + return status; +} + +int hif_ce_bus_late_resume(struct hif_softc *scn) +{ + int ce_id; + struct CE_state *ce_state; + int write_index = 0; + bool index_updated; + + for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { + ce_state = scn->ce_id_to_state[ce_id]; + qdf_spin_lock_bh(&ce_state->ce_index_lock); + if (ce_state->state == CE_PENDING) { + write_index = ce_state->src_ring->write_index; + CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr, + write_index); + ce_state->state = CE_RUNNING; + index_updated = true; + } else { + index_updated = false; + } + + if (ce_state->state == CE_PAUSED) + ce_state->state = CE_RUNNING; + qdf_spin_unlock_bh(&ce_state->ce_index_lock); + + if (index_updated) + hif_record_ce_desc_event(scn, ce_id, + RESUME_WRITE_INDEX_UPDATE, + NULL, NULL, write_index, 0); + } + + return 0; +} + +/** + * ce_oom_recovery() - try to recover rx ce from oom condition + * @context: CE_state of the CE with oom rx ring + * + * the executing work Will continue to be rescheduled until + * at least 1 descriptor is successfully posted to the rx ring. + * + * return: none + */ +static void ce_oom_recovery(void *context) +{ + struct CE_state *ce_state = context; + struct hif_softc *scn = ce_state->scn; + struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn); + struct HIF_CE_pipe_info *pipe_info = + &ce_softc->pipe_info[ce_state->id]; + + hif_post_recv_buffers_for_pipe(pipe_info); +} + +#ifdef HIF_CE_DEBUG_DATA_BUF +/** + * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by + * the CE descriptors. + * Allocate HIF_CE_HISTORY_MAX records by CE_DEBUG_MAX_DATA_BUF_SIZE + * @scn: hif scn handle + * ce_id: Copy Engine Id + * + * Return: QDF_STATUS + */ +QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) +{ + struct hif_ce_desc_event *event = NULL; + struct hif_ce_desc_event *hist_ev = NULL; + uint32_t index = 0; + + hist_ev = + (struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id]; + + if (!hist_ev) + return QDF_STATUS_E_NOMEM; + + scn->hif_ce_desc_hist.data_enable[ce_id] = true; + for (index = 0; index < HIF_CE_HISTORY_MAX; index++) { + event = &hist_ev[index]; + event->data = + (uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE); + if (!event->data) { + hif_err_rl("ce debug data alloc failed"); + return QDF_STATUS_E_NOMEM; + } + } + return QDF_STATUS_SUCCESS; +} + +/** + * free_mem_ce_debug_hist_data() - Free mem of the data pointed by + * the CE descriptors. + * @scn: hif scn handle + * ce_id: Copy Engine Id + * + * Return: + */ +void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) +{ + struct hif_ce_desc_event *event = NULL; + struct hif_ce_desc_event *hist_ev = NULL; + uint32_t index = 0; + + hist_ev = + (struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id]; + + if (!hist_ev) + return; + + for (index = 0; index < HIF_CE_HISTORY_MAX; index++) { + event = &hist_ev[index]; + if (event->data) + qdf_mem_free(event->data); + event->data = NULL; + event = NULL; + } + +} +#endif /* HIF_CE_DEBUG_DATA_BUF */ + +#ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) +struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX]; + +/** + * alloc_mem_ce_debug_history() - Allocate CE descriptor history + * @scn: hif scn handle + * @ce_id: Copy Engine Id + * @src_nentries: source ce ring entries + * Return: QDF_STATUS + */ +static QDF_STATUS +alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id, + uint32_t src_nentries) +{ + struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; + + ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id]; + ce_hist->enable[ce_id] = 1; + + if (src_nentries) + alloc_mem_ce_debug_hist_data(scn, ce_id); + else + ce_hist->data_enable[ce_id] = false; + + return QDF_STATUS_SUCCESS; +} + +/** + * free_mem_ce_debug_history() - Free CE descriptor history + * @scn: hif scn handle + * @ce_id: Copy Engine Id + * + * Return: None + */ +static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id) +{ + struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; + + ce_hist->enable[ce_id] = 0; + if (ce_hist->data_enable[ce_id]) { + ce_hist->data_enable[ce_id] = false; + free_mem_ce_debug_hist_data(scn, ce_id); + } + ce_hist->hist_ev[ce_id] = NULL; +} +#else +static inline QDF_STATUS +alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id, + uint32_t src_nentries) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void +free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { } +#endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */ +#else +#if defined(HIF_CE_DEBUG_DATA_BUF) + +static QDF_STATUS +alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id, + uint32_t src_nentries) +{ + scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *) + qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event)); + + if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) { + scn->hif_ce_desc_hist.enable[CE_id] = 0; + return QDF_STATUS_E_NOMEM; + } else { + scn->hif_ce_desc_hist.enable[CE_id] = 1; + return QDF_STATUS_SUCCESS; + } +} + +static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) +{ + struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; + struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id]; + + if (!hist_ev) + return; + + if (ce_hist->data_enable[CE_id]) { + ce_hist->data_enable[CE_id] = false; + free_mem_ce_debug_hist_data(scn, CE_id); + } + + ce_hist->enable[CE_id] = 0; + qdf_mem_free(ce_hist->hist_ev[CE_id]); + ce_hist->hist_ev[CE_id] = NULL; +} + +#else + +static inline QDF_STATUS +alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id, + uint32_t src_nentries) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void +free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { } +#endif /* HIF_CE_DEBUG_DATA_BUF */ +#endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */ + +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) +/** + * reset_ce_debug_history() - reset the index and ce id used for dumping the + * CE records on the console using sysfs. + * @scn: hif scn handle + * + * Return: + */ +static inline void reset_ce_debug_history(struct hif_softc *scn) +{ + struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; + /* Initialise the CE debug history sysfs interface inputs ce_id and + * index. Disable data storing + */ + ce_hist->hist_index = 0; + ce_hist->hist_id = 0; +} +#else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */ +static inline void reset_ce_debug_history(struct hif_softc *scn) { } +#endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */ + +void ce_enable_polling(void *cestate) +{ + struct CE_state *CE_state = (struct CE_state *)cestate; + + if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL) + CE_state->timer_inited = true; +} + +void ce_disable_polling(void *cestate) +{ + struct CE_state *CE_state = (struct CE_state *)cestate; + + if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL) + CE_state->timer_inited = false; +} + +/* + * Initialize a Copy Engine based on caller-supplied attributes. + * This may be called once to initialize both source and destination + * rings or it may be called twice for separate source and destination + * initialization. It may be that only one side or the other is + * initialized by software/firmware. + * + * This should be called durring the initialization sequence before + * interupts are enabled, so we don't have to worry about thread safety. + */ +struct CE_handle *ce_init(struct hif_softc *scn, + unsigned int CE_id, struct CE_attr *attr) +{ + struct CE_state *CE_state; + uint32_t ctrl_addr; + unsigned int nentries; + bool malloc_CE_state = false; + bool malloc_src_ring = false; + int status; + + QDF_ASSERT(CE_id < scn->ce_count); + ctrl_addr = CE_BASE_ADDRESS(CE_id); + CE_state = scn->ce_id_to_state[CE_id]; + + if (!CE_state) { + CE_state = + (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state)); + if (!CE_state) + return NULL; + + malloc_CE_state = true; + qdf_spinlock_create(&CE_state->ce_index_lock); + + CE_state->id = CE_id; + CE_state->ctrl_addr = ctrl_addr; + CE_state->state = CE_RUNNING; + CE_state->attr_flags = attr->flags; + } + CE_state->scn = scn; + CE_state->service = ce_engine_service_reg; + + qdf_atomic_init(&CE_state->rx_pending); + if (!attr) { + /* Already initialized; caller wants the handle */ + return (struct CE_handle *)CE_state; + } + + if (CE_state->src_sz_max) + QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max); + else + CE_state->src_sz_max = attr->src_sz_max; + + ce_init_ce_desc_event_log(scn, CE_id, + attr->src_nentries + attr->dest_nentries); + + /* source ring setup */ + nentries = attr->src_nentries; + if (nentries) { + struct CE_ring_state *src_ring; + + nentries = roundup_pwr2(nentries); + if (CE_state->src_ring) { + QDF_ASSERT(CE_state->src_ring->nentries == nentries); + } else { + src_ring = CE_state->src_ring = + ce_alloc_ring_state(CE_state, + CE_RING_SRC, + nentries); + if (!src_ring) { + /* cannot allocate src ring. If the + * CE_state is allocated locally free + * CE_State and return error. + */ + HIF_ERROR("%s: src ring has no mem", __func__); + if (malloc_CE_state) { + /* allocated CE_state locally */ + qdf_mem_free(CE_state); + malloc_CE_state = false; + } + return NULL; + } + /* we can allocate src ring. Mark that the src ring is + * allocated locally + */ + malloc_src_ring = true; + + /* + * Also allocate a shadow src ring in + * regular mem to use for faster access. + */ + src_ring->shadow_base_unaligned = + qdf_mem_malloc(nentries * + sizeof(struct CE_src_desc) + + CE_DESC_RING_ALIGN); + if (!src_ring->shadow_base_unaligned) + goto error_no_dma_mem; + + src_ring->shadow_base = (struct CE_src_desc *) + (((size_t) src_ring->shadow_base_unaligned + + CE_DESC_RING_ALIGN - 1) & + ~(CE_DESC_RING_ALIGN - 1)); + + status = ce_ring_setup(scn, CE_RING_SRC, CE_id, + src_ring, attr); + if (status < 0) + goto error_target_access; + + ce_ring_test_initial_indexes(CE_id, src_ring, + "src_ring"); + } + } + + /* destination ring setup */ + nentries = attr->dest_nentries; + if (nentries) { + struct CE_ring_state *dest_ring; + + nentries = roundup_pwr2(nentries); + if (CE_state->dest_ring) { + QDF_ASSERT(CE_state->dest_ring->nentries == nentries); + } else { + dest_ring = CE_state->dest_ring = + ce_alloc_ring_state(CE_state, + CE_RING_DEST, + nentries); + if (!dest_ring) { + /* cannot allocate dst ring. If the CE_state + * or src ring is allocated locally free + * CE_State and src ring and return error. + */ + HIF_ERROR("%s: dest ring has no mem", + __func__); + goto error_no_dma_mem; + } + + status = ce_ring_setup(scn, CE_RING_DEST, CE_id, + dest_ring, attr); + if (status < 0) + goto error_target_access; + + ce_ring_test_initial_indexes(CE_id, dest_ring, + "dest_ring"); + + /* For srng based target, init status ring here */ + if (ce_srng_based(CE_state->scn)) { + CE_state->status_ring = + ce_alloc_ring_state(CE_state, + CE_RING_STATUS, + nentries); + if (!CE_state->status_ring) { + /*Allocation failed. Cleanup*/ + qdf_mem_free(CE_state->dest_ring); + if (malloc_src_ring) { + qdf_mem_free + (CE_state->src_ring); + CE_state->src_ring = NULL; + malloc_src_ring = false; + } + if (malloc_CE_state) { + /* allocated CE_state locally */ + scn->ce_id_to_state[CE_id] = + NULL; + qdf_mem_free(CE_state); + malloc_CE_state = false; + } + + return NULL; + } + + status = ce_ring_setup(scn, CE_RING_STATUS, + CE_id, CE_state->status_ring, + attr); + if (status < 0) + goto error_target_access; + + } + + /* epping */ + /* poll timer */ + if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) { + qdf_timer_init(scn->qdf_dev, + &CE_state->poll_timer, + ce_poll_timeout, + CE_state, + QDF_TIMER_TYPE_WAKE_APPS); + ce_enable_polling(CE_state); + qdf_timer_mod(&CE_state->poll_timer, + CE_POLL_TIMEOUT); + } + } + } + + if (!ce_srng_based(scn)) { + /* Enable CE error interrupts */ + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + goto error_target_access; + CE_ERROR_INTR_ENABLE(scn, ctrl_addr); + if (Q_TARGET_ACCESS_END(scn) < 0) + goto error_target_access; + } + + qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work, + ce_oom_recovery, CE_state); + + /* update the htt_data attribute */ + ce_mark_datapath(CE_state); + scn->ce_id_to_state[CE_id] = CE_state; + + alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries); + + return (struct CE_handle *)CE_state; + +error_target_access: +error_no_dma_mem: + ce_fini((struct CE_handle *)CE_state); + return NULL; +} + +/** + * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs + * @hif_ctx: HIF Context + * + * API to check if polling is enabled on all CEs. Returns true when polling + * is enabled on all CEs. + * + * Return: bool + */ +bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct CE_attr *attr; + int id; + + for (id = 0; id < scn->ce_count; id++) { + attr = &hif_state->host_ce_config[id]; + if (attr && (attr->dest_nentries) && + !(attr->flags & CE_ATTR_ENABLE_POLL)) + return false; + } + return true; +} +qdf_export_symbol(hif_is_polled_mode_enabled); + +#ifdef WLAN_FEATURE_FASTPATH +/** + * hif_enable_fastpath() Update that we have enabled fastpath mode + * @hif_ctx: HIF context + * + * For use in data path + * + * Retrun: void + */ +void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (ce_srng_based(scn)) { + HIF_INFO("%s, srng rings do not support fastpath", __func__); + return; + } + HIF_DBG("%s, Enabling fastpath mode", __func__); + scn->fastpath_mode_on = true; +} + +/** + * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled + * @hif_ctx: HIF Context + * + * For use in data path to skip HTC + * + * Return: bool + */ +bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + return scn->fastpath_mode_on; +} + +/** + * hif_get_ce_handle - API to get CE handle for FastPath mode + * @hif_ctx: HIF Context + * @id: CopyEngine Id + * + * API to return CE handle for fastpath mode + * + * Return: void + */ +void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + return scn->ce_id_to_state[id]; +} +qdf_export_symbol(hif_get_ce_handle); + +/** + * ce_h2t_tx_ce_cleanup() Place holder function for H2T CE cleanup. + * No processing is required inside this function. + * @ce_hdl: Cope engine handle + * Using an assert, this function makes sure that, + * the TX CE has been processed completely. + * + * This is called while dismantling CE structures. No other thread + * should be using these structures while dismantling is occurring + * therfore no locking is needed. + * + * Return: none + */ +void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl) +{ + struct CE_state *ce_state = (struct CE_state *)ce_hdl; + struct CE_ring_state *src_ring = ce_state->src_ring; + struct hif_softc *sc = ce_state->scn; + uint32_t sw_index, write_index; + + if (hif_is_nss_wifi_enabled(sc)) + return; + + if (sc->fastpath_mode_on && ce_state->htt_tx_data) { + HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE", + __func__, __LINE__); + sw_index = src_ring->sw_index; + write_index = src_ring->sw_index; + + /* At this point Tx CE should be clean */ + qdf_assert_always(sw_index == write_index); + } +} + +/** + * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue. + * @ce_hdl: Handle to CE + * + * These buffers are never allocated on the fly, but + * are allocated only once during HIF start and freed + * only once during HIF stop. + * NOTE: + * The assumption here is there is no in-flight DMA in progress + * currently, so that buffers can be freed up safely. + * + * Return: NONE + */ +void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl) +{ + struct CE_state *ce_state = (struct CE_state *)ce_hdl; + struct CE_ring_state *dst_ring = ce_state->dest_ring; + qdf_nbuf_t nbuf; + int i; + + if (ce_state->scn->fastpath_mode_on == false) + return; + + if (!ce_state->htt_rx_data) + return; + + /* + * when fastpath_mode is on and for datapath CEs. Unlike other CE's, + * this CE is completely full: does not leave one blank space, to + * distinguish between empty queue & full queue. So free all the + * entries. + */ + for (i = 0; i < dst_ring->nentries; i++) { + nbuf = dst_ring->per_transfer_context[i]; + + /* + * The reasons for doing this check are: + * 1) Protect against calling cleanup before allocating buffers + * 2) In a corner case, FASTPATH_mode_on may be set, but we + * could have a partially filled ring, because of a memory + * allocation failure in the middle of allocating ring. + * This check accounts for that case, checking + * fastpath_mode_on flag or started flag would not have + * covered that case. This is not in performance path, + * so OK to do this. + */ + if (nbuf) { + qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf, + QDF_DMA_FROM_DEVICE); + qdf_nbuf_free(nbuf); + } + } +} + +/** + * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1 + * @scn: HIF handle + * + * Datapath Rx CEs are special case, where we reuse all the message buffers. + * Hence we have to post all the entries in the pipe, even, in the beginning + * unlike for other CE pipes where one less than dest_nentries are filled in + * the beginning. + * + * Return: None + */ +static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn) +{ + int pipe_num; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + if (scn->fastpath_mode_on == false) + return; + + for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { + struct HIF_CE_pipe_info *pipe_info = + &hif_state->pipe_info[pipe_num]; + struct CE_state *ce_state = + scn->ce_id_to_state[pipe_info->pipe_num]; + + if (ce_state->htt_rx_data) + atomic_inc(&pipe_info->recv_bufs_needed); + } +} +#else +static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn) +{ +} + +static inline bool ce_is_fastpath_enabled(struct hif_softc *scn) +{ + return false; +} +#endif /* WLAN_FEATURE_FASTPATH */ + +void ce_fini(struct CE_handle *copyeng) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + unsigned int CE_id = CE_state->id; + struct hif_softc *scn = CE_state->scn; + uint32_t desc_size; + + bool inited = CE_state->timer_inited; + CE_state->state = CE_UNUSED; + scn->ce_id_to_state[CE_id] = NULL; + /* Set the flag to false first to stop processing in ce_poll_timeout */ + ce_disable_polling(CE_state); + + qdf_lro_deinit(CE_state->lro_data); + + if (CE_state->src_ring) { + /* Cleanup the datapath Tx ring */ + ce_h2t_tx_ce_cleanup(copyeng); + + desc_size = ce_get_desc_size(scn, CE_RING_SRC); + if (CE_state->src_ring->shadow_base_unaligned) + qdf_mem_free(CE_state->src_ring->shadow_base_unaligned); + if (CE_state->src_ring->base_addr_owner_space_unaligned) + ce_free_desc_ring(scn, CE_state->id, + CE_state->src_ring, + desc_size); + qdf_mem_free(CE_state->src_ring); + } + if (CE_state->dest_ring) { + /* Cleanup the datapath Rx ring */ + ce_t2h_msg_ce_cleanup(copyeng); + + desc_size = ce_get_desc_size(scn, CE_RING_DEST); + if (CE_state->dest_ring->base_addr_owner_space_unaligned) + ce_free_desc_ring(scn, CE_state->id, + CE_state->dest_ring, + desc_size); + qdf_mem_free(CE_state->dest_ring); + + /* epping */ + if (inited) { + qdf_timer_free(&CE_state->poll_timer); + } + } + if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) { + /* Cleanup the datapath Tx ring */ + ce_h2t_tx_ce_cleanup(copyeng); + + if (CE_state->status_ring->shadow_base_unaligned) + qdf_mem_free( + CE_state->status_ring->shadow_base_unaligned); + + desc_size = ce_get_desc_size(scn, CE_RING_STATUS); + if (CE_state->status_ring->base_addr_owner_space_unaligned) + ce_free_desc_ring(scn, CE_state->id, + CE_state->status_ring, + desc_size); + qdf_mem_free(CE_state->status_ring); + } + + free_mem_ce_debug_history(scn, CE_id); + reset_ce_debug_history(scn); + ce_deinit_ce_desc_event_log(scn, CE_id); + + qdf_spinlock_destroy(&CE_state->ce_index_lock); + qdf_mem_free(CE_state); +} + +void hif_detach_htc(struct hif_opaque_softc *hif_ctx) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + + qdf_mem_zero(&hif_state->msg_callbacks_pending, + sizeof(hif_state->msg_callbacks_pending)); + qdf_mem_zero(&hif_state->msg_callbacks_current, + sizeof(hif_state->msg_callbacks_current)); +} + +/* Send the first nbytes bytes of the buffer */ +QDF_STATUS +hif_send_head(struct hif_opaque_softc *hif_ctx, + uint8_t pipe, unsigned int transfer_id, unsigned int nbytes, + qdf_nbuf_t nbuf, unsigned int data_attr) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); + struct CE_handle *ce_hdl = pipe_info->ce_hdl; + int bytes = nbytes, nfrags = 0; + struct ce_sendlist sendlist; + int status, i = 0; + unsigned int mux_id = 0; + + if (nbytes > qdf_nbuf_len(nbuf)) { + HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes, + (uint32_t)qdf_nbuf_len(nbuf)); + QDF_ASSERT(0); + } + + transfer_id = + (mux_id & MUX_ID_MASK) | + (transfer_id & TRANSACTION_ID_MASK); + data_attr &= DESC_DATA_FLAG_MASK; + /* + * The common case involves sending multiple fragments within a + * single download (the tx descriptor and the tx frame header). + * So, optimize for the case of multiple fragments by not even + * checking whether it's necessary to use a sendlist. + * The overhead of using a sendlist for a single buffer download + * is not a big deal, since it happens rarely (for WMI messages). + */ + ce_sendlist_init(&sendlist); + do { + qdf_dma_addr_t frag_paddr; + int frag_bytes; + + frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags); + frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags); + /* + * Clear the packet offset for all but the first CE desc. + */ + if (i++ > 0) + data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M; + + status = ce_sendlist_buf_add(&sendlist, frag_paddr, + frag_bytes > + bytes ? bytes : frag_bytes, + qdf_nbuf_get_frag_is_wordstream + (nbuf, + nfrags) ? 0 : + CE_SEND_FLAG_SWAP_DISABLE, + data_attr); + if (status != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: error, frag_num %d larger than limit", + __func__, nfrags); + return status; + } + bytes -= frag_bytes; + nfrags++; + } while (bytes > 0); + + /* Make sure we have resources to handle this request */ + qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); + if (pipe_info->num_sends_allowed < nfrags) { + qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); + ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE); + return QDF_STATUS_E_RESOURCES; + } + pipe_info->num_sends_allowed -= nfrags; + qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); + + if (qdf_unlikely(!ce_hdl)) { + HIF_ERROR("%s: error CE handle is null", __func__); + return A_ERROR; + } + + QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF); + DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf), + sizeof(qdf_nbuf_data(nbuf)), QDF_TX)); + status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id); + QDF_ASSERT(status == QDF_STATUS_SUCCESS); + + return status; +} + +void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe, + int force) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + + if (!force) { + int resources; + /* + * Decide whether to actually poll for completions, or just + * wait for a later chance. If there seem to be plenty of + * resources left, then just wait, since checking involves + * reading a CE register, which is a relatively expensive + * operation. + */ + resources = hif_get_free_queue_number(hif_ctx, pipe); + /* + * If at least 50% of the total resources are still available, + * don't bother checking again yet. + */ + if (resources > (hif_state->host_ce_config[pipe].src_nentries >> + 1)) + return; + } +#if ATH_11AC_TXCOMPACT + ce_per_engine_servicereap(scn, pipe); +#else + ce_per_engine_service(scn, pipe); +#endif +} + +uint16_t +hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); + uint16_t rv; + + qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); + rv = pipe_info->num_sends_allowed; + qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); + return rv; +} + +/* Called by lower (CE) layer when a send to Target completes. */ +static void +hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context, + void *transfer_context, qdf_dma_addr_t CE_data, + unsigned int nbytes, unsigned int transfer_id, + unsigned int sw_index, unsigned int hw_index, + unsigned int toeplitz_hash_result) +{ + struct HIF_CE_pipe_info *pipe_info = + (struct HIF_CE_pipe_info *)ce_context; + unsigned int sw_idx = sw_index, hw_idx = hw_index; + struct hif_msg_callbacks *msg_callbacks = + &pipe_info->pipe_callbacks; + + do { + /* + * The upper layer callback will be triggered + * when last fragment is complteted. + */ + if (transfer_context != CE_SENDLIST_ITEM_CTXT) + msg_callbacks->txCompletionHandler( + msg_callbacks->Context, + transfer_context, transfer_id, + toeplitz_hash_result); + + qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); + pipe_info->num_sends_allowed++; + qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); + } while (ce_completed_send_next(copyeng, + &ce_context, &transfer_context, + &CE_data, &nbytes, &transfer_id, + &sw_idx, &hw_idx, + &toeplitz_hash_result) == QDF_STATUS_SUCCESS); +} + +/** + * hif_ce_do_recv(): send message from copy engine to upper layers + * @msg_callbacks: structure containing callback and callback context + * @netbuff: skb containing message + * @nbytes: number of bytes in the message + * @pipe_info: used for the pipe_number info + * + * Checks the packet length, configures the length in the netbuff, + * and calls the upper layer callback. + * + * return: None + */ +static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks, + qdf_nbuf_t netbuf, int nbytes, + struct HIF_CE_pipe_info *pipe_info) { + if (nbytes <= pipe_info->buf_sz) { + qdf_nbuf_set_pktlen(netbuf, nbytes); + msg_callbacks-> + rxCompletionHandler(msg_callbacks->Context, + netbuf, pipe_info->pipe_num); + } else { + HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d", + __func__, netbuf, nbytes); + + qdf_nbuf_free(netbuf); + } +} + +/* Called by lower (CE) layer when data is received from the Target. */ +static void +hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context, + void *transfer_context, qdf_dma_addr_t CE_data, + unsigned int nbytes, unsigned int transfer_id, + unsigned int flags) +{ + struct HIF_CE_pipe_info *pipe_info = + (struct HIF_CE_pipe_info *)ce_context; + struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state; + struct CE_state *ce_state = (struct CE_state *) copyeng; + struct hif_softc *scn = HIF_GET_SOFTC(hif_state); + struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn); + struct hif_msg_callbacks *msg_callbacks = + &pipe_info->pipe_callbacks; + + do { + hif_pm_runtime_mark_last_busy(hif_ctx); + qdf_nbuf_unmap_single(scn->qdf_dev, + (qdf_nbuf_t) transfer_context, + QDF_DMA_FROM_DEVICE); + + atomic_inc(&pipe_info->recv_bufs_needed); + hif_post_recv_buffers_for_pipe(pipe_info); + if (scn->target_status == TARGET_STATUS_RESET) + qdf_nbuf_free(transfer_context); + else + hif_ce_do_recv(msg_callbacks, transfer_context, + nbytes, pipe_info); + + /* Set up force_break flag if num of receices reaches + * MAX_NUM_OF_RECEIVES + */ + ce_state->receive_count++; + if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) { + ce_state->force_break = 1; + break; + } + } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context, + &CE_data, &nbytes, &transfer_id, + &flags) == QDF_STATUS_SUCCESS); + +} + +/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */ + +void +hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused, + struct hif_msg_callbacks *callbacks) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + +#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG + spin_lock_init(&pcie_access_log_lock); +#endif + /* Save callbacks for later installation */ + qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks, + sizeof(hif_state->msg_callbacks_pending)); + +} + +static int hif_completion_thread_startup(struct HIF_CE_state *hif_state) +{ + struct CE_handle *ce_diag = hif_state->ce_diag; + int pipe_num; + struct hif_softc *scn = HIF_GET_SOFTC(hif_state); + struct hif_msg_callbacks *hif_msg_callbacks = + &hif_state->msg_callbacks_current; + + /* daemonize("hif_compl_thread"); */ + + if (scn->ce_count == 0) { + HIF_ERROR("%s: Invalid ce_count", __func__); + return -EINVAL; + } + + if (!hif_msg_callbacks || + !hif_msg_callbacks->rxCompletionHandler || + !hif_msg_callbacks->txCompletionHandler) { + HIF_ERROR("%s: no completion handler registered", __func__); + return -EFAULT; + } + + A_TARGET_ACCESS_LIKELY(scn); + for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { + struct CE_attr attr; + struct HIF_CE_pipe_info *pipe_info; + + pipe_info = &hif_state->pipe_info[pipe_num]; + if (pipe_info->ce_hdl == ce_diag) + continue; /* Handle Diagnostic CE specially */ + attr = hif_state->host_ce_config[pipe_num]; + if (attr.src_nentries) { + /* pipe used to send to target */ + HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK", + __func__, pipe_num, pipe_info); + ce_send_cb_register(pipe_info->ce_hdl, + hif_pci_ce_send_done, pipe_info, + attr.flags & CE_ATTR_DISABLE_INTR); + pipe_info->num_sends_allowed = attr.src_nentries - 1; + } + if (attr.dest_nentries) { + /* pipe used to receive from target */ + ce_recv_cb_register(pipe_info->ce_hdl, + hif_pci_ce_recv_data, pipe_info, + attr.flags & CE_ATTR_DISABLE_INTR); + } + + if (attr.src_nentries) + qdf_spinlock_create(&pipe_info->completion_freeq_lock); + + qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks, + sizeof(pipe_info->pipe_callbacks)); + } + + A_TARGET_ACCESS_UNLIKELY(scn); + return 0; +} + +/* + * Install pending msg callbacks. + * + * TBDXXX: This hack is needed because upper layers install msg callbacks + * for use with HTC before BMI is done; yet this HIF implementation + * needs to continue to use BMI msg callbacks. Really, upper layers + * should not register HTC callbacks until AFTER BMI phase. + */ +static void hif_msg_callbacks_install(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + qdf_mem_copy(&hif_state->msg_callbacks_current, + &hif_state->msg_callbacks_pending, + sizeof(hif_state->msg_callbacks_pending)); +} + +void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe, + uint8_t *DLPipe) +{ + int ul_is_polled, dl_is_polled; + + (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC, + ULPipe, DLPipe, &ul_is_polled, &dl_is_polled); +} + +/** + * hif_dump_pipe_debug_count() - Log error count + * @scn: hif_softc pointer. + * + * Output the pipe error counts of each pipe to log file + * + * Return: N/A + */ +void hif_dump_pipe_debug_count(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + int pipe_num; + + if (!hif_state) { + HIF_ERROR("%s hif_state is NULL", __func__); + return; + } + for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { + struct HIF_CE_pipe_info *pipe_info; + + pipe_info = &hif_state->pipe_info[pipe_num]; + + if (pipe_info->nbuf_alloc_err_count > 0 || + pipe_info->nbuf_dma_err_count > 0 || + pipe_info->nbuf_ce_enqueue_err_count) + HIF_ERROR( + "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u", + __func__, pipe_info->pipe_num, + atomic_read(&pipe_info->recv_bufs_needed), + pipe_info->nbuf_alloc_err_count, + pipe_info->nbuf_dma_err_count, + pipe_info->nbuf_ce_enqueue_err_count); + } +} + +static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info, + void *nbuf, uint32_t *error_cnt, + enum hif_ce_event_type failure_type, + const char *failure_type_string) +{ + int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed); + struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl; + struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); + int ce_id = CE_state->id; + uint32_t error_cnt_tmp; + + qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); + error_cnt_tmp = ++(*error_cnt); + qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); + HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s", + __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp, + failure_type_string); + hif_record_ce_desc_event(scn, ce_id, failure_type, + NULL, nbuf, bufs_needed_tmp, 0); + /* if we fail to allocate the last buffer for an rx pipe, + * there is no trigger to refill the ce and we will + * eventually crash + */ + if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1 || + (ce_srng_based(scn) && + bufs_needed_tmp == CE_state->dest_ring->nentries - 2)) + qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work); + +} + + + + +QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info) +{ + struct CE_handle *ce_hdl; + qdf_size_t buf_sz; + struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); + QDF_STATUS status; + uint32_t bufs_posted = 0; + unsigned int ce_id; + + buf_sz = pipe_info->buf_sz; + if (buf_sz == 0) { + /* Unused Copy Engine */ + return QDF_STATUS_SUCCESS; + } + + ce_hdl = pipe_info->ce_hdl; + ce_id = ((struct CE_state *)ce_hdl)->id; + + qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); + while (atomic_read(&pipe_info->recv_bufs_needed) > 0) { + qdf_dma_addr_t CE_data; /* CE space buffer address */ + qdf_nbuf_t nbuf; + + atomic_dec(&pipe_info->recv_bufs_needed); + qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); + + hif_record_ce_desc_event(scn, ce_id, + HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL, + 0, 0); + nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false); + if (!nbuf) { + hif_post_recv_buffers_failure(pipe_info, nbuf, + &pipe_info->nbuf_alloc_err_count, + HIF_RX_NBUF_ALLOC_FAILURE, + "HIF_RX_NBUF_ALLOC_FAILURE"); + return QDF_STATUS_E_NOMEM; + } + + hif_record_ce_desc_event(scn, ce_id, + HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf, + 0, 0); + /* + * qdf_nbuf_peek_header(nbuf, &data, &unused); + * CE_data = dma_map_single(dev, data, buf_sz, ); + * DMA_FROM_DEVICE); + */ + status = qdf_nbuf_map_single(scn->qdf_dev, nbuf, + QDF_DMA_FROM_DEVICE); + + if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { + hif_post_recv_buffers_failure(pipe_info, nbuf, + &pipe_info->nbuf_dma_err_count, + HIF_RX_NBUF_MAP_FAILURE, + "HIF_RX_NBUF_MAP_FAILURE"); + qdf_nbuf_free(nbuf); + return status; + } + + CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0); + hif_record_ce_desc_event(scn, ce_id, + HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf, + 0, 0); + qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data, + buf_sz, DMA_FROM_DEVICE); + status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data); + if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { + hif_post_recv_buffers_failure(pipe_info, nbuf, + &pipe_info->nbuf_ce_enqueue_err_count, + HIF_RX_NBUF_ENQUEUE_FAILURE, + "HIF_RX_NBUF_ENQUEUE_FAILURE"); + + qdf_nbuf_unmap_single(scn->qdf_dev, nbuf, + QDF_DMA_FROM_DEVICE); + qdf_nbuf_free(nbuf); + return status; + } + + qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); + bufs_posted++; + } + pipe_info->nbuf_alloc_err_count = + (pipe_info->nbuf_alloc_err_count > bufs_posted) ? + pipe_info->nbuf_alloc_err_count - bufs_posted : 0; + pipe_info->nbuf_dma_err_count = + (pipe_info->nbuf_dma_err_count > bufs_posted) ? + pipe_info->nbuf_dma_err_count - bufs_posted : 0; + pipe_info->nbuf_ce_enqueue_err_count = + (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ? + pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0; + + qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); + + return QDF_STATUS_SUCCESS; +} + +/* + * Try to post all desired receive buffers for all pipes. + * Returns 0 for non fastpath rx copy engine as + * oom_allocation_work will be scheduled to recover any + * failures, non-zero if unable to completely replenish + * receive buffers for fastpath rx Copy engine. + */ +QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + int pipe_num; + struct CE_state *ce_state = NULL; + QDF_STATUS qdf_status; + + A_TARGET_ACCESS_LIKELY(scn); + for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { + struct HIF_CE_pipe_info *pipe_info; + + ce_state = scn->ce_id_to_state[pipe_num]; + pipe_info = &hif_state->pipe_info[pipe_num]; + + if (hif_is_nss_wifi_enabled(scn) && + ce_state && (ce_state->htt_rx_data)) + continue; + + qdf_status = hif_post_recv_buffers_for_pipe(pipe_info); + if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state && + ce_state->htt_rx_data && + scn->fastpath_mode_on) { + A_TARGET_ACCESS_UNLIKELY(scn); + return qdf_status; + } + } + + A_TARGET_ACCESS_UNLIKELY(scn); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; + + hif_update_fastpath_recv_bufs_cnt(scn); + + hif_msg_callbacks_install(scn); + + if (hif_completion_thread_startup(hif_state)) + return QDF_STATUS_E_FAILURE; + + /* enable buffer cleanup */ + hif_state->started = true; + + /* Post buffers once to start things off. */ + qdf_status = hif_post_recv_buffers(scn); + if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { + /* cleanup is done in hif_ce_disable */ + HIF_ERROR("%s:failed to post buffers", __func__); + return qdf_status; + } + + return qdf_status; +} + +static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) +{ + struct hif_softc *scn; + struct CE_handle *ce_hdl; + uint32_t buf_sz; + struct HIF_CE_state *hif_state; + qdf_nbuf_t netbuf; + qdf_dma_addr_t CE_data; + void *per_CE_context; + + buf_sz = pipe_info->buf_sz; + /* Unused Copy Engine */ + if (buf_sz == 0) + return; + + + hif_state = pipe_info->HIF_CE_state; + if (!hif_state->started) + return; + + scn = HIF_GET_SOFTC(hif_state); + ce_hdl = pipe_info->ce_hdl; + + if (!scn->qdf_dev) + return; + while (ce_revoke_recv_next + (ce_hdl, &per_CE_context, (void **)&netbuf, + &CE_data) == QDF_STATUS_SUCCESS) { + if (netbuf) { + qdf_nbuf_unmap_single(scn->qdf_dev, netbuf, + QDF_DMA_FROM_DEVICE); + qdf_nbuf_free(netbuf); + } + } +} + +static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) +{ + struct CE_handle *ce_hdl; + struct HIF_CE_state *hif_state; + struct hif_softc *scn; + qdf_nbuf_t netbuf; + void *per_CE_context; + qdf_dma_addr_t CE_data; + unsigned int nbytes; + unsigned int id; + uint32_t buf_sz; + uint32_t toeplitz_hash_result; + + buf_sz = pipe_info->buf_sz; + if (buf_sz == 0) { + /* Unused Copy Engine */ + return; + } + + hif_state = pipe_info->HIF_CE_state; + if (!hif_state->started) { + return; + } + + scn = HIF_GET_SOFTC(hif_state); + + ce_hdl = pipe_info->ce_hdl; + + while (ce_cancel_send_next + (ce_hdl, &per_CE_context, + (void **)&netbuf, &CE_data, &nbytes, + &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) { + if (netbuf != CE_SENDLIST_ITEM_CTXT) { + /* + * Packets enqueued by htt_h2t_ver_req_msg() and + * htt_h2t_rx_ring_cfg_msg_ll() have already been + * freed in htt_htc_misc_pkt_pool_free() in + * wlantl_close(), so do not free them here again + * by checking whether it's the endpoint + * which they are queued in. + */ + if (id == scn->htc_htt_tx_endpoint) + return; + /* Indicate the completion to higher + * layer to free the buffer + */ + if (pipe_info->pipe_callbacks.txCompletionHandler) + pipe_info->pipe_callbacks. + txCompletionHandler(pipe_info-> + pipe_callbacks.Context, + netbuf, id, toeplitz_hash_result); + } + } +} + +/* + * Cleanup residual buffers for device shutdown: + * buffers that were enqueued for receive + * buffers that were to be sent + * Note: Buffers that had completed but which were + * not yet processed are on a completion queue. They + * are handled when the completion thread shuts down. + */ +static void hif_buffer_cleanup(struct HIF_CE_state *hif_state) +{ + int pipe_num; + struct hif_softc *scn = HIF_GET_SOFTC(hif_state); + struct CE_state *ce_state; + + for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { + struct HIF_CE_pipe_info *pipe_info; + + ce_state = scn->ce_id_to_state[pipe_num]; + if (hif_is_nss_wifi_enabled(scn) && ce_state && + ((ce_state->htt_tx_data) || + (ce_state->htt_rx_data))) { + continue; + } + + pipe_info = &hif_state->pipe_info[pipe_num]; + hif_recv_buffer_cleanup_on_pipe(pipe_info); + hif_send_buffer_cleanup_on_pipe(pipe_info); + } +} + +void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + hif_buffer_cleanup(hif_state); +} + +static void hif_destroy_oom_work(struct hif_softc *scn) +{ + struct CE_state *ce_state; + int ce_id; + + for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { + ce_state = scn->ce_id_to_state[ce_id]; + if (ce_state) + qdf_destroy_work(scn->qdf_dev, + &ce_state->oom_allocation_work); + } +} + +void hif_ce_stop(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + int pipe_num; + + /* + * before cleaning up any memory, ensure irq & + * bottom half contexts will not be re-entered + */ + hif_disable_isr(&scn->osc); + hif_destroy_oom_work(scn); + scn->hif_init_done = false; + + /* + * At this point, asynchronous threads are stopped, + * The Target should not DMA nor interrupt, Host code may + * not initiate anything more. So we just need to clean + * up Host-side state. + */ + + if (scn->athdiag_procfs_inited) { + athdiag_procfs_remove(); + scn->athdiag_procfs_inited = false; + } + + hif_buffer_cleanup(hif_state); + + for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { + struct HIF_CE_pipe_info *pipe_info; + struct CE_attr attr; + struct CE_handle *ce_diag = hif_state->ce_diag; + + pipe_info = &hif_state->pipe_info[pipe_num]; + if (pipe_info->ce_hdl) { + if (pipe_info->ce_hdl != ce_diag && + hif_state->started) { + attr = hif_state->host_ce_config[pipe_num]; + if (attr.src_nentries) + qdf_spinlock_destroy(&pipe_info-> + completion_freeq_lock); + } + ce_fini(pipe_info->ce_hdl); + pipe_info->ce_hdl = NULL; + pipe_info->buf_sz = 0; + qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); + } + } + + if (hif_state->sleep_timer_init) { + qdf_timer_stop(&hif_state->sleep_timer); + qdf_timer_free(&hif_state->sleep_timer); + hif_state->sleep_timer_init = false; + } + + hif_state->started = false; +} + +static void hif_get_shadow_reg_cfg(struct hif_softc *scn, + struct shadow_reg_cfg + **target_shadow_reg_cfg_ret, + uint32_t *shadow_cfg_sz_ret) +{ + if (target_shadow_reg_cfg_ret) + *target_shadow_reg_cfg_ret = target_shadow_reg_cfg; + if (shadow_cfg_sz_ret) + *shadow_cfg_sz_ret = shadow_cfg_sz; +} + +/** + * hif_get_target_ce_config() - get copy engine configuration + * @target_ce_config_ret: basic copy engine configuration + * @target_ce_config_sz_ret: size of the basic configuration in bytes + * @target_service_to_ce_map_ret: service mapping for the copy engines + * @target_service_to_ce_map_sz_ret: size of the mapping in bytes + * @target_shadow_reg_cfg_ret: shadow register configuration + * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes + * + * providing accessor to these values outside of this file. + * currently these are stored in static pointers to const sections. + * there are multiple configurations that are selected from at compile time. + * Runtime selection would need to consider mode, target type and bus type. + * + * Return: return by parameter. + */ +void hif_get_target_ce_config(struct hif_softc *scn, + struct CE_pipe_config **target_ce_config_ret, + uint32_t *target_ce_config_sz_ret, + struct service_to_pipe **target_service_to_ce_map_ret, + uint32_t *target_service_to_ce_map_sz_ret, + struct shadow_reg_cfg **target_shadow_reg_cfg_ret, + uint32_t *shadow_cfg_sz_ret) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + *target_ce_config_ret = hif_state->target_ce_config; + *target_ce_config_sz_ret = hif_state->target_ce_config_sz; + + hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret, + target_service_to_ce_map_sz_ret); + hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret, + shadow_cfg_sz_ret); +} + +#ifdef CONFIG_SHADOW_V2 +static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) +{ + int i; + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg); + + for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, + "%s: i %d, val %x", __func__, i, + cfg->shadow_reg_v2_cfg[i].addr); + } +} + +#else +static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) +{ + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: CONFIG_SHADOW_V2 not defined", __func__); +} +#endif + +#ifdef ADRASTEA_RRI_ON_DDR +/** + * hif_get_src_ring_read_index(): Called to get the SRRI + * + * @scn: hif_softc pointer + * @CE_ctrl_addr: base address of the CE whose RRI is to be read + * + * This function returns the SRRI to the caller. For CEs that + * dont have interrupts enabled, we look at the DDR based SRRI + * + * Return: SRRI + */ +inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn, + uint32_t CE_ctrl_addr) +{ + struct CE_attr attr; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)]; + if (attr.flags & CE_ATTR_DISABLE_INTR) { + return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr); + } else { + if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) + return A_TARGET_READ(scn, + (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS); + else + return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, + CE_ctrl_addr); + } +} + +/** + * hif_get_dst_ring_read_index(): Called to get the DRRI + * + * @scn: hif_softc pointer + * @CE_ctrl_addr: base address of the CE whose RRI is to be read + * + * This function returns the DRRI to the caller. For CEs that + * dont have interrupts enabled, we look at the DDR based DRRI + * + * Return: DRRI + */ +inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn, + uint32_t CE_ctrl_addr) +{ + struct CE_attr attr; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)]; + + if (attr.flags & CE_ATTR_DISABLE_INTR) { + return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr); + } else { + if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) + return A_TARGET_READ(scn, + (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS); + else + return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, + CE_ctrl_addr); + } +} + +/** + * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr + * @scn: hif_softc pointer + * + * Return: qdf status + */ +static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn) +{ + qdf_dma_addr_t paddr_rri_on_ddr = 0; + + scn->vaddr_rri_on_ddr = + (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev, + scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)), + &paddr_rri_on_ddr); + + if (!scn->vaddr_rri_on_ddr) { + hif_err("dmaable page alloc fail"); + return QDF_STATUS_E_NOMEM; + } + + scn->paddr_rri_on_ddr = paddr_rri_on_ddr; + + qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t)); + + return QDF_STATUS_SUCCESS; +} +#endif + +#if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR) +/** + * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism + * + * @scn: hif_softc pointer + * + * This function allocates non cached memory on ddr and sends + * the physical address of this memory to the CE hardware. The + * hardware updates the RRI on this particular location. + * + * Return: None + */ +static inline void hif_config_rri_on_ddr(struct hif_softc *scn) +{ + unsigned int i; + uint32_t high_paddr, low_paddr; + + if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS) + return; + + low_paddr = BITS0_TO_31(scn->paddr_rri_on_ddr); + high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr); + + HIF_DBG("%s using srri and drri from DDR", __func__); + + WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr); + WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr); + + for (i = 0; i < CE_COUNT; i++) + CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i)); +} +#else +/** + * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism + * + * @scn: hif_softc pointer + * + * This is a dummy implementation for platforms that don't + * support this functionality. + * + * Return: None + */ +static inline void hif_config_rri_on_ddr(struct hif_softc *scn) +{ +} +#endif + +/** + * hif_update_rri_over_ddr_config() - update rri_over_ddr config for + * QMI command + * @scn: hif context + * @cfg: wlan enable config + * + * In case of Genoa, rri_over_ddr memory configuration is passed + * to firmware through QMI configure command. + */ +#if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR) +static void hif_update_rri_over_ddr_config(struct hif_softc *scn, + struct pld_wlan_enable_cfg *cfg) +{ + if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS) + return; + + cfg->rri_over_ddr_cfg_valid = true; + cfg->rri_over_ddr_cfg.base_addr_low = + BITS0_TO_31(scn->paddr_rri_on_ddr); + cfg->rri_over_ddr_cfg.base_addr_high = + BITS32_TO_35(scn->paddr_rri_on_ddr); +} +#else +static void hif_update_rri_over_ddr_config(struct hif_softc *scn, + struct pld_wlan_enable_cfg *cfg) +{ +} +#endif + +/** + * hif_wlan_enable(): call the platform driver to enable wlan + * @scn: HIF Context + * + * This function passes the con_mode and CE configuration to + * platform driver to enable wlan. + * + * Return: linux error code + */ +int hif_wlan_enable(struct hif_softc *scn) +{ + struct pld_wlan_enable_cfg cfg; + enum pld_driver_mode mode; + uint32_t con_mode = hif_get_conparam(scn); + + hif_get_target_ce_config(scn, + (struct CE_pipe_config **)&cfg.ce_tgt_cfg, + &cfg.num_ce_tgt_cfg, + (struct service_to_pipe **)&cfg.ce_svc_cfg, + &cfg.num_ce_svc_pipe_cfg, + (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg, + &cfg.num_shadow_reg_cfg); + + /* translate from structure size to array size */ + cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config); + cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe); + cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg); + + hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg, + &cfg.num_shadow_reg_v2_cfg); + + hif_print_hal_shadow_register_cfg(&cfg); + + hif_update_rri_over_ddr_config(scn, &cfg); + + if (QDF_GLOBAL_FTM_MODE == con_mode) + mode = PLD_FTM; + else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode) + mode = PLD_COLDBOOT_CALIBRATION; + else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode) + mode = PLD_FTM_COLDBOOT_CALIBRATION; + else if (QDF_IS_EPPING_ENABLED(con_mode)) + mode = PLD_EPPING; + else + mode = PLD_MISSION; + + if (BYPASS_QMI) + return 0; + else + return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode); +} + +#ifdef WLAN_FEATURE_EPPING + +#define CE_EPPING_USES_IRQ true + +void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state) +{ + if (CE_EPPING_USES_IRQ) + hif_state->host_ce_config = host_ce_config_wlan_epping_irq; + else + hif_state->host_ce_config = host_ce_config_wlan_epping_poll; + hif_state->target_ce_config = target_ce_config_wlan_epping; + hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping); + target_shadow_reg_cfg = target_shadow_reg_cfg_epping; + shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping); +} +#endif + +#ifdef QCN7605_SUPPORT +static inline +void hif_set_ce_config_qcn7605(struct hif_softc *scn, + struct HIF_CE_state *hif_state) +{ + hif_state->host_ce_config = host_ce_config_wlan_qcn7605; + hif_state->target_ce_config = target_ce_config_wlan_qcn7605; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_qcn7605); + target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605; + shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605); + scn->ce_count = QCN7605_CE_COUNT; +} +#else +static inline +void hif_set_ce_config_qcn7605(struct hif_softc *scn, + struct HIF_CE_state *hif_state) +{ + HIF_ERROR("QCN7605 not supported"); +} +#endif + +#ifdef CE_SVC_CMN_INIT +#ifdef QCA_WIFI_SUPPORT_SRNG +static inline void hif_ce_service_init(void) +{ + ce_service_srng_init(); +} +#else +static inline void hif_ce_service_init(void) +{ + ce_service_legacy_init(); +} +#endif +#else +static inline void hif_ce_service_init(void) +{ +} +#endif + + +/** + * hif_ce_prepare_config() - load the correct static tables. + * @scn: hif context + * + * Epping uses different static attribute tables than mission mode. + */ +void hif_ce_prepare_config(struct hif_softc *scn) +{ + uint32_t mode = hif_get_conparam(scn); + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + hif_ce_service_init(); + hif_state->ce_services = ce_services_attach(scn); + + scn->ce_count = HOST_CE_COUNT; + /* if epping is enabled we need to use the epping configuration. */ + if (QDF_IS_EPPING_ENABLED(mode)) { + hif_ce_prepare_epping_config(hif_state); + return; + } + + switch (tgt_info->target_type) { + default: + hif_state->host_ce_config = host_ce_config_wlan; + hif_state->target_ce_config = target_ce_config_wlan; + hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan); + break; + case TARGET_TYPE_QCN7605: + hif_set_ce_config_qcn7605(scn, hif_state); + break; + case TARGET_TYPE_AR900B: + case TARGET_TYPE_QCA9984: + case TARGET_TYPE_IPQ4019: + case TARGET_TYPE_QCA9888: + if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { + hif_state->host_ce_config = + host_lowdesc_ce_cfg_wlan_ar900b_nopktlog; + } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { + hif_state->host_ce_config = + host_lowdesc_ce_cfg_wlan_ar900b; + } else { + hif_state->host_ce_config = host_ce_config_wlan_ar900b; + } + + hif_state->target_ce_config = target_ce_config_wlan_ar900b; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_ar900b); + + break; + + case TARGET_TYPE_AR9888: + case TARGET_TYPE_AR9888V2: + if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { + hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888; + } else { + hif_state->host_ce_config = host_ce_config_wlan_ar9888; + } + + hif_state->target_ce_config = target_ce_config_wlan_ar9888; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_ar9888); + + break; + + case TARGET_TYPE_QCA8074: + case TARGET_TYPE_QCA8074V2: + case TARGET_TYPE_QCA6018: + if (scn->bus_type == QDF_BUS_TYPE_PCI) { + hif_state->host_ce_config = + host_ce_config_wlan_qca8074_pci; + hif_state->target_ce_config = + target_ce_config_wlan_qca8074_pci; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_qca8074_pci); + } else { + hif_state->host_ce_config = host_ce_config_wlan_qca8074; + hif_state->target_ce_config = + target_ce_config_wlan_qca8074; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_qca8074); + } + break; + case TARGET_TYPE_QCA6290: + hif_state->host_ce_config = host_ce_config_wlan_qca6290; + hif_state->target_ce_config = target_ce_config_wlan_qca6290; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_qca6290); + + scn->ce_count = QCA_6290_CE_COUNT; + break; + case TARGET_TYPE_QCN9000: + hif_state->host_ce_config = host_ce_config_wlan_qcn9000; + hif_state->target_ce_config = target_ce_config_wlan_qcn9000; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_qcn9000); + scn->ce_count = QCN_9000_CE_COUNT; + scn->disable_wake_irq = 1; + break; + case TARGET_TYPE_QCA6390: + hif_state->host_ce_config = host_ce_config_wlan_qca6390; + hif_state->target_ce_config = target_ce_config_wlan_qca6390; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_qca6390); + + scn->ce_count = QCA_6390_CE_COUNT; + break; + case TARGET_TYPE_QCA6490: + hif_state->host_ce_config = host_ce_config_wlan_qca6490; + hif_state->target_ce_config = target_ce_config_wlan_qca6490; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_qca6490); + + scn->ce_count = QCA_6490_CE_COUNT; + break; + case TARGET_TYPE_QCA6750: + hif_state->host_ce_config = host_ce_config_wlan_qca6750; + hif_state->target_ce_config = target_ce_config_wlan_qca6750; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_qca6750); + + scn->ce_count = QCA_6750_CE_COUNT; + break; + case TARGET_TYPE_ADRASTEA: + if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { + hif_state->host_ce_config = + host_lowdesc_ce_config_wlan_adrastea_nopktlog; + hif_state->target_ce_config = + target_lowdesc_ce_config_wlan_adrastea_nopktlog; + hif_state->target_ce_config_sz = + sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog); + } else { + hif_state->host_ce_config = + host_ce_config_wlan_adrastea; + hif_state->target_ce_config = + target_ce_config_wlan_adrastea; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_adrastea); + } + break; + + } + QDF_BUG(scn->ce_count <= CE_COUNT_MAX); +} + +/** + * hif_ce_open() - do ce specific allocations + * @hif_sc: pointer to hif context + * + * return: 0 for success or QDF_STATUS_E_NOMEM + */ +QDF_STATUS hif_ce_open(struct hif_softc *hif_sc) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); + + qdf_spinlock_create(&hif_state->irq_reg_lock); + qdf_spinlock_create(&hif_state->keep_awake_lock); + return QDF_STATUS_SUCCESS; +} + +/** + * hif_ce_close() - do ce specific free + * @hif_sc: pointer to hif context + */ +void hif_ce_close(struct hif_softc *hif_sc) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); + + qdf_spinlock_destroy(&hif_state->irq_reg_lock); + qdf_spinlock_destroy(&hif_state->keep_awake_lock); +} + +/** + * hif_unconfig_ce() - ensure resources from hif_config_ce are freed + * @hif_sc: hif context + * + * uses state variables to support cleaning up when hif_config_ce fails. + */ +void hif_unconfig_ce(struct hif_softc *hif_sc) +{ + int pipe_num; + struct HIF_CE_pipe_info *pipe_info; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc); + + for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { + pipe_info = &hif_state->pipe_info[pipe_num]; + if (pipe_info->ce_hdl) { + ce_unregister_irq(hif_state, (1 << pipe_num)); + } + } + deinit_tasklet_workers(hif_hdl); + for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { + pipe_info = &hif_state->pipe_info[pipe_num]; + if (pipe_info->ce_hdl) { + ce_fini(pipe_info->ce_hdl); + pipe_info->ce_hdl = NULL; + pipe_info->buf_sz = 0; + qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); + } + } + if (hif_sc->athdiag_procfs_inited) { + athdiag_procfs_remove(); + hif_sc->athdiag_procfs_inited = false; + } +} + +#ifdef CONFIG_BYPASS_QMI +#ifdef QCN7605_SUPPORT +/** + * hif_post_static_buf_to_target() - post static buffer to WLAN FW + * @scn: pointer to HIF structure + * + * WLAN FW needs 2MB memory from DDR when QMI is disabled. + * + * Return: void + */ +static void hif_post_static_buf_to_target(struct hif_softc *scn) +{ + phys_addr_t target_pa; + struct ce_info *ce_info_ptr; + uint32_t msi_data_start; + uint32_t msi_data_count; + uint32_t msi_irq_start; + uint32_t i = 0; + int ret; + + scn->vaddr_qmi_bypass = + (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev, + scn->qdf_dev->dev, + FW_SHARED_MEM, + &target_pa); + if (!scn->vaddr_qmi_bypass) { + hif_err("Memory allocation failed could not post target buf"); + return; + } + + scn->paddr_qmi_bypass = target_pa; + + ce_info_ptr = (struct ce_info *)scn->vaddr_qmi_bypass; + + if (scn->vaddr_rri_on_ddr) { + ce_info_ptr->rri_over_ddr_low_paddr = + BITS0_TO_31(scn->paddr_rri_on_ddr); + ce_info_ptr->rri_over_ddr_high_paddr = + BITS32_TO_35(scn->paddr_rri_on_ddr); + } + + ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", + &msi_data_count, &msi_data_start, + &msi_irq_start); + if (ret) { + hif_err("Failed to get CE msi config"); + return; + } + + for (i = 0; i < CE_COUNT_MAX; i++) { + ce_info_ptr->cfg[i].ce_id = i; + ce_info_ptr->cfg[i].msi_vector = + (i % msi_data_count) + msi_irq_start; + } + + hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa); + hif_info("target va %pK target pa %pa", scn->vaddr_qmi_bypass, + &target_pa); +} + +/** + * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW + * @scn: pointer to HIF structure + * + * + * Return: void + */ +void hif_cleanup_static_buf_to_target(struct hif_softc *scn) +{ + void *target_va = scn->vaddr_qmi_bypass; + phys_addr_t target_pa = scn->paddr_qmi_bypass; + + qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, + FW_SHARED_MEM, target_va, + target_pa, 0); + hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0); +} +#else +/** + * hif_post_static_buf_to_target() - post static buffer to WLAN FW + * @scn: pointer to HIF structure + * + * WLAN FW needs 2MB memory from DDR when QMI is disabled. + * + * Return: void + */ +static void hif_post_static_buf_to_target(struct hif_softc *scn) +{ + qdf_dma_addr_t target_pa; + + scn->vaddr_qmi_bypass = + (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev, + scn->qdf_dev->dev, + FW_SHARED_MEM, + &target_pa); + if (!scn->vaddr_qmi_bypass) { + hif_err("Memory allocation failed could not post target buf"); + return; + } + + scn->paddr_qmi_bypass = target_pa; + hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa); +} + +/** + * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW + * @scn: pointer to HIF structure + * + * + * Return: void + */ +void hif_cleanup_static_buf_to_target(struct hif_softc *scn) +{ + void *target_va = scn->vaddr_qmi_bypass; + phys_addr_t target_pa = scn->paddr_qmi_bypass; + + qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, + FW_SHARED_MEM, target_va, + target_pa, 0); + hif_write32_mb(snc, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0); +} +#endif + +#else +static inline void hif_post_static_buf_to_target(struct hif_softc *scn) +{ +} + +void hif_cleanup_static_buf_to_target(struct hif_softc *scn) +{ +} +#endif + +static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok, + bool wait_for_it) +{ + /* todo */ + return 0; +} + +/** + * hif_config_ce() - configure copy engines + * @scn: hif context + * + * Prepares fw, copy engine hardware and host sw according + * to the attributes selected by hif_ce_prepare_config. + * + * also calls athdiag_procfs_init + * + * return: 0 for success nonzero for failure. + */ +int hif_config_ce(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct HIF_CE_pipe_info *pipe_info; + int pipe_num; + struct CE_state *ce_state = NULL; + +#ifdef ADRASTEA_SHADOW_REGISTERS + int i; +#endif + QDF_STATUS rv = QDF_STATUS_SUCCESS; + + scn->notice_send = true; + scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM; + + hif_post_static_buf_to_target(scn); + + hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS; + + hif_config_rri_on_ddr(scn); + + if (ce_srng_based(scn)) + scn->bus_ops.hif_target_sleep_state_adjust = + &hif_srng_sleep_state_adjust; + + /* Initialise the CE debug history sysfs interface inputs ce_id and + * index. Disable data storing + */ + reset_ce_debug_history(scn); + + for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { + struct CE_attr *attr; + + pipe_info = &hif_state->pipe_info[pipe_num]; + pipe_info->pipe_num = pipe_num; + pipe_info->HIF_CE_state = hif_state; + attr = &hif_state->host_ce_config[pipe_num]; + + pipe_info->ce_hdl = ce_init(scn, pipe_num, attr); + ce_state = scn->ce_id_to_state[pipe_num]; + if (!ce_state) { + A_TARGET_ACCESS_UNLIKELY(scn); + goto err; + } + qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock); + QDF_ASSERT(pipe_info->ce_hdl); + if (!pipe_info->ce_hdl) { + rv = QDF_STATUS_E_FAILURE; + A_TARGET_ACCESS_UNLIKELY(scn); + goto err; + } + + ce_state->lro_data = qdf_lro_init(); + + if (attr->flags & CE_ATTR_DIAG) { + /* Reserve the ultimate CE for + * Diagnostic Window support + */ + hif_state->ce_diag = pipe_info->ce_hdl; + continue; + } + + if (hif_is_nss_wifi_enabled(scn) && ce_state && + (ce_state->htt_rx_data)) + continue; + + pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max); + if (attr->dest_nentries > 0) { + atomic_set(&pipe_info->recv_bufs_needed, + init_buffer_count(attr->dest_nentries - 1)); + /*SRNG based CE has one entry less */ + if (ce_srng_based(scn)) + atomic_dec(&pipe_info->recv_bufs_needed); + } else { + atomic_set(&pipe_info->recv_bufs_needed, 0); + } + ce_tasklet_init(hif_state, (1 << pipe_num)); + ce_register_irq(hif_state, (1 << pipe_num)); + } + + if (athdiag_procfs_init(scn) != 0) { + A_TARGET_ACCESS_UNLIKELY(scn); + goto err; + } + scn->athdiag_procfs_inited = true; + + HIF_DBG("%s: ce_init done", __func__); + + init_tasklet_workers(hif_hdl); + + HIF_DBG("%s: X, ret = %d", __func__, rv); + +#ifdef ADRASTEA_SHADOW_REGISTERS + HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__); + for (i = 0; i < NUM_SHADOW_REGISTERS; i++) { + HIF_DBG("%s Shadow Register%d is mapped to address %x", + __func__, i, + (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2)); + } +#endif + + return rv != QDF_STATUS_SUCCESS; + +err: + /* Failure, so clean up */ + hif_unconfig_ce(scn); + HIF_TRACE("%s: X, ret = %d", __func__, rv); + return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE; +} + +#ifdef IPA_OFFLOAD +/** + * hif_ce_ipa_get_ce_resource() - get uc resource on hif + * @scn: bus context + * @ce_sr_base_paddr: copyengine source ring base physical address + * @ce_sr_ring_size: copyengine source ring size + * @ce_reg_paddr: copyengine register physical address + * + * IPA micro controller data path offload feature enabled, + * HIF should release copy engine related resource information to IPA UC + * IPA UC will access hardware resource with released information + * + * Return: None + */ +void hif_ce_ipa_get_ce_resource(struct hif_softc *scn, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct HIF_CE_pipe_info *pipe_info = + &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]); + struct CE_handle *ce_hdl = pipe_info->ce_hdl; + + ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size, + ce_reg_paddr); +} +#endif /* IPA_OFFLOAD */ + + +#ifdef ADRASTEA_SHADOW_REGISTERS + +/* + * Current shadow register config + * + * ----------------------------------------------------------- + * Shadow Register | CE | src/dst write index + * ----------------------------------------------------------- + * 0 | 0 | src + * 1 No Config - Doesn't point to anything + * 2 No Config - Doesn't point to anything + * 3 | 3 | src + * 4 | 4 | src + * 5 | 5 | src + * 6 No Config - Doesn't point to anything + * 7 | 7 | src + * 8 No Config - Doesn't point to anything + * 9 No Config - Doesn't point to anything + * 10 No Config - Doesn't point to anything + * 11 No Config - Doesn't point to anything + * ----------------------------------------------------------- + * 12 No Config - Doesn't point to anything + * 13 | 1 | dst + * 14 | 2 | dst + * 15 No Config - Doesn't point to anything + * 16 No Config - Doesn't point to anything + * 17 No Config - Doesn't point to anything + * 18 No Config - Doesn't point to anything + * 19 | 7 | dst + * 20 | 8 | dst + * 21 No Config - Doesn't point to anything + * 22 No Config - Doesn't point to anything + * 23 No Config - Doesn't point to anything + * ----------------------------------------------------------- + * + * + * ToDo - Move shadow register config to following in the future + * This helps free up a block of shadow registers towards the end. + * Can be used for other purposes + * + * ----------------------------------------------------------- + * Shadow Register | CE | src/dst write index + * ----------------------------------------------------------- + * 0 | 0 | src + * 1 | 3 | src + * 2 | 4 | src + * 3 | 5 | src + * 4 | 7 | src + * ----------------------------------------------------------- + * 5 | 1 | dst + * 6 | 2 | dst + * 7 | 7 | dst + * 8 | 8 | dst + * ----------------------------------------------------------- + * 9 No Config - Doesn't point to anything + * 12 No Config - Doesn't point to anything + * 13 No Config - Doesn't point to anything + * 14 No Config - Doesn't point to anything + * 15 No Config - Doesn't point to anything + * 16 No Config - Doesn't point to anything + * 17 No Config - Doesn't point to anything + * 18 No Config - Doesn't point to anything + * 19 No Config - Doesn't point to anything + * 20 No Config - Doesn't point to anything + * 21 No Config - Doesn't point to anything + * 22 No Config - Doesn't point to anything + * 23 No Config - Doesn't point to anything + * ----------------------------------------------------------- +*/ +#ifndef QCN7605_SUPPORT +u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) +{ + u32 addr = 0; + u32 ce = COPY_ENGINE_ID(ctrl_addr); + + switch (ce) { + case 0: + addr = SHADOW_VALUE0; + break; + case 3: + addr = SHADOW_VALUE3; + break; + case 4: + addr = SHADOW_VALUE4; + break; + case 5: + addr = SHADOW_VALUE5; + break; + case 7: + addr = SHADOW_VALUE7; + break; + default: + HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); + QDF_ASSERT(0); + } + return addr; + +} + +u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) +{ + u32 addr = 0; + u32 ce = COPY_ENGINE_ID(ctrl_addr); + + switch (ce) { + case 1: + addr = SHADOW_VALUE13; + break; + case 2: + addr = SHADOW_VALUE14; + break; + case 5: + addr = SHADOW_VALUE17; + break; + case 7: + addr = SHADOW_VALUE19; + break; + case 8: + addr = SHADOW_VALUE20; + break; + case 9: + addr = SHADOW_VALUE21; + break; + case 10: + addr = SHADOW_VALUE22; + break; + case 11: + addr = SHADOW_VALUE23; + break; + default: + HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); + QDF_ASSERT(0); + } + + return addr; + +} +#else +u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) +{ + u32 addr = 0; + u32 ce = COPY_ENGINE_ID(ctrl_addr); + + switch (ce) { + case 0: + addr = SHADOW_VALUE0; + break; + case 4: + addr = SHADOW_VALUE4; + break; + case 5: + addr = SHADOW_VALUE5; + break; + default: + HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); + QDF_ASSERT(0); + } + return addr; +} + +u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) +{ + u32 addr = 0; + u32 ce = COPY_ENGINE_ID(ctrl_addr); + + switch (ce) { + case 1: + addr = SHADOW_VALUE13; + break; + case 2: + addr = SHADOW_VALUE14; + break; + case 3: + addr = SHADOW_VALUE15; + break; + case 5: + addr = SHADOW_VALUE17; + break; + case 7: + addr = SHADOW_VALUE19; + break; + case 8: + addr = SHADOW_VALUE20; + break; + case 9: + addr = SHADOW_VALUE21; + break; + case 10: + addr = SHADOW_VALUE22; + break; + case 11: + addr = SHADOW_VALUE23; + break; + default: + HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); + QDF_ASSERT(0); + } + + return addr; +} +#endif +#endif + +#if defined(FEATURE_LRO) +void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id) +{ + struct CE_state *ce_state; + struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); + + ce_state = scn->ce_id_to_state[ctx_id]; + + return ce_state->lro_data; +} +#endif + +/** + * hif_map_service_to_pipe() - returns the ce ids pertaining to + * this service + * @scn: hif_softc pointer. + * @svc_id: Service ID for which the mapping is needed. + * @ul_pipe: address of the container in which ul pipe is returned. + * @dl_pipe: address of the container in which dl pipe is returned. + * @ul_is_polled: address of the container in which a bool + * indicating if the UL CE for this service + * is polled is returned. + * @dl_is_polled: address of the container in which a bool + * indicating if the DL CE for this service + * is polled is returned. + * + * Return: Indicates whether the service has been found in the table. + * Upon return, ul_is_polled is updated only if ul_pipe is updated. + * There will be warning logs if either leg has not been updated + * because it missed the entry in the table (but this is not an err). + */ +int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id, + uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled, + int *dl_is_polled) +{ + int status = QDF_STATUS_E_INVAL; + unsigned int i; + struct service_to_pipe element; + struct service_to_pipe *tgt_svc_map_to_use; + uint32_t sz_tgt_svc_map_to_use; + struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + bool dl_updated = false; + bool ul_updated = false; + + hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use, + &sz_tgt_svc_map_to_use); + + *dl_is_polled = 0; /* polling for received messages not supported */ + + for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) { + + memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element)); + if (element.service_id == svc_id) { + if (element.pipedir == PIPEDIR_OUT) { + *ul_pipe = element.pipenum; + *ul_is_polled = + (hif_state->host_ce_config[*ul_pipe].flags & + CE_ATTR_DISABLE_INTR) != 0; + ul_updated = true; + } else if (element.pipedir == PIPEDIR_IN) { + *dl_pipe = element.pipenum; + dl_updated = true; + } + status = QDF_STATUS_SUCCESS; + } + } + if (ul_updated == false) + HIF_DBG("ul pipe is NOT updated for service %d", svc_id); + if (dl_updated == false) + HIF_DBG("dl pipe is NOT updated for service %d", svc_id); + + return status; +} + +#ifdef SHADOW_REG_DEBUG +inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn, + uint32_t CE_ctrl_addr) +{ + uint32_t read_from_hw, srri_from_ddr = 0; + + read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS); + + srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); + + if (read_from_hw != srri_from_ddr) { + HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", + __func__, srri_from_ddr, read_from_hw, + CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); + QDF_ASSERT(0); + } + return srri_from_ddr; +} + + +inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn, + uint32_t CE_ctrl_addr) +{ + uint32_t read_from_hw, drri_from_ddr = 0; + + read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS); + + drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); + + if (read_from_hw != drri_from_ddr) { + HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", + drri_from_ddr, read_from_hw, + CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); + QDF_ASSERT(0); + } + return drri_from_ddr; +} + +#endif + +/** + * hif_dump_ce_registers() - dump ce registers + * @scn: hif_opaque_softc pointer. + * + * Output the copy engine registers + * + * Return: 0 for success or error code + */ +int hif_dump_ce_registers(struct hif_softc *scn) +{ + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + uint32_t ce_reg_address = CE0_BASE_ADDRESS; + uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2]; + uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2; + uint16_t i; + QDF_STATUS status; + + for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) { + if (!scn->ce_id_to_state[i]) { + HIF_DBG("CE%d not used.", i); + continue; + } + + status = hif_diag_read_mem(hif_hdl, ce_reg_address, + (uint8_t *) &ce_reg_values[0], + ce_reg_word_size * sizeof(uint32_t)); + + if (status != QDF_STATUS_SUCCESS) { + HIF_ERROR("Dumping CE register failed!"); + return -EACCES; + } + HIF_ERROR("CE%d=>\n", i); + qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG, + (uint8_t *) &ce_reg_values[0], + ce_reg_word_size * sizeof(uint32_t)); + qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address + + SR_WR_INDEX_ADDRESS), + ce_reg_values[SR_WR_INDEX_ADDRESS/4]); + qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address + + CURRENT_SRRI_ADDRESS), + ce_reg_values[CURRENT_SRRI_ADDRESS/4]); + qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address + + DST_WR_INDEX_ADDRESS), + ce_reg_values[DST_WR_INDEX_ADDRESS/4]); + qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address + + CURRENT_DRRI_ADDRESS), + ce_reg_values[CURRENT_DRRI_ADDRESS/4]); + qdf_print("---"); + } + return 0; +} +qdf_export_symbol(hif_dump_ce_registers); +#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT +struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc, + struct hif_pipe_addl_info *hif_info, uint32_t pipe) +{ + struct hif_softc *scn = HIF_GET_SOFTC(osc); + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc); + struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); + struct CE_handle *ce_hdl = pipe_info->ce_hdl; + struct CE_state *ce_state = (struct CE_state *)ce_hdl; + struct CE_ring_state *src_ring = ce_state->src_ring; + struct CE_ring_state *dest_ring = ce_state->dest_ring; + + if (src_ring) { + hif_info->ul_pipe.nentries = src_ring->nentries; + hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask; + hif_info->ul_pipe.sw_index = src_ring->sw_index; + hif_info->ul_pipe.write_index = src_ring->write_index; + hif_info->ul_pipe.hw_index = src_ring->hw_index; + hif_info->ul_pipe.base_addr_CE_space = + src_ring->base_addr_CE_space; + hif_info->ul_pipe.base_addr_owner_space = + src_ring->base_addr_owner_space; + } + + + if (dest_ring) { + hif_info->dl_pipe.nentries = dest_ring->nentries; + hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask; + hif_info->dl_pipe.sw_index = dest_ring->sw_index; + hif_info->dl_pipe.write_index = dest_ring->write_index; + hif_info->dl_pipe.hw_index = dest_ring->hw_index; + hif_info->dl_pipe.base_addr_CE_space = + dest_ring->base_addr_CE_space; + hif_info->dl_pipe.base_addr_owner_space = + dest_ring->base_addr_owner_space; + } + + hif_info->pci_mem = pci_resource_start(sc->pdev, 0); + hif_info->ctrl_addr = ce_state->ctrl_addr; + + return hif_info; +} +qdf_export_symbol(hif_get_addl_pipe_info); + +uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode) +{ + struct hif_softc *scn = HIF_GET_SOFTC(osc); + + scn->nss_wifi_ol_mode = mode; + return 0; +} +qdf_export_symbol(hif_set_nss_wifiol_mode); +#endif + +void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib) +{ + struct hif_softc *scn = HIF_GET_SOFTC(osc); + scn->hif_attribute = hif_attrib; +} + + +/* disable interrupts (only applicable for legacy copy engine currently */ +void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num) +{ + struct hif_softc *scn = HIF_GET_SOFTC(osc); + struct CE_state *CE_state = scn->ce_id_to_state[pipe_num]; + uint32_t ctrl_addr = CE_state->ctrl_addr; + + Q_TARGET_ACCESS_BEGIN(scn); + CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr); + Q_TARGET_ACCESS_END(scn); +} +qdf_export_symbol(hif_disable_interrupt); + +/** + * hif_fw_event_handler() - hif fw event handler + * @hif_state: pointer to hif ce state structure + * + * Process fw events and raise HTC callback to process fw events. + * + * Return: none + */ +static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state) +{ + struct hif_msg_callbacks *msg_callbacks = + &hif_state->msg_callbacks_current; + + if (!msg_callbacks->fwEventHandler) + return; + + msg_callbacks->fwEventHandler(msg_callbacks->Context, + QDF_STATUS_E_FAILURE); +} + +#ifndef QCA_WIFI_3_0 +/** + * hif_fw_interrupt_handler() - FW interrupt handler + * @irq: irq number + * @arg: the user pointer + * + * Called from the PCI interrupt handler when a + * firmware-generated interrupt to the Host. + * + * only registered for legacy ce devices + * + * Return: status of handled irq + */ +irqreturn_t hif_fw_interrupt_handler(int irq, void *arg) +{ + struct hif_softc *scn = arg; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + uint32_t fw_indicator_address, fw_indicator; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return ATH_ISR_NOSCHED; + + fw_indicator_address = hif_state->fw_indicator_address; + /* For sudden unplug this will return ~0 */ + fw_indicator = A_TARGET_READ(scn, fw_indicator_address); + + if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) { + /* ACK: clear Target-side pending event */ + A_TARGET_WRITE(scn, fw_indicator_address, + fw_indicator & ~FW_IND_EVENT_PENDING); + if (Q_TARGET_ACCESS_END(scn) < 0) + return ATH_ISR_SCHED; + + if (hif_state->started) { + hif_fw_event_handler(hif_state); + } else { + /* + * Probable Target failure before we're prepared + * to handle it. Generally unexpected. + * fw_indicator used as bitmap, and defined as below: + * FW_IND_EVENT_PENDING 0x1 + * FW_IND_INITIALIZED 0x2 + * FW_IND_NEEDRECOVER 0x4 + */ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: Early firmware event indicated 0x%x\n", + __func__, fw_indicator)); + } + } else { + if (Q_TARGET_ACCESS_END(scn) < 0) + return ATH_ISR_SCHED; + } + + return ATH_ISR_SCHED; +} +#else +irqreturn_t hif_fw_interrupt_handler(int irq, void *arg) +{ + return ATH_ISR_SCHED; +} +#endif /* #ifdef QCA_WIFI_3_0 */ + + +/** + * hif_wlan_disable(): call the platform driver to disable wlan + * @scn: HIF Context + * + * This function passes the con_mode to platform driver to disable + * wlan. + * + * Return: void + */ +void hif_wlan_disable(struct hif_softc *scn) +{ + enum pld_driver_mode mode; + uint32_t con_mode = hif_get_conparam(scn); + + if (scn->target_status == TARGET_STATUS_RESET) + return; + + if (QDF_GLOBAL_FTM_MODE == con_mode) + mode = PLD_FTM; + else if (QDF_IS_EPPING_ENABLED(con_mode)) + mode = PLD_EPPING; + else + mode = PLD_MISSION; + + pld_wlan_disable(scn->qdf_dev->dev, mode); +} + +int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id) +{ + QDF_STATUS status; + uint8_t ul_pipe, dl_pipe; + int ul_is_polled, dl_is_polled; + + /* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */ + status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn), + HTC_CTRL_RSVD_SVC, + &ul_pipe, &dl_pipe, + &ul_is_polled, &dl_is_polled); + if (status) { + HIF_ERROR("%s: failed to map pipe: %d", __func__, status); + return qdf_status_to_os_return(status); + } + + *ce_id = dl_pipe; + + return 0; +} + +#ifdef HIF_CE_LOG_INFO +/** + * ce_get_index_info(): Get CE index info + * @scn: HIF Context + * @ce_state: CE opaque handle + * @info: CE info + * + * Return: 0 for success and non zero for failure + */ +static +int ce_get_index_info(struct hif_softc *scn, void *ce_state, + struct ce_index *info) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + return hif_state->ce_services->ce_get_index_info(scn, ce_state, info); +} + +void hif_log_ce_info(struct hif_softc *scn, uint8_t *data, + unsigned int *offset) +{ + struct hang_event_info info = {0}; + static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) | + BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10); + uint8_t curr_index = 0; + uint8_t i; + uint16_t size; + + info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt); + info.active_grp_tasklet_cnt = + qdf_atomic_read(&scn->active_grp_tasklet_cnt); + + for (i = 0; i < scn->ce_count; i++) { + if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i]) + continue; + + if (ce_get_index_info(scn, scn->ce_id_to_state[i], + &info.ce_info[curr_index])) + continue; + + curr_index++; + } + + info.ce_count = curr_index; + size = sizeof(info) - + (CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index); + + if (*offset + size > QDF_WLAN_HANG_FW_OFFSET) + return; + + QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO, + size - QDF_HANG_EVENT_TLV_HDR_SIZE); + + qdf_mem_copy(data + *offset, &info, size); + *offset = *offset + size; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.h new file mode 100644 index 0000000000000000000000000000000000000000..d2875343644291a148ef480cb6b80d228bcde370 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.h @@ -0,0 +1,364 @@ +/* + * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __CE_H__ +#define __CE_H__ + +#include "qdf_atomic.h" +#include "qdf_lock.h" +#include "hif_main.h" +#include "qdf_util.h" +#include "hif_exec.h" + +#ifndef DATA_CE_SW_INDEX_NO_INLINE_UPDATE +#define DATA_CE_UPDATE_SWINDEX(x, scn, addr) \ + (x = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, addr)) +#else +#define DATA_CE_UPDATE_SWINDEX(x, scn, addr) +#endif + +/* + * Number of times to check for any pending tx/rx completion on + * a copy engine, this count should be big enough. Once we hit + * this threashold we'll not check for any Tx/Rx comlpetion in same + * interrupt handling. Note that this threashold is only used for + * Rx interrupt processing, this can be used tor Tx as well if we + * suspect any infinite loop in checking for pending Tx completion. + */ +#define CE_TXRX_COMP_CHECK_THRESHOLD 20 + +#define CE_HTT_T2H_MSG 1 +#define CE_HTT_H2T_MSG 4 + +#define CE_OFFSET 0x00000400 +#define CE_USEFUL_SIZE 0x00000058 +#define CE_ALL_BITMAP 0xFFFF + +#define HIF_REQUESTED_EVENTS 20 +/** + * enum ce_id_type + * + * @ce_id_type: Copy engine ID + */ +enum ce_id_type { + CE_ID_0, + CE_ID_1, + CE_ID_2, + CE_ID_3, + CE_ID_4, + CE_ID_5, + CE_ID_6, + CE_ID_7, + CE_ID_8, + CE_ID_9, + CE_ID_10, + CE_ID_11, + CE_ID_MAX +}; + +/** + * enum ce_buckets + * + * @ce_buckets: CE tasklet time buckets + * @CE_BUCKET_500_US: tasklet bucket to store 0-0.5ms + * @CE_BUCKET_1_MS: tasklet bucket to store 0.5-1ms + * @CE_BUCKET_2_MS: tasklet bucket to store 1-2ms + * @CE_BUCKET_5_MS: tasklet bucket to store 2-5ms + * @CE_BUCKET_10_MS: tasklet bucket to store 5-10ms + * @CE_BUCKET_BEYOND: tasklet bucket to store > 10ms + * @CE_BUCKET_MAX: enum max value + */ +#ifdef CE_TASKLET_DEBUG_ENABLE +enum ce_buckets { + CE_BUCKET_500_US, + CE_BUCKET_1_MS, + CE_BUCKET_2_MS, + CE_BUCKET_5_MS, + CE_BUCKET_10_MS, + CE_BUCKET_BEYOND, + CE_BUCKET_MAX, +}; +#endif + +enum ce_target_type { + CE_SVC_LEGACY, + CE_SVC_SRNG, + CE_MAX_TARGET_TYPE +}; + +enum ol_ath_hif_pkt_ecodes { + HIF_PIPE_NO_RESOURCE = 0 +}; + +struct HIF_CE_state; + +/* Per-pipe state. */ +struct HIF_CE_pipe_info { + /* Handle of underlying Copy Engine */ + struct CE_handle *ce_hdl; + + /* Our pipe number; facilitiates use of pipe_info ptrs. */ + uint8_t pipe_num; + + /* Convenience back pointer to HIF_CE_state. */ + struct HIF_CE_state *HIF_CE_state; + + /* Instantaneous number of receive buffers that should be posted */ + atomic_t recv_bufs_needed; + qdf_size_t buf_sz; + qdf_spinlock_t recv_bufs_needed_lock; + + qdf_spinlock_t completion_freeq_lock; + /* Limit the number of outstanding send requests. */ + int num_sends_allowed; + + /* adding three counts for debugging ring buffer errors */ + uint32_t nbuf_alloc_err_count; + uint32_t nbuf_dma_err_count; + uint32_t nbuf_ce_enqueue_err_count; + struct hif_msg_callbacks pipe_callbacks; +}; + +/** + * struct ce_tasklet_entry + * + * @intr_tq: intr_tq + * @ce_id: ce_id + * @inited: inited + * @hif_ce_state: hif_ce_state + * @from_irq: from_irq + */ +struct ce_tasklet_entry { + struct tasklet_struct intr_tq; + enum ce_id_type ce_id; + bool inited; + void *hif_ce_state; +}; + +static inline bool hif_dummy_grp_done(struct hif_exec_context *grp_entry, int + work_done) +{ + return true; +} + +extern struct hif_execution_ops tasklet_sched_ops; +extern struct hif_execution_ops napi_sched_ops; + +/** + * struct ce_stats + * + * @ce_per_cpu: Stats of the CEs running per CPU + * @record_index: Current index to store in time record + * @tasklet_sched_entry_ts: Timestamp when tasklet is scheduled + * @tasklet_exec_entry_ts: Timestamp when tasklet is started execuiton + * @tasklet_exec_time_record: Last N number of tasklets execution time + * @tasklet_sched_time_record: Last N number of tasklets scheduled time + * @ce_tasklet_exec_bucket: Tasklet execution time buckets + * @ce_tasklet_sched_bucket: Tasklet time in queue buckets + * @ce_tasklet_exec_last_update: Latest timestamp when bucket is updated + * @ce_tasklet_sched_last_update: Latest timestamp when bucket is updated + */ +struct ce_stats { + uint32_t ce_per_cpu[CE_COUNT_MAX][QDF_MAX_AVAILABLE_CPU]; +#ifdef CE_TASKLET_DEBUG_ENABLE + uint32_t record_index[CE_COUNT_MAX]; + uint64_t tasklet_sched_entry_ts[CE_COUNT_MAX]; + uint64_t tasklet_exec_entry_ts[CE_COUNT_MAX]; + uint64_t tasklet_exec_time_record[CE_COUNT_MAX][HIF_REQUESTED_EVENTS]; + uint64_t tasklet_sched_time_record[CE_COUNT_MAX][HIF_REQUESTED_EVENTS]; + uint64_t ce_tasklet_exec_bucket[CE_COUNT_MAX][CE_BUCKET_MAX]; + uint64_t ce_tasklet_sched_bucket[CE_COUNT_MAX][CE_BUCKET_MAX]; + uint64_t ce_tasklet_exec_last_update[CE_COUNT_MAX][CE_BUCKET_MAX]; + uint64_t ce_tasklet_sched_last_update[CE_COUNT_MAX][CE_BUCKET_MAX]; +#endif +}; + +struct HIF_CE_state { + struct hif_softc ol_sc; + bool started; + struct ce_tasklet_entry tasklets[CE_COUNT_MAX]; + struct hif_exec_context *hif_ext_group[HIF_MAX_GROUP]; + uint32_t hif_num_extgroup; + qdf_spinlock_t keep_awake_lock; + qdf_spinlock_t irq_reg_lock; + unsigned int keep_awake_count; + bool verified_awake; + bool fake_sleep; + qdf_timer_t sleep_timer; + bool sleep_timer_init; + qdf_time_t sleep_ticks; + uint32_t ce_register_irq_done; + + struct CE_pipe_config *target_ce_config; + struct CE_attr *host_ce_config; + uint32_t target_ce_config_sz; + /* Per-pipe state. */ + struct HIF_CE_pipe_info pipe_info[CE_COUNT_MAX]; + /* to be activated after BMI_DONE */ + struct hif_msg_callbacks msg_callbacks_pending; + /* current msg callbacks in use */ + struct hif_msg_callbacks msg_callbacks_current; + + /* Target address used to signal a pending firmware event */ + uint32_t fw_indicator_address; + + /* Copy Engine used for Diagnostic Accesses */ + struct CE_handle *ce_diag; + struct ce_stats stats; + struct ce_ops *ce_services; +}; + +/* + * HIA Map Definition + */ +struct host_interest_area_t { + uint32_t hi_interconnect_state; + uint32_t hi_early_alloc; + uint32_t hi_option_flag2; + uint32_t hi_board_data; + uint32_t hi_board_data_initialized; + uint32_t hi_failure_state; + uint32_t hi_rddi_msi_num; + uint32_t hi_pcie_perst_couple_en; + uint32_t hi_sw_protocol_version; +}; + +struct shadow_reg_cfg { + uint16_t ce_id; + uint16_t reg_offset; +}; + +struct shadow_reg_v2_cfg { + uint32_t reg_value; +}; + +#ifdef CONFIG_BYPASS_QMI + +#define FW_SHARED_MEM (2 * 1024 * 1024) + +#ifdef QCN7605_SUPPORT +struct msi_cfg { + u16 ce_id; + u16 msi_vector; +} qdf_packed; + +struct ce_info { + u32 rri_over_ddr_low_paddr; + u32 rri_over_ddr_high_paddr; + struct msi_cfg cfg[CE_COUNT_MAX]; +} qdf_packed; +#endif +#endif + +/** + * struct ce_index + * + * @id: CE id + * @sw_index: sw index + * @write_index: write index + * @hp: ring head pointer + * @tp: ring tail pointer + * @status_hp: status ring head pointer + * @status_tp: status ring tail pointer + */ +struct ce_index { + uint8_t id; + union { + struct { + uint16_t sw_index; + uint16_t write_index; + } legacy_info; + struct { + uint16_t hp; + uint16_t tp; + uint16_t status_hp; + uint16_t status_tp; + } srng_info; + } u; +} qdf_packed; + +/** + * struct hang_event_info + * + * @tlv_header: tlv header + * @active_tasklet_count: active tasklet count + * @active_grp_tasklet_cnt: active grp tasklet count + * @ce_info: CE info + */ +struct hang_event_info { + uint16_t tlv_header; + uint8_t active_tasklet_count; + uint8_t active_grp_tasklet_cnt; + uint8_t ce_count; + struct ce_index ce_info[CE_COUNT_MAX]; +} qdf_packed; + +void hif_ce_stop(struct hif_softc *scn); +int hif_dump_ce_registers(struct hif_softc *scn); +void +hif_ce_dump_target_memory(struct hif_softc *scn, void *ramdump_base, + uint32_t address, uint32_t size); + +#ifdef IPA_OFFLOAD +void hif_ce_ipa_get_ce_resource(struct hif_softc *scn, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr); +#else +static inline +void hif_ce_ipa_get_ce_resource(struct hif_softc *scn, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr) +{ +} + +#endif +int hif_wlan_enable(struct hif_softc *scn); +void ce_enable_polling(void *cestate); +void ce_disable_polling(void *cestate); +void hif_wlan_disable(struct hif_softc *scn); +void hif_get_target_ce_config(struct hif_softc *scn, + struct CE_pipe_config **target_ce_config_ret, + uint32_t *target_ce_config_sz_ret, + struct service_to_pipe **target_service_to_ce_map_ret, + uint32_t *target_service_to_ce_map_sz_ret, + struct shadow_reg_cfg **target_shadow_reg_cfg_v1_ret, + uint32_t *shadow_cfg_v1_sz_ret); + +#ifdef WLAN_FEATURE_EPPING +void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state); +void hif_select_epping_service_to_pipe_map(struct service_to_pipe + **tgt_svc_map_to_use, + uint32_t *sz_tgt_svc_map_to_use); + +#else +static inline +void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state) +{ } +static inline +void hif_select_epping_service_to_pipe_map(struct service_to_pipe + **tgt_svc_map_to_use, + uint32_t *sz_tgt_svc_map_to_use) +{ } +#endif + +void ce_service_register_module(enum ce_target_type target_type, + struct ce_ops* (*ce_attach)(void)); + +#endif /* __CE_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_reg.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..cb12694f38d9e11a5f9ff8ec6346bf34423067ce --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_reg.h @@ -0,0 +1,563 @@ +/* + * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __CE_REG_H__ +#define __CE_REG_H__ + +#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) ((COPY_ENGINE_BASE_ADDRESS \ + - CE0_BASE_ADDRESS)/(CE1_BASE_ADDRESS - CE0_BASE_ADDRESS)) + +#define DST_WR_INDEX_ADDRESS (scn->target_ce_def->d_DST_WR_INDEX_ADDRESS) +#define SRC_WATERMARK_ADDRESS (scn->target_ce_def->d_SRC_WATERMARK_ADDRESS) +#define SRC_WATERMARK_LOW_MASK (scn->target_ce_def->d_SRC_WATERMARK_LOW_MASK) +#define SRC_WATERMARK_HIGH_MASK (scn->target_ce_def->d_SRC_WATERMARK_HIGH_MASK) +#define DST_WATERMARK_LOW_MASK (scn->target_ce_def->d_DST_WATERMARK_LOW_MASK) +#define DST_WATERMARK_HIGH_MASK (scn->target_ce_def->d_DST_WATERMARK_HIGH_MASK) +#define CURRENT_SRRI_ADDRESS (scn->target_ce_def->d_CURRENT_SRRI_ADDRESS) +#define CURRENT_DRRI_ADDRESS (scn->target_ce_def->d_CURRENT_DRRI_ADDRESS) + +#define SHADOW_VALUE0 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_0) +#define SHADOW_VALUE1 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_1) +#define SHADOW_VALUE2 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_2) +#define SHADOW_VALUE3 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_3) +#define SHADOW_VALUE4 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_4) +#define SHADOW_VALUE5 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_5) +#define SHADOW_VALUE6 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_6) +#define SHADOW_VALUE7 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_7) +#define SHADOW_VALUE8 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_8) +#define SHADOW_VALUE9 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_9) +#define SHADOW_VALUE10 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_10) +#define SHADOW_VALUE11 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_11) +#define SHADOW_VALUE12 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_12) +#define SHADOW_VALUE13 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_13) +#define SHADOW_VALUE14 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_14) +#define SHADOW_VALUE15 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_15) +#define SHADOW_VALUE16 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_16) +#define SHADOW_VALUE17 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_17) +#define SHADOW_VALUE18 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_18) +#define SHADOW_VALUE19 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_19) +#define SHADOW_VALUE20 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_20) +#define SHADOW_VALUE21 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_21) +#define SHADOW_VALUE22 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_22) +#define SHADOW_VALUE23 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_23) +#define SHADOW_ADDRESS0 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_0) +#define SHADOW_ADDRESS1 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_1) +#define SHADOW_ADDRESS2 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_2) +#define SHADOW_ADDRESS3 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_3) +#define SHADOW_ADDRESS4 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_4) +#define SHADOW_ADDRESS5 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_5) +#define SHADOW_ADDRESS6 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_6) +#define SHADOW_ADDRESS7 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_7) +#define SHADOW_ADDRESS8 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_8) +#define SHADOW_ADDRESS9 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_9) +#define SHADOW_ADDRESS10 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_10) +#define SHADOW_ADDRESS11 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_11) +#define SHADOW_ADDRESS12 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_12) +#define SHADOW_ADDRESS13 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_13) +#define SHADOW_ADDRESS14 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_14) +#define SHADOW_ADDRESS15 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_15) +#define SHADOW_ADDRESS16 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_16) +#define SHADOW_ADDRESS17 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_17) +#define SHADOW_ADDRESS18 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_18) +#define SHADOW_ADDRESS19 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_19) +#define SHADOW_ADDRESS20 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_20) +#define SHADOW_ADDRESS21 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_21) +#define SHADOW_ADDRESS22 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_22) +#define SHADOW_ADDRESS23 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_23) + +#define SHADOW_ADDRESS(i) \ + (SHADOW_ADDRESS0 + i*(SHADOW_ADDRESS1-SHADOW_ADDRESS0)) + +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK \ + (scn->target_ce_def->d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK) +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK \ + (scn->target_ce_def->d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK) +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK \ + (scn->target_ce_def->d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK) +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK \ + (scn->target_ce_def->d_HOST_IS_DST_RING_LOW_WATERMARK_MASK) +#define MISC_IS_ADDRESS (scn->target_ce_def->d_MISC_IS_ADDRESS) +#define HOST_IS_COPY_COMPLETE_MASK \ + (scn->target_ce_def->d_HOST_IS_COPY_COMPLETE_MASK) +#define CE_WRAPPER_BASE_ADDRESS (scn->target_ce_def->d_CE_WRAPPER_BASE_ADDRESS) +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS \ + (scn->target_ce_def->d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS) +#define CE_DDR_ADDRESS_FOR_RRI_LOW \ + (scn->target_ce_def->d_CE_DDR_ADDRESS_FOR_RRI_LOW) +#define CE_DDR_ADDRESS_FOR_RRI_HIGH \ + (scn->target_ce_def->d_CE_DDR_ADDRESS_FOR_RRI_HIGH) +#define HOST_IE_COPY_COMPLETE_MASK \ + (scn->target_ce_def->d_HOST_IE_COPY_COMPLETE_MASK) +#define SR_BA_ADDRESS (scn->target_ce_def->d_SR_BA_ADDRESS) +#define SR_BA_ADDRESS_HIGH (scn->target_ce_def->d_SR_BA_ADDRESS_HIGH) +#define SR_SIZE_ADDRESS (scn->target_ce_def->d_SR_SIZE_ADDRESS) +#define CE_CTRL1_ADDRESS (scn->target_ce_def->d_CE_CTRL1_ADDRESS) +#define CE_CTRL1_DMAX_LENGTH_MASK \ + (scn->target_ce_def->d_CE_CTRL1_DMAX_LENGTH_MASK) +#define DR_BA_ADDRESS (scn->target_ce_def->d_DR_BA_ADDRESS) +#define DR_BA_ADDRESS_HIGH (scn->target_ce_def->d_DR_BA_ADDRESS_HIGH) +#define DR_SIZE_ADDRESS (scn->target_ce_def->d_DR_SIZE_ADDRESS) +#define CE_CMD_REGISTER (scn->target_ce_def->d_CE_CMD_REGISTER) +#define CE_MSI_ADDRESS (scn->target_ce_def->d_CE_MSI_ADDRESS) +#define CE_MSI_ADDRESS_HIGH (scn->target_ce_def->d_CE_MSI_ADDRESS_HIGH) +#define CE_MSI_DATA (scn->target_ce_def->d_CE_MSI_DATA) +#define CE_MSI_ENABLE_BIT (scn->target_ce_def->d_CE_MSI_ENABLE_BIT) +#define MISC_IE_ADDRESS (scn->target_ce_def->d_MISC_IE_ADDRESS) +#define MISC_IS_AXI_ERR_MASK (scn->target_ce_def->d_MISC_IS_AXI_ERR_MASK) +#define MISC_IS_DST_ADDR_ERR_MASK \ + (scn->target_ce_def->d_MISC_IS_DST_ADDR_ERR_MASK) +#define MISC_IS_SRC_LEN_ERR_MASK \ + (scn->target_ce_def->d_MISC_IS_SRC_LEN_ERR_MASK) +#define MISC_IS_DST_MAX_LEN_VIO_MASK \ + (scn->target_ce_def->d_MISC_IS_DST_MAX_LEN_VIO_MASK) +#define MISC_IS_DST_RING_OVERFLOW_MASK \ + (scn->target_ce_def->d_MISC_IS_DST_RING_OVERFLOW_MASK) +#define MISC_IS_SRC_RING_OVERFLOW_MASK \ + (scn->target_ce_def->d_MISC_IS_SRC_RING_OVERFLOW_MASK) +#define SRC_WATERMARK_LOW_LSB (scn->target_ce_def->d_SRC_WATERMARK_LOW_LSB) +#define SRC_WATERMARK_HIGH_LSB (scn->target_ce_def->d_SRC_WATERMARK_HIGH_LSB) +#define DST_WATERMARK_LOW_LSB (scn->target_ce_def->d_DST_WATERMARK_LOW_LSB) +#define DST_WATERMARK_HIGH_LSB (scn->target_ce_def->d_DST_WATERMARK_HIGH_LSB) +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \ + (scn->target_ce_def->d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \ + (scn->target_ce_def->d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) +#define CE_CTRL1_DMAX_LENGTH_LSB \ + (scn->target_ce_def->d_CE_CTRL1_DMAX_LENGTH_LSB) +#define CE_CTRL1_IDX_UPD_EN (scn->target_ce_def->d_CE_CTRL1_IDX_UPD_EN_MASK) +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK \ + (scn->target_ce_def->d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK \ + (scn->target_ce_def->d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB \ + (scn->target_ce_def->d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB \ + (scn->target_ce_def->d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) +#define WLAN_DEBUG_INPUT_SEL_OFFSET \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_OFFSET) +#define WLAN_DEBUG_INPUT_SEL_SRC_MSB \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_MSB) +#define WLAN_DEBUG_INPUT_SEL_SRC_LSB \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_LSB) +#define WLAN_DEBUG_INPUT_SEL_SRC_MASK \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_MASK) +#define WLAN_DEBUG_CONTROL_OFFSET (scn->targetdef->d_WLAN_DEBUG_CONTROL_OFFSET) +#define WLAN_DEBUG_CONTROL_ENABLE_MSB \ + (scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_MSB) +#define WLAN_DEBUG_CONTROL_ENABLE_LSB \ + (scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_LSB) +#define WLAN_DEBUG_CONTROL_ENABLE_MASK \ + (scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_MASK) +#define WLAN_DEBUG_OUT_OFFSET (scn->targetdef->d_WLAN_DEBUG_OUT_OFFSET) +#define WLAN_DEBUG_OUT_DATA_MSB (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_MSB) +#define WLAN_DEBUG_OUT_DATA_LSB (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_LSB) +#define WLAN_DEBUG_OUT_DATA_MASK (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_MASK) +#define AMBA_DEBUG_BUS_OFFSET (scn->targetdef->d_AMBA_DEBUG_BUS_OFFSET) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB \ + (scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB \ + (scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK \ + (scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK) +#define AMBA_DEBUG_BUS_SEL_MSB (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_MSB) +#define AMBA_DEBUG_BUS_SEL_LSB (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_LSB) +#define AMBA_DEBUG_BUS_SEL_MASK (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_MASK) +#define CE_WRAPPER_DEBUG_OFFSET \ + (scn->target_ce_def->d_CE_WRAPPER_DEBUG_OFFSET) +#define CE_WRAPPER_DEBUG_SEL_MSB \ + (scn->target_ce_def->d_CE_WRAPPER_DEBUG_SEL_MSB) +#define CE_WRAPPER_DEBUG_SEL_LSB \ + (scn->target_ce_def->d_CE_WRAPPER_DEBUG_SEL_LSB) +#define CE_WRAPPER_DEBUG_SEL_MASK \ + (scn->target_ce_def->d_CE_WRAPPER_DEBUG_SEL_MASK) +#define CE_DEBUG_OFFSET (scn->target_ce_def->d_CE_DEBUG_OFFSET) +#define CE_DEBUG_SEL_MSB (scn->target_ce_def->d_CE_DEBUG_SEL_MSB) +#define CE_DEBUG_SEL_LSB (scn->target_ce_def->d_CE_DEBUG_SEL_LSB) +#define CE_DEBUG_SEL_MASK (scn->target_ce_def->d_CE_DEBUG_SEL_MASK) +#define HOST_IE_ADDRESS (scn->target_ce_def->d_HOST_IE_ADDRESS) +#define HOST_IE_REG1_CE_LSB (scn->target_ce_def->d_HOST_IE_REG1_CE_LSB) +#define HOST_IE_ADDRESS_2 (scn->target_ce_def->d_HOST_IE_ADDRESS_2) +#define HOST_IE_REG2_CE_LSB (scn->target_ce_def->d_HOST_IE_REG2_CE_LSB) +#define HOST_IE_ADDRESS_3 (scn->target_ce_def->d_HOST_IE_ADDRESS_3) +#define HOST_IE_REG3_CE_LSB (scn->target_ce_def->d_HOST_IE_REG3_CE_LSB) +#define HOST_IS_ADDRESS (scn->target_ce_def->d_HOST_IS_ADDRESS) + +#define SRC_WATERMARK_LOW_SET(x) \ + (((x) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK) +#define SRC_WATERMARK_HIGH_SET(x) \ + (((x) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK) +#define DST_WATERMARK_LOW_SET(x) \ + (((x) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK) +#define DST_WATERMARK_HIGH_SET(x) \ + (((x) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK) +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \ + (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \ + CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) +#define CE_CTRL1_DMAX_LENGTH_SET(x) \ + (((x) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK) +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \ + (((x) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \ + CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \ + (((x) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \ + CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) +#define WLAN_DEBUG_INPUT_SEL_SRC_GET(x) \ + (((x) & WLAN_DEBUG_INPUT_SEL_SRC_MASK) >> \ + WLAN_DEBUG_INPUT_SEL_SRC_LSB) +#define WLAN_DEBUG_INPUT_SEL_SRC_SET(x) \ + (((x) << WLAN_DEBUG_INPUT_SEL_SRC_LSB) & \ + WLAN_DEBUG_INPUT_SEL_SRC_MASK) +#define WLAN_DEBUG_CONTROL_ENABLE_GET(x) \ + (((x) & WLAN_DEBUG_CONTROL_ENABLE_MASK) >> \ + WLAN_DEBUG_CONTROL_ENABLE_LSB) +#define WLAN_DEBUG_CONTROL_ENABLE_SET(x) \ + (((x) << WLAN_DEBUG_CONTROL_ENABLE_LSB) & \ + WLAN_DEBUG_CONTROL_ENABLE_MASK) +#define WLAN_DEBUG_OUT_DATA_GET(x) \ + (((x) & WLAN_DEBUG_OUT_DATA_MASK) >> WLAN_DEBUG_OUT_DATA_LSB) +#define WLAN_DEBUG_OUT_DATA_SET(x) \ + (((x) << WLAN_DEBUG_OUT_DATA_LSB) & WLAN_DEBUG_OUT_DATA_MASK) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_GET(x) \ + (((x) & AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK) >> \ + AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(x) \ + (((x) << AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB) & \ + AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK) +#define AMBA_DEBUG_BUS_SEL_GET(x) \ + (((x) & AMBA_DEBUG_BUS_SEL_MASK) >> AMBA_DEBUG_BUS_SEL_LSB) +#define AMBA_DEBUG_BUS_SEL_SET(x) \ + (((x) << AMBA_DEBUG_BUS_SEL_LSB) & AMBA_DEBUG_BUS_SEL_MASK) +#define CE_WRAPPER_DEBUG_SEL_GET(x) \ + (((x) & CE_WRAPPER_DEBUG_SEL_MASK) >> CE_WRAPPER_DEBUG_SEL_LSB) +#define CE_WRAPPER_DEBUG_SEL_SET(x) \ + (((x) << CE_WRAPPER_DEBUG_SEL_LSB) & CE_WRAPPER_DEBUG_SEL_MASK) +#define CE_DEBUG_SEL_GET(x) (((x) & CE_DEBUG_SEL_MASK) >> CE_DEBUG_SEL_LSB) +#define CE_DEBUG_SEL_SET(x) (((x) << CE_DEBUG_SEL_LSB) & CE_DEBUG_SEL_MASK) +#define HOST_IE_REG1_CE_BIT(_ce_id) (1 << (_ce_id + HOST_IE_REG1_CE_LSB)) +#define HOST_IE_REG2_CE_BIT(_ce_id) (1 << (_ce_id + HOST_IE_REG2_CE_LSB)) +#define HOST_IE_REG3_CE_BIT(_ce_id) (1 << (_ce_id + HOST_IE_REG3_CE_LSB)) + +uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn, + uint32_t CE_ctrl_addr); +uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn, + uint32_t CE_ctrl_addr); + +#define BITS0_TO_31(val) ((uint32_t)((uint64_t)(val)\ + & (uint64_t)(0xFFFFFFFF))) +#define BITS32_TO_35(val) ((uint32_t)(((uint64_t)(val)\ + & (uint64_t)(0xF00000000))>>32)) + +#define VADDR_FOR_CE(scn, CE_ctrl_addr)\ + ((scn->vaddr_rri_on_ddr) + COPY_ENGINE_ID(CE_ctrl_addr)) + +#define SRRI_FROM_DDR_ADDR(addr) ((*(addr)) & 0xFFFF) +#define DRRI_FROM_DDR_ADDR(addr) (((*(addr))>>16) & 0xFFFF) + +#define CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS) +#define CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS) + +#ifdef ADRASTEA_RRI_ON_DDR +#ifdef SHADOW_REG_DEBUG +#define CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\ + DEBUG_CE_SRC_RING_READ_IDX_GET(scn, CE_ctrl_addr) +#define CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\ + DEBUG_CE_DEST_RING_READ_IDX_GET(scn, CE_ctrl_addr) +#else +#define CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\ + SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)) +#define CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\ + DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)) +#endif + +unsigned int hif_get_src_ring_read_index(struct hif_softc *scn, + uint32_t CE_ctrl_addr); +unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn, + uint32_t CE_ctrl_addr); + +#define CE_SRC_RING_READ_IDX_GET(scn, CE_ctrl_addr)\ + hif_get_src_ring_read_index(scn, CE_ctrl_addr) +#define CE_DEST_RING_READ_IDX_GET(scn, CE_ctrl_addr)\ + hif_get_dst_ring_read_index(scn, CE_ctrl_addr) +#else +#define CE_SRC_RING_READ_IDX_GET(scn, CE_ctrl_addr) \ + CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, CE_ctrl_addr) +#define CE_DEST_RING_READ_IDX_GET(scn, CE_ctrl_addr)\ + CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, CE_ctrl_addr) + +/** + * if RRI on DDR is not enabled, get idx from ddr defaults to + * using the register value & force wake must be used for + * non interrupt processing. + */ +#define CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\ + A_TARGET_READ(scn, (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS) +#endif + +#define CE_SRC_RING_BASE_ADDR_SET(scn, CE_ctrl_addr, addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + SR_BA_ADDRESS, (addr)) + +#define CE_SRC_RING_BASE_ADDR_HIGH_SET(scn, CE_ctrl_addr, addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + SR_BA_ADDRESS_HIGH, (addr)) + +#define CE_SRC_RING_BASE_ADDR_HIGH_GET(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + SR_BA_ADDRESS_HIGH) + +#define CE_SRC_RING_SZ_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + SR_SIZE_ADDRESS, (n)) + +#define CE_SRC_RING_DMAX_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS, \ + (A_TARGET_READ(scn, (CE_ctrl_addr) + \ + CE_CTRL1_ADDRESS) & ~CE_CTRL1_DMAX_LENGTH_MASK) | \ + CE_CTRL1_DMAX_LENGTH_SET(n)) + +#define CE_IDX_UPD_EN_SET(scn, CE_ctrl_addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS, \ + (A_TARGET_READ(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS) \ + | CE_CTRL1_IDX_UPD_EN)) + +#define CE_CMD_REGISTER_GET(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + CE_CMD_REGISTER) + +#define CE_CMD_REGISTER_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CMD_REGISTER, n) + +#define CE_MSI_ADDR_LOW_SET(scn, CE_ctrl_addr, addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_MSI_ADDRESS, (addr)) + +#define CE_MSI_ADDR_HIGH_SET(scn, CE_ctrl_addr, addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_MSI_ADDRESS_HIGH, (addr)) + +#define CE_MSI_DATA_SET(scn, CE_ctrl_addr, data) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_MSI_DATA, (data)) + +#define CE_CTRL_REGISTER1_SET(scn, CE_ctrl_addr, val) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS, val) + +#define CE_CTRL_REGISTER1_GET(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS) + +#define CE_SRC_RING_BYTE_SWAP_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS, \ + (A_TARGET_READ(scn, \ + (CE_ctrl_addr) + CE_CTRL1_ADDRESS) \ + & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) | \ + CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n)) + +#define CE_DEST_RING_BYTE_SWAP_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr)+CE_CTRL1_ADDRESS, \ + (A_TARGET_READ(scn, \ + (CE_ctrl_addr) + CE_CTRL1_ADDRESS) \ + & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) | \ + CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n)) + + +#define CE_DEST_RING_BASE_ADDR_SET(scn, CE_ctrl_addr, addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + DR_BA_ADDRESS, (addr)) + +#define CE_DEST_RING_BASE_ADDR_HIGH_SET(scn, CE_ctrl_addr, addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + DR_BA_ADDRESS_HIGH, (addr)) + +#define CE_DEST_RING_BASE_ADDR_HIGH_GET(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + DR_BA_ADDRESS_HIGH) + +#define CE_DEST_RING_SZ_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + DR_SIZE_ADDRESS, (n)) + +#define CE_SRC_RING_HIGHMARK_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + SRC_WATERMARK_ADDRESS, \ + (A_TARGET_READ(scn, \ + (CE_ctrl_addr) + SRC_WATERMARK_ADDRESS) \ + & ~SRC_WATERMARK_HIGH_MASK) | \ + SRC_WATERMARK_HIGH_SET(n)) + +#define CE_SRC_RING_LOWMARK_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + SRC_WATERMARK_ADDRESS, \ + (A_TARGET_READ(scn, \ + (CE_ctrl_addr) + SRC_WATERMARK_ADDRESS) \ + & ~SRC_WATERMARK_LOW_MASK) | \ + SRC_WATERMARK_LOW_SET(n)) + +#define CE_DEST_RING_HIGHMARK_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + DST_WATERMARK_ADDRESS, \ + (A_TARGET_READ(scn, \ + (CE_ctrl_addr) + DST_WATERMARK_ADDRESS) \ + & ~DST_WATERMARK_HIGH_MASK) | \ + DST_WATERMARK_HIGH_SET(n)) + +#define CE_DEST_RING_LOWMARK_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + DST_WATERMARK_ADDRESS, \ + (A_TARGET_READ(scn, \ + (CE_ctrl_addr) + DST_WATERMARK_ADDRESS) \ + & ~DST_WATERMARK_LOW_MASK) | \ + DST_WATERMARK_LOW_SET(n)) + +#define CE_COPY_COMPLETE_INTR_ENABLE(scn, CE_ctrl_addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IE_ADDRESS, \ + A_TARGET_READ(scn, \ + (CE_ctrl_addr) + HOST_IE_ADDRESS) | \ + HOST_IE_COPY_COMPLETE_MASK) + +#define CE_COPY_COMPLETE_INTR_DISABLE(scn, CE_ctrl_addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IE_ADDRESS, \ + A_TARGET_READ(scn, \ + (CE_ctrl_addr) + HOST_IE_ADDRESS) \ + & ~HOST_IE_COPY_COMPLETE_MASK) + +#define CE_BASE_ADDRESS(CE_id) \ + CE0_BASE_ADDRESS + ((CE1_BASE_ADDRESS - \ + CE0_BASE_ADDRESS)*(CE_id)) + +#define CE_WATERMARK_INTR_ENABLE(scn, CE_ctrl_addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IE_ADDRESS, \ + A_TARGET_READ(scn, \ + (CE_ctrl_addr) + HOST_IE_ADDRESS) | \ + CE_WATERMARK_MASK) + +#define CE_WATERMARK_INTR_DISABLE(scn, CE_ctrl_addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IE_ADDRESS, \ + A_TARGET_READ(scn, \ + (CE_ctrl_addr) + HOST_IE_ADDRESS) \ + & ~CE_WATERMARK_MASK) + +#define CE_ERROR_INTR_ENABLE(scn, CE_ctrl_addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + MISC_IE_ADDRESS, \ + A_TARGET_READ(scn, \ + (CE_ctrl_addr) + MISC_IE_ADDRESS) | CE_ERROR_MASK) + +#define CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + MISC_IS_ADDRESS) + +#define CE_ENGINE_INT_STATUS_GET(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + HOST_IS_ADDRESS) + +#define CE_ENGINE_INT_STATUS_CLEAR(scn, CE_ctrl_addr, mask) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IS_ADDRESS, (mask)) + +#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \ + HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \ + HOST_IS_DST_RING_LOW_WATERMARK_MASK | \ + HOST_IS_DST_RING_HIGH_WATERMARK_MASK) + +#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \ + MISC_IS_DST_ADDR_ERR_MASK | \ + MISC_IS_SRC_LEN_ERR_MASK | \ + MISC_IS_DST_MAX_LEN_VIO_MASK | \ + MISC_IS_DST_RING_OVERFLOW_MASK | \ + MISC_IS_SRC_RING_OVERFLOW_MASK) + +#define CE_SRC_RING_TO_DESC(baddr, idx) \ + (&(((struct CE_src_desc *)baddr)[idx])) +#define CE_DEST_RING_TO_DESC(baddr, idx) \ + (&(((struct CE_dest_desc *)baddr)[idx])) + +/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */ +#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \ + (((int)(toidx)-(int)(fromidx)) & (nentries_mask)) + +#define CE_RING_IDX_INCR(nentries_mask, idx) \ + (((idx) + 1) & (nentries_mask)) + +#define CE_RING_IDX_ADD(nentries_mask, idx, num) \ + (((idx) + (num)) & (nentries_mask)) + +#define CE_INTERRUPT_SUMMARY(scn) \ + CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \ + A_TARGET_READ(scn, CE_WRAPPER_BASE_ADDRESS + \ + CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)) + +#define READ_CE_DDR_ADDRESS_FOR_RRI_LOW(scn) \ + (A_TARGET_READ(scn, \ + CE_WRAPPER_BASE_ADDRESS + CE_DDR_ADDRESS_FOR_RRI_LOW)) + +#define READ_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn) \ + (A_TARGET_READ(scn, \ + CE_WRAPPER_BASE_ADDRESS + CE_DDR_ADDRESS_FOR_RRI_HIGH)) + +#define WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, val) \ + (A_TARGET_WRITE(scn, \ + CE_WRAPPER_BASE_ADDRESS + CE_DDR_ADDRESS_FOR_RRI_LOW, \ + val)) + +#define WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, val) \ + (A_TARGET_WRITE(scn, \ + CE_WRAPPER_BASE_ADDRESS + CE_DDR_ADDRESS_FOR_RRI_HIGH, \ + val)) + +/*Macro to increment CE packet errors*/ +#define OL_ATH_CE_PKT_ERROR_COUNT_INCR(_scn, _ce_ecode) \ + do { if (_ce_ecode == CE_RING_DELTA_FAIL) \ + (_scn->pkt_stats.ce_ring_delta_fail_count) \ + += 1; } while (0) + +/* Given a Copy Engine's ID, determine the interrupt number for that + * copy engine's interrupts. + */ +#define CE_ID_TO_INUM(id) (A_INUM_CE0_COPY_COMP_BASE + (id)) +#define CE_INUM_TO_ID(inum) ((inum) - A_INUM_CE0_COPY_COMP_BASE) +#define CE0_BASE_ADDRESS (scn->target_ce_def->d_CE0_BASE_ADDRESS) +#define CE1_BASE_ADDRESS (scn->target_ce_def->d_CE1_BASE_ADDRESS) + + +#ifdef ADRASTEA_SHADOW_REGISTERS +#define NUM_SHADOW_REGISTERS 24 +u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr); +u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr); +#endif + + +#ifdef ADRASTEA_SHADOW_REGISTERS +#define CE_SRC_RING_WRITE_IDX_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, shadow_sr_wr_ind_addr(scn, CE_ctrl_addr), n) +#define CE_DEST_RING_WRITE_IDX_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, shadow_dst_wr_ind_addr(scn, CE_ctrl_addr), n) + +#else + +#define CE_SRC_RING_WRITE_IDX_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + SR_WR_INDEX_ADDRESS, (n)) +#define CE_DEST_RING_WRITE_IDX_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + DST_WR_INDEX_ADDRESS, (n)) +#endif + +/* The write index read is only needed durring initialization because + * we keep track of the index that was last written. Thus the register + * is the only hardware supported location to read the initial value from. + */ +#define CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + SR_WR_INDEX_ADDRESS) +#define CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + DST_WR_INDEX_ADDRESS) + +#endif /* __CE_REG_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c new file mode 100644 index 0000000000000000000000000000000000000000..4494e23d2fe1408cfeafbfa31d6ecd90326962f0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c @@ -0,0 +1,1959 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hif.h" +#include "hif_io32.h" +#include "ce_api.h" +#include "ce_main.h" +#include "ce_internal.h" +#include "ce_reg.h" +#include "qdf_lock.h" +#include "regtable.h" +#include "hif_main.h" +#include "hif_debug.h" +#include "hif_napi.h" +#include "qdf_module.h" + +#ifdef IPA_OFFLOAD +#ifdef QCA_WIFI_3_0 +#define CE_IPA_RING_INIT(ce_desc) \ + do { \ + ce_desc->gather = 0; \ + ce_desc->enable_11h = 0; \ + ce_desc->meta_data_low = 0; \ + ce_desc->packet_result_offset = 64; \ + ce_desc->toeplitz_hash_enable = 0; \ + ce_desc->addr_y_search_disable = 0; \ + ce_desc->addr_x_search_disable = 0; \ + ce_desc->misc_int_disable = 0; \ + ce_desc->target_int_disable = 0; \ + ce_desc->host_int_disable = 0; \ + ce_desc->dest_byte_swap = 0; \ + ce_desc->byte_swap = 0; \ + ce_desc->type = 2; \ + ce_desc->tx_classify = 1; \ + ce_desc->buffer_addr_hi = 0; \ + ce_desc->meta_data = 0; \ + ce_desc->nbytes = 128; \ + } while (0) +#else +#define CE_IPA_RING_INIT(ce_desc) \ + do { \ + ce_desc->byte_swap = 0; \ + ce_desc->nbytes = 60; \ + ce_desc->gather = 0; \ + } while (0) +#endif /* QCA_WIFI_3_0 */ +#endif /* IPA_OFFLOAD */ + +static int war1_allow_sleep; +/* io32 write workaround */ +static int hif_ce_war1; + +/** + * hif_ce_war_disable() - disable ce war gobally + */ +void hif_ce_war_disable(void) +{ + hif_ce_war1 = 0; +} + +/** + * hif_ce_war_enable() - enable ce war gobally + */ +void hif_ce_war_enable(void) +{ + hif_ce_war1 = 1; +} + +/* + * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked + * for defined here + */ +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) + +#define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1) +#define CE_DEBUG_DATA_PER_ROW 16 + +static const char *ce_event_type_to_str(enum hif_ce_event_type type); + +int get_next_record_index(qdf_atomic_t *table_index, int array_size) +{ + int record_index = qdf_atomic_inc_return(table_index); + + if (record_index == array_size) + qdf_atomic_sub(array_size, table_index); + + while (record_index >= array_size) + record_index -= array_size; + + return record_index; +} + +qdf_export_symbol(get_next_record_index); + +#ifdef HIF_CE_DEBUG_DATA_BUF +void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len) +{ + uint8_t *data = NULL; + + if (!event->data) { + hif_err_rl("No ce debug memory allocated"); + return; + } + + if (event->memory && len > 0) + data = qdf_nbuf_data((qdf_nbuf_t)event->memory); + + event->actual_data_len = 0; + qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE); + + if (data && len > 0) { + qdf_mem_copy(event->data, data, + ((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ? + len : CE_DEBUG_MAX_DATA_BUF_SIZE)); + event->actual_data_len = len; + } +} + +qdf_export_symbol(hif_ce_desc_data_record); + +void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event) +{ + qdf_mem_zero(event, + offsetof(struct hif_ce_desc_event, data)); +} + +qdf_export_symbol(hif_clear_ce_desc_debug_data); +#else +void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event) +{ + qdf_mem_zero(event, sizeof(struct hif_ce_desc_event)); +} + +qdf_export_symbol(hif_clear_ce_desc_debug_data); +#endif /* HIF_CE_DEBUG_DATA_BUF */ + +#if defined(HIF_RECORD_PADDR) +void hif_ce_desc_record_rx_paddr(struct hif_softc *scn, + struct hif_ce_desc_event *event, + qdf_nbuf_t memory) +{ + if (memory) { + event->dma_addr = QDF_NBUF_CB_PADDR(memory); + event->dma_to_phy = qdf_mem_paddr_from_dmaaddr( + scn->qdf_dev, + event->dma_addr); + + event->virt_to_phy = + virt_to_phys(qdf_nbuf_data(memory)); + } +} +#endif /* HIF_RECORD_RX_PADDR */ + +/** + * hif_record_ce_desc_event() - record ce descriptor events + * @scn: hif_softc + * @ce_id: which ce is the event occurring on + * @type: what happened + * @descriptor: pointer to the descriptor posted/completed + * @memory: virtual address of buffer related to the descriptor + * @index: index that the descriptor was/will be at. + */ +void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id, + enum hif_ce_event_type type, + union ce_desc *descriptor, + void *memory, int index, + int len) +{ + int record_index; + struct hif_ce_desc_event *event; + + struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; + struct hif_ce_desc_event *hist_ev = NULL; + + if (ce_id < CE_COUNT_MAX) + hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id]; + else + return; + + if (ce_id >= CE_COUNT_MAX) + return; + + if (!ce_hist->enable[ce_id]) + return; + + if (!hist_ev) + return; + + record_index = get_next_record_index( + &ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX); + + event = &hist_ev[record_index]; + + hif_clear_ce_desc_debug_data(event); + + event->type = type; + event->time = qdf_get_log_timestamp(); + event->cpu_id = qdf_get_cpu(); + + if (descriptor) + qdf_mem_copy(&event->descriptor, descriptor, + sizeof(union ce_desc)); + + event->memory = memory; + event->index = index; + + if (event->type == HIF_RX_DESC_POST || + event->type == HIF_RX_DESC_COMPLETION) + hif_ce_desc_record_rx_paddr(scn, event, memory); + + if (ce_hist->data_enable[ce_id]) + hif_ce_desc_data_record(event, len); +} +qdf_export_symbol(hif_record_ce_desc_event); + +/** + * ce_init_ce_desc_event_log() - initialize the ce event log + * @ce_id: copy engine id for which we are initializing the log + * @size: size of array to dedicate + * + * Currently the passed size is ignored in favor of a precompiled value. + */ +void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size) +{ + struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; + qdf_atomic_init(&ce_hist->history_index[ce_id]); + qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]); +} + +/** + * ce_deinit_ce_desc_event_log() - deinitialize the ce event log + * @ce_id: copy engine id for which we are deinitializing the log + * + */ +inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id) +{ + struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; + + qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]); +} + +#else /* (HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */ +void hif_record_ce_desc_event(struct hif_softc *scn, + int ce_id, enum hif_ce_event_type type, + union ce_desc *descriptor, void *memory, + int index, int len) +{ +} +qdf_export_symbol(hif_record_ce_desc_event); + +inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, + int size) +{ +} + +void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id) +{ +} +#endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */ + +#ifdef NAPI_YIELD_BUDGET_BASED +bool hif_ce_service_should_yield(struct hif_softc *scn, + struct CE_state *ce_state) +{ + bool yield = hif_max_num_receives_reached(scn, ce_state->receive_count); + + /* Setting receive_count to MAX_NUM_OF_RECEIVES when this count goes + * beyond MAX_NUM_OF_RECEIVES for NAPI backet calulation issue. This + * can happen in fast path handling as processing is happenning in + * batches. + */ + if (yield) + ce_state->receive_count = MAX_NUM_OF_RECEIVES; + + return yield; +} +#else +/** + * hif_ce_service_should_yield() - return true if the service is hogging the cpu + * @scn: hif context + * @ce_state: context of the copy engine being serviced + * + * Return: true if the service should yield + */ +bool hif_ce_service_should_yield(struct hif_softc *scn, + struct CE_state *ce_state) +{ + bool yield, time_limit_reached, rxpkt_thresh_reached = 0; + + time_limit_reached = + sched_clock() > ce_state->ce_service_yield_time ? 1 : 0; + + if (!time_limit_reached) + rxpkt_thresh_reached = hif_max_num_receives_reached + (scn, ce_state->receive_count); + + /* Setting receive_count to MAX_NUM_OF_RECEIVES when this count goes + * beyond MAX_NUM_OF_RECEIVES for NAPI backet calulation issue. This + * can happen in fast path handling as processing is happenning in + * batches. + */ + if (rxpkt_thresh_reached) + ce_state->receive_count = MAX_NUM_OF_RECEIVES; + + yield = time_limit_reached || rxpkt_thresh_reached; + + if (yield && + ce_state->htt_rx_data && + hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) { + hif_napi_update_yield_stats(ce_state, + time_limit_reached, + rxpkt_thresh_reached); + } + + return yield; +} +qdf_export_symbol(hif_ce_service_should_yield); +#endif + +/* + * Guts of ce_send, used by both ce_send and ce_sendlist_send. + * The caller takes responsibility for any needed locking. + */ + +void war_ce_src_ring_write_idx_set(struct hif_softc *scn, + u32 ctrl_addr, unsigned int write_index) +{ + if (hif_ce_war1) { + void __iomem *indicator_addr; + + indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS; + + if (!war1_allow_sleep + && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) { + hif_write32_mb(scn, indicator_addr, + (CDC_WAR_MAGIC_STR | write_index)); + } else { + unsigned long irq_flags; + + local_irq_save(irq_flags); + hif_write32_mb(scn, indicator_addr, 1); + + /* + * PCIE write waits for ACK in IPQ8K, there is no + * need to read back value. + */ + (void)hif_read32_mb(scn, indicator_addr); + /* conservative */ + (void)hif_read32_mb(scn, indicator_addr); + + CE_SRC_RING_WRITE_IDX_SET(scn, + ctrl_addr, write_index); + + hif_write32_mb(scn, indicator_addr, 0); + local_irq_restore(irq_flags); + } + } else { + CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index); + } +} + +qdf_export_symbol(war_ce_src_ring_write_idx_set); + +int +ce_send(struct CE_handle *copyeng, + void *per_transfer_context, + qdf_dma_addr_t buffer, + uint32_t nbytes, + uint32_t transfer_id, + uint32_t flags, + uint32_t user_flag) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + int status; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn); + + qdf_spin_lock_bh(&CE_state->ce_index_lock); + status = hif_state->ce_services->ce_send_nolock(copyeng, + per_transfer_context, buffer, nbytes, + transfer_id, flags, user_flag); + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + + return status; +} +qdf_export_symbol(ce_send); + +unsigned int ce_sendlist_sizeof(void) +{ + return sizeof(struct ce_sendlist); +} + +void ce_sendlist_init(struct ce_sendlist *sendlist) +{ + struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist; + + sl->num_items = 0; +} + +int +ce_sendlist_buf_add(struct ce_sendlist *sendlist, + qdf_dma_addr_t buffer, + uint32_t nbytes, + uint32_t flags, + uint32_t user_flags) +{ + struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist; + unsigned int num_items = sl->num_items; + struct ce_sendlist_item *item; + + if (num_items >= CE_SENDLIST_ITEMS_MAX) { + QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX); + return QDF_STATUS_E_RESOURCES; + } + + item = &sl->item[num_items]; + item->send_type = CE_SIMPLE_BUFFER_TYPE; + item->data = buffer; + item->u.nbytes = nbytes; + item->flags = flags; + item->user_flags = user_flags; + sl->num_items = num_items + 1; + return QDF_STATUS_SUCCESS; +} + +int +ce_sendlist_send(struct CE_handle *copyeng, + void *per_transfer_context, + struct ce_sendlist *sendlist, unsigned int transfer_id) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn); + + return hif_state->ce_services->ce_sendlist_send(copyeng, + per_transfer_context, sendlist, transfer_id); +} + +#ifndef AH_NEED_TX_DATA_SWAP +#define AH_NEED_TX_DATA_SWAP 0 +#endif + +/** + * ce_batch_send() - sends bunch of msdus at once + * @ce_tx_hdl : pointer to CE handle + * @msdu : list of msdus to be sent + * @transfer_id : transfer id + * @len : Downloaded length + * @sendhead : sendhead + * + * Assumption : Called with an array of MSDU's + * Function: + * For each msdu in the array + * 1. Send each msdu + * 2. Increment write index accordinlgy. + * + * Return: list of msds not sent + */ +qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu, + uint32_t transfer_id, u_int32_t len, uint32_t sendhead) +{ + struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl; + struct hif_softc *scn = ce_state->scn; + struct CE_ring_state *src_ring = ce_state->src_ring; + u_int32_t ctrl_addr = ce_state->ctrl_addr; + /* A_target_id_t targid = TARGID(scn);*/ + + uint32_t nentries_mask = src_ring->nentries_mask; + uint32_t sw_index, write_index; + + struct CE_src_desc *src_desc_base = + (struct CE_src_desc *)src_ring->base_addr_owner_space; + uint32_t *src_desc; + + struct CE_src_desc lsrc_desc = {0}; + int deltacount = 0; + qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext; + + DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr); + sw_index = src_ring->sw_index; + write_index = src_ring->write_index; + + deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1); + + while (msdu) { + tempnext = qdf_nbuf_next(msdu); + + if (deltacount < 2) { + if (sendhead) + return msdu; + HIF_ERROR("%s: Out of descriptors", __func__); + src_ring->write_index = write_index; + war_ce_src_ring_write_idx_set(scn, ctrl_addr, + write_index); + + sw_index = src_ring->sw_index; + write_index = src_ring->write_index; + + deltacount = CE_RING_DELTA(nentries_mask, write_index, + sw_index-1); + if (!freelist) { + freelist = msdu; + hfreelist = msdu; + } else { + qdf_nbuf_set_next(freelist, msdu); + freelist = msdu; + } + qdf_nbuf_set_next(msdu, NULL); + msdu = tempnext; + continue; + } + + src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, + write_index); + + src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0); + + lsrc_desc.meta_data = transfer_id; + if (len > msdu->len) + len = msdu->len; + lsrc_desc.nbytes = len; + /* Data packet is a byte stream, so disable byte swap */ + lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP; + lsrc_desc.gather = 0; /*For the last one, gather is not set*/ + + src_desc[1] = ((uint32_t *)&lsrc_desc)[1]; + + + src_ring->per_transfer_context[write_index] = msdu; + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + + if (sendhead) + break; + qdf_nbuf_set_next(msdu, NULL); + msdu = tempnext; + + } + + + src_ring->write_index = write_index; + war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index); + + return hfreelist; +} + +/** + * ce_update_tx_ring() - Advance sw index. + * @ce_tx_hdl : pointer to CE handle + * @num_htt_cmpls : htt completions received. + * + * Function: + * Increment the value of sw index of src ring + * according to number of htt completions + * received. + * + * Return: void + */ +#ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE +void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls) +{ + struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl; + struct CE_ring_state *src_ring = ce_state->src_ring; + uint32_t nentries_mask = src_ring->nentries_mask; + /* + * Advance the s/w index: + * This effectively simulates completing the CE ring descriptors + */ + src_ring->sw_index = + CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index, + num_htt_cmpls); +} +#else +void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls) +{} +#endif + +/** + * ce_send_single() - sends + * @ce_tx_hdl : pointer to CE handle + * @msdu : msdu to be sent + * @transfer_id : transfer id + * @len : Downloaded length + * + * Function: + * 1. Send one msdu + * 2. Increment write index of src ring accordinlgy. + * + * Return: QDF_STATUS: CE sent status + */ +QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu, + uint32_t transfer_id, u_int32_t len) +{ + struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl; + struct hif_softc *scn = ce_state->scn; + struct CE_ring_state *src_ring = ce_state->src_ring; + uint32_t ctrl_addr = ce_state->ctrl_addr; + /*A_target_id_t targid = TARGID(scn);*/ + + uint32_t nentries_mask = src_ring->nentries_mask; + uint32_t sw_index, write_index; + + struct CE_src_desc *src_desc_base = + (struct CE_src_desc *)src_ring->base_addr_owner_space; + uint32_t *src_desc; + + struct CE_src_desc lsrc_desc = {0}; + enum hif_ce_event_type event_type; + + DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr); + sw_index = src_ring->sw_index; + write_index = src_ring->write_index; + + if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, + sw_index-1) < 1)) { + /* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */ + HIF_ERROR("%s: ce send fail %d %d %d", __func__, nentries_mask, + write_index, sw_index); + return QDF_STATUS_E_RESOURCES; + } + + src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index); + + src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0); + + lsrc_desc.meta_data = transfer_id; + lsrc_desc.nbytes = len; + /* Data packet is a byte stream, so disable byte swap */ + lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP; + lsrc_desc.gather = 0; /* For the last one, gather is not set */ + + src_desc[1] = ((uint32_t *)&lsrc_desc)[1]; + + + src_ring->per_transfer_context[write_index] = msdu; + + if (((struct CE_src_desc *)src_desc)->gather) + event_type = HIF_TX_GATHER_DESC_POST; + else if (qdf_unlikely(ce_state->state != CE_RUNNING)) + event_type = HIF_TX_DESC_SOFTWARE_POST; + else + event_type = HIF_TX_DESC_POST; + + hif_record_ce_desc_event(scn, ce_state->id, event_type, + (union ce_desc *)src_desc, msdu, + write_index, len); + + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + + src_ring->write_index = write_index; + + war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index); + + return QDF_STATUS_SUCCESS; +} + +/** + * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine + * @coyeng: copy engine handle + * @per_recv_context: virtual address of the nbuf + * @buffer: physical address of the nbuf + * + * Return: 0 if the buffer is enqueued + */ +int +ce_recv_buf_enqueue(struct CE_handle *copyeng, + void *per_recv_context, qdf_dma_addr_t buffer) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn); + + return hif_state->ce_services->ce_recv_buf_enqueue(copyeng, + per_recv_context, buffer); +} +qdf_export_symbol(ce_recv_buf_enqueue); + +void +ce_send_watermarks_set(struct CE_handle *copyeng, + unsigned int low_alert_nentries, + unsigned int high_alert_nentries) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + uint32_t ctrl_addr = CE_state->ctrl_addr; + struct hif_softc *scn = CE_state->scn; + + CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries); + CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries); +} + +void +ce_recv_watermarks_set(struct CE_handle *copyeng, + unsigned int low_alert_nentries, + unsigned int high_alert_nentries) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + uint32_t ctrl_addr = CE_state->ctrl_addr; + struct hif_softc *scn = CE_state->scn; + + CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, + low_alert_nentries); + CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, + high_alert_nentries); +} + +unsigned int ce_send_entries_avail(struct CE_handle *copyeng) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct CE_ring_state *src_ring = CE_state->src_ring; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int sw_index; + unsigned int write_index; + + qdf_spin_lock(&CE_state->ce_index_lock); + sw_index = src_ring->sw_index; + write_index = src_ring->write_index; + qdf_spin_unlock(&CE_state->ce_index_lock); + + return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); +} + +unsigned int ce_recv_entries_avail(struct CE_handle *copyeng) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct CE_ring_state *dest_ring = CE_state->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int sw_index; + unsigned int write_index; + + qdf_spin_lock(&CE_state->ce_index_lock); + sw_index = dest_ring->sw_index; + write_index = dest_ring->write_index; + qdf_spin_unlock(&CE_state->ce_index_lock); + + return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); +} + +/* + * Guts of ce_completed_recv_next. + * The caller takes responsibility for any necessary locking. + */ +int +ce_completed_recv_next(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, unsigned int *flagsp) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + int status; + struct hif_softc *scn = CE_state->scn; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct ce_ops *ce_services; + + ce_services = hif_state->ce_services; + qdf_spin_lock_bh(&CE_state->ce_index_lock); + status = + ce_services->ce_completed_recv_next_nolock(CE_state, + per_CE_contextp, per_transfer_contextp, bufferp, + nbytesp, transfer_idp, flagsp); + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + + return status; +} + +QDF_STATUS +ce_revoke_recv_next(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, qdf_dma_addr_t *bufferp) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn); + + return hif_state->ce_services->ce_revoke_recv_next(copyeng, + per_CE_contextp, per_transfer_contextp, bufferp); +} + +QDF_STATUS +ce_cancel_send_next(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + uint32_t *toeplitz_hash_result) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn); + + return hif_state->ce_services->ce_cancel_send_next + (copyeng, per_CE_contextp, per_transfer_contextp, + bufferp, nbytesp, transfer_idp, toeplitz_hash_result); +} +qdf_export_symbol(ce_cancel_send_next); + +int +ce_completed_send_next(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *sw_idx, + unsigned int *hw_idx, + unsigned int *toeplitz_hash_result) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct hif_softc *scn = CE_state->scn; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct ce_ops *ce_services; + int status; + + ce_services = hif_state->ce_services; + qdf_spin_lock_bh(&CE_state->ce_index_lock); + status = + ce_services->ce_completed_send_next_nolock(CE_state, + per_CE_contextp, per_transfer_contextp, + bufferp, nbytesp, transfer_idp, sw_idx, + hw_idx, toeplitz_hash_result); + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + + return status; +} + +#ifdef ATH_11AC_TXCOMPACT +/* CE engine descriptor reap + * Similar to ce_per_engine_service , Only difference is ce_per_engine_service + * does receive and reaping of completed descriptor , + * This function only handles reaping of Tx complete descriptor. + * The Function is called from threshold reap poll routine + * hif_send_complete_check so should not countain receive functionality + * within it . + */ + +void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id) +{ + void *CE_context; + void *transfer_context; + qdf_dma_addr_t buf; + unsigned int nbytes; + unsigned int id; + unsigned int sw_idx, hw_idx; + uint32_t toeplitz_hash_result; + struct CE_state *CE_state = scn->ce_id_to_state[ce_id]; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY, + NULL, NULL, 0, 0); + + /* Since this function is called from both user context and + * tasklet context the spinlock has to lock the bottom halves. + * This fix assumes that ATH_11AC_TXCOMPACT flag is always + * enabled in TX polling mode. If this is not the case, more + * bottom halve spin lock changes are needed. Due to data path + * performance concern, after internal discussion we've decided + * to make minimum change, i.e., only address the issue occurred + * in this function. The possible negative effect of this minimum + * change is that, in the future, if some other function will also + * be opened to let the user context to use, those cases need to be + * addressed by change spin_lock to spin_lock_bh also. + */ + + qdf_spin_lock_bh(&CE_state->ce_index_lock); + + if (CE_state->send_cb) { + { + struct ce_ops *ce_services = hif_state->ce_services; + /* Pop completed send buffers and call the + * registered send callback for each + */ + while (ce_services->ce_completed_send_next_nolock + (CE_state, &CE_context, + &transfer_context, &buf, + &nbytes, &id, &sw_idx, &hw_idx, + &toeplitz_hash_result) == + QDF_STATUS_SUCCESS) { + if (ce_id != CE_HTT_H2T_MSG) { + qdf_spin_unlock_bh( + &CE_state->ce_index_lock); + CE_state->send_cb( + (struct CE_handle *) + CE_state, CE_context, + transfer_context, buf, + nbytes, id, sw_idx, hw_idx, + toeplitz_hash_result); + qdf_spin_lock_bh( + &CE_state->ce_index_lock); + } else { + struct HIF_CE_pipe_info *pipe_info = + (struct HIF_CE_pipe_info *) + CE_context; + + qdf_spin_lock_bh(&pipe_info-> + completion_freeq_lock); + pipe_info->num_sends_allowed++; + qdf_spin_unlock_bh(&pipe_info-> + completion_freeq_lock); + } + } + } + } + + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + + hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT, + NULL, NULL, 0, 0); + Q_TARGET_ACCESS_END(scn); +} + +#endif /*ATH_11AC_TXCOMPACT */ + +/* + * ce_engine_service_reg: + * + * Called from ce_per_engine_service and goes through the regular interrupt + * handling that does not involve the WLAN fast path feature. + * + * Returns void + */ +void ce_engine_service_reg(struct hif_softc *scn, int CE_id) +{ + struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; + uint32_t ctrl_addr = CE_state->ctrl_addr; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + void *CE_context; + void *transfer_context; + qdf_dma_addr_t buf; + unsigned int nbytes; + unsigned int id; + unsigned int flags; + unsigned int more_comp_cnt = 0; + unsigned int more_snd_comp_cnt = 0; + unsigned int sw_idx, hw_idx; + uint32_t toeplitz_hash_result; + uint32_t mode = hif_get_conparam(scn); + +more_completions: + if (CE_state->recv_cb) { + + /* Pop completed recv buffers and call + * the registered recv callback for each + */ + while (hif_state->ce_services->ce_completed_recv_next_nolock + (CE_state, &CE_context, &transfer_context, + &buf, &nbytes, &id, &flags) == + QDF_STATUS_SUCCESS) { + qdf_spin_unlock(&CE_state->ce_index_lock); + CE_state->recv_cb((struct CE_handle *)CE_state, + CE_context, transfer_context, buf, + nbytes, id, flags); + + qdf_spin_lock(&CE_state->ce_index_lock); + /* + * EV #112693 - + * [Peregrine][ES1][WB342][Win8x86][Performance] + * BSoD_0x133 occurred in VHT80 UDP_DL + * Break out DPC by force if number of loops in + * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES + * to avoid spending too long time in + * DPC for each interrupt handling. Schedule another + * DPC to avoid data loss if we had taken + * force-break action before apply to Windows OS + * only currently, Linux/MAC os can expand to their + * platform if necessary + */ + + /* Break the receive processes by + * force if force_break set up + */ + if (qdf_unlikely(CE_state->force_break)) { + qdf_atomic_set(&CE_state->rx_pending, 1); + return; + } + } + } + + /* + * Attention: We may experience potential infinite loop for below + * While Loop during Sending Stress test. + * Resolve the same way as Receive Case (Refer to EV #112693) + */ + + if (CE_state->send_cb) { + /* Pop completed send buffers and call + * the registered send callback for each + */ + +#ifdef ATH_11AC_TXCOMPACT + while (hif_state->ce_services->ce_completed_send_next_nolock + (CE_state, &CE_context, + &transfer_context, &buf, &nbytes, + &id, &sw_idx, &hw_idx, + &toeplitz_hash_result) == QDF_STATUS_SUCCESS) { + + if (CE_id != CE_HTT_H2T_MSG || + QDF_IS_EPPING_ENABLED(mode)) { + qdf_spin_unlock(&CE_state->ce_index_lock); + CE_state->send_cb((struct CE_handle *)CE_state, + CE_context, transfer_context, + buf, nbytes, id, sw_idx, + hw_idx, toeplitz_hash_result); + qdf_spin_lock(&CE_state->ce_index_lock); + } else { + struct HIF_CE_pipe_info *pipe_info = + (struct HIF_CE_pipe_info *)CE_context; + + qdf_spin_lock_bh(&pipe_info-> + completion_freeq_lock); + pipe_info->num_sends_allowed++; + qdf_spin_unlock_bh(&pipe_info-> + completion_freeq_lock); + } + } +#else /*ATH_11AC_TXCOMPACT */ + while (hif_state->ce_services->ce_completed_send_next_nolock + (CE_state, &CE_context, + &transfer_context, &buf, &nbytes, + &id, &sw_idx, &hw_idx, + &toeplitz_hash_result) == QDF_STATUS_SUCCESS) { + qdf_spin_unlock(&CE_state->ce_index_lock); + CE_state->send_cb((struct CE_handle *)CE_state, + CE_context, transfer_context, buf, + nbytes, id, sw_idx, hw_idx, + toeplitz_hash_result); + qdf_spin_lock(&CE_state->ce_index_lock); + } +#endif /*ATH_11AC_TXCOMPACT */ + } + +more_watermarks: + if (CE_state->misc_cbs) { + if (CE_state->watermark_cb && + hif_state->ce_services->watermark_int(CE_state, + &flags)) { + qdf_spin_unlock(&CE_state->ce_index_lock); + /* Convert HW IS bits to software flags */ + CE_state->watermark_cb((struct CE_handle *)CE_state, + CE_state->wm_context, flags); + qdf_spin_lock(&CE_state->ce_index_lock); + } + } + + /* + * Clear the misc interrupts (watermark) that were handled above, + * and that will be checked again below. + * Clear and check for copy-complete interrupts again, just in case + * more copy completions happened while the misc interrupts were being + * handled. + */ + if (!ce_srng_based(scn)) { + if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) { + CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr, + CE_WATERMARK_MASK | + HOST_IS_COPY_COMPLETE_MASK); + } else { + qdf_atomic_set(&CE_state->rx_pending, 0); + hif_err_rl("%s: target access is not allowed", + __func__); + return; + } + } + + /* + * Now that per-engine interrupts are cleared, verify that + * no recv interrupts arrive while processing send interrupts, + * and no recv or send interrupts happened while processing + * misc interrupts.Go back and check again.Keep checking until + * we find no more events to process. + */ + if (CE_state->recv_cb && + hif_state->ce_services->ce_recv_entries_done_nolock(scn, + CE_state)) { + if (QDF_IS_EPPING_ENABLED(mode) || + more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) { + goto more_completions; + } else { + if (!ce_srng_based(scn)) { + HIF_ERROR( + "%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x", + __func__, + CE_state->dest_ring->nentries_mask, + CE_state->dest_ring->sw_index, + CE_DEST_RING_READ_IDX_GET(scn, + CE_state->ctrl_addr)); + } + } + } + + if (CE_state->send_cb && + hif_state->ce_services->ce_send_entries_done_nolock(scn, + CE_state)) { + if (QDF_IS_EPPING_ENABLED(mode) || + more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) { + goto more_completions; + } else { + if (!ce_srng_based(scn)) { + HIF_ERROR( + "%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x", + __func__, + CE_state->src_ring->nentries_mask, + CE_state->src_ring->sw_index, + CE_SRC_RING_READ_IDX_GET(scn, + CE_state->ctrl_addr)); + } + } + } + + if (CE_state->misc_cbs && CE_state->watermark_cb) { + if (hif_state->ce_services->watermark_int(CE_state, &flags)) + goto more_watermarks; + } + + qdf_atomic_set(&CE_state->rx_pending, 0); +} + +/* + * Guts of interrupt handler for per-engine interrupts on a particular CE. + * + * Invokes registered callbacks for recv_complete, + * send_complete, and watermarks. + * + * Returns: number of messages processed + */ +int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id) +{ + struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; + + if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data)) + return CE_state->receive_count; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) { + HIF_ERROR("[premature rc=0]"); + return 0; /* no work done */ + } + + /* Clear force_break flag and re-initialize receive_count to 0 */ + CE_state->receive_count = 0; + CE_state->force_break = 0; + CE_state->ce_service_start_time = sched_clock(); + CE_state->ce_service_yield_time = + CE_state->ce_service_start_time + + hif_get_ce_service_max_yield_time( + (struct hif_opaque_softc *)scn); + + qdf_spin_lock(&CE_state->ce_index_lock); + + CE_state->service(scn, CE_id); + + qdf_spin_unlock(&CE_state->ce_index_lock); + + if (Q_TARGET_ACCESS_END(scn) < 0) + HIF_ERROR("<--[premature rc=%d]", CE_state->receive_count); + return CE_state->receive_count; +} +qdf_export_symbol(ce_per_engine_service); + +/* + * Handler for per-engine interrupts on ALL active CEs. + * This is used in cases where the system is sharing a + * single interrput for all CEs + */ + +void ce_per_engine_service_any(int irq, struct hif_softc *scn) +{ + int CE_id; + uint32_t intr_summary; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + if (!qdf_atomic_read(&scn->tasklet_from_intr)) { + for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { + struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; + + if (qdf_atomic_read(&CE_state->rx_pending)) { + qdf_atomic_set(&CE_state->rx_pending, 0); + ce_per_engine_service(scn, CE_id); + } + } + + Q_TARGET_ACCESS_END(scn); + return; + } + + intr_summary = CE_INTERRUPT_SUMMARY(scn); + + for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) { + if (intr_summary & (1 << CE_id)) + intr_summary &= ~(1 << CE_id); + else + continue; /* no intr pending on this CE */ + + ce_per_engine_service(scn, CE_id); + } + + Q_TARGET_ACCESS_END(scn); +} + +/*Iterate the CE_state list and disable the compl interrupt + * if it has been registered already. + */ +void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn) +{ + int CE_id; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { + struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; + uint32_t ctrl_addr = CE_state->ctrl_addr; + + /* if the interrupt is currently enabled, disable it */ + if (!CE_state->disable_copy_compl_intr + && (CE_state->send_cb || CE_state->recv_cb)) + CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr); + + if (CE_state->watermark_cb) + CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr); + } + Q_TARGET_ACCESS_END(scn); +} + +void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn) +{ + int CE_id; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { + struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; + uint32_t ctrl_addr = CE_state->ctrl_addr; + + /* + * If the CE is supposed to have copy complete interrupts + * enabled (i.e. there a callback registered, and the + * "disable" flag is not set), then re-enable the interrupt. + */ + if (!CE_state->disable_copy_compl_intr + && (CE_state->send_cb || CE_state->recv_cb)) + CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr); + + if (CE_state->watermark_cb) + CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr); + } + Q_TARGET_ACCESS_END(scn); +} + +/** + * ce_send_cb_register(): register completion handler + * @copyeng: CE_state representing the ce we are adding the behavior to + * @fn_ptr: callback that the ce should use when processing tx completions + * @disable_interrupts: if the interupts should be enabled or not. + * + * Caller should guarantee that no transactions are in progress before + * switching the callback function. + * + * Registers the send context before the fn pointer so that if the cb is valid + * the context should be valid. + * + * Beware that currently this function will enable completion interrupts. + */ +void +ce_send_cb_register(struct CE_handle *copyeng, + ce_send_cb fn_ptr, + void *ce_send_context, int disable_interrupts) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct hif_softc *scn; + struct HIF_CE_state *hif_state; + + if (!CE_state) { + HIF_ERROR("%s: Error CE state = NULL", __func__); + return; + } + scn = CE_state->scn; + hif_state = HIF_GET_CE_STATE(scn); + if (!hif_state) { + HIF_ERROR("%s: Error HIF state = NULL", __func__); + return; + } + CE_state->send_context = ce_send_context; + CE_state->send_cb = fn_ptr; + hif_state->ce_services->ce_per_engine_handler_adjust(CE_state, + disable_interrupts); +} +qdf_export_symbol(ce_send_cb_register); + +/** + * ce_recv_cb_register(): register completion handler + * @copyeng: CE_state representing the ce we are adding the behavior to + * @fn_ptr: callback that the ce should use when processing rx completions + * @disable_interrupts: if the interupts should be enabled or not. + * + * Registers the send context before the fn pointer so that if the cb is valid + * the context should be valid. + * + * Caller should guarantee that no transactions are in progress before + * switching the callback function. + */ +void +ce_recv_cb_register(struct CE_handle *copyeng, + CE_recv_cb fn_ptr, + void *CE_recv_context, int disable_interrupts) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct hif_softc *scn; + struct HIF_CE_state *hif_state; + + if (!CE_state) { + HIF_ERROR("%s: ERROR CE state = NULL", __func__); + return; + } + scn = CE_state->scn; + hif_state = HIF_GET_CE_STATE(scn); + if (!hif_state) { + HIF_ERROR("%s: Error HIF state = NULL", __func__); + return; + } + CE_state->recv_context = CE_recv_context; + CE_state->recv_cb = fn_ptr; + hif_state->ce_services->ce_per_engine_handler_adjust(CE_state, + disable_interrupts); +} +qdf_export_symbol(ce_recv_cb_register); + +/** + * ce_watermark_cb_register(): register completion handler + * @copyeng: CE_state representing the ce we are adding the behavior to + * @fn_ptr: callback that the ce should use when processing watermark events + * + * Caller should guarantee that no watermark events are being processed before + * switching the callback function. + */ +void +ce_watermark_cb_register(struct CE_handle *copyeng, + CE_watermark_cb fn_ptr, void *CE_wm_context) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct hif_softc *scn = CE_state->scn; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + CE_state->watermark_cb = fn_ptr; + CE_state->wm_context = CE_wm_context; + hif_state->ce_services->ce_per_engine_handler_adjust(CE_state, + 0); + if (fn_ptr) + CE_state->misc_cbs = 1; +} + +bool ce_get_rx_pending(struct hif_softc *scn) +{ + int CE_id; + + for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { + struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; + + if (qdf_atomic_read(&CE_state->rx_pending)) + return true; + } + + return false; +} + +/** + * ce_check_rx_pending() - ce_check_rx_pending + * @CE_state: context of the copy engine to check + * + * Return: true if there per_engine_service + * didn't process all the rx descriptors. + */ +bool ce_check_rx_pending(struct CE_state *CE_state) +{ + if (qdf_atomic_read(&CE_state->rx_pending)) + return true; + else + return false; +} +qdf_export_symbol(ce_check_rx_pending); + +#ifdef IPA_OFFLOAD +/** + * ce_ipa_get_resource() - get uc resource on copyengine + * @ce: copyengine context + * @ce_sr: copyengine source ring resource info + * @ce_sr_ring_size: copyengine source ring size + * @ce_reg_paddr: copyengine register physical address + * + * Copy engine should release resource to micro controller + * Micro controller needs + * - Copy engine source descriptor base address + * - Copy engine source descriptor size + * - PCI BAR address to access copy engine regiser + * + * Return: None + */ +void ce_ipa_get_resource(struct CE_handle *ce, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr) +{ + struct CE_state *CE_state = (struct CE_state *)ce; + uint32_t ring_loop; + struct CE_src_desc *ce_desc; + qdf_dma_addr_t phy_mem_base; + struct hif_softc *scn = CE_state->scn; + + if (CE_UNUSED == CE_state->state) { + *qdf_mem_get_dma_addr_ptr(scn->qdf_dev, + &CE_state->scn->ipa_ce_ring->mem_info) = 0; + *ce_sr_ring_size = 0; + return; + } + + /* Update default value for descriptor */ + for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries; + ring_loop++) { + ce_desc = (struct CE_src_desc *) + ((char *)CE_state->src_ring->base_addr_owner_space + + ring_loop * (sizeof(struct CE_src_desc))); + CE_IPA_RING_INIT(ce_desc); + } + + /* Get BAR address */ + hif_read_phy_mem_base(CE_state->scn, &phy_mem_base); + + *ce_sr = CE_state->scn->ipa_ce_ring; + *ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries * + sizeof(struct CE_src_desc)); + *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) + + SR_WR_INDEX_ADDRESS; +} +#endif /* IPA_OFFLOAD */ + +#ifdef HIF_CE_DEBUG_DATA_BUF +/** + * hif_dump_desc_data_buf() - record ce descriptor events + * @buf: buffer to copy to + * @pos: Current position till which the buf is filled + * @data: Data to be copied + * @data_len: Length of the data to be copied + */ +static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos, + uint8_t *data, uint32_t data_len) +{ + pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n", + CE_DEBUG_MAX_DATA_BUF_SIZE); + + if ((data_len > 0) && data) { + if (data_len < 16) { + hex_dump_to_buffer(data, + CE_DEBUG_DATA_PER_ROW, + 16, 1, buf + pos, + (ssize_t)PAGE_SIZE - pos, + false); + pos += CE_DEBUG_PRINT_BUF_SIZE(data_len); + pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n"); + } else { + uint32_t rows = (data_len / 16) + 1; + uint32_t row = 0; + + for (row = 0; row < rows; row++) { + hex_dump_to_buffer(data + (row * 16), + CE_DEBUG_DATA_PER_ROW, + 16, 1, buf + pos, + (ssize_t)PAGE_SIZE + - pos, false); + pos += + CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW); + pos += snprintf(buf + pos, PAGE_SIZE - pos, + "\n"); + } + } + } + + return pos; +} +#endif + +/* + * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked + * for defined here + */ +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) +static const char *ce_event_type_to_str(enum hif_ce_event_type type) +{ + switch (type) { + case HIF_RX_DESC_POST: + return "HIF_RX_DESC_POST"; + case HIF_RX_DESC_COMPLETION: + return "HIF_RX_DESC_COMPLETION"; + case HIF_TX_GATHER_DESC_POST: + return "HIF_TX_GATHER_DESC_POST"; + case HIF_TX_DESC_POST: + return "HIF_TX_DESC_POST"; + case HIF_TX_DESC_SOFTWARE_POST: + return "HIF_TX_DESC_SOFTWARE_POST"; + case HIF_TX_DESC_COMPLETION: + return "HIF_TX_DESC_COMPLETION"; + case FAST_RX_WRITE_INDEX_UPDATE: + return "FAST_RX_WRITE_INDEX_UPDATE"; + case FAST_RX_SOFTWARE_INDEX_UPDATE: + return "FAST_RX_SOFTWARE_INDEX_UPDATE"; + case FAST_TX_WRITE_INDEX_UPDATE: + return "FAST_TX_WRITE_INDEX_UPDATE"; + case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: + return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE"; + case FAST_TX_SOFTWARE_INDEX_UPDATE: + return "FAST_TX_SOFTWARE_INDEX_UPDATE"; + case RESUME_WRITE_INDEX_UPDATE: + return "RESUME_WRITE_INDEX_UPDATE"; + case HIF_IRQ_EVENT: + return "HIF_IRQ_EVENT"; + case HIF_CE_TASKLET_ENTRY: + return "HIF_CE_TASKLET_ENTRY"; + case HIF_CE_TASKLET_RESCHEDULE: + return "HIF_CE_TASKLET_RESCHEDULE"; + case HIF_CE_TASKLET_EXIT: + return "HIF_CE_TASKLET_EXIT"; + case HIF_CE_REAP_ENTRY: + return "HIF_CE_REAP_ENTRY"; + case HIF_CE_REAP_EXIT: + return "HIF_CE_REAP_EXIT"; + case NAPI_SCHEDULE: + return "NAPI_SCHEDULE"; + case NAPI_POLL_ENTER: + return "NAPI_POLL_ENTER"; + case NAPI_COMPLETE: + return "NAPI_COMPLETE"; + case NAPI_POLL_EXIT: + return "NAPI_POLL_EXIT"; + case HIF_RX_NBUF_ALLOC_FAILURE: + return "HIF_RX_NBUF_ALLOC_FAILURE"; + case HIF_RX_NBUF_MAP_FAILURE: + return "HIF_RX_NBUF_MAP_FAILURE"; + case HIF_RX_NBUF_ENQUEUE_FAILURE: + return "HIF_RX_NBUF_ENQUEUE_FAILURE"; + default: + return "invalid"; + } +} + +/** + * hif_dump_desc_event() - record ce descriptor events + * @buf: Buffer to which to be copied + * @ce_id: which ce is the event occurring on + * @index: index that the descriptor was/will be at. + */ +ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf) +{ + struct hif_ce_desc_event *event; + uint64_t secs, usecs; + ssize_t len = 0; + struct ce_desc_hist *ce_hist = NULL; + struct hif_ce_desc_event *hist_ev = NULL; + + if (!scn) + return -EINVAL; + + ce_hist = &scn->hif_ce_desc_hist; + + if (ce_hist->hist_id >= CE_COUNT_MAX || + ce_hist->hist_index >= HIF_CE_HISTORY_MAX) { + qdf_print("Invalid values"); + return -EINVAL; + } + + hist_ev = + (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id]; + + if (!hist_ev) { + qdf_print("Low Memory"); + return -EINVAL; + } + + event = &hist_ev[ce_hist->hist_index]; + + qdf_log_timestamp_to_secs(event->time, &secs, &usecs); + + len += snprintf(buf, PAGE_SIZE - len, + "\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK", + secs, usecs, ce_hist->hist_id, + ce_event_type_to_str(event->type), + event->index, event->memory); +#ifdef HIF_CE_DEBUG_DATA_BUF + len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%lu", + event->actual_data_len); +#endif + + len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: "); + + hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc), + 16, 1, buf + len, + (ssize_t)PAGE_SIZE - len, false); + len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc)); + len += snprintf(buf + len, PAGE_SIZE - len, "\n"); + +#ifdef HIF_CE_DEBUG_DATA_BUF + if (ce_hist->data_enable[ce_hist->hist_id]) + len = hif_dump_desc_data_buf(buf, len, event->data, + (event->actual_data_len < + CE_DEBUG_MAX_DATA_BUF_SIZE) ? + event->actual_data_len : + CE_DEBUG_MAX_DATA_BUF_SIZE); +#endif /*HIF_CE_DEBUG_DATA_BUF*/ + + len += snprintf(buf + len, PAGE_SIZE - len, "END\n"); + + return len; +} + +/* + * hif_store_desc_trace_buf_index() - + * API to get the CE id and CE debug storage buffer index + * + * @dev: network device + * @attr: sysfs attribute + * @buf: data got from the user + * + * Return total length + */ +ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn, + const char *buf, size_t size) +{ + struct ce_desc_hist *ce_hist = NULL; + + if (!scn) + return -EINVAL; + + ce_hist = &scn->hif_ce_desc_hist; + + if (!size) { + qdf_nofl_err("%s: Invalid input buffer.", __func__); + return -EINVAL; + } + + if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id, + (unsigned int *)&ce_hist->hist_index) != 2) { + qdf_nofl_err("%s: Invalid input value.", __func__); + return -EINVAL; + } + if ((ce_hist->hist_id >= CE_COUNT_MAX) || + (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) { + qdf_print("Invalid values"); + return -EINVAL; + } + + return size; +} + +#endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */ + +#ifdef HIF_CE_DEBUG_DATA_BUF +/* + * hif_ce_en_desc_hist() - + * API to enable recording the CE desc history + * + * @dev: network device + * @attr: sysfs attribute + * @buf: buffer to copy the data. + * + * Starts recording the ce desc history + * + * Return total length copied + */ +ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size) +{ + struct ce_desc_hist *ce_hist = NULL; + uint32_t cfg = 0; + uint32_t ce_id = 0; + + if (!scn) + return -EINVAL; + + ce_hist = &scn->hif_ce_desc_hist; + + if (!size) { + qdf_nofl_err("%s: Invalid input buffer.", __func__); + return -EINVAL; + } + + if (sscanf(buf, "%u %u", (unsigned int *)&ce_id, + (unsigned int *)&cfg) != 2) { + qdf_nofl_err("%s: Invalid input: Enter CE Id<1/0>.", + __func__); + return -EINVAL; + } + if (ce_id >= CE_COUNT_MAX) { + qdf_print("Invalid value CE Id"); + return -EINVAL; + } + + if ((cfg > 1 || cfg < 0)) { + qdf_print("Invalid values: enter 0 or 1"); + return -EINVAL; + } + + if (!ce_hist->hist_ev[ce_id]) + return -EINVAL; + + qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]); + if (cfg == 1) { + if (ce_hist->data_enable[ce_id] == 1) { + qdf_debug("Already Enabled"); + } else { + if (alloc_mem_ce_debug_hist_data(scn, ce_id) + == QDF_STATUS_E_NOMEM){ + ce_hist->data_enable[ce_id] = 0; + qdf_err("%s:Memory Alloc failed", __func__); + } else + ce_hist->data_enable[ce_id] = 1; + } + } else if (cfg == 0) { + if (ce_hist->data_enable[ce_id] == 0) { + qdf_debug("Already Disabled"); + } else { + ce_hist->data_enable[ce_id] = 0; + free_mem_ce_debug_hist_data(scn, ce_id); + } + } + qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]); + + return size; +} + +/* + * hif_disp_ce_enable_desc_data_hist() - + * API to display value of data_enable + * + * @dev: network device + * @attr: sysfs attribute + * @buf: buffer to copy the data. + * + * Return total length copied + */ +ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf) +{ + ssize_t len = 0; + uint32_t ce_id = 0; + struct ce_desc_hist *ce_hist = NULL; + + if (!scn) + return -EINVAL; + + ce_hist = &scn->hif_ce_desc_hist; + + for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) { + len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n", + ce_id, ce_hist->data_enable[ce_id]); + } + + return len; +} +#endif /* HIF_CE_DEBUG_DATA_BUF */ + +#ifdef OL_ATH_SMART_LOGGING +#define GUARD_SPACE 10 +#define LOG_ID_SZ 4 +/* + * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf + * @src_ring: SRC ring state + * @buf_cur: Current pointer in ring buffer + * @buf_init:Start of the ring buffer + * @buf_sz: Size of the ring buffer + * @skb_sz: Max size of the SKB buffer to be copied + * + * Dumps all the CE SRC ring descriptors and buffers pointed by them in to + * the given buf, skb_sz is the max buffer size to be copied + * + * Return: Current pointer in ring buffer + */ +static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring, + uint8_t *buf_cur, uint8_t *buf_init, + uint32_t buf_sz, uint32_t skb_sz) +{ + struct CE_src_desc *src_ring_base; + uint32_t len, entry; + struct CE_src_desc *src_desc; + qdf_nbuf_t nbuf; + uint32_t available_buf; + + src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space; + len = sizeof(struct CE_ring_state); + available_buf = buf_sz - (buf_cur - buf_init); + if (available_buf < (len + GUARD_SPACE)) { + buf_cur = buf_init; + } + + qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state)); + buf_cur += sizeof(struct CE_ring_state); + + for (entry = 0; entry < src_ring->nentries; entry++) { + src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry); + nbuf = src_ring->per_transfer_context[entry]; + if (nbuf) { + uint32_t skb_len = qdf_nbuf_len(nbuf); + uint32_t skb_cp_len = qdf_min(skb_len, skb_sz); + + len = sizeof(struct CE_src_desc) + skb_cp_len + + LOG_ID_SZ + sizeof(skb_cp_len); + available_buf = buf_sz - (buf_cur - buf_init); + if (available_buf < (len + GUARD_SPACE)) { + buf_cur = buf_init; + } + qdf_mem_copy(buf_cur, src_desc, + sizeof(struct CE_src_desc)); + buf_cur += sizeof(struct CE_src_desc); + + available_buf = buf_sz - (buf_cur - buf_init); + buf_cur += snprintf(buf_cur, available_buf, "SKB%d", + skb_cp_len); + + if (skb_cp_len) { + qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf), + skb_cp_len); + buf_cur += skb_cp_len; + } + } else { + len = sizeof(struct CE_src_desc) + LOG_ID_SZ; + available_buf = buf_sz - (buf_cur - buf_init); + if (available_buf < (len + GUARD_SPACE)) { + buf_cur = buf_init; + } + qdf_mem_copy(buf_cur, src_desc, + sizeof(struct CE_src_desc)); + buf_cur += sizeof(struct CE_src_desc); + available_buf = buf_sz - (buf_cur - buf_init); + buf_cur += snprintf(buf_cur, available_buf, "NUL"); + } + } + + return buf_cur; +} + +/* + * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf + * @dest_ring: SRC ring state + * @buf_cur: Current pointer in ring buffer + * @buf_init:Start of the ring buffer + * @buf_sz: Size of the ring buffer + * @skb_sz: Max size of the SKB buffer to be copied + * + * Dumps all the CE SRC ring descriptors and buffers pointed by them in to + * the given buf, skb_sz is the max buffer size to be copied + * + * Return: Current pointer in ring buffer + */ +static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring, + uint8_t *buf_cur, uint8_t *buf_init, + uint32_t buf_sz, uint32_t skb_sz) +{ + struct CE_dest_desc *dest_ring_base; + uint32_t len, entry; + struct CE_dest_desc *dest_desc; + qdf_nbuf_t nbuf; + uint32_t available_buf; + + dest_ring_base = + (struct CE_dest_desc *)dest_ring->base_addr_owner_space; + + len = sizeof(struct CE_ring_state); + available_buf = buf_sz - (buf_cur - buf_init); + if (available_buf < (len + GUARD_SPACE)) { + buf_cur = buf_init; + } + + qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state)); + buf_cur += sizeof(struct CE_ring_state); + + for (entry = 0; entry < dest_ring->nentries; entry++) { + dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry); + + nbuf = dest_ring->per_transfer_context[entry]; + if (nbuf) { + uint32_t skb_len = qdf_nbuf_len(nbuf); + uint32_t skb_cp_len = qdf_min(skb_len, skb_sz); + + len = sizeof(struct CE_dest_desc) + skb_cp_len + + LOG_ID_SZ + sizeof(skb_cp_len); + + available_buf = buf_sz - (buf_cur - buf_init); + if (available_buf < (len + GUARD_SPACE)) { + buf_cur = buf_init; + } + + qdf_mem_copy(buf_cur, dest_desc, + sizeof(struct CE_dest_desc)); + buf_cur += sizeof(struct CE_dest_desc); + available_buf = buf_sz - (buf_cur - buf_init); + buf_cur += snprintf(buf_cur, available_buf, "SKB%d", + skb_cp_len); + if (skb_cp_len) { + qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf), + skb_cp_len); + buf_cur += skb_cp_len; + } + } else { + len = sizeof(struct CE_dest_desc) + LOG_ID_SZ; + available_buf = buf_sz - (buf_cur - buf_init); + if (available_buf < (len + GUARD_SPACE)) { + buf_cur = buf_init; + } + qdf_mem_copy(buf_cur, dest_desc, + sizeof(struct CE_dest_desc)); + buf_cur += sizeof(struct CE_dest_desc); + available_buf = buf_sz - (buf_cur - buf_init); + buf_cur += snprintf(buf_cur, available_buf, "NUL"); + } + } + return buf_cur; +} + +/** + * hif_log_ce_dump() - Copy all the CE DEST ring to buf + * Calls the respective function to dump all the CE SRC/DEST ring descriptors + * and buffers pointed by them in to the given buf + */ +uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur, + uint8_t *buf_init, uint32_t buf_sz, + uint32_t ce, uint32_t skb_sz) +{ + struct CE_state *ce_state; + struct CE_ring_state *src_ring; + struct CE_ring_state *dest_ring; + + ce_state = scn->ce_id_to_state[ce]; + src_ring = ce_state->src_ring; + dest_ring = ce_state->dest_ring; + + if (src_ring) { + buf_cur = hif_log_src_ce_dump(src_ring, buf_cur, + buf_init, buf_sz, skb_sz); + } else if (dest_ring) { + buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur, + buf_init, buf_sz, skb_sz); + } + + return buf_cur; +} + +qdf_export_symbol(hif_log_dump_ce); +#endif /* OL_ATH_SMART_LOGGING */ + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service_legacy.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service_legacy.c new file mode 100644 index 0000000000000000000000000000000000000000..366069e97a25567979b72ceb27fc4b8361bf71fd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service_legacy.c @@ -0,0 +1,1330 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "ce_api.h" +#include "ce_internal.h" +#include "ce_main.h" +#include "ce_reg.h" +#include "hif.h" +#include "hif_debug.h" +#include "hif_io32.h" +#include "qdf_lock.h" +#include "hif_main.h" +#include "hif_napi.h" +#include "qdf_module.h" +#include "regtable.h" + +/* + * Support for Copy Engine hardware, which is mainly used for + * communication between Host and Target over a PCIe interconnect. + */ + +/* + * A single CopyEngine (CE) comprises two "rings": + * a source ring + * a destination ring + * + * Each ring consists of a number of descriptors which specify + * an address, length, and meta-data. + * + * Typically, one side of the PCIe interconnect (Host or Target) + * controls one ring and the other side controls the other ring. + * The source side chooses when to initiate a transfer and it + * chooses what to send (buffer address, length). The destination + * side keeps a supply of "anonymous receive buffers" available and + * it handles incoming data as it arrives (when the destination + * receives an interrupt). + * + * The sender may send a simple buffer (address/length) or it may + * send a small list of buffers. When a small list is sent, hardware + * "gathers" these and they end up in a single destination buffer + * with a single interrupt. + * + * There are several "contexts" managed by this layer -- more, it + * may seem -- than should be needed. These are provided mainly for + * maximum flexibility and especially to facilitate a simpler HIF + * implementation. There are per-CopyEngine recv, send, and watermark + * contexts. These are supplied by the caller when a recv, send, + * or watermark handler is established and they are echoed back to + * the caller when the respective callbacks are invoked. There is + * also a per-transfer context supplied by the caller when a buffer + * (or sendlist) is sent and when a buffer is enqueued for recv. + * These per-transfer contexts are echoed back to the caller when + * the buffer is sent/received. + * Target TX harsh result toeplitz_hash_result + */ + +/* NB: Modeled after ce_completed_send_next */ +/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */ +#define CE_WM_SHFT 1 + +#ifdef WLAN_FEATURE_FASTPATH +#ifdef QCA_WIFI_3_0 +static inline void +ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc, + uint64_t dma_addr, + uint32_t user_flags) +{ + shadow_src_desc->buffer_addr_hi = + (uint32_t)((dma_addr >> 32) & 0x1F); + user_flags |= shadow_src_desc->buffer_addr_hi; + memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags, + sizeof(uint32_t)); +} +#else +static inline void +ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc, + uint64_t dma_addr, + uint32_t user_flags) +{ +} +#endif + +#define SLOTS_PER_DATAPATH_TX 2 + +/** + * ce_send_fast() CE layer Tx buffer posting function + * @copyeng: copy engine handle + * @msdu: msdu to be sent + * @transfer_id: transfer_id + * @download_len: packet download length + * + * Assumption : Called with an array of MSDU's + * Function: + * For each msdu in the array + * 1. Check no. of available entries + * 2. Create src ring entries (allocated in consistent memory + * 3. Write index to h/w + * + * Return: No. of packets that could be sent + */ +int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu, + unsigned int transfer_id, uint32_t download_len) +{ + struct CE_state *ce_state = (struct CE_state *)copyeng; + struct hif_softc *scn = ce_state->scn; + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct CE_ring_state *src_ring = ce_state->src_ring; + u_int32_t ctrl_addr = ce_state->ctrl_addr; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int write_index; + unsigned int sw_index; + unsigned int frag_len; + uint64_t dma_addr; + uint32_t user_flags; + enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE; + bool ok_to_send = true; + + /* + * Create a log assuming the call will go through, and if not, we would + * add an error trace as well. + * Please add the same failure log for any additional error paths. + */ + DPTRACE(qdf_dp_trace(msdu, + QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, + qdf_nbuf_data_addr(msdu), + sizeof(qdf_nbuf_data(msdu)), QDF_TX)); + + qdf_spin_lock_bh(&ce_state->ce_index_lock); + + /* + * Request runtime PM resume if it has already suspended and make + * sure there is no PCIe link access. + */ + if (hif_pm_runtime_get(hif_hdl, + RTPM_ID_CE_SEND_FAST) != 0) + ok_to_send = false; + + if (ok_to_send) { + Q_TARGET_ACCESS_BEGIN(scn); + DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr); + } + + write_index = src_ring->write_index; + sw_index = src_ring->sw_index; + hif_record_ce_desc_event(scn, ce_state->id, + FAST_TX_SOFTWARE_INDEX_UPDATE, + NULL, NULL, sw_index, 0); + + if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) + < SLOTS_PER_DATAPATH_TX)) { + hif_err_rl("Source ring full, required %d, available %d", + SLOTS_PER_DATAPATH_TX, + CE_RING_DELTA(nentries_mask, write_index, + sw_index - 1)); + OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL); + if (ok_to_send) + Q_TARGET_ACCESS_END(scn); + qdf_spin_unlock_bh(&ce_state->ce_index_lock); + + DPTRACE(qdf_dp_trace(NULL, + QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, + NULL, 0, QDF_TX)); + + return 0; + } + + { + struct CE_src_desc *src_ring_base = + (struct CE_src_desc *)src_ring->base_addr_owner_space; + struct CE_src_desc *shadow_base = + (struct CE_src_desc *)src_ring->shadow_base; + struct CE_src_desc *src_desc = + CE_SRC_RING_TO_DESC(src_ring_base, write_index); + struct CE_src_desc *shadow_src_desc = + CE_SRC_RING_TO_DESC(shadow_base, write_index); + + hif_pm_runtime_get_noresume(hif_hdl, RTPM_ID_HTC); + + /* + * First fill out the ring descriptor for the HTC HTT frame + * header. These are uncached writes. Should we use a local + * structure instead? + */ + /* HTT/HTC header can be passed as a argument */ + dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0); + shadow_src_desc->buffer_addr = (uint32_t)(dma_addr & + 0xFFFFFFFF); + user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK; + ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags); + shadow_src_desc->meta_data = transfer_id; + shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0); + ce_validate_nbytes(shadow_src_desc->nbytes, ce_state); + download_len -= shadow_src_desc->nbytes; + /* + * HTC HTT header is a word stream, so byte swap if CE byte + * swap enabled + */ + shadow_src_desc->byte_swap = ((ce_state->attr_flags & + CE_ATTR_BYTE_SWAP_DATA) != 0); + /* For the first one, it still does not need to write */ + shadow_src_desc->gather = 1; + *src_desc = *shadow_src_desc; + /* By default we could initialize the transfer context to this + * value + */ + src_ring->per_transfer_context[write_index] = + CE_SENDLIST_ITEM_CTXT; + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + + src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index); + shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index); + /* + * Now fill out the ring descriptor for the actual data + * packet + */ + dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1); + shadow_src_desc->buffer_addr = (uint32_t)(dma_addr & + 0xFFFFFFFF); + /* + * Clear packet offset for all but the first CE desc. + */ + user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M; + ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags); + shadow_src_desc->meta_data = transfer_id; + + /* get actual packet length */ + frag_len = qdf_nbuf_get_frag_len(msdu, 1); + + /* download remaining bytes of payload */ + shadow_src_desc->nbytes = download_len; + ce_validate_nbytes(shadow_src_desc->nbytes, ce_state); + if (shadow_src_desc->nbytes > frag_len) + shadow_src_desc->nbytes = frag_len; + + /* Data packet is a byte stream, so disable byte swap */ + shadow_src_desc->byte_swap = 0; + /* For the last one, gather is not set */ + shadow_src_desc->gather = 0; + *src_desc = *shadow_src_desc; + src_ring->per_transfer_context[write_index] = msdu; + + hif_record_ce_desc_event(scn, ce_state->id, type, + (union ce_desc *)src_desc, + src_ring->per_transfer_context[write_index], + write_index, shadow_src_desc->nbytes); + + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + + DPTRACE(qdf_dp_trace(msdu, + QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, + qdf_nbuf_data_addr(msdu), + sizeof(qdf_nbuf_data(msdu)), QDF_TX)); + } + + src_ring->write_index = write_index; + + if (ok_to_send) { + if (qdf_likely(ce_state->state == CE_RUNNING)) { + type = FAST_TX_WRITE_INDEX_UPDATE; + war_ce_src_ring_write_idx_set(scn, ctrl_addr, + write_index); + Q_TARGET_ACCESS_END(scn); + } else { + ce_state->state = CE_PENDING; + } + hif_pm_runtime_put(hif_hdl, RTPM_ID_CE_SEND_FAST); + } + + qdf_spin_unlock_bh(&ce_state->ce_index_lock); + + /* sent 1 packet */ + return 1; +} + +/** + * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler + * @ce_state: handle to copy engine state + * @cmpl_msdus: Rx msdus + * @num_cmpls: number of Rx msdus + * @ctrl_addr: CE control address + * + * Return: None + */ +static void ce_fastpath_rx_handle(struct CE_state *ce_state, + qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls, + uint32_t ctrl_addr) +{ + struct hif_softc *scn = ce_state->scn; + struct CE_ring_state *dest_ring = ce_state->dest_ring; + uint32_t nentries_mask = dest_ring->nentries_mask; + uint32_t write_index; + + qdf_spin_unlock(&ce_state->ce_index_lock); + ce_state->fastpath_handler(ce_state->context, cmpl_msdus, num_cmpls); + qdf_spin_lock(&ce_state->ce_index_lock); + + /* Update Destination Ring Write Index */ + write_index = dest_ring->write_index; + write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls); + + hif_record_ce_desc_event(scn, ce_state->id, + FAST_RX_WRITE_INDEX_UPDATE, + NULL, NULL, write_index, 0); + + CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index); + dest_ring->write_index = write_index; +} + +/** + * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs + * @scn: hif_context + * @ce_id: Copy engine ID + * 1) Go through the CE ring, and find the completions + * 2) For valid completions retrieve context (nbuf) for per_transfer_context[] + * 3) Unmap buffer & accumulate in an array. + * 4) Call message handler when array is full or when exiting the handler + * + * Return: void + */ + +void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id) +{ + struct CE_state *ce_state = scn->ce_id_to_state[ce_id]; + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct CE_ring_state *dest_ring = ce_state->dest_ring; + struct CE_dest_desc *dest_ring_base = + (struct CE_dest_desc *)dest_ring->base_addr_owner_space; + + uint32_t nentries_mask = dest_ring->nentries_mask; + uint32_t sw_index = dest_ring->sw_index; + uint32_t nbytes; + qdf_nbuf_t nbuf; + dma_addr_t paddr; + struct CE_dest_desc *dest_desc; + qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM]; + uint32_t ctrl_addr = ce_state->ctrl_addr; + uint32_t nbuf_cmpl_idx = 0; + unsigned int more_comp_cnt = 0; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct ce_ops *ce_services = hif_state->ce_services; + +more_data: + for (;;) { + dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, + sw_index); + + /* + * The following 2 reads are from non-cached memory + */ + nbytes = dest_desc->nbytes; + + /* If completion is invalid, break */ + if (qdf_unlikely(nbytes == 0)) + break; + + /* + * Build the nbuf list from valid completions + */ + nbuf = dest_ring->per_transfer_context[sw_index]; + + /* + * No lock is needed here, since this is the only thread + * that accesses the sw_index + */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + + /* + * CAREFUL : Uncached write, but still less expensive, + * since most modern caches use "write-combining" to + * flush multiple cache-writes all at once. + */ + dest_desc->nbytes = 0; + + /* + * Per our understanding this is not required on our + * since we are doing the same cache invalidation + * operation on the same buffer twice in succession, + * without any modifiication to this buffer by CPU in + * between. + * However, this code with 2 syncs in succession has + * been undergoing some testing at a customer site, + * and seemed to be showing no problems so far. Would + * like to validate from the customer, that this line + * is really not required, before we remove this line + * completely. + */ + paddr = QDF_NBUF_CB_PADDR(nbuf); + + qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr, + (skb_end_pointer(nbuf) - + (nbuf)->data), + DMA_FROM_DEVICE); + + qdf_nbuf_put_tail(nbuf, nbytes); + + qdf_assert_always(nbuf->data); + + QDF_NBUF_CB_RX_CTX_ID(nbuf) = + hif_get_rx_ctx_id(ce_state->id, hif_hdl); + cmpl_msdus[nbuf_cmpl_idx++] = nbuf; + + /* + * we are not posting the buffers back instead + * reusing the buffers + */ + if (nbuf_cmpl_idx == scn->ce_service_max_rx_ind_flush) { + hif_record_ce_desc_event(scn, ce_state->id, + FAST_RX_SOFTWARE_INDEX_UPDATE, + NULL, NULL, sw_index, 0); + dest_ring->sw_index = sw_index; + ce_fastpath_rx_handle(ce_state, cmpl_msdus, + nbuf_cmpl_idx, ctrl_addr); + + ce_state->receive_count += nbuf_cmpl_idx; + if (qdf_unlikely(hif_ce_service_should_yield( + scn, ce_state))) { + ce_state->force_break = 1; + qdf_atomic_set(&ce_state->rx_pending, 1); + return; + } + + nbuf_cmpl_idx = 0; + more_comp_cnt = 0; + } + } + + hif_record_ce_desc_event(scn, ce_state->id, + FAST_RX_SOFTWARE_INDEX_UPDATE, + NULL, NULL, sw_index, 0); + + dest_ring->sw_index = sw_index; + + /* + * If there are not enough completions to fill the array, + * just call the message handler here + */ + if (nbuf_cmpl_idx) { + ce_fastpath_rx_handle(ce_state, cmpl_msdus, + nbuf_cmpl_idx, ctrl_addr); + + ce_state->receive_count += nbuf_cmpl_idx; + if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) { + ce_state->force_break = 1; + qdf_atomic_set(&ce_state->rx_pending, 1); + return; + } + + /* check for more packets after upper layer processing */ + nbuf_cmpl_idx = 0; + more_comp_cnt = 0; + goto more_data; + } + + hif_update_napi_max_poll_time(ce_state, ce_id, qdf_get_cpu()); + + qdf_atomic_set(&ce_state->rx_pending, 0); + if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) { + CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr, + HOST_IS_COPY_COMPLETE_MASK); + } else { + hif_err_rl("%s: target access is not allowed", __func__); + return; + } + + if (ce_services->ce_recv_entries_done_nolock(scn, ce_state)) { + if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) { + goto more_data; + } else { + HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x", + __func__, nentries_mask, + ce_state->dest_ring->sw_index, + CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr)); + } + } +#ifdef NAPI_YIELD_BUDGET_BASED + /* + * Caution : Before you modify this code, please refer hif_napi_poll + * function to understand how napi_complete gets called and make the + * necessary changes. Force break has to be done till WIN disables the + * interrupt at source + */ + ce_state->force_break = 1; +#endif +} + +/** + * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled + * @scn: Handle to HIF context + * + * Return: true if fastpath is enabled else false. + */ +static inline bool ce_is_fastpath_enabled(struct hif_softc *scn) +{ + return scn->fastpath_mode_on; +} +#else +void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id) +{ +} + +static inline bool ce_is_fastpath_enabled(struct hif_softc *scn) +{ + return false; +} +#endif /* WLAN_FEATURE_FASTPATH */ + +static int +ce_send_nolock_legacy(struct CE_handle *copyeng, + void *per_transfer_context, + qdf_dma_addr_t buffer, + uint32_t nbytes, + uint32_t transfer_id, + uint32_t flags, + uint32_t user_flags) +{ + int status; + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct CE_ring_state *src_ring = CE_state->src_ring; + uint32_t ctrl_addr = CE_state->ctrl_addr; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int sw_index = src_ring->sw_index; + unsigned int write_index = src_ring->write_index; + uint64_t dma_addr = buffer; + struct hif_softc *scn = CE_state->scn; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return QDF_STATUS_E_FAILURE; + if (unlikely(CE_RING_DELTA(nentries_mask, + write_index, sw_index - 1) <= 0)) { + OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL); + Q_TARGET_ACCESS_END(scn); + return QDF_STATUS_E_FAILURE; + } + { + enum hif_ce_event_type event_type; + struct CE_src_desc *src_ring_base = + (struct CE_src_desc *)src_ring->base_addr_owner_space; + struct CE_src_desc *shadow_base = + (struct CE_src_desc *)src_ring->shadow_base; + struct CE_src_desc *src_desc = + CE_SRC_RING_TO_DESC(src_ring_base, write_index); + struct CE_src_desc *shadow_src_desc = + CE_SRC_RING_TO_DESC(shadow_base, write_index); + + /* Update low 32 bits source descriptor address */ + shadow_src_desc->buffer_addr = + (uint32_t)(dma_addr & 0xFFFFFFFF); +#ifdef QCA_WIFI_3_0 + shadow_src_desc->buffer_addr_hi = + (uint32_t)((dma_addr >> 32) & 0x1F); + user_flags |= shadow_src_desc->buffer_addr_hi; + memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags, + sizeof(uint32_t)); +#endif + shadow_src_desc->target_int_disable = 0; + shadow_src_desc->host_int_disable = 0; + + shadow_src_desc->meta_data = transfer_id; + + /* + * Set the swap bit if: + * typical sends on this CE are swapped (host is big-endian) + * and this send doesn't disable the swapping + * (data is not bytestream) + */ + shadow_src_desc->byte_swap = + (((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA) + != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0)); + shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0); + shadow_src_desc->nbytes = nbytes; + ce_validate_nbytes(nbytes, CE_state); + + *src_desc = *shadow_src_desc; + + src_ring->per_transfer_context[write_index] = + per_transfer_context; + + /* Update Source Ring Write Index */ + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + + /* WORKAROUND */ + if (shadow_src_desc->gather) { + event_type = HIF_TX_GATHER_DESC_POST; + } else if (qdf_unlikely(CE_state->state != CE_RUNNING)) { + event_type = HIF_TX_DESC_SOFTWARE_POST; + CE_state->state = CE_PENDING; + } else { + event_type = HIF_TX_DESC_POST; + war_ce_src_ring_write_idx_set(scn, ctrl_addr, + write_index); + } + + /* src_ring->write index hasn't been updated event though + * the register has allready been written to. + */ + hif_record_ce_desc_event(scn, CE_state->id, event_type, + (union ce_desc *)shadow_src_desc, per_transfer_context, + src_ring->write_index, nbytes); + + src_ring->write_index = write_index; + status = QDF_STATUS_SUCCESS; + } + Q_TARGET_ACCESS_END(scn); + return status; +} + +static int +ce_sendlist_send_legacy(struct CE_handle *copyeng, + void *per_transfer_context, + struct ce_sendlist *sendlist, unsigned int transfer_id) +{ + int status = -ENOMEM; + struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist; + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct CE_ring_state *src_ring = CE_state->src_ring; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int num_items = sl->num_items; + unsigned int sw_index; + unsigned int write_index; + struct hif_softc *scn = CE_state->scn; + + QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries)); + + qdf_spin_lock_bh(&CE_state->ce_index_lock); + + if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data && + Q_TARGET_ACCESS_BEGIN(scn) == 0) { + src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR( + scn, CE_state->ctrl_addr); + Q_TARGET_ACCESS_END(scn); + } + + sw_index = src_ring->sw_index; + write_index = src_ring->write_index; + + if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >= + num_items) { + struct ce_sendlist_item *item; + int i; + + /* handle all but the last item uniformly */ + for (i = 0; i < num_items - 1; i++) { + item = &sl->item[i]; + /* TBDXXX: Support extensible sendlist_types? */ + QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE); + status = ce_send_nolock_legacy(copyeng, + CE_SENDLIST_ITEM_CTXT, + (qdf_dma_addr_t)item->data, + item->u.nbytes, transfer_id, + item->flags | CE_SEND_FLAG_GATHER, + item->user_flags); + QDF_ASSERT(status == QDF_STATUS_SUCCESS); + } + /* provide valid context pointer for final item */ + item = &sl->item[i]; + /* TBDXXX: Support extensible sendlist_types? */ + QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE); + status = ce_send_nolock_legacy(copyeng, per_transfer_context, + (qdf_dma_addr_t) item->data, + item->u.nbytes, + transfer_id, item->flags, + item->user_flags); + QDF_ASSERT(status == QDF_STATUS_SUCCESS); + QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context, + QDF_NBUF_TX_PKT_CE); + DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context, + QDF_DP_TRACE_CE_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, + (uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data), + sizeof(((qdf_nbuf_t)per_transfer_context)->data), + QDF_TX)); + } else { + /* + * Probably not worth the additional complexity to support + * partial sends with continuation or notification. We expect + * to use large rings and small sendlists. If we can't handle + * the entire request at once, punt it back to the caller. + */ + } + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + + return status; +} + +/** + * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine + * @coyeng: copy engine handle + * @per_recv_context: virtual address of the nbuf + * @buffer: physical address of the nbuf + * + * Return: 0 if the buffer is enqueued + */ +static int +ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng, + void *per_recv_context, qdf_dma_addr_t buffer) +{ + int status; + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct CE_ring_state *dest_ring = CE_state->dest_ring; + uint32_t ctrl_addr = CE_state->ctrl_addr; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int write_index; + unsigned int sw_index; + uint64_t dma_addr = buffer; + struct hif_softc *scn = CE_state->scn; + + qdf_spin_lock_bh(&CE_state->ce_index_lock); + write_index = dest_ring->write_index; + sw_index = dest_ring->sw_index; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) { + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + return -EIO; + } + + if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) || + (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) { + struct CE_dest_desc *dest_ring_base = + (struct CE_dest_desc *)dest_ring->base_addr_owner_space; + struct CE_dest_desc *dest_desc = + CE_DEST_RING_TO_DESC(dest_ring_base, write_index); + + /* Update low 32 bit destination descriptor */ + dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF); +#ifdef QCA_WIFI_3_0 + dest_desc->buffer_addr_hi = + (uint32_t)((dma_addr >> 32) & 0x1F); +#endif + dest_desc->nbytes = 0; + + dest_ring->per_transfer_context[write_index] = + per_recv_context; + + hif_record_ce_desc_event(scn, CE_state->id, + HIF_RX_DESC_POST, + (union ce_desc *)dest_desc, + per_recv_context, + write_index, 0); + + /* Update Destination Ring Write Index */ + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + if (write_index != sw_index) { + CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index); + dest_ring->write_index = write_index; + } + status = QDF_STATUS_SUCCESS; + } else + status = QDF_STATUS_E_FAILURE; + + Q_TARGET_ACCESS_END(scn); + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + return status; +} + +static unsigned int +ce_send_entries_done_nolock_legacy(struct hif_softc *scn, + struct CE_state *CE_state) +{ + struct CE_ring_state *src_ring = CE_state->src_ring; + uint32_t ctrl_addr = CE_state->ctrl_addr; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int sw_index; + unsigned int read_index; + + sw_index = src_ring->sw_index; + read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr); + + return CE_RING_DELTA(nentries_mask, sw_index, read_index); +} + +static unsigned int +ce_recv_entries_done_nolock_legacy(struct hif_softc *scn, + struct CE_state *CE_state) +{ + struct CE_ring_state *dest_ring = CE_state->dest_ring; + uint32_t ctrl_addr = CE_state->ctrl_addr; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int sw_index; + unsigned int read_index; + + sw_index = dest_ring->sw_index; + read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr); + + return CE_RING_DELTA(nentries_mask, sw_index, read_index); +} + +static int +ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *flagsp) +{ + int status; + struct CE_ring_state *dest_ring = CE_state->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int sw_index = dest_ring->sw_index; + struct hif_softc *scn = CE_state->scn; + struct CE_dest_desc *dest_ring_base = + (struct CE_dest_desc *)dest_ring->base_addr_owner_space; + struct CE_dest_desc *dest_desc = + CE_DEST_RING_TO_DESC(dest_ring_base, sw_index); + int nbytes; + struct CE_dest_desc dest_desc_info; + /* + * By copying the dest_desc_info element to local memory, we could + * avoid extra memory read from non-cachable memory. + */ + dest_desc_info = *dest_desc; + nbytes = dest_desc_info.nbytes; + if (nbytes == 0) { + /* + * This closes a relatively unusual race where the Host + * sees the updated DRRI before the update to the + * corresponding descriptor has completed. We treat this + * as a descriptor that is not yet done. + */ + status = QDF_STATUS_E_FAILURE; + goto done; + } + + hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION, + (union ce_desc *)dest_desc, + dest_ring->per_transfer_context[sw_index], + sw_index, 0); + + dest_desc->nbytes = 0; + + /* Return data from completed destination descriptor */ + *bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info); + *nbytesp = nbytes; + *transfer_idp = dest_desc_info.meta_data; + *flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0; + + if (per_CE_contextp) + *per_CE_contextp = CE_state->recv_context; + + if (per_transfer_contextp) { + *per_transfer_contextp = + dest_ring->per_transfer_context[sw_index]; + } + dest_ring->per_transfer_context[sw_index] = 0; /* sanity */ + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + dest_ring->sw_index = sw_index; + status = QDF_STATUS_SUCCESS; + +done: + return status; +} + +/* NB: Modeled after ce_completed_recv_next_nolock */ +static QDF_STATUS +ce_revoke_recv_next_legacy(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp) +{ + struct CE_state *CE_state; + struct CE_ring_state *dest_ring; + unsigned int nentries_mask; + unsigned int sw_index; + unsigned int write_index; + QDF_STATUS status; + struct hif_softc *scn; + + CE_state = (struct CE_state *)copyeng; + dest_ring = CE_state->dest_ring; + if (!dest_ring) + return QDF_STATUS_E_FAILURE; + + scn = CE_state->scn; + qdf_spin_lock(&CE_state->ce_index_lock); + nentries_mask = dest_ring->nentries_mask; + sw_index = dest_ring->sw_index; + write_index = dest_ring->write_index; + if (write_index != sw_index) { + struct CE_dest_desc *dest_ring_base = + (struct CE_dest_desc *)dest_ring-> + base_addr_owner_space; + struct CE_dest_desc *dest_desc = + CE_DEST_RING_TO_DESC(dest_ring_base, sw_index); + + /* Return data from completed destination descriptor */ + *bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc); + + if (per_CE_contextp) + *per_CE_contextp = CE_state->recv_context; + + if (per_transfer_contextp) { + *per_transfer_contextp = + dest_ring->per_transfer_context[sw_index]; + } + dest_ring->per_transfer_context[sw_index] = 0; /* sanity */ + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + dest_ring->sw_index = sw_index; + status = QDF_STATUS_SUCCESS; + } else { + status = QDF_STATUS_E_FAILURE; + } + qdf_spin_unlock(&CE_state->ce_index_lock); + + return status; +} + +/* + * Guts of ce_completed_send_next. + * The caller takes responsibility for any necessary locking. + */ +static int +ce_completed_send_next_nolock_legacy(struct CE_state *CE_state, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *sw_idx, + unsigned int *hw_idx, + uint32_t *toeplitz_hash_result) +{ + int status = QDF_STATUS_E_FAILURE; + struct CE_ring_state *src_ring = CE_state->src_ring; + uint32_t ctrl_addr = CE_state->ctrl_addr; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int sw_index = src_ring->sw_index; + unsigned int read_index; + struct hif_softc *scn = CE_state->scn; + + if (src_ring->hw_index == sw_index) { + /* + * The SW completion index has caught up with the cached + * version of the HW completion index. + * Update the cached HW completion index to see whether + * the SW has really caught up to the HW, or if the cached + * value of the HW index has become stale. + */ + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return QDF_STATUS_E_FAILURE; + src_ring->hw_index = + CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr); + if (Q_TARGET_ACCESS_END(scn) < 0) + return QDF_STATUS_E_FAILURE; + } + read_index = src_ring->hw_index; + + if (sw_idx) + *sw_idx = sw_index; + + if (hw_idx) + *hw_idx = read_index; + + if ((read_index != sw_index) && (read_index != 0xffffffff)) { + struct CE_src_desc *shadow_base = + (struct CE_src_desc *)src_ring->shadow_base; + struct CE_src_desc *shadow_src_desc = + CE_SRC_RING_TO_DESC(shadow_base, sw_index); +#ifdef QCA_WIFI_3_0 + struct CE_src_desc *src_ring_base = + (struct CE_src_desc *)src_ring->base_addr_owner_space; + struct CE_src_desc *src_desc = + CE_SRC_RING_TO_DESC(src_ring_base, sw_index); +#endif + hif_record_ce_desc_event(scn, CE_state->id, + HIF_TX_DESC_COMPLETION, + (union ce_desc *)shadow_src_desc, + src_ring->per_transfer_context[sw_index], + sw_index, shadow_src_desc->nbytes); + + /* Return data from completed source descriptor */ + *bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc); + *nbytesp = shadow_src_desc->nbytes; + *transfer_idp = shadow_src_desc->meta_data; +#ifdef QCA_WIFI_3_0 + *toeplitz_hash_result = src_desc->toeplitz_hash_result; +#else + *toeplitz_hash_result = 0; +#endif + if (per_CE_contextp) + *per_CE_contextp = CE_state->send_context; + + if (per_transfer_contextp) { + *per_transfer_contextp = + src_ring->per_transfer_context[sw_index]; + } + src_ring->per_transfer_context[sw_index] = 0; /* sanity */ + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + src_ring->sw_index = sw_index; + status = QDF_STATUS_SUCCESS; + } + + return status; +} + +static QDF_STATUS +ce_cancel_send_next_legacy(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + uint32_t *toeplitz_hash_result) +{ + struct CE_state *CE_state; + struct CE_ring_state *src_ring; + unsigned int nentries_mask; + unsigned int sw_index; + unsigned int write_index; + QDF_STATUS status; + struct hif_softc *scn; + + CE_state = (struct CE_state *)copyeng; + src_ring = CE_state->src_ring; + if (!src_ring) + return QDF_STATUS_E_FAILURE; + + scn = CE_state->scn; + qdf_spin_lock(&CE_state->ce_index_lock); + nentries_mask = src_ring->nentries_mask; + sw_index = src_ring->sw_index; + write_index = src_ring->write_index; + + if (write_index != sw_index) { + struct CE_src_desc *src_ring_base = + (struct CE_src_desc *)src_ring->base_addr_owner_space; + struct CE_src_desc *src_desc = + CE_SRC_RING_TO_DESC(src_ring_base, sw_index); + + /* Return data from completed source descriptor */ + *bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc); + *nbytesp = src_desc->nbytes; + *transfer_idp = src_desc->meta_data; +#ifdef QCA_WIFI_3_0 + *toeplitz_hash_result = src_desc->toeplitz_hash_result; +#else + *toeplitz_hash_result = 0; +#endif + + if (per_CE_contextp) + *per_CE_contextp = CE_state->send_context; + + if (per_transfer_contextp) { + *per_transfer_contextp = + src_ring->per_transfer_context[sw_index]; + } + src_ring->per_transfer_context[sw_index] = 0; /* sanity */ + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + src_ring->sw_index = sw_index; + status = QDF_STATUS_SUCCESS; + } else { + status = QDF_STATUS_E_FAILURE; + } + qdf_spin_unlock(&CE_state->ce_index_lock); + + return status; +} + +/* + * Adjust interrupts for the copy complete handler. + * If it's needed for either send or recv, then unmask + * this interrupt; otherwise, mask it. + * + * Called with target_lock held. + */ +static void +ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state, + int disable_copy_compl_intr) +{ + uint32_t ctrl_addr = CE_state->ctrl_addr; + struct hif_softc *scn = CE_state->scn; + + CE_state->disable_copy_compl_intr = disable_copy_compl_intr; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) { + hif_err_rl("%s: target access is not allowed", __func__); + return; + } + + if ((!disable_copy_compl_intr) && + (CE_state->send_cb || CE_state->recv_cb)) + CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr); + else + CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr); + + if (CE_state->watermark_cb) + CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr); + else + CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr); + Q_TARGET_ACCESS_END(scn); +} + +static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id, + struct CE_ring_state *src_ring, + struct CE_attr *attr) +{ + uint32_t ctrl_addr; + uint64_t dma_addr; + + QDF_ASSERT(ce_id < scn->ce_count); + ctrl_addr = CE_BASE_ADDRESS(ce_id); + + src_ring->hw_index = + CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr); + src_ring->sw_index = src_ring->hw_index; + src_ring->write_index = + CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr); + dma_addr = src_ring->base_addr_CE_space; + CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr, + (uint32_t)(dma_addr & 0xFFFFFFFF)); + + /* if SR_BA_ADDRESS_HIGH register exists */ + if (is_register_supported(SR_BA_ADDRESS_HIGH)) { + uint32_t tmp; + + tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET( + scn, ctrl_addr); + tmp &= ~0x1F; + dma_addr = ((dma_addr >> 32) & 0x1F) | tmp; + CE_SRC_RING_BASE_ADDR_HIGH_SET(scn, + ctrl_addr, (uint32_t)dma_addr); + } + CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries); + CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max); +#ifdef BIG_ENDIAN_HOST + /* Enable source ring byte swap for big endian host */ + CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1); +#endif + CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0); + CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries); +} + +static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id, + struct CE_ring_state *dest_ring, + struct CE_attr *attr) +{ + uint32_t ctrl_addr; + uint64_t dma_addr; + + QDF_ASSERT(ce_id < scn->ce_count); + ctrl_addr = CE_BASE_ADDRESS(ce_id); + dest_ring->sw_index = + CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr); + dest_ring->write_index = + CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr); + dma_addr = dest_ring->base_addr_CE_space; + CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr, + (uint32_t)(dma_addr & 0xFFFFFFFF)); + + /* if DR_BA_ADDRESS_HIGH exists */ + if (is_register_supported(DR_BA_ADDRESS_HIGH)) { + uint32_t tmp; + + tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn, + ctrl_addr); + tmp &= ~0x1F; + dma_addr = ((dma_addr >> 32) & 0x1F) | tmp; + CE_DEST_RING_BASE_ADDR_HIGH_SET(scn, + ctrl_addr, (uint32_t)dma_addr); + } + + CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries); +#ifdef BIG_ENDIAN_HOST + /* Enable Dest ring byte swap for big endian host */ + CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1); +#endif + CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0); + CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries); +} + +static uint32_t ce_get_desc_size_legacy(uint8_t ring_type) +{ + switch (ring_type) { + case CE_RING_SRC: + return sizeof(struct CE_src_desc); + case CE_RING_DEST: + return sizeof(struct CE_dest_desc); + case CE_RING_STATUS: + qdf_assert(0); + return 0; + default: + return 0; + } + + return 0; +} + +static int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type, + uint32_t ce_id, struct CE_ring_state *ring, + struct CE_attr *attr) +{ + int status = Q_TARGET_ACCESS_BEGIN(scn); + + if (status < 0) + goto out; + + switch (ring_type) { + case CE_RING_SRC: + ce_legacy_src_ring_setup(scn, ce_id, ring, attr); + break; + case CE_RING_DEST: + ce_legacy_dest_ring_setup(scn, ce_id, ring, attr); + break; + case CE_RING_STATUS: + default: + qdf_assert(0); + break; + } + + Q_TARGET_ACCESS_END(scn); +out: + return status; +} + +static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn, + struct pld_shadow_reg_v2_cfg **shadow_config, + int *num_shadow_registers_configured) +{ + *num_shadow_registers_configured = 0; + *shadow_config = NULL; +} + +static bool ce_check_int_watermark(struct CE_state *CE_state, + unsigned int *flags) +{ + uint32_t ce_int_status; + uint32_t ctrl_addr = CE_state->ctrl_addr; + struct hif_softc *scn = CE_state->scn; + + ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr); + if (ce_int_status & CE_WATERMARK_MASK) { + /* Convert HW IS bits to software flags */ + *flags = + (ce_int_status & CE_WATERMARK_MASK) >> + CE_WM_SHFT; + return true; + } + + return false; +} + +#ifdef HIF_CE_LOG_INFO +/** + * ce_get_index_info_legacy(): Get CE index info + * @scn: HIF Context + * @ce_state: CE opaque handle + * @info: CE info + * + * Return: 0 for success and non zero for failure + */ +static +int ce_get_index_info_legacy(struct hif_softc *scn, void *ce_state, + struct ce_index *info) +{ + struct CE_state *state = (struct CE_state *)ce_state; + + info->id = state->id; + if (state->src_ring) { + info->u.legacy_info.sw_index = state->src_ring->sw_index; + info->u.legacy_info.write_index = state->src_ring->write_index; + } else if (state->dest_ring) { + info->u.legacy_info.sw_index = state->dest_ring->sw_index; + info->u.legacy_info.write_index = state->dest_ring->write_index; + } + + return 0; +} +#endif + +struct ce_ops ce_service_legacy = { + .ce_get_desc_size = ce_get_desc_size_legacy, + .ce_ring_setup = ce_ring_setup_legacy, + .ce_sendlist_send = ce_sendlist_send_legacy, + .ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy, + .ce_revoke_recv_next = ce_revoke_recv_next_legacy, + .ce_cancel_send_next = ce_cancel_send_next_legacy, + .ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy, + .ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy, + .ce_send_nolock = ce_send_nolock_legacy, + .watermark_int = ce_check_int_watermark, + .ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy, + .ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy, + .ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy, + .ce_prepare_shadow_register_v2_cfg = + ce_prepare_shadow_register_v2_cfg_legacy, +#ifdef HIF_CE_LOG_INFO + .ce_get_index_info = + ce_get_index_info_legacy, +#endif +}; + +struct ce_ops *ce_services_legacy(void) +{ + return &ce_service_legacy; +} + +qdf_export_symbol(ce_services_legacy); + +void ce_service_legacy_init(void) +{ + ce_service_register_module(CE_SVC_LEGACY, &ce_services_legacy); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service_srng.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service_srng.c new file mode 100644 index 0000000000000000000000000000000000000000..32d5cfe3623c6f3340c1c6b008ef67675e6d5812 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service_srng.c @@ -0,0 +1,1026 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "hif_io32.h" +#include "reg_struct.h" +#include "ce_api.h" +#include "ce_main.h" +#include "ce_internal.h" +#include "ce_reg.h" +#include "qdf_lock.h" +#include "regtable.h" +#include "hif_main.h" +#include "hif_debug.h" +#include "hal_api.h" +#include "pld_common.h" +#include "qdf_module.h" +#include "hif.h" + +/* + * Support for Copy Engine hardware, which is mainly used for + * communication between Host and Target over a PCIe interconnect. + */ + +/* + * A single CopyEngine (CE) comprises two "rings": + * a source ring + * a destination ring + * + * Each ring consists of a number of descriptors which specify + * an address, length, and meta-data. + * + * Typically, one side of the PCIe interconnect (Host or Target) + * controls one ring and the other side controls the other ring. + * The source side chooses when to initiate a transfer and it + * chooses what to send (buffer address, length). The destination + * side keeps a supply of "anonymous receive buffers" available and + * it handles incoming data as it arrives (when the destination + * receives an interrupt). + * + * The sender may send a simple buffer (address/length) or it may + * send a small list of buffers. When a small list is sent, hardware + * "gathers" these and they end up in a single destination buffer + * with a single interrupt. + * + * There are several "contexts" managed by this layer -- more, it + * may seem -- than should be needed. These are provided mainly for + * maximum flexibility and especially to facilitate a simpler HIF + * implementation. There are per-CopyEngine recv, send, and watermark + * contexts. These are supplied by the caller when a recv, send, + * or watermark handler is established and they are echoed back to + * the caller when the respective callbacks are invoked. There is + * also a per-transfer context supplied by the caller when a buffer + * (or sendlist) is sent and when a buffer is enqueued for recv. + * These per-transfer contexts are echoed back to the caller when + * the buffer is sent/received. + * Target TX harsh result toeplitz_hash_result + */ + +#define CE_ADDR_COPY(desc, dma_addr) do {\ + (desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\ + 0xFFFFFFFF);\ + (desc)->buffer_addr_hi =\ + (uint32_t)(((dma_addr) >> 32) & 0xFF);\ + } while (0) + +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) +void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id, + enum hif_ce_event_type type, + union ce_srng_desc *descriptor, + void *memory, int index, + int len, void *hal_ring) +{ + int record_index; + struct hif_ce_desc_event *event; + struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; + struct hif_ce_desc_event *hist_ev = NULL; + + if (ce_id < CE_COUNT_MAX) + hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id]; + else + return; + + if (ce_id >= CE_COUNT_MAX) + return; + + if (!ce_hist->enable[ce_id]) + return; + + if (!hist_ev) + return; + + record_index = get_next_record_index( + &ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX); + + event = &hist_ev[record_index]; + + hif_clear_ce_desc_debug_data(event); + + event->type = type; + event->time = qdf_get_log_timestamp(); + event->cpu_id = qdf_get_cpu(); + + if (descriptor) + qdf_mem_copy(&event->descriptor, descriptor, + hal_get_entrysize_from_srng(hal_ring)); + + if (hal_ring) + hal_get_sw_hptp(scn->hal_soc, hal_ring, &event->current_tp, + &event->current_hp); + + event->memory = memory; + event->index = index; + + if (event->type == HIF_CE_SRC_RING_BUFFER_POST) + hif_ce_desc_record_rx_paddr(scn, event, memory); + + if (ce_hist->data_enable[ce_id]) + hif_ce_desc_data_record(event, len); +} +#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */ + +static int +ce_send_nolock_srng(struct CE_handle *copyeng, + void *per_transfer_context, + qdf_dma_addr_t buffer, + uint32_t nbytes, + uint32_t transfer_id, + uint32_t flags, + uint32_t user_flags) +{ + int status; + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct CE_ring_state *src_ring = CE_state->src_ring; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int write_index = src_ring->write_index; + uint64_t dma_addr = buffer; + struct hif_softc *scn = CE_state->scn; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return QDF_STATUS_E_FAILURE; + if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, + false) <= 0)) { + OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL); + Q_TARGET_ACCESS_END(scn); + return QDF_STATUS_E_FAILURE; + } + { + enum hif_ce_event_type event_type = HIF_CE_SRC_RING_BUFFER_POST; + struct ce_srng_src_desc *src_desc; + + if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) { + Q_TARGET_ACCESS_END(scn); + return QDF_STATUS_E_FAILURE; + } + + src_desc = hal_srng_src_get_next_reaped(scn->hal_soc, + src_ring->srng_ctx); + if (!src_desc) { + Q_TARGET_ACCESS_END(scn); + return QDF_STATUS_E_INVAL; + } + + /* Update low 32 bits source descriptor address */ + src_desc->buffer_addr_lo = + (uint32_t)(dma_addr & 0xFFFFFFFF); + src_desc->buffer_addr_hi = + (uint32_t)((dma_addr >> 32) & 0xFF); + + src_desc->meta_data = transfer_id; + + /* + * Set the swap bit if: + * typical sends on this CE are swapped (host is big-endian) + * and this send doesn't disable the swapping + * (data is not bytestream) + */ + src_desc->byte_swap = + (((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA) + != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0)); + src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0); + src_desc->nbytes = nbytes; + + src_ring->per_transfer_context[write_index] = + per_transfer_context; + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + + hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx); + + /* src_ring->write index hasn't been updated event though + * the register has allready been written to. + */ + hif_record_ce_srng_desc_event(scn, CE_state->id, event_type, + (union ce_srng_desc *)src_desc, + per_transfer_context, + src_ring->write_index, nbytes, + src_ring->srng_ctx); + + src_ring->write_index = write_index; + status = QDF_STATUS_SUCCESS; + } + Q_TARGET_ACCESS_END(scn); + return status; +} + +static int +ce_sendlist_send_srng(struct CE_handle *copyeng, + void *per_transfer_context, + struct ce_sendlist *sendlist, unsigned int transfer_id) +{ + int status = -ENOMEM; + struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist; + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct CE_ring_state *src_ring = CE_state->src_ring; + unsigned int num_items = sl->num_items; + unsigned int sw_index; + unsigned int write_index; + struct hif_softc *scn = CE_state->scn; + + QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries)); + + qdf_spin_lock_bh(&CE_state->ce_index_lock); + sw_index = src_ring->sw_index; + write_index = src_ring->write_index; + + if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >= + num_items) { + struct ce_sendlist_item *item; + int i; + + /* handle all but the last item uniformly */ + for (i = 0; i < num_items - 1; i++) { + item = &sl->item[i]; + /* TBDXXX: Support extensible sendlist_types? */ + QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE); + status = ce_send_nolock_srng(copyeng, + CE_SENDLIST_ITEM_CTXT, + (qdf_dma_addr_t) item->data, + item->u.nbytes, transfer_id, + item->flags | CE_SEND_FLAG_GATHER, + item->user_flags); + QDF_ASSERT(status == QDF_STATUS_SUCCESS); + } + /* provide valid context pointer for final item */ + item = &sl->item[i]; + /* TBDXXX: Support extensible sendlist_types? */ + QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE); + status = ce_send_nolock_srng(copyeng, per_transfer_context, + (qdf_dma_addr_t) item->data, + item->u.nbytes, + transfer_id, item->flags, + item->user_flags); + QDF_ASSERT(status == QDF_STATUS_SUCCESS); + QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context, + QDF_NBUF_TX_PKT_CE); + DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context, + QDF_DP_TRACE_CE_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, + (uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data), + sizeof(((qdf_nbuf_t)per_transfer_context)->data), QDF_TX)); + } else { + /* + * Probably not worth the additional complexity to support + * partial sends with continuation or notification. We expect + * to use large rings and small sendlists. If we can't handle + * the entire request at once, punt it back to the caller. + */ + } + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + + return status; +} + +#define SLOTS_PER_DATAPATH_TX 2 + +#ifndef AH_NEED_TX_DATA_SWAP +#define AH_NEED_TX_DATA_SWAP 0 +#endif +/** + * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine + * @coyeng: copy engine handle + * @per_recv_context: virtual address of the nbuf + * @buffer: physical address of the nbuf + * + * Return: 0 if the buffer is enqueued + */ +static int +ce_recv_buf_enqueue_srng(struct CE_handle *copyeng, + void *per_recv_context, qdf_dma_addr_t buffer) +{ + int status; + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct CE_ring_state *dest_ring = CE_state->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int write_index; + unsigned int sw_index; + uint64_t dma_addr = buffer; + struct hif_softc *scn = CE_state->scn; + struct ce_srng_dest_desc *dest_desc = NULL; + + qdf_spin_lock_bh(&CE_state->ce_index_lock); + write_index = dest_ring->write_index; + sw_index = dest_ring->sw_index; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) { + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + return -EIO; + } + + if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) { + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + return QDF_STATUS_E_FAILURE; + } + + if ((hal_srng_src_num_avail(scn->hal_soc, + dest_ring->srng_ctx, false) > 0)) { + dest_desc = hal_srng_src_get_next(scn->hal_soc, + dest_ring->srng_ctx); + + if (!dest_desc) { + status = QDF_STATUS_E_FAILURE; + } else { + + CE_ADDR_COPY(dest_desc, dma_addr); + + dest_ring->per_transfer_context[write_index] = + per_recv_context; + + /* Update Destination Ring Write Index */ + write_index = CE_RING_IDX_INCR(nentries_mask, + write_index); + status = QDF_STATUS_SUCCESS; + } + } else { + dest_desc = NULL; + status = QDF_STATUS_E_FAILURE; + } + + dest_ring->write_index = write_index; + hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx); + hif_record_ce_srng_desc_event(scn, CE_state->id, + HIF_CE_DEST_RING_BUFFER_POST, + (union ce_srng_desc *)dest_desc, + per_recv_context, + dest_ring->write_index, 0, + dest_ring->srng_ctx); + + Q_TARGET_ACCESS_END(scn); + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + return status; +} + +/* + * Guts of ce_recv_entries_done. + * The caller takes responsibility for any necessary locking. + */ +static unsigned int +ce_recv_entries_done_nolock_srng(struct hif_softc *scn, + struct CE_state *CE_state) +{ + struct CE_ring_state *status_ring = CE_state->status_ring; + + return hal_srng_dst_num_valid(scn->hal_soc, + status_ring->srng_ctx, false); +} + +/* + * Guts of ce_send_entries_done. + * The caller takes responsibility for any necessary locking. + */ +static unsigned int +ce_send_entries_done_nolock_srng(struct hif_softc *scn, + struct CE_state *CE_state) +{ + + struct CE_ring_state *src_ring = CE_state->src_ring; + int count = 0; + + if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) + return 0; + + count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx); + + hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx); + + return count; +} + +/* + * Guts of ce_completed_recv_next. + * The caller takes responsibility for any necessary locking. + */ +static int +ce_completed_recv_next_nolock_srng(struct CE_state *CE_state, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *flagsp) +{ + int status; + struct CE_ring_state *dest_ring = CE_state->dest_ring; + struct CE_ring_state *status_ring = CE_state->status_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int sw_index = dest_ring->sw_index; + struct hif_softc *scn = CE_state->scn; + struct ce_srng_dest_status_desc *dest_status = NULL; + int nbytes; + struct ce_srng_dest_status_desc dest_status_info; + + if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + dest_status = hal_srng_dst_peek(scn->hal_soc, status_ring->srng_ctx); + if (!dest_status) { + status = QDF_STATUS_E_FAILURE; + hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx); + goto done; + } + + /* + * By copying the dest_desc_info element to local memory, we could + * avoid extra memory read from non-cachable memory. + */ + dest_status_info = *dest_status; + nbytes = dest_status_info.nbytes; + if (nbytes == 0) { + uint32_t hp, tp; + + /* + * This closes a relatively unusual race where the Host + * sees the updated DRRI before the update to the + * corresponding descriptor has completed. We treat this + * as a descriptor that is not yet done. + */ + hal_get_sw_hptp(scn->hal_soc, status_ring->srng_ctx, + &hp, &tp); + hif_info("No data to reap, hp %d tp %d", hp, tp); + status = QDF_STATUS_E_FAILURE; + hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx); + goto done; + } + + /* + * Move the tail pointer since nbytes is non-zero and + * this entry is processed. + */ + hal_srng_dst_get_next(scn->hal_soc, status_ring->srng_ctx); + + dest_status->nbytes = 0; + + *nbytesp = nbytes; + *transfer_idp = dest_status_info.meta_data; + *flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0; + + if (per_CE_contextp) + *per_CE_contextp = CE_state->recv_context; + + /* NOTE: sw_index is more like a read_index in this context. It has a + * one-to-one mapping with status ring. + * Get the per trasnfer context from dest_ring. + */ + if (per_transfer_contextp) + *per_transfer_contextp = + dest_ring->per_transfer_context[sw_index]; + + dest_ring->per_transfer_context[sw_index] = 0; /* sanity */ + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + dest_ring->sw_index = sw_index; + status = QDF_STATUS_SUCCESS; + + hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx); + hif_record_ce_srng_desc_event(scn, CE_state->id, + HIF_CE_DEST_RING_BUFFER_REAP, + NULL, + dest_ring-> + per_transfer_context[sw_index], + dest_ring->sw_index, nbytes, + dest_ring->srng_ctx); + +done: + hif_record_ce_srng_desc_event(scn, CE_state->id, + HIF_CE_DEST_STATUS_RING_REAP, + (union ce_srng_desc *)dest_status, + NULL, + -1, 0, + status_ring->srng_ctx); + + return status; +} + +static QDF_STATUS +ce_revoke_recv_next_srng(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, qdf_dma_addr_t *bufferp) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct CE_ring_state *dest_ring = CE_state->dest_ring; + unsigned int sw_index; + + if (!dest_ring) + return QDF_STATUS_E_FAILURE; + + sw_index = dest_ring->sw_index; + + if (per_CE_contextp) + *per_CE_contextp = CE_state->recv_context; + + /* NOTE: sw_index is more like a read_index in this context. It has a + * one-to-one mapping with status ring. + * Get the per trasnfer context from dest_ring. + */ + if (per_transfer_contextp) + *per_transfer_contextp = + dest_ring->per_transfer_context[sw_index]; + + if (!dest_ring->per_transfer_context[sw_index]) + return QDF_STATUS_E_FAILURE; + + /* provide end condition */ + dest_ring->per_transfer_context[sw_index] = NULL; + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index); + dest_ring->sw_index = sw_index; + return QDF_STATUS_SUCCESS; +} + +/* + * Guts of ce_completed_send_next. + * The caller takes responsibility for any necessary locking. + */ +static int +ce_completed_send_next_nolock_srng(struct CE_state *CE_state, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *sw_idx, + unsigned int *hw_idx, + uint32_t *toeplitz_hash_result) +{ + int status = QDF_STATUS_E_FAILURE; + struct CE_ring_state *src_ring = CE_state->src_ring; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int sw_index = src_ring->sw_index; + unsigned int swi = src_ring->sw_index; + struct hif_softc *scn = CE_state->scn; + struct ce_srng_src_desc *src_desc; + + if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) { + status = QDF_STATUS_E_FAILURE; + return status; + } + + src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx); + if (src_desc) { + hif_record_ce_srng_desc_event(scn, CE_state->id, + HIF_TX_DESC_COMPLETION, + (union ce_srng_desc *)src_desc, + src_ring-> + per_transfer_context[swi], + swi, src_desc->nbytes, + src_ring->srng_ctx); + + /* Return data from completed source descriptor */ + *bufferp = (qdf_dma_addr_t) + (((uint64_t)(src_desc)->buffer_addr_lo + + ((uint64_t)((src_desc)->buffer_addr_hi & + 0xFF) << 32))); + *nbytesp = src_desc->nbytes; + *transfer_idp = src_desc->meta_data; + *toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/ + + if (per_CE_contextp) + *per_CE_contextp = CE_state->send_context; + + /* sw_index is used more like read index */ + if (per_transfer_contextp) + *per_transfer_contextp = + src_ring->per_transfer_context[sw_index]; + + src_ring->per_transfer_context[sw_index] = 0; /* sanity */ + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + src_ring->sw_index = sw_index; + status = QDF_STATUS_SUCCESS; + } + hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx); + + return status; +} + +/* NB: Modelled after ce_completed_send_next */ +static QDF_STATUS +ce_cancel_send_next_srng(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + uint32_t *toeplitz_hash_result) +{ + struct CE_state *CE_state; + int status = QDF_STATUS_E_FAILURE; + struct CE_ring_state *src_ring; + unsigned int nentries_mask; + unsigned int sw_index; + struct hif_softc *scn; + struct ce_srng_src_desc *src_desc; + + CE_state = (struct CE_state *)copyeng; + src_ring = CE_state->src_ring; + if (!src_ring) + return QDF_STATUS_E_FAILURE; + + nentries_mask = src_ring->nentries_mask; + sw_index = src_ring->sw_index; + scn = CE_state->scn; + + if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) { + status = QDF_STATUS_E_FAILURE; + return status; + } + + src_desc = hal_srng_src_pending_reap_next(scn->hal_soc, + src_ring->srng_ctx); + if (src_desc) { + /* Return data from completed source descriptor */ + *bufferp = (qdf_dma_addr_t) + (((uint64_t)(src_desc)->buffer_addr_lo + + ((uint64_t)((src_desc)->buffer_addr_hi & + 0xFF) << 32))); + *nbytesp = src_desc->nbytes; + *transfer_idp = src_desc->meta_data; + *toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/ + + if (per_CE_contextp) + *per_CE_contextp = CE_state->send_context; + + /* sw_index is used more like read index */ + if (per_transfer_contextp) + *per_transfer_contextp = + src_ring->per_transfer_context[sw_index]; + + src_ring->per_transfer_context[sw_index] = 0; /* sanity */ + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + src_ring->sw_index = sw_index; + status = QDF_STATUS_SUCCESS; + } + hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx); + + return status; +} + +/* + * Adjust interrupts for the copy complete handler. + * If it's needed for either send or recv, then unmask + * this interrupt; otherwise, mask it. + * + * Called with target_lock held. + */ +static void +ce_per_engine_handler_adjust_srng(struct CE_state *CE_state, + int disable_copy_compl_intr) +{ +} + +static bool ce_check_int_watermark_srng(struct CE_state *CE_state, + unsigned int *flags) +{ + /*TODO*/ + return false; +} + +static uint32_t ce_get_desc_size_srng(uint8_t ring_type) +{ + switch (ring_type) { + case CE_RING_SRC: + return sizeof(struct ce_srng_src_desc); + case CE_RING_DEST: + return sizeof(struct ce_srng_dest_desc); + case CE_RING_STATUS: + return sizeof(struct ce_srng_dest_status_desc); + default: + return 0; + } + return 0; +} + +static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id, + struct hal_srng_params *ring_params) +{ + uint32_t addr_low; + uint32_t addr_high; + uint32_t msi_data_start; + uint32_t msi_data_count; + uint32_t msi_irq_start; + int ret; + + ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", + &msi_data_count, &msi_data_start, + &msi_irq_start); + + /* msi config not found */ + if (ret) + return; + + pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high); + + ring_params->msi_addr = addr_low; + ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32); + ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start; + ring_params->flags |= HAL_SRNG_MSI_INTR; + + HIF_DBG("%s: ce_id %d, msi_addr %pK, msi_data %d", __func__, ce_id, + (void *)ring_params->msi_addr, ring_params->msi_data); +} + +static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id, + struct CE_ring_state *src_ring, + struct CE_attr *attr) +{ + struct hal_srng_params ring_params = {0}; + + hif_debug("%s: ce_id %d", __func__, ce_id); + + ring_params.ring_base_paddr = src_ring->base_addr_CE_space; + ring_params.ring_base_vaddr = src_ring->base_addr_owner_space; + ring_params.num_entries = src_ring->nentries; + /* + * The minimum increment for the timer is 8us + * A default value of 0 disables the timer + * A valid default value caused continuous interrupts to + * fire with MSI enabled. Need to revisit usage of the timer + */ + + if (!(CE_ATTR_DISABLE_INTR & attr->flags)) { + ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params); + + ring_params.intr_timer_thres_us = 0; + ring_params.intr_batch_cntr_thres_entries = 1; + ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER; + } + + src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0, + &ring_params); +} + +/** + * ce_srng_initialize_dest_timer_interrupt_war() - war initialization + * @dest_ring: ring being initialized + * @ring_params: pointer to initialized parameters + * + * For Napier & Hawkeye v1, the status ring timer interrupts do not work + * As a work arround host configures the destination rings to be a proxy for + * work needing to be done. + * + * The interrupts are setup such that if the destination ring is less than fully + * posted, there is likely undone work for the status ring that the host should + * process. + * + * There is a timing bug in srng based copy engines such that a fully posted + * srng based copy engine has 2 empty entries instead of just one. The copy + * engine data sturctures work with 1 empty entry, but the software frequently + * fails to post the last entry due to the race condition. + */ +static void ce_srng_initialize_dest_timer_interrupt_war( + struct CE_ring_state *dest_ring, + struct hal_srng_params *ring_params) +{ + int num_buffers_when_fully_posted = dest_ring->nentries - 2; + + ring_params->low_threshold = num_buffers_when_fully_posted - 1; + ring_params->intr_timer_thres_us = 1024; + ring_params->intr_batch_cntr_thres_entries = 0; + ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE; +} + +static void ce_srng_dest_ring_setup(struct hif_softc *scn, + uint32_t ce_id, + struct CE_ring_state *dest_ring, + struct CE_attr *attr) +{ + struct hal_srng_params ring_params = {0}; + bool status_ring_timer_thresh_work_arround = true; + + HIF_INFO("%s: ce_id %d", __func__, ce_id); + + ring_params.ring_base_paddr = dest_ring->base_addr_CE_space; + ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space; + ring_params.num_entries = dest_ring->nentries; + ring_params.max_buffer_length = attr->src_sz_max; + + if (!(CE_ATTR_DISABLE_INTR & attr->flags)) { + ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params); + if (status_ring_timer_thresh_work_arround) { + ce_srng_initialize_dest_timer_interrupt_war( + dest_ring, &ring_params); + } else { + /* normal behavior for future chips */ + ring_params.low_threshold = dest_ring->nentries >> 3; + ring_params.intr_timer_thres_us = 100000; + ring_params.intr_batch_cntr_thres_entries = 0; + ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE; + } + ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER; + } + + /*Dest ring is also source ring*/ + dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0, + &ring_params); +} + +#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG +/** + * ce_status_ring_config_int_threshold() - configure ce status ring interrupt + * thresholds + * @scn: hif handle + * @ring_params: ce srng params + * + * Return: None + */ +static inline +void ce_status_ring_config_int_threshold(struct hif_softc *scn, + struct hal_srng_params *ring_params) +{ + ring_params->intr_timer_thres_us = + scn->ini_cfg.ce_status_ring_timer_threshold; + ring_params->intr_batch_cntr_thres_entries = + scn->ini_cfg.ce_status_ring_batch_count_threshold; +} +#else +static inline +void ce_status_ring_config_int_threshold(struct hif_softc *scn, + struct hal_srng_params *ring_params) +{ + ring_params->intr_timer_thres_us = 0x1000; + ring_params->intr_batch_cntr_thres_entries = 0x1; +} +#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */ + +static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id, + struct CE_ring_state *status_ring, + struct CE_attr *attr) +{ + struct hal_srng_params ring_params = {0}; + + HIF_INFO("%s: ce_id %d", __func__, ce_id); + + ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params); + + ring_params.ring_base_paddr = status_ring->base_addr_CE_space; + ring_params.ring_base_vaddr = status_ring->base_addr_owner_space; + ring_params.num_entries = status_ring->nentries; + + if (!(CE_ATTR_DISABLE_INTR & attr->flags)) { + ce_status_ring_config_int_threshold(scn, &ring_params); + } + + status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS, + ce_id, 0, &ring_params); +} + +static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type, + uint32_t ce_id, struct CE_ring_state *ring, + struct CE_attr *attr) +{ + switch (ring_type) { + case CE_RING_SRC: + ce_srng_src_ring_setup(scn, ce_id, ring, attr); + break; + case CE_RING_DEST: + ce_srng_dest_ring_setup(scn, ce_id, ring, attr); + break; + case CE_RING_STATUS: + ce_srng_status_ring_setup(scn, ce_id, ring, attr); + break; + default: + qdf_assert(0); + break; + } + + return 0; +} + +static void ce_construct_shadow_config_srng(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + int ce_id; + + for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { + if (hif_state->host_ce_config[ce_id].src_nentries) + hal_set_one_shadow_config(scn->hal_soc, + CE_SRC, ce_id); + + if (hif_state->host_ce_config[ce_id].dest_nentries) { + hal_set_one_shadow_config(scn->hal_soc, + CE_DST, ce_id); + + hal_set_one_shadow_config(scn->hal_soc, + CE_DST_STATUS, ce_id); + } + } +} + +static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn, + struct pld_shadow_reg_v2_cfg **shadow_config, + int *num_shadow_registers_configured) +{ + if (!scn->hal_soc) { + HIF_ERROR("%s: hal not initialized: not initializing shadow config", + __func__); + return; + } + + hal_get_shadow_config(scn->hal_soc, shadow_config, + num_shadow_registers_configured); + + if (*num_shadow_registers_configured != 0) { + HIF_ERROR("%s: hal shadow register configuration allready constructed", + __func__); + + /* return with original configuration*/ + return; + } + hal_construct_srng_shadow_regs(scn->hal_soc); + ce_construct_shadow_config_srng(scn); + hal_set_shadow_regs(scn->hal_soc); + hal_construct_shadow_regs(scn->hal_soc); + /* get updated configuration */ + hal_get_shadow_config(scn->hal_soc, shadow_config, + num_shadow_registers_configured); +} + +#ifdef HIF_CE_LOG_INFO +/** + * ce_get_index_info_srng(): Get CE index info + * @scn: HIF Context + * @ce_state: CE opaque handle + * @info: CE info + * + * Return: 0 for success and non zero for failure + */ +static +int ce_get_index_info_srng(struct hif_softc *scn, void *ce_state, + struct ce_index *info) +{ + struct CE_state *CE_state = (struct CE_state *)ce_state; + uint32_t tp, hp; + + info->id = CE_state->id; + if (CE_state->src_ring) { + hal_get_sw_hptp(scn->hal_soc, CE_state->src_ring->srng_ctx, + &tp, &hp); + info->u.srng_info.tp = tp; + info->u.srng_info.hp = hp; + } else if (CE_state->dest_ring && CE_state->status_ring) { + hal_get_sw_hptp(scn->hal_soc, CE_state->status_ring->srng_ctx, + &tp, &hp); + info->u.srng_info.status_tp = tp; + info->u.srng_info.status_hp = hp; + hal_get_sw_hptp(scn->hal_soc, CE_state->dest_ring->srng_ctx, + &tp, &hp); + info->u.srng_info.tp = tp; + info->u.srng_info.hp = hp; + } + + return 0; +} +#endif + +static struct ce_ops ce_service_srng = { + .ce_get_desc_size = ce_get_desc_size_srng, + .ce_ring_setup = ce_ring_setup_srng, + .ce_sendlist_send = ce_sendlist_send_srng, + .ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng, + .ce_revoke_recv_next = ce_revoke_recv_next_srng, + .ce_cancel_send_next = ce_cancel_send_next_srng, + .ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng, + .ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng, + .ce_send_nolock = ce_send_nolock_srng, + .watermark_int = ce_check_int_watermark_srng, + .ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng, + .ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng, + .ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng, + .ce_prepare_shadow_register_v2_cfg = + ce_prepare_shadow_register_v2_cfg_srng, +#ifdef HIF_CE_LOG_INFO + .ce_get_index_info = + ce_get_index_info_srng, +#endif +}; + +struct ce_ops *ce_services_srng() +{ + return &ce_service_srng; +} +qdf_export_symbol(ce_services_srng); + +void ce_service_srng_init(void) +{ + ce_service_register_module(CE_SVC_SRNG, &ce_services_srng); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c new file mode 100644 index 0000000000000000000000000000000000000000..ced2765a91d058dcbaba123dd86c422e4aeff547 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c @@ -0,0 +1,763 @@ +/* + * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include "qdf_lock.h" +#include "qdf_types.h" +#include "qdf_status.h" +#include "regtable.h" +#include "hif.h" +#include "hif_io32.h" +#include "ce_main.h" +#include "ce_api.h" +#include "ce_reg.h" +#include "ce_internal.h" +#include "ce_tasklet.h" +#include "pld_common.h" +#include "hif_debug.h" +#include "hif_napi.h" + +/** + * struct tasklet_work + * + * @id: ce_id + * @work: work + */ +struct tasklet_work { + enum ce_id_type id; + void *data; + struct work_struct work; +}; + + +/** + * reschedule_ce_tasklet_work_handler() - reschedule work + * @work: struct work_struct + * + * Return: N/A + */ +static void reschedule_ce_tasklet_work_handler(struct work_struct *work) +{ + struct tasklet_work *ce_work = container_of(work, struct tasklet_work, + work); + struct hif_softc *scn = ce_work->data; + struct HIF_CE_state *hif_ce_state; + + if (!scn) { + HIF_ERROR("%s: tasklet scn is null", __func__); + return; + } + + hif_ce_state = HIF_GET_CE_STATE(scn); + + if (scn->hif_init_done == false) { + HIF_ERROR("%s: wlan driver is unloaded", __func__); + return; + } + if (hif_ce_state->tasklets[ce_work->id].inited) + tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq); +} + +static struct tasklet_work tasklet_workers[CE_ID_MAX]; +static bool work_initialized; + +/** + * init_tasklet_work() - init_tasklet_work + * @work: struct work_struct + * @work_handler: work_handler + * + * Return: N/A + */ +static void init_tasklet_work(struct work_struct *work, + work_func_t work_handler) +{ + INIT_WORK(work, work_handler); +} + +/** + * init_tasklet_workers() - init_tasklet_workers + * @scn: HIF Context + * + * Return: N/A + */ +void init_tasklet_workers(struct hif_opaque_softc *scn) +{ + uint32_t id; + + for (id = 0; id < CE_ID_MAX; id++) { + tasklet_workers[id].id = id; + tasklet_workers[id].data = scn; + init_tasklet_work(&tasklet_workers[id].work, + reschedule_ce_tasklet_work_handler); + } + work_initialized = true; +} + +/** + * deinit_tasklet_workers() - deinit_tasklet_workers + * @scn: HIF Context + * + * Return: N/A + */ +void deinit_tasklet_workers(struct hif_opaque_softc *scn) +{ + u32 id; + + for (id = 0; id < CE_ID_MAX; id++) + cancel_work_sync(&tasklet_workers[id].work); + + work_initialized = false; +} + +/** + * ce_schedule_tasklet() - schedule ce tasklet + * @tasklet_entry: struct ce_tasklet_entry + * + * Return: N/A + */ +static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry) +{ + tasklet_schedule(&tasklet_entry->intr_tq); +} + +#ifdef CE_TASKLET_DEBUG_ENABLE +/** + * hif_record_tasklet_exec_entry_ts() - Record ce tasklet execution + * entry time + * @scn: hif_softc + * @ce_id: ce_id + * + * Return: None + */ +static inline void +hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id) +{ + struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn); + + hif_ce_state->stats.tasklet_exec_entry_ts[ce_id] = + qdf_get_log_timestamp_usecs(); +} + +/** + * hif_record_tasklet_sched_entry_ts() - Record ce tasklet scheduled + * entry time + * @scn: hif_softc + * @ce_id: ce_id + * + * Return: None + */ +static inline void +hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id) +{ + struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn); + + hif_ce_state->stats.tasklet_sched_entry_ts[ce_id] = + qdf_get_log_timestamp_usecs(); +} + +/** + * hif_ce_latency_stats() - Display ce latency information + * @hif_ctx: hif_softc struct + * + * Return: None + */ +static void +hif_ce_latency_stats(struct hif_softc *hif_ctx) +{ + uint8_t i, j; + uint32_t index, start_index; + static const char * const buck_str[] = {"0 - 0.5", "0.5 - 1", "1 - 2", + "2 - 5", "5 - 10", " > 10"}; + struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx); + struct ce_stats *stats = &hif_ce_state->stats; + + hif_err("\tCE TASKLET ARRIVAL AND EXECUTION STATS"); + for (i = 0; i < CE_COUNT_MAX; i++) { + hif_nofl_err("\n\t\tCE Ring %d Tasklet Execution Bucket", i); + for (j = 0; j < CE_BUCKET_MAX; j++) { + hif_nofl_err("\t Bucket %sms :%llu\t last update:%llu", + buck_str[j], + stats->ce_tasklet_exec_bucket[i][j], + stats->ce_tasklet_exec_last_update[i][j]); + } + + hif_nofl_err("\n\t\tCE Ring %d Tasklet Scheduled Bucket", i); + for (j = 0; j < CE_BUCKET_MAX; j++) { + hif_nofl_err("\t Bucket %sms :%llu\t last update :%lld", + buck_str[j], + stats->ce_tasklet_sched_bucket[i][j], + stats-> + ce_tasklet_sched_last_update[i][j]); + } + + hif_nofl_err("\n\t\t CE RING %d Last %d time records", + i, HIF_REQUESTED_EVENTS); + index = stats->record_index[i]; + start_index = stats->record_index[i]; + + for (j = 0; j < HIF_REQUESTED_EVENTS; j++) { + hif_nofl_err("\t Execuiton time: %luus Total Scheduled time: %luus", + stats->tasklet_exec_time_record[i][index], + stats-> + tasklet_sched_time_record[i][index]); + index = (index - 1) % HIF_REQUESTED_EVENTS; + if (index == start_index) + break; + } + } +} + +/** + * ce_tasklet_update_bucket() - update ce execution and scehduled time latency + * in corresponding time buckets + * @stats: struct ce_stats + * @ce_id: ce_id_type + * @entry_us: timestamp when tasklet is started to execute + * @exit_us: timestamp when tasklet is completed execution + * + * Return: N/A + */ +static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state, + uint8_t ce_id) +{ + uint32_t index; + uint64_t exec_time, exec_ms; + uint64_t sched_time, sched_ms; + uint64_t curr_time = qdf_get_log_timestamp_usecs(); + struct ce_stats *stats = &hif_ce_state->stats; + + exec_time = curr_time - (stats->tasklet_exec_entry_ts[ce_id]); + sched_time = (stats->tasklet_exec_entry_ts[ce_id]) - + (stats->tasklet_sched_entry_ts[ce_id]); + + index = stats->record_index[ce_id]; + index = (index + 1) % HIF_REQUESTED_EVENTS; + + stats->tasklet_exec_time_record[ce_id][index] = exec_time; + stats->tasklet_sched_time_record[ce_id][index] = sched_time; + stats->record_index[ce_id] = index; + + exec_ms = qdf_do_div(exec_time, 1000); + sched_ms = qdf_do_div(sched_time, 1000); + + if (exec_ms > 10) { + stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_BEYOND]++; + stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_BEYOND] + = curr_time; + } else if (exec_ms > 5) { + stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_10_MS]++; + stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_10_MS] + = curr_time; + } else if (exec_ms > 2) { + stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_5_MS]++; + stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_5_MS] + = curr_time; + } else if (exec_ms > 1) { + stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_2_MS]++; + stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_2_MS] + = curr_time; + } else if (exec_time > 500) { + stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_1_MS]++; + stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_1_MS] + = curr_time; + } else { + stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_500_US]++; + stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_500_US] + = curr_time; + } + + if (sched_ms > 10) { + stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_BEYOND]++; + stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_BEYOND] + = curr_time; + } else if (sched_ms > 5) { + stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_10_MS]++; + stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_10_MS] + = curr_time; + } else if (sched_ms > 2) { + stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_5_MS]++; + stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_5_MS] + = curr_time; + } else if (sched_ms > 1) { + stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_2_MS]++; + stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_2_MS] + = curr_time; + } else if (sched_time > 500) { + stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_1_MS]++; + stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_1_MS] + = curr_time; + } else { + stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_500_US]++; + stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_500_US] + = curr_time; + } +} +#else +static inline void +hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id) +{ +} + +static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state, + uint8_t ce_id) +{ +} + +static inline void +hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id) +{ +} + +static void +hif_ce_latency_stats(struct hif_softc *hif_ctx) +{ +} +#endif /*CE_TASKLET_DEBUG_ENABLE*/ + +/** + * ce_tasklet() - ce_tasklet + * @data: data + * + * Return: N/A + */ +static void ce_tasklet(unsigned long data) +{ + struct ce_tasklet_entry *tasklet_entry = + (struct ce_tasklet_entry *)data; + struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state); + struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id]; + + if (scn->ce_latency_stats) + hif_record_tasklet_exec_entry_ts(scn, tasklet_entry->ce_id); + + hif_record_ce_desc_event(scn, tasklet_entry->ce_id, + HIF_CE_TASKLET_ENTRY, NULL, NULL, -1, 0); + + if (qdf_atomic_read(&scn->link_suspended)) { + HIF_ERROR("%s: ce %d tasklet fired after link suspend.", + __func__, tasklet_entry->ce_id); + QDF_BUG(0); + } + + ce_per_engine_service(scn, tasklet_entry->ce_id); + + if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) { + /* + * There are frames pending, schedule tasklet to process them. + * Enable the interrupt only when there is no pending frames in + * any of the Copy Engine pipes. + */ + hif_record_ce_desc_event(scn, tasklet_entry->ce_id, + HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, -1, 0); + + ce_schedule_tasklet(tasklet_entry); + return; + } + + if (scn->target_status != TARGET_STATUS_RESET) + hif_irq_enable(scn, tasklet_entry->ce_id); + + hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT, + NULL, NULL, -1, 0); + + if (scn->ce_latency_stats) + ce_tasklet_update_bucket(hif_ce_state, tasklet_entry->ce_id); + + qdf_atomic_dec(&scn->active_tasklet_cnt); +} + +/** + * ce_tasklet_init() - ce_tasklet_init + * @hif_ce_state: hif_ce_state + * @mask: mask + * + * Return: N/A + */ +void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask) +{ + int i; + + for (i = 0; i < CE_COUNT_MAX; i++) { + if (mask & (1 << i)) { + hif_ce_state->tasklets[i].ce_id = i; + hif_ce_state->tasklets[i].inited = true; + hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state; + tasklet_init(&hif_ce_state->tasklets[i].intr_tq, + ce_tasklet, + (unsigned long)&hif_ce_state->tasklets[i]); + } + } +} +/** + * ce_tasklet_kill() - ce_tasklet_kill + * @hif_ce_state: hif_ce_state + * + * Context: Non-Atomic context + * Return: N/A + */ +void ce_tasklet_kill(struct hif_softc *scn) +{ + int i; + struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn); + + work_initialized = false; + + for (i = 0; i < CE_COUNT_MAX; i++) { + if (hif_ce_state->tasklets[i].inited) { + hif_ce_state->tasklets[i].inited = false; + /* + * Cancel the tasklet work before tasklet_disable + * to avoid race between tasklet_schedule and + * tasklet_kill. Here cancel_work_sync() won't + * return before reschedule_ce_tasklet_work_handler() + * completes. Even if tasklet_schedule() happens + * tasklet_disable() will take care of that. + */ + cancel_work_sync(&tasklet_workers[i].work); + tasklet_kill(&hif_ce_state->tasklets[i].intr_tq); + } + } + qdf_atomic_set(&scn->active_tasklet_cnt, 0); +} + +#define HIF_CE_DRAIN_WAIT_CNT 20 +/** + * hif_drain_tasklets(): wait until no tasklet is pending + * @scn: hif context + * + * Let running tasklets clear pending trafic. + * + * Return: 0 if no bottom half is in progress when it returns. + * -EFAULT if it times out. + */ +int hif_drain_tasklets(struct hif_softc *scn) +{ + uint32_t ce_drain_wait_cnt = 0; + int32_t tasklet_cnt; + + while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) { + if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) { + HIF_ERROR("%s: CE still not done with access: %d", + __func__, tasklet_cnt); + + return -EFAULT; + } + HIF_INFO("%s: Waiting for CE to finish access", __func__); + msleep(10); + } + return 0; +} + +#ifdef WLAN_SUSPEND_RESUME_TEST +/** + * hif_interrupt_is_ut_resume(): Tests if an irq on the given copy engine should + * trigger a unit-test resume. + * @scn: The HIF context to operate on + * @ce_id: The copy engine Id from the originating interrupt + * + * Return: true if the raised irq should trigger a unit-test resume + */ +static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id) +{ + int errno; + uint8_t wake_ce_id; + + if (!hif_is_ut_suspended(scn)) + return false; + + /* ensure passed ce_id matches wake ce_id */ + errno = hif_get_wake_ce_id(scn, &wake_ce_id); + if (errno) { + HIF_ERROR("%s: failed to get wake CE Id: %d", __func__, errno); + return false; + } + + return ce_id == wake_ce_id; +} +#else +static inline bool +hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id) +{ + return false; +} +#endif /* WLAN_SUSPEND_RESUME_TEST */ + +/** + * hif_snoc_interrupt_handler() - hif_snoc_interrupt_handler + * @irq: irq coming from kernel + * @context: context + * + * Return: N/A + */ +static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context) +{ + struct ce_tasklet_entry *tasklet_entry = context; + struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state); + + return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq), + tasklet_entry); +} + +/** + * hif_ce_increment_interrupt_count() - update ce stats + * @hif_ce_state: ce state + * @ce_id: ce id + * + * Return: none + */ +static inline void +hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id) +{ + int cpu_id = qdf_get_cpu(); + + hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++; +} + +/** + * hif_display_ce_stats() - display ce stats + * @hif_ce_state: ce state + * + * Return: none + */ +void hif_display_ce_stats(struct hif_softc *hif_ctx) +{ +#define STR_SIZE 128 + uint8_t i, j, pos; + char str_buffer[STR_SIZE]; + int size, ret; + struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx); + + qdf_debug("CE interrupt statistics:"); + for (i = 0; i < CE_COUNT_MAX; i++) { + size = STR_SIZE; + pos = 0; + for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) { + ret = snprintf(str_buffer + pos, size, "[%d]:%d ", + j, hif_ce_state->stats.ce_per_cpu[i][j]); + if (ret <= 0 || ret >= size) + break; + size -= ret; + pos += ret; + } + qdf_debug("CE id[%2d] - %s", i, str_buffer); + } + + if (hif_ctx->ce_latency_stats) + hif_ce_latency_stats(hif_ctx); +#undef STR_SIZE +} + +/** + * hif_clear_ce_stats() - clear ce stats + * @hif_ce_state: ce state + * + * Return: none + */ +void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state) +{ + qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats)); +} + +/** + * hif_tasklet_schedule() - schedule tasklet + * @hif_ctx: hif context + * @tasklet_entry: ce tasklet entry + * + * Return: false if tasklet already scheduled, otherwise true + */ +static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx, + struct ce_tasklet_entry *tasklet_entry) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) { + HIF_DBG("tasklet scheduled, return"); + qdf_atomic_dec(&scn->active_tasklet_cnt); + return false; + } + + tasklet_schedule(&tasklet_entry->intr_tq); + if (scn->ce_latency_stats) + hif_record_tasklet_sched_entry_ts(scn, tasklet_entry->ce_id); + + return true; +} + +/** + * ce_dispatch_interrupt() - dispatch an interrupt to a processing context + * @ce_id: ce_id + * @tasklet_entry: context + * + * Return: N/A + */ +irqreturn_t ce_dispatch_interrupt(int ce_id, + struct ce_tasklet_entry *tasklet_entry) +{ + struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state); + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + + if (tasklet_entry->ce_id != ce_id) { + HIF_ERROR("%s: ce_id (expect %d, received %d) does not match", + __func__, tasklet_entry->ce_id, ce_id); + return IRQ_NONE; + } + if (unlikely(ce_id >= CE_COUNT_MAX)) { + HIF_ERROR("%s: ce_id=%d > CE_COUNT_MAX=%d", + __func__, tasklet_entry->ce_id, CE_COUNT_MAX); + return IRQ_NONE; + } + + hif_irq_disable(scn, ce_id); + + if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) + return IRQ_HANDLED; + + hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT, + NULL, NULL, 0, 0); + hif_ce_increment_interrupt_count(hif_ce_state, ce_id); + + if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) { + hif_ut_fw_resume(scn); + hif_irq_enable(scn, ce_id); + return IRQ_HANDLED; + } + + qdf_atomic_inc(&scn->active_tasklet_cnt); + + if (hif_napi_enabled(hif_hdl, ce_id)) + hif_napi_schedule(hif_hdl, ce_id); + else + hif_tasklet_schedule(hif_hdl, tasklet_entry); + + return IRQ_HANDLED; +} + +/** + * const char *ce_name + * + * @ce_name: ce_name + */ +const char *ce_name[] = { + "WLAN_CE_0", + "WLAN_CE_1", + "WLAN_CE_2", + "WLAN_CE_3", + "WLAN_CE_4", + "WLAN_CE_5", + "WLAN_CE_6", + "WLAN_CE_7", + "WLAN_CE_8", + "WLAN_CE_9", + "WLAN_CE_10", + "WLAN_CE_11", +}; +/** + * ce_unregister_irq() - ce_unregister_irq + * @hif_ce_state: hif_ce_state copy engine device handle + * @mask: which coppy engines to unregister for. + * + * Unregisters copy engine irqs matching mask. If a 1 is set at bit x, + * unregister for copy engine x. + * + * Return: QDF_STATUS + */ +QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask) +{ + int id; + int ce_count; + int ret; + struct hif_softc *scn; + + if (!hif_ce_state) { + HIF_WARN("%s: hif_ce_state = NULL", __func__); + return QDF_STATUS_SUCCESS; + } + + scn = HIF_GET_SOFTC(hif_ce_state); + ce_count = scn->ce_count; + /* we are removing interrupts, so better stop NAPI */ + ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn), + NAPI_EVT_INT_STATE, (void *)0); + if (ret != 0) + HIF_ERROR("%s: napi_event INT_STATE returned %d", + __func__, ret); + /* this is not fatal, continue */ + + /* filter mask to free only for ce's with irq registered */ + mask &= hif_ce_state->ce_register_irq_done; + for (id = 0; id < ce_count; id++) { + if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) { + ret = pld_ce_free_irq(scn->qdf_dev->dev, id, + &hif_ce_state->tasklets[id]); + if (ret < 0) + HIF_ERROR( + "%s: pld_unregister_irq error - ce_id = %d, ret = %d", + __func__, id, ret); + } + ce_disable_polling(scn->ce_id_to_state[id]); + } + hif_ce_state->ce_register_irq_done &= ~mask; + + return QDF_STATUS_SUCCESS; +} +/** + * ce_register_irq() - ce_register_irq + * @hif_ce_state: hif_ce_state + * @mask: which coppy engines to unregister for. + * + * Registers copy engine irqs matching mask. If a 1 is set at bit x, + * Register for copy engine x. + * + * Return: QDF_STATUS + */ +QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask) +{ + int id; + int ce_count; + int ret; + unsigned long irqflags = IRQF_TRIGGER_RISING; + uint32_t done_mask = 0; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state); + + ce_count = scn->ce_count; + + for (id = 0; id < ce_count; id++) { + if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) { + ret = pld_ce_request_irq(scn->qdf_dev->dev, id, + hif_snoc_interrupt_handler, + irqflags, ce_name[id], + &hif_ce_state->tasklets[id]); + if (ret) { + HIF_ERROR( + "%s: cannot register CE %d irq handler, ret = %d", + __func__, id, ret); + ce_unregister_irq(hif_ce_state, done_mask); + return QDF_STATUS_E_FAULT; + } + done_mask |= 1 << id; + } + } + hif_ce_state->ce_register_irq_done |= done_mask; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.h new file mode 100644 index 0000000000000000000000000000000000000000..d5ab58cefb11be0e7016e0d916ba1e654d183654 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2015-2016,2018,2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __CE_TASKLET_H__ +#define __CE_TASKLET_H__ +#include "ce_main.h" +void init_tasklet_workers(struct hif_opaque_softc *scn); +void deinit_tasklet_workers(struct hif_opaque_softc *scn); +void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask); +void ce_tasklet_kill(struct hif_softc *scn); +int hif_drain_tasklets(struct hif_softc *scn); +QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask); +QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask); +irqreturn_t ce_dispatch_interrupt(int irq, + struct ce_tasklet_entry *tasklet_entry); +void hif_display_ce_stats(struct hif_softc *hif_ctx); +void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state); +#endif /* __CE_TASKLET_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/ahb_api.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/ahb_api.h new file mode 100644 index 0000000000000000000000000000000000000000..c7fca4c195607ffd276063a9eccff92acca6ff8c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/ahb_api.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2013-2018,2020-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __AHB_API_H +#define __AHB_API_H +struct hif_exec_context; + +QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, + enum qdf_bus_type bus_type); +void hif_ahb_close(struct hif_softc *hif_ctx); + + +void hif_ahb_disable_isr(struct hif_softc *hif_ctx); +void hif_ahb_nointrs(struct hif_softc *scn); +void hif_ahb_reset_soc(struct hif_softc *hif_ctx); +QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type); +void hif_ahb_disable_bus(struct hif_softc *scn); +int hif_ahb_bus_configure(struct hif_softc *scn); +void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id); +void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id); +void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_grp); +void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_grp); +int hif_ahb_dump_registers(struct hif_softc *scn); + +int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc); +int hif_ahb_clk_enable_disable(struct device *dev, int enable); +void hif_ahb_device_reset(struct hif_softc *scn); +int hif_ahb_enable_radio(struct hif_pci_softc *sc, + struct platform_device *pdev, + const struct platform_device_id *id); +int hif_ahb_configure_irq(struct hif_pci_softc *sc); +int hif_ahb_configure_grp_irq(struct hif_softc *scn, + struct hif_exec_context *hif_ext_grp); +void hif_ahb_deconfigure_grp_irq(struct hif_softc *scn); +bool hif_ahb_needs_bmi(struct hif_softc *scn); +void hif_ahb_display_stats(struct hif_softc *scn); +void hif_ahb_clear_stats(struct hif_softc *scn); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/dummy.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/dummy.c new file mode 100644 index 0000000000000000000000000000000000000000..f4d323dcd2c3987ce6709c4b10d694843ca96aea --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/dummy.c @@ -0,0 +1,403 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_types.h" +#include "dummy.h" +#include "hif_debug.h" + +/** + * hif_dummy_bus_prevent_linkdown() - prevent linkdown + * @hif_ctx: hif context + * @flag: weather to keep the bus alive or not + * + * Dummy function for busses and platforms that do not support + * link down. This may need to be replaced with a wakelock. + */ +void hif_dummy_bus_prevent_linkdown(struct hif_softc *scn, bool flag) +{ + HIF_DBG("wlan: %s pcie power collapse ignored", + (flag ? "disable" : "enable")); +} + +/** + * hif_reset_soc(): reset soc + * + * this function resets soc + * + * @hif_ctx: HIF context + * + * Return: void + */ +/* Function to reset SoC */ +void hif_dummy_reset_soc(struct hif_softc *hif_ctx) +{ +} + +/** + * hif_dummy_suspend() - suspend the bus + * @hif_ctx: hif context + * + * dummy for busses that don't need to suspend. + * + * Return: 0 for success and non-zero for failure + */ +int hif_dummy_bus_suspend(struct hif_softc *hif_ctx) +{ + return 0; +} + +/** + * hif_dummy_resume() - hif resume API + * + * This function resumes the bus. but snoc doesn't need to resume. + * Therefore do nothing. + * + * Return: 0 for success and non-zero for failure + */ +int hif_dummy_bus_resume(struct hif_softc *hif_ctx) +{ + return 0; +} + +/** + * hif_dummy_suspend_noirq() - suspend the bus + * @hif_ctx: hif context + * + * dummy for busses that don't need to synchronize + * with interrupt disable. + * + * Return: 0 for success and non-zero for failure + */ +int hif_dummy_bus_suspend_noirq(struct hif_softc *hif_ctx) +{ + return 0; +} + +/** + * hif_dummy_resume_noirq() - resume the bus + * @hif_ctx: hif context + * + * dummy for busses that don't need to synchronize + * with interrupt disable. + * + * Return: 0 for success and non-zero for failure + */ +int hif_dummy_bus_resume_noirq(struct hif_softc *hif_ctx) +{ + return 0; +} + +/** + * hif_dummy_target_sleep_state_adjust() - api to adjust state of target + * @scn: hif context + * @sleep_ok: allow or deny target to go to sleep + * @wait_for_it: ensure target has change + */ +int hif_dummy_target_sleep_state_adjust(struct hif_softc *scn, + bool sleep_ok, bool wait_for_it) +{ + return 0; +} + +/** + * hif_dummy_enable_power_management - dummy call + * hif_ctx: hif context + * is_packet_log_enabled: true if packet log is enabled + */ +void hif_dummy_enable_power_management(struct hif_softc *hif_ctx, + bool is_packet_log_enabled) +{} + +/** + * hif_dummy_disable_power_management - dummy call + * hif_ctx: hif context + * + * Return: none + */ +void hif_dummy_disable_power_management(struct hif_softc *hif_ctx) +{} + +/** + * hif_dummy_disable_isr - dummy call + * hif_ctx: hif context + * + * Return: none + */ +void hif_dummy_disable_isr(struct hif_softc *scn) +{} + +/** + * hif_dummy_nointrs - dummy call + * hif_sc: hif context + * + * Return: none + */ +void hif_dummy_nointrs(struct hif_softc *hif_sc) +{} + +/** + * hif_dummy_bus_configure - dummy call + * hif_ctx: hif context + * + * Return: 0 for success + */ +int hif_dummy_bus_configure(struct hif_softc *hif_sc) +{ + return 0; +} + +/** + * hif_dummy_get_config_item - dummy call + * @hif_sc: hif context + * @opcode: configuration type + * @config: configuration value to set + * @config_len: configuration length + * + * Return: 0 for success + */ +QDF_STATUS +hif_dummy_get_config_item(struct hif_softc *hif_sc, + int opcode, void *config, uint32_t config_len) +{ + return 0; +} + +/** + * hif_dummy_set_mailbox_swap - dummy call + * @hif_sc: hif context + * + * Return: None + */ +void +hif_dummy_set_mailbox_swap(struct hif_softc *hif_sc) +{ +} + +/** + * hif_dummy_claim_device - dummy call + * @hif_sc: hif context + * + * Return: None + */ +void +hif_dummy_claim_device(struct hif_softc *hif_sc) +{ +} + +/** + * hif_dummy_cancel_deferred_target_sleep - dummy call + * @hif_sc: hif context + * + * Return: None + */ +void +hif_dummy_cancel_deferred_target_sleep(struct hif_softc *hif_sc) +{ +} + +/** + * hif_dummy_irq_enable - dummy call + * hif_ctx: hif context + * @irq_id: irq id + * + * Return: none + */ +void hif_dummy_irq_enable(struct hif_softc *hif_sc, int irq_id) +{} + +/** + * hif_dummy_grp_irq_enable - dummy call + * hif_ctx: hif context + * @irq_id: grp id + * + * Return: none + */ +void hif_dummy_grp_irq_enable(struct hif_softc *hif_sc, uint32_t grp_id) +{} + +/** + * hif_dummy_irq_disable - dummy call + * hif_ctx: hif context + * @irq_id: irq id + * + * Return: none + */ +void hif_dummy_irq_disable(struct hif_softc *hif_sc, int irq_id) +{} + +/** + * hif_dummy_grp_irq_disable- dummy call + * hif_ctx: hif context + * @grp_id: grp id + * + * Return: none + */ +void hif_dummy_grp_irq_disable(struct hif_softc *hif_sc, uint32_t grp_id) +{} + +/** + * hif_dummy_grp_irq_configure - dummy call + * hif_ctx: hif context + * + * Return: none + */ +int hif_dummy_grp_irq_configure(struct hif_softc *hif_sc, + struct hif_exec_context *exec) +{ + return 0; +} + +/** + * hif_dummy_grp_irq_deconfigure - dummy call + * hif_sc: hif context + * + * Return: none + */ +void hif_dummy_grp_irq_deconfigure(struct hif_softc *hif_sc) +{ +} + +/** + * hif_dummy_dump_registers - dummy call + * hif_sc: hif context + * + * Return: 0 for success + */ +int hif_dummy_dump_registers(struct hif_softc *hif_sc) +{ + return 0; +} + +/** + * hif_dummy_dump_target_memory - dummy call + * @hif_sc: hif context + * @ramdump_base: base + * @address: address + * @size: size + * + * Return: None + */ +void hif_dummy_dump_target_memory(struct hif_softc *hif_sc, void *ramdump_base, + uint32_t address, uint32_t size) +{ +} + +/** + * hif_dummy_ipa_get_ce_resource - dummy call + * @scn: HIF context + * @ce_sr: copyengine source ring resource info + * @sr_ring_size: source ring size + * @reg_paddr: bus physical address + * + * Return: None + */ +void hif_dummy_ipa_get_ce_resource(struct hif_softc *hif_sc, + qdf_shared_mem_t **ce_sr, + uint32_t *sr_ring_size, + qdf_dma_addr_t *reg_paddr) +{ +} + +/** + * hif_dummy_mask_interrupt_call - dummy call + * @hif_sc: hif context + * + * Return: None + */ +void +hif_dummy_mask_interrupt_call(struct hif_softc *hif_sc) +{ +} + +/** + * hif_dummy_display_stats - dummy call + * hif_ctx: hif context + * + * Return: none + */ +void hif_dummy_display_stats(struct hif_softc *hif_ctx) +{} + +/** + * hif_dummy_clear_stats - dummy call + * hif_ctx: hif context + * + * Return: none + */ +void hif_dummy_clear_stats(struct hif_softc *hif_ctx) +{} +/** + * hif_dummy_set_bundle_mode() - dummy call + * @hif_sc: hif context + * @enabled: flag to enable/disable bundling + * @rx_bundle_cnt: bundle count to be used for RX + * + * Return: none + */ +void hif_dummy_set_bundle_mode(struct hif_softc *hif_ctx, + bool enabled, int rx_bundle_cnt) +{ +} + +/** + * hif_dummy_bus_reset_resume() - dummy call + * @hif_sc: hif context + * + * Return: int 0 for success, non zero for failure + */ +int hif_dummy_bus_reset_resume(struct hif_softc *hif_ctx) +{ + return 0; +} + +int hif_dummy_map_ce_to_irq(struct hif_softc *scn, int ce_id) +{ + HIF_ERROR("%s: hif_map_ce_to_irq is not implemented on this platform", + __func__); + QDF_BUG(0); + return -(1); +} + +int hif_dummy_addr_in_boundary(struct hif_softc *scn, uint32_t offset) +{ + return 0; +} + +/** + * hif_dummy_config_irq_affinity - dummy call + * @scn: hif context + * + * Return: None + */ +void hif_dummy_config_irq_affinity(struct hif_softc *scn) +{ +} + +/** + * hif_dummy_log_bus_info - dummy call + * @scn: hif context + * @data: hang event data buffer + * @offset: offset at which data needs to be written + * + * Return: bool + */ +bool hif_dummy_log_bus_info(struct hif_softc *scn, uint8_t *data, + unsigned int *offset) +{ + return false; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/dummy.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/dummy.h new file mode 100644 index 0000000000000000000000000000000000000000..c048803d152a7736f1cba5aca5ba6105ed57709c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/dummy.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +struct hif_softc; +struct hif_exec_context; + +void hif_dummy_bus_prevent_linkdown(struct hif_softc *scn, bool flag); +void hif_dummy_reset_soc(struct hif_softc *scn); +int hif_dummy_bus_suspend(struct hif_softc *hif_ctx); +int hif_dummy_bus_resume(struct hif_softc *hif_ctx); +int hif_dummy_bus_suspend_noirq(struct hif_softc *hif_ctx); +int hif_dummy_bus_resume_noirq(struct hif_softc *hif_ctx); +int hif_dummy_target_sleep_state_adjust(struct hif_softc *scn, + bool sleep_ok, bool wait_for_it); +void hif_dummy_enable_power_management(struct hif_softc *hif_ctx, + bool is_packet_log_enabled); +void hif_dummy_disable_power_management(struct hif_softc *hif_ctx); +void hif_dummy_disable_isr(struct hif_softc *scn); +void hif_dummy_nointrs(struct hif_softc *hif_sc); +int hif_dummy_bus_configure(struct hif_softc *hif_sc); +QDF_STATUS hif_dummy_get_config_item(struct hif_softc *hif_sc, + int opcode, void *config, uint32_t config_len); +void hif_dummy_set_mailbox_swap(struct hif_softc *hif_sc); +void hif_dummy_claim_device(struct hif_softc *hif_sc); +void hif_dummy_cancel_deferred_target_sleep(struct hif_softc *hif_sc); +void hif_dummy_irq_enable(struct hif_softc *hif_sc, int irq_id); +void hif_dummy_irq_disable(struct hif_softc *hif_sc, int irq_id); +void hif_dummy_grp_irq_enable(struct hif_softc *hif_sc, uint32_t grp_id); +void hif_dummy_grp_irq_disable(struct hif_softc *hif_sc, uint32_t grp_id); +int hif_dummy_grp_irq_configure(struct hif_softc *hif_sc, + struct hif_exec_context *exec); +void hif_dummy_grp_irq_deconfigure(struct hif_softc *hif_sc); +int hif_dummy_dump_registers(struct hif_softc *hif_sc); +void hif_dummy_dump_target_memory(struct hif_softc *hif_sc, void *ramdump_base, + uint32_t address, uint32_t size); +void hif_dummy_ipa_get_ce_resource(struct hif_softc *hif_sc, + qdf_shared_mem_t **ce_sr, + uint32_t *sr_ring_size, + qdf_dma_addr_t *reg_paddr); +void hif_dummy_mask_interrupt_call(struct hif_softc *hif_sc); +void hif_dummy_display_stats(struct hif_softc *hif_ctx); +void hif_dummy_clear_stats(struct hif_softc *hif_ctx); +void hif_dummy_set_bundle_mode(struct hif_softc *hif_ctx, + bool enabled, int rx_bundle_cnt); +int hif_dummy_bus_reset_resume(struct hif_softc *hif_ctx); +int hif_dummy_map_ce_to_irq(struct hif_softc *scn, int ce_id); +int hif_dummy_addr_in_boundary(struct hif_softc *scn, uint32_t offset); +void hif_dummy_config_irq_affinity(struct hif_softc *scn); +bool hif_dummy_log_bus_info(struct hif_softc *scn, uint8_t *data, + unsigned int *offset); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/ipci_api.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/ipci_api.h new file mode 100644 index 0000000000000000000000000000000000000000..17353428912c9f88eda4ac264c654533f2e24881 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/ipci_api.h @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _IPCI_API_H_ +#define _IPCI_API_H_ +struct hif_exec_context; + +/** + * hif_ipci_open(): hif_bus_open + * @hif_ctx: hif context + * @bus_type: bus type + * + * Return: 0 for success or QDF_STATUS_E_NOMEM + */ +QDF_STATUS hif_ipci_open(struct hif_softc *hif_ctx, + enum qdf_bus_type bus_type); + +/** + * hif_ipci_close(): hif_bus_close + * @hif_ctx: hif context + * + * Return: n/a + */ +void hif_ipci_close(struct hif_softc *hif_ctx); + +/** + * hif_bus_prevent_linkdown(): allow or permit linkdown + * @scn: struct hif_softc + * @flag: true prevents linkdown, false allows + * + * Calls into the platform driver to vote against taking down the + * pcie link. + * + * Return: n/a + */ +void hif_ipci_prevent_linkdown(struct hif_softc *scn, bool flag); + +/** + * hif_ipci_bus_suspend(): prepare hif for suspend + * @scn: struct hif_softc + * + * Return: Errno + */ +int hif_ipci_bus_suspend(struct hif_softc *scn); + +/** + * hif_ipci_bus_suspend_noirq() - ensure there are no pending transactions + * @scn: hif context + * + * Ensure that if we received the wakeup message before the irq + * was disabled that the message is pocessed before suspending. + * + * Return: -EBUSY if we fail to flush the tasklets. + */ +int hif_ipci_bus_suspend_noirq(struct hif_softc *scn); + +/** + * hif_ipci_bus_resume(): prepare hif for resume + * @scn: struct hif_softc + * + * Return: Errno + */ +int hif_ipci_bus_resume(struct hif_softc *scn); + +/** + * hif_ipci_bus_resume_noirq() - ensure there are no pending transactions + * @scn: hif context + * + * Ensure that if we received the wakeup message before the irq + * was disabled that the message is pocessed before suspending. + * + * Return: -EBUSY if we fail to flush the tasklets. + */ +int hif_ipci_bus_resume_noirq(struct hif_softc *scn); + +/** + * hif_ipci_disable_isr(): disable interrupt + * @scn: struct hif_softc + * + * Return: n/a + */ +void hif_ipci_disable_isr(struct hif_softc *scn); + +/** + * hif_ipci_nointrs(): disable IRQ + * @scn: struct hif_softc + * + * This function stops interrupt(s) + * + * Return: none + */ +void hif_ipci_nointrs(struct hif_softc *scn); + +/** + * hif_ipci_dump_registers(): dump bus debug registers + * @scn: struct hif_opaque_softc + * + * This function dumps hif bus debug registers + * + * Return: 0 for success or error code + */ +int hif_ipci_dump_registers(struct hif_softc *scn); + +/** + * hif_ipci_enable_bus(): enable bus + * + * This function enables the bus + * + * @ol_sc: soft_sc struct + * @dev: device pointer + * @bdev: bus dev pointer + * bid: bus id pointer + * type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE + * Return: QDF_STATUS + */ +QDF_STATUS hif_ipci_enable_bus( + struct hif_softc *scn, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type); + +/** + * hif_ipci_disable_bus(): hif_disable_bus + * + * This function disables the bus + * + * @scn: struct hif_softc + * + * Return: none + */ +void hif_ipci_disable_bus(struct hif_softc *scn); + +/** + * hif_ipci_bus_configure() - configure the pcie bus + * @hif_sc: pointer to the hif context. + * + * return: 0 for success. nonzero for failure. + */ +int hif_ipci_bus_configure(struct hif_softc *scn); + +/** + * hif_ipci_enable_power_management() - enable power management + * @hif_ctx: hif context + * @is_packet_log_enabled: pktlog enabled or disabled + * + * Return: none + */ +void hif_ipci_enable_power_management( + struct hif_softc *hif_ctx, + bool is_packet_log_enabled); + +/** + * hif_ipci_disable_power_management() - disable power management + * @hif_ctx: hif context + * + * Return: none + */ +void hif_ipci_disable_power_management(struct hif_softc *hif_ctx); + +/** + * hif_ipci_configure_grp_irq() - configure HW block irq + * @scn: hif context + * @exec: hif exec context + * + * Return:Errno + */ +int hif_ipci_configure_grp_irq( + struct hif_softc *scn, + struct hif_exec_context *exec); + +/** + * hif_ipci_deconfigure_grp_irq() - deconfigure HW block irq + * @scn: hif context + * + * Return: None + */ +void hif_ipci_deconfigure_grp_irq(struct hif_softc *scn); + +/** + * hif_ipci_display_stats() - display stats + * @hif_ctx: hif context + * + * Return: none + */ +void hif_ipci_display_stats(struct hif_softc *hif_ctx); + +/** + * hif_ipci_clear_stats() - clear stats + * @hif_ctx: hif context + * + * Return: none + */ +void hif_ipci_clear_stats(struct hif_softc *hif_ctx); + +/** + * hif_ipci_needs_bmi() - return true if the soc needs bmi through the driver + * @scn: hif context + * + * Return: true if soc needs driver bmi otherwise false + */ +bool hif_ipci_needs_bmi(struct hif_softc *scn); + +/** + * hif_ipci_get_irq_name() - get irqname + * This function gives irqnumber to irqname + * mapping. + * + * @irq_no: irq number + * + * Return: irq name + */ +const char *hif_ipci_get_irq_name(int irq_no); + +#endif /* _IPCI_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus.c new file mode 100644 index 0000000000000000000000000000000000000000..aa80361d6a8456f46dae343e79adc2222ee11327 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus.c @@ -0,0 +1,551 @@ +/* + * Copyright (c) 2016-2018, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* this file dispatches functions to bus specific definitions */ +#include "hif_debug.h" +#include "hif.h" +#include "hif_main.h" +#include "hif_io32.h" +#include "multibus.h" +#include "dummy.h" +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \ + defined(HIF_IPCI) +#include "ce_main.h" +#include "ce_api.h" +#include "ce_internal.h" +#endif +#include "htc_services.h" +#include "a_types.h" +#include "dummy.h" +#include "qdf_module.h" + +/** + * hif_initialize_default_ops() - initializes default operations values + * + * bus specific features should assign their dummy implementations here. + */ +static void hif_initialize_default_ops(struct hif_softc *hif_sc) +{ + struct hif_bus_ops *bus_ops = &hif_sc->bus_ops; + + /* must be filled in by hif_bus_open */ + bus_ops->hif_bus_close = NULL; + /* dummy implementations */ + bus_ops->hif_display_stats = + &hif_dummy_display_stats; + bus_ops->hif_clear_stats = + &hif_dummy_clear_stats; + bus_ops->hif_set_bundle_mode = &hif_dummy_set_bundle_mode; + bus_ops->hif_bus_reset_resume = &hif_dummy_bus_reset_resume; + bus_ops->hif_bus_suspend_noirq = &hif_dummy_bus_suspend_noirq; + bus_ops->hif_bus_resume_noirq = &hif_dummy_bus_resume_noirq; + bus_ops->hif_bus_early_suspend = &hif_dummy_bus_suspend; + bus_ops->hif_bus_late_resume = &hif_dummy_bus_resume; + bus_ops->hif_map_ce_to_irq = &hif_dummy_map_ce_to_irq; + bus_ops->hif_grp_irq_configure = &hif_dummy_grp_irq_configure; + bus_ops->hif_grp_irq_deconfigure = &hif_dummy_grp_irq_deconfigure; + bus_ops->hif_config_irq_affinity = + &hif_dummy_config_irq_affinity; +} + +#define NUM_OPS (sizeof(struct hif_bus_ops) / sizeof(void *)) + +/** + * hif_verify_basic_ops() - ensure required bus apis are defined + * + * all bus operations must be defined to avoid crashes + * itterate over the structure and ensure all function pointers + * are non null. + * + * Return: QDF_STATUS_SUCCESS if all the operations are defined + */ +static QDF_STATUS hif_verify_basic_ops(struct hif_softc *hif_sc) +{ + struct hif_bus_ops *bus_ops = &hif_sc->bus_ops; + void **ops_array = (void *)bus_ops; + QDF_STATUS status = QDF_STATUS_SUCCESS; + int i; + + for (i = 0; i < NUM_OPS; i++) { + if (!ops_array[i]) { + HIF_ERROR("%s: function %d is null", __func__, i); + status = QDF_STATUS_E_NOSUPPORT; + } + } + return status; +} + +/** + * hif_bus_get_context_size - API to return size of the bus specific structure + * + * Return: sizeof of hif_pci_softc + */ +int hif_bus_get_context_size(enum qdf_bus_type bus_type) +{ + switch (bus_type) { + case QDF_BUS_TYPE_PCI: + return hif_pci_get_context_size(); + case QDF_BUS_TYPE_IPCI: + return hif_ipci_get_context_size(); + case QDF_BUS_TYPE_AHB: + return hif_ahb_get_context_size(); + case QDF_BUS_TYPE_SNOC: + return hif_snoc_get_context_size(); + case QDF_BUS_TYPE_SDIO: + return hif_sdio_get_context_size(); + case QDF_BUS_TYPE_USB: + return hif_usb_get_context_size(); + default: + return 0; + } +} + +/** + * hif_bus_open() - initialize the bus_ops and call the bus specific open + * hif_sc: hif_context + * bus_type: type of bus being enumerated + * + * Return: QDF_STATUS_SUCCESS or error + */ +QDF_STATUS hif_bus_open(struct hif_softc *hif_sc, + enum qdf_bus_type bus_type) +{ + QDF_STATUS status = QDF_STATUS_E_INVAL; + + hif_initialize_default_ops(hif_sc); + + switch (bus_type) { + case QDF_BUS_TYPE_PCI: + status = hif_initialize_pci_ops(hif_sc); + break; + case QDF_BUS_TYPE_IPCI: + status = hif_initialize_ipci_ops(hif_sc); + break; + case QDF_BUS_TYPE_SNOC: + status = hif_initialize_snoc_ops(&hif_sc->bus_ops); + break; + case QDF_BUS_TYPE_AHB: + status = hif_initialize_ahb_ops(&hif_sc->bus_ops); + break; + case QDF_BUS_TYPE_SDIO: + status = hif_initialize_sdio_ops(hif_sc); + break; + case QDF_BUS_TYPE_USB: + status = hif_initialize_usb_ops(&hif_sc->bus_ops); + break; + default: + status = QDF_STATUS_E_NOSUPPORT; + break; + } + + if (status != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: %d not supported", __func__, bus_type); + return status; + } + + status = hif_verify_basic_ops(hif_sc); + if (status != QDF_STATUS_SUCCESS) + return status; + + return hif_sc->bus_ops.hif_bus_open(hif_sc, bus_type); +} + +/** + * hif_bus_close() - close the bus + * @hif_sc: hif_context + */ +void hif_bus_close(struct hif_softc *hif_sc) +{ + hif_sc->bus_ops.hif_bus_close(hif_sc); +} + +/** + * hif_bus_prevent_linkdown() - prevent linkdown + * @hif_ctx: hif context + * @flag: true = keep bus alive false = let bus go to sleep + * + * Keeps the bus awake durring suspend. + */ +void hif_bus_prevent_linkdown(struct hif_softc *hif_sc, bool flag) +{ + hif_sc->bus_ops.hif_bus_prevent_linkdown(hif_sc, flag); +} + + +void hif_reset_soc(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + hif_sc->bus_ops.hif_reset_soc(hif_sc); +} + +int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + return hif_sc->bus_ops.hif_bus_early_suspend(hif_sc); +} + +int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + return hif_sc->bus_ops.hif_bus_late_resume(hif_sc); +} + +int hif_bus_suspend(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + return hif_sc->bus_ops.hif_bus_suspend(hif_sc); +} + +int hif_bus_resume(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + return hif_sc->bus_ops.hif_bus_resume(hif_sc); +} + +int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + return hif_sc->bus_ops.hif_bus_suspend_noirq(hif_sc); +} + +int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + return hif_sc->bus_ops.hif_bus_resume_noirq(hif_sc); +} + +int hif_target_sleep_state_adjust(struct hif_softc *hif_sc, + bool sleep_ok, bool wait_for_it) +{ + return hif_sc->bus_ops.hif_target_sleep_state_adjust(hif_sc, + sleep_ok, wait_for_it); +} +qdf_export_symbol(hif_target_sleep_state_adjust); + +void hif_disable_isr(struct hif_opaque_softc *hif_hdl) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl); + + hif_sc->bus_ops.hif_disable_isr(hif_sc); +} + +void hif_nointrs(struct hif_softc *hif_sc) +{ + hif_sc->bus_ops.hif_nointrs(hif_sc); +} + +QDF_STATUS hif_enable_bus(struct hif_softc *hif_sc, struct device *dev, + void *bdev, const struct hif_bus_id *bid, + enum hif_enable_type type) +{ + return hif_sc->bus_ops.hif_enable_bus(hif_sc, dev, bdev, bid, type); +} + +void hif_disable_bus(struct hif_softc *hif_sc) +{ + hif_sc->bus_ops.hif_disable_bus(hif_sc); +} + +int hif_bus_configure(struct hif_softc *hif_sc) +{ + return hif_sc->bus_ops.hif_bus_configure(hif_sc); +} + +QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx, + int opcode, void *config, uint32_t config_len) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + return hif_sc->bus_ops.hif_get_config_item(hif_sc, opcode, config, + config_len); +} + +void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + hif_sc->bus_ops.hif_set_mailbox_swap(hif_sc); +} + +void hif_claim_device(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + hif_sc->bus_ops.hif_claim_device(hif_sc); +} + +void hif_shutdown_device(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + hif_sc->bus_ops.hif_shutdown_device(hif_sc); +} + +void hif_stop(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + hif_sc->bus_ops.hif_stop(hif_sc); +} + +void hif_cancel_deferred_target_sleep(struct hif_softc *hif_sc) +{ + return hif_sc->bus_ops.hif_cancel_deferred_target_sleep(hif_sc); +} + +void hif_irq_enable(struct hif_softc *hif_sc, int irq_id) +{ + hif_sc->bus_ops.hif_irq_enable(hif_sc, irq_id); +} +qdf_export_symbol(hif_irq_enable); + +void hif_irq_disable(struct hif_softc *hif_sc, int irq_id) +{ + hif_sc->bus_ops.hif_irq_disable(hif_sc, irq_id); +} + +int hif_grp_irq_configure(struct hif_softc *hif_sc, + struct hif_exec_context *hif_exec) +{ + return hif_sc->bus_ops.hif_grp_irq_configure(hif_sc, hif_exec); +} + +void hif_grp_irq_deconfigure(struct hif_softc *hif_sc) +{ + hif_sc->bus_ops.hif_grp_irq_deconfigure(hif_sc); +} + +int hif_dump_registers(struct hif_opaque_softc *hif_hdl) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl); + + return hif_sc->bus_ops.hif_dump_registers(hif_sc); +} + +void hif_dump_target_memory(struct hif_opaque_softc *hif_hdl, + void *ramdump_base, + uint32_t address, uint32_t size) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl); + + hif_sc->bus_ops.hif_dump_target_memory(hif_sc, ramdump_base, + address, size); +} + +void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_hdl, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl); + + hif_sc->bus_ops.hif_ipa_get_ce_resource(hif_sc, ce_sr, + ce_sr_ring_size, ce_reg_paddr); +} + +void hif_mask_interrupt_call(struct hif_opaque_softc *hif_hdl) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl); + + hif_sc->bus_ops.hif_mask_interrupt_call(hif_sc); +} + +void hif_display_bus_stats(struct hif_opaque_softc *scn) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(scn); + + hif_sc->bus_ops.hif_display_stats(hif_sc); +} + +void hif_clear_bus_stats(struct hif_opaque_softc *scn) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(scn); + + hif_sc->bus_ops.hif_clear_stats(hif_sc); +} + +/** + * hif_enable_power_management() - enable power management after driver load + * @hif_hdl: opaque pointer to the hif context + * is_packet_log_enabled: true if packet log is enabled + * + * Driver load and firmware download are done in a high performance mode. + * Enable power management after the driver is loaded. + * packet log can require fewer power management features to be enabled. + */ +void hif_enable_power_management(struct hif_opaque_softc *hif_hdl, + bool is_packet_log_enabled) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl); + + hif_sc->bus_ops.hif_enable_power_management(hif_sc, + is_packet_log_enabled); +} + +/** + * hif_disable_power_management() - reset the bus power management + * @hif_hdl: opaque pointer to the hif context + * + * return the power management of the bus to its default state. + * This isn't necessarily a complete reversal of its counterpart. + * This should be called when unloading the driver. + */ +void hif_disable_power_management(struct hif_opaque_softc *hif_hdl) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl); + + hif_sc->bus_ops.hif_disable_power_management(hif_sc); +} + +/** + * hif_set_bundle_mode() - enable bundling and set default rx bundle cnt + * @scn: pointer to hif_opaque_softc structure + * @enabled: flag to enable/disable bundling + * @rx_bundle_cnt: bundle count to be used for RX + * + * Return: none + */ +void hif_set_bundle_mode(struct hif_opaque_softc *scn, bool enabled, + int rx_bundle_cnt) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(scn); + + hif_sc->bus_ops.hif_set_bundle_mode(hif_sc, enabled, rx_bundle_cnt); +} + +/** + * hif_bus_reset_resume() - resume the bus after reset + * @scn: struct hif_opaque_softc + * + * This function is called to tell the driver that USB device has been resumed + * and it has also been reset. The driver should redo any necessary + * initialization. This function resets WLAN SOC. + * + * Return: int 0 for success, non zero for failure + */ +int hif_bus_reset_resume(struct hif_opaque_softc *scn) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(scn); + + return hif_sc->bus_ops.hif_bus_reset_resume(hif_sc); +} + +int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn; + int i; + + QDF_BUG(hif_ctx); + scn = HIF_GET_SOFTC(hif_ctx); + if (!scn) + return -EINVAL; + + /* if the wake_irq is shared, don't disable it twice */ + disable_irq(scn->wake_irq); + for (i = 0; i < scn->ce_count; ++i) { + int irq = scn->bus_ops.hif_map_ce_to_irq(scn, i); + + if (irq != scn->wake_irq) + disable_irq(irq); + } + + return 0; +} + +int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn; + int i; + + QDF_BUG(hif_ctx); + scn = HIF_GET_SOFTC(hif_ctx); + if (!scn) + return -EINVAL; + + /* if the wake_irq is shared, don't enable it twice */ + enable_irq(scn->wake_irq); + for (i = 0; i < scn->ce_count; ++i) { + int irq = scn->bus_ops.hif_map_ce_to_irq(scn, i); + + if (irq != scn->wake_irq) + enable_irq(irq); + } + + return 0; +} + +int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn; + + QDF_BUG(hif_ctx); + scn = HIF_GET_SOFTC(hif_ctx); + if (!scn) + return -EINVAL; + + disable_irq(scn->wake_irq); + + return 0; +} + +int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn; + + QDF_BUG(hif_ctx); + scn = HIF_GET_SOFTC(hif_ctx); + if (!scn) + return -EINVAL; + + enable_irq(scn->wake_irq); + + return 0; +} + +#ifdef WLAN_FEATURE_BMI +bool hif_needs_bmi(struct hif_opaque_softc *scn) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(scn); + + return hif_sc->bus_ops.hif_needs_bmi(hif_sc); +} +qdf_export_symbol(hif_needs_bmi); +#endif /* WLAN_FEATURE_BMI */ + +void hif_config_irq_affinity(struct hif_softc *hif_sc) +{ + hif_sc->bus_ops.hif_config_irq_affinity(hif_sc); +} + +#ifdef HIF_BUS_LOG_INFO +bool hif_log_bus_info(struct hif_softc *hif_sc, uint8_t *data, + unsigned int *offset) +{ + if (hif_sc->bus_ops.hif_log_bus_info) + return hif_sc->bus_ops.hif_log_bus_info(hif_sc, data, offset); + + return false; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus.h new file mode 100644 index 0000000000000000000000000000000000000000..104a7924bacff51dfa9bc2289810f4ad4d612fc1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus.h @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2016-2018, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _MULTIBUS_H_ +#define _MULTIBUS_H_ + +#include "osdep.h" +#include "qdf_status.h" +#include "hif_debug.h" + +struct hif_softc; +struct hif_exec_context; + +struct hif_bus_ops { + QDF_STATUS (*hif_bus_open)(struct hif_softc *hif_sc, + enum qdf_bus_type bus_type); + void (*hif_bus_close)(struct hif_softc *hif_sc); + void (*hif_bus_prevent_linkdown)(struct hif_softc *hif_sc, bool flag); + void (*hif_reset_soc)(struct hif_softc *hif_sc); + int (*hif_bus_early_suspend)(struct hif_softc *hif_ctx); + int (*hif_bus_late_resume)(struct hif_softc *hif_ctx); + int (*hif_bus_suspend)(struct hif_softc *hif_ctx); + int (*hif_bus_resume)(struct hif_softc *hif_ctx); + int (*hif_bus_suspend_noirq)(struct hif_softc *hif_ctx); + int (*hif_bus_resume_noirq)(struct hif_softc *hif_ctx); + int (*hif_target_sleep_state_adjust)(struct hif_softc *scn, + bool sleep_ok, bool wait_for_it); + void (*hif_disable_isr)(struct hif_softc *hif_sc); + void (*hif_nointrs)(struct hif_softc *hif_sc); + QDF_STATUS (*hif_enable_bus)(struct hif_softc *hif_sc, + struct device *dev, + void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type); + void (*hif_disable_bus)(struct hif_softc *hif_sc); + int (*hif_bus_configure)(struct hif_softc *hif_sc); + QDF_STATUS (*hif_get_config_item)(struct hif_softc *hif_sc, + int opcode, void *config, uint32_t config_len); + void (*hif_set_mailbox_swap)(struct hif_softc *hif_sc); + void (*hif_claim_device)(struct hif_softc *hif_sc); + void (*hif_shutdown_device)(struct hif_softc *hif_sc); + void (*hif_stop)(struct hif_softc *hif_sc); + void (*hif_cancel_deferred_target_sleep)(struct hif_softc *hif_sc); + void (*hif_irq_disable)(struct hif_softc *hif_sc, int ce_id); + void (*hif_irq_enable)(struct hif_softc *hif_sc, int ce_id); + int (*hif_grp_irq_configure)(struct hif_softc *hif_sc, + struct hif_exec_context *exec); + void (*hif_grp_irq_deconfigure)(struct hif_softc *hif_sc); + int (*hif_dump_registers)(struct hif_softc *hif_sc); + void (*hif_dump_target_memory)(struct hif_softc *hif_sc, + void *ramdump_base, + uint32_t address, uint32_t size); + void (*hif_ipa_get_ce_resource)(struct hif_softc *hif_sc, + qdf_shared_mem_t **ce_sr, + uint32_t *sr_ring_size, + qdf_dma_addr_t *reg_paddr); + void (*hif_mask_interrupt_call)(struct hif_softc *hif_sc); + void (*hif_enable_power_management)(struct hif_softc *hif_ctx, + bool is_packet_log_enabled); + void (*hif_disable_power_management)(struct hif_softc *hif_ctx); + void (*hif_display_stats)(struct hif_softc *hif_ctx); + void (*hif_clear_stats)(struct hif_softc *hif_ctx); + void (*hif_set_bundle_mode)(struct hif_softc *hif_ctx, bool enabled, + int rx_bundle_cnt); + int (*hif_bus_reset_resume)(struct hif_softc *hif_ctx); + int (*hif_map_ce_to_irq)(struct hif_softc *hif_sc, int ce_id); + int (*hif_addr_in_boundary)(struct hif_softc *scn, uint32_t offset); + bool (*hif_needs_bmi)(struct hif_softc *hif_sc); + void (*hif_config_irq_affinity)(struct hif_softc *hif_sc); + bool (*hif_log_bus_info)(struct hif_softc *scn, uint8_t *data, + unsigned int *offset); +}; + +#ifdef HIF_SNOC +QDF_STATUS hif_initialize_snoc_ops(struct hif_bus_ops *hif_sc); +int hif_snoc_get_context_size(void); +#else +static inline QDF_STATUS hif_initialize_snoc_ops(struct hif_bus_ops *hif_sc) +{ + HIF_ERROR("%s: not supported", __func__); + return QDF_STATUS_E_NOSUPPORT; +} +/** + * hif_snoc_get_context_size() - dummy when snoc isn't supported + * + * Return: 0 as an invalid size to indicate no support + */ +static inline int hif_snoc_get_context_size(void) +{ + return 0; +} +#endif /* HIF_SNOC */ + +#ifdef HIF_PCI +QDF_STATUS hif_initialize_pci_ops(struct hif_softc *hif_sc); +int hif_pci_get_context_size(void); +#else +static inline QDF_STATUS hif_initialize_pci_ops(struct hif_softc *hif_sc) +{ + HIF_ERROR("%s: not supported", __func__); + return QDF_STATUS_E_NOSUPPORT; +} +/** + * hif_pci_get_context_size() - dummy when pci isn't supported + * + * Return: 0 as an invalid size to indicate no support + */ +static inline int hif_pci_get_context_size(void) +{ + return 0; +} +#endif /* HIF_PCI */ + +#ifdef HIF_IPCI +/** + * hif_initialize_ipci_ops() - initialize the pci ops + * @hif_sc: pointer to hif context + * + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS hif_initialize_ipci_ops(struct hif_softc *hif_sc); + +/** + * hif_ipci_get_context_size() - return the size of the ipci context + * + * Return the size of the context. (0 for invalid bus) + */ +int hif_ipci_get_context_size(void); +#else +static inline QDF_STATUS hif_initialize_ipci_ops(struct hif_softc *hif_sc) +{ + HIF_ERROR("%s: not supported", __func__); + return QDF_STATUS_E_NOSUPPORT; +} + +/** + * hif_ipci_get_context_size() - dummy when ipci isn't supported + * + * Return: 0 as an invalid size to indicate no support + */ +static inline int hif_ipci_get_context_size(void) +{ + return 0; +} +#endif /* HIF_IPCI */ + +#ifdef HIF_AHB +QDF_STATUS hif_initialize_ahb_ops(struct hif_bus_ops *bus_ops); +int hif_ahb_get_context_size(void); +#else +/** + * hif_initialize_ahb_ops() - dummy for when ahb not supported + * + * Return: QDF_STATUS_E_NOSUPPORT + */ +static inline QDF_STATUS hif_initialize_ahb_ops(struct hif_bus_ops *bus_ops) +{ + HIF_ERROR("%s: not supported", __func__); + return QDF_STATUS_E_NOSUPPORT; +} + +/** + * hif_ahb_get_context_size() - dummy for when ahb not supported + * + * Return: 0 as an invalid size to indicate no support + */ +static inline int hif_ahb_get_context_size(void) +{ + return 0; +} +#endif + +#ifdef HIF_SDIO +QDF_STATUS hif_initialize_sdio_ops(struct hif_softc *hif_sc); +int hif_sdio_get_context_size(void); +#else +/** + * hif_initialize_sdio_ops() - dummy for when sdio not supported + * + * Return: QDF_STATUS_E_NOSUPPORT + */ + +static inline QDF_STATUS hif_initialize_sdio_ops(struct hif_softc *hif_sc) +{ + HIF_ERROR("%s: not supported", __func__); + return QDF_STATUS_E_NOSUPPORT; +} + +/** + * hif_sdio_get_context_size() - dummy when sdio isn't supported + * + * Return: 0 as an invalid size to indicate no support + */ +static inline int hif_sdio_get_context_size(void) +{ + return 0; +} +#endif /* HIF_SDIO */ + +int hif_grp_irq_configure(struct hif_softc *hif_sc, + struct hif_exec_context *hif_exec); +void hif_grp_irq_deconfigure(struct hif_softc *hif_sc); +#ifdef HIF_USB +QDF_STATUS hif_initialize_usb_ops(struct hif_bus_ops *bus_ops); +int hif_usb_get_context_size(void); +#else +static inline QDF_STATUS hif_initialize_usb_ops(struct hif_bus_ops *bus_ops) +{ + HIF_ERROR("%s: not supported", __func__); + return QDF_STATUS_E_NOSUPPORT; +} +/** + * hif_usb_get_context_size() - dummy when usb isn't supported + * + * Return: 0 as an invalid size to indicate no support + */ +static inline int hif_usb_get_context_size(void) +{ + return 0; +} +#endif /* HIF_USB */ + +/** + * hif_config_irq_affinity() - Set IRQ affinity for WLAN IRQs + * @hif_sc - hif context + * + * Set IRQ affinity hint for WLAN IRQs in order to affine to + * gold cores. + * + * Return: None + */ +void hif_config_irq_affinity(struct hif_softc *hif_sc); + +#ifdef HIF_BUS_LOG_INFO +/** + * hif_log_bus_info() - API to log bus related info + * @scn: hif handle + * @data: hang event data buffer + * @offset: offset at which data needs to be written + * + * Return: true if bus_id is invalid else false + */ +bool hif_log_bus_info(struct hif_softc *scn, uint8_t *data, + unsigned int *offset); +#else +static inline +bool hif_log_bus_info(struct hif_softc *scn, uint8_t *data, + unsigned int *offset) +{ + return false; +} +#endif +#endif /* _MULTIBUS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_ahb.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_ahb.c new file mode 100644 index 0000000000000000000000000000000000000000..e6fc2440024feebb5694949f3606573e91ea8dac --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_ahb.c @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2016-2018,2020-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hif.h" +#include "hif_main.h" +#include "multibus.h" +#include "ce_main.h" +#include "if_pci.h" +#include "ahb_api.h" +#include "dummy.h" + +/** + * hif_initialize_ahb_ops() - initialize the ahb ops + * @bus_ops: hif_bus_ops table pointer to initialize + * + * This function will assign the set of callbacks that needs + * to be called for ipq4019 platform + * + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS hif_initialize_ahb_ops(struct hif_bus_ops *bus_ops) +{ + bus_ops->hif_bus_open = &hif_ahb_open; + bus_ops->hif_bus_close = &hif_ahb_close; + bus_ops->hif_bus_prevent_linkdown = &hif_dummy_bus_prevent_linkdown; + bus_ops->hif_reset_soc = &hif_ahb_reset_soc; + bus_ops->hif_bus_suspend = &hif_dummy_bus_suspend; + bus_ops->hif_bus_resume = &hif_dummy_bus_resume; + bus_ops->hif_target_sleep_state_adjust = + &hif_dummy_target_sleep_state_adjust; + + bus_ops->hif_disable_isr = &hif_ahb_disable_isr; + bus_ops->hif_nointrs = &hif_ahb_nointrs; + bus_ops->hif_enable_bus = &hif_ahb_enable_bus; + bus_ops->hif_disable_bus = &hif_ahb_disable_bus; + bus_ops->hif_bus_configure = &hif_ahb_bus_configure; + bus_ops->hif_get_config_item = &hif_dummy_get_config_item; + bus_ops->hif_set_mailbox_swap = &hif_dummy_set_mailbox_swap; + bus_ops->hif_claim_device = &hif_dummy_claim_device; + bus_ops->hif_shutdown_device = &hif_ce_stop; + bus_ops->hif_stop = &hif_ce_stop; + bus_ops->hif_cancel_deferred_target_sleep = + &hif_dummy_cancel_deferred_target_sleep; + bus_ops->hif_irq_disable = &hif_ahb_irq_disable; + bus_ops->hif_irq_enable = &hif_ahb_irq_enable; + bus_ops->hif_dump_registers = &hif_ahb_dump_registers; + bus_ops->hif_dump_target_memory = &hif_dummy_dump_target_memory; + bus_ops->hif_ipa_get_ce_resource = &hif_dummy_ipa_get_ce_resource; + bus_ops->hif_mask_interrupt_call = &hif_dummy_mask_interrupt_call; + bus_ops->hif_enable_power_management = + &hif_dummy_enable_power_management; + bus_ops->hif_disable_power_management = + &hif_dummy_disable_power_management; + bus_ops->hif_grp_irq_configure = &hif_ahb_configure_grp_irq; + bus_ops->hif_grp_irq_deconfigure = &hif_ahb_deconfigure_grp_irq; + bus_ops->hif_addr_in_boundary = &hif_dummy_addr_in_boundary; + bus_ops->hif_needs_bmi = &hif_ahb_needs_bmi; + bus_ops->hif_display_stats = &hif_ahb_display_stats; + bus_ops->hif_clear_stats = &hif_ahb_clear_stats; + bus_ops->hif_config_irq_affinity = + &hif_dummy_config_irq_affinity; + bus_ops->hif_log_bus_info = &hif_dummy_log_bus_info; + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_ahb_get_context_size() - return the size of the snoc context + * + * Return the size of the context. (0 for invalid bus) + */ +int hif_ahb_get_context_size(void) +{ + return sizeof(struct hif_pci_softc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_ipci.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_ipci.c new file mode 100644 index 0000000000000000000000000000000000000000..b67e5e382a41982b32ed83b2a73612e2c130cd72 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_ipci.c @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hif.h" +#include "hif_main.h" +#include "multibus.h" +#include "ipci_api.h" +#include "hif_io32.h" +#include "dummy.h" +#include "ce_api.h" + +/** + * hif_initialize_ipci_ops() - initialize the pci ops + * @bus_ops: hif_bus_ops table pointer to initialize + * + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS hif_initialize_ipci_ops(struct hif_softc *hif_sc) +{ + struct hif_bus_ops *bus_ops = &hif_sc->bus_ops; + + bus_ops->hif_bus_open = &hif_ipci_open; + bus_ops->hif_bus_close = &hif_ipci_close; + bus_ops->hif_bus_prevent_linkdown = &hif_ipci_prevent_linkdown; + bus_ops->hif_reset_soc = &hif_dummy_reset_soc; + bus_ops->hif_bus_suspend = &hif_ipci_bus_suspend; + bus_ops->hif_bus_resume = &hif_ipci_bus_resume; + bus_ops->hif_bus_suspend_noirq = &hif_ipci_bus_suspend_noirq; + bus_ops->hif_bus_resume_noirq = &hif_ipci_bus_resume_noirq; + bus_ops->hif_target_sleep_state_adjust = + &hif_dummy_target_sleep_state_adjust; + bus_ops->hif_disable_isr = &hif_ipci_disable_isr; + bus_ops->hif_nointrs = &hif_ipci_nointrs; + bus_ops->hif_enable_bus = &hif_ipci_enable_bus; + bus_ops->hif_disable_bus = &hif_ipci_disable_bus; + bus_ops->hif_bus_configure = &hif_ipci_bus_configure; + bus_ops->hif_get_config_item = &hif_dummy_get_config_item; + bus_ops->hif_set_mailbox_swap = &hif_dummy_set_mailbox_swap; + bus_ops->hif_claim_device = &hif_dummy_claim_device; + bus_ops->hif_shutdown_device = &hif_ce_stop; + bus_ops->hif_stop = &hif_ce_stop; + bus_ops->hif_cancel_deferred_target_sleep = + &hif_dummy_cancel_deferred_target_sleep; + bus_ops->hif_irq_disable = &hif_dummy_irq_disable; + bus_ops->hif_irq_enable = &hif_dummy_irq_enable; + bus_ops->hif_dump_registers = &hif_ipci_dump_registers; + bus_ops->hif_dump_target_memory = &hif_ce_dump_target_memory; + bus_ops->hif_ipa_get_ce_resource = &hif_ce_ipa_get_ce_resource; + bus_ops->hif_mask_interrupt_call = &hif_dummy_mask_interrupt_call; + bus_ops->hif_enable_power_management = + &hif_ipci_enable_power_management; + bus_ops->hif_disable_power_management = + &hif_ipci_disable_power_management; + bus_ops->hif_grp_irq_configure = &hif_ipci_configure_grp_irq; + bus_ops->hif_grp_irq_deconfigure = &hif_ipci_deconfigure_grp_irq; + bus_ops->hif_display_stats = + &hif_ipci_display_stats; + bus_ops->hif_clear_stats = + &hif_ipci_clear_stats; + bus_ops->hif_addr_in_boundary = &hif_dummy_addr_in_boundary; + bus_ops->hif_needs_bmi = &hif_ipci_needs_bmi; + bus_ops->hif_config_irq_affinity = + &hif_dummy_config_irq_affinity; + bus_ops->hif_log_bus_info = &hif_dummy_log_bus_info; + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_ipci_get_context_size() - return the size of the ipci context + * + * Return the size of the context. (0 for invalid bus) + */ +int hif_ipci_get_context_size(void) +{ + return sizeof(struct hif_ipci_softc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_pci.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..35010a9f534c29cfe61ccc72a2639c7b6db08ee6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_pci.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hif.h" +#include "hif_main.h" +#include "multibus.h" +#include "pci_api.h" +#include "hif_io32.h" +#include "dummy.h" +#include "ce_api.h" + +/** + * hif_initialize_pci_ops() - initialize the pci ops + * @bus_ops: hif_bus_ops table pointer to initialize + * + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS hif_initialize_pci_ops(struct hif_softc *hif_sc) +{ + struct hif_bus_ops *bus_ops = &hif_sc->bus_ops; + + bus_ops->hif_bus_open = &hif_pci_open; + bus_ops->hif_bus_close = &hif_pci_close; + bus_ops->hif_bus_prevent_linkdown = &hif_pci_prevent_linkdown; + bus_ops->hif_reset_soc = &hif_pci_reset_soc; + bus_ops->hif_bus_suspend = &hif_pci_bus_suspend; + bus_ops->hif_bus_resume = &hif_pci_bus_resume; + bus_ops->hif_bus_suspend_noirq = &hif_pci_bus_suspend_noirq; + bus_ops->hif_bus_resume_noirq = &hif_pci_bus_resume_noirq; + + /* do not put the target to sleep for epping or maxperf mode */ + if (CONFIG_ATH_PCIE_MAX_PERF == 0 && + !QDF_IS_EPPING_ENABLED(hif_get_conparam(hif_sc))) + bus_ops->hif_target_sleep_state_adjust = + &hif_pci_target_sleep_state_adjust; + else + bus_ops->hif_target_sleep_state_adjust = + &hif_dummy_target_sleep_state_adjust; + + bus_ops->hif_disable_isr = &hif_pci_disable_isr; + bus_ops->hif_nointrs = &hif_pci_nointrs; + bus_ops->hif_enable_bus = &hif_pci_enable_bus; + bus_ops->hif_disable_bus = &hif_pci_disable_bus; + bus_ops->hif_bus_configure = &hif_pci_bus_configure; + bus_ops->hif_get_config_item = &hif_dummy_get_config_item; + bus_ops->hif_set_mailbox_swap = &hif_dummy_set_mailbox_swap; + bus_ops->hif_claim_device = &hif_dummy_claim_device; + bus_ops->hif_shutdown_device = &hif_ce_stop; + bus_ops->hif_stop = &hif_ce_stop; + bus_ops->hif_cancel_deferred_target_sleep = + &hif_pci_cancel_deferred_target_sleep; + bus_ops->hif_irq_disable = &hif_pci_irq_disable; + bus_ops->hif_irq_enable = &hif_pci_irq_enable; + bus_ops->hif_dump_registers = &hif_pci_dump_registers; + bus_ops->hif_dump_target_memory = &hif_ce_dump_target_memory; + bus_ops->hif_ipa_get_ce_resource = &hif_ce_ipa_get_ce_resource; + bus_ops->hif_mask_interrupt_call = &hif_dummy_mask_interrupt_call; + bus_ops->hif_enable_power_management = + &hif_pci_enable_power_management; + bus_ops->hif_disable_power_management = + &hif_pci_disable_power_management; + bus_ops->hif_grp_irq_configure = &hif_pci_configure_grp_irq; + bus_ops->hif_grp_irq_deconfigure = &hif_pci_deconfigure_grp_irq; + bus_ops->hif_display_stats = + &hif_pci_display_stats; + bus_ops->hif_clear_stats = + &hif_pci_clear_stats; + bus_ops->hif_addr_in_boundary = &hif_pci_addr_in_boundary; + + /* default to legacy mapping handler; override as needed */ + bus_ops->hif_map_ce_to_irq = &hif_pci_legacy_map_ce_to_irq; + bus_ops->hif_needs_bmi = &hif_pci_needs_bmi; + + bus_ops->hif_config_irq_affinity = + &hif_pci_config_irq_affinity; + bus_ops->hif_log_bus_info = &hif_log_pcie_info; + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_pci_get_context_size() - return the size of the pci context + * + * Return the size of the context. (0 for invalid bus) + */ +int hif_pci_get_context_size(void) +{ + return sizeof(struct hif_pci_softc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_sdio.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_sdio.c new file mode 100644 index 0000000000000000000000000000000000000000..97a3e194ebc40b89824c11e1a3235bc8a025d3ba --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_sdio.c @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hif.h" +#include "hif_main.h" +#include "multibus.h" +#include "sdio_api.h" +#include "hif_io32.h" +#include "dummy.h" +#include "if_sdio.h" + +/** + * hif_initialize_sdio_ops() - initialize the sdio ops + * @hif_sc: hif soft context + * + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS hif_initialize_sdio_ops(struct hif_softc *hif_sc) +{ + struct hif_bus_ops *bus_ops = &hif_sc->bus_ops; + + bus_ops->hif_bus_open = &hif_sdio_open; + bus_ops->hif_bus_close = &hif_sdio_close; + bus_ops->hif_bus_prevent_linkdown = &hif_dummy_bus_prevent_linkdown; + bus_ops->hif_reset_soc = &hif_dummy_reset_soc; + bus_ops->hif_bus_suspend = &hif_sdio_bus_suspend; + bus_ops->hif_bus_resume = &hif_sdio_bus_resume; + bus_ops->hif_target_sleep_state_adjust = + &hif_dummy_target_sleep_state_adjust; + bus_ops->hif_disable_isr = &hif_dummy_disable_isr; + bus_ops->hif_nointrs = &hif_dummy_nointrs; + bus_ops->hif_enable_bus = &hif_sdio_enable_bus; + bus_ops->hif_disable_bus = &hif_sdio_disable_bus; + bus_ops->hif_bus_configure = &hif_dummy_bus_configure; + bus_ops->hif_get_config_item = &hif_sdio_get_config_item; + bus_ops->hif_set_mailbox_swap = &hif_sdio_set_mailbox_swap; + bus_ops->hif_claim_device = &hif_sdio_claim_device; + bus_ops->hif_shutdown_device = &hif_sdio_shutdown; + bus_ops->hif_stop = &hif_sdio_stop; + bus_ops->hif_cancel_deferred_target_sleep = + &hif_dummy_cancel_deferred_target_sleep; + bus_ops->hif_irq_disable = &hif_dummy_irq_disable; + bus_ops->hif_irq_enable = &hif_dummy_irq_enable; + bus_ops->hif_dump_registers = &hif_dummy_dump_registers; + bus_ops->hif_dump_target_memory = &hif_dummy_dump_target_memory; + bus_ops->hif_ipa_get_ce_resource = &hif_dummy_ipa_get_ce_resource; + bus_ops->hif_mask_interrupt_call = &hif_sdio_mask_interrupt_call; + bus_ops->hif_enable_power_management = + &hif_dummy_enable_power_management; + bus_ops->hif_disable_power_management = + &hif_dummy_disable_power_management; + bus_ops->hif_addr_in_boundary = &hif_dummy_addr_in_boundary; + bus_ops->hif_needs_bmi = &hif_sdio_needs_bmi; + bus_ops->hif_log_bus_info = &hif_dummy_log_bus_info; + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_sdio_get_context_size() - return the size of the sdio context + * + * Return the size of the context. (0 for invalid bus) + */ +int hif_sdio_get_context_size(void) +{ + return sizeof(struct hif_sdio_softc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_snoc.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_snoc.c new file mode 100644 index 0000000000000000000000000000000000000000..0834315a4005eb295fc049d30d43e316183066a8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_snoc.c @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hif.h" +#include "hif_main.h" +#include "multibus.h" +#include "ce_main.h" +#include "snoc_api.h" +#include "dummy.h" +#include "ce_api.h" + +/** + * hif_initialize_pci_ops() - initialize the pci ops + * @bus_ops: hif_bus_ops table pointer to initialize + * + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS hif_initialize_snoc_ops(struct hif_bus_ops *bus_ops) +{ + bus_ops->hif_bus_open = &hif_snoc_open; + bus_ops->hif_bus_close = &hif_snoc_close; + bus_ops->hif_bus_prevent_linkdown = &hif_dummy_bus_prevent_linkdown; + bus_ops->hif_reset_soc = &hif_dummy_reset_soc; + bus_ops->hif_bus_early_suspend = &hif_ce_bus_early_suspend; + bus_ops->hif_bus_late_resume = &hif_ce_bus_late_resume; + bus_ops->hif_bus_suspend = &hif_snoc_bus_suspend; + bus_ops->hif_bus_resume = &hif_snoc_bus_resume; + bus_ops->hif_bus_suspend_noirq = &hif_snoc_bus_suspend_noirq; + /* snoc_bus_resume_noirq had no side effects, use dummy resume_noirq */ + bus_ops->hif_bus_resume_noirq = &hif_dummy_bus_resume_noirq; + bus_ops->hif_target_sleep_state_adjust = + &hif_dummy_target_sleep_state_adjust; + + bus_ops->hif_disable_isr = &hif_snoc_disable_isr; + bus_ops->hif_nointrs = &hif_snoc_nointrs; + bus_ops->hif_enable_bus = &hif_snoc_enable_bus; + bus_ops->hif_disable_bus = &hif_snoc_disable_bus; + bus_ops->hif_bus_configure = &hif_snoc_bus_configure; + bus_ops->hif_get_config_item = &hif_dummy_get_config_item; + bus_ops->hif_set_mailbox_swap = &hif_dummy_set_mailbox_swap; + bus_ops->hif_claim_device = &hif_dummy_claim_device; + bus_ops->hif_shutdown_device = &hif_ce_stop; + bus_ops->hif_stop = &hif_ce_stop; + bus_ops->hif_cancel_deferred_target_sleep = + &hif_dummy_cancel_deferred_target_sleep; + bus_ops->hif_irq_disable = &hif_snoc_irq_disable; + bus_ops->hif_irq_enable = &hif_snoc_irq_enable; + bus_ops->hif_dump_registers = &hif_snoc_dump_registers; + bus_ops->hif_dump_target_memory = &hif_ce_dump_target_memory; + bus_ops->hif_ipa_get_ce_resource = &hif_ce_ipa_get_ce_resource; + bus_ops->hif_mask_interrupt_call = &hif_dummy_mask_interrupt_call; + bus_ops->hif_enable_power_management = + &hif_dummy_enable_power_management; + bus_ops->hif_disable_power_management = + &hif_dummy_disable_power_management; + bus_ops->hif_display_stats = + &hif_snoc_display_stats; + bus_ops->hif_clear_stats = + &hif_snoc_clear_stats; + bus_ops->hif_map_ce_to_irq = &hif_snoc_map_ce_to_irq; + bus_ops->hif_addr_in_boundary = &hif_dummy_addr_in_boundary; + bus_ops->hif_needs_bmi = &hif_snoc_needs_bmi; + bus_ops->hif_log_bus_info = &hif_dummy_log_bus_info; + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_snoc_get_context_size() - return the size of the snoc context + * + * Return the size of the context. (0 for invalid bus) + */ +int hif_snoc_get_context_size(void) +{ + return sizeof(struct HIF_CE_state); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_usb.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_usb.c new file mode 100644 index 0000000000000000000000000000000000000000..08b08ee15afc3bfae4e433b760adde27edb23eaa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_usb.c @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2016-2018,2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hif.h" +#include "hif_main.h" +#include "multibus.h" +#include "usb_api.h" +#include "hif_io32.h" +#include "dummy.h" +#include "if_usb.h" + +/** + * hif_initialize_usb_ops() - initialize the usb ops + * @bus_ops: hif_bus_ops table pointer to initialize + * + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS hif_initialize_usb_ops(struct hif_bus_ops *bus_ops) +{ + bus_ops->hif_bus_open = &hif_usb_open; + bus_ops->hif_bus_close = &hif_usb_close; + bus_ops->hif_bus_prevent_linkdown = &hif_dummy_bus_prevent_linkdown; + bus_ops->hif_reset_soc = &hif_dummy_reset_soc; + bus_ops->hif_bus_suspend = &hif_usb_bus_suspend; + bus_ops->hif_bus_resume = &hif_usb_bus_resume; + bus_ops->hif_target_sleep_state_adjust = + &hif_dummy_target_sleep_state_adjust; + bus_ops->hif_disable_isr = &hif_usb_disable_isr; + bus_ops->hif_nointrs = &hif_usb_nointrs; + bus_ops->hif_enable_bus = &hif_usb_enable_bus; + bus_ops->hif_disable_bus = &hif_usb_disable_bus; + bus_ops->hif_bus_configure = &hif_usb_bus_configure; + bus_ops->hif_get_config_item = &hif_dummy_get_config_item; + bus_ops->hif_set_mailbox_swap = &hif_dummy_set_mailbox_swap; + bus_ops->hif_claim_device = &hif_dummy_claim_device; + bus_ops->hif_shutdown_device = &hif_usb_shutdown_bus_device; + bus_ops->hif_stop = &hif_usb_stop_device; + bus_ops->hif_cancel_deferred_target_sleep = + &hif_dummy_cancel_deferred_target_sleep; + bus_ops->hif_irq_disable = &hif_usb_irq_disable; + bus_ops->hif_irq_enable = &hif_usb_irq_enable; + bus_ops->hif_dump_registers = &hif_dummy_dump_registers; + bus_ops->hif_dump_target_memory = &hif_dummy_dump_target_memory; + bus_ops->hif_ipa_get_ce_resource = &hif_dummy_ipa_get_ce_resource; + bus_ops->hif_mask_interrupt_call = &hif_dummy_mask_interrupt_call; + bus_ops->hif_enable_power_management = + &hif_dummy_enable_power_management; + bus_ops->hif_disable_power_management = + &hif_dummy_disable_power_management; + bus_ops->hif_addr_in_boundary = &hif_dummy_addr_in_boundary; + bus_ops->hif_set_bundle_mode = &hif_usb_set_bundle_mode; + bus_ops->hif_bus_reset_resume = &hif_usb_bus_reset_resume; + bus_ops->hif_map_ce_to_irq = &hif_dummy_map_ce_to_irq; + bus_ops->hif_needs_bmi = &hif_usb_needs_bmi; + bus_ops->hif_log_bus_info = &hif_dummy_log_bus_info; + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_usb_get_context_size() - return the size of the usb context + * + * Return the size of the context. (0 for invalid bus) + */ +int hif_usb_get_context_size(void) +{ + return sizeof(struct hif_usb_softc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/pci_api.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/pci_api.h new file mode 100644 index 0000000000000000000000000000000000000000..dddcedec779468dfc6da0f88a27d171b10f9b471 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/pci_api.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _PCI_API_H_ +#define _PCI_API_H_ +struct hif_exec_context; + +QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, + enum qdf_bus_type bus_type); +void hif_pci_close(struct hif_softc *hif_ctx); +void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag); +void hif_pci_reset_soc(struct hif_softc *ol_sc); +int hif_pci_bus_suspend(struct hif_softc *scn); +int hif_pci_bus_suspend_noirq(struct hif_softc *scn); +int hif_pci_bus_resume(struct hif_softc *scn); +int hif_pci_bus_resume_noirq(struct hif_softc *scn); +int hif_pci_target_sleep_state_adjust(struct hif_softc *scn, + bool sleep_ok, bool wait_for_it); + +void hif_pci_disable_isr(struct hif_softc *scn); +void hif_pci_nointrs(struct hif_softc *scn); +QDF_STATUS hif_pci_enable_bus(struct hif_softc *scn, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type); +void hif_pci_disable_bus(struct hif_softc *scn); +int hif_pci_bus_configure(struct hif_softc *scn); +void hif_pci_irq_disable(struct hif_softc *scn, int ce_id); +void hif_pci_irq_enable(struct hif_softc *scn, int ce_id); +int hif_pci_dump_registers(struct hif_softc *scn); +void hif_pci_enable_power_management(struct hif_softc *hif_ctx, + bool is_packet_log_enabled); +void hif_pci_disable_power_management(struct hif_softc *hif_ctx); +int hif_pci_configure_grp_irq(struct hif_softc *scn, + struct hif_exec_context *exec); +void hif_pci_deconfigure_grp_irq(struct hif_softc *scn); +void hif_pci_display_stats(struct hif_softc *hif_ctx); +void hif_pci_clear_stats(struct hif_softc *hif_ctx); +int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id); +bool hif_pci_needs_bmi(struct hif_softc *scn); +const char *hif_pci_get_irq_name(int irq_no); + +/** hif_pci_config_irq_affinity() - Set the IRQ affinity + * @scn: hif context + * + * Set IRQ affinity hint for WLAN IRQs to gold cores only for + * defconfig builds. + * + * Return: None + */ +void hif_pci_config_irq_affinity(struct hif_softc *scn); +#endif /* _PCI_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/sdio_api.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/sdio_api.h new file mode 100644 index 0000000000000000000000000000000000000000..76ced83ea2c7ba6d8744a3ee4483045d7b44c743 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/sdio_api.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +QDF_STATUS hif_sdio_open(struct hif_softc *hif_sc, + enum qdf_bus_type bus_type); +void hif_sdio_close(struct hif_softc *hif_sc); +int hif_sdio_bus_suspend(struct hif_softc *hif_ctx); +int hif_sdio_bus_resume(struct hif_softc *hif_ctx); +QDF_STATUS hif_sdio_enable_bus(struct hif_softc *hif_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type); +void hif_sdio_disable_bus(struct hif_softc *hif_sc); +QDF_STATUS +hif_sdio_get_config_item(struct hif_softc *hif_sc, + int opcode, void *config, uint32_t config_len); +void hif_sdio_set_mailbox_swap(struct hif_softc *hif_sc); +void hif_sdio_claim_device(struct hif_softc *hif_sc); +void hif_sdio_mask_interrupt_call(struct hif_softc *scn); +bool hif_sdio_needs_bmi(struct hif_softc *scn); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/snoc_api.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/snoc_api.h new file mode 100644 index 0000000000000000000000000000000000000000..9b342f4966601091a81a88b2fb1c16e576fd222a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/snoc_api.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SNOC_API_H_ +#define _SNOC_API_H_ +QDF_STATUS hif_snoc_open(struct hif_softc *hif_ctx, + enum qdf_bus_type bus_type); +void hif_snoc_close(struct hif_softc *hif_ctx); +int hif_snoc_bus_suspend(struct hif_softc *hif_ctx); +int hif_snoc_bus_resume(struct hif_softc *hif_ctx); +int hif_snoc_bus_suspend_noirq(struct hif_softc *scn); +void hif_snoc_disable_isr(struct hif_softc *hif_ctx); +void hif_snoc_nointrs(struct hif_softc *scn); +QDF_STATUS hif_snoc_enable_bus(struct hif_softc *ol_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type); +void hif_snoc_disable_bus(struct hif_softc *scn); +int hif_snoc_bus_configure(struct hif_softc *scn); +void hif_snoc_irq_disable(struct hif_softc *scn, int ce_id); +void hif_snoc_irq_enable(struct hif_softc *scn, int ce_id); +int hif_snoc_dump_registers(struct hif_softc *scn); +void hif_snoc_display_stats(struct hif_softc *hif_ctx); +void hif_snoc_clear_stats(struct hif_softc *hif_ctx); +int hif_snoc_map_ce_to_irq(struct hif_softc *scn, int ce_id); +bool hif_snoc_needs_bmi(struct hif_softc *scn); +#endif /* _SNOC_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/usb_api.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/usb_api.h new file mode 100644 index 0000000000000000000000000000000000000000..dd85dcae4dc248fd56428bd01028df001cd550b4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/usb_api.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef _USB_API_H_ +#define _USB_API_H_ +#include "if_usb.h" + +QDF_STATUS hif_usb_open(struct hif_softc *hif_ctx, + enum qdf_bus_type bus_type); +void hif_usb_close(struct hif_softc *hif_ctx); + + +void hif_usb_disable_isr(struct hif_softc *hif_ctx); +void hif_usb_nointrs(struct hif_softc *scn); +QDF_STATUS hif_usb_enable_bus(struct hif_softc *ol_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type); +void hif_usb_disable_bus(struct hif_softc *scn); +int hif_usb_bus_configure(struct hif_softc *scn); +void hif_usb_irq_disable(struct hif_softc *scn, int ce_id); +void hif_usb_irq_enable(struct hif_softc *scn, int ce_id); +int hif_usb_dump_registers(struct hif_softc *scn); +int hif_usb_bus_suspend(struct hif_softc *hif_ctx); +int hif_usb_bus_resume(struct hif_softc *hif_ctx); +void hif_usb_stop_device(struct hif_softc *hif_sc); +void hif_usb_shutdown_bus_device(struct hif_softc *scn); +int hif_usb_bus_reset_resume(struct hif_softc *hif_ctx); +void hif_usb_set_bundle_mode(struct hif_softc *scn, + bool enabled, int rx_bundle_cnt); +void hif_usb_reg_tbl_attach(struct hif_softc *scn); +void hif_fw_assert_ramdump_pattern(struct hif_usb_softc *sc); +void hif_usb_ramdump_handler(struct hif_opaque_softc *scn); +bool hif_usb_needs_bmi(struct hif_softc *scn); +bool hif_is_supported_rx_ctrl_pipe(struct hif_softc *scn); +#endif /*_USB_API_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_debug.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..5655a3033db80db379d2848ebcdc54d3bcb4d63c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_debug.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2014, 2016, 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __HIF_DEBUG_H__ +#define __HIF_DEBUG_H__ +#include "qdf_trace.h" + +#define hif_alert_rl(params...) QDF_TRACE_FATAL_RL(QDF_MODULE_ID_HIF, params) +#define hif_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_HIF, params) +#define hif_warn_rl(params...) QDF_TRACE_WARN_RL(QDF_MODULE_ID_HIF, params) +#define hif_info_rl(params...) QDF_TRACE_INFO_RL(QDF_MODULE_ID_HIF, params) +#define hif_debug_rl(params...) QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_HIF, params) + +/* Deprecated macros. Do not use it in new code */ +#define HIF_ERROR(args ...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_HIF, args) +#define HIF_WARN(args ...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_HIF, args) +#define HIF_INFO(args ...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_HIF, args) +#define HIF_INFO_HI(args ...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_HIF, args) +#define HIF_INFO_MED(args ...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_HIF, args) +#define HIF_INFO_LO(args ...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_HIF, args) +#define HIF_TRACE(args ...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_HIF, args) +#define HIF_DBG(args ...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_HIF, args) + +#define hif_alert(args ...) QDF_TRACE_FATAL(QDF_MODULE_ID_HIF, args) +#define hif_err(args ...) QDF_TRACE_ERROR(QDF_MODULE_ID_HIF, args) +#define hif_warn(args ...) QDF_TRACE_WARN(QDF_MODULE_ID_HIF, args) +#define hif_info(args ...) QDF_TRACE_INFO(QDF_MODULE_ID_HIF, args) +#define hif_debug(args ...) QDF_TRACE_DEBUG(QDF_MODULE_ID_HIF, args) +#define hif_info_high(args ...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_HIF, ## args) + +#define hif_nofl_alert(args ...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_HIF, args) +#define hif_nofl_err(args ...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_HIF, args) +#define hif_nofl_warn(args ...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_HIF, args) +#define hif_nofl_info(args ...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_HIF, args) +#define hif_nofl_debug(args ...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_HIF, args) + +#define HIF_ENTER() QDF_TRACE_ENTER(QDF_MODULE_ID_HIF, "enter") + +#define HIF_EXIT() QDF_TRACE_EXIT(QDF_MODULE_ID_HIF, "exit") + +#endif /* __HIF_DEBUG_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_exec.c b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_exec.c new file mode 100644 index 0000000000000000000000000000000000000000..8f014ff8dd2d41cfc93ea140e3d6c0a1f12e7371 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_exec.c @@ -0,0 +1,1065 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include "qdf_module.h" +#include "qdf_net_if.h" +/* mapping NAPI budget 0 to internal budget 0 + * NAPI budget 1 to internal budget [1,scaler -1] + * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc + */ +#define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \ + (((n) << (s)) - 1) +#define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \ + (((n) + 1) >> (s)) + +static struct hif_exec_context *hif_exec_tasklet_create(void); + +#ifdef WLAN_FEATURE_DP_EVENT_HISTORY +struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS]; + +static inline +int hif_get_next_record_index(qdf_atomic_t *table_index, + int array_size) +{ + int record_index = qdf_atomic_inc_return(table_index); + + return record_index & (array_size - 1); +} + +/** + * hif_hist_is_prev_record() - Check if index is the immediate + * previous record wrt curr_index + * @curr_index: curr index in the event history + * @index: index to be checked + * @hist_size: history size + * + * Return: true if index is immediately behind curr_index else false + */ +static inline +bool hif_hist_is_prev_record(int32_t curr_index, int32_t index, + uint32_t hist_size) +{ + return (((index + 1) & (hist_size - 1)) == curr_index) ? + true : false; +} + +/** + * hif_hist_skip_event_record() - Check if current event needs to be + * recorded or not + * @hist_ev: HIF event history + * @event: DP event entry + * + * Return: true if current event needs to be skipped else false + */ +static bool +hif_hist_skip_event_record(struct hif_event_history *hist_ev, + struct hif_event_record *event) +{ + struct hif_event_record *rec; + struct hif_event_record *last_irq_rec; + int32_t index; + + index = qdf_atomic_read(&hist_ev->index); + if (index < 0) + return false; + + index &= (HIF_EVENT_HIST_MAX - 1); + rec = &hist_ev->event[index]; + + switch (event->type) { + case HIF_EVENT_IRQ_TRIGGER: + /* + * The prev record check is to prevent skipping the IRQ event + * record in case where BH got re-scheduled due to force_break + * but there are no entries to be reaped in the rings. + */ + if (rec->type == HIF_EVENT_BH_SCHED && + hif_hist_is_prev_record(index, + hist_ev->misc.last_irq_index, + HIF_EVENT_HIST_MAX)) { + last_irq_rec = + &hist_ev->event[hist_ev->misc.last_irq_index]; + last_irq_rec->timestamp = qdf_get_log_timestamp(); + last_irq_rec->cpu_id = qdf_get_cpu(); + last_irq_rec->hp++; + last_irq_rec->tp = last_irq_rec->timestamp - + hist_ev->misc.last_irq_ts; + return true; + } + break; + case HIF_EVENT_BH_SCHED: + if (rec->type == HIF_EVENT_BH_SCHED) { + rec->timestamp = qdf_get_log_timestamp(); + rec->cpu_id = qdf_get_cpu(); + return true; + } + break; + case HIF_EVENT_SRNG_ACCESS_START: + if (event->hp == event->tp) + return true; + break; + case HIF_EVENT_SRNG_ACCESS_END: + if (rec->type != HIF_EVENT_SRNG_ACCESS_START) + return true; + break; + default: + break; + } + + return false; +} + +void hif_hist_record_event(struct hif_opaque_softc *hif_ctx, + struct hif_event_record *event, uint8_t intr_grp_id) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct hif_exec_context *hif_ext_group; + struct hif_event_history *hist_ev; + struct hif_event_record *record; + int record_index; + + if (!hif_state->hif_num_extgroup) + return; + + if (scn->event_disable_mask & BIT(event->type)) + return; + + if (intr_grp_id >= HIF_NUM_INT_CONTEXTS) { + hif_err("Invalid interrupt group id %d", intr_grp_id); + return; + } + + hif_ext_group = hif_state->hif_ext_group[intr_grp_id]; + hist_ev = hif_ext_group->evt_hist; + + if (hif_hist_skip_event_record(hist_ev, event)) + return; + + record_index = hif_get_next_record_index( + &hist_ev->index, HIF_EVENT_HIST_MAX); + + record = &hist_ev->event[record_index]; + + if (event->type == HIF_EVENT_IRQ_TRIGGER) { + hist_ev->misc.last_irq_index = record_index; + hist_ev->misc.last_irq_ts = qdf_get_log_timestamp(); + } + + record->hal_ring_id = event->hal_ring_id; + record->hp = event->hp; + record->tp = event->tp; + record->cpu_id = qdf_get_cpu(); + record->timestamp = qdf_get_log_timestamp(); + record->type = event->type; +} + +static void hif_event_history_init(struct hif_exec_context *hif_ext_grp) +{ + hif_ext_grp->evt_hist = &hif_event_desc_history[hif_ext_grp->grp_id]; + qdf_atomic_set(&hif_ext_grp->evt_hist->index, -1); +} +#else +static inline void hif_event_history_init(struct hif_exec_context *hif_ext_grp) +{ +} +#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ + +/** + * hif_print_napi_latency_stats() - print NAPI scheduling latency stats + * @hif_state: hif context + * + * return: void + */ +#ifdef HIF_LATENCY_PROFILE_ENABLE +static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state) +{ + struct hif_exec_context *hif_ext_group; + int i, j; + int64_t cur_tstamp; + + const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] = { + "0-2 ms", + "3-10 ms", + "11-20 ms", + "21-50 ms", + "51-100 ms", + "101-250 ms", + "251-500 ms", + "> 500 ms" + }; + + cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get()); + + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL, + "Current timestamp: %lld", cur_tstamp); + + for (i = 0; i < hif_state->hif_num_extgroup; i++) { + if (hif_state->hif_ext_group[i]) { + hif_ext_group = hif_state->hif_ext_group[i]; + + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL, + "Interrupts in the HIF Group"); + + for (j = 0; j < hif_ext_group->numirq; j++) { + QDF_TRACE(QDF_MODULE_ID_HIF, + QDF_TRACE_LEVEL_FATAL, + " %s", + hif_ext_group->irq_name + (hif_ext_group->irq[j])); + } + + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL, + "Last serviced timestamp: %lld", + hif_ext_group->tstamp); + + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL, + "Latency Bucket | Time elapsed"); + + for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) { + QDF_TRACE(QDF_MODULE_ID_HIF, + QDF_TRACE_LEVEL_FATAL, + "%s | %lld", time_str[j], + hif_ext_group-> + sched_latency_stats[j]); + } + } + } +} +#else +static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state) +{ +} +#endif + +/** + * hif_clear_napi_stats() - reset NAPI stats + * @hif_ctx: hif context + * + * return: void + */ +void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + struct hif_exec_context *hif_ext_group; + size_t i; + + for (i = 0; i < hif_state->hif_num_extgroup; i++) { + hif_ext_group = hif_state->hif_ext_group[i]; + + if (!hif_ext_group) + return; + + qdf_mem_set(hif_ext_group->sched_latency_stats, + sizeof(hif_ext_group->sched_latency_stats), + 0x0); + } +} + +qdf_export_symbol(hif_clear_napi_stats); + +#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT +/** + * hif_get_poll_times_hist_str() - Get HIF poll times histogram string + * @stats: NAPI stats to get poll time buckets + * @buf: buffer to fill histogram string + * @buf_len: length of the buffer + * + * Return: void + */ +static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf, + uint8_t buf_len) +{ + int i; + int str_index = 0; + + for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++) + str_index += qdf_scnprintf(buf + str_index, buf_len - str_index, + "%u|", stats->poll_time_buckets[i]); +} + +/** + * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI + * @hif_ext_group: hif_ext_group of type NAPI + * + * The function is called at the end of a NAPI poll to calculate poll time + * buckets. + * + * Return: void + */ +static +void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group) +{ + struct qca_napi_stat *napi_stat; + unsigned long long poll_time_ns; + uint32_t poll_time_us; + uint32_t bucket_size_us = 500; + uint32_t bucket; + uint32_t cpu_id = qdf_get_cpu(); + + poll_time_ns = sched_clock() - hif_ext_group->poll_start_time; + poll_time_us = qdf_do_div(poll_time_ns, 1000); + + napi_stat = &hif_ext_group->stats[cpu_id]; + if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time) + hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns; + + bucket = poll_time_us / bucket_size_us; + if (bucket >= QCA_NAPI_NUM_BUCKETS) + bucket = QCA_NAPI_NUM_BUCKETS - 1; + ++napi_stat->poll_time_buckets[bucket]; +} + +/** + * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield + * @hif_ext_group: hif_ext_group of type NAPI + * + * Return: true if NAPI needs to yield, else false + */ +static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group) +{ + bool time_limit_reached = false; + unsigned long long poll_time_ns; + int cpu_id = qdf_get_cpu(); + struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); + struct hif_config_info *cfg = &scn->hif_config; + + poll_time_ns = sched_clock() - hif_ext_group->poll_start_time; + time_limit_reached = + poll_time_ns > cfg->rx_softirq_max_yield_duration_ns ? 1 : 0; + + if (time_limit_reached) { + hif_ext_group->stats[cpu_id].time_limit_reached++; + hif_ext_group->force_break = true; + } + + return time_limit_reached; +} + +bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct hif_exec_context *hif_ext_group; + bool ret_val = false; + + if (!(grp_id < hif_state->hif_num_extgroup) || + !(grp_id < HIF_MAX_GROUP)) + return false; + + hif_ext_group = hif_state->hif_ext_group[grp_id]; + + if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE) + ret_val = hif_exec_poll_should_yield(hif_ext_group); + + return ret_val; +} + +/** + * hif_exec_update_service_start_time() - Update NAPI poll start time + * @hif_ext_group: hif_ext_group of type NAPI + * + * The function is called at the beginning of a NAPI poll to record the poll + * start time. + * + * Return: None + */ +static inline +void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group) +{ + hif_ext_group->poll_start_time = sched_clock(); +} + +void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + struct hif_exec_context *hif_ext_group; + struct qca_napi_stat *napi_stats; + int i, j; + + /* + * Max value of uint_32 (poll_time_bucket) = 4294967295 + * Thus we need 10 chars + 1 space =11 chars for each bucket value. + * +1 space for '\0'. + */ + char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'}; + + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, + "NAPI[#]CPU[#] |scheds |polls |comps |dones |t-lim |max(us)|hist(500us buckets)"); + + for (i = 0; + (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]); + i++) { + hif_ext_group = hif_state->hif_ext_group[i]; + for (j = 0; j < num_possible_cpus(); j++) { + napi_stats = &hif_ext_group->stats[j]; + if (!napi_stats->napi_schedules) + continue; + + hif_get_poll_times_hist_str(napi_stats, + hist_str, + sizeof(hist_str)); + QDF_TRACE(QDF_MODULE_ID_HIF, + QDF_TRACE_LEVEL_ERROR, + "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s", + i, j, + napi_stats->napi_schedules, + napi_stats->napi_polls, + napi_stats->napi_completes, + napi_stats->napi_workdone, + napi_stats->time_limit_reached, + qdf_do_div(napi_stats->napi_max_poll_time, + 1000), + hist_str); + } + } + + hif_print_napi_latency_stats(hif_state); +} + +qdf_export_symbol(hif_print_napi_stats); + +#else + +static inline +void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf, + uint8_t buf_len) +{ +} + +static inline +void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group) +{ +} + +static inline +void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group) +{ +} + +void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + struct hif_exec_context *hif_ext_group; + struct qca_napi_stat *napi_stats; + int i, j; + + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL, + "NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone"); + + for (i = 0; i < hif_state->hif_num_extgroup; i++) { + if (hif_state->hif_ext_group[i]) { + hif_ext_group = hif_state->hif_ext_group[i]; + for (j = 0; j < num_possible_cpus(); j++) { + napi_stats = &(hif_ext_group->stats[j]); + if (napi_stats->napi_schedules != 0) + QDF_TRACE(QDF_MODULE_ID_HIF, + QDF_TRACE_LEVEL_FATAL, + "NAPI[%2d]CPU[%d]: " + "%7d %7d %7d %7d ", + i, j, + napi_stats->napi_schedules, + napi_stats->napi_polls, + napi_stats->napi_completes, + napi_stats->napi_workdone); + } + } + } + + hif_print_napi_latency_stats(hif_state); +} +qdf_export_symbol(hif_print_napi_stats); +#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */ + +static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx) +{ + struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx); + + tasklet_schedule(&t_ctx->tasklet); +} + +/** + * hif_exec_tasklet() - grp tasklet + * data: context + * + * return: void + */ +static void hif_exec_tasklet_fn(unsigned long data) +{ + struct hif_exec_context *hif_ext_group = + (struct hif_exec_context *)data; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); + unsigned int work_done; + + work_done = + hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET); + + if (hif_ext_group->work_complete(hif_ext_group, work_done)) { + qdf_atomic_dec(&(scn->active_grp_tasklet_cnt)); + hif_ext_group->irq_enable(hif_ext_group); + } else { + hif_exec_tasklet_schedule(hif_ext_group); + } +} + +/** + * hif_latency_profile_measure() - calculate latency and update histogram + * hif_ext_group: hif exec context + * + * return: None + */ +#ifdef HIF_LATENCY_PROFILE_ENABLE +static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group) +{ + int64_t cur_tstamp; + int64_t time_elapsed; + + cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get()); + + if (cur_tstamp > hif_ext_group->tstamp) + time_elapsed = (cur_tstamp - hif_ext_group->tstamp); + else + time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp); + + hif_ext_group->tstamp = cur_tstamp; + + if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2) + hif_ext_group->sched_latency_stats[0]++; + else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10) + hif_ext_group->sched_latency_stats[1]++; + else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20) + hif_ext_group->sched_latency_stats[2]++; + else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50) + hif_ext_group->sched_latency_stats[3]++; + else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100) + hif_ext_group->sched_latency_stats[4]++; + else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250) + hif_ext_group->sched_latency_stats[5]++; + else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500) + hif_ext_group->sched_latency_stats[6]++; + else + hif_ext_group->sched_latency_stats[7]++; +} +#else +static inline +void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group) +{ +} +#endif + +/** + * hif_latency_profile_start() - Update the start timestamp for HIF ext group + * hif_ext_group: hif exec context + * + * return: None + */ +#ifdef HIF_LATENCY_PROFILE_ENABLE +static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group) +{ + hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get()); +} +#else +static inline +void hif_latency_profile_start(struct hif_exec_context *hif_ext_group) +{ +} +#endif + +#ifdef FEATURE_NAPI +/** + * hif_exec_poll() - napi poll + * napi: napi struct + * budget: budget for napi + * + * Return: mapping of internal budget to napi + */ +static int hif_exec_poll(struct napi_struct *napi, int budget) +{ + struct hif_napi_exec_context *napi_exec_ctx = + qdf_container_of(napi, struct hif_napi_exec_context, napi); + struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); + int work_done; + int normalized_budget = 0; + int actual_dones; + int shift = hif_ext_group->scale_bin_shift; + int cpu = smp_processor_id(); + + hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id, + 0, 0, 0, HIF_EVENT_BH_SCHED); + + hif_ext_group->force_break = false; + hif_exec_update_service_start_time(hif_ext_group); + + if (budget) + normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift); + + hif_latency_profile_measure(hif_ext_group); + + work_done = hif_ext_group->handler(hif_ext_group->context, + normalized_budget); + + actual_dones = work_done; + + if (!hif_ext_group->force_break && work_done < normalized_budget) { + napi_complete(napi); + qdf_atomic_dec(&scn->active_grp_tasklet_cnt); + hif_ext_group->irq_enable(hif_ext_group); + hif_ext_group->stats[cpu].napi_completes++; + } else { + /* if the ext_group supports time based yield, claim full work + * done anyways */ + work_done = normalized_budget; + } + + hif_ext_group->stats[cpu].napi_polls++; + hif_ext_group->stats[cpu].napi_workdone += actual_dones; + + /* map internal budget to NAPI budget */ + if (work_done) + work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift); + + hif_exec_fill_poll_time_histogram(hif_ext_group); + + return work_done; +} + +/** + * hif_exec_napi_schedule() - schedule the napi exec instance + * @ctx: a hif_exec_context known to be of napi type + */ +static void hif_exec_napi_schedule(struct hif_exec_context *ctx) +{ + struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx); + ctx->stats[smp_processor_id()].napi_schedules++; + + napi_schedule(&n_ctx->napi); +} + +/** + * hif_exec_napi_kill() - stop a napi exec context from being rescheduled + * @ctx: a hif_exec_context known to be of napi type + */ +static void hif_exec_napi_kill(struct hif_exec_context *ctx) +{ + struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx); + int irq_ind; + + if (ctx->inited) { + napi_disable(&n_ctx->napi); + ctx->inited = 0; + } + + for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++) + hif_irq_affinity_remove(ctx->os_irq[irq_ind]); + + hif_core_ctl_set_boost(false); + netif_napi_del(&(n_ctx->napi)); +} + +struct hif_execution_ops napi_sched_ops = { + .schedule = &hif_exec_napi_schedule, + .kill = &hif_exec_napi_kill, +}; + +/** + * hif_exec_napi_create() - allocate and initialize a napi exec context + * @scale: a binary shift factor to map NAPI budget from\to internal + * budget + */ +static struct hif_exec_context *hif_exec_napi_create(uint32_t scale) +{ + struct hif_napi_exec_context *ctx; + + ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context)); + if (!ctx) + return NULL; + + ctx->exec_ctx.sched_ops = &napi_sched_ops; + ctx->exec_ctx.inited = true; + ctx->exec_ctx.scale_bin_shift = scale; + qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev); + netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll, + QCA_NAPI_BUDGET); + napi_enable(&ctx->napi); + + return &ctx->exec_ctx; +} +#else +static struct hif_exec_context *hif_exec_napi_create(uint32_t scale) +{ + HIF_WARN("%s: FEATURE_NAPI not defined, making tasklet", __func__); + return hif_exec_tasklet_create(); +} +#endif + + +/** + * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled + * @ctx: a hif_exec_context known to be of tasklet type + */ +static void hif_exec_tasklet_kill(struct hif_exec_context *ctx) +{ + struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx); + int irq_ind; + + if (ctx->inited) { + tasklet_disable(&t_ctx->tasklet); + tasklet_kill(&t_ctx->tasklet); + } + ctx->inited = false; + + for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++) + hif_irq_affinity_remove(ctx->os_irq[irq_ind]); +} + +struct hif_execution_ops tasklet_sched_ops = { + .schedule = &hif_exec_tasklet_schedule, + .kill = &hif_exec_tasklet_kill, +}; + +/** + * hif_exec_tasklet_schedule() - allocate and initialize a tasklet exec context + */ +static struct hif_exec_context *hif_exec_tasklet_create(void) +{ + struct hif_tasklet_exec_context *ctx; + + ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context)); + if (!ctx) + return NULL; + + ctx->exec_ctx.sched_ops = &tasklet_sched_ops; + tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn, + (unsigned long)ctx); + + ctx->exec_ctx.inited = true; + + return &ctx->exec_ctx; +} + +/** + * hif_exec_get_ctx() - retrieve an exec context based on an id + * @softc: the hif context owning the exec context + * @id: the id of the exec context + * + * mostly added to make it easier to rename or move the context array + */ +struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc, + uint8_t id) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc); + + if (id < hif_state->hif_num_extgroup) + return hif_state->hif_ext_group[id]; + + return NULL; +} + +int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc, + uint8_t id) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc); + + if (id < hif_state->hif_num_extgroup) + return hif_state->hif_ext_group[id]->os_irq[0]; + return -EINVAL; +} + +qdf_export_symbol(hif_get_int_ctx_irq_num); + +#ifdef HIF_CPU_PERF_AFFINE_MASK +void hif_config_irq_set_perf_affinity_hint( + struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + hif_config_irq_affinity(scn); +} + +qdf_export_symbol(hif_config_irq_set_perf_affinity_hint); +#endif + +uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + struct hif_exec_context *hif_ext_group; + int i, status; + + if (scn->ext_grp_irq_configured) { + HIF_ERROR("%s Called after ext grp irq configured\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < hif_state->hif_num_extgroup; i++) { + hif_ext_group = hif_state->hif_ext_group[i]; + status = 0; + qdf_spinlock_create(&hif_ext_group->irq_lock); + if (hif_ext_group->configured && + hif_ext_group->irq_requested == false) { + hif_ext_group->irq_enabled = true; + status = hif_grp_irq_configure(scn, hif_ext_group); + } + if (status != 0) { + HIF_ERROR("%s: failed for group %d", __func__, i); + hif_ext_group->irq_enabled = false; + } + } + + scn->ext_grp_irq_configured = true; + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(hif_configure_ext_group_interrupts); + +void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (!scn || !scn->ext_grp_irq_configured) { + hif_err("scn(%pk) is NULL or grp irq not configured", scn); + return; + } + + hif_grp_irq_deconfigure(scn); + scn->ext_grp_irq_configured = false; +} + +qdf_export_symbol(hif_deconfigure_ext_group_interrupts); + +#ifdef WLAN_SUSPEND_RESUME_TEST +/** + * hif_check_and_trigger_ut_resume() - check if unit-test command was used to + * to trigger fake-suspend command, if yes + * then issue resume procedure. + * @scn: opaque HIF software context + * + * This API checks if unit-test command was used to trigger fake-suspend command + * and if answer is yes then it would trigger resume procedure. + * + * Make this API inline to save API-switch overhead and do branch-prediction to + * optimize performance impact. + * + * Return: void + */ +static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn) +{ + if (qdf_unlikely(hif_irq_trigger_ut_resume(scn))) + hif_ut_fw_resume(scn); +} +#else +static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn) +{ +} +#endif + +/** + * hif_ext_group_interrupt_handler() - handler for related interrupts + * @irq: irq number of the interrupt + * @context: the associated hif_exec_group context + * + * This callback function takes care of dissabling the associated interrupts + * and scheduling the expected bottom half for the exec_context. + * This callback function also helps keep track of the count running contexts. + */ +irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context) +{ + struct hif_exec_context *hif_ext_group = context; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); + + if (hif_ext_group->irq_requested) { + hif_latency_profile_start(hif_ext_group); + + hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id, + 0, 0, 0, HIF_EVENT_IRQ_TRIGGER); + + hif_ext_group->irq_disable(hif_ext_group); + /* + * if private ioctl has issued fake suspend command to put + * FW in D0-WOW state then here is our chance to bring FW out + * of WOW mode. + * + * The reason why you need to explicitly wake-up the FW is here: + * APSS should have been in fully awake through-out when + * fake APSS suspend command was issued (to put FW in WOW mode) + * hence organic way of waking-up the FW + * (as part-of APSS-host wake-up) won't happen because + * in reality APSS didn't really suspend. + */ + hif_check_and_trigger_ut_resume(scn); + qdf_atomic_inc(&scn->active_grp_tasklet_cnt); + + hif_ext_group->sched_ops->schedule(hif_ext_group); + } + + return IRQ_HANDLED; +} + +/** + * hif_exec_kill() - grp tasklet kill + * scn: hif_softc + * + * return: void + */ +void hif_exec_kill(struct hif_opaque_softc *hif_ctx) +{ + int i; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + + for (i = 0; i < hif_state->hif_num_extgroup; i++) + hif_state->hif_ext_group[i]->sched_ops->kill( + hif_state->hif_ext_group[i]); + + qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0); +} + +/** + * hif_register_ext_group() - API to register external group + * interrupt handler. + * @hif_ctx : HIF Context + * @numirq: number of irq's in the group + * @irq: array of irq values + * @handler: callback interrupt handler function + * @cb_ctx: context to passed in callback + * @type: napi vs tasklet + * + * Return: status + */ +uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx, + uint32_t numirq, uint32_t irq[], ext_intr_handler handler, + void *cb_ctx, const char *context_name, + enum hif_exec_type type, uint32_t scale) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct hif_exec_context *hif_ext_group; + + if (scn->ext_grp_irq_configured) { + HIF_ERROR("%s Called after ext grp irq configured\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) { + HIF_ERROR("%s Max groups reached\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (numirq >= HIF_MAX_GRP_IRQ) { + HIF_ERROR("%s invalid numirq\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + hif_ext_group = hif_exec_create(type, scale); + if (!hif_ext_group) + return QDF_STATUS_E_FAILURE; + + hif_state->hif_ext_group[hif_state->hif_num_extgroup] = + hif_ext_group; + + hif_ext_group->numirq = numirq; + qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0])); + hif_ext_group->context = cb_ctx; + hif_ext_group->handler = handler; + hif_ext_group->configured = true; + hif_ext_group->grp_id = hif_state->hif_num_extgroup; + hif_ext_group->hif = hif_ctx; + hif_ext_group->context_name = context_name; + hif_ext_group->type = type; + hif_event_history_init(hif_ext_group); + + hif_state->hif_num_extgroup++; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(hif_register_ext_group); + +/** + * hif_exec_create() - create an execution context + * @type: the type of execution context to create + */ +struct hif_exec_context *hif_exec_create(enum hif_exec_type type, + uint32_t scale) +{ + hif_debug("%s: create exec_type %d budget %d\n", + __func__, type, QCA_NAPI_BUDGET * scale); + + switch (type) { + case HIF_EXEC_NAPI_TYPE: + return hif_exec_napi_create(scale); + + case HIF_EXEC_TASKLET_TYPE: + return hif_exec_tasklet_create(); + default: + return NULL; + } +} + +/** + * hif_exec_destroy() - free the hif_exec context + * @ctx: context to free + * + * please kill the context before freeing it to avoid a use after free. + */ +void hif_exec_destroy(struct hif_exec_context *ctx) +{ + qdf_spinlock_destroy(&ctx->irq_lock); + qdf_mem_free(ctx); +} + +/** + * hif_deregister_exec_group() - API to free the exec contexts + * @hif_ctx: HIF context + * @context_name: name of the module whose contexts need to be deregistered + * + * This function deregisters the contexts of the requestor identified + * based on the context_name & frees the memory. + * + * Return: void + */ +void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx, + const char *context_name) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct hif_exec_context *hif_ext_group; + int i; + + for (i = 0; i < HIF_MAX_GROUP; i++) { + hif_ext_group = hif_state->hif_ext_group[i]; + + if (!hif_ext_group) + continue; + + hif_debug("%s: Deregistering grp id %d name %s\n", + __func__, + hif_ext_group->grp_id, + hif_ext_group->context_name); + + if (strcmp(hif_ext_group->context_name, context_name) == 0) { + hif_ext_group->sched_ops->kill(hif_ext_group); + hif_state->hif_ext_group[i] = NULL; + hif_exec_destroy(hif_ext_group); + hif_state->hif_num_extgroup--; + } + + } +} +qdf_export_symbol(hif_deregister_exec_group); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_exec.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_exec.h new file mode 100644 index 0000000000000000000000000000000000000000..42756e81882700a92ef82561a0e65b4b940d6e97 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_exec.h @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __HIF_EXEC_H__ +#define __HIF_EXEC_H__ + +#include +#include +#include +/*Number of buckets for latency*/ +#define HIF_SCHED_LATENCY_BUCKETS 8 + +/*Buckets for latency between 0 to 2 ms*/ +#define HIF_SCHED_LATENCY_BUCKET_0_2 2 +/*Buckets for latency between 3 to 10 ms*/ +#define HIF_SCHED_LATENCY_BUCKET_3_10 10 +/*Buckets for latency between 11 to 20 ms*/ +#define HIF_SCHED_LATENCY_BUCKET_11_20 20 +/*Buckets for latency between 21 to 50 ms*/ +#define HIF_SCHED_LATENCY_BUCKET_21_50 50 +/*Buckets for latency between 50 to 100 ms*/ +#define HIF_SCHED_LATENCY_BUCKET_51_100 100 +/*Buckets for latency between 100 to 250 ms*/ +#define HIF_SCHED_LATENCY_BUCKET_101_250 250 +/*Buckets for latency between 250 to 500 ms*/ +#define HIF_SCHED_LATENCY_BUCKET_251_500 500 + +struct hif_exec_context; + +struct hif_execution_ops { + char *context_type; + void (*schedule)(struct hif_exec_context *); + void (*reschedule)(struct hif_exec_context *); + void (*kill)(struct hif_exec_context *); +}; + +/** + * hif_exec_context: only ever allocated as a subtype eg. + * hif_tasklet_exec_context + * + * @context: context for the handler function to use. + * @evt_hist: a pointer to the DP event history + * @context_name: a pointer to a const string for debugging. + * this should help whenever there could be ambiguity + * in what type of context the void* context points to + * @irq: irq handle coresponding to hw block + * @os_irq: irq handle for irq_afinity + * @cpu: the cpu this context should be affined to + * @work_complete: Function call called when leaving the execution context to + * determine if this context should reschedule or wait for an interrupt. + * This function may be used as a hook for post processing. + * + * @sched_latency_stats: schdule latency stats for different latency buckets + * @tstamp: timestamp when napi poll happens + * @irq_disable: called before scheduling the context. + * @irq_enable: called when the context leaves polling mode + * @irq_name: pointer to function to return irq name/string mapped to irq number + * @irq_lock: spinlock used while enabling/disabling IRQs + * @type: type of execution context + * @poll_start_time: hif napi poll start time in nanoseconds + * @force_break: flag to indicate if HIF execution context was forced to return + * to HIF. This means there is more work to be done. Hence do not + * call napi_complete. + */ +struct hif_exec_context { + struct hif_execution_ops *sched_ops; + struct hif_opaque_softc *hif; + uint32_t numirq; + uint32_t irq[HIF_MAX_GRP_IRQ]; + uint32_t os_irq[HIF_MAX_GRP_IRQ]; + cpumask_t cpumask; + uint32_t grp_id; + uint32_t scale_bin_shift; + const char *context_name; + void *context; + ext_intr_handler handler; + struct hif_event_history *evt_hist; + + bool (*work_complete)(struct hif_exec_context *, int work_done); + void (*irq_enable)(struct hif_exec_context *); + void (*irq_disable)(struct hif_exec_context *); + const char* (*irq_name)(int irq_no); + uint64_t sched_latency_stats[HIF_SCHED_LATENCY_BUCKETS]; + uint64_t tstamp; + + uint8_t cpu; + struct qca_napi_stat stats[NR_CPUS]; + bool inited; + bool configured; + bool irq_requested; + bool irq_enabled; + qdf_spinlock_t irq_lock; + enum hif_exec_type type; + unsigned long long poll_start_time; + bool force_break; +#ifdef HIF_CPU_PERF_AFFINE_MASK + /* Stores the affinity hint mask for each WLAN IRQ */ + qdf_cpu_mask new_cpu_mask[HIF_MAX_GRP_IRQ]; +#endif +}; + +/** + * struct hif_tasklet_exec_context - exec_context for tasklets + * @exec_ctx: inherited data type + * @tasklet: tasklet structure for scheduling + */ +struct hif_tasklet_exec_context { + struct hif_exec_context exec_ctx; + struct tasklet_struct tasklet; +}; + +/** + * struct hif_napi_exec_context - exec_context for NAPI + * @exec_ctx: inherited data type + * @netdev: dummy net device associated with the napi context + * @napi: napi structure used in scheduling + */ +struct hif_napi_exec_context { + struct hif_exec_context exec_ctx; + struct net_device netdev; /* dummy net_dev */ + struct napi_struct napi; +}; + +static inline struct hif_napi_exec_context* + hif_exec_get_napi(struct hif_exec_context *ctx) +{ + return (struct hif_napi_exec_context *) ctx; +} + +static inline struct hif_tasklet_exec_context* + hif_exec_get_tasklet(struct hif_exec_context *ctx) +{ + return (struct hif_tasklet_exec_context *) ctx; +} + +struct hif_exec_context *hif_exec_create(enum hif_exec_type type, + uint32_t scale); + +void hif_exec_destroy(struct hif_exec_context *ctx); + +int hif_grp_irq_configure(struct hif_softc *scn, + struct hif_exec_context *hif_exec); +void hif_grp_irq_deconfigure(struct hif_softc *scn); +irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context); + +struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *hif, + uint8_t id); +void hif_exec_kill(struct hif_opaque_softc *scn); + +#ifdef HIF_CPU_PERF_AFFINE_MASK +/** + * hif_pci_irq_set_affinity_hint() - API to set IRQ affinity + * @hif_ext_group: hif_ext_group to extract the irq info + * + * This function will set the WLAN IRQ affinity to the gold + * cores only for defconfig builds + * + * Return: none + */ +void hif_pci_irq_set_affinity_hint( + struct hif_exec_context *hif_ext_group); + +/** + * hif_pci_ce_irq_set_affinity_hint() - API to set IRQ affinity + * + * This function will set the CE IRQ affinity to the gold cores + * only for defconfig builds + * + * @hif_softc: hif_softc to extract the CE irq info + * + * Return: none + */ +void hif_pci_ce_irq_set_affinity_hint( + struct hif_softc *scn); + +/** + * hif_pci_ce_irq_remove_affinity_hint() - remove affinity for the irq + * @irq: irq number to remove affinity from + */ +static inline void hif_pci_ce_irq_remove_affinity_hint(int irq) +{ + hif_irq_affinity_remove(irq); +} +#else +static inline void hif_pci_irq_set_affinity_hint( + struct hif_exec_context *hif_ext_group) +{ +} +static inline void hif_pci_ce_irq_set_affinity_hint( + struct hif_softc *scn) +{ +} +static inline void hif_pci_ce_irq_remove_affinity_hint(int irq) +{ +} +#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */ +#endif + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_hw_version.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_hw_version.h new file mode 100644 index 0000000000000000000000000000000000000000..a11daf51e27bb8acf5509a565df08ddb80c6af34 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_hw_version.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2012-2018, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef HIF_HW_VERSION_H +#define HIF_HW_VERSION_H + +#define AR6004_VERSION_REV1_3 0x31c8088a +#define AR9888_REV2_VERSION 0x4100016c +#define AR9887_REV1_VERSION 0x4100016d +#define AR6320_REV1_VERSION 0x5000000 +#define AR6320_REV1_1_VERSION 0x5000001 +#define AR6320_REV1_3_VERSION 0x5000003 +#define AR6320_REV2_1_VERSION 0x5010000 +#define AR6320_REV3_VERSION 0x5020000 +#define AR6320_REV3_2_VERSION 0x5030000 +#define QCA9379_REV1_VERSION 0x5040000 +#define AR6320_DEV_VERSION 0x1000000 +#define QCA9377_REV1_1_VERSION 0x5020001 +#define QCA6390_V1 0x50040000 +#define QCA6490_V1 0x50060000 +#define WCN3990_v1 0x40000000 +#define WCN3990_v2 0x40010000 +#define WCN3990_v2_1 0x40010002 +#define WCN3998 0x40030001 +#define AR900B_REV_2 0x1 + +struct qwlan_hw { + u32 id; + u32 subid; + const char *name; +}; + +#endif /* HIF_HW_VERSION_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_io32.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_io32.h new file mode 100644 index 0000000000000000000000000000000000000000..4a8f93a992a327e08b8f9fab9b891d5127dd3ea9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_io32.h @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __HIF_IO32_H__ +#define __HIF_IO32_H__ + +#include +#include "hif.h" +#include "hif_main.h" + +#if defined(HIF_REG_WINDOW_SUPPORT) && (defined(HIF_PCI) || \ + defined(HIF_IPCI)) + +static inline +void hif_write32_mb_reg_window(void *sc, + void __iomem *addr, uint32_t value); +static inline +uint32_t hif_read32_mb_reg_window(void *sc, + void __iomem *addr); +#define hif_read32_mb(scn, addr) \ + hif_read32_mb_reg_window((void *)scn, \ + (void __iomem *)addr) +#define hif_write32_mb(scn, addr, value) \ + hif_write32_mb_reg_window((void *)scn, \ + (void __iomem *)addr, value) + +#else +#define hif_read32_mb(scn, addr) ioread32((void __iomem *)addr) +#define hif_write32_mb(scn, addr, value) \ + iowrite32((u32)(value), (void __iomem *)(addr)) +#endif + +#define Q_TARGET_ACCESS_BEGIN(scn) \ + hif_target_sleep_state_adjust(scn, false, true) +#define Q_TARGET_ACCESS_END(scn) \ + hif_target_sleep_state_adjust(scn, true, false) +#define TARGET_REGISTER_ACCESS_ALLOWED(scn)\ + hif_is_target_register_access_allowed(scn) + +/* + * A_TARGET_ACCESS_LIKELY will not wait for the target to wake up before + * continuing execution. Because A_TARGET_ACCESS_LIKELY does not guarantee + * that the target is awake before continuing, Q_TARGET_ACCESS macros must + * protect the actual target access. Since Q_TARGET_ACCESS protect the actual + * target access, A_TARGET_ACCESS_LIKELY hints are optional. + * + * To ignore "LIKELY" hints, set CONFIG_TARGET_ACCESS_LIKELY to 0 + * (slightly worse performance, less power) + * + * To use "LIKELY" hints, set CONFIG_TARGET_ACCESS_LIKELY to 1 + * (slightly better performance, more power) + * + * note: if a bus doesn't use hif_target_sleep_state_adjust, this will have + * no impact. + */ +#define CONFIG_TARGET_ACCESS_LIKELY 0 +#if CONFIG_TARGET_ACCESS_LIKELY +#define A_TARGET_ACCESS_LIKELY(scn) \ + hif_target_sleep_state_adjust(scn, false, false) +#define A_TARGET_ACCESS_UNLIKELY(scn) \ + hif_target_sleep_state_adjust(scn, true, false) +#else /* CONFIG_ATH_PCIE_ACCESS_LIKELY */ +#define A_TARGET_ACCESS_LIKELY(scn) \ + do { \ + unsigned long unused = (unsigned long)(scn); \ + unused = unused; \ + } while (0) + +#define A_TARGET_ACCESS_UNLIKELY(scn) \ + do { \ + unsigned long unused = (unsigned long)(scn); \ + unused = unused; \ + } while (0) +#endif /* CONFIG_ATH_PCIE_ACCESS_LIKELY */ + + +#ifdef HIF_PCI +#include "hif_io32_pci.h" +#endif +#ifdef HIF_SNOC +#include "hif_io32_snoc.h" +#endif +#ifdef HIF_IPCI +#include "hif_io32_ipci.h" +#endif + +#if defined(HIF_REG_WINDOW_SUPPORT) && (defined(HIF_PCI) || \ + defined(HIF_IPCI)) + +#include "qdf_lock.h" +#include "qdf_util.h" + +/* Device memory is 32MB but bar size is only 1MB. + * Register remapping logic is used to access 32MB device memory. + * 0-512KB : Fixed address, 512KB-1MB : remapped address. + * Use PCIE_REMAP_1M_BAR_CTRL register to set window. + * Offset: 0x310C + * Bits : Field Name + * 31 FUNCTION_ENABLE_V + * 5:0 ADDR_24_19_V + */ + +#define MAX_UNWINDOWED_ADDRESS 0x80000 /* 512KB */ +#define WINDOW_ENABLE_BIT 0x80000000 /* 31st bit to enable window */ +#define WINDOW_REG_ADDRESS 0x310C /* PCIE_REMAP_1M_BAR_CTRL Reg offset */ +#define WINDOW_SHIFT 19 +#define WINDOW_VALUE_MASK 0x3F +#define WINDOW_START MAX_UNWINDOWED_ADDRESS +#define WINDOW_RANGE_MASK 0x7FFFF + +static inline void hif_select_window(struct hif_pci_softc *sc, uint32_t offset) +{ + uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK; + + if (window != sc->register_window) { + qdf_iowrite32(sc->mem + WINDOW_REG_ADDRESS, + WINDOW_ENABLE_BIT | window); + sc->register_window = window; + } +} + +/** + * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1 + * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS + * note3: WINDOW_VALUE_MASK = big enough that trying to write past that window + * would be a bug + */ +static inline void hif_write32_mb_reg_window(void *scn, + void __iomem *addr, uint32_t value) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + uint32_t offset = addr - sc->mem; + + if (!sc->use_register_windowing || + offset < MAX_UNWINDOWED_ADDRESS) { + qdf_iowrite32(addr, value); + } else { + qdf_spin_lock_irqsave(&sc->register_access_lock); + hif_select_window(sc, offset); + qdf_iowrite32(sc->mem + WINDOW_START + + (offset & WINDOW_RANGE_MASK), value); + qdf_spin_unlock_irqrestore(&sc->register_access_lock); + } +} + +static inline uint32_t hif_read32_mb_reg_window(void *scn, void __iomem *addr) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + uint32_t ret; + uint32_t offset = addr - sc->mem; + + if (!sc->use_register_windowing || + offset < MAX_UNWINDOWED_ADDRESS) { + return qdf_ioread32(addr); + } + + qdf_spin_lock_irqsave(&sc->register_access_lock); + hif_select_window(sc, offset); + ret = qdf_ioread32(sc->mem + WINDOW_START + + (offset & WINDOW_RANGE_MASK)); + qdf_spin_unlock_irqrestore(&sc->register_access_lock); + + return ret; +} +#endif + +#ifdef CONFIG_IO_MEM_ACCESS_DEBUG +uint32_t hif_target_read_checked(struct hif_softc *scn, + uint32_t offset); +void hif_target_write_checked(struct hif_softc *scn, uint32_t offset, + uint32_t value); + +#define A_TARGET_READ(scn, offset) \ + hif_target_read_checked(scn, (offset)) +#define A_TARGET_WRITE(scn, offset, value) \ + hif_target_write_checked(scn, (offset), (value)) +#else /* CONFIG_ATH_PCIE_ACCESS_DEBUG */ +#define A_TARGET_READ(scn, offset) \ + hif_read32_mb(scn, scn->mem + (offset)) +#define A_TARGET_WRITE(scn, offset, value) \ + hif_write32_mb(scn, (scn->mem) + (offset), value) +#endif + +void hif_irq_enable(struct hif_softc *scn, int irq_id); +void hif_irq_disable(struct hif_softc *scn, int irq_id); + +#endif /* __HIF_IO32_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_irq_affinity.c b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_irq_affinity.c new file mode 100644 index 0000000000000000000000000000000000000000..a265344f43d94628453fca62c8fdc81d906497ce --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_irq_affinity.c @@ -0,0 +1,527 @@ +/* + * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: hif_irq_afinity.c + * + * This irq afinity implementation is os dependent, so this can be treated as + * an abstraction layer... Should this be moved into a /linux folder? + */ + +#include /* memset */ + +/* Linux headers */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(FEATURE_NAPI_DEBUG) && defined(HIF_IRQ_AFFINITY) +/* + * Local functions + * - no argument checks, all internal/trusted callers + */ +static void hnc_dump_cpus(struct qca_napi_data *napid) +{ + hif_napi_stats(napid); +} +#else +static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ }; +#endif /* FEATURE_NAPI_DEBUG */ + +#ifdef HIF_IRQ_AFFINITY +/** + * + * hif_exec_event() - reacts to events that impact irq affinity + * @hif : pointer to hif context + * @evnt: event that has been detected + * @data: more data regarding the event + * + * Description: + * This function handles two types of events: + * 1- Events that change the state of NAPI (enabled/disabled): + * {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE} + * The state is retrievable by "hdd_napi_enabled(-1)" + * - NAPI will be on if either INI file is on and it has not been disabled + * by a subsequent vendor CMD, + * or it has been enabled by a vendor CMD. + * 2- Events that change the CPU affinity of a NAPI instance/IRQ: + * {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE} + * - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode + * - NAPI will switch throughput mode based on hdd_napi_throughput_policy() + * - In LO tput mode, NAPI will yield control if its interrupts to the system + * management functions. However in HI throughput mode, NAPI will actively + * manage its interrupts/instances (by trying to disperse them out to + * separate performance cores). + * - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events. + * + * + In some cases (roaming peer management is the only case so far), a + * a client can trigger a "SERIALIZE" event. Basically, this means that the + * users is asking NAPI to go into a truly single execution context state. + * So, NAPI indicates to msm-irqbalancer that it wants to be blacklisted, + * (if called for the first time) and then moves all IRQs (for NAPI + * instances) to be collapsed to a single core. If called multiple times, + * it will just re-collapse the CPUs. This is because blacklist-on() API + * is reference-counted, and because the API has already been called. + * + * Such a user, should call "DESERIALIZE" (NORMAL) event, to set NAPI to go + * to its "normal" operation. Optionally, they can give a timeout value (in + * multiples of BusBandwidthCheckPeriod -- 100 msecs by default). In this + * case, NAPI will just set the current throughput state to uninitialized + * and set the delay period. Once policy handler is called, it would skip + * applying the policy delay period times, and otherwise apply the policy. + * + * Return: + * < 0: some error + * = 0: event handled successfully + */ +int hif_exec_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event, + void *data) +{ + int rc = 0; + uint32_t prev_state; + struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); + struct qca_napi_data *napid = &(hif->napi_data); + enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED; + enum { + BLACKLIST_NOT_PENDING, + BLACKLIST_ON_PENDING, + BLACKLIST_OFF_PENDING + } blacklist_pending = BLACKLIST_NOT_PENDING; + + NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data); + + qdf_spin_lock_bh(&(napid->lock)); + prev_state = napid->state; + switch (event) { + case NAPI_EVT_INI_FILE: + case NAPI_EVT_CMD_STATE: + case NAPI_EVT_INT_STATE: + /* deprecated */ + break; + + case NAPI_EVT_CPU_STATE: { + int cpu = ((unsigned long int)data >> 16); + int val = ((unsigned long int)data & 0x0ff); + + NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d", + __func__, cpu, val); + + /* state has already been set by hnc_cpu_notify_cb */ + if ((val == QCA_NAPI_CPU_DOWN) && + (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */ + (napid->napi_cpu[cpu].napis != 0)) { + NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d", + __func__, cpu); + rc = hif_exec_cpu_migrate(napid, + cpu, + HNC_ACT_RELOCATE); + napid->napi_cpu[cpu].napis = 0; + } + /* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */ + break; + } + + case NAPI_EVT_TPUT_STATE: { + tput_mode = (enum qca_napi_tput_state)data; + if (tput_mode == QCA_NAPI_TPUT_LO) { + /* from TPUT_HI -> TPUT_LO */ + NAPI_DEBUG("%s: Moving to napi_tput_LO state", + __func__); + blacklist_pending = BLACKLIST_OFF_PENDING; + /* + * Ideally we should "collapse" interrupts here, since + * we are "dispersing" interrupts in the "else" case. + * This allows the possibility that our interrupts may + * still be on the perf cluster the next time we enter + * high tput mode. However, the irq_balancer is free + * to move our interrupts to power cluster once + * blacklisting has been turned off in the "else" case. + */ + } else { + /* from TPUT_LO -> TPUT->HI */ + NAPI_DEBUG("%s: Moving to napi_tput_HI state", + __func__); + rc = hif_exec_cpu_migrate(napid, + HNC_ANY_CPU, + HNC_ACT_DISPERSE); + + blacklist_pending = BLACKLIST_ON_PENDING; + } + napid->napi_mode = tput_mode; + break; + } + + case NAPI_EVT_USR_SERIAL: { + unsigned long users = (unsigned long)data; + + NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld", + __func__, users); + + rc = hif_exec_cpu_migrate(napid, + HNC_ANY_CPU, + HNC_ACT_COLLAPSE); + if ((users == 0) && (rc == 0)) + blacklist_pending = BLACKLIST_ON_PENDING; + break; + } + case NAPI_EVT_USR_NORMAL: { + NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__); + if (!napid->user_cpu_affin_mask) + blacklist_pending = BLACKLIST_OFF_PENDING; + /* + * Deserialization timeout is handled at hdd layer; + * just mark current mode to uninitialized to ensure + * it will be set when the delay is over + */ + napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED; + break; + } + default: { + HIF_ERROR("%s: unknown event: %d (data=0x%0lx)", + __func__, event, (unsigned long) data); + break; + } /* default */ + }; /* switch */ + + + switch (blacklist_pending) { + case BLACKLIST_ON_PENDING: + /* assume the control of WLAN IRQs */ + hif_napi_cpu_blacklist(napid, BLACKLIST_ON); + break; + case BLACKLIST_OFF_PENDING: + /* yield the control of WLAN IRQs */ + hif_napi_cpu_blacklist(napid, BLACKLIST_OFF); + break; + default: /* nothing to do */ + break; + } /* switch blacklist_pending */ + + qdf_spin_unlock_bh(&(napid->lock)); + + NAPI_DEBUG("<--[rc=%d]", rc); + return rc; +} + +#endif + +/** + * hncm_migrate_to() - migrates a NAPI to a CPU + * @napid: pointer to NAPI block + * @ce_id: CE_id of the NAPI instance + * @didx : index in the CPU topology table for the CPU to migrate to + * + * Migrates NAPI (identified by the CE_id) to the destination core + * Updates the napi_map of the destination entry + * + * Return: + * =0 : success + * <0 : error + */ +static int hncm_exec_migrate_to(struct qca_napi_data *napid, uint8_t ctx_id, + int didx) +{ + struct hif_exec_context *exec_ctx; + int rc = 0; + int status = 0; + int ind; + + NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, ctx_id, didx); + + exec_ctx = hif_exec_get_ctx(&napid->hif_softc->osc, ctx_id); + if (!exec_ctx) + return -EINVAL; + + exec_ctx->cpumask.bits[0] = (1 << didx); + + for (ind = 0; ind < exec_ctx->numirq; ind++) { + if (exec_ctx->os_irq[ind]) { + irq_modify_status(exec_ctx->os_irq[ind], + IRQ_NO_BALANCING, 0); + rc = irq_set_affinity_hint(exec_ctx->os_irq[ind], + &exec_ctx->cpumask); + if (rc) + status = rc; + } + } + + /* unmark the napis bitmap in the cpu table */ + napid->napi_cpu[exec_ctx->cpu].napis &= ~(0x01 << ctx_id); + /* mark the napis bitmap for the new designated cpu */ + napid->napi_cpu[didx].napis |= (0x01 << ctx_id); + exec_ctx->cpu = didx; + + NAPI_DEBUG("<--%s[%d]", __func__, rc); + return status; +} + +/** + * hncm_dest_cpu() - finds a destination CPU for NAPI + * @napid: pointer to NAPI block + * @act : RELOCATE | COLLAPSE | DISPERSE + * + * Finds the designated destionation for the next IRQ. + * RELOCATE: translated to either COLLAPSE or DISPERSE based + * on napid->napi_mode (throughput state) + * COLLAPSE: All have the same destination: the first online CPU in lilcl + * DISPERSE: One of the CPU in bigcl, which has the smallest number of + * NAPIs on it + * + * Return: >=0 : index in the cpu topology table + * : < 0 : error + */ +static int hncm_dest_cpu(struct qca_napi_data *napid, int act) +{ + int destidx = -1; + int head, i; + + NAPI_DEBUG("-->%s(act=%d)", __func__, act); + if (act == HNC_ACT_RELOCATE) { + if (napid->napi_mode == QCA_NAPI_TPUT_LO) + act = HNC_ACT_COLLAPSE; + else + act = HNC_ACT_DISPERSE; + NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d", + __func__, act); + } + if (act == HNC_ACT_COLLAPSE) { + head = i = napid->lilcl_head; +retry_collapse: + while (i >= 0) { + if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) { + destidx = i; + break; + } + i = napid->napi_cpu[i].cluster_nxt; + } + if ((destidx < 0) && (head == napid->lilcl_head)) { + NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl", + __func__); + head = i = napid->bigcl_head; + goto retry_collapse; + } + } else { /* HNC_ACT_DISPERSE */ + int smallest = 99; /* all 32 bits full */ + int smallidx = -1; + + head = i = napid->bigcl_head; +retry_disperse: + while (i >= 0) { + if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) && + (hweight32(napid->napi_cpu[i].napis) <= smallest)) { + smallest = napid->napi_cpu[i].napis; + smallidx = i; + } + i = napid->napi_cpu[i].cluster_nxt; + } + destidx = smallidx; + if ((destidx < 0) && (head == napid->bigcl_head)) { + NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl", + __func__); + head = i = napid->lilcl_head; + goto retry_disperse; + } + } + NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx); + return destidx; +} +/** + * hif_napi_cpu_migrate() - migrate IRQs away + * @cpu: -1: all CPUs specific CPU + * @act: COLLAPSE | DISPERSE + * + * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible + * cores. Eligible cores are: + * act=COLLAPSE -> the first online core of the little cluster + * act=DISPERSE -> separate cores of the big cluster, so that each core will + * host minimum number of NAPIs/IRQs (napid->cpus[cpu].napis) + * + * Note that this function is called with a spinlock acquired already. + * + * Return: =0: success + * <0: error + */ +int hif_exec_cpu_migrate(struct qca_napi_data *napid, int cpu, int action) +{ + int rc = 0; + struct qca_napi_cpu *cpup; + int i, dind; + uint32_t napis; + + + NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)", + __func__, cpu, action); + + if (napid->exec_map == 0) { + NAPI_DEBUG("%s: datapath contexts to disperse", __func__); + goto hncm_return; + } + cpup = napid->napi_cpu; + + switch (action) { + case HNC_ACT_RELOCATE: + case HNC_ACT_DISPERSE: + case HNC_ACT_COLLAPSE: { + /* first find the src napi set */ + if (cpu == HNC_ANY_CPU) + napis = napid->exec_map; + else + napis = cpup[cpu].napis; + /* then clear the napi bitmap on each CPU */ + for (i = 0; i < NR_CPUS; i++) + cpup[i].napis = 0; + /* then for each of the NAPIs to disperse: */ + for (i = 0; i < HIF_MAX_GROUP; i++) + if (napis & (1 << i)) { + /* find a destination CPU */ + dind = hncm_dest_cpu(napid, action); + if (dind >= 0) { + rc = hncm_exec_migrate_to(napid, i, + dind); + } else { + NAPI_DEBUG("No dest for NAPI ce%d", i); + hnc_dump_cpus(napid); + rc = -1; + } + } + break; + } + default: { + NAPI_DEBUG("%s: bad action: %d\n", __func__, action); + QDF_BUG(0); + break; + } + } /* switch action */ + +hncm_return: + hnc_dump_cpus(napid); + return rc; +} + + +/** + * hif_exec_bl_irq() - calls irq_modify_status to enable/disable blacklisting + * @napid: pointer to qca_napi_data structure + * @bl_flag: blacklist flag to enable/disable blacklisting + * + * The function enables/disables blacklisting for all the copy engine + * interrupts on which NAPI is enabled. + * + * Return: None + */ +static inline void hif_exec_bl_irq(struct qca_napi_data *napid, bool bl_flag) +{ + int i, j; + struct hif_exec_context *exec_ctx; + + for (i = 0; i < HIF_MAX_GROUP; i++) { + /* check if NAPI is enabled on the CE */ + if (!(napid->exec_map & (0x01 << i))) + continue; + + /*double check that NAPI is allocated for the CE */ + exec_ctx = hif_exec_get_ctx(&napid->hif_softc->osc, i); + if (!(exec_ctx)) + continue; + + if (bl_flag == true) + for (j = 0; j < exec_ctx->numirq; j++) + irq_modify_status(exec_ctx->os_irq[j], + 0, IRQ_NO_BALANCING); + else + for (j = 0; j < exec_ctx->numirq; j++) + irq_modify_status(exec_ctx->os_irq[j], + IRQ_NO_BALANCING, 0); + HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i); + } +} + +/** + * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts. + * @napid: pointer to qca_napi_data structure + * @op: blacklist operation to perform + * + * The function enables/disables/queries blacklisting for all CE RX + * interrupts with NAPI enabled. Besides blacklisting, it also enables/disables + * core_ctl_set_boost. + * Once blacklisting is enabled, the interrupts will not be managed by the IRQ + * balancer. + * + * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST is not enabled + * for BLACKLIST_QUERY op - blacklist refcount + * for BLACKLIST_ON op - return value from core_ctl_set_boost API + * for BLACKLIST_OFF op - return value from core_ctl_set_boost API + */ +int hif_exec_cpu_blacklist(struct qca_napi_data *napid, + enum qca_blacklist_op op) +{ + int rc = 0; + static int ref_count; /* = 0 by the compiler */ + uint8_t flags = napid->flags; + bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING; + bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST; + + NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op); + + if (!(bl_en && ccb_en)) { + rc = -EINVAL; + goto out; + } + + switch (op) { + case BLACKLIST_QUERY: + rc = ref_count; + break; + case BLACKLIST_ON: + ref_count++; + rc = 0; + if (ref_count == 1) { + rc = hif_napi_core_ctl_set_boost(true); + NAPI_DEBUG("boost_on() returns %d - refcnt=%d", + rc, ref_count); + hif_exec_bl_irq(napid, true); + } + break; + case BLACKLIST_OFF: + if (ref_count) + ref_count--; + rc = 0; + if (ref_count == 0) { + rc = hif_napi_core_ctl_set_boost(false); + NAPI_DEBUG("boost_off() returns %d - refcnt=%d", + rc, ref_count); + hif_exec_bl_irq(napid, false); + } + break; + default: + NAPI_DEBUG("Invalid blacklist op: %d", op); + rc = -EINVAL; + } /* switch */ +out: + NAPI_DEBUG("<--%s[%d]", __func__, rc); + return rc; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_irq_affinity.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_irq_affinity.h new file mode 100644 index 0000000000000000000000000000000000000000..c0d90e324dc3cfd9750b0f34403d36fb0f3c18f2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_irq_affinity.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __HIF_IRQ_AFFINITY_H__ +#define __HIF_IRQ_AFFINITY_H__ + +#ifdef HIF_IRQ_AFFINITY +#ifndef FEATURE_NAPI +#error /*HIF_IRQ_AFFINITY currently relies on FEATURE_NAPI*/ +#endif +#endif + +/* CLD headers */ +#include /* struct hif_opaque_softc; */ +#include +struct hif_opaque_softc; +enum qca_blacklist_op; + +int hif_exec_cpu_migrate(struct qca_napi_data *napid, int cpu, int action); + +int hif_exec_cpu_blacklist(struct qca_napi_data *napid, + enum qca_blacklist_op op); + +#ifdef HIF_IRQ_AFFINITY +int hif_exec_event(struct hif_opaque_softc *hif, + enum qca_napi_event event, + void *data); + + +/* hif_irq_affinity_remove() - remove affinity before freeing the irq + * @os_irq: irq number to remove affinity from + */ +static inline void hif_irq_affinity_remove(int os_irq) +{ + irq_set_affinity_hint(os_irq, NULL); +} +#else +static inline void hif_irq_affinity_remove(int os_irq) +{ +} + +static inline int hif_exec_event(struct hif_opaque_softc *hif, + enum qca_napi_event event, + void *data) +{ + return 0; +} +#endif + +/** + * hif_napi_core_ctl_set_boost() - This API is used to move + * tasks to CPUs with higher capacity + * @boost: If set move tasks to higher capacity CPUs + * + * This function moves tasks to higher capacity CPUs than those + * where the tasks would have normally ended up + * + * Return: None + */ +static inline int hif_napi_core_ctl_set_boost(bool boost) +{ + return qdf_core_ctl_set_boost(boost); +} + +#ifdef HIF_CPU_PERF_AFFINE_MASK +static inline int hif_core_ctl_set_boost(bool boost) +{ + return hif_napi_core_ctl_set_boost(boost); +} +#else +static inline int hif_core_ctl_set_boost(bool boost) +{ + return 0; +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.c b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.c new file mode 100644 index 0000000000000000000000000000000000000000..45bd30103d8cbc7311b18d66fea09f6fda397d22 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.c @@ -0,0 +1,1765 @@ +/* + * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "targcfg.h" +#include "qdf_lock.h" +#include "qdf_status.h" +#include "qdf_status.h" +#include /* qdf_atomic_read */ +#include +#include "hif_io32.h" +#include +#include +#include "regtable.h" +#define ATH_MODULE_NAME hif +#include +#include "hif_main.h" +#include "hif_hw_version.h" +#if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \ + defined(HIF_IPCI)) +#include "ce_tasklet.h" +#include "ce_api.h" +#endif +#include "qdf_trace.h" +#include "qdf_status.h" +#include "hif_debug.h" +#include "mp_dev.h" +#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) +#include "hal_api.h" +#endif +#include "hif_napi.h" +#include "hif_unit_test_suspend_i.h" +#include "qdf_module.h" +#ifdef HIF_CE_LOG_INFO +#include +#include +#endif + +void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start) +{ + hif_trigger_dump(hif_ctx, cmd_id, start); +} + +/** + * hif_get_target_id(): hif_get_target_id + * + * Return the virtual memory base address to the caller + * + * @scn: hif_softc + * + * Return: A_target_id_t + */ +A_target_id_t hif_get_target_id(struct hif_softc *scn) +{ + return scn->mem; +} + +/** + * hif_get_targetdef(): hif_get_targetdef + * @scn: scn + * + * Return: void * + */ +void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + return scn->targetdef; +} + +#ifdef FORCE_WAKE +void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx, + bool init_phase) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (ce_srng_based(scn)) + hal_set_init_phase(scn->hal_soc, init_phase); +} +#endif /* FORCE_WAKE */ + +/** + * hif_vote_link_down(): unvote for link up + * + * Call hif_vote_link_down to release a previous request made using + * hif_vote_link_up. A hif_vote_link_down call should only be made + * after a corresponding hif_vote_link_up, otherwise you could be + * negating a vote from another source. When no votes are present + * hif will not guarantee the linkstate after hif_bus_suspend. + * + * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread + * and initialization deinitialization sequencences. + * + * Return: n/a + */ +void hif_vote_link_down(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + QDF_BUG(scn); + scn->linkstate_vote--; + HIF_INFO("Down_linkstate_vote %d", scn->linkstate_vote); + if (scn->linkstate_vote == 0) + hif_bus_prevent_linkdown(scn, false); +} + +/** + * hif_vote_link_up(): vote to prevent bus from suspending + * + * Makes hif guarantee that fw can message the host normally + * durring suspend. + * + * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread + * and initialization deinitialization sequencences. + * + * Return: n/a + */ +void hif_vote_link_up(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + QDF_BUG(scn); + scn->linkstate_vote++; + HIF_INFO("Up_linkstate_vote %d", scn->linkstate_vote); + if (scn->linkstate_vote == 1) + hif_bus_prevent_linkdown(scn, true); +} + +/** + * hif_can_suspend_link(): query if hif is permitted to suspend the link + * + * Hif will ensure that the link won't be suspended if the upperlayers + * don't want it to. + * + * SYNCHRONIZATION: MC thread is stopped before bus suspend thus + * we don't need extra locking to ensure votes dont change while + * we are in the process of suspending or resuming. + * + * Return: false if hif will guarantee link up durring suspend. + */ +bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + QDF_BUG(scn); + return scn->linkstate_vote == 0; +} + +/** + * hif_hia_item_address(): hif_hia_item_address + * @target_type: target_type + * @item_offset: item_offset + * + * Return: n/a + */ +uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset) +{ + switch (target_type) { + case TARGET_TYPE_AR6002: + return AR6002_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_AR6003: + return AR6003_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_AR6004: + return AR6004_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_AR6006: + return AR6006_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_AR9888: + return AR9888_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_AR6320: + case TARGET_TYPE_AR6320V2: + return AR6320_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_ADRASTEA: + /* ADRASTEA doesn't have a host interest address */ + ASSERT(0); + return 0; + case TARGET_TYPE_AR900B: + return AR900B_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_QCA9984: + return QCA9984_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_QCA9888: + return QCA9888_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_IPQ4019: + return IPQ4019_HOST_INTEREST_ADDRESS + item_offset; + + default: + ASSERT(0); + return 0; + } +} + +/** + * hif_max_num_receives_reached() - check max receive is reached + * @scn: HIF Context + * @count: unsigned int. + * + * Output check status as bool + * + * Return: bool + */ +bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count) +{ + if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn))) + return count > 120; + else + return count > MAX_NUM_OF_RECEIVES; +} + +/** + * init_buffer_count() - initial buffer count + * @maxSize: qdf_size_t + * + * routine to modify the initial buffer count to be allocated on an os + * platform basis. Platform owner will need to modify this as needed + * + * Return: qdf_size_t + */ +qdf_size_t init_buffer_count(qdf_size_t maxSize) +{ + return maxSize; +} + +/** + * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint + * @hif_ctx: hif context + * @htc_htt_tx_endpoint: htt_tx_endpoint + * + * Return: void + */ +void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx, + int htc_htt_tx_endpoint) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (!scn) { + HIF_ERROR("%s: error: scn or scn->hif_sc is NULL!", + __func__); + return; + } + + scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint; +} +qdf_export_symbol(hif_save_htc_htt_config_endpoint); + +static const struct qwlan_hw qwlan_hw_list[] = { + { + .id = AR6320_REV1_VERSION, + .subid = 0, + .name = "QCA6174_REV1", + }, + { + .id = AR6320_REV1_1_VERSION, + .subid = 0x1, + .name = "QCA6174_REV1_1", + }, + { + .id = AR6320_REV1_3_VERSION, + .subid = 0x2, + .name = "QCA6174_REV1_3", + }, + { + .id = AR6320_REV2_1_VERSION, + .subid = 0x4, + .name = "QCA6174_REV2_1", + }, + { + .id = AR6320_REV2_1_VERSION, + .subid = 0x5, + .name = "QCA6174_REV2_2", + }, + { + .id = AR6320_REV3_VERSION, + .subid = 0x6, + .name = "QCA6174_REV2.3", + }, + { + .id = AR6320_REV3_VERSION, + .subid = 0x8, + .name = "QCA6174_REV3", + }, + { + .id = AR6320_REV3_VERSION, + .subid = 0x9, + .name = "QCA6174_REV3_1", + }, + { + .id = AR6320_REV3_2_VERSION, + .subid = 0xA, + .name = "AR6320_REV3_2_VERSION", + }, + { + .id = QCA6390_V1, + .subid = 0x0, + .name = "QCA6390_V1", + }, + { + .id = QCA6490_V1, + .subid = 0x0, + .name = "QCA6490_V1", + }, + { + .id = WCN3990_v1, + .subid = 0x0, + .name = "WCN3990_V1", + }, + { + .id = WCN3990_v2, + .subid = 0x0, + .name = "WCN3990_V2", + }, + { + .id = WCN3990_v2_1, + .subid = 0x0, + .name = "WCN3990_V2.1", + }, + { + .id = WCN3998, + .subid = 0x0, + .name = "WCN3998", + }, + { + .id = QCA9379_REV1_VERSION, + .subid = 0xC, + .name = "QCA9379_REV1", + }, + { + .id = QCA9379_REV1_VERSION, + .subid = 0xD, + .name = "QCA9379_REV1_1", + } +}; + +/** + * hif_get_hw_name(): get a human readable name for the hardware + * @info: Target Info + * + * Return: human readable name for the underlying wifi hardware. + */ +static const char *hif_get_hw_name(struct hif_target_info *info) +{ + int i; + + if (info->hw_name) + return info->hw_name; + + for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) { + if (info->target_version == qwlan_hw_list[i].id && + info->target_revision == qwlan_hw_list[i].subid) { + return qwlan_hw_list[i].name; + } + } + + info->hw_name = qdf_mem_malloc(64); + if (!info->hw_name) + return "Unknown Device (nomem)"; + + i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.", + info->target_version); + if (i < 0) + return "Unknown Device (snprintf failure)"; + else + return info->hw_name; +} + +/** + * hif_get_hw_info(): hif_get_hw_info + * @scn: scn + * @version: version + * @revision: revision + * + * Return: n/a + */ +void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision, + const char **target_name) +{ + struct hif_target_info *info = hif_get_target_info_handle(scn); + struct hif_softc *sc = HIF_GET_SOFTC(scn); + + if (sc->bus_type == QDF_BUS_TYPE_USB) + hif_usb_get_hw_info(sc); + + *version = info->target_version; + *revision = info->target_revision; + *target_name = hif_get_hw_name(info); +} + +/** + * hif_get_dev_ba(): API to get device base address. + * @scn: scn + * @version: version + * @revision: revision + * + * Return: n/a + */ +void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle) +{ + struct hif_softc *scn = (struct hif_softc *)hif_handle; + + return scn->mem; +} +qdf_export_symbol(hif_get_dev_ba); + +#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG +/** + * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc + * @scn: hif context + * @psoc: psoc objmgr handle + * + * Return: None + */ +static inline +void hif_get_cfg_from_psoc(struct hif_softc *scn, + struct wlan_objmgr_psoc *psoc) +{ + if (psoc) { + scn->ini_cfg.ce_status_ring_timer_threshold = + cfg_get(psoc, + CFG_CE_STATUS_RING_TIMER_THRESHOLD); + scn->ini_cfg.ce_status_ring_batch_count_threshold = + cfg_get(psoc, + CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD); + } +} +#else +static inline +void hif_get_cfg_from_psoc(struct hif_softc *scn, + struct wlan_objmgr_psoc *psoc) +{ +} +#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */ + +#ifdef HIF_CPU_PERF_AFFINE_MASK +/** + * __hif_cpu_hotplug_notify() - CPU hotplug event handler + * @cpu: CPU Id of the CPU generating the event + * @cpu_up: true if the CPU is online + * + * Return: None + */ +static void __hif_cpu_hotplug_notify(void *context, + uint32_t cpu, bool cpu_up) +{ + struct hif_softc *scn = context; + + if (!scn) + return; + if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn)) + return; + + if (cpu_up) { + hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn)); + hif_debug("Setting affinity for online CPU: %d", cpu); + } else { + hif_debug("Skip setting affinity for offline CPU: %d", cpu); + } +} + +/** + * hif_cpu_hotplug_notify - cpu core up/down notification + * handler + * @cpu: CPU generating the event + * @cpu_up: true if the CPU is online + * + * Return: None + */ +static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up) +{ + struct qdf_op_sync *op_sync; + + if (qdf_op_protect(&op_sync)) + return; + + __hif_cpu_hotplug_notify(context, cpu, cpu_up); + + qdf_op_unprotect(op_sync); +} + +static void hif_cpu_online_cb(void *context, uint32_t cpu) +{ + hif_cpu_hotplug_notify(context, cpu, true); +} + +static void hif_cpu_before_offline_cb(void *context, uint32_t cpu) +{ + hif_cpu_hotplug_notify(context, cpu, false); +} + +static void hif_cpuhp_register(struct hif_softc *scn) +{ + if (!scn) { + hif_info_high("cannot register hotplug notifiers"); + return; + } + qdf_cpuhp_register(&scn->cpuhp_event_handle, + scn, + hif_cpu_online_cb, + hif_cpu_before_offline_cb); +} + +static void hif_cpuhp_unregister(struct hif_softc *scn) +{ + if (!scn) { + hif_info_high("cannot unregister hotplug notifiers"); + return; + } + qdf_cpuhp_unregister(&scn->cpuhp_event_handle); +} + +#else +static void hif_cpuhp_register(struct hif_softc *scn) +{ +} + +static void hif_cpuhp_unregister(struct hif_softc *scn) +{ +} +#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */ + +#if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO) +/** + * hif_recovery_notifier_cb - Recovery notifier callback to log + * hang event data + * @block: notifier block + * @state: state + * @data: notifier data + * + * Return: status + */ +static +int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state, + void *data) +{ + struct qdf_notifer_data *notif_data = data; + qdf_notif_block *notif_block; + struct hif_softc *hif_handle; + bool bus_id_invalid; + + if (!data || !block) + return -EINVAL; + + notif_block = qdf_container_of(block, qdf_notif_block, notif_block); + + hif_handle = notif_block->priv_data; + if (!hif_handle) + return -EINVAL; + + bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data, + ¬if_data->offset); + if (bus_id_invalid) + return NOTIFY_STOP_MASK; + + hif_log_ce_info(hif_handle, notif_data->hang_data, + ¬if_data->offset); + + return 0; +} + +/** + * hif_register_recovery_notifier - Register hif recovery notifier + * @hif_handle: hif handle + * + * Return: status + */ +static +QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle) +{ + qdf_notif_block *hif_notifier; + + if (!hif_handle) + return QDF_STATUS_E_FAILURE; + + hif_notifier = &hif_handle->hif_recovery_notifier; + + hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb; + hif_notifier->priv_data = hif_handle; + return qdf_hang_event_register_notifier(hif_notifier); +} + +/** + * hif_unregister_recovery_notifier - Un-register hif recovery notifier + * @hif_handle: hif handle + * + * Return: status + */ +static +QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle) +{ + qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier; + + return qdf_hang_event_unregister_notifier(hif_notifier); +} +#else +static inline +QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle) +{ + return QDF_STATUS_SUCCESS; +} + +static inline +QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, + uint32_t mode, + enum qdf_bus_type bus_type, + struct hif_driver_state_callbacks *cbk, + struct wlan_objmgr_psoc *psoc) +{ + struct hif_softc *scn; + QDF_STATUS status = QDF_STATUS_SUCCESS; + int bus_context_size = hif_bus_get_context_size(bus_type); + + if (bus_context_size == 0) { + HIF_ERROR("%s: context size 0 not allowed", __func__); + return NULL; + } + + scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size); + if (!scn) { + HIF_ERROR("%s: cannot alloc memory for HIF context of size:%d", + __func__, bus_context_size); + return GET_HIF_OPAQUE_HDL(scn); + } + + scn->qdf_dev = qdf_ctx; + scn->hif_con_param = mode; + qdf_atomic_init(&scn->active_tasklet_cnt); + qdf_atomic_init(&scn->active_grp_tasklet_cnt); + qdf_atomic_init(&scn->link_suspended); + qdf_atomic_init(&scn->tasklet_from_intr); + hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn)); + qdf_mem_copy(&scn->callbacks, cbk, + sizeof(struct hif_driver_state_callbacks)); + scn->bus_type = bus_type; + + hif_pm_set_link_state(GET_HIF_OPAQUE_HDL(scn), HIF_PM_LINK_STATE_DOWN); + hif_get_cfg_from_psoc(scn, psoc); + + hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn)); + status = hif_bus_open(scn, bus_type); + if (status != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: hif_bus_open error = %d, bus_type = %d", + __func__, status, bus_type); + qdf_mem_free(scn); + scn = NULL; + } + hif_cpuhp_register(scn); + return GET_HIF_OPAQUE_HDL(scn); +} + +#ifdef ADRASTEA_RRI_ON_DDR +/** + * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri + * @scn: hif context + * + * Return: none + */ +void hif_uninit_rri_on_ddr(struct hif_softc *scn) +{ + if (scn->vaddr_rri_on_ddr) + qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, + (CE_COUNT * sizeof(uint32_t)), + scn->vaddr_rri_on_ddr, + scn->paddr_rri_on_ddr, 0); + scn->vaddr_rri_on_ddr = NULL; +} +#endif + +/** + * hif_close(): hif_close + * @hif_ctx: hif_ctx + * + * Return: n/a + */ +void hif_close(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (!scn) { + HIF_ERROR("%s: hif_opaque_softc is NULL", __func__); + return; + } + + if (scn->athdiag_procfs_inited) { + athdiag_procfs_remove(); + scn->athdiag_procfs_inited = false; + } + + if (scn->target_info.hw_name) { + char *hw_name = scn->target_info.hw_name; + + scn->target_info.hw_name = "ErrUnloading"; + qdf_mem_free(hw_name); + } + + hif_uninit_rri_on_ddr(scn); + hif_cleanup_static_buf_to_target(scn); + hif_cpuhp_unregister(scn); + + hif_bus_close(scn); + + qdf_mem_free(scn); +} + +/** + * hif_get_num_active_tasklets() - get the number of active + * tasklets pending to be completed. + * @scn: HIF context + * + * Returns: the number of tasklets which are active + */ +static inline int hif_get_num_active_tasklets(struct hif_softc *scn) +{ + return qdf_atomic_read(&scn->active_tasklet_cnt); +} + +/** + * hif_get_num_active_grp_tasklets() - get the number of active + * datapath group tasklets pending to be completed. + * @scn: HIF context + * + * Returns: the number of datapath group tasklets which are active + */ +static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn) +{ + return qdf_atomic_read(&scn->active_grp_tasklet_cnt); +} + +#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \ + defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \ + defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \ + defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018)) +/** + * hif_get_num_pending_work() - get the number of entries in + * the workqueue pending to be completed. + * @scn: HIF context + * + * Returns: the number of tasklets which are active + */ +static inline int hif_get_num_pending_work(struct hif_softc *scn) +{ + return hal_get_reg_write_pending_work(scn->hal_soc); +} +#else + +static inline int hif_get_num_pending_work(struct hif_softc *scn) +{ + return 0; +} +#endif + +QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn) +{ + uint32_t task_drain_wait_cnt = 0; + int tasklet = 0, grp_tasklet = 0, work = 0; + + while ((tasklet = hif_get_num_active_tasklets(scn)) || + (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) || + (work = hif_get_num_pending_work(scn))) { + if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) { + hif_err("pending tasklets %d grp tasklets %d work %d", + tasklet, grp_tasklet, work); + return QDF_STATUS_E_FAULT; + } + hif_info("waiting for tasklets %d grp tasklets %d work %d", + tasklet, grp_tasklet, work); + msleep(10); + } + + return QDF_STATUS_SUCCESS; +} + +#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \ + defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \ + defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \ + defined(QCA_WIFI_QCA6750)) +static QDF_STATUS hif_hal_attach(struct hif_softc *scn) +{ + if (ce_srng_based(scn)) { + scn->hal_soc = hal_attach( + hif_softc_to_hif_opaque_softc(scn), + scn->qdf_dev); + if (!scn->hal_soc) + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS hif_hal_detach(struct hif_softc *scn) +{ + if (ce_srng_based(scn)) { + hal_detach(scn->hal_soc); + scn->hal_soc = NULL; + } + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS hif_hal_attach(struct hif_softc *scn) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS hif_hal_detach(struct hif_softc *scn) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * hif_enable(): hif_enable + * @hif_ctx: hif_ctx + * @dev: dev + * @bdev: bus dev + * @bid: bus ID + * @bus_type: bus type + * @type: enable type + * + * Return: QDF_STATUS + */ +QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev, + void *bdev, + const struct hif_bus_id *bid, + enum qdf_bus_type bus_type, + enum hif_enable_type type) +{ + QDF_STATUS status; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (!scn) { + HIF_ERROR("%s: hif_ctx = NULL", __func__); + return QDF_STATUS_E_NULL_VALUE; + } + + status = hif_enable_bus(scn, dev, bdev, bid, type); + if (status != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: hif_enable_bus error = %d", + __func__, status); + return status; + } + + hif_pm_set_link_state(GET_HIF_OPAQUE_HDL(scn), HIF_PM_LINK_STATE_UP); + status = hif_hal_attach(scn); + if (status != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: hal attach failed", __func__); + goto disable_bus; + } + + if (hif_bus_configure(scn)) { + HIF_ERROR("%s: Target probe failed.", __func__); + status = QDF_STATUS_E_FAILURE; + goto hal_detach; + } + + hif_ut_suspend_init(scn); + hif_register_recovery_notifier(scn); + + /* + * Flag to avoid potential unallocated memory access from MSI + * interrupt handler which could get scheduled as soon as MSI + * is enabled, i.e to take care of the race due to the order + * in where MSI is enabled before the memory, that will be + * in interrupt handlers, is allocated. + */ + + scn->hif_init_done = true; + + HIF_DBG("%s: OK", __func__); + + return QDF_STATUS_SUCCESS; + +hal_detach: + hif_hal_detach(scn); +disable_bus: + hif_disable_bus(scn); + return status; +} + +void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (!scn) + return; + + hif_unregister_recovery_notifier(scn); + + hif_nointrs(scn); + if (scn->hif_init_done == false) + hif_shutdown_device(hif_ctx); + else + hif_stop(hif_ctx); + + hif_hal_detach(scn); + + hif_pm_set_link_state(hif_ctx, HIF_PM_LINK_STATE_DOWN); + hif_disable_bus(scn); + + hif_wlan_disable(scn); + + scn->notice_send = false; + + HIF_DBG("%s: X", __func__); +} + +#ifdef CE_TASKLET_DEBUG_ENABLE +void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (!scn) + return; + + scn->ce_latency_stats = val; +} +#endif + +void hif_display_stats(struct hif_opaque_softc *hif_ctx) +{ + hif_display_bus_stats(hif_ctx); +} + +qdf_export_symbol(hif_display_stats); + +void hif_clear_stats(struct hif_opaque_softc *hif_ctx) +{ + hif_clear_bus_stats(hif_ctx); +} + +/** + * hif_crash_shutdown_dump_bus_register() - dump bus registers + * @hif_ctx: hif_ctx + * + * Return: n/a + */ +#if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) \ +&& defined(DEBUG) + +static void hif_crash_shutdown_dump_bus_register(void *hif_ctx) +{ + struct hif_opaque_softc *scn = hif_ctx; + + if (hif_check_soc_status(scn)) + return; + + if (hif_dump_registers(scn)) + HIF_ERROR("Failed to dump bus registers!"); +} + +/** + * hif_crash_shutdown(): hif_crash_shutdown + * + * This function is called by the platform driver to dump CE registers + * + * @hif_ctx: hif_ctx + * + * Return: n/a + */ +void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (!hif_ctx) + return; + + if (scn->bus_type == QDF_BUS_TYPE_SNOC) { + HIF_INFO_MED("%s: RAM dump disabled for bustype %d", + __func__, scn->bus_type); + return; + } + + if (TARGET_STATUS_RESET == scn->target_status) { + HIF_INFO_MED("%s: Target is already asserted, ignore!", + __func__); + return; + } + + if (hif_is_load_or_unload_in_progress(scn)) { + HIF_ERROR("%s: Load/unload is in progress, ignore!", __func__); + return; + } + + hif_crash_shutdown_dump_bus_register(hif_ctx); + + if (ol_copy_ramdump(hif_ctx)) + goto out; + + HIF_INFO_MED("%s: RAM dump collecting completed!", __func__); + +out: + return; +} +#else +void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx) +{ + HIF_INFO_MED("%s: Collecting target RAM dump disabled", + __func__); +} +#endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */ + +#ifdef QCA_WIFI_3_0 +/** + * hif_check_fw_reg(): hif_check_fw_reg + * @scn: scn + * @state: + * + * Return: int + */ +int hif_check_fw_reg(struct hif_opaque_softc *scn) +{ + return 0; +} +#endif + +/** + * hif_read_phy_mem_base(): hif_read_phy_mem_base + * @scn: scn + * @phy_mem_base: physical mem base + * + * Return: n/a + */ +void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base) +{ + *phy_mem_base = scn->mem_pa; +} +qdf_export_symbol(hif_read_phy_mem_base); + +/** + * hif_get_device_type(): hif_get_device_type + * @device_id: device_id + * @revision_id: revision_id + * @hif_type: returned hif_type + * @target_type: returned target_type + * + * Return: int + */ +int hif_get_device_type(uint32_t device_id, + uint32_t revision_id, + uint32_t *hif_type, uint32_t *target_type) +{ + int ret = 0; + + switch (device_id) { + case ADRASTEA_DEVICE_ID_P2_E12: + + *hif_type = HIF_TYPE_ADRASTEA; + *target_type = TARGET_TYPE_ADRASTEA; + break; + + case AR9888_DEVICE_ID: + *hif_type = HIF_TYPE_AR9888; + *target_type = TARGET_TYPE_AR9888; + break; + + case AR6320_DEVICE_ID: + switch (revision_id) { + case AR6320_FW_1_1: + case AR6320_FW_1_3: + *hif_type = HIF_TYPE_AR6320; + *target_type = TARGET_TYPE_AR6320; + break; + + case AR6320_FW_2_0: + case AR6320_FW_3_0: + case AR6320_FW_3_2: + *hif_type = HIF_TYPE_AR6320V2; + *target_type = TARGET_TYPE_AR6320V2; + break; + + default: + HIF_ERROR("%s: error - dev_id = 0x%x, rev_id = 0x%x", + __func__, device_id, revision_id); + ret = -ENODEV; + goto end; + } + break; + + case AR9887_DEVICE_ID: + *hif_type = HIF_TYPE_AR9888; + *target_type = TARGET_TYPE_AR9888; + HIF_INFO(" *********** AR9887 **************"); + break; + + case QCA9984_DEVICE_ID: + *hif_type = HIF_TYPE_QCA9984; + *target_type = TARGET_TYPE_QCA9984; + HIF_INFO(" *********** QCA9984 *************"); + break; + + case QCA9888_DEVICE_ID: + *hif_type = HIF_TYPE_QCA9888; + *target_type = TARGET_TYPE_QCA9888; + HIF_INFO(" *********** QCA9888 *************"); + break; + + case AR900B_DEVICE_ID: + *hif_type = HIF_TYPE_AR900B; + *target_type = TARGET_TYPE_AR900B; + HIF_INFO(" *********** AR900B *************"); + break; + + case IPQ4019_DEVICE_ID: + *hif_type = HIF_TYPE_IPQ4019; + *target_type = TARGET_TYPE_IPQ4019; + HIF_INFO(" *********** IPQ4019 *************"); + break; + + case QCA8074_DEVICE_ID: + *hif_type = HIF_TYPE_QCA8074; + *target_type = TARGET_TYPE_QCA8074; + HIF_INFO(" *********** QCA8074 *************\n"); + break; + + case QCA6290_EMULATION_DEVICE_ID: + case QCA6290_DEVICE_ID: + *hif_type = HIF_TYPE_QCA6290; + *target_type = TARGET_TYPE_QCA6290; + HIF_INFO(" *********** QCA6290EMU *************\n"); + break; + + case QCN9000_DEVICE_ID: + *hif_type = HIF_TYPE_QCN9000; + *target_type = TARGET_TYPE_QCN9000; + HIF_INFO(" *********** QCN9000 *************\n"); + break; + + case QCN7605_DEVICE_ID: + case QCN7605_COMPOSITE: + case QCN7605_STANDALONE: + case QCN7605_STANDALONE_V2: + case QCN7605_COMPOSITE_V2: + *hif_type = HIF_TYPE_QCN7605; + *target_type = TARGET_TYPE_QCN7605; + HIF_INFO(" *********** QCN7605 *************\n"); + break; + + case QCA6390_DEVICE_ID: + case QCA6390_EMULATION_DEVICE_ID: + *hif_type = HIF_TYPE_QCA6390; + *target_type = TARGET_TYPE_QCA6390; + HIF_INFO(" *********** QCA6390 *************\n"); + break; + + case QCA6490_DEVICE_ID: + case QCA6490_EMULATION_DEVICE_ID: + *hif_type = HIF_TYPE_QCA6490; + *target_type = TARGET_TYPE_QCA6490; + HIF_INFO(" *********** QCA6490 *************\n"); + break; + + case QCA6750_DEVICE_ID: + case QCA6750_EMULATION_DEVICE_ID: + *hif_type = HIF_TYPE_QCA6750; + *target_type = TARGET_TYPE_QCA6750; + HIF_INFO(" *********** QCA6750 *************\n"); + break; + + case QCA8074V2_DEVICE_ID: + *hif_type = HIF_TYPE_QCA8074V2; + *target_type = TARGET_TYPE_QCA8074V2; + HIF_INFO(" *********** QCA8074V2 *************\n"); + break; + + case QCA6018_DEVICE_ID: + case RUMIM2M_DEVICE_ID_NODE0: + case RUMIM2M_DEVICE_ID_NODE1: + case RUMIM2M_DEVICE_ID_NODE2: + case RUMIM2M_DEVICE_ID_NODE3: + case RUMIM2M_DEVICE_ID_NODE4: + case RUMIM2M_DEVICE_ID_NODE5: + *hif_type = HIF_TYPE_QCA6018; + *target_type = TARGET_TYPE_QCA6018; + HIF_INFO(" *********** QCA6018 *************\n"); + break; + + default: + HIF_ERROR("%s: Unsupported device ID = 0x%x!", + __func__, device_id); + ret = -ENODEV; + break; + } + + if (*target_type == TARGET_TYPE_UNKNOWN) { + HIF_ERROR("%s: Unsupported target_type!", __func__); + ret = -ENODEV; + } +end: + return ret; +} + +/** + * hif_get_bus_type() - return the bus type + * + * Return: enum qdf_bus_type + */ +enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); + + return scn->bus_type; +} + +/** + * Target info and ini parameters are global to the driver + * Hence these structures are exposed to all the modules in + * the driver and they don't need to maintains multiple copies + * of the same info, instead get the handle from hif and + * modify them in hif + */ + +/** + * hif_get_ini_handle() - API to get hif_config_param handle + * @hif_ctx: HIF Context + * + * Return: pointer to hif_config_info + */ +struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx); + + return &sc->hif_config; +} + +/** + * hif_get_target_info_handle() - API to get hif_target_info handle + * @hif_ctx: HIF context + * + * Return: Pointer to hif_target_info + */ +struct hif_target_info *hif_get_target_info_handle( + struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx); + + return &sc->target_info; + +} +qdf_export_symbol(hif_get_target_info_handle); + +#ifdef RECEIVE_OFFLOAD +void hif_offld_flush_cb_register(struct hif_opaque_softc *scn, + void (offld_flush_handler)(void *)) +{ + if (hif_napi_enabled(scn, -1)) + hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler); + else + HIF_ERROR("NAPI not enabled\n"); +} +qdf_export_symbol(hif_offld_flush_cb_register); + +void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn) +{ + if (hif_napi_enabled(scn, -1)) + hif_napi_rx_offld_flush_cb_deregister(scn); + else + HIF_ERROR("NAPI not enabled\n"); +} +qdf_export_symbol(hif_offld_flush_cb_deregister); + +int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl) +{ + if (hif_napi_enabled(hif_hdl, -1)) + return NAPI_PIPE2ID(ctx_id); + else + return ctx_id; +} +#else /* RECEIVE_OFFLOAD */ +int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl) +{ + return 0; +} +qdf_export_symbol(hif_get_rx_ctx_id); +#endif /* RECEIVE_OFFLOAD */ + +#if defined(FEATURE_LRO) + +/** + * hif_get_lro_info - Returns LRO instance for instance ID + * @ctx_id: LRO instance ID + * @hif_hdl: HIF Context + * + * Return: Pointer to LRO instance. + */ +void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl) +{ + void *data; + + if (hif_napi_enabled(hif_hdl, -1)) + data = hif_napi_get_lro_info(hif_hdl, ctx_id); + else + data = hif_ce_get_lro_ctx(hif_hdl, ctx_id); + + return data; +} +#endif + +/** + * hif_get_target_status - API to get target status + * @hif_ctx: HIF Context + * + * Return: enum hif_target_status + */ +enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + return scn->target_status; +} +qdf_export_symbol(hif_get_target_status); + +/** + * hif_set_target_status() - API to set target status + * @hif_ctx: HIF Context + * @status: Target Status + * + * Return: void + */ +void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum + hif_target_status status) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + scn->target_status = status; +} + +/** + * hif_init_ini_config() - API to initialize HIF configuration parameters + * @hif_ctx: HIF Context + * @cfg: HIF Configuration + * + * Return: void + */ +void hif_init_ini_config(struct hif_opaque_softc *hif_ctx, + struct hif_config_info *cfg) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info)); +} + +/** + * hif_get_conparam() - API to get driver mode in HIF + * @scn: HIF Context + * + * Return: driver mode of operation + */ +uint32_t hif_get_conparam(struct hif_softc *scn) +{ + if (!scn) + return 0; + + return scn->hif_con_param; +} + +/** + * hif_get_callbacks_handle() - API to get callbacks Handle + * @scn: HIF Context + * + * Return: pointer to HIF Callbacks + */ +struct hif_driver_state_callbacks *hif_get_callbacks_handle( + struct hif_softc *scn) +{ + return &scn->callbacks; +} + +/** + * hif_is_driver_unloading() - API to query upper layers if driver is unloading + * @scn: HIF Context + * + * Return: True/False + */ +bool hif_is_driver_unloading(struct hif_softc *scn) +{ + struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); + + if (cbk && cbk->is_driver_unloading) + return cbk->is_driver_unloading(cbk->context); + + return false; +} + +/** + * hif_is_load_or_unload_in_progress() - API to query upper layers if + * load/unload in progress + * @scn: HIF Context + * + * Return: True/False + */ +bool hif_is_load_or_unload_in_progress(struct hif_softc *scn) +{ + struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); + + if (cbk && cbk->is_load_unload_in_progress) + return cbk->is_load_unload_in_progress(cbk->context); + + return false; +} + +/** + * hif_is_recovery_in_progress() - API to query upper layers if recovery in + * progress + * @scn: HIF Context + * + * Return: True/False + */ +bool hif_is_recovery_in_progress(struct hif_softc *scn) +{ + struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); + + if (cbk && cbk->is_recovery_in_progress) + return cbk->is_recovery_in_progress(cbk->context); + + return false; +} + +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \ + defined(HIF_IPCI) + +/** + * hif_update_pipe_callback() - API to register pipe specific callbacks + * @osc: Opaque softc + * @pipeid: pipe id + * @callbacks: callbacks to register + * + * Return: void + */ + +void hif_update_pipe_callback(struct hif_opaque_softc *osc, + u_int8_t pipeid, + struct hif_msg_callbacks *callbacks) +{ + struct hif_softc *scn = HIF_GET_SOFTC(osc); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct HIF_CE_pipe_info *pipe_info; + + QDF_BUG(pipeid < CE_COUNT_MAX); + + HIF_INFO_LO("+%s pipeid %d\n", __func__, pipeid); + + pipe_info = &hif_state->pipe_info[pipeid]; + + qdf_mem_copy(&pipe_info->pipe_callbacks, + callbacks, sizeof(pipe_info->pipe_callbacks)); + + HIF_INFO_LO("-%s\n", __func__); +} +qdf_export_symbol(hif_update_pipe_callback); + +/** + * hif_is_target_ready() - API to query if target is in ready state + * progress + * @scn: HIF Context + * + * Return: True/False + */ +bool hif_is_target_ready(struct hif_softc *scn) +{ + struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); + + if (cbk && cbk->is_target_ready) + return cbk->is_target_ready(cbk->context); + /* + * if callback is not registered then there is no way to determine + * if target is ready. In-such case return true to indicate that + * target is ready. + */ + return true; +} +qdf_export_symbol(hif_is_target_ready); + +int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_handle); + struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); + + if (cbk && cbk->get_bandwidth_level) + return cbk->get_bandwidth_level(cbk->context); + + return 0; +} + +qdf_export_symbol(hif_get_bandwidth_level); + +#ifdef DP_MEM_PRE_ALLOC +void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn, + qdf_size_t size, + qdf_dma_addr_t *paddr, + uint32_t ring_type, + uint8_t *is_mem_prealloc) +{ + void *vaddr = NULL; + struct hif_driver_state_callbacks *cbk = + hif_get_callbacks_handle(scn); + + *is_mem_prealloc = false; + if (cbk && cbk->prealloc_get_consistent_mem_unaligned) { + vaddr = cbk->prealloc_get_consistent_mem_unaligned(size, + paddr, + ring_type); + if (vaddr) { + *is_mem_prealloc = true; + goto end; + } + } + + vaddr = qdf_mem_alloc_consistent(scn->qdf_dev, + scn->qdf_dev->dev, + size, + paddr); +end: + dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d", + *is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr, + (void *)*paddr, (int)size, ring_type); + + return vaddr; +} + +void hif_mem_free_consistent_unaligned(struct hif_softc *scn, + qdf_size_t size, + void *vaddr, + qdf_dma_addr_t paddr, + qdf_dma_context_t memctx, + uint8_t is_mem_prealloc) +{ + struct hif_driver_state_callbacks *cbk = + hif_get_callbacks_handle(scn); + + if (is_mem_prealloc) { + if (cbk && cbk->prealloc_put_consistent_mem_unaligned) { + cbk->prealloc_put_consistent_mem_unaligned(vaddr); + } else { + dp_warn("dp_prealloc_put_consistent_unligned NULL"); + QDF_BUG(0); + } + } else { + qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, + size, vaddr, paddr, memctx); + } +} +#endif + +/** + * hif_batch_send() - API to access hif specific function + * ce_batch_send. + * @osc: HIF Context + * @msdu : list of msdus to be sent + * @transfer_id : transfer id + * @len : donwloaded length + * + * Return: list of msds not sent + */ +qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, + uint32_t transfer_id, u_int32_t len, uint32_t sendhead) +{ + void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE); + + return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id, + len, sendhead); +} +qdf_export_symbol(hif_batch_send); + +/** + * hif_update_tx_ring() - API to access hif specific function + * ce_update_tx_ring. + * @osc: HIF Context + * @num_htt_cmpls : number of htt compl received. + * + * Return: void + */ +void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls) +{ + void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE); + + ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls); +} +qdf_export_symbol(hif_update_tx_ring); + + +/** + * hif_send_single() - API to access hif specific function + * ce_send_single. + * @osc: HIF Context + * @msdu : msdu to be sent + * @transfer_id: transfer id + * @len : downloaded length + * + * Return: msdu sent status + */ +QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, + uint32_t transfer_id, u_int32_t len) +{ + void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE); + + return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id, + len); +} +qdf_export_symbol(hif_send_single); +#endif + +/** + * hif_reg_write() - API to access hif specific function + * hif_write32_mb. + * @hif_ctx : HIF Context + * @offset : offset on which value has to be written + * @value : value to be written + * + * Return: None + */ +void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset, + uint32_t value) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + hif_write32_mb(scn, scn->mem + offset, value); + +} +qdf_export_symbol(hif_reg_write); + +/** + * hif_reg_read() - API to access hif specific function + * hif_read32_mb. + * @hif_ctx : HIF Context + * @offset : offset from which value has to be read + * + * Return: Read value + */ +uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset) +{ + + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + return hif_read32_mb(scn, scn->mem + offset); +} +qdf_export_symbol(hif_reg_read); + +/** + * hif_ramdump_handler(): generic ramdump handler + * @scn: struct hif_opaque_softc + * + * Return: None + */ +void hif_ramdump_handler(struct hif_opaque_softc *scn) +{ + if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB) + hif_usb_ramdump_handler(scn); +} + +irqreturn_t hif_wake_interrupt_handler(int irq, void *context) +{ + struct hif_softc *scn = context; + struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn); + + HIF_INFO("wake interrupt received on irq %d", irq); + + if (hif_pm_runtime_get_monitor_wake_intr(hif_ctx)) { + hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0); + hif_pm_runtime_request_resume(hif_ctx); + } + + if (scn->initial_wakeup_cb) + scn->initial_wakeup_cb(scn->initial_wakeup_priv); + + if (hif_is_ut_suspended(scn)) + hif_ut_fw_resume(scn); + + qdf_pm_system_wakeup(); + + return IRQ_HANDLED; +} + +void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx, + void (*callback)(void *), + void *priv) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + scn->initial_wakeup_cb = callback; + scn->initial_wakeup_priv = priv; +} + +void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif, + uint32_t ce_service_max_yield_time) +{ + struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif); + + hif_ctx->ce_service_max_yield_time = + ce_service_max_yield_time * 1000; +} + +unsigned long long +hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif) +{ + struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif); + + return hif_ctx->ce_service_max_yield_time; +} + +void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif, + uint8_t ce_service_max_rx_ind_flush) +{ + struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif); + + if (ce_service_max_rx_ind_flush == 0 || + ce_service_max_rx_ind_flush > MSG_FLUSH_NUM) + hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM; + else + hif_ctx->ce_service_max_rx_ind_flush = + ce_service_max_rx_ind_flush; +} + +#ifdef SYSTEM_PM_CHECK +void __hif_system_pm_set_state(struct hif_opaque_softc *hif, + enum hif_system_pm_state state) +{ + struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif); + + qdf_atomic_set(&hif_ctx->sys_pm_state, state); +} + +int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif) +{ + struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif); + + return qdf_atomic_read(&hif_ctx->sys_pm_state); +} + +int hif_system_pm_state_check(struct hif_opaque_softc *hif) +{ + struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif); + int32_t sys_pm_state; + + if (!hif_ctx) { + hif_err("hif context is null"); + return -EFAULT; + } + + sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state); + if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING || + sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) { + hif_info("Triggering system wakeup"); + qdf_pm_system_wakeup(); + return -EAGAIN; + } + + return 0; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.h new file mode 100644 index 0000000000000000000000000000000000000000..f6d5134d87d83dac08f8c569287bdcafd107bb44 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.h @@ -0,0 +1,464 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * NB: Inappropriate references to "HTC" are used in this (and other) + * HIF implementations. HTC is typically the calling layer, but it + * theoretically could be some alternative. + */ + +/* + * This holds all state needed to process a pending send/recv interrupt. + * The information is saved here as soon as the interrupt occurs (thus + * allowing the underlying CE to re-use the ring descriptor). The + * information here is eventually processed by a completion processing + * thread. + */ + +#ifndef __HIF_MAIN_H__ +#define __HIF_MAIN_H__ + +#include /* qdf_atomic_read */ +#include "qdf_lock.h" +#include "cepci.h" +#include "hif.h" +#include "multibus.h" +#include "hif_unit_test_suspend_i.h" +#ifdef HIF_CE_LOG_INFO +#include "qdf_notifier.h" +#endif + +#define HIF_MIN_SLEEP_INACTIVITY_TIME_MS 50 +#define HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS 60 + +#define HIF_MAX_BUDGET 0xFFFF + +#define HIF_STATS_INC(_handle, _field, _delta) \ +{ \ + (_handle)->stats._field += _delta; \ +} + +/* + * This macro implementation is exposed for efficiency only. + * The implementation may change and callers should + * consider the targid to be a completely opaque handle. + */ +#define TARGID_TO_PCI_ADDR(targid) (*((A_target_id_t *)(targid))) + +#ifdef QCA_WIFI_3_0 +#define DISABLE_L1SS_STATES 1 +#endif + +#define MAX_NUM_OF_RECEIVES HIF_NAPI_MAX_RECEIVES + +#ifdef QCA_WIFI_3_0_ADRASTEA +#define ADRASTEA_BU 1 +#else +#define ADRASTEA_BU 0 +#endif + +#ifdef QCA_WIFI_3_0 +#define HAS_FW_INDICATOR 0 +#else +#define HAS_FW_INDICATOR 1 +#endif + + +#define AR9888_DEVICE_ID (0x003c) +#define AR6320_DEVICE_ID (0x003e) +#define AR6320_FW_1_1 (0x11) +#define AR6320_FW_1_3 (0x13) +#define AR6320_FW_2_0 (0x20) +#define AR6320_FW_3_0 (0x30) +#define AR6320_FW_3_2 (0x32) +#define QCA6290_EMULATION_DEVICE_ID (0xabcd) +#define QCA6290_DEVICE_ID (0x1100) +#define QCN9000_DEVICE_ID (0x1104) +#define QCA6390_EMULATION_DEVICE_ID (0x0108) +#define QCA6390_DEVICE_ID (0x1101) +/* TODO: change IDs for HastingsPrime */ +#define QCA6490_EMULATION_DEVICE_ID (0x010a) +#define QCA6490_DEVICE_ID (0x1103) + +/* TODO: change IDs for Moselle */ +#define QCA6750_EMULATION_DEVICE_ID (0x010c) +#define QCA6750_DEVICE_ID (0x1105) + +#define ADRASTEA_DEVICE_ID_P2_E12 (0x7021) +#define AR9887_DEVICE_ID (0x0050) +#define AR900B_DEVICE_ID (0x0040) +#define QCA9984_DEVICE_ID (0x0046) +#define QCA9888_DEVICE_ID (0x0056) +#ifndef IPQ4019_DEVICE_ID +#define IPQ4019_DEVICE_ID (0x12ef) +#endif +#define QCA8074_DEVICE_ID (0xffff) /* Todo: replace this with + actual number once available. + currently defining this to 0xffff for + emulation purpose */ +#define QCA8074V2_DEVICE_ID (0xfffe) /* Todo: replace this with actual number */ +#define QCA6018_DEVICE_ID (0xfffd) /* Todo: replace this with actual number */ +/* Genoa */ +#define QCN7605_DEVICE_ID (0x1102) /* Genoa PCIe device ID*/ +#define QCN7605_COMPOSITE (0x9901) +#define QCN7605_STANDALONE (0x9900) +#define QCN7605_STANDALONE_V2 (0x9902) +#define QCN7605_COMPOSITE_V2 (0x9903) + +#define RUMIM2M_DEVICE_ID_NODE0 0xabc0 +#define RUMIM2M_DEVICE_ID_NODE1 0xabc1 +#define RUMIM2M_DEVICE_ID_NODE2 0xabc2 +#define RUMIM2M_DEVICE_ID_NODE3 0xabc3 +#define RUMIM2M_DEVICE_ID_NODE4 0xaa10 +#define RUMIM2M_DEVICE_ID_NODE5 0xaa11 + +#define HIF_GET_PCI_SOFTC(scn) ((struct hif_pci_softc *)scn) +#define HIF_GET_IPCI_SOFTC(scn) ((struct hif_ipci_softc *)scn) +#define HIF_GET_CE_STATE(scn) ((struct HIF_CE_state *)scn) +#define HIF_GET_SDIO_SOFTC(scn) ((struct hif_sdio_softc *)scn) +#define HIF_GET_USB_SOFTC(scn) ((struct hif_usb_softc *)scn) +#define HIF_GET_USB_DEVICE(scn) ((struct HIF_DEVICE_USB *)scn) +#define HIF_GET_SOFTC(scn) ((struct hif_softc *)scn) +#define GET_HIF_OPAQUE_HDL(scn) ((struct hif_opaque_softc *)scn) + +struct hif_ce_stats { + int hif_pipe_no_resrc_count; + int ce_ring_delta_fail_count; +}; + +/* + * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked + * for defined here + */ +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) +struct ce_desc_hist { + qdf_atomic_t history_index[CE_COUNT_MAX]; + uint32_t enable[CE_COUNT_MAX]; + bool data_enable[CE_COUNT_MAX]; + qdf_mutex_t ce_dbg_datamem_lock[CE_COUNT_MAX]; + uint32_t hist_index; + uint32_t hist_id; + void *hist_ev[CE_COUNT_MAX]; +}; +#endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)*/ + +/** + * struct hif_cfg() - store ini config parameters in hif layer + * @ce_status_ring_timer_threshold: ce status ring timer threshold + * @ce_status_ring_batch_count_threshold: ce status ring batch count threshold + */ +struct hif_cfg { + uint16_t ce_status_ring_timer_threshold; + uint8_t ce_status_ring_batch_count_threshold; +}; + +struct hif_softc { + struct hif_opaque_softc osc; + struct hif_config_info hif_config; + struct hif_target_info target_info; + void __iomem *mem; + enum qdf_bus_type bus_type; + struct hif_bus_ops bus_ops; + void *ce_id_to_state[CE_COUNT_MAX]; + qdf_device_t qdf_dev; + bool hif_init_done; + bool request_irq_done; + bool ext_grp_irq_configured; + uint8_t ce_latency_stats; + /* Packet statistics */ + struct hif_ce_stats pkt_stats; + enum hif_target_status target_status; + uint64_t event_disable_mask; + + struct targetdef_s *targetdef; + struct ce_reg_def *target_ce_def; + struct hostdef_s *hostdef; + struct host_shadow_regs_s *host_shadow_regs; + + bool recovery; + bool notice_send; + bool per_ce_irq; + uint32_t ce_irq_summary; + /* No of copy engines supported */ + unsigned int ce_count; + atomic_t active_tasklet_cnt; + atomic_t active_grp_tasklet_cnt; + atomic_t link_suspended; + uint32_t *vaddr_rri_on_ddr; + qdf_dma_addr_t paddr_rri_on_ddr; +#ifdef CONFIG_BYPASS_QMI + uint32_t *vaddr_qmi_bypass; + qdf_dma_addr_t paddr_qmi_bypass; +#endif + int linkstate_vote; + bool fastpath_mode_on; + atomic_t tasklet_from_intr; + int htc_htt_tx_endpoint; + qdf_dma_addr_t mem_pa; + bool athdiag_procfs_inited; +#ifdef FEATURE_NAPI + struct qca_napi_data napi_data; +#endif /* FEATURE_NAPI */ + /* stores ce_service_max_yield_time in ns */ + unsigned long long ce_service_max_yield_time; + uint8_t ce_service_max_rx_ind_flush; + struct hif_driver_state_callbacks callbacks; + uint32_t hif_con_param; +#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT + uint32_t nss_wifi_ol_mode; +#endif + void *hal_soc; + struct hif_ut_suspend_context ut_suspend_ctx; + uint32_t hif_attribute; + int wake_irq; + int disable_wake_irq; + void (*initial_wakeup_cb)(void *); + void *initial_wakeup_priv; +#ifdef REMOVE_PKT_LOG + /* Handle to pktlog device */ + void *pktlog_dev; +#endif + +/* + * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked + * for defined here + */ +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) + struct ce_desc_hist hif_ce_desc_hist; +#endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)*/ +#ifdef IPA_OFFLOAD + qdf_shared_mem_t *ipa_ce_ring; +#endif + struct hif_cfg ini_cfg; +#ifdef HIF_CPU_PERF_AFFINE_MASK + /* The CPU hotplug event registration handle */ + struct qdf_cpuhp_handler *cpuhp_event_handle; +#endif +#ifdef HIF_CE_LOG_INFO + qdf_notif_block hif_recovery_notifier; +#endif +#ifdef FEATURE_RUNTIME_PM + /* Variable to track the link state change in RTPM */ + qdf_atomic_t pm_link_state; +#endif +#ifdef SYSTEM_PM_CHECK + qdf_atomic_t sys_pm_state; +#endif +}; + +static inline +void *hif_get_hal_handle(struct hif_opaque_softc *hif_hdl) +{ + struct hif_softc *sc = (struct hif_softc *)hif_hdl; + + if (!sc) + return NULL; + + return sc->hal_soc; +} + +/** + * Max waiting time during Runtime PM suspend to finish all + * the tasks. This is in the multiple of 10ms. + */ +#define HIF_TASK_DRAIN_WAIT_CNT 25 + +/** + * hif_try_complete_tasks() - Try to complete all the pending tasks + * @scn: HIF context + * + * Try to complete all the pending datapath tasks, i.e. tasklets, + * DP group tasklets and works which are queued, in a given time + * slot. + * + * Returns: QDF_STATUS_SUCCESS if all the tasks were completed + * QDF error code, if the time slot exhausted + */ +QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn); + +#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT +static inline bool hif_is_nss_wifi_enabled(struct hif_softc *sc) +{ + return !!(sc->nss_wifi_ol_mode); +} +#else +static inline bool hif_is_nss_wifi_enabled(struct hif_softc *sc) +{ + return false; +} +#endif + +static inline uint8_t hif_is_attribute_set(struct hif_softc *sc, + uint32_t hif_attrib) +{ + return sc->hif_attribute == hif_attrib; +} + +#ifdef WLAN_FEATURE_DP_EVENT_HISTORY +static inline void hif_set_event_hist_mask(struct hif_opaque_softc *hif_handle) +{ + struct hif_softc *scn = (struct hif_softc *)hif_handle; + + scn->event_disable_mask = HIF_EVENT_HIST_DISABLE_MASK; +} +#else +static inline void hif_set_event_hist_mask(struct hif_opaque_softc *hif_handle) +{ +} +#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ + +A_target_id_t hif_get_target_id(struct hif_softc *scn); +void hif_dump_pipe_debug_count(struct hif_softc *scn); +void hif_display_bus_stats(struct hif_opaque_softc *scn); +void hif_clear_bus_stats(struct hif_opaque_softc *scn); +bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count); +void hif_shutdown_device(struct hif_opaque_softc *hif_ctx); +int hif_bus_configure(struct hif_softc *scn); +void hif_cancel_deferred_target_sleep(struct hif_softc *scn); +int hif_config_ce(struct hif_softc *scn); +void hif_unconfig_ce(struct hif_softc *scn); +void hif_ce_prepare_config(struct hif_softc *scn); +QDF_STATUS hif_ce_open(struct hif_softc *scn); +void hif_ce_close(struct hif_softc *scn); +int athdiag_procfs_init(void *scn); +void athdiag_procfs_remove(void); +/* routine to modify the initial buffer count to be allocated on an os + * platform basis. Platform owner will need to modify this as needed + */ +qdf_size_t init_buffer_count(qdf_size_t maxSize); + +irqreturn_t hif_fw_interrupt_handler(int irq, void *arg); +int hif_get_device_type(uint32_t device_id, + uint32_t revision_id, + uint32_t *hif_type, uint32_t *target_type); +/*These functions are exposed to HDD*/ +void hif_nointrs(struct hif_softc *scn); +void hif_bus_close(struct hif_softc *ol_sc); +QDF_STATUS hif_bus_open(struct hif_softc *ol_sc, + enum qdf_bus_type bus_type); +QDF_STATUS hif_enable_bus(struct hif_softc *ol_sc, struct device *dev, + void *bdev, const struct hif_bus_id *bid, enum hif_enable_type type); +void hif_disable_bus(struct hif_softc *scn); +void hif_bus_prevent_linkdown(struct hif_softc *scn, bool flag); +int hif_bus_get_context_size(enum qdf_bus_type bus_type); +void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *bar_value); +uint32_t hif_get_conparam(struct hif_softc *scn); +struct hif_driver_state_callbacks *hif_get_callbacks_handle( + struct hif_softc *scn); +bool hif_is_driver_unloading(struct hif_softc *scn); +bool hif_is_load_or_unload_in_progress(struct hif_softc *scn); +bool hif_is_recovery_in_progress(struct hif_softc *scn); +bool hif_is_target_ready(struct hif_softc *scn); + +/** + * hif_get_bandwidth_level() - API to get the current bandwidth level + * @scn: HIF Context + * + * Return: PLD bandwidth level + */ +int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle); + +void hif_wlan_disable(struct hif_softc *scn); +int hif_target_sleep_state_adjust(struct hif_softc *scn, + bool sleep_ok, + bool wait_for_it); + +#ifdef DP_MEM_PRE_ALLOC +void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn, + qdf_size_t size, + qdf_dma_addr_t *paddr, + uint32_t ring_type, + uint8_t *is_mem_prealloc); + +void hif_mem_free_consistent_unaligned(struct hif_softc *scn, + qdf_size_t size, + void *vaddr, + qdf_dma_addr_t paddr, + qdf_dma_context_t memctx, + uint8_t is_mem_prealloc); +#else +static inline +void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn, + qdf_size_t size, + qdf_dma_addr_t *paddr, + uint32_t ring_type, + uint8_t *is_mem_prealloc) +{ + return qdf_mem_alloc_consistent(scn->qdf_dev, + scn->qdf_dev->dev, + size, + paddr); +} + +static inline +void hif_mem_free_consistent_unaligned(struct hif_softc *scn, + qdf_size_t size, + void *vaddr, + qdf_dma_addr_t paddr, + qdf_dma_context_t memctx, + uint8_t is_mem_prealloc) +{ + return qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, + size, vaddr, paddr, memctx); +} +#endif + +/** + * hif_get_rx_ctx_id() - Returns NAPI instance ID based on CE ID + * @ctx_id: Rx CE context ID + * @hif_hdl: HIF Context + * + * Return: Rx instance ID + */ +int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl); +void hif_ramdump_handler(struct hif_opaque_softc *scn); +#ifdef HIF_USB +void hif_usb_get_hw_info(struct hif_softc *scn); +void hif_usb_ramdump_handler(struct hif_opaque_softc *scn); +#else +static inline void hif_usb_get_hw_info(struct hif_softc *scn) {} +static inline void hif_usb_ramdump_handler(struct hif_opaque_softc *scn) {} +#endif + +/** + * hif_wake_interrupt_handler() - interrupt handler for standalone wake irq + * @irq: the irq number that fired + * @context: the opaque pointer passed to request_irq() + * + * Return: an irq return type + */ +irqreturn_t hif_wake_interrupt_handler(int irq, void *context); + +#ifdef HIF_SNOC +bool hif_is_target_register_access_allowed(struct hif_softc *hif_sc); +#else +static inline +bool hif_is_target_register_access_allowed(struct hif_softc *hif_sc) +{ + return true; +} +#endif + +#ifdef ADRASTEA_RRI_ON_DDR +void hif_uninit_rri_on_ddr(struct hif_softc *scn); +#else +static inline +void hif_uninit_rri_on_ddr(struct hif_softc *scn) {} +#endif +void hif_cleanup_static_buf_to_target(struct hif_softc *scn); +#endif /* __HIF_MAIN_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main_legacy.c b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main_legacy.c new file mode 100644 index 0000000000000000000000000000000000000000..190ebae3b25609da4e1f8597b16adda3c4a79704 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main_legacy.c @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_lock.h" +#include "qdf_status.h" +#include "qdf_module.h" +#include "hif_main.h" + +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) +#include "ce_api.h" +#include "ce_internal.h" +#endif + +#ifdef WLAN_FEATURE_FASTPATH +/** + * hif_send_fast() - API to access hif specific function + * ce_send_fast. + * @osc: HIF Context + * @msdu : array of msdus to be sent + * @num_msdus : number of msdus in an array + * @transfer_id: transfer id + * @download_len: download length + * + * Return: No. of packets that could be sent + */ +int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf, + uint32_t transfer_id, uint32_t download_len) +{ + void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE); + + return ce_send_fast((struct CE_handle *)ce_tx_hdl, nbuf, + transfer_id, download_len); +} + +qdf_export_symbol(hif_send_fast); + +/** + * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler + * @handler: Callback funtcion + * @context: handle for callback function + * + * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE + */ +int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx, + fastpath_msg_handler handler, + void *context) +{ + struct CE_state *ce_state; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + int i; + + if (!scn) { + HIF_ERROR("%s: scn is NULL", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + if (!scn->fastpath_mode_on) { + HIF_WARN("%s: Fastpath mode disabled", __func__); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < scn->ce_count; i++) { + ce_state = scn->ce_id_to_state[i]; + if (ce_state->htt_rx_data) { + ce_state->fastpath_handler = handler; + ce_state->context = context; + ce_state->service = ce_per_engine_service_fast; + } + } + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(hif_ce_fastpath_cb_register); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.c b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.c new file mode 100644 index 0000000000000000000000000000000000000000..c20becfaff61d27867b59c4a10fe123d10d5ad75 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.c @@ -0,0 +1,1788 @@ +/* + * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: hif_napi.c + * + * HIF NAPI interface implementation + */ + +#include /* memset */ + +/* Linux headers */ +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_SCHED_CORE_CTL +#include +#endif +#include +#include + +/* Driver headers */ +#include +#include +#include +#include +#include +#include +#include "qdf_cpuhp.h" +#include "qdf_module.h" +#include "qdf_net_if.h" +#include "qdf_dev.h" + +enum napi_decision_vector { + HIF_NAPI_NOEVENT = 0, + HIF_NAPI_INITED = 1, + HIF_NAPI_CONF_UP = 2 +}; +#define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP) + +#ifdef RECEIVE_OFFLOAD +/** + * hif_rxthread_napi_poll() - dummy napi poll for rx_thread NAPI + * @napi: Rx_thread NAPI + * @budget: NAPI BUDGET + * + * Return: 0 as it is not supposed to be polled at all as it is not scheduled. + */ +static int hif_rxthread_napi_poll(struct napi_struct *napi, int budget) +{ + HIF_ERROR("This napi_poll should not be polled as we don't schedule it"); + QDF_ASSERT(0); + return 0; +} + +/** + * hif_init_rx_thread_napi() - Initialize dummy Rx_thread NAPI + * @napii: Handle to napi_info holding rx_thread napi + * + * Return: None + */ +static void hif_init_rx_thread_napi(struct qca_napi_info *napii) +{ + init_dummy_netdev(&napii->rx_thread_netdev); + netif_napi_add(&napii->rx_thread_netdev, &napii->rx_thread_napi, + hif_rxthread_napi_poll, 64); + napi_enable(&napii->rx_thread_napi); +} + +/** + * hif_deinit_rx_thread_napi() - Deinitialize dummy Rx_thread NAPI + * @napii: Handle to napi_info holding rx_thread napi + * + * Return: None + */ +static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii) +{ + netif_napi_del(&napii->rx_thread_napi); +} +#else /* RECEIVE_OFFLOAD */ +static void hif_init_rx_thread_napi(struct qca_napi_info *napii) +{ +} + +static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii) +{ +} +#endif + +/** + * hif_napi_create() - creates the NAPI structures for a given CE + * @hif : pointer to hif context + * @pipe_id: the CE id on which the instance will be created + * @poll : poll function to be used for this NAPI instance + * @budget : budget to be registered with the NAPI instance + * @scale : scale factor on the weight (to scaler budget to 1000) + * @flags : feature flags + * + * Description: + * Creates NAPI instances. This function is called + * unconditionally during initialization. It creates + * napi structures through the proper HTC/HIF calls. + * The structures are disabled on creation. + * Note that for each NAPI instance a separate dummy netdev is used + * + * Return: + * < 0: error + * = 0: + * > 0: id of the created object (for multi-NAPI, number of objects created) + */ +int hif_napi_create(struct hif_opaque_softc *hif_ctx, + int (*poll)(struct napi_struct *, int), + int budget, + int scale, + uint8_t flags) +{ + int i; + struct qca_napi_data *napid; + struct qca_napi_info *napii; + struct CE_state *ce_state; + struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); + int rc = 0; + + NAPI_DEBUG("-->(budget=%d, scale=%d)", + budget, scale); + NAPI_DEBUG("hif->napi_data.state = 0x%08x", + hif->napi_data.state); + NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x", + hif->napi_data.ce_map); + + napid = &(hif->napi_data); + if (0 == (napid->state & HIF_NAPI_INITED)) { + memset(napid, 0, sizeof(struct qca_napi_data)); + qdf_spinlock_create(&(napid->lock)); + + napid->state |= HIF_NAPI_INITED; + napid->flags = flags; + + rc = hif_napi_cpu_init(hif_ctx); + if (rc != 0 && rc != -EALREADY) { + HIF_ERROR("NAPI_initialization failed,. %d", rc); + rc = napid->ce_map; + goto hnc_err; + } else + rc = 0; + + HIF_DBG("%s: NAPI structures initialized, rc=%d", + __func__, rc); + } + for (i = 0; i < hif->ce_count; i++) { + ce_state = hif->ce_id_to_state[i]; + NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d", + i, ce_state->htt_rx_data, + ce_state->htt_tx_data); + if (ce_srng_based(hif)) + continue; + + if (!ce_state->htt_rx_data) + continue; + + /* Now this is a CE where we need NAPI on */ + NAPI_DEBUG("Creating NAPI on pipe %d", i); + napii = qdf_mem_malloc(sizeof(*napii)); + napid->napis[i] = napii; + if (!napii) { + rc = -ENOMEM; + goto napii_free; + } + } + + for (i = 0; i < hif->ce_count; i++) { + napii = napid->napis[i]; + if (!napii) + continue; + + NAPI_DEBUG("initializing NAPI for pipe %d", i); + memset(napii, 0, sizeof(struct qca_napi_info)); + napii->scale = scale; + napii->id = NAPI_PIPE2ID(i); + napii->hif_ctx = hif_ctx; + napii->irq = pld_get_irq(hif->qdf_dev->dev, i); + + if (napii->irq < 0) + HIF_WARN("%s: bad IRQ value for CE %d: %d", + __func__, i, napii->irq); + + init_dummy_netdev(&(napii->netdev)); + + NAPI_DEBUG("adding napi=%pK to netdev=%pK (poll=%pK, bdgt=%d)", + &(napii->napi), &(napii->netdev), poll, budget); + netif_napi_add(&(napii->netdev), &(napii->napi), poll, budget); + + NAPI_DEBUG("after napi_add"); + NAPI_DEBUG("napi=0x%pK, netdev=0x%pK", + &(napii->napi), &(napii->netdev)); + NAPI_DEBUG("napi.dev_list.prev=0x%pK, next=0x%pK", + napii->napi.dev_list.prev, + napii->napi.dev_list.next); + NAPI_DEBUG("dev.napi_list.prev=0x%pK, next=0x%pK", + napii->netdev.napi_list.prev, + napii->netdev.napi_list.next); + + hif_init_rx_thread_napi(napii); + napii->lro_ctx = qdf_lro_init(); + NAPI_DEBUG("Registering LRO for ce_id %d NAPI callback for %d lro_ctx %pK\n", + i, napii->id, napii->lro_ctx); + + /* It is OK to change the state variable below without + * protection as there should be no-one around yet + */ + napid->ce_map |= (0x01 << i); + HIF_DBG("%s: NAPI id %d created for pipe %d", __func__, + napii->id, i); + } + + /* no ces registered with the napi */ + if (!ce_srng_based(hif) && napid->ce_map == 0) { + HIF_WARN("%s: no napis created for copy engines", __func__); + rc = -EFAULT; + goto napii_free; + } + + NAPI_DEBUG("napi map = %x", napid->ce_map); + NAPI_DEBUG("NAPI ids created for all applicable pipes"); + return napid->ce_map; + +napii_free: + for (i = 0; i < hif->ce_count; i++) { + napii = napid->napis[i]; + napid->napis[i] = NULL; + if (napii) + qdf_mem_free(napii); + } + +hnc_err: + NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map); + return rc; +} +qdf_export_symbol(hif_napi_create); + +#ifdef RECEIVE_OFFLOAD +void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl, + void (offld_flush_handler)(void *)) +{ + int i; + struct CE_state *ce_state; + struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); + struct qca_napi_data *napid; + struct qca_napi_info *napii; + + if (!scn) { + HIF_ERROR("%s: hif_state NULL!", __func__); + QDF_ASSERT(0); + return; + } + + napid = hif_napi_get_all(hif_hdl); + for (i = 0; i < scn->ce_count; i++) { + ce_state = scn->ce_id_to_state[i]; + if (ce_state && (ce_state->htt_rx_data)) { + napii = napid->napis[i]; + napii->offld_flush_cb = offld_flush_handler; + HIF_DBG("Registering offload for ce_id %d NAPI callback for %d flush_cb %pK\n", + i, napii->id, napii->offld_flush_cb); + } + } +} + +void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl) +{ + int i; + struct CE_state *ce_state; + struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); + struct qca_napi_data *napid; + struct qca_napi_info *napii; + + if (!scn) { + HIF_ERROR("%s: hif_state NULL!", __func__); + QDF_ASSERT(0); + return; + } + + napid = hif_napi_get_all(hif_hdl); + for (i = 0; i < scn->ce_count; i++) { + ce_state = scn->ce_id_to_state[i]; + if (ce_state && (ce_state->htt_rx_data)) { + napii = napid->napis[i]; + HIF_DBG("deRegistering offld for ce_id %d NAPI callback for %d flush_cb %pK\n", + i, napii->id, napii->offld_flush_cb); + /* Not required */ + napii->offld_flush_cb = NULL; + } + } +} +#endif /* RECEIVE_OFFLOAD */ + +/** + * + * hif_napi_destroy() - destroys the NAPI structures for a given instance + * @hif : pointer to hif context + * @ce_id : the CE id whose napi instance will be destroyed + * @force : if set, will destroy even if entry is active (de-activates) + * + * Description: + * Destroy a given NAPI instance. This function is called + * unconditionally during cleanup. + * Refuses to destroy an entry of it is still enabled (unless force=1) + * Marks the whole napi_data invalid if all instances are destroyed. + * + * Return: + * -EINVAL: specific entry has not been created + * -EPERM : specific entry is still active + * 0 < : error + * 0 = : success + */ +int hif_napi_destroy(struct hif_opaque_softc *hif_ctx, + uint8_t id, + int force) +{ + uint8_t ce = NAPI_ID2PIPE(id); + int rc = 0; + struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); + + NAPI_DEBUG("-->(id=%d, force=%d)", id, force); + + if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) { + HIF_ERROR("%s: NAPI not initialized or entry %d not created", + __func__, id); + rc = -EINVAL; + } else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) { + HIF_ERROR("%s: NAPI instance %d (pipe %d) not created", + __func__, id, ce); + if (hif->napi_data.napis[ce]) + HIF_ERROR("%s: memory allocated but ce_map not set %d (pipe %d)", + __func__, id, ce); + rc = -EINVAL; + } else { + struct qca_napi_data *napid; + struct qca_napi_info *napii; + + napid = &(hif->napi_data); + napii = napid->napis[ce]; + if (!napii) { + if (napid->ce_map & (0x01 << ce)) + HIF_ERROR("%s: napii & ce_map out of sync(ce %d)", + __func__, ce); + return -EINVAL; + } + + + if (hif->napi_data.state == HIF_NAPI_CONF_UP) { + if (force) { + napi_disable(&(napii->napi)); + HIF_DBG("%s: NAPI entry %d force disabled", + __func__, id); + NAPI_DEBUG("NAPI %d force disabled", id); + } else { + HIF_ERROR("%s: Cannot destroy active NAPI %d", + __func__, id); + rc = -EPERM; + } + } + if (0 == rc) { + NAPI_DEBUG("before napi_del"); + NAPI_DEBUG("napi.dlist.prv=0x%pK, next=0x%pK", + napii->napi.dev_list.prev, + napii->napi.dev_list.next); + NAPI_DEBUG("dev.napi_l.prv=0x%pK, next=0x%pK", + napii->netdev.napi_list.prev, + napii->netdev.napi_list.next); + + qdf_lro_deinit(napii->lro_ctx); + netif_napi_del(&(napii->napi)); + hif_deinit_rx_thread_napi(napii); + + napid->ce_map &= ~(0x01 << ce); + napid->napis[ce] = NULL; + napii->scale = 0; + qdf_mem_free(napii); + HIF_DBG("%s: NAPI %d destroyed\n", __func__, id); + + /* if there are no active instances and + * if they are all destroyed, + * set the whole structure to uninitialized state + */ + if (napid->ce_map == 0) { + rc = hif_napi_cpu_deinit(hif_ctx); + /* caller is tolerant to receiving !=0 rc */ + + qdf_spinlock_destroy(&(napid->lock)); + memset(napid, + 0, sizeof(struct qca_napi_data)); + HIF_DBG("%s: no NAPI instances. Zapped.", + __func__); + } + } + } + + return rc; +} +qdf_export_symbol(hif_napi_destroy); + +#ifdef FEATURE_LRO +void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); + struct qca_napi_data *napid; + struct qca_napi_info *napii; + + napid = &(scn->napi_data); + napii = napid->napis[NAPI_ID2PIPE(napi_id)]; + + if (napii) + return napii->lro_ctx; + return 0; +} +#endif + +/** + * + * hif_napi_get_all() - returns the address of the whole HIF NAPI structure + * @hif: pointer to hif context + * + * Description: + * Returns the address of the whole structure + * + * Return: + * : address of the whole HIF NAPI structure + */ +inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); + + return &(hif->napi_data); +} + +struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid) +{ + int id = NAPI_ID2PIPE(napi_id); + + return napid->napis[id]; +} + +/** + * + * hif_napi_event() - reacts to events that impact NAPI + * @hif : pointer to hif context + * @evnt: event that has been detected + * @data: more data regarding the event + * + * Description: + * This function handles two types of events: + * 1- Events that change the state of NAPI (enabled/disabled): + * {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE} + * The state is retrievable by "hdd_napi_enabled(-1)" + * - NAPI will be on if either INI file is on and it has not been disabled + * by a subsequent vendor CMD, + * or it has been enabled by a vendor CMD. + * 2- Events that change the CPU affinity of a NAPI instance/IRQ: + * {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE} + * - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode + * - NAPI will switch throughput mode based on hdd_napi_throughput_policy() + * - In LO tput mode, NAPI will yield control if its interrupts to the system + * management functions. However in HI throughput mode, NAPI will actively + * manage its interrupts/instances (by trying to disperse them out to + * separate performance cores). + * - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events. + * + * + In some cases (roaming peer management is the only case so far), a + * a client can trigger a "SERIALIZE" event. Basically, this means that the + * users is asking NAPI to go into a truly single execution context state. + * So, NAPI indicates to msm-irqbalancer that it wants to be blacklisted, + * (if called for the first time) and then moves all IRQs (for NAPI + * instances) to be collapsed to a single core. If called multiple times, + * it will just re-collapse the CPUs. This is because blacklist-on() API + * is reference-counted, and because the API has already been called. + * + * Such a user, should call "DESERIALIZE" (NORMAL) event, to set NAPI to go + * to its "normal" operation. Optionally, they can give a timeout value (in + * multiples of BusBandwidthCheckPeriod -- 100 msecs by default). In this + * case, NAPI will just set the current throughput state to uninitialized + * and set the delay period. Once policy handler is called, it would skip + * applying the policy delay period times, and otherwise apply the policy. + * + * Return: + * < 0: some error + * = 0: event handled successfully + */ +int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event, + void *data) +{ + int rc = 0; + uint32_t prev_state; + int i; + bool state_changed; + struct napi_struct *napi; + struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); + struct qca_napi_data *napid = &(hif->napi_data); + enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED; + enum { + BLACKLIST_NOT_PENDING, + BLACKLIST_ON_PENDING, + BLACKLIST_OFF_PENDING + } blacklist_pending = BLACKLIST_NOT_PENDING; + + NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data); + + if (ce_srng_based(hif)) + return hif_exec_event(hif_ctx, event, data); + + if ((napid->state & HIF_NAPI_INITED) == 0) { + NAPI_DEBUG("%s: got event when NAPI not initialized", + __func__); + return -EINVAL; + } + qdf_spin_lock_bh(&(napid->lock)); + prev_state = napid->state; + switch (event) { + case NAPI_EVT_INI_FILE: + case NAPI_EVT_CMD_STATE: + case NAPI_EVT_INT_STATE: { + int on = (data != ((void *)0)); + + HIF_DBG("%s: recved evnt: STATE_CMD %d; v = %d (state=0x%0x)", + __func__, event, + on, prev_state); + if (on) + if (prev_state & HIF_NAPI_CONF_UP) { + HIF_DBG("%s: duplicate NAPI conf ON msg", + __func__); + } else { + HIF_DBG("%s: setting state to ON", + __func__); + napid->state |= HIF_NAPI_CONF_UP; + } + else /* off request */ + if (prev_state & HIF_NAPI_CONF_UP) { + HIF_DBG("%s: setting state to OFF", + __func__); + napid->state &= ~HIF_NAPI_CONF_UP; + } else { + HIF_DBG("%s: duplicate NAPI conf OFF msg", + __func__); + } + break; + } + /* case NAPI_INIT_FILE/CMD_STATE */ + + case NAPI_EVT_CPU_STATE: { + int cpu = ((unsigned long int)data >> 16); + int val = ((unsigned long int)data & 0x0ff); + + NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d", + __func__, cpu, val); + + /* state has already been set by hnc_cpu_notify_cb */ + if ((val == QCA_NAPI_CPU_DOWN) && + (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */ + (napid->napi_cpu[cpu].napis != 0)) { + NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d", + __func__, cpu); + rc = hif_napi_cpu_migrate(napid, + cpu, + HNC_ACT_RELOCATE); + napid->napi_cpu[cpu].napis = 0; + } + /* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */ + break; + } + + case NAPI_EVT_TPUT_STATE: { + tput_mode = (enum qca_napi_tput_state)data; + if (tput_mode == QCA_NAPI_TPUT_LO) { + /* from TPUT_HI -> TPUT_LO */ + NAPI_DEBUG("%s: Moving to napi_tput_LO state", + __func__); + blacklist_pending = BLACKLIST_OFF_PENDING; + /* + * Ideally we should "collapse" interrupts here, since + * we are "dispersing" interrupts in the "else" case. + * This allows the possibility that our interrupts may + * still be on the perf cluster the next time we enter + * high tput mode. However, the irq_balancer is free + * to move our interrupts to power cluster once + * blacklisting has been turned off in the "else" case. + */ + } else { + /* from TPUT_LO -> TPUT->HI */ + NAPI_DEBUG("%s: Moving to napi_tput_HI state", + __func__); + rc = hif_napi_cpu_migrate(napid, + HNC_ANY_CPU, + HNC_ACT_DISPERSE); + + blacklist_pending = BLACKLIST_ON_PENDING; + } + napid->napi_mode = tput_mode; + break; + } + + case NAPI_EVT_USR_SERIAL: { + unsigned long users = (unsigned long)data; + + NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld", + __func__, users); + + rc = hif_napi_cpu_migrate(napid, + HNC_ANY_CPU, + HNC_ACT_COLLAPSE); + if ((users == 0) && (rc == 0)) + blacklist_pending = BLACKLIST_ON_PENDING; + break; + } + case NAPI_EVT_USR_NORMAL: { + NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__); + if (!napid->user_cpu_affin_mask) + blacklist_pending = BLACKLIST_OFF_PENDING; + /* + * Deserialization timeout is handled at hdd layer; + * just mark current mode to uninitialized to ensure + * it will be set when the delay is over + */ + napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED; + break; + } + default: { + HIF_ERROR("%s: unknown event: %d (data=0x%0lx)", + __func__, event, (unsigned long) data); + break; + } /* default */ + }; /* switch */ + + + switch (blacklist_pending) { + case BLACKLIST_ON_PENDING: + /* assume the control of WLAN IRQs */ + hif_napi_cpu_blacklist(napid, BLACKLIST_ON); + break; + case BLACKLIST_OFF_PENDING: + /* yield the control of WLAN IRQs */ + hif_napi_cpu_blacklist(napid, BLACKLIST_OFF); + break; + default: /* nothing to do */ + break; + } /* switch blacklist_pending */ + + /* we want to perform the comparison in lock: + * there is a possiblity of hif_napi_event get called + * from two different contexts (driver unload and cpu hotplug + * notification) and napid->state get changed + * in driver unload context and can lead to race condition + * in cpu hotplug context. Therefore, perform the napid->state + * comparison before releasing lock. + */ + state_changed = (prev_state != napid->state); + qdf_spin_unlock_bh(&(napid->lock)); + + if (state_changed) { + if (napid->state == ENABLE_NAPI_MASK) { + rc = 1; + for (i = 0; i < CE_COUNT_MAX; i++) { + struct qca_napi_info *napii = napid->napis[i]; + if (napii) { + napi = &(napii->napi); + NAPI_DEBUG("%s: enabling NAPI %d", + __func__, i); + napi_enable(napi); + } + } + } else { + rc = 0; + for (i = 0; i < CE_COUNT_MAX; i++) { + struct qca_napi_info *napii = napid->napis[i]; + if (napii) { + napi = &(napii->napi); + NAPI_DEBUG("%s: disabling NAPI %d", + __func__, i); + napi_disable(napi); + /* in case it is affined, remove it */ + qdf_dev_set_irq_affinity(napii->irq, + NULL); + } + } + } + } else { + HIF_DBG("%s: no change in hif napi state (still %d)", + __func__, prev_state); + } + + NAPI_DEBUG("<--[rc=%d]", rc); + return rc; +} +qdf_export_symbol(hif_napi_event); + +/** + * hif_napi_enabled() - checks whether NAPI is enabled for given ce or not + * @hif: hif context + * @ce : CE instance (or -1, to check if any CEs are enabled) + * + * Return: bool + */ +int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce) +{ + int rc; + struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); + + if (-1 == ce) + rc = ((hif->napi_data.state == ENABLE_NAPI_MASK)); + else + rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) && + (hif->napi_data.ce_map & (0x01 << ce))); + return rc; +} +qdf_export_symbol(hif_napi_enabled); + +/** + * hif_napi_created() - checks whether NAPI is created for given ce or not + * @hif: hif context + * @ce : CE instance + * + * Return: bool + */ +bool hif_napi_created(struct hif_opaque_softc *hif_ctx, int ce) +{ + int rc; + struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); + + rc = (hif->napi_data.ce_map & (0x01 << ce)); + + return !!rc; +} +qdf_export_symbol(hif_napi_created); + +/** + * hif_napi_enable_irq() - enables bus interrupts after napi_complete + * + * @hif: hif context + * @id : id of NAPI instance calling this (used to determine the CE) + * + * Return: void + */ +inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif); + + hif_irq_enable(scn, NAPI_ID2PIPE(id)); +} + + +/** + * hif_napi_schedule() - schedules napi, updates stats + * @scn: hif context + * @ce_id: index of napi instance + * + * Return: false if napi didn't enable or already scheduled, otherwise true + */ +bool hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id) +{ + int cpu = smp_processor_id(); + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct qca_napi_info *napii; + + napii = scn->napi_data.napis[ce_id]; + if (qdf_unlikely(!napii)) { + HIF_ERROR("%s, scheduling unallocated napi (ce:%d)", + __func__, ce_id); + qdf_atomic_dec(&scn->active_tasklet_cnt); + return false; + } + + if (test_bit(NAPI_STATE_SCHED, &napii->napi.state)) { + NAPI_DEBUG("napi scheduled, return"); + qdf_atomic_dec(&scn->active_tasklet_cnt); + return false; + } + + hif_record_ce_desc_event(scn, ce_id, NAPI_SCHEDULE, + NULL, NULL, 0, 0); + napii->stats[cpu].napi_schedules++; + NAPI_DEBUG("scheduling napi %d (ce:%d)", napii->id, ce_id); + napi_schedule(&(napii->napi)); + + return true; +} +qdf_export_symbol(hif_napi_schedule); + +/** + * hif_napi_correct_cpu() - correct the interrupt affinity for napi if needed + * @napi_info: pointer to qca_napi_info for the napi instance + * + * Return: true => interrupt already on correct cpu, no correction needed + * false => interrupt on wrong cpu, correction done for cpu affinity + * of the interrupt + */ +static inline +bool hif_napi_correct_cpu(struct qca_napi_info *napi_info) +{ + bool right_cpu = true; + int rc = 0; + int cpu; + struct qca_napi_data *napid; + QDF_STATUS ret; + + napid = hif_napi_get_all(GET_HIF_OPAQUE_HDL(napi_info->hif_ctx)); + + if (napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION) { + + cpu = qdf_get_cpu(); + if (unlikely((hif_napi_cpu_blacklist(napid, + BLACKLIST_QUERY) > 0) && + (cpu != napi_info->cpu))) { + right_cpu = false; + + NAPI_DEBUG("interrupt on wrong CPU, correcting"); + napi_info->cpumask.bits[0] = (0x01 << napi_info->cpu); + + irq_modify_status(napi_info->irq, IRQ_NO_BALANCING, 0); + ret = qdf_dev_set_irq_affinity(napi_info->irq, + (struct qdf_cpu_mask *) + &napi_info->cpumask); + rc = qdf_status_to_os_return(ret); + irq_modify_status(napi_info->irq, 0, IRQ_NO_BALANCING); + + if (rc) + HIF_ERROR("error setting irq affinity hint: %d", + rc); + else + napi_info->stats[cpu].cpu_corrected++; + } + } + return right_cpu; +} + +#ifdef RECEIVE_OFFLOAD +/** + * hif_napi_offld_flush_cb() - Call upper layer flush callback + * @napi_info: Handle to hif_napi_info + * + * Return: None + */ +static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info) +{ + if (napi_info->offld_flush_cb) + napi_info->offld_flush_cb(napi_info); +} +#else +static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info) +{ +} +#endif + +/** + * hif_napi_poll() - NAPI poll routine + * @napi : pointer to NAPI struct as kernel holds it + * @budget: + * + * This is the body of the poll function. + * The poll function is called by kernel. So, there is a wrapper + * function in HDD, which in turn calls this function. + * Two main reasons why the whole thing is not implemented in HDD: + * a) references to things like ce_service that HDD is not aware of + * b) proximity to the implementation of ce_tasklet, which the body + * of this function should be very close to. + * + * NOTE TO THE MAINTAINER: + * Consider this function and ce_tasklet very tightly coupled pairs. + * Any changes to ce_tasklet or this function may likely need to be + * reflected in the counterpart. + * + * Returns: + * int: the amount of work done in this poll (<= budget) + */ +int hif_napi_poll(struct hif_opaque_softc *hif_ctx, + struct napi_struct *napi, + int budget) +{ + int rc = 0; /* default: no work done, also takes care of error */ + int normalized = 0; + int bucket; + int cpu = smp_processor_id(); + bool poll_on_right_cpu; + struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); + struct qca_napi_info *napi_info; + struct CE_state *ce_state = NULL; + + if (unlikely(!hif)) { + HIF_ERROR("%s: hif context is NULL", __func__); + QDF_ASSERT(0); + goto out; + } + + napi_info = (struct qca_napi_info *) + container_of(napi, struct qca_napi_info, napi); + + NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)", + __func__, napi_info->id, napi_info->irq, budget); + + napi_info->stats[cpu].napi_polls++; + + hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id), + NAPI_POLL_ENTER, NULL, NULL, cpu, 0); + + rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id)); + NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs", + __func__, rc); + + hif_napi_offld_flush_cb(napi_info); + + /* do not return 0, if there was some work done, + * even if it is below the scale + */ + if (rc) { + napi_info->stats[cpu].napi_workdone += rc; + normalized = (rc / napi_info->scale); + if (normalized == 0) + normalized++; + bucket = (normalized - 1) / + (QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS); + if (bucket >= QCA_NAPI_NUM_BUCKETS) { + bucket = QCA_NAPI_NUM_BUCKETS - 1; + HIF_ERROR("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d)" + " normalized %d, napi budget %d", + bucket, QCA_NAPI_NUM_BUCKETS, + normalized, QCA_NAPI_BUDGET); + } + napi_info->stats[cpu].napi_budget_uses[bucket]++; + } else { + /* if ce_per engine reports 0, then poll should be terminated */ + NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI", + __func__, __LINE__); + } + + ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)]; + + /* + * Not using the API hif_napi_correct_cpu directly in the if statement + * below since the API may not get evaluated if put at the end if any + * prior condition would evaluate to be true. The CPU correction + * check should kick in every poll. + */ +#ifdef NAPI_YIELD_BUDGET_BASED + if (ce_state && (ce_state->force_break || 0 == rc)) { +#else + poll_on_right_cpu = hif_napi_correct_cpu(napi_info); + if ((ce_state) && + (!ce_check_rx_pending(ce_state) || (0 == rc) || + !poll_on_right_cpu)) { +#endif + napi_info->stats[cpu].napi_completes++; +#ifdef NAPI_YIELD_BUDGET_BASED + ce_state->force_break = 0; +#endif + + hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE, + NULL, NULL, 0, 0); + if (normalized >= budget) + normalized = budget - 1; + + napi_complete(napi); + /* enable interrupts */ + hif_napi_enable_irq(hif_ctx, napi_info->id); + /* support suspend/resume */ + qdf_atomic_dec(&(hif->active_tasklet_cnt)); + + NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts", + __func__, __LINE__); + } else { + /* 4.4 kernel NAPI implementation requires drivers to + * return full work when they ask to be re-scheduled, + * or napi_complete and re-start with a fresh interrupt + */ + normalized = budget; + } + + hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id), + NAPI_POLL_EXIT, NULL, NULL, normalized, 0); + + NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized); + return normalized; +out: + return rc; +} +qdf_export_symbol(hif_napi_poll); + +void hif_update_napi_max_poll_time(struct CE_state *ce_state, + int ce_id, + int cpu_id) +{ + struct hif_softc *hif; + struct qca_napi_info *napi_info; + unsigned long long napi_poll_time = sched_clock() - + ce_state->ce_service_start_time; + + hif = ce_state->scn; + napi_info = hif->napi_data.napis[ce_id]; + if (napi_poll_time > + napi_info->stats[cpu_id].napi_max_poll_time) + napi_info->stats[cpu_id].napi_max_poll_time = napi_poll_time; +} +qdf_export_symbol(hif_update_napi_max_poll_time); + +#ifdef HIF_IRQ_AFFINITY +/** + * + * hif_napi_update_yield_stats() - update NAPI yield related stats + * @cpu_id: CPU ID for which stats needs to be updates + * @ce_id: Copy Engine ID for which yield stats needs to be updates + * @time_limit_reached: indicates whether the time limit was reached + * @rxpkt_thresh_reached: indicates whether rx packet threshold was reached + * + * Return: None + */ +void hif_napi_update_yield_stats(struct CE_state *ce_state, + bool time_limit_reached, + bool rxpkt_thresh_reached) +{ + struct hif_softc *hif; + struct qca_napi_data *napi_data = NULL; + int ce_id = 0; + int cpu_id = 0; + + if (unlikely(!ce_state)) { + QDF_ASSERT(ce_state); + return; + } + + hif = ce_state->scn; + + if (unlikely(!hif)) { + QDF_ASSERT(hif); + return; + } + napi_data = &(hif->napi_data); + if (unlikely(!napi_data)) { + QDF_ASSERT(napi_data); + return; + } + + ce_id = ce_state->id; + cpu_id = qdf_get_cpu(); + + if (unlikely(!napi_data->napis[ce_id])) { + return; + } + + if (time_limit_reached) + napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++; + else + napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++; + + hif_update_napi_max_poll_time(ce_state, ce_id, + cpu_id); +} + +/** + * + * hif_napi_stats() - display NAPI CPU statistics + * @napid: pointer to qca_napi_data + * + * Description: + * Prints the various CPU cores on which the NAPI instances /CEs interrupts + * are being executed. Can be called from outside NAPI layer. + * + * Return: None + */ +void hif_napi_stats(struct qca_napi_data *napid) +{ + int i; + struct qca_napi_cpu *cpu; + + if (!napid) { + qdf_debug("%s: napiid struct is null", __func__); + return; + } + + cpu = napid->napi_cpu; + qdf_debug("NAPI CPU TABLE"); + qdf_debug("lilclhead=%d, bigclhead=%d", + napid->lilcl_head, napid->bigcl_head); + for (i = 0; i < NR_CPUS; i++) { + qdf_debug("CPU[%02d]: state:%d crid=%02d clid=%02d crmk:0x%0lx thmk:0x%0lx frq:%d napi = 0x%08x lnk:%d", + i, + cpu[i].state, cpu[i].core_id, cpu[i].cluster_id, + cpu[i].core_mask.bits[0], + cpu[i].thread_mask.bits[0], + cpu[i].max_freq, cpu[i].napis, + cpu[i].cluster_nxt); + } +} + +#ifdef FEATURE_NAPI_DEBUG +/* + * Local functions + * - no argument checks, all internal/trusted callers + */ +static void hnc_dump_cpus(struct qca_napi_data *napid) +{ + hif_napi_stats(napid); +} +#else +static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ }; +#endif /* FEATURE_NAPI_DEBUG */ +/** + * hnc_link_clusters() - partitions to cpu table into clusters + * @napid: pointer to NAPI data + * + * Takes in a CPU topology table and builds two linked lists + * (big cluster cores, list-head at bigcl_head, and little cluster + * cores, list-head at lilcl_head) out of it. + * + * If there are more than two clusters: + * - bigcl_head and lilcl_head will be different, + * - the cluster with highest cpufreq will be considered the "big" cluster. + * If there are more than one with the highest frequency, the *last* of such + * clusters will be designated as the "big cluster" + * - the cluster with lowest cpufreq will be considered the "li'l" cluster. + * If there are more than one clusters with the lowest cpu freq, the *first* + * of such clusters will be designated as the "little cluster" + * - We only support up to 32 clusters + * Return: 0 : OK + * !0: error (at least one of lil/big clusters could not be found) + */ +#define HNC_MIN_CLUSTER 0 +#define HNC_MAX_CLUSTER 1 +static int hnc_link_clusters(struct qca_napi_data *napid) +{ + int rc = 0; + + int i; + int it = 0; + uint32_t cl_done = 0x0; + int cl, curcl, curclhead = 0; + int more; + unsigned int lilfrq = INT_MAX; + unsigned int bigfrq = 0; + unsigned int clfrq = 0; + int prev = 0; + struct qca_napi_cpu *cpus = napid->napi_cpu; + + napid->lilcl_head = napid->bigcl_head = -1; + + do { + more = 0; + it++; curcl = -1; + for (i = 0; i < NR_CPUS; i++) { + cl = cpus[i].cluster_id; + NAPI_DEBUG("Processing cpu[%d], cluster=%d\n", + i, cl); + if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) { + NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl); + /* continue if ASSERTs are disabled */ + continue; + }; + if (cpumask_weight(&(cpus[i].core_mask)) == 0) { + NAPI_DEBUG("Core mask 0. SKIPPED\n"); + continue; + } + if (cl_done & (0x01 << cl)) { + NAPI_DEBUG("Cluster already processed. SKIPPED\n"); + continue; + } else { + if (more == 0) { + more = 1; + curcl = cl; + curclhead = i; /* row */ + clfrq = cpus[i].max_freq; + prev = -1; + }; + if ((curcl >= 0) && (curcl != cl)) { + NAPI_DEBUG("Entry cl(%d) != curcl(%d). SKIPPED\n", + cl, curcl); + continue; + } + if (cpus[i].max_freq != clfrq) + NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n", + cpus[i].max_freq, clfrq); + if (clfrq >= bigfrq) { + bigfrq = clfrq; + napid->bigcl_head = curclhead; + NAPI_DEBUG("bigcl=%d\n", curclhead); + } + if (clfrq < lilfrq) { + lilfrq = clfrq; + napid->lilcl_head = curclhead; + NAPI_DEBUG("lilcl=%d\n", curclhead); + } + if (prev != -1) + cpus[prev].cluster_nxt = i; + + prev = i; + } + } + if (curcl >= 0) + cl_done |= (0x01 << curcl); + + } while (more); + + if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0))) + rc = -EFAULT; + + hnc_dump_cpus(napid); /* if NAPI_DEBUG */ + return rc; +} +#undef HNC_MIN_CLUSTER +#undef HNC_MAX_CLUSTER + +/* + * hotplug function group + */ + +/** + * hnc_cpu_online_cb() - handles CPU hotplug "up" events + * @context: the associated HIF context + * @cpu: the CPU Id of the CPU the event happened on + * + * Return: None + */ +static void hnc_cpu_online_cb(void *context, uint32_t cpu) +{ + struct hif_softc *hif = context; + struct qca_napi_data *napid = &hif->napi_data; + + if (cpu >= NR_CPUS) + return; + + NAPI_DEBUG("-->%s(act=online, cpu=%u)", __func__, cpu); + + napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP; + NAPI_DEBUG("%s: CPU %u marked %d", + __func__, cpu, napid->napi_cpu[cpu].state); + + NAPI_DEBUG("<--%s", __func__); +} + +/** + * hnc_cpu_before_offline_cb() - handles CPU hotplug "prepare down" events + * @context: the associated HIF context + * @cpu: the CPU Id of the CPU the event happened on + * + * On transtion to offline, we act on PREP events, because we may need to move + * the irqs/NAPIs to another CPU before it is actually off-lined. + * + * Return: None + */ +static void hnc_cpu_before_offline_cb(void *context, uint32_t cpu) +{ + struct hif_softc *hif = context; + struct qca_napi_data *napid = &hif->napi_data; + + if (cpu >= NR_CPUS) + return; + + NAPI_DEBUG("-->%s(act=before_offline, cpu=%u)", __func__, cpu); + + napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN; + + NAPI_DEBUG("%s: CPU %u marked %d; updating affinity", + __func__, cpu, napid->napi_cpu[cpu].state); + + /** + * we need to move any NAPIs on this CPU out. + * if we are in LO throughput mode, then this is valid + * if the CPU is the the low designated CPU. + */ + hif_napi_event(GET_HIF_OPAQUE_HDL(hif), + NAPI_EVT_CPU_STATE, + (void *) + ((size_t)cpu << 16 | napid->napi_cpu[cpu].state)); + + NAPI_DEBUG("<--%s", __func__); +} + +static int hnc_hotplug_register(struct hif_softc *hif_sc) +{ + QDF_STATUS status; + + NAPI_DEBUG("-->%s", __func__); + + status = qdf_cpuhp_register(&hif_sc->napi_data.cpuhp_handler, + hif_sc, + hnc_cpu_online_cb, + hnc_cpu_before_offline_cb); + + NAPI_DEBUG("<--%s [%d]", __func__, status); + + return qdf_status_to_os_return(status); +} + +static void hnc_hotplug_unregister(struct hif_softc *hif_sc) +{ + NAPI_DEBUG("-->%s", __func__); + + if (hif_sc->napi_data.cpuhp_handler) + qdf_cpuhp_unregister(&hif_sc->napi_data.cpuhp_handler); + + NAPI_DEBUG("<--%s", __func__); +} + +/** + * hnc_install_tput() - installs a callback in the throughput detector + * @register: !0 => register; =0: unregister + * + * installs a callback to be called when wifi driver throughput (tx+rx) + * crosses a threshold. Currently, we are using the same criteria as + * TCP ack suppression (500 packets/100ms by default). + * + * Return: 0 : success + * <0: failure + */ + +static int hnc_tput_hook(int install) +{ + int rc = 0; + + /* + * Nothing, until the bw_calculation accepts registration + * it is now hardcoded in the wlan_hdd_main.c::hdd_bus_bw_compute_cbk + * hdd_napi_throughput_policy(...) + */ + return rc; +} + +/* + * Implementation of hif_napi_cpu API + */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i) +{ + cpumask_copy(&(cpus[i].thread_mask), + topology_sibling_cpumask(i)); +} +#else +static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i) +{ +} +#endif + + +/** + * hif_napi_cpu_init() - initialization of irq affinity block + * @ctx: pointer to qca_napi_data + * + * called by hif_napi_create, after the first instance is called + * - builds napi_rss_cpus table from cpu topology + * - links cores of the same clusters together + * - installs hot-plug notifier + * - installs throughput trigger notifier (when such mechanism exists) + * + * Return: 0: OK + * <0: error code + */ +int hif_napi_cpu_init(struct hif_opaque_softc *hif) +{ + int rc = 0; + int i; + struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data; + struct qca_napi_cpu *cpus = napid->napi_cpu; + + NAPI_DEBUG("--> "); + + if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) { + NAPI_DEBUG("NAPI RSS table already initialized.\n"); + rc = -EALREADY; + goto lab_rss_init; + } + + /* build CPU topology table */ + for_each_possible_cpu(i) { + cpus[i].state = ((cpumask_test_cpu(i, cpu_online_mask) + ? QCA_NAPI_CPU_UP + : QCA_NAPI_CPU_DOWN)); + cpus[i].core_id = topology_core_id(i); + cpus[i].cluster_id = topology_physical_package_id(i); + cpumask_copy(&(cpus[i].core_mask), + topology_core_cpumask(i)); + record_sibling_cpumask(cpus, i); + cpus[i].max_freq = cpufreq_quick_get_max(i); + cpus[i].napis = 0x0; + cpus[i].cluster_nxt = -1; /* invalid */ + } + + /* link clusters together */ + rc = hnc_link_clusters(napid); + if (0 != rc) + goto lab_err_topology; + + /* install hotplug notifier */ + rc = hnc_hotplug_register(HIF_GET_SOFTC(hif)); + if (0 != rc) + goto lab_err_hotplug; + + /* install throughput notifier */ + rc = hnc_tput_hook(1); + if (0 == rc) + goto lab_rss_init; + +lab_err_hotplug: + hnc_tput_hook(0); + hnc_hotplug_unregister(HIF_GET_SOFTC(hif)); +lab_err_topology: + memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS); +lab_rss_init: + NAPI_DEBUG("<-- [rc=%d]", rc); + return rc; +} + +/** + * hif_napi_cpu_deinit() - clean-up of irq affinity block + * + * called by hif_napi_destroy, when the last instance is removed + * - uninstalls throughput and hotplug notifiers + * - clears cpu topology table + * Return: 0: OK + */ +int hif_napi_cpu_deinit(struct hif_opaque_softc *hif) +{ + int rc = 0; + struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data; + + NAPI_DEBUG("-->%s(...)", __func__); + + /* uninstall tput notifier */ + rc = hnc_tput_hook(0); + + /* uninstall hotplug notifier */ + hnc_hotplug_unregister(HIF_GET_SOFTC(hif)); + + /* clear the topology table */ + memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS); + + NAPI_DEBUG("<--%s[rc=%d]", __func__, rc); + + return rc; +} + +/** + * hncm_migrate_to() - migrates a NAPI to a CPU + * @napid: pointer to NAPI block + * @ce_id: CE_id of the NAPI instance + * @didx : index in the CPU topology table for the CPU to migrate to + * + * Migrates NAPI (identified by the CE_id) to the destination core + * Updates the napi_map of the destination entry + * + * Return: + * =0 : success + * <0 : error + */ +static int hncm_migrate_to(struct qca_napi_data *napid, + int napi_ce, + int didx) +{ + int rc = 0; + QDF_STATUS status; + + NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx); + + if (!napid->napis[napi_ce]) + return -EINVAL; + + napid->napis[napi_ce]->cpumask.bits[0] = (1 << didx); + + irq_modify_status(napid->napis[napi_ce]->irq, IRQ_NO_BALANCING, 0); + status = qdf_dev_set_irq_affinity(napid->napis[napi_ce]->irq, + (struct qdf_cpu_mask *) + &napid->napis[napi_ce]->cpumask); + rc = qdf_status_to_os_return(status); + + /* unmark the napis bitmap in the cpu table */ + napid->napi_cpu[napid->napis[napi_ce]->cpu].napis &= ~(0x01 << napi_ce); + /* mark the napis bitmap for the new designated cpu */ + napid->napi_cpu[didx].napis |= (0x01 << napi_ce); + napid->napis[napi_ce]->cpu = didx; + + NAPI_DEBUG("<--%s[%d]", __func__, rc); + return rc; +} +/** + * hncm_dest_cpu() - finds a destination CPU for NAPI + * @napid: pointer to NAPI block + * @act : RELOCATE | COLLAPSE | DISPERSE + * + * Finds the designated destionation for the next IRQ. + * RELOCATE: translated to either COLLAPSE or DISPERSE based + * on napid->napi_mode (throughput state) + * COLLAPSE: All have the same destination: the first online CPU in lilcl + * DISPERSE: One of the CPU in bigcl, which has the smallest number of + * NAPIs on it + * + * Return: >=0 : index in the cpu topology table + * : < 0 : error + */ +static int hncm_dest_cpu(struct qca_napi_data *napid, int act) +{ + int destidx = -1; + int head, i; + + NAPI_DEBUG("-->%s(act=%d)", __func__, act); + if (act == HNC_ACT_RELOCATE) { + if (napid->napi_mode == QCA_NAPI_TPUT_LO) + act = HNC_ACT_COLLAPSE; + else + act = HNC_ACT_DISPERSE; + NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d", + __func__, act); + } + if (act == HNC_ACT_COLLAPSE) { + head = i = napid->lilcl_head; +retry_collapse: + while (i >= 0) { + if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) { + destidx = i; + break; + } + i = napid->napi_cpu[i].cluster_nxt; + } + if ((destidx < 0) && (head == napid->lilcl_head)) { + NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl", + __func__); + head = i = napid->bigcl_head; + goto retry_collapse; + } + } else { /* HNC_ACT_DISPERSE */ + int smallest = 99; /* all 32 bits full */ + int smallidx = -1; + + head = i = napid->bigcl_head; +retry_disperse: + while (i >= 0) { + if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) && + (hweight32(napid->napi_cpu[i].napis) <= smallest)) { + smallest = napid->napi_cpu[i].napis; + smallidx = i; + } + i = napid->napi_cpu[i].cluster_nxt; + } + /* Check if matches with user sepecified CPU mask */ + smallidx = ((1 << smallidx) & napid->user_cpu_affin_mask) ? + smallidx : -1; + + if ((smallidx < 0) && (head == napid->bigcl_head)) { + NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl", + __func__); + head = i = napid->lilcl_head; + goto retry_disperse; + } + destidx = smallidx; + } + NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx); + return destidx; +} +/** + * hif_napi_cpu_migrate() - migrate IRQs away + * @cpu: -1: all CPUs specific CPU + * @act: COLLAPSE | DISPERSE + * + * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible + * cores. Eligible cores are: + * act=COLLAPSE -> the first online core of the little cluster + * act=DISPERSE -> separate cores of the big cluster, so that each core will + * host minimum number of NAPIs/IRQs (napid->cpus[cpu].napis) + * + * Note that this function is called with a spinlock acquired already. + * + * Return: =0: success + * <0: error + */ + +int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action) +{ + int rc = 0; + struct qca_napi_cpu *cpup; + int i, dind; + uint32_t napis; + + NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)", + __func__, cpu, action); + /* the following is really: hif_napi_enabled() with less overhead */ + if (napid->ce_map == 0) { + NAPI_DEBUG("%s: NAPI disabled. Not migrating.", __func__); + goto hncm_return; + } + + cpup = napid->napi_cpu; + + switch (action) { + case HNC_ACT_RELOCATE: + case HNC_ACT_DISPERSE: + case HNC_ACT_COLLAPSE: { + /* first find the src napi set */ + if (cpu == HNC_ANY_CPU) + napis = napid->ce_map; + else + napis = cpup[cpu].napis; + /* then clear the napi bitmap on each CPU */ + for (i = 0; i < NR_CPUS; i++) + cpup[i].napis = 0; + /* then for each of the NAPIs to disperse: */ + for (i = 0; i < CE_COUNT_MAX; i++) + if (napis & (1 << i)) { + /* find a destination CPU */ + dind = hncm_dest_cpu(napid, action); + if (dind >= 0) { + NAPI_DEBUG("Migrating NAPI ce%d to %d", + i, dind); + rc = hncm_migrate_to(napid, i, dind); + } else { + NAPI_DEBUG("No dest for NAPI ce%d", i); + hnc_dump_cpus(napid); + rc = -1; + } + } + break; + } + default: { + NAPI_DEBUG("%s: bad action: %d\n", __func__, action); + QDF_BUG(0); + break; + } + } /* switch action */ + +hncm_return: + hnc_dump_cpus(napid); + return rc; +} + + +/** + * hif_napi_bl_irq() - calls irq_modify_status to enable/disable blacklisting + * @napid: pointer to qca_napi_data structure + * @bl_flag: blacklist flag to enable/disable blacklisting + * + * The function enables/disables blacklisting for all the copy engine + * interrupts on which NAPI is enabled. + * + * Return: None + */ +static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag) +{ + int i; + struct qca_napi_info *napii; + + for (i = 0; i < CE_COUNT_MAX; i++) { + /* check if NAPI is enabled on the CE */ + if (!(napid->ce_map & (0x01 << i))) + continue; + + /*double check that NAPI is allocated for the CE */ + napii = napid->napis[i]; + if (!(napii)) + continue; + + if (bl_flag == true) + irq_modify_status(napii->irq, + 0, IRQ_NO_BALANCING); + else + irq_modify_status(napii->irq, + IRQ_NO_BALANCING, 0); + HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i); + } +} + +/** + * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts. + * @napid: pointer to qca_napi_data structure + * @op: blacklist operation to perform + * + * The function enables/disables/queries blacklisting for all CE RX + * interrupts with NAPI enabled. Besides blacklisting, it also enables/disables + * core_ctl_set_boost. + * Once blacklisting is enabled, the interrupts will not be managed by the IRQ + * balancer. + * + * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST is not enabled + * for BLACKLIST_QUERY op - blacklist refcount + * for BLACKLIST_ON op - return value from core_ctl_set_boost API + * for BLACKLIST_OFF op - return value from core_ctl_set_boost API + */ +int hif_napi_cpu_blacklist(struct qca_napi_data *napid, + enum qca_blacklist_op op) +{ + int rc = 0; + static int ref_count; /* = 0 by the compiler */ + uint8_t flags = napid->flags; + bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING; + bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST; + + NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op); + + if (!(bl_en && ccb_en)) { + rc = -EINVAL; + goto out; + } + + switch (op) { + case BLACKLIST_QUERY: + rc = ref_count; + break; + case BLACKLIST_ON: + ref_count++; + rc = 0; + if (ref_count == 1) { + rc = hif_napi_core_ctl_set_boost(true); + NAPI_DEBUG("boost_on() returns %d - refcnt=%d", + rc, ref_count); + hif_napi_bl_irq(napid, true); + } + break; + case BLACKLIST_OFF: + if (ref_count) { + ref_count--; + rc = 0; + if (ref_count == 0) { + rc = hif_napi_core_ctl_set_boost(false); + NAPI_DEBUG("boost_off() returns %d - refcnt=%d", + rc, ref_count); + hif_napi_bl_irq(napid, false); + } + } + break; + default: + NAPI_DEBUG("Invalid blacklist op: %d", op); + rc = -EINVAL; + } /* switch */ +out: + NAPI_DEBUG("<--%s[%d]", __func__, rc); + return rc; +} + +/** + * hif_napi_serialize() - [de-]serialize NAPI operations + * @hif: context + * @is_on: 1: serialize, 0: deserialize + * + * hif_napi_serialize(hif, 1) can be called multiple times. It will perform the + * following steps (see hif_napi_event for code): + * - put irqs of all NAPI instances on the same CPU + * - only for the first serialize call: blacklist + * + * hif_napi_serialize(hif, 0): + * - start a timer (multiple of BusBandwidthTimer -- default: 100 msec) + * - at the end of the timer, check the current throughput state and + * implement it. + */ +static unsigned long napi_serialize_reqs; +int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on) +{ + int rc = -EINVAL; + + if (hif) + switch (is_on) { + case 0: { /* de-serialize */ + rc = hif_napi_event(hif, NAPI_EVT_USR_NORMAL, + (void *) 0); + napi_serialize_reqs = 0; + break; + } /* end de-serialize */ + case 1: { /* serialize */ + rc = hif_napi_event(hif, NAPI_EVT_USR_SERIAL, + (void *)napi_serialize_reqs++); + break; + } /* end serialize */ + default: + break; /* no-op */ + } /* switch */ + return rc; +} + +#endif /* ifdef HIF_IRQ_AFFINITY */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.h new file mode 100644 index 0000000000000000000000000000000000000000..23084c001456d2fb94782051baaaddca34a07e33 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.h @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __HIF_NAPI_H__ +#define __HIF_NAPI_H__ + +/** + * DOC: hif_napi.h + * + * Interface to HIF implemented functions of NAPI. + * These are used by hdd_napi. + */ + + +/* CLD headers */ +#include /* struct hif_opaque_softc; */ + +/** + * common stuff + * The declarations until #ifdef FEATURE_NAPI below + * are valid whether or not FEATURE_NAPI has been + * defined. + */ + +/** + * NAPI manages the following states: + * NAPI state: per NAPI instance, ENABLED/DISABLED + * CPU state: per CPU, DOWN/UP + * TPUT state: global, LOW/HI + * + * "Dynamic" changes to state of various NAPI structures are + * managed by NAPI events. The events may be produced by + * various detection points. With each event, some data is + * sent. The main event handler in hif_napi handles and makes + * the state changes. + * + * event : data : generated + * ---------------:------------------:------------------ + * EVT_INI_FILE : cfg->napi_enable : after ini file processed + * EVT_CMD_STATE : cmd arg : by the vendor cmd + * EVT_INT_STATE : 0 : internal - shut off/disable + * EVT_CPU_STATE : (cpu << 16)|state: CPU hotplug events + * EVT_TPUT_STATE : (high/low) : tput trigger + * EVT_USR_SERIAL : num-serial_calls : WMA/ROAMING-START/IND + * EVT_USR_NORMAL : N/A : WMA/ROAMING-END + */ +enum qca_napi_event { + NAPI_EVT_INVALID, + NAPI_EVT_INI_FILE, + NAPI_EVT_CMD_STATE, + NAPI_EVT_INT_STATE, + NAPI_EVT_CPU_STATE, + NAPI_EVT_TPUT_STATE, + NAPI_EVT_USR_SERIAL, + NAPI_EVT_USR_NORMAL +}; +/** + * Following are some of NAPI related features controlled using feature flag + * These flags need to be enabled in the qca_napi_data->flags variable for the + * feature to kick in. +.* QCA_NAPI_FEATURE_CPU_CORRECTION - controls CPU correction logic +.* QCA_NAPI_FEATURE_IRQ_BLACKLISTING - controls call to irq_blacklist_on API +.* QCA_NAPI_FEATURE_CORE_CTL_BOOST - controls call to core_ctl_set_boost API + */ +#define QCA_NAPI_FEATURE_CPU_CORRECTION BIT(1) +#define QCA_NAPI_FEATURE_IRQ_BLACKLISTING BIT(2) +#define QCA_NAPI_FEATURE_CORE_CTL_BOOST BIT(3) + +/** + * Macros to map ids -returned by ...create()- to pipes and vice versa + */ +#define NAPI_ID2PIPE(i) ((i)-1) +#define NAPI_PIPE2ID(p) ((p)+1) + +#ifdef RECEIVE_OFFLOAD +/** + * hif_napi_rx_offld_flush_cb_register() - Register flush callback for Rx offld + * @hif_hdl: pointer to hif context + * @offld_flush_handler: register offld flush callback + * + * Return: None + */ +void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl, + void (rx_ol_flush_handler)(void *arg)); + +/** + * hif_napi_rx_offld_flush_cb_deregister() - Degregister offld flush_cb + * @hif_hdl: pointer to hif context + * + * Return: NONE + */ +void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl); +#endif /* RECEIVE_OFFLOAD */ + +/** + * hif_napi_get_lro_info() - returns the address LRO data for napi_id + * @hif: pointer to hif context + * @napi_id: napi instance + * + * Description: + * Returns the address of the LRO structure + * + * Return: + * : address of the LRO structure + */ +void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id); + +enum qca_blacklist_op { + BLACKLIST_QUERY, + BLACKLIST_OFF, + BLACKLIST_ON +}; + +#ifdef FEATURE_NAPI + +/** + * NAPI HIF API + * + * the declarations below only apply to the case + * where FEATURE_NAPI is defined + */ + +int hif_napi_create(struct hif_opaque_softc *hif, + int (*poll)(struct napi_struct *, int), + int budget, + int scale, + uint8_t flags); +int hif_napi_destroy(struct hif_opaque_softc *hif, + uint8_t id, + int force); + +struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif); + +/** + * hif_get_napi() - get NAPI corresponding to napi_id + * @napi_id: NAPI instance + * @napid: Handle NAPI + * + * Return: napi corresponding napi_id + */ +struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid); + +int hif_napi_event(struct hif_opaque_softc *hif, + enum qca_napi_event event, + void *data); + +/* called from the ISR within hif, so, ce is known */ +int hif_napi_enabled(struct hif_opaque_softc *hif, int ce); + +bool hif_napi_created(struct hif_opaque_softc *hif, int ce); + +/* called from hdd (napi_poll), using napi id as a selector */ +void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id); + +/* called by ce_tasklet.c::ce_dispatch_interrupt*/ +bool hif_napi_schedule(struct hif_opaque_softc *scn, int ce_id); + +/* called by hdd_napi, which is called by kernel */ +int hif_napi_poll(struct hif_opaque_softc *hif_ctx, + struct napi_struct *napi, int budget); + +#ifdef FEATURE_NAPI_DEBUG +#define NAPI_DEBUG(fmt, ...) \ + qdf_debug("wlan: NAPI: %s:%d "fmt, __func__, __LINE__, ##__VA_ARGS__) +#else +#define NAPI_DEBUG(fmt, ...) /* NO-OP */ +#endif /* FEATURE NAPI_DEBUG */ + +#define HNC_ANY_CPU (-1) +#define HNC_ACT_RELOCATE (0) +#define HNC_ACT_COLLAPSE (1) +#define HNC_ACT_DISPERSE (-1) + +/** + * hif_update_napi_max_poll_time() - updates NAPI max poll time + * @ce_state: ce state + * @ce_id: Copy engine ID + * @cpu_id: cpu id + * + * This API updates NAPI max poll time per CE per SPU. + * + * Return: void + */ +void hif_update_napi_max_poll_time(struct CE_state *ce_state, + int ce_id, + int cpu_id); +/** + * Local interface to HIF implemented functions of NAPI CPU affinity management. + * Note: + * 1- The symbols in this file are NOT supposed to be used by any + * entity other than hif_napi.c + * 2- The symbols are valid only if HELIUMPLUS is defined. They are otherwise + * mere wrappers. + * + */ + +#else /* ! defined(FEATURE_NAPI) */ + +/** + * Stub API + * + * The declarations in this section are valid only + * when FEATURE_NAPI has *not* been defined. + */ + +#define NAPI_DEBUG(fmt, ...) /* NO-OP */ + +static inline int hif_napi_create(struct hif_opaque_softc *hif, + uint8_t pipe_id, + int (*poll)(struct napi_struct *, int), + int budget, + int scale, + uint8_t flags) +{ return -EPERM; } + +static inline int hif_napi_destroy(struct hif_opaque_softc *hif, + uint8_t id, + int force) +{ return -EPERM; } + +static inline struct qca_napi_data *hif_napi_get_all( + struct hif_opaque_softc *hif) +{ return NULL; } + +static inline struct qca_napi_info *hif_get_napi(int napi_id, + struct qca_napi_data *napid) +{ return NULL; } + +static inline int hif_napi_event(struct hif_opaque_softc *hif, + enum qca_napi_event event, + void *data) +{ return -EPERM; } + +/* called from the ISR within hif, so, ce is known */ +static inline int hif_napi_enabled(struct hif_opaque_softc *hif, int ce) +{ return 0; } + +static inline bool hif_napi_created(struct hif_opaque_softc *hif, int ce) +{ return false; } + +/* called from hdd (napi_poll), using napi id as a selector */ +static inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id) +{ return; } + +static inline bool hif_napi_schedule(struct hif_opaque_softc *hif, int ce_id) +{ return false; } + +static inline int hif_napi_poll(struct napi_struct *napi, int budget) +{ return -EPERM; } + +/** + * hif_update_napi_max_poll_time() - updates NAPI max poll time + * @ce_state: ce state + * @ce_id: Copy engine ID + * @cpu_id: cpu id + * + * This API updates NAPI max poll time per CE per SPU. + * + * Return: void + */ +static inline void hif_update_napi_max_poll_time(struct CE_state *ce_state, + int ce_id, + int cpu_id) +{ return; } +#endif /* FEATURE_NAPI */ + +#if defined(HIF_IRQ_AFFINITY) && defined(FEATURE_NAPI) +/* + * prototype signatures + */ +int hif_napi_cpu_init(struct hif_opaque_softc *hif); +int hif_napi_cpu_deinit(struct hif_opaque_softc *hif); + +int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action); +int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on); + +int hif_napi_cpu_blacklist(struct qca_napi_data *napid, + enum qca_blacklist_op op); + +/* not directly related to irq affinity, but oh well */ +void hif_napi_stats(struct qca_napi_data *napid); +void hif_napi_update_yield_stats(struct CE_state *ce_state, + bool time_limit_reached, + bool rxpkt_thresh_reached); +#else +struct qca_napi_data; +static inline int hif_napi_cpu_init(struct hif_opaque_softc *hif) +{ return 0; } + +static inline int hif_napi_cpu_deinit(struct hif_opaque_softc *hif) +{ return 0; } + +static inline int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, + int action) +{ return 0; } + +static inline int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on) +{ return -EPERM; } + +static inline void hif_napi_stats(struct qca_napi_data *napid) { } +static inline void hif_napi_update_yield_stats(struct CE_state *ce_state, + bool time_limit_reached, + bool rxpkt_thresh_reached) { } + +static inline int hif_napi_cpu_blacklist(struct qca_napi_data *napid, + enum qca_blacklist_op op) +{ return 0; } +#endif /* HIF_IRQ_AFFINITY */ + +#endif /* __HIF_NAPI_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_unit_test_suspend.c b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_unit_test_suspend.c new file mode 100644 index 0000000000000000000000000000000000000000..112b25fb815c6e1b015ec160c7538b777abe310a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_unit_test_suspend.c @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_status.h" +#include "hif_main.h" +#include "hif_unit_test_suspend.h" +#include "hif_unit_test_suspend_i.h" + +enum hif_ut_suspend_state_bits { + UT_SUSPENDED_BIT = 0 +}; + +/** + * hif_ut_fw_resume_work() - Work handler for firmware-triggered resume + * @work: The work struct being passed from the linux kernel + * + * Return: None + */ +static void hif_ut_fw_resume_work(struct work_struct *work) +{ + struct hif_ut_suspend_context *ctx = + container_of(work, struct hif_ut_suspend_context, resume_work); + + QDF_BUG(ctx); + if (!ctx) + return; + + QDF_BUG(ctx->resume_callback); + if (!ctx->resume_callback) + return; + + ctx->resume_callback(); + ctx->resume_callback = NULL; +} + +void hif_ut_suspend_init(struct hif_softc *scn) +{ + INIT_WORK(&scn->ut_suspend_ctx.resume_work, hif_ut_fw_resume_work); +} + +bool hif_is_ut_suspended(struct hif_softc *scn) +{ + QDF_BUG(scn); + if (!scn) + return false; + + return test_bit(UT_SUSPENDED_BIT, &scn->ut_suspend_ctx.state); +} + +QDF_STATUS hif_ut_apps_suspend(struct hif_opaque_softc *opaque_scn, + hif_ut_resume_callback callback) +{ + struct hif_softc *scn = HIF_GET_SOFTC(opaque_scn); + + QDF_BUG(scn); + if (!scn) + return QDF_STATUS_E_INVAL; + + QDF_BUG(callback); + if (!callback) + return QDF_STATUS_E_INVAL; + + if (test_and_set_bit(UT_SUSPENDED_BIT, &scn->ut_suspend_ctx.state)) + return QDF_STATUS_E_INVAL; + + scn->ut_suspend_ctx.resume_callback = callback; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS hif_ut_apps_resume(struct hif_opaque_softc *opaque_scn) +{ + struct hif_softc *scn = HIF_GET_SOFTC(opaque_scn); + + QDF_BUG(scn); + if (!scn) + return QDF_STATUS_E_INVAL; + + if (!test_and_clear_bit(UT_SUSPENDED_BIT, &scn->ut_suspend_ctx.state)) + return QDF_STATUS_E_INVAL; + + scn->ut_suspend_ctx.resume_callback = NULL; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS hif_ut_fw_resume(struct hif_softc *scn) +{ + QDF_BUG(scn); + if (!scn) + return QDF_STATUS_E_INVAL; + + if (!test_and_clear_bit(UT_SUSPENDED_BIT, &scn->ut_suspend_ctx.state)) + return QDF_STATUS_E_INVAL; + + schedule_work(&scn->ut_suspend_ctx.resume_work); + + return QDF_STATUS_SUCCESS; +} + +bool hif_irq_trigger_ut_resume(struct hif_softc *scn) +{ + if (!hif_is_ut_suspended(scn)) + return false; + + return true; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_unit_test_suspend_i.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_unit_test_suspend_i.h new file mode 100644 index 0000000000000000000000000000000000000000..4a4a4a97ca334978352e0efebd356765123bc181 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_unit_test_suspend_i.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: HIF internal unit-test related APIs for triggering WoW suspend/resume + * while the application processor is still up. + */ + +#ifndef _HIF_UNIT_TEST_SUSPEND_I_H_ +#define _HIF_UNIT_TEST_SUSPEND_I_H_ + +#include "qdf_status.h" +#include "hif_main.h" +#include "hif_unit_test_suspend.h" + +#ifdef WLAN_SUSPEND_RESUME_TEST + +struct hif_ut_suspend_context { + unsigned long state; + hif_ut_resume_callback resume_callback; + struct work_struct resume_work; +}; + +/** + * hif_ut_suspend_init() - Initialize the unit-test suspend context + * @scn: the hif context to initialize + * + * Return: None + */ +void hif_ut_suspend_init(struct hif_softc *scn); + +/** + * hif_is_ut_suspended() - Tests if the given hif context is unit-test suspended + * @scn: The HIF context to check + * + * Return: true, if unit-test suspended, otherwise false + */ +bool hif_is_ut_suspended(struct hif_softc *scn); + +/** + * hif_ut_fw_resume() - Initiate a firmware triggered unit-test resume + * @scn: The HIF context to operate on + * + * This schedules the callback previously registered via a call to + * hif_ut_apps_suspend for execution. + * + * Return: QDF_STATUS + */ +QDF_STATUS hif_ut_fw_resume(struct hif_softc *scn); + +/** + * hif_irq_trigger_ut_resume() - Test for given hif ctx unit-test resume needed + * @scn: The HIF context to check + * + * Return: true, if unit-test resume procedure is needed, otherwise false + */ +bool hif_irq_trigger_ut_resume(struct hif_softc *scn); + +#else /* WLAN_SUSPEND_RESUME_TEST */ + +struct hif_ut_suspend_context {}; + +static inline void hif_ut_suspend_init(struct hif_softc *scn) {} + +static inline bool hif_is_ut_suspended(struct hif_softc *scn) +{ + return false; +} + +static inline QDF_STATUS hif_ut_fw_resume(struct hif_softc *scn) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUSPEND_RESUME_TEST */ + +#endif /* _HIF_UNIT_TEST_SUSPEND_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ipcie/hif_io32_ipci.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ipcie/hif_io32_ipci.h new file mode 100644 index 0000000000000000000000000000000000000000..d5fcb89c7a7de68690babdcc29e2419c08f80dd9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ipcie/hif_io32_ipci.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __HIF_IO32_IPCI_H__ +#define __HIF_IO32_IPCI_H__ + +#ifdef HIF_IPCI + +#include "hif_main.h" +#include "regtable.h" +#include "ce_reg.h" +#include "qdf_atomic.h" +#include "if_ipci.h" +/* + * For maximum performance and no power management, set this to 1. + * For power management at the cost of performance, set this to 0. + */ +#ifndef CONFIG_ATH_IPCIE_MAX_PERF +#define CONFIG_ATH_IPCIE_MAX_PERF 0 +#endif + +/* + * PCIE_ACCESS_LOG_NUM specifies the number of + * read/write records to store + */ +#ifdef CONFIG_ATH_IPCIE_ACCESS_DEBUG +#define IPCIE_ACCESS_LOG_NUM 500 +#endif + +/* 64-bit MSI support */ +#define CONFIG_IPCIE_64BIT_MSI 0 + +/* AXI gating when L1, L2 to reduce power consumption */ +#define CONFIG_IPCIE_ENABLE_AXI_CLK_GATE 0 + +irqreturn_t hif_fw_interrupt_handler(int irq, void *arg); +#endif /* HIF_IPCI */ +#endif /* __HIF_IO32_IPCI_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ipcie/if_ipci.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ipcie/if_ipci.c new file mode 100644 index 0000000000000000000000000000000000000000..4f52c7fab3b36fe288eefb738495c592fe708e24 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ipcie/if_ipci.c @@ -0,0 +1,792 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#ifdef CONFIG_PCI_MSM +#include +#endif +#include "hif_io32.h" +#include "if_ipci.h" +#include "hif.h" +#include "target_type.h" +#include "hif_main.h" +#include "ce_main.h" +#include "ce_api.h" +#include "ce_internal.h" +#include "ce_reg.h" +#include "ce_bmi.h" +#include "regtable.h" +#include "hif_hw_version.h" +#include +#include +#include "qdf_status.h" +#include "qdf_atomic.h" +#include "pld_common.h" +#include "mp_dev.h" +#include "hif_debug.h" + +#include "ce_tasklet.h" +#include "targaddrs.h" +#include "hif_exec.h" + +#include "ipci_api.h" + +void hif_ipci_enable_power_management(struct hif_softc *hif_sc, + bool is_packet_log_enabled) +{ +} + +void hif_ipci_disable_power_management(struct hif_softc *hif_ctx) +{ +} + +void hif_ipci_display_stats(struct hif_softc *hif_ctx) +{ + hif_display_ce_stats(hif_ctx); +} + +void hif_ipci_clear_stats(struct hif_softc *hif_ctx) +{ + struct hif_ipci_softc *ipci_ctx = HIF_GET_IPCI_SOFTC(hif_ctx); + + if (!ipci_ctx) { + HIF_ERROR("%s, hif_ctx null", __func__); + return; + } + hif_clear_ce_stats(&ipci_ctx->ce_sc); +} + +QDF_STATUS hif_ipci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type) +{ + struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(hif_ctx); + + hif_ctx->bus_type = bus_type; + + qdf_spinlock_create(&sc->irq_lock); + + return hif_ce_open(hif_ctx); +} + +int hif_ipci_bus_configure(struct hif_softc *hif_sc) +{ + int status = 0; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); + + hif_ce_prepare_config(hif_sc); + + /* initialize sleep state adjust variables */ + hif_state->sleep_timer_init = true; + hif_state->keep_awake_count = 0; + hif_state->fake_sleep = false; + hif_state->sleep_ticks = 0; + + status = hif_wlan_enable(hif_sc); + if (status) { + HIF_ERROR("%s: hif_wlan_enable error = %d", + __func__, status); + goto timer_free; + } + + A_TARGET_ACCESS_LIKELY(hif_sc); + + status = hif_config_ce(hif_sc); + if (status) + goto disable_wlan; + + status = hif_configure_irq(hif_sc); + if (status < 0) + goto unconfig_ce; + + A_TARGET_ACCESS_UNLIKELY(hif_sc); + + return status; + +unconfig_ce: + hif_unconfig_ce(hif_sc); +disable_wlan: + A_TARGET_ACCESS_UNLIKELY(hif_sc); + hif_wlan_disable(hif_sc); + +timer_free: + qdf_timer_stop(&hif_state->sleep_timer); + qdf_timer_free(&hif_state->sleep_timer); + hif_state->sleep_timer_init = false; + + HIF_ERROR("%s: failed, status = %d", __func__, status); + return status; +} + +void hif_ipci_close(struct hif_softc *hif_sc) +{ + hif_ce_close(hif_sc); +} + +/** + * hif_ce_srng_msi_free_irq(): free CE msi IRQ + * @scn: struct hif_softc + * + * Return: ErrorNo + */ +static int hif_ce_srng_msi_free_irq(struct hif_softc *scn) +{ + int ret; + int ce_id, irq; + uint32_t msi_data_start; + uint32_t msi_data_count; + uint32_t msi_irq_start; + struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn); + + ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", + &msi_data_count, &msi_data_start, + &msi_irq_start); + if (ret) + return ret; + + /* needs to match the ce_id -> irq data mapping + * used in the srng parameter configuration + */ + for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { + unsigned int msi_data; + + if (!ce_sc->tasklets[ce_id].inited) + continue; + + msi_data = (ce_id % msi_data_count) + msi_irq_start; + irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); + + hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__, + ce_id, msi_data, irq); + + free_irq(irq, &ce_sc->tasklets[ce_id]); + } + + return ret; +} + +/** + * hif_ipci_deconfigure_grp_irq(): deconfigure HW block IRQ + * @scn: struct hif_softc + * + * Return: none + */ +void hif_ipci_deconfigure_grp_irq(struct hif_softc *scn) +{ + int i, j, irq; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct hif_exec_context *hif_ext_group; + + for (i = 0; i < hif_state->hif_num_extgroup; i++) { + hif_ext_group = hif_state->hif_ext_group[i]; + if (hif_ext_group->irq_requested) { + hif_ext_group->irq_requested = false; + for (j = 0; j < hif_ext_group->numirq; j++) { + irq = hif_ext_group->os_irq[j]; + free_irq(irq, hif_ext_group); + } + hif_ext_group->numirq = 0; + } + } +} + +void hif_ipci_nointrs(struct hif_softc *scn) +{ + int ret; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + ce_unregister_irq(hif_state, CE_ALL_BITMAP); + + if (scn->request_irq_done == false) + return; + + hif_ipci_deconfigure_grp_irq(scn); + + ret = hif_ce_srng_msi_free_irq(scn); + if (ret != -EINVAL) { + /* ce irqs freed in hif_ce_srng_msi_free_irq */ + + if (scn->wake_irq) + free_irq(scn->wake_irq, scn); + scn->wake_irq = 0; + } + + scn->request_irq_done = false; +} + +void hif_ipci_disable_bus(struct hif_softc *scn) +{ + struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(scn); + void __iomem *mem; + + /* Attach did not succeed, all resources have been + * freed in error handler + */ + if (!sc) + return; + + mem = (void __iomem *)sc->mem; + if (mem) { + hif_dump_pipe_debug_count(scn); + if (scn->athdiag_procfs_inited) { + athdiag_procfs_remove(); + scn->athdiag_procfs_inited = false; + } + scn->mem = NULL; + } + HIF_INFO("%s: X", __func__); +} + +#if defined(CONFIG_PCI_MSM) +void hif_ipci_prevent_linkdown(struct hif_softc *scn, bool flag) +{ + int errno; + + HIF_INFO("wlan: %s pcie power collapse", flag ? "disable" : "enable"); + + errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag); + if (errno) + HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d", + __func__, errno); +} +#else +void hif_ipci_prevent_linkdown(struct hif_softc *scn, bool flag) +{ + HIF_INFO("wlan: %s pcie power collapse", (flag ? "disable" : "enable")); +} +#endif + +int hif_ipci_bus_suspend(struct hif_softc *scn) +{ + QDF_STATUS ret; + + hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn)); + + ret = hif_try_complete_tasks(scn); + if (QDF_IS_STATUS_ERROR(ret)) { + hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn)); + return -EBUSY; + } + + return 0; +} + +int hif_ipci_bus_resume(struct hif_softc *scn) +{ + hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn)); + + return 0; +} + +int hif_ipci_bus_suspend_noirq(struct hif_softc *scn) +{ + if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn))) + qdf_atomic_set(&scn->link_suspended, 1); + + hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn)); + + return 0; +} + +int hif_ipci_bus_resume_noirq(struct hif_softc *scn) +{ + hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn)); + + if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn))) + qdf_atomic_set(&scn->link_suspended, 0); + + return 0; +} + +void hif_ipci_disable_isr(struct hif_softc *scn) +{ + struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(scn); + + hif_exec_kill(&scn->osc); + hif_nointrs(scn); + /* Cancel the pending tasklet */ + ce_tasklet_kill(scn); + tasklet_kill(&sc->intr_tq); + qdf_atomic_set(&scn->active_tasklet_cnt, 0); + qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0); +} + +int hif_ipci_dump_registers(struct hif_softc *hif_ctx) +{ + int status; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + status = hif_dump_ce_registers(scn); + + if (status) + HIF_ERROR("%s: Dump CE Registers Failed", __func__); + + return 0; +} + +/** + * hif_ce_interrupt_handler() - interrupt handler for copy engine + * @irq: irq number + * @context: tasklet context + * + * Return: irqreturn_t + */ +static irqreturn_t hif_ce_interrupt_handler(int irq, void *context) +{ + struct ce_tasklet_entry *tasklet_entry = context; + + return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry); +} + +extern const char *ce_name[]; + +/** + * hif_ce_msi_map_ce_to_irq() - map CE to IRQ + * @scn: hif context + * @ce_id: CE Id + * + * Return: IRQ number + */ +static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id) +{ + struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn); + + return ipci_scn->ce_msi_irq_num[ce_id]; +} + +/* hif_ce_srng_msi_irq_disable() - disable the irq for msi + * @hif_sc: hif context + * @ce_id: which ce to disable copy complete interrupts for + * + * @Return: none + */ +static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id) +{ + disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); +} + +/* hif_ce_srng_msi_irq_enable() - enable the irq for msi + * @hif_sc: hif context + * @ce_id: which ce to enable copy complete interrupts for + * + * @Return: none + */ +static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id) +{ + enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); +} + +/* hif_ce_msi_configure_irq() - configure the irq + * @scn: hif context + * + * @Return: none + */ +static int hif_ce_msi_configure_irq(struct hif_softc *scn) +{ + int ret; + int ce_id, irq; + uint32_t msi_data_start; + uint32_t msi_data_count; + uint32_t msi_irq_start; + struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn); + struct hif_ipci_softc *ipci_sc = HIF_GET_IPCI_SOFTC(scn); + + /* do wake irq assignment */ + ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE", + &msi_data_count, &msi_data_start, + &msi_irq_start); + if (ret) + return ret; + + scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start); + ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler, + IRQF_NO_SUSPEND, "wlan_wake_irq", scn); + if (ret) + return ret; + + /* do ce irq assignments */ + ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", + &msi_data_count, &msi_data_start, + &msi_irq_start); + if (ret) + goto free_wake_irq; + + scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable; + scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable; + scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq; + + /* needs to match the ce_id -> irq data mapping + * used in the srng parameter configuration + */ + for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { + unsigned int msi_data = (ce_id % msi_data_count) + + msi_irq_start; + irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); + HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)", + __func__, ce_id, msi_data, irq, + &ce_sc->tasklets[ce_id]); + + /* implies the ce is also initialized */ + if (!ce_sc->tasklets[ce_id].inited) + continue; + + ipci_sc->ce_msi_irq_num[ce_id] = irq; + ret = request_irq(irq, hif_ce_interrupt_handler, + IRQF_SHARED, + ce_name[ce_id], + &ce_sc->tasklets[ce_id]); + if (ret) + goto free_irq; + } + + return ret; + +free_irq: + /* the request_irq for the last ce_id failed so skip it. */ + while (ce_id > 0 && ce_id < scn->ce_count) { + unsigned int msi_data; + + ce_id--; + msi_data = (ce_id % msi_data_count) + msi_irq_start; + irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); + free_irq(irq, &ce_sc->tasklets[ce_id]); + } + +free_wake_irq: + free_irq(scn->wake_irq, scn->qdf_dev->dev); + scn->wake_irq = 0; + + return ret; +} + +/** + * hif_exec_grp_irq_disable() - disable the irq for group + * @hif_ext_group: hif exec context + * + * Return: none + */ +static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group) +{ + int i; + + for (i = 0; i < hif_ext_group->numirq; i++) + disable_irq_nosync(hif_ext_group->os_irq[i]); +} + +/** + * hif_exec_grp_irq_enable() - enable the irq for group + * @hif_ext_group: hif exec context + * + * Return: none + */ +static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group) +{ + int i; + + for (i = 0; i < hif_ext_group->numirq; i++) + enable_irq(hif_ext_group->os_irq[i]); +} + +const char *hif_ipci_get_irq_name(int irq_no) +{ + return "pci-dummy"; +} + +int hif_ipci_configure_grp_irq(struct hif_softc *scn, + struct hif_exec_context *hif_ext_group) +{ + int ret = 0; + int irq = 0; + int j; + + hif_ext_group->irq_enable = &hif_exec_grp_irq_enable; + hif_ext_group->irq_disable = &hif_exec_grp_irq_disable; + hif_ext_group->irq_name = &hif_ipci_get_irq_name; + hif_ext_group->work_complete = &hif_dummy_grp_done; + + for (j = 0; j < hif_ext_group->numirq; j++) { + irq = hif_ext_group->irq[j]; + + hif_info("request_irq = %d for grp %d", + irq, hif_ext_group->grp_id); + ret = request_irq(irq, + hif_ext_group_interrupt_handler, + IRQF_SHARED | IRQF_NO_SUSPEND, + "wlan_EXT_GRP", + hif_ext_group); + if (ret) { + HIF_ERROR("%s: request_irq failed ret = %d", + __func__, ret); + return -EFAULT; + } + hif_ext_group->os_irq[j] = irq; + } + hif_ext_group->irq_requested = true; + return 0; +} + +int hif_configure_irq(struct hif_softc *scn) +{ + int ret = 0; + + HIF_TRACE("%s: E", __func__); + + if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) { + scn->request_irq_done = false; + return 0; + } + + ret = hif_ce_msi_configure_irq(scn); + if (ret == 0) + goto end; + + if (ret < 0) { + HIF_ERROR("%s: hif_ipci_configure_irq error = %d", + __func__, ret); + return ret; + } +end: + scn->request_irq_done = true; + return 0; +} + +/** + * hif_ipci_get_soc_info_pld() - get soc info for ipcie bus from pld target + * @sc: ipci context + * @dev: device structure + * + * Return: none + */ +static void hif_ipci_get_soc_info_pld(struct hif_ipci_softc *sc, + struct device *dev) +{ + struct pld_soc_info info; + + pld_get_soc_info(dev, &info); + sc->mem = info.v_addr; + sc->ce_sc.ol_sc.mem = info.v_addr; + sc->ce_sc.ol_sc.mem_pa = info.p_addr; +} + +/** + * hif_ipci_get_soc_info_nopld() - get soc info for ipcie bus for non pld target + * @sc: ipci context + * @dev: device structure + * + * Return: none + */ +static void hif_ipci_get_soc_info_nopld(struct hif_ipci_softc *sc, + struct device *dev) +{} + +/** + * hif_is_pld_based_target() - verify if the target is pld based + * @sc: ipci context + * @device_id: device id + * + * Return: none + */ +static bool hif_is_pld_based_target(struct hif_ipci_softc *sc, + int device_id) +{ + if (!pld_have_platform_driver_support(sc->dev)) + return false; + + switch (device_id) { +#ifdef QCA_WIFI_QCA6750 + case QCA6750_DEVICE_ID: +#endif + return true; + } + return false; +} + +/** + * hif_ipci_init_deinit_ops_attach() - attach ops for ipci + * @sc: ipci context + * @device_id: device id + * + * Return: none + */ +static void hif_ipci_init_deinit_ops_attach(struct hif_ipci_softc *sc, + int device_id) +{ + if (hif_is_pld_based_target(sc, device_id)) + sc->hif_ipci_get_soc_info = hif_ipci_get_soc_info_pld; + else + sc->hif_ipci_get_soc_info = hif_ipci_get_soc_info_nopld; +} + +QDF_STATUS hif_ipci_enable_bus(struct hif_softc *ol_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type) +{ + int ret = 0; + uint32_t hif_type, target_type; + struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(ol_sc); + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc); + uint16_t revision_id = 0; + struct pci_dev *pdev = bdev; + struct hif_target_info *tgt_info; + int device_id = QCA6750_DEVICE_ID; + + if (!ol_sc) { + HIF_ERROR("%s: hif_ctx is NULL", __func__); + return QDF_STATUS_E_NOMEM; + } + + sc->dev = dev; + tgt_info = hif_get_target_info_handle(hif_hdl); + hif_ipci_init_deinit_ops_attach(sc, device_id); + sc->hif_ipci_get_soc_info(sc, dev); + HIF_TRACE("%s: hif_enable_pci done", __func__); + + device_disable_async_suspend(&pdev->dev); + + ret = hif_get_device_type(device_id, revision_id, + &hif_type, &target_type); + if (ret < 0) { + HIF_ERROR("%s: invalid device id/revision_id", __func__); + return QDF_STATUS_E_ABORTED; + } + HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x", + __func__, hif_type, target_type); + + hif_register_tbl_attach(ol_sc, hif_type); + hif_target_register_tbl_attach(ol_sc, target_type); + sc->use_register_windowing = false; + tgt_info->target_type = target_type; + + if (!ol_sc->mem_pa) { + HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__); + ret = -EIO; + return QDF_STATUS_E_ABORTED; + } + + return 0; +} + +bool hif_ipci_needs_bmi(struct hif_softc *scn) +{ + return !ce_srng_based(scn); +} + +#ifdef FORCE_WAKE +int hif_force_wake_request(struct hif_opaque_softc *hif_handle) +{ + uint32_t timeout = 0, value; + struct hif_softc *scn = (struct hif_softc *)hif_handle; + struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn); + + if (pld_force_wake_request(scn->qdf_dev->dev)) { + hif_err("force wake request send failed"); + return -EINVAL; + } + + HIF_STATS_INC(ipci_scn, mhi_force_wake_request_vote, 1); + while (!pld_is_device_awake(scn->qdf_dev->dev) && + timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS) { + qdf_mdelay(FORCE_WAKE_DELAY_MS); + timeout += FORCE_WAKE_DELAY_MS; + } + + if (pld_is_device_awake(scn->qdf_dev->dev) <= 0) { + hif_err("Unable to wake up mhi"); + HIF_STATS_INC(ipci_scn, mhi_force_wake_failure, 1); + return -EINVAL; + } + HIF_STATS_INC(ipci_scn, mhi_force_wake_success, 1); + hif_write32_mb(scn, + scn->mem + + PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG, + 0); + hif_write32_mb(scn, + scn->mem + + PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG, + 1); + + HIF_STATS_INC(ipci_scn, soc_force_wake_register_write_success, 1); + /* + * do not reset the timeout + * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms + */ + do { + value = + hif_read32_mb(scn, + scn->mem + + PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG); + if (value) + break; + qdf_mdelay(FORCE_WAKE_DELAY_MS); + timeout += FORCE_WAKE_DELAY_MS; + } while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS); + + if (!value) { + hif_err("failed handshake mechanism"); + HIF_STATS_INC(ipci_scn, soc_force_wake_failure, 1); + return -ETIMEDOUT; + } + + HIF_STATS_INC(ipci_scn, soc_force_wake_success, 1); + + return 0; +} + +int hif_force_wake_release(struct hif_opaque_softc *hif_handle) +{ + int ret; + struct hif_softc *scn = (struct hif_softc *)hif_handle; + struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn); + + ret = pld_force_wake_release(scn->qdf_dev->dev); + if (ret) { + hif_err("force wake release failure"); + HIF_STATS_INC(ipci_scn, mhi_force_wake_release_failure, 1); + return ret; + } + + HIF_STATS_INC(ipci_scn, mhi_force_wake_release_success, 1); + hif_write32_mb(scn, + scn->mem + + PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG, + 0); + HIF_STATS_INC(ipci_scn, soc_force_wake_release_success, 1); + return 0; +} + +void hif_print_ipci_stats(struct hif_ipci_softc *ipci_handle) +{ + hif_debug("mhi_force_wake_request_vote: %d", + ipci_handle->stats.mhi_force_wake_request_vote); + hif_debug("mhi_force_wake_failure: %d", + ipci_handle->stats.mhi_force_wake_failure); + hif_debug("mhi_force_wake_success: %d", + ipci_handle->stats.mhi_force_wake_success); + hif_debug("soc_force_wake_register_write_success: %d", + ipci_handle->stats.soc_force_wake_register_write_success); + hif_debug("soc_force_wake_failure: %d", + ipci_handle->stats.soc_force_wake_failure); + hif_debug("soc_force_wake_success: %d", + ipci_handle->stats.soc_force_wake_success); + hif_debug("mhi_force_wake_release_failure: %d", + ipci_handle->stats.mhi_force_wake_release_failure); + hif_debug("mhi_force_wake_release_success: %d", + ipci_handle->stats.mhi_force_wake_release_success); + hif_debug("oc_force_wake_release_success: %d", + ipci_handle->stats.soc_force_wake_release_success); +} +#endif /* FORCE_WAKE */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ipcie/if_ipci.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ipcie/if_ipci.h new file mode 100644 index 0000000000000000000000000000000000000000..b7464b5beb78af63a89ffb7dfd84bd4d2caa7d09 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ipcie/if_ipci.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __ATH_IPCI_H__ +#define __ATH_IPCI_H__ + +#include +#include +#include + +#define ATH_DBG_DEFAULT 0 +#define DRAM_SIZE 0x000a8000 +#include "hif.h" +#include "cepci.h" +#include "ce_main.h" + +#ifdef FORCE_WAKE +/** + * struct hif_pci_stats - Account for hif pci based statistics + * @mhi_force_wake_request_vote: vote for mhi + * @mhi_force_wake_failure: mhi force wake failure + * @mhi_force_wake_success: mhi force wake success + * @soc_force_wake_register_write_success: write to soc wake + * @soc_force_wake_failure: soc force wake failure + * @soc_force_wake_success: soc force wake success + * @mhi_force_wake_release_success: mhi force wake release success + * @soc_force_wake_release_success: soc force wake release + */ +struct hif_ipci_stats { + uint32_t mhi_force_wake_request_vote; + uint32_t mhi_force_wake_failure; + uint32_t mhi_force_wake_success; + uint32_t soc_force_wake_register_write_success; + uint32_t soc_force_wake_failure; + uint32_t soc_force_wake_success; + uint32_t mhi_force_wake_release_failure; + uint32_t mhi_force_wake_release_success; + uint32_t soc_force_wake_release_success; +}; + +/* Register to wake the UMAC from power collapse */ +#define PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG (0x01E04000 + 0x40) +/* Register used for handshake mechanism to validate UMAC is awake */ +#define PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG (0x01E00000 + 0x3004) +/* Timeout duration to validate UMAC wake status */ +#ifdef HAL_CONFIG_SLUB_DEBUG_ON +#define FORCE_WAKE_DELAY_TIMEOUT_MS 500 +#else +#define FORCE_WAKE_DELAY_TIMEOUT_MS 50 +#endif /* HAL_CONFIG_SLUB_DEBUG_ON */ +/* Validate UMAC status every 5ms */ +#define FORCE_WAKE_DELAY_MS 5 +#endif /* FORCE_WAKE */ + +struct hif_ipci_softc { + struct HIF_CE_state ce_sc; + void __iomem *mem; /* PCI address. */ + + struct device *dev; /* For efficiency, should be first in struct */ + struct tasklet_struct intr_tq; /* tasklet */ + int ce_msi_irq_num[CE_COUNT_MAX]; + bool use_register_windowing; + uint32_t register_window; + qdf_spinlock_t register_access_lock; + qdf_spinlock_t irq_lock; + + void (*hif_ipci_get_soc_info)(struct hif_ipci_softc *sc, + struct device *dev); +#ifdef FORCE_WAKE + struct hif_ipci_stats stats; +#endif +}; + +int hif_configure_irq(struct hif_softc *sc); + +/* + * There may be some pending tx frames during platform suspend. + * Suspend operation should be delayed until those tx frames are + * transferred from the host to target. This macro specifies how + * long suspend thread has to sleep before checking pending tx + * frame count. + */ +#define OL_ATH_TX_DRAIN_WAIT_DELAY 50 /* ms */ + +#ifdef FORCE_WAKE +/** + * hif_print_ipci_stats() - Display HIF IPCI stats + * @ipci_scn - HIF ipci handle + * + * Return: None + */ +void hif_print_ipci_stats(struct hif_ipci_softc *ipci_scn); +#else +static inline +void hif_print_ipci_stats(struct hif_ipci_softc *ipci_scn) +{ +} +#endif /* FORCE_WAKE */ + +#endif /* __IATH_PCI_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ipq4019def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ipq4019def.c new file mode 100644 index 0000000000000000000000000000000000000000..02d560e0a887cfb7fd506fd3f1bec78be3399f62 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ipq4019def.c @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2015-2016,2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" + +#if defined(IPQ4019_HEADERS_DEF) +#define AR900B 1 + +#define WLAN_HEADERS 1 +#include "common_drv.h" +#include "IPQ4019/soc_addrs.h" +#include "IPQ4019/extra/hw/apb_map.h" +#ifdef WLAN_HEADERS +#include "IPQ4019/extra/hw/wifi_top_reg_map.h" +#include "IPQ4019/hw/rtc_soc_reg.h" +#endif +#include "IPQ4019/hw/ce_wrapper_reg_csr.h" + +#include "IPQ4019/extra/hw/soc_core_reg.h" +#include "IPQ4019/extra/hw/ce_reg_csr.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Base address is defined in pcie_local_reg.h. Macros which access the + * registers include the base address in their definition. + */ + +#define FW_EVENT_PENDING_ADDRESS (WIFICMN_SCRATCH_3_ADDRESS) +#define DRAM_BASE_ADDRESS TARG_DRAM_START + +/* Backwards compatibility -- TBDXXX */ + +#define MISSING 0 + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB WIFI_SYSTEM_SLEEP_DISABLE_LSB +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK WIFI_SYSTEM_SLEEP_DISABLE_MASK +#define WLAN_RESET_CONTROL_COLD_RST_MASK WIFI_RESET_CONTROL_MAC_COLD_RST_MASK +#define WLAN_RESET_CONTROL_WARM_RST_MASK WIFI_RESET_CONTROL_MAC_WARM_RST_MASK +#define SOC_CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_ADDRESS +#define SOC_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_ADDRESS +#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_ADDRESS +#define SOC_LPO_CAL_OFFSET SOC_LPO_CAL_ADDRESS +#define SOC_RESET_CONTROL_CE_RST_MASK WIFI_RESET_CONTROL_CE_RESET_MASK +#define WLAN_SYSTEM_SLEEP_OFFSET WIFI_SYSTEM_SLEEP_ADDRESS +#define WLAN_RESET_CONTROL_OFFSET WIFI_RESET_CONTROL_ADDRESS +#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET +#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK +#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS +#define GPIO_PIN0_OFFSET MISSING +#define GPIO_PIN1_OFFSET MISSING +#define GPIO_PIN0_CONFIG_MASK MISSING +#define GPIO_PIN1_CONFIG_MASK MISSING +#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS +#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS +#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS +#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS +/*TBD:dakota Check if these can be removed for dakota */ +#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB +#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK +#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB +#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK +#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* MAC Descriptor */ +#define RX_PPDU_END_ANTENNA_OFFSET_DWORD (RX_PPDU_END_25_RX_ANTENNA_OFFSET >> 2) +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS MISSING +#define GPIO_PIN0_CONFIG_LSB MISSING +#define GPIO_PIN0_PAD_PULL_LSB MISSING +#define GPIO_PIN0_PAD_PULL_MASK MISSING +/* SI reg */ +#define SI_CONFIG_ERR_INT_MASK MISSING +#define SI_CONFIG_ERR_INT_LSB MISSING +/* CE descriptor */ +#define CE_SRC_DESC_SIZE_DWORD 2 +#define CE_DEST_DESC_SIZE_DWORD 2 +#define CE_SRC_DESC_SRC_PTR_OFFSET_DWORD 0 +#define CE_SRC_DESC_INFO_OFFSET_DWORD 1 +#define CE_DEST_DESC_DEST_PTR_OFFSET_DWORD 0 +#define CE_DEST_DESC_INFO_OFFSET_DWORD 1 +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_SRC_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 16 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 15 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_SRC_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 0 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 16 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 20 +#endif +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_DEST_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 16 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 15 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_DEST_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 0 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 16 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 20 +#endif + +#define MY_TARGET_DEF IPQ4019_TARGETdef +#define MY_HOST_DEF IPQ4019_HOSTdef +#define MY_CEREG_DEF IPQ4019_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ IPQ4019_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ IPQ4019_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(IPQ4019_CE_TARGETdef); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *IPQ4019_TARGETdef; +struct hostdef_s *IPQ4019_HOSTdef; +#endif /* IPQ4019_HEADERS_DEF */ +qdf_export_symbol(IPQ4019_TARGETdef); +qdf_export_symbol(IPQ4019_HOSTdef); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/mp_dev.c b/drivers/staging/qca-wifi-host-cmn/hif/src/mp_dev.c new file mode 100644 index 0000000000000000000000000000000000000000..e5f2d80215d5dffe25ed6e3b5879543c7b28dcc7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/mp_dev.c @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hif_io32.h" +#include "hif_debug.h" +#include "mp_dev.h" + +/*chaninfo*/ +#define CHANINFOMEM_S2_READ_MASK 0x00000008 +#define CHANINFO_CTRL_CAPTURE_CHAN_INFO_MASK 0x00000001 +#define CHANINFO_CTRL_CHANINFOMEM_BW_MASK 0x00000030 +#define MULTICHAIN_ENABLE_RX_CHAIN_MASK_MASK 0x00000007 + +/*agc*/ +#define GAINS_MIN_OFFSETS_CF_AGC_HIST_ENABLE_MASK 0x00040000 +#define GAINS_MIN_OFFSETS_CF_AGC_HIST_GC_MASK 0x00080000 +#define GAINS_MIN_OFFSETS_CF_AGC_HIST_VOTING_MASK 0x00100000 +#define GAINS_MIN_OFFSETS_CF_AGC_HIST_PHY_ERR_MASK 0x00200000 +#define AGC_HISTORY_DUMP_MASK (\ + GAINS_MIN_OFFSETS_CF_AGC_HIST_ENABLE_MASK| \ + GAINS_MIN_OFFSETS_CF_AGC_HIST_GC_MASK| \ + GAINS_MIN_OFFSETS_CF_AGC_HIST_VOTING_MASK| \ + GAINS_MIN_OFFSETS_CF_AGC_HIST_PHY_ERR_MASK \ + ) + +#define BB_chaninfo_ctrl 0x1a370 +#define BB_multichain_enable 0x1a2a0 +#define BB_chn_tables_intf_addr 0x19894 +#define BB_chn1_tables_intf_addr 0x1a894 +#define BB_chn_tables_intf_data 0x19898 +#define BB_chn1_tables_intf_data 0x1a898 +#define BB_gains_min_offsets 0x19e08 +#define BB_chaninfo_tab_b0 0x03200 +#define BB_chaninfo_tab_b1 0x03300 +#define BB_watchdog_status 0x1a7c0 +#define BB_watchdog_ctrl_1 0x1a7c4 +#define BB_watchdog_ctrl_2 0x1a7c8 +#define BB_watchdog_status_B 0x1a7e0 + + +#define PHY_BB_CHN_TABLES_INTF_ADDR 0x19894 +#define PHY_BB_CHN_TABLES_INTF_DATA 0x19898 + +#define PHY_BB_CHN1_TABLES_INTF_ADDR 0x1a894 +#define PHY_BB_CHN1_TABLES_INTF_DATA 0x1a898 + + +struct priv_ctrl_ctx { + uint32_t chaninfo_ctrl_orig; + uint32_t gain_min_offsets_orig; + uint32_t anyreg_start; + uint32_t anyreg_len; +}; + +static struct priv_ctrl_ctx g_priv_dump_ctx; + +static inline void set_target_reg_bits(struct hif_softc *scn, + void __iomem *mem, uint32_t reg, + uint32_t bitmask, uint32_t val) +{ + uint32_t value = hif_read32_mb(scn, mem + (reg)); + uint32_t shift = 0; + + value &= ~(bitmask); + while (!((bitmask >> shift) & 0x01)) + shift++; + + value |= (((val) << shift) & (bitmask)); + hif_write32_mb(scn, mem + (reg), value); +} + +static inline uint32_t get_target_reg_bits(struct hif_softc *scn, + void __iomem *mem, + uint32_t reg, uint32_t bitmask) +{ + uint32_t value = hif_read32_mb(scn, mem + (reg)); + uint32_t shift = 0; + + while (!((bitmask >> shift) & 0x01)) + shift++; + + return (value >> shift) & bitmask; +} + +void priv_start_cap_chaninfo(struct hif_softc *scn) +{ + set_target_reg_bits(scn, scn->mem, BB_chaninfo_ctrl, + CHANINFO_CTRL_CAPTURE_CHAN_INFO_MASK, 1); +} + +void priv_start_agc(struct hif_softc *scn) +{ + g_priv_dump_ctx.gain_min_offsets_orig = + hif_read32_mb(scn, scn->mem + BB_gains_min_offsets); + set_target_reg_bits(scn, scn->mem, BB_gains_min_offsets, + AGC_HISTORY_DUMP_MASK, + 0x0f); +} + +static void priv_stop_agc(struct hif_softc *scn) +{ + set_target_reg_bits(scn, scn->mem, BB_gains_min_offsets, + AGC_HISTORY_DUMP_MASK, + 0); +} + +void priv_dump_chaninfo(struct hif_softc *scn) +{ + uint32_t bw, val; + uint32_t len, i, tmp; + uint32_t chain_mask; + uint32_t chain0, chain1; + + chain_mask = + get_target_reg_bits(scn, scn->mem, BB_multichain_enable, + MULTICHAIN_ENABLE_RX_CHAIN_MASK_MASK); + chain0 = chain_mask & 1; + chain1 = chain_mask & 2; + + HIF_TRACE("%s: E", __func__); + bw = get_target_reg_bits(scn, scn->mem, BB_chaninfo_ctrl, + CHANINFO_CTRL_CHANINFOMEM_BW_MASK); + + if (bw == 0) + len = 53; + else if (bw == 1) + len = 57; + else if (bw == 2) + len = 59 * 2 - 1; + else + len = 60 * 2 + 61 * 2; + + /* + * each tone is 16 bit valid, write to 32bit buffer each. + * bw==0(legacy20): 53 tones. + * bw==1(ht/vht20): 57 tones. + * bw==2(ht/vht40): 59+58 tones. + * bw==3(vht80): 60*2+61*2 tones. + */ + + if (chain0) { + hif_write32_mb(scn, scn->mem + BB_chn_tables_intf_addr, + 0x80003200); + } + if (chain1) { + hif_write32_mb(scn, scn->mem + BB_chn1_tables_intf_addr, + 0x80003200); + } + + set_target_reg_bits(scn, scn->mem, BB_chaninfo_ctrl, + CHANINFOMEM_S2_READ_MASK, 0); + + if (chain0) { + if (bw < 2) { + len = (bw == 0) ? 53 : 57; + for (i = 0; i < len; i++) { + val = hif_read32_mb(scn, scn->mem + + BB_chn_tables_intf_data) & + 0x0000ffff; + qdf_debug("0x%x\t", val); + if (i % 4 == 0) + qdf_debug("\n"); + } + } else { + len = (bw == 2) ? 59 : 60; + for (i = 0; i < len; i++) { + tmp = hif_read32_mb(scn, scn->mem + + BB_chn_tables_intf_data); + qdf_debug("0x%x\t", ((tmp >> 16) & 0x0000ffff)); + qdf_debug("0x%x\t", (tmp & 0x0000ffff)); + if (i % 2 == 0) + qdf_debug("\n"); + } + if (bw > 2) { + /* bw == 3 for vht80 */ + hif_write32_mb(scn, scn->mem + + BB_chn_tables_intf_addr, + 0x80003300); + len = 61; + for (i = 0; i < len; i++) { + tmp = hif_read32_mb(scn, scn->mem + + BB_chn_tables_intf_data); + qdf_debug("0x%x\t", + ((tmp >> 16) & 0x0000ffff)); + qdf_debug("0x%x\t", (tmp & 0x0000ffff)); + if (i % 2 == 0) + qdf_debug("\n"); + } + } + } + } + if (chain1) { + if (bw < 2) { + len = (bw == 0) ? 53 : 57; + for (i = 0; i < len; i++) { + val = + hif_read32_mb(scn, scn->mem + + BB_chn1_tables_intf_data) & + 0x0000ffff; + qdf_debug("0x%x\t", val); + if (i % 4 == 0) + qdf_debug("\n"); + } + } else { + len = (bw == 2) ? 59 : 60; + for (i = 0; i < len; i++) { + tmp = + hif_read32_mb(scn, scn->mem + + BB_chn1_tables_intf_data); + qdf_debug("0x%x", (tmp >> 16) & 0x0000ffff); + qdf_debug("0x%x", tmp & 0x0000ffff); + if (i % 2 == 0) + qdf_debug("\n"); + } + if (bw > 2) { + /* bw == 3 for vht80 */ + hif_write32_mb(scn, scn->mem + + BB_chn1_tables_intf_addr, + 0x80003300); + len = 61; + for (i = 0; i < len; i++) { + tmp = + hif_read32_mb(scn, scn->mem + + BB_chn1_tables_intf_data); + qdf_debug("0x%x\t", + ((tmp >> 16) & 0x0000ffff)); + qdf_debug("0x%x\t", (tmp & 0x0000ffff)); + if (i % 2 == 0) + qdf_debug("\n"); + } + } + } + } + HIF_TRACE("%s: X", __func__); +} + +void priv_dump_agc(struct hif_softc *scn) +{ + int i, len = 30; /* check this value for Rome and Peregrine */ + uint32_t chain0, chain1, chain_mask, val; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + chain_mask = + get_target_reg_bits(scn, scn->mem, BB_multichain_enable, + MULTICHAIN_ENABLE_RX_CHAIN_MASK_MASK); + chain0 = chain_mask & 1; + chain1 = chain_mask & 2; + + len = len << 1; /* each agc item is 64bit, total*2 */ + priv_stop_agc(scn); + + set_target_reg_bits(scn, scn->mem, BB_chaninfo_ctrl, + CHANINFOMEM_S2_READ_MASK, 0); + + HIF_TRACE("%s: AGC history buffer dump: E", __func__); + if (chain0) { + for (i = 0; i < len; i++) { + hif_write32_mb(scn, scn->mem + + PHY_BB_CHN_TABLES_INTF_ADDR, + BB_chaninfo_tab_b0 + i * 4); + val = hif_read32_mb(scn, scn->mem + + PHY_BB_CHN_TABLES_INTF_DATA); + qdf_debug("0x%x\t", val); + if (i % 4 == 0) + qdf_debug("\n"); + } + } + if (chain1) { + for (i = 0; i < len; i++) { + hif_write32_mb(scn, scn->mem + + PHY_BB_CHN1_TABLES_INTF_ADDR, + BB_chaninfo_tab_b0 + i * 4); + val = hif_read32_mb(scn, scn->mem + + PHY_BB_CHN1_TABLES_INTF_DATA); + qdf_debug("0x%x\t", val); + if (i % 4 == 0) + qdf_debug("\n"); + } + } + HIF_TRACE("%s: AGC history buffer dump X", __func__); + /* restore original value */ + hif_write32_mb(scn, scn->mem + BB_gains_min_offsets, + g_priv_dump_ctx.gain_min_offsets_orig); + + Q_TARGET_ACCESS_END(scn); + +} + +void priv_dump_bbwatchdog(struct hif_softc *scn) +{ + uint32_t val; + + HIF_TRACE("%s: BB watchdog dump E", __func__); + val = hif_read32_mb(scn, scn->mem + BB_watchdog_status); + qdf_debug("0x%x\t", val); + val = hif_read32_mb(scn, scn->mem + BB_watchdog_ctrl_1); + qdf_debug("0x%x\t", val); + val = hif_read32_mb(scn, scn->mem + BB_watchdog_ctrl_2); + qdf_debug("0x%x\t", val); + val = hif_read32_mb(scn, scn->mem + BB_watchdog_status_B); + qdf_debug("0x%x", val); + HIF_TRACE("%s: BB watchdog dump X", __func__); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/mp_dev.h b/drivers/staging/qca-wifi-host-cmn/hif/src/mp_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..208c911b57c8c91e45787271731f032f8f294b4e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/mp_dev.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __MP_DEV_H__ +#define __MP_DEV_H__ +void priv_start_agc(struct hif_softc *scn); +void priv_dump_agc(struct hif_softc *scn); +void priv_start_cap_chaninfo(struct hif_softc *scn); +void priv_dump_chaninfo(struct hif_softc *scn); +void priv_dump_bbwatchdog(struct hif_softc *scn); +void hif_shutdown_device(struct hif_opaque_softc *scn); +#endif /* __MP_DEV_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/hif_io32_pci.h b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/hif_io32_pci.h new file mode 100644 index 0000000000000000000000000000000000000000..65f46d024833beb8152347a7c0e62497cf0dd4a5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/hif_io32_pci.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __HIF_IO32_PCI_H__ +#define __HIF_IO32_PCI_H__ + +#ifdef HIF_PCI + +#include "hif_main.h" +#include "regtable.h" +#include "ce_reg.h" +#include "qdf_atomic.h" +#include "if_pci.h" +/* + * For maximum performance and no power management, set this to 1. + * For power management at the cost of performance, set this to 0. + */ +#ifndef CONFIG_ATH_PCIE_MAX_PERF +#define CONFIG_ATH_PCIE_MAX_PERF 0 +#endif + +/* + * For keeping the target awake till the driver is + * loaded, set this to 1 + */ +#ifndef CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD +#define CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD 1 +#endif + +/* + * PCI-E L1 ASPPM sub-states + * To enable clock gating in L1 state, set this to 1. + * (less power, slightly more wakeup latency) + * To disable clock gating in L1 state, set this to 0. (slighly more power) + */ +#define CONFIG_PCIE_ENABLE_L1_CLOCK_GATE 1 + +/* + * PCIE_ACCESS_LOG_NUM specifies the number of + * read/write records to store + */ +#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG +#define PCIE_ACCESS_LOG_NUM 500 +#endif + +/* 64-bit MSI support */ +#define CONFIG_PCIE_64BIT_MSI 0 + +/* BAR0 ready checking for AR6320v2 */ +#define PCIE_BAR0_READY_CHECKING 0 + +/* AXI gating when L1, L2 to reduce power consumption */ +#define CONFIG_PCIE_ENABLE_AXI_CLK_GATE 0 + +irqreturn_t hif_fw_interrupt_handler(int irq, void *arg); +#endif /* HIF_PCI */ +#endif /* __HIF_IO32_PCI_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.c b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..76d167684e6821799193ee1772d9afa03d5daa0a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.c @@ -0,0 +1,5242 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#ifdef CONFIG_PCI_MSM +#include +#endif +#include "hif_io32.h" +#include "if_pci.h" +#include "hif.h" +#include "target_type.h" +#include "hif_main.h" +#include "ce_main.h" +#include "ce_api.h" +#include "ce_internal.h" +#include "ce_reg.h" +#include "ce_bmi.h" +#include "regtable.h" +#include "hif_hw_version.h" +#include +#include +#include "qdf_status.h" +#include "qdf_atomic.h" +#include "pld_common.h" +#include "mp_dev.h" +#include "hif_debug.h" + +#include "if_pci_internal.h" +#include "ce_tasklet.h" +#include "targaddrs.h" +#include "hif_exec.h" + +#include "pci_api.h" +#include "ahb_api.h" +#include "qdf_hang_event_notifier.h" +#include "qdf_platform.h" + +/* Maximum ms timeout for host to wake up target */ +#define PCIE_WAKE_TIMEOUT 1000 +#define RAMDUMP_EVENT_TIMEOUT 2500 + +/* Setting SOC_GLOBAL_RESET during driver unload causes intermittent + * PCIe data bus error + * As workaround for this issue - changing the reset sequence to + * use TargetCPU warm reset * instead of SOC_GLOBAL_RESET + */ +#define CPU_WARM_RESET_WAR + +/* + * Top-level interrupt handler for all PCI interrupts from a Target. + * When a block of MSI interrupts is allocated, this top-level handler + * is not used; instead, we directly call the correct sub-handler. + */ +struct ce_irq_reg_table { + uint32_t irq_enable; + uint32_t irq_status; +}; + +#ifndef QCA_WIFI_3_0_ADRASTEA +static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc) +{ +} +#else +static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc) +{ + struct hif_softc *scn = HIF_GET_SOFTC(sc); + unsigned int target_enable0, target_enable1; + unsigned int target_cause0, target_cause1; + + target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0); + target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1); + target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0); + target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1); + + if ((target_enable0 & target_cause0) || + (target_enable1 & target_cause1)) { + hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0); + hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0); + + if (scn->notice_send) + pld_intr_notify_q6(sc->dev); + } +} +#endif + + +/** + * pci_dispatch_ce_irq() - pci_dispatch_ce_irq + * @scn: scn + * + * Return: N/A + */ +static void pci_dispatch_interrupt(struct hif_softc *scn) +{ + uint32_t intr_summary; + int id; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + if (scn->hif_init_done != true) + return; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + intr_summary = CE_INTERRUPT_SUMMARY(scn); + + if (intr_summary == 0) { + if ((scn->target_status != TARGET_STATUS_RESET) && + (!qdf_atomic_read(&scn->link_suspended))) { + + hif_write32_mb(scn, scn->mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), + HOST_GROUP0_MASK); + + hif_read32_mb(scn, scn->mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS)); + } + Q_TARGET_ACCESS_END(scn); + return; + } + Q_TARGET_ACCESS_END(scn); + + scn->ce_irq_summary = intr_summary; + for (id = 0; intr_summary && (id < scn->ce_count); id++) { + if (intr_summary & (1 << id)) { + intr_summary &= ~(1 << id); + ce_dispatch_interrupt(id, &hif_state->tasklets[id]); + } + } +} + +irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg) +{ + struct hif_pci_softc *sc = (struct hif_pci_softc *)arg; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg); + + volatile int tmp; + uint16_t val = 0; + uint32_t bar0 = 0; + uint32_t fw_indicator_address, fw_indicator; + bool ssr_irq = false; + unsigned int host_cause, host_enable; + + if (LEGACY_INTERRUPTS(sc)) { + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return IRQ_HANDLED; + + if (ADRASTEA_BU) { + host_enable = hif_read32_mb(sc, sc->mem + + PCIE_INTR_ENABLE_ADDRESS); + host_cause = hif_read32_mb(sc, sc->mem + + PCIE_INTR_CAUSE_ADDRESS); + if (!(host_enable & host_cause)) { + hif_pci_route_adrastea_interrupt(sc); + return IRQ_HANDLED; + } + } + + /* Clear Legacy PCI line interrupts + * IMPORTANT: INTR_CLR regiser has to be set + * after INTR_ENABLE is set to 0, + * otherwise interrupt can not be really cleared + */ + hif_write32_mb(sc, sc->mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), 0); + + hif_write32_mb(sc, sc->mem + + (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS), + ADRASTEA_BU ? + (host_enable & host_cause) : + HOST_GROUP0_MASK); + + if (ADRASTEA_BU) + hif_write32_mb(sc, sc->mem + 0x2f100c, + (host_cause >> 1)); + + /* IMPORTANT: this extra read transaction is required to + * flush the posted write buffer + */ + if (!ADRASTEA_BU) { + tmp = + hif_read32_mb(sc, sc->mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS)); + + if (tmp == 0xdeadbeef) { + HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!", + __func__); + + pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val); + HIF_ERROR("%s: PCI Vendor ID = 0x%04x", + __func__, val); + + pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val); + HIF_ERROR("%s: PCI Device ID = 0x%04x", + __func__, val); + + pci_read_config_word(sc->pdev, PCI_COMMAND, &val); + HIF_ERROR("%s: PCI Command = 0x%04x", __func__, + val); + + pci_read_config_word(sc->pdev, PCI_STATUS, &val); + HIF_ERROR("%s: PCI Status = 0x%04x", __func__, + val); + + pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, + &bar0); + HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__, + bar0); + + HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x", + __func__, + hif_read32_mb(sc, sc->mem + + PCIE_LOCAL_BASE_ADDRESS + + RTC_STATE_ADDRESS)); + HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x", + __func__, + hif_read32_mb(sc, sc->mem + + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS)); + HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x", + __func__, + hif_read32_mb(sc, sc->mem + 0x80008), + hif_read32_mb(sc, sc->mem + 0x8000c)); + HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x", + __func__, + hif_read32_mb(sc, sc->mem + 0x80010), + hif_read32_mb(sc, sc->mem + 0x80014)); + HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x", + __func__, + hif_read32_mb(sc, sc->mem + 0x80018), + hif_read32_mb(sc, sc->mem + 0x8001c)); + QDF_BUG(0); + } + + PCI_CLR_CAUSE0_REGISTER(sc); + } + + if (HAS_FW_INDICATOR) { + fw_indicator_address = hif_state->fw_indicator_address; + fw_indicator = A_TARGET_READ(scn, fw_indicator_address); + if ((fw_indicator != ~0) && + (fw_indicator & FW_IND_EVENT_PENDING)) + ssr_irq = true; + } + + if (Q_TARGET_ACCESS_END(scn) < 0) + return IRQ_HANDLED; + } + /* TBDXXX: Add support for WMAC */ + + if (ssr_irq) { + sc->irq_event = irq; + qdf_atomic_set(&scn->tasklet_from_intr, 1); + + qdf_atomic_inc(&scn->active_tasklet_cnt); + tasklet_schedule(&sc->intr_tq); + } else { + pci_dispatch_interrupt(scn); + } + + return IRQ_HANDLED; +} + +bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem) +{ + return 1; /* FIX THIS */ +} + +int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + int i = 0; + + if (!irq || !size) { + return -EINVAL; + } + + if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) { + irq[0] = sc->irq; + return 1; + } + + if (sc->num_msi_intrs > size) { + qdf_print("Not enough space in irq buffer to return irqs"); + return -EINVAL; + } + + for (i = 0; i < sc->num_msi_intrs; i++) { + irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL; + } + + return sc->num_msi_intrs; +} + + +/** + * hif_pci_cancel_deferred_target_sleep() - cancels the defered target sleep + * @scn: hif_softc + * + * Return: void + */ +#if CONFIG_ATH_PCIE_MAX_PERF == 0 +void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + A_target_id_t pci_addr = scn->mem; + + qdf_spin_lock_irqsave(&hif_state->keep_awake_lock); + /* + * If the deferred sleep timer is running cancel it + * and put the soc into sleep. + */ + if (hif_state->fake_sleep == true) { + qdf_timer_stop(&hif_state->sleep_timer); + if (hif_state->verified_awake == false) { + hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_RESET); + } + hif_state->fake_sleep = false; + } + qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock); +} +#else +inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn) +{ +} +#endif + +#define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \ + hif_read32_mb(sc, (char *)(mem) + \ + PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)) + +#define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \ + hif_write32_mb(sc, ((char *)(mem) + \ + PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val)) + +#ifdef QCA_WIFI_3_0 +/** + * hif_targ_is_awake() - check to see if the target is awake + * @hif_ctx: hif context + * + * emulation never goes to sleep + * + * Return: true if target is awake + */ +static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem) +{ + return true; +} +#else +/** + * hif_targ_is_awake() - check to see if the target is awake + * @hif_ctx: hif context + * + * Return: true if the targets clocks are on + */ +static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem) +{ + uint32_t val; + + if (scn->recovery) + return false; + val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS + + RTC_STATE_ADDRESS); + return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON; +} +#endif + +#define ATH_PCI_RESET_WAIT_MAX 10 /* Ms */ +static void hif_pci_device_reset(struct hif_pci_softc *sc) +{ + void __iomem *mem = sc->mem; + int i; + uint32_t val; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + + if (!scn->hostdef) + return; + + /* NB: Don't check resetok here. This form of reset + * is integral to correct operation. + */ + + if (!SOC_GLOBAL_RESET_ADDRESS) + return; + + if (!mem) + return; + + HIF_ERROR("%s: Reset Device", __func__); + + /* + * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first + * writing WAKE_V, the Target may scribble over Host memory! + */ + A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_V_MASK); + for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { + if (hif_targ_is_awake(scn, mem)) + break; + + qdf_mdelay(1); + } + + /* Put Target, including PCIe, into RESET. */ + val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS); + val |= 1; + A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val); + for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { + if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) & + RTC_STATE_COLD_RESET_MASK) + break; + + qdf_mdelay(1); + } + + /* Pull Target, including PCIe, out of RESET. */ + val &= ~1; + A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val); + for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { + if (! + (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) & + RTC_STATE_COLD_RESET_MASK)) + break; + + qdf_mdelay(1); + } + + A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_RESET); +} + +/* CPU warm reset function + * Steps: + * 1. Disable all pending interrupts - so no pending interrupts on WARM reset + * 2. Clear the FW_INDICATOR_ADDRESS -so Traget CPU initializes FW + * correctly on WARM reset + * 3. Clear TARGET CPU LF timer interrupt + * 4. Reset all CEs to clear any pending CE tarnsactions + * 5. Warm reset CPU + */ +static void hif_pci_device_warm_reset(struct hif_pci_softc *sc) +{ + void __iomem *mem = sc->mem; + int i; + uint32_t val; + uint32_t fw_indicator; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + + /* NB: Don't check resetok here. This form of reset is + * integral to correct operation. + */ + + if (!mem) + return; + + HIF_INFO_MED("%s: Target Warm Reset", __func__); + + /* + * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first + * writing WAKE_V, the Target may scribble over Host memory! + */ + A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_V_MASK); + for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { + if (hif_targ_is_awake(scn, mem)) + break; + qdf_mdelay(1); + } + + /* + * Disable Pending interrupts + */ + val = + hif_read32_mb(sc, mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_CAUSE_ADDRESS)); + HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__, + (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val); + /* Target CPU Intr Cause */ + val = hif_read32_mb(sc, mem + + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS)); + HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val); + + val = + hif_read32_mb(sc, mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS)); + hif_write32_mb(sc, (mem + + (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0); + hif_write32_mb(sc, (mem + + (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)), + HOST_GROUP0_MASK); + + qdf_mdelay(100); + + /* Clear FW_INDICATOR_ADDRESS */ + if (HAS_FW_INDICATOR) { + fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS); + hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0); + } + + /* Clear Target LF Timer interrupts */ + val = + hif_read32_mb(sc, mem + + (RTC_SOC_BASE_ADDRESS + + SOC_LF_TIMER_CONTROL0_ADDRESS)); + HIF_INFO_MED("%s: addr 0x%x : 0x%x", __func__, + (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val); + val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK; + hif_write32_mb(sc, mem + + (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), + val); + + /* Reset CE */ + val = + hif_read32_mb(sc, mem + + (RTC_SOC_BASE_ADDRESS | + SOC_RESET_CONTROL_ADDRESS)); + val |= SOC_RESET_CONTROL_CE_RST_MASK; + hif_write32_mb(sc, (mem + + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)), + val); + val = + hif_read32_mb(sc, mem + + (RTC_SOC_BASE_ADDRESS | + SOC_RESET_CONTROL_ADDRESS)); + qdf_mdelay(10); + + /* CE unreset */ + val &= ~SOC_RESET_CONTROL_CE_RST_MASK; + hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS | + SOC_RESET_CONTROL_ADDRESS), val); + val = + hif_read32_mb(sc, mem + + (RTC_SOC_BASE_ADDRESS | + SOC_RESET_CONTROL_ADDRESS)); + qdf_mdelay(10); + + /* Read Target CPU Intr Cause */ + val = hif_read32_mb(sc, mem + + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS)); + HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x", + __func__, val); + + /* CPU warm RESET */ + val = + hif_read32_mb(sc, mem + + (RTC_SOC_BASE_ADDRESS | + SOC_RESET_CONTROL_ADDRESS)); + val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK; + hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS | + SOC_RESET_CONTROL_ADDRESS), val); + val = + hif_read32_mb(sc, mem + + (RTC_SOC_BASE_ADDRESS | + SOC_RESET_CONTROL_ADDRESS)); + HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x", + __func__, val); + + qdf_mdelay(100); + HIF_INFO_MED("%s: Target Warm reset complete", __func__); + +} + +#ifndef QCA_WIFI_3_0 +/* only applicable to legacy ce */ +int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + void __iomem *mem = sc->mem; + uint32_t val; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return ATH_ISR_NOSCHED; + val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS); + if (Q_TARGET_ACCESS_END(scn) < 0) + return ATH_ISR_SCHED; + + HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val); + + if (val & FW_IND_HELPER) + return 0; + + return 1; +} +#endif + +int hif_check_soc_status(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + uint16_t device_id = 0; + uint32_t val; + uint16_t timeout_count = 0; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + + /* Check device ID from PCIe configuration space for link status */ + pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id); + if (device_id != sc->devid) { + HIF_ERROR("%s: device ID does match (read 0x%x, expect 0x%x)", + __func__, device_id, sc->devid); + return -EACCES; + } + + /* Check PCIe local register for bar/memory access */ + val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + + RTC_STATE_ADDRESS); + HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val); + + /* Try to wake up taget if it sleeps */ + hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); + HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__, + hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS)); + + /* Check if taget can be woken up */ + while (!hif_targ_is_awake(scn, sc->mem)) { + if (timeout_count >= PCIE_WAKE_TIMEOUT) { + HIF_ERROR("%s: wake up timeout, %08x, %08x", + __func__, + hif_read32_mb(sc, sc->mem + + PCIE_LOCAL_BASE_ADDRESS + + RTC_STATE_ADDRESS), + hif_read32_mb(sc, sc->mem + + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS)); + return -EACCES; + } + + hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); + + qdf_mdelay(100); + timeout_count += 100; + } + + /* Check Power register for SoC internal bus issues */ + val = + hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS + + SOC_POWER_REG_OFFSET); + HIF_INFO_MED("%s: Power register is %08x", __func__, val); + + return 0; +} + +/** + * __hif_pci_dump_registers(): dump other PCI debug registers + * @scn: struct hif_softc + * + * This function dumps pci debug registers. The parrent function + * dumps the copy engine registers before calling this function. + * + * Return: void + */ +static void __hif_pci_dump_registers(struct hif_softc *scn) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + void __iomem *mem = sc->mem; + uint32_t val, i, j; + uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + uint32_t ce_base; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + /* DEBUG_INPUT_SEL_SRC = 0x6 */ + val = + hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_INPUT_SEL_OFFSET); + val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK; + val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6); + hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_INPUT_SEL_OFFSET, val); + + /* DEBUG_CONTROL_ENABLE = 0x1 */ + val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_CONTROL_OFFSET); + val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK; + val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1); + hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_CONTROL_OFFSET, val); + + HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__, + hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_INPUT_SEL_OFFSET), + hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_CONTROL_OFFSET)); + + HIF_INFO_MED("%s: Debug CE", __func__); + /* Loop CE debug output */ + /* AMBA_DEBUG_BUS_SEL = 0xc */ + val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + + AMBA_DEBUG_BUS_OFFSET); + val &= ~AMBA_DEBUG_BUS_SEL_MASK; + val |= AMBA_DEBUG_BUS_SEL_SET(0xc); + hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, + val); + + for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) { + /* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */ + val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS + + CE_WRAPPER_DEBUG_OFFSET); + val &= ~CE_WRAPPER_DEBUG_SEL_MASK; + val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]); + hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS + + CE_WRAPPER_DEBUG_OFFSET, val); + + HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x", + __func__, wrapper_idx[i], + hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + + AMBA_DEBUG_BUS_OFFSET), + hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS + + CE_WRAPPER_DEBUG_OFFSET)); + + if (wrapper_idx[i] <= 7) { + for (j = 0; j <= 5; j++) { + ce_base = CE_BASE_ADDRESS(wrapper_idx[i]); + /* For (j=0~5) write CE_DEBUG_SEL = j */ + val = + hif_read32_mb(sc, mem + ce_base + + CE_DEBUG_OFFSET); + val &= ~CE_DEBUG_SEL_MASK; + val |= CE_DEBUG_SEL_SET(j); + hif_write32_mb(sc, mem + ce_base + + CE_DEBUG_OFFSET, val); + + /* read (@gpio_athr_wlan_reg) + * WLAN_DEBUG_OUT_DATA + */ + val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_OUT_OFFSET); + val = WLAN_DEBUG_OUT_DATA_GET(val); + + HIF_INFO_MED("%s: module%d: cedbg: %x out: %x", + __func__, j, + hif_read32_mb(sc, mem + ce_base + + CE_DEBUG_OFFSET), val); + } + } else { + /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */ + val = + hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_OUT_OFFSET); + val = WLAN_DEBUG_OUT_DATA_GET(val); + + HIF_INFO_MED("%s: out: %x", __func__, val); + } + } + + HIF_INFO_MED("%s: Debug PCIe:", __func__); + /* Loop PCIe debug output */ + /* Write AMBA_DEBUG_BUS_SEL = 0x1c */ + val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + + AMBA_DEBUG_BUS_OFFSET); + val &= ~AMBA_DEBUG_BUS_SEL_MASK; + val |= AMBA_DEBUG_BUS_SEL_SET(0x1c); + hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + + AMBA_DEBUG_BUS_OFFSET, val); + + for (i = 0; i <= 8; i++) { + /* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */ + val = + hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + + AMBA_DEBUG_BUS_OFFSET); + val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK; + val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i); + hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + + AMBA_DEBUG_BUS_OFFSET, val); + + /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */ + val = + hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_OUT_OFFSET); + val = WLAN_DEBUG_OUT_DATA_GET(val); + + HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__, + hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_OUT_OFFSET), val, + hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_OUT_OFFSET)); + } + + Q_TARGET_ACCESS_END(scn); +} + +/** + * hif_dump_registers(): dump bus debug registers + * @scn: struct hif_opaque_softc + * + * This function dumps hif bus debug registers + * + * Return: 0 for success or error code + */ +int hif_pci_dump_registers(struct hif_softc *hif_ctx) +{ + int status; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + status = hif_dump_ce_registers(scn); + + if (status) + HIF_ERROR("%s: Dump CE Registers Failed", __func__); + + /* dump non copy engine pci registers */ + __hif_pci_dump_registers(scn); + + return 0; +} + +#ifdef HIF_CONFIG_SLUB_DEBUG_ON + +/* worker thread to schedule wlan_tasklet in SLUB debug build */ +static void reschedule_tasklet_work_handler(void *arg) +{ + struct hif_pci_softc *sc = arg; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + + if (!scn) { + HIF_ERROR("%s: hif_softc is NULL\n", __func__); + return; + } + + if (scn->hif_init_done == false) { + HIF_ERROR("%s: wlan driver is unloaded", __func__); + return; + } + + tasklet_schedule(&sc->intr_tq); +} + +/** + * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet + * work + * @sc: HIF PCI Context + * + * Return: void + */ +static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) +{ + qdf_create_work(0, &sc->reschedule_tasklet_work, + reschedule_tasklet_work_handler, NULL); +} +#else +static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { } +#endif /* HIF_CONFIG_SLUB_DEBUG_ON */ + +void wlan_tasklet(unsigned long data) +{ + struct hif_pci_softc *sc = (struct hif_pci_softc *)data; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + + if (scn->hif_init_done == false) + goto end; + + if (qdf_atomic_read(&scn->link_suspended)) + goto end; + + if (!ADRASTEA_BU) { + hif_fw_interrupt_handler(sc->irq_event, scn); + if (scn->target_status == TARGET_STATUS_RESET) + goto end; + } + +end: + qdf_atomic_set(&scn->tasklet_from_intr, 0); + qdf_atomic_dec(&scn->active_tasklet_cnt); +} + +#ifdef FEATURE_RUNTIME_PM +static bool hif_pci_pm_runtime_enabled(struct hif_pci_softc *sc) +{ + struct hif_softc *scn = HIF_GET_SOFTC(sc); + + if (scn->hif_config.enable_runtime_pm) + return true; + + return pm_runtime_enabled(sc->dev); +} + +static const char *hif_pm_runtime_state_to_string(uint32_t state) +{ + switch (state) { + case HIF_PM_RUNTIME_STATE_NONE: + return "INIT_STATE"; + case HIF_PM_RUNTIME_STATE_ON: + return "ON"; + case HIF_PM_RUNTIME_STATE_RESUMING: + return "RESUMING"; + case HIF_PM_RUNTIME_STATE_SUSPENDING: + return "SUSPENDING"; + case HIF_PM_RUNTIME_STATE_SUSPENDED: + return "SUSPENDED"; + default: + return "INVALID STATE"; + } +} + +#define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \ + seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name) +/** + * hif_pci_runtime_pm_warn() - Runtime PM Debugging API + * @sc: hif_pci_softc context + * @msg: log message + * + * log runtime pm stats when something seems off. + * + * Return: void + */ +static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg) +{ + struct hif_pm_runtime_lock *ctx; + int i; + + hif_nofl_debug("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d", + msg, atomic_read(&sc->dev->power.usage_count), + hif_pm_runtime_state_to_string( + atomic_read(&sc->pm_state)), + sc->prevent_suspend_cnt); + + hif_nofl_debug("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d", + sc->dev->power.runtime_status, + sc->dev->power.runtime_error, + sc->dev->power.disable_depth, + sc->dev->power.autosuspend_delay); + + hif_nofl_debug("runtime_get: %u, runtime_put: %u, request_resume: %u", + qdf_atomic_read(&sc->pm_stats.runtime_get), + qdf_atomic_read(&sc->pm_stats.runtime_put), + sc->pm_stats.request_resume); + + hif_nofl_debug("get put get-timestamp put-timestamp :DBGID_NAME"); + for (i = 0; i < RTPM_ID_MAX; i++) { + hif_nofl_debug("%-10d %-10d 0x%-10llx 0x%-10llx :%-30s", + qdf_atomic_read( + &sc->pm_stats.runtime_get_dbgid[i]), + qdf_atomic_read( + &sc->pm_stats.runtime_put_dbgid[i]), + sc->pm_stats.runtime_get_timestamp_dbgid[i], + sc->pm_stats.runtime_put_timestamp_dbgid[i], + rtpm_string_from_dbgid(i)); + } + + hif_nofl_debug("allow_suspend: %u, prevent_suspend: %u", + qdf_atomic_read(&sc->pm_stats.allow_suspend), + qdf_atomic_read(&sc->pm_stats.prevent_suspend)); + + hif_nofl_debug("prevent_suspend_timeout: %u, allow_suspend_timeout: %u", + sc->pm_stats.prevent_suspend_timeout, + sc->pm_stats.allow_suspend_timeout); + + hif_nofl_debug("Suspended: %u, resumed: %u count", + sc->pm_stats.suspended, + sc->pm_stats.resumed); + + hif_nofl_debug("suspend_err: %u, runtime_get_err: %u", + sc->pm_stats.suspend_err, + sc->pm_stats.runtime_get_err); + + hif_nofl_debug("Active Wakeup Sources preventing Runtime Suspend: "); + + list_for_each_entry(ctx, &sc->prevent_suspend_list, list) { + hif_nofl_debug("source %s; timeout %d ms", + ctx->name, ctx->timeout); + } + + if (qdf_is_fw_down()) { + hif_err("fw is down"); + return; + } + + QDF_ASSERT(0); +} + +/** + * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm + * @s: file to print to + * @data: unused + * + * debugging tool added to the debug fs for displaying runtimepm stats + * + * Return: 0 + */ +static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data) +{ + struct hif_pci_softc *sc = s->private; + static const char * const autopm_state[] = {"NONE", "ON", "RESUMING", + "SUSPENDING", "SUSPENDED"}; + unsigned int msecs_age; + qdf_time_t usecs_age; + int pm_state = atomic_read(&sc->pm_state); + unsigned long timer_expires; + struct hif_pm_runtime_lock *ctx; + int i; + + seq_printf(s, "%30s: %s\n", "Runtime PM state", + autopm_state[pm_state]); + seq_printf(s, "%30s: %pf\n", "Last Resume Caller", + sc->pm_stats.last_resume_caller); + seq_printf(s, "%30s: %pf\n", "Last Busy Marker", + sc->pm_stats.last_busy_marker); + + usecs_age = qdf_get_log_timestamp_usecs() - + sc->pm_stats.last_busy_timestamp; + seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp", + sc->pm_stats.last_busy_timestamp / 1000000, + sc->pm_stats.last_busy_timestamp % 1000000); + seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since", + usecs_age / 1000000, usecs_age % 1000000); + + if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) { + msecs_age = jiffies_to_msecs(jiffies - + sc->pm_stats.suspend_jiffies); + seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since", + msecs_age / 1000, msecs_age % 1000); + } + + seq_printf(s, "%30s: %d\n", "PM Usage count", + atomic_read(&sc->dev->power.usage_count)); + + seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt", + sc->prevent_suspend_cnt); + + HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended); + HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err); + HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed); + + HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume); + seq_printf(s, "%30s: %u\n", "prevent_suspend", + qdf_atomic_read(&sc->pm_stats.prevent_suspend)); + seq_printf(s, "%30s: %u\n", "allow_suspend", + qdf_atomic_read(&sc->pm_stats.allow_suspend)); + + HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout); + HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout); + HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err); + + seq_printf(s, "%30s: %u\n", "runtime_get", + qdf_atomic_read(&sc->pm_stats.runtime_get)); + seq_printf(s, "%30s: %u\n", "runtime_put", + qdf_atomic_read(&sc->pm_stats.runtime_put)); + seq_printf(s, "get put get-timestamp put-timestamp :DBGID_NAME\n"); + for (i = 0; i < RTPM_ID_MAX; i++) { + seq_printf(s, "%-10d ", + qdf_atomic_read(&sc->pm_stats.runtime_get_dbgid[i])); + seq_printf(s, "%-10d ", + qdf_atomic_read(&sc->pm_stats.runtime_put_dbgid[i])); + seq_printf(s, "0x%-10llx ", + sc->pm_stats.runtime_get_timestamp_dbgid[i]); + seq_printf(s, "0x%-10llx ", + sc->pm_stats.runtime_put_timestamp_dbgid[i]); + seq_printf(s, ":%-30s\n", rtpm_string_from_dbgid(i)); + } + + timer_expires = sc->runtime_timer_expires; + if (timer_expires > 0) { + msecs_age = jiffies_to_msecs(timer_expires - jiffies); + seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout", + msecs_age / 1000, msecs_age % 1000); + } + + spin_lock_bh(&sc->runtime_lock); + if (list_empty(&sc->prevent_suspend_list)) { + spin_unlock_bh(&sc->runtime_lock); + return 0; + } + + seq_printf(s, "%30s: ", "Active Wakeup_Sources"); + list_for_each_entry(ctx, &sc->prevent_suspend_list, list) { + seq_printf(s, "%s", ctx->name); + if (ctx->timeout) + seq_printf(s, "(%d ms)", ctx->timeout); + seq_puts(s, " "); + } + seq_puts(s, "\n"); + spin_unlock_bh(&sc->runtime_lock); + + return 0; +} +#undef HIF_PCI_RUNTIME_PM_STATS + +/** + * hif_pci_autopm_open() - open a debug fs file to access the runtime pm stats + * @inode + * @file + * + * Return: linux error code of single_open. + */ +static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file) +{ + return single_open(file, hif_pci_pm_runtime_debugfs_show, + inode->i_private); +} + +static const struct file_operations hif_pci_runtime_pm_fops = { + .owner = THIS_MODULE, + .open = hif_pci_runtime_pm_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, +}; + +/** + * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry + * @sc: pci context + * + * creates a debugfs entry to debug the runtime pm feature. + */ +static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc) +{ + sc->pm_dentry = debugfs_create_file("cnss_runtime_pm", + 0400, NULL, sc, + &hif_pci_runtime_pm_fops); +} + +/** + * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry + * @sc: pci context + * + * removes the debugfs entry to debug the runtime pm feature. + */ +static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc) +{ + debugfs_remove(sc->pm_dentry); +} + +static void hif_runtime_init(struct device *dev, int delay) +{ + pm_runtime_set_autosuspend_delay(dev, delay); + pm_runtime_use_autosuspend(dev); + pm_runtime_allow(dev); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_noidle(dev); + pm_suspend_ignore_children(dev, true); +} + +static void hif_runtime_exit(struct device *dev) +{ + pm_runtime_get_noresume(dev); + pm_runtime_set_active(dev); +} + +static void hif_pm_runtime_lock_timeout_fn(void *data); + +/** + * hif_pm_runtime_start(): start the runtime pm + * @sc: pci context + * + * After this call, runtime pm will be active. + */ +static void hif_pm_runtime_start(struct hif_pci_softc *sc) +{ + struct hif_softc *ol_sc = HIF_GET_SOFTC(sc); + uint32_t mode = hif_get_conparam(ol_sc); + + if (!ol_sc->hif_config.enable_runtime_pm) { + HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__); + return; + } + + if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) || + mode == QDF_GLOBAL_MONITOR_MODE) { + HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n", + __func__); + return; + } + + qdf_timer_init(NULL, &sc->runtime_timer, + hif_pm_runtime_lock_timeout_fn, + sc, QDF_TIMER_TYPE_WAKE_APPS); + + HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__, + ol_sc->hif_config.runtime_pm_delay); + + qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON); + hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay); + hif_runtime_pm_debugfs_create(sc); +} + +/** + * hif_pm_runtime_stop(): stop runtime pm + * @sc: pci context + * + * Turns off runtime pm and frees corresponding resources + * that were acquired by hif_runtime_pm_start(). + */ +static void hif_pm_runtime_stop(struct hif_pci_softc *sc) +{ + struct hif_softc *ol_sc = HIF_GET_SOFTC(sc); + uint32_t mode = hif_get_conparam(ol_sc); + + if (!ol_sc->hif_config.enable_runtime_pm) + return; + + if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) || + mode == QDF_GLOBAL_MONITOR_MODE) + return; + + hif_runtime_exit(sc->dev); + + hif_pm_runtime_sync_resume(GET_HIF_OPAQUE_HDL(sc)); + + qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE); + + hif_runtime_pm_debugfs_remove(sc); + qdf_timer_free(&sc->runtime_timer); + /* doesn't wait for penting trafic unlike cld-2.0 */ +} + +/** + * hif_pm_runtime_open(): initialize runtime pm + * @sc: pci data structure + * + * Early initialization + */ +static void hif_pm_runtime_open(struct hif_pci_softc *sc) +{ + int i; + spin_lock_init(&sc->runtime_lock); + + qdf_atomic_init(&sc->pm_state); + qdf_runtime_lock_init(&sc->prevent_linkdown_lock); + qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE); + qdf_atomic_init(&sc->pm_stats.runtime_get); + qdf_atomic_init(&sc->pm_stats.runtime_put); + qdf_atomic_init(&sc->pm_stats.allow_suspend); + qdf_atomic_init(&sc->pm_stats.prevent_suspend); + for (i = 0; i < RTPM_ID_MAX; i++) { + qdf_atomic_init(&sc->pm_stats.runtime_get_dbgid[i]); + qdf_atomic_init(&sc->pm_stats.runtime_put_dbgid[i]); + } + INIT_LIST_HEAD(&sc->prevent_suspend_list); +} + +/** + * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state + * @sc: pci context + * + * Ensure we have only one vote against runtime suspend before closing + * the runtime suspend feature. + * + * all gets by the wlan driver should have been returned + * one vote should remain as part of cnss_runtime_exit + * + * needs to be revisited if we share the root complex. + */ +static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc) +{ + struct hif_pm_runtime_lock *ctx, *tmp; + + if (atomic_read(&sc->dev->power.usage_count) != 1) + hif_pci_runtime_pm_warn(sc, "Driver UnLoaded"); + else + return; + + spin_lock_bh(&sc->runtime_lock); + list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) { + spin_unlock_bh(&sc->runtime_lock); + hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx); + spin_lock_bh(&sc->runtime_lock); + } + spin_unlock_bh(&sc->runtime_lock); + + /* ensure 1 and only 1 usage count so that when the wlan + * driver is re-insmodded runtime pm won't be + * disabled also ensures runtime pm doesn't get + * broken on by being less than 1. + */ + if (atomic_read(&sc->dev->power.usage_count) <= 0) + atomic_set(&sc->dev->power.usage_count, 1); + while (atomic_read(&sc->dev->power.usage_count) > 1) + hif_pm_runtime_put_auto(sc->dev); +} + +static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc, + struct hif_pm_runtime_lock *lock); + +/** + * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR + * @sc: PCIe Context + * + * API is used to empty the runtime pm prevent suspend list. + * + * Return: void + */ +static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc) +{ + struct hif_pm_runtime_lock *ctx, *tmp; + + spin_lock_bh(&sc->runtime_lock); + list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) { + __hif_pm_runtime_allow_suspend(sc, ctx); + } + spin_unlock_bh(&sc->runtime_lock); +} + +/** + * hif_pm_runtime_close(): close runtime pm + * @sc: pci bus handle + * + * ensure runtime_pm is stopped before closing the driver + */ +static void hif_pm_runtime_close(struct hif_pci_softc *sc) +{ + struct hif_softc *scn = HIF_GET_SOFTC(sc); + + qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock); + if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE) + return; + + hif_pm_runtime_stop(sc); + + hif_is_recovery_in_progress(scn) ? + hif_pm_runtime_sanitize_on_ssr_exit(sc) : + hif_pm_runtime_sanitize_on_exit(sc); +} + +int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + int pm_state; + + if (!sc) + return -EINVAL; + + if (!hif_pci_pm_runtime_enabled(sc)) + return 0; + + pm_state = qdf_atomic_read(&sc->pm_state); + if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED || + pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) + HIF_INFO("Runtime PM resume is requested by %ps", + (void *)_RET_IP_); + + sc->pm_stats.request_resume++; + sc->pm_stats.last_resume_caller = (void *)_RET_IP_; + + return pm_runtime_resume(sc->dev); +} +#else +static void hif_pm_runtime_close(struct hif_pci_softc *sc) {} +static void hif_pm_runtime_open(struct hif_pci_softc *sc) {} +static void hif_pm_runtime_start(struct hif_pci_softc *sc) {} +static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {} +#endif + +/** + * hif_disable_power_gating() - disable HW power gating + * @hif_ctx: hif context + * + * disables pcie L1 power states + */ +static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + if (!scn) { + HIF_ERROR("%s: Could not disable ASPM scn is null", + __func__); + return; + } + + /* Disable ASPM when pkt log is enabled */ + pfrm_read_config_dword(sc->pdev, 0x80, &sc->lcr_val); + pfrm_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00)); +} + +/** + * hif_enable_power_gating() - enable HW power gating + * @hif_ctx: hif context + * + * enables pcie L1 power states + */ +static void hif_enable_power_gating(struct hif_pci_softc *sc) +{ + if (!sc) { + HIF_ERROR("%s: Could not disable ASPM scn is null", + __func__); + return; + } + + /* Re-enable ASPM after firmware/OTP download is complete */ + pfrm_write_config_dword(sc->pdev, 0x80, sc->lcr_val); +} + +/** + * hif_enable_power_management() - enable power management + * @hif_ctx: hif context + * + * Enables runtime pm, aspm(PCI.. hif_enable_power_gating) and re-enabling + * soc-sleep after driver load (hif_pci_target_sleep_state_adjust). + * + * note: epping mode does not call this function as it does not + * care about saving power. + */ +void hif_pci_enable_power_management(struct hif_softc *hif_sc, + bool is_packet_log_enabled) +{ + struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc); + uint32_t mode; + + if (!pci_ctx) { + HIF_ERROR("%s, hif_ctx null", __func__); + return; + } + + mode = hif_get_conparam(hif_sc); + if (mode == QDF_GLOBAL_FTM_MODE) { + HIF_INFO("%s: Enable power gating for FTM mode", __func__); + hif_enable_power_gating(pci_ctx); + return; + } + + hif_pm_runtime_start(pci_ctx); + + if (!is_packet_log_enabled) + hif_enable_power_gating(pci_ctx); + + if (!CONFIG_ATH_PCIE_MAX_PERF && + CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD && + !ce_srng_based(hif_sc)) { + /* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */ + if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0) + HIF_ERROR("%s, failed to set target to sleep", + __func__); + } +} + +/** + * hif_disable_power_management() - disable power management + * @hif_ctx: hif context + * + * Currently disables runtime pm. Should be updated to behave + * if runtime pm is not started. Should be updated to take care + * of aspm and soc sleep for driver load. + */ +void hif_pci_disable_power_management(struct hif_softc *hif_ctx) +{ + struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx); + + if (!pci_ctx) { + HIF_ERROR("%s, hif_ctx null", __func__); + return; + } + + hif_pm_runtime_stop(pci_ctx); +} + +void hif_pci_display_stats(struct hif_softc *hif_ctx) +{ + struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx); + + if (!pci_ctx) { + HIF_ERROR("%s, hif_ctx null", __func__); + return; + } + hif_display_ce_stats(hif_ctx); + + hif_print_pci_stats(pci_ctx); +} + +void hif_pci_clear_stats(struct hif_softc *hif_ctx) +{ + struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx); + + if (!pci_ctx) { + HIF_ERROR("%s, hif_ctx null", __func__); + return; + } + hif_clear_ce_stats(&pci_ctx->ce_sc); +} + +#define ATH_PCI_PROBE_RETRY_MAX 3 +/** + * hif_bus_open(): hif_bus_open + * @scn: scn + * @bus_type: bus type + * + * Return: n/a + */ +QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + hif_ctx->bus_type = bus_type; + hif_pm_runtime_open(sc); + + qdf_spinlock_create(&sc->irq_lock); + + return hif_ce_open(hif_ctx); +} + +/** + * hif_wake_target_cpu() - wake the target's cpu + * @scn: hif context + * + * Send an interrupt to the device to wake up the Target CPU + * so it has an opportunity to notice any changed state. + */ +static void hif_wake_target_cpu(struct hif_softc *scn) +{ + QDF_STATUS rv; + uint32_t core_ctrl; + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + + rv = hif_diag_read_access(hif_hdl, + SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS, + &core_ctrl); + QDF_ASSERT(rv == QDF_STATUS_SUCCESS); + /* A_INUM_FIRMWARE interrupt to Target CPU */ + core_ctrl |= CORE_CTRL_CPU_INTR_MASK; + + rv = hif_diag_write_access(hif_hdl, + SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS, + core_ctrl); + QDF_ASSERT(rv == QDF_STATUS_SUCCESS); +} + +/** + * soc_wake_reset() - allow the target to go to sleep + * @scn: hif_softc + * + * Clear the force wake register. This is done by + * hif_sleep_entry and cancel defered timer sleep. + */ +static void soc_wake_reset(struct hif_softc *scn) +{ + hif_write32_mb(scn, scn->mem + + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_RESET); +} + +/** + * hif_sleep_entry() - gate target sleep + * @arg: hif context + * + * This function is the callback for the sleep timer. + * Check if last force awake critical section was at least + * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago. if it was, + * allow the target to go to sleep and cancel the sleep timer. + * otherwise reschedule the sleep timer. + */ +static void hif_sleep_entry(void *arg) +{ + struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg; + struct hif_softc *scn = HIF_GET_SOFTC(hif_state); + uint32_t idle_ms; + + if (scn->recovery) + return; + + if (hif_is_driver_unloading(scn)) + return; + + qdf_spin_lock_irqsave(&hif_state->keep_awake_lock); + if (hif_state->fake_sleep) { + idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks() + - hif_state->sleep_ticks); + if (!hif_state->verified_awake && + idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) { + if (!qdf_atomic_read(&scn->link_suspended)) { + soc_wake_reset(scn); + hif_state->fake_sleep = false; + } + } else { + qdf_timer_stop(&hif_state->sleep_timer); + qdf_timer_start(&hif_state->sleep_timer, + HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS); + } + } + qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock); +} + +#define HIF_HIA_MAX_POLL_LOOP 1000000 +#define HIF_HIA_POLLING_DELAY_MS 10 + +#ifdef QCA_HIF_HIA_EXTND + +static void hif_set_hia_extnd(struct hif_softc *scn) +{ + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); + uint32_t target_type = tgt_info->target_type; + + HIF_TRACE("%s: E", __func__); + + if ((target_type == TARGET_TYPE_AR900B) || + target_type == TARGET_TYPE_QCA9984 || + target_type == TARGET_TYPE_QCA9888) { + /* CHIP revision is 8-11 bits of the CHIP_ID register 0xec + * in RTC space + */ + tgt_info->target_revision + = CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem + + CHIP_ID_ADDRESS)); + qdf_print("chip_id 0x%x chip_revision 0x%x", + target_type, tgt_info->target_revision); + } + + { + uint32_t flag2_value = 0; + uint32_t flag2_targ_addr = + host_interest_item_address(target_type, + offsetof(struct host_interest_s, hi_skip_clock_init)); + + if ((ar900b_20_targ_clk != -1) && + (frac != -1) && (intval != -1)) { + hif_diag_read_access(hif_hdl, flag2_targ_addr, + &flag2_value); + qdf_print("\n Setting clk_override"); + flag2_value |= CLOCK_OVERRIDE; + + hif_diag_write_access(hif_hdl, flag2_targ_addr, + flag2_value); + qdf_print("\n CLOCK PLL val set %d", flag2_value); + } else { + qdf_print("\n CLOCK PLL skipped"); + } + } + + if (target_type == TARGET_TYPE_AR900B + || target_type == TARGET_TYPE_QCA9984 + || target_type == TARGET_TYPE_QCA9888) { + + /* for AR9980_2.0, 300 mhz clock is used, right now we assume + * this would be supplied through module parameters, + * if not supplied assumed default or same behavior as 1.0. + * Assume 1.0 clock can't be tuned, reset to defaults + */ + + qdf_print(KERN_INFO + "%s: setting the target pll frac %x intval %x", + __func__, frac, intval); + + /* do not touch frac, and int val, let them be default -1, + * if desired, host can supply these through module params + */ + if (frac != -1 || intval != -1) { + uint32_t flag2_value = 0; + uint32_t flag2_targ_addr; + + flag2_targ_addr = + host_interest_item_address(target_type, + offsetof(struct host_interest_s, + hi_clock_info)); + hif_diag_read_access(hif_hdl, + flag2_targ_addr, &flag2_value); + qdf_print("\n ====> FRAC Val %x Address %x", frac, + flag2_value); + hif_diag_write_access(hif_hdl, flag2_value, frac); + qdf_print("\n INT Val %x Address %x", + intval, flag2_value + 4); + hif_diag_write_access(hif_hdl, + flag2_value + 4, intval); + } else { + qdf_print(KERN_INFO + "%s: no frac provided, skipping pre-configuring PLL", + __func__); + } + + /* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */ + if ((target_type == TARGET_TYPE_AR900B) + && (tgt_info->target_revision == AR900B_REV_2) + && ar900b_20_targ_clk != -1) { + uint32_t flag2_value = 0; + uint32_t flag2_targ_addr; + + flag2_targ_addr + = host_interest_item_address(target_type, + offsetof(struct host_interest_s, + hi_desired_cpu_speed_hz)); + hif_diag_read_access(hif_hdl, flag2_targ_addr, + &flag2_value); + qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x", + flag2_value); + hif_diag_write_access(hif_hdl, flag2_value, + ar900b_20_targ_clk/*300000000u*/); + } else if (target_type == TARGET_TYPE_QCA9888) { + uint32_t flag2_targ_addr; + + if (200000000u != qca9888_20_targ_clk) { + qca9888_20_targ_clk = 300000000u; + /* Setting the target clock speed to 300 mhz */ + } + + flag2_targ_addr + = host_interest_item_address(target_type, + offsetof(struct host_interest_s, + hi_desired_cpu_speed_hz)); + hif_diag_write_access(hif_hdl, flag2_targ_addr, + qca9888_20_targ_clk); + } else { + qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL", + __func__); + } + } else { + if (frac != -1 || intval != -1) { + uint32_t flag2_value = 0; + uint32_t flag2_targ_addr = + host_interest_item_address(target_type, + offsetof(struct host_interest_s, + hi_clock_info)); + hif_diag_read_access(hif_hdl, flag2_targ_addr, + &flag2_value); + qdf_print("\n ====> FRAC Val %x Address %x", frac, + flag2_value); + hif_diag_write_access(hif_hdl, flag2_value, frac); + qdf_print("\n INT Val %x Address %x", intval, + flag2_value + 4); + hif_diag_write_access(hif_hdl, flag2_value + 4, + intval); + } + } +} + +#else + +static void hif_set_hia_extnd(struct hif_softc *scn) +{ +} + +#endif + +/** + * hif_set_hia() - fill out the host interest area + * @scn: hif context + * + * This is replaced by hif_wlan_enable for integrated targets. + * This fills out the host interest area. The firmware will + * process these memory addresses when it is first brought out + * of reset. + * + * Return: 0 for success. + */ +static int hif_set_hia(struct hif_softc *scn) +{ + QDF_STATUS rv; + uint32_t interconnect_targ_addr = 0; + uint32_t pcie_state_targ_addr = 0; + uint32_t pipe_cfg_targ_addr = 0; + uint32_t svc_to_pipe_map = 0; + uint32_t pcie_config_flags = 0; + uint32_t flag2_value = 0; + uint32_t flag2_targ_addr = 0; +#ifdef QCA_WIFI_3_0 + uint32_t host_interest_area = 0; + uint8_t i; +#else + uint32_t ealloc_value = 0; + uint32_t ealloc_targ_addr = 0; + uint8_t banks_switched = 1; + uint32_t chip_id; +#endif + uint32_t pipe_cfg_addr; + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); + uint32_t target_type = tgt_info->target_type; + uint32_t target_ce_config_sz, target_service_to_ce_map_sz; + static struct CE_pipe_config *target_ce_config; + struct service_to_pipe *target_service_to_ce_map; + + HIF_TRACE("%s: E", __func__); + + hif_get_target_ce_config(scn, + &target_ce_config, &target_ce_config_sz, + &target_service_to_ce_map, + &target_service_to_ce_map_sz, + NULL, NULL); + + if (ADRASTEA_BU) + return QDF_STATUS_SUCCESS; + +#ifdef QCA_WIFI_3_0 + i = 0; + while (i < HIF_HIA_MAX_POLL_LOOP) { + host_interest_area = hif_read32_mb(scn, scn->mem + + A_SOC_CORE_SCRATCH_0_ADDRESS); + if ((host_interest_area & 0x01) == 0) { + qdf_mdelay(HIF_HIA_POLLING_DELAY_MS); + host_interest_area = 0; + i++; + if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0)) + HIF_ERROR("%s: poll timeout(%d)", __func__, i); + } else { + host_interest_area &= (~0x01); + hif_write32_mb(scn, scn->mem + 0x113014, 0); + break; + } + } + + if (i >= HIF_HIA_MAX_POLL_LOOP) { + HIF_ERROR("%s: hia polling timeout", __func__); + return -EIO; + } + + if (host_interest_area == 0) { + HIF_ERROR("%s: host_interest_area = 0", __func__); + return -EIO; + } + + interconnect_targ_addr = host_interest_area + + offsetof(struct host_interest_area_t, + hi_interconnect_state); + + flag2_targ_addr = host_interest_area + + offsetof(struct host_interest_area_t, hi_option_flag2); + +#else + interconnect_targ_addr = hif_hia_item_address(target_type, + offsetof(struct host_interest_s, hi_interconnect_state)); + ealloc_targ_addr = hif_hia_item_address(target_type, + offsetof(struct host_interest_s, hi_early_alloc)); + flag2_targ_addr = hif_hia_item_address(target_type, + offsetof(struct host_interest_s, hi_option_flag2)); +#endif + /* Supply Target-side CE configuration */ + rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr, + &pcie_state_targ_addr); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d", + __func__, interconnect_targ_addr, rv); + goto done; + } + if (pcie_state_targ_addr == 0) { + rv = QDF_STATUS_E_FAILURE; + HIF_ERROR("%s: pcie state addr is 0", __func__); + goto done; + } + pipe_cfg_addr = pcie_state_targ_addr + + offsetof(struct pcie_state_s, + pipe_cfg_addr); + rv = hif_diag_read_access(hif_hdl, + pipe_cfg_addr, + &pipe_cfg_targ_addr); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d", + __func__, pipe_cfg_addr, rv); + goto done; + } + if (pipe_cfg_targ_addr == 0) { + rv = QDF_STATUS_E_FAILURE; + HIF_ERROR("%s: pipe cfg addr is 0", __func__); + goto done; + } + + rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr, + (uint8_t *) target_ce_config, + target_ce_config_sz); + + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv); + goto done; + } + + rv = hif_diag_read_access(hif_hdl, + pcie_state_targ_addr + + offsetof(struct pcie_state_s, + svc_to_pipe_map), + &svc_to_pipe_map); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv); + goto done; + } + if (svc_to_pipe_map == 0) { + rv = QDF_STATUS_E_FAILURE; + HIF_ERROR("%s: svc_to_pipe map is 0", __func__); + goto done; + } + + rv = hif_diag_write_mem(hif_hdl, + svc_to_pipe_map, + (uint8_t *) target_service_to_ce_map, + target_service_to_ce_map_sz); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv); + goto done; + } + + rv = hif_diag_read_access(hif_hdl, + pcie_state_targ_addr + + offsetof(struct pcie_state_s, + config_flags), + &pcie_config_flags); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv); + goto done; + } +#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE) + pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1; +#else + pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1; +#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */ + pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT; +#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE) + pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE; +#endif + rv = hif_diag_write_mem(hif_hdl, + pcie_state_targ_addr + + offsetof(struct pcie_state_s, + config_flags), + (uint8_t *) &pcie_config_flags, + sizeof(pcie_config_flags)); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv); + goto done; + } + +#ifndef QCA_WIFI_3_0 + /* configure early allocation */ + ealloc_targ_addr = hif_hia_item_address(target_type, + offsetof( + struct host_interest_s, + hi_early_alloc)); + + rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr, + &ealloc_value); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: get early alloc val (%d)", __func__, rv); + goto done; + } + + /* 1 bank is switched to IRAM, except ROME 1.0 */ + ealloc_value |= + ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & + HI_EARLY_ALLOC_MAGIC_MASK); + + rv = hif_diag_read_access(hif_hdl, + CHIP_ID_ADDRESS | + RTC_SOC_BASE_ADDRESS, &chip_id); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: get chip id val (%d)", __func__, rv); + goto done; + } + if (CHIP_ID_VERSION_GET(chip_id) == 0xD) { + tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id); + switch (CHIP_ID_REVISION_GET(chip_id)) { + case 0x2: /* ROME 1.3 */ + /* 2 banks are switched to IRAM */ + banks_switched = 2; + break; + case 0x4: /* ROME 2.1 */ + case 0x5: /* ROME 2.2 */ + banks_switched = 6; + break; + case 0x8: /* ROME 3.0 */ + case 0x9: /* ROME 3.1 */ + case 0xA: /* ROME 3.2 */ + banks_switched = 9; + break; + case 0x0: /* ROME 1.0 */ + case 0x1: /* ROME 1.1 */ + default: + /* 3 banks are switched to IRAM */ + banks_switched = 3; + break; + } + } + + ealloc_value |= + ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) + & HI_EARLY_ALLOC_IRAM_BANKS_MASK); + + rv = hif_diag_write_access(hif_hdl, + ealloc_targ_addr, + ealloc_value); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: set early alloc val (%d)", __func__, rv); + goto done; + } +#endif + if ((target_type == TARGET_TYPE_AR900B) + || (target_type == TARGET_TYPE_QCA9984) + || (target_type == TARGET_TYPE_QCA9888) + || (target_type == TARGET_TYPE_AR9888)) { + hif_set_hia_extnd(scn); + } + + /* Tell Target to proceed with initialization */ + flag2_targ_addr = hif_hia_item_address(target_type, + offsetof( + struct host_interest_s, + hi_option_flag2)); + + rv = hif_diag_read_access(hif_hdl, flag2_targ_addr, + &flag2_value); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: get option val (%d)", __func__, rv); + goto done; + } + + flag2_value |= HI_OPTION_EARLY_CFG_DONE; + rv = hif_diag_write_access(hif_hdl, flag2_targ_addr, + flag2_value); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: set option val (%d)", __func__, rv); + goto done; + } + + hif_wake_target_cpu(scn); + +done: + + return rv; +} + +/** + * hif_bus_configure() - configure the pcie bus + * @hif_sc: pointer to the hif context. + * + * return: 0 for success. nonzero for failure. + */ +int hif_pci_bus_configure(struct hif_softc *hif_sc) +{ + int status = 0; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); + struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc); + + hif_ce_prepare_config(hif_sc); + + /* initialize sleep state adjust variables */ + hif_state->sleep_timer_init = true; + hif_state->keep_awake_count = 0; + hif_state->fake_sleep = false; + hif_state->sleep_ticks = 0; + + qdf_timer_init(NULL, &hif_state->sleep_timer, + hif_sleep_entry, (void *)hif_state, + QDF_TIMER_TYPE_WAKE_APPS); + hif_state->sleep_timer_init = true; + + status = hif_wlan_enable(hif_sc); + if (status) { + HIF_ERROR("%s: hif_wlan_enable error = %d", + __func__, status); + goto timer_free; + } + + A_TARGET_ACCESS_LIKELY(hif_sc); + + if ((CONFIG_ATH_PCIE_MAX_PERF || + CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) && + !ce_srng_based(hif_sc)) { + /* + * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature + * prevent sleep when we want to keep firmware always awake + * note: when we want to keep firmware always awake, + * hif_target_sleep_state_adjust will point to a dummy + * function, and hif_pci_target_sleep_state_adjust must + * be called instead. + * note: bus type check is here because AHB bus is reusing + * hif_pci_bus_configure code. + */ + if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) { + if (hif_pci_target_sleep_state_adjust(hif_sc, + false, true) < 0) { + status = -EACCES; + goto disable_wlan; + } + } + } + + /* todo: consider replacing this with an srng field */ + if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) || + (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) || + (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) && + (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) { + hif_sc->per_ce_irq = true; + } + + status = hif_config_ce(hif_sc); + if (status) + goto disable_wlan; + + if (hif_needs_bmi(hif_osc)) { + status = hif_set_hia(hif_sc); + if (status) + goto unconfig_ce; + + HIF_INFO_MED("%s: hif_set_hia done", __func__); + + } + + if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) || + (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) || + (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) && + (hif_sc->bus_type == QDF_BUS_TYPE_PCI)) + HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target", + __func__); + else { + status = hif_configure_irq(hif_sc); + if (status < 0) + goto unconfig_ce; + } + + A_TARGET_ACCESS_UNLIKELY(hif_sc); + + return status; + +unconfig_ce: + hif_unconfig_ce(hif_sc); +disable_wlan: + A_TARGET_ACCESS_UNLIKELY(hif_sc); + hif_wlan_disable(hif_sc); + +timer_free: + qdf_timer_stop(&hif_state->sleep_timer); + qdf_timer_free(&hif_state->sleep_timer); + hif_state->sleep_timer_init = false; + + HIF_ERROR("%s: failed, status = %d", __func__, status); + return status; +} + +/** + * hif_bus_close(): hif_bus_close + * + * Return: n/a + */ +void hif_pci_close(struct hif_softc *hif_sc) +{ + struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc); + + hif_pm_runtime_close(hif_pci_sc); + hif_ce_close(hif_sc); +} + +#define BAR_NUM 0 + +static int hif_enable_pci_nopld(struct hif_pci_softc *sc, + struct pci_dev *pdev, + const struct pci_device_id *id) +{ + void __iomem *mem; + int ret = 0; + uint16_t device_id = 0; + struct hif_softc *ol_sc = HIF_GET_SOFTC(sc); + + pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); + if (device_id != id->device) { + HIF_ERROR( + "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x", + __func__, device_id, id->device); + /* pci link is down, so returing with error code */ + return -EIO; + } + + /* FIXME: temp. commenting out assign_resource + * call for dev_attach to work on 2.6.38 kernel + */ +#if (!defined(__LINUX_ARM_ARCH__)) + if (pci_assign_resource(pdev, BAR_NUM)) { + HIF_ERROR("%s: pci_assign_resource error", __func__); + return -EIO; + } +#endif + if (pci_enable_device(pdev)) { + HIF_ERROR("%s: pci_enable_device error", + __func__); + return -EIO; + } + + /* Request MMIO resources */ + ret = pci_request_region(pdev, BAR_NUM, "ath"); + if (ret) { + HIF_ERROR("%s: PCI MMIO reservation error", __func__); + ret = -EIO; + goto err_region; + } + +#ifdef CONFIG_ARM_LPAE + /* if CONFIG_ARM_LPAE is enabled, we have to set 64 bits mask + * for 32 bits device also. + */ + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) { + HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__); + goto err_dma; + } + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) { + HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__); + goto err_dma; + } +#else + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) { + HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__); + goto err_dma; + } + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) { + HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!", + __func__); + goto err_dma; + } +#endif + + PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188); + + /* Set bus master bit in PCI_COMMAND to enable DMA */ + pci_set_master(pdev); + + /* Arrange for access to Target SoC registers. */ + mem = pci_iomap(pdev, BAR_NUM, 0); + if (!mem) { + HIF_ERROR("%s: PCI iomap error", __func__); + ret = -EIO; + goto err_iomap; + } + + HIF_INFO("*****BAR is %pK\n", (void *)mem); + + sc->mem = mem; + + /* Hawkeye emulation specific change */ + if ((device_id == RUMIM2M_DEVICE_ID_NODE0) || + (device_id == RUMIM2M_DEVICE_ID_NODE1) || + (device_id == RUMIM2M_DEVICE_ID_NODE2) || + (device_id == RUMIM2M_DEVICE_ID_NODE3) || + (device_id == RUMIM2M_DEVICE_ID_NODE4) || + (device_id == RUMIM2M_DEVICE_ID_NODE5)) { + mem = mem + 0x0c000000; + sc->mem = mem; + HIF_INFO("%s: Changing PCI mem base to %pK\n", + __func__, sc->mem); + } + + sc->mem_len = pci_resource_len(pdev, BAR_NUM); + ol_sc->mem = mem; + ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM); + sc->pci_enabled = true; + return ret; + +err_iomap: + pci_clear_master(pdev); +err_dma: + pci_release_region(pdev, BAR_NUM); +err_region: + pci_disable_device(pdev); + return ret; +} + +static int hif_enable_pci_pld(struct hif_pci_softc *sc, + struct pci_dev *pdev, + const struct pci_device_id *id) +{ + PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188); + sc->pci_enabled = true; + return 0; +} + + +static void hif_pci_deinit_nopld(struct hif_pci_softc *sc) +{ + pci_disable_msi(sc->pdev); + pci_iounmap(sc->pdev, sc->mem); + pci_clear_master(sc->pdev); + pci_release_region(sc->pdev, BAR_NUM); + pci_disable_device(sc->pdev); +} + +static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {} + +static void hif_disable_pci(struct hif_pci_softc *sc) +{ + struct hif_softc *ol_sc = HIF_GET_SOFTC(sc); + + if (!ol_sc) { + HIF_ERROR("%s: ol_sc = NULL", __func__); + return; + } + hif_pci_device_reset(sc); + sc->hif_pci_deinit(sc); + + sc->mem = NULL; + ol_sc->mem = NULL; +} + +static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc) +{ + int ret = 0; + int targ_awake_limit = 500; +#ifndef QCA_WIFI_3_0 + uint32_t fw_indicator; +#endif + struct hif_softc *scn = HIF_GET_SOFTC(sc); + + /* + * Verify that the Target was started cleanly.* + * The case where this is most likely is with an AUX-powered + * Target and a Host in WoW mode. If the Host crashes, + * loses power, or is restarted (without unloading the driver) + * then the Target is left (aux) powered and running. On a + * subsequent driver load, the Target is in an unexpected state. + * We try to catch that here in order to reset the Target and + * retry the probe. + */ + hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); + while (!hif_targ_is_awake(scn, sc->mem)) { + if (0 == targ_awake_limit) { + HIF_ERROR("%s: target awake timeout", __func__); + ret = -EAGAIN; + goto end; + } + qdf_mdelay(1); + targ_awake_limit--; + } + +#if PCIE_BAR0_READY_CHECKING + { + int wait_limit = 200; + /* Synchronization point: wait the BAR0 is configured */ + while (wait_limit-- && + !(hif_read32_mb(sc, c->mem + + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_RDY_STATUS_ADDRESS) + & PCIE_SOC_RDY_STATUS_BAR_MASK)) { + qdf_mdelay(10); + } + if (wait_limit < 0) { + /* AR6320v1 doesn't support checking of BAR0 + * configuration, takes one sec to wait BAR0 ready + */ + HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0", + __func__); + } + } +#endif + +#ifndef QCA_WIFI_3_0 + fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS); + hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); + + if (fw_indicator & FW_IND_INITIALIZED) { + HIF_ERROR("%s: Target is in an unknown state. EAGAIN", + __func__); + ret = -EAGAIN; + goto end; + } +#endif + +end: + return ret; +} + +static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc) +{ + int ret = 0; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + uint32_t target_type = scn->target_info.target_type; + + HIF_TRACE("%s: E", __func__); + + /* do notn support MSI or MSI IRQ failed */ + tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc); + ret = request_irq(sc->pdev->irq, + hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED, + "wlan_pci", sc); + if (ret) { + HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret); + goto end; + } + scn->wake_irq = sc->pdev->irq; + /* Use sc->irq instead of sc->pdev-irq + * platform_device pdev doesn't have an irq field + */ + sc->irq = sc->pdev->irq; + /* Use Legacy PCI Interrupts */ + hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), + HOST_GROUP0_MASK); + hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS)); + hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); + + if ((target_type == TARGET_TYPE_IPQ4019) || + (target_type == TARGET_TYPE_AR900B) || + (target_type == TARGET_TYPE_QCA9984) || + (target_type == TARGET_TYPE_AR9888) || + (target_type == TARGET_TYPE_QCA9888) || + (target_type == TARGET_TYPE_AR6320V1) || + (target_type == TARGET_TYPE_AR6320V2) || + (target_type == TARGET_TYPE_AR6320V3)) { + hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); + } +end: + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, + "%s: X, ret = %d", __func__, ret); + return ret; +} + +static int hif_ce_srng_msi_free_irq(struct hif_softc *scn) +{ + int ret; + int ce_id, irq; + uint32_t msi_data_start; + uint32_t msi_data_count; + uint32_t msi_irq_start; + struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn); + struct CE_attr *host_ce_conf = ce_sc->host_ce_config; + + ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", + &msi_data_count, &msi_data_start, + &msi_irq_start); + if (ret) + return ret; + + /* needs to match the ce_id -> irq data mapping + * used in the srng parameter configuration + */ + for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { + unsigned int msi_data; + + if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR) + continue; + + if (!ce_sc->tasklets[ce_id].inited) + continue; + + msi_data = (ce_id % msi_data_count) + msi_irq_start; + irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); + + hif_pci_ce_irq_remove_affinity_hint(irq); + + hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__, + ce_id, msi_data, irq); + + pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]); + } + + return ret; +} + +void hif_pci_deconfigure_grp_irq(struct hif_softc *scn) +{ + int i, j, irq; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct hif_exec_context *hif_ext_group; + + for (i = 0; i < hif_state->hif_num_extgroup; i++) { + hif_ext_group = hif_state->hif_ext_group[i]; + if (hif_ext_group->irq_requested) { + hif_ext_group->irq_requested = false; + for (j = 0; j < hif_ext_group->numirq; j++) { + irq = hif_ext_group->os_irq[j]; + pfrm_free_irq(scn->qdf_dev->dev, + irq, hif_ext_group); + } + hif_ext_group->numirq = 0; + } + } +} + +#ifdef HIF_BUS_LOG_INFO +bool hif_log_pcie_info(struct hif_softc *scn, uint8_t *data, + unsigned int *offset) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + struct hang_event_bus_info info = {0}; + size_t size; + + if (!sc) { + hif_err("HIF Bus Context is Invalid"); + return false; + } + + pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &info.dev_id); + + size = sizeof(info); + QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_BUS_INFO, + size - QDF_HANG_EVENT_TLV_HDR_SIZE); + + if (*offset + size > QDF_WLAN_HANG_FW_OFFSET) + return false; + + qdf_mem_copy(data + *offset, &info, size); + *offset = *offset + size; + + if (info.dev_id == sc->devid) + return false; + + qdf_recovery_reason_update(QCA_HANG_BUS_FAILURE); + qdf_get_bus_reg_dump(scn->qdf_dev->dev, data, + (QDF_WLAN_HANG_FW_OFFSET - size)); + return true; +} +#endif + +/** + * hif_nointrs(): disable IRQ + * + * This function stops interrupt(s) + * + * @scn: struct hif_softc + * + * Return: none + */ +void hif_pci_nointrs(struct hif_softc *scn) +{ + int i, ret; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + ce_unregister_irq(hif_state, CE_ALL_BITMAP); + + if (scn->request_irq_done == false) + return; + + hif_pci_deconfigure_grp_irq(scn); + + ret = hif_ce_srng_msi_free_irq(scn); + if (ret != -EINVAL) { + /* ce irqs freed in hif_ce_srng_msi_free_irq */ + + if (scn->wake_irq) + pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn); + scn->wake_irq = 0; + } else if (sc->num_msi_intrs > 0) { + /* MSI interrupt(s) */ + for (i = 0; i < sc->num_msi_intrs; i++) + free_irq(sc->irq + i, sc); + sc->num_msi_intrs = 0; + } else { + /* Legacy PCI line interrupt + * Use sc->irq instead of sc->pdev-irq + * platform_device pdev doesn't have an irq field + */ + free_irq(sc->irq, sc); + } + scn->request_irq_done = false; +} + +/** + * hif_disable_bus(): hif_disable_bus + * + * This function disables the bus + * + * @bdev: bus dev + * + * Return: none + */ +void hif_pci_disable_bus(struct hif_softc *scn) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + struct pci_dev *pdev; + void __iomem *mem; + struct hif_target_info *tgt_info = &scn->target_info; + + /* Attach did not succeed, all resources have been + * freed in error handler + */ + if (!sc) + return; + + pdev = sc->pdev; + if (ADRASTEA_BU) { + hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn)); + + hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0); + hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS, + HOST_GROUP0_MASK); + } + +#if defined(CPU_WARM_RESET_WAR) + /* Currently CPU warm reset sequence is tested only for AR9888_REV2 + * Need to enable for AR9888_REV1 once CPU warm reset sequence is + * verified for AR9888_REV1 + */ + if ((tgt_info->target_version == AR9888_REV2_VERSION) || + (tgt_info->target_version == AR9887_REV1_VERSION)) + hif_pci_device_warm_reset(sc); + else + hif_pci_device_reset(sc); +#else + hif_pci_device_reset(sc); +#endif + mem = (void __iomem *)sc->mem; + if (mem) { + hif_dump_pipe_debug_count(scn); + if (scn->athdiag_procfs_inited) { + athdiag_procfs_remove(); + scn->athdiag_procfs_inited = false; + } + sc->hif_pci_deinit(sc); + scn->mem = NULL; + } + HIF_INFO("%s: X", __func__); +} + +#define OL_ATH_PCI_PM_CONTROL 0x44 + +#ifdef FEATURE_RUNTIME_PM +/** + * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring + * @scn: hif context + * @flag: prevent linkdown if true otherwise allow + * + * this api should only be called as part of bus prevent linkdown + */ +static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + + if (flag) + qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock); + else + qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock); +} +#else +static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag) +{ +} +#endif + +#if defined(CONFIG_PCI_MSM) +/** + * hif_bus_prevent_linkdown(): allow or permit linkdown + * @flag: true prevents linkdown, false allows + * + * Calls into the platform driver to vote against taking down the + * pcie link. + * + * Return: n/a + */ +void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag) +{ + int errno; + + HIF_INFO("wlan: %s pcie power collapse", flag ? "disable" : "enable"); + hif_runtime_prevent_linkdown(scn, flag); + + errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag); + if (errno) + HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d", + __func__, errno); +} +#else +void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag) +{ + HIF_INFO("wlan: %s pcie power collapse", (flag ? "disable" : "enable")); + hif_runtime_prevent_linkdown(scn, flag); +} +#endif + +/** + * hif_pci_bus_suspend(): prepare hif for suspend + * + * Return: Errno + */ +int hif_pci_bus_suspend(struct hif_softc *scn) +{ + QDF_STATUS ret; + + hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn)); + + ret = hif_try_complete_tasks(scn); + if (QDF_IS_STATUS_ERROR(ret)) { + hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn)); + return -EBUSY; + } + + /* Stop the HIF Sleep Timer */ + hif_cancel_deferred_target_sleep(scn); + + return 0; +} + +#ifdef PCI_LINK_STATUS_SANITY +/** + * __hif_check_link_status() - API to check if PCIe link is active/not + * @scn: HIF Context + * + * API reads the PCIe config space to verify if PCIe link training is + * successful or not. + * + * Return: Success/Failure + */ +static int __hif_check_link_status(struct hif_softc *scn) +{ + uint16_t dev_id = 0; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); + + if (!sc) { + HIF_ERROR("%s: HIF Bus Context is Invalid", __func__); + return -EINVAL; + } + + pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id); + + if (dev_id == sc->devid) + return 0; + + HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x", + __func__, dev_id); + + scn->recovery = true; + + if (cbk && cbk->set_recovery_in_progress) + cbk->set_recovery_in_progress(cbk->context, true); + else + HIF_ERROR("%s: Driver Global Recovery is not set", __func__); + + pld_is_pci_link_down(sc->dev); + return -EACCES; +} +#else +static inline int __hif_check_link_status(struct hif_softc *scn) +{ + return 0; +} +#endif + + +/** + * hif_pci_bus_resume(): prepare hif for resume + * + * Return: Errno + */ +int hif_pci_bus_resume(struct hif_softc *scn) +{ + int errno; + + errno = __hif_check_link_status(scn); + if (errno) + return errno; + + hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn)); + + return 0; +} + +/** + * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions + * @scn: hif context + * + * Ensure that if we received the wakeup message before the irq + * was disabled that the message is pocessed before suspending. + * + * Return: -EBUSY if we fail to flush the tasklets. + */ +int hif_pci_bus_suspend_noirq(struct hif_softc *scn) +{ + if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn))) + qdf_atomic_set(&scn->link_suspended, 1); + + hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn)); + + return 0; +} + +/** + * hif_pci_bus_resume_noirq() - ensure there are no pending transactions + * @scn: hif context + * + * Ensure that if we received the wakeup message before the irq + * was disabled that the message is pocessed before suspending. + * + * Return: -EBUSY if we fail to flush the tasklets. + */ +int hif_pci_bus_resume_noirq(struct hif_softc *scn) +{ + hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn)); + + if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn))) + qdf_atomic_set(&scn->link_suspended, 0); + + return 0; +} + +#ifdef FEATURE_RUNTIME_PM +/** + * __hif_runtime_pm_set_state(): utility function + * @state: state to set + * + * indexes into the runtime pm state and sets it. + */ +static void __hif_runtime_pm_set_state(struct hif_softc *scn, + enum hif_pm_runtime_state state) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + + if (!sc) { + HIF_ERROR("%s: HIF_CTX not initialized", + __func__); + return; + } + + qdf_atomic_set(&sc->pm_state, state); +} + +/** + * hif_runtime_pm_set_state_on(): adjust runtime pm state + * + * Notify hif that a the runtime pm state should be on + */ +static void hif_runtime_pm_set_state_on(struct hif_softc *scn) +{ + __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON); +} + +/** + * hif_runtime_pm_set_state_resuming(): adjust runtime pm state + * + * Notify hif that a runtime pm resuming has started + */ +static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn) +{ + __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING); +} + +/** + * hif_runtime_pm_set_state_suspending(): adjust runtime pm state + * + * Notify hif that a runtime pm suspend has started + */ +static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn) +{ + __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING); +} + +/** + * hif_runtime_pm_set_state_suspended(): adjust runtime pm state + * + * Notify hif that a runtime suspend attempt has been completed successfully + */ +static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn) +{ + __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED); +} + +/** + * hif_log_runtime_suspend_success() - log a successful runtime suspend + */ +static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + if (!sc) + return; + + sc->pm_stats.suspended++; + sc->pm_stats.suspend_jiffies = jiffies; +} + +/** + * hif_log_runtime_suspend_failure() - log a failed runtime suspend + * + * log a failed runtime suspend + * mark last busy to prevent immediate runtime suspend + */ +static void hif_log_runtime_suspend_failure(void *hif_ctx) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + if (!sc) + return; + + sc->pm_stats.suspend_err++; +} + +/** + * hif_log_runtime_resume_success() - log a successful runtime resume + * + * log a successful runtime resume + * mark last busy to prevent immediate runtime suspend + */ +static void hif_log_runtime_resume_success(void *hif_ctx) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + if (!sc) + return; + + sc->pm_stats.resumed++; +} + +/** + * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure + * + * Record the failure. + * mark last busy to delay a retry. + * adjust the runtime_pm state. + */ +void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + hif_log_runtime_suspend_failure(hif_ctx); + hif_pm_runtime_mark_last_busy(hif_ctx); + hif_runtime_pm_set_state_on(scn); +} + +/** + * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend + * + * Makes sure that the pci link will be taken down by the suspend opperation. + * If the hif layer is configured to leave the bus on, runtime suspend will + * not save any power. + * + * Set the runtime suspend state to in progress. + * + * return -EINVAL if the bus won't go down. otherwise return 0 + */ +int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (!hif_can_suspend_link(hif_ctx)) { + HIF_ERROR("Runtime PM not supported for link up suspend"); + return -EINVAL; + } + + hif_runtime_pm_set_state_suspending(scn); + return 0; +} + +/** + * hif_process_runtime_suspend_success() - bookkeeping of suspend success + * + * Record the success. + * adjust the runtime_pm state + */ +void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + hif_runtime_pm_set_state_suspended(scn); + hif_log_runtime_suspend_success(scn); +} + +/** + * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume + * + * update the runtime pm state. + */ +void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0); + hif_runtime_pm_set_state_resuming(scn); +} + +/** + * hif_process_runtime_resume_success() - bookkeeping after a runtime resume + * + * record the success. + * adjust the runtime_pm state + */ +void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + hif_log_runtime_resume_success(hif_ctx); + hif_pm_runtime_mark_last_busy(hif_ctx); + hif_runtime_pm_set_state_on(scn); +} + +/** + * hif_runtime_suspend() - do the bus suspend part of a runtime suspend + * + * Return: 0 for success and non-zero error code for failure + */ +int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + int errno; + + errno = hif_bus_suspend(hif_ctx); + if (errno) { + HIF_ERROR("%s: failed bus suspend: %d", __func__, errno); + return errno; + } + + hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1); + + errno = hif_bus_suspend_noirq(hif_ctx); + if (errno) { + HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno); + hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0); + goto bus_resume; + } + + qdf_atomic_set(&sc->pm_dp_rx_busy, 0); + + return 0; + +bus_resume: + QDF_BUG(!hif_bus_resume(hif_ctx)); + + return errno; +} + +/** + * hif_fastpath_resume() - resume fastpath for runtimepm + * + * ensure that the fastpath write index register is up to date + * since runtime pm may cause ce_send_fast to skip the register + * write. + * + * fastpath only applicable to legacy copy engine + */ +void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct CE_state *ce_state; + + if (!scn) + return; + + if (scn->fastpath_mode_on) { + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG]; + qdf_spin_lock_bh(&ce_state->ce_index_lock); + + /*war_ce_src_ring_write_idx_set */ + CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr, + ce_state->src_ring->write_index); + qdf_spin_unlock_bh(&ce_state->ce_index_lock); + Q_TARGET_ACCESS_END(scn); + } +} + +/** + * hif_runtime_resume() - do the bus resume part of a runtime resume + * + * Return: 0 for success and non-zero error code for failure + */ +int hif_runtime_resume(struct hif_opaque_softc *hif_ctx) +{ + int errno; + + QDF_BUG(!hif_bus_resume_noirq(hif_ctx)); + errno = hif_bus_resume(hif_ctx); + if (errno) + HIF_ERROR("%s: failed runtime resume: %d", __func__, errno); + + return errno; +} +#endif /* #ifdef FEATURE_RUNTIME_PM */ + +#if CONFIG_PCIE_64BIT_MSI +static void hif_free_msi_ctx(struct hif_softc *scn) +{ + struct hif_pci_softc *sc = scn->hif_sc; + struct hif_msi_info *info = &sc->msi_info; + struct device *dev = scn->qdf_dev->dev; + + OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma, + OS_GET_DMA_MEM_CONTEXT(scn, dmacontext)); + info->magic = NULL; + info->magic_dma = 0; +} +#else +static void hif_free_msi_ctx(struct hif_softc *scn) +{ +} +#endif + +void hif_pci_disable_isr(struct hif_softc *scn) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + + hif_exec_kill(&scn->osc); + hif_nointrs(scn); + hif_free_msi_ctx(scn); + /* Cancel the pending tasklet */ + ce_tasklet_kill(scn); + tasklet_kill(&sc->intr_tq); + qdf_atomic_set(&scn->active_tasklet_cnt, 0); + qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0); +} + +/* Function to reset SoC */ +void hif_pci_reset_soc(struct hif_softc *hif_sc) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc); + struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc); + struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc); + +#if defined(CPU_WARM_RESET_WAR) + /* Currently CPU warm reset sequence is tested only for AR9888_REV2 + * Need to enable for AR9888_REV1 once CPU warm reset sequence is + * verified for AR9888_REV1 + */ + if (tgt_info->target_version == AR9888_REV2_VERSION) + hif_pci_device_warm_reset(sc); + else + hif_pci_device_reset(sc); +#else + hif_pci_device_reset(sc); +#endif +} + +#ifdef CONFIG_PCI_MSM +static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) +{ + msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0); + msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0); +} +#else +static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}; +#endif + +/** + * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info + * @sc: HIF PCIe Context + * + * API to log PCIe Config space and SOC info when SOC wakeup timeout happens + * + * Return: Failure to caller + */ +static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc) +{ + uint16_t val = 0; + uint32_t bar = 0; + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc); + struct hif_softc *scn = HIF_GET_SOFTC(sc); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc); + struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl); + struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); + A_target_id_t pci_addr = scn->mem; + + HIF_ERROR("%s: keep_awake_count = %d", + __func__, hif_state->keep_awake_count); + + pfrm_read_config_word(sc->pdev, PCI_VENDOR_ID, &val); + + HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val); + + pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &val); + + HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val); + + pfrm_read_config_word(sc->pdev, PCI_COMMAND, &val); + + HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val); + + pfrm_read_config_word(sc->pdev, PCI_STATUS, &val); + + HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val); + + pfrm_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar); + + HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar); + + HIF_ERROR("%s: SOC_WAKE_ADDR 0%08x", __func__, + hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS)); + + HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__, + hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS + + RTC_STATE_ADDRESS)); + + HIF_ERROR("%s:error, wakeup target", __func__); + hif_msm_pcie_debug_info(sc); + + if (!cfg->enable_self_recovery) + QDF_BUG(0); + + scn->recovery = true; + + if (cbk->set_recovery_in_progress) + cbk->set_recovery_in_progress(cbk->context, true); + + pld_is_pci_link_down(sc->dev); + return -EACCES; +} + +/* + * For now, we use simple on-demand sleep/wake. + * Some possible improvements: + * -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay + * (or perhaps spin/delay for a short while, then convert to sleep/interrupt) + * Careful, though, these functions may be used by + * interrupt handlers ("atomic") + * -Don't use host_reg_table for this code; instead use values directly + * -Use a separate timer to track activity and allow Target to sleep only + * if it hasn't done anything for a while; may even want to delay some + * processing for a short while in order to "batch" (e.g.) transmit + * requests with completion processing into "windows of up time". Costs + * some performance, but improves power utilization. + * -On some platforms, it might be possible to eliminate explicit + * sleep/wakeup. Instead, take a chance that each access works OK. If not, + * recover from the failure by forcing the Target awake. + * -Change keep_awake_count to an atomic_t in order to avoid spin lock + * overhead in some cases. Perhaps this makes more sense when + * CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is + * disabled. + * -It is possible to compile this code out and simply force the Target + * to remain awake. That would yield optimal performance at the cost of + * increased power. See CONFIG_ATH_PCIE_MAX_PERF. + * + * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0). + */ +/** + * hif_target_sleep_state_adjust() - on-demand sleep/wake + * @scn: hif_softc pointer. + * @sleep_ok: bool + * @wait_for_it: bool + * + * Output the pipe error counts of each pipe to log file + * + * Return: int + */ +int hif_pci_target_sleep_state_adjust(struct hif_softc *scn, + bool sleep_ok, bool wait_for_it) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + A_target_id_t pci_addr = scn->mem; + static int max_delay; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + static int debug; + if (scn->recovery) + return -EACCES; + + if (qdf_atomic_read(&scn->link_suspended)) { + HIF_ERROR("%s:invalid access, PCIe link is down", __func__); + debug = true; + QDF_ASSERT(0); + return -EACCES; + } + + if (debug) { + wait_for_it = true; + HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended", + __func__); + QDF_ASSERT(0); + } + + if (sleep_ok) { + qdf_spin_lock_irqsave(&hif_state->keep_awake_lock); + hif_state->keep_awake_count--; + if (hif_state->keep_awake_count == 0) { + /* Allow sleep */ + hif_state->verified_awake = false; + hif_state->sleep_ticks = qdf_system_ticks(); + } + if (hif_state->fake_sleep == false) { + /* Set the Fake Sleep */ + hif_state->fake_sleep = true; + + /* Start the Sleep Timer */ + qdf_timer_stop(&hif_state->sleep_timer); + qdf_timer_start(&hif_state->sleep_timer, + HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS); + } + qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock); + } else { + qdf_spin_lock_irqsave(&hif_state->keep_awake_lock); + + if (hif_state->fake_sleep) { + hif_state->verified_awake = true; + } else { + if (hif_state->keep_awake_count == 0) { + /* Force AWAKE */ + hif_write32_mb(sc, pci_addr + + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_V_MASK); + } + } + hif_state->keep_awake_count++; + qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock); + + if (wait_for_it && !hif_state->verified_awake) { +#define PCIE_SLEEP_ADJUST_TIMEOUT 8000 /* 8Ms */ + int tot_delay = 0; + int curr_delay = 5; + + for (;; ) { + if (hif_targ_is_awake(scn, pci_addr)) { + hif_state->verified_awake = true; + break; + } + if (!hif_pci_targ_is_present(scn, pci_addr)) + break; + if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT) + return hif_log_soc_wakeup_timeout(sc); + + OS_DELAY(curr_delay); + tot_delay += curr_delay; + + if (curr_delay < 50) + curr_delay += 5; + } + + /* + * NB: If Target has to come out of Deep Sleep, + * this may take a few Msecs. Typically, though + * this delay should be <30us. + */ + if (tot_delay > max_delay) + max_delay = tot_delay; + } + } + + if (debug && hif_state->verified_awake) { + debug = 0; + HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x", + __func__, + hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS + + PCIE_INTR_ENABLE_ADDRESS), + hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS + + PCIE_INTR_CAUSE_ADDRESS), + hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS + + CPU_INTR_ADDRESS), + hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS + + PCIE_INTR_CLR_ADDRESS), + hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS + + CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)); + } + + return 0; +} + +#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG +uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset) +{ + uint32_t value; + void *addr; + + addr = scn->mem + offset; + value = hif_read32_mb(scn, addr); + + { + unsigned long irq_flags; + int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM; + + spin_lock_irqsave(&pcie_access_log_lock, irq_flags); + pcie_access_log[idx].seqnum = pcie_access_log_seqnum; + pcie_access_log[idx].is_write = false; + pcie_access_log[idx].addr = addr; + pcie_access_log[idx].value = value; + pcie_access_log_seqnum++; + spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags); + } + + return value; +} + +void +hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value) +{ + void *addr; + + addr = scn->mem + (offset); + hif_write32_mb(scn, addr, value); + + { + unsigned long irq_flags; + int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM; + + spin_lock_irqsave(&pcie_access_log_lock, irq_flags); + pcie_access_log[idx].seqnum = pcie_access_log_seqnum; + pcie_access_log[idx].is_write = true; + pcie_access_log[idx].addr = addr; + pcie_access_log[idx].value = value; + pcie_access_log_seqnum++; + spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags); + } +} + +/** + * hif_target_dump_access_log() - dump access log + * + * dump access log + * + * Return: n/a + */ +void hif_target_dump_access_log(void) +{ + int idx, len, start_idx, cur_idx; + unsigned long irq_flags; + + spin_lock_irqsave(&pcie_access_log_lock, irq_flags); + if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) { + len = PCIE_ACCESS_LOG_NUM; + start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM; + } else { + len = pcie_access_log_seqnum; + start_idx = 0; + } + + for (idx = 0; idx < len; idx++) { + cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM; + HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.", + __func__, idx, + pcie_access_log[cur_idx].seqnum, + pcie_access_log[cur_idx].is_write, + pcie_access_log[cur_idx].addr, + pcie_access_log[cur_idx].value); + } + + pcie_access_log_seqnum = 0; + spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags); +} +#endif + +#ifndef HIF_AHB +int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc) +{ + QDF_BUG(0); + return -EINVAL; +} + +int hif_ahb_configure_irq(struct hif_pci_softc *sc) +{ + QDF_BUG(0); + return -EINVAL; +} +#endif + +static irqreturn_t hif_ce_interrupt_handler(int irq, void *context) +{ + struct ce_tasklet_entry *tasklet_entry = context; + return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry); +} +extern const char *ce_name[]; + +static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id) +{ + struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn); + + return pci_scn->ce_msi_irq_num[ce_id]; +} + +/* hif_srng_msi_irq_disable() - disable the irq for msi + * @hif_sc: hif context + * @ce_id: which ce to disable copy complete interrupts for + * + * since MSI interrupts are not level based, the system can function + * without disabling these interrupts. Interrupt mitigation can be + * added here for better system performance. + */ +static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id) +{ + pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev, + hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); +} + +static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id) +{ + if (__hif_check_link_status(hif_sc)) + return; + + pfrm_enable_irq(hif_sc->qdf_dev->dev, + hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); +} + +static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id) +{ + disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); +} + +static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id) +{ + enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); +} + +static int hif_ce_msi_configure_irq(struct hif_softc *scn) +{ + int ret; + int ce_id, irq; + uint32_t msi_data_start; + uint32_t msi_data_count; + uint32_t msi_irq_start; + struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn); + struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn); + struct CE_attr *host_ce_conf = ce_sc->host_ce_config; + + if (!scn->disable_wake_irq) { + /* do wake irq assignment */ + ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE", + &msi_data_count, + &msi_data_start, + &msi_irq_start); + if (ret) + return ret; + + scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, + msi_irq_start); + + ret = pfrm_request_irq(scn->qdf_dev->dev, scn->wake_irq, + hif_wake_interrupt_handler, + IRQF_NO_SUSPEND, "wlan_wake_irq", scn); + + if (ret) + return ret; + } + + /* do ce irq assignments */ + ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", + &msi_data_count, &msi_data_start, + &msi_irq_start); + if (ret) + goto free_wake_irq; + + if (ce_srng_based(scn)) { + scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable; + scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable; + } else { + scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable; + scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable; + } + + scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq; + + /* needs to match the ce_id -> irq data mapping + * used in the srng parameter configuration + */ + for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { + unsigned int msi_data = (ce_id % msi_data_count) + + msi_irq_start; + if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR) + continue; + irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); + HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)", + __func__, ce_id, msi_data, irq, + &ce_sc->tasklets[ce_id]); + + /* implies the ce is also initialized */ + if (!ce_sc->tasklets[ce_id].inited) + continue; + + pci_sc->ce_msi_irq_num[ce_id] = irq; + ret = pfrm_request_irq(scn->qdf_dev->dev, + irq, hif_ce_interrupt_handler, + IRQF_SHARED, + ce_name[ce_id], + &ce_sc->tasklets[ce_id]); + if (ret) + goto free_irq; + } + + return ret; + +free_irq: + /* the request_irq for the last ce_id failed so skip it. */ + while (ce_id > 0 && ce_id < scn->ce_count) { + unsigned int msi_data; + + ce_id--; + msi_data = (ce_id % msi_data_count) + msi_irq_start; + irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); + pfrm_free_irq(scn->qdf_dev->dev, + irq, &ce_sc->tasklets[ce_id]); + } + +free_wake_irq: + if (!scn->disable_wake_irq) { + pfrm_free_irq(scn->qdf_dev->dev, + scn->wake_irq, scn->qdf_dev->dev); + scn->wake_irq = 0; + } + + return ret; +} + +static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group) +{ + int i; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); + + for (i = 0; i < hif_ext_group->numirq; i++) + pfrm_disable_irq_nosync(scn->qdf_dev->dev, + hif_ext_group->os_irq[i]); +} + +static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group) +{ + int i; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); + + for (i = 0; i < hif_ext_group->numirq; i++) + pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]); +} + +/** + * hif_pci_get_irq_name() - get irqname + * This function gives irqnumber to irqname + * mapping. + * + * @irq_no: irq number + * + * Return: irq name + */ +const char *hif_pci_get_irq_name(int irq_no) +{ + return "pci-dummy"; +} + +#ifdef HIF_CPU_PERF_AFFINE_MASK +/** + * hif_pci_irq_set_affinity_hint() - API to set IRQ affinity + * @hif_ext_group: hif_ext_group to extract the irq info + * + * This function will set the WLAN DP IRQ affinity to the gold + * cores only for defconfig builds + * + * @hif_ext_group: hif_ext_group to extract the irq info + * + * Return: none + */ +void hif_pci_irq_set_affinity_hint( + struct hif_exec_context *hif_ext_group) +{ + int i, ret; + unsigned int cpus; + bool mask_set = false; + + for (i = 0; i < hif_ext_group->numirq; i++) + qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]); + + for (i = 0; i < hif_ext_group->numirq; i++) { + qdf_for_each_online_cpu(cpus) { + if (qdf_topology_physical_package_id(cpus) == + CPU_CLUSTER_TYPE_PERF) { + qdf_cpumask_set_cpu(cpus, + &hif_ext_group-> + new_cpu_mask[i]); + mask_set = true; + } + } + } + for (i = 0; i < hif_ext_group->numirq; i++) { + if (mask_set) { + qdf_dev_modify_irq_status(hif_ext_group->os_irq[i], + IRQ_NO_BALANCING, 0); + ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i], + (struct qdf_cpu_mask *) + &hif_ext_group-> + new_cpu_mask[i]); + qdf_dev_modify_irq_status(hif_ext_group->os_irq[i], + 0, IRQ_NO_BALANCING); + if (ret) + qdf_err("Set affinity %*pbl fails for IRQ %d ", + qdf_cpumask_pr_args(&hif_ext_group-> + new_cpu_mask[i]), + hif_ext_group->os_irq[i]); + else + qdf_debug("Set affinity %*pbl for IRQ: %d", + qdf_cpumask_pr_args(&hif_ext_group-> + new_cpu_mask[i]), + hif_ext_group->os_irq[i]); + } else { + qdf_err("Offline CPU: Set affinity fails for IRQ: %d", + hif_ext_group->os_irq[i]); + } + } +} + +void hif_pci_ce_irq_set_affinity_hint( + struct hif_softc *scn) +{ + int ret; + unsigned int cpus; + struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn); + struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn); + struct CE_attr *host_ce_conf; + int ce_id; + qdf_cpu_mask ce_cpu_mask; + + host_ce_conf = ce_sc->host_ce_config; + qdf_cpumask_clear(&ce_cpu_mask); + + qdf_for_each_online_cpu(cpus) { + if (qdf_topology_physical_package_id(cpus) == + CPU_CLUSTER_TYPE_PERF) { + qdf_cpumask_set_cpu(cpus, + &ce_cpu_mask); + } else { + hif_err_rl("Unable to set cpu mask for offline CPU %d" + , cpus); + } + } + if (qdf_cpumask_empty(&ce_cpu_mask)) { + hif_err_rl("Empty cpu_mask, unable to set CE IRQ affinity"); + return; + } + for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { + if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR) + continue; + qdf_cpumask_clear(&pci_sc->ce_irq_cpu_mask[ce_id]); + qdf_cpumask_copy(&pci_sc->ce_irq_cpu_mask[ce_id], + &ce_cpu_mask); + qdf_dev_modify_irq_status(pci_sc->ce_msi_irq_num[ce_id], + IRQ_NO_BALANCING, 0); + ret = + qdf_dev_set_irq_affinity(pci_sc->ce_msi_irq_num[ce_id], + (struct qdf_cpu_mask *) + &pci_sc->ce_irq_cpu_mask[ce_id]); + qdf_dev_modify_irq_status(pci_sc->ce_msi_irq_num[ce_id], + 0, IRQ_NO_BALANCING); + if (ret) + hif_err_rl("Set affinity %*pbl fails for CE IRQ %d", + qdf_cpumask_pr_args( + &pci_sc->ce_irq_cpu_mask[ce_id]), + pci_sc->ce_msi_irq_num[ce_id]); + else + hif_debug_rl("Set affinity %*pbl for CE IRQ: %d", + qdf_cpumask_pr_args( + &pci_sc->ce_irq_cpu_mask[ce_id]), + pci_sc->ce_msi_irq_num[ce_id]); + } +} +#endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */ + +void hif_pci_config_irq_affinity(struct hif_softc *scn) +{ + int i; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct hif_exec_context *hif_ext_group; + + hif_core_ctl_set_boost(true); + /* Set IRQ affinity for WLAN DP interrupts*/ + for (i = 0; i < hif_state->hif_num_extgroup; i++) { + hif_ext_group = hif_state->hif_ext_group[i]; + hif_pci_irq_set_affinity_hint(hif_ext_group); + } + /* Set IRQ affinity for CE interrupts*/ + hif_pci_ce_irq_set_affinity_hint(scn); +} + +int hif_pci_configure_grp_irq(struct hif_softc *scn, + struct hif_exec_context *hif_ext_group) +{ + int ret = 0; + int irq = 0; + int j; + + hif_ext_group->irq_enable = &hif_exec_grp_irq_enable; + hif_ext_group->irq_disable = &hif_exec_grp_irq_disable; + hif_ext_group->irq_name = &hif_pci_get_irq_name; + hif_ext_group->work_complete = &hif_dummy_grp_done; + + for (j = 0; j < hif_ext_group->numirq; j++) { + irq = hif_ext_group->irq[j]; + + hif_debug("request_irq = %d for grp %d", + irq, hif_ext_group->grp_id); + ret = pfrm_request_irq( + scn->qdf_dev->dev, irq, + hif_ext_group_interrupt_handler, + IRQF_SHARED | IRQF_NO_SUSPEND, + "wlan_EXT_GRP", + hif_ext_group); + if (ret) { + HIF_ERROR("%s: request_irq failed ret = %d", + __func__, ret); + return -EFAULT; + } + hif_ext_group->os_irq[j] = irq; + } + hif_ext_group->irq_requested = true; + return 0; +} + +/** + * hif_configure_irq() - configure interrupt + * + * This function configures interrupt(s) + * + * @sc: PCIe control struct + * @hif_hdl: struct HIF_CE_state + * + * Return: 0 - for success + */ +int hif_configure_irq(struct hif_softc *scn) +{ + int ret = 0; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + + HIF_TRACE("%s: E", __func__); + + if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) { + scn->request_irq_done = false; + return 0; + } + + hif_init_reschedule_tasklet_work(sc); + + ret = hif_ce_msi_configure_irq(scn); + if (ret == 0) { + goto end; + } + + switch (scn->target_info.target_type) { + case TARGET_TYPE_IPQ4019: + ret = hif_ahb_configure_legacy_irq(sc); + break; + case TARGET_TYPE_QCA8074: + case TARGET_TYPE_QCA8074V2: + case TARGET_TYPE_QCA6018: + ret = hif_ahb_configure_irq(sc); + break; + default: + ret = hif_pci_configure_legacy_irq(sc); + break; + } + if (ret < 0) { + HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d", + __func__, ret); + return ret; + } +end: + scn->request_irq_done = true; + return 0; +} + +/** + * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0 + * @scn: hif control structure + * + * Sets IRQ bit in LF Timer Status Address to awake peregrine/swift + * stuck at a polling loop in pcie_address_config in FW + * + * Return: none + */ +static void hif_trigger_timer_irq(struct hif_softc *scn) +{ + int tmp; + /* Trigger IRQ on Peregrine/Swift by setting + * IRQ Bit of LF_TIMER 0 + */ + tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS + + SOC_LF_TIMER_STATUS0_ADDRESS)); + /* Set Raw IRQ Bit */ + tmp |= 1; + /* SOC_LF_TIMER_STATUS0 */ + hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS + + SOC_LF_TIMER_STATUS0_ADDRESS), tmp); +} + +/** + * hif_target_sync() : ensure the target is ready + * @scn: hif control structure + * + * Informs fw that we plan to use legacy interupts so that + * it can begin booting. Ensures that the fw finishes booting + * before continuing. Should be called before trying to write + * to the targets other registers for the first time. + * + * Return: none + */ +static void hif_target_sync(struct hif_softc *scn) +{ + hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), + PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); + /* read to flush pcie write */ + (void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS)); + + hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_V_MASK); + while (!hif_targ_is_awake(scn, scn->mem)) + ; + + if (HAS_FW_INDICATOR) { + int wait_limit = 500; + int fw_ind = 0; + int retry_count = 0; + uint32_t target_type = scn->target_info.target_type; +fw_retry: + HIF_TRACE("%s: Loop checking FW signal", __func__); + while (1) { + fw_ind = hif_read32_mb(scn, scn->mem + + FW_INDICATOR_ADDRESS); + if (fw_ind & FW_IND_INITIALIZED) + break; + if (wait_limit-- < 0) + break; + hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), + PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); + /* read to flush pcie write */ + (void)hif_read32_mb(scn, scn->mem + + (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)); + + qdf_mdelay(10); + } + if (wait_limit < 0) { + if (target_type == TARGET_TYPE_AR9888 && + retry_count++ < 2) { + hif_trigger_timer_irq(scn); + wait_limit = 500; + goto fw_retry; + } + HIF_TRACE("%s: FW signal timed out", + __func__); + qdf_assert_always(0); + } else { + HIF_TRACE("%s: Got FW signal, retries = %x", + __func__, 500-wait_limit); + } + } + hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); +} + +static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc, + struct device *dev) +{ + struct pld_soc_info info; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + + pld_get_soc_info(dev, &info); + sc->mem = info.v_addr; + sc->ce_sc.ol_sc.mem = info.v_addr; + sc->ce_sc.ol_sc.mem_pa = info.p_addr; + scn->target_info.target_version = info.soc_id; + scn->target_info.target_revision = 0; +} + +static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc, + struct device *dev) +{} + +static bool hif_is_pld_based_target(struct hif_pci_softc *sc, + int device_id) +{ + if (!pld_have_platform_driver_support(sc->dev)) + return false; + + switch (device_id) { + case QCA6290_DEVICE_ID: + case QCN9000_DEVICE_ID: + case QCA6290_EMULATION_DEVICE_ID: + case QCA6390_DEVICE_ID: + case QCA6490_DEVICE_ID: + case AR6320_DEVICE_ID: + case QCN7605_DEVICE_ID: + return true; + } + return false; +} + +static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc, + int device_id) +{ + if (hif_is_pld_based_target(sc, device_id)) { + sc->hif_enable_pci = hif_enable_pci_pld; + sc->hif_pci_deinit = hif_pci_deinit_pld; + sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld; + } else { + sc->hif_enable_pci = hif_enable_pci_nopld; + sc->hif_pci_deinit = hif_pci_deinit_nopld; + sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld; + } +} + +#ifdef HIF_REG_WINDOW_SUPPORT +static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc, + u32 target_type) +{ + switch (target_type) { + case TARGET_TYPE_QCN7605: + sc->use_register_windowing = true; + qdf_spinlock_create(&sc->register_access_lock); + sc->register_window = 0; + break; + default: + sc->use_register_windowing = false; + } +} +#else +static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc, + u32 target_type) +{ + sc->use_register_windowing = false; +} +#endif + +/** + * hif_enable_bus(): enable bus + * + * This function enables the bus + * + * @ol_sc: soft_sc struct + * @dev: device pointer + * @bdev: bus dev pointer + * bid: bus id pointer + * type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE + * Return: QDF_STATUS + */ +QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type) +{ + int ret = 0; + uint32_t hif_type; + uint32_t target_type = TARGET_TYPE_UNKNOWN; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc); + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc); + uint16_t revision_id = 0; + int probe_again = 0; + struct pci_dev *pdev = bdev; + const struct pci_device_id *id = (const struct pci_device_id *)bid; + struct hif_target_info *tgt_info; + + if (!ol_sc) { + HIF_ERROR("%s: hif_ctx is NULL", __func__); + return QDF_STATUS_E_NOMEM; + } + /* Following print is used by various tools to identify + * WLAN SOC (e.g. crash dump analysis and reporting tool). + */ + HIF_TRACE("%s: con_mode = 0x%x, WLAN_SOC_device_id = 0x%x", + __func__, hif_get_conparam(ol_sc), id->device); + + sc->pdev = pdev; + sc->dev = &pdev->dev; + sc->devid = id->device; + sc->cacheline_sz = dma_get_cache_alignment(); + tgt_info = hif_get_target_info_handle(hif_hdl); + hif_pci_init_deinit_ops_attach(sc, id->device); + sc->hif_pci_get_soc_info(sc, dev); +again: + ret = sc->hif_enable_pci(sc, pdev, id); + if (ret < 0) { + HIF_ERROR("%s: ERROR - hif_enable_pci error = %d", + __func__, ret); + goto err_enable_pci; + } + HIF_TRACE("%s: hif_enable_pci done", __func__); + + /* Temporary FIX: disable ASPM on peregrine. + * Will be removed after the OTP is programmed + */ + hif_disable_power_gating(hif_hdl); + + device_disable_async_suspend(&pdev->dev); + pfrm_read_config_word(pdev, 0x08, &revision_id); + + ret = hif_get_device_type(id->device, revision_id, + &hif_type, &target_type); + if (ret < 0) { + HIF_ERROR("%s: invalid device id/revision_id", __func__); + goto err_tgtstate; + } + HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x", + __func__, hif_type, target_type); + + hif_register_tbl_attach(ol_sc, hif_type); + hif_target_register_tbl_attach(ol_sc, target_type); + + hif_pci_init_reg_windowing_support(sc, target_type); + + tgt_info->target_type = target_type; + + if (ce_srng_based(ol_sc)) { + HIF_TRACE("%s:Skip tgt_wake up for srng devices\n", __func__); + } else { + ret = hif_pci_probe_tgt_wakeup(sc); + if (ret < 0) { + HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d", + __func__, ret); + if (ret == -EAGAIN) + probe_again++; + goto err_tgtstate; + } + HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__); + } + + if (!ol_sc->mem_pa) { + HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__); + ret = -EIO; + goto err_tgtstate; + } + + if (!ce_srng_based(ol_sc)) { + hif_target_sync(ol_sc); + + if (ADRASTEA_BU) + hif_vote_link_up(hif_hdl); + } + + return 0; + +err_tgtstate: + hif_disable_pci(sc); + sc->pci_enabled = false; + HIF_ERROR("%s: error, hif_disable_pci done", __func__); + return QDF_STATUS_E_ABORTED; + +err_enable_pci: + if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) { + int delay_time; + + HIF_INFO("%s: pci reprobe", __func__); + /* 10, 40, 90, 100, 100, ... */ + delay_time = max(100, 10 * (probe_again * probe_again)); + qdf_mdelay(delay_time); + goto again; + } + return ret; +} + +/** + * hif_pci_irq_enable() - ce_irq_enable + * @scn: hif_softc + * @ce_id: ce_id + * + * Return: void + */ +void hif_pci_irq_enable(struct hif_softc *scn, int ce_id) +{ + uint32_t tmp = 1 << ce_id; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + + qdf_spin_lock_irqsave(&sc->irq_lock); + scn->ce_irq_summary &= ~tmp; + if (scn->ce_irq_summary == 0) { + /* Enable Legacy PCI line interrupts */ + if (LEGACY_INTERRUPTS(sc) && + (scn->target_status != TARGET_STATUS_RESET) && + (!qdf_atomic_read(&scn->link_suspended))) { + + hif_write32_mb(scn, scn->mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), + HOST_GROUP0_MASK); + + hif_read32_mb(scn, scn->mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS)); + } + } + if (scn->hif_init_done == true) + Q_TARGET_ACCESS_END(scn); + qdf_spin_unlock_irqrestore(&sc->irq_lock); + + /* check for missed firmware crash */ + hif_fw_interrupt_handler(0, scn); +} + +/** + * hif_pci_irq_disable() - ce_irq_disable + * @scn: hif_softc + * @ce_id: ce_id + * + * only applicable to legacy copy engine... + * + * Return: void + */ +void hif_pci_irq_disable(struct hif_softc *scn, int ce_id) +{ + /* For Rome only need to wake up target */ + /* target access is maintained until interrupts are re-enabled */ + Q_TARGET_ACCESS_BEGIN(scn); +} + +#ifdef FEATURE_RUNTIME_PM +/** + * hif_pm_stats_runtime_get_record() - record runtime get statistics + * @sc: hif pci context + * @rtpm_dbgid: debug id to trace who use it + * + * + * Return: void + */ +static void hif_pm_stats_runtime_get_record(struct hif_pci_softc *sc, + wlan_rtpm_dbgid rtpm_dbgid) +{ + if (rtpm_dbgid >= RTPM_ID_MAX) { + QDF_BUG(0); + return; + } + qdf_atomic_inc(&sc->pm_stats.runtime_get); + qdf_atomic_inc(&sc->pm_stats.runtime_get_dbgid[rtpm_dbgid]); + sc->pm_stats.runtime_get_timestamp_dbgid[rtpm_dbgid] = + qdf_get_log_timestamp(); +} + +/** + * hif_pm_stats_runtime_put_record() - record runtime put statistics + * @sc: hif pci context + * @rtpm_dbgid: dbg_id to trace who use it + * + * + * Return: void + */ +static void hif_pm_stats_runtime_put_record(struct hif_pci_softc *sc, + wlan_rtpm_dbgid rtpm_dbgid) +{ + if (rtpm_dbgid >= RTPM_ID_MAX) { + QDF_BUG(0); + return; + } + + if (atomic_read(&sc->dev->power.usage_count) <= 0) { + QDF_BUG(0); + return; + } + + qdf_atomic_inc(&sc->pm_stats.runtime_put); + qdf_atomic_inc(&sc->pm_stats.runtime_put_dbgid[rtpm_dbgid]); + sc->pm_stats.runtime_put_timestamp_dbgid[rtpm_dbgid] = + qdf_get_log_timestamp(); +} + +/** + * hif_pm_runtime_get_sync() - do a get operation with sync resume + * @hif_ctx: pointer of HIF context + * @rtpm_dbgid: dbgid to trace who use it + * + * A get operation will prevent a runtime suspend until a corresponding + * put is done. Unlike hif_pm_runtime_get(), this API will do a sync + * resume instead of requesting a resume if it is runtime PM suspended + * so it can only be called in non-atomic context. + * + * Return: 0 if it is runtime PM resumed otherwise an error code. + */ +int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + int pm_state; + int ret; + + if (!sc) + return -EINVAL; + + if (!hif_pci_pm_runtime_enabled(sc)) + return 0; + + pm_state = qdf_atomic_read(&sc->pm_state); + if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED || + pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) + hif_info_high("Runtime PM resume is requested by %ps", + (void *)_RET_IP_); + + hif_pm_stats_runtime_get_record(sc, rtpm_dbgid); + ret = pm_runtime_get_sync(sc->dev); + + /* Get can return 1 if the device is already active, just return + * success in that case. + */ + if (ret > 0) + ret = 0; + + if (ret) { + sc->pm_stats.runtime_get_err++; + hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d", + qdf_atomic_read(&sc->pm_state), ret); + hif_pm_runtime_put(hif_ctx, rtpm_dbgid); + } + + return ret; +} + +/** + * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend + * @hif_ctx: pointer of HIF context + * @rtpm_dbgid: dbgid to trace who use it + * + * This API will do a runtime put operation followed by a sync suspend if usage + * count is 0 so it can only be called in non-atomic context. + * + * Return: 0 for success otherwise an error code + */ +int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + int usage_count, pm_state; + char *err = NULL; + + if (!sc) + return -EINVAL; + + if (!hif_pci_pm_runtime_enabled(sc)) + return 0; + + usage_count = atomic_read(&sc->dev->power.usage_count); + if (usage_count == 1) { + pm_state = qdf_atomic_read(&sc->pm_state); + if (pm_state == HIF_PM_RUNTIME_STATE_NONE) + err = "Ignore unexpected Put as runtime PM is disabled"; + } else if (usage_count == 0) { + err = "Put without a Get Operation"; + } + + if (err) { + hif_pci_runtime_pm_warn(sc, err); + return -EINVAL; + } + + hif_pm_stats_runtime_put_record(sc, rtpm_dbgid); + return pm_runtime_put_sync_suspend(sc->dev); +} + +int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + int pm_state; + + if (!sc) + return -EINVAL; + + if (!hif_pci_pm_runtime_enabled(sc)) + return 0; + + pm_state = qdf_atomic_read(&sc->pm_state); + if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED || + pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) + HIF_INFO("Runtime PM resume is requested by %ps", + (void *)_RET_IP_); + + sc->pm_stats.request_resume++; + sc->pm_stats.last_resume_caller = (void *)_RET_IP_; + + return hif_pm_request_resume(sc->dev); +} + +void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + if (!sc) + return; + + sc->pm_stats.last_busy_marker = (void *)_RET_IP_; + sc->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs(); + + return pm_runtime_mark_last_busy(sc->dev); +} + +void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + if (!sc) + return; + + if (!hif_pci_pm_runtime_enabled(sc)) + return; + + hif_pm_stats_runtime_get_record(sc, rtpm_dbgid); + pm_runtime_get_noresume(sc->dev); +} + +/** + * hif_pm_runtime_get() - do a get opperation on the device + * @hif_ctx: pointer of HIF context + * @rtpm_dbgid: dbgid to trace who use it + * + * A get opperation will prevent a runtime suspend until a + * corresponding put is done. This api should be used when sending + * data. + * + * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED, + * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!! + * + * return: success if the bus is up and a get has been issued + * otherwise an error code. + */ +int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + int ret; + int pm_state; + + if (!scn) { + hif_err("Could not do runtime get, scn is null"); + return -EFAULT; + } + + if (!hif_pci_pm_runtime_enabled(sc)) + return 0; + + pm_state = qdf_atomic_read(&sc->pm_state); + + if (pm_state == HIF_PM_RUNTIME_STATE_ON || + pm_state == HIF_PM_RUNTIME_STATE_NONE) { + hif_pm_stats_runtime_get_record(sc, rtpm_dbgid); + ret = __hif_pm_runtime_get(sc->dev); + + /* Get can return 1 if the device is already active, just return + * success in that case + */ + if (ret > 0) + ret = 0; + + if (ret) + hif_pm_runtime_put(hif_ctx, rtpm_dbgid); + + if (ret && ret != -EINPROGRESS) { + sc->pm_stats.runtime_get_err++; + hif_err("Runtime Get PM Error in pm_state:%d ret: %d", + qdf_atomic_read(&sc->pm_state), ret); + } + + return ret; + } + + if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED || + pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) { + hif_info_high("Runtime PM resume is requested by %ps", + (void *)_RET_IP_); + ret = -EAGAIN; + } else { + ret = -EBUSY; + } + + sc->pm_stats.request_resume++; + sc->pm_stats.last_resume_caller = (void *)_RET_IP_; + hif_pm_request_resume(sc->dev); + + return ret; +} + +/** + * hif_pm_runtime_put() - do a put operation on the device + * @hif_ctx: pointer of HIF context + * @rtpm_dbgid: dbgid to trace who use it + * + * A put operation will allow a runtime suspend after a corresponding + * get was done. This api should be used when sending data. + * + * This api will return a failure if runtime pm is stopped + * This api will return failure if it would decrement the usage count below 0. + * + * return: QDF_STATUS_SUCCESS if the put is performed + */ +int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + int pm_state, usage_count; + char *error = NULL; + + if (!scn) { + HIF_ERROR("%s: Could not do runtime put, scn is null", + __func__); + return -EFAULT; + } + + if (!hif_pci_pm_runtime_enabled(sc)) + return 0; + + usage_count = atomic_read(&sc->dev->power.usage_count); + + if (usage_count == 1) { + pm_state = qdf_atomic_read(&sc->pm_state); + + if (pm_state == HIF_PM_RUNTIME_STATE_NONE) + error = "Ignoring unexpected put when runtime pm is disabled"; + + } else if (usage_count == 0) { + error = "PUT Without a Get Operation"; + } + + if (error) { + hif_pci_runtime_pm_warn(sc, error); + return -EINVAL; + } + + hif_pm_stats_runtime_put_record(sc, rtpm_dbgid); + + hif_pm_runtime_mark_last_busy(hif_ctx); + hif_pm_runtime_put_auto(sc->dev); + + return 0; +} + +/** + * hif_pm_runtime_put_noidle() - do a put operation with no idle + * @hif_ctx: pointer of HIF context + * @rtpm_dbgid: dbgid to trace who use it + * + * This API will do a runtime put no idle operation + * + * Return: 0 for success otherwise an error code + */ +int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx, + wlan_rtpm_dbgid rtpm_dbgid) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + int usage_count, pm_state; + char *err = NULL; + + if (!sc) + return -EINVAL; + + if (!hif_pci_pm_runtime_enabled(sc)) + return 0; + + usage_count = atomic_read(&sc->dev->power.usage_count); + if (usage_count == 1) { + pm_state = qdf_atomic_read(&sc->pm_state); + if (pm_state == HIF_PM_RUNTIME_STATE_NONE) + err = "Ignore unexpected Put as runtime PM is disabled"; + } else if (usage_count == 0) { + err = "Put without a Get Operation"; + } + + if (err) { + hif_pci_runtime_pm_warn(sc, err); + return -EINVAL; + } + + hif_pm_stats_runtime_put_record(sc, rtpm_dbgid); + pm_runtime_put_noidle(sc->dev); + + return 0; +} + +/** + * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol + * reason + * @hif_sc: pci context + * @lock: runtime_pm lock being acquired + * + * Return 0 if successful. + */ +static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc + *hif_sc, struct hif_pm_runtime_lock *lock) +{ + int ret = 0; + + /* + * We shouldn't be setting context->timeout to zero here when + * context is active as we will have a case where Timeout API's + * for the same context called back to back. + * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm + * Set context->timeout to zero in hif_pm_runtime_prevent_suspend + * API to ensure the timeout version is no more active and + * list entry of this context will be deleted during allow suspend. + */ + if (lock->active) + return 0; + + ret = __hif_pm_runtime_get(hif_sc->dev); + + /** + * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or + * RPM_SUSPENDING. Any other negative value is an error. + * We shouldn't be do runtime_put here as in later point allow + * suspend gets called with the the context and there the usage count + * is decremented, so suspend will be prevented. + */ + + if (ret < 0 && ret != -EINPROGRESS) { + hif_sc->pm_stats.runtime_get_err++; + hif_pci_runtime_pm_warn(hif_sc, + "Prevent Suspend Runtime PM Error"); + } + + hif_sc->prevent_suspend_cnt++; + + lock->active = true; + + list_add_tail(&lock->list, &hif_sc->prevent_suspend_list); + + qdf_atomic_inc(&hif_sc->pm_stats.prevent_suspend); + + hif_debug("%s: in pm_state:%s ret: %d", __func__, + hif_pm_runtime_state_to_string( + qdf_atomic_read(&hif_sc->pm_state)), + ret); + + return ret; +} + +static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc, + struct hif_pm_runtime_lock *lock) +{ + struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(hif_sc); + int ret = 0; + int usage_count; + + if (hif_sc->prevent_suspend_cnt == 0) + return ret; + + if (!lock->active) + return ret; + + usage_count = atomic_read(&hif_sc->dev->power.usage_count); + + /* + * During Driver unload, platform driver increments the usage + * count to prevent any runtime suspend getting called. + * So during driver load in HIF_PM_RUNTIME_STATE_NONE state the + * usage_count should be one. Ideally this shouldn't happen as + * context->active should be active for allow suspend to happen + * Handling this case here to prevent any failures. + */ + if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE + && usage_count == 1) || usage_count == 0) { + hif_pci_runtime_pm_warn(hif_sc, + "Allow without a prevent suspend"); + return -EINVAL; + } + + list_del(&lock->list); + + hif_sc->prevent_suspend_cnt--; + + lock->active = false; + lock->timeout = 0; + + hif_pm_runtime_mark_last_busy(hif_ctx); + ret = hif_pm_runtime_put_auto(hif_sc->dev); + + hif_debug("%s: in pm_state:%s ret: %d", __func__, + hif_pm_runtime_state_to_string( + qdf_atomic_read(&hif_sc->pm_state)), + ret); + + qdf_atomic_inc(&hif_sc->pm_stats.allow_suspend); + return ret; +} + +/** + * hif_pm_runtime_lock_timeout_fn() - callback the runtime lock timeout + * @data: calback data that is the pci context + * + * if runtime locks are acquired with a timeout, this function releases + * the locks when the last runtime lock expires. + * + * dummy implementation until lock acquisition is implemented. + */ +static void hif_pm_runtime_lock_timeout_fn(void *data) +{ + struct hif_pci_softc *hif_sc = data; + unsigned long timer_expires; + struct hif_pm_runtime_lock *context, *temp; + + spin_lock_bh(&hif_sc->runtime_lock); + + timer_expires = hif_sc->runtime_timer_expires; + + /* Make sure we are not called too early, this should take care of + * following case + * + * CPU0 CPU1 (timeout function) + * ---- ---------------------- + * spin_lock_irq + * timeout function called + * + * mod_timer() + * + * spin_unlock_irq + * spin_lock_irq + */ + if (timer_expires > 0 && !time_after(timer_expires, jiffies)) { + hif_sc->runtime_timer_expires = 0; + list_for_each_entry_safe(context, temp, + &hif_sc->prevent_suspend_list, list) { + if (context->timeout) { + __hif_pm_runtime_allow_suspend(hif_sc, context); + hif_sc->pm_stats.allow_suspend_timeout++; + } + } + } + + spin_unlock_bh(&hif_sc->runtime_lock); +} + +int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc, + struct hif_pm_runtime_lock *data) +{ + struct hif_softc *sc = HIF_GET_SOFTC(ol_sc); + struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc); + struct hif_pm_runtime_lock *context = data; + + if (!sc->hif_config.enable_runtime_pm) + return 0; + + if (!context) + return -EINVAL; + + if (in_irq()) + WARN_ON(1); + + spin_lock_bh(&hif_sc->runtime_lock); + context->timeout = 0; + __hif_pm_runtime_prevent_suspend(hif_sc, context); + spin_unlock_bh(&hif_sc->runtime_lock); + + return 0; +} + +int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc, + struct hif_pm_runtime_lock *data) +{ + struct hif_softc *sc = HIF_GET_SOFTC(ol_sc); + struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc); + struct hif_pm_runtime_lock *context = data; + + if (!sc->hif_config.enable_runtime_pm) + return 0; + + if (!context) + return -EINVAL; + + if (in_irq()) + WARN_ON(1); + + spin_lock_bh(&hif_sc->runtime_lock); + + __hif_pm_runtime_allow_suspend(hif_sc, context); + + /* The list can be empty as well in cases where + * we have one context in the list and the allow + * suspend came before the timer expires and we delete + * context above from the list. + * When list is empty prevent_suspend count will be zero. + */ + if (hif_sc->prevent_suspend_cnt == 0 && + hif_sc->runtime_timer_expires > 0) { + qdf_timer_free(&hif_sc->runtime_timer); + hif_sc->runtime_timer_expires = 0; + } + + spin_unlock_bh(&hif_sc->runtime_lock); + + return 0; +} + +/** + * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout + * @ol_sc: HIF context + * @lock: which lock is being acquired + * @delay: Timeout in milliseconds + * + * Prevent runtime suspend with a timeout after which runtime suspend would be + * allowed. This API uses a single timer to allow the suspend and timer is + * modified if the timeout is changed before timer fires. + * If the timeout is less than autosuspend_delay then use mark_last_busy instead + * of starting the timer. + * + * It is wise to try not to use this API and correct the design if possible. + * + * Return: 0 on success and negative error code on failure + */ +int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc, + struct hif_pm_runtime_lock *lock, unsigned int delay) +{ + struct hif_softc *sc = HIF_GET_SOFTC(ol_sc); + struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc); + + int ret = 0; + unsigned long expires; + struct hif_pm_runtime_lock *context = lock; + + if (hif_is_load_or_unload_in_progress(sc)) { + HIF_ERROR("%s: Load/unload in progress, ignore!", + __func__); + return -EINVAL; + } + + if (hif_is_recovery_in_progress(sc)) { + HIF_ERROR("%s: LOGP in progress, ignore!", __func__); + return -EINVAL; + } + + if (!sc->hif_config.enable_runtime_pm) + return 0; + + if (!context) + return -EINVAL; + + if (in_irq()) + WARN_ON(1); + + /* + * Don't use internal timer if the timeout is less than auto suspend + * delay. + */ + if (delay <= hif_sc->dev->power.autosuspend_delay) { + hif_pm_request_resume(hif_sc->dev); + hif_pm_runtime_mark_last_busy(ol_sc); + return ret; + } + + expires = jiffies + msecs_to_jiffies(delay); + expires += !expires; + + spin_lock_bh(&hif_sc->runtime_lock); + + context->timeout = delay; + ret = __hif_pm_runtime_prevent_suspend(hif_sc, context); + hif_sc->pm_stats.prevent_suspend_timeout++; + + /* Modify the timer only if new timeout is after already configured + * timeout + */ + if (time_after(expires, hif_sc->runtime_timer_expires)) { + qdf_timer_mod(&hif_sc->runtime_timer, delay); + hif_sc->runtime_timer_expires = expires; + } + + spin_unlock_bh(&hif_sc->runtime_lock); + + HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__, + hif_pm_runtime_state_to_string( + qdf_atomic_read(&hif_sc->pm_state)), + delay, ret); + + return ret; +} + +/** + * hif_runtime_lock_init() - API to initialize Runtime PM context + * @name: Context name + * + * This API initializes the Runtime PM context of the caller and + * return the pointer. + * + * Return: None + */ +int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name) +{ + struct hif_pm_runtime_lock *context; + + HIF_INFO("Initializing Runtime PM wakelock %s", name); + + context = qdf_mem_malloc(sizeof(*context)); + if (!context) + return -ENOMEM; + + context->name = name ? name : "Default"; + lock->lock = context; + + return 0; +} + +/** + * hif_runtime_lock_deinit() - This API frees the runtime pm ctx + * @data: Runtime PM context + * + * Return: void + */ +void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx, + struct hif_pm_runtime_lock *data) +{ + struct hif_pm_runtime_lock *context = data; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + if (!context) { + HIF_ERROR("Runtime PM wakelock context is NULL"); + return; + } + + HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name); + + /* + * Ensure to delete the context list entry and reduce the usage count + * before freeing the context if context is active. + */ + if (sc) { + spin_lock_bh(&sc->runtime_lock); + __hif_pm_runtime_allow_suspend(sc, context); + spin_unlock_bh(&sc->runtime_lock); + } + + qdf_mem_free(context); +} + +/** + * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended + * @hif_ctx: HIF context + * + * Return: true for runtime suspended, otherwise false + */ +bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + return qdf_atomic_read(&sc->pm_state) == + HIF_PM_RUNTIME_STATE_SUSPENDED; +} + +/** + * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr + * @hif_ctx: HIF context + * + * monitor_wake_intr variable can be used to indicate if driver expects wake + * MSI for runtime PM + * + * Return: monitor_wake_intr variable + */ +int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + return qdf_atomic_read(&sc->monitor_wake_intr); +} + +/** + * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr + * @hif_ctx: HIF context + * @val: value to set + * + * monitor_wake_intr variable can be used to indicate if driver expects wake + * MSI for runtime PM + * + * Return: void + */ +void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, + int val) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + qdf_atomic_set(&sc->monitor_wake_intr, val); +} + +/** + * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark my data path + * @hif_ctx: HIF context + * + * Return: void + */ +void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + if (!sc) + return; + + qdf_atomic_set(&sc->pm_dp_rx_busy, 1); + sc->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs(); + + hif_pm_runtime_mark_last_busy(hif_ctx); +} + +/** + * hif_pm_runtime_is_dp_rx_busy() - Check if last mark busy by dp rx + * @hif_ctx: HIF context + * + * Return: dp rx busy set value + */ +int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + if (!sc) + return 0; + + return qdf_atomic_read(&sc->pm_dp_rx_busy); +} + +/** + * hif_pm_runtime_get_dp_rx_busy_mark() - Get last busy by dp rx timestamp + * @hif_ctx: HIF context + * + * Return: timestamp of last mark busy by dp rx + */ +qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + if (!sc) + return 0; + + return sc->dp_last_busy_timestamp; +} + +void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_handle); + + qdf_atomic_set(&scn->pm_link_state, val); +} + +uint8_t hif_pm_get_link_state(struct hif_opaque_softc *hif_handle) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_handle); + + return qdf_atomic_read(&scn->pm_link_state); +} +#endif /* FEATURE_RUNTIME_PM */ + +int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id) +{ + struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn); + + /* legacy case only has one irq */ + return pci_scn->irq; +} + +int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + struct hif_target_info *tgt_info; + + tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn)); + + if (tgt_info->target_type == TARGET_TYPE_QCA6290 || + tgt_info->target_type == TARGET_TYPE_QCA6390 || + tgt_info->target_type == TARGET_TYPE_QCA6490 || + tgt_info->target_type == TARGET_TYPE_QCA8074) { + /* + * Need to consider offset's memtype for QCA6290/QCA8074, + * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be + * well initialized/defined. + */ + return 0; + } + + if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE) + || (offset + sizeof(unsigned int) <= sc->mem_len)) { + return 0; + } + + HIF_TRACE("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)\n", + offset, (uint32_t)(offset + sizeof(unsigned int)), + sc->mem_len); + + return -EINVAL; +} + +/** + * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver + * @scn: hif context + * + * Return: true if soc needs driver bmi otherwise false + */ +bool hif_pci_needs_bmi(struct hif_softc *scn) +{ + return !ce_srng_based(scn); +} + +#ifdef FORCE_WAKE +#ifdef DEVICE_FORCE_WAKE_ENABLE +int hif_force_wake_request(struct hif_opaque_softc *hif_handle) +{ + uint32_t timeout, value; + struct hif_softc *scn = (struct hif_softc *)hif_handle; + struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn); + + HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1); + + if (qdf_in_interrupt()) + timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000; + else + timeout = 0; + + if (pld_force_wake_request_sync(scn->qdf_dev->dev, timeout)) { + hif_err("force wake request send failed"); + HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1); + return -EINVAL; + } + + /* If device's M1 state-change event races here, it can be ignored, + * as the device is expected to immediately move from M2 to M0 + * without entering low power state. + */ + if (!pld_is_device_awake(scn->qdf_dev->dev)) + HIF_INFO("%s: state-change event races, ignore", __func__); + + HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1); + hif_write32_mb(scn, + scn->mem + + PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG, + 0); + hif_write32_mb(scn, + scn->mem + + PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG, + 1); + + HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1); + /* + * do not reset the timeout + * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms + */ + timeout = 0; + do { + value = + hif_read32_mb(scn, + scn->mem + + PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG); + if (value) + break; + qdf_mdelay(FORCE_WAKE_DELAY_MS); + timeout += FORCE_WAKE_DELAY_MS; + } while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS); + + if (!value) { + hif_err("failed handshake mechanism"); + HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1); + return -ETIMEDOUT; + } + + HIF_STATS_INC(pci_scn, soc_force_wake_success, 1); + return 0; +} + +int hif_force_wake_release(struct hif_opaque_softc *hif_handle) +{ + int ret; + struct hif_softc *scn = (struct hif_softc *)hif_handle; + struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn); + + ret = pld_force_wake_release(scn->qdf_dev->dev); + if (ret) { + hif_err("force wake release failure"); + HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1); + return ret; + } + + HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1); + hif_write32_mb(scn, + scn->mem + + PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG, + 0); + HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1); + return 0; +} + +#else /* DEVICE_FORCE_WAKE_ENABLE */ +/** hif_force_wake_request() - Disable the PCIE scratch register + * write/read + * + * Return: 0 + */ +int hif_force_wake_request(struct hif_opaque_softc *hif_handle) +{ + struct hif_softc *scn = (struct hif_softc *)hif_handle; + struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn); + uint32_t timeout; + + HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1); + + if (qdf_in_interrupt()) + timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000; + else + timeout = 0; + + if (pld_force_wake_request_sync(scn->qdf_dev->dev, timeout)) { + hif_err("force wake request send failed"); + HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1); + return -EINVAL; + } + + /* If device's M1 state-change event races here, it can be ignored, + * as the device is expected to immediately move from M2 to M0 + * without entering low power state. + */ + if (!pld_is_device_awake(scn->qdf_dev->dev)) + HIF_INFO("%s: state-change event races, ignore", __func__); + + HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1); + + return 0; +} + +int hif_force_wake_release(struct hif_opaque_softc *hif_handle) +{ + int ret; + struct hif_softc *scn = (struct hif_softc *)hif_handle; + struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn); + + ret = pld_force_wake_release(scn->qdf_dev->dev); + if (ret) { + hif_err("force wake release failure"); + HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1); + return ret; + } + + HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1); + return 0; +} +#endif /* DEVICE_FORCE_WAKE_ENABLE */ + +void hif_print_pci_stats(struct hif_pci_softc *pci_handle) +{ + hif_debug("mhi_force_wake_request_vote: %d", + pci_handle->stats.mhi_force_wake_request_vote); + hif_debug("mhi_force_wake_failure: %d", + pci_handle->stats.mhi_force_wake_failure); + hif_debug("mhi_force_wake_success: %d", + pci_handle->stats.mhi_force_wake_success); + hif_debug("soc_force_wake_register_write_success: %d", + pci_handle->stats.soc_force_wake_register_write_success); + hif_debug("soc_force_wake_failure: %d", + pci_handle->stats.soc_force_wake_failure); + hif_debug("soc_force_wake_success: %d", + pci_handle->stats.soc_force_wake_success); + hif_debug("mhi_force_wake_release_failure: %d", + pci_handle->stats.mhi_force_wake_release_failure); + hif_debug("mhi_force_wake_release_success: %d", + pci_handle->stats.mhi_force_wake_release_success); + hif_debug("oc_force_wake_release_success: %d", + pci_handle->stats.soc_force_wake_release_success); +} +#endif /* FORCE_WAKE */ + +#ifdef FEATURE_HAL_DELAYED_REG_WRITE +int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif) +{ + return pld_prevent_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev); +} + +void hif_allow_link_low_power_states(struct hif_opaque_softc *hif) +{ + pld_allow_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.h b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.h new file mode 100644 index 0000000000000000000000000000000000000000..cdcb558eea1c29d22f6d3dabc00a4573a5eb06c9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.h @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __ATH_PCI_H__ +#define __ATH_PCI_H__ + +#include +#include +#include + +#define ATH_DBG_DEFAULT 0 +#define DRAM_SIZE 0x000a8000 +#include "hif.h" +#include "cepci.h" +#include "ce_main.h" + +#ifdef FORCE_WAKE +/* Register to wake the UMAC from power collapse */ +#define PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG (0x01E04000 + 0x40) +/* Register used for handshake mechanism to validate UMAC is awake */ +#define PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG (0x01E00000 + 0x3004) +/* Timeout duration to validate UMAC wake status */ +#ifdef HAL_CONFIG_SLUB_DEBUG_ON +#define FORCE_WAKE_DELAY_TIMEOUT_MS 500 +#else +#define FORCE_WAKE_DELAY_TIMEOUT_MS 50 +#endif /* HAL_CONFIG_SLUB_DEBUG_ON */ +/* Validate UMAC status every 5ms */ +#define FORCE_WAKE_DELAY_MS 5 +#endif /* FORCE_WAKE */ + +#ifdef QCA_HIF_HIA_EXTND +extern int32_t frac, intval, ar900b_20_targ_clk, qca9888_20_targ_clk; +#endif + +/* An address (e.g. of a buffer) in Copy Engine space. */ + +#define HIF_MAX_TASKLET_NUM 11 +struct hif_tasklet_entry { + uint8_t id; /* 0 - 9: maps to CE, 10: fw */ + void *hif_handler; /* struct hif_pci_softc */ +}; + +struct hang_event_bus_info { + uint16_t tlv_header; + uint16_t dev_id; +} qdf_packed; + +/** + * enum hif_pm_runtime_state - Driver States for Runtime Power Management + * HIF_PM_RUNTIME_STATE_NONE: runtime pm is off + * HIF_PM_RUNTIME_STATE_ON: runtime pm is active and link is active + * HIF_PM_RUNTIME_STATE_RESUMING: a runtime resume is in progress + * HIF_PM_RUNTIME_STATE_SUSPENDING: a runtime suspend is in progress + * HIF_PM_RUNTIME_STATE_SUSPENDED: the driver is runtime suspended + */ +enum hif_pm_runtime_state { + HIF_PM_RUNTIME_STATE_NONE, + HIF_PM_RUNTIME_STATE_ON, + HIF_PM_RUNTIME_STATE_RESUMING, + HIF_PM_RUNTIME_STATE_SUSPENDING, + HIF_PM_RUNTIME_STATE_SUSPENDED, +}; + +#ifdef FEATURE_RUNTIME_PM + +/** + * struct hif_pm_runtime_lock - data structure for preventing runtime suspend + * @list - global list of runtime locks + * @active - true if this lock is preventing suspend + * @name - character string for tracking this lock + */ +struct hif_pm_runtime_lock { + struct list_head list; + bool active; + uint32_t timeout; + const char *name; +}; + +/* Debugging stats for Runtime PM */ +struct hif_pci_pm_stats { + u32 suspended; + u32 suspend_err; + u32 resumed; + atomic_t runtime_get; + atomic_t runtime_put; + atomic_t runtime_get_dbgid[RTPM_ID_MAX]; + atomic_t runtime_put_dbgid[RTPM_ID_MAX]; + uint64_t runtime_get_timestamp_dbgid[RTPM_ID_MAX]; + uint64_t runtime_put_timestamp_dbgid[RTPM_ID_MAX]; + u32 request_resume; + atomic_t allow_suspend; + atomic_t prevent_suspend; + u32 prevent_suspend_timeout; + u32 allow_suspend_timeout; + u32 runtime_get_err; + void *last_resume_caller; + void *last_busy_marker; + qdf_time_t last_busy_timestamp; + unsigned long suspend_jiffies; +}; +#endif + +/** + * struct hif_msi_info - Structure to hold msi info + * @magic: cookie + * @magic_da: dma address + * @dmaContext: dma address + * + * Structure to hold MSI information for PCIe interrupts + */ +struct hif_msi_info { + void *magic; + dma_addr_t magic_da; + OS_DMA_MEM_CONTEXT(dmacontext); +}; + +/** + * struct hif_pci_stats - Account for hif pci based statistics + * @mhi_force_wake_request_vote: vote for mhi + * @mhi_force_wake_failure: mhi force wake failure + * @mhi_force_wake_success: mhi force wake success + * @soc_force_wake_register_write_success: write to soc wake + * @soc_force_wake_failure: soc force wake failure + * @soc_force_wake_success: soc force wake success + * @mhi_force_wake_release_success: mhi force wake release success + * @soc_force_wake_release_success: soc force wake release + */ +struct hif_pci_stats { + uint32_t mhi_force_wake_request_vote; + uint32_t mhi_force_wake_failure; + uint32_t mhi_force_wake_success; + uint32_t soc_force_wake_register_write_success; + uint32_t soc_force_wake_failure; + uint32_t soc_force_wake_success; + uint32_t mhi_force_wake_release_failure; + uint32_t mhi_force_wake_release_success; + uint32_t soc_force_wake_release_success; +}; + +struct hif_pci_softc { + struct HIF_CE_state ce_sc; + void __iomem *mem; /* PCI address. */ + size_t mem_len; + + struct device *dev; /* For efficiency, should be first in struct */ + struct pci_dev *pdev; + int num_msi_intrs; /* number of MSI interrupts granted */ + /* 0 --> using legacy PCI line interrupts */ + struct tasklet_struct intr_tq; /* tasklet */ + struct hif_msi_info msi_info; + int ce_msi_irq_num[CE_COUNT_MAX]; + int irq; + int irq_event; + int cacheline_sz; + u16 devid; + struct hif_tasklet_entry tasklet_entries[HIF_MAX_TASKLET_NUM]; + bool pci_enabled; + bool use_register_windowing; + uint32_t register_window; + qdf_spinlock_t register_access_lock; + qdf_spinlock_t irq_lock; + qdf_work_t reschedule_tasklet_work; + uint32_t lcr_val; +#ifdef FEATURE_RUNTIME_PM + atomic_t pm_state; + atomic_t monitor_wake_intr; + uint32_t prevent_suspend_cnt; + struct hif_pci_pm_stats pm_stats; + struct work_struct pm_work; + spinlock_t runtime_lock; + qdf_timer_t runtime_timer; + struct list_head prevent_suspend_list; + unsigned long runtime_timer_expires; + qdf_runtime_lock_t prevent_linkdown_lock; + atomic_t pm_dp_rx_busy; + qdf_time_t dp_last_busy_timestamp; +#ifdef WLAN_OPEN_SOURCE + struct dentry *pm_dentry; +#endif +#endif + int (*hif_enable_pci)(struct hif_pci_softc *sc, struct pci_dev *pdev, + const struct pci_device_id *id); + void (*hif_pci_deinit)(struct hif_pci_softc *sc); + void (*hif_pci_get_soc_info)(struct hif_pci_softc *sc, + struct device *dev); + struct hif_pci_stats stats; +#ifdef HIF_CPU_PERF_AFFINE_MASK + /* Stores the affinity hint mask for each CE IRQ */ + qdf_cpu_mask ce_irq_cpu_mask[CE_COUNT_MAX]; +#endif +}; + +bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem); +int hif_configure_irq(struct hif_softc *sc); +void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn); +void wlan_tasklet(unsigned long data); +irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg); +int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset); + +/* + * A firmware interrupt to the Host is indicated by the + * low bit of SCRATCH_3_ADDRESS being set. + */ +#define FW_EVENT_PENDING_REG_ADDRESS SCRATCH_3_ADDRESS + +/* + * Typically, MSI Interrupts are used with PCIe. To force use of legacy + * "ABCD" PCI line interrupts rather than MSI, define + * FORCE_LEGACY_PCI_INTERRUPTS. + * Even when NOT forced, the driver may attempt to use legacy PCI interrupts + * MSI allocation fails + */ +#define LEGACY_INTERRUPTS(sc) ((sc)->num_msi_intrs == 0) + +/* + * There may be some pending tx frames during platform suspend. + * Suspend operation should be delayed until those tx frames are + * transferred from the host to target. This macro specifies how + * long suspend thread has to sleep before checking pending tx + * frame count. + */ +#define OL_ATH_TX_DRAIN_WAIT_DELAY 50 /* ms */ + +#define HIF_CE_DRAIN_WAIT_DELAY 10 /* ms */ +/* + * Wait time (in unit of OL_ATH_TX_DRAIN_WAIT_DELAY) for pending + * tx frame completion before suspend. Refer: hif_pci_suspend() + */ +#ifndef QCA_WIFI_3_0_EMU +#define OL_ATH_TX_DRAIN_WAIT_CNT 10 +#else +#define OL_ATH_TX_DRAIN_WAIT_CNT 60 +#endif + +#ifdef FORCE_WAKE +/** + * hif_print_pci_stats() - Display HIF PCI stats + * @hif_ctx - HIF pci handle + * + * Return: None + */ +void hif_print_pci_stats(struct hif_pci_softc *pci_scn); +#else +static inline +void hif_print_pci_stats(struct hif_pci_softc *pci_scn) +{ +} +#endif /* FORCE_WAKE */ + +#ifdef FEATURE_RUNTIME_PM +#include + +static inline int hif_pm_request_resume(struct device *dev) +{ + return pm_request_resume(dev); +} + +static inline int __hif_pm_runtime_get(struct device *dev) +{ + return pm_runtime_get(dev); +} + +static inline int hif_pm_runtime_put_auto(struct device *dev) +{ + return pm_runtime_put_autosuspend(dev); +} + +#endif /* FEATURE_RUNTIME_PM */ + +#ifdef HIF_BUS_LOG_INFO +bool hif_log_pcie_info(struct hif_softc *scn, uint8_t *data, + unsigned int *offset); +#else +static inline +bool hif_log_pcie_info(struct hif_softc *scn, uint8_t *data, + unsigned int *offset) +{ + return false; +} +#endif +#endif /* __ATH_PCI_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci_internal.h b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..c33486219de3773a3887f01bcc18de04efc56925 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci_internal.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2015-2016, 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __IF_PCI_INTERNAL_H__ +#define __IF_PCI_INTERNAL_H__ + +#ifdef DISABLE_L1SS_STATES +#define PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, addr) \ +{ \ + uint32_t lcr_val; \ + pfrm_read_config_dword(pdev, addr, &lcr_val); \ + pfrm_write_config_dword(pdev, addr, (lcr_val & ~0x0000000f)); \ +} +#else +#define PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, addr) +#endif + +#ifdef QCA_WIFI_3_0 +#define PCI_CLR_CAUSE0_REGISTER(sc) \ +{ \ + uint32_t tmp_cause0; \ + tmp_cause0 = hif_read32_mb(sc, sc->mem + PCIE_INTR_CAUSE_ADDRESS); \ + hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS, \ + PCIE_INTR_FIRMWARE_MASK | tmp_cause0); \ + hif_read32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS); \ + hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS, 0); \ + hif_read32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS); \ +} +#else +#define PCI_CLR_CAUSE0_REGISTER(sc) +#endif +#endif /* __IF_PCI_INTERNAL_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/qca6018def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/qca6018def.c new file mode 100644 index 0000000000000000000000000000000000000000..24a9d1f23fc03aa73cc57df34104a9b082a2567f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/qca6018def.c @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" + +#if defined(QCA6018_HEADERS_DEF) + +#undef UMAC +#define WLAN_HEADERS 1 + +#include "wcss_version.h" +#include "wcss_seq_hwiobase.h" +#include "wfss_ce_reg_seq_hwioreg.h" + +#define MISSING 0 + +#define SOC_RESET_CONTROL_OFFSET MISSING +#define GPIO_PIN0_OFFSET MISSING +#define GPIO_PIN1_OFFSET MISSING +#define GPIO_PIN0_CONFIG_MASK MISSING +#define GPIO_PIN1_CONFIG_MASK MISSING +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET MISSING +#define GPIO_PIN11_OFFSET MISSING +#define GPIO_PIN12_OFFSET MISSING +#define GPIO_PIN13_OFFSET MISSING +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS MISSING +#define GPIO_PIN0_CONFIG_LSB MISSING +#define GPIO_PIN0_PAD_PULL_LSB MISSING +#define GPIO_PIN0_PAD_PULL_MASK MISSING +/* SI reg */ +#define SI_CONFIG_ERR_INT_MASK MISSING +#define SI_CONFIG_ERR_INT_LSB MISSING + +#define RTC_SOC_BASE_ADDRESS MISSING +#define RTC_WMAC_BASE_ADDRESS MISSING +#define SOC_CORE_BASE_ADDRESS MISSING +#define WLAN_MAC_BASE_ADDRESS MISSING +#define GPIO_BASE_ADDRESS MISSING +#define ANALOG_INTF_BASE_ADDRESS MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define CE_COUNT 12 +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define SI_BASE_ADDRESS MISSING +#define DRAM_BASE_ADDRESS MISSING + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB MISSING +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK MISSING +#define CLOCK_CONTROL_OFFSET MISSING +#define CLOCK_CONTROL_SI0_CLK_MASK MISSING +#define RESET_CONTROL_SI0_RST_MASK MISSING +#define WLAN_RESET_CONTROL_OFFSET MISSING +#define WLAN_RESET_CONTROL_COLD_RST_MASK MISSING +#define WLAN_RESET_CONTROL_WARM_RST_MASK MISSING +#define CPU_CLOCK_OFFSET MISSING + +#define CPU_CLOCK_STANDARD_LSB MISSING +#define CPU_CLOCK_STANDARD_MASK MISSING +#define LPO_CAL_ENABLE_LSB MISSING +#define LPO_CAL_ENABLE_MASK MISSING +#define WLAN_SYSTEM_SLEEP_OFFSET MISSING + +#define SOC_CHIP_ID_ADDRESS MISSING +#define SOC_CHIP_ID_REVISION_MASK MISSING +#define SOC_CHIP_ID_REVISION_LSB MISSING +#define SOC_CHIP_ID_REVISION_MSB MISSING + +#define FW_IND_EVENT_PENDING MISSING +#define FW_IND_INITIALIZED MISSING + +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB MISSING + +#define SR_WR_INDEX_ADDRESS MISSING +#define DST_WATERMARK_ADDRESS MISSING + +#define DST_WR_INDEX_ADDRESS MISSING +#define SRC_WATERMARK_ADDRESS MISSING +#define SRC_WATERMARK_LOW_MASK MISSING +#define SRC_WATERMARK_HIGH_MASK MISSING +#define DST_WATERMARK_LOW_MASK MISSING +#define DST_WATERMARK_HIGH_MASK MISSING +#define CURRENT_SRRI_ADDRESS MISSING +#define CURRENT_DRRI_ADDRESS MISSING +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_ADDRESS MISSING +#define MISC_IS_ADDRESS MISSING +#define HOST_IS_COPY_COMPLETE_MASK MISSING +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS MISSING +#define CE_DDR_ADDRESS_FOR_RRI_LOW MISSING +#define CE_DDR_ADDRESS_FOR_RRI_HIGH MISSING +#if defined(WCSS_VERSION) && (WCSS_VERSION > 68) +#define HOST_IE_ADDRESS \ + HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_ADDR(\ + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_COMMON_REG_OFFSET) +#define HOST_IE_REG1_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_SRC_RING_IE_SHFT +#define HOST_IE_ADDRESS_2 \ + HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_1_ADDR(\ + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_COMMON_REG_OFFSET) +#define HOST_IE_REG2_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_1_STS_RING_IE_SHFT +#define HOST_IE_ADDRESS_3 \ + HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_ADDR(\ + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_COMMON_REG_OFFSET) +#define HOST_IE_REG3_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_DST_RING_IE_SHFT +#else +#define HOST_IE_ADDRESS UMAC_CE_COMMON_CE_HOST_IE_0 +#define HOST_IE_ADDRESS_2 UMAC_CE_COMMON_CE_HOST_IE_1 +#endif +#define HOST_IE_COPY_COMPLETE_MASK MISSING +#define SR_BA_ADDRESS MISSING +#define SR_BA_ADDRESS_HIGH MISSING +#define SR_SIZE_ADDRESS MISSING +#define CE_CTRL1_ADDRESS MISSING +#define CE_CTRL1_DMAX_LENGTH_MASK MISSING +#define DR_BA_ADDRESS MISSING +#define DR_BA_ADDRESS_HIGH MISSING +#define DR_SIZE_ADDRESS MISSING +#define CE_CMD_REGISTER MISSING +#define CE_MSI_ADDRESS MISSING +#define CE_MSI_ADDRESS_HIGH MISSING +#define CE_MSI_DATA MISSING +#define CE_MSI_ENABLE_BIT MISSING +#define MISC_IE_ADDRESS MISSING +#define MISC_IS_AXI_ERR_MASK MISSING +#define MISC_IS_DST_ADDR_ERR_MASK MISSING +#define MISC_IS_SRC_LEN_ERR_MASK MISSING +#define MISC_IS_DST_MAX_LEN_VIO_MASK MISSING +#define MISC_IS_DST_RING_OVERFLOW_MASK MISSING +#define MISC_IS_SRC_RING_OVERFLOW_MASK MISSING +#define SRC_WATERMARK_LOW_LSB MISSING +#define SRC_WATERMARK_HIGH_LSB MISSING +#define DST_WATERMARK_LOW_LSB MISSING +#define DST_WATERMARK_HIGH_LSB MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB MISSING +#define CE_CTRL1_DMAX_LENGTH_LSB MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_IDX_UPD_EN_MASK MISSING +#define CE_WRAPPER_DEBUG_OFFSET MISSING +#define CE_WRAPPER_DEBUG_SEL_MSB MISSING +#define CE_WRAPPER_DEBUG_SEL_LSB MISSING +#define CE_WRAPPER_DEBUG_SEL_MASK MISSING +#define CE_DEBUG_OFFSET MISSING +#define CE_DEBUG_SEL_MSB MISSING +#define CE_DEBUG_SEL_LSB MISSING +#define CE_DEBUG_SEL_MASK MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS MISSING + +#define QCA6018_BOARD_DATA_SZ MISSING +#define QCA6018_BOARD_EXT_DATA_SZ MISSING + +#define MY_TARGET_DEF QCA6018_TARGETDEF +#define MY_HOST_DEF QCA6018_HOSTDEF +#define MY_CEREG_DEF QCA6018_CE_TARGETDEF +#define MY_TARGET_BOARD_DATA_SZ QCA6018_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ QCA6018_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(QCA6018_CE_TARGETDEF); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *QCA6018_TARGETDEF; +struct hostdef_s *QCA6018_HOSTDEF; +#endif /*QCA6018_HEADERS_DEF */ +qdf_export_symbol(QCA6018_TARGETDEF); +qdf_export_symbol(QCA6018_HOSTDEF); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/qca6290def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/qca6290def.c new file mode 100644 index 0000000000000000000000000000000000000000..4070b10ad4ce3a3bd64d3b430cdf01be96233900 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/qca6290def.c @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if defined(QCA6290_HEADERS_DEF) + +#undef UMAC +#define WLAN_HEADERS 1 + +#include "lithium_top_reg.h" +#include "wcss_version.h" + +#define MISSING 0 + +#define SOC_RESET_CONTROL_OFFSET MISSING +#define GPIO_PIN0_OFFSET MISSING +#define GPIO_PIN1_OFFSET MISSING +#define GPIO_PIN0_CONFIG_MASK MISSING +#define GPIO_PIN1_CONFIG_MASK MISSING +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET MISSING +#define GPIO_PIN11_OFFSET MISSING +#define GPIO_PIN12_OFFSET MISSING +#define GPIO_PIN13_OFFSET MISSING +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS MISSING +#define GPIO_PIN0_CONFIG_LSB MISSING +#define GPIO_PIN0_PAD_PULL_LSB MISSING +#define GPIO_PIN0_PAD_PULL_MASK MISSING +/* SI reg */ +#define SI_CONFIG_ERR_INT_MASK MISSING +#define SI_CONFIG_ERR_INT_LSB MISSING + +#define RTC_SOC_BASE_ADDRESS MISSING +#define RTC_WMAC_BASE_ADDRESS MISSING +#define SOC_CORE_BASE_ADDRESS MISSING +#define WLAN_MAC_BASE_ADDRESS MISSING +#define GPIO_BASE_ADDRESS MISSING +#define ANALOG_INTF_BASE_ADDRESS MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define CE_COUNT 12 +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define SI_BASE_ADDRESS MISSING +#define DRAM_BASE_ADDRESS MISSING + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB MISSING +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK MISSING +#define CLOCK_CONTROL_OFFSET MISSING +#define CLOCK_CONTROL_SI0_CLK_MASK MISSING +#define RESET_CONTROL_SI0_RST_MASK MISSING +#define WLAN_RESET_CONTROL_OFFSET MISSING +#define WLAN_RESET_CONTROL_COLD_RST_MASK MISSING +#define WLAN_RESET_CONTROL_WARM_RST_MASK MISSING +#define CPU_CLOCK_OFFSET MISSING + +#define CPU_CLOCK_STANDARD_LSB MISSING +#define CPU_CLOCK_STANDARD_MASK MISSING +#define LPO_CAL_ENABLE_LSB MISSING +#define LPO_CAL_ENABLE_MASK MISSING +#define WLAN_SYSTEM_SLEEP_OFFSET MISSING + +#define SOC_CHIP_ID_ADDRESS MISSING +#define SOC_CHIP_ID_REVISION_MASK MISSING +#define SOC_CHIP_ID_REVISION_LSB MISSING +#define SOC_CHIP_ID_REVISION_MSB MISSING + +#define FW_IND_EVENT_PENDING MISSING +#define FW_IND_INITIALIZED MISSING + +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB MISSING + +#define SR_WR_INDEX_ADDRESS MISSING +#define DST_WATERMARK_ADDRESS MISSING + +#define DST_WR_INDEX_ADDRESS MISSING +#define SRC_WATERMARK_ADDRESS MISSING +#define SRC_WATERMARK_LOW_MASK MISSING +#define SRC_WATERMARK_HIGH_MASK MISSING +#define DST_WATERMARK_LOW_MASK MISSING +#define DST_WATERMARK_HIGH_MASK MISSING +#define CURRENT_SRRI_ADDRESS MISSING +#define CURRENT_DRRI_ADDRESS MISSING +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_ADDRESS MISSING +#define MISC_IS_ADDRESS MISSING +#define HOST_IS_COPY_COMPLETE_MASK MISSING +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS MISSING +#define CE_DDR_ADDRESS_FOR_RRI_LOW MISSING +#define CE_DDR_ADDRESS_FOR_RRI_HIGH MISSING + +#define HOST_IE_ADDRESS UMAC_CE_COMMON_WFSS_CE_COMMON_R0_CE_HOST_IE_0 +#define HOST_IE_ADDRESS_2 UMAC_CE_COMMON_WFSS_CE_COMMON_R0_CE_HOST_IE_1 + +#define HOST_IE_COPY_COMPLETE_MASK MISSING +#define SR_BA_ADDRESS MISSING +#define SR_BA_ADDRESS_HIGH MISSING +#define SR_SIZE_ADDRESS MISSING +#define CE_CTRL1_ADDRESS MISSING +#define CE_CTRL1_DMAX_LENGTH_MASK MISSING +#define DR_BA_ADDRESS MISSING +#define DR_BA_ADDRESS_HIGH MISSING +#define DR_SIZE_ADDRESS MISSING +#define CE_CMD_REGISTER MISSING +#define CE_MSI_ADDRESS MISSING +#define CE_MSI_ADDRESS_HIGH MISSING +#define CE_MSI_DATA MISSING +#define CE_MSI_ENABLE_BIT MISSING +#define MISC_IE_ADDRESS MISSING +#define MISC_IS_AXI_ERR_MASK MISSING +#define MISC_IS_DST_ADDR_ERR_MASK MISSING +#define MISC_IS_SRC_LEN_ERR_MASK MISSING +#define MISC_IS_DST_MAX_LEN_VIO_MASK MISSING +#define MISC_IS_DST_RING_OVERFLOW_MASK MISSING +#define MISC_IS_SRC_RING_OVERFLOW_MASK MISSING +#define SRC_WATERMARK_LOW_LSB MISSING +#define SRC_WATERMARK_HIGH_LSB MISSING +#define DST_WATERMARK_LOW_LSB MISSING +#define DST_WATERMARK_HIGH_LSB MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB MISSING +#define CE_CTRL1_DMAX_LENGTH_LSB MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_IDX_UPD_EN_MASK MISSING +#define CE_WRAPPER_DEBUG_OFFSET MISSING +#define CE_WRAPPER_DEBUG_SEL_MSB MISSING +#define CE_WRAPPER_DEBUG_SEL_LSB MISSING +#define CE_WRAPPER_DEBUG_SEL_MASK MISSING +#define CE_DEBUG_OFFSET MISSING +#define CE_DEBUG_SEL_MSB MISSING +#define CE_DEBUG_SEL_LSB MISSING +#define CE_DEBUG_SEL_MASK MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS MISSING + +#define QCA6290_BOARD_DATA_SZ MISSING +#define QCA6290_BOARD_EXT_DATA_SZ MISSING + +#define MY_TARGET_DEF QCA6290_TARGETdef +#define MY_HOST_DEF QCA6290_HOSTdef +#define MY_CEREG_DEF QCA6290_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ QCA6290_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ QCA6290_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *QCA6290_TARGETdef; +struct hostdef_s *QCA6290_HOSTdef; +#endif /*QCA6290_HEADERS_DEF */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/qca6390def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/qca6390def.c new file mode 100644 index 0000000000000000000000000000000000000000..59fc7d56290a755db3d0a5de8b1d949c0c20391e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/qca6390def.c @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if defined(QCA6390_HEADERS_DEF) + +#undef UMAC +#define WLAN_HEADERS 1 + +#include "lithium_top_reg.h" +#include "wcss_version.h" + +#define MISSING 0 + +#define SOC_RESET_CONTROL_OFFSET MISSING +#define GPIO_PIN0_OFFSET MISSING +#define GPIO_PIN1_OFFSET MISSING +#define GPIO_PIN0_CONFIG_MASK MISSING +#define GPIO_PIN1_CONFIG_MASK MISSING +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET MISSING +#define GPIO_PIN11_OFFSET MISSING +#define GPIO_PIN12_OFFSET MISSING +#define GPIO_PIN13_OFFSET MISSING +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS MISSING +#define GPIO_PIN0_CONFIG_LSB MISSING +#define GPIO_PIN0_PAD_PULL_LSB MISSING +#define GPIO_PIN0_PAD_PULL_MASK MISSING +/* SI reg */ +#define SI_CONFIG_ERR_INT_MASK MISSING +#define SI_CONFIG_ERR_INT_LSB MISSING + +#define RTC_SOC_BASE_ADDRESS MISSING +#define RTC_WMAC_BASE_ADDRESS MISSING +#define SOC_CORE_BASE_ADDRESS MISSING +#define WLAN_MAC_BASE_ADDRESS MISSING +#define GPIO_BASE_ADDRESS MISSING +#define ANALOG_INTF_BASE_ADDRESS MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define CE_COUNT 12 +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define SI_BASE_ADDRESS MISSING +#define DRAM_BASE_ADDRESS MISSING + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB MISSING +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK MISSING +#define CLOCK_CONTROL_OFFSET MISSING +#define CLOCK_CONTROL_SI0_CLK_MASK MISSING +#define RESET_CONTROL_SI0_RST_MASK MISSING +#define WLAN_RESET_CONTROL_OFFSET MISSING +#define WLAN_RESET_CONTROL_COLD_RST_MASK MISSING +#define WLAN_RESET_CONTROL_WARM_RST_MASK MISSING +#define CPU_CLOCK_OFFSET MISSING + +#define CPU_CLOCK_STANDARD_LSB MISSING +#define CPU_CLOCK_STANDARD_MASK MISSING +#define LPO_CAL_ENABLE_LSB MISSING +#define LPO_CAL_ENABLE_MASK MISSING +#define WLAN_SYSTEM_SLEEP_OFFSET MISSING + +#define SOC_CHIP_ID_ADDRESS MISSING +#define SOC_CHIP_ID_REVISION_MASK MISSING +#define SOC_CHIP_ID_REVISION_LSB MISSING +#define SOC_CHIP_ID_REVISION_MSB MISSING + +#define FW_IND_EVENT_PENDING MISSING +#define FW_IND_INITIALIZED MISSING + +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB MISSING + +#define SR_WR_INDEX_ADDRESS MISSING +#define DST_WATERMARK_ADDRESS MISSING + +#define DST_WR_INDEX_ADDRESS MISSING +#define SRC_WATERMARK_ADDRESS MISSING +#define SRC_WATERMARK_LOW_MASK MISSING +#define SRC_WATERMARK_HIGH_MASK MISSING +#define DST_WATERMARK_LOW_MASK MISSING +#define DST_WATERMARK_HIGH_MASK MISSING +#define CURRENT_SRRI_ADDRESS MISSING +#define CURRENT_DRRI_ADDRESS MISSING +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_ADDRESS MISSING +#define MISC_IS_ADDRESS MISSING +#define HOST_IS_COPY_COMPLETE_MASK MISSING +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS MISSING +#define CE_DDR_ADDRESS_FOR_RRI_LOW MISSING +#define CE_DDR_ADDRESS_FOR_RRI_HIGH MISSING + +#define HOST_IE_ADDRESS UMAC_CE_COMMON_WFSS_CE_COMMON_R0_CE_HOST_IE_0 +#define HOST_IE_ADDRESS_2 UMAC_CE_COMMON_WFSS_CE_COMMON_R0_CE_HOST_IE_1 + +#define HOST_IE_COPY_COMPLETE_MASK MISSING +#define SR_BA_ADDRESS MISSING +#define SR_BA_ADDRESS_HIGH MISSING +#define SR_SIZE_ADDRESS MISSING +#define CE_CTRL1_ADDRESS MISSING +#define CE_CTRL1_DMAX_LENGTH_MASK MISSING +#define DR_BA_ADDRESS MISSING +#define DR_BA_ADDRESS_HIGH MISSING +#define DR_SIZE_ADDRESS MISSING +#define CE_CMD_REGISTER MISSING +#define CE_MSI_ADDRESS MISSING +#define CE_MSI_ADDRESS_HIGH MISSING +#define CE_MSI_DATA MISSING +#define CE_MSI_ENABLE_BIT MISSING +#define MISC_IE_ADDRESS MISSING +#define MISC_IS_AXI_ERR_MASK MISSING +#define MISC_IS_DST_ADDR_ERR_MASK MISSING +#define MISC_IS_SRC_LEN_ERR_MASK MISSING +#define MISC_IS_DST_MAX_LEN_VIO_MASK MISSING +#define MISC_IS_DST_RING_OVERFLOW_MASK MISSING +#define MISC_IS_SRC_RING_OVERFLOW_MASK MISSING +#define SRC_WATERMARK_LOW_LSB MISSING +#define SRC_WATERMARK_HIGH_LSB MISSING +#define DST_WATERMARK_LOW_LSB MISSING +#define DST_WATERMARK_HIGH_LSB MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB MISSING +#define CE_CTRL1_DMAX_LENGTH_LSB MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_IDX_UPD_EN_MASK MISSING +#define CE_WRAPPER_DEBUG_OFFSET MISSING +#define CE_WRAPPER_DEBUG_SEL_MSB MISSING +#define CE_WRAPPER_DEBUG_SEL_LSB MISSING +#define CE_WRAPPER_DEBUG_SEL_MASK MISSING +#define CE_DEBUG_OFFSET MISSING +#define CE_DEBUG_SEL_MSB MISSING +#define CE_DEBUG_SEL_LSB MISSING +#define CE_DEBUG_SEL_MASK MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS MISSING + +#define QCA6390_BOARD_DATA_SZ MISSING +#define QCA6390_BOARD_EXT_DATA_SZ MISSING + +#define MY_TARGET_DEF QCA6390_TARGETdef +#define MY_HOST_DEF QCA6390_HOSTdef +#define MY_CEREG_DEF QCA6390_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ QCA6390_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ QCA6390_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *QCA6390_TARGETdef; +struct hostdef_s *QCA6390_HOSTdef; +#endif /*QCA6390_HEADERS_DEF */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/qca6490def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/qca6490def.c new file mode 100644 index 0000000000000000000000000000000000000000..1dc1ec2a99475ea431e54acd504be7af5d5ae29a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/qca6490def.c @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if defined(QCA6490_HEADERS_DEF) + +#undef UMAC +#define WLAN_HEADERS 1 + +#include "lithium_top_reg.h" +#include "wcss_version.h" + +#define MISSING 0 + +#define SOC_RESET_CONTROL_OFFSET MISSING +#define GPIO_PIN0_OFFSET MISSING +#define GPIO_PIN1_OFFSET MISSING +#define GPIO_PIN0_CONFIG_MASK MISSING +#define GPIO_PIN1_CONFIG_MASK MISSING +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET MISSING +#define GPIO_PIN11_OFFSET MISSING +#define GPIO_PIN12_OFFSET MISSING +#define GPIO_PIN13_OFFSET MISSING +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS MISSING +#define GPIO_PIN0_CONFIG_LSB MISSING +#define GPIO_PIN0_PAD_PULL_LSB MISSING +#define GPIO_PIN0_PAD_PULL_MASK MISSING +/* SI reg */ +#define SI_CONFIG_ERR_INT_MASK MISSING +#define SI_CONFIG_ERR_INT_LSB MISSING + +#define RTC_SOC_BASE_ADDRESS MISSING +#define RTC_WMAC_BASE_ADDRESS MISSING +#define SOC_CORE_BASE_ADDRESS MISSING +#define WLAN_MAC_BASE_ADDRESS MISSING +#define GPIO_BASE_ADDRESS MISSING +#define ANALOG_INTF_BASE_ADDRESS MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define CE_COUNT 12 +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define SI_BASE_ADDRESS MISSING +#define DRAM_BASE_ADDRESS MISSING + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB MISSING +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK MISSING +#define CLOCK_CONTROL_OFFSET MISSING +#define CLOCK_CONTROL_SI0_CLK_MASK MISSING +#define RESET_CONTROL_SI0_RST_MASK MISSING +#define WLAN_RESET_CONTROL_OFFSET MISSING +#define WLAN_RESET_CONTROL_COLD_RST_MASK MISSING +#define WLAN_RESET_CONTROL_WARM_RST_MASK MISSING +#define CPU_CLOCK_OFFSET MISSING + +#define CPU_CLOCK_STANDARD_LSB MISSING +#define CPU_CLOCK_STANDARD_MASK MISSING +#define LPO_CAL_ENABLE_LSB MISSING +#define LPO_CAL_ENABLE_MASK MISSING +#define WLAN_SYSTEM_SLEEP_OFFSET MISSING + +#define SOC_CHIP_ID_ADDRESS MISSING +#define SOC_CHIP_ID_REVISION_MASK MISSING +#define SOC_CHIP_ID_REVISION_LSB MISSING +#define SOC_CHIP_ID_REVISION_MSB MISSING + +#define FW_IND_EVENT_PENDING MISSING +#define FW_IND_INITIALIZED MISSING + +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB MISSING + +#define SR_WR_INDEX_ADDRESS MISSING +#define DST_WATERMARK_ADDRESS MISSING + +#define DST_WR_INDEX_ADDRESS MISSING +#define SRC_WATERMARK_ADDRESS MISSING +#define SRC_WATERMARK_LOW_MASK MISSING +#define SRC_WATERMARK_HIGH_MASK MISSING +#define DST_WATERMARK_LOW_MASK MISSING +#define DST_WATERMARK_HIGH_MASK MISSING +#define CURRENT_SRRI_ADDRESS MISSING +#define CURRENT_DRRI_ADDRESS MISSING +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_ADDRESS MISSING +#define MISC_IS_ADDRESS MISSING +#define HOST_IS_COPY_COMPLETE_MASK MISSING +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS MISSING +#define CE_DDR_ADDRESS_FOR_RRI_LOW MISSING +#define CE_DDR_ADDRESS_FOR_RRI_HIGH MISSING + +#if (defined(WCSS_VERSION) && (WCSS_VERSION >= 72)) +#define HOST_IE_ADDRESS UMAC_CE_COMMON_WFSS_CE_COMMON_R0_CE_HOST_IE_0 +#define HOST_IE_ADDRESS_2 UMAC_CE_COMMON_WFSS_CE_COMMON_R0_CE_HOST_IE_1 +#else /* WCSS_VERSION < 72 */ +#define HOST_IE_ADDRESS UMAC_CE_COMMON_CE_HOST_IE_0 +#define HOST_IE_ADDRESS_2 UMAC_CE_COMMON_CE_HOST_IE_1 +#endif /* WCSS_VERSION */ + +#define HOST_IE_COPY_COMPLETE_MASK MISSING +#define SR_BA_ADDRESS MISSING +#define SR_BA_ADDRESS_HIGH MISSING +#define SR_SIZE_ADDRESS MISSING +#define CE_CTRL1_ADDRESS MISSING +#define CE_CTRL1_DMAX_LENGTH_MASK MISSING +#define DR_BA_ADDRESS MISSING +#define DR_BA_ADDRESS_HIGH MISSING +#define DR_SIZE_ADDRESS MISSING +#define CE_CMD_REGISTER MISSING +#define CE_MSI_ADDRESS MISSING +#define CE_MSI_ADDRESS_HIGH MISSING +#define CE_MSI_DATA MISSING +#define CE_MSI_ENABLE_BIT MISSING +#define MISC_IE_ADDRESS MISSING +#define MISC_IS_AXI_ERR_MASK MISSING +#define MISC_IS_DST_ADDR_ERR_MASK MISSING +#define MISC_IS_SRC_LEN_ERR_MASK MISSING +#define MISC_IS_DST_MAX_LEN_VIO_MASK MISSING +#define MISC_IS_DST_RING_OVERFLOW_MASK MISSING +#define MISC_IS_SRC_RING_OVERFLOW_MASK MISSING +#define SRC_WATERMARK_LOW_LSB MISSING +#define SRC_WATERMARK_HIGH_LSB MISSING +#define DST_WATERMARK_LOW_LSB MISSING +#define DST_WATERMARK_HIGH_LSB MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB MISSING +#define CE_CTRL1_DMAX_LENGTH_LSB MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_IDX_UPD_EN_MASK MISSING +#define CE_WRAPPER_DEBUG_OFFSET MISSING +#define CE_WRAPPER_DEBUG_SEL_MSB MISSING +#define CE_WRAPPER_DEBUG_SEL_LSB MISSING +#define CE_WRAPPER_DEBUG_SEL_MASK MISSING +#define CE_DEBUG_OFFSET MISSING +#define CE_DEBUG_SEL_MSB MISSING +#define CE_DEBUG_SEL_LSB MISSING +#define CE_DEBUG_SEL_MASK MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS MISSING + +#define QCA6490_BOARD_DATA_SZ MISSING +#define QCA6490_BOARD_EXT_DATA_SZ MISSING + +#define MY_TARGET_DEF QCA6490_TARGETdef +#define MY_HOST_DEF QCA6490_HOSTdef +#define MY_CEREG_DEF QCA6490_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ QCA6490_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ QCA6490_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *QCA6490_TARGETdef; +struct hostdef_s *QCA6490_HOSTdef; +#endif /*QCA6490_HEADERS_DEF */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/qca6750def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/qca6750def.c new file mode 100644 index 0000000000000000000000000000000000000000..6ec562ff6c28bfc54e69d328c479c7b2025e4a81 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/qca6750def.c @@ -0,0 +1,218 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#if defined(QCA6750_HEADERS_DEF) + +#undef UMAC +#define WLAN_HEADERS 1 +#include "lithium_top_reg.h" +#include "wfss_ce_reg_seq_hwioreg.h" +#include "wcss_version.h" + +#define MISSING 0 + +#define SOC_RESET_CONTROL_OFFSET MISSING +#define GPIO_PIN0_OFFSET MISSING +#define GPIO_PIN1_OFFSET MISSING +#define GPIO_PIN0_CONFIG_MASK MISSING +#define GPIO_PIN1_CONFIG_MASK MISSING +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET MISSING +#define GPIO_PIN11_OFFSET MISSING +#define GPIO_PIN12_OFFSET MISSING +#define GPIO_PIN13_OFFSET MISSING +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS MISSING +#define GPIO_PIN0_CONFIG_LSB MISSING +#define GPIO_PIN0_PAD_PULL_LSB MISSING +#define GPIO_PIN0_PAD_PULL_MASK MISSING +/* SI reg */ +#define SI_CONFIG_ERR_INT_MASK MISSING +#define SI_CONFIG_ERR_INT_LSB MISSING + +#define RTC_SOC_BASE_ADDRESS MISSING +#define RTC_WMAC_BASE_ADDRESS MISSING +#define SOC_CORE_BASE_ADDRESS MISSING +#define WLAN_MAC_BASE_ADDRESS MISSING +#define GPIO_BASE_ADDRESS MISSING +#define ANALOG_INTF_BASE_ADDRESS MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define CE_COUNT 12 +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define SI_BASE_ADDRESS MISSING +#define DRAM_BASE_ADDRESS MISSING + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB MISSING +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK MISSING +#define CLOCK_CONTROL_OFFSET MISSING +#define CLOCK_CONTROL_SI0_CLK_MASK MISSING +#define RESET_CONTROL_SI0_RST_MASK MISSING +#define WLAN_RESET_CONTROL_OFFSET MISSING +#define WLAN_RESET_CONTROL_COLD_RST_MASK MISSING +#define WLAN_RESET_CONTROL_WARM_RST_MASK MISSING +#define CPU_CLOCK_OFFSET MISSING + +#define CPU_CLOCK_STANDARD_LSB MISSING +#define CPU_CLOCK_STANDARD_MASK MISSING +#define LPO_CAL_ENABLE_LSB MISSING +#define LPO_CAL_ENABLE_MASK MISSING +#define WLAN_SYSTEM_SLEEP_OFFSET MISSING + +#define SOC_CHIP_ID_ADDRESS MISSING +#define SOC_CHIP_ID_REVISION_MASK MISSING +#define SOC_CHIP_ID_REVISION_LSB MISSING +#define SOC_CHIP_ID_REVISION_MSB MISSING + +#define FW_IND_EVENT_PENDING MISSING +#define FW_IND_INITIALIZED MISSING + +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB MISSING + +#define SR_WR_INDEX_ADDRESS MISSING +#define DST_WATERMARK_ADDRESS MISSING + +#define DST_WR_INDEX_ADDRESS MISSING +#define SRC_WATERMARK_ADDRESS MISSING +#define SRC_WATERMARK_LOW_MASK MISSING +#define SRC_WATERMARK_HIGH_MASK MISSING +#define DST_WATERMARK_LOW_MASK MISSING +#define DST_WATERMARK_HIGH_MASK MISSING +#define CURRENT_SRRI_ADDRESS MISSING +#define CURRENT_DRRI_ADDRESS MISSING +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_ADDRESS MISSING +#define MISC_IS_ADDRESS MISSING +#define HOST_IS_COPY_COMPLETE_MASK MISSING +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS MISSING +#define CE_DDR_ADDRESS_FOR_RRI_LOW MISSING +#define CE_DDR_ADDRESS_FOR_RRI_HIGH MISSING + +#define HOST_IE_ADDRESS HWIO_SOC_CE_COMMON_WFSS_CE_COMMON_R0_CE_HOST_IE_0_ADDR +#define HOST_IE_ADDRESS_2 HWIO_SOC_CE_COMMON_WFSS_CE_COMMON_R0_CE_HOST_IE_1_ADDR + +#define HOST_IE_COPY_COMPLETE_MASK MISSING +#define SR_BA_ADDRESS MISSING +#define SR_BA_ADDRESS_HIGH MISSING +#define SR_SIZE_ADDRESS MISSING +#define CE_CTRL1_ADDRESS MISSING +#define CE_CTRL1_DMAX_LENGTH_MASK MISSING +#define DR_BA_ADDRESS MISSING +#define DR_BA_ADDRESS_HIGH MISSING +#define DR_SIZE_ADDRESS MISSING +#define CE_CMD_REGISTER MISSING +#define CE_MSI_ADDRESS MISSING +#define CE_MSI_ADDRESS_HIGH MISSING +#define CE_MSI_DATA MISSING +#define CE_MSI_ENABLE_BIT MISSING +#define MISC_IE_ADDRESS MISSING +#define MISC_IS_AXI_ERR_MASK MISSING +#define MISC_IS_DST_ADDR_ERR_MASK MISSING +#define MISC_IS_SRC_LEN_ERR_MASK MISSING +#define MISC_IS_DST_MAX_LEN_VIO_MASK MISSING +#define MISC_IS_DST_RING_OVERFLOW_MASK MISSING +#define MISC_IS_SRC_RING_OVERFLOW_MASK MISSING +#define SRC_WATERMARK_LOW_LSB MISSING +#define SRC_WATERMARK_HIGH_LSB MISSING +#define DST_WATERMARK_LOW_LSB MISSING +#define DST_WATERMARK_HIGH_LSB MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB MISSING +#define CE_CTRL1_DMAX_LENGTH_LSB MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_IDX_UPD_EN_MASK MISSING +#define CE_WRAPPER_DEBUG_OFFSET MISSING +#define CE_WRAPPER_DEBUG_SEL_MSB MISSING +#define CE_WRAPPER_DEBUG_SEL_LSB MISSING +#define CE_WRAPPER_DEBUG_SEL_MASK MISSING +#define CE_DEBUG_OFFSET MISSING +#define CE_DEBUG_SEL_MSB MISSING +#define CE_DEBUG_SEL_LSB MISSING +#define CE_DEBUG_SEL_MASK MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS MISSING + +#define QCA6750_BOARD_DATA_SZ MISSING +#define QCA6750_BOARD_EXT_DATA_SZ MISSING + +#define MY_TARGET_DEF QCA6750_TARGETdef +#define MY_HOST_DEF QCA6750_HOSTdef +#define MY_CEREG_DEF QCA6750_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ QCA6750_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ QCA6750_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *QCA6750_TARGETdef; +struct hostdef_s *QCA6750_HOSTdef; +#endif /*QCA6750_HEADERS_DEF */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/qca8074def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/qca8074def.c new file mode 100644 index 0000000000000000000000000000000000000000..51d32fdd028d0b50120c7713dc5859023cfcb57e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/qca8074def.c @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" + +#if defined(QCA8074_HEADERS_DEF) + +#undef UMAC +#define WLAN_HEADERS 1 + +#include "wcss_version.h" +#include "wcss_seq_hwiobase.h" +#include "wfss_ce_reg_seq_hwioreg.h" + +#define MISSING 0 + +#define SOC_RESET_CONTROL_OFFSET MISSING +#define GPIO_PIN0_OFFSET MISSING +#define GPIO_PIN1_OFFSET MISSING +#define GPIO_PIN0_CONFIG_MASK MISSING +#define GPIO_PIN1_CONFIG_MASK MISSING +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET MISSING +#define GPIO_PIN11_OFFSET MISSING +#define GPIO_PIN12_OFFSET MISSING +#define GPIO_PIN13_OFFSET MISSING +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS MISSING +#define GPIO_PIN0_CONFIG_LSB MISSING +#define GPIO_PIN0_PAD_PULL_LSB MISSING +#define GPIO_PIN0_PAD_PULL_MASK MISSING +/* SI reg */ +#define SI_CONFIG_ERR_INT_MASK MISSING +#define SI_CONFIG_ERR_INT_LSB MISSING + +#define RTC_SOC_BASE_ADDRESS MISSING +#define RTC_WMAC_BASE_ADDRESS MISSING +#define SOC_CORE_BASE_ADDRESS MISSING +#define WLAN_MAC_BASE_ADDRESS MISSING +#define GPIO_BASE_ADDRESS MISSING +#define ANALOG_INTF_BASE_ADDRESS MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define CE_COUNT 12 +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define SI_BASE_ADDRESS MISSING +#define DRAM_BASE_ADDRESS MISSING + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB MISSING +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK MISSING +#define CLOCK_CONTROL_OFFSET MISSING +#define CLOCK_CONTROL_SI0_CLK_MASK MISSING +#define RESET_CONTROL_SI0_RST_MASK MISSING +#define WLAN_RESET_CONTROL_OFFSET MISSING +#define WLAN_RESET_CONTROL_COLD_RST_MASK MISSING +#define WLAN_RESET_CONTROL_WARM_RST_MASK MISSING +#define CPU_CLOCK_OFFSET MISSING + +#define CPU_CLOCK_STANDARD_LSB MISSING +#define CPU_CLOCK_STANDARD_MASK MISSING +#define LPO_CAL_ENABLE_LSB MISSING +#define LPO_CAL_ENABLE_MASK MISSING +#define WLAN_SYSTEM_SLEEP_OFFSET MISSING + +#define SOC_CHIP_ID_ADDRESS MISSING +#define SOC_CHIP_ID_REVISION_MASK MISSING +#define SOC_CHIP_ID_REVISION_LSB MISSING +#define SOC_CHIP_ID_REVISION_MSB MISSING + +#define FW_IND_EVENT_PENDING MISSING +#define FW_IND_INITIALIZED MISSING + +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB MISSING + +#define SR_WR_INDEX_ADDRESS MISSING +#define DST_WATERMARK_ADDRESS MISSING + +#define DST_WR_INDEX_ADDRESS MISSING +#define SRC_WATERMARK_ADDRESS MISSING +#define SRC_WATERMARK_LOW_MASK MISSING +#define SRC_WATERMARK_HIGH_MASK MISSING +#define DST_WATERMARK_LOW_MASK MISSING +#define DST_WATERMARK_HIGH_MASK MISSING +#define CURRENT_SRRI_ADDRESS MISSING +#define CURRENT_DRRI_ADDRESS MISSING +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_ADDRESS MISSING +#define MISC_IS_ADDRESS MISSING +#define HOST_IS_COPY_COMPLETE_MASK MISSING +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS MISSING +#define CE_DDR_ADDRESS_FOR_RRI_LOW MISSING +#define CE_DDR_ADDRESS_FOR_RRI_HIGH MISSING +#if defined(WCSS_VERSION) && (WCSS_VERSION > 68) +#define HOST_IE_ADDRESS \ + HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_ADDR(\ + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_COMMON_REG_OFFSET) +#define HOST_IE_REG1_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_SRC_RING_IE_SHFT +#define HOST_IE_ADDRESS_2 \ + HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_1_ADDR(\ + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_COMMON_REG_OFFSET) +#define HOST_IE_REG2_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_1_STS_RING_IE_SHFT +#define HOST_IE_ADDRESS_3 \ + HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_ADDR(\ + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_COMMON_REG_OFFSET) +#define HOST_IE_REG3_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_DST_RING_IE_SHFT +#else +#define HOST_IE_ADDRESS UMAC_CE_COMMON_CE_HOST_IE_0 +#define HOST_IE_ADDRESS_2 UMAC_CE_COMMON_CE_HOST_IE_1 +#endif +#define HOST_IE_COPY_COMPLETE_MASK MISSING +#define SR_BA_ADDRESS MISSING +#define SR_BA_ADDRESS_HIGH MISSING +#define SR_SIZE_ADDRESS MISSING +#define CE_CTRL1_ADDRESS MISSING +#define CE_CTRL1_DMAX_LENGTH_MASK MISSING +#define DR_BA_ADDRESS MISSING +#define DR_BA_ADDRESS_HIGH MISSING +#define DR_SIZE_ADDRESS MISSING +#define CE_CMD_REGISTER MISSING +#define CE_MSI_ADDRESS MISSING +#define CE_MSI_ADDRESS_HIGH MISSING +#define CE_MSI_DATA MISSING +#define CE_MSI_ENABLE_BIT MISSING +#define MISC_IE_ADDRESS MISSING +#define MISC_IS_AXI_ERR_MASK MISSING +#define MISC_IS_DST_ADDR_ERR_MASK MISSING +#define MISC_IS_SRC_LEN_ERR_MASK MISSING +#define MISC_IS_DST_MAX_LEN_VIO_MASK MISSING +#define MISC_IS_DST_RING_OVERFLOW_MASK MISSING +#define MISC_IS_SRC_RING_OVERFLOW_MASK MISSING +#define SRC_WATERMARK_LOW_LSB MISSING +#define SRC_WATERMARK_HIGH_LSB MISSING +#define DST_WATERMARK_LOW_LSB MISSING +#define DST_WATERMARK_HIGH_LSB MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB MISSING +#define CE_CTRL1_DMAX_LENGTH_LSB MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_IDX_UPD_EN_MASK MISSING +#define CE_WRAPPER_DEBUG_OFFSET MISSING +#define CE_WRAPPER_DEBUG_SEL_MSB MISSING +#define CE_WRAPPER_DEBUG_SEL_LSB MISSING +#define CE_WRAPPER_DEBUG_SEL_MASK MISSING +#define CE_DEBUG_OFFSET MISSING +#define CE_DEBUG_SEL_MSB MISSING +#define CE_DEBUG_SEL_LSB MISSING +#define CE_DEBUG_SEL_MASK MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS MISSING + +#define QCA8074_BOARD_DATA_SZ MISSING +#define QCA8074_BOARD_EXT_DATA_SZ MISSING + +#define MY_TARGET_DEF QCA8074_TARGETdef +#define MY_HOST_DEF QCA8074_HOSTdef +#define MY_CEREG_DEF QCA8074_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ QCA8074_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ QCA8074_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(QCA8074_CE_TARGETdef); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *QCA8074_TARGETdef; +struct hostdef_s *QCA8074_HOSTdef; +#endif /*QCA8074_HEADERS_DEF */ +qdf_export_symbol(QCA8074_TARGETdef); +qdf_export_symbol(QCA8074_HOSTdef); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/qca8074v2def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/qca8074v2def.c new file mode 100644 index 0000000000000000000000000000000000000000..8b1667f76efeb6e56e00cdc612f0847bd4564b50 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/qca8074v2def.c @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" + +#if defined(QCA8074V2_HEADERS_DEF) + +#undef UMAC +#define WLAN_HEADERS 1 + +#include "wcss_version.h" +#include "wcss_seq_hwiobase.h" +#include "wfss_ce_reg_seq_hwioreg.h" + +#define MISSING 0 + +#define SOC_RESET_CONTROL_OFFSET MISSING +#define GPIO_PIN0_OFFSET MISSING +#define GPIO_PIN1_OFFSET MISSING +#define GPIO_PIN0_CONFIG_MASK MISSING +#define GPIO_PIN1_CONFIG_MASK MISSING +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET MISSING +#define GPIO_PIN11_OFFSET MISSING +#define GPIO_PIN12_OFFSET MISSING +#define GPIO_PIN13_OFFSET MISSING +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS MISSING +#define GPIO_PIN0_CONFIG_LSB MISSING +#define GPIO_PIN0_PAD_PULL_LSB MISSING +#define GPIO_PIN0_PAD_PULL_MASK MISSING +/* SI reg */ +#define SI_CONFIG_ERR_INT_MASK MISSING +#define SI_CONFIG_ERR_INT_LSB MISSING + +#define RTC_SOC_BASE_ADDRESS MISSING +#define RTC_WMAC_BASE_ADDRESS MISSING +#define SOC_CORE_BASE_ADDRESS MISSING +#define WLAN_MAC_BASE_ADDRESS MISSING +#define GPIO_BASE_ADDRESS MISSING +#define ANALOG_INTF_BASE_ADDRESS MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define CE_COUNT 12 +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define SI_BASE_ADDRESS MISSING +#define DRAM_BASE_ADDRESS MISSING + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB MISSING +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK MISSING +#define CLOCK_CONTROL_OFFSET MISSING +#define CLOCK_CONTROL_SI0_CLK_MASK MISSING +#define RESET_CONTROL_SI0_RST_MASK MISSING +#define WLAN_RESET_CONTROL_OFFSET MISSING +#define WLAN_RESET_CONTROL_COLD_RST_MASK MISSING +#define WLAN_RESET_CONTROL_WARM_RST_MASK MISSING +#define CPU_CLOCK_OFFSET MISSING + +#define CPU_CLOCK_STANDARD_LSB MISSING +#define CPU_CLOCK_STANDARD_MASK MISSING +#define LPO_CAL_ENABLE_LSB MISSING +#define LPO_CAL_ENABLE_MASK MISSING +#define WLAN_SYSTEM_SLEEP_OFFSET MISSING + +#define SOC_CHIP_ID_ADDRESS MISSING +#define SOC_CHIP_ID_REVISION_MASK MISSING +#define SOC_CHIP_ID_REVISION_LSB MISSING +#define SOC_CHIP_ID_REVISION_MSB MISSING + +#define FW_IND_EVENT_PENDING MISSING +#define FW_IND_INITIALIZED MISSING + +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB MISSING + +#define SR_WR_INDEX_ADDRESS MISSING +#define DST_WATERMARK_ADDRESS MISSING + +#define DST_WR_INDEX_ADDRESS MISSING +#define SRC_WATERMARK_ADDRESS MISSING +#define SRC_WATERMARK_LOW_MASK MISSING +#define SRC_WATERMARK_HIGH_MASK MISSING +#define DST_WATERMARK_LOW_MASK MISSING +#define DST_WATERMARK_HIGH_MASK MISSING +#define CURRENT_SRRI_ADDRESS MISSING +#define CURRENT_DRRI_ADDRESS MISSING +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_ADDRESS MISSING +#define MISC_IS_ADDRESS MISSING +#define HOST_IS_COPY_COMPLETE_MASK MISSING +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS MISSING +#define CE_DDR_ADDRESS_FOR_RRI_LOW MISSING +#define CE_DDR_ADDRESS_FOR_RRI_HIGH MISSING +#if defined(WCSS_VERSION) && (WCSS_VERSION > 68) +#define HOST_IE_ADDRESS \ + HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_ADDR(\ + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_COMMON_REG_OFFSET) +#define HOST_IE_REG1_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_SRC_RING_IE_SHFT +#define HOST_IE_ADDRESS_2 \ + HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_1_ADDR(\ + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_COMMON_REG_OFFSET) +#define HOST_IE_REG2_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_1_STS_RING_IE_SHFT +#define HOST_IE_ADDRESS_3 \ + HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_ADDR(\ + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_COMMON_REG_OFFSET) +#define HOST_IE_REG3_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_DST_RING_IE_SHFT +#else +#define HOST_IE_ADDRESS UMAC_CE_COMMON_CE_HOST_IE_0 +#define HOST_IE_ADDRESS_2 UMAC_CE_COMMON_CE_HOST_IE_1 +#endif +#define HOST_IE_COPY_COMPLETE_MASK MISSING +#define SR_BA_ADDRESS MISSING +#define SR_BA_ADDRESS_HIGH MISSING +#define SR_SIZE_ADDRESS MISSING +#define CE_CTRL1_ADDRESS MISSING +#define CE_CTRL1_DMAX_LENGTH_MASK MISSING +#define DR_BA_ADDRESS MISSING +#define DR_BA_ADDRESS_HIGH MISSING +#define DR_SIZE_ADDRESS MISSING +#define CE_CMD_REGISTER MISSING +#define CE_MSI_ADDRESS MISSING +#define CE_MSI_ADDRESS_HIGH MISSING +#define CE_MSI_DATA MISSING +#define CE_MSI_ENABLE_BIT MISSING +#define MISC_IE_ADDRESS MISSING +#define MISC_IS_AXI_ERR_MASK MISSING +#define MISC_IS_DST_ADDR_ERR_MASK MISSING +#define MISC_IS_SRC_LEN_ERR_MASK MISSING +#define MISC_IS_DST_MAX_LEN_VIO_MASK MISSING +#define MISC_IS_DST_RING_OVERFLOW_MASK MISSING +#define MISC_IS_SRC_RING_OVERFLOW_MASK MISSING +#define SRC_WATERMARK_LOW_LSB MISSING +#define SRC_WATERMARK_HIGH_LSB MISSING +#define DST_WATERMARK_LOW_LSB MISSING +#define DST_WATERMARK_HIGH_LSB MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB MISSING +#define CE_CTRL1_DMAX_LENGTH_LSB MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_IDX_UPD_EN_MASK MISSING +#define CE_WRAPPER_DEBUG_OFFSET MISSING +#define CE_WRAPPER_DEBUG_SEL_MSB MISSING +#define CE_WRAPPER_DEBUG_SEL_LSB MISSING +#define CE_WRAPPER_DEBUG_SEL_MASK MISSING +#define CE_DEBUG_OFFSET MISSING +#define CE_DEBUG_SEL_MSB MISSING +#define CE_DEBUG_SEL_LSB MISSING +#define CE_DEBUG_SEL_MASK MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS MISSING + +#define QCA8074V2_BOARD_DATA_SZ MISSING +#define QCA8074V2_BOARD_EXT_DATA_SZ MISSING + +#define MY_TARGET_DEF QCA8074V2_TARGETDEF +#define MY_HOST_DEF QCA8074V2_HOSTDEF +#define MY_CEREG_DEF QCA8074V2_CE_TARGETDEF +#define MY_TARGET_BOARD_DATA_SZ QCA8074V2_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ QCA8074V2_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(QCA8074V2_CE_TARGETDEF); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *QCA8074V2_TARGETDEF; +struct hostdef_s *QCA8074V2_HOSTDEF; +#endif /*QCA8074V2_HEADERS_DEF */ +qdf_export_symbol(QCA8074V2_TARGETDEF); +qdf_export_symbol(QCA8074V2_HOSTDEF); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/qca9888def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/qca9888def.c new file mode 100644 index 0000000000000000000000000000000000000000..f44313cbb11c1d0920ae8966f1cc3f5ae6533f0c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/qca9888def.c @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2015,2016,2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "qdf_module.h" + +#if defined(QCA9888_HEADERS_DEF) +#define QCA9888 1 + +#define WLAN_HEADERS 1 +#include "common_drv.h" +#include "QCA9888/v2/soc_addrs.h" +#include "QCA9888/v2/extra/hw/apb_map.h" +#include "QCA9888/v2/hw/gpio_athr_wlan_reg.h" +#ifdef WLAN_HEADERS + +#include "QCA9888/v2/extra/hw/wifi_top_reg_map.h" +#include "QCA9888/v2/hw/rtc_soc_reg.h" + +#endif +#include "QCA9888/v2/hw/si_reg.h" +#include "QCA9888/v2/extra/hw/pcie_local_reg.h" +#include "QCA9888/v2/hw/ce_wrapper_reg_csr.h" + +#include "QCA9888/v2/extra/hw/soc_core_reg.h" +#include "QCA9888/v2/hw/soc_pcie_reg.h" +#include "QCA9888/v2/extra/hw/ce_reg_csr.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Base address is defined in pcie_local_reg.h. Macros which access the + * registers include the base address in their definition. + */ +#define PCIE_LOCAL_BASE_ADDRESS 0 + +#define FW_EVENT_PENDING_ADDRESS (WIFICMN_SCRATCH_3_ADDRESS) +#define DRAM_BASE_ADDRESS TARG_DRAM_START + +/* Backwards compatibility -- TBDXXX */ + +#define MISSING 0 + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB WIFI_SYSTEM_SLEEP_DISABLE_LSB +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK WIFI_SYSTEM_SLEEP_DISABLE_MASK +#define WLAN_RESET_CONTROL_COLD_RST_MASK WIFI_RESET_CONTROL_MAC_COLD_RST_MASK +#define WLAN_RESET_CONTROL_WARM_RST_MASK WIFI_RESET_CONTROL_MAC_WARM_RST_MASK +#define SOC_CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_ADDRESS +#define SOC_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_ADDRESS +#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_ADDRESS +#define SOC_LPO_CAL_OFFSET SOC_LPO_CAL_ADDRESS +#define SOC_RESET_CONTROL_CE_RST_MASK WIFI_RESET_CONTROL_CE_RESET_MASK +#define WLAN_SYSTEM_SLEEP_OFFSET WIFI_SYSTEM_SLEEP_ADDRESS +#define WLAN_RESET_CONTROL_OFFSET WIFI_RESET_CONTROL_ADDRESS +#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET +#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK +#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS +#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS +#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS +#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK +#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK +#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS +#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS +#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS +#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS +#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS +#define SI_CONFIG_OFFSET SI_CONFIG_ADDRESS +#define SI_TX_DATA0_OFFSET SI_TX_DATA0_ADDRESS +#define SI_TX_DATA1_OFFSET SI_TX_DATA1_ADDRESS +#define SI_RX_DATA0_OFFSET SI_RX_DATA0_ADDRESS +#define SI_RX_DATA1_OFFSET SI_RX_DATA1_ADDRESS +#define SI_CS_OFFSET SI_CS_ADDRESS +#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB +#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK +#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB +#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK +#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* MAC Descriptor */ +#define RX_PPDU_END_ANTENNA_OFFSET_DWORD (RX_PPDU_END_25_RX_ANTENNA_OFFSET >> 2) +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS WLAN_GPIO_ENABLE_W1TS_LOW_ADDRESS +#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB +#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB +#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK +/* CE descriptor */ +#define CE_SRC_DESC_SIZE_DWORD 2 +#define CE_DEST_DESC_SIZE_DWORD 2 +#define CE_SRC_DESC_SRC_PTR_OFFSET_DWORD 0 +#define CE_SRC_DESC_INFO_OFFSET_DWORD 1 +#define CE_DEST_DESC_DEST_PTR_OFFSET_DWORD 0 +#define CE_DEST_DESC_INFO_OFFSET_DWORD 1 +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_SRC_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 16 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 15 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_SRC_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 0 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 16 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 20 +#endif +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_DEST_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 16 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 15 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_DEST_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 0 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 16 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 20 +#endif + +#define MY_TARGET_DEF QCA9888_TARGETdef +#define MY_HOST_DEF QCA9888_HOSTdef +#define MY_CEREG_DEF QCA9888_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ QCA9888_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ QCA9888_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(QCA9888_CE_TARGETdef); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *QCA9888_TARGETdef; +struct hostdef_s *QCA9888_HOSTdef; +#endif /* QCA9888_HEADERS_DEF */ +qdf_export_symbol(QCA9888_TARGETdef); +qdf_export_symbol(QCA9888_HOSTdef); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/qca9984def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/qca9984def.c new file mode 100644 index 0000000000000000000000000000000000000000..572d082171ee8562d4db4537a44950f84e7840ff --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/qca9984def.c @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2015,2016,2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" + +#if defined(QCA9984_HEADERS_DEF) +#define QCA9984 1 + +#define WLAN_HEADERS 1 +#include "common_drv.h" +#include "QCA9984/soc_addrs.h" +#include "QCA9984/extra/hw/apb_map.h" +#include "QCA9984/hw/gpio_athr_wlan_reg.h" +#ifdef WLAN_HEADERS + +#include "QCA9984/extra/hw/wifi_top_reg_map.h" +#include "QCA9984/hw/rtc_soc_reg.h" + +#endif +#include "QCA9984/hw/si_reg.h" +#include "QCA9984/extra/hw/pcie_local_reg.h" +#include "QCA9984/hw/ce_wrapper_reg_csr.h" + +#include "QCA9984/extra/hw/soc_core_reg.h" +#include "QCA9984/hw/soc_pcie_reg.h" +#include "QCA9984/extra/hw/ce_reg_csr.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Base address is defined in pcie_local_reg.h. Macros which access the + * registers include the base address in their definition. + */ +#define PCIE_LOCAL_BASE_ADDRESS 0 + +#define FW_EVENT_PENDING_ADDRESS (WIFICMN_SCRATCH_3_ADDRESS) +#define DRAM_BASE_ADDRESS TARG_DRAM_START + +/* Backwards compatibility -- TBDXXX */ + +#define MISSING 0 + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB WIFI_SYSTEM_SLEEP_DISABLE_LSB +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK WIFI_SYSTEM_SLEEP_DISABLE_MASK +#define WLAN_RESET_CONTROL_COLD_RST_MASK WIFI_RESET_CONTROL_MAC_COLD_RST_MASK +#define WLAN_RESET_CONTROL_WARM_RST_MASK WIFI_RESET_CONTROL_MAC_WARM_RST_MASK +#define SOC_CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_ADDRESS +#define SOC_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_ADDRESS +#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_ADDRESS +#define SOC_LPO_CAL_OFFSET SOC_LPO_CAL_ADDRESS +#define SOC_RESET_CONTROL_CE_RST_MASK WIFI_RESET_CONTROL_CE_RESET_MASK +#define WLAN_SYSTEM_SLEEP_OFFSET WIFI_SYSTEM_SLEEP_ADDRESS +#define WLAN_RESET_CONTROL_OFFSET WIFI_RESET_CONTROL_ADDRESS +#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET +#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK +#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS +#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS +#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS +#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK +#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK +#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS +#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS +#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS +#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS +#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS +#define SI_CONFIG_OFFSET SI_CONFIG_ADDRESS +#define SI_TX_DATA0_OFFSET SI_TX_DATA0_ADDRESS +#define SI_TX_DATA1_OFFSET SI_TX_DATA1_ADDRESS +#define SI_RX_DATA0_OFFSET SI_RX_DATA0_ADDRESS +#define SI_RX_DATA1_OFFSET SI_RX_DATA1_ADDRESS +#define SI_CS_OFFSET SI_CS_ADDRESS +#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB +#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK +#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB +#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK +#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* MAC Descriptor */ +#define RX_PPDU_END_ANTENNA_OFFSET_DWORD (RX_PPDU_END_25_RX_ANTENNA_OFFSET >> 2) +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS WLAN_GPIO_ENABLE_W1TS_LOW_ADDRESS +#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB +#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB +#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK +/* CE descriptor */ +#define CE_SRC_DESC_SIZE_DWORD 2 +#define CE_DEST_DESC_SIZE_DWORD 2 +#define CE_SRC_DESC_SRC_PTR_OFFSET_DWORD 0 +#define CE_SRC_DESC_INFO_OFFSET_DWORD 1 +#define CE_DEST_DESC_DEST_PTR_OFFSET_DWORD 0 +#define CE_DEST_DESC_INFO_OFFSET_DWORD 1 +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_SRC_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 16 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 15 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_SRC_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 0 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 16 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 20 +#endif +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_DEST_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 16 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 15 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_DEST_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 0 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 16 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 20 +#endif + +#define MY_TARGET_DEF QCA9984_TARGETdef +#define MY_HOST_DEF QCA9984_HOSTdef +#define MY_CEREG_DEF QCA9984_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ QCA9984_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ QCA9984_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(QCA9984_CE_TARGETdef); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *QCA9984_TARGETdef; +struct hostdef_s *QCA9984_HOSTdef; +#endif /* QCA9984_HEADERS_DEF */ +qdf_export_symbol(QCA9984_TARGETdef); +qdf_export_symbol(QCA9984_HOSTdef); + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/qcn9000def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/qcn9000def.c new file mode 100644 index 0000000000000000000000000000000000000000..8a9527c49441398cd2049c999f00fb49a73ffed4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/qcn9000def.c @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" + +#if defined(QCN9000_HEADERS_DEF) + +#undef UMAC +#define WLAN_HEADERS 1 + +#include "wcss_version.h" +#include "wcss_seq_hwiobase.h" +#include "wfss_ce_reg_seq_hwioreg.h" + +#define MISSING 0 + +#define SOC_RESET_CONTROL_OFFSET MISSING +#define GPIO_PIN0_OFFSET MISSING +#define GPIO_PIN1_OFFSET MISSING +#define GPIO_PIN0_CONFIG_MASK MISSING +#define GPIO_PIN1_CONFIG_MASK MISSING +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET MISSING +#define GPIO_PIN11_OFFSET MISSING +#define GPIO_PIN12_OFFSET MISSING +#define GPIO_PIN13_OFFSET MISSING +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS MISSING +#define GPIO_PIN0_CONFIG_LSB MISSING +#define GPIO_PIN0_PAD_PULL_LSB MISSING +#define GPIO_PIN0_PAD_PULL_MASK MISSING +/* SI reg */ +#define SI_CONFIG_ERR_INT_MASK MISSING +#define SI_CONFIG_ERR_INT_LSB MISSING + +#define RTC_SOC_BASE_ADDRESS MISSING +#define RTC_WMAC_BASE_ADDRESS MISSING +#define SOC_CORE_BASE_ADDRESS MISSING +#define WLAN_MAC_BASE_ADDRESS MISSING +#define GPIO_BASE_ADDRESS MISSING +#define ANALOG_INTF_BASE_ADDRESS MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define CE_COUNT 12 +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define SI_BASE_ADDRESS MISSING +#define DRAM_BASE_ADDRESS MISSING + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB MISSING +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK MISSING +#define CLOCK_CONTROL_OFFSET MISSING +#define CLOCK_CONTROL_SI0_CLK_MASK MISSING +#define RESET_CONTROL_SI0_RST_MASK MISSING +#define WLAN_RESET_CONTROL_OFFSET MISSING +#define WLAN_RESET_CONTROL_COLD_RST_MASK MISSING +#define WLAN_RESET_CONTROL_WARM_RST_MASK MISSING +#define CPU_CLOCK_OFFSET MISSING + +#define CPU_CLOCK_STANDARD_LSB MISSING +#define CPU_CLOCK_STANDARD_MASK MISSING +#define LPO_CAL_ENABLE_LSB MISSING +#define LPO_CAL_ENABLE_MASK MISSING +#define WLAN_SYSTEM_SLEEP_OFFSET MISSING + +#define SOC_CHIP_ID_ADDRESS MISSING +#define SOC_CHIP_ID_REVISION_MASK MISSING +#define SOC_CHIP_ID_REVISION_LSB MISSING +#define SOC_CHIP_ID_REVISION_MSB MISSING + +#define FW_IND_EVENT_PENDING MISSING +#define FW_IND_INITIALIZED MISSING + +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB MISSING + +#define SR_WR_INDEX_ADDRESS MISSING +#define DST_WATERMARK_ADDRESS MISSING + +#define DST_WR_INDEX_ADDRESS MISSING +#define SRC_WATERMARK_ADDRESS MISSING +#define SRC_WATERMARK_LOW_MASK MISSING +#define SRC_WATERMARK_HIGH_MASK MISSING +#define DST_WATERMARK_LOW_MASK MISSING +#define DST_WATERMARK_HIGH_MASK MISSING +#define CURRENT_SRRI_ADDRESS MISSING +#define CURRENT_DRRI_ADDRESS MISSING +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_ADDRESS MISSING +#define MISC_IS_ADDRESS MISSING +#define HOST_IS_COPY_COMPLETE_MASK MISSING +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS MISSING +#define CE_DDR_ADDRESS_FOR_RRI_LOW MISSING +#define CE_DDR_ADDRESS_FOR_RRI_HIGH MISSING + +#define HOST_IE_ADDRESS \ + HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_ADDR(\ + WFSS_CE_COMMON_REG_REG_BASE) +#define HOST_IE_REG1_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_SRC_RING_IE_SHFT +#define HOST_IE_ADDRESS_2 \ + HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_1_ADDR(\ + WFSS_CE_COMMON_REG_REG_BASE) +#define HOST_IE_REG2_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_1_STS_RING_IE_SHFT +#define HOST_IE_ADDRESS_3 \ + HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_ADDR(\ + WFSS_CE_COMMON_REG_REG_BASE) +#define HOST_IE_REG3_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_DST_RING_IE_SHFT + +#define HOST_IE_COPY_COMPLETE_MASK MISSING +#define SR_BA_ADDRESS MISSING +#define SR_BA_ADDRESS_HIGH MISSING +#define SR_SIZE_ADDRESS MISSING +#define CE_CTRL1_ADDRESS MISSING +#define CE_CTRL1_DMAX_LENGTH_MASK MISSING +#define DR_BA_ADDRESS MISSING +#define DR_BA_ADDRESS_HIGH MISSING +#define DR_SIZE_ADDRESS MISSING +#define CE_CMD_REGISTER MISSING +#define CE_MSI_ADDRESS MISSING +#define CE_MSI_ADDRESS_HIGH MISSING +#define CE_MSI_DATA MISSING +#define CE_MSI_ENABLE_BIT MISSING +#define MISC_IE_ADDRESS MISSING +#define MISC_IS_AXI_ERR_MASK MISSING +#define MISC_IS_DST_ADDR_ERR_MASK MISSING +#define MISC_IS_SRC_LEN_ERR_MASK MISSING +#define MISC_IS_DST_MAX_LEN_VIO_MASK MISSING +#define MISC_IS_DST_RING_OVERFLOW_MASK MISSING +#define MISC_IS_SRC_RING_OVERFLOW_MASK MISSING +#define SRC_WATERMARK_LOW_LSB MISSING +#define SRC_WATERMARK_HIGH_LSB MISSING +#define DST_WATERMARK_LOW_LSB MISSING +#define DST_WATERMARK_HIGH_LSB MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB MISSING +#define CE_CTRL1_DMAX_LENGTH_LSB MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_IDX_UPD_EN_MASK MISSING +#define CE_WRAPPER_DEBUG_OFFSET MISSING +#define CE_WRAPPER_DEBUG_SEL_MSB MISSING +#define CE_WRAPPER_DEBUG_SEL_LSB MISSING +#define CE_WRAPPER_DEBUG_SEL_MASK MISSING +#define CE_DEBUG_OFFSET MISSING +#define CE_DEBUG_SEL_MSB MISSING +#define CE_DEBUG_SEL_LSB MISSING +#define CE_DEBUG_SEL_MASK MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS MISSING + +#define QCN9000_BOARD_DATA_SZ MISSING +#define QCN9000_BOARD_EXT_DATA_SZ MISSING + +#define MY_TARGET_DEF QCN9000_TARGETDEF +#define MY_HOST_DEF QCN9000_HOSTDEF +#define MY_CEREG_DEF QCN9000_CE_TARGETDEF +#define MY_TARGET_BOARD_DATA_SZ QCN9000_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ QCN9000_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(QCN9000_CE_TARGETDEF); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *QCN9000_TARGETDEF; +struct hostdef_s *QCN9000_HOSTDEF; +#endif /*QCN9000_HEADERS_DEF */ +qdf_export_symbol(QCN9000_TARGETDEF); +qdf_export_symbol(QCN9000_HOSTDEF); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/regtable.c b/drivers/staging/qca-wifi-host-cmn/hif/src/regtable.c new file mode 100644 index 0000000000000000000000000000000000000000..a6496b1cab0c0870314b303d5cb772b17e5b5d12 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/regtable.c @@ -0,0 +1,277 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "targaddrs.h" +#include "target_type.h" +#include "cepci.h" +#include "regtable.h" +#include "ar6320def.h" +#include "ar6320v2def.h" +#include "hif_main.h" +#include "adrastea_reg_def.h" + +#include "targetdef.h" +#include "hostdef.h" + +void hif_target_register_tbl_attach(struct hif_softc *scn, u32 target_type) +{ + switch (target_type) { + case TARGET_TYPE_AR6320: + scn->targetdef = &ar6320_targetdef; + scn->target_ce_def = &ar6320_ce_targetdef; + break; + case TARGET_TYPE_AR6320V2: + scn->targetdef = &ar6320v2_targetdef; + scn->target_ce_def = &ar6320v2_ce_targetdef; + break; + case TARGET_TYPE_ADRASTEA: + scn->targetdef = &adrastea_targetdef; + scn->target_ce_def = &adrastea_ce_targetdef; + break; + case TARGET_TYPE_QCN7605: + scn->targetdef = &genoa_targetdef; + scn->target_ce_def = &genoa_ce_targetdef; + break; +#if defined(AR6002_HEADERS_DEF) + case TARGET_TYPE_AR6002: + scn->targetdef = AR6002_TARGETdef; + break; +#endif +#if defined(AR6003_HEADERS_DEF) + case TARGET_TYPE_AR6003: + scn->targetdef = AR6003_TARGETdef; + break; +#endif +#if defined(AR6004_HEADERS_DEF) + case TARGET_TYPE_AR6004: + scn->targetdef = AR6004_TARGETdef; + break; +#endif +#if defined(AR9888_HEADERS_DEF) + case TARGET_TYPE_AR9888: + scn->targetdef = AR9888_TARGETdef; + scn->target_ce_def = AR9888_CE_TARGETdef; + break; +#endif +#if defined(AR9888V2_HEADERS_DEF) + case TARGET_TYPE_AR9888V2: + scn->targetdef = AR9888V2_TARGETdef; + scn->target_ce_def = AR9888_CE_TARGETdef; + break; +#endif +#if defined(AR900B_HEADERS_DEF) + case TARGET_TYPE_AR900B: + scn->targetdef = AR900B_TARGETdef; + scn->target_ce_def = AR900B_CE_TARGETdef; + break; +#endif +#if defined(QCA9984_HEADERS_DEF) + case TARGET_TYPE_QCA9984: + scn->targetdef = QCA9984_TARGETdef; + scn->target_ce_def = QCA9984_CE_TARGETdef; + break; +#endif +#if defined(QCA9888_HEADERS_DEF) + case TARGET_TYPE_QCA9888: + scn->targetdef = QCA9888_TARGETdef; + scn->target_ce_def = QCA9888_CE_TARGETdef; + break; +#endif +#ifdef ATH_AHB +#if defined(IPQ4019_HEADERS_DEF) + case TARGET_TYPE_IPQ4019: + scn->targetdef = IPQ4019_TARGETdef; + scn->target_ce_def = IPQ4019_CE_TARGETdef; + break; +#endif +#endif +#if defined(QCA8074_HEADERS_DEF) + case TARGET_TYPE_QCA8074: + scn->targetdef = QCA8074_TARGETdef; + scn->target_ce_def = QCA8074_CE_TARGETdef; + break; +#endif + +#if defined(QCA6290_HEADERS_DEF) + case TARGET_TYPE_QCA6290: + scn->targetdef = QCA6290_TARGETdef; + scn->target_ce_def = QCA6290_CE_TARGETdef; + break; +#endif +#if defined(QCA8074V2_HEADERS_DEF) + case TARGET_TYPE_QCA8074V2: + scn->targetdef = QCA8074V2_TARGETDEF; + scn->target_ce_def = QCA8074V2_CE_TARGETDEF; + break; +#endif +#if defined(QCA6018_HEADERS_DEF) + case TARGET_TYPE_QCA6018: + scn->targetdef = QCA6018_TARGETDEF; + scn->target_ce_def = QCA6018_CE_TARGETDEF; + break; +#endif + +#if defined(QCN9000_HEADERS_DEF) + case TARGET_TYPE_QCN9000: + scn->targetdef = QCN9000_TARGETDEF; + scn->target_ce_def = QCN9000_CE_TARGETDEF; + HIF_TRACE("%s: TARGET_TYPE_QCN9000", __func__); + break; +#endif + +#if defined(QCA6390_HEADERS_DEF) + case TARGET_TYPE_QCA6390: + scn->targetdef = QCA6390_TARGETdef; + scn->target_ce_def = QCA6390_CE_TARGETdef; + HIF_TRACE("%s: TARGET_TYPE_QCA6390", __func__); + break; +#endif /* QCA6390_HEADERS_DEF */ + +#if defined(QCA6490_HEADERS_DEF) + case TARGET_TYPE_QCA6490: + scn->targetdef = QCA6490_TARGETdef; + scn->target_ce_def = QCA6490_CE_TARGETdef; + HIF_TRACE("%s: TARGET_TYPE_QCA6490", __func__); + break; +#endif /* QCA6490_HEADERS_DEF */ + +#if defined(QCA6750_HEADERS_DEF) + case TARGET_TYPE_QCA6750: + scn->targetdef = QCA6750_TARGETdef; + scn->target_ce_def = QCA6750_CE_TARGETdef; + HIF_TRACE("%s: TARGET_TYPE_QCA6750", __func__); + break; +#endif /* QCA6750_HEADERS_DEF */ + default: + break; + } +} + +void hif_register_tbl_attach(struct hif_softc *scn, u32 hif_type) +{ + switch (hif_type) { + case HIF_TYPE_AR6320V2: + scn->hostdef = &ar6320v2_hostdef; + break; + case HIF_TYPE_ADRASTEA: + scn->hostdef = &adrastea_hostdef; + scn->host_shadow_regs = &adrastea_host_shadow_regs; + break; + case HIF_TYPE_QCN7605: + scn->hostdef = &genoa_hostdef; + scn->host_shadow_regs = &genoa_host_shadow_regs; + break; +#if defined(AR6002_HEADERS_DEF) + case HIF_TYPE_AR6002: + scn->hostdef = AR6002_HOSTdef; + break; +#endif +#if defined(AR6003_HEADERS_DEF) + case HIF_TYPE_AR6003: + scn->hostdef = AR6003_HOSTdef; + break; +#endif +#if defined(AR6004_HEADERS_DEF) + case HIF_TYPE_AR6004: + scn->hostdef = AR6004_HOSTdef; + break; +#endif +#if defined(AR9888_HEADERS_DEF) + case HIF_TYPE_AR9888: + scn->hostdef = AR9888_HOSTdef; + break; +#endif +#if defined(AR9888V2_HEADERS_DEF) + case HIF_TYPE_AR9888V2: + scn->hostdef = AR9888V2_HOSTdef; + break; +#endif +#if defined(AR900B_HEADERS_DEF) + case HIF_TYPE_AR900B: + scn->hostdef = AR900B_HOSTdef; + break; +#endif +#if defined(QCA9984_HEADERS_DEF) + case HIF_TYPE_QCA9984: + scn->hostdef = QCA9984_HOSTdef; + break; +#endif +#if defined(QCA9888_HEADERS_DEF) + case HIF_TYPE_QCA9888: + scn->hostdef = QCA9888_HOSTdef; + break; +#endif + +#ifdef ATH_AHB +#if defined(IPQ4019_HEADERS_DEF) + case HIF_TYPE_IPQ4019: + scn->hostdef = IPQ4019_HOSTdef; + break; +#endif +#endif +#if defined(QCA8074_HEADERS_DEF) + case HIF_TYPE_QCA8074: + scn->hostdef = QCA8074_HOSTdef; + break; +#endif +#if defined(QCA8074V2_HEADERS_DEF) + case HIF_TYPE_QCA8074V2: + scn->hostdef = QCA8074V2_HOSTDEF; + break; +#endif +#if defined(QCA6018_HEADERS_DEF) + case HIF_TYPE_QCA6018: + scn->hostdef = QCA6018_HOSTDEF; + HIF_TRACE("%s: HIF_TYPE_QCA6018", __func__); + break; +#endif +#if defined(QCA6290_HEADERS_DEF) + case HIF_TYPE_QCA6290: + scn->hostdef = QCA6290_HOSTdef; + break; +#endif +#if defined(QCN9000_HEADERS_DEF) + case HIF_TYPE_QCN9000: + scn->hostdef = QCN9000_HOSTDEF; + break; +#endif + +#if defined(QCA6390_HEADERS_DEF) + case HIF_TYPE_QCA6390: + scn->hostdef = QCA6390_HOSTdef; + HIF_TRACE("%s: HIF_TYPE_QCA6390", __func__); + break; +#endif /* QCA6390_HEADERS_DEF */ + +#if defined(QCA6490_HEADERS_DEF) + case HIF_TYPE_QCA6490: + scn->hostdef = QCA6490_HOSTdef; + HIF_TRACE("%s: HIF_TYPE_QCA6490", __func__); + break; +#endif /* QCA6490_HEADERS_DEF */ + +#if defined(QCA6750_HEADERS_DEF) + case TARGET_TYPE_QCA6750: + scn->hostdef = QCA6750_HOSTdef; + HIF_TRACE("%s: TARGET_TYPE_QCA6750", __func__); + break; +#endif /* QCA6750_HEADERS_DEF */ + default: + break; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_bmi_reg_access.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_bmi_reg_access.c new file mode 100644 index 0000000000000000000000000000000000000000..ada36b00f665b5f05ab6e3ef9964e6b8ae61d073 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_bmi_reg_access.c @@ -0,0 +1,496 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "athdefs.h" +#include "a_types.h" +#include "a_osapi.h" +#define ATH_MODULE_NAME hif +#include "a_debug.h" +#define ATH_DEBUG_BMI ATH_DEBUG_MAKE_MODULE_MASK(0) +#include "hif.h" +#include "bmi.h" +#include "htc_api.h" +#include "if_sdio.h" +#include "regtable_sdio.h" +#include "hif_sdio_dev.h" + +#define BMI_COMMUNICATION_TIMEOUT 100000 + +static bool pending_events_func_check; +static uint32_t command_credits; +static uint32_t *p_bmi_cmd_credits = &command_credits; + +/* BMI Access routines */ + +/** + * hif_bmi_buffer_send - call to send bmi buffer + * @device: hif context + * @buffer: buffer + * @length: length + * + * Return: QDF_STATUS_SUCCESS for success. + */ +static QDF_STATUS +hif_bmi_buffer_send(struct hif_sdio_softc *scn, struct hif_sdio_dev *device, + char *buffer, uint32_t length) +{ + QDF_STATUS status; + uint32_t timeout; + uint32_t address; + uint32_t mbox_address[HTC_MAILBOX_NUM_MAX]; + + hif_configure_device(NULL, device, HIF_DEVICE_GET_FIFO_ADDR, + &mbox_address[0], sizeof(mbox_address)); + + *p_bmi_cmd_credits = 0; + timeout = BMI_COMMUNICATION_TIMEOUT; + + while (timeout-- && !(*p_bmi_cmd_credits)) { + /* Read the counter register to get the command credits */ + address = + COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4; + /* hit the credit counter with a 4-byte access, the first + * byte read will hit the counter and cause + * a decrement, while the remaining 3 bytes has no effect. + * The rationale behind this is to make all HIF accesses + * 4-byte aligned + */ + status = + hif_read_write(device, address, + (uint8_t *) p_bmi_cmd_credits, 4, + HIF_RD_SYNC_BYTE_INC, NULL); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:Unable to decrement the credit count register\n", + __func__)); + return QDF_STATUS_E_FAILURE; + } + /* the counter is only 8=bits, ignore anything in the + * upper 3 bytes + */ + (*p_bmi_cmd_credits) &= 0xFF; + } + + if (*p_bmi_cmd_credits) { + address = mbox_address[ENDPOINT1]; + status = hif_read_write(device, address, buffer, length, + HIF_WR_SYNC_BYTE_INC, NULL); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:Unable to send the BMI data to the device\n", + __func__)); + return QDF_STATUS_E_FAILURE; + } + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:BMI Communication timeout - hif_bmi_buffer_send\n", + __func__)); + return QDF_STATUS_E_FAILURE; + } + + return status; +} + +#if defined(SDIO_3_0) + +static QDF_STATUS +hif_bmi_read_write(struct hif_sdio_dev *device, + char *buffer, uint32_t length) +{ + QDF_STATUS status; + + status = hif_read_write(device, HOST_INT_STATUS_ADDRESS, + buffer, length, + HIF_RD_SYNC_BYTE_INC, NULL); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:Unable to read int status reg\n", + __func__)); + return QDF_STATUS_E_FAILURE; + } + *buffer = (HOST_INT_STATUS_MBOX_DATA_GET(*buffer) & (1 << ENDPOINT1)); + return status; +} +#else + +static QDF_STATUS +hif_bmi_read_write(struct hif_sdio_dev *device, + char *buffer, uint32_t length) +{ + QDF_STATUS status; + + status = hif_read_write(device, RX_LOOKAHEAD_VALID_ADDRESS, + buffer, length, + HIF_RD_SYNC_BYTE_INC, NULL); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:Unable to read rx lookahead reg\n", + __func__)); + return QDF_STATUS_E_FAILURE; + } + *buffer &= (1 << ENDPOINT1); + return status; +} +#endif + +/** + * hif_bmi_buffer_receive - call when bmi buffer is received + * @device: hif context + * @buffer: buffer + * @length: length + * @want_timeout: timeout is needed or not + * + * Return: QDF_STATUS_SUCCESS for success. + */ +static QDF_STATUS +hif_bmi_buffer_receive(struct hif_sdio_dev *device, + char *buffer, uint32_t length, bool want_timeout) +{ + QDF_STATUS status; + uint32_t address; + uint32_t mbox_address[HTC_MAILBOX_NUM_MAX]; + struct _HIF_PENDING_EVENTS_INFO hif_pending_events; + + static HIF_PENDING_EVENTS_FUNC get_pending_events_func; + + if (!pending_events_func_check) { + /* see if the HIF layer implements an alternative + * function to get pending events + * do this only once! + */ + hif_configure_device(NULL, device, + HIF_DEVICE_GET_PENDING_EVENTS_FUNC, + &get_pending_events_func, + sizeof(get_pending_events_func)); + pending_events_func_check = true; + } + + hif_configure_device(NULL, device, HIF_DEVICE_GET_FIFO_ADDR, + &mbox_address[0], sizeof(mbox_address)); + + /* + * During normal bootup, small reads may be required. + * Rather than issue an HIF Read and then wait as the Target + * adds successive bytes to the FIFO, we wait here until + * we know that response data is available. + * + * This allows us to cleanly timeout on an unexpected + * Target failure rather than risk problems at the HIF level. In + * particular, this avoids SDIO timeouts and possibly garbage + * data on some host controllers. And on an interconnect + * such as Compact Flash (as well as some SDIO masters) which + * does not provide any indication on data timeout, it avoids + * a potential hang or garbage response. + * + * Synchronization is more difficult for reads larger than the + * size of the MBOX FIFO (128B), because the Target is unable + * to push the 129th byte of data until AFTER the Host posts an + * HIF Read and removes some FIFO data. So for large reads the + * Host proceeds to post an HIF Read BEFORE all the data is + * actually available to read. Fortunately, large BMI reads do + * not occur in practice -- they're supported for debug/development. + * + * So Host/Target BMI synchronization is divided into these cases: + * CASE 1: length < 4 + * Should not happen + * + * CASE 2: 4 <= length <= 128 + * Wait for first 4 bytes to be in FIFO + * If CONSERVATIVE_BMI_READ is enabled, also wait for + * a BMI command credit, which indicates that the ENTIRE + * response is available in the the FIFO + * + * CASE 3: length > 128 + * Wait for the first 4 bytes to be in FIFO + * + * For most uses, a small timeout should be sufficient and we will + * usually see a response quickly; but there may be some unusual + * (debug) cases of BMI_EXECUTE where we want an larger timeout. + * For now, we use an unbounded busy loop while waiting for + * BMI_EXECUTE. + * + * If BMI_EXECUTE ever needs to support longer-latency execution, + * especially in production, this code needs to be enhanced to sleep + * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently + * a function of Host processor speed. + */ + if (length >= 4) { /* NB: Currently, always true */ + /* + * NB: word_available is declared static for esoteric reasons + * having to do with protection on some OSes. + */ + static uint32_t word_available; + uint32_t timeout; + + word_available = 0; + timeout = BMI_COMMUNICATION_TIMEOUT; + while ((!want_timeout || timeout--) && !word_available) { + + if (get_pending_events_func) { + status = get_pending_events_func(device, + &hif_pending_events, + NULL); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:Failed to get pending events\n", + __func__)); + break; + } + + if (hif_pending_events.available_recv_bytes >= + sizeof(uint32_t)) { + word_available = 1; + } + continue; + } + status = hif_bmi_read_write(device, + (uint8_t *) &word_available, + sizeof(word_available)); + if (status != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + } + + if (!word_available) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:BMI Communication timeout FIFO empty\n", + __func__)); + return QDF_STATUS_E_FAILURE; + } + } + + address = mbox_address[ENDPOINT1]; + status = hif_read_write(device, address, buffer, length, + HIF_RD_SYNC_BYTE_INC, NULL); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:Unable to read the BMI data from the device\n", + __func__)); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_reg_based_get_target_info - to retrieve target info + * @hif_ctx: hif context + * @targ_info: bmi target info + * + * Return: QDF_STATUS_SUCCESS for success. + */ +QDF_STATUS +hif_reg_based_get_target_info(struct hif_opaque_softc *hif_ctx, + struct bmi_target_info *targ_info) +{ + QDF_STATUS status; + uint32_t cid; + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *device = scn->hif_handle; + + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("BMI Get Target Info: Enter (device: 0x%pK)\n", + device)); + cid = BMI_GET_TARGET_INFO; + status = hif_bmi_buffer_send(scn, device, (char *)&cid, sizeof(cid)); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:Unable to write to the device\n", + __func__)); + return QDF_STATUS_E_FAILURE; + } + + status = hif_bmi_buffer_receive(device, + (char *) &targ_info->target_ver, + sizeof(targ_info->target_ver), true); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:Unable to read Target Version from the device\n", + __func__)); + return QDF_STATUS_E_FAILURE; + } + + if (targ_info->target_ver == TARGET_VERSION_SENTINAL) { + /* Determine how many bytes are in the Target's targ_info */ + status = hif_bmi_buffer_receive(device, + (char *) &targ_info-> + target_info_byte_count, + sizeof(targ_info-> + target_info_byte_count), + true); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:Unable to read target Info\n", + __func__)); + return QDF_STATUS_E_FAILURE; + } + + /* + * The Target's targ_info doesn't match the Host's targ_info. + * We need to do some backwards compatibility work to make this + * OK. + */ + QDF_ASSERT(targ_info->target_info_byte_count == + sizeof(*targ_info)); + /* Read the remainder of the targ_info */ + status = hif_bmi_buffer_receive(device, + ((char *) targ_info) + + sizeof(targ_info-> + target_info_byte_count), + sizeof(*targ_info) - + sizeof(targ_info-> + target_info_byte_count), + true); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:Unable to read Target Info (%d bytes)\n", + __func__, targ_info->target_info_byte_count)); + return QDF_STATUS_E_FAILURE; + } + } else { + /* + * Target must be an AR6001 whose firmware does not + * support BMI_GET_TARGET_INFO. Construct the data + * that it would have sent. + */ + targ_info->target_info_byte_count = sizeof(*targ_info); + targ_info->target_type = TARGET_TYPE_AR6001; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("BMI Get Target Info: Exit (ver: 0x%x type: 0x%x)\n", + targ_info->target_ver, + targ_info->target_type)); + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_exchange_bmi_msg - API to handle HIF-specific BMI message exchanges + * @hif_ctx: hif context + * @bmi_cmd_da: bmi cmd + * @bmi_rsp_da: bmi rsp + * @send_message: send message + * @length: length + * @response_message: response message + * @response_length: response length + * @timeout_ms: timeout in ms + * + * This API is synchronous + * and only allowed to be called from a context that can block (sleep) + * + * Return: QDF_STATUS_SUCCESS for success. + */ +QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx, + qdf_dma_addr_t bmi_cmd_da, + qdf_dma_addr_t bmi_rsp_da, + uint8_t *send_message, + uint32_t length, + uint8_t *response_message, + uint32_t *response_length, + uint32_t timeout_ms) { + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *device = scn->hif_handle; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!device) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:Null device argument\n", + __func__)); + return QDF_STATUS_E_INVAL; + } + + status = hif_bmi_buffer_send(scn, device, send_message, length); + if (QDF_IS_STATUS_ERROR(status)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:Unable to Send Message to device\n", + __func__)); + return status; + } + + if (response_message) { + status = hif_bmi_buffer_receive(device, response_message, + *response_length, + timeout_ms ? true : false); + if (QDF_IS_STATUS_ERROR(status)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:Unable to read response\n", + __func__)); + return status; + } + } + + return status; +} + +void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx) +{ +} + +#ifdef BRINGUP_DEBUG +#define SDIO_SCRATCH_1_ADDRESS 0x864 +/*Functions used for debugging*/ +/** + * hif_bmi_write_scratch_register - API to write scratch register + * @device: hif context + * @buffer: buffer + * + * Return: QDF_STATUS_SUCCESS for success. + */ +QDF_STATUS hif_bmi_write_scratch_register(struct hif_sdio_dev *device, + uint32_t buffer) { + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = hif_read_write(device, SDIO_SCRATCH_1_ADDRESS, + (uint8_t *) &buffer, 4, + HIF_WR_SYNC_BYTE_INC, NULL); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: Unable to write to 0x%x\n", + __func__, SDIO_SCRATCH_1_ADDRESS)); + return QDF_STATUS_E_FAILURE; + } + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wrote 0x%x to 0x%x\n", __func__, + buffer, SDIO_SCRATCH_1_ADDRESS)); + + return status; +} + +/** + * hif_bmi_read_scratch_register - API to read from scratch register + * @device: hif context + * + * Return: QDF_STATUS_SUCCESS for success. + */ +QDF_STATUS hif_bmi_read_scratch_register(struct hif_sdio_dev *device) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint32_t buffer = 0; + + status = hif_read_write(device, SDIO_SCRATCH_1_ADDRESS, + (uint8_t *) &buffer, 4, + HIF_RD_SYNC_BYTE_INC, NULL); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: Unable to read from 0x%x\n", + __func__, SDIO_SCRATCH_1_ADDRESS)); + return QDF_STATUS_E_FAILURE; + } + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: read 0x%x from 0x%x\n", __func__, + buffer, SDIO_SCRATCH_1_ADDRESS)); + + return status; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_diag_reg_access.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_diag_reg_access.c new file mode 100644 index 0000000000000000000000000000000000000000..30e363552fd5d6556f2dd014f527f3eccc68acce --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_diag_reg_access.c @@ -0,0 +1,323 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "athdefs.h" +#include "a_types.h" +#include "a_osapi.h" +#define ATH_MODULE_NAME hif +#include "a_debug.h" + +#include "targaddrs.h" +#include "hif.h" +#include "if_sdio.h" +#include "regtable_sdio.h" +#include "hif_sdio_dev.h" +#include "qdf_module.h" + +#define CPU_DBG_SEL_ADDRESS 0x00000483 +#define CPU_DBG_ADDRESS 0x00000484 +#define WORD_NON_ALIGNMENT_MASK 0x03 + +/** + * hif_ar6000_set_address_window_register - set the window address register + * (using 4-byte register access). + * @hif_device: hif context + * @register_addr: register address + * @addr: addr + * + * This mitigates host interconnect issues with non-4byte aligned bus requests, + * some interconnects use bus adapters that impose strict limitations. + * Since diag window access is not intended for performance critical operations, + * the 4byte mode should be satisfactory as it generates 4X the bus activity. + * + * Return: QDF_STATUS_SUCCESS for success. + */ +static +QDF_STATUS hif_ar6000_set_address_window_register( + struct hif_sdio_dev *hif_device, + uint32_t register_addr, + uint32_t addr) +{ + QDF_STATUS status; + static uint32_t address; + + address = addr; + /*AR6320,just write the 4-byte address to window register*/ + status = hif_read_write(hif_device, + register_addr, + (char *) (&address), + 4, HIF_WR_SYNC_BYTE_INC, NULL); + + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("Cannot write 0x%x to window reg: 0x%X\n", + addr, register_addr)); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_diag_read_access - Read from the AR6000 through its diagnostic window. + * @hif_ctx: hif context + * @address: address + * @data: data + * + * No cooperation from the Target is required for this. + * + * Return: QDF_STATUS_SUCCESS for success. + */ +QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx, + uint32_t address, + uint32_t *data) +{ + QDF_STATUS status; + static uint32_t readvalue; + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *hif_device = scn->hif_handle; + + if (address & WORD_NON_ALIGNMENT_MASK) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("[%s]addr is not 4 bytes align.addr[0x%08x]\n", + __func__, address)); + return QDF_STATUS_E_FAILURE; + } + + /* set window register to start read cycle */ + status = hif_ar6000_set_address_window_register(hif_device, + WINDOW_READ_ADDR_ADDRESS, + address); + + if (status != QDF_STATUS_SUCCESS) + return status; + + /* read the data */ + status = hif_read_write(hif_device, + WINDOW_DATA_ADDRESS, + (char *) &readvalue, + sizeof(uint32_t), HIF_RD_SYNC_BYTE_INC, NULL); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("Cannot read from WINDOW_DATA_ADDRESS\n")); + return status; + } + + *data = readvalue; + return status; +} + +/** + * hif_diag_write_access - Write to the AR6000 through its diagnostic window. + * @hif_ctx: hif context + * @address: address + * @data: data + * + * No cooperation from the Target is required for this. + * + * Return: QDF_STATUS_SUCCESS for success. + */ +QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx, + uint32_t address, uint32_t data) +{ + QDF_STATUS status; + static uint32_t write_value; + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *hif_device = scn->hif_handle; + + if (address & WORD_NON_ALIGNMENT_MASK) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("[%s]addr is not 4 bytes align.addr[0x%08x]\n", + __func__, address)); + return QDF_STATUS_E_FAILURE; + } + + write_value = data; + + /* set write data */ + status = hif_read_write(hif_device, + WINDOW_DATA_ADDRESS, + (char *) &write_value, + sizeof(uint32_t), HIF_WR_SYNC_BYTE_INC, NULL); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("Cannot write 0x%x to WINDOW_DATA_ADDRESS\n", + data)); + return status; + } + + /* set window register, which starts the write cycle */ + return hif_ar6000_set_address_window_register(hif_device, + WINDOW_WRITE_ADDR_ADDRESS, + address); +} + +/** + * hif_diag_write_mem - Write a block data to the AR6000 through its diagnostic + * window. + * @scn: hif context + * @address: address + * @data: data + * @nbytes: nbytes + * + * This function may take some time. + * No cooperation from the Target is required for this. + * + * Return: QDF_STATUS_SUCCESS for success. + */ +QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *scn, uint32_t address, + uint8_t *data, int nbytes) +{ + QDF_STATUS status; + int32_t i; + uint32_t tmp_data; + + if ((address & WORD_NON_ALIGNMENT_MASK) || + (nbytes & WORD_NON_ALIGNMENT_MASK)) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("[%s]addr or length is not 4 bytes align.addr[0x%08x] len[0x%08x]\n", + __func__, address, nbytes)); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < nbytes; i += 4) { + tmp_data = + data[i] | (data[i + 1] << 8) | (data[i + 2] << 16) | + (data[i + 3] << 24); + status = hif_diag_write_access(scn, address + i, tmp_data); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("Diag Write mem failed.addr[0x%08x] value[0x%08x]\n", + address + i, tmp_data)); + return status; + } + } + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_diag_read_mem - Read a block data to the AR6000 through its diagnostic + * window. + * @scn: hif context + * @data: data + * @nbytes: nbytes + * + * This function may take some time. + * No cooperation from the Target is required for this. + * + * Return: QDF_STATUS_SUCCESS for success. + */ +QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *scn, + uint32_t address, uint8_t *data, + int nbytes) +{ + QDF_STATUS status; + int32_t i; + uint32_t tmp_data; + + if ((address & WORD_NON_ALIGNMENT_MASK) || + (nbytes & WORD_NON_ALIGNMENT_MASK)) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("[%s]addr or length is not 4 bytes align.addr[0x%08x] len[0x%08x]\n", + __func__, address, nbytes)); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < nbytes; i += 4) { + status = hif_diag_read_access(scn, address + i, &tmp_data); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("Diag Write mem failed.addr[0x%08x] value[0x%08x]\n", + address + i, tmp_data)); + return status; + } + data[i] = tmp_data & 0xff; + data[i + 1] = tmp_data >> 8 & 0xff; + data[i + 2] = tmp_data >> 16 & 0xff; + data[i + 3] = tmp_data >> 24 & 0xff; + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(hif_diag_read_mem); + +/** + * hif_ar6k_read_target_register - call to read target register values + * @hif_device: hif context + * @regsel: register selection + * @regval: reg value + * + * Return: QDF_STATUS_SUCCESS for success. + */ +static QDF_STATUS hif_ar6k_read_target_register(struct hif_sdio_dev *hif_device, + int regsel, uint32_t *regval) +{ + QDF_STATUS status; + char vals[4]; + char register_selection[4]; + + register_selection[0] = regsel & 0xff; + register_selection[1] = regsel & 0xff; + register_selection[2] = regsel & 0xff; + register_selection[3] = regsel & 0xff; + status = hif_read_write(hif_device, CPU_DBG_SEL_ADDRESS, + register_selection, 4, + HIF_WR_SYNC_BYTE_FIX, NULL); + + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("Cannot write CPU_DBG_SEL (%d)\n", regsel)); + return status; + } + + status = hif_read_write(hif_device, + CPU_DBG_ADDRESS, + (char *) vals, + sizeof(vals), HIF_RD_SYNC_BYTE_INC, NULL); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("Cannot read from CPU_DBG_ADDRESS\n")); + return status; + } + + *regval = vals[0] << 0 | vals[1] << 8 | + vals[2] << 16 | vals[3] << 24; + + return status; +} + +/** + * hif_ar6k_fetch_target_regs - call to fetch target reg values + * @hif_device: hif context + * @targregs: target regs + * + * Return: None + */ +void hif_ar6k_fetch_target_regs(struct hif_sdio_dev *hif_device, + uint32_t *targregs) +{ + int i; + uint32_t val; + + for (i = 0; i < AR6003_FETCH_TARG_REGS_COUNT; i++) { + val = 0xffffffff; + hif_ar6k_read_target_register(hif_device, i, &val); + targregs[i] = val; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio.c new file mode 100644 index 0000000000000000000000000000000000000000..5636c4702af864cf5df96d49414740ec84520179 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio.c @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qdf_net_types.h" +#include "a_types.h" +#include "athdefs.h" +#include "a_osapi.h" +#include +#include +#include +#include "hif_sdio_dev.h" +#include "if_sdio.h" +#include "regtable_sdio.h" +#include + +#define ATH_MODULE_NAME hif_sdio + +/** + * hif_start() - start hif bus interface. + * @hif_ctx: HIF context + * + * Enables hif device interrupts + * + * Return: int + */ +uint32_t hif_start(struct hif_opaque_softc *hif_ctx) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *hif_device = scn->hif_handle; + struct hif_sdio_device *htc_sdio_device = hif_dev_from_hif(hif_device); + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + int ret = 0; + + HIF_ENTER(); + ret = hif_sdio_bus_configure(hif_sc); + if (ret) { + HIF_ERROR("%s: hif_sdio_bus_configure failed", __func__); + return QDF_STATUS_E_FAILURE; + } + + hif_dev_enable_interrupts(htc_sdio_device); + HIF_EXIT(); + return QDF_STATUS_SUCCESS; +} + +/** + * hif_flush_surprise_remove() - remove hif bus interface. + * @hif_ctx: HIF context + * + * + * Return: none + */ +void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx) +{ + +} + +/** + * hif_sdio_stop() - stop hif bus interface. + * @hif_ctx: HIF context + * + * Disable hif device interrupts and destroy hif context + * + * Return: none + */ +void hif_sdio_stop(struct hif_softc *hif_ctx) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *hif_device = scn->hif_handle; + struct hif_sdio_device *htc_sdio_device = hif_dev_from_hif(hif_device); + + HIF_ENTER(); + if (htc_sdio_device) { + hif_dev_disable_interrupts(htc_sdio_device); + hif_dev_destroy(htc_sdio_device); + } + HIF_EXIT(); +} + +/** + * hif_send_head() - send data on hif bus interface. + * @hif_ctx: HIF context + * + * send tx data on a given pipe id + * + * Return: int + */ +QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t pipe, + uint32_t transfer_id, uint32_t nbytes, qdf_nbuf_t buf, + uint32_t data_attr) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *hif_device = scn->hif_handle; + struct hif_sdio_device *htc_sdio_device = hif_dev_from_hif(hif_device); + + return hif_dev_send_buffer(htc_sdio_device, + transfer_id, pipe, + nbytes, buf); +} + +/** + * hif_map_service_to_pipe() - maps ul/dl pipe to service id. + * @hif_ctx: HIF hdl + * @ServiceId: sevice index + * @ULPipe: uplink pipe id + * @DLPipe: down-linklink pipe id + * @ul_is_polled: if ul is polling based + * @ul_is_polled: if dl is polling based + * + * Return: int + */ +int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, + uint16_t service_id, uint8_t *ul_pipe, + uint8_t *dl_pipe, int *ul_is_polled, + int *dl_is_polled) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_hdl); + struct hif_sdio_dev *hif_device = scn->hif_handle; + + return hif_dev_map_service_to_pipe(hif_device, + service_id, ul_pipe, dl_pipe); +} + +/** + * hif_map_service_to_pipe() - maps ul/dl pipe to service id. + * @scn: HIF context + * @ServiceId: sevice index + * @ULPipe: uplink pipe id + * @DLPipe: down-linklink pipe id + * @ul_is_polled: if ul is polling based + * @ul_is_polled: if dl is polling based + * + * Return: int + */ +void hif_get_default_pipe(struct hif_opaque_softc *scn, uint8_t *ul_pipe, + uint8_t *dl_pipe) +{ + hif_map_service_to_pipe(scn, HTC_CTRL_RSVD_SVC, + ul_pipe, dl_pipe, NULL, NULL); +} + +/** + * hif_post_init() - create hif device after probe. + * @hif_ctx: HIF context + * @target: HIF target + * @callbacks: htc callbacks + * + * + * Return: int + */ +void hif_post_init(struct hif_opaque_softc *hif_ctx, void *target, + struct hif_msg_callbacks *callbacks) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *hif_device = scn->hif_handle; + struct hif_sdio_device *htc_sdio_device = hif_dev_from_hif(hif_device); + + HIF_ENTER(); + + if (!htc_sdio_device) + htc_sdio_device = hif_dev_create(hif_device, callbacks, target); + + if (htc_sdio_device) + hif_dev_setup(htc_sdio_device); + + HIF_EXIT(); +} + +/** + * hif_get_free_queue_number() - create hif device after probe. + * @hif_ctx: HIF context + * @pipe: pipe id + * + * SDIO uses credit based flow control at the HTC layer + * so transmit resource checks are bypassed + * Return: int + */ +uint16_t hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, + uint8_t pipe) +{ + uint16_t rv; + + rv = 1; + return rv; +} + +/** + * hif_send_complete_check() - check tx complete on a given pipe. + * @hif_ctx: HIF context + * @pipe: HIF target + * @force: check if need to pool for completion + * Decide whether to actually poll for completions, or just + * wait for a later chance. + * + * Return: int + */ +void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe, + int force) +{ + +} + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_common.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_common.h new file mode 100644 index 0000000000000000000000000000000000000000..21977f2a7045be40c0ef5dd5bd237d32100079f0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_common.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HIF_SDIO_COMMON_H_ +#define _HIF_SDIO_COMMON_H_ + +/* SDIO manufacturer ID and Codes */ +#define MANUFACTURER_ID_AR6002_BASE 0x200 +#define MANUFACTURER_ID_AR6003_BASE 0x300 +#define MANUFACTURER_ID_AR6004_BASE 0x400 +#define MANUFACTURER_ID_AR6320_BASE 0x500 +#define MANUFACTURER_ID_QCA9377_BASE 0x700 +#define MANUFACTURER_ID_QCA9379_BASE 0x800 +#define MANUFACTURER_ID_QCN7605 0x400B +#define MANUFACTURER_ID_QCN7605_BASE 0x4000 +#define MANUFACTURER_ID_AR6K_BASE_MASK 0xFF00 +#define MANUFACTURER_ID_AR6K_REV_MASK 0x00FF +#define FUNCTION_CLASS 0x0 +#define MANUFACTURER_CODE 0x271 /* Atheros Manufacturer ID */ +#define MANUFACTURER_QC_CODE 0x70 /* QC Manufacturer ID */ + + +#endif /* _HIF_SDIO_COMMON_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_dev.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_dev.c new file mode 100644 index 0000000000000000000000000000000000000000..ba2277a3bb866b4d3ca351a4bde53c64ae79794e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_dev.c @@ -0,0 +1,293 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#define ATH_MODULE_NAME hif +#include "a_debug.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hif_sdio_internal.h" +#include "if_sdio.h" +#include "regtable_sdio.h" + +/** + * hif_dev_alloc_rx_buffer() - allocate rx buffer. + * @pDev: sdio device context + * + * + * Return: htc buffer pointer + */ +HTC_PACKET *hif_dev_alloc_rx_buffer(struct hif_sdio_device *pdev) +{ + HTC_PACKET *packet; + qdf_nbuf_t netbuf; + uint32_t bufsize = 0, headsize = 0; + + bufsize = HIF_SDIO_RX_BUFFER_SIZE + HIF_SDIO_RX_DATA_OFFSET; + headsize = sizeof(HTC_PACKET); + netbuf = qdf_nbuf_alloc(NULL, bufsize + headsize, 0, 4, false); + if (!netbuf) { + hif_err_rl("Allocate netbuf failed"); + return NULL; + } + packet = (HTC_PACKET *) qdf_nbuf_data(netbuf); + qdf_nbuf_reserve(netbuf, headsize); + + SET_HTC_PACKET_INFO_RX_REFILL(packet, + pdev, + qdf_nbuf_data(netbuf), + bufsize, ENDPOINT_0); + SET_HTC_PACKET_NET_BUF_CONTEXT(packet, netbuf); + return packet; +} + +/** + * hif_dev_create() - create hif device after probe. + * @hif_device: HIF context + * @callbacks: htc callbacks + * @target: HIF target + * + * + * Return: int + */ +struct hif_sdio_device *hif_dev_create(struct hif_sdio_dev *hif_device, + struct hif_msg_callbacks *callbacks, void *target) +{ + + QDF_STATUS status; + struct hif_sdio_device *pdev; + + HIF_ENTER(); + pdev = qdf_mem_malloc(sizeof(struct hif_sdio_device)); + if (!pdev) { + A_ASSERT(false); + return NULL; + } + + qdf_spinlock_create(&pdev->Lock); + qdf_spinlock_create(&pdev->TxLock); + qdf_spinlock_create(&pdev->RxLock); + + pdev->HIFDevice = hif_device; + pdev->pTarget = target; + status = hif_configure_device(NULL, hif_device, + HIF_DEVICE_SET_HTC_CONTEXT, + (void *)pdev, sizeof(pdev)); + if (status != QDF_STATUS_SUCCESS) + HIF_ERROR("%s: set context failed", __func__); + + A_MEMCPY(&pdev->hif_callbacks, callbacks, sizeof(*callbacks)); + + HIF_EXIT(); + return pdev; +} + +/** + * hif_dev_destroy() - destroy hif device. + * @pDev: sdio device context + * + * + * Return: none + */ +void hif_dev_destroy(struct hif_sdio_device *pdev) +{ + QDF_STATUS status; + + status = hif_configure_device(NULL, pdev->HIFDevice, + HIF_DEVICE_SET_HTC_CONTEXT, + (void *)NULL, 0); + if (status != QDF_STATUS_SUCCESS) + HIF_ERROR("%s: set context failed", __func__); + + qdf_mem_free(pdev); +} + +/** + * hif_dev_from_hif() - get sdio device from hif device. + * @pDev: hif device context + * + * + * Return: hif sdio device context + */ +struct hif_sdio_device *hif_dev_from_hif(struct hif_sdio_dev *hif_device) +{ + struct hif_sdio_device *pdev = NULL; + QDF_STATUS status; + + status = hif_configure_device(NULL, hif_device, + HIF_DEVICE_GET_HTC_CONTEXT, + (void **)&pdev, + sizeof(struct hif_sdio_device)); + if (status != QDF_STATUS_SUCCESS) + HIF_ERROR("%s: set context failed", __func__); + + return pdev; +} + +/** + * hif_dev_disable_interrupts() - disable hif device interrupts. + * @pDev: sdio device context + * + * + * Return: int + */ +QDF_STATUS hif_dev_disable_interrupts(struct hif_sdio_device *pdev) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + HIF_ENTER(); + + hif_dev_mask_interrupts(pdev); + + /* To Do mask the host controller interrupts */ + hif_mask_interrupt(pdev->HIFDevice); + + HIF_EXIT(); + return status; +} + +/** + * hif_dev_enable_interrupts() - enables hif device interrupts. + * @pDev: sdio device context + * + * + * Return: int + */ +QDF_STATUS hif_dev_enable_interrupts(struct hif_sdio_device *pdev) +{ + QDF_STATUS status; + + HIF_ENTER(); + + /* for good measure, make sure interrupt are disabled + * before unmasking at the HIF layer. + * The rationale here is that between device insertion + * (where we clear the interrupts the first time) + * and when HTC is finally ready to handle interrupts, + * other software can perform target "soft" resets. + */ + status = hif_dev_disable_interrupts(pdev); + + /* Unmask the host controller interrupts */ + hif_un_mask_interrupt(pdev->HIFDevice); + + hif_dev_unmask_interrupts(pdev); + + HIF_EXIT(); + + return status; +} + +/** + * hif_dev_setup() - set up sdio device. + * @pDev: sdio device context + * + * + * Return: int + */ +QDF_STATUS hif_dev_setup(struct hif_sdio_device *pdev) +{ + QDF_STATUS status; + struct htc_callbacks htc_cbs; + struct hif_sdio_dev *hif_device = pdev->HIFDevice; + + HIF_ENTER(); + + status = hif_dev_setup_device(pdev); + + if (status != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: device specific setup failed", __func__); + return QDF_STATUS_E_INVAL; + } + + pdev->BlockMask = pdev->BlockSize - 1; + A_ASSERT((pdev->BlockSize & pdev->BlockMask) == 0); + + /* assume we can process HIF interrupt events asynchronously */ + pdev->HifIRQProcessingMode = HIF_DEVICE_IRQ_ASYNC_SYNC; + + /* see if the HIF layer overrides this assumption */ + hif_configure_device(NULL, hif_device, + HIF_DEVICE_GET_IRQ_PROC_MODE, + &pdev->HifIRQProcessingMode, + sizeof(pdev->HifIRQProcessingMode)); + + switch (pdev->HifIRQProcessingMode) { + case HIF_DEVICE_IRQ_SYNC_ONLY: + AR_DEBUG_PRINTF(ATH_DEBUG_WARN, + ("HIF Interrupt processing is SYNC ONLY\n")); + /* see if HIF layer wants HTC to yield */ + hif_configure_device(NULL, hif_device, + HIF_DEVICE_GET_IRQ_YIELD_PARAMS, + &pdev->HifIRQYieldParams, + sizeof(pdev->HifIRQYieldParams)); + + if (pdev->HifIRQYieldParams.recv_packet_yield_count > 0) { + AR_DEBUG_PRINTF(ATH_DEBUG_WARN, + ("HIF req of DSR yield per %d RECV packets\n", + pdev->HifIRQYieldParams. + recv_packet_yield_count)); + pdev->DSRCanYield = true; + } + break; + case HIF_DEVICE_IRQ_ASYNC_SYNC: + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("HIF Interrupt processing is ASYNC and SYNC\n")); + break; + default: + A_ASSERT(false); + break; + } + + pdev->HifMaskUmaskRecvEvent = NULL; + + /* see if the HIF layer implements the mask/unmask recv + * events function + */ + hif_configure_device(NULL, hif_device, + HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC, + &pdev->HifMaskUmaskRecvEvent, + sizeof(pdev->HifMaskUmaskRecvEvent)); + + status = hif_dev_disable_interrupts(pdev); + + qdf_mem_zero(&htc_cbs, sizeof(struct htc_callbacks)); + /* the device layer handles these */ + htc_cbs.rw_compl_handler = hif_dev_rw_completion_handler; + htc_cbs.dsr_handler = hif_dev_dsr_handler; + htc_cbs.context = pdev; + status = hif_attach_htc(pdev->HIFDevice, &htc_cbs); + + HIF_EXIT(); + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_dev.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..7ee6582e6aa80beb29bae570c427c80cb6059f19 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_dev.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2013-2016, 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef HIF_SDIO_DEV_H_ +#define HIF_SDIO_DEV_H_ + +#include "qdf_net_types.h" +#include "a_types.h" +#include "athdefs.h" +#include "a_osapi.h" +#include +#include "athstartpack.h" +#include "hif_internal.h" +#include "if_sdio.h" + +struct hif_sdio_device *hif_dev_from_hif(struct hif_sdio_dev *hif_device); + +struct hif_sdio_device *hif_dev_create(struct hif_sdio_dev *hif_device, + struct hif_msg_callbacks *callbacks, + void *target); + +void hif_dev_destroy(struct hif_sdio_device *htc_sdio_device); + +QDF_STATUS hif_dev_setup(struct hif_sdio_device *htc_sdio_device); + +QDF_STATUS hif_dev_enable_interrupts(struct hif_sdio_device *htc_sdio_device); + +QDF_STATUS hif_dev_disable_interrupts(struct hif_sdio_device *htc_sdio_device); + +QDF_STATUS hif_dev_send_buffer(struct hif_sdio_device *htc_sdio_device, + unsigned int transfer_id, uint8_t pipe, + unsigned int nbytes, qdf_nbuf_t buf); + +QDF_STATUS hif_dev_process_pending_irqs(struct hif_sdio_device *pdev, + bool *done, + bool *async_processing); + +void hif_dev_mask_interrupts(struct hif_sdio_device *pdev); + +QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_dev *pdev, + uint16_t service_id, + uint8_t *ul_pipe, + uint8_t *dl_pipe); + +void hif_dev_unmask_interrupts(struct hif_sdio_device *pdev); + +int hif_dev_setup_device(struct hif_sdio_device *pdev); + +int hif_dev_get_fifo_address(struct hif_sdio_dev *pdev, + void *config, + uint32_t config_len); + +void hif_dev_get_block_size(void *config); + +void hif_dev_set_mailbox_swap(struct hif_sdio_dev *pdev); + +bool hif_dev_get_mailbox_swap(struct hif_sdio_dev *pdev); + +QDF_STATUS hif_read_write(struct hif_sdio_dev *device, unsigned long address, + char *buffer, uint32_t length, uint32_t request, + void *context); + +#ifdef CONFIG_SDIO_TRANSFER_MAILBOX +static inline struct hif_sdio_dev *get_hif_device(struct hif_softc *hif_ctx, + struct sdio_func *func) +{ + qdf_assert(func); + return (struct hif_sdio_dev *)sdio_get_drvdata(func); +} + +/** + * hif_sdio_set_drvdata() - set wlan driver data into upper layer private + * @hif_ctx: HIF object + * @func: pointer to sdio function + * @hifdevice: pointer to hif device + * + * Return: zero for success. + */ +static inline int hif_sdio_set_drvdata(struct hif_softc *hif_ctx, + struct sdio_func *func, + struct hif_sdio_dev *hifdevice) +{ + sdio_set_drvdata(func, hifdevice); + return 0; +} + +static inline int hif_dev_configure_pipes(struct hif_sdio_dev *pdev, + struct sdio_func *func) +{ + return 0; +} + +static inline int hif_dev_register_channels(struct hif_sdio_dev *dev, + struct sdio_func *func) +{ + return 0; +} + +static inline void hif_dev_unregister_channels(struct hif_sdio_dev *dev, + struct sdio_func *func) +{ +} +#else +static inline struct hif_sdio_dev *get_hif_device(struct hif_softc *hif_ctx, + struct sdio_func *func) +{ + struct hif_sdio_softc *scn = (struct hif_sdio_softc *)hif_ctx; + + return (struct hif_sdio_dev *)scn->hif_handle; +} + +/** + * hif_sdio_set_drvdata() - set wlan driver data into upper layer private + * @hif_ctx: HIF object + * @func: pointer to sdio function + * @hifdevice: pointer to hif device + * + * Return: zero for success. + */ +static inline int hif_sdio_set_drvdata(struct hif_softc *hif_ctx, + struct sdio_func *func, + struct hif_sdio_dev *hifdevice) +{ + struct hif_sdio_softc *sc = (struct hif_sdio_softc *)hif_ctx; + + sc->hif_handle = hifdevice; + + return 0; +} + +int hif_dev_configure_pipes(struct hif_sdio_dev *pdev, + struct sdio_func *func); + +int hif_dev_register_channels(struct hif_sdio_dev *dev, + struct sdio_func *func); + +void hif_dev_unregister_channels(struct hif_sdio_dev *dev, + struct sdio_func *func); +#endif /* SDIO_TRANSFER */ +QDF_STATUS hif_enable_func(struct hif_softc *ol_sc, struct hif_sdio_dev *device, + struct sdio_func *func, bool resume); +QDF_STATUS hif_disable_func(struct hif_sdio_dev *device, + struct sdio_func *func, + bool reset); +A_STATUS hif_sdio_probe(struct hif_softc *ol_sc, + struct sdio_func *func, + struct hif_sdio_dev *device); +#endif /* HIF_SDIO_DEV_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_internal.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..b378575d085489bdb07b65752ba077cbba309036 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_internal.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HIF_SDIO_INTERNAL_H_ +#define _HIF_SDIO_INTERNAL_H_ + +#include "a_debug.h" +#include "hif_sdio_dev.h" +#include "htc_packet.h" +#include "htc_api.h" +#include "hif_internal.h" + +#if defined(CONFIG_SDIO_TRANSFER_MAILBOX) +#include +#elif defined(CONFIG_SDIO_TRANSFER_ADMA) +#include +#else +#error "Error - Invalid transfer method" +#endif + +#define INVALID_MAILBOX_NUMBER 0xFF + +#define HIF_SDIO_RX_BUFFER_SIZE 1792 +#define HIF_SDIO_RX_DATA_OFFSET 64 + +/* TODO: print output level and mask control */ +#define ATH_DEBUG_IRQ ATH_DEBUG_MAKE_MODULE_MASK(4) +#define ATH_DEBUG_XMIT ATH_DEBUG_MAKE_MODULE_MASK(5) +#define ATH_DEBUG_RECV ATH_DEBUG_MAKE_MODULE_MASK(6) + +#define ATH_DEBUG_MAX_MASK 32 + +#define SDIO_NUM_DATA_RX_BUFFERS 64 +#define SDIO_DATA_RX_SIZE 1664 + +struct hif_sdio_device { + struct hif_sdio_dev *HIFDevice; + qdf_spinlock_t Lock; + qdf_spinlock_t TxLock; + qdf_spinlock_t RxLock; + struct hif_msg_callbacks hif_callbacks; + uint32_t BlockSize; + uint32_t BlockMask; + enum hif_device_irq_mode HifIRQProcessingMode; + struct hif_device_irq_yield_params HifIRQYieldParams; + bool DSRCanYield; + HIF_MASK_UNMASK_RECV_EVENT HifMaskUmaskRecvEvent; + int CurrentDSRRecvCount; + int RecheckIRQStatusCnt; + uint32_t RecvStateFlags; + void *pTarget; + struct devRegisters devRegisters; +#ifdef CONFIG_SDIO_TRANSFER_MAILBOX + bool swap_mailbox; + struct hif_device_mbox_info MailBoxInfo; +#endif +}; + +#define LOCK_HIF_DEV(device) qdf_spin_lock(&(device)->Lock) +#define UNLOCK_HIF_DEV(device) qdf_spin_unlock(&(device)->Lock) +#define LOCK_HIF_DEV_RX(t) qdf_spin_lock(&(t)->RxLock) +#define UNLOCK_HIF_DEV_RX(t) qdf_spin_unlock(&(t)->RxLock) +#define LOCK_HIF_DEV_TX(t) qdf_spin_lock(&(t)->TxLock) +#define UNLOCK_HIF_DEV_TX(t) qdf_spin_unlock(&(t)->TxLock) + +#define DEV_CALC_RECV_PADDED_LEN(pDev, length) \ + (((length) + (pDev)->BlockMask) & (~((pDev)->BlockMask))) +#define DEV_CALC_SEND_PADDED_LEN(pDev, length) \ + DEV_CALC_RECV_PADDED_LEN(pDev, length) +#define DEV_IS_LEN_BLOCK_ALIGNED(pDev, length) \ + (((length) % (pDev)->BlockSize) == 0) + +#define HTC_RECV_WAIT_BUFFERS (1 << 0) +#define HTC_OP_STATE_STOPPING (1 << 0) + +#define HTC_RX_PKT_IGNORE_LOOKAHEAD (1 << 0) +#define HTC_RX_PKT_REFRESH_HDR (1 << 1) +#define HTC_RX_PKT_PART_OF_BUNDLE (1 << 2) +#define HTC_RX_PKT_NO_RECYCLE (1 << 3) +#define HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK (1 << 4) + +#define IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pDev) \ + ((pDev)->HifIRQProcessingMode != HIF_DEVICE_IRQ_SYNC_ONLY) + +/* hif_sdio_dev.c */ +HTC_PACKET *hif_dev_alloc_rx_buffer(struct hif_sdio_device *pDev); + +/* hif_sdio_recv.c */ +QDF_STATUS hif_dev_rw_completion_handler(void *context, QDF_STATUS status); +QDF_STATUS hif_dev_dsr_handler(void *context); + +#endif /* _HIF_SDIO_INTERNAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/if_sdio.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/if_sdio.c new file mode 100644 index 0000000000000000000000000000000000000000..102b84d09fff387d8e53f46ada087622e5493c32 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/if_sdio.c @@ -0,0 +1,304 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef EXPORT_SYMTAB +#define EXPORT_SYMTAB +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "if_sdio.h" +#include +#include +#include "regtable_sdio.h" +#include +#include "target_type.h" +#include "epping_main.h" +#include "targaddrs.h" +#include "sdio_api.h" +#include +#ifndef REMOVE_PKT_LOG +#include "ol_txrx_types.h" +#include "pktlog_ac_api.h" +#include "pktlog_ac.h" +#endif +#ifndef ATH_BUS_PM +#ifdef CONFIG_PM +#define ATH_BUS_PM +#endif /* CONFIG_PM */ +#endif /* ATH_BUS_PM */ + +#ifndef REMOVE_PKT_LOG +struct ol_pl_os_dep_funcs *g_ol_pl_os_dep_funcs; +#endif +#define HIF_SDIO_LOAD_TIMEOUT 1000 + +/** + * hif_sdio_bus_suspend() - suspend the bus + * + * This function suspends the bus, but sdio doesn't need to suspend. + * Therefore do nothing. + * + * Return: 0 for success and non-zero for failure + */ +int hif_sdio_bus_suspend(struct hif_softc *hif_ctx) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *hif_device = scn->hif_handle; + struct device *dev = &hif_device->func->dev; + + return hif_device_suspend(hif_ctx, dev); +} + + +/** + * hif_sdio_bus_resume() - hif resume API + * + * This function resumes the bus. but sdio doesn't need to resume. + * Therefore do nothing. + * + * Return: 0 for success and non-zero for failure + */ +int hif_sdio_bus_resume(struct hif_softc *hif_ctx) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *hif_device = scn->hif_handle; + struct device *dev = &hif_device->func->dev; + + hif_device_resume(hif_ctx, dev); + return 0; +} + +/** + * hif_enable_power_gating() - enable HW power gating + * + * Return: n/a + */ +void hif_enable_power_gating(void *hif_ctx) +{ +} + +/** + * hif_sdio_close() - hif_bus_close + * + * Return: None + */ +void hif_sdio_close(struct hif_softc *hif_sc) +{ +} + +/** + * hif_sdio_open() - hif_bus_open + * @hif_sc: hif context + * @bus_type: bus type + * + * Return: QDF status + */ +QDF_STATUS hif_sdio_open(struct hif_softc *hif_sc, + enum qdf_bus_type bus_type) +{ + hif_sc->bus_type = bus_type; + + return QDF_STATUS_SUCCESS; +} + +void hif_get_target_revision(struct hif_softc *ol_sc) +{ + struct hif_softc *ol_sc_local = (struct hif_softc *)ol_sc; + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc_local); + uint32_t chip_id = 0; + QDF_STATUS rv; + + rv = hif_diag_read_access(hif_hdl, + (CHIP_ID_ADDRESS | RTC_SOC_BASE_ADDRESS), &chip_id); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s[%d]: get chip id fail\n", __func__, __LINE__); + } else { + ol_sc_local->target_info.target_revision = + CHIP_ID_REVISION_GET(chip_id); + } +} + +/** + * hif_sdio_enable_bus() - hif_enable_bus + * @hif_sc: hif context + * @dev: dev + * @bdev: bus dev + * @bid: bus id + * @type: bus type + * + * Return: QDF_STATUS + */ +QDF_STATUS hif_sdio_enable_bus(struct hif_softc *ol_sc, struct device *dev, + void *bdev, const struct hif_bus_id *bid, + enum hif_enable_type type) +{ + int ret = 0; + const struct sdio_device_id *id = (const struct sdio_device_id *)bid; + + if (hif_sdio_device_inserted(ol_sc, dev, id)) { + HIF_ERROR("wlan: %s hif_sdio_device_inserted failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + return ret; +} + + +/** + * hif_sdio_disable_bus() - sdio disable bus + * @hif_sc: hif softc pointer + * + * Return: none + */ +void hif_sdio_disable_bus(struct hif_softc *hif_sc) +{ + struct sdio_func *func = dev_to_sdio_func(hif_sc->qdf_dev->dev); + + hif_sdio_device_removed(hif_sc, func); +} + +/** + * hif_sdio_get_config_item - sdio configure bus + * @hif_sc: hif context + * @opcode: configuration type + * @config: configuration value to set + * @config_len: configuration length + * + * Return: QDF_STATUS_SUCCESS for success + */ +QDF_STATUS hif_sdio_get_config_item(struct hif_softc *hif_sc, + int opcode, void *config, uint32_t config_len) +{ + struct hif_sdio_softc *sc = HIF_GET_SDIO_SOFTC(hif_sc); + struct hif_sdio_dev *hif_device = sc->hif_handle; + + return hif_configure_device(hif_sc, hif_device, opcode, + config, config_len); +} + +/** + * hif_sdio_set_mailbox_swap - set mailbox swap + * @hif_sc: hif context + * + * Return: None + */ +void hif_sdio_set_mailbox_swap(struct hif_softc *hif_sc) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_sc); + struct hif_sdio_dev *hif_device = scn->hif_handle; + + hif_dev_set_mailbox_swap(hif_device); +} + +/** + * hif_sdio_claim_device - set mailbox swap + * @hif_sc: hif context + * + * Return: None + */ +void hif_sdio_claim_device(struct hif_softc *hif_sc) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_sc); + struct hif_sdio_dev *hif_device = scn->hif_handle; + + hif_device->claimed_ctx = hif_sc; +} + +/** + * hif_sdio_mask_interrupt_call() - disbale hif device irq + * @scn: pointr to softc structure + * + * Return: None + */ +void hif_sdio_mask_interrupt_call(struct hif_softc *scn) +{ + struct hif_sdio_softc *hif_ctx = HIF_GET_SDIO_SOFTC(scn); + struct hif_sdio_dev *hif_device = hif_ctx->hif_handle; + + hif_mask_interrupt(hif_device); +} + +/** + * hif_trigger_dump() - trigger various dump cmd + * @scn: struct hif_opaque_softc + * @cmd_id: dump command id + * @start: start/stop dump + * + * Return: None + */ +void hif_trigger_dump(struct hif_opaque_softc *scn, uint8_t cmd_id, bool start) +{ +} + +/** + * hif_check_fw_reg() - check fw selfrecovery indication + * @hif_ctx: hif_opaque_softc + * + * Return: int + */ +int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx) +{ + int ret = 1; + uint32_t fw_indication = 0; + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + + if (hif_diag_read_access(hif_ctx, FW_INDICATOR_ADDRESS, + &fw_indication) != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s Get fw indication failed\n", __func__); + return 1; + } + HIF_INFO("%s: fw indication is 0x%x def 0x%x.\n", __func__, + fw_indication, FW_IND_HELPER); + if (fw_indication & FW_IND_HELPER) + ret = 0; + + return ret; +} + +/** + * hif_wlan_disable() - call the platform driver to disable wlan + * @scn: scn + * + * Return: void + */ +void hif_wlan_disable(struct hif_softc *scn) +{ +} + +/** + * hif_sdio_needs_bmi() - return true if the soc needs bmi through the driver + * @scn: hif context + * + * Return: true if soc needs driver bmi otherwise false + */ +bool hif_sdio_needs_bmi(struct hif_softc *scn) +{ + return true; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/if_sdio.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/if_sdio.h new file mode 100644 index 0000000000000000000000000000000000000000..191ea615ec237863b037d0798c4476bea9709904 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/if_sdio.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __IF_SDIO_H__ +#define __IF_SDIO_H__ + +#include +#include +#include +#include +#include +#include +#include "a_osapi.h" +#include "pld_sdio.h" +#include "hif_internal.h" + + +#define AR6320_HEADERS_DEF + +#define ATH_DBG_DEFAULT 0 + +#define RAMDUMP_ADDR 0x8F000000 +#define RAMDUMP_SIZE 0x700000 + +struct hif_sdio_softc { + struct hif_softc ol_sc; + struct device *dev; + /* + * Guard changes to Target HW state and to software + * structures that track hardware state. + */ + spinlock_t target_lock; + void *hif_handle; + void *ramdump_base; + unsigned long ramdump_address; + unsigned long ramdump_size; + struct targetdef_s *targetdef; + struct hostdef_s *hostdef; +}; + +#if defined(CONFIG_ATH_PROCFS_DIAG_SUPPORT) +int athdiag_procfs_init(void *scn); +void athdiag_procfs_remove(void); +#else +static inline int athdiag_procfs_init(void *scn) +{ + return 0; +} + +static inline void athdiag_procfs_remove(void) +{ +} +#endif + +#define DMA_MAPPING_ERROR(dev, addr) dma_mapping_error((dev), (addr)) + +int ath_sdio_probe(void *context, void *hif_handle); +void ath_sdio_remove(void *context, void *hif_handle); +int ath_sdio_suspend(void *context); +int ath_sdio_resume(void *context); + +/*These functions are exposed to HDD*/ +void hif_init_qdf_ctx(qdf_device_t qdf_dev, void *ol_sc); +void hif_deinit_qdf_ctx(void *ol_sc); + +int hif_sdio_device_inserted(struct hif_softc *ol_sc, + struct device *dev, + const struct sdio_device_id *id); +void hif_sdio_stop(struct hif_softc *hif_ctx); +void hif_sdio_shutdown(struct hif_softc *hif_ctx); +void hif_sdio_device_removed(struct hif_softc *hif_ctx, struct sdio_func *func); +int hif_device_suspend(struct hif_softc *ol_sc, struct device *dev); +int hif_device_resume(struct hif_softc *ol_sc, struct device *dev); +void hif_register_tbl_attach(struct hif_softc *scn, + u32 hif_type); +void target_register_tbl_attach(struct hif_softc *scn, + u32 target_type); +void hif_enable_power_gating(void *hif_ctx); +void hif_sdio_close(struct hif_softc *hif_sc); +QDF_STATUS hif_sdio_open(struct hif_softc *hif_sc, + enum qdf_bus_type bus_type); +void hif_ar6k_fetch_target_regs(struct hif_sdio_dev *hif_device, + uint32_t *targregs); +QDF_STATUS hif_reg_based_get_target_info(struct hif_opaque_softc *hif_ctx, + struct bmi_target_info *targ_info); +#endif /* __IF_SDIO_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/include/hif_internal.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/include/hif_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..41001c848ae984d2183361516ade24ef4ece9e96 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/include/hif_internal.h @@ -0,0 +1,487 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HIF_INTERNAL_H_ +#define _HIF_INTERNAL_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "athdefs.h" +#include "a_types.h" +#include "a_osapi.h" +#include /* qdf_device_t, qdf_print */ +#include /* qdf_system_ticks, etc. */ +#include +#include +#include +#include +#include "hif.h" +#include "hif_debug.h" +#include "hif_sdio_common.h" +#include +#include "hif_main.h" + +#define HIF_LINUX_MMC_SCATTER_SUPPORT + +#define BUS_REQUEST_MAX_NUM 105 + +#define SDIO_CLOCK_FREQUENCY_DEFAULT 25000000 +#define SDWLAN_ENABLE_DISABLE_TIMEOUT 20 +#define FLAGS_CARD_ENAB 0x02 +#define FLAGS_CARD_IRQ_UNMSK 0x04 + +/* + * direction - Direction of transfer (HIF_SDIO_READ/HIF_SDIO_WRITE). + */ +#define HIF_SDIO_READ 0x00000001 +#define HIF_SDIO_WRITE 0x00000002 +#define HIF_SDIO_DIR_MASK (HIF_SDIO_READ | HIF_SDIO_WRITE) + +/* + * type - An interface may support different kind of rd/wr commands. + * For example: SDIO supports CMD52/CMD53s. In case of MSIO it + * translates to using different kinds of TPCs. The command type + * is thus divided into a basic and an extended command and can + * be specified using HIF_BASIC_IO/HIF_EXTENDED_IO. + */ +#define HIF_BASIC_IO 0x00000004 +#define HIF_EXTENDED_IO 0x00000008 +#define HIF_TYPE_MASK (HIF_BASIC_IO | HIF_EXTENDED_IO) + +/* + * This indicates the whether the command is to be executed in a + * blocking or non-blocking fashion (HIF_SYNCHRONOUS/ + * HIF_ASYNCHRONOUS). The read/write data paths in HTC have been + * implemented using the asynchronous mode allowing the the bus + * driver to indicate the completion of operation through the + * registered callback routine. The requirement primarily comes + * from the contexts these operations get called from (a driver's + * transmit context or the ISR context in case of receive). + * Support for both of these modes is essential. + */ +#define HIF_SYNCHRONOUS 0x00000010 +#define HIF_ASYNCHRONOUS 0x00000020 +#define HIF_EMODE_MASK (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS) + +/* + * An interface may support different kinds of commands based on + * the tradeoff between the amount of data it can carry and the + * setup time. Byte and Block modes are supported (HIF_BYTE_BASIS/ + * HIF_BLOCK_BASIS). In case of latter, the data is rounded off + * to the nearest block size by padding. The size of the block is + * configurable at compile time using the HIF_BLOCK_SIZE and is + * negotiated with the target during initialization after the + * AR6000 interrupts are enabled. + */ +#define HIF_BYTE_BASIS 0x00000040 +#define HIF_BLOCK_BASIS 0x00000080 +#define HIF_DMODE_MASK (HIF_BYTE_BASIS | HIF_BLOCK_BASIS) + +/* + * This indicates if the address has to be incremented on AR6000 + * after every read/write operation (HIF?FIXED_ADDRESS/ + * HIF_INCREMENTAL_ADDRESS). + */ +#define HIF_FIXED_ADDRESS 0x00000100 +#define HIF_INCREMENTAL_ADDRESS 0x00000200 +#define HIF_AMODE_MASK (HIF_FIXED_ADDRESS | \ + HIF_INCREMENTAL_ADDRESS) + +#define HIF_WR_ASYNC_BYTE_FIX \ + (HIF_SDIO_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_FIXED_ADDRESS) +#define HIF_WR_ASYNC_BYTE_INC \ + (HIF_SDIO_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_WR_ASYNC_BLOCK_INC \ + (HIF_SDIO_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_WR_SYNC_BYTE_FIX \ + (HIF_SDIO_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_FIXED_ADDRESS) +#define HIF_WR_SYNC_BYTE_INC \ + (HIF_SDIO_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_WR_SYNC_BLOCK_INC \ + (HIF_SDIO_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_WR_ASYNC_BLOCK_FIX \ + (HIF_SDIO_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS) +#define HIF_WR_SYNC_BLOCK_FIX \ + (HIF_SDIO_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS) +#define HIF_RD_SYNC_BYTE_INC \ + (HIF_SDIO_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_RD_SYNC_BYTE_FIX \ + (HIF_SDIO_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_FIXED_ADDRESS) +#define HIF_RD_ASYNC_BYTE_FIX \ + (HIF_SDIO_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_FIXED_ADDRESS) +#define HIF_RD_ASYNC_BLOCK_FIX \ + (HIF_SDIO_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS) +#define HIF_RD_ASYNC_BYTE_INC \ + (HIF_SDIO_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_RD_ASYNC_BLOCK_INC \ + (HIF_SDIO_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_RD_SYNC_BLOCK_INC \ + (HIF_SDIO_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_RD_SYNC_BLOCK_FIX \ + (HIF_SDIO_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS) + +enum hif_sdio_device_state { + HIF_DEVICE_STATE_ON, + HIF_DEVICE_STATE_DEEPSLEEP, + HIF_DEVICE_STATE_CUTPOWER, + HIF_DEVICE_STATE_WOW +}; + +struct bus_request { + struct bus_request *next; /* link list of available requests */ + struct bus_request *inusenext; /* link list of in use requests */ + struct semaphore sem_req; + unsigned long address; /* request data */ + char *buffer; + uint32_t length; + uint32_t request; + void *context; + QDF_STATUS status; + struct HIF_SCATTER_REQ_PRIV *scatter_req; +}; + +#define HIF_ADMA_MAX_CHANS 2 +#ifdef CONFIG_SDIO_TRANSFER_ADMA +struct rx_q_entry { + qdf_list_node_t entry; + qdf_nbuf_t nbuf; +}; +#endif + +struct hif_sdio_dev { + struct sdio_func *func; + qdf_spinlock_t asynclock; + struct task_struct *async_task; /* task to handle async commands */ + struct semaphore sem_async; /* wake up for async task */ + int async_shutdown; /* stop the async task */ + struct completion async_completion; /* thread completion */ + struct bus_request *asyncreq; /* request for async tasklet */ + struct bus_request *taskreq; /* async tasklet data */ + qdf_spinlock_t lock; + struct bus_request *bus_request_free_queue; /* free list */ + struct bus_request bus_request[BUS_REQUEST_MAX_NUM]; /* bus requests */ + void *claimed_ctx; + struct htc_callbacks htc_callbacks; + uint8_t *dma_buffer; + DL_LIST scatter_req_head; /* scatter request list head */ + bool scatter_enabled; /* scatter enabled flag */ + bool is_suspend; + bool is_disabled; + atomic_t irq_handling; + enum HIF_DEVICE_POWER_CHANGE_TYPE power_config; + enum hif_sdio_device_state device_state; + const struct sdio_device_id *id; + struct mmc_host *host; + void *htc_context; +#ifdef CONFIG_SDIO_TRANSFER_ADMA + struct sdio_al_client_handle *al_client; + struct sdio_al_channel_handle *al_chan[HIF_ADMA_MAX_CHANS]; + uint8_t adma_chans_used; + qdf_list_t rx_q; + qdf_spinlock_t rx_q_lock; + qdf_work_t rx_q_alloc_work; + bool rx_q_alloc_work_scheduled; +#endif +}; + +struct HIF_DEVICE_OS_DEVICE_INFO { + void *os_dev; +}; + +struct hif_mailbox_properties { + u_int32_t extended_address; /* extended address for larger writes */ + u_int32_t extended_size; +}; + +struct hif_device_irq_yield_params { + int recv_packet_yield_count; + /* max number of packets to force DSR to return */ +}; + +struct hif_device_mbox_info { + u_int32_t mbox_addresses[4]; + /* first element for legacy HIFs and return the address and ARRAY of + * 32bit words + */ + struct hif_mailbox_properties mbox_prop[4]; + u_int32_t gmbox_address; + u_int32_t gmbox_size; + u_int32_t flags; + /* flags to describe mbox behavior or usage */ +}; + +enum hif_device_irq_mode { + HIF_DEVICE_IRQ_SYNC_ONLY, + /* DSR to process all interrupts before returning */ + HIF_DEVICE_IRQ_ASYNC_SYNC, /* DSR to process interrupts */ +}; + +/* other interrupts are pending, host + * needs to read the to monitor + */ +#define HIF_OTHER_EVENTS (1 << 0) +/* pending recv packet */ +#define HIF_RECV_MSG_AVAIL (1 << 1) + +struct _HIF_PENDING_EVENTS_INFO { + uint32_t events; + uint32_t look_ahead; + uint32_t available_recv_bytes; +}; + +/* hif-sdio pending events handler type, some HIF modules + * use special mechanisms to detect packet available and other interrupts + */ +typedef int (*HIF_PENDING_EVENTS_FUNC)(struct hif_sdio_dev *device, + struct _HIF_PENDING_EVENTS_INFO * + events, void *async_context); + +#define HIF_MASK_RECV true +#define HIF_UNMASK_RECV false +/* hif-sdio Handler type to mask receive events */ +typedef int (*HIF_MASK_UNMASK_RECV_EVENT)(struct hif_sdio_dev *device, + bool mask, + void *async_context); + +QDF_STATUS hif_configure_device(struct hif_softc *ol_sc, + struct hif_sdio_dev *device, + enum hif_device_config_opcode opcode, + void *config, uint32_t config_len); + +QDF_STATUS hif_attach_htc(struct hif_sdio_dev *device, + struct htc_callbacks *callbacks); + +void hif_ack_interrupt(struct hif_sdio_dev *device); + +void hif_mask_interrupt(struct hif_sdio_dev *device); + +void hif_un_mask_interrupt(struct hif_sdio_dev *device); + +int hif_sdio_configure_pipes(struct hif_sdio_dev *dev, struct sdio_func *func); + +struct _HIF_SCATTER_ITEM { + u_int8_t *buffer; /* CPU accessible address of buffer */ + int length; /* length of transfer to/from this buffer */ + void *caller_contexts[2]; /* caller context */ +}; + +struct _HIF_SCATTER_REQ; + +typedef void (*HIF_SCATTER_COMP_CB)(struct _HIF_SCATTER_REQ *); + +enum HIF_SCATTER_METHOD { + HIF_SCATTER_NONE = 0, + HIF_SCATTER_DMA_REAL, /* Real SG support no restrictions */ + HIF_SCATTER_DMA_BOUNCE, /* Uses SG DMA */ +}; + +struct _HIF_SCATTER_REQ { + DL_LIST list_link; /* link management */ + u_int32_t address; /* address for the read/write operation */ + u_int32_t request; /* request flags */ + u_int32_t total_length; /* total length of entire transfer */ + u_int32_t caller_flags; /* caller specific flags */ + HIF_SCATTER_COMP_CB completion_routine; /* completion callback */ + int completion_status; /* status of completion */ + void *context; /* caller context for this request */ + int valid_scatter_entries; /* no of valid entries */ + /* scatter method handled by HIF */ + enum HIF_SCATTER_METHOD scatter_method; + void *hif_private[4]; /* HIF private area */ + u_int8_t *scatter_bounce_buffer; /* bounce buffers */ + struct _HIF_SCATTER_ITEM scatter_list[1]; /* start of scatter list */ +}; + +typedef struct _HIF_SCATTER_REQ * (*HIF_ALLOCATE_SCATTER_REQUEST)( + struct hif_sdio_dev *device); +typedef void (*HIF_FREE_SCATTER_REQUEST)(struct hif_sdio_dev *device, + struct _HIF_SCATTER_REQ *request); +typedef QDF_STATUS (*HIF_READWRITE_SCATTER)(struct hif_sdio_dev *device, + struct _HIF_SCATTER_REQ *request); + +struct HIF_DEVICE_SCATTER_SUPPORT_INFO { + /* information returned from HIF layer */ + HIF_ALLOCATE_SCATTER_REQUEST allocate_req_func; + HIF_FREE_SCATTER_REQUEST free_req_func; + HIF_READWRITE_SCATTER read_write_scatter_func; + int max_scatter_entries; + int max_tx_size_per_scatter_req; +}; + +void hif_get_target_revision(struct hif_softc *ol_sc); +struct HIF_SCATTER_REQ_PRIV; + +#define HIF_DMA_BUFFER_SIZE (4 * 1024) +#define CMD53_FIXED_ADDRESS 1 +#define CMD53_INCR_ADDRESS 2 + +struct bus_request *hif_allocate_bus_request(struct hif_sdio_dev *device); +void hif_free_bus_request(struct hif_sdio_dev *device, + struct bus_request *busrequest); +void add_to_async_list(struct hif_sdio_dev *device, + struct bus_request *busrequest); +void hif_dump_cccr(struct hif_sdio_dev *hif_device); + +#ifdef HIF_LINUX_MMC_SCATTER_SUPPORT + +#define MAX_SCATTER_REQUESTS 4 +#define MAX_SCATTER_ENTRIES_PER_REQ 16 +#define MAX_SCATTER_REQ_TRANSFER_SIZE (32*1024) + +struct HIF_SCATTER_REQ_PRIV { + struct _HIF_SCATTER_REQ *hif_scatter_req; + struct hif_sdio_dev *device; /* this device */ + struct bus_request *busrequest; + /* scatter list for linux */ + struct scatterlist sgentries[MAX_SCATTER_ENTRIES_PER_REQ]; +}; + +#define ATH_DEBUG_SCATTER ATH_DEBUG_MAKE_MODULE_MASK(0) + +QDF_STATUS setup_hif_scatter_support(struct hif_sdio_dev *device, + struct HIF_DEVICE_SCATTER_SUPPORT_INFO *info); +void cleanup_hif_scatter_resources(struct hif_sdio_dev *device); +QDF_STATUS do_hif_read_write_scatter(struct hif_sdio_dev *device, + struct bus_request *busrequest); + +#else /* HIF_LINUX_MMC_SCATTER_SUPPORT */ + +static inline QDF_STATUS setup_hif_scatter_support(struct hif_sdio_dev *device, + struct HIF_DEVICE_SCATTER_SUPPORT_INFO *info) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static inline QDF_STATUS do_hif_read_write_scatter(struct hif_sdio_dev *device, + struct bus_request *busrequest) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +#define cleanup_hif_scatter_resources(d) { } + +#endif /* HIF_LINUX_MMC_SCATTER_SUPPORT */ + +#define SDIO_SET_CMD52_ARG(arg, rw, func, raw, address, writedata) \ + ((arg) = (((rw) & 1) << 31) | \ + (((func) & 0x7) << 28) | \ + (((raw) & 1) << 27) | \ + (1 << 26) | \ + (((address) & 0x1FFFF) << 9) | \ + (1 << 8) | \ + ((writedata) & 0xFF)) + +#define SDIO_SET_CMD52_READ_ARG(arg, func, address) \ + SDIO_SET_CMD52_ARG(arg, 0, (func), 0, address, 0x00) +#define SDIO_SET_CMD52_WRITE_ARG(arg, func, address, value) \ + SDIO_SET_CMD52_ARG(arg, 1, (func), 0, address, value) + +void hif_sdio_quirk_force_drive_strength(struct hif_softc *ol_sc, + struct sdio_func *func); +void hif_sdio_quirk_write_cccr(struct hif_softc *ol_sc, struct sdio_func *func); +int hif_sdio_quirk_mod_strength(struct hif_softc *ol_sc, + struct sdio_func *func); +int hif_sdio_quirk_async_intr(struct hif_softc *ol_sc, struct sdio_func *func); +int hif_sdio_set_bus_speed(struct hif_softc *ol_sc, struct sdio_func *func); +int hif_sdio_set_bus_width(struct hif_softc *ol_sc, struct sdio_func *func); +QDF_STATUS hif_sdio_func_disable(struct hif_sdio_dev *device, + struct sdio_func *func, + bool reset); +QDF_STATUS reinit_sdio(struct hif_sdio_dev *device); + +int func0_cmd52_write_byte(struct mmc_card *card, + unsigned int address, + unsigned char byte); + +int func0_cmd52_read_byte(struct mmc_card *card, + unsigned int address, + unsigned char *byte); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) +/** + * sdio_card_highspeed() - check if high speed supported + * @card: pointer to mmc card struct + * + * Return: non zero if card supports high speed. + */ +static inline int sdio_card_highspeed(struct mmc_card *card) +{ + return mmc_card_highspeed(card); +} +#else +static inline int sdio_card_highspeed(struct mmc_card *card) +{ + return mmc_card_hs(card); +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) +/** + * sdio_card_set_highspeed() - set high speed + * @card: pointer to mmc card struct + * + * Return: none. + */ +static inline void sdio_card_set_highspeed(struct mmc_card *card) +{ + mmc_card_set_highspeed(card); +} +#else +static inline void sdio_card_set_highspeed(struct mmc_card *card) +{ +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) +/** + * sdio_card_state() - set card state + * @card: pointer to mmc card struct + * + * Return: none. + */ +static inline void sdio_card_state(struct mmc_card *card) +{ + card->state &= ~MMC_STATE_HIGHSPEED; +} +#else +static inline void sdio_card_state(struct mmc_card *card) +{ +} +#endif +#endif /* _HIF_INTERNAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/dev_quirks.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/dev_quirks.c new file mode 100644 index 0000000000000000000000000000000000000000..7c7e63ba01185d2c75f436bba67bbaaec01d425e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/dev_quirks.c @@ -0,0 +1,737 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hif_sdio_dev.h" +#include "if_sdio.h" +#include "regtable_sdio.h" +#include "wma_api.h" +#include "hif_internal.h" +#include + +/* QUIRK PARAMETERS */ +unsigned int writecccr1; +module_param(writecccr1, uint, 0644); +unsigned int writecccr1value; +module_param(writecccr1value, uint, 0644); + +unsigned int writecccr2; +module_param(writecccr2, uint, 0644); +unsigned int writecccr2value; +module_param(writecccr2value, uint, 0644); + +unsigned int writecccr3; +module_param(writecccr3, uint, 0644); +unsigned int writecccr3value; +module_param(writecccr3value, uint, 0644); + +unsigned int writecccr4; +module_param(writecccr4, uint, 0644); +unsigned int writecccr4value; +module_param(writecccr4value, uint, 0644); + +unsigned int modstrength; +module_param(modstrength, uint, 0644); +MODULE_PARM_DESC(modstrength, "Adjust internal driver strength"); + +unsigned int mmcbuswidth; +/* PERM:S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH */ +module_param(mmcbuswidth, uint, 0644); +MODULE_PARM_DESC(mmcbuswidth, + "Set MMC driver Bus Width: 1-1Bit, 4-4Bit, 8-8Bit"); + +unsigned int mmcclock; +module_param(mmcclock, uint, 0644); +MODULE_PARM_DESC(mmcclock, "Set MMC driver Clock value"); + +#ifdef CONFIG_X86 +unsigned int asyncintdelay = 2; +module_param(asyncintdelay, uint, 0644); +MODULE_PARM_DESC(asyncintdelay, "Delay clock count for async interrupt, 2 is default, valid values are 1 and 2"); +#else +unsigned int asyncintdelay; +module_param(asyncintdelay, uint, 0644); +MODULE_PARM_DESC(asyncintdelay, "Delay clock count for async interrupt, 0 is default, valid values are 1 and 2"); +#endif + +unsigned int brokenirq; +module_param(brokenirq, uint, 0644); +MODULE_PARM_DESC(brokenirq, + "Set as 1 to use polling method instead of interrupt mode"); + +#ifdef CONFIG_SDIO_TRANSFER_MAILBOX +/** + * hif_sdio_force_drive_strength() - Set SDIO drive strength + * @ol_sc: softc instance + * @func: pointer to sdio_func + * + * This function forces the driver strength of the SDIO + * Call this with the sdhci host claimed + * + * Return: none. + */ +void hif_sdio_quirk_force_drive_strength(struct hif_softc *ol_sc, + struct sdio_func *func) +{ + int err = 0; + unsigned char value = 0; + uint32_t mask = 0, addr = SDIO_CCCR_DRIVE_STRENGTH; + + err = func0_cmd52_read_byte(func->card, addr, &value); + if (err) { + HIF_ERROR("%s: read driver strength 0x%02X fail %d\n", + __func__, addr, err); + return; + } + + mask = (SDIO_DRIVE_DTSx_MASK << SDIO_DRIVE_DTSx_SHIFT); + value = (value & ~mask) | SDIO_DTSx_SET_TYPE_D; + err = func0_cmd52_write_byte(func->card, addr, value); + if (err) { + HIF_ERROR("%s: write driver strength failed", __func__); + HIF_ERROR("%s: 0x%02X to 0x%02X failed: %d\n", __func__, + (uint32_t)value, addr, err); + return; + } + + value = 0; + addr = CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR; + err = func0_cmd52_read_byte(func->card, addr, &value); + if (err) { + HIF_ERROR("%s Read CCCR 0x%02X failed: %d\n", + __func__, addr, err); + return; + } + + mask = CCCR_SDIO_DRIVER_STRENGTH_ENABLE_MASK; + value = (value & ~mask) | CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A | + CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C | + CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D; + err = func0_cmd52_write_byte(func->card, addr, value); + if (err) + HIF_ERROR("%s Write CCCR 0x%02X to 0x%02X failed: %d\n", + __func__, addr, value, err); +} + +/** + * hif_sdio_quirk_async_intr() - Set asynchronous interrupt settings + * @ol_sc: softc instance + * @func: pointer to sdio_func + * + * The values are taken from the module parameter asyncintdelay + * Call this with the sdhci host claimed + * + * Return: none. + */ +int hif_sdio_quirk_async_intr(struct hif_softc *ol_sc, struct sdio_func *func) +{ + uint8_t data; + uint16_t manfid; + int set_async_irq = 0, ret = 0; + struct hif_sdio_dev *device = get_hif_device(ol_sc, func); + + manfid = device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK; + + switch (manfid) { + case MANUFACTURER_ID_AR6003_BASE: + set_async_irq = 1; + ret = + func0_cmd52_write_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG_AR6003, + SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6003); + if (ret) + return ret; + break; + case MANUFACTURER_ID_AR6320_BASE: + case MANUFACTURER_ID_QCA9377_BASE: + case MANUFACTURER_ID_QCA9379_BASE: + set_async_irq = 1; + ret = func0_cmd52_read_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG_AR6320, + &data); + if (ret) + return ret; + + data |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320; + ret = func0_cmd52_write_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG_AR6320, + data); + if (ret) + return ret; + break; + } + + if (asyncintdelay) { + /* Set CCCR 0xF0[7:6] to increase async interrupt delay clock + * to fix interrupt missing issue on dell 8460p + */ + + ret = func0_cmd52_read_byte(func->card, + CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS, + &data); + if (ret) + return ret; + + data = (data & ~CCCR_SDIO_ASYNC_INT_DELAY_MASK) | + ((asyncintdelay << CCCR_SDIO_ASYNC_INT_DELAY_LSB) & + CCCR_SDIO_ASYNC_INT_DELAY_MASK); + + ret = func0_cmd52_write_byte(func->card, + CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS, + data); + if (ret) + return ret; + } + + return ret; +} +#else +/** + * hif_sdio_force_drive_strength() - Set SDIO drive strength + * @ol_sc: softc instance + * @func: pointer to sdio_func + * + * This function forces the driver strength of the SDIO + * Call this with the sdhci host claimed + * + * Return: none. + */ +void hif_sdio_quirk_force_drive_strength(struct hif_softc *ol_sc, + struct sdio_func *func) +{ +} + +/** + * hif_sdio_quirk_async_intr() - Set asynchronous interrupt settings + * @ol_sc: softc instance + * @func: pointer to sdio_func + * + * The values are taken from the module parameter asyncintdelay + * Call this with the sdhci host claimed + * + * Return: none. + */ +int hif_sdio_quirk_async_intr(struct hif_softc *ol_sc, struct sdio_func *func) +{ + return 0; +} +#endif + +/** + * hif_sdio_quirk_write_cccr() - write a desired CCCR register + * @ol_sc: softc instance + * @func: pointer to sdio_func + * + * The values are taken from the module parameter writecccr + * Call this with the sdhci host claimed + * + * Return: none. + */ +void hif_sdio_quirk_write_cccr(struct hif_softc *ol_sc, struct sdio_func *func) +{ + int32_t err; + + if (writecccr1) { + err = func0_cmd52_write_byte(func->card, writecccr1, + writecccr1value); + if (err) + HIF_ERROR("%s Write CCCR 0x%02X to 0x%02X failed: %d\n", + __func__, + (unsigned int)writecccr1, + (unsigned int)writecccr1value, + err); + else + HIF_INFO("%s Write CCCR 0x%02X to 0x%02X OK\n", + __func__, + (unsigned int)writecccr1, + writecccr1value); + } + + if (writecccr2) { + err = func0_cmd52_write_byte(func->card, writecccr2, + writecccr2value); + if (err) + HIF_ERROR("%s Write CCCR 0x%02X to 0x%02X failed: %d\n", + __func__, + (unsigned int)writecccr2, + (unsigned int)writecccr2value, + err); + else + HIF_INFO("%s Write CCCR 0x%02X to 0x%02X OK\n", + __func__, + (unsigned int)writecccr2, + (unsigned int)writecccr2value); + } + if (writecccr3) { + err = func0_cmd52_write_byte(func->card, writecccr3, + writecccr3value); + if (err) + HIF_ERROR("%s Write CCCR 0x%02X to 0x%02X failed: %d\n", + __func__, + (unsigned int)writecccr3, + (unsigned int)writecccr3value, + err); + else + HIF_INFO("%s Write CCCR 0x%02X to 0x%02X OK\n", + __func__, + (unsigned int)writecccr3, + (unsigned int)writecccr3value); + } + if (writecccr4) { + err = func0_cmd52_write_byte(func->card, writecccr4, + writecccr4value); + if (err) + HIF_ERROR("%s Write CCCR 0x%02X to 0x%02X failed: %d\n", + __func__, + (unsigned int)writecccr4, + (unsigned int)writecccr4value, + err); + else + HIF_INFO("%s Write CCCR 0x%02X to 0x%02X OK\n", + __func__, + (unsigned int)writecccr4, + (unsigned int)writecccr4value); + } +} + +/** + * hif_sdio_quirk_mod_strength() - write a desired CCCR register + * @ol_sc: softc instance + * @func: pointer to sdio_func + * + * The values are taken from the module parameter writecccr + * Call this with the sdhci host claimed + * + * Return: none. + */ +int hif_sdio_quirk_mod_strength(struct hif_softc *ol_sc, struct sdio_func *func) +{ + int ret = 0; + uint32_t addr, value; + struct hif_sdio_dev *device = get_hif_device(ol_sc, func); + uint16_t manfid = device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK; + + if (!modstrength) /* TODO: Dont set this : scn is not popolated yet */ + return 0; + + if (!scn) { + HIF_ERROR("%s: scn is null", __func__); + return -1; + } + + if (!scn->hostdef) { + HIF_ERROR("%s: scn->hostdef is null", __func__); + return -1; + } + + switch (manfid) { + case MANUFACTURER_ID_QCN7605_BASE: + break; + default: + addr = WINDOW_DATA_ADDRESS; + value = 0x0FFF; + ret = sdio_memcpy_toio(func, addr, &value, 4); + if (ret) { + HIF_ERROR("%s write 0x%x 0x%x error:%d\n", + __func__, addr, value, ret); + break; + } + HIF_INFO("%s: addr 0x%x val 0x%x", __func__, addr, value); + + addr = WINDOW_WRITE_ADDR_ADDRESS; + value = 0x50F8; + ret = sdio_memcpy_toio(func, addr, &value, 4); + if (ret) { + HIF_ERROR("%s write 0x%x 0x%x error:%d\n", + __func__, addr, value, ret); + break; + } + HIF_INFO("%s: addr 0x%x val 0x%x\n", __func__, addr, value); + break; + } + + return ret; +} + +#if KERNEL_VERSION(3, 4, 0) <= LINUX_VERSION_CODE +#ifdef SDIO_BUS_WIDTH_8BIT +static int hif_cmd52_write_byte_8bit(struct sdio_func *func) +{ + return func0_cmd52_write_byte(func->card, SDIO_CCCR_IF, + SDIO_BUS_CD_DISABLE | SDIO_BUS_WIDTH_8BIT); +} +#else +static int hif_cmd52_write_byte_8bit(struct sdio_func *func) +{ + HIF_ERROR("%s: 8BIT Bus Width not supported\n", __func__); + return QDF_STATUS_E_FAILURE; +} +#endif +#endif + +/** + * hif_sdio_set_bus_speed() - Set the sdio bus speed + * @ol_sc: softc instance + * @func: pointer to sdio_func + * + * Return: 0 on success, error number otherwise. + */ +int hif_sdio_set_bus_speed(struct hif_softc *ol_sc, struct sdio_func *func) +{ + uint32_t clock, clock_set = 12500000; + struct hif_sdio_dev *device = get_hif_device(ol_sc, func); + uint16_t manfid; + + manfid = device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK; + + if (manfid == MANUFACTURER_ID_QCN7605_BASE) + return 0; + + if (mmcclock > 0) + clock_set = mmcclock; +#if (KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE) + if (sdio_card_highspeed(func->card)) +#else + if (mmc_card_hs(func->card)) +#endif + clock = 50000000; + else + clock = func->card->cis.max_dtr; + + if (clock > device->host->f_max) + clock = device->host->f_max; + + HIF_INFO("%s: Clock setting: (%d,%d)\n", __func__, + func->card->cis.max_dtr, device->host->f_max); + + /* Limit clock if specified */ + if (mmcclock > 0) { + HIF_INFO("%s: Limit clock from %d to %d\n", + __func__, clock, clock_set); + device->host->ios.clock = clock_set; + device->host->ops->set_ios(device->host, + &device->host->ios); + } + + return 0; +} + +/** + * hif_set_bus_width() - Set the sdio bus width + * @ol_sc: softc instance + * @func: pointer to sdio_func + * + * Return: 0 on success, error number otherwise. + */ +int hif_sdio_set_bus_width(struct hif_softc *ol_sc, struct sdio_func *func) +{ + int ret = 0; + uint16_t manfid; + uint8_t data = 0; + struct hif_sdio_dev *device = get_hif_device(ol_sc, func); + + manfid = device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK; + + if (manfid == MANUFACTURER_ID_QCN7605_BASE) + return ret; + +#if KERNEL_VERSION(3, 4, 0) <= LINUX_VERSION_CODE + if (mmcbuswidth == 0) + return ret; + + /* Set MMC Bus Width: 1-1Bit, 4-4Bit, 8-8Bit */ + if (mmcbuswidth == 1) { + data = SDIO_BUS_CD_DISABLE | SDIO_BUS_WIDTH_1BIT; + ret = func0_cmd52_write_byte(func->card, + SDIO_CCCR_IF, + data); + if (ret) + HIF_ERROR("%s: Bus Width 0x%x failed %d\n", + __func__, data, ret); + device->host->ios.bus_width = MMC_BUS_WIDTH_1; + device->host->ops->set_ios(device->host, + &device->host->ios); + } else if (mmcbuswidth == 4 && + (device->host->caps & MMC_CAP_4_BIT_DATA)) { + data = SDIO_BUS_CD_DISABLE | SDIO_BUS_WIDTH_4BIT; + ret = func0_cmd52_write_byte(func->card, + SDIO_CCCR_IF, + data); + if (ret) + HIF_ERROR("%s: Bus Width 0x%x failed: %d\n", + __func__, data, ret); + device->host->ios.bus_width = MMC_BUS_WIDTH_4; + device->host->ops->set_ios(device->host, + &device->host->ios); + } else if (mmcbuswidth == 8 && + (device->host->caps & MMC_CAP_8_BIT_DATA)) { + ret = hif_cmd52_write_byte_8bit(func); + if (ret) + HIF_ERROR("%s: Bus Width 8 failed: %d\n", + __func__, ret); + device->host->ios.bus_width = MMC_BUS_WIDTH_8; + device->host->ops->set_ios(device->host, + &device->host->ios); + } else { + HIF_ERROR("%s: Unsupported bus width %d", + __func__, mmcbuswidth); + ret = QDF_STATUS_E_FAILURE; + } + + HIF_INFO("%s: Bus with : %d\n", __func__, mmcbuswidth); +#endif + return ret; +} + + +/** + * hif_mask_interrupt() - Disable hif device irq + * @device: pointer to struct hif_sdio_dev + * + * + * Return: None. + */ +void hif_mask_interrupt(struct hif_sdio_dev *device) +{ + int ret; + uint16_t manfid; + + manfid = device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK; + + if (manfid == MANUFACTURER_ID_QCN7605_BASE) + return; + + HIF_ENTER(); + + /* Mask our function IRQ */ + sdio_claim_host(device->func); + while (atomic_read(&device->irq_handling)) { + sdio_release_host(device->func); + schedule_timeout_interruptible(HZ / 10); + sdio_claim_host(device->func); + } + ret = sdio_release_irq(device->func); + sdio_release_host(device->func); + if (ret) + HIF_ERROR("%s: Failed %d\n", __func__, ret); + + HIF_EXIT(); +} + +/** + * hif_irq_handler() - hif-sdio interrupt handler + * @func: pointer to sdio_func + * + * Return: None. + */ +static void hif_irq_handler(struct sdio_func *func) +{ + struct hif_sdio_dev *device = get_hif_device(NULL, func); + atomic_set(&device->irq_handling, 1); + /* release the host during intr so we can use + * it when we process cmds + */ + sdio_release_host(device->func); + device->htc_callbacks.dsr_handler(device->htc_callbacks.context); + sdio_claim_host(device->func); + atomic_set(&device->irq_handling, 0); +} + +/** + * hif_un_mask_interrupt() - Re-enable hif device irq + * @device: pointer to struct hif_sdio_dev + * + * + * Return: None. + */ +void hif_un_mask_interrupt(struct hif_sdio_dev *device) +{ + int ret; + uint16_t manfid; + + manfid = device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK; + + if (manfid == MANUFACTURER_ID_QCN7605_BASE) + return; + + HIF_ENTER(); + /* + * On HP Elitebook 8460P, interrupt mode is not stable + * in high throughput, so polling method should be used + * instead of interrupt mode. + */ + if (brokenirq) { + HIF_INFO("%s: Using broken IRQ mode", __func__); + device->func->card->host->caps &= ~MMC_CAP_SDIO_IRQ; + } + /* Register the IRQ Handler */ + sdio_claim_host(device->func); + ret = sdio_claim_irq(device->func, hif_irq_handler); + sdio_release_host(device->func); + + HIF_EXIT(); +} + +/** + * hif_sdio_func_disable() - Handle device enabling as per device + * @device: HIF device object + * @func: function pointer + * + * Return success or failure + */ +QDF_STATUS hif_sdio_func_disable(struct hif_sdio_dev *device, + struct sdio_func *func, + bool reset) +{ + int ret = 0; + uint16_t manfid; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + manfid = device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK; + + if (manfid == MANUFACTURER_ID_QCN7605_BASE) + return 0; + + /* Disable the card */ + sdio_claim_host(device->func); + + ret = sdio_disable_func(device->func); + if (ret) + status = QDF_STATUS_E_FAILURE; + + if (reset && status == QDF_STATUS_SUCCESS) + ret = func0_cmd52_write_byte(device->func->card, + SDIO_CCCR_ABORT, + (1 << 3)); + + if (ret) { + status = QDF_STATUS_E_FAILURE; + HIF_ERROR("%s: reset failed : %d", __func__, ret); + } + + sdio_release_host(device->func); + + return status; +} + +/** + * reinit_sdio() - re-initialize sdio bus + * @device: pointer to hif device + * + * Return: 0 on success, error number otherwise. + */ +QDF_STATUS reinit_sdio(struct hif_sdio_dev *device) +{ + int32_t err = 0; + struct mmc_host *host; + struct mmc_card *card; + struct sdio_func *func; + uint8_t cmd52_resp; + uint32_t clock; + uint16_t manfid; + + func = device->func; + card = func->card; + host = card->host; + + manfid = device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK; + + if (manfid == MANUFACTURER_ID_QCN7605_BASE) + return 0; + + sdio_claim_host(func); + + do { + /* Enable high speed */ + if (card->host->caps & MMC_CAP_SD_HIGHSPEED) { + HIF_INFO_HI("%s: Set high speed mode", __func__); + err = func0_cmd52_read_byte(card, SDIO_CCCR_SPEED, + &cmd52_resp); + if (err) { + HIF_ERROR("%s: CCCR speed set failed : %d", + __func__, err); + sdio_card_state(card); + /* no need to break */ + } else { + err = func0_cmd52_write_byte(card, + SDIO_CCCR_SPEED, + (cmd52_resp | + SDIO_SPEED_EHS)); + if (err) { + HIF_ERROR("%s:CCCR speed set failed:%d", + __func__, err); + break; + } + sdio_card_set_highspeed(card); + host->ios.timing = MMC_TIMING_SD_HS; + host->ops->set_ios(host, &host->ios); + } + } + + /* Set clock */ + if (sdio_card_highspeed(card)) + clock = 50000000; + else + clock = card->cis.max_dtr; + + if (clock > host->f_max) + clock = host->f_max; + /* + * In fpga mode the clk should be set to 12500000, + * or will result in scan channel setting timeout error. + * So in fpga mode, please set module parameter mmcclock + * to 12500000. + */ + if (mmcclock > 0) + clock = mmcclock; + host->ios.clock = clock; + host->ops->set_ios(host, &host->ios); + + if (card->host->caps & MMC_CAP_4_BIT_DATA) { + /* Set bus width & disable card detect resistor */ + err = func0_cmd52_write_byte(card, SDIO_CCCR_IF, + SDIO_BUS_CD_DISABLE | + SDIO_BUS_WIDTH_4BIT); + if (err) { + HIF_ERROR("%s: Set bus mode failed : %d", + __func__, err); + break; + } + host->ios.bus_width = MMC_BUS_WIDTH_4; + host->ops->set_ios(host, &host->ios); + } + } while (0); + + sdio_release_host(func); + + return (err) ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/hif.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/hif.c new file mode 100644 index 0000000000000000000000000000000000000000..7100baf1d616e981760ec4352f21f586195d91db --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/hif.c @@ -0,0 +1,1098 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hif_sdio_dev.h" +#include "if_sdio.h" +#include "regtable_sdio.h" +#include "wma_api.h" +#include "hif_internal.h" +#include + +#define HIF_USE_DMA_BOUNCE_BUFFER 1 +#define ATH_MODULE_NAME hif +#include "a_debug.h" + +#define MAX_HIF_DEVICES 2 +#ifdef HIF_MBOX_SLEEP_WAR +#define HIF_MIN_SLEEP_INACTIVITY_TIME_MS 50 +#define HIF_SLEEP_DISABLE_UPDATE_DELAY 1 +#define HIF_IS_WRITE_REQUEST_MBOX1_TO_3(request) \ + ((request->request & HIF_SDIO_WRITE) && \ + (request->address >= 0x1000 && \ + request->address < 0x1FFFF)) +#endif +unsigned int forcesleepmode; +module_param(forcesleepmode, uint, 0644); +MODULE_PARM_DESC(forcesleepmode, + "Set sleep mode: 0-host capbility, 1-force WOW, 2-force DeepSleep, 3-force CutPower"); + +unsigned int forcecard; +module_param(forcecard, uint, 0644); +MODULE_PARM_DESC(forcecard, + "Ignore card capabilities information to switch bus mode"); + +unsigned int debugcccr = 1; +module_param(debugcccr, uint, 0644); +MODULE_PARM_DESC(debugcccr, "Output this cccr values"); + +#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev) +#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv) +static struct hif_sdio_dev *add_hif_device(struct hif_softc *hif_ctx, + struct sdio_func *func); +static void del_hif_device(struct hif_sdio_dev *device); + +int reset_sdio_on_unload; +module_param(reset_sdio_on_unload, int, 0644); + +uint32_t nohifscattersupport = 1; + +/* ------ Static Variables ------ */ +static const struct sdio_device_id ar6k_id_table[] = { +#ifdef AR6002_HEADERS_DEF + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6002_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6002_BASE | 0x1))}, +#endif +#ifdef AR6003_HEADERS_DEF + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))}, +#endif +#ifdef AR6004_HEADERS_DEF + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, +#endif +#ifdef AR6320_HEADERS_DEF + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x1))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x2))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x3))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x4))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x5))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x6))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x7))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x8))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x9))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xA))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xB))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xC))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xD))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xE))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xF))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x1))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x2))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x3))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x4))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x5))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x6))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x7))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x8))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x9))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xA))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xB))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xC))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xD))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xE))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xF))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x1))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x2))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x3))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x4))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x5))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x6))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x7))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x8))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x9))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xA))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xB))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xC))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xD))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xE))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xF))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (0 | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (0 | 0x1))}, +#endif + { /* null */ }, +}; + +struct hif_sdio_softc *scn; + +static struct hif_sdio_dev *hif_devices[MAX_HIF_DEVICES]; + +#if defined(WLAN_DEBUG) || defined(DEBUG) +ATH_DEBUG_INSTANTIATE_MODULE_VAR(hif, + "hif", + "(Linux MMC) Host Interconnect Framework", + ATH_DEBUG_MASK_DEFAULTS, 0, NULL); +#endif + +/** + * add_to_async_list() - add bus reqest to async task list + * @device: pointer to hif device + * @busrequest: pointer to type of bus request + * + * Return: None. + */ +void add_to_async_list(struct hif_sdio_dev *device, + struct bus_request *busrequest) +{ + struct bus_request *async; + struct bus_request *active; + + qdf_spin_lock_irqsave(&device->asynclock); + active = device->asyncreq; + if (!active) { + device->asyncreq = busrequest; + device->asyncreq->inusenext = NULL; + } else { + for (async = device->asyncreq; + async; async = async->inusenext) { + active = async; + } + active->inusenext = busrequest; + busrequest->inusenext = NULL; + } + qdf_spin_unlock_irqrestore(&device->asynclock); +} + +/* + * Setup IRQ mode for deep sleep and WoW + * Switch back to 1 bits mode when we suspend for + * WoW in order to detect SDIO irq without clock. + * Re-enable async 4-bit irq mode for some host controllers + * after resume. + */ +#ifdef CONFIG_SDIO_TRANSFER_MAILBOX +static int sdio_enable4bits(struct hif_sdio_dev *device, int enable) +{ + int ret = 0; + struct sdio_func *func = device->func; + struct mmc_card *card = func->card; + struct mmc_host *host = card->host; + + if (!(host->caps & (MMC_CAP_4_BIT_DATA))) + return 0; + + if (card->cccr.low_speed && !card->cccr.wide_bus) + return 0; + + sdio_claim_host(func); + do { + int setAsyncIRQ = 0; + __u16 manufacturer_id = + device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK; + + /* Re-enable 4-bit ASYNC interrupt on AR6003x + * after system resume for some host controller + */ + if (manufacturer_id == MANUFACTURER_ID_AR6003_BASE) { + setAsyncIRQ = 1; + ret = + func0_cmd52_write_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG_AR6003, + enable ? + SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6003 + : 0); + } else if (manufacturer_id == MANUFACTURER_ID_AR6320_BASE || + manufacturer_id == MANUFACTURER_ID_QCA9377_BASE || + manufacturer_id == MANUFACTURER_ID_QCA9379_BASE) { + unsigned char data = 0; + + setAsyncIRQ = 1; + ret = + func0_cmd52_read_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG_AR6320, + &data); + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: failed to read interrupt extension register %d\n", + __func__, ret)); + sdio_release_host(func); + return ret; + } + if (enable) + data |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320; + else + data &= ~SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320; + ret = + func0_cmd52_write_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG_AR6320, + data); + } + if (setAsyncIRQ) { + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: failed to setup 4-bit ASYNC IRQ mode into %d err %d\n", + __func__, enable, ret)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("%s: Setup 4-bit ASYNC IRQ mode into %d successfully\n", + __func__, enable)); + } + } + } while (0); + sdio_release_host(func); + + return ret; +} +#else +static int sdio_enable4bits(struct hif_sdio_dev *device, int enable) +{ + return 0; +} +#endif + +/** + * hif_sdio_probe() - configure sdio device + * @ol_sc: HIF device context + * @func: SDIO function context + * @device: pointer to hif handle + * + * Return: 0 for success and non-zero for failure + */ +A_STATUS hif_sdio_probe(struct hif_softc *ol_sc, + struct sdio_func *func, + struct hif_sdio_dev *device) +{ + int ret = 0; + const struct sdio_device_id *id; + uint32_t target_type; + + HIF_ENTER(); + scn = (struct hif_sdio_softc *)ol_sc; + + scn->hif_handle = device; + spin_lock_init(&scn->target_lock); + /* + * Attach Target register table. This is needed early on + * even before BMI since PCI and HIF initialization + * directly access Target registers. + * + * TBDXXX: targetdef should not be global -- should be stored + * in per-device struct so that we can support multiple + * different Target types with a single Host driver. + * The whole notion of an "hif type" -- (not as in the hif + * module, but generic "Host Interface Type") is bizarre. + * At first, one one expect it to be things like SDIO, USB, PCI. + * But instead, it's an actual platform type. Inexplicably, the + * values used for HIF platform types are *different* from the + * values used for Target Types. + */ + +#if defined(CONFIG_AR9888_SUPPORT) + hif_register_tbl_attach(ol_sc, HIF_TYPE_AR9888); + target_register_tbl_attach(ol_sc, TARGET_TYPE_AR9888); + target_type = TARGET_TYPE_AR9888; +#elif defined(CONFIG_AR6320_SUPPORT) + id = device->id; + if (((id->device & MANUFACTURER_ID_AR6K_BASE_MASK) == + MANUFACTURER_ID_QCA9377_BASE) || + ((id->device & MANUFACTURER_ID_AR6K_BASE_MASK) == + MANUFACTURER_ID_QCA9379_BASE)) { + hif_register_tbl_attach(ol_sc, HIF_TYPE_AR6320V2); + target_register_tbl_attach(ol_sc, TARGET_TYPE_AR6320V2); + } else if ((id->device & MANUFACTURER_ID_AR6K_BASE_MASK) == + MANUFACTURER_ID_AR6320_BASE) { + int ar6kid = id->device & MANUFACTURER_ID_AR6K_REV_MASK; + + if (ar6kid >= 1) { + /* v2 or higher silicon */ + hif_register_tbl_attach(ol_sc, HIF_TYPE_AR6320V2); + target_register_tbl_attach(ol_sc, TARGET_TYPE_AR6320V2); + } else { + /* legacy v1 silicon */ + hif_register_tbl_attach(ol_sc, HIF_TYPE_AR6320); + target_register_tbl_attach(ol_sc, TARGET_TYPE_AR6320); + } + } + target_type = TARGET_TYPE_AR6320; + +#endif + scn->targetdef = ol_sc->targetdef; + scn->hostdef = ol_sc->hostdef; + scn->dev = &func->dev; + ol_sc->bus_type = QDF_BUS_TYPE_SDIO; + ol_sc->target_info.target_type = target_type; + + scn->ramdump_base = + pld_hif_sdio_get_virt_ramdump_mem(&func->dev, + &scn->ramdump_size); + if (!scn->ramdump_base || !scn->ramdump_size) { + HIF_ERROR("%s: Failed ramdump res alloc - base:%s, len:%lu", + __func__, + scn->ramdump_base ? "ok" : "null", + scn->ramdump_size); + } else { + HIF_INFO("%s: ramdump base %pK size %lu", __func__, + scn->ramdump_base, scn->ramdump_size); + } + + if (athdiag_procfs_init(scn) != 0) { + ret = QDF_STATUS_E_FAILURE; + goto err_attach1; + } + + ret = hif_dev_register_channels(device, func); + + return ret; + +err_attach1: + if (scn->ramdump_base) + pld_hif_sdio_release_ramdump_mem(scn->ramdump_base); + scn = NULL; + return ret; +} + +/** + * power_state_change_notify() - SDIO bus power notification handler + * @ol_sc: HIF device context + * @config: hif device power change type + * + * Return: 0 on success, error number otherwise. + */ +static QDF_STATUS +power_state_change_notify(struct hif_softc *ol_sc, + struct hif_sdio_dev *device, + enum HIF_DEVICE_POWER_CHANGE_TYPE config) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct sdio_func *func = device->func; + + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: config type %d\n", + __func__, config)); + switch (config) { + case HIF_DEVICE_POWER_DOWN: + /* Disable 4bits to allow SDIO bus to detect + * DAT1 as interrupt source + */ + sdio_enable4bits(device, 0); + break; + case HIF_DEVICE_POWER_CUT: + status = hif_disable_func(device, func, 1); + if (!device->is_suspend) { + device->power_config = config; + mmc_detect_change(device->host, HZ / 3); + } + break; + case HIF_DEVICE_POWER_UP: + if (device->power_config == HIF_DEVICE_POWER_CUT) { + if (device->is_suspend) { + status = reinit_sdio(device); + /* set power_config before EnableFunc to + * passthrough sdio r/w action when resuming + * from cut power + */ + device->power_config = config; + if (status == QDF_STATUS_SUCCESS) + status = hif_enable_func(ol_sc, device, + func, true); + } else { + /* device->func is bad pointer at this time */ + mmc_detect_change(device->host, 0); + return QDF_STATUS_E_PENDING; + } + } else if (device->power_config == HIF_DEVICE_POWER_DOWN) { + int ret = sdio_enable4bits(device, 1); + + status = (ret == 0) ? QDF_STATUS_SUCCESS : + QDF_STATUS_E_FAILURE; + } + break; + } + device->power_config = config; + + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s:\n", __func__)); + + return status; +} + + +/** + * hif_configure_device() - configure sdio device + * @ol_sc: HIF device context + * @device: pointer to hif device structure + * @opcode: configuration type + * @config: configuration value to set + * @configLen: configuration length + * + * Return: 0 on success, error number otherwise. + */ +QDF_STATUS +hif_configure_device(struct hif_softc *ol_sc, struct hif_sdio_dev *device, + enum hif_device_config_opcode opcode, + void *config, uint32_t config_len) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + switch (opcode) { + case HIF_DEVICE_GET_BLOCK_SIZE: + hif_dev_get_block_size(config); + break; + + case HIF_DEVICE_GET_FIFO_ADDR: + hif_dev_get_fifo_address(device, config, config_len); + break; + + case HIF_DEVICE_GET_PENDING_EVENTS_FUNC: + HIF_WARN("%s: opcode %d", __func__, opcode); + status = QDF_STATUS_E_FAILURE; + break; + case HIF_DEVICE_GET_IRQ_PROC_MODE: + *((enum hif_device_irq_mode *) config) = + HIF_DEVICE_IRQ_SYNC_ONLY; + break; + case HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: + HIF_WARN("%s: opcode %d", __func__, opcode); + status = QDF_STATUS_E_FAILURE; + break; + case HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: + if (!device->scatter_enabled) + return QDF_STATUS_E_NOSUPPORT; + status = + setup_hif_scatter_support(device, + (struct HIF_DEVICE_SCATTER_SUPPORT_INFO *) + config); + if (QDF_IS_STATUS_ERROR(status)) + device->scatter_enabled = false; + break; + case HIF_DEVICE_GET_OS_DEVICE: + /* pass back a pointer to the SDIO function's "dev" struct */ + ((struct HIF_DEVICE_OS_DEVICE_INFO *) config)->os_dev = + &device->func->dev; + break; + case HIF_DEVICE_POWER_STATE_CHANGE: + status = + power_state_change_notify(ol_sc, device, + *(enum HIF_DEVICE_POWER_CHANGE_TYPE *) + config); + break; + case HIF_DEVICE_GET_IRQ_YIELD_PARAMS: + HIF_WARN("%s: opcode %d", __func__, opcode); + status = QDF_STATUS_E_FAILURE; + break; + case HIF_DEVICE_SET_HTC_CONTEXT: + device->htc_context = config; + break; + case HIF_DEVICE_GET_HTC_CONTEXT: + if (!config) { + HIF_ERROR("%s: htc context is NULL", __func__); + return QDF_STATUS_E_FAILURE; + } + *(void **)config = device->htc_context; + break; + case HIF_BMI_DONE: + HIF_ERROR("%s: BMI_DONE", __func__); + break; + default: + HIF_ERROR("%s: Unsupported opcode: %d", __func__, opcode); + status = QDF_STATUS_E_FAILURE; + } + + return status; +} + +/** + * hif_sdio_shutdown() - hif-sdio shutdown routine + * @hif_ctx: pointer to hif_softc structore + * + * Return: None. + */ +void hif_sdio_shutdown(struct hif_softc *hif_ctx) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *hif_device = scn->hif_handle; + + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: Enter\n", __func__)); + if (hif_device) { + AR_DEBUG_ASSERT(hif_device->power_config == HIF_DEVICE_POWER_CUT + || hif_device->func); + } else { + int i; + /* since we are unloading the driver anyways, + * reset all cards in case the SDIO card is + * externally powered and we are unloading the SDIO + * stack. This avoids the problem when the SDIO stack + * is reloaded and attempts are made to re-enumerate + * a card that is already enumerated + */ + for (i = 0; i < MAX_HIF_DEVICES; ++i) { + if (hif_devices[i] && !hif_devices[i]->func) { + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: Remove pending hif_device %pK\n", + __func__, hif_devices[i])); + del_hif_device(hif_devices[i]); + hif_devices[i] = NULL; + } + } + } + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: Exit\n", __func__)); +} + +/** + * hif_device_inserted() - hif-sdio driver probe handler + * @ol_sc: HIF device context + * @func: pointer to sdio_func + * @id: pointer to sdio_device_id + * + * Return: 0 on success, error number otherwise. + */ +static int hif_device_inserted(struct hif_softc *ol_sc, + struct sdio_func *func, + const struct sdio_device_id *id) +{ + int i, ret = 0, count; + struct hif_sdio_dev *device = NULL; + + HIF_INFO("%s: F%X, VID: 0x%X, DevID: 0x%X, block size: 0x%X/0x%X\n", + __func__, func->num, func->vendor, id->device, + func->max_blksize, func->cur_blksize); + + /* dma_mask should be populated here. Use the parent device's setting */ + func->dev.dma_mask = mmc_dev(func->card->host)->dma_mask; + + for (i = 0; i < MAX_HIF_DEVICES; ++i) { + struct hif_sdio_dev *hifdevice = hif_devices[i]; + + if (hifdevice && + hifdevice->power_config == HIF_DEVICE_POWER_CUT && + hifdevice->host == func->card->host) { + device = get_hif_device(ol_sc, func); + hifdevice->func = func; + hifdevice->power_config = HIF_DEVICE_POWER_UP; + hif_sdio_set_drvdata(ol_sc, func, hifdevice); + + if (device->is_suspend) { + HIF_INFO("%s: Resume from suspend", __func__); + ret = reinit_sdio(device); + } + break; + } + } + + /* If device not found, then it is a new insertion, alloc and add it */ + if (!device) { + if (!add_hif_device(ol_sc, func)) + return QDF_STATUS_E_FAILURE; + + device = get_hif_device(ol_sc, func); + + for (i = 0; i < MAX_HIF_DEVICES; ++i) { + if (!hif_devices[i]) { + hif_devices[i] = device; + break; + } + } + if (i == MAX_HIF_DEVICES) { + HIF_ERROR("%s: No more slots", __func__); + goto del_hif_dev; + } + + device->id = id; + device->host = func->card->host; + device->is_disabled = true; + /* TODO: MMC SDIO3.0 Setting should also be modified in ReInit() + * function when Power Manage work. + */ + sdio_claim_host(func); + + hif_sdio_quirk_force_drive_strength(ol_sc, func); + + hif_sdio_quirk_write_cccr(ol_sc, func); + + ret = hif_sdio_set_bus_speed(ol_sc, func); + + ret = hif_sdio_set_bus_width(ol_sc, func); + if (debugcccr) + hif_dump_cccr(device); + + sdio_release_host(func); + } + + qdf_spinlock_create(&device->lock); + + qdf_spinlock_create(&device->asynclock); + + DL_LIST_INIT(&device->scatter_req_head); + + if (!nohifscattersupport) { + /* try to allow scatter operation on all instances, + * unless globally overridden + */ + device->scatter_enabled = true; + } else + device->scatter_enabled = false; + + /* Initialize the bus requests to be used later */ + qdf_mem_zero(device->bus_request, sizeof(device->bus_request)); + for (count = 0; count < BUS_REQUEST_MAX_NUM; count++) { + sema_init(&device->bus_request[count].sem_req, 0); + hif_free_bus_request(device, &device->bus_request[count]); + } + sema_init(&device->sem_async, 0); + + ret = hif_enable_func(ol_sc, device, func, false); + if ((ret == QDF_STATUS_SUCCESS || ret == QDF_STATUS_E_PENDING)) + return 0; + ret = QDF_STATUS_E_FAILURE; +del_hif_dev: + del_hif_device(device); + for (i = 0; i < MAX_HIF_DEVICES; ++i) { + if (hif_devices[i] == device) { + hif_devices[i] = NULL; + break; + } + } + if (i == MAX_HIF_DEVICES) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: No hif_devices[] slot for %pK", + __func__, device)); + } + return ret; +} + +/** + * hif_ack_interrupt() - Acknowledge hif device irq + * @device: pointer to struct hif_sdio_dev + * + * This should translate to an acknowledgment to the bus driver indicating that + * the previous interrupt request has been serviced and the all the relevant + * sources have been cleared. HTC is ready to process more interrupts. + * This should prevent the bus driver from raising an interrupt unless the + * previous one has been serviced and acknowledged using the previous API. + * + * Return: None. + */ +void hif_ack_interrupt(struct hif_sdio_dev *device) +{ + AR_DEBUG_ASSERT(device); + + /* Acknowledge our function IRQ */ +} + +/** + * hif_sdio_configure_pipes - Configure pipes for the lower layer bus + * @pdev - HIF layer object + * @func - SDIO bus function object + * + * Return - error in case of failure to configure, else success + */ +int hif_sdio_configure_pipes(struct hif_sdio_dev *dev, struct sdio_func *func) +{ + return hif_dev_configure_pipes(dev, func); +} + +/** + * hif_allocate_bus_request() - Allocate hif bus request + * @device: pointer to struct hif_sdio_dev + * + * + * Return: pointer to struct bus_request structure. + */ +struct bus_request *hif_allocate_bus_request(struct hif_sdio_dev *device) +{ + struct bus_request *busrequest; + + qdf_spin_lock_irqsave(&device->lock); + busrequest = device->bus_request_free_queue; + /* Remove first in list */ + if (busrequest) + device->bus_request_free_queue = busrequest->next; + + /* Release lock */ + qdf_spin_unlock_irqrestore(&device->lock); + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: hif_allocate_bus_request: 0x%pK\n", + __func__, busrequest)); + + return busrequest; +} + +/** + * hif_free_bus_request() - Free hif bus request + * @device: pointer to struct hif_sdio_dev + * + * + * Return: None. + */ +void hif_free_bus_request(struct hif_sdio_dev *device, + struct bus_request *busrequest) +{ + AR_DEBUG_ASSERT(busrequest); + /* Acquire lock */ + qdf_spin_lock_irqsave(&device->lock); + + /* Insert first in list */ + busrequest->next = device->bus_request_free_queue; + busrequest->inusenext = NULL; + device->bus_request_free_queue = busrequest; + + /* Release lock */ + qdf_spin_unlock_irqrestore(&device->lock); +} + +int hif_device_suspend(struct hif_softc *ol_sc, struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct hif_sdio_dev *device = get_hif_device(ol_sc, func); + mmc_pm_flag_t pm_flag = 0; + enum HIF_DEVICE_POWER_CHANGE_TYPE config; + struct mmc_host *host = func->card->host; + + host = device->func->card->host; + + device->is_suspend = true; + + switch (forcesleepmode) { + case 0: /* depend on sdio host pm capbility */ + pm_flag = sdio_get_host_pm_caps(func); + break; + case 1: /* force WOW */ + pm_flag |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ; + break; + case 2: /* force DeepSleep */ + pm_flag &= ~MMC_PM_WAKE_SDIO_IRQ; + pm_flag |= MMC_PM_KEEP_POWER; + break; + case 3: /* force CutPower */ + pm_flag &= + ~(MMC_PM_WAKE_SDIO_IRQ | MMC_PM_WAKE_SDIO_IRQ); + break; + } + + if (!(pm_flag & MMC_PM_KEEP_POWER)) { + /* setting power_config before hif_configure_device to + * skip sdio r/w when suspending with cut power + */ + HIF_INFO("%s: Power cut", __func__); + config = HIF_DEVICE_POWER_CUT; + device->power_config = config; + + hif_configure_device(ol_sc, device, + HIF_DEVICE_POWER_STATE_CHANGE, + &config, + sizeof(config)); + hif_mask_interrupt(device); + device->device_state = HIF_DEVICE_STATE_CUTPOWER; + return 0; + } + + if (sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER)) { + HIF_ERROR("%s: set pm_flags failed", __func__); + return -EINVAL; + } + + if (pm_flag & MMC_PM_WAKE_SDIO_IRQ) { + HIF_INFO("%s: WOW mode ", __func__); + config = HIF_DEVICE_POWER_DOWN; + hif_configure_device(ol_sc, device, + HIF_DEVICE_POWER_STATE_CHANGE, + &config, + sizeof(config)); + + if (sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ)) { + HIF_ERROR("%s: set pm_flags failed", __func__); + return -EINVAL; + } + hif_mask_interrupt(device); + device->device_state = HIF_DEVICE_STATE_WOW; + return 0; + } else { + HIF_INFO("%s: deep sleep enter", __func__); + msleep(100); + hif_mask_interrupt(device); + device->device_state = HIF_DEVICE_STATE_DEEPSLEEP; + return 0; + } + + return 0; +} + +int hif_device_resume(struct hif_softc *ol_sc, struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + QDF_STATUS status = QDF_STATUS_SUCCESS; + enum HIF_DEVICE_POWER_CHANGE_TYPE config; + struct hif_sdio_dev *device; + + device = get_hif_device(ol_sc, func); + if (!device) { + HIF_ERROR("%s: hif object is null", __func__); + return -EINVAL; + } + + if (device->device_state == HIF_DEVICE_STATE_CUTPOWER) { + config = HIF_DEVICE_POWER_UP; + hif_configure_device(ol_sc, device, + HIF_DEVICE_POWER_STATE_CHANGE, + &config, + sizeof(config)); + hif_enable_func(ol_sc, device, func, true); + } else if (device->device_state == HIF_DEVICE_STATE_DEEPSLEEP) { + hif_un_mask_interrupt(device); + } else if (device->device_state == HIF_DEVICE_STATE_WOW) { + /*TODO:WOW support */ + hif_un_mask_interrupt(device); + } + + device->is_suspend = false; + device->device_state = HIF_DEVICE_STATE_ON; + + return QDF_IS_STATUS_SUCCESS(status) ? 0 : status; +} + +/** + * hif_sdio_remove() - remove sdio device + * @conext: sdio device context + * @hif_handle: pointer to sdio function + * + * Return: 0 for success and non-zero for failure + */ +static A_STATUS hif_sdio_remove(void *context, void *hif_handle) +{ + HIF_ENTER(); + + if (!scn) { + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, + "Global SDIO context is NULL"); + return A_ERROR; + } + + athdiag_procfs_remove(); + +#ifndef TARGET_DUMP_FOR_NON_QC_PLATFORM + iounmap(scn->ramdump_base); +#endif + + HIF_EXIT(); + + return 0; +} + +static void hif_device_removed(struct hif_softc *ol_sc, struct sdio_func *func) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct hif_sdio_dev *device; + int i; + + AR_DEBUG_ASSERT(func); + HIF_ENTER(); + device = get_hif_device(ol_sc, func); + + if (device->power_config == HIF_DEVICE_POWER_CUT) { + device->func = NULL; /* func will be free by mmc stack */ + return; /* Just return for cut-off mode */ + } + for (i = 0; i < MAX_HIF_DEVICES; ++i) { + if (hif_devices[i] == device) + hif_devices[i] = NULL; + } + + hif_sdio_remove(device->claimed_ctx, device); + + hif_mask_interrupt(device); + + if (device->is_disabled) + device->is_disabled = false; + else + status = hif_disable_func(device, func, + reset_sdio_on_unload ? true : false); + + + del_hif_device(device); + if (status != QDF_STATUS_SUCCESS) + AR_DEBUG_PRINTF(ATH_DEBUG_WARN, + ("%s: Unable to disable sdio func\n", + __func__)); + + HIF_EXIT(); +} + +static struct hif_sdio_dev *add_hif_device(struct hif_softc *ol_sc, + struct sdio_func *func) +{ + struct hif_sdio_dev *hifdevice = NULL; + int ret = 0; + + HIF_ENTER(); + AR_DEBUG_ASSERT(func); + hifdevice = (struct hif_sdio_dev *) qdf_mem_malloc(sizeof( + struct hif_sdio_dev)); + AR_DEBUG_ASSERT(hifdevice); + if (!hifdevice) + return NULL; + +#if HIF_USE_DMA_BOUNCE_BUFFER + hifdevice->dma_buffer = qdf_mem_malloc(HIF_DMA_BUFFER_SIZE); + AR_DEBUG_ASSERT(hifdevice->dma_buffer); + if (!hifdevice->dma_buffer) { + qdf_mem_free(hifdevice); + return NULL; + } +#endif + hifdevice->func = func; + hifdevice->power_config = HIF_DEVICE_POWER_UP; + hifdevice->device_state = HIF_DEVICE_STATE_ON; + ret = hif_sdio_set_drvdata(ol_sc, func, hifdevice); + HIF_INFO("status %d", ret); + + return hifdevice; +} + +static void del_hif_device(struct hif_sdio_dev *device) +{ + AR_DEBUG_ASSERT(device); + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: deleting hif device 0x%pK\n", + __func__, device)); + if (device->dma_buffer) + qdf_mem_free(device->dma_buffer); + + qdf_mem_free(device); +} + +QDF_STATUS hif_attach_htc(struct hif_sdio_dev *device, + struct htc_callbacks *callbacks) +{ + if (device->htc_callbacks.context) + /* already in use! */ + return QDF_STATUS_E_FAILURE; + device->htc_callbacks = *callbacks; + + return QDF_STATUS_SUCCESS; +} + +void hif_detach_htc(struct hif_opaque_softc *hif_ctx) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *hif_device = scn->hif_handle; + + qdf_mem_zero(&hif_device->htc_callbacks, + sizeof(hif_device->htc_callbacks)); +} + +int func0_cmd52_write_byte(struct mmc_card *card, + unsigned int address, + unsigned char byte) +{ + struct mmc_command io_cmd; + unsigned long arg; + int status = 0; + + memset(&io_cmd, 0, sizeof(io_cmd)); + SDIO_SET_CMD52_WRITE_ARG(arg, 0, address, byte); + io_cmd.opcode = SD_IO_RW_DIRECT; + io_cmd.arg = arg; + io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; + status = mmc_wait_for_cmd(card->host, &io_cmd, 0); + + if (status) + HIF_ERROR("%s: mmc_wait_for_cmd returned %d", + __func__, status); + + return status; +} + +int func0_cmd52_read_byte(struct mmc_card *card, + unsigned int address, + unsigned char *byte) +{ + struct mmc_command io_cmd; + unsigned long arg; + int32_t err; + + memset(&io_cmd, 0, sizeof(io_cmd)); + SDIO_SET_CMD52_READ_ARG(arg, 0, address); + io_cmd.opcode = SD_IO_RW_DIRECT; + io_cmd.arg = arg; + io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; + + err = mmc_wait_for_cmd(card->host, &io_cmd, 0); + + if ((!err) && (byte)) + *byte = io_cmd.resp[0] & 0xFF; + + if (err) + HIF_ERROR("%s: mmc_wait_for_cmd returned %d", + __func__, err); + + return err; +} + +void hif_dump_cccr(struct hif_sdio_dev *hif_device) +{ + unsigned int i; + uint8_t cccr_val; + uint32_t err; + + HIF_ERROR("%s: Enter", __func__); + + if (!hif_device || !hif_device->func || + !hif_device->func->card) { + HIF_ERROR("%s: incorrect input", __func__); + return; + } + + for (i = 0; i <= 0x16; i++) { + err = func0_cmd52_read_byte(hif_device->func->card, + i, &cccr_val); + if (err) + HIF_ERROR("%s:Reading CCCR 0x%02X failed: %d", + __func__, i, (unsigned int)err); + else + HIF_ERROR("%X(%X) ", i, (unsigned int)cccr_val); + } + + HIF_ERROR("%s: Exit", __func__); +} + +int hif_sdio_device_inserted(struct hif_softc *ol_sc, + struct device *dev, + const struct sdio_device_id *id) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + int status = 0; + + HIF_ERROR("%s: Enter", __func__); + status = hif_device_inserted(ol_sc, func, id); + HIF_ERROR("%s: Exit: status:%d", __func__, status); + + return status; +} + +void hif_sdio_device_removed(struct hif_softc *ol_sc, struct sdio_func *func) +{ + hif_device_removed(ol_sc, func); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/hif_scatter.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/hif_scatter.c new file mode 100644 index 0000000000000000000000000000000000000000..cb8e83b4cb86bafa561493066af15fae38d9a0f3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/hif_scatter.c @@ -0,0 +1,478 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include "hif_internal.h" +#include +#include "dl_list.h" +#define ATH_MODULE_NAME hif +#include "a_debug.h" +#include + +#ifdef HIF_LINUX_MMC_SCATTER_SUPPORT + +#define _CMD53_ARG_READ 0 +#define _CMD53_ARG_WRITE 1 +#define _CMD53_ARG_BLOCK_BASIS 1 +#define _CMD53_ARG_FIXED_ADDRESS 0 +#define _CMD53_ARG_INCR_ADDRESS 1 + +#define SDIO_SET_CMD53_ARG(arg, rw, func, mode, opcode, address, bytes_blocks) \ + ((arg) = (((rw) & 1) << 31) | \ + ((func & 0x7) << 28) | \ + (((mode) & 1) << 27) | \ + (((opcode) & 1) << 26) | \ + (((address) & 0x1FFFF) << 9) | \ + ((bytes_blocks) & 0x1FF)) + +/** + * free_scatter_req() - free scattered request. + * @device: hif device context + * @pReq: scatter list node + * + * Return: none + */ +static void free_scatter_req(struct hif_sdio_dev *device, + struct _HIF_SCATTER_REQ *pReq) +{ + qdf_spin_lock_irqsave(&device->lock); + + dl_list_insert_tail(&device->scatter_req_head, &pReq->list_link); + + qdf_spin_unlock_irqrestore(&device->lock); +} + +/** + * alloc_scatter_req() - allocate scattered request. + * @device: hif device context + * + * + * Return: pointer to allocated scatter list node + */ +static struct _HIF_SCATTER_REQ *alloc_scatter_req(struct hif_sdio_dev *device) +{ + DL_LIST *item; + + qdf_spin_lock_irqsave(&device->lock); + + item = dl_list_remove_item_from_head(&device->scatter_req_head); + + qdf_spin_unlock_irqrestore(&device->lock); + + if (item) + return A_CONTAINING_STRUCT(item, + struct _HIF_SCATTER_REQ, list_link); + + return NULL; +} + +/** + * do_hif_read_write_scatter() - rd/wr scattered operation. + * @device: hif device context + * @busrequest: rd/wr bus request + * + * called by async task to perform the operation synchronously + * using direct MMC APIs + * Return: int + */ +QDF_STATUS do_hif_read_write_scatter(struct hif_sdio_dev *device, + struct bus_request *busrequest) +{ + int i; + uint8_t rw; + uint8_t opcode; + struct mmc_request mmcreq; + struct mmc_command cmd; + struct mmc_data data; + struct HIF_SCATTER_REQ_PRIV *req_priv; + struct _HIF_SCATTER_REQ *req; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct scatterlist *sg; + + HIF_ENTER(); + + req_priv = busrequest->scatter_req; + + A_ASSERT(req_priv); + if (!req_priv) { + return QDF_STATUS_E_FAILURE; + } + + req = req_priv->hif_scatter_req; + + memset(&mmcreq, 0, sizeof(struct mmc_request)); + memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&data, 0, sizeof(struct mmc_data)); + + data.blksz = HIF_BLOCK_SIZE; + data.blocks = req->total_length / HIF_BLOCK_SIZE; + + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + ("HIF-SCATTER: (%s) Address: 0x%X, (BlockLen: %d, BlockCount: %d), (tot:%d,sg:%d)\n", + (req->request & HIF_SDIO_WRITE) ? "WRITE" : "READ", + req->address, data.blksz, data.blocks, + req->total_length, req->valid_scatter_entries)); + + if (req->request & HIF_SDIO_WRITE) { + rw = _CMD53_ARG_WRITE; + data.flags = MMC_DATA_WRITE; + } else { + rw = _CMD53_ARG_READ; + data.flags = MMC_DATA_READ; + } + + if (req->request & HIF_FIXED_ADDRESS) + opcode = _CMD53_ARG_FIXED_ADDRESS; + else + opcode = _CMD53_ARG_INCR_ADDRESS; + + /* fill SG entries */ + sg = req_priv->sgentries; + sg_init_table(sg, req->valid_scatter_entries); + + /* assemble SG list */ + for (i = 0; i < req->valid_scatter_entries; i++, sg++) { + /* setup each sg entry */ + if ((unsigned long)req->scatter_list[i].buffer & 0x3) { + /* note some scatter engines can handle unaligned + * buffers, print this as informational only + */ + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + ("HIF: (%s) Scatter Buf is unaligned 0x%lx\n", + req-> + request & HIF_SDIO_WRITE ? "WRITE" : "READ", + (unsigned long)req->scatter_list[i]. + buffer)); + } + + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + (" %d: Addr:0x%lX, Len:%d\n", i, + (unsigned long)req->scatter_list[i].buffer, + req->scatter_list[i].length)); + + sg_set_buf(sg, req->scatter_list[i].buffer, + req->scatter_list[i].length); + } + /* set scatter-gather table for request */ + data.sg = req_priv->sgentries; + data.sg_len = req->valid_scatter_entries; + /* set command argument */ + SDIO_SET_CMD53_ARG(cmd.arg, + rw, + device->func->num, + _CMD53_ARG_BLOCK_BASIS, + opcode, req->address, data.blocks); + + cmd.opcode = SD_IO_RW_EXTENDED; + cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; + + mmcreq.cmd = &cmd; + mmcreq.data = &data; + + mmc_set_data_timeout(&data, device->func->card); + /* synchronous call to process request */ + mmc_wait_for_req(device->func->card->host, &mmcreq); + + if (cmd.error) { + status = QDF_STATUS_E_FAILURE; + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: cmd error: %d\n", cmd.error)); + } + + if (data.error) { + status = QDF_STATUS_E_FAILURE; + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: data error: %d\n", data.error)); + } + + if (QDF_IS_STATUS_ERROR(status)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: FAILED!!! (%s) Address: 0x%X, Block mode (BlockLen: %d, BlockCount: %d)\n", + (req->request & HIF_SDIO_WRITE) ? "WRITE" : "READ", + req->address, data.blksz, data.blocks)); + } + + /* set completion status, fail or success */ + req->completion_status = status; + + if (req->request & HIF_ASYNCHRONOUS) { + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + ("HIF-SCATTER: async_task completion routine req: 0x%lX (%d)\n", + (unsigned long)busrequest, status)); + /* complete the request */ + A_ASSERT(req->completion_routine); + if (req->completion_routine) { + req->completion_routine(req); + } + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + ("HIF-SCATTER async_task upping busreq : 0x%lX (%d)\n", + (unsigned long)busrequest, status)); + /* signal wait */ + up(&busrequest->sem_req); + } + HIF_EXIT(); + + return status; +} + +/** + * alloc_scatter_req() - callback to issue a read-write + * scatter request. + * @device: hif device context + * @pReq: rd/wr scatter request + * + * Return: int + */ +static QDF_STATUS hif_read_write_scatter(struct hif_sdio_dev *device, + struct _HIF_SCATTER_REQ *req) +{ + QDF_STATUS status = QDF_STATUS_E_INVAL; + uint32_t request = req->request; + struct HIF_SCATTER_REQ_PRIV *req_priv = + (struct HIF_SCATTER_REQ_PRIV *) req->hif_private[0]; + + do { + + A_ASSERT(req_priv); + if (!req_priv) { + break; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + ("HIF-SCATTER: total len: %d Scatter Entries: %d\n", + req->total_length, + req->valid_scatter_entries)); + + if (!(request & HIF_EXTENDED_IO)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: Invalid command type: 0x%08x\n", + request)); + break; + } + + if (!(request & (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS))) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: Invalid mode: 0x%08x\n", + request)); + break; + } + + if (!(request & HIF_BLOCK_BASIS)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: Invalid data mode: 0x%08x\n", + request)); + break; + } + + if (req->total_length > MAX_SCATTER_REQ_TRANSFER_SIZE) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: Invalid length: %d\n", + req->total_length)); + break; + } + + if (req->total_length == 0) { + A_ASSERT(false); + break; + } + + /* add bus request to the async list for the async + * I/O thread to process + */ + add_to_async_list(device, req_priv->busrequest); + + if (request & HIF_SYNCHRONOUS) { + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + ("HIF-SCATTER: queued sync req: 0x%lX\n", + (unsigned long)req_priv->busrequest)); + /* signal thread and wait */ + up(&device->sem_async); + if (down_interruptible(&req_priv->busrequest->sem_req) + != 0) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: interrupted!\n")); + /* interrupted, exit */ + status = QDF_STATUS_E_FAILURE; + break; + } + status = req->completion_status; + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + ("HIF-SCATTER: queued async req: 0x%lX\n", + (unsigned long)req_priv->busrequest)); + /* wake thread, it will process and then take + * care of the async callback + */ + up(&device->sem_async); + status = QDF_STATUS_SUCCESS; + } + + } while (false); + + if (QDF_IS_STATUS_ERROR(status) && (request & HIF_ASYNCHRONOUS)) { + req->completion_status = status; + req->completion_routine(req); + status = QDF_STATUS_SUCCESS; + } + + return status; +} + +/** + * setup_hif_scatter_support() - setup of HIF scatter resources + * scatter request. + * @device: hif device context + * @pInfo: scatter info + * + * Return: int + */ +QDF_STATUS setup_hif_scatter_support(struct hif_sdio_dev *device, + struct HIF_DEVICE_SCATTER_SUPPORT_INFO *info) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + int i; + struct HIF_SCATTER_REQ_PRIV *req_priv; + struct bus_request *busrequest; + + if (device->func->card->host->max_segs < + MAX_SCATTER_ENTRIES_PER_REQ) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("host only supports scatter of : %d entries, need: %d\n", + device->func->card->host->max_segs, + MAX_SCATTER_ENTRIES_PER_REQ)); + status = QDF_STATUS_E_NOSUPPORT; + goto end; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("max scatter req : %d entries: %d\n", + MAX_SCATTER_REQUESTS, + MAX_SCATTER_ENTRIES_PER_REQ)); + + for (i = 0; i < MAX_SCATTER_REQUESTS; i++) { + /* allocate the private request blob */ + req_priv = + (struct HIF_SCATTER_REQ_PRIV *) + qdf_mem_malloc(sizeof( + struct HIF_SCATTER_REQ_PRIV)); + if (!req_priv) + goto end; + /* save the device instance */ + req_priv->device = device; + /* allocate the scatter request */ + req_priv->hif_scatter_req = + (struct _HIF_SCATTER_REQ *) + qdf_mem_malloc(sizeof(struct _HIF_SCATTER_REQ) + + (MAX_SCATTER_ENTRIES_PER_REQ - + 1) * (sizeof(struct _HIF_SCATTER_ITEM))); + + if (!req_priv->hif_scatter_req) { + qdf_mem_free(req_priv); + goto end; + } + /* back pointer to the private struct */ + req_priv->hif_scatter_req->hif_private[0] = req_priv; + /* allocate a bus request for this scatter request */ + busrequest = hif_allocate_bus_request(device); + if (!busrequest) { + qdf_mem_free(req_priv->hif_scatter_req); + qdf_mem_free(req_priv); + goto end; + } + /* assign the scatter request to this bus request */ + busrequest->scatter_req = req_priv; + /* point back to the request */ + req_priv->busrequest = busrequest; + /* req_priv it to the scatter pool */ + free_scatter_req(device, req_priv->hif_scatter_req); + } + + if (i != MAX_SCATTER_REQUESTS) { + status = QDF_STATUS_E_NOMEM; + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("failed to alloc scatter resources !\n")); + goto end; + } + + /* set scatter function pointers */ + info->allocate_req_func = alloc_scatter_req; + info->free_req_func = free_scatter_req; + info->read_write_scatter_func = hif_read_write_scatter; + info->max_scatter_entries = MAX_SCATTER_ENTRIES_PER_REQ; + info->max_tx_size_per_scatter_req = + MAX_SCATTER_REQ_TRANSFER_SIZE; + + status = QDF_STATUS_SUCCESS; + +end: + if (QDF_IS_STATUS_ERROR(status)) + cleanup_hif_scatter_resources(device); + + return status; +} + +/** + * cleanup_hif_scatter_resources() - cleanup HIF scatter resources + * scatter request. + * @device: hif device context + * + * + * Return: none + */ +void cleanup_hif_scatter_resources(struct hif_sdio_dev *device) +{ + struct HIF_SCATTER_REQ_PRIV *req_priv; + struct _HIF_SCATTER_REQ *req; + + /* empty the free list */ + + while (true) { + req = alloc_scatter_req(device); + + if (!req) + break; + + req_priv = (struct HIF_SCATTER_REQ_PRIV *)req->hif_private[0]; + A_ASSERT(req_priv); + if (!req_priv) { + continue; + } + + if (req_priv->busrequest) { + req_priv->busrequest->scatter_req = NULL; + /* free bus request */ + hif_free_bus_request(device, req_priv->busrequest); + req_priv->busrequest = NULL; + } + + if (req_priv->hif_scatter_req) { + qdf_mem_free(req_priv->hif_scatter_req); + req_priv->hif_scatter_req = NULL; + } + + qdf_mem_free(req_priv); + } +} + +#endif /* HIF_LINUX_MMC_SCATTER_SUPPORT */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/regtable_sdio.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/regtable_sdio.c new file mode 100644 index 0000000000000000000000000000000000000000..35632d676a069f6889087842c8373de30e327af5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/regtable_sdio.c @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "bmi_msg.h" +#include "target_type.h" +#include "cepci.h" + +#define MISSING 0 +#include "regtable_sdio.h" +#include "targaddrs.h" +#include "if_sdio.h" +#include "ar9888def.h" +#include "ar6320def.h" +#include "ar6320v2def.h" + +void target_register_tbl_attach(struct hif_softc *scn, u32 target_type) +{ + switch (target_type) { + case TARGET_TYPE_AR9888: + scn->targetdef = &ar9888_targetdef; + break; + case TARGET_TYPE_AR6320: + scn->targetdef = &ar6320_targetdef; + break; + case TARGET_TYPE_AR6320V2: + scn->targetdef = &ar6320v2_targetdef; + break; + default: + break; + } +} + +void hif_register_tbl_attach(struct hif_softc *scn, u32 hif_type) +{ + if (!scn) { + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, + "%s: sc is NULL", __func__); + return; + } + + switch (hif_type) { + case HIF_TYPE_AR9888: + scn->hostdef = &ar9888_hostdef; + break; + case HIF_TYPE_AR6320: + scn->hostdef = &ar6320_hostdef; + break; + case HIF_TYPE_AR6320V2: + scn->hostdef = &ar6320v2_hostdef; + break; + default: + break; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/regtable_sdio.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/regtable_sdio.h new file mode 100644 index 0000000000000000000000000000000000000000..0f5eae5569dc0b41706e5d9d87c8f5f34a34b69e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/regtable_sdio.h @@ -0,0 +1,811 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _REGTABLE_SDIO_H_ +#define _REGTABLE_SDIO_H_ + +#define MISSING 0 +extern struct hif_sdio_softc *scn; + +struct targetdef_s { + uint32_t d_RTC_SOC_BASE_ADDRESS; + uint32_t d_RTC_WMAC_BASE_ADDRESS; + uint32_t d_SYSTEM_SLEEP_OFFSET; + uint32_t d_WLAN_SYSTEM_SLEEP_OFFSET; + uint32_t d_WLAN_SYSTEM_SLEEP_DISABLE_LSB; + uint32_t d_WLAN_SYSTEM_SLEEP_DISABLE_MASK; + uint32_t d_CLOCK_CONTROL_OFFSET; + uint32_t d_CLOCK_CONTROL_SI0_CLK_MASK; + uint32_t d_RESET_CONTROL_OFFSET; + uint32_t d_RESET_CONTROL_MBOX_RST_MASK; + uint32_t d_RESET_CONTROL_SI0_RST_MASK; + uint32_t d_WLAN_RESET_CONTROL_OFFSET; + uint32_t d_WLAN_RESET_CONTROL_COLD_RST_MASK; + uint32_t d_WLAN_RESET_CONTROL_WARM_RST_MASK; + uint32_t d_GPIO_BASE_ADDRESS; + uint32_t d_GPIO_PIN0_OFFSET; + uint32_t d_GPIO_PIN1_OFFSET; + uint32_t d_GPIO_PIN0_CONFIG_MASK; + uint32_t d_GPIO_PIN1_CONFIG_MASK; + uint32_t d_SI_CONFIG_BIDIR_OD_DATA_LSB; + uint32_t d_SI_CONFIG_BIDIR_OD_DATA_MASK; + uint32_t d_SI_CONFIG_I2C_LSB; + uint32_t d_SI_CONFIG_I2C_MASK; + uint32_t d_SI_CONFIG_POS_SAMPLE_LSB; + uint32_t d_SI_CONFIG_POS_SAMPLE_MASK; + uint32_t d_SI_CONFIG_INACTIVE_CLK_LSB; + uint32_t d_SI_CONFIG_INACTIVE_CLK_MASK; + uint32_t d_SI_CONFIG_INACTIVE_DATA_LSB; + uint32_t d_SI_CONFIG_INACTIVE_DATA_MASK; + uint32_t d_SI_CONFIG_DIVIDER_LSB; + uint32_t d_SI_CONFIG_DIVIDER_MASK; + uint32_t d_SI_BASE_ADDRESS; + uint32_t d_SI_CONFIG_OFFSET; + uint32_t d_SI_TX_DATA0_OFFSET; + uint32_t d_SI_TX_DATA1_OFFSET; + uint32_t d_SI_RX_DATA0_OFFSET; + uint32_t d_SI_RX_DATA1_OFFSET; + uint32_t d_SI_CS_OFFSET; + uint32_t d_SI_CS_DONE_ERR_MASK; + uint32_t d_SI_CS_DONE_INT_MASK; + uint32_t d_SI_CS_START_LSB; + uint32_t d_SI_CS_START_MASK; + uint32_t d_SI_CS_RX_CNT_LSB; + uint32_t d_SI_CS_RX_CNT_MASK; + uint32_t d_SI_CS_TX_CNT_LSB; + uint32_t d_SI_CS_TX_CNT_MASK; + uint32_t d_BOARD_DATA_SZ; + uint32_t d_BOARD_EXT_DATA_SZ; + uint32_t d_MBOX_BASE_ADDRESS; + uint32_t d_LOCAL_SCRATCH_OFFSET; + uint32_t d_CPU_CLOCK_OFFSET; + uint32_t d_LPO_CAL_OFFSET; + uint32_t d_GPIO_PIN10_OFFSET; + uint32_t d_GPIO_PIN11_OFFSET; + uint32_t d_GPIO_PIN12_OFFSET; + uint32_t d_GPIO_PIN13_OFFSET; + uint32_t d_CLOCK_GPIO_OFFSET; + uint32_t d_CPU_CLOCK_STANDARD_LSB; + uint32_t d_CPU_CLOCK_STANDARD_MASK; + uint32_t d_LPO_CAL_ENABLE_LSB; + uint32_t d_LPO_CAL_ENABLE_MASK; + uint32_t d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB; + uint32_t d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK; + uint32_t d_ANALOG_INTF_BASE_ADDRESS; + uint32_t d_WLAN_MAC_BASE_ADDRESS; + uint32_t d_FW_INDICATOR_ADDRESS; + uint32_t d_DRAM_BASE_ADDRESS; + uint32_t d_SOC_CORE_BASE_ADDRESS; + uint32_t d_CORE_CTRL_ADDRESS; + uint32_t d_MSI_NUM_REQUEST; + uint32_t d_MSI_ASSIGN_FW; + uint32_t d_CORE_CTRL_CPU_INTR_MASK; + uint32_t d_SR_WR_INDEX_ADDRESS; + uint32_t d_DST_WATERMARK_ADDRESS; + + /* htt_rx.c */ + uint32_t d_RX_MSDU_END_4_FIRST_MSDU_MASK; + uint32_t d_RX_MSDU_END_4_FIRST_MSDU_LSB; + uint32_t d_RX_MPDU_START_0_RETRY_LSB; + uint32_t d_RX_MPDU_START_0_RETRY_MASK; + uint32_t d_RX_MPDU_START_0_SEQ_NUM_MASK; + uint32_t d_RX_MPDU_START_0_SEQ_NUM_LSB; + uint32_t d_RX_MPDU_START_2_PN_47_32_LSB; + uint32_t d_RX_MPDU_START_2_PN_47_32_MASK; + uint32_t d_RX_MPDU_START_2_TID_LSB; + uint32_t d_RX_MPDU_START_2_TID_MASK; + uint32_t d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK; + uint32_t d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB; + uint32_t d_RX_MSDU_END_1_KEY_ID_OCT_MASK; + uint32_t d_RX_MSDU_END_1_KEY_ID_OCT_LSB; + uint32_t d_RX_MSDU_END_4_LAST_MSDU_MASK; + uint32_t d_RX_MSDU_END_4_LAST_MSDU_LSB; + uint32_t d_RX_ATTENTION_0_MCAST_BCAST_MASK; + uint32_t d_RX_ATTENTION_0_MCAST_BCAST_LSB; + uint32_t d_RX_ATTENTION_0_FRAGMENT_MASK; + uint32_t d_RX_ATTENTION_0_FRAGMENT_LSB; + uint32_t d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK; + uint32_t d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK; + uint32_t d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB; + uint32_t d_RX_MSDU_START_0_MSDU_LENGTH_MASK; + uint32_t d_RX_MSDU_START_0_MSDU_LENGTH_LSB; + uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET; + uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_MASK; + uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_LSB; + uint32_t d_RX_MPDU_START_0_ENCRYPTED_MASK; + uint32_t d_RX_MPDU_START_0_ENCRYPTED_LSB; + uint32_t d_RX_ATTENTION_0_MORE_DATA_MASK; + uint32_t d_RX_ATTENTION_0_MSDU_DONE_MASK; + uint32_t d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK; + /* end */ + + /* PLL start */ + uint32_t d_EFUSE_OFFSET; + uint32_t d_EFUSE_XTAL_SEL_MSB; + uint32_t d_EFUSE_XTAL_SEL_LSB; + uint32_t d_EFUSE_XTAL_SEL_MASK; + uint32_t d_BB_PLL_CONFIG_OFFSET; + uint32_t d_BB_PLL_CONFIG_OUTDIV_MSB; + uint32_t d_BB_PLL_CONFIG_OUTDIV_LSB; + uint32_t d_BB_PLL_CONFIG_OUTDIV_MASK; + uint32_t d_BB_PLL_CONFIG_FRAC_MSB; + uint32_t d_BB_PLL_CONFIG_FRAC_LSB; + uint32_t d_BB_PLL_CONFIG_FRAC_MASK; + uint32_t d_WLAN_PLL_SETTLE_TIME_MSB; + uint32_t d_WLAN_PLL_SETTLE_TIME_LSB; + uint32_t d_WLAN_PLL_SETTLE_TIME_MASK; + uint32_t d_WLAN_PLL_SETTLE_OFFSET; + uint32_t d_WLAN_PLL_SETTLE_SW_MASK; + uint32_t d_WLAN_PLL_SETTLE_RSTMASK; + uint32_t d_WLAN_PLL_SETTLE_RESET; + uint32_t d_WLAN_PLL_CONTROL_NOPWD_MSB; + uint32_t d_WLAN_PLL_CONTROL_NOPWD_LSB; + uint32_t d_WLAN_PLL_CONTROL_NOPWD_MASK; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_MSB; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_LSB; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_MASK; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_RESET; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_MSB; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_LSB; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_MASK; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_RESET; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_MSB; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_LSB; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_MASK; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_RESET; + uint32_t d_WLAN_PLL_CONTROL_DIV_MSB; + uint32_t d_WLAN_PLL_CONTROL_DIV_LSB; + uint32_t d_WLAN_PLL_CONTROL_DIV_MASK; + uint32_t d_WLAN_PLL_CONTROL_DIV_RESET; + uint32_t d_WLAN_PLL_CONTROL_OFFSET; + uint32_t d_WLAN_PLL_CONTROL_SW_MASK; + uint32_t d_WLAN_PLL_CONTROL_RSTMASK; + uint32_t d_WLAN_PLL_CONTROL_RESET; + uint32_t d_SOC_CORE_CLK_CTRL_OFFSET; + uint32_t d_SOC_CORE_CLK_CTRL_DIV_MSB; + uint32_t d_SOC_CORE_CLK_CTRL_DIV_LSB; + uint32_t d_SOC_CORE_CLK_CTRL_DIV_MASK; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_MSB; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_LSB; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_MASK; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_RESET; + uint32_t d_RTC_SYNC_STATUS_OFFSET; + uint32_t d_SOC_CPU_CLOCK_OFFSET; + uint32_t d_SOC_CPU_CLOCK_STANDARD_MSB; + uint32_t d_SOC_CPU_CLOCK_STANDARD_LSB; + uint32_t d_SOC_CPU_CLOCK_STANDARD_MASK; + /* PLL end */ + + uint32_t d_SOC_POWER_REG_OFFSET; + uint32_t d_SOC_RESET_CONTROL_ADDRESS; + uint32_t d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK; + uint32_t d_CPU_INTR_ADDRESS; + uint32_t d_SOC_LF_TIMER_CONTROL0_ADDRESS; + uint32_t d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK; + uint32_t d_SOC_LF_TIMER_STATUS0_ADDRESS; + + /* chip id start */ + uint32_t d_SOC_CHIP_ID_ADDRESS; + uint32_t d_SOC_CHIP_ID_VERSION_MASK; + uint32_t d_SOC_CHIP_ID_VERSION_LSB; + uint32_t d_SOC_CHIP_ID_REVISION_MASK; + uint32_t d_SOC_CHIP_ID_REVISION_LSB; + /* chip id end */ + + uint32_t d_A_SOC_CORE_SCRATCH_0_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_1_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_2_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_3_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_4_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_5_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_6_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_7_ADDRESS; + uint32_t d_A_SOC_CORE_SPARE_0_REGISTER; + uint32_t d_A_SOC_CORE_SPARE_1_REGISTER; + + uint32_t d_WLAN_DEBUG_INPUT_SEL_OFFSET; + uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_MSB; + uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_LSB; + uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_MASK; + uint32_t d_WLAN_DEBUG_CONTROL_OFFSET; + uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_MSB; + uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_LSB; + uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_MASK; + uint32_t d_WLAN_DEBUG_OUT_OFFSET; + uint32_t d_WLAN_DEBUG_OUT_DATA_MSB; + uint32_t d_WLAN_DEBUG_OUT_DATA_LSB; + uint32_t d_WLAN_DEBUG_OUT_DATA_MASK; + uint32_t d_AMBA_DEBUG_BUS_OFFSET; + uint32_t d_AMBA_DEBUG_BUS_SEL_MSB; + uint32_t d_AMBA_DEBUG_BUS_SEL_LSB; + uint32_t d_AMBA_DEBUG_BUS_SEL_MASK; + +#ifdef QCA_WIFI_3_0_ADRASTEA + uint32_t d_Q6_ENABLE_REGISTER_0; + uint32_t d_Q6_ENABLE_REGISTER_1; + uint32_t d_Q6_CAUSE_REGISTER_0; + uint32_t d_Q6_CAUSE_REGISTER_1; + uint32_t d_Q6_CLEAR_REGISTER_0; + uint32_t d_Q6_CLEAR_REGISTER_1; +#endif +}; + +#define A_SOC_CORE_SPARE_0_REGISTER \ + (scn->targetdef->d_A_SOC_CORE_SPARE_0_REGISTER) +#define A_SOC_CORE_SCRATCH_0_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_0_ADDRESS) +#define A_SOC_CORE_SCRATCH_1_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_1_ADDRESS) +#define A_SOC_CORE_SCRATCH_2_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_2_ADDRESS) +#define A_SOC_CORE_SCRATCH_3_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_3_ADDRESS) +#define A_SOC_CORE_SCRATCH_4_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_4_ADDRESS) +#define A_SOC_CORE_SCRATCH_5_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_5_ADDRESS) +#define A_SOC_CORE_SCRATCH_6_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_6_ADDRESS) +#define A_SOC_CORE_SCRATCH_7_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_7_ADDRESS) +#define RTC_SOC_BASE_ADDRESS (scn->targetdef->d_RTC_SOC_BASE_ADDRESS) +#define RTC_WMAC_BASE_ADDRESS (scn->targetdef->d_RTC_WMAC_BASE_ADDRESS) +#define SYSTEM_SLEEP_OFFSET (scn->targetdef->d_SYSTEM_SLEEP_OFFSET) +#define WLAN_SYSTEM_SLEEP_OFFSET \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_OFFSET) +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_LSB) +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_MASK) +#define CLOCK_CONTROL_OFFSET (scn->targetdef->d_CLOCK_CONTROL_OFFSET) +#define CLOCK_CONTROL_SI0_CLK_MASK \ + (scn->targetdef->d_CLOCK_CONTROL_SI0_CLK_MASK) +#define RESET_CONTROL_OFFSET (scn->targetdef->d_RESET_CONTROL_OFFSET) +#define RESET_CONTROL_MBOX_RST_MASK \ + (scn->targetdef->d_RESET_CONTROL_MBOX_RST_MASK) +#define RESET_CONTROL_SI0_RST_MASK \ + (scn->targetdef->d_RESET_CONTROL_SI0_RST_MASK) +#define WLAN_RESET_CONTROL_OFFSET \ + (scn->targetdef->d_WLAN_RESET_CONTROL_OFFSET) +#define WLAN_RESET_CONTROL_COLD_RST_MASK \ + (scn->targetdef->d_WLAN_RESET_CONTROL_COLD_RST_MASK) +#define WLAN_RESET_CONTROL_WARM_RST_MASK \ + (scn->targetdef->d_WLAN_RESET_CONTROL_WARM_RST_MASK) +#define GPIO_BASE_ADDRESS (scn->targetdef->d_GPIO_BASE_ADDRESS) +#define GPIO_PIN0_OFFSET (scn->targetdef->d_GPIO_PIN0_OFFSET) +#define GPIO_PIN1_OFFSET (scn->targetdef->d_GPIO_PIN1_OFFSET) +#define GPIO_PIN0_CONFIG_MASK (scn->targetdef->d_GPIO_PIN0_CONFIG_MASK) +#define GPIO_PIN1_CONFIG_MASK (scn->targetdef->d_GPIO_PIN1_CONFIG_MASK) +#define A_SOC_CORE_SCRATCH_0 (scn->targetdef->d_A_SOC_CORE_SCRATCH_0) +#define SI_CONFIG_BIDIR_OD_DATA_LSB \ + (scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_LSB) +#define SI_CONFIG_BIDIR_OD_DATA_MASK \ + (scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_MASK) +#define SI_CONFIG_I2C_LSB (scn->targetdef->d_SI_CONFIG_I2C_LSB) +#define SI_CONFIG_I2C_MASK \ + (scn->targetdef->d_SI_CONFIG_I2C_MASK) +#define SI_CONFIG_POS_SAMPLE_LSB \ + (scn->targetdef->d_SI_CONFIG_POS_SAMPLE_LSB) +#define SI_CONFIG_POS_SAMPLE_MASK \ + (scn->targetdef->d_SI_CONFIG_POS_SAMPLE_MASK) +#define SI_CONFIG_INACTIVE_CLK_LSB \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_LSB) +#define SI_CONFIG_INACTIVE_CLK_MASK \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_MASK) +#define SI_CONFIG_INACTIVE_DATA_LSB \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_LSB) +#define SI_CONFIG_INACTIVE_DATA_MASK \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_MASK) +#define SI_CONFIG_DIVIDER_LSB (scn->targetdef->d_SI_CONFIG_DIVIDER_LSB) +#define SI_CONFIG_DIVIDER_MASK (scn->targetdef->d_SI_CONFIG_DIVIDER_MASK) +#define SI_BASE_ADDRESS (scn->targetdef->d_SI_BASE_ADDRESS) +#define SI_CONFIG_OFFSET (scn->targetdef->d_SI_CONFIG_OFFSET) +#define SI_TX_DATA0_OFFSET (scn->targetdef->d_SI_TX_DATA0_OFFSET) +#define SI_TX_DATA1_OFFSET (scn->targetdef->d_SI_TX_DATA1_OFFSET) +#define SI_RX_DATA0_OFFSET (scn->targetdef->d_SI_RX_DATA0_OFFSET) +#define SI_RX_DATA1_OFFSET (scn->targetdef->d_SI_RX_DATA1_OFFSET) +#define SI_CS_OFFSET (scn->targetdef->d_SI_CS_OFFSET) +#define SI_CS_DONE_ERR_MASK (scn->targetdef->d_SI_CS_DONE_ERR_MASK) +#define SI_CS_DONE_INT_MASK (scn->targetdef->d_SI_CS_DONE_INT_MASK) +#define SI_CS_START_LSB (scn->targetdef->d_SI_CS_START_LSB) +#define SI_CS_START_MASK (scn->targetdef->d_SI_CS_START_MASK) +#define SI_CS_RX_CNT_LSB (scn->targetdef->d_SI_CS_RX_CNT_LSB) +#define SI_CS_RX_CNT_MASK (scn->targetdef->d_SI_CS_RX_CNT_MASK) +#define SI_CS_TX_CNT_LSB (scn->targetdef->d_SI_CS_TX_CNT_LSB) +#define SI_CS_TX_CNT_MASK (scn->targetdef->d_SI_CS_TX_CNT_MASK) +#define EEPROM_SZ (scn->targetdef->d_BOARD_DATA_SZ) +#define EEPROM_EXT_SZ (scn->targetdef->d_BOARD_EXT_DATA_SZ) +#define MBOX_BASE_ADDRESS (scn->targetdef->d_MBOX_BASE_ADDRESS) +#define LOCAL_SCRATCH_OFFSET (scn->targetdef->d_LOCAL_SCRATCH_OFFSET) +#define CPU_CLOCK_OFFSET (scn->targetdef->d_CPU_CLOCK_OFFSET) +#define LPO_CAL_OFFSET (scn->targetdef->d_LPO_CAL_OFFSET) +#define GPIO_PIN10_OFFSET (scn->targetdef->d_GPIO_PIN10_OFFSET) +#define GPIO_PIN11_OFFSET (scn->targetdef->d_GPIO_PIN11_OFFSET) +#define GPIO_PIN12_OFFSET (scn->targetdef->d_GPIO_PIN12_OFFSET) +#define GPIO_PIN13_OFFSET (scn->targetdef->d_GPIO_PIN13_OFFSET) +#define CLOCK_GPIO_OFFSET (scn->targetdef->d_CLOCK_GPIO_OFFSET) +#define CPU_CLOCK_STANDARD_LSB (scn->targetdef->d_CPU_CLOCK_STANDARD_LSB) +#define CPU_CLOCK_STANDARD_MASK (scn->targetdef->d_CPU_CLOCK_STANDARD_MASK) +#define LPO_CAL_ENABLE_LSB (scn->targetdef->d_LPO_CAL_ENABLE_LSB) +#define LPO_CAL_ENABLE_MASK (scn->targetdef->d_LPO_CAL_ENABLE_MASK) +#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB \ + (scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB) +#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK \ + (scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK) +#define ANALOG_INTF_BASE_ADDRESS (scn->targetdef->d_ANALOG_INTF_BASE_ADDRESS) +#define WLAN_MAC_BASE_ADDRESS (scn->targetdef->d_WLAN_MAC_BASE_ADDRESS) +#define FW_INDICATOR_ADDRESS (scn->targetdef->d_FW_INDICATOR_ADDRESS) +#define DRAM_BASE_ADDRESS (scn->targetdef->d_DRAM_BASE_ADDRESS) +#define SOC_CORE_BASE_ADDRESS (scn->targetdef->d_SOC_CORE_BASE_ADDRESS) +#define CORE_CTRL_ADDRESS (scn->targetdef->d_CORE_CTRL_ADDRESS) +#define CORE_CTRL_CPU_INTR_MASK (scn->targetdef->d_CORE_CTRL_CPU_INTR_MASK) +#define SOC_RESET_CONTROL_ADDRESS (scn->targetdef->d_SOC_RESET_CONTROL_ADDRESS) +#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK) +#define CPU_INTR_ADDRESS (scn->targetdef->d_CPU_INTR_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ADDRESS \ + (scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK \ + (scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK) +#define SOC_LF_TIMER_STATUS0_ADDRESS \ + (scn->targetdef->d_SOC_LF_TIMER_STATUS0_ADDRESS) + + +#define CHIP_ID_ADDRESS (scn->targetdef->d_SOC_CHIP_ID_ADDRESS) +#define SOC_CHIP_ID_REVISION_MASK (scn->targetdef->d_SOC_CHIP_ID_REVISION_MASK) +#define SOC_CHIP_ID_REVISION_LSB (scn->targetdef->d_SOC_CHIP_ID_REVISION_LSB) +#define SOC_CHIP_ID_VERSION_MASK (scn->targetdef->d_SOC_CHIP_ID_VERSION_MASK) +#define SOC_CHIP_ID_VERSION_LSB (scn->targetdef->d_SOC_CHIP_ID_VERSION_LSB) +#define CHIP_ID_REVISION_GET(x) \ + (((x) & SOC_CHIP_ID_REVISION_MASK) >> SOC_CHIP_ID_REVISION_LSB) +#define CHIP_ID_VERSION_GET(x) \ + (((x) & SOC_CHIP_ID_VERSION_MASK) >> SOC_CHIP_ID_VERSION_LSB) + +/* misc */ +#define SR_WR_INDEX_ADDRESS (scn->targetdef->d_SR_WR_INDEX_ADDRESS) +#define DST_WATERMARK_ADDRESS (scn->targetdef->d_DST_WATERMARK_ADDRESS) +#define SOC_POWER_REG_OFFSET (scn->targetdef->d_SOC_POWER_REG_OFFSET) +/* end */ + +/* copy_engine.c */ +/* end */ +/* PLL start */ +#define EFUSE_OFFSET (scn->targetdef->d_EFUSE_OFFSET) +#define EFUSE_XTAL_SEL_MSB (scn->targetdef->d_EFUSE_XTAL_SEL_MSB) +#define EFUSE_XTAL_SEL_LSB (scn->targetdef->d_EFUSE_XTAL_SEL_LSB) +#define EFUSE_XTAL_SEL_MASK (scn->targetdef->d_EFUSE_XTAL_SEL_MASK) +#define BB_PLL_CONFIG_OFFSET (scn->targetdef->d_BB_PLL_CONFIG_OFFSET) +#define BB_PLL_CONFIG_OUTDIV_MSB (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MSB) +#define BB_PLL_CONFIG_OUTDIV_LSB (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_LSB) +#define BB_PLL_CONFIG_OUTDIV_MASK (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MASK) +#define BB_PLL_CONFIG_FRAC_MSB (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MSB) +#define BB_PLL_CONFIG_FRAC_LSB (scn->targetdef->d_BB_PLL_CONFIG_FRAC_LSB) +#define BB_PLL_CONFIG_FRAC_MASK (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MASK) +#define WLAN_PLL_SETTLE_TIME_MSB (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MSB) +#define WLAN_PLL_SETTLE_TIME_LSB (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_LSB) +#define WLAN_PLL_SETTLE_TIME_MASK (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MASK) +#define WLAN_PLL_SETTLE_OFFSET (scn->targetdef->d_WLAN_PLL_SETTLE_OFFSET) +#define WLAN_PLL_SETTLE_SW_MASK (scn->targetdef->d_WLAN_PLL_SETTLE_SW_MASK) +#define WLAN_PLL_SETTLE_RSTMASK (scn->targetdef->d_WLAN_PLL_SETTLE_RSTMASK) +#define WLAN_PLL_SETTLE_RESET (scn->targetdef->d_WLAN_PLL_SETTLE_RESET) +#define WLAN_PLL_CONTROL_NOPWD_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MSB) +#define WLAN_PLL_CONTROL_NOPWD_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_LSB) +#define WLAN_PLL_CONTROL_NOPWD_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MASK) +#define WLAN_PLL_CONTROL_BYPASS_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MSB) +#define WLAN_PLL_CONTROL_BYPASS_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_LSB) +#define WLAN_PLL_CONTROL_BYPASS_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MASK) +#define WLAN_PLL_CONTROL_BYPASS_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_RESET) +#define WLAN_PLL_CONTROL_CLK_SEL_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MSB) +#define WLAN_PLL_CONTROL_CLK_SEL_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_LSB) +#define WLAN_PLL_CONTROL_CLK_SEL_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MASK) +#define WLAN_PLL_CONTROL_CLK_SEL_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_RESET) +#define WLAN_PLL_CONTROL_REFDIV_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MSB) +#define WLAN_PLL_CONTROL_REFDIV_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_LSB) +#define WLAN_PLL_CONTROL_REFDIV_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MASK) +#define WLAN_PLL_CONTROL_REFDIV_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_RESET) +#define WLAN_PLL_CONTROL_DIV_MSB (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MSB) +#define WLAN_PLL_CONTROL_DIV_LSB (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_LSB) +#define WLAN_PLL_CONTROL_DIV_MASK (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MASK) +#define WLAN_PLL_CONTROL_DIV_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_RESET) +#define WLAN_PLL_CONTROL_OFFSET (scn->targetdef->d_WLAN_PLL_CONTROL_OFFSET) +#define WLAN_PLL_CONTROL_SW_MASK (scn->targetdef->d_WLAN_PLL_CONTROL_SW_MASK) +#define WLAN_PLL_CONTROL_RSTMASK (scn->targetdef->d_WLAN_PLL_CONTROL_RSTMASK) +#define WLAN_PLL_CONTROL_RESET (scn->targetdef->d_WLAN_PLL_CONTROL_RESET) +#define SOC_CORE_CLK_CTRL_OFFSET (scn->targetdef->d_SOC_CORE_CLK_CTRL_OFFSET) +#define SOC_CORE_CLK_CTRL_DIV_MSB (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MSB) +#define SOC_CORE_CLK_CTRL_DIV_LSB (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_LSB) +#define SOC_CORE_CLK_CTRL_DIV_MASK \ + (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_MSB \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_LSB \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_LSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_MASK \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_RESET \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_RESET) +#define RTC_SYNC_STATUS_OFFSET (scn->targetdef->d_RTC_SYNC_STATUS_OFFSET) +#define SOC_CPU_CLOCK_OFFSET (scn->targetdef->d_SOC_CPU_CLOCK_OFFSET) +#define SOC_CPU_CLOCK_STANDARD_MSB \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MSB) +#define SOC_CPU_CLOCK_STANDARD_LSB \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_LSB) +#define SOC_CPU_CLOCK_STANDARD_MASK \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MASK) +/* PLL end */ + +/* SET macros */ +#define WLAN_SYSTEM_SLEEP_DISABLE_SET(x) \ + (((x) << WLAN_SYSTEM_SLEEP_DISABLE_LSB) & \ + WLAN_SYSTEM_SLEEP_DISABLE_MASK) +#define SI_CONFIG_BIDIR_OD_DATA_SET(x) \ + (((x) << SI_CONFIG_BIDIR_OD_DATA_LSB) & SI_CONFIG_BIDIR_OD_DATA_MASK) +#define SI_CONFIG_I2C_SET(x) (((x) << SI_CONFIG_I2C_LSB) & SI_CONFIG_I2C_MASK) +#define SI_CONFIG_POS_SAMPLE_SET(x) \ + (((x) << SI_CONFIG_POS_SAMPLE_LSB) & SI_CONFIG_POS_SAMPLE_MASK) +#define SI_CONFIG_INACTIVE_CLK_SET(x) \ + (((x) << SI_CONFIG_INACTIVE_CLK_LSB) & SI_CONFIG_INACTIVE_CLK_MASK) +#define SI_CONFIG_INACTIVE_DATA_SET(x) \ + (((x) << SI_CONFIG_INACTIVE_DATA_LSB) & SI_CONFIG_INACTIVE_DATA_MASK) +#define SI_CONFIG_DIVIDER_SET(x) \ + (((x) << SI_CONFIG_DIVIDER_LSB) & SI_CONFIG_DIVIDER_MASK) +#define SI_CS_START_SET(x) (((x) << SI_CS_START_LSB) & SI_CS_START_MASK) +#define SI_CS_RX_CNT_SET(x) (((x) << SI_CS_RX_CNT_LSB) & SI_CS_RX_CNT_MASK) +#define SI_CS_TX_CNT_SET(x) (((x) << SI_CS_TX_CNT_LSB) & SI_CS_TX_CNT_MASK) +#define LPO_CAL_ENABLE_SET(x) \ + (((x) << LPO_CAL_ENABLE_LSB) & LPO_CAL_ENABLE_MASK) +#define CPU_CLOCK_STANDARD_SET(x) \ + (((x) << CPU_CLOCK_STANDARD_LSB) & CPU_CLOCK_STANDARD_MASK) +#define CLOCK_GPIO_BT_CLK_OUT_EN_SET(x) \ + (((x) << CLOCK_GPIO_BT_CLK_OUT_EN_LSB) & CLOCK_GPIO_BT_CLK_OUT_EN_MASK) +/* copy_engine.c */ +/* end */ +/* PLL start */ +#define EFUSE_XTAL_SEL_GET(x) \ + (((x) & EFUSE_XTAL_SEL_MASK) >> EFUSE_XTAL_SEL_LSB) +#define EFUSE_XTAL_SEL_SET(x) \ + (((x) << EFUSE_XTAL_SEL_LSB) & EFUSE_XTAL_SEL_MASK) +#define BB_PLL_CONFIG_OUTDIV_GET(x) \ + (((x) & BB_PLL_CONFIG_OUTDIV_MASK) >> BB_PLL_CONFIG_OUTDIV_LSB) +#define BB_PLL_CONFIG_OUTDIV_SET(x) \ + (((x) << BB_PLL_CONFIG_OUTDIV_LSB) & BB_PLL_CONFIG_OUTDIV_MASK) +#define BB_PLL_CONFIG_FRAC_GET(x) \ + (((x) & BB_PLL_CONFIG_FRAC_MASK) >> BB_PLL_CONFIG_FRAC_LSB) +#define BB_PLL_CONFIG_FRAC_SET(x) \ + (((x) << BB_PLL_CONFIG_FRAC_LSB) & BB_PLL_CONFIG_FRAC_MASK) +#define WLAN_PLL_SETTLE_TIME_GET(x) \ + (((x) & WLAN_PLL_SETTLE_TIME_MASK) >> WLAN_PLL_SETTLE_TIME_LSB) +#define WLAN_PLL_SETTLE_TIME_SET(x) \ + (((x) << WLAN_PLL_SETTLE_TIME_LSB) & WLAN_PLL_SETTLE_TIME_MASK) +#define WLAN_PLL_CONTROL_NOPWD_GET(x) \ + (((x) & WLAN_PLL_CONTROL_NOPWD_MASK) >> WLAN_PLL_CONTROL_NOPWD_LSB) +#define WLAN_PLL_CONTROL_NOPWD_SET(x) \ + (((x) << WLAN_PLL_CONTROL_NOPWD_LSB) & WLAN_PLL_CONTROL_NOPWD_MASK) +#define WLAN_PLL_CONTROL_BYPASS_GET(x) \ + (((x) & WLAN_PLL_CONTROL_BYPASS_MASK) >> WLAN_PLL_CONTROL_BYPASS_LSB) +#define WLAN_PLL_CONTROL_BYPASS_SET(x) \ + (((x) << WLAN_PLL_CONTROL_BYPASS_LSB) & WLAN_PLL_CONTROL_BYPASS_MASK) +#define WLAN_PLL_CONTROL_CLK_SEL_GET(x) \ + (((x) & WLAN_PLL_CONTROL_CLK_SEL_MASK) >> WLAN_PLL_CONTROL_CLK_SEL_LSB) +#define WLAN_PLL_CONTROL_CLK_SEL_SET(x) \ + (((x) << WLAN_PLL_CONTROL_CLK_SEL_LSB) & WLAN_PLL_CONTROL_CLK_SEL_MASK) +#define WLAN_PLL_CONTROL_REFDIV_GET(x) \ + (((x) & WLAN_PLL_CONTROL_REFDIV_MASK) >> WLAN_PLL_CONTROL_REFDIV_LSB) +#define WLAN_PLL_CONTROL_REFDIV_SET(x) \ + (((x) << WLAN_PLL_CONTROL_REFDIV_LSB) & WLAN_PLL_CONTROL_REFDIV_MASK) +#define WLAN_PLL_CONTROL_DIV_GET(x) \ + (((x) & WLAN_PLL_CONTROL_DIV_MASK) >> WLAN_PLL_CONTROL_DIV_LSB) +#define WLAN_PLL_CONTROL_DIV_SET(x) \ + (((x) << WLAN_PLL_CONTROL_DIV_LSB) & WLAN_PLL_CONTROL_DIV_MASK) +#define SOC_CORE_CLK_CTRL_DIV_GET(x) \ + (((x) & SOC_CORE_CLK_CTRL_DIV_MASK) >> SOC_CORE_CLK_CTRL_DIV_LSB) +#define SOC_CORE_CLK_CTRL_DIV_SET(x) \ + (((x) << SOC_CORE_CLK_CTRL_DIV_LSB) & SOC_CORE_CLK_CTRL_DIV_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_GET(x) \ + (((x) & RTC_SYNC_STATUS_PLL_CHANGING_MASK) >> \ + RTC_SYNC_STATUS_PLL_CHANGING_LSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_SET(x) \ + (((x) << RTC_SYNC_STATUS_PLL_CHANGING_LSB) & \ + RTC_SYNC_STATUS_PLL_CHANGING_MASK) +#define SOC_CPU_CLOCK_STANDARD_GET(x) \ + (((x) & SOC_CPU_CLOCK_STANDARD_MASK) >> SOC_CPU_CLOCK_STANDARD_LSB) +#define SOC_CPU_CLOCK_STANDARD_SET(x) \ + (((x) << SOC_CPU_CLOCK_STANDARD_LSB) & SOC_CPU_CLOCK_STANDARD_MASK) +/* PLL end */ + +#ifdef QCA_WIFI_3_0_ADRASTEA +#define Q6_ENABLE_REGISTER_0 \ + (scn->targetdef->d_Q6_ENABLE_REGISTER_0) +#define Q6_ENABLE_REGISTER_1 \ + (scn->targetdef->d_Q6_ENABLE_REGISTER_1) +#define Q6_CAUSE_REGISTER_0 \ + (scn->targetdef->d_Q6_CAUSE_REGISTER_0) +#define Q6_CAUSE_REGISTER_1 \ + (scn->targetdef->d_Q6_CAUSE_REGISTER_1) +#define Q6_CLEAR_REGISTER_0 \ + (scn->targetdef->d_Q6_CLEAR_REGISTER_0) +#define Q6_CLEAR_REGISTER_1 \ + (scn->targetdef->d_Q6_CLEAR_REGISTER_1) +#endif + +struct hostdef_s { + uint32_t d_INT_STATUS_ENABLE_ERROR_LSB; + uint32_t d_INT_STATUS_ENABLE_ERROR_MASK; + uint32_t d_INT_STATUS_ENABLE_CPU_LSB; + uint32_t d_INT_STATUS_ENABLE_CPU_MASK; + uint32_t d_INT_STATUS_ENABLE_COUNTER_LSB; + uint32_t d_INT_STATUS_ENABLE_COUNTER_MASK; + uint32_t d_INT_STATUS_ENABLE_MBOX_DATA_LSB; + uint32_t d_INT_STATUS_ENABLE_MBOX_DATA_MASK; + uint32_t d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB; + uint32_t d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK; + uint32_t d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB; + uint32_t d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK; + uint32_t d_COUNTER_INT_STATUS_ENABLE_BIT_LSB; + uint32_t d_COUNTER_INT_STATUS_ENABLE_BIT_MASK; + uint32_t d_INT_STATUS_ENABLE_ADDRESS; + uint32_t d_CPU_INT_STATUS_ENABLE_BIT_LSB; + uint32_t d_CPU_INT_STATUS_ENABLE_BIT_MASK; + uint32_t d_HOST_INT_STATUS_ADDRESS; + uint32_t d_CPU_INT_STATUS_ADDRESS; + uint32_t d_ERROR_INT_STATUS_ADDRESS; + uint32_t d_ERROR_INT_STATUS_WAKEUP_MASK; + uint32_t d_ERROR_INT_STATUS_WAKEUP_LSB; + uint32_t d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK; + uint32_t d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB; + uint32_t d_ERROR_INT_STATUS_TX_OVERFLOW_MASK; + uint32_t d_ERROR_INT_STATUS_TX_OVERFLOW_LSB; + uint32_t d_COUNT_DEC_ADDRESS; + uint32_t d_HOST_INT_STATUS_CPU_MASK; + uint32_t d_HOST_INT_STATUS_CPU_LSB; + uint32_t d_HOST_INT_STATUS_ERROR_MASK; + uint32_t d_HOST_INT_STATUS_ERROR_LSB; + uint32_t d_HOST_INT_STATUS_COUNTER_MASK; + uint32_t d_HOST_INT_STATUS_COUNTER_LSB; + uint32_t d_RX_LOOKAHEAD_VALID_ADDRESS; + uint32_t d_WINDOW_DATA_ADDRESS; + uint32_t d_WINDOW_READ_ADDR_ADDRESS; + uint32_t d_WINDOW_WRITE_ADDR_ADDRESS; + uint32_t d_SOC_GLOBAL_RESET_ADDRESS; + uint32_t d_RTC_STATE_ADDRESS; + uint32_t d_RTC_STATE_COLD_RESET_MASK; + uint32_t d_RTC_STATE_V_MASK; + uint32_t d_RTC_STATE_V_LSB; + uint32_t d_FW_IND_EVENT_PENDING; + uint32_t d_FW_IND_INITIALIZED; + uint32_t d_FW_IND_HELPER; + uint32_t d_RTC_STATE_V_ON; +#if defined(SDIO_3_0) + uint32_t d_HOST_INT_STATUS_MBOX_DATA_MASK; + uint32_t d_HOST_INT_STATUS_MBOX_DATA_LSB; +#endif + uint32_t d_MSI_MAGIC_ADR_ADDRESS; + uint32_t d_MSI_MAGIC_ADDRESS; + uint32_t d_ENABLE_MSI; + uint32_t d_MUX_ID_MASK; + uint32_t d_TRANSACTION_ID_MASK; + uint32_t d_DESC_DATA_FLAG_MASK; +}; +#define DESC_DATA_FLAG_MASK (scn->hostdef->d_DESC_DATA_FLAG_MASK) +#define MUX_ID_MASK (scn->hostdef->d_MUX_ID_MASK) +#define TRANSACTION_ID_MASK (scn->hostdef->d_TRANSACTION_ID_MASK) +#define ENABLE_MSI (scn->hostdef->d_ENABLE_MSI) +#define INT_STATUS_ENABLE_ERROR_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_ERROR_LSB) +#define INT_STATUS_ENABLE_ERROR_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_ERROR_MASK) +#define INT_STATUS_ENABLE_CPU_LSB (scn->hostdef->d_INT_STATUS_ENABLE_CPU_LSB) +#define INT_STATUS_ENABLE_CPU_MASK (scn->hostdef->d_INT_STATUS_ENABLE_CPU_MASK) +#define INT_STATUS_ENABLE_COUNTER_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_LSB) +#define INT_STATUS_ENABLE_COUNTER_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_MASK) +#define INT_STATUS_ENABLE_MBOX_DATA_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_LSB) +#define INT_STATUS_ENABLE_MBOX_DATA_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_MASK) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB \ + (scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_LSB) +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK \ + (scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_MASK) +#define INT_STATUS_ENABLE_ADDRESS \ + (scn->hostdef->d_INT_STATUS_ENABLE_ADDRESS) +#define CPU_INT_STATUS_ENABLE_BIT_LSB \ + (scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_LSB) +#define CPU_INT_STATUS_ENABLE_BIT_MASK \ + (scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_MASK) +#define HOST_INT_STATUS_ADDRESS (scn->hostdef->d_HOST_INT_STATUS_ADDRESS) +#define CPU_INT_STATUS_ADDRESS (scn->hostdef->d_CPU_INT_STATUS_ADDRESS) +#define ERROR_INT_STATUS_ADDRESS (scn->hostdef->d_ERROR_INT_STATUS_ADDRESS) +#define ERROR_INT_STATUS_WAKEUP_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_MASK) +#define ERROR_INT_STATUS_WAKEUP_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_LSB) +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK) +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB) +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_MASK) +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_LSB) +#define COUNT_DEC_ADDRESS (scn->hostdef->d_COUNT_DEC_ADDRESS) +#define HOST_INT_STATUS_CPU_MASK (scn->hostdef->d_HOST_INT_STATUS_CPU_MASK) +#define HOST_INT_STATUS_CPU_LSB (scn->hostdef->d_HOST_INT_STATUS_CPU_LSB) +#define HOST_INT_STATUS_ERROR_MASK (scn->hostdef->d_HOST_INT_STATUS_ERROR_MASK) +#define HOST_INT_STATUS_ERROR_LSB (scn->hostdef->d_HOST_INT_STATUS_ERROR_LSB) +#define HOST_INT_STATUS_COUNTER_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_COUNTER_MASK) +#define HOST_INT_STATUS_COUNTER_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_COUNTER_LSB) +#define RX_LOOKAHEAD_VALID_ADDRESS (scn->hostdef->d_RX_LOOKAHEAD_VALID_ADDRESS) +#define WINDOW_DATA_ADDRESS (scn->hostdef->d_WINDOW_DATA_ADDRESS) +#define WINDOW_READ_ADDR_ADDRESS (scn->hostdef->d_WINDOW_READ_ADDR_ADDRESS) +#define WINDOW_WRITE_ADDR_ADDRESS (scn->hostdef->d_WINDOW_WRITE_ADDR_ADDRESS) +#define SOC_GLOBAL_RESET_ADDRESS (scn->hostdef->d_SOC_GLOBAL_RESET_ADDRESS) +#define RTC_STATE_ADDRESS (scn->hostdef->d_RTC_STATE_ADDRESS) +#define RTC_STATE_COLD_RESET_MASK (scn->hostdef->d_RTC_STATE_COLD_RESET_MASK) +#define RTC_STATE_V_MASK (scn->hostdef->d_RTC_STATE_V_MASK) +#define RTC_STATE_V_LSB (scn->hostdef->d_RTC_STATE_V_LSB) +#define FW_IND_EVENT_PENDING (scn->hostdef->d_FW_IND_EVENT_PENDING) +#define FW_IND_INITIALIZED (scn->hostdef->d_FW_IND_INITIALIZED) +#define FW_IND_HELPER (scn->hostdef->d_FW_IND_HELPER) +#define RTC_STATE_V_ON (scn->hostdef->d_RTC_STATE_V_ON) +#if defined(SDIO_3_0) +#define HOST_INT_STATUS_MBOX_DATA_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_MASK) +#define HOST_INT_STATUS_MBOX_DATA_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_LSB) +#endif + +#if !defined(MSI_MAGIC_ADR_ADDRESS) +#define MSI_MAGIC_ADR_ADDRESS 0 +#define MSI_MAGIC_ADDRESS 0 +#endif + +/* SET/GET macros */ +#define INT_STATUS_ENABLE_ERROR_SET(x) \ + (((x) << INT_STATUS_ENABLE_ERROR_LSB) & INT_STATUS_ENABLE_ERROR_MASK) +#define INT_STATUS_ENABLE_CPU_SET(x) \ + (((x) << INT_STATUS_ENABLE_CPU_LSB) & INT_STATUS_ENABLE_CPU_MASK) +#define INT_STATUS_ENABLE_COUNTER_SET(x) \ + (((x) << INT_STATUS_ENABLE_COUNTER_LSB) & \ + INT_STATUS_ENABLE_COUNTER_MASK) +#define INT_STATUS_ENABLE_MBOX_DATA_SET(x) \ + (((x) << INT_STATUS_ENABLE_MBOX_DATA_LSB) & \ + INT_STATUS_ENABLE_MBOX_DATA_MASK) +#define CPU_INT_STATUS_ENABLE_BIT_SET(x) \ + (((x) << CPU_INT_STATUS_ENABLE_BIT_LSB) & \ + CPU_INT_STATUS_ENABLE_BIT_MASK) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(x) \ + (((x) << ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) & \ + ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(x) \ + (((x) << ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) & \ + ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) +#define COUNTER_INT_STATUS_ENABLE_BIT_SET(x) \ + (((x) << COUNTER_INT_STATUS_ENABLE_BIT_LSB) & \ + COUNTER_INT_STATUS_ENABLE_BIT_MASK) +#define ERROR_INT_STATUS_WAKEUP_GET(x) \ + (((x) & ERROR_INT_STATUS_WAKEUP_MASK) >> \ + ERROR_INT_STATUS_WAKEUP_LSB) +#define ERROR_INT_STATUS_RX_UNDERFLOW_GET(x) \ + (((x) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK) >> \ + ERROR_INT_STATUS_RX_UNDERFLOW_LSB) +#define ERROR_INT_STATUS_TX_OVERFLOW_GET(x) \ + (((x) & ERROR_INT_STATUS_TX_OVERFLOW_MASK) >> \ + ERROR_INT_STATUS_TX_OVERFLOW_LSB) +#define HOST_INT_STATUS_CPU_GET(x) \ + (((x) & HOST_INT_STATUS_CPU_MASK) >> HOST_INT_STATUS_CPU_LSB) +#define HOST_INT_STATUS_ERROR_GET(x) \ + (((x) & HOST_INT_STATUS_ERROR_MASK) >> HOST_INT_STATUS_ERROR_LSB) +#define HOST_INT_STATUS_COUNTER_GET(x) \ + (((x) & HOST_INT_STATUS_COUNTER_MASK) >> HOST_INT_STATUS_COUNTER_LSB) +#define RTC_STATE_V_GET(x) \ + (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB) +#if defined(SDIO_3_0) +#define HOST_INT_STATUS_MBOX_DATA_GET(x) \ + (((x) & HOST_INT_STATUS_MBOX_DATA_MASK) >> \ + HOST_INT_STATUS_MBOX_DATA_LSB) +#endif + +#define INVALID_REG_LOC_DUMMY_DATA 0xAA + +#define AR6320_CORE_CLK_DIV_ADDR 0x403fa8 +#define AR6320_CPU_PLL_INIT_DONE_ADDR 0x403fd0 +#define AR6320_CPU_SPEED_ADDR 0x403fa4 +#define AR6320V2_CORE_CLK_DIV_ADDR 0x403fd8 +#define AR6320V2_CPU_PLL_INIT_DONE_ADDR 0x403fd0 +#define AR6320V2_CPU_SPEED_ADDR 0x403fd4 +#define AR6320V3_CORE_CLK_DIV_ADDR 0x404028 +#define AR6320V3_CPU_PLL_INIT_DONE_ADDR 0x404020 +#define AR6320V3_CPU_SPEED_ADDR 0x404024 + +enum a_refclk_speed_t { + SOC_REFCLK_UNKNOWN = -1, /* Unsupported ref clock -- use PLL Bypass */ + SOC_REFCLK_48_MHZ = 0, + SOC_REFCLK_19_2_MHZ = 1, + SOC_REFCLK_24_MHZ = 2, + SOC_REFCLK_26_MHZ = 3, + SOC_REFCLK_37_4_MHZ = 4, + SOC_REFCLK_38_4_MHZ = 5, + SOC_REFCLK_40_MHZ = 6, + SOC_REFCLK_52_MHZ = 7, +}; + +#define A_REFCLK_UNKNOWN SOC_REFCLK_UNKNOWN +#define A_REFCLK_48_MHZ SOC_REFCLK_48_MHZ +#define A_REFCLK_19_2_MHZ SOC_REFCLK_19_2_MHZ +#define A_REFCLK_24_MHZ SOC_REFCLK_24_MHZ +#define A_REFCLK_26_MHZ SOC_REFCLK_26_MHZ +#define A_REFCLK_37_4_MHZ SOC_REFCLK_37_4_MHZ +#define A_REFCLK_38_4_MHZ SOC_REFCLK_38_4_MHZ +#define A_REFCLK_40_MHZ SOC_REFCLK_40_MHZ +#define A_REFCLK_52_MHZ SOC_REFCLK_52_MHZ + +#define TARGET_CPU_FREQ 176000000 + +struct wlan_pll_s { + uint32_t refdiv; + uint32_t div; + uint32_t rnfrac; + uint32_t outdiv; +}; + +struct cmnos_clock_s { + enum a_refclk_speed_t refclk_speed; + uint32_t refclk_hz; + uint32_t pll_settling_time; /* 50us */ + struct wlan_pll_s wlan_pll; +}; + +struct tgt_reg_section { + uint32_t start_addr; + uint32_t end_addr; +}; + + +struct tgt_reg_table { + const struct tgt_reg_section *section; + uint32_t section_size; +}; +#endif /* _REGTABLE_SDIO_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/adma.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/adma.c new file mode 100644 index 0000000000000000000000000000000000000000..bce8c52bdff4f18cda9bb7dff247b6f1acef07ce --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/adma.c @@ -0,0 +1,862 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "adma.h" +#include "hif_sdio_internal.h" +#include "pld_sdio.h" +#include "if_sdio.h" + +/** + * hif_dev_get_fifo_address() - get the fifo addresses for dma + * @pdev: SDIO HIF object + * @c : FIFO address config pointer + * + * Return : 0 for success, non-zero for error + */ +int hif_dev_get_fifo_address(struct hif_sdio_dev *pdev, + void *c, + uint32_t config_len) +{ + /* SDIO AL handles DMA Addresses */ + return 0; +} + +/** + * hif_dev_get_block_size() - get the adma block size for dma + * @config : block size config pointer + * + * Return : NONE + */ +void hif_dev_get_block_size(void *config) +{ + /* TODO Get block size used by AL Layer in Mission ROM Mode */ + *((uint32_t *)config) = HIF_BLOCK_SIZE; /* QCN_SDIO_MROM_BLK_SZ TODO */ +} + +/** + * hif_dev_configure_pipes() - configure pipes + * @pdev: SDIO HIF object + * @func: sdio function object + * + * Return : 0 for success, non-zero for error + */ +int hif_dev_configure_pipes(struct hif_sdio_dev *pdev, struct sdio_func *func) +{ + /* SDIO AL Configures SDIO Channels */ + return 0; +} + +/** hif_dev_set_mailbox_swap() - Set the mailbox swap + * @pdev : The HIF layer object + * + * Return: none + */ +void hif_dev_set_mailbox_swap(struct hif_sdio_dev *pdev) +{ + /* SDIO AL doesn't use mailbox architecture */ +} + +/** hif_dev_get_mailbox_swap() - Get the mailbox swap setting + * @pdev : The HIF layer object + * + * Return: true or false + */ +bool hif_dev_get_mailbox_swap(struct hif_sdio_dev *pdev) +{ + /* SDIO AL doesn't use mailbox architecture */ + return false; +} + +/** + * hif_dev_dsr_handler() - Synchronous interrupt handler + * + * @context: hif send context + * + * Return: 0 for success and non-zero for failure + */ +QDF_STATUS hif_dev_dsr_handler(void *context) +{ + /* SDIO AL handles interrupts */ + return QDF_STATUS_SUCCESS; +} + +/** + * hif_dev_map_service_to_pipe() - maps ul/dl pipe to service id. + * @pDev: SDIO HIF object + * @ServiceId: sevice index + * @ULPipe: uplink pipe id + * @DLPipe: down-linklink pipe id + * + * Return: 0 on success, error value on invalid map + */ +QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_dev *pdev, uint16_t svc, + uint8_t *ul_pipe, uint8_t *dl_pipe) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + switch (svc) { + case HTT_DATA_MSG_SVC: + *dl_pipe = 2; + *ul_pipe = 3; + break; + + case HTC_CTRL_RSVD_SVC: + case HTC_RAW_STREAMS_SVC: + *dl_pipe = 0; + *ul_pipe = 1; + break; + + case WMI_DATA_BE_SVC: + case WMI_DATA_BK_SVC: + case WMI_DATA_VI_SVC: + case WMI_DATA_VO_SVC: + *dl_pipe = 2; + *ul_pipe = 3; + break; + + case WMI_CONTROL_SVC: + *dl_pipe = 0; + *ul_pipe = 1; + break; + + default: + HIF_ERROR("%s: Err : Invalid service (%d)", + __func__, svc); + status = QDF_STATUS_E_INVAL; + break; + } + return status; +} + +/** + * hif_bus_configure() - configure the bus + * @hif_sc: pointer to the hif context. + * + * return: 0 for success. nonzero for failure. + */ +int hif_sdio_bus_configure(struct hif_softc *hif_sc) +{ + struct pld_wlan_enable_cfg cfg; + enum pld_driver_mode mode; + uint32_t con_mode = hif_get_conparam(hif_sc); + + if (con_mode == QDF_GLOBAL_FTM_MODE) + mode = PLD_FTM; + else if (con_mode == QDF_GLOBAL_COLDBOOT_CALIB_MODE) + mode = PLD_COLDBOOT_CALIBRATION; + else if (QDF_IS_EPPING_ENABLED(con_mode)) + mode = PLD_EPPING; + else + mode = PLD_MISSION; + + return pld_wlan_enable(hif_sc->qdf_dev->dev, &cfg, mode); +} + +/** hif_dev_setup_device() - Setup device specific stuff here required for hif + * @pdev : HIF layer object + * + * return 0 on success, error otherwise + */ +int hif_dev_setup_device(struct hif_sdio_device *pdev) +{ + hif_dev_get_block_size(&pdev->BlockSize); + + return 0; +} + +/** hif_dev_mask_interrupts() - Disable the interrupts in the device + * @pdev SDIO HIF Object + * + * Return: NONE + */ +void hif_dev_mask_interrupts(struct hif_sdio_device *pdev) +{ + /* SDIO AL Handles Interrupts */ +} + +/** hif_dev_unmask_interrupts() - Enable the interrupts in the device + * @pdev SDIO HIF Object + * + * Return: NONE + */ +void hif_dev_unmask_interrupts(struct hif_sdio_device *pdev) +{ + /* SDIO AL Handles Interrupts */ +} + +/** + * hif_dev_map_pipe_to_adma_chan() - maps pipe id to adma chan + * @pdev: The pointer to the hif device object + * @pipeid: pipe index + * + * Return: adma channel handle + */ +struct sdio_al_channel_handle *hif_dev_map_pipe_to_adma_chan +( +struct hif_sdio_device *dev, +uint8_t pipeid +) +{ + struct hif_sdio_dev *pdev = dev->HIFDevice; + + HIF_ENTER(); + + if ((pipeid == 0) || (pipeid == 1)) + return pdev->al_chan[0]; + else if ((pipeid == 2) || (pipeid == 3)) + return pdev->al_chan[1]; + else + return NULL; +} + +/** + * hif_dev_map_adma_chan_to_pipe() - map adma chan to htc pipe + * @pdev: The pointer to the hif device object + * @chan: channel number + * @upload: boolean to decide upload or download + * + * Return: Invalid pipe index + */ +uint8_t hif_dev_map_adma_chan_to_pipe(struct hif_sdio_device *pdev, + uint8_t chan, bool upload) +{ + HIF_INFO("%s: chan: %u, %s", __func__, chan, + upload ? "Upload" : "Download"); + + if (chan == 0) /* chan 0 is mapped to HTT */ + return upload ? 1 : 0; + else if (chan == 1) /* chan 1 is mapped to WMI */ + return upload ? 3 : 2; + + return (uint8_t)-1; /* invalid channel id */ +} + +/** + * hif_get_send_address() - Get the transfer pipe address + * @pdev: The pointer to the hif device object + * @pipe: The pipe identifier + * + * Return 0 for success and non-zero for failure to map + */ +int hif_get_send_address(struct hif_sdio_device *pdev, + uint8_t pipe, unsigned long *addr) +{ + struct sdio_al_channel_handle *chan = NULL; + + if (!addr) + return -EINVAL; + + *addr = 0; + chan = hif_dev_map_pipe_to_adma_chan(pdev, pipe); + + if (!chan) + return -EINVAL; + + *addr = (unsigned long)chan; + + return 0; +} + +/** + * hif_fixup_write_param() - Tweak the address and length parameters + * @pdev: The pointer to the hif device object + * @length: The length pointer + * @addr: The addr pointer + * + * Return: None + */ +void hif_fixup_write_param(struct hif_sdio_dev *pdev, uint32_t req, + uint32_t *length, uint32_t *addr) +{ + HIF_ENTER(); + HIF_EXIT(); +} + +#define HIF_MAX_RX_Q_ALLOC 0 /* TODO */ +#define HIF_RX_Q_ALLOC_THRESHOLD 100 +QDF_STATUS hif_disable_func(struct hif_sdio_dev *device, + struct sdio_func *func, + bool reset) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; +#if HIF_MAX_RX_Q_ALLOC + qdf_list_node_t *node; + struct rx_q_entry *rx_q_elem; +#endif + HIF_ENTER(); + +#if HIF_MAX_RX_Q_ALLOC + qdf_spin_lock_irqsave(&device->rx_q_lock); + + for (; device->rx_q.count; ) { + qdf_list_remove_back(&device->rx_q, &node); + rx_q_elem = container_of(node, struct rx_q_entry, entry); + if (rx_q_elem) { + if (rx_q_elem->nbuf) + qdf_nbuf_free(rx_q_elem->nbuf); + qdf_mem_free(rx_q_elem); + } + } + qdf_destroy_work(0, &device->rx_q_alloc_work); + + qdf_spin_unlock_irqrestore(&device->rx_q_lock); + + qdf_spinlock_destroy(&device->rx_q_lock); +#endif + + status = hif_sdio_func_disable(device, func, reset); + if (status == QDF_STATUS_SUCCESS) + device->is_disabled = true; + + cleanup_hif_scatter_resources(device); + + HIF_EXIT(); + + return status; +} + +/** + * hif_enable_func() - Enable SDIO function + * + * @ol_sc: HIF object pointer + * @device: HIF device pointer + * @sdio_func: SDIO function pointer + * @resume: If this is called from resume or probe + * + * Return: 0 in case of success, else error value + */ +QDF_STATUS hif_enable_func(struct hif_softc *ol_sc, struct hif_sdio_dev *device, + struct sdio_func *func, bool resume) +{ + int ret = QDF_STATUS_SUCCESS; + + if (!device) { + HIF_ERROR("%s: HIF device is NULL", __func__); + return QDF_STATUS_E_INVAL; + } + + if (!resume) + ret = hif_sdio_probe(ol_sc, func, device); + +#if HIF_MAX_RX_Q_ALLOC + if (!ret) { + qdf_list_create(&device->rx_q, HIF_MAX_RX_Q_ALLOC); + qdf_spinlock_create(&device->rx_q_lock); + qdf_create_work(0, &device->rx_q_alloc_work, + hif_sdio_rx_q_alloc, (void *)device); + device->rx_q_alloc_work_scheduled = true; + qdf_sched_work(0, &device->rx_q_alloc_work); + } +#endif + return ret; +} + +/** + * hif_sdio_get_net_buf() - Get a network buffer from the rx q + * @dev - HIF device object + * + * Return - NULL if out of buffers, else qdf_nbuf_t + */ +#if HIF_MAX_RX_Q_ALLOC +static qdf_nbuf_t hif_sdio_get_nbuf(struct hif_sdio_dev *dev, uint16_t buf_len) +{ + qdf_list_node_t *node; + qdf_nbuf_t nbuf = NULL; + qdf_list_t *q = &dev->rx_q; + struct rx_q_entry *elem = NULL; + + /* TODO - Alloc nbuf based on buf_len */ + qdf_spin_lock_irqsave(&dev->rx_q_lock); + + if (q->count) { + qdf_list_remove_front(q, &node); + elem = qdf_container_of(node, struct rx_q_entry, entry); + nbuf = elem->nbuf; + } else { + HIF_ERROR("%s: no rx q elements", __func__); + } + + if (q->count <= HIF_RX_Q_ALLOC_THRESHOLD && + !dev->rx_q_alloc_work_scheduled) { + dev->rx_q_alloc_work_scheduled = true; + qdf_sched_work(0, &dev->rx_q_alloc_work); + } + + qdf_spin_unlock_irqrestore(&dev->rx_q_lock); + + qdf_mem_free(elem); + + return nbuf; +} +#else +static qdf_nbuf_t hif_sdio_get_nbuf(struct hif_sdio_dev *dev, uint16_t buf_len) +{ + qdf_nbuf_t nbuf; + + if (!buf_len) + buf_len = HIF_SDIO_RX_BUFFER_SIZE; + + nbuf = qdf_nbuf_alloc(NULL, buf_len, 0, 4, false); + + return nbuf; +} +#endif +/** + * hif_sdio_rx_q_alloc() - Deferred work for pre-alloc rx q + * @ctx - Pointer to context object + * + * Return NONE + */ +#if HIF_MAX_RX_Q_ALLOC +void hif_sdio_rx_q_alloc(void *ctx) +{ + struct rx_q_entry *rx_q_elem; + struct hif_sdio_dev *dev = (struct hif_sdio_dev *)ctx; + unsigned int rx_q_count = dev->rx_q.count; + + HIF_ENTER(); + qdf_spin_lock_irqsave(&dev->rx_q_lock); + + for (; rx_q_count < dev->rx_q.max_size; rx_q_count++) { + rx_q_elem = qdf_mem_malloc(sizeof(struct rx_q_entry)); + if (!rx_q_elem) { + HIF_ERROR("%s: failed to alloc rx q elem", __func__); + break; + } + + /* TODO - Alloc nbuf based on payload_len in HTC Header */ + rx_q_elem->nbuf = qdf_nbuf_alloc(NULL, HIF_SDIO_RX_BUFFER_SIZE, + 0, 4, false); + if (!rx_q_elem->nbuf) { + HIF_ERROR("%s: failed to alloc nbuf for rx", __func__); + qdf_mem_free(rx_q_elem); + break; + } + + qdf_list_insert_back(&dev->rx_q, &rx_q_elem->entry); + } + dev->rx_q_alloc_work_scheduled = false; + + qdf_spin_unlock_irqrestore(&dev->rx_q_lock); + HIF_EXIT(); +} +#else +void hif_sdio_rx_q_alloc(void *ctx) +{ +} +#endif + +#include + +struct sdio_al_channel_data qcn7605_chan[HIF_SDIO_MAX_AL_CHANNELS] = { + { + .name = "SDIO_AL_WLAN_CH0", /* HTT */ + .client_data = NULL, /* populate from client handle */ + .ul_xfer_cb = ul_xfer_cb, + .dl_xfer_cb = dl_xfer_cb, + .dl_data_avail_cb = dl_data_avail_cb, + .dl_meta_data_cb = NULL + }, + { + .name = "SDIO_AL_WLAN_CH1", /* WMI */ + .client_data = NULL, /* populate from client handle */ + .ul_xfer_cb = ul_xfer_cb, + .dl_xfer_cb = dl_xfer_cb, + .dl_data_avail_cb = dl_data_avail_cb, + .dl_meta_data_cb = NULL + } +}; + +/** + * hif_dev_register_channels()- Register transport layer channels + * @dev : HIF device object + * @func : SDIO function pointer + * + * Return : success on configuration, else failure + */ +int hif_dev_register_channels(struct hif_sdio_dev *dev, struct sdio_func *func) +{ + int ret = 0; + unsigned int chan; + struct sdio_al_channel_data *chan_data[HIF_ADMA_MAX_CHANS]; + + HIF_ENTER(); + + dev->al_client = pld_sdio_get_sdio_al_client_handle(func); + if (ret || !dev->al_client) { + HIF_ERROR("%s: Failed to get get sdio al handle", __func__); + return ret; + } + + if ((func->device & MANUFACTURER_ID_AR6K_BASE_MASK) == + MANUFACTURER_ID_QCN7605_BASE) { + dev->adma_chans_used = 2; + qcn7605_chan[0].client_data = dev->al_client->client_data; + qcn7605_chan[1].client_data = dev->al_client->client_data; + chan_data[0] = &qcn7605_chan[0]; + chan_data[1] = &qcn7605_chan[1]; + } else { + dev->adma_chans_used = 0; + } + + for (chan = 0; chan < dev->adma_chans_used; chan++) { + dev->al_chan[chan] = + pld_sdio_register_sdio_al_channel(dev->al_client, + chan_data[chan]); + if (!dev->al_chan[chan] || IS_ERR(dev->al_chan[chan])) { + ret = -EINVAL; + HIF_ERROR("%s: Channel registration failed", __func__); + } else { + dev->al_chan[chan]->priv = (void *)dev; + HIF_INFO("%s: chan %s : id : %u", __func__, + chan_data[chan]->name, + dev->al_chan[chan]->channel_id); + } + } + + HIF_EXIT(); + + return ret; +} + +/** + * hif_dev_unregister_channels()- Register transport layer channels + * @dev : HIF device object + * @func : SDIO Function pointer + * + * Return : None + */ +void hif_dev_unregister_channels(struct hif_sdio_dev *dev, + struct sdio_func *func) +{ + unsigned int chan; + + if (!dev) { + HIF_ERROR("%s: hif_sdio_dev is null", __func__); + return; + } + + for (chan = 0; chan < dev->adma_chans_used; chan++) { + dev->al_chan[chan]->priv = NULL; + pld_sdio_unregister_sdio_al_channel(dev->al_chan[chan]); + } +} + +/** + * hif_read_write() - queue a read/write request + * @dev: pointer to hif device structure + * @address: address to read, actually channel pointer + * @buffer: buffer to hold read/write data + * @length: length to read/write + * @request: read/write/sync/async request + * @context: pointer to hold calling context + * + * Return: 0, pending on success, error number otherwise. + */ +QDF_STATUS +hif_read_write(struct hif_sdio_dev *dev, + unsigned long sdio_al_ch_handle, + char *cbuffer, uint32_t length, + uint32_t request, void *context) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct sdio_al_channel_handle *ch; + struct bus_request *bus_req; + enum sdio_al_dma_direction dir; + struct hif_sdio_device *device; + QDF_STATUS (*rx_comp)(void *, qdf_nbuf_t, uint8_t); + qdf_nbuf_t nbuf; + int ret = 0, payload_len = 0; + unsigned char *buffer = (unsigned char *)cbuffer; + + if (!dev || !sdio_al_ch_handle) { + HIF_ERROR("%s: device = %pK, addr = %lu", __func__, + dev, sdio_al_ch_handle); + return QDF_STATUS_E_INVAL; + } + + if (!(request & HIF_ASYNCHRONOUS) && + !(request & HIF_SYNCHRONOUS)) { + HIF_ERROR("%s: Invalid request mode", __func__); + return QDF_STATUS_E_INVAL; + } + + /*sdio r/w action is not needed when suspend, so just return */ + if ((dev->is_suspend) && + (dev->power_config == HIF_DEVICE_POWER_CUT)) { + HIF_INFO("%s: skip in suspend", __func__); + return QDF_STATUS_SUCCESS; + } + + ch = (struct sdio_al_channel_handle *)sdio_al_ch_handle; + + bus_req = hif_allocate_bus_request(dev); + if (!bus_req) { + HIF_ERROR("%s: Bus alloc failed", __func__); + return QDF_STATUS_E_FAILURE; + } + + bus_req->address = sdio_al_ch_handle; + bus_req->length = length; + bus_req->request = request; + bus_req->context = context; + bus_req->buffer = buffer; + + /* Request SDIO AL to do transfer */ + dir = (request & HIF_SDIO_WRITE) ? SDIO_AL_TX : SDIO_AL_RX; + + if (request & HIF_SYNCHRONOUS) { + ret = sdio_al_queue_transfer(ch, + dir, + bus_req->buffer, + bus_req->length, + 1); /* higher priority */ + if (ret) { + status = QDF_STATUS_E_FAILURE; + HIF_ERROR("%s: SYNC REQ failed ret=%d", __func__, ret); + } else { + status = QDF_STATUS_SUCCESS; + } + + hif_free_bus_request(dev, bus_req); + + if ((status == QDF_STATUS_SUCCESS) && (dir == SDIO_AL_RX)) { + nbuf = (qdf_nbuf_t)context; + payload_len = HTC_GET_FIELD(bus_req->buffer, + HTC_FRAME_HDR, + PAYLOADLEN); + qdf_nbuf_set_pktlen(nbuf, payload_len + HTC_HDR_LENGTH); + device = (struct hif_sdio_device *)dev->htc_context; + rx_comp = device->hif_callbacks.rxCompletionHandler; + rx_comp(device->hif_callbacks.Context, nbuf, 0); + } + } else { + ret = sdio_al_queue_transfer_async(ch, + dir, + bus_req->buffer, + bus_req->length, + 1, /* higher priority */ + (void *)bus_req); + if (ret) { + status = QDF_STATUS_E_FAILURE; + HIF_ERROR("%s: ASYNC REQ fail ret=%d for len=%d ch=%d", + __func__, ret, length, ch->channel_id); + hif_free_bus_request(dev, bus_req); + } else { + status = QDF_STATUS_E_PENDING; + } + } + return status; +} + +/** + * ul_xfer_cb() - Completion call back for asynchronous transfer + * @ch_handle: The sdio al channel handle + * @result: The result of the operation + * @context: pointer to request context + * + * Return: None + */ +void ul_xfer_cb(struct sdio_al_channel_handle *ch_handle, + struct sdio_al_xfer_result *result, + void *ctx) +{ + struct bus_request *req = (struct bus_request *)ctx; + struct hif_sdio_dev *dev; + + if (!ch_handle || !result) { + HIF_ERROR("%s: Invalid args", __func__); + qdf_assert_always(0); + return; + } + + dev = (struct hif_sdio_dev *)ch_handle->priv; + + if (result->xfer_status) { + req->status = QDF_STATUS_E_FAILURE; + HIF_ERROR("%s: ASYNC Tx failed status=%d", __func__, + result->xfer_status); + } else { + req->status = QDF_STATUS_SUCCESS; + } + + dev->htc_callbacks.rw_compl_handler(req->context, req->status); + + hif_free_bus_request(dev, req); +} + +/** + * dl_data_avail_cb() - Called when data is available on a channel + * @ch_handle: The sdio al channel handle + * @len: The len of data available to download + * + * Return: None + */ +/* Use the asynchronous method of transfer. This will help in + * completing READ in the transfer done callback later which + * runs in sdio al thread context. If we do the syncronous + * transfer here, the thread context won't be available and + * perhaps a new thread may be required here. + */ +void dl_data_avail_cb(struct sdio_al_channel_handle *ch_handle, + unsigned int len) +{ + struct hif_sdio_dev *dev; + unsigned int chan; + qdf_nbuf_t nbuf; + + if (!ch_handle || !len) { + HIF_ERROR("%s: Invalid args %u", __func__, len); + qdf_assert_always(0); + return; + } + + dev = (struct hif_sdio_dev *)ch_handle->priv; + chan = ch_handle->channel_id; + + if (chan > HIF_SDIO_MAX_AL_CHANNELS) { + HIF_ERROR("%s: Invalid Ch ID %d", __func__, chan); + return; + } + + /* allocate a buffer for reading the data from the chip. + * Note that this is raw, unparsed buffer and will be + * processed in the transfer done callback. + */ + /* TODO, use global buffer instead of runtime allocations */ + nbuf = qdf_nbuf_alloc(NULL, len, 0, 4, false); + + if (!nbuf) { + HIF_ERROR("%s: Unable to alloc netbuf %u bytes", __func__, len); + return; + } + + hif_read_write(dev, (unsigned long)ch_handle, nbuf->data, len, + HIF_RD_ASYNC_BLOCK_FIX, nbuf); +} + +#define is_pad_block(buf) (*((uint32_t *)buf) == 0xbabababa) +uint16_t g_dbg_payload_len; + +/** + * dl_xfer_cb() - Call from lower layer after transfer is completed + * @ch_handle: The sdio al channel handle + * @result: The xfer result + * @ctx: Context passed in the transfer queuing + * + * Return: None + */ +void dl_xfer_cb(struct sdio_al_channel_handle *ch_handle, + struct sdio_al_xfer_result *result, + void *ctx) +{ + unsigned char *buf; + qdf_nbuf_t nbuf; + uint32_t len; + uint16_t payload_len = 0; + struct hif_sdio_dev *dev; + struct hif_sdio_device *device; + struct bus_request *bus_req = (struct bus_request *)ctx; + QDF_STATUS (*rx_completion)(void *, qdf_nbuf_t, uint8_t); + + if (!bus_req) { + HIF_ERROR("%s: Bus Req NULL!!!", __func__); + qdf_assert_always(0); + return; + } + + if (!ch_handle || !result) { + HIF_ERROR("%s: Invalid args %pK %pK", __func__, + ch_handle, result); + qdf_assert_always(0); + return; + } + + dev = (struct hif_sdio_dev *)ch_handle->priv; + if (result->xfer_status) { + HIF_ERROR("%s: ASYNC Rx failed %d", __func__, + result->xfer_status); + qdf_nbuf_free((qdf_nbuf_t)bus_req->context); + hif_free_bus_request(dev, bus_req); + return; + } + + device = (struct hif_sdio_device *)dev->htc_context; + rx_completion = device->hif_callbacks.rxCompletionHandler; + + buf = (unsigned char *)result->buf_addr; + len = (unsigned int)result->xfer_len; + + while (len >= sizeof(HTC_FRAME_HDR)) { + if (is_pad_block(buf)) { + /* End of Rx Buffer */ + break; + } + + if (HTC_GET_FIELD(buf, HTC_FRAME_HDR, ENDPOINTID) >= + ENDPOINT_MAX) { + HIF_ERROR("%s: invalid endpoint id: %u", __func__, + HTC_GET_FIELD(buf, HTC_FRAME_HDR, + ENDPOINTID)); + break; + } + + /* Copy the HTC frame to the alloc'd packet buffer */ + payload_len = HTC_GET_FIELD(buf, HTC_FRAME_HDR, PAYLOADLEN); + payload_len = qdf_le16_to_cpu(payload_len); + if (!payload_len) { + HIF_ERROR("%s:Invalid Payload len %d bytes", __func__, + payload_len); + break; + } + if (payload_len > g_dbg_payload_len) { + g_dbg_payload_len = payload_len; + HIF_ERROR("Max Rx HTC Payload = %d", g_dbg_payload_len); + } + + nbuf = hif_sdio_get_nbuf(dev, payload_len + HTC_HEADER_LEN); + if (!nbuf) { + HIF_ERROR("%s: failed to alloc rx buffer", __func__); + break; + } + + /* Check if payload fits in skb */ + if (qdf_nbuf_tailroom(nbuf) < payload_len + HTC_HEADER_LEN) { + HIF_ERROR("%s: Payload + HTC_HDR %d > skb tailroom %d", + __func__, (payload_len + 8), + qdf_nbuf_tailroom(nbuf)); + qdf_nbuf_free(nbuf); + break; + } + + qdf_mem_copy((uint8_t *)qdf_nbuf_data(nbuf), buf, + payload_len + HTC_HEADER_LEN); + + qdf_nbuf_put_tail(nbuf, payload_len + HTC_HDR_LENGTH); + + rx_completion(device->hif_callbacks.Context, nbuf, + 0); /* don't care, not used */ + + len -= payload_len + HTC_HDR_LENGTH; + buf += payload_len + HTC_HDR_LENGTH; + } + + qdf_nbuf_free((qdf_nbuf_t)bus_req->context); + hif_free_bus_request(dev, bus_req); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/adma.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/adma.h new file mode 100644 index 0000000000000000000000000000000000000000..cfda66db25deb143a2bb73cb0738da06f8943010 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/adma.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _ADMA_H_ +#define _ADMA_H_ + +#include "hif_sdio_dev.h" +#include "htc_packet.h" +#include "htc_api.h" +#include "hif_internal.h" + +/* This should align with the underlying transport layer */ +#define HIF_DEFAULT_IO_BLOCK_SIZE 512 +#define HIF_BLOCK_SIZE HIF_DEFAULT_IO_BLOCK_SIZE +#define HIF_DUMMY_SPACE_MASK 0x0FFFFFFF + +#define HIF_SDIO_MAX_AL_CHANNELS 2 + +struct devRegisters { + uint32_t dummy; +}; + +#include "transfer.h" +#define DEV_REGISTERS_SIZE sizeof(struct devRegisters) + +uint8_t hif_dev_map_adma_chan_to_pipe(struct hif_sdio_device *pdev, + uint8_t chan, bool upload); + +struct sdio_al_channel_handle *hif_dev_map_pipe_to_adma_chan +( +struct hif_sdio_device *pdev, +uint8_t pipeid +); + +void dl_xfer_cb(struct sdio_al_channel_handle *ch_handle, + struct sdio_al_xfer_result *result, + void *ctx); +void ul_xfer_cb(struct sdio_al_channel_handle *ch_handle, + struct sdio_al_xfer_result *result, + void *ctx); + +void dl_data_avail_cb(struct sdio_al_channel_handle *ch_handle, + unsigned int len); + +void hif_sdio_rx_q_alloc(void *ctx); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/mailbox.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/mailbox.c new file mode 100644 index 0000000000000000000000000000000000000000..91ad93f4cd1de610ae305433475c85c752503494 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/mailbox.c @@ -0,0 +1,1978 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifdef CONFIG_SDIO_TRANSFER_MAILBOX +#define ATH_MODULE_NAME hif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hif_sdio_internal.h" +#include "if_sdio.h" +#include "regtable.h" +#include "transfer.h" + +/* by default setup a bounce buffer for the data packets, + * if the underlying host controller driver + * does not use DMA you may be able to skip this step + * and save the memory allocation and transfer time + */ +#define HIF_USE_DMA_BOUNCE_BUFFER 1 +#if HIF_USE_DMA_BOUNCE_BUFFER +/* macro to check if DMA buffer is WORD-aligned and DMA-able. + * Most host controllers assume the + * buffer is DMA'able and will bug-check otherwise (i.e. buffers on the stack). + * virt_addr_valid check fails on stack memory. + */ +#define BUFFER_NEEDS_BOUNCE(buffer) (((unsigned long)(buffer) & 0x3) || \ + !virt_addr_valid((buffer))) +#else +#define BUFFER_NEEDS_BOUNCE(buffer) (false) +#endif + +#ifdef SDIO_3_0 +/** + * set_extended_mbox_size() - set extended MBOX size + * @pinfo: sdio mailbox info + * + * Return: none. + */ +static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo) +{ + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0; + pinfo->mbox_prop[1].extended_size = + HIF_MBOX1_EXTENDED_WIDTH_AR6320; +} + +/** + * set_extended_mbox_address() - set extended MBOX address + * @pinfo: sdio mailbox info + * + * Return: none. + */ +static void set_extended_mbox_address(struct hif_device_mbox_info *pinfo) +{ + pinfo->mbox_prop[1].extended_address = + pinfo->mbox_prop[0].extended_address + + pinfo->mbox_prop[0].extended_size + + HIF_MBOX_DUMMY_SPACE_SIZE_AR6320; +} +#else +static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo) +{ + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6320; +} + +static inline void +set_extended_mbox_address(struct hif_device_mbox_info *pinfo) +{ +} +#endif + +/** + * set_extended_mbox_window_info() - set extended MBOX window + * information for SDIO interconnects + * @manf_id: manufacturer id + * @pinfo: sdio mailbox info + * + * Return: none. + */ +static void set_extended_mbox_window_info(uint16_t manf_id, + struct hif_device_mbox_info *pinfo) +{ + switch (manf_id & MANUFACTURER_ID_AR6K_BASE_MASK) { + case MANUFACTURER_ID_AR6002_BASE: + /* MBOX 0 has an extended range */ + + pinfo->mbox_prop[0].extended_address = + HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1; + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1; + + pinfo->mbox_prop[0].extended_address = + HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1; + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1; + + pinfo->mbox_prop[0].extended_address = + HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004; + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6004; + + break; + case MANUFACTURER_ID_AR6003_BASE: + /* MBOX 0 has an extended range */ + pinfo->mbox_prop[0].extended_address = + HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1; + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1; + pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR; + pinfo->gmbox_size = HIF_GMBOX_WIDTH; + break; + case MANUFACTURER_ID_AR6004_BASE: + pinfo->mbox_prop[0].extended_address = + HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004; + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6004; + pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR; + pinfo->gmbox_size = HIF_GMBOX_WIDTH; + break; + case MANUFACTURER_ID_AR6320_BASE: + { + uint16_t rev = manf_id & MANUFACTURER_ID_AR6K_REV_MASK; + + pinfo->mbox_prop[0].extended_address = + HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320; + if (rev < 4) + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6320; + else + set_extended_mbox_size(pinfo); + set_extended_mbox_address(pinfo); + pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR; + pinfo->gmbox_size = HIF_GMBOX_WIDTH; + break; + } + case MANUFACTURER_ID_QCA9377_BASE: + case MANUFACTURER_ID_QCA9379_BASE: + pinfo->mbox_prop[0].extended_address = + HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320; + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0; + pinfo->mbox_prop[1].extended_address = + pinfo->mbox_prop[0].extended_address + + pinfo->mbox_prop[0].extended_size + + HIF_MBOX_DUMMY_SPACE_SIZE_AR6320; + pinfo->mbox_prop[1].extended_size = + HIF_MBOX1_EXTENDED_WIDTH_AR6320; + pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR; + pinfo->gmbox_size = HIF_GMBOX_WIDTH; + break; + default: + A_ASSERT(false); + break; + } +} + +/** hif_dev_set_mailbox_swap() - Set the mailbox swap from firmware + * @pdev : The HIF layer object + * + * Return: none + */ +void hif_dev_set_mailbox_swap(struct hif_sdio_dev *pdev) +{ + struct hif_sdio_device *hif_device = hif_dev_from_hif(pdev); + + HIF_ENTER(); + + hif_device->swap_mailbox = true; + + HIF_EXIT(); +} + +/** hif_dev_get_mailbox_swap() - Get the mailbox swap setting + * @pdev : The HIF layer object + * + * Return: true or false + */ +bool hif_dev_get_mailbox_swap(struct hif_sdio_dev *pdev) +{ + struct hif_sdio_device *hif_device; + + HIF_ENTER(); + + hif_device = hif_dev_from_hif(pdev); + + HIF_EXIT(); + + return hif_device->swap_mailbox; +} + +/** + * hif_dev_get_fifo_address() - get the fifo addresses for dma + * @pdev: SDIO HIF object + * @config: mbox address config pointer + * + * Return : 0 for success, non-zero for error + */ +int hif_dev_get_fifo_address(struct hif_sdio_dev *pdev, + void *config, + uint32_t config_len) +{ + uint32_t count; + struct hif_device_mbox_info *cfg = + (struct hif_device_mbox_info *)config; + + for (count = 0; count < 4; count++) + cfg->mbox_addresses[count] = HIF_MBOX_START_ADDR(count); + + if (config_len >= sizeof(struct hif_device_mbox_info)) { + set_extended_mbox_window_info((uint16_t)pdev->func->device, + cfg); + return 0; + } + + return -EINVAL; +} + +/** + * hif_dev_get_block_size() - get the mbox block size for dma + * @config : mbox size config pointer + * + * Return : NONE + */ +void hif_dev_get_block_size(void *config) +{ + ((uint32_t *)config)[0] = HIF_MBOX0_BLOCK_SIZE; + ((uint32_t *)config)[1] = HIF_MBOX1_BLOCK_SIZE; + ((uint32_t *)config)[2] = HIF_MBOX2_BLOCK_SIZE; + ((uint32_t *)config)[3] = HIF_MBOX3_BLOCK_SIZE; +} + +/** + * hif_dev_map_service_to_pipe() - maps ul/dl pipe to service id. + * @pDev: SDIO HIF object + * @ServiceId: sevice index + * @ULPipe: uplink pipe id + * @DLPipe: down-linklink pipe id + * + * Return: 0 on success, error value on invalid map + */ +QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_dev *pdev, uint16_t svc, + uint8_t *ul_pipe, uint8_t *dl_pipe) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + switch (svc) { + case HTT_DATA_MSG_SVC: + if (hif_dev_get_mailbox_swap(pdev)) { + *ul_pipe = 1; + *dl_pipe = 0; + } else { + *ul_pipe = 3; + *dl_pipe = 2; + } + break; + + case HTC_CTRL_RSVD_SVC: + case HTC_RAW_STREAMS_SVC: + *ul_pipe = 1; + *dl_pipe = 0; + break; + + case WMI_DATA_BE_SVC: + case WMI_DATA_BK_SVC: + case WMI_DATA_VI_SVC: + case WMI_DATA_VO_SVC: + *ul_pipe = 1; + *dl_pipe = 0; + break; + + case WMI_CONTROL_SVC: + if (hif_dev_get_mailbox_swap(pdev)) { + *ul_pipe = 3; + *dl_pipe = 2; + } else { + *ul_pipe = 1; + *dl_pipe = 0; + } + break; + + default: + hif_err("%s: Err : Invalid service (%d)", + __func__, svc); + status = QDF_STATUS_E_INVAL; + break; + } + return status; +} + +/** hif_dev_setup_device() - Setup device specific stuff here required for hif + * @pdev : HIF layer object + * + * return 0 on success, error otherwise + */ +int hif_dev_setup_device(struct hif_sdio_device *pdev) +{ + int status = 0; + uint32_t blocksizes[MAILBOX_COUNT]; + + status = hif_configure_device(NULL, pdev->HIFDevice, + HIF_DEVICE_GET_FIFO_ADDR, + &pdev->MailBoxInfo, + sizeof(pdev->MailBoxInfo)); + + if (status != QDF_STATUS_SUCCESS) + hif_err("%s: HIF_DEVICE_GET_MBOX_ADDR failed", __func__); + + status = hif_configure_device(NULL, pdev->HIFDevice, + HIF_DEVICE_GET_BLOCK_SIZE, + blocksizes, sizeof(blocksizes)); + if (status != QDF_STATUS_SUCCESS) + hif_err("%s: HIF_DEVICE_GET_MBOX_BLOCK_SIZE fail", __func__); + + pdev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE]; + + return status; +} + +/** hif_dev_mask_interrupts() - Disable the interrupts in the device + * @pdev SDIO HIF Object + * + * Return: NONE + */ +void hif_dev_mask_interrupts(struct hif_sdio_device *pdev) +{ + int status = QDF_STATUS_SUCCESS; + + HIF_ENTER(); + /* Disable all interrupts */ + LOCK_HIF_DEV(pdev); + mboxEnaRegs(pdev).int_status_enable = 0; + mboxEnaRegs(pdev).cpu_int_status_enable = 0; + mboxEnaRegs(pdev).error_status_enable = 0; + mboxEnaRegs(pdev).counter_int_status_enable = 0; + UNLOCK_HIF_DEV(pdev); + + /* always synchronous */ + status = hif_read_write(pdev->HIFDevice, + INT_STATUS_ENABLE_ADDRESS, + (char *)&mboxEnaRegs(pdev), + sizeof(struct MBOX_IRQ_ENABLE_REGISTERS), + HIF_WR_SYNC_BYTE_INC, NULL); + + if (status != QDF_STATUS_SUCCESS) + hif_err("%s: Err updating intr reg: %d", __func__, status); +} + +/** hif_dev_unmask_interrupts() - Enable the interrupts in the device + * @pdev SDIO HIF Object + * + * Return: NONE + */ +void hif_dev_unmask_interrupts(struct hif_sdio_device *pdev) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + LOCK_HIF_DEV(pdev); + + /* Enable all the interrupts except for the internal + * AR6000 CPU interrupt + */ + mboxEnaRegs(pdev).int_status_enable = + INT_STATUS_ENABLE_ERROR_SET(0x01) | + INT_STATUS_ENABLE_CPU_SET(0x01) + | INT_STATUS_ENABLE_COUNTER_SET(0x01); + + /* enable 2 mboxs INT */ + mboxEnaRegs(pdev).int_status_enable |= + INT_STATUS_ENABLE_MBOX_DATA_SET(0x01) | + INT_STATUS_ENABLE_MBOX_DATA_SET(0x02); + + /* Set up the CPU Interrupt Status Register, enable + * CPU sourced interrupt #0, #1. + * #0 is used for report assertion from target + * #1 is used for inform host that credit arrived + */ + mboxEnaRegs(pdev).cpu_int_status_enable = 0x03; + + /* Set up the Error Interrupt Status Register */ + mboxEnaRegs(pdev).error_status_enable = + (ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01) + | ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01)) >> 16; + + /* Set up the Counter Interrupt Status Register + * (only for debug interrupt to catch fatal errors) + */ + mboxEnaRegs(pdev).counter_int_status_enable = + (COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK)) >> 24; + + UNLOCK_HIF_DEV(pdev); + + /* always synchronous */ + status = hif_read_write(pdev->HIFDevice, + INT_STATUS_ENABLE_ADDRESS, + (char *)&mboxEnaRegs(pdev), + sizeof(struct MBOX_IRQ_ENABLE_REGISTERS), + HIF_WR_SYNC_BYTE_INC, + NULL); + + if (status != QDF_STATUS_SUCCESS) + hif_err("%s: Err updating intr reg: %d", __func__, status); +} + +void hif_dev_dump_registers(struct hif_sdio_device *pdev, + struct MBOX_IRQ_PROC_REGISTERS *irq_proc, + struct MBOX_IRQ_ENABLE_REGISTERS *irq_en, + struct MBOX_COUNTER_REGISTERS *mbox_regs) +{ + int i = 0; + + hif_debug("%s: Mailbox registers:", __func__); + + if (irq_proc) { + hif_debug("HostIntStatus: 0x%x ", irq_proc->host_int_status); + hif_debug("CPUIntStatus: 0x%x ", irq_proc->cpu_int_status); + hif_debug("ErrorIntStatus: 0x%x ", irq_proc->error_int_status); + hif_debug("CounterIntStat: 0x%x ", + irq_proc->counter_int_status); + hif_debug("MboxFrame: 0x%x ", irq_proc->mbox_frame); + hif_debug("RxLKAValid: 0x%x ", irq_proc->rx_lookahead_valid); + hif_debug("RxLKA0: 0x%x", irq_proc->rx_lookahead[0]); + hif_debug("RxLKA1: 0x%x ", irq_proc->rx_lookahead[1]); + hif_debug("RxLKA2: 0x%x ", irq_proc->rx_lookahead[2]); + hif_debug("RxLKA3: 0x%x", irq_proc->rx_lookahead[3]); + + if (pdev->MailBoxInfo.gmbox_address != 0) { + hif_debug("GMBOX-HostIntStatus2: 0x%x ", + irq_proc->host_int_status2); + hif_debug("GMBOX-RX-Avail: 0x%x ", + irq_proc->gmbox_rx_avail); + } + } + + if (irq_en) { + hif_debug("IntStatusEnable: 0x%x\n", + irq_en->int_status_enable); + hif_debug("CounterIntStatus: 0x%x\n", + irq_en->counter_int_status_enable); + } + + for (i = 0; mbox_regs && i < 4; i++) + hif_debug("Counter[%d]: 0x%x\n", i, mbox_regs->counter[i]); +} + +/* under HL SDIO, with Interface Memory support, we have + * the following reasons to support 2 mboxs: + * a) we need place different buffers in different + * mempool, for example, data using Interface Memory, + * desc and other using DRAM, they need different SDIO + * mbox channels. + * b) currently, tx mempool in LL case is separated from + * main mempool, the structure (descs at the beginning + * of every pool buffer) is different, because they only + * need store tx desc from host. To align with LL case, + * we also need 2 mbox support just as PCIe LL cases. + */ + +/** + * hif_dev_map_pipe_to_mail_box() - maps pipe id to mailbox. + * @pdev: The pointer to the hif device object + * @pipeid: pipe index + * + * Return: mailbox index + */ +static uint8_t hif_dev_map_pipe_to_mail_box(struct hif_sdio_device *pdev, + uint8_t pipeid) +{ + if (2 == pipeid || 3 == pipeid) + return 1; + else if (0 == pipeid || 1 == pipeid) + return 0; + + hif_err("%s: pipeid=%d invalid", __func__, pipeid); + + qdf_assert(0); + + return INVALID_MAILBOX_NUMBER; +} + +/** + * hif_dev_map_mail_box_to_pipe() - map sdio mailbox to htc pipe. + * @pdev: The pointer to the hif device object + * @mboxIndex: mailbox index + * @upload: boolean to decide mailbox index + * + * Return: Invalid pipe index + */ +static uint8_t hif_dev_map_mail_box_to_pipe(struct hif_sdio_device *pdev, + uint8_t mbox_index, bool upload) +{ + if (mbox_index == 0) + return upload ? 1 : 0; + else if (mbox_index == 1) + return upload ? 3 : 2; + + hif_err("%s: mbox_index=%d, upload=%d invalid", + __func__, mbox_index, upload); + + qdf_assert(0); + + return INVALID_MAILBOX_NUMBER; /* invalid pipe id */ +} + +/** + * hif_get_send_addr() - Get the transfer pipe address + * @pdev: The pointer to the hif device object + * @pipe: The pipe identifier + * + * Return 0 for success and non-zero for failure to map + */ +int hif_get_send_address(struct hif_sdio_device *pdev, + uint8_t pipe, unsigned long *addr) +{ + uint8_t mbox_index = INVALID_MAILBOX_NUMBER; + + if (!addr) + return -EINVAL; + + mbox_index = hif_dev_map_pipe_to_mail_box(pdev, pipe); + + if (mbox_index == INVALID_MAILBOX_NUMBER) + return -EINVAL; + + *addr = pdev->MailBoxInfo.mbox_prop[mbox_index].extended_address; + + return 0; +} + +/** + * hif_fixup_write_param() - Tweak the address and length parameters + * @pdev: The pointer to the hif device object + * @length: The length pointer + * @addr: The addr pointer + * + * Return: None + */ +void hif_fixup_write_param(struct hif_sdio_dev *pdev, uint32_t req, + uint32_t *length, uint32_t *addr) +{ + struct hif_device_mbox_info mboxinfo; + uint32_t taddr = *addr, mboxlen = 0; + + hif_configure_device(NULL, pdev, HIF_DEVICE_GET_FIFO_ADDR, + &mboxinfo, sizeof(mboxinfo)); + + if (taddr >= 0x800 && taddr < 0xC00) { + /* Host control register and CIS Window */ + mboxlen = 0; + } else if (taddr == mboxinfo.mbox_addresses[0] || + taddr == mboxinfo.mbox_addresses[1] || + taddr == mboxinfo.mbox_addresses[2] || + taddr == mboxinfo.mbox_addresses[3]) { + mboxlen = HIF_MBOX_WIDTH; + } else if (taddr == mboxinfo.mbox_prop[0].extended_address) { + mboxlen = mboxinfo.mbox_prop[0].extended_size; + } else if (taddr == mboxinfo.mbox_prop[1].extended_address) { + mboxlen = mboxinfo.mbox_prop[1].extended_size; + } else { + hif_err("%s: Invalid write addr: 0x%08x\n", __func__, taddr); + return; + } + + if (mboxlen != 0) { + if (*length > mboxlen) { + hif_err("%s: Error (%u > %u)", + __func__, *length, mboxlen); + return; + } + + taddr = taddr + (mboxlen - *length); + taddr = taddr + ((req & HIF_DUMMY_SPACE_MASK) >> 16); + *addr = taddr; + } +} + +/** + * hif_dev_recv_packet() - Receieve HTC packet/packet information from device + * @pdev : HIF device object + * @packet : The HTC packet pointer + * @recv_length : The length of information to be received + * @mbox_index : The mailbox that contains this information + * + * Return 0 for success and non zero of error + */ +static QDF_STATUS hif_dev_recv_packet(struct hif_sdio_device *pdev, + HTC_PACKET *packet, + uint32_t recv_length, + uint32_t mbox_index) +{ + QDF_STATUS status; + uint32_t padded_length; + bool sync = (packet->Completion) ? false : true; + uint32_t req = sync ? HIF_RD_SYNC_BLOCK_FIX : HIF_RD_ASYNC_BLOCK_FIX; + + /* adjust the length to be a multiple of block size if appropriate */ + padded_length = DEV_CALC_RECV_PADDED_LEN(pdev, recv_length); + + if (padded_length > packet->BufferLength) { + hif_err("%s: No space for padlen:%d recvlen:%d bufferlen:%d", + __func__, padded_length, + recv_length, packet->BufferLength); + if (packet->Completion) { + COMPLETE_HTC_PACKET(packet, QDF_STATUS_E_INVAL); + return QDF_STATUS_SUCCESS; + } + return QDF_STATUS_E_INVAL; + } + + /* mailbox index is saved in Endpoint member */ + hif_debug("%s : hdr:0x%x, len:%d, padded length: %d Mbox:0x%x", + __func__, packet->PktInfo.AsRx.ExpectedHdr, recv_length, + padded_length, mbox_index); + + status = hif_read_write(pdev->HIFDevice, + pdev->MailBoxInfo.mbox_addresses[mbox_index], + packet->pBuffer, + padded_length, + req, sync ? NULL : packet); + + if (status != QDF_STATUS_SUCCESS && status != QDF_STATUS_E_PENDING) + hif_err("%s : Failed %d", __func__, status); + + if (sync) { + packet->Status = status; + if (status == QDF_STATUS_SUCCESS) { + HTC_FRAME_HDR *hdr = (HTC_FRAME_HDR *) packet->pBuffer; + + hif_debug("%s:EP:%d,Len:%d,Flg:%d,CB:0x%02X,0x%02X\n", + __func__, + hdr->EndpointID, hdr->PayloadLen, + hdr->Flags, hdr->ControlBytes0, + hdr->ControlBytes1); + } + } + + return status; +} + +static QDF_STATUS hif_dev_issue_recv_packet_bundle +( + struct hif_sdio_device *pdev, + HTC_PACKET_QUEUE *recv_pkt_queue, + HTC_PACKET_QUEUE *sync_completion_queue, + uint8_t mail_box_index, + int *num_packets_fetched, + bool partial_bundle +) +{ + uint32_t padded_length; + int i, total_length = 0; + HTC_TARGET *target = NULL; + int bundleSpaceRemaining = 0; + unsigned char *bundle_buffer = NULL; + HTC_PACKET *packet, *packet_rx_bundle; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + target = (HTC_TARGET *)pdev->pTarget; + + if ((HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) - + HTC_MAX_MSG_PER_BUNDLE_RX) > 0) { + partial_bundle = true; + hif_warn("%s, partial bundle detected num: %d, %d\n", + __func__, + HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue), + HTC_MAX_MSG_PER_BUNDLE_RX); + } + + bundleSpaceRemaining = + HTC_MAX_MSG_PER_BUNDLE_RX * target->TargetCreditSize; + packet_rx_bundle = allocate_htc_bundle_packet(target); + if (!packet_rx_bundle) { + hif_err("%s: packet_rx_bundle is NULL\n", __func__); + qdf_sleep(NBUF_ALLOC_FAIL_WAIT_TIME); /* 100 msec sleep */ + return QDF_STATUS_E_NOMEM; + } + bundle_buffer = packet_rx_bundle->pBuffer; + + for (i = 0; + !HTC_QUEUE_EMPTY(recv_pkt_queue) && i < HTC_MAX_MSG_PER_BUNDLE_RX; + i++) { + packet = htc_packet_dequeue(recv_pkt_queue); + A_ASSERT(packet); + if (!packet) + break; + padded_length = + DEV_CALC_RECV_PADDED_LEN(pdev, packet->ActualLength); + if (packet->PktInfo.AsRx.HTCRxFlags & + HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK) + padded_length += HIF_BLOCK_SIZE; + if ((bundleSpaceRemaining - padded_length) < 0) { + /* exceeds what we can transfer, put the packet back */ + HTC_PACKET_ENQUEUE_TO_HEAD(recv_pkt_queue, packet); + break; + } + bundleSpaceRemaining -= padded_length; + + if (partial_bundle || + HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) > 0) { + packet->PktInfo.AsRx.HTCRxFlags |= + HTC_RX_PKT_IGNORE_LOOKAHEAD; + } + packet->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_PART_OF_BUNDLE; + + if (sync_completion_queue) + HTC_PACKET_ENQUEUE(sync_completion_queue, packet); + + total_length += padded_length; + } +#if DEBUG_BUNDLE + qdf_print("Recv bundle count %d, length %d.", + sync_completion_queue ? + HTC_PACKET_QUEUE_DEPTH(sync_completion_queue) : 0, + total_length); +#endif + + status = hif_read_write(pdev->HIFDevice, + pdev->MailBoxInfo. + mbox_addresses[(int)mail_box_index], + bundle_buffer, total_length, + HIF_RD_SYNC_BLOCK_FIX, NULL); + + if (status != QDF_STATUS_SUCCESS) { + hif_err("%s, hif_send Failed status:%d\n", + __func__, status); + } else { + unsigned char *buffer = bundle_buffer; + *num_packets_fetched = i; + if (sync_completion_queue) { + HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE( + sync_completion_queue, packet) { + padded_length = + DEV_CALC_RECV_PADDED_LEN(pdev, + packet->ActualLength); + if (packet->PktInfo.AsRx.HTCRxFlags & + HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK) + padded_length += + HIF_BLOCK_SIZE; + A_MEMCPY(packet->pBuffer, + buffer, padded_length); + buffer += padded_length; + } HTC_PACKET_QUEUE_ITERATE_END; + } + } + /* free bundle space under Sync mode */ + free_htc_bundle_packet(target, packet_rx_bundle); + return status; +} + +#define ISSUE_BUNDLE hif_dev_issue_recv_packet_bundle +static +QDF_STATUS hif_dev_recv_message_pending_handler(struct hif_sdio_device *pdev, + uint8_t mail_box_index, + uint32_t msg_look_aheads[], + int num_look_aheads, + bool *async_proc, + int *num_pkts_fetched) +{ + int pkts_fetched; + HTC_PACKET *pkt; + HTC_ENDPOINT_ID id; + bool partial_bundle; + int total_fetched = 0; + bool asyncProc = false; + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint32_t look_aheads[HTC_MAX_MSG_PER_BUNDLE_RX]; + HTC_PACKET_QUEUE recv_q, sync_comp_q; + QDF_STATUS (*rxCompletion)(void *, qdf_nbuf_t, uint8_t); + + hif_debug("%s: NumLookAheads: %d\n", __func__, num_look_aheads); + + if (num_pkts_fetched) + *num_pkts_fetched = 0; + + if (IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pdev)) { + /* We use async mode to get the packets if the + * device layer supports it. The device layer + * interfaces with HIF in which HIF may have + * restrictions on how interrupts are processed + */ + asyncProc = true; + } + + if (async_proc) { + /* indicate to caller how we decided to process this */ + *async_proc = asyncProc; + } + + if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) { + A_ASSERT(false); + return QDF_STATUS_E_PROTO; + } + + A_MEMCPY(look_aheads, msg_look_aheads, + (sizeof(uint32_t)) * num_look_aheads); + while (true) { + /* reset packets queues */ + INIT_HTC_PACKET_QUEUE(&recv_q); + INIT_HTC_PACKET_QUEUE(&sync_comp_q); + if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) { + status = QDF_STATUS_E_PROTO; + A_ASSERT(false); + break; + } + + /* first lookahead sets the expected endpoint IDs for + * all packets in a bundle + */ + id = ((HTC_FRAME_HDR *)&look_aheads[0])->EndpointID; + + if (id >= ENDPOINT_MAX) { + hif_err("%s: Invalid Endpoint in lookahead: %d\n", + __func__, id); + status = QDF_STATUS_E_PROTO; + break; + } + /* try to allocate as many HTC RX packets indicated + * by the lookaheads these packets are stored + * in the recvPkt queue + */ + status = hif_dev_alloc_and_prepare_rx_packets(pdev, + look_aheads, + num_look_aheads, + &recv_q); + if (QDF_IS_STATUS_ERROR(status)) + break; + total_fetched += HTC_PACKET_QUEUE_DEPTH(&recv_q); + + /* we've got packet buffers for all we can currently fetch, + * this count is not valid anymore + */ + num_look_aheads = 0; + partial_bundle = false; + + /* now go fetch the list of HTC packets */ + while (!HTC_QUEUE_EMPTY(&recv_q)) { + pkts_fetched = 0; + if ((HTC_PACKET_QUEUE_DEPTH(&recv_q) > 1)) { + /* there are enough packets to attempt a bundle + * transfer and recv bundling is allowed + */ + status = ISSUE_BUNDLE(pdev, + &recv_q, + asyncProc ? NULL : + &sync_comp_q, + mail_box_index, + &pkts_fetched, + partial_bundle); + if (QDF_IS_STATUS_ERROR(status)) { + hif_dev_free_recv_pkt_queue( + &recv_q); + break; + } + + if (HTC_PACKET_QUEUE_DEPTH(&recv_q) != + 0) { + /* we couldn't fetch all packets at one, + * time this creates a broken + * bundle + */ + partial_bundle = true; + } + } + + /* see if the previous operation fetched any + * packets using bundling + */ + if (pkts_fetched == 0) { + /* dequeue one packet */ + pkt = htc_packet_dequeue(&recv_q); + A_ASSERT(pkt); + if (!pkt) + break; + + pkt->Completion = NULL; + + if (HTC_PACKET_QUEUE_DEPTH(&recv_q) > + 0) { + /* lookaheads in all packets except the + * last one in must be ignored + */ + pkt->PktInfo.AsRx.HTCRxFlags |= + HTC_RX_PKT_IGNORE_LOOKAHEAD; + } + + /* go fetch the packet */ + status = + hif_dev_recv_packet(pdev, pkt, + pkt->ActualLength, + mail_box_index); + while (QDF_IS_STATUS_ERROR(status) && + !HTC_QUEUE_EMPTY(&recv_q)) { + qdf_nbuf_t nbuf; + + pkt = htc_packet_dequeue(&recv_q); + if (!pkt) + break; + nbuf = pkt->pNetBufContext; + if (nbuf) + qdf_nbuf_free(nbuf); + } + + if (QDF_IS_STATUS_ERROR(status)) + break; + /* sent synchronously, queue this packet for + * synchronous completion + */ + HTC_PACKET_ENQUEUE(&sync_comp_q, pkt); + } + } + + /* synchronous handling */ + if (pdev->DSRCanYield) { + /* for the SYNC case, increment count that tracks + * when the DSR should yield + */ + pdev->CurrentDSRRecvCount++; + } + + /* in the sync case, all packet buffers are now filled, + * we can process each packet, check lookahead , then repeat + */ + rxCompletion = pdev->hif_callbacks.rxCompletionHandler; + + /* unload sync completion queue */ + while (!HTC_QUEUE_EMPTY(&sync_comp_q)) { + uint8_t pipeid; + qdf_nbuf_t netbuf; + + pkt = htc_packet_dequeue(&sync_comp_q); + A_ASSERT(pkt); + if (!pkt) + break; + + num_look_aheads = 0; + status = hif_dev_process_recv_header(pdev, pkt, + look_aheads, + &num_look_aheads); + if (QDF_IS_STATUS_ERROR(status)) { + HTC_PACKET_ENQUEUE_TO_HEAD(&sync_comp_q, pkt); + break; + } + + netbuf = (qdf_nbuf_t)pkt->pNetBufContext; + /* set data length */ + qdf_nbuf_put_tail(netbuf, pkt->ActualLength); + + if (rxCompletion) { + pipeid = + hif_dev_map_mail_box_to_pipe(pdev, + mail_box_index, + true); + rxCompletion(pdev->hif_callbacks.Context, + netbuf, pipeid); + } + } + + if (QDF_IS_STATUS_ERROR(status)) { + if (!HTC_QUEUE_EMPTY(&sync_comp_q)) + hif_dev_free_recv_pkt_queue( + &sync_comp_q); + break; + } + + if (num_look_aheads == 0) { + /* no more look aheads */ + break; + } + /* check whether other OS contexts have queued any WMI + * command/data for WLAN. This check is needed only if WLAN + * Tx and Rx happens in same thread context + */ + /* A_CHECK_DRV_TX(); */ + } + if (num_pkts_fetched) + *num_pkts_fetched = total_fetched; + + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCRecvMessagePendingHandler\n")); + return status; +} + +/** + * hif_dev_service_cpu_interrupt() - service fatal interrupts + * synchronously + * + * @pDev: hif sdio device context + * + * Return: QDF_STATUS_SUCCESS for success + */ +static QDF_STATUS hif_dev_service_cpu_interrupt(struct hif_sdio_device *pdev) +{ + QDF_STATUS status; + uint8_t reg_buffer[4]; + uint8_t cpu_int_status; + + cpu_int_status = mboxProcRegs(pdev).cpu_int_status & + mboxEnaRegs(pdev).cpu_int_status_enable; + + hif_err("%s: 0x%x", __func__, (uint32_t)cpu_int_status); + + /* Clear the interrupt */ + mboxProcRegs(pdev).cpu_int_status &= ~cpu_int_status; + + /*set up the register transfer buffer to hit the register + * 4 times , this is done to make the access 4-byte aligned + * to mitigate issues with host bus interconnects that + * restrict bus transfer lengths to be a multiple of 4-bytes + * set W1C value to clear the interrupt, this hits the register + * first + */ + reg_buffer[0] = cpu_int_status; + /* the remaining 4 values are set to zero which have no-effect */ + reg_buffer[1] = 0; + reg_buffer[2] = 0; + reg_buffer[3] = 0; + + status = hif_read_write(pdev->HIFDevice, + CPU_INT_STATUS_ADDRESS, + reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL); + + A_ASSERT(status == QDF_STATUS_SUCCESS); + + /* The Interrupt sent to the Host is generated via bit0 + * of CPU INT register + */ + if (cpu_int_status & 0x1) { + if (pdev->hif_callbacks.fwEventHandler) + /* It calls into HTC which propagates this + * to ol_target_failure() + */ + pdev->hif_callbacks.fwEventHandler( + pdev->hif_callbacks.Context, + QDF_STATUS_E_FAILURE); + } else { + hif_err("%s: Unrecognized CPU event", __func__); + } + + return status; +} + +/** + * hif_dev_service_error_interrupt() - service error interrupts + * synchronously + * + * @pDev: hif sdio device context + * + * Return: QDF_STATUS_SUCCESS for success + */ +static QDF_STATUS hif_dev_service_error_interrupt(struct hif_sdio_device *pdev) +{ + QDF_STATUS status; + uint8_t reg_buffer[4]; + uint8_t error_int_status = 0; + + error_int_status = mboxProcRegs(pdev).error_int_status & 0x0F; + hif_err("%s: 0x%x", __func__, error_int_status); + + if (ERROR_INT_STATUS_WAKEUP_GET(error_int_status)) + hif_err("%s: Error : Wakeup", __func__); + + if (ERROR_INT_STATUS_RX_UNDERFLOW_GET(error_int_status)) + hif_err("%s: Error : Rx Underflow", __func__); + + if (ERROR_INT_STATUS_TX_OVERFLOW_GET(error_int_status)) + hif_err("%s: Error : Tx Overflow", __func__); + + /* Clear the interrupt */ + mboxProcRegs(pdev).error_int_status &= ~error_int_status; + + /* set up the register transfer buffer to hit the register + * 4 times , this is done to make the access 4-byte + * aligned to mitigate issues with host bus interconnects that + * restrict bus transfer lengths to be a multiple of 4-bytes + */ + + /* set W1C value to clear the interrupt */ + reg_buffer[0] = error_int_status; + /* the remaining 4 values are set to zero which have no-effect */ + reg_buffer[1] = 0; + reg_buffer[2] = 0; + reg_buffer[3] = 0; + + status = hif_read_write(pdev->HIFDevice, + ERROR_INT_STATUS_ADDRESS, + reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL); + + A_ASSERT(status == QDF_STATUS_SUCCESS); + return status; +} + +/** + * hif_dev_service_debug_interrupt() - service debug interrupts + * synchronously + * + * @pDev: hif sdio device context + * + * Return: QDF_STATUS_SUCCESS for success + */ +static QDF_STATUS hif_dev_service_debug_interrupt(struct hif_sdio_device *pdev) +{ + uint32_t dummy; + QDF_STATUS status; + + /* Send a target failure event to the application */ + hif_err("%s: Target debug interrupt", __func__); + + /* clear the interrupt , the debug error interrupt is counter 0 + * read counter to clear interrupt + */ + status = hif_read_write(pdev->HIFDevice, + COUNT_DEC_ADDRESS, + (uint8_t *)&dummy, + 4, HIF_RD_SYNC_BYTE_INC, NULL); + + A_ASSERT(status == QDF_STATUS_SUCCESS); + return status; +} + +/** + * hif_dev_service_counter_interrupt() - service counter interrupts + * synchronously + * + * @pDev: hif sdio device context + * + * Return: QDF_STATUS_SUCCESS for success + */ +static +QDF_STATUS hif_dev_service_counter_interrupt(struct hif_sdio_device *pdev) +{ + uint8_t counter_int_status; + + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Counter Interrupt\n")); + + counter_int_status = mboxProcRegs(pdev).counter_int_status & + mboxEnaRegs(pdev).counter_int_status_enable; + + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, + ("Valid interrupt source in COUNTER_INT_STATUS: 0x%x\n", + counter_int_status)); + + /* Check if the debug interrupt is pending + * NOTE: other modules like GMBOX may use the counter interrupt + * for credit flow control on other counters, we only need to + * check for the debug assertion counter interrupt + */ + if (counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK) + return hif_dev_service_debug_interrupt(pdev); + + return QDF_STATUS_SUCCESS; +} + +#define RX_LOOAHEAD_GET(pdev, i) \ + mboxProcRegs(pdev).rx_lookahead[MAILBOX_LOOKAHEAD_SIZE_IN_WORD * i] +/** + * hif_dev_process_pending_irqs() - process pending interrupts + * @pDev: hif sdio device context + * @pDone: pending irq completion status + * @pASyncProcessing: sync/async processing flag + * + * Return: QDF_STATUS_SUCCESS for success + */ +QDF_STATUS hif_dev_process_pending_irqs(struct hif_sdio_device *pdev, + bool *done, + bool *async_processing) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint8_t host_int_status = 0; + uint32_t l_ahead[MAILBOX_USED_COUNT]; + int i; + + qdf_mem_zero(&l_ahead, sizeof(l_ahead)); + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, + ("+ProcessPendingIRQs: (dev: 0x%lX)\n", + (unsigned long)pdev)); + + /* NOTE: the HIF implementation guarantees that the context + * of this call allows us to perform SYNCHRONOUS I/O, + * that is we can block, sleep or call any API that + * can block or switch thread/task ontexts. + * This is a fully schedulable context. + */ + do { + if (mboxEnaRegs(pdev).int_status_enable == 0) { + /* interrupt enables have been cleared, do not try + * to process any pending interrupts that + * may result in more bus transactions. + * The target may be unresponsive at this point. + */ + break; + } + status = hif_read_write(pdev->HIFDevice, + HOST_INT_STATUS_ADDRESS, + (uint8_t *)&mboxProcRegs(pdev), + sizeof(mboxProcRegs(pdev)), + HIF_RD_SYNC_BYTE_INC, NULL); + + if (QDF_IS_STATUS_ERROR(status)) + break; + + if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_IRQ)) { + hif_dev_dump_registers(pdev, + &mboxProcRegs(pdev), + &mboxEnaRegs(pdev), + &mboxCountRegs(pdev)); + } + + /* Update only those registers that are enabled */ + host_int_status = mboxProcRegs(pdev).host_int_status + & mboxEnaRegs(pdev).int_status_enable; + + /* only look at mailbox status if the HIF layer did not + * provide this function, on some HIF interfaces reading + * the RX lookahead is not valid to do + */ + for (i = 0; i < MAILBOX_USED_COUNT; i++) { + l_ahead[i] = 0; + if (host_int_status & (1 << i)) { + /* mask out pending mailbox value, we use + * "lookAhead" as the real flag for + * mailbox processing below + */ + host_int_status &= ~(1 << i); + if (mboxProcRegs(pdev). + rx_lookahead_valid & (1 << i)) { + /* mailbox has a message and the + * look ahead is valid + */ + l_ahead[i] = RX_LOOAHEAD_GET(pdev, i); + } + } + } /*end of for loop */ + } while (false); + + do { + bool bLookAheadValid = false; + /* did the interrupt status fetches succeed? */ + if (QDF_IS_STATUS_ERROR(status)) + break; + + for (i = 0; i < MAILBOX_USED_COUNT; i++) { + if (l_ahead[i] != 0) { + bLookAheadValid = true; + break; + } + } + + if ((host_int_status == 0) && !bLookAheadValid) { + /* nothing to process, the caller can use this + * to break out of a loop + */ + *done = true; + break; + } + + if (bLookAheadValid) { + for (i = 0; i < MAILBOX_USED_COUNT; i++) { + int fetched = 0; + + if (l_ahead[i] == 0) + continue; + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, + ("mbox[%d],lookahead:0x%X\n", + i, l_ahead[i])); + /* Mailbox Interrupt, the HTC layer may issue + * async requests to empty the mailbox... + * When emptying the recv mailbox we use the + * async handler from the completion routine of + * routine of the callers read request. + * This can improve performance by reducing + * the context switching when we rapidly + * pull packets + */ + status = hif_dev_recv_message_pending_handler( + pdev, i, + &l_ahead + [i], 1, + async_processing, + &fetched); + if (QDF_IS_STATUS_ERROR(status)) + break; + + if (!fetched) { + /* HTC could not pull any messages out + * due to lack of resources force DSR + * handle to ack the interrupt + */ + *async_processing = false; + pdev->RecheckIRQStatusCnt = 0; + } + } + } + + /* now handle the rest of them */ + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, + ("Valid source for OTHER interrupts: 0x%x\n", + host_int_status)); + + if (HOST_INT_STATUS_CPU_GET(host_int_status)) { + /* CPU Interrupt */ + status = hif_dev_service_cpu_interrupt(pdev); + if (QDF_IS_STATUS_ERROR(status)) + break; + } + + if (HOST_INT_STATUS_ERROR_GET(host_int_status)) { + /* Error Interrupt */ + status = hif_dev_service_error_interrupt(pdev); + if (QDF_IS_STATUS_ERROR(status)) + break; + } + + if (HOST_INT_STATUS_COUNTER_GET(host_int_status)) { + /* Counter Interrupt */ + status = hif_dev_service_counter_interrupt(pdev); + if (QDF_IS_STATUS_ERROR(status)) + break; + } + + } while (false); + + /* an optimization to bypass reading the IRQ status registers + * unecessarily which can re-wake the target, if upper layers + * determine that we are in a low-throughput mode, we can + * rely on taking another interrupt rather than re-checking + * the status registers which can re-wake the target. + * + * NOTE : for host interfaces that use the special + * GetPendingEventsFunc, this optimization cannot be used due to + * possible side-effects. For example, SPI requires the host + * to drain all messages from the mailbox before exiting + * the ISR routine. + */ + if (!(*async_processing) && (pdev->RecheckIRQStatusCnt == 0)) { + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, + ("Bypass IRQ Status re-check, forcing done\n")); + *done = true; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, + ("-ProcessPendingIRQs: (done:%d, async:%d) status=%d\n", + *done, *async_processing, status)); + + return status; +} + +#define DEV_CHECK_RECV_YIELD(pdev) \ + ((pdev)->CurrentDSRRecvCount >= \ + (pdev)->HifIRQYieldParams.recv_packet_yield_count) +/** + * hif_dev_dsr_handler() - Synchronous interrupt handler + * + * @context: hif send context + * + * Return: 0 for success and non-zero for failure + */ +QDF_STATUS hif_dev_dsr_handler(void *context) +{ + struct hif_sdio_device *pdev = (struct hif_sdio_device *)context; + QDF_STATUS status = QDF_STATUS_SUCCESS; + bool done = false; + bool async_proc = false; + + /* reset the recv counter that tracks when we need + * to yield from the DSR + */ + pdev->CurrentDSRRecvCount = 0; + /* reset counter used to flag a re-scan of IRQ + * status registers on the target + */ + pdev->RecheckIRQStatusCnt = 0; + + while (!done) { + status = hif_dev_process_pending_irqs(pdev, &done, &async_proc); + if (QDF_IS_STATUS_ERROR(status)) + break; + + if (pdev->HifIRQProcessingMode == HIF_DEVICE_IRQ_SYNC_ONLY) { + /* the HIF layer does not allow async IRQ processing, + * override the asyncProc flag + */ + async_proc = false; + /* this will cause us to re-enter ProcessPendingIRQ() + * and re-read interrupt status registers. + * This has a nice side effect of blocking us until all + * async read requests are completed. This behavior is + * required as we do not allow ASYNC processing + * in interrupt handlers (like Windows CE) + */ + + if (pdev->DSRCanYield && DEV_CHECK_RECV_YIELD(pdev)) + /* ProcessPendingIRQs() pulled enough recv + * messages to satisfy the yield count, stop + * checking for more messages and return + */ + break; + } + + if (async_proc) { + /* the function does some async I/O for performance, + * we need to exit the ISR immediately, the check below + * will prevent the interrupt from being + * Ack'd while we handle it asynchronously + */ + break; + } + } + + if (QDF_IS_STATUS_SUCCESS(status) && !async_proc) { + /* Ack the interrupt only if : + * 1. we did not get any errors in processing interrupts + * 2. there are no outstanding async processing requests + */ + if (pdev->DSRCanYield) { + /* if the DSR can yield do not ACK the interrupt, there + * could be more pending messages. The HIF layer + * must ACK the interrupt on behalf of HTC + */ + hif_info("%s: Yield (RX count: %d)", + __func__, pdev->CurrentDSRRecvCount); + } else { + hif_ack_interrupt(pdev->HIFDevice); + } + } + + return status; +} + +/** + * hif_read_write() - queue a read/write request + * @device: pointer to hif device structure + * @address: address to read + * @buffer: buffer to hold read/write data + * @length: length to read/write + * @request: read/write/sync/async request + * @context: pointer to hold calling context + * + * Return: 0 on success, error number otherwise. + */ +QDF_STATUS +hif_read_write(struct hif_sdio_dev *device, + unsigned long address, + char *buffer, uint32_t length, + uint32_t request, void *context) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct bus_request *busrequest; + + AR_DEBUG_ASSERT(device); + AR_DEBUG_ASSERT(device->func); + hif_debug("%s: device 0x%pK addr 0x%lX buffer 0x%pK", + __func__, device, address, buffer); + hif_debug("%s: len %d req 0x%X context 0x%pK", + __func__, length, request, context); + + /*sdio r/w action is not needed when suspend, so just return */ + if ((device->is_suspend) && + (device->power_config == HIF_DEVICE_POWER_CUT)) { + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("skip io when suspending\n")); + return QDF_STATUS_SUCCESS; + } + do { + if ((request & HIF_ASYNCHRONOUS) || + (request & HIF_SYNCHRONOUS)) { + /* serialize all requests through the async thread */ + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: Execution mode: %s\n", __func__, + (request & HIF_ASYNCHRONOUS) ? "Async" + : "Synch")); + busrequest = hif_allocate_bus_request(device); + if (!busrequest) { + hif_err("%s:bus requests unavail", __func__); + hif_err("%s, addr:0x%lX, len:%d", + request & HIF_SDIO_READ ? "READ" : + "WRITE", address, length); + return QDF_STATUS_E_FAILURE; + } + busrequest->address = address; + busrequest->buffer = buffer; + busrequest->length = length; + busrequest->request = request; + busrequest->context = context; + + add_to_async_list(device, busrequest); + + if (request & HIF_SYNCHRONOUS) { + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: queued sync req: 0x%lX\n", + __func__, + (unsigned long)busrequest)); + + /* wait for completion */ + up(&device->sem_async); + if (down_interruptible(&busrequest->sem_req) == + 0) { + QDF_STATUS status = busrequest->status; + + hif_debug("%s: sync freeing 0x%lX:0x%X", + __func__, + (unsigned long)busrequest, + busrequest->status); + hif_debug("%s: freeing req: 0x%X", + __func__, + (unsigned int)request); + hif_free_bus_request(device, + busrequest); + return status; + } else { + /* interrupted, exit */ + return QDF_STATUS_E_FAILURE; + } + } else { + hif_debug("%s: queued async req: 0x%lX", + __func__, (unsigned long)busrequest); + up(&device->sem_async); + return QDF_STATUS_E_PENDING; + } + } else { + hif_err("%s: Invalid execution mode: 0x%08x", + __func__, (unsigned int)request); + status = QDF_STATUS_E_INVAL; + break; + } + } while (0); + + return status; +} + +/** + * hif_sdio_func_enable() - Handle device enabling as per device + * @device: HIF device object + * @func: function pointer + * + * Return success or failure + */ +static int hif_sdio_func_enable(struct hif_softc *ol_sc, + struct sdio_func *func) +{ + struct hif_sdio_dev *device = get_hif_device(ol_sc, func); + + if (device->is_disabled) { + int ret = 0; + + sdio_claim_host(func); + + ret = hif_sdio_quirk_async_intr(ol_sc, func); + if (ret) { + hif_err("%s: Error setting async intr:%d", + __func__, ret); + sdio_release_host(func); + return QDF_STATUS_E_FAILURE; + } + + func->enable_timeout = 100; + ret = sdio_enable_func(func); + if (ret) { + hif_err("%s: Unable to enable function: %d", + __func__, ret); + sdio_release_host(func); + return QDF_STATUS_E_FAILURE; + } + + ret = sdio_set_block_size(func, HIF_BLOCK_SIZE); + if (ret) { + hif_err("%s: Unable to set block size 0x%X : %d\n", + __func__, HIF_BLOCK_SIZE, ret); + sdio_release_host(func); + return QDF_STATUS_E_FAILURE; + } + + ret = hif_sdio_quirk_mod_strength(ol_sc, func); + if (ret) { + hif_err("%s: Error setting mod strength : %d\n", + __func__, ret); + sdio_release_host(func); + return QDF_STATUS_E_FAILURE; + } + + sdio_release_host(func); + } + + return 0; +} + +/** + * __hif_read_write() - sdio read/write wrapper + * @device: pointer to hif device structure + * @address: address to read + * @buffer: buffer to hold read/write data + * @length: length to read/write + * @request: read/write/sync/async request + * @context: pointer to hold calling context + * + * Return: 0 on success, error number otherwise. + */ +static QDF_STATUS +__hif_read_write(struct hif_sdio_dev *device, + uint32_t address, char *buffer, + uint32_t length, uint32_t request, void *context) +{ + uint8_t opcode; + QDF_STATUS status = QDF_STATUS_SUCCESS; + int ret = A_OK; + uint8_t *tbuffer; + bool bounced = false; + + if (!device) { + hif_err("%s: device null!", __func__); + return QDF_STATUS_E_INVAL; + } + + if (!device->func) { + hif_err("%s: func null!", __func__); + return QDF_STATUS_E_INVAL; + } + + hif_debug("%s: addr:0X%06X, len:%08d, %s, %s", __func__, + address, length, + request & HIF_SDIO_READ ? "Read " : "Write", + request & HIF_ASYNCHRONOUS ? "Async" : "Sync "); + + do { + if (request & HIF_EXTENDED_IO) { + //HIF_INFO_HI("%s: Command type: CMD53\n", __func__); + } else { + hif_err("%s: Invalid command type: 0x%08x\n", + __func__, request); + status = QDF_STATUS_E_INVAL; + break; + } + + if (request & HIF_BLOCK_BASIS) { + /* round to whole block length size */ + length = + (length / HIF_BLOCK_SIZE) * + HIF_BLOCK_SIZE; + hif_debug("%s: Block mode (BlockLen: %d)\n", + __func__, length); + } else if (request & HIF_BYTE_BASIS) { + hif_debug("%s: Byte mode (BlockLen: %d)\n", + __func__, length); + } else { + hif_err("%s: Invalid data mode: 0x%08x\n", + __func__, request); + status = QDF_STATUS_E_INVAL; + break; + } + if (request & HIF_SDIO_WRITE) { + hif_fixup_write_param(device, request, + &length, &address); + + hif_debug("addr:%08X, len:0x%08X, dummy:0x%04X\n", + address, length, + (request & HIF_DUMMY_SPACE_MASK) >> 16); + } + + if (request & HIF_FIXED_ADDRESS) { + opcode = CMD53_FIXED_ADDRESS; + hif_debug("%s: Addr mode: fixed 0x%X\n", + __func__, address); + } else if (request & HIF_INCREMENTAL_ADDRESS) { + opcode = CMD53_INCR_ADDRESS; + hif_debug("%s: Address mode: Incremental 0x%X\n", + __func__, address); + } else { + hif_err("%s: Invalid address mode: 0x%08x\n", + __func__, request); + status = QDF_STATUS_E_INVAL; + break; + } + + if (request & HIF_SDIO_WRITE) { +#if HIF_USE_DMA_BOUNCE_BUFFER + if (BUFFER_NEEDS_BOUNCE(buffer)) { + AR_DEBUG_ASSERT(device->dma_buffer); + tbuffer = device->dma_buffer; + /* copy the write data to the dma buffer */ + AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE); + if (length > HIF_DMA_BUFFER_SIZE) { + hif_err("%s: Invalid write len: %d\n", + __func__, length); + status = QDF_STATUS_E_INVAL; + break; + } + memcpy(tbuffer, buffer, length); + bounced = true; + } else { + tbuffer = buffer; + } +#else + tbuffer = buffer; +#endif + if (opcode == CMD53_FIXED_ADDRESS && tbuffer) { + ret = sdio_writesb(device->func, address, + tbuffer, length); + hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n", + __func__, ret, address, length, + *(int *)tbuffer); + } else if (tbuffer) { + ret = sdio_memcpy_toio(device->func, address, + tbuffer, length); + hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n", + __func__, ret, address, length, + *(int *)tbuffer); + } + } else if (request & HIF_SDIO_READ) { +#if HIF_USE_DMA_BOUNCE_BUFFER + if (BUFFER_NEEDS_BOUNCE(buffer)) { + AR_DEBUG_ASSERT(device->dma_buffer); + AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE); + if (length > HIF_DMA_BUFFER_SIZE) { + hif_err("%s: Invalid read len: %d\n", + __func__, length); + status = QDF_STATUS_E_INVAL; + break; + } + tbuffer = device->dma_buffer; + bounced = true; + } else { + tbuffer = buffer; + } +#else + tbuffer = buffer; +#endif + if (opcode == CMD53_FIXED_ADDRESS && tbuffer) { + ret = sdio_readsb(device->func, tbuffer, + address, length); + hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n", + __func__, ret, address, length, + *(int *)tbuffer); + } else if (tbuffer) { + ret = sdio_memcpy_fromio(device->func, + tbuffer, address, + length); + hif_debug("%s:r=%d addr:0x%X, len:%d, 0x%X\n", + __func__, ret, address, length, + *(int *)tbuffer); + } +#if HIF_USE_DMA_BOUNCE_BUFFER + if (bounced && tbuffer) + memcpy(buffer, tbuffer, length); +#endif + } else { + hif_err("%s: Invalid dir: 0x%08x", __func__, request); + status = QDF_STATUS_E_INVAL; + return status; + } + + if (ret) { + hif_err("%s: SDIO bus operation failed!", __func__); + hif_err("%s: MMC stack returned : %d", __func__, ret); + hif_err("%s: addr:0X%06X, len:%08d, %s, %s", + __func__, address, length, + request & HIF_SDIO_READ ? "Read " : "Write", + request & HIF_ASYNCHRONOUS ? + "Async" : "Sync"); + status = QDF_STATUS_E_FAILURE; + } + } while (false); + + return status; +} + +/** + * async_task() - thread function to serialize all bus requests + * @param: pointer to hif device + * + * thread function to serialize all requests, both sync and async + * Return: 0 on success, error number otherwise. + */ +static int async_task(void *param) +{ + struct hif_sdio_dev *device; + struct bus_request *request; + QDF_STATUS status; + bool claimed = false; + + device = (struct hif_sdio_dev *)param; + set_current_state(TASK_INTERRUPTIBLE); + while (!device->async_shutdown) { + /* wait for work */ + if (down_interruptible(&device->sem_async) != 0) { + /* interrupted, exit */ + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: async task interrupted\n", + __func__)); + break; + } + if (device->async_shutdown) { + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: async task stopping\n", + __func__)); + break; + } + /* we want to hold the host over multiple cmds + * if possible, but holding the host blocks + * card interrupts + */ + qdf_spin_lock_irqsave(&device->asynclock); + /* pull the request to work on */ + while (device->asyncreq) { + request = device->asyncreq; + if (request->inusenext) + device->asyncreq = request->inusenext; + else + device->asyncreq = NULL; + qdf_spin_unlock_irqrestore(&device->asynclock); + hif_debug("%s: processing req: 0x%lX", + __func__, (unsigned long)request); + + if (!claimed) { + sdio_claim_host(device->func); + claimed = true; + } + if (request->scatter_req) { + A_ASSERT(device->scatter_enabled); + /* pass the request to scatter routine which + * executes it synchronously, note, no need + * to free the request since scatter requests + * are maintained on a separate list + */ + status = do_hif_read_write_scatter(device, + request); + } else { + /* call hif_read_write in sync mode */ + status = + __hif_read_write(device, + request->address, + request->buffer, + request->length, + request-> + request & + ~HIF_SYNCHRONOUS, + NULL); + if (request->request & HIF_ASYNCHRONOUS) { + void *context = request->context; + + hif_free_bus_request(device, request); + device->htc_callbacks. + rw_compl_handler(context, status); + } else { + hif_debug("%s: upping req: 0x%lX", + __func__, + (unsigned long)request); + request->status = status; + up(&request->sem_req); + } + } + qdf_spin_lock_irqsave(&device->asynclock); + } + qdf_spin_unlock_irqrestore(&device->asynclock); + if (claimed) { + sdio_release_host(device->func); + claimed = false; + } + } + + complete_and_exit(&device->async_completion, 0); + + return 0; +} + +/** + * hif_disable_func() - Disable SDIO function + * + * @device: HIF device pointer + * @func: SDIO function pointer + * @reset: If this is called from resume or probe + * + * Return: 0 in case of success, else error value + */ +QDF_STATUS hif_disable_func(struct hif_sdio_dev *device, + struct sdio_func *func, + bool reset) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + HIF_ENTER(); + if (!IS_ERR(device->async_task)) { + init_completion(&device->async_completion); + device->async_shutdown = 1; + up(&device->sem_async); + wait_for_completion(&device->async_completion); + device->async_task = NULL; + sema_init(&device->sem_async, 0); + } + + status = hif_sdio_func_disable(device, func, reset); + if (status == QDF_STATUS_SUCCESS) + device->is_disabled = true; + + cleanup_hif_scatter_resources(device); + + HIF_EXIT(); + + return status; +} + +/** + * hif_enable_func() - Enable SDIO function + * + * @ol_sc: HIF object pointer + * @device: HIF device pointer + * @sdio_func: SDIO function pointer + * @resume: If this is called from resume or probe + * + * Return: 0 in case of success, else error value + */ +QDF_STATUS hif_enable_func(struct hif_softc *ol_sc, struct hif_sdio_dev *device, + struct sdio_func *func, bool resume) +{ + int ret = QDF_STATUS_SUCCESS; + + HIF_ENTER(); + + if (!device) { + hif_err("%s: HIF device is NULL", __func__); + return QDF_STATUS_E_INVAL; + } + + if (hif_sdio_func_enable(ol_sc, func)) + return QDF_STATUS_E_FAILURE; + + /* create async I/O thread */ + if (!device->async_task && device->is_disabled) { + device->async_shutdown = 0; + device->async_task = kthread_create(async_task, + (void *)device, + "AR6K Async"); + if (IS_ERR(device->async_task)) { + hif_err("%s: Error creating async task", + __func__); + return QDF_STATUS_E_FAILURE; + } + device->is_disabled = false; + wake_up_process(device->async_task); + } + + if (!resume) + ret = hif_sdio_probe(ol_sc, func, device); + + HIF_EXIT(); + + return ret; +} +#endif /* CONFIG_SDIO_TRANSFER_MAILBOX */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/mailbox.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/mailbox.h new file mode 100644 index 0000000000000000000000000000000000000000..3e5913215241fec47eda0b056a0960562efd20d0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/mailbox.h @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved. + * + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + +#ifndef _MAILBOX_H_ +#define _MAILBOX_H__ + +#include "a_debug.h" +#include "hif_sdio_dev.h" +#include "htc_packet.h" +#include "htc_api.h" +#include "hif_internal.h" + +#define INVALID_MAILBOX_NUMBER 0xFF + +#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK | \ + INT_STATUS_ENABLE_CPU_MASK | \ + INT_STATUS_ENABLE_COUNTER_MASK) + +/* HTC operational parameters */ +#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */ +#define HTC_TARGET_DEBUG_INTR_MASK 0x01 +#define HTC_TARGET_CREDIT_INTR_MASK 0xF0 + +#define MAILBOX_COUNT 4 +#define MAILBOX_FOR_BLOCK_SIZE 1 +#define MAILBOX_USED_COUNT 2 +#if defined(SDIO_3_0) +#define MAILBOX_LOOKAHEAD_SIZE_IN_WORD 2 +#else +#define MAILBOX_LOOKAHEAD_SIZE_IN_WORD 1 +#endif +#define AR6K_TARGET_DEBUG_INTR_MASK 0x01 + +/* Mailbox address in SDIO address space */ +#if defined(SDIO_3_0) +#define HIF_MBOX_BASE_ADDR 0x1000 +#define HIF_MBOX_DUMMY_WIDTH 0x800 +#else +#define HIF_MBOX_BASE_ADDR 0x800 +#define HIF_MBOX_DUMMY_WIDTH 0 +#endif + +#define HIF_MBOX_WIDTH 0x800 + +#define HIF_MBOX_START_ADDR(mbox) \ + (HIF_MBOX_BASE_ADDR + mbox * (HIF_MBOX_WIDTH + HIF_MBOX_DUMMY_WIDTH)) + +#define HIF_MBOX_END_ADDR(mbox) \ + (HIF_MBOX_START_ADDR(mbox) + HIF_MBOX_WIDTH - 1) + +/* extended MBOX address for larger MBOX writes to MBOX 0*/ +#if defined(SDIO_3_0) +#define HIF_MBOX0_EXTENDED_BASE_ADDR 0x5000 +#else +#define HIF_MBOX0_EXTENDED_BASE_ADDR 0x2800 +#endif +#define HIF_MBOX0_EXTENDED_WIDTH_AR6002 (6 * 1024) +#define HIF_MBOX0_EXTENDED_WIDTH_AR6003 (18 * 1024) + +/* version 1 of the chip has only a 12K extended mbox range */ +#define HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1 0x4000 +#define HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1 (12 * 1024) + +#define HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004 0x2800 +#define HIF_MBOX0_EXTENDED_WIDTH_AR6004 (18 * 1024) + +#if defined(SDIO_3_0) +#define HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320 0x5000 +#define HIF_MBOX0_EXTENDED_WIDTH_AR6320 (36 * 1024) +#define HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0 (56 * 1024) +#define HIF_MBOX1_EXTENDED_WIDTH_AR6320 (36 * 1024) +#define HIF_MBOX_DUMMY_SPACE_SIZE_AR6320 (2 * 1024) +#else +#define HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320 0x2800 +#define HIF_MBOX0_EXTENDED_WIDTH_AR6320 (24 * 1024) +#define HIF_MBOX1_EXTENDED_WIDTH_AR6320 (24 * 1024) +#define HIF_MBOX_DUMMY_SPACE_SIZE_AR6320 0 +#endif + +/* GMBOX addresses */ +#define HIF_GMBOX_BASE_ADDR 0x7000 +#define HIF_GMBOX_WIDTH 0x4000 + +/* for SDIO we recommend a 128-byte block size */ +#if defined(WITH_BACKPORTS) +#define HIF_DEFAULT_IO_BLOCK_SIZE 128 +#else +#define HIF_DEFAULT_IO_BLOCK_SIZE 256 +#endif + +#define FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868 +#define FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF +#define FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON 0x10000 +/* In SDIO 2.0, asynchronous interrupt is not in SPEC + * requirement, but AR6003 support it, so the register + * is placed in vendor specific field 0xF0(bit0) + * In SDIO 3.0, the register is defined in SPEC, and its + * address is 0x16(bit1) + */ +/* interrupt mode register of AR6003 */ +#define CCCR_SDIO_IRQ_MODE_REG_AR6003 0xF0 +/* mode to enable special 4-bit interrupt assertion without clock */ +#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6003 (1 << 0) +/* interrupt mode register of AR6320 */ +#define CCCR_SDIO_IRQ_MODE_REG_AR6320 0x16 +/* mode to enable special 4-bit interrupt assertion without clock */ +#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320 (1 << 1) + +#define CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS 0xF0 +#define CCCR_SDIO_ASYNC_INT_DELAY_LSB 0x06 +#define CCCR_SDIO_ASYNC_INT_DELAY_MASK 0xC0 + +/* Vendor Specific Driver Strength Settings */ +#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR 0xf2 +#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_MASK 0x0e +#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A 0x02 +#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C 0x04 +#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D 0x08 + +#define HIF_BLOCK_SIZE HIF_DEFAULT_IO_BLOCK_SIZE +#define HIF_MBOX0_BLOCK_SIZE 1 +#define HIF_MBOX1_BLOCK_SIZE HIF_BLOCK_SIZE +#define HIF_MBOX2_BLOCK_SIZE HIF_BLOCK_SIZE +#define HIF_MBOX3_BLOCK_SIZE HIF_BLOCK_SIZE + +/* + * data written into the dummy space will not put into the final mbox FIFO + */ +#define HIF_DUMMY_SPACE_MASK 0xFFFF0000 + +PREPACK struct MBOX_IRQ_PROC_REGISTERS { + uint8_t host_int_status; + uint8_t cpu_int_status; + uint8_t error_int_status; + uint8_t counter_int_status; + uint8_t mbox_frame; + uint8_t rx_lookahead_valid; + uint8_t host_int_status2; + uint8_t gmbox_rx_avail; + uint32_t rx_lookahead[MAILBOX_LOOKAHEAD_SIZE_IN_WORD * MAILBOX_COUNT]; + uint32_t int_status_enable; +} POSTPACK; + +PREPACK struct MBOX_IRQ_ENABLE_REGISTERS { + uint8_t int_status_enable; + uint8_t cpu_int_status_enable; + uint8_t error_status_enable; + uint8_t counter_int_status_enable; +} POSTPACK; + +#define TOTAL_CREDIT_COUNTER_CNT 4 + +PREPACK struct MBOX_COUNTER_REGISTERS { + uint32_t counter[TOTAL_CREDIT_COUNTER_CNT]; +} POSTPACK; + +struct devRegisters { + struct MBOX_IRQ_PROC_REGISTERS IrqProcRegisters; + struct MBOX_IRQ_ENABLE_REGISTERS IrqEnableRegisters; + struct MBOX_COUNTER_REGISTERS MailBoxCounterRegisters; +}; + +#define mboxProcRegs(hdev) hdev->devRegisters.IrqProcRegisters +#define mboxEnaRegs(hdev) hdev->devRegisters.IrqEnableRegisters +#define mboxCountRegs(hdev) hdev->devRegisters.MailBoxCounterRegisters + +#define DEV_REGISTERS_SIZE (sizeof(struct MBOX_IRQ_PROC_REGISTERS) + \ + sizeof(struct MBOX_IRQ_ENABLE_REGISTERS) + \ + sizeof(struct MBOX_COUNTER_REGISTERS)) + +void hif_dev_dump_registers(struct hif_sdio_device *pdev, + struct MBOX_IRQ_PROC_REGISTERS *irq_proc, + struct MBOX_IRQ_ENABLE_REGISTERS *irq_en, + struct MBOX_COUNTER_REGISTERS *mbox_regs); +#endif /* _MAILBOX_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/transfer.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/transfer.c new file mode 100644 index 0000000000000000000000000000000000000000..f75680ce06a8ddc5d852e39b539db5667dfe2f53 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/transfer.c @@ -0,0 +1,721 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + +#define ATH_MODULE_NAME hif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hif_sdio_internal.h" +#include "transfer.h" + +/** + * hif_dev_rw_completion_handler() - Completion routine + * for ALL HIF layer async I/O + * @context: hif send context + * @status: completion routine sync/async context + * + * Return: 0 for success and non-zero for failure + */ + +QDF_STATUS hif_dev_rw_completion_handler(void *ctx, QDF_STATUS status) +{ + QDF_STATUS (*txCompHandler)(void *, qdf_nbuf_t, uint32_t, uint32_t); + struct hif_sendContext *sctx = (struct hif_sendContext *)ctx; + struct hif_sdio_device *pdev = sctx->pDev; + unsigned int xfer_id = sctx->transferID; + uint32_t toeplitz_hash_result = 0; + qdf_nbuf_t buf = sctx->netbuf; + + if (sctx->bNewAlloc) + qdf_mem_free(ctx); + else + qdf_nbuf_pull_head(buf, sctx->head_data_len); + + txCompHandler = pdev->hif_callbacks.txCompletionHandler; + if (txCompHandler) { + txCompHandler(pdev->hif_callbacks.Context, buf, + xfer_id, toeplitz_hash_result); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_dev_send_buffer() - send buffer to sdio device + * @pDev: HIf device object + * @xfer_id: transfer id + * @pipe: ul/dl pipe + * @nbytes: no of bytes to transfer + * @buf: pointer to buffer + * + * Return: 0 for success and non-zero for failure + */ +QDF_STATUS hif_dev_send_buffer(struct hif_sdio_device *pdev, uint32_t xfer_id, + uint8_t pipe, uint32_t nbytes, qdf_nbuf_t buf) +{ + QDF_STATUS status; + unsigned char *pData; + struct hif_sendContext *sctx; + uint32_t request = hif_get_send_buffer_flags(pdev); + uint32_t padded_length; + unsigned long addr = 0; + int frag_count = 0, i, count, head_len; + + if (hif_get_send_address(pdev, pipe, &addr)) { + hif_err("%s: Invalid address map for pipe 0x%x", + __func__, pipe); + + return QDF_STATUS_E_INVAL; + } + + padded_length = DEV_CALC_SEND_PADDED_LEN(pdev, nbytes); + A_ASSERT(padded_length - nbytes < HIF_DUMMY_SPACE_MASK + 1); + + request |= ((padded_length - nbytes) << 16); + + frag_count = qdf_nbuf_get_num_frags(buf); + + if (frag_count > 1) { + /* Header data length should be total sending length. + * Subtract internal data length of netbuf + */ + head_len = sizeof(struct hif_sendContext) + + (nbytes - qdf_nbuf_get_frag_len(buf, frag_count - 1)); + } else { + /* + * | hif_sendContext | netbuf->data + */ + head_len = sizeof(struct hif_sendContext); + } + + /* Check whether head room is enough to save extra head data */ + if ((head_len <= qdf_nbuf_headroom(buf)) && + (qdf_nbuf_tailroom(buf) >= (padded_length - nbytes))) { + sctx = (struct hif_sendContext *)qdf_nbuf_push_head(buf, + head_len); + sctx->bNewAlloc = false; + } else { + sctx = (struct hif_sendContext *)qdf_mem_malloc(sizeof(*sctx) + + padded_length); + if (sctx) + sctx->bNewAlloc = true; + else + return QDF_STATUS_E_NOMEM; + } + + sctx->netbuf = buf; + sctx->pDev = pdev; + sctx->transferID = xfer_id; + sctx->head_data_len = head_len; + /* + * Copy data to head part of netbuf or head of allocated buffer. + * if buffer is new allocated, the last buffer should be copied also. + * It assume last fragment is internal buffer of netbuf + * sometime total length of fragments larger than nbytes + */ + pData = (unsigned char *)sctx + sizeof(struct hif_sendContext); + for (i = 0, count = sctx->bNewAlloc ? frag_count : frag_count - 1; + i < count; + i++) { + int frag_len = qdf_nbuf_get_frag_len(buf, i); + unsigned char *frag_addr = qdf_nbuf_get_frag_vaddr(buf, i); + + if (frag_len > nbytes) + frag_len = nbytes; + memcpy(pData, frag_addr, frag_len); + pData += frag_len; + nbytes -= frag_len; + if (nbytes <= 0) + break; + } + + /* Reset pData pointer and sctx out */ + pData = (unsigned char *)sctx + sizeof(struct hif_sendContext); + + status = hif_read_write(pdev->HIFDevice, addr, (char *)pData, + padded_length, request, (void *)sctx); + + if (status == QDF_STATUS_E_PENDING) { + /* + * it will return QDF_STATUS_E_PENDING in native HIF + * implementation, which should be treated as successful + * result here. + */ + status = QDF_STATUS_SUCCESS; + } + + /* release buffer or move back data pointer when failed */ + if (status != QDF_STATUS_SUCCESS) { + if (sctx->bNewAlloc) + qdf_mem_free(sctx); + else + qdf_nbuf_pull_head(buf, head_len); + } + + return status; +} + +/** + * hif_dev_alloc_and_prepare_rx_packets() - Allocate packets for recv frames. + * @pdev : HIF device object + * @look_aheads : Look ahead information on the frames + * @messages : Number of messages + * @queue : Queue to put the allocated frames + * + * Return : QDF_STATUS_SUCCESS on success else error value + */ +QDF_STATUS hif_dev_alloc_and_prepare_rx_packets(struct hif_sdio_device *pdev, + uint32_t look_aheads[], + int messages, + HTC_PACKET_QUEUE *queue) +{ + int i, j; + bool no_recycle; + int num_messages; + HTC_PACKET *packet; + HTC_FRAME_HDR *hdr; + uint32_t full_length; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + /* lock RX while we assemble the packet buffers */ + LOCK_HIF_DEV_RX(pdev); + + for (i = 0; i < messages; i++) { + hdr = (HTC_FRAME_HDR *)&look_aheads[i]; + if (hdr->EndpointID >= ENDPOINT_MAX) { + hif_err("%s: Invalid Endpoint:%d\n", + __func__, hdr->EndpointID); + status = QDF_STATUS_E_INVAL; + break; + } + + if (hdr->PayloadLen > HTC_MAX_PAYLOAD_LENGTH) { + hif_err("%s: Payload length %d exceeds max HTC : %u", + __func__, + hdr->PayloadLen, + (uint32_t)HTC_MAX_PAYLOAD_LENGTH); + status = QDF_STATUS_E_INVAL; + break; + } + + if ((hdr->Flags & HTC_FLAGS_RECV_BUNDLE_CNT_MASK) == 0) { + /* HTC header only indicates 1 message to fetch */ + num_messages = 1; + } else { + /* HTC header indicates that every packet to follow + * has the same padded length so that it can + * be optimally fetched as a full bundle + */ + num_messages = GET_RECV_BUNDLE_COUNT(hdr->Flags); + /* the count doesn't include the starter frame, just + * a count of frames to follow + */ + num_messages++; + + hif_info("%s: HTC header : %u messages in bundle", + __func__, num_messages); + } + + full_length = DEV_CALC_RECV_PADDED_LEN(pdev, + hdr->PayloadLen + + sizeof(HTC_FRAME_HDR)); + + /* get packet buffers for each message, if there was a + * bundle detected in the header, + * use pHdr as a template to fetch all packets in the bundle + */ + for (j = 0; j < num_messages; j++) { + /* reset flag, any packets allocated using the + * RecvAlloc() API cannot be recycled on cleanup, + * they must be explicitly returned + */ + no_recycle = false; + packet = hif_dev_alloc_rx_buffer(pdev); + + if (!packet) { + /* No error, simply need to mark that + * we are waiting for buffers. + */ + pdev->RecvStateFlags |= HTC_RECV_WAIT_BUFFERS; + /* pDev->EpWaitingForBuffers = pEndpoint->Id; */ + status = QDF_STATUS_E_RESOURCES; + break; + } + /* clear flags */ + packet->PktInfo.AsRx.HTCRxFlags = 0; + packet->PktInfo.AsRx.IndicationFlags = 0; + packet->Status = QDF_STATUS_SUCCESS; + + if (no_recycle) { + /* flag that these packets cannot be recycled, + * they have to be returned to the user + */ + packet->PktInfo.AsRx.HTCRxFlags |= + HTC_RX_PKT_NO_RECYCLE; + } + /* add packet to queue (also incase we need to + * cleanup down below) + */ + HTC_PACKET_ENQUEUE(queue, packet); + + /* if (HTC_STOPPING(target)) { + * status = QDF_STATUS_E_CANCELED; + * break; + * } + */ + + /* make sure message can fit in the endpoint buffer */ + if (full_length > packet->BufferLength) { + hif_err("%s: Payload Length Error", __func__); + hif_err("%s: header reports payload: %u(%u)", + __func__, hdr->PayloadLen, + full_length); + hif_err("%s: endpoint buffer size: %d\n", + __func__, packet->BufferLength); + status = QDF_STATUS_E_INVAL; + break; + } + + if (j > 0) { + /* for messages fetched in a bundle the expected + * lookahead is unknown as we are only using the + * lookahead of the first packet as a template + * of what to expect for lengths + */ + packet->PktInfo.AsRx.HTCRxFlags |= + HTC_RX_PKT_REFRESH_HDR; + /* set it to something invalid */ + packet->PktInfo.AsRx.ExpectedHdr = 0xFFFFFFFF; + } else { + packet->PktInfo.AsRx.ExpectedHdr = + look_aheads[i]; + } + /* set the amount of data to fetch */ + packet->ActualLength = + hdr->PayloadLen + HTC_HDR_LENGTH; + if ((j == (num_messages - 1)) && + ((hdr->Flags) & HTC_FLAGS_RECV_1MORE_BLOCK)) + packet->PktInfo.AsRx.HTCRxFlags |= + HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK; + packet->Endpoint = hdr->EndpointID; + packet->Completion = NULL; + } + + if (QDF_IS_STATUS_ERROR(status)) + break; + } + + UNLOCK_HIF_DEV_RX(pdev); + + /* for NO RESOURCE error, no need to flush data queue */ + if (QDF_IS_STATUS_ERROR(status) && + (status != QDF_STATUS_E_RESOURCES)) { + while (!HTC_QUEUE_EMPTY(queue)) { + qdf_nbuf_t netbuf; + + packet = htc_packet_dequeue(queue); + if (!packet) + break; + netbuf = (qdf_nbuf_t)packet->pNetBufContext; + if (netbuf) + qdf_nbuf_free(netbuf); + } + } + if (status == QDF_STATUS_E_RESOURCES) + status = QDF_STATUS_SUCCESS; + return status; +} + +/** + * hif_dev_process_trailer() - Process the receive frame trailer + * @pdev : HIF device object + * @buffer : The buffer containing the trailer + * @length : Length of the buffer + * @next_look_aheads : The lookahead that is next + * @num_look_aheads : Number of lookahead information + * @from_endpoint : The endpoint on which the trailer is received + */ +QDF_STATUS hif_dev_process_trailer(struct hif_sdio_device *pdev, + uint8_t *buffer, int length, + uint32_t *next_look_aheads, + int *num_look_aheads, + HTC_ENDPOINT_ID from_endpoint) +{ + int orig_length; + QDF_STATUS status; + uint8_t *record_buf; + uint8_t *orig_buffer; + HTC_RECORD_HDR *record; + HTC_LOOKAHEAD_REPORT *look_ahead; + + hif_debug("%s: length:%d", __func__, length); + + orig_buffer = buffer; + orig_length = length; + status = QDF_STATUS_SUCCESS; + + while (length > 0) { + if (length < sizeof(HTC_RECORD_HDR)) { + status = QDF_STATUS_E_PROTO; + break; + } + /* these are byte aligned structs */ + record = (HTC_RECORD_HDR *)buffer; + length -= sizeof(HTC_RECORD_HDR); + buffer += sizeof(HTC_RECORD_HDR); + + if (record->Length > length) { + /* no room left in buffer for record */ + hif_err("%s: invalid record len: (%u, %u)", + __func__, record->Length, + record->RecordID); + hif_err("%s: buffer has %d bytes left", + __func__, length); + status = QDF_STATUS_E_PROTO; + break; + } + /* start of record follows the header */ + record_buf = buffer; + + switch (record->RecordID) { + case HTC_RECORD_CREDITS: + /* Process in HTC, ignore here */ + break; + case HTC_RECORD_LOOKAHEAD: + A_ASSERT(record->Length >= sizeof(*look_ahead)); + look_ahead = (HTC_LOOKAHEAD_REPORT *)record_buf; + if ((look_ahead->PreValid == + ((~look_ahead->PostValid) & 0xFF)) && + next_look_aheads) { + hif_debug("%s: look_ahead Report", __func__); + hif_debug("%s:prevalid:0x%x, postvalid:0x%x", + __func__, look_ahead->PreValid, + look_ahead->PostValid); + hif_debug("%s:from endpoint %d : %u", + __func__, from_endpoint, + look_ahead->LookAhead0); + /* look ahead bytes are valid, copy them over */ + ((uint8_t *)(&next_look_aheads[0]))[0] = + look_ahead->LookAhead0; + ((uint8_t *)(&next_look_aheads[0]))[1] = + look_ahead->LookAhead1; + ((uint8_t *)(&next_look_aheads[0]))[2] = + look_ahead->LookAhead2; + ((uint8_t *)(&next_look_aheads[0]))[3] = + look_ahead->LookAhead3; + + if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) { + debug_dump_bytes((uint8_t *) + next_look_aheads, 4, + "Next Look Ahead"); + } + /* just one normal lookahead */ + if (num_look_aheads) + *num_look_aheads = 1; + } + break; + case HTC_RECORD_LOOKAHEAD_BUNDLE: + A_ASSERT(record->Length >= + sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT)); + if ((record->Length >= + sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT)) && + next_look_aheads) { + HTC_BUNDLED_LOOKAHEAD_REPORT + *pBundledLookAheadRpt; + int i; + + pBundledLookAheadRpt = + (HTC_BUNDLED_LOOKAHEAD_REPORT *)record_buf; + + if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) { + debug_dump_bytes(record_buf, + record->Length, + "Bundle look_ahead"); + } + + if ((record->Length / + (sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT))) + > HTC_MAX_MSG_PER_BUNDLE_RX) { + /* this should never happen, the target + * restricts the number of messages per + * bundle configured by the host + */ + A_ASSERT(false); + status = QDF_STATUS_E_PROTO; + break; + } + for (i = 0; + i < + (int)(record->Length / + (sizeof + (HTC_BUNDLED_LOOKAHEAD_REPORT))); + i++) { + ((uint8_t *)(&next_look_aheads[i]))[0] = + pBundledLookAheadRpt->LookAhead0; + ((uint8_t *)(&next_look_aheads[i]))[1] = + pBundledLookAheadRpt->LookAhead1; + ((uint8_t *)(&next_look_aheads[i]))[2] = + pBundledLookAheadRpt->LookAhead2; + ((uint8_t *)(&next_look_aheads[i]))[3] = + pBundledLookAheadRpt->LookAhead3; + pBundledLookAheadRpt++; + } + if (num_look_aheads) + *num_look_aheads = i; + } + break; + default: + hif_err("%s: HIF unhandled record: id:%u length:%u", + __func__, record->RecordID, record->Length); + break; + } + + if (QDF_IS_STATUS_ERROR(status)) + break; + + /* advance buffer past this record for next time around */ + buffer += record->Length; + length -= record->Length; + } + + if (QDF_IS_STATUS_ERROR(status)) + debug_dump_bytes(orig_buffer, orig_length, + "BAD Recv Trailer"); + + hif_debug("%s: status = %d", __func__, status); + + return status; +} + +/* process a received message (i.e. strip off header, + * process any trailer data). + * note : locks must be released when this function is called + */ +QDF_STATUS hif_dev_process_recv_header(struct hif_sdio_device *pdev, + HTC_PACKET *packet, + uint32_t *next_look_aheads, + int *num_look_aheads) +{ + uint8_t temp; + uint8_t *buf; + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint16_t payloadLen; + uint32_t look_ahead, actual_length; + + buf = packet->pBuffer; + actual_length = packet->ActualLength; + + if (num_look_aheads) + *num_look_aheads = 0; + + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("+HTCProcessRecvHeader\n")); + + if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) + AR_DEBUG_PRINTBUF(buf, packet->ActualLength, "HTC Recv PKT"); + + do { + /* note, we cannot assume the alignment of pBuffer, + * so we use the safe macros to + * retrieve 16 bit fields + */ + payloadLen = HTC_GET_FIELD(buf, HTC_FRAME_HDR, + PAYLOADLEN); + + ((uint8_t *)&look_ahead)[0] = buf[0]; + ((uint8_t *)&look_ahead)[1] = buf[1]; + ((uint8_t *)&look_ahead)[2] = buf[2]; + ((uint8_t *)&look_ahead)[3] = buf[3]; + + if (packet->PktInfo.AsRx.HTCRxFlags & HTC_RX_PKT_REFRESH_HDR) { + /* refresh expected hdr, since this was unknown + * at the time we grabbed the packets + * as part of a bundle + */ + packet->PktInfo.AsRx.ExpectedHdr = look_ahead; + /* refresh actual length since we now have the + * real header + */ + packet->ActualLength = payloadLen + HTC_HDR_LENGTH; + + /* validate the actual header that was refreshed */ + if (packet->ActualLength > packet->BufferLength) { + hif_err("%s: Bundled RECV Look ahead: 0x%X", + __func__, look_ahead); + hif_err("%s: Invalid HDR payload length(%d)", + __func__, payloadLen); + /* limit this to max buffer just to print out + * some of the buffer + */ + packet->ActualLength = + min(packet->ActualLength, + packet->BufferLength); + status = QDF_STATUS_E_PROTO; + break; + } + + if (packet->Endpoint + != HTC_GET_FIELD(buf, HTC_FRAME_HDR, ENDPOINTID)) { + hif_err("%s: Refreshed HDR EP (%d)", + __func__, + HTC_GET_FIELD(buf, HTC_FRAME_HDR, + ENDPOINTID)); + hif_err("%s: doesn't match expected EP (%d)", + __func__, packet->Endpoint); + status = QDF_STATUS_E_PROTO; + break; + } + } + + if (look_ahead != packet->PktInfo.AsRx.ExpectedHdr) { + /* somehow the lookahead that gave us the full read + * length did not reflect the actual header + * in the pending message + */ + hif_err("%s: lookahead mismatch!", __func__); + hif_err("%s: pPkt:0x%lX flags:0x%X", + __func__, (unsigned long)packet, + packet->PktInfo.AsRx.HTCRxFlags); + hif_err("%s: look_ahead 0x%08X != 0x%08X", + __func__, look_ahead, + packet->PktInfo.AsRx.ExpectedHdr); +#ifdef ATH_DEBUG_MODULE + debug_dump_bytes((uint8_t *)&packet->PktInfo.AsRx. + ExpectedHdr, 4, + "Expected Message look_ahead"); + debug_dump_bytes(buf, sizeof(HTC_FRAME_HDR), + "Current Frame Header"); +#ifdef HTC_CAPTURE_LAST_FRAME + debug_dump_bytes((uint8_t *)&target->LastFrameHdr, + sizeof(HTC_FRAME_HDR), + "Last Frame Header"); + if (target->LastTrailerLength != 0) + debug_dump_bytes(target->LastTrailer, + target->LastTrailerLength, + "Last trailer"); +#endif +#endif + status = QDF_STATUS_E_PROTO; + break; + } + + /* get flags */ + temp = HTC_GET_FIELD(buf, HTC_FRAME_HDR, FLAGS); + + if (temp & HTC_FLAGS_RECV_TRAILER) { + /* this packet has a trailer */ + + /* extract the trailer length in control byte 0 */ + temp = HTC_GET_FIELD(buf, HTC_FRAME_HDR, CONTROLBYTES0); + + if ((temp < sizeof(HTC_RECORD_HDR)) || + (temp > payloadLen)) { + hif_err("%s: invalid header", + __func__); + hif_err("%s: payloadlength should be :%d", + __func__, payloadLen); + hif_err("%s: But control bytes is :%d)", + __func__, temp); + status = QDF_STATUS_E_PROTO; + break; + } + + if (packet->PktInfo.AsRx. + HTCRxFlags & HTC_RX_PKT_IGNORE_LOOKAHEAD) { + /* this packet was fetched as part of an HTC + * bundle as the lookahead is not valid. + * Next packet may have already been fetched as + * part of the bundle + */ + next_look_aheads = NULL; + num_look_aheads = NULL; + } + + /* process trailer data that follows HDR and + * application payload + */ + status = + hif_dev_process_trailer(pdev, + (buf + HTC_HDR_LENGTH + + payloadLen - temp), + temp, + next_look_aheads, + num_look_aheads, + packet->Endpoint); + + if (QDF_IS_STATUS_ERROR(status)) + break; + } + } while (false); + + if (QDF_IS_STATUS_ERROR(status)) { + /* dump the whole packet */ + debug_dump_bytes(buf, packet->ActualLength, + "BAD HTC Recv PKT"); + } else { + if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) { + if (packet->ActualLength > 0) { + AR_DEBUG_PRINTBUF(packet->pBuffer, + packet->ActualLength, + "HTC - Application Msg"); + } + } + } + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + ("-hif_dev_process_recv_header\n")); + return status; +} + +/** + * hif_dev_free_recv_pkt() - Free the allocated recv packets in the queue + * @recv_pkt_queue : The queue that contains the packets to be queued + * + * Return : NONE + */ +void hif_dev_free_recv_pkt_queue(HTC_PACKET_QUEUE *recv_pkt_queue) +{ + HTC_PACKET *packet; + qdf_nbuf_t netbuf; + + while (!HTC_QUEUE_EMPTY(recv_pkt_queue)) { + packet = htc_packet_dequeue(recv_pkt_queue); + if (!packet) + break; + netbuf = (qdf_nbuf_t)packet->pNetBufContext; + if (netbuf) + qdf_nbuf_free(netbuf); + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/transfer.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/transfer.h new file mode 100644 index 0000000000000000000000000000000000000000..31950bfeb1a2158eb5fb133cf1d05b4315241308 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/transfer/transfer.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __TRANSFER_H_ +#define __TRANSFER_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hif_sdio_internal.h" + +#if defined(CONFIG_SDIO_TRANSFER_MAILBOX) && defined(CONFIG_SDIO_TRANSFER_ADMA) +#error "-----------------------------------------------" +#error "Error - Both transfer methods cannot be enabled" +#error "-----------------------------------------------" +#endif + +#define NBUF_ALLOC_FAIL_WAIT_TIME 100 +/* high nibble */ +#define BUNDLE_COUNT_HIGH(f) (((f) & 0x0C) << 2) +/* low nibble */ +#define BUNDLE_COUNT_LOW(f) (((f) & 0xF0) >> 4) +#define GET_RECV_BUNDLE_COUNT(f) (BUNDLE_COUNT_HIGH(f) + BUNDLE_COUNT_LOW(f)) + +/* + * Data structure to record required sending context data + */ +struct hif_sendContext { + bool bNewAlloc; + struct hif_sdio_device *pDev; + qdf_nbuf_t netbuf; + unsigned int transferID; + unsigned int head_data_len; +}; + +int hif_get_send_address(struct hif_sdio_device *pdev, + uint8_t pipe, unsigned long *addr); + +QDF_STATUS hif_dev_alloc_and_prepare_rx_packets(struct hif_sdio_device *pdev, + uint32_t look_aheads[], + int messages, + HTC_PACKET_QUEUE *queue); + +QDF_STATUS hif_dev_process_trailer(struct hif_sdio_device *pdev, + uint8_t *buffer, int length, + uint32_t *next_look_aheads, + int *num_look_aheads, + HTC_ENDPOINT_ID from_endpoint); + +void hif_dev_free_recv_pkt_queue(HTC_PACKET_QUEUE *recv_pkt_queue); + +QDF_STATUS hif_dev_process_recv_header(struct hif_sdio_device *pdev, + HTC_PACKET *packet, + uint32_t *next_look_aheads, + int *num_look_aheads); +void hif_fixup_write_param(struct hif_sdio_dev *pdev, uint32_t req, + uint32_t *length, uint32_t *addr); + +#ifdef CONFIG_SDIO_TRANSFER_MAILBOX +static inline uint32_t hif_get_send_buffer_flags(struct hif_sdio_device *pdev) +{ + if (pdev) + return (uint32_t)HIF_WR_ASYNC_BLOCK_INC; + + HIF_ERROR("%s: hif obj is null. Not populating xfer flags", __func__); + + return 0; +} + +static inline int hif_sdio_bus_configure(struct hif_softc *hif_sc) +{ + return 0; +} + +#elif defined(CONFIG_SDIO_TRANSFER_ADMA) +static inline uint32_t hif_get_send_buffer_flags(struct hif_sdio_device *pdev) +{ + /* ADAM-TODO */ + return (uint32_t)HIF_WR_ASYNC_BLOCK_FIX; +} + +int hif_sdio_bus_configure(struct hif_softc *hif_sc); +#endif + +#endif /* __TRANSFER_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h new file mode 100644 index 0000000000000000000000000000000000000000..5053206fb95b2bfc459db31a5d6ea5dd43469c2a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: hif_io32_snoc.h + * + * snoc specific implementations and configurations + */ + +#ifndef __HIF_IO32_SNOC_H__ +#define __HIF_IO32_SNOC_H__ + +#include "hif.h" +#include "regtable.h" +#include "ce_reg.h" +#include "qdf_atomic.h" +#include "hif_main.h" +#include "hif_debug.h" + +static inline void ce_enable_irq_in_individual_register(struct hif_softc *scn, + int ce_id) +{ + uint32_t offset; + + offset = HOST_IE_ADDRESS + CE_BASE_ADDRESS(ce_id); + if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) { + hif_err_rl("%s: target access is not allowed", __func__); + return; + } + hif_write32_mb(scn, scn->mem + offset, 1); +} + +static inline void ce_disable_irq_in_individual_register(struct hif_softc *scn, + int ce_id) +{ + uint32_t offset; + + offset = HOST_IE_ADDRESS + CE_BASE_ADDRESS(ce_id); + if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) { + hif_err_rl("%s: target access is not allowed", __func__); + return; + } + hif_write32_mb(scn, scn->mem + offset, 0); + + if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) { + hif_err_rl("%s: target access is not allowed", __func__); + return; + } + hif_read32_mb(scn, scn->mem + offset); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb.c b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb.c new file mode 100644 index 0000000000000000000000000000000000000000..49672a5b8a9997fa35da169265a3507706c50b05 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb.c @@ -0,0 +1,859 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: if_ahb.c + * + * c file for ahb specific implementations. + */ + +#include "hif.h" +#include "target_type.h" +#include "hif_main.h" +#include "hif_debug.h" +#include "hif_io32.h" +#include "ce_main.h" +#include "ce_api.h" +#include "ce_tasklet.h" +#include "if_ahb.h" +#include "if_pci.h" +#include "ahb_api.h" +#include "pci_api.h" +#include "hif_napi.h" +#include "qal_vbus_dev.h" + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) +#define IRQF_DISABLED 0x00000020 +#endif + +#define HIF_IC_CE0_IRQ_OFFSET 4 +#define HIF_IC_MAX_IRQ 52 + +static uint8_t ic_irqnum[HIF_IC_MAX_IRQ]; +/* integrated chip irq names */ +const char *ic_irqname[HIF_IC_MAX_IRQ] = { +"misc-pulse1", +"misc-latch", +"sw-exception", +"watchdog", +"ce0", +"ce1", +"ce2", +"ce3", +"ce4", +"ce5", +"ce6", +"ce7", +"ce8", +"ce9", +"ce10", +"ce11", +"host2wbm-desc-feed", +"host2reo-re-injection", +"host2reo-command", +"host2rxdma-monitor-ring3", +"host2rxdma-monitor-ring2", +"host2rxdma-monitor-ring1", +"reo2ost-exception", +"wbm2host-rx-release", +"reo2host-status", +"reo2host-destination-ring4", +"reo2host-destination-ring3", +"reo2host-destination-ring2", +"reo2host-destination-ring1", +"rxdma2host-monitor-destination-mac3", +"rxdma2host-monitor-destination-mac2", +"rxdma2host-monitor-destination-mac1", +"ppdu-end-interrupts-mac3", +"ppdu-end-interrupts-mac2", +"ppdu-end-interrupts-mac1", +"rxdma2host-monitor-status-ring-mac3", +"rxdma2host-monitor-status-ring-mac2", +"rxdma2host-monitor-status-ring-mac1", +"host2rxdma-host-buf-ring-mac3", +"host2rxdma-host-buf-ring-mac2", +"host2rxdma-host-buf-ring-mac1", +"rxdma2host-destination-ring-mac3", +"rxdma2host-destination-ring-mac2", +"rxdma2host-destination-ring-mac1", +"host2tcl-input-ring4", +"host2tcl-input-ring3", +"host2tcl-input-ring2", +"host2tcl-input-ring1", +"wbm2host-tx-completions-ring3", +"wbm2host-tx-completions-ring2", +"wbm2host-tx-completions-ring1", +"tcl2host-status-ring", +}; + +/** hif_ahb_get_irq_name() - get irqname + * This function gives irqnumber to irqname + * mapping. + * + * @irq_no: irq number + * + * Return: irq name + */ +const char *hif_ahb_get_irq_name(int irq_no) +{ + return ic_irqname[irq_no]; +} + +/** + * hif_disable_isr() - disable isr + * + * This function disables isr and kills tasklets + * + * @hif_ctx: struct hif_softc + * + * Return: void + */ +void hif_ahb_disable_isr(struct hif_softc *scn) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + + hif_exec_kill(&scn->osc); + hif_nointrs(scn); + ce_tasklet_kill(scn); + tasklet_kill(&sc->intr_tq); + qdf_atomic_set(&scn->active_tasklet_cnt, 0); + qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0); +} + +/** + * hif_dump_registers() - dump bus debug registers + * @scn: struct hif_opaque_softc + * + * This function dumps hif bus debug registers + * + * Return: 0 for success or error code + */ +int hif_ahb_dump_registers(struct hif_softc *hif_ctx) +{ + int status; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + status = hif_dump_ce_registers(scn); + if (status) + HIF_ERROR("%s: Dump CE Registers Failed status %d", __func__, + status); + + return 0; +} + +/** + * hif_ahb_close() - hif_bus_close + * @scn: pointer to the hif context. + * + * This is a callback function for hif_bus_close. + * + * + * Return: n/a + */ +void hif_ahb_close(struct hif_softc *scn) +{ + hif_ce_close(scn); +} + +/** + * hif_bus_open() - hif_ahb open + * @hif_ctx: hif context + * @bus_type: bus type + * + * This is a callback function for hif_bus_open. + * + * Return: n/a + */ +QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type) +{ + + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + qdf_spinlock_create(&sc->irq_lock); + return hif_ce_open(hif_ctx); +} + +/** + * hif_bus_configure() - Configure the bus + * @scn: pointer to the hif context. + * + * This function configure the ahb bus + * + * return: 0 for success. nonzero for failure. + */ +int hif_ahb_bus_configure(struct hif_softc *scn) +{ + return hif_pci_bus_configure(scn); +} + +/** + * hif_configure_msi_ahb - Configure MSI interrupts + * @sc : pointer to the hif context + * + * return: 0 for success. nonzero for failure. + */ + +int hif_configure_msi_ahb(struct hif_pci_softc *sc) +{ + return 0; +} + +/** + * hif_ahb_configure_legacy_irq() - Configure Legacy IRQ + * @sc: pointer to the hif context. + * + * This function registers the irq handler and enables legacy interrupts + * + * return: 0 for success. nonzero for failure. + */ +int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc) +{ + int ret = 0; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + struct platform_device *pdev = (struct platform_device *)sc->pdev; + int irq = 0; + + /* do not support MSI or MSI IRQ failed */ + tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc); + qal_vbus_get_irq((struct qdf_pfm_hndl *)pdev, "legacy", &irq); + if (irq < 0) { + dev_err(&pdev->dev, "Unable to get irq\n"); + ret = -EFAULT; + goto end; + } + ret = request_irq(irq, hif_pci_legacy_ce_interrupt_handler, + IRQF_DISABLED, "wlan_ahb", sc); + if (ret) { + dev_err(&pdev->dev, "ath_request_irq failed\n"); + ret = -EFAULT; + goto end; + } + sc->irq = irq; + + /* Use Legacy PCI Interrupts */ + hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), + PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); + /* read once to flush */ + hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS)); + +end: + return ret; +} + +int hif_ahb_configure_irq(struct hif_pci_softc *sc) +{ + int ret = 0; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + struct platform_device *pdev = (struct platform_device *)sc->pdev; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct CE_attr *host_ce_conf = hif_state->host_ce_config; + int irq = 0; + int i; + + /* configure per CE interrupts */ + for (i = 0; i < scn->ce_count; i++) { + if (host_ce_conf[i].flags & CE_ATTR_DISABLE_INTR) + continue; + ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev, + ic_irqname[HIF_IC_CE0_IRQ_OFFSET + i], + HIF_IC_CE0_IRQ_OFFSET + i, &irq); + if (ret) { + dev_err(&pdev->dev, "get irq failed\n"); + ret = -EFAULT; + goto end; + } + + ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i] = irq; + ret = pfrm_request_irq(&pdev->dev, irq, + hif_ahb_interrupt_handler, + IRQF_TRIGGER_RISING, + ic_irqname[HIF_IC_CE0_IRQ_OFFSET + i], + &hif_state->tasklets[i]); + if (ret) { + dev_err(&pdev->dev, "ath_request_irq failed\n"); + ret = -EFAULT; + goto end; + } + hif_ahb_irq_enable(scn, i); + } + +end: + return ret; +} + +int hif_ahb_configure_grp_irq(struct hif_softc *scn, + struct hif_exec_context *hif_ext_group) +{ + int ret = 0; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + struct platform_device *pdev = (struct platform_device *)sc->pdev; + int irq = 0; + int j; + + /* configure external interrupts */ + hif_ext_group->irq_enable = &hif_ahb_exec_grp_irq_enable; + hif_ext_group->irq_disable = &hif_ahb_exec_grp_irq_disable; + hif_ext_group->irq_name = &hif_ahb_get_irq_name; + hif_ext_group->work_complete = &hif_dummy_grp_done; + + for (j = 0; j < hif_ext_group->numirq; j++) { + ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev, + ic_irqname[hif_ext_group->irq[j]], + hif_ext_group->irq[j], &irq); + if (ret) { + dev_err(&pdev->dev, "get irq failed\n"); + ret = -EFAULT; + goto end; + } + ic_irqnum[hif_ext_group->irq[j]] = irq; + hif_ext_group->os_irq[j] = irq; + } + + qdf_spin_lock_irqsave(&hif_ext_group->irq_lock); + + for (j = 0; j < hif_ext_group->numirq; j++) { + irq = hif_ext_group->os_irq[j]; + irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); + ret = pfrm_request_irq(scn->qdf_dev->dev, + irq, hif_ext_group_interrupt_handler, + IRQF_TRIGGER_RISING, + ic_irqname[hif_ext_group->irq[j]], + hif_ext_group); + if (ret) { + dev_err(&pdev->dev, "ath_request_irq failed\n"); + ret = -EFAULT; + goto end; + } + } + qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock); + + qdf_spin_lock_irqsave(&hif_ext_group->irq_lock); + hif_ext_group->irq_requested = true; + +end: + qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock); + return ret; +} + +void hif_ahb_deconfigure_grp_irq(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct hif_exec_context *hif_ext_group; + int i, j; + int irq = 0; + + /* configure external interrupts */ + for (i = 0; i < hif_state->hif_num_extgroup; i++) { + hif_ext_group = hif_state->hif_ext_group[i]; + if (hif_ext_group->irq_requested == true) { + qdf_spin_lock_irqsave(&hif_ext_group->irq_lock); + hif_ext_group->irq_requested = false; + for (j = 0; j < hif_ext_group->numirq; j++) { + irq = hif_ext_group->os_irq[j]; + hif_ext_group->irq_enabled = false; + irq_clear_status_flags(irq, + IRQ_DISABLE_UNLAZY); + } + qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock); + + /* Avoid holding the irq_lock while freeing the irq + * as the same lock is being held by the irq handler + * while disabling the irq. This causes a deadlock + * between free_irq and irq_handler. + */ + for (j = 0; j < hif_ext_group->numirq; j++) { + irq = hif_ext_group->os_irq[j]; + pfrm_free_irq(scn->qdf_dev->dev, + irq, hif_ext_group); + } + } + } +} + +irqreturn_t hif_ahb_interrupt_handler(int irq, void *context) +{ + struct ce_tasklet_entry *tasklet_entry = context; + return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry); +} + +/** + * hif_target_sync() : ensure the target is ready + * @scn: hif control structure + * + * Informs fw that we plan to use legacy interupts so that + * it can begin booting. Ensures that the fw finishes booting + * before continuing. Should be called before trying to write + * to the targets other registers for the first time. + * + * Return: none + */ +int hif_target_sync_ahb(struct hif_softc *scn) +{ + int val = 0; + int limit = 0; + + while (limit < 50) { + hif_write32_mb(scn, scn->mem + + (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS), + PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); + qdf_mdelay(10); + val = hif_read32_mb(scn, scn->mem + + (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)); + if (val == 0) + break; + limit++; + } + hif_write32_mb(scn, scn->mem + + (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS), + PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); + hif_write32_mb(scn, scn->mem + FW_INDICATOR_ADDRESS, FW_IND_HOST_READY); + if (HAS_FW_INDICATOR) { + int wait_limit = 500; + int fw_ind = 0; + + while (1) { + fw_ind = hif_read32_mb(scn, scn->mem + + FW_INDICATOR_ADDRESS); + if (fw_ind & FW_IND_INITIALIZED) + break; + if (wait_limit-- < 0) + break; + hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), + PCIE_INTR_FIRMWARE_MASK); + qdf_mdelay(10); + } + if (wait_limit < 0) { + HIF_TRACE("%s: FW signal timed out", __func__); + return -EIO; + } + HIF_TRACE("%s: Got FW signal, retries = %x", __func__, + 500-wait_limit); + } + + return 0; +} + +/** + * hif_disable_bus() - Disable the bus + * @scn : pointer to the hif context + * + * This function disables the bus and helds the target in reset state + * + * Return: none + */ +void hif_ahb_disable_bus(struct hif_softc *scn) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + void __iomem *mem; + struct platform_device *pdev = (struct platform_device *)sc->pdev; + struct resource *memres = NULL; + int mem_pa_size = 0; + struct hif_target_info *tgt_info = NULL; + struct qdf_vbus_resource *vmres = NULL; + QDF_STATUS status; + + tgt_info = &scn->target_info; + /*Disable WIFI clock input*/ + if (sc->mem) { + status = pfrm_platform_get_resource( + scn->qdf_dev->dev, + (struct qdf_pfm_hndl *)pdev, &vmres, + IORESOURCE_MEM, 0); + if (QDF_IS_STATUS_ERROR(status)) { + HIF_INFO("%s: Failed to get IORESOURCE_MEM\n", + __func__); + return; + } + memres = (struct resource *)vmres; + if (memres) + mem_pa_size = memres->end - memres->start + 1; + + /* Should not be executed on 8074 platform */ + if ((tgt_info->target_type != TARGET_TYPE_QCA8074) && + (tgt_info->target_type != TARGET_TYPE_QCA8074V2) && + (tgt_info->target_type != TARGET_TYPE_QCA6018)) { + hif_ahb_clk_enable_disable(&pdev->dev, 0); + + hif_ahb_device_reset(scn); + } + mem = (void __iomem *)sc->mem; + if (mem) { + pfrm_devm_iounmap(&pdev->dev, mem); + pfrm_devm_release_mem_region(&pdev->dev, scn->mem_pa, + mem_pa_size); + sc->mem = NULL; + } + } + scn->mem = NULL; +} + +/** + * hif_enable_bus() - Enable the bus + * @dev: dev + * @bdev: bus dev + * @bid: bus id + * @type: bus type + * + * This function enables the radio bus by enabling necessary + * clocks and waits for the target to get ready to proceed futher + * + * Return: QDF_STATUS + */ +QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type) +{ + int ret = 0; + int hif_type; + int target_type; + const struct platform_device_id *id = (struct platform_device_id *)bid; + struct platform_device *pdev = bdev; + struct hif_target_info *tgt_info = NULL; + struct resource *memres = NULL; + void __iomem *mem = NULL; + uint32_t revision_id = 0; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc); + QDF_STATUS status; + struct qdf_vbus_resource *vmres = NULL; + + sc->pdev = (struct pci_dev *)pdev; + sc->dev = &pdev->dev; + sc->devid = id->driver_data; + + ret = hif_get_device_type(id->driver_data, revision_id, + &hif_type, &target_type); + if (ret < 0) { + HIF_ERROR("%s: invalid device ret %d id %d revision_id %d", + __func__, ret, (int)id->driver_data, revision_id); + return QDF_STATUS_E_FAILURE; + } + + status = pfrm_platform_get_resource(&pdev->dev, + (struct qdf_pfm_hndl *)pdev, + &vmres, + IORESOURCE_MEM, 0); + if (QDF_IS_STATUS_ERROR(status)) { + HIF_INFO("%s: Failed to get IORESOURCE_MEM\n", __func__); + return -EIO; + } + memres = (struct resource *)vmres; + if (!memres) { + HIF_INFO("%s: Failed to get IORESOURCE_MEM\n", __func__); + return -EIO; + } + + ret = pfrm_dma_set_mask(dev, 32); + if (ret) { + HIF_INFO("ath: 32-bit DMA not available\n"); + goto err_cleanup1; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) + ret = pfrm_dma_set_mask_and_coherent(dev, 32); +#else + ret = pfrm_dma_set_coherent_mask(dev, 32); +#endif + if (ret) { + HIF_ERROR("%s: failed to set dma mask error = %d", + __func__, ret); + return ret; + } + + /* Arrange for access to Target SoC registers. */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) + status = pfrm_devm_ioremap_resource(dev, + (struct qdf_vbus_resource *)memres, + &mem); +#else + status = pfrm_devm_request_and_ioremap( + dev, + (struct qdf_vbus_resource *)memres, + &mem); +#endif + if (QDF_IS_STATUS_ERROR(status)) { + HIF_INFO("ath: ioremap error\n"); + ret = PTR_ERR(mem); + goto err_cleanup1; + } + + sc->mem = mem; + ol_sc->mem = mem; + ol_sc->mem_pa = memres->start; + + tgt_info = hif_get_target_info_handle((struct hif_opaque_softc *)ol_sc); + + tgt_info->target_type = target_type; + hif_register_tbl_attach(ol_sc, hif_type); + hif_target_register_tbl_attach(ol_sc, target_type); + + if ((tgt_info->target_type != TARGET_TYPE_QCA8074) && + (tgt_info->target_type != TARGET_TYPE_QCA8074V2) && + (tgt_info->target_type != TARGET_TYPE_QCA6018)) { + if (hif_ahb_enable_radio(sc, pdev, id) != 0) { + HIF_INFO("error in enabling soc\n"); + return -EIO; + } + + if (hif_target_sync_ahb(ol_sc) < 0) { + ret = -EIO; + goto err_target_sync; + } + } + HIF_TRACE("%s: X - hif_type = 0x%x, target_type = 0x%x", + __func__, hif_type, target_type); + + return QDF_STATUS_SUCCESS; +err_target_sync: + if ((tgt_info->target_type != TARGET_TYPE_QCA8074) && + (tgt_info->target_type != TARGET_TYPE_QCA8074V2) && + (tgt_info->target_type != TARGET_TYPE_QCA6018)) { + HIF_INFO("Error: Disabling target\n"); + hif_ahb_disable_bus(ol_sc); + } +err_cleanup1: + return ret; +} + + +/** + * hif_reset_soc() - reset soc + * + * @hif_ctx: HIF context + * + * This function resets soc and helds the + * target in reset state + * + * Return: void + */ +/* Function to reset SoC */ +void hif_ahb_reset_soc(struct hif_softc *hif_ctx) +{ + hif_ahb_device_reset(hif_ctx); +} + + +/** + * hif_nointrs() - disable IRQ + * + * @scn: struct hif_softc + * + * This function stops interrupt(s) + * + * Return: none + */ +void hif_ahb_nointrs(struct hif_softc *scn) +{ + int i; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct CE_attr *host_ce_conf = hif_state->host_ce_config; + + ce_unregister_irq(hif_state, CE_ALL_BITMAP); + + if (scn->request_irq_done == false) + return; + + if (sc->num_msi_intrs > 0) { + /* MSI interrupt(s) */ + for (i = 0; i < sc->num_msi_intrs; i++) { + pfrm_free_irq(scn->qdf_dev->dev, sc->irq + i, sc); + } + sc->num_msi_intrs = 0; + } else { + if (!scn->per_ce_irq) { + pfrm_free_irq(scn->qdf_dev->dev, sc->irq, sc); + } else { + for (i = 0; i < scn->ce_count; i++) { + if (host_ce_conf[i].flags + & CE_ATTR_DISABLE_INTR) + continue; + + pfrm_free_irq( + scn->qdf_dev->dev, + ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i], + &hif_state->tasklets[i]); + } + hif_ahb_deconfigure_grp_irq(scn); + } + } + scn->request_irq_done = false; + +} + +/** + * ce_irq_enable() - enable copy engine IRQ + * @scn: struct hif_softc + * @ce_id: ce_id + * + * This function enables the interrupt for the radio. + * + * Return: N/A + */ +void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id) +{ + uint32_t regval; + uint32_t reg_offset = 0; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id]; + struct hif_target_info *tgt_info = &scn->target_info; + + if (scn->per_ce_irq) { + if (target_ce_conf->pipedir & PIPEDIR_OUT) { + reg_offset = HOST_IE_ADDRESS; + qdf_spin_lock_irqsave(&hif_state->irq_reg_lock); + regval = hif_read32_mb(scn, scn->mem + reg_offset); + regval |= HOST_IE_REG1_CE_BIT(ce_id); + hif_write32_mb(scn, scn->mem + reg_offset, regval); + qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock); + } + if (target_ce_conf->pipedir & PIPEDIR_IN) { + reg_offset = HOST_IE_ADDRESS_2; + qdf_spin_lock_irqsave(&hif_state->irq_reg_lock); + regval = hif_read32_mb(scn, scn->mem + reg_offset); + regval |= HOST_IE_REG2_CE_BIT(ce_id); + hif_write32_mb(scn, scn->mem + reg_offset, regval); + if (tgt_info->target_type == TARGET_TYPE_QCA8074 || + tgt_info->target_type == TARGET_TYPE_QCA8074V2 || + tgt_info->target_type == TARGET_TYPE_QCA6018) { + /* Enable destination ring interrupts for + * 8074, 8074V2 and 6018 + */ + regval = hif_read32_mb(scn, scn->mem + + HOST_IE_ADDRESS_3); + regval |= HOST_IE_REG3_CE_BIT(ce_id); + + hif_write32_mb(scn, scn->mem + + HOST_IE_ADDRESS_3, regval); + } + qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock); + } + } else { + hif_pci_irq_enable(scn, ce_id); + } +} + +/** + * ce_irq_disable() - disable copy engine IRQ + * @scn: struct hif_softc + * @ce_id: ce_id + * + * Return: N/A + */ +void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id) +{ + uint32_t regval; + uint32_t reg_offset = 0; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id]; + struct hif_target_info *tgt_info = &scn->target_info; + + if (scn->per_ce_irq) { + if (target_ce_conf->pipedir & PIPEDIR_OUT) { + reg_offset = HOST_IE_ADDRESS; + qdf_spin_lock_irqsave(&hif_state->irq_reg_lock); + regval = hif_read32_mb(scn, scn->mem + reg_offset); + regval &= ~HOST_IE_REG1_CE_BIT(ce_id); + hif_write32_mb(scn, scn->mem + reg_offset, regval); + qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock); + } + if (target_ce_conf->pipedir & PIPEDIR_IN) { + reg_offset = HOST_IE_ADDRESS_2; + qdf_spin_lock_irqsave(&hif_state->irq_reg_lock); + regval = hif_read32_mb(scn, scn->mem + reg_offset); + regval &= ~HOST_IE_REG2_CE_BIT(ce_id); + hif_write32_mb(scn, scn->mem + reg_offset, regval); + if (tgt_info->target_type == TARGET_TYPE_QCA8074 || + tgt_info->target_type == TARGET_TYPE_QCA8074V2 || + tgt_info->target_type == TARGET_TYPE_QCA6018) { + /* Disable destination ring interrupts for + * 8074, 8074V2 and 6018 + */ + regval = hif_read32_mb(scn, scn->mem + + HOST_IE_ADDRESS_3); + regval &= ~HOST_IE_REG3_CE_BIT(ce_id); + + hif_write32_mb(scn, scn->mem + + HOST_IE_ADDRESS_3, regval); + } + qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock); + } + } +} + +void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group) +{ + int i; + + qdf_spin_lock_irqsave(&hif_ext_group->irq_lock); + if (hif_ext_group->irq_enabled) { + for (i = 0; i < hif_ext_group->numirq; i++) { + disable_irq_nosync(hif_ext_group->os_irq[i]); + } + hif_ext_group->irq_enabled = false; + } + qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock); +} + +void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group) +{ + int i; + + qdf_spin_lock_irqsave(&hif_ext_group->irq_lock); + if (hif_ext_group->irq_requested && !hif_ext_group->irq_enabled) { + for (i = 0; i < hif_ext_group->numirq; i++) { + enable_irq(hif_ext_group->os_irq[i]); + } + hif_ext_group->irq_enabled = true; + } + qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock); +} + +/** + * hif_ahb_needs_bmi() - return true if the soc needs bmi through the driver + * @scn: hif context + * + * Return: true if soc needs driver bmi otherwise false + */ +bool hif_ahb_needs_bmi(struct hif_softc *scn) +{ + return !ce_srng_based(scn); +} + +void hif_ahb_display_stats(struct hif_softc *scn) +{ + if (!scn) { + HIF_ERROR("%s, hif_scn null", __func__); + return; + } + hif_display_ce_stats(scn); +} + +void hif_ahb_clear_stats(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + if (!hif_state) { + HIF_ERROR("%s, hif_state null", __func__); + return; + } + hif_clear_ce_stats(hif_state); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb.h b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb.h new file mode 100644 index 0000000000000000000000000000000000000000..6ab4568856f0d26b85fb6bef340bb017ce026637 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: if_ahb.h + * + * h file for ahb specific implementations. + */ + +#ifndef __IF_AHB_H +#define __IF_AHB_H + +#define GCC_BASE 0x1800000 +#define GCC_SIZE 0x60000 +#define GCC_FEPLL_PLL_DIV 0x2f020 +#define GCC_FEPLL_PLL_CLK_WIFI_0_SEL_MASK 0x00000300 +#define GCC_FEPLL_PLL_CLK_WIFI_0_SEL_SHIFT 8 +#define GCC_FEPLL_PLL_CLK_WIFI_1_SEL_MASK 0x00003000 +#define GCC_FEPLL_PLL_CLK_WIFI_1_SEL_SHIFT 12 + + +/* These registers are outsize Wifi space. */ +/* TBD: Should we add these offsets as device tree properties? */ +#define TCSR_BASE 0x1900000 +#define TCSR_SIZE 0x80000 +#define TCSR_WIFI0_GLB_CFG 0x49000 +#define TCSR_WIFI1_GLB_CFG 0x49004 +#define TCSR_WCSS0_HALTREQ 0x52000 +#define TCSR_WCSS1_HALTREQ 0x52004 +#define TCSR_WCSS0_HALTACK 0x52010 +#define TCSR_WCSS1_HALTACK 0x52014 +#define ATH_AHB_RESET_WAIT_MAX 10 /* Ms */ + +irqreturn_t hif_ahb_interrupt_handler(int irq, void *context); + +#endif + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb_reset.c b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb_reset.c new file mode 100644 index 0000000000000000000000000000000000000000..963678aadfc236eaa70f260fc9f4b7e77ec828da --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb_reset.c @@ -0,0 +1,440 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: if_ahb_reset.c + * + * c file for ahb ipq4019 specific implementations. + */ + +#include "hif.h" +#include "target_type.h" +#include "hif_main.h" +#include "hif_debug.h" +#include "hif_io32.h" +#include "ce_main.h" +#include "ce_tasklet.h" +#include "ahb_api.h" +#include "if_ahb.h" +#include "qal_vbus_dev.h" + +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) +#include +#endif + +/** + * clk_enable_disable() - Enable/disable clock + * @dev : pointer to device structure + * @str : clock name + * @enable : should be true, if the clock needs to be enabled + * should be false, if the clock needs to be enabled + * + * This is a helper function for hif_ahb_clk_enable_disable to enable + * disable clocks. + * clk_prepare_enable will enable the clock + * clk_disable_unprepare will disable the clock + * + * Return: zero on success, non-zero incase of error. + */ + +static int clk_enable_disable(struct device *dev, const char *str, int enable) +{ + struct clk *clk_t = NULL; + int ret; + QDF_STATUS status; + + clk_t = clk_get(dev, str); + if (IS_ERR(clk_t)) { + HIF_INFO("%s: Failed to get %s clk %ld\n", + __func__, str, PTR_ERR(clk_t)); + return -EFAULT; + } + if (true == enable) { + /* Prepare and Enable clk */ + status = qal_vbus_enable_devclk((struct qdf_dev_clk *)clk_t); + ret = qdf_status_to_os_return(status); + if (ret) { + HIF_INFO("%s: err enabling clk %s , error:%d\n", + __func__, str, ret); + } + } else { + /* Disable and unprepare clk */ + status = qal_vbus_disable_devclk((struct qdf_dev_clk *)clk_t); + ret = qdf_status_to_os_return(status); + } + return ret; +} + + +/** + * hif_ahb_clk_enable_disable() - Enable/disable ahb clock + * @dev : pointer to device structure + * @enable : should be true, if the clock needs to be enabled + * should be false, if the clock needs to be enabled + * + * This functions helps to enable/disable all the necesasary clocks + * for bus access. + * + * Return: zero on success, non-zero incase of error + */ +int hif_ahb_clk_enable_disable(struct device *dev, int enable) +{ + int ret; + + ret = clk_enable_disable(dev, "wifi_wcss_cmd", enable); + if (ret) + return ret; + ret = clk_enable_disable(dev, "wifi_wcss_ref", enable); + if (ret) + return ret; + ret = clk_enable_disable(dev, "wifi_wcss_rtc", enable); + if (ret) + return ret; + return 0; +} + +/** + * hif_enable_radio() - Enable the target radio. + * @sc : pointer to the hif context + * + * This function helps to release the target from reset state + * + * Return : zero on success, non-zero incase of error. + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) +int hif_ahb_enable_radio(struct hif_pci_softc *sc, + struct platform_device *pdev, + const struct platform_device_id *id) +{ + struct reset_control *reset_ctl = NULL; + uint32_t msi_addr, msi_base, wifi_core_id; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + struct device_node *dev_node = pdev->dev.of_node; + bool msienable = false; + int ret = 0; + struct qdf_vbus_rstctl *vrstctl = NULL; + + ret = of_property_read_u32(dev_node, "qca,msi_addr", &msi_addr); + if (ret) { + HIF_INFO("%s: Unable to get msi_addr - error:%d\n", + __func__, ret); + return -EIO; + } + ret = of_property_read_u32(dev_node, "qca,msi_base", &msi_base); + if (ret) { + HIF_INFO("%s: Unable to get msi_base - error:%d\n", + __func__, ret); + return -EIO; + } + ret = of_property_read_u32(dev_node, "core-id", &wifi_core_id); + if (ret) { + HIF_INFO("%s: Unable to get core-id - error:%d\n", + __func__, ret); + return -EIO; + } + + /* Program the above values into Wifi scratch regists */ + if (msienable) { + hif_write32_mb(sc, sc->mem + FW_AXI_MSI_ADDR, msi_addr); + hif_write32_mb(sc, sc->mem + FW_AXI_MSI_DATA, msi_base); + } + + /* TBD: Temporary changes. Frequency should be + * retrieved through clk_xxx once kernel GCC driver is available + */ + { + void __iomem *mem_gcc; + uint32_t clk_sel; + uint32_t gcc_fepll_pll_div; + uint32_t wifi_cpu_freq[4] = {266700000, 250000000, 222200000, + 200000000}; + uint32_t current_freq = 0; + + /* Enable WIFI clock input */ + if (scn->target_info.target_type == TARGET_TYPE_IPQ4019) { + ret = hif_ahb_clk_enable_disable(&pdev->dev, 1); + if (ret) { + HIF_INFO("%s:Error while enabling clock :%d\n", + __func__, ret); + return ret; + } + } + + mem_gcc = ioremap_nocache(GCC_BASE, GCC_SIZE); + if (IS_ERR(mem_gcc)) { + HIF_INFO("%s: GCC ioremap failed\n", __func__); + return PTR_ERR(mem_gcc); + } + gcc_fepll_pll_div = hif_read32_mb(sc, mem_gcc + + GCC_FEPLL_PLL_DIV); + clk_sel = (wifi_core_id == 0) ? ((gcc_fepll_pll_div & + GCC_FEPLL_PLL_CLK_WIFI_0_SEL_MASK) >> + GCC_FEPLL_PLL_CLK_WIFI_0_SEL_SHIFT) : + ((gcc_fepll_pll_div & GCC_FEPLL_PLL_CLK_WIFI_1_SEL_MASK) + >> GCC_FEPLL_PLL_CLK_WIFI_1_SEL_SHIFT); + current_freq = wifi_cpu_freq[clk_sel]; + + HIF_INFO("Wifi%d CPU frequency %u\n", wifi_core_id, + current_freq); + hif_write32_mb(sc, sc->mem + FW_CPU_PLL_CONFIG, + gcc_fepll_pll_div); + iounmap(mem_gcc); + } + + /* De-assert radio cold reset */ + qal_vbus_get_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + "wifi_radio_cold", &vrstctl); + reset_ctl = (struct reset_control *)vrstctl; + if (IS_ERR(reset_ctl)) { + HIF_INFO("%s: Failed to get radio cold reset control\n", + __func__); + ret = PTR_ERR(reset_ctl); + goto err_reset; + } + qal_vbus_deactivate_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)reset_ctl); + qal_vbus_release_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)reset_ctl); + + /* De-assert radio warm reset */ + qal_vbus_get_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + "wifi_radio_warm", &vrstctl); + reset_ctl = (struct reset_control *)vrstctl; + if (IS_ERR(reset_ctl)) { + HIF_INFO("%s: Failed to get radio warm reset control\n", + __func__); + ret = PTR_ERR(reset_ctl); + goto err_reset; + } + qal_vbus_deactivate_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)reset_ctl); + qal_vbus_release_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)reset_ctl); + + /* De-assert radio srif reset */ + qal_vbus_get_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + "wifi_radio_srif", &vrstctl); + reset_ctl = (struct reset_control *)vrstctl; + if (IS_ERR(reset_ctl)) { + HIF_INFO("%s: Failed to get radio srif reset control\n", + __func__); + ret = PTR_ERR(reset_ctl); + goto err_reset; + } + qal_vbus_deactivate_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)reset_ctl); + qal_vbus_release_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)reset_ctl); + + /* De-assert target CPU reset */ + qal_vbus_get_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + "wifi_cpu_init", &vrstctl); + reset_ctl = (struct reset_control *)vrstctl; + if (IS_ERR(reset_ctl)) { + HIF_INFO("%s: Failed to get cpu init reset control", __func__); + ret = PTR_ERR(reset_ctl); + goto err_reset; + } + qal_vbus_deactivate_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)reset_ctl); + qal_vbus_release_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)reset_ctl); + + return 0; + +err_reset: + return -EIO; +} +#else +int hif_ahb_enable_radio(struct hif_pci_softc *sc, + struct platform_device *pdev, + const struct platform_device_id *id) +{ + qdf_print("%s:%d:Reset routines not available in kernel version.", + __func__, __LINE__); + return -EIO; +} +#endif + +/* "wifi_core_warm" is the other reset type */ +#define AHB_RESET_TYPE "wifi_core_cold" + +/** + * hif_ahb_device_reset() - Disable the radio and held the radio is reset state. + * @scn : pointer to the hif context + * + * This function will hold the target in reset state. + * Will be called while unload the driver or any graceful unload path. + * + * Return : n/a. + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) +void hif_ahb_device_reset(struct hif_softc *scn) +{ + struct reset_control *resetctl = NULL; + struct reset_control *core_resetctl = NULL; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + struct platform_device *pdev = (struct platform_device *)(sc->pdev); + uint32_t glb_cfg_offset; + uint32_t haltreq_offset; + uint32_t haltack_offset; + void __iomem *mem_tcsr; + uint32_t wifi_core_id = 0XFFFFFFFF; + uint32_t reg_value; + int wait_limit = ATH_AHB_RESET_WAIT_MAX; + struct qdf_vbus_rstctl *vrstctl = NULL; + + + wifi_core_id = hif_read32_mb(sc, sc->mem + + WLAN_SUBSYSTEM_CORE_ID_ADDRESS); + glb_cfg_offset = (wifi_core_id == 0) ? TCSR_WIFI0_GLB_CFG : + TCSR_WIFI1_GLB_CFG; + haltreq_offset = (wifi_core_id == 0) ? TCSR_WCSS0_HALTREQ : + TCSR_WCSS1_HALTREQ; + haltack_offset = (wifi_core_id == 0) ? TCSR_WCSS0_HALTACK : + TCSR_WCSS1_HALTACK; + + mem_tcsr = ioremap_nocache(TCSR_BASE, TCSR_SIZE); + if (IS_ERR(mem_tcsr)) { + HIF_INFO("%s: TCSR ioremap failed\n", __func__); + return; + } + reg_value = hif_read32_mb(sc, mem_tcsr + haltreq_offset); + hif_write32_mb(sc, mem_tcsr + haltreq_offset, reg_value | 0x1); + /* Wait for halt ack before asserting reset */ + while (wait_limit) { + + if (hif_read32_mb(sc, mem_tcsr + haltack_offset) & 0x1) + break; + + qdf_mdelay(1); + wait_limit--; + } + + reg_value = hif_read32_mb(sc, mem_tcsr + glb_cfg_offset); + hif_write32_mb(sc, mem_tcsr + glb_cfg_offset, reg_value | (1 << 25)); + + qal_vbus_get_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + AHB_RESET_TYPE, &vrstctl); + core_resetctl = (struct reset_control *)vrstctl; + if (IS_ERR(core_resetctl)) { + HIF_INFO("Failed to get wifi core cold reset control\n"); + return; + } + + /* Reset wifi core */ + qal_vbus_activate_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)core_resetctl); + + /* TBD: Check if we should also assert other bits (radio_cold, radio_ + * warm, radio_srif, cpu_ini) + */ + qdf_mdelay(1); /* TBD: Get reqd delay from HW team */ + + /* Assert radio cold reset */ + qal_vbus_get_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + "wifi_radio_cold", &vrstctl); + resetctl = (struct reset_control *)vrstctl; + if (IS_ERR(resetctl)) { + HIF_INFO("%s: Failed to get radio cold reset control\n", + __func__); + return; + } + qal_vbus_activate_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)resetctl); + qdf_mdelay(1); /* TBD: Get reqd delay from HW team */ + qal_vbus_release_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)resetctl); + + /* Assert radio warm reset */ + qal_vbus_get_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + "wifi_radio_warm", &vrstctl); + resetctl = (struct reset_control *)vrstctl; + if (IS_ERR(resetctl)) { + HIF_INFO("%s: Failed to get radio warm reset control\n", + __func__); + return; + } + qal_vbus_activate_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)resetctl); + qdf_mdelay(1); /* TBD: Get reqd delay from HW team */ + qal_vbus_release_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)resetctl); + + /* Assert radio srif reset */ + qal_vbus_get_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + "wifi_radio_srif", &vrstctl); + resetctl = (struct reset_control *)vrstctl; + if (IS_ERR(resetctl)) { + HIF_INFO("%s: Failed to get radio srif reset control\n", + __func__); + return; + } + qal_vbus_activate_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)resetctl); + qdf_mdelay(1); /* TBD: Get reqd delay from HW team */ + qal_vbus_release_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)resetctl); + + /* Assert target CPU reset */ + qal_vbus_get_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + "wifi_cpu_init", &vrstctl); + resetctl = (struct reset_control *)vrstctl; + if (IS_ERR(resetctl)) { + HIF_INFO("%s: Failed to get cpu init reset control", __func__); + return; + } + qal_vbus_activate_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)resetctl); + qdf_mdelay(10); /* TBD: Get reqd delay from HW team */ + qal_vbus_release_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)resetctl); + + /* Clear gbl_cfg and haltreq before clearing Wifi core reset */ + reg_value = hif_read32_mb(sc, mem_tcsr + haltreq_offset); + hif_write32_mb(sc, mem_tcsr + haltreq_offset, reg_value & ~0x1); + reg_value = hif_read32_mb(sc, mem_tcsr + glb_cfg_offset); + hif_write32_mb(sc, mem_tcsr + glb_cfg_offset, reg_value & ~(1 << 25)); + + /* de-assert wifi core reset */ + qal_vbus_deactivate_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)core_resetctl); + + qdf_mdelay(1); /* TBD: Get reqd delay from HW team */ + + /* TBD: Check if we should de-assert other bits here */ + qal_vbus_release_dev_rstctl((struct qdf_pfm_hndl *)&pdev->dev, + (struct qdf_vbus_rstctl *)core_resetctl); + iounmap(mem_tcsr); + HIF_INFO("Reset complete for wifi core id : %d\n", wifi_core_id); +} +#else +void hif_ahb_device_reset(struct hif_softc *scn) +{ + qdf_print("%s:%d:Reset routines not available in kernel version.", + __func__, __LINE__); +} +#endif + + + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_snoc.c b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_snoc.c new file mode 100644 index 0000000000000000000000000000000000000000..51350887fecceb656e97ebc012902ea4d98a5f44 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_snoc.c @@ -0,0 +1,482 @@ +/* + * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: if_snoc.c + * + * c file for snoc specif implementations. + */ + +#include "hif.h" +#include "hif_main.h" +#include "hif_debug.h" +#include "hif_io32.h" +#include "ce_main.h" +#include "ce_tasklet.h" +#include "ce_api.h" +#include "ce_internal.h" +#include "snoc_api.h" +#include "pld_common.h" +#include "qdf_util.h" +#ifdef IPA_OFFLOAD +#include +#endif +#include "target_type.h" + +/** + * hif_disable_isr(): disable isr + * + * This function disables isr and kills tasklets + * + * @hif_ctx: struct hif_softc + * + * Return: void + */ +void hif_snoc_disable_isr(struct hif_softc *scn) +{ + hif_exec_kill(&scn->osc); + hif_nointrs(scn); + ce_tasklet_kill(scn); + qdf_atomic_set(&scn->active_tasklet_cnt, 0); + qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0); +} + +/** + * hif_dump_registers(): dump bus debug registers + * @hif_ctx: struct hif_opaque_softc + * + * This function dumps hif bus debug registers + * + * Return: 0 for success or error code + */ +int hif_snoc_dump_registers(struct hif_softc *hif_ctx) +{ + int status; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + status = hif_dump_ce_registers(scn); + if (status) + HIF_ERROR("%s: Dump CE Registers Failed", __func__); + + return 0; +} + +void hif_snoc_display_stats(struct hif_softc *hif_ctx) +{ + if (!hif_ctx) { + HIF_ERROR("%s, hif_ctx null", __func__); + return; + } + hif_display_ce_stats(hif_ctx); +} + +void hif_snoc_clear_stats(struct hif_softc *hif_ctx) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + + if (!hif_state) { + HIF_ERROR("%s, hif_ctx null", __func__); + return; + } + hif_clear_ce_stats(hif_state); +} + +/** + * hif_snoc_close(): hif_bus_close + * + * Return: n/a + */ +void hif_snoc_close(struct hif_softc *scn) +{ + hif_ce_close(scn); +} + +/** + * hif_bus_open(): hif_bus_open + * @hif_ctx: hif context + * @bus_type: bus type + * + * Return: n/a + */ +QDF_STATUS hif_snoc_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type) +{ + return hif_ce_open(hif_ctx); +} + +/** + * hif_snoc_get_soc_info() - populates scn with hw info + * + * fills in the virtual and physical base address as well as + * soc version info. + * + * return 0 or QDF_STATUS_E_FAILURE + */ +static QDF_STATUS hif_snoc_get_soc_info(struct hif_softc *scn) +{ + int ret; + struct pld_soc_info soc_info; + + qdf_mem_zero(&soc_info, sizeof(soc_info)); + + ret = pld_get_soc_info(scn->qdf_dev->dev, &soc_info); + if (ret < 0) { + HIF_ERROR("%s: pld_get_soc_info error = %d", __func__, ret); + return QDF_STATUS_E_FAILURE; + } + + scn->mem = soc_info.v_addr; + scn->mem_pa = soc_info.p_addr; + + scn->target_info.soc_version = soc_info.soc_id; + scn->target_info.target_version = soc_info.soc_id; + scn->target_info.target_revision = 0; + return QDF_STATUS_SUCCESS; +} + +/** + * hif_bus_configure() - configure the snoc bus + * @scn: pointer to the hif context. + * + * return: 0 for success. nonzero for failure. + */ +int hif_snoc_bus_configure(struct hif_softc *scn) +{ + int ret; + uint8_t wake_ce_id; + + ret = hif_snoc_get_soc_info(scn); + if (ret) + return ret; + + hif_ce_prepare_config(scn); + + ret = hif_wlan_enable(scn); + if (ret) { + HIF_ERROR("%s: hif_wlan_enable error = %d", + __func__, ret); + return ret; + } + + ret = hif_config_ce(scn); + if (ret) + goto wlan_disable; + + ret = hif_get_wake_ce_id(scn, &wake_ce_id); + if (ret) + goto unconfig_ce; + + scn->wake_irq = pld_get_irq(scn->qdf_dev->dev, wake_ce_id); + + HIF_INFO(FL("expecting wake from ce %d, irq %d"), + wake_ce_id, scn->wake_irq); + + return 0; + +unconfig_ce: + hif_unconfig_ce(scn); + +wlan_disable: + hif_wlan_disable(scn); + + return ret; +} + +/** + * hif_snoc_get_target_type(): Get the target type + * + * This function is used to query the target type. + * + * @ol_sc: hif_softc struct pointer + * @dev: device pointer + * @bdev: bus dev pointer + * @bid: bus id pointer + * @hif_type: HIF type such as HIF_TYPE_QCA6180 + * @target_type: target type such as TARGET_TYPE_QCA6180 + * + * Return: 0 for success + */ +static inline int hif_snoc_get_target_type(struct hif_softc *ol_sc, + struct device *dev, void *bdev, const struct hif_bus_id *bid, + uint32_t *hif_type, uint32_t *target_type) +{ + /* TODO: need to use HW version. Hard code for now */ +#ifdef QCA_WIFI_3_0_ADRASTEA + *hif_type = HIF_TYPE_ADRASTEA; + *target_type = TARGET_TYPE_ADRASTEA; +#else + *hif_type = 0; + *target_type = 0; +#endif + return 0; +} + +#ifdef IPA_OFFLOAD +static int hif_set_dma_coherent_mask(qdf_device_t osdev) +{ + uint8_t addr_bits; + + if (false == hif_get_ipa_present()) + return qdf_set_dma_coherent_mask(osdev->dev, + DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE); + + if (hif_get_ipa_hw_type() < IPA_HW_v3_0) + addr_bits = DMA_COHERENT_MASK_BELOW_IPA_VER_3; + else + addr_bits = DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE; + + return qdf_set_dma_coherent_mask(osdev->dev, addr_bits); +} +#else +static int hif_set_dma_coherent_mask(qdf_device_t osdev) +{ + return qdf_set_dma_coherent_mask(osdev->dev, 37); +} +#endif + +/** + * hif_enable_bus(): hif_enable_bus + * @dev: dev + * @bdev: bus dev + * @bid: bus id + * @type: bus type + * + * Return: QDF_STATUS + */ +QDF_STATUS hif_snoc_enable_bus(struct hif_softc *ol_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type) +{ + int ret; + int hif_type; + int target_type; + + if (!ol_sc) { + HIF_ERROR("%s: hif_ctx is NULL", __func__); + return QDF_STATUS_E_NOMEM; + } + + ret = hif_set_dma_coherent_mask(ol_sc->qdf_dev); + if (ret) { + HIF_ERROR("%s: failed to set dma mask error = %d", + __func__, ret); + return ret; + } + + ret = qdf_device_init_wakeup(ol_sc->qdf_dev, true); + if (ret == -EEXIST) + HIF_WARN("%s: device_init_wakeup already done", + __func__); + else if (ret) { + HIF_ERROR("%s: device_init_wakeup: err= %d", + __func__, ret); + return ret; + } + + ret = hif_snoc_get_target_type(ol_sc, dev, bdev, bid, + &hif_type, &target_type); + if (ret < 0) { + HIF_ERROR("%s: invalid device id/revision_id", __func__); + return QDF_STATUS_E_FAILURE; + } + + ol_sc->target_info.target_type = target_type; + + hif_register_tbl_attach(ol_sc, hif_type); + hif_target_register_tbl_attach(ol_sc, target_type); + + /* the bus should remain on durring suspend for snoc */ + hif_vote_link_up(GET_HIF_OPAQUE_HDL(ol_sc)); + + HIF_DBG("%s: X - hif_type = 0x%x, target_type = 0x%x", + __func__, hif_type, target_type); + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_disable_bus(): hif_disable_bus + * + * This function disables the bus + * + * @bdev: bus dev + * + * Return: none + */ +void hif_snoc_disable_bus(struct hif_softc *scn) +{ + int ret; + + hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn)); + + ret = qdf_device_init_wakeup(scn->qdf_dev, false); + if (ret) + HIF_ERROR("%s: device_init_wakeup: err %d", __func__, ret); +} + +/** + * hif_nointrs(): disable IRQ + * + * This function stops interrupt(s) + * + * @scn: struct hif_softc + * + * Return: none + */ +void hif_snoc_nointrs(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + ce_unregister_irq(hif_state, CE_ALL_BITMAP); +} + +/** + * ce_irq_enable() - enable copy engine IRQ + * @scn: struct hif_softc + * @ce_id: ce_id + * + * Return: N/A + */ +void hif_snoc_irq_enable(struct hif_softc *scn, + int ce_id) +{ + ce_enable_irq_in_individual_register(scn, ce_id); +} + +/** + * ce_irq_disable() - disable copy engine IRQ + * @scn: struct hif_softc + * @ce_id: ce_id + * + * Return: N/A + */ +void hif_snoc_irq_disable(struct hif_softc *scn, int ce_id) +{ + ce_disable_irq_in_individual_register(scn, ce_id); +} + +/* + * hif_snoc_setup_wakeup_sources() - enable/disable irq wake on correct irqs + * @hif_softc: hif context + * + * Firmware will send a wakeup request to the HTC_CTRL_RSVD_SVC when waking up + * the host driver. Ensure that the copy complete interrupt from this copy + * engine can wake up the apps processor. + * + * Return: 0 for success + */ +static +QDF_STATUS hif_snoc_setup_wakeup_sources(struct hif_softc *scn, bool enable) +{ + int ret; + + if (enable) + ret = enable_irq_wake(scn->wake_irq); + else + ret = disable_irq_wake(scn->wake_irq); + + if (ret) { + HIF_ERROR("%s: Fail to setup wake IRQ!", __func__); + return QDF_STATUS_E_RESOURCES; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_snoc_bus_suspend() - prepare to suspend the bus + * @scn: hif context + * + * Setup wakeup interrupt configuration. + * Disable CE interrupts (wakeup interrupt will still wake apps) + * Drain tasklets. - make sure that we don't suspend while processing + * the wakeup message. + * + * Return: 0 on success. + */ +int hif_snoc_bus_suspend(struct hif_softc *scn) +{ + if (hif_snoc_setup_wakeup_sources(scn, true) != QDF_STATUS_SUCCESS) + return -EFAULT; + return 0; +} + +/** + * hif_snoc_bus_resume() - snoc bus resume function + * @scn: hif context + * + * Clear wakeup interrupt configuration. + * Reenable ce interrupts + * + * Return: 0 on success + */ +int hif_snoc_bus_resume(struct hif_softc *scn) +{ + if (hif_snoc_setup_wakeup_sources(scn, false) != QDF_STATUS_SUCCESS) + QDF_BUG(0); + + return 0; +} + +/** + * hif_snoc_bus_suspend_noirq() - ensure there are no pending transactions + * @scn: hif context + * + * Ensure that if we received the wakeup message before the irq + * was disabled that the message is pocessed before suspending. + * + * Return: -EBUSY if we fail to flush the tasklets. + */ +int hif_snoc_bus_suspend_noirq(struct hif_softc *scn) +{ + if (hif_drain_tasklets(scn) != 0) + return -EBUSY; + return 0; +} + +int hif_snoc_map_ce_to_irq(struct hif_softc *scn, int ce_id) +{ + return pld_get_irq(scn->qdf_dev->dev, ce_id); +} + +/** + * hif_is_target_register_access_allowed(): Check target register access allow + * @scn: HIF Context + * + * This function help to check whether target register access is allowed or not + * + * Return: true if target access is allowed else false + */ +bool hif_is_target_register_access_allowed(struct hif_softc *scn) +{ + if (hif_is_recovery_in_progress(scn)) + return hif_is_target_ready(scn); + else + return true; +} + +/** + * hif_snoc_needs_bmi() - return true if the soc needs bmi through the driver + * @scn: hif context + * + * Return: true if soc needs driver bmi otherwise false + */ +bool hif_snoc_needs_bmi(struct hif_softc *scn) +{ + return false; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/usb/hif_usb.c b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/hif_usb.c new file mode 100644 index 0000000000000000000000000000000000000000..8ed33841763e4952619265d7b8bbca6e1f532267 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/hif_usb.c @@ -0,0 +1,964 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "qdf_net_types.h" +#include +#include +#include +#define ATH_MODULE_NAME hif +#include +#include "qdf_module.h" +#include "hif_usb_internal.h" +#include "if_usb.h" +#include "usb_api.h" +#include "target_type.h" + +#if defined(WLAN_DEBUG) || defined(DEBUG) +static ATH_DEBUG_MASK_DESCRIPTION g_hif_debug_description[] = { + {USB_HIF_DEBUG_CTRL_TRANS, "Control Transfers"}, + {USB_HIF_DEBUG_BULK_IN, "BULK In Transfers"}, + {USB_HIF_DEBUG_BULK_OUT, "BULK Out Transfers"}, + {USB_HIF_DEBUG_DUMP_DATA, "Dump data"}, + {USB_HIF_DEBUG_ENUM, "Enumeration"}, +}; + +ATH_DEBUG_INSTANTIATE_MODULE_VAR(hif, + "hif", + "USB Host Interface", + ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_INFO | + USB_HIF_DEBUG_ENUM, + ATH_DEBUG_DESCRIPTION_COUNT + (g_hif_debug_description), + g_hif_debug_description); + +#endif + +#ifdef USB_ISOC_SUPPORT +unsigned int hif_usb_isoch_vo = 1; +#else +unsigned int hif_usb_isoch_vo; +#endif +unsigned int hif_usb_disable_rxdata2 = 1; + +/** + * usb_hif_usb_transmit_complete() - completion routing for tx urb's + * @urb: pointer to urb for which tx completion is called + * + * Return: none + */ +static void usb_hif_usb_transmit_complete(struct urb *urb) +{ + struct HIF_URB_CONTEXT *urb_context = + (struct HIF_URB_CONTEXT *)urb->context; + qdf_nbuf_t buf; + struct HIF_USB_PIPE *pipe = urb_context->pipe; + struct hif_usb_send_context *send_context; + + HIF_DBG("+%s: pipe: %d, stat:%d, len:%d", __func__, + pipe->logical_pipe_num, urb->status, urb->actual_length); + + /* this urb is not pending anymore */ + usb_hif_remove_pending_transfer(urb_context); + + if (urb->status != 0) { + HIF_ERROR("%s: pipe: %d, failed:%d", + __func__, pipe->logical_pipe_num, urb->status); + } + + buf = urb_context->buf; + send_context = urb_context->send_context; + + if (send_context->new_alloc) + qdf_mem_free(send_context); + else + qdf_nbuf_pull_head(buf, send_context->head_data_len); + + urb_context->buf = NULL; + usb_hif_cleanup_transmit_urb(urb_context); + + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, buf); + HIF_USB_SCHEDULE_WORK(pipe); + + HIF_DBG("-%s", __func__); +} + +/** + * hif_send_internal() - HIF internal routine to prepare and submit tx urbs + * @hif_usb_device: pointer to HIF_DEVICE_USB structure + * @pipe_id: HIF pipe on which data is to be sent + * @hdr_buf: any header buf to be prepended, currently ignored + * @buf: qdf_nbuf_t containing data to be transmitted + * @nbytes: number of bytes to be transmitted + * + * Return: QDF_STATUS_SUCCESS on success and error QDF status on failure + */ +static QDF_STATUS hif_send_internal(struct HIF_DEVICE_USB *hif_usb_device, + uint8_t pipe_id, + qdf_nbuf_t hdr_buf, + qdf_nbuf_t buf, unsigned int nbytes) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct HIF_DEVICE_USB *device = hif_usb_device; + struct HIF_USB_PIPE *pipe = &device->pipes[pipe_id]; + struct HIF_URB_CONTEXT *urb_context; + uint8_t *data; + uint32_t len; + struct urb *urb; + int usb_status; + int i; + struct hif_usb_send_context *send_context; + uint8_t frag_count; + uint32_t head_data_len, tmp_frag_count = 0; + unsigned char *data_ptr; + + HIF_DBG("+%s pipe : %d, buf:0x%pK nbytes %u", + __func__, pipe_id, buf, nbytes); + + frag_count = qdf_nbuf_get_num_frags(buf); + if (frag_count == 1) { + /* + * | hif_usb_send_context | netbuf->data + */ + head_data_len = sizeof(struct hif_usb_send_context); + } else if ((frag_count - 1) <= QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS) { + /* + * means have extra fragment buf in skb + * header data length should be total sending length subtract + * internal data length of netbuf + * | hif_usb_send_context | fragments except internal buffer | + * netbuf->data + */ + head_data_len = sizeof(struct hif_usb_send_context); + while (tmp_frag_count < (frag_count - 1)) { + head_data_len = + head_data_len + qdf_nbuf_get_frag_len(buf, + tmp_frag_count); + tmp_frag_count = tmp_frag_count + 1; + } + } else { + /* Extra fragments overflow */ + HIF_ERROR("%s Extra fragments count overflow : %d\n", + __func__, frag_count); + status = QDF_STATUS_E_RESOURCES; + goto err; + } + + /* Check whether head room is enough to save extra head data */ + if (head_data_len <= qdf_nbuf_headroom(buf)) { + send_context = (struct hif_usb_send_context *) + qdf_nbuf_push_head(buf, head_data_len); + send_context->new_alloc = false; + } else { + send_context = + qdf_mem_malloc(sizeof(struct hif_usb_send_context) + + head_data_len + nbytes); + if (!send_context) { + status = QDF_STATUS_E_NOMEM; + goto err; + } + send_context->new_alloc = true; + } + send_context->netbuf = buf; + send_context->hif_usb_device = hif_usb_device; + send_context->transfer_id = pipe_id; + send_context->head_data_len = head_data_len; + /* + * Copy data to head part of netbuf or head of allocated buffer. + * if buffer is new allocated, the last buffer should be copied also. + * It assume last fragment is internal buffer of netbuf + * sometime total length of fragments larger than nbytes + */ + data_ptr = (unsigned char *)send_context + + sizeof(struct hif_usb_send_context); + for (i = 0; + i < (send_context->new_alloc ? frag_count : frag_count - 1); i++) { + int frag_len = qdf_nbuf_get_frag_len(buf, i); + unsigned char *frag_addr = qdf_nbuf_get_frag_vaddr(buf, i); + + qdf_mem_copy(data_ptr, frag_addr, frag_len); + data_ptr += frag_len; + } + /* Reset pData pointer and send out */ + data_ptr = (unsigned char *)send_context + + sizeof(struct hif_usb_send_context); + + urb_context = usb_hif_alloc_urb_from_pipe(pipe); + if (!urb_context) { + /* TODO : note, it is possible to run out of urbs if 2 + * endpoints map to the same pipe ID + */ + HIF_ERROR("%s pipe:%d no urbs left. URB Cnt : %d", + __func__, pipe_id, pipe->urb_cnt); + status = QDF_STATUS_E_RESOURCES; + goto err; + } + urb_context->send_context = send_context; + urb = urb_context->urb; + urb_context->buf = buf; + data = data_ptr; + len = nbytes; + + usb_fill_bulk_urb(urb, + device->udev, + pipe->usb_pipe_handle, + data, + (len % pipe->max_packet_size) == + 0 ? (len + 1) : len, + usb_hif_usb_transmit_complete, urb_context); + + if ((len % pipe->max_packet_size) == 0) + /* hit a max packet boundary on this pipe */ + + HIF_DBG + ("athusb bulk send submit:%d, 0x%X (ep:0x%2.2X), %d bytes", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->ep_address, nbytes); + + usb_hif_enqueue_pending_transfer(pipe, urb_context); + usb_status = usb_submit_urb(urb, GFP_ATOMIC); + if (usb_status) { + if (send_context->new_alloc) + qdf_mem_free(send_context); + else + qdf_nbuf_pull_head(buf, head_data_len); + urb_context->buf = NULL; + HIF_ERROR("athusb : usb bulk transmit failed %d", + usb_status); + usb_hif_remove_pending_transfer(urb_context); + usb_hif_cleanup_transmit_urb(urb_context); + status = QDF_STATUS_E_FAILURE; + goto err; + } + +err: + if (!QDF_IS_STATUS_SUCCESS(status) && + (status != QDF_STATUS_E_RESOURCES)) { + HIF_ERROR("athusb send failed %d", status); + } + + HIF_DBG("-%s pipe : %d", __func__, pipe_id); + + return status; +} + +/** + * hif_send_head() - HIF routine exposed to upper layers to send data + * @scn: pointer to hif_opaque_softc structure + * @pipe_id: HIF pipe on which data is to be sent + * @transfer_id: endpoint ID on which data is to be sent + * @nbytes: number of bytes to be transmitted + * @wbuf: qdf_nbuf_t containing data to be transmitted + * @hdr_buf: any header buf to be prepended, currently ignored + * @data_attr: data_attr field from cvg_nbuf_cb of wbuf + * + * Return: QDF_STATUS_SUCCESS on success and error QDF status on failure + */ +QDF_STATUS hif_send_head(struct hif_opaque_softc *scn, uint8_t pipe_id, + uint32_t transfer_id, uint32_t nbytes, + qdf_nbuf_t wbuf, uint32_t data_attr) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(scn); + + status = hif_send_internal(device, pipe_id, NULL, wbuf, nbytes); + return status; +} + +/** + * hif_get_free_queue_number() - get # of free TX resources in a given HIF pipe + * @scn: pointer to hif_opaque_softc structure + * @pipe_id: HIF pipe which is being polled for free resources + * + * Return: # of free resources in pipe_id + */ +uint16_t hif_get_free_queue_number(struct hif_opaque_softc *scn, + uint8_t pipe_id) +{ + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(scn); + struct HIF_USB_PIPE *pipe = &device->pipes[pipe_id]; + u16 urb_cnt; + + qdf_spin_lock_irqsave(&pipe->device->cs_lock); + urb_cnt = pipe->urb_cnt; + qdf_spin_unlock_irqrestore(&pipe->device->cs_lock); + + return urb_cnt; +} + +/** + * hif_post_init() - copy HTC callbacks to HIF + * @scn: pointer to hif_opaque_softc structure + * @target: pointer to HTC_TARGET structure + * @callbacks: htc callbacks + * + * Return: none + */ +void hif_post_init(struct hif_opaque_softc *scn, void *target, + struct hif_msg_callbacks *callbacks) +{ + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(scn); + + qdf_mem_copy(&device->htc_callbacks, callbacks, + sizeof(device->htc_callbacks)); +} + +/** + * hif_detach_htc() - remove HTC callbacks from HIF + * @scn: pointer to hif_opaque_softc structure + * + * Return: none + */ +void hif_detach_htc(struct hif_opaque_softc *scn) +{ + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(scn); + + usb_hif_flush_all(device); + qdf_mem_zero(&device->htc_callbacks, sizeof(device->htc_callbacks)); +} + +/** + * hif_usb_device_deinit() - de- init HIF_DEVICE_USB, cleanup pipe resources + * @sc: pointer to hif_usb_softc structure + * + * Return: None + */ +void hif_usb_device_deinit(struct hif_usb_softc *sc) +{ + struct HIF_DEVICE_USB *device = &sc->hif_hdl; + + HIF_TRACE("+%s", __func__); + + usb_hif_cleanup_pipe_resources(device); + + if (device->diag_cmd_buffer) + qdf_mem_free(device->diag_cmd_buffer); + + if (device->diag_resp_buffer) + qdf_mem_free(device->diag_resp_buffer); + + HIF_TRACE("-%s", __func__); +} + +/** + * hif_usb_device_init() - init HIF_DEVICE_USB, setup pipe resources + * @sc: pointer to hif_usb_softc structure + * + * Return: QDF_STATUS_SUCCESS on success or a QDF error + */ +QDF_STATUS hif_usb_device_init(struct hif_usb_softc *sc) +{ + int i; + struct HIF_DEVICE_USB *device = &sc->hif_hdl; + struct usb_interface *interface = sc->interface; + struct usb_device *dev = interface_to_usbdev(interface); + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct HIF_USB_PIPE *pipe; + + HIF_TRACE("+%s", __func__); + + do { + + qdf_spinlock_create(&(device->cs_lock)); + qdf_spinlock_create(&(device->rx_lock)); + qdf_spinlock_create(&(device->tx_lock)); + qdf_spinlock_create(&device->rx_prestart_lock); + device->udev = dev; + device->interface = interface; + + HIF_ERROR("%s device %pK device->udev %pK device->interface %pK", + __func__, + device, + device->udev, + device->interface); + + for (i = 0; i < HIF_USB_PIPE_MAX; i++) { + pipe = &device->pipes[i]; + + HIF_USB_INIT_WORK(pipe); + skb_queue_head_init(&pipe->io_comp_queue); + } + + device->diag_cmd_buffer = + qdf_mem_malloc(USB_CTRL_MAX_DIAG_CMD_SIZE); + if (!device->diag_cmd_buffer) { + status = QDF_STATUS_E_NOMEM; + break; + } + device->diag_resp_buffer = + qdf_mem_malloc(USB_CTRL_MAX_DIAG_RESP_SIZE); + if (!device->diag_resp_buffer) { + status = QDF_STATUS_E_NOMEM; + break; + } + + status = usb_hif_setup_pipe_resources(device); + + } while (false); + + if (hif_is_supported_rx_ctrl_pipe(HIF_GET_SOFTC(sc))) + device->rx_ctrl_pipe_supported = 1; + + if (status != QDF_STATUS_SUCCESS) + HIF_ERROR("%s: abnormal condition", __func__); + + HIF_TRACE("+%s", __func__); + return status; +} + +/** + * hif_start() - Enable HIF TX and RX + * @scn: pointer to hif_opaque_softc structure + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS hif_start(struct hif_opaque_softc *scn) +{ + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(scn); + int i; + + HIF_TRACE("+%s", __func__); + usb_hif_prestart_recv_pipes(device); + + /* set the TX resource avail threshold for each TX pipe */ + for (i = HIF_TX_CTRL_PIPE; i <= HIF_TX_DATA_HP_PIPE; i++) { + device->pipes[i].urb_cnt_thresh = + device->pipes[i].urb_alloc / 2; + } + + HIF_TRACE("-%s", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * hif_usb_stop_device() - Stop/flush all HIF communication + * @scn: pointer to hif_opaque_softc structure + * + * Return: none + */ +void hif_usb_stop_device(struct hif_softc *hif_sc) +{ + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(hif_sc); + + HIF_TRACE("+%s", __func__); + + usb_hif_flush_all(device); + + HIF_TRACE("-%s", __func__); +} + +/** + * hif_get_default_pipe() - get default pipes for HIF TX/RX + * @scn: pointer to hif_opaque_softc structure + * @ul_pipe: pointer to TX pipe + * @ul_pipe: pointer to TX pipe + * + * Return: none + */ +void hif_get_default_pipe(struct hif_opaque_softc *scn, uint8_t *ul_pipe, + uint8_t *dl_pipe) +{ + *ul_pipe = HIF_TX_CTRL_PIPE; + *dl_pipe = HIF_RX_CTRL_PIPE; +} + +#if defined(USB_MULTI_IN_TEST) || defined(USB_ISOC_TEST) +/** + * hif_map_service_to_pipe() - maps ul/dl pipe to service id. + * @scn: HIF context + * @svc_id: sevice index + * @ul_pipe: pointer to uplink pipe id + * @dl_pipe: pointer to down-linklink pipe id + * @ul_is_polled: if ul is polling based + * @ul_is_polled: if dl is polling based + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +int hif_map_service_to_pipe(struct hif_opaque_softc *scn, uint16_t svc_id, + uint8_t *ul_pipe, uint8_t *dl_pipe, + int *ul_is_polled, int *dl_is_polled) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + switch (svc_id) { + case HTC_CTRL_RSVD_SVC: + case WMI_CONTROL_SVC: + case HTC_RAW_STREAMS_SVC: + *ul_pipe = HIF_TX_CTRL_PIPE; + *dl_pipe = HIF_RX_DATA_PIPE; + break; + case WMI_DATA_BE_SVC: + *ul_pipe = HIF_TX_DATA_LP_PIPE; + *dl_pipe = HIF_RX_DATA_PIPE; + break; + case WMI_DATA_BK_SVC: + *ul_pipe = HIF_TX_DATA_MP_PIPE; + *dl_pipe = HIF_RX_DATA2_PIPE; + break; + case WMI_DATA_VI_SVC: + *ul_pipe = HIF_TX_DATA_HP_PIPE; + *dl_pipe = HIF_RX_DATA_PIPE; + break; + case WMI_DATA_VO_SVC: + *ul_pipe = HIF_TX_DATA_LP_PIPE; + *dl_pipe = HIF_RX_DATA_PIPE; + break; + default: + status = QDF_STATUS_E_FAILURE; + break; + } + + return status; +} +#else + +#ifdef QCA_TX_HTT2_SUPPORT +#define USB_TX_CHECK_HTT2_SUPPORT 1 +#else +#define USB_TX_CHECK_HTT2_SUPPORT 0 +#endif + +/** + * hif_map_service_to_pipe() - maps ul/dl pipe to service id. + * @scn: HIF context + * @svc_id: sevice index + * @ul_pipe: pointer to uplink pipe id + * @dl_pipe: pointer to down-linklink pipe id + * @ul_is_polled: if ul is polling based + * @ul_is_polled: if dl is polling based + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +int hif_map_service_to_pipe(struct hif_opaque_softc *scn, uint16_t svc_id, + uint8_t *ul_pipe, uint8_t *dl_pipe, + int *ul_is_polled, int *dl_is_polled) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(scn); + + switch (svc_id) { + case HTC_CTRL_RSVD_SVC: + case WMI_CONTROL_SVC: + *ul_pipe = HIF_TX_CTRL_PIPE; + if (device->rx_ctrl_pipe_supported) + *dl_pipe = HIF_RX_CTRL_PIPE; + else + *dl_pipe = HIF_RX_DATA_PIPE; + break; + case WMI_DATA_BE_SVC: + case WMI_DATA_BK_SVC: + *ul_pipe = HIF_TX_DATA_LP_PIPE; + if (hif_usb_disable_rxdata2) + *dl_pipe = HIF_RX_DATA_PIPE; + else + *dl_pipe = HIF_RX_DATA2_PIPE; + break; + case WMI_DATA_VI_SVC: + *ul_pipe = HIF_TX_DATA_MP_PIPE; + if (hif_usb_disable_rxdata2) + *dl_pipe = HIF_RX_DATA_PIPE; + else + *dl_pipe = HIF_RX_DATA2_PIPE; + break; + case WMI_DATA_VO_SVC: + *ul_pipe = HIF_TX_DATA_HP_PIPE; + if (hif_usb_disable_rxdata2) + *dl_pipe = HIF_RX_DATA_PIPE; + else + *dl_pipe = HIF_RX_DATA2_PIPE; + break; + case HTC_RAW_STREAMS_SVC: + *ul_pipe = HIF_TX_CTRL_PIPE; + *dl_pipe = HIF_RX_DATA_PIPE; + break; + case HTT_DATA_MSG_SVC: + *ul_pipe = HIF_TX_DATA_LP_PIPE; + if (hif_usb_disable_rxdata2) + *dl_pipe = HIF_RX_DATA_PIPE; + else + *dl_pipe = HIF_RX_DATA2_PIPE; + break; + case HTT_DATA2_MSG_SVC: + if (USB_TX_CHECK_HTT2_SUPPORT) { + *ul_pipe = HIF_TX_DATA_HP_PIPE; + if (hif_usb_disable_rxdata2) + *dl_pipe = HIF_RX_DATA_PIPE; + else + *dl_pipe = HIF_RX_DATA2_PIPE; + } + break; + default: + status = QDF_STATUS_E_FAILURE; + break; + } + + return status; +} +#endif + +/** + * hif_ctrl_msg_exchange() - send usb ctrl message and receive response + * @macp: pointer to HIF_DEVICE_USB + * @send_req_val: USB send message request value + * @send_msg: pointer to data to send + * @len: length in bytes of the data to send + * @response_req_val: USB response message request value + * @response_msg: pointer to response msg + * @response_len: length of the response message + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +static QDF_STATUS hif_ctrl_msg_exchange(struct HIF_DEVICE_USB *macp, + uint8_t send_req_val, + uint8_t *send_msg, + uint32_t len, + uint8_t response_req_val, + uint8_t *response_msg, + uint32_t *response_len) +{ + QDF_STATUS status; + + do { + + /* send command */ + status = usb_hif_submit_ctrl_out(macp, send_req_val, 0, 0, + send_msg, len); + + if (!QDF_IS_STATUS_SUCCESS(status)) + break; + + if (!response_msg) { + /* no expected response */ + break; + } + + /* get response */ + status = usb_hif_submit_ctrl_in(macp, response_req_val, 0, 0, + response_msg, *response_len); + + if (!QDF_IS_STATUS_SUCCESS(status)) + break; + + } while (false); + + return status; +} + +#ifdef WLAN_FEATURE_BMI +/** + * hif_exchange_bmi_msg() - send/recev ctrl message of type BMI_CMD/BMI_RESP + * @scn: pointer to hif_opaque_softc + * @bmi_request: pointer to data to send + * @request_length: length in bytes of the data to send + * @bmi_response: pointer to response msg + * @bmi_response_length: length of the response message + * @timeout_ms: timeout to wait for response (ignored in current implementation) + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ + +QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *scn, + qdf_dma_addr_t cmd, qdf_dma_addr_t rsp, + uint8_t *bmi_request, + uint32_t request_length, + uint8_t *bmi_response, + uint32_t *bmi_response_lengthp, + uint32_t timeout_ms) +{ + struct HIF_DEVICE_USB *macp = HIF_GET_USB_DEVICE(scn); + + return hif_ctrl_msg_exchange(macp, + USB_CONTROL_REQ_SEND_BMI_CMD, + bmi_request, + request_length, + USB_CONTROL_REQ_RECV_BMI_RESP, + bmi_response, bmi_response_lengthp); +} + +void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx) +{ +} +#endif /* WLAN_FEATURE_BMI */ + +/** + * hif_diag_read_access() - Read data from target memory or register + * @scn: pointer to hif_opaque_softc + * @address: register address to read from + * @data: pointer to buffer to store the value read from the register + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *scn, uint32_t address, + uint32_t *data) +{ + struct HIF_DEVICE_USB *macp = HIF_GET_USB_DEVICE(scn); + QDF_STATUS status; + USB_CTRL_DIAG_CMD_READ *cmd; + uint32_t respLength; + + cmd = (USB_CTRL_DIAG_CMD_READ *) macp->diag_cmd_buffer; + + qdf_mem_zero(cmd, sizeof(*cmd)); + cmd->Cmd = USB_CTRL_DIAG_CC_READ; + cmd->Address = address; + respLength = sizeof(USB_CTRL_DIAG_RESP_READ); + + status = hif_ctrl_msg_exchange(macp, + USB_CONTROL_REQ_DIAG_CMD, + (uint8_t *) cmd, + sizeof(*cmd), + USB_CONTROL_REQ_DIAG_RESP, + macp->diag_resp_buffer, &respLength); + + if (QDF_IS_STATUS_SUCCESS(status)) { + USB_CTRL_DIAG_RESP_READ *pResp = + (USB_CTRL_DIAG_RESP_READ *) macp->diag_resp_buffer; + *data = pResp->ReadValue; + status = QDF_STATUS_SUCCESS; + } else { + status = QDF_STATUS_E_FAILURE; + } + + return status; +} + +/** + * hif_diag_write_access() - write data to target memory or register + * @scn: pointer to hif_opaque_softc + * @address: register address to write to + * @data: value to be written to the address + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *scn, + uint32_t address, + uint32_t data) +{ + struct HIF_DEVICE_USB *macp = HIF_GET_USB_DEVICE(scn); + USB_CTRL_DIAG_CMD_WRITE *cmd; + + cmd = (USB_CTRL_DIAG_CMD_WRITE *) macp->diag_cmd_buffer; + + qdf_mem_zero(cmd, sizeof(*cmd)); + cmd->Cmd = USB_CTRL_DIAG_CC_WRITE; + cmd->Address = address; + cmd->Value = data; + + return hif_ctrl_msg_exchange(macp, + USB_CONTROL_REQ_DIAG_CMD, + (uint8_t *) cmd, + sizeof(*cmd), 0, NULL, 0); +} + +/** + * hif_dump_info() - dump info about all HIF pipes and endpoints + * @scn: pointer to hif_opaque_softc + * + * Return: none + */ +void hif_dump_info(struct hif_opaque_softc *scn) +{ + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(scn); + struct HIF_USB_PIPE *pipe = NULL; + struct usb_host_interface *iface_desc = NULL; + struct usb_endpoint_descriptor *ep_desc; + uint8_t i = 0; + + for (i = 0; i < HIF_USB_PIPE_MAX; i++) { + pipe = &device->pipes[i]; + HIF_ERROR("PipeIndex : %d URB Cnt : %d PipeHandle : %x", + i, pipe->urb_cnt, + pipe->usb_pipe_handle); + if (usb_pipeisoc(pipe->usb_pipe_handle)) + HIF_INFO("Pipe Type ISOC"); + else if (usb_pipebulk(pipe->usb_pipe_handle)) + HIF_INFO("Pipe Type BULK"); + else if (usb_pipeint(pipe->usb_pipe_handle)) + HIF_INFO("Pipe Type INT"); + else if (usb_pipecontrol(pipe->usb_pipe_handle)) + HIF_INFO("Pipe Type control"); + } + + for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { + ep_desc = &iface_desc->endpoint[i].desc; + if (ep_desc) { + HIF_INFO( + "ep_desc : %pK Index : %d: DescType : %d Addr : %d Maxp : %d Atrrib : %d", + ep_desc, i, ep_desc->bDescriptorType, + ep_desc->bEndpointAddress, + ep_desc->wMaxPacketSize, + ep_desc->bmAttributes); + if ((ep_desc) && (usb_endpoint_type(ep_desc) == + USB_ENDPOINT_XFER_ISOC)) { + HIF_INFO("ISOC EP Detected"); + } + } + } + +} + +/** + * hif_flush_surprise_remove() - Cleanup residual buffers for device shutdown + * @scn: HIF context + * + * Not applicable to USB bus + * + * Return: none + */ +void hif_flush_surprise_remove(struct hif_opaque_softc *scn) +{ +/* TO DO... */ +} + +/** + * hif_diag_read_mem() -read nbytes of data from target memory or register + * @scn: pointer to hif_opaque_softc + * @address: register address to read from + * @data: buffer to store the value read + * @nbytes: number of bytes to be read from 'address' + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *scn, + uint32_t address, uint8_t *data, + int nbytes) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + HIF_TRACE("+%s", __func__); + + if ((address & 0x3) || ((uintptr_t)data & 0x3)) + return QDF_STATUS_E_IO; + + while ((nbytes >= 4) && + QDF_IS_STATUS_SUCCESS(status = + hif_diag_read_access(scn, + address, + (uint32_t *)data))) { + + nbytes -= sizeof(uint32_t); + address += sizeof(uint32_t); + data += sizeof(uint32_t); + + } + HIF_TRACE("-%s", __func__); + return status; +} +qdf_export_symbol(hif_diag_read_mem); + +/** + * hif_diag_write_mem() -write nbytes of data to target memory or register + * @scn: pointer to hif_opaque_softc + * @address: register address to write to + * @data: buffer containing data to be written + * @nbytes: number of bytes to be written + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *scn, + uint32_t address, + uint8_t *data, int nbytes) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + HIF_TRACE("+%s", __func__); + if ((address & 0x3) || ((uintptr_t)data & 0x3)) + return QDF_STATUS_E_IO; + + while (nbytes >= 4 && + QDF_IS_STATUS_SUCCESS(status = + hif_diag_write_access(scn, + address, + *((uint32_t *)data)))) { + + nbytes -= sizeof(uint32_t); + address += sizeof(uint32_t); + data += sizeof(uint32_t); + + } + HIF_TRACE("-%s", __func__); + return status; +} + +void hif_send_complete_check(struct hif_opaque_softc *scn, + uint8_t PipeID, int force) +{ + /* NO-OP*/ +} + +/* diagnostic command defnitions */ +#define USB_CTRL_DIAG_CC_READ 0 +#define USB_CTRL_DIAG_CC_WRITE 1 +#define USB_CTRL_DIAG_CC_WARM_RESET 2 + +void hif_suspend_wow(struct hif_opaque_softc *scn) +{ + HIF_INFO("HIFsuspendwow - TODO"); +} + +/** + * hif_usb_set_bundle_mode() - enable bundling and set default rx bundle cnt + * @scn: pointer to hif_opaque_softc structure + * @enabled: flag to enable/disable bundling + * @rx_bundle_cnt: bundle count to be used for RX + * + * Return: none + */ +void hif_usb_set_bundle_mode(struct hif_softc *scn, + bool enabled, int rx_bundle_cnt) +{ + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(scn); + + device->is_bundle_enabled = enabled; + device->rx_bundle_cnt = rx_bundle_cnt; + if (device->is_bundle_enabled && (device->rx_bundle_cnt == 0)) + device->rx_bundle_cnt = 1; + + device->rx_bundle_buf_len = device->rx_bundle_cnt * + HIF_USB_RX_BUNDLE_ONE_PKT_SIZE; + + HIF_DBG("athusb bundle %s cnt %d", enabled ? "enabled" : "disabled", + rx_bundle_cnt); +} + +/** + * hif_is_supported_rx_ctrl_pipe() - return true if device supports exclusive + * control pipe in the RX direction. + * @scn: hif context + * + * Return: true if device supports RX control pipe. + */ +bool hif_is_supported_rx_ctrl_pipe(struct hif_softc *scn) +{ + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); + + switch (tgt_info->target_type) { + case TARGET_TYPE_QCN7605: + return true; + default: + return false; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/usb/hif_usb_internal.h b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/hif_usb_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..3f3a91eafe952481595b8c1ac6f3bcd5b23aea21 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/hif_usb_internal.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2013-2017, 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HIF_USB_INTERNAL_H +#define _HIF_USB_INTERNAL_H + +#include +#include "a_types.h" +#include "athdefs.h" +#include "a_osapi.h" +#include "a_usb_defs.h" +#include +#include +#include "hif.h" +#include "if_usb.h" + +#ifdef QCN7605_SUPPORT +#define TX_URB_COUNT 64 +#else +#define TX_URB_COUNT 32 +#endif + +#define RX_URB_COUNT 32 + +#define HIF_USB_RX_BUFFER_SIZE (1792 + 8) +#define HIF_USB_RX_BUNDLE_ONE_PKT_SIZE (1792 + 8) + +#ifdef HIF_USB_TASKLET +#define HIF_USB_SCHEDULE_WORK(pipe)\ + tasklet_schedule(&pipe->io_complete_tasklet) + +#define HIF_USB_INIT_WORK(pipe)\ + tasklet_init(&pipe->io_complete_tasklet,\ + usb_hif_io_comp_tasklet,\ + (unsigned long)pipe) + +#define HIF_USB_FLUSH_WORK(pipe) flush_work(&pipe->io_complete_work) +#else +#define HIF_USB_SCHEDULE_WORK(pipe) queue_work(system_highpri_wq,\ + &(pipe)->io_complete_work) +#define HIF_USB_INIT_WORK(pipe)\ + INIT_WORK(&pipe->io_complete_work,\ + usb_hif_io_comp_work) +#define HIF_USB_FLUSH_WORK(pipe) +#endif + +/* debug masks */ +#define USB_HIF_DEBUG_CTRL_TRANS ATH_DEBUG_MAKE_MODULE_MASK(0) +#define USB_HIF_DEBUG_BULK_IN ATH_DEBUG_MAKE_MODULE_MASK(1) +#define USB_HIF_DEBUG_BULK_OUT ATH_DEBUG_MAKE_MODULE_MASK(2) +#define USB_HIF_DEBUG_ENUM ATH_DEBUG_MAKE_MODULE_MASK(3) +#define USB_HIF_DEBUG_DUMP_DATA ATH_DEBUG_MAKE_MODULE_MASK(4) +#define USB_HIF_SUSPEND ATH_DEBUG_MAKE_MODULE_MASK(5) +#define USB_HIF_ISOC_SUPPORT ATH_DEBUG_MAKE_MODULE_MASK(6) + +struct HIF_USB_PIPE; + +struct HIF_URB_CONTEXT { + DL_LIST link; + struct HIF_USB_PIPE *pipe; + qdf_nbuf_t buf; + struct urb *urb; + struct hif_usb_send_context *send_context; +}; + +#define HIF_USB_PIPE_FLAG_TX (1 << 0) + +/* + * Data structure to record required sending context data + */ +struct hif_usb_send_context { + A_BOOL new_alloc; + struct HIF_DEVICE_USB *hif_usb_device; + qdf_nbuf_t netbuf; + unsigned int transfer_id; + unsigned int head_data_len; +}; + +extern unsigned int hif_usb_disable_rxdata2; + +extern QDF_STATUS usb_hif_submit_ctrl_in(struct HIF_DEVICE_USB *macp, + uint8_t req, + uint16_t value, + uint16_t index, + void *data, uint32_t size); + +extern QDF_STATUS usb_hif_submit_ctrl_out(struct HIF_DEVICE_USB *macp, + uint8_t req, + uint16_t value, + uint16_t index, + void *data, uint32_t size); + +QDF_STATUS usb_hif_setup_pipe_resources(struct HIF_DEVICE_USB *device); +void usb_hif_cleanup_pipe_resources(struct HIF_DEVICE_USB *device); +void usb_hif_prestart_recv_pipes(struct HIF_DEVICE_USB *device); +void usb_hif_start_recv_pipes(struct HIF_DEVICE_USB *device); +void usb_hif_flush_all(struct HIF_DEVICE_USB *device); +void usb_hif_cleanup_transmit_urb(struct HIF_URB_CONTEXT *urb_context); +void usb_hif_enqueue_pending_transfer(struct HIF_USB_PIPE *pipe, + struct HIF_URB_CONTEXT *urb_context); +void usb_hif_remove_pending_transfer(struct HIF_URB_CONTEXT *urb_context); +struct HIF_URB_CONTEXT *usb_hif_alloc_urb_from_pipe(struct HIF_USB_PIPE *pipe); +void hif_usb_device_deinit(struct hif_usb_softc *sc); +QDF_STATUS hif_usb_device_init(struct hif_usb_softc *sc); +#ifdef HIF_USB_TASKLET +void usb_hif_io_comp_tasklet(unsigned long context); +#else +void usb_hif_io_comp_work(struct work_struct *work); +#endif +QDF_STATUS hif_diag_write_warm_reset(struct usb_interface *interface, + uint32_t address, uint32_t data); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/usb/if_usb.c b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/if_usb.c new file mode 100644 index 0000000000000000000000000000000000000000..7917380e060b6a7ffabdebb9027f56bd5349c4d1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/if_usb.c @@ -0,0 +1,778 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include "if_usb.h" +#include "hif_usb_internal.h" +#include "target_type.h" /* TARGET_TYPE_ */ +#include "regtable_usb.h" +#include "ol_fw.h" +#include "hif_debug.h" +#include "epping_main.h" +#include "hif_main.h" +#include "usb_api.h" + +#define DELAY_FOR_TARGET_READY 200 /* 200ms */ + +/* Save memory addresses where we save FW ram dump, and then we could obtain + * them by symbol table. + */ +uint32_t fw_stack_addr; +void *fw_ram_seg_addr[FW_RAM_SEG_CNT]; + + + +static int hif_usb_unload_dev_num = -1; +struct hif_usb_softc *g_usb_sc; + +/** + * hif_usb_diag_write_cold_reset() - reset SOC by sending a diag command + * @scn: pointer to ol_softc structure + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +static inline QDF_STATUS +hif_usb_diag_write_cold_reset(struct hif_softc *scn) +{ + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct hif_target_info *tgt_info = &scn->target_info; + + /* For Genoa, chip-reset is handled in CNSS driver */ + if (tgt_info->target_type == TARGET_TYPE_QCN7605) + return QDF_STATUS_SUCCESS; + + HIF_DBG("%s: resetting SOC", __func__); + + return hif_diag_write_access(hif_hdl, + (ROME_USB_SOC_RESET_CONTROL_COLD_RST_LSB | + ROME_USB_RTC_SOC_BASE_ADDRESS), + SOC_RESET_CONTROL_COLD_RST_SET(1)); +} + +/** + * hif_usb_procfs_init() - create init procfs + * @scn: pointer to hif_usb_softc structure + * + * Return: int 0 if success else an appropriate error number + */ +static int +hif_usb_procfs_init(struct hif_softc *scn) +{ + int ret = 0; + + HIF_ENTER(); + + if (athdiag_procfs_init(scn) != 0) { + HIF_ERROR("athdiag_procfs_init failed"); + ret = A_ERROR; + } + + scn->athdiag_procfs_inited = true; + + HIF_EXIT(); + return ret; +} + +/** + * hif_nointrs(): disable IRQ + * @scn: pointer to struct hif_softc + * + * This function stops interrupt(s) + * + * Return: none + */ +void hif_usb_nointrs(struct hif_softc *scn) +{ + +} + +/** + * hif_usb_reboot() - called at reboot time to reset WLAN SOC + * @nb: pointer to notifier_block registered during register_reboot_notifier + * @val: code indicating reboot reason + * @v: unused pointer + * + * Return: int 0 if success else an appropriate error number + */ +static int hif_usb_reboot(struct notifier_block *nb, unsigned long val, + void *v) +{ + struct hif_usb_softc *sc; + + HIF_ENTER(); + sc = container_of(nb, struct hif_usb_softc, reboot_notifier); + /* do cold reset */ + hif_usb_diag_write_cold_reset(HIF_GET_SOFTC(sc)); + HIF_EXIT(); + return NOTIFY_DONE; +} + +/** + * hif_usb_disable_lpm() - Disable lpm feature of usb2.0 + * @udev: pointer to usb_device for which LPM is to be disabled + * + * LPM needs to be disabled to avoid usb2.0 probe timeout + * + * Return: int 0 if success else an appropriate error number + */ +static int hif_usb_disable_lpm(struct usb_device *udev) +{ + struct usb_hcd *hcd; + int ret = -EPERM; + + HIF_ENTER(); + + if (!udev || !udev->bus) { + HIF_ERROR("Invalid input parameters"); + goto exit; + } + + hcd = bus_to_hcd(udev->bus); + if (udev->usb2_hw_lpm_enabled) { + if (hcd->driver->set_usb2_hw_lpm) { + ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, false); + if (!ret) { + udev->usb2_hw_lpm_enabled = false; + udev->usb2_hw_lpm_capable = false; + HIF_TRACE("%s: LPM is disabled", __func__); + } else { + HIF_TRACE("%s: Fail to disable LPM", + __func__); + } + } else { + HIF_TRACE("%s: hcd doesn't support LPM", + __func__); + } + } else { + HIF_TRACE("%s: LPM isn't enabled", __func__); + } +exit: + HIF_EXIT(); + return ret; +} + +/** + * hif_usb_enable_bus() - enable usb bus + * @ol_sc: hif_softc struct + * @dev: device pointer + * @bdev: bus dev pointer + * @bid: bus id pointer + * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE + * + * Return: QDF_STATUS_SUCCESS on success and error QDF status on failure + */ +QDF_STATUS hif_usb_enable_bus(struct hif_softc *scn, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type) + +{ + struct usb_interface *interface = (struct usb_interface *)bdev; + struct usb_device_id *id = (struct usb_device_id *)bid; + int ret = 0; + struct hif_usb_softc *sc; + struct usb_device *usbdev = interface_to_usbdev(interface); + int vendor_id, product_id; + struct hif_target_info *tgt_info; + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + u32 hif_type; + u32 target_type; + + if (!scn) { + HIF_ERROR("%s: hif_ctx is NULL", __func__); + goto err_usb; + } + + sc = HIF_GET_USB_SOFTC(scn); + + HIF_INFO("%s hif_softc %pK usbdev %pK interface %pK\n", + __func__, + scn, + usbdev, + interface); + + vendor_id = qdf_le16_to_cpu(usbdev->descriptor.idVendor); + product_id = qdf_le16_to_cpu(usbdev->descriptor.idProduct); + + HIF_ERROR("%s: con_mode = 0x%x, vendor_id = 0x%x product_id = 0x%x", + __func__, hif_get_conparam(scn), vendor_id, product_id); + + sc->pdev = (void *)usbdev; + sc->dev = &usbdev->dev; + sc->devid = id->idProduct; + + hif_get_device_type(product_id, 0, &hif_type, &target_type); + tgt_info = hif_get_target_info_handle(hif_hdl); + if (target_type == TARGET_TYPE_QCN7605) + tgt_info->target_type = TARGET_TYPE_QCN7605; + + /* + * For Genoa, skip set_configuration, since it is handled + * by CNSS driver. + */ + if (target_type != TARGET_TYPE_QCN7605) { + usb_get_dev(usbdev); + if ((usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), + USB_REQ_SET_CONFIGURATION, 0, 1, 0, + NULL, 0, HZ)) < 0) { + HIF_ERROR("%s[%d]", __func__, __LINE__); + goto err_usb; + } + usb_set_interface(usbdev, 0, 0); + sc->reboot_notifier.notifier_call = hif_usb_reboot; + register_reboot_notifier(&sc->reboot_notifier); + } + + /* disable lpm to avoid usb2.0 probe timeout */ + hif_usb_disable_lpm(usbdev); + + /* params need to be added - TODO + * scn->enableuartprint = 1; + * scn->enablefwlog = 0; + * scn->max_no_of_peers = 1; + */ + + sc->interface = interface; + if (hif_usb_device_init(sc) != QDF_STATUS_SUCCESS) { + HIF_ERROR("ath: %s: hif_usb_device_init failed", __func__); + goto err_reset; + } + + if (hif_usb_procfs_init(scn)) + goto err_reset; + + hif_usb_unload_dev_num = usbdev->devnum; + g_usb_sc = sc; + HIF_EXIT(); + return 0; + +err_reset: + hif_usb_diag_write_cold_reset(scn); + g_usb_sc = NULL; + hif_usb_unload_dev_num = -1; + if (target_type != TARGET_TYPE_QCN7605) + unregister_reboot_notifier(&sc->reboot_notifier); +err_usb: + ret = QDF_STATUS_E_FAILURE; + if (target_type != TARGET_TYPE_QCN7605) + usb_put_dev(usbdev); + return ret; +} + + +/** + * hif_usb_close(): close bus, delete hif_sc + * @ol_sc: soft_sc struct + * + * Return: none + */ +void hif_usb_close(struct hif_softc *scn) +{ + g_usb_sc = NULL; +} + +/** + * hif_usb_disable_bus(): This function disables usb bus + * @hif_ctx: pointer to struct hif_softc + * + * Return: none + */ +void hif_usb_disable_bus(struct hif_softc *hif_ctx) +{ + struct hif_usb_softc *sc = HIF_GET_USB_SOFTC(hif_ctx); + struct usb_interface *interface = sc->interface; + struct usb_device *udev = interface_to_usbdev(interface); + struct hif_target_info *tgt_info = &hif_ctx->target_info; + + HIF_TRACE("%s: trying to remove hif_usb!", __func__); + + /* disable lpm to avoid following cold reset will + * cause xHCI U1/U2 timeout + */ + if (tgt_info->target_type != TARGET_TYPE_QCN7605) + usb_disable_lpm(udev); + + /* wait for disable lpm */ + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(DELAY_FOR_TARGET_READY)); + set_current_state(TASK_RUNNING); + + /* do cold reset */ + hif_usb_diag_write_cold_reset(hif_ctx); + + if (g_usb_sc->suspend_state) + hif_bus_resume(GET_HIF_OPAQUE_HDL(hif_ctx)); + + if (tgt_info->target_type != TARGET_TYPE_QCN7605) { + unregister_reboot_notifier(&sc->reboot_notifier); + usb_put_dev(udev); + } + + hif_usb_device_deinit(sc); + + HIF_TRACE("%s hif_usb removed !!!!!!", __func__); +} + +/** + * hif_usb_bus_suspend() - suspend the bus + * @hif_ctx: hif_ctx + * + * This function suspends the bus, but usb doesn't need to suspend. + * Therefore just remove all the pending urb transactions + * + * Return: 0 for success and non-zero for failure + */ +int hif_usb_bus_suspend(struct hif_softc *hif_ctx) +{ + struct hif_usb_softc *sc = HIF_GET_USB_SOFTC(hif_ctx); + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(hif_ctx); + + HIF_ENTER(); + sc->suspend_state = 1; + usb_hif_flush_all(device); + HIF_EXIT(); + return 0; +} + +/** + * hif_usb_bus_resume() - hif resume API + * @hif_ctx: struct hif_opaque_softc + * + * This function resumes the bus. but usb doesn't need to resume. + * Post recv urbs for RX data pipe + * + * Return: 0 for success and non-zero for failure + */ +int hif_usb_bus_resume(struct hif_softc *hif_ctx) +{ + struct hif_usb_softc *sc = HIF_GET_USB_SOFTC(hif_ctx); + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(hif_ctx); + + HIF_ENTER(); + sc->suspend_state = 0; + usb_hif_start_recv_pipes(device); + + HIF_EXIT(); + return 0; +} + +/** + * hif_usb_bus_reset_resume() - resume the bus after reset + * @scn: struct hif_opaque_softc + * + * This function is called to tell the driver that USB device has been resumed + * and it has also been reset. The driver should redo any necessary + * initialization. This function resets WLAN SOC. + * + * Return: int 0 for success, non zero for failure + */ +int hif_usb_bus_reset_resume(struct hif_softc *hif_ctx) +{ + int ret = 0; + + HIF_ENTER(); + if (hif_usb_diag_write_cold_reset(hif_ctx) != QDF_STATUS_SUCCESS) + ret = 1; + + HIF_EXIT(); + return ret; +} + +/** + * hif_usb_open()- initialization routine for usb bus + * @ol_sc: ol_sc + * @bus_type: bus type + * + * Return: QDF_STATUS_SUCCESS on success and error QDF status on failure + */ +QDF_STATUS hif_usb_open(struct hif_softc *hif_ctx, + enum qdf_bus_type bus_type) +{ + hif_ctx->bus_type = bus_type; + return QDF_STATUS_SUCCESS; +} + +/** + * hif_usb_disable_isr(): disable isr + * @hif_ctx: struct hif_softc + * + * Return: void + */ +void hif_usb_disable_isr(struct hif_softc *hif_ctx) +{ + /* TODO */ +} + +/** + * hif_usb_reg_tbl_attach()- attach hif, target register tables + * @scn: pointer to ol_softc structure + * + * Attach host and target register tables based on target_type, target_version + * + * Return: none + */ +void hif_usb_reg_tbl_attach(struct hif_softc *scn) +{ + u_int32_t hif_type, target_type; + int32_t ret = 0; + uint32_t chip_id; + QDF_STATUS rv; + struct hif_target_info *tgt_info = &scn->target_info; + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + + if (!scn->hostdef && !scn->targetdef) { + switch (tgt_info->target_type) { + case TARGET_TYPE_AR6320: + switch (tgt_info->target_version) { + case AR6320_REV1_VERSION: + case AR6320_REV1_1_VERSION: + case AR6320_REV1_3_VERSION: + hif_type = HIF_TYPE_AR6320; + target_type = TARGET_TYPE_AR6320; + break; + case AR6320_REV2_1_VERSION: + case AR6320_REV3_VERSION: + case QCA9377_REV1_1_VERSION: + case QCA9379_REV1_VERSION: + hif_type = HIF_TYPE_AR6320V2; + target_type = TARGET_TYPE_AR6320V2; + break; + default: + ret = -1; + break; + } + break; + default: + ret = -1; + break; + } + + if (ret) + return; + + /* assign target register table if we find + * corresponding type + */ + hif_register_tbl_attach(scn, hif_type); + target_register_tbl_attach(scn, target_type); + /* read the chip revision*/ + rv = hif_diag_read_access(hif_hdl, + (CHIP_ID_ADDRESS | + RTC_SOC_BASE_ADDRESS), + &chip_id); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: get chip id val (%d)", __func__, + rv); + } + tgt_info->target_revision = + CHIP_ID_REVISION_GET(chip_id); + } +} + +/** + * hif_usb_get_hw_info()- attach register table for USB + * @hif_ctx: pointer to hif_softc structure + + * This function is used to attach the host and target register tables. + * Ideally, we should not attach register tables as a part of this function. + * There is scope of cleanup to move register table attach during + * initialization for USB bus. + * + * The reason we are doing register table attach for USB here is that, it relies + * on target_info->target_type and target_info->target_version, + * which get populated during bmi_firmware_download. "hif_get_fw_info" is the + * only initialization related call into HIF there after. + * + * To fix this, we can move the "get target info, functionality currently in + * bmi_firmware_download into hif initialization functions. This change will + * affect all buses. Can be taken up as a part of convergence. + * + * Return: none + */ +void hif_usb_get_hw_info(struct hif_softc *hif_ctx) +{ + hif_usb_reg_tbl_attach(hif_ctx); +} + +#if defined(CONFIG_PLD_USB_CNSS) && !defined(CONFIG_BYPASS_QMI) +/** + * hif_bus_configure() - configure the bus + * @scn: pointer to the hif context. + * + * return: 0 for success. nonzero for failure. + */ +int hif_usb_bus_configure(struct hif_softc *scn) +{ + struct pld_wlan_enable_cfg cfg; + enum pld_driver_mode mode; + uint32_t con_mode = hif_get_conparam(scn); + + if (QDF_GLOBAL_FTM_MODE == con_mode) + mode = PLD_FTM; + else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode) + mode = PLD_COLDBOOT_CALIBRATION; + else if (QDF_IS_EPPING_ENABLED(con_mode)) + mode = PLD_EPPING; + else + mode = PLD_MISSION; + + return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode); +} +#else +/** + * hif_bus_configure() - configure the bus + * @scn: pointer to the hif context. + * + * return: 0 for success. nonzero for failure. + */ +int hif_usb_bus_configure(struct hif_softc *scn) +{ + return 0; +} +#endif + +/** + * hif_usb_irq_enable() - hif_usb_irq_enable + * @scn: hif_softc + * @ce_id: ce_id + * + * Return: void + */ +void hif_usb_irq_enable(struct hif_softc *scn, int ce_id) +{ +} + +/** + * hif_usb_irq_disable() - hif_usb_irq_disable + * @scn: hif_softc + * @ce_id: ce_id + * + * Return: void + */ +void hif_usb_irq_disable(struct hif_softc *scn, int ce_id) +{ +} + +/** + * hif_usb_shutdown_bus_device() - This function shuts down the device + * @scn: hif opaque pointer + * + * Return: void + */ +void hif_usb_shutdown_bus_device(struct hif_softc *scn) +{ +} + +/** + * hif_trigger_dump() - trigger various dump cmd + * @scn: struct hif_opaque_softc + * @cmd_id: dump command id + * @start: start/stop dump + * + * Return: None + */ +void hif_trigger_dump(struct hif_opaque_softc *scn, uint8_t cmd_id, bool start) +{ +} + +/** + * hif_wlan_disable() - call the platform driver to disable wlan + * @scn: scn + * + * Return: void + */ +void hif_wlan_disable(struct hif_softc *scn) +{ +} + +/** + * hif_fw_assert_ramdump_pattern() - handle firmware assert with ramdump pattern + * @sc: pointer to hif_usb_softc structure + * + * Return: void + */ + +void hif_fw_assert_ramdump_pattern(struct hif_usb_softc *sc) +{ + uint32_t *reg, pattern, i = 0; + uint32_t len; + uint8_t *data; + uint8_t *ram_ptr = NULL; + char *fw_ram_seg_name[FW_RAM_SEG_CNT] = {"DRAM", "IRAM", "AXI"}; + size_t fw_ram_reg_size[FW_RAM_SEG_CNT] = { + FW_RAMDUMP_DRAMSIZE, + FW_RAMDUMP_IRAMSIZE, + FW_RAMDUMP_AXISIZE }; + + data = sc->fw_data; + len = sc->fw_data_len; + pattern = *((uint32_t *) data); + + qdf_assert(sc->ramdump_index < FW_RAM_SEG_CNT); + i = sc->ramdump_index; + reg = (uint32_t *) (data + 4); + if (sc->fw_ram_dumping == 0) { + sc->fw_ram_dumping = 1; + HIF_ERROR("Firmware %s dump:\n", fw_ram_seg_name[i]); + sc->ramdump[i] = + qdf_mem_malloc(sizeof(struct fw_ramdump) + + fw_ram_reg_size[i]); + if (!sc->ramdump[i]) + QDF_BUG(0); + + (sc->ramdump[i])->mem = (uint8_t *) (sc->ramdump[i] + 1); + fw_ram_seg_addr[i] = (sc->ramdump[i])->mem; + HIF_ERROR("FW %s start addr = %#08x\n", + fw_ram_seg_name[i], *reg); + HIF_ERROR("Memory addr for %s = %pK\n", + fw_ram_seg_name[i], + (sc->ramdump[i])->mem); + (sc->ramdump[i])->start_addr = *reg; + (sc->ramdump[i])->length = 0; + } + reg++; + ram_ptr = (sc->ramdump[i])->mem + (sc->ramdump[i])->length; + (sc->ramdump[i])->length += (len - 8); + if (sc->ramdump[i]->length <= fw_ram_reg_size[i]) { + qdf_mem_copy(ram_ptr, (uint8_t *) reg, len - 8); + } else { + HIF_ERROR("memory copy overlap\n"); + QDF_BUG(0); + } + + if (pattern == FW_RAMDUMP_END_PATTERN) { + HIF_ERROR("%s memory size = %d\n", fw_ram_seg_name[i], + (sc->ramdump[i])->length); + if (i == (FW_RAM_SEG_CNT - 1)) + QDF_BUG(0); + + sc->ramdump_index++; + sc->fw_ram_dumping = 0; + } +} + +/** + * hif_usb_ramdump_handler(): dump bus debug registers + * @scn: struct hif_opaque_softc + * + * This function is to receive information of firmware crash dump, and + * save it in host memory. It consists of 5 parts: registers, call stack, + * DRAM dump, IRAM dump, and AXI dump, and they are reported to host in order. + * + * registers: wrapped in a USB packet by starting as FW_ASSERT_PATTERN and + * 60 registers. + * call stack: wrapped in multiple USB packets, and each of them starts as + * FW_REG_PATTERN and contains multiple double-words. The tail + * of the last packet is FW_REG_END_PATTERN. + * DRAM dump: wrapped in multiple USB pakcets, and each of them start as + * FW_RAMDUMP_PATTERN and contains multiple double-wors. The tail + * of the last packet is FW_RAMDUMP_END_PATTERN; + * IRAM dump and AXI dump are with the same format as DRAM dump. + * + * Return: 0 for success or error code + */ + +void hif_usb_ramdump_handler(struct hif_opaque_softc *scn) +{ + uint32_t *reg, pattern, i, start_addr = 0; + uint32_t len; + uint8_t *data; + uint8_t str_buf[128]; + uint32_t remaining; + struct hif_usb_softc *sc = HIF_GET_USB_SOFTC(scn); + struct hif_softc *hif_ctx = HIF_GET_SOFTC(scn); + struct hif_target_info *tgt_info = &hif_ctx->target_info; + + data = sc->fw_data; + len = sc->fw_data_len; + pattern = *((uint32_t *) data); + + if (pattern == FW_ASSERT_PATTERN) { + HIF_ERROR("Firmware crash detected...\n"); + HIF_ERROR("target_type: %d.target_version %d. target_revision%d.", + tgt_info->target_type, + tgt_info->target_version, + tgt_info->target_revision); + + reg = (uint32_t *) (data + 4); + print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET, 16, 4, reg, + min_t(uint32_t, len - 4, FW_REG_DUMP_CNT * 4), + false); + sc->fw_ram_dumping = 0; + + } else if (pattern == FW_REG_PATTERN) { + reg = (uint32_t *) (data + 4); + start_addr = *reg++; + if (sc->fw_ram_dumping == 0) { + qdf_nofl_err("Firmware stack dump:"); + sc->fw_ram_dumping = 1; + fw_stack_addr = start_addr; + } + remaining = len - 8; + /* len is in byte, but it's printed in double-word. */ + for (i = 0; i < (len - 8); i += 16) { + if ((*reg == FW_REG_END_PATTERN) && (i == len - 12)) { + sc->fw_ram_dumping = 0; + qdf_nofl_err("Stack start address = %#08x", + fw_stack_addr); + break; + } + hex_dump_to_buffer(reg, remaining, 16, 4, str_buf, + sizeof(str_buf), false); + qdf_nofl_err("%#08x: %s", start_addr + i, str_buf); + remaining -= 16; + reg += 4; + } + } else if ((!sc->enable_self_recovery) && + ((pattern & FW_RAMDUMP_PATTERN_MASK) == + FW_RAMDUMP_PATTERN)) { + hif_fw_assert_ramdump_pattern(sc); + } +} + +#ifndef QCA_WIFI_3_0 +/** + * hif_check_fw_reg(): hif_check_fw_reg + * @scn: scn + * @state: + * + * Return: int + */ +int hif_check_fw_reg(struct hif_opaque_softc *scn) +{ + return 0; +} +#endif + +/** + * hif_usb_needs_bmi() - return true if the soc needs bmi through the driver + * @scn: hif context + * + * Return: true if soc needs driver bmi otherwise false + */ +bool hif_usb_needs_bmi(struct hif_softc *scn) +{ + struct hif_target_info *tgt_info = &scn->target_info; + + /* BMI is not supported in Genoa */ + if (tgt_info->target_type == TARGET_TYPE_QCN7605) + return false; + + return true; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/usb/if_usb.h b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/if_usb.h new file mode 100644 index 0000000000000000000000000000000000000000..1dd3840559446f3ceb10c02ab6f45c973cca0b59 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/if_usb.h @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __ATH_USB_H__ +#define __ATH_USB_H__ + +#include + +/* + * There may be some pending tx frames during platform suspend. + * Suspend operation should be delayed until those tx frames are + * transferred from the host to target. This macro specifies how + * long suspend thread has to sleep before checking pending tx + * frame count. + */ +#define OL_ATH_TX_DRAIN_WAIT_DELAY 50 /* ms */ +/* + * Wait time (in unit of OL_ATH_TX_DRAIN_WAIT_DELAY) for pending + * tx frame completion before suspend. Refer: hif_pci_suspend() + */ +#define OL_ATH_TX_DRAIN_WAIT_CNT 10 + +#define CONFIG_COPY_ENGINE_SUPPORT /* TBDXXX: here for now */ +#define ATH_DBG_DEFAULT 0 +#include +#include +#include +#include "osapi_linux.h" +#include "hif_main.h" +#include "hif.h" + +#define FW_REG_DUMP_CNT 60 + +/* Magic patterns for FW to report crash information (Rome USB) */ +#define FW_ASSERT_PATTERN 0x0000c600 +#define FW_REG_PATTERN 0x0000d600 +#define FW_REG_END_PATTERN 0x0000e600 +#define FW_RAMDUMP_PATTERN 0x0000f600 +#define FW_RAMDUMP_END_PATTERN 0x0000f601 +#define FW_RAMDUMP_PATTERN_MASK 0xfffffff0 +#define FW_RAMDUMP_DRAMSIZE 0x00098000 +#define FW_RAMDUMP_IRAMSIZE 0x000C0000 +#define FW_RAMDUMP_AXISIZE 0x00020000 + +/* FW RAM segments (Rome USB) */ +enum { + FW_RAM_SEG_DRAM, + FW_RAM_SEG_IRAM, + FW_RAM_SEG_AXI, + FW_RAM_SEG_CNT +}; + +/* Allocate 384K memory to save each segment of ram dump */ +#define FW_RAMDUMP_SEG_SIZE 393216 + +/* structure to save RAM dump information */ +struct fw_ramdump { + uint32_t start_addr; + uint32_t length; + uint8_t *mem; +}; + +/* USB Endpoint definition */ +enum HIF_USB_PIPE_ID { + HIF_TX_CTRL_PIPE = 0, + HIF_TX_DATA_LP_PIPE, + HIF_TX_DATA_MP_PIPE, + HIF_TX_DATA_HP_PIPE, + HIF_RX_CTRL_PIPE, + HIF_RX_DATA_PIPE, + HIF_RX_DATA2_PIPE, + HIF_RX_INT_PIPE, + HIF_USB_PIPE_MAX +}; + +#define HIF_USB_PIPE_INVALID HIF_USB_PIPE_MAX + +struct HIF_USB_PIPE { + DL_LIST urb_list_head; + DL_LIST urb_pending_list; + int32_t urb_alloc; + int32_t urb_cnt; + int32_t urb_cnt_thresh; + unsigned int usb_pipe_handle; + uint32_t flags; + uint8_t ep_address; + uint8_t logical_pipe_num; + struct HIF_DEVICE_USB *device; + uint16_t max_packet_size; +#ifdef HIF_USB_TASKLET + struct tasklet_struct io_complete_tasklet; +#else + struct work_struct io_complete_work; +#endif + struct sk_buff_head io_comp_queue; + struct usb_endpoint_descriptor *ep_desc; + int32_t urb_prestart_cnt; +}; + +struct HIF_DEVICE_USB { + struct hif_softc ol_sc; + qdf_spinlock_t cs_lock; + qdf_spinlock_t tx_lock; + qdf_spinlock_t rx_lock; + qdf_spinlock_t rx_prestart_lock; + struct hif_msg_callbacks htc_callbacks; + struct usb_device *udev; + struct usb_interface *interface; + struct HIF_USB_PIPE pipes[HIF_USB_PIPE_MAX]; + uint8_t *diag_cmd_buffer; + uint8_t *diag_resp_buffer; + void *claimed_context; + A_BOOL is_bundle_enabled; + uint16_t rx_bundle_cnt; + uint32_t rx_bundle_buf_len; + bool rx_ctrl_pipe_supported; +}; + +struct hif_usb_softc { + struct HIF_DEVICE_USB hif_hdl; + /* For efficiency, should be first in struct */ + struct device *dev; + struct usb_dev *pdev; + /* + * Guard changes to Target HW state and to software + * structures that track hardware state. + */ + u16 devid; + struct usb_interface *interface; + struct notifier_block reboot_notifier; /* default mode before reboot */ + u8 suspend_state; + u8 *fw_data; + u32 fw_data_len; + /* structure to save FW RAM dump (Rome USB) */ + struct fw_ramdump *ramdump[FW_RAM_SEG_CNT]; + uint8_t ramdump_index; + bool fw_ram_dumping; + /* enable FW self-recovery for Rome USB */ + bool enable_self_recovery; +}; + +/** + * hif_dump_info() - dump info about all HIF pipes and endpoints + * @scn: pointer to hif_opaque_softc + * + * Return: none + */ +void hif_dump_info(struct hif_opaque_softc *scn); + +/** + * hif_suspend_wow() - Send wow suspend command + * @scn: pointer to hif_opaque_softc + * + * Return: none + */ +void hif_suspend_wow(struct hif_opaque_softc *scn); +#endif /* __ATH_USB_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/usb/regtable_usb.c b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/regtable_usb.c new file mode 100644 index 0000000000000000000000000000000000000000..991eea70ba824f2465fcb7f697ceaa41a152940d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/regtable_usb.c @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "target_type.h" +#include "targaddrs.h" +#include "regtable_usb.h" +#include "ar9888def.h" +#include "ar6320def.h" +#include "ar6320v2def.h" +#include "hif_debug.h" + +void target_register_tbl_attach(struct hif_softc *scn, + uint32_t target_type) +{ + switch (target_type) { + case TARGET_TYPE_AR9888: + scn->targetdef = &ar9888_targetdef; + break; + case TARGET_TYPE_AR6320: + scn->targetdef = &ar6320_targetdef; + break; + case TARGET_TYPE_AR6320V2: + scn->targetdef = &ar6320v2_targetdef; + break; + default: + HIF_ERROR("%s: unknown target_type %u", __func__, target_type); + break; + } +} +void hif_register_tbl_attach(struct hif_softc *scn, uint32_t hif_type) +{ + switch (hif_type) { + case HIF_TYPE_AR9888: + scn->hostdef = &ar9888_hostdef; + break; + case HIF_TYPE_AR6320: + scn->hostdef = &ar6320_hostdef; + break; + case HIF_TYPE_AR6320V2: + scn->hostdef = &ar6320v2_hostdef; + break; + default: + HIF_ERROR("%s: unknown hif_type %u", __func__, hif_type); + break; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/usb/regtable_usb.h b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/regtable_usb.h new file mode 100644 index 0000000000000000000000000000000000000000..eded85b9d114b5a31e3e2b0e61b3121982293723 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/regtable_usb.h @@ -0,0 +1,1222 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _REGTABLE_USB_H_ +#define _REGTABLE_USB_H_ +#include "if_usb.h" + +#define MISSING 0 + +struct targetdef_s { + u_int32_t d_RTC_SOC_BASE_ADDRESS; + u_int32_t d_RTC_WMAC_BASE_ADDRESS; + u_int32_t d_SYSTEM_SLEEP_OFFSET; + u_int32_t d_WLAN_SYSTEM_SLEEP_OFFSET; + u_int32_t d_WLAN_SYSTEM_SLEEP_DISABLE_LSB; + u_int32_t d_WLAN_SYSTEM_SLEEP_DISABLE_MASK; + u_int32_t d_CLOCK_CONTROL_OFFSET; + u_int32_t d_CLOCK_CONTROL_SI0_CLK_MASK; + u_int32_t d_RESET_CONTROL_OFFSET; + u_int32_t d_RESET_CONTROL_MBOX_RST_MASK; + u_int32_t d_RESET_CONTROL_SI0_RST_MASK; + u_int32_t d_WLAN_RESET_CONTROL_OFFSET; + u_int32_t d_WLAN_RESET_CONTROL_COLD_RST_MASK; + u_int32_t d_WLAN_RESET_CONTROL_WARM_RST_MASK; + u_int32_t d_GPIO_BASE_ADDRESS; + u_int32_t d_GPIO_PIN0_OFFSET; + u_int32_t d_GPIO_PIN1_OFFSET; + u_int32_t d_GPIO_PIN0_CONFIG_MASK; + u_int32_t d_GPIO_PIN1_CONFIG_MASK; + u_int32_t d_SI_CONFIG_BIDIR_OD_DATA_LSB; + u_int32_t d_SI_CONFIG_BIDIR_OD_DATA_MASK; + u_int32_t d_SI_CONFIG_I2C_LSB; + u_int32_t d_SI_CONFIG_I2C_MASK; + u_int32_t d_SI_CONFIG_POS_SAMPLE_LSB; + u_int32_t d_SI_CONFIG_POS_SAMPLE_MASK; + u_int32_t d_SI_CONFIG_INACTIVE_CLK_LSB; + u_int32_t d_SI_CONFIG_INACTIVE_CLK_MASK; + u_int32_t d_SI_CONFIG_INACTIVE_DATA_LSB; + u_int32_t d_SI_CONFIG_INACTIVE_DATA_MASK; + u_int32_t d_SI_CONFIG_DIVIDER_LSB; + u_int32_t d_SI_CONFIG_DIVIDER_MASK; + u_int32_t d_SI_BASE_ADDRESS; + u_int32_t d_SI_CONFIG_OFFSET; + u_int32_t d_SI_TX_DATA0_OFFSET; + u_int32_t d_SI_TX_DATA1_OFFSET; + u_int32_t d_SI_RX_DATA0_OFFSET; + u_int32_t d_SI_RX_DATA1_OFFSET; + u_int32_t d_SI_CS_OFFSET; + u_int32_t d_SI_CS_DONE_ERR_MASK; + u_int32_t d_SI_CS_DONE_INT_MASK; + u_int32_t d_SI_CS_START_LSB; + u_int32_t d_SI_CS_START_MASK; + u_int32_t d_SI_CS_RX_CNT_LSB; + u_int32_t d_SI_CS_RX_CNT_MASK; + u_int32_t d_SI_CS_TX_CNT_LSB; + u_int32_t d_SI_CS_TX_CNT_MASK; + u_int32_t d_BOARD_DATA_SZ; + u_int32_t d_BOARD_EXT_DATA_SZ; + u_int32_t d_MBOX_BASE_ADDRESS; + u_int32_t d_LOCAL_SCRATCH_OFFSET; + u_int32_t d_CPU_CLOCK_OFFSET; + u_int32_t d_LPO_CAL_OFFSET; + u_int32_t d_GPIO_PIN10_OFFSET; + u_int32_t d_GPIO_PIN11_OFFSET; + u_int32_t d_GPIO_PIN12_OFFSET; + u_int32_t d_GPIO_PIN13_OFFSET; + u_int32_t d_CLOCK_GPIO_OFFSET; + u_int32_t d_CPU_CLOCK_STANDARD_LSB; + u_int32_t d_CPU_CLOCK_STANDARD_MASK; + u_int32_t d_LPO_CAL_ENABLE_LSB; + u_int32_t d_LPO_CAL_ENABLE_MASK; + u_int32_t d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB; + u_int32_t d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK; + u_int32_t d_ANALOG_INTF_BASE_ADDRESS; + u_int32_t d_WLAN_MAC_BASE_ADDRESS; + u_int32_t d_CE0_BASE_ADDRESS; + u_int32_t d_CE1_BASE_ADDRESS; + u_int32_t d_FW_INDICATOR_ADDRESS; + u_int32_t d_DRAM_BASE_ADDRESS; + u_int32_t d_SOC_CORE_BASE_ADDRESS; + u_int32_t d_CORE_CTRL_ADDRESS; + u_int32_t d_CE_COUNT; + u_int32_t d_MSI_NUM_REQUEST; + u_int32_t d_MSI_ASSIGN_FW; + u_int32_t d_MSI_ASSIGN_CE_INITIAL; + u_int32_t d_PCIE_INTR_ENABLE_ADDRESS; + u_int32_t d_PCIE_INTR_CLR_ADDRESS; + u_int32_t d_PCIE_INTR_FIRMWARE_MASK; + u_int32_t d_PCIE_INTR_CE_MASK_ALL; + u_int32_t d_CORE_CTRL_CPU_INTR_MASK; + u_int32_t d_SR_WR_INDEX_ADDRESS; + u_int32_t d_DST_WATERMARK_ADDRESS; + + /* htt_rx.c */ + u_int32_t d_RX_MSDU_END_4_FIRST_MSDU_MASK; + u_int32_t d_RX_MSDU_END_4_FIRST_MSDU_LSB; + uint32_t d_RX_MPDU_START_0_RETRY_LSB; + uint32_t d_RX_MPDU_START_0_RETRY_MASK; + u_int32_t d_RX_MPDU_START_0_SEQ_NUM_MASK; + u_int32_t d_RX_MPDU_START_0_SEQ_NUM_LSB; + u_int32_t d_RX_MPDU_START_2_PN_47_32_LSB; + u_int32_t d_RX_MPDU_START_2_PN_47_32_MASK; + uint32_t d_RX_MPDU_START_2_TID_LSB; + uint32_t d_RX_MPDU_START_2_TID_MASK; + u_int32_t d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK; + u_int32_t d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB; + u_int32_t d_RX_MSDU_END_1_KEY_ID_OCT_MASK; + u_int32_t d_RX_MSDU_END_1_KEY_ID_OCT_LSB; + u_int32_t d_RX_MSDU_END_4_LAST_MSDU_MASK; + u_int32_t d_RX_MSDU_END_4_LAST_MSDU_LSB; + u_int32_t d_RX_ATTENTION_0_MCAST_BCAST_MASK; + u_int32_t d_RX_ATTENTION_0_MCAST_BCAST_LSB; + u_int32_t d_RX_ATTENTION_0_FRAGMENT_MASK; + u_int32_t d_RX_ATTENTION_0_FRAGMENT_LSB; + u_int32_t d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK; + u_int32_t d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK; + u_int32_t d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB; + u_int32_t d_RX_MSDU_START_0_MSDU_LENGTH_MASK; + u_int32_t d_RX_MSDU_START_0_MSDU_LENGTH_LSB; + u_int32_t d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET; + u_int32_t d_RX_MSDU_START_2_DECAP_FORMAT_MASK; + u_int32_t d_RX_MSDU_START_2_DECAP_FORMAT_LSB; + u_int32_t d_RX_MPDU_START_0_ENCRYPTED_MASK; + u_int32_t d_RX_MPDU_START_0_ENCRYPTED_LSB; + u_int32_t d_RX_ATTENTION_0_MORE_DATA_MASK; + u_int32_t d_RX_ATTENTION_0_MSDU_DONE_MASK; + u_int32_t d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK; + /* end */ + /* copy_engine.c */ + u_int32_t d_DST_WR_INDEX_ADDRESS; + u_int32_t d_SRC_WATERMARK_ADDRESS; + u_int32_t d_SRC_WATERMARK_LOW_MASK; + u_int32_t d_SRC_WATERMARK_HIGH_MASK; + u_int32_t d_DST_WATERMARK_LOW_MASK; + u_int32_t d_DST_WATERMARK_HIGH_MASK; + u_int32_t d_CURRENT_SRRI_ADDRESS; + u_int32_t d_CURRENT_DRRI_ADDRESS; + u_int32_t d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK; + u_int32_t d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK; + u_int32_t d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK; + u_int32_t d_HOST_IS_DST_RING_LOW_WATERMARK_MASK; + u_int32_t d_HOST_IS_ADDRESS; + u_int32_t d_HOST_IS_COPY_COMPLETE_MASK; + u_int32_t d_CE_WRAPPER_BASE_ADDRESS; + u_int32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS; + u_int32_t d_HOST_IE_ADDRESS; + u_int32_t d_HOST_IE_COPY_COMPLETE_MASK; + u_int32_t d_SR_BA_ADDRESS; + u_int32_t d_SR_SIZE_ADDRESS; + u_int32_t d_CE_CTRL1_ADDRESS; + u_int32_t d_CE_CTRL1_DMAX_LENGTH_MASK; + u_int32_t d_DR_BA_ADDRESS; + u_int32_t d_DR_SIZE_ADDRESS; + u_int32_t d_MISC_IE_ADDRESS; + u_int32_t d_MISC_IS_AXI_ERR_MASK; + u_int32_t d_MISC_IS_DST_ADDR_ERR_MASK; + u_int32_t d_MISC_IS_SRC_LEN_ERR_MASK; + u_int32_t d_MISC_IS_DST_MAX_LEN_VIO_MASK; + u_int32_t d_MISC_IS_DST_RING_OVERFLOW_MASK; + u_int32_t d_MISC_IS_SRC_RING_OVERFLOW_MASK; + u_int32_t d_SRC_WATERMARK_LOW_LSB; + u_int32_t d_SRC_WATERMARK_HIGH_LSB; + u_int32_t d_DST_WATERMARK_LOW_LSB; + u_int32_t d_DST_WATERMARK_HIGH_LSB; + u_int32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK; + u_int32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB; + u_int32_t d_CE_CTRL1_DMAX_LENGTH_LSB; + u_int32_t d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK; + u_int32_t d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK; + u_int32_t d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB; + u_int32_t d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB; + u_int32_t d_WLAN_DEBUG_INPUT_SEL_OFFSET; + u_int32_t d_WLAN_DEBUG_INPUT_SEL_SRC_MSB; + u_int32_t d_WLAN_DEBUG_INPUT_SEL_SRC_LSB; + u_int32_t d_WLAN_DEBUG_INPUT_SEL_SRC_MASK; + u_int32_t d_WLAN_DEBUG_CONTROL_OFFSET; + u_int32_t d_WLAN_DEBUG_CONTROL_ENABLE_MSB; + u_int32_t d_WLAN_DEBUG_CONTROL_ENABLE_LSB; + u_int32_t d_WLAN_DEBUG_CONTROL_ENABLE_MASK; + u_int32_t d_WLAN_DEBUG_OUT_OFFSET; + u_int32_t d_WLAN_DEBUG_OUT_DATA_MSB; + u_int32_t d_WLAN_DEBUG_OUT_DATA_LSB; + u_int32_t d_WLAN_DEBUG_OUT_DATA_MASK; + u_int32_t d_AMBA_DEBUG_BUS_OFFSET; + u_int32_t d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB; + u_int32_t d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB; + u_int32_t d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK; + u_int32_t d_AMBA_DEBUG_BUS_SEL_MSB; + u_int32_t d_AMBA_DEBUG_BUS_SEL_LSB; + u_int32_t d_AMBA_DEBUG_BUS_SEL_MASK; + u_int32_t d_CE_WRAPPER_DEBUG_OFFSET; + u_int32_t d_CE_WRAPPER_DEBUG_SEL_MSB; + u_int32_t d_CE_WRAPPER_DEBUG_SEL_LSB; + u_int32_t d_CE_WRAPPER_DEBUG_SEL_MASK; + u_int32_t d_CE_DEBUG_OFFSET; + u_int32_t d_CE_DEBUG_SEL_MSB; + u_int32_t d_CE_DEBUG_SEL_LSB; + u_int32_t d_CE_DEBUG_SEL_MASK; + /* end */ + /* PLL start */ + u_int32_t d_EFUSE_OFFSET; + u_int32_t d_EFUSE_XTAL_SEL_MSB; + u_int32_t d_EFUSE_XTAL_SEL_LSB; + u_int32_t d_EFUSE_XTAL_SEL_MASK; + u_int32_t d_BB_PLL_CONFIG_OFFSET; + u_int32_t d_BB_PLL_CONFIG_OUTDIV_MSB; + u_int32_t d_BB_PLL_CONFIG_OUTDIV_LSB; + u_int32_t d_BB_PLL_CONFIG_OUTDIV_MASK; + u_int32_t d_BB_PLL_CONFIG_FRAC_MSB; + u_int32_t d_BB_PLL_CONFIG_FRAC_LSB; + u_int32_t d_BB_PLL_CONFIG_FRAC_MASK; + u_int32_t d_WLAN_PLL_SETTLE_TIME_MSB; + u_int32_t d_WLAN_PLL_SETTLE_TIME_LSB; + u_int32_t d_WLAN_PLL_SETTLE_TIME_MASK; + u_int32_t d_WLAN_PLL_SETTLE_OFFSET; + u_int32_t d_WLAN_PLL_SETTLE_SW_MASK; + u_int32_t d_WLAN_PLL_SETTLE_RSTMASK; + u_int32_t d_WLAN_PLL_SETTLE_RESET; + u_int32_t d_WLAN_PLL_CONTROL_NOPWD_MSB; + u_int32_t d_WLAN_PLL_CONTROL_NOPWD_LSB; + u_int32_t d_WLAN_PLL_CONTROL_NOPWD_MASK; + u_int32_t d_WLAN_PLL_CONTROL_BYPASS_MSB; + u_int32_t d_WLAN_PLL_CONTROL_BYPASS_LSB; + u_int32_t d_WLAN_PLL_CONTROL_BYPASS_MASK; + u_int32_t d_WLAN_PLL_CONTROL_BYPASS_RESET; + u_int32_t d_WLAN_PLL_CONTROL_CLK_SEL_MSB; + u_int32_t d_WLAN_PLL_CONTROL_CLK_SEL_LSB; + u_int32_t d_WLAN_PLL_CONTROL_CLK_SEL_MASK; + u_int32_t d_WLAN_PLL_CONTROL_CLK_SEL_RESET; + u_int32_t d_WLAN_PLL_CONTROL_REFDIV_MSB; + u_int32_t d_WLAN_PLL_CONTROL_REFDIV_LSB; + u_int32_t d_WLAN_PLL_CONTROL_REFDIV_MASK; + u_int32_t d_WLAN_PLL_CONTROL_REFDIV_RESET; + u_int32_t d_WLAN_PLL_CONTROL_DIV_MSB; + u_int32_t d_WLAN_PLL_CONTROL_DIV_LSB; + u_int32_t d_WLAN_PLL_CONTROL_DIV_MASK; + u_int32_t d_WLAN_PLL_CONTROL_DIV_RESET; + u_int32_t d_WLAN_PLL_CONTROL_OFFSET; + u_int32_t d_WLAN_PLL_CONTROL_SW_MASK; + u_int32_t d_WLAN_PLL_CONTROL_RSTMASK; + u_int32_t d_WLAN_PLL_CONTROL_RESET; + u_int32_t d_SOC_CORE_CLK_CTRL_OFFSET; + u_int32_t d_SOC_CORE_CLK_CTRL_DIV_MSB; + u_int32_t d_SOC_CORE_CLK_CTRL_DIV_LSB; + u_int32_t d_SOC_CORE_CLK_CTRL_DIV_MASK; + u_int32_t d_RTC_SYNC_STATUS_PLL_CHANGING_MSB; + u_int32_t d_RTC_SYNC_STATUS_PLL_CHANGING_LSB; + u_int32_t d_RTC_SYNC_STATUS_PLL_CHANGING_MASK; + u_int32_t d_RTC_SYNC_STATUS_PLL_CHANGING_RESET; + u_int32_t d_RTC_SYNC_STATUS_OFFSET; + u_int32_t d_SOC_CPU_CLOCK_OFFSET; + u_int32_t d_SOC_CPU_CLOCK_STANDARD_MSB; + u_int32_t d_SOC_CPU_CLOCK_STANDARD_LSB; + u_int32_t d_SOC_CPU_CLOCK_STANDARD_MASK; + /* PLL end */ + u_int32_t d_SOC_POWER_REG_OFFSET; + u_int32_t d_PCIE_INTR_CAUSE_ADDRESS; + u_int32_t d_SOC_RESET_CONTROL_ADDRESS; + u_int32_t d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK; + u_int32_t d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB; + u_int32_t d_SOC_RESET_CONTROL_CE_RST_MASK; + u_int32_t d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK; + u_int32_t d_CPU_INTR_ADDRESS; + u_int32_t d_SOC_LF_TIMER_CONTROL0_ADDRESS; + u_int32_t d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK; + u_int32_t d_SOC_LF_TIMER_STATUS0_ADDRESS; + /* chip id start */ + u_int32_t d_SOC_CHIP_ID_ADDRESS; + u_int32_t d_SOC_CHIP_ID_VERSION_MASK; + u_int32_t d_SOC_CHIP_ID_VERSION_LSB; + u_int32_t d_SOC_CHIP_ID_REVISION_MASK; + u_int32_t d_SOC_CHIP_ID_REVISION_LSB; + /* chip id end */ +}; + +#define RTC_SOC_BASE_ADDRESS \ + (scn->targetdef->d_RTC_SOC_BASE_ADDRESS) +#define RTC_WMAC_BASE_ADDRESS \ + (scn->targetdef->d_RTC_WMAC_BASE_ADDRESS) +#define SYSTEM_SLEEP_OFFSET \ + (scn->targetdef->d_SYSTEM_SLEEP_OFFSET) +#define WLAN_SYSTEM_SLEEP_OFFSET \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_OFFSET) +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_LSB) +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_MASK) +#define CLOCK_CONTROL_OFFSET \ + (scn->targetdef->d_CLOCK_CONTROL_OFFSET) +#define CLOCK_CONTROL_SI0_CLK_MASK \ + (scn->targetdef->d_CLOCK_CONTROL_SI0_CLK_MASK) +#define RESET_CONTROL_OFFSET \ + (scn->targetdef->d_RESET_CONTROL_OFFSET) +#define RESET_CONTROL_MBOX_RST_MASK \ + (scn->targetdef->d_RESET_CONTROL_MBOX_RST_MASK) +#define RESET_CONTROL_SI0_RST_MASK \ + (scn->targetdef->d_RESET_CONTROL_SI0_RST_MASK) +#define WLAN_RESET_CONTROL_OFFSET \ + (scn->targetdef->d_WLAN_RESET_CONTROL_OFFSET) +#define WLAN_RESET_CONTROL_COLD_RST_MASK \ + (scn->targetdef->d_WLAN_RESET_CONTROL_COLD_RST_MASK) +#define WLAN_RESET_CONTROL_WARM_RST_MASK \ + (scn->targetdef->d_WLAN_RESET_CONTROL_WARM_RST_MASK) +#define GPIO_BASE_ADDRESS \ + (scn->targetdef->d_GPIO_BASE_ADDRESS) +#define GPIO_PIN0_OFFSET \ + (scn->targetdef->d_GPIO_PIN0_OFFSET) +#define GPIO_PIN1_OFFSET \ + (scn->targetdef->d_GPIO_PIN1_OFFSET) +#define GPIO_PIN0_CONFIG_MASK \ + (scn->targetdef->d_GPIO_PIN0_CONFIG_MASK) +#define GPIO_PIN1_CONFIG_MASK \ + (scn->targetdef->d_GPIO_PIN1_CONFIG_MASK) +#define SI_CONFIG_BIDIR_OD_DATA_LSB \ + (scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_LSB) +#define SI_CONFIG_BIDIR_OD_DATA_MASK \ + (scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_MASK) +#define SI_CONFIG_I2C_LSB \ + (scn->targetdef->d_SI_CONFIG_I2C_LSB) +#define SI_CONFIG_I2C_MASK \ + (scn->targetdef->d_SI_CONFIG_I2C_MASK) +#define SI_CONFIG_POS_SAMPLE_LSB \ + (scn->targetdef->d_SI_CONFIG_POS_SAMPLE_LSB) +#define SI_CONFIG_POS_SAMPLE_MASK \ + (scn->targetdef->d_SI_CONFIG_POS_SAMPLE_MASK) +#define SI_CONFIG_INACTIVE_CLK_LSB \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_LSB) +#define SI_CONFIG_INACTIVE_CLK_MASK \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_MASK) +#define SI_CONFIG_INACTIVE_DATA_LSB \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_LSB) +#define SI_CONFIG_INACTIVE_DATA_MASK \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_MASK) +#define SI_CONFIG_DIVIDER_LSB \ + (scn->targetdef->d_SI_CONFIG_DIVIDER_LSB) +#define SI_CONFIG_DIVIDER_MASK \ + (scn->targetdef->d_SI_CONFIG_DIVIDER_MASK) +#define SI_BASE_ADDRESS \ + (scn->targetdef->d_SI_BASE_ADDRESS) +#define SI_CONFIG_OFFSET \ + (scn->targetdef->d_SI_CONFIG_OFFSET) +#define SI_TX_DATA0_OFFSET \ + (scn->targetdef->d_SI_TX_DATA0_OFFSET) +#define SI_TX_DATA1_OFFSET \ + (scn->targetdef->d_SI_TX_DATA1_OFFSET) +#define SI_RX_DATA0_OFFSET \ + (scn->targetdef->d_SI_RX_DATA0_OFFSET) +#define SI_RX_DATA1_OFFSET \ + (scn->targetdef->d_SI_RX_DATA1_OFFSET) +#define SI_CS_OFFSET \ + (scn->targetdef->d_SI_CS_OFFSET) +#define SI_CS_DONE_ERR_MASK \ + (scn->targetdef->d_SI_CS_DONE_ERR_MASK) +#define SI_CS_DONE_INT_MASK \ + (scn->targetdef->d_SI_CS_DONE_INT_MASK) +#define SI_CS_START_LSB \ + (scn->targetdef->d_SI_CS_START_LSB) +#define SI_CS_START_MASK \ + (scn->targetdef->d_SI_CS_START_MASK) +#define SI_CS_RX_CNT_LSB \ + (scn->targetdef->d_SI_CS_RX_CNT_LSB) +#define SI_CS_RX_CNT_MASK \ + (scn->targetdef->d_SI_CS_RX_CNT_MASK) +#define SI_CS_TX_CNT_LSB \ + (scn->targetdef->d_SI_CS_TX_CNT_LSB) +#define SI_CS_TX_CNT_MASK \ + (scn->targetdef->d_SI_CS_TX_CNT_MASK) +#define EEPROM_SZ \ + (scn->targetdef->d_BOARD_DATA_SZ) +#define EEPROM_EXT_SZ \ + (scn->targetdef->d_BOARD_EXT_DATA_SZ) +#define MBOX_BASE_ADDRESS \ + (scn->targetdef->d_MBOX_BASE_ADDRESS) +#define LOCAL_SCRATCH_OFFSET \ + (scn->targetdef->d_LOCAL_SCRATCH_OFFSET) +#define CPU_CLOCK_OFFSET \ + (scn->targetdef->d_CPU_CLOCK_OFFSET) +#define LPO_CAL_OFFSET \ + (scn->targetdef->d_LPO_CAL_OFFSET) +#define GPIO_PIN10_OFFSET \ + (scn->targetdef->d_GPIO_PIN10_OFFSET) +#define GPIO_PIN11_OFFSET \ + (scn->targetdef->d_GPIO_PIN11_OFFSET) +#define GPIO_PIN12_OFFSET \ + (scn->targetdef->d_GPIO_PIN12_OFFSET) +#define GPIO_PIN13_OFFSET \ + (scn->targetdef->d_GPIO_PIN13_OFFSET) +#define CLOCK_GPIO_OFFSET \ + (scn->targetdef->d_CLOCK_GPIO_OFFSET) +#define CPU_CLOCK_STANDARD_LSB \ + (scn->targetdef->d_CPU_CLOCK_STANDARD_LSB) +#define CPU_CLOCK_STANDARD_MASK \ + (scn->targetdef->d_CPU_CLOCK_STANDARD_MASK) +#define LPO_CAL_ENABLE_LSB \ + (scn->targetdef->d_LPO_CAL_ENABLE_LSB) +#define LPO_CAL_ENABLE_MASK \ + (scn->targetdef->d_LPO_CAL_ENABLE_MASK) +#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB \ + (scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB) +#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK \ + (scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK) +#define ANALOG_INTF_BASE_ADDRESS \ + (scn->targetdef->d_ANALOG_INTF_BASE_ADDRESS) +#define WLAN_MAC_BASE_ADDRESS \ + (scn->targetdef->d_WLAN_MAC_BASE_ADDRESS) +#define CE0_BASE_ADDRESS \ + (scn->targetdef->d_CE0_BASE_ADDRESS) +#define CE1_BASE_ADDRESS \ + (scn->targetdef->d_CE1_BASE_ADDRESS) +#define FW_INDICATOR_ADDRESS \ + (scn->targetdef->d_FW_INDICATOR_ADDRESS) +#define DRAM_BASE_ADDRESS \ + (scn->targetdef->d_DRAM_BASE_ADDRESS) +#define SOC_CORE_BASE_ADDRESS \ + (scn->targetdef->d_SOC_CORE_BASE_ADDRESS) +#define CORE_CTRL_ADDRESS \ + (scn->targetdef->d_CORE_CTRL_ADDRESS) +#define CE_COUNT \ + (scn->targetdef->d_CE_COUNT) +#define PCIE_INTR_ENABLE_ADDRESS \ + (scn->targetdef->d_PCIE_INTR_ENABLE_ADDRESS) +#define PCIE_INTR_CLR_ADDRESS \ + (scn->targetdef->d_PCIE_INTR_CLR_ADDRESS) +#define PCIE_INTR_FIRMWARE_MASK \ + (scn->targetdef->d_PCIE_INTR_FIRMWARE_MASK) +#define PCIE_INTR_CE_MASK_ALL \ + (scn->targetdef->d_PCIE_INTR_CE_MASK_ALL) +#define CORE_CTRL_CPU_INTR_MASK \ + (scn->targetdef->d_CORE_CTRL_CPU_INTR_MASK) +#define PCIE_INTR_CAUSE_ADDRESS \ + (scn->targetdef->d_PCIE_INTR_CAUSE_ADDRESS) +#define SOC_RESET_CONTROL_ADDRESS \ + (scn->targetdef->d_SOC_RESET_CONTROL_ADDRESS) +#define SOC_RESET_CONTROL_CE_RST_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_CE_RST_MASK) +#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK\ + (scn->targetdef->d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK) +#define CPU_INTR_ADDRESS \ + (scn->targetdef->d_CPU_INTR_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ADDRESS \ + (scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK \ + (scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK) +#define SOC_LF_TIMER_STATUS0_ADDRESS \ + (scn->targetdef->d_SOC_LF_TIMER_STATUS0_ADDRESS) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB \ + (scn->targetdef->d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) + +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_GET(x) \ + (((x) & SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) >> \ + SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_SET(x) \ + (((x) << SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) & \ + SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) + +/* hif_pci.c */ +#define CHIP_ID_ADDRESS \ + (scn->targetdef->d_SOC_CHIP_ID_ADDRESS) +#define SOC_CHIP_ID_REVISION_MASK \ + (scn->targetdef->d_SOC_CHIP_ID_REVISION_MASK) +#define SOC_CHIP_ID_REVISION_LSB \ + (scn->targetdef->d_SOC_CHIP_ID_REVISION_LSB) +#define SOC_CHIP_ID_VERSION_MASK \ + (scn->targetdef->d_SOC_CHIP_ID_VERSION_MASK) +#define SOC_CHIP_ID_VERSION_LSB \ + (scn->targetdef->d_SOC_CHIP_ID_VERSION_LSB) +#define CHIP_ID_REVISION_GET(x) \ + (((x) & SOC_CHIP_ID_REVISION_MASK) >> SOC_CHIP_ID_REVISION_LSB) +#define CHIP_ID_VERSION_GET(x) \ + (((x) & SOC_CHIP_ID_VERSION_MASK) >> SOC_CHIP_ID_VERSION_LSB) +/* hif_pci.c end */ + +/* misc */ +#define SR_WR_INDEX_ADDRESS \ + (scn->targetdef->d_SR_WR_INDEX_ADDRESS) +#define DST_WATERMARK_ADDRESS \ + (scn->targetdef->d_DST_WATERMARK_ADDRESS) +#define SOC_POWER_REG_OFFSET \ + (scn->targetdef->d_SOC_POWER_REG_OFFSET) +/* end */ + +/* copy_engine.c */ +#define DST_WR_INDEX_ADDRESS \ + (scn->targetdef->d_DST_WR_INDEX_ADDRESS) +#define SRC_WATERMARK_ADDRESS \ + (scn->targetdef->d_SRC_WATERMARK_ADDRESS) +#define SRC_WATERMARK_LOW_MASK \ + (scn->targetdef->d_SRC_WATERMARK_LOW_MASK) +#define SRC_WATERMARK_HIGH_MASK \ + (scn->targetdef->d_SRC_WATERMARK_HIGH_MASK) +#define DST_WATERMARK_LOW_MASK \ + (scn->targetdef->d_DST_WATERMARK_LOW_MASK) +#define DST_WATERMARK_HIGH_MASK \ + (scn->targetdef->d_DST_WATERMARK_HIGH_MASK) +#define CURRENT_SRRI_ADDRESS \ + (scn->targetdef->d_CURRENT_SRRI_ADDRESS) +#define CURRENT_DRRI_ADDRESS \ + (scn->targetdef->d_CURRENT_DRRI_ADDRESS) +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK \ + (scn->targetdef->d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK) +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK\ + (scn->targetdef->d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK) +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK \ + (scn->targetdef->d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK) +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK\ + (scn->targetdef->d_HOST_IS_DST_RING_LOW_WATERMARK_MASK) +#define HOST_IS_ADDRESS \ + (scn->targetdef->d_HOST_IS_ADDRESS) +#define HOST_IS_COPY_COMPLETE_MASK \ + (scn->targetdef->d_HOST_IS_COPY_COMPLETE_MASK) +#define CE_WRAPPER_BASE_ADDRESS \ + (scn->targetdef->d_CE_WRAPPER_BASE_ADDRESS) +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS \ + (scn->targetdef->d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS) +#define HOST_IE_ADDRESS \ + (scn->targetdef->d_HOST_IE_ADDRESS) +#define HOST_IE_COPY_COMPLETE_MASK \ + (scn->targetdef->d_HOST_IE_COPY_COMPLETE_MASK) +#define SR_BA_ADDRESS \ + (scn->targetdef->d_SR_BA_ADDRESS) +#define SR_SIZE_ADDRESS \ + (scn->targetdef->d_SR_SIZE_ADDRESS) +#define CE_CTRL1_ADDRESS \ + (scn->targetdef->d_CE_CTRL1_ADDRESS) +#define CE_CTRL1_DMAX_LENGTH_MASK \ + (scn->targetdef->d_CE_CTRL1_DMAX_LENGTH_MASK) +#define DR_BA_ADDRESS \ + (scn->targetdef->d_DR_BA_ADDRESS) +#define DR_SIZE_ADDRESS \ + (scn->targetdef->d_DR_SIZE_ADDRESS) +#define MISC_IE_ADDRESS \ + (scn->targetdef->d_MISC_IE_ADDRESS) +#define MISC_IS_AXI_ERR_MASK \ + (scn->targetdef->d_MISC_IS_AXI_ERR_MASK) +#define MISC_IS_DST_ADDR_ERR_MASK \ + (scn->targetdef->d_MISC_IS_DST_ADDR_ERR_MASK) +#define MISC_IS_SRC_LEN_ERR_MASK \ + (scn->targetdef->d_MISC_IS_SRC_LEN_ERR_MASK) +#define MISC_IS_DST_MAX_LEN_VIO_MASK \ + (scn->targetdef->d_MISC_IS_DST_MAX_LEN_VIO_MASK) +#define MISC_IS_DST_RING_OVERFLOW_MASK \ + (scn->targetdef->d_MISC_IS_DST_RING_OVERFLOW_MASK) +#define MISC_IS_SRC_RING_OVERFLOW_MASK \ + (scn->targetdef->d_MISC_IS_SRC_RING_OVERFLOW_MASK) +#define SRC_WATERMARK_LOW_LSB \ + (scn->targetdef->d_SRC_WATERMARK_LOW_LSB) +#define SRC_WATERMARK_HIGH_LSB \ + (scn->targetdef->d_SRC_WATERMARK_HIGH_LSB) +#define DST_WATERMARK_LOW_LSB \ + (scn->targetdef->d_DST_WATERMARK_LOW_LSB) +#define DST_WATERMARK_HIGH_LSB \ + (scn->targetdef->d_DST_WATERMARK_HIGH_LSB) +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \ + (scn->targetdef->d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \ + (scn->targetdef->d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) +#define CE_CTRL1_DMAX_LENGTH_LSB \ + (scn->targetdef->d_CE_CTRL1_DMAX_LENGTH_LSB) +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK\ + (scn->targetdef->d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK\ + (scn->targetdef->d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB \ + (scn->targetdef->d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB \ + (scn->targetdef->d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) +#define WLAN_DEBUG_INPUT_SEL_OFFSET \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_OFFSET) +#define WLAN_DEBUG_INPUT_SEL_SRC_MSB \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_MSB) +#define WLAN_DEBUG_INPUT_SEL_SRC_LSB \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_LSB) +#define WLAN_DEBUG_INPUT_SEL_SRC_MASK \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_MASK) +#define WLAN_DEBUG_CONTROL_OFFSET \ + (scn->targetdef->d_WLAN_DEBUG_CONTROL_OFFSET) +#define WLAN_DEBUG_CONTROL_ENABLE_MSB \ + (scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_MSB) +#define WLAN_DEBUG_CONTROL_ENABLE_LSB \ + (scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_LSB) +#define WLAN_DEBUG_CONTROL_ENABLE_MASK \ + (scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_MASK) +#define WLAN_DEBUG_OUT_OFFSET \ + (scn->targetdef->d_WLAN_DEBUG_OUT_OFFSET) +#define WLAN_DEBUG_OUT_DATA_MSB \ + (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_MSB) +#define WLAN_DEBUG_OUT_DATA_LSB \ + (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_LSB) +#define WLAN_DEBUG_OUT_DATA_MASK \ + (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_MASK) +#define AMBA_DEBUG_BUS_OFFSET \ + (scn->targetdef->d_AMBA_DEBUG_BUS_OFFSET) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB \ + (scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB \ + (scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK \ + (scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK) +#define AMBA_DEBUG_BUS_SEL_MSB \ + (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_MSB) +#define AMBA_DEBUG_BUS_SEL_LSB \ + (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_LSB) +#define AMBA_DEBUG_BUS_SEL_MASK \ + (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_MASK) +#define CE_WRAPPER_DEBUG_OFFSET \ + (scn->targetdef->d_CE_WRAPPER_DEBUG_OFFSET) +#define CE_WRAPPER_DEBUG_SEL_MSB \ + (scn->targetdef->d_CE_WRAPPER_DEBUG_SEL_MSB) +#define CE_WRAPPER_DEBUG_SEL_LSB \ + (scn->targetdef->d_CE_WRAPPER_DEBUG_SEL_LSB) +#define CE_WRAPPER_DEBUG_SEL_MASK \ + (scn->targetdef->d_CE_WRAPPER_DEBUG_SEL_MASK) +#define CE_DEBUG_OFFSET \ + (scn->targetdef->d_CE_DEBUG_OFFSET) +#define CE_DEBUG_SEL_MSB \ + (scn->targetdef->d_CE_DEBUG_SEL_MSB) +#define CE_DEBUG_SEL_LSB \ + (scn->targetdef->d_CE_DEBUG_SEL_LSB) +#define CE_DEBUG_SEL_MASK \ + (scn->targetdef->d_CE_DEBUG_SEL_MASK) +/* end */ +/* PLL start */ +#define EFUSE_OFFSET \ + (scn->targetdef->d_EFUSE_OFFSET) +#define EFUSE_XTAL_SEL_MSB \ + (scn->targetdef->d_EFUSE_XTAL_SEL_MSB) +#define EFUSE_XTAL_SEL_LSB \ + (scn->targetdef->d_EFUSE_XTAL_SEL_LSB) +#define EFUSE_XTAL_SEL_MASK \ + (scn->targetdef->d_EFUSE_XTAL_SEL_MASK) +#define BB_PLL_CONFIG_OFFSET \ + (scn->targetdef->d_BB_PLL_CONFIG_OFFSET) +#define BB_PLL_CONFIG_OUTDIV_MSB \ + (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MSB) +#define BB_PLL_CONFIG_OUTDIV_LSB \ + (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_LSB) +#define BB_PLL_CONFIG_OUTDIV_MASK \ + (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MASK) +#define BB_PLL_CONFIG_FRAC_MSB \ + (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MSB) +#define BB_PLL_CONFIG_FRAC_LSB \ + (scn->targetdef->d_BB_PLL_CONFIG_FRAC_LSB) +#define BB_PLL_CONFIG_FRAC_MASK \ + (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MASK) +#define WLAN_PLL_SETTLE_TIME_MSB \ + (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MSB) +#define WLAN_PLL_SETTLE_TIME_LSB \ + (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_LSB) +#define WLAN_PLL_SETTLE_TIME_MASK \ + (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MASK) +#define WLAN_PLL_SETTLE_OFFSET \ + (scn->targetdef->d_WLAN_PLL_SETTLE_OFFSET) +#define WLAN_PLL_SETTLE_SW_MASK \ + (scn->targetdef->d_WLAN_PLL_SETTLE_SW_MASK) +#define WLAN_PLL_SETTLE_RSTMASK \ + (scn->targetdef->d_WLAN_PLL_SETTLE_RSTMASK) +#define WLAN_PLL_SETTLE_RESET \ + (scn->targetdef->d_WLAN_PLL_SETTLE_RESET) +#define WLAN_PLL_CONTROL_NOPWD_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MSB) +#define WLAN_PLL_CONTROL_NOPWD_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_LSB) +#define WLAN_PLL_CONTROL_NOPWD_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MASK) +#define WLAN_PLL_CONTROL_BYPASS_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MSB) +#define WLAN_PLL_CONTROL_BYPASS_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_LSB) +#define WLAN_PLL_CONTROL_BYPASS_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MASK) +#define WLAN_PLL_CONTROL_BYPASS_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_RESET) +#define WLAN_PLL_CONTROL_CLK_SEL_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MSB) +#define WLAN_PLL_CONTROL_CLK_SEL_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_LSB) +#define WLAN_PLL_CONTROL_CLK_SEL_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MASK) +#define WLAN_PLL_CONTROL_CLK_SEL_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_RESET) +#define WLAN_PLL_CONTROL_REFDIV_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MSB) +#define WLAN_PLL_CONTROL_REFDIV_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_LSB) +#define WLAN_PLL_CONTROL_REFDIV_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MASK) +#define WLAN_PLL_CONTROL_REFDIV_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_RESET) +#define WLAN_PLL_CONTROL_DIV_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MSB) +#define WLAN_PLL_CONTROL_DIV_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_LSB) +#define WLAN_PLL_CONTROL_DIV_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MASK) +#define WLAN_PLL_CONTROL_DIV_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_RESET) +#define WLAN_PLL_CONTROL_OFFSET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_OFFSET) +#define WLAN_PLL_CONTROL_SW_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_SW_MASK) +#define WLAN_PLL_CONTROL_RSTMASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_RSTMASK) +#define WLAN_PLL_CONTROL_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_RESET) +#define SOC_CORE_CLK_CTRL_OFFSET \ + (scn->targetdef->d_SOC_CORE_CLK_CTRL_OFFSET) +#define SOC_CORE_CLK_CTRL_DIV_MSB \ + (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MSB) +#define SOC_CORE_CLK_CTRL_DIV_LSB \ + (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_LSB) +#define SOC_CORE_CLK_CTRL_DIV_MASK \ + (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_MSB \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_LSB \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_LSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_MASK \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_RESET \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_RESET) +#define RTC_SYNC_STATUS_OFFSET \ + (scn->targetdef->d_RTC_SYNC_STATUS_OFFSET) +#define SOC_CPU_CLOCK_OFFSET \ + (scn->targetdef->d_SOC_CPU_CLOCK_OFFSET) +#define SOC_CPU_CLOCK_STANDARD_MSB \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MSB) +#define SOC_CPU_CLOCK_STANDARD_LSB \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_LSB) +#define SOC_CPU_CLOCK_STANDARD_MASK \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MASK) +/* PLL end */ + +/* SET macros */ +#define WLAN_SYSTEM_SLEEP_DISABLE_SET(x) \ + (((x) << WLAN_SYSTEM_SLEEP_DISABLE_LSB) & \ + WLAN_SYSTEM_SLEEP_DISABLE_MASK) +#define SI_CONFIG_BIDIR_OD_DATA_SET(x) \ + (((x) << SI_CONFIG_BIDIR_OD_DATA_LSB) & \ + SI_CONFIG_BIDIR_OD_DATA_MASK) +#define SI_CONFIG_I2C_SET(x) \ + (((x) << SI_CONFIG_I2C_LSB) & SI_CONFIG_I2C_MASK) +#define SI_CONFIG_POS_SAMPLE_SET(x) \ + (((x) << SI_CONFIG_POS_SAMPLE_LSB) & \ + SI_CONFIG_POS_SAMPLE_MASK) +#define SI_CONFIG_INACTIVE_CLK_SET(x) \ + (((x) << SI_CONFIG_INACTIVE_CLK_LSB) & \ + SI_CONFIG_INACTIVE_CLK_MASK) +#define SI_CONFIG_INACTIVE_DATA_SET(x) \ + (((x) << SI_CONFIG_INACTIVE_DATA_LSB) & \ + SI_CONFIG_INACTIVE_DATA_MASK) +#define SI_CONFIG_DIVIDER_SET(x) \ + (((x) << SI_CONFIG_DIVIDER_LSB) & SI_CONFIG_DIVIDER_MASK) +#define SI_CS_START_SET(x) \ + (((x) << SI_CS_START_LSB) & SI_CS_START_MASK) +#define SI_CS_RX_CNT_SET(x) \ + (((x) << SI_CS_RX_CNT_LSB) & SI_CS_RX_CNT_MASK) +#define SI_CS_TX_CNT_SET(x) \ + (((x) << SI_CS_TX_CNT_LSB) & SI_CS_TX_CNT_MASK) +#define LPO_CAL_ENABLE_SET(x) \ + (((x) << LPO_CAL_ENABLE_LSB) & LPO_CAL_ENABLE_MASK) +#define CPU_CLOCK_STANDARD_SET(x) \ + (((x) << CPU_CLOCK_STANDARD_LSB) & CPU_CLOCK_STANDARD_MASK) +#define CLOCK_GPIO_BT_CLK_OUT_EN_SET(x) \ + (((x) << CLOCK_GPIO_BT_CLK_OUT_EN_LSB) & \ + CLOCK_GPIO_BT_CLK_OUT_EN_MASK) +/* copy_engine.c */ +#define SRC_WATERMARK_LOW_SET(x) \ + (((x) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK) +#define SRC_WATERMARK_HIGH_SET(x) \ + (((x) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK) +#define DST_WATERMARK_LOW_SET(x) \ + (((x) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK) +#define DST_WATERMARK_HIGH_SET(x) \ + (((x) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK) +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) (((x) & \ + CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \ + CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) +#define CE_CTRL1_DMAX_LENGTH_SET(x) \ + (((x) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK) +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \ + (((x) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \ + CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \ + (((x) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \ + CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) +#define WLAN_DEBUG_INPUT_SEL_SRC_GET(x) \ + (((x) & \ + WLAN_DEBUG_INPUT_SEL_SRC_MASK) >> \ + WLAN_DEBUG_INPUT_SEL_SRC_LSB) +#define WLAN_DEBUG_INPUT_SEL_SRC_SET(x) \ + (((x) << WLAN_DEBUG_INPUT_SEL_SRC_LSB) & \ + WLAN_DEBUG_INPUT_SEL_SRC_MASK) +#define WLAN_DEBUG_CONTROL_ENABLE_GET(x) \ + (((x) & \ + WLAN_DEBUG_CONTROL_ENABLE_MASK) >> \ + WLAN_DEBUG_CONTROL_ENABLE_LSB) +#define WLAN_DEBUG_CONTROL_ENABLE_SET(x) \ + (((x) << WLAN_DEBUG_CONTROL_ENABLE_LSB) & \ + WLAN_DEBUG_CONTROL_ENABLE_MASK) +#define WLAN_DEBUG_OUT_DATA_GET(x) \ + (((x) & WLAN_DEBUG_OUT_DATA_MASK) >> WLAN_DEBUG_OUT_DATA_LSB) +#define WLAN_DEBUG_OUT_DATA_SET(x) \ + (((x) << WLAN_DEBUG_OUT_DATA_LSB) & WLAN_DEBUG_OUT_DATA_MASK) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_GET(x) \ + (((x) & \ + AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK) >> \ + AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(x) \ + (((x) << AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB) & \ + AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK) +#define AMBA_DEBUG_BUS_SEL_GET(x) \ + (((x) & AMBA_DEBUG_BUS_SEL_MASK) >> AMBA_DEBUG_BUS_SEL_LSB) +#define AMBA_DEBUG_BUS_SEL_SET(x) \ + (((x) << AMBA_DEBUG_BUS_SEL_LSB) & AMBA_DEBUG_BUS_SEL_MASK) +#define CE_WRAPPER_DEBUG_SEL_GET(x) \ + (((x) & CE_WRAPPER_DEBUG_SEL_MASK) >> CE_WRAPPER_DEBUG_SEL_LSB) +#define CE_WRAPPER_DEBUG_SEL_SET(x) \ + (((x) << CE_WRAPPER_DEBUG_SEL_LSB) & CE_WRAPPER_DEBUG_SEL_MASK) +#define CE_DEBUG_SEL_GET(x) \ + (((x) & CE_DEBUG_SEL_MASK) >> CE_DEBUG_SEL_LSB) +#define CE_DEBUG_SEL_SET(x) \ + (((x) << CE_DEBUG_SEL_LSB) & CE_DEBUG_SEL_MASK) +/* end */ +/* PLL start */ +#define EFUSE_XTAL_SEL_GET(x) \ + (((x) & EFUSE_XTAL_SEL_MASK) >> EFUSE_XTAL_SEL_LSB) +#define EFUSE_XTAL_SEL_SET(x) \ + (((x) << EFUSE_XTAL_SEL_LSB) & EFUSE_XTAL_SEL_MASK) +#define BB_PLL_CONFIG_OUTDIV_GET(x) \ + (((x) & BB_PLL_CONFIG_OUTDIV_MASK) >> BB_PLL_CONFIG_OUTDIV_LSB) +#define BB_PLL_CONFIG_OUTDIV_SET(x) \ + (((x) << BB_PLL_CONFIG_OUTDIV_LSB) & BB_PLL_CONFIG_OUTDIV_MASK) +#define BB_PLL_CONFIG_FRAC_GET(x) \ + (((x) & BB_PLL_CONFIG_FRAC_MASK) >> BB_PLL_CONFIG_FRAC_LSB) +#define BB_PLL_CONFIG_FRAC_SET(x) \ + (((x) << BB_PLL_CONFIG_FRAC_LSB) & BB_PLL_CONFIG_FRAC_MASK) +#define WLAN_PLL_SETTLE_TIME_GET(x) \ + (((x) & WLAN_PLL_SETTLE_TIME_MASK) >> WLAN_PLL_SETTLE_TIME_LSB) +#define WLAN_PLL_SETTLE_TIME_SET(x) \ + (((x) << WLAN_PLL_SETTLE_TIME_LSB) & WLAN_PLL_SETTLE_TIME_MASK) +#define WLAN_PLL_CONTROL_NOPWD_GET(x) \ + (((x) & \ + WLAN_PLL_CONTROL_NOPWD_MASK) >> \ + WLAN_PLL_CONTROL_NOPWD_LSB) +#define WLAN_PLL_CONTROL_NOPWD_SET(x) \ + (((x) << WLAN_PLL_CONTROL_NOPWD_LSB) & \ + WLAN_PLL_CONTROL_NOPWD_MASK) +#define WLAN_PLL_CONTROL_BYPASS_GET(x) \ + (((x) & \ + WLAN_PLL_CONTROL_BYPASS_MASK) >> \ + WLAN_PLL_CONTROL_BYPASS_LSB) +#define WLAN_PLL_CONTROL_BYPASS_SET(x) \ + (((x) << WLAN_PLL_CONTROL_BYPASS_LSB) & \ + WLAN_PLL_CONTROL_BYPASS_MASK) +#define WLAN_PLL_CONTROL_CLK_SEL_GET(x) \ + (((x) & \ + WLAN_PLL_CONTROL_CLK_SEL_MASK) >> \ + WLAN_PLL_CONTROL_CLK_SEL_LSB) +#define WLAN_PLL_CONTROL_CLK_SEL_SET(x) \ + (((x) << WLAN_PLL_CONTROL_CLK_SEL_LSB) & \ + WLAN_PLL_CONTROL_CLK_SEL_MASK) +#define WLAN_PLL_CONTROL_REFDIV_GET(x) \ + (((x) & \ + WLAN_PLL_CONTROL_REFDIV_MASK) >> \ + WLAN_PLL_CONTROL_REFDIV_LSB) +#define WLAN_PLL_CONTROL_REFDIV_SET(x) \ + (((x) << WLAN_PLL_CONTROL_REFDIV_LSB) & \ + WLAN_PLL_CONTROL_REFDIV_MASK) +#define WLAN_PLL_CONTROL_DIV_GET(x) \ + (((x) & \ + WLAN_PLL_CONTROL_DIV_MASK) >> \ + WLAN_PLL_CONTROL_DIV_LSB) +#define WLAN_PLL_CONTROL_DIV_SET(x) \ + (((x) << WLAN_PLL_CONTROL_DIV_LSB) & \ + WLAN_PLL_CONTROL_DIV_MASK) +#define SOC_CORE_CLK_CTRL_DIV_GET(x) \ + (((x) & \ + SOC_CORE_CLK_CTRL_DIV_MASK) >> \ + SOC_CORE_CLK_CTRL_DIV_LSB) +#define SOC_CORE_CLK_CTRL_DIV_SET(x) \ + (((x) << SOC_CORE_CLK_CTRL_DIV_LSB) & \ + SOC_CORE_CLK_CTRL_DIV_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_GET(x) \ + (((x) & \ + RTC_SYNC_STATUS_PLL_CHANGING_MASK) >> \ + RTC_SYNC_STATUS_PLL_CHANGING_LSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_SET(x) \ + (((x) << RTC_SYNC_STATUS_PLL_CHANGING_LSB) & \ + RTC_SYNC_STATUS_PLL_CHANGING_MASK) +#define SOC_CPU_CLOCK_STANDARD_GET(x) \ + (((x) & \ + SOC_CPU_CLOCK_STANDARD_MASK) >> \ + SOC_CPU_CLOCK_STANDARD_LSB) +#define SOC_CPU_CLOCK_STANDARD_SET(x) \ + (((x) << SOC_CPU_CLOCK_STANDARD_LSB) & \ + SOC_CPU_CLOCK_STANDARD_MASK) +/* PLL end */ + +struct hostdef_s { + uint32_t d_INT_STATUS_ENABLE_ERROR_LSB; + uint32_t d_INT_STATUS_ENABLE_ERROR_MASK; + uint32_t d_INT_STATUS_ENABLE_CPU_LSB; + uint32_t d_INT_STATUS_ENABLE_CPU_MASK; + uint32_t d_INT_STATUS_ENABLE_COUNTER_LSB; + uint32_t d_INT_STATUS_ENABLE_COUNTER_MASK; + uint32_t d_INT_STATUS_ENABLE_MBOX_DATA_LSB; + uint32_t d_INT_STATUS_ENABLE_MBOX_DATA_MASK; + uint32_t d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB; + uint32_t d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK; + uint32_t d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB; + uint32_t d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK; + uint32_t d_COUNTER_INT_STATUS_ENABLE_BIT_LSB; + uint32_t d_COUNTER_INT_STATUS_ENABLE_BIT_MASK; + uint32_t d_INT_STATUS_ENABLE_ADDRESS; + uint32_t d_CPU_INT_STATUS_ENABLE_BIT_LSB; + uint32_t d_CPU_INT_STATUS_ENABLE_BIT_MASK; + uint32_t d_HOST_INT_STATUS_ADDRESS; + uint32_t d_CPU_INT_STATUS_ADDRESS; + uint32_t d_ERROR_INT_STATUS_ADDRESS; + uint32_t d_ERROR_INT_STATUS_WAKEUP_MASK; + uint32_t d_ERROR_INT_STATUS_WAKEUP_LSB; + uint32_t d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK; + uint32_t d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB; + uint32_t d_ERROR_INT_STATUS_TX_OVERFLOW_MASK; + uint32_t d_ERROR_INT_STATUS_TX_OVERFLOW_LSB; + uint32_t d_COUNT_DEC_ADDRESS; + uint32_t d_HOST_INT_STATUS_CPU_MASK; + uint32_t d_HOST_INT_STATUS_CPU_LSB; + uint32_t d_HOST_INT_STATUS_ERROR_MASK; + uint32_t d_HOST_INT_STATUS_ERROR_LSB; + uint32_t d_HOST_INT_STATUS_COUNTER_MASK; + uint32_t d_HOST_INT_STATUS_COUNTER_LSB; + uint32_t d_RX_LOOKAHEAD_VALID_ADDRESS; + uint32_t d_WINDOW_DATA_ADDRESS; + uint32_t d_WINDOW_READ_ADDR_ADDRESS; + uint32_t d_WINDOW_WRITE_ADDR_ADDRESS; + uint32_t d_SOC_GLOBAL_RESET_ADDRESS; + uint32_t d_RTC_STATE_ADDRESS; + uint32_t d_RTC_STATE_COLD_RESET_MASK; + uint32_t d_PCIE_LOCAL_BASE_ADDRESS; + uint32_t d_PCIE_SOC_WAKE_RESET; + uint32_t d_PCIE_SOC_WAKE_ADDRESS; + uint32_t d_PCIE_SOC_WAKE_V_MASK; + uint32_t d_RTC_STATE_V_MASK; + uint32_t d_RTC_STATE_V_LSB; + uint32_t d_FW_IND_EVENT_PENDING; + uint32_t d_FW_IND_INITIALIZED; + uint32_t d_RTC_STATE_V_ON; +#if defined(SDIO_3_0) + uint32_t d_HOST_INT_STATUS_MBOX_DATA_MASK; + uint32_t d_HOST_INT_STATUS_MBOX_DATA_LSB; +#endif + uint32_t d_PCIE_SOC_RDY_STATUS_ADDRESS; + uint32_t d_PCIE_SOC_RDY_STATUS_BAR_MASK; + uint32_t d_SOC_PCIE_BASE_ADDRESS; + uint32_t d_MSI_MAGIC_ADR_ADDRESS; + uint32_t d_MSI_MAGIC_ADDRESS; +}; + +#define INT_STATUS_ENABLE_ERROR_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_ERROR_LSB) +#define INT_STATUS_ENABLE_ERROR_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_ERROR_MASK) +#define INT_STATUS_ENABLE_CPU_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_CPU_LSB) +#define INT_STATUS_ENABLE_CPU_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_CPU_MASK) +#define INT_STATUS_ENABLE_COUNTER_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_LSB) +#define INT_STATUS_ENABLE_COUNTER_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_MASK) +#define INT_STATUS_ENABLE_MBOX_DATA_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_LSB) +#define INT_STATUS_ENABLE_MBOX_DATA_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_MASK) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB\ + (scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB \ + (scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_LSB) +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK \ + (scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_MASK) +#define INT_STATUS_ENABLE_ADDRESS \ + (scn->hostdef->d_INT_STATUS_ENABLE_ADDRESS) +#define CPU_INT_STATUS_ENABLE_BIT_LSB \ + (scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_LSB) +#define CPU_INT_STATUS_ENABLE_BIT_MASK \ + (scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_MASK) +#define HOST_INT_STATUS_ADDRESS \ + (scn->hostdef->d_HOST_INT_STATUS_ADDRESS) +#define CPU_INT_STATUS_ADDRESS \ + (scn->hostdef->d_CPU_INT_STATUS_ADDRESS) +#define ERROR_INT_STATUS_ADDRESS \ + (scn->hostdef->d_ERROR_INT_STATUS_ADDRESS) +#define ERROR_INT_STATUS_WAKEUP_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_MASK) +#define ERROR_INT_STATUS_WAKEUP_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_LSB) +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK) +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB) +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_MASK) +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_LSB) +#define COUNT_DEC_ADDRESS \ + (scn->hostdef->d_COUNT_DEC_ADDRESS) +#define HOST_INT_STATUS_CPU_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_CPU_MASK) +#define HOST_INT_STATUS_CPU_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_CPU_LSB) +#define HOST_INT_STATUS_ERROR_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_ERROR_MASK) +#define HOST_INT_STATUS_ERROR_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_ERROR_LSB) +#define HOST_INT_STATUS_COUNTER_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_COUNTER_MASK) +#define HOST_INT_STATUS_COUNTER_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_COUNTER_LSB) +#define RX_LOOKAHEAD_VALID_ADDRESS \ + (scn->hostdef->d_RX_LOOKAHEAD_VALID_ADDRESS) +#define WINDOW_DATA_ADDRESS \ + (scn->hostdef->d_WINDOW_DATA_ADDRESS) +#define WINDOW_READ_ADDR_ADDRESS \ + (scn->hostdef->d_WINDOW_READ_ADDR_ADDRESS) +#define WINDOW_WRITE_ADDR_ADDRESS \ + (scn->hostdef->d_WINDOW_WRITE_ADDR_ADDRESS) +#define SOC_GLOBAL_RESET_ADDRESS \ + (scn->hostdef->d_SOC_GLOBAL_RESET_ADDRESS) +#define RTC_STATE_ADDRESS \ + (scn->hostdef->d_RTC_STATE_ADDRESS) +#define RTC_STATE_COLD_RESET_MASK \ + (scn->hostdef->d_RTC_STATE_COLD_RESET_MASK) +#define PCIE_LOCAL_BASE_ADDRESS \ + (scn->hostdef->d_PCIE_LOCAL_BASE_ADDRESS) +#define PCIE_SOC_WAKE_RESET \ + (scn->hostdef->d_PCIE_SOC_WAKE_RESET) +#define PCIE_SOC_WAKE_ADDRESS \ + (scn->hostdef->d_PCIE_SOC_WAKE_ADDRESS) +#define PCIE_SOC_WAKE_V_MASK \ + (scn->hostdef->d_PCIE_SOC_WAKE_V_MASK) +#define RTC_STATE_V_MASK \ + (scn->hostdef->d_RTC_STATE_V_MASK) +#define RTC_STATE_V_LSB \ + (scn->hostdef->d_RTC_STATE_V_LSB) +#define FW_IND_EVENT_PENDING \ + (scn->hostdef->d_FW_IND_EVENT_PENDING) +#define FW_IND_INITIALIZED \ + (scn->hostdef->d_FW_IND_INITIALIZED) +#define RTC_STATE_V_ON \ + (scn->hostdef->d_RTC_STATE_V_ON) +#if defined(SDIO_3_0) +#define HOST_INT_STATUS_MBOX_DATA_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_MASK) +#define HOST_INT_STATUS_MBOX_DATA_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_LSB) +#endif + +#if !defined(SOC_PCIE_BASE_ADDRESS) +#define SOC_PCIE_BASE_ADDRESS 0 +#endif + +#if !defined(PCIE_SOC_RDY_STATUS_ADDRESS) +#define PCIE_SOC_RDY_STATUS_ADDRESS 0 +#define PCIE_SOC_RDY_STATUS_BAR_MASK 0 +#endif + +#if !defined(MSI_MAGIC_ADR_ADDRESS) +#define MSI_MAGIC_ADR_ADDRESS 0 +#define MSI_MAGIC_ADDRESS 0 +#endif + +/* SET/GET macros */ +#define INT_STATUS_ENABLE_ERROR_SET(x) \ + (((x) << INT_STATUS_ENABLE_ERROR_LSB) & \ + INT_STATUS_ENABLE_ERROR_MASK) +#define INT_STATUS_ENABLE_CPU_SET(x) \ + (((x) << INT_STATUS_ENABLE_CPU_LSB) & \ + INT_STATUS_ENABLE_CPU_MASK) +#define INT_STATUS_ENABLE_COUNTER_SET(x) \ + (((x) << INT_STATUS_ENABLE_COUNTER_LSB) & \ + INT_STATUS_ENABLE_COUNTER_MASK) +#define INT_STATUS_ENABLE_MBOX_DATA_SET(x) \ + (((x) << INT_STATUS_ENABLE_MBOX_DATA_LSB) & \ + INT_STATUS_ENABLE_MBOX_DATA_MASK) +#define CPU_INT_STATUS_ENABLE_BIT_SET(x) \ + (((x) << CPU_INT_STATUS_ENABLE_BIT_LSB) & \ + CPU_INT_STATUS_ENABLE_BIT_MASK) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(x) \ + (((x) << ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) & \ + ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(x)\ + (((x) << ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) & \ + ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) +#define COUNTER_INT_STATUS_ENABLE_BIT_SET(x) \ + (((x) << COUNTER_INT_STATUS_ENABLE_BIT_LSB) & \ + COUNTER_INT_STATUS_ENABLE_BIT_MASK) +#define ERROR_INT_STATUS_WAKEUP_GET(x) \ + (((x) & ERROR_INT_STATUS_WAKEUP_MASK) >> \ + ERROR_INT_STATUS_WAKEUP_LSB) +#define ERROR_INT_STATUS_RX_UNDERFLOW_GET(x) \ + (((x) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK) >> \ + ERROR_INT_STATUS_RX_UNDERFLOW_LSB) +#define ERROR_INT_STATUS_TX_OVERFLOW_GET(x) \ + (((x) & ERROR_INT_STATUS_TX_OVERFLOW_MASK) >> \ + ERROR_INT_STATUS_TX_OVERFLOW_LSB) +#define HOST_INT_STATUS_CPU_GET(x) \ + (((x) & HOST_INT_STATUS_CPU_MASK) >> \ + HOST_INT_STATUS_CPU_LSB) +#define HOST_INT_STATUS_ERROR_GET(x) \ + (((x) & HOST_INT_STATUS_ERROR_MASK) >> \ + HOST_INT_STATUS_ERROR_LSB) +#define HOST_INT_STATUS_COUNTER_GET(x) \ + (((x) & HOST_INT_STATUS_COUNTER_MASK) >> \ + HOST_INT_STATUS_COUNTER_LSB) +#define RTC_STATE_V_GET(x) \ + (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB) +#if defined(SDIO_3_0) +#define HOST_INT_STATUS_MBOX_DATA_GET(x) \ + (((x) & HOST_INT_STATUS_MBOX_DATA_MASK) >> \ + HOST_INT_STATUS_MBOX_DATA_LSB) +#endif + +#define INVALID_REG_LOC_DUMMY_DATA 0xAA + + + +#define ROME_USB_RTC_SOC_BASE_ADDRESS 0x00000800 +#define ROME_USB_SOC_RESET_CONTROL_COLD_RST_LSB 0x0 +#define SOC_RESET_CONTROL_COLD_RST_LSB 8 +#define SOC_RESET_CONTROL_COLD_RST_MASK 0x00000100 +#define SOC_RESET_CONTROL_COLD_RST_SET(x) \ + (((x) << SOC_RESET_CONTROL_COLD_RST_LSB) & \ + SOC_RESET_CONTROL_COLD_RST_MASK) + +#define AR6320_CORE_CLK_DIV_ADDR 0x403fa8 +#define AR6320_CPU_PLL_INIT_DONE_ADDR 0x403fd0 +#define AR6320_CPU_SPEED_ADDR 0x403fa4 +#define AR6320V2_CORE_CLK_DIV_ADDR 0x403fd8 +#define AR6320V2_CPU_PLL_INIT_DONE_ADDR 0x403fd0 +#define AR6320V2_CPU_SPEED_ADDR 0x403fd4 +#define AR6320V3_CORE_CLK_DIV_ADDR 0x404028 +#define AR6320V3_CPU_PLL_INIT_DONE_ADDR 0x404020 +#define AR6320V3_CPU_SPEED_ADDR 0x404024 + +enum a_refclk_speed_t { + /* Unsupported ref clock -- use PLL Bypass */ + SOC_REFCLK_UNKNOWN = -1, + SOC_REFCLK_48_MHZ = 0, + SOC_REFCLK_19_2_MHZ = 1, + SOC_REFCLK_24_MHZ = 2, + SOC_REFCLK_26_MHZ = 3, + SOC_REFCLK_37_4_MHZ = 4, + SOC_REFCLK_38_4_MHZ = 5, + SOC_REFCLK_40_MHZ = 6, + SOC_REFCLK_52_MHZ = 7, +}; + +#define A_REFCLK_UNKNOWN SOC_REFCLK_UNKNOWN +#define A_REFCLK_48_MHZ SOC_REFCLK_48_MHZ +#define A_REFCLK_19_2_MHZ SOC_REFCLK_19_2_MHZ +#define A_REFCLK_24_MHZ SOC_REFCLK_24_MHZ +#define A_REFCLK_26_MHZ SOC_REFCLK_26_MHZ +#define A_REFCLK_37_4_MHZ SOC_REFCLK_37_4_MHZ +#define A_REFCLK_38_4_MHZ SOC_REFCLK_38_4_MHZ +#define A_REFCLK_40_MHZ SOC_REFCLK_40_MHZ +#define A_REFCLK_52_MHZ SOC_REFCLK_52_MHZ + +#define TARGET_CPU_FREQ 176000000 + +struct wlan_pll_s { + u_int32_t refdiv; + u_int32_t div; + u_int32_t rnfrac; + u_int32_t outdiv; +}; + +struct cmnos_clock_s { + enum a_refclk_speed_t refclk_speed; + u_int32_t refclk_hz; + u_int32_t pll_settling_time; /* 50us */ + struct wlan_pll_s wlan_pll; +}; + +struct tgt_reg_section { + u_int32_t start_addr; + u_int32_t end_addr; +}; + +struct tgt_reg_table { + const struct tgt_reg_section *section; + u_int32_t section_size; +}; + +void target_register_tbl_attach(struct hif_softc *scn, + uint32_t target_type); +void hif_register_tbl_attach(struct hif_softc *scn, + uint32_t target_type); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/usb/usbdrv.c b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/usbdrv.c new file mode 100644 index 0000000000000000000000000000000000000000..c56aa3152825ec261910f4872cc79ce76705e46c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/usbdrv.c @@ -0,0 +1,1316 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#define ATH_MODULE_NAME hif +#include "a_debug.h" +#include "hif_usb_internal.h" +#include "if_usb.h" +#include "cds_api.h" +#include "hif_debug.h" + +#define IS_BULK_EP(attr) (((attr) & 3) == 0x02) +#define IS_INT_EP(attr) (((attr) & 3) == 0x03) +#define IS_ISOC_EP(attr) (((attr) & 3) == 0x01) +#define IS_DIR_IN(addr) ((addr) & 0x80) + +#define IS_FW_CRASH_DUMP(x)(((x == FW_ASSERT_PATTERN) || \ + (x == FW_REG_PATTERN) || \ + ((x & FW_RAMDUMP_PATTERN_MASK) == \ + FW_RAMDUMP_PATTERN)) ? 1 : 0) + +static void usb_hif_post_recv_transfers(struct HIF_USB_PIPE *recv_pipe, + int buffer_length); +static void usb_hif_post_recv_bundle_transfers + (struct HIF_USB_PIPE *recv_pipe, + int buffer_length); +static void usb_hif_cleanup_recv_urb(struct HIF_URB_CONTEXT *urb_context); + + +/** + * usb_hif_free_urb_to_pipe() - add urb back to urb list of a pipe + * @pipe: pointer to struct HIF_USB_PIPE + * @urb_context: pointer to struct HIF_URB_CONTEXT + * + * Return: none + */ +static void usb_hif_free_urb_to_pipe(struct HIF_USB_PIPE *pipe, + struct HIF_URB_CONTEXT *urb_context) +{ + qdf_spin_lock_irqsave(&pipe->device->cs_lock); + pipe->urb_cnt++; + DL_ListAdd(&pipe->urb_list_head, &urb_context->link); + qdf_spin_unlock_irqrestore(&pipe->device->cs_lock); +} + +/** + * usb_hif_alloc_urb_from_pipe() - remove urb back from urb list of a pipe + * @pipe: pointer to struct HIF_USB_PIPE + * + * Return: struct HIF_URB_CONTEXT urb context removed from the urb list + */ +struct HIF_URB_CONTEXT *usb_hif_alloc_urb_from_pipe(struct HIF_USB_PIPE *pipe) +{ + struct HIF_URB_CONTEXT *urb_context = NULL; + DL_LIST *item; + + qdf_spin_lock_irqsave(&pipe->device->cs_lock); + item = dl_list_remove_item_from_head(&pipe->urb_list_head); + if (item) { + urb_context = A_CONTAINING_STRUCT(item, struct HIF_URB_CONTEXT, + link); + pipe->urb_cnt--; + } + qdf_spin_unlock_irqrestore(&pipe->device->cs_lock); + + return urb_context; +} + +/** + * usb_hif_dequeue_pending_transfer() - remove urb from pending xfer list + * @pipe: pointer to struct HIF_USB_PIPE + * + * Return: struct HIF_URB_CONTEXT urb context removed from the pending xfer list + */ +static struct HIF_URB_CONTEXT *usb_hif_dequeue_pending_transfer + (struct HIF_USB_PIPE *pipe) +{ + struct HIF_URB_CONTEXT *urb_context = NULL; + DL_LIST *item; + + qdf_spin_lock_irqsave(&pipe->device->cs_lock); + item = dl_list_remove_item_from_head(&pipe->urb_pending_list); + if (item) + urb_context = A_CONTAINING_STRUCT(item, struct HIF_URB_CONTEXT, + link); + qdf_spin_unlock_irqrestore(&pipe->device->cs_lock); + + return urb_context; +} + +/** + * usb_hif_enqueue_pending_transfer() - add urb to pending xfer list + * @pipe: pointer to struct HIF_USB_PIPE + * @urb_context: pointer to struct HIF_URB_CONTEXT to be added to the xfer list + * + * Return: none + */ +void usb_hif_enqueue_pending_transfer(struct HIF_USB_PIPE *pipe, + struct HIF_URB_CONTEXT *urb_context) +{ + qdf_spin_lock_irqsave(&pipe->device->cs_lock); + dl_list_insert_tail(&pipe->urb_pending_list, &urb_context->link); + qdf_spin_unlock_irqrestore(&pipe->device->cs_lock); +} + + +/** + * usb_hif_remove_pending_transfer() - remove urb from its own list + * @urb_context: pointer to struct HIF_URB_CONTEXT to be removed + * + * Return: none + */ +void +usb_hif_remove_pending_transfer(struct HIF_URB_CONTEXT *urb_context) +{ + qdf_spin_lock_irqsave(&urb_context->pipe->device->cs_lock); + dl_list_remove(&urb_context->link); + qdf_spin_unlock_irqrestore(&urb_context->pipe->device->cs_lock); +} + +/** + * usb_hif_alloc_pipe_resources() - allocate urb_cnt urbs to a HIF pipe + * @pipe: pointer to struct HIF_USB_PIPE to which resources will be allocated + * @urb_cnt: number of urbs to be added to the HIF pipe + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +static QDF_STATUS usb_hif_alloc_pipe_resources + (struct HIF_USB_PIPE *pipe, int urb_cnt) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + int i; + struct HIF_URB_CONTEXT *urb_context; + + DL_LIST_INIT(&pipe->urb_list_head); + DL_LIST_INIT(&pipe->urb_pending_list); + + for (i = 0; i < urb_cnt; i++) { + urb_context = qdf_mem_malloc(sizeof(*urb_context)); + if (!urb_context) { + status = QDF_STATUS_E_NOMEM; + break; + } + urb_context->pipe = pipe; + urb_context->urb = usb_alloc_urb(0, GFP_KERNEL); + + if (!urb_context->urb) { + status = QDF_STATUS_E_NOMEM; + qdf_mem_free(urb_context); + HIF_ERROR("urb_context->urb is null"); + break; + } + + /* note we are only allocate the urb contexts here, the actual + * URB is + * allocated from the kernel as needed to do a transaction + */ + pipe->urb_alloc++; + + usb_hif_free_urb_to_pipe(pipe, urb_context); + } + + HIF_DBG("athusb: alloc resources lpipe:%d hpipe:0x%X urbs:%d", + pipe->logical_pipe_num, + pipe->usb_pipe_handle, + pipe->urb_alloc); + return status; +} + +/** + * usb_hif_free_pipe_resources() - free urb resources allocated to a HIF pipe + * @pipe: pointer to struct HIF_USB_PIPE + * + * Return: none + */ +static void usb_hif_free_pipe_resources(struct HIF_USB_PIPE *pipe) +{ + struct HIF_URB_CONTEXT *urb_context; + + if (!pipe->device) { + /* nothing allocated for this pipe */ + HIF_ERROR("pipe->device is null"); + return; + } + + HIF_TRACE("athusb: free resources lpipe:%d hpipe:0x%X urbs:%d avail:%d", + pipe->logical_pipe_num, + pipe->usb_pipe_handle, pipe->urb_alloc, + pipe->urb_cnt); + + if (pipe->urb_alloc != pipe->urb_cnt) { + HIF_ERROR("athusb: urb leak! lpipe:%d hpipe:0x%X urbs:%d avail:%d", + pipe->logical_pipe_num, + pipe->usb_pipe_handle, pipe->urb_alloc, + pipe->urb_cnt); + } + + while (true) { + urb_context = usb_hif_alloc_urb_from_pipe(pipe); + if (!urb_context) + break; + + if (urb_context->buf) { + qdf_nbuf_free(urb_context->buf); + urb_context->buf = NULL; + } + + usb_free_urb(urb_context->urb); + urb_context->urb = NULL; + qdf_mem_free(urb_context); + } + +} + +#ifdef QCN7605_SUPPORT +/** + * usb_hif_get_logical_pipe_num() - get pipe number for a particular enpoint + * @device: pointer to HIF_DEVICE_USB structure + * @ep_address: endpoint address + * @urb_count: number of urb resources to be allocated to the pipe + * + * Return: uint8_t pipe number corresponding to ep_address + */ +static uint8_t usb_hif_get_logical_pipe_num(struct HIF_DEVICE_USB *device, + uint8_t ep_address, + int *urb_count) +{ + uint8_t pipe_num = HIF_USB_PIPE_INVALID; + + switch (ep_address) { + case USB_EP_ADDR_APP_CTRL_IN: + pipe_num = HIF_RX_CTRL_PIPE; + *urb_count = RX_URB_COUNT; + break; + case USB_EP_ADDR_APP_DATA_IN: + pipe_num = HIF_RX_DATA_PIPE; + *urb_count = RX_URB_COUNT; + break; + break; + case USB_EP_ADDR_APP_CTRL_OUT: + pipe_num = HIF_TX_CTRL_PIPE; + *urb_count = TX_URB_COUNT; + break; + case USB_EP_ADDR_APP_DATA_OUT: + pipe_num = HIF_TX_DATA_LP_PIPE; + *urb_count = TX_URB_COUNT; + break; + default: + /* note: there may be endpoints not currently used */ + break; + } + + return pipe_num; +} +#else +/** + * usb_hif_get_logical_pipe_num() - get pipe number for a particular enpoint + * @device: pointer to HIF_DEVICE_USB structure + * @ep_address: endpoint address + * @urb_count: number of urb resources to be allocated to the pipe + * + * Return: uint8_t pipe number corresponding to ep_address + */ +static uint8_t usb_hif_get_logical_pipe_num + (struct HIF_DEVICE_USB *device, + uint8_t ep_address, + int *urb_count) +{ + uint8_t pipe_num = HIF_USB_PIPE_INVALID; + + switch (ep_address) { + case USB_EP_ADDR_APP_CTRL_IN: + pipe_num = HIF_RX_CTRL_PIPE; + *urb_count = RX_URB_COUNT; + break; + case USB_EP_ADDR_APP_DATA_IN: + pipe_num = HIF_RX_DATA_PIPE; + *urb_count = RX_URB_COUNT; + break; + case USB_EP_ADDR_APP_INT_IN: + pipe_num = HIF_RX_INT_PIPE; + *urb_count = RX_URB_COUNT; + break; + case USB_EP_ADDR_APP_DATA2_IN: + pipe_num = HIF_RX_DATA2_PIPE; + *urb_count = RX_URB_COUNT; + break; + case USB_EP_ADDR_APP_CTRL_OUT: + pipe_num = HIF_TX_CTRL_PIPE; + *urb_count = TX_URB_COUNT; + break; + case USB_EP_ADDR_APP_DATA_LP_OUT: + pipe_num = HIF_TX_DATA_LP_PIPE; + *urb_count = TX_URB_COUNT; + break; + case USB_EP_ADDR_APP_DATA_MP_OUT: + pipe_num = HIF_TX_DATA_MP_PIPE; + *urb_count = TX_URB_COUNT; + break; + case USB_EP_ADDR_APP_DATA_HP_OUT: + pipe_num = HIF_TX_DATA_HP_PIPE; + *urb_count = TX_URB_COUNT; + break; + default: + /* note: there may be endpoints not currently used */ + break; + } + + return pipe_num; +} +#endif /* QCN7605_SUPPORT */ + +/** + * usb_hif_get_logical_pipe_num() - setup urb resources for all pipes + * @device: pointer to HIF_DEVICE_USB structure + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS usb_hif_setup_pipe_resources(struct HIF_DEVICE_USB *device) +{ + struct usb_interface *interface = device->interface; + struct usb_host_interface *iface_desc = interface->cur_altsetting; + struct usb_endpoint_descriptor *endpoint; + int i; + int urbcount; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct HIF_USB_PIPE *pipe; + uint8_t pipe_num; + + /* walk decriptors and setup pipes */ + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; + + if (IS_BULK_EP(endpoint->bmAttributes)) { + HIF_DBG("%s Bulk Ep:0x%2.2X maxpktsz:%d", + IS_DIR_IN(endpoint->bEndpointAddress) ? + "RX" : "TX", + endpoint->bEndpointAddress, + qdf_le16_to_cpu(endpoint->wMaxPacketSize)); + } else if (IS_INT_EP(endpoint->bmAttributes)) { + HIF_DBG("%s Int Ep:0x%2.2X maxpktsz:%d interval:%d", + IS_DIR_IN(endpoint->bEndpointAddress) ? + "RX" : "TX", + endpoint->bEndpointAddress, + qdf_le16_to_cpu(endpoint->wMaxPacketSize), + endpoint->bInterval); + } else if (IS_ISOC_EP(endpoint->bmAttributes)) { + /* TODO for ISO */ + HIF_DBG("%s ISOC Ep:0x%2.2X maxpktsz:%d interval:%d", + IS_DIR_IN(endpoint->bEndpointAddress) ? + "RX" : "TX", + endpoint->bEndpointAddress, + qdf_le16_to_cpu(endpoint->wMaxPacketSize), + endpoint->bInterval); + } + urbcount = 0; + + pipe_num = usb_hif_get_logical_pipe_num(device, + endpoint->bEndpointAddress, + &urbcount); + if (HIF_USB_PIPE_INVALID == pipe_num) + continue; + + pipe = &device->pipes[pipe_num]; + if (pipe->device) { + /*pipe was already setup */ + continue; + } + + pipe->device = device; + pipe->logical_pipe_num = pipe_num; + pipe->ep_address = endpoint->bEndpointAddress; + pipe->max_packet_size = + qdf_le16_to_cpu(endpoint->wMaxPacketSize); + + if (IS_BULK_EP(endpoint->bmAttributes)) { + if (IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvbulkpipe(device->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndbulkpipe(device->udev, + pipe->ep_address); + } + } else if (IS_INT_EP(endpoint->bmAttributes)) { + if (IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvintpipe(device->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndintpipe(device->udev, + pipe->ep_address); + } + } else if (IS_ISOC_EP(endpoint->bmAttributes)) { + /* TODO for ISO */ + if (IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvisocpipe(device->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndisocpipe(device->udev, + pipe->ep_address); + } + } + pipe->ep_desc = endpoint; + + if (!IS_DIR_IN(pipe->ep_address)) + pipe->flags |= HIF_USB_PIPE_FLAG_TX; + + status = usb_hif_alloc_pipe_resources(pipe, urbcount); + + if (!QDF_IS_STATUS_SUCCESS(status)) + break; + + } + + return status; +} + + +/** + * usb_hif_cleanup_pipe_resources() - free urb resources for all pipes + * @device: pointer to HIF_DEVICE_USB structure + * + * Return: none + */ +void usb_hif_cleanup_pipe_resources(struct HIF_DEVICE_USB *device) +{ + int i; + + for (i = 0; i < HIF_USB_PIPE_MAX; i++) + usb_hif_free_pipe_resources(&device->pipes[i]); +} + +/** + * usb_hif_flush_pending_transfers() - kill pending urbs for a pipe + * @pipe: pointer to struct HIF_USB_PIPE structure + * + * Return: none + */ +static void usb_hif_flush_pending_transfers(struct HIF_USB_PIPE *pipe) +{ + struct HIF_URB_CONTEXT *urb_context; + + HIF_TRACE("+%s pipe : %d", __func__, pipe->logical_pipe_num); + + while (1) { + urb_context = usb_hif_dequeue_pending_transfer(pipe); + if (!urb_context) { + HIF_WARN("urb_context is NULL"); + break; + } + HIF_TRACE(" pending urb ctxt: 0x%pK", urb_context); + if (urb_context->urb) { + HIF_TRACE(" killing urb: 0x%pK", urb_context->urb); + /* killing the URB will cause the completion routines to + * run + */ + usb_kill_urb(urb_context->urb); + } + } + HIF_TRACE("-%s", __func__); +} + +/** + * usb_hif_flush_all() - flush pending transfers for all pipes for a usb bus + * @device: pointer to HIF_DEVICE_USB structure + * + * Return: none + */ +void usb_hif_flush_all(struct HIF_DEVICE_USB *device) +{ + int i; + struct HIF_USB_PIPE *pipe; + + HIF_TRACE("+%s", __func__); + + for (i = 0; i < HIF_USB_PIPE_MAX; i++) { + if (device->pipes[i].device) { + usb_hif_flush_pending_transfers(&device->pipes[i]); + pipe = &device->pipes[i]; + + HIF_USB_FLUSH_WORK(pipe); + } + } + + HIF_TRACE("-%s", __func__); +} + +/** + * usb_hif_cleanup_recv_urb() - cleanup recv urb + * @urb_context: pointer to struct HIF_URB_CONTEXT structure + * + * Return: none + */ +static void usb_hif_cleanup_recv_urb(struct HIF_URB_CONTEXT *urb_context) +{ + + if (urb_context->buf) { + qdf_nbuf_free(urb_context->buf); + urb_context->buf = NULL; + } + + usb_hif_free_urb_to_pipe(urb_context->pipe, urb_context); +} + +/** + * usb_hif_cleanup_transmit_urb() - cleanup transmit urb + * @urb_context: pointer to struct HIF_URB_CONTEXT structure + * + * Return: none + */ +void usb_hif_cleanup_transmit_urb(struct HIF_URB_CONTEXT *urb_context) +{ + usb_hif_free_urb_to_pipe(urb_context->pipe, urb_context); +} + +/** + * usb_hif_usb_recv_prestart_complete() - completion routine for prestart rx urb + * @urb: urb for which the completion routine is being called + * + * Return: none + */ +static void usb_hif_usb_recv_prestart_complete + (struct urb *urb) +{ + struct HIF_URB_CONTEXT *urb_context = + (struct HIF_URB_CONTEXT *) urb->context; + QDF_STATUS status = QDF_STATUS_SUCCESS; + qdf_nbuf_t buf = NULL; + struct HIF_USB_PIPE *pipe = urb_context->pipe; + struct hif_usb_softc *sc = HIF_GET_USB_SOFTC(pipe->device); + + HIF_DBG("+%s: recv pipe: %d, stat:%d,len:%d urb:0x%pK", + __func__, + pipe->logical_pipe_num, + urb->status, urb->actual_length, + urb); + + /* this urb is not pending anymore */ + usb_hif_remove_pending_transfer(urb_context); + do { + if (urb->status != 0) { + status = A_ECOMM; + switch (urb->status) { + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* NOTE: no need to spew these errors when + * device is removed + * or urb is killed due to driver shutdown + */ + status = A_ECANCELED; + break; + default: + HIF_ERROR("%s recv pipe: %d (ep:0x%2.2X), failed:%d", + __func__, + pipe->logical_pipe_num, + pipe->ep_address, + urb->status); + break; + } + break; + } + if (urb->actual_length == 0) + break; + buf = urb_context->buf; + /* we are going to pass it up */ + urb_context->buf = NULL; + qdf_nbuf_put_tail(buf, urb->actual_length); + + if (AR_DEBUG_LVL_CHECK(USB_HIF_DEBUG_DUMP_DATA)) { + uint8_t *data; + uint32_t len; + + qdf_nbuf_peek_header(buf, &data, &len); + debug_dump_bytes(data, len, "hif recv data"); + } + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, buf); + + HIF_USB_SCHEDULE_WORK(pipe); + } while (false); + + usb_hif_cleanup_recv_urb(urb_context); + + /* Prestart URBs runs out and now start working receive pipe. */ + qdf_spin_lock_irqsave(&pipe->device->rx_prestart_lock); + if ((--pipe->urb_prestart_cnt == 0) && !sc->suspend_state) + usb_hif_start_recv_pipes(pipe->device); + qdf_spin_unlock_irqrestore(&pipe->device->rx_prestart_lock); + + HIF_DBG("-%s", __func__); +} + +/** + * usb_hif_usb_recv_complete() - completion routine for rx urb + * @urb: urb for which the completion routine is being called + * + * Return: none + */ +static void usb_hif_usb_recv_complete(struct urb *urb) +{ + struct HIF_URB_CONTEXT *urb_context = + (struct HIF_URB_CONTEXT *) urb->context; + QDF_STATUS status = QDF_STATUS_SUCCESS; + qdf_nbuf_t buf = NULL; + struct HIF_USB_PIPE *pipe = urb_context->pipe; + struct hif_usb_softc *sc = HIF_GET_USB_SOFTC(pipe->device); + + HIF_DBG("+%s: recv pipe: %d, stat:%d,len:%d urb:0x%pK", + __func__, + pipe->logical_pipe_num, + urb->status, urb->actual_length, + urb); + + /* this urb is not pending anymore */ + usb_hif_remove_pending_transfer(urb_context); + + do { + + if (urb->status != 0) { + status = A_ECOMM; + switch (urb->status) { +#ifdef RX_SG_SUPPORT + case -EOVERFLOW: + urb->actual_length = HIF_USB_RX_BUFFER_SIZE; + status = QDF_STATUS_SUCCESS; + break; +#endif + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* NOTE: no need to spew these errors when + * device is removed + * or urb is killed due to driver shutdown + */ + status = A_ECANCELED; + break; + default: + HIF_ERROR("%s recv pipe: %d (ep:0x%2.2X), failed:%d", + __func__, + pipe->logical_pipe_num, + pipe->ep_address, + urb->status); + break; + } + break; + } + if (urb->actual_length == 0) + break; + buf = urb_context->buf; + /* we are going to pass it up */ + urb_context->buf = NULL; + qdf_nbuf_put_tail(buf, urb->actual_length); + if (AR_DEBUG_LVL_CHECK(USB_HIF_DEBUG_DUMP_DATA)) { + uint8_t *data; + uint32_t len; + + qdf_nbuf_peek_header(buf, &data, &len); + debug_dump_bytes(data, len, "hif recv data"); + } + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, buf); + + if (pipe->device->htc_callbacks.update_bundle_stats) + pipe->device->htc_callbacks.update_bundle_stats + (pipe->device->htc_callbacks.Context, 1); + + HIF_USB_SCHEDULE_WORK(pipe); + } while (false); + + usb_hif_cleanup_recv_urb(urb_context); + + /* Only re-submit URB when STATUS is success and HIF is not at the + * suspend state. + */ + if (QDF_IS_STATUS_SUCCESS(status) && !sc->suspend_state) { + if (pipe->urb_cnt >= pipe->urb_cnt_thresh) { + /* our free urbs are piling up, post more transfers */ + usb_hif_post_recv_transfers(pipe, + HIF_USB_RX_BUFFER_SIZE); + } + } else { + HIF_ERROR("%s: pipe: %d, fail to post URB: status(%d) suspend (%d)", + __func__, + pipe->logical_pipe_num, + urb->status, + sc->suspend_state); + } + + HIF_DBG("-%s", __func__); +} + +/** + * usb_hif_usb_recv_bundle_complete() - completion routine for rx bundling urb + * @urb: urb for which the completion routine is being called + * + * Return: none + */ +static void usb_hif_usb_recv_bundle_complete(struct urb *urb) +{ + struct HIF_URB_CONTEXT *urb_context = + (struct HIF_URB_CONTEXT *) urb->context; + QDF_STATUS status = QDF_STATUS_SUCCESS; + qdf_nbuf_t buf = NULL; + struct HIF_USB_PIPE *pipe = urb_context->pipe; + uint8_t *netdata, *netdata_new; + uint32_t netlen, netlen_new; + HTC_FRAME_HDR *HtcHdr; + uint16_t payloadLen; + qdf_nbuf_t new_skb = NULL; + uint8_t no_of_pkt_in_bundle; + + HIF_DBG("+%s: recv pipe: %d, stat:%d,len:%d urb:0x%pK", + __func__, + pipe->logical_pipe_num, + urb->status, urb->actual_length, + urb); + + /* this urb is not pending anymore */ + usb_hif_remove_pending_transfer(urb_context); + + do { + + if (urb->status != 0) { + status = A_ECOMM; + switch (urb->status) { + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* NOTE: no need to spew these errors when + * device is removed + * or urb is killed due to driver shutdown + */ + status = A_ECANCELED; + break; + default: + HIF_ERROR("%s recv pipe: %d (ep:0x%2.2X), failed:%d", + __func__, + pipe->logical_pipe_num, + pipe->ep_address, + urb->status); + break; + } + break; + } + if (urb->actual_length == 0) + break; + buf = urb_context->buf; + if (AR_DEBUG_LVL_CHECK(USB_HIF_DEBUG_DUMP_DATA)) { + uint8_t *data; + uint32_t len; + + qdf_nbuf_peek_header(buf, &data, &len); + debug_dump_bytes(data, len, "hif recv data"); + } + + qdf_nbuf_peek_header(buf, &netdata, &netlen); + netlen = urb->actual_length; + no_of_pkt_in_bundle = 0; + + do { + uint16_t frame_len; + + if (IS_FW_CRASH_DUMP(*(uint32_t *) netdata)) + frame_len = netlen; + else { + /* Hack into HTC header for bundle processing */ + HtcHdr = (HTC_FRAME_HDR *) netdata; + if (HtcHdr->EndpointID >= ENDPOINT_MAX) { + HIF_ERROR("athusb: Rx: invalid EndpointID=%d", + HtcHdr->EndpointID); + break; + } + + payloadLen = HtcHdr->PayloadLen; + payloadLen = qdf_le16_to_cpu(payloadLen); + + if (payloadLen > HIF_USB_RX_BUFFER_SIZE) { + HIF_ERROR("athusb: payloadLen too long %u", + payloadLen); + break; + } + frame_len = (HTC_HDR_LENGTH + payloadLen); + } + + if (netlen < frame_len) { + HIF_ERROR("athusb: subframe length %d not fitted into bundle packet length %d" + , netlen, frame_len); + break; + } + + /* allocate a new skb and copy */ + new_skb = + qdf_nbuf_alloc(NULL, frame_len, 0, 4, false); + if (!new_skb) { + HIF_ERROR("athusb: allocate skb (len=%u) failed" + , frame_len); + break; + } + + qdf_nbuf_peek_header(new_skb, &netdata_new, + &netlen_new); + qdf_mem_copy(netdata_new, netdata, frame_len); + qdf_nbuf_put_tail(new_skb, frame_len); + skb_queue_tail(&pipe->io_comp_queue, new_skb); + new_skb = NULL; + netdata += frame_len; + netlen -= frame_len; + no_of_pkt_in_bundle++; + } while (netlen); + + if (pipe->device->htc_callbacks.update_bundle_stats) + pipe->device->htc_callbacks.update_bundle_stats + (pipe->device->htc_callbacks.Context, + no_of_pkt_in_bundle); + + HIF_USB_SCHEDULE_WORK(pipe); + } while (false); + + if (!urb_context->buf) + HIF_ERROR("athusb: buffer in urb_context is NULL"); + + /* reset urb_context->buf ==> seems not necessary */ + usb_hif_free_urb_to_pipe(urb_context->pipe, urb_context); + + if (QDF_IS_STATUS_SUCCESS(status)) { + if (pipe->urb_cnt >= pipe->urb_cnt_thresh) { + /* our free urbs are piling up, post more transfers */ + usb_hif_post_recv_bundle_transfers(pipe, + pipe->device->rx_bundle_buf_len); + } + } + + HIF_DBG("-%s", __func__); +} + +/** + * usb_hif_post_recv_prestart_transfers() - post prestart recv urbs for a pipe + * @recv_pipe: rx data pipe + * @prestart_urb: number of prestart recv urbs to be posted + * + * Return: none + */ +static void usb_hif_post_recv_prestart_transfers(struct HIF_USB_PIPE *recv_pipe, + int prestart_urb) +{ + struct HIF_URB_CONTEXT *urb_context; + uint8_t *data; + uint32_t len; + struct urb *urb; + int i, usb_status, buffer_length = HIF_USB_RX_BUFFER_SIZE; + + HIF_TRACE("+%s", __func__); + + qdf_spin_lock_irqsave(&recv_pipe->device->rx_prestart_lock); + for (i = 0; i < prestart_urb; i++) { + urb_context = usb_hif_alloc_urb_from_pipe(recv_pipe); + if (!urb_context) + break; + + urb_context->buf = + qdf_nbuf_alloc(NULL, buffer_length, 0, 4, false); + if (!urb_context->buf) { + usb_hif_cleanup_recv_urb(urb_context); + break; + } + + qdf_nbuf_peek_header(urb_context->buf, &data, &len); + + urb = urb_context->urb; + + usb_fill_bulk_urb(urb, + recv_pipe->device->udev, + recv_pipe->usb_pipe_handle, + data, + buffer_length, + usb_hif_usb_recv_prestart_complete, + urb_context); + + HIF_DBG("athusb bulk recv submit:%d, 0x%X (ep:0x%2.2X), %d bytes, buf:0x%pK", + recv_pipe->logical_pipe_num, + recv_pipe->usb_pipe_handle, + recv_pipe->ep_address, buffer_length, + urb_context->buf); + + usb_hif_enqueue_pending_transfer(recv_pipe, urb_context); + usb_status = usb_submit_urb(urb, GFP_ATOMIC); + + if (usb_status) { + HIF_ERROR("athusb : usb bulk recv failed %d", + usb_status); + usb_hif_remove_pending_transfer(urb_context); + usb_hif_cleanup_recv_urb(urb_context); + break; + } + recv_pipe->urb_prestart_cnt++; + } + qdf_spin_unlock_irqrestore(&recv_pipe->device->rx_prestart_lock); + + HIF_TRACE("-%s", __func__); +} + +/** + * usb_hif_post_recv_transfers() - post recv urbs for a given pipe + * @recv_pipe: recv pipe for which urbs need to be posted + * @buffer_length: buffer length of the recv urbs + * + * Return: none + */ +static void usb_hif_post_recv_transfers(struct HIF_USB_PIPE *recv_pipe, + int buffer_length) +{ + struct HIF_URB_CONTEXT *urb_context; + uint8_t *data; + uint32_t len; + struct urb *urb; + int usb_status; + + while (1) { + + urb_context = usb_hif_alloc_urb_from_pipe(recv_pipe); + if (!urb_context) + break; + + urb_context->buf = qdf_nbuf_alloc(NULL, buffer_length, 0, + 4, false); + if (!urb_context->buf) { + usb_hif_cleanup_recv_urb(urb_context); + break; + } + + qdf_nbuf_peek_header(urb_context->buf, &data, &len); + + urb = urb_context->urb; + + usb_fill_bulk_urb(urb, + recv_pipe->device->udev, + recv_pipe->usb_pipe_handle, + data, + buffer_length, + usb_hif_usb_recv_complete, urb_context); + + HIF_DBG("athusb bulk recv submit:%d, 0x%X (ep:0x%2.2X), %d bytes, buf:0x%pK", + recv_pipe->logical_pipe_num, + recv_pipe->usb_pipe_handle, + recv_pipe->ep_address, buffer_length, + urb_context->buf); + + usb_hif_enqueue_pending_transfer(recv_pipe, urb_context); + + usb_status = usb_submit_urb(urb, GFP_ATOMIC); + + if (usb_status) { + HIF_ERROR("athusb : usb bulk recv failed %d", + usb_status); + usb_hif_remove_pending_transfer(urb_context); + usb_hif_cleanup_recv_urb(urb_context); + break; + } + } + +} + +/** + * usb_hif_post_recv_bundle_transfers() - post recv urbs for a given pipe + * @recv_pipe: recv pipe for which urbs need to be posted + * @buffer_length: maximum length of rx bundle + * + * Return: none + */ +static void usb_hif_post_recv_bundle_transfers(struct HIF_USB_PIPE *recv_pipe, + int buffer_length) +{ + struct HIF_URB_CONTEXT *urb_context; + uint8_t *data; + uint32_t len; + struct urb *urb; + int usb_status; + + while (1) { + + urb_context = usb_hif_alloc_urb_from_pipe(recv_pipe); + if (!urb_context) + break; + + if (!urb_context->buf) { + urb_context->buf = + qdf_nbuf_alloc(NULL, buffer_length, 0, 4, false); + if (!urb_context->buf) { + usb_hif_cleanup_recv_urb(urb_context); + break; + } + } + + qdf_nbuf_peek_header(urb_context->buf, &data, &len); + + urb = urb_context->urb; + usb_fill_bulk_urb(urb, + recv_pipe->device->udev, + recv_pipe->usb_pipe_handle, + data, + buffer_length, + usb_hif_usb_recv_bundle_complete, + urb_context); + + HIF_DBG("athusb bulk recv submit:%d, 0x%X (ep:0x%2.2X), %d bytes, buf:0x%pK", + recv_pipe->logical_pipe_num, + recv_pipe->usb_pipe_handle, + recv_pipe->ep_address, buffer_length, + urb_context->buf); + + usb_hif_enqueue_pending_transfer(recv_pipe, urb_context); + + usb_status = usb_submit_urb(urb, GFP_ATOMIC); + + if (usb_status) { + HIF_ERROR("athusb : usb bulk recv failed %d", + usb_status); + usb_hif_remove_pending_transfer(urb_context); + usb_hif_free_urb_to_pipe(urb_context->pipe, + urb_context); + break; + } + + } + +} + +/** + * usb_hif_prestart_recv_pipes() - post prestart recv urbs + * @device: HIF device for which prestart recv urbs need to be posted + * + * Return: none + */ +void usb_hif_prestart_recv_pipes(struct HIF_DEVICE_USB *device) +{ + struct HIF_USB_PIPE *pipe; + int prestart_cnt = 8; + + if (device->rx_ctrl_pipe_supported) { + pipe = &device->pipes[HIF_RX_CTRL_PIPE]; + prestart_cnt = 4; + usb_hif_post_recv_prestart_transfers(pipe, prestart_cnt); + } + /* + * USB driver learn to support bundle or not until the firmware + * download and ready. Only allocate some URBs for control message + * communication during the initial phase then start the final + * working pipe after all information understood. + */ + pipe = &device->pipes[HIF_RX_DATA_PIPE]; + usb_hif_post_recv_prestart_transfers(pipe, prestart_cnt); +} + +/** + * usb_hif_start_recv_pipes() - start recv urbs + * @device: HIF device for which recv urbs need to be posted + * + * This function is called after all prestart recv urbs are exhausted + * + * Return: none + */ +void usb_hif_start_recv_pipes(struct HIF_DEVICE_USB *device) +{ + struct HIF_USB_PIPE *pipe; + uint32_t buf_len; + + HIF_ENTER(); + pipe = &device->pipes[HIF_RX_DATA_PIPE]; + pipe->urb_cnt_thresh = pipe->urb_alloc / 2; + + HIF_TRACE("Post URBs to RX_DATA_PIPE: %d is_bundle %d", + device->pipes[HIF_RX_DATA_PIPE].urb_cnt, + device->is_bundle_enabled); + if (device->is_bundle_enabled) { + usb_hif_post_recv_bundle_transfers(pipe, + pipe->device->rx_bundle_buf_len); + } else { + buf_len = HIF_USB_RX_BUFFER_SIZE; + usb_hif_post_recv_transfers(pipe, buf_len); + } + + HIF_DBG("athusb bulk recv len %d", buf_len); + + if (!hif_usb_disable_rxdata2) { + HIF_TRACE("Post URBs to RX_DATA2_PIPE: %d", + device->pipes[HIF_RX_DATA2_PIPE].urb_cnt); + + pipe = &device->pipes[HIF_RX_DATA2_PIPE]; + pipe->urb_cnt_thresh = pipe->urb_alloc / 2; + usb_hif_post_recv_transfers(pipe, HIF_USB_RX_BUFFER_SIZE); + } + + if (device->rx_ctrl_pipe_supported) { + HIF_TRACE("Post URBs to RX_CONTROL_PIPE: %d", + device->pipes[HIF_RX_CTRL_PIPE].urb_cnt); + + pipe = &device->pipes[HIF_RX_CTRL_PIPE]; + pipe->urb_cnt_thresh = pipe->urb_alloc / 2; + usb_hif_post_recv_transfers(pipe, HIF_USB_RX_BUFFER_SIZE); + } + HIF_EXIT(); +} + +/** + * usb_hif_submit_ctrl_out() - send out a ctrl urb + * @device: HIF device for which urb needs to be posted + * @req: request value for the ctrl message + * @value: USB message value + * @index: USB message index value + * @data: pointer to data containing ctrl message to send + * @size: size of the control message to send + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS usb_hif_submit_ctrl_out(struct HIF_DEVICE_USB *device, + uint8_t req, uint16_t value, uint16_t index, + void *data, uint32_t size) +{ + int32_t result = 0; + QDF_STATUS ret = QDF_STATUS_SUCCESS; + uint8_t *buf = NULL; + + do { + + if (size > 0) { + buf = qdf_mem_malloc(size); + if (!buf) { + ret = QDF_STATUS_E_NOMEM; + break; + } + qdf_mem_copy(buf, (uint8_t *) data, size); + } + + HIF_DBG("ctrl-out req:0x%2.2X, value:0x%4.4X index:0x%4.4X, datasize:%d", + req, value, index, size); + + result = usb_control_msg(device->udev, + usb_sndctrlpipe(device->udev, 0), + req, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, buf, + size, 2 * HZ); + + if (result < 0) { + HIF_ERROR("%s failed,result = %d", __func__, result); + ret = QDF_STATUS_E_FAILURE; + } + + } while (false); + + if (buf) + qdf_mem_free(buf); + + return ret; +} + +/** + * usb_hif_submit_ctrl_in() - recv a resonse to the ctrl message sent out + * @device: HIF device for which urb needs to be received + * @req: request value for the ctrl message + * @value: USB message value + * @index: USB message index value + * @data: pointer to data containing ctrl message to be received + * @size: size of the control message to be received + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS usb_hif_submit_ctrl_in(struct HIF_DEVICE_USB *device, + uint8_t req, uint16_t value, uint16_t index, + void *data, uint32_t size) +{ + int32_t result = 0; + QDF_STATUS ret = QDF_STATUS_SUCCESS; + uint8_t *buf = NULL; + + do { + + if (size > 0) { + buf = qdf_mem_malloc(size); + if (!buf) { + ret = QDF_STATUS_E_NOMEM; + break; + } + } + + HIF_DBG("ctrl-in req:0x%2.2X, value:0x%4.4X index:0x%4.4X, datasize:%d", + req, value, index, size); + + result = usb_control_msg(device->udev, + usb_rcvctrlpipe(device->udev, 0), + req, + USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, buf, + size, 2 * HZ); + + if (result < 0) { + HIF_ERROR("%s failed, result = %d", __func__, result); + ret = QDF_STATUS_E_FAILURE; + break; + } + + qdf_mem_copy((uint8_t *) data, buf, size); + + } while (false); + + if (buf) + qdf_mem_free(buf); + + return ret; +} + +/** + * usb_hif_io_complete() - transmit call back for tx urb + * @pipe: pointer to struct HIF_USB_PIPE + * + * Return: none + */ +static void usb_hif_io_complete(struct HIF_USB_PIPE *pipe) +{ + qdf_nbuf_t buf; + struct HIF_DEVICE_USB *device; + HTC_FRAME_HDR *HtcHdr; + uint8_t *data; + uint32_t len; + struct hif_usb_softc *sc = HIF_GET_USB_SOFTC(pipe->device); + + device = pipe->device; + HIF_ENTER(); + while ((buf = skb_dequeue(&pipe->io_comp_queue))) { + if (pipe->flags & HIF_USB_PIPE_FLAG_TX) { + HIF_DBG("+athusb xmit callback buf:0x%pK", buf); + HtcHdr = (HTC_FRAME_HDR *) + qdf_nbuf_get_frag_vaddr(buf, 0); + +#ifdef ATH_11AC_TXCOMPACT +/* ATH_11AC_TXCOMPACT does not support High Latency mode */ +#else + device->htc_callbacks.txCompletionHandler(device-> + htc_callbacks. + Context, buf, + HtcHdr-> + EndpointID, 0); +#endif + HIF_DBG("-athusb xmit callback"); + } else { + HIF_DBG("+athusb recv callback buf: 0x%pK", buf); + qdf_nbuf_peek_header(buf, &data, &len); + + if (IS_FW_CRASH_DUMP(*((uint32_t *) data))) { + sc->fw_data = data; + sc->fw_data_len = len; + device->htc_callbacks.fwEventHandler( + device->htc_callbacks.Context, + QDF_STATUS_E_USB_ERROR); + qdf_nbuf_free(buf); + } else { + device->htc_callbacks.rxCompletionHandler( + device->htc_callbacks.Context, buf, + pipe->logical_pipe_num); + } + HIF_DBG("-athusb recv callback"); + } + } + + HIF_EXIT(); +} + +#ifdef HIF_USB_TASKLET +/** + * usb_hif_io_comp_tasklet() - per pipe tasklet routine + * @context: pointer to HIF USB pipe + * + * Return: none + */ +void usb_hif_io_comp_tasklet(unsigned long context) +{ + struct HIF_USB_PIPE *pipe = (struct HIF_USB_PIPE *) context; + + usb_hif_io_complete(pipe); +} + +#else +/** + * usb_hif_io_comp_work() - per pipe work queue + * @work: pointer to struct work_struct + * + * Return: none + */ +void usb_hif_io_comp_work(struct work_struct *work) +{ + struct HIF_USB_PIPE *pipe = container_of(work, struct HIF_USB_PIPE, + io_complete_work); + + usb_hif_io_complete(pipe); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/htc/dl_list.h b/drivers/staging/qca-wifi-host-cmn/htc/dl_list.h new file mode 100644 index 0000000000000000000000000000000000000000..468ea0362e0e6d0dbce87050fe5e3bbc84d9ebfe --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/dl_list.h @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2013-2014, 2017, 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/*=========================================================================== */ +/* Double-link list definitions (adapted from Atheros SDIO stack) */ +/* */ +/* Author(s): ="Atheros" */ +/*=========================================================================== */ + +#ifndef __DL_LIST_H___ +#define __DL_LIST_H___ + +#define A_CONTAINING_STRUCT(address, struct_type, field_name) \ + ((struct_type *)((char *)(address) - \ + (char *)(&((struct_type *)0)->field_name))) + +/* list functions */ +/* pointers for the list */ +typedef struct _DL_LIST { + struct _DL_LIST *pPrev; + struct _DL_LIST *pNext; +} DL_LIST, *PDL_LIST; +/* + * DL_LIST_INIT , initialize doubly linked list + */ +#define DL_LIST_INIT(pList) \ + {(pList)->pPrev = pList; (pList)->pNext = pList; } + +/* faster macro to init list and add a single item */ +#define DL_LIST_INIT_AND_ADD(pList, pItem) \ + { (pList)->pPrev = (pItem); \ + (pList)->pNext = (pItem); \ + (pItem)->pNext = (pList); \ + (pItem)->pPrev = (pList); \ + } + +#define DL_LIST_IS_EMPTY(pList) (((pList)->pPrev == (pList)) && \ + ((pList)->pNext == (pList))) +#define DL_LIST_GET_ITEM_AT_HEAD(pList) (pList)->pNext +#define DL_LIST_GET_ITEM_AT_TAIL(pList) (pList)->pPrev +/* + * ITERATE_OVER_LIST pStart is the list, pTemp is a temp list member + * NOT: do not use this function if the items in the list are deleted inside the + * iteration loop + */ +#define ITERATE_OVER_LIST(pStart, pTemp) \ + for ((pTemp) = (pStart)->pNext; pTemp != (pStart); \ + (pTemp) = (pTemp)->pNext) + +static inline bool dl_list_is_entry_in_list(const DL_LIST *pList, + const DL_LIST *pEntry) +{ + const DL_LIST *pTmp; + + if (pList == pEntry) + return true; + + ITERATE_OVER_LIST(pList, pTmp) { + if (pTmp == pEntry) + return true; + } + + return false; +} + +/* safe iterate macro that allows the item to be removed from the list + * the iteration continues to the next item in the list + */ +#define ITERATE_OVER_LIST_ALLOW_REMOVE(pStart, pItem, st, offset) \ + { \ + PDL_LIST pTemp; \ + { pTemp = (pStart)->pNext; } \ + while (pTemp != (pStart)) { \ + { (pItem) = A_CONTAINING_STRUCT(pTemp, st, offset); } \ + { pTemp = pTemp->pNext; } \ + +#define ITERATE_IS_VALID(pStart) dl_list_is_entry_in_list(pStart, pTemp) +#define ITERATE_RESET(pStart) { pTemp = (pStart)->pNext; } + +#define ITERATE_END }} + +/* + * dl_list_insert_tail - insert pAdd to the end of the list + */ +static inline PDL_LIST dl_list_insert_tail(PDL_LIST pList, PDL_LIST pAdd) +{ + /* insert at tail */ + pAdd->pPrev = pList->pPrev; + pAdd->pNext = pList; + if (pList->pPrev) + pList->pPrev->pNext = pAdd; + pList->pPrev = pAdd; + return pAdd; +} + +/* + * dl_list_insert_head - insert pAdd into the head of the list + */ +static inline PDL_LIST dl_list_insert_head(PDL_LIST pList, PDL_LIST pAdd) +{ + /* insert at head */ + pAdd->pPrev = pList; + pAdd->pNext = pList->pNext; + pList->pNext->pPrev = pAdd; + pList->pNext = pAdd; + return pAdd; +} + +#define DL_ListAdd(pList, pItem) dl_list_insert_head((pList), (pItem)) +/* + * dl_list_remove - remove pDel from list + */ +static inline PDL_LIST dl_list_remove(PDL_LIST pDel) +{ + if (pDel->pNext) + pDel->pNext->pPrev = pDel->pPrev; + if (pDel->pPrev) + pDel->pPrev->pNext = pDel->pNext; + /* point back to itself just to be safe, if remove is called again */ + pDel->pNext = pDel; + pDel->pPrev = pDel; + return pDel; +} + +/* + * dl_list_remove_item_from_head - get a list item from the head + */ +static inline PDL_LIST dl_list_remove_item_from_head(PDL_LIST pList) +{ + PDL_LIST pItem = NULL; + + if (pList->pNext != pList) { + pItem = pList->pNext; + /* remove the first item from head */ + dl_list_remove(pItem); + } + return pItem; +} + +static inline PDL_LIST dl_list_remove_item_from_tail(PDL_LIST pList) +{ + PDL_LIST pItem = NULL; + + if (pList->pPrev != pList) { + pItem = pList->pPrev; + /* remove the item from tail */ + dl_list_remove(pItem); + } + return pItem; +} + +/* transfer src list items to the tail of the destination list */ +static inline void dl_list_transfer_items_to_tail(PDL_LIST pDest, PDL_LIST pSrc) +{ + /* only concatenate if src is not empty */ + if (!DL_LIST_IS_EMPTY(pSrc)) { + /* cut out circular list in src and re-attach to end of dest */ + pSrc->pPrev->pNext = pDest; + pSrc->pNext->pPrev = pDest->pPrev; + pDest->pPrev->pNext = pSrc->pNext; + pDest->pPrev = pSrc->pPrev; + /* terminate src list, it is now empty */ + pSrc->pPrev = pSrc; + pSrc->pNext = pSrc; + } +} + +/* transfer src list items to the head of the destination list */ +static inline void dl_list_transfer_items_to_head(PDL_LIST pDest, PDL_LIST pSrc) +{ + /* only concatenate if src is not empty */ + if (!DL_LIST_IS_EMPTY(pSrc)) { + /* cut out circular list in src and reattach to start of dest */ + pSrc->pNext->pPrev = pDest; + pDest->pNext->pPrev = pSrc->pPrev; + pSrc->pPrev->pNext = pDest->pNext; + pDest->pNext = pSrc->pNext; + /* terminate src list, it is now empty */ + pSrc->pPrev = pSrc; + pSrc->pNext = pSrc; + } +} + +#endif /* __DL_LIST_H___ */ diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc.c b/drivers/staging/qca-wifi-host-cmn/htc/htc.c new file mode 100644 index 0000000000000000000000000000000000000000..f2d420cdc72e321b6014d47b5a8c3f6274b3a72a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc.c @@ -0,0 +1,1212 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "htc_debug.h" +#include "htc_internal.h" +#include "htc_credit_history.h" +#include "htc_hang_event.h" +#include +#include /* qdf_nbuf_t */ +#include /* qdf_print */ + +#define MAX_HTC_RX_BUNDLE 2 + +#if defined(WLAN_DEBUG) || defined(DEBUG) +static ATH_DEBUG_MASK_DESCRIPTION g_htc_debug_description[] = { + {ATH_DEBUG_SEND, "Send"}, + {ATH_DEBUG_RECV, "Recv"}, + {ATH_DEBUG_SYNC, "Sync"}, + {ATH_DEBUG_DUMP, "Dump Data (RX or TX)"}, + {ATH_DEBUG_SETUP, "Setup"}, +}; + +ATH_DEBUG_INSTANTIATE_MODULE_VAR(htc, + "htc", + "Host Target Communications", + ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_INFO | + ATH_DEBUG_SETUP, + ATH_DEBUG_DESCRIPTION_COUNT + (g_htc_debug_description), + g_htc_debug_description); + +#endif + +#if (defined(WMI_MULTI_MAC_SVC) || defined(QCA_WIFI_QCA8074) || \ + defined(QCA_WIFI_QCA6018)) +static const uint32_t svc_id[] = {WMI_CONTROL_SVC, WMI_CONTROL_SVC_WMAC1, + WMI_CONTROL_SVC_WMAC2}; +#else +static const uint32_t svc_id[] = {WMI_CONTROL_SVC}; +#endif + +extern unsigned int htc_credit_flow; + +static void reset_endpoint_states(HTC_TARGET *target); + +static void destroy_htc_tx_ctrl_packet(HTC_PACKET *pPacket) +{ + qdf_nbuf_t netbuf; + + netbuf = (qdf_nbuf_t) GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + if (netbuf) + qdf_nbuf_free(netbuf); + qdf_mem_free(pPacket); +} + +static HTC_PACKET *build_htc_tx_ctrl_packet(qdf_device_t osdev) +{ + HTC_PACKET *pPacket = NULL; + qdf_nbuf_t netbuf; + + do { + pPacket = (HTC_PACKET *) qdf_mem_malloc(sizeof(HTC_PACKET)); + if (!pPacket) + break; + netbuf = qdf_nbuf_alloc(osdev, HTC_CONTROL_BUFFER_SIZE, + 20, 4, true); + if (!netbuf) { + qdf_mem_free(pPacket); + pPacket = NULL; + break; + } + SET_HTC_PACKET_NET_BUF_CONTEXT(pPacket, netbuf); + } while (false); + + return pPacket; +} + +void htc_free_control_tx_packet(HTC_TARGET *target, HTC_PACKET *pPacket) +{ + +#ifdef TODO_FIXME + LOCK_HTC(target); + HTC_PACKET_ENQUEUE(&target->ControlBufferTXFreeList, pPacket); + UNLOCK_HTC(target); + /* TODO_FIXME netbufs cannot be RESET! */ +#else + destroy_htc_tx_ctrl_packet(pPacket); +#endif + +} + +HTC_PACKET *htc_alloc_control_tx_packet(HTC_TARGET *target) +{ +#ifdef TODO_FIXME + HTC_PACKET *pPacket; + + LOCK_HTC(target); + pPacket = htc_packet_dequeue(&target->ControlBufferTXFreeList); + UNLOCK_HTC(target); + + return pPacket; +#else + return build_htc_tx_ctrl_packet(target->osdev); +#endif +} + +/* Set the target failure handling callback */ +void htc_set_target_failure_callback(HTC_HANDLE HTCHandle, + HTC_TARGET_FAILURE Callback) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + target->HTCInitInfo.TargetFailure = Callback; +} + +void htc_dump(HTC_HANDLE HTCHandle, uint8_t CmdId, bool start) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + hif_dump(target->hif_dev, CmdId, start); +} + +void htc_ce_tasklet_debug_dump(HTC_HANDLE htc_handle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + if (!target->hif_dev) + return; + + hif_display_stats(target->hif_dev); +} + +/* cleanup the HTC instance */ +static void htc_cleanup(HTC_TARGET *target) +{ + HTC_PACKET *pPacket; + int i; + HTC_ENDPOINT *endpoint; + HTC_PACKET_QUEUE *pkt_queue; + qdf_nbuf_t netbuf; + + while (htc_dec_return_runtime_cnt((void *)target) >= 0) + hif_pm_runtime_put(target->hif_dev, RTPM_ID_HTC); + + if (target->hif_dev) { + hif_detach_htc(target->hif_dev); + hif_mask_interrupt_call(target->hif_dev); + target->hif_dev = NULL; + } + + while (true) { + pPacket = allocate_htc_packet_container(target); + if (!pPacket) + break; + qdf_mem_free(pPacket); + } + + LOCK_HTC_TX(target); + pPacket = target->pBundleFreeList; + target->pBundleFreeList = NULL; + UNLOCK_HTC_TX(target); + while (pPacket) { + HTC_PACKET *pPacketTmp = (HTC_PACKET *) pPacket->ListLink.pNext; + netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + if (netbuf) + qdf_nbuf_free(netbuf); + pkt_queue = pPacket->pContext; + if (pkt_queue) + qdf_mem_free(pkt_queue); + qdf_mem_free(pPacket); + pPacket = pPacketTmp; + } + +#ifdef TODO_FIXME + while (true) { + pPacket = htc_alloc_control_tx_packet(target); + if (!pPacket) + break; + netbuf = (qdf_nbuf_t) GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + if (netbuf) + qdf_nbuf_free(netbuf); + qdf_mem_free(pPacket); + } +#endif + + htc_flush_endpoint_txlookupQ(target, ENDPOINT_0, true); + + qdf_spinlock_destroy(&target->HTCLock); + qdf_spinlock_destroy(&target->HTCRxLock); + qdf_spinlock_destroy(&target->HTCTxLock); + for (i = 0; i < ENDPOINT_MAX; i++) { + endpoint = &target->endpoint[i]; + qdf_spinlock_destroy(&endpoint->lookup_queue_lock); + } + + /* free our instance */ + qdf_mem_free(target); +} + +#ifdef FEATURE_RUNTIME_PM +/** + * htc_runtime_pm_init(): runtime pm related intialization + * + * need to initialize a work item. + */ +static void htc_runtime_pm_init(HTC_TARGET *target) +{ + qdf_create_work(0, &target->queue_kicker, htc_kick_queues, target); +} + +/** + * htc_runtime_suspend() - runtime suspend HTC + * + * @htc_ctx: HTC context pointer + * + * This is a dummy function for symmetry. + * + * Return: 0 for success + */ +int htc_runtime_suspend(HTC_HANDLE htc_ctx) +{ + return 0; +} + +/** + * htc_runtime_resume(): resume htc + * + * The htc message queue needs to be kicked off after + * a runtime resume. Otherwise messages would get stuck. + * + * @htc_ctx: HTC context pointer + * + * Return: 0 for success; + */ +int htc_runtime_resume(HTC_HANDLE htc_ctx) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_ctx); + + if (!target) + return 0; + + qdf_sched_work(0, &target->queue_kicker); + return 0; +} + +/** + * htc_runtime_pm_deinit(): runtime pm related de-intialization + * + * need to de-initialize the work item. + * + * @target: HTC target pointer + * + */ +static void htc_runtime_pm_deinit(HTC_TARGET *target) +{ + if (!target) + return; + + qdf_destroy_work(0, &target->queue_kicker); +} + +int32_t htc_dec_return_runtime_cnt(HTC_HANDLE htc) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc); + + return qdf_atomic_dec_return(&target->htc_runtime_cnt); +} + +/** + * htc_init_runtime_cnt: Initialize htc runtime count + * @htc: HTC handle + * + * Return: None + */ +static inline +void htc_init_runtime_cnt(HTC_TARGET *target) +{ + qdf_atomic_init(&target->htc_runtime_cnt); +} +#else +static inline void htc_runtime_pm_init(HTC_TARGET *target) { } +static inline void htc_runtime_pm_deinit(HTC_TARGET *target) { } + +static inline +void htc_init_runtime_cnt(HTC_TARGET *target) +{ +} +#endif + +#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT) +static +void htc_update_rx_bundle_stats(void *ctx, uint8_t no_of_pkt_in_bundle) +{ + HTC_TARGET *target = (HTC_TARGET *)ctx; + + no_of_pkt_in_bundle--; + if (target && (no_of_pkt_in_bundle < HTC_MAX_MSG_PER_BUNDLE_RX)) + target->rx_bundle_stats[no_of_pkt_in_bundle]++; +} +#else +static +void htc_update_rx_bundle_stats(void *ctx, uint8_t no_of_pkt_in_bundle) +{ +} +#endif + +/* registered target arrival callback from the HIF layer */ +HTC_HANDLE htc_create(void *ol_sc, struct htc_init_info *pInfo, + qdf_device_t osdev, uint32_t con_mode) +{ + struct hif_msg_callbacks htcCallbacks; + HTC_ENDPOINT *pEndpoint = NULL; + HTC_TARGET *target = NULL; + int i; + + if (!ol_sc) { + HTC_ERROR("%s: ol_sc = NULL", __func__); + return NULL; + } + HTC_TRACE("+htc_create .. HIF :%pK", ol_sc); + + A_REGISTER_MODULE_DEBUG_INFO(htc); + + target = (HTC_TARGET *) qdf_mem_malloc(sizeof(HTC_TARGET)); + if (!target) + return NULL; + + htc_runtime_pm_init(target); + htc_credit_history_init(); + qdf_spinlock_create(&target->HTCLock); + qdf_spinlock_create(&target->HTCRxLock); + qdf_spinlock_create(&target->HTCTxLock); + for (i = 0; i < ENDPOINT_MAX; i++) { + pEndpoint = &target->endpoint[i]; + qdf_spinlock_create(&pEndpoint->lookup_queue_lock); + } + target->is_nodrop_pkt = false; + target->htc_hdr_length_check = false; + target->wmi_ep_count = 1; + + do { + qdf_mem_copy(&target->HTCInitInfo, pInfo, + sizeof(struct htc_init_info)); + target->host_handle = pInfo->pContext; + target->osdev = osdev; + target->con_mode = con_mode; + + reset_endpoint_states(target); + + INIT_HTC_PACKET_QUEUE(&target->ControlBufferTXFreeList); + + for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) { + HTC_PACKET *pPacket = (HTC_PACKET *) + qdf_mem_malloc(sizeof(HTC_PACKET)); + if (pPacket) + free_htc_packet_container(target, pPacket); + } + +#ifdef TODO_FIXME + for (i = 0; i < NUM_CONTROL_TX_BUFFERS; i++) { + pPacket = build_htc_tx_ctrl_packet(); + if (!pPacket) + break; + htc_free_control_tx_packet(target, pPacket); + } +#endif + + /* setup HIF layer callbacks */ + qdf_mem_zero(&htcCallbacks, sizeof(struct hif_msg_callbacks)); + htcCallbacks.Context = target; + htcCallbacks.rxCompletionHandler = htc_rx_completion_handler; + htcCallbacks.txCompletionHandler = htc_tx_completion_handler; + htcCallbacks.txResourceAvailHandler = + htc_tx_resource_avail_handler; + htcCallbacks.fwEventHandler = htc_fw_event_handler; + htcCallbacks.update_bundle_stats = htc_update_rx_bundle_stats; + target->hif_dev = ol_sc; + + /* Get HIF default pipe for HTC message exchange */ + pEndpoint = &target->endpoint[ENDPOINT_0]; + + hif_post_init(target->hif_dev, target, &htcCallbacks); + hif_get_default_pipe(target->hif_dev, &pEndpoint->UL_PipeID, + &pEndpoint->DL_PipeID); + hif_set_initial_wakeup_cb(target->hif_dev, + pInfo->target_initial_wakeup_cb, + pInfo->target_psoc); + + } while (false); + + htc_recv_init(target); + htc_init_runtime_cnt(target); + + HTC_TRACE("-htc_create: (0x%pK)", target); + + htc_hang_event_notifier_register(target); + + return (HTC_HANDLE) target; +} + +void htc_destroy(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("+htc_destroy .. Destroying :0x%pK\n", target)); + htc_hang_event_notifier_unregister(); + hif_stop(htc_get_hif_device(HTCHandle)); + if (target) + htc_cleanup(target); + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-htc_destroy\n")); +} + +/* get the low level HIF device for the caller , the caller may wish to do low + * level HIF requests + */ +void *htc_get_hif_device(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + return target->hif_dev; +} + +static void htc_control_tx_complete(void *Context, HTC_PACKET *pPacket) +{ + HTC_TARGET *target = (HTC_TARGET *) Context; + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("+-htc_control_tx_complete 0x%pK (l:%d)\n", pPacket, + pPacket->ActualLength)); + htc_free_control_tx_packet(target, pPacket); +} + +/* TODO, this is just a temporary max packet size */ +#define MAX_MESSAGE_SIZE 1536 + +/** + * htc_setup_epping_credit_allocation() - allocate credits/HTC buffers to WMI + * @scn: pointer to hif_opaque_softc + * @pEntry: pointer to tx credit allocation entry + * @credits: number of credits + * + * Return: None + */ +static void +htc_setup_epping_credit_allocation(struct hif_opaque_softc *scn, + struct htc_service_tx_credit_allocation *pEntry, + int credits) +{ + switch (hif_get_bus_type(scn)) { + case QDF_BUS_TYPE_PCI: + case QDF_BUS_TYPE_USB: + pEntry++; + pEntry->service_id = WMI_DATA_BE_SVC; + pEntry->CreditAllocation = (credits >> 1); + + pEntry++; + pEntry->service_id = WMI_DATA_BK_SVC; + pEntry->CreditAllocation = (credits >> 1); + break; + case QDF_BUS_TYPE_SDIO: + pEntry++; + pEntry->service_id = WMI_DATA_BE_SVC; + pEntry->CreditAllocation = credits; + break; + default: + break; + } +} + +/** + * htc_setup_target_buffer_assignments() - setup target buffer assignments + * @target: HTC Target Pointer + * + * Return: A_STATUS + */ +static +A_STATUS htc_setup_target_buffer_assignments(HTC_TARGET *target) +{ + struct htc_service_tx_credit_allocation *pEntry; + A_STATUS status; + int credits; + int creditsPerMaxMsg; + + creditsPerMaxMsg = MAX_MESSAGE_SIZE / target->TargetCreditSize; + if (MAX_MESSAGE_SIZE % target->TargetCreditSize) + creditsPerMaxMsg++; + + /* TODO, this should be configured by the caller! */ + + credits = target->TotalTransmitCredits; + pEntry = &target->ServiceTxAllocTable[0]; + + status = A_OK; + /* + * Allocate all credists/HTC buffers to WMI. + * no buffers are used/required for data. data always + * remains on host. + */ + if (HTC_IS_EPPING_ENABLED(target->con_mode)) { + pEntry++; + pEntry->service_id = WMI_CONTROL_SVC; + pEntry->CreditAllocation = credits; + /* endpoint ping is a testing tool directly on top of HTC in + * both target and host sides. + * In target side, the endppint ping fw has no wlan stack and + * FW mboxping app directly sits on HTC and it simply drops + * or loops back TX packets. For rx perf, FW mboxping app + * generates packets and passes packets to HTC to send to host. + * There is no WMI message exchanges between host and target + * in endpoint ping case. + * In host side, the endpoint ping driver is a Ethernet driver + * and it directly sits on HTC. Only HIF, HTC, QDF, ADF are + * used by the endpoint ping driver. There is no wifi stack + * at all in host side also. For tx perf use case, + * the user space mboxping app sends the raw packets to endpoint + * ping driver and it directly forwards to HTC for transmission + * to stress the bus. For the rx perf, HTC passes the received + * packets to endpoint ping driver and it is passed to the user + * space through the Ethernet interface. + * For credit allocation, in SDIO bus case, only BE service is + * used for tx/rx perf testing so that all credits are given + * to BE service. In PCIe and USB bus case, endpoint ping uses + * both BE and BK services to stress the bus so that the total + * credits are equally distributed to BE and BK services. + */ + + htc_setup_epping_credit_allocation(target->hif_dev, + pEntry, credits); + } else { + int i; + uint32_t max_wmi_svc = (sizeof(svc_id) / sizeof(uint32_t)); + + if ((target->wmi_ep_count == 0) || + (target->wmi_ep_count > max_wmi_svc)) + return A_ERROR; + + /* + * Divide credit among number of endpoints for WMI + */ + credits = credits / target->wmi_ep_count; + for (i = 0; i < target->wmi_ep_count; i++) { + status = A_OK; + pEntry++; + pEntry->service_id = svc_id[i]; + pEntry->CreditAllocation = credits; + } + } + + if (A_SUCCESS(status)) { + int i; + + for (i = 0; i < HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) { + if (target->ServiceTxAllocTable[i].service_id != 0) { + AR_DEBUG_PRINTF(ATH_DEBUG_INIT, + ("SVS Index : %d TX : 0x%2.2X : alloc:%d", + i, + target->ServiceTxAllocTable[i]. + service_id, + target->ServiceTxAllocTable[i]. + CreditAllocation)); + } + } + } + + return status; +} + +uint8_t htc_get_credit_allocation(HTC_TARGET *target, uint16_t service_id) +{ + uint8_t allocation = 0; + int i; + + for (i = 0; i < HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) { + if (target->ServiceTxAllocTable[i].service_id == service_id) { + allocation = + target->ServiceTxAllocTable[i].CreditAllocation; + } + } + + if (0 == allocation) { + AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1, + ("HTC Service TX : 0x%2.2X : allocation is zero!\n", + service_id)); + } + + return allocation; +} + +QDF_STATUS htc_wait_target(HTC_HANDLE HTCHandle) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_READY_EX_MSG *pReadyMsg; + struct htc_service_connect_req connect; + struct htc_service_connect_resp resp; + HTC_READY_MSG *rdy_msg; + uint16_t htc_rdy_msg_id; + uint8_t i = 0; + HTC_PACKET *rx_bundle_packet, *temp_bundle_packet; + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("htc_wait_target - Enter (target:0x%pK)\n", HTCHandle)); + AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1, ("+HWT\n")); + + do { + + status = hif_start(target->hif_dev); + if (QDF_IS_STATUS_ERROR(status)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("hif_start failed\n")); + break; + } + + status = htc_wait_recv_ctrl_message(target); + + if (QDF_IS_STATUS_ERROR(status)) + break; + + if (target->CtrlResponseLength < (sizeof(HTC_READY_EX_MSG))) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Invalid HTC Ready Msg Len:%d!\n", + target->CtrlResponseLength)); + status = QDF_STATUS_E_BADMSG; + break; + } + + pReadyMsg = (HTC_READY_EX_MSG *) target->CtrlResponseBuffer; + + rdy_msg = &pReadyMsg->Version2_0_Info; + htc_rdy_msg_id = + HTC_GET_FIELD(rdy_msg, HTC_READY_MSG, MESSAGEID); + if (htc_rdy_msg_id != HTC_MSG_READY_ID) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Invalid HTC Ready Msg : 0x%X!\n", + htc_rdy_msg_id)); + status = QDF_STATUS_E_BADMSG; + break; + } + + target->TotalTransmitCredits = HTC_GET_FIELD(rdy_msg, + HTC_READY_MSG, CREDITCOUNT); + if (target->HTCInitInfo.cfg_wmi_credit_cnt && + (target->HTCInitInfo.cfg_wmi_credit_cnt < + target->TotalTransmitCredits)) + /* + * If INI configured value is less than FW advertised, + * then use INI configured value, otherwise use FW + * advertised. + */ + target->TotalTransmitCredits = + target->HTCInitInfo.cfg_wmi_credit_cnt; + + target->TargetCreditSize = + (int)HTC_GET_FIELD(rdy_msg, HTC_READY_MSG, CREDITSIZE); + target->MaxMsgsPerHTCBundle = + (uint8_t) pReadyMsg->MaxMsgsPerHTCBundle; + UPDATE_ALT_CREDIT(target, pReadyMsg->AltDataCreditSize); + /* for old fw this value is set to 0. But the minimum value + * should be 1, i.e., no bundling + */ + if (target->MaxMsgsPerHTCBundle < 1) + target->MaxMsgsPerHTCBundle = 1; + + AR_DEBUG_PRINTF(ATH_DEBUG_INIT, + ("Target Ready! TX resource : %d size:%d, MaxMsgsPerHTCBundle = %d", + target->TotalTransmitCredits, + target->TargetCreditSize, + target->MaxMsgsPerHTCBundle)); + + if ((0 == target->TotalTransmitCredits) + || (0 == target->TargetCreditSize)) { + status = QDF_STATUS_E_ABORTED; + break; + } + + /* Allocate expected number of RX bundle buffer allocation */ + if (HTC_RX_BUNDLE_ENABLED(target)) { + temp_bundle_packet = NULL; + for (i = 0; i < MAX_HTC_RX_BUNDLE; i++) { + rx_bundle_packet = + allocate_htc_bundle_packet(target); + if (rx_bundle_packet) + rx_bundle_packet->ListLink.pNext = + (DL_LIST *)temp_bundle_packet; + else + break; + + temp_bundle_packet = rx_bundle_packet; + } + LOCK_HTC_TX(target); + target->pBundleFreeList = temp_bundle_packet; + UNLOCK_HTC_TX(target); + } + + /* done processing */ + target->CtrlResponseProcessing = false; + + htc_setup_target_buffer_assignments(target); + + /* setup our pseudo HTC control endpoint connection */ + qdf_mem_zero(&connect, sizeof(connect)); + qdf_mem_zero(&resp, sizeof(resp)); + connect.EpCallbacks.pContext = target; + connect.EpCallbacks.EpTxComplete = htc_control_tx_complete; + connect.EpCallbacks.EpRecv = htc_control_rx_complete; + connect.MaxSendQueueDepth = NUM_CONTROL_TX_BUFFERS; + connect.service_id = HTC_CTRL_RSVD_SVC; + + /* connect fake service */ + status = htc_connect_service((HTC_HANDLE) target, + &connect, &resp); + + } while (false); + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("htc_wait_target - Exit (%d)\n", + status)); + AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1, ("-HWT\n")); + return status; +} + +/* start HTC, this is called after all services are connected */ +static A_STATUS htc_config_target_hif_pipe(HTC_TARGET *target) +{ + + return A_OK; +} + +static void reset_endpoint_states(HTC_TARGET *target) +{ + HTC_ENDPOINT *pEndpoint; + int i; + + for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { + pEndpoint = &target->endpoint[i]; + pEndpoint->service_id = 0; + pEndpoint->MaxMsgLength = 0; + pEndpoint->MaxTxQueueDepth = 0; + pEndpoint->Id = i; + INIT_HTC_PACKET_QUEUE(&pEndpoint->TxQueue); + INIT_HTC_PACKET_QUEUE(&pEndpoint->TxLookupQueue); + INIT_HTC_PACKET_QUEUE(&pEndpoint->RxBufferHoldQueue); + pEndpoint->target = target; + pEndpoint->TxCreditFlowEnabled = (bool)htc_credit_flow; + qdf_atomic_init(&pEndpoint->TxProcessCount); + } +} + +/** + * htc_start() - Main HTC function to trigger HTC start + * @HTCHandle: pointer to HTC handle + * + * Return: QDF_STATUS_SUCCESS for success or an appropriate QDF_STATUS error + */ +QDF_STATUS htc_start(HTC_HANDLE HTCHandle) +{ + qdf_nbuf_t netbuf; + QDF_STATUS status = QDF_STATUS_SUCCESS; + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_SETUP_COMPLETE_EX_MSG *pSetupComp; + HTC_PACKET *pSendPacket; + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("htc_start Enter\n")); + + do { + + htc_config_target_hif_pipe(target); + + /* allocate a buffer to send */ + pSendPacket = htc_alloc_control_tx_packet(target); + if (!pSendPacket) { + AR_DEBUG_ASSERT(false); + qdf_print("%s: allocControlTxPacket failed", + __func__); + status = QDF_STATUS_E_NOMEM; + break; + } + + netbuf = + (qdf_nbuf_t) GET_HTC_PACKET_NET_BUF_CONTEXT(pSendPacket); + /* assemble setup complete message */ + qdf_nbuf_put_tail(netbuf, sizeof(HTC_SETUP_COMPLETE_EX_MSG)); + pSetupComp = + (HTC_SETUP_COMPLETE_EX_MSG *) qdf_nbuf_data(netbuf); + qdf_mem_zero(pSetupComp, sizeof(HTC_SETUP_COMPLETE_EX_MSG)); + + HTC_SET_FIELD(pSetupComp, HTC_SETUP_COMPLETE_EX_MSG, + MESSAGEID, HTC_MSG_SETUP_COMPLETE_EX_ID); + + if (!htc_credit_flow) { + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("HTC will not use TX credit flow control")); + pSetupComp->SetupFlags |= + HTC_SETUP_COMPLETE_FLAGS_DISABLE_TX_CREDIT_FLOW; + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("HTC using TX credit flow control")); + } + + if ((hif_get_bus_type(target->hif_dev) == QDF_BUS_TYPE_SDIO) || + (hif_get_bus_type(target->hif_dev) == + QDF_BUS_TYPE_USB)) { + if (HTC_RX_BUNDLE_ENABLED(target)) + pSetupComp->SetupFlags |= + HTC_SETUP_COMPLETE_FLAGS_ENABLE_BUNDLE_RECV; + hif_set_bundle_mode(target->hif_dev, true, + HTC_MAX_MSG_PER_BUNDLE_RX); + pSetupComp->MaxMsgsPerBundledRecv = HTC_MAX_MSG_PER_BUNDLE_RX; + } + + SET_HTC_PACKET_INFO_TX(pSendPacket, + NULL, + (uint8_t *) pSetupComp, + sizeof(HTC_SETUP_COMPLETE_EX_MSG), + ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); + + status = htc_send_pkt((HTC_HANDLE) target, pSendPacket); + if (QDF_IS_STATUS_ERROR(status)) + break; + } while (false); + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("htc_start Exit\n")); + return status; +} + +/*flush all queued buffers for surpriseremove case*/ +void htc_flush_surprise_remove(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + int i; + HTC_ENDPOINT *pEndpoint; +#ifdef RX_SG_SUPPORT + qdf_nbuf_t netbuf; + qdf_nbuf_queue_t *rx_sg_queue = &target->RxSgQueue; +#endif + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+htc_flush_surprise_remove\n")); + + /* cleanup endpoints */ + for (i = 0; i < ENDPOINT_MAX; i++) { + pEndpoint = &target->endpoint[i]; + htc_flush_rx_hold_queue(target, pEndpoint); + htc_flush_endpoint_tx(target, pEndpoint, HTC_TX_PACKET_TAG_ALL); + } + + hif_flush_surprise_remove(target->hif_dev); + +#ifdef RX_SG_SUPPORT + LOCK_HTC_RX(target); + while ((netbuf = qdf_nbuf_queue_remove(rx_sg_queue)) != NULL) + qdf_nbuf_free(netbuf); + RESET_RX_SG_CONFIG(target); + UNLOCK_HTC_RX(target); +#endif + + reset_endpoint_states(target); + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-htc_flush_surprise_remove\n")); +} + +/* stop HTC communications, i.e. stop interrupt reception, and flush all queued + * buffers + */ +void htc_stop(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + int i; + HTC_ENDPOINT *endpoint; +#ifdef RX_SG_SUPPORT + qdf_nbuf_t netbuf; + qdf_nbuf_queue_t *rx_sg_queue = &target->RxSgQueue; +#endif + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+htc_stop\n")); + + htc_runtime_pm_deinit(target); + + HTC_INFO("%s: endpoints cleanup\n", __func__); + /* cleanup endpoints */ + for (i = 0; i < ENDPOINT_MAX; i++) { + endpoint = &target->endpoint[i]; + htc_flush_rx_hold_queue(target, endpoint); + htc_flush_endpoint_tx(target, endpoint, HTC_TX_PACKET_TAG_ALL); + if (endpoint->ul_is_polled) { + qdf_timer_stop(&endpoint->ul_poll_timer); + qdf_timer_free(&endpoint->ul_poll_timer); + } + } + + /* Note: htc_flush_endpoint_tx for all endpoints should be called before + * hif_stop - otherwise htc_tx_completion_handler called from + * hif_send_buffer_cleanup_on_pipe for residual tx frames in HIF layer, + * might queue the packet again to HIF Layer - which could cause tx + * buffer leak + */ + + HTC_INFO("%s: stopping hif layer\n", __func__); + hif_stop(target->hif_dev); + +#ifdef RX_SG_SUPPORT + LOCK_HTC_RX(target); + while ((netbuf = qdf_nbuf_queue_remove(rx_sg_queue)) != NULL) + qdf_nbuf_free(netbuf); + RESET_RX_SG_CONFIG(target); + UNLOCK_HTC_RX(target); +#endif + + /** + * In SSR case, HTC tx completion callback for wmi will be blocked + * by TARGET_STATUS_RESET and HTC packets will be left unfreed on + * lookup queue. + * + * In case of target failing to send wmi_ready_event, the htc connect + * msg buffer will be left unmapped and not freed. So calling the + * completion handler for this buffer will handle this scenario. + */ + HTC_INFO("%s: flush endpoints Tx lookup queue\n", __func__); + for (i = 0; i < ENDPOINT_MAX; i++) { + endpoint = &target->endpoint[i]; + if (endpoint->service_id == WMI_CONTROL_SVC) + htc_flush_endpoint_txlookupQ(target, i, false); + else if (endpoint->service_id == HTC_CTRL_RSVD_SVC) + htc_flush_endpoint_txlookupQ(target, i, true); + } + HTC_INFO("%s: resetting endpoints state\n", __func__); + + reset_endpoint_states(target); + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-htc_stop\n")); +} + +void htc_dump_credit_states(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint; + int i; + + for (i = 0; i < ENDPOINT_MAX; i++) { + pEndpoint = &target->endpoint[i]; + if (0 == pEndpoint->service_id) + continue; + + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("--- EP : %d service_id: 0x%X --------------\n", + pEndpoint->Id, pEndpoint->service_id)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + (" TxCredits : %d\n", + pEndpoint->TxCredits)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + (" TxCreditSize : %d\n", + pEndpoint->TxCreditSize)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + (" TxCreditsPerMaxMsg : %d\n", + pEndpoint->TxCreditsPerMaxMsg)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + (" TxQueueDepth : %d\n", + HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue))); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("----------------------------------------\n")); + } +} + +bool htc_get_endpoint_statistics(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID Endpoint, + enum htc_endpoint_stat_action Action, + struct htc_endpoint_stats *pStats) +{ +#ifdef HTC_EP_STAT_PROFILING + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + bool clearStats = false; + bool sample = false; + + switch (Action) { + case HTC_EP_STAT_SAMPLE: + sample = true; + break; + case HTC_EP_STAT_SAMPLE_AND_CLEAR: + sample = true; + clearStats = true; + break; + case HTC_EP_STAT_CLEAR: + clearStats = true; + break; + default: + break; + } + + A_ASSERT(Endpoint < ENDPOINT_MAX); + + /* lock out TX and RX while we sample and/or clear */ + LOCK_HTC_TX(target); + LOCK_HTC_RX(target); + + if (sample) { + A_ASSERT(pStats); + /* return the stats to the caller */ + qdf_mem_copy(pStats, &target->endpoint[Endpoint].endpoint_stats, + sizeof(struct htc_endpoint_stats)); + } + + if (clearStats) { + /* reset stats */ + qdf_mem_zero(&target->endpoint[Endpoint].endpoint_stats, + sizeof(struct htc_endpoint_stats)); + } + + UNLOCK_HTC_RX(target); + UNLOCK_HTC_TX(target); + + return true; +#else + return false; +#endif +} + +void *htc_get_targetdef(HTC_HANDLE htc_handle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + return hif_get_targetdef(target->hif_dev); +} + +#ifdef IPA_OFFLOAD +/** + * htc_ipa_get_ce_resource() - get uc resource on lower layer + * @htc_handle: htc context + * @ce_sr_base_paddr: copyengine source ring base physical address + * @ce_sr_ring_size: copyengine source ring size + * @ce_reg_paddr: copyengine register physical address + * + * Return: None + */ +void htc_ipa_get_ce_resource(HTC_HANDLE htc_handle, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + if (target->hif_dev) + hif_ipa_get_ce_resource(target->hif_dev, + ce_sr, ce_sr_ring_size, ce_reg_paddr); +} +#endif /* IPA_OFFLOAD */ + +#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT) + +void htc_dump_bundle_stats(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + int total, i; + + total = 0; + for (i = 0; i < HTC_MAX_MSG_PER_BUNDLE_RX; i++) + total += target->rx_bundle_stats[i]; + + if (total) { + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("RX Bundle stats:\n")); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("Total RX packets: %d\n", + total)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ( + "Number of bundle: Number of packets\n")); + for (i = 0; i < HTC_MAX_MSG_PER_BUNDLE_RX; i++) + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("%10d:%10d(%2d%s)\n", (i+1), + target->rx_bundle_stats[i], + ((target->rx_bundle_stats[i]*100)/ + total), "%")); + } + + + total = 0; + for (i = 0; i < HTC_MAX_MSG_PER_BUNDLE_TX; i++) + total += target->tx_bundle_stats[i]; + + if (total) { + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("TX Bundle stats:\n")); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("Total TX packets: %d\n", + total)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("Number of bundle: Number of packets\n")); + for (i = 0; i < HTC_MAX_MSG_PER_BUNDLE_TX; i++) + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("%10d:%10d(%2d%s)\n", (i+1), + target->tx_bundle_stats[i], + ((target->tx_bundle_stats[i]*100)/ + total), "%")); + } +} + +void htc_clear_bundle_stats(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + qdf_mem_zero(&target->rx_bundle_stats, sizeof(target->rx_bundle_stats)); + qdf_mem_zero(&target->tx_bundle_stats, sizeof(target->tx_bundle_stats)); +} +#endif + +/** + * htc_vote_link_down - API to vote for link down + * @htc_handle: HTC handle + * + * API for upper layers to call HIF to vote for link down + * + * Return: void + */ +void htc_vote_link_down(HTC_HANDLE htc_handle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + if (!target->hif_dev) + return; + + hif_vote_link_down(target->hif_dev); +} + +/** + * htc_vote_link_up - API to vote for link up + * @htc_handle: HTC Handle + * + * API for upper layers to call HIF to vote for link up + * + * Return: void + */ +void htc_vote_link_up(HTC_HANDLE htc_handle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + if (!target->hif_dev) + return; + + hif_vote_link_up(target->hif_dev); +} + +/** + * htc_can_suspend_link - API to query HIF for link status + * @htc_handle: HTC Handle + * + * API for upper layers to call HIF to query if the link can suspend + * + * Return: void + */ +bool htc_can_suspend_link(HTC_HANDLE htc_handle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + if (!target->hif_dev) + return false; + + return hif_can_suspend_link(target->hif_dev); +} + +#ifdef FEATURE_RUNTIME_PM +int htc_pm_runtime_get(HTC_HANDLE htc_handle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + return hif_pm_runtime_get(target->hif_dev, + RTPM_ID_HTC); +} + +int htc_pm_runtime_put(HTC_HANDLE htc_handle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + return hif_pm_runtime_put(target->hif_dev, + RTPM_ID_HTC); +} +#endif + +/** + * htc_set_wmi_endpoint_count: Set number of WMI endpoint + * @htc_handle: HTC handle + * @wmi_ep_count: WMI enpoint count + * + * return: None + */ +void htc_set_wmi_endpoint_count(HTC_HANDLE htc_handle, uint8_t wmi_ep_count) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + target->wmi_ep_count = wmi_ep_count; +} + +/** + * htc_get_wmi_endpoint_count: Get number of WMI endpoint + * @htc_handle: HTC handle + * + * return: WMI enpoint count + */ +uint8_t htc_get_wmi_endpoint_count(HTC_HANDLE htc_handle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + return target->wmi_ep_count; +} diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_api.h b/drivers/staging/qca-wifi-host-cmn/htc/htc_api.h new file mode 100644 index 0000000000000000000000000000000000000000..5ed879821a3045097152af611568df28d1afa67d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_api.h @@ -0,0 +1,869 @@ +/* + * Copyright (c) 2013-2014, 2016-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HTC_API_H_ +#define _HTC_API_H_ + +#include +#include +#include /* qdf_device_t */ +#include "htc_packet.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* TODO.. for BMI */ +#define ENDPOINT1 0 +/* TODO -remove me, but we have to fix BMI first */ +#define HTC_MAILBOX_NUM_MAX 4 + +/* this is the amount of header room required by users of HTC */ +#define HTC_HEADER_LEN HTC_HDR_LENGTH + +#define HTC_HTT_TRANSFER_HDRSIZE 24 + +/* + * NOTE WELL: struct opaque_htc_handle is not defined anywhere. This + * reference is used to help ensure that a HTC_HANDLE is never used + * where a different handle type is expected + */ +struct opaque_htc_handle; +typedef struct opaque_htc_handle *HTC_HANDLE; + +typedef uint16_t HTC_SERVICE_ID; + +typedef void (*HTC_TARGET_FAILURE)(void *Instance, QDF_STATUS Status); + +struct htc_init_info { + void *pContext; /* context for target notifications */ + void (*TargetFailure)(void *Instance, QDF_STATUS Status); + void (*TargetSendSuspendComplete)(void *ctx, bool is_nack); + void (*target_initial_wakeup_cb)(void *cb_ctx); + void *target_psoc; + uint32_t cfg_wmi_credit_cnt; +}; + +/* Struct for HTC layer packet stats*/ +struct ol_ath_htc_stats { + int htc_get_pkt_q_fail_count; + int htc_pkt_q_empty_count; + int htc_send_q_empty_count; +}; + +/* To resume HTT Tx queue during runtime resume */ +typedef void (*HTC_EP_RESUME_TX_QUEUE)(void *); + +typedef int (*HTC_EP_PADDING_CREDIT_UPDATE) (void *, int); + +/* per service connection send completion */ +typedef void (*HTC_EP_SEND_PKT_COMPLETE)(void *, HTC_PACKET *); +/* per service connection callback when a plurality of packets have been sent + * The HTC_PACKET_QUEUE is a temporary queue object (e.g. freed on return from + * the callback) to hold a list of completed send packets. + * If the handler cannot fully traverse the packet queue before returning, it + * should transfer the items of the queue into the caller's private queue using: + * HTC_PACKET_ENQUEUE() + */ +typedef void (*HTC_EP_SEND_PKT_COMP_MULTIPLE)(void *, + HTC_PACKET_QUEUE *); +/* per service connection pkt received */ +typedef void (*HTC_EP_RECV_PKT)(void *, HTC_PACKET *); +/* per service connection callback when a plurality of packets are received + * The HTC_PACKET_QUEUE is a temporary queue object (e.g. freed on return from + * the callback) to hold a list of recv packets. + * If the handler cannot fully traverse the packet queue before returning, it + * should transfer the items of the queue into the caller's private queue using: + * HTC_PACKET_ENQUEUE() + */ +typedef void (*HTC_EP_RECV_PKT_MULTIPLE)(void *, HTC_PACKET_QUEUE *); + +/* Optional per service connection receive buffer re-fill callback, + * On some OSes (like Linux) packets are allocated from a global pool and + * indicated up to the network stack. The driver never gets the packets back + * from the OS. For these OSes a refill callback can be used to allocate and + * re-queue buffers into HTC. + * + * On other OSes, the network stack can call into the driver's OS-specific + * "return_packet" handler and the driver can re-queue these buffers into HTC. + * In this regard a refill callback is unnecessary + */ +typedef void (*HTC_EP_RECV_REFILL)(void *, HTC_ENDPOINT_ID Endpoint); + +/* Optional per service connection receive buffer allocation callback. + * On some systems packet buffers are an extremely limited resource. Rather than + * queue largest-possible-sized buffers to HTC, some systems would rather + * allocate a specific size as the packet is received. The trade off is + * slightly more processing (callback invoked for each RX packet) + * for the benefit of committing fewer buffer resources into HTC. + * + * The callback is provided the length of the pending packet to fetch. This + * includes the HTC header length plus the length of payload. The callback can + * return a pointer to the allocated HTC packet for immediate use. + * + * Alternatively a variant of this handler can be used to allocate large receive + * packets as needed. For example an application can use the refill mechanism + * for normal packets and the recv-alloc mechanism to handle the case where a + * large packet buffer is required. This can significantly reduce the + * amount of "committed" memory used to receive packets. + */ +typedef HTC_PACKET *(*HTC_EP_RECV_ALLOC)(void *, + HTC_ENDPOINT_ID Endpoint, + int Length); + +/* Optional per service connection callback to log packet information. + */ +typedef void (*HTC_EP_LOG_PKT)(void *, HTC_PACKET *); + +enum htc_send_full_action { + /* packet that overflowed should be kept in the queue */ + HTC_SEND_FULL_KEEP = 0, + /* packet that overflowed should be dropped */ + HTC_SEND_FULL_DROP = 1, +}; + +/* Optional per service connection callback when a send queue is full. This can + * occur if host continues queueing up TX packets faster than credits can arrive + * To prevent the host (on some Oses like Linux) from continuously queueing pkts + * and consuming resources, this callback is provided so that that the host + * can disable TX in the subsystem (i.e. network stack). + * This callback is invoked for each packet that "overflows" the HTC queue. The + * callback can determine whether the new packet that overflowed the queue can + * be kept (HTC_SEND_FULL_KEEP) or dropped (HTC_SEND_FULL_DROP). If a packet is + * dropped, the EpTxComplete handler will be called and the packet's status + * field will be set to A_NO_RESOURCE. + * Other OSes require a "per-packet" indication for each completed TX packet, + * this closed loop mechanism will prevent the network stack from overunning the + * NIC. The packet to keep or drop is passed for inspection to the registered + * handler the handler must ONLY inspect the packet, it may not free or reclaim + * the packet. + */ +typedef enum htc_send_full_action (*HTC_EP_SEND_QUEUE_FULL)(void *, + HTC_PACKET *pPacket); + +struct htc_ep_callbacks { + /* context for each callback */ + void *pContext; + /* tx completion callback for connected endpoint */ + HTC_EP_SEND_PKT_COMPLETE EpTxComplete; + /* receive callback for connected endpoint */ + HTC_EP_RECV_PKT EpRecv; + /* OPTIONAL receive re-fill callback for connected endpoint */ + HTC_EP_RECV_REFILL EpRecvRefill; + /* OPTIONAL send full callback */ + HTC_EP_SEND_QUEUE_FULL EpSendFull; + /* OPTIONAL recv allocation callback */ + HTC_EP_RECV_ALLOC EpRecvAlloc; + /* OPTIONAL recv allocation callback based on a threshold */ + HTC_EP_RECV_ALLOC EpRecvAllocThresh; + /* OPTIONAL completion handler for multiple complete + * indications (EpTxComplete must be NULL) + */ + HTC_EP_SEND_PKT_COMP_MULTIPLE EpTxCompleteMultiple; + + HTC_EP_RESUME_TX_QUEUE ep_resume_tx_queue; + + HTC_EP_PADDING_CREDIT_UPDATE ep_padding_credit_update; + /* if EpRecvAllocThresh is non-NULL, HTC will compare the + * threshold value to the current recv packet length and invoke + * the EpRecvAllocThresh callback to acquire a packet buffer + */ + int RecvAllocThreshold; + /* if a EpRecvRefill handler is provided, this value + * can be used to set a trigger refill callback + * when the recv queue drops below this value + * if set to 0, the refill is only called when packets + * are empty + */ + int RecvRefillWaterMark; + /* OPTIONAL callback to log packet information */ + HTC_EP_LOG_PKT ep_log_pkt; +}; + +/* service connection information */ +struct htc_service_connect_req { + /* service ID to connect to */ + HTC_SERVICE_ID service_id; + /* connection flags, see htc protocol definition */ + uint16_t ConnectionFlags; + /* ptr to optional service-specific meta-data */ + uint8_t *pMetaData; + /* optional meta data length */ + uint8_t MetaDataLength; + /* endpoint callbacks */ + struct htc_ep_callbacks EpCallbacks; + /* maximum depth of any send queue */ + int MaxSendQueueDepth; + /* HTC flags for the host-side (local) connection */ + uint32_t LocalConnectionFlags; + /* override max message size in send direction */ + unsigned int MaxSendMsgSize; +}; + +/* enable send bundle padding for this endpoint */ +#define HTC_LOCAL_CONN_FLAGS_ENABLE_SEND_BUNDLE_PADDING (1 << 0) + +/* service connection response information */ +struct htc_service_connect_resp { + /* caller supplied buffer to optional meta-data */ + uint8_t *pMetaData; + /* length of caller supplied buffer */ + uint8_t BufferLength; + /* actual length of meta data */ + uint8_t ActualLength; + /* endpoint to communicate over */ + HTC_ENDPOINT_ID Endpoint; + /* max length of all messages over this endpoint */ + unsigned int MaxMsgLength; + /* connect response code from target */ + uint8_t ConnectRespCode; +}; + +/* endpoint distribution structure */ +struct htc_endpoint_credit_dist { + struct _htc_endpoint_credit_dist *pNext; + struct _htc_endpoint_credit_dist *pPrev; + /* Service ID (set by HTC) */ + HTC_SERVICE_ID service_id; + /* endpoint for this distribution struct (set by HTC) */ + HTC_ENDPOINT_ID Endpoint; + /* distribution flags, distribution function can + * set default activity using SET_EP_ACTIVE() macro + */ + uint32_t DistFlags; + /* credits for normal operation, anything above this + * indicates the endpoint is over-subscribed, this field + * is only relevant to the credit distribution function + */ + int TxCreditsNorm; + /* floor for credit distribution, this field is + * only relevant to the credit distribution function + */ + int TxCreditsMin; + /* number of credits assigned to this EP, this field + * is only relevant to the credit dist function + */ + int TxCreditsAssigned; + /* current credits available, this field is used by + * HTC to determine whether a message can be sent or + * must be queued + */ + int TxCredits; + /* pending credits to distribute on this endpoint, this + * is set by HTC when credit reports arrive. + * The credit distribution functions sets this to zero + * when it distributes the credits + */ + int TxCreditsToDist; + /* this is the number of credits that the current pending TX + * packet needs to transmit. This is set by HTC when + * and endpoint needs credits in order to transmit + */ + int TxCreditsSeek; + /* size in bytes of each credit (set by HTC) */ + int TxCreditSize; + /* credits required for a maximum sized messages (set by HTC) */ + int TxCreditsPerMaxMsg; + /* reserved for HTC use */ + void *pHTCReserved; + /* current depth of TX queue , i.e. messages waiting for credits + * This field is valid only when HTC_CREDIT_DIST_ACTIVITY_CHANGE + * or HTC_CREDIT_DIST_SEND_COMPLETE is indicated on an endpoint + * that has non-zero credits to recover + */ + int TxQueueDepth; +}; + +#define HTC_EP_ACTIVE ((uint32_t) (1u << 31)) + +/* macro to check if an endpoint has gone active, useful for credit + * distributions */ +#define IS_EP_ACTIVE(epDist) ((epDist)->DistFlags & HTC_EP_ACTIVE) +#define SET_EP_ACTIVE(epDist) (epDist)->DistFlags |= HTC_EP_ACTIVE + +/* credit distibution code that is passed into the distrbution function, + * there are mandatory and optional codes that must be handled + */ +enum htc_credit_dist_reason { + /* credits available as a result of completed + * send operations (MANDATORY) resulting in credit reports + */ + HTC_CREDIT_DIST_SEND_COMPLETE = 0, + /* a change in endpoint activity occurred (OPTIONAL) */ + HTC_CREDIT_DIST_ACTIVITY_CHANGE = 1, + /* an endpoint needs to "seek" credits (OPTIONAL) */ + HTC_CREDIT_DIST_SEEK_CREDITS, + /* for debugging, dump any state information that is kept by + * the distribution function + */ + HTC_DUMP_CREDIT_STATE +}; + +typedef void (*HTC_CREDIT_DIST_CALLBACK)(void *Context, + struct htc_endpoint_credit_dist * + pEPList, + enum htc_credit_dist_reason + Reason); + +typedef void (*HTC_CREDIT_INIT_CALLBACK)(void *Context, + struct htc_endpoint_credit_dist * + pEPList, int TotalCredits); + +/* endpoint statistics action */ +enum htc_endpoint_stat_action { + /* only read statistics */ + HTC_EP_STAT_SAMPLE = 0, + /* sample and immediately clear statistics */ + HTC_EP_STAT_SAMPLE_AND_CLEAR = 1, + /* clear only */ + HTC_EP_STAT_CLEAR +}; + +/* endpoint statistics */ +struct htc_endpoint_stats { + /* number of TX packets posted to the endpoint */ + uint32_t TxPosted; + /* number of times the host set the credit-low flag in a send message on + * this endpoint + */ + uint32_t TxCreditLowIndications; + /* running count of total TX packets issued */ + uint32_t TxIssued; + /* running count of TX packets that were issued in bundles */ + uint32_t TxPacketsBundled; + /* running count of TX bundles that were issued */ + uint32_t TxBundles; + /* tx packets that were dropped */ + uint32_t TxDropped; + /* running count of total credit reports received for this endpoint */ + uint32_t TxCreditRpts; + /* credit reports received from this endpoint's RX packets */ + uint32_t TxCreditRptsFromRx; + /* credit reports received from RX packets of other endpoints */ + uint32_t TxCreditRptsFromOther; + /* credit reports received from endpoint 0 RX packets */ + uint32_t TxCreditRptsFromEp0; + /* count of credits received via Rx packets on this endpoint */ + uint32_t TxCreditsFromRx; + /* count of credits received via another endpoint */ + uint32_t TxCreditsFromOther; + /* count of credits received via another endpoint */ + uint32_t TxCreditsFromEp0; + /* count of consummed credits */ + uint32_t TxCreditsConsummed; + /* count of credits returned */ + uint32_t TxCreditsReturned; + /* count of RX packets received */ + uint32_t RxReceived; + /* count of lookahead records + * found in messages received on this endpoint + */ + uint32_t RxLookAheads; + /* count of recv packets received in a bundle */ + uint32_t RxPacketsBundled; + /* count of number of bundled lookaheads */ + uint32_t RxBundleLookAheads; + /* count of the number of bundle indications from the HTC header */ + uint32_t RxBundleIndFromHdr; + /* number of times the recv allocation threshold was hit */ + uint32_t RxAllocThreshHit; + /* total number of bytes */ + uint32_t RxAllocThreshBytes; +}; + +/* ------ Function Prototypes ------ */ +/** + * htc_create - Create an instance of HTC over the underlying HIF device + * @HifDevice: hif device handle, + * @pInfo: initialization information + * @osdev: QDF device structure + * @con_mode: driver connection mode + * + * Return: HTC_HANDLE on success, NULL on failure + */ +HTC_HANDLE htc_create(void *HifDevice, struct htc_init_info *pInfo, + qdf_device_t osdev, uint32_t con_mode); + +/** + * htc_get_hif_device - Get the underlying HIF device handle + * @HTCHandle: handle passed into the AddInstance callback + * + * Return: opaque HIF device handle usable in HIF API calls. + */ +void *htc_get_hif_device(HTC_HANDLE HTCHandle); + +/** + * htc_set_credit_distribution - Set credit distribution parameters + * @HTCHandle: HTC handle + * @pCreditDistCont: caller supplied context to pass into distribution functions + * @CreditDistFunc: Distribution function callback + * @CreditDistInit: Credit Distribution initialization callback + * @ServicePriorityOrder: Array containing list of service IDs, lowest index + * @is highestpriority: ListLength - number of elements in ServicePriorityOrder + * + * The user can set a custom credit distribution function to handle + * special requirementsfor each endpoint. A default credit distribution + * routine can be used by setting CreditInitFunc to NULL. The default + * credit distribution is only provided for simple "fair" credit distribution + * without regard to any prioritization. + * Return: None + */ +void htc_set_credit_distribution(HTC_HANDLE HTCHandle, + void *pCreditDistContext, + HTC_CREDIT_DIST_CALLBACK CreditDistFunc, + HTC_CREDIT_INIT_CALLBACK CreditInitFunc, + HTC_SERVICE_ID ServicePriorityOrder[], + int ListLength); + +/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + * Wait for the target to indicate the HTC layer is ready + * htc_wait_target + * @HTCHandle - HTC handle + * + * This API blocks until the target responds with an HTC ready message. + * The caller should not connect services until the target has indicated it is + * ready. + * Return: None + */ +QDF_STATUS htc_wait_target(HTC_HANDLE HTCHandle); + +/** + * htc_start - Start target service communications + * @HTCHandle - HTC handle + * + * This API indicates to the target that the service connection phase + * is completeand the target can freely start all connected services. This + * API should only be called AFTER all service connections have been made. + * TCStart will issue a SETUP_COMPLETE message to the target to indicate that + * all service connections have been made and the target can start + * communicating over the endpoints. + * Return: None + */ +QDF_STATUS htc_start(HTC_HANDLE HTCHandle); + +/** + * htc_connect_service - Connect to an HTC service + * @HTCHandle - HTC handle + * @pReq - connection details + * @pResp - connection response + * + * Service connections must be performed before htc_start. + * User provides callback handlersfor various endpoint events. + * Return: None + */ +QDF_STATUS htc_connect_service(HTC_HANDLE HTCHandle, + struct htc_service_connect_req *pReq, + struct htc_service_connect_resp *pResp); + +/** + * htc_dump - HTC register log dump + * @HTCHandle - HTC handle + * @CmdId - Log command + * @start - start/print logs + * + * Register logs will be started/printed/ be flushed. + * Return: None + */ +void htc_dump(HTC_HANDLE HTCHandle, uint8_t CmdId, bool start); + +/** + * htc_ce_taklet_debug_dump - Dump ce tasklet rings debug data + * @HTCHandle - HTC handle + * + * Debug logs will be printed. + * Return: None + */ +void htc_ce_tasklet_debug_dump(HTC_HANDLE htc_handle); + +/** + * htc_send_pkt - Send an HTC packet + * @HTCHandle - HTC handle + * @pPacket - packet to send + * + * Caller must initialize packet using SET_HTC_PACKET_INFO_TX() macro. + * This interface is fully asynchronous. On error, HTC SendPkt will + * call the registered Endpoint callback to cleanup the packet. + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS htc_send_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket); + +/** + * htc_send_data_pkt - Send an HTC packet containing a tx descriptor and data + * @HTCHandle - HTC handle + * @pPacket - packet to send + * + * Caller must initialize packet using SET_HTC_PACKET_INFO_TX() macro. + * Caller must provide headroom in an initial fragment added to the + * network buffer to store a HTC_FRAME_HDR. + * This interface is fully asynchronous. On error, htc_send_data_pkt will + * call the registered Endpoint EpDataTxComplete callback to cleanup + * the packet. + * Return: A_OK + */ +#ifdef ATH_11AC_TXCOMPACT +QDF_STATUS htc_send_data_pkt(HTC_HANDLE HTCHandle, qdf_nbuf_t netbuf, + int Epid, int ActualLength); +#else /*ATH_11AC_TXCOMPACT */ +QDF_STATUS htc_send_data_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket, + uint8_t more_data); +#endif /*ATH_11AC_TXCOMPACT */ + +/** + * htc_flush_surprise_remove - Flush HTC when target is removed surprisely + * service communications + * @HTCHandle - HTC handle + * + * All receive and pending TX packets will be flushed. + * Return: None + */ +void htc_flush_surprise_remove(HTC_HANDLE HTCHandle); + +/** + * htc_stop - Stop HTC service communications + * @HTCHandle - HTC handle + * + * HTC communications is halted. All receive and pending TX packets + * will be flushed. + * Return: None + */ +void htc_stop(HTC_HANDLE HTCHandle); + +/** + * htc_destroy - Destroy HTC service + * @HTCHandle - HTC handle + * + * This cleans up all resources allocated by htc_create(). + * Return: None + */ +void htc_destroy(HTC_HANDLE HTCHandle); + +/** + * htc_flush_endpoint - Flush pending TX packets + * @HTCHandle - HTC handle + * @Endpoint - Endpoint to flush + * @Tag - flush tag + * + * The Tag parameter is used to selectively flush packets with matching + * tags. The value of 0 forces all packets to be flush regardless of tag + * Return: None + */ +void htc_flush_endpoint(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Endpoint, + HTC_TX_TAG Tag); +/** + * htc_dump_credit_states - Dump credit distribution state + * @HTCHandle - HTC handle + * + * This dumps all credit distribution information to the debugger + * Return: None + */ +void htc_dump_credit_states(HTC_HANDLE HTCHandle); + +/** + * htc_indicate_activity_change - Indicate a traffic activity change on an + * endpoint + * @HTCHandle - HTC handle + * @Endpoint - endpoint in which activity has changed + * @Active - true if active, false if it has become inactive + * + * This triggers the registered credit distribution function to + * re-adjust credits for active/inactive endpoints. + * Return: None + */ +void htc_indicate_activity_change(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID Endpoint, bool Active); + +/** + * htc_get_endpoint_statistics - Get endpoint statistics + * @HTCHandle - HTC handle + * @Endpoint - Endpoint identifier + * @Action - action to take with statistics + * @pStats - statistics that were sampled (can be NULL if Action is + * HTC_EP_STAT_CLEAR) + * + * Statistics is a compile-time option and this function may return + * false if HTC is not compiled with profiling. + * The caller can specify the statistic "action" to take when sampling + * the statistics. This includes : + * HTC_EP_STAT_SAMPLE : The pStats structure is filled with the current + * values. + * HTC_EP_STAT_SAMPLE_AND_CLEAR : The structure is filled and the current + * statisticsare cleared. + * HTC_EP_STAT_CLEA : the statistics are cleared, the called can pass + * a NULL value for pStats + * Return: true if statistics profiling is enabled, otherwise false. + */ +bool htc_get_endpoint_statistics(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID Endpoint, + enum htc_endpoint_stat_action Action, + struct htc_endpoint_stats *pStats); + +/** + * htc_unblock_recv - Unblock HTC message reception + * @HTCHandle - HTC handle + * + * HTC will block the receiver if the EpRecvAlloc callback fails to provide a + * packet. The caller can use this API to indicate to HTC when resources + * (buffers) are available such that the receiver can be unblocked and HTC + * may re-attempt fetching the pending message. + * This API is not required if the user uses the EpRecvRefill callback or uses + * the HTCAddReceivePacket()API to recycle or provide receive packets to HTC. + * Return: None + */ +void htc_unblock_recv(HTC_HANDLE HTCHandle); + +/** + * htc_add_receive_pkt_multiple - Add multiple receive packets to HTC + * @HTCHandle - HTC handle + * @pPktQueue - HTC receive packet queue holding packets to add + * + * User must supply HTC packets for capturing incoming HTC frames. + * The caller mmust initialize each HTC packet using the + * SET_HTC_PACKET_INFO_RX_REFILL() macro. The queue must only contain + * recv packets for the same endpoint. Caller supplies a pointer to an + * HTC_PACKET_QUEUE structure holding the recv packet. This API will + * remove the packets from the pkt queue and place them into internal + * recv packet list. + * The caller may allocate the pkt queue on the stack to hold the pkts. + * Return: A_OK on success + */ +A_STATUS htc_add_receive_pkt_multiple(HTC_HANDLE HTCHandle, + HTC_PACKET_QUEUE *pPktQueue); + +/** + * htc_is_endpoint_active - Check if an endpoint is marked active + * @HTCHandle - HTC handle + * @Endpoint - endpoint to check for active state + * + * Return: returns true if Endpoint is Active + */ +bool htc_is_endpoint_active(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID Endpoint); + +/** + * htc_set_nodrop_pkt - Set up nodrop pkt flag for mboxping nodrop pkt + * @HTCHandle - HTC handle + * @isNodropPkt - indicates whether it is nodrop pkt + * + * Return: None + * + */ +void htc_set_nodrop_pkt(HTC_HANDLE HTCHandle, A_BOOL isNodropPkt); + +/** + * htc_enable_hdr_length_check - Set up htc_hdr_length_check flag + * @HTCHandle - HTC handle + * @htc_hdr_length_check - flag to indicate whether htc header length check is + * required + * + * Return: None + * + */ +void +htc_enable_hdr_length_check(HTC_HANDLE htc_handle, bool htc_hdr_length_check); + +/** + * htc_get_num_recv_buffers - Get the number of recv buffers currently queued + * into an HTC endpoint + * @HTCHandle - HTC handle + * @Endpoint - endpoint to check + * + * Return: returns number of buffers in queue + * + */ +int htc_get_num_recv_buffers(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID Endpoint); + +/** + * htc_set_target_failure_callback - Set the target failure handling callback + * in HTC layer + * @HTCHandle - HTC handle + * @Callback - target failure handling callback + * + * Return: None + */ +void htc_set_target_failure_callback(HTC_HANDLE HTCHandle, + HTC_TARGET_FAILURE Callback); + +/* internally used functions for testing... */ +void htc_enable_recv(HTC_HANDLE HTCHandle); +void htc_disable_recv(HTC_HANDLE HTCHandle); +A_STATUS HTCWaitForPendingRecv(HTC_HANDLE HTCHandle, + uint32_t TimeoutInMs, + bool *pbIsRecvPending); + +/* function to fetch stats from htc layer*/ +struct ol_ath_htc_stats *ieee80211_ioctl_get_htc_stats(HTC_HANDLE + HTCHandle); +/** + * htc_get_tx_queue_depth() - get the tx queue depth of an htc endpoint + * @htc_handle: htc handle + * @enpoint_id: endpoint to check + * + * Return: htc_handle tx queue depth + */ +int htc_get_tx_queue_depth(HTC_HANDLE htc_handle, HTC_ENDPOINT_ID endpoint_id); + +#ifdef WLAN_FEATURE_FASTPATH +void htc_ctrl_msg_cmpl(HTC_HANDLE htc_pdev, HTC_ENDPOINT_ID htc_ep_id); + +#define HTC_TX_DESC_FILL(_htc_tx_desc, _download_len, _ep_id, _seq_no) \ +do { \ + HTC_WRITE32((_htc_tx_desc), \ + SM((_download_len), HTC_FRAME_HDR_PAYLOADLEN) | \ + SM((_ep_id), HTC_FRAME_HDR_ENDPOINTID)); \ + HTC_WRITE32((uint32_t *)(_htc_tx_desc) + 1, \ + SM((_seq_no), HTC_FRAME_HDR_CONTROLBYTES1)); \ +} while (0) +#endif /* WLAN_FEATURE_FASTPATH */ + +#ifdef __cplusplus +} +#endif +void htc_get_control_endpoint_tx_host_credits(HTC_HANDLE HTCHandle, + int *credit); +void htc_dump_counter_info(HTC_HANDLE HTCHandle); +void *htc_get_targetdef(HTC_HANDLE htc_handle); +#ifdef FEATURE_RUNTIME_PM +int htc_runtime_suspend(HTC_HANDLE htc_ctx); +int htc_runtime_resume(HTC_HANDLE htc_ctx); +#endif +void htc_global_credit_flow_disable(void); +void htc_global_credit_flow_enable(void); + +/* Disable ASPM : Disable PCIe low power */ +bool htc_can_suspend_link(HTC_HANDLE HTCHandle); +void htc_vote_link_down(HTC_HANDLE HTCHandle); +void htc_vote_link_up(HTC_HANDLE HTCHandle); +#ifdef IPA_OFFLOAD +void htc_ipa_get_ce_resource(HTC_HANDLE htc_handle, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr); +#else +#define htc_ipa_get_ce_resource(htc_handle, \ + ce_sr, ce_sr_ring_size, ce_reg_paddr) /* NO-OP */ +#endif /* IPA_OFFLOAD */ + +#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT) + +/** + * htc_dump_bundle_stats() - dump tx and rx htc message bundle stats + * @HTCHandle: htc handle + * + * Return: None + */ +void htc_dump_bundle_stats(HTC_HANDLE HTCHandle); + +/** + * htc_clear_bundle_stats() - clear tx and rx htc message bundle stats + * @HTCHandle: htc handle + * + * Return: None + */ +void htc_clear_bundle_stats(HTC_HANDLE HTCHandle); +#endif + +#ifdef FEATURE_RUNTIME_PM +int htc_pm_runtime_get(HTC_HANDLE htc_handle); +int htc_pm_runtime_put(HTC_HANDLE htc_handle); + +/** + * htc_dec_return_runtime_cnt: Decrement htc runtime count + * @htc: HTC handle + * + * Return: value of runtime count after decrement + */ +int32_t htc_dec_return_runtime_cnt(HTC_HANDLE htc); +#else +static inline int htc_pm_runtime_get(HTC_HANDLE htc_handle) { return 0; } +static inline int htc_pm_runtime_put(HTC_HANDLE htc_handle) { return 0; } + +static inline +int32_t htc_dec_return_runtime_cnt(HTC_HANDLE htc) +{ + return -1; +} +#endif + +/** + * htc_set_async_ep() - set async HTC end point + * user should call this function after htc_connect_service before + * queing any packets to end point + * @HTCHandle: htc handle + * @HTC_ENDPOINT_ID: end point id + * @value: true or false + * + * Return: None + */ + +void htc_set_async_ep(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID htc_ep_id, bool value); + +/** + * htc_set_wmi_endpoint_count: Set number of WMI endpoint + * @htc_handle: HTC handle + * @wmi_ep_count: WMI enpoint count + * + * return: None + */ +void htc_set_wmi_endpoint_count(HTC_HANDLE htc_handle, uint8_t wmi_ep_count); + +/** + * htc_get_wmi_endpoint_count: Get number of WMI endpoint + * @htc_handle: HTC handle + * + * return: WMI enpoint count + */ +uint8_t htc_get_wmi_endpoint_count(HTC_HANDLE htc_handle); + +/** + * htc_print_credit_history: print HTC credit history in buffer + * @htc: HTC handle + * @count: Number of lines to be copied + * @print: Print callback to print in the buffer + * @print_priv: any data required by the print method, e.g. a file handle + * + * return: None + */ +#ifdef FEATURE_HTC_CREDIT_HISTORY +void htc_print_credit_history(HTC_HANDLE htc, uint32_t count, + qdf_abstract_print * print, void *print_priv); +#else +static inline +void htc_print_credit_history(HTC_HANDLE htc, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + print(print_priv, "HTC Credit History Feature is disabled"); +} +#endif + +#ifdef SYSTEM_PM_CHECK +/** + * htc_system_resume() - Send out any pending WMI/HTT + * messages pending in htc queues on system resume. + * @htc: HTC handle + * + * Return: None + */ +void htc_system_resume(HTC_HANDLE htc); +#else +static inline void htc_system_resume(HTC_HANDLE htc) +{ +} +#endif +#endif /* _HTC_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_credit_history.c b/drivers/staging/qca-wifi-host-cmn/htc/htc_credit_history.c new file mode 100644 index 0000000000000000000000000000000000000000..becfd4e3ced3cc5765c349d75b0091b1e7b6e1eb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_credit_history.c @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "htc_debug.h" +#include "htc_internal.h" +#include "htc_credit_history.h" +#include +#include +#include + +struct HTC_CREDIT_HISTORY { + enum htc_credit_exchange_type type; + uint64_t time; + uint32_t tx_credit; + uint32_t htc_tx_queue_depth; +}; + +struct htc_hang_data_fixed_param { + uint16_t tlv_header; + struct HTC_CREDIT_HISTORY credit_hist; +} qdf_packed; + +static qdf_spinlock_t g_htc_credit_lock; +static uint32_t g_htc_credit_history_idx; +static uint32_t g_htc_credit_history_length; +static +struct HTC_CREDIT_HISTORY htc_credit_history_buffer[HTC_CREDIT_HISTORY_MAX]; + + +#ifdef QCA_WIFI_NAPIER_EMULATION +#define HTC_EMULATION_DELAY_IN_MS 20 +/** + * htc_add_delay(): Adds a delay in before proceeding, only for emulation + * + * Return: None + */ +static inline void htc_add_emulation_delay(void) +{ + qdf_mdelay(HTC_EMULATION_DELAY_IN_MS); +} +#else +static inline void htc_add_emulation_delay(void) +{ +} +#endif + +void htc_credit_history_init(void) +{ + qdf_spinlock_create(&g_htc_credit_lock); + g_htc_credit_history_idx = 0; + g_htc_credit_history_length = 0; +} + +/** + * htc_credit_record() - records tx que state & credit transactions + * @type: type of echange can be HTC_REQUEST_CREDIT + * or HTC_PROCESS_CREDIT_REPORT + * @tx_credits: current number of tx_credits + * @htc_tx_queue_depth: current hct tx queue depth + * + * This function records the credits and pending commands whenever a command is + * sent or credits are returned. Call this after the credits have been updated + * according to the transaction. Call this before dequeing commands. + * + * Consider making this function accept an HTC_ENDPOINT and find the current + * credits and queue depth itself. + * + */ +void htc_credit_record(enum htc_credit_exchange_type type, uint32_t tx_credit, + uint32_t htc_tx_queue_depth) +{ + qdf_spin_lock_bh(&g_htc_credit_lock); + if (g_htc_credit_history_idx >= HTC_CREDIT_HISTORY_MAX) + g_htc_credit_history_idx = 0; + + htc_credit_history_buffer[g_htc_credit_history_idx].type = type; + htc_credit_history_buffer[g_htc_credit_history_idx].time = + qdf_get_log_timestamp(); + htc_credit_history_buffer[g_htc_credit_history_idx].tx_credit = + tx_credit; + htc_credit_history_buffer[g_htc_credit_history_idx].htc_tx_queue_depth = + htc_tx_queue_depth; + + g_htc_credit_history_idx++; + g_htc_credit_history_length++; + htc_add_emulation_delay(); + qdf_spin_unlock_bh(&g_htc_credit_lock); +} + +void htc_print_credit_history(HTC_HANDLE htc, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + uint32_t idx; + + print(print_priv, "HTC Credit History (count %u)", count); + qdf_spin_lock_bh(&g_htc_credit_lock); + + if (count > HTC_CREDIT_HISTORY_MAX) + count = HTC_CREDIT_HISTORY_MAX; + if (count > g_htc_credit_history_length) + count = g_htc_credit_history_length; + + /* subtract count from index, and wrap if necessary */ + idx = HTC_CREDIT_HISTORY_MAX + g_htc_credit_history_idx - count; + idx %= HTC_CREDIT_HISTORY_MAX; + + print(print_priv, + "Time (seconds) Type Credits Queue Depth"); + while (count) { + struct HTC_CREDIT_HISTORY *hist = + &htc_credit_history_buffer[idx]; + uint64_t secs, usecs; + + qdf_log_timestamp_to_secs(hist->time, &secs, &usecs); + print(print_priv, "% 8lld.%06lld %-25s %-7.d %d", + secs, + usecs, + htc_credit_exchange_type_str(hist->type), + hist->tx_credit, + hist->htc_tx_queue_depth); + + --count; + ++idx; + if (idx >= HTC_CREDIT_HISTORY_MAX) + idx = 0; + } + + qdf_spin_unlock_bh(&g_htc_credit_lock); +} + +#ifdef WLAN_HANG_EVENT +void htc_log_hang_credit_history(struct notifier_block *block, void *data) +{ + qdf_notif_block *notif_block = qdf_container_of(block, qdf_notif_block, + notif_block); + struct qdf_notifer_data *htc_hang_data = data; + uint32_t count = 1, idx, total_len; + HTC_HANDLE htc; + struct htc_hang_data_fixed_param *cmd; + uint8_t *htc_buf_ptr; + + htc = notif_block->priv_data; + + if (!htc) + return; + + if (!htc_hang_data) + return; + + total_len = sizeof(struct htc_hang_data_fixed_param); + qdf_spin_lock_bh(&g_htc_credit_lock); + + if (count > HTC_CREDIT_HISTORY_MAX) + count = HTC_CREDIT_HISTORY_MAX; + if (count > g_htc_credit_history_length) + count = g_htc_credit_history_length; + + idx = HTC_CREDIT_HISTORY_MAX + g_htc_credit_history_idx - count; + idx %= HTC_CREDIT_HISTORY_MAX; + + qdf_spin_unlock_bh(&g_htc_credit_lock); + + while (count) { + struct HTC_CREDIT_HISTORY *hist = + &htc_credit_history_buffer[idx]; + htc_buf_ptr = htc_hang_data->hang_data + htc_hang_data->offset; + cmd = (struct htc_hang_data_fixed_param *)htc_buf_ptr; + + if (htc_hang_data->offset + total_len > QDF_WLAN_HANG_FW_OFFSET) + return; + + QDF_HANG_EVT_SET_HDR(&cmd->tlv_header, + HANG_EVT_TAG_HTC_CREDIT_HIST, + QDF_HANG_GET_STRUCT_TLVLEN(struct htc_hang_data_fixed_param)); + qdf_mem_copy(&cmd->credit_hist, hist, sizeof(*hist)); + --count; + ++idx; + if (idx >= HTC_CREDIT_HISTORY_MAX) + idx = 0; + htc_hang_data->offset += total_len; + } +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_credit_history.h b/drivers/staging/qca-wifi-host-cmn/htc/htc_credit_history.h new file mode 100644 index 0000000000000000000000000000000000000000..bd02c998a154b588d62577c5df91f1a05a0da5f5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_credit_history.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HTC_CREDIT_HISTORY_H_ +#define _HTC_CREDIT_HISTORY_H_ + +#include "htc_internal.h" + +#ifdef FEATURE_HTC_CREDIT_HISTORY + +/** + * htc_credit_history_init(): Init helper function to initialize HTC credit + * history buffers and variable. + * Return: None + */ + +void htc_credit_history_init(void); +void htc_credit_record(enum htc_credit_exchange_type type, uint32_t tx_credit, + uint32_t htc_tx_queue_depth); +#ifdef WLAN_HANG_EVENT +/** + * htc_log_hang_credit_history: Log the credit history into a buffer + * @block: Notifier block + * @data: Private data of the block. + * + * HTC hang event notifier callback inovked when the recovery is triggered + * to log the credit information to understand the reason for recovery. + * + * Return: none + */ +void htc_log_hang_credit_history(struct notifier_block *block, void *data); +#else +static inline +void htc_log_hang_credit_history(struct notifier_block *block, void *data) +{ +} +#endif +#else /* FEATURE_HTC_CREDIT_HISTORY */ + +static inline +void htc_credit_history_init(void) +{ +} + +static inline +void htc_credit_record(enum htc_credit_exchange_type type, uint32_t tx_credit, + uint32_t htc_tx_queue_depth) +{ } +#endif /* FEATURE_HTC_CREDIT_HISTORY */ +#endif /* _HTC_CREDIT_HISTORY_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_debug.h b/drivers/staging/qca-wifi-host-cmn/htc/htc_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..9ba247f7e394a5a57bc95ca21d15f775945198de --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_debug.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2013-2014 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef HTC_DEBUG_H_ +#define HTC_DEBUG_H_ + +#define ATH_MODULE_NAME htc +#include "a_debug.h" +#include "qdf_trace.h" + +/* ------- Debug related stuff ------- */ + +#define ATH_DEBUG_SEND ATH_DEBUG_MAKE_MODULE_MASK(0) +#define ATH_DEBUG_RECV ATH_DEBUG_MAKE_MODULE_MASK(1) +#define ATH_DEBUG_SYNC ATH_DEBUG_MAKE_MODULE_MASK(2) +#define ATH_DEBUG_DUMP ATH_DEBUG_MAKE_MODULE_MASK(3) +#define ATH_DEBUG_SETUP ATH_DEBUG_MAKE_MODULE_MASK(4) +#define HTC_ERROR(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HTC, QDF_TRACE_LEVEL_ERROR, ## args) +#define HTC_WARN(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HTC, QDF_TRACE_LEVEL_WARN, ## args) +#define HTC_INFO(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HTC, QDF_TRACE_LEVEL_INFO, ## args) +#define HTC_TRACE(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HTC, QDF_TRACE_LEVEL_DEBUG, ## args) +#endif /*HTC_DEBUG_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_hang_event.c b/drivers/staging/qca-wifi-host-cmn/htc/htc_hang_event.c new file mode 100644 index 0000000000000000000000000000000000000000..b6b75760dce7f684484dbab407515bb93b2b47d0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_hang_event.c @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include +#include +#include "htc_hang_event.h" +#include "htc_internal.h" +#include "htc_credit_history.h" + +static int htc_recovery_notifier_call(struct notifier_block *block, + unsigned long state, + void *data) +{ + htc_log_hang_credit_history(block, data); + + return NOTIFY_OK; +} + +static qdf_notif_block htc_recovery_notifier = { + .notif_block.notifier_call = htc_recovery_notifier_call, +}; + +QDF_STATUS htc_hang_event_notifier_register(HTC_TARGET *target) +{ + htc_recovery_notifier.priv_data = target; + return qdf_hang_event_register_notifier(&htc_recovery_notifier); +} + +QDF_STATUS htc_hang_event_notifier_unregister(void) +{ + return qdf_hang_event_unregister_notifier(&htc_recovery_notifier); +} diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_hang_event.h b/drivers/staging/qca-wifi-host-cmn/htc/htc_hang_event.h new file mode 100644 index 0000000000000000000000000000000000000000..3304541bca93d02d60296af08cf7e23441daaf90 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_hang_event.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef HTC_HANG_EVENT_H +#define HTC_HANG_EVENT_H + +#include "htc_internal.h" + +#ifdef WLAN_HANG_EVENT +/** + * htc_hang_event_notifier_register() - HTC hang event notifier register + * @target: Target specific htc hangle + * + * This function registers htc layer notifier for the hang event notifier chain. + * + * Return: QDF_STATUS + */ +QDF_STATUS htc_hang_event_notifier_register(HTC_TARGET *target); + +/** + * htc_hang_event_notifier_unregister() - htc hang event notifier unregister + * + * This function unregisters htc layer notifier for the hang event notifier + * chain. + * + * Return: QDF_STATUS + */ +QDF_STATUS htc_hang_event_notifier_unregister(void); +#else +static inline QDF_STATUS htc_hang_event_notifier_register(HTC_TARGET *target) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS htc_hang_event_notifier_unregister(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_internal.h b/drivers/staging/qca-wifi-host-cmn/htc/htc_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..a51e910a59460bc13de664844124ece2f7b6c9f0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_internal.h @@ -0,0 +1,416 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HTC_INTERNAL_H_ +#define _HTC_INTERNAL_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "htc_api.h" +#include "htc_packet.h" +#include +#include +#include +#include +#include +#include +#include +#include + +/* HTC operational parameters */ +#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */ +#define HTC_TARGET_DEBUG_INTR_MASK 0x01 +#define HTC_TARGET_CREDIT_INTR_MASK 0xF0 +#define HTC_MIN_MSG_PER_BUNDLE 2 + +#if defined(HIF_USB) + +#define HTC_MAX_MSG_PER_BUNDLE_RX 11 +#if defined(CFG_HTC_MAX_MSG_PER_BUNDLE_TX) +#define HTC_MAX_MSG_PER_BUNDLE_TX CFG_HTC_MAX_MSG_PER_BUNDLE_TX +#else +#define HTC_MAX_MSG_PER_BUNDLE_TX 8 +#endif /* CFG_HTC_MAX_MSG_PER_BUNDLE_TX */ +#else +#define HTC_MAX_MSG_PER_BUNDLE_RX 64 +#define HTC_MAX_MSG_PER_BUNDLE 16 +#define HTC_MAX_MSG_PER_BUNDLE_TX 32 +#endif + +#ifdef HIF_SDIO +#define UPDATE_ALT_CREDIT(tar, val) (tar->AltDataCreditSize = (uint16_t) val) +#else +#define UPDATE_ALT_CREDIT(tar, val) /* no-op */ +#endif + +/* + * HTC_MAX_TX_BUNDLE_SEND_LIMIT - + * This value is in units of tx frame fragments. + * It needs to be at least as large as the maximum number of tx frames in a + * HTC download bundle times the average number of fragments in each such frame + * (In certain operating systems, such as Linux, we expect to only have + * a single fragment per frame anyway.) + */ +#define HTC_MAX_TX_BUNDLE_SEND_LIMIT 255 + +#define HTC_PACKET_CONTAINER_ALLOCATION 32 +#define NUM_CONTROL_TX_BUFFERS 2 +#define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CONTROL_MESSAGE_LENGTH + \ + HTC_HDR_LENGTH) +#define HTC_CONTROL_BUFFER_ALIGN 32 +#define HTC_TARGET_RESPONSE_POLL_MS 10 +#if !defined(A_SIMOS_DEVHOST) +#define HTC_TARGET_MAX_RESPONSE_POLL 200 /* actual HW */ +#else +#define HTC_TARGET_MAX_RESPONSE_POLL 600 /* host + target simulation */ +#endif + +#define HTC_SERVICE_TX_PACKET_TAG HTC_TX_PACKET_TAG_INTERNAL + +#ifndef HTC_CREDIT_HISTORY_MAX +#define HTC_CREDIT_HISTORY_MAX 1024 +#endif + +#define HTC_IS_EPPING_ENABLED(_x) ((_x) == QDF_GLOBAL_EPPING_MODE) + +enum htc_credit_exchange_type { + HTC_REQUEST_CREDIT, + HTC_PROCESS_CREDIT_REPORT, + HTC_SUSPEND_ACK, + HTC_SUSPEND_NACK, + HTC_INITIAL_WAKE_UP, +}; + +static inline const char* +htc_credit_exchange_type_str(enum htc_credit_exchange_type type) +{ + switch (type) { + case HTC_REQUEST_CREDIT: + return "HTC_REQUEST_CREDIT"; + case HTC_PROCESS_CREDIT_REPORT: + return "HTC_PROCESS_CREDIT_REPORT"; + case HTC_SUSPEND_ACK: + return "HTC_SUSPEND_ACK"; + case HTC_SUSPEND_NACK: + return "HTC_SUSPEND_NACK"; + case HTC_INITIAL_WAKE_UP: + return "HTC_INITIAL_WAKE_UP"; + default: + return "Unknown htc_credit_exchange_type"; + } +} + +typedef struct _HTC_ENDPOINT { + HTC_ENDPOINT_ID Id; + + /* service ID this endpoint is bound to + * non-zero value means this endpoint is in use + */ + HTC_SERVICE_ID service_id; + + /* callbacks associated with this endpoint */ + struct htc_ep_callbacks EpCallBacks; + /* HTC frame buffer TX queue */ + HTC_PACKET_QUEUE TxQueue; + /* max depth of the TX queue before calling driver's full handler */ + int MaxTxQueueDepth; + /* max length of endpoint message */ + int MaxMsgLength; + uint8_t UL_PipeID; + uint8_t DL_PipeID; + /* Need to call HIF to get tx completion callbacks? */ + int ul_is_polled; + qdf_timer_t ul_poll_timer; + int ul_poll_timer_active; + int ul_outstanding_cnt; + /* Need to call HIF to fetch rx? (Not currently supported.) */ + int dl_is_polled; + /* not currently supported */ + /* qdf_timer_t dl_poll_timer; */ + + /* lookup queue to match netbufs to htc packets */ + HTC_PACKET_QUEUE TxLookupQueue; + /* temporary hold queue for back compatibility */ + HTC_PACKET_QUEUE RxBufferHoldQueue; + /* TX seq no (helpful) for debugging */ + uint8_t SeqNo; + /* serialization */ + qdf_atomic_t TxProcessCount; + struct _HTC_TARGET *target; + /* TX credits available on this endpoint */ + int TxCredits; + /* size in bytes of each credit (set by HTC) */ + int TxCreditSize; + /* credits required per max message (precalculated) */ + int TxCreditsPerMaxMsg; +#ifdef HTC_EP_STAT_PROFILING + /* endpoint statistics */ + struct htc_endpoint_stats endpoint_stats; +#endif + bool TxCreditFlowEnabled; + bool async_update; /* packets can be queued asynchronously */ + qdf_spinlock_t lookup_queue_lock; +} HTC_ENDPOINT; + +#ifdef HTC_EP_STAT_PROFILING +#define INC_HTC_EP_STAT(p, stat, count) ((p)->endpoint_stats.stat += (count)) +#else +#define INC_HTC_EP_STAT(p, stat, count) +#endif + +struct htc_service_tx_credit_allocation { + uint16_t service_id; + uint8_t CreditAllocation; +}; + +#define HTC_MAX_SERVICE_ALLOC_ENTRIES 8 + +/* Error codes for HTC layer packet stats*/ +enum ol_ath_htc_pkt_ecodes { + /* error- get packet at head of HTC_PACKET_Q */ + GET_HTC_PKT_Q_FAIL = 0, + HTC_PKT_Q_EMPTY, + HTC_SEND_Q_EMPTY +}; +/* our HTC target state */ +typedef struct _HTC_TARGET { + struct hif_opaque_softc *hif_dev; + HTC_ENDPOINT endpoint[ENDPOINT_MAX]; + qdf_spinlock_t HTCLock; + qdf_spinlock_t HTCRxLock; + qdf_spinlock_t HTCTxLock; + uint32_t HTCStateFlags; + void *host_handle; + struct htc_init_info HTCInitInfo; + HTC_PACKET *pHTCPacketStructPool; /* pool of HTC packets */ + HTC_PACKET_QUEUE ControlBufferTXFreeList; + uint8_t CtrlResponseBuffer[HTC_MAX_CONTROL_MESSAGE_LENGTH]; + int CtrlResponseLength; + qdf_event_t ctrl_response_valid; + bool CtrlResponseProcessing; + int TotalTransmitCredits; + struct htc_service_tx_credit_allocation + ServiceTxAllocTable[HTC_MAX_SERVICE_ALLOC_ENTRIES]; + int TargetCreditSize; +#ifdef RX_SG_SUPPORT + qdf_nbuf_queue_t RxSgQueue; + bool IsRxSgInprogress; + uint32_t CurRxSgTotalLen; /* current total length */ + uint32_t ExpRxSgTotalLen; /* expected total length */ +#endif + qdf_device_t osdev; + struct ol_ath_htc_stats htc_pkt_stats; + HTC_PACKET *pBundleFreeList; + uint32_t ce_send_cnt; + uint32_t TX_comp_cnt; + uint8_t MaxMsgsPerHTCBundle; + qdf_work_t queue_kicker; + +#ifdef HIF_SDIO + uint16_t AltDataCreditSize; +#endif + uint32_t avail_tx_credits; +#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT) + uint32_t rx_bundle_stats[HTC_MAX_MSG_PER_BUNDLE_RX]; + uint32_t tx_bundle_stats[HTC_MAX_MSG_PER_BUNDLE_TX]; +#endif + + uint32_t con_mode; + + /* + * This flag is from the mboxping tool. It indicates that we cannot + * drop it. Besides, nodrop pkts have higher priority than normal pkts. + */ + A_BOOL is_nodrop_pkt; + + /* + * Number of WMI endpoints used. + * Default value is 1. But it should be overidden after htc_create to + * reflect the actual count. + */ + uint8_t wmi_ep_count; + /* Flag to indicate whether htc header length check is required */ + bool htc_hdr_length_check; + +#ifdef FEATURE_RUNTIME_PM + /* Runtime count for H2T msg with response */ + qdf_atomic_t htc_runtime_cnt; +#endif +} HTC_TARGET; + + +#ifdef RX_SG_SUPPORT +#define RESET_RX_SG_CONFIG(_target) \ +do { \ + _target->ExpRxSgTotalLen = 0; \ + _target->CurRxSgTotalLen = 0; \ + _target->IsRxSgInprogress = false; \ +} while (0) +#endif + +#define HTC_STATE_STOPPING (1 << 0) +#define HTC_STOPPING(t) ((t)->HTCStateFlags & HTC_STATE_STOPPING) +#define LOCK_HTC(t) qdf_spin_lock_bh(&(t)->HTCLock) +#define UNLOCK_HTC(t) qdf_spin_unlock_bh(&(t)->HTCLock) +#define LOCK_HTC_RX(t) qdf_spin_lock_bh(&(t)->HTCRxLock) +#define UNLOCK_HTC_RX(t) qdf_spin_unlock_bh(&(t)->HTCRxLock) +#define LOCK_HTC_TX(t) qdf_spin_lock_bh(&(t)->HTCTxLock) +#define UNLOCK_HTC_TX(t) qdf_spin_unlock_bh(&(t)->HTCTxLock) +#define LOCK_HTC_EP_TX_LOOKUP(t) qdf_spin_lock_bh(&(t)->lookup_queue_lock) +#define UNLOCK_HTC_EP_TX_LOOKUP(t) qdf_spin_unlock_bh(&(t)->lookup_queue_lock) + +#define GET_HTC_TARGET_FROM_HANDLE(hnd) ((HTC_TARGET *)(hnd)) + +#define IS_TX_CREDIT_FLOW_ENABLED(ep) ((ep)->TxCreditFlowEnabled) + +#define HTC_POLL_CLEANUP_PERIOD_MS 10 /* milliseconds */ + +/* Macro to Increment the HTC_PACKET_ERRORS for Tx.*/ +#define OL_ATH_HTC_PKT_ERROR_COUNT_INCR(_target, _ecode) \ + do { \ + if (_ecode == GET_HTC_PKT_Q_FAIL) \ + (_target->htc_pkt_stats.htc_get_pkt_q_fail_count) += 1; \ + if (_ecode == HTC_PKT_Q_EMPTY) \ + (_target->htc_pkt_stats.htc_pkt_q_empty_count) += 1; \ + if (_ecode == HTC_SEND_Q_EMPTY) \ + (_target->htc_pkt_stats.htc_send_q_empty_count) += 1; \ + } while (0) +/* internal HTC functions */ + +QDF_STATUS htc_rx_completion_handler(void *Context, qdf_nbuf_t netbuf, + uint8_t pipeID); +QDF_STATUS htc_tx_completion_handler(void *Context, qdf_nbuf_t netbuf, + unsigned int transferID, + uint32_t toeplitz_hash_result); + +HTC_PACKET *allocate_htc_bundle_packet(HTC_TARGET *target); +void free_htc_bundle_packet(HTC_TARGET *target, HTC_PACKET *pPacket); + +HTC_PACKET *allocate_htc_packet_container(HTC_TARGET *target); +void free_htc_packet_container(HTC_TARGET *target, HTC_PACKET *pPacket); +void htc_flush_rx_hold_queue(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint); +void htc_flush_endpoint_tx(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint, + HTC_TX_TAG Tag); + +/** + * htc_flush_endpoint_txlookupQ() - Flush EP's lookup queue + * @target: HTC target + * @endpoint_id: EP ID + * @call_ep_callback: whether to call EP tx completion callback + * + * Return: void + */ +void htc_flush_endpoint_txlookupQ(HTC_TARGET *target, + HTC_ENDPOINT_ID endpoint_id, + bool call_ep_callback); + +void htc_recv_init(HTC_TARGET *target); +QDF_STATUS htc_wait_recv_ctrl_message(HTC_TARGET *target); +void htc_free_control_tx_packet(HTC_TARGET *target, HTC_PACKET *pPacket); +HTC_PACKET *htc_alloc_control_tx_packet(HTC_TARGET *target); +uint8_t htc_get_credit_allocation(HTC_TARGET *target, uint16_t service_id); +void htc_tx_resource_avail_handler(void *context, uint8_t pipeID); +void htc_control_rx_complete(void *Context, HTC_PACKET *pPacket); +void htc_process_credit_rpt(HTC_TARGET *target, + HTC_CREDIT_REPORT *pRpt, + int NumEntries, HTC_ENDPOINT_ID FromEndpoint); +void htc_fw_event_handler(void *context, QDF_STATUS status); +void htc_send_complete_check_cleanup(void *context); +#ifdef FEATURE_RUNTIME_PM +void htc_kick_queues(void *context); +#endif + +static inline void htc_send_complete_poll_timer_stop(HTC_ENDPOINT * + pEndpoint) { + LOCK_HTC_TX(pEndpoint->target); + if (pEndpoint->ul_poll_timer_active) { + /* qdf_timer_stop(&pEndpoint->ul_poll_timer); */ + pEndpoint->ul_poll_timer_active = 0; + } + UNLOCK_HTC_TX(pEndpoint->target); +} + +static inline void htc_send_complete_poll_timer_start(HTC_ENDPOINT * + pEndpoint) { + LOCK_HTC_TX(pEndpoint->target); + if (pEndpoint->ul_outstanding_cnt + && !pEndpoint->ul_poll_timer_active) { + /* qdf_timer_start( + * &pEndpoint->ul_poll_timer, HTC_POLL_CLEANUP_PERIOD_MS); + */ + pEndpoint->ul_poll_timer_active = 1; + } + UNLOCK_HTC_TX(pEndpoint->target); +} + +static inline void +htc_send_complete_check(HTC_ENDPOINT *pEndpoint, int force) { + /* + * Stop the polling-cleanup timer that will result in a later call to + * this function. It may get started again below, if there are still + * outsending sends. + */ + htc_send_complete_poll_timer_stop(pEndpoint); + /* + * Check whether HIF has any prior sends that have finished, + * have not had the post-processing done. + */ + hif_send_complete_check(pEndpoint->target->hif_dev, + pEndpoint->UL_PipeID, force); + /* + * If there are still outstanding sends after polling, start a timer + * to check again a little later. + */ + htc_send_complete_poll_timer_start(pEndpoint); +} + +#ifdef __cplusplus +} +#endif + +#ifndef DEBUG_BUNDLE +#define DEBUG_BUNDLE 0 +#endif + +#if defined(HIF_SDIO) || defined(HIF_USB) +#ifndef ENABLE_BUNDLE_TX +#define ENABLE_BUNDLE_TX 1 +#endif + +#ifndef ENABLE_BUNDLE_RX +#define ENABLE_BUNDLE_RX 1 +#endif +#endif /*defined(HIF_SDIO) || defined(HIF_USB)*/ + +#if defined ENABLE_BUNDLE_TX +#define HTC_TX_BUNDLE_ENABLED(target) (target->MaxMsgsPerHTCBundle > 1) +#else +#define HTC_TX_BUNDLE_ENABLED(target) 0 +#endif + +#if defined ENABLE_BUNDLE_RX +#define HTC_RX_BUNDLE_ENABLED(target) (target->MaxMsgsPerHTCBundle > 1) +#else +#define HTC_RX_BUNDLE_ENABLED(target) 0 +#endif + +#define HTC_ENABLE_BUNDLE(target) (target->MaxMsgsPerHTCBundle > 1) + +#endif /* !_HTC_HOST_INTERNAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_packet.h b/drivers/staging/qca-wifi-host-cmn/htc/htc_packet.h new file mode 100644 index 0000000000000000000000000000000000000000..78e0a3b95bb6bfe0b87b9ff7f4f281f9d907a070 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_packet.h @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2013-2014, 2016-2017, 2019-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef HTC_PACKET_H_ +#define HTC_PACKET_H_ + +#include +#include "dl_list.h" + +/* ------ Endpoint IDS ------ */ +typedef enum { + ENDPOINT_UNUSED = -1, + ENDPOINT_0 = 0, + ENDPOINT_1 = 1, + ENDPOINT_2 = 2, + ENDPOINT_3, + ENDPOINT_4, + ENDPOINT_5, + ENDPOINT_6, + ENDPOINT_7, + ENDPOINT_8, + ENDPOINT_MAX, +} HTC_ENDPOINT_ID; + +struct _HTC_PACKET; + +typedef void (*HTC_PACKET_COMPLETION)(void *, struct _HTC_PACKET *); + +typedef uint16_t HTC_TX_TAG; + +/** + * struct htc_tx_packet_info - HTC TX packet information + * @Tag: tag used to selective flush packets + * @CreditsUsed: number of credits used for this TX packet (HTC internal) + * @SendFlags: send flags (HTC internal) + * @SeqNo: internal seq no for debugging (HTC internal) + * @Flags: Internal use + */ +struct htc_tx_packet_info { + HTC_TX_TAG Tag; + int CreditsUsed; + uint8_t SendFlags; + int SeqNo; + uint32_t Flags; +}; + +/** + * HTC_TX_PACKET_TAG_XXX - #defines for tagging packets for special handling + * HTC_TX_PACKET_TAG_ALL: zero is reserved and used to flush ALL packets + * HTC_TX_PACKET_TAG_INTERNAL: internal tags start here + * HTC_TX_PACKET_TAG_USER_DEFINED: user-defined tags start here + * HTC_TX_PACKET_TAG_BUNDLED: indicate this is a bundled tx packet + * HTC_TX_PACKET_TAG_AUTO_PM: indicate a power management wmi command + */ +#define HTC_TX_PACKET_TAG_ALL 0 +#define HTC_TX_PACKET_TAG_INTERNAL 1 +#define HTC_TX_PACKET_TAG_USER_DEFINED (HTC_TX_PACKET_TAG_INTERNAL + 9) +#define HTC_TX_PACKET_TAG_BUNDLED (HTC_TX_PACKET_TAG_USER_DEFINED + 1) +#define HTC_TX_PACKET_TAG_AUTO_PM (HTC_TX_PACKET_TAG_USER_DEFINED + 2) + +/* Tag packet for runtime put after sending */ +#define HTC_TX_PACKET_TAG_RUNTIME_PUT (HTC_TX_PACKET_TAG_USER_DEFINED + 3) + +/*Tag packet for runtime put in response or cleanup */ +#define HTC_TX_PACKET_TAG_RTPM_PUT_RC (HTC_TX_PACKET_TAG_USER_DEFINED + 4) + +#define HTC_TX_PACKET_SYSTEM_SUSPEND (HTC_TX_PACKET_TAG_USER_DEFINED + 5) +#define HTC_TX_PACKET_SYSTEM_RESUME (HTC_TX_PACKET_TAG_USER_DEFINED + 6) + +#define HTC_TX_PACKET_FLAG_FIXUP_NETBUF (1 << 0) +#define HTC_TX_PACKET_FLAG_HTC_HEADER_IN_NETBUF_DATA (1 << 1) + +/** + * struct htc_rx_packet_info - HTC RX Packet information + * @ExpectedHdr: HTC Internal use + * @HTCRxFlags: HTC Internal use + * @IndicationFlags: indication flags set on each RX packet indication + */ +struct htc_rx_packet_info { + uint32_t ExpectedHdr; + uint32_t HTCRxFlags; + uint32_t IndicationFlags; +}; + +/* more packets on this endpoint are being fetched */ +#define HTC_RX_FLAGS_INDICATE_MORE_PKTS (1 << 0) +#define HTC_PACKET_MAGIC_COOKIE 0xdeadbeef + +/* wrapper around endpoint-specific packets */ +/** + * struct _HTC_PACKET - HTC Packet data structure + * @ListLink: double link + * @pPktContext: caller's per packet specific context + * @pBufferStart: The true buffer start, the caller can store the real buffer + * start here. In receive callbacks, the HTC layer sets pBuffer + * to the start of the payload past the header. This field allows + * the caller to reset pBuffer when it recycles receive packets + * back to HTC + * @pBuffer: payload start (RX/TX) + * @BufferLength: length of buffer + * @ActualLength: actual length of payload + * @Endpoint: endpoint that this packet was sent/recv'd from + * @Status: completion status + * @PktInfo: Packet specific info + * @netbufOrigHeadRoom: Original head room of skb + * @Completion: completion + * @pContext: HTC private completion context + * @pNetBufContext: optimization for network-oriented data, the HTC packet can + * pass the network buffer corresponding to the HTC packet + * lower layers may optimized the transfer knowing this is a + * network buffer + * @magic_cookie: HTC Magic cookie + */ +typedef struct _HTC_PACKET { + DL_LIST ListLink; + void *pPktContext; + uint8_t *pBufferStart; + /* + * Pointer to the start of the buffer. In the transmit + * direction this points to the start of the payload. In the + * receive direction, however, the buffer when queued up + * points to the start of the HTC header but when returned + * to the caller points to the start of the payload + */ + uint8_t *pBuffer; + uint32_t BufferLength; + uint32_t ActualLength; + HTC_ENDPOINT_ID Endpoint; + QDF_STATUS Status; + union { + struct htc_tx_packet_info AsTx; + struct htc_rx_packet_info AsRx; + } PktInfo; + /* the following fields are for internal HTC use */ + uint32_t netbufOrigHeadRoom; + HTC_PACKET_COMPLETION Completion; + void *pContext; + void *pNetBufContext; + uint32_t magic_cookie; +} HTC_PACKET; + +#define COMPLETE_HTC_PACKET(p, status) \ + { \ + (p)->Status = (status); \ + (p)->Completion((p)->pContext, (p)); \ + } + +#define INIT_HTC_PACKET_INFO(p, b, len) \ + { \ + (p)->pBufferStart = (b); \ + (p)->BufferLength = (len); \ + } + +/* macro to set an initial RX packet for refilling HTC */ +#define SET_HTC_PACKET_INFO_RX_REFILL(p, c, b, len, ep) \ + do { \ + (p)->pPktContext = (c); \ + (p)->pBuffer = (b); \ + (p)->pBufferStart = (b); \ + (p)->BufferLength = (len); \ + (p)->Endpoint = (ep); \ + } while (0) + +/* fast macro to recycle an RX packet that will be re-queued to HTC */ +#define HTC_PACKET_RESET_RX(p) \ + { (p)->pBuffer = (p)->pBufferStart; (p)->ActualLength = 0; } + +/* macro to set packet parameters for TX */ +#define SET_HTC_PACKET_INFO_TX(p, c, b, len, ep, tag) \ + do { \ + (p)->pPktContext = (c); \ + (p)->pBuffer = (b); \ + (p)->ActualLength = (len); \ + (p)->Endpoint = (ep); \ + (p)->PktInfo.AsTx.Tag = (tag); \ + (p)->PktInfo.AsTx.Flags = 0; \ + (p)->PktInfo.AsTx.SendFlags = 0; \ + } while (0) + +#define SET_HTC_PACKET_NET_BUF_CONTEXT(p, nb) \ + { \ + (p)->pNetBufContext = (nb); \ + } + +#define GET_HTC_PACKET_NET_BUF_CONTEXT(p) (p)->pNetBufContext + +/* HTC Packet Queueing Macros */ +typedef struct _HTC_PACKET_QUEUE { + DL_LIST QueueHead; + int Depth; +} HTC_PACKET_QUEUE; + +/* initialize queue */ +#define INIT_HTC_PACKET_QUEUE(pQ) \ + { \ + DL_LIST_INIT(&(pQ)->QueueHead); \ + (pQ)->Depth = 0; \ + } + +/* enqueue HTC packet to the tail of the queue */ +#define HTC_PACKET_ENQUEUE(pQ, p) \ + { dl_list_insert_tail(&(pQ)->QueueHead, &(p)->ListLink); \ + (pQ)->Depth++; \ + } + +/* enqueue HTC packet to the tail of the queue */ +#define HTC_PACKET_ENQUEUE_TO_HEAD(pQ, p) \ + { dl_list_insert_head(&(pQ)->QueueHead, &(p)->ListLink); \ + (pQ)->Depth++; \ + } +/* test if a queue is empty */ +#define HTC_QUEUE_EMPTY(pQ) ((pQ)->Depth == 0) +/* get packet at head without removing it */ +static inline HTC_PACKET *htc_get_pkt_at_head(HTC_PACKET_QUEUE *queue) +{ + if (queue->Depth == 0) + return NULL; + + return A_CONTAINING_STRUCT((DL_LIST_GET_ITEM_AT_HEAD( + &queue->QueueHead)), + HTC_PACKET, ListLink); +} + +/* remove a packet from a queue, where-ever it is in the queue */ +#define HTC_PACKET_REMOVE(pQ, p) \ + { \ + dl_list_remove(&(p)->ListLink); \ + (pQ)->Depth--; \ + } + +/* dequeue an HTC packet from the head of the queue */ +static inline HTC_PACKET *htc_packet_dequeue(HTC_PACKET_QUEUE *queue) +{ + DL_LIST *pItem = dl_list_remove_item_from_head(&queue->QueueHead); + + if (pItem) { + queue->Depth--; + return A_CONTAINING_STRUCT(pItem, HTC_PACKET, ListLink); + } + return NULL; +} + +/* dequeue an HTC packet from the tail of the queue */ +static inline HTC_PACKET *htc_packet_dequeue_tail(HTC_PACKET_QUEUE *queue) +{ + DL_LIST *pItem = dl_list_remove_item_from_tail(&queue->QueueHead); + + if (pItem) { + queue->Depth--; + return A_CONTAINING_STRUCT(pItem, HTC_PACKET, ListLink); + } + return NULL; +} + +#define HTC_PACKET_QUEUE_DEPTH(pQ) (pQ)->Depth + +#define HTC_GET_ENDPOINT_FROM_PKT(p) (p)->Endpoint +#define HTC_GET_TAG_FROM_PKT(p) (p)->PktInfo.AsTx.Tag + +/* transfer the packets from one queue to the tail of another queue */ +#define HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(pQDest, pQSrc) \ + { \ + dl_list_transfer_items_to_tail(&(pQDest)->QueueHead, \ + &(pQSrc)->QueueHead); \ + (pQDest)->Depth += (pQSrc)->Depth; \ + (pQSrc)->Depth = 0; \ + } + +/* + * Transfer the packets from one queue to the head of another queue. + * This xfer_to_head(q1,q2) is basically equivalent to xfer_to_tail(q2,q1), + * but it updates the queue descriptor object for the initial queue to refer + * to the concatenated queue. + */ +#define HTC_PACKET_QUEUE_TRANSFER_TO_HEAD(pQDest, pQSrc) \ + { \ + dl_list_transfer_items_to_head(&(pQDest)->QueueHead, \ + &(pQSrc)->QueueHead); \ + (pQDest)->Depth += (pQSrc)->Depth; \ + (pQSrc)->Depth = 0; \ + } + +/* fast version to init and add a single packet to a queue */ +#define INIT_HTC_PACKET_QUEUE_AND_ADD(pQ, pP) \ + { \ + DL_LIST_INIT_AND_ADD(&(pQ)->QueueHead, &(pP)->ListLink) \ + (pQ)->Depth = 1; \ + } + +#define HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pQ, pPTemp) \ + ITERATE_OVER_LIST_ALLOW_REMOVE(&(pQ)->QueueHead, \ + (pPTemp), HTC_PACKET, ListLink) + +#define HTC_PACKET_QUEUE_ITERATE_IS_VALID(pQ) ITERATE_IS_VALID(&(pQ)->QueueHead) +#define HTC_PACKET_QUEUE_ITERATE_RESET(pQ) ITERATE_RESET(&(pQ)->QueueHead) + +#define HTC_PACKET_QUEUE_ITERATE_END ITERATE_END + +/** + * htc_packet_set_magic_cookie() - set magic cookie in htc packet + * htc_pkt - pointer to htc packet + * value - value to set in magic cookie + * + * This API sets the magic cookie passed in htc packet. + * + * Return : None + */ +static inline void htc_packet_set_magic_cookie(HTC_PACKET *htc_pkt, + uint32_t value) +{ + htc_pkt->magic_cookie = value; +} + +/** + * htc_packet_set_magic_cookie() - get magic cookie in htc packet + * htc_pkt - pointer to htc packet + * + * This API returns the magic cookie in htc packet. + * + * Return : magic cookie + */ +static inline uint32_t htc_packet_get_magic_cookie(HTC_PACKET *htc_pkt) +{ + return htc_pkt->magic_cookie; +} + +#endif /*HTC_PACKET_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_recv.c b/drivers/staging/qca-wifi-host-cmn/htc/htc_recv.c new file mode 100644 index 0000000000000000000000000000000000000000..2d6047da83efd86b91630b43a39cb5e91052ef45 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_recv.c @@ -0,0 +1,728 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "htc_debug.h" +#include "htc_internal.h" +#include "htc_credit_history.h" +#include /* qdf_nbuf_t */ + +/* HTC Control message receive timeout msec */ +#define HTC_CONTROL_RX_TIMEOUT 6000 + +#if defined(WLAN_DEBUG) || defined(DEBUG) +void debug_dump_bytes(uint8_t *buffer, uint16_t length, char *pDescription) +{ + int8_t stream[60]; + int8_t byteOffsetStr[10]; + uint32_t i; + uint16_t offset, count, byteOffset; + + A_PRINTF("<---------Dumping %d Bytes : %s ------>\n", length, + pDescription); + + count = 0; + offset = 0; + byteOffset = 0; + for (i = 0; i < length; i++) { + A_SNPRINTF(stream + offset, (sizeof(stream) - offset), + "%02X ", buffer[i]); + count++; + offset += 3; + + if (count == 16) { + count = 0; + offset = 0; + A_SNPRINTF(byteOffsetStr, sizeof(byteOffset), "%4.4X", + byteOffset); + A_PRINTF("[%s]: %s\n", byteOffsetStr, stream); + qdf_mem_zero(stream, 60); + byteOffset += 16; + } + } + + if (offset != 0) { + A_SNPRINTF(byteOffsetStr, sizeof(byteOffset), "%4.4X", + byteOffset); + A_PRINTF("[%s]: %s\n", byteOffsetStr, stream); + } + + A_PRINTF("<------------------------------------------------->\n"); +} +#else +void debug_dump_bytes(uint8_t *buffer, uint16_t length, char *pDescription) +{ +} +#endif + +static A_STATUS htc_process_trailer(HTC_TARGET *target, + uint8_t *pBuffer, + int Length, HTC_ENDPOINT_ID FromEndpoint); + +static void do_recv_completion_pkt(HTC_ENDPOINT *pEndpoint, + HTC_PACKET *pPacket) +{ + if (!pEndpoint->EpCallBacks.EpRecv) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("HTC ep %d has NULL recv callback on packet %pK\n", + pEndpoint->Id, + pPacket)); + if (pPacket) + qdf_nbuf_free(pPacket->pPktContext); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + ("HTC calling ep %d recv callback on packet %pK\n", + pEndpoint->Id, pPacket)); + pEndpoint->EpCallBacks.EpRecv(pEndpoint->EpCallBacks.pContext, + pPacket); + } +} + +static void do_recv_completion(HTC_ENDPOINT *pEndpoint, + HTC_PACKET_QUEUE *pQueueToIndicate) +{ + HTC_PACKET *pPacket; + + if (HTC_QUEUE_EMPTY(pQueueToIndicate)) { + /* nothing to indicate */ + return; + } + + while (!HTC_QUEUE_EMPTY(pQueueToIndicate)) { + pPacket = htc_packet_dequeue(pQueueToIndicate); + do_recv_completion_pkt(pEndpoint, pPacket); + } +} + +void htc_control_rx_complete(void *Context, HTC_PACKET *pPacket) +{ + /* TODO, can't really receive HTC control messages yet.... */ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Invalid call to htc_control_rx_complete\n")); +} + +void htc_unblock_recv(HTC_HANDLE HTCHandle) +{ + /* TODO find the Need in new model */ +} + +void htc_enable_recv(HTC_HANDLE HTCHandle) +{ + + /* TODO find the Need in new model */ +} + +void htc_disable_recv(HTC_HANDLE HTCHandle) +{ + + /* TODO find the Need in new model */ +} + +int htc_get_num_recv_buffers(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Endpoint) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + HTC_ENDPOINT *pEndpoint = &target->endpoint[Endpoint]; + return HTC_PACKET_QUEUE_DEPTH(&pEndpoint->RxBufferHoldQueue); +} + +HTC_PACKET *allocate_htc_packet_container(HTC_TARGET *target) +{ + HTC_PACKET *pPacket; + + LOCK_HTC_RX(target); + + if (!target->pHTCPacketStructPool) { + UNLOCK_HTC_RX(target); + return NULL; + } + + pPacket = target->pHTCPacketStructPool; + target->pHTCPacketStructPool = (HTC_PACKET *) pPacket->ListLink.pNext; + + UNLOCK_HTC_RX(target); + + pPacket->ListLink.pNext = NULL; + return pPacket; +} + +void free_htc_packet_container(HTC_TARGET *target, HTC_PACKET *pPacket) +{ + pPacket->ListLink.pPrev = NULL; + + LOCK_HTC_RX(target); + if (!target->pHTCPacketStructPool) { + target->pHTCPacketStructPool = pPacket; + pPacket->ListLink.pNext = NULL; + } else { + pPacket->ListLink.pNext = + (DL_LIST *) target->pHTCPacketStructPool; + target->pHTCPacketStructPool = pPacket; + } + + UNLOCK_HTC_RX(target); +} + +#ifdef RX_SG_SUPPORT +qdf_nbuf_t rx_sg_to_single_netbuf(HTC_TARGET *target) +{ + qdf_nbuf_t skb; + uint8_t *anbdata; + uint8_t *anbdata_new; + uint32_t anblen; + qdf_nbuf_t new_skb = NULL; + uint32_t sg_queue_len; + qdf_nbuf_queue_t *rx_sg_queue = &target->RxSgQueue; + + sg_queue_len = qdf_nbuf_queue_len(rx_sg_queue); + + if (sg_queue_len <= 1) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("rx_sg_to_single_netbuf: invalid sg queue len %u\n")); + goto _failed; + } + + new_skb = qdf_nbuf_alloc(target->ExpRxSgTotalLen, 0, 4, false); + if (!new_skb) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("rx_sg_to_single_netbuf: can't allocate %u size netbuf\n", + target->ExpRxSgTotalLen)); + goto _failed; + } + + qdf_nbuf_peek_header(new_skb, &anbdata_new, &anblen); + + skb = qdf_nbuf_queue_remove(rx_sg_queue); + do { + qdf_nbuf_peek_header(skb, &anbdata, &anblen); + qdf_mem_copy(anbdata_new, anbdata, qdf_nbuf_len(skb)); + qdf_nbuf_put_tail(new_skb, qdf_nbuf_len(skb)); + anbdata_new += qdf_nbuf_len(skb); + qdf_nbuf_free(skb); + skb = qdf_nbuf_queue_remove(rx_sg_queue); + } while (skb); + + RESET_RX_SG_CONFIG(target); + return new_skb; + +_failed: + + while ((skb = qdf_nbuf_queue_remove(rx_sg_queue)) != NULL) + qdf_nbuf_free(skb); + + RESET_RX_SG_CONFIG(target); + return NULL; +} +#endif + +QDF_STATUS htc_rx_completion_handler(void *Context, qdf_nbuf_t netbuf, + uint8_t pipeID) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + HTC_FRAME_HDR *HtcHdr; + HTC_TARGET *target = (HTC_TARGET *) Context; + uint8_t *netdata; + uint32_t netlen; + HTC_ENDPOINT *pEndpoint, *currendpoint; + HTC_PACKET *pPacket; + uint16_t payloadLen; + uint32_t trailerlen = 0; + uint8_t htc_ep_id; + int i; +#ifdef HTC_MSG_WAKEUP_FROM_SUSPEND_ID + struct htc_init_info *info; +#endif + +#ifdef RX_SG_SUPPORT + LOCK_HTC_RX(target); + if (target->IsRxSgInprogress) { + target->CurRxSgTotalLen += qdf_nbuf_len(netbuf); + qdf_nbuf_queue_add(&target->RxSgQueue, netbuf); + if (target->CurRxSgTotalLen == target->ExpRxSgTotalLen) { + netbuf = rx_sg_to_single_netbuf(target); + if (!netbuf) { + UNLOCK_HTC_RX(target); + goto _out; + } + } else { + netbuf = NULL; + UNLOCK_HTC_RX(target); + goto _out; + } + } + UNLOCK_HTC_RX(target); +#endif + + netdata = qdf_nbuf_data(netbuf); + netlen = qdf_nbuf_len(netbuf); + + HtcHdr = (HTC_FRAME_HDR *) netdata; + + do { + + htc_ep_id = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, ENDPOINTID); + + if (htc_ep_id >= ENDPOINT_MAX) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("HTC Rx: invalid EndpointID=%d\n", + htc_ep_id)); + debug_dump_bytes((uint8_t *) HtcHdr, + sizeof(HTC_FRAME_HDR), + "BAD HTC Header"); + status = QDF_STATUS_E_FAILURE; + DPTRACE(qdf_dp_trace( + netbuf, + QDF_DP_TRACE_HTC_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, + qdf_nbuf_data_addr(netbuf), + sizeof(qdf_nbuf_data(netbuf)), + QDF_RX)); + break; + } + + pEndpoint = &target->endpoint[htc_ep_id]; + + /* + * If this endpoint that received a message from the target has + * a to-target HIF pipe whose send completions are polled rather + * than interrupt driven, this is a good point to ask HIF to + * check whether it has any completed sends to handle. + */ + if (pEndpoint->ul_is_polled) { + for (i = 0; i < ENDPOINT_MAX; i++) { + currendpoint = &target->endpoint[i]; + if ((currendpoint->DL_PipeID == + pEndpoint->DL_PipeID) && + currendpoint->ul_is_polled) { + htc_send_complete_check(currendpoint, + 1); + } + } + } + + payloadLen = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, PAYLOADLEN); + + if (netlen < (payloadLen + HTC_HDR_LENGTH)) { +#ifdef RX_SG_SUPPORT + LOCK_HTC_RX(target); + target->IsRxSgInprogress = true; + qdf_nbuf_queue_init(&target->RxSgQueue); + qdf_nbuf_queue_add(&target->RxSgQueue, netbuf); + target->ExpRxSgTotalLen = (payloadLen + HTC_HDR_LENGTH); + target->CurRxSgTotalLen += netlen; + UNLOCK_HTC_RX(target); + netbuf = NULL; + break; +#else + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("HTC Rx: insufficient length, got:%d expected =%zu\n", + netlen, payloadLen + HTC_HDR_LENGTH)); + debug_dump_bytes((uint8_t *) HtcHdr, + sizeof(HTC_FRAME_HDR), + "BAD RX packet length"); + status = QDF_STATUS_E_FAILURE; + DPTRACE(qdf_dp_trace( + netbuf, + QDF_DP_TRACE_HTC_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, + qdf_nbuf_data_addr(netbuf), + sizeof(qdf_nbuf_data(netbuf)), + QDF_RX)); + break; +#endif + } +#ifdef HTC_EP_STAT_PROFILING + LOCK_HTC_RX(target); + INC_HTC_EP_STAT(pEndpoint, RxReceived, 1); + UNLOCK_HTC_RX(target); +#endif + + /* if (IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) { */ + { + uint8_t temp; + A_STATUS temp_status; + /* get flags to check for trailer */ + temp = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, FLAGS); + if (temp & HTC_FLAGS_RECV_TRAILER) { + /* extract the trailer length */ + temp = + HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, + CONTROLBYTES0); + if ((temp < sizeof(HTC_RECORD_HDR)) + || (temp > payloadLen)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("htc_rx_completion_handler, invalid header (payloadlength should be :%d, CB[0] is:%d)\n", + payloadLen, temp)); + status = QDF_STATUS_E_INVAL; + break; + } + + trailerlen = temp; + /* process trailer data that follows HDR + + * application payload + */ + temp_status = htc_process_trailer(target, + ((uint8_t *) HtcHdr + + HTC_HDR_LENGTH + + payloadLen - temp), + temp, htc_ep_id); + if (A_FAILED(temp_status)) { + status = QDF_STATUS_E_FAILURE; + break; + } + + } + } + + if (((int)payloadLen - (int)trailerlen) <= 0) { + /* 0 length packet with trailer data, just drop these */ + break; + } + + if (htc_ep_id == ENDPOINT_0) { + uint16_t message_id; + HTC_UNKNOWN_MSG *htc_msg; + bool wow_nack; + + /* remove HTC header */ + qdf_nbuf_pull_head(netbuf, HTC_HDR_LENGTH); + netdata = qdf_nbuf_data(netbuf); + netlen = qdf_nbuf_len(netbuf); + + htc_msg = (HTC_UNKNOWN_MSG *) netdata; + message_id = HTC_GET_FIELD(htc_msg, HTC_UNKNOWN_MSG, + MESSAGEID); + + switch (message_id) { + default: + /* handle HTC control message */ + if (target->CtrlResponseProcessing) { + /* this is a fatal error, target should + * not be sending unsolicited messages + * on the endpoint 0 + */ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("HTC Rx Ctrl still processing\n")); + status = QDF_STATUS_E_FAILURE; + QDF_BUG(false); + break; + } + + LOCK_HTC_RX(target); + target->CtrlResponseLength = + min((int)netlen, + HTC_MAX_CONTROL_MESSAGE_LENGTH); + qdf_mem_copy(target->CtrlResponseBuffer, + netdata, + target->CtrlResponseLength); + + /* Requester will clear this flag */ + target->CtrlResponseProcessing = true; + UNLOCK_HTC_RX(target); + + qdf_event_set(&target->ctrl_response_valid); + break; +#ifdef HTC_MSG_WAKEUP_FROM_SUSPEND_ID + case HTC_MSG_WAKEUP_FROM_SUSPEND_ID: + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("Received initial wake up")); + htc_credit_record(HTC_INITIAL_WAKE_UP, + pEndpoint->TxCredits, + HTC_PACKET_QUEUE_DEPTH( + &pEndpoint->TxQueue)); + info = &target->HTCInitInfo; + if (info && info->target_initial_wakeup_cb) + info->target_initial_wakeup_cb( + info->target_psoc); + else + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("No initial wake up cb")); + break; +#endif + case HTC_MSG_SEND_SUSPEND_COMPLETE: + wow_nack = false; + htc_credit_record(HTC_SUSPEND_ACK, + pEndpoint->TxCredits, + HTC_PACKET_QUEUE_DEPTH( + &pEndpoint->TxQueue)); + target->HTCInitInfo.TargetSendSuspendComplete( + target->HTCInitInfo.target_psoc, + wow_nack); + + break; + case HTC_MSG_NACK_SUSPEND: + wow_nack = true; + htc_credit_record(HTC_SUSPEND_ACK, + pEndpoint->TxCredits, + HTC_PACKET_QUEUE_DEPTH( + &pEndpoint->TxQueue)); + target->HTCInitInfo.TargetSendSuspendComplete( + target->HTCInitInfo.target_psoc, + wow_nack); + break; + } + + qdf_nbuf_free(netbuf); + netbuf = NULL; + break; + } + + /* the current message based HIF architecture allocates net bufs + * for recv packets since this layer bridges that HIF to upper + * layers , which expects HTC packets, we form the packets here + * TODO_FIXME + */ + pPacket = allocate_htc_packet_container(target); + if (!pPacket) { + status = QDF_STATUS_E_RESOURCES; + break; + } + pPacket->Status = QDF_STATUS_SUCCESS; + pPacket->Endpoint = htc_ep_id; + pPacket->pPktContext = netbuf; + pPacket->pBuffer = qdf_nbuf_data(netbuf) + HTC_HDR_LENGTH; + pPacket->ActualLength = netlen - HTC_HEADER_LEN - trailerlen; + + qdf_nbuf_pull_head(netbuf, HTC_HEADER_LEN); + qdf_nbuf_set_pktlen(netbuf, pPacket->ActualLength); + + do_recv_completion_pkt(pEndpoint, pPacket); + + /* recover the packet container */ + free_htc_packet_container(target, pPacket); + + netbuf = NULL; + + } while (false); + +#ifdef RX_SG_SUPPORT +_out: +#endif + + if (netbuf) + qdf_nbuf_free(netbuf); + + return status; + +} + +A_STATUS htc_add_receive_pkt_multiple(HTC_HANDLE HTCHandle, + HTC_PACKET_QUEUE *pPktQueue) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint; + HTC_PACKET *pFirstPacket; + A_STATUS status = A_OK; + HTC_PACKET *pPacket; + + pFirstPacket = htc_get_pkt_at_head(pPktQueue); + + if (!pFirstPacket) { + A_ASSERT(false); + return A_EINVAL; + } + + if (pFirstPacket->Endpoint >= ENDPOINT_MAX) { + A_ASSERT(false); + return A_EINVAL; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + ("+- htc_add_receive_pkt_multiple : endPointId: %d, cnt:%d, length: %d\n", + pFirstPacket->Endpoint, + HTC_PACKET_QUEUE_DEPTH(pPktQueue), + pFirstPacket->BufferLength)); + + pEndpoint = &target->endpoint[pFirstPacket->Endpoint]; + + LOCK_HTC_RX(target); + + do { + + if (HTC_STOPPING(target)) { + status = A_ERROR; + break; + } + + /* store receive packets */ + HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pEndpoint->RxBufferHoldQueue, + pPktQueue); + + } while (false); + + UNLOCK_HTC_RX(target); + + if (A_FAILED(status)) { + /* walk through queue and mark each one canceled */ + HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pPktQueue, pPacket) { + pPacket->Status = QDF_STATUS_E_CANCELED; + } + HTC_PACKET_QUEUE_ITERATE_END; + + do_recv_completion(pEndpoint, pPktQueue); + } + + return status; +} + +void htc_flush_rx_hold_queue(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint) +{ + HTC_PACKET *pPacket; + + LOCK_HTC_RX(target); + + while (1) { + pPacket = htc_packet_dequeue(&pEndpoint->RxBufferHoldQueue); + if (!pPacket) + break; + UNLOCK_HTC_RX(target); + pPacket->Status = QDF_STATUS_E_CANCELED; + pPacket->ActualLength = 0; + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + ("Flushing RX packet:%pK, length:%d, ep:%d\n", + pPacket, pPacket->BufferLength, + pPacket->Endpoint)); + /* give the packet back */ + do_recv_completion_pkt(pEndpoint, pPacket); + LOCK_HTC_RX(target); + } + + UNLOCK_HTC_RX(target); +} + +void htc_recv_init(HTC_TARGET *target) +{ + /* Initialize ctrl_response_valid to block */ + qdf_event_create(&target->ctrl_response_valid); +} + +/* polling routine to wait for a control packet to be received */ +QDF_STATUS htc_wait_recv_ctrl_message(HTC_TARGET *target) +{ +/* int count = HTC_TARGET_MAX_RESPONSE_POLL; */ + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+HTCWaitCtrlMessageRecv\n")); + + /* Wait for BMI request/response transaction to complete */ + if (qdf_wait_single_event(&target->ctrl_response_valid, + HTC_CONTROL_RX_TIMEOUT)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Failed to receive control message\n")); + return QDF_STATUS_E_FAILURE; + } + + LOCK_HTC_RX(target); + /* caller will clear this flag */ + target->CtrlResponseProcessing = true; + + UNLOCK_HTC_RX(target); + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-HTCWaitCtrlMessageRecv success\n")); + return QDF_STATUS_SUCCESS; +} + +static A_STATUS htc_process_trailer(HTC_TARGET *target, + uint8_t *pBuffer, + int Length, HTC_ENDPOINT_ID FromEndpoint) +{ + HTC_RECORD_HDR *pRecord; + uint8_t htc_rec_id; + uint8_t htc_rec_len; + uint8_t *pRecordBuf; + uint8_t *pOrigBuffer; + int origLength; + A_STATUS status; + + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + ("+htc_process_trailer (length:%d)\n", Length)); + + if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) + AR_DEBUG_PRINTBUF(pBuffer, Length, "Recv Trailer"); + + pOrigBuffer = pBuffer; + origLength = Length; + status = A_OK; + + while (Length > 0) { + + if (Length < sizeof(HTC_RECORD_HDR)) { + status = A_EPROTO; + break; + } + /* these are byte aligned structs */ + pRecord = (HTC_RECORD_HDR *) pBuffer; + Length -= sizeof(HTC_RECORD_HDR); + pBuffer += sizeof(HTC_RECORD_HDR); + + htc_rec_len = HTC_GET_FIELD(pRecord, HTC_RECORD_HDR, LENGTH); + htc_rec_id = HTC_GET_FIELD(pRecord, HTC_RECORD_HDR, RECORDID); + + if (htc_rec_len > Length) { + /* no room left in buffer for record */ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("invalid record length: %d (id:%d) buffer has: %d bytes left\n", + htc_rec_len, htc_rec_id, Length)); + status = A_EPROTO; + break; + } + /* start of record follows the header */ + pRecordBuf = pBuffer; + + switch (htc_rec_id) { + case HTC_RECORD_CREDITS: + AR_DEBUG_ASSERT(htc_rec_len >= + sizeof(HTC_CREDIT_REPORT)); + htc_process_credit_rpt(target, + (HTC_CREDIT_REPORT *) pRecordBuf, + htc_rec_len / + (sizeof(HTC_CREDIT_REPORT)), + FromEndpoint); + break; + +#ifdef HIF_SDIO + case HTC_RECORD_LOOKAHEAD: + /* Process in HIF layer */ + break; + + case HTC_RECORD_LOOKAHEAD_BUNDLE: + /* Process in HIF layer */ + break; +#endif /* HIF_SDIO */ + + default: + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("HTC unhandled record: id:%d length:%d\n", + htc_rec_id, htc_rec_len)); + break; + } + + if (A_FAILED(status)) { + break; + } + + /* advance buffer past this record for next time around */ + pBuffer += htc_rec_len; + Length -= htc_rec_len; + } + + if (A_FAILED(status)) + debug_dump_bytes(pOrigBuffer, origLength, "BAD Recv Trailer"); + + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-htc_process_trailer\n")); + return status; + +} diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_send.c b/drivers/staging/qca-wifi-host-cmn/htc/htc_send.c new file mode 100644 index 0000000000000000000000000000000000000000..89170c47df806162344fb0ba861cd7b863bf2e84 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_send.c @@ -0,0 +1,2747 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "htc_debug.h" +#include "htc_internal.h" +#include "htc_credit_history.h" +#include /* qdf_mem_malloc */ +#include /* qdf_nbuf_t */ +#include "qdf_module.h" + +/* #define USB_HIF_SINGLE_PIPE_DATA_SCHED */ +/* #ifdef USB_HIF_SINGLE_PIPE_DATA_SCHED */ +#define DATA_EP_SIZE 4 +/* #endif */ +#define HTC_DATA_RESOURCE_THRS 256 +#define HTC_DATA_MINDESC_PERPACKET 2 + +enum HTC_SEND_QUEUE_RESULT { + HTC_SEND_QUEUE_OK = 0, /* packet was queued */ + HTC_SEND_QUEUE_DROP = 1, /* this packet should be dropped */ +}; + +#ifndef DEBUG_CREDIT +#define DEBUG_CREDIT 0 +#endif + +#if DEBUG_CREDIT +/* bit mask to enable debug certain endpoint */ +static unsigned int ep_debug_mask = + (1 << ENDPOINT_0) | (1 << ENDPOINT_1) | (1 << ENDPOINT_2); +#endif + +#ifdef QCA_WIFI_NAPIER_EMULATION +#define HTC_EMULATION_DELAY_IN_MS 20 +/** + * htc_add_delay(): Adds a delay in before proceeding, only for emulation + * + * Return: None + */ +static inline void htc_add_emulation_delay(void) +{ + qdf_mdelay(HTC_EMULATION_DELAY_IN_MS); +} +#else +static inline void htc_add_emulation_delay(void) +{ +} +#endif + +void htc_dump_counter_info(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("\n%s: ce_send_cnt = %d, TX_comp_cnt = %d\n", + __func__, target->ce_send_cnt, target->TX_comp_cnt)); +} + +int htc_get_tx_queue_depth(HTC_HANDLE htc_handle, HTC_ENDPOINT_ID endpoint_id) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + HTC_ENDPOINT *endpoint = &target->endpoint[endpoint_id]; + + return HTC_PACKET_QUEUE_DEPTH(&endpoint->TxQueue); +} +qdf_export_symbol(htc_get_tx_queue_depth); + +void htc_get_control_endpoint_tx_host_credits(HTC_HANDLE HTCHandle, + int *credits) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint; + int i; + + if (!credits || !target) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: invalid args", __func__)); + return; + } + + *credits = 0; + LOCK_HTC_TX(target); + for (i = 0; i < ENDPOINT_MAX; i++) { + pEndpoint = &target->endpoint[i]; + if (pEndpoint->service_id == WMI_CONTROL_SVC) { + *credits = pEndpoint->TxCredits; + break; + } + } + UNLOCK_HTC_TX(target); +} + +static inline void restore_tx_packet(HTC_TARGET *target, HTC_PACKET *pPacket) +{ + qdf_nbuf_t netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + + if (pPacket->PktInfo.AsTx.Flags & HTC_TX_PACKET_FLAG_FIXUP_NETBUF) { + qdf_nbuf_unmap(target->osdev, netbuf, QDF_DMA_TO_DEVICE); + pPacket->PktInfo.AsTx.Flags &= ~HTC_TX_PACKET_FLAG_FIXUP_NETBUF; + } + if (pPacket->PktInfo.AsTx.Flags & + HTC_TX_PACKET_FLAG_HTC_HEADER_IN_NETBUF_DATA) { + qdf_nbuf_pull_head(netbuf, sizeof(HTC_FRAME_HDR)); + } +} + +static void send_packet_completion(HTC_TARGET *target, HTC_PACKET *pPacket) +{ + HTC_ENDPOINT *pEndpoint = &target->endpoint[pPacket->Endpoint]; + HTC_EP_SEND_PKT_COMPLETE EpTxComplete; + + restore_tx_packet(target, pPacket); + + /* + * In case of SSR, we cannot call the upper layer completion + * callbacks, hence just free the nbuf and HTC packet here. + */ + if (target->hif_dev && hif_get_target_status(target->hif_dev)) { + htc_free_control_tx_packet(target, pPacket); + return; + } + + /* do completion */ + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("HTC calling ep %d send complete callback on packet %pK\n", + pEndpoint->Id, pPacket)); + + EpTxComplete = pEndpoint->EpCallBacks.EpTxComplete; + if (EpTxComplete) + EpTxComplete(pEndpoint->EpCallBacks.pContext, pPacket); + else + qdf_nbuf_free(pPacket->pPktContext); + + +} + +#ifdef FEATURE_RUNTIME_PM +/** + * log_packet_info() - Log HTC packet information + * + * @target: handle of HTC context + * @pPacket: handle of HTC packet + * + * Return: None + */ +static void log_packet_info(HTC_TARGET *target, HTC_PACKET *pPacket) +{ + HTC_ENDPOINT *pEndpoint = &target->endpoint[pPacket->Endpoint]; + HTC_EP_LOG_PKT ep_log_pkt; + qdf_nbuf_t netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + + ep_log_pkt = pEndpoint->EpCallBacks.ep_log_pkt; + if (ep_log_pkt) { + qdf_nbuf_pull_head(netbuf, sizeof(HTC_FRAME_HDR)); + ep_log_pkt(pEndpoint->EpCallBacks.pContext, pPacket); + qdf_nbuf_push_head(netbuf, sizeof(HTC_FRAME_HDR)); + } +} + +/** + * htc_inc_runtime_cnt: Increment htc runtime count + * @target: handle of HTC context + * + * Return: None + */ +static inline +void htc_inc_runtime_cnt(HTC_TARGET *target) +{ + qdf_atomic_inc(&target->htc_runtime_cnt); +} +#else +static void log_packet_info(HTC_TARGET *target, HTC_PACKET *pPacket) +{ +} + +static inline +void htc_inc_runtime_cnt(HTC_TARGET *target) +{ +} +#endif + +void htc_send_complete_check_cleanup(void *context) +{ + HTC_ENDPOINT *pEndpoint = (HTC_ENDPOINT *) context; + + htc_send_complete_check(pEndpoint, 1); +} + +HTC_PACKET *allocate_htc_bundle_packet(HTC_TARGET *target) +{ + HTC_PACKET *pPacket; + HTC_PACKET_QUEUE *pQueueSave; + qdf_nbuf_t netbuf; + + LOCK_HTC_TX(target); + if (!target->pBundleFreeList) { + UNLOCK_HTC_TX(target); + netbuf = qdf_nbuf_alloc(NULL, + target->MaxMsgsPerHTCBundle * + target->TargetCreditSize, 0, 4, false); + AR_DEBUG_ASSERT(netbuf); + if (!netbuf) + return NULL; + pPacket = qdf_mem_malloc(sizeof(HTC_PACKET)); + AR_DEBUG_ASSERT(pPacket); + if (!pPacket) { + qdf_nbuf_free(netbuf); + return NULL; + } + pQueueSave = qdf_mem_malloc(sizeof(HTC_PACKET_QUEUE)); + AR_DEBUG_ASSERT(pQueueSave); + if (!pQueueSave) { + qdf_nbuf_free(netbuf); + qdf_mem_free(pPacket); + return NULL; + } + INIT_HTC_PACKET_QUEUE(pQueueSave); + pPacket->pContext = pQueueSave; + SET_HTC_PACKET_NET_BUF_CONTEXT(pPacket, netbuf); + pPacket->pBuffer = qdf_nbuf_data(netbuf); + pPacket->BufferLength = qdf_nbuf_len(netbuf); + + /* store the original head room so that we can restore this + * when we "free" the packet. + * free packet puts the packet back on the free list + */ + pPacket->netbufOrigHeadRoom = qdf_nbuf_headroom(netbuf); + return pPacket; + } + /* already done malloc - restore from free list */ + pPacket = target->pBundleFreeList; + AR_DEBUG_ASSERT(pPacket); + if (!pPacket) { + UNLOCK_HTC_TX(target); + return NULL; + } + target->pBundleFreeList = (HTC_PACKET *) pPacket->ListLink.pNext; + UNLOCK_HTC_TX(target); + pPacket->ListLink.pNext = NULL; + + return pPacket; +} + +void free_htc_bundle_packet(HTC_TARGET *target, HTC_PACKET *pPacket) +{ + uint32_t curentHeadRoom; + qdf_nbuf_t netbuf; + HTC_PACKET_QUEUE *pQueueSave; + + netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + AR_DEBUG_ASSERT(netbuf); + if (!netbuf) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("\n%s: Invalid netbuf in HTC Packet\n", + __func__)); + return; + } + /* HIF adds data to the headroom section of the nbuf, restore thei + * original size. If this is not done, headroom keeps shrinking with + * every HIF send and eventually HIF ends up doing another malloc big + * enough to store the data + its header + */ + + curentHeadRoom = qdf_nbuf_headroom(netbuf); + qdf_nbuf_pull_head(netbuf, + pPacket->netbufOrigHeadRoom - curentHeadRoom); + qdf_nbuf_trim_tail(netbuf, qdf_nbuf_len(netbuf)); + + /* restore the pBuffer pointer. HIF changes this */ + pPacket->pBuffer = qdf_nbuf_data(netbuf); + pPacket->BufferLength = qdf_nbuf_len(netbuf); + + /* restore queue */ + pQueueSave = (HTC_PACKET_QUEUE *) pPacket->pContext; + if (qdf_unlikely(!pQueueSave)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("\n%s: Invalid pQueueSave in HTC Packet\n", + __func__)); + AR_DEBUG_ASSERT(pQueueSave); + } else + INIT_HTC_PACKET_QUEUE(pQueueSave); + + LOCK_HTC_TX(target); + if (!target->pBundleFreeList) { + target->pBundleFreeList = pPacket; + pPacket->ListLink.pNext = NULL; + } else { + pPacket->ListLink.pNext = (DL_LIST *) target->pBundleFreeList; + target->pBundleFreeList = pPacket; + } + UNLOCK_HTC_TX(target); +} + +#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT) + +/** + * htc_send_update_tx_bundle_stats() - update tx bundle stats depends + * on max bundle size + * @target: hif context + * @data_len: tx data len + * @TxCreditSize: endpoint tx credit size + * + * Return: None + */ +static inline void +htc_send_update_tx_bundle_stats(HTC_TARGET *target, + qdf_size_t data_len, + int TxCreditSize) +{ + int index = ((data_len + TxCreditSize - 1) / TxCreditSize) - 1; + + if (index < HTC_MAX_MSG_PER_BUNDLE_TX) + target->tx_bundle_stats[index]++; +} + +/** + * htc_issue_tx_bundle_stats_inc() - increment in tx bundle stats + * on max bundle size + * @target: hif context + * + * Return: None + */ +static inline void +htc_issue_tx_bundle_stats_inc(HTC_TARGET *target) +{ + target->tx_bundle_stats[0]++; +} +#else + +static inline void +htc_send_update_tx_bundle_stats(HTC_TARGET *target, + qdf_size_t data_len, + int TxCreditSize) +{ +} + +static inline void +htc_issue_tx_bundle_stats_inc(HTC_TARGET *target) +{ +} +#endif + +#if defined(HIF_USB) || defined(HIF_SDIO) +#ifdef ENABLE_BUNDLE_TX +static QDF_STATUS htc_send_bundled_netbuf(HTC_TARGET *target, + HTC_ENDPOINT *pEndpoint, + unsigned char *pBundleBuffer, + HTC_PACKET *pPacketTx) +{ + qdf_size_t data_len; + QDF_STATUS status; + qdf_nbuf_t bundleBuf; + uint32_t data_attr = 0; + + bundleBuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacketTx); + data_len = pBundleBuffer - qdf_nbuf_data(bundleBuf); + qdf_nbuf_put_tail(bundleBuf, data_len); + SET_HTC_PACKET_INFO_TX(pPacketTx, + target, + pBundleBuffer, + data_len, + pEndpoint->Id, HTC_TX_PACKET_TAG_BUNDLED); + LOCK_HTC_TX(target); + HTC_PACKET_ENQUEUE(&pEndpoint->TxLookupQueue, pPacketTx); + pEndpoint->ul_outstanding_cnt++; + UNLOCK_HTC_TX(target); +#if DEBUG_BUNDLE + qdf_print(" Send bundle EP%d buffer size:0x%x, total:0x%x, count:%d.", + pEndpoint->Id, + pEndpoint->TxCreditSize, + data_len, data_len / pEndpoint->TxCreditSize); +#endif + + htc_send_update_tx_bundle_stats(target, data_len, + pEndpoint->TxCreditSize); + + status = hif_send_head(target->hif_dev, + pEndpoint->UL_PipeID, + pEndpoint->Id, data_len, + bundleBuf, data_attr); + if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { + HTC_PACKET_QUEUE requeue; + + qdf_print("hif_send_head failed(len=%zu).", data_len); + INIT_HTC_PACKET_QUEUE(&requeue); + LOCK_HTC_TX(target); + pEndpoint->ul_outstanding_cnt--; + HTC_PACKET_REMOVE(&pEndpoint->TxLookupQueue, pPacketTx); + + if (pPacketTx->PktInfo.AsTx.Tag == HTC_TX_PACKET_TAG_BUNDLED) { + HTC_PACKET *temp_packet; + HTC_PACKET_QUEUE *packet_queue = + (HTC_PACKET_QUEUE *)pPacketTx->pContext; + + HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(packet_queue, + temp_packet) { + HTC_PACKET_ENQUEUE(&requeue, temp_packet); + } HTC_PACKET_QUEUE_ITERATE_END; + + UNLOCK_HTC_TX(target); + free_htc_bundle_packet(target, pPacketTx); + LOCK_HTC_TX(target); + + } else { + HTC_PACKET_ENQUEUE(&requeue, pPacketTx); + } + + HTC_PACKET_QUEUE_TRANSFER_TO_HEAD(&pEndpoint->TxQueue, + &requeue); + UNLOCK_HTC_TX(target); + } + return status; +} + +#ifdef QCA_TX_PADDING_CREDIT_SUPPORT +#define SDIO_BLOCK_SIZE 512 +static int htc_tx_pad_credit_avail(HTC_ENDPOINT *ep) +{ + int ret = 0; + + if (!ep || !ep->EpCallBacks.pContext || + !ep->EpCallBacks.ep_padding_credit_update) + return 1; + + ret = ep->EpCallBacks.ep_padding_credit_update(ep->EpCallBacks.pContext, + 0); + + if (ret < 2) + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s ret %d\n", __func__, ret)); + + return ret; +} + +static bool htc_handle_extra_tx_credit(HTC_ENDPOINT *ep, + HTC_PACKET *p_last_htc_pkt, + unsigned char *p_last_pkt_bundle_buffer, + unsigned char **p_bundle_buffer, + int tot_data_len) +{ + bool extra_tx_credit = FALSE; + HTC_FRAME_HDR *p_htc_hdr; + int first_buf_bundled_len = 0, last_buf_len = 0; + int sdio_pad = 0, free_space = 0; + int (*update_ep_padding_credit)(void *, int); + + update_ep_padding_credit = ep->EpCallBacks.ep_padding_credit_update; + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("%s Tot data_len = %d\n", __func__, tot_data_len)); + + if (!p_last_htc_pkt) + return extra_tx_credit; + + last_buf_len = (p_last_htc_pkt->ActualLength + HTC_HDR_LENGTH); + if (tot_data_len != last_buf_len) { + first_buf_bundled_len = tot_data_len - ep->TxCreditSize; + free_space = tot_data_len - + (first_buf_bundled_len + last_buf_len); + } else { + free_space = ep->TxCreditSize - tot_data_len; + } + + sdio_pad = SDIO_BLOCK_SIZE - ((first_buf_bundled_len + last_buf_len) % + SDIO_BLOCK_SIZE); + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("%s first_buf_bundled_len = %d last_buf_len = %d\n", + __func__, first_buf_bundled_len, last_buf_len)); + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("%s sdio_pad = %d free_space = %d\n", __func__, + sdio_pad, free_space)); + + if (sdio_pad <= free_space) { + if (p_bundle_buffer && *p_bundle_buffer) { + /* Align Tx bundled buf to avoid a extra Padding buf */ + *p_bundle_buffer -= (free_space - sdio_pad); + } + } else { + /* Extra Padding Buffer needed, consume extra tx credit */ + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("%s Used a Tx credit for Padding Buffer\n", + __func__)); + p_htc_hdr = (HTC_FRAME_HDR *)(p_last_pkt_bundle_buffer); + p_htc_hdr->Flags |= HTC_FLAGS_PADDING_CHECK; + extra_tx_credit = TRUE; + if (ep->EpCallBacks.ep_padding_credit_update) { + /* Decrement 1 credit at host, + * due to extra tx credit consumed by padding buffer + */ + update_ep_padding_credit(ep->EpCallBacks.pContext, -1); + } + } + return extra_tx_credit; +} +#else +static int htc_tx_pad_credit_avail(HTC_ENDPOINT *ep) +{ + return 1; +} + +static bool htc_handle_extra_tx_credit(HTC_ENDPOINT *ep, + HTC_PACKET *p_last_htc_pkt, + unsigned char *p_last_pkt_bundle_buffer, + unsigned char **p_bundle_buffer, + int tot_data_len) +{ + return FALSE; +} +#endif + +/** + * htc_issue_packets_bundle() - HTC function to send bundle packets from a queue + * @target: HTC target on which packets need to be sent + * @pEndpoint: logical endpoint on which packets needs to be sent + * @pPktQueue: HTC packet queue containing the list of packets to be sent + * + * Return: void + */ +static void htc_issue_packets_bundle(HTC_TARGET *target, + HTC_ENDPOINT *pEndpoint, + HTC_PACKET_QUEUE *pPktQueue) +{ + int i, frag_count, nbytes; + qdf_nbuf_t netbuf, bundleBuf; + unsigned char *pBundleBuffer = NULL; + HTC_PACKET *pPacket = NULL, *pPacketTx = NULL; + HTC_FRAME_HDR *pHtcHdr; + int last_credit_pad = 0; + int creditPad, creditRemainder, transferLength, bundlesSpaceRemaining = + 0; + HTC_PACKET_QUEUE *pQueueSave = NULL; + HTC_PACKET *p_last_htc_pkt = NULL; + unsigned char *p_last_pkt_bundle_buffer = NULL; + + bundlesSpaceRemaining = + target->MaxMsgsPerHTCBundle * pEndpoint->TxCreditSize; + pPacketTx = allocate_htc_bundle_packet(target); + if (!pPacketTx) { + /* good time to panic */ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("allocate_htc_bundle_packet failed\n")); + AR_DEBUG_ASSERT(false); + return; + } + bundleBuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacketTx); + pBundleBuffer = qdf_nbuf_data(bundleBuf); + pQueueSave = (HTC_PACKET_QUEUE *) pPacketTx->pContext; + while (1) { + if (pEndpoint->EpCallBacks.ep_padding_credit_update) { + if (htc_tx_pad_credit_avail(pEndpoint) < 1) + break; + } + pPacket = htc_packet_dequeue(pPktQueue); + if (!pPacket) + break; + creditPad = 0; + transferLength = pPacket->ActualLength + HTC_HDR_LENGTH; + creditRemainder = transferLength % pEndpoint->TxCreditSize; + if (creditRemainder != 0) { + if (transferLength < pEndpoint->TxCreditSize) { + creditPad = pEndpoint->TxCreditSize - + transferLength; + } else { + creditPad = creditRemainder; + } + transferLength += creditPad; + } + + if (bundlesSpaceRemaining < transferLength) { + htc_handle_extra_tx_credit(pEndpoint, p_last_htc_pkt, + p_last_pkt_bundle_buffer, + &pBundleBuffer, + pBundleBuffer - + qdf_nbuf_data(bundleBuf)); + + /* send out previous buffer */ + htc_send_bundled_netbuf(target, pEndpoint, + pBundleBuffer - last_credit_pad, + pPacketTx); + /* One packet has been dequeued from sending queue when enter + * this loop, so need to add 1 back for this checking. + */ + if ((HTC_PACKET_QUEUE_DEPTH(pPktQueue) + 1) < + HTC_MIN_MSG_PER_BUNDLE) { + HTC_PACKET_ENQUEUE_TO_HEAD(pPktQueue, pPacket); + return; + } + bundlesSpaceRemaining = + target->MaxMsgsPerHTCBundle * + pEndpoint->TxCreditSize; + pPacketTx = allocate_htc_bundle_packet(target); + if (!pPacketTx) { + HTC_PACKET_ENQUEUE_TO_HEAD(pPktQueue, pPacket); + /* good time to panic */ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("allocate_htc_bundle_packet failed\n")); + AR_DEBUG_ASSERT(false); + return; + } + bundleBuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacketTx); + pBundleBuffer = qdf_nbuf_data(bundleBuf); + pQueueSave = (HTC_PACKET_QUEUE *) pPacketTx->pContext; + } + + p_last_htc_pkt = pPacket; + p_last_pkt_bundle_buffer = pBundleBuffer; + + bundlesSpaceRemaining -= transferLength; + netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + + if (hif_get_bus_type(target->hif_dev) != QDF_BUS_TYPE_USB) { + pHtcHdr = (HTC_FRAME_HDR *)qdf_nbuf_get_frag_vaddr( + netbuf, 0); + HTC_WRITE32(pHtcHdr, + SM(pPacket->ActualLength, + HTC_FRAME_HDR_PAYLOADLEN) | + SM(pPacket->PktInfo.AsTx.SendFlags | + HTC_FLAGS_SEND_BUNDLE, + HTC_FRAME_HDR_FLAGS) | + SM(pPacket->Endpoint, + HTC_FRAME_HDR_ENDPOINTID)); + HTC_WRITE32((uint32_t *) pHtcHdr + 1, + SM(pPacket->PktInfo.AsTx.SeqNo, + HTC_FRAME_HDR_CONTROLBYTES1) | SM(creditPad, + HTC_FRAME_HDR_RESERVED)); + pHtcHdr->reserved = creditPad; + } + frag_count = qdf_nbuf_get_num_frags(netbuf); + nbytes = pPacket->ActualLength + HTC_HDR_LENGTH; + for (i = 0; i < frag_count && nbytes > 0; i++) { + int frag_len = qdf_nbuf_get_frag_len(netbuf, i); + unsigned char *frag_addr = + qdf_nbuf_get_frag_vaddr(netbuf, i); + if (frag_len > nbytes) + frag_len = nbytes; + qdf_mem_copy(pBundleBuffer, frag_addr, frag_len); + nbytes -= frag_len; + pBundleBuffer += frag_len; + } + HTC_PACKET_ENQUEUE(pQueueSave, pPacket); + pBundleBuffer += creditPad; + + /* last one can't be packed. */ + if (hif_get_bus_type(target->hif_dev) == QDF_BUS_TYPE_USB) + last_credit_pad = creditPad; + } + /* send out remaining buffer */ + if (pBundleBuffer != qdf_nbuf_data(bundleBuf)) { + htc_handle_extra_tx_credit(pEndpoint, p_last_htc_pkt, + p_last_pkt_bundle_buffer, + &pBundleBuffer, + pBundleBuffer - + qdf_nbuf_data(bundleBuf)); + + htc_send_bundled_netbuf(target, pEndpoint, + pBundleBuffer - last_credit_pad, + pPacketTx); + } else { + free_htc_bundle_packet(target, pPacketTx); + } +} +#endif /* ENABLE_BUNDLE_TX */ +#else +static int htc_tx_pad_credit_avail(HTC_ENDPOINT *ep) +{ + return 1; +} + +bool htc_handle_extra_tx_credit(HTC_ENDPOINT *ep, + HTC_PACKET *p_last_htc_pkt, + unsigned char *p_last_pkt_bundle_buffer, + unsigned char **p_bundle_buffer, + int tot_data_len); +bool htc_handle_extra_tx_credit(HTC_ENDPOINT *ep, + HTC_PACKET *p_last_htc_pkt, + unsigned char *p_last_pkt_bundle_buffer, + unsigned char **p_bundle_buffer, + int tot_data_len) +{ + return FALSE; +} + +static void htc_issue_packets_bundle(HTC_TARGET *target, + HTC_ENDPOINT *pEndpoint, + HTC_PACKET_QUEUE *pPktQueue) +{ +} +#endif + +/** + * htc_issue_packets() - HTC function to send packets from a queue + * @target: HTC target on which packets need to be sent + * @pEndpoint: logical endpoint on which packets needs to be sent + * @pPktQueue: HTC packet queue containing the list of packets to be sent + * + * Return: QDF_STATUS_SUCCESS on success and error QDF status on failure + */ +static QDF_STATUS htc_issue_packets(HTC_TARGET *target, + HTC_ENDPOINT *pEndpoint, + HTC_PACKET_QUEUE *pPktQueue) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + qdf_nbuf_t netbuf; + HTC_PACKET *pPacket = NULL; + uint16_t payloadLen; + HTC_FRAME_HDR *pHtcHdr; + uint32_t data_attr = 0; + enum qdf_bus_type bus_type; + QDF_STATUS ret; + bool rt_put = false; + bool used_extra_tx_credit = false; + uint8_t *buf = NULL; + int (*update_ep_padding_credit)(void *, int); + void *ctx = NULL; + bool rt_put_in_resp; + int32_t sys_state = HIF_SYSTEM_PM_STATE_ON; + + update_ep_padding_credit = + pEndpoint->EpCallBacks.ep_padding_credit_update; + + bus_type = hif_get_bus_type(target->hif_dev); + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("+htc_issue_packets: Queue: %pK, Pkts %d\n", pPktQueue, + HTC_PACKET_QUEUE_DEPTH(pPktQueue))); + while (true) { + rt_put_in_resp = false; + if (HTC_TX_BUNDLE_ENABLED(target) && + HTC_PACKET_QUEUE_DEPTH(pPktQueue) >= + HTC_MIN_MSG_PER_BUNDLE) { + switch (bus_type) { + case QDF_BUS_TYPE_SDIO: + if (!IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) + break; + if (update_ep_padding_credit) { + if (htc_tx_pad_credit_avail + (pEndpoint) < 1) + break; + } + case QDF_BUS_TYPE_USB: + htc_issue_packets_bundle(target, + pEndpoint, + pPktQueue); + break; + default: + break; + } + } + /* if not bundling or there was a packet that could not be + * placed in a bundle, and send it by normal way + */ + if (pEndpoint->EpCallBacks.ep_padding_credit_update) { + if (htc_tx_pad_credit_avail(pEndpoint) < 1) { + status = QDF_STATUS_E_FAILURE; + break; + } + } + + pPacket = htc_packet_dequeue(pPktQueue); + if (!pPacket) { + /* local queue is fully drained */ + break; + } + + netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + AR_DEBUG_ASSERT(netbuf); + /* Non-credit enabled endpoints have been mapped and setup by + * now, so no need to revisit the HTC headers + */ + if (IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) { + + payloadLen = pPacket->ActualLength; + /* setup HTC frame header */ + + pHtcHdr = (HTC_FRAME_HDR *) + qdf_nbuf_get_frag_vaddr(netbuf, 0); + if (qdf_unlikely(!pHtcHdr)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s Invalid pHtcHdr\n", + __func__)); + AR_DEBUG_ASSERT(pHtcHdr); + status = QDF_STATUS_E_FAILURE; + break; + } + + HTC_WRITE32(pHtcHdr, + SM(payloadLen, + HTC_FRAME_HDR_PAYLOADLEN) | + SM(pPacket->PktInfo.AsTx.SendFlags, + HTC_FRAME_HDR_FLAGS) | + SM(pPacket->Endpoint, + HTC_FRAME_HDR_ENDPOINTID)); + HTC_WRITE32(((uint32_t *) pHtcHdr) + 1, + SM(pPacket->PktInfo.AsTx.SeqNo, + HTC_FRAME_HDR_CONTROLBYTES1)); + + /* + * Now that the HTC frame header has been added, the + * netbuf can be mapped. This only applies to non-data + * frames, since data frames were already mapped as they + * entered into the driver. + */ + ret = qdf_nbuf_map(target->osdev, + GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket), + QDF_DMA_TO_DEVICE); + if (ret != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s nbuf Map Fail Endpnt %pK\n", + __func__, pEndpoint)); + HTC_PACKET_ENQUEUE_TO_HEAD(pPktQueue, pPacket); + status = QDF_STATUS_E_FAILURE; + break; + } + pPacket->PktInfo.AsTx.Flags |= + HTC_TX_PACKET_FLAG_FIXUP_NETBUF; + } + + if (!pEndpoint->async_update) { + LOCK_HTC_TX(target); + } + /* store in look up queue to match completions */ + HTC_PACKET_ENQUEUE(&pEndpoint->TxLookupQueue, pPacket); + INC_HTC_EP_STAT(pEndpoint, TxIssued, 1); + pEndpoint->ul_outstanding_cnt++; + if (!pEndpoint->async_update) { + UNLOCK_HTC_TX(target); + hif_send_complete_check(target->hif_dev, + pEndpoint->UL_PipeID, false); + } + + if (pPacket->PktInfo.AsTx.Tag == HTC_TX_PACKET_SYSTEM_SUSPEND) { + sys_state = hif_system_pm_get_state(target->hif_dev); + hif_system_pm_set_state_suspending(target->hif_dev); + } + + htc_packet_set_magic_cookie(pPacket, HTC_PACKET_MAGIC_COOKIE); + /* + * For HTT messages without a response from fw, + * do the runtime put here. + * otherwise runtime put will be done when the fw response comes + */ + if (pPacket->PktInfo.AsTx.Tag == HTC_TX_PACKET_TAG_RUNTIME_PUT) + rt_put = true; + else if (pPacket->PktInfo.AsTx.Tag == + HTC_TX_PACKET_TAG_RTPM_PUT_RC) { + rt_put_in_resp = true; + htc_inc_runtime_cnt(target); + } + +#if DEBUG_BUNDLE + qdf_print(" Send single EP%d buffer size:0x%x, total:0x%x.", + pEndpoint->Id, + pEndpoint->TxCreditSize, + HTC_HDR_LENGTH + pPacket->ActualLength); +#endif + buf = (uint8_t *)qdf_nbuf_get_frag_vaddr(netbuf, 0); + used_extra_tx_credit = + htc_handle_extra_tx_credit(pEndpoint, pPacket, buf, + NULL, pPacket->ActualLength + + HTC_HDR_LENGTH); + + status = hif_send_head(target->hif_dev, + pEndpoint->UL_PipeID, pEndpoint->Id, + HTC_HDR_LENGTH + pPacket->ActualLength, + netbuf, data_attr); + + if (status != QDF_STATUS_SUCCESS) { + if (rt_put_in_resp) + htc_dec_return_runtime_cnt((void *)target); + + if (pPacket->PktInfo.AsTx.Tag == + HTC_TX_PACKET_SYSTEM_SUSPEND) + __hif_system_pm_set_state(target->hif_dev, + sys_state); + + if (pEndpoint->EpCallBacks.ep_padding_credit_update) { + if (used_extra_tx_credit) { + ctx = pEndpoint->EpCallBacks.pContext; + update_ep_padding_credit(ctx, 1); + } + } + } + + htc_issue_tx_bundle_stats_inc(target); + + target->ce_send_cnt++; + + if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { + if (status != QDF_STATUS_E_RESOURCES) { + /* TODO : if more than 1 endpoint maps to the + * same PipeID it is possible to run out of + * resources in the HIF layer. Don't emit the + * error + */ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("hif_send Failed status:%d\n", + status)); + } + + /* only unmap if we mapped in this function */ + if (IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) { + qdf_nbuf_unmap(target->osdev, + GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket), + QDF_DMA_TO_DEVICE); + pPacket->PktInfo.AsTx.Flags &= + ~HTC_TX_PACKET_FLAG_FIXUP_NETBUF; + } + + if (!pEndpoint->async_update) { + LOCK_HTC_TX(target); + } + target->ce_send_cnt--; + pEndpoint->ul_outstanding_cnt--; + HTC_PACKET_REMOVE(&pEndpoint->TxLookupQueue, pPacket); + htc_packet_set_magic_cookie(pPacket, 0); + /* put it back into the callers queue */ + HTC_PACKET_ENQUEUE_TO_HEAD(pPktQueue, pPacket); + /* reclaim credits */ + HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pPktQueue, + pPacket) { + pEndpoint->TxCredits += + pPacket->PktInfo.AsTx.CreditsUsed; + } HTC_PACKET_QUEUE_ITERATE_END; + if (!pEndpoint->async_update) { + UNLOCK_HTC_TX(target); + } + break; + } + if (rt_put) { + hif_pm_runtime_put(target->hif_dev, + RTPM_ID_HTC); + rt_put = false; + } + } + if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("htc_issue_packets, failed pkt:0x%pK status:%d", + pPacket, status)); + } + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-htc_issue_packets\n")); + + return status; +} + +#ifdef FEATURE_RUNTIME_PM +/** + * extract_htc_pm_packets(): move pm packets from endpoint into queue + * @endpoint: which enpoint to extract packets from + * @queue: a queue to store extracted packets in. + * + * remove pm packets from the endpoint's tx queue. + * queue them into a queue + */ +static void extract_htc_pm_packets(HTC_ENDPOINT *endpoint, + HTC_PACKET_QUEUE *queue) +{ + HTC_PACKET *packet; + + /* only WMI endpoint has power management packets */ + if (endpoint->service_id != WMI_CONTROL_SVC) + return; + + ITERATE_OVER_LIST_ALLOW_REMOVE(&endpoint->TxQueue.QueueHead, packet, + HTC_PACKET, ListLink) { + if (packet->PktInfo.AsTx.Tag == HTC_TX_PACKET_TAG_AUTO_PM) { + HTC_PACKET_REMOVE(&endpoint->TxQueue, packet); + HTC_PACKET_ENQUEUE(queue, packet); + } + } ITERATE_END +} + +/** + * queue_htc_pm_packets(): queue pm packets with priority + * @endpoint: enpoint to queue packets to + * @queue: queue of pm packets to enque + * + * suspend resume packets get special treatment & priority. + * need to queue them at the front of the queue. + */ +static void queue_htc_pm_packets(HTC_ENDPOINT *endpoint, + HTC_PACKET_QUEUE *queue) +{ + if (endpoint->service_id != WMI_CONTROL_SVC) + return; + + HTC_PACKET_QUEUE_TRANSFER_TO_HEAD(&endpoint->TxQueue, queue); +} +#else +static void extract_htc_pm_packets(HTC_ENDPOINT *endpoint, + HTC_PACKET_QUEUE *queue) +{} + +static void queue_htc_pm_packets(HTC_ENDPOINT *endpoint, + HTC_PACKET_QUEUE *queue) +{} +#endif + +/** + * htc_send_pkts_rtpm_dbgid_get() - get runtime pm dbgid by service_id + * @service_id: service for endpoint + * + * For service_id HTT_DATA_MSG_SVC, HTT message donot have a tx complete + * from CE level, so they need runtime put which only can happen in fw + * response. runtime put will happens at 2 ways. + * 1 if packet tag HTC_TX_PACKET_TAG_RUNTIME_PUT, runtime put + * will be just in htc_issue_packets. as such pkt doesn't have + * a response from fw. + * 2 other pkt must have a response from fw, it will be handled + * by fw response using htc_pm_runtime_put. + * + * For other service_id, they have tx_completion from CE, so they will be + * handled in htc_tx_completion_handler, except packet tag as + * HTC_TX_PACKET_TAG_AUTO_PM, pm related wmi cmd don't need a runtime + * put/get. + * + * + * Return: rtpm_dbgid to trace who use it + */ +static wlan_rtpm_dbgid +htc_send_pkts_rtpm_dbgid_get(HTC_SERVICE_ID service_id) +{ + wlan_rtpm_dbgid rtpm_dbgid; + + if (service_id == HTT_DATA_MSG_SVC) + rtpm_dbgid = RTPM_ID_HTC; + else + rtpm_dbgid = RTPM_ID_WMI; + + return rtpm_dbgid; +} + +#ifdef SYSTEM_PM_CHECK +/** + * extract_htc_system_resume_pkts(): Move system pm resume packets from endpoint + * into queue + * @endpoint: which enpoint to extract packets from + * @queue: a queue to store extracted packets in. + * + * Remove pm packets from the endpoint's tx queue and enqueue + * them into a queue + */ +static void extract_htc_system_resume_pkts(HTC_ENDPOINT *endpoint, + HTC_PACKET_QUEUE *queue) +{ + HTC_PACKET *packet; + + /* only WMI endpoint has power management packets */ + if (endpoint->service_id != WMI_CONTROL_SVC) + return; + + ITERATE_OVER_LIST_ALLOW_REMOVE(&endpoint->TxQueue.QueueHead, packet, + HTC_PACKET, ListLink) { + if (packet->PktInfo.AsTx.Tag == HTC_TX_PACKET_SYSTEM_RESUME) { + HTC_PACKET_REMOVE(&endpoint->TxQueue, packet); + HTC_PACKET_ENQUEUE(queue, packet); + } + } ITERATE_END +} +#else +static inline +void extract_htc_system_resume_pkts(HTC_ENDPOINT *endpoint, + HTC_PACKET_QUEUE *queue) +{ +} +#endif + +/** + * get_htc_send_packets_credit_based() - get packets based on available credits + * @target: HTC target on which packets need to be sent + * @pEndpoint: logical endpoint on which packets needs to be sent + * @pQueue: HTC packet queue containing the list of packets to be sent + * + * Get HTC send packets from TX queue on an endpoint based on available credits. + * The function moves the packets from TX queue of the endpoint to pQueue. + * + * Return: None + */ +static void get_htc_send_packets_credit_based(HTC_TARGET *target, + HTC_ENDPOINT *pEndpoint, + HTC_PACKET_QUEUE *pQueue) +{ + int creditsRequired; + int remainder; + uint8_t sendFlags; + HTC_PACKET *pPacket; + unsigned int transferLength; + HTC_PACKET_QUEUE *tx_queue; + HTC_PACKET_QUEUE pm_queue; + bool do_pm_get = false; + wlan_rtpm_dbgid rtpm_dbgid = 0; + int ret; + HTC_PACKET_QUEUE sys_pm_queue; + bool sys_pm_check = false; + + /*** NOTE : the TX lock is held when this function is called ***/ + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("+get_htc_send_packets_credit_based\n")); + + INIT_HTC_PACKET_QUEUE(&pm_queue); + extract_htc_pm_packets(pEndpoint, &pm_queue); + if (HTC_QUEUE_EMPTY(&pm_queue)) { + do_pm_get = true; + + INIT_HTC_PACKET_QUEUE(&sys_pm_queue); + extract_htc_system_resume_pkts(pEndpoint, &sys_pm_queue); + if (HTC_QUEUE_EMPTY(&sys_pm_queue)) { + tx_queue = &pEndpoint->TxQueue; + sys_pm_check = true; + } else { + tx_queue = &sys_pm_queue; + } + } else { + tx_queue = &pm_queue; + } + + /* loop until we can grab as many packets out of the queue as we can */ + while (true) { + if (do_pm_get) { + rtpm_dbgid = + htc_send_pkts_rtpm_dbgid_get( + pEndpoint->service_id); + ret = hif_pm_runtime_get(target->hif_dev, + rtpm_dbgid); + if (ret) { + /* bus suspended, runtime resume issued */ + QDF_ASSERT(HTC_PACKET_QUEUE_DEPTH(pQueue) == 0); + if (ret == -EAGAIN) { + pPacket = htc_get_pkt_at_head(tx_queue); + if (!pPacket) + break; + log_packet_info(target, pPacket); + } + break; + } + } + + sendFlags = 0; + /* get packet at head, but don't remove it */ + pPacket = htc_get_pkt_at_head(tx_queue); + if (!pPacket) { + if (do_pm_get) + hif_pm_runtime_put(target->hif_dev, + rtpm_dbgid); + break; + } + + if (sys_pm_check && + hif_system_pm_state_check(target->hif_dev)) { + if (do_pm_get) + hif_pm_runtime_put(target->hif_dev, rtpm_dbgid); + break; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + (" Got head packet:%pK , Queue Depth: %d\n", + pPacket, + HTC_PACKET_QUEUE_DEPTH(tx_queue))); + + transferLength = pPacket->ActualLength + HTC_HDR_LENGTH; + + if (transferLength <= pEndpoint->TxCreditSize) { + creditsRequired = 1; + } else { + /* figure out how many credits this message requires */ + creditsRequired = + transferLength / pEndpoint->TxCreditSize; + remainder = transferLength % pEndpoint->TxCreditSize; + + if (remainder) + creditsRequired++; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + (" Credits Required:%d Got:%d\n", + creditsRequired, pEndpoint->TxCredits)); + + if (pEndpoint->Id == ENDPOINT_0) { + /* + * endpoint 0 is special, it always has a credit and + * does not require credit based flow control + */ + creditsRequired = 0; + } else { + + if (pEndpoint->TxCredits < creditsRequired) { +#if DEBUG_CREDIT + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("EP%d,No Credit now.%d < %d\n", + pEndpoint->Id, + pEndpoint->TxCredits, + creditsRequired)); +#endif + if (do_pm_get) + hif_pm_runtime_put(target->hif_dev, + rtpm_dbgid); + break; + } + + pEndpoint->TxCredits -= creditsRequired; + INC_HTC_EP_STAT(pEndpoint, TxCreditsConsummed, + creditsRequired); + + /* check if we need credits back from the target */ + if (pEndpoint->TxCredits <= + pEndpoint->TxCreditsPerMaxMsg) { + /* tell the target we need credits ASAP! */ + sendFlags |= HTC_FLAGS_NEED_CREDIT_UPDATE; + if (pEndpoint->service_id == WMI_CONTROL_SVC) { + htc_credit_record(HTC_REQUEST_CREDIT, + pEndpoint->TxCredits, + HTC_PACKET_QUEUE_DEPTH + (tx_queue)); + } + INC_HTC_EP_STAT(pEndpoint, + TxCreditLowIndications, 1); +#if DEBUG_CREDIT + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + (" EP%d Needs Credits\n", + pEndpoint->Id)); +#endif + } + } + + /* now we can fully dequeue */ + pPacket = htc_packet_dequeue(tx_queue); + if (pPacket) { + /* save the number of credits this packet consumed */ + pPacket->PktInfo.AsTx.CreditsUsed = creditsRequired; + /* save send flags */ + pPacket->PktInfo.AsTx.SendFlags = sendFlags; + + /* queue this packet into the caller's queue */ + HTC_PACKET_ENQUEUE(pQueue, pPacket); + } + } + + if (!HTC_QUEUE_EMPTY(&pm_queue)) + queue_htc_pm_packets(pEndpoint, &pm_queue); + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("-get_htc_send_packets_credit_based\n")); + +} + +static void get_htc_send_packets(HTC_TARGET *target, + HTC_ENDPOINT *pEndpoint, + HTC_PACKET_QUEUE *pQueue, int Resources) +{ + + HTC_PACKET *pPacket; + HTC_PACKET_QUEUE *tx_queue; + HTC_PACKET_QUEUE pm_queue; + bool do_pm_get = false; + wlan_rtpm_dbgid rtpm_dbgid = 0; + int ret; + + /*** NOTE : the TX lock is held when this function is called ***/ + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("+get_htc_send_packets %d resources\n", Resources)); + + INIT_HTC_PACKET_QUEUE(&pm_queue); + extract_htc_pm_packets(pEndpoint, &pm_queue); + if (HTC_QUEUE_EMPTY(&pm_queue)) { + tx_queue = &pEndpoint->TxQueue; + do_pm_get = true; + } else { + tx_queue = &pm_queue; + } + + /* loop until we can grab as many packets out of the queue as we can */ + while (Resources > 0) { + int num_frags; + + if (do_pm_get) { + rtpm_dbgid = + htc_send_pkts_rtpm_dbgid_get( + pEndpoint->service_id); + ret = hif_pm_runtime_get(target->hif_dev, + rtpm_dbgid); + if (ret) { + /* bus suspended, runtime resume issued */ + QDF_ASSERT(HTC_PACKET_QUEUE_DEPTH(pQueue) == 0); + if (ret == -EAGAIN) { + pPacket = htc_get_pkt_at_head(tx_queue); + if (!pPacket) + break; + log_packet_info(target, pPacket); + } + break; + } + } + + ret = hif_system_pm_state_check(target->hif_dev); + if (ret) { + if (do_pm_get) + hif_pm_runtime_put(target->hif_dev, rtpm_dbgid); + break; + } + + pPacket = htc_packet_dequeue(tx_queue); + if (!pPacket) { + if (do_pm_get) + hif_pm_runtime_put(target->hif_dev, rtpm_dbgid); + break; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + (" Got packet:%pK , New Queue Depth: %d\n", + pPacket, + HTC_PACKET_QUEUE_DEPTH(tx_queue))); + /* For non-credit path the sequence number is already embedded + * in the constructed HTC header + */ + pPacket->PktInfo.AsTx.SendFlags = 0; + pPacket->PktInfo.AsTx.CreditsUsed = 0; + /* queue this packet into the caller's queue */ + HTC_PACKET_ENQUEUE(pQueue, pPacket); + + /* + * FIX THIS: + * For now, avoid calling qdf_nbuf_get_num_frags before calling + * qdf_nbuf_map, because the MacOS version of qdf_nbuf_t doesn't + * support qdf_nbuf_get_num_frags until after qdf_nbuf_map has + * been done. + * Assume that the non-data netbufs, i.e. WMI message netbufs, + * consist of a single fragment. + */ + /* WMI messages are in a single-fragment network buf */ + num_frags = + (pPacket->PktInfo.AsTx. + Flags & HTC_TX_PACKET_FLAG_FIXUP_NETBUF) ? 1 : + qdf_nbuf_get_num_frags(GET_HTC_PACKET_NET_BUF_CONTEXT + (pPacket)); + Resources -= num_frags; + } + + if (!HTC_QUEUE_EMPTY(&pm_queue)) + queue_htc_pm_packets(pEndpoint, &pm_queue); + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-get_htc_send_packets\n")); + +} + +/** + * htc_try_send() - Send packets in a queue on an endpoint + * @target: HTC target on which packets need to be sent + * @pEndpoint: logical endpoint on which packets needs to be sent + * @pCallersSendQueue: packet queue containing the list of packets to be sent + * + * Return: enum HTC_SEND_QUEUE_RESULT indicates whether the packet was queued to + * be sent or the packet should be dropped by the upper layer + */ +static enum HTC_SEND_QUEUE_RESULT htc_try_send(HTC_TARGET *target, + HTC_ENDPOINT *pEndpoint, + HTC_PACKET_QUEUE *pCallersSendQueue) +{ + /* temp queue to hold packets at various stages */ + HTC_PACKET_QUEUE sendQueue; + HTC_PACKET *pPacket; + int tx_resources; + int overflow; + enum HTC_SEND_QUEUE_RESULT result = HTC_SEND_QUEUE_OK; + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("+htc_try_send (Queue:%pK Depth:%d)\n", + pCallersSendQueue, + (pCallersSendQueue == + NULL) ? 0 : + HTC_PACKET_QUEUE_DEPTH + (pCallersSendQueue))); + + /* init the local send queue */ + INIT_HTC_PACKET_QUEUE(&sendQueue); + + do { + + /* caller didn't provide a queue, just wants us to check + * queues and send + */ + if (!pCallersSendQueue) + break; + + if (HTC_QUEUE_EMPTY(pCallersSendQueue)) { + /* empty queue */ + OL_ATH_HTC_PKT_ERROR_COUNT_INCR(target, + HTC_PKT_Q_EMPTY); + result = HTC_SEND_QUEUE_DROP; + break; + } + + if (HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue) >= + pEndpoint->MaxTxQueueDepth) { + /* we've already overflowed */ + overflow = HTC_PACKET_QUEUE_DEPTH(pCallersSendQueue); + } else { + /* figure out how much we will overflow by */ + overflow = HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue); + overflow += HTC_PACKET_QUEUE_DEPTH(pCallersSendQueue); + /* get how much we will overflow the TX queue by */ + overflow -= pEndpoint->MaxTxQueueDepth; + } + + /* if overflow is negative or zero, we are okay */ + if (overflow > 0) { + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("Endpoint %d, TX queue will overflow :%d , Tx Depth:%d, Max:%d\n", + pEndpoint->Id, overflow, + HTC_PACKET_QUEUE_DEPTH(&pEndpoint-> + TxQueue), + pEndpoint->MaxTxQueueDepth)); + } + if ((overflow <= 0) + || (!pEndpoint->EpCallBacks.EpSendFull)) { + /* all packets will fit or caller did not provide send + * full indication handler + * just move all of them to local sendQueue object + */ + HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&sendQueue, + pCallersSendQueue); + } else { + int i; + int goodPkts = + HTC_PACKET_QUEUE_DEPTH(pCallersSendQueue) - + overflow; + + A_ASSERT(goodPkts >= 0); + /* we have overflowed and callback is provided. Dequeue + * all non-overflow packets into the sendqueue + */ + for (i = 0; i < goodPkts; i++) { + /* pop off caller's queue */ + pPacket = htc_packet_dequeue(pCallersSendQueue); + A_ASSERT(pPacket); + if (pPacket) + /* insert into local queue */ + HTC_PACKET_ENQUEUE(&sendQueue, + pPacket); + } + + /* the caller's queue has all the packets that won't fit + * walk through the caller's queue and indicate each one + * to the send full handler + */ + ITERATE_OVER_LIST_ALLOW_REMOVE(&pCallersSendQueue-> + QueueHead, pPacket, + HTC_PACKET, ListLink) { + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("Indicating overflowed TX packet: %pK\n", + pPacket)); + /* + * Remove headroom reserved for HTC_FRAME_HDR + * before giving the packet back to the user via + * the EpSendFull callback. + */ + restore_tx_packet(target, pPacket); + + if (pEndpoint->EpCallBacks. + EpSendFull(pEndpoint->EpCallBacks.pContext, + pPacket) == HTC_SEND_FULL_DROP) { + /* callback wants the packet dropped */ + INC_HTC_EP_STAT(pEndpoint, TxDropped, + 1); + /* leave this one in the caller's queue + * for cleanup + */ + } else { + /* callback wants to keep this packet, + * remove from caller's queue + */ + HTC_PACKET_REMOVE(pCallersSendQueue, + pPacket); + /* put it in the send queue + * add HTC_FRAME_HDR space reservation + * again + */ + qdf_nbuf_push_head + (GET_HTC_PACKET_NET_BUF_CONTEXT + (pPacket), + sizeof(HTC_FRAME_HDR)); + + HTC_PACKET_ENQUEUE(&sendQueue, pPacket); + } + + } + ITERATE_END; + + if (HTC_QUEUE_EMPTY(&sendQueue)) { + /* no packets made it in, caller will cleanup */ + OL_ATH_HTC_PKT_ERROR_COUNT_INCR(target, + HTC_SEND_Q_EMPTY); + result = HTC_SEND_QUEUE_DROP; + break; + } + } + + } while (false); + + if (result != HTC_SEND_QUEUE_OK) { + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-htc_try_send: %d\n", + result)); + return result; + } + + LOCK_HTC_TX(target); + + if (!HTC_QUEUE_EMPTY(&sendQueue)) { + if (target->is_nodrop_pkt) { + /* + * nodrop pkts have higher priority than normal pkts, + * insert nodrop pkt to head for proper + * start/termination of test. + */ + HTC_PACKET_QUEUE_TRANSFER_TO_HEAD(&pEndpoint->TxQueue, + &sendQueue); + target->is_nodrop_pkt = false; + } else { + /* transfer packets to tail */ + HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pEndpoint->TxQueue, + &sendQueue); + A_ASSERT(HTC_QUEUE_EMPTY(&sendQueue)); + INIT_HTC_PACKET_QUEUE(&sendQueue); + } + } + + /* increment tx processing count on entry */ + if (qdf_atomic_inc_return(&pEndpoint->TxProcessCount) > 1) { + /* another thread or task is draining the TX queues on this + * endpoint that thread will reset the tx processing count when + * the queue is drained + */ + qdf_atomic_dec(&pEndpoint->TxProcessCount); + UNLOCK_HTC_TX(target); + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-htc_try_send (busy)\n")); + return HTC_SEND_QUEUE_OK; + } + + /***** beyond this point only 1 thread may enter ******/ + + /* now drain the endpoint TX queue for transmission as long as we have + * enough transmit resources + */ + if (!IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) { + tx_resources = + hif_get_free_queue_number(target->hif_dev, + pEndpoint->UL_PipeID); + } else { + tx_resources = 0; + } + + while (true) { + + if (HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue) == 0) + break; + + if (pEndpoint->async_update && + (!IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) && + (!tx_resources)) + break; + + if (IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) { +#if DEBUG_CREDIT + int cred = pEndpoint->TxCredits; +#endif + /* credit based mechanism provides flow control based on + * target transmit resource availability, we assume that + * the HIF layer will always have bus resources greater + * than target transmit resources + */ + get_htc_send_packets_credit_based(target, pEndpoint, + &sendQueue); +#if DEBUG_CREDIT + if (ep_debug_mask & (1 << pEndpoint->Id)) { + if (cred - pEndpoint->TxCredits > 0) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + (" Decrease EP%d %d - %d = %d credits.\n", + pEndpoint->Id, cred, + cred - + pEndpoint->TxCredits, + pEndpoint->TxCredits)); + } + } +#endif + } else { + + /* + * Header and payload belongs to the different fragments and + * consume 2 resource for one HTC package but USB combine into + * one transfer.And one WMI message only consumes one single + * resource. + */ + if (HTC_TX_BUNDLE_ENABLED(target) && tx_resources && + hif_get_bus_type(target->hif_dev) == + QDF_BUS_TYPE_USB) { + if (pEndpoint->service_id == + WMI_CONTROL_SVC) + tx_resources = + HTC_MAX_MSG_PER_BUNDLE_TX; + else + tx_resources = + (HTC_MAX_MSG_PER_BUNDLE_TX * 2); + } + /* get all the packets for this endpoint that we can for + * this pass + */ + get_htc_send_packets(target, pEndpoint, &sendQueue, + tx_resources); + } + + if (HTC_PACKET_QUEUE_DEPTH(&sendQueue) == 0) { + /* didn't get any packets due to a lack of resources or + * TX queue was drained + */ + break; + } + + if (!pEndpoint->async_update) + UNLOCK_HTC_TX(target); + + /* send what we can */ + if (htc_issue_packets(target, pEndpoint, &sendQueue)) { + int i; + wlan_rtpm_dbgid rtpm_dbgid; + + result = HTC_SEND_QUEUE_DROP; + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("htc_issue_packets, failed status:%d put it back to head of callersSendQueue", + result)); + + rtpm_dbgid = + htc_send_pkts_rtpm_dbgid_get( + pEndpoint->service_id); + for (i = HTC_PACKET_QUEUE_DEPTH(&sendQueue); i > 0; i--) + hif_pm_runtime_put(target->hif_dev, + rtpm_dbgid); + + if (!pEndpoint->async_update) { + LOCK_HTC_TX(target); + } + HTC_PACKET_QUEUE_TRANSFER_TO_HEAD(&pEndpoint->TxQueue, + &sendQueue); + break; + } + + if (!IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) { + tx_resources = + hif_get_free_queue_number(target->hif_dev, + pEndpoint->UL_PipeID); + } + + if (!pEndpoint->async_update) { + LOCK_HTC_TX(target); + } + + } + + /* done with this endpoint, we can clear the count */ + qdf_atomic_init(&pEndpoint->TxProcessCount); + + UNLOCK_HTC_TX(target); + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-htc_try_send:\n")); + + return HTC_SEND_QUEUE_OK; +} + +#ifdef USB_HIF_SINGLE_PIPE_DATA_SCHED +static uint16_t htc_send_pkts_sched_check(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID id) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint; + HTC_ENDPOINT_ID eid; + HTC_PACKET_QUEUE *pTxQueue; + uint16_t resources; + uint16_t acQueueStatus[DATA_EP_SIZE] = { 0, 0, 0, 0 }; + + if (id < ENDPOINT_2 || id > ENDPOINT_5) + return 1; + + for (eid = ENDPOINT_2; eid <= ENDPOINT_5; eid++) { + pEndpoint = &target->endpoint[eid]; + pTxQueue = &pEndpoint->TxQueue; + + if (HTC_QUEUE_EMPTY(pTxQueue)) + acQueueStatus[eid - 2] = 1; + } + + switch (id) { + case ENDPOINT_2: /* BE */ + return acQueueStatus[0] && acQueueStatus[2] + && acQueueStatus[3]; + case ENDPOINT_3: /* BK */ + return acQueueStatus[0] && acQueueStatus[1] && acQueueStatus[2] + && acQueueStatus[3]; + case ENDPOINT_4: /* VI */ + return acQueueStatus[2] && acQueueStatus[3]; + case ENDPOINT_5: /* VO */ + return acQueueStatus[3]; + default: + return 0; + } + +} + +static A_STATUS htc_send_pkts_sched_queue(HTC_TARGET *target, + HTC_PACKET_QUEUE *pPktQueue, + HTC_ENDPOINT_ID eid) +{ + HTC_ENDPOINT *pEndpoint; + HTC_PACKET_QUEUE *pTxQueue; + HTC_PACKET *pPacket; + int goodPkts; + + pEndpoint = &target->endpoint[eid]; + pTxQueue = &pEndpoint->TxQueue; + + LOCK_HTC_TX(target); + + goodPkts = + pEndpoint->MaxTxQueueDepth - + HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue); + + if (goodPkts > 0) { + while (!HTC_QUEUE_EMPTY(pPktQueue)) { + pPacket = htc_packet_dequeue(pPktQueue); + HTC_PACKET_ENQUEUE(pTxQueue, pPacket); + goodPkts--; + + if (goodPkts <= 0) + break; + } + } + + if (HTC_PACKET_QUEUE_DEPTH(pPktQueue)) { + ITERATE_OVER_LIST_ALLOW_REMOVE(&pPktQueue->QueueHead, pPacket, + HTC_PACKET, ListLink) { + + if (pEndpoint->EpCallBacks. + EpSendFull(pEndpoint->EpCallBacks.pContext, + pPacket) == HTC_SEND_FULL_DROP) { + INC_HTC_EP_STAT(pEndpoint, TxDropped, 1); + } else { + HTC_PACKET_REMOVE(pPktQueue, pPacket); + HTC_PACKET_ENQUEUE(pTxQueue, pPacket); + } + } + ITERATE_END; + } + + UNLOCK_HTC_TX(target); + + return A_OK; +} + +#endif + +static inline QDF_STATUS __htc_send_pkt(HTC_HANDLE HTCHandle, + HTC_PACKET *pPacket) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint; + HTC_PACKET_QUEUE pPktQueue; + qdf_nbuf_t netbuf; + HTC_FRAME_HDR *htc_hdr; + QDF_STATUS status; + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("+__htc_send_pkt\n")); + + /* get packet at head to figure out which endpoint these packets will + * go into + */ + if (!pPacket) { + OL_ATH_HTC_PKT_ERROR_COUNT_INCR(target, GET_HTC_PKT_Q_FAIL); + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-__htc_send_pkt\n")); + return QDF_STATUS_E_INVAL; + } + + if ((pPacket->Endpoint >= ENDPOINT_MAX) || + (pPacket->Endpoint <= ENDPOINT_UNUSED)) { + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("%s endpoint is invalid\n", __func__)); + AR_DEBUG_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + pEndpoint = &target->endpoint[pPacket->Endpoint]; + + if (!pEndpoint->service_id) { + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("%s service_id is invalid\n", + __func__)); + return QDF_STATUS_E_INVAL; + } + +#ifdef HTC_EP_STAT_PROFILING + LOCK_HTC_TX(target); + INC_HTC_EP_STAT(pEndpoint, TxPosted, 1); + UNLOCK_HTC_TX(target); +#endif + + /* provide room in each packet's netbuf for the HTC frame header */ + netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + AR_DEBUG_ASSERT(netbuf); + if (!netbuf) + return QDF_STATUS_E_INVAL; + + qdf_nbuf_push_head(netbuf, sizeof(HTC_FRAME_HDR)); + pPacket->PktInfo.AsTx.Flags |= + HTC_TX_PACKET_FLAG_HTC_HEADER_IN_NETBUF_DATA; + /* setup HTC frame header */ + htc_hdr = (HTC_FRAME_HDR *)qdf_nbuf_get_frag_vaddr(netbuf, 0); + AR_DEBUG_ASSERT(htc_hdr); + if (!htc_hdr) + return QDF_STATUS_E_INVAL; + + HTC_WRITE32(htc_hdr, + SM(pPacket->ActualLength, + HTC_FRAME_HDR_PAYLOADLEN) | + SM(pPacket->Endpoint, + HTC_FRAME_HDR_ENDPOINTID)); + LOCK_HTC_TX(target); + + pPacket->PktInfo.AsTx.SeqNo = pEndpoint->SeqNo; + pEndpoint->SeqNo++; + + HTC_WRITE32(((uint32_t *)htc_hdr) + 1, + SM(pPacket->PktInfo.AsTx.SeqNo, + HTC_FRAME_HDR_CONTROLBYTES1)); + + UNLOCK_HTC_TX(target); + + /* + * For flow control enabled endpoints mapping is done in + * htc_issue_packets and for non flow control enabled endpoints + * its done here. + */ + if (!IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) { + pPacket->PktInfo.AsTx.Flags |= HTC_TX_PACKET_FLAG_FIXUP_NETBUF; + status = qdf_nbuf_map(target->osdev, + GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket), + QDF_DMA_TO_DEVICE); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: nbuf map failed, endpoint %pK, seq_no. %d\n", + __func__, pEndpoint, pEndpoint->SeqNo)); + return status; + } + } + + INIT_HTC_PACKET_QUEUE_AND_ADD(&pPktQueue, pPacket); +#ifdef USB_HIF_SINGLE_PIPE_DATA_SCHED + if (!htc_send_pkts_sched_check(HTCHandle, pEndpoint->Id)) + htc_send_pkts_sched_queue(HTCHandle, &pPktQueue, pEndpoint->Id); + else + htc_try_send(target, pEndpoint, &pPktQueue); +#else + htc_try_send(target, pEndpoint, &pPktQueue); +#endif + + /* do completion on any packets that couldn't get in */ + while (!HTC_QUEUE_EMPTY(&pPktQueue)) { + pPacket = htc_packet_dequeue(&pPktQueue); + + if (HTC_STOPPING(target)) + pPacket->Status = QDF_STATUS_E_CANCELED; + else + pPacket->Status = QDF_STATUS_E_RESOURCES; + + send_packet_completion(target, pPacket); + } + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-__htc_send_pkt\n")); + + return QDF_STATUS_SUCCESS; +} + +/* HTC API - htc_send_pkt */ +QDF_STATUS htc_send_pkt(HTC_HANDLE htc_handle, HTC_PACKET *htc_packet) +{ + if (!htc_handle) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: HTCHandle is NULL \n", __func__)); + return QDF_STATUS_E_FAILURE; + } + + if (!htc_packet) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: pPacket is NULL \n", __func__)); + return QDF_STATUS_E_FAILURE; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("+-htc_send_pkt: Enter endPointId: %d, buffer: %pK, length: %d\n", + htc_packet->Endpoint, htc_packet->pBuffer, + htc_packet->ActualLength)); + return __htc_send_pkt(htc_handle, htc_packet); +} +qdf_export_symbol(htc_send_pkt); + +#ifdef ATH_11AC_TXCOMPACT +/** + * htc_send_data_pkt() - send single data packet on an endpoint + * @HTCHandle: pointer to HTC handle + * @netbuf: network buffer containing the data to be sent + * @ActualLength: length of data that needs to be transmitted + * + * Return: QDF_STATUS_SUCCESS for success or an appropriate QDF_STATUS error + */ +QDF_STATUS htc_send_data_pkt(HTC_HANDLE htc_hdl, qdf_nbuf_t netbuf, int ep_id, + int actual_length) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_hdl); + HTC_ENDPOINT *pEndpoint; + HTC_FRAME_HDR *p_htc_hdr; + QDF_STATUS status = QDF_STATUS_SUCCESS; + int tx_resources; + uint32_t data_attr = 0; + int htc_payload_len = actual_length; + wlan_rtpm_dbgid rtpm_dbgid; + + pEndpoint = &target->endpoint[ep_id]; + + tx_resources = hif_get_free_queue_number(target->hif_dev, + pEndpoint->UL_PipeID); + + if (tx_resources < HTC_DATA_RESOURCE_THRS) { + if (pEndpoint->ul_is_polled) { + hif_send_complete_check(pEndpoint->target->hif_dev, + pEndpoint->UL_PipeID, 1); + tx_resources = + hif_get_free_queue_number(target->hif_dev, + pEndpoint->UL_PipeID); + } + if (tx_resources < HTC_DATA_MINDESC_PERPACKET) + return QDF_STATUS_E_FAILURE; + } + + rtpm_dbgid = + htc_send_pkts_rtpm_dbgid_get(pEndpoint->service_id); + if (hif_pm_runtime_get(target->hif_dev, rtpm_dbgid)) + return QDF_STATUS_E_FAILURE; + + p_htc_hdr = (HTC_FRAME_HDR *)qdf_nbuf_get_frag_vaddr(netbuf, 0); + AR_DEBUG_ASSERT(p_htc_hdr); + + data_attr = qdf_nbuf_data_attr_get(netbuf); + + if (target->htc_hdr_length_check) + htc_payload_len = actual_length - HTC_HEADER_LEN; + + HTC_WRITE32(p_htc_hdr, SM(htc_payload_len, HTC_FRAME_HDR_PAYLOADLEN) + | SM(ep_id, HTC_FRAME_HDR_ENDPOINTID)); + /* + * If the HIF pipe for the data endpoint is polled rather than + * interrupt-driven, this is a good point to check whether any + * data previously sent through the HIF pipe have finished being + * sent. + * Since this may result in callbacks to htc_tx_completion_handler, + * which can take the HTC tx lock, make the hif_send_complete_check + * call before acquiring the HTC tx lock. + * Call hif_send_complete_check directly, rather than calling + * htc_send_complete_check, and call the PollTimerStart separately + * after calling hif_send_head, so the timer will be started to + * check for completion of the new outstanding download (in the + * unexpected event that other polling calls don't catch it). + */ + + LOCK_HTC_TX(target); + + HTC_WRITE32(((uint32_t *)p_htc_hdr) + 1, + SM(pEndpoint->SeqNo, HTC_FRAME_HDR_CONTROLBYTES1)); + + pEndpoint->SeqNo++; + + QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_HTC); + DPTRACE(qdf_dp_trace(netbuf, QDF_DP_TRACE_HTC_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(netbuf), + sizeof(qdf_nbuf_data(netbuf)), QDF_TX)); + status = hif_send_head(target->hif_dev, + pEndpoint->UL_PipeID, + pEndpoint->Id, actual_length, netbuf, data_attr); + + UNLOCK_HTC_TX(target); + return status; +} +#else /*ATH_11AC_TXCOMPACT */ + +/** + * htc_send_data_pkt() - htc_send_data_pkt + * @HTCHandle: pointer to HTC handle + * @pPacket: pointer to HTC_PACKET + * @more_data: indicates whether more data is to follow + * + * Return: QDF_STATUS_SUCCESS for success or an appropriate QDF_STATUS error + */ +QDF_STATUS htc_send_data_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket, + uint8_t more_data) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint; + HTC_FRAME_HDR *pHtcHdr; + HTC_PACKET_QUEUE sendQueue; + qdf_nbuf_t netbuf = NULL; + int tx_resources; + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint32_t data_attr = 0; + bool used_extra_tx_credit = false; + + if (pPacket) { + if ((pPacket->Endpoint >= ENDPOINT_MAX) || + (pPacket->Endpoint <= ENDPOINT_UNUSED)) { + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("%s endpoint is invalid\n", __func__)); + AR_DEBUG_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + pEndpoint = &target->endpoint[pPacket->Endpoint]; + + /* add HTC_FRAME_HDR in the initial fragment */ + netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + pHtcHdr = (HTC_FRAME_HDR *) qdf_nbuf_get_frag_vaddr(netbuf, 0); + AR_DEBUG_ASSERT(pHtcHdr); + + HTC_WRITE32(pHtcHdr, + SM(pPacket->ActualLength, + HTC_FRAME_HDR_PAYLOADLEN) | + SM(pPacket->PktInfo.AsTx.SendFlags, + HTC_FRAME_HDR_FLAGS) | + SM(pPacket->Endpoint, + HTC_FRAME_HDR_ENDPOINTID)); + /* + * If the HIF pipe for the data endpoint is polled rather than + * interrupt-driven, this is a good point to check whether any + * data previously sent through the HIF pipe have finished being + * sent. Since this may result in callbacks to + * htc_tx_completion_handler, which can take the HTC tx lock, + * make the hif_send_complete_check call before acquiring the + * HTC tx lock. + * Call hif_send_complete_check directly, rather than calling + * htc_send_complete_check, and call the PollTimerStart + * separately after calling hif_send_head, so the timer will be + * started to check for completion of the new outstanding + * download (in the unexpected event that other polling calls + * don't catch it). + */ + if (pEndpoint->ul_is_polled) { + htc_send_complete_poll_timer_stop(pEndpoint); + hif_send_complete_check(pEndpoint->target->hif_dev, + pEndpoint->UL_PipeID, 0); + } + + LOCK_HTC_TX(target); + + pPacket->PktInfo.AsTx.SeqNo = pEndpoint->SeqNo; + pEndpoint->SeqNo++; + + HTC_WRITE32(((uint32_t *) pHtcHdr) + 1, + SM(pPacket->PktInfo.AsTx.SeqNo, + HTC_FRAME_HDR_CONTROLBYTES1)); + + /* append new packet to pEndpoint->TxQueue */ + HTC_PACKET_ENQUEUE(&pEndpoint->TxQueue, pPacket); + if (HTC_TX_BUNDLE_ENABLED(target) && (more_data)) { + UNLOCK_HTC_TX(target); + return QDF_STATUS_SUCCESS; + } + + QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_HTC); + DPTRACE(qdf_dp_trace(netbuf, QDF_DP_TRACE_HTC_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(netbuf), + sizeof(qdf_nbuf_data(netbuf)), QDF_TX)); + } else { + LOCK_HTC_TX(target); + pEndpoint = &target->endpoint[1]; + } + + /* increment tx processing count on entry */ + qdf_atomic_inc(&pEndpoint->TxProcessCount); + if (qdf_atomic_read(&pEndpoint->TxProcessCount) > 1) { + /* + * Another thread or task is draining the TX queues on this + * endpoint. That thread will reset the tx processing count when + * the queue is drained. + */ + qdf_atomic_dec(&pEndpoint->TxProcessCount); + UNLOCK_HTC_TX(target); + return QDF_STATUS_SUCCESS; + } + + /***** beyond this point only 1 thread may enter ******/ + + INIT_HTC_PACKET_QUEUE(&sendQueue); + if (IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) { +#if DEBUG_CREDIT + int cred = pEndpoint->TxCredits; +#endif + get_htc_send_packets_credit_based(target, pEndpoint, + &sendQueue); +#if DEBUG_CREDIT + if (ep_debug_mask & (1 << pEndpoint->Id)) { + if (cred - pEndpoint->TxCredits > 0) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + (" Decrease EP%d %d - %d = %d credits.\n", + pEndpoint->Id, cred, + cred - pEndpoint->TxCredits, + pEndpoint->TxCredits)); + } + } +#endif + UNLOCK_HTC_TX(target); + } + + else if (HTC_TX_BUNDLE_ENABLED(target)) { + if (hif_get_bus_type(target->hif_dev) == QDF_BUS_TYPE_USB) { + if (hif_get_free_queue_number(target->hif_dev, + pEndpoint->UL_PipeID)) + /* + * Header and payload belongs to the different + * fragments and consume 2 resource for one HTC + * package but USB combine into one transfer. + */ + get_htc_send_packets(target, pEndpoint, + &sendQueue, + HTC_MAX_MSG_PER_BUNDLE_TX + * 2); + } else { + /* Dequeue max packets from endpoint tx queue */ + get_htc_send_packets(target, pEndpoint, &sendQueue, + HTC_MAX_TX_BUNDLE_SEND_LIMIT); + } + UNLOCK_HTC_TX(target); + } else { + /* + * Now drain the endpoint TX queue for transmission as long as + * we have enough transmit resources + */ + tx_resources = + hif_get_free_queue_number(target->hif_dev, + pEndpoint->UL_PipeID); + get_htc_send_packets(target, pEndpoint, &sendQueue, + tx_resources); + UNLOCK_HTC_TX(target); + } + + /* send what we can */ + while (true) { + if (HTC_TX_BUNDLE_ENABLED(target) && + (HTC_PACKET_QUEUE_DEPTH(&sendQueue) >= + HTC_MIN_MSG_PER_BUNDLE) && + (hif_get_bus_type(target->hif_dev) == QDF_BUS_TYPE_SDIO || + hif_get_bus_type(target->hif_dev) == QDF_BUS_TYPE_USB)) { + if (pEndpoint->EpCallBacks.ep_padding_credit_update) { + if (htc_tx_pad_credit_avail(pEndpoint) < 1) { + status = QDF_STATUS_E_RESOURCES; + /* put the sendQueue back at the front + * of pEndpoint->TxQueue + */ + LOCK_HTC_TX(target); + HTC_PACKET_QUEUE_TRANSFER_TO_HEAD( + &pEndpoint->TxQueue, + &sendQueue); + UNLOCK_HTC_TX(target); + break; + } + } + htc_issue_packets_bundle(target, pEndpoint, &sendQueue); + } + if (pEndpoint->EpCallBacks.ep_padding_credit_update) { + if (htc_tx_pad_credit_avail(pEndpoint) < 1) { + status = QDF_STATUS_E_RESOURCES; + /* put the sendQueue back at the front + * of pEndpoint->TxQueue + */ + LOCK_HTC_TX(target); + HTC_PACKET_QUEUE_TRANSFER_TO_HEAD( + &pEndpoint->TxQueue, + &sendQueue); + UNLOCK_HTC_TX(target); + break; + } + } + pPacket = htc_packet_dequeue(&sendQueue); + if (!pPacket) + break; + netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + pHtcHdr = (HTC_FRAME_HDR *)qdf_nbuf_get_frag_vaddr(netbuf, 0); + + LOCK_HTC_TX(target); + /* store in look up queue to match completions */ + HTC_PACKET_ENQUEUE(&pEndpoint->TxLookupQueue, pPacket); + INC_HTC_EP_STAT(pEndpoint, TxIssued, 1); + pEndpoint->ul_outstanding_cnt++; + UNLOCK_HTC_TX(target); + + used_extra_tx_credit = + htc_handle_extra_tx_credit(pEndpoint, pPacket, + (uint8_t *)pHtcHdr, + NULL, + pPacket->ActualLength + + HTC_HDR_LENGTH); + + status = hif_send_head(target->hif_dev, + pEndpoint->UL_PipeID, + pEndpoint->Id, + HTC_HDR_LENGTH + pPacket->ActualLength, + netbuf, data_attr); + if (status != QDF_STATUS_SUCCESS) { + if (pEndpoint->EpCallBacks.ep_padding_credit_update) { + if (used_extra_tx_credit) { + pEndpoint->EpCallBacks. + ep_padding_credit_update + (pEndpoint->EpCallBacks.pContext, 1); + } + } + } +#if DEBUG_BUNDLE + qdf_print(" Send single EP%d buffer size:0x%x, total:0x%x.", + pEndpoint->Id, + pEndpoint->TxCreditSize, + HTC_HDR_LENGTH + pPacket->ActualLength); +#endif + + htc_issue_tx_bundle_stats_inc(target); + + if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { + LOCK_HTC_TX(target); + pEndpoint->ul_outstanding_cnt--; + /* remove this packet from the tx completion queue */ + HTC_PACKET_REMOVE(&pEndpoint->TxLookupQueue, pPacket); + + /* + * Don't bother reclaiming credits - HTC flow control + * is not applicable to tx data. + * In LL systems, there is no download flow control, + * since there's virtually no download delay. + * In HL systems, the txrx SW explicitly performs the + * tx flow control. + */ + /* pEndpoint->TxCredits += + * pPacket->PktInfo.AsTx.CreditsUsed; + */ + + /* put this frame back at the front of the sendQueue */ + HTC_PACKET_ENQUEUE_TO_HEAD(&sendQueue, pPacket); + + /* put the sendQueue back at the front of + * pEndpoint->TxQueue + */ + HTC_PACKET_QUEUE_TRANSFER_TO_HEAD(&pEndpoint->TxQueue, + &sendQueue); + UNLOCK_HTC_TX(target); + break; /* still need to reset TxProcessCount */ + } + } + /* done with this endpoint, we can clear the count */ + qdf_atomic_init(&pEndpoint->TxProcessCount); + + if (pEndpoint->ul_is_polled) { + /* + * Start a cleanup timer to poll for download completion. + * The download completion should be noticed promptly from + * other polling calls, but the timer provides a safety net + * in case other polling calls don't occur as expected. + */ + htc_send_complete_poll_timer_start(pEndpoint); + } + + return status; +} +#endif /*ATH_11AC_TXCOMPACT */ +qdf_export_symbol(htc_send_data_pkt); + +/* + * In the adapted HIF layer, qdf_nbuf_t are passed between HIF and HTC, + * since upper layers expects HTC_PACKET containers we use the completed netbuf + * and lookup its corresponding HTC packet buffer from a lookup list. + * This is extra overhead that can be fixed by re-aligning HIF interfaces + * with HTC. + * + */ +static HTC_PACKET *htc_lookup_tx_packet(HTC_TARGET *target, + HTC_ENDPOINT *pEndpoint, + qdf_nbuf_t netbuf) +{ + HTC_PACKET *pPacket = NULL; + HTC_PACKET *pFoundPacket = NULL; + HTC_PACKET_QUEUE lookupQueue; + + INIT_HTC_PACKET_QUEUE(&lookupQueue); + LOCK_HTC_EP_TX_LOOKUP(pEndpoint); + + LOCK_HTC_TX(target); + + /* mark that HIF has indicated the send complete for another packet */ + pEndpoint->ul_outstanding_cnt--; + + /* Dequeue first packet directly because of in-order completion */ + pPacket = htc_packet_dequeue(&pEndpoint->TxLookupQueue); + if (qdf_unlikely(!pPacket)) { + UNLOCK_HTC_TX(target); + UNLOCK_HTC_EP_TX_LOOKUP(pEndpoint); + return NULL; + } + if (netbuf == (qdf_nbuf_t) GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket)) { + UNLOCK_HTC_TX(target); + UNLOCK_HTC_EP_TX_LOOKUP(pEndpoint); + return pPacket; + } + HTC_PACKET_ENQUEUE(&lookupQueue, pPacket); + + /* + * Move TX lookup queue to temp queue because most of packets that are + * not index 0 are not top 10 packets. + */ + HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&lookupQueue, + &pEndpoint->TxLookupQueue); + + ITERATE_OVER_LIST_ALLOW_REMOVE(&lookupQueue.QueueHead, pPacket, + HTC_PACKET, ListLink) { + + if (!pPacket) { + pFoundPacket = pPacket; + break; + } + /* check for removal */ + if (netbuf == + (qdf_nbuf_t) GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket)) { + /* found it */ + HTC_PACKET_REMOVE(&lookupQueue, pPacket); + pFoundPacket = pPacket; + break; + } + + } + ITERATE_END; + + HTC_PACKET_QUEUE_TRANSFER_TO_HEAD(&pEndpoint->TxLookupQueue, + &lookupQueue); + UNLOCK_HTC_TX(target); + UNLOCK_HTC_EP_TX_LOOKUP(pEndpoint); + + return pFoundPacket; +} + +/** + * htc_tx_completion_handler() - htc tx completion handler + * @Context: pointer to HTC_TARGET structure + * @netbuf: pointer to netbuf for which completion handler is being called + * @EpID: end point Id on which the packet was sent + * @toeplitz_hash_result: toeplitz hash result + * + * Return: QDF_STATUS_SUCCESS for success or an appropriate QDF_STATUS error + */ +QDF_STATUS htc_tx_completion_handler(void *Context, + qdf_nbuf_t netbuf, unsigned int EpID, + uint32_t toeplitz_hash_result) +{ + HTC_TARGET *target = (HTC_TARGET *) Context; + HTC_ENDPOINT *pEndpoint; + HTC_PACKET *pPacket; +#ifdef USB_HIF_SINGLE_PIPE_DATA_SCHED + HTC_ENDPOINT_ID eid[DATA_EP_SIZE] = { ENDPOINT_5, ENDPOINT_4, + ENDPOINT_2, ENDPOINT_3 }; + int epidIdx; + uint16_t resourcesThresh[DATA_EP_SIZE]; /* urb resources */ + uint16_t resources; + uint16_t resourcesMax; +#endif + + pEndpoint = &target->endpoint[EpID]; + target->TX_comp_cnt++; + + do { + pPacket = htc_lookup_tx_packet(target, pEndpoint, netbuf); + if (!pPacket) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("HTC TX lookup failed!\n")); + /* may have already been flushed and freed */ + netbuf = NULL; + break; + } + if (pPacket->PktInfo.AsTx.Tag != HTC_TX_PACKET_TAG_AUTO_PM) + hif_pm_runtime_put(target->hif_dev, + RTPM_ID_WMI); + + if (pPacket->PktInfo.AsTx.Tag == HTC_TX_PACKET_TAG_BUNDLED) { + HTC_PACKET *pPacketTemp; + HTC_PACKET_QUEUE *pQueueSave = + (HTC_PACKET_QUEUE *) pPacket->pContext; + HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pQueueSave, + pPacketTemp) { + pPacket->Status = QDF_STATUS_SUCCESS; + send_packet_completion(target, pPacketTemp); + } + HTC_PACKET_QUEUE_ITERATE_END; + free_htc_bundle_packet(target, pPacket); + + if (hif_get_bus_type(target->hif_dev) == + QDF_BUS_TYPE_USB) { + if (!IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) + htc_try_send(target, pEndpoint, NULL); + } + + return QDF_STATUS_SUCCESS; + } + /* will be giving this buffer back to upper layers */ + netbuf = NULL; + pPacket->Status = QDF_STATUS_SUCCESS; + send_packet_completion(target, pPacket); + + } while (false); + + if (!IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) { + /* note: when using TX credit flow, the re-checking of queues + * happens when credits flow back from the target. In the non-TX + * credit case, we recheck after the packet completes + */ + if ((qdf_atomic_read(&pEndpoint->TxProcessCount) == 0) || + (!pEndpoint->async_update)) { + htc_try_send(target, pEndpoint, NULL); + } + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_FASTPATH +/** + * htc_ctrl_msg_cmpl(): checks for tx completion for the endpoint specified + * @HTC_HANDLE : pointer to the htc target context + * @htc_ep_id : end point id + * + * checks HTC tx completion + * + * Return: none + */ +void htc_ctrl_msg_cmpl(HTC_HANDLE htc_pdev, HTC_ENDPOINT_ID htc_ep_id) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_pdev); + HTC_ENDPOINT *pendpoint = &target->endpoint[htc_ep_id]; + + htc_send_complete_check(pendpoint, 1); +} +qdf_export_symbol(htc_ctrl_msg_cmpl); +#endif + +/* callback when TX resources become available */ +void htc_tx_resource_avail_handler(void *context, uint8_t pipeID) +{ + int i; + HTC_TARGET *target = (HTC_TARGET *) context; + HTC_ENDPOINT *pEndpoint = NULL; + + for (i = 0; i < ENDPOINT_MAX; i++) { + pEndpoint = &target->endpoint[i]; + if (pEndpoint->service_id != 0) { + if (pEndpoint->UL_PipeID == pipeID) + break; + } + } + + if (i >= ENDPOINT_MAX) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Invalid pipe indicated for TX resource avail : %d!\n", + pipeID)); + return; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("HIF indicated more resources for pipe:%d\n", + pipeID)); + + htc_try_send(target, pEndpoint, NULL); +} + +#ifdef FEATURE_RUNTIME_PM +/** + * htc_kick_queues(): resumes tx transactions of suspended endpoints + * @context: pointer to the htc target context + * + * Iterates through the enpoints and provides a context to empty queues + * int the hif layer when they are stalled due to runtime suspend. + * + * Return: none + */ +void htc_kick_queues(void *context) +{ + int i; + HTC_TARGET *target = (HTC_TARGET *)context; + HTC_ENDPOINT *endpoint = NULL; + + for (i = 0; i < ENDPOINT_MAX; i++) { + endpoint = &target->endpoint[i]; + + if (endpoint->service_id == 0) + continue; + + if (endpoint->EpCallBacks.ep_resume_tx_queue) + endpoint->EpCallBacks.ep_resume_tx_queue( + endpoint->EpCallBacks.pContext); + + htc_try_send(target, endpoint, NULL); + } + + hif_fastpath_resume(target->hif_dev); +} +#endif + +/* flush endpoint TX queue */ +void htc_flush_endpoint_tx(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint, + HTC_TX_TAG Tag) +{ + HTC_PACKET *pPacket; + + LOCK_HTC_TX(target); + while (HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue)) { + pPacket = htc_packet_dequeue(&pEndpoint->TxQueue); + + if (pPacket) { + /* let the sender know the packet was not delivered */ + pPacket->Status = QDF_STATUS_E_CANCELED; + send_packet_completion(target, pPacket); + } + } + UNLOCK_HTC_TX(target); +} + +/* flush pending entries in endpoint TX Lookup queue */ +void htc_flush_endpoint_txlookupQ(HTC_TARGET *target, + HTC_ENDPOINT_ID endpoint_id, + bool call_ep_callback) +{ + HTC_PACKET *packet; + HTC_ENDPOINT *endpoint; + + endpoint = &target->endpoint[endpoint_id]; + + if (!endpoint && endpoint->service_id == 0) + return; + + LOCK_HTC_TX(target); + while (HTC_PACKET_QUEUE_DEPTH(&endpoint->TxLookupQueue)) { + packet = htc_packet_dequeue(&endpoint->TxLookupQueue); + + if (packet) { + if (call_ep_callback == true) { + packet->Status = QDF_STATUS_E_CANCELED; + send_packet_completion(target, packet); + } else { + qdf_mem_free(packet); + } + } + } + UNLOCK_HTC_TX(target); +} + +/* HTC API to flush an endpoint's TX queue*/ +void htc_flush_endpoint(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Endpoint, + HTC_TX_TAG Tag) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint = &target->endpoint[Endpoint]; + + if (pEndpoint->service_id == 0) { + AR_DEBUG_ASSERT(false); + /* not in use.. */ + return; + } + + htc_flush_endpoint_tx(target, pEndpoint, Tag); +} + +/* HTC API to indicate activity to the credit distribution function */ +void htc_indicate_activity_change(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID Endpoint, bool Active) +{ + /* TODO */ +} + +bool htc_is_endpoint_active(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Endpoint) +{ + return true; +} + +void htc_set_nodrop_pkt(HTC_HANDLE HTCHandle, A_BOOL isNodropPkt) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + target->is_nodrop_pkt = isNodropPkt; +} + +void htc_enable_hdr_length_check(HTC_HANDLE htc_hdl, bool htc_hdr_length_check) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_hdl); + + target->htc_hdr_length_check = htc_hdr_length_check; +} + +/** + * htc_process_credit_rpt() - process credit report, call distribution function + * @target: pointer to HTC_TARGET + * @pRpt: pointer to HTC_CREDIT_REPORT + * @NumEntries: number of entries in credit report + * @FromEndpoint: endpoint for which credit report is received + * + * Return: A_OK for success or an appropriate A_STATUS error + */ +void htc_process_credit_rpt(HTC_TARGET *target, HTC_CREDIT_REPORT *pRpt, + int NumEntries, HTC_ENDPOINT_ID FromEndpoint) +{ + int i; + HTC_ENDPOINT *pEndpoint; + int totalCredits = 0; + uint8_t rpt_credits, rpt_ep_id; + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("+htc_process_credit_rpt, Credit Report Entries:%d\n", + NumEntries)); + + /* lock out TX while we update credits */ + LOCK_HTC_TX(target); + + for (i = 0; i < NumEntries; i++, pRpt++) { + + rpt_ep_id = HTC_GET_FIELD(pRpt, HTC_CREDIT_REPORT, ENDPOINTID); + + if (rpt_ep_id >= ENDPOINT_MAX) { + AR_DEBUG_ASSERT(false); + break; + } + + rpt_credits = HTC_GET_FIELD(pRpt, HTC_CREDIT_REPORT, CREDITS); + + pEndpoint = &target->endpoint[rpt_ep_id]; +#if DEBUG_CREDIT + if (ep_debug_mask & (1 << pEndpoint->Id)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + (" Increase EP%d %d + %d = %d credits\n", + rpt_ep_id, pEndpoint->TxCredits, + rpt_credits, + pEndpoint->TxCredits + rpt_credits)); + } +#endif + +#ifdef HTC_EP_STAT_PROFILING + + INC_HTC_EP_STAT(pEndpoint, TxCreditRpts, 1); + INC_HTC_EP_STAT(pEndpoint, TxCreditsReturned, rpt_credits); + + if (FromEndpoint == rpt_ep_id) { + /* this credit report arrived on the same endpoint + * indicating it arrived in an RX packet + */ + INC_HTC_EP_STAT(pEndpoint, TxCreditsFromRx, + rpt_credits); + INC_HTC_EP_STAT(pEndpoint, TxCreditRptsFromRx, 1); + } else if (FromEndpoint == ENDPOINT_0) { + /* this credit arrived on endpoint 0 as a NULL msg */ + INC_HTC_EP_STAT(pEndpoint, TxCreditsFromEp0, + rpt_credits); + INC_HTC_EP_STAT(pEndpoint, TxCreditRptsFromEp0, 1); + } else { + /* arrived on another endpoint */ + INC_HTC_EP_STAT(pEndpoint, TxCreditsFromOther, + rpt_credits); + INC_HTC_EP_STAT(pEndpoint, TxCreditRptsFromOther, 1); + } + +#endif + + if (pEndpoint->service_id == WMI_CONTROL_SVC) { + htc_credit_record(HTC_PROCESS_CREDIT_REPORT, + pEndpoint->TxCredits + rpt_credits, + HTC_PACKET_QUEUE_DEPTH(&pEndpoint-> + TxQueue)); + } + + pEndpoint->TxCredits += rpt_credits; + + if (pEndpoint->TxCredits + && HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue)) { + UNLOCK_HTC_TX(target); +#ifdef ATH_11AC_TXCOMPACT + htc_try_send(target, pEndpoint, NULL); +#else + if (pEndpoint->service_id == HTT_DATA_MSG_SVC) + htc_send_data_pkt((HTC_HANDLE)target, NULL, 0); + else + htc_try_send(target, pEndpoint, NULL); +#endif + LOCK_HTC_TX(target); + } + totalCredits += rpt_credits; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + (" Report indicated %d credits to distribute\n", + totalCredits)); + + UNLOCK_HTC_TX(target); + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-htc_process_credit_rpt\n")); +} + +/* function to fetch stats from htc layer*/ +struct ol_ath_htc_stats *ieee80211_ioctl_get_htc_stats(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + return &(target->htc_pkt_stats); +} + +#ifdef SYSTEM_PM_CHECK +void htc_system_resume(HTC_HANDLE htc) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc); + HTC_ENDPOINT *endpoint = NULL; + int i; + + if (!target) + return; + + for (i = 0; i < ENDPOINT_MAX; i++) { + endpoint = &target->endpoint[i]; + + if (endpoint->service_id == 0) + continue; + + htc_try_send(target, endpoint, NULL); + } +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_services.c b/drivers/staging/qca-wifi-host-cmn/htc/htc_services.c new file mode 100644 index 0000000000000000000000000000000000000000..fa0a8010a1724e82725200e35d0d457a9952b5fc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_services.c @@ -0,0 +1,430 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "htc_debug.h" +#include "htc_internal.h" +#include +#include /* qdf_nbuf_t */ +#include "qdf_module.h" + +/* use credit flow control over HTC */ +unsigned int htc_credit_flow = 1; +#ifndef DEBUG_CREDIT +#define DEBUG_CREDIT 0 +#endif + +/* HTC credit flow global disable */ +void htc_global_credit_flow_disable(void) +{ + htc_credit_flow = 0; +} + +/* HTC credit flow global enable */ +void htc_global_credit_flow_enable(void) +{ + htc_credit_flow = 1; +} + +#ifdef HIF_SDIO + +/** + * htc_alt_data_credit_size_update() - update tx credit size info + * on max bundle size + * @target: hif context + * @ul_pipe: endpoint ul pipe id + * @dl_pipe: endpoint dl pipe id + * @txCreditSize: endpoint tx credit size + * + * + * When AltDataCreditSize is non zero, it indicates the credit size for + * HTT and all other services on Mbox0. Mbox1 has WMI_CONTROL_SVC which + * uses the default credit size. Use AltDataCreditSize only when + * mailbox is swapped. Mailbox swap bit is set by bmi_target_ready at + * the end of BMI phase. + * + * The Credit Size is a parameter associated with the mbox rather than + * a service. Multiple services can run on this mbox. + * + * If AltDataCreditSize is 0, that means the firmware doesn't support + * this feature. Default to the TargetCreditSize + * + * Return: None + */ +static inline void +htc_alt_data_credit_size_update(HTC_TARGET *target, + uint8_t *ul_pipe, + uint8_t *dl_pipe, + int *txCreditSize) +{ + if ((target->AltDataCreditSize) && + (*ul_pipe == 1) && (*dl_pipe == 0)) + *txCreditSize = target->AltDataCreditSize; + +} +#else + +static inline void +htc_alt_data_credit_size_update(HTC_TARGET *target, + uint8_t *ul_pipe, + uint8_t *dl_pipe, + int *txCreditSize) +{ +} +#endif + +QDF_STATUS htc_connect_service(HTC_HANDLE HTCHandle, + struct htc_service_connect_req *pConnectReq, + struct htc_service_connect_resp *pConnectResp) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + QDF_STATUS status = QDF_STATUS_SUCCESS; + HTC_PACKET *pSendPacket = NULL; + HTC_CONNECT_SERVICE_RESPONSE_MSG *pResponseMsg; + HTC_CONNECT_SERVICE_MSG *pConnectMsg; + HTC_ENDPOINT_ID assignedEndpoint = ENDPOINT_MAX; + HTC_ENDPOINT *pEndpoint; + unsigned int maxMsgSize = 0; + qdf_nbuf_t netbuf; + uint8_t txAlloc; + int length; + bool disableCreditFlowCtrl = false; + uint16_t conn_flags; + uint16_t rsp_msg_id, rsp_msg_serv_id, rsp_msg_max_msg_size; + uint8_t rsp_msg_status, rsp_msg_end_id, rsp_msg_serv_meta_len; + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("+htc_connect_service, target:%pK SvcID:0x%X\n", target, + pConnectReq->service_id)); + + do { + + AR_DEBUG_ASSERT(pConnectReq->service_id != 0); + + if (HTC_CTRL_RSVD_SVC == pConnectReq->service_id) { + /* special case for pseudo control service */ + assignedEndpoint = ENDPOINT_0; + maxMsgSize = HTC_MAX_CONTROL_MESSAGE_LENGTH; + txAlloc = 0; + + } else { + + txAlloc = htc_get_credit_allocation(target, + pConnectReq->service_id); + + if (!txAlloc) { + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("Service %d does not allocate target credits!\n", + pConnectReq->service_id)); + } + + /* allocate a packet to send to the target */ + pSendPacket = htc_alloc_control_tx_packet(target); + + if (!pSendPacket) { + AR_DEBUG_ASSERT(false); + status = QDF_STATUS_E_NOMEM; + break; + } + + netbuf = + (qdf_nbuf_t) + GET_HTC_PACKET_NET_BUF_CONTEXT(pSendPacket); + length = + sizeof(HTC_CONNECT_SERVICE_MSG) + + pConnectReq->MetaDataLength; + + /* assemble connect service message */ + qdf_nbuf_put_tail(netbuf, length); + pConnectMsg = + (HTC_CONNECT_SERVICE_MSG *) qdf_nbuf_data(netbuf); + + if (!pConnectMsg) { + AR_DEBUG_ASSERT(0); + status = QDF_STATUS_E_FAULT; + break; + } + + qdf_mem_zero(pConnectMsg, + sizeof(HTC_CONNECT_SERVICE_MSG)); + + conn_flags = + (pConnectReq-> + ConnectionFlags & ~HTC_SET_RECV_ALLOC_MASK) | + HTC_CONNECT_FLAGS_SET_RECV_ALLOCATION(txAlloc); + HTC_SET_FIELD(pConnectMsg, HTC_CONNECT_SERVICE_MSG, + MESSAGEID, HTC_MSG_CONNECT_SERVICE_ID); + HTC_SET_FIELD(pConnectMsg, HTC_CONNECT_SERVICE_MSG, + SERVICE_ID, pConnectReq->service_id); + HTC_SET_FIELD(pConnectMsg, HTC_CONNECT_SERVICE_MSG, + CONNECTIONFLAGS, conn_flags); + + if (pConnectReq-> + ConnectionFlags & + HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL) { + disableCreditFlowCtrl = true; + } + + if (!htc_credit_flow) + disableCreditFlowCtrl = true; + + /* check caller if it wants to transfer meta data */ + if ((pConnectReq->pMetaData) && + (pConnectReq->MetaDataLength <= + HTC_SERVICE_META_DATA_MAX_LENGTH)) { + /* copy meta data into msg buffer (after hdr) */ + qdf_mem_copy((uint8_t *) pConnectMsg + + sizeof(HTC_CONNECT_SERVICE_MSG), + pConnectReq->pMetaData, + pConnectReq->MetaDataLength); + + HTC_SET_FIELD(pConnectMsg, + HTC_CONNECT_SERVICE_MSG, + SERVICEMETALENGTH, + pConnectReq->MetaDataLength); + } + + SET_HTC_PACKET_INFO_TX(pSendPacket, + NULL, + (uint8_t *) pConnectMsg, + length, + ENDPOINT_0, + HTC_SERVICE_TX_PACKET_TAG); + + status = htc_send_pkt((HTC_HANDLE) target, pSendPacket); + /* we don't own it anymore */ + pSendPacket = NULL; + if (QDF_IS_STATUS_ERROR(status)) + break; + + /* wait for response */ + status = htc_wait_recv_ctrl_message(target); + if (QDF_IS_STATUS_ERROR(status)) + break; + /* we controlled the buffer creation so it has to be + * properly aligned + */ + pResponseMsg = + (HTC_CONNECT_SERVICE_RESPONSE_MSG *) target-> + CtrlResponseBuffer; + + rsp_msg_id = HTC_GET_FIELD(pResponseMsg, + HTC_CONNECT_SERVICE_RESPONSE_MSG, + MESSAGEID); + rsp_msg_serv_id = + HTC_GET_FIELD(pResponseMsg, + HTC_CONNECT_SERVICE_RESPONSE_MSG, + SERVICEID); + rsp_msg_status = + HTC_GET_FIELD(pResponseMsg, + HTC_CONNECT_SERVICE_RESPONSE_MSG, + STATUS); + rsp_msg_end_id = + HTC_GET_FIELD(pResponseMsg, + HTC_CONNECT_SERVICE_RESPONSE_MSG, + ENDPOINTID); + rsp_msg_max_msg_size = + HTC_GET_FIELD(pResponseMsg, + HTC_CONNECT_SERVICE_RESPONSE_MSG, + MAXMSGSIZE); + rsp_msg_serv_meta_len = + HTC_GET_FIELD(pResponseMsg, + HTC_CONNECT_SERVICE_RESPONSE_MSG, + SERVICEMETALENGTH); + + if ((rsp_msg_id != HTC_MSG_CONNECT_SERVICE_RESPONSE_ID) + || (target->CtrlResponseLength < + sizeof(HTC_CONNECT_SERVICE_RESPONSE_MSG))) { + /* this message is not valid */ + AR_DEBUG_ASSERT(false); + status = QDF_STATUS_E_PROTO; + break; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("htc_connect_service, service 0x%X connect response from target status:%d, assigned ep: %d\n", + rsp_msg_serv_id, rsp_msg_status, + rsp_msg_end_id)); + + pConnectResp->ConnectRespCode = rsp_msg_status; + + /* check response status */ + if (rsp_msg_status != HTC_SERVICE_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + (" Target failed service 0x%X connect request (status:%d)\n", + rsp_msg_serv_id, + rsp_msg_status)); + status = QDF_STATUS_E_PROTO; +/* TODO: restore the ifdef when FW supports services 301 and 302 + * (HTT_MSG_DATA[23]_MSG_SVC) + */ +/* #ifdef QCA_TX_HTT2_SUPPORT */ + /* Keep work and not to block the control msg */ + target->CtrlResponseProcessing = false; +/* #endif */ /* QCA_TX_HTT2_SUPPORT */ + break; + } + + assignedEndpoint = (HTC_ENDPOINT_ID) rsp_msg_end_id; + maxMsgSize = rsp_msg_max_msg_size; + + if ((pConnectResp->pMetaData) && + (rsp_msg_serv_meta_len > 0) && + (rsp_msg_serv_meta_len <= + HTC_SERVICE_META_DATA_MAX_LENGTH)) { + /* caller supplied a buffer and the target + * responded with data + */ + int copyLength = + min((int)pConnectResp->BufferLength, + (int)rsp_msg_serv_meta_len); + /* copy the meta data */ + qdf_mem_copy(pConnectResp->pMetaData, + ((uint8_t *) pResponseMsg) + + sizeof + (HTC_CONNECT_SERVICE_RESPONSE_MSG), + copyLength); + pConnectResp->ActualLength = copyLength; + } + /* done processing response buffer */ + target->CtrlResponseProcessing = false; + } + + /* rest of these are parameter checks so set the error status */ + status = QDF_STATUS_E_PROTO; + + if (assignedEndpoint >= ENDPOINT_MAX) { + AR_DEBUG_ASSERT(false); + break; + } + + if (0 == maxMsgSize) { + AR_DEBUG_ASSERT(false); + break; + } + + pEndpoint = &target->endpoint[assignedEndpoint]; + pEndpoint->Id = assignedEndpoint; + if (pEndpoint->service_id != 0) { + /* endpoint already in use! */ + AR_DEBUG_ASSERT(false); + break; + } + + /* return assigned endpoint to caller */ + pConnectResp->Endpoint = assignedEndpoint; + pConnectResp->MaxMsgLength = maxMsgSize; + + /* setup the endpoint */ + /* service_id marks the endpoint in use */ + pEndpoint->service_id = pConnectReq->service_id; + pEndpoint->MaxTxQueueDepth = pConnectReq->MaxSendQueueDepth; + pEndpoint->MaxMsgLength = maxMsgSize; + pEndpoint->TxCredits = txAlloc; + pEndpoint->TxCreditSize = target->TargetCreditSize; + pEndpoint->TxCreditsPerMaxMsg = + maxMsgSize / target->TargetCreditSize; + if (maxMsgSize % target->TargetCreditSize) + pEndpoint->TxCreditsPerMaxMsg++; +#if DEBUG_CREDIT + qdf_print(" Endpoint%d initial credit:%d, size:%d.", + pEndpoint->Id, pEndpoint->TxCredits, + pEndpoint->TxCreditSize); +#endif + + /* copy all the callbacks */ + pEndpoint->EpCallBacks = pConnectReq->EpCallbacks; + pEndpoint->async_update = 0; + + status = hif_map_service_to_pipe(target->hif_dev, + pEndpoint->service_id, + &pEndpoint->UL_PipeID, + &pEndpoint->DL_PipeID, + &pEndpoint->ul_is_polled, + &pEndpoint->dl_is_polled); + if (QDF_IS_STATUS_ERROR(status)) + break; + + htc_alt_data_credit_size_update(target, + &pEndpoint->UL_PipeID, + &pEndpoint->DL_PipeID, + &pEndpoint->TxCreditSize); + + /* not currently supported */ + qdf_assert(!pEndpoint->dl_is_polled); + + if (pEndpoint->ul_is_polled) { + qdf_timer_init(target->osdev, + &pEndpoint->ul_poll_timer, + htc_send_complete_check_cleanup, + pEndpoint, + QDF_TIMER_TYPE_SW); + } + + HTC_TRACE("SVC:0x%4.4X, ULpipe:%d DLpipe:%d id:%d Ready", + pEndpoint->service_id, pEndpoint->UL_PipeID, + pEndpoint->DL_PipeID, pEndpoint->Id); + + if (disableCreditFlowCtrl && pEndpoint->TxCreditFlowEnabled) { + pEndpoint->TxCreditFlowEnabled = false; + HTC_TRACE("SVC:0x%4.4X ep:%d TX flow control disabled", + pEndpoint->service_id, assignedEndpoint); + } + + } while (false); + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-htc_connect_service\n")); + + return status; +} +qdf_export_symbol(htc_connect_service); + +void htc_set_credit_distribution(HTC_HANDLE HTCHandle, + void *pCreditDistContext, + HTC_CREDIT_DIST_CALLBACK CreditDistFunc, + HTC_CREDIT_INIT_CALLBACK CreditInitFunc, + HTC_SERVICE_ID ServicePriorityOrder[], + int ListLength) +{ + /* NOT Supported, this transport does not use a credit based flow + * control mechanism + */ + +} + +void htc_fw_event_handler(void *context, QDF_STATUS status) +{ + HTC_TARGET *target = (HTC_TARGET *) context; + struct htc_init_info *initInfo = &target->HTCInitInfo; + + /* check if target failure handler exists and pass error code to it. */ + if (target->HTCInitInfo.TargetFailure) + initInfo->TargetFailure(initInfo->pContext, status); +} + + +void htc_set_async_ep(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID htc_ep_id, bool value) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint = &target->endpoint[htc_ep_id]; + + pEndpoint->async_update = value; + qdf_print("%s: htc_handle %pK, ep %d, value %d", __func__, + HTCHandle, htc_ep_id, value); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/init_deinit/dispatcher/inc/dispatcher_init_deinit.h b/drivers/staging/qca-wifi-host-cmn/init_deinit/dispatcher/inc/dispatcher_init_deinit.h new file mode 100644 index 0000000000000000000000000000000000000000..e0f23b132ada759b4a53c3860a99b0a36a6a12ee --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/init_deinit/dispatcher/inc/dispatcher_init_deinit.h @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file provides various init/deinit trigger point for new + * components. + */ + +#if !defined(__DISPATCHER_INIT_H) +#define __DISPATCHER_INIT_H + +#include +#include +#include +#include + +/* Function pointer for spectral pdev open handler */ +typedef QDF_STATUS (*spectral_pdev_open_handler)( + struct wlan_objmgr_pdev *pdev); + +/** + * dispatcher_init(): API to init all new components + * + * This API calls all new components init APIs. This is invoked + * from HDD/OS_If layer during: + * 1) Driver load sequence + * 2) before probing the attached device. + * 3) FW is not ready + * 4) WMI channel is not established + * + * A component can't communicate with FW during init stage. + * + * Return: none + */ +QDF_STATUS dispatcher_init(void); + +/** + * dispatcher_deinit(): API to de-init all new components + * + * This API calls all new components de-init APIs. This is invoked + * from HDD/OS_If layer during: + * 1) Driver unload sequence + * 2) FW is dead + * 3) WMI channel is destroyed + * 4) all PDEV and PSOC objects are destroyed + * + * A component can't communicate with FW during de-init stage. + * + * Return: none + */ +QDF_STATUS dispatcher_deinit(void); + +/** + * dispatcher_enable(): global (above psoc) level component start + * + * Prepare components to service requests. Must only be called after + * dispatcher_init(). + * + * Return: QDF_STATUS + */ +QDF_STATUS dispatcher_enable(void); + +/** + * dispatcher_disable(): global (above psoc) level component stop + * + * Stop components from servicing requests. Must be called before + * scheduler_deinit(). + * + * Return: QDF_STATUS + */ +QDF_STATUS dispatcher_disable(void); + +/** + * dispatcher_psoc_open(): API to trigger PSOC open for all new components + * @psoc: psoc context + * + * This API calls all new components PSOC OPEN APIs. This is invoked from + * HDD/OS_If layer during: + * 1) Driver load sequence + * 2) PSOC object is created + * 3) FW is not yet ready + * 4) WMI channel is not yet established with FW + * + * PSOC open happens before FW WMI ready and hence a component can't + * communicate with FW during PSOC open sequence. + * + * Return: none + */ +QDF_STATUS dispatcher_psoc_open(struct wlan_objmgr_psoc *psoc); + +/** + * dispatcher_psoc_close(): API to trigger PSOC close for all new components + * @psoc: psoc context + * + * This API calls all new components PSOC CLOSE APIs. This is invoked from + * HDD/OS_If layer during: + * 1) Driver unload sequence + * 2) PSOC object is destroyed + * 3) FW is already dead(PDEV suspended) + * 4) WMI channel is destroyed with FW + * + * A component can't communicate with FW during PSOC close. + * + * Return: none + */ +QDF_STATUS dispatcher_psoc_close(struct wlan_objmgr_psoc *psoc); + +/** + * dispatcher_psoc_enable(): API to trigger PSOC enable(start) for all new + * components + * @psoc: psoc context + * + * This API calls all new components PSOC enable(start) APIs. This is invoked + * from HDD/OS_If layer during: + * 1) Driver load sequence + * 2) PSOC object is created + * 3) WMI endpoint and WMI channel is ready with FW + * 4) WMI FW ready event is also received from FW. + * + * FW is already ready and WMI channel is established by this time so a + * component can communicate with FW during PSOC enable sequence. + * + * Return: none + */ +QDF_STATUS dispatcher_psoc_enable(struct wlan_objmgr_psoc *psoc); + +/** + * dispatcher_psoc_disable(): API to trigger PSOC disable(stop) for all new + * components + * @psoc: psoc context + * + * This API calls all new components PSOC disable(stop) APIs. This is invoked + * from HDD/OS_If layer during: + * 1) Driver unload sequence + * 2) WMI channel is still available + * 3) FW is still running and up + * 4) PSOC object is not destroyed + * + * A component should abort all its ongign transaction with FW at this stage + * for example scan component needs to abort all its ongoing scan in FW because + * is goign to be stopped very soon. + * + * Return: none + */ +QDF_STATUS dispatcher_psoc_disable(struct wlan_objmgr_psoc *psoc); + +/** + * dispatcher_pdev_open(): API to trigger PDEV open for all new components + * @pdev: pdev context + * + * This API calls all new components PDEV OPEN APIs. This is invoked from + * during PDEV object is created. + * + * Return: none + */ +QDF_STATUS dispatcher_pdev_open(struct wlan_objmgr_pdev *pdev); + +/** + * dispatcher_pdev_close(): API to trigger PDEV close for all new components + * @pdev: pdev context + * + * This API calls all new components PDEV CLOSE APIs. This is invoked from + * during driver unload sequence. + * + * Return: none + */ +QDF_STATUS dispatcher_pdev_close(struct wlan_objmgr_pdev *pdev); + +/** + * dispatcher_register_spectral_pdev_open_handler(): + * API to register spectral pdev open handler + * @handler: pdev open handler + * + * This API registers spectral pdev open handler. + * + * Return: none + */ +QDF_STATUS dispatcher_register_spectral_pdev_open_handler(QDF_STATUS (*handler) + (struct wlan_objmgr_pdev *pdev)); + +#endif /* End of !defined(__DISPATCHER_INIT_H) */ diff --git a/drivers/staging/qca-wifi-host-cmn/init_deinit/dispatcher/src/dispatcher_init_deinit.c b/drivers/staging/qca-wifi-host-cmn/init_deinit/dispatcher/src/dispatcher_init_deinit.c new file mode 100644 index 0000000000000000000000000000000000000000..b06da00f509a49930e2aef3a572f357faf4a1f12 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/init_deinit/dispatcher/src/dispatcher_init_deinit.c @@ -0,0 +1,1202 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "cfg_dispatcher.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef WLAN_ATF_ENABLE +#include +#endif +#ifdef QCA_SUPPORT_SON +#include +#endif +#ifdef WLAN_SA_API_ENABLE +#include +#endif +#ifdef WIFI_POS_CONVERGED +#include "wifi_pos_api.h" +#endif /* WIFI_POS_CONVERGED */ +#include +#ifdef WLAN_CONV_CRYPTO_SUPPORTED +#include "wlan_crypto_main.h" +#endif +#ifdef DFS_COMPONENT_ENABLE +#include +#endif + +#ifdef WLAN_OFFCHAN_TXRX_ENABLE +#include +#endif + +#ifdef WLAN_SUPPORT_SPLITMAC +#include +#endif +#ifdef WLAN_CONV_SPECTRAL_ENABLE +#include +#endif +#ifdef WLAN_SUPPORT_FILS +#include +#endif + +#ifdef WLAN_SUPPORT_GREEN_AP +#include +#endif + +#ifdef QCA_SUPPORT_CP_STATS +#include +#endif + +#ifdef WLAN_CFR_ENABLE +#include +#endif + +#ifdef FEATURE_COEX +#include +#endif + +#include + +/** + * DOC: This file provides various init/deinit trigger point for new + * components. + */ + +/* All new components needs to replace their dummy init/deinit + * psoc_open, psco_close, psoc_enable and psoc_disable APIs once + * their actual handlers are ready + */ + +spectral_pdev_open_handler dispatcher_spectral_pdev_open_handler_cb; + +#ifdef WLAN_CFR_ENABLE +static QDF_STATUS dispatcher_init_cfr(void) +{ + return wlan_cfr_init(); +} + +static QDF_STATUS dispatcher_deinit_cfr(void) +{ + return wlan_cfr_deinit(); +} + +static QDF_STATUS dispatcher_cfr_pdev_open(struct wlan_objmgr_pdev *pdev) +{ + return wlan_cfr_pdev_open(pdev); +} + +static QDF_STATUS dispatcher_cfr_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + return wlan_cfr_pdev_close(pdev); +} +#else +static QDF_STATUS dispatcher_init_cfr(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_cfr(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_cfr_pdev_open(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_cfr_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +#endif + +#ifdef QCA_SUPPORT_CP_STATS +static QDF_STATUS dispatcher_init_cp_stats(void) +{ + return wlan_cp_stats_init(); +} + +static QDF_STATUS dispatcher_deinit_cp_stats(void) +{ + return wlan_cp_stats_deinit(); +} + +static QDF_STATUS cp_stats_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return wlan_cp_stats_open(psoc); +} + +static QDF_STATUS cp_stats_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return wlan_cp_stats_close(psoc); +} + +static QDF_STATUS cp_stats_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_cp_stats_enable(psoc); +} + +static QDF_STATUS cp_stats_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_cp_stats_disable(psoc); +} +#else +static QDF_STATUS dispatcher_init_cp_stats(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_cp_stats(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS cp_stats_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS cp_stats_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS cp_stats_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS cp_stats_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined QCA_SUPPORT_SON && QCA_SUPPORT_SON >= 1 +static QDF_STATUS dispatcher_init_son(void) +{ + return wlan_son_init(); +} +static QDF_STATUS son_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return wlan_son_psoc_open(psoc); +} +static QDF_STATUS dispatcher_deinit_son(void) +{ + return wlan_son_deinit(); +} + +static QDF_STATUS son_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return wlan_son_psoc_close(psoc); +} +#else +static QDF_STATUS dispatcher_init_son(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_son(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS son_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS son_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +#endif /* END of QCA_SUPPORT_SON */ + +static QDF_STATUS dispatcher_regulatory_init(void) +{ + return wlan_regulatory_init(); +} + +static QDF_STATUS dispatcher_regulatory_deinit(void) +{ + return wlan_regulatory_deinit(); +} + +static QDF_STATUS dispatcher_regulatory_psoc_open(struct wlan_objmgr_psoc + *psoc) +{ + return regulatory_psoc_open(psoc); +} + +static QDF_STATUS dispatcher_regulatory_psoc_close(struct wlan_objmgr_psoc + *psoc) +{ + return regulatory_psoc_close(psoc); +} + +#if defined(WLAN_CONV_SPECTRAL_ENABLE) && defined(SPECTRAL_MODULIZED_ENABLE) +QDF_STATUS dispatcher_register_spectral_pdev_open_handler( + spectral_pdev_open_handler handler) +{ + dispatcher_spectral_pdev_open_handler_cb = handler; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(dispatcher_register_spectral_pdev_open_handler); + +static QDF_STATUS dispatcher_spectral_pdev_open(struct wlan_objmgr_pdev + *pdev) +{ + return dispatcher_spectral_pdev_open_handler_cb(pdev); +} + +static QDF_STATUS dispatcher_spectral_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS dispatcher_spectral_pdev_open(struct wlan_objmgr_pdev + *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_spectral_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +static QDF_STATUS dispatcher_regulatory_pdev_open(struct wlan_objmgr_pdev + *pdev) +{ + return regulatory_pdev_open(pdev); +} + +static QDF_STATUS dispatcher_regulatory_pdev_close(struct wlan_objmgr_pdev + *pdev) +{ + return regulatory_pdev_close(pdev); +} + +#ifdef WLAN_SA_API_ENABLE +static QDF_STATUS dispatcher_init_sa_api(void) +{ + return wlan_sa_api_init(); +} + +static QDF_STATUS dispatcher_deinit_sa_api(void) +{ + return wlan_sa_api_deinit(); +} + +static QDF_STATUS sa_api_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_sa_api_enable(psoc); +} + +static QDF_STATUS sa_api_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_sa_api_disable(psoc); +} +#else +static QDF_STATUS dispatcher_init_sa_api(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_sa_api(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS sa_api_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS sa_api_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* END of WLAN_SA_API_ENABLE */ + + +#ifdef WLAN_ATF_ENABLE +static QDF_STATUS dispatcher_init_atf(void) +{ + return wlan_atf_init(); +} + +static QDF_STATUS dispatcher_deinit_atf(void) +{ + return wlan_atf_deinit(); +} + +static QDF_STATUS atf_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return wlan_atf_open(psoc); +} + +static QDF_STATUS atf_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return wlan_atf_close(psoc); +} + +static QDF_STATUS atf_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_atf_enable(psoc); +} + +static QDF_STATUS atf_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_atf_disable(psoc); +} +#else +static QDF_STATUS dispatcher_init_atf(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_atf(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS atf_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS atf_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS atf_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS atf_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* END of WLAN_ATF_ENABLE */ + +#ifdef WLAN_CONV_CRYPTO_SUPPORTED +static QDF_STATUS dispatcher_init_crypto(void) +{ + return wlan_crypto_init(); +} + +static QDF_STATUS dispatcher_deinit_crypto(void) +{ + return wlan_crypto_deinit(); +} + +static QDF_STATUS dispatcher_crypto_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_crypto_psoc_enable(psoc); +} + +static QDF_STATUS dispatcher_crypto_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_crypto_psoc_disable(psoc); +} +#else +static QDF_STATUS dispatcher_init_crypto(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_crypto(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_crypto_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_crypto_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* END of WLAN_CONV_CRYPTO_SUPPORTED */ + +#ifdef WIFI_POS_CONVERGED +static QDF_STATUS dispatcher_init_wifi_pos(void) +{ + return wifi_pos_init(); +} + +static QDF_STATUS dispatcher_deinit_wifi_pos(void) +{ + return wifi_pos_deinit(); +} + +static QDF_STATUS dispatcher_wifi_pos_enable(struct wlan_objmgr_psoc *psoc) +{ + return wifi_pos_psoc_enable(psoc); +} + +static QDF_STATUS dispatcher_wifi_pos_disable(struct wlan_objmgr_psoc *psoc) +{ + return wifi_pos_psoc_disable(psoc); +} +#else +static QDF_STATUS dispatcher_init_wifi_pos(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_wifi_pos(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_wifi_pos_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_wifi_pos_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef DFS_COMPONENT_ENABLE +static QDF_STATUS dispatcher_init_dfs(void) +{ + return dfs_init(); +} + +static QDF_STATUS dispatcher_deinit_dfs(void) +{ + return dfs_deinit(); +} + +static QDF_STATUS dispatcher_dfs_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return wifi_dfs_psoc_enable(psoc); +} + +static QDF_STATUS dispatcher_dfs_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return wifi_dfs_psoc_disable(psoc); +} +#else +static QDF_STATUS dispatcher_init_dfs(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_dfs(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_dfs_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_dfs_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef WLAN_OFFCHAN_TXRX_ENABLE +static QDF_STATUS dispatcher_offchan_txrx_init(void) +{ + return wlan_offchan_txrx_init(); +} + +static QDF_STATUS dispatcher_offchan_txrx_deinit(void) +{ + return wlan_offchan_txrx_deinit(); +} +#else +static QDF_STATUS dispatcher_offchan_txrx_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_offchan_txrx_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif /*WLAN_OFFCHAN_TXRX_ENABLE*/ + +#ifdef WLAN_SUPPORT_SPLITMAC +static QDF_STATUS dispatcher_splitmac_init(void) +{ + return wlan_splitmac_init(); +} + +static QDF_STATUS dispatcher_splitmac_deinit(void) +{ + return wlan_splitmac_deinit(); +} +#else +static QDF_STATUS dispatcher_splitmac_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_splitmac_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUPPORT_SPLITMAC */ + +#ifdef WLAN_CONV_SPECTRAL_ENABLE +#ifndef SPECTRAL_MODULIZED_ENABLE +static QDF_STATUS dispatcher_spectral_init(void) +{ + return wlan_spectral_init(); +} + +static QDF_STATUS dispatcher_spectral_deinit(void) +{ + return wlan_spectral_deinit(); +} +#else +static QDF_STATUS dispatcher_spectral_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_spectral_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#else +static QDF_STATUS dispatcher_spectral_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_spectral_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef DIRECT_BUF_RX_ENABLE +static QDF_STATUS dispatcher_dbr_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_tx_ops *tx_ops; + + tx_ops = wlan_psoc_get_lmac_if_txops(psoc); + if (tx_ops->dbr_tx_ops.direct_buf_rx_register_events) + return tx_ops->dbr_tx_ops.direct_buf_rx_register_events(psoc); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_dbr_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_tx_ops *tx_ops; + + tx_ops = wlan_psoc_get_lmac_if_txops(psoc); + if (tx_ops->dbr_tx_ops.direct_buf_rx_unregister_events) + return tx_ops->dbr_tx_ops.direct_buf_rx_unregister_events(psoc); + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS dispatcher_dbr_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_dbr_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* DIRECT_BUF_RX_ENABLE */ + +#ifdef WLAN_SUPPORT_GREEN_AP +static QDF_STATUS dispatcher_green_ap_init(void) +{ + return wlan_green_ap_init(); +} + +static QDF_STATUS dispatcher_green_ap_pdev_open( + struct wlan_objmgr_pdev *pdev) +{ + return wlan_green_ap_pdev_open(pdev); +} + +/* Only added this for symmetry */ +static QDF_STATUS dispatcher_green_ap_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_green_ap_deinit(void) +{ + return wlan_green_ap_deinit(); +} +#else +static QDF_STATUS dispatcher_green_ap_init(void) +{ + return QDF_STATUS_SUCCESS; +} +static QDF_STATUS dispatcher_green_ap_pdev_open( + struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +/* Only added this for symmetry */ +static QDF_STATUS dispatcher_green_ap_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_green_ap_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef WLAN_SUPPORT_FILS +static QDF_STATUS dispatcher_fd_init(void) +{ + return wlan_fd_init(); +} + +static QDF_STATUS dispatcher_fd_deinit(void) +{ + return wlan_fd_deinit(); +} + +static QDF_STATUS fd_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_fd_enable(psoc); +} + +static QDF_STATUS fd_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_fd_disable(psoc); +} +#else +static QDF_STATUS dispatcher_fd_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_fd_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS fd_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS fd_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUPPORT_FILS */ + +#ifdef FEATURE_COEX +static QDF_STATUS dispatcher_coex_init(void) +{ + return wlan_coex_init(); +} + +static QDF_STATUS dispatcher_coex_deinit(void) +{ + return wlan_coex_deinit(); +} + +static QDF_STATUS dispatcher_coex_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return wlan_coex_psoc_open(psoc); +} + +static QDF_STATUS dispatcher_coex_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return wlan_coex_psoc_close(psoc); +} +#else +static inline QDF_STATUS dispatcher_coex_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS dispatcher_coex_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +dispatcher_coex_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +dispatcher_coex_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* FEATURE_COEX */ + +QDF_STATUS dispatcher_init(void) +{ + if (QDF_STATUS_SUCCESS != wlan_objmgr_global_obj_init()) + goto out; + + if (QDF_STATUS_SUCCESS != wlan_mgmt_txrx_init()) + goto mgmt_txrx_init_fail; + + if (QDF_STATUS_SUCCESS != ucfg_scan_init()) + goto ucfg_scan_init_fail; + + if (QDF_STATUS_SUCCESS != wlan_serialization_init()) + goto serialization_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_crypto()) + goto crypto_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_cp_stats()) + goto cp_stats_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_atf()) + goto atf_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_sa_api()) + goto sa_api_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_wifi_pos()) + goto wifi_pos_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_dfs()) + goto dfs_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_regulatory_init()) + goto regulatory_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_offchan_txrx_init()) + goto offchan_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_son()) + goto son_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_splitmac_init()) + goto splitmac_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_fd_init()) + goto fd_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_green_ap_init()) + goto green_ap_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_ftm_init()) + goto ftm_init_fail; + + if (QDF_IS_STATUS_ERROR(cfg_dispatcher_init())) + goto cfg_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_spectral_init()) + goto spectral_init_fail; + + if (QDF_STATUS_SUCCESS != wlan_cmn_mlme_init()) + goto cmn_mlme_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_cfr()) + goto cfr_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_coex_init()) + goto coex_init_fail; + + if (QDF_STATUS_SUCCESS != wlan_gpio_init()) + goto gpio_init_fail; + + /* + * scheduler INIT has to be the last as each component's + * initialization has to happen first and then at the end + * scheduler needs to start accepting the service. + */ + if (QDF_STATUS_SUCCESS != scheduler_init()) + goto scheduler_init_fail; + + return QDF_STATUS_SUCCESS; + +scheduler_init_fail: + wlan_gpio_deinit(); +gpio_init_fail: + dispatcher_coex_deinit(); +coex_init_fail: + dispatcher_deinit_cfr(); +cfr_init_fail: + wlan_cmn_mlme_deinit(); +cmn_mlme_init_fail: + dispatcher_spectral_deinit(); +spectral_init_fail: + cfg_dispatcher_deinit(); +cfg_init_fail: + dispatcher_ftm_deinit(); +ftm_init_fail: + dispatcher_green_ap_deinit(); +green_ap_init_fail: + dispatcher_fd_deinit(); +fd_init_fail: + dispatcher_splitmac_deinit(); +splitmac_init_fail: + dispatcher_deinit_son(); +son_init_fail: + dispatcher_offchan_txrx_deinit(); +offchan_init_fail: + dispatcher_regulatory_deinit(); +regulatory_init_fail: + dispatcher_deinit_dfs(); +dfs_init_fail: + dispatcher_deinit_wifi_pos(); +wifi_pos_init_fail: + dispatcher_deinit_sa_api(); +sa_api_init_fail: + dispatcher_deinit_atf(); +atf_init_fail: + dispatcher_deinit_cp_stats(); +cp_stats_init_fail: + dispatcher_deinit_crypto(); +crypto_init_fail: + wlan_serialization_deinit(); +serialization_init_fail: + ucfg_scan_deinit(); +ucfg_scan_init_fail: + wlan_mgmt_txrx_deinit(); +mgmt_txrx_init_fail: + wlan_objmgr_global_obj_deinit(); + +out: + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(dispatcher_init); + +QDF_STATUS dispatcher_deinit(void) +{ + QDF_STATUS status; + + QDF_BUG(QDF_STATUS_SUCCESS == scheduler_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == wlan_gpio_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_coex_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_cfr()); + + QDF_BUG(QDF_STATUS_SUCCESS == wlan_cmn_mlme_deinit()); + + status = cfg_dispatcher_deinit(); + QDF_BUG(QDF_IS_STATUS_SUCCESS(status)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_ftm_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_green_ap_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_fd_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_spectral_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_splitmac_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_son()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_offchan_txrx_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_regulatory_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_dfs()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_wifi_pos()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_sa_api()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_atf()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_cp_stats()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_crypto()); + + QDF_BUG(QDF_STATUS_SUCCESS == wlan_serialization_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == ucfg_scan_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == wlan_mgmt_txrx_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == wlan_objmgr_global_obj_deinit()); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(dispatcher_deinit); + +QDF_STATUS dispatcher_enable(void) +{ + QDF_STATUS status; + + status = scheduler_enable(); + + return status; +} +qdf_export_symbol(dispatcher_enable); + +QDF_STATUS dispatcher_disable(void) +{ + QDF_BUG(QDF_IS_STATUS_SUCCESS(scheduler_disable())); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(dispatcher_disable); + +QDF_STATUS dispatcher_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + if (QDF_STATUS_SUCCESS != wlan_mgmt_txrx_psoc_open(psoc)) + goto out; + + if (QDF_STATUS_SUCCESS != ucfg_scan_psoc_open(psoc)) + goto scan_psoc_open_fail; + + if (QDF_STATUS_SUCCESS != cp_stats_psoc_open(psoc)) + goto cp_stats_psoc_open_fail; + + if (QDF_STATUS_SUCCESS != atf_psoc_open(psoc)) + goto atf_psoc_open_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_regulatory_psoc_open(psoc)) + goto regulatory_psoc_open_fail; + + if (QDF_STATUS_SUCCESS != son_psoc_open(psoc)) + goto psoc_son_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_ftm_psoc_open(psoc)) + goto ftm_psoc_open_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_coex_psoc_open(psoc)) + goto coex_psoc_open_fail; + + return QDF_STATUS_SUCCESS; + +coex_psoc_open_fail: + dispatcher_ftm_psoc_close(psoc); +ftm_psoc_open_fail: + son_psoc_close(psoc); +psoc_son_fail: + regulatory_psoc_close(psoc); +regulatory_psoc_open_fail: + atf_psoc_close(psoc); +atf_psoc_open_fail: + cp_stats_psoc_close(psoc); +cp_stats_psoc_open_fail: + ucfg_scan_psoc_close(psoc); +scan_psoc_open_fail: + wlan_mgmt_txrx_psoc_close(psoc); + +out: + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(dispatcher_psoc_open); + +QDF_STATUS dispatcher_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_coex_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_ftm_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == son_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_regulatory_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == atf_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == cp_stats_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == ucfg_scan_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == wlan_mgmt_txrx_psoc_close(psoc)); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(dispatcher_psoc_close); + +QDF_STATUS dispatcher_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + if (QDF_STATUS_SUCCESS != wlan_serialization_psoc_enable(psoc)) + goto out; + + if (QDF_STATUS_SUCCESS != ucfg_scan_psoc_enable(psoc)) + goto serialization_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != sa_api_psoc_enable(psoc)) + goto sa_api_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != cp_stats_psoc_enable(psoc)) + goto cp_stats_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != atf_psoc_enable(psoc)) + goto atf_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_wifi_pos_enable(psoc)) + goto wifi_pos_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_dfs_psoc_enable(psoc)) + goto wifi_dfs_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != fd_psoc_enable(psoc)) + goto fd_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_dbr_psoc_enable(psoc)) + goto dbr_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_crypto_psoc_enable(psoc)) + goto crypto_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != wlan_mlme_psoc_enable(psoc)) + goto mlme_psoc_enable_fail; + + return QDF_STATUS_SUCCESS; + +mlme_psoc_enable_fail: + dispatcher_crypto_psoc_disable(psoc); +crypto_psoc_enable_fail: + dispatcher_dbr_psoc_disable(psoc); +dbr_psoc_enable_fail: + fd_psoc_disable(psoc); +fd_psoc_enable_fail: + dispatcher_dfs_psoc_disable(psoc); +wifi_dfs_psoc_enable_fail: + dispatcher_wifi_pos_disable(psoc); +wifi_pos_psoc_enable_fail: + atf_psoc_disable(psoc); +atf_psoc_enable_fail: + cp_stats_psoc_disable(psoc); +cp_stats_psoc_enable_fail: + sa_api_psoc_disable(psoc); +sa_api_psoc_enable_fail: + ucfg_scan_psoc_disable(psoc); +serialization_psoc_enable_fail: + wlan_serialization_psoc_disable(psoc); +out: + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(dispatcher_psoc_enable); + +QDF_STATUS dispatcher_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + QDF_BUG(QDF_STATUS_SUCCESS == wlan_mlme_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_crypto_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_dbr_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == fd_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_dfs_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_wifi_pos_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == atf_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == cp_stats_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == sa_api_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == ucfg_scan_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == wlan_serialization_psoc_disable(psoc)); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(dispatcher_psoc_disable); + +QDF_STATUS dispatcher_pdev_open(struct wlan_objmgr_pdev *pdev) +{ + QDF_STATUS status; + + if (QDF_STATUS_SUCCESS != dispatcher_regulatory_pdev_open(pdev)) + goto regulatory_pdev_open_fail; + + status = dispatcher_spectral_pdev_open(pdev); + if (status != QDF_STATUS_SUCCESS && status != QDF_STATUS_COMP_DISABLED) + goto spectral_pdev_open_fail; + + status = dispatcher_cfr_pdev_open(pdev); + if (status != QDF_STATUS_SUCCESS && status != QDF_STATUS_COMP_DISABLED) + goto cfr_pdev_open_fail; + + if (QDF_STATUS_SUCCESS != wlan_mgmt_txrx_pdev_open(pdev)) + goto mgmt_txrx_pdev_open_fail; + if (QDF_IS_STATUS_ERROR(dispatcher_green_ap_pdev_open(pdev))) + goto green_ap_pdev_open_fail; + + return QDF_STATUS_SUCCESS; + +green_ap_pdev_open_fail: + wlan_mgmt_txrx_pdev_close(pdev); +mgmt_txrx_pdev_open_fail: + dispatcher_cfr_pdev_close(pdev); +cfr_pdev_open_fail: + dispatcher_spectral_pdev_close(pdev); +spectral_pdev_open_fail: + dispatcher_regulatory_pdev_close(pdev); +regulatory_pdev_open_fail: + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(dispatcher_pdev_open); + +QDF_STATUS dispatcher_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + QDF_STATUS status; + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_green_ap_pdev_close(pdev)); + + QDF_BUG(QDF_STATUS_SUCCESS == wlan_mgmt_txrx_pdev_close(pdev)); + + status = dispatcher_cfr_pdev_close(pdev); + QDF_BUG((QDF_STATUS_SUCCESS == status) || + (QDF_STATUS_COMP_DISABLED == status)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_spectral_pdev_close(pdev)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_regulatory_pdev_close(pdev)); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(dispatcher_pdev_close); diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/coex/inc/wlan_cfg80211_coex.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/coex/inc/wlan_cfg80211_coex.h new file mode 100644 index 0000000000000000000000000000000000000000..cd3e6bd43240249608e994fdd3dda8edcc310701 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/coex/inc/wlan_cfg80211_coex.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declares driver functions interfacing with linux kernel + */ + +#ifndef _WLAN_CFG80211_COEX_H_ +#define _WLAN_CFG80211_COEX_H_ +#include +#include + +#ifdef FEATURE_COEX +int wlan_cfg80211_coex_set_btc_chain_mode(struct wlan_objmgr_vdev *vdev, + const void *data, int data_len); +#else +static inline int +wlan_cfg80211_coex_set_btc_chain_mode(struct wlan_objmgr_vdev *vdev, + const void *data, int data_len) +{ + return -ENOTSUPP; +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/coex/src/wlan_cfg80211_coex.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/coex/src/wlan_cfg80211_coex.c new file mode 100644 index 0000000000000000000000000000000000000000..754b16796e48947850d595d64e169ab0042b783e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/coex/src/wlan_cfg80211_coex.c @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: defines driver functions interfacing with linux kernel + */ +#include +#include +#include +#include +#include +#include +#include + +static const struct nla_policy +btc_chain_mode_policy[QCA_VENDOR_ATTR_BTC_CHAIN_MODE_MAX + 1] = { + [QCA_VENDOR_ATTR_BTC_CHAIN_MODE] = {.type = NLA_U32}, + [QCA_VENDOR_ATTR_BTC_CHAIN_MODE_RESTART] = {.type = NLA_FLAG}, +}; + +static int +__wlan_cfg80211_coex_set_btc_chain_mode(struct wlan_objmgr_vdev *vdev, + uint8_t mode, bool do_restart) +{ + QDF_STATUS status; + uint8_t cur_mode; + int err; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev_tmp; + int vdev_id; + struct coex_psoc_obj *coex_obj; + + if (!vdev) { + coex_err("Null vdev"); + return -EINVAL; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + coex_err("NULL psoc"); + return -EINVAL; + } + + coex_obj = wlan_psoc_get_coex_obj(psoc); + if (!coex_obj) + return -EINVAL; + + status = ucfg_coex_psoc_get_btc_chain_mode(psoc, &cur_mode); + if (QDF_IS_STATUS_ERROR(status)) { + coex_err("failed to get cur BTC chain mode, status %d", status); + return -EFAULT; + } + + if (cur_mode == mode) + return -EALREADY; + + status = ucfg_coex_psoc_set_btc_chain_mode(psoc, mode); + if (!QDF_IS_STATUS_SUCCESS(status)) { + coex_err("unable to set BTC chain mode to %d", mode); + return -EFAULT; + } + + wlan_objmgr_for_each_psoc_vdev(psoc, vdev_id, vdev_tmp) { + status = ucfg_coex_send_btc_chain_mode(vdev, mode); + err = qdf_status_to_os_return(status); + if (err) { + coex_err("Failed to set btc chain mode to %d for vdev %d", + mode, vdev_id); + return err; + } + coex_debug("Set btc chain mode to %d for vdev %d", + mode, vdev_id); + + if (!do_restart) + continue; + + wlan_coex_config_updated(vdev, COEX_CONFIG_BTC_CHAIN_MODE); + } + + return 0; +} + +/** + * wlan_hdd_cfg80211_set_btc_chain_mode() - set btc chain mode + * @wiphy: pointer to wireless wiphy structure. + * @wdev: pointer to wireless_dev structure. + * @data: pointer to btc chain mode command parameters. + * @data_len: the length in byte of btc chain mode command parameters. + * + * Return: An error code or 0 on success. + */ +int wlan_cfg80211_coex_set_btc_chain_mode(struct wlan_objmgr_vdev *vdev, + const void *data, int data_len) +{ + struct nlattr *tb[QCA_VENDOR_ATTR_BTC_CHAIN_MODE_MAX + 1]; + uint32_t mode; + bool restart; + + if (wlan_cfg80211_nla_parse(tb, QCA_VENDOR_ATTR_BTC_CHAIN_MODE_MAX, + data, data_len, btc_chain_mode_policy)) { + coex_err("Invalid btc chain mode ATTR"); + return -EINVAL; + } + + if (!tb[QCA_VENDOR_ATTR_BTC_CHAIN_MODE]) { + coex_err("btc chain mode - no attr mode"); + return -EINVAL; + } + + mode = nla_get_u32(tb[QCA_VENDOR_ATTR_BTC_CHAIN_MODE]); + if (mode < QCA_BTC_CHAIN_SHARED || mode > QCA_BTC_CHAIN_SEPARATED) { + coex_err("Invalid btc chain mode %d", mode); + return -EINVAL; + } + + restart = nla_get_flag(tb[QCA_VENDOR_ATTR_BTC_CHAIN_MODE_RESTART]); + + coex_debug("vdev_id %u mode %u restart %u", + wlan_vdev_get_id(vdev), mode, restart); + + return __wlan_cfg80211_coex_set_btc_chain_mode(vdev, mode, restart); +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/inc/wlan_cfg80211_ic_cp_stats.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/inc/wlan_cfg80211_ic_cp_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..66ab78badd06806048dc5d4fbca8888e05286508 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/inc/wlan_cfg80211_ic_cp_stats.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cfg80211_ic_cp_stats.h + * + * This Header file provide declaration for cfg80211 command handler API + * registered cp stats and specific with ic + */ + +#ifndef __WLAN_CFG80211_IC_CP_STATS_H__ +#define __WLAN_CFG80211_IC_CP_STATS_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include +#include +#ifdef WLAN_ATF_ENABLE +#include +#endif +#include + +/** + * wlan_cfg80211_get_peer_cp_stats() - API to get peer stats object + * @peer_obj: peer object as input + * @peer_cp_stats: peer stats object to populate + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_get_peer_cp_stats(struct wlan_objmgr_peer *peer_obj, + struct peer_ic_cp_stats *peer_cp_stats); + +/** + * wlan_cfg80211_get_vdev_cp_stats() - API to get vdev stats object + * @vdev_obj: vdev object as input + * @vdev_cp_stats: vdev stats object to populate + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_get_vdev_cp_stats(struct wlan_objmgr_vdev *vdev_obj, + struct vdev_ic_cp_stats *vdev_cp_stats); + +/** + * wlan_cfg80211_get_pdev_cp_stats() - API to get pdev cp stats object + * @pdev_obj: pdev object as input + * @pdev_cp_stats: pdev cp stats object to populate + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_get_pdev_cp_stats(struct wlan_objmgr_pdev *pdev_obj, + struct pdev_ic_cp_stats *pdev_cp_stats); + +#ifdef WLAN_ATF_ENABLE +/** + * wlan_cfg80211_get_peer_atf_cp_stats() - API to get ATF peer stats object + * @peer_obj: peer object as input + * @atf_cp_stats: atf peer cp stats object to populate + * + * Return: 0 on success, negative value on failure + */ +int +wlan_cfg80211_get_atf_peer_cp_stats(struct wlan_objmgr_peer *peer_obj, + struct atf_peer_cp_stats *atf_cp_stats); + +/** + * wlan_cfg80211_get_peer_atf_cp_stats_from_mac() - API to get ATF peer + * stats object from peer mac address + * @vdev_obj: vdev object as input + * @mac: peer mac address as input + * @atf_cp_stats: atf peer cp stats object to populate + * + * API used from ucfg layer to get ATF peer cp stats object when only peer + * mac address is available + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_get_atf_peer_cp_stats_from_mac( + struct wlan_objmgr_vdev *vdev_obj, + uint8_t *mac, + struct atf_peer_cp_stats *atf_cp_stats); +#endif /* WLAN_ATF_ENABLE */ + +/** + * wlan_cfg80211_get_dcs_pdev_cp_stats() - API to get DCS chan stats + * @pdev_obj: pdev object as input + * @dcs_chan_stats: DCS pdev stats object to populate + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_get_dcs_pdev_cp_stats( + struct wlan_objmgr_pdev *pdev_obj, + struct pdev_dcs_chan_stats *dcs_chan_stats); + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CFG80211_IC_CP_STATS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/inc/wlan_cfg80211_mc_cp_stats.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/inc/wlan_cfg80211_mc_cp_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..9b44112c80e4a76799c1e3762da25420f2519508 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/inc/wlan_cfg80211_mc_cp_stats.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cfg80211_mc_cp_stats.h + * + * This Header file provide declaration for cfg80211 command handler API + * registered cp stats and specific with ic + */ + +#ifndef __WLAN_CFG80211_MC_CP_STATS_H__ +#define __WLAN_CFG80211_MC_CP_STATS_H__ + +#ifdef QCA_SUPPORT_CP_STATS + +/* forward declaration */ +struct wiphy; +struct wlan_objmgr_psoc; + +/** + * wlan_cfg80211_mc_cp_stats_get_wakelock_stats() - API to request wake lock + * stats. Stats are returned to user space via vender event + * @psoc: Pointer to psoc + * @wiphy: wiphy pointer + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_mc_cp_stats_get_wakelock_stats(struct wlan_objmgr_psoc *psoc, + struct wiphy *wiphy); + +/** + * wlan_cfg80211_mc_cp_stats_get_tx_power() - API to fetch tx power + * @vdev: Pointer to vdev + * @dbm: Pointer to TX power in dbm + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_mc_cp_stats_get_tx_power(struct wlan_objmgr_vdev *vdev, + int *dbm); +#ifdef WLAN_FEATURE_MIB_STATS +/** + * wlan_cfg80211_mc_cp_stats_get_mib_stats() - API to get mib stats + * statistics from firmware + * @vdev: Pointer to vdev + * @errno: error type in case of failure + * + * Callers of this API must call wlan_cfg80211_mc_cp_stats_free_stats_event + * API. + * Return: stats buffer on success, Null on failure + */ +struct stats_event * +wlan_cfg80211_mc_cp_stats_get_mib_stats(struct wlan_objmgr_vdev *vdev, + int *errno); +#endif + +/** + * wlan_cfg80211_mc_cp_stats_get_station_stats() - API to get station + * statistics to firmware + * @vdev: Pointer to vdev + * @errno: error type in case of failure + * + * Call of this API must call wlan_cfg80211_mc_cp_stats_free_stats_event + * API when done with information provided by info. + * Return: stats buffer on success, Null on failure + */ +struct stats_event * +wlan_cfg80211_mc_cp_stats_get_station_stats(struct wlan_objmgr_vdev *vdev, + int *errno); + +/** + * wlan_cfg80211_mc_cp_stats_free_stats_event() - API to release station + * statistics buffer + * @vdev: Pointer to vdev + * @info: pointer to object to populate with station stats + * + * Return: None + */ +void wlan_cfg80211_mc_cp_stats_free_stats_event(struct stats_event *info); + +/** + * wlan_cfg80211_mc_cp_stats_get_peer_rssi() - API to fetch peer rssi + * @vdev: Pointer to vdev + * @macaddress: mac address + * @errno: error type in case of failure + * + * Call of this API must call wlan_cfg80211_mc_cp_stats_free_stats_event + * API when done with information provided by rssi_info. + * Return: stats buffer on success, Null on failure + */ +struct stats_event * +wlan_cfg80211_mc_cp_stats_get_peer_rssi(struct wlan_objmgr_vdev *vdev, + uint8_t *macaddress, int *errno); +#else +static inline int wlan_cfg80211_mc_cp_stats_get_tx_power( + struct wlan_objmgr_vdev *vdev, + int *dbm) +{ + return 0; +} + +static inline int wlan_cfg80211_mc_cp_stats_get_wakelock_stats( + struct wlan_objmgr_psoc *psoc, + struct wiphy *wiphy) +{ + return 0; +} + +static inline struct stats_event * +wlan_cfg80211_mc_cp_stats_get_peer_rssi(struct wlan_objmgr_vdev *vdev, + uint8_t *macaddress, int *errno) +{ + return NULL; +} + +static inline void wlan_cfg80211_mc_cp_stats_free_stats_event( + struct stats_event *info) +{} + +static inline struct stats_event * +wlan_cfg80211_mc_cp_stats_get_station_stats(struct wlan_objmgr_vdev *vdev, + int *errno) +{ + return NULL; +} +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CFG80211_MC_CP_STATS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/src/wlan_cfg80211_ic_cp_stats.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/src/wlan_cfg80211_ic_cp_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..df38bb1ad8f7b90b1cb689e6ab482a2acee4e704 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/src/wlan_cfg80211_ic_cp_stats.c @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cfg80211_ic_cp_stats.c + * + * This file provide definitions to os_if cp_stats APIs + */ +#include +#include +#include +#include + +int wlan_cfg80211_get_peer_cp_stats(struct wlan_objmgr_peer *peer_obj, + struct peer_ic_cp_stats *peer_cp_stats) +{ + QDF_STATUS status; + + if (!peer_obj) { + osif_err("Invalid input, peer obj NULL"); + return -EINVAL; + } + + if (!peer_cp_stats) { + osif_err("Invalid input, peer cp obj is NULL"); + return -EINVAL; + } + + status = wlan_ucfg_get_peer_cp_stats(peer_obj, peer_cp_stats); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("wlan_cfg80211_get_peer_cp_stats status: %d", + status); + } + + return qdf_status_to_os_return(status); +} + +int wlan_cfg80211_get_vdev_cp_stats(struct wlan_objmgr_vdev *vdev_obj, + struct vdev_ic_cp_stats *vdev_cp_stats) +{ + QDF_STATUS status; + + if (!vdev_obj) { + osif_err("Invalid input, vdev obj is NULL"); + return -EINVAL; + } + + if (!vdev_cp_stats) { + osif_err("Invalid input, vdev cp obj is NULL"); + return -EINVAL; + } + + status = wlan_ucfg_get_vdev_cp_stats(vdev_obj, vdev_cp_stats); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("wlan_cfg80211_get_vdev_cp_stats status: %d", + status); + } + + return qdf_status_to_os_return(status); +} + +int wlan_cfg80211_get_pdev_cp_stats(struct wlan_objmgr_pdev *pdev_obj, + struct pdev_ic_cp_stats *pdev_cp_stats) +{ + QDF_STATUS status; + + if (!pdev_obj) { + osif_err("Invalid input, pdev obj is NULL"); + return -EINVAL; + } + + if (!pdev_cp_stats) { + osif_err("Invalid input, pdev cp obj is NULL"); + return -EINVAL; + } + + status = wlan_ucfg_get_pdev_cp_stats(pdev_obj, pdev_cp_stats); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("wlan_cfg80211_get_pdev_cp_stats status: %d", + status); + } + + return qdf_status_to_os_return(status); +} + +qdf_export_symbol(wlan_cfg80211_get_pdev_cp_stats); + +#ifdef WLAN_ATF_ENABLE +int +wlan_cfg80211_get_atf_peer_cp_stats(struct wlan_objmgr_peer *peer_obj, + struct atf_peer_cp_stats *atf_cp_stats) +{ + QDF_STATUS status; + + if (!peer_obj) { + osif_err("Invalid input, peer obj is NULL"); + return -EINVAL; + } + + if (!atf_cp_stats) { + osif_err("Invalid input, ATF peer cp obj is NULL!"); + return -EINVAL; + } + + status = wlan_ucfg_get_atf_peer_cp_stats(peer_obj, atf_cp_stats); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("wlan_cfg80211_get_atf_peer_cp_stats status: %d", + status); + } + + return qdf_status_to_os_return(status); +} + +int wlan_cfg80211_get_atf_peer_cp_stats_from_mac( + struct wlan_objmgr_vdev *vdev_obj, + uint8_t *mac, + struct atf_peer_cp_stats *atf_cp_stats) +{ + QDF_STATUS status; + + if (!vdev_obj) { + osif_err("Invalid input, vdev obj is NULL"); + return -EINVAL; + } + + if (!mac) { + osif_err("Invalid input, peer mac is NULL"); + return -EINVAL; + } + + if (!atf_cp_stats) { + osif_err("Invalid input, ATF peer cp stats obj is NULL"); + return -EINVAL; + } + + status = wlan_ucfg_get_atf_peer_cp_stats_from_mac(vdev_obj, mac, + atf_cp_stats); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("wlan_cfg80211_get_cp_stats_from_mac status: %d", + status); + } + + return qdf_status_to_os_return(status); +} +#endif + +int +wlan_cfg80211_get_dcs_pdev_cp_stats(struct wlan_objmgr_pdev *pdev_obj, + struct pdev_dcs_chan_stats *dcs_chan_stats) +{ + QDF_STATUS status; + + if (!pdev_obj) { + osif_err("Invalid input, pdev obj is NULL"); + return -EINVAL; + } + + if (!dcs_chan_stats) { + osif_err("Invalid input, dcs chan stats is NULL"); + return -EINVAL; + } + + status = wlan_ucfg_get_dcs_chan_stats(pdev_obj, dcs_chan_stats); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("wlan_cfg80211_get_dcs_pdev_cp_stats status: %d", + status); + } + + return qdf_status_to_os_return(status); +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/src/wlan_cfg80211_mc_cp_stats.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/src/wlan_cfg80211_mc_cp_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..81eb1efd6aa97bcef47ebd0b5621f4e3824e348c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/src/wlan_cfg80211_mc_cp_stats.c @@ -0,0 +1,734 @@ +/* + * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cfg80211_mc_cp_stats.c + * + * This file provide definitions to cp stats supported cfg80211 cmd handlers + */ + +#include +#include +#include +#include +#include "wlan_osif_request_manager.h" +#include "wlan_objmgr_peer_obj.h" + +/* max time in ms, caller may wait for stats request get serviced */ +#define CP_STATS_WAIT_TIME_STAT 800 + +#ifdef WLAN_FEATURE_MIB_STATS +/** + * wlan_free_mib_stats() - free allocations for mib stats + * @stats: Pointer to stats event statucture + * + * Return: None + */ +static void wlan_free_mib_stats(struct stats_event *stats) +{ + qdf_mem_free(stats->mib_stats); + stats->mib_stats = NULL; +} +#else +static void wlan_free_mib_stats(struct stats_event *stats) +{ +} +#endif + +/** + * wlan_cfg80211_mc_cp_stats_dealloc() - callback to free priv + * allocations for stats + * @priv: Pointer to priv data statucture + * + * Return: None + */ +static void wlan_cfg80211_mc_cp_stats_dealloc(void *priv) +{ + struct stats_event *stats = priv; + + if (!stats) { + osif_err("stats is NULL"); + return; + } + + qdf_mem_free(stats->pdev_stats); + qdf_mem_free(stats->peer_stats); + qdf_mem_free(stats->cca_stats); + qdf_mem_free(stats->vdev_summary_stats); + qdf_mem_free(stats->vdev_chain_rssi); + qdf_mem_free(stats->peer_adv_stats); + wlan_free_mib_stats(stats); +} + +/** + * wlan_cfg80211_mc_cp_stats_send_wake_lock_stats() - API to send wakelock stats + * @wiphy: wiphy pointer + * @stats: stats data to be sent + * + * Return: 0 on success, error number otherwise. + */ +static int wlan_cfg80211_mc_cp_stats_send_wake_lock_stats(struct wiphy *wiphy, + struct wake_lock_stats *stats) +{ + struct sk_buff *skb; + uint32_t nl_buf_len; + uint32_t icmpv6_cnt; + uint32_t ipv6_rx_multicast_addr_cnt; + uint32_t total_rx_data_wake, rx_multicast_cnt; + + nl_buf_len = NLMSG_HDRLEN; + nl_buf_len += QCA_WLAN_VENDOR_GET_WAKE_STATS_MAX * + (NLMSG_HDRLEN + sizeof(uint32_t)); + + skb = wlan_cfg80211_vendor_cmd_alloc_reply_skb(wiphy, nl_buf_len); + + if (!skb) { + osif_err("cfg80211_vendor_cmd_alloc_reply_skb failed"); + return -ENOMEM; + } + + osif_debug("wow_ucast_wake_up_count %d", + stats->ucast_wake_up_count); + osif_debug("wow_bcast_wake_up_count %d", + stats->bcast_wake_up_count); + osif_debug("wow_ipv4_mcast_wake_up_count %d", + stats->ipv4_mcast_wake_up_count); + osif_debug("wow_ipv6_mcast_wake_up_count %d", + stats->ipv6_mcast_wake_up_count); + osif_debug("wow_ipv6_mcast_ra_stats %d", + stats->ipv6_mcast_ra_stats); + osif_debug("wow_ipv6_mcast_ns_stats %d", + stats->ipv6_mcast_ns_stats); + osif_debug("wow_ipv6_mcast_na_stats %d", + stats->ipv6_mcast_na_stats); + osif_debug("wow_icmpv4_count %d", + stats->icmpv4_count); + osif_debug("wow_icmpv6_count %d", + stats->icmpv6_count); + osif_debug("wow_rssi_breach_wake_up_count %d", + stats->rssi_breach_wake_up_count); + osif_debug("wow_low_rssi_wake_up_count %d", + stats->low_rssi_wake_up_count); + osif_debug("wow_gscan_wake_up_count %d", + stats->gscan_wake_up_count); + osif_debug("wow_pno_complete_wake_up_count %d", + stats->pno_complete_wake_up_count); + osif_debug("wow_pno_match_wake_up_count %d", + stats->pno_match_wake_up_count); + + ipv6_rx_multicast_addr_cnt = stats->ipv6_mcast_wake_up_count; + icmpv6_cnt = stats->icmpv6_count; + rx_multicast_cnt = stats->ipv4_mcast_wake_up_count + + ipv6_rx_multicast_addr_cnt; + total_rx_data_wake = stats->ucast_wake_up_count + + stats->bcast_wake_up_count + rx_multicast_cnt; + + if (nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TOTAL_CMD_EVENT_WAKE, 0) || + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_CMD_EVENT_WAKE_CNT_PTR, 0) || + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_CMD_EVENT_WAKE_CNT_SZ, 0) || + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TOTAL_DRIVER_FW_LOCAL_WAKE, + 0) || + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_DRIVER_FW_LOCAL_WAKE_CNT_PTR, + 0) || + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_DRIVER_FW_LOCAL_WAKE_CNT_SZ, + 0) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_TOTAL_RX_DATA_WAKE, + total_rx_data_wake) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_RX_UNICAST_CNT, + stats->ucast_wake_up_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_RX_MULTICAST_CNT, + rx_multicast_cnt) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_RX_BROADCAST_CNT, + stats->bcast_wake_up_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_ICMP_PKT, + stats->icmpv4_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_ICMP6_PKT, + icmpv6_cnt) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_ICMP6_RA, + stats->ipv6_mcast_ra_stats) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_ICMP6_NA, + stats->ipv6_mcast_na_stats) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_ICMP6_NS, + stats->ipv6_mcast_ns_stats) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_ICMP4_RX_MULTICAST_CNT, + stats->ipv4_mcast_wake_up_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_ICMP6_RX_MULTICAST_CNT, + ipv6_rx_multicast_addr_cnt) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_RSSI_BREACH_CNT, + stats->rssi_breach_wake_up_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_LOW_RSSI_CNT, + stats->low_rssi_wake_up_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_GSCAN_CNT, + stats->gscan_wake_up_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_PNO_COMPLETE_CNT, + stats->pno_complete_wake_up_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_PNO_MATCH_CNT, + stats->pno_match_wake_up_count)) { + osif_err("nla put fail"); + goto nla_put_failure; + } + + wlan_cfg80211_vendor_cmd_reply(skb); + return 0; + +nla_put_failure: + wlan_cfg80211_vendor_free_skb(skb); + return -EINVAL; +} + +int wlan_cfg80211_mc_cp_stats_get_wakelock_stats(struct wlan_objmgr_psoc *psoc, + struct wiphy *wiphy) +{ + /* refer __wlan_hdd_cfg80211_get_wakelock_stats */ + QDF_STATUS status; + struct wake_lock_stats stats = {0}; + + status = ucfg_mc_cp_stats_get_psoc_wake_lock_stats(psoc, &stats); + if (QDF_IS_STATUS_ERROR(status)) + return qdf_status_to_os_return(status); + + return wlan_cfg80211_mc_cp_stats_send_wake_lock_stats(wiphy, &stats); +} + +struct tx_power_priv { + int dbm; +}; + +/** + * get_tx_power_cb() - "Get tx power" callback function + * @tx_power: tx_power + * @cookie: a cookie for the request context + * + * Return: None + */ +static void get_tx_power_cb(int tx_power, void *cookie) +{ + struct osif_request *request; + struct tx_power_priv *priv; + + request = osif_request_get(cookie); + if (!request) { + osif_err("Obsolete request"); + return; + } + + priv = osif_request_priv(request); + priv->dbm = tx_power; + osif_request_complete(request); + osif_request_put(request); +} + +int wlan_cfg80211_mc_cp_stats_get_tx_power(struct wlan_objmgr_vdev *vdev, + int *dbm) +{ + int ret = 0; + void *cookie; + QDF_STATUS status; + struct request_info info = {0}; + struct wlan_objmgr_peer *peer; + struct tx_power_priv *priv = NULL; + struct osif_request *request = NULL; + static const struct osif_request_params params = { + .priv_size = sizeof(*priv), + .timeout_ms = CP_STATS_WAIT_TIME_STAT, + }; + + request = osif_request_alloc(¶ms); + if (!request) { + osif_err("Request allocation failure, return cached value"); + goto fetch_tx_power; + } + + cookie = osif_request_cookie(request); + info.cookie = cookie; + info.u.get_tx_power_cb = get_tx_power_cb; + info.vdev_id = wlan_vdev_get_id(vdev); + info.pdev_id = wlan_objmgr_pdev_get_pdev_id(wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_vdev_try_get_bsspeer(vdev, WLAN_CP_STATS_ID); + if (!peer) { + ret = -EINVAL; + goto peer_is_null; + } + qdf_mem_copy(info.peer_mac_addr, peer->macaddr, QDF_MAC_ADDR_SIZE); + + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); + + status = ucfg_mc_cp_stats_send_stats_request(vdev, + TYPE_CONNECTION_TX_POWER, + &info); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("wlan_mc_cp_stats_request_tx_power status: %d", + status); + ret = qdf_status_to_os_return(status); + } else { + ret = osif_request_wait_for_response(request); + if (ret) + osif_err("wait failed or timed out ret: %d", ret); + else + priv = osif_request_priv(request); + } + +fetch_tx_power: + if (priv) { + *dbm = priv->dbm; + } else { + status = ucfg_mc_cp_stats_get_tx_power(vdev, dbm); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("ucfg_mc_cp_stats_get_tx_power status: %d", + status); + ret = qdf_status_to_os_return(status); + } + } + +peer_is_null: + /* + * either we never sent a request, we sent a request and + * received a response or we sent a request and timed out. + * regardless we are done with the request. + */ + if (request) + osif_request_put(request); + + return ret; +} + +/** + * get_peer_rssi_cb() - get_peer_rssi_cb callback function + * @ev: peer stats buffer + * @cookie: a cookie for the request context + * + * Return: None + */ +static void get_peer_rssi_cb(struct stats_event *ev, void *cookie) +{ + struct stats_event *priv; + struct osif_request *request; + uint32_t rssi_size; + + request = osif_request_get(cookie); + if (!request) { + osif_err("Obsolete request"); + return; + } + + priv = osif_request_priv(request); + rssi_size = sizeof(*ev->peer_stats) * ev->num_peer_stats; + if (rssi_size == 0) { + osif_err("Invalid rssi stats"); + goto get_peer_rssi_cb_fail; + } + + priv->peer_stats = qdf_mem_malloc(rssi_size); + if (!priv->peer_stats) + goto get_peer_rssi_cb_fail; + + priv->num_peer_stats = ev->num_peer_stats; + qdf_mem_copy(priv->peer_stats, ev->peer_stats, rssi_size); + +get_peer_rssi_cb_fail: + osif_request_complete(request); + osif_request_put(request); +} + +struct stats_event * +wlan_cfg80211_mc_cp_stats_get_peer_rssi(struct wlan_objmgr_vdev *vdev, + uint8_t *mac_addr, + int *errno) +{ + void *cookie; + QDF_STATUS status; + struct stats_event *priv, *out; + struct request_info info = {0}; + struct osif_request *request = NULL; + static const struct osif_request_params params = { + .priv_size = sizeof(*priv), + .timeout_ms = CP_STATS_WAIT_TIME_STAT, + .dealloc = wlan_cfg80211_mc_cp_stats_dealloc, + }; + + out = qdf_mem_malloc(sizeof(*out)); + if (!out) { + *errno = -ENOMEM; + return NULL; + } + + request = osif_request_alloc(¶ms); + if (!request) { + osif_err("Request allocation failure, return cached value"); + *errno = -ENOMEM; + qdf_mem_free(out); + return NULL; + } + + cookie = osif_request_cookie(request); + priv = osif_request_priv(request); + info.cookie = cookie; + info.u.get_peer_rssi_cb = get_peer_rssi_cb; + info.vdev_id = wlan_vdev_get_id(vdev); + info.pdev_id = wlan_objmgr_pdev_get_pdev_id(wlan_vdev_get_pdev(vdev)); + qdf_mem_copy(info.peer_mac_addr, mac_addr, QDF_MAC_ADDR_SIZE); + status = ucfg_mc_cp_stats_send_stats_request(vdev, TYPE_PEER_STATS, + &info); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("stats req failed: %d", status); + *errno = qdf_status_to_os_return(status); + goto get_peer_rssi_fail; + } + + *errno = osif_request_wait_for_response(request); + if (*errno) { + osif_err("wait failed or timed out ret: %d", *errno); + goto get_peer_rssi_fail; + } + + if (!priv->peer_stats || priv->num_peer_stats == 0) { + osif_err("Invalid peer stats, count %d, data %pK", + priv->num_peer_stats, priv->peer_stats); + *errno = -EINVAL; + goto get_peer_rssi_fail; + } + out->num_peer_stats = priv->num_peer_stats; + out->peer_stats = priv->peer_stats; + priv->peer_stats = NULL; + osif_request_put(request); + + return out; + +get_peer_rssi_fail: + osif_request_put(request); + wlan_cfg80211_mc_cp_stats_free_stats_event(out); + + return NULL; +} + +/** + * get_station_stats_cb() - get_station_stats_cb callback function + * @ev: station stats buffer + * @cookie: a cookie for the request context + * + * Return: None + */ +static void get_station_stats_cb(struct stats_event *ev, void *cookie) +{ + struct stats_event *priv; + struct osif_request *request; + uint32_t summary_size, rssi_size, peer_adv_size; + + request = osif_request_get(cookie); + if (!request) { + osif_err("Obsolete request"); + return; + } + + priv = osif_request_priv(request); + summary_size = sizeof(*ev->vdev_summary_stats) * ev->num_summary_stats; + rssi_size = sizeof(*ev->vdev_chain_rssi) * ev->num_chain_rssi_stats; + peer_adv_size = sizeof(*ev->peer_adv_stats) * ev->num_peer_adv_stats; + + if (summary_size == 0 || rssi_size == 0) { + osif_err("Invalid stats, summary %d rssi %d", + summary_size, rssi_size); + goto station_stats_cb_fail; + } + if (priv->vdev_summary_stats || priv->vdev_chain_rssi || + priv->peer_adv_stats) { + osif_err("invalid context cookie %pK request %pK", + cookie, request); + goto station_stats_cb_fail; + } + + priv->vdev_summary_stats = qdf_mem_malloc(summary_size); + if (!priv->vdev_summary_stats) + goto station_stats_cb_fail; + + priv->vdev_chain_rssi = qdf_mem_malloc(rssi_size); + if (!priv->vdev_chain_rssi) + goto station_stats_cb_fail; + + if (peer_adv_size) { + priv->peer_adv_stats = qdf_mem_malloc(peer_adv_size); + if (!priv->peer_adv_stats) + goto station_stats_cb_fail; + + qdf_mem_copy(priv->peer_adv_stats, ev->peer_adv_stats, + peer_adv_size); + } + + priv->num_summary_stats = ev->num_summary_stats; + priv->num_chain_rssi_stats = ev->num_chain_rssi_stats; + priv->tx_rate = ev->tx_rate; + priv->rx_rate = ev->rx_rate; + priv->tx_rate_flags = ev->tx_rate_flags; + priv->num_peer_adv_stats = ev->num_peer_adv_stats; + qdf_mem_copy(priv->vdev_chain_rssi, ev->vdev_chain_rssi, rssi_size); + qdf_mem_copy(priv->vdev_summary_stats, ev->vdev_summary_stats, + summary_size); + +station_stats_cb_fail: + osif_request_complete(request); + osif_request_put(request); +} + +struct stats_event * +wlan_cfg80211_mc_cp_stats_get_station_stats(struct wlan_objmgr_vdev *vdev, + int *errno) +{ + void *cookie; + QDF_STATUS status; + struct stats_event *priv, *out; + struct wlan_objmgr_peer *peer; + struct osif_request *request; + struct request_info info = {0}; + static const struct osif_request_params params = { + .priv_size = sizeof(*priv), + .timeout_ms = 2 * CP_STATS_WAIT_TIME_STAT, + .dealloc = wlan_cfg80211_mc_cp_stats_dealloc, + }; + + osif_debug("Enter"); + + out = qdf_mem_malloc(sizeof(*out)); + if (!out) { + *errno = -ENOMEM; + return NULL; + } + + request = osif_request_alloc(¶ms); + if (!request) { + qdf_mem_free(out); + *errno = -ENOMEM; + return NULL; + } + + cookie = osif_request_cookie(request); + priv = osif_request_priv(request); + info.cookie = cookie; + info.u.get_station_stats_cb = get_station_stats_cb; + info.vdev_id = wlan_vdev_get_id(vdev); + info.pdev_id = wlan_objmgr_pdev_get_pdev_id(wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_vdev_try_get_bsspeer(vdev, WLAN_CP_STATS_ID); + if (!peer) { + osif_err("peer is null"); + *errno = -EINVAL; + goto get_station_stats_fail; + } + qdf_mem_copy(info.peer_mac_addr, peer->macaddr, QDF_MAC_ADDR_SIZE); + + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); + + status = ucfg_mc_cp_stats_send_stats_request(vdev, TYPE_STATION_STATS, + &info); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("Failed to send stats request status: %d", status); + *errno = qdf_status_to_os_return(status); + goto get_station_stats_fail; + } + + *errno = osif_request_wait_for_response(request); + if (*errno) { + osif_err("wait failed or timed out ret: %d", *errno); + goto get_station_stats_fail; + } + + if (!priv->vdev_summary_stats || !priv->vdev_chain_rssi || + priv->num_summary_stats == 0 || priv->num_chain_rssi_stats == 0) { + osif_err("Invalid stats"); + osif_err("summary %d:%pK, rssi %d:%pK", + priv->num_summary_stats, priv->vdev_summary_stats, + priv->num_chain_rssi_stats, priv->vdev_chain_rssi); + *errno = -EINVAL; + goto get_station_stats_fail; + } + + out->tx_rate = priv->tx_rate; + out->rx_rate = priv->rx_rate; + out->tx_rate_flags = priv->tx_rate_flags; + out->num_summary_stats = priv->num_summary_stats; + out->num_chain_rssi_stats = priv->num_chain_rssi_stats; + out->vdev_summary_stats = priv->vdev_summary_stats; + priv->vdev_summary_stats = NULL; + out->vdev_chain_rssi = priv->vdev_chain_rssi; + priv->vdev_chain_rssi = NULL; + out->num_peer_adv_stats = priv->num_peer_adv_stats; + if (priv->peer_adv_stats) + out->peer_adv_stats = priv->peer_adv_stats; + priv->peer_adv_stats = NULL; + osif_request_put(request); + + osif_debug("Exit"); + + return out; + +get_station_stats_fail: + osif_request_put(request); + wlan_cfg80211_mc_cp_stats_free_stats_event(out); + + osif_debug("Exit"); + + return NULL; +} + +#ifdef WLAN_FEATURE_MIB_STATS +/** + * get_mib_stats_cb() - get mib stats from fw callback function + * @ev: mib stats buffer + * @cookie: a cookie for the request context + * + * Return: None + */ +static void get_mib_stats_cb(struct stats_event *ev, void *cookie) +{ + struct stats_event *priv; + struct osif_request *request; + + request = osif_request_get(cookie); + if (!request) { + osif_err("Obsolete request"); + return; + } + + priv = osif_request_priv(request); + + priv->mib_stats = qdf_mem_malloc(sizeof(*ev->mib_stats)); + if (!priv->mib_stats) + goto get_mib_stats_cb_fail; + + priv->num_mib_stats = ev->num_mib_stats; + qdf_mem_copy(priv->mib_stats, ev->mib_stats, sizeof(*ev->mib_stats)); + +get_mib_stats_cb_fail: + osif_request_complete(request); + osif_request_put(request); +} + +struct stats_event * +wlan_cfg80211_mc_cp_stats_get_mib_stats(struct wlan_objmgr_vdev *vdev, + int *errno) +{ + void *cookie; + QDF_STATUS status; + struct stats_event *priv, *out; + struct wlan_objmgr_peer *peer; + struct osif_request *request; + struct request_info info = {0}; + static const struct osif_request_params params = { + .priv_size = sizeof(*priv), + .timeout_ms = 2 * CP_STATS_WAIT_TIME_STAT, + .dealloc = wlan_cfg80211_mc_cp_stats_dealloc, + }; + + out = qdf_mem_malloc(sizeof(*out)); + if (!out) { + *errno = -ENOMEM; + return NULL; + } + + request = osif_request_alloc(¶ms); + if (!request) { + qdf_mem_free(out); + *errno = -ENOMEM; + return NULL; + } + + cookie = osif_request_cookie(request); + priv = osif_request_priv(request); + info.cookie = cookie; + info.u.get_mib_stats_cb = get_mib_stats_cb; + info.vdev_id = wlan_vdev_get_id(vdev); + info.pdev_id = wlan_objmgr_pdev_get_pdev_id(wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_vdev_try_get_bsspeer(vdev, WLAN_CP_STATS_ID); + if (!peer) { + osif_err("peer is null"); + *errno = -EINVAL; + goto get_mib_stats_fail; + } + qdf_mem_copy(info.peer_mac_addr, peer->macaddr, QDF_MAC_ADDR_SIZE); + + osif_debug("vdev id %d, pdev id %d, peer " QDF_MAC_ADDR_FMT, + info.vdev_id, info.pdev_id, + QDF_MAC_ADDR_REF(info.peer_mac_addr)); + + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); + + status = ucfg_mc_cp_stats_send_stats_request(vdev, TYPE_MIB_STATS, + &info); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("Failed to send stats request status: %d", status); + *errno = qdf_status_to_os_return(status); + goto get_mib_stats_fail; + } + + *errno = osif_request_wait_for_response(request); + if (*errno) { + osif_err("wait failed or timed out ret: %d", *errno); + goto get_mib_stats_fail; + } + + if (!priv->mib_stats || priv->num_mib_stats == 0 ) { + osif_err("Invalid mib stats %d:%pK", + priv->num_mib_stats, priv->mib_stats); + *errno = -EINVAL; + goto get_mib_stats_fail; + } + + out->num_mib_stats = priv->num_mib_stats; + out->mib_stats = priv->mib_stats; + priv->mib_stats = NULL; + + osif_request_put(request); + + return out; + +get_mib_stats_fail: + osif_request_put(request); + wlan_cfg80211_mc_cp_stats_free_stats_event(out); + + return NULL; +} +#endif + +void wlan_cfg80211_mc_cp_stats_free_stats_event(struct stats_event *stats) +{ + if (!stats) + return; + + qdf_mem_free(stats->pdev_stats); + qdf_mem_free(stats->peer_stats); + qdf_mem_free(stats->cca_stats); + qdf_mem_free(stats->vdev_summary_stats); + qdf_mem_free(stats->vdev_chain_rssi); + qdf_mem_free(stats->peer_adv_stats); + wlan_free_mib_stats(stats); + qdf_mem_free(stats); +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/crypto/inc/wlan_cfg80211_crypto.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/crypto/inc/wlan_cfg80211_crypto.h new file mode 100644 index 0000000000000000000000000000000000000000..18ad13aa8e7fec52d0b77a7f1f7249edd27f3542 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/crypto/inc/wlan_cfg80211_crypto.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2019,2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declares crypto functions interfacing with linux kernel + */ + +#ifndef _WLAN_CFG80211_CRYPTO_H_ +#define _WLAN_CFG80211_CRYPTO_H_ +#include +#include "wlan_crypto_global_def.h" +#ifdef CONFIG_CRYPTO_COMPONENT +/** + * wlan_cfg80211_set_default_key() - to set the default key to be used + * @vdev: VDEV Object pointer + * @key_index: Index to be set as the default + * @bssid: BSSID for which the key is to be set + * + * Return: Zero for success and negative for failure. + */ +int wlan_cfg80211_set_default_key(struct wlan_objmgr_vdev *vdev, + uint8_t key_index, + struct qdf_mac_addr *bssid); +#else +static inline int wlan_cfg80211_set_default_key(struct wlan_objmgr_vdev *vdev, + uint8_t key_index, + struct qdf_mac_addr *bssid) +{ + return 0; +} +#endif + +/** + * wlan_cfg80211_store_key() - Store the key + * @vdev: VDEV Object pointer + * @key_index: Index to be set as the default + * @key_type: denotes if the key is pairwise or group key + * @mac_addr: BSSID for which the key is to be set + * @key_params: Params received from the kernel + * + * Return: Zero for success and negative for failure. + */ +int wlan_cfg80211_store_key(struct wlan_objmgr_vdev *vdev, + uint8_t key_index, + enum wlan_crypto_key_type key_type, + const u8 *mac_addr, struct key_params *params); + +/** + * wlan_cfg80211_crypto_add_key() - Add key for the specified vdev + * @vdev: vdev object + * @key_type: denotes if the add key request is for pairwise or group key + * @key_index: Index of the key that needs to be added + * @sync: flag to indicate whether or not to add key synchronously. + * DO NOT set to true if it's in scheduler context. + * + * Return: Zero on Success, negative value on failure + */ +int wlan_cfg80211_crypto_add_key(struct wlan_objmgr_vdev *vdev, + enum wlan_crypto_key_type key_type, + uint8_t key_index, bool sync); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/crypto/inc/wlan_nl_to_crypto_params.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/crypto/inc/wlan_nl_to_crypto_params.h new file mode 100644 index 0000000000000000000000000000000000000000..c557394256311ce81471fc7f0a13319ed7de5796 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/crypto/inc/wlan_nl_to_crypto_params.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_nl_to_crypto_params.h + * + * Conversion of NL param type to Crypto param type APIs implementation + * + */ + +/** + * osif_nl_to_crypto_auth_type() - populate auth type for crypto + * @auth_type: NL auth_type + * + * set the crypto auth type for corresponding auth type received + * from NL + * + * Return: crypto auth type + */ +wlan_crypto_auth_mode +osif_nl_to_crypto_auth_type(enum nl80211_auth_type auth_type); + +/** + * osif_nl_to_crypto_akm_type() - populate akm type for crypto + * @akm_type: NL akm_type + * + * set the crypto akm type for corresponding akm type received + * from NL + * + * Return: crypto akm type + */ +wlan_crypto_key_mgmt osif_nl_to_crypto_akm_type(u32 key_mgmt); + +/** + * osif_nl_to_crypto_cipher_type() - populate cipher type for crypto + * @cipher: NL cipher type + * + * set the crypto cipher type for corresponding cipher type received + * from NL. + * + * Return: crypto cipher type + */ +enum wlan_crypto_cipher_type osif_nl_to_crypto_cipher_type(u32 cipher); + +/** + * osif_nl_to_crypto_cipher_len() - return the cipher length + * @cipher: NL cipher type + * + * Check the cipher type and return the corresponding length + * + * Return: crypto cipher length, negative value for failure + */ +int osif_nl_to_crypto_cipher_len(u32 cipher); diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/crypto/src/wlan_cfg80211_crypto.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/crypto/src/wlan_cfg80211_crypto.c new file mode 100644 index 0000000000000000000000000000000000000000..02085d287726e647ce30e614ea86bbdb7604c07a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/crypto/src/wlan_cfg80211_crypto.c @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: defines crypto driver functions interfacing with linux kernel + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_cfg80211_crypto.h" +#include +#include + +static void wlan_cfg80211_translate_key(struct wlan_objmgr_vdev *vdev, + uint8_t key_index, + enum wlan_crypto_key_type key_type, + const u8 *mac_addr, + struct key_params *params, + struct wlan_crypto_key *crypto_key) +{ + qdf_mem_zero(crypto_key, sizeof(*crypto_key)); + crypto_key->keylen = params->key_len; + crypto_key->keyix = key_index; + osif_debug("key_type %d, opmode %d, key_len %d, seq_len %d", + key_type, vdev->vdev_mlme.vdev_opmode, + params->key_len, params->seq_len); + qdf_mem_copy(&crypto_key->keyval[0], params->key, params->key_len); + qdf_mem_copy(&crypto_key->keyrsc[0], params->seq, params->seq_len); + + crypto_key->cipher_type = osif_nl_to_crypto_cipher_type(params->cipher); + if (IS_WEP_CIPHER(crypto_key->cipher_type) && !mac_addr) { + /* + * This is a valid scenario in case of WEP, where-in the + * keys are passed by the user space during the connect request + * but since we did not connect yet, so we do not know the peer + * address yet. + */ + osif_debug("No Mac Address to copy"); + return; + } + if (key_type == WLAN_CRYPTO_KEY_TYPE_UNICAST) { + qdf_mem_copy(&crypto_key->macaddr, mac_addr, QDF_MAC_ADDR_SIZE); + } else { + if ((vdev->vdev_mlme.vdev_opmode == QDF_STA_MODE) || + (vdev->vdev_mlme.vdev_opmode == QDF_P2P_CLIENT_MODE)) + qdf_mem_copy(&crypto_key->macaddr, mac_addr, + QDF_MAC_ADDR_SIZE); + else + qdf_mem_copy(&crypto_key->macaddr, + vdev->vdev_mlme.macaddr, + QDF_MAC_ADDR_SIZE); + } + osif_debug("mac "QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(crypto_key->macaddr)); +} + +int wlan_cfg80211_store_key(struct wlan_objmgr_vdev *vdev, + uint8_t key_index, + enum wlan_crypto_key_type key_type, + const u8 *mac_addr, struct key_params *params) +{ + struct wlan_crypto_key *crypto_key = NULL; + enum wlan_crypto_cipher_type cipher; + int cipher_len; + QDF_STATUS status; + + if (!vdev) { + osif_err("vdev is NULL"); + return -EINVAL; + } + if (!params) { + osif_err("Key params is NULL"); + return -EINVAL; + } + cipher_len = osif_nl_to_crypto_cipher_len(params->cipher); + if (cipher_len < 0 || params->key_len < cipher_len) { + osif_err("cipher length %d less than reqd len %d", + params->key_len, cipher_len); + return -EINVAL; + } + cipher = osif_nl_to_crypto_cipher_type(params->cipher); + if (!IS_WEP_CIPHER(cipher)) { + if ((key_type == WLAN_CRYPTO_KEY_TYPE_UNICAST) && + !mac_addr) { + osif_err("mac_addr is NULL for pairwise Key"); + return -EINVAL; + } + } + status = wlan_crypto_validate_key_params(cipher, key_index, + params->key_len, + params->seq_len); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("Invalid key params"); + return -EINVAL; + } + + /* + * key may already exist at times and may be retrieved only to + * update it. + */ + crypto_key = wlan_crypto_get_key(vdev, key_index); + if (!crypto_key) { + crypto_key = qdf_mem_malloc(sizeof(*crypto_key)); + if (!crypto_key) + return -EINVAL; + } + + wlan_cfg80211_translate_key(vdev, key_index, key_type, mac_addr, + params, crypto_key); + + status = wlan_crypto_save_key(vdev, key_index, crypto_key); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("Failed to save key"); + qdf_mem_free(crypto_key); + return -EINVAL; + } + return 0; +} + +#define WLAN_WAIT_TIME_ADD_KEY 100 + +static void +wlan_cfg80211_crypto_add_key_cb(void *context, + struct crypto_add_key_result *result) +{ + struct osif_request *request; + struct crypto_add_key_result *priv; + + request = osif_request_get(context); + if (!request) { + osif_err("Obsolete request"); + return; + } + + priv = osif_request_priv(request); + qdf_mem_copy(priv, result, sizeof(*priv)); + osif_request_complete(request); + osif_request_put(request); +} + +int wlan_cfg80211_crypto_add_key(struct wlan_objmgr_vdev *vdev, + enum wlan_crypto_key_type key_type, + uint8_t key_index, bool sync) +{ + struct wlan_crypto_key *crypto_key; + QDF_STATUS status; + struct osif_request *request; + struct crypto_add_key_result *result; + struct wlan_crypto_comp_priv *priv; + int ret; + static const struct osif_request_params params = { + .priv_size = sizeof(*result), + .timeout_ms = WLAN_WAIT_TIME_ADD_KEY, + }; + + crypto_key = wlan_crypto_get_key(vdev, key_index); + if (!crypto_key) { + osif_err("Crypto KEY is NULL"); + return -EINVAL; + } + + if (sync) { + priv = wlan_get_vdev_crypto_obj(vdev); + if (!priv) { + osif_err("Invalid crypto_priv"); + return -EINVAL; + } + + request = osif_request_alloc(¶ms); + if (!request) { + osif_err("Request allocation failure"); + return -ENOMEM; + } + + priv->add_key_ctx = osif_request_cookie(request);; + priv->add_key_cb = wlan_cfg80211_crypto_add_key_cb; + + status = ucfg_crypto_set_key_req(vdev, crypto_key, key_type); + if (QDF_IS_STATUS_SUCCESS(status)) { + ret = osif_request_wait_for_response(request); + if (ret) { + osif_err("Target response timed out"); + } else { + result = osif_request_priv(request); + osif_debug("complete, vdev_id %u, ix: %u, flags: %u, status: %u", + result->vdev_id, result->key_ix, + result->key_flags, result->status); + } + } + + priv->add_key_ctx = NULL; + priv->add_key_cb = NULL; + osif_request_put(request); + } else { + status = ucfg_crypto_set_key_req(vdev, crypto_key, key_type); + } + + return qdf_status_to_os_return(status); +} + +#ifdef CONFIG_CRYPTO_COMPONENT +int wlan_cfg80211_set_default_key(struct wlan_objmgr_vdev *vdev, + uint8_t key_index, struct qdf_mac_addr *bssid) +{ + return wlan_crypto_default_key(vdev, (uint8_t *)bssid, + key_index, true); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/crypto/src/wlan_nl_to_crypto_params.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/crypto/src/wlan_nl_to_crypto_params.c new file mode 100644 index 0000000000000000000000000000000000000000..3e8430d5e884b03d5c588dc1fdde31831ae585f2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/crypto/src/wlan_nl_to_crypto_params.c @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_nl_to_crypto_params.c + * + * Conversion of NL param type to Crypto param type APIs implementation + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include "wlan_objmgr_vdev_obj.h" +#include + +#include "wlan_nl_to_crypto_params.h" +#include "wlan_crypto_global_def.h" + +/** + * struct osif_akm_crypto_mapping - mapping akm type received from + * NL to internal crypto type + * @akm_suite: NL akm type + * @akm_type_crypto: akm crypto type + * + * mapping akm type received from NL to internal crypto type + */ +struct osif_akm_type_crypto_mapping { + u32 akm_suite; + wlan_crypto_key_mgmt akm_type_crypto; +}; + +/** + * struct osif_cipher_crypto_mapping - mapping cipher type received from NL + * to internal crypto cipher type + * @cipher_suite: NL cipher type + * @cipher_crypto: cipher crypto type + * @cipher_len: Length of the cipher + * + * mapping cipher type received from NL to internal crypto cipher type + */ +struct osif_cipher_crypto_mapping { + u32 cipher_suite; + wlan_crypto_cipher_type cipher_crypto; + u32 cipher_len; +}; + +/** + * mapping table for auth type received from NL and cryto auth type + */ +static const wlan_crypto_auth_mode + osif_auth_type_crypto_mapping[] = { + [NL80211_AUTHTYPE_AUTOMATIC] = WLAN_CRYPTO_AUTH_AUTO, + [NL80211_AUTHTYPE_OPEN_SYSTEM] = WLAN_CRYPTO_AUTH_OPEN, + [NL80211_AUTHTYPE_FT] = WLAN_CRYPTO_AUTH_OPEN, + [NL80211_AUTHTYPE_SHARED_KEY] = WLAN_CRYPTO_AUTH_SHARED, + [NL80211_AUTHTYPE_NETWORK_EAP] = WLAN_CRYPTO_AUTH_8021X, +#if defined(WLAN_FEATURE_FILS_SK) && \ + (defined(CFG80211_FILS_SK_OFFLOAD_SUPPORT) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))) + [NL80211_AUTHTYPE_FILS_SK] = WLAN_CRYPTO_AUTH_FILS_SK, +#endif + [NL80211_AUTHTYPE_SAE] = WLAN_CRYPTO_AUTH_SAE, +}; + +/* mapping table for akm type received from NL and cryto akm type */ +static const struct osif_akm_type_crypto_mapping + osif_akm_type_crypto_mapping[] = { + { + .akm_suite = WLAN_AKM_SUITE_8021X, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_IEEE8021X, + }, + { + .akm_suite = WLAN_AKM_SUITE_PSK, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_PSK, + }, + { + .akm_suite = WLAN_AKM_SUITE_8021X_SHA256, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SHA256, + }, + { + .akm_suite = WLAN_AKM_SUITE_PSK_SHA256, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_PSK_SHA256, + }, + { + .akm_suite = WLAN_AKM_SUITE_SAE, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_SAE, + }, + { + .akm_suite = WLAN_AKM_SUITE_FT_OVER_SAE, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_FT_SAE, + }, +#if defined(WLAN_AKM_SUITE_FT_8021X) || \ + defined(FEATURE_WLAN_FT_IEEE8021X) + { + .akm_suite = WLAN_AKM_SUITE_FT_8021X, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_FT_IEEE8021X, + }, +#endif +#if defined(WLAN_AKM_SUITE_FT_PSK) || \ + defined(FEATURE_WLAN_FT_PSK) + { + .akm_suite = WLAN_AKM_SUITE_FT_PSK, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_FT_PSK, + }, +#endif +#ifdef FEATURE_WLAN_ESE + { +#ifndef WLAN_AKM_SUITE_CCKM +#define WLAN_AKM_SUITE_CCKM 0x00409600 +#endif + .akm_suite = WLAN_AKM_SUITE_CCKM, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_CCKM, + }, +#endif + { +#ifndef WLAN_AKM_SUITE_OSEN +#define WLAN_AKM_SUITE_OSEN 0x506f9a01 +#endif + .akm_suite = WLAN_AKM_SUITE_OSEN, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_OSEN, + }, +#if defined(WLAN_AKM_SUITE_8021X_SUITE_B) || \ + defined(FEATURE_WLAN_IEEE8021X_SUITE_B) + { + .akm_suite = WLAN_AKM_SUITE_8021X_SUITE_B, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B, + }, +#endif +#if defined(WLAN_AKM_SUITE_8021X_SUITE_B_192) || \ + defined(FEATURE_WLAN_IEEE8021X_SUITE_B) + { + .akm_suite = WLAN_AKM_SUITE_8021X_SUITE_B_192, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B_192, + }, +#endif +#if defined(WLAN_AKM_SUITE_FILS_SHA256) || \ + defined(FEATURE_WLAN_FILS) + { + .akm_suite = WLAN_AKM_SUITE_FILS_SHA256, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_FILS_SHA256, + }, +#endif +#if defined(WLAN_AKM_SUITE_FILS_SHA384) || \ + defined(FEATURE_WLAN_FILS) + { + .akm_suite = WLAN_AKM_SUITE_FILS_SHA384, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_FILS_SHA384, + }, +#endif +#if defined(WLAN_AKM_SUITE_FT_FILS_SHA256) || \ + defined(FEATURE_WLAN_FILS) + { + .akm_suite = WLAN_AKM_SUITE_FT_FILS_SHA256, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA256, + }, +#endif +#if defined(WLAN_AKM_SUITE_FT_FILS_SHA384) || \ + defined(FEATURE_WLAN_FILS) + { + .akm_suite = WLAN_AKM_SUITE_FT_FILS_SHA384, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA384, + }, +#endif + { +#ifndef WLAN_AKM_SUITE_OWE +#define WLAN_AKM_SUITE_OWE 0x000FAC12 +#endif + .akm_suite = WLAN_AKM_SUITE_OWE, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_OWE, + }, + { +#ifndef WLAN_AKM_SUITE_DPP +#define WLAN_AKM_SUITE_DPP 0x506f9a02 +#endif + .akm_suite = WLAN_AKM_SUITE_DPP, + .akm_type_crypto = WLAN_CRYPTO_KEY_MGMT_DPP, + }, +}; + +/* mapping table for cipher type received from NL and cryto cipher type */ +static const struct osif_cipher_crypto_mapping + osif_cipher_crypto_mapping[] = { + { + .cipher_suite = IW_AUTH_CIPHER_NONE, + .cipher_crypto = WLAN_CRYPTO_CIPHER_NONE, + .cipher_len = 0, + }, + { + .cipher_suite = WLAN_CIPHER_SUITE_WEP40, + .cipher_crypto = WLAN_CRYPTO_CIPHER_WEP_40, + .cipher_len = WLAN_CRYPTO_KEY_WEP40_LEN, + }, + { + .cipher_suite = WLAN_CIPHER_SUITE_TKIP, + .cipher_crypto = WLAN_CRYPTO_CIPHER_TKIP, + .cipher_len = WLAN_CRYPTO_KEY_TKIP_LEN, + }, + { + .cipher_suite = WLAN_CIPHER_SUITE_CCMP, + .cipher_crypto = WLAN_CRYPTO_CIPHER_AES_CCM, + .cipher_len = WLAN_CRYPTO_KEY_CCMP_LEN, + }, + { + .cipher_suite = WLAN_CIPHER_SUITE_WEP104, + .cipher_crypto = WLAN_CRYPTO_CIPHER_WEP_104, + .cipher_len = WLAN_CRYPTO_KEY_WEP104_LEN, + }, + { + .cipher_suite = WLAN_CIPHER_SUITE_GCMP, + .cipher_crypto = WLAN_CRYPTO_CIPHER_AES_GCM, + .cipher_len = WLAN_CRYPTO_KEY_GCMP_LEN, + }, + { + .cipher_suite = WLAN_CIPHER_SUITE_GCMP_256, + .cipher_crypto = WLAN_CRYPTO_CIPHER_AES_GCM_256, + .cipher_len = WLAN_CRYPTO_KEY_GCMP_256_LEN, + }, + { + .cipher_suite = WLAN_CIPHER_SUITE_CCMP_256, + .cipher_crypto = WLAN_CRYPTO_CIPHER_AES_CCM_256, + .cipher_len = WLAN_CRYPTO_KEY_CCMP_256_LEN, + }, + { + .cipher_suite = WLAN_CIPHER_SUITE_AES_CMAC, + .cipher_crypto = WLAN_CRYPTO_CIPHER_AES_CMAC, + .cipher_len = WLAN_CRYPTO_KEY_CCMP_LEN, + }, +#ifdef WLAN_CIPHER_SUITE_BIP_GMAC_128 + { + .cipher_suite = WLAN_CIPHER_SUITE_BIP_GMAC_128, + .cipher_crypto = WLAN_CRYPTO_CIPHER_AES_GMAC, + .cipher_len = WLAN_CRYPTO_KEY_GMAC_LEN, + }, +#endif +#ifdef WLAN_CIPHER_SUITE_BIP_GMAC_256 + { + .cipher_suite = WLAN_CIPHER_SUITE_BIP_GMAC_256, + .cipher_crypto = WLAN_CRYPTO_CIPHER_AES_GMAC_256, + .cipher_len = WLAN_CRYPTO_KEY_GMAC_256_LEN, + }, +#endif +#ifdef WLAN_CIPHER_SUITE_BIP_CMAC_256 + { + .cipher_suite = WLAN_CIPHER_SUITE_BIP_CMAC_256, + .cipher_crypto = WLAN_CRYPTO_CIPHER_AES_CMAC_256, + .cipher_len = WLAN_CRYPTO_KEY_CCMP_256_LEN, + }, +#endif +#ifdef FEATURE_WLAN_WAPI + { + .cipher_suite = WLAN_CIPHER_SUITE_SMS4, + .cipher_crypto = WLAN_CRYPTO_CIPHER_WAPI_SMS4, + .cipher_len = WLAN_CRYPTO_KEY_WAPI_LEN, + }, +#endif +}; + +wlan_crypto_auth_mode +osif_nl_to_crypto_auth_type(enum nl80211_auth_type auth_type) +{ + wlan_crypto_auth_mode crypto_auth_type = WLAN_CRYPTO_AUTH_NONE; + + if (auth_type < NL80211_AUTHTYPE_OPEN_SYSTEM || + auth_type >= QDF_ARRAY_SIZE(osif_auth_type_crypto_mapping)) { + QDF_TRACE_ERROR(QDF_MODULE_ID_OS_IF, "Unknown type: %d", + auth_type); + return crypto_auth_type; + } + + crypto_auth_type = osif_auth_type_crypto_mapping[auth_type]; + QDF_TRACE_DEBUG(QDF_MODULE_ID_OS_IF, "Auth type, NL: %d, crypto: %d", + auth_type, crypto_auth_type); + + return crypto_auth_type; +} + +wlan_crypto_key_mgmt osif_nl_to_crypto_akm_type(u32 key_mgmt) +{ + uint8_t index; + wlan_crypto_key_mgmt crypto_akm_type = WLAN_CRYPTO_KEY_MGMT_NONE; + bool akm_type_crypto_exist = false; + + for (index = 0; index < QDF_ARRAY_SIZE(osif_akm_type_crypto_mapping); + index++) { + if (osif_akm_type_crypto_mapping[index].akm_suite == key_mgmt) { + crypto_akm_type = osif_akm_type_crypto_mapping[index]. + akm_type_crypto; + akm_type_crypto_exist = true; + break; + } + } + if (!akm_type_crypto_exist) + QDF_TRACE_ERROR(QDF_MODULE_ID_OS_IF, "Unknown type: %d", + key_mgmt); + else + QDF_TRACE_DEBUG(QDF_MODULE_ID_OS_IF, "Akm suite, NL: %d, crypto: %d", + key_mgmt, crypto_akm_type); + + return crypto_akm_type; +} + +enum wlan_crypto_cipher_type osif_nl_to_crypto_cipher_type(u32 cipher) +{ + uint8_t index; + bool cipher_crypto_exist = false; + wlan_crypto_cipher_type crypto_cipher_type = WLAN_CRYPTO_CIPHER_NONE; + + for (index = 0; index < QDF_ARRAY_SIZE(osif_cipher_crypto_mapping); + index++) { + if (osif_cipher_crypto_mapping[index].cipher_suite == cipher) { + crypto_cipher_type = osif_cipher_crypto_mapping[index]. + cipher_crypto; + cipher_crypto_exist = true; + break; + } + } + if (!cipher_crypto_exist) { + QDF_TRACE_ERROR(QDF_MODULE_ID_OS_IF, "Unknown type: %d", + cipher); + return WLAN_CRYPTO_CIPHER_INVALID; + } + QDF_TRACE_DEBUG(QDF_MODULE_ID_OS_IF, "Cipher suite, NL: %d, crypto: %d", + cipher, crypto_cipher_type); + + return crypto_cipher_type; +} + +int osif_nl_to_crypto_cipher_len(u32 cipher) +{ + uint8_t index; + + for (index = 0; index < QDF_ARRAY_SIZE(osif_cipher_crypto_mapping); + index++) { + if (osif_cipher_crypto_mapping[index].cipher_suite == cipher) + return osif_cipher_crypto_mapping[index].cipher_len; + } + + return -EINVAL; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/inc/wlan_cfg80211_ftm.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/inc/wlan_cfg80211_ftm.h new file mode 100644 index 0000000000000000000000000000000000000000..030ac3f19ffbbbcd8a1d393377c5262ed68b56f9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/inc/wlan_cfg80211_ftm.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declares driver FTM functions interfacing with linux kernel + */ + +#ifndef _WLAN_CFG80211_FTM_H_ +#define _WLAN_CFG80211_FTM_H_ + +/** + * enum wlan_cfg80211_ftm_attr - FTM Netlink attributes + * @WLAN_CFG80211_FTM_ATTR_INVALID: attribute is invalid + * @WLAN_CFG80211_FTM_ATTR_CMD: attribute type is FTM command + * @WLAN_CFG80211_FTM_ATTR_DATA: attribute type is data + * + * @WLAN_CFG80211_FTM_ATTR_MAX: Max number of attributes + */ +enum wlan_cfg80211_ftm_attr { + WLAN_CFG80211_FTM_ATTR_INVALID = 0, + WLAN_CFG80211_FTM_ATTR_CMD = 1, + WLAN_CFG80211_FTM_ATTR_DATA = 2, + + /* keep last */ + WLAN_CFG80211_FTM_ATTR_MAX, +}; + +/** + * enum wlan_cfg80211_ftm_cmd - FTM command types + * @WLAN_CFG80211_FTM_CMD_WLAN_FTM: command is of type FTM + */ +enum wlan_cfg80211_ftm_cmd { + WLAN_CFG80211_FTM_CMD_WLAN_FTM = 0, +}; + +#define WLAN_FTM_DATA_MAX_LEN 2048 + +/** + * wlan_cfg80211_ftm_testmode_cmd() - process cfg80211 testmode command + * @pdev: pdev object + * @data: ftm testmode command data of type void + * @len: length of the data + * + * Return: 0 on success or -Eerrno otherwise + */ +int wlan_cfg80211_ftm_testmode_cmd(struct wlan_objmgr_pdev *pdev, + void *data, uint32_t len); + +/** + * wlan_cfg80211_ftm_rx_event() - handle the received ftm event + * @pdev: pdev object + * @data: ftm event data + * @len: length of the data + * + * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E errno otherwise + */ +#ifdef QCA_WIFI_FTM_NL80211 +QDF_STATUS wlan_cfg80211_ftm_rx_event(struct wlan_objmgr_pdev *pdev, + uint8_t *data, uint32_t len); +#else +static inline QDF_STATUS +wlan_cfg80211_ftm_rx_event(struct wlan_objmgr_pdev *pdev, + uint8_t *data, uint32_t len) +{ + return QDF_STATUS_E_NOSUPPORT; +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/inc/wlan_ioctl_ftm.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/inc/wlan_ioctl_ftm.h new file mode 100644 index 0000000000000000000000000000000000000000..0d2a3d71a2a5bb4253970ea3ae3c82c68430307f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/inc/wlan_ioctl_ftm.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declares driver FTM functions interfacing with linux kernel + */ + +#ifndef _WLAN_IOCTL_FTM_H_ +#define _WLAN_IOCTL_FTM_H_ + +/** + * wlan_ioctl_ftm_testmode_cmd() - handle the ftm ioctl command + * @pdev: pdev object + * @cmd: ftm command + * @userdata: the content of the command + * + * Return: 0 on success, otherwise the error code. + */ +int wlan_ioctl_ftm_testmode_cmd(struct wlan_objmgr_pdev *pdev, int cmd, + uint8_t *userdata); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/src/wlan_cfg80211_ftm.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/src/wlan_cfg80211_ftm.c new file mode 100644 index 0000000000000000000000000000000000000000..dd8ed8728cff6c9b54aaecf8173cc23bc27d2ca5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/src/wlan_cfg80211_ftm.c @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: implementation of the driver FTM functions interfacing with linux kernel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const struct nla_policy +wlan_cfg80211_ftm_policy[WLAN_CFG80211_FTM_ATTR_MAX + 1] = { + [WLAN_CFG80211_FTM_ATTR_CMD] = {.type = NLA_U32}, + [WLAN_CFG80211_FTM_ATTR_DATA] = {.type = NLA_BINARY, + .len = WLAN_FTM_DATA_MAX_LEN}, +}; + +static int +wlan_cfg80211_process_ftm_cmd(struct wlan_objmgr_pdev *pdev, + struct nlattr *tb[]) +{ + int buf_len; + void *buf; + QDF_STATUS status; + + if (!tb[WLAN_CFG80211_FTM_ATTR_DATA]) { + ftm_err("WLAN_CFG80211_FTM_ATTR_DATA attribute is invalid"); + return -EINVAL; + } + + buf = nla_data(tb[WLAN_CFG80211_FTM_ATTR_DATA]); + buf_len = nla_len(tb[WLAN_CFG80211_FTM_ATTR_DATA]); + + if (buf_len > WLAN_FTM_DATA_MAX_LEN) + return -EINVAL; + + ftm_debug("****FTM Tx cmd len = %d*****", buf_len); + + status = ucfg_wlan_ftm_testmode_cmd(pdev, buf, buf_len); + + if (QDF_IS_STATUS_ERROR(status)) + status = QDF_STATUS_E_BUSY; + + return qdf_status_to_os_return(status); +} + +int +wlan_cfg80211_ftm_testmode_cmd(struct wlan_objmgr_pdev *pdev, + void *data, uint32_t len) +{ + struct nlattr *tb[WLAN_CFG80211_FTM_ATTR_MAX + 1]; + int err = 0, cmd; + struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj; + + ftm_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_FTM); + if (!ftm_pdev_obj) { + ftm_err("Failed to get ftm pdev component"); + return -EINVAL; + } + + ftm_pdev_obj->cmd_type = WIFI_FTM_CMD_NL80211; + + err = wlan_cfg80211_nla_parse(tb, WLAN_CFG80211_FTM_ATTR_MAX - 1, data, + len, wlan_cfg80211_ftm_policy); + if (err) { + ftm_err("Testmode INV ATTR"); + return err; + } + + if (!tb[WLAN_CFG80211_FTM_ATTR_CMD]) { + ftm_err("Testmode INV CMD"); + return -EINVAL; + } + cmd = nla_get_u32(tb[WLAN_CFG80211_FTM_ATTR_CMD]); + + switch (cmd) { + case WLAN_CFG80211_FTM_CMD_WLAN_FTM: + err = wlan_cfg80211_process_ftm_cmd(pdev, tb); + break; + + default: + ftm_err("unknown command: %d", cmd); + return -ENOENT; + } + + return err; +} + +qdf_export_symbol(wlan_cfg80211_ftm_testmode_cmd); + +QDF_STATUS +wlan_cfg80211_ftm_rx_event(struct wlan_objmgr_pdev *pdev, + uint8_t *data, uint32_t len) +{ + struct pdev_osif_priv *pdev_ospriv; + qdf_nbuf_t skb = NULL; + + if (!data || !len) { + ftm_err("Null data or invalid length"); + return QDF_STATUS_E_INVAL; + } + + pdev_ospriv = wlan_pdev_get_ospriv(pdev); + if (!pdev_ospriv) { + ftm_err("pdev_ospriv is NULL"); + return QDF_STATUS_E_INVAL; + } + + ftm_debug("Testmode response event generated"); + skb = cfg80211_testmode_alloc_event_skb(pdev_ospriv->wiphy, + len, GFP_ATOMIC); + if (!skb) + return QDF_STATUS_E_NOMEM; + + if (nla_put_u32(skb, WLAN_CFG80211_FTM_ATTR_CMD, + WLAN_CFG80211_FTM_CMD_WLAN_FTM) || + nla_put(skb, WLAN_CFG80211_FTM_ATTR_DATA, len, data)) { + goto nla_put_failure; + } + cfg80211_testmode_event(skb, GFP_ATOMIC); + + return QDF_STATUS_SUCCESS; + +nla_put_failure: + qdf_nbuf_free(skb); + ftm_err("nla_put failed on testmode rx skb!"); + + return QDF_STATUS_E_INVAL; +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/src/wlan_ioctl_ftm.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/src/wlan_ioctl_ftm.c new file mode 100644 index 0000000000000000000000000000000000000000..3b25b25fdd1ee76145d7e17a8d759a5bf891acf4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/src/wlan_ioctl_ftm.c @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: implementation of the driver FTM functions interfacing with linux kernel + */ + +#include +#include +#include +#include +#include +#include +#include + +static QDF_STATUS +wlan_process_ftm_ioctl_cmd(struct wlan_objmgr_pdev *pdev, uint8_t *userdata) +{ + uint8_t *buffer; + QDF_STATUS error; + int length; + + if (get_user(length, (uint32_t *)userdata) != 0) + return QDF_STATUS_E_FAILURE; + + if (length > WLAN_FTM_DATA_MAX_LEN) + return QDF_STATUS_E_FAILURE; + + buffer = qdf_mem_malloc(length); + if (!buffer) + return QDF_STATUS_E_NOMEM; + + if (copy_from_user(buffer, &userdata[sizeof(length)], length)) + error = QDF_STATUS_E_FAILURE; + else + error = ucfg_wlan_ftm_testmode_cmd(pdev, buffer, length); + + qdf_mem_free(buffer); + + return error; +} + +static QDF_STATUS +wlan_process_ftm_ioctl_rsp(struct wlan_objmgr_pdev *pdev, uint8_t *userdata) +{ + uint8_t *buffer; + QDF_STATUS error; + int length; + + length = WLAN_FTM_DATA_MAX_LEN + sizeof(u_int32_t); + + buffer = qdf_mem_malloc(length); + if (!buffer) + return QDF_STATUS_E_NOMEM; + + error = ucfg_wlan_ftm_testmode_rsp(pdev, buffer); + if (!error) + error = copy_to_user((userdata - sizeof(int)), buffer, length); + else + error = QDF_STATUS_E_AGAIN; + + qdf_mem_free(buffer); + + return error; +} + +int +wlan_ioctl_ftm_testmode_cmd(struct wlan_objmgr_pdev *pdev, int cmd, + uint8_t *userdata) +{ + QDF_STATUS error; + struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj; + + ftm_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_FTM); + if (!ftm_pdev_obj) { + ftm_err("Failed to get ftm pdev component"); + return QDF_STATUS_E_FAILURE; + } + + ftm_pdev_obj->cmd_type = WIFI_FTM_CMD_IOCTL; + + switch (cmd) { + case FTM_IOCTL_UNIFIED_UTF_CMD: + error = wlan_process_ftm_ioctl_cmd(pdev, userdata); + break; + case FTM_IOCTL_UNIFIED_UTF_RSP: + error = wlan_process_ftm_ioctl_rsp(pdev, userdata); + break; + default: + ftm_err("FTM Unknown cmd - not supported"); + error = QDF_STATUS_E_NOSUPPORT; + } + + return qdf_status_to_os_return(error); +} + +qdf_export_symbol(wlan_ioctl_ftm_testmode_cmd); diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/gpio/inc/wlan_cfg80211_gpio.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/gpio/inc/wlan_cfg80211_gpio.h new file mode 100644 index 0000000000000000000000000000000000000000..924c290eec615ddab706ea312060fbbcf229f3fd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/gpio/inc/wlan_cfg80211_gpio.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cfg80211_gpio.h + * + * This Header file provide declaration for cfg80211 command handler API + */ + +#ifndef __WLAN_CFG80211_GPIO_CFG_H__ +#define __WLAN_CFG80211_GPIO_CFG_H__ + +#include +#include +#include +#include + +#ifdef WLAN_FEATURE_GPIO_CFG + +extern const struct nla_policy + wlan_cfg80211_gpio_config_policy + [QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_MAX + 1]; + +/** + * wlan_cfg80211_start_gpio_config() - set GPIO config + * @psoc: pointer to psoc common object + * @data: Pointer to the data to be passed via vendor interface + * @data_len: Length of the data to be passed + * + * Return: Return the Success or Failure code + */ +int wlan_cfg80211_start_gpio_config(struct wiphy *wiphy, + struct wlan_objmgr_psoc *psoc, + const void *data, int data_len); +#else +static inline +int wlan_cfg80211_start_gpio_config(struct wiphy *wiphy, + struct wlan_objmgr_psoc *psoc, + const void *data, int data_len) +{ + return 0; +} +#endif /* WLAN_FEATURE_GPIO_CFG */ +#endif /* __WLAN_CFG80211_GPIO_CFG_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/gpio/src/wlan_cfg80211_gpio.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/gpio/src/wlan_cfg80211_gpio.c new file mode 100644 index 0000000000000000000000000000000000000000..b99898353a70e18408a42fc26420b08e7323af8a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/gpio/src/wlan_cfg80211_gpio.c @@ -0,0 +1,391 @@ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: defines driver functions interfacing with linux kernel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "qdf_module.h" + +const struct nla_policy +wlan_cfg80211_gpio_config_policy[QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_MAX + 1] = { + [QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_COMMAND] = { + .type = NLA_U32, + .len = sizeof(uint32_t) }, + [QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_PINNUM] = { + .type = NLA_U32, + .len = sizeof(uint32_t) }, + [QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_VALUE] = { + .type = NLA_U32, + .len = sizeof(uint32_t) }, + [QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_PULL_TYPE] = { + .type = NLA_U32, + .len = sizeof(uint32_t) }, + [QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTR_MODE] = { + .type = NLA_U32, + .len = sizeof(uint32_t) }, + [QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_DIR] = { + .type = NLA_U32, + .len = sizeof(uint32_t) }, + [QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_MUX_CONFIG] = { + .type = NLA_U32, + .len = sizeof(uint32_t) }, + [QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_DRIVE] = { + .type = NLA_U32, + .len = sizeof(uint32_t) }, + [QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTERNAL_CONFIG] = { + .type = NLA_U32, + .len = sizeof(uint32_t) }, +}; + +/** + * convert_vendor_gpio_direction() - Function to convert vendor gpio direction + * @dir: pointer to enum qca_gpio_direction + * + * Convert the vendor gpio direction to wmi unified gpio direction + * + * Return: wmi unified gpio direction + */ +static enum gpio_direction +convert_vendor_gpio_direction(enum qca_gpio_direction dir) +{ + switch (dir) { + case QCA_WLAN_GPIO_INPUT: + return WMI_HOST_GPIO_INPUT; + case QCA_WLAN_GPIO_OUTPUT: + return WMI_HOST_GPIO_OUTPUT; + default: + return WMI_HOST_GPIO_INPUT; + } +} + +/** + * convert_vendor_gpio_pull_type() - Function to convert vendor pull type + * @pull_type: pointer to enum qca_gpio_pull_type + * + * Convert the vendor pull type to wmi unified pull type + * + * Return: wmi unified gpio pull type + */ +static enum gpio_pull_type +convert_vendor_gpio_pull_type(enum qca_gpio_pull_type pull_type) +{ + switch (pull_type) { + case QCA_WLAN_GPIO_PULL_NONE: + return WMI_HOST_GPIO_PULL_NONE; + case QCA_WLAN_GPIO_PULL_UP: + return WMI_HOST_GPIO_PULL_UP; + case QCA_WLAN_GPIO_PULL_DOWN: + return WMI_HOST_GPIO_PULL_DOWN; + default: + return WMI_HOST_GPIO_PULL_NONE; + } +} + +/** + * convert_vendor_gpio_interrupt_mode() - Function to convert + * vendor interrupt mode + * @intr_mode: pointer to enum qca_gpio_interrupt_mode + * + * Convert the vendor interrupt mode to wmi unified interrupt mode + * + * Return: wmi unified gpio interrupt mode + */ +static enum gpio_interrupt_mode +convert_vendor_gpio_interrupt_mode(enum qca_gpio_interrupt_mode intr_mode) +{ + switch (intr_mode) { + case QCA_WLAN_GPIO_INTMODE_DISABLE: + return WMI_HOST_GPIO_INTMODE_DISABLE; + case QCA_WLAN_GPIO_INTMODE_RISING_EDGE: + return WMI_HOST_GPIO_INTMODE_RISING_EDGE; + case QCA_WLAN_GPIO_INTMODE_FALLING_EDGE: + return WMI_HOST_GPIO_INTMODE_FALLING_EDGE; + case QCA_WLAN_GPIO_INTMODE_BOTH_EDGE: + return WMI_HOST_GPIO_INTMODE_BOTH_EDGE; + case QCA_WLAN_GPIO_INTMODE_LEVEL_LOW: + return WMI_HOST_GPIO_INTMODE_LEVEL_LOW; + case QCA_WLAN_GPIO_INTMODE_LEVEL_HIGH: + return WMI_HOST_GPIO_INTMODE_LEVEL_HIGH; + default: + return WMI_HOST_GPIO_INTMODE_DISABLE; + } +} + +/** + * convert_vendor_gpio_output_value() - Function to convert vendor + * gpio output value + * @value: pointer to enum qca_gpio_value + * + * Convert the vendor gpio value to wmi unified gpio output value + * + * Return: wmi unified gpio output value + */ +static enum gpio_value +convert_vendor_gpio_output_value(enum qca_gpio_value value) +{ + switch (value) { + case QCA_WLAN_GPIO_LEVEL_LOW: + return WMI_HOST_GPIO_LEVEL_LOW; + case QCA_WLAN_GPIO_LEVEL_HIGH: + return WMI_HOST_GPIO_LEVEL_HIGH; + default: + return WMI_HOST_GPIO_LEVEL_LOW; + } +} + +/** + * convert_vendor_gpio_drive() - Function to convert vendor + * gpio drive + * @drive: value of enum gpio_drive + * + * Convert the vendor gpio drive to wmi unified gpio output drive + * + * Return: wmi unified gpio output drive config + */ +static enum gpio_drive +convert_vendor_gpio_drive(enum qca_gpio_drive drive) +{ + switch (drive) { + case QCA_WLAN_GPIO_DRIVE_2MA: + return WMI_HOST_GPIO_DRIVE_2MA; + case QCA_WLAN_GPIO_DRIVE_4MA: + return WMI_HOST_GPIO_DRIVE_4MA; + case QCA_WLAN_GPIO_DRIVE_6MA: + return WMI_HOST_GPIO_DRIVE_6MA; + case QCA_WLAN_GPIO_DRIVE_8MA: + return WMI_HOST_GPIO_DRIVE_8MA; + case QCA_WLAN_GPIO_DRIVE_10MA: + return WMI_HOST_GPIO_DRIVE_10MA; + case QCA_WLAN_GPIO_DRIVE_12MA: + return WMI_HOST_GPIO_DRIVE_12MA; + case QCA_WLAN_GPIO_DRIVE_14MA: + return WMI_HOST_GPIO_DRIVE_14MA; + case QCA_WLAN_GPIO_DRIVE_16MA: + return WMI_HOST_GPIO_DRIVE_16MA; + default: + return WMI_HOST_GPIO_DRIVE_2MA; + } +} + +/** + * convert_vendor_gpio_init_enable() - Function to convert vendor + * gpio init_enable + * @internal_config: Param to decide whether to use internal config + * + * Convert the vendor internal_config to wmi unified gpio output init_enable + * + * Return: wmi unified gpio output init_enable config + */ +static enum gpio_init_enable +convert_vendor_gpio_init_enable(uint32_t internal_config) +{ + if(internal_config) + return WMI_HOST_GPIO_INIT_DISABLE; + else + return WMI_HOST_GPIO_INIT_ENABLE; +} + +/** + * wlan_set_gpio_config() - set the gpio configuration info + * @psoc: the pointer of wlan_objmgr_psoc + * @attr: list of attributes + * + * Return: 0 on success; errno on failure + */ +static int +wlan_set_gpio_config(struct wlan_objmgr_psoc *psoc, + struct nlattr **attr) +{ + struct gpio_config_params cfg_param; + struct nlattr *gpio_attr; + enum qca_gpio_direction pin_dir; + enum qca_gpio_pull_type pull_type; + enum qca_gpio_interrupt_mode intr_mode; + enum qca_gpio_drive drive; + uint32_t internal_config; + QDF_STATUS status; + + gpio_attr = attr[QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_PINNUM]; + if (!gpio_attr) { + osif_err_rl("attr gpio number failed"); + return -EINVAL; + } + cfg_param.pin_num = nla_get_u32(gpio_attr); + + gpio_attr = attr[QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_DIR]; + if (!gpio_attr) { + osif_err_rl("attr gpio dir failed"); + return -EINVAL; + } + pin_dir = nla_get_u32(gpio_attr); + if (pin_dir >= QCA_WLAN_GPIO_DIR_MAX) { + osif_err_rl("attr gpio direction invalid"); + return -EINVAL; + } + cfg_param.pin_dir = convert_vendor_gpio_direction(pin_dir); + + gpio_attr = attr[QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_PULL_TYPE]; + if (!gpio_attr) { + osif_err_rl("attr gpio pull failed"); + return -EINVAL; + } + pull_type = nla_get_u32(gpio_attr); + if (pull_type >= QCA_WLAN_GPIO_PULL_MAX) { + osif_err_rl("attr gpio pull type invalid"); + return -EINVAL; + } + cfg_param.pin_pull_type = convert_vendor_gpio_pull_type(pull_type); + + gpio_attr = attr[QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTR_MODE]; + if (!gpio_attr) { + osif_err_rl("attr gpio interrupt mode failed"); + return -EINVAL; + } + intr_mode = nla_get_u32(gpio_attr); + if (intr_mode >= QCA_WLAN_GPIO_INTMODE_MAX) { + osif_err_rl("attr gpio interrupt mode invalid"); + return -EINVAL; + } + cfg_param.pin_intr_mode = convert_vendor_gpio_interrupt_mode(intr_mode); + + /* Below are optional parameters. Initialize to zero */ + cfg_param.mux_config_val = WMI_HOST_GPIO_MUX_DEFAULT; + cfg_param.drive = WMI_HOST_GPIO_DRIVE_2MA; + cfg_param.init_enable = WMI_HOST_GPIO_INIT_DISABLE; + + gpio_attr = attr[QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_MUX_CONFIG]; + if (gpio_attr) { + cfg_param.mux_config_val = nla_get_u32(gpio_attr); + } + + gpio_attr = attr[QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_DRIVE]; + if (gpio_attr) { + drive = nla_get_u32(gpio_attr); + if (drive >= QCA_WLAN_GPIO_DRIVE_MAX) { + osif_err_rl("attr gpio drive invalid"); + return -EINVAL; + } + cfg_param.drive = convert_vendor_gpio_drive(drive); + } + + gpio_attr = attr[QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTERNAL_CONFIG]; + if (gpio_attr) { + internal_config = nla_get_u32(gpio_attr); + cfg_param.init_enable = + convert_vendor_gpio_init_enable(internal_config); + } + + status = ucfg_set_gpio_config(psoc, &cfg_param); + return status; +} + +/** + * wlan_set_gpio_output() - set the gpio output info + * @psoc: the pointer of wlan_objmgr_psoc + * @attr: list of attributes + * + * Return: 0 on success; errno on failure + */ +static int +wlan_set_gpio_output(struct wlan_objmgr_psoc *psoc, + struct nlattr **attr) +{ + struct gpio_output_params out_param; + struct nlattr *gpio_attr; + enum qca_gpio_value pin_set; + QDF_STATUS status; + + gpio_attr = attr[QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_PINNUM]; + if (!gpio_attr) { + osif_err_rl("attr gpio number failed"); + return -EINVAL; + } + out_param.pin_num = nla_get_u32(gpio_attr); + + gpio_attr = attr[QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_VALUE]; + if (!gpio_attr) { + osif_err_rl("attr gpio value failed"); + return -EINVAL; + } + pin_set = nla_get_u32(gpio_attr); + if (pin_set >= QCA_WLAN_GPIO_LEVEL_MAX) { + osif_err_rl("attr gpio level invalid"); + return -EINVAL; + } + out_param.pin_set = convert_vendor_gpio_output_value(pin_set); + + status = ucfg_set_gpio_output(psoc, &out_param); + return status; +} + +/** + * wlan_cfg80211_start_gpio_config - Set the gpio configuration + * @wiphy: pointer to wiphy + * @psoc: the pointer of wlan_objmgr_psoc + * @data: pointer to data + * @data_len: data length + * + * __wlan_cfg80211_set_gpio_config will forward the GPIO setting to FW by + * WMI_GPIO_CONFIG/OUTPUT_CMDID + * + * Return: 0 on success; errno on failure + */ +int +wlan_cfg80211_start_gpio_config(struct wiphy *wiphy, + struct wlan_objmgr_psoc *psoc, + const void *data, + int data_len) +{ + uint32_t command; + struct nlattr *attr[QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_MAX + 1]; + int ret; + + if (wlan_cfg80211_nla_parse(attr, QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_MAX, + data, data_len, + wlan_cfg80211_gpio_config_policy)) { + return -EINVAL; + } + + if (attr[QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_COMMAND]) { + command = nla_get_u32( + attr[QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_COMMAND]); + + if (command == QCA_WLAN_VENDOR_GPIO_CONFIG) { + ret = wlan_set_gpio_config(psoc, attr); + } else if (command == QCA_WLAN_VENDOR_GPIO_OUTPUT) { + ret = wlan_set_gpio_output(psoc, attr); + } else { + osif_err_rl("Invalid command"); + return -EINVAL; + } + } else { + osif_err_rl("Invalid command"); + return -EINVAL; + } + + return ret; +} +qdf_export_symbol(wlan_cfg80211_start_gpio_config); + diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/mlme/inc/wlan_cfg80211_vdev_mlme.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/mlme/inc/wlan_cfg80211_vdev_mlme.h new file mode 100644 index 0000000000000000000000000000000000000000..bf8e264c31ef1aba32388f8f4451c2e045ccbc21 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/mlme/inc/wlan_cfg80211_vdev_mlme.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cfg80211_vdev_mlme.h + * + * This header file provide API declarations for osif layer + * to get and set vdev mgr mlme structure parameters + */ + +#ifndef __WLAN_CFG80211_VDEV_MLME_H__ +#define __WLAN_CFG80211_VDEV_MLME_H__ +#include +#include +#include "include/wlan_vdev_mlme.h" + +/** + * wlan_cfg80211_vdev_mlme_get_param_ssid() – cfg80211 MLME API to + * get ssid + * @vdev: pointer to vdev object + * @ssid: pointer to store the ssid + * @ssid_len: pointer to store the ssid length value + * + * Return: 0 on success, negative value on failure + */ +int +wlan_cfg80211_vdev_mlme_get_param_ssid(struct wlan_objmgr_vdev *vdev, + uint8_t *ssid, + uint8_t *ssid_len); + +/** + * wlan_cfg80211_vdev_mlme_get_trans_bssid() – cfg80211 MLME API to + * get trans bssid + * @vdev: pointer to vdev object + * @addr: pointer to store the addr of the transmission bssid + * + * Return: 0 on success, negative value on failure + */ +int +wlan_cfg80211_vdev_mlme_get_trans_bssid(struct wlan_objmgr_vdev *vdev, + uint8_t *addr); + +/** + * wlan_cfg80211_vdev_mlme_set_param() – cfg80211 MLME API to fill common + * parameters of vdev_mlme object + * @vdev: pointer to vdev object + * @param_id: param id for which the value should be set + * @mlme_cfg: structure of a union to set the parameter + * + * Return: void + */ +void +wlan_cfg80211_vdev_mlme_set_param(struct wlan_objmgr_vdev *vdev, + enum wlan_mlme_cfg_id param_id, + struct wlan_vdev_mgr_cfg mlme_cfg); + +/** + * wlan_cfg80211_vdev_mlme_get_param() – cfg80211 MLME API to get common + * parameters of vdev_mlme object + * @vdev: pointer to vdev object + * @param_id: param id for which the value should be set + * @value: pointer to store the value of the req vdev_mlme object + * + * Return: void + */ +void +wlan_cfg80211_vdev_mlme_get_param(struct wlan_objmgr_vdev *vdev, + enum wlan_mlme_cfg_id param_id, + uint32_t *value); +#endif /* __WLAN_CFG80211_VDEV_MLME_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/mlme/src/wlan_cfg80211_vdev_mlme.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/mlme/src/wlan_cfg80211_vdev_mlme.c new file mode 100644 index 0000000000000000000000000000000000000000..16a6a0cd8568d10eb5e988b612dc33addcaddc57 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/mlme/src/wlan_cfg80211_vdev_mlme.c @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cfg80211_vdev_mlme.c + * + * This file provide API definitions for osif layer + * to get and set vdev mgr mlme structure parameters + */ + +#include + +int +wlan_cfg80211_vdev_mlme_get_param_ssid(struct wlan_objmgr_vdev *vdev, + uint8_t *ssid, + uint8_t *ssid_len) +{ + if (!vdev) { + osif_err("VDEV is NULL!!!!"); + return qdf_status_to_os_return(QDF_STATUS_E_FAILURE); + } + + ucfg_wlan_vdev_mgr_get_param_ssid(vdev, ssid, ssid_len); + return qdf_status_to_os_return(QDF_STATUS_SUCCESS); +} + +int +wlan_cfg80211_vdev_mlme_get_trans_bssid(struct wlan_objmgr_vdev *vdev, + uint8_t *addr) +{ + if (!vdev) { + osif_err("VDEV is NULL!!!!"); + return qdf_status_to_os_return(QDF_STATUS_E_FAILURE); + } + + ucfg_wlan_vdev_mlme_get_trans_bssid(vdev, addr); + return qdf_status_to_os_return(QDF_STATUS_SUCCESS); +} + +void +wlan_cfg80211_vdev_mlme_set_param(struct wlan_objmgr_vdev *vdev, + enum wlan_mlme_cfg_id param_id, + struct wlan_vdev_mgr_cfg mlme_cfg) +{ + if (!vdev) { + osif_err("VDEV is NULL!!!!"); + return; + } + + ucfg_wlan_vdev_mgr_set_param(vdev, param_id, mlme_cfg); +} + +void +wlan_cfg80211_vdev_mlme_get_param(struct wlan_objmgr_vdev *vdev, + enum wlan_mlme_cfg_id param_id, + uint32_t *value) +{ + if (!vdev) { + osif_err("VDEV is NULL!!!!"); + return; + } + + ucfg_wlan_vdev_mgr_get_param(vdev, param_id, value); +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/qca_vendor.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/qca_vendor.h new file mode 100644 index 0000000000000000000000000000000000000000..eed687a55264f7780cf041889095e002f4a315d8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/qca_vendor.h @@ -0,0 +1,9485 @@ +/* + * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declares vendor commands interfacing with linux kernel + */ + + +#ifndef _WLAN_QCA_VENDOR_H_ +#define _WLAN_QCA_VENDOR_H_ + +/* Vendor id to be used in vendor specific command and events + * to user space. + * NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID, + * vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and + * qca_wlan_vendor_attr is open source file src/common/qca-vendor.h in + * git://w1.fi/srv/git/hostap.git; the values here are just a copy of that + */ + +#define QCA_NL80211_VENDOR_ID 0x001374 + +#ifndef BIT +#define BIT(x) (1U << (x)) +#endif + +/** + * enum qca_nl80211_vendor_subcmds: NL 80211 vendor sub command + * + * @QCA_NL80211_VENDOR_SUBCMD_UNSPEC: Unspecified + * @QCA_NL80211_VENDOR_SUBCMD_TEST: Test + * Sub commands 2 to 8 are not used + * @QCA_NL80211_VENDOR_SUBCMD_ROAMING: Roaming + * @QCA_NL80211_VENDOR_SUBCMD_AVOID_FREQUENCY: Avoid frequency. + * @QCA_NL80211_VENDOR_SUBCMD_DFS_CAPABILITY: DFS capability + * @QCA_NL80211_VENDOR_SUBCMD_NAN: NAN command/event which is used to pass + * NAN Request/Response and NAN Indication messages. These messages are + * interpreted between the framework and the firmware component. While + * sending the command from userspace to the driver, payload is not + * encapsulated inside any attribute. Attribute QCA_WLAN_VENDOR_ATTR_NAN + * is used when receiving vendor events in userspace from the driver. + * @QCA_NL80211_VENDOR_SUBCMD_STATS_EXT: Ext stats + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_SET: Link layer stats set + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_GET: Link layer stats get + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_CLR: Link layer stats clear + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_RADIO_RESULTS: Link layer stats radio + * results + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_IFACE_RESULTS: Link layer stats interface + * results + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_PEERS_RESULTS: Link layer stats peer + * results + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_START: Ext scan start + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_STOP: Ext scan stop + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_VALID_CHANNELS: Ext scan get valid + * channels + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CAPABILITIES: Ext scan get capability + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CACHED_RESULTS: Ext scan get cached + * results + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_RESULTS_AVAILABLE: Ext scan results + * available. Used when report_threshold is reached in scan cache. + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_FULL_SCAN_RESULT: Ext scan full scan + * result. Used to report scan results when each probe rsp. is received, + * if report_events enabled in wifi_scan_cmd_params. + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_EVENT: Ext scan event from target. + * Indicates progress of scanning state-machine. + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_FOUND: Ext scan hotlist + * ap found + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_BSSID_HOTLIST: Ext scan set hotlist + * bssid + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_BSSID_HOTLIST: Ext scan reset + * hotlist bssid + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SIGNIFICANT_CHANGE: Ext scan significant + * change + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SIGNIFICANT_CHANGE: Ext scan set + * significant change + * ap found + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SIGNIFICANT_CHANGE: Ext scan reset + * significant change + * @QCA_NL80211_VENDOR_SUBCMD_TDLS_ENABLE: Ext tdls enable + * @QCA_NL80211_VENDOR_SUBCMD_TDLS_DISABLE: Ext tdls disable + * @QCA_NL80211_VENDOR_SUBCMD_TDLS_GET_STATUS: Ext tdls get status + * @QCA_NL80211_VENDOR_SUBCMD_TDLS_STATE: Ext tdls state + * @QCA_NL80211_VENDOR_SUBCMD_GET_SUPPORTED_FEATURES: Get supported features + * @QCA_NL80211_VENDOR_SUBCMD_SCANNING_MAC_OUI: Set scanning_mac_oui + * @QCA_NL80211_VENDOR_SUBCMD_NO_DFS_FLAG: No DFS flag + * @QCA_NL80211_VENDOR_SUBCMD_GET_CONCURRENCY_MATRIX: Get Concurrency Matrix + * @QCA_NL80211_VENDOR_SUBCMD_KEY_MGMT_SET_KEY: Get the key mgmt offload keys + * @QCA_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH: After roaming, send the + * roaming and auth information. + * @QCA_NL80211_VENDOR_SUBCMD_OCB_SET_SCHED: Set OCB schedule + * + * @QCA_NL80211_VENDOR_SUBCMD_DO_ACS: ACS command/event which is used to + * invoke the ACS function in device and pass selected channels to + * hostapd. Uses enum qca_wlan_vendor_attr_acs_offload attributes. + * + * @QCA_NL80211_VENDOR_SUBCMD_GET_FEATURES: Get the supported features by the + * driver. + * @QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_STARTED: Indicate that driver + * started CAC on DFS channel + * @QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_FINISHED: Indicate that driver + * completed the CAC check on DFS channel + * @QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_ABORTED: Indicate that the CAC + * check was aborted by the driver + * @QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_NOP_FINISHED: Indicate that the + * driver completed NOP + * @QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_RADAR_DETECTED: Indicate that the + * driver detected radar signal on the current operating channel + * @QCA_NL80211_VENDOR_SUBCMD_GET_WIFI_INFO: get wlan driver information + * @QCA_NL80211_VENDOR_SUBCMD_WIFI_LOGGER_START: start wifi logger + * @QCA_NL80211_VENDOR_SUBCMD_WIFI_LOGGER_MEMORY_DUMP: memory dump request + * @QCA_NL80211_VENDOR_SUBCMD_GET_LOGGER_FEATURE_SET: get logger feature set + * @QCA_NL80211_VENDOR_SUBCMD_ROAM: roam + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SSID_HOTLIST: extscan set ssid hotlist + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SSID_HOTLIST: + * extscan reset ssid hotlist + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_FOUND: hotlist ssid found + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_LOST: hotlist ssid lost + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_SET_LIST: set pno list + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_SET_PASSPOINT_LIST: set passpoint list + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_RESET_PASSPOINT_LIST: + * reset passpoint list + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_NETWORK_FOUND: pno network found + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_PASSPOINT_NETWORK_FOUND: + * passpoint network found + * @QCA_NL80211_VENDOR_SUBCMD_SET_WIFI_CONFIGURATION: set wifi config + * @QCA_NL80211_VENDOR_SUBCMD_GET_WIFI_CONFIGURATION: get wifi config + * @QCA_NL80211_VENDOR_SUBCMD_GET_LOGGER_FEATURE_SET: get logging features + * @QCA_NL80211_VENDOR_SUBCMD_LINK_PROPERTIES: get link properties + * @QCA_NL80211_VENDOR_SUBCMD_GW_PARAM_CONFIG: set gateway parameters + * @QCA_NL80211_VENDOR_SUBCMD_GET_PREFERRED_FREQ_LIST: get preferred channel + list + * @QCA_NL80211_VENDOR_SUBCMD_SET_PROBABLE_OPER_CHANNEL: channel hint + * @QCA_NL80211_VENDOR_SUBCMD_SETBAND: Command to configure the band + * to the host driver. This command sets the band through either + * the attribute QCA_WLAN_VENDOR_ATTR_SETBAND_VALUE or + * QCA_WLAN_VENDOR_ATTR_SETBAND_MASK. QCA_WLAN_VENDOR_ATTR_SETBAND_VALUE + * refers enum qca_set_band as unsigned integer values and + * QCA_WLAN_VENDOR_ATTR_SETBAND_MASK refers it as 32 bit unsigned BitMask + * values. Also, the acceptable values for + * QCA_WLAN_VENDOR_ATTR_SETBAND_VALUE are only till QCA_SETBAND_2G. Further + * values/bitmask's are valid for QCA_WLAN_VENDOR_ATTR_SETBAND_MASK. The + * attribute QCA_WLAN_VENDOR_ATTR_SETBAND_VALUE is deprecated and the + * recommendation is to use the QCA_WLAN_VENDOR_ATTR_SETBAND_MASK. If the + * implementations configure using both the attributes, the configurations + * through QCA_WLAN_VENDOR_ATTR_SETBAND_MASK shall always take the + * precedence. + * @QCA_NL80211_VENDOR_SUBCMD_TRIGGER_SCAN: venodr scan command + * @QCA_NL80211_VENDOR_SUBCMD_SCAN_DONE: vendor scan complete + * @QCA_NL80211_VENDOR_SUBCMD_ABORT_SCAN: vendor abort scan + * @QCA_NL80211_VENDOR_SUBCMD_OTA_TEST: enable OTA test + * @QCA_NL80211_VENDOR_SUBCMD_SET_TXPOWER_SCALE: set tx power by percentage + * @QCA_NL80211_VENDOR_SUBCMD_SET_TXPOWER_SCALE_DECR_DB: reduce tx power by DB + * @QCA_NL80211_VENDOR_SUBCMD_SET_SAP_CONFIG: SAP configuration + * @QCA_NL80211_VENDOR_SUBCMD_TSF: TSF operations command + * @QCA_NL80211_VENDOR_SUBCMD_WISA: WISA mode configuration + * @QCA_NL80211_VENDOR_SUBCMD_P2P_LISTEN_OFFLOAD_START: Command used to + * start the P2P Listen Offload function in device and pass the listen + * channel, period, interval, count, number of device types, device + * types and vendor information elements to device driver and firmware. + * @QCA_NL80211_VENDOR_SUBCMD_P2P_LISTEN_OFFLOAD_STOP: Command/event used to + * indicate stop request/response of the P2P Listen Offload function in + * device. As an event, it indicates either the feature stopped after it + * was already running or feature has actually failed to start. + * @QCA_NL80211_VENDOR_SUBCMD_GET_STATION: send BSS Information + * @QCA_NL80211_VENDOR_SUBCMD_SAP_CONDITIONAL_CHAN_SWITCH: After SAP starts + * beaconing, this sub command provides the driver, the frequencies on the + * 5 GHz to check for any radar activity. Driver selects one channel from + * this priority list provided through + * @QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_FREQ_LIST and starts + * to check for radar activity on it. If no radar activity is detected + * during the channel availability check period, driver internally switches + * to the selected frequency of operation. If the frequency is zero, driver + * internally selects a channel. The status of this conditional switch is + * indicated through an event using the same sub command through + * @QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_STATUS. Attributes are + * listed in qca_wlan_vendor_attr_sap_conditional_chan_switch + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_EXT: Command/event used to config + * indication period and threshold for MAC layer counters. + * @QCA_NL80211_VENDOR_SUBCMD_CONFIGURE_TDLS: Configure the TDLS behavior + * in the host driver. The different TDLS configurations are defined + * by the attributes in enum qca_wlan_vendor_attr_tdls_configuration. + * @QCA_NL80211_VENDOR_SUBCMD_GET_HE_CAPABILITIES: Get HE related capabilities + * @QCA_NL80211_VENDOR_SUBCMD_SET_SAR_LIMITS:Set the Specific Absorption Rate + * (SAR) power limits. A critical regulation for FCC compliance, OEMs + * require methods to set SAR limits on TX power of WLAN/WWAN. + * enum qca_vendor_attr_sar_limits attributes are used with this command. + * @QCA_NL80211_VENDOR_SUBCMD_EXTERNAL_ACS: Vendor command used to get/set + * configuration of vendor ACS. + * @QCA_NL80211_VENDOR_SUBCMD_CHIP_PWRSAVE_FAILURE: Vendor event carrying the + * requisite information leading to a power save failure. The information + * carried as part of this event is represented by the + * enum qca_attr_chip_power_save_failure attributes. + * @QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_SET: Start/Stop the NUD statistics + * collection. Uses attributes defined in enum qca_attr_nud_stats_set. + * @QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_GET: Get the NUD statistics. These + * statistics are represented by the enum qca_attr_nud_stats_get + * attributes. + * @QCA_NL80211_VENDOR_SUBCMD_FETCH_BSS_TRANSITION_STATUS: Sub-command to fetch + * the BSS transition status, whether accept or reject, for a list of + * candidate BSSIDs provided by the userspace. This uses the vendor + * attributes QCA_WLAN_VENDOR_ATTR_BTM_MBO_TRANSITION_REASON and + * QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO. The userspace shall specify + * the attributes QCA_WLAN_VENDOR_ATTR_BTM_MBO_TRANSITION_REASON and an + * array of QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_BSSID nested in + * QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO in the request. In the response + * the driver shall specify array of + * QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_BSSID and + * QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_STATUS pairs nested in + * QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO. + * @QCA_NL80211_VENDOR_SUBCMD_SET_TRACE_LEVEL: Set the trace level for a + * specific QCA module. The trace levels are represented by + * enum qca_attr_trace_level attributes. + * @QCA_NL80211_VENDOR_SUBCMD_BRP_SET_ANT_LIMIT: Set the Beam Refinement + * Protocol antenna limit in different modes. See enum + * qca_wlan_vendor_attr_brp_ant_limit_mode. + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_START: Start spectral scan. The scan + * parameters are specified by enum qca_wlan_vendor_attr_spectral_scan. + * This returns a cookie (%QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_COOKIE) + * identifying the operation in success case. In failure cases an + * error code (%QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_ERROR_CODE) + * describing the reason for the failure is returned. + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_STOP: Stop spectral scan. This uses + * a cookie (%QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_COOKIE) from + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_START to identify the scan to + * be stopped. + * @QCA_NL80211_VENDOR_SUBCMD_ACTIVE_TOS: Set the active Type Of Service on the + * specific interface. This can be used to modify some of the low level + * scan parameters (off channel dwell time, home channel time) in the + * driver/firmware. These parameters are maintained within the host + * driver. + * This command is valid only when the interface is in the connected + * state. + * These scan parameters shall be reset by the driver/firmware once + * disconnected. The attributes used with this command are defined in + * enum qca_wlan_vendor_attr_active_tos. + * @QCA_NL80211_VENDOR_SUBCMD_HANG: Event indicating to the user space that the + * driver has detected an internal failure. This event carries the + * information indicating the reason that triggered this detection. The + * attributes for this command are defined in + * enum qca_wlan_vendor_attr_hang. + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_CONFIG: Get the current values + * of spectral parameters used. The spectral scan parameters are specified + * by enum qca_wlan_vendor_attr_spectral_scan. + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_DIAG_STATS: Get the debug stats + * for spectral scan functionality. The debug stats are specified by + * enum qca_wlan_vendor_attr_spectral_diag_stats. + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_CAP_INFO: Get spectral + * scan system capabilities. The capabilities are specified + * by enum qca_wlan_vendor_attr_spectral_cap. + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_STATUS: Get the current + * status of spectral scan. The status values are specified + * by enum qca_wlan_vendor_attr_spectral_scan_status. + * @QCA_NL80211_VENDOR_SUBCMD_HTT_STATS: Request the firmware + * DP stats for a particualr stats type for response evnet + * it carries the stats data sent from the FW + * @QCA_NL80211_VENDOR_SUBCMD_GET_RROP_INFO: Get vendor specific Representative + * RF Operating Parameter (RROP) information. The attributes for this + * information are defined in enum qca_wlan_vendor_attr_rrop_info. This is + * intended for use by external Auto Channel Selection applications. + * @QCA_NL80211_VENDOR_SUBCMD_GET_SAR_LIMITS: Get the Specific Absorption Rate + * (SAR) power limits. This is a companion to the command + * @QCA_NL80211_VENDOR_SUBCMD_SET_SAR_LIMITS and is used to retrieve the + * settings currently in use. The attributes returned by this command are + * defined by enum qca_vendor_attr_sar_limits. + * @QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO: Provides the current behaviour of + * the WLAN hardware MAC's associated with each WLAN netdev interface. + * This works both as a query (user space asks the current mode) or event + * interface (driver advertizing the current mode to the user space). + * Driver does not trigger this event for temporary hardware mode changes. + * Mode changes w.r.t Wi-Fi connection updation ( VIZ creation / deletion, + * channel change etc ) are updated with this event. Attributes for this + * interface are defined in enum qca_wlan_vendor_attr_mac. + * @QCA_NL80211_VENDOR_SUBCMD_SET_QDEPTH_THRESH: Set MSDU queue depth threshold + * per peer per TID. Attributes for this command are define in + * enum qca_wlan_set_qdepth_thresh_attr + * @QCA_NL80211_VENDOR_SUBCMD_WIFI_TEST_CONFIGURATION: Sub command to set WiFi + * test configuration. Attributes for this command are defined in + * enum qca_wlan_vendor_attr_wifi_test_config. + * @QCA_NL80211_VENDOR_SUBCMD_NAN_EXT: An extendable version of NAN vendor + * command. The earlier command for NAN, QCA_NL80211_VENDOR_SUBCMD_NAN, + * carried a payload which was a binary blob of data. The command was not + * extendable to send more information. The newer version carries the + * legacy blob encapsulated within an attribute and can be extended with + * additional vendor attributes that can enhance the NAN command + * interface. + * @QCA_NL80211_VENDOR_SUBCMD_PEER_CFR_CAPTURE_CFG: This command is used to + * configure parameters per peer to capture Channel Frequency Response + * (CFR) and enable Periodic CFR capture. The attributes for this command + * are defined in enum qca_wlan_vendor_peer_cfr_capture_attr. + * @QCA_NL80211_VENDOR_SUBCMD_GET_FW_STATE: Sub command to get firmware state. + * The returned firmware state is specified in the attribute + * QCA_WLAN_VENDOR_ATTR_FW_STATE. + * @QCA_NL80211_VENDOR_SUBCMD_PEER_STATS_CACHE_FLUSH: This vendor subcommand + * is used by host driver to flush per-peer cached statistics to user space + * application. This interface is used as an event from host driver to + * user space application. Attributes for this event are specified in + * enum qca_wlan_vendor_attr_peer_stats_cache_params. + * QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_DATA attribute is expected to be + * sent as event from host driver. + * @QCA_NL80211_VENDOR_SUBCMD_MPTA_HELPER_CONFIG: This sub command is used to + * improve the success rate of Zigbee joining network. + * Due to PTA master limitation, zigbee joining network success rate is + * low while wlan is working. Wlan host driver need to configure some + * parameters including Zigbee state and specific WLAN periods to enhance + * PTA master. All this parameters are delivered by the NetLink attributes + * defined in "enum qca_mpta_helper_vendor_attr". + * @QCA_NL80211_VENDOR_SUBCMD_BEACON_REPORTING: This sub command is used to + * implement Beacon frame reporting feature. + * + * Userspace can request the driver/firmware to periodically report + * received Beacon frames whose BSSID is same as the current connected + * BSS's MAC address. + * + * In case the STA seamlessly (without sending disconnect indication to + * userspace) roams to a different BSS, Beacon frame reporting will be + * automatically enabled for the Beacon frames whose BSSID is same as the + * MAC address of the new BSS. Beacon reporting will be stopped when the + * STA is disconnected (when the disconnect indication is sent to + * userspace) and need to be explicitly enabled by userspace for next + * connection. + * + * When a Beacon frame matching configured conditions is received, and if + * userspace has requested to send asynchronous beacon reports, the + * driver/firmware will encapsulate the details of the Beacon frame in an + * event and send it to userspace along with updating the BSS information + * in cfg80211 scan cache, otherwise driver will only update the cfg80211 + * scan cache with the information from the received Beacon frame but + * will not send any active report to userspace. + * + * The userspace can request the driver/firmware to stop reporting Beacon + * frames. If the driver/firmware is not able to receive Beacon frames + * due to other Wi-Fi operations such as off-channel activities, etc., + * the driver/firmware will send a pause event to userspace and stop + * reporting Beacon frames. Whether the beacon reporting will be + * automatically resumed or not by the driver/firmware later will be + * reported to userspace using the + * QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_AUTO_RESUMES flag. The beacon + * reporting shall be resumed for all the cases except either when + * userspace sets QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_DO_NOT_RESUME flag + * in the command which triggered the current beacon reporting or during + * any disconnection case as indicated by setting + * QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_PAUSE_REASON to + * QCA_WLAN_VENDOR_BEACON_REPORTING_PAUSE_REASON_DISCONNECTED by the + * driver. + * + * After QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_PAUSE event is received + * by userspace with QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_AUTO_RESUMES + * flag not set, the next first + * QCA_WLAN_VENDOR_BEACON_REPORTING_OP_BEACON_INFO event from the driver + * shall be considered as un-pause event. + * + * All the attributes used with this command are defined in + * enum qca_wlan_vendor_attr_beacon_reporting_params. + * @QCA_NL80211_VENDOR_SUBCMD_INTEROP_ISSUES_AP: In practice, some aps have + * interop issues with the DUT. This sub command is used to transfer the + * ap info between driver and user space. This works both as a command + * or event. As a command, it configs the stored list of aps from user + * space to firmware; as an event, it indicates the ap info detected by + * firmware to user space for persistent storage. The attributes defined + * in enum qca_vendor_attr_interop_issues_ap are used to deliver the + * parameters. + * @QCA_NL80211_VENDOR_SUBCMD_OEM_DATA: This command is used to send OEM data + * binary blobs from application/service to firmware. The attributes + * defined in enum qca_wlan_vendor_attr_oem_data_params are used to + * deliver the parameters. + * @QCA_NL80211_VENDOR_SUBCMD_AVOID_FREQUENCY_EXT: This command/event is used + * to send/receive avoid frequency data using + * enum qca_wlan_vendor_attr_avoid_frequency_ext. + * This new command is alternative to existing command + * QCA_NL80211_VENDOR_SUBCMD_AVOID_FREQUENCY since existing command/event + * is using stream of bytes instead of structured data using vendor + * attributes. + * @QCA_NL80211_VENDOR_SUBCMD_ADD_STA_NODE: This vendor subcommand is used to + * add the STA node details in driver/firmware. Attributes for this event + * are specified in enum qca_wlan_vendor_attr_add_sta_node_params. + * @QCA_NL80211_VENDOR_SUBCMD_BTC_CHAIN_MODE: This command is used to set BT + * coex chain mode from application/service. + * The attributes defined in enum qca_vendor_attr_btc_chain_mode are used + * to deliver the parameters. + * @QCA_NL80211_VENDOR_SUBCMD_GET_STA_INFO: This vendor subcommand is used to + * get information of a station from driver to userspace. This command can + * be used in both STA and AP modes. For STA mode, it provides information + * of the current association when in connected state or the last + * association when in disconnected state. For AP mode, only information + * of the currently connected stations is available. This command uses + * attributes defined in enum qca_wlan_vendor_attr_get_sta_info. + * @QCA_NL80211_VENDOR_SUBCMD_REQUEST_SAR_LIMITS_EVENT: This acts as an event. + * Host drivers can request the user space entity to set the SAR power + * limits with this event. Accordingly, the user space entity is expected + * to set the SAR power limits. Host drivers can retry this event to the + * user space for the SAR power limits configuration from user space. If + * the driver does not get the SAR power limits from user space for all + * the retried attempts, it can configure a default SAR power limit. + * @QCA_NL80211_VENDOR_SUBCMD_UPDATE_STA_INFO: This acts as a vendor event and + * is used to update the information about the station from the driver to + * userspace. Uses attributes from enum + * qca_wlan_vendor_attr_update_sta_info. + * + * @QCA_NL80211_VENDOR_SUBCMD_DRIVER_DISCONNECT_REASON: This acts as an event. + * The host driver initiates the disconnection for scenarios such as beacon + * miss, NUD failure, peer kick out, etc. The disconnection indication + * through cfg80211_disconnected() expects the reason codes from enum + * ieee80211_reasoncode which does not signify these various reasons why + * the driver has triggered the disconnection. This event will be used to + * send the driver specific reason codes by the host driver to userspace. + * Host drivers should trigger this event and pass the respective reason + * code immediately prior to triggering cfg80211_disconnected(). The + * attributes used with this event are defined in enum + * qca_wlan_vendor_attr_driver_disconnect_reason. + * + * @QCA_NL80211_VENDOR_SUBCMD_CONFIG_TWT: Vendor subcommand to configure TWT. + * Uses attributes defined in enum qca_wlan_vendor_attr_config_twt. + * + * @QCA_NL80211_VENDOR_SUBCMD_GETBAND: Command to get the configured band from + * the host driver. The band configurations obtained are referred through + * QCA_WLAN_VENDOR_ATTR_SETBAND_MASK. + * + */ + +enum qca_nl80211_vendor_subcmds { + QCA_NL80211_VENDOR_SUBCMD_UNSPEC = 0, + QCA_NL80211_VENDOR_SUBCMD_TEST = 1, + QCA_NL80211_VENDOR_SUBCMD_ROAMING = 9, + QCA_NL80211_VENDOR_SUBCMD_AVOID_FREQUENCY = 10, + QCA_NL80211_VENDOR_SUBCMD_DFS_CAPABILITY = 11, + QCA_NL80211_VENDOR_SUBCMD_NAN = 12, + QCA_NL80211_VENDOR_SUBCMD_STATS_EXT = 13, + + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_SET = 14, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_GET = 15, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_CLR = 16, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_RADIO_RESULTS = 17, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_IFACE_RESULTS = 18, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_PEERS_RESULTS = 19, + + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_START = 20, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_STOP = 21, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_VALID_CHANNELS = 22, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CAPABILITIES = 23, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CACHED_RESULTS = 24, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_RESULTS_AVAILABLE = 25, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_FULL_SCAN_RESULT = 26, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_EVENT = 27, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_FOUND = 28, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_BSSID_HOTLIST = 29, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_BSSID_HOTLIST = 30, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SIGNIFICANT_CHANGE = 31, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SIGNIFICANT_CHANGE = 32, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SIGNIFICANT_CHANGE = 33, + + QCA_NL80211_VENDOR_SUBCMD_TDLS_ENABLE = 34, + QCA_NL80211_VENDOR_SUBCMD_TDLS_DISABLE = 35, + QCA_NL80211_VENDOR_SUBCMD_TDLS_GET_STATUS = 36, + QCA_NL80211_VENDOR_SUBCMD_TDLS_STATE = 37, + + QCA_NL80211_VENDOR_SUBCMD_GET_SUPPORTED_FEATURES = 38, + + QCA_NL80211_VENDOR_SUBCMD_SCANNING_MAC_OUI = 39, + QCA_NL80211_VENDOR_SUBCMD_NO_DFS_FLAG = 40, + + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_LOST = 41, + + /* Get Concurrency Matrix */ + QCA_NL80211_VENDOR_SUBCMD_GET_CONCURRENCY_MATRIX = 42, + + QCA_NL80211_VENDOR_SUBCMD_KEY_MGMT_SET_KEY = 50, + QCA_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH = 51, + QCA_NL80211_VENDOR_SUBCMD_APFIND = 52, + + /* Deprecated */ + QCA_NL80211_VENDOR_SUBCMD_OCB_SET_SCHED = 53, + + QCA_NL80211_VENDOR_SUBCMD_DO_ACS = 54, + + QCA_NL80211_VENDOR_SUBCMD_GET_FEATURES = 55, + + /* Off loaded DFS events */ + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_STARTED = 56, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_FINISHED = 57, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_ABORTED = 58, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_NOP_FINISHED = 59, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_RADAR_DETECTED = 60, + + QCA_NL80211_VENDOR_SUBCMD_GET_WIFI_INFO = 61, + QCA_NL80211_VENDOR_SUBCMD_WIFI_LOGGER_START = 62, + QCA_NL80211_VENDOR_SUBCMD_WIFI_LOGGER_MEMORY_DUMP = 63, + QCA_NL80211_VENDOR_SUBCMD_ROAM = 64, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SSID_HOTLIST = 65, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SSID_HOTLIST = 66, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_FOUND = 67, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_LOST = 68, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_SET_LIST = 69, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_SET_PASSPOINT_LIST = 70, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_RESET_PASSPOINT_LIST = 71, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_NETWORK_FOUND = 72, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_PASSPOINT_NETWORK_FOUND = 73, + + /* Wi-Fi Configuration subcommands */ + QCA_NL80211_VENDOR_SUBCMD_SET_WIFI_CONFIGURATION = 74, + QCA_NL80211_VENDOR_SUBCMD_GET_WIFI_CONFIGURATION = 75, + QCA_NL80211_VENDOR_SUBCMD_GET_LOGGER_FEATURE_SET = 76, + QCA_NL80211_VENDOR_SUBCMD_GET_RING_DATA = 77, + + QCA_NL80211_VENDOR_SUBCMD_TDLS_GET_CAPABILITIES = 78, + QCA_NL80211_VENDOR_SUBCMD_OFFLOADED_PACKETS = 79, + QCA_NL80211_VENDOR_SUBCMD_MONITOR_RSSI = 80, + QCA_NL80211_VENDOR_SUBCMD_NDP = 81, + + /* NS Offload enable/disable cmd */ + QCA_NL80211_VENDOR_SUBCMD_ND_OFFLOAD = 82, + + QCA_NL80211_VENDOR_SUBCMD_PACKET_FILTER = 83, + QCA_NL80211_VENDOR_SUBCMD_GET_BUS_SIZE = 84, + + QCA_NL80211_VENDOR_SUBCMD_GET_WAKE_REASON_STATS = 85, + + QCA_NL80211_VENDOR_SUBCMD_DATA_OFFLOAD = 91, + /* OCB commands */ + QCA_NL80211_VENDOR_SUBCMD_OCB_SET_CONFIG = 92, + QCA_NL80211_VENDOR_SUBCMD_OCB_SET_UTC_TIME = 93, + QCA_NL80211_VENDOR_SUBCMD_OCB_START_TIMING_ADVERT = 94, + QCA_NL80211_VENDOR_SUBCMD_OCB_STOP_TIMING_ADVERT = 95, + QCA_NL80211_VENDOR_SUBCMD_OCB_GET_TSF_TIMER = 96, + QCA_NL80211_VENDOR_SUBCMD_DCC_GET_STATS = 97, + QCA_NL80211_VENDOR_SUBCMD_DCC_CLEAR_STATS = 98, + QCA_NL80211_VENDOR_SUBCMD_DCC_UPDATE_NDL = 99, + QCA_NL80211_VENDOR_SUBCMD_DCC_STATS_EVENT = 100, + + /* subcommand to get link properties */ + QCA_NL80211_VENDOR_SUBCMD_LINK_PROPERTIES = 101, + /* LFR Subnet Detection */ + QCA_NL80211_VENDOR_SUBCMD_GW_PARAM_CONFIG = 102, + + /* DBS subcommands */ + QCA_NL80211_VENDOR_SUBCMD_GET_PREFERRED_FREQ_LIST = 103, + QCA_NL80211_VENDOR_SUBCMD_SET_PROBABLE_OPER_CHANNEL = 104, + + /* Vendor setband command */ + QCA_NL80211_VENDOR_SUBCMD_SETBAND = 105, + + /* Vendor scan commands */ + QCA_NL80211_VENDOR_SUBCMD_TRIGGER_SCAN = 106, + QCA_NL80211_VENDOR_SUBCMD_SCAN_DONE = 107, + + /* OTA test subcommand */ + QCA_NL80211_VENDOR_SUBCMD_OTA_TEST = 108, + /* Tx power scaling subcommands */ + QCA_NL80211_VENDOR_SUBCMD_SET_TXPOWER_SCALE = 109, + /* Tx power scaling in db subcommands */ + QCA_NL80211_VENDOR_SUBCMD_SET_TXPOWER_SCALE_DECR_DB = 115, + QCA_NL80211_VENDOR_SUBCMD_ACS_POLICY = 116, + QCA_NL80211_VENDOR_SUBCMD_STA_CONNECT_ROAM_POLICY = 117, + QCA_NL80211_VENDOR_SUBCMD_SET_SAP_CONFIG = 118, + QCA_NL80211_VENDOR_SUBCMD_TSF = 119, + QCA_NL80211_VENDOR_SUBCMD_WISA = 120, + QCA_NL80211_VENDOR_SUBCMD_GET_STATION = 121, + QCA_NL80211_VENDOR_SUBCMD_P2P_LISTEN_OFFLOAD_START = 122, + QCA_NL80211_VENDOR_SUBCMD_P2P_LISTEN_OFFLOAD_STOP = 123, + QCA_NL80211_VENDOR_SUBCMD_SAP_CONDITIONAL_CHAN_SWITCH = 124, + QCA_NL80211_VENDOR_SUBCMD_GPIO_CONFIG_COMMAND = 125, + + QCA_NL80211_VENDOR_SUBCMD_GET_HW_CAPABILITY = 126, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_EXT = 127, + /* FTM/indoor location subcommands */ + QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA = 128, + QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION = 129, + QCA_NL80211_VENDOR_SUBCMD_FTM_ABORT_SESSION = 130, + QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT = 131, + QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE = 132, + QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER = 133, + QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS = 134, + QCA_NL80211_VENDOR_SUBCMD_AOA_ABORT_MEAS = 135, + QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT = 136, + + /* Encrypt/Decrypt command */ + QCA_NL80211_VENDOR_SUBCMD_ENCRYPTION_TEST = 137, + + QCA_NL80211_VENDOR_SUBCMD_GET_CHAIN_RSSI = 138, + /* DMG low level RF sector operations */ + QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG = 139, + QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG = 140, + QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR = 141, + QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR = 142, + + /* Configure the TDLS mode from user space */ + QCA_NL80211_VENDOR_SUBCMD_CONFIGURE_TDLS = 143, + + QCA_NL80211_VENDOR_SUBCMD_GET_HE_CAPABILITIES = 144, + + /* Vendor abort scan command */ + QCA_NL80211_VENDOR_SUBCMD_ABORT_SCAN = 145, + + /* Set Specific Absorption Rate(SAR) Power Limits */ + QCA_NL80211_VENDOR_SUBCMD_SET_SAR_LIMITS = 146, + + /* External Auto channel configuration setting */ + QCA_NL80211_VENDOR_SUBCMD_EXTERNAL_ACS = 147, + + QCA_NL80211_VENDOR_SUBCMD_CHIP_PWRSAVE_FAILURE = 148, + QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_SET = 149, + QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_GET = 150, + QCA_NL80211_VENDOR_SUBCMD_FETCH_BSS_TRANSITION_STATUS = 151, + + /* Set the trace level for QDF */ + QCA_NL80211_VENDOR_SUBCMD_SET_TRACE_LEVEL = 152, + + QCA_NL80211_VENDOR_SUBCMD_BRP_SET_ANT_LIMIT = 153, + + QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_START = 154, + QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_STOP = 155, + QCA_NL80211_VENDOR_SUBCMD_ACTIVE_TOS = 156, + QCA_NL80211_VENDOR_SUBCMD_HANG = 157, + QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_CONFIG = 158, + QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_DIAG_STATS = 159, + QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_CAP_INFO = 160, + QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_STATUS = 161, + QCA_NL80211_VENDOR_SUBCMD_HTT_STATS = 162, + QCA_NL80211_VENDOR_SUBCMD_GET_RROP_INFO = 163, + QCA_NL80211_VENDOR_SUBCMD_GET_SAR_LIMITS = 164, + QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO = 165, + QCA_NL80211_VENDOR_SUBCMD_SET_QDEPTH_THRESH = 166, + QCA_NL80211_VENDOR_SUBCMD_THERMAL_CMD = 167, + /* Wi-Fi test configuration subcommand */ + QCA_NL80211_VENDOR_SUBCMD_WIFI_TEST_CONFIGURATION = 169, + QCA_NL80211_VENDOR_SUBCMD_NAN_EXT = 171, + QCA_NL80211_VENDOR_SUBCMD_PEER_CFR_CAPTURE_CFG = 173, + QCA_NL80211_VENDOR_SUBCMD_THROUGHPUT_CHANGE_EVENT = 174, + QCA_NL80211_VENDOR_SUBCMD_COEX_CONFIG = 175, + QCA_NL80211_VENDOR_SUBCMD_GET_FW_STATE = 177, + QCA_NL80211_VENDOR_SUBCMD_PEER_STATS_CACHE_FLUSH = 178, + QCA_NL80211_VENDOR_SUBCMD_MPTA_HELPER_CONFIG = 179, + QCA_NL80211_VENDOR_SUBCMD_BEACON_REPORTING = 180, + QCA_NL80211_VENDOR_SUBCMD_INTEROP_ISSUES_AP = 181, + QCA_NL80211_VENDOR_SUBCMD_OEM_DATA = 182, + QCA_NL80211_VENDOR_SUBCMD_AVOID_FREQUENCY_EXT = 183, + QCA_NL80211_VENDOR_SUBCMD_ADD_STA_NODE = 184, + QCA_NL80211_VENDOR_SUBCMD_BTC_CHAIN_MODE = 185, + QCA_NL80211_VENDOR_SUBCMD_GET_STA_INFO = 186, + QCA_NL80211_VENDOR_SUBCMD_GET_SAR_LIMITS_EVENT = 187, + QCA_NL80211_VENDOR_SUBCMD_UPDATE_STA_INFO = 188, + QCA_NL80211_VENDOR_SUBCMD_DRIVER_DISCONNECT_REASON = 189, + QCA_NL80211_VENDOR_SUBCMD_CONFIG_TWT = 191, + QCA_NL80211_VENDOR_SUBCMD_GETBAND = 192, +}; + +enum qca_wlan_vendor_tos { + QCA_WLAN_VENDOR_TOS_BK = 0, + QCA_WLAN_VENDOR_TOS_BE = 1, + QCA_WLAN_VENDOR_TOS_VI = 2, + QCA_WLAN_VENDOR_TOS_VO = 3, +}; + +/** + * enum qca_wlan_vendor_attr_active_tos - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_ACTIVE_TOS. + */ +enum qca_wlan_vendor_attr_active_tos { + QCA_WLAN_VENDOR_ATTR_ACTIVE_TOS_INVALID = 0, + /* Type Of Service - Represented by qca_wlan_vendor_tos */ + QCA_WLAN_VENDOR_ATTR_ACTIVE_TOS = 1, + /* Flag attribute representing the start (attribute included) or stop + * (attribute not included) of the respective TOS. + */ + QCA_WLAN_VENDOR_ATTR_ACTIVE_TOS_START = 2, + QCA_WLAN_VENDOR_ATTR_ACTIVE_TOS_MAX = 3, +}; + +enum qca_wlan_vendor_hang_reason { + /* Unspecified reason */ + QCA_WLAN_HANG_REASON_UNSPECIFIED = 0, + /* No Map for the MAC entry for the received frame */ + QCA_WLAN_HANG_RX_HASH_NO_ENTRY_FOUND = 1, + /* peer deletion timeout happened */ + QCA_WLAN_HANG_PEER_DELETION_TIMEDOUT = 2, + /* peer unmap timeout */ + QCA_WLAN_HANG_PEER_UNMAP_TIMEDOUT = 3, + /* Scan request timed out */ + QCA_WLAN_HANG_SCAN_REQ_EXPIRED = 4, + /* Consecutive Scan attempt failures */ + QCA_WLAN_HANG_SCAN_ATTEMPT_FAILURES = 5, + /* Unable to get the message buffer */ + QCA_WLAN_HANG_GET_MSG_BUFF_FAILURE = 6, + /* Current command processing is timedout */ + QCA_WLAN_HANG_ACTIVE_LIST_TIMEOUT = 7, + /* Timeout for an ACK from FW for suspend request */ + QCA_WLAN_HANG_SUSPEND_TIMEOUT = 8, + /* Timeout for an ACK from FW for resume request */ + QCA_WLAN_HANG_RESUME_TIMEOUT = 9, + /* Transmission timeout for consecutive data frames */ + QCA_WLAN_HANG_TRANSMISSIONS_TIMEOUT = 10, + /* Timeout for the TX completion status of data frame */ + QCA_WLAN_HANG_TX_COMPLETE_TIMEOUT = 11, + /* DXE failure for tx/Rx, DXE resource unavailability */ + QCA_WLAN_HANG_DXE_FAILURE = 12, + /* WMI pending commands exceed the maximum count */ + QCA_WLAN_HANG_WMI_EXCEED_MAX_PENDING_CMDS = 13, + /* Timeout for peer STA connection accept command's response from the + * FW in AP mode. This command is triggered when a STA (peer) connects + * to AP (DUT). + */ + QCA_WLAN_HANG_AP_STA_CONNECT_REQ_TIMEOUT = 14, + /* Timeout for the AP connection accept command's response from the FW + * in STA mode. This command is triggered when the STA (DUT) connects + * to an AP (peer). + */ + QCA_WLAN_HANG_STA_AP_CONNECT_REQ_TIMEOUT = 15, + /* Timeout waiting for the response to the MAC HW mode change command + * sent to FW as a part of MAC mode switch among DBS (Dual Band + * Simultaneous), SCC (Single Channel Concurrency), and MCC (Multi + * Channel Concurrency) mode. + */ + QCA_WLAN_HANG_MAC_HW_MODE_CHANGE_TIMEOUT = 16, + /* Timeout waiting for the response from FW to configure the MAC HW's + * mode. This operation is to configure the single/two MACs in either + * SCC/MCC/DBS mode. + */ + QCA_WLAN_HANG_MAC_HW_MODE_CONFIG_TIMEOUT = 17, + /* Timeout waiting for response of VDEV start command from the FW */ + QCA_WLAN_HANG_VDEV_START_RESPONSE_TIMED_OUT = 18, + /* Timeout waiting for response of VDEV restart command from the FW */ + QCA_WLAN_HANG_VDEV_RESTART_RESPONSE_TIMED_OUT = 19, + /* Timeout waiting for response of VDEV stop command from the FW */ + QCA_WLAN_HANG_VDEV_STOP_RESPONSE_TIMED_OUT = 20, + /* Timeout waiting for response of VDEV delete command from the FW */ + QCA_WLAN_HANG_VDEV_DELETE_RESPONSE_TIMED_OUT = 21, + /* Timeout waiting for response of peer all delete request command to + * the FW on a specific VDEV. + */ + QCA_WLAN_HANG_VDEV_PEER_DELETE_ALL_RESPONSE_TIMED_OUT = 22, + /* WMI sequence mismatch between WMI command and Tx completion */ + QCA_WLAN_HANG_WMI_BUF_SEQUENCE_MISMATCH = 23, + /* Write to Device HAL register failed */ + QCA_WLAN_HANG_REG_WRITE_FAILURE = 24, + /* No credit left to send the wow_wakeup_from_sleep to firmware */ + QCA_WLAN_HANG_SUSPEND_NO_CREDIT = 25, + /* Bus failure */ + QCA_WLAN_HANG_BUS_FAILURE = 26, +}; + +/** + * enum qca_wlan_vendor_attr_hang - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_HANG. + */ +enum qca_wlan_vendor_attr_hang { + QCA_WLAN_VENDOR_ATTR_HANG_INVALID = 0, + /* + * Reason for the Hang - Represented by enum + * qca_wlan_vendor_hang_reason. + */ + QCA_WLAN_VENDOR_ATTR_HANG_REASON = 1, + /* The binary blob data associated with the hang reason specified by + * QCA_WLAN_VENDOR_ATTR_HANG_REASON. This binary data is expected to + * contain the required dump to analyze the reason for the hang. + * NLA_BINARY attribute, the max size is 1024 bytes. + */ + QCA_WLAN_VENDOR_ATTR_HANG_REASON_DATA = 2, + QCA_WLAN_VENDOR_ATTR_HANG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_HANG_MAX = + QCA_WLAN_VENDOR_ATTR_HANG_AFTER_LAST - 1, +}; + +/** + * enum qca_vendor_attr_set_trace_level - Config params for QDF set trace level + * @QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_INVALID: Invalid trace level + * @QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_PARAM : Trace level parameters + * @QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_MODULE_ID : Module of which trace + level needs to be updated. + * @QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_TRACE_MASK : verbose mask, which need + * to be set. + * @QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_AFTER_LAST : after last. + * @QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_MAX : Max attribute. + */ +enum qca_vendor_attr_set_trace_level { + QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_INVALID = 0, + /* + * Array of QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_PARAM + * attributes. + */ + QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_PARAM = 1, + QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_MODULE_ID = 2, + QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_TRACE_MASK = 3, + QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_MAX = + QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_get_station - Sub commands used by + * QCA_NL80211_VENDOR_SUBCMD_GET_STATION to get the corresponding + * station information. The information obtained through these + * commands signify the current info in connected state and + * latest cached information during the connected state , if queried + * when in disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INVALID: Invalid attribute + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO: bss info + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_ASSOC_FAIL_REASON: assoc fail reason + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_REMOTE: remote station info + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_AFTER_LAST: After last + */ +enum qca_wlan_vendor_attr_get_station { + QCA_WLAN_VENDOR_ATTR_GET_STATION_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO, + QCA_WLAN_VENDOR_ATTR_GET_STATION_ASSOC_FAIL_REASON, + QCA_WLAN_VENDOR_ATTR_GET_STATION_REMOTE, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_GET_STATION_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_GET_STATION_MAX = + QCA_WLAN_VENDOR_ATTR_GET_STATION_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_update_sta_info - Defines attributes + * used by QCA_NL80211_VENDOR_SUBCMD_UPDATE_STA_INFO vendor command. + * + * @QCA_WLAN_VENDOR_ATTR_UPDATE_STA_INFO_CONNECT_CHANNELS: Type is NLA_UNSPEC. + * Used in STA mode. This attribute represents the list of channel center + * frequencies in MHz (u32) the station has learnt during the last connection + * or roaming attempt. This information shall not signify the channels for + * an explicit scan request from the user space. Host drivers can update this + * information to the user space in both connected and disconnected state. + * In the disconnected state this information shall signify the channels + * scanned in the last connection/roam attempt that lead to the disconnection. + */ +enum qca_wlan_vendor_attr_update_sta_info { + QCA_WLAN_VENDOR_ATTR_UPDATE_STA_INFO_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_UPDATE_STA_INFO_CONNECT_CHANNELS = 1, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_UPDATE_STA_INFO_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_UPDATE_STA_INFO_MAX = + QCA_WLAN_VENDOR_ATTR_UPDATE_STA_INFO_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_802_11_mode - dot11 mode + * @QCA_WLAN_802_11_MODE_11B: mode B + * @QCA_WLAN_802_11_MODE_11G: mode G + * @QCA_WLAN_802_11_MODE_11N: mode N + * @QCA_WLAN_802_11_MODE_11A: mode A + * @QCA_WLAN_802_11_MODE_11AC: mode AC + * @QCA_WLAN_802_11_MODE_11AX: mode AX + * @QCA_WLAN_802_11_MODE_INVALID: Invalid dot11 mode + */ +enum qca_wlan_802_11_mode { + QCA_WLAN_802_11_MODE_11B, + QCA_WLAN_802_11_MODE_11G, + QCA_WLAN_802_11_MODE_11N, + QCA_WLAN_802_11_MODE_11A, + QCA_WLAN_802_11_MODE_11AC, + QCA_WLAN_802_11_MODE_11AX, + QCA_WLAN_802_11_MODE_INVALID, +}; + +/** + * enum qca_wlan_auth_type - Authentication key management type + * @QCA_WLAN_AUTH_TYPE_INVALID: Invalid key management type + * @QCA_WLAN_AUTH_TYPE_OPEN: Open key + * @QCA_WLAN_AUTH_TYPE_SHARED: shared key + * @QCA_WLAN_AUTH_TYPE_WPA: wpa key + * @QCA_WLAN_AUTH_TYPE_WPA_PSK: wpa psk key + * @QCA_WLAN_AUTH_TYPE_WPA_NONE: wpa none key + * @QCA_WLAN_AUTH_TYPE_RSN: rsn key + * @QCA_WLAN_AUTH_TYPE_RSN_PSK: rsn psk key + * @QCA_WLAN_AUTH_TYPE_FT: ft key + * @QCA_WLAN_AUTH_TYPE_FT_PSK: ft psk key + * @QCA_WLAN_AUTH_TYPE_SHA256: shared 256 key + * @QCA_WLAN_AUTH_TYPE_SHA256_PSK: shared 256 psk + * @QCA_WLAN_AUTH_TYPE_WAI: wai key + * @QCA_WLAN_AUTH_TYPE_WAI_PSK wai psk key + * @QCA_WLAN_AUTH_TYPE_CCKM_WPA: cckm wpa key + * @QCA_WLAN_AUTH_TYPE_CCKM_RSN: cckm rsn key + * @QCA_WLAN_AUTH_TYPE_FT_SAE: FT sae akm + * @QCA_WLAN_AUTH_TYPE_FT_SUITEB_EAP_SHA384: FT suite B SHA384 + */ +enum qca_wlan_auth_type { + QCA_WLAN_AUTH_TYPE_INVALID, + QCA_WLAN_AUTH_TYPE_OPEN, + QCA_WLAN_AUTH_TYPE_SHARED, + QCA_WLAN_AUTH_TYPE_WPA, + QCA_WLAN_AUTH_TYPE_WPA_PSK, + QCA_WLAN_AUTH_TYPE_WPA_NONE, + QCA_WLAN_AUTH_TYPE_RSN, + QCA_WLAN_AUTH_TYPE_RSN_PSK, + QCA_WLAN_AUTH_TYPE_FT, + QCA_WLAN_AUTH_TYPE_FT_PSK, + QCA_WLAN_AUTH_TYPE_SHA256, + QCA_WLAN_AUTH_TYPE_SHA256_PSK, + QCA_WLAN_AUTH_TYPE_WAI, + QCA_WLAN_AUTH_TYPE_WAI_PSK, + QCA_WLAN_AUTH_TYPE_CCKM_WPA, + QCA_WLAN_AUTH_TYPE_CCKM_RSN, + QCA_WLAN_AUTH_TYPE_AUTOSWITCH, + QCA_WLAN_AUTH_TYPE_FT_SAE, + QCA_WLAN_AUTH_TYPE_FT_SUITEB_EAP_SHA384, +}; + +/** + * enum qca_wlan_vendor_attr_get_station_info - Station Info queried + * through QCA_NL80211_VENDOR_SUBCMD_GET_STATION. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_INVALID: Invalid Attribute + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_LINK_STANDARD_NL80211_ATTR: + * Get the standard NL attributes Nested with this attribute. + * Ex : Query BW , BITRATE32 , NSS , Signal , Noise of the Link - + * NL80211_ATTR_SSID / NL80211_ATTR_SURVEY_INFO (Connected Channel) / + * NL80211_ATTR_STA_INFO + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AP_STANDARD_NL80211_ATTR: + * Get the standard NL attributes Nested with this attribute. + * Ex : Query HT/VHT Capability advertized by the AP. + * NL80211_ATTR_VHT_CAPABILITY / NL80211_ATTR_HT_CAPABILITY + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_ROAM_COUNT: + * Number of successful Roam attempts before a + * disconnect, Unsigned 32 bit value + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AKM: + * Authentication Key Management Type used for the connected session. + * Signified by enum qca_wlan_auth_type + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_802_11_MODE: 802.11 Mode of the + * connected Session, signified by enum qca_wlan_802_11_mode + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AP_HS20_INDICATION: + * HS20 Indication Element + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_ASSOC_FAIL_REASON: + * Status Code Corresponding to the Association Failure. + * Unsigned 32 bit value. + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_MAX_PHY_RATE: + * Max phy rate of remote station + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_PACKETS: + * TX packets to remote station + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_BYTES: + * TX bytes to remote station + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_PACKETS: + * RX packets from remote station + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_BYTES: + * RX bytes from remote station + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_LAST_TX_RATE: + * Last TX rate with remote station + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_LAST_RX_RATE: + * Last RX rate with remote station + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_WMM: + * Remote station enable/disable WMM + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_SUPPORTED_MODE: + * Remote station connection mode + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_AMPDU: + * Remote station AMPDU enable/disable + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_STBC: + * Remote station TX Space-time block coding enable/disable + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_STBC: + * Remote station RX Space-time block coding enable/disable + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_CH_WIDTH: + * Remote station channel width + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_SGI_ENABLE: + * Remote station short GI enable/disable + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_PAD: Attribute type for padding + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_BEACON_IES: Binary attribute + * containing the raw information elements from Beacon frames. Represents + * the Beacon frames of the current BSS in the connected state. When queried + * in the disconnected state, these IEs correspond to the last connected BSSID. + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_DRIVER_DISCONNECT_REASON: u32, Driver + * disconnect reason for the last disconnection if the disconnection is + * triggered from the host driver. The values are referred from + * enum qca_disconnect_reason_codes. If the disconnect is from + * peer/userspace this value is QCA_DISCONNECT_REASON_UNSPECIFIED. + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_ASSOC_REQ_IES: Binary attribute + * Applicable in AP mode only. It contains the raw information elements + * from assoc request frame of the given peer station. User queries with the + * mac address of peer station when it disconnects. Host driver sends + * assoc request frame of the given station. Host driver doesn't provide + * the IEs when the peer station is still in connected state. + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_HE_OPERATION: Attribute type for + * sending HE operation info. + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AFTER_LAST: After last + */ +enum qca_wlan_vendor_attr_get_station_info { + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_LINK_STANDARD_NL80211_ATTR, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AP_STANDARD_NL80211_ATTR, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_ROAM_COUNT, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AKM, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_802_11_MODE, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AP_HS20_INDICATION, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_HT_OPERATION, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_VHT_OPERATION, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_ASSOC_FAIL_REASON, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_MAX_PHY_RATE, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_PACKETS, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_BYTES, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_PACKETS, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_BYTES, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_LAST_TX_RATE, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_LAST_RX_RATE, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_WMM, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_SUPPORTED_MODE, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_AMPDU, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_STBC, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_STBC, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_CH_WIDTH, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_SGI_ENABLE, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_PAD, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_RETRY_COUNT, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_BC_MC_COUNT, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_FAILURE, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_AVG_RSSI_PER_CHAIN, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_RETRY_SUCCEED, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_LAST_PKT_RSSI, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_RETRY, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_RETRY_EXHAUST, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_TOTAL_FW, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_RETRY_FW, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_RETRY_EXHAUST_FW, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_BEACON_IES, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_DRIVER_DISCONNECT_REASON, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_ASSOC_REQ_IES, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_HE_OPERATION, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_MAX = + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AFTER_LAST - 1, +}; + +/** + * enum qca_nl80211_vendor_subcmds_index - vendor sub commands index + * + * @QCA_NL80211_VENDOR_SUBCMD_AVOID_FREQUENCY_INDEX: Avoid frequency + * @QCA_NL80211_VENDOR_SUBCMD_NAN_INDEX: Nan + * @QCA_NL80211_VENDOR_SUBCMD_STATS_EXT_INDEX: Ext stats + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_START_INDEX: Ext scan start + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_STOP_INDEX: Ext scan stop + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CAPABILITIES_INDEX: Ext scan get + * capability + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CACHED_RESULTS_INDEX: Ext scan get + * cached results + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_RESULTS_AVAILABLE_INDEX: Ext scan + * results available + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_FULL_SCAN_RESULT_INDEX: Ext scan full + * scan result + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_EVENT_INDEX: Ext scan event + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_FOUND_INDEX: Ext scan hot list + * AP found + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_BSSID_HOTLIST_INDEX: Ext scan set + * bssid hotlist + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_BSSID_HOTLIST_INDEX: Ext scan reset + * bssid hotlist + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SIGNIFICANT_CHANGE_INDEX: Ext scan + * significant change + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SIGNIFICANT_CHANGE_INDEX: Ext scan + * set significant change + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SIGNIFICANT_CHANGE_INDEX: Ext scan + * reset significant change + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_SET_INDEX: Set stats + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_GET_INDEX: Get stats + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_CLR_INDEX: Clear stats + * @QCA_NL80211_VENDOR_SUBCMD_LL_RADIO_STATS_INDEX: Radio stats + * @QCA_NL80211_VENDOR_SUBCMD_LL_IFACE_STATS_INDEX: Iface stats + * @QCA_NL80211_VENDOR_SUBCMD_LL_PEER_INFO_STATS_INDEX: Peer info stats + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_EXT_INDEX: MAC layer counters + * @QCA_NL80211_VENDOR_SUBCMD_TDLS_STATE_CHANGE_INDEX: Ext tdls state change + * @QCA_NL80211_VENDOR_SUBCMD_DO_ACS_INDEX: ACS command + * @QCA_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH_INDEX: Pass Roam and Auth info + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_LOST_INDEX: hotlist ap lost + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_NETWORK_FOUND_INDEX: + * pno network found index + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_PASSPOINT_NETWORK_FOUND_INDEX: + * passpoint match found index + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SSID_HOTLIST_INDEX: + * set ssid hotlist index + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SSID_HOTLIST_INDEX: + * reset ssid hotlist index + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_FOUND_INDEX: + * hotlist ssid found index + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_LOST_INDEX: + * hotlist ssid lost index + * @QCA_NL80211_VENDOR_SUBCMD_DCC_STATS_EVENT_INDEX + * dcc stats event index + * @QCA_NL80211_VENDOR_SUBCMD_SCAN_INDEX: vendor scan index + * @QCA_NL80211_VENDOR_SUBCMD_SCAN_DONE_INDEX: + * vendor scan complete event index + * @QCA_NL80211_VENDOR_SUBCMD_GW_PARAM_CONFIG_INDEX: + * update gateway parameters index + * @QCA_NL80211_VENDOR_SUBCMD_INTEROP_ISSUES_AP_INDEX: + * update aps info which has interop issues events index + * @QCA_NL80211_VENDOR_SUBCMD_TSF_INDEX: TSF response events index + * @QCA_NL80211_VENDOR_SUBCMD_P2P_LO_EVENT_INDEX: + * P2P listen offload index + * @QCA_NL80211_VENDOR_SUBCMD_SAP_CONDITIONAL_CHAN_SWITCH_INDEX: SAP + * conditional channel switch index + * @QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_GET_INDEX: NUD DEBUG Stats index + * @QCA_NL80211_VENDOR_SUBCMD_HANG_REASON_INDEX: hang event reason index + * @QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO_INDEX: MAC mode info index + * @QCA_NL80211_VENDOR_SUBCMD_NAN_EXT_INDEX: NAN Extended index + * @QCA_NL80211_VENDOR_SUBCMD_BEACON_REPORTING_INDEX: Beacon reporting index + * @QCA_NL80211_VENDOR_SUBCMD_REQUEST_SAR_LIMITS_INDEX: Request SAR limit index + */ + +enum qca_nl80211_vendor_subcmds_index { + QCA_NL80211_VENDOR_SUBCMD_AVOID_FREQUENCY_INDEX = 0, + + QCA_NL80211_VENDOR_SUBCMD_NAN_INDEX, + +#ifdef WLAN_FEATURE_STATS_EXT + QCA_NL80211_VENDOR_SUBCMD_STATS_EXT_INDEX, +#endif /* WLAN_FEATURE_STATS_EXT */ + +#ifdef FEATURE_WLAN_EXTSCAN + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_START_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_STOP_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CAPABILITIES_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CACHED_RESULTS_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_RESULTS_AVAILABLE_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_FULL_SCAN_RESULT_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_EVENT_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_FOUND_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_BSSID_HOTLIST_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_BSSID_HOTLIST_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SIGNIFICANT_CHANGE_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SIGNIFICANT_CHANGE_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SIGNIFICANT_CHANGE_INDEX, +#endif /* FEATURE_WLAN_EXTSCAN */ + +#ifdef WLAN_FEATURE_LINK_LAYER_STATS + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_SET_INDEX, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_GET_INDEX, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_CLR_INDEX, + QCA_NL80211_VENDOR_SUBCMD_LL_RADIO_STATS_INDEX, + QCA_NL80211_VENDOR_SUBCMD_LL_IFACE_STATS_INDEX, + QCA_NL80211_VENDOR_SUBCMD_LL_PEER_INFO_STATS_INDEX, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_EXT_INDEX, +#endif /* WLAN_FEATURE_LINK_LAYER_STATS */ + + QCA_NL80211_VENDOR_SUBCMD_TDLS_STATE_CHANGE_INDEX, + QCA_NL80211_VENDOR_SUBCMD_DO_ACS_INDEX, +#ifdef WLAN_FEATURE_ROAM_OFFLOAD + QCA_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH_INDEX, +#endif + /* DFS */ + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_STARTED_INDEX, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_FINISHED_INDEX, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_ABORTED_INDEX, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_NOP_FINISHED_INDEX, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_RADAR_DETECTED_INDEX, +#ifdef FEATURE_WLAN_EXTSCAN + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_LOST_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_NETWORK_FOUND_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_PASSPOINT_NETWORK_FOUND_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SSID_HOTLIST_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SSID_HOTLIST_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_FOUND_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_LOST_INDEX, +#endif /* FEATURE_WLAN_EXTSCAN */ + QCA_NL80211_VENDOR_SUBCMD_GET_WIFI_CONFIGURATION_INDEX, + QCA_NL80211_VENDOR_SUBCMD_MONITOR_RSSI_INDEX, +#ifdef WLAN_FEATURE_MEMDUMP + QCA_NL80211_VENDOR_SUBCMD_WIFI_LOGGER_MEMORY_DUMP_INDEX, +#endif /* WLAN_FEATURE_MEMDUMP */ + /* OCB events */ + QCA_NL80211_VENDOR_SUBCMD_DCC_STATS_EVENT_INDEX, + QCA_NL80211_VENDOR_SUBCMD_SCAN_INDEX, + QCA_NL80211_VENDOR_SUBCMD_SCAN_DONE_INDEX, + QCA_NL80211_VENDOR_SUBCMD_GW_PARAM_CONFIG_INDEX, + QCA_NL80211_VENDOR_SUBCMD_INTEROP_ISSUES_AP_INDEX, +#ifdef WLAN_FEATURE_TSF + QCA_NL80211_VENDOR_SUBCMD_TSF_INDEX, +#endif + QCA_NL80211_VENDOR_SUBCMD_NDP_INDEX, + QCA_NL80211_VENDOR_SUBCMD_P2P_LO_EVENT_INDEX, + QCA_NL80211_VENDOR_SUBCMD_SAP_CONDITIONAL_CHAN_SWITCH_INDEX, + QCA_NL80211_VENDOR_SUBCMD_UPDATE_EXTERNAL_ACS_CONFIG, + QCA_NL80211_VENDOR_SUBCMD_PWR_SAVE_FAIL_DETECTED_INDEX, + QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_GET_INDEX, + QCA_NL80211_VENDOR_SUBCMD_HANG_REASON_INDEX, + QCA_NL80211_VENDOR_SUBCMD_HTT_STATS_INDEX, + QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO_INDEX, + QCA_NL80211_VENDOR_SUBCMD_NAN_EXT_INDEX, + QCA_NL80211_VENDOR_SUBCMD_THROUGHPUT_CHANGE_EVENT_INDEX, + QCA_NL80211_VENDOR_SUBCMD_LINK_PROPERTIES_INDEX, + QCA_NL80211_VENDOR_SUBCMD_PEER_STATS_CACHE_FLUSH_INDEX, + QCA_NL80211_VENDOR_SUBCMD_BEACON_REPORTING_INDEX, + QCA_NL80211_VENDOR_SUBCMD_ROAM_INDEX, + QCA_NL80211_VENDOR_SUBCMD_OEM_DATA_INDEX, + QCA_NL80211_VENDOR_SUBCMD_REQUEST_SAR_LIMITS_INDEX, + QCA_NL80211_VENDOR_SUBCMD_UPDATE_STA_INFO_INDEX, +}; + +/** + * enum qca_wlan_vendor_attr_tdls_enable - TDLS enable attribute + * + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MAC_ADDR: An array of 6 x Unsigned 8-bit + * value + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_CHANNEL: Signed 32-bit value, but lets + * keep as unsigned for now + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_GLOBAL_OPERATING_CLASS: operating class + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MAX_LATENCY_MS: Enable max latency in ms + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MIN_BANDWIDTH_KBPS: Enable min bandwidth + * in KBPS + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MAX: Max value + */ +enum qca_wlan_vendor_attr_tdls_enable { + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_CHANNEL, + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_GLOBAL_OPERATING_CLASS, + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MAX_LATENCY_MS, + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MIN_BANDWIDTH_KBPS, + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MAX = + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_tdls_disable: tdls disable attribute + * + * @QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_MAC_ADDR: An array of 6 x Unsigned + * 8-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_MAX: Max value + */ +enum qca_wlan_vendor_attr_tdls_disable { + QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_MAX = + QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_AFTER_LAST - 1, +}; + +/** + * qca_chip_power_save_failure_reason: Power save failure reason + * @QCA_CHIP_POWER_SAVE_FAILURE_REASON_PROTOCOL: Indicates power save failure + * due to protocol/module. + * @QCA_CHIP_POWER_SAVE_FAILURE_REASON_HARDWARE: power save failure + * due to hardware + */ +enum qca_chip_power_save_failure_reason { + QCA_CHIP_POWER_SAVE_FAILURE_REASON_PROTOCOL = 0, + QCA_CHIP_POWER_SAVE_FAILURE_REASON_HARDWARE = 1, +}; + +/** + * qca_attr_chip_power_save_failure: attributes to vendor subcmd + * @QCA_NL80211_VENDOR_SUBCMD_CHIP_PWRSAVE_FAILURE. This carry the requisite + * information leading to the power save failure. + * @QCA_ATTR_CHIP_POWER_SAVE_FAILURE_INVALID : invalid + * @QCA_ATTR_CHIP_POWER_SAVE_FAILURE_REASON : power save failure reason + * represented by enum qca_chip_power_save_failure_reason + * @QCA_ATTR_CHIP_POWER_SAVE_FAILURE_LAST : Last + * @QCA_ATTR_CHIP_POWER_SAVE_FAILURE_MAX : Max value + */ +enum qca_attr_chip_power_save_failure { + QCA_ATTR_CHIP_POWER_SAVE_FAILURE_INVALID = 0, + + QCA_ATTR_CHIP_POWER_SAVE_FAILURE_REASON = 1, + + /* keep last */ + QCA_ATTR_CHIP_POWER_SAVE_FAILURE_LAST, + QCA_ATTR_CHIP_POWER_SAVE_FAILURE_MAX = + QCA_ATTR_CHIP_POWER_SAVE_FAILURE_LAST - 1, +}; + + +/** + * enum qca_wlan_vendor_attr_tdls_get_status - tdls get status attribute + * + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_MAC_ADDR: An array of 6 x Unsigned + * 8-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_STATE: get status state, + * unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_REASON: get status reason + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_CHANNEL: get status channel, + * unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_GLOBAL_OPERATING_CLASS: get operating + * class, unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_MAX: Max value + */ +enum qca_wlan_vendor_attr_tdls_get_status { + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_STATE, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_REASON, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_CHANNEL, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_GLOBAL_OPERATING_CLASS, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_MAX = + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_tdls_state - tdls state attribute + * + * @QCA_WLAN_VENDOR_ATTR_TDLS_STATE_INVALID: Initial invalid value + * @QCA_WLAN_VENDOR_ATTR_TDLS_STATE_MAC_ADDR: An array of 6 x Unsigned + * 8-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_NEW_STATE: TDLS new state, + * unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_STATE_REASON: TDLS state reason + * @QCA_WLAN_VENDOR_ATTR_TDLS_STATE_CHANNEL: TDLS state channel, + * unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_STATE_GLOBAL_OPERATING_CLASS: TDLS state + * operating class, unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_STATE_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_TDLS_STATE_MAX: Max value + */ +enum qca_wlan_vendor_attr_tdls_state { + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_TDLS_NEW_STATE, + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_REASON, + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_CHANNEL, + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_GLOBAL_OPERATING_CLASS, + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_MAX = + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_AFTER_LAST - 1, +}; + +/* enum's to provide TDLS capabilities */ +enum qca_wlan_vendor_attr_get_tdls_capabilities { + QCA_WLAN_VENDOR_ATTR_TDLS_GET_CAPS_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_CAPS_MAX_CONC_SESSIONS = 1, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_CAPS_FEATURES_SUPPORTED = 2, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_TDLS_GET_CAPS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_CAPS_MAX = + QCA_WLAN_VENDOR_ATTR_TDLS_GET_CAPS_AFTER_LAST - 1, +}; + +enum qca_wlan_vendor_attr { + QCA_WLAN_VENDOR_ATTR_INVALID = 0, + /* used by QCA_NL80211_VENDOR_SUBCMD_DFS_CAPABILITY */ + QCA_WLAN_VENDOR_ATTR_DFS = 1, + /* used by QCA_NL80211_VENDOR_SUBCMD_NAN */ + QCA_WLAN_VENDOR_ATTR_NAN = 2, + /* used by QCA_NL80211_VENDOR_SUBCMD_STATS_EXT */ + QCA_WLAN_VENDOR_ATTR_STATS_EXT = 3, + /* used by QCA_NL80211_VENDOR_SUBCMD_STATS_EXT */ + QCA_WLAN_VENDOR_ATTR_IFINDEX = 4, + /* + * used by QCA_NL80211_VENDOR_SUBCMD_ROAMING, u32 with values defined + * by enum qca_roaming_policy. + */ + QCA_WLAN_VENDOR_ATTR_ROAMING_POLICY = 5, + QCA_WLAN_VENDOR_ATTR_MAC_ADDR = 6, + /* used by QCA_NL80211_VENDOR_SUBCMD_GET_FEATURES */ + QCA_WLAN_VENDOR_ATTR_FEATURE_FLAGS = 7, + QCA_WLAN_VENDOR_ATTR_TEST = 8, + /* + * used by QCA_NL80211_VENDOR_SUBCMD_GET_FEATURES + * Unsigned 32-bit value. + */ + QCA_WLAN_VENDOR_ATTR_CONCURRENCY_CAPA = 9, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_MAX_CONCURRENT_CHANNELS_2_4_BAND = 10, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_MAX_CONCURRENT_CHANNELS_5_0_BAND = 11, + /* Unsigned 32-bit value from enum qca_set_band. Also, the acceptable + * value for this attribute are only till QCA_SETBAND_2G. This attribute + * is deprecated. Recommendation is to use + * QCA_WLAN_VENDOR_ATTR_SETBAND_MASK instead. If the band is configured + * using both the attributes, the ones configured through + * QCA_WLAN_VENDOR_ATTR_SETBAND_MASK take the precedence. + */ + QCA_WLAN_VENDOR_ATTR_SETBAND_VALUE = 12, + /* Dummy (NOP) attribute for 64 bit padding */ + QCA_WLAN_VENDOR_ATTR_PAD = 13, + /* + * Unique FTM session cookie (Unsigned 64 bit). Specified in + * QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION. Reported in + * the session in QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT and + * QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE. + */ + QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE = 14, + /* + * Indoor location capabilities, returned by + * QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA. + * see enum qca_wlan_vendor_attr_loc_capa. + */ + QCA_WLAN_VENDOR_ATTR_LOC_CAPA = 15, + /* + * Array of nested attributes containing information about each peer + * in FTM measurement session. See enum qca_wlan_vendor_attr_peer_info + * for supported attributes for each peer. + */ + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS = 16, + /* + * Array of nested attributes containing measurement results for + * one or more peers, reported by the + * QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT event. + * See enum qca_wlan_vendor_attr_peer_result for list of supported + * attributes. + */ + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS = 17, + /* Flag attribute for enabling or disabling responder functionality. */ + QCA_WLAN_VENDOR_ATTR_FTM_RESPONDER_ENABLE = 18, + /* + * Used in the QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER + * command to specify the LCI report that will be sent by + * the responder during a measurement exchange. The format is + * defined in IEEE P802.11-REVmc/D7.0, 9.4.2.22.10. + */ + QCA_WLAN_VENDOR_ATTR_FTM_LCI = 19, + /* + * Used in the QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER + * command to specify the location civic report that will + * be sent by the responder during a measurement exchange. + * The format is defined in IEEE P802.11-REVmc/D7.0, 9.4.2.22.13. + */ + QCA_WLAN_VENDOR_ATTR_FTM_LCR = 20, + /* + * Session/measurement completion status code, + * reported in QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE and + * QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT + * see enum qca_vendor_attr_loc_session_status. + */ + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS = 21, + /* + * Initial dialog token used by responder (0 if not specified), + * unsigned 8 bit value. + */ + QCA_WLAN_VENDOR_ATTR_FTM_INITIAL_TOKEN = 22, + /* + * AOA measurement type. Requested in QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS + * and optionally in QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION if + * AOA measurements are needed as part of an FTM session. + * Reported by QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT. See + * enum qca_wlan_vendor_attr_aoa_type. + */ + QCA_WLAN_VENDOR_ATTR_AOA_TYPE = 23, + /* + * A bit mask (unsigned 32 bit value) of antenna arrays used + * by indoor location measurements. Refers to the antenna + * arrays described by QCA_VENDOR_ATTR_LOC_CAPA_ANTENNA_ARRAYS. + */ + QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK = 24, + /* + * AOA measurement data. Its contents depends on the AOA measurement + * type and antenna array mask: + * QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE: array of U16 values, + * phase of the strongest CIR path for each antenna in the measured + * array(s). + * QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP: array of 2 U16 + * values, phase and amplitude of the strongest CIR path for each + * antenna in the measured array(s). + */ + QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT = 25, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_GET_CHAIN_RSSI command + * to specify the chain number (unsigned 32 bit value) to inquire + * the corresponding antenna RSSI value */ + QCA_WLAN_VENDOR_ATTR_CHAIN_INDEX = 26, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_GET_CHAIN_RSSI command + * to report the specific antenna RSSI value (unsigned 32 bit value) */ + QCA_WLAN_VENDOR_ATTR_CHAIN_RSSI = 27, + /* Frequency in MHz, various uses. Unsigned 32 bit value */ + QCA_WLAN_VENDOR_ATTR_FREQ = 28, + /* + * TSF timer value, unsigned 64 bit value. + * May be returned by various commands. + */ + QCA_WLAN_VENDOR_ATTR_TSF = 29, + /* + * DMG RF sector index, unsigned 16 bit number. Valid values are + * 0..127 for sector indices or 65535 as special value used to + * unlock sector selection in + * QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR. + */ + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_INDEX = 30, + /* + * DMG RF sector type, unsigned 8 bit value. One of the values + * in enum qca_wlan_vendor_attr_dmg_rf_sector_type. + */ + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_TYPE = 31, + /* + * Bitmask of DMG RF modules for which information is requested. Each + * bit corresponds to an RF module with the same index as the bit + * number. Unsigned 32 bit number but only low 8 bits can be set since + * all DMG chips currently have up to 8 RF modules. + */ + QCA_WLAN_VENDOR_ATTR_DMG_RF_MODULE_MASK = 32, + /* + * Array of nested attributes where each entry is DMG RF sector + * configuration for a single RF module. + * Attributes for each entry are taken from enum + * qca_wlan_vendor_attr_dmg_rf_sector_cfg. + * Specified in QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG + * and returned by QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG. + */ + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG = 33, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_STATS_EXT command + * to report frame aggregation statistics to userspace. + */ + QCA_WLAN_VENDOR_ATTR_RX_AGGREGATION_STATS_HOLES_NUM = 34, + QCA_WLAN_VENDOR_ATTR_RX_AGGREGATION_STATS_HOLES_INFO = 35, + /* + * Unsigned 8-bit value representing MBO transition reason code as + * provided by the AP used by subcommand + * QCA_NL80211_VENDOR_SUBCMD_FETCH_BSS_TRANSITION_STATUS. This is + * specified by the userspace in the request to the driver. + */ + QCA_WLAN_VENDOR_ATTR_BTM_MBO_TRANSITION_REASON = 36, + /* + * Array of nested attributes, BSSID and status code, used by subcommand + * QCA_NL80211_VENDOR_SUBCMD_FETCH_BSS_TRANSITION_STATUS, where each + * entry is taken from enum qca_wlan_vendor_attr_btm_candidate_info. + * The userspace space specifies the list/array of candidate BSSIDs in + * the order of preference in the request. The driver specifies the + * status code, for each BSSID in the list, in the response. The + * acceptable candidates are listed in the order preferred by the + * driver. + */ + QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO = 37, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_BRP_SET_ANT_LIMIT command + * See enum qca_wlan_vendor_attr_brp_ant_limit_mode. + */ + QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE = 38, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_BRP_SET_ANT_LIMIT command + * to define the number of antennas to use for BRP. + * different purpose in each ANT_LIMIT_MODE: + * DISABLE - ignored + * EFFECTIVE - upper limit to number of antennas to be used + * FORCE - exact number of antennas to be used + * unsigned 8 bit value + */ + QCA_WLAN_VENDOR_ATTR_BRP_ANT_NUM_LIMIT = 39, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_GET_CHAIN_RSSI command + * to report the corresponding antenna index to the chain RSSI value + */ + QCA_WLAN_VENDOR_ATTR_ANTENNA_INFO = 40, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_GET_CHAIN_RSSI command + * to report the specific antenna EVM value (unsigned 32 bit value). + * With a determinate group of antennas, the driver specifies the + * EVM value for each antenna ID, and application extract them + * in user space. + */ + QCA_WLAN_VENDOR_ATTR_CHAIN_EVM = 41, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_GET_FW_STATE command to report + * wlan firmware current state. FW state is an unsigned 8 bit value, + * one of the values in enum qca_wlan_vendor_attr_fw_state. + */ + QCA_WLAN_VENDOR_ATTR_FW_STATE = 42, + + /* Unsigned 32-bitmask value from enum qca_set_band. Substitutes the + * attribute QCA_WLAN_VENDOR_ATTR_SETBAND_VALUE for which only the + * integer values of enum qca_set_band till QCA_SETBAND_2G are valid. + * This attribute shall consider the bitmask combinations to define + * the respective Band combinations and always takes precedence over + * QCA_WLAN_VENDOR_ATTR_SETBAND_VALUE. + */ + QCA_WLAN_VENDOR_ATTR_SETBAND_MASK = 43, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_MAX = QCA_WLAN_VENDOR_ATTR_AFTER_LAST - 1 +}; + +enum qca_wlan_vendor_attr_extscan_config_params { + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SUBCMD_CONFIG_PARAM_INVALID = 0, + + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SUBCMD_CONFIG_PARAM_REQUEST_ID = 1, + + /* + * Attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_VALID_CHANNELS sub command. + */ + + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_GET_VALID_CHANNELS_CONFIG_PARAM_WIFI_BAND + = 2, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_GET_VALID_CHANNELS_CONFIG_PARAM_MAX_CHANNELS + = 3, + + /* + * Attributes for input params used by + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_START sub command. + */ + + /* Unsigned 32-bit value; channel frequency */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CHANNEL_SPEC_CHANNEL = 4, + /* Unsigned 32-bit value; dwell time in ms. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CHANNEL_SPEC_DWELL_TIME = 5, + /* Unsigned 8-bit value; 0: active; 1: passive; N/A for DFS */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CHANNEL_SPEC_PASSIVE = 6, + /* Unsigned 8-bit value; channel class */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CHANNEL_SPEC_CLASS = 7, + + /* Unsigned 8-bit value; bucket index, 0 based */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_INDEX = 8, + /* Unsigned 8-bit value; band. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_BAND = 9, + /* Unsigned 32-bit value; desired period, in ms. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_PERIOD = 10, + /* Unsigned 8-bit value; report events semantics. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_REPORT_EVENTS = 11, + /* + * Unsigned 32-bit value. Followed by a nested array of + * EXTSCAN_CHANNEL_SPEC_* attributes. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_NUM_CHANNEL_SPECS = 12, + + /* + * Array of QCA_WLAN_VENDOR_ATTR_EXTSCAN_CHANNEL_SPEC_* attributes. + * Array size: QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_NUM_CHANNEL_SPECS + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CHANNEL_SPEC = 13, + + /* Unsigned 32-bit value; base timer period in ms. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SCAN_CMD_PARAMS_BASE_PERIOD = 14, + /* + * Unsigned 32-bit value; number of APs to store in each scan in the + * BSSID/RSSI history buffer (keep the highest RSSI APs). + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SCAN_CMD_PARAMS_MAX_AP_PER_SCAN = 15, + /* + * Unsigned 8-bit value; in %, when scan buffer is this much full, wake + * up AP. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SCAN_CMD_PARAMS_REPORT_THRESHOLD_PERCENT + = 16, + + /* + * Unsigned 8-bit value; number of scan bucket specs; followed by a + * nested array of_EXTSCAN_BUCKET_SPEC_* attributes and values. The size + * of the array is determined by NUM_BUCKETS. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SCAN_CMD_PARAMS_NUM_BUCKETS = 17, + + /* + * Array of QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_* attributes. + * Array size: QCA_WLAN_VENDOR_ATTR_EXTSCAN_SCAN_CMD_PARAMS_NUM_BUCKETS + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC = 18, + + /* Unsigned 8-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_GET_CACHED_SCAN_RESULTS_CONFIG_PARAM_FLUSH + = 19, + /* Unsigned 32-bit value; maximum number of results to be returned. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_GET_CACHED_SCAN_RESULTS_CONFIG_PARAM_MAX + = 20, + + /* An array of 6 x unsigned 8-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_AP_THRESHOLD_PARAM_BSSID = 21, + /* Signed 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_AP_THRESHOLD_PARAM_RSSI_LOW = 22, + /* Signed 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_AP_THRESHOLD_PARAM_RSSI_HIGH = 23, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_AP_THRESHOLD_PARAM_CHANNEL = 24, + + /* + * Number of hotlist APs as unsigned 32-bit value, followed by a nested + * array of AP_THRESHOLD_PARAM attributes and values. The size of the + * array is determined by NUM_AP. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BSSID_HOTLIST_PARAMS_NUM_AP = 25, + + /* + * Array of QCA_WLAN_VENDOR_ATTR_EXTSCAN_AP_THRESHOLD_PARAM_* attributes. + * Array size: QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_NUM_CHANNEL_SPECS + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_AP_THRESHOLD_PARAM = 26, + + /* Unsigned 32-bit value; number of samples for averaging RSSI. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SIGNIFICANT_CHANGE_PARAMS_RSSI_SAMPLE_SIZE + = 27, + /* Unsigned 32-bit value; number of samples to confirm AP loss. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SIGNIFICANT_CHANGE_PARAMS_LOST_AP_SAMPLE_SIZE + = 28, + /* Unsigned 32-bit value; number of APs breaching threshold. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SIGNIFICANT_CHANGE_PARAMS_MIN_BREACHING = 29, + /* + * Unsigned 32-bit value; number of APs. Followed by an array of + * AP_THRESHOLD_PARAM attributes. Size of the array is NUM_AP. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SIGNIFICANT_CHANGE_PARAMS_NUM_AP = 30, + /* Unsigned 32-bit value; number of samples to confirm AP loss. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BSSID_HOTLIST_PARAMS_LOST_AP_SAMPLE_SIZE + = 31, + /* + * Unsigned 32-bit value. If max_period is non zero or different than + * period, then this bucket is an exponential backoff bucket. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_MAX_PERIOD = 32, + /* Unsigned 32-bit value. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_BASE = 33, + /* + * Unsigned 32-bit value. For exponential back off bucket, number of + * scans to perform for a given period. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_STEP_COUNT = 34, + /* + * Unsigned 8-bit value; in number of scans, wake up AP after these + * many scans. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SCAN_CMD_PARAMS_REPORT_THRESHOLD_NUM_SCANS + = 35, + + /* + * Attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SSID_HOTLIST sub command. + */ + + /* Unsigned 3-2bit value; number of samples to confirm SSID loss. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_HOTLIST_PARAMS_LOST_SSID_SAMPLE_SIZE + = 36, + /* + * Number of hotlist SSIDs as unsigned 32-bit value, followed by a + * nested array of SSID_THRESHOLD_PARAM_* attributes and values. The + * size of the array is determined by NUM_SSID. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_HOTLIST_PARAMS_NUM_SSID = 37, + /* + * Array of QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_THRESHOLD_PARAM_* + * attributes. + * Array size: QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_HOTLIST_PARAMS_NUM_SSID + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_THRESHOLD_PARAM = 38, + + /* An array of 33 x unsigned 8-bit value; NULL terminated SSID */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_THRESHOLD_PARAM_SSID = 39, + /* Unsigned 8-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_THRESHOLD_PARAM_BAND = 40, + /* Signed 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_THRESHOLD_PARAM_RSSI_LOW = 41, + /* Signed 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_THRESHOLD_PARAM_RSSI_HIGH = 42, + /* Unsigned 32-bit value; a bitmask with additional extscan config flag. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CONFIGURATION_FLAGS = 43, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SUBCMD_CONFIG_PARAM_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SUBCMD_CONFIG_PARAM_MAX = + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SUBCMD_CONFIG_PARAM_AFTER_LAST - 1, +}; + +enum qca_wlan_vendor_attr_extscan_results { + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_INVALID = 0, + + /* + * Unsigned 32-bit value; must match the request Id supplied by + * Wi-Fi HAL in the corresponding subcmd NL msg. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_REQUEST_ID = 1, + + /* + * Unsigned 32-bit value; used to indicate the status response from + * firmware/driver for the vendor sub-command. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_STATUS = 2, + + /* + * EXTSCAN Valid Channels attributes */ + /* Unsigned 32bit value; followed by a nested array of CHANNELS. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_NUM_CHANNELS = 3, + /* + * An array of NUM_CHANNELS x unsigned 32-bit value integers + * representing channel numbers. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CHANNELS = 4, + + /* EXTSCAN Capabilities attributes */ + + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_SCAN_CACHE_SIZE = 5, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_SCAN_BUCKETS = 6, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_AP_CACHE_PER_SCAN + = 7, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_RSSI_SAMPLE_SIZE + = 8, + /* Signed 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_SCAN_REPORTING_THRESHOLD + = 9, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_HOTLIST_BSSIDS = 10, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_SIGNIFICANT_WIFI_CHANGE_APS + = 11, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_BSSID_HISTORY_ENTRIES + = 12, + + /* + * EXTSCAN Attributes used with + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_RESULTS_AVAILABLE sub-command. + */ + + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_NUM_RESULTS_AVAILABLE = 13, + + /* + * EXTSCAN attributes used with + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_FULL_SCAN_RESULT sub-command. + */ + + /* + * An array of NUM_RESULTS_AVAILABLE x + * QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_* + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_LIST = 14, + + /* Unsigned 64-bit value; age of sample at the time of retrieval */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_TIME_STAMP = 15, + /* 33 x unsigned 8-bit value; NULL terminated SSID */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_SSID = 16, + /* An array of 6 x unsigned 8-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_BSSID = 17, + /* Unsigned 32-bit value; channel frequency in MHz */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_CHANNEL = 18, + /* Signed 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_RSSI = 19, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_RTT = 20, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_RTT_SD = 21, + /* Unsigned 16-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_BEACON_PERIOD = 22, + /* Unsigned 16-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_CAPABILITY = 23, + /* Unsigned 32-bit value; size of the IE DATA blob */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_IE_LENGTH = 24, + /* + * An array of IE_LENGTH x unsigned 8-bit value; blob of all the + * information elements found in the beacon; this data should be a + * packed list of wifi_information_element objects, one after the + * other. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_IE_DATA = 25, + + /* + * Unsigned 8-bit value; set by driver to indicate more scan results are + * available. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_MORE_DATA = 26, + + /* + * EXTSCAN attributes for + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_EVENT sub-command. + */ + /* Unsigned 8-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_EVENT_TYPE = 27, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_EVENT_STATUS = 28, + + /* + * EXTSCAN attributes for + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_FOUND sub-command. + */ + /* + * Use attr QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_NUM_RESULTS_AVAILABLE + * to indicate number of results. + * Also, use QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_LIST to indicate the + * list of results. + */ + + /* + * EXTSCAN attributes for + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SIGNIFICANT_CHANGE sub-command. + */ + /* An array of 6 x unsigned 8-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SIGNIFICANT_CHANGE_RESULT_BSSID = 29, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SIGNIFICANT_CHANGE_RESULT_CHANNEL + = 30, + /* Unsigned 32-bit value. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SIGNIFICANT_CHANGE_RESULT_NUM_RSSI + = 31, + /* + * A nested array of signed 32-bit RSSI values. Size of the array is + * determined by (NUM_RSSI of SIGNIFICANT_CHANGE_RESULT_NUM_RSSI. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SIGNIFICANT_CHANGE_RESULT_RSSI_LIST + = 32, + + /* + * EXTSCAN attributes used with + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CACHED_RESULTS sub-command. + */ + /* + * Use attr QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_NUM_RESULTS_AVAILABLE + * to indicate number of extscan cached results returned. + * Also, use QCA_WLAN_VENDOR_ATTR_EXTSCAN_CACHED_RESULTS_LIST to indicate + * the list of extscan cached results. + */ + + /* + * An array of NUM_RESULTS_AVAILABLE x + * QCA_NL80211_VENDOR_ATTR_EXTSCAN_CACHED_RESULTS_* + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CACHED_RESULTS_LIST = 33, + /* Unsigned 32-bit value; a unique identifier for the scan unit. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CACHED_RESULTS_SCAN_ID = 34, + /* + * Unsigned 32-bit value; a bitmask w/additional information about scan. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CACHED_RESULTS_FLAGS = 35, + /* + * Use attr QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_NUM_RESULTS_AVAILABLE + * to indicate number of wifi scan results/bssids retrieved by the scan. + * Also, use QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_LIST to indicate the + * list of wifi scan results returned for each cached result block. + */ + + /* + * EXTSCAN attributes for + * QCA_NL80211_VENDOR_SUBCMD_PNO_NETWORK_FOUND sub-command. + */ + /* + * Use QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_NUM_RESULTS_AVAILABLE for + * number of results. + * Use QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_LIST to indicate the nested + * list of wifi scan results returned for each + * wifi_passpoint_match_result block. + * Array size: QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_NUM_RESULTS_AVAILABLE. + */ + + /* + * EXTSCAN attributes for + * QCA_NL80211_VENDOR_SUBCMD_PNO_PASSPOINT_NETWORK_FOUND sub-command. + */ + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_PNO_RESULTS_PASSPOINT_NETWORK_FOUND_NUM_MATCHES + = 36, + /* + * A nested array of + * QCA_WLAN_VENDOR_ATTR_EXTSCAN_PNO_RESULTS_PASSPOINT_MATCH_* + * attributes. Array size = + * *_ATTR_EXTSCAN_PNO_RESULTS_PASSPOINT_NETWORK_FOUND_NUM_MATCHES. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_PNO_RESULTS_PASSPOINT_MATCH_RESULT_LIST = 37, + + /* Unsigned 32-bit value; network block id for the matched network */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_PNO_RESULTS_PASSPOINT_MATCH_ID = 38, + /* + * Use QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_LIST to indicate the nested + * list of wifi scan results returned for each + * wifi_passpoint_match_result block. + */ + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_PNO_RESULTS_PASSPOINT_MATCH_ANQP_LEN = 39, + /* + * An array size of PASSPOINT_MATCH_ANQP_LEN of unsigned 8-bit values; + * ANQP data in the information_element format. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_PNO_RESULTS_PASSPOINT_MATCH_ANQP = 40, + + /* Unsigned 32-bit value; a EXTSCAN Capabilities attribute. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_HOTLIST_SSIDS = 41, + /* Unsigned 32-bit value; a EXTSCAN Capabilities attribute. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_NUM_EPNO_NETS = 42, + /* Unsigned 32-bit value; a EXTSCAN Capabilities attribute. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_NUM_EPNO_NETS_BY_SSID + = 43, + /* Unsigned 32-bit value; a EXTSCAN Capabilities attribute. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_NUM_WHITELISTED_SSID + = 44, + + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_BUCKETS_SCANNED = 45, + QCA_WLAN_VENDOR_ATTR_EXTSCAN_MAX_NUM_BLACKLISTED_BSSID = 46, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_MAX = + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_AFTER_LAST - 1, +}; + +/** + * enum qca_vendor_interop_issues_ap_type - interop issues type + * This enum defines the valid set of values of interop issues type. These + * values are used by attribute %QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_TYPE. + * + * @QCA_VENDOR_INTEROP_ISSUES_AP_ON_STA_PS: the ap has power save interop issue + * when the STA's Qpower feature is enabled. + */ +enum qca_vendor_interop_issues_ap_type { + QCA_VENDOR_INTEROP_ISSUES_AP_INVALID = 0, + QCA_VENDOR_INTEROP_ISSUES_AP_ON_STA_PS = 1, +}; + +/** + * enum qca_vendor_attr_interop_issues_ap - attribute for ap with interop issues + * values are used by %QCA_NL80211_VENDOR_SUBCMD_INTEROP_ISSUES_AP. + * + * @QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_TYPE: interop issues type + * 32-bit unsigned value, The type defined in enum + * qca_vendor_interop_issues_ap_type are used. + * @QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_LIST: aps' bssid container + * array of nested QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_BSSID attributes, + * it is present and mandatory for the command but is not used for + * the event since only a single BSSID is reported in an event. + * @QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_BSSID: ap's bssid + * 6-byte MAC address. It is used within the nested + * QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_LIST attribute in command case + * and without such encapsulation in the event case. + * @QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_AFTER_LAST: last value + * @QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_MAX: max value + */ +enum qca_vendor_attr_interop_issues_ap { + QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_INVALID, + QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_TYPE, + QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_LIST, + QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_BSSID, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_MAX = + QCA_WLAN_VENDOR_ATTR_INTEROP_ISSUES_AP_AFTER_LAST - 1 +}; + +#ifdef WLAN_FEATURE_LINK_LAYER_STATS + +/** + * enum qca_wlan_vendor_attr_ll_stats_set - vendor attribute set stats + * + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_CONFIG_MPDU_SIZE_THRESHOLD: Size threshold + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_CONFIG_AGGRESSIVE_STATS_GATHERING: + * Aggresive stats gathering + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_MAX: Max value + */ +enum qca_wlan_vendor_attr_ll_stats_set { + QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_CONFIG_MPDU_SIZE_THRESHOLD = 1, + QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_CONFIG_AGGRESSIVE_STATS_GATHERING, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_MAX = + QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_ll_stats_get - vendor attribute get stats + * + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_CONFIG_REQ_ID: Unsigned 32bit value + * provided by the caller issuing the GET stats command. When reporting + * the stats results, the driver uses the same value to indicate which + * GET request the results correspond to. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_CONFIG_REQ_MASK: Get config request mask + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_RSP_MASK: Config response mask + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_STOP_RSP: Config stop response + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_MAX: Max value + */ +enum qca_wlan_vendor_attr_ll_stats_get { + QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_CONFIG_REQ_ID, + QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_CONFIG_REQ_MASK, + QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_MAX = + QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_ll_stats_clr - vendor attribute clear stats + * + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_REQ_MASK: Config request mask + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_STOP_REQ: Config stop mask + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_RSP_MASK: Config response mask + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_STOP_RSP: Config stop response + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_MAX: Max value + */ +enum qca_wlan_vendor_attr_ll_stats_clr { + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_REQ_MASK, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_STOP_REQ, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_RSP_MASK, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_STOP_RSP, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_MAX = + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_ll_stats_results_type - ll stats result type + * + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_INVALID: Initial invalid value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_RADIO: Link layer stats type radio + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_IFACE: Link layer stats type interface + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_PEER: Link layer stats type peer + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_AFTER_LAST: Last value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_MAX: Max value + */ +enum qca_wlan_vendor_attr_ll_stats_results_type { + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_INVALID = 0, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_RADIO = 1, + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_IFACE, + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_PEER, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_MAX = + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_ll_stats_results - vendor attribute stats results + * + * Attributes of type QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_* are nested + * within the interface stats. + * + * Attributes of type QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_* could be nested + * within the interface stats. + * + * Attributes of type QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_* are nested + * within the interface stats. + * + * Attributes of type QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_* could be nested + * within the peer info stats. + * + * Attributes of type QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_* could be + * nested within the channel stats. + * + * Attributes of type QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_ could be nested + * within the radio stats. + * + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RESULTS_REQ_ID: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_BEACON_RX: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_MGMT_RX: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_MGMT_ACTION_RX: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_MGMT_ACTION_TX: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RSSI_MGMT: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RSSI_DATA: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RSSI_ACK: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_MODE: Interface mode, e.g., STA, + * SOFTAP, IBSS, etc. Type = enum wifi_interface_mode + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_MAC_ADDR: Interface MAC address. + * An array of 6 Unsigned int8_t + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_STATE: + * Type = enum wifi_connection_state, e.g., DISCONNECTED, AUTHENTICATING, + * etc. Valid for STA, CLI only + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_ROAMING: + * Type = enum wifi_roam_state. Roaming state, e.g., IDLE or ACTIVE + * (is that valid for STA only?) + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_CAPABILITIES: Unsigned 32bit value. + * WIFI_CAPABILITY_XXX + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_SSID: NULL terminated SSID. An + * array of 33 Unsigned 8bit values + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_BSSID: BSSID. An array of 6 + * Unsigned 8bit values + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_AP_COUNTRY_STR: Country string + * advertised by AP. An array of 3 Unsigned 8bit values + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_COUNTRY_STR: Country string for + * this association. An array of 3 Unsigned 8bit values + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_AC: Type = enum wifi_traffic_ac e.g. + * V0, VI, BE and BK + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_TX_MPDU: Unsigned int 32 value + * corresponding to respective AC + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RX_MPDU: Unsigned int 32 value + * corresponding to respective AC + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_TX_MCAST: Unsigned int 32 value + * corresponding to respective AC + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RX_MCAST: Unsigned int 32 value + * corresponding to respective AC + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RX_AMPDU: Unsigned int 32 value + * corresponding to respective AC + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_TX_AMPDU: Unsigned int 32 value + * corresponding to respective AC + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_MPDU_LOST: Unsigned int 32 value + * corresponding to respective AC + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RETRIES: Unsigned int 32 value + * corresponding to respective AC + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RETRIES_SHORT: Unsigned int 32 value + * corresponding to respective AC + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_TIME_MIN: Unsigned int 32 + * value corresponding to respective AC + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_TIME_MAX: Unsigned int 32 + * value corresponding to respective AC + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_TIME_AVG: Unsigned int 32 + * value corresponding to respective AC + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_NUM_SAMPLES: Unsigned int 32 + * value corresponding to respective AC + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_NUM_PEERS: Unsigned int 32 + * value corresponding to respective AC + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_TYPE: Type = enum wifi_peer_type + * Peer type, e.g., STA, AP, P2P GO etc + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_MAC_ADDRESS: MAC addr corresponding + * to respective peer. An array of 6 Unsigned 8bit values + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_CAPABILITIES: Unsigned int 32bit + * value representing capabilities corresponding to respective peer. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_NUM_RATES: Unsigned 32bit value. + * Number of rates + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_PREAMBLE: Unsigned int 8bit value: + * 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_NSS: Unsigned int 8bit value: + * 0:1x1, 1:2x2, 3:3x3, 4:4x4 + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_BW: Unsigned int 8bit value: + * 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_MCS_INDEX: Unsigned int 8bit value: + * OFDM/CCK rate code would be as per IEEE Std in the units of 0.5mbps + * HT/VHT it would be mcs index + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_BIT_RATE: Unsigned 32bit value. + * Bit rate in units of 100Kbps + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_TX_MPDU: Unsigned int 32bit value. + * Number of successfully transmitted data pkts i.e., with ACK received + * corresponding to the respective rate + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RX_MPDU: Unsigned int 32bit value. + * Number of received data pkts corresponding to the respective rate + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_MPDU_LOST: Unsigned int 32bit value. + * Number of data pkts losses, i.e., no ACK received corresponding to + * the respective rate + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RETRIES: Unsigned int 32bit value. + * Total number of data pkt retries for the respective rate + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RETRIES_SHORT: Unsigned int 32bit value. + * Total number of short data pkt retries for the respective rate + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RETRIES_LONG: Unsigned int 32bit value. + * Total number of long data pkt retries for the respective rate + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ID: Radio id + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME: Unsigned 32bit value. + * Total number of msecs the radio is awake accruing over time + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_TX_TIME: Unsigned 32bit value. + * Total number of msecs the radio is transmitting accruing over time + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_RX_TIME: Unsigned 32bit value. + * Total number of msecs the radio is in active receive accruing over time + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_SCAN: Unsigned 32bit value. + * Total number of msecs the radio is awake due to all scan accruing + * over time + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_NBD: Unsigned 32bit value. + * Total number of msecs the radio is awake due to NAN accruing over time. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_GSCAN: Unsigned 32bit value. + * Total number of msecs the radio is awake due to GSCAN accruing over time + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_ROAM_SCAN: Unsigned 32bit value. + * Total number of msecs the radio is awake due to roam scan accruing over + * time. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_PNO_SCAN: Unsigned 32bit value. + * Total number of msecs the radio is awake due to PNO scan accruing over + * time. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_HS20: Unsigned 32bit value. + * Total number of msecs the radio is awake due to HS2.0 scans and GAS + * exchange accruing over time. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_NUM_CHANNELS: Unsigned 32bit value. + * Number of channels + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_WIDTH: + * Type = enum wifi_channel_width. Channel width, e.g., 20, 40, 80, etc. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_CENTER_FREQ: + * Unsigned 32bit value. Primary 20MHz channel. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_CENTER_FREQ0: + * Unsigned 32bit value. Center frequency (MHz) first segment. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_CENTER_FREQ1: + * Unsigned 32bit value. Center frequency (MHz) second segment. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_ON_TIME: Unsigned int 32bit value + * representing total number of msecs the radio is awake on that channel + * accruing over time, corresponding to the respective channel. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_CCA_BUSY_TIME: Unsigned int 32bit + * value representing total number of msecs the CCA register is busy + * accruing over time corresponding to the respective channel. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_NUM_RADIOS: Number of radios + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CH_INFO: Channel info + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO: Peer info + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_RATE_INFO: Peer rate info + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_INFO: WMM info + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RESULTS_MORE_DATA: Unsigned 8bit value. + * Used by the driver; if set to 1, it indicates that more stats, e.g., + * peers or radio, are to follow in the next + * QCA_NL80211_VENDOR_SUBCMD_LL_STATS_*_RESULTS event. Otherwise, it + * is set to 0. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_AVERAGE_TSF_OFFSET: tsf offset + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_LEAKY_AP_DETECTED: leaky ap detected + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_LEAKY_AP_AVG_NUM_FRAMES_LEAKED: + * average number of frames leaked + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_LEAKY_AP_GUARD_TIME: guard time + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE: Link Layer stats type + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_NUM_TX_LEVELS: LL Radio Number of + * Tx Levels + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_TX_TIME_PER_LEVEL:Number of msecs the + * radio spent in transmitting for each power level + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RTS_SUCC_CNT: RTS successful count + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RTS_FAIL_CNT: RTS fail count + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_PPDU_SUCC_CNT: PPDU successful count + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_PPDU_FAIL_CNT: PPDU fail count + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_FEATURE_SET_MAX: Max value + */ + +enum qca_wlan_vendor_attr_ll_stats_results { + QCA_WLAN_VENDOR_ATTR_LL_STATS_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RESULTS_REQ_ID = 1, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_BEACON_RX = 2, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_MGMT_RX = 3, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_MGMT_ACTION_RX = 4, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_MGMT_ACTION_TX = 5, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RSSI_MGMT = 6, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RSSI_DATA = 7, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RSSI_ACK = 8, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_MODE = 9, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_MAC_ADDR = 10, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_STATE = 11, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_ROAMING = 12, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_CAPABILITIES = 13, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_SSID = 14, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_BSSID = 15, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_AP_COUNTRY_STR = 16, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_COUNTRY_STR = 17, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_AC = 18, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_TX_MPDU = 19, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RX_MPDU = 20, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_TX_MCAST = 21, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RX_MCAST = 22, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RX_AMPDU = 23, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_TX_AMPDU = 24, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_MPDU_LOST = 25, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RETRIES = 26, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RETRIES_SHORT = 27, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RETRIES_LONG = 28, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_TIME_MIN = 29, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_TIME_MAX = 30, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_TIME_AVG = 31, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_NUM_SAMPLES = 32, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_NUM_PEERS = 33, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_TYPE = 34, + QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_MAC_ADDRESS = 35, + QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_CAPABILITIES = 36, + QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_NUM_RATES = 37, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_PREAMBLE = 38, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_NSS = 39, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_BW = 40, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_MCS_INDEX = 41, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_BIT_RATE = 42, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_TX_MPDU = 43, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RX_MPDU = 44, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_MPDU_LOST = 45, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RETRIES = 46, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RETRIES_SHORT = 47, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RETRIES_LONG = 48, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ID = 49, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME = 50, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_TX_TIME = 51, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_RX_TIME = 52, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_SCAN = 53, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_NBD = 54, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_GSCAN = 55, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_ROAM_SCAN = 56, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_PNO_SCAN = 57, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_HS20 = 58, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_NUM_CHANNELS = 59, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_WIDTH = 60, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_CENTER_FREQ = 61, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_CENTER_FREQ0 = 62, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_CENTER_FREQ1 = 63, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_ON_TIME = 64, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_CCA_BUSY_TIME = 65, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_NUM_RADIOS = 66, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CH_INFO = 67, + QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO = 68, + QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_RATE_INFO = 69, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_INFO = 70, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_RESULTS_MORE_DATA = 71, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_AVERAGE_TSF_OFFSET = 72, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_LEAKY_AP_DETECTED = 73, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_LEAKY_AP_AVG_NUM_FRAMES_LEAKED = 74, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_LEAKY_AP_GUARD_TIME = 75, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE = 76, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_NUM_TX_LEVELS = 77, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_TX_TIME_PER_LEVEL = 78, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RTS_SUCC_CNT = 79, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RTS_FAIL_CNT = 80, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_PPDU_SUCC_CNT = 81, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_PPDU_FAIL_CNT = 82, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LL_STATS_MAX = + QCA_WLAN_VENDOR_ATTR_LL_STATS_AFTER_LAST - 1 +}; + +enum qca_wlan_vendor_attr_ll_stats_type { + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_TYPE_INVALID = 0, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_TYPE_RADIO = 1, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_TYPE_IFACE = 2, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_TYPE_PEERS = 3, + + /* keep last */ + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_TYPE_AFTER_LAST, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_TYPE_MAX = + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_TYPE_AFTER_LAST - 1, +}; + +#endif /* WLAN_FEATURE_LINK_LAYER_STATS */ + +/** + * enum qca_wlan_vendor_attr_get_supported_features - get supported feature + * + * @QCA_WLAN_VENDOR_ATTR_FEATURE_SET_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_FEATURE_SET: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_FEATURE_SET_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_FEATURE_SET_MAX: Max value + */ +enum qca_wlan_vendor_attr_get_supported_features { + QCA_WLAN_VENDOR_ATTR_FEATURE_SET_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_FEATURE_SET = 1, + QCA_WLAN_VENDOR_ATTR_FEATURE_SET_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FEATURE_SET_MAX = + QCA_WLAN_VENDOR_ATTR_FEATURE_SET_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_set_scanning_mac_oui - set scanning mac oui + * + * @QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI: An array of 3 x Unsigned 8-bit + * value + * @QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_MAX: Max value + */ +enum qca_wlan_vendor_attr_set_scanning_mac_oui { + QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI = 1, + QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_MAX = + QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_scan - Specifies vendor scan attributes + * + * @QCA_WLAN_VENDOR_ATTR_SCAN_IE: IEs that should be included as part of scan + * @QCA_WLAN_VENDOR_ATTR_SCAN_FREQUENCIES: Nested unsigned 32-bit attributes + * with frequencies to be scanned (in MHz) + * @QCA_WLAN_VENDOR_ATTR_SCAN_SSIDS: Nested attribute with SSIDs to be scanned + * @QCA_WLAN_VENDOR_ATTR_SCAN_SUPP_RATES: Nested array attribute of supported + * rates to be included + * @QCA_WLAN_VENDOR_ATTR_SCAN_TX_NO_CCK_RATE: flag used to send probe requests + * at non CCK rate in 2GHz band + * @QCA_WLAN_VENDOR_ATTR_SCAN_FLAGS: Unsigned 32-bit scan flags + * @QCA_WLAN_VENDOR_ATTR_SCAN_COOKIE: Unsigned 64-bit cookie provided by the + * driver for the specific scan request + * @QCA_WLAN_VENDOR_ATTR_SCAN_STATUS: Unsigned 8-bit status of the scan + * request decoded as in enum scan_status + * @QCA_WLAN_VENDOR_ATTR_SCAN_MAC: 6-byte MAC address to use when randomisation + * scan flag is set + * @QCA_WLAN_VENDOR_ATTR_SCAN_MAC_MASK: 6-byte MAC address mask to be used with + * randomisation + * @QCA_WLAN_VENDOR_ATTR_SCAN_BSSID: BSSID provided to do scan for specific BSS + */ +enum qca_wlan_vendor_attr_scan { + QCA_WLAN_VENDOR_ATTR_SCAN_INVALID_PARAM = 0, + QCA_WLAN_VENDOR_ATTR_SCAN_IE, + QCA_WLAN_VENDOR_ATTR_SCAN_FREQUENCIES, + QCA_WLAN_VENDOR_ATTR_SCAN_SSIDS, + QCA_WLAN_VENDOR_ATTR_SCAN_SUPP_RATES, + QCA_WLAN_VENDOR_ATTR_SCAN_TX_NO_CCK_RATE, + QCA_WLAN_VENDOR_ATTR_SCAN_FLAGS, + QCA_WLAN_VENDOR_ATTR_SCAN_COOKIE, + QCA_WLAN_VENDOR_ATTR_SCAN_STATUS, + QCA_WLAN_VENDOR_ATTR_SCAN_MAC, + QCA_WLAN_VENDOR_ATTR_SCAN_MAC_MASK, + QCA_WLAN_VENDOR_ATTR_SCAN_BSSID, + QCA_WLAN_VENDOR_ATTR_SCAN_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SCAN_MAX = + QCA_WLAN_VENDOR_ATTR_SCAN_AFTER_LAST - 1 +}; + +/** + * enum scan_status - Specifies the valid values the vendor scan attribute + * QCA_WLAN_VENDOR_ATTR_SCAN_STATUS can take + * @VENDOR_SCAN_STATUS_NEW_RESULTS: implies the vendor scan is successful with + * new scan results + * @VENDOR_SCAN_STATUS_ABORTED: implies the vendor scan was aborted in-between + */ +enum scan_status { + VENDOR_SCAN_STATUS_NEW_RESULTS, + VENDOR_SCAN_STATUS_ABORTED, + VENDOR_SCAN_STATUS_MAX, +}; + +/** + * enum qca_wlan_vendor_attr_get_concurrency_matrix - get concurrency matrix + * + * NL attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_GET_CONCURRENCY_MATRIX sub command. + * + * @QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_CONFIG_PARAM_SET_SIZE_MAX: + * Unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_RESULTS_SET_SIZE: + * Unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_RESULTS_SET: Set results. An + * array of SET_SIZE x Unsigned 32bit values representing concurrency + * combinations + * @QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_MAX: Max value + */ +enum qca_wlan_vendor_attr_get_concurrency_matrix { + QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_CONFIG_PARAM_SET_SIZE_MAX + = 1, + QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_RESULTS_SET_SIZE = 2, + QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_RESULTS_SET = 3, + QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_MAX = + QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_set_no_dfs_flag - vendor attribute set no dfs flag + * + * @QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG: Unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG_MAX: Max value + */ +enum qca_wlan_vendor_attr_set_no_dfs_flag { + QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG = 1, + QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG_MAX = + QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG_AFTER_LAST - 1, +}; + +/** + * enum qca_vendor_attr_wisa_cmd + * @QCA_WLAN_VENDOR_ATTR_WISA_INVALID: Invalid attr + * @QCA_WLAN_VENDOR_ATTR_WISA_MODE: WISA mode value attr (u32) + * @QCA_WLAN_VENDOR_ATTR_WISA_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_WISA_MAX: Max Value + * WISA setup vendor commands + */ +enum qca_vendor_attr_wisa_cmd { + QCA_WLAN_VENDOR_ATTR_WISA_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_WISA_MODE, + QCA_WLAN_VENDOR_ATTR_WISA_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_WISA_MAX = + QCA_WLAN_VENDOR_ATTR_WISA_AFTER_LAST - 1 +}; + +enum qca_roaming_policy { + QCA_ROAMING_NOT_ALLOWED, + QCA_ROAMING_ALLOWED_WITHIN_ESS, +}; + +/** + * enum qca_roam_reason - Represents the reason codes for roaming. Used by + * QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_REASON. + * + * @QCA_ROAM_REASON_UNKNOWN: Any reason that do not classify under the below + * reasons. + * + * @QCA_ROAM_REASON_PER: Roam triggered when packet error rates(PER) breached + * the configured threshold. + * + * @QCA_ROAM_REASON_BEACON_MISS: Roam triggered due to the continuous configured + * beacon misses from the then connected AP. + * + * @QCA_ROAM_REASON_POOR_RSSI: Roam triggered due to the poor RSSI reported + * by the connected AP. + * + * @QCA_ROAM_REASON_BETTER_RSSI: Roam triggered for finding a BSSID with a + * better RSSI than the connected BSSID. Here the RSSI of the current BSSID is + * not poor. + * + * @QCA_ROAM_REASON_CONGESTION: Roam triggered considering the connected channel + * or environment being very noisy / congested. + * + * @QCA_ROAM_REASON_EXPLICIT_REQUEST: Roam triggered due to an explicit request + * from the user (user space). + * + * @QCA_ROAM_REASON_BTM: Roam triggered due to BTM request frame received from + * connected AP. + * + * @QCA_ROAM_REASON_BSS_LOAD: Roam triggered due to the channel utilization + * breaching out the configured threshold. + * + */ +enum qca_roam_reason { + QCA_ROAM_REASON_UNKNOWN, + QCA_ROAM_REASON_PER, + QCA_ROAM_REASON_BEACON_MISS, + QCA_ROAM_REASON_POOR_RSSI, + QCA_ROAM_REASON_BETTER_RSSI, + QCA_ROAM_REASON_CONGESTION, + QCA_ROAM_REASON_USER_TRIGGER, + QCA_ROAM_REASON_BTM, + QCA_ROAM_REASON_BSS_LOAD, +}; + +/** + * enum qca_wlan_vendor_attr_roam_auth - vendor event for roaming + * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_BSSID: BSSID of the roamed AP + * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_REQ_IE: Request IE + * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_RESP_IE: Response IE + * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_AUTHORIZED: Authorization Status + * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_KEY_REPLAY_CTR: Replay Counter + * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PTK_KCK: KCK of the PTK + * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PTK_KEK: KEK of the PTK + * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_SUBNET_STATUS: subnet change status + * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_STATUS: + * Indicates the status of re-association requested by user space for + * the BSSID specified by QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_BSSID. + * Type u16. + * Represents the status code from AP. Use + * %WLAN_STATUS_UNSPECIFIED_FAILURE if the device cannot give you the + * real status code for failures. + * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_RETAIN_CONNECTION: + * This attribute indicates that the old association was maintained when + * a re-association is requested by user space and that re-association + * attempt fails (i.e., cannot connect to the requested BSS, but can + * remain associated with the BSS with which the association was in + * place when being requested to roam). Used along with + * WLAN_VENDOR_ATTR_ROAM_AUTH_STATUS to indicate the current + * re-association status. Type flag. + * This attribute is applicable only for re-association failure cases. + * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PMK: AUTH PMK + * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PMKID: AUTH PMKID + * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_FILS_ERP_NEXT_SEQ_NUM: FILS erp next + * seq number + * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_REASON: A 16-bit unsigned value + * representing the reasons for the roaming. Defined by enum + * qca_roam_reason. + */ +enum qca_wlan_vendor_attr_roam_auth { + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_BSSID, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_REQ_IE, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_RESP_IE, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_AUTHORIZED, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_KEY_REPLAY_CTR, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PTK_KCK, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PTK_KEK, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_SUBNET_STATUS, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_STATUS, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_RETAIN_CONNECTION, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PMK, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PMKID, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_FILS_ERP_NEXT_SEQ_NUM, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_REASON, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_MAX = + QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_wifi_config - wifi config + * + * @QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_DYNAMIC_DTIM: dynamic DTIM + * @QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_STATS_AVG_FACTOR: avg factor + * @QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_GUARD_TIME: guard time + * @QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_AFTER_LAST: after last + * @QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_MAX: max value + */ +enum qca_wlan_vendor_attr_wifi_config { + QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_DYNAMIC_DTIM = 1, + QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_STATS_AVG_FACTOR = 2, + QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_GUARD_TIME = 3, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_MAX = + QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_epno_type - the type of request to the EPNO command + * @QCA_WLAN_EPNO: epno type request + * @QCA_WLAN_PNO: pno type request + */ +enum qca_wlan_epno_type { + QCA_WLAN_EPNO, + QCA_WLAN_PNO +}; + +/** + * enum qca_wlan_vendor_attr_pno_config_params - pno config params + * + * @QCA_WLAN_VENDOR_ATTR_PNO_INVALID - Invalid initial value + * + * NL attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_PNO_SET_PASSPOINT_LIST sub command. + * @QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_LIST_PARAM_NUM: + * Unsigned 32-bit value; pno passpoint number of networks + * @QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_LIST_PARAM_NETWORK_ARRAY: + * Array of nested QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_* + * attributes. Array size = + * QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_LIST_PARAM_NUM. + * @QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ID: + * Unsigned 32-bit value; network id + * @QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_REALM: + * An array of 256 x Unsigned 8-bit value; NULL terminated UTF8 encoded + * realm, 0 if unspecified. + * @QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_CNSRTM_ID: + * An array of 16 x Unsigned 32-bit value; roaming consortium ids + * to match, 0 if unspecified. + * @QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_PLMN: + * An array of 6 x Unsigned 8-bit value; mcc/mnc combination, 0s if + * unspecified. + * + * NL attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_PNO_SET_LIST sub command. + * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_NUM_NETWORKS: + * Unsigned 32-bit value; set pno number of networks + * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORKS_LIST: + * Array of nested + * QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_* + * attributes. Array size = + * QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_NUM_NETWORKS + * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_SSID: + * An array of 33 x Unsigned 8-bit value; NULL terminated SSID + * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_RSSI_THRESHOLD: + * Signed 8-bit value; threshold for considering this SSID as found, + * required granularity for this threshold is 4dBm to 8dBm + * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_FLAGS: + * Unsigned 8-bit value; WIFI_PNO_FLAG_XXX + * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_AUTH_BIT: + * Unsigned 8-bit value; auth bit field for matching WPA IE + * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_TYPE + * Unsigned 8-bit to indicate ePNO type; values from qca_wlan_epno_type + *@QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_CHANNEL_LIST + * Nested attribute to send the channel list + *@QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_SCAN_INTERVAL + * Unsigned 32-bit value; indicates the Interval between PNO scan + * cycles in msec + *@QCA_WLAN_VENDOR_ATTR_EPNO_MIN5GHZ_RSSI + * Signed 32-bit value; minimum 5GHz RSSI for a BSSID to be considered + *@QCA_WLAN_VENDOR_ATTR_EPNO_MIN24GHZ_RSSI + * Signed 32-bit value; minimum 2.4GHz RSSI for a BSSID to be considered + * This attribute is obsolete now. + *@QCA_WLAN_VENDOR_ATTR_EPNO_INITIAL_SCORE_MAX + * Signed 32-bit value; the maximum score that a network + * can have before bonuses + *@QCA_WLAN_VENDOR_ATTR_EPNO_CURRENT_CONNECTION_BONUS + * Signed 32-bit value; only report when there is a network's + * score this much higher han the current connection + *@QCA_WLAN_VENDOR_ATTR_EPNO_SAME_NETWORK_BONUS + * Signed 32-bit value; score bonus for all networks with + * the same network flag + *@QCA_WLAN_VENDOR_ATTR_EPNO_SECURE_BONUS + * Signed 32-bit value; score bonus for networks that are not open + *@QCA_WLAN_VENDOR_ATTR_EPNO_BAND5GHZ_BONUS + * Signed 32-bit value; 5GHz RSSI score bonus applied to all + * 5GHz networks + *@QCA_WLAN_VENDOR_ATTR_PNO_CONFIG_REQUEST_ID + * Unsigned 32-bit value, representing the PNO Request ID + * @QCA_WLAN_VENDOR_ATTR_PNO_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_PNO_MAX: max + */ +enum qca_wlan_vendor_attr_pno_config_params { + QCA_WLAN_VENDOR_ATTR_PNO_INVALID = 0, + + QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_LIST_PARAM_NUM = 1, + QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_LIST_PARAM_NETWORK_ARRAY = 2, + QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ID = 3, + QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_REALM = 4, + QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_CNSRTM_ID = 5, + QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_PLMN = 6, + + QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_NUM_NETWORKS = 7, + QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORKS_LIST = 8, + QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_SSID = 9, + QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_RSSI_THRESHOLD = 10, + QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_FLAGS = 11, + QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_AUTH_BIT = 12, + QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_TYPE = 13, + QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_CHANNEL_LIST = 14, + QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_SCAN_INTERVAL = 15, + QCA_WLAN_VENDOR_ATTR_EPNO_MIN5GHZ_RSSI = 16, + QCA_WLAN_VENDOR_ATTR_EPNO_MIN24GHZ_RSSI = 17, + QCA_WLAN_VENDOR_ATTR_EPNO_INITIAL_SCORE_MAX = 18, + QCA_WLAN_VENDOR_ATTR_EPNO_CURRENT_CONNECTION_BONUS = 19, + QCA_WLAN_VENDOR_ATTR_EPNO_SAME_NETWORK_BONUS = 20, + QCA_WLAN_VENDOR_ATTR_EPNO_SECURE_BONUS = 21, + QCA_WLAN_VENDOR_ATTR_EPNO_BAND5GHZ_BONUS = 22, + + QCA_WLAN_VENDOR_ATTR_PNO_CONFIG_REQUEST_ID = 23, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_PNO_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_PNO_MAX = + QCA_WLAN_VENDOR_ATTR_PNO_AFTER_LAST - 1, +}; + +/** + * enum qca_scan_freq_list_type: Frequency list types + * + * @QCA_PREFERRED_SCAN_FREQ_LIST: The driver shall use the scan frequency list + * specified with attribute QCA_ATTR_ROAM_CONTROL_SCAN_FREQ_LIST as + * a preferred frequency list for roaming. + * + * @QCA_SPECIFIC_SCAN_FREQ_LIST: The driver shall use the frequency list + * specified with attribute QCA_ATTR_ROAM_CONTROL_SCAN_FREQ_LIST as + * a specific frequency list for roaming. + */ +enum qca_scan_freq_list_type { + QCA_PREFERRED_SCAN_FREQ_LIST = 1, + QCA_SPECIFIC_SCAN_FREQ_LIST = 2, +}; + +/** + * enum qca_vendor_attr_scan_freq_list_scheme: Frequency list scheme + * + * @QCA_ATTR_ROAM_CONTROL_SCAN_FREQ_LIST: An array of unsigned 32-bit values. + * List of frequencies in MHz to be considered for a roam scan. + * + * @QCA_ATTR_ROAM_CONTROL_SCAN_FREQ_LIST_TYPE: Unsigned 32-bit value. + * Type of frequency list scheme being configured/gotten as defined by the + * enum qca_scan_freq_list_type. + */ +enum qca_vendor_attr_scan_freq_list_scheme { + QCA_ATTR_ROAM_CONTROL_SCAN_FREQ_LIST = 1, + QCA_ATTR_ROAM_CONTROL_SCAN_FREQ_LIST_TYPE = 2, + + /* keep last */ + QCA_ATTR_ROAM_CONTROL_SCAN_FREQ_LIST_SCHEME_AFTER_LAST, + QCA_ATTR_ROAM_CONTROL_SCAN_FREQ_LIST_SCHEME_MAX = + QCA_ATTR_ROAM_CONTROL_SCAN_FREQ_LIST_SCHEME_AFTER_LAST - 1, +}; + +/* + * enum qca_vendor_roam_triggers: Bitmap of roaming triggers + * + * @QCA_ROAM_TRIGGER_REASON_PER: Set if the roam has to be triggered based on + * a bad packet error rates (PER). + * @QCA_ROAM_TRIGGER_REASON_BEACON_MISS: Set if the roam has to be triggered + * based on beacon misses from the connected AP. + * @QCA_ROAM_TRIGGER_REASON_POOR_RSSI: Set if the roam has to be triggered + * due to poor RSSI of the connected AP. + * @QCA_ROAM_TRIGGER_REASON_BETTER_RSSI: Set if the roam has to be triggered + * upon finding a BSSID with a better RSSI than the connected BSSID. + * Here the RSSI of the current BSSID need not be poor. + * @QCA_ROAM_TRIGGER_REASON_PERIODIC: Set if the roam has to be triggered + * by triggering a periodic scan to find a better AP to roam. + * @QCA_ROAM_TRIGGER_REASON_DENSE: Set if the roam has to be triggered + * when the connected channel environment is too noisy/congested. + * @QCA_ROAM_TRIGGER_REASON_BTM: Set if the roam has to be triggered + * when BTM Request frame is received from the connected AP. + * @QCA_ROAM_TRIGGER_REASON_BSS_LOAD: Set if the roam has to be triggered + * when the channel utilization is goes above the configured threshold. + * + * Set the corresponding roam trigger reason bit to consider it for roam + * trigger. + * Userspace can set multiple bits and send to the driver. The driver shall + * consider all of them to trigger/initiate a roam scan. + */ +enum qca_vendor_roam_triggers { + QCA_ROAM_TRIGGER_REASON_PER = 1 << 0, + QCA_ROAM_TRIGGER_REASON_BEACON_MISS = 1 << 1, + QCA_ROAM_TRIGGER_REASON_POOR_RSSI = 1 << 2, + QCA_ROAM_TRIGGER_REASON_BETTER_RSSI = 1 << 3, + QCA_ROAM_TRIGGER_REASON_PERIODIC = 1 << 4, + QCA_ROAM_TRIGGER_REASON_DENSE = 1 << 5, + QCA_ROAM_TRIGGER_REASON_BTM = 1 << 6, + QCA_ROAM_TRIGGER_REASON_BSS_LOAD = 1 << 7, +}; + +/** + * enum qca_vendor_attr_roam_candidate_selection_criteria: + * + * Each attribute carries a weightage in percentage (%). + * + * @QCA_ATTR_ROAM_CAND_SEL_CRITERIA_SCORE_RSSI: Unsigned 8-bit value. + * Represents the weightage to be given for the RSSI selection + * criteria among other parameters. + * + * @QCA_ATTR_ROAM_CAND_SEL_CRITERIA_RATE: Unsigned 8-bit value. + * Represents the weightage to be given for the rate selection + * criteria among other parameters. + * + * @QCA_ATTR_ROAM_CAND_SEL_CRITERIA_SCORE_BW: Unsigned 8-bit value. + * Represents the weightage to be given for the band width selection + * criteria among other parameters. + * + * @QCA_ATTR_ROAM_CAND_SEL_CRITERIA_SCORE_BAND: Unsigned 8-bit value. + * Represents the weightage to be given for the band selection + * criteria among other parameters. + * + * @QCA_ATTR_ROAM_CAND_SEL_CRITERIA_SCORE_NSS: Unsigned 8-bit value. + * Represents the weightage to be given for the NSS selection + * criteria among other parameters. + * + * @QCA_ATTR_ROAM_CAND_SEL_CRITERIA_SCORE_CHAN_CONGESTION: Unsigned 8-bit value. + * Represents the weightage to be given for the channel congestion + * selection criteria among other parameters. + * + * @QCA_ATTR_ROAM_CAND_SEL_CRITERIA_SCORE_BEAMFORMING: Unsigned 8-bit value. + * Represents the weightage to be given for the beamforming selection + * criteria among other parameters. + * + * @QCA_ATTR_ROAM_CAND_SEL_CRITERIA_SCORE_OCE_WAN: Unsigned 8-bit value. + * Represents the weightage to be given for the OCE selection + * criteria among other parameters. + */ +enum qca_vendor_attr_roam_candidate_selection_criteria { + QCA_ATTR_ROAM_CAND_SEL_CRITERIA_SCORE_RSSI = 1, + QCA_ATTR_ROAM_CAND_SEL_CRITERIA_RATE = 2, + QCA_ATTR_ROAM_CAND_SEL_CRITERIA_SCORE_BW = 3, + QCA_ATTR_ROAM_CAND_SEL_CRITERIA_SCORE_BAND = 4, + QCA_ATTR_ROAM_CAND_SEL_CRITERIA_SCORE_NSS = 5, + QCA_ATTR_ROAM_CAND_SEL_CRITERIA_SCORE_CHAN_CONGESTION = 6, + QCA_ATTR_ROAM_CAND_SEL_CRITERIA_SCORE_BEAMFORMING = 7, + QCA_ATTR_ROAM_CAND_SEL_CRITERIA_SCORE_OCE_WAN = 8, + + /* keep last */ + QCA_ATTR_ROAM_CAND_SEL_CRITERIA_RATE_AFTER_LAST, + QCA_ATTR_ROAM_CAND_SEL_CRITERIA_RATE_MAX = + QCA_ATTR_ROAM_CAND_SEL_CRITERIA_RATE_AFTER_LAST - 1, +}; + +/** + * enum qca_vendor_attr_roam_control - Attributes to carry roam configuration + * The following attributes are used to set/get/clear the respective + * configurations to/from the driver. + * For the get, the attribute for the configuration to be queried shall + * carry any of its acceptable values to the driver. In return, the driver + * shall send the configured values within the same attribute to the user + * space. + * + * @QCA_ATTR_ROAM_CONTROL_ENABLE: Unsigned 8-bit value. + * Signifies to enable/disable roam control in driver. + * 1-enable, 0-disable + * Enable: Mandates the driver to do the further roams using the + * configuration parameters set through + * QCA_WLAN_VENDOR_ROAMING_SUBCMD_CONTROL_SET. + * Disable: Disables the driver/firmware roaming triggered through + * QCA_WLAN_VENDOR_ROAMING_SUBCMD_CONTROL_SET. Further roaming is + * expected to continue with the default configurations. + * + * @QCA_ATTR_ROAM_CONTROL_STATUS: Unsigned 8-bit value. + * This is used along with QCA_WLAN_VENDOR_ROAMING_SUBCMD_CONTROL_GET. + * Roam control status is obtained through this attribute. + * + * @QCA_ATTR_ROAM_CONTROL_CLEAR_ALL: Flag attribute to indicate the + * complete config set through QCA_WLAN_VENDOR_ROAMING_SUBCMD_CONTROL_SET + * is to be cleared in the driver. + * This is used along with QCA_WLAN_VENDOR_ROAMING_SUBCMD_CONTROL_CLEAR + * and shall be ignored if used with other sub commands. + * If this attribute is specified along with subcmd + * QCA_WLAN_VENDOR_ROAMING_SUBCMD_CONTROL_CLEAR, the driver shall ignore + * all other attributes, if there are any. + * If this attribute is not specified when the subcmd + * QCA_WLAN_VENDOR_ROAMING_SUBCMD_CONTROL_CLEAR is sent, the driver shall + * clear the data corresponding to the attributes specified. + * + * @QCA_ATTR_ROAM_CONTROL_FREQ_LIST_SCHEME: Nested attribute to carry the + * list of frequencies and its type, represented by + * enum qca_vendor_attr_scan_freq_list_scheme. + * Frequency list and its type are mandatory for this attribute to set + * the frequencies. + * Frequency type is mandatory for this attribute to get the frequencies + * and the frequency list is obtained through + * QCA_ATTR_ROAM_CONTROL_SCAN_FREQ_LIST. + * Frequency list type is mandatory for this attribute to clear the + * frequencies. + * + * @QCA_ATTR_ROAM_CONTROL_SCAN_PERIOD: Unsigned 32-bit value. + * Carries the value of scan period in seconds to set. + * The value of scan period is obtained with the same attribute for get. + * Clears the scan period in the driver when specified with clear command. + * Scan period is the idle time in seconds between each subsequent + * channel scans. + * + * @QCA_ATTR_ROAM_CONTROL_FULL_SCAN_PERIOD: Unsigned 32-bit value. + * Carries the value of full scan period in seconds to set. + * The value of full scan period is obtained with the same attribute for + * get. + * Clears the full scan period in the driver when specified with clear + * command. Full scan period is the idle period in seconds between two + * successive full channel roam scans. + * + * @QCA_ATTR_ROAM_CONTROL_TRIGGERS: Unsigned 32-bit value. + * Carries a bitmap of the roam triggers specified in + * enum qca_vendor_roam_triggers. + * The driver shall enable roaming by enabling corresponding roam triggers + * based on the trigger bits sent with this attribute. + * If this attribute is not configured, the driver shall proceed with + * default behavior. + * The bitmap configured is obtained with the same attribute for get. + * Clears the bitmap configured in driver when specified with clear + * command. + * + * @QCA_ATTR_ROAM_CONTROL_SELECTION_CRITERIA: Nested attribute signifying the + * weightage in percentage (%) to be given for each selection criteria. + * Different roam candidate selection criteria are represented by + * enum qca_vendor_attr_roam_candidate_selection_criteria. + * The driver shall select the roam candidate based on corresponding + * candidate selection scores sent. + * + * An empty nested attribute is used to indicate that no specific + * preference score/criteria is configured (i.e., to disable this mechanism + * in the set case and to show that the mechanism is disabled in the get + * case). + * + * Userspace can send multiple attributes out of this enum to the driver. + * Since this attribute represents the weight/percentage of preference for + * the respective selection criteria, it is preferred to configure 100% + * total weightage. The value in each attribute or cumulative weight of the + * values in all the nested attributes should not exceed 100%. The driver + * shall reject such configuration. + * + * If the weights configured through this attribute are less than 100%, + * the driver shall honor the weights (x%) passed for the corresponding + * selection criteria and choose/distribute rest of the weight (100-x)% + * for the other selection criteria, based on its internal logic. + * + * The selection criteria configured is obtained with the same + * attribute for get. + * + * Clears the selection criteria configured in the driver when specified + * with clear command. + */ +enum qca_vendor_attr_roam_control { + QCA_ATTR_ROAM_CONTROL_ENABLE = 1, + QCA_ATTR_ROAM_CONTROL_STATUS = 2, + QCA_ATTR_ROAM_CONTROL_CLEAR_ALL = 3, + QCA_ATTR_ROAM_CONTROL_FREQ_LIST_SCHEME = 4, + QCA_ATTR_ROAM_CONTROL_SCAN_PERIOD = 5, + QCA_ATTR_ROAM_CONTROL_FULL_SCAN_PERIOD = 6, + QCA_ATTR_ROAM_CONTROL_TRIGGERS = 7, + QCA_ATTR_ROAM_CONTROL_SELECTION_CRITERIA = 8, + + /* keep last */ + QCA_ATTR_ROAM_CONTROL_AFTER_LAST, + QCA_ATTR_ROAM_CONTROL_MAX = + QCA_ATTR_ROAM_CONTROL_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_roaming_config_params: Attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_ROAM sub command. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_SUBCMD: Unsigned 32-bit value. + * Represents the different roam sub commands referred by + * enum qca_wlan_vendor_roaming_subcmd. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_REQ_ID: Unsigned 32-bit value. + * Represents the Request ID for the specific set of commands. + * This also helps to map specific set of commands to the respective + * ID / client. e.g., helps to identify the user entity configuring the + * Blacklist BSSID and accordingly clear the respective ones with the + * matching ID. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID_NUM_NETWORKS: Unsigned + * 32-bit value.Represents the number of whitelist SSIDs configured. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID_LIST: Nested attribute + * to carry the list of Whitelist SSIDs. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID: SSID (binary attribute, + * 0..32 octets). Represents the white list SSID. Whitelist SSIDs + * represent the list of SSIDs to which the firmware/driver can consider + * to roam to. + * + * The following PARAM_A_BAND_XX attributes are applied to 5GHz BSSIDs when + * comparing with a 2.4GHz BSSID. They are not applied when comparing two + * 5GHz BSSIDs.The following attributes are set through the Roaming SUBCMD - + * QCA_WLAN_VENDOR_ROAMING_SUBCMD_SET_EXTSCAN_ROAM_PARAMS. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_BOOST_THRESHOLD: Signed 32-bit + * value, RSSI threshold above which 5GHz RSSI is favored. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_PENALTY_THRESHOLD: Signed 32-bit + * value, RSSI threshold below which 5GHz RSSI is penalized. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_BOOST_FACTOR: Unsigned 32-bit + * value, factor by which 5GHz RSSI is boosted. + * boost=(RSSI_measured-5GHz_boost_threshold)*5GHz_boost_factor + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_PENALTY_FACTOR: Unsigned 32-bit + * value, factor by which 5GHz RSSI is penalized. + * penalty=(5GHz_penalty_threshold-RSSI_measured)*5GHz_penalty_factor + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_MAX_BOOST: Unsigned 32-bit + * value, maximum boost that can be applied to a 5GHz RSSI. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_LAZY_ROAM_HISTERESYS: Unsigned 32-bit + * value, boost applied to current BSSID to ensure the currently + * associated BSSID is favored so as to prevent ping-pong situations. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_ALERT_ROAM_RSSI_TRIGGER: Signed 32-bit + * value, RSSI below which "Alert" roam is enabled. + * "Alert" mode roaming - firmware is "urgently" hunting for another BSSID + * because the RSSI is low, or because many successive beacons have been + * lost or other bad link conditions. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_ENABLE: Unsigned 32-bit + * value. 1-Enable, 0-Disable. Represents "Lazy" mode, where + * firmware is hunting for a better BSSID or white listed SSID even though + * the RSSI of the link is good. The parameters enabling the roaming are + * configured through the PARAM_A_BAND_XX attrbutes. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PREFS: Nested attribute, + * represents the BSSIDs preferred over others while evaluating them + * for the roaming. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_NUM_BSSID: Unsigned + * 32-bit value. Represents the number of preferred BSSIDs set. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_BSSID: 6-byte MAC + * address representing the BSSID to be preferred. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_RSSI_MODIFIER: Signed + * 32-bit value, representing the modifier to be applied to the RSSI of + * the BSSID for the purpose of comparing it with other roam candidate. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS: Nested attribute, + * represents the BSSIDs to get blacklisted for roaming. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS_NUM_BSSID: Unsigned + * 32-bit value, represents the number of blacklisted BSSIDs. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS_BSSID: 6-byte MAC + * address representing the Blacklisted BSSID. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS_HINT: Flag attribute, + * indicates this BSSID blacklist as a hint to the driver. The driver can + * select this BSSID in the worst case (when no other BSSIDs are better). + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_CONTROL: Nested attribute to + * set/get/clear the roam control config as + * defined @enum qca_vendor_attr_roam_control. + * + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_MAX: Max + */ +enum qca_wlan_vendor_attr_roaming_config_params { + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_INVALID = 0, + + QCA_WLAN_VENDOR_ATTR_ROAMING_SUBCMD = 1, + QCA_WLAN_VENDOR_ATTR_ROAMING_REQ_ID = 2, + + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID_NUM_NETWORKS = 3, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID_LIST = 4, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID = 5, + + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_BOOST_THRESHOLD = 6, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_PENALTY_THRESHOLD = 7, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_BOOST_FACTOR = 8, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_PENALTY_FACTOR = 9, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_MAX_BOOST = 10, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_LAZY_ROAM_HISTERESYS = 11, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_ALERT_ROAM_RSSI_TRIGGER = 12, + + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_ENABLE = 13, + + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PREFS = 14, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_NUM_BSSID = 15, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_BSSID = 16, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_RSSI_MODIFIER = 17, + + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS = 18, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS_NUM_BSSID = 19, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS_BSSID = 20, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS_HINT = 21, + + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_CONTROL = 22, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_MAX = + QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_roaming_subcmd: Referred by + * QCA_WLAN_VENDOR_ATTR_ROAMING_SUBCMD. + * + * @QCA_WLAN_VENDOR_ROAMING_SUBCMD_SSID_WHITE_LIST: Sub command to + * configure the white list SSIDs. These are configured through + * the following attributes. + * QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID_NUM_NETWORKS, + * QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID_LIST, + * QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID + * + * @QCA_WLAN_VENDOR_ROAMING_SUBCMD_SET_EXTSCAN_ROAM_PARAMS: Sub command to + * configure the Roam params. These parameters are evaluated on the extscan + * results. Refers the attributes PARAM_A_BAND_XX above to configure the + * params. + * + * @QCA_WLAN_VENDOR_ROAMING_SUBCMD_SET_LAZY_ROAM: Sets the Lazy roam. Uses + * the attribute QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_ENABLE + * to enable/disable Lazy roam. + * + * @QCA_WLAN_VENDOR_ROAMING_SUBCMD_SET_BSSID_PREFS: Sets the BSSID + * preference. Contains the attribute + * QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PREFS to set the BSSID + * preference. + * + * @QCA_WLAN_VENDOR_ROAMING_SUBCMD_SET_BSSID_PARAMS: set bssid params + * + * @QCA_WLAN_VENDOR_ROAMING_SUBCMD_SET_BLACKLIST_BSSID: Sets the Blacklist + * BSSIDs. Refers QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS to + * set the same. + * + * @QCA_WLAN_VENDOR_ROAMING_SUBCMD_CONTROL_SET: Command to set the + * roam control config to the driver with the attribute + * QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_CONTROL. + * + * @QCA_WLAN_VENDOR_ROAMING_SUBCMD_CONTROL_GET: Command to obtain the + * roam control config from driver with the attribute + * QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_CONTROL. + * For the get, the attribute for the configuration to be queried shall + * carry any of its acceptable value to the driver. In return, the driver + * shall send the configured values within the same attribute to the user + * space. + * + * @QCA_WLAN_VENDOR_ROAMING_SUBCMD_CONTROL_CLEAR: Command to clear the + * roam control config in the driver with the attribute + * QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_CONTROL. + * The driver shall continue with its default roaming behavior when data + * corresponding to an attribute is cleared. + */ +enum qca_wlan_vendor_roaming_subcmd { + QCA_WLAN_VENDOR_ROAMING_SUBCMD_SSID_WHITE_LIST = 1, + QCA_WLAN_VENDOR_ROAMING_SUBCMD_SET_EXTSCAN_ROAM_PARAMS = 2, + QCA_WLAN_VENDOR_ROAMING_SUBCMD_SET_LAZY_ROAM = 3, + QCA_WLAN_VENDOR_ROAMING_SUBCMD_SET_BSSID_PREFS = 4, + QCA_WLAN_VENDOR_ROAMING_SUBCMD_SET_BSSID_PARAMS = 5, + QCA_WLAN_VENDOR_ROAMING_SUBCMD_SET_BLACKLIST_BSSID = 6, + QCA_WLAN_VENDOR_ROAMING_SUBCMD_CONTROL_SET = 7, + QCA_WLAN_VENDOR_ROAMING_SUBCMD_CONTROL_GET = 8, + QCA_WLAN_VENDOR_ROAMING_SUBCMD_CONTROL_CLEAR = 9, +}; + +/** + * enum qca_wlan_vendor_attr_get_wifi_info - wifi driver information + * + * @QCA_WLAN_VENDOR_ATTR_WIFI_INFO_GET_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_WIFI_INFO_DRIVER_VERSION: get host driver version + * @QCA_WLAN_VENDOR_ATTR_WIFI_INFO_FIRMWARE_VERSION: ger firmware version + * @QCA_WLAN_VENDOR_ATTR_WIFI_INFO_RADIO_INDEX - get radio index + * @QCA_WLAN_VENDOR_ATTR_WIFI_INFO_GET_AFTER_LAST: after last + * @QCA_WLAN_VENDOR_ATTR_WIFI_INFO_GET_MAX: subcmd max + */ +enum qca_wlan_vendor_attr_get_wifi_info { + QCA_WLAN_VENDOR_ATTR_WIFI_INFO_GET_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_WIFI_INFO_DRIVER_VERSION = 1, + QCA_WLAN_VENDOR_ATTR_WIFI_INFO_FIRMWARE_VERSION = 2, + QCA_WLAN_VENDOR_ATTR_WIFI_INFO_RADIO_INDEX = 3, + + /* KEEP LAST */ + QCA_WLAN_VENDOR_ATTR_WIFI_INFO_GET_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_WIFI_INFO_GET_MAX = + QCA_WLAN_VENDOR_ATTR_WIFI_INFO_GET_AFTER_LAST - 1, +}; + +enum qca_wlan_vendor_attr_logger_results { + QCA_WLAN_VENDOR_ATTR_LOGGER_RESULTS_INVALID = 0, + + /* + * Unsigned 32-bit value; must match the request Id supplied by + * Wi-Fi HAL in the corresponding subcmd NL msg. + */ + QCA_WLAN_VENDOR_ATTR_LOGGER_RESULTS_REQUEST_ID = 1, + + /* + * Unsigned 32-bit value; used to indicate the size of memory + * dump to be allocated. + */ + QCA_WLAN_VENDOR_ATTR_LOGGER_RESULTS_MEMDUMP_SIZE = 2, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_LOGGER_RESULTS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LOGGER_RESULTS_MAX = + QCA_WLAN_VENDOR_ATTR_LOGGER_RESULTS_AFTER_LAST - 1, +}; + +/** + * qca_wlan_vendor_channel_prop_flags: This represent the flags for a channel. + * This is used by QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAGS. + */ +enum qca_wlan_vendor_channel_prop_flags { + /* Bits 0, 1, 2, and 3 are reserved */ + + /* Turbo channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_TURBO = 1 << 4, + /* CCK channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_CCK = 1 << 5, + /* OFDM channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_OFDM = 1 << 6, + /* 2.4 GHz spectrum channel. */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_2GHZ = 1 << 7, + /* 5 GHz spectrum channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_5GHZ = 1 << 8, + /* Only passive scan allowed */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_PASSIVE = 1 << 9, + /* Dynamic CCK-OFDM channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_DYN = 1 << 10, + /* GFSK channel (FHSS PHY) */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_GFSK = 1 << 11, + /* Radar found on channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_RADAR = 1 << 12, + /* 11a static turbo channel only */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_STURBO = 1 << 13, + /* Half rate channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HALF = 1 << 14, + /* Quarter rate channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_QUARTER = 1 << 15, + /* HT 20 channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HT20 = 1 << 16, + /* HT 40 with extension channel above */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HT40PLUS = 1 << 17, + /* HT 40 with extension channel below */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HT40MINUS = 1 << 18, + /* HT 40 intolerant */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HT40INTOL = 1 << 19, + /* VHT 20 channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_VHT20 = 1 << 20, + /* VHT 40 with extension channel above */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_VHT40PLUS = 1 << 21, + /* VHT 40 with extension channel below */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_VHT40MINUS = 1 << 22, + /* VHT 80 channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_VHT80 = 1 << 23, + /* HT 40 intolerant mark bit for ACS use */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HT40INTOLMARK = 1 << 24, + /* Channel temporarily blocked due to noise */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_BLOCKED = 1 << 25, + /* VHT 160 channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_VHT160 = 1 << 26, + /* VHT 80+80 channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_VHT80_80 = 1 << 27, + /* HE 20 channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE20 = 1 << 28, + /* HE 40 with extension channel above */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE40PLUS = 1 << 29, + /* HE 40 with extension channel below */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE40MINUS = 1 << 30, + /* HE 40 intolerant */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE40INTOL = 1 << 31, +}; + +/** + * qca_wlan_vendor_channel_prop_flags_2: This represents the flags for a + * channel, and is a continuation of qca_wlan_vendor_channel_prop_flags. This is + * used by QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAGS_2. + */ +enum qca_wlan_vendor_channel_prop_flags_2 { + /* HE 40 intolerant mark bit for ACS use */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE40INTOLMARK = 1 << 0, + /* HE 80 channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE80 = 1 << 1, + /* HE 160 channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE160 = 1 << 2, + /* HE 80+80 channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE80_80 = 1 << 3, +}; + +/** + * qca_wlan_vendor_channel_prop_flags_ext: This represent the extended flags for + * each channel. This is used by + * QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAG_EXT. + */ +enum qca_wlan_vendor_channel_prop_flags_ext { + /* Radar found on channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_RADAR_FOUND = 1 << 0, + /* DFS required on channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_DFS = 1 << 1, + /* DFS required on channel for 2nd band of 80+80 */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_DFS_CFREQ2 = 1 << 2, + /* If channel has been checked for DFS */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_DFS_CLEAR = 1 << 3, + /* Excluded in 802.11d */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_11D_EXCLUDED = 1 << 4, + /* Channel Switch Announcement received on this channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_CSA_RECEIVED = 1 << 5, + /* Ad-hoc is not allowed */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_DISALLOW_ADHOC = 1 << 6, + /* Station only channel */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_DISALLOW_HOSTAP = 1 << 7, + /* DFS radar history for slave device (STA mode) */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_HISTORY_RADAR = 1 << 8, + /* DFS CAC valid for slave device (STA mode) */ + QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_CAC_VALID = 1 << 9, +}; + +/** + * qca_wlan_vendor_attr_nud_stats_set: Attributes to vendor subcmd + * QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_SET. This carries the requisite + * information to start/stop the NUD statistics collection. + */ +enum qca_attr_nud_stats_set { + QCA_ATTR_NUD_STATS_SET_INVALID = 0, + + /* + * Flag to start/stop the NUD statistics collection. + * Start - If included, Stop - If not included + */ + QCA_ATTR_NUD_STATS_SET_START = 1, + /* IPv4 address of the default gateway (in network byte order) */ + QCA_ATTR_NUD_STATS_GW_IPV4 = 2, + /* + * Represents the data packet type to be monitored. + * Host driver tracks the stats corresponding to each data frame + * represented by these flags. + * These data packets are represented by + * enum qca_wlan_vendor_nud_stats_set_data_pkt_info. + */ + QCA_ATTR_NUD_STATS_SET_DATA_PKT_INFO = 3, + /* keep last */ + QCA_ATTR_NUD_STATS_SET_LAST, + QCA_ATTR_NUD_STATS_SET_MAX = + QCA_ATTR_NUD_STATS_SET_LAST - 1, +}; + +/** + * enum qca_attr_connectivity_check_stats_set - attribute to vendor subcmd + * QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_SET. This carry the requisite + * information to start / stop the NUD stats collection. + * @QCA_ATTR_CONNECTIVITY_CHECK_STATS_STATS_PKT_INFO_TYPE: set pkt info stats + * Bitmap to Flag to Start / Stop the NUD stats collection + * Start - If included , Stop - If not included + * @QCA_ATTR_CONNECTIVITY_CHECK_STATS_DNS_DOMAIN_NAME: set gatway ipv4 address + * IPv4 address of Default Gateway (in network byte order) + * QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_SET. This carry the requisite + * information to start / stop the NUD stats collection. + * @QCA_ATTR_CONNECTIVITY_CHECK_STATS_SRC_PORT: set nud debug stats + * Flag to Start / Stop the NUD stats collection + * Start - If included , Stop - If not included + * @QCA_ATTR_CONNECTIVITY_CHECK_STATS_DEST_PORT: set gatway ipv4 address + * IPv4 address of Default Gateway (in network byte order) + * QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_SET. This carry the requisite + * information to start / stop the NUD stats collection. + * @QCA_ATTR_CONNECTIVITY_CHECK_STATS_DEST_IPV4: set nud debug stats + * Flag to Start / Stop the NUD stats collection + * Start - If included , Stop - If not included + * @QCA_ATTR_CONNECTIVITY_CHECK_STATS_DEST_IPV6: set gatway ipv4 address + * IPv4 address of Default Gateway (in network byte order) + */ +enum qca_attr_connectivity_check_stats_set { + QCA_ATTR_CONNECTIVITY_CHECK_STATS_SET_INVALID = 0, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_STATS_PKT_INFO_TYPE = 1, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_DNS_DOMAIN_NAME = 2, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_SRC_PORT = 3, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_DEST_PORT = 4, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_DEST_IPV4 = 5, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_DEST_IPV6 = 6, + /* keep last */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_SET_LAST, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_SET_MAX = + QCA_ATTR_CONNECTIVITY_CHECK_STATS_SET_LAST - 1, +}; + +/** + * qca_wlan_vendor_nud_stats_data_pkt_flags: Flag representing the various + * data types for which the stats have to get collected. + */ +enum qca_wlan_vendor_connectivity_check_pkt_flags { + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_ARP = 1 << 0, + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_DNS = 1 << 1, + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_TCP_HANDSHAKE = 1 << 2, + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_ICMPV4 = 1 << 3, + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_ICMPV6 = 1 << 4, + /* Used by QCA_ATTR_NUD_STATS_PKT_TYPE only in nud stats get + * to represent the stats of respective data type. + */ + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_TCP_SYN = 1 << 5, + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_TCP_SYN_ACK = 1 << 6, + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_TCP_ACK = 1 << 7, +}; + +enum qca_attr_connectivity_check_stats { + QCA_ATTR_CONNECTIVITY_CHECK_STATS_INVALID = 0, + /* Data packet type for which the stats are collected. + * Represented by enum qca_wlan_vendor_nud_stats_data_pkt_flags + */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_TYPE = 1, + /* ID corresponding to the DNS frame for which the respective DNS stats + * are monitored (u32). + */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_DNS_DOMAIN_NAME = 2, + /* source / destination port on which the respective proto stats are + * collected (u32). + */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_SRC_PORT = 3, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_DEST_PORT = 4, + /* IPv4/IPv6 address for which the destined data packets are + * monitored. (in network byte order) + */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_DEST_IPV4 = 5, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_DEST_IPV6 = 6, + /* Data packet Request count received from netdev */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_REQ_COUNT_FROM_NETDEV = 7, + /* Data packet Request count sent to lower MAC from upper MAC */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_REQ_COUNT_TO_LOWER_MAC = 8, + /* Data packet Request count received by lower MAC from upper MAC */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_REQ_RX_COUNT_BY_LOWER_MAC = 9, + /* Data packet Request count successfully transmitted by the device */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_REQ_COUNT_TX_SUCCESS = 10, + /* Data packet Response count received by lower MAC */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_RSP_RX_COUNT_BY_LOWER_MAC = 11, + /* Data packet Response count received by upper MAC */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_RSP_RX_COUNT_BY_UPPER_MAC = 12, + /* Data packet Response count delivered to netdev */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_RSP_COUNT_TO_NETDEV = 13, + /* Data Packet Response count that are dropped out of order */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_RSP_COUNT_OUT_OF_ORDER_DROP = 14, + + /* keep last */ + QCA_ATTR_CONNECTIVITY_CHECK_DATA_STATS_LAST, + QCA_ATTR_CONNECTIVITY_CHECK_DATA_STATS_MAX = + QCA_ATTR_CONNECTIVITY_CHECK_DATA_STATS_LAST - 1, +}; + +/** + * qca_attr_nud_stats_get: Attributes to vendor subcmd + * QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_GET. This carries the requisite + * NUD statistics collected when queried. + */ +enum qca_attr_nud_stats_get { + QCA_ATTR_NUD_STATS_GET_INVALID = 0, + /* ARP Request count from netdev */ + QCA_ATTR_NUD_STATS_ARP_REQ_COUNT_FROM_NETDEV = 1, + /* ARP Request count sent to lower MAC from upper MAC */ + QCA_ATTR_NUD_STATS_ARP_REQ_COUNT_TO_LOWER_MAC = 2, + /* ARP Request count received by lower MAC from upper MAC */ + QCA_ATTR_NUD_STATS_ARP_REQ_RX_COUNT_BY_LOWER_MAC = 3, + /* ARP Request count successfully transmitted by the device */ + QCA_ATTR_NUD_STATS_ARP_REQ_COUNT_TX_SUCCESS = 4, + /* ARP Response count received by lower MAC */ + QCA_ATTR_NUD_STATS_ARP_RSP_RX_COUNT_BY_LOWER_MAC = 5, + /* ARP Response count received by upper MAC */ + QCA_ATTR_NUD_STATS_ARP_RSP_RX_COUNT_BY_UPPER_MAC = 6, + /* ARP Response count delivered to netdev */ + QCA_ATTR_NUD_STATS_ARP_RSP_COUNT_TO_NETDEV = 7, + /* ARP Response count delivered to netdev */ + QCA_ATTR_NUD_STATS_ARP_RSP_COUNT_OUT_OF_ORDER_DROP = 8, + /* + * Flag indicating if the station's link to the AP is active. + * Active Link - If included, Inactive link - If not included + */ + QCA_ATTR_NUD_STATS_AP_LINK_ACTIVE = 9, + /* + * Flag indicating if there is any duplicate address detected (DAD). + * Yes - If detected, No - If not detected. + */ + QCA_ATTR_NUD_STATS_IS_DAD = 10, + /* + * List of Data types for which the stats are requested. + * This list does not carry ARP stats as they are done by the + * above attributes. Represented by enum qca_attr_nud_data_stats. + */ + QCA_ATTR_NUD_STATS_DATA_PKT_STATS = 11, + /* keep last */ + QCA_ATTR_NUD_STATS_GET_LAST, + QCA_ATTR_NUD_STATS_GET_MAX = + QCA_ATTR_NUD_STATS_GET_LAST - 1, +}; + +enum qca_wlan_btm_candidate_status { + QCA_STATUS_ACCEPT = 0, + QCA_STATUS_REJECT_EXCESSIVE_FRAME_LOSS_EXPECTED = 1, + QCA_STATUS_REJECT_EXCESSIVE_DELAY_EXPECTED = 2, + QCA_STATUS_REJECT_INSUFFICIENT_QOS_CAPACITY = 3, + QCA_STATUS_REJECT_LOW_RSSI = 4, + QCA_STATUS_REJECT_HIGH_INTERFERENCE = 5, + QCA_STATUS_REJECT_UNKNOWN = 6, +}; + +enum qca_wlan_vendor_attr_btm_candidate_info { + QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_INVALID = 0, + + /* 6-byte MAC address representing the BSSID of transition candidate */ + QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_BSSID = 1, + /* + * Unsigned 32-bit value from enum qca_wlan_btm_candidate_status + * returned by the driver. It says whether the BSSID provided in + * QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_BSSID is acceptable by + * the driver, if not it specifies the reason for rejection. + * Note that the user-space can overwrite the transition reject reason + * codes provided by driver based on more information. + */ + QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_STATUS = 2, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_MAX = + QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_AFTER_LAST - 1, +}; + +enum qca_attr_trace_level { + QCA_ATTR_TRACE_LEVEL_INVALID = 0, + /* + * Nested array of the following attributes: + * QCA_ATTR_TRACE_LEVEL_MODULE, + * QCA_ATTR_TRACE_LEVEL_MASK. + */ + QCA_ATTR_TRACE_LEVEL_PARAM = 1, + /* + * Specific QCA host driver module. Please refer to the QCA host + * driver implementation to get the specific module ID. + */ + QCA_ATTR_TRACE_LEVEL_MODULE = 2, + /* Different trace level masks represented in the QCA host driver. */ + QCA_ATTR_TRACE_LEVEL_MASK = 3, + + /* keep last */ + QCA_ATTR_TRACE_LEVEL_AFTER_LAST, + QCA_ATTR_TRACE_LEVEL_MAX = + QCA_ATTR_TRACE_LEVEL_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_get_logger_features - value for logger + * supported features + * @QCA_WLAN_VENDOR_ATTR_LOGGER_INVALID - Invalid + * @QCA_WLAN_VENDOR_ATTR_LOGGER_SUPPORTED - Indicate the supported features + * @QCA_WLAN_VENDOR_ATTR_LOGGER_AFTER_LAST - To keep track of the last enum + * @QCA_WLAN_VENDOR_ATTR_LOGGER_MAX - max value possible for this type + * + * enum values are used for NL attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_GET_LOGGER_FEATURE_SET sub command. + */ +enum qca_wlan_vendor_attr_get_logger_features { + QCA_WLAN_VENDOR_ATTR_LOGGER_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_LOGGER_SUPPORTED = 1, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_LOGGER_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LOGGER_MAX = + QCA_WLAN_VENDOR_ATTR_LOGGER_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_link_properties - link properties + * + * @QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_NSS: Unsigned 8-bit value to + * specify the number of spatial streams negotiated + * @QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_RATE_FLAGS: Unsigned 8-bit value + * to specify negotiated rate flags i.e. ht, vht and channel width + * @QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_FREQ: Unsigned 32bit value to + * specify the operating frequency + * @QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_MAC_ADDR: MAC Address of the peer + * (STA / AP ) for the connected link. + * @QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_STA_FLAGS: Attribute containing a + * &struct nl80211_sta_flag_update for the respective connected link. MAC + * address of the peer represented by + * QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_MAC_ADDR. + * @QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_AFTER_LAST: after last + * @QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_MAX: max value + */ +enum qca_wlan_vendor_attr_link_properties { + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_NSS = 1, + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_RATE_FLAGS = 2, + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_FREQ = 3, + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_MAC_ADDR = 4, + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_STA_FLAGS = 5, + + /* KEEP LAST */ + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_MAX = + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_nd_offload - vendor NS offload support + * + * @QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_INVALID - Invalid + * @QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_FLAG - Flag to set NS offload + * @QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_AFTER_LAST - To keep track of the last enum + * @QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_MAX - max value possible for this type + * + * enum values are used for NL attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_ND_OFFLOAD sub command. + */ +enum qca_wlan_vendor_attr_nd_offload { + QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_FLAG, + + /* Keep last */ + QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_MAX = + QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_features - Vendor device/driver feature flags + * + * @QCA_WLAN_VENDOR_FEATURE_KEY_MGMT_OFFLOAD: Device supports key + * management offload, a mechanism where the station's firmware + * does the exchange with the AP to establish the temporal keys + * after roaming, rather than having the user space wpa_supplicant do it. + * @QCA_WLAN_VENDOR_FEATURE_SUPPORT_HW_MODE_ANY: Device supports automatic + * band selection based on channel selection results. + * @QCA_WLAN_VENDOR_FEATURE_OFFCHANNEL_SIMULTANEOUS: Device supports + * simultaneous off-channel operations. + * @QCA_WLAN_VENDOR_FEATURE_P2P_LISTEN_OFFLOAD: Device supports P2P + * Listen offload; a mechanism where the station's firmware takes care of + * responding to incoming Probe Request frames received from other P2P + * Devices whilst in Listen state, rather than having the user space + * wpa_supplicant do it. Information from received P2P requests are + * forwarded from firmware to host whenever the host processor wakes up. + * @QCA_WLAN_VENDOR_FEATURE_OCE_STA: Device supports all OCE non-AP STA + * specific features. + * @QCA_WLAN_VENDOR_FEATURE_OCE_AP: Device supports all OCE AP specific + * features. + * @QCA_WLAN_VENDOR_FEATURE_OCE_STA_CFON: Device supports OCE STA-CFON + * specific features only. If a Device sets this bit but not the + * %QCA_WLAN_VENDOR_FEATURE_OCE_AP, the userspace shall assume that + * this Device may not support all OCE AP functionalities but can support + * only OCE STA-CFON functionalities. + * @QCA_WLAN_VENDOR_FEATURE_SELF_MANAGED_REGULATORY: Device supports self + * managed regulatory. + * @QCA_WLAN_VENDOR_FEATURE_TWT: Device supports TWT (Target Wake Time). + * @QCA_WLAN_VENDOR_FEATURE_11AX: Device supports 802.11ax (HE) + * @QCA_WLAN_VENDOR_FEATURE_6GHZ_SUPPORT: Device supports 6 GHz band operation + * @QCA_WLAN_VENDOR_FEATURE_THERMAL_CONFIG: Device is capable of receiving + * and applying thermal configuration through + * %QCA_WLAN_VENDOR_ATTR_THERMAL_LEVEL and + * %QCA_WLAN_VENDOR_ATTR_THERMAL_COMPLETION_WINDOW attributes from + * userspace. + * @QCA_WLAN_VENDOR_FEATURE_CONCURRENT_BAND_SESSIONS: Device supports + * concurrent network sessions on different Wi-Fi Bands. This feature + * capability is attributed to the hardware's capability to support + * the same (e.g., DBS). + * @NUM_QCA_WLAN_VENDOR_FEATURES: Number of assigned feature bits + */ +enum qca_wlan_vendor_features { + QCA_WLAN_VENDOR_FEATURE_KEY_MGMT_OFFLOAD = 0, + QCA_WLAN_VENDOR_FEATURE_SUPPORT_HW_MODE_ANY = 1, + QCA_WLAN_VENDOR_FEATURE_OFFCHANNEL_SIMULTANEOUS = 2, + QCA_WLAN_VENDOR_FEATURE_P2P_LISTEN_OFFLOAD = 3, + QCA_WLAN_VENDOR_FEATURE_OCE_STA = 4, + QCA_WLAN_VENDOR_FEATURE_OCE_AP = 5, + QCA_WLAN_VENDOR_FEATURE_OCE_STA_CFON = 6, + QCA_WLAN_VENDOR_FEATURE_SELF_MANAGED_REGULATORY = 7, + QCA_WLAN_VENDOR_FEATURE_TWT = 8, + QCA_WLAN_VENDOR_FEATURE_11AX = 9, + QCA_WLAN_VENDOR_FEATURE_6GHZ_SUPPORT = 10, + QCA_WLAN_VENDOR_FEATURE_THERMAL_CONFIG = 11, + QCA_WLAN_VENDOR_FEATURE_CONCURRENT_BAND_SESSIONS = 13, + + NUM_QCA_WLAN_VENDOR_FEATURES /* keep last */ +}; + +/** + * enum qca_wlan_vendor_attr_sap_conditional_chan_switch - Parameters for SAP + * conditional channel switch + * @QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_INVALID: Invalid initial + * value + * @QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_FREQ_LIST: Priority based + * frequency list (an array of u32 values in host byte order) + * @QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_STATUS: Status of the + * conditional switch (u32)- 0: Success, Non-zero: Failure + * @QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_MAX: Subcommand max + */ +enum qca_wlan_vendor_attr_sap_conditional_chan_switch { + QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_FREQ_LIST = 1, + QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_STATUS = 2, + + /* Keep Last */ + QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_MAX = + QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_AFTER_LAST - 1, +}; + +/** + * enum wifi_logger_supported_features - values for supported logger features + * @WIFI_LOGGER_MEMORY_DUMP_SUPPORTED: Memory dump of FW + * @WIFI_LOGGER_PER_PACKET_TX_RX_STATUS_SUPPORTED: Per packet statistics + * @WIFI_LOGGER_CONNECT_EVENT_SUPPORTED: Logging of Connectivity events + * @WIFI_LOGGER_POWER_EVENT_SUPPORTED: Power of driver + * @WIFI_LOGGER_WAKE_LOCK_SUPPORTE: Wakelock of driver + * @WIFI_LOGGER_WATCHDOG_TIMER_SUPPORTED: monitor FW health + * @WIFI_LOGGER_DRIVER_DUMP_SUPPORTED: support driver dump + * @WIFI_LOGGER_PACKET_FATE_SUPPORTED: tracks connection packets fate + */ +enum wifi_logger_supported_features { + WIFI_LOGGER_MEMORY_DUMP_SUPPORTED = (1 << (0)), + WIFI_LOGGER_PER_PACKET_TX_RX_STATUS_SUPPORTED = (1 << (1)), + WIFI_LOGGER_CONNECT_EVENT_SUPPORTED = (1 << (2)), + WIFI_LOGGER_POWER_EVENT_SUPPORTED = (1 << (3)), + WIFI_LOGGER_WAKE_LOCK_SUPPORTED = (1 << (4)), + WIFI_LOGGER_VERBOSE_SUPPORTED = (1 << (5)), + WIFI_LOGGER_WATCHDOG_TIMER_SUPPORTED = (1 << (6)), + WIFI_LOGGER_DRIVER_DUMP_SUPPORTED = (1 << (7)), + WIFI_LOGGER_PACKET_FATE_SUPPORTED = (1 << (8)) +}; + +/** + * enum qca_wlan_vendor_attr_acs_offload - Defines attributes to be used with + * vendor command/event QCA_NL80211_VENDOR_SUBCMD_DO_ACS. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_PRIMARY_CHANNEL: Required (u8). + * Used with event to notify the primary channel number selected in ACS + * operation. + * Note: If both the driver and user-space application supports the 6 GHz band, + * QCA_WLAN_VENDOR_ATTR_ACS_PRIMARY_CHANNEL is deprecated; use + * QCA_WLAN_VENDOR_ATTR_ACS_PRIMARY_FREQUENCY instead. + * To maintain backward compatibility, QCA_WLAN_VENDOR_ATTR_ACS_PRIMARY_CHANNEL + * is still used if either of the driver or user space application doesn't + * support the 6 GHz band. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_SECONDARY_CHANNEL: Required (u8). + * Used with event to notify the secondary channel number selected in ACS + * operation. + * Note: If both the driver and user-space application supports the 6 GHz band, + * QCA_WLAN_VENDOR_ATTR_ACS_SECONDARY_CHANNEL is deprecated; use + * QCA_WLAN_VENDOR_ATTR_ACS_SECONDARY_FREQUENCY instead. + * To maintain backward compatibility, + * QCA_WLAN_VENDOR_ATTR_ACS_SECONDARY_CHANNEL is still used if either of + * the driver or user space application doesn't support 6 GHz band. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_HW_MODE: Required (u8). + * (a) Used with command to configure hw_mode from + * enum qca_wlan_vendor_acs_hw_mode for ACS operation. + * (b) Also used with event to notify the hw_mode of selected primary channel + * in ACS operation. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_HT_ENABLED: Flag attribute. + * Used with command to configure ACS operation for HT mode. + * Disable (flag attribute not present) - HT disabled and + * Enable (flag attribute present) - HT enabled. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_HT40_ENABLED: Flag attribute. + * Used with command to configure ACS operation for HT40 mode. + * Disable (flag attribute not present) - HT40 disabled and + * Enable (flag attribute present) - HT40 enabled. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_VHT_ENABLED: Flag attribute. + * Used with command to configure ACS operation for VHT mode. + * Disable (flag attribute not present) - VHT disabled and + * Enable (flag attribute present) - VHT enabled. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_CHWIDTH: Optional (u16) with command and + * mandatory with event. + * If specified in command path, ACS operation is configured with the given + * channel width (in MHz). + * In event path, specifies the channel width of the primary channel selected. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_CH_LIST: Required and type is NLA_UNSPEC. + * Used with command to configure channel list using an array of + * channel numbers (u8). + * Note: If both the driver and user-space application supports the 6 GHz band, + * the driver mandates use of QCA_WLAN_VENDOR_ATTR_ACS_FREQ_LIST whereas + * QCA_WLAN_VENDOR_ATTR_ACS_CH_LIST is optional. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG0_CENTER_CHANNEL: Required (u8). + * Used with event to notify the VHT segment 0 center channel number selected in + * ACS operation. + * Note: If both the driver and user-space application supports the 6 GHz band, + * QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG0_CENTER_CHANNEL is deprecated; use + * QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG0_CENTER_FREQUENCY instead. + * To maintain backward compatibility, + * QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG0_CENTER_CHANNEL is still used if either of + * the driver or user space application doesn't support the 6 GHz band. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG1_CENTER_CHANNEL: Required (u8). + * Used with event to notify the VHT segment 1 center channel number selected in + * ACS operation. + * Note: If both the driver and user-space application supports the 6 GHz band, + * QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG1_CENTER_CHANNEL is deprecated; use + * QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG1_CENTER_FREQUENCY instead. + * To maintain backward compatibility, + * QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG1_CENTER_CHANNEL is still used if either of + * the driver or user space application doesn't support the 6 GHz band. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_FREQ_LIST: Required and type is NLA_UNSPEC. + * Used with command to configure the channel list using an array of channel + * center frequencies in MHz (u32). + * Note: If both the driver and user-space application supports the 6 GHz band, + * the driver first parses the frequency list and if it fails to get a frequency + * list, parses the channel list specified using + * QCA_WLAN_VENDOR_ATTR_ACS_CH_LIST (considers only 2 GHz and 5 GHz channels in + * QCA_WLAN_VENDOR_ATTR_ACS_CH_LIST). + * + * @QCA_WLAN_VENDOR_ATTR_ACS_PRIMARY_FREQUENCY: Required (u32). + * Used with event to notify the primary channel center frequency (MHz) selected + * in ACS operation. + * Note: If the driver supports the 6 GHz band, the event sent from the driver + * includes this attribute along with QCA_WLAN_VENDOR_ATTR_ACS_PRIMARY_CHANNEL. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_SECONDARY_FREQUENCY: Required (u32). + * Used with event to notify the secondary channel center frequency (MHz) + * selected in ACS operation. + * Note: If the driver supports the 6 GHz band, the event sent from the driver + * includes this attribute along with + * QCA_WLAN_VENDOR_ATTR_ACS_SECONDARY_CHANNEL. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG0_CENTER_FREQUENCY: Required (u32). + * Used with event to notify the VHT segment 0 center channel frequency (MHz) + * selected in ACS operation. + * Note: If the driver supports the 6 GHz band, the event sent from the driver + * includes this attribute along with + * QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG0_CENTER_CHANNEL. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG1_CENTER_FREQUENCY: Required (u32). + * Used with event to notify the VHT segment 1 center channel frequency (MHz) + * selected in ACS operation. + * Note: If the driver supports the 6 GHz band, the event sent from the driver + * includes this attribute along with + * QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG1_CENTER_CHANNEL. + */ +enum qca_wlan_vendor_attr_acs_offload { + QCA_WLAN_VENDOR_ATTR_ACS_CHANNEL_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_ACS_PRIMARY_CHANNEL = 1, + QCA_WLAN_VENDOR_ATTR_ACS_SECONDARY_CHANNEL = 2, + QCA_WLAN_VENDOR_ATTR_ACS_HW_MODE = 3, + QCA_WLAN_VENDOR_ATTR_ACS_HT_ENABLED = 4, + QCA_WLAN_VENDOR_ATTR_ACS_HT40_ENABLED = 5, + QCA_WLAN_VENDOR_ATTR_ACS_VHT_ENABLED = 6, + QCA_WLAN_VENDOR_ATTR_ACS_CHWIDTH = 7, + QCA_WLAN_VENDOR_ATTR_ACS_CH_LIST = 8, + QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG0_CENTER_CHANNEL = 9, + QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG1_CENTER_CHANNEL = 10, + QCA_WLAN_VENDOR_ATTR_ACS_FREQ_LIST = 11, + QCA_WLAN_VENDOR_ATTR_ACS_PRIMARY_FREQUENCY = 12, + QCA_WLAN_VENDOR_ATTR_ACS_SECONDARY_FREQUENCY = 13, + QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG0_CENTER_FREQUENCY = 14, + QCA_WLAN_VENDOR_ATTR_ACS_VHT_SEG1_CENTER_FREQUENCY = 15, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_ACS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_ACS_MAX = + QCA_WLAN_VENDOR_ATTR_ACS_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_acs_hw_mode - Defines HW mode to be used with the + * vendor command/event QCA_NL80211_VENDOR_SUBCMD_DO_ACS. + * + * @QCA_ACS_MODE_IEEE80211B: 802.11b mode + * @QCA_ACS_MODE_IEEE80211G: 802.11g mode + * @QCA_ACS_MODE_IEEE80211A: 802.11a mode + * @QCA_ACS_MODE_IEEE80211AD: 802.11ad mode + * @QCA_ACS_MODE_IEEE80211ANY: all modes + * @QCA_ACS_MODE_IEEE80211AX: 802.11ax mode + */ +enum qca_wlan_vendor_acs_hw_mode { + QCA_ACS_MODE_IEEE80211B, + QCA_ACS_MODE_IEEE80211G, + QCA_ACS_MODE_IEEE80211A, + QCA_ACS_MODE_IEEE80211AD, + QCA_ACS_MODE_IEEE80211ANY, + QCA_ACS_MODE_IEEE80211AX, +}; + +/** + * enum qca_access_policy - access control policy + * + * Access control policy is applied on the configured IE + * (QCA_WLAN_VENDOR_ATTR_CONFIG_ACCESS_POLICY_IE). + * To be set with QCA_WLAN_VENDOR_ATTR_CONFIG_ACCESS_POLICY. + * + * @QCA_ACCESS_POLICY_ACCEPT_UNLESS_LISTED: Deny Wi-Fi Connections which match + *» with the specific configuration (IE) set, i.e. allow all the + *» connections which do not match the configuration. + * @QCA_ACCESS_POLICY_DENY_UNLESS_LISTED: Accept Wi-Fi Connections which match + *» with the specific configuration (IE) set, i.e. deny all the + *» connections which do not match the configuration. + */ +enum qca_access_policy { + QCA_ACCESS_POLICY_ACCEPT_UNLESS_LISTED, + QCA_ACCESS_POLICY_DENY_UNLESS_LISTED, +}; + +/** + * enum qca_ignore_assoc_disallowed - Ignore assoc disallowed values + * + * The valid values for the ignore assoc disallowed + * + * @QCA_IGNORE_ASSOC_DISALLOWED_DISABLE: Disable ignore assoc disallowed + * @QCA_IGNORE_ASSOC_DISALLOWED_ENABLE: Enable ignore assoc disallowed + * + */ +enum qca_ignore_assoc_disallowed { + QCA_IGNORE_ASSOC_DISALLOWED_DISABLE, + QCA_IGNORE_ASSOC_DISALLOWED_ENABLE +}; + +/* Attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_SET_WIFI_CONFIGURATION and + * QCA_NL80211_VENDOR_SUBCMD_GET_WIFI_CONFIGURATION subcommands. + */ +#define QCA_WLAN_VENDOR_ATTR_DISCONNECT_IES\ + QCA_WLAN_VENDOR_ATTR_CONFIG_DISCONNECT_IES +#define QCA_WLAN_VENDOR_ATTR_BEACON_REPORT_FAIL\ + QCA_WLAN_VENDOR_ATTR_CONFIG_BEACON_REPORT_FAIL +#define QCA_WLAN_VENDOR_ATTR_ROAM_REASON\ + QCA_WLAN_VENDOR_ATTR_CONFIG_ROAM_REASON +enum qca_wlan_vendor_attr_config { + QCA_WLAN_VENDOR_ATTR_CONFIG_INVALID = 0, + /* + * Unsigned 32-bit value to set the DTIM period. + * Whether the wifi chipset wakes at every dtim beacon or a multiple of + * the DTIM period. If DTIM is set to 3, the STA shall wake up every 3 + * DTIM beacons. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_MODULATED_DTIM = 1, + /* + * Unsigned 32-bit value to set the wifi_iface stats averaging factor + * used to calculate statistics like average the TSF offset or average + * number of frame leaked. + * For instance, upon Beacon frame reception: + * current_avg = ((beacon_TSF - TBTT) * factor + previous_avg * (0x10000 - factor) ) / 0x10000 + * For instance, when evaluating leaky APs: + * current_avg = ((num frame received within guard time) * factor + previous_avg * (0x10000 - factor)) / 0x10000 + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_STATS_AVG_FACTOR = 2, + /* + * Unsigned 32-bit value to configure guard time, i.e., when + * implementing IEEE power management based on frame control PM bit, how + * long the driver waits before shutting down the radio and after + * receiving an ACK frame for a Data frame with PM bit set. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_GUARD_TIME = 3, + /* Unsigned 32-bit value to change the FTM capability dynamically */ + QCA_WLAN_VENDOR_ATTR_CONFIG_FINE_TIME_MEASUREMENT = 4, + /* Unsigned 16-bit value to configure maximum TX rate dynamically */ + QCA_WLAN_VENDOR_ATTR_CONF_TX_RATE = 5, + /* + * Unsigned 32-bit value to configure the number of continuous + * Beacon Miss which shall be used by the firmware to penalize + * the RSSI. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_PENALIZE_AFTER_NCONS_BEACON_MISS = 6, + /* + * Unsigned 8-bit value to configure the channel avoidance indication + * behavior. Firmware to send only one indication and ignore duplicate + * indications when set to avoid multiple Apps wakeups. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_CHANNEL_AVOIDANCE_IND = 7, + /* + * 8-bit unsigned value to configure the maximum TX MPDU for + * aggregation. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_TX_MPDU_AGGREGATION = 8, + /* + * 8-bit unsigned value to configure the maximum RX MPDU for + * aggregation. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_MPDU_AGGREGATION = 9, + /* + * 8-bit unsigned value to configure the Non aggregrate/11g sw + * retry threshold (0 disable, 31 max). + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_NON_AGG_RETRY = 10, + /* + * 8-bit unsigned value to configure the aggregrate sw + * retry threshold (0 disable, 31 max). + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_AGG_RETRY = 11, + /* + * 8-bit unsigned value to configure the MGMT frame + * retry threshold (0 disable, 31 max). + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_MGMT_RETRY = 12, + /* + * 8-bit unsigned value to configure the CTRL frame + * retry threshold (0 disable, 31 max). + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_CTRL_RETRY = 13, + /* + * 8-bit unsigned value to configure the propagation delay for + * 2G/5G band (0~63, units in us) + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_PROPAGATION_DELAY = 14, + /* + * Unsigned 32-bit value to configure the number of unicast TX fail + * packet count. The peer is disconnected once this threshold is + * reached. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_TX_FAIL_COUNT = 15, + /* + * Attribute used to set scan default IEs to the driver. + * + * These IEs can be used by scan operations that will be initiated by + * the driver/firmware. + * + * For further scan requests coming to the driver, these IEs should be + * merged with the IEs received along with scan request coming to the + * driver. If a particular IE is present in the scan default IEs but not + * present in the scan request, then that IE should be added to the IEs + * sent in the Probe Request frames for that scan request. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_SCAN_DEFAULT_IES = 16, + /* Unsigned 32-bit attribute for generic commands */ + QCA_WLAN_VENDOR_ATTR_CONFIG_GENERIC_COMMAND = 17, + /* Unsigned 32-bit value attribute for generic commands */ + QCA_WLAN_VENDOR_ATTR_CONFIG_GENERIC_VALUE = 18, + /* Unsigned 32-bit data attribute for generic command response */ + QCA_WLAN_VENDOR_ATTR_CONFIG_GENERIC_DATA = 19, + /* + * Unsigned 32-bit length attribute for + * QCA_WLAN_VENDOR_ATTR_CONFIG_GENERIC_DATA + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_GENERIC_LENGTH = 20, + /* + * Unsigned 32-bit flags attribute for + * QCA_WLAN_VENDOR_ATTR_CONFIG_GENERIC_DATA + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_GENERIC_FLAGS = 21, + /* + * Unsigned 32-bit, defining the access policy. + * See enum qca_access_policy. Used with + * QCA_WLAN_VENDOR_ATTR_CONFIG_ACCESS_POLICY_IE_LIST. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ACCESS_POLICY = 22, + /* + * Sets the list of full set of IEs for which a specific access policy + * has to be applied. Used along with + * QCA_WLAN_VENDOR_ATTR_CONFIG_ACCESS_POLICY to control the access. + * Zero length payload can be used to clear this access constraint. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ACCESS_POLICY_IE_LIST = 23, + /* + * Unsigned 32-bit, specifies the interface index (netdev) for which the + * corresponding configurations are applied. If the interface index is + * not specified, the configurations are attributed to the respective + * wiphy. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_IFINDEX = 24, + /* + * 8-bit unsigned value to trigger QPower: + * 1-Enable, 0-Disable + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_QPOWER = 25, + /* + * 8-bit unsigned value to configure the driver and below layers to + * ignore the assoc disallowed set by APs while connecting + * 1-Ignore, 0-Don't ignore + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_IGNORE_ASSOC_DISALLOWED = 26, + /* + * 32-bit unsigned value to trigger antenna diversity features: + * 1-Enable, 0-Disable + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_ENA = 27, + /* 32-bit unsigned value to configure specific chain antenna */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_CHAIN = 28, + /* + * 32-bit unsigned value to trigger cycle selftest + * 1-Enable, 0-Disable + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_SELFTEST = 29, + /* + * 32-bit unsigned to configure the cycle time of selftest + * the unit is micro-second + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_SELFTEST_INTVL = 30, + /* 32-bit unsigned value to set reorder timeout for AC_VO */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_REORDER_TIMEOUT_VOICE = 31, + /* 32-bit unsigned value to set reorder timeout for AC_VI */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_REORDER_TIMEOUT_VIDEO = 32, + /* 32-bit unsigned value to set reorder timeout for AC_BE */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_REORDER_TIMEOUT_BESTEFFORT = 33, + /* 32-bit unsigned value to set reorder timeout for AC_BK */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_REORDER_TIMEOUT_BACKGROUND = 34, + /* 6-byte MAC address to point out the specific peer */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_BLOCKSIZE_PEER_MAC = 35, + /* 32-bit unsigned value to set window size for specific peer */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_BLOCKSIZE_WINLIMIT = 36, + /* 8-bit unsigned value to set the beacon miss threshold in 2.4 GHz */ + QCA_WLAN_VENDOR_ATTR_CONFIG_BEACON_MISS_THRESHOLD_24 = 37, + /* 8-bit unsigned value to set the beacon miss threshold in 5 GHz */ + QCA_WLAN_VENDOR_ATTR_CONFIG_BEACON_MISS_THRESHOLD_5 = 38, + /* + * 32-bit unsigned value to configure 5 or 10 MHz channel width for + * station device while in disconnect state. The attribute use the + * value of enum nl80211_chan_width: NL80211_CHAN_WIDTH_5 means 5 MHz, + * NL80211_CHAN_WIDTH_10 means 10 MHz. If set, the device work in 5 or + * 10 MHz channel width, the station will not connect to a BSS using 20 + * MHz or higher bandwidth. Set to NL80211_CHAN_WIDTH_20_NOHT to + * clear this constraint. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_SUB20_CHAN_WIDTH = 39, + /* + * 32-bit unsigned value to configure the propagation absolute delay + * for 2G/5G band (units in us) + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_PROPAGATION_ABS_DELAY = 40, + /* 32-bit unsigned value to set probe period */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_PROBE_PERIOD = 41, + /* 32-bit unsigned value to set stay period */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_STAY_PERIOD = 42, + /* 32-bit unsigned value to set snr diff */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_SNR_DIFF = 43, + /* 32-bit unsigned value to set probe dwell time */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_PROBE_DWELL_TIME = 44, + /* 32-bit unsigned value to set mgmt snr weight */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_MGMT_SNR_WEIGHT = 45, + /* 32-bit unsigned value to set data snr weight */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_DATA_SNR_WEIGHT = 46, + /* 32-bit unsigned value to set ack snr weight */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_ACK_SNR_WEIGHT = 47, + /* + * 32-bit unsigned value to configure the listen interval. + * This is in units of beacon intervals. This configuration alters + * the negotiated listen interval with the AP during the connection. + * It is highly recommended to configure a value less than or equal to + * the one negotiated during the association. Configuring any greater + * value can have adverse effects (frame loss, AP disassociating STA, + * etc.). + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_LISTEN_INTERVAL = 48, + /* + * 8 bit unsigned value that is set on an AP/GO virtual interface to + * disable operations that would cause the AP/GO to leave its operating + * channel. + * + * This will restrict the scans to the AP/GO operating channel and the + * channels of the other band, if DBS is supported.A STA/CLI interface + * brought up after this setting is enabled, will be restricted to + * connecting to devices only on the AP/GO interface's operating channel + * or on the other band in DBS case. P2P supported channel list is + * modified, to only include AP interface's operating-channel and the + * channels of the other band if DBS is supported. + * + * These restrictions are only applicable as long as the AP/GO interface + * is alive. If the AP/GO interface is brought down then this + * setting/restriction is forgotten. + * + * If this variable is set on an AP/GO interface while a multi-channel + * concurrent session is active, it has no effect on the operation of + * the current interfaces, other than restricting the scan to the AP/GO + * operating channel and the other band channels if DBS is supported. + * However, if the STA is brought down and restarted then the new STA + * connection will either be formed on the AP/GO channel or on the + * other band in a DBS case. This is because of the scan being + * restricted on these channels as mentioned above. + * + * 1-Disable offchannel operations, 0-Enable offchannel operations. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RESTRICT_OFFCHANNEL = 49, + + /* + * 8 bit unsigned value to enable/disable LRO (Large Receive Offload) + * on an interface. + * 1 - Enable , 0 - Disable. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_LRO = 50, + + /* + * 8 bit unsigned value to globally enable/disable scan + * 1 - Enable, 0 - Disable. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_SCAN_ENABLE = 51, + + /* 8-bit unsigned value to set the total beacon miss count */ + QCA_WLAN_VENDOR_ATTR_CONFIG_TOTAL_BEACON_MISS_COUNT = 52, + + /* + * Unsigned 32-bit value to configure the number of continuous + * Beacon Miss which shall be used by the firmware to penalize + * the RSSI for BTC. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_PENALIZE_AFTER_NCONS_BEACON_MISS_BTC = 53, + + /* + * 8-bit unsigned value to configure the driver and below layers to + * enable/disable all fils features. + * 0-enable, 1-disable + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_DISABLE_FILS = 54, + + /* 16-bit unsigned value to configure the level of WLAN latency + * module. See enum qca_wlan_vendor_attr_config_latency_level. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL = 55, + + /* + * 8-bit unsigned value indicating the driver to use the RSNE as-is from + * the connect interface. Exclusively used for the scenarios where the + * device is used as a test bed device with special functionality and + * not recommended for production. This helps driver to not validate the + * RSNE passed from user space and thus allow arbitrary IE data to be + * used for testing purposes. + * 1-enable, 0-disable. + * Applications set/reset this configuration. If not reset, this + * parameter remains in use until the driver is unloaded. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RSN_IE = 56, + + /* + * 8-bit unsigned value to trigger green Tx power saving. + * 1-Enable, 0-Disable + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_GTX = 57, + + /* + * Attribute to configure disconnect IEs to the driver. + * This carries an array of unsigned 8-bit characters. + * + * If this is configured, driver shall fill the IEs in disassoc/deauth + * frame. + * These IEs are expected to be considered only for the next + * immediate disconnection (disassoc/deauth frame) originated by + * the DUT, irrespective of the entity (user space/driver/firmware) + * triggering the disconnection. + * The host drivers are not expected to use the IEs set through + * this interface for further disconnections after the first immediate + * disconnection initiated post the configuration. + * If the IEs are also updated through cfg80211 interface (after the + * enhancement to cfg80211_disconnect), host driver is expected to + * take the union of IEs from both of these interfaces and send in + * further disassoc/deauth frames. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_DISCONNECT_IES = 58, + + /* 8-bit unsigned value for ELNA bypass. + * 1-Enable, 0-Disable + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ELNA_BYPASS = 59, + + QCA_WLAN_VENDOR_ATTR_CONFIG_BEACON_REPORT_FAIL = 60, + + /* 8-bit unsigned value. This attribute enables/disables the host driver + * to send roam reason information in the reassociation request to the + * AP. 1-Enable, 0-Disable. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ROAM_REASON = 61, + + /* + * 8-bit unsigned value to trigger Optimized Power Management: + * 1-Enable, 0-Disable + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_OPTIMIZED_POWER_MANAGEMENT = 71, + + /* 8-bit unsigned value. This attribute takes the QOS/access category + * value represented by the enum qca_wlan_ac_type and expects the driver + * to upgrade the UDP frames to this QOS. The value of QCA_WLAN_AC_ALL + * is invalid for this attribute. This will override the DSCP value + * configured in the frame with the intention to only upgrade the QOS. + * That said, it is not intended to downgrade the QOS for the frames. + * Set the value to 0 ( corresponding to BE ) if the QOS upgrade needs + * to disable. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_UDP_QOS_UPGRADE = 72, + + /* 8-bit unsigned value. This attribute is used to dynamically configure + * the number of chains to be used for transmitting data. This + * configuration is allowed only when in connected state and will be + * effective until disconnected. The driver rejects this configuration + * if the number of spatial streams being used in the current connection + * cannot be supported by this configuration. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_NUM_TX_CHAINS = 73, + + /* 8-bit unsigned value. This attribute is used to dynamically configure + * the number of chains to be used for receiving data. This + * configuration is allowed only when in connected state and will be + * effective until disconnected. The driver rejects this configuration + * if the number of spatial streams being used in the current connection + * cannot be supported by this configuration. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_NUM_RX_CHAINS = 74, + + /* 8-bit unsigned value. This attribute is used to dynamically configure + * the number of spatial streams used for transmitting the data. When + * configured in the disconnected state, the configured value will + * be considered for the following connection attempt. + * If the NSS is updated after the connection, the updated NSS value + * is notified to the peer using the Operating Mode Notification/Spatial + * Multiplexing Power Save frame. + * The TX NSS value configured after the connection shall not be greater + * than the value negotiated during the connection. Any such higher + * value configuration shall be treated as invalid configuration by + * the driver. This attribute shall be configured along with + * QCA_WLAN_VENDOR_ATTR_CONFIG_RX_NSS attribute to define the symmetric + * configuration (such as 2X2 or 1X1) or the asymmetric + * configuration (such as 1X2). + * If QCA_WLAN_VENDOR_ATTR_CONFIG_NSS attribute is also provided along + * with this QCA_WLAN_VENDOR_ATTR_CONFIG_TX_NSS attribute the driver + * will update the TX NSS based on QCA_WLAN_VENDOR_ATTR_CONFIG_TX_NSS. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_TX_NSS = 77, + + /* 8-bit unsigned value. This attribute is used to dynamically configure + * the number of spatial streams used for receiving the data. When + * configured in the disconnected state, the configured value will + * be considered for the following connection attempt. + * If the NSS is updated after the connection, the updated NSS value + * is notified to the peer using the Operating Mode Notification/Spatial + * Multiplexing Power Save frame. + * The RX NSS value configured after the connection shall not be greater + * than the value negotiated during the connection. Any such higher + * value configuration shall be treated as invalid configuration by + * the driver. This attribute shall be configured along with + * QCA_WLAN_VENDOR_ATTR_CONFIG_TX_NSS attribute to define the symmetric + * configuration (such as 2X2 or 1X1) or the asymmetric + * configuration (such as 1X2). + * If QCA_WLAN_VENDOR_ATTR_CONFIG_NSS attribute is also provided along + * with this QCA_WLAN_VENDOR_ATTR_CONFIG_RX_NSS attribute the driver + * will update the RX NSS based on QCA_WLAN_VENDOR_ATTR_CONFIG_RX_NSS. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_NSS = 78, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_CONFIG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_CONFIG_MAX = + QCA_WLAN_VENDOR_ATTR_CONFIG_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_wifi_logger_start - Enum for wifi logger starting + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_START_INVALID: Invalid attribute + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_RING_ID: Ring ID + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_VERBOSE_LEVEL: Verbose level + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_FLAGS: Flag + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_START_AFTER_LAST: Last value + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_START_MAX: Max value + */ +enum qca_wlan_vendor_attr_wifi_logger_start { + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_START_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_RING_ID = 1, + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_VERBOSE_LEVEL = 2, + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_FLAGS = 3, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_START_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_START_MAX = + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_START_AFTER_LAST - 1, +}; + +/* + * enum qca_wlan_vendor_attr_wifi_logger_get_ring_data - Get ring data + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_INVALID: Invalid attribute + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_ID: Ring ID + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_AFTER_LAST: Last value + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_MAX: Max value + */ +enum qca_wlan_vendor_attr_wifi_logger_get_ring_data { + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_ID = 1, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_MAX = + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_AFTER_LAST - 1, +}; + +#ifdef WLAN_FEATURE_OFFLOAD_PACKETS +/** + * enum wlan_offloaded_packets_control - control commands + * @WLAN_START_OFFLOADED_PACKETS: start offloaded packets + * @WLAN_STOP_OFFLOADED_PACKETS: stop offloaded packets + * + */ +enum wlan_offloaded_packets_control { + WLAN_START_OFFLOADED_PACKETS = 1, + WLAN_STOP_OFFLOADED_PACKETS = 2 +}; + +/** + * enum qca_wlan_vendor_attr_data_offload_ind - Vendor Data Offload Indication + * + * @QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_SESSION: Session corresponding to + * the offloaded data. + * @QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_PROTOCOL: Protocol of the offloaded + * data. + * @QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_EVENT: Event type for the data offload + * indication. + */ +enum qca_wlan_vendor_attr_data_offload_ind { + QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_SESSION, + QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_PROTOCOL, + QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_EVENT, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_MAX = + QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_AFTER_LAST - 1 +}; + + +/** + * enum qca_wlan_vendor_attr_offloaded_packets - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_OFFLOADED_PACKETS. + */ +enum qca_wlan_vendor_attr_offloaded_packets { + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_INVALID = 0, + /* + * Takes valid value from the enum + * qca_wlan_offloaded_packets_sending_control + * Unsigned 32-bit value + **/ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_SENDING_CONTROL, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_REQUEST_ID, + /* array of u8 len: Max packet size */ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_IP_PACKET_DATA, + /* 6-byte MAC address used to represent source MAC address */ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_SRC_MAC_ADDR, + /* 6-byte MAC address used to represent destination MAC address */ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_DST_MAC_ADDR, + /* Unsigned 32-bit value, in milli seconds */ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_PERIOD, + /* + * This attribute is used and optional for specifying + * ethernet protocol type, if not specified it will default to ipv4 + * Unsigned 16-bit value + **/ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_ETHER_PROTO_TYPE, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_MAX = + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_AFTER_LAST - 1, +}; + +#endif + +/** + * enum qca_wlan_rssi_monitoring_control - rssi control commands + * @QCA_WLAN_RSSI_MONITORING_CONTROL_INVALID: invalid + * @QCA_WLAN_RSSI_MONITORING_START: rssi monitoring start + * @QCA_WLAN_RSSI_MONITORING_STOP: rssi monitoring stop + */ +enum qca_wlan_rssi_monitoring_control { + QCA_WLAN_RSSI_MONITORING_CONTROL_INVALID = 0, + QCA_WLAN_RSSI_MONITORING_START, + QCA_WLAN_RSSI_MONITORING_STOP, +}; + +/** + * enum qca_wlan_vendor_attr_rssi_monitoring - rssi monitoring + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_INVALID: Invalid + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_CONTROL: control + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_MAX_RSSI: max rssi + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_MIN_RSSI: min rssi + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_CUR_BSSID: current bssid + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_CUR_RSSI: current rssi + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_AFTER_LAST: after last + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_MAX: max + */ +enum qca_wlan_vendor_attr_rssi_monitoring { + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_INVALID = 0, + + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_CONTROL, + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_REQUEST_ID, + + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_MAX_RSSI, + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_MIN_RSSI, + + /* attributes to be used/received in callback */ + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_CUR_BSSID, + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_CUR_RSSI, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_MAX = + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ndp_params - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_NDP. + * @QCA_WLAN_VENDOR_ATTR_NDP_PARAM_INVALID + * @QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD: sub commands values in qca_wlan_ndp_sub_cmd + * @QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID: + * @QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_INSTANCE_ID: indicats a service info + * @QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL: channel frequency in MHz + * @QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR: Interface Discovery MAC + * address + * @QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR: Interface name on which NDP is being + * created + * @QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_SECURITY: CONFIG_SECURITY is deprecated, use + * NCS_SK_TYPE/PMK/SCID instead + * @QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS: value for QoS + * @QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO: app info + * @QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID: NDP instance Id + * @QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY: Array of instance Ids + * @QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE: initiator/responder NDP response + * code: accept/reject + * @QCA_WLAN_VENDOR_ATTR_NDP_NDI_MAC_ADDR: NDI MAC address + * @QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE: errors types returned by + * driver + * @QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE: value error values returned by + * driver + * @QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_CONFIG: Channel setup configuration + * @QCA_WLAN_VENDOR_ATTR_NDP_CSID: Cipher Suite Shared Key Type + * @QCA_WLAN_VENDOR_ATTR_NDP_PMK: PMK_INFO + * @QCA_WLAN_VENDOR_ATTR_NDP_SCID: Security Context Identifier that contains the + * PMKID + * @QCA_WLAN_VENDOR_ATTR_NDP_PASSPHRASE: passphrase + * @QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_NAME: service name + * @QCA_WLAN_VENDOR_ATTR_NDP_SCHEDULE_UPDATE_REASON: bitmap indicating schedule + * update: + * BIT_0: NSS Update + * BIT_1: Channel list update + * @QCA_WLAN_VENDOR_ATTR_NDP_NSS: nss + * @QCA_WLAN_VENDOR_ATTR_NDP_NUM_CHANNELS: NUMBER NDP CHANNEL + * @QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_WIDTH: CHANNEL BANDWIDTH: + * 0:20 MHz, + * 1:40 MHz, + * 2:80 MHz, + * 3:160 MHz + * @QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_INFO: Array of channel/band width + * @QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_AFTER_LAST: id after last valid attribute + * @QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_MAX: max value of this enum type + * @QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR: IPv6 address used by NDP, 16 bytes array + * @QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PORT: Unsigned 16-bit value indicating + * transport port used by NDP. + * QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PROTOCOL: Unsigned 8-bit value indicating + * protocol used by NDP and assigned by the Internet Assigned Numbers Authority + * as per: www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml +*/ +enum qca_wlan_vendor_attr_ndp_params { + QCA_WLAN_VENDOR_ATTR_NDP_PARAM_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD = 1, + QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID = 2, + QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_INSTANCE_ID = 3, + QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL = 4, + QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR = 5, + QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR = 6, + QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_SECURITY = 7, + QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS = 8, + QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO = 9, + QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID = 10, + QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY = 11, + QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE = 12, + QCA_WLAN_VENDOR_ATTR_NDP_NDI_MAC_ADDR = 13, + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE = 14, + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE = 15, + QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_CONFIG = 16, + QCA_WLAN_VENDOR_ATTR_NDP_CSID = 17, + QCA_WLAN_VENDOR_ATTR_NDP_PMK = 18, + QCA_WLAN_VENDOR_ATTR_NDP_SCID = 19, + QCA_WLAN_VENDOR_ATTR_NDP_PASSPHRASE = 20, + QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_NAME = 21, + QCA_WLAN_VENDOR_ATTR_NDP_SCHEDULE_UPDATE_REASON = 22, + QCA_WLAN_VENDOR_ATTR_NDP_NSS = 23, + QCA_WLAN_VENDOR_ATTR_NDP_NUM_CHANNELS = 24, + QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_WIDTH = 25, + QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_INFO = 26, + QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR = 27, + QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PORT = 28, + QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PROTOCOL = 29, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_MAX = + QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_ndp_sub_cmd - NDP sub comands types for + * QCA_NL80211_VENDOR_SUBCMD_NDP. + * @QCA_WLAN_VENDOR_ATTR_NDP_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_CREATE: create a ndi + * @QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_DELETE: delete a ndi + * @QCA_WLAN_VENDOR_ATTR_NDP_INITIATOR_REQUEST: initiate a ndp session + * @QCA_WLAN_VENDOR_ATTR_NDP_INITIATOR_RESPONSE: response for above + * @QCA_WLAN_VENDOR_ATTR_NDP_RESPONDER_REQUEST: respond to ndp session + * @QCA_WLAN_VENDOR_ATTR_NDP_RESPONDER_RESPONSE: response for above + * @QCA_WLAN_VENDOR_ATTR_NDP_END_REQUEST: initiate a ndp end + * @QCA_WLAN_VENDOR_ATTR_NDP_END_RESPONSE: response for above + * @QCA_WLAN_VENDOR_ATTR_NDP_REQUEST_IND: notify the peer about the end request + * @QCA_WLAN_VENDOR_ATTR_NDP_CONFIRM_IND: confirm the ndp session is complete + * @QCA_WLAN_VENDOR_ATTR_NDP_END_IND: indicate the peer about the end request + * being received + * @QCA_WLAN_VENDOR_ATTR_NDP_SCHEDULE_UPDATE_IND: indicate the peer of schedule + * update + */ +enum qca_wlan_ndp_sub_cmd { + QCA_WLAN_VENDOR_ATTR_NDP_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_CREATE = 1, + QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_DELETE = 2, + QCA_WLAN_VENDOR_ATTR_NDP_INITIATOR_REQUEST = 3, + QCA_WLAN_VENDOR_ATTR_NDP_INITIATOR_RESPONSE = 4, + QCA_WLAN_VENDOR_ATTR_NDP_RESPONDER_REQUEST = 5, + QCA_WLAN_VENDOR_ATTR_NDP_RESPONDER_RESPONSE = 6, + QCA_WLAN_VENDOR_ATTR_NDP_END_REQUEST = 7, + QCA_WLAN_VENDOR_ATTR_NDP_END_RESPONSE = 8, + QCA_WLAN_VENDOR_ATTR_NDP_REQUEST_IND = 9, + QCA_WLAN_VENDOR_ATTR_NDP_CONFIRM_IND = 10, + QCA_WLAN_VENDOR_ATTR_NDP_END_IND = 11, + QCA_WLAN_VENDOR_ATTR_NDP_SCHEDULE_UPDATE_IND = 12 +}; + +/** + * qca_wlan_vendor_external_acs_event_chan_info_attr: Represents per channel + * information. These attributes are sent as part of + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_CHAN_INFO. Each set of the following + * attributes correspond to a single channel. + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAGS: A bitmask (u16) + * with flags specified in qca_wlan_vendor_channel_prop_flags_ext. + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAG_EXT: A bitmask (u16) + * with flags specified in qca_wlan_vendor_channel_prop_flags_ext. + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FREQ: frequency + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_MAX_REG_POWER: maximum + * regulatory transmission power + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_MAX_POWER: maximum + * transmission power + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_MIN_POWER: minimum + * transmission power + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_REG_CLASS_ID: regulatory + * class id + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_ANTENNA_GAIN: maximum + * antenna gain in dbm + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_VHT_SEG_0: vht segment 0 + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_VHT_SEG_1: vht segment 1 + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FREQ_VHT_SEG_0: vht + * segment 0 in center freq in MHz. + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FREQ_VHT_SEG_1: vht + * segment 1 in center freq in MHz. + * + */ +enum qca_wlan_vendor_external_acs_event_chan_info_attr { + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_INVALID = 0, + + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAGS = 1, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAG_EXT = 2, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FREQ = 3, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_MAX_REG_POWER = 4, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_MAX_POWER = 5, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_MIN_POWER = 6, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_REG_CLASS_ID = 7, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_ANTENNA_GAIN = 8, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_VHT_SEG_0 = 9, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_VHT_SEG_1 = 10, + /* + * A bitmask (u32) with flags specified in + * enum qca_wlan_vendor_channel_prop_flags_2. + */ + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAGS_2 = 11, + + /* + * VHT segment 0 in MHz (u32) and the attribute is mandatory. + * Note: Event QCA_NL80211_VENDOR_SUBCMD_EXTERNAL_ACS includes + * QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FREQ_VHT_SEG_0 + * along with + * QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_VHT_SEG_0. + * + * If both the driver and user-space application supports the 6 GHz + * band, QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_VHT_SEG_0 + * is deprecated and + * QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FREQ_VHT_SEG_0 + * should be used. + * + * To maintain backward compatibility, + * QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FREQ_VHT_SEG_0 + * is still used if either of the driver or user space application + * doesn't support the 6 GHz band. + */ + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FREQ_VHT_SEG_0 = 12, + + /* + * VHT segment 1 in MHz (u32) and the attribute is mandatory. + * Note: Event QCA_NL80211_VENDOR_SUBCMD_EXTERNAL_ACS includes + * QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FREQ_VHT_SEG_1 + * along with + * QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_VHT_SEG_1. + * + * If both the driver and user-space application supports the 6 GHz + * band, QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_VHT_SEG_1 + * is deprecated and + * QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FREQ_VHT_SEG_1 + * should be considered. + * + * To maintain backward compatibility, + * QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FREQ_VHT_SEG_1 + * is still used if either of the driver or user space application + * doesn't support the 6 GHz band. + */ + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FREQ_VHT_SEG_1 = 13, + + /* keep last */ + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_LAST, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_MAX = + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_start_acs_config: attribute to vendor sub-command + * QCA_NL80211_VENDOR_SUBCMD_START_ACS. This will be triggered by host + * driver. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_REASON: This reason refers to + * qca_wlan_vendor_acs_select_reason. This helps acs module to understand why + * ACS need to be started + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_IS_SPECTRAL_SUPPORTED: Does + * driver supports spectral scanning or not + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_IS_OFFLOAD_ENABLED: Is 11ac is + * offloaded to firmware. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_ADD_CHAN_STATS_SUPPORT: Does driver + * provides additional channel capability as part of scan operation. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_AP_UP:Flag attribute to indicate + * interface status is UP + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_SAP_MODE: Operating mode of + * interface. It takes one of nl80211_iftype values. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_CHAN_WIDTH: This is the upper bound + * of chan width. ACS logic should try to get a channel with specified width + * if not found then look for lower values. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_BAND: nl80211_bands + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_PHY_MODE: PHY/HW mode such as + * a/b/g/n/ac. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_FREQ_LIST: Supported frequency list + * among which ACS should choose best frequency. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_PCL:Preferred Chan List by the + * driver which will have format as array of + * nested values. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_CHAN_INFO: Array of nested attribute + * for each channel. It takes attr as defined in + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_POLICY:External ACS policy such as + * PCL mandatory, PCL preferred, etc.It uses values defined in enum + * qca_wlan_vendor_attr_external_acs_policy. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_RROPAVAIL_INFO: Reference RF + * Operating Parameter (RROP) availability information (u16). It uses values + * defined in enum qca_wlan_vendor_attr_rropavail_info. + */ +enum qca_wlan_vendor_attr_external_acs_event { + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_REASON = 1, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_IS_SPECTRAL_SUPPORTED = 2, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_IS_OFFLOAD_ENABLED = 3, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_ADD_CHAN_STATS_SUPPORT = 4, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_AP_UP = 5, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_SAP_MODE = 6, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_CHAN_WIDTH = 7, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_BAND = 8, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_PHY_MODE = 9, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_FREQ_LIST = 10, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_PCL = 11, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_CHAN_INFO = 12, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_POLICY = 13, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_RROPAVAIL_INFO = 14, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_LAST, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_MAX = + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_LAST - 1, +}; + +enum qca_iface_type { + QCA_IFACE_TYPE_STA, + QCA_IFACE_TYPE_AP, + QCA_IFACE_TYPE_P2P_CLIENT, + QCA_IFACE_TYPE_P2P_GO, + QCA_IFACE_TYPE_IBSS, + QCA_IFACE_TYPE_TDLS, +}; + +/** + * enum qca_wlan_vendor_attr_pcl_config: attribute to vendor sub-command + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_PCL and + * QCA_NL80211_VENDOR_SUBCMD_GET_PREFERRED_FREQ_LIST. + * @QCA_WLAN_VENDOR_ATTR_PCL_CONFIG_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_PCL_CONFIG_CHANNEL: pcl channel number + * @QCA_WLAN_VENDOR_ATTR_PCL_CONFIG_WEIGHT: pcl channel weight + * @QCA_WLAN_VENDOR_ATTR_PCL_CONFIG_FREQ: pcl channel frequency + * @QCA_WLAN_VENDOR_ATTR_PCL_CONFIG_FLAG: pcl channel flag bitmask + */ +enum qca_wlan_vendor_attr_pcl_config { + QCA_WLAN_VENDOR_ATTR_PCL_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_PCL_CHANNEL = 1, + QCA_WLAN_VENDOR_ATTR_PCL_WEIGHT = 2, + QCA_WLAN_VENDOR_ATTR_PCL_FREQ = 3, + QCA_WLAN_VENDOR_ATTR_PCL_FLAG = 4, +}; + +enum qca_set_band { + QCA_SETBAND_AUTO = 0, + QCA_SETBAND_5G = BIT(0), + QCA_SETBAND_2G = BIT(1), + QCA_SETBAND_6G = BIT(2), +}; + +/** + * enum set_reset_packet_filter - set packet filter control commands + * @QCA_WLAN_SET_PACKET_FILTER: Set Packet Filter + * @QCA_WLAN_GET_PACKET_FILTER: Get Packet filter + * @QCA_WLAN_WRITE_PACKET_FILTER: Write packet filter program/data + * @QCA_WLAN_READ_PACKET_FILTER: Read packet filter program/data + * @QCA_WLAN_ENABLE_PACKET_FILTER: Enable APF interpreter + * @QCA_WLAN_DISABLE_PACKET_FILTER: Disable APF interpreter + */ +enum set_reset_packet_filter { + QCA_WLAN_SET_PACKET_FILTER = 1, + QCA_WLAN_GET_PACKET_FILTER = 2, + QCA_WLAN_WRITE_PACKET_FILTER = 3, + QCA_WLAN_READ_PACKET_FILTER = 4, + QCA_WLAN_ENABLE_PACKET_FILTER = 5, + QCA_WLAN_DISABLE_PACKET_FILTER = 6, +}; + +/** + * enum qca_wlan_vendor_attr_packet_filter - APF control commands + * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_INVALID: Invalid + * @QCA_WLAN_VENDOR_ATTR_SET_RESET_PACKET_FILTER: Filter ID + * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_VERSION: Filter Version + * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_SIZE: Total Length + * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_CURRENT_OFFSET: Current offset + * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROGRAM: length of APF instructions + * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROG_LENGTH: length of the program + * section in packet filter buffer + */ +enum qca_wlan_vendor_attr_packet_filter { + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_SET_RESET_PACKET_FILTER, + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_VERSION, + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_ID, + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_SIZE, + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_CURRENT_OFFSET, + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROGRAM, + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROG_LENGTH, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_MAX = + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_wake_stats - wake lock stats + * @QCA_WLAN_VENDOR_ATTR_GET_WAKE_STATS_INVALID: invalid + * @QCA_WLAN_VENDOR_ATTR_TOTAL_CMD_EVENT_WAKE: + * @QCA_WLAN_VENDOR_ATTR_CMD_EVENT_WAKE_CNT_PTR: + * @QCA_WLAN_VENDOR_ATTR_CMD_EVENT_WAKE_CNT_SZ: + * @QCA_WLAN_VENDOR_ATTR_TOTAL_DRIVER_FW_LOCAL_WAKE: + * @QCA_WLAN_VENDOR_ATTR_DRIVER_FW_LOCAL_WAKE_CNT_PTR: + * @QCA_WLAN_VENDOR_ATTR_DRIVER_FW_LOCAL_WAKE_CNT_SZ: + * @QCA_WLAN_VENDOR_ATTR_TOTAL_RX_DATA_WAKE: + * total rx wakeup count + * @QCA_WLAN_VENDOR_ATTR_RX_UNICAST_CNT: + * Total rx unicast packet which woke up host + * @QCA_WLAN_VENDOR_ATTR_RX_MULTICAST_CNT: + * Total rx multicast packet which woke up host + * @QCA_WLAN_VENDOR_ATTR_RX_BROADCAST_CNT: + * Total rx broadcast packet which woke up host + * @QCA_WLAN_VENDOR_ATTR_ICMP_PKT: + * wake icmp packet count + * @QCA_WLAN_VENDOR_ATTR_ICMP6_PKT: + * wake icmp6 packet count + * @QCA_WLAN_VENDOR_ATTR_ICMP6_RA: + * wake icmp6 RA packet count + * @QCA_WLAN_VENDOR_ATTR_ICMP6_NA: + * wake icmp6 NA packet count + * @QCA_WLAN_VENDOR_ATTR_ICMP6_NS: + * wake icmp6 NS packet count + * @QCA_WLAN_VENDOR_ATTR_ICMP4_RX_MULTICAST_CNT: + * Rx wake packet count due to ipv4 multicast + * @QCA_WLAN_VENDOR_ATTR_ICMP6_RX_MULTICAST_CNT: + * Rx wake packet count due to ipv6 multicast + * @QCA_WLAN_VENDOR_ATTR_OTHER_RX_MULTICAST_CNT: + * Rx wake packet count due to non-ipv4 and non-ipv6 packets + * @QCA_WLAN_VENDOR_ATTR_RSSI_BREACH_CNT: + * wake rssi breach packet count + * @QCA_WLAN_VENDOR_ATTR_LOW_RSSI_CNT: + * wake low rssi packet count + * @QCA_WLAN_VENDOR_ATTR_GSCAN_CNT: + * wake gscan packet count + * @QCA_WLAN_VENDOR_ATTR_PNO_COMPLETE_CNT: + * wake pno complete packet count + * @QCA_WLAN_VENDOR_ATTR_PNO_MATCH_CNT: + * wake pno match packet count + */ +enum qca_wlan_vendor_attr_wake_stats { + QCA_WLAN_VENDOR_ATTR_GET_WAKE_STATS_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TOTAL_CMD_EVENT_WAKE, + QCA_WLAN_VENDOR_ATTR_CMD_EVENT_WAKE_CNT_PTR, + QCA_WLAN_VENDOR_ATTR_CMD_EVENT_WAKE_CNT_SZ, + QCA_WLAN_VENDOR_ATTR_TOTAL_DRIVER_FW_LOCAL_WAKE, + QCA_WLAN_VENDOR_ATTR_DRIVER_FW_LOCAL_WAKE_CNT_PTR, + QCA_WLAN_VENDOR_ATTR_DRIVER_FW_LOCAL_WAKE_CNT_SZ, + QCA_WLAN_VENDOR_ATTR_TOTAL_RX_DATA_WAKE, + QCA_WLAN_VENDOR_ATTR_RX_UNICAST_CNT, + QCA_WLAN_VENDOR_ATTR_RX_MULTICAST_CNT, + QCA_WLAN_VENDOR_ATTR_RX_BROADCAST_CNT, + QCA_WLAN_VENDOR_ATTR_ICMP_PKT, + QCA_WLAN_VENDOR_ATTR_ICMP6_PKT, + QCA_WLAN_VENDOR_ATTR_ICMP6_RA, + QCA_WLAN_VENDOR_ATTR_ICMP6_NA, + QCA_WLAN_VENDOR_ATTR_ICMP6_NS, + QCA_WLAN_VENDOR_ATTR_ICMP4_RX_MULTICAST_CNT, + QCA_WLAN_VENDOR_ATTR_ICMP6_RX_MULTICAST_CNT, + QCA_WLAN_VENDOR_ATTR_OTHER_RX_MULTICAST_CNT, + QCA_WLAN_VENDOR_ATTR_RSSI_BREACH_CNT, + QCA_WLAN_VENDOR_ATTR_LOW_RSSI_CNT, + QCA_WLAN_VENDOR_ATTR_GSCAN_CNT, + QCA_WLAN_VENDOR_ATTR_PNO_COMPLETE_CNT, + QCA_WLAN_VENDOR_ATTR_PNO_MATCH_CNT, + /* keep last */ + QCA_WLAN_VENDOR_GET_WAKE_STATS_AFTER_LAST, + QCA_WLAN_VENDOR_GET_WAKE_STATS_MAX = + QCA_WLAN_VENDOR_GET_WAKE_STATS_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_thermal_level - Defines various thermal levels + * configured by userspace to the driver/firmware. The values will be + * encapsulated in QCA_WLAN_VENDOR_ATTR_THERMAL_LEVEL attribute. + * The driver/firmware takes necessary actions requested by userspace + * such as throttling wifi tx etc. in order to mitigate high temperature. + * + * @QCA_WLAN_VENDOR_THERMAL_LEVEL_NONE: Stop/clear all throttling actions. + * @QCA_WLAN_VENDOR_THERMAL_LEVEL_LIGHT: Throttle tx lightly. + * @QCA_WLAN_VENDOR_THERMAL_LEVEL_MODERATE: Throttle tx moderately. + * @QCA_WLAN_VENDOR_THERMAL_LEVEL_SEVERE: Throttle tx severely. + * @QCA_WLAN_VENDOR_THERMAL_LEVEL_CRITICAL: Critical thermal level reached. + * @QCA_WLAN_VENDOR_THERMAL_LEVEL_EMERGENCY: Emergency thermal level reached. + */ +enum qca_wlan_vendor_thermal_level { + QCA_WLAN_VENDOR_THERMAL_LEVEL_NONE = 0, + QCA_WLAN_VENDOR_THERMAL_LEVEL_LIGHT = 1, + QCA_WLAN_VENDOR_THERMAL_LEVEL_MODERATE = 2, + QCA_WLAN_VENDOR_THERMAL_LEVEL_SEVERE = 3, + QCA_WLAN_VENDOR_THERMAL_LEVEL_CRITICAL = 4, + QCA_WLAN_VENDOR_THERMAL_LEVEL_EMERGENCY = 5, +}; + +/** + * enum qca_wlan_vendor_attr_thermal_cmd - Vendor subcmd attributes to set + * cmd value. Used for NL attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_THERMAL_CMD sub command. + */ +enum qca_wlan_vendor_attr_thermal_cmd { + QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_INVALID = 0, + /* + * The value of command, driver will implement different operations + * according to this value. It uses values defined in + * enum qca_wlan_vendor_attr_thermal_cmd_type. + * u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_VALUE = 1, + /* + * Userspace uses this attribute to configure thermal level to + * driver/firmware. Used in request, u32 attribute, possible values + * are defined in enum qca_wlan_vendor_thermal_level. + */ + QCA_WLAN_VENDOR_ATTR_THERMAL_LEVEL = 2, + /* + * Userspace uses this attribute to configure the time in which the + * driver/firmware should complete applying settings it received from + * userspace with QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_TYPE_SET_LEVEL + * command type. Used in request, u32 attribute, value is in milli + * seconds. A value of zero indicates to apply the settings + * immediately. The driver/firmware can delay applying the configured + * thermal settings within the time specified in this attribute if + * there is any critical ongoing operation. + */ + QCA_WLAN_VENDOR_ATTR_THERMAL_COMPLETION_WINDOW = 3, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_MAX = + QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_AFTER_LAST - 1 +}; + +/** + * qca_wlan_vendor_attr_thermal_cmd_type: Attribute values for + * QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_VALUE to the vendor subcmd + * QCA_NL80211_VENDOR_SUBCMD_THERMAL_CMD. This represents the + * thermal command types sent to driver. + * @QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_TYPE_GET_PARAMS: Request to + * get thermal shutdown configuration parameters for display. Parameters + * responded from driver are defined in + * enum qca_wlan_vendor_attr_get_thermal_params_rsp. + * @QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_TYPE_GET_TEMPERATURE: Request to + * get temperature. Host should respond with a temperature data. It is defined + * in enum qca_wlan_vendor_attr_thermal_get_temperature. + * @QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_TYPE_SUSPEND: Request to execute thermal + * suspend action. + * @QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_TYPE_RESUME: Request to execute thermal + * resume action. + * @QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_TYPE_SET_LEVEL: Configure thermal level to + * the driver/firmware. + */ +enum qca_wlan_vendor_attr_thermal_cmd_type { + QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_TYPE_GET_PARAMS, + QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_TYPE_GET_TEMPERATURE, + QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_TYPE_SUSPEND, + QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_TYPE_RESUME, + QCA_WLAN_VENDOR_ATTR_THERMAL_CMD_TYPE_SET_LEVEL, +}; + +/** + * enum qca_vendor_element_id - QCA Vendor Specific element types + * + * These values are used to identify QCA Vendor Specific elements. The + * payload of the element starts with the three octet OUI (OUI_QCA) and + * is followed by a single octet type which is defined by this enum. + * + * @QCA_VENDOR_ELEM_P2P_PREF_CHAN_LIST: P2P preferred channel list. + * This element can be used to specify preference order for supported + * channels. The channels in this list are in preference order (the first + * one has the highest preference) and are described as a pair of + * (global) Operating Class and Channel Number (each one octet) fields. + * + * This extends the standard P2P functionality by providing option to have + * more than one preferred operating channel. When this element is present, + * it replaces the preference indicated in the Operating Channel attribute. + * For supporting other implementations, the Operating Channel attribute is + * expected to be used with the highest preference channel. Similarly, all + * the channels included in this Preferred channel list element are + * expected to be included in the Channel List attribute. + * + * This vendor element may be included in GO Negotiation Request, P2P + * Invitation Request, and Provision Discovery Request frames. + * + * @QCA_VENDOR_ELEM_HE_CAPAB: HE Capabilities element. + * This element can be used for pre-standard publication testing of HE + * before P802.11ax draft assigns the element ID. The payload of this + * vendor specific element is defined by the latest P802.11ax draft. + * Please note that the draft is still work in progress and this element + * payload is subject to change. + * + * @QCA_VENDOR_ELEM_HE_OPER: HE Operation element. + * This element can be used for pre-standard publication testing of HE + * before P802.11ax draft assigns the element ID. The payload of this + * vendor specific element is defined by the latest P802.11ax draft. + * Please note that the draft is still work in progress and this element + * payload is subject to change. + * + * @QCA_VENDOR_ELEM_RAPS: RAPS element (OFDMA-based Random Access Parameter Set + * element). + * This element can be used for pre-standard publication testing of HE + * before P802.11ax draft assigns the element ID extension. The payload of + * this vendor specific element is defined by the latest P802.11ax draft + * (not including the Element ID Extension field). Please note that the + * draft is still work in progress and this element payload is subject to + * change. + * + * @QCA_VENDOR_ELEM_MU_EDCA_PARAMS: MU EDCA Parameter Set element. + * This element can be used for pre-standard publication testing of HE + * before P802.11ax draft assigns the element ID extension. The payload of + * this vendor specific element is defined by the latest P802.11ax draft + * (not including the Element ID Extension field). Please note that the + * draft is still work in progress and this element payload is subject to + * change. + * + * @QCA_VENDOR_ELEM_BSS_COLOR_CHANGE: BSS Color Change Announcement element. + * This element can be used for pre-standard publication testing of HE + * before P802.11ax draft assigns the element ID extension. The payload of + * this vendor specific element is defined by the latest P802.11ax draft + * (not including the Element ID Extension field). Please note that the + * draft is still work in progress and this element payload is subject to + * change. + */ +enum qca_vendor_element_id { + QCA_VENDOR_ELEM_P2P_PREF_CHAN_LIST = 0, + QCA_VENDOR_ELEM_HE_CAPAB = 1, + QCA_VENDOR_ELEM_HE_OPER = 2, + QCA_VENDOR_ELEM_RAPS = 3, + QCA_VENDOR_ELEM_MU_EDCA_PARAMS = 4, + QCA_VENDOR_ELEM_BSS_COLOR_CHANGE = 5, +}; + +/** + * enum qca_vendor_attr_get_tsf: Vendor attributes for TSF capture + * @QCA_WLAN_VENDOR_ATTR_TSF_INVALID: Invalid attribute value + * @QCA_WLAN_VENDOR_ATTR_TSF_CMD: enum qca_tsf_operation (u32) + * @QCA_WLAN_VENDOR_ATTR_TSF_TIMER_VALUE: Unsigned 64 bit TSF timer value + * @QCA_WLAN_VENDOR_ATTR_TSF_SOC_TIMER_VALUE: Unsigned 64 bit Synchronized + * SOC timer value at TSF capture + * @QCA_WLAN_VENDOR_ATTR_TSF_AFTER_LAST: after last + * @QCA_WLAN_VENDOR_ATTR_TSF_MAX: Max value + */ +enum qca_vendor_attr_tsf_cmd { + QCA_WLAN_VENDOR_ATTR_TSF_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TSF_CMD, + QCA_WLAN_VENDOR_ATTR_TSF_TIMER_VALUE, + QCA_WLAN_VENDOR_ATTR_TSF_SOC_TIMER_VALUE, + QCA_WLAN_VENDOR_ATTR_TSF_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TSF_MAX = + QCA_WLAN_VENDOR_ATTR_TSF_AFTER_LAST - 1 +}; + +/** + * enum qca_tsf_operation: TSF driver commands + * @QCA_TSF_CAPTURE: Initiate TSF Capture + * @QCA_TSF_GET: Get TSF capture value + * @QCA_TSF_SYNC_GET: Initiate TSF capture and return with captured value + */ +enum qca_tsf_cmd { + QCA_TSF_CAPTURE, + QCA_TSF_GET, + QCA_TSF_SYNC_GET, +}; + +/** + * enum qca_vendor_attr_get_preferred_freq_list - get preferred channel list + * @QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_IFACE_TYPE: interface type + * @QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST: preferred frequency list + * @QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_WEIGHED_PCL: pcl with weight + * @QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_AFTER_LAST: after last + * @QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_MAX: max + */ +enum qca_vendor_attr_get_preferred_freq_list { + QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_INVALID, + /* A 32-unsigned value; the interface type/mode for which the preferred + * frequency list is requested (see enum qca_iface_type for possible + * values); used in both south- and north-bound. + */ + QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_IFACE_TYPE, + /* An array of 32-unsigned values; values are frequency (MHz); used + * in north-bound only. + */ + QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST, + /* An array of nested values as per enum + * qca_wlan_vendor_attr_pcl_config attribute. + * Each element contains frequency (MHz), weight, and flag + * bit mask indicating how the frequency should be used in P2P + * negotiation. + */ + QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_WEIGHED_PCL, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_MAX = + QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_AFTER_LAST - 1 +}; + +/** + * enum qca_vendor_attr_probable_oper_channel - channel hint + * @QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_IFACE_TYPE: interface type + * @QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_FREQ: frequency hint value + * @QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_AFTER_LAST: last + * @QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_MAX: max + */ +enum qca_vendor_attr_probable_oper_channel { + QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_INVALID, + /* 32-bit unsigned value; indicates the connection/iface type likely to + * come on this channel (see enum qca_iface_type). + */ + QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_IFACE_TYPE, + /* 32-bit unsigned value; the frequency (MHz) of the probable channel */ + QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_FREQ, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_MAX = + QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_gw_param_config - gateway param config + * @QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_INVALID: Invalid + * @QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_GW_MAC_ADDR: gateway mac addr + * @QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_IPV4_ADDR: ipv4 addr + * @QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_IPV6_ADDR: ipv6 addr + */ +enum qca_wlan_vendor_attr_gw_param_config { + QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_GW_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_IPV4_ADDR, + QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_IPV6_ADDR, + QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_MAX = + QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_AFTER_LAST - 1, +}; + +/** + * enum drv_dbs_capability - DBS capability + * @DRV_DBS_CAPABILITY_DISABLED: DBS disabled + * @DRV_DBS_CAPABILITY_1X1: 1x1 + * @DRV_DBS_CAPABILITY_2X2: 2x2 + */ +enum drv_dbs_capability { + DRV_DBS_CAPABILITY_DISABLED, /* not supported or disabled */ + DRV_DBS_CAPABILITY_1X1, + DRV_DBS_CAPABILITY_2X2, +}; + +/** + * enum qca_vendor_attr_txpower_decr_db - Attributes for TX power decrease + * + * These attributes are used with QCA_NL80211_VENDOR_SUBCMD_SET_TXPOWER_DECR_DB. + */ +enum qca_vendor_attr_txpower_decr_db { + QCA_WLAN_VENDOR_ATTR_TXPOWER_DECR_DB_INVALID, + /* + * 8-bit unsigned value to indicate the reduction of TX power in dB for + * a virtual interface. + */ + QCA_WLAN_VENDOR_ATTR_TXPOWER_DECR_DB, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_TXPOWER_DECR_DB_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TXPOWER_DECR_DB_MAX = + QCA_WLAN_VENDOR_ATTR_TXPOWER_DECR_DB_AFTER_LAST - 1 +}; + +/** + * enum qca_vendor_attr_ota_test - Enable OTA test + * @QCA_WLAN_VENDOR_ATTR_OTA_TEST_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_OTA_TEST_ENABLE: enable OTA test + * @QCA_WLAN_VENDOR_ATTR_OTA_TEST_AFTER_LAST: after last + * @QCA_WLAN_VENDOR_ATTR_OTA_TEST_MAX: max + */ +enum qca_vendor_attr_ota_test { + QCA_WLAN_VENDOR_ATTR_OTA_TEST_INVALID, + /* 8-bit unsigned value to indicate if OTA test is enabled */ + QCA_WLAN_VENDOR_ATTR_OTA_TEST_ENABLE, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_OTA_TEST_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_OTA_TEST_MAX = + QCA_WLAN_VENDOR_ATTR_OTA_TEST_AFTER_LAST - 1 +}; + +/** enum qca_vendor_attr_txpower_scale - vendor sub commands index + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE: scaling value + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_AFTER_LAST: last value + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_MAX: max value + */ +enum qca_vendor_attr_txpower_scale { + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_INVALID, + /* 8-bit unsigned value to indicate the scaling of tx power */ + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_MAX = + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_AFTER_LAST - 1 +}; + +/** + * enum qca_vendor_attr_txpower_scale_decr_db - vendor sub commands index + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB: scaling value + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB_AFTER_LAST: last value + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB_MAX: max value + */ +enum qca_vendor_attr_txpower_scale_decr_db { + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB_INVALID, + /* 8-bit unsigned value to indicate the scaling of tx power */ + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB_MAX = + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB_AFTER_LAST - 1 +}; + +/** + * enum dfs_mode - state of DFS mode + * @DFS_MODE_NONE: DFS mode attribute is none + * @DFS_MODE_ENABLE: DFS mode is enabled + * @DFS_MODE_DISABLE: DFS mode is disabled + * @DFS_MODE_DEPRIORITIZE: Deprioritize DFS channels in scanning + */ +enum dfs_mode { + DFS_MODE_NONE, + DFS_MODE_ENABLE, + DFS_MODE_DISABLE, + DFS_MODE_DEPRIORITIZE +}; + +/** + * enum qca_wlan_vendor_attr_acs_config - Defines Configuration attributes + * used by the vendor command QCA_NL80211_VENDOR_SUBCMD_ACS_POLICY. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_DFS_MODE: Required (u8) + * DFS mode for ACS operation from enum qca_acs_dfs_mode. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_CHANNEL_HINT: Required (u8) + * channel number hint for ACS operation, if valid channel is specified then + * ACS operation gives priority to this channel. + * Note: If both the driver and user space application supports the 6 GHz band, + * this attribute is deprecated and QCA_WLAN_VENDOR_ATTR_ACS_FREQUENCY_HINT + * should be used. + * To maintain backward compatibility, QCA_WLAN_VENDOR_ATTR_ACS_CHANNEL_HINT + * is still used if either of the driver or user space application doesn't + * support the 6 GHz band. + * + * @QCA_WLAN_VENDOR_ATTR_ACS_FREQUENCY_HINT: Required (u32). + * Channel center frequency (MHz) hint for ACS operation, if a valid center + * frequency is specified, ACS operation gives priority to this channel. + */ +enum qca_wlan_vendor_attr_acs_config { + QCA_WLAN_VENDOR_ATTR_ACS_MODE_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_ACS_DFS_MODE = 1, + QCA_WLAN_VENDOR_ATTR_ACS_CHANNEL_HINT = 2, + QCA_WLAN_VENDOR_ATTR_ACS_FREQUENCY_HINT = 3, + + QCA_WLAN_VENDOR_ATTR_ACS_DFS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_ACS_DFS_MAX = + QCA_WLAN_VENDOR_ATTR_ACS_DFS_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_get_hw_capability - Wi-Fi hardware capability + */ +enum qca_wlan_vendor_attr_get_hw_capability { + QCA_WLAN_VENDOR_ATTR_HW_CAPABILITY_INVALID, + /* + * Antenna isolation + * An attribute used in the response. + * The content of this attribute is encoded in a byte array. Each byte + * value is an antenna isolation value. The array length is the number + * of antennas. + */ + QCA_WLAN_VENDOR_ATTR_ANTENNA_ISOLATION, + /* + * Request HW capability + * An attribute used in the request. + * The content of this attribute is a u32 array for one or more of + * hardware capabilities (attribute IDs) that are being requested. Each + * u32 value has a value from this + * enum qca_wlan_vendor_attr_get_hw_capability + * identifying which capabilities are requested. + */ + QCA_WLAN_VENDOR_ATTR_GET_HW_CAPABILITY, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_HW_CAPABILITY_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_HW_CAPABILITY_MAX = + QCA_WLAN_VENDOR_ATTR_HW_CAPABILITY_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_sta_connect_roam_policy_config - + * config params for sta roam policy + * @QCA_WLAN_VENDOR_ATTR_STA_CONNECT_ROAM_POLICY_INVALID: Invalid + * @QCA_WLAN_VENDOR_ATTR_STA_DFS_MODE: If sta should skip Dfs channels + * @QCA_WLAN_VENDOR_ATTR_STA_SKIP_UNSAFE_CHANNEL: + * If sta should skip unsafe channels or not in scanning + * @QCA_WLAN_VENDOR_ATTR_STA_CONNECT_ROAM_POLICY_LAST: + * @QCA_WLAN_VENDOR_ATTR_STA_CONNECT_ROAM_POLICY_MAX: max attribute + */ +enum qca_wlan_vendor_attr_sta_connect_roam_policy_config { + QCA_WLAN_VENDOR_ATTR_STA_CONNECT_ROAM_POLICY_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_STA_DFS_MODE, + QCA_WLAN_VENDOR_ATTR_STA_SKIP_UNSAFE_CHANNEL, + + QCA_WLAN_VENDOR_ATTR_STA_CONNECT_ROAM_POLICY_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_STA_CONNECT_ROAM_POLICY_MAX = + QCA_WLAN_VENDOR_ATTR_STA_CONNECT_ROAM_POLICY_AFTER_LAST - 1, +}; + +/* Attributes for FTM commands and events */ + +/** + * enum qca_wlan_vendor_attr_loc_capa - Indoor location capabilities + * + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS: Various flags. See + * enum qca_wlan_vendor_attr_loc_capa_flags. + * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS: Maximum number + * of measurement sessions that can run concurrently. + * Default is one session (no session concurrency). + * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS: The total number of unique + * peers that are supported in running sessions. For example, + * if the value is 8 and maximum number of sessions is 2, you can + * have one session with 8 unique peers, or 2 sessions with 4 unique + * peers each, and so on. + * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP: Maximum number + * of bursts per peer, as an exponent (2^value). Default is 0, + * meaning no multi-burst support. + * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST: Maximum number + * of measurement exchanges allowed in a single burst. + * @QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES: Supported AOA measurement + * types. A bit mask (unsigned 32 bit value), each bit corresponds + * to an AOA type as defined by enum qca_vendor_attr_aoa_type. + */ +enum qca_wlan_vendor_attr_loc_capa { + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_INVALID, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS, + QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS, + QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS, + QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP, + QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST, + QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_MAX = + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_loc_capa_flags: Indoor location capability flags + * + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER: Set if driver + * can be configured as an FTM responder (for example, an AP that + * services FTM requests). QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER + * will be supported if set. + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR: Set if driver + * can run FTM sessions. QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION + * will be supported if set. +* @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP: Set if FTM responder + * supports immediate (ASAP) response. + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA: Set if driver supports standalone + * AOA measurement using QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS. + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA_IN_FTM: Set if driver supports + * requesting AOA measurements as part of an FTM session. + */ +enum qca_wlan_vendor_attr_loc_capa_flags { + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER = 1 << 0, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR = 1 << 1, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP = 1 << 2, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA = 1 << 3, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA_IN_FTM = 1 << 4, +}; + +/** + * enum qca_wlan_vendor_attr_sap_config - Parameters for AP configuration + * + * @QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_CHANNEL: Optional (u8) + * Channel number on which Access Point should restart. + * Note: If both the driver and user space application supports the 6 GHz band, + * this attribute is deprecated and QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_FREQUENCY + * should be used. + * To maintain backward compatibility, QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_CHANNEL + * is still used if either of the driver or user space application doesn't + * support the 6 GHz band. + * + * @QCA_WLAN_VENDOR_ATTR_SAP_MANDATORY_FREQUENCY_LIST: Required + * Nested attribute to hold list of center frequencies on which AP is + * expected to operate. This is irrespective of ACS configuration. + * This list is a priority based one and is looked for before the AP is + * created to ensure the best concurrency sessions (avoid MCC and use DBS/SCC) + * co-exist in the system. + * + * @QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_FREQUENCY: Optional (u32) + * Channel center frequency (MHz) on which the access point should restart. + */ +enum qca_wlan_vendor_attr_sap_config { + QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_CHANNEL = 1, + QCA_WLAN_VENDOR_ATTR_SAP_MANDATORY_FREQUENCY_LIST = 2, + QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_FREQUENCY = 3, + + /* Keep last */ + QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_MAX = + QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_info: Information about + * a single peer in a measurement session. + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR: The MAC address of the peer. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS: Various flags related + * to measurement. See enum qca_wlan_vendor_attr_ftm_peer_meas_flags. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS: Nested attribute of + * FTM measurement parameters, as specified by IEEE P802.11-REVmc/D7.0 + * 9.4.2.167. See enum qca_wlan_vendor_attr_ftm_meas_param for + * list of supported attributes. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID: Initial token ID for + * secure measurement. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD: Request AOA + * measurement every bursts. If 0 or not specified, + * AOA measurements will be disabled for this peer. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_FREQ: Frequency in MHz where + * the measurement frames are exchanged. Optional; if not + * specified, try to locate the peer in the kernel scan + * results cache and use frequency from there. + */ +enum qca_wlan_vendor_attr_ftm_peer_info { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_FREQ, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_PEER_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_meas_flags: Measurement request flags, + * per-peer + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP: If set, request + * immediate (ASAP) response from peer. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI: If set, request + * LCI report from peer. The LCI report includes the absolute + * location of the peer in "official" coordinates (similar to GPS). + * See IEEE P802.11-REVmc/D7.0, 11.24.6.7 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR: If set, request + * Location civic report from peer. The LCR includes the location + * of the peer in free-form format. See IEEE P802.11-REVmc/D7.0, + * 11.24.6.7 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE: If set, + * request a secure measurement. + * QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID must also be provided. + */ +enum qca_wlan_vendor_attr_ftm_peer_meas_flags { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP = 1 << 0, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI = 1 << 1, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR = 1 << 2, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE = 1 << 3, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_meas_param: Measurement parameters + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST: Number of measurements + * to perform in a single burst. + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP: Number of bursts to + * perform, specified as an exponent (2^value). + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION: Duration of burst + * instance, as specified in IEEE P802.11-REVmc/D7.0, 9.4.2.167. + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD: Time between bursts, + * as specified in IEEE P802.11-REVmc/D7.0, 9.4.2.167. Must + * be larger than QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION. + */ +enum qca_wlan_vendor_attr_ftm_meas_param { + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_result: Per-peer results + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR: MAC address of the reported + * peer. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS: Status of measurement + * request for this peer. + * See enum qca_wlan_vendor_attr_ftm_peer_result_status. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS: Various flags related + * to measurement results for this peer. + * See enum qca_wlan_vendor_attr_ftm_peer_result_flags. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS: Specified when + * request failed and peer requested not to send an additional request + * for this number of seconds. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCI: LCI report when received + * from peer. In the format specified by IEEE P802.11-REVmc/D7.0, + * 9.4.2.22.10. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCR: Location civic report when + * received from peer. In the format specified by IEEE P802.11-REVmc/D7.0, + * 9.4.2.22.13. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS: Reported when peer + * overridden some measurement request parameters. See + * enum qca_wlan_vendor_attr_ftm_meas_param. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AOA_MEAS: AOA measurement + * for this peer. Same contents as @QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS: Array of measurement + * results. Each entry is a nested attribute defined + * by enum qca_wlan_vendor_attr_ftm_meas. + */ +enum qca_wlan_vendor_attr_ftm_peer_result { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCI, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCR, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AOA_MEAS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_result_status + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK: Request sent ok and results + * will be provided. Peer may have overridden some measurement parameters, + * in which case overridden parameters will be report by + * QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAM attribute. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INCAPABLE: Peer is incapable + * of performing the measurement request. No more results will be sent + * for this peer in this session. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED: Peer reported request + * failed, and requested not to send an additional request for number + * of seconds specified by QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS + * attribute. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID: Request validation + * failed. Request was not sent over the air. + */ +enum qca_wlan_vendor_attr_ftm_peer_result_status { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INCAPABLE, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_result_flags: Various flags + * for measurement result, per-peer + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAG_DONE: If set, + * measurement completed for this peer. No more results will be reported + * for this peer in this session. + */ +enum qca_wlan_vendor_attr_ftm_peer_result_flags { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAG_DONE = 1 << 0, +}; + +/** + * enum qca_vendor_attr_loc_session_status: Session completion status code + * + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK: Session completed + * successfully. + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED: Session aborted + * by request. + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID: Session request + * was invalid and was not started. + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED: Session had an error + * and did not complete normally (for example out of resources). + */ +enum qca_vendor_attr_loc_session_status { + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_meas: Single measurement data + * + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1: Time of departure (TOD) of FTM packet as + * recorded by responder, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2: Time of arrival (TOA) of FTM packet at + * initiator, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3: TOD of ACK packet as recorded by + * initiator, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4: TOA of ACK packet at + * responder, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_RSSI: RSSI (signal level) as recorded + * during this measurement exchange. Optional and will be provided if + * the hardware can measure it. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOD_ERR: TOD error reported by + * responder. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOA_ERR: TOA error reported by + * responder. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOD_ERR: TOD error measured by + * initiator. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOA_ERR: TOA error measured by + * initiator. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PAD: Dummy attribute for padding. + */ +enum qca_wlan_vendor_attr_ftm_meas { + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_RSSI, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOD_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOA_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOD_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOA_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PAD, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_p2p_listen_offload - vendor sub commands index + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_CHANNEL: + * A 32-bit unsigned value; the P2P listen frequency (MHz); must be one + * of the social channels. + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_PERIOD: listen offload period + * A 32-bit unsigned value; the P2P listen offload period (ms). + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_INTERVAL: + * A 32-bit unsigned value; the P2P listen interval duration (ms). + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_COUNT: + * A 32-bit unsigned value; number of interval times the Firmware needs + * to run the offloaded P2P listen operation before it stops. + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_DEVICE_TYPES: device types + * An array of unsigned 8-bit characters; vendor information elements. + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_VENDOR_IE: vendor IEs + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_CTRL_FLAG: control flag for FW + * A 32-bit unsigned value; a control flag to indicate whether listen + * results need to be flushed to wpa_supplicant. + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_STOP_REASON: offload stop reason + * A 8-bit unsigned value; reason code for P2P listen offload stop + * event. + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_AFTER_LAST: last value + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_MAX: max value + */ +enum qca_wlan_vendor_attr_p2p_listen_offload { + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_CHANNEL, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_PERIOD, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_INTERVAL, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_COUNT, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_DEVICE_TYPES, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_VENDOR_IE, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_CTRL_FLAG, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_STOP_REASON, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_MAX = + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_drv_info - WLAN driver info + * @QCA_WLAN_VENDOR_ATTR_DRV_INFO_INVALID: Invalid + * @QCA_WLAN_VENDOR_ATTR_DRV_INFO_BUS_SIZE: Maximum Message size info + * between Firmware & Host. + */ +enum qca_wlan_vendor_drv_info { + QCA_WLAN_VENDOR_ATTR_DRV_INFO_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_DRV_INFO_BUS_SIZE, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_DRV_INFO_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_DRV_INFO_MAX = + QCA_WLAN_VENDOR_ATTR_DRV_INFO_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_aoa_type - AOA measurement type + * + * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE: Phase of the strongest + * CIR (channel impulse response) path for each antenna. + * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP: Phase and amplitude + * of the strongest CIR path for each antenna. + */ +enum qca_wlan_vendor_attr_aoa_type { + QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE, + QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP, + QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX +}; + +/** + * enum qca_wlan_vendor_attr_encryption_test - Attributes to + * validate encryption engine + * + * @QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_NEEDS_DECRYPTION: Flag attribute. + * This will be included if the request is for decryption; if not included, + * the request is treated as a request for encryption by default. + * @QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_CIPHER: Unsigned 32-bit value + * indicating the key cipher suite. Takes same values as + * NL80211_ATTR_KEY_CIPHER. + * @QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_KEYID: Unsigned 8-bit value + * Key Id to be used for encryption + * @QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_TK: Array of 8-bit values. + * Key (TK) to be used for encryption/decryption + * @QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_PN: Array of 8-bit values. + * Packet number to be specified for encryption/decryption + * 6 bytes for TKIP/CCMP/GCMP. + * @QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_DATA: Array of 8-bit values + * representing the 802.11 packet (header + payload + FCS) that + * needs to be encrypted/decrypted. + * Encrypted/decrypted response from the driver will also be sent + * to userspace with the same attribute. + */ +enum qca_wlan_vendor_attr_encryption_test { + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_NEEDS_DECRYPTION, + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_CIPHER, + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_KEYID, + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_TK, + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_PN, + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_DATA, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_MAX = + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_dmg_rf_sector_type - Type of + * sector for DMG RF sector operations. + * + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_TYPE_RX: RX sector + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_TYPE_TX: TX sector + */ +enum qca_wlan_vendor_attr_dmg_rf_sector_type { + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_TYPE_RX, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_TYPE_TX, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_TYPE_MAX +}; + +/** + * enum qca_wlan_vendor_attr_fw_state - State of firmware + * + * @QCA_WLAN_VENDOR_ATTR_FW_STATE_ERROR: FW is in bad state + * @QCA_WLAN_VENDOR_ATTR_FW_STATE_ACTIVE: FW is active + */ +enum qca_wlan_vendor_attr_fw_state { + QCA_WLAN_VENDOR_ATTR_FW_STATE_ERROR, + QCA_WLAN_VENDOR_ATTR_FW_STATE_ACTIVE, + QCA_WLAN_VENDOR_ATTR_FW_STATE_MAX +}; + +/** + * BRP antenna limit mode + * + * @QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_DISABLE: Disable BRP force + * antenna limit, BRP will be performed as usual. + * @QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_EFFECTIVE: Define maximal + * antennas limit. the hardware may use less antennas than the + * maximum limit. + * @QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_FORCE: The hardware will + * use exactly the specified number of antennas for BRP. + */ +enum qca_wlan_vendor_attr_brp_ant_limit_mode { + QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_DISABLE, + QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_EFFECTIVE, + QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_FORCE, + QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_MAX +}; + +/** + * enum qca_wlan_vendor_attr_dmg_rf_sector_cfg - Attributes for + * DMG RF sector configuration for a single RF module. + * The values are defined in a compact way which closely matches + * the way it is stored in HW registers. + * The configuration provides values for 32 antennas and 8 distribution + * amplifiers, and together describes the characteristics of the RF + * sector - such as a beam in some direction with some gain. + * + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX: Index + * of RF module for this configuration. + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_ETYPE0: Bit 0 of edge + * amplifier gain index. Unsigned 32 bit number containing + * bits for all 32 antennas. + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_ETYPE1: Bit 1 of edge + * amplifier gain index. Unsigned 32 bit number containing + * bits for all 32 antennas. + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_ETYPE2: Bit 2 of edge + * amplifier gain index. Unsigned 32 bit number containing + * bits for all 32 antennas. + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_PSH_HI: Phase values + * for first 16 antennas, 2 bits per antenna. + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_PSH_LO: Phase values + * for last 16 antennas, 2 bits per antenna. + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16: Contains + * DTYPE values (3 bits) for each distribution amplifier, followed + * by X16 switch bits for each distribution amplifier. There are + * total of 8 distribution amplifiers. + */ +enum qca_wlan_vendor_attr_dmg_rf_sector_cfg { + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX = 1, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_ETYPE0 = 2, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_ETYPE1 = 3, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_ETYPE2 = 4, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_PSH_HI = 5, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_PSH_LO = 6, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16 = 7, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_MAX = + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_tdls_configuration - Attributes for + * @QCA_NL80211_VENDOR_SUBCMD_CONFIGURE_TDLS configuration to the host driver. + * + * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TRIGGER_MODE: Configure the TDLS trigger + * mode in the host driver. enum qca_wlan_vendor_tdls_trigger_mode + * represents the different TDLS trigger modes. + * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TX_STATS_PERIOD: Duration (u32) within + * which QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TX_THRESHOLD number + * of packets shall meet the criteria for implicit TDLS setup. + * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TX_THRESHOLD: Number (u32) of Tx/Rx + * packets within a duration. + * QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TX_STATS_PERIOD to initiate + * a TDLS setup. + * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_DISCOVERY_PERIOD: Time (u32) to inititate + * a TDLS Discovery to the Peer. + * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_MAX_DISCOVERY_ATTEMPT: Max number (u32) of + * discovery attempts to know the TDLS capability of the peer. A peer is + * marked as TDLS not capable if there is no response for all the attempts. + * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_IDLE_TIMEOUT: Represents a duration (u32) + * within which QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_IDLE_PACKET_THRESHOLD + * number of TX / RX frames meet the criteria for TDLS teardown. + * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_IDLE_PACKET_THRESHOLD: Minimum number + * (u32) of Tx/Rx packets within a duration + * CA_WLAN_VENDOR_ATTR_TDLS_CONFIG_IDLE_TIMEOUT to tear down a TDLS link + * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_SETUP_RSSI_THRESHOLD: Threshold + * corresponding to the RSSI of the peer below which a TDLS + * setup is triggered. + * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TEARDOWN_RSSI_THRESHOLD: Threshold + * corresponding to the RSSI of the peer above which + * a TDLS teardown is triggered. + */ +enum qca_wlan_vendor_attr_tdls_configuration { + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TRIGGER_MODE = 1, + + /* Attributes configuring the TDLS Implicit Trigger */ + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TX_STATS_PERIOD = 2, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TX_THRESHOLD = 3, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_DISCOVERY_PERIOD = 4, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_MAX_DISCOVERY_ATTEMPT = 5, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_IDLE_TIMEOUT = 6, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_IDLE_PACKET_THRESHOLD = 7, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_SETUP_RSSI_THRESHOLD = 8, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TEARDOWN_RSSI_THRESHOLD = 9, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_MAX = + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_tdls_trigger_mode: Represents the TDLS trigger mode in + * the driver. + * + * The following are the different values for + * QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TRIGGER_MODE. + * + * @QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_EXPLICIT: The trigger to + * initiate/teardown the TDLS connection to a respective peer comes + * from the user space. wpa_supplicant provides the commands + * TDLS_SETUP, TDLS_TEARDOWN, TDLS_DISCOVER to do this. + * @QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_IMPLICIT: Host driver triggers this TDLS + * setup/teardown to the eligible peer once the configured criteria + * (such as TX/RX threshold, RSSI) is met. The attributes + * in QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_IMPLICIT_PARAMS correspond to + * the different configuration criteria for the TDLS trigger from the + * host driver. + * @QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_EXTERNAL: Enables the driver to trigger + * the TDLS setup / teardown through the implicit mode, only to the + * configured MAC addresses(wpa_supplicant, with tdls_external_control = 1, + * configures the MAC address through TDLS_SETUP/TDLS_TEARDOWN commands). + * External mode works on top of the implicit mode, thus the host Driver + * is expected to be configured in TDLS Implicit mode too to operate in + * External mode. Configuring External mode alone without Implicit + * mode is invalid. + * + * All the above implementations work as expected only when the host driver + * advertises the capability WPA_DRIVER_FLAGS_TDLS_EXTERNAL_SETUP - + * representing that the TDLS message exchange is not internal to the host + * driver, but depends on wpa_supplicant to do the message exchange. + */ +enum qca_wlan_vendor_tdls_trigger_mode { + QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_EXPLICIT = 1 << 0, + QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_IMPLICIT = 1 << 1, + QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_EXTERNAL = 1 << 2, +}; + +/** + * enum qca_vendor_attr_sar_limits_selections - Source of SAR power limits + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF0: Select SAR profile #0 + * that is hard-coded in the Board Data File (BDF). + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF1: Select SAR profile #1 + * that is hard-coded in the Board Data File (BDF). + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF2: Select SAR profile #2 + * that is hard-coded in the Board Data File (BDF). + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF3: Select SAR profile #3 + * that is hard-coded in the Board Data File (BDF). + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF4: Select SAR profile #4 + * that is hard-coded in the Board Data File (BDF). + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_NONE: Do not select any + * source of SAR power limits, thereby disabling the SAR power + * limit feature. + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_USER: Select the SAR power + * limits configured by %QCA_NL80211_VENDOR_SUBCMD_SET_SAR. + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_V2_0: Select the SAR power + * limits version 2.0 configured by %QCA_NL80211_VENDOR_SUBCMD_SET_SAR. + * + * This enumerates the valid set of values that may be supplied for + * attribute %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT in an instance of + * the %QCA_NL80211_VENDOR_SUBCMD_SET_SAR_LIMITS vendor command or in + * the response to an instance of the + * %QCA_NL80211_VENDOR_SUBCMD_GET_SAR_LIMITS vendor command. + */ +enum qca_vendor_attr_sar_limits_selections { + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF0 = 0, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF1 = 1, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF2 = 2, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF3 = 3, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF4 = 4, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_NONE = 5, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_USER = 6, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_V2_0 = 7, +}; + +/** + * enum qca_vendor_attr_sar_limits_spec_modulations - + * SAR limits specification modulation + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION_CCK - + * CCK modulation + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION_OFDM - + * OFDM modulation + * + * This enumerates the valid set of values that may be supplied for + * attribute %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION in an + * instance of attribute %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC in an + * instance of the %QCA_NL80211_VENDOR_SUBCMD_SET_SAR_LIMITS vendor + * command or in the response to an instance of the + * %QCA_NL80211_VENDOR_SUBCMD_GET_SAR_LIMITS vendor command. + */ +enum qca_vendor_attr_sar_limits_spec_modulations { + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION_CCK = 0, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION_OFDM = 1, +}; + +/** + * enum qca_vendor_attr_sar_limits - Attributes for SAR power limits + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SAR_ENABLE: Optional (u32) value to + * select which SAR power limit table should be used. Valid + * values are enumerated in enum + * %qca_vendor_attr_sar_limits_selections. The existing SAR + * power limit selection is unchanged if this attribute is not + * present. + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_NUM_SPECS: Optional (u32) value + * which specifies the number of SAR power limit specifications + * which will follow. + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC: Nested array of SAR power + * limit specifications. The number of specifications is + * specified by @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_NUM_SPECS. Each + * specification contains a set of + * QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_* attributes. A + * specification is uniquely identified by the attributes + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_BAND, + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_CHAIN, and + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION and always + * contains as a payload the attribute + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT. + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT_INDEX. + * Either %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT or + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT_INDEX is + * needed based upon the value of + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SAR_ENABLE. + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_BAND: Optional (u32) value to + * indicate for which band this specification applies. Valid + * values are enumerated in enum %nl80211_band (although not all + * bands may be supported by a given device). If the attribute is + * not supplied then the specification will be applied to all + * supported bands. + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_CHAIN: Optional (u32) value + * to indicate for which antenna chain this specification + * applies, i.e. 1 for chain 1, 2 for chain 2, etc. If the + * attribute is not supplied then the specification will be + * applied to all chains. + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION: Optional (u32) + * value to indicate for which modulation scheme this + * specification applies. Valid values are enumerated in enum + * %qca_vendor_attr_sar_limits_spec_modulations. If the attribute + * is not supplied then the specification will be applied to all + * modulation schemes. + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT: Required (u32) + * value to specify the actual power limit value in units of 0.5 + * dBm (i.e., a value of 11 represents 5.5 dBm). + * This is required, when %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT is + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_USER. + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT_INDEX: Required (u32) + * value to indicate SAR V2 indices (0 - 11) to select SAR V2 profiles. + * This is required, when %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT is + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_V2_0. + * + * These attributes are used with %QCA_NL80211_VENDOR_SUBCMD_SET_SAR_LIMITS + * and %QCA_NL80211_VENDOR_SUBCMD_GET_SAR_LIMITS. + */ +enum qca_vendor_attr_sar_limits { + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SAR_ENABLE = 1, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_NUM_SPECS = 2, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC = 3, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_BAND = 4, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_CHAIN = 5, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION = 6, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT = 7, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT_INDEX = 8, + + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_MAX = + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_ll_stats_ext - Attributes for MAC layer monitoring + * offload which is an extension for LL_STATS. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CFG_PERIOD: Monitoring period. Unit in ms. + * If MAC counters do not exceed the threshold, FW will report monitored + * link layer counters periodically as this setting. The first report is + * always triggered by this timer. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CFG_THRESHOLD: It is a percentage (1-99). + * For each MAC layer counter, FW holds two copies. One is the current value. + * The other is the last report. Once a current counter's increment is larger + * than the threshold, FW will indicate that counter to host even if the + * monitoring timer does not expire. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_CHG: Peer STA power state change + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TID: TID of MSDU + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_NUM_MSDU: Count of MSDU with the same + * failure code. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_STATUS: TX failure code + * 1: TX packet discarded + * 2: No ACK + * 3: Postpone + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_MAC_ADDRESS: peer MAC address + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_STATE: Peer STA current state + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_GLOBAL: Global threshold. + * Threshold for all monitored parameters. If per counter dedicated threshold + * is not enabled, this threshold will take effect. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_EVENT_MODE: Indicate what triggers this + * event, PERORID_TIMEOUT == 1, THRESH_EXCEED == 0. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IFACE_ID: interface ID + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_ID: peer ID + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_BITMAP: bitmap for TX counters + * Bit0: TX counter unit in MSDU + * Bit1: TX counter unit in MPDU + * Bit2: TX counter unit in PPDU + * Bit3: TX counter unit in byte + * Bit4: Dropped MSDUs + * Bit5: Dropped Bytes + * Bit6: MPDU retry counter + * Bit7: MPDU failure counter + * Bit8: PPDU failure counter + * Bit9: MPDU aggregation counter + * Bit10: MCS counter for ACKed MPDUs + * Bit11: MCS counter for Failed MPDUs + * Bit12: TX Delay counter + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_BITMAP: bitmap for RX counters + * Bit0: MAC RX counter unit in MPDU + * Bit1: MAC RX counter unit in byte + * Bit2: PHY RX counter unit in PPDU + * Bit3: PHY RX counter unit in byte + * Bit4: Disorder counter + * Bit5: Retry counter + * Bit6: Duplication counter + * Bit7: Discard counter + * Bit8: MPDU aggregation size counter + * Bit9: MCS counter + * Bit10: Peer STA power state change (wake to sleep) counter + * Bit11: Peer STA power save counter, total time in PS mode + * Bit12: Probe request counter + * Bit13: Other management frames counter + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_CCA_BSS_BITMAP: bitmap for CCA + * Bit0: Idle time + * Bit1: TX time + * Bit2: time RX in current bss + * Bit3: Out of current bss time + * Bit4: Wireless medium busy time + * Bit5: RX in bad condition time + * Bit6: TX in bad condition time + * Bit7: time wlan card not available + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_SIGNAL_BITMAP: bitmap for signal + * Bit0: Per channel SNR counter + * Bit1: Per channel noise floor counter + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_NUM: number of peers + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_CHANNEL_NUM: number of channels + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_AC_RX_NUM: number of RX stats + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_CCA_BSS: per channel BSS CCA stats + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER: container for per PEER stats + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_MSDU: Number of total TX MSDUs + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_MPDU: Number of total TX MPDUs + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_PPDU: Number of total TX PPDUs + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_BYTES: bytes of TX data + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_DROP: Number of dropped TX packets + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_DROP_BYTES: Bytes dropped + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_RETRY: waiting time without an ACK + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_NO_ACK: number of MPDU not-ACKed + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_NO_BACK: number of PPDU not-ACKed + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_AGGR_NUM: + * aggregation stats buffer length + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_SUCC_MCS_NUM: length of mcs stats + * buffer for ACKed MPDUs. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_FAIL_MCS_NUM: length of mcs stats + * buffer for failed MPDUs. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_DELAY_ARRAY_SIZE: + * length of delay stats array. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_AGGR: TX aggregation stats + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_SUCC_MCS: MCS stats for ACKed MPDUs + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_FAIL_MCS: MCS stats for failed MPDUs + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_DELAY: tx delay stats + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU: MPDUs received + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_BYTES: bytes received + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_PPDU: PPDU received + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_PPDU_BYTES: PPDU bytes received + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_LOST: packets lost + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_RETRY: number of RX packets + * flagged as retransmissions + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_DUP: number of RX packets + * flagged as duplicated + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_DISCARD: number of RX + * packets discarded + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_AGGR_NUM: length of RX aggregation + * stats buffer. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MCS_NUM: length of RX mcs + * stats buffer. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MCS: RX mcs stats buffer + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_AGGR: aggregation stats buffer + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_TIMES: times STAs go to sleep + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_DURATION: STAs' total sleep time + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_PROBE_REQ: number of probe + * requests received + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MGMT: number of other mgmt + * frames received + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IDLE_TIME: Percentage of idle time + * there is no TX, nor RX, nor interference. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_TIME: percentage of time + * transmitting packets. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_TIME: percentage of time + * for receiving. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_BUSY: percentage of time + * interference detected. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_BAD: percentage of time + * receiving packets with errors. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_BAD: percentage of time + * TX no-ACK. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_NO_AVAIL: percentage of time + * the chip is unable to work in normal conditions. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IN_BSS_TIME: percentage of time + * receiving packets in current BSS. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_OUT_BSS_TIME: percentage of time + * receiving packets not in current BSS. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_ANT_NUM: number of antennas + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_SIGNAL: + * This is a container for per antenna signal stats. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_ANT_SNR: per antenna SNR value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_ANT_NF: per antenna NF value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IFACE_RSSI_BEACON: RSSI of beacon + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IFACE_SNR_BEACON: SNR of beacon + */ +enum qca_wlan_vendor_attr_ll_stats_ext { + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_INVALID = 0, + + /* Attributes for configurations */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_CFG_PERIOD, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CFG_THRESHOLD, + + /* Peer STA power state change */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_CHG, + + /* TX failure event */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TID, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_NUM_MSDU, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_STATUS, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_STATE, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_MAC_ADDRESS, + + /* MAC counters */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_GLOBAL, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_EVENT_MODE, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IFACE_ID, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_ID, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_BITMAP, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_BITMAP, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_CCA_BSS_BITMAP, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_SIGNAL_BITMAP, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_CHANNEL_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_CCA_BSS, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER, + + /* Sub-attributes for PEER_AC_TX */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_MSDU, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_MPDU, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_PPDU, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_BYTES, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_DROP, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_DROP_BYTES, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_RETRY, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_NO_ACK, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_NO_BACK, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_AGGR_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_SUCC_MCS_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_FAIL_MCS_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_AGGR, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_SUCC_MCS, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_FAIL_MCS, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_DELAY_ARRAY_SIZE, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_DELAY, + + /* Sub-attributes for PEER_AC_RX */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_BYTES, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_PPDU, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_PPDU_BYTES, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_LOST, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_RETRY, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_DUP, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_DISCARD, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_AGGR_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MCS_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MCS, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_AGGR, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_TIMES, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_DURATION, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_PROBE_REQ, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MGMT, + + /* Sub-attributes for CCA_BSS */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IDLE_TIME, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_TIME, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_TIME, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_BUSY, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_BAD, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_BAD, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_NO_AVAIL, + + /* sub-attribute for BSS_RX_TIME */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IN_BSS_TIME, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_OUT_BSS_TIME, + + /* Sub-attributes for PEER_SIGNAL */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_ANT_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_SIGNAL, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_ANT_SNR, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_ANT_NF, + + /* Sub-attributes for IFACE_BSS */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IFACE_RSSI_BEACON, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IFACE_SNR_BEACON, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_LAST, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_MAX = + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_external_acs_channels: Attributes to vendor subcmd + * QCA_NL80211_VENDOR_SUBCMD_EXTERNAL_ACS. This carries a list of channels + * in priority order as decided after ACS operation in userspace. + * + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_REASON: Required (u8). + * One of reason code from enum qca_wlan_vendor_acs_select_reason. + * + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_LIST: Required + * Array of nested values for each channel with following attributes: + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_PRIMARY, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_SECONDARY, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG0, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG1, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_WIDTH + * Note: If both the driver and user-space application supports the 6 GHz band, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_LIST is deprecated and use + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_LIST. + * To maintain backward compatibility, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_LIST + * is still used if either of the driver or user space application doesn't + * support the 6 GHz band. + * + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_PRIMARY: Required (u8). + * Primary channel number + * Note: If both the driver and user-space application supports the 6 GHz band, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_PRIMARY is deprecated and use + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_PRIMARY. + * To maintain backward compatibility, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_PRIMARY + * is still used if either of the driver or user space application doesn't + * support the 6 GHz band. + * + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_SECONDARY: Required (u8). + * Secondary channel number, required only for 160 and 80+80 MHz bandwidths. + * Note: If both the driver and user-space application supports the 6 GHz band, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_SECONDARY is deprecated and use + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_SECONDARY. + * To maintain backward compatibility, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_SECONDARY + * is still used if either of the driver or user space application + * doesn't support the 6 GHz band. + * + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG0: Required (u8). + * VHT seg0 channel number + * Note: If both the driver and user-space application supports the 6 GHz band, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG0 is deprecated and use + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_CENTER_SEG0. + * To maintain backward compatibility, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG0 + * is still used if either of the driver or user space application + * doesn't support the 6 GHz band. + * + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG1: Required (u8). + * VHT seg1 channel number + * Note: If both the driver and user-space application supports the 6 GHz band, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG1 is deprecated and use + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_CENTER_SEG1. + * To maintain backward compatibility, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG1 + * is still used if either of the driver or user space application + * doesn't support the 6 GHz band. + * + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_WIDTH: Required (u8). + * Takes one of enum nl80211_chan_width values. + * + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_LIST: Required + * Array of nested values for each channel with following attributes: + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_PRIMARY in MHz (u32), + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_SECONDARY in MHz (u32), + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_CENTER_SEG0 in MHz (u32), + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_CENTER_SEG1 in MHz (u32), + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_WIDTH + * Note: If user-space application has no support of the 6 GHz band, this + * attribute is optional. + * + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_PRIMARY: Required (u32) + * Primary channel frequency in MHz + * Note: If user-space application has no support of the 6 GHz band, this + * attribute is optional. + * + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_SECONDARY: Required (u32) + * Secondary channel frequency in MHz used for HT 40 MHz channels. + * Note: If user-space application has no support of the 6 GHz band, this + * attribute is optional. + * + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_CENTER_SEG0: Required (u32) + * VHT seg0 channel frequency in MHz + * Note: If user-space application has no support of the 6GHz band, this + * attribute is optional. + * + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_CENTER_SEG1: Required (u32) + * VHT seg1 channel frequency in MHz + * Note: If user-space application has no support of the 6 GHz band, this + * attribute is optional. + */ +enum qca_wlan_vendor_attr_external_acs_channels { + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_INVALID = 0, + + /* One of reason code (u8) from enum qca_wlan_vendor_acs_select_reason + */ + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_REASON = 1, + + /* Array of nested values for each channel with following attributes: + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_BAND, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_PRIMARY, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_SECONDARY, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG0, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG1, + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_WIDTH + */ + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_LIST = 2, + /* This (u8) will hold values of one of enum nl80211_bands */ + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_BAND = 3, + /* Primary channel (u8) */ + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_PRIMARY = 4, + /* Secondary channel (u8) used for HT 40 MHz channels */ + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_SECONDARY = 5, + /* VHT seg0 channel (u8) */ + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG0 = 6, + /* VHT seg1 channel (u8) */ + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG1 = 7, + /* Channel width (u8). Takes one of enum nl80211_chan_width values. */ + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_WIDTH = 8, + + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_LIST = 9, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_PRIMARY = 10, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_SECONDARY = 11, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_CENTER_SEG0 = 12, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_FREQUENCY_CENTER_SEG1 = 13, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_LAST, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_MAX = + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_LAST - 1 +}; + +/** + * qca_wlan_vendor_acs_select_reason: This represents the different reasons why + * the ACS has to be triggered. These values are used by + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_REASON and + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_REASON + */ +enum qca_wlan_vendor_acs_select_reason { + /* Represents the reason that the ACS triggered during the AP start */ + QCA_WLAN_VENDOR_ACS_SELECT_REASON_INIT, + /* Represents the reason that DFS found with the current channel */ + QCA_WLAN_VENDOR_ACS_SELECT_REASON_DFS, + /* Represents the reason that LTE co-exist in the current band. */ + QCA_WLAN_VENDOR_ACS_SELECT_REASON_LTE_COEX, + /* Represents the reason that generic, uncategorized interference has + * been found in the current channel. + */ + QCA_WLAN_VENDOR_ACS_SELECT_REASON_GENERIC_INTERFERENCE, + /* Represents the reason that excessive 802.11 interference has been + * found in the current channel. + */ + QCA_WLAN_VENDOR_ACS_SELECT_REASON_80211_INTERFERENCE, + /* Represents the reason that generic Continuous Wave (CW) interference + * has been found in the current channel. + */ + QCA_WLAN_VENDOR_ACS_SELECT_REASON_CW_INTERFERENCE, + /* Represents the reason that Microwave Oven (MWO) interference has been + * found in the current channel. + */ + QCA_WLAN_VENDOR_ACS_SELECT_REASON_MWO_INTERFERENCE, + /* Represents the reason that generic Frequency-Hopping Spread Spectrum + * (FHSS) interference has been found in the current channel. This may + * include 802.11 waveforms. + */ + QCA_WLAN_VENDOR_ACS_SELECT_REASON_FHSS_INTERFERENCE, + /* Represents the reason that non-802.11 generic Frequency-Hopping + * Spread Spectrum (FHSS) interference has been found in the current + * channel. + */ + QCA_WLAN_VENDOR_ACS_SELECT_REASON_NON_80211_FHSS_INTERFERENCE, + /* Represents the reason that generic Wideband (WB) interference has + * been found in the current channel. This may include 802.11 waveforms. + */ + QCA_WLAN_VENDOR_ACS_SELECT_REASON_WB_INTERFERENCE, + /* Represents the reason that non-802.11 generic Wideband (WB) + * interference has been found in the current channel. + */ + QCA_WLAN_VENDOR_ACS_SELECT_REASON_NON_80211_WB_INTERFERENCE, + /* Represents the reason that Jammer interference has been found in the + * current channel. + */ + QCA_WLAN_VENDOR_ACS_SELECT_REASON_JAMMER_INTERFERENCE, +}; + +/** + * enum qca_wlan_gpio_attr - Parameters for GPIO configuration + * + * @QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_COMMAND: Required (u32) + * value to specify the gpio command, please refer to enum qca_gpio_cmd_type + * to get the available value that this item can use. + * + * @QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_PINNUM: Required (u32) + * value to specify the gpio number. + * Required, when %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_COMMAND is + * %QCA_WLAN_VENDOR_GPIO_CONFIG or %.QCA_WLAN_VENDOR_GPIO_OUTPUT. + * + * @QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_VALUE: Required (u32) + * value to specify the gpio output level, please refer to enum qca_gpio_value + * to get the available value that this item can use. + * Required, when %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_COMMAND is + * %QCA_WLAN_VENDOR_GPIO_OUTPUT. + * + * @QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_PULL_TYPE: Optional (u32) + * value to specify the gpio pull type, please refer to enum qca_gpio_pull_type + * to get the available value that this item can use. + * Required, when %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_COMMAND is + * %QCA_WLAN_VENDOR_GPIO_CONFIG and + * %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTERNAL_CONFIG attribute is not present. + * Optional when %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTERNAL_CONFIG + * attribute is present. + * + * @QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTR_MODE: Optional (u32) + * value to specify the gpio interrupt mode, please refer to enum + * qca_gpio_interrupt_mode to get the available value that this item can use. + * Required, when %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_COMMAND is + * %QCA_WLAN_VENDOR_GPIO_CONFIG and + * %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTERNAL_CONFIG attribute is not present. + * Optional when %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTERNAL_CONFIG + * attribute is present. + * + * @QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_DIR: Optional (u32) + * value to specify the gpio direction, please refer to enum qca_gpio_direction + * to get the available value that this item can use. + * Required, when %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_COMMAND is + * %QCA_WLAN_VENDOR_GPIO_CONFIG and + * %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTERNAL_CONFIG attribute is not present. + * Optional when %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTERNAL_CONFIG + * attribute is present. + * + * @QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_MUX_CONFIG: Optional (u32) + * Value to specify the mux config. Meaning of a given value is dependent + * on the target chipset and gpio pin. Must be of the range 0-15. + * Optional when %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_COMMAND is + * %QCA_WLAN_VENDOR_GPIO_CONFIG. Defaults to 0. + * + * @QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_DRIVE: Optional (u32) + * Value to specify the drive, Refer to enum qca_gpio_drive. + * Optional when %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_COMMAND is + * %QCA_WLAN_VENDOR_GPIO_CONFIG. Defaults to QCA_WLAN_GPIO_DRIVE_2MA(0). + * + * @QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTERNAL_CONFIG: Optional (flag) + * Optional when %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_COMMAND is + * %QCA_WLAN_VENDOR_GPIO_CONFIG. When present this attribute signals that all + * other parameters for the given GPIO will be obtained from internal + * configuration. Only %QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_PINNUM must be + * specified to indicate the GPIO pin being configured. + */ +enum qca_wlan_gpio_attr { + QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INVALID = 0, + /* Unsigned 32-bit attribute for GPIO command */ + QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_COMMAND = 1, + /* Unsigned 32-bit attribute for GPIO PIN number to configure */ + QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_PINNUM = 2, + /* Unsigned 32-bit attribute for GPIO value to configure */ + QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_VALUE = 3, + /* Unsigned 32-bit attribute for GPIO pull type */ + QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_PULL_TYPE = 4, + /* Unsigned 32-bit attribute for GPIO interrupt mode */ + QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTR_MODE = 5, + /* Unsigned 32-bit attribute for GPIO direction to configure */ + QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_DIR = 6, + /* Unsigned 32-bit attribute for GPIO mux config */ + QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_MUX_CONFIG = 7, + /* Unsigned 32-bit attribute for GPIO drive */ + QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_DRIVE = 8, + /* Flag attribute for using internal gpio configuration */ + QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTERNAL_CONFIG = 9, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_LAST, + QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_MAX = + QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_LAST - 1, +}; + +/** + * enum gpio_cmd_type - GPIO configuration command type + * @QCA_WLAN_VENDOR_GPIO_CONFIG: set gpio configuration info + * @QCA_WLAN_VENDOR_GPIO_OUTPUT: set gpio output level + */ +enum qca_gpio_cmd_type { + QCA_WLAN_VENDOR_GPIO_CONFIG = 0, + QCA_WLAN_VENDOR_GPIO_OUTPUT = 1, +}; + +/** + * enum qca_gpio_pull_type - GPIO pull type + * @QCA_WLAN_GPIO_PULL_NONE: set gpio pull type to none + * @QCA_WLAN_GPIO_PULL_UP: set gpio pull up + * @QCA_WLAN_GPIO_PULL_DOWN: set gpio pull down + */ +enum qca_gpio_pull_type { + QCA_WLAN_GPIO_PULL_NONE = 0, + QCA_WLAN_GPIO_PULL_UP = 1, + QCA_WLAN_GPIO_PULL_DOWN = 2, + QCA_WLAN_GPIO_PULL_MAX, +}; + +/** + * enum qca_gpio_direction - GPIO direction + * @QCA_WLAN_GPIO_INPUT: set gpio as input mode + * @QCA_WLAN_GPIO_OUTPUT: set gpio as output mode + * @QCA_WLAN_GPIO_VALUE_MAX: invalid value + */ +enum qca_gpio_direction { + QCA_WLAN_GPIO_INPUT = 0, + QCA_WLAN_GPIO_OUTPUT = 1, + QCA_WLAN_GPIO_DIR_MAX, +}; + +/** + * enum qca_gpio_value - GPIO Value + * @QCA_WLAN_GPIO_LEVEL_LOW: set gpio output level to low + * @QCA_WLAN_GPIO_LEVEL_HIGH: set gpio output level to high + * @QCA_WLAN_GPIO_LEVEL_MAX: invalid value + */ +enum qca_gpio_value { + QCA_WLAN_GPIO_LEVEL_LOW = 0, + QCA_WLAN_GPIO_LEVEL_HIGH = 1, + QCA_WLAN_GPIO_LEVEL_MAX, +}; + +/** + * enum gpio_interrupt_mode - GPIO interrupt mode + * @QCA_WLAN_GPIO_INTMODE_DISABLE: disable interrupt trigger + * @QCA_WLAN_GPIO_INTMODE_RISING_EDGE: interrupt with gpio rising edge trigger + * @QCA_WLAN_GPIO_INTMODE_FALLING_EDGE: interrupt with gpio falling edge trigger + * @QCA_WLAN_GPIO_INTMODE_BOTH_EDGE: interrupt with gpio both edge trigger + * @QCA_WLAN_GPIO_INTMODE_LEVEL_LOW: interrupt with gpio level low trigger + * @QCA_WLAN_GPIO_INTMODE_LEVEL_HIGH: interrupt with gpio level high trigger + * @QCA_WLAN_GPIO_INTMODE_MAX: invalid value + */ +enum qca_gpio_interrupt_mode { + QCA_WLAN_GPIO_INTMODE_DISABLE = 0, + QCA_WLAN_GPIO_INTMODE_RISING_EDGE = 1, + QCA_WLAN_GPIO_INTMODE_FALLING_EDGE = 2, + QCA_WLAN_GPIO_INTMODE_BOTH_EDGE = 3, + QCA_WLAN_GPIO_INTMODE_LEVEL_LOW = 4, + QCA_WLAN_GPIO_INTMODE_LEVEL_HIGH = 5, + QCA_WLAN_GPIO_INTMODE_MAX, +}; + +/** + * enum gpio_drive - GPIO drive + * @QCA_WLAN_GPIO_DRIVE_2MA: drive 2MA + * @QCA_WLAN_GPIO_DRIVE_4MA: drive 4MA + * @QCA_WLAN_GPIO_DRIVE_6MA: drive 6MA + * @QCA_WLAN_GPIO_DRIVE_8MA: drive 8MA + * @QCA_WLAN_GPIO_DRIVE_10MA: drive 10MA + * @QCA_WLAN_GPIO_DRIVE_12MA: drive 12MA + * @QCA_WLAN_GPIO_DRIVE_14MA: drive 14MA + * @QCA_WLAN_GPIO_DRIVE_16MA: drive 16MA + * @QCA_WLAN_GPIO_DRIVE_MAX: invalid gpio drive + */ +enum qca_gpio_drive { + QCA_WLAN_GPIO_DRIVE_2MA = 0, + QCA_WLAN_GPIO_DRIVE_4MA = 1, + QCA_WLAN_GPIO_DRIVE_6MA = 2, + QCA_WLAN_GPIO_DRIVE_8MA = 3, + QCA_WLAN_GPIO_DRIVE_10MA = 4, + QCA_WLAN_GPIO_DRIVE_12MA = 5, + QCA_WLAN_GPIO_DRIVE_14MA = 6, + QCA_WLAN_GPIO_DRIVE_16MA = 7, + QCA_WLAN_GPIO_DRIVE_MAX, +}; + +/** + * qca_wlan_set_qdepth_thresh_attr - Parameters for setting + * MSDUQ depth threshold per peer per tid in the target + * + * Associated Vendor Command: + * QCA_NL80211_VENDOR_SUBCMD_SET_QDEPTH_THRESH + */ +enum qca_wlan_set_qdepth_thresh_attr { + QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_INVALID = 0, + /* 6-byte MAC address */ + QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_MAC_ADDR, + /* Unsigned 32-bit attribute for holding the TID */ + QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_TID, + /* Unsigned 32-bit attribute for holding the update mask + * bit 0 - Update high priority msdu qdepth threshold + * bit 1 - Update low priority msdu qdepth threshold + * bit 2 - Update UDP msdu qdepth threshold + * bit 3 - Update Non UDP msdu qdepth threshold + * rest of bits are reserved + */ + QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_UPDATE_MASK, + /* Unsigned 32-bit attribute for holding the threshold value */ + QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_VALUE, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_LAST, + QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_MAX = + QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_LAST - 1, +}; + +/** + * qca_wlan_vendor_attr_external_acs_policy: Attribute values for + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_POLICY to the vendor subcmd + * QCA_NL80211_VENDOR_SUBCMD_EXTERNAL_ACS. This represents the + * external ACS policies to select the channels w.r.t. the PCL weights. + * (QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_PCL represents the channels and + * their PCL weights.) + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_POLICY_PCL_MANDATORY: Mandatory to + * select a channel with non-zero PCL weight. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_POLICY_PCL_PREFERRED: Prefer a + * channel with non-zero PCL weight. + * + */ +enum qca_wlan_vendor_attr_external_acs_policy { + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_POLICY_PCL_PREFERRED, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_POLICY_PCL_MANDATORY, +}; + +/** + * enum qca_wlan_vendor_attr_spectral_scan - Spectral scan config parameters + */ +enum qca_wlan_vendor_attr_spectral_scan { + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_INVALID = 0, + /* + * Number of times the chip enters spectral scan mode before + * deactivating spectral scans. When set to 0, chip will enter spectral + * scan mode continuously. u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_COUNT = 1, + /* + * Spectral scan period. Period increment resolution is 256*Tclk, + * where Tclk = 1/44 MHz (Gmode), 1/40 MHz (Amode). u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_PERIOD = 2, + /* Spectral scan priority. u32 attribute. */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PRIORITY = 3, + /* Number of FFT data points to compute. u32 attribute. */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_SIZE = 4, + /* + * Enable targeted gain change before starting the spectral scan FFT. + * u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_GC_ENA = 5, + /* Restart a queued spectral scan. u32 attribute. */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RESTART_ENA = 6, + /* + * Noise floor reference number for the calculation of bin power. + * u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NOISE_FLOOR_REF = 7, + /* + * Disallow spectral scan triggers after TX/RX packets by setting + * this delay value to roughly SIFS time period or greater. + * u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_INIT_DELAY = 8, + /* + * Number of strong bins (inclusive) per sub-channel, below + * which a signal is declared a narrow band tone. u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NB_TONE_THR = 9, + /* + * Specify the threshold over which a bin is declared strong (for + * scan bandwidth analysis). u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_STR_BIN_THR = 10, + /* Spectral scan report mode. u32 attribute. */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_WB_RPT_MODE = 11, + /* + * RSSI report mode, if the ADC RSSI is below + * QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_THR, + * then FFTs will not trigger, but timestamps and summaries get + * reported. u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_RPT_MODE = 12, + /* + * ADC RSSI must be greater than or equal to this threshold (signed dB) + * to ensure spectral scan reporting with normal error code. + * u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_THR = 13, + /* + * Format of frequency bin magnitude for spectral scan triggered FFTs: + * 0: linear magnitude, 1: log magnitude (20*log10(lin_mag)). + * u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PWR_FORMAT = 14, + /* + * Format of FFT report to software for spectral scan triggered FFTs. + * 0: No FFT report (only spectral scan summary report) + * 1: 2-dword summary of metrics for each completed FFT + spectral scan + * report + * 2: 2-dword summary of metrics for each completed FFT + 1x-oversampled + * bins (in-band) per FFT + spectral scan summary report + * 3: 2-dword summary of metrics for each completed FFT + 2x-oversampled + * bins (all) per FFT + spectral scan summary report + * u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RPT_MODE = 15, + /* + * Number of LSBs to shift out in order to scale the FFT bins. + * u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_BIN_SCALE = 16, + /* + * Set to 1 (with spectral_scan_pwr_format=1), to report bin magnitudes + * in dBm power. u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DBM_ADJ = 17, + /* + * Per chain enable mask to select input ADC for search FFT. + * u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_CHN_MASK = 18, + /* + * An unsigned 64-bit integer provided by host driver to identify the + * spectral scan request. This attribute is included in the scan + * response message for @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_START + * and used as an attribute in + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_STOP to identify the + * specific scan to be stopped. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_COOKIE = 19, + /* Skip interval for FFT reports. u32 attribute */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_PERIOD = 20, + /* Set to report only one set of FFT results. + * u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SHORT_REPORT = 21, + /* Debug level for spectral module in driver. + * 0 : Verbosity level 0 + * 1 : Verbosity level 1 + * 2 : Verbosity level 2 + * 3 : Matched filterID display + * 4 : One time dump of FFT report + * u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DEBUG_LEVEL = 22, + /* Type of spectral scan request. u32 attribute. + * It uses values defined in enum + * qca_wlan_vendor_attr_spectral_scan_request_type. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE = 23, + /* This specifies the frequency span over which spectral + * scan would be carried out. Its value depends on the + * value of QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_MODE and + * the relation is as follows. + * QCA_WLAN_VENDOR_SPECTRAL_SCAN_MODE_NORMAL + * Not applicable. Spectral scan would happen in the + * operating span. + * QCA_WLAN_VENDOR_SPECTRAL_SCAN_MODE_AGILE + * Center frequency (in MHz) of the span of interest or + * for convenience, center frequency (in MHz) of any channel + * in the span of interest. If agile spectral scan is initiated + * without setting a valid frequency it returns the error code + * (QCA_WLAN_VENDOR_SPECTRAL_SCAN_ERR_PARAM_NOT_INITIALIZED). + * u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FREQUENCY = 24, + /* Spectral scan mode. u32 attribute. + * It uses values defined in enum qca_wlan_vendor_spectral_scan_mode. + * If this attribute is not present, it is assumed to be + * normal mode (QCA_WLAN_VENDOR_SPECTRAL_SCAN_MODE_NORMAL). + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_MODE = 25, + /* Spectral scan error code. u32 attribute. + * It uses values defined in enum + * qca_wlan_vendor_spectral_scan_error_code. + * This attribute is included only in failure scenarios. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_ERROR_CODE = 26, + /* 8-bit unsigned value to enable/disable debug of the + * Spectral DMA ring. + * 1-enable, 0-disable + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DMA_RING_DEBUG = 27, + /* 8-bit unsigned value to enable/disable debug of the + * Spectral DMA buffers. + * 1-enable, 0-disable + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DMA_BUFFER_DEBUG = 28, + + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_MAX = + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_spectral_diag_stats - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_DIAG_STATS. + */ +enum qca_wlan_vendor_attr_spectral_diag_stats { + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_INVALID = 0, + /* Number of spectral TLV signature mismatches. + * u64 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_SIG_MISMATCH = 1, + /* Number of spectral phyerror events with insufficient length when + * parsing for secondary 80 search FFT report. u64 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_SEC80_SFFT_INSUFFLEN = 2, + /* Number of spectral phyerror events without secondary 80 + * search FFT report. u64 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_NOSEC80_SFFT = 3, + /* Number of spectral phyerror events with vht operation segment 1 id + * mismatches in search fft report. u64 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_VHTSEG1ID_MISMATCH = 4, + /* Number of spectral phyerror events with vht operation segment 2 id + * mismatches in search fft report. u64 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_VHTSEG2ID_MISMATCH = 5, + + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_MAX = + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_spectral_cap - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_CAP_INFO. + */ +enum qca_wlan_vendor_attr_spectral_cap { + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_INVALID = 0, + /* Flag attribute to indicate phydiag capability */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_PHYDIAG = 1, + /* Flag attribute to indicate radar detection capability */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_RADAR = 2, + /* Flag attribute to indicate spectral capability */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_SPECTRAL = 3, + /* Flag attribute to indicate advanced spectral capability */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_ADVANCED_SPECTRAL = 4, + /* Spectral hardware generation. u32 attribute. + * It uses values defined in enum + * qca_wlan_vendor_spectral_scan_cap_hw_gen. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_HW_GEN = 5, + /* Spectral bin scaling formula ID. u16 attribute. + * It uses values defined in enum + * qca_wlan_vendor_spectral_scan_cap_formula_id. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_FORMULA_ID = 6, + /* Spectral bin scaling param - low level offset. + * s16 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_LOW_LEVEL_OFFSET = 7, + /* Spectral bin scaling param - high level offset. + * s16 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_HIGH_LEVEL_OFFSET = 8, + /* Spectral bin scaling param - RSSI threshold. + * s16 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_RSSI_THR = 9, + /* Spectral bin scaling param - default AGC max gain. + * u8 attribute. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_DEFAULT_AGC_MAX_GAIN = 10, + /* Flag attribute to indicate agile spectral scan capability + * for 20/40/80 MHz modes. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_AGILE_SPECTRAL = 11, + /* Flag attribute to indicate agile spectral scan capability + * for 160 MHz mode. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_AGILE_SPECTRAL_160 = 12, + /* Flag attribute to indicate agile spectral scan capability + * for 80+80 MHz mode. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_AGILE_SPECTRAL_80_80 = 13, + + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_MAX = + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_spectral_scan_status - used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_STATUS. + */ +enum qca_wlan_vendor_attr_spectral_scan_status { + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_INVALID = 0, + /* Flag attribute to indicate whether spectral scan is enabled */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_IS_ENABLED = 1, + /* Flag attribute to indicate whether spectral scan is in progress*/ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_IS_ACTIVE = 2, + /* Spectral scan mode. u32 attribute. + * It uses values defined in enum qca_wlan_vendor_spectral_scan_mode. + * If this attribute is not present, normal mode + * (QCA_WLAN_VENDOR_SPECTRAL_SCAN_MODE_NORMAL is assumed to be + * requested. + */ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_MODE = 3, + + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_MAX = + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_config_latency_level - Level for + * wlan latency module. + * + * There will be various of Wi-Fi functionality like scan/roaming/adaptive + * power saving which would causing data exchange out of service, this + * would be a big impact on latency. For latency sensitive applications over + * Wi-Fi are intolerant to such operations and thus would configure them + * to meet their respective needs. It is well understood by such applications + * that altering the default behavior would degrade the Wi-Fi functionality + * w.r.t the above pointed WLAN operations. + * + * @QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_NORMAL: + * Default WLAN operation level which throughput orientated. + * @QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_MODERATE: + * Use moderate level to improve latency by limit scan duration. + * @QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_LOW: + * Use low latency level to benifit application like concurrent + * downloading or video streaming via constraint scan/adaptive PS. + * @QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_ULTRALOW: + * Use ultra low latency level to benefit for gaming/voice + * application via constraint scan/roaming/adaptive PS. + */ +enum qca_wlan_vendor_attr_config_latency_level { + QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_NORMAL = 1, + QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_MODERATE = 2, + QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_LOW = 3, + QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_ULTRALOW = 4, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_MAX = + QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_AFTER_LAST - 1, +}; + +/** + * qca_wlan_vendor_attr_spectral_scan_request_type: Attribute values for + * QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE to the vendor subcmd + * QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_START. This represents the + * spectral scan request types. + * @QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_SCAN_AND_CONFIG: Request to + * set the spectral parameters and start scan. + * @QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_SCAN: Request to + * only set the spectral parameters. + * @QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_CONFIG: Request to + * only start the spectral scan. + */ +enum qca_wlan_vendor_attr_spectral_scan_request_type { + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_SCAN_AND_CONFIG, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_SCAN, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_CONFIG, +}; + +/** + * qca_wlan_vendor_spectral_scan_mode: Attribute values for + * QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_MODE in the vendor subcmd + * QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_START and + * QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_MODE in the vendor subcmd + * QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_STATUS. This represents the + * spectral scan modes. + * @QCA_WLAN_VENDOR_SPECTRAL_SCAN_MODE_NORMAL: Normal spectral scan: + * spectral scan in the current operating span. + * @QCA_WLAN_VENDOR_SPECTRAL_SCAN_MODE_AGILE: Agile spectral scan: + * spectral scan in the configured agile span. + */ +enum qca_wlan_vendor_spectral_scan_mode { + QCA_WLAN_VENDOR_SPECTRAL_SCAN_MODE_NORMAL = 0, + QCA_WLAN_VENDOR_SPECTRAL_SCAN_MODE_AGILE = 1, +}; + +/** + * qca_wlan_vendor_spectral_scan_error_code: Attribute values for + * QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_ERROR_CODE in the vendor subcmd + * QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_START. + * @QCA_WLAN_VENDOR_SPECTRAL_SCAN_ERR_PARAM_UNSUPPORTED: Changing the value + * of a parameter is not supported. + * @QCA_WLAN_VENDOR_SPECTRAL_SCAN_ERR_MODE_UNSUPPORTED: Requested spectral scan + * mode is not supported. + * @QCA_WLAN_VENDOR_SPECTRAL_SCAN_ERR_PARAM_INVALID_VALUE: A parameter + * has invalid value. + * @QCA_WLAN_VENDOR_SPECTRAL_SCAN_ERR_PARAM_NOT_INITIALIZED: A parameter + * is not initialized. + */ +enum qca_wlan_vendor_spectral_scan_error_code { + QCA_WLAN_VENDOR_SPECTRAL_SCAN_ERR_PARAM_UNSUPPORTED = 0, + QCA_WLAN_VENDOR_SPECTRAL_SCAN_ERR_MODE_UNSUPPORTED = 1, + QCA_WLAN_VENDOR_SPECTRAL_SCAN_ERR_PARAM_INVALID_VALUE = 2, + QCA_WLAN_VENDOR_SPECTRAL_SCAN_ERR_PARAM_NOT_INITIALIZED = 3, +}; + +/** + * qca_wlan_vendor_spectral_scan_cap_hw_gen: Attribute values for + * QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_HW_GEN to the vendor subcmd + * QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_CAP_INFO. This represents the + * spectral hardware generation. + * @QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_HW_GEN_1: generation 1 + * @QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_HW_GEN_2: generation 2 + * @QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_HW_GEN_3: generation 3 + */ +enum qca_wlan_vendor_spectral_scan_cap_hw_gen { + QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_HW_GEN_1 = 0, + QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_HW_GEN_2 = 1, + QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_HW_GEN_3 = 2, +}; + +/** + * qca_wlan_vendor_spectral_scan_cap_formula_id: Attribute values for + * QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_FORMULA_ID in the vendor subcmd + * QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_CAP_INFO. This represents the + * Spectral bin scaling formula ID. + * @QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_NO_SCALING: No scaling + * @QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_AGC_GAIN_RSSI_CORR_BASED: AGC gain + * and RSSI threshold based formula. + */ +enum qca_wlan_vendor_spectral_scan_cap_formula_id { + QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_NO_SCALING = 0, + QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_AGC_GAIN_RSSI_CORR_BASED = 1, +}; + +/** + * enum qca_wlan_vendor_attr_rropavail_info - Specifies whether Representative + * RF Operating Parameter (RROP) information is available, and if so, at which + * point in the application-driver interaction sequence it can be retrieved by + * the application from the driver. This point may vary by architecture and + * other factors. This is a u16 value. + */ +enum qca_wlan_vendor_attr_rropavail_info { + /* RROP information is unavailable. */ + QCA_WLAN_VENDOR_ATTR_RROPAVAIL_INFO_UNAVAILABLE, + /* RROP information is available and the application can retrieve the + * information after receiving an QCA_NL80211_VENDOR_SUBCMD_EXTERNAL_ACS + * event from the driver. + */ + QCA_WLAN_VENDOR_ATTR_RROPAVAIL_INFO_EXTERNAL_ACS_START, + /* RROP information is available only after a vendor specific scan + * (requested using QCA_NL80211_VENDOR_SUBCMD_TRIGGER_SCAN) has + * successfully completed. The application can retrieve the information + * after receiving the QCA_NL80211_VENDOR_SUBCMD_SCAN_DONE event from + * the driver. + */ + QCA_WLAN_VENDOR_ATTR_RROPAVAIL_INFO_VSCAN_END, +}; + +/** + * enum qca_wlan_vendor_attr_rrop_info - Specifies vendor specific + * Representative RF Operating Parameter (RROP) information. It is sent for the + * vendor command QCA_NL80211_VENDOR_SUBCMD_GET_RROP_INFO. This information is + * intended for use by external Auto Channel Selection applications. It provides + * guidance values for some RF parameters that are used by the system during + * operation. These values could vary by channel, band, radio, and so on. + */ +enum qca_wlan_vendor_attr_rrop_info { + QCA_WLAN_VENDOR_ATTR_RROP_INFO_INVALID = 0, + + /* Representative Tx Power List (RTPL) which has an array of nested + * values as per attributes in enum qca_wlan_vendor_attr_rtplinst. + */ + QCA_WLAN_VENDOR_ATTR_RROP_INFO_RTPL = 1, + + QCA_WLAN_VENDOR_ATTR_RROP_INFO_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_RROP_INFO_MAX = + QCA_WLAN_VENDOR_ATTR_RROP_INFO_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_rtplinst - Specifies attributes for individual list + * entry instances in the Representative Tx Power List (RTPL). It provides + * simplified power values intended for helping external Auto channel Selection + * applications compare potential Tx power performance between channels, other + * operating conditions remaining identical. These values are not necessarily + * the actual Tx power values that will be used by the system. They are also not + * necessarily the max or average values that will be used. Instead, they are + * relative, summarized keys for algorithmic use computed by the driver or + * underlying firmware considering a number of vendor specific factors. + */ +enum qca_wlan_vendor_attr_rtplinst { + QCA_WLAN_VENDOR_ATTR_RTPLINST_INVALID = 0, + + /* Primary channel number (u8). + * Note: If both the driver and user space application support the + * 6 GHz band, this attribute is deprecated and + * QCA_WLAN_VENDOR_ATTR_RTPLINST_PRIMARY_FREQUENCY should be used. To + * maintain backward compatibility, + * QCA_WLAN_VENDOR_ATTR_RTPLINST_PRIMARY is still used if either the + * driver or user space application or both do not support the 6 GHz + * band. + */ + QCA_WLAN_VENDOR_ATTR_RTPLINST_PRIMARY = 1, + /* Representative Tx power in dBm (s32) with emphasis on throughput. */ + QCA_WLAN_VENDOR_ATTR_RTPLINST_TXPOWER_THROUGHPUT = 2, + /* Representative Tx power in dBm (s32) with emphasis on range. */ + QCA_WLAN_VENDOR_ATTR_RTPLINST_TXPOWER_RANGE = 3, + /* Primary channel center frequency (u32) in MHz */ + QCA_WLAN_VENDOR_ATTR_RTPLINST_PRIMARY_FREQUENCY = 4, + + QCA_WLAN_VENDOR_ATTR_RTPLINST_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_RTPLINST_MAX = + QCA_WLAN_VENDOR_ATTR_RTPLINST_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_mac - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO. + * + * @QCA_WLAN_VENDOR_ATTR_MAC_INFO: MAC mode info list which has an + * array of nested values as per attributes in + * enum qca_wlan_vendor_attr_mac_mode_info. + */ +enum qca_wlan_vendor_attr_mac { + QCA_WLAN_VENDOR_ATTR_MAC_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_MAC_INFO = 1, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_MAC_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_MAC_MAX = + QCA_WLAN_VENDOR_ATTR_MAC_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_mac_iface_info - Information of the connected + * WiFi netdev interface on a respective MAC. Used by the attribute + * QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO. + * + * @QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_ID: Wi-Fi Netdev's interface id(u32). + * @QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_FREQ: Associated frequency in MHz of + * the connected Wi-Fi interface(u32). + */ +enum qca_wlan_vendor_attr_mac_iface_info { + QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_IFINDEX = 1, + QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_FREQ = 2, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_MAX = + QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_mac_info - Points to MAC the information. + * Used by the attribute QCA_WLAN_VENDOR_ATTR_MAC_INFO of the + * vendor command QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO. + * + * @QCA_WLAN_VENDOR_ATTR_MAC_INFO_MAC_ID: Hardware MAC ID associated for the + * MAC (u32) + * @QCA_WLAN_VENDOR_ATTR_MAC_INFO_BAND: Band supported by the respective MAC + * at a given point. This is a u32 bitmask of BIT(NL80211_BAND_*) as described + * in enum nl80211_band. + * @QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO: Refers to list of WLAN net dev + * interfaces associated with this MAC. Represented by enum + * qca_wlan_vendor_attr_mac_iface_info. + */ +enum qca_wlan_vendor_attr_mac_info { + QCA_WLAN_VENDOR_ATTR_MAC_INFO_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_MAC_INFO_MAC_ID = 1, + QCA_WLAN_VENDOR_ATTR_MAC_INFO_BAND = 2, + QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO = 3, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_MAC_INFO_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_MAC_INFO_MAX = + QCA_WLAN_VENDOR_ATTR_MAC_INFO_AFTER_LAST - 1, + +}; + +#if !(defined (SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) && \ + !(defined(WITH_BACKPORTS)) + +static inline struct sk_buff * +backported_cfg80211_vendor_event_alloc(struct wiphy *wiphy, + struct wireless_dev *wdev, + int approxlen, + int event_idx, gfp_t gfp) +{ + struct sk_buff *skb; + + skb = cfg80211_vendor_event_alloc(wiphy, approxlen, event_idx, gfp); + + if (skb && wdev) { + struct nlattr *attr; + u32 ifindex = wdev->netdev->ifindex; + + nla_nest_cancel(skb, ((void **)skb->cb)[2]); + if (nla_put_u32(skb, NL80211_ATTR_IFINDEX, ifindex)) + goto nla_fail; + + attr = nla_nest_start(skb, NL80211_ATTR_VENDOR_DATA); + ((void **)skb->cb)[2] = attr; + } + + return skb; + +nla_fail: + kfree_skb(skb); + + return NULL; +} +#define cfg80211_vendor_event_alloc backported_cfg80211_vendor_event_alloc +#endif + +/** + * enum he_fragmentation_val - HE fragmentation support values + * Indicates level of dynamic fragmentation that is supported by + * a STA as a recipient. + * HE fragmentation values are defined as per 11ax spec and are used in + * HE capability IE to advertise the support. These values are validated + * in the driver to check the device capability and advertised in the HE + * capability element. + * + * @HE_FRAG_DISABLE: no support for dynamic fragmentation + * @HE_FRAG_LEVEL1: support for dynamic fragments that are + * contained within an MPDU or S-MPDU, no support for dynamic fragments + * within an A-MPDU that is not an S-MPDU. + * @HE_FRAG_LEVEL2: support for dynamic fragments that are + * contained within an MPDU or S-MPDU and support for up to one dynamic + * fragment for each MSDU, each A-MSDU if supported by the recipient, and + * each MMPDU within an A-MPDU or multi-TID A-MPDU that is not an + * MPDU or S-MPDU. + * @HE_FRAG_LEVEL3: support for dynamic fragments that are + * contained within an MPDU or S-MPDU and support for multiple dynamic + * fragments for each MSDU and for each A-MSDU if supported by the + * recipient within an A-MPDU or multi-TID AMPDU and up to one dynamic + * fragment for each MMPDU in a multi-TID A-MPDU that is not an S-MPDU. + */ +enum he_fragmentation_val { + HE_FRAG_DISABLE, + HE_FRAG_LEVEL1, + HE_FRAG_LEVEL2, + HE_FRAG_LEVEL3, +}; + +/** + * enum he_mcs_config - HE MCS support configuration + * + * Configures the HE Tx/Rx MCS map in HE capability IE for given bandwidth. + * These values are used in driver to configure the HE MCS map to advertise + * Tx/Rx MCS map in HE capability and these values are applied for all the + * streams supported by the device. To configure MCS for different bandwidths, + * vendor command needs to be sent using this attribute with appropriate value. + * For example, to configure HE_80_MCS_0_7, send vendor command using HE MCS + * attribute with QCA_WLAN_VENDOR_ATTR_HE_80_MCS0_7. And to configure HE MCS + * for HE_160_MCS0_11 send this command using HE MCS config attribute with + * value QCA_WLAN_VENDOR_ATTR_HE_160_MCS0_11; + * + * @HE_80_MCS0_7: support for HE 80/40/20MHz MCS 0 to 7 + * @HE_80_MCS0_9: support for HE 80/40/20MHz MCS 0 to 9 + * @HE_80_MCS0_11: support for HE 80/40/20MHz MCS 0 to 11 + * @HE_160_MCS0_7: support for HE 160MHz MCS 0 to 7 + * @HE_160_MCS0_9: support for HE 160MHz MCS 0 to 9 + * @HE_160_MCS0_11: support for HE 160MHz MCS 0 to 11 + * @HE_80p80_MCS0_7: support for HE 80p80MHz MCS 0 to 7 + * @HE_80p80_MCS0_9: support for HE 80p80MHz MCS 0 to 9 + * @HE_80p80_MCS0_11: support for HE 80p80MHz MCS 0 to 11 + */ +enum he_mcs_config { + HE_80_MCS0_7 = 0, + HE_80_MCS0_9 = 1, + HE_80_MCS0_11 = 2, + HE_160_MCS0_7 = 4, + HE_160_MCS0_9 = 5, + HE_160_MCS0_11 = 6, + HE_80p80_MCS0_7 = 8, + HE_80p80_MCS0_9 = 9, + HE_80p80_MCS0_11 = 10, +}; + +/** + * enum qca_wlan_ba_session_config - BA session configuration + * + * Indicates the configuration values for BA session configuration attribute. + * + * @QCA_WLAN_ADD_BA: Establish a new BA session with given configuration. + * @QCA_WLAN_DELETE_BA: Delete the existing BA session for given TID. + */ +enum qca_wlan_ba_session_config { + QCA_WLAN_ADD_BA = 1, + QCA_WLAN_DELETE_BA = 2, +}; + +/** + * enum qca_wlan_ac_type - access category type + * + * Indicates the access category type value. + * + * @QCA_WLAN_AC_BE: BE access category + * @QCA_WLAN_AC_BK: BK access category + * @QCA_WLAN_AC_VI: VI access category + * @QCA_WLAN_AC_VO: VO access category + * @QCA_WLAN_AC_ALL: All ACs + */ +enum qca_wlan_ac_type { + QCA_WLAN_AC_BE = 0, + QCA_WLAN_AC_BK = 1, + QCA_WLAN_AC_VI = 2, + QCA_WLAN_AC_VO = 3, + QCA_WLAN_AC_ALL = 4, +}; + +/** + * enum qca_wlan_he_ltf_cfg - HE LTF configuration + * + * Indicates the HE LTF configuration value. + * + * @QCA_WLAN_HE_LTF_AUTO: HE-LTF is automatically set to + * the mandatory HE-LTF based on the GI setting + * @QCA_WLAN_HE_LTF_1X: 1X HE LTF is 3.2us LTF + * @QCA_WLAN_HE_LTF_2X: 2X HE LTF is 6.4us LTF + * @QCA_WLAN_HE_LTF_4X: 4X HE LTF is 12.8us LTF + */ +enum qca_wlan_he_ltf_cfg { + QCA_WLAN_HE_LTF_AUTO = 0, + QCA_WLAN_HE_LTF_1X = 1, + QCA_WLAN_HE_LTF_2X = 2, + QCA_WLAN_HE_LTF_4X = 3, +}; + +/** + * enum qca_wlan_he_mac_padding_dur - HE trigger frame MAC padding duration + * + * Indicates the HE trigger frame MAC padding duration value. + * + * @QCA_WLAN_HE_NO_ADDITIONAL_PROCESS_TIME: no additional time required to + * process the trigger frame. + * @QCA_WLAN_HE_8US_OF_PROCESS_TIME: indicates the 8us of processing time for + * trigger frame. + * @QCA_WLAN_HE_16US_OF_PROCESS_TIME: indicates the 16us of processing time for + * trigger frame. + */ +enum qca_wlan_he_mac_padding_dur { + QCA_WLAN_HE_NO_ADDITIONAL_PROCESS_TIME = 0, + QCA_WLAN_HE_8US_OF_PROCESS_TIME = 1, + QCA_WLAN_HE_16US_OF_PROCESS_TIME = 2, +}; + +/** + * enum qca_wlan_he_om_ctrl_ch_bw - HE OM control field BW configuration + * + * Indicates the HE Operating mode control channel width setting value. + * + * @QCA_WLAN_HE_OM_CTRL_BW_20M: Primary 20 MHz + * @QCA_WLAN_HE_OM_CTRL_BW_40M: Primary 40 MHz + * @QCA_WLAN_HE_OM_CTRL_BW_80M: Primary 80 MHz + * @QCA_WLAN_HE_OM_CTRL_BW_160M: 160 MHz and 80+80 MHz + */ +enum qca_wlan_he_om_ctrl_ch_bw { + QCA_WLAN_HE_OM_CTRL_BW_20M = 0, + QCA_WLAN_HE_OM_CTRL_BW_40M = 1, + QCA_WLAN_HE_OM_CTRL_BW_80M = 2, + QCA_WLAN_HE_OM_CTRL_BW_160M = 3, +}; + +/** + * enum qca_wlan_vendor_attr_he_omi_tx: Represents attributes for + * HE operating mode control transmit request. These attributes are + * sent as part of QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_OMI_TX and + * QCA_NL80211_VENDOR_SUBCMD_WIFI_TEST_CONFIGURATION. + * + * @QCA_WLAN_VENDOR_ATTR_HE_OMI_RX_NSS: Mandatory 8-bit unsigned value + * indicates the maximum number of spatial streams, NSS, that the STA + * supports in reception for PPDU bandwidths less than or equal to 80 MHz + * and is set to NSS - 1. + * + * @QCA_WLAN_VENDOR_ATTR_HE_OMI_CH_BW: Mandatory 8-bit unsigned value + * indicates the operating channel width supported by the STA for both + * reception and transmission. Uses enum qca_wlan_he_om_ctrl_ch_bw values. + * + * @QCA_WLAN_VENDOR_ATTR_HE_OMI_ULMU_DISABLE: Mandatory 8-bit unsigned value + * indicates the all trigger based UL MU operations by the STA. + * 0 - UL MU operations are enabled by the STA. + * 1 - All triggered UL MU transmissions are suspended by the STA. + * + * @QCA_WLAN_VENDOR_ATTR_HE_OMI_TX_NSTS: Mandatory 8-bit unsigned value + * indicates the maximum number of space-time streams, NSTS, that + * the STA supports in transmission and is set to NSTS - 1. + * + * @QCA_WLAN_VENDOR_ATTR_HE_OMI_ULMU_DATA_DISABLE: 8-bit unsigned value + * combined with the UL MU Disable subfield and the recipient's setting + * of the OM Control UL MU Data Disable RX Support subfield in the HE MAC + * capabilities to determine which HE TB PPDUs are possible by the + * STA to transmit. + * 0 - UL MU data operations are enabled by the STA. + * 1 - Determine which HE TB PPDU types are allowed by the STA if UL MU disable + * bit is not set, else UL MU Tx is suspended. + * + */ +enum qca_wlan_vendor_attr_he_omi_tx { + QCA_WLAN_VENDOR_ATTR_HE_OMI_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_HE_OMI_RX_NSS = 1, + QCA_WLAN_VENDOR_ATTR_HE_OMI_CH_BW = 2, + QCA_WLAN_VENDOR_ATTR_HE_OMI_ULMU_DISABLE = 3, + QCA_WLAN_VENDOR_ATTR_HE_OMI_TX_NSTS = 4, + QCA_WLAN_VENDOR_ATTR_HE_OMI_ULMU_DATA_DISABLE = 5, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_HE_OMI_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_HE_OMI_MAX = + QCA_WLAN_VENDOR_ATTR_HE_OMI_AFTER_LAST - 1, +}; + +/* Attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_WIFI_TEST_CONFIGURATION + */ +enum qca_wlan_vendor_attr_wifi_test_config { + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_INVALID = 0, + /* 8-bit unsigned value to configure the driver to enable/disable + * WMM feature. This attribute is used to configure testbed device. + * 1-enable, 0-disable + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_WMM_ENABLE = 1, + + /* 8-bit unsigned value to configure the driver to accept/reject + * the addba request from peer. This attribute is used to configure + * the testbed device. + * 1-accept addba, 0-reject addba + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_ACCEPT_ADDBA_REQ = 2, + + /* 8-bit unsigned value to configure the driver to send or not to + * send the addba request to peer. + * This attribute is used to configure the testbed device. + * 1-send addba, 0-do not send addba + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_SEND_ADDBA_REQ = 3, + + /* 8-bit unsigned value to indicate the HE fragmentation support. + * Uses enum he_fragmentation_val values. + * This attribute is used to configure the testbed device to + * allow the advertised hardware capabilities to be downgraded + * for testing purposes. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_FRAGMENTATION = 4, + + /* 8-bit unsigned value to indicate the HE MCS support. + * Uses enum he_mcs_config values. + * This attribute is used to configure the testbed device to + * allow the advertised hardware capabilities to be downgraded + * for testing purposes. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_MCS = 5, + + /* 8-bit unsigned value to configure the driver to allow or not to + * allow the connection with WEP/TKIP in HT/VHT/HE modes. + * This attribute is used to configure the testbed device. + * 1-allow wep/tkip in HT/VHT/HE, 0-do not allow wep/tkip + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_WEP_TKIP_IN_HE = 6, + + /* 8-bit unsigned value to configure the driver to add a + * new BA session or delete the existing BA session for + * given TID. ADDBA command uses the buffer size and tid + * configuration if user specifies the values else default + * value for buffer size is used for all tids if the tid + * also not specified. For DEL_BA command TID value is + * required to process the command. + * Uses enum qca_wlan_ba_session_config values. + * This attribute is used to configure the testbed device. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_ADD_DEL_BA_SESSION = 7, + + /* 16-bit unsigned value to configure the buffer size in addba + * request and response frames. + * This attribute is used to configure the testbed device. + * The range of the value is 0 to 256. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_ADDBA_BUFF_SIZE = 8, + + /* 8-bit unsigned value to configure the buffer size in addba + * request and response frames. + * This attribute is used to configure the testbed device. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_BA_TID = 9, + + /* 8-bit unsigned value to configure the no ack policy. + * To configure no ack policy, access category value + * is required to process the command. + * This attribute is used to configure the testbed device. + * 1 - enable no ack, 0 - disable no ack + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_ENABLE_NO_ACK = 10, + + /* 8-bit unsigned value to configure the AC for no ack policy + * This attribute is used to configure the testbed device. + * uses the enum qca_wlan_ac_type values + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_NO_ACK_AC = 11, + + /* 8-bit unsigned value to configure the HE LTF + * This attribute is used to configure the testbed device. + * Uses the enum qca_wlan_he_ltf_cfg values. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_LTF = 12, + + /* 8-bit unsigned value to configure the tx beamformee. + * This attribute is used to configure the testbed device. + * 1 - enable, 0 - disable. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_ENABLE_TX_BEAMFORMEE = 13, + + /* 8-bit unsigned value to configure the tx beamformee number of + * space-time streams. + * This attribute is used to configure the testbed device. + * The range of the value is 0 to 8 + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_TX_BEAMFORMEE_NSTS = 14, + + /* 8-bit unsigned value to configure the MU EDCA params for given AC + * This attribute is used to configure the testbed device. + * Uses the enum qca_wlan_ac_type values. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_MU_EDCA_AC = 15, + + /* 8-bit unsigned value to configure the MU EDCA AIFSN for given AC + * To configure MU EDCA AIFSN value, MU EDCA access category value + * is required to process the command. + * This attribute is used to configure the testbed device. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_MU_EDCA_AIFSN = 16, + + /* 8-bit unsigned value to configure the MU EDCA ECW min value for + * given AC. + * To configure MU EDCA ECW min value, MU EDCA access category value + * is required to process the command. + * This attribute is used to configure the testbed device. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_MU_EDCA_ECWMIN = 17, + + /* 8-bit unsigned value to configure the MU EDCA ECW max value for + * given AC. + * To configure MU EDCA ECW max value, MU EDCA access category value + * is required to process the command. + * This attribute is used to configure the testbed device. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_MU_EDCA_ECWMAX = 18, + + /* 8-bit unsigned value to configure the MU EDCA timer for given AC + * To configure MU EDCA timer value, MU EDCA access category value + * is required to process the command. + * This attribute is used to configure the testbed device. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_MU_EDCA_TIMER = 19, + + /* 8-bit unsigned value to configure the HE trigger frame MAC padding + * duration. + * This attribute is used to configure the testbed device. + * Uses the enum qca_wlan_he_mac_padding_dur values. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_MAC_PADDING_DUR = 20, + + /* 8-bit unsigned value to override the MU EDCA params to defaults + * regardless of the AP beacon MU EDCA params. If it is enabled use + * the default values else use the MU EDCA params from AP beacon. + * This attribute is used to configure the testbed device. + * 1 - enable override, 0 - disable. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_OVERRIDE_MU_EDCA = 21, + + /* 8-bit unsigned value to configure the support for receiving + * an MPDU that contains an operating mode control subfield. + * This attribute is used to configure the testbed device. + * 1-enable, 0-disable. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_OM_CTRL_SUPP = 22, + + /* Nested attribute values required to setup the TWT session. + * enum qca_wlan_vendor_attr_twt_setup provides the necessary + * information to set up the session. It contains broadcast flags, + * set_up flags, trigger value, flow type, flow ID, wake interval + * exponent, protection, target wake time, wake duration, wake interval + * mantissa. These nested attributes are used to setup a host triggered + * TWT session. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_TWT_SETUP = 23, + + /* This nested attribute is used to terminate the current TWT session. + * It does not currently carry any attributes. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_TWT_TERMINATE = 24, + + /* This nested attribute is used to suspend the current TWT session. + * It does not currently carry any attributes. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_TWT_SUSPEND = 25, + + /* Nested attribute values to indicate the request for resume. + * This attribute is used to resume the TWT session. + * enum qca_wlan_vendor_attr_twt_resume provides the necessary + * parameters required to resume the TWT session. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_TWT_RESUME = 26, + + /* 8-bit unsigned value to set the HE operating mode control + * (OM CTRL) Channel Width subfield. + * The Channel Width subfield indicates the operating channel width + * supported by the STA for both reception and transmission. + * Uses the enum qca_wlan_he_om_ctrl_ch_bw values. + * This setting is cleared with the + * QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_CLEAR_HE_OM_CTRL_CONFIG + * flag attribute to reset defaults. + * This attribute is used to configure the testbed device. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_OM_CTRL_BW = 27, + + /* 8-bit unsigned value to configure the number of spatial + * streams in HE operating mode control field. + * This setting is cleared with the + * QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_CLEAR_HE_OM_CTRL_CONFIG + * flag attribute to reset defaults. + * This attribute is used to configure the testbed device. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_OM_CTRL_NSS = 28, + + /* Flag attribute to configure the UL MU disable bit in + * HE operating mode control field. + * This setting is cleared with the + * QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_CLEAR_HE_OM_CTRL_CONFIG + * flag attribute to reset defaults. + * This attribute is used to configure the testbed device. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_OM_CTRL_UL_MU_DISABLE = 29, + + /* Flag attribute to clear the previously set HE operating mode + * control field configuration. + * This attribute is used to configure the testbed device to reset + * defaults to clear any previously set HE operating mode control + * field configuration. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_CLEAR_HE_OM_CTRL_CONFIG = 30, + + /* 8-bit unsigned value to configure HE single user PPDU + * transmission. By default this setting is disabled and it + * is disabled in the reset defaults of the device configuration. + * This attribute is used to configure the testbed device. + * 1-enable, 0-disable + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_TX_SUPPDU = 31, + + /* 8-bit unsigned value to configure action frame transmission + * in HE trigger based PPDU transmission. + * By default this setting is disabled and it is disabled in + * the reset defaults of the device configuration. + * This attribute is used to configure the testbed device. + * 1-enable, 0-disable + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_ACTION_TX_TB_PPDU = 32, + + /* Nested attribute to indicate HE operating mode control field + * transmission. It contains operating mode control field Nss, + * channel bandwidth, Tx Nsts and UL MU disable attributes. + * These nested attributes are used to send HE operating mode control + * with configured values. + * Uses the enum qca_wlan_vendor_attr_he_omi_tx attributes. + * This attribute is used to configure the testbed device. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_OMI_TX = 33, + + /* 8-bit unsigned value to configure +HTC_HE support to indicate the + * support for the reception of a frame that carries an HE variant + * HT Control field. + * This attribute is used to configure the testbed device. + * 1-enable, 0-disable + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_HTC_HE_SUPP = 34, + + /* 8-bit unsigned value to configure VHT support in 2.4G band. + * This attribute is used to configure the testbed device. + * 1-enable, 0-disable + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_ENABLE_2G_VHT = 35, + + /* 8-bit unsigned value to configure HE testbed defaults. + * This attribute is used to configure the testbed device. + * 1-set the device HE capabilities to testbed defaults. + * 0-reset the device HE capabilities to supported config. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_SET_HE_TESTBED_DEFAULTS = 36, + + /* 8-bit unsigned value to configure twt request support. + * This attribute is used to configure the testbed device. + * 1-enable, 0-disable. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_TWT_REQ_SUPPORT = 37, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_MAX = + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_twt_operation - Operation of the config TWT request + * Values for %QCA_WLAN_VENDOR_ATTR_CONFIG_TWT_OPERATION. + * + * @QCA_WLAN_TWT_SET: Setup a TWT session. Required parameters are configured + * through QCA_WLAN_VENDOR_ATTR_CONFIG_TWT_PARAMS. Refers the enum + * qca_wlan_vendor_attr_twt_setup. + * + * @QCA_WLAN_TWT_GET: Get the configured TWT parameters. Required parameters are + * obtained through QCA_WLAN_VENDOR_ATTR_CONFIG_TWT_PARAMS. Refers the enum + * qca_wlan_vendor_attr_twt_setup. + * + * @QCA_WLAN_TWT_TERMINATE: Terminate the TWT session. Does not carry any + * parameters. Valid only after the TWT session is setup. + * + * @QCA_WLAN_TWT_SUSPEND: Terminate the TWT session. Does not carry any + * parameters. Valid only after the TWT session is setup. + * + * @QCA_WLAN_TWT_RESUME: Resume the TWT session. Required parameters are + * configured through QCA_WLAN_VENDOR_ATTR_CONFIG_TWT_PARAMS. Refers the enum + * qca_wlan_vendor_attr_twt_resume. + */ +enum qca_wlan_twt_operation { + QCA_WLAN_TWT_SET = 0, + QCA_WLAN_TWT_GET = 1, + QCA_WLAN_TWT_TERMINATE = 2, + QCA_WLAN_TWT_SUSPEND = 3, + QCA_WLAN_TWT_RESUME = 4, +}; + +/* enum qca_wlan_vendor_attr_config_twt: Defines attributes used by + * %QCA_NL80211_VENDOR_SUBCMD_CONFIG_TWT + * + * @QCA_WLAN_VENDOR_ATTR_CONFIG_TWT_OPERATION: + * u8 attribute. Specify the TWT operation of this request. Possible values + * are defined in enum qca_wlan_twt_operation. The parameters for the + * respective operation is specified through + * QCA_WLAN_VENDOR_ATTR_CONFIG_TWT_PARAMS. + * + * @QCA_WLAN_VENDOR_ATTR_CONFIG_TWT_PARAMS: Nested attribute representing the + * parameters configured for TWT. These parameters are represented by + * enum qca_wlan_vendor_attr_twt_setup or enum qca_wlan_vendor_attr_twt_resume + * based on the operation. + */ +enum qca_wlan_vendor_attr_config_twt { + QCA_WLAN_VENDOR_ATTR_CONFIG_TWT_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_CONFIG_TWT_OPERATION = 1, + QCA_WLAN_VENDOR_ATTR_CONFIG_TWT_PARAMS = 2, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_CONFIG_TWT_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_CONFIG_TWT_MAX = + QCA_WLAN_VENDOR_ATTR_CONFIG_TWT_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_twt_setup: Represents attributes for + * TWT (Target Wake Time) setup request. These attributes are sent as part of + * %QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_TWT_SETUP and + * %QCA_NL80211_VENDOR_SUBCMD_WIFI_TEST_CONFIGURATION. Also used by + * attributes through %QCA_NL80211_VENDOR_SUBCMD_CONFIG_TWT. + * + * @QCA_WLAN_VENDOR_ATTR_TWT_SETUP_BCAST: Flag attribute. + * Disable (flag attribute not present) - Individual TWT + * Enable (flag attribute present) - Broadcast TWT. + * Individual means the session is between the STA and the AP. + * This session is established using a separate negotiation between + * STA and AP. + * Broadcast means the session is across multiple STAs and an AP. The + * configuration parameters are announced in Beacon frames by the AP. + * + * @QCA_WLAN_VENDOR_ATTR_TWT_SETUP_REQ_TYPE: Required (u8). + * Unsigned 8-bit qca_wlan_vendor_twt_setup_req_type to + * specify the TWT request type + * + * @QCA_WLAN_VENDOR_ATTR_TWT_SETUP_TRIGGER: Flag attribute + * Enable (flag attribute present) - TWT with trigger support. + * Disable (flag attribute not present) - TWT without trigger support. + * Trigger means the AP will send the trigger frame to allow STA to send data. + * Without trigger, the STA will wait for the MU EDCA timer before + * transmitting the data. + * + * @QCA_WLAN_VENDOR_ATTR_TWT_SETUP_FLOW_TYPE: Required (u8) + * 0 - Announced TWT - In this mode, STA may skip few service periods to + * save more power. If STA wants to wake up, it will send a PS-POLL/QoS + * NULL frame to AP. + * 1 - Unannounced TWT - The STA will wakeup during every SP. + * + * @QCA_WLAN_VENDOR_ATTR_TWT_SETUP_FLOW_ID: Optional (u8) + * Flow ID is the unique identifier for each TWT session. + * Currently this is not required and dialog ID will be set to zero. + * + * @QCA_WLAN_VENDOR_ATTR_TWT_SETUP_WAKE_INTVL_EXP: Required (u8) + * This attribute (exp) is used along with the mantissa to derive the + * wake interval using the following formula: + * pow(2,exp) = wake_intvl_us/wake_intvl_mantis + * Wake interval is the interval between 2 successive SP. + * + * @QCA_WLAN_VENDOR_ATTR_TWT_SETUP_PROTECTION: Flag attribute + * Enable (flag attribute present) - Protection required. + * Disable (flag attribute not present) - Protection not required. + * If protection is enabled, then the AP will use protection + * mechanism using RTS/CTS to self to reserve the airtime. + * + * @QCA_WLAN_VENDOR_ATTR_TWT_SETUP_WAKE_TIME: Optional (u32) + * This attribute is used as the SP offset which is the offset from + * TSF after which the wake happens. The units are in microseconds. If + * this attribute is not provided, then the value will be set to zero. + * + * @QCA_WLAN_VENDOR_ATTR_TWT_SETUP_WAKE_DURATION: Required (u32) + * This is the duration of the service period. The units are in TU. + * + * @QCA_WLAN_VENDOR_ATTR_TWT_SETUP_WAKE_INTVL_MANTISSA: Required (u32) + * This attribute is used to configure wake interval mantissa. + * The units are in TU. + */ +enum qca_wlan_vendor_attr_twt_setup { + QCA_WLAN_VENDOR_ATTR_TWT_SETUP_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TWT_SETUP_BCAST = 1, + QCA_WLAN_VENDOR_ATTR_TWT_SETUP_REQ_TYPE = 2, + QCA_WLAN_VENDOR_ATTR_TWT_SETUP_TRIGGER = 3, + QCA_WLAN_VENDOR_ATTR_TWT_SETUP_FLOW_TYPE = 4, + QCA_WLAN_VENDOR_ATTR_TWT_SETUP_FLOW_ID = 5, + QCA_WLAN_VENDOR_ATTR_TWT_SETUP_WAKE_INTVL_EXP = 6, + QCA_WLAN_VENDOR_ATTR_TWT_SETUP_PROTECTION = 7, + QCA_WLAN_VENDOR_ATTR_TWT_SETUP_WAKE_TIME = 8, + QCA_WLAN_VENDOR_ATTR_TWT_SETUP_WAKE_DURATION = 9, + QCA_WLAN_VENDOR_ATTR_TWT_SETUP_WAKE_INTVL_MANTISSA = 10, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_TWT_SETUP_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TWT_SETUP_MAX = + QCA_WLAN_VENDOR_ATTR_TWT_SETUP_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_twt_resume: Represents attributes for + * TWT (Target Wake Time) resume request. These attributes are sent as part of + * %QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_TWT_RESUME and + * %QCA_NL80211_VENDOR_SUBCMD_WIFI_TEST_CONFIGURATION. Also used by + * attributes through %QCA_NL80211_VENDOR_SUBCMD_CONFIG_TWT. + * + * @QCA_WLAN_VENDOR_ATTR_TWT_RESUME_NEXT_TWT: Optional (u8) + * This attribute is used as the SP offset which is the offset from + * TSF after which the wake happens. The units are in microseconds. + * If this attribute is not provided, then the value will be set to + * zero. + * + * @QCA_WLAN_VENDOR_ATTR_TWT_RESUME_NEXT_TWT_SIZE: Required (u32) + * This attribute represents the next TWT subfield size. + */ +enum qca_wlan_vendor_attr_twt_resume { + QCA_WLAN_VENDOR_ATTR_TWT_RESUME_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TWT_RESUME_NEXT_TWT = 1, + QCA_WLAN_VENDOR_ATTR_TWT_RESUME_NEXT_TWT_SIZE = 2, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_TWT_RESUME_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TWT_RESUME_MAX = + QCA_WLAN_VENDOR_ATTR_TWT_RESUME_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_twt_setup_req_type - Required (u8) + * Represents the setup type being requested for TWT. + * @QCA_WLAN_VENDOR_TWT_SETUP_REQUEST: STA is not specifying all the TWT + * parameters but relying on AP to fill the parameters during the negotiation. + * @QCA_WLAN_VENDOR_TWT_SETUP_SUGGEST: STA will provide all the suggested + * values which the AP may accept or AP may provide alternative parameters + * which the STA may accept. + * @QCA_WLAN_VENDOR_TWT_SETUP_DEMAND: STA is not willing to accept any + * alternate parameters than the requested ones. + */ +enum qca_wlan_vendor_twt_setup_req_type { + QCA_WLAN_VENDOR_TWT_SETUP_REQUEST = 1, + QCA_WLAN_VENDOR_TWT_SETUP_SUGGEST = 2, + QCA_WLAN_VENDOR_TWT_SETUP_DEMAND = 3, +}; + +/** + * enum qca_wlan_throughput_level - Current throughput level + * + * Indicates the current level of throughput calculated by driver. The driver + * may choose different thresholds to decide whether the throughput level is + * low or medium or high based on variety of parameters like physical link + * capacity of current connection, number of pakcets being dispatched per + * second etc. The throughput level events might not be consistent with the + * actual current throughput value being observed. + * + * @QCA_WLAN_THROUGHPUT_LEVEL_LOW: Low level of throughput + * @QCA_WLAN_THROUGHPUT_LEVEL_MEDIUM: Medium level of throughput + * @QCA_WLAN_THROUGHPUT_LEVEL_HIGH: High level of throughput + */ +enum qca_wlan_throughput_level { + QCA_WLAN_THROUGHPUT_LEVEL_LOW = 0, + QCA_WLAN_THROUGHPUT_LEVEL_MEDIUM = 1, + QCA_WLAN_THROUGHPUT_LEVEL_HIGH = 2, +}; + +/** + * enum qca_wlan_vendor_attr_throughput_change - Vendor subcmd attributes to + * report throughput changes from driver to user space. enum values are used + * for NL attributes sent with + * %QCA_NL80211_VENDOR_SUBCMD_THROUGHPUT_CHANGE_EVENT sub command. + */ +enum qca_wlan_vendor_attr_throughput_change { + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_INVALID = 0, + /* + * Indicates the direction of throughput in which the change is being + * reported. u8 attribute. Value is 0 for TX and 1 for RX. + */ + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_DIRECTION = 1, + + /* + * Indicates the newly observed throughput level. + * qca_wlan_throughput_level describes the possible range of values. + * u8 attribute. + */ + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_THROUGHPUT_LEVEL = 2, + + /* + * Indicates the driver's guidance on the new value to be set to + * kernel's tcp parameter tcp_limit_output_bytes. u32 attribute. Driver + * may optionally include this attribute. + */ + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_TCP_LIMIT_OUTPUT_BYTES = 3, + + /* + * Indicates the driver's guidance on the new value to be set to + * kernel's tcp parameter tcp_adv_win_scale. s8 attribute. Possible + * values are from -31 to 31. Driver may optionally include this + * attribute. + */ + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_TCP_ADV_WIN_SCALE = 4, + + /* + * Indicates the driver's guidance on the new value to be set to + * kernel's tcp parameter tcp_delack_seg. u32 attribute. Driver may + * optionally include this attribute. + */ + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_TCP_DELACK_SEG = 5, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_MAX = + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_AFTER_LAST - 1, +}; + +/* enum qca_wlan_nan_subcmd_type - Type of NAN command used by attribute + * QCA_WLAN_VENDOR_ATTR_NAN_SUBCMD_TYPE as a part of vendor command + * QCA_NL80211_VENDOR_SUBCMD_NAN_EXT. + */ +enum qca_wlan_nan_ext_subcmd_type { + /* Subcmd of type NAN Enable Request */ + QCA_WLAN_NAN_EXT_SUBCMD_TYPE_ENABLE_REQ = 1, + /* Subcmd of type NAN Disable Request */ + QCA_WLAN_NAN_EXT_SUBCMD_TYPE_DISABLE_REQ = 2, +}; + +/** + * enum qca_wlan_vendor_attr_nan_params - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_NAN_EXT. + */ +enum qca_wlan_vendor_attr_nan_params { + QCA_WLAN_VENDOR_ATTR_NAN_INVALID = 0, + /* Carries NAN command for firmware component. Every vendor command + * QCA_NL80211_VENDOR_SUBCMD_NAN_EXT must contain this attribute with a + * payload containing the NAN command. NLA_BINARY attribute. + */ + QCA_WLAN_VENDOR_ATTR_NAN_CMD_DATA = 1, + /* Indicates the type of NAN command sent with + * QCA_NL80211_VENDOR_SUBCMD_NAN_EXT. enum qca_wlan_nan_ext_subcmd_type + * describes the possible range of values. This attribute is mandatory + * if the command being issued is either + * QCA_WLAN_NAN_EXT_SUBCMD_TYPE_ENABLE_REQ or + * QCA_WLAN_NAN_EXT_SUBCMD_TYPE_DISABLE_REQ. NLA_U32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_NAN_SUBCMD_TYPE = 2, + /* Frequency (in MHz) of primary NAN discovery social channel in 2.4 GHz + * band. This attribute is mandatory when command type is + * QCA_WLAN_NAN_EXT_SUBCMD_TYPE_ENABLE_REQ. NLA_U32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_NAN_DISC_24GHZ_BAND_FREQ = 3, + /* Frequency (in MHz) of secondary NAN discovery social channel in 5 GHz + * band. This attribute is optional and should be included when command + * type is QCA_WLAN_NAN_EXT_SUBCMD_TYPE_ENABLE_REQ and NAN discovery + * has to be started on 5GHz along with 2.4GHz. NLA_U32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_NAN_DISC_5GHZ_BAND_FREQ = 4, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_NAN_PARAMS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_NAN_PARAMS_MAX = + QCA_WLAN_VENDOR_ATTR_NAN_PARAMS_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_cfr_method - QCA vendor CFR methods used by + * attribute QCA_WLAN_VENDOR_ATTR_PEER_CFR_METHOD as part of vendor + * command QCA_NL80211_VENDOR_SUBCMD_PEER_CFR_CAPTURE_CFG. + * @QCA_WLAN_VENDOR_CFR_METHOD_QOS_NULL: CFR method using QOS Null frame. + * @QCA_WLAN_VENDOR_CFR_QOS_NULL_WITH_PHASE: CFR method using QOS Null frame + * with phase + * @QCA_WLAN_VENDOR_CFR_PROBE_RESPONSE: CFR method using probe response frame + */ +enum qca_wlan_vendor_cfr_method { + QCA_WLAN_VENDOR_CFR_METHOD_QOS_NULL = 0, + QCA_WLAN_VENDOR_CFR_QOS_NULL_WITH_PHASE = 1, + QCA_WLAN_VENDOR_CFR_PROBE_RESPONSE = 2, +}; + +/** + * enum qca_wlan_vendor_cfr_capture_type - QCA vendor CFR capture type used by + * attribute QCA_WLAN_VENDOR_ATTR_PEER_CFR_CAPTURE_TYPE. + * @QCA_WLAN_VENDOR_CFR_DIRECT_FTM: Filter directed FTM ACK frames. + * @QCA_WLAN_VENDOR_CFR_ALL_FTM_ACK: Filter all FTM ACK frames. + * @QCA_WLAN_VENDOR_CFR_DIRECT_NDPA_NDP: Filter NDPA NDP directed frames. + * @QCA_WLAN_VENDOR_CFR_TA_RA: Filter frames based on TA/RA/Subtype which + * is provided by one or more of below attributes: + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_TA + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_RA + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_TA_MASK + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_RA_MASK + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_MGMT_FILTER + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_CTRL_FILTER + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_DATA_FILTER + * @QCA_WLAN_CFR_ALL_PACKET: Filter all packets. + * @QCA_WLAN_VENDOR_CFR_NDPA_NDP_ALL: Filter all NDPA NDP frames. + */ +enum qca_wlan_vendor_cfr_capture_type { + QCA_WLAN_VENDOR_CFR_DIRECT_FTM = 0, + QCA_WLAN_VENDOR_CFR_ALL_FTM_ACK = 1, + QCA_WLAN_VENDOR_CFR_DIRECT_NDPA_NDP = 2, + QCA_WLAN_VENDOR_CFR_TA_RA = 3, + QCA_WLAN_VENDOR_CFR_ALL_PACKET = 4, + QCA_WLAN_VENDOR_CFR_NDPA_NDP_ALL = 5, +}; + +/** + * enum qca_wlan_vendor_peer_cfr_capture_attr - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_PEER_CFR_CAPTURE_CFG to configure peer + * Channel Frequency Response capture parameters and enable periodic CFR + * capture. + * + * @QCA_WLAN_VENDOR_ATTR_CFR_PEER_MAC_ADDR: Optional (6-byte MAC address) + * MAC address of peer. This is for CFR version 1 only. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_ENABLE: Required (flag) + * Enable peer CFR Capture. This attribute is mandatory to + * enable peer CFR capture. If this attribute is not present, + * peer CFR capture is disabled. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_BANDWIDTH: Optional (u8) + * BW of measurement, attribute uses the values in enum nl80211_chan_width + * Supported values: 20, 40, 80, 80+80, 160. + * Note that all targets may not support all bandwidths. + * This attribute is mandatory for version 1 if attribute + * QCA_WLAN_VENDOR_ATTR_PEER_CFR_ENABLE is used. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_PERIODICITY: Optional (u32) + * Periodicity of CFR measurement in msec. + * Periodicity should be a multiple of Base timer. + * Current Base timer value supported is 10 msecs (default). + * 0 for one shot capture. + * This attribute is mandatory for version 1 if attribute + * QCA_WLAN_VENDOR_ATTR_PEER_CFR_ENABLE is used. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_METHOD: Optional (u8) + * Method used to capture Channel Frequency Response. + * Attribute uses the values defined in enum qca_wlan_vendor_cfr_method. + * This attribute is mandatory for version 1 if attribute + * QCA_WLAN_VENDOR_ATTR_PEER_CFR_ENABLE is used. + * + * @QCA_WLAN_VENDOR_ATTR_PERIODIC_CFR_CAPTURE_ENABLE: Optional (flag) + * Enable periodic CFR capture. + * This attribute is mandatory for version 1 to enable Periodic CFR capture. + * If this attribute is not present, periodic CFR capture is disabled. + * + * @QCA_WLAN_VENDOR_ATTR_CFR_VERSION: Optional (u8) + * Value is 1 or 2 since there are two versions of CFR capture. Two versions + * can't be enabled at same time. This attribute is mandatory if target + * support both versions and use one of them. + * + * @QCA_WLAN_VENDOR_ATTR_CFR_ENABLE_GROUP_BITMAP: Optional (u32) + * This attribute is mandatory for version 2 if + * QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_ENTRY is used. + * Bits 15:0 Bit fields indicating which group to be enabled. + * Bits 31:16 Reserved for future use. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_DURATION: Optional (u32) + * CFR capture duration in microsecond. This attribute is mandatory for + * version 2 if attribute QCA_WLAN_VENDOR_ATTR_PEER_CFR_INTERVAL is used. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_INTERVAL: Optional (u32) + * CFR capture interval in microsecond. This attribute is mandatory for + * version 2 if attribute QCA_WLAN_VENDOR_ATTR_PEER_CFR_DURATION is used. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_CAPTURE_TYPE: Optional (u32) + * CFR capture type is defined in enum qca_wlan_vendor_cfr_capture_type. + * This attribute is mandatory for version 2. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_UL_MU_MASK: Optional (u64) + * Bit fields indicating which user in the current UL MU + * transmissions are enabled for CFR capture. Bits 36 to 0 indicating + * user indexes for 37 users in a UL MU transmission. If bit 0 is set, + * then the CFR capture will happen for user index 0 in the current + * UL MU transmission. If bits 0,2 are set, then CFR capture for UL MU + * TX corresponds to user indices 0 and 2. Bits 63:37 Reserved for future use. + * This is for CFR version 2 only. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_FREEZE_TLV_DELAY_COUNT: Optional (u32) + * Indicates the number of consecutive Rx packets to be skipped + * before CFR capture is enabled again. + * This is for CFR version 2 only. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_TABLE: Nested attribute containing + * one or more %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_ENTRY attributes. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_ENTRY: Nested attribute containing + * the following GROUP attributes: + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_NUMBER, + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_TA, + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_RA, + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_TA_MASK, + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_RA_MASK, + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_NSS, + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_BW, + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_MGMT_FILTER, + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_CTRL_FILTER, + * %QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_DATA_FILTER + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_NUMBER: Optional (u32) + * Target support multiple groups for some configurations. Group number could be + * any value between 0 and 15. This is for CFR version 2 only. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_TA: Optional (6-byte MAC address) + * Transmitter address which is used to filter packets, this MAC address takes + * effect with QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_TA_MASK. + * This is for CFR version 2 only. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_RA: Optional (6-byte MAC address) + * Receiver address which is used to filter packets, this MAC address takes + * effect with QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_RA_MASK. + * This is for CFR version 2 only. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_TA_MASK: Optional (6-byte MAC address) + * Mask of transmitter address which is used to filter packets. + * This is for CFR version 2 only. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_RA_MASK: Optional (6-byte MAC address) + * Mask of receiver address which is used to filter packets. + * This is for CFR version 2 only. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_NSS: Optional (u32) + * Indicates packets with a specific NSS will be filtered for CFR capture. + * This is for CFR version 2 only. This is a bitmask. Bits 7:0, CFR capture will + * be done for packets matching the NSS specified within this bitmask. + * Bits 31:8 Reserved for future use. Bits 7:0 map to NSS: + * bit 0 : NSS 1 + * bit 1 : NSS 2 + * ... + * bit 7 : NSS 8 + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_BW: Optional (u32) + * Indicates packets with a specific BW will be filtered for CFR capture. + * This is for CFR version 2 only. This is a bitmask. Bits 4:0, CFR capture + * will be done for packets matching the bandwidths specified within this + * bitmask. Bits 31:5 Reserved for future use. Bits 4:0 map to bandwidth + * numerated in enum nl80211_band (although not all bands may be supported + * by a given device). + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_MGMT_FILTER: Optional (u32) + * Management packets matching the subtype filter categories will be + * filtered in by MAC for CFR capture. This is a bitmask, in which each bit + * represents the corresponding mgmt subtype value as per + * IEEE 802.11(2016) 9.2.4.1.3 Type and Subtype subfields. + * For example, beacon frame control type is 8, its value is 1<<8 = 0x100. + * This is for CFR version 2 only + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_CTRL_FILTER: Optional (u32) + * Control packets matching the subtype filter categories will be + * filtered in by MAC for CFR capture. This is a bitmask, in which each bit + * represents the corresponding control subtype value as per + * IEEE 802.11(2016) 9.2.4.1.3 Type and Subtype subfields. + * This is for CFR version 2 only. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_DATA_FILTER: Optional (u32) + * Data packets matching the subtype filter categories will be + * filtered in by MAC for CFR capture. This is a bitmask, in which each bit + * represents the corresponding data subtype value as per + * IEEE 802.11(2016) 9.2.4.1.3 Type and Subtype subfields. + * This is for CFR version 2 only. + * + */ +enum qca_wlan_vendor_peer_cfr_capture_attr { + QCA_WLAN_VENDOR_ATTR_PEER_CFR_CAPTURE_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_CFR_PEER_MAC_ADDR = 1, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_ENABLE = 2, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_BANDWIDTH = 3, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_PERIODICITY = 4, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_METHOD = 5, + QCA_WLAN_VENDOR_ATTR_PERIODIC_CFR_CAPTURE_ENABLE = 6, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_VERSION = 7, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_ENABLE_GROUP_BITMAP = 8, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_DURATION = 9, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_INTERVAL = 10, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_CAPTURE_TYPE = 11, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_UL_MU_MASK = 12, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_FREEZE_TLV_DELAY_COUNT = 13, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_TABLE = 14, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_ENTRY = 15, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_NUMBER = 16, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_TA = 17, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_RA = 18, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_TA_MASK = 19, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_RA_MASK = 20, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_NSS = 21, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_BW = 22, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_MGMT_FILTER = 23, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_CTRL_FILTER = 24, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_GROUP_DATA_FILTER = 25, + + /* Keep last */ + QCA_WLAN_VENDOR_ATTR_PEER_CFR_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_PEER_CFR_MAX = + QCA_WLAN_VENDOR_ATTR_PEER_CFR_AFTER_LAST - 1, +}; + +/** + * enum qca_coex_config_profiles - This enum defines different types of + * traffic streams that can be prioritized one over the other during coex + * scenarios. + * The types defined in this enum are categorized in the below manner. + * 0 - 31 values corresponds to WLAN + * 32 - 63 values corresponds to BT + * 64 - 95 values corresponds to Zigbee + * @QCA_WIFI_STA_DISCOVERY: Prioritize discovery frames for WLAN STA + * @QCA_WIFI_STA_CONNECTION: Prioritize connection frames for WLAN STA + * @QCA_WIFI_STA_CLASS_3_MGMT: Prioritize class 3 mgmt frames for WLAN STA + * @QCA_WIFI_STA_DATA : Prioritize data frames for WLAN STA + * @QCA_WIFI_STA_ALL: Priritize all frames for WLAN STA + * @QCA_WIFI_SAP_DISCOVERY: Prioritize discovery frames for WLAN SAP + * @QCA_WIFI_SAP_CONNECTION: Prioritize connection frames for WLAN SAP + * @QCA_WIFI_SAP_CLASS_3_MGMT: Prioritize class 3 mgmt frames for WLAN SAP + * @QCA_WIFI_SAP_DATA: Prioritize data frames for WLAN SAP + * @QCA_WIFI_SAP_ALL: Prioritize all frames for WLAN SAP + * @QCA_BT_A2DP: Prioritize BT A2DP + * @QCA_BT_BLE: Prioritize BT BLE + * @QCA_BT_SCO: Prioritize BT SCO + * @QCA_ZB_LOW: Prioritize Zigbee Low + * @QCA_ZB_HIGH: Prioritize Zigbee High + */ +enum qca_coex_config_profiles { + /* 0 - 31 corresponds to WLAN */ + QCA_WIFI_STA_DISCOVERY = 0, + QCA_WIFI_STA_CONNECTION = 1, + QCA_WIFI_STA_CLASS_3_MGMT = 2, + QCA_WIFI_STA_DATA = 3, + QCA_WIFI_STA_ALL = 4, + QCA_WIFI_SAP_DISCOVERY = 5, + QCA_WIFI_SAP_CONNECTION = 6, + QCA_WIFI_SAP_CLASS_3_MGMT = 7, + QCA_WIFI_SAP_DATA = 8, + QCA_WIFI_SAP_ALL = 9, + QCA_WIFI_CASE_MAX = 31, + /* 32 - 63 corresponds to BT */ + QCA_BT_A2DP = 32, + QCA_BT_BLE = 33, + QCA_BT_SCO = 34, + QCA_BT_CASE_MAX = 63, + /* 64 - 95 corresponds to Zigbee */ + QCA_ZB_LOW = 64, + QCA_ZB_HIGH = 65, + QCA_ZB_CASE_MAX = 95, + /* 0xff is default value if the u8 profile value is not set. */ + QCA_PROFILE_DEFAULT_VALUE = 255 +}; + +/** + * enum qca_vendor_attr_coex_config_types - Coex configurations types. + * This enum defines the valid set of values of coex configuration types. These + * values may used by attribute + * %QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_CONFIG_TYPE. + * + * @QCA_WLAN_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_COEX_RESET: Reset all the + * weights to default values. + * @QCA_WLAN_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_COEX_START: Start to config + * weights with configurability value. + */ +enum qca_vendor_attr_coex_config_types { + QCA_WLAN_VENDOR_ATTR_COEX_CONFIG_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_COEX_RESET = 1, + QCA_WLAN_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_COEX_START = 2, + QCA_WLAN_VENDOR_ATTR_COEX_CONFIG_TYPE_MAX +}; + +/** + * enum qca_vendor_attr_coex_config_three_way - Specifies vendor coex config + * attributes + * Attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_COEX_CONFIG + * + * QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_CONFIG_TYPE: u32 attribute. + * Indicate config type. + * the config types are 32-bit values from qca_vendor_attr_coex_config_types + * + * @QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_1: u32 attribute. + * Indicate the Priority 1 profiles. + * the profiles are 8-bit values from enum qca_coex_config_profiles + * In same priority level, maximum to 4 profiles can be set here. + * @QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_2: u32 attribute. + * Indicate the Priority 2 profiles. + * the profiles are 8-bit values from enum qca_coex_config_profiles + * In same priority level, maximum to 4 profiles can be set here. + * @QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_3: u32 attribute. + * Indicate the Priority 3 profiles. + * the profiles are 8-bit values from enum qca_coex_config_profiles + * In same priority level, maximum to 4 profiles can be set here. + * @QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_4: u32 attribute. + * Indicate the Priority 4 profiles. + * the profiles are 8-bit values from enum qca_coex_config_profiles + * In same priority level, maximum to 4 profiles can be set here. + * NOTE: + * limitations for QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_x priority + * arrangement: + * 1: In the same u32 attribute(priority x), the profiles enum values own + * same priority level. + * 2: 0xff is default value if the u8 profile value is not set. + * 3: max to 4 rules/profiles in same priority level. + * 4: max to 4 priority level (priority 1 - priority 4) + * 5: one priority level only supports one scenario from WLAN/BT/ZB, + * hybrid rules not support. + * 6: if WMI_COEX_CONFIG_THREE_WAY_COEX_RESET called, priority x will + * remain blank to reset all parameters. + * For example: + * + * If the attributes as follow: + * priority 1: + * ------------------------------------ + * | 0xff | 0 | 1 | 2 | + * ------------------------------------ + * priority 2: + * ------------------------------------- + * | 0xff | 0xff | 0xff | 32 | + * ------------------------------------- + * priority 3: + * ------------------------------------- + * | 0xff | 0xff | 0xff | 65 | + * ------------------------------------- + * then it means: + * 1: WIFI_STA_DISCOVERY, WIFI_STA_CLASS_3_MGMT and WIFI_STA_CONNECTION + * owns same priority level. + * 2: WIFI_STA_DISCOVERY, WIFI_STA_CLASS_3_MGMT and WIFI_STA_CONNECTION + * has priority over BT_A2DP and ZB_HIGH. + * 3: BT_A2DP has priority over ZB_HIGH. + */ + +enum qca_vendor_attr_coex_config_three_way { + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_INVALID = 0, + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_CONFIG_TYPE = 1, + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_1 = 2, + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_2 = 3, + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_3 = 4, + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_4 = 5, + + /* Keep last */ + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_AFTER_LAST, + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_MAX = + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_AFTER_LAST - 1, +}; + +/** + * enum qca_vendor_attr_peer_stats_cache_type - Represents peer stats cache type + * This enum defines the valid set of values of peer stats cache types. These + * values are used by attribute + * %QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_TYPE_INVALID. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_TX_RATE_STATS: Represents peer tx rate statistics. + * @QCA_WLAN_VENDOR_ATTR_PEER_RX_RATE_STATS: Represents peer rx rate statistics. + * @QCA_WLAN_VENDOR_ATTR_PEER_TX_SOJOURN_STATS: Represents peer tx sojourn + * statistics + */ +enum qca_vendor_attr_peer_stats_cache_type { + QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_TYPE_INVALID = 0, + + QCA_WLAN_VENDOR_ATTR_PEER_TX_RATE_STATS, + QCA_WLAN_VENDOR_ATTR_PEER_RX_RATE_STATS, + QCA_WLAN_VENDOR_ATTR_PEER_TX_SOJOURN_STATS, +}; + +/** + * enum qca_wlan_vendor_attr_peer_stats_cache_params - This enum defines + * attributes required for QCA_NL80211_VENDOR_SUBCMD_PEER_STATS_CACHE_FLUSH + * Attributes are required to flush peer rate statistics from driver to + * user application. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_TYPE: Unsigned 32-bit attribute + * Indicate peer stats cache type. + * The statistics types are 32-bit values from + * qca_vendor_attr_peer_stats_cache_type + * @QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_PEER_MAC: Unsigned 8-bit array + * of size 6, representing peer mac address. + * @QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_DATA: Opaque data attribute + * containing buffer of statistics to send event to application layer entity. + * @QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_PEER_COOKIE: Unsigned 64-bit attribute + * representing cookie for peer unique session. + */ +enum qca_wlan_vendor_attr_peer_stats_cache_params { + QCA_WLAN_VENDOR_ATTR_PEER_STATS_INVALID = 0, + + QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_TYPE = 1, + QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_PEER_MAC = 2, + QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_DATA = 3, + QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_PEER_COOKIE = 4, + + /* Keep last */ + QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_LAST, + QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_MAX = + QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_LAST - 1 +}; + +/** + * enum qca_mpta_helper_attr_zigbee_state - current states of zegbee. + * this enum defines all the possible state of zigbee, which can be + * delivered by NetLink attribute QCA_MPTA_HELPER_VENDOR_ATTR_ZIGBEE_STATE. + * + * @ZIGBEE_IDLE: zigbee in idle state + * @ZIGBEE_FORM_NETWORK: zibee forming network + * @ZIGBEE_WAIT_JOIN: zigbee waiting for joining network + * @ZIGBEE_JOIN: zigbee joining network + * @ZIGBEE_NETWORK_UP: zigbee network is up + * @ZIGBEE_HMI: zigbee in HMI mode + */ +enum qca_mpta_helper_attr_zigbee_state { + ZIGBEE_IDLE = 0, + ZIGBEE_FORM_NETWORK = 1, + ZIGBEE_WAIT_JOIN = 2, + ZIGBEE_JOIN = 3, + ZIGBEE_NETWORK_UP = 4, + ZIGBEE_HMI = 5, +}; + +/** + * enum qca_mpta_helper_vendor_attr - used for NL attributes sent by + * vendor sub-command QCA_NL80211_VENDOR_SUBCMD_MPTA_HELPER_CONFIG. + */ +enum qca_mpta_helper_vendor_attr { + QCA_MPTA_HELPER_VENDOR_ATTR_INVALID = 0, + /* Optional attribute used to update zigbee state. + * enum qca_mpta_helper_attr_zigbee_state. + * NLA_U32 attribute. + */ + QCA_MPTA_HELPER_VENDOR_ATTR_ZIGBEE_STATE = 1, + /* Optional attribute used to configure wlan duration for Shape-OCS + * during interrupt. + * Set in pair with QCA_MPTA_HELPER_VENDOR_ATTR_INT_NON_WLAN_DURATION. + * Value range 0 ~ 300 (ms). + * NLA_U32 attribute. + */ + QCA_MPTA_HELPER_VENDOR_ATTR_INT_WLAN_DURATION = 2, + /* Optional attribute used to configure non wlan duration for Shape-OCS + * during interrupt. + * Set in pair with QCA_MPTA_HELPER_VENDOR_ATTR_INT_WLAN_DURATION. + * Value range 0 ~ 300 (ms). + * NLA_U32 attribute. + */ + QCA_MPTA_HELPER_VENDOR_ATTR_INT_NON_WLAN_DURATION = 3, + /* Optional attribute used to configure wlan duration for Shape-OCS + * monitor period. + * Set in pair with QCA_MPTA_HELPER_VENDOR_ATTR_MON_NON_WLAN_DURATION. + * Value range 0 ~ 300 (ms) + * NLA_U32 attribute + */ + QCA_MPTA_HELPER_VENDOR_ATTR_MON_WLAN_DURATION = 4, + /* Optional attribute used to configure non wlan duration for Shape-OCS + * monitor period. + * Set in pair with QCA_MPTA_HELPER_VENDOR_ATTR_MON_WLAN_DURATION. + * Value range 0 ~ 300 (ms) + * NLA_U32 attribute + */ + QCA_MPTA_HELPER_VENDOR_ATTR_MON_NON_WLAN_DURATION = 5, + /* Optional attribute used to configure ocs interrupt duration. + * Set in pair with QCA_MPTA_HELPER_VENDOR_ATTR_MON_OCS_DURATION. + * Value range 1000 ~ 20000 (ms) + * NLA_U32 attribute + */ + QCA_MPTA_HELPER_VENDOR_ATTR_INT_OCS_DURATION = 6, + /* Optional attribute used to configure ocs monitor duration. + * Set in pair with QCA_MPTA_HELPER_VENDOR_ATTR_INT_OCS_DURATION. + * Value range 1000 ~ 20000 (ms) + * NLA_U32 attribute + */ + QCA_MPTA_HELPER_VENDOR_ATTR_MON_OCS_DURATION = 7, + /* Optional attribute used to notify wlan FW current zigbee channel. + * Value range 11 ~ 26 + * NLA_U32 attribute + */ + QCA_MPTA_HELPER_VENDOR_ATTR_ZIGBEE_CHAN = 8, + /* Optional attribute used to configure wlan mute duration. + * Value range 0 ~ 400 (ms) + * NLA_U32 attribute + */ + QCA_MPTA_HELPER_VENDOR_ATTR_WLAN_MUTE_DURATION = 9, + + /* keep last */ + QCA_MPTA_HELPER_VENDOR_ATTR_AFTER_LAST, + QCA_MPTA_HELPER_VENDOR_ATTR_MAX = + QCA_MPTA_HELPER_VENDOR_ATTR_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_beacon_reporting_op_types - Defines different types of + * operations for which %QCA_NL80211_VENDOR_SUBCMD_BEACON_REPORTING can be + * used. Will be used by %QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE. + * + * @QCA_WLAN_VENDOR_BEACON_REPORTING_OP_START: Sent by userspace to the driver + * to request the driver to start reporting Beacon frames. + * @QCA_WLAN_VENDOR_BEACON_REPORTING_OP_STOP: Sent by userspace to the driver + * to request the driver to stop reporting Beacon frames. + * @QCA_WLAN_VENDOR_BEACON_REPORTING_OP_BEACON_INFO: Sent by the driver to + * userspace to report received Beacon frames. + * @QCA_WLAN_VENDOR_BEACON_REPORTING_OP_PAUSE: Sent by the driver to userspace + * to indicate that the driver is going to pause reporting Beacon frames. + */ +enum qca_wlan_vendor_beacon_reporting_op_types { + QCA_WLAN_VENDOR_BEACON_REPORTING_OP_START = 0, + QCA_WLAN_VENDOR_BEACON_REPORTING_OP_STOP = 1, + QCA_WLAN_VENDOR_BEACON_REPORTING_OP_BEACON_INFO = 2, + QCA_WLAN_VENDOR_BEACON_REPORTING_OP_PAUSE = 3, +}; + +/** + * enum qca_wlan_vendor_beacon_reporting_pause_reasons - Defines different + * types of reasons for which the driver is pausing reporting Beacon frames. + * Will be used by %QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_PAUSE_REASON. + * + * @QCA_WLAN_VENDOR_BEACON_REPORTING_PAUSE_REASON_UNSPECIFIED: For unspecified + * reasons. + * @QCA_WLAN_VENDOR_BEACON_REPORTING_PAUSE_REASON_SCAN_STARTED: When the + * driver/firmware is starting a scan. + * @QCA_WLAN_VENDOR_BEACON_REPORTING_PAUSE_REASON_DISCONNECTED: When the + * driver/firmware disconnects from the ESS and indicates the disconnection to + * userspace (non-seamless roaming case). This reason code will be used by the + * driver/firmware to indicate stopping of beacon report events. Userspace + * will need to start beacon reporting again (if desired) by sending vendor + * command QCA_NL80211_VENDOR_SUBCMD_BEACON_REPORTING with + * QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE set to + * QCA_WLAN_VENDOR_BEACON_REPORTING_OP_START after the next connection is + * completed. + */ +enum qca_wlan_vendor_beacon_reporting_pause_reasons { + QCA_WLAN_VENDOR_BEACON_REPORTING_PAUSE_REASON_UNSPECIFIED = 0, + QCA_WLAN_VENDOR_BEACON_REPORTING_PAUSE_REASON_SCAN_STARTED = 1, + QCA_WLAN_VENDOR_BEACON_REPORTING_PAUSE_REASON_DISCONNECTED = 2, +}; + +/* + * enum qca_wlan_vendor_attr_beacon_reporting_params - List of attributes used + * in vendor sub-command QCA_NL80211_VENDOR_SUBCMD_BEACON_REPORTING. + */ +enum qca_wlan_vendor_attr_beacon_reporting_params { + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_INVALID = 0, + /* Specifies the type of operation that the vendor command/event is + * intended for. Possible values for this attribute are defined in + * enum qca_wlan_vendor_beacon_reporting_op_types. u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE = 1, + /* Optionally set by userspace to request the driver to report Beacon + * frames using asynchronous vendor events when the + * QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE is set to + * QCA_WLAN_VENDOR_BEACON_REPORTING_OP_START. NLA_FLAG attribute. + * If this flag is not set, the driver will only update Beacon frames + * in cfg80211 scan cache but not send any vendor events. + */ + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_ACTIVE_REPORTING = 2, + /* Optionally used by userspace to request the driver/firmware to + * report Beacon frames periodically when the + * QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE is set to + * QCA_WLAN_VENDOR_BEACON_REPORTING_OP_START. + * u32 attribute, indicates the period of Beacon frames to be reported + * and in the units of beacon interval. + * If this attribute is missing in the command, then the default value + * of 1 will be assumed by driver, i.e., to report every Beacon frame. + * Zero is an invalid value. + * If a valid value is received for this attribute, the driver will + * update the cfg80211 scan cache periodically as per the value + * received in this attribute in addition to updating the cfg80211 scan + * cache when there is significant change in Beacon frame IEs. + */ + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_PERIOD = 3, + /* Used by the driver to encapsulate the SSID when the + * QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE is set to + * QCA_WLAN_VENDOR_BEACON_REPORTING_OP_BEACON_INFO. + * u8 array with a maximum size of 32. + * + * When generating beacon report from non-MBSSID Beacon frame, the SSID + * will be taken from the SSID element of the received Beacon frame. + * + * When generating beacon report from Multiple BSSID Beacon frame and + * if the BSSID of the current connected BSS matches the BSSID of the + * transmitting BSS, the SSID will be taken from the SSID element of + * the received Beacon frame. + * + * When generating beacon report from Multiple BSSID Beacon frame and + * if the BSSID of the current connected BSS matches the BSSID of one + * of the* nontransmitting BSSs, the SSID will be taken from the SSID + * field included in the nontransmitted BSS profile whose derived BSSI + * is same as the BSSID of the current connected BSS. When there is no + * nontransmitted BSS profile whose derived BSSID is same as the BSSID + * of current connected* BSS, this attribute will not be present. + */ + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_SSID = 4, + /* Used by the driver to encapsulate the BSSID of the AP to which STA + * is currently connected to when the + * QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE is set to + * QCA_WLAN_VENDOR_BEACON_REPORTING_OP_BEACON_INFO. u8 array with a + * fixed size of 6 bytes. + * + * When generating beacon report from a Multiple BSSID beacon and the + * current connected BSSID matches one of the nontransmitted BSSIDs in + * a Multiple BSSID set, this BSSID will be that particular + * nontransmitted BSSID and not the transmitted BSSID (i.e., the + * transmitting address of the Beacon frame). + */ + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_BSSID = 5, + /* Used by the driver to encapsulate the frequency in MHz on which + * the Beacon frame was received when the + * QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE is + * set to QCA_WLAN_VENDOR_BEACON_REPORTING_OP_BEACON_INFO. + * u32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_FREQ = 6, + /* Used by the driver to encapsulate the Beacon interval + * when the QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE is set to + * QCA_WLAN_VENDOR_BEACON_REPORTING_OP_BEACON_INFO. + * u16 attribute. The value will be copied from the Beacon frame and + * the units are TUs. + */ + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_BI = 7, + /* Used by the driver to encapsulate the Timestamp field from the + * Beacon frame when the QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE + * is set to QCA_WLAN_VENDOR_BEACON_REPORTING_OP_BEACON_INFO. + * u64 attribute. + */ + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_TSF = 8, + /* Used by the driver to encapsulate the CLOCK_BOOTTIME when this + * Beacon frame is received in the driver when the + * QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE is set to + * QCA_WLAN_VENDOR_BEACON_REPORTING_OP_BEACON_INFO. u64 attribute, in + * the units of nanoseconds. This value is expected to have accuracy of + * about 10 ms. + */ + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_BOOTTIME_WHEN_RECEIVED = 9, + /* Used by the driver to encapsulate the IEs of the Beacon frame from + * which this event is generated when the + * QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE is set to + * QCA_WLAN_VENDOR_BEACON_REPORTING_OP_BEACON_INFO. u8 array. + */ + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_IES = 10, + /* Used by the driver to specify the reason for the driver/firmware to + * pause sending beacons to userspace when the + * QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE is set to + * QCA_WLAN_VENDOR_BEACON_REPORTING_OP_PAUSE. Possible values are + * defined in enum qca_wlan_vendor_beacon_reporting_pause_reasons, u32 + * attribute. + */ + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_PAUSE_REASON = 11, + /* Used by the driver to specify whether the driver will automatically + * resume reporting beacon events to userspace later (for example after + * the ongoing off-channel activity is completed etc.) when the + * QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE is set to + * QCA_WLAN_VENDOR_BEACON_REPORTING_OP_PAUSE. NLA_FLAG attribute. + */ + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_AUTO_RESUMES = 12, + /* Optionally set by userspace to request the driver not to resume + * beacon reporting after a pause is completed, when the + * QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_OP_TYPE is set to + * QCA_WLAN_VENDOR_BEACON_REPORTING_OP_START. NLA_FLAG attribute. + * If this flag is set, the driver will not resume beacon reporting + * after any pause in beacon reporting is completed. Userspace has to + * send QCA_WLAN_VENDOR_BEACON_REPORTING_OP_START command again in order + * to initiate beacon reporting again. If this flag is set in the recent + * QCA_WLAN_VENDOR_BEACON_REPORTING_OP_START command, then in the + * subsequent QCA_WLAN_VENDOR_BEACON_REPORTING_OP_PAUSE event (if any) + * the QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_AUTO_RESUMES shall not be + * set by the driver. Setting this flag until and unless there is a + * specific need is not recommended as there is a chance of some beacons + * received after pause command and next start command being not + * reported. + */ + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_DO_NOT_RESUME = 13, + + /* Keep last */ + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_LAST, + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_MAX = + QCA_WLAN_VENDOR_ATTR_BEACON_REPORTING_LAST - 1 +}; + +/* + * enum qca_wlan_vendor_attr_oem_data_params - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_OEM_DATA. + * + * @QCA_WLAN_VENDOR_ATTR_OEM_DATA_CMD_DATA: This NLA_BINARY attribute is + * used to set/query the data to/from the firmware. On query, the same + * attribute is used to carry the respective data in the reply sent by the + * driver to userspace. The request to set/query the data and the format of the + * respective data from the firmware are embedded in the attribute. The + * maximum size of the attribute payload is 1024 bytes. + * Userspace has to set the QCA_WLAN_VENDOR_ATTR_OEM_DATA_RESPONSE_EXPECTED + * attribute when the data is queried from the firmware. + * + * @QCA_WLAN_VENDOR_ATTR_OEM_DATA_RESPONSE_EXPECTED: This NLA_FLAG attribute + * is set when the userspace queries data from the firmware. This attribute + * should not be set when userspace sets the OEM data to the firmware. + */ +enum qca_wlan_vendor_attr_oem_data_params { + QCA_WLAN_VENDOR_ATTR_OEM_DATA_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_OEM_DATA_CMD_DATA = 1, + QCA_WLAN_VENDOR_ATTR_OEM_DATA_RESPONSE_EXPECTED = 3, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_OEM_DATA_PARAMS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_OEM_DATA_PARAMS_MAX = + QCA_WLAN_VENDOR_ATTR_OEM_DATA_PARAMS_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_avoid_frequency_ext - Defines attributes to be + * used with vendor command/event QCA_NL80211_VENDOR_SUBCMD_AVOID_FREQUENCY_EXT. + * + * @QCA_WLAN_VENDOR_ATTR_AVOID_FREQUENCY_RANGE: Required + * Nested attribute containing multiple ranges with following attributes: + * QCA_WLAN_VENDOR_ATTR_AVOID_FREQUENCY_START and + * QCA_WLAN_VENDOR_ATTR_AVOID_FREQUENCY_END. + * + * @QCA_WLAN_VENDOR_ATTR_AVOID_FREQUENCY_START: Required (u32) + * Starting center frequency in MHz. + * + * @QCA_WLAN_VENDOR_ATTR_AVOID_FREQUENCY_END: Required (u32) + * Ending center frequency in MHz. + */ +enum qca_wlan_vendor_attr_avoid_frequency_ext { + QCA_WLAN_VENDOR_ATTR_AVOID_FREQUENCY_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_AVOID_FREQUENCY_RANGE = 1, + QCA_WLAN_VENDOR_ATTR_AVOID_FREQUENCY_START = 2, + QCA_WLAN_VENDOR_ATTR_AVOID_FREQUENCY_END = 3, + + QCA_WLAN_VENDOR_ATTR_AVOID_FREQUENCY_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_AVOID_FREQUENCY_MAX = + QCA_WLAN_VENDOR_ATTR_AVOID_FREQUENCY_AFTER_LAST - 1 +}; + +/* + * enum qca_wlan_vendor_attr_add_sta_node_params - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_ADD_STA_NODE. + */ +enum qca_wlan_vendor_attr_add_sta_node_params { + QCA_WLAN_VENDOR_ATTR_ADD_STA_NODE_INVALID = 0, + /* 6 byte MAC address of STA */ + QCA_WLAN_VENDOR_ATTR_ADD_STA_NODE_MAC_ADDR = 1, + /* Authentication algorithm used by the station of size u16; + * defined in enum nl80211_auth_type. + */ + QCA_WLAN_VENDOR_ATTR_ADD_STA_NODE_AUTH_ALGO = 2, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_ADD_STA_NODE_PARAM_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_ADD_STA_NODE_PARAM_MAX = + QCA_WLAN_VENDOR_ATTR_ADD_STA_NODE_PARAM_AFTER_LAST - 1 +}; + +/** + * enum qca_btc_chain_mode - Specifies BT coex chain mode. + * This enum defines the valid set of values of BT coex chain mode. + * These values are used by attribute %QCA_VENDOR_ATTR_BTC_CHAIN_MODE of + * %QCA_NL80211_VENDOR_SUBCMD_BTC_CHAIN_MODE. + * + * @QCA_BTC_CHAIN_SHARED: chains of BT and WLAN 2.4G are shared. + * @QCA_BTC_CHAIN_SEPARATED: chains of BT and WLAN 2.4G are separated. + */ +enum qca_btc_chain_mode { + QCA_BTC_CHAIN_SHARED = 0, + QCA_BTC_CHAIN_SEPARATED = 1, +}; + +/** + * enum qca_vendor_attr_btc_chain_mode - Specifies attributes for BT coex + * chain mode. + * Attributes for data used by QCA_NL80211_VENDOR_SUBCMD_BTC_CHAIN_MODE. + * + * @QCA_VENDOR_ATTR_COEX_BTC_CHAIN_MODE: u32 attribute. + * Indicates the BT coex chain mode, are 32-bit values from + * enum qca_btc_chain_mode. This attribute is mandatory. + * + * @QCA_VENDOR_ATTR_COEX_BTC_CHAIN_MODE_RESTART: flag attribute. + * If set, vdev should be restarted when BT coex chain mode is updated. + * This attribute is optional. + */ +enum qca_vendor_attr_btc_chain_mode { + QCA_VENDOR_ATTR_BTC_CHAIN_MODE_INVALID = 0, + QCA_VENDOR_ATTR_BTC_CHAIN_MODE = 1, + QCA_VENDOR_ATTR_BTC_CHAIN_MODE_RESTART = 2, + + /* Keep last */ + QCA_VENDOR_ATTR_BTC_CHAIN_MODE_LAST, + QCA_VENDOR_ATTR_BTC_CHAIN_MODE_MAX = + QCA_VENDOR_ATTR_BTC_CHAIN_MODE_LAST - 1, +}; + +/** + * enum qca_vendor_wlan_sta_flags - Station feature flags + * Bits will be set to 1 if the corresponding features are enabled. + * @QCA_VENDOR_WLAN_STA_FLAG_AMPDU: AMPDU is enabled for the station + * @QCA_VENDOR_WLAN_STA_FLAG_TX_STBC: TX Space-time block coding is enabled + for the station + * @QCA_VENDOR_WLAN_STA_FLAG_RX_STBC: RX Space-time block coding is enabled + for the station + */ +enum qca_vendor_wlan_sta_flags { + QCA_VENDOR_WLAN_STA_FLAG_AMPDU = BIT(0), + QCA_VENDOR_WLAN_STA_FLAG_TX_STBC = BIT(1), + QCA_VENDOR_WLAN_STA_FLAG_RX_STBC = BIT(2), +}; + +/** + * enum qca_vendor_wlan_sta_guard_interval - Station guard interval + * @QCA_VENDOR_WLAN_STA_GI_800_NS: Legacy normal guard interval + * @QCA_VENDOR_WLAN_STA_GI_400_NS: Legacy short guard interval + * @QCA_VENDOR_WLAN_STA_GI_1600_NS: Guard interval used by HE + * @QCA_VENDOR_WLAN_STA_GI_3200_NS: Guard interval used by HE + */ +enum qca_vendor_wlan_sta_guard_interval { + QCA_VENDOR_WLAN_STA_GI_800_NS = 0, + QCA_VENDOR_WLAN_STA_GI_400_NS = 1, + QCA_VENDOR_WLAN_STA_GI_1600_NS = 2, + QCA_VENDOR_WLAN_STA_GI_3200_NS = 3, +}; + +/** + * enum qca_wlan_vendor_attr_get_sta_info - Defines attributes + * used by QCA_NL80211_VENDOR_SUBCMD_GET_STA_INFO vendor command. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_MAC: + * Required attribute in request for AP mode only, 6-byte MAC address, + * corresponding to the station's MAC address for which information is + * requested. For STA mode this is not required as the info always correspond + * to the self STA and the current/last association. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_FLAGS: + * Optionally used in response, u32 attribute, contains a bitmap of different + * fields defined in enum qca_vendor_wlan_sta_flags, used in AP mode only. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_GUARD_INTERVAL: + * Optionally used in response, u32 attribute, possible values are defined in + * enum qca_vendor_wlan_sta_guard_interval, used in AP mode only. + * Guard interval used by the station. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_RETRY_COUNT: + * Optionally used in response, u32 attribute, used in AP mode only. + * Value indicates the number of data frames received from station with retry + * bit set to 1 in FC. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_BC_MC_COUNT: + * Optionally used in response, u32 attribute, used in AP mode only. + * Counter for number of data frames with broadcast or multicast address in + * the destination address received from the station. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TX_RETRY_SUCCEED: + * Optionally used in response, u32 attribute, used in both STA and AP modes. + * Value indicates the number of data frames successfully transmitted only + * after retrying the packets and for which the TX status has been updated + * back to host from target. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TX_RETRY_EXHAUSTED: + * Optionally used in response, u32 attribute, used in both STA and AP mode. + * Value indicates the number of data frames not transmitted successfully even + * after retrying the packets for the number of times equal to the total number + * of retries allowed for that packet and for which the TX status has been + * updated back to host from target. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_TX_TOTAL: + * Optionally used in response, u32 attribute, used in AP mode only. + * Counter in the target for the number of data frames successfully transmitted + * to the station. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_TX_RETRY: + * Optionally used in response, u32 attribute, used in AP mode only. + * Value indicates the number of data frames successfully transmitted only + * after retrying the packets. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_TX_RETRY_EXHAUSTED: + * Optionally used in response, u32 attribute, used in both STA & AP mode. + * Value indicates the number of data frames not transmitted successfully even + * after retrying the packets for the number of times equal to the total number + * of retries allowed for that packet. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TX_PROBE_REQ_BMISS_COUNT: u32, used in + * the STA mode only. Represent the number of probe requests sent by the STA + * while attempting to roam on missing certain number of beacons from the + * connected AP. If queried in the disconnected state, this represents the + * count for the last connected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_PROBE_RESP_BMISS_COUNT: u32, used in + * the STA mode. Represent the number of probe responses received by the station + * while attempting to roam on missing certain number of beacons from the + * connected AP. When queried in the disconnected state, this represents the + * count when in last connected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_TX_ALL_COUNT: u32, used in the + * STA mode only. Represents the total number of frames sent out by STA + * including Data, ACK, RTS, CTS, Control Management. This data is maintained + * only for the connect session. Represents the count of last connected session, + * when queried in the disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TX_RTS_COUNT: u32, used in the STA mode. + * Total number of RTS sent out by the STA. This data is maintained per connect + * session. Represents the count of last connected session, when queried in the + * disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TX_RTS_RETRY_FAIL_COUNT: u32, used in the + * STA mode.Represent the number of RTS transmission failure that reach retry + * limit. This data is maintained per connect session. Represents the count of + * last connected session, when queried in the disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TX_DATA_NON_AGGREGATED_COUNT: u32, used in + * the STA mode. Represent the total number of non aggregated frames transmitted + * by the STA. This data is maintained per connect session. Represents the count + * of last connected session, when queried in the disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TX_DATA_AGGREGATED_COUNT: u32, used in the + * STA mode. Represent the total number of aggregated frames transmitted by the + * STA. This data is maintained per connect session. Represents the count of + * last connected session, when queried in the disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_FRAMES_GOOD_PLCP_COUNT: u32, used in + * the STA mode. Represents the number of received frames with a good PLCP. This + * data is maintained per connect session. Represents the count of last + * connected session, when queried in the disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_FRAMES_INVALID_DELIMITER_COUNT: u32, + * used in the STA mode. Represents the number of occasions that no valid + * delimiter is detected by A-MPDU parser. This data is maintained per connect + * session. Represents the count of last connected session, when queried in the + * disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_FRAMES_CRC_FAIL_COUNT: u32, used in the + * STA mode. Represents the number of frames for which CRC check failed in the + * MAC. This data is maintained per connect session. Represents the count of + * last connected session, when queried in the disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_ACKS_GOOD_FCS_COUNT: u32, used in the + * STA mode. Represents the number of unicast ACKs received with good FCS. This + * data is maintained per connect session. Represents the count of last + * connected session, when queried in the disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_BLOCKACK_COUNT: u32, used in the STA + * mode. Represents the number of received Block Acks. This data is maintained + * per connect session. Represents the count of last connected session, when + * queried in the disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_BEACON_COUNT: u32, used in the STA + * mode. Represents the number of beacons received from the connected BSS. This + * data is maintained per connect session. Represents the count of last + * connected session, when queried in the disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_OTHER_BEACON_COUNT: u32, used in the + * STA mode. Represents the number of beacons received by the other BSS when in + * connected state (through the probes done by the STA). This data is maintained + * per connect session. Represents the count of last connected session, when + * queried in the disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_UCAST_DATA_GOOD_FCS_COUNT: u64, used in + * the STA mode. Represents the number of received DATA frames with good FCS and + * matching Receiver Address when in connected state. This data is maintained + * per connect session. Represents the count of last connected session, when + * queried in the disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_DATA_BC_MC_DROP_COUNT: u32, used in the + * STA mode. Represents the number of RX Data multicast frames dropped by the HW + * when in the connected state. This data is maintained per connect session. + * Represents the count of last connected session, when queried in the + * disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_POWER_24G_1MBPS: u32, used in the + * STA mode. This represents the target power in dBm for the transmissions done + * to the AP in 2.4 GHz at 1 Mbps (DSSS) rate. This data is maintained per + * connect session. Represents the count of last connected session, when + * queried in the disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_POWER_24G_6MBPS: u32, used in the + * STA mode. This represents the Target power in dBm for transmissions done to + * the AP in 2.4 GHz at 6 Mbps (OFDM) rate. This data is maintained per connect + * session. Represents the count of last connected session, when queried in the + * disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_POWER_24G_MCS0: u32, used in the + * STA mode. This represents the Target power in dBm for transmissions done to + * the AP in 2.4 GHz at MCS0 rate. This data is maintained per connect session. + * Represents the count of last connected session, when queried in the + * disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_POWER_5G_6MBPS: u32, used in the + * STA mode. This represents the Target power in dBm for transmissions done to + * the AP in 5 GHz at 6 Mbps (OFDM) rate. This data is maintained per connect + * session. Represents the count of last connected session, when queried in + * the disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_POWER_5G_MCS0: u32, used in the + * STA mode. This represents the Target power in dBm for for transmissions done + * to the AP in 5 GHz at MCS0 rate. This data is maintained per connect session. + * Represents the count of last connected session, when queried in the + * disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_HW_BUFFERS_OVERFLOW_COUNT: u32, used + * in the STA mode. This represents the Nested attribute representing the + * overflow counts of each receive buffer allocated to the hardware during the + * STA's connection. The number of hw buffers might vary for each WLAN + * solution and hence this attribute represents the nested array of all such + * HW buffer count. This data is maintained per connect session. Represents + * the count of last connected session, when queried in the disconnected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_MAX_TX_POWER: u32, Max TX power (dBm) + * allowed as per the regulatory requirements for the current or last connected + * session. Used in the STA mode. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_LATEST_TX_POWER: u32, Latest TX power + * (dBm) used by the station in its latest unicast frame while communicating + * to the AP in the connected state. When queried in the disconnected state, + * this represents the TX power used by the STA with last AP communication + * when in connected state. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_ANI_LEVEL: u32, Adaptive noise immunity + * level used to adjust the RX sensitivity. Represents the current ANI level + * when queried in the connected state. When queried in the disconnected + * state, this corresponds to the latest ANI level at the instance of + * disconnection. + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_BIP_MIC_ERROR_COUNT: u32, used in STA mode + * only. This represents the number of group addressed robust management frames + * received from this station with an invalid MIC or a missing MME when PMF is + * enabled. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_BIP_REPLAY_COUNT: u32, used in STA mode + * only. This represents the number of group addressed robust management frames + * received from this station with the packet number less than or equal to the + * last received packet number when PMF is enabled. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_BEACON_MIC_ERROR_COUNT: u32, used in STA + * mode only. This represents the number of Beacon frames received from this + * station with an invalid MIC or a missing MME when beacon protection is + * enabled. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_BEACON_REPLAY_COUNT: u32, used in STA mode + * only. This represents number of Beacon frames received from this station with + * the packet number less than or equal to the last received packet number when + * beacon protection is enabled. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_CONNECT_FAIL_REASON_CODE: u32, used in + * STA mode only. Driver uses this attribute to populate the connection failure + * reason codes and the values are defined in + * enum qca_sta_connect_fail_reason_codes. Userspace applications can send + * QCA_NL80211_VENDOR_SUBCMD_GET_STA_INFO vendor command after receiving + * connection failure from driver. The driver shall not include this attribute + * in response to QCA_NL80211_VENDOR_SUBCMD_GET_STA_INFO command if there is no + * connection failure observed in the last attempted connection. + */ +enum qca_wlan_vendor_attr_get_sta_info { + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_MAC = 1, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_FLAGS = 2, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_GUARD_INTERVAL = 3, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_RETRY_COUNT = 4, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_BC_MC_COUNT = 5, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TX_RETRY_SUCCEED = 6, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TX_RETRY_EXHAUSTED = 7, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_TX_TOTAL = 8, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_TX_RETRY = 9, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_TX_RETRY_EXHAUSTED = 10, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TX_PROBE_REQ_BMISS_COUNT = 11, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_PROBE_RESP_BMISS_COUNT = 12, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_TX_ALL_COUNT = 13, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TX_RTS_COUNT = 14, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TX_RTS_RETRY_FAIL_COUNT = 15, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TX_DATA_NON_AGGREGATED_COUNT = 16, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TX_DATA_AGGREGATED_COUNT = 17, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_FRAMES_GOOD_PLCP_COUNT = 18, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_FRAMES_INVALID_DELIMITER_COUNT = 19, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_FRAMES_CRC_FAIL_COUNT = 20, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_ACKS_GOOD_FCS_COUNT = 21, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_BLOCKACK_COUNT = 22, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_BEACON_COUNT = 23, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_OTHER_BEACON_COUNT = 24, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_UCAST_DATA_GOOD_FCS_COUNT = 25, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_DATA_BC_MC_DROP_COUNT = 26, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_POWER_24G_1MBPS = 27, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_POWER_24G_6MBPS = 28, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_POWER_24G_MCS0 = 29, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_POWER_5G_6MBPS = 30, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_TARGET_POWER_5G_MCS0 = 31, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_RX_HW_BUFFERS_OVERFLOW_COUNT = 32, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_MAX_TX_POWER = 33, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_LATEST_TX_POWER = 34, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_ANI_LEVEL = 35, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_BIP_MIC_ERROR_COUNT = 39, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_BIP_REPLAY_COUNT = 40, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_BEACON_MIC_ERROR_COUNT = 41, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_BEACON_REPLAY_COUNT = 42, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_CONNECT_FAIL_REASON_CODE = 43, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_MAX = + QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_AFTER_LAST - 1, +}; + +/** + * enum qca_disconnect_reason_codes - Specifies driver disconnect reason codes. + * Used when the driver triggers the STA to disconnect from the AP. + * + * @QCA_DISCONNECT_REASON_UNSPECIFIED: The host driver triggered the + * disconnection with the AP due to unspecified reasons. + * + * @QCA_DISCONNECT_REASON_INTERNAL_ROAM_FAILURE: The host driver triggered the + * disconnection with the AP due to a roaming failure. This roaming is triggered + * internally (host driver/firmware). + * + * @QCA_DISCONNECT_REASON_EXTERNAL_ROAM_FAILURE: The driver disconnected from + * the AP when the user/external triggered roaming fails. + * + * @QCA_DISCONNECT_REASON_GATEWAY_REACHABILITY_FAILURE: This reason code is used + * by the host driver whenever gateway reachability failure is detected and the + * driver disconnects with AP. + * + * @QCA_DISCONNECT_REASON_UNSUPPORTED_CHANNEL_CSA: The driver disconnected from + * the AP on a channel switch announcement from it with an unsupported channel. + * + * @QCA_DISCONNECT_REASON_OPER_CHANNEL_DISABLED_INDOOR: On a concurrent AP start + * with indoor channels disabled and if the STA is connected on one of these + * disabled channels, the host driver disconnected the STA with this reason + * code. + * + * @QCA_DISCONNECT_REASON_OPER_CHANNEL_USER_DISABLED: Disconnection due to an + * explicit request from the user to disable the current operating channel. + * + * @QCA_DISCONNECT_REASON_DEVICE_RECOVERY: STA disconnected from the AP due to + * the internal host driver/firmware recovery. + * + * @QCA_DISCONNECT_REASON_KEY_TIMEOUT: The driver triggered the disconnection on + * a timeout for the key installations from the user space. + * + * @QCA_DISCONNECT_REASON_OPER_CHANNEL_BAND_CHANGE: The dDriver disconnected the + * STA on a band change request from the user space to a different band from the + * current operation channel/band. + * + * @QCA_DISCONNECT_REASON_IFACE_DOWN: The STA disconnected from the AP on an + * interface down trigger from the user space. + * + * @QCA_DISCONNECT_REASON_PEER_XRETRY_FAIL: The host driver disconnected the + * STA on getting continuous transmission failures for multiple Data frames. + * + * @QCA_DISCONNECT_REASON_PEER_INACTIVITY: The STA does a keep alive + * notification to the AP by transmitting NULL/G-ARP frames. This disconnection + * represents inactivity from AP on such transmissions. + + * @QCA_DISCONNECT_REASON_SA_QUERY_TIMEOUT: This reason code is used on + * disconnection when SA Query times out (AP does not respond to SA Query). + * + * @QCA_DISCONNECT_REASON_BEACON_MISS_FAILURE: The host driver disconnected the + * STA on missing the beacons continuously from the AP. + * + * @QCA_DISCONNECT_REASON_CHANNEL_SWITCH_FAILURE: Disconnection due to STA not + * able to move to the channel mentioned by the AP in CSA. + * + * @QCA_DISCONNECT_REASON_USER_TRIGGERED: User triggered disconnection. + */ +enum qca_disconnect_reason_codes { + QCA_DISCONNECT_REASON_UNSPECIFIED = 0, + QCA_DISCONNECT_REASON_INTERNAL_ROAM_FAILURE = 1, + QCA_DISCONNECT_REASON_EXTERNAL_ROAM_FAILURE = 2, + QCA_DISCONNECT_REASON_GATEWAY_REACHABILITY_FAILURE = 3, + QCA_DISCONNECT_REASON_UNSUPPORTED_CHANNEL_CSA = 4, + QCA_DISCONNECT_REASON_OPER_CHANNEL_DISABLED_INDOOR = 5, + QCA_DISCONNECT_REASON_OPER_CHANNEL_USER_DISABLED = 6, + QCA_DISCONNECT_REASON_DEVICE_RECOVERY = 7, + QCA_DISCONNECT_REASON_KEY_TIMEOUT = 8, + QCA_DISCONNECT_REASON_OPER_CHANNEL_BAND_CHANGE = 9, + QCA_DISCONNECT_REASON_IFACE_DOWN = 10, + QCA_DISCONNECT_REASON_PEER_XRETRY_FAIL = 11, + QCA_DISCONNECT_REASON_PEER_INACTIVITY = 12, + QCA_DISCONNECT_REASON_SA_QUERY_TIMEOUT = 13, + QCA_DISCONNECT_REASON_BEACON_MISS_FAILURE = 14, + QCA_DISCONNECT_REASON_CHANNEL_SWITCH_FAILURE = 15, + QCA_DISCONNECT_REASON_USER_TRIGGERED = 16, +}; + +/** + * enum qca_sta_connect_fail_reason_codes - Defines values carried + * by QCA_WLAN_VENDOR_ATTR_GET_STA_INFO_CONNECT_FAIL_REASON_CODE vendor + * attribute. + * @QCA_STA_CONNECT_FAIL_REASON_NO_BSS_FOUND: No probe response frame received + * for unicast probe request. + * @QCA_STA_CONNECT_FAIL_REASON_AUTH_TX_FAIL: STA failed to send auth request. + * @QCA_STA_CONNECT_FAIL_REASON_AUTH_NO_ACK_RECEIVED: AP didn't send ACK for + * ath request. + * @QCA_STA_CONNECT_FAIL_REASON_AUTH_NO_RESP_RECEIVED: Auth response is not + * received from AP. + * @QCA_STA_CONNECT_FAIL_REASON_ASSOC_REQ_TX_FAIL: STA failed to send assoc + * request. + * @QCA_STA_CONNECT_FAIL_REASON_ASSOC_NO_ACK_RECEIVED: AP didn't send ACK for + * assoc request. + * @QCA_STA_CONNECT_FAIL_REASON_ASSOC_NO_RESP_RECEIVED: Assoc response is not + * received from AP. + */ +enum qca_sta_connect_fail_reason_codes { + QCA_STA_CONNECT_FAIL_REASON_NO_BSS_FOUND = 1, + QCA_STA_CONNECT_FAIL_REASON_AUTH_TX_FAIL = 2, + QCA_STA_CONNECT_FAIL_REASON_AUTH_NO_ACK_RECEIVED = 3, + QCA_STA_CONNECT_FAIL_REASON_AUTH_NO_RESP_RECEIVED = 4, + QCA_STA_CONNECT_FAIL_REASON_ASSOC_REQ_TX_FAIL = 5, + QCA_STA_CONNECT_FAIL_REASON_ASSOC_NO_ACK_RECEIVED = 6, + QCA_STA_CONNECT_FAIL_REASON_ASSOC_NO_RESP_RECEIVED = 7, +}; + +/** + * enum qca_wlan_vendor_attr_driver_disconnect_reason - Defines attributes + * used by %QCA_NL80211_VENDOR_SUBCMD_DRIVER_DISCONNECT_REASON vendor command. + * + * @QCA_WLAN_VENDOR_ATTR_DRIVER_DISCONNECT_REASCON_CODE: u32 attribute. + * This attribute represents the driver specific reason codes (local + * driver/firmware initiated reasons for disconnection) defined + * in enum qca_disconnect_reason_codes. + */ +enum qca_wlan_vendor_attr_driver_disconnect_reason { + QCA_WLAN_VENDOR_ATTR_DRIVER_DISCONNECT_REASON_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_DRIVER_DISCONNECT_REASCON_CODE = 1, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_DRIVER_DISCONNECT_REASON_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_DRIVER_DISCONNECT_REASON_MAX = + QCA_WLAN_VENDOR_ATTR_DRIVER_DISCONNECT_REASON_AFTER_LAST - 1, +}; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/inc/wlan_cfg80211_scan.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/inc/wlan_cfg80211_scan.h new file mode 100644 index 0000000000000000000000000000000000000000..7d7aecd2c93cb54aa9483a6ef085e9994927278c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/inc/wlan_cfg80211_scan.h @@ -0,0 +1,417 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declares driver functions interfacing with linux kernel + */ + + +#ifndef _WLAN_CFG80211_SCAN_H_ +#define _WLAN_CFG80211_SCAN_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Max number of scans allowed from userspace */ +#define WLAN_MAX_SCAN_COUNT 8 + +/* GPS application requirement */ +#define QCOM_VENDOR_IE_ID 221 +#define QCOM_OUI1 0x00 +#define QCOM_OUI2 0xA0 +#define QCOM_OUI3 0xC6 +#define QCOM_VENDOR_IE_AGE_TYPE 0x100 +#define QCOM_VENDOR_IE_AGE_LEN (sizeof(qcom_ie_age) - 2) +#define SCAN_DONE_EVENT_BUF_SIZE 4096 +#define SCAN_WAKE_LOCK_CONNECT_DURATION (1 * 1000) /* in msec */ +#define SCAN_WAKE_LOCK_SCAN_DURATION (5 * 1000) /* in msec */ + +/** + * typedef struct qcom_ie_age - age ie + * + * @element_id: Element id + * @len: Length + * @oui_1: OUI 1 + * @oui_2: OUI 2 + * @oui_3: OUI 3 + * @type: Type + * @age: Age + * @tsf_delta: tsf delta from FW + * @beacon_tsf: original beacon TSF + * @seq_ctrl: sequence control field + */ +typedef struct { + u8 element_id; + u8 len; + u8 oui_1; + u8 oui_2; + u8 oui_3; + u32 type; + u32 age; + u32 tsf_delta; + u64 beacon_tsf; + u16 seq_ctrl; +} __attribute__ ((packed)) qcom_ie_age; + +/** + * struct osif_scan_pdev - OS scan private strcutre + * scan_req_q: Scan request queue + * scan_req_q_lock: Protect scan request queue + * req_id: Scan request Id + * runtime_pm_lock: Runtime suspend lock + * scan_wake_lock: Scan wake lock + */ +struct osif_scan_pdev{ + qdf_list_t scan_req_q; + qdf_mutex_t scan_req_q_lock; + wlan_scan_requester req_id; + qdf_runtime_lock_t runtime_pm_lock; + qdf_wake_lock_t scan_wake_lock; +}; + +/* + * enum scan_source - scan request source + * @NL_SCAN: Scan initiated from NL + * @VENDOR_SCAN: Scan intiated from vendor command + */ +enum scan_source { + NL_SCAN, + VENDOR_SCAN, +}; + +/** + * struct scan_req - Scan Request entry + * @node : List entry element + * @scan_request: scan request holder + * @scan_id: scan identifier used across host layers which is generated at WMI + * @source: scan request originator (NL/Vendor scan) + * @dev: net device (same as what is in scan_request) + * @scan_start_timestamp: scan start time + * + * Scan request linked list element + */ +struct scan_req { + qdf_list_node_t node; + struct cfg80211_scan_request *scan_request; + uint32_t scan_id; + uint8_t source; + struct net_device *dev; + qdf_time_t scan_start_timestamp; +}; + +/** + * struct scan_params - Scan params + * @source: scan request source + * @default_ie: default scan ie + * @vendor_ie: vendor ie + * @priority: scan priority + * @half_rate: Half rate flag + * @quarter_rate: Quarter rate flag + * @strict_pscan: strict passive scan flag + * @dwell_time_active: Active dwell time. Ignored if zero or inapplicable. + * @dwell_time_active_2g: 2.4 GHz specific active dwell time. Ignored if zero or + * inapplicable. + * @dwell_time_passive: Passive dwell time. Ignored if zero or inapplicable. + * @dwell_time_active_6g: 6 GHz specific active dwell time. Ignored if zero or + * inapplicable. + * @dwell_time_passive_6g: 6 GHz specific passive dwell time. Ignored if zero or + * inapplicable. + */ +struct scan_params { + uint8_t source; + struct element_info default_ie; + struct element_info vendor_ie; + enum scan_priority priority; + bool half_rate; + bool quarter_rate; + bool strict_pscan; + uint32_t dwell_time_active; + uint32_t dwell_time_active_2g; + uint32_t dwell_time_passive; + uint32_t dwell_time_active_6g; + uint32_t dwell_time_passive_6g; +}; + +/** + * struct wlan_cfg80211_inform_bss - BSS inform data + * @chan: channel the frame was received on + * @mgmt: beacon/probe resp frame + * @frame_len: frame length + * @rssi: signal strength in mBm (100*dBm) + * @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was received. + * @per_chain_rssi: per chain rssi received + */ +struct wlan_cfg80211_inform_bss { + struct ieee80211_channel *chan; + struct ieee80211_mgmt *mgmt; + size_t frame_len; + int rssi; + uint64_t boottime_ns; + uint8_t per_chain_rssi[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA]; +}; + + +#ifdef FEATURE_WLAN_SCAN_PNO +/** + * wlan_cfg80211_sched_scan_start() - cfg80211 scheduled scan(pno) start + * @vdev: vdev pointer + * @request: Pointer to cfg80211 scheduled scan start request + * @scan_backoff_multiplier: multiply scan period by this after max cycles + * + * Return: 0 for success, non zero for failure + */ +int wlan_cfg80211_sched_scan_start(struct wlan_objmgr_vdev *vdev, + struct cfg80211_sched_scan_request *request, + uint8_t scan_backoff_multiplier); + +/** + * wlan_cfg80211_sched_scan_stop() - cfg80211 scheduled scan(pno) stop + * @vdev: vdev pointer + * + * Return: 0 for success, non zero for failure + */ +int wlan_cfg80211_sched_scan_stop(struct wlan_objmgr_vdev *vdev); +#endif + +/** + * wlan_scan_runtime_pm_init() - API to initialize runtime pm context for scan + * @pdev: Pointer to pdev + * + * This will help to initialize scan runtime pm context separately. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_scan_runtime_pm_init(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_scan_runtime_pm_deinit() - API to deinitialize runtime pm + * for scan. + * @pdev: Pointer to pdev + * + * This will help to deinitialize scan runtime pm before deinitialize + * HIF + * + * Return: void + */ +void wlan_scan_runtime_pm_deinit(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_cfg80211_scan_priv_init() - API to initialize cfg80211 scan + * @pdev: Pointer to net device + * + * API to initialize cfg80211 scan module. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_cfg80211_scan_priv_init(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_cfg80211_scan_priv_deinit() - API to deinitialize cfg80211 scan + * @pdev: Pointer to net device + * + * API to deinitialize cfg80211 scan module. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_cfg80211_scan_priv_deinit( + struct wlan_objmgr_pdev *pdev); + +/** + * wlan_cfg80211_scan() - API to process cfg80211 scan request + * @vdev: Pointer to vdev + * @request: Pointer to scan request + * @params: scan params + * + * API to trigger scan and update cfg80211 scan database. + * scan dump command can be used to fetch scan results + * on receipt of scan complete event. + * + * Return: 0 for success, non zero for failure + */ +int wlan_cfg80211_scan(struct wlan_objmgr_vdev *vdev, + struct cfg80211_scan_request *request, + struct scan_params *params); + +/** + * wlan_cfg80211_inform_bss_frame_data() - API to inform beacon to cfg80211 + * @wiphy: wiphy + * @bss_data: bss data + * + * API to inform beacon to cfg80211 + * + * Return: pointer to bss entry + */ +struct cfg80211_bss * +wlan_cfg80211_inform_bss_frame_data(struct wiphy *wiphy, + struct wlan_cfg80211_inform_bss *bss); + +/** + * wlan_cfg80211_inform_bss_frame() - API to inform beacon to cfg80211 + * @pdev: Pointer to pdev + * @scan_params: scan entry + * @request: Pointer to scan request + * + * API to inform beacon to cfg80211 + * + * Return: void + */ +void wlan_cfg80211_inform_bss_frame(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_params); + +/** + * __wlan_cfg80211_unlink_bss_list() - flush bss from the kernel cache + * @wiphy: wiphy + * @bssid: bssid of the BSS to find + * @ssid: ssid of the BSS to find + * @ssid_len: ssid len of of the BSS to find + * + * Return: None + */ +void __wlan_cfg80211_unlink_bss_list(struct wiphy *wiphy, uint8_t *bssid, + uint8_t *ssid, uint8_t ssid_len); + +/** + * wlan_cfg80211_get_bss() - Get the bss entry matching the chan, bssid and ssid + * @wiphy: wiphy + * @channel: channel of the BSS to find + * @bssid: bssid of the BSS to find + * @ssid: ssid of the BSS to find + * @ssid_len: ssid len of of the BSS to find + * + * The API is a wrapper to get bss from kernel matching the chan, + * bssid and ssid + * + * Return: bss structure if found else NULL + */ +struct cfg80211_bss *wlan_cfg80211_get_bss(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *bssid, + const u8 *ssid, size_t ssid_len); + +/* + * wlan_cfg80211_unlink_bss_list : flush bss from the kernel cache + * @pdev: Pointer to pdev + * @scan_entry: scan entry + * + * Return: bss which is unlinked from kernel cache + */ +void wlan_cfg80211_unlink_bss_list(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry); + +/** + * wlan_vendor_abort_scan() - API to vendor abort scan + * @pdev: Pointer to pdev + * @data: pointer to data + * @data_len: Data length + * + * API to abort scan through vendor command + * + * Return: 0 for success, non zero for failure + */ +int wlan_vendor_abort_scan(struct wlan_objmgr_pdev *pdev, + const void *data, int data_len); + +/** + * wlan_cfg80211_abort_scan() - API to abort scan through cfg80211 + * @pdev: Pointer to pdev + * + * API to abort scan through cfg80211 request + * + * Return: 0 for success, non zero for failure + */ +int wlan_cfg80211_abort_scan(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_abort_scan() - Generic API to abort scan request + * @pdev: Pointer to pdev + * @pdev_id: pdev id + * @vdev_id: vdev id + * @scan_id: scan id + * @sync: if wait for scan complete is required + * + * Generic API to abort scans + * + * Return: 0 for success, non zero for failure + */ +QDF_STATUS wlan_abort_scan(struct wlan_objmgr_pdev *pdev, + uint32_t pdev_id, + uint32_t vdev_id, + wlan_scan_id scan_id, + bool sync); + +/** + * wlan_cfg80211_cleanup_scan_queue() - remove entries in scan queue + * @pdev: pdev pointer + * @dev: net device pointer + * + * Removes entries in scan queue depending on dev provided and sends scan + * complete event to NL. + * Removes all entries in scan queue, if dev provided is NULL + * + * Return: None + */ +void wlan_cfg80211_cleanup_scan_queue(struct wlan_objmgr_pdev *pdev, + struct net_device *dev); + +/** + * wlan_hdd_cfg80211_add_connected_pno_support() - Set connected PNO support + * @wiphy: Pointer to wireless phy + * + * This function is used to set connected PNO support to kernel + * + * Return: None + */ +#if defined(CFG80211_REPORT_BETTER_BSS_IN_SCHED_SCAN) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) +void wlan_scan_cfg80211_add_connected_pno_support(struct wiphy *wiphy); + +#else +static inline +void wlan_scan_cfg80211_add_connected_pno_support(struct wiphy *wiphy) +{ +} +#endif + +#if ((LINUX_VERSION_CODE > KERNEL_VERSION(4, 4, 0)) || \ + defined(CFG80211_MULTI_SCAN_PLAN_BACKPORT)) && \ + defined(FEATURE_WLAN_SCAN_PNO) +/** + * hdd_config_sched_scan_plans_to_wiphy() - configure sched scan plans to wiphy + * @wiphy: pointer to wiphy + * @config: pointer to config + * + * Return: None + */ +void wlan_config_sched_scan_plans_to_wiphy(struct wiphy *wiphy, + struct wlan_objmgr_psoc *psoc); +#else +static inline +void wlan_config_sched_scan_plans_to_wiphy(struct wiphy *wiphy, + struct wlan_objmgr_psoc *psoc) +{ +} +#endif /* FEATURE_WLAN_SCAN_PNO */ + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/src/wlan_cfg80211_scan.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/src/wlan_cfg80211_scan.c new file mode 100644 index 0000000000000000000000000000000000000000..199feba31c5076357d7b84095f46d7f19696e5ff --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/src/wlan_cfg80211_scan.c @@ -0,0 +1,2171 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: defines driver functions interfacing with linux kernel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cfg_ucfg_api.h" +#ifdef WLAN_POLICY_MGR_ENABLE +#include +#endif +#include +#ifdef FEATURE_WLAN_DIAG_SUPPORT +#include "host_diag_core_event.h" +#endif + +static const +struct nla_policy scan_policy[QCA_WLAN_VENDOR_ATTR_SCAN_MAX + 1] = { + [QCA_WLAN_VENDOR_ATTR_SCAN_FLAGS] = {.type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SCAN_TX_NO_CCK_RATE] = {.type = NLA_FLAG}, + [QCA_WLAN_VENDOR_ATTR_SCAN_COOKIE] = {.type = NLA_U64}, +}; + +#if defined(CFG80211_SCAN_RANDOM_MAC_ADDR) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +/** + * wlan_fill_scan_rand_attrs() - Populate the scan randomization attrs + * @vdev: pointer to objmgr vdev + * @flags: cfg80211 scan flags + * @mac_addr: random mac addr from cfg80211 + * @mac_addr_mask: mac addr mask from cfg80211 + * @randomize: output variable to check scan randomization status + * @addr: output variable to hold random addr + * @mask: output variable to hold mac mask + * + * Return: None + */ +static void wlan_fill_scan_rand_attrs(struct wlan_objmgr_vdev *vdev, + uint32_t flags, + uint8_t *mac_addr, + uint8_t *mac_addr_mask, + bool *randomize, + uint8_t *addr, + uint8_t *mask) +{ + *randomize = false; + if (!(flags & NL80211_SCAN_FLAG_RANDOM_ADDR)) + return; + + if (wlan_vdev_mlme_get_opmode(vdev) != QDF_STA_MODE) + return; + + if (wlan_vdev_is_up(vdev) == QDF_STATUS_SUCCESS) + return; + + *randomize = true; + memcpy(addr, mac_addr, QDF_MAC_ADDR_SIZE); + memcpy(mask, mac_addr_mask, QDF_MAC_ADDR_SIZE); + osif_debug("Random mac addr: "QDF_MAC_ADDR_FMT" and Random mac mask: "QDF_FULL_MAC_FMT, + QDF_MAC_ADDR_REF(addr), QDF_FULL_MAC_REF(mask)); +} + +/** + * wlan_scan_rand_attrs() - Wrapper function to fill scan random attrs + * @vdev: pointer to objmgr vdev + * @request: pointer to cfg80211 scan request + * @req: pointer to cmn module scan request + * + * This is a wrapper function which invokes wlan_fill_scan_rand_attrs() + * to fill random attributes of internal scan request with cfg80211_scan_request + * + * Return: None + */ +static void wlan_scan_rand_attrs(struct wlan_objmgr_vdev *vdev, + struct cfg80211_scan_request *request, + struct scan_start_request *req) +{ + bool *randomize = &req->scan_req.scan_random.randomize; + uint8_t *mac_addr = req->scan_req.scan_random.mac_addr; + uint8_t *mac_mask = req->scan_req.scan_random.mac_mask; + + wlan_fill_scan_rand_attrs(vdev, request->flags, request->mac_addr, + request->mac_addr_mask, randomize, mac_addr, + mac_mask); + if (!*randomize) + return; + + req->scan_req.scan_f_add_spoofed_mac_in_probe = true; + req->scan_req.scan_f_add_rand_seq_in_probe = true; +} +#else +/** + * wlan_scan_rand_attrs() - Wrapper function to fill scan random attrs + * @vdev: pointer to objmgr vdev + * @request: pointer to cfg80211 scan request + * @req: pointer to cmn module scan request + * + * This is a wrapper function which invokes wlan_fill_scan_rand_attrs() + * to fill random attributes of internal scan request with cfg80211_scan_request + * + * Return: None + */ +static void wlan_scan_rand_attrs(struct wlan_objmgr_vdev *vdev, + struct cfg80211_scan_request *request, + struct scan_start_request *req) +{ +} +#endif + +#ifdef FEATURE_WLAN_SCAN_PNO +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \ + defined(CFG80211_MULTI_SCAN_PLAN_BACKPORT)) + +/** + * wlan_config_sched_scan_plan() - configures the sched scan plans + * from the framework. + * @pno_req: pointer to PNO scan request + * @request: pointer to scan request from framework + * + * Return: None + */ +static void +wlan_config_sched_scan_plan(struct wlan_objmgr_psoc *psoc, + struct pno_scan_req_params *pno_req, + struct cfg80211_sched_scan_request *request) +{ + if (!ucfg_scan_get_user_config_sched_scan_plan(psoc) || + request->n_scan_plans == 1) { + pno_req->fast_scan_period = + request->scan_plans[0].interval * MSEC_PER_SEC; + /* + * if only one scan plan is configured from framework + * then both fast and slow scan should be configured with the + * same value that is why fast scan cycles are hardcoded to one + */ + pno_req->fast_scan_max_cycles = 1; + pno_req->slow_scan_period = + request->scan_plans[0].interval * MSEC_PER_SEC; + } + /* + * As of now max 2 scan plans were supported by firmware + * if number of scan plan supported by firmware increased below logic + * must change. + */ + else if (request->n_scan_plans == SCAN_PNO_MAX_PLAN_REQUEST) { + pno_req->fast_scan_period = + request->scan_plans[0].interval * MSEC_PER_SEC; + pno_req->fast_scan_max_cycles = + request->scan_plans[0].iterations; + pno_req->slow_scan_period = + request->scan_plans[1].interval * MSEC_PER_SEC; + } else { + osif_err("Invalid number of scan plans %d !!", + request->n_scan_plans); + } +} +#else +#define wlan_config_sched_scan_plan(psoc, pno_req, request) \ + __wlan_config_sched_scan_plan(pno_req, request, psoc) + +static void +__wlan_config_sched_scan_plan(struct pno_scan_req_params *pno_req, + struct cfg80211_sched_scan_request *request, + struct wlan_objmgr_psoc *psoc) +{ + uint32_t scan_timer_repeat_value, slow_scan_multiplier; + + scan_timer_repeat_value = ucfg_scan_get_scan_timer_repeat_value(psoc); + slow_scan_multiplier = ucfg_scan_get_slow_scan_multiplier(psoc); + + pno_req->fast_scan_period = request->interval; + pno_req->fast_scan_max_cycles = scan_timer_repeat_value; + pno_req->slow_scan_period = + (slow_scan_multiplier * pno_req->fast_scan_period); +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +static inline void +wlan_cfg80211_sched_scan_results(struct wiphy *wiphy, uint64_t reqid) +{ + cfg80211_sched_scan_results(wiphy); +} +#else +static inline void +wlan_cfg80211_sched_scan_results(struct wiphy *wiphy, uint64_t reqid) +{ + cfg80211_sched_scan_results(wiphy, reqid); +} +#endif + +/** + * wlan_cfg80211_pno_callback() - pno callback function to handle + * pno events. + * @vdev: vdev ptr + * @event: scan events + * @args: argument + * + * Return: void + */ +static void wlan_cfg80211_pno_callback(struct wlan_objmgr_vdev *vdev, + struct scan_event *event, + void *args) +{ + struct wlan_objmgr_pdev *pdev; + struct pdev_osif_priv *pdev_ospriv; + + if (event->type != SCAN_EVENT_TYPE_NLO_COMPLETE) + return; + + osif_debug("vdev id = %d", event->vdev_id); + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + osif_err("pdev is NULL"); + return; + } + + pdev_ospriv = wlan_pdev_get_ospriv(pdev); + if (!pdev_ospriv) { + osif_err("pdev_ospriv is NULL"); + return; + } + wlan_cfg80211_sched_scan_results(pdev_ospriv->wiphy, 0); +} + +#ifdef WLAN_POLICY_MGR_ENABLE +static bool wlan_cfg80211_is_ap_go_present(struct wlan_objmgr_psoc *psoc) +{ + return policy_mgr_mode_specific_connection_count(psoc, + PM_SAP_MODE, + NULL) || + policy_mgr_mode_specific_connection_count(psoc, + PM_P2P_GO_MODE, + NULL); +} + +static QDF_STATUS wlan_cfg80211_is_chan_ok_for_dnbs( + struct wlan_objmgr_psoc *psoc, + u16 chan_freq, bool *ok) +{ + QDF_STATUS status = policy_mgr_is_chan_ok_for_dnbs( + psoc, chan_freq, ok); + + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("DNBS check failed"); + return status; + } + + return QDF_STATUS_SUCCESS; +} +#else +static bool wlan_cfg80211_is_ap_go_present(struct wlan_objmgr_psoc *psoc) +{ + return false; +} + +static QDF_STATUS wlan_cfg80211_is_chan_ok_for_dnbs( + struct wlan_objmgr_psoc *psoc, + u16 chan_freq, + bool *ok) +{ + if (!ok) + return QDF_STATUS_E_INVAL; + + *ok = true; + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined(CFG80211_SCAN_RANDOM_MAC_ADDR) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +/** + * wlan_pno_scan_rand_attr() - Wrapper function to fill sched scan random attrs + * @vdev: pointer to objmgr vdev + * @request: pointer to cfg80211 sched scan request + * @req: pointer to cmn module pno scan request + * + * This is a wrapper function which invokes wlan_fill_scan_rand_attrs() + * to fill random attributes of internal pno scan + * with cfg80211_sched_scan_request + * + * Return: None + */ +static void wlan_pno_scan_rand_attr(struct wlan_objmgr_vdev *vdev, + struct cfg80211_sched_scan_request *request, + struct pno_scan_req_params *req) +{ + bool *randomize = &req->scan_random.randomize; + uint8_t *mac_addr = req->scan_random.mac_addr; + uint8_t *mac_mask = req->scan_random.mac_mask; + + wlan_fill_scan_rand_attrs(vdev, request->flags, request->mac_addr, + request->mac_addr_mask, randomize, mac_addr, + mac_mask); +} +#else +/** + * wlan_pno_scan_rand_attr() - Wrapper function to fill sched scan random attrs + * @vdev: pointer to objmgr vdev + * @request: pointer to cfg80211 sched scan request + * @req: pointer to cmn module pno scan request + * + * This is a wrapper function which invokes wlan_fill_scan_rand_attrs() + * to fill random attributes of internal pno scan + * with cfg80211_sched_scan_request + * + * Return: None + */ +static void wlan_pno_scan_rand_attr(struct wlan_objmgr_vdev *vdev, + struct cfg80211_sched_scan_request *request, + struct pno_scan_req_params *req) +{ +} +#endif + +/** + * wlan_hdd_sched_scan_update_relative_rssi() - update CPNO params + * @pno_request: pointer to PNO scan request + * @request: Pointer to cfg80211 scheduled scan start request + * + * This function is used to update Connected PNO params sent by kernel + * + * Return: None + */ +#if defined(CFG80211_REPORT_BETTER_BSS_IN_SCHED_SCAN) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) +static inline void wlan_hdd_sched_scan_update_relative_rssi( + struct pno_scan_req_params *pno_request, + struct cfg80211_sched_scan_request *request) +{ + pno_request->relative_rssi_set = request->relative_rssi_set; + pno_request->relative_rssi = request->relative_rssi; + if (NL80211_BAND_2GHZ == request->rssi_adjust.band) + pno_request->band_rssi_pref.band = WLAN_BAND_2_4_GHZ; + else if (NL80211_BAND_5GHZ == request->rssi_adjust.band) + pno_request->band_rssi_pref.band = WLAN_BAND_5_GHZ; + pno_request->band_rssi_pref.rssi = request->rssi_adjust.delta; +} +#else +static inline void wlan_hdd_sched_scan_update_relative_rssi( + struct pno_scan_req_params *pno_request, + struct cfg80211_sched_scan_request *request) +{ +} +#endif + +#ifdef FEATURE_WLAN_SCAN_PNO +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +static uint32_t wlan_config_sched_scan_start_delay( + struct cfg80211_sched_scan_request *request) +{ + return request->delay; +} +#else +static uint32_t wlan_config_sched_scan_start_delay( + struct cfg80211_sched_scan_request *request) +{ + return 0; +} +#endif /*(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) */ +#endif /* FEATURE_WLAN_SCAN_PNO */ + +int wlan_cfg80211_sched_scan_start(struct wlan_objmgr_vdev *vdev, + struct cfg80211_sched_scan_request *request, + uint8_t scan_backoff_multiplier) +{ + struct pno_scan_req_params *req; + int i, j, ret = 0; + QDF_STATUS status; + uint8_t num_chan = 0; + uint16_t chan_freq; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct wlan_objmgr_psoc *psoc; + uint32_t valid_ch[SCAN_PNO_MAX_NETW_CHANNELS_EX] = {0}; + bool enable_dfs_pno_chnl_scan; + + if (ucfg_scan_get_pno_in_progress(vdev)) { + osif_debug("pno is already in progress"); + return -EBUSY; + } + + if (ucfg_scan_get_pdev_status(pdev) != + SCAN_NOT_IN_PROGRESS) { + status = wlan_abort_scan(pdev, + wlan_objmgr_pdev_get_pdev_id(pdev), + INVAL_VDEV_ID, INVAL_SCAN_ID, true); + if (QDF_IS_STATUS_ERROR(status)) + return -EBUSY; + } + + req = qdf_mem_malloc(sizeof(*req)); + if (!req) + return -ENOMEM; + + wlan_pdev_obj_lock(pdev); + psoc = wlan_pdev_get_psoc(pdev); + wlan_pdev_obj_unlock(pdev); + + req->networks_cnt = request->n_match_sets; + req->vdev_id = wlan_vdev_get_id(vdev); + + if ((!req->networks_cnt) || + (req->networks_cnt > SCAN_PNO_MAX_SUPP_NETWORKS)) { + osif_err("Network input is not correct %d", + req->networks_cnt); + ret = -EINVAL; + goto error; + } + + if (request->n_channels > SCAN_PNO_MAX_NETW_CHANNELS_EX) { + osif_err("Incorrect number of channels %d", + request->n_channels); + ret = -EINVAL; + goto error; + } + + enable_dfs_pno_chnl_scan = ucfg_scan_is_dfs_chnl_scan_enabled(psoc); + if (request->n_channels) { + uint32_t buff_len; + char *chl; + int len = 0; + bool ap_or_go_present = wlan_cfg80211_is_ap_go_present(psoc); + + buff_len = (request->n_channels * 5) + 1; + chl = qdf_mem_malloc(buff_len); + if (!chl) { + ret = -ENOMEM; + goto error; + } + for (i = 0; i < request->n_channels; i++) { + chan_freq = request->channels[i]->center_freq; + if ((!enable_dfs_pno_chnl_scan) && + (wlan_reg_is_dfs_for_freq(pdev, chan_freq))) { + osif_debug("Dropping DFS channel freq :%d", + chan_freq); + continue; + } + if (wlan_reg_is_dsrc_freq(chan_freq)) + continue; + + if (ap_or_go_present) { + bool ok; + + status = + wlan_cfg80211_is_chan_ok_for_dnbs(psoc, + chan_freq, + &ok); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("DNBS check failed"); + qdf_mem_free(chl); + chl = NULL; + ret = -EINVAL; + goto error; + } + if (!ok) + continue; + } + len += qdf_scnprintf(chl + len, buff_len - len, " %d", chan_freq); + valid_ch[num_chan++] = chan_freq; + } + osif_debug("Channel-List[%d]:%s", num_chan, chl); + qdf_mem_free(chl); + chl = NULL; + /* If all channels are DFS and dropped, + * then ignore the PNO request + */ + if (!num_chan) { + osif_notice("Channel list empty due to filtering of DSRC"); + ret = -EINVAL; + goto error; + } + } + + /* Filling per profile params */ + for (i = 0; i < req->networks_cnt; i++) { + req->networks_list[i].ssid.length = + request->match_sets[i].ssid.ssid_len; + + if ((!req->networks_list[i].ssid.length) || + (req->networks_list[i].ssid.length > WLAN_SSID_MAX_LEN)) { + osif_err(" SSID Len %d is not correct for network %d", + req->networks_list[i].ssid.length, i); + ret = -EINVAL; + goto error; + } + + qdf_mem_copy(req->networks_list[i].ssid.ssid, + request->match_sets[i].ssid.ssid, + req->networks_list[i].ssid.length); + req->networks_list[i].authentication = 0; /*eAUTH_TYPE_ANY */ + req->networks_list[i].encryption = 0; /*eED_ANY */ + req->networks_list[i].bc_new_type = 0; /*eBCAST_UNKNOWN */ + + /*Copying list of valid channel into request */ + qdf_mem_copy(req->networks_list[i].channels, valid_ch, + num_chan * sizeof(uint32_t)); + req->networks_list[i].channel_cnt = num_chan; + req->networks_list[i].rssi_thresh = + request->match_sets[i].rssi_thold; + } + + /* set scan to passive if no SSIDs are specified in the request */ + if (0 == request->n_ssids) + req->do_passive_scan = true; + else + req->do_passive_scan = false; + + for (i = 0; i < request->n_ssids; i++) { + j = 0; + while (j < req->networks_cnt) { + if ((req->networks_list[j].ssid.length == + request->ssids[i].ssid_len) && + (!qdf_mem_cmp(req->networks_list[j].ssid.ssid, + request->ssids[i].ssid, + req->networks_list[j].ssid.length))) { + req->networks_list[j].bc_new_type = + SSID_BC_TYPE_HIDDEN; + break; + } + j++; + } + } + + /* + * Before Kernel 4.4 + * Driver gets only one time interval which is hard coded in + * supplicant for 10000ms. + * + * After Kernel 4.4 + * User can configure multiple scan_plans, each scan would have + * separate scan cycle and interval. (interval is in unit of second.) + * For our use case, we would only have supplicant set one scan_plan, + * and firmware also support only one as well, so pick up the first + * index. + * + * Taking power consumption into account + * firmware after gPNOScanTimerRepeatValue times fast_scan_period + * switches slow_scan_period. This is less frequent scans and firmware + * shall be in slow_scan_period mode until next PNO Start. + */ + wlan_config_sched_scan_plan(psoc, req, request); + req->delay_start_time = wlan_config_sched_scan_start_delay(request); + req->scan_backoff_multiplier = scan_backoff_multiplier; + + wlan_hdd_sched_scan_update_relative_rssi(req, request); + + psoc = wlan_pdev_get_psoc(pdev); + ucfg_scan_register_pno_cb(psoc, + wlan_cfg80211_pno_callback, NULL); + ucfg_scan_get_pno_def_params(vdev, req); + + if (req->scan_random.randomize) + wlan_pno_scan_rand_attr(vdev, request, req); + + if (ucfg_ie_whitelist_enabled(psoc, vdev)) + ucfg_copy_ie_whitelist_attrs(psoc, &req->ie_whitelist); + + osif_debug("Network count %d n_ssids %d fast_scan_period: %d msec slow_scan_period: %d msec, fast_scan_max_cycles: %d, relative_rssi %d band_pref %d, rssi_pref %d", + req->networks_cnt, request->n_ssids, req->fast_scan_period, + req->slow_scan_period, req->fast_scan_max_cycles, + req->relative_rssi, req->band_rssi_pref.band, + req->band_rssi_pref.rssi); + + for (i = 0; i < req->networks_cnt; i++) + osif_debug("[%d] ssid: %.*s, RSSI th %d bc NW type %u", + i, req->networks_list[i].ssid.length, + req->networks_list[i].ssid.ssid, + req->networks_list[i].rssi_thresh, + req->networks_list[i].bc_new_type); + + status = ucfg_scan_pno_start(vdev, req); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("Failed to enable PNO"); + ret = -EINVAL; + goto error; + } + +error: + qdf_mem_free(req); + return ret; +} + +int wlan_cfg80211_sched_scan_stop(struct wlan_objmgr_vdev *vdev) +{ + QDF_STATUS status; + + status = ucfg_scan_pno_stop(vdev); + if (QDF_IS_STATUS_ERROR(status)) + osif_debug("Failed to disable PNO"); + + return 0; +} +#endif /*FEATURE_WLAN_SCAN_PNO */ + +/** + * wlan_copy_bssid_scan_request() - API to copy the bssid to Scan request + * @scan_req: Pointer to scan_start_request + * @request: scan request from Supplicant + * + * This API copies the BSSID in scan request from Supplicant and copies it to + * the scan_start_request + * + * Return: None + */ +#if defined(CFG80211_SCAN_BSSID) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)) +static inline void +wlan_copy_bssid_scan_request(struct scan_start_request *scan_req, + struct cfg80211_scan_request *request) +{ + qdf_mem_copy(scan_req->scan_req.bssid_list[0].bytes, + request->bssid, QDF_MAC_ADDR_SIZE); +} +#else +static inline void +wlan_copy_bssid_scan_request(struct scan_start_request *scan_req, + struct cfg80211_scan_request *request) +{ + +} +#endif + +/** + * wlan_schedule_scan_start_request() - Schedule scan start request + * @pdev: pointer to pdev object + * @req: Pointer to the scan request + * @source: source of the scan request + * @scan_start_req: pointer to scan start request + * + * Schedule scan start request and enqueue scan request in the global scan + * list. This list stores the active scan request information. + * + * Return: QDF_STATUS + */ +static QDF_STATUS +wlan_schedule_scan_start_request(struct wlan_objmgr_pdev *pdev, + struct cfg80211_scan_request *req, + uint8_t source, + struct scan_start_request *scan_start_req) +{ + struct scan_req *scan_req; + QDF_STATUS status; + struct pdev_osif_priv *osif_ctx; + struct osif_scan_pdev *osif_scan; + + scan_req = qdf_mem_malloc(sizeof(*scan_req)); + if (!scan_req) { + ucfg_scm_scan_free_scan_request_mem(scan_start_req); + return QDF_STATUS_E_NOMEM; + } + + /* Get NL global context from objmgr*/ + osif_ctx = wlan_pdev_get_ospriv(pdev); + osif_scan = osif_ctx->osif_scan; + scan_req->scan_request = req; + scan_req->source = source; + scan_req->scan_id = scan_start_req->scan_req.scan_id; + scan_req->dev = req->wdev->netdev; + scan_req->scan_start_timestamp = qdf_get_time_of_the_day_ms(); + + qdf_mutex_acquire(&osif_scan->scan_req_q_lock); + if (qdf_list_size(&osif_scan->scan_req_q) < WLAN_MAX_SCAN_COUNT) { + status = ucfg_scan_start(scan_start_req); + if (QDF_IS_STATUS_SUCCESS(status)) { + qdf_list_insert_back(&osif_scan->scan_req_q, + &scan_req->node); + } else { + osif_err("scan req failed with error %d", status); + if (status == QDF_STATUS_E_RESOURCES) + osif_err("HO is in progress.So defer the scan by informing busy"); + } + } else { + ucfg_scm_scan_free_scan_request_mem(scan_start_req); + status = QDF_STATUS_E_RESOURCES; + } + + qdf_mutex_release(&osif_scan->scan_req_q_lock); + if (QDF_IS_STATUS_ERROR(status)) { + osif_rl_debug("Failed to enqueue Scan Req as max scan %d already queued", + qdf_list_size(&osif_scan->scan_req_q)); + qdf_mem_free(scan_req); + } + + return status; +} + +/** + * wlan_scan_request_dequeue() - dequeue scan request + * @nl_ctx: Global HDD context + * @scan_id: scan id + * @req: scan request + * @dev: net device + * @source : returns source of the scan request + * + * Return: QDF_STATUS + */ +static QDF_STATUS wlan_scan_request_dequeue( + struct wlan_objmgr_pdev *pdev, + uint32_t scan_id, struct cfg80211_scan_request **req, + uint8_t *source, struct net_device **dev, + qdf_time_t *scan_start_timestamp) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct scan_req *scan_req; + qdf_list_node_t *node = NULL, *next_node = NULL; + struct pdev_osif_priv *osif_ctx; + struct osif_scan_pdev *scan_priv; + + if ((!source) || (!req)) { + osif_err("source or request is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + /* Get NL global context from objmgr*/ + osif_ctx = wlan_pdev_get_ospriv(pdev); + if (!osif_ctx) { + osif_err("Failed to retrieve osif context"); + return status; + } + scan_priv = osif_ctx->osif_scan; + + qdf_mutex_acquire(&scan_priv->scan_req_q_lock); + if (qdf_list_empty(&scan_priv->scan_req_q)) { + osif_info("Scan List is empty"); + qdf_mutex_release(&scan_priv->scan_req_q_lock); + return QDF_STATUS_E_FAILURE; + } + + if (QDF_STATUS_SUCCESS != + qdf_list_peek_front(&scan_priv->scan_req_q, &next_node)) { + qdf_mutex_release(&scan_priv->scan_req_q_lock); + osif_err("Failed to remove Scan Req from queue"); + return QDF_STATUS_E_FAILURE; + } + + do { + node = next_node; + scan_req = qdf_container_of(node, struct scan_req, node); + if (scan_req->scan_id == scan_id) { + status = qdf_list_remove_node(&scan_priv->scan_req_q, + node); + if (status == QDF_STATUS_SUCCESS) { + *req = scan_req->scan_request; + *source = scan_req->source; + *dev = scan_req->dev; + *scan_start_timestamp = + scan_req->scan_start_timestamp; + qdf_mem_free(scan_req); + qdf_mutex_release(&scan_priv->scan_req_q_lock); + osif_debug("removed Scan id: %d, req = %pK, pending scans %d", + scan_id, req, + qdf_list_size(&scan_priv->scan_req_q)); + return QDF_STATUS_SUCCESS; + } else { + qdf_mutex_release(&scan_priv->scan_req_q_lock); + osif_err("Failed to remove scan id %d, pending scans %d", + scan_id, + qdf_list_size(&scan_priv->scan_req_q)); + return status; + } + } + } while (QDF_STATUS_SUCCESS == + qdf_list_peek_next(&scan_priv->scan_req_q, node, &next_node)); + qdf_mutex_release(&scan_priv->scan_req_q_lock); + osif_debug("Failed to find scan id %d", scan_id); + + return status; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)) +/** + * wlan_cfg80211_scan_done() - Scan completed callback to cfg80211 + * @netdev: Net device + * @req : Scan request + * @aborted : true scan aborted false scan success + * + * This function notifies scan done to cfg80211 + * + * Return: none + */ +static void wlan_cfg80211_scan_done(struct net_device *netdev, + struct cfg80211_scan_request *req, + bool aborted) +{ + struct cfg80211_scan_info info = { + .aborted = aborted + }; + + if (netdev->flags & IFF_UP) + cfg80211_scan_done(req, &info); +} +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) +/** + * wlan_cfg80211_scan_done() - Scan completed callback to cfg80211 + * @netdev: Net device + * @req : Scan request + * @aborted : true scan aborted false scan success + * + * This function notifies scan done to cfg80211 + * + * Return: none + */ +static void wlan_cfg80211_scan_done(struct net_device *netdev, + struct cfg80211_scan_request *req, + bool aborted) +{ + if (netdev->flags & IFF_UP) + cfg80211_scan_done(req, aborted); +} +#endif + +/** + * wlan_vendor_scan_callback() - Scan completed callback event + * + * @req : Scan request + * @aborted : true scan aborted false scan success + * + * This function sends scan completed callback event to NL. + * + * Return: none + */ +static void wlan_vendor_scan_callback(struct cfg80211_scan_request *req, + bool aborted) +{ + struct sk_buff *skb; + struct nlattr *attr; + int i; + uint8_t scan_status; + uint64_t cookie; + int index = QCA_NL80211_VENDOR_SUBCMD_SCAN_DONE_INDEX; + + skb = wlan_cfg80211_vendor_event_alloc(req->wdev->wiphy, req->wdev, + SCAN_DONE_EVENT_BUF_SIZE + 4 + + NLMSG_HDRLEN, + index, + GFP_ATOMIC); + + if (!skb) { + osif_err("skb alloc failed"); + qdf_mem_free(req); + return; + } + + cookie = (uintptr_t)req; + + attr = nla_nest_start(skb, QCA_WLAN_VENDOR_ATTR_SCAN_SSIDS); + if (!attr) + goto nla_put_failure; + for (i = 0; i < req->n_ssids; i++) { + if (nla_put(skb, i, req->ssids[i].ssid_len, req->ssids[i].ssid)) + goto nla_put_failure; + } + nla_nest_end(skb, attr); + + attr = nla_nest_start(skb, QCA_WLAN_VENDOR_ATTR_SCAN_FREQUENCIES); + if (!attr) + goto nla_put_failure; + for (i = 0; i < req->n_channels; i++) { + if (nla_put_u32(skb, i, req->channels[i]->center_freq)) + goto nla_put_failure; + } + nla_nest_end(skb, attr); + + if (req->ie && + nla_put(skb, QCA_WLAN_VENDOR_ATTR_SCAN_IE, req->ie_len, + req->ie)) + goto nla_put_failure; + + if (req->flags && + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_SCAN_FLAGS, req->flags)) + goto nla_put_failure; + + if (wlan_cfg80211_nla_put_u64(skb, QCA_WLAN_VENDOR_ATTR_SCAN_COOKIE, + cookie)) + goto nla_put_failure; + + scan_status = (aborted == true) ? VENDOR_SCAN_STATUS_ABORTED : + VENDOR_SCAN_STATUS_NEW_RESULTS; + if (nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_SCAN_STATUS, scan_status)) + goto nla_put_failure; + + wlan_cfg80211_vendor_event(skb, GFP_ATOMIC); + qdf_mem_free(req); + + return; + +nla_put_failure: + wlan_cfg80211_vendor_free_skb(skb); + qdf_mem_free(req); +} + +/** + * wlan_scan_acquire_wake_lock_timeout() - acquire scan wake lock + * @psoc: psoc ptr + * @scan_wake_lock: Scan wake lock + * @timeout: timeout in ms + * + * Return: void + */ +static inline +void wlan_scan_acquire_wake_lock_timeout(struct wlan_objmgr_psoc *psoc, + qdf_wake_lock_t *scan_wake_lock, + uint32_t timeout) +{ + if (!psoc || !scan_wake_lock) + return; + + if (ucfg_scan_wake_lock_in_user_scan(psoc)) + qdf_wake_lock_timeout_acquire(scan_wake_lock, timeout); +} + + +/** + * wlan_scan_release_wake_lock() - release scan wake lock + * @psoc: psoc ptr + * @scan_wake_lock: Scan wake lock + * + * Return: void + */ +#ifdef FEATURE_WLAN_DIAG_SUPPORT +static inline +void wlan_scan_release_wake_lock(struct wlan_objmgr_psoc *psoc, + qdf_wake_lock_t *scan_wake_lock) +{ + if (!psoc || !scan_wake_lock) + return; + + if (ucfg_scan_wake_lock_in_user_scan(psoc)) + qdf_wake_lock_release(scan_wake_lock, + WIFI_POWER_EVENT_WAKELOCK_SCAN); +} +#else +static inline +void wlan_scan_release_wake_lock(struct wlan_objmgr_psoc *psoc, + qdf_wake_lock_t *scan_wake_lock) +{ + if (!psoc || !scan_wake_lock) + return; + + if (ucfg_scan_wake_lock_in_user_scan(psoc)) + qdf_wake_lock_release(scan_wake_lock, 0); +} +#endif + +static +uint32_t wlan_scan_get_bss_count_for_scan(struct wlan_objmgr_pdev *pdev, + qdf_time_t scan_start_ts) +{ + struct scan_filter *filter; + qdf_list_t *list = NULL; + uint32_t count = 0; + + if (!scan_start_ts) + return count; + + filter = qdf_mem_malloc(sizeof(*filter)); + if (!filter) + return count; + + filter->ignore_auth_enc_type = true; + filter->age_threshold = qdf_get_time_of_the_day_ms() - scan_start_ts; + + list = ucfg_scan_get_result(pdev, filter); + + qdf_mem_free(filter); + + if (list) { + count = qdf_list_size(list); + ucfg_scan_purge_results(list); + } + + return count; +} + +/** + * wlan_cfg80211_scan_done_callback() - scan done callback function called after + * scan is finished + * @vdev: vdev ptr + * @event: Scan event + * @args: Scan cb arg + * + * Return: void + */ +static void wlan_cfg80211_scan_done_callback( + struct wlan_objmgr_vdev *vdev, + struct scan_event *event, + void *args) +{ + struct cfg80211_scan_request *req = NULL; + bool success = false; + uint32_t scan_id; + uint8_t source = NL_SCAN; + struct wlan_objmgr_pdev *pdev; + struct pdev_osif_priv *osif_priv; + struct net_device *netdev = NULL; + QDF_STATUS status; + qdf_time_t scan_start_timestamp = 0; + uint32_t unique_bss_count = 0; + + if (!event) { + osif_nofl_err("Invalid scan event received"); + return; + } + + scan_id = event->scan_id; + + qdf_mtrace(QDF_MODULE_ID_SCAN, QDF_MODULE_ID_OS_IF, event->type, + event->vdev_id, scan_id); + + if (event->type == SCAN_EVENT_TYPE_STARTED) + osif_nofl_info("scan start scan id %d", scan_id); + + if (!util_is_scan_completed(event, &success)) + return; + + pdev = wlan_vdev_get_pdev(vdev); + status = wlan_scan_request_dequeue( + pdev, scan_id, &req, &source, &netdev, + &scan_start_timestamp); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("Dequeue of scan request failed ID: %d", scan_id); + goto allow_suspend; + } + + if (!netdev) { + osif_err("net dev is NULL,Drop scan event Id: %d", scan_id); + goto allow_suspend; + } + + /* Make sure vdev is active */ + status = wlan_objmgr_vdev_try_get_ref(vdev, WLAN_OSIF_ID); + if (QDF_IS_STATUS_ERROR(status)) { + osif_err("Failed to get vdev reference: scan Id: %d", scan_id); + goto allow_suspend; + } + + /* + * Scan can be triggred from NL or vendor scan + * - If scan is triggered from NL then cfg80211 scan done should be + * called to updated scan completion to NL. + * - If scan is triggred through vendor command then + * scan done event will be posted + */ + if (NL_SCAN == source) + wlan_cfg80211_scan_done(netdev, req, !success); + else + wlan_vendor_scan_callback(req, !success); + + wlan_objmgr_vdev_release_ref(vdev, WLAN_OSIF_ID); + + unique_bss_count = wlan_scan_get_bss_count_for_scan(pdev, + scan_start_timestamp); + osif_nofl_info("vdev %d, scan id %d type %s(%d) reason %s(%d) scan found %d bss", + event->vdev_id, scan_id, + util_scan_get_ev_type_name(event->type), event->type, + util_scan_get_ev_reason_name(event->reason), + event->reason, unique_bss_count); +allow_suspend: + osif_priv = wlan_pdev_get_ospriv(pdev); + qdf_mutex_acquire(&osif_priv->osif_scan->scan_req_q_lock); + if (qdf_list_empty(&osif_priv->osif_scan->scan_req_q)) { + struct wlan_objmgr_psoc *psoc; + + qdf_mutex_release(&osif_priv->osif_scan->scan_req_q_lock); + qdf_runtime_pm_allow_suspend( + &osif_priv->osif_scan->runtime_pm_lock); + + psoc = wlan_pdev_get_psoc(pdev); + wlan_scan_release_wake_lock(psoc, + &osif_priv->osif_scan->scan_wake_lock); + /* + * Acquire wakelock to handle the case where APP's tries + * to suspend immediately after the driver gets connect + * request(i.e after scan) from supplicant, this result in + * app's is suspending and not able to process the connect + * request to AP + */ + wlan_scan_acquire_wake_lock_timeout(psoc, + &osif_priv->osif_scan->scan_wake_lock, + SCAN_WAKE_LOCK_CONNECT_DURATION); + } else { + qdf_mutex_release(&osif_priv->osif_scan->scan_req_q_lock); + } + +} + +QDF_STATUS wlan_scan_runtime_pm_init(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_osif_priv *osif_priv; + struct osif_scan_pdev *scan_priv; + + wlan_pdev_obj_lock(pdev); + osif_priv = wlan_pdev_get_ospriv(pdev); + wlan_pdev_obj_unlock(pdev); + + scan_priv = osif_priv->osif_scan; + + return qdf_runtime_lock_init(&scan_priv->runtime_pm_lock); +} + +void wlan_scan_runtime_pm_deinit(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_osif_priv *osif_priv; + struct osif_scan_pdev *scan_priv; + + wlan_pdev_obj_lock(pdev); + osif_priv = wlan_pdev_get_ospriv(pdev); + wlan_pdev_obj_unlock(pdev); + + scan_priv = osif_priv->osif_scan; + qdf_runtime_lock_deinit(&scan_priv->runtime_pm_lock); +} + +QDF_STATUS wlan_cfg80211_scan_priv_init(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_osif_priv *osif_priv; + struct osif_scan_pdev *scan_priv; + struct wlan_objmgr_psoc *psoc; + wlan_scan_requester req_id; + + psoc = wlan_pdev_get_psoc(pdev); + + req_id = ucfg_scan_register_requester(psoc, "CFG", + wlan_cfg80211_scan_done_callback, NULL); + + osif_priv = wlan_pdev_get_ospriv(pdev); + scan_priv = qdf_mem_malloc(sizeof(*scan_priv)); + if (!scan_priv) + return QDF_STATUS_E_NOMEM; + + /* Initialize the scan request queue */ + osif_priv->osif_scan = scan_priv; + scan_priv->req_id = req_id; + qdf_list_create(&scan_priv->scan_req_q, WLAN_MAX_SCAN_COUNT); + qdf_mutex_create(&scan_priv->scan_req_q_lock); + qdf_wake_lock_create(&scan_priv->scan_wake_lock, "scan_wake_lock"); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cfg80211_scan_priv_deinit(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_osif_priv *osif_priv; + struct osif_scan_pdev *scan_priv; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + osif_priv = wlan_pdev_get_ospriv(pdev); + + wlan_cfg80211_cleanup_scan_queue(pdev, NULL); + scan_priv = osif_priv->osif_scan; + qdf_wake_lock_destroy(&scan_priv->scan_wake_lock); + qdf_mutex_destroy(&scan_priv->scan_req_q_lock); + qdf_list_destroy(&scan_priv->scan_req_q); + ucfg_scan_unregister_requester(psoc, scan_priv->req_id); + osif_priv->osif_scan = NULL; + qdf_mem_free(scan_priv); + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_cfg80211_enqueue_for_cleanup() - Function to populate scan cleanup queue + * @scan_cleanup_q: Scan cleanup queue to be populated + * @scan_priv: Pointer to scan related data used by cfg80211 scan + * @dev: Netdevice pointer + * + * The function synchrounously iterates through the global scan queue to + * identify entries that have to be cleaned up, copies identified entries + * to another queue(to send scan complete event to NL later) and removes the + * entry from the global scan queue. + * + * Return: None + */ +static void +wlan_cfg80211_enqueue_for_cleanup(qdf_list_t *scan_cleanup_q, + struct osif_scan_pdev *scan_priv, + struct net_device *dev) +{ + struct scan_req *scan_req, *scan_cleanup; + qdf_list_node_t *node = NULL, *next_node = NULL; + + qdf_mutex_acquire(&scan_priv->scan_req_q_lock); + if (QDF_STATUS_SUCCESS != + qdf_list_peek_front(&scan_priv->scan_req_q, + &node)) { + qdf_mutex_release(&scan_priv->scan_req_q_lock); + return; + } + + while (node) { + /* + * Keep track of the next node, to traverse through the list + * in the event of the current node being deleted. + */ + qdf_list_peek_next(&scan_priv->scan_req_q, + node, &next_node); + scan_req = qdf_container_of(node, struct scan_req, node); + if (!dev || (dev == scan_req->dev)) { + scan_cleanup = qdf_mem_malloc(sizeof(struct scan_req)); + if (!scan_cleanup) { + qdf_mutex_release(&scan_priv->scan_req_q_lock); + return; + } + scan_cleanup->scan_request = scan_req->scan_request; + scan_cleanup->scan_id = scan_req->scan_id; + scan_cleanup->source = scan_req->source; + scan_cleanup->dev = scan_req->dev; + qdf_list_insert_back(scan_cleanup_q, + &scan_cleanup->node); + if (QDF_STATUS_SUCCESS != + qdf_list_remove_node(&scan_priv->scan_req_q, + node)) { + qdf_mutex_release(&scan_priv->scan_req_q_lock); + osif_err("Failed to remove scan request"); + return; + } + qdf_mem_free(scan_req); + } + node = next_node; + next_node = NULL; + } + qdf_mutex_release(&scan_priv->scan_req_q_lock); +} + +void wlan_cfg80211_cleanup_scan_queue(struct wlan_objmgr_pdev *pdev, + struct net_device *dev) +{ + struct scan_req *scan_req; + struct cfg80211_scan_request *req; + uint8_t source; + bool aborted = true; + struct pdev_osif_priv *osif_priv; + qdf_list_t scan_cleanup_q; + qdf_list_node_t *node = NULL; + + if (!pdev) { + osif_err("pdev is Null"); + return; + } + + osif_priv = wlan_pdev_get_ospriv(pdev); + + /* + * To avoid any race conditions, create a local list to copy all the + * scan entries to be removed and then send scan complete for each of + * the identified entries to NL. + */ + qdf_list_create(&scan_cleanup_q, WLAN_MAX_SCAN_COUNT); + wlan_cfg80211_enqueue_for_cleanup(&scan_cleanup_q, + osif_priv->osif_scan, dev); + + while (!qdf_list_empty(&scan_cleanup_q)) { + if (QDF_STATUS_SUCCESS != qdf_list_remove_front(&scan_cleanup_q, + &node)) { + osif_err("Failed to remove scan request"); + return; + } + scan_req = container_of(node, struct scan_req, node); + req = scan_req->scan_request; + source = scan_req->source; + if (NL_SCAN == source) + wlan_cfg80211_scan_done(scan_req->dev, req, + aborted); + else + wlan_vendor_scan_callback(req, aborted); + + qdf_mem_free(scan_req); + } + qdf_list_destroy(&scan_cleanup_q); + + return; +} + +/** + * wlan_cfg80211_update_scan_policy_type_flags() - Set scan flags according to + * scan request + * @scan_req: Pointer to csr scan req + * + * Return: None + */ +#if defined(CFG80211_SCAN_DBS_CONTROL_SUPPORT) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)) +static void wlan_cfg80211_update_scan_policy_type_flags( + struct cfg80211_scan_request *req, + struct scan_req_params *scan_req) +{ + if (req->flags & NL80211_SCAN_FLAG_HIGH_ACCURACY) + scan_req->scan_policy_high_accuracy = true; + if (req->flags & NL80211_SCAN_FLAG_LOW_SPAN) + scan_req->scan_policy_low_span = true; + if (req->flags & NL80211_SCAN_FLAG_LOW_POWER) + scan_req->scan_policy_low_power = true; +} +#else +static inline void wlan_cfg80211_update_scan_policy_type_flags( + struct cfg80211_scan_request *req, + struct scan_req_params *scan_req) +{ +} +#endif + +#ifdef WLAN_POLICY_MGR_ENABLE +static bool +wlan_cfg80211_allow_simultaneous_scan(struct wlan_objmgr_psoc *psoc) +{ + return policy_mgr_is_scan_simultaneous_capable(psoc); +} +#else +static bool +wlan_cfg80211_allow_simultaneous_scan(struct wlan_objmgr_psoc *psoc) +{ + return true; +} +#endif + +int wlan_cfg80211_scan(struct wlan_objmgr_vdev *vdev, + struct cfg80211_scan_request *request, + struct scan_params *params) +{ + struct scan_start_request *req; + struct wlan_ssid *pssid; + uint8_t i; + int ret = 0; + uint8_t num_chan = 0; + uint32_t c_freq; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + wlan_scan_requester req_id; + struct pdev_osif_priv *osif_priv; + struct wlan_objmgr_psoc *psoc; + wlan_scan_id scan_id; + bool is_p2p_scan = false; + enum wlan_band band; + QDF_STATUS qdf_status; + enum QDF_OPMODE opmode; + uint32_t extra_ie_len = 0; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + osif_err("Invalid psoc object"); + return -EINVAL; + } + opmode = wlan_vdev_mlme_get_opmode(vdev); + + osif_debug("%s(vdev%d): mode %d", request->wdev->netdev->name, + wlan_vdev_get_id(vdev), opmode); + + /* Get NL global context from objmgr*/ + osif_priv = wlan_pdev_get_ospriv(pdev); + if (!osif_priv) { + osif_err("Invalid osif priv object"); + return -EINVAL; + } + + /* + * For a non-SAP vdevs, if a scan is already going on i.e the scan queue + * is not empty, and the simultaneous scan is disabled, dont allow 2nd + * scan. + */ + qdf_mutex_acquire(&osif_priv->osif_scan->scan_req_q_lock); + if (!wlan_cfg80211_allow_simultaneous_scan(psoc) && + !qdf_list_empty(&osif_priv->osif_scan->scan_req_q) && + opmode != QDF_SAP_MODE) { + qdf_mutex_release(&osif_priv->osif_scan->scan_req_q_lock); + osif_err("Simultaneous scan disabled, reject scan"); + return -EBUSY; + } + qdf_mutex_release(&osif_priv->osif_scan->scan_req_q_lock); + + req = qdf_mem_malloc(sizeof(*req)); + if (!req) + return -EINVAL; + + /* Initialize the scan global params */ + ucfg_scan_init_default_params(vdev, req); + + req_id = osif_priv->osif_scan->req_id; + scan_id = ucfg_scan_get_scan_id(psoc); + if (!scan_id) { + osif_err("Invalid scan id"); + qdf_mem_free(req); + return -EINVAL; + } + + /* fill the scan request structure */ + req->vdev = vdev; + req->scan_req.vdev_id = wlan_vdev_get_id(vdev); + req->scan_req.scan_id = scan_id; + req->scan_req.scan_req_id = req_id; + + /* Update scan policy type flags according to cfg scan request */ + wlan_cfg80211_update_scan_policy_type_flags(request, + &req->scan_req); + /* + * Even though supplicant doesn't provide any SSIDs, n_ssids is + * set to 1. Because of this, driver is assuming that this is not + * wildcard scan and so is not aging out the scan results. + */ + if ((request->ssids) && (request->n_ssids == 1) && + ('\0' == request->ssids->ssid[0])) { + request->n_ssids = 0; + } + + if ((request->ssids) && (0 < request->n_ssids)) { + int j; + req->scan_req.num_ssids = request->n_ssids; + + if (req->scan_req.num_ssids > WLAN_SCAN_MAX_NUM_SSID) { + osif_info("number of ssid %d greater than MAX %d", + req->scan_req.num_ssids, + WLAN_SCAN_MAX_NUM_SSID); + req->scan_req.num_ssids = WLAN_SCAN_MAX_NUM_SSID; + } + /* copy all the ssid's and their length */ + for (j = 0; j < req->scan_req.num_ssids; j++) { + pssid = &req->scan_req.ssid[j]; + /* get the ssid length */ + pssid->length = request->ssids[j].ssid_len; + if (pssid->length > WLAN_SSID_MAX_LEN) + pssid->length = WLAN_SSID_MAX_LEN; + qdf_mem_copy(pssid->ssid, + &request->ssids[j].ssid[0], + pssid->length); + } + } + if (request->ssids || + (opmode == QDF_P2P_GO_MODE) || (opmode == QDF_P2P_DEVICE_MODE)) + req->scan_req.scan_f_passive = false; + + if (params->half_rate) + req->scan_req.scan_f_half_rate = true; + else if (params->quarter_rate) + req->scan_req.scan_f_quarter_rate = true; + + if (params->strict_pscan) + req->scan_req.scan_f_strict_passive_pch = true; + + if ((request->n_ssids == 1) && request->ssids && + !qdf_mem_cmp(&request->ssids[0], "DIRECT-", 7)) + is_p2p_scan = true; + + if (is_p2p_scan && request->no_cck) + req->scan_req.scan_type = SCAN_TYPE_P2P_SEARCH; + + if (params->dwell_time_active) + req->scan_req.dwell_time_active = params->dwell_time_active; + + if (params->dwell_time_active_2g) + req->scan_req.dwell_time_active_2g = + params->dwell_time_active_2g; + + if (params->dwell_time_passive) + req->scan_req.dwell_time_passive = params->dwell_time_passive; + + if (params->dwell_time_active_6g) + req->scan_req.dwell_time_active_6g = + params->dwell_time_active_6g; + + if (params->dwell_time_passive_6g) + req->scan_req.dwell_time_passive_6g = + params->dwell_time_passive_6g; + + /* Set dwell time mode according to scan policy type flags */ + if (ucfg_scan_cfg_honour_nl_scan_policy_flags(psoc)) { + if (req->scan_req.scan_policy_high_accuracy) + req->scan_req.adaptive_dwell_time_mode = + SCAN_DWELL_MODE_STATIC; + if (req->scan_req.scan_policy_low_power || + req->scan_req.scan_policy_low_span) + req->scan_req.adaptive_dwell_time_mode = + SCAN_DWELL_MODE_AGGRESSIVE; + } + + /* + * FW require at least 1 MAC to send probe request. + * If MAC is all 0 set it to BC addr as this is the address on + * which fw will send probe req. + */ + req->scan_req.num_bssid = 1; + wlan_copy_bssid_scan_request(req, request); + if (qdf_is_macaddr_zero(&req->scan_req.bssid_list[0])) + qdf_set_macaddr_broadcast(&req->scan_req.bssid_list[0]); + + if (request->n_channels) { +#ifdef WLAN_POLICY_MGR_ENABLE + bool ap_or_go_present = + policy_mgr_mode_specific_connection_count( + psoc, PM_SAP_MODE, NULL) || + policy_mgr_mode_specific_connection_count( + psoc, PM_P2P_GO_MODE, NULL); +#endif + for (i = 0; i < request->n_channels; i++) { + c_freq = request->channels[i]->center_freq; + if (wlan_reg_is_dsrc_freq(c_freq)) + continue; +#ifdef WLAN_POLICY_MGR_ENABLE + if (ap_or_go_present) { + bool ok; + + qdf_status = policy_mgr_is_chan_ok_for_dnbs( + psoc, c_freq, &ok); + + if (QDF_IS_STATUS_ERROR(qdf_status)) { + osif_err("DNBS check failed"); + ret = -EINVAL; + goto err; + } + if (!ok) + continue; + } +#endif + req->scan_req.chan_list.chan[num_chan].freq = c_freq; + band = util_scan_scm_freq_to_band(c_freq); + if (band == WLAN_BAND_2_4_GHZ) + req->scan_req.chan_list.chan[num_chan].phymode = + SCAN_PHY_MODE_11G; + else + req->scan_req.chan_list.chan[num_chan].phymode = + SCAN_PHY_MODE_11A; + num_chan++; + if (num_chan >= NUM_CHANNELS) + break; + } + } + if (!num_chan) { + osif_err("Received zero non-dsrc channels"); + ret = -EINVAL; + goto err; + } + req->scan_req.chan_list.num_chan = num_chan; + + /* P2P increase the scan priority */ + if (is_p2p_scan) + req->scan_req.scan_priority = SCAN_PRIORITY_HIGH; + + if (params->priority != SCAN_PRIORITY_COUNT) + req->scan_req.scan_priority = params->priority; + + if (request->ie_len) + extra_ie_len = request->ie_len; + else if (params->default_ie.ptr && params->default_ie.len) + extra_ie_len = params->default_ie.len; + + if (params->vendor_ie.ptr && params->vendor_ie.len) + extra_ie_len += params->vendor_ie.len; + + if (extra_ie_len) { + req->scan_req.extraie.ptr = qdf_mem_malloc(extra_ie_len); + if (!req->scan_req.extraie.ptr) { + ret = -ENOMEM; + goto err; + } + } + + if (request->ie_len) { + req->scan_req.extraie.len = request->ie_len; + qdf_mem_copy(req->scan_req.extraie.ptr, request->ie, + request->ie_len); + } else if (params->default_ie.ptr && params->default_ie.len) { + req->scan_req.extraie.len = params->default_ie.len; + qdf_mem_copy(req->scan_req.extraie.ptr, params->default_ie.ptr, + params->default_ie.len); + } + + if (params->vendor_ie.ptr && params->vendor_ie.len) { + qdf_mem_copy((req->scan_req.extraie.ptr + + req->scan_req.extraie.len), + params->vendor_ie.ptr, params->vendor_ie.len); + + req->scan_req.extraie.len += params->vendor_ie.len; + } + + if (!is_p2p_scan) { + if (req->scan_req.scan_random.randomize) + wlan_scan_rand_attrs(vdev, request, req); + if (ucfg_ie_whitelist_enabled(psoc, vdev) && + ucfg_copy_ie_whitelist_attrs(psoc, + &req->scan_req.ie_whitelist)) + req->scan_req.scan_f_en_ie_whitelist_in_probe = true; + } + + if (request->flags & NL80211_SCAN_FLAG_FLUSH) + ucfg_scan_flush_results(pdev, NULL); + + /* + * Acquire wakelock to handle the case where APP's send scan to connect. + * If suspend is received during scan scan will be aborted and APP will + * not get scan result and not connect. eg if PNO is implemented in + * framework. + */ + wlan_scan_acquire_wake_lock_timeout(psoc, + &osif_priv->osif_scan->scan_wake_lock, + SCAN_WAKE_LOCK_SCAN_DURATION); + + qdf_runtime_pm_prevent_suspend( + &osif_priv->osif_scan->runtime_pm_lock); + + qdf_status = wlan_schedule_scan_start_request(pdev, request, + params->source, req); + if (QDF_IS_STATUS_ERROR(qdf_status)) { + qdf_mutex_acquire(&osif_priv->osif_scan->scan_req_q_lock); + if (qdf_list_empty(&osif_priv->osif_scan->scan_req_q)) { + qdf_mutex_release( + &osif_priv->osif_scan->scan_req_q_lock); + qdf_runtime_pm_allow_suspend( + &osif_priv->osif_scan->runtime_pm_lock); + wlan_scan_release_wake_lock( + psoc, + &osif_priv->osif_scan->scan_wake_lock); + } else { + qdf_mutex_release( + &osif_priv->osif_scan->scan_req_q_lock); + } + } + + return qdf_status_to_os_return(qdf_status); + +err: + qdf_mem_free(req); + return ret; +} + +/** + * wlan_get_scanid() - API to get the scan id + * from the scan cookie attribute. + * @pdev: Pointer to pdev object + * @scan_id: Pointer to scan id + * @cookie : Scan cookie attribute + * + * API to get the scan id from the scan cookie attribute + * sent from supplicant by matching scan request. + * + * Return: 0 for success, non zero for failure + */ +static int wlan_get_scanid(struct wlan_objmgr_pdev *pdev, + uint32_t *scan_id, uint64_t cookie) +{ + struct scan_req *scan_req; + qdf_list_node_t *node = NULL; + qdf_list_node_t *ptr_node = NULL; + int ret = -EINVAL; + struct pdev_osif_priv *osif_ctx; + struct osif_scan_pdev *scan_priv; + + /* Get NL global context from objmgr*/ + osif_ctx = wlan_pdev_get_ospriv(pdev); + if (!osif_ctx) { + osif_err("Failed to retrieve osif context"); + return ret; + } + scan_priv = osif_ctx->osif_scan; + qdf_mutex_acquire(&scan_priv->scan_req_q_lock); + if (qdf_list_empty(&scan_priv->scan_req_q)) { + qdf_mutex_release(&scan_priv->scan_req_q_lock); + osif_err("Failed to retrieve scan id"); + return ret; + } + + if (QDF_STATUS_SUCCESS != + qdf_list_peek_front(&scan_priv->scan_req_q, + &ptr_node)) { + qdf_mutex_release(&scan_priv->scan_req_q_lock); + return ret; + } + + do { + node = ptr_node; + scan_req = qdf_container_of(node, struct scan_req, node); + if (cookie == + (uintptr_t)(scan_req->scan_request)) { + *scan_id = scan_req->scan_id; + ret = 0; + break; + } + } while (QDF_STATUS_SUCCESS == + qdf_list_peek_next(&scan_priv->scan_req_q, + node, &ptr_node)); + + qdf_mutex_release(&scan_priv->scan_req_q_lock); + + return ret; +} + +QDF_STATUS wlan_abort_scan(struct wlan_objmgr_pdev *pdev, + uint32_t pdev_id, uint32_t vdev_id, + wlan_scan_id scan_id, bool sync) +{ + struct scan_cancel_request *req; + struct pdev_osif_priv *osif_ctx; + struct osif_scan_pdev *scan_priv; + QDF_STATUS status; + struct wlan_objmgr_vdev *vdev; + + req = qdf_mem_malloc(sizeof(*req)); + if (!req) + return QDF_STATUS_E_NOMEM; + + /* Get NL global context from objmgr*/ + osif_ctx = wlan_pdev_get_ospriv(pdev); + if (!osif_ctx) { + osif_err("Failed to retrieve osif context"); + qdf_mem_free(req); + return QDF_STATUS_E_FAILURE; + } + if (vdev_id == INVAL_VDEV_ID) + vdev = wlan_objmgr_pdev_get_first_vdev(pdev, WLAN_OSIF_ID); + else + vdev = wlan_objmgr_get_vdev_by_id_from_pdev(pdev, + vdev_id, WLAN_OSIF_ID); + + if (!vdev) { + qdf_mem_free(req); + return QDF_STATUS_E_INVAL; + } + scan_priv = osif_ctx->osif_scan; + req->cancel_req.requester = scan_priv->req_id; + req->vdev = vdev; + req->cancel_req.scan_id = scan_id; + req->cancel_req.pdev_id = pdev_id; + req->cancel_req.vdev_id = vdev_id; + if (scan_id != INVAL_SCAN_ID && scan_id != CANCEL_HOST_SCAN_ID) + req->cancel_req.req_type = WLAN_SCAN_CANCEL_SINGLE; + else if (scan_id == CANCEL_HOST_SCAN_ID) + req->cancel_req.req_type = WLAN_SCAN_CANCEL_HOST_VDEV_ALL; + else if (vdev_id == INVAL_VDEV_ID) + req->cancel_req.req_type = WLAN_SCAN_CANCEL_PDEV_ALL; + else + req->cancel_req.req_type = WLAN_SCAN_CANCEL_VDEV_ALL; + + osif_debug("Type %d Vdev %d pdev %d scan id %d sync %d", + req->cancel_req.req_type, req->cancel_req.vdev_id, + req->cancel_req.pdev_id, req->cancel_req.scan_id, sync); + + if (sync) + status = ucfg_scan_cancel_sync(req); + else + status = ucfg_scan_cancel(req); + if (QDF_IS_STATUS_ERROR(status)) + osif_err("Cancel scan request failed"); + + wlan_objmgr_vdev_release_ref(vdev, WLAN_OSIF_ID); + + return status; +} + +qdf_export_symbol(wlan_abort_scan); + +int wlan_cfg80211_abort_scan(struct wlan_objmgr_pdev *pdev) +{ + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + if (ucfg_scan_get_pdev_status(pdev) != + SCAN_NOT_IN_PROGRESS) + wlan_abort_scan(pdev, pdev_id, + INVAL_VDEV_ID, INVAL_SCAN_ID, true); + + return 0; +} + +int wlan_vendor_abort_scan(struct wlan_objmgr_pdev *pdev, + const void *data, int data_len) +{ + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SCAN_MAX + 1]; + int ret = -EINVAL; + wlan_scan_id scan_id; + uint64_t cookie; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + if (wlan_cfg80211_nla_parse(tb, QCA_WLAN_VENDOR_ATTR_SCAN_MAX, data, + data_len, scan_policy)) { + osif_err("Invalid ATTR"); + return ret; + } + + if (tb[QCA_WLAN_VENDOR_ATTR_SCAN_COOKIE]) { + cookie = nla_get_u64( + tb[QCA_WLAN_VENDOR_ATTR_SCAN_COOKIE]); + ret = wlan_get_scanid(pdev, &scan_id, cookie); + if (ret != 0) + return ret; + if (ucfg_scan_get_pdev_status(pdev) != + SCAN_NOT_IN_PROGRESS) + wlan_abort_scan(pdev, INVAL_PDEV_ID, + INVAL_VDEV_ID, scan_id, true); + } + return 0; +} + +static inline struct ieee80211_channel * +wlan_get_ieee80211_channel(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + int chan_freq) +{ + struct ieee80211_channel *chan; + + chan = ieee80211_get_channel(wiphy, chan_freq); + if (!chan) + osif_err("chan is NULL, freq: %d", chan_freq); + + return chan; +} + +#ifdef WLAN_ENABLE_AGEIE_ON_SCAN_RESULTS +static inline int wlan_get_frame_len(struct scan_cache_entry *scan_params) +{ + return util_scan_entry_frame_len(scan_params) + sizeof(qcom_ie_age); +} + +static inline void wlan_add_age_ie(uint8_t *mgmt_frame, + struct scan_cache_entry *scan_params) +{ + qcom_ie_age *qie_age = NULL; + + /* GPS Requirement: need age ie per entry. Using vendor specific. */ + /* Assuming this is the last IE, copy at the end */ + qie_age = (qcom_ie_age *) (mgmt_frame + + util_scan_entry_frame_len(scan_params)); + qie_age->element_id = QCOM_VENDOR_IE_ID; + qie_age->len = QCOM_VENDOR_IE_AGE_LEN; + qie_age->oui_1 = QCOM_OUI1; + qie_age->oui_2 = QCOM_OUI2; + qie_age->oui_3 = QCOM_OUI3; + qie_age->type = QCOM_VENDOR_IE_AGE_TYPE; + /* + * Lowi expects the timestamp of bss in units of 1/10 ms. In driver + * all bss related timestamp is in units of ms. Due to this when scan + * results are sent to lowi the scan age is high.To address this, + * send age in units of 1/10 ms. + */ + qie_age->age = + (uint32_t)(qdf_mc_timer_get_system_time() - + scan_params->scan_entry_time)/10; + qie_age->tsf_delta = scan_params->tsf_delta; + memcpy(&qie_age->beacon_tsf, scan_params->tsf_info.data, + sizeof(qie_age->beacon_tsf)); + memcpy(&qie_age->seq_ctrl, &scan_params->seq_num, + sizeof(qie_age->seq_ctrl)); +} +#else +static inline int wlan_get_frame_len(struct scan_cache_entry *scan_params) +{ + return util_scan_entry_frame_len(scan_params); +} + +static inline void wlan_add_age_ie(uint8_t *mgmt_frame, + struct scan_cache_entry *scan_params) +{ +} +#endif /* WLAN_ENABLE_AGEIE_ON_SCAN_RESULTS */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \ + defined(CFG80211_INFORM_BSS_FRAME_DATA) +/** + * wlan_fill_per_chain_rssi() - fill per chain RSSI in inform bss + * @data: bss data + * @per_chain_snr: per chain RSSI + * + * Return: void + */ +#if defined(CFG80211_SCAN_PER_CHAIN_RSSI_SUPPORT) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)) +static void wlan_fill_per_chain_rssi(struct cfg80211_inform_bss *data, + struct wlan_cfg80211_inform_bss *bss) +{ + + uint32_t i; + + if (!bss || !data) { + osif_err("Received bss is NULL"); + return; + } + for (i = 0; i < WLAN_MGMT_TXRX_HOST_MAX_ANTENNA; i++) { + if (!bss->per_chain_rssi[i] || + (bss->per_chain_rssi[i] == WLAN_INVALID_PER_CHAIN_RSSI)) + continue; + data->chain_signal[i] = bss->per_chain_rssi[i]; + data->chains |= BIT(i); + } +} +#else +static inline void +wlan_fill_per_chain_rssi(struct cfg80211_inform_bss *data, + struct wlan_cfg80211_inform_bss *bss) +{ +} +#endif + +struct cfg80211_bss * +wlan_cfg80211_inform_bss_frame_data(struct wiphy *wiphy, + struct wlan_cfg80211_inform_bss *bss) +{ + struct cfg80211_inform_bss data = {0}; + + if (!bss) { + osif_err("bss is null"); + return NULL; + } + wlan_fill_per_chain_rssi(&data, bss); + + data.chan = bss->chan; + data.boottime_ns = bss->boottime_ns; + data.signal = bss->rssi; + return cfg80211_inform_bss_frame_data(wiphy, &data, bss->mgmt, + bss->frame_len, GFP_ATOMIC); +} +#else +struct cfg80211_bss * +wlan_cfg80211_inform_bss_frame_data(struct wiphy *wiphy, + struct wlan_cfg80211_inform_bss *bss) + +{ + return cfg80211_inform_bss_frame(wiphy, bss->chan, bss->mgmt, + bss->frame_len, + bss->rssi, GFP_ATOMIC); +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) +static inline void wlan_cfg80211_put_bss(struct wiphy *wiphy, + struct cfg80211_bss *bss) +{ + cfg80211_put_bss(wiphy, bss); +} +#else +static inline void wlan_cfg80211_put_bss(struct wiphy *wiphy, + struct cfg80211_bss *bss) +{ + cfg80211_put_bss(bss); +} +#endif + +void wlan_cfg80211_inform_bss_frame(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_params) +{ + struct pdev_osif_priv *pdev_ospriv = wlan_pdev_get_ospriv(pdev); + struct wiphy *wiphy; + struct cfg80211_bss *bss = NULL; + struct wlan_cfg80211_inform_bss bss_data = {0}; + + if (!pdev_ospriv) { + osif_err("os_priv is NULL"); + return; + } + + wiphy = pdev_ospriv->wiphy; + + bss_data.frame_len = wlan_get_frame_len(scan_params); + bss_data.mgmt = qdf_mem_malloc_atomic(bss_data.frame_len); + if (!bss_data.mgmt) { + osif_err("bss mem alloc failed for seq %d", + scan_params->seq_num); + return; + } + qdf_mem_copy(bss_data.mgmt, + util_scan_entry_frame_ptr(scan_params), + util_scan_entry_frame_len(scan_params)); + /* + * Android does not want the timestamp from the frame. + * Instead it wants a monotonic increasing value + */ + bss_data.mgmt->u.probe_resp.timestamp = qdf_get_monotonic_boottime(); + wlan_add_age_ie((uint8_t *)bss_data.mgmt, scan_params); + /* + * Based on .ini configuration, raw rssi can be reported for bss. + * Raw rssi is typically used for estimating power. + */ + bss_data.rssi = scan_params->rssi_raw; + + bss_data.chan = wlan_get_ieee80211_channel(wiphy, pdev, + scan_params->channel.chan_freq); + if (!bss_data.chan) { + osif_err("Channel not found for bss "QDF_MAC_ADDR_FMT" seq %d chan_freq %d", + QDF_MAC_ADDR_REF(bss_data.mgmt->bssid), + scan_params->seq_num, + scan_params->channel.chan_freq); + qdf_mem_free(bss_data.mgmt); + return; + } + + /* + * Supplicant takes the signal strength in terms of + * mBm (1 dBm = 100 mBm). + */ + bss_data.rssi = QDF_MIN(bss_data.rssi, 0) * 100; + + bss_data.boottime_ns = scan_params->boottime_ns; + + qdf_mem_copy(bss_data.per_chain_rssi, scan_params->per_chain_rssi, + WLAN_MGMT_TXRX_HOST_MAX_ANTENNA); + + bss = wlan_cfg80211_inform_bss_frame_data(wiphy, &bss_data); + if (!bss) + osif_err("failed to inform bss "QDF_MAC_ADDR_FMT" seq %d", + QDF_MAC_ADDR_REF(bss_data.mgmt->bssid), + scan_params->seq_num); + else + wlan_cfg80211_put_bss(wiphy, bss); + + qdf_mem_free(bss_data.mgmt); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) && \ + !defined(WITH_BACKPORTS) && !defined(IEEE80211_PRIVACY) +struct cfg80211_bss *wlan_cfg80211_get_bss(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *bssid, const u8 *ssid, + size_t ssid_len) +{ + return cfg80211_get_bss(wiphy, channel, bssid, + ssid, ssid_len, + WLAN_CAPABILITY_ESS, + WLAN_CAPABILITY_ESS); +} +#else +struct cfg80211_bss *wlan_cfg80211_get_bss(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *bssid, const u8 *ssid, + size_t ssid_len) +{ + return cfg80211_get_bss(wiphy, channel, bssid, + ssid, ssid_len, + IEEE80211_BSS_TYPE_ESS, + IEEE80211_PRIVACY_ANY); +} +#endif + +void __wlan_cfg80211_unlink_bss_list(struct wiphy *wiphy, uint8_t *bssid, + uint8_t *ssid, uint8_t ssid_len) +{ + struct cfg80211_bss *bss = NULL; + + bss = wlan_cfg80211_get_bss(wiphy, NULL, bssid, + ssid, ssid_len); + if (!bss) { + osif_info("BSS "QDF_MAC_ADDR_FMT" not found", + QDF_MAC_ADDR_REF(bssid)); + } else { + osif_debug("unlink entry for ssid:%.*s and BSSID "QDF_MAC_ADDR_FMT, + ssid_len, ssid, QDF_MAC_ADDR_REF(bssid)); + cfg80211_unlink_bss(wiphy, bss); + wlan_cfg80211_put_bss(wiphy, bss); + } + + /* + * Kernel creates separate entries into it's bss list for probe resp + * and beacon for hidden AP. Both have separate ref count and thus + * deleting one will not delete other entry. + * If beacon entry of the hidden AP is not deleted and AP switch to + * broadcasting SSID from Hiding SSID, kernel will reject the beacon + * entry. So unlink the hidden beacon entry (if present) as well from + * kernel, to avoid such issue. + */ + bss = wlan_cfg80211_get_bss(wiphy, NULL, bssid, NULL, 0); + if (!bss) { + osif_debug("Hidden bss not found for Ssid:%.*s BSSID: "QDF_MAC_ADDR_FMT" sid_len %d", + ssid_len, ssid, QDF_MAC_ADDR_REF(bssid), ssid_len); + } else { + osif_debug("unlink entry for Hidden ssid:%.*s and BSSID "QDF_MAC_ADDR_FMT, + ssid_len, ssid, QDF_MAC_ADDR_REF(bssid)); + + cfg80211_unlink_bss(wiphy, bss); + /* cfg80211_get_bss get bss with ref count so release it */ + wlan_cfg80211_put_bss(wiphy, bss); + } +} +void wlan_cfg80211_unlink_bss_list(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry) +{ + struct pdev_osif_priv *pdev_ospriv = wlan_pdev_get_ospriv(pdev); + struct wiphy *wiphy; + + if (!pdev_ospriv) { + osif_err("os_priv is NULL"); + return; + } + + wiphy = pdev_ospriv->wiphy; + + __wlan_cfg80211_unlink_bss_list(wiphy, scan_entry->bssid.bytes, + scan_entry->ssid.ssid, + scan_entry->ssid.length); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +/* + * wlan_scan_wiphy_set_max_sched_scans() - set maximum number of scheduled scans + * to wiphy. + * @wiphy: pointer to wiphy + * @max_scans: max num scans to be configured + * + */ +static inline void +wlan_scan_wiphy_set_max_sched_scans(struct wiphy *wiphy, uint8_t max_scans) +{ + if (max_scans == 0) + wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_SCHED_SCAN; + else + wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; +} +#else +static inline void +wlan_scan_wiphy_set_max_sched_scans(struct wiphy *wiphy, uint8_t max_scans) +{ + wiphy->max_sched_scan_reqs = max_scans; +} +#endif /* KERNEL_VERSION(4, 12, 0) */ + +#if defined(CFG80211_REPORT_BETTER_BSS_IN_SCHED_SCAN) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) +void wlan_scan_cfg80211_add_connected_pno_support(struct wiphy *wiphy) +{ + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI); +} +#endif + +#if ((LINUX_VERSION_CODE > KERNEL_VERSION(4, 4, 0)) || \ + defined(CFG80211_MULTI_SCAN_PLAN_BACKPORT)) && \ + defined(FEATURE_WLAN_SCAN_PNO) +void wlan_config_sched_scan_plans_to_wiphy(struct wiphy *wiphy, + struct wlan_objmgr_psoc *psoc) +{ + if (ucfg_scan_get_pno_scan_support(psoc)) { + wlan_scan_wiphy_set_max_sched_scans(wiphy, 1); + wiphy->max_sched_scan_ssids = SCAN_PNO_MAX_SUPP_NETWORKS; + wiphy->max_match_sets = SCAN_PNO_MAX_SUPP_NETWORKS; + wiphy->max_sched_scan_ie_len = SCAN_MAX_IE_LENGTH; + wiphy->max_sched_scan_plans = SCAN_PNO_MAX_PLAN_REQUEST; + + wiphy->max_sched_scan_plan_interval = + ucfg_scan_get_max_sched_scan_plan_interval(psoc); + + wiphy->max_sched_scan_plan_iterations = + ucfg_scan_get_max_sched_scan_plan_iterations(psoc); + } +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/inc/os_if_spectral_netlink.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/inc/os_if_spectral_netlink.h new file mode 100644 index 0000000000000000000000000000000000000000..9028654621d5ec13da0239dc6de44451769b9325 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/inc/os_if_spectral_netlink.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2011, 2017-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _OS_IF_SPECTRAL_NETLINK_H +#define _OS_IF_SPECTRAL_NETLINK_H + +#include +#include +#include +#include +#include + +/* NETLINK related declarations */ +#if (KERNEL_VERSION(2, 6, 31) > LINUX_VERSION_CODE) +void os_if_spectral_nl_data_ready(struct sock *sk, int len); +#else +void os_if_spectral_nl_data_ready(struct sk_buff *skb); +#endif /* VERSION CHECK */ + +#ifndef SPECTRAL_NETLINK +#define SPECTRAL_NETLINK (NETLINK_GENERIC + 1) +#endif +#define MAX_SPECTRAL_PAYLOAD (2004) + +/* Init's network namespace */ +extern struct net init_net; + +/** + * os_if_spectral_netlink_init() - Initialize Spectral Netlink data structures + * and register the NL handlers with Spectral target_if + * @pdev: Pointer to pdev + * + * Preparing socket buffer and sending Netlink messages to application layer are + * defined in os_if layer, they need to be registered with Spectral target_if + * + * Return: None + */ +#ifdef WLAN_CONV_SPECTRAL_ENABLE +void os_if_spectral_netlink_init(struct wlan_objmgr_pdev *pdev); +/** + * os_if_spectral_prep_skb() - Prepare socket buffer + * @pdev : Pointer to pdev + * @smsg_type: Spectral scan message type + * @buf_type: Spectral report buffer type + * + * Prepare socket buffer to send the data to application layer + * + * Return: NLMSG_DATA of the created skb or NULL if no memory + */ +void *os_if_spectral_prep_skb(struct wlan_objmgr_pdev *pdev, + enum spectral_msg_type smsg_type, + enum spectral_msg_buf_type buf_type); + +/** + * os_if_spectral_netlink_deinit() - De-initialize Spectral Netlink data + * structures and de-register the NL handlers from Spectral target_if + * @pdev: Pointer to pdev + * + * Return: None + */ +void os_if_spectral_netlink_deinit(struct wlan_objmgr_pdev *pdev); +#else + +static inline void os_if_spectral_netlink_init(struct wlan_objmgr_pdev *pdev) +{ +} + +static inline void os_if_spectral_netlink_deinit(struct wlan_objmgr_pdev *pdev) +{ +} + +#endif +#endif /* _OS_IF_SPECTRAL_NETLINK_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/inc/wlan_cfg80211_spectral.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/inc/wlan_cfg80211_spectral.h new file mode 100644 index 0000000000000000000000000000000000000000..ec5081c1ea50be9e9d4f03aa2dbba216a77bb983 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/inc/wlan_cfg80211_spectral.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declares driver functions interfacing with linux kernel + */ + +#ifndef _WLAN_CFG80211_SPECTRAL_H_ +#define _WLAN_CFG80211_SPECTRAL_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define CONFIG_REQUESTED(type) ((type == \ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_SCAN_AND_CONFIG) || \ + (type == QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_CONFIG)) + +#define SCAN_REQUESTED(type) ((type == \ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_SCAN_AND_CONFIG) || \ + (type == QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_SCAN)) + +/** + * wlan_cfg80211_register_spectral_cmd_handler() - Registration api for spectral + * @pdev: Pointer to pdev + * @idx: Index in function table + * @handler: Pointer to handler + * + * Return: 0 on success, negative value on failure + */ +void wlan_cfg80211_register_spectral_cmd_handler(struct wlan_objmgr_pdev *pdev, + int idx, + void *handler); + +/** + * wlan_cfg80211_spectral_scan_config_and_start() - Start spectral scan + * @wiphy: Pointer to wiphy + * @pdev: Pointer to pdev + * @data: Reference to data + * @data_len: Length of @data + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_spectral_scan_config_and_start(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len); + +/** + * wlan_cfg80211_spectral_scan_stop() - Stop spectral scan + * @wiphy: Pointer to wiphy + * @pdev: Pointer to pdev + * @data: Reference to data + * @data_len: Length of @data + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_spectral_scan_stop(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len); + +/** + * wlan_cfg80211_spectral_scan_get_config() - Get spectral scan config + * @wiphy: Pointer to wiphy + * @pdev: Pointer to pdev + * @data: Reference to data + * @data_len: Length of @data + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_spectral_scan_get_config(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len); + +/** + * wlan_cfg80211_spectral_scan_get_cap() - Get spectral system capabilities + * @wiphy: Pointer to wiphy + * @pdev: Pointer to pdev + * @data: Reference to data + * @data_len: Length of @data + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_spectral_scan_get_cap(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len); + +/** + * wlan_cfg80211_spectral_scan_get_diag_stats() - Get spectral diag stats + * @wiphy: Pointer to wiphy + * @pdev: Pointer to pdev + * @data: Reference to data + * @data_len: Length of @data + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_spectral_scan_get_diag_stats(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len); + +/** + * wlan_cfg80211_spectral_scan_get_status() - Get spectral scan status + * @wiphy: Pointer to wiphy + * @pdev: Pointer to pdev + * @data: Reference to data + * @data_len: Length of @data + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_spectral_scan_get_status(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len); + +/** + * wlan_cfg80211_spectral_scan_dma_debug_config() - configure DMA debug + * @pdev: Pointer to pdev + * @tb: Pointer to Spectral Scan config attribute + * @sscan_mode: Spectral scan mode + * + * Return QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure + */ +QDF_STATUS wlan_cfg80211_spectral_scan_dma_debug_config( + struct wlan_objmgr_pdev *pdev, + struct nlattr **tb, + enum spectral_scan_mode sscan_mode); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/src/os_if_spectral_netlink.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/src/os_if_spectral_netlink.c new file mode 100644 index 0000000000000000000000000000000000000000..748a078e4d81599982be83c431da9fc9de986097 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/src/os_if_spectral_netlink.c @@ -0,0 +1,627 @@ +/* + * Copyright (c) 2011, 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#ifdef CNSS_GENL +#include +#endif +#include + +/** + * os_if_spectral_remove_nbuf_debug_entry() - Remove nbuf from nbuf debug table + * @nbuf - nbuf to remove from the nbuf debug table + * + * Remove nbuf from the nbuf debug hash table and decrement the nbuf count + * + * Return: None + */ +static inline void os_if_spectral_remove_nbuf_debug_entry(qdf_nbuf_t nbuf) +{ + qdf_nbuf_count_dec(nbuf); + qdf_net_buf_debug_release_skb(nbuf); +} + +#ifndef CNSS_GENL +static struct sock *os_if_spectral_nl_sock; +static atomic_t spectral_nl_users = ATOMIC_INIT(0); +#endif + +#if (KERNEL_VERSION(2, 6, 31) > LINUX_VERSION_CODE) +void +os_if_spectral_nl_data_ready(struct sock *sk, int len) +{ + spectral_debug("%d", __LINE__); +} + +#else +void +os_if_spectral_nl_data_ready(struct sk_buff *skb) +{ + spectral_debug("%d", __LINE__); +} +#endif /* VERSION */ + +#ifndef CNSS_GENL +/** + * os_if_spectral_init_nl_cfg() - Initialize netlink kernel + * configuration parameters + * @cfg : Pointer to netlink_kernel_cfg + * + * Initialize netlink kernel configuration parameters required + * for spectral module + * + * Return: None + */ +#if KERNEL_VERSION(3, 6, 0) <= LINUX_VERSION_CODE +static void +os_if_spectral_init_nl_cfg(struct netlink_kernel_cfg *cfg) +{ + cfg->groups = 1; + cfg->input = os_if_spectral_nl_data_ready; +} +#else +static void +os_if_spectral_init_nl_cfg(struct netlink_kernel_cfg *cfg) +{ +} +#endif +/** + * os_if_spectral_create_nl_sock() - Create Netlink socket + * @cfg : Pointer to netlink_kernel_cfg + * + * Create Netlink socket required for spectral module + * + * Return: None + */ +#if KERNEL_VERSION(3, 7, 0) <= LINUX_VERSION_CODE +static void +os_if_spectral_create_nl_sock(struct netlink_kernel_cfg *cfg) +{ + os_if_spectral_nl_sock = + (struct sock *)netlink_kernel_create(&init_net, + SPECTRAL_NETLINK, cfg); +} +#elif KERNEL_VERSION(3, 6, 0) <= LINUX_VERSION_CODE +static void +os_if_spectral_create_nl_sock(struct netlink_kernel_cfg *cfg) +{ + os_if_spectral_nl_sock = + (struct sock *)netlink_kernel_create(&init_net, + SPECTRAL_NETLINK, + THIS_MODULE, cfg); +} +#elif (KERNEL_VERSION(2, 6, 31) > LINUX_VERSION_CODE) +static void +os_if_spectral_create_nl_sock(struct netlink_kernel_cfg *cfg) +{ + os_if_spectral_nl_sock = + (struct sock *)netlink_kernel_create( + SPECTRAL_NETLINK, 1, + &os_if_spectral_nl_data_ready, + THIS_MODULE); +} +#else +#if (KERNEL_VERSION(3, 10, 0) <= LINUX_VERSION_CODE) +static void +os_if_spectral_create_nl_sock(struct netlink_kernel_cfg *cfg) +{ + memset(cfg, 0, sizeof(*cfg)); + cfg->groups = 1; + cfg->input = &os_if_spectral_nl_data_ready; + os_if_spectral_nl_sock = + (struct sock *)netlink_kernel_create(&init_net, + SPECTRAL_NETLINK, cfg); +} +#else +static void +os_if_spectral_create_nl_sock(struct netlink_kernel_cfg *cfg) +{ + os_if_spectral_nl_sock = + (struct sock *)netlink_kernel_create( + &init_net, + SPECTRAL_NETLINK, 1, + &os_if_spectral_nl_data_ready, + NULL, THIS_MODULE); +} +#endif +#endif + +/** + * os_if_spectral_init_nl() - Initialize netlink data structures for + * spectral module + * @pdev : Pointer to pdev + * + * Return: 0 on success else failure + */ +static int +os_if_spectral_init_nl(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_spectral *ps = NULL; + struct netlink_kernel_cfg cfg; + + memset(&cfg, 0, sizeof(cfg)); + if (!pdev) { + osif_err("PDEV is NULL!"); + return -EINVAL; + } + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + + if (!ps) { + osif_err("PDEV SPECTRAL object is NULL!"); + return -EINVAL; + } + os_if_spectral_init_nl_cfg(&cfg); + + if (!os_if_spectral_nl_sock) { + os_if_spectral_create_nl_sock(&cfg); + + if (!os_if_spectral_nl_sock) { + osif_err("NETLINK_KERNEL_CREATE FAILED"); + return -ENODEV; + } + } + ps->spectral_sock = os_if_spectral_nl_sock; + + if (!ps->spectral_sock) { + osif_err("ps->spectral_sock is NULL"); + return -ENODEV; + } + atomic_inc(&spectral_nl_users); + + return 0; +} + +/** + * os_if_spectral_destroy_netlink() - De-initialize netlink data structures for + * spectral module + * @pdev : Pointer to pdev + * + * Return: Success/Failure + */ +static int +os_if_spectral_destroy_netlink(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_spectral *ps = NULL; + + if (!pdev) { + osif_err("PDEV is NULL!"); + return -EINVAL; + } + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + + if (!ps) { + osif_err("PDEV SPECTRAL object is NULL!"); + return -EINVAL; + } + ps->spectral_sock = NULL; + if (atomic_dec_and_test(&spectral_nl_users)) { + sock_release(os_if_spectral_nl_sock->sk_socket); + os_if_spectral_nl_sock = NULL; + } + return 0; +} +#else + +static int +os_if_spectral_init_nl(struct wlan_objmgr_pdev *pdev) +{ + return 0; +} + +static int +os_if_spectral_destroy_netlink(struct wlan_objmgr_pdev *pdev) +{ + return 0; +} +#endif + +void * +os_if_spectral_prep_skb(struct wlan_objmgr_pdev *pdev, + enum spectral_msg_type smsg_type, + enum spectral_msg_buf_type buf_type) +{ + struct pdev_spectral *ps = NULL; + struct nlmsghdr *spectral_nlh = NULL; + void *buf = NULL; + + if (!pdev) { + osif_err("PDEV is NULL!"); + return NULL; + } + + if (smsg_type >= SPECTRAL_MSG_TYPE_MAX) { + osif_err("Invalid Spectral message type %u", smsg_type); + return NULL; + } + + if (buf_type >= SPECTRAL_MSG_BUF_TYPE_MAX) { + osif_err("Invalid Spectral message buffer type %u", + buf_type); + return NULL; + } + + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + + if (!ps) { + osif_err("PDEV SPECTRAL object is NULL!"); + return NULL; + } + + if (buf_type == SPECTRAL_MSG_BUF_NEW) { + QDF_ASSERT(!ps->skb[smsg_type]); + ps->skb[smsg_type] = + qdf_nbuf_alloc(NULL, MAX_SPECTRAL_PAYLOAD, + 0, 0, false); + + if (!ps->skb[smsg_type]) { + osif_err("alloc skb (len=%u, msg_type=%u) failed", + MAX_SPECTRAL_PAYLOAD, smsg_type); + return NULL; + } + + qdf_nbuf_put_tail(ps->skb[smsg_type], MAX_SPECTRAL_PAYLOAD); + spectral_nlh = (struct nlmsghdr *)ps->skb[smsg_type]->data; + + qdf_mem_zero(spectral_nlh, sizeof(*spectral_nlh)); + + /* + * Possible bug that size of struct spectral_samp_msg and + * SPECTRAL_MSG differ by 3 bytes so we miss 3 bytes + */ + + spectral_nlh->nlmsg_len = + NLMSG_SPACE(sizeof(struct spectral_samp_msg)); + spectral_nlh->nlmsg_pid = 0; + spectral_nlh->nlmsg_flags = 0; + spectral_nlh->nlmsg_type = WLAN_NL_MSG_SPECTRAL_SCAN; + + qdf_mem_zero(NLMSG_DATA(spectral_nlh), + sizeof(struct spectral_samp_msg)); + buf = NLMSG_DATA(spectral_nlh); + } else if (buf_type == SPECTRAL_MSG_BUF_SAVED) { + QDF_ASSERT(ps->skb[smsg_type]); + spectral_nlh = (struct nlmsghdr *)ps->skb[smsg_type]->data; + buf = NLMSG_DATA(spectral_nlh); + } else { + osif_err("Failed to get spectral report buffer"); + buf = NULL; + } + + return buf; +} + +#if (KERNEL_VERSION(2, 6, 31) > LINUX_VERSION_CODE) +static inline void +os_if_init_spectral_skb_dst_pid( + struct sk_buff *skb, + struct pdev_spectral *ps) +{ + NETLINK_CB(skb).dst_pid = + ps->spectral_pid; +} +#else +static inline void +os_if_init_spectral_skb_dst_pid( + struct sk_buff *skb, + struct pdev_spectral *ps) +{ +} +#endif /* VERSION - field deprecated by newer kernels */ + +#if KERNEL_VERSION(3, 7, 0) > LINUX_VERSION_CODE +static inline void +os_if_init_spectral_skb_pid_portid(struct sk_buff *skb) +{ + NETLINK_CB(skb).pid = 0; /* from kernel */ +} +#else +static inline void +os_if_init_spectral_skb_pid_portid(struct sk_buff *skb) +{ + NETLINK_CB(skb).portid = 0; /* from kernel */ +} +#endif + + +/** + * os_if_spectral_nl_unicast_msg() - Sends unicast Spectral message to user + * space + * @pdev : Pointer to pdev + * @smsg_type: Spectral message type + * + * Return: void + */ +#ifndef CNSS_GENL +static int +os_if_spectral_nl_unicast_msg(struct wlan_objmgr_pdev *pdev, + enum spectral_msg_type smsg_type) +{ + struct pdev_spectral *ps = NULL; + int status; + + if (!pdev) { + osif_err("PDEV is NULL!"); + return -EINVAL; + } + + if (smsg_type >= SPECTRAL_MSG_TYPE_MAX) { + osif_err("Invalid Spectral message type %u", smsg_type); + return -EINVAL; + } + + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + if (!ps) { + osif_err("PDEV SPECTRAL object is NULL!"); + return -EINVAL; + } + + if (!ps->skb[smsg_type]) { + osif_err("Socket buffer is null, msg_type= %u", smsg_type); + return -EINVAL; + } + + if (!ps->spectral_sock) { + osif_err("Spectral Socket is invalid, msg_type= %u", + smsg_type); + qdf_nbuf_free(ps->skb[smsg_type]); + ps->skb[smsg_type] = NULL; + + return -EINVAL; + } + + os_if_init_spectral_skb_dst_pid(ps->skb[smsg_type], ps); + + os_if_init_spectral_skb_pid_portid(ps->skb[smsg_type]); + + /* to mcast group 1<<0 */ + NETLINK_CB(ps->skb[smsg_type]).dst_group = 0; + + os_if_spectral_remove_nbuf_debug_entry(ps->skb[smsg_type]); + status = netlink_unicast(ps->spectral_sock, + ps->skb[smsg_type], + ps->spectral_pid, MSG_DONTWAIT); + + /* clear the local copy, free would be done by netlink layer */ + ps->skb[smsg_type] = NULL; + + return status; +} +#else + +static int +os_if_spectral_nl_unicast_msg(struct wlan_objmgr_pdev *pdev, + enum spectral_msg_type smsg_type) +{ + struct pdev_spectral *ps = NULL; + int status; + + if (!pdev) { + osif_err("PDEV is NULL!"); + return -EINVAL; + } + + if (smsg_type >= SPECTRAL_MSG_TYPE_MAX) { + osif_err("Invalid Spectral message type %u", smsg_type); + return -EINVAL; + } + + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + if (!ps) { + osif_err("PDEV SPECTRAL object is NULL!"); + return -EINVAL; + } + + if (!ps->skb[smsg_type]) { + osif_err("Socket buffer is null, msg_type= %u", smsg_type); + return -EINVAL; + } + + os_if_init_spectral_skb_pid_portid(ps->skb[smsg_type]); + + os_if_spectral_remove_nbuf_debug_entry(ps->skb[smsg_type]); + status = nl_srv_ucast(ps->skb[smsg_type], ps->spectral_pid, + MSG_DONTWAIT, WLAN_NL_MSG_SPECTRAL_SCAN, + CLD80211_MCGRP_OEM_MSGS); + if (status < 0) + osif_err("failed to send to spectral scan app"); + + /* clear the local copy, free would be done by netlink layer */ + ps->skb[smsg_type] = NULL; + + return status; +} + +#endif + +/** + * os_if_spectral_nl_bcast_msg() - Sends broadcast Spectral message to user + * space + * @pdev : Pointer to pdev + * @smsg_type: Spectral message type + * + * Return: void + */ +static int +os_if_spectral_nl_bcast_msg(struct wlan_objmgr_pdev *pdev, + enum spectral_msg_type smsg_type) +{ +#if (KERNEL_VERSION(2, 6, 31) >= LINUX_VERSION_CODE) + fd_set write_set; +#endif + int status; + struct pdev_spectral *ps = NULL; + +#if (KERNEL_VERSION(2, 6, 31) >= LINUX_VERSION_CODE) + FD_ZERO(&write_set); +#endif + + if (!pdev) { + osif_err("PDEV is NULL!"); + return -EINVAL; + } + + if (smsg_type >= SPECTRAL_MSG_TYPE_MAX) { + osif_err("Invalid Spectral message type %u", smsg_type); + return -EINVAL; + } + + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + + if (!ps) { + osif_err("PDEV SPECTRAL object is NULL!"); + return -EINVAL; + } + + if (!ps->skb[smsg_type]) { + osif_err("Socket buffer is null, msg_type= %u", smsg_type); + return -EINVAL; + } + + if (!ps->spectral_sock) { + qdf_nbuf_free(ps->skb[smsg_type]); + ps->skb[smsg_type] = NULL; + + return -EINVAL; + } + + os_if_spectral_remove_nbuf_debug_entry(ps->skb[smsg_type]); + status = netlink_broadcast(ps->spectral_sock, + ps->skb[smsg_type], + 0, 1, GFP_ATOMIC); + + /* clear the local copy, free would be done by netlink layer */ + ps->skb[smsg_type] = NULL; + + return status; +} + +/** + * os_if_spectral_free_skb() - Free spectral SAMP message skb + * + * @pdev : Pointer to pdev + * @smsg_type: Spectral message type + * + * Return: void + */ +static void +os_if_spectral_free_skb(struct wlan_objmgr_pdev *pdev, + enum spectral_msg_type smsg_type) +{ + struct pdev_spectral *ps = NULL; + + if (!pdev) { + osif_err("PDEV is NULL!"); + return; + } + + if (smsg_type >= SPECTRAL_MSG_TYPE_MAX) { + osif_err("Invalid Spectral message type %u", smsg_type); + return; + } + + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + + if (!ps) { + osif_err("PDEV SPECTRAL object is NULL!"); + return; + } + + if (!ps->skb[smsg_type]) { + osif_info("Socket buffer is null, msg_type= %u", smsg_type); + return; + } + + /* Free buffer */ + qdf_nbuf_free(ps->skb[smsg_type]); + + /* clear the local copy */ + ps->skb[smsg_type] = NULL; +} + +qdf_export_symbol(os_if_spectral_free_skb); + +void +os_if_spectral_netlink_init(struct wlan_objmgr_pdev *pdev) +{ + struct spectral_nl_cb nl_cb = {0}; + struct spectral_context *sptrl_ctx; + + if (!pdev) { + osif_err("PDEV is NULL!"); + return; + } + + sptrl_ctx = spectral_get_spectral_ctx_from_pdev(pdev); + + if (!sptrl_ctx) { + osif_err("Spectral context is NULL!"); + return; + } + + os_if_spectral_init_nl(pdev); + + /* Register Netlink handlers */ + nl_cb.get_sbuff = os_if_spectral_prep_skb; + nl_cb.send_nl_bcast = os_if_spectral_nl_bcast_msg; + nl_cb.send_nl_unicast = os_if_spectral_nl_unicast_msg; + nl_cb.free_sbuff = os_if_spectral_free_skb; + + if (sptrl_ctx->sptrlc_register_netlink_cb) + sptrl_ctx->sptrlc_register_netlink_cb(pdev, &nl_cb); +} +qdf_export_symbol(os_if_spectral_netlink_init); + +void os_if_spectral_netlink_deinit(struct wlan_objmgr_pdev *pdev) +{ + struct spectral_context *sptrl_ctx; + enum spectral_msg_type msg_type = SPECTRAL_MSG_NORMAL_MODE; + + if (!pdev) { + osif_err("PDEV is NULL!"); + return; + } + + sptrl_ctx = spectral_get_spectral_ctx_from_pdev(pdev); + + if (!sptrl_ctx) { + osif_err("Spectral context is NULL!"); + return; + } + + for (; msg_type < SPECTRAL_MSG_TYPE_MAX; msg_type++) + os_if_spectral_free_skb(pdev, msg_type); + + if (sptrl_ctx->sptrlc_deregister_netlink_cb) + sptrl_ctx->sptrlc_deregister_netlink_cb(pdev); + + os_if_spectral_destroy_netlink(pdev); +} +qdf_export_symbol(os_if_spectral_netlink_deinit); diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/src/wlan_cfg80211_spectral.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/src/wlan_cfg80211_spectral.c new file mode 100644 index 0000000000000000000000000000000000000000..b45efef147a7dde04d841918ecb8600762485026 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/src/wlan_cfg80211_spectral.c @@ -0,0 +1,903 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: defines driver functions interfacing with linux kernel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const struct nla_policy spectral_scan_policy[ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_MAX + 1] = { + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_COUNT] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_PERIOD] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PRIORITY] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_SIZE] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_GC_ENA] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RESTART_ENA] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NOISE_FLOOR_REF] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_INIT_DELAY] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NB_TONE_THR] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_STR_BIN_THR] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_WB_RPT_MODE] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_RPT_MODE] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_THR] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PWR_FORMAT] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RPT_MODE] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_BIN_SCALE] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DBM_ADJ] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_CHN_MASK] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_COOKIE] = { + .type = NLA_U64}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_PERIOD] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SHORT_REPORT] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DEBUG_LEVEL] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FREQUENCY] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_MODE] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DMA_RING_DEBUG] = { + .type = NLA_U8}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DMA_BUFFER_DEBUG] = { + .type = NLA_U8}, +}; + +static void wlan_spectral_intit_config(struct spectral_config *config_req) +{ + config_req->ss_period = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_count = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_fft_period = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_short_report = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_spectral_pri = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_fft_size = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_gc_ena = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_restart_ena = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_noise_floor_ref = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_init_delay = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_nb_tone_thr = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_str_bin_thr = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_wb_rpt_mode = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_rssi_rpt_mode = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_rssi_thr = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_pwr_format = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_rpt_mode = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_bin_scale = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_dbm_adj = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_chn_mask = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_frequency = SPECTRAL_PHYERR_PARAM_NOVAL; +} + +/** + * convert_spectral_mode_nl_to_internal() - Get Spectral mode + * @nl_spectral_mode: Spectral mode in vendor attribute + * @mode: Converted Spectral mode + * + * Return: QDF_STATUS_SUCCESS on success, else QDF_STATUS_E_FAILURE + */ +static QDF_STATUS +convert_spectral_mode_nl_to_internal + (enum qca_wlan_vendor_spectral_scan_mode nl_spectral_mode, + enum spectral_scan_mode *mode) +{ + switch (nl_spectral_mode) { + case QCA_WLAN_VENDOR_SPECTRAL_SCAN_MODE_NORMAL: + *mode = SPECTRAL_SCAN_MODE_NORMAL; + break; + + case QCA_WLAN_VENDOR_SPECTRAL_SCAN_MODE_AGILE: + *mode = SPECTRAL_SCAN_MODE_AGILE; + break; + + default: + osif_err("Invalid spectral mode %u", nl_spectral_mode); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * convert_spectral_err_code_internal_to_nl() - Get Spectral error code + * @spectral_err_code: Spectral error code used internally + * @nl_err_code: Spectral error code for cfg80211 + * + * Return: QDF_STATUS_SUCCESS on success, else QDF_STATUS_E_FAILURE + */ +static QDF_STATUS +convert_spectral_err_code_internal_to_nl + (enum spectral_cp_error_code spectral_err_code, + enum qca_wlan_vendor_spectral_scan_error_code *nl_err_code) +{ + switch (spectral_err_code) { + case SPECTRAL_SCAN_ERR_PARAM_UNSUPPORTED: + *nl_err_code = + QCA_WLAN_VENDOR_SPECTRAL_SCAN_ERR_PARAM_UNSUPPORTED; + break; + + case SPECTRAL_SCAN_ERR_MODE_UNSUPPORTED: + *nl_err_code = + QCA_WLAN_VENDOR_SPECTRAL_SCAN_ERR_MODE_UNSUPPORTED; + break; + + case SPECTRAL_SCAN_ERR_PARAM_INVALID_VALUE: + *nl_err_code = + QCA_WLAN_VENDOR_SPECTRAL_SCAN_ERR_PARAM_INVALID_VALUE; + break; + + case SPECTRAL_SCAN_ERR_PARAM_NOT_INITIALIZED: + *nl_err_code = + QCA_WLAN_VENDOR_SPECTRAL_SCAN_ERR_PARAM_NOT_INITIALIZED; + break; + + default: + osif_err("Invalid spectral error code %u", spectral_err_code); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef DIRECT_BUF_RX_DEBUG +QDF_STATUS wlan_cfg80211_spectral_scan_dma_debug_config( + struct wlan_objmgr_pdev *pdev, + struct nlattr **tb, + enum spectral_scan_mode sscan_mode) +{ + struct spectral_cp_request sscan_req; + uint8_t dma_debug_enable; + QDF_STATUS status; + + if (!tb || !pdev) + return QDF_STATUS_E_FAILURE; + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DMA_RING_DEBUG]) { + dma_debug_enable = nla_get_u8(tb[ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DMA_RING_DEBUG]); + sscan_req.ss_mode = sscan_mode; + sscan_req.dma_debug_req.dma_debug_enable = !!dma_debug_enable; + sscan_req.dma_debug_req.dma_debug_type = + SPECTRAL_DMA_RING_DEBUG; + sscan_req.req_id = SPECTRAL_SET_DMA_DEBUG; + status = ucfg_spectral_control(pdev, &sscan_req); + if (status != QDF_STATUS_SUCCESS) { + osif_err("Could not configure dma ring debug"); + return QDF_STATUS_E_FAILURE; + } + } + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DMA_BUFFER_DEBUG]) { + dma_debug_enable = nla_get_u8(tb[ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DMA_BUFFER_DEBUG]); + sscan_req.ss_mode = sscan_mode; + sscan_req.dma_debug_req.dma_debug_enable = !!dma_debug_enable; + sscan_req.dma_debug_req.dma_debug_type = + SPECTRAL_DMA_BUFFER_DEBUG; + sscan_req.req_id = SPECTRAL_SET_DMA_DEBUG; + return ucfg_spectral_control(pdev, &sscan_req); + } + + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS wlan_cfg80211_spectral_scan_dma_debug_config( + struct wlan_objmgr_pdev *pdev, + struct nlattr **tb, + enum spectral_scan_mode sscan_mode) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* DIRECT_BUF_RX_DEBUG */ + +int wlan_cfg80211_spectral_scan_config_and_start(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len) +{ + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_MAX + 1]; + struct spectral_config config_req; + QDF_STATUS status; + uint64_t cookie; + struct sk_buff *skb; + uint32_t spectral_dbg_level; + uint32_t scan_req_type = 0; + struct spectral_cp_request sscan_req; + enum spectral_scan_mode sscan_mode = SPECTRAL_SCAN_MODE_NORMAL; + uint16_t skb_len; + + if (wlan_cfg80211_nla_parse( + tb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_MAX, + data, + data_len, + spectral_scan_policy)) { + osif_err("Invalid Spectral Scan config ATTR"); + return -EINVAL; + } + + wlan_spectral_intit_config(&config_req); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_COUNT]) + config_req.ss_count = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_COUNT]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_PERIOD]) + config_req.ss_period = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_PERIOD]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PRIORITY]) + config_req.ss_spectral_pri = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PRIORITY]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_SIZE]) + config_req.ss_fft_size = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_SIZE]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_GC_ENA]) + config_req.ss_gc_ena = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_GC_ENA]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RESTART_ENA]) + config_req.ss_restart_ena = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RESTART_ENA]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NOISE_FLOOR_REF]) + config_req.ss_noise_floor_ref = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NOISE_FLOOR_REF]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_INIT_DELAY]) + config_req.ss_init_delay = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_INIT_DELAY]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NB_TONE_THR]) + config_req.ss_nb_tone_thr = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NB_TONE_THR]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_STR_BIN_THR]) + config_req.ss_str_bin_thr = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_STR_BIN_THR]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_WB_RPT_MODE]) + config_req.ss_wb_rpt_mode = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_WB_RPT_MODE]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_RPT_MODE]) + config_req.ss_rssi_rpt_mode = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_RPT_MODE]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_THR]) + config_req.ss_rssi_thr = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_THR]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PWR_FORMAT]) + config_req.ss_pwr_format = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PWR_FORMAT]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RPT_MODE]) + config_req.ss_rpt_mode = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RPT_MODE]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_BIN_SCALE]) + config_req.ss_bin_scale = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_BIN_SCALE]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DBM_ADJ]) + config_req.ss_dbm_adj = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DBM_ADJ]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_CHN_MASK]) + config_req.ss_chn_mask = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_CHN_MASK]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_PERIOD]) + config_req.ss_fft_period = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_PERIOD]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SHORT_REPORT]) + config_req.ss_short_report = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SHORT_REPORT]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FREQUENCY]) + config_req.ss_frequency = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FREQUENCY]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_MODE]) { + status = convert_spectral_mode_nl_to_internal(nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_MODE]), &sscan_mode); + + if (QDF_IS_STATUS_ERROR(status)) + return -EINVAL; + } + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DEBUG_LEVEL]) { + spectral_dbg_level = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DEBUG_LEVEL]); + sscan_req.ss_mode = sscan_mode; + sscan_req.debug_req.spectral_dbg_level = spectral_dbg_level; + sscan_req.req_id = SPECTRAL_SET_DEBUG_LEVEL; + status = ucfg_spectral_control(pdev, &sscan_req); + if (QDF_IS_STATUS_ERROR(status)) + return -EINVAL; + } + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE]) + scan_req_type = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE]); + + skb_len = NLMSG_HDRLEN; + /* QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_ERROR_CODE */ + skb_len += NLA_HDRLEN + sizeof(u32); + /* QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_COOKIE */ + skb_len += NLA_HDRLEN + sizeof(u64); + skb = wlan_cfg80211_vendor_cmd_alloc_reply_skb(wiphy, skb_len); + + if (!skb) { + osif_err(" reply skb alloc failed"); + return -ENOMEM; + } + + status = wlan_cfg80211_spectral_scan_dma_debug_config( + pdev, tb, sscan_mode); + if (QDF_IS_STATUS_ERROR(status)) { + status = QDF_STATUS_E_INVAL; + goto free_skb_return_os_status; + } + + if (CONFIG_REQUESTED(scan_req_type)) { + sscan_req.ss_mode = sscan_mode; + sscan_req.req_id = SPECTRAL_SET_CONFIG; + qdf_mem_copy(&sscan_req.config_req.sscan_config, &config_req, + qdf_min(sizeof(sscan_req.config_req.sscan_config), + sizeof(config_req))); + status = ucfg_spectral_control(pdev, &sscan_req); + if (QDF_IS_STATUS_ERROR(status)) { + enum qca_wlan_vendor_spectral_scan_error_code + spectral_nl_err_code; + + /* No error reasons populated, just return error */ + if (sscan_req.config_req.sscan_err_code == + SPECTRAL_SCAN_ERR_INVALID) + goto free_skb_return_os_status; + + status = convert_spectral_err_code_internal_to_nl + (sscan_req.config_req.sscan_err_code, + &spectral_nl_err_code); + if (QDF_IS_STATUS_ERROR(status)) { + status = QDF_STATUS_E_INVAL; + goto free_skb_return_os_status; + } + + if (nla_put_u32 + (skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_ERROR_CODE, + spectral_nl_err_code)) { + status = QDF_STATUS_E_INVAL; + goto free_skb_return_os_status; + } + } + } + + if (SCAN_REQUESTED(scan_req_type)) { + sscan_req.ss_mode = sscan_mode; + sscan_req.req_id = SPECTRAL_ACTIVATE_SCAN; + status = ucfg_spectral_control(pdev, &sscan_req); + if (QDF_IS_STATUS_ERROR(status)) { + enum qca_wlan_vendor_spectral_scan_error_code + spectral_nl_err_code; + + /* No error reasons populated, just return error */ + if (sscan_req.action_req.sscan_err_code == + SPECTRAL_SCAN_ERR_INVALID) + goto free_skb_return_os_status; + + status = convert_spectral_err_code_internal_to_nl + (sscan_req.action_req.sscan_err_code, + &spectral_nl_err_code); + if (QDF_IS_STATUS_ERROR(status)) { + status = QDF_STATUS_E_INVAL; + goto free_skb_return_os_status; + } + + if (nla_put_u32 + (skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_ERROR_CODE, + spectral_nl_err_code)) { + status = QDF_STATUS_E_INVAL; + goto free_skb_return_os_status; + } + } + } + + cookie = 0; + if (wlan_cfg80211_nla_put_u64(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_COOKIE, + cookie)) { + status = QDF_STATUS_E_INVAL; + goto free_skb_return_os_status; + } + + wlan_cfg80211_qal_devcfg_send_response((qdf_nbuf_t)skb); + return 0; +free_skb_return_os_status: + wlan_cfg80211_vendor_free_skb(skb); + return qdf_status_to_os_return(status); +} + +int wlan_cfg80211_spectral_scan_stop(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len) +{ + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_MAX + 1]; + QDF_STATUS status; + struct spectral_cp_request sscan_req; + enum spectral_scan_mode sscan_mode = SPECTRAL_SCAN_MODE_NORMAL; + struct sk_buff *skb; + + if (wlan_cfg80211_nla_parse( + tb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_MAX, + data, + data_len, + spectral_scan_policy)) { + osif_err("Invalid Spectral Scan stop ATTR"); + return -EINVAL; + } + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_MODE]) { + status = convert_spectral_mode_nl_to_internal(nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_MODE]), &sscan_mode); + + if (QDF_IS_STATUS_ERROR(status)) + return -EINVAL; + } + + sscan_req.ss_mode = sscan_mode; + sscan_req.req_id = SPECTRAL_STOP_SCAN; + status = ucfg_spectral_control(pdev, &sscan_req); + if (QDF_IS_STATUS_ERROR(status)) { + enum qca_wlan_vendor_spectral_scan_error_code + spectral_nl_err_code; + + /* No error reasons populated, just return error */ + if (sscan_req.action_req.sscan_err_code == + SPECTRAL_SCAN_ERR_INVALID) + return qdf_status_to_os_return(status); + + status = convert_spectral_err_code_internal_to_nl + (sscan_req.action_req.sscan_err_code, + &spectral_nl_err_code); + if (QDF_IS_STATUS_ERROR(status)) + return -EINVAL; + + skb = wlan_cfg80211_vendor_cmd_alloc_reply_skb(wiphy, + NLMSG_HDRLEN + sizeof(u32) + NLA_HDRLEN); + + if (!skb) { + osif_err(" reply skb alloc failed"); + return -ENOMEM; + } + + if (nla_put_u32 + (skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_ERROR_CODE, + spectral_nl_err_code)) { + wlan_cfg80211_vendor_free_skb(skb); + return -EINVAL; + } + wlan_cfg80211_qal_devcfg_send_response((qdf_nbuf_t)skb); + } + + return 0; +} + +int wlan_cfg80211_spectral_scan_get_config(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len) +{ + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_MAX + 1]; + struct spectral_config *sconfig; + uint32_t spectral_dbg_level; + struct sk_buff *skb; + struct spectral_cp_request sscan_req; + enum spectral_scan_mode sscan_mode = SPECTRAL_SCAN_MODE_NORMAL; + QDF_STATUS status; + + if (wlan_cfg80211_nla_parse( + tb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_MAX, + data, + data_len, + spectral_scan_policy)) { + osif_err("Invalid Spectral Scan config ATTR"); + return -EINVAL; + } + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_MODE]) { + status = convert_spectral_mode_nl_to_internal(nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_MODE]), &sscan_mode); + + if (QDF_IS_STATUS_ERROR(status)) + return -EINVAL; + } + + skb = wlan_cfg80211_vendor_cmd_alloc_reply_skb(wiphy, + (sizeof(u32) + + NLA_HDRLEN) * QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_MAX + + NLMSG_HDRLEN); + if (!skb) { + osif_err(" reply skb alloc failed"); + return -ENOMEM; + } + + sscan_req.ss_mode = sscan_mode; + sscan_req.req_id = SPECTRAL_GET_CONFIG; + status = ucfg_spectral_control(pdev, &sscan_req); + sconfig = &sscan_req.config_req.sscan_config; + if (nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_COUNT, + sconfig->ss_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_PERIOD, + sconfig->ss_period) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PRIORITY, + sconfig->ss_spectral_pri) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_SIZE, + sconfig->ss_fft_size) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_GC_ENA, + sconfig->ss_gc_ena) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RESTART_ENA, + sconfig->ss_restart_ena) || + nla_put_u32( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NOISE_FLOOR_REF, + sconfig->ss_noise_floor_ref) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_INIT_DELAY, + sconfig->ss_init_delay) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NB_TONE_THR, + sconfig->ss_nb_tone_thr) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_STR_BIN_THR, + sconfig->ss_str_bin_thr) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_WB_RPT_MODE, + sconfig->ss_wb_rpt_mode) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_RPT_MODE, + sconfig->ss_rssi_rpt_mode) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_THR, + sconfig->ss_rssi_thr) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PWR_FORMAT, + sconfig->ss_pwr_format) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RPT_MODE, + sconfig->ss_rpt_mode) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_BIN_SCALE, + sconfig->ss_bin_scale) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DBM_ADJ, + sconfig->ss_dbm_adj) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_CHN_MASK, + sconfig->ss_chn_mask) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_PERIOD, + sconfig->ss_fft_period) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SHORT_REPORT, + sconfig->ss_short_report) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FREQUENCY, + sconfig->ss_frequency)) + goto fail; + + sscan_req.ss_mode = sscan_mode; + sscan_req.req_id = SPECTRAL_GET_DEBUG_LEVEL; + status = ucfg_spectral_control(pdev, &sscan_req); + spectral_dbg_level = sscan_req.debug_req.spectral_dbg_level; + if (nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DEBUG_LEVEL, + spectral_dbg_level)) + goto fail; + + wlan_cfg80211_qal_devcfg_send_response((qdf_nbuf_t)skb); + return 0; +fail: + wlan_cfg80211_vendor_free_skb(skb); + return -EINVAL; +} + +int wlan_cfg80211_spectral_scan_get_cap(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len) +{ + struct spectral_caps *scaps; + struct sk_buff *skb; + struct spectral_cp_request sscan_req; + QDF_STATUS status; + + sscan_req.req_id = SPECTRAL_GET_CAPABILITY_INFO; + status = ucfg_spectral_control(pdev, &sscan_req); + scaps = &sscan_req.caps_req.sscan_caps; + + skb = wlan_cfg80211_vendor_cmd_alloc_reply_skb(wiphy, + (sizeof(u32) + + NLA_HDRLEN) * QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_MAX + + NLMSG_HDRLEN); + if (!skb) { + osif_err(" reply skb alloc failed"); + return -ENOMEM; + } + + if (scaps->phydiag_cap) + if (nla_put_flag( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_PHYDIAG)) + goto fail; + + if (scaps->radar_cap) + if (nla_put_flag(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_RADAR)) + goto fail; + + if (scaps->spectral_cap) + if (nla_put_flag( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_SPECTRAL)) + goto fail; + + if (scaps->advncd_spectral_cap) + if (nla_put_flag( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_ADVANCED_SPECTRAL)) + goto fail; + + if (nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_HW_GEN, + scaps->hw_gen)) + goto fail; + + if (scaps->is_scaling_params_populated) { + if (nla_put_u16( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_FORMULA_ID, + scaps->formula_id)) + goto fail; + + if (nla_put_u16( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_LOW_LEVEL_OFFSET, + scaps->low_level_offset)) + goto fail; + + if (nla_put_u16( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_HIGH_LEVEL_OFFSET, + scaps->high_level_offset)) + goto fail; + + if (nla_put_u16( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_RSSI_THR, + scaps->rssi_thr)) + goto fail; + + if (nla_put_u8( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_DEFAULT_AGC_MAX_GAIN, + scaps->default_agc_max_gain)) + goto fail; + } + + if (scaps->agile_spectral_cap) { + int ret; + + ret = nla_put_flag + (skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_AGILE_SPECTRAL); + if (ret) + goto fail; + } + + if (scaps->agile_spectral_cap_160) { + int ret; + + ret = nla_put_flag + (skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_AGILE_SPECTRAL_160); + if (ret) + goto fail; + } + if (scaps->agile_spectral_cap_80p80) { + int ret; + + ret = nla_put_flag + (skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_AGILE_SPECTRAL_80_80); + if (ret) + goto fail; + } + wlan_cfg80211_qal_devcfg_send_response((qdf_nbuf_t)skb); + + return 0; + +fail: + wlan_cfg80211_vendor_free_skb(skb); + return -EINVAL; +} + +int wlan_cfg80211_spectral_scan_get_diag_stats(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len) +{ + struct spectral_diag_stats *spetcral_diag; + struct sk_buff *skb; + struct spectral_cp_request sscan_req; + QDF_STATUS status; + + sscan_req.req_id = SPECTRAL_GET_DIAG_STATS; + status = ucfg_spectral_control(pdev, &sscan_req); + spetcral_diag = &sscan_req.diag_req.sscan_diag; + + skb = wlan_cfg80211_vendor_cmd_alloc_reply_skb(wiphy, + (sizeof(u64) + NLA_HDRLEN) * + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_MAX + + NLMSG_HDRLEN); + if (!skb) { + osif_err(" reply skb alloc failed"); + return -ENOMEM; + } + + if (wlan_cfg80211_nla_put_u64( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_SIG_MISMATCH, + spetcral_diag->spectral_mismatch) || + wlan_cfg80211_nla_put_u64( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_SEC80_SFFT_INSUFFLEN, + spetcral_diag->spectral_sec80_sfft_insufflen) || + wlan_cfg80211_nla_put_u64( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_NOSEC80_SFFT, + spetcral_diag->spectral_no_sec80_sfft) || + wlan_cfg80211_nla_put_u64( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_VHTSEG1ID_MISMATCH, + spetcral_diag->spectral_vhtseg1id_mismatch) || + wlan_cfg80211_nla_put_u64( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_VHTSEG2ID_MISMATCH, + spetcral_diag->spectral_vhtseg2id_mismatch)) { + wlan_cfg80211_vendor_free_skb(skb); + return -EINVAL; + } + wlan_cfg80211_qal_devcfg_send_response((qdf_nbuf_t)skb); + + return 0; +} + +int wlan_cfg80211_spectral_scan_get_status(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len) +{ + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_MAX + 1]; + struct spectral_scan_state sscan_state = { 0 }; + struct sk_buff *skb; + struct spectral_cp_request sscan_req; + enum spectral_scan_mode sscan_mode = SPECTRAL_SCAN_MODE_NORMAL; + QDF_STATUS status; + + if (wlan_cfg80211_nla_parse( + tb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_MAX, + data, + data_len, + NULL)) { + osif_err("Invalid Spectral Scan config ATTR"); + return -EINVAL; + } + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_MODE]) { + status = convert_spectral_mode_nl_to_internal(nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_MODE]), &sscan_mode); + + if (QDF_IS_STATUS_ERROR(status)) + return -EINVAL; + } + + /* Sending a request and extracting response from it has to be atomic */ + sscan_req.ss_mode = sscan_mode; + sscan_req.req_id = SPECTRAL_IS_ACTIVE; + status = ucfg_spectral_control(pdev, &sscan_req); + sscan_state.is_active = sscan_req.status_req.is_active; + + sscan_req.ss_mode = sscan_mode; + sscan_req.req_id = SPECTRAL_IS_ENABLED; + status = ucfg_spectral_control(pdev, &sscan_req); + sscan_state.is_enabled = sscan_req.status_req.is_enabled; + + skb = wlan_cfg80211_vendor_cmd_alloc_reply_skb(wiphy, + 2 * (sizeof(u32) + NLA_HDRLEN) + NLMSG_HDRLEN); + if (!skb) { + osif_err(" reply skb alloc failed"); + return -ENOMEM; + } + + if (sscan_state.is_enabled) + if (nla_put_flag( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_IS_ENABLED)) + goto fail; + + if (sscan_state.is_active) + if (nla_put_flag( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_IS_ACTIVE)) + goto fail; + wlan_cfg80211_qal_devcfg_send_response((qdf_nbuf_t)skb); + + return 0; +fail: + wlan_cfg80211_vendor_free_skb(skb); + return -EINVAL; +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/wifi_pos/inc/os_if_wifi_pos.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wifi_pos/inc/os_if_wifi_pos.h new file mode 100644 index 0000000000000000000000000000000000000000..cc6616d8cfbd05967496bdc295f6cae8c6b7dcec --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wifi_pos/inc/os_if_wifi_pos.h @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2012-2017, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: os_if_wifi_pos.h + * This file provide declaration of wifi_pos's os_if APIs + */ +#ifndef _OS_IF_WIFI_POS_H_ +#define _OS_IF_WIFI_POS_H_ + +#include "qdf_types.h" +#include "qdf_status.h" + + +/* forward declaration */ +struct wifi_pos_ch_info; +struct wlan_objmgr_psoc; +struct wifi_pos_driver_caps; + +#ifdef WIFI_POS_CONVERGED +/** + * os_if_wifi_pos_register_nl() - abstration API to register callback with GENL + * socket. + * + * Return: status of operation + */ +int os_if_wifi_pos_register_nl(void); + +/** + * os_if_wifi_pos_deregister_nl() - abstration API to deregister callback with + * GENL socket. + * + * Return: status of operation + */ +int os_if_wifi_pos_deregister_nl(void); + +/** + * os_if_wifi_pos_send_peer_status() - Function to send peer status to a + * registered application + * @peer_mac: MAC address of peer + * @peer_status: ePeerConnected or ePeerDisconnected + * @peer_timing_meas_cap: 0: RTT/RTT2, 1: RTT3. Default is 0 + * @session_id: SME session id, i.e. vdev_id + * @chan_info: operating channel information + * @dev_mode: dev mode for which indication is sent + * + * Return: none + */ +void os_if_wifi_pos_send_peer_status(struct qdf_mac_addr *peer_mac, + uint8_t peer_status, + uint8_t peer_timing_meas_cap, + uint8_t session_id, + struct wifi_pos_ch_info *chan_info, + enum QDF_OPMODE dev_mode); + +/** + * os_if_wifi_pos_populate_caps() - populate oem capabilities + * @psoc: psoc object + * @caps: pointer to populate the capabilities + * + * Return: error code + */ +int os_if_wifi_pos_populate_caps(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_driver_caps *caps); +#else +static inline int os_if_wifi_pos_register_nl(void) +{ + return 0; +} + +static inline int os_if_wifi_pos_deregister_nl(void) +{ + return 0; +} + +static inline void os_if_wifi_pos_send_peer_status( + struct qdf_mac_addr *peer_mac, + uint8_t peer_status, + uint8_t peer_timing_meas_cap, + uint8_t session_id, + struct wifi_pos_ch_info *chan_info, + enum QDF_OPMODE dev_mode) +{ +} + +static inline int os_if_wifi_pos_populate_caps(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_driver_caps *caps) +{ + return 0; +} +#endif + +#ifdef CNSS_GENL +/** + * enum cld80211_vendor_sub_cmds + * @CLD80211_VENDOR_SUB_CMD_INVALID: invalid cmd type + * @CLD80211_VENDOR_SUB_CMD_REGISTRATION: app registration + * @CLD80211_VENDOR_SUB_CMD_SET_CAPS: set driver capabilities + * @CLD80211_VENDOR_SUB_CMD_GET_CAPS: get driver capabilities + * @CLD80211_VENDOR_SUB_CMD_GET_CH_INFO: get channel info + * @CLD80211_VENDOR_SUB_CMD_OEM_DATA: oem data req/rsp + * @CLD80211_VENDOR_SUB_CMD_OEM_ERROR: oem error rsp + * @CLD80211_VENDOR_SUB_CMD_PEER_STATUS_IND: peer status indication + * @CLD80211_VENDOR_SUB_CMD_MAX: Max cld80211 vendor sub cmds + */ +enum cld80211_vendor_sub_cmds { + CLD80211_VENDOR_SUB_CMD_INVALID = 0, + CLD80211_VENDOR_SUB_CMD_REGISTRATION = 1, + CLD80211_VENDOR_SUB_CMD_SET_CAPS = 2, + CLD80211_VENDOR_SUB_CMD_GET_CAPS = 3, + CLD80211_VENDOR_SUB_CMD_GET_CH_INFO = 4, + CLD80211_VENDOR_SUB_CMD_OEM_DATA = 5, + CLD80211_VENDOR_SUB_CMD_OEM_ERROR = 6, + CLD80211_VENDOR_SUB_CMD_PEER_STATUS_IND = 7, + /* keep last */ + CLD80211_VENDOR_SUB_CMD__AFTER_LAST, + CLD80211_VENDOR_SUB_CMD_MAX = + CLD80211_VENDOR_SUB_CMD__AFTER_LAST - 1 +}; + +/** + * enum cld80211_sub_attr_cap_rsp - Capability response sub attribute + * @CLD80211_SUB_ATTR_CAPS_INVALID: Invalid capability + * @CLD80211_SUB_ATTR_CAPS_OEM_TARGET_SIGNATURE: OEM target signature + * @CLD80211_SUB_ATTR_CAPS_OEM_TARGET_TYPE: OEM target type + * @CLD80211_SUB_ATTR_CAPS_OEM_FW_VERSION: OEM firmware version + * @CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_MAJOR: Driver version major + * @CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_MINOR: Driver version minor + * @CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_PATCH: Driver version patch + * @CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_BUILD: Driver version build + * @CLD80211_SUB_ATTR_CAPS_ALLOWED_DWELL_TIME_MIN: Allowed dwell time min + * @CLD80211_SUB_ATTR_CAPS_ALLOWED_DWELL_TIME_MAX: Allowed dwell time max + * @CLD80211_SUB_ATTR_CAPS_CURRENT_DWELL_TIME_MIN: Current dwell time min + * @CLD80211_SUB_ATTR_CAPS_CURRENT_DWELL_TIME_MAX: Current dwell time max + * @CLD80211_SUB_ATTR_CAPS_SUPPORTED_BANDS: Supported bands + * @CLD80211_SUB_ATTR_CAPS_USER_DEFINED_CAPS: User defined capabilities + * @CLD80211_SUB_ATTR_CAPS_MAX: Max number for CAP sub attribute + * + */ +enum cld80211_sub_attr_cap_rsp { + CLD80211_SUB_ATTR_CAPS_INVALID = 0, + CLD80211_SUB_ATTR_CAPS_OEM_TARGET_SIGNATURE = 1, + CLD80211_SUB_ATTR_CAPS_OEM_TARGET_TYPE = 2, + CLD80211_SUB_ATTR_CAPS_OEM_FW_VERSION = 3, + CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_MAJOR = 4, + CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_MINOR = 5, + CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_PATCH = 6, + CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_BUILD = 7, + CLD80211_SUB_ATTR_CAPS_ALLOWED_DWELL_TIME_MIN = 8, + CLD80211_SUB_ATTR_CAPS_ALLOWED_DWELL_TIME_MAX = 9, + CLD80211_SUB_ATTR_CAPS_CURRENT_DWELL_TIME_MIN = 10, + CLD80211_SUB_ATTR_CAPS_CURRENT_DWELL_TIME_MAX = 11, + CLD80211_SUB_ATTR_CAPS_SUPPORTED_BANDS = 12, + CLD80211_SUB_ATTR_CAPS_USER_DEFINED_CAPS = 13, + + /* keep last */ + CLD80211_SUB_ATTR_CAPS_AFTER_LAST, + CLD80211_SUB_ATTR_CAPS_MAX = + CLD80211_SUB_ATTR_CAPS_AFTER_LAST - 1 +}; + +/** + * enum cld80211_sub_attr_channel_rsp - Chan info response sub attribute + * @CLD80211_SUB_ATTR_CH_RESP_INVALID: Invalid channel resp + * @CLD80211_SUB_ATTR_CH_MORE_DATA: More date sub attr for frag response + * @CLD80211_SUB_ATTR_CHANNEL_NUM_CHAN: Number of channels in response + * @CLD80211_SUB_ATTR_CHANNEL_LIST: Channel list nesting + * @CLD80211_SUB_ATTR_CH_CHAN_ID: Channel number + * @CLD80211_SUB_ATTR_CH_MHZ: Channel frequency + * @CLD80211_SUB_ATTR_CH_BAND_CF_1: Center frequency 1 + * @CLD80211_SUB_ATTR_CH_BAND_CF_2: Center frequency 2 + * @CLD80211_SUB_ATTR_CH_INFO: channel info + * @CLD80211_SUB_ATTR_CH_REG_INFO_1: regulatory info field 1 + * @CLD80211_SUB_ATTR_CH_REG_INFO_2: regulatory info field 2 + * @CLD80211_SUB_ATTR_CAPS_MAX: Max number for CHAN Info sub attribute + * + */ +enum cld80211_sub_attr_channel_rsp { + CLD80211_SUB_ATTR_CH_RESP_INVALID = 0, + CLD80211_SUB_ATTR_CH_MORE_DATA = 1, + CLD80211_SUB_ATTR_CHANNEL_NUM_CHAN = 2, + CLD80211_SUB_ATTR_CH_LIST = 3, + /* CH_* belongs to CH_LIST */ + CLD80211_SUB_ATTR_CH_CHAN_ID = 4, + CLD80211_SUB_ATTR_CH_MHZ = 5, + CLD80211_SUB_ATTR_CH_BAND_CF_1 = 6, + CLD80211_SUB_ATTR_CH_BAND_CF_2 = 7, + CLD80211_SUB_ATTR_CH_INFO = 8, + CLD80211_SUB_ATTR_CH_REG_INFO_1 = 9, + CLD80211_SUB_ATTR_CH_REG_INFO_2 = 10, + + /* keep last */ + CLD80211_SUB_ATTR_CH_AFTER_LAST, + CLD80211_SUB_ATTR_CH_MAX = + CLD80211_SUB_ATTR_CH_AFTER_LAST - 1 + +}; + +/** + * enum cld80211_sub_attr_oem_data_req - OEM data req sub attribute + * @CLD80211_SUB_ATTR_MSG_OEM_DATA_INVALID: Invalid OEM data request + * @CLD80211_SUB_ATTR_MSG_OEM_DATA_FW: Data to Firmware + * @CLD80211_SUB_ATTR_MSG_OEM_DATA_DRIVER: Data to driver + * @CLD80211_SUB_ATTR_MSG_OEM_DATA_REQ_MAX: Max number for OEM data req sub + * attribute + * + * OEM data request sub attributes are NLA attributes in NLA type OEM data + * request. + * + */ +enum cld80211_sub_attr_oem_data_req { + CLD80211_SUB_ATTR_MSG_OEM_DATA_INVALID = 0, + CLD80211_SUB_ATTR_MSG_OEM_DATA_FW = 1, + CLD80211_SUB_ATTR_MSG_OEM_DATA_DRIVER = 2, + + /* keep last */ + CLD80211_SUB_ATTR_MSG_OEM_DATA_REQ_AFTER_LAST, + CLD80211_SUB_ATTR_MSG_OEM_DATA_REQ_MAX = + CLD80211_SUB_ATTR_MSG_OEM_DATA_REQ_AFTER_LAST - 1 +}; + +/** + * enum cld80211_sub_attr_oem_data_resp - OEM message sub attribute + * @CLD80211_SUB_ATTR_OEM_DATA_INVALID: Invalid oem data resp + * @CLD80211_SUB_ATTR_OEM_MORE_DATA: more date sub attribute + * @CLD80211_SUB_ATTR_BINARY_DATA: Binary data sub attribute + * @CLD80211_SUB_ATTR_OEM_DATA_RESP_MAX: Max number for OEM data resp + * sub attribute + * + * OEM message sub attributes are interface between apps and driver to + * process NLA type request and response messages. + * + */ +enum cld80211_sub_attr_oem_data_resp { + CLD80211_SUB_ATTR_OEM_DATA_INVALID = 0, + CLD80211_SUB_ATTR_OEM_MORE_DATA = 1, + CLD80211_SUB_ATTR_BINARY_DATA = 2, + + /* keep last */ + CLD80211_SUB_ATTR_OEM_DATA_RESP_AFTER_LAST, + CLD80211_SUB_ATTR_OEM_DATA_RESP_MAX = + CLD80211_SUB_ATTR_OEM_DATA_RESP_AFTER_LAST - 1 +}; + +/** + * enum cld80211_sub_attr_peer_info - peer info sub attribute + * @CLD80211_SUB_ATTR_PEER_INVALID: Invalid peer info + * @CLD80211_SUB_ATTR_PEER_MAC_ADDR: peer mac address + * @CLD80211_SUB_ATTR_PEER_STATUS: peer status + * @CLD80211_SUB_ATTR_PEER_VDEV_ID: peer vdevid + * @CLD80211_SUB_ATTR_PEER_CAPABILITY: peer capabilities + * @CLD80211_SUB_ATTR_PEER_RESERVED: reserved bytes + * @CLD80211_SUB_ATTR_PEER_CHAN_INFO: peer channel info + * + */ +enum cld80211_sub_attr_peer_info { + CLD80211_SUB_ATTR_PEER_INVALID = 0, + CLD80211_SUB_ATTR_PEER_MAC_ADDR = 1, + CLD80211_SUB_ATTR_PEER_STATUS = 2, + CLD80211_SUB_ATTR_PEER_VDEV_ID = 3, + CLD80211_SUB_ATTR_PEER_CAPABILITY = 4, + CLD80211_SUB_ATTR_PEER_RESERVED = 5, + CLD80211_SUB_ATTR_PEER_CHAN_INFO = 6, + + /* keep last */ + CLD80211_SUB_ATTR_PEER_AFTER_LAST, + CLD80211_SUB_ATTR_PEER_MAX = + CLD80211_SUB_ATTR_PEER_AFTER_LAST - 1 +}; +#endif +#endif /* _OS_IF_WIFI_POS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/wifi_pos/src/os_if_wifi_pos.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wifi_pos/src/os_if_wifi_pos.c new file mode 100644 index 0000000000000000000000000000000000000000..d85a23ce0ca9aa7b05109ec9b45511239b02a741 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wifi_pos/src/os_if_wifi_pos.c @@ -0,0 +1,1018 @@ +/* + * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_hdd_wifi_pos.c + * This file defines the important functions pertinent to wifi positioning + * component's os_if layer. + */ + +#include "qdf_platform.h" +#include "qdf_module.h" +#include "wlan_nlink_srv.h" +#include "wlan_ptt_sock_svc.h" +#include "wlan_nlink_common.h" +#include "os_if_wifi_pos.h" +#include "wifi_pos_api.h" +#include "wlan_cfg80211.h" +#include "wlan_objmgr_psoc_obj.h" +#ifdef CNSS_GENL +#include +#include "linux/genetlink.h" +#include "wifi_pos_utils_pub.h" +#endif + +#ifdef CNSS_GENL +#define WLAN_CLD80211_MAX_SIZE SKB_WITH_OVERHEAD(8192UL) + +#define CLD80211_ATTR_CMD 4 +#define CLD80211_ATTR_CMD_TAG_DATA 5 +#define CLD80211_ATTR_MAX 5 + +static const uint32_t +cap_resp_sub_attr_len[CLD80211_SUB_ATTR_CAPS_MAX + 1] = { + [CLD80211_SUB_ATTR_CAPS_OEM_TARGET_SIGNATURE] = + OEM_TARGET_SIGNATURE_LEN, + [CLD80211_SUB_ATTR_CAPS_OEM_TARGET_TYPE] = sizeof(uint32_t), + [CLD80211_SUB_ATTR_CAPS_OEM_FW_VERSION] = sizeof(uint32_t), + [CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_MAJOR] = sizeof(uint8_t), + [CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_MINOR] = sizeof(uint8_t), + [CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_PATCH] = sizeof(uint8_t), + [CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_BUILD] = sizeof(uint8_t), + [CLD80211_SUB_ATTR_CAPS_ALLOWED_DWELL_TIME_MIN] = sizeof(uint16_t), + [CLD80211_SUB_ATTR_CAPS_ALLOWED_DWELL_TIME_MAX] = sizeof(uint16_t), + [CLD80211_SUB_ATTR_CAPS_CURRENT_DWELL_TIME_MIN] = sizeof(uint16_t), + [CLD80211_SUB_ATTR_CAPS_CURRENT_DWELL_TIME_MAX] = sizeof(uint16_t), + [CLD80211_SUB_ATTR_CAPS_SUPPORTED_BANDS] = sizeof(uint16_t), + [CLD80211_SUB_ATTR_CAPS_USER_DEFINED_CAPS] = + sizeof(struct wifi_pos_user_defined_caps), +}; + +static const uint32_t +peer_status_sub_attr_len[CLD80211_SUB_ATTR_PEER_MAX + 1] = { + [CLD80211_SUB_ATTR_PEER_MAC_ADDR] = ETH_ALEN, + [CLD80211_SUB_ATTR_PEER_STATUS] = sizeof(uint8_t), + [CLD80211_SUB_ATTR_PEER_VDEV_ID] = sizeof(uint8_t), + [CLD80211_SUB_ATTR_PEER_CAPABILITY] = sizeof(uint32_t), + [CLD80211_SUB_ATTR_PEER_RESERVED] = sizeof(uint32_t), + [CLD80211_SUB_ATTR_PEER_CHAN_INFO] = + sizeof(struct wifi_pos_ch_info_rsp), +}; + +static const uint32_t +ch_resp_sub_attr_len[CLD80211_SUB_ATTR_CH_MAX + 1] = { + [CLD80211_SUB_ATTR_CHANNEL_NUM_CHAN] = sizeof(uint32_t), + [CLD80211_SUB_ATTR_CH_LIST] = sizeof(uint32_t), + [CLD80211_SUB_ATTR_CH_CHAN_ID] = sizeof(uint32_t), + [CLD80211_SUB_ATTR_CH_MHZ] = sizeof(uint32_t), + [CLD80211_SUB_ATTR_CH_BAND_CF_1] = sizeof(uint32_t), + [CLD80211_SUB_ATTR_CH_BAND_CF_2] = sizeof(uint32_t), + [CLD80211_SUB_ATTR_CH_INFO] = sizeof(uint32_t), + [CLD80211_SUB_ATTR_CH_REG_INFO_1] = sizeof(uint32_t), + [CLD80211_SUB_ATTR_CH_REG_INFO_2] = sizeof(uint32_t), +}; +#endif + +static int map_wifi_pos_cmd_to_ani_msg_rsp( + enum wifi_pos_cmd_ids cmd) +{ + switch (cmd) { + case WIFI_POS_CMD_REGISTRATION: + return ANI_MSG_APP_REG_RSP; + case WIFI_POS_CMD_SET_CAPS: + return ANI_MSG_SET_OEM_CAP_RSP; + case WIFI_POS_CMD_GET_CAPS: + return ANI_MSG_GET_OEM_CAP_RSP; + case WIFI_POS_CMD_GET_CH_INFO: + return ANI_MSG_CHANNEL_INFO_RSP; + case WIFI_POS_CMD_OEM_DATA: + return ANI_MSG_OEM_DATA_RSP; + case WIFI_POS_CMD_ERROR: + return ANI_MSG_OEM_ERROR; + case WIFI_POS_PEER_STATUS_IND: + return ANI_MSG_PEER_STATUS_IND; + default: + osif_err("response message is invalid :%d", cmd); + return -EINVAL; + } +} + +static enum wifi_pos_cmd_ids +map_ani_msg_req_to_wifi_pos_cmd(uint32_t cmd) +{ + switch (cmd) { + case ANI_MSG_APP_REG_REQ: + return WIFI_POS_CMD_REGISTRATION; + case ANI_MSG_SET_OEM_CAP_REQ: + return WIFI_POS_CMD_SET_CAPS; + case ANI_MSG_GET_OEM_CAP_REQ: + return WIFI_POS_CMD_GET_CAPS; + case ANI_MSG_CHANNEL_INFO_REQ: + return WIFI_POS_CMD_GET_CH_INFO; + case ANI_MSG_OEM_DATA_REQ: + return WIFI_POS_CMD_OEM_DATA; + default: + osif_err("ani req is invalid :%d", cmd); + return WIFI_POS_CMD_INVALID; + } +} + +#ifdef CNSS_GENL +static enum wifi_pos_cmd_ids +map_cld_vendor_sub_cmd_to_wifi_pos_cmd( + enum cld80211_vendor_sub_cmds cmd) +{ + switch (cmd) { + case CLD80211_VENDOR_SUB_CMD_REGISTRATION: + return WIFI_POS_CMD_REGISTRATION; + case CLD80211_VENDOR_SUB_CMD_SET_CAPS: + return WIFI_POS_CMD_SET_CAPS; + case CLD80211_VENDOR_SUB_CMD_GET_CAPS: + return WIFI_POS_CMD_GET_CAPS; + case CLD80211_VENDOR_SUB_CMD_GET_CH_INFO: + return WIFI_POS_CMD_GET_CH_INFO; + case CLD80211_VENDOR_SUB_CMD_OEM_DATA: + return WIFI_POS_CMD_OEM_DATA; + default: + osif_err("cld vendor subcmd is invalid :%d", cmd); + return WIFI_POS_CMD_INVALID; + } +} + +static enum cld80211_vendor_sub_cmds +map_wifi_pos_cmd_to_cld_vendor_sub_cmd( + enum wifi_pos_cmd_ids cmd) +{ + switch (cmd) { + case WIFI_POS_CMD_REGISTRATION: + return CLD80211_VENDOR_SUB_CMD_REGISTRATION; + case WIFI_POS_CMD_SET_CAPS: + return CLD80211_VENDOR_SUB_CMD_SET_CAPS; + case WIFI_POS_CMD_GET_CAPS: + return CLD80211_VENDOR_SUB_CMD_GET_CAPS; + case WIFI_POS_CMD_GET_CH_INFO: + return CLD80211_VENDOR_SUB_CMD_GET_CH_INFO; + case WIFI_POS_CMD_OEM_DATA: + return CLD80211_VENDOR_SUB_CMD_OEM_DATA; + case WIFI_POS_CMD_ERROR: + return ANI_MSG_OEM_ERROR; + case WIFI_POS_PEER_STATUS_IND: + return ANI_MSG_PEER_STATUS_IND; + default: + osif_err("response message is invalid :%d", cmd); + return CLD80211_VENDOR_SUB_CMD_INVALID; + } +} + +static void os_if_wifi_pos_send_peer_nl_status(uint32_t pid, uint8_t *buf) +{ + void *hdr; + int flags = GFP_KERNEL; + struct sk_buff *msg = NULL; + struct nlattr *nest1, *nest2, *nest3; + struct wifi_pos_peer_status_info *peer_info; + struct wifi_pos_ch_info_rsp *chan_info; + + msg = cld80211_oem_rsp_alloc_skb(pid, &hdr, &nest1, &flags); + if (!msg) { + osif_err("alloc_skb failed"); + return; + } + + peer_info = (struct wifi_pos_peer_status_info *)buf; + chan_info = &peer_info->peer_chan_info; + + nla_put_u32(msg, CLD80211_ATTR_CMD, + CLD80211_VENDOR_SUB_CMD_PEER_STATUS_IND); + nest2 = nla_nest_start(msg, CLD80211_ATTR_CMD_TAG_DATA); + if (!nest2) { + osif_err("nla_nest_start failed"); + dev_kfree_skb(msg); + return; + } + + nla_put(msg, CLD80211_SUB_ATTR_PEER_MAC_ADDR, + ETH_ALEN, peer_info->peer_mac_addr); + nla_put_u8(msg, CLD80211_SUB_ATTR_PEER_STATUS, + peer_info->peer_status); + nla_put_u8(msg, CLD80211_SUB_ATTR_PEER_VDEV_ID, + peer_info->vdev_id); + nla_put_u32(msg, CLD80211_SUB_ATTR_PEER_CAPABILITY, + peer_info->peer_capability); + nla_put_u32(msg, CLD80211_SUB_ATTR_PEER_RESERVED, + peer_info->reserved0); + nest3 = nla_nest_start(msg, CLD80211_SUB_ATTR_PEER_CHAN_INFO); + if (!nest3) { + osif_err("nla_nest_start failed"); + dev_kfree_skb(msg); + return; + } + nla_put_u32(msg, CLD80211_SUB_ATTR_CH_CHAN_ID, + chan_info->chan_id); + nla_put_u32(msg, CLD80211_SUB_ATTR_CH_MHZ, chan_info->mhz); + nla_put_u32(msg, CLD80211_SUB_ATTR_CH_BAND_CF_1, + chan_info->band_center_freq1); + nla_put_u32(msg, CLD80211_SUB_ATTR_CH_BAND_CF_2, + chan_info->band_center_freq2); + nla_put_u32(msg, CLD80211_SUB_ATTR_CH_INFO, chan_info->info); + nla_put_u32(msg, CLD80211_SUB_ATTR_CH_REG_INFO_1, + chan_info->reg_info_1); + nla_put_u32(msg, CLD80211_SUB_ATTR_CH_REG_INFO_2, + chan_info->reg_info_2); + + nla_nest_end(msg, nest3); + nla_nest_end(msg, nest2); + + osif_debug("sending oem rsp: type: %d to pid (%d)", + CLD80211_VENDOR_SUB_CMD_PEER_STATUS_IND, pid); + + cld80211_oem_send_reply(msg, hdr, nest1, flags); +} + +static void os_if_send_cap_nl_resp(uint32_t pid, uint8_t *buf) +{ + void *hdr; + int flags = GFP_KERNEL; + struct sk_buff *msg = NULL; + struct nlattr *nest1, *nest2; + struct wifi_pos_oem_get_cap_rsp *cap_rsp; + + msg = cld80211_oem_rsp_alloc_skb(pid, &hdr, &nest1, &flags); + if (!msg) { + osif_err("alloc_skb failed"); + return; + } + + nla_put_u32(msg, CLD80211_ATTR_CMD, + map_wifi_pos_cmd_to_cld_vendor_sub_cmd(WIFI_POS_CMD_GET_CAPS)); + + cap_rsp = (struct wifi_pos_oem_get_cap_rsp *)(buf); + nest2 = nla_nest_start(msg, CLD80211_ATTR_CMD_TAG_DATA); + + if (!nest2) { + osif_err("nla_nest_start failed"); + dev_kfree_skb(msg); + return; + } + + nla_put(msg, CLD80211_SUB_ATTR_CAPS_OEM_TARGET_SIGNATURE, + OEM_TARGET_SIGNATURE_LEN, OEM_TARGET_SIGNATURE); + nla_put_u32(msg, CLD80211_SUB_ATTR_CAPS_OEM_TARGET_TYPE, + cap_rsp->driver_cap.oem_target_type); + nla_put_u32(msg, CLD80211_SUB_ATTR_CAPS_OEM_FW_VERSION, + cap_rsp->driver_cap.oem_fw_version); + nla_put_u8(msg, CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_MAJOR, + cap_rsp->driver_cap.driver_version.major); + nla_put_u8(msg, CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_MINOR, + cap_rsp->driver_cap.driver_version.minor); + nla_put_u8(msg, CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_PATCH, + cap_rsp->driver_cap.driver_version.patch); + nla_put_u8(msg, CLD80211_SUB_ATTR_CAPS_DRIVER_VERSION_BUILD, + cap_rsp->driver_cap.driver_version.build); + nla_put_u16(msg, CLD80211_SUB_ATTR_CAPS_ALLOWED_DWELL_TIME_MIN, + cap_rsp->driver_cap.allowed_dwell_time_min); + nla_put_u16(msg, CLD80211_SUB_ATTR_CAPS_ALLOWED_DWELL_TIME_MAX, + cap_rsp->driver_cap.allowed_dwell_time_max); + nla_put_u16(msg, CLD80211_SUB_ATTR_CAPS_CURRENT_DWELL_TIME_MIN, + cap_rsp->driver_cap.curr_dwell_time_min); + nla_put_u16(msg, CLD80211_SUB_ATTR_CAPS_CURRENT_DWELL_TIME_MAX, + cap_rsp->driver_cap.curr_dwell_time_max); + nla_put_u16(msg, CLD80211_SUB_ATTR_CAPS_SUPPORTED_BANDS, + cap_rsp->driver_cap.supported_bands); + nla_put(msg, CLD80211_SUB_ATTR_CAPS_USER_DEFINED_CAPS, + sizeof(struct wifi_pos_user_defined_caps), + &cap_rsp->user_defined_cap); + nla_nest_end(msg, nest2); + + osif_debug("sending oem rsp: type: %d to pid (%d)", + CLD80211_VENDOR_SUB_CMD_GET_CAPS, pid); + + cld80211_oem_send_reply(msg, hdr, nest1, flags); +} + +static void +os_if_get_chan_nl_resp_len(uint32_t *chan_info, uint32_t *attr_headers) +{ + uint32_t i; + struct nlattr more_data; + struct nlattr attr_tag_data; + struct nlattr cld80211_subattr_ch_list; + struct nlattr chan_iter; + + *attr_headers = NLA_ALIGN(sizeof(attr_tag_data)); + *attr_headers += NLA_ALIGN(sizeof(more_data)); + *attr_headers += nla_total_size( + ch_resp_sub_attr_len[CLD80211_SUB_ATTR_CHANNEL_NUM_CHAN]); + *attr_headers += sizeof(cld80211_subattr_ch_list); + + *chan_info = NLA_ALIGN(sizeof(chan_iter)); + i = CLD80211_SUB_ATTR_CH_LIST; + for (; i <= CLD80211_SUB_ATTR_CH_MAX; i++) + *chan_info += nla_total_size(ch_resp_sub_attr_len[i]); +} + +static uint8_t os_if_get_max_chan_nl_resp(uint8_t chan_num) +{ + struct nlattr vendor_data; + struct nlattr attr_cmd; + uint32_t chan_info = 0, attr_headers = 0; + uint32_t chan_info_msg_len, chan_allow = 0; + + os_if_get_chan_nl_resp_len(&chan_info, &attr_headers); + attr_headers += NLA_ALIGN(sizeof(vendor_data)); + attr_headers += NLA_ALIGN(sizeof(attr_cmd)); + + chan_info_msg_len = WLAN_CLD80211_MAX_SIZE; + chan_info_msg_len -= WIFIPOS_RESERVE_BYTES; + chan_info_msg_len -= attr_headers; + + chan_allow = chan_info_msg_len / chan_info; + + if (chan_num > chan_allow) + return chan_allow; + else + return chan_num; +} + +static int +os_if_create_ch_nl_resp(uint32_t pid, uint8_t *buf, uint16_t num_chan, + bool is_frag) +{ + void *hdr; + int i; + int flags = GFP_KERNEL; + struct sk_buff *msg = NULL; + struct nlattr *nest1, *nest2; + struct nlattr *nest3, *nest4; + struct wifi_pos_ch_info_rsp *channel_rsp; + + channel_rsp = (struct wifi_pos_ch_info_rsp *)buf; + + msg = cld80211_oem_rsp_alloc_skb(pid, &hdr, &nest1, &flags); + if (!msg) { + osif_err("alloc_skb failed"); + return -EPERM; + } + + nla_put_u32(msg, CLD80211_ATTR_CMD, + CLD80211_VENDOR_SUB_CMD_GET_CH_INFO); + + nest2 = nla_nest_start(msg, CLD80211_ATTR_CMD_TAG_DATA); + if (!nest2) + goto fail; + + if (is_frag) + nla_put_flag(msg, CLD80211_SUB_ATTR_CH_MORE_DATA); + + nla_put_u32(msg, CLD80211_SUB_ATTR_CHANNEL_NUM_CHAN, num_chan); + + nest3 = nla_nest_start(msg, CLD80211_SUB_ATTR_CH_LIST); + if (!nest3) + goto fail; + for (i = 0; i < num_chan; i++) { + nest4 = nla_nest_start(msg, i); + if (!nest4) + goto fail; + + nla_put_u32(msg, CLD80211_SUB_ATTR_CH_CHAN_ID, + channel_rsp->chan_id); + nla_put_u32(msg, CLD80211_SUB_ATTR_CH_MHZ, channel_rsp->mhz); + nla_put_u32(msg, CLD80211_SUB_ATTR_CH_BAND_CF_1, + channel_rsp->band_center_freq1); + nla_put_u32(msg, CLD80211_SUB_ATTR_CH_BAND_CF_2, + channel_rsp->band_center_freq2); + nla_put_u32(msg, CLD80211_SUB_ATTR_CH_INFO, channel_rsp->info); + nla_put_u32(msg, CLD80211_SUB_ATTR_CH_REG_INFO_1, + channel_rsp->reg_info_1); + nla_put_u32(msg, CLD80211_SUB_ATTR_CH_REG_INFO_2, + channel_rsp->reg_info_2); + nla_nest_end(msg, nest4); + channel_rsp++; + } + + nla_nest_end(msg, nest3); + nla_nest_end(msg, nest2); + + osif_debug("sending oem rsp: type: %d to pid (%d)", + CLD80211_VENDOR_SUB_CMD_GET_CH_INFO, pid); + + cld80211_oem_send_reply(msg, hdr, nest1, flags); + return 0; + +fail: + osif_err("failed to fill CHAN_RESP attributes"); + dev_kfree_skb(msg); + return -EPERM; +} + +static void os_if_send_chan_nl_resp(uint32_t pid, uint8_t *buf) +{ + int err; + uint8_t check_chans = 0; + uint8_t *chnk_ptr, chan_allow = 0; + bool resp_frag = false; + + check_chans = buf[0]; + chnk_ptr = &buf[1]; + + do { + chan_allow = os_if_get_max_chan_nl_resp(check_chans); + + if (check_chans > chan_allow) + resp_frag = true; + else + resp_frag = false; + check_chans -= chan_allow; + + err = os_if_create_ch_nl_resp(pid, chnk_ptr, + chan_allow, resp_frag); + if (err) { + osif_err("failed to alloc memory for ch_nl_resp"); + return; + } + chnk_ptr += (sizeof(struct wifi_pos_ch_info_rsp) * + chan_allow); + } while (resp_frag); +} + +static int +os_if_create_oemdata_resp(uint32_t pid, uint8_t *buf, bool frag_resp, + uint32_t chnk_len) +{ + void *hdr; + int flags = GFP_KERNEL; + struct sk_buff *msg = NULL; + struct nlattr *nest1, *nest2; + + msg = cld80211_oem_rsp_alloc_skb(pid, &hdr, &nest1, &flags); + if (!msg) { + osif_err("alloc_skb failed"); + return -EPERM; + } + + nla_put_u32(msg, CLD80211_ATTR_CMD, CLD80211_VENDOR_SUB_CMD_OEM_DATA); + + nest2 = nla_nest_start(msg, CLD80211_ATTR_CMD_TAG_DATA); + if (!nest2) + goto fail; + + if (frag_resp) + nla_put_flag(msg, CLD80211_SUB_ATTR_OEM_MORE_DATA); + + nla_put(msg, CLD80211_SUB_ATTR_BINARY_DATA, chnk_len, buf); + + nla_nest_end(msg, nest2); + osif_debug("sending oem rsp: type: %d to pid (%d)", + CLD80211_VENDOR_SUB_CMD_OEM_DATA, pid); + cld80211_oem_send_reply(msg, hdr, nest1, flags); + return 0; + +fail: + osif_err("failed to fill CHAN_RESP attributes"); + dev_kfree_skb(msg); + return -EPERM; +} + +static void +os_if_send_oem_data_nl_resp(uint32_t pid, uint8_t *buf, + uint32_t buf_len) +{ + int err; + uint32_t attr_len; + uint32_t chnk_len, remain_len; + uint8_t *chnk_ptr; + bool frag_resp = false; + + struct nlattr vendor_data; + struct nlattr attr_cmd; + struct nlattr attr_tag_data; + struct nlattr cld80211_subattr_bindata; + struct nlattr more_data; + + attr_len = WIFIPOS_RESERVE_BYTES; + attr_len += NLMSG_ALIGN(sizeof(vendor_data)); + attr_len += NLMSG_ALIGN(sizeof(attr_cmd)); + attr_len += NLMSG_ALIGN(sizeof(attr_tag_data)); + attr_len += NLMSG_ALIGN(sizeof(more_data)); + + chnk_ptr = buf; + chnk_len = buf_len; + remain_len = buf_len; + do { + if (attr_len + nla_total_size(chnk_len) > + WLAN_CLD80211_MAX_SIZE) { + frag_resp = true; + + chnk_len = WLAN_CLD80211_MAX_SIZE - (attr_len + + sizeof(cld80211_subattr_bindata)); + } else { + frag_resp = false; + } + + remain_len -= chnk_len; + + err = os_if_create_oemdata_resp(pid, chnk_ptr, + frag_resp, chnk_len); + if (err) { + osif_err("failed to alloc memory for oem_nl_resp"); + return; + } + chnk_ptr += chnk_len; + chnk_len = remain_len; + } while (frag_resp); +} + +static void os_if_send_nl_resp(uint32_t pid, uint8_t *buf, + enum wifi_pos_cmd_ids cmd, uint32_t len) +{ + switch (cmd) { + case WIFI_POS_CMD_GET_CAPS: + os_if_send_cap_nl_resp(pid, buf); + break; + case WIFI_POS_CMD_GET_CH_INFO: + os_if_send_chan_nl_resp(pid, buf); + break; + case WIFI_POS_CMD_OEM_DATA: + os_if_send_oem_data_nl_resp(pid, buf, len); + break; + case WIFI_POS_PEER_STATUS_IND: + os_if_wifi_pos_send_peer_nl_status(pid, buf); + break; + default: + osif_err("response message is invalid :%d", cmd); + } +} +#else +static void os_if_send_nl_resp(uint32_t pid, uint8_t *buf, + enum wifi_pos_cmd_ids cmd, uint32_t len) +{ +} +#endif + +/** + * os_if_wifi_pos_send_rsp() - send oem registration response + * + * This function sends oem message to registered application process + * + * Return: none + */ +static void os_if_wifi_pos_send_rsp(uint32_t pid, enum wifi_pos_cmd_ids cmd, + uint32_t buf_len, uint8_t *buf) +{ + tAniMsgHdr *aniHdr; + struct sk_buff *skb = NULL; + struct nlmsghdr *nlh; + struct wlan_objmgr_psoc *psoc = wifi_pos_get_psoc(); + + /* OEM msg is always to a specific process and cannot be a broadcast */ + if (pid == 0) { + osif_err("invalid dest pid"); + return; + } + + if (ucfg_wifi_pos_is_nl_rsp(psoc)) { + os_if_send_nl_resp(pid, buf, cmd, buf_len); + } else { + skb = alloc_skb(NLMSG_SPACE(sizeof(tAniMsgHdr) + buf_len), + GFP_ATOMIC); + if (!skb) { + osif_alert("alloc_skb failed"); + return; + } + + nlh = (struct nlmsghdr *)skb->data; + nlh->nlmsg_pid = 0; /* from kernel */ + nlh->nlmsg_flags = 0; + nlh->nlmsg_seq = 0; + nlh->nlmsg_type = WLAN_NL_MSG_OEM; + nlh->nlmsg_len = NLMSG_LENGTH(sizeof(tAniMsgHdr) + buf_len); + + aniHdr = NLMSG_DATA(nlh); + aniHdr->type = map_wifi_pos_cmd_to_ani_msg_rsp(cmd); + qdf_mem_copy(&aniHdr[1], buf, buf_len); + aniHdr->length = buf_len; + + skb_put(skb, NLMSG_SPACE(sizeof(tAniMsgHdr) + buf_len)); + osif_debug("sending oem rsp: type: %d len(%d) to pid (%d)", + aniHdr->type, buf_len, pid); + nl_srv_ucast_oem(skb, pid, MSG_DONTWAIT); + } +} + +#ifdef CNSS_GENL +static int +wifi_pos_parse_nla_oemdata_req(uint32_t len, uint8_t *buf, + struct wifi_pos_req_msg *req) +{ + struct nlattr *tb_oem_data[CLD80211_SUB_ATTR_MSG_OEM_DATA_REQ_MAX + 1]; + + if (wlan_cfg80211_nla_parse(tb_oem_data, + CLD80211_SUB_ATTR_MSG_OEM_DATA_REQ_MAX, + (struct nlattr *)buf, len, NULL)) { + osif_err("invalid data in request"); + return OEM_ERR_INVALID_MESSAGE_TYPE; + } + + if (!tb_oem_data[CLD80211_SUB_ATTR_MSG_OEM_DATA_FW]) { + osif_err("CLD80211_SUB_ATTR_MSG_OEM_DATA_FW not present"); + return OEM_ERR_INVALID_MESSAGE_TYPE; + } + req->buf_len = nla_len( + tb_oem_data[CLD80211_SUB_ATTR_MSG_OEM_DATA_FW]); + req->buf = nla_data( + tb_oem_data[CLD80211_SUB_ATTR_MSG_OEM_DATA_FW]); + + return 0; +} + +static int wifi_pos_parse_nla_req(const void *data, int len, int pid, + struct wifi_pos_req_msg *req) +{ + uint8_t *msg; + struct nlattr *tb[CLD80211_ATTR_MAX + 1]; + uint32_t msg_len; + + if (wlan_cfg80211_nla_parse(tb, CLD80211_ATTR_MAX, data, len, NULL)) { + osif_err("invalid data in request"); + return OEM_ERR_INVALID_MESSAGE_TYPE; + } + + req->pid = pid; + req->msg_type = map_cld_vendor_sub_cmd_to_wifi_pos_cmd( + nla_get_u32(tb[CLD80211_ATTR_CMD])); + req->rsp_version = WIFI_POS_RSP_V2_NL; + + if (tb[CLD80211_ATTR_CMD_TAG_DATA]) { + msg_len = nla_len(tb[CLD80211_ATTR_CMD_TAG_DATA]); + msg = nla_data(tb[CLD80211_ATTR_CMD_TAG_DATA]); + + if (req->msg_type == WIFI_POS_CMD_OEM_DATA) { + if (wifi_pos_parse_nla_oemdata_req(msg_len, msg, req)) { + osif_err("parsing oemdata req failed"); + return OEM_ERR_INVALID_MESSAGE_LENGTH; + } + } else { + req->buf_len = msg_len; + req->buf = msg; + } + } + if (tb[CLD80211_ATTR_META_DATA]) + osif_err("meta data dropped. Apps can use CLD80211_ATTR_CMD_TAG_DATA sub attrs"); + + return 0; +} + +static int wifi_pos_parse_ani_req(const void *data, int len, int pid, + struct wifi_pos_req_msg *req) +{ + tAniMsgHdr *msg_hdr; + struct nlattr *tb[CLD80211_ATTR_MAX + 1]; + uint32_t msg_len, id, nl_field_info_size, expected_field_info_size; + struct wifi_pos_field_info *field_info; + + if (wlan_cfg80211_nla_parse(tb, CLD80211_ATTR_MAX, data, len, NULL)) { + osif_err("invalid data in request"); + return OEM_ERR_INVALID_MESSAGE_TYPE; + } + + msg_len = nla_len(tb[CLD80211_ATTR_DATA]); + if (msg_len < sizeof(*msg_hdr)) { + osif_err("Insufficient length for msg_hdr: %u", msg_len); + return OEM_ERR_INVALID_MESSAGE_LENGTH; + } + + msg_hdr = nla_data(tb[CLD80211_ATTR_DATA]); + req->msg_type = map_ani_msg_req_to_wifi_pos_cmd( + (uint32_t)msg_hdr->type); + req->rsp_version = WIFI_POS_RSP_V1_FLAT_MEMORY; + + if (msg_len < sizeof(*msg_hdr) + msg_hdr->length) { + osif_err("Insufficient length for msg_hdr buffer: %u", + msg_len); + return OEM_ERR_INVALID_MESSAGE_LENGTH; + } + + req->buf_len = msg_hdr->length; + req->buf = (uint8_t *)&msg_hdr[1]; + req->pid = pid; + + id = CLD80211_ATTR_META_DATA; + if (!tb[id]) + return 0; + + nl_field_info_size = nla_len(tb[id]); + if (nl_field_info_size < sizeof(*field_info)) { + osif_err("Insufficient length for field_info_buf: %u", + nl_field_info_size); + return OEM_ERR_INVALID_MESSAGE_LENGTH; + } + + field_info = nla_data(tb[id]); + if (!field_info->count) { + osif_debug("field_info->count is zero, ignoring META_DATA"); + return 0; + } + + expected_field_info_size = sizeof(*field_info) + + (field_info->count - 1) * sizeof(struct wifi_pos_field); + + if (nl_field_info_size < expected_field_info_size) { + osif_err("Insufficient len for total no.of %u fields", + field_info->count); + return OEM_ERR_INVALID_MESSAGE_LENGTH; + } + + req->field_info_buf = field_info; + req->field_info_buf_len = nl_field_info_size; + + return 0; +} + + +static int wifi_pos_parse_req(const void *data, int len, int pid, + struct wifi_pos_req_msg *req) +{ + int status = 0; + struct nlattr *tb[CLD80211_ATTR_MAX + 1]; + + if (wlan_cfg80211_nla_parse(tb, CLD80211_ATTR_MAX, data, len, NULL)) { + osif_err("invalid data in request"); + return OEM_ERR_INVALID_MESSAGE_TYPE; + } + + if (tb[CLD80211_ATTR_DATA]) { + status = wifi_pos_parse_ani_req(data, len, pid, req); + } else if (tb[CLD80211_ATTR_CMD]) { + status = wifi_pos_parse_nla_req(data, len, pid, req); + } else { + osif_err("Valid CLD80211 ATTR not present"); + return OEM_ERR_INVALID_MESSAGE_TYPE; + } + return status; +} +#else +static int wifi_pos_parse_req(struct sk_buff *skb, struct wifi_pos_req_msg *req) +{ + /* SKB->data contains NL msg */ + /* NLMSG_DATA(nlh) contains ANI msg */ + struct nlmsghdr *nlh; + tAniMsgHdr *msg_hdr; + size_t field_info_len; + + nlh = (struct nlmsghdr *)skb->data; + if (!nlh) { + osif_err("Netlink header null"); + return OEM_ERR_NULL_MESSAGE_HEADER; + } + + if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*msg_hdr))) { + osif_err("nlmsg_len(%d) and msg_hdr_size(%zu) mis-match", + nlh->nlmsg_len, sizeof(*msg_hdr)); + return OEM_ERR_INVALID_MESSAGE_LENGTH; + } + + msg_hdr = NLMSG_DATA(nlh); + if (!msg_hdr) { + osif_err("Message header null"); + return OEM_ERR_NULL_MESSAGE_HEADER; + } + + if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*msg_hdr) + msg_hdr->length)) { + osif_err("nlmsg_len(%d) and animsg_len(%d) mis-match", + nlh->nlmsg_len, msg_hdr->length); + return OEM_ERR_INVALID_MESSAGE_LENGTH; + } + + req->msg_type = map_ani_msg_req_to_wifi_pos_cmd( + (uint32_t)msg_hdr->type); + req->rsp_version = WIFI_POS_RSP_V1_FLAT_MEMORY; + req->buf_len = msg_hdr->length; + req->buf = (uint8_t *)&msg_hdr[1]; + req->pid = nlh->nlmsg_pid; + req->field_info_buf = NULL; + + field_info_len = nlh->nlmsg_len - + (NLMSG_LENGTH(sizeof(*msg_hdr) + msg_hdr->length)); + if (field_info_len) { + req->field_info_buf = (struct wifi_pos_field_info *) + (req->buf + req->buf_len); + req->field_info_buf_len = field_info_len; + } + + return 0; +} +#endif + +/** + * __os_if_wifi_pos_callback() - callback registered with NL service socket to + * process wifi pos request + * @skb: request message sk_buff + * + * Return: status of operation + */ +#ifdef CNSS_GENL +static void __os_if_wifi_pos_callback(const void *data, int data_len, + void *ctx, int pid) +{ + uint8_t err; + QDF_STATUS status; + struct wifi_pos_req_msg req = {0}; + struct wlan_objmgr_psoc *psoc = wifi_pos_get_psoc(); + + osif_debug("enter: pid %d", pid); + if (!psoc) { + osif_err("global psoc object not registered yet."); + return; + } + + wlan_objmgr_psoc_get_ref(psoc, WLAN_WIFI_POS_OSIF_ID); + err = wifi_pos_parse_req(data, data_len, pid, &req); + if (err) { + os_if_wifi_pos_send_rsp(wifi_pos_get_app_pid(psoc), + WIFI_POS_CMD_ERROR, sizeof(err), &err); + status = QDF_STATUS_E_INVAL; + goto release_psoc_ref; + } + + status = ucfg_wifi_pos_process_req(psoc, &req, os_if_wifi_pos_send_rsp); + if (QDF_IS_STATUS_ERROR(status)) + osif_err("ucfg_wifi_pos_process_req failed. status: %d", + status); + +release_psoc_ref: + wlan_objmgr_psoc_release_ref(psoc, WLAN_WIFI_POS_OSIF_ID); +} + +static void os_if_wifi_pos_callback(const void *data, int data_len, + void *ctx, int pid) +{ + struct qdf_op_sync *op_sync; + + if (qdf_op_protect(&op_sync)) + return; + + __os_if_wifi_pos_callback(data, data_len, ctx, pid); + qdf_op_unprotect(op_sync); +} +#else +static int __os_if_wifi_pos_callback(struct sk_buff *skb) +{ + uint8_t err; + QDF_STATUS status; + struct wifi_pos_req_msg req = {0}; + struct wlan_objmgr_psoc *psoc = wifi_pos_get_psoc(); + + osif_debug("enter"); + if (!psoc) { + osif_err("global psoc object not registered yet."); + return -EINVAL; + } + + wlan_objmgr_psoc_get_ref(psoc, WLAN_WIFI_POS_OSIF_ID); + err = wifi_pos_parse_req(skb, &req); + if (err) { + os_if_wifi_pos_send_rsp(wifi_pos_get_app_pid(psoc), + WIFI_POS_CMD_ERROR, sizeof(err), &err); + status = QDF_STATUS_E_INVAL; + goto release_psoc_ref; + } + + status = ucfg_wifi_pos_process_req(psoc, &req, os_if_wifi_pos_send_rsp); + if (QDF_IS_STATUS_ERROR(status)) + osif_err("ucfg_wifi_pos_process_req failed. status: %d", + status); + +release_psoc_ref: + wlan_objmgr_psoc_release_ref(psoc, WLAN_WIFI_POS_OSIF_ID); + + return qdf_status_to_os_return(status); +} + +static int os_if_wifi_pos_callback(struct sk_buff *skb) +{ + struct qdf_op_sync *op_sync; + int err; + + if (qdf_op_protect(&op_sync)) + return -EINVAL; + + err = __os_if_wifi_pos_callback(skb); + qdf_op_unprotect(op_sync); + + return err; +} +#endif + +#ifdef CNSS_GENL +int os_if_wifi_pos_register_nl(void) +{ + int ret = register_cld_cmd_cb(WLAN_NL_MSG_OEM, + os_if_wifi_pos_callback, NULL); + if (ret) + osif_err("register_cld_cmd_cb failed"); + + return ret; +} +#else +int os_if_wifi_pos_register_nl(void) +{ + return nl_srv_register(WLAN_NL_MSG_OEM, os_if_wifi_pos_callback); +} +#endif /* CNSS_GENL */ +qdf_export_symbol(os_if_wifi_pos_register_nl); + +#ifdef CNSS_GENL +int os_if_wifi_pos_deregister_nl(void) +{ + int ret = deregister_cld_cmd_cb(WLAN_NL_MSG_OEM); + if (ret) + osif_err("deregister_cld_cmd_cb failed"); + + return ret; +} +#else +int os_if_wifi_pos_deregister_nl(void) +{ + return 0; +} +#endif /* CNSS_GENL */ + +void os_if_wifi_pos_send_peer_status(struct qdf_mac_addr *peer_mac, + uint8_t peer_status, + uint8_t peer_timing_meas_cap, + uint8_t session_id, + struct wifi_pos_ch_info *chan_info, + enum QDF_OPMODE dev_mode) +{ + struct wlan_objmgr_psoc *psoc = wifi_pos_get_psoc(); + struct wifi_pos_peer_status_info *peer_info; + + if (!psoc) { + osif_err("global wifi_pos psoc object not registered"); + return; + } + + if (!wifi_pos_is_app_registered(psoc) || + wifi_pos_get_app_pid(psoc) == 0) { + osif_debug("app is not registered or pid is invalid"); + return; + } + + peer_info = qdf_mem_malloc(sizeof(*peer_info)); + if (!peer_info) + return; + + qdf_mem_copy(peer_info->peer_mac_addr, peer_mac->bytes, + sizeof(peer_mac->bytes)); + peer_info->peer_status = peer_status; + peer_info->vdev_id = session_id; + peer_info->peer_capability = peer_timing_meas_cap; + peer_info->reserved0 = 0; + /* Set 0th bit of reserved0 for STA mode */ + if (QDF_STA_MODE == dev_mode) + peer_info->reserved0 |= 0x01; + + if (chan_info) { + peer_info->peer_chan_info.chan_id = chan_info->chan_id; + peer_info->peer_chan_info.reserved0 = 0; + peer_info->peer_chan_info.mhz = chan_info->mhz; + peer_info->peer_chan_info.band_center_freq1 = + chan_info->band_center_freq1; + peer_info->peer_chan_info.band_center_freq2 = + chan_info->band_center_freq2; + peer_info->peer_chan_info.info = chan_info->info; + peer_info->peer_chan_info.reg_info_1 = chan_info->reg_info_1; + peer_info->peer_chan_info.reg_info_2 = chan_info->reg_info_2; + } + + os_if_wifi_pos_send_rsp(wifi_pos_get_app_pid(psoc), + WIFI_POS_PEER_STATUS_IND, + sizeof(*peer_info), (uint8_t *)peer_info); + qdf_mem_free(peer_info); +} + +int os_if_wifi_pos_populate_caps(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_driver_caps *caps) +{ + if (!psoc || !caps) { + osif_err("psoc or caps buffer is null"); + return -EINVAL; + } + + return qdf_status_to_os_return(wifi_pos_populate_caps(psoc, caps)); +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_cfg80211.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_cfg80211.c new file mode 100644 index 0000000000000000000000000000000000000000..37e0a993888cd36f22d69d2858cf4cc3712f75b0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_cfg80211.c @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: defines driver functions interfacing with linux kernel + */ +#include "wlan_cfg80211.h" diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_cfg80211.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_cfg80211.h new file mode 100644 index 0000000000000000000000000000000000000000..52f21045018d8cf2962b4c8dc9a9adeb22da597f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_cfg80211.h @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declares driver functions interfacing with linux kernel + */ + + +#ifndef _WLAN_CFG80211_H_ +#define _WLAN_CFG80211_H_ + +#include +#include +#include +#include +#include +#include +#include "qal_devcfg.h" + +#define osif_alert(params...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_OS_IF, params) +#define osif_err(params...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_OS_IF, params) +#define osif_warn(params...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_OS_IF, params) +#define osif_notice(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_OS_IF, params) +#define osif_info(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_OS_IF, params) +#define osif_debug(params...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_OS_IF, params) +#define osif_rl_debug(params...) \ + QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_OS_IF, params) +#define osif_err_rl(params...) \ + QDF_TRACE_ERROR_RL(QDF_MODULE_ID_OS_IF, params) + +#define osif_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_OS_IF, params) +#define osif_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_OS_IF, params) +#define osif_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_OS_IF, params) +#define osif_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_OS_IF, params) +#define osif_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_OS_IF, params) + +/* For kernel version >= 5.2, driver needs to provide policy */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) +#define vendor_command_policy(__policy, __maxattr) \ + .policy = __policy, \ + .maxattr = __maxattr +#else +#define vendor_command_policy(__policy, __maxattr) +#endif /*End of (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0) */ + +#if defined(NBUF_MEMORY_DEBUG) && defined(NETLINK_BUF_TRACK) +#define wlan_cfg80211_vendor_free_skb(skb) \ + qdf_nbuf_free(skb) + +#define wlan_cfg80211_vendor_event(skb, gfp) \ +{ \ + qdf_nbuf_count_dec(skb); \ + qdf_net_buf_debug_release_skb(skb); \ + cfg80211_vendor_event(skb, gfp); \ +} + +#define wlan_cfg80211_vendor_cmd_reply(skb) \ +{ \ + qdf_nbuf_count_dec(skb); \ + qdf_net_buf_debug_release_skb(skb); \ + cfg80211_vendor_cmd_reply(skb); \ +} + +static inline QDF_STATUS wlan_cfg80211_qal_devcfg_send_response(qdf_nbuf_t skb) +{ + qdf_nbuf_count_dec(skb); + qdf_net_buf_debug_release_skb(skb); + return qal_devcfg_send_response(skb); +} + +static inline struct sk_buff * +__cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int len, + const char *func, uint32_t line) +{ + struct sk_buff *skb; + + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len); + if (skb) { + qdf_nbuf_count_inc(skb); + qdf_net_buf_debug_acquire_skb(skb, func, line); + } + return skb; +} +#define wlan_cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len) \ + __cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len, __func__, __LINE__) + +static inline struct sk_buff * +__cfg80211_vendor_event_alloc(struct wiphy *wiphy, + struct wireless_dev *wdev, + int approxlen, + int event_idx, + gfp_t gfp, + const char *func, + uint32_t line) +{ + struct sk_buff *skb; + + skb = cfg80211_vendor_event_alloc(wiphy, wdev, + approxlen, + event_idx, + gfp); + if (skb) { + qdf_nbuf_count_inc(skb); + qdf_net_buf_debug_acquire_skb(skb, func, line); + } + return skb; +} +#define wlan_cfg80211_vendor_event_alloc(wiphy, wdev, len, idx, gfp) \ + __cfg80211_vendor_event_alloc(wiphy, wdev, len, \ + idx, gfp, \ + __func__, __LINE__) +#else /* NBUF_MEMORY_DEBUG && NETLINK_BUF_TRACK */ +#define wlan_cfg80211_vendor_free_skb(skb) \ + kfree_skb(skb) + +#define wlan_cfg80211_vendor_event(skb, gfp) \ + cfg80211_vendor_event(skb, gfp) + +#define wlan_cfg80211_vendor_cmd_reply(skb) \ + cfg80211_vendor_cmd_reply(skb) + +#define wlan_cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len) \ + cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len) + +#define wlan_cfg80211_vendor_event_alloc(wiphy, wdev, len, idx, gfp) \ + cfg80211_vendor_event_alloc(wiphy, wdev, len, idx, gfp) + +static inline QDF_STATUS wlan_cfg80211_qal_devcfg_send_response( qdf_nbuf_t skb) +{ + return qal_devcfg_send_response(skb); +} +#endif /* NBUF_MEMORY_DEBUG && NETLINK_BUF_TRACK */ + +#undef nla_parse +#undef nla_parse_nested +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +static inline int wlan_cfg80211_nla_parse(struct nlattr **tb, + int maxtype, + const struct nlattr *head, + int len, + const struct nla_policy *policy) +{ + return nla_parse(tb, maxtype, head, len, policy); +} + +static inline int +wlan_cfg80211_nla_parse_nested(struct nlattr *tb[], + int maxtype, + const struct nlattr *nla, + const struct nla_policy *policy) +{ + return nla_parse_nested(tb, maxtype, nla, policy); +} +#else +static inline int wlan_cfg80211_nla_parse(struct nlattr **tb, + int maxtype, + const struct nlattr *head, + int len, + const struct nla_policy *policy) +{ + return nla_parse(tb, maxtype, head, len, policy, NULL); +} + +static inline int +wlan_cfg80211_nla_parse_nested(struct nlattr *tb[], + int maxtype, + const struct nlattr *nla, + const struct nla_policy *policy) +{ + return nla_parse_nested(tb, maxtype, nla, policy, NULL); +} +#endif +#define nla_parse(...) (obsolete, use wlan_cfg80211_nla_parse) +#define nla_parse_nested(...) (obsolete, use wlan_cfg80211_nla_parse_nested) + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) +static inline int +wlan_cfg80211_nla_put_u64(struct sk_buff *skb, int attrtype, u64 value) +{ + return nla_put_u64(skb, attrtype, value); +} +#else +static inline int +wlan_cfg80211_nla_put_u64(struct sk_buff *skb, int attrtype, u64 value) +{ + return nla_put_u64_64bit(skb, attrtype, value, NL80211_ATTR_PAD); +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_priv.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_priv.h new file mode 100644 index 0000000000000000000000000000000000000000..f3eb7603d68af06c14383646f96029e12bea8ffa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_priv.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + +#ifndef _WLAN_OSIF_PRIV_H_ +#define _WLAN_OSIF_PRIV_H_ + +struct osif_scan_pdev; +struct osif_tdls_vdev; + +/** + * struct pdev_osif_priv - OS private structure + * @wiphy: wiphy handle + * @legacy_osif_priv: legacy osif private handle + * @scan_priv: Scan related data used by cfg80211 scan + */ +struct pdev_osif_priv { + struct wiphy *wiphy; + void *legacy_osif_priv; + struct osif_scan_pdev *osif_scan; +}; + +/** + * struct vdev_osif_priv - OS private structure of vdev + * @wdev: wireless device handle + * @legacy_osif_priv: legacy osif private handle + */ +struct vdev_osif_priv { + struct wireless_dev *wdev; + void *legacy_osif_priv; + struct osif_tdls_vdev *osif_tdls; +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_request_manager.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_request_manager.c new file mode 100644 index 0000000000000000000000000000000000000000..52cb1a54f1c614a95727dc4ea4aba6d5afa72e3b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_request_manager.c @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "qdf_mem.h" +#include "qdf_list.h" +#include "qdf_event.h" +#include "wlan_cfg80211.h" +#include "wlan_osif_request_manager.h" + +/* arbitrary value */ +#define MAX_NUM_REQUESTS 20 + +static bool is_initialized; +static qdf_list_t requests; +static qdf_spinlock_t spinlock; +static void *cookie; + +struct osif_request { + qdf_list_node_t node; + void *cookie; + uint32_t reference_count; + struct osif_request_params params; + qdf_event_t completed; +}; + +/* must be called with spinlock held */ +static void osif_request_unlink(struct osif_request *request) +{ + qdf_list_remove_node(&requests, &request->node); +} + +static void osif_request_destroy(struct osif_request *request) +{ + struct osif_request_params *params; + + params = &request->params; + if (params->dealloc) { + void *priv = osif_request_priv(request); + + params->dealloc(priv); + } + qdf_event_destroy(&request->completed); + qdf_mem_free(request); +} + +/* must be called with spinlock held */ +static struct osif_request *osif_request_find(void *cookie) +{ + QDF_STATUS status; + struct osif_request *request; + qdf_list_node_t *node; + + status = qdf_list_peek_front(&requests, &node); + while (QDF_IS_STATUS_SUCCESS(status)) { + request = qdf_container_of(node, struct osif_request, node); + if (request->cookie == cookie) + return request; + status = qdf_list_peek_next(&requests, node, &node); + } + + return NULL; +} + +struct osif_request *osif_request_alloc(const struct osif_request_params *params) +{ + size_t length; + struct osif_request *request; + + if (!is_initialized) { + osif_err("invoked when not initialized"); + return NULL; + } + + length = sizeof(*request) + params->priv_size; + request = qdf_mem_malloc(length); + if (!request) + return NULL; + + request->reference_count = 1; + request->params = *params; + qdf_event_create(&request->completed); + qdf_spin_lock_bh(&spinlock); + request->cookie = cookie++; + qdf_list_insert_back(&requests, &request->node); + qdf_spin_unlock_bh(&spinlock); + + return request; +} + +void *osif_request_priv(struct osif_request *request) +{ + /* private data area immediately follows the struct osif_request */ + return request + 1; +} + +void *osif_request_cookie(struct osif_request *request) +{ + return request->cookie; +} + +struct osif_request *osif_request_get(void *cookie) +{ + struct osif_request *request; + + if (!is_initialized) { + osif_err("invoked when not initialized"); + return NULL; + } + qdf_spin_lock_bh(&spinlock); + request = osif_request_find(cookie); + if (request) + request->reference_count++; + qdf_spin_unlock_bh(&spinlock); + + return request; +} + +void osif_request_put(struct osif_request *request) +{ + bool unlinked = false; + + qdf_spin_lock_bh(&spinlock); + request->reference_count--; + if (0 == request->reference_count) { + osif_request_unlink(request); + unlinked = true; + } + qdf_spin_unlock_bh(&spinlock); + if (unlinked) + osif_request_destroy(request); +} + +int osif_request_wait_for_response(struct osif_request *request) +{ + QDF_STATUS status; + + status = qdf_wait_for_event_completion(&request->completed, + request->params.timeout_ms); + + return qdf_status_to_os_return(status); +} + +void osif_request_complete(struct osif_request *request) +{ + (void) qdf_event_set(&request->completed); +} + +void osif_request_manager_init(void) +{ + if (is_initialized) + return; + + qdf_list_create(&requests, MAX_NUM_REQUESTS); + qdf_spinlock_create(&spinlock); + is_initialized = true; +} + +/* + * osif_request_manager_deinit implementation note: + * It is intentional that we do not destroy the list or the spinlock. + * This allows threads to still access the infrastructure even when it + * has been deinitialized. Since neither lists nor spinlocks consume + * resources this does not result in a resource leak. + */ +void osif_request_manager_deinit(void) +{ + is_initialized = false; +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_request_manager.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_request_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..c32bdd92595215f69e997a312c4d659d65518858 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_request_manager.h @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __WLAN_OSIF_REQUEST_MANAGER_H__ +#define __WLAN_OSIF_REQUEST_MANAGER_H__ + +/** + * DOC: WLAN OSIF REQUEST MANAGER + * + * Many operations within the wlan driver occur in an asynchronous + * manner. Requests are received by OSIF via one of the kernel + * interfaces (ioctl, nl80211, virtual file system, etc.). The + * requests are translated to an internal format and are then passed + * to lower layers, usually via SME, for processing. For requests + * which require a response, that response comes up from the lower + * layers in a separate thread of execution, ultimately resulting in a + * call to a callback function that was provided by OSIF as part of the + * initial request. So a mechanism is needed to synchronize the + * request and response. This framework provides that mechanism. + * + * Once the framework has been initialized, the typical sequence of + * events is as follows: + * + * Request Thread: + * 1. Create a &struct osif_request_params which describes the request. + * 2. Call osif_request_alloc() to allocate a &struct osif_request. + * 3. Call osif_request_priv() to get a pointer to the private data. + * 4. Place any information which must be shared with the Response + * Callback in the private data area. + * 5. Call osif_request_cookie() to get the unique cookie assigned + * to the request. + * 6. Call the underlying request handling API, passing the cookie + * as the callback's private context. + * 7. Call osif_request_wait_for_response() to wait for the response + * (or for the request to time out). + * 8. Use the return status to see if the request was successful. If + * it was, retrieve any response information from the private + * structure and prepare a response for userspace. + * 9. Call osif_request_put() to relinquish access to the request. + * 10. Return status to the caller. + * + * Response Callback: + * 1. Call osif_request_get() with the provided cookie to see if the + * request structure is still valid. If it returns %NULL then + * return since this means the request thread has already timed + * out. + * 2. Call osif_request_priv() to get access to the private data area. + * 3. Write response data into the private data area. + * 4. Call osif_request_complete() to indicate that the response is + * ready to be processed by the request thread. + * 5. Call osif_request_put() to relinquish the callback function's + * reference to the request. + */ + +/* this is opaque to clients */ +struct osif_request; + +/** + * typedef osif_request_dealloc - Private data deallocation function + */ +typedef void (*osif_request_dealloc)(void *priv); + +/** + * struct osif_request_params - OSIF request parameters + * @priv_size: Size of the private data area required to pass + * information between the request thread and the response callback. + * @timeout_ms: The amount of time to wait for a response in milliseconds. + * @dealloc: Function to be called when the request is destroyed to + * deallocate any allocations made in the private area of the + * request struct. Can be %NULL if no private allocations are + * made. + */ +struct osif_request_params { + uint32_t priv_size; + uint32_t timeout_ms; + osif_request_dealloc dealloc; +}; + +/** + * osif_request_alloc() - Allocate a request struct + * @params: parameter block that specifies the attributes of the + * request + * + * This function will attempt to allocate a &struct osif_request with + * the specified @params. If successful, the caller can then use + * request struct to make an asynchronous request. Once the request is + * no longer needed, the reference should be relinquished via a call + * to osif_request_put(). + * + * Return: A pointer to an allocated &struct osif_request (which also + * contains room for the private buffer) if the allocation is + * successful, %NULL if the allocation fails. + */ +struct osif_request *osif_request_alloc(const struct osif_request_params *params); + +/** + * osif_request_priv() - Get pointer to request private data + * @request: The request struct that contains the private data + * + * This function will return a pointer to the private data area that + * is part of the request struct. The caller must already have a valid + * reference to @request from either osif_request_alloc() or + * osif_request_get(). + * + * Returns: pointer to the private data area. Note that this pointer + * will always be an offset from the input @request pointer and hence + * this function will never return %NULL. + */ +void *osif_request_priv(struct osif_request *request); + +/** + * osif_request_cookie() - Get cookie of a request + * @request: The request struct associated with the request + * + * This function will return the unique cookie that has been assigned + * to the request. This cookie can subsequently be passed to + * osif_request_get() to retrieve the request. + * + * Note that the cookie is defined as a void pointer as it is intended + * to be passed as an opaque context pointer from OSIF to underlying + * layers when making a request, and subsequently passed back to OSIF + * as an opaque pointer in an asynchronous callback. + * + * Returns: The cookie assigned to the request. + */ +void *osif_request_cookie(struct osif_request *request); + +/** + * osif_request_get() - Get a reference to a request struct + * @cookie: The cookie of the request struct that needs to be + * referenced + * + * This function will use the cookie to determine if the associated + * request struct is valid, and if so, will increment the reference + * count of the struct. This means the caller is guaranteed that the + * request struct is valid and the underlying private data can be + * dereferenced. + * + * Returns: The pointer to the request struct associated with @cookie + * if the request is still valid, %NULL if the underlying request + * struct is no longer valid. + */ +struct osif_request *osif_request_get(void *cookie); + +/** + * osif_request_put() - Release a reference to a request struct + * @request: The request struct that no longer needs to be referenced + * + * This function will decrement the reference count of the struct, and + * will clean up the request if this is the last reference. The caller + * must already have a valid reference to @request, either from + * osif_request_alloc() or osif_request_get(). + * + * Returns: Nothing + */ +void osif_request_put(struct osif_request *request); + +/** + * osif_request_wait_for_response() - Wait for a response + * @request: The request struct associated with the request + * + * This function will wait until either a response is received and + * communicated via osif_request_complete(), or until the request + * timeout period expires. + * + * Returns: 0 if a response was received, -ETIMEDOUT if the response + * timed out. + */ +int osif_request_wait_for_response(struct osif_request *request); + +/** + * osif_request_complete() - Complete a request + * @request: The request struct associated with the request + * + * This function is used to indicate that a response has been received + * and that any information required by the request thread has been + * copied into the private data area of the request struct. This will + * unblock any osif_request_wait_for_response() that is pending on this + * @request. + * + * Returns: Nothing + */ +void osif_request_complete(struct osif_request *request); + +/** + * osif_request_manager_init() - Initialize the OSIF Request Manager + * + * This function must be called during system initialization to + * initialize the OSIF Request Manager. + * + * Returns: Nothing + */ +void osif_request_manager_init(void); + +/** + * osif_request_manager_deinit() - Deinitialize the OSIF Request Manager + * + * This function must be called during system shutdown to deinitialize + * the OSIF Request Manager. + * + * Returns: Nothing + */ +void osif_request_manager_deinit(void); + +#endif /* __WLAN_OSIF_REQUEST_MANAGER_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/qal/inc/qal_devcfg.h b/drivers/staging/qca-wifi-host-cmn/qal/inc/qal_devcfg.h new file mode 100644 index 0000000000000000000000000000000000000000..2d4a88abc82ff824ea3e061f0f0e3b747499e486 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qal/inc/qal_devcfg.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qal_devcfg + * QCA abstraction layer (QAL) device config APIs + */ + +#if !defined(__QAL_DEVCFG_H) +#define __QAL_DEVCFG_H + +/* Include Files */ +#include +#include + +#ifdef ENHANCED_OS_ABSTRACTION +/** + * qal_devcfg_send_response() - send devcfg response + * @cfgbuf: response buffer + * + * This function will send the response for a config request + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qal_devcfg_send_response(qdf_nbuf_t cfgbuf); +#else +static inline QDF_STATUS +qal_devcfg_send_response(qdf_nbuf_t cfgbuf) +{ + return __qal_devcfg_send_response(cfgbuf); +} +#endif +#endif /* __QAL_DEVCFG_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qal/inc/qal_vbus_dev.h b/drivers/staging/qca-wifi-host-cmn/qal/inc/qal_vbus_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..67eab99e95c636132990c39fd7bb4176a990a1da --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qal/inc/qal_vbus_dev.h @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qal_vbus_dev + * QCA abstraction layer (QAL) virtual bus management APIs + */ + +#if !defined(__QAL_VBUS_DEV_H) +#define __QAL_VBUS_DEV_H + +/* Include Files */ +#include +#include + +struct qdf_vbus_resource; +struct qdf_vbus_rstctl; +struct qdf_dev_clk; +struct qdf_pfm_hndl; +struct qdf_pfm_drv; + +#ifdef ENHANCED_OS_ABSTRACTION +/** + * qal_vbus_get_iorsc() - acquire io resource + * @devnum: Device Number + * @flag: Property bitmap for the io resource + * @devname: Device name string + * + * This function will allocate the io resource for a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qal_vbus_get_iorsc(int devnum, uint32_t flag, char *devname); + +/** + * qdf_vbus_release_iorsc() - release io resource + * @devnum: Device Number + * + * This function will release the io resource attached to a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qal_vbus_release_iorsc(int devnum); + +/** + * qal_vbus_enable_devclk() - enable device clock + * @clk: Device clock + * + * This function will enable the clock for a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qal_vbus_enable_devclk(struct qdf_dev_clk *clk); + +/** + * qal_vbus_disable_devclk() - disable device clock + * @clk: Device clock + * + * This function will disable the clock for a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qal_vbus_disable_devclk(struct qdf_dev_clk *clk); + +/** + * qal_vbus_acquire_dev_rstctl() - get device reset control + * @pfhndl: Device handle + * @state: Device state information + * @rstctl: Device reset control handle + * + * This function will acquire the control to reset the device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qal_vbus_get_dev_rstctl(struct qdf_pfm_hndl *pfhndl, const char *state, + struct qdf_vbus_rstctl **rstctl); + +/** + * qal_vbus_release_dev_rstctl() - release device reset control + * @pfhndl: Device handle + * @rstctl: Device reset control handle + * + * This function will release the control to reset the device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qal_vbus_release_dev_rstctl(struct qdf_pfm_hndl *pfhndl, + struct qdf_vbus_rstctl *rstctl); + +/** + * qal_vbus_activate_dev_rstctl() - activate device reset control + * @pfhndl: Device handle + * @rstctl: Device reset control handle + * + * This function will activate the reset control for the device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qal_vbus_activate_dev_rstctl(struct qdf_pfm_hndl *pfhndl, + struct qdf_vbus_rstctl *rstctl); + +/** + * qal_vbus_deactivate_dev_rstctl() - deactivate device reset control + * @pfhndl: Device handle + * @rstctl: Device reset control handle + * + * This function will deactivate the reset control for the device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qal_vbus_deactivate_dev_rstctl(struct qdf_pfm_hndl *pfhndl, + struct qdf_vbus_rstctl *rstctl); + +/** + * qal_vbus_get_resource() - get resource + * @pfhndl: Device handle + * @rsc: Resource handle + * @restype: Resource type + * @residx: Resource index + * + * This function will acquire a particular resource and attach it to the device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qal_vbus_get_resource(struct qdf_pfm_hndl *pfhndl, + struct qdf_vbus_resource **rsc, uint32_t restype, + uint32_t residx); + +/** + * qal_vbus_get_irq() - get irq + * @pfhndl: Device handle + * @str: Device identifier + * @irq: irq number + * + * This function will acquire an irq for the device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qal_vbus_get_irq(struct qdf_pfm_hndl *pfhndl, const char *str, int *irq); + +/** + * qal_vbus_register_driver() - register driver + * @pfdev: Device handle + * + * This function will initialize a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qal_vbus_register_driver(struct qdf_pfm_drv *pfdev); + +/** + * qal_vbus_deregister_driver() - deregister driver + * @pfdev: Device handle + * + * This function will deregister the driver for a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qal_vbus_deregister_driver(struct qdf_pfm_drv *pfdev); +#else +static inline QDF_STATUS +qal_vbus_get_iorsc(int devnum, uint32_t flag, char *devname) +{ + return __qal_vbus_get_iorsc(devnum, flag, devname); +} + +static inline QDF_STATUS +qal_vbus_release_iorsc(int devnum) +{ + return __qal_vbus_release_iorsc(devnum); +} + +static inline QDF_STATUS +qal_vbus_enable_devclk(struct qdf_dev_clk *clk) +{ + return __qal_vbus_enable_devclk(clk); +} + +static inline QDF_STATUS +qal_vbus_disable_devclk(struct qdf_dev_clk *clk) +{ + return __qal_vbus_disable_devclk(clk); +} + +static inline QDF_STATUS +qal_vbus_get_dev_rstctl(struct qdf_pfm_hndl *pfhndl, const char *state, + struct qdf_vbus_rstctl **rstctl) +{ + return __qal_vbus_get_dev_rstctl(pfhndl, state, rstctl); +} + +static inline QDF_STATUS +qal_vbus_release_dev_rstctl(struct qdf_pfm_hndl *pfhndl, + struct qdf_vbus_rstctl *rstctl) +{ + return __qal_vbus_release_dev_rstctl(pfhndl, rstctl); +} + +static inline QDF_STATUS +qal_vbus_activate_dev_rstctl(struct qdf_pfm_hndl *pfhndl, + struct qdf_vbus_rstctl *rstctl) +{ + return __qal_vbus_activate_dev_rstctl(pfhndl, rstctl); +} + +static inline QDF_STATUS +qal_vbus_deactivate_dev_rstctl(struct qdf_pfm_hndl *pfhndl, + struct qdf_vbus_rstctl *rstctl) +{ + return __qal_vbus_deactivate_dev_rstctl(pfhndl, rstctl); +} + +static inline QDF_STATUS +qal_vbus_get_resource(struct qdf_pfm_hndl *pfhndl, + struct qdf_vbus_resource **rsc, uint32_t restype, + uint32_t residx) +{ + return __qal_vbus_get_resource(pfhndl, rsc, restype, residx); +} + +static inline QDF_STATUS +qal_vbus_get_irq(struct qdf_pfm_hndl *pfhndl, const char *str, int *irq) +{ + return __qal_vbus_get_irq(pfhndl, str, irq); +} + +static inline QDF_STATUS +qal_vbus_register_driver(struct qdf_pfm_drv *pfdev) +{ + return __qal_vbus_register_driver(pfdev); +} + +static inline QDF_STATUS +qal_vbus_deregister_driver(struct qdf_pfm_drv *pfdev) +{ + return __qal_vbus_deregister_driver(pfdev); +} +#endif +#endif /* __QAL_VBUS_DEV_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qal/linux/src/i_qal_devcfg.h b/drivers/staging/qca-wifi-host-cmn/qal/linux/src/i_qal_devcfg.h new file mode 100644 index 0000000000000000000000000000000000000000..aebb66c7dd4572260f39b6e0a3b777874c29c2e9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qal/linux/src/i_qal_devcfg.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qal_devcfg + * QCA abstraction layer (QAL) device config APIs + */ + +#if !defined(__I_QAL_DEVCFG_H) +#define __I_QAL_DEVCFG_H + +/* Include Files */ +#include +#include + +/** + * __qal_devcfg_send_response() - send devcfg response + * @cfgbuf: response buffer + * + * This function will send the response for a config request + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qal_devcfg_send_response(qdf_nbuf_t cfgbuf) +{ + cfg80211_vendor_cmd_reply(cfgbuf); + + return QDF_STATUS_SUCCESS; +} +#endif /* __I_QAL_DEVCFG_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qal/linux/src/i_qal_vbus_dev.h b/drivers/staging/qca-wifi-host-cmn/qal/linux/src/i_qal_vbus_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..f9c0653d99534d40abb10f8d3134fc42eb255ff0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qal/linux/src/i_qal_vbus_dev.h @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qal_vbus_dev + * QCA abstraction layer (QAL) virtual bus management APIs + */ + +#if !defined(__I_QAL_VBUS_DEV_H) +#define __I_QAL_VBUS_DEV_H + +/* Include Files */ +#include +#include "qdf_util.h" +#include "qdf_module.h" +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) +#include +#endif + +struct qdf_vbus_resource; +struct qdf_vbus_rstctl; +struct qdf_dev_clk; +struct qdf_pfm_hndl; +struct qdf_pfm_drv; + +/** + * __qal_vbus_get_iorsc() - acquire io resource + * @devnum: Device Number + * @flag: Property bitmap for the io resource + * @devname: Device name string + * + * This function will allocate the io resource for a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qal_vbus_get_iorsc(int devnum, uint32_t flag, char *devname) +{ + int ret; + + ret = gpio_request_one(devnum, flag, devname); + + return qdf_status_from_os_return(ret); +} + +/** + * __qal_vbus_release_iorsc() - release io resource + * @devnum: Device Number + * + * This function will release the io resource attached to a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qal_vbus_release_iorsc(int devnum) +{ + gpio_free(devnum); + return QDF_STATUS_SUCCESS; +} + +/** + * __qal_vbus_enable_devclk() - enable device clock + * @clk: Device clock + * + * This function will enable the clock for a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qal_vbus_enable_devclk(struct qdf_dev_clk *clk) +{ + int ret; + + ret = clk_prepare_enable((struct clk *)clk); + + return qdf_status_from_os_return(ret); +} + +/** + * __qal_vbus_disable_devclk() - disable device clock + * @clk: Device clock + * + * This function will disable the clock for a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qal_vbus_disable_devclk(struct qdf_dev_clk *clk) +{ + clk_disable_unprepare((struct clk *)clk); + + return QDF_STATUS_SUCCESS; +} + +/** + * __qal_vbus_get_dev_rstctl() - get device reset control + * @pfhndl: Device handle + * @state: Device state information + * @rstctl: Device reset control handle + * + * This function will acquire the control to reset the device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qal_vbus_get_dev_rstctl(struct qdf_pfm_hndl *pfhndl, const char *state, + struct qdf_vbus_rstctl **rstctl) +{ + struct reset_control *rsctl; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) + rsctl = reset_control_get_optional((struct device *)pfhndl, state); +#else + rsctl = NULL; +#endif + *rstctl = (struct qdf_vbus_rstctl *)rsctl; + return QDF_STATUS_SUCCESS; +} + +/** + * __qal_vbus_release_dev_rstctl() - release device reset control + * @pfhndl: Device handle + * @rstctl: Device reset control handle + * + * This function will release the control to reset the device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qal_vbus_release_dev_rstctl(struct qdf_pfm_hndl *pfhndl, + struct qdf_vbus_rstctl *rstctl) +{ + reset_control_put((struct reset_control *)rstctl); + return QDF_STATUS_SUCCESS; +} + +/** + * __qal_vbus_activate_dev_rstctl() - activate device reset control + * @pfhndl: Device handle + * @rstctl: Device reset control handle + * + * This function will activate the reset control for the device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qal_vbus_activate_dev_rstctl(struct qdf_pfm_hndl *pfhndl, + struct qdf_vbus_rstctl *rstctl) +{ + reset_control_assert((struct reset_control *)rstctl); + return QDF_STATUS_SUCCESS; +} + +/** + * __qal_vbus_deactivate_dev_rstctl() - deactivate device reset control + * @pfhndl: Device handle + * @rstctl: Device reset control handle + * + * This function will deactivate the reset control for the device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qal_vbus_deactivate_dev_rstctl(struct qdf_pfm_hndl *pfhndl, + struct qdf_vbus_rstctl *rstctl) +{ + reset_control_deassert((struct reset_control *)rstctl); + return QDF_STATUS_SUCCESS; +} + +/** + * __qal_vbus_get_resource() - get resource + * @pfhndl: Device handle + * @rsc: Resource handle + * @restype: Resource type + * @residx: Resource index + * + * This function will acquire a particular resource and attach it to the device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qal_vbus_get_resource(struct qdf_pfm_hndl *pfhndl, + struct qdf_vbus_resource **rsc, uint32_t restype, + uint32_t residx) +{ + struct resource *rsrc; + + rsrc = platform_get_resource((struct platform_device *)pfhndl, + restype, residx); + *rsc = (struct qdf_vbus_resource *)rsrc; + return QDF_STATUS_SUCCESS; +} + +/** + * __qal_vbus_get_irq() - get irq + * @pfhndl: Device handle + * @str: Device identifier + * @irq: irq number + * + * This function will acquire an irq for the device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qal_vbus_get_irq(struct qdf_pfm_hndl *pfhndl, const char *str, int *irq) +{ + *irq = platform_get_irq_byname((struct platform_device *)pfhndl, str); + + if (*irq < 0) + return QDF_STATUS_E_FAULT; + + return QDF_STATUS_SUCCESS; +} + +/** + * __qal_vbus_register_driver() - register driver + * @pfdev: Device handle + * + * This function will initialize a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qal_vbus_register_driver(struct qdf_pfm_drv *pfdev) +{ + int ret; + + ret = platform_driver_register((struct platform_driver *)pfdev); + + return qdf_status_from_os_return(ret); +} + +/** + * __qal_vbus_deregister_driver() - deregister driver + * @pfdev: Device handle + * + * This function will deregister the driver for a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qal_vbus_deregister_driver(struct qdf_pfm_drv *pfdev) +{ + platform_driver_unregister((struct platform_driver *)pfdev); + + return QDF_STATUS_SUCCESS; +} +#endif /* __I_QAL_VBUS_DEV_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/Kbuild b/drivers/staging/qca-wifi-host-cmn/qdf/Kbuild new file mode 100644 index 0000000000000000000000000000000000000000..85f1b10faefacab7ec745935bbf14c1e5fee229a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/Kbuild @@ -0,0 +1,128 @@ +ifeq ($(obj),) +obj := . +endif + +DEPTH := ../.. + +HOST_CMN_CONVG_SRC := $(DEPTH)/cmn_dev +HOST_CMN_CONVG_NLINK := $(DEPTH)/cmn_dev/utils/nlink +HOST_CMN_CONVG_LOGGING := $(DEPTH)/cmn_dev/utils/logging +HOST_CMN_CONVG_PTT := $(DEPTH)/cmn_dev/utils/ptt +HOST_QAL_SRC := $(DEPTH)/component_dev/qal + +include $(obj)/$(DEPTH)/os/linux/Makefile-linux.common + +INCS += -Iinclude/nbuf -Iinclude/net -Iinclude/os +INCS += -Inbuf/linux -Inet/linux -Ios/linux +INCS += -I$(WLAN_TOP)/../../include +INCS += -I$(WLAN_TOP)/cmn_dev/qdf/inc +INCS += -I$(WLAN_TOP)/cmn_dev/qal/inc +INCS += -I$(WLAN_TOP)/cmn_dev/utils/sys +INCS += -I$(WLAN_TOP)/component_dev/qal/inc +INCS += -I$(WLAN_TOP)/cmn_dev/qal/linux/src +INCS += -I$(WLAN_TOP)/cmn_dev/qdf/linux/src +INCS += -I$(obj)/$(HOST_CMN_CONVG_PTT)/inc \ + -I$(obj)/$(HOST_CMN_CONVG_NLINK)/inc \ + -I$(obj)/$(HOST_CMN_CONVG_LOGGING)/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/utils/host_diag_log/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/utils/host_diag_log/src \ + -I$(obj)/$(DEPTH)/cmn_dev/utils/ptt/inc \ + -I$(WLAN_TOP)/pld/inc + +obj-m += qdf.o + +EXTRA_CFLAGS+= $(INCS) $(COPTS) -Wno-unused-function + +ifeq ($(strip ${QCA_PARTNER_MAKE_F_SUPPORT}),1) +MOD_CFLAGS = -D"KBUILD_STR(s)=\#s" -D"KBUILD_BASENAME=KBUILD_STR(qdf.mod)" -D"KBUILD_MODNAME=KBUILD_STR(qdf)" +endif + +qdf-objs := \ +linux/src/qdf_defer.o \ +linux/src/qdf_dev.o \ +linux/src/qdf_event.o \ +linux/src/qdf_file.o \ +linux/src/qdf_list.o \ +linux/src/qdf_lock.o \ +linux/src/qdf_mc_timer.o \ +linux/src/qdf_mem.o \ +linux/src/qdf_module.o \ +linux/src/qdf_net_if.o \ +linux/src/qdf_nbuf.o \ +linux/src/qdf_perf.o \ +linux/src/qdf_status.o \ +linux/src/qdf_threads.o \ +linux/src/qdf_trace.o \ +linux/src/qdf_vfs.o \ +src/qdf_flex_mem.o \ +src/qdf_parse.o \ +src/qdf_str.o \ +src/qdf_types.o \ +src/qdf_platform.o \ +$(HOST_CMN_CONVG_NLINK)/src/wlan_nlink_srv.o \ +$(HOST_QAL_SRC)/linux/src/qal_devcfg.o \ +$(HOST_QAL_SRC)/linux/src/qal_vbus_dev.o \ +$(HOST_QAL_SRC)/linux/src/qal_streamfs.o \ +$(HOST_QAL_SRC)/linux/src/qal_notifier.o \ +#linux/src/qdf_net.o \ +#linux/src/qdf_net_event.o \ +#linux/src/qdf_net_ioctl.o +#linux/src/qdf_net_wext.o + +ifeq ($(MEMORY_DEBUG),1) +qdf-objs += src/qdf_debug_domain.o +endif + +ifeq ($(LOGGING_UTILS_SUPPORT),1) +qdf-objs += \ +$(HOST_CMN_CONVG_LOGGING)/src/wlan_logging_sock_svc.o +endif + +ifeq ($(WLAN_DEBUGFS),1) +qdf-objs += linux/src/qdf_debugfs.o +endif + +ifeq ($(BUILD_ADF_NET_IOCTL),1) +EXTRA_CFLAGS+= -DADF_NET_IOCTL_SUPPORT +#adf-objs += os/linux/adf_os_netlink_pvt.o \ +# net/linux/adf_net_event.o \ +# net/linux/adf_net_wext.o \ +# net/linux/adf_net_ioctl.o +endif + +ifeq ($(BUILD_ADF_DEFER_PVT),1) +#adf-objs += os/linux/adf_os_defer_pvt.o +endif +ifeq ($(BUILD_ADF_IRQ_PVT),1) +#adf-objs += os/linux/adf_os_irq_pvt.o +endif + +ifeq ($(BUILD_ADF_PERF_PROFILING),1) +#adf-objs += os/linux/adf_os_perf_pvt.o +ifeq ($(BUILD_ADF_MIPS_PERF_PROFILING),1) +#adf-objs += os/linux/adf_os_mips_perf_pvt.o +endif +endif + +# os/linux/adf_os_pci_pvt.o \ +# net/linux/adf_net_ioctl.o \ +# net/linux/adf_net_pseudo.o \ + +clean-files := modules.order + +ifeq ($(strip ${QCA_PARTNER_MAKE_F_SUPPORT}),1) +all: qdf.ko + +qdf.mod.o: qdf.mod.c + ${CC} -c -o $@ ${EXTRA_CFLAGS} ${MOD_CFLAGS} $< + +adf.o: ${adf-objs} + $(LD) -m elf32btsmip -r -o adf.o $(adf-objs) + $(KERNELPATH)/scripts/mod/modpost qdf.o + +qdf.ko: qdf.o qdf.mod.o + $(LD) $(LDOPTS) -o qdf.ko qdf.o qdf.mod.o + +%.o: %.c + ${CC} -c -o $@ ${EXTRA_CFLAGS} $< +endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/i_qdf_nbuf_api_m.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/i_qdf_nbuf_api_m.h new file mode 100644 index 0000000000000000000000000000000000000000..da6cbd22693f2d223e042d9c41c670f3215730a3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/i_qdf_nbuf_api_m.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2014-2017,2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_nbuf_api_m.h + * + * Platform specific qdf_nbuf_public network buffer API + * This file defines the network buffer abstraction. + * Included by qdf_nbuf.h and should not be included + * directly from other files. + */ + +#ifndef _QDF_NBUF_M_H +#define _QDF_NBUF_M_H + +static inline int qdf_nbuf_ipa_owned_get(qdf_nbuf_t buf) +{ + return __qdf_nbuf_ipa_owned_get(buf); +} + +static inline void qdf_nbuf_ipa_owned_set(qdf_nbuf_t buf) +{ + __qdf_nbuf_ipa_owned_set(buf); +} + +static inline void qdf_nbuf_ipa_owned_clear(qdf_nbuf_t buf) +{ + __qdf_nbuf_ipa_owned_clear(buf); +} + +static inline int qdf_nbuf_ipa_priv_get(qdf_nbuf_t buf) +{ + return __qdf_nbuf_ipa_priv_get(buf); +} + +static inline void qdf_nbuf_ipa_priv_set(qdf_nbuf_t buf, uint32_t priv) +{ + + QDF_BUG(!(priv & QDF_NBUF_IPA_CHECK_MASK)); + __qdf_nbuf_ipa_priv_set(buf, priv); +} + +/** + * qdf_nbuf_set_rx_protocol_tag() + * @buf: Network buffer + * @val: Value to be set in the nbuf + * Return: None + */ +static inline void qdf_nbuf_set_rx_protocol_tag(qdf_nbuf_t buf, uint16_t val) +{ +} + +/** + * qdf_nbuf_get_rx_protocol_tag() + * @buf: Network buffer + * Return: Value of rx protocol tag, here 0 + */ +static inline uint16_t qdf_nbuf_get_rx_protocol_tag(qdf_nbuf_t buf) +{ + return 0; +} + +/** + * qdf_nbuf_set_rx_flow_tag() - set given value in flow tag field + * of buf(skb->cb) + * @buf: Network buffer + * @val: Rx Flow Tag to be set in the nbuf + * Return: None + */ +static inline void qdf_nbuf_set_rx_flow_tag(qdf_nbuf_t buf, uint16_t val) +{ +} + +/** + * qdf_nbuf_get_rx_flow_tag() - Get the value of flow_tag + * field of buf(skb->cb) + * @buf: Network buffer + * Return: Value of rx flow tag, here 0 + */ +static inline uint16_t qdf_nbuf_get_rx_flow_tag(qdf_nbuf_t buf) +{ + return 0; +} + +/** + * qdf_nbuf_set_exc_frame() - set exception frame flag + * @buf: Network buffer whose cb is to set exception frame flag + * @value: exception frame flag, value 0 or 1. + * + * Return: none + */ +static inline void qdf_nbuf_set_exc_frame(qdf_nbuf_t buf, uint8_t value) +{ + QDF_NBUF_CB_RX_PACKET_EXC_FRAME(buf) = value; +} + +/** + * qdf_nbuf_is_exc_frame() - check exception frame flag bit + * @buf: Network buffer to get exception flag + * + * Return: 0 or 1 + */ +static inline uint8_t qdf_nbuf_is_exc_frame(qdf_nbuf_t buf) +{ + return QDF_NBUF_CB_RX_PACKET_EXC_FRAME(buf); +} + +/** + * qdf_nbuf_set_rx_reo_dest_ind() - set reo destination indication + * @buf: Network buffer + * @value: reo destination indication value to set + * + * Return: none + */ +static inline void qdf_nbuf_set_rx_reo_dest_ind(qdf_nbuf_t buf, + uint8_t value) +{ + QDF_NBUF_CB_RX_PACKET_REO_DEST_IND(buf) = value; +} + +/** + * qdf_nbuf_get_rx_reo_dest_ind() - get reo destination indication + * @buf: Network buffer + * + * Return reo destination indication value (0 ~ 31) + */ +static inline uint8_t qdf_nbuf_get_rx_reo_dest_ind(qdf_nbuf_t buf) +{ + return QDF_NBUF_CB_RX_PACKET_REO_DEST_IND(buf); +} + +/** + * qdf_nbuf_set_rx_ipa_smmu_map() - set ipa smmu mapped flag + * @buf: Network buffer + * @value: 1 - ipa smmu mapped, 0 - ipa smmu unmapped + * + * Return: none + */ +static inline void qdf_nbuf_set_rx_ipa_smmu_map(qdf_nbuf_t buf, + uint8_t value) +{ + QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(buf) = value; +} + +/** + * qdf_nbuf_is_rx_ipa_smmu_map() - check ipa smmu map flag + * @buf: Network buffer + * + * Return 0 or 1 + */ +static inline uint8_t qdf_nbuf_is_rx_ipa_smmu_map(qdf_nbuf_t buf) +{ + return QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(buf); +} +#endif /* _QDF_NBUF_M_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/i_qdf_nbuf_api_w.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/i_qdf_nbuf_api_w.h new file mode 100644 index 0000000000000000000000000000000000000000..04be21b63cd20072187132eea60b4b05ac6de586 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/i_qdf_nbuf_api_w.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2014-2017,2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_nbuf_api_w.h + * + * Platform specific qdf_nbuf_public network buffer API + * This file defines the network buffer abstraction. + * Included by qdf_nbuf.h and should not be included + * directly from other files. + */ + +#ifndef _QDF_NBUF_W_H +#define _QDF_NBUF_W_H + +static inline void *qdf_nbuf_get_tx_fctx(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_tx_fctx(buf); +} + +static inline void *qdf_nbuf_get_rx_fctx(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_rx_fctx(buf); +} + + +static inline void +qdf_nbuf_set_tx_fctx_type(qdf_nbuf_t buf, void *ctx, uint8_t type) +{ + __qdf_nbuf_set_tx_fctx_type(buf, ctx, type); +} + +static inline void +qdf_nbuf_set_rx_fctx_type(qdf_nbuf_t buf, void *ctx, uint8_t type) +{ + __qdf_nbuf_set_rx_fctx_type(buf, ctx, type); +} + + +static inline void * +qdf_nbuf_get_ext_cb(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_ext_cb(buf); +} + +static inline void +qdf_nbuf_set_ext_cb(qdf_nbuf_t buf, void *ref) +{ + __qdf_nbuf_set_ext_cb(buf, ref); +} + +/** + * qdf_nbuf_set_rx_protocol_tag() - set given value in protocol_tag + * field of buf(skb->cb) + * @buf: Network buffer + * @val: Value to be set in the nbuf + * Return: None + */ +static inline void qdf_nbuf_set_rx_protocol_tag(qdf_nbuf_t buf, uint16_t val) +{ + __qdf_nbuf_set_rx_protocol_tag(buf, val); +} + +/** + * qdf_nbuf_get_rx_protocol_tag() - Get the value of protocol_tag + * field of buf(skb->cb) + * @buf: Network buffer + * Return: Value of Rx protocol tag in the nbuf + */ +static inline uint16_t qdf_nbuf_get_rx_protocol_tag(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_rx_protocol_tag(buf); +} + +/** + * qdf_nbuf_set_rx_flow_tag() - set given value in flow tag field + * of buf(skb->cb) + * @buf: Network buffer + * @val: Value of Rx flow tag to be set in the nbuf + * Return: None + */ +static inline void qdf_nbuf_set_rx_flow_tag(qdf_nbuf_t buf, uint16_t val) +{ + __qdf_nbuf_set_rx_flow_tag(buf, val); +} + +/** + * qdf_nbuf_get_rx_flow_tag() - Get the value of flow_tag + * field of buf(skb->cb) + * @buf: Network buffer + * Return: Value of the Rx flow tag in the nbuf + */ +static inline uint16_t qdf_nbuf_get_rx_flow_tag(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_rx_flow_tag(buf); +} + +/** + * qdf_nbuf_set_exc_frame() - set exception frame flag + * @buf: Network buffer whose cb is to set exception frame flag + * @value: exception frame flag, value 0 or 1. + * + * Return: none + */ +static inline void qdf_nbuf_set_exc_frame(qdf_nbuf_t buf, uint8_t value) +{ +} + +/** + * qdf_nbuf_set_rx_reo_dest_ind() - set reo destination indication + * @buf: Network buffer + * @value: reo destination indication value to set + * + * Return: none + */ +static inline void qdf_nbuf_set_rx_reo_dest_ind(qdf_nbuf_t buf, + uint8_t value) +{ +} + +/** +* qdf_nbuf_set_rx_ipa_smmu_map() - set ipa smmu mapped flag + * @buf: Network buffer + * @value: 1 - ipa smmu mapped, 0 - ipa smmu unmapped + * + * Return: none + */ +static inline void qdf_nbuf_set_rx_ipa_smmu_map(qdf_nbuf_t buf, + uint8_t value) +{ +} + +/** + * qdf_nbuf_is_rx_ipa_smmu_map() - check ipa smmu map flag + * @buf: Network buffer + * + * Return 0 or 1 + */ +static inline uint8_t qdf_nbuf_is_rx_ipa_smmu_map(qdf_nbuf_t buf) +{ + return 0; +} +#endif /* _QDF_NBUF_W_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/osdep.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/osdep.h new file mode 100644 index 0000000000000000000000000000000000000000..b8456e687b5411107f6b4c4af163ff4e407570d6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/osdep.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: osdep + * This file provides OS abstraction for osdependent APIs. + */ + +#ifndef _OSDEP_H +#define _OSDEP_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * ATH_DEBUG - + * Control whether debug features (printouts, assertions) are compiled + * into the driver. + */ +#ifndef ATH_DEBUG +#define ATH_DEBUG 1 /* default: include debug code */ +#endif + +#if ATH_DEBUG +#ifndef ASSERT +#define ASSERT(expr) qdf_assert(expr) +#endif +#else +#define ASSERT(expr) +#endif /* ATH_DEBUG */ + +/* + * Need to define byte order based on the CPU configuration. + */ +#ifndef _LITTLE_ENDIAN +#define _LITTLE_ENDIAN 1234 +#endif +#ifndef _BIG_ENDIAN +#define _BIG_ENDIAN 4321 +#endif +#ifdef __BIG_ENDIAN +#define _BYTE_ORDER _BIG_ENDIAN +#else +#define _BYTE_ORDER _LITTLE_ENDIAN +#endif + +/* + * Deduce if tasklets are available. If not then + * fall back to using the immediate work queue. + */ +#define qdf_sysctl_decl(f, ctl, write, filp, buffer, lenp, ppos) \ + f(struct ctl_table *ctl, int32_t write, void *buffer, \ + size_t *lenp, loff_t *ppos) + +#define QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \ + __QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) + +#define EOK (0) + +#ifndef ARPHRD_IEEE80211 +#define ARPHRD_IEEE80211 801 /* IEEE 802.11. */ +#endif + +/* + * Normal Delay functions. Time specified in microseconds. + */ +#define OS_DELAY(_us) qdf_udelay(_us) + +/* + * memory data manipulation functions. + */ +#define OS_MEMCPY(_dst, _src, _len) qdf_mem_copy(_dst, _src, _len) +#define OS_MEMMOVE(_dst, _src, _len) qdf_mem_move(_dst, _src, _len) +#define OS_MEMZERO(_buf, _len) qdf_mem_zero(_buf, _len) +#define OS_MEMSET(_buf, _ch, _len) qdf_mem_set(_buf, _len, _ch) +#define OS_MEMCMP(_mem1, _mem2, _len) qdf_mem_cmp(_mem1, _mem2, _len) + + +/* + * System time interface + */ +typedef qdf_time_t systime_t; + +/** + * os_get_timestamp() - gives the timestamp in ticks + * Return: unsigned long + */ +static inline qdf_time_t os_get_timestamp(void) +{ + /* Fix double conversion from jiffies to ms */ + return qdf_system_ticks(); +} + +struct _NIC_DEV; + +#define OS_FREE(_p) qdf_mem_free(_p) + +#define OS_DMA_MEM_CONTEXT(context) \ + dma_addr_t context + +#define OS_GET_DMA_MEM_CONTEXT(var, field) \ + &(var->field) + +/* + * Timer Interfaces. Use these macros to declare timer + * and retrieve timer argument. This is mainly for resolving + * different argument types for timer function in different OS. + */ +#define os_timer_func(_fn) \ + void _fn(void *timer_arg) + +#define OS_GET_TIMER_ARG(_arg, _type) \ + ((_arg) = (_type)(timer_arg)) + +#define OS_SET_TIMER(_timer, _ms) qdf_timer_mod(_timer, _ms) + +/* + * These are required for network manager support + */ +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(ndev, pdev) +#endif + +#endif /* end of _OSDEP_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_atomic.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_atomic.h new file mode 100644 index 0000000000000000000000000000000000000000..3b888c388f32a9e1d130787c897cb49061e234ff --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_atomic.h @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_atomic.h + * This file provides OS abstraction for atomic APIs. + */ + +#ifndef _QDF_ATOMIC_H +#define _QDF_ATOMIC_H + +#include + +/** + * qdf_atomic_t - atomic type of variable + * + * Use this when you want a simple resource counter etc. which is atomic + * across multiple CPU's. These maybe slower than usual counters on some + * platforms/OS'es, so use them with caution. + */ + +typedef __qdf_atomic_t qdf_atomic_t; + +/** + * qdf_atomic_init() - initialize an atomic type variable + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline QDF_STATUS qdf_atomic_init(qdf_atomic_t *v) +{ + return __qdf_atomic_init(v); +} + +/** + * qdf_atomic_read() - read the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: The current value of the variable + */ +static inline int32_t qdf_atomic_read(qdf_atomic_t *v) +{ + return __qdf_atomic_read(v); +} + +/** + * qdf_atomic_inc() - increment the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline void qdf_atomic_inc(qdf_atomic_t *v) +{ + __qdf_atomic_inc(v); +} + +/** + * qdf_atomic_dec() - decrement the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline void qdf_atomic_dec(qdf_atomic_t *v) +{ + __qdf_atomic_dec(v); +} + +/** + * qdf_atomic_add() - add a value to the value of an atomic variable + * @i: The amount by which to increase the atomic counter + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline void qdf_atomic_add(int i, qdf_atomic_t *v) +{ + __qdf_atomic_add(i, v); +} + +/** + * qdf_atomic_sub() - Subtract a value from an atomic variable + * @i: the amount by which to decrease the atomic counter + * @v: a pointer to an opaque atomic variable + * + * Return: none + */ +static inline void qdf_atomic_sub(int i, qdf_atomic_t *v) +{ + __qdf_atomic_sub(i, v); +} + +/** + * qdf_atomic_dec_and_test() - decrement an atomic variable and check if the + * new value is zero + * @v: A pointer to an opaque atomic variable + * + * Return: + * true (non-zero) if the new value is zero, + * false (0) if the new value is non-zero + */ +static inline int32_t qdf_atomic_dec_and_test(qdf_atomic_t *v) +{ + return __qdf_atomic_dec_and_test(v); +} + +/** + * qdf_atomic_set() - set a value to the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * @i: required value to set + * + * Atomically sets the value of v to i + * Return: None + */ +static inline void qdf_atomic_set(qdf_atomic_t *v, int i) +{ + __qdf_atomic_set(v, i); +} + +/** + * qdf_atomic_inc_return() - return the incremented value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: The current value of the variable + */ +static inline int32_t qdf_atomic_inc_return(qdf_atomic_t *v) +{ + return __qdf_atomic_inc_return(v); +} + +/** + * qdf_atomic_dec_return() - return the decremented value of an atomic + * variable + * @v: A pointer to an opaque atomic variable + * + * Return: The current value of the variable + */ +static inline int32_t qdf_atomic_dec_return(qdf_atomic_t *v) +{ + return __qdf_atomic_dec_return(v); +} + +/** + * qdf_atomic_inc_not_zero() - increment if not zero + * @v: A pointer to an opaque atomic variable + * + * Return: Returns non-zero on successful increment and zero otherwise + */ +static inline int32_t qdf_atomic_inc_not_zero(qdf_atomic_t *v) +{ + return __qdf_atomic_inc_not_zero(v); +} + +/** + * qdf_atomic_set_bit - Atomically set a bit in memory + * @nr: bit to set + * @addr: the address to start counting from + * + * Return: none + */ +static inline void qdf_atomic_set_bit(int nr, volatile unsigned long *addr) +{ + __qdf_atomic_set_bit(nr, addr); +} + +/** + * qdf_atomic_clear_bit - Atomically clear a bit in memory + * @nr: bit to clear + * @addr: the address to start counting from + * + * Return: none + */ +static inline void qdf_atomic_clear_bit(int nr, volatile unsigned long *addr) +{ + __qdf_atomic_clear_bit(nr, addr); +} + +/** + * qdf_atomic_change_bit - Atomically toggle a bit in memory + * from addr + * @nr: bit to change + * @addr: the address to start counting from + * + * Return: none + */ +static inline void qdf_atomic_change_bit(int nr, volatile unsigned long *addr) +{ + __qdf_atomic_change_bit(nr, addr); +} + +/** + * qdf_atomic_test_and_set_bit - Atomically set a bit and return its old value + * @nr: Bit to set + * @addr: the address to start counting from + * + * Return: return nr bit old value + */ +static inline int qdf_atomic_test_and_set_bit(int nr, + volatile unsigned long *addr) +{ + return __qdf_atomic_test_and_set_bit(nr, addr); +} + +/** + * qdf_atomic_test_and_clear_bit - Atomically clear a bit and return its old + * value + * @nr: bit to clear + * @addr: the address to start counting from + * + * Return: return nr bit old value + */ +static inline int qdf_atomic_test_and_clear_bit(int nr, + volatile unsigned long *addr) +{ + return __qdf_atomic_test_and_clear_bit(nr, addr); +} + +/** + * qdf_atomic_test_and_change_bit - Atomically toggle a bit and return its old + * value + * @nr: bit to change + * @addr: the address to start counting from + * + * Return: return nr bit old value + */ +static inline int qdf_atomic_test_and_change_bit(int nr, + volatile unsigned long *addr) +{ + return __qdf_atomic_test_and_change_bit(nr, addr); +} + +/** + * qdf_atomic_test_bit - Atomically get the nr-th bit value starting from addr + * @nr: bit to get + * @addr: the address to start counting from + * + * Return: return nr bit value + */ +static inline int qdf_atomic_test_bit(int nr, volatile unsigned long *addr) +{ + return __qdf_atomic_test_bit(nr, addr); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_cpuhp.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_cpuhp.h new file mode 100644 index 0000000000000000000000000000000000000000..586f4a2a831e2b8a941c6dd6b84d862bad1e2528 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_cpuhp.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_cpuhp (CPU hotplug) + * QCA driver framework (QDF) CPU hotplug APIs + */ + +#ifndef __QDF_CPUHP_H +#define __QDF_CPUHP_H + +#include "qdf_status.h" +#include "qdf_types.h" + +/** + * struct qdf_cpuhp_handler - an opaque hotplug event registration handle + */ +struct qdf_cpuhp_handler; + +typedef void (*qdf_cpuhp_callback)(void *context, uint32_t cpu); + +#ifdef QCA_CONFIG_SMP +/** + * qdf_cpuhp_init() - Initialize the CPU hotplug event infrastructure + * + * To be called once, globally. + * + * Return: None + */ +QDF_STATUS qdf_cpuhp_init(void); + +/** + * qdf_cpuhp_deinit() - De-initialize the CPU hotplug event infrastructure + * + * To be called once, globally. + * + * Return: None + */ +QDF_STATUS qdf_cpuhp_deinit(void); + +/** + * qdf_cpuhp_register() - Register for CPU up/down event notifications + * @handler: a double pointer to the event registration handle to allocate + * @context: an opaque context to pass back to event listeners + * @up_callback: the function pointer to invoke for CPU up events + * @down_callback: the function pointer to invoke for CPU down events + * + * "Up" happens just after the CPU is up. Inversely, "down" happens just before + * the CPU goes down. + * + * @handler will point to a valid memory address on success, or NULL on failure. + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_cpuhp_register(struct qdf_cpuhp_handler **handler, + void *context, + qdf_cpuhp_callback up_callback, + qdf_cpuhp_callback down_callback); + +/** + * qdf_cpuhp_unregister() - Un-register for CPU up/down event notifications + * @handler: a double pointer to the event registration handle to de-allocate + * + * @handler point to NULL upon completion + * + * Return: None + */ +void qdf_cpuhp_unregister(struct qdf_cpuhp_handler **handler); +#else +static inline QDF_STATUS qdf_cpuhp_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS qdf_cpuhp_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS qdf_cpuhp_register(struct qdf_cpuhp_handler **handler, + void *context, + qdf_cpuhp_callback up_callback, + qdf_cpuhp_callback down_callback) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void qdf_cpuhp_unregister(struct qdf_cpuhp_handler **handler) {} +#endif /* QCA_CONFIG_SMP */ + +#endif /* __QDF_CPUHP_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_crypto.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_crypto.h new file mode 100644 index 0000000000000000000000000000000000000000..8d592fdd44bf5e5379a8b85797a4f67a3e44a6ff --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_crypto.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_crypto.h + * This file provides OS abstraction for crypto APIs. + */ + +#if !defined(__QDF_CRYPTO_H) +#define __QDF_CRYPTO_H + +/* Include Files */ +#include "qdf_status.h" +#include +#include + +/* Preprocessor definitions and constants */ +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#define AES_BLOCK_SIZE 16 +#define HMAC_SHA256_CRYPTO_TYPE "hmac(sha256)" +#define HMAC_SHA386_CRYPTO_TYPE "hmac(sha384)" + +#define SHA256_CRYPTO_TYPE "sha256" +#define SHA386_CRYPTO_TYPE "sha384" + +#define SHA256_DIGEST_SIZE 32 +#define SHA384_DIGEST_SIZE 48 + +#define FIXED_PARAM_OFFSET_ASSOC_REQ 4 +#define FIXED_PARAM_OFFSET_ASSOC_RSP 6 + +#define AAD_LEN 20 +#define IEEE80211_MMIE_GMAC_MICLEN 16 + +#define IS_VALID_CTR_KEY_LEN(len) ((((len) == 16) || ((len) == 32) || \ + ((len) == 48)) ? 1 : 0) + +/* Function declarations and documenation */ + +/** + * qdf_get_hash: API to get hash using specific crypto and scatterlist + * @type: crypto type + * @element_cnt: scatterlist element count + * @addr: scatterlist element array + * @addr_len: element length array + * @hash: new hash + * + * Return: 0 if success else error code + */ +int qdf_get_hash(uint8_t *type, uint8_t element_cnt, + uint8_t *addr[], uint32_t *addr_len, + int8_t *hash); + +/** + * qdf_get_hmac_hash: API to get hmac hash using specific crypto and + * scatterlist elements. + * @type: crypto type + * @key: key needs to be used for hmac api + * @keylen: length of key + * @element_cnt: scatterlist element count + * @addr: scatterlist element array + * @addr_len: element length array + * @hash: new hash + * + * Return: 0 if success else error code + */ +int qdf_get_hmac_hash(uint8_t *type, uint8_t *key, + uint32_t keylen, uint8_t element_cnt, + uint8_t *addr[], uint32_t *addr_len, int8_t *hash); + +/** + * qdf_get_keyed_hash: API to get hash using specific crypto and + * scatterlist elements. + * @type: crypto type + * @key: key needs to be used for hmac api + * @keylen: length of key + * @element_cnt: scatterlist element count + * @addr: scatterlist element array + * @addr_len: element length array + * @hash: new hash + * + * Return: 0 if success else error code + */ +int qdf_get_keyed_hash(const char *alg, const uint8_t *key, + unsigned int key_len, const uint8_t *src[], + size_t *src_len, size_t num_elements, uint8_t *out); +/** + * qdf_update_dbl: This API does the doubling operation as defined in RFC5297 + * @d: input for doubling + * + * Return: None + */ +void qdf_update_dbl(uint8_t *d); + +/** + * qdf_aes_s2v: This API gets vector from AES string as defined in RFC5297 + * output length will be AES_BLOCK_SIZE. + * @key: key used for operation + * @key_len: key len + * @s: addresses of elements to be used + * @s_len: array of element length + * @num_s: number of elements + * @out: pointer to output vector + * + * Return: 0 if success else Error number + */ +int qdf_aes_s2v(const uint8_t *key, unsigned int key_len, const uint8_t *s[], + size_t s_len[], size_t num_s, uint8_t *out); + +/** + * qdf_aes_ctr: This API defines AES Counter Mode + * @key: key used for operation + * @key_len: key len + * @siv: Initialization vector + * @src: input + * @src_len: input len + * @dest: output + * @enc: if encryption needs to be done or decryption + * + * Return: 0 if success else Error number + */ +int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv, + const uint8_t *src, size_t src_len, uint8_t *dest, bool enc); + +/** + * qdf_crypto_aes_gmac: This API calculates MIC for GMAC + * @key: key used for operation + * @key_length: key length + * @iv: Initialization vector + * @aad: Additional authentication data + * @data: Pointer to data + * @data_len: Length of data + * @mic: Pointer to MIC + * + * Return: 0 if success else Error number + */ +int qdf_crypto_aes_gmac(uint8_t *key, uint16_t key_length, + uint8_t *iv, uint8_t *aad, uint8_t *data, + uint16_t data_len, uint8_t *mic); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __QDF_CRYPTO_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debug_domain.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debug_domain.h new file mode 100644 index 0000000000000000000000000000000000000000..39210d77bc6c026327ae6e1e3914d99fe041a728 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debug_domain.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_debug_domain + * QCA driver framework (QDF) debug domain APIs. Debug domains are used to track + * resource allocations across different driver states, particularly for runtime + * leak detection. + */ + +#ifndef __QDF_DEBUG_DOMAIN_H +#define __QDF_DEBUG_DOMAIN_H + +#include "qdf_types.h" + +/** + * struct qdf_debug_domain - debug domains for tracking resource allocations + * @QDF_DEBUG_DOMAIN_INIT: The default debug domain, tied to driver load + * @QDF_DEBUG_DOMAIN_ACTIVE: The active debug domain, tied some "running" state + * @QDF_DEBUG_DOMAIN_COUNT: The number of debug domains for iterating, etc. + */ +enum qdf_debug_domain { + QDF_DEBUG_DOMAIN_INIT, + QDF_DEBUG_DOMAIN_ACTIVE, + + /* keep last */ + QDF_DEBUG_DOMAIN_COUNT, +}; + +/** + * qdf_debug_domain_get() - Get the current debug domain + * + * Return: the current debug domain + */ +enum qdf_debug_domain qdf_debug_domain_get(void); + +/** + * qdf_debug_domain_set() - Set the current debug domain + * @domain: the domain to change to + * + * Return: None + */ +void qdf_debug_domain_set(enum qdf_debug_domain domain); + +/** + * qdf_debug_domain_name() - Get the human readable name of a debug domain + * @domain: The domain to return the name of + * + * Return: name of the given domain + */ +const char *qdf_debug_domain_name(enum qdf_debug_domain domain); + +/** + * qdf_debug_domain_valid() - bounds checks the given domain + * @domain: the domain to validate + * + * Return: true is the given domain is a valid debug domain + */ +bool qdf_debug_domain_valid(enum qdf_debug_domain domain); + +#endif /* __QDF_DEBUG_DOMAIN_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debugfs.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..46dc3a945a5ae2cc89d24747966a4965c3fd4f59 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debugfs.h @@ -0,0 +1,387 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_debugfs.h + * This file provides OS abstraction for debug filesystem APIs. + */ + +#ifndef _QDF_DEBUGFS_H +#define _QDF_DEBUGFS_H + +#include +#include +#include +#include + +/* representation of qdf dentry */ +typedef __qdf_dentry_t qdf_dentry_t; +typedef __qdf_debugfs_file_t qdf_debugfs_file_t; + +/* qdf file modes */ +#define QDF_FILE_USR_READ 00400 +#define QDF_FILE_USR_WRITE 00200 + +#define QDF_FILE_GRP_READ 00040 +#define QDF_FILE_GRP_WRITE 00020 + +#define QDF_FILE_OTH_READ 00004 +#define QDF_FILE_OTH_WRITE 00002 + +/** + * struct qdf_debugfs_fops - qdf debugfs operations + * @show: Callback for show operation. + * Following functions can be used to print data in the show function, + * qdf_debugfs_print() + * qdf_debugfs_hexdump() + * qdf_debugfs_write() + * @write: Callback for write operation. + * @priv: Private pointer which will be passed in the registered callbacks. + */ +struct qdf_debugfs_fops { + QDF_STATUS(*show)(qdf_debugfs_file_t file, void *arg); + QDF_STATUS(*write)(void *priv, const char *buf, qdf_size_t len); + void *priv; +}; + +#ifdef WLAN_DEBUGFS +/** + * qdf_debugfs_init() - initialize debugfs + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_debugfs_init(void); + +/** + * qdf_debugfs_exit() - cleanup debugfs + * + * Return: None + */ +void qdf_debugfs_exit(void); + +/** + * qdf_debugfs_create_dir() - create a debugfs directory + * @name: name of the new directory + * @parent: parent node. If NULL, defaults to base qdf_debugfs_root + * + * Return: dentry structure pointer in case of success, otherwise NULL. + * + */ +qdf_dentry_t qdf_debugfs_create_dir(const char *name, qdf_dentry_t parent); + +/** + * qdf_debugfs_create_file() - create a debugfs file + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. If NULL, defaults to base qdf_debugfs_root + * @fops: file operations { .read, .write ... } + * + * Return: dentry structure pointer in case of success, otherwise NULL. + * + */ +qdf_dentry_t qdf_debugfs_create_file(const char *name, uint16_t mode, + qdf_dentry_t parent, + struct qdf_debugfs_fops *fops); + +/** + * qdf_debugfs_printf() - print formated string into debugfs file + * @file: debugfs file handle passed in fops->show() function + * @f: the format string to use + * @...: arguments for the format string + */ +void qdf_debugfs_printf(qdf_debugfs_file_t file, const char *f, ...); + +/** + * qdf_debugfs_hexdump() - print hexdump into debugfs file + * @file: debugfs file handle passed in fops->show() function. + * @buf: data + * @len: data length + * @rowsize: row size in bytes to dump + * @groupsize: group size in bytes to dump + * + */ +void qdf_debugfs_hexdump(qdf_debugfs_file_t file, const uint8_t *buf, + qdf_size_t len, int rowsize, int groupsize); + +/** + * qdf_debugfs_overflow() - check overflow occurrence in debugfs buffer + * @file: debugfs file handle passed in fops->show() function. + * + * Return: 1 on overflow occurrence else 0 + * + */ +bool qdf_debugfs_overflow(qdf_debugfs_file_t file); + +/** + * qdf_debugfs_write() - write data into debugfs file + * @file: debugfs file handle passed in fops->show() function. + * @buf: data + * @len: data length + * + */ +void qdf_debugfs_write(qdf_debugfs_file_t file, const uint8_t *buf, + qdf_size_t len); + +/** + * qdf_debugfs_create_u8() - create a debugfs file for a u8 variable + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root' + * @value: pointer to a u8 variable (global/static) + * + * Return: dentry for the file; NULL in case of failure. + * + */ +qdf_dentry_t qdf_debugfs_create_u8(const char *name, uint16_t mode, + qdf_dentry_t parent, u8 *value); + +/** + * qdf_debugfs_create_u16() - create a debugfs file for a u16 variable + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root' + * @value: pointer to a u16 variable (global/static) + * + * Return: dentry for the file; NULL in case of failure. + * + */ +qdf_dentry_t qdf_debugfs_create_u16(const char *name, uint16_t mode, + qdf_dentry_t parent, u16 *value); + +/** + * qdf_debugfs_create_u32() - create a debugfs file for a u32 variable + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root' + * @value: pointer to a u32 variable (global/static) + * + * Return: dentry for the file; NULL in case of failure. + * + */ +qdf_dentry_t qdf_debugfs_create_u32(const char *name, uint16_t mode, + qdf_dentry_t parent, u32 *value); + +/** + * qdf_debugfs_create_u64() - create a debugfs file for a u64 variable + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root' + * @value: pointer to a u64 variable (global/static) + * + * Return: dentry for the file; NULL in case of failure. + * + */ +qdf_dentry_t qdf_debugfs_create_u64(const char *name, uint16_t mode, + qdf_dentry_t parent, u64 *value); + +/** + * qdf_debugfs_create_atomic() - create a debugfs file for an atomic variable + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root' + * @value: pointer to an atomic variable (global/static) + * + * Return: dentry for the file; NULL in case of failure. + * + */ +qdf_dentry_t qdf_debugfs_create_atomic(const char *name, uint16_t mode, + qdf_dentry_t parent, + qdf_atomic_t *value); + +/** + * qdf_debugfs_create_string() - create a debugfs file for a string + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root' + * @str: a pointer to NULL terminated string (global/static). + * + * Return: dentry for the file; NULL in case of failure. + * + */ +qdf_dentry_t qdf_debugfs_create_string(const char *name, uint16_t mode, + qdf_dentry_t parent, char *str); + +/** + * qdf_debugfs_remove_dir_recursive() - remove directory recursively + * @d: debugfs node + * + * This function will recursively removes a dreictory in debugfs that was + * previously createed with a call to qdf_debugfs_create_file() or it's + * variant functions. + */ +void qdf_debugfs_remove_dir_recursive(qdf_dentry_t d); + +/** + * qdf_debugfs_remove_dir() - remove debugfs directory + * @d: debugfs node + * + */ +void qdf_debugfs_remove_dir(qdf_dentry_t d); + +/** + * qdf_debugfs_remove_file() - remove debugfs file + * @d: debugfs node + * + */ +void qdf_debugfs_remove_file(qdf_dentry_t d); + +/** + * qdf_debugfs_create_file_simplified() - Create a simple debugfs file + * where a single function call produces all the desired output + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root' + * @fops: file operations { .show, .write , .priv... } + * + * Users just have to define the show() function and pass it via @fops.show() + * argument. When the output time comes, the show() will be called once. + * The show() function must do everything that is needed to write the data, + * all in one function call. + * This is useful either for writing small amounts of data to debugfs or + * for cases in which the output is not iterative. + * The private data can be passed via @fops.priv, which will be available + * inside the show() function as the 'private' filed of the qdf_debugfs_file_t. + * + * Return: dentry structure pointer in case of success, otherwise NULL. + * + */ + +qdf_dentry_t qdf_debugfs_create_file_simplified(const char *name, uint16_t mode, + qdf_dentry_t parent, + struct qdf_debugfs_fops *fops); + +/** + * qdf_debugfs_printer() - Print formated string into debugfs file + * @priv: The private data + * @fmt: Format string + * @...: arguments for the format string + * + * This function prints a new line character after printing the formatted + * string into the debugfs file. + * This function can be passed when the argument is of type qdf_abstract_print + */ +int qdf_debugfs_printer(void *priv, const char *fmt, ...); + +#else /* WLAN_DEBUGFS */ + +static inline QDF_STATUS qdf_debugfs_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void qdf_debugfs_exit(void) { } + +static inline qdf_dentry_t qdf_debugfs_create_dir(const char *name, + qdf_dentry_t parent) +{ + return NULL; +} + +static inline qdf_dentry_t +qdf_debugfs_create_file(const char *name, uint16_t mode, qdf_dentry_t parent, + struct qdf_debugfs_fops *fops) +{ + return NULL; +} + +static inline void qdf_debugfs_printf(qdf_debugfs_file_t file, const char *f, + ...) +{ +} + +static inline void qdf_debugfs_hexdump(qdf_debugfs_file_t file, + const uint8_t *buf, qdf_size_t len, + int rowsize, int groupsize) +{ +} + +static inline bool qdf_debugfs_overflow(qdf_debugfs_file_t file) +{ + return 0; +} + +static inline void qdf_debugfs_write(qdf_debugfs_file_t file, + const uint8_t *buf, qdf_size_t len) +{ +} + +static inline qdf_dentry_t qdf_debugfs_create_u8(const char *name, + uint16_t mode, + qdf_dentry_t parent, u8 *value) +{ + return NULL; +} + +static inline qdf_dentry_t qdf_debugfs_create_u16(const char *name, + uint16_t mode, + qdf_dentry_t parent, + u16 *value) +{ + return NULL; +} + +static inline qdf_dentry_t qdf_debugfs_create_u32(const char *name, + uint16_t mode, + qdf_dentry_t parent, + u32 *value) +{ + return NULL; +} + +static inline qdf_dentry_t qdf_debugfs_create_u64(const char *name, + uint16_t mode, + qdf_dentry_t parent, + u64 *value) +{ + return NULL; +} + +static inline qdf_dentry_t qdf_debugfs_create_atomic(const char *name, + uint16_t mode, + qdf_dentry_t parent, + qdf_atomic_t *value) +{ + return NULL; +} + +static inline qdf_dentry_t debugfs_create_string(const char *name, + uint16_t mode, + qdf_dentry_t parent, char *str) +{ + return NULL; +} + +static inline void qdf_debugfs_remove_dir_recursive(qdf_dentry_t d) {} +static inline void qdf_debugfs_remove_dir(qdf_dentry_t d) {} +static inline void qdf_debugfs_remove_file(qdf_dentry_t d) {} + +static inline +qdf_dentry_t qdf_debugfs_create_file_simplified(const char *name, uint16_t mode, + qdf_dentry_t parent, + struct qdf_debugfs_fops *fops) +{ + return NULL; +} + +static inline +int qdf_debugfs_printer(void *priv, const char *fmt, ...) +{ + return 0; +} +#endif /* WLAN_DEBUGFS */ +#endif /* _QDF_DEBUGFS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_defer.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_defer.h new file mode 100644 index 0000000000000000000000000000000000000000..c73f0b98ace3d8b9e305f3c6898a8a4792dff237 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_defer.h @@ -0,0 +1,254 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_defer.h + * This file abstracts deferred execution API's. + */ + +#ifndef __QDF_DEFER_H +#define __QDF_DEFER_H + +#include +#include + +/** + * TODO This implements work queues (worker threads, kernel threads etc.). + * Note that there is no cancel on a scheduled work. You cannot free a work + * item if its queued. You cannot know if a work item is queued or not unless + * its running, hence you know its not queued. + * + * so if, say, a module is asked to unload itself, how exactly will it make + * sure that the work's not queued, for OS'es that dont provide such a + * mechanism?? + */ + +/* + * Representation of a work queue. + */ +typedef __qdf_work_t qdf_work_t; +typedef __qdf_workqueue_t qdf_workqueue_t; + +/* + * Representation of a bottom half. + */ +typedef __qdf_bh_t qdf_bh_t; + +/** + * qdf_create_bh - creates the bottom half deferred handler + * @bh: pointer to bottom + * @func: deferred function to run at bottom half interrupt context. + * @arg: argument for the deferred function + * Return: none + */ +static inline void +qdf_create_bh(qdf_bh_t *bh, qdf_defer_fn_t func, void *arg) +{ + __qdf_init_bh(bh, func, arg); +} + +/** + * qdf_sched - schedule a bottom half (DPC) + * @bh: pointer to bottom + * Return: none + */ +static inline void qdf_sched_bh(qdf_bh_t *bh) +{ + __qdf_sched_bh(bh); +} + +/** + * qdf_destroy_bh - destroy the bh (synchronous) + * @bh: pointer to bottom + * Return: none + */ +static inline void qdf_destroy_bh(qdf_bh_t *bh) +{ + __qdf_disable_bh(bh); +} + +/*********************Non-Interrupt Context deferred Execution***************/ + +/** + * qdf_create_work - create a work/task queue, This runs in non-interrupt + * context, so can be preempted by H/W & S/W intr + * @hdl: OS handle + * @work: pointer to work + * @func: deferred function to run at bottom half non-interrupt context. + * @arg: argument for the deferred function + * + * Return: QDF status + */ +static inline QDF_STATUS qdf_create_work(qdf_handle_t hdl, qdf_work_t *work, + qdf_defer_fn_t func, void *arg) +{ + return __qdf_init_work(work, func, arg); +} + +/** + * qdf_create_workqueue - create a workqueue, This runs in non-interrupt + * context, so can be preempted by H/W & S/W intr + * @name: string + * Return: pointer of type qdf_workqueue_t + */ +static inline qdf_workqueue_t *qdf_create_workqueue(char *name) +{ + return __qdf_create_workqueue(name); +} + +/** + * qdf_create_singlethread_workqueue() - create a single threaded workqueue + * @name: string + * + * This API creates a dedicated work queue with a single worker thread to avoid + * wasting unnecessary resources when works which needs to be submitted in this + * queue are not very critical and frequent. + * + * Return: pointer of type qdf_workqueue_t + */ +static inline qdf_workqueue_t *qdf_create_singlethread_workqueue(char *name) +{ + return __qdf_create_singlethread_workqueue(name); +} + +/** + * qdf_alloc_high_prior_ordered_workqueue - alloc high-prior ordered workqueue + * @name: string + * + * Return: pointer of type qdf_workqueue_t + */ +static inline +qdf_workqueue_t *qdf_alloc_high_prior_ordered_workqueue(char *name) +{ + return __qdf_alloc_high_prior_ordered_workqueue(name); +} + +/** + * qdf_alloc_unbound_workqueue - allocate an unbound workqueue + * @name: string + * + * Return: pointer of type qdf_workqueue_t + */ +static inline qdf_workqueue_t *qdf_alloc_unbound_workqueue(char *name) +{ + return __qdf_alloc_unbound_workqueue(name); +} + +/** + * qdf_queue_work - Queue the work/task + * @hdl: OS handle + * @wqueue: pointer to workqueue + * @work: pointer to work + * Return: none + */ +static inline void +qdf_queue_work(qdf_handle_t hdl, qdf_workqueue_t *wqueue, qdf_work_t *work) +{ + return __qdf_queue_work(wqueue, work); +} + +/** + * qdf_flush_workqueue - flush the workqueue + * @hdl: OS handle + * @wqueue: pointer to workqueue + * Return: none + */ +static inline void qdf_flush_workqueue(qdf_handle_t hdl, + qdf_workqueue_t *wqueue) +{ + return __qdf_flush_workqueue(wqueue); +} + +/** + * qdf_destroy_workqueue - Destroy the workqueue + * @hdl: OS handle + * @wqueue: pointer to workqueue + * Return: none + */ +static inline void qdf_destroy_workqueue(qdf_handle_t hdl, + qdf_workqueue_t *wqueue) +{ + return __qdf_destroy_workqueue(wqueue); +} + +/** + * qdf_sched_work - Schedule a deferred task on non-interrupt context + * @hdl: OS handle + * @work: pointer to work + * Retrun: none + */ +static inline void qdf_sched_work(qdf_handle_t hdl, qdf_work_t *work) +{ + __qdf_sched_work(work); +} + +/** + * qdf_cancel_work() - Cancel a work + * @work: pointer to work + * + * Cancel work and wait for its execution to finish. + * This function can be used even if the work re-queues + * itself or migrates to another workqueue. On return + * from this function, work is guaranteed to be not + * pending or executing on any CPU. The caller must + * ensure that the workqueue on which work was last + * queued can't be destroyed before this function returns. + * + * Return: true if work was pending, false otherwise + */ +static inline bool qdf_cancel_work(qdf_work_t *work) +{ + return __qdf_cancel_work(work); +} + +/** + * qdf_flush_work - Flush a deferred task on non-interrupt context + * @work: pointer to work + * + * Wait until work has finished execution. work is guaranteed to be + * idle on return if it hasn't been requeued since flush started. + * + * Return: none + */ +static inline void qdf_flush_work(qdf_work_t *work) +{ + __qdf_flush_work(work); +} + +/** + * qdf_disable_work - disable the deferred task (synchronous) + * @work: pointer to work + * Return: unsigned int + */ +static inline uint32_t qdf_disable_work(qdf_work_t *work) +{ + return __qdf_disable_work(work); +} + +/** + * qdf_destroy_work - destroy the deferred task (synchronous) + * @hdl: OS handle + * @work: pointer to work + * Return: none + */ +static inline void qdf_destroy_work(qdf_handle_t hdl, qdf_work_t *work) +{ + __qdf_disable_work(work); +} + +#endif /*_QDF_DEFER_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_delayed_work.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_delayed_work.h new file mode 100644 index 0000000000000000000000000000000000000000..67af24933f501193a1b7316d56e4e99ed614febb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_delayed_work.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_delayed_work.h + * A simple, delayed work type for executing a callback after some delay. + */ + +#ifndef __QDF_DELAYED_WORK_H +#define __QDF_DELAYED_WORK_H + +#include "i_qdf_delayed_work.h" +#include "qdf_status.h" +#include "qdf_types.h" + +typedef void (*qdf_delayed_work_cb)(void *context); + +/** + * struct qdf_delayed_work - a defered work type which executes a callback after + * some delay + * @dwork: OS-specific delayed work + * @callback: the callback to be executed + * @context: the context to pass to the callback + */ +struct qdf_delayed_work { + struct __qdf_opaque_delayed_work dwork; + qdf_delayed_work_cb callback; + void *context; +}; + +/** + * qdf_delayed_work_create() - initialized a delayed work @dwork + * @dwork: the delayed work to initialize + * @callback: the callback to be executed + * @context: the context to pass to the callback + * + * Return: QDF_STATUS + */ +#define qdf_delayed_work_create(dwork, callback, context) \ + __qdf_delayed_work_create(dwork, callback, context, __func__, __LINE__) + +qdf_must_check QDF_STATUS +__qdf_delayed_work_create(struct qdf_delayed_work *dwork, + qdf_delayed_work_cb callback, void *context, + const char *func, uint32_t line); + +/** + * qdf_delayed_work_destroy() - deinitialize a delayed work @dwork + * @dwork: the delayed work to de-initialize + * + * Return: None + */ +#define qdf_delayed_work_destroy(dwork) \ + __qdf_delayed_work_destroy(dwork, __func__, __LINE__) + +void __qdf_delayed_work_destroy(struct qdf_delayed_work *dwork, + const char *func, uint32_t line); + +/** + * qdf_delayed_work_start() - schedule execution of @dwork callback + * @dwork: the delayed work to start + * @msec: the delay before execution in milliseconds + * + * Return: true if started successfully + */ +bool qdf_delayed_work_start(struct qdf_delayed_work *dwork, uint32_t msec); + +/** + * qdf_delayed_work_stop_sync() - Synchronously stop execution of @dwork + * @dwork: the delayed work to stop + * + * When this returns, @dwork is guaranteed to not be queued, and its callback + * not executing. + * + * Return: true if @dwork was queued or running + */ +bool qdf_delayed_work_stop_sync(struct qdf_delayed_work *dwork); + +#ifdef WLAN_DELAYED_WORK_DEBUG +/** + * qdf_delayed_work_check_for_leaks() - assert no delayed work leaks + * + * Return: None + */ +void qdf_delayed_work_check_for_leaks(void); + +/** + * qdf_delayed_work_feature_init() - global init logic for delayed work + * + * Return: None + */ +void qdf_delayed_work_feature_init(void); + +/** + * qdf_delayed_work_feature_deinit() - global de-init logic for delayed work + * + * Return: None + */ +void qdf_delayed_work_feature_deinit(void); +#else +static inline void qdf_delayed_work_check_for_leaks(void) { } +static inline void qdf_delayed_work_feature_init(void) { } +static inline void qdf_delayed_work_feature_deinit(void) { } +#endif /* WLAN_DELAYED_WORK_DEBUG */ + +#endif /* __QDF_DELAYED_WORK_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_dev.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..886742eda0e6aed6a6564d2d2875a2c0a44f1817 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_dev.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_dev + * QCA driver framework (QDF) device management APIs + */ + +#if !defined(__QDF_DEV_H) +#define __QDF_DEV_H + +/* Include Files */ +#include +#include "i_qdf_dev.h" + +#define qdf_cpumask_pr_args(maskp) __qdf_cpumask_pr_args(maskp) +#define qdf_for_each_possible_cpu(cpu) __qdf_for_each_possible_cpu(cpu) +#define qdf_for_each_online_cpu(cpu) __qdf_for_each_online_cpu(cpu) + +#ifdef ENHANCED_OS_ABSTRACTION +/** + * qdf_dev_alloc_mem() - allocate memory + * @qdfdev: Device handle + * @mrptr: Pointer to the allocated memory + * @reqsize: Allocation request in bytes + * @mask: Property mask to be associated to the allocated memory + * + * This function will acquire memory to be associated with a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qdf_dev_alloc_mem(struct qdf_dev *qdfdev, struct qdf_devm **mrptr, + uint32_t reqsize, uint32_t mask); + +/** + * qdf_dev_release_mem() - release memory + * @qdfdev: Device handle + * @mrptr: Pointer to the allocated memory + * + * This function will acquire memory to be associated with a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qdf_dev_release_mem(struct qdf_dev *qdfdev, struct qdf_devm *mrptr); + +/** + * qdf_dev_modify_irq() - modify irq + * @irnum: irq number + * @cmask: Bitmap to be cleared for the property mask + * @smask: Bitmap to be set for the property mask + * + * This function will acquire memory to be associated with a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qdf_dev_modify_irq_status(uint32_t irnum, unsigned long cmask, + unsigned long smask); + +/** + * qdf_dev_set_irq_affinity() - set irq affinity + * @irnum: irq number + * @cpmask: cpu affinity bitmap + * + * This function will set the affinity level for an irq + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qdf_dev_set_irq_affinity(uint32_t irnum, struct qdf_cpu_mask *cpmask); +#else +static inline QDF_STATUS +qdf_dev_alloc_mem(struct qdf_dev *qdfdev, struct qdf_devm **mrptr, + uint32_t reqsize, uint32_t mask) +{ + return __qdf_dev_alloc_mem(qdfdev, mrptr, reqsize, mask); +} + +static inline QDF_STATUS +qdf_dev_release_mem(struct qdf_dev *qdfdev, struct qdf_devm *mrptr) +{ + return __qdf_dev_release_mem(qdfdev, mrptr); +} + +static inline QDF_STATUS +qdf_dev_modify_irq_status(uint32_t irnum, unsigned long cmask, + unsigned long smask) +{ + return __qdf_dev_modify_irq_status(irnum, cmask, smask); +} + +static inline QDF_STATUS +qdf_dev_set_irq_affinity(uint32_t irnum, struct qdf_cpu_mask *cpmask) +{ + return __qdf_dev_set_irq_affinity(irnum, cpmask); +} +#endif + +static inline int qdf_topology_physical_package_id(unsigned int cpu) +{ + return __qdf_topology_physical_package_id(cpu); +} + +static inline int qdf_cpumask_subset(qdf_cpu_mask *srcp1, + const qdf_cpu_mask *srcp2) +{ + return __qdf_cpumask_subset(srcp1, srcp2); +} + +static inline int qdf_cpumask_intersects(qdf_cpu_mask *srcp1, + const qdf_cpu_mask *srcp2) +{ + return __qdf_cpumask_intersects(srcp1, srcp2); +} + +static inline int qdf_core_ctl_set_boost(bool boost) +{ + return __qdf_core_ctl_set_boost(boost); +} +#endif /* __QDF_DEV_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_event.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_event.h new file mode 100644 index 0000000000000000000000000000000000000000..27de4c79369c114c82fd985dace2beaa3c157d96 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_event.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_event.h + * This file provides OS abstraction for event APIs. + */ + +#if !defined(__QDF_EVENT_H) +#define __QDF_EVENT_H + +/* Include Files */ +#include "qdf_status.h" +#include +#include +#include +#include + +/* Preprocessor definitions and constants */ +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +typedef __qdf_event_t qdf_event_t; +/* Function declarations and documenation */ + +QDF_STATUS qdf_event_create(qdf_event_t *event); + +QDF_STATUS qdf_event_set(qdf_event_t *event); + +QDF_STATUS qdf_event_reset(qdf_event_t *event); + +QDF_STATUS qdf_event_destroy(qdf_event_t *event); + +QDF_STATUS qdf_wait_single_event(qdf_event_t *event, + uint32_t timeout); + +/** + * qdf_complete_wait_events() - Sets all the events which are in the list. + * + * This function traverses the list of events and sets all of them. It + * sets the flag force_set as TRUE to indicate that these events have + * been forcefully set. + * + * Return: None + */ +void qdf_complete_wait_events(void); + +/** + * qdf_wait_for_event_completion() - Waits for an event to be set. + * @event: Pointer to an event to wait on. + * @timeout: Timeout value (in milliseconds). + * + * This function adds the event in a list and waits on it until it + * is set or the timeout duration elapses. The purpose of waiting + * is considered complete only if the event is set and the flag + * force_set is FALSE, it returns success in this case. In other + * cases it returns appropriate error status. + * + * Return: QDF status + */ +QDF_STATUS qdf_wait_for_event_completion(qdf_event_t *event, + uint32_t timeout); + +/** + * qdf_event_list_init() - Creates a list and spinlock for events. + * + * This function creates a list for maintaining events on which threads + * wait for completion. A spinlock is also created to protect related + * operations. + * + * Return: None + */ +void qdf_event_list_init(void); + +/** + * qdf_event_list_destroy() - Destroys list and spinlock created for events. + * + * This function destroys the list and spinlock created for events on which + * threads wait for completion. + * + * Return: None + */ +void qdf_event_list_destroy(void); + +/** + * qdf_exit_thread() - exit thread execution + * @status: QDF status + * + * Return: QDF status + */ +QDF_STATUS qdf_exit_thread(QDF_STATUS status); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __QDF_EVENT_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_file.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_file.h new file mode 100644 index 0000000000000000000000000000000000000000..a90b921400101e3d93114a0807332ca48ea91d5e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_file.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Thin filesystem API abstractions + */ + +#ifndef __QDF_FILE_H +#define __QDF_FILE_H + +#include "qdf_status.h" + +/** + * qdf_file_read() - read the entire contents of a file + * @path: the full path of the file to read + * @out_buf: double pointer for referring to the file contents buffer + * + * This API allocates a new, null-terminated buffer containing the contents of + * the file at @path. On success, @out_buf points to this new buffer, otherwise + * @out_buf is set to NULL. + * + * Consumers must free the allocated buffer by calling qdf_file_buf_free(). + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_file_read(const char *path, char **out_buf); + +/** + * qdf_file_buf_free() - free a previously allocated file buffer + * @file_buf: pointer to the file buffer to free + * + * This API is used in conjunction with qdf_file_read(). + * + * Return: None + */ +void qdf_file_buf_free(char *file_buf); + +#endif /* __QDF_FILE_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_flex_mem.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_flex_mem.h new file mode 100644 index 0000000000000000000000000000000000000000..2170b71458ea8afa94a0bc04e0084b6fe250a44e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_flex_mem.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_flex_mem (flexibly sized memory allocator) + * QCA driver framework (QDF) flex mem APIs + * + * A flex memory allocator is a memory pool which not only dynamically expands, + * but also dynamically reduces as well. Benefits over full dynamic memory + * allocation are amoritized allocation cost, and reduced memory fragmentation. + * + * The allocator consists of 3 parts: the pool, segments, and items. Items are + * the smallest chuncks of memory that are handed out via the alloc call, and + * are all of a uniform size. Segments are groups of items, representing the + * smallest amount of memory that can be dynamically allocated or freed. A pool + * is simply a collection of segments. + */ + +#ifndef __QDF_FLEX_MEM_H +#define __QDF_FLEX_MEM_H + +#include "qdf_list.h" +#include "qdf_lock.h" + +#define QDF_FM_BITMAP uint32_t +#define QDF_FM_BITMAP_BITS (sizeof(QDF_FM_BITMAP) * 8) + +/** + * qdf_flex_mem_pool - a pool of memory segments + * @seg_list: the list containing the memory segments + * @lock: spinlock for protecting internal data structures + * @reduction_limit: the minimum number of segments to keep during reduction + * @item_size: the size of the items the pool will allocate + */ +struct qdf_flex_mem_pool { + qdf_list_t seg_list; + struct qdf_spinlock lock; + uint16_t reduction_limit; + uint16_t item_size; +}; + +/** + * qdf_flex_mem_segment - a memory pool segment + * @node: the list node for membership in the memory pool + * @dynamic: true if this segment was dynamically allocated + * @used_bitmap: bitmap for tracking which items in the segment are in use + * @bytes: raw memory for allocating items from + */ +struct qdf_flex_mem_segment { + qdf_list_node_t node; + bool dynamic; + QDF_FM_BITMAP used_bitmap; + uint8_t *bytes; +}; + +/** + * DEFINE_QDF_FLEX_MEM_POOL() - define a new flex mem pool with one segment + * @name: the name of the pool variable + * @size_of_item: size of the items the pool will allocate + * @rm_limit: min number of segments to keep during reduction + */ +#define DEFINE_QDF_FLEX_MEM_POOL(name, size_of_item, rm_limit) \ + struct qdf_flex_mem_pool name; \ + uint8_t __ ## name ## _head_bytes[QDF_FM_BITMAP_BITS * (size_of_item)];\ + struct qdf_flex_mem_segment __ ## name ## _head = { \ + .node = QDF_LIST_NODE_INIT_SINGLE( \ + QDF_LIST_ANCHOR(name.seg_list)), \ + .bytes = __ ## name ## _head_bytes, \ + }; \ + struct qdf_flex_mem_pool name = { \ + .seg_list = QDF_LIST_INIT_SINGLE(__ ## name ## _head.node), \ + .reduction_limit = (rm_limit), \ + .item_size = (size_of_item), \ + } + +/** + * qdf_flex_mem_init() - initialize a qdf_flex_mem_pool + * @pool: the pool to initialize + * + * Return: None + */ +void qdf_flex_mem_init(struct qdf_flex_mem_pool *pool); + +/** + * qdf_flex_mem_deinit() - deinitialize a qdf_flex_mem_pool + * @pool: the pool to deinitialize + * + * Return: None + */ +void qdf_flex_mem_deinit(struct qdf_flex_mem_pool *pool); + +/** + * qdf_flex_mem_alloc() - logically allocate memory from the pool + * @pool: the pool to allocate from + * + * This function returns any unused item from any existing segment in the pool. + * If there are no unused items in the pool, a new segment is dynamically + * allocated to service the request. The size of the allocated memory is the + * size originally used to create the pool. + * + * Return: Point to newly allocated memory, NULL on failure + */ +void *qdf_flex_mem_alloc(struct qdf_flex_mem_pool *pool); + +/** + * qdf_flex_mem_free() - logically frees @ptr from the pool + * @pool: the pool to return the memory to + * @ptr: a pointer received via a call to qdf_flex_mem_alloc() + * + * This function marks the item corresponding to @ptr as unused. If that item + * was the last used item in the segment it belongs to, and the segment was + * dynamically allocated, the segment will be freed. + * + * Return: None + */ +void qdf_flex_mem_free(struct qdf_flex_mem_pool *pool, void *ptr); + +#endif /* __QDF_FLEX_MEM_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_hang_event_notifier.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_hang_event_notifier.h new file mode 100644 index 0000000000000000000000000000000000000000..532ea47ca0e2e20a0bb5df111b0871ba2dbdacea --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_hang_event_notifier.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: qdf_hang_event_notifier + * This file provides OS dependent QDF notifier call for hang event + */ + +#ifndef QDF_HANG_EVENT_NOTIFIER_H +#define QDF_HANG_EVENT_NOTIFIER_H + +#include + +#define QDF_HANG_EVENT_VERSION "1" +/* Max hang event buffer size */ +#define QDF_HANG_EVENT_DATA_SIZE 390 +/* Max offset which host can write */ +#define QDF_WLAN_MAX_HOST_OFFSET 194 +/* Start of the Firmware Data offset */ +#define QDF_WLAN_HANG_FW_OFFSET 195 + +/** + * hang_event_tag: Hang event tag for various modules + * @HANG_EVT_TAG_CDS: CDS module hang event tag + * @HANG_EVT_TAG_OS_IF: OS interface module hang event tag + * @HANG_EVT_TAG_OS_IF_SCAN: scan module hang event tag + * @HANG_EVT_TAG_LEGACY_MAC: Legacy mac module hang event tag + * @HANG_EVT_TAG_WMI_EVT_HIST: WMI event history hang event tag + * @HANG_EVT_TAG_WMI_CMD_HIST: WMI command history hang event tag + * @HANG_EVT_TAG_WMI_CMD_HIST: HTC event tag wmi command history hang event tag + * @HANG_EVT_TAG_DP_PEER_INFO: DP peer info hang event tag + * @HANG_EVT_TAG_CE_INFO: Copy Engine hang event tag + * @HANG_EVT_TAG_BUS_INFO: Bus hang event tag + */ +enum hang_event_tag { + HANG_EVT_TAG_CDS, + HANG_EVT_TAG_OS_IF, + HANG_EVT_TAG_OS_IF_SCAN, + HANG_EVT_TAG_LEGACY_MAC, + HANG_EVT_TAG_WMI_EVT_HIST, + HANG_EVT_TAG_WMI_CMD_HIST, + HANG_EVT_TAG_HTC_CREDIT_HIST, + HANG_EVT_TAG_DP_PEER_INFO, + HANG_EVT_TAG_CE_INFO, + HANG_EVT_TAG_BUS_INFO +}; + +#define QDF_HANG_EVENT_TLV_HDR_SIZE (sizeof(uint16_t)) + +#define QDF_HANG_EVT_SET_HDR(tlv_buf, tag, len) \ + (((uint16_t *)(tlv_buf))[0]) = (((tag) << 8) | ((len) & 0x000000FF)) + +#define QDF_HANG_GET_STRUCT_TLVLEN(tlv_struct) \ + ((uint16_t)(sizeof(tlv_struct) - QDF_HANG_EVENT_TLV_HDR_SIZE)) + +/** + * qdf_notifier_data - Private data for notifier data + * @hang_data: Data filled by notifier + * @offset: Current offset of the hang data buffer + */ +struct qdf_notifer_data { + uint8_t *hang_data; + unsigned int offset; +}; + +#ifdef WLAN_HANG_EVENT +/** + * qdf_hang_event_register_notifier() - Hang data notifier register + * @nb: Notifier block + * + * This function registers notifier block for the hang data notifier chain + * the registered function will be invoked when the hang data notifier call + * is invoked. + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_hang_event_register_notifier(qdf_notif_block *nb); + +/** + * qdf_hang_event_unregister_notifier() - Hang data notifier unregister + * @nb: Notifier block + * + * This function unregisters notifier block for the hang data notifier chain. + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_hang_event_unregister_notifier(qdf_notif_block *nb); + +/** + * qdf_hang_event_notifier_call() - Hang data notifier register + * @v: state + * @data: Private data for this notifier chain + * + * This function when invoked will call the functions registered with this + * notifier chain. + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_hang_event_notifier_call(unsigned long v, void *data); +#else +static inline +QDF_STATUS qdf_hang_event_register_notifier(qdf_notif_block *nb) +{ + return QDF_STATUS_SUCCESS; +} + +static inline +QDF_STATUS qdf_hang_event_unregister_notifier(qdf_notif_block *nb) +{ + return QDF_STATUS_SUCCESS; +} + +static inline +QDF_STATUS qdf_hang_event_notifier_call(unsigned long v, void *data) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_hashtable.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_hashtable.h new file mode 100644 index 0000000000000000000000000000000000000000..7bb09b4fd8b15c9daf210afbb749f94c1d6a98ff --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_hashtable.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_hashtable.h - Public APIs for a hashtable data structure + */ + +#ifndef __QDF_HASHTABLE_H +#define __QDF_HASHTABLE_H + +#include "i_qdf_hashtable.h" + +/** + * struct qdf_ht - opaque hashtable data type + */ +#define qdf_ht __qdf_ht + +/** + * struct qdf_ht_entry - opaque hashtable entry for membership in a qdf_ht + */ +#define qdf_ht_entry __qdf_ht_entry + +/** + * qdf_ht_declare() - declare a new qdf_ht + * @name: variable name of the hashtable to declare + * @bits: number of hash bits to use; buckets=2^bits; Needs to be a compile + * time constant + * + */ +#define qdf_ht_declare(name, bits) __qdf_ht_declare(name, bits) + +/** + * qdf_ht_init() - initialize a qdf_ht instance + * @table: a non-pointer qdf_ht instance to initialize + * + * Return: none + */ +#define qdf_ht_init(table) __qdf_ht_init(table) + +/** + * qdf_ht_deinit() - de-initialize a qdf_ht instance + * @table: a non-pointer qdf_ht instance to de-initialize + * + * Return: none + */ +#define qdf_ht_deinit(table) __qdf_ht_deinit(table) + +/** + * qdf_ht_empty() - check if a qdf_ht has any entries + * @table: a non-pointer qdf_ht instance to check + * + * Return: true if the hashtable is empty + */ +#define qdf_ht_empty(table) __qdf_ht_empty(table) + +/** + * qdf_ht_add() - add an entry to a qdf_ht instance + * @table: a non-pointer qdf_ht instance to add an entry to + * @entry: pinter to a qdf_ht_entry instance to add to @table + * @key: the key to use for entry insertion and lookup + * + * Return: none + */ +#define qdf_ht_add(table, entry, key) __qdf_ht_add(table, entry, key) + +/** + * qdf_ht_remove() - remove and entry from a qdf_ht instance + * @entry: pointer to a qdf_ht_entry instance to remove + * + * Return: none + */ +#define qdf_ht_remove(entry) __qdf_ht_remove(entry) + +/** + * qdf_ht_for_each() - iterate all entries in @table + * @table: a non-pointer qdf_ht instance to iterate + * @i: int type cursor populated with the bucket index + * @cursor: container struct pointer populated with each iteration + * @entry_field: name of the entry field in the entry container struct + */ +#define qdf_ht_for_each(table, i, cursor, entry_field) \ + __qdf_ht_for_each(table, i, cursor, entry_field) + +/** + * qdf_ht_for_each_safe() - iterate all entries in @table safe against removal + * of hash entry. + * @table: a non-pointer qdf_ht instance to iterate + * @i: int type cursor populated with the bucket index + * @tmp: a &struct used for temporary storage + * @cursor: container struct pointer populated with each iteration + * @entry_field: name of the entry field in the entry container struct + */ +#define qdf_ht_for_each_safe(table, i, tmp, cursor, entry_field) \ + __qdf_ht_for_each_safe(table, i, tmp, cursor, entry_field) + +/** + * qdf_ht_for_each_in_bucket() - iterate entries in the bucket for @key + * @table: a non-pointer qdf_ht instance to iterate + * @cursor: container struct pointer populated with each iteration + * @entry_field: name of the entry field in the entry container struct + * @key: key used to lookup the hashtable bucket + */ +#define qdf_ht_for_each_in_bucket(table, cursor, entry_field, key) \ + __qdf_ht_for_each_in_bucket(table, cursor, entry_field, key) + +/** + * qdf_ht_for_each_match() - iterates through each entry matching @key + * @table: a non-pointer qdf_ht instance to iterate + * @cursor: container struct pointer populated with each iteration + * @entry_field: name of the entry field in the entry container struct + * @key: key used to lookup the entries + * @key_field: name of the key field in the entry container struct + */ +#define qdf_ht_for_each_match(table, cursor, entry_field, key, key_field) \ + __qdf_ht_for_each_match(table, cursor, entry_field, key, key_field) + +/** + * qdf_ht_get() - get the first entry with a key matching @key + * @table: a non-pointer qdf_ht instance to look in + * @cursor: container struct pointer populated with each iteration + * @entry_field: name of the entry field in the entry container struct + * @key: key used to lookup the entry + * @key_field: name of the key field in the entry container struct + */ +#define qdf_ht_get(table, cursor, entry_field, key, key_field) \ + __qdf_ht_get(table, cursor, entry_field, key, key_field) + +#endif /* __QDF_HASHTABLE_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_hrtimer.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_hrtimer.h new file mode 100644 index 0000000000000000000000000000000000000000..0f48678ddc2c54deab24d57528f72810d91ab5c6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_hrtimer.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_hrtimer + * This file abstracts high resolution timers running in hardware context. + */ + +#ifndef _QDF_HRTIMER_H +#define _QDF_HRTIMER_H + +#include +#include +#include + +/* Context independent hrtimer object */ +typedef __qdf_hrtimer_data_t qdf_hrtimer_data_t; + +/* Platform independent timer callback function */ +typedef enum qdf_hrtimer_restart_status(*qdf_hrtimer_func_t) + (qdf_hrtimer_data_t *timer); + +/** + * qdf_hrtimer_start() - Starts hrtimer in given context + * @timer: pointer to the qdf_hrtimer_data_t object + * @interval: interval to forward as qdf_ktime_t object + * @mode: mode of qdf_hrtimer_data_t + * + * Starts hrtimer in given context + * + * Return: void + */ +static inline +void qdf_hrtimer_start(qdf_hrtimer_data_t *timer, qdf_ktime_t interval, + enum qdf_hrtimer_mode mode) +{ + __qdf_hrtimer_start(timer, interval, mode); +} + +/** + * qdf_hrtimer_cancel() - Cancels hrtimer in given context + * @timer: pointer to the qdf_hrtimer_data_t object + * + * Cancels hrtimer in given context + * + * Return: int + */ +static inline +int qdf_hrtimer_cancel(qdf_hrtimer_data_t *timer) +{ + return __qdf_hrtimer_cancel(timer); +} + +/** + * qdf_hrtimer_init() - init hrtimer based on context + * @timer: pointer to the qdf_hrtimer_data_t object + * @callback: callback function to be fired + * @qdf_clock_id: clock type + * @qdf_hrtimer_mode: mode of qdf_hrtimer_data_t + * @qdf_context_mode: interrupt context mode + * + * starts hrtimer in a context passed as per qdf_context_mode + * + * Return: void + */ +static inline void qdf_hrtimer_init(qdf_hrtimer_data_t *timer, + qdf_hrtimer_func_t callback, + enum qdf_clock_id clock, + enum qdf_hrtimer_mode mode, + enum qdf_context_mode ctx) +{ + __qdf_hrtimer_init(timer, callback, clock, mode, ctx); +} + +/** + * qdf_hrtimer_kill() - kills hrtimer in given context + * @timer: pointer to the hrtimer object + * + * kills hrtimer in given context + * + * Return: void + */ +static inline +void qdf_hrtimer_kill(__qdf_hrtimer_data_t *timer) +{ + __qdf_hrtimer_kill(timer); +} + +/** + * qdf_hrtimer_get_remaining() - check remaining time in the timer + * @timer: pointer to the qdf_hrtimer_data_t object + * + * check whether the timer is on one of the queues + * + * Return: remaining time as qdf_ktime_t object + */ +static inline qdf_ktime_t qdf_hrtimer_get_remaining(qdf_hrtimer_data_t *timer) +{ + return __qdf_hrtimer_get_remaining(timer); +} + +/** + * qdf_hrtimer_is_queued() - check whether the timer is on one of the queues + * @timer: pointer to the qdf_hrtimer_data_t object + * + * check whether the timer is on one of the queues + * + * Return: false when the timer was not in queue + * true when the timer was in queue + */ +static inline bool qdf_hrtimer_is_queued(qdf_hrtimer_data_t *timer) +{ + return __qdf_hrtimer_is_queued(timer); +} + +/** + * qdf_hrtimer_callback_running() - check if callback is running + * @timer: pointer to the qdf_hrtimer_data_t object + * + * check whether the timer is running the callback function + * + * Return: false when callback is not running + * true when callback is running + */ +static inline bool qdf_hrtimer_callback_running(qdf_hrtimer_data_t *timer) +{ + return __qdf_hrtimer_callback_running(timer); +} + +/** + * qdf_hrtimer_active() - check if timer is active + * @timer: pointer to the qdf_hrtimer_data_t object + * + * Check if timer is active. A timer is active, when it is enqueued into + * the rbtree or the callback function is running. + * + * Return: false if timer is not active + * true if timer is active + */ +static inline bool qdf_hrtimer_active(qdf_hrtimer_data_t *timer) +{ + return __qdf_hrtimer_active(timer); +} + +/** + * qdf_hrtimer_cb_get_time() - get remaining time in callback + * @timer: pointer to the qdf_hrtimer_data_t object + * + * Get remaining time in the hrtimer callback + * + * Return: time remaining as qdf_ktime_t object + */ +static inline qdf_ktime_t qdf_hrtimer_cb_get_time(qdf_hrtimer_data_t *timer) +{ + return __qdf_hrtimer_cb_get_time(timer); +} + +/** + * qdf_hrtimer_forward() - forward the hrtimer + * @timer: pointer to the qdf_hrtimer_data_t object + * @now: current time as qdf_ktime_t object + * @interval: interval to forward as qdf_ktime_t object + * + * Forward the timer expiry so it will expire in the future + * + * Return: the number of overruns + */ +static inline uint64_t qdf_hrtimer_forward(qdf_hrtimer_data_t *timer, + qdf_ktime_t now, + qdf_ktime_t interval) +{ + return __qdf_hrtimer_forward(timer, now, interval); +} + +#endif /* _QDF_HRTIMER_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_idr.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_idr.h new file mode 100644 index 0000000000000000000000000000000000000000..7ad63aca898e3c579d2740cefc1d45a95cc2282e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_idr.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_idr(ID Allocation) + * QCA driver framework (QDF) ID allocation APIs + */ + +#if !defined(__QDF_IDR_H) +#define __QDF_IDR_H + +/* Include Files */ +#include +#include +#include + +/** + * qdf_idr - platform idr object + */ +typedef __qdf_idr qdf_idr; + +/** + * qdf_idr_create() - idr initialization function + * @idp: pointer to qdf idr + * + * Return: QDF status + */ +QDF_STATUS qdf_idr_create(qdf_idr *idp); + +/** + * qdf_idr_destroy() - idr deinitialization function + * @idp: pointer to qdf idr + * + * Return: QDF status + */ +QDF_STATUS qdf_idr_destroy(qdf_idr *idp); + +/** + * qdf_idr_alloc() - Allocates an unused ID + * @idp: pointer to qdf idr + * @ptr: pointer to be associated with the new ID + * @id: pointer to return new ID + * + * Return: QDF status + */ +QDF_STATUS qdf_idr_alloc(qdf_idr *idp, void *ptr, int32_t *id); + +/** + * qdf_idr_remove() - Removes this ID from the IDR. + * @idp: pointer to qdf idr + * @id: ID to be remove + * + * Return: QDF status + */ +QDF_STATUS qdf_idr_remove(qdf_idr *idp, int32_t id); + +/** + * qdf_idr_find() - find the user pointer from the IDR by id. + * @idp: pointer to qdf idr + * @id: ID to be remove + * @ptr: pointer to return user pointer for given ID + * + * Return: QDF status + */ +QDF_STATUS qdf_idr_find(qdf_idr *idp, int32_t id, void **ptr); + +#endif /* __QDF_IDR_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ipa.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ipa.h new file mode 100644 index 0000000000000000000000000000000000000000..6d13de7d69a6497a2f6ca853ef49a7a51bbe636b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ipa.h @@ -0,0 +1,686 @@ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _QDF_IPA_H +#define _QDF_IPA_H + +#ifdef IPA_OFFLOAD + +#include + +/** + * enum qdf_ipa_wlan_event - QDF IPA events + * @QDF_IPA_CLIENT_CONNECT: Client Connects + * @QDF_IPA_CLIENT_DISCONNECT: Client Disconnects + * @QDF_IPA_AP_CONNECT: SoftAP is started + * @QDF_IPA_AP_DISCONNECT: SoftAP is stopped + * @QDF_IPA_STA_CONNECT: STA associates to AP + * @QDF_IPA_STA_DISCONNECT: STA dissociates from AP + * @QDF_IPA_CLIENT_CONNECT_EX: Peer associates/re-associates to softap + * @QDF_SWITCH_TO_SCC: WLAN interfaces in scc mode + * @QDF_SWITCH_TO_MCC: WLAN interfaces in mcc mode + * @QDF_WDI_ENABLE: WDI enable complete + * @QDF_WDI_DISABLE: WDI teardown + * @QDF_FWR_SSR_BEFORE_SHUTDOWN: WLAN FW recovery + * @QDF_IPA_WLAN_EVENT_MAX: Max value for the enum + */ +typedef enum { + QDF_IPA_CLIENT_CONNECT, + QDF_IPA_CLIENT_DISCONNECT, + QDF_IPA_AP_CONNECT, + QDF_IPA_AP_DISCONNECT, + QDF_IPA_STA_CONNECT, + QDF_IPA_STA_DISCONNECT, + QDF_IPA_CLIENT_CONNECT_EX, + QDF_SWITCH_TO_SCC, + QDF_SWITCH_TO_MCC, + QDF_WDI_ENABLE, + QDF_WDI_DISABLE, + QDF_FWR_SSR_BEFORE_SHUTDOWN, + QDF_IPA_WLAN_EVENT_MAX +} qdf_ipa_wlan_event; + +/** + * qdf_ipa_wdi_meter_evt_type_t - type of event client callback is + * for AP+STA mode metering + * @IPA_GET_WDI_SAP_STATS: get IPA_stats betwen SAP and STA - + * use ipa_get_wdi_sap_stats structure + * @IPA_SET_WIFI_QUOTA: set quota limit on STA - + * use ipa_set_wifi_quota structure + */ +typedef __qdf_ipa_wdi_meter_evt_type_t qdf_ipa_wdi_meter_evt_type_t; + +typedef __qdf_ipa_get_wdi_sap_stats_t qdf_ipa_get_wdi_sap_stats_t; + +/** + * qdf_ipa_set_wifi_quota_t - structure used for + * IPA_SET_WIFI_QUOTA. + */ +typedef __qdf_ipa_set_wifi_quota_t qdf_ipa_set_wifi_quota_t; + +/** + * qdf_ipa_connect_params_t - low-level client connect input parameters. Either + * client allocates the data and desc FIFO and specifies that in data+desc OR + * specifies sizes and pipe_mem pref and IPA does the allocation. + */ +typedef __qdf_ipa_connect_params_t qdf_ipa_connect_params_t; + +/** + * qdf_ipa_tx_meta_t - meta-data for the TX packet + */ +typedef __qdf_ipa_tx_meta_t qdf_ipa_tx_meta_t; + +/** + * __qdf_ipa_sps_params_t - SPS related output parameters resulting from + */ +typedef __qdf_ipa_sps_params_t qdf_ipa_sps_params_t; + +/** + * qdf_ipa_tx_intf_t - interface tx properties + */ +typedef __qdf_ipa_tx_intf_t qdf_ipa_tx_intf_t; + +/** + * qdf_ipa_rx_intf_t - interface rx properties + */ +typedef __qdf_ipa_rx_intf_t qdf_ipa_rx_intf_t; + +/** + * qdf_ipa_ext_intf_t - interface ext properties + */ +typedef __qdf_ipa_ext_intf_t qdf_ipa_ext_intf_t; + +/** + * qdf_ipa_sys_connect_params_t - information needed to setup an IPA end-point + * in system-BAM mode + */ +typedef __qdf_ipa_sys_connect_params_t qdf_ipa_sys_connect_params_t; + +/** + * __qdf_pa_rm_event_t - IPA RM events + * + * Indicate the resource state change + */ +typedef __qdf_ipa_rm_event_t qdf_ipa_rm_event_t; + +/** + * struct qdf_ipa_rm_register_params_t - information needed to + * register IPA RM client with IPA RM + */ +typedef __qdf_ipa_rm_register_params_t qdf_ipa_rm_register_params_t; + +/** + * struct qdf_ipa_rm_create_params_t - information needed to initialize + * the resource + * + * IPA RM client is expected to perform non blocking operations only + * in request_resource and release_resource functions and + * release notification context as soon as possible. + */ +typedef __qdf_ipa_rm_create_params_t qdf_ipa_rm_create_params_t; + +/** + * qdf_ipa_rm_perf_profile_t - information regarding IPA RM client performance + * profile + */ +typedef __qdf_ipa_rm_perf_profile_t qdf_ipa_rm_perf_profile_t; + +/** + * qdf_ipa_tx_data_desc_t - information needed + * to send data packet to HW link: link to data descriptors + * priv: client specific private data + */ +typedef __qdf_ipa_tx_data_desc_t qdf_ipa_tx_data_desc_t; + +/** + * qdf_ipa_rx_data_t - information needed + * to send to wlan driver on receiving data from ipa hw + */ +typedef __qdf_ipa_rx_data_t qdf_ipa_rx_data_t; + +/** + * qdf_ipa_wdi_ul_params_t - WDI_RX configuration + */ +typedef __qdf_ipa_wdi_ul_params_t qdf_ipa_wdi_ul_params_t; + +/** + * qdf_ipa_wdi_ul_params_smmu_t - WDI_RX configuration (with WLAN SMMU) + */ +typedef __qdf_ipa_wdi_ul_params_smmu_t qdf_ipa_wdi_ul_params_smmu_t; + +/** + * qdf_ipa_wdi_dl_params_t - WDI_TX configuration + */ +typedef __qdf_ipa_wdi_dl_params_t qdf_ipa_wdi_dl_params_t; + +/** + * qdf_ipa_wdi_dl_params_smmu_t - WDI_TX configuration (with WLAN SMMU) + */ +typedef __qdf_ipa_wdi_dl_params_smmu_t qdf_ipa_wdi_dl_params_smmu_t; + +/** + * qdf_ipa_wdi_in_params_t - information provided by WDI client + */ +typedef __qdf_ipa_wdi_in_params_t qdf_ipa_wdi_in_params_t; + +/** + * qdf_ipa_wdi_out_params_t - information provided to WDI client + */ +typedef __qdf_ipa_wdi_out_params_t qdf_ipa_wdi_out_params_t; + +/** + * qdf_ipa_wdi_db_params_t - information provided to retrieve + * physical address of uC doorbell + */ +typedef __qdf_ipa_wdi_db_params_t qdf_ipa_wdi_db_params_t; + +/** + * qdf_ipa_wdi_uc_ready_params_t - uC ready CB parameters + */ +typedef void (*qdf_ipa_uc_ready_cb)(void *priv); +typedef __qdf_ipa_wdi_uc_ready_params_t qdf_ipa_wdi_uc_ready_params_t; + +/** + * qdf_ipa_wdi_buffer_info_t - address info of a WLAN allocated buffer + * + * IPA driver will create/release IOMMU mapping in IPA SMMU from iova->pa + */ +typedef __qdf_ipa_wdi_buffer_info_t qdf_ipa_wdi_buffer_info_t; + +/** + * qdf_ipa_gsi_ep_config_t - IPA GSI endpoint configurations + */ +typedef __qdf_ipa_gsi_ep_config_t qdf_ipa_gsi_ep_config_t; + +/** + * qdf_ipa_dp_evt_type_t - type of event client callback is + * invoked for on data path + * @IPA_RECEIVE: data is struct sk_buff + * @IPA_WRITE_DONE: data is struct sk_buff + */ +typedef __qdf_ipa_dp_evt_type_t qdf_ipa_dp_evt_type_t; + +#ifdef WDI3_STATS_UPDATE +/** + * qdf_ipa_wdi_tx_info_t - WLAN embedded TX bytes information + * + * WLAN host fills this structure to update IPA driver about + * embedded TX information. + */ +typedef __qdf_ipa_wdi_tx_info_t qdf_ipa_wdi_tx_info_t; + +/** + * qdf_ipa_wdi_bw_info_t - BW threshold levels to be monitored + * by IPA uC + */ +typedef __qdf_ipa_wdi_bw_info_t qdf_ipa_wdi_bw_info_t; + +/** + * qdf_ipa_inform_wlan_bw_t - BW information given by IPA driver + * whenever uC detects threshold level reached + */ +typedef __qdf_ipa_inform_wlan_bw_t qdf_ipa_inform_wlan_bw_t; +#endif + +typedef __qdf_ipa_hdr_add_t qdf_ipa_hdr_add_t; +typedef __qdf_ipa_hdr_del_t qdf_ipa_hdr_del_t; +typedef __qdf_ipa_ioc_add_hdr_t qdf_ipa_ioc_add_hdr_t; +typedef __qdf_ipa_ioc_del_hdr_t qdf_ipa_ioc_del_hdr_t; +typedef __qdf_ipa_ioc_get_hdr_t qdf_ipa_ioc_get_hdr_t; +typedef __qdf_ipa_ioc_copy_hdr_t qdf_ipa_ioc_copy_hdr_t; +typedef __qdf_ipa_ioc_add_hdr_proc_ctx_t qdf_ipa_ioc_add_hdr_proc_ctx_t; +typedef __qdf_ipa_ioc_del_hdr_proc_ctx_t qdf_ipa_ioc_del_hdr_proc_ctx_t; +typedef __qdf_ipa_msg_meta_t qdf_ipa_msg_meta_t; +typedef __qdf_ipa_client_type_t qdf_ipa_client_type_t; +typedef __qdf_ipa_hw_stats_wdi_info_data_t qdf_ipa_hw_stats_wdi_info_data_t; +typedef __qdf_ipa_rm_resource_name_t qdf_ipa_rm_resource_name_t; +typedef __qdf_ipa_wlan_event_t qdf_ipa_wlan_event_t; +typedef __qdf_ipa_wlan_msg_t qdf_ipa_wlan_msg_t; +typedef __qdf_ipa_wlan_msg_ex_t qdf_ipa_wlan_msg_ex_t; +typedef __qdf_ipa_ioc_tx_intf_prop_t qdf_ipa_ioc_tx_intf_prop_t; +typedef __qdf_ipa_ioc_rx_intf_prop_t qdf_ipa_ioc_rx_intf_prop_t; +typedef __qdf_ipa_wlan_hdr_attrib_val_t qdf_ipa_wlan_hdr_attrib_val_t; +typedef int (*qdf_ipa_msg_pull_fn)(void *buff, u32 len, u32 type); +typedef void (*qdf_ipa_ready_cb)(void *user_data); + +#define QDF_IPA_SET_META_MSG_TYPE(meta, msg_type) \ + __QDF_IPA_SET_META_MSG_TYPE(meta, msg_type) + +#define QDF_IPA_RM_RESOURCE_GRANTED __QDF_IPA_RM_RESOURCE_GRANTED +#define QDF_IPA_RM_RESOURCE_RELEASED __QDF_IPA_RM_RESOURCE_RELEASED + +#define QDF_IPA_VOLTAGE_LEVEL __QDF_IPA_VOLTAGE_LEVEL + +#define QDF_IPA_RM_RESOURCE_WLAN_PROD __QDF_IPA_RM_RESOURCE_WLAN_PROD +#define QDF_IPA_RM_RESOURCE_WLAN_CONS __QDF_IPA_RM_RESOURCE_WLAN_CONS +#define QDF_IPA_RM_RESOURCE_APPS_CONS __QDF_IPA_RM_RESOURCE_APPS_CONS + +#define QDF_IPA_CLIENT_WLAN1_PROD __QDF_IPA_CLIENT_WLAN1_PROD +#define QDF_IPA_CLIENT_WLAN1_CONS __QDF_IPA_CLIENT_WLAN1_CONS +#define QDF_IPA_CLIENT_WLAN2_CONS __QDF_IPA_CLIENT_WLAN2_CONS +#define QDF_IPA_CLIENT_WLAN3_CONS __QDF_IPA_CLIENT_WLAN3_CONS +#define QDF_IPA_CLIENT_WLAN4_CONS __QDF_IPA_CLIENT_WLAN4_CONS + +/* + * Resume / Suspend + */ +static inline int qdf_ipa_reset_endpoint(u32 clnt_hdl) +{ + return __qdf_ipa_reset_endpoint(clnt_hdl); +} + +/* + * Remove ep delay + */ +static inline int qdf_ipa_clear_endpoint_delay(u32 clnt_hdl) +{ + return __qdf_ipa_clear_endpoint_delay(clnt_hdl); +} + +/* + * Header removal / addition + */ +static inline int qdf_ipa_add_hdr(qdf_ipa_ioc_add_hdr_t *hdrs) +{ + return __qdf_ipa_add_hdr(hdrs); +} + +static inline int qdf_ipa_del_hdr(qdf_ipa_ioc_del_hdr_t *hdls) +{ + return __qdf_ipa_del_hdr(hdls); +} + +static inline int qdf_ipa_commit_hdr(void) +{ + return __qdf_ipa_commit_hdr(); +} + +static inline int qdf_ipa_get_hdr(qdf_ipa_ioc_get_hdr_t *lookup) +{ + return __qdf_ipa_get_hdr(lookup); +} + +static inline int qdf_ipa_put_hdr(u32 hdr_hdl) +{ + return __qdf_ipa_put_hdr(hdr_hdl); +} + +static inline int qdf_ipa_copy_hdr(qdf_ipa_ioc_copy_hdr_t *copy) +{ + return __qdf_ipa_copy_hdr(copy); +} + +/* + * Messaging + */ +static inline int qdf_ipa_send_msg(qdf_ipa_msg_meta_t *meta, void *buff, + ipa_msg_free_fn callback) +{ + return __qdf_ipa_send_msg(meta, buff, callback); +} + +static inline int qdf_ipa_register_pull_msg(qdf_ipa_msg_meta_t *meta, + qdf_ipa_msg_pull_fn callback) +{ + return __qdf_ipa_register_pull_msg(meta, callback); +} + +static inline int qdf_ipa_deregister_pull_msg(qdf_ipa_msg_meta_t *meta) +{ + return __qdf_ipa_deregister_pull_msg(meta); +} + +/* + * Interface + */ +static inline int qdf_ipa_register_intf(const char *name, + const qdf_ipa_tx_intf_t *tx, + const qdf_ipa_rx_intf_t *rx) +{ + return __qdf_ipa_register_intf(name, tx, rx); +} + +static inline int qdf_ipa_register_intf_ext(const char *name, + const qdf_ipa_tx_intf_t *tx, + const qdf_ipa_rx_intf_t *rx, + const qdf_ipa_ext_intf_t *ext) +{ + return __qdf_ipa_register_intf_ext(name, tx, rx, ext); +} + +static inline int qdf_ipa_deregister_intf(const char *name) +{ + return __qdf_ipa_deregister_intf(name); +} + +/* + * Data path + */ +static inline int qdf_ipa_tx_dp(qdf_ipa_client_type_t dst, struct sk_buff *skb, + qdf_ipa_tx_meta_t *metadata) +{ + return __qdf_ipa_tx_dp(dst, skb, metadata); +} + +/* + * To transfer multiple data packets + */ +static inline int qdf_ipa_tx_dp_mul( + qdf_ipa_client_type_t dst, + qdf_ipa_tx_data_desc_t *data_desc) +{ + return __qdf_ipa_tx_dp_mul(dst, data_desc); +} + +static inline void qdf_ipa_free_skb(qdf_ipa_rx_data_t *rx_in) +{ + return __qdf_ipa_free_skb(rx_in);; +} + +/* + * System pipes + */ +static inline u16 qdf_ipa_get_smem_restr_bytes(void) +{ + return __qdf_ipa_get_smem_restr_bytes(); +} + +static inline int qdf_ipa_setup_sys_pipe(qdf_ipa_sys_connect_params_t *sys_in, + u32 *clnt_hdl) +{ + return __qdf_ipa_setup_sys_pipe(sys_in, clnt_hdl); +} + +static inline int qdf_ipa_teardown_sys_pipe(u32 clnt_hdl) +{ + return __qdf_ipa_teardown_sys_pipe(clnt_hdl); +} + +static inline int qdf_ipa_connect_wdi_pipe(qdf_ipa_wdi_in_params_t *in, + qdf_ipa_wdi_out_params_t *out) +{ + return __qdf_ipa_connect_wdi_pipe(in, out); +} + +static inline int qdf_ipa_disconnect_wdi_pipe(u32 clnt_hdl) +{ + return __qdf_ipa_disconnect_wdi_pipe(clnt_hdl); +} + +static inline int qdf_ipa_enable_wdi_pipe(u32 clnt_hdl) +{ + return __qdf_ipa_enable_wdi_pipe(clnt_hdl); +} + +static inline int qdf_ipa_disable_wdi_pipe(u32 clnt_hdl) +{ + return __qdf_ipa_disable_wdi_pipe(clnt_hdl); +} + +static inline int qdf_ipa_resume_wdi_pipe(u32 clnt_hdl) +{ + return __qdf_ipa_resume_wdi_pipe(clnt_hdl); +} + +static inline int qdf_ipa_suspend_wdi_pipe(u32 clnt_hdl) +{ + return __qdf_ipa_suspend_wdi_pipe(clnt_hdl); +} + +static inline int qdf_ipa_uc_wdi_get_dbpa( + qdf_ipa_wdi_db_params_t *out) +{ + return __qdf_ipa_uc_wdi_get_dbpa(out); +} + +static inline int qdf_ipa_uc_reg_rdyCB( + qdf_ipa_wdi_uc_ready_params_t *param) +{ + return __qdf_ipa_uc_reg_rdyCB(param); +} + +static inline int qdf_ipa_uc_dereg_rdyCB(void) +{ + return __qdf_ipa_uc_dereg_rdyCB(); +} + + +/* + * Resource manager + */ +static inline int qdf_ipa_rm_create_resource( + qdf_ipa_rm_create_params_t *create_params) +{ + return __qdf_ipa_rm_create_resource(create_params); +} + +static inline int qdf_ipa_rm_delete_resource( + qdf_ipa_rm_resource_name_t resource_name) +{ + return __qdf_ipa_rm_delete_resource(resource_name); +} + +static inline int qdf_ipa_rm_register(qdf_ipa_rm_resource_name_t resource_name, + qdf_ipa_rm_register_params_t *reg_params) +{ + return __qdf_ipa_rm_register(resource_name, reg_params); +} + +static inline int qdf_ipa_rm_set_perf_profile( + qdf_ipa_rm_resource_name_t resource_name, + qdf_ipa_rm_perf_profile_t *profile) +{ + return __qdf_ipa_rm_set_perf_profile(resource_name, profile); +} + +static inline int qdf_ipa_rm_deregister(qdf_ipa_rm_resource_name_t resource_name, + qdf_ipa_rm_register_params_t *reg_params) +{ + return __qdf_ipa_rm_deregister(resource_name, reg_params); +} + +static inline int qdf_ipa_rm_add_dependency( + qdf_ipa_rm_resource_name_t resource_name, + qdf_ipa_rm_resource_name_t depends_on_name) +{ + return __qdf_ipa_rm_add_dependency(resource_name, depends_on_name); +} + +static inline int qdf_ipa_rm_add_dependency_sync( + qdf_ipa_rm_resource_name_t resource_name, + qdf_ipa_rm_resource_name_t depends_on_name) +{ + return __qdf_ipa_rm_add_dependency_sync(resource_name, depends_on_name); +} + +static inline int qdf_ipa_rm_delete_dependency( + qdf_ipa_rm_resource_name_t resource_name, + qdf_ipa_rm_resource_name_t depends_on_name) +{ + return __qdf_ipa_rm_delete_dependency(resource_name, depends_on_name); +} + +static inline int qdf_ipa_rm_request_resource( + qdf_ipa_rm_resource_name_t resource_name) +{ + return __qdf_ipa_rm_request_resource(resource_name); +} + +static inline int qdf_ipa_rm_release_resource( + qdf_ipa_rm_resource_name_t resource_name) +{ + return __qdf_ipa_rm_release_resource(resource_name); +} + +static inline int qdf_ipa_rm_notify_completion(qdf_ipa_rm_event_t event, + qdf_ipa_rm_resource_name_t resource_name) +{ + return __qdf_ipa_rm_notify_completion(event, resource_name); +} + +static inline int qdf_ipa_rm_inactivity_timer_init( + qdf_ipa_rm_resource_name_t resource_name, + unsigned long msecs) +{ + return __qdf_ipa_rm_inactivity_timer_init(resource_name, msecs); +} + +static inline int qdf_ipa_rm_inactivity_timer_destroy( + qdf_ipa_rm_resource_name_t resource_name) +{ + return __qdf_ipa_rm_inactivity_timer_destroy(resource_name); +} + +static inline int qdf_ipa_rm_inactivity_timer_request_resource( + qdf_ipa_rm_resource_name_t resource_name) +{ + return __qdf_ipa_rm_inactivity_timer_request_resource(resource_name); +} + +static inline int qdf_ipa_rm_inactivity_timer_release_resource( + qdf_ipa_rm_resource_name_t resource_name) +{ + return __qdf_ipa_rm_inactivity_timer_release_resource(resource_name); +} + +/* + * Miscellaneous + */ +static inline void qdf_ipa_bam_reg_dump(void) +{ + return __qdf_ipa_bam_reg_dump(); +} + +static inline int qdf_ipa_get_wdi_stats(qdf_ipa_hw_stats_wdi_info_data_t *stats) +{ + return __qdf_ipa_get_wdi_stats(stats); +} + +static inline int qdf_ipa_get_ep_mapping(qdf_ipa_client_type_t client) +{ + return __qdf_ipa_get_ep_mapping(client); +} + +static inline bool qdf_ipa_is_ready(void) +{ + return __qdf_ipa_is_ready(); +} + +static inline void qdf_ipa_proxy_clk_vote(void) +{ + return __qdf_ipa_proxy_clk_vote(); +} + +static inline void qdf_ipa_proxy_clk_unvote(void) +{ + return __qdf_ipa_proxy_clk_unvote(); +} + +static inline bool qdf_ipa_is_client_handle_valid(u32 clnt_hdl) +{ + return __qdf_ipa_is_client_handle_valid(clnt_hdl); +} + +static inline qdf_ipa_client_type_t qdf_ipa_get_client_mapping(int pipe_idx) +{ + return __qdf_ipa_get_client_mapping(pipe_idx); +} + +static inline qdf_ipa_rm_resource_name_t qdf_ipa_get_rm_resource_from_ep( + int pipe_idx) +{ + return __qdf_ipa_get_rm_resource_from_ep(pipe_idx); +} + +static inline bool qdf_ipa_get_modem_cfg_emb_pipe_flt(void) +{ + return __qdf_ipa_get_modem_cfg_emb_pipe_flt(); +} + +static inline int qdf_ipa_create_wdi_mapping(u32 num_buffers, + __qdf_ipa_wdi_buffer_info_t *info) +{ + return __qdf_ipa_create_wdi_mapping(num_buffers, info); +} + +static inline int qdf_ipa_release_wdi_mapping(u32 num_buffers, + qdf_ipa_wdi_buffer_info_t *info) +{ + return __qdf_ipa_release_wdi_mapping(num_buffers, info); +} + +static inline int qdf_ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, + uint32_t agg_count) +{ + return __qdf_ipa_disable_apps_wan_cons_deaggr(agg_size, agg_count); +} + +static inline const qdf_ipa_gsi_ep_config_t *qdf_ipa_get_gsi_ep_info(qdf_ipa_client_type_t client) +{ + return __qdf_ipa_get_gsi_ep_info(client); +} + +static inline int qdf_ipa_stop_gsi_channel(u32 clnt_hdl) +{ + return __qdf_ipa_stop_gsi_channel(clnt_hdl); +} + +static inline int qdf_ipa_register_ipa_ready_cb( + void (*qdf_ipa_ready_cb)(void *user_data), + void *user_data) +{ + return __qdf_ipa_register_ipa_ready_cb(qdf_ipa_ready_cb, user_data); +} + +#ifdef FEATURE_METERING +static inline int qdf_ipa_broadcast_wdi_quota_reach_ind(uint32_t index, + uint64_t quota_bytes) +{ + return __qdf_ipa_broadcast_wdi_quota_reach_ind(index, quota_bytes); +} +#endif + +#ifdef ENABLE_SMMU_S1_TRANSLATION +/** + * qdf_get_ipa_smmu_enabled() - to get IPA SMMU enable status + * + * Return: true when IPA SMMU enabled, otherwise false + */ +static inline bool qdf_get_ipa_smmu_enabled(void) +{ + return __qdf_get_ipa_smmu_enabled(); +} +#endif + +#ifdef IPA_LAN_RX_NAPI_SUPPORT +/** + * qdf_ipa_get_lan_rx_napi() - Check if NAPI is enabled in LAN + * RX DP + * + * Returns: true if enabled, false otherwise + */ +static inline bool qdf_ipa_get_lan_rx_napi(void) +{ + return __qdf_ipa_get_lan_rx_napi(); +} +#else +static inline bool qdf_ipa_get_lan_rx_napi(void) +{ + return false; +} +#endif /* IPA_LAN_RX_NAPI_SUPPORT */ +#endif /* IPA_OFFLOAD */ +#endif /* _QDF_IPA_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ipa_wdi3.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ipa_wdi3.h new file mode 100644 index 0000000000000000000000000000000000000000..cdb133e262985be5e50f74374878644bc09ef89f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ipa_wdi3.h @@ -0,0 +1,427 @@ +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_ipa_wdi3.h + * This file provides OS abstraction for IPA WDI APIs. + */ + +#ifndef _QDF_IPA_WDI3_H +#define _QDF_IPA_WDI3_H + +#ifdef IPA_OFFLOAD + +#include +#include + +#ifdef CONFIG_IPA_WDI_UNIFIED_API +/** + * qdf_ipa_wdi_version_t - IPA WDI version + */ +typedef __qdf_ipa_wdi_version_t qdf_ipa_wdi_version_t; + +/** + * qdf_ipa_wdi_init_in_params_t - wdi init input parameters + */ +typedef __qdf_ipa_wdi_init_in_params_t qdf_ipa_wdi_init_in_params_t; + +#define QDF_IPA_WDI_INIT_IN_PARAMS_WDI_VERSION(in_params) \ + __QDF_IPA_WDI_INIT_IN_PARAMS_WDI_VERSION(in_params) +#define QDF_IPA_WDI_INIT_IN_PARAMS_NOTIFY(in_params) \ + __QDF_IPA_WDI_INIT_IN_PARAMS_NOTIFY(in_params) +#define QDF_IPA_WDI_INIT_IN_PARAMS_PRIV(in_params) \ + __QDF_IPA_WDI_INIT_IN_PARAMS_PRIV(in_params) +#define QDF_IPA_WDI_INIT_IN_PARAMS_WDI_NOTIFY(in_params) \ + __QDF_IPA_WDI_INIT_IN_PARAMS_WDI_NOTIFY(in_params) + +/** + * qdf_ipa_wdi_init_out_params_t - wdi init output parameters + */ +typedef __qdf_ipa_wdi_init_out_params_t qdf_ipa_wdi_init_out_params_t; + +#define QDF_IPA_WDI_INIT_OUT_PARAMS_IS_UC_READY(out_params) \ + __QDF_IPA_WDI_INIT_OUT_PARAMS_IS_UC_READY(out_params) +#define QDF_IPA_WDI_INIT_OUT_PARAMS_IS_SMMU_ENABLED(out_params) \ + __QDF_IPA_WDI_INIT_OUT_PARAMS_IS_SMMU_ENABLED(out_params) + +/** + * qdf_ipa_wdi_pipe_setup_info_smmu_t - WDI TX/Rx configuration + */ +typedef __qdf_ipa_wdi_pipe_setup_info_smmu_t qdf_ipa_wdi_pipe_setup_info_smmu_t; + +#define QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(txrx) + +#define QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(txrx) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(txrx) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(txrx) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(txrx) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(txrx) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(txrx) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(txrx) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(txrx) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(txrx) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(txrx) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(txrx) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(txrx) + +typedef __qdf_ipa_ep_cfg_t qdf_ipa_ep_cfg_t; + +#define QDF_IPA_EP_CFG_NAT_EN(cfg) \ + __QDF_IPA_EP_CFG_NAT_EN(cfg) +#define QDF_IPA_EP_CFG_HDR_LEN(cfg) \ + __QDF_IPA_EP_CFG_HDR_LEN(cfg) +#define QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(cfg) \ + __QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(cfg) +#define QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(cfg) \ + __QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(cfg) +#define QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(cfg) \ + __QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(cfg) +#define QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(cfg) \ + __QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(cfg) +#define QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(cfg) \ + __QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(cfg) +#define QDF_IPA_EP_CFG_MODE(cfg) \ + __QDF_IPA_EP_CFG_MODE(cfg) +#define QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(cfg) \ + __QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(cfg) + +/** + * qdf_ipa_wdi_init - Client should call this function to + * init WDI IPA offload data path + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_init(qdf_ipa_wdi_init_in_params_t *in, + qdf_ipa_wdi_init_out_params_t *out) +{ + return __qdf_ipa_wdi_init(in, out); +} + +/** + * qdf_ipa_wdi_cleanup - Client should call this function to + * clean up WDI IPA offload data path + * + * @Return 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_cleanup(void) +{ + return __qdf_ipa_wdi_cleanup(); +} +#endif /* CONFIG_IPA_WDI_UNIFIED_API */ + +/** + * qdf_ipa_wdi_hdr_info_t - Header to install on IPA HW + */ +typedef __qdf_ipa_wdi_hdr_info_t qdf_ipa_wdi_hdr_info_t; + +#define QDF_IPA_WDI_HDR_INFO_HDR(hdr_info) \ + __QDF_IPA_WDI_HDR_INFO_HDR(hdr_info) +#define QDF_IPA_WDI_HDR_INFO_HDR_LEN(hdr_info) \ + __QDF_IPA_WDI_HDR_INFO_HDR_LEN(hdr_info) +#define QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(hdr_info) \ + __QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(hdr_info) +#define QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) \ + __QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) + +/** + * qdf_ipa_wdi_reg_intf_in_params_t - parameters for uC offload + * interface registration + */ +typedef __qdf_ipa_wdi_reg_intf_in_params_t qdf_ipa_wdi_reg_intf_in_params_t; + +#define QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(in) \ + __QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(in) +#define QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in) \ + __QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in) +#define QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(in) \ + __QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(in) +#define QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(in) \ + __QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(in) +#define QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) \ + __QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) +#define QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(in) \ + __QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(in) + +/** + * qdf_ipa_wdi_pipe_setup_info_t - WDI TX/Rx configuration + */ +typedef __qdf_ipa_wdi_pipe_setup_info_t qdf_ipa_wdi_pipe_setup_info_t; + +#define QDF_IPA_WDI_SETUP_INFO_EP_CFG(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_EP_CFG(txrx) + +#define QDF_IPA_WDI_SETUP_INFO_NAT_EN(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_NAT_EN(txrx) +#define QDF_IPA_WDI_SETUP_INFO_HDR_LEN(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_HDR_LEN(txrx) +#define QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(txrx) +#define QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(txrx) +#define QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(txrx) +#define QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(txrx) +#define QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(txrx) +#define QDF_IPA_WDI_SETUP_INFO_MODE(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_MODE(txrx) +#define QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(txrx) + +#define QDF_IPA_WDI_SETUP_INFO_CLIENT(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_CLIENT(txrx) +#define QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(txrx) +#define QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(txrx) +#define QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(txrx) +#define QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(txrx) +#define QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(txrx) +#define QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(txrx) +#define QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(txrx) +#define QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(txrx) +#define QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(txrx) +#define QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(txrx) +#define QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(txrx) \ + __QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(txrx) + +/** + * qdf_ipa_wdi_conn_in_params_t - information provided by + * uC offload client + */ +typedef __qdf_ipa_wdi_conn_in_params_t qdf_ipa_wdi_conn_in_params_t; + +#define QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(pipe_in) \ + __QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(pipe_in) +#define QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(pipe_in) \ + __QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(pipe_in) +#define QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) \ + __QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) +#define QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) \ + __QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) +#define QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(in) \ + __QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(in) +#define QDF_IPA_WDI_CONN_IN_PARAMS_TX(pipe_in) \ + __QDF_IPA_WDI_CONN_IN_PARAMS_TX(pipe_in) +#define QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(pipe_in) \ + __QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(pipe_in) +#define QDF_IPA_WDI_CONN_IN_PARAMS_RX(pipe_in) \ + __QDF_IPA_WDI_CONN_IN_PARAMS_RX(pipe_in) +#define QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(pipe_in) \ + __QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(pipe_in) + +/** + * qdf_ipa_wdi_conn_out_params_t - information provided + * to WLAN druver + */ +typedef __qdf_ipa_wdi_conn_out_params_t qdf_ipa_wdi_conn_out_params_t; + +#define QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(pipe_out) \ + __QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(pipe_out) +#define QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(pipe_out) \ + __QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(pipe_out) +#define QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(pipe_out) \ + __QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(pipe_out) + +/** + * qdf_ipa_wdi_perf_profile_t - To set BandWidth profile + */ +typedef __qdf_ipa_wdi_perf_profile_t qdf_ipa_wdi_perf_profile_t; + +#define QDF_IPA_WDI_PERF_PROFILE_CLIENT(profile) \ + __QDF_IPA_WDI_PERF_PROFILE_CLIENT(profile) +#define QDF_IPA_WDI_PERF_PROFILE_MAX_SUPPORTED_BW_MBPS(profile) \ + __QDF_IPA_WDI_PERF_PROFILE_MAX_SUPPORTED_BW_MBPS(profile) + +/** + * qdf_ipa_wdi_reg_intf - Client should call this function to + * init WDI IPA offload data path + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_reg_intf( + qdf_ipa_wdi_reg_intf_in_params_t *in) +{ + return __qdf_ipa_wdi_reg_intf(in); +} + +/** + * qdf_ipa_wdi_dereg_intf - Client Driver should call this + * function to deregister before unload and after disconnect + * + * @Return 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_dereg_intf(const char *netdev_name) +{ + return __qdf_ipa_wdi_dereg_intf(netdev_name); +} + +/** + * qdf_ipa_wdi_conn_pipes - Client should call this + * function to connect pipes + * + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_conn_pipes(qdf_ipa_wdi_conn_in_params_t *in, + qdf_ipa_wdi_conn_out_params_t *out) +{ + return __qdf_ipa_wdi_conn_pipes(in, out); +} + +/** + * qdf_ipa_wdi_disconn_pipes() - Client should call this + * function to disconnect pipes + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_disconn_pipes(void) +{ + return __qdf_ipa_wdi_disconn_pipes(); +} + +/** + * qdf_ipa_wdi_enable_pipes() - Client should call this + * function to enable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_enable_pipes(void) +{ + return __qdf_ipa_wdi_enable_pipes(); +} + +/** + * qdf_ipa_wdi_disable_pipes() - Client should call this + * function to disable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_disable_pipes(void) +{ + return __qdf_ipa_wdi_disable_pipes(); +} + +/** + * qdf_ipa_wdi_set_perf_profile() - Client should call this function to + * set IPA clock bandwidth based on data rates + * + * @profile: [in] BandWidth profile to use + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_set_perf_profile( + qdf_ipa_wdi_perf_profile_t *profile) +{ + return __qdf_ipa_wdi_set_perf_profile(profile); +} + +/** + * qdf_ipa_wdi_create_smmu_mapping() - Client should call this function to + * create smmu mapping + * + * @num_buffers: [in] number of buffers + * @info: [in] wdi buffer info + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_create_smmu_mapping(uint32_t num_buffers, + qdf_ipa_wdi_buffer_info_t *info) +{ + return __qdf_ipa_wdi_create_smmu_mapping(num_buffers, info); +} + +/** + * qdf_ipa_wdi_release_smmu_mapping() - Client should call this function to + * release smmu mapping + * + * @num_buffers: [in] number of buffers + * @info: [in] wdi buffer info + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_release_smmu_mapping(uint32_t num_buffers, + qdf_ipa_wdi_buffer_info_t *info) +{ + return __qdf_ipa_wdi_release_smmu_mapping(num_buffers, info); +} + +#ifdef WDI3_STATS_UPDATE +/** + * qdf_ipa_wdi_wlan_stats() - Client should call this function to + * send Tx byte counts to IPA driver + * @tx_count: number of Tx bytes + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_wlan_stats(qdf_ipa_wdi_tx_info_t *tx_stats) +{ + return __qdf_ipa_wdi_wlan_stats(tx_stats); +} + +/** + * qdf_ipa_uc_bw_monitor() - start/stop uc bw monitoring + * @bw_info: set bw info levels to monitor + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_uc_bw_monitor(qdf_ipa_wdi_bw_info_t *bw_info) +{ + return __qdf_ipa_uc_bw_monitor(bw_info); +} +#endif + +#endif /* IPA_OFFLOAD */ +#endif /* _QDF_IPA_WDI3_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_list.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_list.h new file mode 100644 index 0000000000000000000000000000000000000000..4f8b32c95e89199ecde224d51b3ae1427df6070b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_list.h @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_list.h + * QCA driver framework (QDF) list APIs + * Definitions for QDF Linked Lists API + * + * Lists are implemented as a doubly linked list. An item in a list can + * be of any type as long as the datatype contains a field of type + * qdf_link_t. + * + * In general, a list is a doubly linked list of items with a pointer + * to the front of the list and a pointer to the end of the list. The + * list items contain a forward and back link. + * + * QDF linked list APIs are NOT thread safe so make sure to use appropriate + * locking mechanisms to assure operations on the list are thread safe. + */ + +#if !defined(__QDF_LIST_H) +#define __QDF_LIST_H + +/* Include Files */ +#include +#include +#include +#include + +typedef __qdf_list_node_t qdf_list_node_t; +typedef __qdf_list_t qdf_list_t; + +/* Function declarations */ + +/** + * qdf_list_insert_before() - insert new node before the node + * @list: Pointer to list + * @new_node: Pointer to input node + * @node: node before which new node should be added. + * + * Return: QDF status + */ +QDF_STATUS qdf_list_insert_before(qdf_list_t *list, + qdf_list_node_t *new_node, qdf_list_node_t *node); +/** + * qdf_list_insert_after() - insert new node after the node + * @list: Pointer to list + * @new_node: Pointer to input node + * @node: node after which new node should be added. + * + * Return: QDF status + */ +QDF_STATUS qdf_list_insert_after(qdf_list_t *list, + qdf_list_node_t *new_node, qdf_list_node_t *node); +QDF_STATUS qdf_list_insert_front(qdf_list_t *list, qdf_list_node_t *node); + +QDF_STATUS qdf_list_insert_back_size(qdf_list_t *list, qdf_list_node_t *node, + uint32_t *size); + +QDF_STATUS qdf_list_remove_front(qdf_list_t *list, qdf_list_node_t **node1); + +QDF_STATUS qdf_list_peek_next(qdf_list_t *list, qdf_list_node_t *node, + qdf_list_node_t **node1); + +/** + * qdf_list_create() - Create qdf list and initialize list head + * @list: object of list + * @max_size: max size of the list + * + * Return: none + */ +static inline void qdf_list_create(__qdf_list_t *list, uint32_t max_size) +{ + __qdf_list_create(list, max_size); +} + +#define QDF_LIST_ANCHOR(list) __QDF_LIST_ANCHOR(list) + +#define QDF_LIST_NODE_INIT(prev, next) __QDF_LIST_NODE_INIT(prev, next) +#define QDF_LIST_NODE_INIT_SINGLE(node) __QDF_LIST_NODE_INIT_SINGLE(node) + +#define QDF_LIST_INIT(tail, head) __QDF_LIST_INIT(tail, head) +#define QDF_LIST_INIT_SINGLE(node) __QDF_LIST_INIT_SINGLE(node) +#define QDF_LIST_INIT_EMPTY(list) __QDF_LIST_INIT_EMPTY(list) + +#define qdf_list_for_each(list_ptr, cursor, node_field) \ + __qdf_list_for_each(list_ptr, cursor, node_field) + +#define qdf_list_for_each_del(list_ptr, cursor, next, node_field) \ + __qdf_list_for_each_del(list_ptr, cursor, next, node_field) + +/** + * qdf_init_list_head() - initialize list head + * @list_head: pointer to list head + * + * Return: none + */ +static inline void qdf_init_list_head(__qdf_list_node_t *list_head) +{ + __qdf_init_list_head(list_head); +} + +/** + * qdf_list_destroy() - Destroy the list + * @list: object of list + * Return: none + */ +static inline void qdf_list_destroy(qdf_list_t *list) +{ + if (list->count != 0) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "%s: list length not equal to zero", __func__); + QDF_ASSERT(0); + } +} + +/** + * qdf_list_size() - gives the size of the list + * @list: object of list + * @size: size of the list + * Return: uint32_t + */ +static inline uint32_t qdf_list_size(qdf_list_t *list) +{ + return __qdf_list_size(list); +} + +/** + * qdf_list_max_size() - gives the max size of the list + * @list: object of list + * Return: max size of the list + */ +static inline uint32_t qdf_list_max_size(qdf_list_t *list) +{ + return __qdf_list_max_size(list); +} + +QDF_STATUS qdf_list_insert_back(qdf_list_t *list, qdf_list_node_t *node); + +QDF_STATUS qdf_list_remove_back(qdf_list_t *list, qdf_list_node_t **node1); + +QDF_STATUS qdf_list_peek_front(qdf_list_t *list, qdf_list_node_t **node1); + +QDF_STATUS qdf_list_remove_node(qdf_list_t *list, + qdf_list_node_t *node_to_remove); + +bool qdf_list_empty(qdf_list_t *list); + +/** + * qdf_list_has_node() - check if a node is in a list + * @list: pointer to the list being searched + * @node: pointer to the node to search for + * + * This API has a time complexity of O(n). + * + * Return: true if the node is in the list + */ +bool qdf_list_has_node(qdf_list_t *list, qdf_list_node_t *node); + +/** + * qdf_list_node_in_any_list() - ensure @node is a member of a list + * @node: list node to check + * + * This API has a time complexity of O(1). See also qdf_list_has_node(). + * + * Return: true, if @node appears to be in a list + */ +bool qdf_list_node_in_any_list(const qdf_list_node_t *node); + +#endif /* __QDF_LIST_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_lock.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_lock.h new file mode 100644 index 0000000000000000000000000000000000000000..91f8948f723fded7f48ad9ced091eba5b8f7379f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_lock.h @@ -0,0 +1,525 @@ +/* + * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file qdf_lock.h + * This file abstracts locking operations. + */ + +#ifndef _QDF_LOCK_H +#define _QDF_LOCK_H + +#include +#include +#include +#include + +#ifndef QDF_LOCK_STATS +#define QDF_LOCK_STATS 0 +#endif +#ifndef QDF_LOCK_STATS_DESTROY_PRINT +#define QDF_LOCK_STATS_DESTROY_PRINT 0 +#endif +#ifndef QDF_LOCK_STATS_BUG_ON +#define QDF_LOCK_STATS_BUG_ON 0 +#endif +#ifndef QDF_LOCK_STATS_LIST +#define QDF_LOCK_STATS_LIST 0 +#endif + +/* Max hold time in micro seconds, 0 to disable detection*/ +#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ 10000 +#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK 0 + +#if QDF_LOCK_STATS +#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH 2000000 +#else +#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH 1000000 +#endif + +#if !QDF_LOCK_STATS +struct lock_stats {}; +#define BEFORE_LOCK(x...) do {} while (0) +#define AFTER_LOCK(x...) do {} while (0) +#define BEFORE_TRYLOCK(x...) do {} while (0) +#define AFTER_TRYLOCK(x...) do {} while (0) +#define BEFORE_UNLOCK(x...) do {} while (0) +#define qdf_lock_stats_create(x...) do {} while (0) +#define qdf_lock_stats_destroy(x...) do {} while (0) +#define qdf_lock_stats_init(x...) do {} while (0) +#define qdf_lock_stats_deinit(x...) do {} while (0) +#else +void qdf_lock_stats_init(void); +void qdf_lock_stats_deinit(void); +struct qdf_lock_cookie; +struct lock_stats { + const char *initialization_fn; + const char *acquired_by; + int line; + int acquired; + int contended; + uint64_t contention_time; + uint64_t non_contention_time; + uint64_t held_time; + uint64_t last_acquired; + uint64_t max_contention_wait; + uint64_t max_held_time; + int num_large_contentions; + int num_large_holds; + struct qdf_lock_cookie *cookie; +}; +#define LARGE_CONTENTION QDF_LOG_TIMESTAMP_CYCLES_PER_10_US + +#define BEFORE_LOCK(lock, was_locked) \ +do { \ + uint64_t BEFORE_LOCK_time; \ + uint64_t AFTER_LOCK_time; \ + bool BEFORE_LOCK_is_locked = was_locked; \ + BEFORE_LOCK_time = qdf_get_log_timestamp_lightweight(); \ + do {} while (0) + + +#define AFTER_LOCK(lock, func) \ + lock->stats.acquired_by = func; \ + AFTER_LOCK_time = qdf_get_log_timestamp_lightweight(); \ + lock->stats.acquired++; \ + lock->stats.last_acquired = AFTER_LOCK_time; \ + if (BEFORE_LOCK_is_locked) { \ + lock->stats.contended++; \ + lock->stats.contention_time += \ + (AFTER_LOCK_time - BEFORE_LOCK_time); \ + } else { \ + lock->stats.non_contention_time += \ + (AFTER_LOCK_time - BEFORE_LOCK_time); \ + } \ +\ + if (AFTER_LOCK_time - BEFORE_LOCK_time > LARGE_CONTENTION) \ + lock->stats.num_large_contentions++; \ +\ + if (AFTER_LOCK_time - BEFORE_LOCK_time > \ + lock->stats.max_contention_wait) \ + lock->stats.max_contention_wait = \ + AFTER_LOCK_time - BEFORE_LOCK_time; \ +} while (0) + +#define BEFORE_TRYLOCK(lock) \ +do { \ + uint64_t BEFORE_LOCK_time; \ + uint64_t AFTER_LOCK_time; \ + BEFORE_LOCK_time = qdf_get_log_timestamp_lightweight(); \ + do {} while (0) + +#define AFTER_TRYLOCK(lock, trylock_return, func) \ + AFTER_LOCK_time = qdf_get_log_timestamp_lightweight(); \ + if (trylock_return) { \ + lock->stats.acquired++; \ + lock->stats.last_acquired = AFTER_LOCK_time; \ + lock->stats.non_contention_time += \ + (AFTER_LOCK_time - BEFORE_LOCK_time); \ + lock->stats.acquired_by = func; \ + } \ +} while (0) + +/* max_hold_time in US */ +#define BEFORE_UNLOCK(lock, max_hold_time) \ +do {\ + uint64_t BEFORE_UNLOCK_time; \ + uint64_t held_time; \ + BEFORE_UNLOCK_time = qdf_get_log_timestamp_lightweight(); \ +\ + if (unlikely(BEFORE_UNLOCK_time < lock->stats.last_acquired)) \ + held_time = 0; \ + else \ + held_time = BEFORE_UNLOCK_time - lock->stats.last_acquired; \ +\ + lock->stats.held_time += held_time; \ +\ + if (held_time > lock->stats.max_held_time) \ + lock->stats.max_held_time = held_time; \ +\ + if (held_time > LARGE_CONTENTION) \ + lock->stats.num_large_holds++; \ + if (QDF_LOCK_STATS_BUG_ON && max_hold_time && \ + held_time > qdf_usecs_to_log_timestamp(max_hold_time)) { \ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, \ + "BEFORE_UNLOCK: lock held too long (%lluus)", \ + qdf_log_timestamp_to_usecs(held_time)); \ + QDF_BUG(0); \ + } \ + lock->stats.acquired_by = NULL; \ +} while (0) + +void qdf_lock_stats_cookie_destroy(struct lock_stats *stats); +void qdf_lock_stats_cookie_create(struct lock_stats *stats, + const char *func, int line); + +static inline void qdf_lock_stats_destroy(struct lock_stats *stats) +{ + if (QDF_LOCK_STATS_DESTROY_PRINT) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, + "%s: lock: %s %d \t" + "acquired:\t%d\tcontended:\t%d\t" + "contention_time\t%llu\tmax_contention_wait:\t%llu\t" + "non_contention_time\t%llu\t" + "held_time\t%llu\tmax_held:\t%llu" + , __func__, stats->initialization_fn, stats->line, + stats->acquired, stats->contended, + qdf_log_timestamp_to_usecs(stats->contention_time), + qdf_log_timestamp_to_usecs(stats->max_contention_wait), + qdf_log_timestamp_to_usecs(stats->non_contention_time), + qdf_log_timestamp_to_usecs(stats->held_time), + qdf_log_timestamp_to_usecs(stats->max_held_time)); + } + + if (QDF_LOCK_STATS_LIST) + qdf_lock_stats_cookie_destroy(stats); +} + +#ifndef MEMORY_DEBUG +#define qdf_mem_malloc_debug(x, y, z) qdf_mem_malloc(x) +#endif + +/* qdf_lock_stats_create() - initialize the lock stats structure + * + */ +static inline void qdf_lock_stats_create(struct lock_stats *stats, + const char *func, int line) +{ + qdf_mem_zero(stats, sizeof(*stats)); + stats->initialization_fn = func; + stats->line = line; + + if (QDF_LOCK_STATS_LIST) + qdf_lock_stats_cookie_create(stats, func, line); +} +#endif + +#include + +#define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0 +#define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0 +#define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1 + +/** + * qdf_semaphore_acquire_timeout() - Take the semaphore before timeout + * @m: semaphore to take + * @timeout: maximum time to try to take the semaphore + * Return: int + */ +static inline int qdf_semaphore_acquire_timeout(struct semaphore *m, + unsigned long timeout) +{ + return __qdf_semaphore_acquire_timeout(m, timeout); +} + +struct qdf_spinlock { + __qdf_spinlock_t lock; + struct lock_stats stats; +}; + +/** + * @brief Platform spinlock object + */ +typedef struct qdf_spinlock qdf_spinlock_t; + + +/** + * @brief Platform mutex object + */ +typedef __qdf_semaphore_t qdf_semaphore_t; +typedef __qdf_mutex_t qdf_mutex_t; + +/* function Declaration */ +QDF_STATUS qdf_mutex_create(qdf_mutex_t *m, const char *func, int line); +#define qdf_mutex_create(m) qdf_mutex_create(m, __func__, __LINE__) + +QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *m); + +QDF_STATUS qdf_mutex_release(qdf_mutex_t *m); + +QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock); + +/** + * qdf_spinlock_create - Initialize a spinlock + * @lock: spinlock object pointer + * Return: none + */ +static inline void qdf_spinlock_create(qdf_spinlock_t *lock, const char *func, + int line) +{ + __qdf_spinlock_create(&lock->lock); + + /* spinlock stats create relies on the spinlock working allread */ + qdf_lock_stats_create(&lock->stats, func, line); +} + +#define qdf_spinlock_create(x) qdf_spinlock_create(x, __func__, __LINE__) + +/** + * qdf_spinlock_destroy - Delete a spinlock + * @lock: spinlock object pointer + * Return: none + */ +static inline void qdf_spinlock_destroy(qdf_spinlock_t *lock) +{ + qdf_lock_stats_destroy(&lock->stats); + __qdf_spinlock_destroy(&lock->lock); +} + +/** + * qdf_spin_is_locked() - check if the spinlock is locked + * @lock: spinlock object + * + * Return: nonzero if lock is held. + */ +static inline int qdf_spin_is_locked(qdf_spinlock_t *lock) +{ + return __qdf_spin_is_locked(&lock->lock); +} + +/** + * qdf_spin_trylock_bh() - spin trylock bottomhalf + * @lock: spinlock object + * + * Return: nonzero if lock is acquired + */ +static inline int qdf_spin_trylock_bh(qdf_spinlock_t *lock, const char *func) +{ + int trylock_return; + + BEFORE_TRYLOCK(lock); + trylock_return = __qdf_spin_trylock_bh(&lock->lock); + AFTER_TRYLOCK(lock, trylock_return, func); + + return trylock_return; +} +#define qdf_spin_trylock_bh(lock) qdf_spin_trylock_bh(lock, __func__) + +/** + * qdf_spin_trylock() - spin trylock + * @lock: spinlock object + * Return: int + */ +static inline int qdf_spin_trylock(qdf_spinlock_t *lock, const char *func) +{ + int result = 0; + + BEFORE_LOCK(lock, qdf_spin_is_locked(lock)); + result = __qdf_spin_trylock(&lock->lock); + AFTER_LOCK(lock, func); + + return result; +} + +#define qdf_spin_trylock(lock) qdf_spin_trylock(lock, __func__) + +/** + * qdf_spin_lock_bh() - locks the spinlock mutex in soft irq context + * @lock: spinlock object pointer + * Return: none + */ +static inline void qdf_spin_lock_bh(qdf_spinlock_t *lock, const char *func) +{ + BEFORE_LOCK(lock, qdf_spin_is_locked(lock)); + __qdf_spin_lock_bh(&lock->lock); + AFTER_LOCK(lock, func); +} + +#define qdf_spin_lock_bh(lock) qdf_spin_lock_bh(lock, __func__) + +/** + * qdf_spin_unlock_bh() - unlocks the spinlock mutex in soft irq context + * @lock: spinlock object pointer + * Return: none + */ +static inline void qdf_spin_unlock_bh(qdf_spinlock_t *lock) +{ + BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH); + __qdf_spin_unlock_bh(&lock->lock); +} + +/** + * qdf_spinlock_irq_exec - Execute the input function with spinlock held + * and interrupt disabled. + * @hdl: OS handle + * @lock: spinlock to be held for the critical region + * @func: critical region function that to be executed + * @context: context of the critical region function + * Return: Boolean status returned by the critical region function + */ +static inline bool qdf_spinlock_irq_exec(qdf_handle_t hdl, + qdf_spinlock_t *lock, + qdf_irqlocked_func_t func, void *arg) +{ + return __qdf_spinlock_irq_exec(hdl, &lock->lock, func, arg); +} + +/** + * qdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive) + * @lock: Lock object + * + * Return: none + */ +static inline void qdf_spin_lock(qdf_spinlock_t *lock, const char *func) +{ + BEFORE_LOCK(lock, qdf_spin_is_locked(lock)); + __qdf_spin_lock(&lock->lock); + AFTER_LOCK(lock, func); +} +#define qdf_spin_lock(lock) qdf_spin_lock(lock, __func__) + +/** + * qdf_spin_unlock() - Unlock the spinlock and enables the Preemption + * @lock: Lock object + * + * Return: none + */ +static inline void qdf_spin_unlock(qdf_spinlock_t *lock) +{ + BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK); + __qdf_spin_unlock(&lock->lock); +} + +/** + * qdf_spin_lock_irq() - Acquire a Spinlock(SMP) & save the irq state + * @lock: Lock object + * @flags: flags + * + * Return: none + */ +static inline void qdf_spin_lock_irq(qdf_spinlock_t *lock, unsigned long flags, + const char *func) +{ + BEFORE_LOCK(lock, qdf_spin_is_locked(lock)); + __qdf_spin_lock_irq(&lock->lock.spinlock, flags); + AFTER_LOCK(lock, func); +} +#define qdf_spin_lock_irq(lock, flags) qdf_spin_lock_irq(lock, flags, __func__) + +/** + * qdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption + * (Preemptive) and disable IRQs + * @lock: Lock object + * + * Return: none + */ +static inline void qdf_spin_lock_irqsave(qdf_spinlock_t *lock, const char *func) +{ + BEFORE_LOCK(lock, qdf_spin_is_locked(lock)); + __qdf_spin_lock_irqsave(&lock->lock); + AFTER_LOCK(lock, func); +} +#define qdf_spin_lock_irqsave(lock) qdf_spin_lock_irqsave(lock, __func__) + +/** + * qdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the + * Preemption and enable IRQ + * @lock: Lock object + * + * Return: none + */ +static inline void qdf_spin_unlock_irqrestore(qdf_spinlock_t *lock) +{ + BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ); + __qdf_spin_unlock_irqrestore(&lock->lock); +} + +/** + * qdf_spin_unlock_irq() - Unlock a Spinlock(SMP) & save the restore state + * @lock: Lock object + * @flags: flags + * + * Return: none + */ +static inline void qdf_spin_unlock_irq(qdf_spinlock_t *lock, + unsigned long flags) +{ + BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ); + __qdf_spin_unlock_irq(&lock->lock.spinlock, flags); +} + +/** + * qdf_semaphore_init() - initialize a semaphore + * @m: Semaphore to initialize + * Return: None + */ +static inline void qdf_semaphore_init(qdf_semaphore_t *m) +{ + __qdf_semaphore_init(m); +} + +/** + * qdf_semaphore_acquire() - take the semaphore + * @m: Semaphore to take + * Return: int + */ +static inline int qdf_semaphore_acquire(qdf_semaphore_t *m) +{ + return __qdf_semaphore_acquire(m); +} + +/** + * qdf_semaphore_release() - give the semaphore + * @m: Semaphore to give + * Return: None + */ +static inline void qdf_semaphore_release(qdf_semaphore_t *m) +{ + __qdf_semaphore_release(m); +} + +/** + * qdf_semaphore_acquire_intr - Take the semaphore, interruptible version + * @osdev: OS Device + * @m: mutex to take + * Return: int + */ +static inline int qdf_semaphore_acquire_intr(qdf_semaphore_t *m) +{ + return __qdf_semaphore_acquire_intr(m); +} + +QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name); + +QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason); + +const char *qdf_wake_lock_name(qdf_wake_lock_t *lock); +QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, + uint32_t msec); + +QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason); + +QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock); + +void qdf_pm_system_wakeup(void); + +QDF_STATUS qdf_runtime_pm_get(void); +QDF_STATUS qdf_runtime_pm_put(void); +QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock); +QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock); + +QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name); + +#define qdf_runtime_lock_init(lock) __qdf_runtime_lock_init(lock, #lock) + +void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock); + +QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock); + +QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock); +#endif /* _QDF_LOCK_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_lro.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_lro.h new file mode 100644 index 0000000000000000000000000000000000000000..8049cf1e8575448bec91a4ebfa1f3425c88d1849 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_lro.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Large Receive Offload API + * This file defines the Large receive offload API. + */ +#ifndef _QDF_LRO_H +#define _QDF_LRO_H + +#include +#include + +/** + * @qdf_nbuf_t - Platform indepedent LRO context abstraction + */ +typedef __qdf_lro_ctx_t qdf_lro_ctx_t; + +/** + * qdf_lro_info_s - LRO information + * @iph: IP header + * @tcph: TCP header + */ +struct qdf_lro_info { + uint8_t *iph; + uint8_t *tcph; +}; + +#if defined(FEATURE_LRO) + +/** + * qdf_lro_init() - LRO initialization function + * + * Return: LRO context + */ +qdf_lro_ctx_t qdf_lro_init(void); + +/** + * qdf_lro_deinit() - LRO deinitialization function + * @lro_ctx: LRO context + * + * Return: nothing + */ +void qdf_lro_deinit(qdf_lro_ctx_t lro_ctx); + +/** + * qdf_lro_get_info() - Update the LRO information + * + * @lro_ctx: LRO context + * @nbuf: network buffer + * @info: LRO related information passed in by the caller + * @plro_desc: lro information returned as output + * + * Look-up the LRO descriptor based on the LRO information and + * the network buffer provided. Update the skb cb with the + * descriptor found + * + * Return: true: LRO eligible false: LRO ineligible + */ +bool qdf_lro_get_info(qdf_lro_ctx_t lro_ctx, qdf_nbuf_t nbuf, + struct qdf_lro_info *info, + void **plro_desc); + +/** + * qdf_lro_flush_pkt() - function to flush the LRO flow + * @info: LRO related information passed by the caller + * @lro_ctx: LRO context + * + * Flush all the packets aggregated in the LRO manager for the + * flow indicated by the TCP and IP header + * + * Return: none + */ +void qdf_lro_flush_pkt(qdf_lro_ctx_t lro_ctx, + struct qdf_lro_info *info); + +/** + * qdf_lro_flush() - LRO flush API + * @lro_ctx: LRO context + * + * Flush all the packets aggregated in the LRO manager for all + * the flows + * + * Return: none + */ +void qdf_lro_flush(qdf_lro_ctx_t lro_ctx); + +/** + * qdf_lro_desc_free() - Free the LRO descriptor + * @desc: LRO descriptor + * @lro_ctx: LRO context + * + * Return the LRO descriptor to the free pool + * + * Return: none + */ +void qdf_lro_desc_free(qdf_lro_ctx_t lro_ctx, void *desc); + +#else + +static inline qdf_lro_ctx_t qdf_lro_init(void) +{ + return NULL; +} + +static inline void qdf_lro_deinit(qdf_lro_ctx_t lro_ctx) +{ +} + +static inline void qdf_lro_flush(qdf_lro_ctx_t lro_ctx) +{ +} +#endif /* FEATURE_LRO */ +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mc_timer.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mc_timer.h new file mode 100644 index 0000000000000000000000000000000000000000..5d94df6d98352f26384ecce718adb9cb4e873e19 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mc_timer.h @@ -0,0 +1,330 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_mc_timer + * QCA driver framework timer APIs serialized to MC thread + */ + +#if !defined(__QDF_MC_TIMER_H) +#define __QDF_MC_TIMER_H + +/* Include Files */ +#include +#include +#include +#include + +#ifdef TIMER_MANAGER +#include +#endif + +/* Preprocessor definitions and constants */ +#define QDF_TIMER_STATE_COOKIE (0x12) +#define QDF_MC_TIMER_TO_MS_UNIT (1000) +#define QDF_MC_TIMER_TO_SEC_UNIT (1000000) + +/* Type declarations */ +/* qdf Timer callback function prototype (well, actually a prototype for + * a pointer to this callback function) + */ +typedef void (*qdf_mc_timer_callback_t)(void *user_data); + +typedef enum { + QDF_TIMER_STATE_UNUSED = QDF_TIMER_STATE_COOKIE, + QDF_TIMER_STATE_STOPPED, + QDF_TIMER_STATE_STARTING, + QDF_TIMER_STATE_RUNNING, +} QDF_TIMER_STATE; + +#ifdef TIMER_MANAGER +struct qdf_mc_timer_s; +typedef struct qdf_mc_timer_node_s { + qdf_list_node_t node; + char *file_name; + uint32_t line_num; + struct qdf_mc_timer_s *qdf_timer; +} qdf_mc_timer_node_t; +#endif + +typedef struct qdf_mc_timer_s { +#ifdef TIMER_MANAGER + qdf_mc_timer_node_t *timer_node; +#endif + qdf_mc_timer_platform_t platform_info; + qdf_mc_timer_callback_t callback; + void *user_data; + qdf_mutex_t lock; + QDF_TIMER_TYPE type; + QDF_TIMER_STATE state; +} qdf_mc_timer_t; + + +void qdf_try_allowing_sleep(QDF_TIMER_TYPE type); + +/* Function declarations and documenation */ +#ifdef TIMER_MANAGER +void qdf_mc_timer_manager_init(void); +void qdf_mc_timer_manager_exit(void); +void qdf_mc_timer_check_for_leaks(void); +#else +/** + * qdf_mc_timer_manager_init() - initialize QDF debug timer manager + * This API initializes QDF timer debug functionality. + * + * Return: none + */ +static inline void qdf_mc_timer_manager_init(void) +{ +} + +/** + * qdf_mc_timer_manager_exit() - exit QDF timer debug functionality + * This API exists QDF timer debug functionality + * + * Return: none + */ +static inline void qdf_mc_timer_manager_exit(void) +{ +} + +/** + * qdf_mc_timer_check_for_leaks() - Assert there are no active mc timers + * + * If there are active timers, this API prints them and panics. + * + * Return: None + */ +static inline void qdf_mc_timer_check_for_leaks(void) { } +#endif +/** + * qdf_mc_timer_get_current_state() - get the current state of the timer + * @timer: Pointer to timer object + * + * Return: + * QDF_TIMER_STATE - qdf timer state + */ + +QDF_TIMER_STATE qdf_mc_timer_get_current_state(qdf_mc_timer_t *timer); + +/** + * qdf_mc_timer_init() - initialize a QDF timer + * @timer: Pointer to timer object + * @timer_type: Type of timer + * @callback: Callback to be called after timer expiry + * @ser_data: User data which will be passed to callback function + * + * This API initializes a QDF Timer object. + * + * qdf_mc_timer_init() initializes a QDF Timer object. A timer must be + * initialized by calling qdf_mc_timer_initialize() before it may be used in + * any other timer functions. + * + * Attempting to initialize timer that is already initialized results in + * a failure. A destroyed timer object can be re-initialized with a call to + * qdf_mc_timer_init(). The results of otherwise referencing the object + * after it has been destroyed are undefined. + * + * Calls to QDF timer functions to manipulate the timer such + * as qdf_mc_timer_set() will fail if the timer is not initialized or has + * been destroyed. Therefore, don't use the timer after it has been + * destroyed until it has been re-initialized. + * + * All callback will be executed within the CDS main thread unless it is + * initialized from the Tx thread flow, in which case it will be executed + * within the tx thread flow. + * + * Return: + * QDF_STATUS_SUCCESS - Timer is initialized successfully + * QDF failure status - Timer initialization failed + */ +#ifdef TIMER_MANAGER +#define qdf_mc_timer_init(timer, timer_type, callback, userdata) \ + qdf_mc_timer_init_debug(timer, timer_type, callback, userdata, \ + __FILE__, __LINE__) + +QDF_STATUS qdf_mc_timer_init_debug(qdf_mc_timer_t *timer, + QDF_TIMER_TYPE timer_type, + qdf_mc_timer_callback_t callback, + void *user_data, char *file_name, + uint32_t line_num); +#else +QDF_STATUS qdf_mc_timer_init(qdf_mc_timer_t *timer, QDF_TIMER_TYPE timer_type, + qdf_mc_timer_callback_t callback, + void *user_data); +#endif + +/** + * qdf_mc_timer_destroy() - destroy QDF timer + * @timer: Pointer to timer object + * + * qdf_mc_timer_destroy() function shall destroy the timer object. + * After a successful return from \a qdf_mc_timer_destroy() the timer + * object becomes, in effect, uninitialized. + * + * A destroyed timer object can be re-initialized by calling + * qdf_mc_timer_init(). The results of otherwise referencing the object + * after it has been destroyed are undefined. + * + * Calls to QDF timer functions to manipulate the timer, such + * as qdf_mc_timer_set() will fail if the lock is destroyed. Therefore, + * don't use the timer after it has been destroyed until it has + * been re-initialized. + * + * Return: + * QDF_STATUS_SUCCESS - Timer is initialized successfully + * QDF failure status - Timer initialization failed + */ +QDF_STATUS qdf_mc_timer_destroy(qdf_mc_timer_t *timer); + +/** + * qdf_mc_timer_start() - start a QDF Timer object + * @timer: Pointer to timer object + * @expiration_time: Time to expire + * + * qdf_mc_timer_start() function starts a timer to expire after the + * specified interval, thus running the timer callback function when + * the interval expires. + * + * A timer only runs once (a one-shot timer). To re-start the + * timer, qdf_mc_timer_start() has to be called after the timer runs + * or has been cancelled. + * + * Return: + * QDF_STATUS_SUCCESS - Timer is initialized successfully + * QDF failure status - Timer initialization failed + */ +QDF_STATUS qdf_mc_timer_start(qdf_mc_timer_t *timer, uint32_t expiration_time); + +/** + * qdf_mc_timer_stop() - stop a QDF Timer + * @timer: Pointer to timer object + * qdf_mc_timer_stop() function stops a timer that has been started but + * has not expired, essentially cancelling the 'start' request. + * + * After a timer is stopped, it goes back to the state it was in after it + * was created and can be started again via a call to qdf_mc_timer_start(). + * + * Return: + * QDF_STATUS_SUCCESS - Timer is initialized successfully + * QDF failure status - Timer initialization failed + */ +QDF_STATUS qdf_mc_timer_stop(qdf_mc_timer_t *timer); + +/** + * qdf_mc_timer_stop_sync() - stop a QDF Timer + * @timer: Pointer to timer object + * qdf_mc_timer_stop_sync() function stops a timer synchronously + * that has been started but has not expired, essentially + * cancelling the 'start' request. + * + * After a timer is stopped, it goes back to the state it was in after it + * was created and can be started again via a call to qdf_mc_timer_start(). + * + * Return: + * QDF_STATUS_SUCCESS - Timer is initialized successfully + * QDF failure status - Timer initialization failed + */ +QDF_STATUS qdf_mc_timer_stop_sync(qdf_mc_timer_t *timer); + +/** + * qdf_mc_timer_get_system_ticks() - get the system time in 10ms ticks + * + * qdf_mc_timer_get_system_ticks() function returns the current number + * of timer ticks in 10msec intervals. This function is suitable timestamping + * and calculating time intervals by calculating the difference between two + * timestamps. + * + * Return: + * The current system tick count (in 10msec intervals). This + * function cannot fail. + */ +unsigned long qdf_mc_timer_get_system_ticks(void); + +/** + * qdf_mc_timer_get_system_time() - Get the system time in milliseconds + * + * qdf_mc_timer_get_system_time() function returns the number of milliseconds + * that have elapsed since the system was started + * + * Return: + * The current system time in milliseconds + */ +unsigned long qdf_mc_timer_get_system_time(void); + +/** + * qdf_get_monotonic_boottime_ns() - Get kernel boottime in ns + * + * Return: kernel boottime in nano sec (includes time spent in suspend) + */ +s64 qdf_get_monotonic_boottime_ns(void); + +/** + * qdf_timer_module_init() - initializes a QDF timer module. + * + * This API initializes the QDF timer module. This needs to be called + * exactly once prior to using any QDF timers. + * + * Return: none + */ +void qdf_timer_module_init(void); + +/** + * qdf_get_time_of_the_day_ms() - get time of the day in millisec + * + * Return: time of the day in ms + */ +qdf_time_t qdf_get_time_of_the_day_ms(void); + +/** + * qdf_timer_module_deinit() - Deinitializes a QDF timer module. + * + * This API deinitializes the QDF timer module. + * Return: none + */ +void qdf_timer_module_deinit(void); + +/** + * qdf_get_time_of_the_day_in_hr_min_sec_usec() - Get system time + * @tbuf: Pointer to time stamp buffer + * @len: Time buffer size + * + * This function updates the 'tbuf' with system time in hr:min:sec:msec format + * + * Return: None + */ +void qdf_get_time_of_the_day_in_hr_min_sec_usec(char *tbuf, int len); + +void qdf_register_mc_timer_callback(void (*callback) (qdf_mc_timer_t *data)); + +/** + * qdf_timer_set_multiplier() - set the global QDF timer scalar value + * @multiplier: the scalar value to apply + * + * Return: None + */ +void qdf_timer_set_multiplier(uint32_t multiplier); + +/** + * qdf_timer_get_multiplier() - get the global QDF timer scalar value + * + * Return: the global QDF timer scalar value + */ +uint32_t qdf_timer_get_multiplier(void); + +#endif /* __QDF_MC_TIMER_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mem.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mem.h new file mode 100644 index 0000000000000000000000000000000000000000..56c3c498008caf1d1417ffd6c8be29782791c704 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mem.h @@ -0,0 +1,865 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_mem + * QCA driver framework (QDF) memory management APIs + */ + +#if !defined(__QDF_MEMORY_H) +#define __QDF_MEMORY_H + +/* Include Files */ +#include +#include +#include + +#define QDF_CACHE_LINE_SZ __qdf_cache_line_sz + +/** + * qdf_align() - align to the given size. + * @a: input that needs to be aligned. + * @align_size: boundary on which 'a' has to be alinged. + * + * Return: aligned value. + */ +#define qdf_align(a, align_size) __qdf_align(a, align_size) +#define qdf_page_size __page_size + +/** + * struct qdf_mem_dma_page_t - Allocated dmaable page + * @page_v_addr_start: Page start virtual address + * @page_v_addr_end: Page end virtual address + * @page_p_addr: Page start physical address + */ +struct qdf_mem_dma_page_t { + char *page_v_addr_start; + char *page_v_addr_end; + qdf_dma_addr_t page_p_addr; +}; + +/** + * struct qdf_mem_multi_page_t - multiple page allocation information storage + * @num_element_per_page: Number of element in single page + * @num_pages: Number of allocation needed pages + * @dma_pages: page information storage in case of coherent memory + * @cacheable_pages: page information storage in case of cacheable memory + * @is_mem_prealloc: flag for multiple pages pre-alloc or not + */ +struct qdf_mem_multi_page_t { + uint16_t num_element_per_page; + uint16_t num_pages; + struct qdf_mem_dma_page_t *dma_pages; + void **cacheable_pages; + qdf_size_t page_size; +#ifdef DP_MEM_PRE_ALLOC + uint8_t is_mem_prealloc; +#endif +}; + + +/* Preprocessor definitions and constants */ + +typedef __qdf_mempool_t qdf_mempool_t; + +/** + * qdf_mem_init() - Initialize QDF memory module + * + * Return: None + * + */ +void qdf_mem_init(void); + +/** + * qdf_mem_exit() - Exit QDF memory module + * + * Return: None + * + */ +void qdf_mem_exit(void); + +#define QDF_MEM_FUNC_NAME_SIZE 48 + +#ifdef MEMORY_DEBUG +/** + * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled + * + * Return: value of mem_debug_disabled qdf module argument + */ +bool qdf_mem_debug_config_get(void); + +/** + * qdf_mem_malloc_debug() - debug version of QDF memory allocation API + * @size: Number of bytes of memory to allocate. + * @func: Function name of the call site + * @line: Line number of the call site + * @caller: Address of the caller function + * @flag: GFP flag + * + * This function will dynamicallly allocate the specified number of bytes of + * memory and add it to the qdf tracking list to check for memory leaks and + * corruptions + * + * Return: A valid memory location on success, or NULL on failure + */ +void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line, + void *caller, uint32_t flag); + +#define qdf_mem_malloc(size) \ + qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, 0) + +#define qdf_mem_malloc_fl(size, func, line) \ + qdf_mem_malloc_debug(size, func, line, QDF_RET_IP, 0) + +#define qdf_mem_malloc_atomic(size) \ + qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, GFP_ATOMIC) +/** + * qdf_mem_free_debug() - debug version of qdf_mem_free + * @ptr: Pointer to the starting address of the memory to be freed. + * + * This function will free the memory pointed to by 'ptr'. It also checks for + * memory corruption, underrun, overrun, double free, domain mismatch, etc. + * + * Return: none + */ +void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line); + +#define qdf_mem_free(ptr) \ + qdf_mem_free_debug(ptr, __func__, __LINE__) + +void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev, + struct qdf_mem_multi_page_t *pages, + size_t element_size, uint16_t element_num, + qdf_dma_context_t memctxt, bool cacheable, + const char *func, uint32_t line, + void *caller); + +#define qdf_mem_multi_pages_alloc(osdev, pages, element_size, element_num,\ + memctxt, cacheable) \ + qdf_mem_multi_pages_alloc_debug(osdev, pages, element_size, \ + element_num, memctxt, cacheable, \ + __func__, __LINE__, QDF_RET_IP) + +void qdf_mem_multi_pages_free_debug(qdf_device_t osdev, + struct qdf_mem_multi_page_t *pages, + qdf_dma_context_t memctxt, bool cacheable, + const char *func, uint32_t line); + +#define qdf_mem_multi_pages_free(osdev, pages, memctxt, cacheable) \ + qdf_mem_multi_pages_free_debug(osdev, pages, memctxt, cacheable, \ + __func__, __LINE__) + +/** + * qdf_mem_check_for_leaks() - Assert that the current memory domain is empty + * + * Call this to ensure there are no active memory allocations being tracked + * against the current debug domain. For example, one should call this function + * immediately before a call to qdf_debug_domain_set() as a memory leak + * detection mechanism. + * + * e.g. + * qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE); + * + * ... + * + * // memory is allocated and freed + * + * ... + * + * // before transitioning back to inactive state, + * // make sure all active memory has been freed + * qdf_mem_check_for_leaks(); + * qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT); + * + * ... + * + * // also, before program exit, make sure init time memory is freed + * qdf_mem_check_for_leaks(); + * exit(); + * + * Return: None + */ +void qdf_mem_check_for_leaks(void); + +/** + * qdf_mem_alloc_consistent_debug() - allocates consistent qdf memory + * @osdev: OS device handle + * @dev: Pointer to device handle + * @size: Size to be allocated + * @paddr: Physical address + * @func: Function name of the call site + * @line: line numbe rof the call site + * @caller: Address of the caller function + * + * Return: pointer of allocated memory or null if memory alloc fails + */ +void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev, + qdf_size_t size, qdf_dma_addr_t *paddr, + const char *func, uint32_t line, + void *caller); + +#define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \ + qdf_mem_alloc_consistent_debug(osdev, dev, size, paddr, \ + __func__, __LINE__, QDF_RET_IP) + +/** + * qdf_mem_free_consistent_debug() - free consistent qdf memory + * @osdev: OS device handle + * @size: Size to be allocated + * @vaddr: virtual address + * @paddr: Physical address + * @memctx: Pointer to DMA context + * @func: Function name of the call site + * @line: line numbe rof the call site + * + * Return: none + */ +void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev, + qdf_size_t size, void *vaddr, + qdf_dma_addr_t paddr, + qdf_dma_context_t memctx, + const char *func, uint32_t line); + +#define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \ + qdf_mem_free_consistent_debug(osdev, dev, size, vaddr, paddr, memctx, \ + __func__, __LINE__) +#else +static inline bool qdf_mem_debug_config_get(void) +{ + return false; +} + +/** + * qdf_mem_malloc() - allocation QDF memory + * @size: Number of bytes of memory to allocate. + * + * This function will dynamicallly allocate the specified number of bytes of + * memory. + * + * Return: + * Upon successful allocate, returns a non-NULL pointer to the allocated + * memory. If this function is unable to allocate the amount of memory + * specified (for any reason) it returns NULL. + */ +#define qdf_mem_malloc(size) \ + __qdf_mem_malloc(size, __func__, __LINE__) + +#define qdf_mem_malloc_fl(size, func, line) \ + __qdf_mem_malloc(size, func, line) + +/** + * qdf_mem_malloc_atomic() - allocation QDF memory atomically + * @size: Number of bytes of memory to allocate. + * + * This function will dynamicallly allocate the specified number of bytes of + * memory. + * + * Return: + * Upon successful allocate, returns a non-NULL pointer to the allocated + * memory. If this function is unable to allocate the amount of memory + * specified (for any reason) it returns NULL. + */ +#define qdf_mem_malloc_atomic(size) \ + qdf_mem_malloc_atomic_fl(size, __func__, __LINE__) + +void *qdf_mem_malloc_atomic_fl(qdf_size_t size, + const char *func, + uint32_t line); + +#define qdf_mem_free(ptr) \ + __qdf_mem_free(ptr) + +static inline void qdf_mem_check_for_leaks(void) { } + +#define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \ + __qdf_mem_alloc_consistent(osdev, dev, size, paddr, __func__, __LINE__) + +#define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \ + __qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) + +void qdf_mem_multi_pages_alloc(qdf_device_t osdev, + struct qdf_mem_multi_page_t *pages, + size_t element_size, uint16_t element_num, + qdf_dma_context_t memctxt, bool cacheable); + +void qdf_mem_multi_pages_free(qdf_device_t osdev, + struct qdf_mem_multi_page_t *pages, + qdf_dma_context_t memctxt, bool cacheable); + +#endif /* MEMORY_DEBUG */ + +/** + * qdf_mem_multi_pages_zero() - zero out each page memory + * @pages: Multi page information storage + * @cacheable: Coherent memory or cacheable memory + * + * This function will zero out each page memory + * + * Return: None + */ +void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages, + bool cacheable); + +/** + * qdf_aligned_malloc() - allocates aligned QDF memory. + * @size: Size to be allocated + * @vaddr_unaligned: Unaligned virtual address. + * @paddr_unaligned: Unaligned physical address. + * @paddr_aligned: Aligned physical address. + * @align: Base address alignment. + * @func: Function name of the call site. + * @line: Line number of the call site. + * + * This function will dynamically allocate the specified number of bytes of + * memory. Checks if the allocated base address is aligned with base_align. + * If not, it frees the allocated memory, adds base_align to alloc size and + * re-allocates the memory. + * + * Return: + * Upon successful allocate, returns an aligned base address of the allocated + * memory. If this function is unable to allocate the amount of memory + * specified (for any reason) it returns NULL. + */ +#define qdf_aligned_malloc(size, vaddr_unaligned, paddr_unaligned, \ + paddr_aligned, align) \ + qdf_aligned_malloc_fl(size, vaddr_unaligned, paddr_unaligned, \ + paddr_aligned, align, __func__, __LINE__) + +void *qdf_aligned_malloc_fl(uint32_t *size, void **vaddr_unaligned, + qdf_dma_addr_t *paddr_unaligned, + qdf_dma_addr_t *paddr_aligned, + uint32_t align, + const char *func, uint32_t line); + +/** + * qdf_aligned_mem_alloc_consistent() - allocates consistent qdf memory + * @osdev: OS device handle + * @size: Size to be allocated + * @vaddr_unaligned: Unaligned virtual address. + * @paddr_unaligned: Unaligned physical address. + * @paddr_aligned: Aligned physical address. + * @align: Base address alignment. + * @func: Function name of the call site. + * @line: Line number of the call site. + * + * Return: pointer of allocated memory or null if memory alloc fails. + */ +#define qdf_aligned_mem_alloc_consistent(osdev, size, vaddr_unaligned, \ + paddr_unaligned, paddr_aligned, \ + align) \ + qdf_aligned_mem_alloc_consistent_fl(osdev, size, vaddr_unaligned, \ + paddr_unaligned, paddr_aligned, \ + align, __func__, __LINE__) + +void *qdf_aligned_mem_alloc_consistent_fl(qdf_device_t osdev, uint32_t *size, + void **vaddr_unaligned, + qdf_dma_addr_t *paddr_unaligned, + qdf_dma_addr_t *paddr_aligned, + uint32_t align, const char *func, + uint32_t line); + +#define qdf_mem_virt_to_phys(vaddr) virt_to_phys(vaddr) + +void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value); + +void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, + uint32_t num_bytes); + +/** + * qdf_mem_set() - set (fill) memory with a specified byte value. + * @ptr: Pointer to memory that will be set + * @num_bytes: Number of bytes to be set + * @value: Byte set in memory + * + * WARNING: parameter @num_bytes and @value are swapped comparing with + * standard C function "memset", please ensure correct usage of this function! + * + * Return: None + */ +void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value); + +/** + * qdf_mem_zero() - zero out memory + * @ptr: pointer to memory that will be set to zero + * @num_bytes: number of bytes zero + * + * This function sets the memory location to all zeros, essentially clearing + * the memory. + * + * Return: None + */ +static inline void qdf_mem_zero(void *ptr, uint32_t num_bytes) +{ + qdf_mem_set(ptr, num_bytes, 0); +} + +/** + * qdf_mem_copy() - copy memory + * @dst_addr: Pointer to destination memory location (to copy to) + * @src_addr: Pointer to source memory location (to copy from) + * @num_bytes: Number of bytes to copy. + * + * Copy host memory from one location to another, similar to memcpy in + * standard C. Note this function does not specifically handle overlapping + * source and destination memory locations. Calling this function with + * overlapping source and destination memory locations will result in + * unpredictable results. Use qdf_mem_move() if the memory locations + * for the source and destination are overlapping (or could be overlapping!) + * + * Return: none + */ +void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes); + +/** + * qdf_mem_move() - move memory + * @dst_addr: pointer to destination memory location (to move to) + * @src_addr: pointer to source memory location (to move from) + * @num_bytes: number of bytes to move. + * + * Move host memory from one location to another, similar to memmove in + * standard C. Note this function *does* handle overlapping + * source and destination memory locations. + + * Return: None + */ +void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes); + +/** + * qdf_mem_cmp() - memory compare + * @left: pointer to one location in memory to compare + * @right: pointer to second location in memory to compare + * @size: the number of bytes to compare + * + * Function to compare two pieces of memory, similar to memcmp function + * in standard C. + * + * Return: + * 0 -- equal + * < 0 -- *memory1 is less than *memory2 + * > 0 -- *memory1 is bigger than *memory2 + */ +int qdf_mem_cmp(const void *left, const void *right, size_t size); + +void qdf_ether_addr_copy(void *dst_addr, const void *src_addr); + +/** + * qdf_mem_map_nbytes_single - Map memory for DMA + * @osdev: pomter OS device context + * @buf: pointer to memory to be dma mapped + * @dir: DMA map direction + * @nbytes: number of bytes to be mapped. + * @phy_addr: ponter to recive physical address. + * + * Return: success/failure + */ +static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf, + qdf_dma_dir_t dir, int nbytes, + qdf_dma_addr_t *phy_addr) +{ +#if defined(HIF_PCI) || defined(HIF_IPCI) + return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr); +#else + return 0; +#endif +} + +static inline void qdf_mem_dma_cache_sync(qdf_device_t osdev, + qdf_dma_addr_t buf, + qdf_dma_dir_t dir, + int nbytes) +{ + __qdf_mem_dma_cache_sync(osdev, buf, dir, nbytes); +} + +/** + * qdf_mem_unmap_nbytes_single() - un_map memory for DMA + * @osdev: pomter OS device context + * @phy_addr: physical address of memory to be dma unmapped + * @dir: DMA unmap direction + * @nbytes: number of bytes to be unmapped. + * + * Return: none + */ +static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev, + qdf_dma_addr_t phy_addr, + qdf_dma_dir_t dir, + int nbytes) +{ +#if defined(HIF_PCI) || defined(HIF_IPCI) + __qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes); +#endif +} + +/** + * qdf_mempool_init - Create and initialize memory pool + * @osdev: platform device object + * @pool_addr: address of the pool created + * @elem_cnt: no. of elements in pool + * @elem_size: size of each pool element in bytes + * @flags: flags + * Return: Handle to memory pool or NULL if allocation failed + */ +static inline int qdf_mempool_init(qdf_device_t osdev, + qdf_mempool_t *pool_addr, int elem_cnt, + size_t elem_size, uint32_t flags) +{ + return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size, + flags); +} + +/** + * qdf_mempool_destroy - Destroy memory pool + * @osdev: platform device object + * @Handle: to memory pool + * Return: none + */ +static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool) +{ + __qdf_mempool_destroy(osdev, pool); +} + +/** + * qdf_mempool_alloc - Allocate an element memory pool + * @osdev: platform device object + * @Handle: to memory pool + * Return: Pointer to the allocated element or NULL if the pool is empty + */ +static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool) +{ + return (void *)__qdf_mempool_alloc(osdev, pool); +} + +/** + * qdf_mempool_free - Free a memory pool element + * @osdev: Platform device object + * @pool: Handle to memory pool + * @buf: Element to be freed + * Return: none + */ +static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool, + void *buf) +{ + __qdf_mempool_free(osdev, pool, buf); +} + +void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev, + qdf_dma_addr_t bus_addr, + qdf_size_t size, + __dma_data_direction direction); + +void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev, + qdf_dma_addr_t bus_addr, + qdf_size_t size, + __dma_data_direction direction); + +int qdf_mem_multi_page_link(qdf_device_t osdev, + struct qdf_mem_multi_page_t *pages, + uint32_t elem_size, uint32_t elem_count, uint8_t cacheable); + +#ifdef WLAN_DEBUGFS + +/** + * qdf_mem_kmalloc_inc() - increment kmalloc allocated bytes count + * @size: number of bytes to increment by + * + * Return: None + */ +void qdf_mem_kmalloc_inc(qdf_size_t size); + +/** + * qdf_mem_kmalloc_dec() - decrement kmalloc allocated bytes count + * @size: number of bytes to decrement by + * + * Return: None + */ +void qdf_mem_kmalloc_dec(qdf_size_t size); + +#else + +static inline void qdf_mem_kmalloc_inc(qdf_size_t size) { } +static inline void qdf_mem_kmalloc_dec(qdf_size_t size) { } + +#endif /* WLAN_DEBUGFS */ + +/** + * qdf_mem_skb_inc() - increment total skb allocation size + * @size: size to be added + * + * Return: none + */ +void qdf_mem_skb_inc(qdf_size_t size); + +/** + * qdf_mem_skb_dec() - decrement total skb allocation size + * @size: size to be decremented + * + * Return: none + */ +void qdf_mem_skb_dec(qdf_size_t size); + +/** + * qdf_mem_map_table_alloc() - Allocate shared memory info structure + * @num: number of required storage + * + * Allocate mapping table for DMA memory allocation. This is needed for + * IPA-WLAN buffer sharing when SMMU Stage1 Translation is enabled. + * + * Return: shared memory info storage table pointer + */ +static inline qdf_mem_info_t *qdf_mem_map_table_alloc(uint32_t num) +{ + qdf_mem_info_t *mem_info_arr; + + mem_info_arr = qdf_mem_malloc(num * sizeof(mem_info_arr[0])); + return mem_info_arr; +} + +/** + * qdf_update_mem_map_table() - Update DMA memory map info + * @osdev: Parent device instance + * @mem_info: Pointer to shared memory information + * @dma_addr: dma address + * @mem_size: memory size allocated + * + * Store DMA shared memory information + * + * Return: none + */ +static inline void qdf_update_mem_map_table(qdf_device_t osdev, + qdf_mem_info_t *mem_info, + qdf_dma_addr_t dma_addr, + uint32_t mem_size) +{ + if (!mem_info) { + qdf_nofl_err("%s: NULL mem_info", __func__); + return; + } + + __qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size); +} + +/** + * qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status + * @osdev parent device instance + * + * Return: true if smmu s1 enabled, false if smmu s1 is bypassed + */ +static inline bool qdf_mem_smmu_s1_enabled(qdf_device_t osdev) +{ + return __qdf_mem_smmu_s1_enabled(osdev); +} + +/** + * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address + * @osdev: Parent device instance + * @dma_addr: DMA/IOVA address + * + * Get actual physical address from dma_addr based on SMMU enablement status. + * IF SMMU Stage 1 tranlation is enabled, DMA APIs return IO virtual address + * (IOVA) otherwise returns physical address. So get SMMU physical address + * mapping from IOVA. + * + * Return: dmaable physical address + */ +static inline qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev, + qdf_dma_addr_t dma_addr) +{ + return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr); +} + +/** + * qdf_mem_dma_get_sgtable() - Returns DMA memory scatter gather table + * @dev: device instace + * @sgt: scatter gather table pointer + * @cpu_addr: HLOS virtual address + * @dma_addr: dma address + * @size: allocated memory size + * + * Return: physical address + */ +static inline int +qdf_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr, + qdf_dma_addr_t dma_addr, size_t size) +{ + return __qdf_os_mem_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); +} + +/** + * qdf_mem_free_sgtable() - Free a previously allocated sg table + * @sgt: the mapped sg table header + * + * Return: None + */ +static inline void +qdf_mem_free_sgtable(struct sg_table *sgt) +{ + __qdf_os_mem_free_sgtable(sgt); +} + +/** + * qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist elements + * @sgt: scatter gather table pointer + * + * Return: None + */ +static inline void +qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt) +{ + __qdf_dma_get_sgtable_dma_addr(sgt); +} + +/** + * qdf_mem_get_dma_addr() - Return dma address based on SMMU translation status. + * @osdev: Parent device instance + * @mem_info: Pointer to allocated memory information + * + * Get dma address based on SMMU enablement status. If SMMU Stage 1 + * tranlation is enabled, DMA APIs return IO virtual address otherwise + * returns physical address. + * + * Return: dma address + */ +static inline qdf_dma_addr_t qdf_mem_get_dma_addr(qdf_device_t osdev, + qdf_mem_info_t *mem_info) +{ + return __qdf_mem_get_dma_addr(osdev, mem_info); +} + +/** + * qdf_mem_get_dma_addr_ptr() - Return DMA address pointer from mem info struct + * @osdev: Parent device instance + * @mem_info: Pointer to allocated memory information + * + * Based on smmu stage 1 translation enablement, return corresponding dma + * address storage pointer. + * + * Return: dma address storage pointer + */ +static inline qdf_dma_addr_t *qdf_mem_get_dma_addr_ptr(qdf_device_t osdev, + qdf_mem_info_t *mem_info) +{ + return __qdf_mem_get_dma_addr_ptr(osdev, mem_info); +} + + +/** + * qdf_mem_get_dma_size() - Return DMA memory size + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * + * Return: DMA memory size + */ +static inline uint32_t +qdf_mem_get_dma_size(qdf_device_t osdev, + qdf_mem_info_t *mem_info) +{ + return __qdf_mem_get_dma_size(osdev, mem_info); +} + +/** + * qdf_mem_set_dma_size() - Set DMA memory size + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * @mem_size: memory size allocated + * + * Return: none + */ +static inline void +qdf_mem_set_dma_size(qdf_device_t osdev, + qdf_mem_info_t *mem_info, + uint32_t mem_size) +{ + __qdf_mem_set_dma_size(osdev, mem_info, mem_size); +} + +/** + * qdf_mem_get_dma_size() - Return DMA physical address + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * + * Return: DMA physical address + */ +static inline qdf_dma_addr_t +qdf_mem_get_dma_pa(qdf_device_t osdev, + qdf_mem_info_t *mem_info) +{ + return __qdf_mem_get_dma_pa(osdev, mem_info); +} + +/** + * qdf_mem_set_dma_size() - Set DMA physical address + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * @dma_pa: DMA phsical address + * + * Return: none + */ +static inline void +qdf_mem_set_dma_pa(qdf_device_t osdev, + qdf_mem_info_t *mem_info, + qdf_dma_addr_t dma_pa) +{ + __qdf_mem_set_dma_pa(osdev, mem_info, dma_pa); +} + +/** + * qdf_mem_shared_mem_alloc() - Allocate DMA memory for shared resource + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * @size: size to be allocated + * + * Allocate DMA memory which will be shared with external kernel module. This + * information is needed for SMMU mapping. + * + * Return: 0 success + */ +qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size); + +/** + * qdf_mem_shared_mem_free() - Free shared memory + * @osdev: parent device instance + * @shared_mem: shared memory information storage + * + * Free DMA shared memory resource + * + * Return: None + */ +static inline void qdf_mem_shared_mem_free(qdf_device_t osdev, + qdf_shared_mem_t *shared_mem) +{ + if (!shared_mem) { + qdf_nofl_err("%s: NULL shared mem struct passed", + __func__); + return; + } + + if (shared_mem->vaddr) { + qdf_mem_free_consistent(osdev, osdev->dev, + qdf_mem_get_dma_size(osdev, + &shared_mem->mem_info), + shared_mem->vaddr, + qdf_mem_get_dma_addr(osdev, + &shared_mem->mem_info), + qdf_get_dma_mem_context(shared_mem, + memctx)); + } + qdf_mem_free_sgtable(&shared_mem->sgtable); + qdf_mem_free(shared_mem); +} + +#endif /* __QDF_MEMORY_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_module.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_module.h new file mode 100644 index 0000000000000000000000000000000000000000..4ff235bb6a6315be7a5963558889bdec11cf4df9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_module.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file qdf_module.h + * This file abstracts "kernel module" semantics. + */ + +#ifndef _QDF_MODULE_H +#define _QDF_MODULE_H + +#include + +typedef uint32_t (*module_init_func_t)(void); + +/** + * qdf_virt_module_init - Specify the module's entry point. + */ +#define qdf_virt_module_init(_mod_init_func) \ + __qdf_virt_module_init(_mod_init_func) + +/** + * qdf_virt_module_exit - Specify the module's exit point. + */ +#define qdf_virt_module_exit(_mod_exit_func) \ + __qdf_virt_module_exit(_mod_exit_func) + +/** + * qdf_virt_module_name - Specify the module's name. + */ +#define qdf_virt_module_name(_name) __qdf_virt_module_name(_name) + + +/** + * qdf_export_symbol - Export a symbol from a module. + */ +#define qdf_export_symbol(_sym) __qdf_export_symbol(_sym) + +/** + * qdf_declare_param - Declare a module parameter. + */ +#define qdf_declare_param(name, _type) __qdf_declare_param(name, _type) + +/** + * qdf_declare_param_array - Declare a module parameter. + */ +#define qdf_declare_param_array(name, _type, _num) \ + __qdf_declare_param_array(name, _type, _num) + +#endif /*_QDF_MODULE_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_nbuf.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_nbuf.h new file mode 100644 index 0000000000000000000000000000000000000000..e4aa99db54f8b0ba990ead38814726e9655e1690 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_nbuf.h @@ -0,0 +1,3633 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_nbuf_public network buffer API + * This file defines the network buffer abstraction. + */ + +#ifndef _QDF_NBUF_H +#define _QDF_NBUF_H + +#include +#include +#include +#include +#include +#include + +#define IPA_NBUF_OWNER_ID 0xaa55aa55 +#define QDF_NBUF_PKT_TRAC_TYPE_DNS 0x01 +#define QDF_NBUF_PKT_TRAC_TYPE_EAPOL 0x02 +#define QDF_NBUF_PKT_TRAC_TYPE_DHCP 0x04 +#define QDF_NBUF_PKT_TRAC_TYPE_MGMT_ACTION 0x08 +#define QDF_NBUF_PKT_TRAC_TYPE_ARP 0x10 +#define QDF_NBUF_PKT_TRAC_TYPE_ICMP 0x20 +#define QDF_NBUF_PKT_TRAC_TYPE_ICMPv6 0x40 +#define QDF_HL_CREDIT_TRACKING 0x80 + +#define QDF_NBUF_PKT_TRAC_MAX_STRING 12 +#define QDF_NBUF_PKT_TRAC_PROTO_STRING 4 +#define QDF_NBUF_PKT_ERROR 1 + +#define QDF_NBUF_TRAC_IPV4_OFFSET 14 +#define QDF_NBUF_TRAC_IPV4_HEADER_MASK 0xF +#define QDF_NBUF_TRAC_IPV4_HEADER_SIZE 20 +#define QDF_NBUF_TRAC_DHCP_SRV_PORT 67 +#define QDF_NBUF_TRAC_DHCP_CLI_PORT 68 +#define QDF_NBUF_TRAC_ETH_TYPE_OFFSET 12 +#define QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET 16 +#define QDF_NBUF_TRAC_DOUBLE_VLAN_ETH_TYPE_OFFSET 20 +#define QDF_NBUF_TRAC_EAPOL_ETH_TYPE 0x888E +#define QDF_NBUF_TRAC_WAPI_ETH_TYPE 0x88b4 +#define QDF_NBUF_TRAC_ARP_ETH_TYPE 0x0806 +#define QDF_NBUF_PKT_IPV4_DSCP_MASK 0xFC +#define QDF_NBUF_PKT_IPV4_DSCP_SHIFT 0x02 +#define QDF_NBUF_TRAC_TDLS_ETH_TYPE 0x890D +#define QDF_NBUF_TRAC_IPV4_ETH_TYPE 0x0800 +#define QDF_NBUF_TRAC_IPV6_ETH_TYPE 0x86dd +#define QDF_NBUF_DEST_MAC_OFFSET 0 +#define QDF_NBUF_SRC_MAC_OFFSET 6 +#define QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET 23 +#define QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET 30 +#define QDF_NBUF_TRAC_IPV4_SRC_ADDR_OFFSET 26 +#define QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET 20 +#define QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK 0xE0000000 +#define QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK 0xF0000000 +#define QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET 38 +#define QDF_NBUF_TRAC_IPV6_DEST_ADDR 0xFF00 +#define QDF_NBUF_TRAC_IPV6_OFFSET 14 +#define QDF_NBUF_TRAC_IPV6_HEADER_SIZE 40 +#define QDF_NBUF_TRAC_ICMP_TYPE 1 +#define QDF_NBUF_TRAC_TCP_TYPE 6 +#define QDF_NBUF_TRAC_TCP_FLAGS_OFFSET (47 - 34) +#define QDF_NBUF_TRAC_TCP_ACK_OFFSET (42 - 34) +#define QDF_NBUF_TRAC_TCP_HEADER_LEN_OFFSET (46 - 34) +#define QDF_NBUF_TRAC_TCP_ACK_MASK 0x10 +#define QDF_NBUF_TRAC_TCP_SPORT_OFFSET (34 - 34) +#define QDF_NBUF_TRAC_TCP_DPORT_OFFSET (36 - 34) +#define QDF_NBUF_TRAC_UDP_TYPE 17 +#define QDF_NBUF_TRAC_ICMPV6_TYPE 0x3a +#define QDF_NBUF_TRAC_DHCP6_SRV_PORT 547 +#define QDF_NBUF_TRAC_DHCP6_CLI_PORT 546 +#define QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT 5353 +#define QDF_NBUF_TRAC_IP_OFFSET 14 +#define QDF_NBUF_TRAC_VLAN_IP_OFFSET 18 +#define QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET 22 +/* One dword for IPv4 header size unit */ +#define QDF_NBUF_IPV4_HDR_SIZE_UNIT 4 + +/* EAPOL Related MASK */ +#define EAPOL_PACKET_TYPE_OFFSET 15 +#define EAPOL_KEY_INFO_OFFSET 19 +#define EAPOL_PKT_LEN_OFFSET 16 +#define EAPOL_KEY_LEN_OFFSET 21 +#define EAPOL_MASK 0x8013 +#define EAPOL_M1_BIT_MASK 0x8000 +#define EAPOL_M2_BIT_MASK 0x0001 +#define EAPOL_M3_BIT_MASK 0x8013 +#define EAPOL_M4_BIT_MASK 0x0003 + +/* ARP Related MASK */ +#define QDF_NBUF_PKT_ARP_OPCODE_OFFSET 20 +#define QDF_NBUF_PKT_ARPOP_REQ 1 +#define QDF_NBUF_PKT_ARPOP_REPLY 2 +#define QDF_NBUF_PKT_ARP_SRC_IP_OFFSET 28 +#define QDF_NBUF_PKT_ARP_TGT_IP_OFFSET 38 + +/* ICMPv4 Related MASK */ +#define QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET 34 +#define QDF_NBUF_PKT_ICMPv4OP_REQ 0x08 +#define QDF_NBUF_PKT_ICMPv4OP_REPLY 0x00 +#define QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET 26 +#define QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET 30 + +/* TCP Related MASK */ +#define QDF_NBUF_PKT_TCP_OPCODE_OFFSET 47 +#define QDF_NBUF_PKT_TCPOP_SYN 0x02 +#define QDF_NBUF_PKT_TCPOP_SYN_ACK 0x12 +#define QDF_NBUF_PKT_TCPOP_ACK 0x10 +#define QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET 34 +#define QDF_NBUF_PKT_TCP_DST_PORT_OFFSET 36 + +/* DNS Related MASK */ +#define QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET 44 +#define QDF_NBUF_PKT_DNSOP_BITMAP 0xF800 +#define QDF_NBUF_PKT_DNSOP_STANDARD_QUERY 0x0000 +#define QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE 0x8000 +#define QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET 34 +#define QDF_NBUF_PKT_DNS_DST_PORT_OFFSET 36 +#define QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET 54 +#define QDF_NBUF_PKT_DNS_STANDARD_PORT 53 + +/* Tracked Packet types */ +#define QDF_NBUF_TX_PKT_INVALID 0 +#define QDF_NBUF_TX_PKT_DATA_TRACK 1 +#define QDF_NBUF_TX_PKT_MGMT_TRACK 2 +#define QDF_NBUF_RX_PKT_DATA_TRACK 3 + +/* Different Packet states */ +#define QDF_NBUF_TX_PKT_HDD 1 +#define QDF_NBUF_TX_PKT_TXRX_ENQUEUE 2 +#define QDF_NBUF_TX_PKT_TXRX_DEQUEUE 3 +#define QDF_NBUF_TX_PKT_TXRX 4 +#define QDF_NBUF_TX_PKT_HTT 5 +#define QDF_NBUF_TX_PKT_HTC 6 +#define QDF_NBUF_TX_PKT_HIF 7 +#define QDF_NBUF_TX_PKT_CE 8 +#define QDF_NBUF_TX_PKT_FREE 9 +#define QDF_NBUF_TX_PKT_STATE_MAX 10 +#define QDF_NBUF_TX_PKT_LI_DP 11 + +/* qdf_nbuf allocate and map max retry threshold when failed */ +#define QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD 20 + +/* Enable flag to print TSO specific prints in datapath */ +#ifdef TSO_DEBUG_LOG_ENABLE +#define TSO_DEBUG(fmt, args ...) \ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_NONE, \ + fmt, ## args) +#else +#define TSO_DEBUG(fmt, args ...) +#endif + +#define IEEE80211_AMPDU_FLAG 0x01 + +#ifdef GET_MSDU_AGGREGATION +#define IEEE80211_AMSDU_FLAG 0x02 +#endif + +#define MAX_CHAIN 8 +#define QDF_MON_STATUS_MPDU_FCS_BMAP_NWORDS 8 + +/** + * struct mon_rx_status - This will have monitor mode rx_status extracted from + * htt_rx_desc used later to update radiotap information. + * @tsft: Time Synchronization Function timer + * @ppdu_timestamp: Timestamp in the PPDU_START TLV + * @preamble_type: Preamble type in radio header + * @chan_freq: Capture channel frequency + * @chan_num: Capture channel number + * @chan_flags: Bitmap of Channel flags, IEEE80211_CHAN_TURBO, + * IEEE80211_CHAN_CCK... + * @ht_flags: HT flags, only present for HT frames. + * @vht_flags: VHT flags, only present for VHT frames. + * @vht_flag_values1-5: Contains corresponding data for flags field + * @he_flags: HE (11ax) flags, only present in HE frames + * @he_mu_flags: HE-MU (11ax) flags, only present in HE frames + * @he_mu_other_flags: HE-MU-OTHER (11ax) flags, only present in HE frames + * @he_sig_A1_known: HE (11ax) sig A1 known field + * @he_sig_A2_known: HE (11ax) sig A2 known field + * @he_sig_b_common: HE (11ax) sig B common field + * @he_sig_b_common_known: HE (11ax) sig B common known field + * @l_sig_a_info: L_SIG_A value coming in Rx descriptor + * @l_sig_b_info: L_SIG_B value coming in Rx descriptor + * @rate: Rate in terms 500Kbps + * @rtap_flags: Bit map of available fields in the radiotap + * @ant_signal_db: Rx packet RSSI + * @nr_ant: Number of Antennas used for streaming + * @mcs: MCS index of Rx frame + * @ht_mcs: MCS index for HT RX frames + * @nss: Number of spatial streams + * @bw: bandwidth of rx frame + * @is_stbc: Is STBC enabled + * @sgi: Rx frame short guard interval + * @he_re: HE range extension + * @ldpc: ldpc enabled + * @beamformed: Is frame beamformed. + * @he_sig_b_common_RU[4]: HE (11ax) common RU assignment index + * @rssi_comb: Combined RSSI + * @rssi[MAX_CHAIN]: 8 bits RSSI per 20Mhz per chain + * @duration: 802.11 Duration + * @frame_control_info_valid: field indicates if fc value is valid + * @frame_control: frame control field + * @ast_index: AST table hash index + * @tid: QoS traffic tid number + * @rs_fcs_err: FCS error flag + * @rs_flags: Flags to indicate AMPDU or AMSDU aggregation + * @cck_flag: Flag to indicate CCK modulation + * @ofdm_flag: Flag to indicate OFDM modulation + * @ulofdma_flag: Flag to indicate UL OFDMA PPDU + * @he_per_user_1: HE per user info1 + * @he_per_user_2: HE per user info2 + * @he_per_user_position: HE per user position info + * @he_per_user_known: HE per user known info + * @he_flags1: HE flags + * @he_flags2: HE flags + * @he_RU[4]: HE RU assignment index + * @he_data1: HE property of received frame + * @he_data2: HE property of received frame + * @he_data3: HE property of received frame + * @he_data4: HE property of received frame + * @he_data5: HE property of received frame + * @prev_ppdu_id: ppdu_id in previously received message + * @ppdu_id: Id of the PLCP protocol data unit + * + * The following variables are not coming from the TLVs. + * These variables are placeholders for passing information to update_radiotap + * function. + * @device_id: Device ID coming from sub-system (PCI, AHB etc..) + * @chan_noise_floor: Channel Noise Floor for the pdev + * @data_sequence_control_info_valid: field to indicate validity of seq control + * @first_data_seq_ctrl: Sequence ctrl field of first data frame + * @rxpcu_filter_pass: Flag which indicates whether RX packets are received in + * BSS mode(not in promisc mode) + * @rssi_chain: Rssi chain per nss per bw + */ +struct mon_rx_status { + uint64_t tsft; + uint32_t ppdu_timestamp; + uint32_t preamble_type; + qdf_freq_t chan_freq; + uint16_t chan_num; + uint16_t chan_flags; + uint16_t ht_flags; + uint16_t vht_flags; + uint16_t vht_flag_values6; + uint16_t he_flags; + uint16_t he_mu_flags; + uint16_t he_mu_other_flags; + uint16_t he_sig_A1_known; + uint16_t he_sig_A2_known; + uint16_t he_sig_b_common; + uint16_t he_sig_b_common_known; + uint32_t l_sig_a_info; + uint32_t l_sig_b_info; + uint8_t rate; + uint8_t rtap_flags; + uint8_t ant_signal_db; + uint8_t nr_ant; + uint8_t mcs; + uint8_t ht_mcs; + uint8_t nss; + uint16_t tcp_msdu_count; + uint16_t udp_msdu_count; + uint16_t other_msdu_count; + uint8_t bw; + uint8_t vht_flag_values1; + uint8_t vht_flag_values2; + uint8_t vht_flag_values3[4]; + uint8_t vht_flag_values4; + uint8_t vht_flag_values5; + uint8_t is_stbc; + uint8_t sgi; + uint8_t he_re; + uint8_t ldpc; + uint8_t beamformed; + uint8_t he_sig_b_common_RU[4]; + int8_t rssi_comb; + uint64_t rssi[MAX_CHAIN]; + uint8_t reception_type; + uint16_t duration; + uint8_t frame_control_info_valid; + uint16_t frame_control; + uint32_t ast_index; + uint32_t tid; + uint8_t rs_fcs_err; + uint8_t rs_flags; + uint8_t cck_flag; + uint8_t ofdm_flag; + uint8_t ulofdma_flag; + /* New HE radiotap fields */ + uint16_t he_per_user_1; + uint16_t he_per_user_2; + uint8_t he_per_user_position; + uint8_t he_per_user_known; + uint16_t he_flags1; + uint16_t he_flags2; + uint8_t he_RU[4]; + uint16_t he_data1; + uint16_t he_data2; + uint16_t he_data3; + uint16_t he_data4; + uint16_t he_data5; + uint16_t he_data6; + uint32_t ppdu_len; + uint32_t prev_ppdu_id; + uint32_t ppdu_id; + uint32_t device_id; + int16_t chan_noise_floor; + uint8_t monitor_direct_used; + uint8_t data_sequence_control_info_valid; + uint16_t first_data_seq_ctrl; + uint8_t ltf_size; + uint8_t rxpcu_filter_pass; + int8_t rssi_chain[8][8]; + uint32_t rx_antenna; +}; + +/** + * struct mon_rx_user_status - This will have monitor mode per user rx_status + * extracted from hardware TLV. + * @mcs: MCS index of Rx frame + * @nss: Number of spatial streams + * @mu_ul_info_valid: MU UL info below is valid + * @ofdma_ru_start_index: OFDMA RU start index + * @ofdma_ru_width: OFDMA total RU width + * @ofdma_ru_size: OFDMA RU size index + * @mu_ul_user_v0_word0: MU UL user info word 0 + * @mu_ul_user_v0_word1: MU UL user info word 1 + * @ast_index: AST table hash index + * @tid: QoS traffic tid number + * @tcp_msdu_count: tcp protocol msdu count + * @udp_msdu_count: udp protocol msdu count + * @other_msdu_count: other protocol msdu count + * @frame_control: frame control field + * @frame_control_info_valid: field indicates if fc value is valid + * @data_sequence_control_info_valid: field to indicate validity of seq control + * @first_data_seq_ctrl: Sequence ctrl field of first data frame + * @preamble_type: Preamble type in radio header + * @ht_flags: HT flags, only present for HT frames. + * @vht_flags: VHT flags, only present for VHT frames. + * @he_flags: HE (11ax) flags, only present in HE frames + * @rtap_flags: Bit map of available fields in the radiotap + * @rs_flags: Flags to indicate AMPDU or AMSDU aggregation + * @mpdu_cnt_fcs_ok: mpdu count received with fcs ok + * @mpdu_cnt_fcs_err: mpdu count received with fcs ok bitmap + * @mpdu_fcs_ok_bitmap: mpdu with fcs ok bitmap + * @mpdu_ok_byte_count: mpdu byte count with fcs ok + * @mpdu_err_byte_count: mpdu byte count with fcs err + */ +struct mon_rx_user_status { + uint32_t mcs:4, + nss:3, + mu_ul_info_valid:1, + ofdma_ru_start_index:7, + ofdma_ru_width:7, + ofdma_ru_size:8; + uint32_t mu_ul_user_v0_word0; + uint32_t mu_ul_user_v0_word1; + uint32_t ast_index; + uint32_t tid; + uint16_t tcp_msdu_count; + uint16_t udp_msdu_count; + uint16_t other_msdu_count; + uint16_t frame_control; + uint8_t frame_control_info_valid; + uint8_t data_sequence_control_info_valid; + uint16_t first_data_seq_ctrl; + uint32_t preamble_type; + uint16_t ht_flags; + uint16_t vht_flags; + uint16_t he_flags; + uint8_t rtap_flags; + uint8_t rs_flags; + uint32_t mpdu_cnt_fcs_ok; + uint32_t mpdu_cnt_fcs_err; + uint32_t mpdu_fcs_ok_bitmap[QDF_MON_STATUS_MPDU_FCS_BMAP_NWORDS]; + uint32_t mpdu_ok_byte_count; + uint32_t mpdu_err_byte_count; +}; + +/** + * struct qdf_radiotap_vendor_ns - Vendor Namespace header as per + * Radiotap spec: https://www.radiotap.org/fields/Vendor%20Namespace.html + * @oui: Vendor OUI + * @selector: sub_namespace selector + * @skip_length: How many bytes of Vendor Namespace data that follows + */ +struct qdf_radiotap_vendor_ns { + uint8_t oui[3]; + uint8_t selector; + uint16_t skip_length; +} __attribute__((__packed__)); + +/** + * strcut qdf_radiotap_vendor_ns_ath - Combined QTI Vendor NS + * including the Radiotap specified Vendor Namespace header and + * QTI specific Vendor Namespace data + * @lsig: L_SIG_A (or L_SIG) + * @device_id: Device Identification + * @lsig_b: L_SIG_B + * @ppdu_start_timestamp: Timestamp from RX_PPDU_START TLV + */ +struct qdf_radiotap_vendor_ns_ath { + struct qdf_radiotap_vendor_ns hdr; + /* QTI specific data follows */ + uint32_t lsig; + uint32_t device_id; + uint32_t lsig_b; + uint32_t ppdu_start_timestamp; +} __attribute__((__packed__)); + +/* Masks for HE SIG known fields in mon_rx_status structure */ +#define QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU0 0x00000001 +#define QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU1 0x00000002 +#define QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU2 0x00000004 +#define QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU3 0x00000008 +#define QDF_MON_STATUS_HE_SIG_B_USER_KNOWN_SIG_B_ALL 0x00fe0000 +#define QDF_MON_STATUS_HE_SIG_A1_HE_FORMAT_SU 0x00000000 +#define QDF_MON_STATUS_HE_SIG_A1_HE_FORMAT_EXT_SU 0x40000000 +#define QDF_MON_STATUS_HE_SIG_A1_HE_FORMAT_TRIG 0xc0000000 + +/* DHCP Related Mask */ +#define QDF_DHCP_OPTION53 (0x35) +#define QDF_DHCP_OPTION53_LENGTH (1) +#define QDF_DHCP_OPTION53_OFFSET (0x11A) +#define QDF_DHCP_OPTION53_LENGTH_OFFSET (0x11B) +#define QDF_DHCP_OPTION53_STATUS_OFFSET (0x11C) +#define DHCP_PKT_LEN_OFFSET 16 +#define DHCP_TRANSACTION_ID_OFFSET 46 +#define QDF_DHCP_DISCOVER (1) +#define QDF_DHCP_OFFER (2) +#define QDF_DHCP_REQUEST (3) +#define QDF_DHCP_DECLINE (4) +#define QDF_DHCP_ACK (5) +#define QDF_DHCP_NAK (6) +#define QDF_DHCP_RELEASE (7) +#define QDF_DHCP_INFORM (8) + +/* ARP Related Mask */ +#define ARP_SUB_TYPE_OFFSET 20 +#define ARP_REQUEST (1) +#define ARP_RESPONSE (2) + +/* IPV4 header fields offset values */ +#define IPV4_PKT_LEN_OFFSET 16 +#define IPV4_TCP_SEQ_NUM_OFFSET 38 +#define IPV4_SRC_ADDR_OFFSET 26 +#define IPV4_DST_ADDR_OFFSET 30 +#define IPV4_SRC_PORT_OFFSET 34 +#define IPV4_DST_PORT_OFFSET 36 + +/* IPV4 ICMP Related Mask */ +#define ICMP_SEQ_NUM_OFFSET 40 +#define ICMP_SUBTYPE_OFFSET 34 +#define ICMP_REQUEST 0x08 +#define ICMP_RESPONSE 0x00 + +#define IPV6_ADDR_STR "%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\ + "%02x%02x:%02x%02x" + +/* IPV6 header fields offset values */ +#define IPV6_PKT_LEN_OFFSET 18 +#define IPV6_TCP_SEQ_NUM_OFFSET 58 +#define IPV6_SRC_ADDR_OFFSET 22 +#define IPV6_DST_ADDR_OFFSET 38 +#define IPV6_SRC_PORT_OFFSET 54 +#define IPV6_DST_PORT_OFFSET 56 + +/* IPV6 ICMPV6 Related Mask */ +#define ICMPV6_SEQ_NUM_OFFSET 60 +#define ICMPV6_SUBTYPE_OFFSET 54 +#define ICMPV6_REQUEST 0x80 +#define ICMPV6_RESPONSE 0x81 +#define ICMPV6_RS 0x85 +#define ICMPV6_RA 0x86 +#define ICMPV6_NS 0x87 +#define ICMPV6_NA 0x88 + +#define QDF_NBUF_IPA_CHECK_MASK 0x80000000 + +/* HE Radiotap data1 Mask */ +#define QDF_MON_STATUS_HE_SU_FORMAT_TYPE 0x0000 +#define QDF_MON_STATUS_HE_EXT_SU_FORMAT_TYPE 0x0001 +#define QDF_MON_STATUS_HE_MU_FORMAT_TYPE 0x0002 +#define QDF_MON_STATUS_HE_TRIG_FORMAT_TYPE 0x0003 + + +#define QDF_MON_STATUS_HE_BEAM_CHANGE_KNOWN 0x0008 +#define QDF_MON_STATUS_HE_DL_UL_KNOWN 0x0010 +#define QDF_MON_STATUS_HE_MCS_KNOWN 0x0020 +#define QDF_MON_STATUS_HE_DCM_KNOWN 0x0040 +#define QDF_MON_STATUS_HE_CODING_KNOWN 0x0080 +#define QDF_MON_STATUS_HE_LDPC_EXTRA_SYMBOL_KNOWN 0x0100 +#define QDF_MON_STATUS_HE_STBC_KNOWN 0x0200 +#define QDF_MON_STATUS_HE_DATA_BW_RU_KNOWN 0x4000 +#define QDF_MON_STATUS_HE_DOPPLER_KNOWN 0x8000 +#define QDF_MON_STATUS_HE_BSS_COLOR_KNOWN 0x0004 + +/* HE Radiotap data2 Mask */ +#define QDF_MON_STATUS_HE_GI_KNOWN 0x0002 +#define QDF_MON_STATUS_TXBF_KNOWN 0x0010 +#define QDF_MON_STATUS_PE_DISAMBIGUITY_KNOWN 0x0020 +#define QDF_MON_STATUS_TXOP_KNOWN 0x0040 +#define QDF_MON_STATUS_LTF_SYMBOLS_KNOWN 0x0004 +#define QDF_MON_STATUS_PRE_FEC_PADDING_KNOWN 0x0008 +#define QDF_MON_STATUS_MIDABLE_PERIODICITY_KNOWN 0x0080 + +/* HE radiotap data3 shift values */ +#define QDF_MON_STATUS_BEAM_CHANGE_SHIFT 6 +#define QDF_MON_STATUS_DL_UL_SHIFT 7 +#define QDF_MON_STATUS_TRANSMIT_MCS_SHIFT 8 +#define QDF_MON_STATUS_DCM_SHIFT 12 +#define QDF_MON_STATUS_CODING_SHIFT 13 +#define QDF_MON_STATUS_LDPC_EXTRA_SYMBOL_SHIFT 14 +#define QDF_MON_STATUS_STBC_SHIFT 15 + +/* HE radiotap data4 shift values */ +#define QDF_MON_STATUS_STA_ID_SHIFT 4 + +/* HE radiotap data5 */ +#define QDF_MON_STATUS_GI_SHIFT 4 +#define QDF_MON_STATUS_HE_LTF_SIZE_SHIFT 6 +#define QDF_MON_STATUS_HE_LTF_SYM_SHIFT 8 +#define QDF_MON_STATUS_TXBF_SHIFT 14 +#define QDF_MON_STATUS_PE_DISAMBIGUITY_SHIFT 15 +#define QDF_MON_STATUS_PRE_FEC_PAD_SHIFT 12 + +/* HE radiotap data6 */ +#define QDF_MON_STATUS_DOPPLER_SHIFT 4 +#define QDF_MON_STATUS_TXOP_SHIFT 8 + +/* HE radiotap HE-MU flags1 */ +#define QDF_MON_STATUS_SIG_B_MCS_KNOWN 0x0010 +#define QDF_MON_STATUS_SIG_B_DCM_KNOWN 0x0040 +#define QDF_MON_STATUS_SIG_B_SYM_NUM_KNOWN 0x8000 +#define QDF_MON_STATUS_RU_0_KNOWN 0x0100 +#define QDF_MON_STATUS_RU_1_KNOWN 0x0200 +#define QDF_MON_STATUS_RU_2_KNOWN 0x0400 +#define QDF_MON_STATUS_RU_3_KNOWN 0x0800 +#define QDF_MON_STATUS_DCM_FLAG_1_SHIFT 5 +#define QDF_MON_STATUS_SPATIAL_REUSE_MU_KNOWN 0x0100 +#define QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_1_KNOWN 0x4000 + +/* HE radiotap HE-MU flags2 */ +#define QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_2_SHIFT 3 +#define QDF_MON_STATUS_BW_KNOWN 0x0004 +#define QDF_MON_STATUS_NUM_SIG_B_SYMBOLS_SHIFT 4 +#define QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_2_KNOWN 0x0100 +#define QDF_MON_STATUS_NUM_SIG_B_FLAG_2_SHIFT 9 +#define QDF_MON_STATUS_LTF_FLAG_2_SYMBOLS_SHIFT 12 +#define QDF_MON_STATUS_LTF_KNOWN 0x8000 + +/* HE radiotap per_user_1 */ +#define QDF_MON_STATUS_STA_SPATIAL_SHIFT 11 +#define QDF_MON_STATUS_TXBF_SHIFT 14 +#define QDF_MON_STATUS_RESERVED_SET_TO_1_SHIFT 19 +#define QDF_MON_STATUS_STA_CODING_SHIFT 20 + +/* HE radiotap per_user_2 */ +#define QDF_MON_STATUS_STA_MCS_SHIFT 4 +#define QDF_MON_STATUS_STA_DCM_SHIFT 5 + +/* HE radiotap per user known */ +#define QDF_MON_STATUS_USER_FIELD_POSITION_KNOWN 0x01 +#define QDF_MON_STATUS_STA_ID_PER_USER_KNOWN 0x02 +#define QDF_MON_STATUS_STA_NSTS_KNOWN 0x04 +#define QDF_MON_STATUS_STA_TX_BF_KNOWN 0x08 +#define QDF_MON_STATUS_STA_SPATIAL_CONFIG_KNOWN 0x10 +#define QDF_MON_STATUS_STA_MCS_KNOWN 0x20 +#define QDF_MON_STATUS_STA_DCM_KNOWN 0x40 +#define QDF_MON_STATUS_STA_CODING_KNOWN 0x80 + +/** + * enum qdf_proto_type - protocol type + * @QDF_PROTO_TYPE_DHCP - DHCP + * @QDF_PROTO_TYPE_EAPOL - EAPOL + * @QDF_PROTO_TYPE_ARP - ARP + * @QDF_PROTO_TYPE_MGMT - MGMT + * @QDF_PROTO_TYPE_ICMP - ICMP + * @QDF_PROTO_TYPE_ICMPv6 - ICMPv6 + * @QDF_PROTO_TYPE_EVENT - EVENT + * @QDF_PROTO_TYPE_DNS - DNS + */ +enum qdf_proto_type { + QDF_PROTO_TYPE_DHCP, + QDF_PROTO_TYPE_EAPOL, + QDF_PROTO_TYPE_ARP, + QDF_PROTO_TYPE_MGMT, + QDF_PROTO_TYPE_ICMP, + QDF_PROTO_TYPE_ICMPv6, + QDF_PROTO_TYPE_EVENT, + QDF_PROTO_TYPE_DNS, + QDF_PROTO_TYPE_MAX +}; + +/** + * qdf_reception_type - reception type used by lithium phy TLV + * @QDF_RECEPTION_TYPE_ULOFMDA - UL OFDMA + * @QDF_RECEPTION_TYPE_ULMIMO - UL MIMO + * @QQDF_RECEPTION_TYPE_FRAMELESS - Frame less + * @QDF_RECEPTION_TYPE_OTHER - All the other types + */ +enum qdf_reception_type { + QDF_RECEPTION_TYPE_ULOFMDA, + QDF_RECEPTION_TYPE_ULMIMO, + QDF_RECEPTION_TYPE_OTHER, + QDF_RECEPTION_TYPE_FRAMELESS +}; + +/** + * cb_ftype - Frame type information in skb cb + * @CB_FTYPE_INVALID - Invalid + * @CB_FTYPE_MCAST2UCAST - Multicast to Unicast converted packet + * @CB_FTYPE_TSO - TCP Segmentation Offload + * @CB_FTYPE_TSO_SG - TSO Scatter Gather + * @CB_FTYPE_SG - Scatter Gather + * @CB_FTYPE_INTRABSS_FWD - Intra BSS forwarding + * @CB_FTYPE_RX_INFO - Rx information + * @CB_FTYPE_MESH_RX_INFO - Mesh Rx information + * @CB_FTYPE_MESH_TX_INFO - Mesh Tx information + * @CB_FTYPE_DMS - Directed Multicast Service + */ +enum cb_ftype { + CB_FTYPE_INVALID = 0, + CB_FTYPE_MCAST2UCAST = 1, + CB_FTYPE_TSO = 2, + CB_FTYPE_TSO_SG = 3, + CB_FTYPE_SG = 4, + CB_FTYPE_INTRABSS_FWD = 5, + CB_FTYPE_RX_INFO = 6, + CB_FTYPE_MESH_RX_INFO = 7, + CB_FTYPE_MESH_TX_INFO = 8, + CB_FTYPE_DMS = 9, +}; + +/** + * @qdf_nbuf_t - Platform indepedent packet abstraction + */ +typedef __qdf_nbuf_t qdf_nbuf_t; + +/** + * typedef qdf_nbuf_queue_head_t - Platform indepedent nbuf queue head + */ +typedef __qdf_nbuf_queue_head_t qdf_nbuf_queue_head_t; + +/** + * @qdf_dma_map_cb_t - Dma map callback prototype + */ +typedef void (*qdf_dma_map_cb_t)(void *arg, qdf_nbuf_t buf, + qdf_dma_map_t dmap); + +/** + * @qdf_nbuf_queue_t - Platform independent packet queue abstraction + */ +typedef __qdf_nbuf_queue_t qdf_nbuf_queue_t; + +/* BUS/DMA mapping routines */ + +static inline QDF_STATUS +qdf_nbuf_dmamap_create(qdf_device_t osdev, qdf_dma_map_t *dmap) +{ + return __qdf_nbuf_dmamap_create(osdev, dmap); +} + +static inline void +qdf_nbuf_dmamap_destroy(qdf_device_t osdev, qdf_dma_map_t dmap) +{ + __qdf_nbuf_dmamap_destroy(osdev, dmap); +} + +static inline void +qdf_nbuf_dmamap_set_cb(qdf_dma_map_t dmap, qdf_dma_map_cb_t cb, void *arg) +{ + __qdf_nbuf_dmamap_set_cb(dmap, cb, arg); +} + +static inline void +qdf_nbuf_set_send_complete_flag(qdf_nbuf_t buf, bool flag) +{ + __qdf_nbuf_set_send_complete_flag(buf, flag); +} + +#define QDF_NBUF_QUEUE_WALK_SAFE(queue, var, tvar) \ + __qdf_nbuf_queue_walk_safe(queue, var, tvar) + +#ifdef NBUF_MAP_UNMAP_DEBUG +/** + * qdf_nbuf_map_check_for_leaks() - check for nbut map leaks + * + * Check for net buffers that have been mapped, but never unmapped. + * + * Returns: None + */ +void qdf_nbuf_map_check_for_leaks(void); + +QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + const char *func, + uint32_t line); + +#define qdf_nbuf_map(osdev, buf, dir) \ + qdf_nbuf_map_debug(osdev, buf, dir, __func__, __LINE__) + +void qdf_nbuf_unmap_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + const char *func, + uint32_t line); + +#define qdf_nbuf_unmap(osdev, buf, dir) \ + qdf_nbuf_unmap_debug(osdev, buf, dir, __func__, __LINE__) + +QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + const char *func, + uint32_t line); + +#define qdf_nbuf_map_single(osdev, buf, dir) \ + qdf_nbuf_map_single_debug(osdev, buf, dir, __func__, __LINE__) + +void qdf_nbuf_unmap_single_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + const char *func, + uint32_t line); + +#define qdf_nbuf_unmap_single(osdev, buf, dir) \ + qdf_nbuf_unmap_single_debug(osdev, buf, dir, __func__, __LINE__) + +QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *func, + uint32_t line); + +#define qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes) \ + qdf_nbuf_map_nbytes_debug(osdev, buf, dir, nbytes, __func__, __LINE__) + +void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *func, + uint32_t line); + +#define qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes) \ + qdf_nbuf_unmap_nbytes_debug(osdev, buf, dir, nbytes, __func__, __LINE__) + +QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *func, + uint32_t line); + +#define qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes) \ + qdf_nbuf_map_nbytes_single_debug(osdev, buf, dir, nbytes, \ + __func__, __LINE__) + +void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *func, + uint32_t line); + +#define qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes) \ + qdf_nbuf_unmap_nbytes_single_debug(osdev, buf, dir, nbytes, \ + __func__, __LINE__) + +#else /* NBUF_MAP_UNMAP_DEBUG */ + +static inline void qdf_nbuf_map_check_for_leaks(void) {} + +static inline QDF_STATUS +qdf_nbuf_map(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + return __qdf_nbuf_map(osdev, buf, dir); +} + +static inline void +qdf_nbuf_unmap(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + __qdf_nbuf_unmap(osdev, buf, dir); +} + +static inline QDF_STATUS +qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + return __qdf_nbuf_map_single(osdev, buf, dir); +} + +static inline void +qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + __qdf_nbuf_unmap_single(osdev, buf, dir); +} + +static inline QDF_STATUS +qdf_nbuf_map_nbytes(qdf_device_t osdev, qdf_nbuf_t buf, + qdf_dma_dir_t dir, int nbytes) +{ + return __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes); +} + +static inline void +qdf_nbuf_unmap_nbytes(qdf_device_t osdev, + qdf_nbuf_t buf, qdf_dma_dir_t dir, int nbytes) +{ + __qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes); +} + +static inline QDF_STATUS +qdf_nbuf_map_nbytes_single( + qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir, int nbytes) +{ + return __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes); +} + +static inline void +qdf_nbuf_unmap_nbytes_single( + qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir, int nbytes) +{ + return __qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes); +} +#endif /* NBUF_MAP_UNMAP_DEBUG */ + +/** + * qdf_nbuf_queue_head_dequeue() - dequeue nbuf from the head of queue + * @nbuf_queue_head: pointer to nbuf queue head + * + * Return: pointer to network buffer dequeued + */ +static inline +qdf_nbuf_t qdf_nbuf_queue_head_dequeue(qdf_nbuf_queue_head_t *nbuf_queue_head) +{ + return __qdf_nbuf_queue_head_dequeue(nbuf_queue_head); +} + +/** + * qdf_nbuf_queue_head_qlen() - length of the queue + * @nbuf_queue_head: pointer to nbuf queue head + * + * Return: length of queue (number of nbufs) pointed by qdf_nbuf_queue_head_t + */ +static inline +uint32_t qdf_nbuf_queue_head_qlen(qdf_nbuf_queue_head_t *nbuf_queue_head) +{ + return __qdf_nbuf_queue_head_qlen(nbuf_queue_head); +} + +/** + * qdf_nbuf_queue_head_enqueue_tail() - enqueue nbuf into queue tail + * @nbuf_queue_head: pointer to nbuf queue head + * @nbuf: nbuf to be enqueued + * + * Return: None + */ +static inline +void qdf_nbuf_queue_head_enqueue_tail(qdf_nbuf_queue_head_t *nbuf_queue_head, + qdf_nbuf_t nbuf) +{ + return __qdf_nbuf_queue_head_enqueue_tail(nbuf_queue_head, nbuf); +} + +/** + * qdf_nbuf_queue_head_init() - initialize qdf_nbuf_queue_head_t + * @nbuf_queue_head: pointer to nbuf queue head to be initialized + * + * Return: None + */ +static inline +void qdf_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head) +{ + return __qdf_nbuf_queue_head_init(nbuf_queue_head); +} + +/** + * qdf_nbuf_queue_head_purge() - purge qdf_nbuf_queue_head_t + * @nbuf_queue_head: pointer to nbuf queue head to be purged + * + * Return: None + */ +static inline +void qdf_nbuf_queue_head_purge(qdf_nbuf_queue_head_t *nbuf_queue_head) +{ + return __qdf_nbuf_queue_head_purge(nbuf_queue_head); +} + +/** + * qdf_nbuf_queue_head_lock() - Acquire the nbuf_queue_head lock + * @head: nbuf_queue_head of the nbuf_list for which lock is to be acquired + * + * Return: void + */ +static inline void qdf_nbuf_queue_head_lock(qdf_nbuf_queue_head_t *head) +{ + __qdf_nbuf_queue_head_lock(head); +} + +/** + * qdf_nbuf_queue_head_unlock() - Release the nbuf queue lock + * @head: nbuf_queue_head of the nbuf_list for which lock is to be release + * + * Return: void + */ +static inline void qdf_nbuf_queue_head_unlock(qdf_nbuf_queue_head_t *head) +{ + __qdf_nbuf_queue_head_unlock(head); +} + +static inline void +qdf_nbuf_sync_for_cpu(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + __qdf_nbuf_sync_for_cpu(osdev, buf, dir); +} + +static inline int qdf_nbuf_get_num_frags(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_num_frags(buf); +} + +/** + * qdf_nbuf_get_frag_len() - get fragment length + * @buf: Network buffer + * @frag_num: Fragment number + * + * Return: Fragment length + */ +static inline int qdf_nbuf_get_frag_len(qdf_nbuf_t buf, int frag_num) +{ + QDF_BUG(!(frag_num >= QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)); + return __qdf_nbuf_get_frag_len(buf, frag_num); +} + +/** + * qdf_nbuf_get_frag_vaddr() - get fragment virtual address + * @buf: Network buffer + * @frag_num: Fragment number + * + * Return: Fragment virtual address + */ +static inline unsigned char *qdf_nbuf_get_frag_vaddr(qdf_nbuf_t buf, + int frag_num) +{ + QDF_BUG(!(frag_num >= QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)); + return __qdf_nbuf_get_frag_vaddr(buf, frag_num); +} + +/** + * qdf_nbuf_get_frag_vaddr_always() - get fragment virtual address + * @buf: Network buffer + * + * Return: Fragment virtual address + */ +static inline unsigned char * +qdf_nbuf_get_frag_vaddr_always(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_frag_vaddr_always(buf); +} + +/** + * qdf_nbuf_get_frag_paddr() - get physical address for skb linear buffer + * or skb fragment, based on frag_num passed + * @buf: Network buffer + * @frag_num: Fragment number + * + * Return: Fragment physical address + */ +static inline qdf_dma_addr_t qdf_nbuf_get_frag_paddr(qdf_nbuf_t buf, + unsigned int frag_num) +{ + QDF_BUG(!(frag_num >= QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)); + return __qdf_nbuf_get_frag_paddr(buf, frag_num); +} + +/** + * qdf_nbuf_get_tx_frag_paddr() - get physical address for skb fragments only + * @buf: Network buffer + * + * Return: Fragment physical address + * Usage guideline: Use “qdf_nbuf_frag_map()†to dma map the specific + * skb fragment , followed by “qdf_nbuf_get_tx_frag_paddr†+ */ +static inline qdf_dma_addr_t qdf_nbuf_get_tx_frag_paddr(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_tx_frag_paddr(buf); +} + +/** + * qdf_nbuf_get_frag_is_wordstream() - is fragment wordstream + * @buf: Network buffer + * @frag_num: Fragment number + * + * Return: Fragment wordstream or not + */ +static inline int qdf_nbuf_get_frag_is_wordstream(qdf_nbuf_t buf, int frag_num) +{ + QDF_BUG(!(frag_num >= QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)); + return __qdf_nbuf_get_frag_is_wordstream(buf, frag_num); +} + +/** + * qdf_nbuf_set_frag_is_wordstream() - set fragment wordstream + * @buf: Network buffer + * @frag_num: Fragment number + * @is_wordstream: Wordstream + * + * Return: none + */ +static inline void +qdf_nbuf_set_frag_is_wordstream(qdf_nbuf_t buf, + int frag_num, int is_wordstream) +{ + QDF_BUG(!(frag_num >= QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)); + __qdf_nbuf_set_frag_is_wordstream(buf, frag_num, is_wordstream); +} + +static inline void +qdf_nbuf_set_vdev_ctx(qdf_nbuf_t buf, uint8_t vdev_id) +{ + __qdf_nbuf_set_vdev_ctx(buf, vdev_id); +} + +static inline void +qdf_nbuf_set_tx_ftype(qdf_nbuf_t buf, enum cb_ftype type) +{ + __qdf_nbuf_set_tx_ftype(buf, type); +} + +static inline void +qdf_nbuf_set_rx_ftype(qdf_nbuf_t buf, enum cb_ftype type) +{ + __qdf_nbuf_set_rx_ftype(buf, type); +} + + + +static inline uint8_t +qdf_nbuf_get_vdev_ctx(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_vdev_ctx(buf); +} + +static inline enum cb_ftype qdf_nbuf_get_tx_ftype(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_tx_ftype(buf); +} + +static inline enum cb_ftype qdf_nbuf_get_rx_ftype(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_rx_ftype(buf); +} + + +static inline qdf_dma_addr_t +qdf_nbuf_mapped_paddr_get(qdf_nbuf_t buf) +{ + return __qdf_nbuf_mapped_paddr_get(buf); +} + +static inline void +qdf_nbuf_mapped_paddr_set(qdf_nbuf_t buf, qdf_dma_addr_t paddr) +{ + __qdf_nbuf_mapped_paddr_set(buf, paddr); +} + +static inline void +qdf_nbuf_frag_push_head(qdf_nbuf_t buf, + int frag_len, char *frag_vaddr, + qdf_dma_addr_t frag_paddr) +{ + __qdf_nbuf_frag_push_head(buf, frag_len, frag_vaddr, frag_paddr); +} + +#define qdf_nbuf_num_frags_init(_nbuf) __qdf_nbuf_num_frags_init((_nbuf)) + +/** + * qdf_nbuf_set_rx_chfrag_start() - set msdu start bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void +qdf_nbuf_set_rx_chfrag_start(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_rx_chfrag_start(buf, val); +} + +/** + * qdf_nbuf_is_rx_chfrag_start() - get msdu start bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_rx_chfrag_start(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_rx_chfrag_start(buf); +} + +/** + * qdf_nbuf_set_rx_chfrag_cont() - set msdu continuation bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void +qdf_nbuf_set_rx_chfrag_cont(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_rx_chfrag_cont(buf, val); +} + +/** + * qdf_nbuf_is_rx_chfrag_cont() - get msdu continuation bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_rx_chfrag_cont(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_rx_chfrag_cont(buf); +} + +/** + * qdf_nbuf_set_rx_chfrag_end() - set msdu end bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void qdf_nbuf_set_rx_chfrag_end(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_rx_chfrag_end(buf, val); +} + +/** + * qdf_nbuf_is_rx_chfrag_end() - set msdu end bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_rx_chfrag_end(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_rx_chfrag_end(buf); +} + +/** + * qdf_nbuf_set_da_mcbc() - set da is mcbc + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void +qdf_nbuf_set_da_mcbc(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_da_mcbc(buf, val); +} + +/** + * qdf_nbuf_is_da_mcbc() - get da is mcbc bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_da_mcbc(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_da_mcbc(buf); +} + +/** + * qdf_nbuf_set_da_valid() - set da valid bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void qdf_nbuf_set_da_valid(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_da_valid(buf, val); +} + +/** + * qdf_nbuf_is_da_valid() - get da valid bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_da_valid(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_da_valid(buf); +} + +/** + * qdf_nbuf_set_sa_valid() - set sa valid bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void qdf_nbuf_set_sa_valid(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_sa_valid(buf, val); +} + +/** + * qdf_nbuf_is_sa_valid() - get da valid bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_sa_valid(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_sa_valid(buf); +} + +/** + * qdf_nbuf_set_rx_retry_flag() - set rx retry flag bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void qdf_nbuf_set_rx_retry_flag(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_rx_retry_flag(buf, val); +} + +/** + * qdf_nbuf_is_rx_retry_flag() - get rx retry flag bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_rx_retry_flag(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_rx_retry_flag(buf); +} + +/** + * qdf_nbuf_set_raw_frame() - set raw_frame bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void qdf_nbuf_set_raw_frame(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_raw_frame(buf, val); +} + +/** + * qdf_nbuf_is_raw_frame() - get raw_frame bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_raw_frame(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_raw_frame(buf); +} + +/** + * qdf_nbuf_set_tid_val() - set tid_val + * @buf: Network buffer + * @val: 4 bits tid value + */ +static inline void qdf_nbuf_set_tid_val(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_tid_val(buf, val); +} + +/** + * qdf_nbuf_get_tid_val() - get tid_val + * @buf: Network buffer + * + * Return: integer value[4 bits tid value] + */ +static inline uint8_t qdf_nbuf_get_tid_val(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_tid_val(buf); +} + +/** + * qdf_nbuf_set_frag_list() - set frag list bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void qdf_nbuf_set_is_frag(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_is_frag(buf, val); +} + +/** + * qdf_nbuf_is_sa_valid() - get da frag list bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_frag(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_frag(buf); +} + +/** + * qdf_nbuf_set_tx_chfrag_start() - set msdu start bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void +qdf_nbuf_set_tx_chfrag_start(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_tx_chfrag_start(buf, val); +} + +/** + * qdf_nbuf_is_tx_chfrag_start() - get msdu start bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_tx_chfrag_start(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_tx_chfrag_start(buf); +} + +/** + * qdf_nbuf_set_tx_chfrag_cont() - set msdu continuation bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void +qdf_nbuf_set_tx_chfrag_cont(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_tx_chfrag_cont(buf, val); +} + +/** + * qdf_nbuf_is_tx_chfrag_cont() - get msdu continuation bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_tx_chfrag_cont(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_tx_chfrag_cont(buf); +} + +/** + * qdf_nbuf_set_tx_chfrag_end() - set msdu end bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void qdf_nbuf_set_tx_chfrag_end(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_tx_chfrag_end(buf, val); +} + +/** + * qdf_nbuf_is_tx_chfrag_end() - set msdu end bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_tx_chfrag_end(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_tx_chfrag_end(buf); +} + +static inline void +qdf_nbuf_dma_map_info(qdf_dma_map_t bmap, qdf_dmamap_info_t *sg) +{ + __qdf_nbuf_dma_map_info(bmap, sg); +} + +/** + * qdf_nbuf_is_tso() - is the network buffer a jumbo packet? + * @buf: Network buffer + * + * Return: 1 - this is a jumbo packet 0 - not a jumbo packet + */ +static inline uint8_t qdf_nbuf_is_tso(qdf_nbuf_t nbuf) +{ + return __qdf_nbuf_is_tso(nbuf); +} + +/** + * qdf_nbuf_get_users() - function to get the number of users referencing this + * network buffer + * + * @nbuf: network buffer + * + * Return: number of user references to nbuf. + */ +static inline int qdf_nbuf_get_users(qdf_nbuf_t nbuf) +{ + return __qdf_nbuf_get_users(nbuf); +} + +/** + * qdf_nbuf_next() - get the next packet in the linked list + * @buf: Network buffer + * + * This function can be used when nbufs are directly linked into a list, + * rather than using a separate network buffer queue object. + * + * Return: next network buffer in the linked list + */ +static inline qdf_nbuf_t qdf_nbuf_next(qdf_nbuf_t buf) +{ + return __qdf_nbuf_next(buf); +} + +#ifdef NBUF_MEMORY_DEBUG +void qdf_net_buf_debug_init(void); +void qdf_net_buf_debug_exit(void); +void qdf_net_buf_debug_clean(void); +void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size, + const char *func_name, uint32_t line_num); +/** + * qdf_net_buf_debug_update_node() - update nbuf in debug hash table + * + * Return: none + */ +void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name, + uint32_t line_num); +void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf); + +/** + * qdf_net_buf_debug_update_map_node() - update nbuf in debug + * hash table with the mapping function info + * @nbuf: network buffer + * @func: function name that requests for mapping the nbuf + * @line_num: function line number + * + * Return: none + */ +void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf, + const char *func_name, + uint32_t line_num); + +/** + * qdf_net_buf_debug_update_unmap_node() - update nbuf in debug + * hash table with the unmap function info + * @nbuf: network buffer + * @func: function name that requests for unmapping the nbuf + * @line_num: function line number + * + * Return: none + */ +void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf, + const char *func_name, + uint32_t line_num); + +/** + * qdf_net_buf_debug_acquire_skb() - acquire skb to avoid memory leak + * @net_buf: Network buf holding head segment (single) + * @func_name: pointer to function name + * @line_num: line number + * + * WLAN driver module's SKB which are allocated by network stack are + * suppose to call this API before freeing it such that the SKB + * is not reported as memory leak. + * + * Return: none + */ +void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf, + const char *func_name, + uint32_t line_num); +void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf); + +/* nbuf allocation rouines */ + +#define qdf_nbuf_alloc(d, s, r, a, p) \ + qdf_nbuf_alloc_debug(d, s, r, a, p, __func__, __LINE__) + +qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size, + int reserve, int align, int prio, + const char *func, uint32_t line); + +#define qdf_nbuf_free(d) \ + qdf_nbuf_free_debug(d, __func__, __LINE__) + +void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line); + +#define qdf_nbuf_clone(buf) \ + qdf_nbuf_clone_debug(buf, __func__, __LINE__) + +/** + * qdf_nbuf_clone_debug() - clone the nbuf (copy is readonly) + * @buf: nbuf to clone from + * @func: name of the calling function + * @line: line number of the callsite + * + * This function clones the nbuf and creates a memory tracking + * node corresponding to that cloned skbuff structure. + * + * Return: cloned buffer + */ +qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, + uint32_t line); + +#define qdf_nbuf_copy(buf) \ + qdf_nbuf_copy_debug(buf, __func__, __LINE__) + +/** + * qdf_nbuf_copy_debug() - returns a private copy of the buf + * @buf: nbuf to copy from + * @func: name of the calling function + * @line: line number of the callsite + * + * This API returns a private copy of the buf, the buf returned is completely + * modifiable by callers. It also creates a memory tracking node corresponding + * to that new skbuff structure. + * + * Return: copied buffer + */ +qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line); + +#define qdf_nbuf_copy_expand(buf, headroom, tailroom) \ + qdf_nbuf_copy_expand_debug(buf, headroom, tailroom, __func__, __LINE__) + +/** + * qdf_nbuf_copy_expand_debug() - copy and expand nbuf + * @buf: Network buf instance + * @headroom: Additional headroom to be added + * @tailroom: Additional tailroom to be added + * @func: name of the calling function + * @line: line number of the callsite + * + * Return: New nbuf that is a copy of buf, with additional head and tailroom + * or NULL if there is no memory + */ +qdf_nbuf_t +qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom, + const char *func, uint32_t line); + +#else /* NBUF_MEMORY_DEBUG */ + +static inline void qdf_net_buf_debug_init(void) {} +static inline void qdf_net_buf_debug_exit(void) {} + +static inline void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf, + const char *func_name, + uint32_t line_num) +{ +} + +static inline void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf) +{ +} + +static inline void +qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name, + uint32_t line_num) +{ +} + +static inline void +qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf, + const char *func_name, + uint32_t line_num) +{ +} + +static inline void +qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf, + const char *func_name, + uint32_t line_num) +{ +} +/* Nbuf allocation rouines */ + +#define qdf_nbuf_alloc(osdev, size, reserve, align, prio) \ + qdf_nbuf_alloc_fl(osdev, size, reserve, align, prio, \ + __func__, __LINE__) +static inline qdf_nbuf_t +qdf_nbuf_alloc_fl(qdf_device_t osdev, qdf_size_t size, int reserve, int align, + int prio, const char *func, uint32_t line) +{ + return __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line); +} + +static inline void qdf_nbuf_free(qdf_nbuf_t buf) +{ + if (qdf_likely(buf)) + __qdf_nbuf_free(buf); +} + +/** + * qdf_nbuf_clone() - clone the nbuf (copy is readonly) + * @buf: Pointer to network buffer + * + * This function clones the nbuf and returns new sk_buff + * structure. + * + * Return: cloned skb + */ +static inline qdf_nbuf_t qdf_nbuf_clone(qdf_nbuf_t buf) +{ + return __qdf_nbuf_clone(buf); +} + +/** + * qdf_nbuf_copy() - returns a private copy of the buf + * @buf: Pointer to network buffer + * + * This API returns a private copy of the buf, the buf returned is completely + * modifiable by callers + * + * Return: skb or NULL + */ +static inline qdf_nbuf_t qdf_nbuf_copy(qdf_nbuf_t buf) +{ + return __qdf_nbuf_copy(buf); +} + +/** + * qdf_nbuf_copy_expand() - copy and expand nbuf + * @buf: Network buf instance + * @headroom: Additional headroom to be added + * @tailroom: Additional tailroom to be added + * + * Return: New nbuf that is a copy of buf, with additional head and tailroom + * or NULL if there is no memory + */ +static inline qdf_nbuf_t qdf_nbuf_copy_expand(qdf_nbuf_t buf, int headroom, + int tailroom) +{ + return __qdf_nbuf_copy_expand(buf, headroom, tailroom); +} +#endif /* NBUF_MEMORY_DEBUG */ + +#ifdef WLAN_FEATURE_FASTPATH +/** + * qdf_nbuf_init_fast() - before put buf into pool,turn it to init state + * + * @buf: buf instance + * Return: data pointer of this buf where new data has to be + * put, or NULL if there is not enough room in this buf. + */ +void qdf_nbuf_init_fast(qdf_nbuf_t nbuf); +#endif /* WLAN_FEATURE_FASTPATH */ + +/** + * @qdf_nbuf_list_free() - free a list of nbufs + * @buf_list: A list of nbufs to be freed + * + * Return: none + */ + +static inline void qdf_nbuf_list_free(qdf_nbuf_t buf_list) +{ + while (buf_list) { + qdf_nbuf_t next = qdf_nbuf_next(buf_list); + qdf_nbuf_free(buf_list); + buf_list = next; + } +} + +static inline void qdf_nbuf_tx_free(qdf_nbuf_t buf_list, int tx_err) +{ + qdf_nbuf_list_free(buf_list); +} + +static inline void qdf_nbuf_ref(qdf_nbuf_t buf) +{ + __qdf_nbuf_ref(buf); +} + +static inline int qdf_nbuf_shared(qdf_nbuf_t buf) +{ + return __qdf_nbuf_shared(buf); +} + +static inline QDF_STATUS qdf_nbuf_cat(qdf_nbuf_t dst, qdf_nbuf_t src) +{ + return __qdf_nbuf_cat(dst, src); +} + +/** + * @qdf_nbuf_copy_bits() - return the length of the copy bits for skb + * @skb: SKB pointer + * @offset: offset + * @len: Length + * @to: To + * + * Return: int32_t + */ +static inline int32_t +qdf_nbuf_copy_bits(qdf_nbuf_t nbuf, uint32_t offset, uint32_t len, void *to) +{ + return __qdf_nbuf_copy_bits(nbuf, offset, len, to); +} + + +/* nbuf manipulation routines */ + +/** + * @qdf_nbuf_head() - return the address of an nbuf's buffer + * @buf: netbuf + * + * Return: head address + */ +static inline uint8_t *qdf_nbuf_head(qdf_nbuf_t buf) +{ + return __qdf_nbuf_head(buf); +} + +/** + * qdf_nbuf_data() - Return the address of the start of data within an nbuf + * @buf: Network buffer + * + * Return: Data address + */ +static inline uint8_t *qdf_nbuf_data(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data(buf); +} + +/** + * qdf_nbuf_data_addr() - Return the address of skb->data + * @buf: Network buffer + * + * Return: Data address + */ +static inline uint8_t *qdf_nbuf_data_addr(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_addr(buf); +} + +/** + * qdf_nbuf_headroom() - amount of headroom int the current nbuf + * @buf: Network buffer + * + * Return: Amount of head room + */ +static inline uint32_t qdf_nbuf_headroom(qdf_nbuf_t buf) +{ + return __qdf_nbuf_headroom(buf); +} + +/** + * qdf_nbuf_tailroom() - amount of tail space available + * @buf: Network buffer + * + * Return: amount of tail room + */ +static inline uint32_t qdf_nbuf_tailroom(qdf_nbuf_t buf) +{ + return __qdf_nbuf_tailroom(buf); +} + +/** + * qdf_nbuf_push_head() - push data in the front + * @buf: Network buf instance + * @size: Size to be pushed + * + * Return: New data pointer of this buf after data has been pushed, + * or NULL if there is not enough room in this buf. + */ +static inline uint8_t *qdf_nbuf_push_head(qdf_nbuf_t buf, qdf_size_t size) +{ + return __qdf_nbuf_push_head(buf, size); +} + +/** + * qdf_nbuf_put_tail() - puts data in the end + * @buf: Network buf instance + * @size: Size to be pushed + * + * Return: Data pointer of this buf where new data has to be + * put, or NULL if there is not enough room in this buf. + */ +static inline uint8_t *qdf_nbuf_put_tail(qdf_nbuf_t buf, qdf_size_t size) +{ + return __qdf_nbuf_put_tail(buf, size); +} + +/** + * qdf_nbuf_pull_head() - pull data out from the front + * @buf: Network buf instance + * @size: Size to be popped + * + * Return: New data pointer of this buf after data has been popped, + * or NULL if there is not sufficient data to pull. + */ +static inline uint8_t *qdf_nbuf_pull_head(qdf_nbuf_t buf, qdf_size_t size) +{ + return __qdf_nbuf_pull_head(buf, size); +} + +/** + * qdf_nbuf_trim_tail() - trim data out from the end + * @buf: Network buf instance + * @size: Size to be popped + * + * Return: none + */ +static inline void qdf_nbuf_trim_tail(qdf_nbuf_t buf, qdf_size_t size) +{ + __qdf_nbuf_trim_tail(buf, size); +} + +/** + * qdf_nbuf_len() - get the length of the buf + * @buf: Network buf instance + * + * Return: total length of this buf. + */ +static inline qdf_size_t qdf_nbuf_len(qdf_nbuf_t buf) +{ + return __qdf_nbuf_len(buf); +} + +/** + * qdf_nbuf_set_pktlen() - set the length of the buf + * @buf: Network buf instance + * @size: Size to be set + * + * Return: none + */ +static inline void qdf_nbuf_set_pktlen(qdf_nbuf_t buf, uint32_t len) +{ + __qdf_nbuf_set_pktlen(buf, len); +} + +/** + * qdf_nbuf_reserve() - trim data out from the end + * @buf: Network buf instance + * @size: Size to be popped + * + * Return: none + */ +static inline void qdf_nbuf_reserve(qdf_nbuf_t buf, qdf_size_t size) +{ + __qdf_nbuf_reserve(buf, size); +} + +/** + * qdf_nbuf_set_data_pointer() - set data pointer + * @buf: Network buf instance + * @data: data pointer + * + * Return: none + */ +static inline void qdf_nbuf_set_data_pointer(qdf_nbuf_t buf, uint8_t *data) +{ + __qdf_nbuf_set_data_pointer(buf, data); +} + +/** + * qdf_nbuf_set_len() - set data length + * @buf: Network buf instance + * @len: data length + * Return: none + */ +static inline void qdf_nbuf_set_len(qdf_nbuf_t buf, uint32_t len) +{ + __qdf_nbuf_set_len(buf, len); +} + +/** + * qdf_nbuf_set_tail_pointer() - set data tail pointer + * @buf: Network buf instance + * @len: length of data + * + * Return: none + */ +static inline void qdf_nbuf_set_tail_pointer(qdf_nbuf_t buf, int len) +{ + __qdf_nbuf_set_tail_pointer(buf, len); +} + +/** + * qdf_nbuf_unlink_no_lock() - unlink a nbuf from nbuf list + * @buf: Network buf instance + * @list: list to use + * + * This is a lockless version, driver must acquire locks if it + * needs to synchronize + * + * Return: none + */ +static inline void +qdf_nbuf_unlink_no_lock(qdf_nbuf_t buf, qdf_nbuf_queue_head_t *list) +{ + __qdf_nbuf_unlink_no_lock(buf, list); +} + +/** + * qdf_nbuf_reset() - reset the buffer data and pointer + * @buf: Network buf instance + * @reserve: reserve + * @align: align + * + * Return: none + */ +static inline void qdf_nbuf_reset(qdf_nbuf_t buf, int reserve, int align) +{ + __qdf_nbuf_reset(buf, reserve, align); +} + +/** + * qdf_nbuf_dev_scratch_is_supported() - dev_scratch support for network buffer + * in kernel + * + * Return: true if dev_scratch is supported + * false if dev_scratch is not supported + */ +static inline bool qdf_nbuf_is_dev_scratch_supported(void) +{ + return __qdf_nbuf_is_dev_scratch_supported(); +} + +/** + * qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer + * @buf: Pointer to network buffer + * + * Return: dev_scratch if dev_scratch supported + * 0 if dev_scratch not supported + */ +static inline unsigned long qdf_nbuf_get_dev_scratch(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_dev_scratch(buf); +} + +/** + * qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer + * @buf: Pointer to network buffer + * @value: value to be set in dev_scratch of network buffer + * + * Return: void + */ +static inline void qdf_nbuf_set_dev_scratch(qdf_nbuf_t buf, unsigned long value) +{ + __qdf_nbuf_set_dev_scratch(buf, value); +} + +/** + * qdf_nbuf_peek_header() - return the data pointer & length of the header + * @buf: Network nbuf + * @addr: Data pointer + * @len: Length of the data + * + * Return: none + */ +static inline void +qdf_nbuf_peek_header(qdf_nbuf_t buf, uint8_t **addr, uint32_t *len) +{ + __qdf_nbuf_peek_header(buf, addr, len); +} + +/* nbuf queue routines */ + +/** + * qdf_nbuf_queue_init() - initialize buf queue + * @head: Network buf queue head + * + * Return: none + */ +static inline void qdf_nbuf_queue_init(qdf_nbuf_queue_t *head) +{ + __qdf_nbuf_queue_init(head); +} + +/** + * qdf_nbuf_queue_add() - append a nbuf to the tail of the buf queue + * @head: Network buf queue head + * @buf: Network buf + * + * Return: none + */ +static inline void qdf_nbuf_queue_add(qdf_nbuf_queue_t *head, qdf_nbuf_t buf) +{ + __qdf_nbuf_queue_add(head, buf); +} + +/** + * qdf_nbuf_queue_insert_head() - insert nbuf at the head of queue + * @head: Network buf queue head + * @buf: Network buf + * + * Return: none + */ +static inline void +qdf_nbuf_queue_insert_head(qdf_nbuf_queue_t *head, qdf_nbuf_t buf) +{ + __qdf_nbuf_queue_insert_head(head, buf); +} + +/** + * qdf_nbuf_queue_remove() - retrieve a buf from the head of the buf queue + * @head: Network buf queue head + * + * Return: The head buf in the buf queue. + */ +static inline qdf_nbuf_t qdf_nbuf_queue_remove(qdf_nbuf_queue_t *head) +{ + return __qdf_nbuf_queue_remove(head); +} + +/** + * qdf_nbuf_queue_len() - get the length of the queue + * @head: Network buf queue head + * + * Return: length of the queue + */ +static inline uint32_t qdf_nbuf_queue_len(qdf_nbuf_queue_t *head) +{ + return __qdf_nbuf_queue_len(head); +} + +/** + * qdf_nbuf_queue_next() - get the next guy/packet of the given buffer + * @buf: Network buffer + * + * Return: next buffer/packet + */ +static inline qdf_nbuf_t qdf_nbuf_queue_next(qdf_nbuf_t buf) +{ + return __qdf_nbuf_queue_next(buf); +} + +/** + * @qdf_nbuf_is_queue_empty() - check if the buf queue is empty + * @nbq: Network buf queue handle + * + * Return: true if queue is empty + * false if queue is not emty + */ +static inline bool qdf_nbuf_is_queue_empty(qdf_nbuf_queue_t *nbq) +{ + return __qdf_nbuf_is_queue_empty(nbq); +} + +static inline qdf_nbuf_queue_t * +qdf_nbuf_queue_append(qdf_nbuf_queue_t *dest, qdf_nbuf_queue_t *src) +{ + return __qdf_nbuf_queue_append(dest, src); +} + +static inline void +qdf_nbuf_queue_free(qdf_nbuf_queue_t *head) +{ + __qdf_nbuf_queue_free(head); +} + +static inline qdf_nbuf_t +qdf_nbuf_queue_first(qdf_nbuf_queue_t *head) +{ + return __qdf_nbuf_queue_first(head); +} + +static inline qdf_nbuf_t +qdf_nbuf_queue_last(qdf_nbuf_queue_t *head) +{ + return __qdf_nbuf_queue_last(head); +} + +/** + * qdf_nbuf_get_protocol() - return the protocol value of the skb + * @skb: Pointer to network buffer + * + * Return: skb protocol + */ +static inline uint16_t qdf_nbuf_get_protocol(struct sk_buff *skb) +{ + return __qdf_nbuf_get_protocol(skb); +} + +/** + * qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb + * @skb: Pointer to network buffer + * + * Return: skb ip_summed + */ +static inline uint8_t qdf_nbuf_get_ip_summed(struct sk_buff *skb) +{ + return __qdf_nbuf_get_ip_summed(skb); +} + +/** + * qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb + * @skb: Pointer to network buffer + * @ip_summed: ip checksum + * + * Return: none + */ +static inline void qdf_nbuf_set_ip_summed(struct sk_buff *skb, + uint8_t ip_summed) +{ + __qdf_nbuf_set_ip_summed(skb, ip_summed); +} + +/** + * qdf_nbuf_set_next() - add a packet to a linked list + * @this_buf: Predecessor buffer + * @next_buf: Successor buffer + * + * This function can be used to directly link nbufs, rather than using + * a separate network buffer queue object. + * + * Return: none + */ +static inline void qdf_nbuf_set_next(qdf_nbuf_t this_buf, qdf_nbuf_t next_buf) +{ + __qdf_nbuf_set_next(this_buf, next_buf); +} + +/* nbuf extension routines */ + +/** + * qdf_nbuf_set_next_ext() - link extension of this packet contained in a new + * nbuf + * @this_buf: predecessor buffer + * @next_buf: successor buffer + * + * This function is used to link up many nbufs containing a single logical + * packet - not a collection of packets. Do not use for linking the first + * extension to the head + * + * Return: none + */ +static inline void +qdf_nbuf_set_next_ext(qdf_nbuf_t this_buf, qdf_nbuf_t next_buf) +{ + __qdf_nbuf_set_next_ext(this_buf, next_buf); +} + +/** + * qdf_nbuf_next_ext() - get the next packet extension in the linked list + * @buf: Network buffer + * + * Return: Next network buffer in the linked list + */ +static inline qdf_nbuf_t qdf_nbuf_next_ext(qdf_nbuf_t buf) +{ + return __qdf_nbuf_next_ext(buf); +} + +/** + * qdf_nbuf_append_ext_list() - link list of packet extensions to the head + * segment + * @head_buf: Network buf holding head segment (single) + * @ext_list: Network buf list holding linked extensions to the head + * @ext_len: Total length of all buffers in the extension list + * + * This function is used to link up a list of packet extensions (seg1, 2, + * ...) to the nbuf holding the head segment (seg0) + * + * Return: none + */ +static inline void +qdf_nbuf_append_ext_list(qdf_nbuf_t head_buf, qdf_nbuf_t ext_list, + qdf_size_t ext_len) +{ + __qdf_nbuf_append_ext_list(head_buf, ext_list, ext_len); +} + +/** + * qdf_nbuf_get_ext_list() - Get the link to extended nbuf list. + * @head_buf: Network buf holding head segment (single) + * + * This ext_list is populated when we have Jumbo packet, for example in case of + * monitor mode amsdu packet reception, and are stiched using frags_list. + * + * Return: Network buf list holding linked extensions from head buf. + */ +static inline qdf_nbuf_t qdf_nbuf_get_ext_list(qdf_nbuf_t head_buf) +{ + return (qdf_nbuf_t)__qdf_nbuf_get_ext_list(head_buf); +} + +/** + * qdf_nbuf_get_tx_cksum() - gets the tx checksum offload demand + * @buf: Network buffer + * + * Return: qdf_nbuf_tx_cksum_t checksum offload demand for the frame + */ +static inline qdf_nbuf_tx_cksum_t qdf_nbuf_get_tx_cksum(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_tx_cksum(buf); +} + +/** + * qdf_nbuf_set_rx_cksum() - drivers that support hw checksumming use this to + * indicate checksum info to the stack. + * @buf: Network buffer + * @cksum: Checksum + * + * Return: none + */ +static inline void +qdf_nbuf_set_rx_cksum(qdf_nbuf_t buf, qdf_nbuf_rx_cksum_t *cksum) +{ + __qdf_nbuf_set_rx_cksum(buf, cksum); +} + +/** + * qdf_nbuf_get_tid() - this function extracts the TID value from nbuf + * @buf: Network buffer + * + * Return: TID value + */ +static inline uint8_t qdf_nbuf_get_tid(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_tid(buf); +} + +/** + * qdf_nbuf_set_tid() - this function sets the TID value in nbuf + * @buf: Network buffer + * @tid: TID value + * + * Return: none + */ +static inline void qdf_nbuf_set_tid(qdf_nbuf_t buf, uint8_t tid) +{ + __qdf_nbuf_set_tid(buf, tid); +} + +/** + * qdf_nbuf_get_exemption_type() - this function extracts the exemption type + * from nbuf + * @buf: Network buffer + * + * Return: Exemption type + */ +static inline uint8_t qdf_nbuf_get_exemption_type(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_exemption_type(buf); +} + +/** + * qdf_nbuf_set_protocol() - this function peeks data into the buffer at given + * offset + * @buf: Network buffer + * @proto: Protocol + * + * Return: none + */ +static inline void qdf_nbuf_set_protocol(qdf_nbuf_t buf, uint16_t proto) +{ + __qdf_nbuf_set_protocol(buf, proto); +} + +/** + * qdf_nbuf_trace_get_proto_type() - this function return packet proto type + * @buf: Network buffer + * + * Return: Packet protocol type + */ +static inline uint8_t qdf_nbuf_trace_get_proto_type(qdf_nbuf_t buf) +{ + return __qdf_nbuf_trace_get_proto_type(buf); +} + +/** + * qdf_nbuf_reg_trace_cb() - this function registers protocol trace callback + * @cb_func_ptr: Callback pointer + * + * Return: none + */ +static inline void qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr) +{ + __qdf_nbuf_reg_trace_cb(cb_func_ptr); +} + + +/** + * qdf_nbuf_set_tx_parallel_dnload_frm() - set tx parallel download + * @buf: Network buffer + * @candi: Candidate of parallel download frame + * + * This function stores a flag specifying this TX frame is suitable for + * downloading though a 2nd TX data pipe that is used for short frames for + * protocols that can accept out-of-order delivery. + * + * Return: none + */ +static inline void +qdf_nbuf_set_tx_parallel_dnload_frm(qdf_nbuf_t buf, uint8_t candi) +{ + __qdf_nbuf_set_tx_htt2_frm(buf, candi); +} + +/** + * qdf_nbuf_get_tx_parallel_dnload_frm() - get tx parallel download + * @buf: Network buffer + * + * This function return whether this TX frame is allow to download though a 2nd + * TX data pipe or not. + * + * Return: none + */ +static inline uint8_t qdf_nbuf_get_tx_parallel_dnload_frm(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_tx_htt2_frm(buf); +} + +/** + * qdf_nbuf_get_dhcp_subtype() - get the subtype + * of DHCP packet. + * @buf: Pointer to DHCP packet buffer + * + * This func. returns the subtype of DHCP packet. + * + * Return: subtype of the DHCP packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_get_dhcp_subtype(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_get_dhcp_subtype(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_dhcp_subtype() - get the subtype + * of DHCP packet. + * @buf: Pointer to DHCP packet data buffer + * + * This func. returns the subtype of DHCP packet. + * + * Return: subtype of the DHCP packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_data_get_dhcp_subtype(uint8_t *data) +{ + return __qdf_nbuf_data_get_dhcp_subtype(data); +} + +/** + * qdf_nbuf_get_eapol_subtype() - get the subtype + * of EAPOL packet. + * @buf: Pointer to EAPOL packet buffer + * + * This func. returns the subtype of EAPOL packet. + * + * Return: subtype of the EAPOL packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_get_eapol_subtype(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_get_eapol_subtype(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_eapol_subtype() - get the subtype + * of EAPOL packet. + * @data: Pointer to EAPOL packet data buffer + * + * This func. returns the subtype of EAPOL packet. + * + * Return: subtype of the EAPOL packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_data_get_eapol_subtype(uint8_t *data) +{ + return __qdf_nbuf_data_get_eapol_subtype(data); +} + +/** + * qdf_nbuf_get_arp_subtype() - get the subtype + * of ARP packet. + * @buf: Pointer to ARP packet buffer + * + * This func. returns the subtype of ARP packet. + * + * Return: subtype of the ARP packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_get_arp_subtype(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_get_arp_subtype(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_arp_subtype() - get the subtype + * of ARP packet. + * @data: Pointer to ARP packet data buffer + * + * This func. returns the subtype of ARP packet. + * + * Return: subtype of the ARP packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_data_get_arp_subtype(uint8_t *data) +{ + return __qdf_nbuf_data_get_arp_subtype(data); +} + +/** + * qdf_nbuf_get_icmp_subtype() - get the subtype + * of IPV4 ICMP packet. + * @buf: Pointer to IPV4 ICMP packet buffer + * + * This func. returns the subtype of ICMP packet. + * + * Return: subtype of the ICMP packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_get_icmp_subtype(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_get_icmp_subtype(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_icmp_subtype() - get the subtype + * of IPV4 ICMP packet. + * @data: Pointer to IPV4 ICMP packet data buffer + * + * This func. returns the subtype of ICMP packet. + * + * Return: subtype of the ICMP packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_data_get_icmp_subtype(uint8_t *data) +{ + return __qdf_nbuf_data_get_icmp_subtype(data); +} + +/** + * qdf_nbuf_get_icmpv6_subtype() - get the subtype + * of IPV6 ICMPV6 packet. + * @buf: Pointer to IPV6 ICMPV6 packet buffer + * + * This func. returns the subtype of ICMPV6 packet. + * + * Return: subtype of the ICMPV6 packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_get_icmpv6_subtype(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_get_icmpv6_subtype(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_icmpv6_subtype() - get the subtype + * of IPV6 ICMPV6 packet. + * @data: Pointer to IPV6 ICMPV6 packet data buffer + * + * This func. returns the subtype of ICMPV6 packet. + * + * Return: subtype of the ICMPV6 packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data) +{ + return __qdf_nbuf_data_get_icmpv6_subtype(data); +} + +/** + * qdf_nbuf_data_get_ipv4_proto() - get the proto type + * of IPV4 packet. + * @data: Pointer to IPV4 packet data buffer + * + * This func. returns the proto type of IPV4 packet. + * + * Return: proto type of IPV4 packet. + */ +static inline uint8_t +qdf_nbuf_data_get_ipv4_proto(uint8_t *data) +{ + return __qdf_nbuf_data_get_ipv4_proto(data); +} + +/** + * qdf_nbuf_data_get_ipv6_proto() - get the proto type + * of IPV6 packet. + * @data: Pointer to IPV6 packet data buffer + * + * This func. returns the proto type of IPV6 packet. + * + * Return: proto type of IPV6 packet. + */ +static inline uint8_t +qdf_nbuf_data_get_ipv6_proto(uint8_t *data) +{ + return __qdf_nbuf_data_get_ipv6_proto(data); +} + +/** + * qdf_nbuf_is_ipv4_pkt() - check if packet is a ipv4 packet or not + * @buf: buffer + * + * This api is for Tx packets. + * + * Return: true if packet is ipv4 packet + */ +static inline +bool qdf_nbuf_is_ipv4_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv4_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet or not + * @data: data + * + * This api is for Tx packets. + * + * Return: true if packet is ipv4 packet + */ +static inline +bool qdf_nbuf_data_is_ipv4_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv4_pkt(data); +} + +/** + * qdf_nbuf_is_ipv4_dhcp_pkt() - check if packet is a dhcp packet or not + * @buf: buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is DHCP packet + */ +static inline +bool qdf_nbuf_is_ipv4_dhcp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv4_dhcp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if it is DHCP packet. + * @data: Pointer to DHCP packet data buffer + * + * This func. checks whether it is a DHCP packet or not. + * + * Return: true if it is a DHCP packet + * false if not + */ +static inline +bool qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv4_dhcp_pkt(data); +} + +/** + * qdf_nbuf_data_is_ipv6_mdsn_pkt() - check if it is MDNS packet. + * @data: Pointer to packet data buffer + * + * This func. checks whether it is a MDNS packet or not. + * + * Return: true if it is a MDNS packet, false if not + */ +static inline +bool qdf_nbuf_is_ipv6_mdns_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv6_mdns_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if it is DHCP packet. + * @data: Pointer to DHCP packet data buffer + * + * This func. checks whether it is a DHCP packet or not. + * + * Return: true if it is a DHCP packet + * false if not + */ +static inline +bool qdf_nbuf_is_ipv6_dhcp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv6_dhcp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_is_ipv4_eapol_pkt() - check if packet is a eapol packet or not + * @buf: buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is EAPOL packet + */ +static inline +bool qdf_nbuf_is_ipv4_eapol_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv4_eapol_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv4_eapol_pkt() - check if it is EAPOL packet. + * @data: Pointer to EAPOL packet data buffer + * + * This func. checks whether it is a EAPOL packet or not. + * + * Return: true if it is a EAPOL packet + * false if not + */ +static inline +bool qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv4_eapol_pkt(data); +} + +/** + * qdf_nbuf_is_ipv4_wapi_pkt() - check if packet is a wapi packet or not + * @buf: buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is WAPI packet + */ +static inline +bool qdf_nbuf_is_ipv4_wapi_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_ipv4_wapi_pkt(buf); +} + +/** + * qdf_nbuf_is_ipv4_tdls_pkt() - check if packet is a tdls packet or not + * @buf: buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is TDLS packet + */ +static inline +bool qdf_nbuf_is_ipv4_tdls_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_ipv4_tdls_pkt(buf); +} + +/** + * qdf_nbuf_is_ipv4_arp_pkt() - check if packet is a arp packet or not + * @buf: buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is ARP packet + */ +static inline +bool qdf_nbuf_is_ipv4_arp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv4_arp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv4_arp_pkt() - check if it is ARP packet. + * @data: Pointer to ARP packet data buffer + * + * This func. checks whether it is a ARP packet or not. + * + * Return: TRUE if it is a ARP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv4_arp_pkt(data); +} + +/** + * qdf_nbuf_data_is_arp_req() - check if ARP packet is request. + * @buf: buffer + * + * This func. checks whether it is a ARP request or not. + * + * Return: true if it is a ARP request or FALSE if not + */ +static inline +bool qdf_nbuf_data_is_arp_req(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_arp_req(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_arp_rsp() - check if ARP packet is response. + * @buf: buffer + * + * This func. checks whether it is a ARP response or not. + * + * Return: true if it is a ARP response or FALSE if not + */ +static inline +bool qdf_nbuf_data_is_arp_rsp(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_arp_rsp(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_arp_src_ip() - get ARP packet source IP gateway. + * @buf: buffer + * + * Return: ARP packet source IP value. + */ +static inline +uint32_t qdf_nbuf_get_arp_src_ip(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_arp_src_ip(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_arp_tgt_ip() - get ARP packet target IP gateway. + * @buf: buffer + * + * Return: ARP packet target IP value. + */ +static inline +uint32_t qdf_nbuf_get_arp_tgt_ip(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_arp_tgt_ip(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_get_dns_domain_name() - get dns domain name of required length + * @buf: buffer + * @len: length to copy + * + * Return: dns domain name + */ +static inline +uint8_t *qdf_nbuf_get_dns_domain_name(qdf_nbuf_t buf, uint32_t len) +{ + return __qdf_nbuf_get_dns_domain_name(qdf_nbuf_data(buf), len); +} + +/** + * qdf_nbuf_data_is_dns_query() - check if skb data is a dns query + * @buf: buffer + * + * Return: true if packet is dns query packet. + * false otherwise. + */ +static inline +bool qdf_nbuf_data_is_dns_query(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_dns_query(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_dns_response() - check if skb data is a dns response + * @buf: buffer + * + * Return: true if packet is dns response packet. + * false otherwise. + */ +static inline +bool qdf_nbuf_data_is_dns_response(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_dns_response(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn + * @buf: buffer + * + * Return: true if packet is tcp syn packet. + * false otherwise. + */ +static inline +bool qdf_nbuf_data_is_tcp_syn(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_tcp_syn(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack + * @buf: buffer + * + * Return: true if packet is tcp syn ack packet. + * false otherwise. + */ +static inline +bool qdf_nbuf_data_is_tcp_syn_ack(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_tcp_syn_ack(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack + * @buf: buffer + * + * Return: true if packet is tcp ack packet. + * false otherwise. + */ +static inline +bool qdf_nbuf_data_is_tcp_ack(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_tcp_ack(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_tcp_src_port() - get tcp src port + * @buf: buffer + * + * Return: tcp source port value. + */ +static inline +uint16_t qdf_nbuf_data_get_tcp_src_port(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_get_tcp_src_port(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port + * @buf: buffer + * + * Return: tcp destination port value. + */ +static inline +uint16_t qdf_nbuf_data_get_tcp_dst_port(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_get_tcp_dst_port(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_icmpv4_req() - check if ICMPv4 packet is request. + * @buf: buffer + * + * This func. checks whether it is a ICMPv4 request or not. + * + * Return: true if it is a ICMPv4 request or fALSE if not + */ +static inline +bool qdf_nbuf_data_is_icmpv4_req(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_icmpv4_req(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_icmpv4_rsp() - check if ICMPv4 packet is res + * @buf: buffer + * + * Return: true if packet is icmpv4 response + * false otherwise. + */ +static inline +bool qdf_nbuf_data_is_icmpv4_rsp(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_icmpv4_rsp(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP + * @buf: buffer + * + * Return: icmpv4 packet source IP value. + */ +static inline +uint32_t qdf_nbuf_get_icmpv4_src_ip(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_icmpv4_src_ip(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_icmpv4_tgt_ip() - get icmpv4 target IP + * @buf: buffer + * + * Return: icmpv4 packet target IP value. + */ +static inline +uint32_t qdf_nbuf_get_icmpv4_tgt_ip(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_icmpv4_tgt_ip(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_is_ipv6_pkt() - check if it is IPV6 packet. + * @buf: Pointer to IPV6 packet buffer + * + * This func. checks whether it is a IPV6 packet or not. + * + * Return: TRUE if it is a IPV6 packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_ipv6_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv6_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet. + * @data: Pointer to IPV6 packet data buffer + * + * This func. checks whether it is a IPV6 packet or not. + * + * Return: TRUE if it is a IPV6 packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv6_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv6_pkt(data); +} + +/** + * qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet. + * @data: Pointer to IPV4 packet data buffer + * + * This func. checks whether it is a IPV4 multicast packet or not. + * + * Return: TRUE if it is a IPV4 multicast packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv4_mcast_pkt(data); +} + +/** + * qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet. + * @data: Pointer to IPV6 packet data buffer + * + * This func. checks whether it is a IPV6 multicast packet or not. + * + * Return: TRUE if it is a IPV6 multicast packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv6_mcast_pkt(data); +} + +/** + * qdf_nbuf_is_icmp_pkt() - check if it is IPV4 ICMP packet. + * @buf: Pointer to IPV4 ICMP packet buffer + * + * This func. checks whether it is a ICMP packet or not. + * + * Return: TRUE if it is a ICMP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_icmp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_icmp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet. + * @data: Pointer to IPV4 ICMP packet data buffer + * + * This func. checks whether it is a ICMP packet or not. + * + * Return: TRUE if it is a ICMP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_icmp_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_icmp_pkt(data); +} + +/** + * qdf_nbuf_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet. + * @buf: Pointer to IPV6 ICMPV6 packet buffer + * + * This func. checks whether it is a ICMPV6 packet or not. + * + * Return: TRUE if it is a ICMPV6 packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_icmpv6_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_icmpv6_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet. + * @data: Pointer to IPV6 ICMPV6 packet data buffer + * + * This func. checks whether it is a ICMPV6 packet or not. + * + * Return: TRUE if it is a ICMPV6 packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_icmpv6_pkt(data); +} + +/** + * qdf_nbuf_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet. + * @buf: Pointer to IPV4 UDP packet buffer + * + * This func. checks whether it is a IPV4 UDP packet or not. + * + * Return: TRUE if it is a IPV4 UDP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_ipv4_udp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv4_udp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet. + * @data: Pointer to IPV4 UDP packet data buffer + * + * This func. checks whether it is a IPV4 UDP packet or not. + * + * Return: TRUE if it is a IPV4 UDP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv4_udp_pkt(data); +} + +/** + * qdf_nbuf_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet. + * @buf: Pointer to IPV4 TCP packet buffer + * + * This func. checks whether it is a IPV4 TCP packet or not. + * + * Return: TRUE if it is a IPV4 TCP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_ipv4_tcp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv4_tcp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet. + * @data: Pointer to IPV4 TCP packet data buffer + * + * This func. checks whether it is a IPV4 TCP packet or not. + * + * Return: TRUE if it is a IPV4 TCP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv4_tcp_pkt(data); +} + +/** + * qdf_nbuf_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet. + * @buf: Pointer to IPV6 UDP packet buffer + * + * This func. checks whether it is a IPV6 UDP packet or not. + * + * Return: TRUE if it is a IPV6 UDP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_ipv6_udp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv6_udp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet. + * @data: Pointer to IPV6 UDP packet data buffer + * + * This func. checks whether it is a IPV6 UDP packet or not. + * + * Return: TRUE if it is a IPV6 UDP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv6_udp_pkt(data); +} + +/** + * qdf_nbuf_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet. + * @buf: Pointer to IPV6 TCP packet buffer + * + * This func. checks whether it is a IPV6 TCP packet or not. + * + * Return: TRUE if it is a IPV6 TCP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_ipv6_tcp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv6_tcp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet. + * @data: Pointer to IPV6 TCP packet data buffer + * + * This func. checks whether it is a IPV6 TCP packet or not. + * + * Return: TRUE if it is a IPV6 TCP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv6_tcp_pkt(data); +} + +/** + * qdf_nbuf_is_bcast_pkt() - check if it is broadcast packet. + * @buf: Network buffer + * + * This func. checks whether packet is broadcast or not. + * + * Return: TRUE if it is broadcast packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_bcast_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_bcast_pkt(buf); +} + +/** + * qdf_nbuf_reset_num_frags() - decrement the number of fragments + * @buf: Network buffer + * + * Return: Number of fragments + */ +static inline void qdf_nbuf_reset_num_frags(qdf_nbuf_t buf) +{ + __qdf_nbuf_reset_num_frags(buf); +} + +/** + * qdf_dmaaddr_to_32s - return high and low parts of dma_addr + * + * Returns the high and low 32-bits of the DMA addr in the provided ptrs + * + * Return: N/A + */ +static inline void qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr, + uint32_t *lo, uint32_t *hi) +{ + return __qdf_dmaaddr_to_32s(dmaaddr, lo, hi); +} + +/** + * qdf_nbuf_get_tso_info() - function to divide a jumbo TSO + * network buffer into segments + * @nbuf: network buffer to be segmented + * @tso_info: This is the output. The information about the + * TSO segments will be populated within this. + * + * This function fragments a TCP jumbo packet into smaller + * segments to be transmitted by the driver. It chains the TSO + * segments created into a list. + * + * Return: number of TSO segments + */ +static inline uint32_t qdf_nbuf_get_tso_info(qdf_device_t osdev, + qdf_nbuf_t nbuf, struct qdf_tso_info_t *tso_info) +{ + return __qdf_nbuf_get_tso_info(osdev, nbuf, tso_info); +} + +/** + * qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element + * + * @osdev: qdf device handle + * @tso_seg: TSO segment element to be unmapped + * @is_last_seg: whether this is last tso seg or not + * + * Return: none + */ +static inline void qdf_nbuf_unmap_tso_segment(qdf_device_t osdev, + struct qdf_tso_seg_elem_t *tso_seg, + bool is_last_seg) +{ + return __qdf_nbuf_unmap_tso_segment(osdev, tso_seg, is_last_seg); +} + +/** + * qdf_nbuf_get_tcp_payload_len() - function to return the tso payload len + * @nbuf: network buffer + * + * Return: size of the tso packet + */ +static inline size_t qdf_nbuf_get_tcp_payload_len(qdf_nbuf_t nbuf) +{ + return __qdf_nbuf_get_tcp_payload_len(nbuf); +} + +/** + * qdf_nbuf_get_tso_num_seg() - function to calculate the number + * of TCP segments within the TSO jumbo packet + * @nbuf: TSO jumbo network buffer to be segmented + * + * This function calculates the number of TCP segments that the + network buffer can be divided into. + * + * Return: number of TCP segments + */ +static inline uint32_t qdf_nbuf_get_tso_num_seg(qdf_nbuf_t nbuf) +{ + return __qdf_nbuf_get_tso_num_seg(nbuf); +} + +/** + * qdf_nbuf_inc_users() - function to increment the number of + * users referencing this network buffer + * + * @nbuf: network buffer + * + * This function increments the number of users referencing this + * network buffer + * + * Return: the network buffer + */ +static inline qdf_nbuf_t qdf_nbuf_inc_users(qdf_nbuf_t nbuf) +{ + return __qdf_nbuf_inc_users(nbuf); +} + +/** + * qdf_nbuf_data_attr_get() - Get data_attr field from cvg_nbuf_cb + * + * @nbuf: Network buffer (skb on linux) + * + * This function returns the values of data_attr field + * in struct cvg_nbuf_cb{}, to which skb->cb is typecast. + * This value is actually the value programmed in CE descriptor. + * + * Return: Value of data_attr + */ +static inline uint32_t qdf_nbuf_data_attr_get(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_attr_get(buf); +} + +/** + * qdf_nbuf_data_attr_set() - Sets data_attr field in cvg_nbuf_cb + * + * @nbuf: Network buffer (skb on linux) + * @data_attr: Value to be stored cvg_nbuf_cb->data_attr + * + * This function stores the value to be programmed in CE + * descriptor as part skb->cb which is typecast to struct cvg_nbuf_cb{} + * + * Return: void + */ +static inline +void qdf_nbuf_data_attr_set(qdf_nbuf_t buf, uint32_t data_attr) +{ + __qdf_nbuf_data_attr_set(buf, data_attr); +} + +/** + * qdf_nbuf_tx_info_get() - Parse skb and get Tx metadata + * + * @nbuf: Network buffer (skb on linux) + * + * This function parses the payload to figure out relevant + * Tx meta-data e.g. whether to enable tx_classify bit + * in CE. + * + * Return: void + */ +#define qdf_nbuf_tx_info_get __qdf_nbuf_tx_info_get + +void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state); +void qdf_nbuf_tx_desc_count_display(void); +void qdf_nbuf_tx_desc_count_clear(void); + +static inline qdf_nbuf_t +qdf_nbuf_realloc_headroom(qdf_nbuf_t buf, uint32_t headroom) +{ + return __qdf_nbuf_realloc_headroom(buf, headroom); +} + +static inline qdf_nbuf_t +qdf_nbuf_realloc_tailroom(qdf_nbuf_t buf, uint32_t tailroom) +{ + return __qdf_nbuf_realloc_tailroom(buf, tailroom); +} + +static inline qdf_nbuf_t +qdf_nbuf_expand(qdf_nbuf_t buf, uint32_t headroom, uint32_t tailroom) +{ + return __qdf_nbuf_expand(buf, headroom, tailroom); +} + +static inline int +qdf_nbuf_linearize(qdf_nbuf_t buf) +{ + return __qdf_nbuf_linearize(buf); +} + +#ifdef NBUF_MEMORY_DEBUG +#define qdf_nbuf_unshare(d) \ + qdf_nbuf_unshare_debug(d, __func__, __LINE__) + +static inline qdf_nbuf_t +qdf_nbuf_unshare_debug(qdf_nbuf_t buf, const char *func_name, uint32_t line_num) +{ + qdf_nbuf_t unshared_buf; + + unshared_buf = __qdf_nbuf_unshare(buf); + + if (qdf_likely(buf != unshared_buf)) { + qdf_net_buf_debug_delete_node(buf); + + if (unshared_buf) + qdf_net_buf_debug_add_node(unshared_buf, 0, + func_name, line_num); + } + + return unshared_buf; +} + +#else +static inline qdf_nbuf_t +qdf_nbuf_unshare(qdf_nbuf_t buf) +{ + return __qdf_nbuf_unshare(buf); +} +#endif + +static inline bool +qdf_nbuf_is_cloned(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_cloned(buf); +} + +static inline void +qdf_nbuf_frag_info(qdf_nbuf_t buf, qdf_sglist_t *sg) +{ + __qdf_nbuf_frag_info(buf, sg); +} + +static inline qdf_nbuf_tx_cksum_t +qdf_nbuf_tx_cksum_info(qdf_nbuf_t buf, uint8_t **hdr_off, uint8_t **where) +{ + return __qdf_nbuf_tx_cksum_info(buf, hdr_off, where); +} + +static inline void qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf) +{ + __qdf_nbuf_reset_ctxt(nbuf); +} + +static inline void qdf_nbuf_init(qdf_nbuf_t buf) +{ + __qdf_nbuf_init(buf); +} + +static inline void *qdf_nbuf_network_header(qdf_nbuf_t buf) +{ + return __qdf_nbuf_network_header(buf); +} + +static inline void *qdf_nbuf_transport_header(qdf_nbuf_t buf) +{ + return __qdf_nbuf_transport_header(buf); +} + +static inline qdf_size_t qdf_nbuf_tcp_tso_size(qdf_nbuf_t buf) +{ + return __qdf_nbuf_tcp_tso_size(buf); +} + +static inline void *qdf_nbuf_get_cb(qdf_nbuf_t nbuf) +{ + return __qdf_nbuf_get_cb(nbuf); +} + +static inline uint32_t qdf_nbuf_get_nr_frags(qdf_nbuf_t nbuf) +{ + return __qdf_nbuf_get_nr_frags(nbuf); +} + +static inline qdf_size_t qdf_nbuf_headlen(qdf_nbuf_t buf) +{ + return __qdf_nbuf_headlen(buf); +} + +static inline QDF_STATUS qdf_nbuf_frag_map(qdf_device_t osdev, + qdf_nbuf_t buf, int offset, + qdf_dma_dir_t dir, int cur_frag) +{ + return __qdf_nbuf_frag_map(osdev, buf, offset, dir, cur_frag); +} + +static inline bool qdf_nbuf_tso_tcp_v4(qdf_nbuf_t buf) +{ + return __qdf_nbuf_tso_tcp_v4(buf); +} + +static inline bool qdf_nbuf_tso_tcp_v6(qdf_nbuf_t buf) +{ + return __qdf_nbuf_tso_tcp_v6(buf); +} + +static inline uint32_t qdf_nbuf_tcp_seq(qdf_nbuf_t buf) +{ + return __qdf_nbuf_tcp_seq(buf); +} + +static inline qdf_size_t qdf_nbuf_l2l3l4_hdr_len(qdf_nbuf_t buf) +{ + return __qdf_nbuf_l2l3l4_hdr_len(buf); +} + +static inline bool qdf_nbuf_is_nonlinear(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_nonlinear(buf); +} + +static inline uint32_t +qdf_nbuf_get_frag_size(qdf_nbuf_t buf, uint32_t frag_num) +{ + return __qdf_nbuf_get_frag_size(buf, frag_num); +} + +static inline uint32_t qdf_nbuf_get_priority(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_priority(buf); +} + +static inline void qdf_nbuf_set_priority(qdf_nbuf_t buf, uint32_t p) +{ + __qdf_nbuf_set_priority(buf, p); +} + +static inline void qdf_nbuf_record_rx_queue(qdf_nbuf_t buf, uint32_t queue_id) +{ + __qdf_nbuf_record_rx_queue(buf, queue_id); +} + +static inline uint16_t +qdf_nbuf_get_queue_mapping(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_queue_mapping(buf); +} + +static inline uint8_t * +qdf_nbuf_get_priv_ptr(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_priv_ptr(buf); +} + +/** + * qdf_nbuf_update_radiotap() - update radiotap at head of nbuf. + * @rx_status: rx_status containing required info to update radiotap + * @nbuf: Pointer to nbuf + * @headroom_sz: Available headroom size + * + * Return: radiotap length. + */ +unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status, + qdf_nbuf_t nbuf, uint32_t headroom_sz); + +/** + * qdf_nbuf_mark_wakeup_frame() - mark wakeup frame. + * @buf: Pointer to nbuf + * + * Return: None + */ +static inline void +qdf_nbuf_mark_wakeup_frame(qdf_nbuf_t buf) +{ + __qdf_nbuf_mark_wakeup_frame(buf); +} + +/** + * qdf_nbuf_reg_free_cb - Registers nbuf free callback + * @cb_func_ptr: Callback pointer + * + * This function registers nbuf free callback + * + * Return: void + */ +static inline void +qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr) +{ + __qdf_nbuf_reg_free_cb(cb_func_ptr); +} + +/** + * qdf_nbuf_set_timestamp() - set the timestamp for frame + * + * @buf: sk buff + * + * Return: void + */ +static inline void +qdf_nbuf_set_timestamp(struct sk_buff *skb) +{ + __qdf_nbuf_set_timestamp(skb); +} + +/** + * qdf_nbuf_get_timestamp() - get the timestamp for frame + * + * @buf: sk buff + * + * Return: timestamp stored in skb in ms + */ +static inline uint64_t +qdf_nbuf_get_timestamp(struct sk_buff *skb) +{ + return __qdf_nbuf_get_timestamp(skb); +} + +/** + * qdf_nbuf_get_timedelta_ms() - get time difference in ms + * + * @buf: sk buff + * + * Return: time difference ms + */ +static inline uint64_t +qdf_nbuf_get_timedelta_ms(struct sk_buff *skb) +{ + return __qdf_nbuf_get_timedelta_ms(skb); +} + +/** + * qdf_nbuf_get_timedelta_us() - get time difference in micro seconds + * + * @buf: sk buff + * + * Return: time difference in micro seconds + */ +static inline uint64_t +qdf_nbuf_get_timedelta_us(struct sk_buff *skb) +{ + return __qdf_nbuf_get_timedelta_us(skb); +} + +/** + * qdf_nbuf_count_get() - get global nbuf gauge + * + * Return: global nbuf gauge + */ +static inline int qdf_nbuf_count_get(void) +{ + return __qdf_nbuf_count_get(); +} + +/** + * qdf_nbuf_count_inc() - increment nbuf global count + * + * @buf: sk buff + * + * Return: void + */ +static inline void qdf_nbuf_count_inc(qdf_nbuf_t buf) +{ + return __qdf_nbuf_count_inc(buf); +} + +/** + * qdf_nbuf_count_dec() - decrement nbuf global count + * + * @buf: sk buff + * + * Return: void + */ +static inline void qdf_nbuf_count_dec(qdf_nbuf_t buf) +{ + return __qdf_nbuf_count_dec(buf); +} + +/** + * qdf_nbuf_mod_init() - Intialization routine for qdf_nbuf + * + * Return void + */ +static inline void qdf_nbuf_mod_init(void) +{ + return __qdf_nbuf_mod_init(); +} + +/** + * qdf_nbuf_mod_init() - Unintialization routine for qdf_nbuf + * + * Return void + */ +static inline void qdf_nbuf_mod_exit(void) +{ + return __qdf_nbuf_mod_exit(); +} + +/** + * qdf_nbuf_orphan() - orphan a nbuf + * @buf: Pointer to network buffer + * + * If a buffer currently has an owner then we call the + * owner's destructor function + * + * Return: void + */ +static inline void qdf_nbuf_orphan(qdf_nbuf_t buf) +{ + return __qdf_nbuf_orphan(buf); +} + +#ifdef CONFIG_NBUF_AP_PLATFORM +#include +#else +#include +#endif +#endif /* _QDF_NBUF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_net_if.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_net_if.h new file mode 100644 index 0000000000000000000000000000000000000000..e0e190929acfbfc097c685a56b152b31b6ccffa1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_net_if.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_net_if + * QCA driver framework (QDF) network interface management APIs + */ + +#if !defined(__QDF_NET_IF_H) +#define __QDF_NET_IF_H + +/* Include Files */ +#include +#include + +struct qdf_net_if; + +#ifdef ENHANCED_OS_ABSTRACTION +/** + * qdf_net_if_create_dummy_if() - create dummy interface + * @nif: interface handle + * + * This function will create a dummy network interface + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qdf_net_if_create_dummy_if(struct qdf_net_if *nif); +#else +static inline QDF_STATUS +qdf_net_if_create_dummy_if(struct qdf_net_if *nif) +{ + return __qdf_net_if_create_dummy_if(nif); +} +#endif +#endif /* __QDF_NET_IF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_net_types.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_net_types.h new file mode 100644 index 0000000000000000000000000000000000000000..c3a60736bbdac77cce89fced2bd2adbeebe207b9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_net_types.h @@ -0,0 +1,609 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_net_types + * This file defines types used in the networking stack abstraction. + */ + +#ifndef _QDF_NET_TYPES_H +#define _QDF_NET_TYPES_H + +#include /* uint8_t, etc. */ +#include + +/* Extended Traffic ID passed to target if the TID is unknown */ +#define QDF_NBUF_TX_EXT_TID_INVALID 0x1f + +/** + * qdf_nbuf_exemption_type - QDF net buf exemption types for encryption + * @QDF_NBUF_EXEMPT_NO_EXEMPTION: No exemption + * @QDF_NBUF_EXEMPT_ALWAYS: Exempt always + * @QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE: Exempt on key mapping + */ +enum qdf_nbuf_exemption_type { + QDF_NBUF_EXEMPT_NO_EXEMPTION = 0, + QDF_NBUF_EXEMPT_ALWAYS, + QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE +}; + +typedef __wsum_t wsum_t; +typedef __in6_addr_t in6_addr_t; + + +#define QDF_NET_MAC_ADDR_MAX_LEN 6 +#define QDF_NET_IF_NAME_SIZE 64 +#define QDF_NET_ETH_LEN QDF_NET_MAC_ADDR_MAX_LEN +#define QDF_NET_MAX_MCAST_ADDR 64 +#define QDF_NET_IPV4_LEN 4 +#define QDF_TID_VI 5 +#define QDF_TID_VO 6 +#define QDF_TID_BE 0 +#define QDF_TID_BK 1 +/* Extended Traffic ID passed to target if the TID is unknown */ +#define QDF_NBUF_TX_EXT_TID_INVALID 0x1f + +#define QDF_ETH_TYPE_IPV4 0x0800 /* IPV4 */ +#define QDF_ETH_TYPE_IPV6 0x86dd /* IPV6 */ +#define QDF_ETH_TYPE_8021Q 0x8100 /* 802.1Q vlan protocol */ +#define QDF_ETH_TYPE_8021AD 0x88a8 /* 802.1AD vlan protocol */ +#define QDF_IEEE80211_4ADDR_HDR_LEN 30 +#define QDF_IEEE80211_3ADDR_HDR_LEN 24 +#define QDF_IEEE80211_FC0_SUBTYPE_QOS 0x80 +#define QDF_IEEE80211_FC1_TODS 0x01 +#define QDF_IEEE80211_FC1_FROMDS 0x02 + +#define QDF_IEEE80211_FC0_TYPE_MASK 0x0c +#define QDF_IEEE80211_FC0_SUBTYPE_MASK 0xf0 + +#define QDF_IEEE80211_FC0_TYPE_DATA 0x08 +#define QDF_IEEE80211_FC0_SUBTYPE_DATA 0x00 +#define QDF_IEEE80211_FC0_SUBTYPE_QOS 0x80 + +#define QDF_IEEE80211_FC0_SUBTYPE_QOS_NULL 0xC0 +#define QDF_IEEE80211_FC0_SUBTYPE_NODATA 0x40 + +#define QDF_IEEE80211_FC0_TYPE_CTL 0x04 +#define QDF_IEEE80211_FC0_SUBTYPE_BEAM_REPORT_POLL 0x40 +#define QDF_IEEE80211_FC0_SUBTYPE_VHT_NDP_AN 0x50 +#define QDF_IEEE80211_FC0_SUBTYPE_CTL_FRAME_EXTN 0x60 +#define QDF_IEEE80211_FC0_SUBTYPE_CTL_WRAPPER 0x70 +#define QDF_IEEE80211_FC0_SUBTYPE_BAR 0x80 +#define QDF_IEEE80211_FC0_SUBTYPE_BA 0x90 +#define QDF_IEEE80211_FC0_SUBTYPE_PSPOLL 0xA0 +#define QDF_IEEE80211_FC0_SUBTYPE_RTS 0xB0 +#define QDF_IEEE80211_FC0_SUBTYPE_ACK 0xD0 +#define QDF_IEEE80211_FC0_SUBTYPE_CF_END 0xE0 +#define QDF_IEEE80211_FC0_SUBTYPE_CF_END_CF_ACK 0xF0 + +#define QDF_NET_IS_MAC_MULTICAST(_a) (*(_a) & 0x01) + +/** + * In LLC header individual LSAP address 0x42 in + * DSAP and SSAP signifies IEEE 802.1 Bridge + * Spanning Tree Protocol + */ +#define QDF_LLC_STP 0x4242 + +typedef struct qdf_net_ethaddr { + uint8_t addr[QDF_NET_ETH_LEN]; +} qdf_net_ethaddr_t; + +/** + * typedef qdf_net_arphdr_t - ARP header info + * @ar_hrd: hardware type + * @ar_pro: protocol type + * @ar_hln: hardware address length + * @ar_pln: protocol length + * @ar_op: arp operation code + * @ar_sha: sender hardware address + * @ar_sip: sender IP address + * @ar_tha: target hardware address + * @ar_tip: target IP address + */ +typedef struct qdf_net_arphdr { + uint16_t ar_hrd; + uint16_t ar_pro; + uint8_t ar_hln; + uint8_t ar_pln; + uint16_t ar_op; + uint8_t ar_sha[QDF_NET_ETH_LEN]; + uint8_t ar_sip[QDF_NET_IPV4_LEN]; + uint8_t ar_tha[QDF_NET_ETH_LEN]; + uint8_t ar_tip[QDF_NET_IPV4_LEN]; +} qdf_net_arphdr_t; + +/** + * typedef qdf_net_icmp6_11addr_t - ICMP6 header info + * @type: hardware type + * @len: hardware address length + * @addr: hardware address + */ +typedef struct qdf_net_icmp6_11addr { + uint8_t type; + uint8_t len; + uint8_t addr[QDF_NET_ETH_LEN]; +} qdf_net_icmp6_11addr_t; + +#define QDF_TCPHDR_FIN __QDF_TCPHDR_FIN +#define QDF_TCPHDR_SYN __QDF_TCPHDR_SYN +#define QDF_TCPHDR_RST __QDF_TCPHDR_RST +#define QDF_TCPHDR_PSH __QDF_TCPHDR_PSH +#define QDF_TCPHDR_ACK __QDF_TCPHDR_ACK +#define QDF_TCPHDR_URG __QDF_TCPHDR_URG +#define QDF_TCPHDR_ECE __QDF_TCPHDR_ECE +#define QDF_TCPHDR_CWR __QDF_TCPHDR_CWR + +typedef struct { + uint16_t source; + uint16_t dest; + uint32_t seq; + uint32_t ack_seq; +#if defined(QDF_LITTLE_ENDIAN_MACHINE) + uint16_t res1:4, + doff:4, + fin:1, + syn:1, + rst:1, + psh:1, + ack:1, + urg:1, + ece:1, + cwr:1; +#elif defined(QDF_BIG_ENDIAN_MACHINE) + uint16_t doff:4, + res1:4, + cwr:1, + ece:1, + urg:1, + ack:1, + psh:1, + rst:1, + syn:1, + fin:1; +#else +#error "Adjust your byte order" +#endif + uint16_t window; + uint16_t check; + uint16_t urg_ptr; +} qdf_net_tcphdr_t; + +typedef struct { +#if defined(QDF_LITTLE_ENDIAN_MACHINE) + uint8_t ip_hl:4, + ip_version:4; +#elif defined(QDF_BIG_ENDIAN_MACHINE) + uint8_t ip_version:4, + ip_hl:4; +#else +#error "Please fix" +#endif + uint8_t ip_tos; + uint16_t ip_len; + uint16_t ip_id; + uint16_t ip_frag_off; + uint8_t ip_ttl; + uint8_t ip_proto; + uint16_t ip_check; + uint32_t ip_saddr; + uint32_t ip_daddr; + /*The options start here. */ +} qdf_net_iphdr_t; + +/* V3 group record types [grec_type] */ +#define IGMPV3_MODE_IS_INCLUDE 1 +#define IGMPV3_MODE_IS_EXCLUDE 2 +#define IGMPV3_CHANGE_TO_INCLUDE 3 +#define IGMPV3_CHANGE_TO_EXCLUDE 4 +#define IGMPV3_ALLOW_NEW_SOURCES 5 +#define IGMPV3_BLOCK_OLD_SOURCES 6 + +/** + * qdf_net_cmd_vid_t - Command for set/unset vid + */ +typedef uint16_t qdf_net_cmd_vid_t ; /*get/set vlan id*/ + +/** + * typedef qdf_nbuf_tx_cksum_t - transmit checksum offload types + * @QDF_NBUF_TX_CKSUM_NONE: No checksum offload + * @QDF_NBUF_TX_CKSUM_IP: IP header checksum offload + * @QDF_NBUF_TX_CKSUM_TCP_UDP: TCP/UDP checksum offload + * @QDF_NBUF_TX_CKSUM_TCP_UDP_IP: TCP/UDP and IP header checksum offload + */ + +typedef enum { + QDF_NBUF_TX_CKSUM_NONE, + QDF_NBUF_TX_CKSUM_IP, + QDF_NBUF_TX_CKSUM_TCP_UDP, + QDF_NBUF_TX_CKSUM_TCP_UDP_IP, + +} qdf_nbuf_tx_cksum_t; + +/** + * typedef qdf_nbuf_l4_rx_cksum_type_t - receive checksum API types + * @QDF_NBUF_RX_CKSUM_ZERO: Rx checksum zero + * @QDF_NBUF_RX_CKSUM_TCP: Rx checksum TCP + * @QDF_NBUF_RX_CKSUM_UDP: Rx checksum UDP + * @QDF_NBUF_RX_CKSUM_TCPIPV6: Rx checksum TCP IPV6 + * @QDF_NBUF_RX_CKSUM_UDPIPV6: Rx checksum UDP IPV6 + * @QDF_NBUF_RX_CKSUM_TCP_NOPSEUDOHEADER: Rx checksum TCP no pseudo header + * @QDF_NBUF_RX_CKSUM_UDP_NOPSEUDOHEADER: Rx checksum UDP no pseudo header + * @QDF_NBUF_RX_CKSUM_TCPSUM16: Rx checksum TCP SUM16 + */ +typedef enum { + QDF_NBUF_RX_CKSUM_ZERO = 0x0000, + QDF_NBUF_RX_CKSUM_TCP = 0x0001, + QDF_NBUF_RX_CKSUM_UDP = 0x0002, + QDF_NBUF_RX_CKSUM_TCPIPV6 = 0x0010, + QDF_NBUF_RX_CKSUM_UDPIPV6 = 0x0020, + QDF_NBUF_RX_CKSUM_TCP_NOPSEUDOHEADER = 0x0100, + QDF_NBUF_RX_CKSUM_UDP_NOPSEUDOHEADER = 0x0200, + QDF_NBUF_RX_CKSUM_TCPSUM16 = 0x1000, +} qdf_nbuf_l4_rx_cksum_type_t; + +/** + * typedef qdf_nbuf_l4_rx_cksum_result_t - receive checksum status types + * @QDF_NBUF_RX_CKSUM_NONE: Device failed to checksum + * @QDF_NBUF_RX_CKSUM_TCP_UDP_HW: TCP/UDP cksum successful and value returned + * @QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY: TCP/UDP cksum successful, no value + */ +typedef enum { + QDF_NBUF_RX_CKSUM_NONE = 0x0000, + QDF_NBUF_RX_CKSUM_TCP_UDP_HW = 0x0010, + QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY = 0x0020, +} qdf_nbuf_l4_rx_cksum_result_t; + +/** + * typedef qdf_nbuf_rx_cksum_t - receive checksum type + * @l4_type: L4 type + * @l4_result: L4 result + */ +typedef struct { + qdf_nbuf_l4_rx_cksum_type_t l4_type; + qdf_nbuf_l4_rx_cksum_result_t l4_result; + uint32_t val; +} qdf_nbuf_rx_cksum_t; + +#define QDF_ARP_REQ 1 /* ARP request */ +#define QDF_ARP_RSP 2 /* ARP response */ +#define QDF_ARP_RREQ 3 /* RARP request */ +#define QDF_ARP_RRSP 4 /* RARP response */ + +#define QDF_NEXTHDR_ICMP 58 /* ICMP for IPv6. */ + +/* Neighbor Discovery */ +#define QDF_ND_RSOL 133 /* Router Solicitation */ +#define QDF_ND_RADVT 134 /* Router Advertisement */ +#define QDF_ND_NSOL 135 /* Neighbor Solicitation */ +#define QDF_ND_NADVT 136 /* Neighbor Advertisement */ + +/** + * typedef qdf_net_udphdr_t - UDP header info + * @src_port: source port + * @dst_port: destination port + * @udp_len: length + * @udp_cksum: checksum + */ +typedef struct { + uint16_t src_port; + uint16_t dst_port; + uint16_t udp_len; + uint16_t udp_cksum; +} qdf_net_udphdr_t; + +/** + * typedef qdf_net_dhcphdr_t - DHCP header info + * @dhcp_msg_type: message type + * @dhcp_hw_type: hardware type + * @dhcp_hw_addr_len: hardware address length + * @dhcp_num_hops: number of hops + * @dhcp_transc_id: transaction id + * @dhcp_secs_elapsed: time elapsed + * @dhcp_flags: flags + * @dhcp_ciaddr: client IP + * @dhcp_yiaddr: device IP + * @dhcp_siaddr_nip: Server IP + * @dhcp_gateway_nip: relay agent IP + * @dhcp_chaddr: LLC hardware address + * @dhcp_sname: server host name + * @dhcp_file: boot file name + * @dhcp_cookie: cookie + */ +typedef struct { + uint8_t dhcp_msg_type; + uint8_t dhcp_hw_type; + uint8_t dhcp_hw_addr_len; + uint8_t dhcp_num_hops; + uint32_t dhcp_transc_id; + uint16_t dhcp_secs_elapsed; + uint16_t dhcp_flags; + uint32_t dhcp_ciaddr; + uint32_t dhcp_yiaddr; + uint32_t dhcp_siaddr_nip; + uint32_t dhcp_gateway_nip; + uint8_t dhcp_chaddr[16]; + uint8_t dhcp_sname[64]; + uint8_t dhcp_file[128]; + uint8_t dhcp_cookie[4]; +} qdf_net_dhcphdr_t; + + +/** + * qdf_net_vlanhdr_t - Vlan header + */ +typedef struct qdf_net_vlanhdr { + uint16_t tpid; +#if defined(QDF_LITTLE_ENDIAN_MACHINE) + uint16_t vid:12; /* Vlan id*/ + uint8_t cfi:1; /* reserved for CFI, don't use*/ + uint8_t prio:3; /* Priority*/ +#elif defined(QDF_BIG_ENDIAN_MACHINE) + uint8_t prio:3; /* Priority*/ + uint8_t cfi:1; /* reserved for CFI, don't use*/ + uint16_t vid:12; /* Vlan id*/ +#else +#error "Please fix" +#endif +} qdf_net_vlanhdr_t; + +typedef struct qdf_net_vid { +#if defined(QDF_LITTLE_ENDIAN_MACHINE) + uint16_t val:12; + uint8_t res:4; +#elif defined(QDF_BIG_ENDIAN_MACHINE) + uint8_t res:4; + uint16_t val:12; +#else +#error "Please fix" +#endif +} qdf_net_vid_t; + +typedef enum { + QDF_NET_TSO_NONE, + QDF_NET_TSO_IPV4, /**< for tsp ipv4 only*/ + QDF_NET_TSO_ALL, /**< ip4 & ipv6*/ +} qdf_net_tso_type_t; + +/** + * qdf_net_dev_info_t - Basic device info + */ +typedef struct { + uint8_t if_name[QDF_NET_IF_NAME_SIZE]; + uint8_t dev_addr[QDF_NET_MAC_ADDR_MAX_LEN]; + uint16_t header_len; + uint16_t mtu_size; + uint32_t unit; +} qdf_net_dev_info_t; + +/** + * qdf_nbuf_tso_t - For TCP large Segment Offload + */ +typedef struct { + qdf_net_tso_type_t type; + uint16_t mss; + uint8_t hdr_off; +} qdf_nbuf_tso_t; + +/** + * qdf_net_wireless_event_t - Wireless events + * QDF_IEEE80211_ASSOC = station associate (bss mode) + * QDF_IEEE80211_REASSOC = station re-associate (bss mode) + * QDF_IEEE80211_DISASSOC = station disassociate (bss mode) + * QDF_IEEE80211_JOIN = station join (ap mode) + * QDF_IEEE80211_LEAVE = station leave (ap mode) + * QDF_IEEE80211_SCAN = scan complete, results available + * QDF_IEEE80211_REPLAY = sequence counter replay detected + * QDF_IEEE80211_MICHAEL = Michael MIC failure detected + * QDF_IEEE80211_REJOIN = station re-associate (ap mode) + * QDF_CUSTOM_PUSH_BUTTON = WPS push button + */ +typedef enum qdf_net_wireless_events { + QDF_IEEE80211_ASSOC = __QDF_IEEE80211_ASSOC, + QDF_IEEE80211_REASSOC = __QDF_IEEE80211_REASSOC, + QDF_IEEE80211_DISASSOC = __QDF_IEEE80211_DISASSOC, + QDF_IEEE80211_JOIN = __QDF_IEEE80211_JOIN, + QDF_IEEE80211_LEAVE = __QDF_IEEE80211_LEAVE, + QDF_IEEE80211_SCAN = __QDF_IEEE80211_SCAN, + QDF_IEEE80211_REPLAY = __QDF_IEEE80211_REPLAY, + QDF_IEEE80211_MICHAEL = __QDF_IEEE80211_MICHAEL, + QDF_IEEE80211_REJOIN = __QDF_IEEE80211_REJOIN, + QDF_CUSTOM_PUSH_BUTTON = __QDF_CUSTOM_PUSH_BUTTON +} qdf_net_wireless_event_t; + +/** + * qdf_net_ipv6_addr_t - IPv6 Address + */ +typedef struct { + union { + uint8_t u6_addr8[16]; + uint16_t u6_addr16[8]; + uint32_t u6_addr32[4]; + } in6_u; +#define s6_addr32 in6_u.u6_addr32 +} qdf_net_ipv6_addr_t; + +/** + * qdf_net_ipv6hdr_t - IPv6 Header + */ +typedef struct { +#if defined(QDF_LITTLE_ENDIAN_MACHINE) + uint8_t ipv6_priority:4, + ipv6_version:4; +#elif defined(QDF_BIG_ENDIAN_MACHINE) + uint8_t ipv6_version:4, + ipv6_priority:4; +#else +#error "Please fix" +#endif + uint8_t ipv6_flow_lbl[3]; + + uint16_t ipv6_payload_len; + uint8_t ipv6_nexthdr, + ipv6_hop_limit; + + qdf_net_ipv6_addr_t ipv6_saddr, + ipv6_daddr; +} qdf_net_ipv6hdr_t; + +/** + * qdf_net_icmpv6hdr_t - ICMPv6 Header + */ +typedef struct { + uint8_t icmp6_type; + uint8_t icmp6_code; + uint16_t icmp6_cksum; + + union { + uint32_t un_data32[1]; + uint16_t un_data16[2]; + uint8_t un_data8[4]; + + struct { + uint16_t identifier; + uint16_t sequence; + } u_echo; + + struct { +#if defined(QDF_LITTLE_ENDIAN_MACHINE) + uint32_t reserved:5, + override:1, + solicited:1, + router:1, + reserved2:24; +#elif defined(QDF_BIG_ENDIAN_MACHINE) + uint32_t router:1, + solicited:1, + override:1, + reserved:29; +#else +#error "Please fix" +#endif + } u_nd_advt; + + struct { + uint8_t hop_limit; +#if defined(QDF_LITTLE_ENDIAN_MACHINE) + uint8_t reserved:6, + other:1, + managed:1; + +#elif defined(QDF_BIG_ENDIAN_MACHINE) + uint8_t managed:1, + other:1, + reserved:6; +#else +#error "Please fix" +#endif + uint16_t rt_lifetime; + } u_nd_ra; + + } icmp6_dataun; + +} qdf_net_icmpv6hdr_t; + +/** + * qdf_net_nd_msg_t - Neighbor Discovery Message + */ +typedef struct { + qdf_net_icmpv6hdr_t nd_icmph; + qdf_net_ipv6_addr_t nd_target; + uint8_t nd_opt[0]; +} qdf_net_nd_msg_t; + + +static inline int32_t qdf_csum_ipv6(const in6_addr_t *saddr, + const in6_addr_t *daddr, + __u32 len, unsigned short proto, + wsum_t sum) +{ + return (int32_t)__qdf_csum_ipv6(saddr, daddr, len, proto, sum); +} + +typedef struct { + uint8_t i_fc[2]; + uint8_t i_dur[2]; + uint8_t i_addr1[QDF_NET_MAC_ADDR_MAX_LEN]; + uint8_t i_addr2[QDF_NET_MAC_ADDR_MAX_LEN]; + uint8_t i_addr3[QDF_NET_MAC_ADDR_MAX_LEN]; + uint8_t i_seq[2]; + uint8_t i_qos[2]; +} qdf_dot3_qosframe_t; + +typedef struct { + uint8_t ether_dhost[QDF_NET_MAC_ADDR_MAX_LEN]; + uint8_t ether_shost[QDF_NET_MAC_ADDR_MAX_LEN]; + uint16_t vlan_TCI; + uint16_t vlan_encapsulated_proto; + uint16_t ether_type; +} qdf_ethervlan_header_t; + +/** + * typedef qdf_ether_header_t - ethernet header info + * @ether_dhost: destination hardware address + * @ether_shost: source hardware address + * @ether_type: ethernet type + */ +typedef struct { + uint8_t ether_dhost[QDF_NET_ETH_LEN]; + uint8_t ether_shost[QDF_NET_ETH_LEN]; + uint16_t ether_type; +} qdf_ether_header_t; + +typedef struct { + uint8_t llc_dsap; + uint8_t llc_ssap; + union { + struct { + uint8_t control; + uint8_t format_id; + uint8_t class; + uint8_t window_x2; + } __packed type_u; + struct { + uint8_t num_snd_x2; + uint8_t num_rcv_x2; + } __packed type_i; + struct { + uint8_t control; + uint8_t num_rcv_x2; + } __packed type_s; + struct { + uint8_t control; + /* + * We cannot put the following fields in a structure + * because the structure rounding might cause padding. + */ + uint8_t frmr_rej_pdu0; + uint8_t frmr_rej_pdu1; + uint8_t frmr_control; + uint8_t frmr_control_ext; + uint8_t frmr_cause; + } __packed type_frmr; + struct { + uint8_t control; + uint8_t org_code[3]; + uint16_t ether_type; + } __packed type_snap; + struct { + uint8_t control; + uint8_t control_ext; + } __packed type_raw; + } llc_un /* XXX __packed ??? */; +} qdf_llc_t; +#endif /*_QDF_NET_TYPES_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_notifier.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_notifier.h new file mode 100644 index 0000000000000000000000000000000000000000..1d090e01766c1da4b67df75bde1c26da65a4e7b5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_notifier.h @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file qdf_notifier.h + * This file abstracts notifier chain call operations. + */ + +#ifndef _QDF_NOTIFIER_H +#define _QDF_NOTIFIER_H + +#include +#include + +/* + * qdf_notif_block - qdf notifier block + * @__qdf_notifier_block: OS specific notifier block + * @priv_data: private data of the notifier block + */ +typedef struct { + __qdf_notifier_block notif_block; + void *priv_data; +} qdf_notif_block; + +typedef __qdf_blocking_notif_head qdf_blocking_notif_head; +typedef __qdf_atomic_notif_head qdf_atomic_notif_head; +typedef __qdf_notifier_block qdf_notifier_block; + +#ifdef WLAN_HANG_EVENT + +/** + * qdf_register_blocking_notifier_chain() - Register for blocking notifier chain + * @qdf_blocking_notif_head: Head of blocking notifier chain + * @qdf_notif_block: Notifier Block to be registered for this head chain + * + * This function is invoked to add a notifier block for the specific notifier + * head chain. + * + * Return: QDF_STATUS + */ + +QDF_STATUS qdf_register_blocking_notifier_chain(qdf_blocking_notif_head *head, + qdf_notif_block *qnb); +/** + * qdf_unregister_blocking_notifier_chain() - Unregister for blocking notifier + * chain + * @qdf_blocking_notif_head: Head of blocking notifier chain + * @qdf_notif_block: Notifier Block to be registered for this head chain + * + * This function is invoked to remove a notifier block for the specific notifier + * head chain. + * + * Return: QDF_STATUS + */ + +QDF_STATUS qdf_unregister_blocking_notifier_chain(qdf_blocking_notif_head *head, + qdf_notif_block *qnb); +/** + * qdf_blocking_notfier_call() - Invoke the function in the blocking chain + * @qdf_blocking_notif_head: Head of blocking notifier chain + * @state: state passed during the invoking of the notifier + * @data: Private data to be passed to all the notifier functions + * + * This function is called to invoke all the notifier blocks for the specific + * notifier chain with state and private data. + * when success the notifier reply with NOTIFY_OK. + * + * Return: QDF_STATUS + */ + +QDF_STATUS qdf_blocking_notfier_call(qdf_blocking_notif_head *head, + unsigned long state, void *data); + +/** + * qdf_register_atomic_notifier_chain() - Register for atomic notifier chain + * @qdf_blocking_notif_head: Head of atomic notifier chain + * @qdf_notif_block: Notifier Block to be registered for this head chain + * + * This function is invoked to add a notifier block for the specific atomic + * notifier head chain. + * + * Return: QDF_STATUS + */ + +QDF_STATUS qdf_register_atomic_notifier_chain(qdf_atomic_notif_head *head, + qdf_notif_block *qnb); +/** + * qdf_unregister_atmoic_notifier_chain() - Unregister for atomic notifier + * chain + * @qdf_blocking_notif_head: Head of blocking notifier chain + * @qdf_notif_block: Notifier Block to be registered for this head chain + * + * This function is invoked to remove a notifier block for the specific notifier + * head chain. + * + * Return: QDF_STATUS + */ + +QDF_STATUS qdf_unregister_atomic_notifier_chain(qdf_atomic_notif_head *head, + qdf_notif_block *qnb); +/** + * qdf_blocking_notfier_call() - Invoke the function in the blocking chain + * @qdf_blocking_notif_head: Head of blocking notifier chain + * @v: Generally state passed during the invoking of the notifier + * @data: Private data to be passed to all the notifier functions + * + * This function is invoke a notifier block for the specific notifier head chain + * with state and private data. when success the notifier reply with NOTIFY_OK. + * + * Return: QDF_STATUS + */ + +QDF_STATUS qdf_atomic_notfier_call(qdf_atomic_notif_head *head, + unsigned long v, void *data); +#else + +static inline QDF_STATUS qdf_register_blocking_notifier_chain( + qdf_blocking_notif_head *head, + qdf_notif_block *qnb) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS qdf_unregister_blocking_notifier_chain( + qdf_blocking_notif_head *head, + qdf_notif_block *qnb) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS qdf_blocking_notfier_call( + qdf_blocking_notif_head *head, + unsigned long v, void *data) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS qdf_register_atomic_notifier_chain( + qdf_atomic_notif_head *head, + qdf_notif_block *qnb) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS qdf_unregister_atomic_notifier_chain( + qdf_atomic_notif_head *head, + qdf_notif_block *qnb) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS qdf_atomic_notfier_call(qdf_atomic_notif_head *head, + unsigned long v, void *data) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_parse.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_parse.h new file mode 100644 index 0000000000000000000000000000000000000000..781e6542b027f8de67d40c77621a0313ba4c5aec --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_parse.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Text parsing related abstractions, not related to a specific type + */ + +#ifndef __QDF_PARSE_H +#define __QDF_PARSE_H + +#include "qdf_status.h" + +typedef QDF_STATUS (*qdf_ini_section_cb)(void *context, const char *name); +typedef QDF_STATUS (*qdf_ini_item_cb)(void *context, + const char *key, + const char *value); + +/** + * qdf_ini_parse() - parse an ini file + * @ini_path: The full file path of the ini file to parse + * @context: The caller supplied context to pass into callbacks + * @item_cb: Ini item (key/value pair) handler callback function + * Return QDF_STATUS_SUCCESS to continue parsing, else to abort + * @section_cb: Ini section header handler callback function + * Return QDF_STATUS_SUCCESS to continue parsing, else to abort + * + * The *.ini file format is a simple format consisting of a list of key/value + * pairs (items), separated by an '=' character. Comments are initiated with + * a '#' character. Sections are also supported, using '[' and ']' around the + * section name. e.g. + * + * # comments are started with a '#' character + * # items are key/value string pairs, separated by the '=' character + * someKey1=someValue1 + * someKey2=someValue2 # this is also a comment + * + * # section headers are enclosed in square brackets + * [some section header] # new section begins + * someKey3=someValue3 + * + * Return: QDF_STATUS + */ +QDF_STATUS +qdf_ini_parse(const char *ini_path, void *context, + qdf_ini_item_cb item_cb, qdf_ini_section_cb section_cb); + +#endif /* __QDF_PARSE_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_perf.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_perf.h new file mode 100644 index 0000000000000000000000000000000000000000..aad7982c77f2e8c0885205917f7db501a8062aea --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_perf.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_perf + * This file provides OS abstraction perf API's. + */ + +#ifndef _QDF_PERF_H +#define _QDF_PERF_H + +/* headers */ +#include + +#ifdef QCA_PERF_PROFILING + +/* Typedefs */ +typedef __qdf_perf_id_t qdf_perf_id_t; + +typedef int (*proc_read_t)(char *page, char **start, off_t off, int count, + int *eof, void *data); +typedef int (*proc_write_t)(struct file *file, const char *buf, + unsigned long count, void *data); +typedef void (*perf_sample_t)(struct qdf_perf_entry *entry, + uint8_t done); + +typedef void (*perf_init_t)(struct qdf_perf_entry *entry, uint32_t def_val); + +/** + * typedef proc_api_tbl_t - contains functions to read, write to proc FS + * @proc_read: function pointer to read function + * @proc_write: function pointer to write function + * @sample: function pointer to sample function + * @init: function pointer to init function + * @def_val: int contains default value + */ +typedef struct proc_api_tbl { + proc_read_t proc_read; + proc_write_t proc_write; + perf_sample_t sample; + perf_init_t init; + uint32_t def_val; +} proc_api_tbl_t; + +proc_api_tbl_t api_tbl[]; + +/* Macros */ +#define INIT_API(name, val) { \ + .proc_read = read_##name, \ + .proc_write = write_##name, \ + .sample = sample_event, \ + .init = init_##name, \ + .def_val = val, \ +} + +#define PERF_ENTRY(hdl) ((qdf_perf_entry_t *)hdl) + +#define qdf_perf_init(_parent, _id, _ctr_type) \ + __qdf_perf_init((_parent), (_id), (_ctr_type)) + +#define qdf_perf_destroy(_id) __qdf_perf_destroy((_id)) + +#define qdf_perf_start(_id) __qdf_perf_start((_id)) + +#define qdf_perf_end(_id) __qdf_perf_end((_id)) + +/* Extern declarations */ +extern __qdf_perf_id_t + __qdf_perf_init(qdf_perf_id_t parent, + uint8_t *id_name, + qdf_perf_cntr_t type)(__qdf_perf_id_t parent, + uint8_t *id_name, + uint32_t type); + +extern bool __qdf_perf_destroy(qdf_perf_id_t id)(__qdf_perf_id_t id); + +extern void __qdf_perf_start(qdf_perf_id_t id)(__qdf_perf_id_t id); +extern void __qdf_perf_end(qdf_perf_id_t id)(__qdf_perf_id_t id); + +extern int +qdf_perfmod_init(void); +extern void +qdf_perfmod_exit(void); + +#else /* !QCA_PERF_PROFILING */ + +#define qdf_perfmod_init() +#define qdf_perfmod_exit() +#define DECLARE_N_EXPORT_PERF_CNTR(id) +#define START_PERF_CNTR(_id, _name) +#define END_PERF_CNTR(_id) + +#endif /* QCA_PERF_PROFILING */ + +#endif /* end of _QDF_PERF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_periodic_work.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_periodic_work.h new file mode 100644 index 0000000000000000000000000000000000000000..56d1dd8da66bec5816ba3f123a3bc5d26a369b0e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_periodic_work.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_periodic_work.h + * A simple, periodic work type for repeatedly executing a callback with a + * certain frequency. + */ + +#ifndef __QDF_PERIODIC_WORK_H +#define __QDF_PERIODIC_WORK_H + +#include "i_qdf_periodic_work.h" +#include "qdf_status.h" +#include "qdf_types.h" + +typedef void (*qdf_periodic_work_cb)(void *context); + +/** + * struct qdf_periodic_work - a defered work type which executes a callback + * periodically until stopped + * @dwork: OS-specific delayed work + * @callback: the callback to be executed periodically + * @context: the context to pass to the callback + * @msec: the delay between executions in milliseconds + */ +struct qdf_periodic_work { + struct __qdf_opaque_delayed_work dwork; + qdf_periodic_work_cb callback; + void *context; + uint32_t msec; +}; + +/** + * qdf_periodic_work_create() - initialized a periodic work @pwork + * @pwork: the periodic work to initialize + * @callback: the callback to be executed periodically + * @context: the context to pass to the callback + * + * Return: QDF_STATUS + */ +#define qdf_periodic_work_create(pwork, callback, context) \ + __qdf_periodic_work_create(pwork, callback, context, __func__, __LINE__) + +qdf_must_check QDF_STATUS +__qdf_periodic_work_create(struct qdf_periodic_work *pwork, + qdf_periodic_work_cb callback, void *context, + const char *func, uint32_t line); + +/** + * qdf_periodic_work_destroy() - deinitialize a periodic work @pwork + * @pwork: the periodic work to de-initialize + * + * Return: None + */ +#define qdf_periodic_work_destroy(pwork) \ + __qdf_periodic_work_destroy(pwork, __func__, __LINE__) + +void __qdf_periodic_work_destroy(struct qdf_periodic_work *pwork, + const char *func, uint32_t line); + +/** + * qdf_periodic_work_start() - begin periodic execution of @pwork callback + * @pwork: the periodic work to start + * @msec: the delay between executions in milliseconds + * + * Return: true if started successfully + */ +bool qdf_periodic_work_start(struct qdf_periodic_work *pwork, uint32_t msec); + +/** + * qdf_periodic_work_stop_async() - Asynchronously stop execution of @pwork + * @pwork: the periodic work to stop + * + * When this returns, @pwork is guaranteed to not be queued, *but* its callback + * may still be executing. + * + * This is safe to call from the @pwork callback. + * + * Return: true if @pwork was previously started + */ +bool qdf_periodic_work_stop_async(struct qdf_periodic_work *pwork); + +/** + * qdf_periodic_work_stop_sync() - Synchronously stop execution of @pwork + * @pwork: the periodic work to stop + * + * When this returns, @pwork is guaranteed to not be queued, and its callback + * not executing. + * + * This will deadlock if called from the @pwork callback. + * + * Return: true if @pwork was previously started + */ +bool qdf_periodic_work_stop_sync(struct qdf_periodic_work *pwork); + +#ifdef WLAN_PERIODIC_WORK_DEBUG +/** + * qdf_periodic_work_check_for_leaks() - assert no periodic work leaks + * + * Return: None + */ +void qdf_periodic_work_check_for_leaks(void); + +/** + * qdf_periodic_work_feature_init() - global init logic for periodic work + * + * Return: None + */ +void qdf_periodic_work_feature_init(void); + +/** + * qdf_periodic_work_feature_deinit() - global de-init logic for periodic work + * + * Return: None + */ +void qdf_periodic_work_feature_deinit(void); +#else +static inline void qdf_periodic_work_check_for_leaks(void) { } +static inline void qdf_periodic_work_feature_init(void) { } +static inline void qdf_periodic_work_feature_deinit(void) { } +#endif /* WLAN_PERIODIC_WORK_DEBUG */ + +#endif /* __QDF_PERIODIC_WORK_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_platform.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_platform.h new file mode 100644 index 0000000000000000000000000000000000000000..36e738eb004f4bc88706127e339d64378c6fc632 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_platform.h @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_platform.h + * This file defines platform API abstractions. + */ + +#ifndef _QDF_PLATFORM_H +#define _QDF_PLATFORM_H + +#include "qdf_types.h" + +/** + * qdf_self_recovery_callback() - callback for self recovery + * @psoc: pointer to the posc object + * @reason: the reason for the recovery request + * @func: the caller's function name + * @line: the line number of the callsite + * + * Return: none + */ +typedef void (*qdf_self_recovery_callback)(void *psoc, + enum qdf_hang_reason reason, + const char *func, + const uint32_t line); + +/** + * qdf_is_fw_down_callback() - callback to query if fw is down + * + * Return: true if fw is down and false if fw is not down + */ +typedef bool (*qdf_is_fw_down_callback)(void); + +/** + * qdf_register_fw_down_callback() - API to register fw down callback + * @is_fw_down: callback to query if fw is down or not + * + * Return: none + */ +void qdf_register_fw_down_callback(qdf_is_fw_down_callback is_fw_down); + +/** + * qdf_is_fw_down() - API to check if fw is down or not + * + * Return: true: if fw is down + * false: if fw is not down + */ +bool qdf_is_fw_down(void); + +/** + * qdf_wmi_recv_qmi_cb() - callback to receive WMI over QMI + * @cb_ctx: WMI event recv callback context(wmi_handle) + * @buf: WMI buffer + * @len: WMI buffer len + * + * Return: 0 if success otherwise -EINVAL + */ +typedef int (*qdf_wmi_recv_qmi_cb)(void *cb_ctx, void *buf, int len); + +/** + * qdf_wmi_send_over_qmi_callback() - callback to send WMI over QMI + * @buf: WMI buffer + * @len: WMI buffer len + * @cb_ctx: WMI event recv callback context(wmi_handle) + * @wmi_rx_cb: WMI event receive call back + * + * Return: QDF_STATUS_SUCCESS if success otherwise QDF error code + */ +typedef QDF_STATUS (*qdf_wmi_send_over_qmi_callback)(void *buf, uint32_t len, + void *cb_ctx, + qdf_wmi_recv_qmi_cb + wmi_rx_cb); + +/** + * qdf_register_wmi_send_recv_qmi_callback() - Register WMI over QMI callback + * @qdf_wmi_send_over_qmi_callback: callback to send recv WMI data over QMI + * + * Return: none + */ +void qdf_register_wmi_send_recv_qmi_callback(qdf_wmi_send_over_qmi_callback + wmi_send_recv_qmi_cb); + +/** + * qdf_wmi_send_recv_qmi() - API to send receive WMI data over QMI + * @buf: WMI buffer + * @len: WMI buffer len + * @cb_ctx: WMI event recv callback context(wmi_handle) + * @wmi_rx_cb: WMI event receive call back + * + * Return: QDF STATUS of operation + */ +QDF_STATUS qdf_wmi_send_recv_qmi(void *buf, uint32_t len, void *cb_ctx, + qdf_wmi_recv_qmi_cb wmi_rx_cb); + +/** + * qdf_is_driver_unloading_callback() - callback to get driver unloading in progress + * or not + * + * Return: true if driver is unloading else false + */ +typedef bool (*qdf_is_driver_unloading_callback)(void); + +/** + * qdf_register_is_driver_unloading_callback() - driver unloading callback + * @callback: driver unloading callback + * + * Return: None + */ +void qdf_register_is_driver_unloading_callback( + qdf_is_driver_unloading_callback callback); + +/** + * qdf_register_self_recovery_callback() - register self recovery callback + * @callback: self recovery callback + * + * Return: None + */ +void qdf_register_self_recovery_callback(qdf_self_recovery_callback callback); + +/** + * qdf_trigger_self_recovery () - trigger self recovery + * @psoc: the psoc at which the recovery is being triggered + * @reason: the reason for the recovery request + * + * Call API only in case of fatal error, + * if self_recovery_cb callback is registered, injcets fw crash and recovers + * else raises QDF_BUG() + * + * Return: None + */ +#define qdf_trigger_self_recovery(psoc, reason) \ + __qdf_trigger_self_recovery(psoc, reason, __func__, __LINE__) +void __qdf_trigger_self_recovery(void *psoc, enum qdf_hang_reason reason, + const char *func, const uint32_t line); + +/** + * qdf_is_recovering_callback() - callback to get driver recovering in + * progress or not + * + * Return: true if driver is doing recovering else false + */ +typedef bool (*qdf_is_recovering_callback)(void); + +/** + * qdf_register_recovering_state_query_callback() - register recover status + * query callback + * + * Return: none + */ +void qdf_register_recovering_state_query_callback( + qdf_is_recovering_callback is_recovering); + +/** + * qdf_is_driver_unloading() - get driver unloading in progress status + * or not + * + * Return: true if driver is unloading else false + */ +bool qdf_is_driver_unloading(void); + +/** + * qdf_is_recovering() - get driver recovering in progress status + * or not + * + * Return: true if driver is doing recovering else false + */ +bool qdf_is_recovering(void); + +/** + * struct qdf_op_sync - opaque operation synchronization context handle + */ +struct qdf_op_sync; + +typedef int (*qdf_op_protect_cb)(void **out_sync, const char *func); +typedef void (*qdf_op_unprotect_cb)(void *sync, const char *func); + +/** + * qdf_op_protect() - attempt to protect a driver operation + * @out_sync: output parameter for the synchronization context, populated on + * success + * + * Return: Errno + */ +#define qdf_op_protect(out_sync) __qdf_op_protect(out_sync, __func__) + +qdf_must_check int +__qdf_op_protect(struct qdf_op_sync **out_sync, const char *func); + +/** + * qdf_op_unprotect() - release driver operation protection + * @sync: synchronization context returned from qdf_op_protect() + * + * Return: None + */ +#define qdf_op_unprotect(sync) __qdf_op_unprotect(sync, __func__) + +void __qdf_op_unprotect(struct qdf_op_sync *sync, const char *func); + +/** + * qdf_op_callbacks_register() - register driver operation protection callbacks + * + * Return: None + */ +void qdf_op_callbacks_register(qdf_op_protect_cb on_protect, + qdf_op_unprotect_cb on_unprotect); + +/** + * qdf_is_drv_connected_callback() - callback to query if drv is connected + * + * Return: true if drv is connected else false + */ +typedef bool (*qdf_is_drv_connected_callback)(void); + +/** + * qdf_is_drv_connected() - API to check if drv is connected or not + * + * DRV is dynamic request voting using which fw can do page fault and + * bring in page back without apps wake up + * + * Return: true: if drv is connected + * false: if drv is not connected + */ +bool qdf_is_drv_connected(void); + +/** + * qdf_register_drv_connected_callback() - API to register drv connected cb + * @is_drv_connected: callback to query if drv is connected or not + * + * Return: none + */ +void qdf_register_drv_connected_callback(qdf_is_drv_connected_callback + is_drv_connected); + +/** + * qdf_check_state_before_panic() - API to check if FW is down + * or driver is in recovery before calling assert + * + * Return: none + */ +void qdf_check_state_before_panic(void); + +/** + * qdf_is_drv_supported_callback() - callback to query if drv is supported + * + * Return: true if drv is supported else false + */ +typedef bool (*qdf_is_drv_supported_callback)(void); + +/** + * qdf_is_drv_supported_callback() - API to check if drv is supported or not + * + * DRV is dynamic request voting using which fw can do page fault and + * bring in page back without apps wake up + * + * Return: true: if drv is supported + * false: if drv is not supported + */ +bool qdf_is_drv_supported(void); + +/** + * qdf_register_drv_supported_callback() - API to register drv supported cb + * @is_drv_supported: callback to query if drv is supported or not + * + * Return: none + */ +void qdf_register_drv_supported_callback(qdf_is_drv_supported_callback + is_drv_supported); + +typedef void (*qdf_recovery_reason_update_callback)(enum qdf_hang_reason + reason); + +/** + * qdf_register_recovery_reason_update() - Register callback to update recovery + * reason + * @qdf_recovery_reason_update_callback: callback to update recovery reason + * + * Return: none + */ +void qdf_register_recovery_reason_update(qdf_recovery_reason_update_callback + callback); + +/** + * qdf_recovery_reason_update() - update recovery reason + * @reason: recovery reason + * + * Return: none + */ +void qdf_recovery_reason_update(enum qdf_hang_reason reason); + +/** + * qdf_bus_reg_dump() - callback for getting bus specific register dump + * @dev: Bus specific device + * @buf: Hang event buffer in which the data will be populated + * @len: length of data to be populated in the hang event buffer + * + * Return: none + */ +typedef void (*qdf_bus_reg_dump)(struct device *dev, uint8_t *buf, + uint32_t len); + +/** + * qdf_register_get_bus_reg_dump() - Register callback to update bus register + * dump + * @qdf_bus_reg_dump: callback to update bus register dump + * + * Return: none + */ +void qdf_register_get_bus_reg_dump(qdf_bus_reg_dump callback); + +/** + * qdf_get_bus_reg_dump() - Get the register dump for the bus + * @dev: device + * @buffer: buffer for hang data + * @len: len of hang data + * + * Return: none + */ +void qdf_get_bus_reg_dump(struct device *dev, uint8_t *buf, uint32_t len); +#endif /*_QDF_PLATFORM_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ptr_hash.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ptr_hash.h new file mode 100644 index 0000000000000000000000000000000000000000..b1b85cb58d59aea13a691cd8620dbb3c6d725ad0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ptr_hash.h @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_ptr_hash.h + * + * A minimal hashtable implementation for doing fast lookups via pointer. + * + * qdf_ptr_hash also has the benefit of knowing its own size, allowing a pointer + * to the hashtable to be passed around and embedded in other structs. Since + * every hashtable is not necessarily of the same size, this allows using hash + * tables in a lot of new places which would be impossible with the current + * kernel hashtable implementation. + * + * Because the size of the hashtable varies with the number of bits used in the + * hash, declaring a qdf_ptr_hash is a bit different. If you want to embed a + * qdf_ptr_hash in another type, use a combination of qdf_ptr_hash_declare() and + * qdf_ptr_hash_ptr(). If you just want to declare and use a qdf_ptr_hash, use + * qdf_ptr_hash_declare_ptr() instead. Either method will ensure the appropriate + * number of bytes is accounted for using an internal union, and provides the + * consumer with a pointer to a qdf_ptr_hash type which can be used with all of + * the other qdf_ptr_hash APIs. Alternatively, you can skip these complexities + * by simply dynamically allocating the qdf_ptr_hash via qdf_ptr_hash_create(). + */ + +#ifndef __QDF_PTR_HASH_H +#define __QDF_PTR_HASH_H + +#include "i_qdf_ptr_hash.h" +#include "qdf_mem.h" +#include "qdf_slist.h" +#include "qdf_types.h" +#include "qdf_util.h" + +/** + * struct qdf_ptr_hash_bucket - a type representing a hash bucket + * @list: the list used for hash chaining + */ +struct qdf_ptr_hash_bucket { + struct qdf_slist list; +}; + +/** + * struct qdf_ptr_hash - a hash table type for doing fast lookups via pointer + * @bits: the number of bits to use when hashing keys + * @count: the number of buckets, always equal to 2^@bits + * @buckets: empty bucket array for accessing a variable length array of buckets + */ +struct qdf_ptr_hash { + int8_t bits; + int16_t count; + struct qdf_ptr_hash_bucket buckets[0]; +}; + +/** + * struct qdf_ptr_hash_entry - entry type of membership in a qdf_ptr_hash + * @key: the value used as the key for insertion/lookup + * @node: the list node used for hash chaining + */ +struct qdf_ptr_hash_entry { + uintptr_t key; + struct qdf_slist_node node; +}; + +#define __qdf_ptr_hash_size(bits) (sizeof(struct qdf_ptr_hash) + \ + sizeof(((struct qdf_ptr_hash *)0)->buckets[0]) * (1 << bits)) + +/** + * qdf_ptr_hash_declare() - declare a new qdf_ptr_hash + * @name: the C identifier to use for the new hash table + * @bits: The number of bits to use for hashing + * + * Return: None + */ +#define qdf_ptr_hash_declare(name, _bits) \ +union { \ + struct qdf_ptr_hash ht; \ + uint8_t __raw[__qdf_ptr_hash_size(_bits)]; \ +} __##name = { .ht = { .bits = _bits, .count = (1 << _bits) } } + +/** + * qdf_ptr_hash_ptr() - get a pointer to a declared qdf_ptr_hash + * @name: the C identifier of the declared qdf_ptr_hash + * + * Return: pointer to a qdf_ptr_hash + */ +#define qdf_ptr_hash_ptr(name) &__##name.ht + +/** + * qdf_ptr_hash_declare_ptr() - declare a pointer to a new qdf_ptr_hash + * @name: the C identifier to use for the pointer to the new qdf_ptr_hash + * @bits: The number of bits to use for hashing + * + * Return: None + */ +#define qdf_ptr_hash_declare_ptr(name, bits) \ +qdf_ptr_hash_declare(name, bits); \ +struct qdf_ptr_hash *name = qdf_ptr_hash_ptr(name) + +#define __qdf_ptr_hash_for_each_bucket(ht, bkt) \ + for ((bkt) = (ht)->buckets; \ + (bkt) < (ht)->buckets + (ht)->count; \ + (bkt)++) + +/** + * qdf_ptr_hash_init() - initialize a qdf_ptr_hash + * @ht: the hash table to initialize + * + * Return: None + */ +static inline void qdf_ptr_hash_init(struct qdf_ptr_hash *ht) +{ + struct qdf_ptr_hash_bucket *bucket; + + __qdf_ptr_hash_for_each_bucket(ht, bucket) + qdf_slist_init(&bucket->list); +} + +/** + * qdf_ptr_hash_deinit() - de-initialize a qdf_ptr_hash + * @ht: the hash table to de-initialize + * + * Return: None + */ +static inline void qdf_ptr_hash_deinit(struct qdf_ptr_hash *ht) +{ + struct qdf_ptr_hash_bucket *bucket; + + __qdf_ptr_hash_for_each_bucket(ht, bucket) + qdf_slist_deinit(&bucket->list); +} + +/** + * qdf_ptr_hash_create() - allocate and initialize a qdf_ptr_hash + * @bits: the number of bits to use for hashing + * + * Return: qdf_ptr_hash pointer on succes, NULL on allocation failure + */ +static inline struct qdf_ptr_hash *qdf_ptr_hash_create(uint8_t bits) +{ + struct qdf_ptr_hash *ht = qdf_mem_malloc(__qdf_ptr_hash_size(bits)); + + if (!ht) + return NULL; + + ht->bits = bits; + ht->count = 1 << bits; + qdf_ptr_hash_init(ht); + + return ht; +} + +/** + * qdf_ptr_hash_destroy() - de-initialize and de-allocate a qdf_ptr_hash + * @ht: the qdf_ptr_hash to destroy + * + * Return: None + */ +static inline void qdf_ptr_hash_destroy(struct qdf_ptr_hash *ht) +{ + qdf_ptr_hash_deinit(ht); + qdf_mem_free(ht); +} + +/** + * qdf_ptr_hash_empty() - check if a qdf_ptr_hash has any entries + * @ht: the qdf_ptr_hash to check + * + * Return: true if @ht contains no entries + */ +static inline bool qdf_ptr_hash_empty(struct qdf_ptr_hash *ht) +{ + struct qdf_ptr_hash_bucket *bucket; + + __qdf_ptr_hash_for_each_bucket(ht, bucket) + if (!qdf_slist_empty(&bucket->list)) + return false; + + return true; +} + +#ifdef ENABLE_QDF_PTR_HASH_DEBUG +/** + * qdf_ptr_hash_dup_check_in_bucket() - check if a hash_entry is duplicated + in hash_bucket + * @bucket: qdf_ptr_hash_bucket pointer + * cmp_entry: the hash_entry to be checked + * + * if the cmp_entry is found in bucket list, then trigger + * assert to report duplication. + * + * Return: None + */ +static inline void qdf_ptr_hash_dup_check_in_bucket( + struct qdf_ptr_hash_bucket *bucket, + struct qdf_ptr_hash_entry *cmp_entry) +{ + struct qdf_ptr_hash_entry *tmp_entry; + + qdf_slist_for_each(&bucket->list, tmp_entry, node) + qdf_assert_always(tmp_entry != cmp_entry); +} +#else +#define qdf_ptr_hash_dup_check_in_bucket(_bucket, _cmp_entry) /* no op */ +#endif + +static inline struct qdf_ptr_hash_bucket * +__qdf_ptr_hash_get_bucket(struct qdf_ptr_hash *ht, uintptr_t key) +{ + return ht->buckets + __qdf_ptr_hash_key(key, ht->bits); +} + +/** + * qdf_ptr_hash_add() - insert an entry into a qdf_ptr_hash + * @ht: the qdf_ptr_hash to insert into + * @key: the pointer to use as an insertion/lookup key + * @item: a pointer to a type that contains a qdf_ptr_hash_entry + * @entry_field: C identifier for the qdf_ptr_hash_entry field in @item + * + * Return: None + */ +#define qdf_ptr_hash_add(ht, key, item, entry_field) \ + __qdf_ptr_hash_add(ht, (uintptr_t)key, &(item)->entry_field) + +static inline void __qdf_ptr_hash_add(struct qdf_ptr_hash *ht, uintptr_t key, + struct qdf_ptr_hash_entry *entry) +{ + entry->key = key; + /* check hash_enrty exist or not before push */ + qdf_ptr_hash_dup_check_in_bucket(__qdf_ptr_hash_get_bucket(ht, key), + entry); + qdf_slist_push(&__qdf_ptr_hash_get_bucket(ht, key)->list, entry, node); +} + +/** + * qdf_ptr_hash_remove() - remove an entry from a qdf_ptr_hash + * @ht: the qdf_ptr_hash to remove from + * @key: the pointer to use as a lookup key + * @cursor: a pointer to a type that contains a qdf_ptr_hash_entry + * @entry_field: C identifier for the qdf_ptr_hash_entry field in @cursor + * + * Return: removed item of type @cursor on success, NULL otherwise + */ +#define qdf_ptr_hash_remove(ht, key, cursor, entry_field) ({ \ + struct qdf_ptr_hash_entry *_e = \ + __qdf_ptr_hash_remove(ht, (uintptr_t)key); \ + cursor = _e ? qdf_container_of(_e, typeof(*(cursor)), \ + entry_field) : NULL; \ + cursor; }) + +static inline struct qdf_ptr_hash_entry * +__qdf_ptr_hash_remove(struct qdf_ptr_hash *ht, uintptr_t key) +{ + struct qdf_ptr_hash_bucket *bucket = __qdf_ptr_hash_get_bucket(ht, key); + struct qdf_ptr_hash_entry *prev; + struct qdf_ptr_hash_entry *entry; + + qdf_slist_for_each_del(&bucket->list, prev, entry, node) { + if (entry->key == key) { + qdf_slist_remove(&bucket->list, prev, node); + /* check hash_enrty exist or not after remove */ + qdf_ptr_hash_dup_check_in_bucket(bucket, entry); + entry->key = 0; + return entry; + } + } + + return NULL; +} + +#define __qdf_ptr_hash_for_each_in_bucket(bucket, cursor, entry_field) \ + qdf_slist_for_each(&(bucket)->list, cursor, entry_field.node) + +/** + * qdf_ptr_hash_for_each() - qdf_ptr_hash item iterator for all items + * @ht: the qdf_ptr_hash to iterate over + * @bucket: qdf_ptr_hash_bucket cursor pointer + * @cursor: a pointer to a type that contains a qdf_ptr_hash_entry + * @entry_field: C identifier for the qdf_ptr_hash_entry field in @cursor + */ +#define qdf_ptr_hash_for_each(ht, bucket, cursor, entry_field) \ + __qdf_ptr_hash_for_each_bucket(ht, bucket) \ + __qdf_ptr_hash_for_each_in_bucket(bucket, cursor, entry_field) + +/** + * qdf_ptr_hash_for_each_by_hash() - qdf_ptr_hash item iterator for items which + * hash to the same value as @key + * @ht: the qdf_ptr_hash to iterate over + * @key: the pointer to use as a lookup key + * @cursor: a pointer to a type that contains a qdf_ptr_hash_entry + * @entry_field: C identifier for the qdf_ptr_hash_entry field in @cursor + */ +#define qdf_ptr_hash_for_each_by_hash(ht, key, cursor, entry_field) \ + __qdf_ptr_hash_for_each_in_bucket( \ + __qdf_ptr_hash_get_bucket(ht, (uintptr_t)key), \ + cursor, entry_field) + +/** + * qdf_ptr_hash_for_each_by_key() - qdf_ptr_hash item iterator for items whose + * keys equal @key + * @ht: the qdf_ptr_hash to iterate over + * @key: the pointer to use as a lookup key + * @cursor: a pointer to a type that contains a qdf_ptr_hash_entry + * @entry_field: C identifier for the qdf_ptr_hash_entry field in @cursor + */ +#define qdf_ptr_hash_for_each_by_key(ht, _key, cursor, entry_field) \ + qdf_ptr_hash_for_each_by_hash(ht, _key, cursor, entry_field) \ + if ((cursor)->entry_field.key == (uintptr_t)_key) + +/** + * qdf_ptr_hash_get() - get the first item whose key matches @key + * @ht: the qdf_ptr_hash to look in + * @key: the pointer to use as a lookup key + * @cursor: a pointer to a type that contains a qdf_ptr_hash_entry + * @entry_field: C identifier for the qdf_ptr_hash_entry field in @cursor + * + * Return: first item matching @key of type @cursor on success, NULL otherwise + */ +#define qdf_ptr_hash_get(ht, key, cursor, entry_field) ({ \ + cursor = NULL; \ + qdf_ptr_hash_for_each_by_key(ht, key, cursor, entry_field) \ + break; \ + cursor; }) + +#endif /* __QDF_PTR_HASH_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_slist.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_slist.h new file mode 100644 index 0000000000000000000000000000000000000000..b56f829f9b5ca85a4afae574ea7b1b798f730b37 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_slist.h @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_slist.h + * + * A minimal, singly linked list implementation, with push front, pop front, and + * remove capabilities. These are all O(1) operations. + * + * In order to remove an item, a pointer to the previous item must be known. + * Thus, removing an item is most efficient when combined with + * qdf_slist_for_each_del(). For cases where you need efficient removal of an + * arbitrary list node without iteration, consider using the doubly linked list + * qdf_list instead. + */ + +#ifndef __QDF_SLIST_H +#define __QDF_SLIST_H + +#include "qdf_trace.h" +#include "qdf_util.h" + +#define __qdf_slist_poison ((void *)0xdeaddeaddeaddeadull) + +/** + * struct qdf_slist - a singly linked list + * @head: pointer to the head of the list + */ +struct qdf_slist { + struct qdf_slist_node *head; +}; + +/** + * struct qdf_slist_node - a singly linked list node + * @next: pointer to the next node in the list, NULL if there is none + */ +struct qdf_slist_node { + struct qdf_slist_node *next; +}; + +#define __qdf_slist_item(node, cursor, node_field) ({ \ + struct qdf_slist_node *__n = (node); \ + (__n ? qdf_container_of(__n, typeof(*(cursor)), node_field) : NULL); }) + +#define __qdf_slist_next_item(slist, cursor, node_field) \ + __qdf_slist_item(cursor ? (cursor)->node_field.next : \ + (slist)->head, cursor, node_field) + +/** + * qdf_slist_for_each - iterate over all of the items in @slist + * @slist: pointer to the qdf_slist to iterate over + * @cursor: cursor pointer of the list's item type, populated for each item + * @node_field: name of the qdf_slist_node field in the item's type + */ +#define qdf_slist_for_each(slist, cursor, node_field) \ + for (cursor = __qdf_slist_item((slist)->head, cursor, node_field); \ + cursor; \ + cursor = __qdf_slist_item((cursor)->node_field.next, \ + cursor, node_field)) + +/** + * qdf_slist_for_each_del - iterate over all of the items in @slist, + * allowing for the safe deletion of nodes during iteration + * @slist: pointer to the qdf_slist to iterate over + * @prev: cursor pointer, populated with the previous item + * @cursor: cursor pointer of the list's item type, populated for each item + * @node_field: name of the qdf_slist_node field in the item's type + */ +#define qdf_slist_for_each_del(slist, prev, cursor, node_field) \ + for (prev = NULL, \ + cursor = __qdf_slist_item((slist)->head, cursor, node_field); \ + cursor; \ + prev = __qdf_slist_next_item(slist, prev, node_field) == \ + cursor ? cursor : prev, \ + cursor = __qdf_slist_next_item(slist, prev, node_field)) + +/** + * qdf_slist_init() - initialize a qdf_slist + * @slist: the list to initialize + * + * Return: None + */ +static inline void qdf_slist_init(struct qdf_slist *slist) +{ + slist->head = NULL; +} + +/** + * qdf_slist_deinit() - deinitialize a qdf_slist + * @slist: the list to deinitialize + * + * Return: None + */ +static inline void qdf_slist_deinit(struct qdf_slist *slist) +{ + QDF_BUG(!slist->head); + slist->head = __qdf_slist_poison; +} + +/** + * qdf_slist_empty() - check if a qdf_slist is empty + * @slist: the list to check + * + * Return: true if @slist contains zero items + */ +static inline bool qdf_slist_empty(struct qdf_slist *slist) +{ + return !slist->head; +} + +/** + * qdf_slist_push() - push an item into the front of a qdf_slist + * @slist: the list to push into + * @cursor: the item to push + * @node_field: name of the qdf_slist_node field in the item's type + * + * Return: None + */ +#define qdf_slist_push(slist, cursor, node_field) \ + __qdf_slist_push(slist, &(cursor)->node_field) + +static inline void +__qdf_slist_push(struct qdf_slist *slist, struct qdf_slist_node *node) +{ + node->next = slist->head; + slist->head = node; +} + +/** + * qdf_slist_pop() - pop an item from the front of a qdf_slist + * @slist: the list to pop from + * @cursor: cursor pointer of the list's item type, not populated + * @node_field: name of the qdf_slist_node field in the item's type + * + * Return: pointer to the popped item, NULL if @slist was empty + */ +#define qdf_slist_pop(slist, cursor, node_field) \ + __qdf_slist_item(__qdf_slist_pop(slist), cursor, node_field) + +static inline struct qdf_slist_node *__qdf_slist_pop(struct qdf_slist *slist) +{ + struct qdf_slist_node *node = slist->head; + + if (!node) + return NULL; + + slist->head = node->next; + node->next = __qdf_slist_poison; + + return node; +} + +/** + * qdf_slist_remove() - remove an item from a qdf_slist + * @slist: the list to remove from + * @prev: pointer to the item previous to the item to remove, NULL removes head + * @node_field: name of the qdf_slist_node field in the item's type + * + * Return: pointer to the removed item, NULL if none was removed + */ +#define qdf_slist_remove(slist, prev, node_field) \ + __qdf_slist_item(__qdf_slist_remove(slist, \ + prev ? &(prev)->node_field : NULL), prev, node_field) + +static inline struct qdf_slist_node * +__qdf_slist_remove(struct qdf_slist *slist, struct qdf_slist_node *prev) +{ + struct qdf_slist_node *node; + + if (!prev) + return __qdf_slist_pop(slist); + + if (!prev->next) + return NULL; + + node = prev->next; + prev->next = node->next; + node->next = __qdf_slist_poison; + + return node; +} + +#endif /* __QDF_SLIST_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_status.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_status.h new file mode 100644 index 0000000000000000000000000000000000000000..95850ffca9dbfa788cad93a49dcb5185c38a2a0c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_status.h @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_status + * QCA driver framework (QDF) status codes + * Basic status codes/definitions used by QDF + */ + +#ifndef __QDF_STATUS_H +#define __QDF_STATUS_H + +/** + * typedef QDF_STATUS - QDF error codes + * @QDF_STATUS_SUCCESS: success + * @QDF_STATUS_E_RESOURCES: system resource(other than memory) not available + * @QDF_STATUS_E_NOMEM: not enough memory + * @QDF_STATUS_E_AGAIN: try again + * @QDF_STATUS_E_INVAL: invalid request + * @QDF_STATUS_E_FAULT: system fault + * @QDF_STATUS_E_ALREADY: another request already in progress + * @QDF_STATUS_E_BADMSG: bad message + * @QDF_STATUS_E_BUSY: device or resource busy + * @QDF_STATUS_E_CANCELED: request cancelled + * @QDF_STATUS_E_ABORTED: request aborted + * @QDF_STATUS_E_NOSUPPORT: request not supported + * @QDF_STATUS_E_PERM: operation not permitted + * @QDF_STATUS_E_EMPTY: empty condition + * @QDF_STATUS_E_EXISTS: existence failure + * @QDF_STATUS_E_TIMEOUT: operation timeout + * @QDF_STATUS_E_FAILURE: unknown reason do not use unless nothign else applies + * @QDF_STATUS_E_NOENT: No such file or directory + * @QDF_STATUS_E_E2BIG: Arg list too long + * @QDF_STATUS_E_NOSPC: no space left on device + * @QDF_STATUS_E_ADDRNOTAVAIL: Cannot assign requested address + * @QDF_STATUS_E_ENXIO: No such device or address + * @QDF_STATUS_E_NETDOWN: network is down + * @QDF_STATUS_E_IO: I/O Error + * @QDF_STATUS_E_PENDING: pending status + * @QDF_STATUS_E_NETRESET: Network dropped connection because of reset + * @QDF_STATUS_E_SIG: Exit due to received SIGINT + * @QDF_STATUS_E_PROTO: protocol error + * @QDF_STATUS_NOT_INITIALIZED: resource not initialized + * @QDF_STATUS_E_NULL_VALUE: request is null + * @QDF_STATUS_PMC_PENDING: request pendign in pmc + * @QDF_STATUS_PMC_DISABLED: pmc is disabled + * @QDF_STATUS_PMC_NOT_NOW: pmc not ready now + * @QDF_STATUS_PMC_AC_POWER: pmc ac power + * @QDF_STATUS_PMC_SYS_ERROR: pmc system error + * @QDF_STATUS_HEARTBEAT_TMOUT: hearbeat timeout error + * @QDF_STATUS_NTH_BEACON_DELIVERY: Nth beacon delivery + * @QDF_STATUS_CSR_WRONG_STATE: csr in wrong state + * @QDF_STATUS_FT_PREAUTH_KEY_SUCCESS: ft preauth key success + * @QDF_STATUS_FT_PREAUTH_KEY_FAILED: ft preauth key failed + * @QDF_STATUS_CMD_NOT_QUEUED: command not queued + * @QDF_STATUS_FW_MSG_TIMEDOUT: target message timeout + * @QDF_STATUS_E_USB_ERROR: USB transaction error + * @QDF_STATUS_MAXCOMP_FAIL: Component id is more than MAX UMAC components + * @QDF_STATUS_COMP_DISABLED: UMAC Component is disabled + * @QDF_STATUS_COMP_ASYNC: UMAC component runs in asynchronous communication + * @QDF_STATUS_CRYPTO_PN_ERROR: PN ERROR in received frame + * @QDF_STATUS_CRYPTO_MIC_FAILURE: MIC failure in received frame + * @QDF_STATUS_CRYPTO_ENCRYPT_FAILED: encryption failed + * @QDF_STATUS_CRYPTO_DECRYPT_FAILED: decryption failed + * @QDF_STATUS_E_RANGE: result/parameter/operation was out of range + * @QDF_STATUS_E_GRO_DROP: return code for GRO drop + * @QDF_STATUS_MAX: not a realy value just a place holder for max + */ +typedef enum { + QDF_STATUS_SUCCESS, + QDF_STATUS_E_RESOURCES, + QDF_STATUS_E_NOMEM, + QDF_STATUS_E_AGAIN, + QDF_STATUS_E_INVAL, + QDF_STATUS_E_FAULT, + QDF_STATUS_E_ALREADY, + QDF_STATUS_E_BADMSG, + QDF_STATUS_E_BUSY, + QDF_STATUS_E_CANCELED, + QDF_STATUS_E_ABORTED, + QDF_STATUS_E_NOSUPPORT, + QDF_STATUS_E_PERM, + QDF_STATUS_E_EMPTY, + QDF_STATUS_E_EXISTS, + QDF_STATUS_E_TIMEOUT, + QDF_STATUS_E_FAILURE, + QDF_STATUS_E_NOENT, + QDF_STATUS_E_E2BIG, + QDF_STATUS_E_NOSPC, + QDF_STATUS_E_ADDRNOTAVAIL, + QDF_STATUS_E_ENXIO, + QDF_STATUS_E_NETDOWN, + QDF_STATUS_E_IO, + QDF_STATUS_E_PENDING, + QDF_STATUS_E_NETRESET, + QDF_STATUS_E_SIG, + QDF_STATUS_E_PROTO, + QDF_STATUS_NOT_INITIALIZED, + QDF_STATUS_E_NULL_VALUE, + QDF_STATUS_PMC_PENDING, + QDF_STATUS_PMC_DISABLED, + QDF_STATUS_PMC_NOT_NOW, + QDF_STATUS_PMC_AC_POWER, + QDF_STATUS_PMC_SYS_ERROR, + QDF_STATUS_HEARTBEAT_TMOUT, + QDF_STATUS_NTH_BEACON_DELIVERY, + QDF_STATUS_CSR_WRONG_STATE, + QDF_STATUS_FT_PREAUTH_KEY_SUCCESS, + QDF_STATUS_FT_PREAUTH_KEY_FAILED, + QDF_STATUS_CMD_NOT_QUEUED, + QDF_STATUS_FW_MSG_TIMEDOUT, + QDF_STATUS_E_USB_ERROR, + QDF_STATUS_MAXCOMP_FAIL, + QDF_STATUS_COMP_DISABLED, + QDF_STATUS_COMP_ASYNC, + QDF_STATUS_CRYPTO_PN_ERROR, + QDF_STATUS_CRYPTO_MIC_FAILURE, + QDF_STATUS_CRYPTO_ENCRYPT_FAILED, + QDF_STATUS_CRYPTO_DECRYPT_FAILED, + QDF_STATUS_E_DEFRAG_ERROR, + QDF_STATUS_E_RANGE, + QDF_STATUS_E_GRO_DROP, + QDF_STATUS_MAX +} QDF_STATUS; + +#define QDF_IS_STATUS_SUCCESS(status) (QDF_STATUS_SUCCESS == (status)) +#define QDF_IS_STATUS_ERROR(status) (QDF_STATUS_SUCCESS != (status)) + +/** + * qdf_status_to_os_return() - map a QDF_STATUS into an OS specific return code + * @status: QDF_STATUS to map + * + * Return: an OS specific error code + */ +int qdf_status_to_os_return(QDF_STATUS status); + +/** + * qdf_status_from_os_return() - map an OS specific return code to a QDF_STATUS + * @rc: the input return code to map + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_status_from_os_return(int rc); + +#endif /* __QDF_STATUS_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_str.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_str.h new file mode 100644 index 0000000000000000000000000000000000000000..5c04baa5bd485902b36502c305194fbbcdba35b0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_str.h @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_str + * QCA driver framework (QDF) string APIs. + */ + +#ifndef __QDF_STR_H +#define __QDF_STR_H + +#include "i_qdf_str.h" +#include "qdf_types.h" + +/** + * qdf_is_space() - check if @c is a whitespace character + * @c: the character to check + * + * Whitespace characters include HT, LF, VT, FF, CR, space, and nbsp + * + * Return: true if @ is a whitespace character + */ +static inline bool qdf_is_space(char c) +{ + return __qdf_is_space(c); +} + +/** + * qdf_str_cmp - Compare two strings + * @str1: First string + * @str2: Second string + * Return: + * 0 - strings are equal + * <0 - str1 sorts lexicographically before str2 + * >0 - str1 sorts lexicographically after str2 + */ +static inline int32_t qdf_str_cmp(const char *str1, const char *str2) +{ + return __qdf_str_cmp(str1, str2); +} + +/** + * qdf_str_dup() - duplicate null-terminated string @src + * @dest: double pointer to be populated + * @src: the null-terminated string to be duplicated + * + * @dest must be freed using qdf_mem_free() to avoid memory leaks. + * + * Return: QDF_STATUS; @dest set to NULL on failure, a valid address on success + */ +QDF_STATUS qdf_str_dup(char **dest, const char *src); + +/** + * qdf_str_eq - compare two null-terminated strings for equality + * @left: the string left of the equality + * @right: the string right of the equality + * + * This is a thin wrapper over `if (strcmp(left, right) == 0)` for clarity. + * + * Return: true if strings are equal + */ +static inline bool qdf_str_eq(const char *left, const char *right) +{ + return qdf_str_cmp(left, right) == 0; +} + +/** + * qdf_str_lcopy - Bounded copy from one string to another + * @dest: destination string + * @src: source string + * @dest_size: max number of bytes to copy (incl. null terminator) + * + * If the return value is >= @dest_size, @dest has been truncated. + * + * Return: length of @src + */ +static inline qdf_size_t +qdf_str_lcopy(char *dest, const char *src, uint32_t dest_size) +{ + return __qdf_str_lcopy(dest, src, dest_size); +} + +/** + * qdf_str_left_trim() - Trim any leading whitespace from @str + * @str: the string to trim + * + * Return: A pointer to the first non-space character in @str + */ +static inline const char *qdf_str_left_trim(const char *str) +{ + return __qdf_str_left_trim(str); +} + +/** + * qdf_str_len() - returns the length of a null-terminated string + * @str: input string + * + * Return: length of @str (without null terminator) + */ +static inline qdf_size_t qdf_str_len(const char *str) +{ + return __qdf_str_len(str); +} + +/** + * qdf_str_right_trim() - Trim any trailing whitespace from @str + * @str: the string to trim + * + * Note: The first trailing whitespace character is replaced with a + * null-terminator + * + * Return: None + */ +void qdf_str_right_trim(char *str); + +/** + * qdf_str_trim() - Trim any leading/trailing whitespace from @str + * @str: the string to trim + * + * Note: The first trailing whitespace character is replaced with a + * null-terminator + * + * Return: A pointer to the first non-space character in @str + */ +static inline char *qdf_str_trim(char *str) +{ + return __qdf_str_trim(str); +} + +/** + * qdf_str_nlen() - Get string length up to @limit characters + * @str: the string to get the length of + * @limit: the maximum number of characters to check + * + * Return: the less of @limit or the length of @str (without null terminator) + */ +static inline qdf_size_t qdf_str_nlen(const char *str, qdf_size_t limit) +{ + return __qdf_str_nlen(str, limit); +} + +/** + * qdf_str_ncmp - Compare two strings + * @str1: First string + * @str2: Second string + * @limit: the maximum number of characters to check + * Return: + * 0 - strings are equal + * <0 - str1 sorts lexicographically before str2 + * >0 - str1 sorts lexicographically after str2 + */ +static inline int32_t +qdf_str_ncmp(const char *str1, const char *str2, qdf_size_t limit) +{ + return __qdf_str_ncmp(str1, str2, limit); +} + +#endif /* __QDF_STR_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_streamfs.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_streamfs.h new file mode 100644 index 0000000000000000000000000000000000000000..f6449cbe4745c898ed4d8ede27d17c373ef1d5f2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_streamfs.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2018, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_streamfs.h + * This file provides OS abstraction for stream filesystem APIs. + */ + +#ifndef _QDF_STREAMFS_H +#define _QDF_STREAMFS_H + +#include +#include +#include + +typedef __qdf_streamfs_chan_t qdf_streamfs_chan_t; +typedef __qdf_streamfs_chan_buf_t qdf_streamfs_chan_buf_t; + +#ifdef WLAN_STREAMFS +/** + * qdf_streamfs_create_dir() - wrapper to create a debugfs directory + * @name: name of the new directory + * @parent: parent node. If NULL, defaults to base qdf_debugfs_root + * + * Return: dentry structure pointer in case of success, otherwise NULL. + * + */ +static inline qdf_dentry_t qdf_streamfs_create_dir( + const char *name, qdf_dentry_t parent) +{ + return qdf_debugfs_create_dir(name, parent); +} + +/** + * qdf_streamfs_remove_file() - wrapper to remove streamfs file + * @d: streamfs node + * + */ +static inline void qdf_streamfs_remove_file(qdf_dentry_t d) +{ + qdf_debugfs_remove_file(d); +} + +/** + * qdf_debugfs_remove_dir_recursive() - wrapper to remove directory recursively + * @d: debugfs node + * + * This function will recursively remove a directory in streamfs that was + * previously created with a call to qdf_debugfs_create_file() or it's + * variant functions. + */ +static inline void qdf_streamfs_remove_dir_recursive(qdf_dentry_t d) +{ + qdf_debugfs_remove_dir_recursive(d); +} + +/** + * qdf_streamfs_create_file() - Create streamfs chan buffer file + * @name: base name of file to create + * @mode: filemode + * @parent: dentry of parent directory, NULL for root directory + * @buf: pointer to chan buffer + * + * Returns file dentry pointer if successful, NULL otherwise. + */ +qdf_dentry_t qdf_streamfs_create_file(const char *name, uint16_t mode, + qdf_dentry_t parent, + qdf_streamfs_chan_buf_t buf); + +/** + * qdf_streamfs_open() - Create streamfs channel for data trasfer + * @base_filename: base name of files to create, %NULL for buffering only + * @parent: dentry of parent directory, %NULL for root directory + * @subbuf_size: size of sub-buffers + * @n_subbufs: number of sub-buffers + * @private_data: user-defined data + * + * Returns channel pointer if successful, %NULL otherwise. + */ +qdf_streamfs_chan_t qdf_streamfs_open(const char *base_filename, + qdf_dentry_t parent, + size_t subbuf_size, size_t n_subbufs, + void *private_data); + +/** + * qdf_streamfs_close() - Closes all channel buffers and frees the channel. + * @chan: pointer to qdf_streamfs_chan. + * + * Returns NONE + */ +void qdf_streamfs_close(qdf_streamfs_chan_t chan); + +/** + * qdf_streamfs_flush() - Flushes all channel buffers. + * @chan: pointer to qdf_streamfs_chan. + * + * Returns NONE + */ +void qdf_streamfs_flush(qdf_streamfs_chan_t chan); + +/** + * qdf_streamfs_reset() - Reset streamfs channel + * @chan: pointer to qdf_streamfs_chan. + * + * This erases data from all channel buffers and restarting the channel + * in its initial state. The buffers are not freed, so any mappings are + * still in effect. + * + * Returns NONE + */ +void qdf_streamfs_reset(qdf_streamfs_chan_t chan); + +/** + * qdf_streamfs_subbufs_consumed() - update the buffer's sub-buffers-consumed + * count + * @chan: pointer to qdf_streamfs_chan. + * @cpu: the cpu associated with the channel buffer to update + * @subbufs_consumed: number of sub-buffers to add to current buf's count + * + * Returns NONE + */ +void qdf_streamfs_subbufs_consumed(qdf_streamfs_chan_t chan, + unsigned int cpu, + size_t consumed); + +/** + * qdf_streamfs_write() - write data into the channel + * @chan: relay channel + * @data: data to be written + * @length: number of bytes to write + * + * Writes data into the current cpu's channel buffer. + */ +void qdf_streamfs_write(qdf_streamfs_chan_t chan, const void *data, + size_t length); +#else +static inline qdf_dentry_t qdf_streamfs_create_dir( + const char *name, qdf_dentry_t parent) +{ + return NULL; +} + +static inline void qdf_streamfs_remove_file(qdf_dentry_t d) +{ +} + +static inline void qdf_streamfs_remove_dir_recursive(qdf_dentry_t d) +{ +} + +static inline +qdf_dentry_t qdf_streamfs_create_file(const char *name, uint16_t mode, + qdf_dentry_t parent, + qdf_streamfs_chan_buf_t buf) +{ + return NULL; +} + +static inline +qdf_streamfs_chan_t qdf_streamfs_open(const char *base_filename, + qdf_dentry_t parent, + size_t subbuf_size, size_t n_subbufs, + void *private_data) +{ + return NULL; +} + +static inline void qdf_streamfs_close(qdf_streamfs_chan_t chan) +{ +} + +static inline void qdf_streamfs_flush(qdf_streamfs_chan_t chan) +{ +} + +static inline void qdf_streamfs_reset(qdf_streamfs_chan_t chan) +{ +} + +static inline void +qdf_streamfs_subbufs_consumed(qdf_streamfs_chan_t chan, + unsigned int cpu, size_t consumed) +{ +} + +static inline void +qdf_streamfs_write(qdf_streamfs_chan_t chan, const void *data, + size_t length) +{ +} +#endif /* WLAN_STREAMFS */ +#endif /* _QDF_STREAMFS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_talloc.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_talloc.h new file mode 100644 index 0000000000000000000000000000000000000000..003c3f354a810db1ec690aa49472c44ca7eb91a7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_talloc.h @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_talloc.h - Public APIs for t(ree) alloc(ate) memory management + * + * These APIs allocate memory like malloc, but track those allocations via a + * parent-child relationship, or tree. If the parent is freed while it still has + * children, a panic will be triggered. This effectively gives you the ability + * to limit the lifetime of an allocation by ensuring the child allocation + * lifetime will be strictly less than the parent allocation lifetime. + */ + +#ifndef __QDF_TALLOC_H +#define __QDF_TALLOC_H + +#include "i_qdf_talloc.h" +#include "qdf_status.h" + +/** + * qdf_talloc() - t(ree) alloc(ate) memory + * @parent: the parent memory of the new allocation + * @size: requested size of the newly allocated memory + * + * Return: pointer to the newly allocated memory + */ +#define qdf_talloc(parent, size) \ + qdf_talloc_fl(parent, size, __func__, __LINE__) + +/** + * qdf_talloc_type() - t(ree) alloc(ate) memory for a type + * @parent: the parent memory of the new allocation + * @cursor: pointer to the type of memory to allocate + * + * This API automatically determines the correct size needed or an allocation + * based on the type of @cursor. If you need to allocate an arbitrary number + * of bytes, use qdf_talloc() instead. + * + * Return: pointer to the newly allocated memory + */ +#define qdf_talloc_type(parent, cursor) \ + qdf_talloc(parent, sizeof(*(cursor))) + +/** + * qdf_talloc_fl() - t(ree) alloc(ate) memory with function and line info + * @parent: the parent memory of the new allocation + * @size: requested size of the newly allocated memory + * @func: name of the function requesting the allocation + * @line: line number of the call site in @func + * + * Return: pointer to the newly allocated memory + */ +#define qdf_talloc_fl(parent, size, func, line) \ + __qdf_talloc_fl(parent, size, func, line) + +/** + * qdf_tfree() - free memory allocated using the *_talloc() function family + * @inout_ptr: double point to memory to free, set to NULL + * + * Return: None + */ +#define qdf_tfree(ptr) \ + qdf_tfree_fl(ptr, __func__, __LINE__) + +/** + * qdf_tfree_fl() - free memory allocated using the *_talloc() function family + * with function and line info + * @ptr: pointer to memory to free + * @func: name of the function requesting the free + * @line: line number of the call site in @func + * + * Return: None + */ +#define qdf_tfree_fl(ptr, func, line) \ +do { \ + __qdf_tfree_fl(ptr, func, line); \ + ptr = (void *)1; \ +} while (false) + +/** + * qdf_talloc_assert_no_children() - assert @parent has not child allocations + * @parent: the parent memory ponter to check + * + * Return: None + */ +#define qdf_talloc_assert_no_children(parent) \ + qdf_talloc_assert_no_children_fl(parent, __func__, __LINE__) + +#ifdef WLAN_TALLOC_DEBUG + +/** + * qdf_talloc_feature_init() - initialize the QDF talloc feature + * + * Must be called before allocating memory via a qdf_talloc API. + * + * Return: None + */ +QDF_STATUS qdf_talloc_feature_init(void); + +/** + * qdf_talloc_feature_deinit() - deinitialize the QDF talloc feature + * + * Memory must not be allocated via a qdf_talloc API after this is called. This + * API asserts that the parent/child relationship table is empty in order to + * catch memory leaks. + * + * Return: None + */ +void qdf_talloc_feature_deinit(void); + +void *__qdf_talloc_fl(const void *parent, const size_t size, + const char *func, const uint16_t line); + +void __qdf_tfree_fl(void *ptr, const char *func, const uint16_t line); + +/** + * qdf_talloc_assert_no_children_fl() - assert @parent has not child allocations + * @parent: the parent memory ponter to check + * @func: name of the function requesting the assert + * @line: line number of the call site in @func + * + * Return: None + */ +void qdf_talloc_assert_no_children_fl(const void *parent, + const char *func, const uint16_t line); + +#else /* WLAN_TALLOC_DEBUG */ + +static inline QDF_STATUS qdf_talloc_feature_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void qdf_talloc_feature_deinit(void) { } + +static inline void *__qdf_talloc_fl(const void *parent, const size_t size, + const char *func, const uint16_t line) +{ + return __zalloc_auto(size); +} + +static inline void +__qdf_tfree_fl(void *ptr, const char *func, const uint16_t line) +{ + __free(ptr); +} + +static inline void +qdf_talloc_assert_no_children_fl(const void *parent, + const char *func, const uint16_t line) { } + +#endif /* WLAN_TALLOC_DEBUG */ + +#endif /* __QDF_TALLOC_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_threads.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_threads.h new file mode 100644 index 0000000000000000000000000000000000000000..17c8fe467462ff6c4f7ce512dce490d704be5b84 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_threads.h @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_threads + * QCA driver framework (QDF) thread related APIs + */ + +#if !defined(__QDF_THREADS_H) +#define __QDF_THREADS_H + +#include +#include "i_qdf_threads.h" + +typedef __qdf_thread_t qdf_thread_t; +typedef QDF_STATUS (*qdf_thread_func)(void *context); + +/* Function declarations and documenation */ + +void qdf_sleep(uint32_t ms_interval); + +void qdf_sleep_us(uint32_t us_interval); + +void qdf_busy_wait(uint32_t us_interval); + +/** + * qdf_set_wake_up_idle() - set wakeup idle value + * @idle: true/false value for wake up idle + * + * Return: none + */ +void qdf_set_wake_up_idle(bool idle); + +/** + * qdf_set_user_nice() - set thread's nice value + * @thread: pointer to thread + * @nice: nice value + * + * Return: void + */ +void qdf_set_user_nice(qdf_thread_t *thread, long nice); + +/** + * qdf_create_thread() - create a kernel thread + * @thread: pointer to thread + * @nice: nice value + * + * Return: pointer to created kernel thread on success else NULL + */ +qdf_thread_t *qdf_create_thread(int (*thread_handler)(void *data), void *data, + const char thread_name[]); + +/** + * qdf_thread_run() - run the given function in a new thread + * + * You must call qdf_thread_join() to avoid a reasource leak! + * + * For more flexibility, use qdf_create_thread() instead. + * + * Return: a new qdf_thread pointer + */ +qdf_thread_t *qdf_thread_run(qdf_thread_func callback, void *context); + +/** + * qdf_thread_join() - signal and wait for a thread to stop + * + * This sets a flag that the given thread can check to see if it should exit. + * The thread can check to see if this flag has been set by calling + * qdf_thread_should_stop(). + * + * Return: QDF_STATUS - the return value from the thread function + */ +QDF_STATUS qdf_thread_join(qdf_thread_t *thread); + +/** + * qdf_thread_should_stop() - true if the current thread was signalled to stop + * + * If qdf_thread_join() has been called on the current thread, this API returns + * true. Otherwise, this returns false. + * + * Return: true if the current thread should stop + */ +bool qdf_thread_should_stop(void); + +/** + * qdf_wake_up_process() - wake up given thread + * @thread: pointer to thread which needs to be woken up + * + * Return: none + */ +int qdf_wake_up_process(qdf_thread_t *thread); + +/** + * qdf_print_stack_trace_thread() - prints the stack trace of the given thread + * @thread: the thread for which the stack trace will be printed + * + * Return: None + */ +void qdf_print_thread_trace(qdf_thread_t *thread); + +/** + * qdf_get_current_task() - get current task struct + * + * Return: pointer to task struct + */ +qdf_thread_t *qdf_get_current_task(void); + +/** + * qdf_get_current_pid() - get current task's process id + * + * Return: current task's process id (int) + */ +int qdf_get_current_pid(void); + +/** + * qdf_get_current_comm() - get current task's command name + * + * Return: current task's command name(char *) + */ +const char *qdf_get_current_comm(void); + +/** + * qdf_thread_set_cpus_allowed_mask() - set cpu mask for a particular thread + * @thread: thread for which new cpu mask is set + * @new_mask: new cpu mask to be set for the thread + * + * Return: None + */ +void +qdf_thread_set_cpus_allowed_mask(qdf_thread_t *thread, qdf_cpu_mask *new_mask); + +/** + * qdf_cpumask_clear() - clear all cpus in a cpumask + * @dstp: cpumask pointer + * + * Return: None + */ +void qdf_cpumask_clear(qdf_cpu_mask *dstp); + +/** + * qdf_cpumask_set_cpu() - set a cpu in a cpumask + * @cpu: cpu number + * @dstp: cpumask pointer + * + * Return: None + */ +void qdf_cpumask_set_cpu(unsigned int cpu, qdf_cpu_mask *dstp); + +/** + * qdf_cpumask_setall - set all cpus + * @dstp: cpumask pointer + * + * Return: None + */ +void qdf_cpumask_setall(qdf_cpu_mask *dstp); + +/** + * qdf_cpumask_empty - Check if cpu_mask is empty + * @srcp: cpumask pointer + * + * Return: true or false + * + */ +bool qdf_cpumask_empty(const struct cpumask *srcp); + +/** + * qdf_cpumask_copy - Copy srcp cpumask to dstp + * @srcp: source cpumask pointer + * @dstp: destination cpumask pointer + * + * Return: None + * + */ +void qdf_cpumask_copy(struct cpumask *dstp, + const struct cpumask *srcp); +#endif /* __QDF_THREADS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_time.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_time.h new file mode 100644 index 0000000000000000000000000000000000000000..575e851bd41061da2d2fcf004c036e061af0eb65 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_time.h @@ -0,0 +1,364 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_time + * This file abstracts time related functionality. + */ + +#ifndef _QDF_OS_TIME_H +#define _QDF_OS_TIME_H + +#include + +typedef __qdf_time_t qdf_time_t; +typedef __qdf_ktime_t qdf_ktime_t; + +/** + * qdf_ns_to_ktime - Converts nanoseconds to a qdf_ktime_t object + * @ns: time in nanoseconds + * + * Return: nanoseconds as qdf_ktime_t object + */ + +static inline qdf_ktime_t qdf_ns_to_ktime(uint64_t ns) +{ + return __qdf_ns_to_ktime(ns); +} + +/** + * qdf_ktime_add - Adds two qdf_ktime_t objects and returns + * a qdf_ktime_t object + * @ktime1: time as qdf_ktime_t object + * @ktime2: time as qdf_ktime_t object + * + * Return: sum of both qdf_ktime_t as qdf_ktime_t object + */ + +static inline qdf_ktime_t qdf_ktime_add(qdf_ktime_t ktime1, qdf_ktime_t ktime2) +{ + return __qdf_ktime_add(ktime1, ktime2); +} + +/** + * qdf_ktime_get - Gets the current time as qdf_ktime_t object + * + * Return: current time as qdf_ktime_t object + */ + +static inline qdf_ktime_t qdf_ktime_get(void) +{ + return __qdf_ktime_get(); +} + +/** + * qdf_ktime_add_ns - Adds qdf_ktime_t object and nanoseconds value and + * returns the qdf_ktime_t object + * @ktime: time as qdf_ktime_t object + * @ns: time in nanoseconds + * + * Return: qdf_ktime_t object + */ + +static inline qdf_ktime_t qdf_ktime_add_ns(qdf_ktime_t ktime, int64_t ns) +{ + return __qdf_ktime_add_ns(ktime, ns); +} + +/** + * qdf_ktime_to_ms - Convert the qdf_ktime_t object into milliseconds + * @ktime: time as qdf_ktime_t object + * + * Return: qdf_ktime_t in milliseconds + */ + +static inline int64_t qdf_ktime_to_ms(qdf_ktime_t ktime) +{ + return __qdf_ktime_to_ms(ktime); +} + +/** + * qdf_ktime_to_ns - Convert the qdf_ktime_t object into nanoseconds + * @ktime: time as qdf_ktime_t object + * + * Return: qdf_ktime_t in nanoseconds + */ + +static inline int64_t qdf_ktime_to_ns(qdf_ktime_t ktime) +{ + return __qdf_ktime_to_ns(ktime); +} + +/** + * qdf_system_ticks - Count the number of ticks elapsed from the time when + * the system booted + * + * Return: ticks + */ +static inline qdf_time_t qdf_system_ticks(void) +{ + return __qdf_system_ticks(); +} + +#define qdf_system_ticks_per_sec __qdf_system_ticks_per_sec + +/** + * qdf_system_ticks_to_msecs - convert ticks to milliseconds + * @clock_ticks: Number of ticks + * + * Return: unsigned int Time in milliseconds + */ +static inline uint32_t qdf_system_ticks_to_msecs(unsigned long clock_ticks) +{ + return __qdf_system_ticks_to_msecs(clock_ticks); +} + +/** + * qdf_system_msecs_to_ticks - convert milliseconds to ticks + * @msec: Time in milliseconds + * + * Return: unsigned long number of ticks + */ +static inline qdf_time_t qdf_system_msecs_to_ticks(uint32_t msecs) +{ + return __qdf_system_msecs_to_ticks(msecs); +} + +/** + * qdf_get_system_uptime - Return a monotonically increasing time + * This increments once per HZ ticks + * + * Return: qdf_time_t system up time in ticks + */ +static inline qdf_time_t qdf_get_system_uptime(void) +{ + return __qdf_get_system_uptime(); +} + +/** + * qdf_get_bootbased_boottime_ns() - Get the bootbased time in nanoseconds + * + * qdf_get_bootbased_boottime_ns() function returns the number of nanoseconds + * that have elapsed since the system was booted. It also includes the time when + * system was suspended. + * + * Return: + * The time since system booted in nanoseconds + */ + +static inline uint64_t qdf_get_bootbased_boottime_ns(void) +{ + return __qdf_get_bootbased_boottime_ns(); +} + +/** + * qdf_get_system_timestamp - Return current timestamp + * + * Return: unsigned long timestamp in ms. + */ +static inline unsigned long qdf_get_system_timestamp(void) +{ + return __qdf_get_system_timestamp(); +} + +/** + * qdf_udelay - delay in microseconds + * @usecs: Number of microseconds to delay + * + * Return: none + */ +static inline void qdf_udelay(int usecs) +{ + __qdf_udelay(usecs); +} + +/** + * qdf_mdelay - Delay in milliseconds. + * @msec: Number of milliseconds to delay + * + * Return: none + */ +static inline void qdf_mdelay(int msecs) +{ + __qdf_mdelay(msecs); +} + +/** + * qdf_system_time_after() - Check if a is later than b + * @a: Time stamp value a + * @b: Time stamp value b + * + * Return: + * true if a < b else false + */ +static inline bool qdf_system_time_after(qdf_time_t a, qdf_time_t b) +{ + return __qdf_system_time_after(a, b); +} + +/** + * qdf_system_time_before() - Check if a is before b + * @a: Time stamp value a + * @b: Time stamp value b + * + * Return: + * true if a is before b else false + */ +static inline bool qdf_system_time_before(qdf_time_t a, qdf_time_t b) +{ + return __qdf_system_time_before(a, b); +} + +/** + * qdf_system_time_after_eq() - Check if a atleast as recent as b, if not + * later + * @a: Time stamp value a + * @b: Time stamp value b + * + * Return: + * true if a >= b else false + */ +static inline bool qdf_system_time_after_eq(qdf_time_t a, qdf_time_t b) +{ + return __qdf_system_time_after_eq(a, b); +} + +/** + * enum qdf_timestamp_unit - what unit the qdf timestamp is in + * @KERNEL_LOG: boottime time in uS (micro seconds) + * @QTIMER: QTIME in (1/19200)S + * + * This enum is used to distinguish which timer source is used. + */ +enum qdf_timestamp_unit { + KERNEL_LOG, + QTIMER, +}; + +#ifdef MSM_PLATFORM +#define QDF_LOG_TIMESTAMP_UNIT QTIMER +#define QDF_LOG_TIMESTAMP_CYCLES_PER_10_US 192 + +static inline uint64_t qdf_log_timestamp_to_usecs(uint64_t time) +{ + /* + * Try to preserve precision by multiplying by 10 first. + * If that would cause a wrap around, divide first instead. + */ + if (time * 10 < time) { + do_div(time, QDF_LOG_TIMESTAMP_CYCLES_PER_10_US); + return time * 10; + } + + time = time * 10; + do_div(time, QDF_LOG_TIMESTAMP_CYCLES_PER_10_US); + + return time; +} + +/** + * qdf_get_log_timestamp_lightweight - get time stamp for logging + * For adrastea this API returns QTIMER tick which is needed to synchronize + * host and fw log timestamps + * For ROME and other discrete solution this API returns system boot time stamp + * + * Return: + * QTIMER ticks(19.2MHz) for adrastea + * System tick for rome and other 3rd party platform solutions + */ +static inline uint64_t qdf_get_log_timestamp_lightweight(void) +{ + return __qdf_get_log_timestamp(); +} +#else +#define QDF_LOG_TIMESTAMP_UNIT KERNEL_LOG +#define QDF_LOG_TIMESTAMP_CYCLES_PER_10_US 10 + +static inline uint64_t qdf_log_timestamp_to_usecs(uint64_t time) +{ + /* timestamps are already in micro seconds */ + return time; +} + +static inline uint64_t qdf_get_log_timestamp_lightweight(void) +{ + uint64_t timestamp_us; + + /* explicitly change to uint64_t, otherwise it will assign + * uint32_t to timestamp_us, which lose high 32bits. + * on 64bit platform, it will only use low 32bits jiffies in + * jiffies_to_msecs. + * eg: HZ=250, it will overflow every (0xffff ffff<<2==0x3fff ffff) + * ticks. it is 1193 hours. + */ + timestamp_us = + (uint64_t)__qdf_system_ticks_to_msecs(qdf_system_ticks()) * 1000; + return timestamp_us; +} +#endif /* end of MSM_PLATFORM */ + +static inline void qdf_log_timestamp_to_secs(uint64_t time, uint64_t *secs, + uint64_t *usecs) +{ + *secs = qdf_log_timestamp_to_usecs(time); + *usecs = do_div(*secs, 1000000ul); +} + +static inline uint64_t qdf_usecs_to_log_timestamp(uint64_t usecs) +{ + return (usecs * QDF_LOG_TIMESTAMP_CYCLES_PER_10_US) / 10; +} + +/** + * qdf_get_log_timestamp - get time stamp for logging + * For adrastea this API returns QTIMER tick which is needed to synchronize + * host and fw log timestamps + * For ROME and other discrete solution this API returns system boot time stamp + * + * Return: + * QTIMER ticks(19.2MHz) for adrastea + * System tick for rome and other future discrete solutions + */ +static inline uint64_t qdf_get_log_timestamp(void) +{ + return __qdf_get_log_timestamp(); +} + +/** + * qdf_get_log_timestamp_usecs() - get time stamp for logging in microseconds + * + * Return: The current logging timestamp normalized to microsecond precision + */ +static inline uint64_t qdf_get_log_timestamp_usecs(void) +{ + return qdf_log_timestamp_to_usecs(qdf_get_log_timestamp()); +} + +/** + * qdf_get_monotonic_boottime - get monotonic kernel boot time + * This API is similar to qdf_get_system_boottime but it includes + * time spent in suspend. + * + * Return: Time in microseconds + */ +static inline uint64_t qdf_get_monotonic_boottime(void) +{ + return __qdf_get_monotonic_boottime(); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_timer.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_timer.h new file mode 100644 index 0000000000000000000000000000000000000000..83ec8d3bd18264ff689c5a235d6bb7011da13f32 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_timer.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2014-2016, 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_timer + * This file abstracts OS timers running in soft IRQ context. + */ + +#ifndef _QDF_TIMER_H +#define _QDF_TIMER_H + +#include +#include + +typedef struct __qdf_timer_t qdf_timer_t; + +/** + * qdf_timer_init() - initialize a timer + * @hdl: OS handle + * @timer: Timer object pointer + * @func: Timer function + * @arg: Argument of timer function + * @type: deferrable or non deferrable timer type + * + * Timer type QDF_TIMER_TYPE_SW means its a deferrable sw timer which will + * not cause CPU wake upon expiry + * Timer type QDF_TIMER_TYPE_WAKE_APPS means its a non-deferrable timer which + * will cause CPU wake up on expiry + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +qdf_timer_init(qdf_handle_t hdl, qdf_timer_t *timer, qdf_timer_func_t func, + void *arg, QDF_TIMER_TYPE type) +{ + return __qdf_timer_init(timer, func, arg, type); +} + +/** + * qdf_timer_start() - start a timer + * @timer: timer to start + * @msec: Expiration period in milliseconds + * + * If QDF timer multiplier is set, the timeout value may get scaled. + * + * Return: none + */ +static inline void qdf_timer_start(qdf_timer_t *timer, int msec) +{ + __qdf_timer_start(timer, msec); +} + +/** + * qdf_timer_mod() - modify the timeout on a timer + * @timer: timer to modify + * @msec: Expiration period in milliseconds + * + * If @timer is not active, it will be activated. + * + * If QDF timer multiplier is set, the timeout value may get scaled. + * + * Return: none + */ +static inline void qdf_timer_mod(qdf_timer_t *timer, int msec) +{ + __qdf_timer_mod(timer, msec); +} + +/** + * qdf_timer_stop() - cancel a timer + * @timer: timer to cancel + * + * Note! The timer callback may be executing when this function call returns. + * If you want to ensure that it is not, use qdf_timer_sync_cancel() instead. + * + * Return: true if @timer was deactivated, false if @timer was not active + */ +static inline bool qdf_timer_stop(qdf_timer_t *timer) +{ + return __qdf_timer_stop(timer); +} + +/** + * qdf_timer_sync_cancel - Cancel a timer synchronously + * @timer: timer to cancel + * + * If the timer callback is already running, this function blocks until it + * completes. + * + * Return: true if @timer was deactivated, false if @timer was not active + */ +static inline bool qdf_timer_sync_cancel(qdf_timer_t *timer) +{ + return __qdf_timer_sync_cancel(timer); +} + +/** + * qdf_timer_free() - free a timer + * @timer: timer to free + * + * If the timer callback is already running, this function blocks until it + * completes. + * + * Return: none + */ +static inline void qdf_timer_free(qdf_timer_t *timer) +{ + __qdf_timer_free(timer); +} + +#endif /* _QDF_TIMER_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_trace.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..cfc389c12460fa04fc5e573cb9fef5eb905982d4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_trace.h @@ -0,0 +1,1522 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(__QDF_TRACE_H) +#define __QDF_TRACE_H + +/** + * DOC: qdf_trace + * QCA driver framework trace APIs + * Trace, logging, and debugging definitions and APIs + */ + +/* Include Files */ +#include /* For QDF_MODULE_ID... */ +#include /* For va_list... */ +#include +#include +#include +#include + + +/* Type declarations */ + +#define FL(x) "%s: %d: " x, __func__, __LINE__ + +#define QDF_TRACE_BUFFER_SIZE (512) + +/* + * Extracts the 8-bit group id from the wmi command id by performing the + * reverse operation of WMI_CMD_GRP_START_ID + */ +#define QDF_WMI_MTRACE_GRP_ID(message_id) (((message_id) >> 12) & 0xFF) +/* + * Number of bits reserved for WMI mtrace command id + */ + #define QDF_WMI_MTRACE_CMD_NUM_BITS 7 +/* + * Extracts the 7-bit group specific command id from the wmi command id + */ +#define QDF_WMI_MTRACE_CMD_ID(message_id) ((message_id) & 0x7F) + +#ifdef QDF_TRACE_PRINT_ENABLE +#define QDF_DEFAULT_TRACE_LEVEL (1 << QDF_TRACE_LEVEL_INFO) +#endif + +#define QDF_CATEGORY_INFO_U16(val) (((val >> 16) & 0x0000FFFF)) +#define QDF_TRACE_LEVEL_INFO_L16(val) (val & 0x0000FFFF) + +typedef int (qdf_abstract_print)(void *priv, const char *fmt, ...); + +/* + * Log levels + */ +#define QDF_DEBUG_FUNCTRACE 0x01 +#define QDF_DEBUG_LEVEL0 0x02 +#define QDF_DEBUG_LEVEL1 0x04 +#define QDF_DEBUG_LEVEL2 0x08 +#define QDF_DEBUG_LEVEL3 0x10 +#define QDF_DEBUG_ERROR 0x20 +#define QDF_DEBUG_CFG 0x40 + +/* + * Rate limit based on pkt prototype + */ +#define QDF_MAX_DHCP_PKTS_PER_SEC (20) +#define QDF_MAX_EAPOL_PKTS_PER_SEC (50) +#define QDF_MAX_ARP_PKTS_PER_SEC (5) +#define QDF_MAX_DNS_PKTS_PER_SEC (5) +#define QDF_MAX_OTHER_PKTS_PER_SEC (1) + +/* DP Trace Implementation */ +#ifdef CONFIG_DP_TRACE +#define DPTRACE(p) p +#define DPTRACE_PRINT(args...) \ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, args) +#else +#define DPTRACE(p) +#define DPTRACE_PRINT(args...) +#endif + +/* By default Data Path module will have all log levels enabled, except debug + * log level. Debug level will be left up to the framework or user space modules + * to be enabled when issue is detected + */ +#define QDF_DATA_PATH_TRACE_LEVEL \ + ((1 << QDF_TRACE_LEVEL_FATAL) | (1 << QDF_TRACE_LEVEL_ERROR) | \ + (1 << QDF_TRACE_LEVEL_WARN) | (1 << QDF_TRACE_LEVEL_INFO) | \ + (1 << QDF_TRACE_LEVEL_INFO_HIGH) | (1 << QDF_TRACE_LEVEL_INFO_MED) | \ + (1 << QDF_TRACE_LEVEL_INFO_LOW)) + +/* Preprocessor definitions and constants */ +#define ASSERT_BUFFER_SIZE (512) + +#ifndef MAX_QDF_TRACE_RECORDS +#define MAX_QDF_TRACE_RECORDS 4000 +#endif + +#define QDF_TRACE_DEFAULT_PDEV_ID 0xff +#define INVALID_QDF_TRACE_ADDR 0xffffffff +#define DEFAULT_QDF_TRACE_DUMP_COUNT 0 +#define QDF_TRACE_DEFAULT_MSDU_ID 0 + +/* + * first parameter to iwpriv command - dump_dp_trace + * iwpriv wlan0 dump_dp_trace 0 0 -> dump full buffer + * iwpriv wlan0 dump_dp_trace 1 0 -> enable live view mode + * iwpriv wlan0 dump_dp_trace 2 0 -> clear dp trace buffer + * iwpriv wlan0 dump_dp_trace 3 0 -> disable live view mode + */ +#define DUMP_DP_TRACE 0 +#define ENABLE_DP_TRACE_LIVE_MODE 1 +#define CLEAR_DP_TRACE_BUFFER 2 +#define DISABLE_DP_TRACE_LIVE_MODE 3 + + +#ifdef TRACE_RECORD + +#define MTRACE(p) p + +#else +#define MTRACE(p) do { } while (0) + +#endif +#define NO_SESSION 0xFF + +/** + * typedef struct qdf_trace_record_s - keep trace record + * @qtime: qtimer ticks + * @time: user timestamp + * @module: module name + * @code: hold record of code + * @session: hold record of session + * @data: hold data + * @pid: hold pid of the process + */ +typedef struct qdf_trace_record_s { + uint64_t qtime; + char time[18]; + uint8_t module; + uint8_t code; + uint16_t session; + uint32_t data; + uint32_t pid; +} qdf_trace_record_t, *tp_qdf_trace_record; + +/** + * typedef struct s_qdf_trace_data - MTRACE logs are stored in ring buffer + * @head: position of first record + * @tail: position of last record + * @num: count of total record + * @num_since_last_dump: count from last dump + * @enable: config for controlling the trace + * @dump_count: Dump after number of records reach this number + */ +typedef struct s_qdf_trace_data { + uint32_t head; + uint32_t tail; + uint32_t num; + uint16_t num_since_last_dump; + uint8_t enable; + uint16_t dump_count; +} t_qdf_trace_data; + +#define CASE_RETURN_STRING(str) case ((str)): return (uint8_t *)(# str); + +#ifndef MAX_QDF_DP_TRACE_RECORDS +#define MAX_QDF_DP_TRACE_RECORDS 2000 +#endif + +#define QDF_DP_TRACE_RECORD_SIZE 40 +#define INVALID_QDF_DP_TRACE_ADDR 0xffffffff +#define QDF_DP_TRACE_VERBOSITY_HIGH 4 +#define QDF_DP_TRACE_VERBOSITY_MEDIUM 3 +#define QDF_DP_TRACE_VERBOSITY_LOW 2 +#define QDF_DP_TRACE_VERBOSITY_ULTRA_LOW 1 +#define QDF_DP_TRACE_VERBOSITY_BASE 0 + +/** + * enum QDF_DP_TRACE_ID - Generic ID to identify various events in data path + * @QDF_DP_TRACE_INVALID - invalid + * @QDF_DP_TRACE_DROP_PACKET_RECORD - record drop packet + * @QDF_DP_TRACE_EAPOL_PACKET_RECORD - record EAPOL packet + * @QDF_DP_TRACE_DHCP_PACKET_RECORD - record DHCP packet + * @QDF_DP_TRACE_ARP_PACKET_RECORD - record ARP packet + * @QDF_DP_TRACE_MGMT_PACKET_RECORD - record MGMT pacekt + * @QDF_DP_TRACE_EVENT_RECORD - record events + * @QDF_DP_TRACE_BASE_VERBOSITY - below this are part of base verbosity + * @QDF_DP_TRACE_ICMP_PACKET_RECORD - record ICMP packet + * @QDF_DP_TRACE_ICMPv6_PACKET_RECORD - record ICMPv6 packet + * @QDF_DP_TRACE_HDD_TX_TIMEOUT - HDD tx timeout + * @QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT- SOFTAP HDD tx timeout + * @QDF_DP_TRACE_TX_CREDIT_RECORD - credit update record + * @QDF_DP_TRACE_ULTRA_LOW_VERBOSITY - Below this is not logged for >4PPS + * @QDF_DP_TRACE_TX_PACKET_RECORD - record 32 bytes of tx pkt at any layer + * @QDF_DP_TRACE_RX_PACKET_RECORD - record 32 bytes of rx pkt at any layer + * @QDF_DP_TRACE_HDD_TX_PACKET_RECORD - record 32 bytes of tx pkt at HDD + * @QDF_DP_TRACE_HDD_RX_PACKET_RECORD - record 32 bytes of rx pkt at HDD + * @QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD - record data bytes of tx pkt at LI_DP + * @QDF_DP_TRACE_LI_DP_RX_PACKET_RECORD - record data bytes of rx pkt at LI_DP + * @QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD - tx completion ptr record for + * lithium + * @QDF_DP_TRACE_FREE_PACKET_PTR_RECORD - tx completion ptr record + * @QDF_DP_TRACE_LOW_VERBOSITY - below this are part of low verbosity + * @QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD - HDD layer ptr record + * @QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD - Lithium DP layer ptr record + * @QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD - HDD RX record + * @QDF_DP_TRACE_CE_PACKET_PTR_RECORD - CE layer ptr record + * @QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD- CE fastpath ptr record + * @QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD- CE fastpath error record + * @QDF_DP_TRACE_RX_HTT_PACKET_PTR_RECORD - HTT RX record + * @QDF_DP_TRACE_RX_OFFLOAD_HTT_PACKET_PTR_RECORD- HTT RX offload record + * @QDF_DP_TRACE_RX_LI_DP_PACKET_PTR_RECORD - Lithium DP RX record + * @QDF_DP_TRACE_MED_VERBOSITY - below this are part of med verbosity + * @QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD -tx queue ptr record + * @QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD - txrx packet ptr record + * @QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD - txrx fast path record + * @QDF_DP_TRACE_HTT_PACKET_PTR_RECORD - htt packet ptr record + * @QDF_DP_TRACE_HTC_PACKET_PTR_RECORD - htc packet ptr record + * @QDF_DP_TRACE_HIF_PACKET_PTR_RECORD - hif packet ptr record + * @QDF_DP_TRACE_RX_TXRX_PACKET_PTR_RECORD - txrx packet ptr record + * @QDF_DP_TRACE_LI_DP_NULL_RX_PACKET_RECORD + * - record data bytes of rx null_queue pkt at LI_DP + * @QDF_DP_TRACE_HIGH_VERBOSITY - below this are part of high verbosity + */ + +enum QDF_DP_TRACE_ID { + QDF_DP_TRACE_INVALID, + QDF_DP_TRACE_DROP_PACKET_RECORD, + QDF_DP_TRACE_EAPOL_PACKET_RECORD, + QDF_DP_TRACE_DHCP_PACKET_RECORD, + QDF_DP_TRACE_ARP_PACKET_RECORD, + QDF_DP_TRACE_MGMT_PACKET_RECORD, + QDF_DP_TRACE_EVENT_RECORD, + QDF_DP_TRACE_BASE_VERBOSITY, + QDF_DP_TRACE_ICMP_PACKET_RECORD, + QDF_DP_TRACE_ICMPv6_PACKET_RECORD, + QDF_DP_TRACE_HDD_TX_TIMEOUT, + QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT, + QDF_DP_TRACE_TX_CREDIT_RECORD, + QDF_DP_TRACE_ULTRA_LOW_VERBOSITY, + QDF_DP_TRACE_TX_PACKET_RECORD, + QDF_DP_TRACE_RX_PACKET_RECORD, + QDF_DP_TRACE_HDD_TX_PACKET_RECORD, + QDF_DP_TRACE_HDD_RX_PACKET_RECORD, + QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD, + QDF_DP_TRACE_LI_DP_RX_PACKET_RECORD, + QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD, + QDF_DP_TRACE_FREE_PACKET_PTR_RECORD, + QDF_DP_TRACE_LOW_VERBOSITY, + QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD, + QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD, + QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD, + QDF_DP_TRACE_CE_PACKET_PTR_RECORD, + QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD, + QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD, + QDF_DP_TRACE_RX_HTT_PACKET_PTR_RECORD, + QDF_DP_TRACE_RX_OFFLOAD_HTT_PACKET_PTR_RECORD, + QDF_DP_TRACE_RX_LI_DP_PACKET_PTR_RECORD, + QDF_DP_TRACE_MED_VERBOSITY, + QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD, + QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD, + QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD, + QDF_DP_TRACE_HTT_PACKET_PTR_RECORD, + QDF_DP_TRACE_HTC_PACKET_PTR_RECORD, + QDF_DP_TRACE_HIF_PACKET_PTR_RECORD, + QDF_DP_TRACE_RX_TXRX_PACKET_PTR_RECORD, + QDF_DP_TRACE_LI_DP_NULL_RX_PACKET_RECORD, + QDF_DP_TRACE_HIGH_VERBOSITY, + QDF_DP_TRACE_MAX +}; + +/** + * qdf_proto_dir - direction + * @QDF_TX: TX direction + * @QDF_RX: RX direction + * @QDF_NA: not applicable + */ +enum qdf_proto_dir { + QDF_TX, + QDF_RX, + QDF_NA +}; + +/** + * QDF_CREDIT_UPDATE_SOURCE - source of credit record + * @QDF_TX_SCHED: Tx scheduler + * @QDF_TX_COMP: TX completion + * @QDF_TX_CREDIT_UPDATE: credit update indication + * @QDF_HTT_ATTACH: HTT attach + * @QDF_TX_HTT_MSG: HTT TX message + */ +enum QDF_CREDIT_UPDATE_SOURCE { + QDF_TX_SCHED, + QDF_TX_COMP, + QDF_TX_CREDIT_UPDATE, + QDF_HTT_ATTACH, + QDF_TX_HTT_MSG +}; + +/** + * QDF_CREDIT_OPERATION - operation on credit + * @QDF_CREDIT_INC: credit increment + * @QDF_CREDIT_DEC: credit decrement + * @QDF_CREDIT_ABS: Abosolute credit + * @QDF_OP_NA: Not applicable + */ +enum QDF_CREDIT_OPERATION { + QDF_CREDIT_INC, + QDF_CREDIT_DEC, + QDF_CREDIT_ABS, + QDF_OP_NA +}; + +/** + * struct qdf_dp_trace_ptr_buf - pointer record buffer + * @cookie: cookie value + * @msdu_id: msdu_id + * @status: completion status + */ +struct qdf_dp_trace_ptr_buf { + uint64_t cookie; + uint16_t msdu_id; + uint16_t status; +}; + +/** + * struct qdf_dp_trace_proto_buf - proto packet buffer + * @sa: source address + * @da: destination address + * @vdev_id : vdev id + * @type: packet type + * @subtype: packet subtype + * @dir: direction + */ +struct qdf_dp_trace_proto_buf { + struct qdf_mac_addr sa; + struct qdf_mac_addr da; + uint8_t vdev_id; + uint8_t type; + uint8_t subtype; + uint8_t dir; +}; + +/** + * struct qdf_dp_trace_mgmt_buf - mgmt packet buffer + * @vdev_id : vdev id + * @type: packet type + * @subtype: packet subtype + */ +struct qdf_dp_trace_mgmt_buf { + uint8_t vdev_id; + uint8_t type; + uint8_t subtype; +}; + +/** + * struct qdf_dp_trace_credit_record - tx credit record + * @source: credit record source + * @operation: credit operation + * @delta: delta of credit + * @total_credits: total credit + * @g0_credit: group 0 credit + * @g1_credit: group 1 credit + */ +struct qdf_dp_trace_credit_record { + enum QDF_CREDIT_UPDATE_SOURCE source; + enum QDF_CREDIT_OPERATION operation; + int delta; + int total_credits; + int g0_credit; + int g1_credit; +}; + +/** + * struct qdf_dp_trace_event_buf - event buffer + * @vdev_id : vdev id + * @type: packet type + * @subtype: packet subtype + */ +struct qdf_dp_trace_event_buf { + uint8_t vdev_id; + uint8_t type; + uint8_t subtype; +}; + +/** + * struct qdf_dp_trace_data_buf - nbuf data buffer + * @msdu_id : msdu id + */ +struct qdf_dp_trace_data_buf { + uint16_t msdu_id; +}; + +/** + * struct qdf_dp_trace_record_s - Describes a record in DP trace + * @time: time when it got stored + * @code: Describes the particular event + * @data: buffer to store data + * @size: Length of the valid data stored in this record + * @pid : process id which stored the data in this record + */ +struct qdf_dp_trace_record_s { + uint64_t time; + uint8_t code; + uint8_t data[QDF_DP_TRACE_RECORD_SIZE]; + uint8_t size; + uint32_t pid; + uint8_t pdev_id; +}; + +/** + * struct qdf_dp_trace_data - Parameters to configure/control DP trace + * @head: Position of first record + * @tail: Position of last record + * @num: Current index + * @proto_bitmap: defines which protocol to be traced + * @no_of_record: defines every nth packet to be traced + * @num_records_to_dump: defines number of records to be dumped + * @dump_counter: counter to track number of records dumped + * @verbosity : defines verbosity level + * @ini_conf_verbosity: Configured verbosity from INI + * @enable: enable/disable DP trace + * @count: current packet number + * @live_mode_config: configuration as received during initialization + * @live_mode: current live mode, enabled or disabled, can be throttled based + * on throughput + * @force_live_mode: flag to enable live mode all the time for all packets. + * This can be set/unset from userspace and overrides other + * live mode flags. + * @dynamic_verbosity_modify: Dynamic user configured verbosity overrides all + * @print_pkt_cnt: count of number of packets printed in live mode + * @high_tput_thresh: thresh beyond which live mode is turned off + * @thresh_time_limit: max time, in terms of BW timer intervals to wait, + * for determining if high_tput_thresh has been crossed. ~1s + * @arp_req: stats for arp reqs + * @arp_resp: stats for arp resps + * @icmp_req: stats for icmp reqs + * @icmp_resp: stats for icmp resps + * @dhcp_disc: stats for dhcp discover msgs + * @dhcp_req: stats for dhcp req msgs + * @dhcp_off: stats for dhcp offer msgs + * @dhcp_ack: stats for dhcp ack msgs + * @dhcp_nack: stats for dhcp nack msgs + * @dhcp_others: stats for other dhcp pkts types + * @eapol_m1: stats for eapol m1 + * @eapol_m2: stats for eapol m2 + * @eapol_m3: stats for eapol m3 + * @eapol_m4: stats for eapol m4 + * @eapol_others: stats for other eapol pkt types + * @icmpv6_req: stats for icmpv6 reqs + * @icmpv6_resp: stats for icmpv6 resps + * @icmpv6_ns: stats for icmpv6 nss + * @icmpv6_na: stats for icmpv6 nas + * @icmpv6_rs: stats for icmpv6 rss + * @icmpv6_ra: stats for icmpv6 ras + * @proto_event_bitmap: defines which protocol to be diag logged. + * refer QDF_NBUF_PKT_TRAC_TYPE_DNS to QDF_NBUF_PKT_TRAC_TYPE_ARP + * for bitmap. + */ +struct s_qdf_dp_trace_data { + uint32_t head; + uint32_t tail; + uint32_t num; + uint32_t proto_bitmap; + uint8_t no_of_record; + uint16_t num_records_to_dump; + uint16_t dump_counter; + uint8_t verbosity; + uint8_t ini_conf_verbosity; + bool enable; + bool live_mode_config; + bool live_mode; + uint32_t curr_pos; + uint32_t saved_tail; + bool force_live_mode; + bool dynamic_verbosity_modify; + uint8_t print_pkt_cnt; + uint8_t high_tput_thresh; + uint16_t thresh_time_limit; + /* Stats */ + uint32_t tx_count; + uint32_t rx_count; + u16 arp_req; + u16 arp_resp; + u16 dhcp_disc; + u16 dhcp_req; + u16 dhcp_off; + u16 dhcp_ack; + u16 dhcp_nack; + u16 dhcp_others; + u16 eapol_m1; + u16 eapol_m2; + u16 eapol_m3; + u16 eapol_m4; + u16 eapol_others; + u16 icmp_req; + u16 icmp_resp; + u16 icmpv6_req; + u16 icmpv6_resp; + u16 icmpv6_ns; + u16 icmpv6_na; + u16 icmpv6_rs; + u16 icmpv6_ra; + uint32_t proto_event_bitmap; +}; + +/** + * struct qdf_dpt_debugfs_state - state to control read to debugfs file + * @QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID: invalid state + * @QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT: initial state + * @QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS: read is in progress + * @QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE: read complete + */ + +enum qdf_dpt_debugfs_state { + QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID, + QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT, + QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS, + QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE, +}; + +typedef void (*tp_qdf_trace_cb)(void *p_mac, tp_qdf_trace_record, uint16_t); +typedef void (*tp_qdf_state_info_cb) (char **buf, uint16_t *size); +#ifdef WLAN_FEATURE_MEMDUMP_ENABLE +void qdf_register_debugcb_init(void); +void qdf_register_debug_callback(QDF_MODULE_ID module_id, + tp_qdf_state_info_cb qdf_state_infocb); +QDF_STATUS qdf_state_info_dump_all(char *buf, uint16_t size, + uint16_t *driver_dump_size); +#else /* WLAN_FEATURE_MEMDUMP_ENABLE */ +static inline void qdf_register_debugcb_init(void) +{ +} +#endif /* WLAN_FEATURE_MEMDUMP_ENABLE */ + +#ifdef TRACE_RECORD +void qdf_trace_register(QDF_MODULE_ID, tp_qdf_trace_cb); +void qdf_trace_init(void); +void qdf_trace_deinit(void); +void qdf_trace(uint8_t module, uint8_t code, uint16_t session, uint32_t data); +void qdf_trace_enable(uint32_t, uint8_t enable); +void qdf_trace_dump_all(void *, uint8_t, uint8_t, uint32_t, uint32_t); +QDF_STATUS qdf_trace_spin_lock_init(void); +#else +#ifndef QDF_TRACE_PRINT_ENABLE +static inline +void qdf_trace_init(void) +{ +} + +static inline +void qdf_trace_deinit(void) +{ +} + +static inline +void qdf_trace_enable(uint32_t bitmask_of_module_id, uint8_t enable) +{ +} + +static inline +void qdf_trace(uint8_t module, uint8_t code, uint16_t session, uint32_t data) +{ +} + +static inline +void qdf_trace_dump_all(void *p_mac, uint8_t code, uint8_t session, + uint32_t count, uint32_t bitmask_of_module) +{ +} + +static inline +QDF_STATUS qdf_trace_spin_lock_init(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif + +#ifdef ENABLE_MTRACE_LOG +/** + * qdf_mtrace_log() - Logs a message tracepoint to DIAG + * Infrastructure. + * @src_module: Enum of source module (basically module id) + * from where the message with message_id is posted. + * @dst_module: Enum of destination module (basically module id) + * to which the message with message_id is posted. + * @message_id: Id of the message to be posted + * @vdev_id: Vdev Id + * + * This function logs to the DIAG Infrastructure a tracepoint for a + * message being sent from a source module to a destination module + * with a specific ID for the benefit of a specific vdev. + * For non-vdev messages vdev_id will be NO_SESSION + * Return: None + */ +void qdf_mtrace_log(QDF_MODULE_ID src_module, QDF_MODULE_ID dst_module, + uint16_t message_id, uint8_t vdev_id); +#else +static inline +void qdf_mtrace_log(QDF_MODULE_ID src_module, QDF_MODULE_ID dst_module, + uint16_t message_id, uint8_t vdev_id) +{ +} +#endif + +#ifdef TRACE_RECORD +/** + * qdf_mtrace() - puts the messages in to ring-buffer + * and logs a message tracepoint to DIAG Infrastructure. + * @src_module: Enum of source module (basically module id) + * from where the message with message_id is posted. + * @dst_module: Enum of destination module (basically module id) + * to which the message with message_id is posted. + * @message_id: Id of the message to be posted + * @vdev_id: Vdev Id + * @data: Actual message contents + * + * This function will be called from each module which wants to record the + * messages in circular queue. Before calling this function make sure you + * have registered your module with qdf through qdf_trace_register function. + * In addition of the recording the messages in circular queue this function + * will log the message tracepoint to the DIAG infrastructure. + * these logs will be later used by post processing script. + * + * Return: None + */ +void qdf_mtrace(QDF_MODULE_ID src_module, QDF_MODULE_ID dst_module, + uint16_t message_id, uint8_t vdev_id, uint32_t data); +#else +static inline +void qdf_mtrace(QDF_MODULE_ID src_module, QDF_MODULE_ID dst_module, + uint16_t message_id, uint8_t vdev_id, uint32_t data) +{ +} +#endif + +#ifdef CONFIG_DP_TRACE +void qdf_dp_set_proto_bitmap(uint32_t val); +void qdf_dp_trace_set_verbosity(uint32_t val); +void qdf_dp_set_no_of_record(uint32_t val); +#define QDF_DP_TRACE_RECORD_INFO_LIVE (0x1) +#define QDF_DP_TRACE_RECORD_INFO_THROTTLED (0x1 << 1) + +/** + * qdf_dp_trace_log_pkt() - log packet type enabled through iwpriv + * @vdev_id: vdev_id + * @skb: skb pointer + * @dir: direction + * @pdev_id: pdev_id + * + * Return: true: some protocol was logged, false: no protocol was logged. + */ +bool qdf_dp_trace_log_pkt(uint8_t vdev_id, struct sk_buff *skb, + enum qdf_proto_dir dir, uint8_t pdev_id); + +void qdf_dp_trace_init(bool live_mode_config, uint8_t thresh, + uint16_t time_limit, uint8_t verbosity, + uint32_t proto_bitmap); +void qdf_dp_trace_deinit(void); +void qdf_dp_trace_spin_lock_init(void); +void qdf_dp_trace_set_value(uint32_t proto_bitmap, uint8_t no_of_records, + uint8_t verbosity); +void qdf_dp_trace_set_track(qdf_nbuf_t nbuf, enum qdf_proto_dir dir); +void qdf_dp_trace(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code, uint8_t pdev_id, + uint8_t *data, uint8_t size, enum qdf_proto_dir dir); +void qdf_dp_trace_dump_all(uint32_t count, uint8_t pdev_id); + +/** + * qdf_dpt_get_curr_pos_debugfs() - get curr position to start read + * @file: debugfs file to read + * @state: state to control read to debugfs file + * + * Return: curr pos + */ +uint32_t qdf_dpt_get_curr_pos_debugfs(qdf_debugfs_file_t file, + enum qdf_dpt_debugfs_state state); +/** + * qdf_dpt_dump_stats_debugfs() - dump DP Trace stats to debugfs file + * @file: debugfs file to read + * @curr_pos: curr position to start read + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_dpt_dump_stats_debugfs(qdf_debugfs_file_t file, + uint32_t curr_pos); + +/** + * qdf_dpt_set_value_debugfs() - set value of DP Trace debugfs params + * @proto_bitmap: defines which protocol to be traced + * @no_of_record: defines every nth packet to be traced + * @verbosity : defines verbosity level + * @num_records_to_dump: defines number of records to be dumped + * + * Return: none + */ +void qdf_dpt_set_value_debugfs(uint8_t proto_bitmap, uint8_t no_of_record, + uint8_t verbosity, uint16_t num_records_to_dump); + + +/** + * qdf_dp_trace_dump_stats() - dump DP Trace stats + * + * Return: none + */ +void qdf_dp_trace_dump_stats(void); +typedef void (*tp_qdf_dp_trace_cb)(struct qdf_dp_trace_record_s*, + uint16_t, uint8_t, uint8_t info); +/** + * qdf_dp_display_record() - Displays a record in DP trace + * @record: pointer to a record in DP trace + * @index: record index + * @pdev_id: pdev id for the mgmt pkt + * @info: info used to display pkt (live mode, throttling) + * + * Return: None + */ +void qdf_dp_display_record(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, + uint8_t info); + +/** + * qdf_dp_display_ptr_record() - display record + * @record: dptrace record + * @rec_index: index + * @pdev_id: pdev id for the mgmt pkt + * @info: info used to display pkt (live mode, throttling) + * + * Return: none + */ +void qdf_dp_display_ptr_record(struct qdf_dp_trace_record_s *record, + uint16_t rec_index, uint8_t pdev_id, + uint8_t info); + +/** + * qdf_dp_display_proto_pkt() - display proto packet + * @record: dptrace record + * @index: index + * @pdev_id: pdev id for the mgmt pkt + * @info: info used to display pkt (live mode, throttling) + * + * Return: none + */ +void qdf_dp_display_proto_pkt(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, + uint8_t info); +/** + * qdf_dp_display_data_pkt_record() - Displays a data packet in DP trace + * @record: pointer to a record in DP trace + * @rec_index: record index + * @pdev_id: pdev id + * @info: display info regarding record + * + * Return: None + */ +void +qdf_dp_display_data_pkt_record(struct qdf_dp_trace_record_s *record, + uint16_t rec_index, uint8_t pdev_id, + uint8_t info); + +void qdf_dp_trace_ptr(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code, + uint8_t pdev_id, uint8_t *data, uint8_t size, + uint16_t msdu_id, uint16_t status); +void qdf_dp_trace_throttle_live_mode(bool high_bw_request); + +/** + * qdf_dp_trace_tput_policy() - Change verbosity based on the TPUT + * @is_data_traffic: Is traffic more than low TPUT threashould + * + * Return: None + */ +void qdf_dp_trace_apply_tput_policy(bool is_data_traffic); + +/** + * qdf_dp_trace_data_pkt() - trace data packet + * @nbuf: nbuf which needs to be traced + * @pdev_id: pdev_id + * @code: QDF_DP_TRACE_ID for the packet (TX or RX) + * @msdu_id: tx desc id for the nbuf (Only applies to TX packets) + * @dir: TX or RX packet direction + * + * Return: None + */ +void qdf_dp_trace_data_pkt(qdf_nbuf_t nbuf, uint8_t pdev_id, + enum QDF_DP_TRACE_ID code, uint16_t msdu_id, + enum qdf_proto_dir dir); + +uint32_t qdf_dp_get_proto_bitmap(void); +uint8_t qdf_dp_get_verbosity(void); +uint8_t qdf_dp_get_no_of_record(void); + +/** + * qdf_dp_trace_proto_pkt() - record proto packet + * @code: dptrace code + * @vdev_id: vdev id + * @sa: source mac address + * @da: destination mac address + * @type: proto type + * @subtype: proto subtype + * @dir: direction + * @pdev_id: pdev id + * @print: to print this proto pkt or not + * + * Return: none + */ +void +qdf_dp_trace_proto_pkt(enum QDF_DP_TRACE_ID code, uint8_t vdev_id, + uint8_t *sa, uint8_t *da, enum qdf_proto_type type, + enum qdf_proto_subtype subtype, enum qdf_proto_dir dir, + uint8_t pdev_id, bool print); + +void qdf_dp_trace_disable_live_mode(void); +void qdf_dp_trace_enable_live_mode(void); +void qdf_dp_trace_clear_buffer(void); +/** + * qdf_dp_trace_mgmt_pkt() - record mgmt packet + * @code: dptrace code + * @vdev_id: vdev id + * @pdev_id: pdev_id + * @type: proto type + * @subtype: proto subtype + * + * Return: none + */ +void qdf_dp_trace_mgmt_pkt(enum QDF_DP_TRACE_ID code, uint8_t vdev_id, + uint8_t pdev_id, enum qdf_proto_type type, + enum qdf_proto_subtype subtype); + +/** + * qdf_dp_trace_credit_record() - record credit update + * @source: source of record + * @operation: credit operation + * @delta: credit delta + * @total_credits: total credit + * @g0_credit: group 0 credit + * @g1_credit: group 1 credit + */ +void qdf_dp_trace_credit_record(enum QDF_CREDIT_UPDATE_SOURCE source, + enum QDF_CREDIT_OPERATION operation, + int delta, int total_credits, + int g0_credit, int g1_credit); + +/** + * qdf_dp_display_mgmt_pkt() - display proto packet + * @record: dptrace record + * @index: index + * @pdev_id: pdev id for the mgmt pkt + * @info: info used to display pkt (live mode, throttling) + * + * Return: none + */ +void qdf_dp_display_mgmt_pkt(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info); + +/** + * qdf_dp_display_credit_record() - display credit record + * @record: dptrace record + * @index: index + * @pdev_id: pdev id + * @info: metadeta info + */ +void qdf_dp_display_credit_record(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, + uint8_t info); + +/** + * qdf_dp_display_event_record() - display event records + * @record: dptrace record + * @index: index + * @pdev_id: pdev id for the mgmt pkt + * @info: info used to display pkt (live mode, throttling) + * + * Return: none + */ +void qdf_dp_display_event_record(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info); + +void qdf_dp_trace_record_event(enum QDF_DP_TRACE_ID code, uint8_t vdev_id, + uint8_t pdev_id, enum qdf_proto_type type, + enum qdf_proto_subtype subtype); + +/** + * qdf_dp_set_proto_event_bitmap() - Set the protocol event bitmap + * @value: proto event bitmap value. + * + * QDF_NBUF_PKT_TRAC_TYPE_DNS 0x01 + * QDF_NBUF_PKT_TRAC_TYPE_EAPOL 0x02 + * QDF_NBUF_PKT_TRAC_TYPE_DHCP 0x04 + * QDF_NBUF_PKT_TRAC_TYPE_ARP 0x10 + * + * Return: none + */ +void qdf_dp_set_proto_event_bitmap(uint32_t value); + +/** + * qdf_dp_log_proto_pkt_info() - Send diag log event + * @sa: source MAC address + * @da: destination MAC address + * @type: pkt type + * @subtype: pkt subtype + * @dir: tx or rx + * @msdu_id: msdu id + * @status: status + * + * Return: none + */ +void qdf_dp_log_proto_pkt_info(uint8_t *sa, uint8_t *da, uint8_t type, + uint8_t subtype, uint8_t dir, uint16_t msdu_id, + uint8_t status); + +/** + * qdf_dp_track_noack_check() - Check if no ack count should be tracked for + * the configured protocol packet types + * @nbuf: nbuf + * @subtype: subtype of packet to be tracked + * + * Return: none + */ +void qdf_dp_track_noack_check(qdf_nbuf_t nbuf, enum qdf_proto_subtype *subtype); +#else +static inline +bool qdf_dp_trace_log_pkt(uint8_t vdev_id, struct sk_buff *skb, + enum qdf_proto_dir dir, uint8_t pdev_id) +{ + return false; +} +static inline +void qdf_dp_trace_init(bool live_mode_config, uint8_t thresh, + uint16_t time_limit, uint8_t verbosity, + uint32_t proto_bitmap) +{ +} + +static inline +void qdf_dp_trace_deinit(void) +{ +} + +static inline +void qdf_dp_trace_set_track(qdf_nbuf_t nbuf, enum qdf_proto_dir dir) +{ +} +static inline +void qdf_dp_trace_set_value(uint32_t proto_bitmap, uint8_t no_of_records, + uint8_t verbosity) +{ +} + +static inline +void qdf_dp_trace_dump_all(uint32_t count, uint8_t pdev_id) +{ +} + +static inline +uint32_t qdf_dpt_get_curr_pos_debugfs(qdf_debugfs_file_t file, + enum qdf_dpt_debugfs_state state) +{ + return 0; +} + +static inline +QDF_STATUS qdf_dpt_dump_stats_debugfs(qdf_debugfs_file_t file, + uint32_t curr_pos) +{ + return QDF_STATUS_SUCCESS; +} + +static inline +void qdf_dpt_set_value_debugfs(uint8_t proto_bitmap, uint8_t no_of_record, + uint8_t verbosity, uint16_t num_records_to_dump) +{ +} + +static inline void qdf_dp_trace_dump_stats(void) +{ +} + +static inline +void qdf_dp_trace_disable_live_mode(void) +{ +} + +static inline +void qdf_dp_trace_enable_live_mode(void) +{ +} + +static inline +void qdf_dp_trace_throttle_live_mode(bool high_bw_request) +{ +} + +static inline +void qdf_dp_trace_clear_buffer(void) +{ +} + +static inline +void qdf_dp_trace_apply_tput_policy(bool is_data_traffic) +{ +} + +static inline +void qdf_dp_trace_data_pkt(qdf_nbuf_t nbuf, uint8_t pdev_id, + enum QDF_DP_TRACE_ID code, uint16_t msdu_id, + enum qdf_proto_dir dir) +{ +} + +static inline +void qdf_dp_log_proto_pkt_info(uint8_t *sa, uint8_t *da, uint8_t type, + uint8_t subtype, uint8_t dir, uint16_t msdu_id, + uint8_t status) +{ +} + +static inline +void qdf_dp_track_noack_check(qdf_nbuf_t nbuf, enum qdf_proto_subtype *subtype) +{ +} +#endif + +void qdf_trace_display(void); + +void __printf(3, 4) qdf_snprintf(char *str_buffer, unsigned int size, + char *str_format, ...); + +#define QDF_SNPRINTF qdf_snprintf + +#ifdef TSOSEG_DEBUG + +static inline void qdf_tso_seg_dbg_bug(char *msg) +{ + qdf_print("%s", msg); + QDF_BUG(0); +}; + +/** + * qdf_tso_seg_dbg_init - initialize TSO segment debug structure + * @tsoseg : structure to initialize + * + * TSO segment dbg structures are attached to qdf_tso_seg_elem_t + * structures and are allocated only of TSOSEG_DEBUG is defined. + * When allocated, at the time of the tso_seg_pool initialization, + * which goes with tx_desc initialization (1:1), each structure holds + * a number of (currently 16) history entries, basically describing + * what operation has been performed on this particular tso_seg_elem. + * This history buffer is a circular buffer and the current index is + * held in an atomic variable called cur. It is incremented every + * operation. Each of these operations are added with the function + * qdf_tso_seg_dbg_record. + * For each segment, this initialization function MUST be called PRIOR + * TO any _dbg_record() function calls. + * On free, qdf_tso_seg_elem structure is cleared (using qdf_tso_seg_dbg_zero) + * which clears the tso_desc, BUT DOES NOT CLEAR THE HISTORY element. + * + * Return: + * None + */ +static inline +void qdf_tso_seg_dbg_init(struct qdf_tso_seg_elem_t *tsoseg) +{ + tsoseg->dbg.txdesc = NULL; + qdf_atomic_init(&tsoseg->dbg.cur); /* history empty */ +} + +/** + * qdf_tso_seg_dbg_record - add a history entry to TSO debug structure + * @tsoseg : structure to initialize + * @id : operation ID (identifies the caller) + * + * Adds a history entry to the history circular buffer. Each entry + * contains an operation id (caller, as currently each ID is used only + * once in the source, so it directly identifies the src line that invoked + * the recording. + * + * qdf_tso_seg_dbg_record CAN ONLY BE CALLED AFTER the entry is initialized + * by qdf_tso_seg_dbg_init. + * + * The entry to be added is written at the location pointed by the atomic + * variable called cur. Cur is an ever increasing atomic variable. It is + * masked so that only the lower 4 bits are used (16 history entries). + * + * Return: + * int: the entry this record was recorded at + */ +static inline +int qdf_tso_seg_dbg_record(struct qdf_tso_seg_elem_t *tsoseg, short id) +{ + int rc = -1; + unsigned int c; + + qdf_assert(tsoseg); + + if (id == TSOSEG_LOC_ALLOC) { + c = qdf_atomic_read(&tsoseg->dbg.cur); + /* dont crash on the very first alloc on the segment */ + c &= 0x0f; + /* allow only INIT and FREE ops before ALLOC */ + if (tsoseg->dbg.h[c].id >= id) + qdf_tso_seg_dbg_bug("Rogue TSO seg alloc"); + } + c = qdf_atomic_inc_return(&tsoseg->dbg.cur); + + c &= 0x0f; + tsoseg->dbg.h[c].ts = qdf_get_log_timestamp(); + tsoseg->dbg.h[c].id = id; + rc = c; + + return rc; +}; + +static inline void +qdf_tso_seg_dbg_setowner(struct qdf_tso_seg_elem_t *tsoseg, void *owner) +{ + if (tsoseg) + tsoseg->dbg.txdesc = owner; +}; + +static inline void +qdf_tso_seg_dbg_zero(struct qdf_tso_seg_elem_t *tsoseg) +{ + memset(tsoseg, 0, offsetof(struct qdf_tso_seg_elem_t, dbg)); + return; +}; + +#else +static inline +void qdf_tso_seg_dbg_init(struct qdf_tso_seg_elem_t *tsoseg) +{ +}; +static inline +int qdf_tso_seg_dbg_record(struct qdf_tso_seg_elem_t *tsoseg, short id) +{ + return 0; +}; +static inline void qdf_tso_seg_dbg_bug(char *msg) +{ +}; +static inline void +qdf_tso_seg_dbg_setowner(struct qdf_tso_seg_elem_t *tsoseg, void *owner) +{ +}; +static inline int +qdf_tso_seg_dbg_zero(struct qdf_tso_seg_elem_t *tsoseg) +{ + memset(tsoseg, 0, sizeof(struct qdf_tso_seg_elem_t)); + return 0; +}; + +#endif /* TSOSEG_DEBUG */ + +/** + * qdf_trace_hex_dump() - externally called hex dump function + * @module: Module identifier a member of the QDF_MODULE_ID enumeration that + * identifies the module issuing the trace message. + * @level: Trace level a member of the QDF_TRACE_LEVEL enumeration indicating + * the severity of the condition causing the trace message to be + * issued. More severe conditions are more likely to be logged. + * @data: The base address of the buffer to be logged. + * @buf_len: The size of the buffer to be logged. + * + * Checks the level of severity and accordingly prints the trace messages + * + * Return: None + */ +void qdf_trace_hex_dump(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + void *data, int buf_len); + +/** + * qdf_trace_hex_ascii_dump() - externally called hex and ascii dump function + * @module: Module identifier a member of the QDF_MODULE_ID enumeration that + * identifies the module issuing the trace message. + * @level: Trace level a member of the QDF_TRACE_LEVEL enumeration indicating + * the severity of the condition causing the trace message to be + * issued. More severe conditions are more likely to be logged. + * @data: The base address of the buffer to be logged. + * @buf_len: The size of the buffer to be logged. + * + * Checks the level of severity and accordingly prints the trace messages + * + * Return: None + */ +void qdf_trace_hex_ascii_dump(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + void *data, int buf_len); + +#define ERROR_CODE -1 +#define QDF_MAX_NAME_SIZE 32 +#define MAX_PRINT_CONFIG_SUPPORTED 32 + +#define MAX_SUPPORTED_CATEGORY QDF_MODULE_ID_MAX + +/** + * qdf_set_pidx() - Sets the global qdf_pidx. + * @pidx : Index of print control object assigned to the module + * + */ +void qdf_set_pidx(int pidx); + +/** + * qdf_get_pidx() - Returns the global qdf_pidx. + * + * Return : Current qdf print index. + */ +int qdf_get_pidx(void); +/* + * Shared print control index + * for converged debug framework + */ + +#define QDF_PRINT_IDX_SHARED -1 + +/** + * QDF_PRINT_INFO() - Generic wrapper API for logging + * @idx : Index of print control object + * @module : Module identifier. A member of QDF_MODULE_ID enumeration that + * identifies the module issuing the trace message + * @level : Trace level. A member of QDF_TRACE_LEVEL enumeration indicating + * the severity of the condition causing the trace message to be + * issued. + * @str_format : Format string that contains the message to be logged. + * + * + * This wrapper will be used for any generic logging messages. Wrapper will + * compile a call to converged QDF trace message API. + * + * Return : Nothing + * + */ +void QDF_PRINT_INFO(unsigned int idx, QDF_MODULE_ID module, + QDF_TRACE_LEVEL level, + char *str_format, ...); + +/** + * struct category_info : Category information structure + * @category_verbose_mask: Embeds information about category's verbose level + */ +struct category_info { + uint16_t category_verbose_mask; +}; + +/** + * struct category_name_info : Category name information structure + * @category_name_str: Embeds information about category name + */ +struct category_name_info { + unsigned char category_name_str[QDF_MAX_NAME_SIZE]; +}; + +/** + * qdf_trace_msg_cmn()- Converged logging API + * @idx: Index of print control object assigned to the module + * @category: Category identifier. A member of the QDF_MODULE_ID enumeration + * that identifies the category issuing the trace message. + * @verbose: Verbose level. A member of the QDF_TRACE_LEVEL enumeration + * indicating the severity of the condition causing the trace + * message to be issued. More severe conditions are more likely + * to be logged. + * @str_format: Format string. The message to be logged. This format string + * contains printf-like replacement parameters, which follow this + * parameter in the variable argument list. + * @val: Variable argument list part of the log message + * + * Return: nothing + * + */ +void qdf_trace_msg_cmn(unsigned int idx, + QDF_MODULE_ID category, + QDF_TRACE_LEVEL verbose, + const char *str_format, + va_list val); + +/** + * struct qdf_print_ctrl: QDF Print Control structure + * Statically allocated objects of print control + * structure are declared that will support maximum of + * 32 print control objects. Any module that needs to + * register to the print control framework needs to + * obtain a print control object using + * qdf_print_ctrl_register API. It will have to pass + * pointer to category info structure, name and + * custom print function to be used if required. + * @name : Optional name for the control object + * @cat_info : Array of category_info struct + * @custom_print : Custom print handler + * @custom_ctxt : Custom print context + * @dbglvlmac_on : Flag to enable/disable MAC level filtering + * @in_use : Boolean to indicate if control object is in use + */ +struct qdf_print_ctrl { + char name[QDF_MAX_NAME_SIZE]; + struct category_info cat_info[MAX_SUPPORTED_CATEGORY]; + void (*custom_print)(void *ctxt, const char *fmt, va_list args); + void *custom_ctxt; +#ifdef DBG_LVL_MAC_FILTERING + unsigned char dbglvlmac_on; +#endif + bool in_use; +}; + +/** + * qdf_print_ctrl_register() - Allocate QDF print control object, assign + * pointer to category info or print control + * structure and return the index to the callee + * @cinfo : Pointer to array of category info structure + * @custom_print_handler : Pointer to custom print handler + * @custom_ctx : Pointer to custom context + * @pctrl_name : Pointer to print control object name + * + * Return : Index of qdf_print_ctrl structure + * + */ +int qdf_print_ctrl_register(const struct category_info *cinfo, + void *custom_print_handler, + void *custom_ctx, + const char *pctrl_name); + +/** + * qdf_shared_print_ctrl_init() - Initialize the shared print ctrl obj with + * all categories set to the default level + * + * Return : void + * + */ +void qdf_shared_print_ctrl_init(void); + +/** + * qdf_print_setup() - Setup default values to all the print control objects + * + * Register new print control object for the callee + * + * Return : QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE + * on failure + */ +QDF_STATUS qdf_print_setup(void); + +/** + * qdf_print_ctrl_cleanup() - Clean up a print control object + * + * Cleanup the print control object for the callee + * + * @pctrl : Index of print control object + * + * Return : QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE on failure + */ +QDF_STATUS qdf_print_ctrl_cleanup(unsigned int idx); + +/** + * qdf_print_ctrl_shared_cleanup() - Clean up of the shared object + * + * Cleanup the shared print-ctrl-object + * + * Return : void + */ +void qdf_shared_print_ctrl_cleanup(void); + +/** + * qdf_print_set_category_verbose() - Enable/Disable category for a + * print control object with + * user provided verbose level + * + * @idx : Index of the print control object assigned to callee + * @category : Category information + * @verbose: Verbose information + * @is_set: Flag indicating if verbose level needs to be enabled or disabled + * + * Return : QDF_STATUS_SUCCESS for success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS qdf_print_set_category_verbose(unsigned int idx, + QDF_MODULE_ID category, + QDF_TRACE_LEVEL verbose, + bool is_set); + +/** + * qdf_log_dump_at_kernel_level() - Enable/Disable printk call + * @enable: Indicates whether printk is enabled in QDF_TRACE + * + * Return: void + */ +void qdf_log_dump_at_kernel_level(bool enable); + +/** + * qdf_logging_set_flush_timer() - Set the time period in which host logs + * should be flushed out to user-space + * @milliseconds: milliseconds after which the logs should be flushed out to + * user-space + * + * Return: QDF_STATUS_SUCCESS for success and QDF_STATUS_E_FAILURE for failure + */ +int qdf_logging_set_flush_timer(uint32_t milliseconds); + +/** + * qdf_logging_flush_logs() - Flush out the logs to user-space one time + * + * Return: void + */ +void qdf_logging_flush_logs(void); + +/** + * qdf_print_is_category_enabled() - Get category information for the + * print control object + * + * @idx : Index of print control object + * @category : Category information + * + * Return : Verbose enabled(true) or disabled(false) or invalid input (false) + */ +bool qdf_print_is_category_enabled(unsigned int idx, + QDF_MODULE_ID category); + +/** + * qdf_print_is_verbose_enabled() - Get verbose information of a category for + * the print control object + * + * @idx : Index of print control object + * @category : Category information + * @verbose : Verbose information + * + * Return : Verbose enabled(true) or disabled(false) or invalid input (false) + */ +bool qdf_print_is_verbose_enabled(unsigned int idx, + QDF_MODULE_ID category, + QDF_TRACE_LEVEL verbose); + +/** + * qdf_print_clean_node_flag() - Clean up node flag for print control object + * + * @idx : Index of print control object + * + * Return : None + */ +void qdf_print_clean_node_flag(unsigned int idx); + +#ifdef DBG_LVL_MAC_FILTERING + +/** + * qdf_print_set_node_flag() - Set flag to enable MAC level filtering + * + * @idx : Index of print control object + * @enable : Enable/Disable bit sent by callee + * + * Return : QDF_STATUS_SUCCESS on Success and QDF_STATUS_E_FAILURE on Failure + */ +QDF_STATUS qdf_print_set_node_flag(unsigned int idx, + uint8_t enable); + +/** + * qdf_print_get_node_flag() - Get flag that controls MAC level filtering + * + * @idx : Index of print control object + * + * Return : Flag that indicates enable(1) or disable(0) or invalid(-1) + */ +bool qdf_print_get_node_flag(unsigned int idx); + +#endif + +/** + * qdf_logging_init() - Initialize msg logging functionality + * + * + * Return : void + */ +void qdf_logging_init(void); + +/** + * qdf_logging_exit() - Cleanup msg logging functionality + * + * + * Return : void + */ +void qdf_logging_exit(void); + +#define QDF_SYMBOL_LEN __QDF_SYMBOL_LEN + +/** + * qdf_sprint_symbol() - prints the name of a symbol into a string buffer + * @buffer: the string buffer to print into + * @addr: address of the symbol to lookup and print + * + * Return: number of characters printed + */ +int qdf_sprint_symbol(char *buffer, void *addr); + +/** + * qdf_minidump_log() - Log memory address to be included in minidump + * @start_addr: Start address of the memory to be dumped + * @size: Size in bytes + * @name: String to identify this entry + */ +static inline +void qdf_minidump_log(void *start_addr, size_t size, const char *name) +{ + __qdf_minidump_log(start_addr, size, name); +} + +/** + * qdf_minidump_remove() - Remove memory address from minidump + * @addr: Start address of the memory previously added + */ +static inline +void qdf_minidump_remove(void *addr) +{ + __qdf_minidump_remove(addr); +} + +#endif /* __QDF_TRACE_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_tracker.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_tracker.h new file mode 100644 index 0000000000000000000000000000000000000000..04dee0e7b246ab9ec6f2d98cde8bae7460e67c8e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_tracker.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __QDF_TRACKER_H +#define __QDF_TRACKER_H + +#include "qdf_lock.h" +#include "qdf_ptr_hash.h" +#include "qdf_status.h" +#include "qdf_types.h" + +#define QDF_TRACKER_FUNC_SIZE 48 + +/** + * struct qdf_tracker - a generic type for tracking resources + * @leak_title: the string title to use when logging leaks + * @track_title: the string title to use when logging double tracking issues + * @untrack_title: the string title to use when logging double untracking issues + * @lock: lock for simultaneous access to @ht + * @ht: the hashtable used for storing tracking information + */ +struct qdf_tracker { + const char *leak_title; + const char *track_title; + const char *untrack_title; + struct qdf_spinlock lock; + struct qdf_ptr_hash *ht; +}; + +/** + * qdf_tracker_declare() - statically declare a qdf_tacker instance + * @name: C identifier to use for the new qdf_tracker + * @bits: the number of bits to use for hashing the resource pointers + * @leak_title: the string title to use when logging leaks + * @track_title: the string title to use when logging double tracking issues + * @untrack_title: the string title to use when logging double untracking issues + */ +#define qdf_tracker_declare(name, bits, _leak_title, \ + _track_title, _untrack_title) \ +qdf_ptr_hash_declare(name ## _ht, bits); \ +struct qdf_tracker name = { \ + .leak_title = _leak_title, \ + .track_title = _track_title, \ + .untrack_title = _untrack_title, \ + .ht = qdf_ptr_hash_ptr(name ## _ht), \ +} + +/** + * qdf_tracker_init() - initialize a qdf_tracker + * @tracker: the qdf_tracker to initialize + * + * Return: None + */ +void qdf_tracker_init(struct qdf_tracker *tracker); + +/** + * qdf_tracker_deinit() - de-initialize a qdf_tracker + * @tracker: the qdf_tracker to de-initialize + * + * Return: None + */ +void qdf_tracker_deinit(struct qdf_tracker *tracker); + +/** + * qdf_tracker_track() - track a resource with @tracker + * @tracker: the qdf_tracker to track with + * @ptr: an opaque pointer to the resource to track + * @func: name of the caller function operating on @ptr + * @line: line number of the call site operating on @ptr + * + * Return: QDF_STATUS + */ +qdf_must_check QDF_STATUS +qdf_tracker_track(struct qdf_tracker *tracker, void *ptr, + const char *func, uint32_t line); + +/** + * qdf_tracker_untrack() - untrack a resource with @tracker + * @tracker: the qdf_tracker used to track @ptr + * @ptr: an opaque pointer to the resource to untrack + * @func: name of the caller function operating on @ptr + * @line: line number of the call site operating on @ptr + * + * Return: None + */ +void qdf_tracker_untrack(struct qdf_tracker *tracker, void *ptr, + const char *func, uint32_t line); + +/** + * qdf_tracker_check_for_leaks() - assert @tracker has no tracked resources + * for the current debug domain + * @tracker: the qdf_tracker to check + * + * Return: None + */ +void qdf_tracker_check_for_leaks(struct qdf_tracker *tracker); + +/** + * qdf_tracker_lookup() - query tracking information for @ptr + * @tracker: the qdf_tracker to check + * @ptr: the opaque pointer of the resource to lookup + * @out_func: function name provided when @ptr was tracked, populated on success + * @out_line: line number provided when @ptr was tracked, populated on success + * + * Note: @out_func is assumed to be sizeof(QDF_TRACKER_FUNC_SIZE). + * + * Return: true if @tracker is tracking @ptr + */ +qdf_must_check bool +qdf_tracker_lookup(struct qdf_tracker *tracker, void *ptr, + char (*out_func)[QDF_TRACKER_FUNC_SIZE], + uint32_t *out_line); + +#endif /* __QDF_TRACKER_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_types.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_types.h new file mode 100644 index 0000000000000000000000000000000000000000..87cf7569553f57c77305fc115ffbc3b0d86682ab --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_types.h @@ -0,0 +1,1437 @@ +/* + * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_types.h + * QCA driver framework (QDF) basic type definitions + */ + +#if !defined(__QDF_TYPES_H) +#define __QDF_TYPES_H + +#define qdf_must_check __qdf_must_check + +/* Include Files */ +#include +#include +#ifdef TSOSEG_DEBUG +#include +#endif +#include "qdf_status.h" + +/* Preprocessor definitions and constants */ +#define QDF_MAX_SGLIST 4 + +#define CPU_CLUSTER_TYPE_LITTLE 0 +#define CPU_CLUSTER_TYPE_PERF 1 + +/** + * struct qdf_sglist - scatter-gather list + * @nsegs: total number of segments + * struct __sg_segs - scatter-gather segment list + * @vaddr: Virtual address of the segment + * @len: Length of the segment + */ +typedef struct qdf_sglist { + uint32_t nsegs; + struct __sg_segs { + uint8_t *vaddr; + uint32_t len; + } sg_segs[QDF_MAX_SGLIST]; +} qdf_sglist_t; + +#define QDF_MAX_SCATTER __QDF_MAX_SCATTER +#define QDF_NSEC_PER_MSEC __QDF_NSEC_PER_MSEC + +/** + * QDF_SWAP_U16 - swap input u16 value + * @_x: variable to swap + */ +#define QDF_SWAP_U16(_x) \ + ((((_x) << 8) & 0xFF00) | (((_x) >> 8) & 0x00FF)) + +/** + * QDF_SWAP_U32 - swap input u32 value + * @_x: variable to swap + */ +#define QDF_SWAP_U32(_x) \ + (((((_x) << 24) & 0xFF000000) | (((_x) >> 24) & 0x000000FF)) | \ + ((((_x) << 8) & 0x00FF0000) | (((_x) >> 8) & 0x0000FF00))) + +/* ticks per second */ +#define QDF_TICKS_PER_SECOND (1000) + +/** + * QDF_ARRAY_SIZE - get array size + * @_arr: array variable name + */ +#define QDF_ARRAY_SIZE(_arr) (sizeof(_arr) / sizeof((_arr)[0])) + +#define QDF_MAX_SCATTER __QDF_MAX_SCATTER + +/** + * qdf_packed - denotes structure is packed. + */ +#define qdf_packed __qdf_packed + +/** + * qdf_toupper - char lower to upper. + */ +#define qdf_toupper __qdf_toupper + +typedef void *qdf_net_handle_t; + +typedef void *qdf_netlink_handle_t; +typedef void *qdf_drv_handle_t; +typedef void *qdf_os_handle_t; +typedef void *qdf_pm_t; + + +/** + * typedef qdf_handle_t - handles opaque to each other + */ +typedef void *qdf_handle_t; + +/** + * typedef qdf_freq_t - define frequency as a 16 bit/32 bit + * unsigned integer depending on the requirement + */ +#ifdef CONFIG_16_BIT_FREQ_TYPE +typedef uint16_t qdf_freq_t; +#else +typedef uint32_t qdf_freq_t; +#endif +/** + * typedef qdf_device_t - Platform/bus generic handle. + * Used for bus specific functions. + */ +typedef __qdf_device_t qdf_device_t; + +/* Byte order identifiers */ +typedef __qdf_le16_t qdf_le16_t; +typedef __qdf_le32_t qdf_le32_t; +typedef __qdf_le64_t qdf_le64_t; +typedef __qdf_be16_t qdf_be16_t; +typedef __qdf_be32_t qdf_be32_t; +typedef __qdf_be64_t qdf_be64_t; + +/** + * typedef qdf_size_t - size of an object + */ +typedef __qdf_size_t qdf_size_t; + +/** + * typedef __qdf_off_t - offset for API's that need them. + */ +typedef __qdf_off_t qdf_off_t; + +/** + * typedef qdf_dma_map_t - DMA mapping object. + */ +typedef __qdf_dma_map_t qdf_dma_map_t; + +/** + * tyepdef qdf_dma_addr_t - DMA address. + */ +typedef __qdf_dma_addr_t qdf_dma_addr_t; + +/** + * typedef __qdf_dma_size_t - DMA size. + */ +typedef __qdf_dma_size_t qdf_dma_size_t; + +/** + * tyepdef qdf_dma_context_t - DMA context. + */ +typedef __qdf_dma_context_t qdf_dma_context_t; + +typedef __qdf_mem_info_t qdf_mem_info_t; +typedef __sgtable_t sgtable_t; + +/** + * typepdef qdf_cpu_mask - CPU Mask + */ +typedef __qdf_cpu_mask qdf_cpu_mask; + +/** + * pointer to net device + */ +typedef __qdf_netdev_t qdf_netdev_t; + +/** + * struct qdf_dma_map_info - Information inside a DMA map. + * @nsegs: total number mapped segments + * struct __dma_segs - Information of physical address. + * @paddr: physical(dam'able) address of the segment + * @len: length of the segment + */ +typedef struct qdf_dma_map_info { + uint32_t nsegs; + struct __dma_segs { + qdf_dma_addr_t paddr; + qdf_dma_size_t len; + } dma_segs[QDF_MAX_SCATTER]; +} qdf_dmamap_info_t; + +/** + * struct qdf_shared_mem - Shared memory resource + * @mem_info: memory info struct + * @vaddr: virtual address + * @sgtable: scatter-gather table + * @memctx: dma address + */ +typedef struct qdf_shared_mem { + qdf_mem_info_t mem_info; + void *vaddr; + sgtable_t sgtable; + qdf_dma_mem_context(memctx); +} qdf_shared_mem_t; + +#define qdf_iomem_t __qdf_iomem_t + +/** + * typedef enum QDF_TIMER_TYPE - QDF timer type + * @QDF_TIMER_TYPE_SW: Deferrable SW timer it will not cause CPU to wake up + * on expiry + * @QDF_TIMER_TYPE_WAKE_APPS: Non deferrable timer which will cause CPU to + * wake up on expiry + */ +typedef enum { + QDF_TIMER_TYPE_SW, + QDF_TIMER_TYPE_WAKE_APPS +} QDF_TIMER_TYPE; + +/** + * tyepdef qdf_resource_type_t - hw resources + * @QDF_RESOURCE_TYPE_MEM: memory resource + * @QDF_RESOURCE_TYPE_IO: io resource + * Define the hw resources the OS has allocated for the device + * Note that start defines a mapped area. + */ +typedef enum { + QDF_RESOURCE_TYPE_MEM, + QDF_RESOURCE_TYPE_IO, +} qdf_resource_type_t; + +/** + * tyepdef qdf_resource_t - representation of a h/w resource. + * @start: start + * @end: end + * @type: resource type + */ +typedef struct { + uint64_t start; + uint64_t end; + qdf_resource_type_t type; +} qdf_resource_t; + +/** + * typedef qdf_dma_dir_t - DMA directions + * @QDF_DMA_BIDIRECTIONAL: bidirectional data + * @QDF_DMA_TO_DEVICE: data going from device to memory + * @QDF_DMA_FROM_DEVICE: data going from memory to device + */ +typedef enum { + QDF_DMA_BIDIRECTIONAL = __QDF_DMA_BIDIRECTIONAL, + QDF_DMA_TO_DEVICE = __QDF_DMA_TO_DEVICE, + QDF_DMA_FROM_DEVICE = __QDF_DMA_FROM_DEVICE, +} qdf_dma_dir_t; + +/** + * enum qdf_driver_type - Indicate the driver type and based on this + * do appropriate initialization. + * + * @QDF_DRIVER_TYPE_PRODUCTION: Driver used in the production + * @QDF_DRIVER_TYPE_MFG: Driver used in the Factory + * @QDF_DRIVER_TYPE_INVALID: Invalid and unrecognized type + * + */ +enum qdf_driver_type { + QDF_DRIVER_TYPE_PRODUCTION = 0, + QDF_DRIVER_TYPE_MFG = 1, + QDF_DRIVER_TYPE_INVALID = 0x7FFFFFFF +}; + +/* work queue(kernel thread)/DPC function callback */ +typedef void (*qdf_defer_fn_t)(void *); + +/* + * Prototype of the critical region function that is to be + * executed with spinlock held and interrupt disalbed + */ +typedef bool (*qdf_irqlocked_func_t)(void *); + +#define qdf_offsetof(type, field) offsetof(type, field) + +/** + * typedef enum QDF_MODULE_ID - Debug category level + * @QDF_MODULE_ID_MIN: The smallest/starting module id + * @QDF_MODULE_ID_TDLS: TDLS + * @QDF_MODULE_ID_ACS: auto channel selection + * @QDF_MODULE_ID_SCAN_SM: scan state machine + * @QDF_MODULE_ID_SCANENTRY: scan entry + * @QDF_MODULE_ID_WDS: WDS handling + * @QDF_MODULE_ID_ACTION: action management frames + * @QDF_MODULE_ID_ROAM: sta mode roaming + * @QDF_MODULE_ID_INACT: inactivity handling + * @QDF_MODULE_ID_DOTH: 11.h + * @QDF_MODULE_ID_IQUE: IQUE features + * @QDF_MODULE_ID_WME: WME protocol + * @QDF_MODULE_ID_ACL: ACL handling + * @QDF_MODULE_ID_WPA: WPA/RSN protocol + * @QDF_MODULE_ID_RADKEYS: dump 802.1x keys + * @QDF_MODULE_ID_RADDUMP: dump 802.1x radius packets + * @QDF_MODULE_ID_RADIUS: 802.1x radius client + * @QDF_MODULE_ID_DOT1XSM: 802.1x state machine + * @QDF_MODULE_ID_DOT1X: 802.1x authenticator + * @QDF_MODULE_ID_POWER: power save handling + * @QDF_MODULE_ID_STATS: state machine + * @QDF_MODULE_ID_OUTPUT: output handling + * @QDF_MODULE_ID_SCAN: scanning + * @QDF_MODULE_ID_AUTH: authentication handling + * @QDF_MODULE_ID_ASSOC: association handling + * @QDF_MODULE_ID_NODE: node handling + * @QDF_MODULE_ID_ELEMID: element id parsing + * @QDF_MODULE_ID_XRATE: rate set handling + * @QDF_MODULE_ID_INPUT: input handling + * @QDF_MODULE_ID_CRYPTO: crypto work + * @QDF_MODULE_ID_DUMPPKTS: IFF_LINK2 equivalant + * @QDF_MODULE_ID_DEBUG: IFF_DEBUG equivalent + * @QDF_MODULE_ID_MLME: MLME + * @QDF_MODULE_ID_RRM: Radio resource measurement + * @QDF_MODULE_ID_WNM: Wireless Network Management + * @QDF_MODULE_ID_P2P_PROT: P2P Protocol driver + * @QDF_MODULE_ID_PROXYARP: 11v Proxy ARP + * @QDF_MODULE_ID_L2TIF: Hotspot 2.0 L2 TIF + * @QDF_MODULE_ID_WIFIPOS: WifiPositioning Feature + * @QDF_MODULE_ID_WRAP: WRAP or Wireless ProxySTA + * @QDF_MODULE_ID_DFS: DFS debug mesg + * @QDF_MODULE_ID_TLSHIM: TLSHIM module ID + * @QDF_MODULE_ID_WMI: WMI module ID + * @QDF_MODULE_ID_HTT: HTT module ID + * @QDF_MODULE_ID_HDD: HDD module ID + * @QDF_MODULE_ID_SME: SME module ID + * @QDF_MODULE_ID_PE: PE module ID + * @QDF_MODULE_ID_WMA: WMA module ID + * @QDF_MODULE_ID_SYS: SYS module ID + * @QDF_MODULE_ID_QDF: QDF module ID + * @QDF_MODULE_ID_SAP: SAP module ID + * @QDF_MODULE_ID_HDD_SOFTAP: HDD SAP module ID + * @QDF_MODULE_ID_HDD_DATA: HDD DATA module ID + * @QDF_MODULE_ID_HDD_SAP_DATA: HDD SAP DATA module ID + * @QDF_MODULE_ID_HIF: HIF module ID + * @QDF_MODULE_ID_HTC: HTC module ID + * @QDF_MODULE_ID_TXRX: TXRX module ID + * @QDF_MODULE_ID_QDF_DEVICE: QDF DEVICE module ID + * @QDF_MODULE_ID_CFG: CFG module ID + * @QDF_MODULE_ID_BMI: BMI module ID + * @QDF_MODULE_ID_EPPING: EPPING module ID + * @QDF_MODULE_ID_QVIT: QVIT module ID + * @QDF_MODULE_ID_DP: Data-path module ID + * @QDF_MODULE_ID_HAL: Hal abstraction module ID + * @QDF_MODULE_ID_SOC: SOC module ID + * @QDF_MODULE_ID_OS_IF: OS-interface module ID + * @QDF_MODULE_ID_TARGET_IF: targer interface module ID + * @QDF_MODULE_ID_SCHEDULER: schduler module ID + * @QDF_MODULE_ID_MGMT_TXRX: management TX/RX module ID + * @QDF_MODULE_ID_SERIALIZATION: serialization module ID + * @QDF_MODULE_ID_PMO: PMO (power manager and offloads) Module ID + * @QDF_MODULE_ID_P2P: P2P module ID + * @QDF_MODULE_ID_POLICY_MGR: Policy Manager module ID + * @QDF_MODULE_ID_CONFIG: CFG (configuration) component ID + * @QDF_MODULE_ID_REGULATORY: REGULATORY module ID + * @QDF_MODULE_ID_NAN: NAN module ID + * @QDF_MODULE_ID_SPECTRAL: Spectral module ID + * @QDF_MODULE_ID_ROAM_DEBUG: Roam Debug logging + * @QDF_MODULE_ID_CDP: Converged Data Path module ID + * @QDF_MODULE_ID_DIRECT_BUF_RX: Direct Buffer Receive module ID + * @QDF_MODULE_ID_DISA: DISA (encryption test) module ID + * @QDF_MODULE_ID_GREEN_AP: Green AP related logging + * @QDF_MODULE_ID_FTM: FTM module ID + * @QDF_MODULE_ID_EXTAP: Extender AP module ID + * @QDF_MODULE_ID_FD: FILS discovery logging + * @QDF_MODULE_ID_OCB: OCB module ID + * @QDF_MODULE_ID_IPA: IPA module ID + * @QDF_MODULE_ID_CP_STATS: Control Plane Statistics ID + * @QDF_MODULE_ID_ACTION_OUI: ACTION OUI module ID + * @QDF_MODULE_ID_TARGET: Target module ID + * @QDF_MODULE_ID_MBSSIE: MBSS IE ID + * @QDF_MODULE_ID_FWOL: FW Offload module ID + * @QDF_MODULE_ID_SM_ENGINE: SM engine module ID + * @QDF_MODULE_ID_CMN_MLME: CMN MLME module ID + * @QDF_MODULE_ID_CFR: CFR module ID + * @QDF_MODULE_ID_TX_CAPTURE: Tx capture enhancement feature ID + * @QDF_MODULE_ID_INTEROP_ISSUES_AP: interop issues ap module ID + * @QDF_MODULE_ID_BLACKLIST_MGR: Blacklist Manager module + * @QDF_MODULE_ID_QLD: QCA Live Debug module ID + * @QDF_MODULE_ID_DYNAMIC_MODE_CHG: Dynamic mode change module ID + * @QDF_MODULE_ID_COEX: Coex related config module ID + * @QDF_MODULE_ID_FTM_TIME_SYNC: FTM Time sync module ID + * @QDF_MODULE_ID_PKT_CAPTURE: PACKET CAPTURE module ID + * @QDF_MODULE_ID_MON_FILTER: Monitor filter related config module ID + * @QDF_MODULE_ID_GPIO: GPIO configuration module ID + * @QDF_MODULE_ID_ANY: anything + * @QDF_MODULE_ID_MAX: Max place holder module ID + */ +typedef enum { + QDF_MODULE_ID_MIN = 0, + QDF_MODULE_ID_TDLS = QDF_MODULE_ID_MIN, + QDF_MODULE_ID_ACS, + QDF_MODULE_ID_SCAN_SM, + QDF_MODULE_ID_SCANENTRY, + QDF_MODULE_ID_WDS, + QDF_MODULE_ID_ACTION, + QDF_MODULE_ID_ROAM, + QDF_MODULE_ID_INACT, + QDF_MODULE_ID_DOTH = 8, + QDF_MODULE_ID_IQUE, + QDF_MODULE_ID_WME, + QDF_MODULE_ID_ACL, + QDF_MODULE_ID_WPA, + QDF_MODULE_ID_RADKEYS, + QDF_MODULE_ID_RADDUMP, + QDF_MODULE_ID_RADIUS, + QDF_MODULE_ID_DOT1XSM = 16, + QDF_MODULE_ID_DOT1X, + QDF_MODULE_ID_POWER, + QDF_MODULE_ID_STATE, + QDF_MODULE_ID_OUTPUT, + QDF_MODULE_ID_SCAN, + QDF_MODULE_ID_AUTH, + QDF_MODULE_ID_ASSOC, + QDF_MODULE_ID_NODE = 24, + QDF_MODULE_ID_ELEMID, + QDF_MODULE_ID_XRATE, + QDF_MODULE_ID_INPUT, + QDF_MODULE_ID_CRYPTO, + QDF_MODULE_ID_DUMPPKTS, + QDF_MODULE_ID_DEBUG, + QDF_MODULE_ID_MLME, + QDF_MODULE_ID_RRM = 32, + QDF_MODULE_ID_WNM, + QDF_MODULE_ID_P2P_PROT, + QDF_MODULE_ID_PROXYARP, + QDF_MODULE_ID_L2TIF, + QDF_MODULE_ID_WIFIPOS, + QDF_MODULE_ID_WRAP, + QDF_MODULE_ID_DFS, + QDF_MODULE_ID_ATF = 40, + QDF_MODULE_ID_SPLITMAC, + QDF_MODULE_ID_IOCTL, + QDF_MODULE_ID_NAC, + QDF_MODULE_ID_MESH, + QDF_MODULE_ID_MBO, + QDF_MODULE_ID_EXTIOCTL_CHANSWITCH, + QDF_MODULE_ID_EXTIOCTL_CHANSSCAN, + QDF_MODULE_ID_TLSHIM = 48, + QDF_MODULE_ID_WMI, + QDF_MODULE_ID_HTT, + QDF_MODULE_ID_HDD, + QDF_MODULE_ID_SME, + QDF_MODULE_ID_PE, + QDF_MODULE_ID_WMA, + QDF_MODULE_ID_SYS, + QDF_MODULE_ID_QDF = 56, + QDF_MODULE_ID_SAP, + QDF_MODULE_ID_HDD_SOFTAP, + QDF_MODULE_ID_HDD_DATA, + QDF_MODULE_ID_HDD_SAP_DATA, + QDF_MODULE_ID_HIF, + QDF_MODULE_ID_HTC, + QDF_MODULE_ID_TXRX, + QDF_MODULE_ID_QDF_DEVICE = 64, + QDF_MODULE_ID_CFG, + QDF_MODULE_ID_BMI, + QDF_MODULE_ID_EPPING, + QDF_MODULE_ID_QVIT, + QDF_MODULE_ID_DP, + QDF_MODULE_ID_HAL, + QDF_MODULE_ID_SOC, + QDF_MODULE_ID_OS_IF, + QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_SCHEDULER, + QDF_MODULE_ID_MGMT_TXRX, + QDF_MODULE_ID_SERIALIZATION, + QDF_MODULE_ID_PMO, + QDF_MODULE_ID_P2P, + QDF_MODULE_ID_POLICY_MGR, + QDF_MODULE_ID_CONFIG, + QDF_MODULE_ID_REGULATORY, + QDF_MODULE_ID_SA_API, + QDF_MODULE_ID_NAN, + QDF_MODULE_ID_OFFCHAN_TXRX, + QDF_MODULE_ID_SON, + QDF_MODULE_ID_SPECTRAL, + QDF_MODULE_ID_OBJ_MGR, + QDF_MODULE_ID_NSS, + QDF_MODULE_ID_ROAM_DEBUG, + QDF_MODULE_ID_CDP, + QDF_MODULE_ID_DIRECT_BUF_RX, + QDF_MODULE_ID_DISA, + QDF_MODULE_ID_GREEN_AP, + QDF_MODULE_ID_FTM, + QDF_MODULE_ID_EXTAP, + QDF_MODULE_ID_FD, + QDF_MODULE_ID_OCB, + QDF_MODULE_ID_IPA, + QDF_MODULE_ID_CP_STATS, + QDF_MODULE_ID_ACTION_OUI, + QDF_MODULE_ID_TARGET, + QDF_MODULE_ID_MBSSIE, + QDF_MODULE_ID_FWOL, + QDF_MODULE_ID_SM_ENGINE, + QDF_MODULE_ID_CMN_MLME, + QDF_MODULE_ID_BSSCOLOR, + QDF_MODULE_ID_CFR, + QDF_MODULE_ID_TX_CAPTURE, + QDF_MODULE_ID_INTEROP_ISSUES_AP, + QDF_MODULE_ID_BLACKLIST_MGR, + QDF_MODULE_ID_QLD, + QDF_MODULE_ID_DYNAMIC_MODE_CHG, + QDF_MODULE_ID_COEX, + QDF_MODULE_ID_FTM_TIME_SYNC, + QDF_MODULE_ID_PKT_CAPTURE, + QDF_MODULE_ID_MON_FILTER, + QDF_MODULE_ID_GPIO = 123, + QDF_MODULE_ID_ANY, + QDF_MODULE_ID_MAX, +} QDF_MODULE_ID; + +/** + * typedef enum QDF_TRACE_LEVEL - Debug verbose level + * @QDF_TRACE_LEVEL_NONE: no trace will be logged. This value is in place + * for the qdf_trace_setlevel() to allow the user + * to turn off all traces + * @QDF_TRACE_LEVEL_FATAL: Indicates fatal error conditions + * @QDF_TRACE_LEVEL_ERROR: Indicates error conditions + * @QDF_TRACE_LEVEL_WARN: May indicate that an error will occur if action + * is not taken + * @QDF_TRACE_LEVEL_INFO: Normal operational messages that require no action + * @QDF_TRACE_LEVEL_INFO_HIGH: High level operational messages that require + * no action + * @QDF_TRACE_LEVEL_INFO_MED: Middle level operational messages that require + * no action + * @QDF_TRACE_LEVEL_INFO_LOW: Low level operational messages that require + * no action + * @QDF_TRACE_LEVEL_DEBUG: Information useful to developers for debugging + * @QDF_TRACE_LEVEL_TRACE: Indicates trace level for automation scripts, + * whenever there is a context switch in driver, one + * print using this trace level will be added with + * the help of qdf_trace api. + * @QDF_TRACE_LEVEL_ALL: All trace levels + * @QDF_TRACE_LEVEL_MAX: Max trace level + */ +typedef enum { + QDF_TRACE_LEVEL_NONE, + QDF_TRACE_LEVEL_FATAL, + QDF_TRACE_LEVEL_ERROR, + QDF_TRACE_LEVEL_WARN, + QDF_TRACE_LEVEL_INFO, + QDF_TRACE_LEVEL_INFO_HIGH, + QDF_TRACE_LEVEL_INFO_MED, + QDF_TRACE_LEVEL_INFO_LOW, + QDF_TRACE_LEVEL_DEBUG, + QDF_TRACE_LEVEL_TRACE, + QDF_TRACE_LEVEL_ALL, + QDF_TRACE_LEVEL_MAX +} QDF_TRACE_LEVEL; + +/** + * enum QDF_OPMODE - vdev operating mode + * @QDF_STA_MODE: STA mode + * @QDF_SAP_MODE: SAP mode + * @QDF_P2P_CLIENT_MODE: P2P client mode + * @QDF_P2P_GO_MODE: P2P GO mode + * @QDF_FTM_MODE: FTM mode + * @QDF_IBSS_MODE: IBSS mode + * @QDF_MONITOR_MODE: Monitor mode + * @QDF_P2P_DEVICE_MODE: P2P device mode + * @QDF_OCB_MODE: OCB device mode + * @QDF_EPPING_MODE: EPPING device mode + * @QDF_QVIT_MODE: QVIT device mode + * @QDF_NDI_MODE: NAN datapath mode + * @QDF_WDS_MODE: WDS mode + * @QDF_BTAMP_MODE: BTAMP mode + * @QDF_AHDEMO_MODE: AHDEMO mode + * @QDF_TDLS_MODE: TDLS device mode + * @QDF_NAN_DISC_MODE: NAN Discovery device mode + * @QDF_MAX_NO_OF_MODE: Max place holder + * + * These are generic IDs that identify the various roles + * in the software system + */ +enum QDF_OPMODE { + QDF_STA_MODE, + QDF_SAP_MODE, + QDF_P2P_CLIENT_MODE, + QDF_P2P_GO_MODE, + QDF_FTM_MODE, + QDF_IBSS_MODE, + QDF_MONITOR_MODE, + QDF_P2P_DEVICE_MODE, + QDF_OCB_MODE, + QDF_EPPING_MODE, + QDF_QVIT_MODE, + QDF_NDI_MODE, + QDF_WDS_MODE, + QDF_BTAMP_MODE, + QDF_AHDEMO_MODE, + QDF_TDLS_MODE, + QDF_NAN_DISC_MODE, + + /* Add new OP Modes to qdf_opmode_str as well */ + + QDF_MAX_NO_OF_MODE +}; + +/** + * qdf_opmode_str() - Return a human readable string representation of @opmode + * @opmode: The opmode to convert + * + * Return: string representation of @opmode + */ +const char *qdf_opmode_str(const enum QDF_OPMODE opmode); + +/** + * enum QDF_GLOBAL_MODE - global mode when driver is loaded. + * + * @QDF_GLOBAL_MISSION_MODE: mission mode (STA, SAP...) + * @QDF_GLOBAL_WALTEST_MODE: WAL Test Mode + * @QDF_GLOBAL_MONITOR_MODE: Monitor Mode + * @QDF_GLOBAL_FTM_MODE: FTM mode + * @QDF_GLOBAL_IBSS_MODE: IBSS mode + * @QDF_GLOBAL_COLDBOOT_CALIB_MODEL: Cold Boot Calibration Mode + * @QDF_GLOBAL_EPPING_MODE: EPPING mode + * @QDF_GLOBAL_QVIT_MODE: QVIT global mode + * @QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE: Cold Boot Calibration in FTM Mode + * @QDF_GLOBAL_MAX_MODE: Max place holder + */ +enum QDF_GLOBAL_MODE { + QDF_GLOBAL_MISSION_MODE, + QDF_GLOBAL_WALTEST_MODE = 3, + QDF_GLOBAL_MONITOR_MODE = 4, + QDF_GLOBAL_FTM_MODE = 5, + QDF_GLOBAL_IBSS_MODE = 6, + QDF_GLOBAL_COLDBOOT_CALIB_MODE = 7, + QDF_GLOBAL_EPPING_MODE = 8, + QDF_GLOBAL_QVIT_MODE = 9, + QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE = 10, + QDF_GLOBAL_MAX_MODE +}; + +#define QDF_IS_EPPING_ENABLED(mode) (mode == QDF_GLOBAL_EPPING_MODE) + +#ifdef QDF_TRACE_PRINT_ENABLE +#define qdf_print(args...) QDF_TRACE_INFO(QDF_MODULE_ID_ANY, ## args) +#define qdf_alert(args...) QDF_TRACE_FATAL(QDF_MODULE_ID_ANY, ## args) +#define qdf_err(args...) QDF_TRACE_ERROR(QDF_MODULE_ID_ANY, ## args) +#define qdf_warn(args...) QDF_TRACE_WARN(QDF_MODULE_ID_ANY, ## args) +#define qdf_info(args...) QDF_TRACE_INFO(QDF_MODULE_ID_ANY, ## args) +#define qdf_debug(args...) QDF_TRACE_DEBUG(QDF_MODULE_ID_ANY, ## args) + +#define qdf_nofl_print(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_ANY, ## params) +#define qdf_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_ANY, ## params) +#define qdf_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_ANY, ## params) +#define qdf_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_ANY, ## params) +#define qdf_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_ANY, ## params) +#define qdf_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_ANY, ## params) + +#else /* QDF_TRACE_PRINT_ENABLE */ +#define qdf_print(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_QDF, ## params) +#define qdf_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_QDF, ## params) +#define qdf_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_QDF, ## params) +#define qdf_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_QDF, ## params) +#define qdf_info(params...) QDF_TRACE_INFO(QDF_MODULE_ID_QDF, ## params) +#define qdf_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_QDF, ## params) + +#define qdf_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_QDF, ## params) +#define qdf_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_QDF, ## params) +#define qdf_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_QDF, ## params) +#define qdf_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_QDF, ## params) +#define qdf_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_QDF, ## params) + +#endif /* QDF_TRACE_PRINT_ENABLE */ + +#define qdf_rl_alert(params...) QDF_TRACE_FATAL_RL(QDF_MODULE_ID_QDF, ## params) +#define qdf_rl_err(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_QDF, ## params) +#define qdf_rl_warn(params...) QDF_TRACE_WARN_RL(QDF_MODULE_ID_QDF, ## params) +#define qdf_rl_info(params...) QDF_TRACE_INFO_RL(QDF_MODULE_ID_QDF, ## params) +#define qdf_rl_debug(params...) QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_QDF, ## params) + +#define qdf_rl_nofl_alert(params...) \ + QDF_TRACE_FATAL_RL_NO_FL(QDF_MODULE_ID_QDF, ## params) +#define qdf_rl_nofl_err(params...) \ + QDF_TRACE_ERROR_RL_NO_FL(QDF_MODULE_ID_QDF, ## params) +#define qdf_rl_nofl_warn(params...) \ + QDF_TRACE_WARN_RL_NO_FL(QDF_MODULE_ID_QDF, ## params) +#define qdf_rl_nofl_info(params...) \ + QDF_TRACE_INFO_RL_NO_FL(QDF_MODULE_ID_QDF, ## params) +#define qdf_rl_nofl_debug(params...) \ + QDF_TRACE_DEBUG_RL_NO_FL(QDF_MODULE_ID_QDF, ## params) + +#define qdf_snprint __qdf_snprint + +#define qdf_kstrtoint __qdf_kstrtoint +#define qdf_kstrtouint __qdf_kstrtouint + +#ifdef WLAN_OPEN_P2P_INTERFACE +/* This should match with WLAN_MAX_INTERFACES */ +#define QDF_MAX_CONCURRENCY_PERSONA (WLAN_MAX_VDEVS) +#else +#define QDF_MAX_CONCURRENCY_PERSONA (WLAN_MAX_VDEVS - 1) +#endif + +#define QDF_STA_MASK (1 << QDF_STA_MODE) +#define QDF_SAP_MASK (1 << QDF_SAP_MODE) +#define QDF_P2P_CLIENT_MASK (1 << QDF_P2P_CLIENT_MODE) +#define QDF_P2P_GO_MASK (1 << QDF_P2P_GO_MODE) +#define QDF_MONITOR_MASK (1 << QDF_MONITOR_MODE) + +#ifdef FEATURE_WLAN_MCC_TO_SCC_SWITCH + +/** + * typedef tQDF_MCC_TO_SCC_SWITCH_MODE - MCC to SCC switch mode. + * @QDF_MCC_TO_SCC_SWITCH_DISABLE: Disable switch + * @QDF_MCC_TO_SCC_SWITCH_FORCE_WITHOUT_DISCONNECTION: Force switch without + * restart of SAP + * @QDF_MCC_TO_SCC_SWITCH_WITH_FAVORITE_CHANNEL: Switch using fav channel(s) + * without SAP restart + * @QDF_MCC_TO_SCC_SWITCH_FORCE_PREFERRED_WITHOUT_DISCONNECTION: Force switch + * without SAP restart. MCC is allowed only in below exception cases: + * Exception Case-1: When STA is operating on DFS channel. + * Exception Case-2: When STA is operating on LTE-CoEx channel. + * Exception Case-3: When STA is operating on AP disabled channel. + * @QDF_MCC_TO_SCC_WITH_PREFERRED_BAND: Force SCC only in user preferred band. + * Allow MCC if STA is operating or comes up on other than user preferred band. + * + * @QDF_MCC_TO_SCC_SWITCH_MAX: max switch + */ +typedef enum { + QDF_MCC_TO_SCC_SWITCH_DISABLE = 0, + QDF_MCC_TO_SCC_SWITCH_FORCE_WITHOUT_DISCONNECTION = 3, + QDF_MCC_TO_SCC_SWITCH_WITH_FAVORITE_CHANNEL, + QDF_MCC_TO_SCC_SWITCH_FORCE_PREFERRED_WITHOUT_DISCONNECTION, + QDF_MCC_TO_SCC_WITH_PREFERRED_BAND, + QDF_MCC_TO_SCC_SWITCH_MAX +} tQDF_MCC_TO_SCC_SWITCH_MODE; +#endif + +#if !defined(NULL) +#ifdef __cplusplus +#define NULL 0 +#else +#define NULL ((void *)0) +#endif +#endif + +/** + * qdf_bool_parse() - parse the given string as a boolean value + * @bool_str: the input boolean string to parse + * @out_bool: the output boolean value, populated on success + * + * 1, y, Y are mapped to true, 0, n, N are mapped to false. + * Leading/trailing whitespace is ignored. + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_bool_parse(const char *bool_str, bool *out_bool); + +/** + * qdf_int32_parse() - parse the given string as a 32-bit signed integer + * @int_str: the input integer string to parse + * @out_int: the output integer value, populated on success + * + * Supports binary (0b), octal (0o), decimal (no prefix), and hexadecimal (0x) + * encodings via typical prefix notation. Leading/trailing whitespace is + * ignored. + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_int32_parse(const char *int_str, int32_t *out_int); + +/** + * qdf_uint32_parse() - parse the given string as a 32-bit unsigned integer + * @int_str: the input integer string to parse + * @out_int: the output integer value, populated on success + * + * Supports binary (0b), octal (0o), decimal (no prefix), and hexadecimal (0x) + * encodings via typical prefix notation. Leading/trailing whitespace is + * ignored. + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_uint32_parse(const char *int_str, uint32_t *out_int); + +/** + * qdf_int64_parse() - parse the given string as a 64-bit signed integer + * @int_str: the input integer string to parse + * @out_int: the output integer value, populated on success + * + * Supports binary (0b), octal (0o), decimal (no prefix), and hexadecimal (0x) + * encodings via typical prefix notation. Leading/trailing whitespace is + * ignored. + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_int64_parse(const char *int_str, int64_t *out_int); + +/** + * qdf_uint64_parse() - parse the given string as a 64-bit unsigned integer + * @int_str: the input integer string to parse + * @out_int: the output integer value, populated on success + * + * Supports binary (0b), octal (0o), decimal (no prefix), and hexadecimal (0x) + * encodings via typical prefix notation. Leading/trailing whitespace is + * ignored. + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_uint64_parse(const char *int_str, uint64_t *out_int); + +#define QDF_MAC_ADDR_SIZE 6 + +/** + * If the feature CONFIG_WLAN_TRACE_HIDE_MAC_ADDRESS is enabled, + * then the requirement is to hide 2nd, 3rd and 4th octet of the + * MAC address in the kernel logs and driver logs. + * But other management interfaces like ioctl, debugfs, sysfs, + * wext, unit test code or non-production simulator sw (iot_sim) + * should continue to log the full mac address. + * + * Developers must use QDF_FULL_MAC_FMT instead of "%pM", + * as this macro helps avoid accidentally breaking the feature + * CONFIG_WLAN_TRACE_HIDE_MAC_ADDRESS if enabled and code auditing + * becomes easy. + */ +#define QDF_FULL_MAC_FMT "%pM" +#define QDF_FULL_MAC_REF(a) (a) + +#if defined(WLAN_TRACE_HIDE_MAC_ADDRESS) +#define QDF_MAC_ADDR_FMT "%02x:**:**:**:%02x:%02x" + +/* + * The input data type for QDF_MAC_ADDR_REF can be pointer or an array. + * In case of array, compiler was throwing following warning + * 'address of array will always evaluate as ‘true’ + * and if the pointer is NULL, zero is passed to the format specifier + * which results in zero mac address (00:**:**:**:00:00) + * For this reason, input data type is typecasted to (uintptr_t). + */ +#define QDF_MAC_ADDR_REF(a) \ + (((uintptr_t)NULL != (uintptr_t)(a)) ? (a)[0] : 0), \ + (((uintptr_t)NULL != (uintptr_t)(a)) ? (a)[4] : 0), \ + (((uintptr_t)NULL != (uintptr_t)(a)) ? (a)[5] : 0) +#else +#define QDF_MAC_ADDR_FMT "%pM" +#define QDF_MAC_ADDR_REF(a) (a) +#endif /* WLAN_TRACE_HIDE_MAC_ADDRESS */ + +#define QDF_MAC_ADDR_BCAST_INIT { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } } +#define QDF_MAC_ADDR_ZERO_INIT { { 0, 0, 0, 0, 0, 0 } } + +/** + * struct qdf_mac_addr - A MAC address + * @bytes: the raw address bytes array + */ +struct qdf_mac_addr { + uint8_t bytes[QDF_MAC_ADDR_SIZE]; +}; + +/** + * enum qdf_proto_subtype - subtype of packet + * @QDF_PROTO_EAPOL_M1 - EAPOL 1/4 + * @QDF_PROTO_EAPOL_M2 - EAPOL 2/4 + * @QDF_PROTO_EAPOL_M3 - EAPOL 3/4 + * @QDF_PROTO_EAPOL_M4 - EAPOL 4/4 + * @QDF_PROTO_DHCP_DISCOVER - discover + * @QDF_PROTO_DHCP_REQUEST - request + * @QDF_PROTO_DHCP_OFFER - offer + * @QDF_PROTO_DHCP_ACK - ACK + * @QDF_PROTO_DHCP_NACK - NACK + * @QDF_PROTO_DHCP_RELEASE - release + * @QDF_PROTO_DHCP_INFORM - inform + * @QDF_PROTO_DHCP_DECLINE - decline + * @QDF_PROTO_ARP_REQ - arp request + * @QDF_PROTO_ARP_RES - arp response + * @QDF_PROTO_ICMP_REQ - icmp request + * @QDF_PROTO_ICMP_RES - icmp response + * @QDF_PROTO_ICMPV6_REQ - icmpv6 request + * @QDF_PROTO_ICMPV6_RES - icmpv6 response + * @QDF_PROTO_ICMPV6_RS - icmpv6 rs packet + * @QDF_PROTO_ICMPV6_RA - icmpv6 ra packet + * @QDF_PROTO_ICMPV6_NS - icmpv6 ns packet + * @QDF_PROTO_ICMPV6_NA - icmpv6 na packet + * @QDF_PROTO_IPV4_UDP - ipv4 udp + * @QDF_PROTO_IPV4_TCP - ipv4 tcp + * @QDF_PROTO_IPV6_UDP - ipv6 udp + * @QDF_PROTO_IPV6_TCP - ipv6 tcp + * @QDF_PROTO_MGMT_ASSOC -assoc + * @QDF_PROTO_MGMT_DISASSOC - disassoc + * @QDF_PROTO_MGMT_AUTH - auth + * @QDF_PROTO_MGMT_DEAUTH - deauth + * @QDF_ROAM_SYNCH - roam synch indication from fw + * @QDF_ROAM_COMPLETE - roam complete cmd to fw + * @QDF_ROAM_EVENTID - roam eventid from fw + * @QDF_PROTO_DNS_QUERY - dns query + * @QDF_PROTO_DNS_RES -dns response + */ +enum qdf_proto_subtype { + QDF_PROTO_INVALID, + QDF_PROTO_EAPOL_M1, + QDF_PROTO_EAPOL_M2, + QDF_PROTO_EAPOL_M3, + QDF_PROTO_EAPOL_M4, + QDF_PROTO_DHCP_DISCOVER, + QDF_PROTO_DHCP_REQUEST, + QDF_PROTO_DHCP_OFFER, + QDF_PROTO_DHCP_ACK, + QDF_PROTO_DHCP_NACK, + QDF_PROTO_DHCP_RELEASE, + QDF_PROTO_DHCP_INFORM, + QDF_PROTO_DHCP_DECLINE, + QDF_PROTO_ARP_REQ, + QDF_PROTO_ARP_RES, + QDF_PROTO_ICMP_REQ, + QDF_PROTO_ICMP_RES, + QDF_PROTO_ICMPV6_REQ, + QDF_PROTO_ICMPV6_RES, + QDF_PROTO_ICMPV6_RS, + QDF_PROTO_ICMPV6_RA, + QDF_PROTO_ICMPV6_NS, + QDF_PROTO_ICMPV6_NA, + QDF_PROTO_IPV4_UDP, + QDF_PROTO_IPV4_TCP, + QDF_PROTO_IPV6_UDP, + QDF_PROTO_IPV6_TCP, + QDF_PROTO_MGMT_ASSOC, + QDF_PROTO_MGMT_DISASSOC, + QDF_PROTO_MGMT_AUTH, + QDF_PROTO_MGMT_DEAUTH, + QDF_ROAM_SYNCH, + QDF_ROAM_COMPLETE, + QDF_ROAM_EVENTID, + QDF_PROTO_DNS_QUERY, + QDF_PROTO_DNS_RES, + QDF_PROTO_SUBTYPE_MAX +}; + +/** + * qdf_mac_parse() - parse the given string as a MAC address + * @mac_str: the input MAC address string to parse + * @out_addr: the output MAC address value, populated on success + * + * A MAC address is a set of 6, colon-delimited, hexadecimal encoded octets. + * + * E.g. + * 00:00:00:00:00:00 (zero address) + * ff:ff:ff:ff:ff:ff (broadcast address) + * 12:34:56:78:90:ab (an arbitrary address) + * + * This implementation also accepts MAC addresses without colons. Historically, + * other delimiters and groupings have been used to represent MAC addresses, but + * these are not supported here. Hexadecimal digits may be in either upper or + * lower case. + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_mac_parse(const char *mac_str, struct qdf_mac_addr *out_addr); + +#define QDF_IPV4_ADDR_SIZE 4 +#define QDF_IPV4_ADDR_STR "%d.%d.%d.%d" +#define QDF_IPV4_ADDR_ARRAY(a) (a)[0], (a)[1], (a)[2], (a)[3] +#define QDF_IPV4_ADDR_ZERO_INIT { { 0, 0, 0, 0 } } + +/** + * struct qdf_ipv4_addr - An IPV4 address + * @bytes: the raw address bytes array + */ +struct qdf_ipv4_addr { + uint8_t bytes[QDF_IPV4_ADDR_SIZE]; +}; + +/** + * qdf_ipv4_parse() - parse the given string as an IPV4 address + * @ipv4_str: the input IPV4 address string to parse + * @out_addr: the output IPV4 address value, populated on success + * + * An IPV4 address is a set of 4, dot-delimited, decimal encoded octets. + * + * E.g. + * 0.0.0.0 (wildcard address) + * 127.0.0.1 (loopback address) + * 255.255.255.255 (broadcast address) + * 192.168.0.1 (an arbitrary address) + * + * Historically, non-decimal encodings have also been used to represent IPV4 + * addresses, but these are not supported here. + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_ipv4_parse(const char *ipv4_str, struct qdf_ipv4_addr *out_addr); + +#define QDF_IPV6_ADDR_SIZE 16 +#define QDF_IPV6_ADDR_HEXTET_COUNT 8 +#define QDF_IPV6_ADDR_STR "%x:%x:%x:%x:%x:%x:%x:%x" +#define QDF_IPV6_ADDR_ARRAY(a) \ + ((a)[0] << 8) + (a)[1], ((a)[2] << 8) + (a)[3], \ + ((a)[4] << 8) + (a)[5], ((a)[6] << 8) + (a)[7], \ + ((a)[8] << 8) + (a)[9], ((a)[10] << 8) + (a)[11], \ + ((a)[12] << 8) + (a)[13], ((a)[14] << 8) + (a)[15] +#define QDF_IPV6_ADDR_ZERO_INIT \ + { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } + +/** + * struct qdf_ipv6_addr - An IPV6 address + * @bytes: the raw address bytes array + */ +struct qdf_ipv6_addr { + uint8_t bytes[QDF_IPV6_ADDR_SIZE]; +}; + +/** + * qdf_ipv6_parse() - parse the given string as an IPV6 address + * @ipv6_str: the input IPV6 address string to parse + * @out_addr: the output IPV6 address value, populated on success + * + * A hextet is a pair of octets. An IPV6 address is a set of 8, colon-delimited, + * hexadecimal encoded hextets. Each hextet may omit leading zeros. One or more + * zero-hextets may be "compressed" using a pair of colons ("::"). Up to one + * such zero-compression is allowed per address. + * + * E.g. + * 0:0:0:0:0:0:0:0 (unspecified address) + * :: (also the unspecified address) + * 0:0:0:0:0:0:0:1 (loopback address) + * ::1 (also the loopback address) + * 900a:ae7::6 (an arbitrary address) + * 900a:ae7:0:0:0:0:0:6 (the same arbitrary address) + * + * Hexadecimal digits may be in either upper or lower case. + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_ipv6_parse(const char *ipv6_str, struct qdf_ipv6_addr *out_addr); + +/** + * qdf_uint32_array_parse() - parse the given string as uint32 array + * @in_str: the input string to parse + * @out_array: the output uint32 array, populated on success + * @array_size: size of the array + * @out_size: size of the populated array + * + * This API is called to convert string (each value separated by + * a comma) into an uint32 array + * + * Return: QDF_STATUS + */ + +QDF_STATUS qdf_uint32_array_parse(const char *in_str, uint32_t *out_array, + qdf_size_t array_size, qdf_size_t *out_size); + +/** + * qdf_uint16_array_parse() - parse the given string as uint16 array + * @in_str: the input string to parse + * @out_array: the output uint16 array, populated on success + * @array_size: size of the array + * @out_size: size of the populated array + * + * This API is called to convert string (each value separated by + * a comma) into an uint16 array + * + * Return: QDF_STATUS + */ + +QDF_STATUS qdf_uint16_array_parse(const char *in_str, uint16_t *out_array, + qdf_size_t array_size, qdf_size_t *out_size); + +/** + * qdf_uint8_array_parse() - parse the given string as uint8 array + * @in_str: the input string to parse + * @out_array: the output uint8 array, populated on success + * @array_size: size of the array + * @out_size: size of the populated array + * + * This API is called to convert string (each byte separated by + * a comma) into an u8 array + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_uint8_array_parse(const char *in_str, uint8_t *out_array, + qdf_size_t array_size, qdf_size_t *out_size); + +#define QDF_BCAST_MAC_ADDR (0xFF) +#define QDF_MCAST_IPV4_MAC_ADDR (0x01) +#define QDF_MCAST_IPV6_MAC_ADDR (0x33) + +/** + * struct qdf_tso_frag_t - fragments of a single TCP segment + * @paddr_low_32: Lower 32 bits of the buffer pointer + * @paddr_upper_16: upper 16 bits of the buffer pointer + * @length: length of the buffer + * @vaddr: virtual address + * + * This structure holds the fragments of a single TCP segment of a + * given jumbo TSO network buffer + */ +struct qdf_tso_frag_t { + uint16_t length; + unsigned char *vaddr; + qdf_dma_addr_t paddr; +}; + +#define FRAG_NUM_MAX 6 +#define TSO_SEG_MAGIC_COOKIE 0x1EED + +/** + * struct qdf_tso_flags_t - TSO specific flags + * @tso_enable: Enable transmit segmentation offload + * @tcp_flags_mask: Tcp_flag is inserted into the header based + * on the mask + * @l2_len: L2 length for the msdu + * @ip_len: IP length for the msdu + * @tcp_seq_num: TCP sequence number + * @ip_id: IP identification number + * + * This structure holds the TSO specific flags extracted from the TSO network + * buffer for a given TCP segment + */ +struct qdf_tso_flags_t { + uint32_t tso_enable:1, + reserved_0a:6, + fin:1, + syn:1, + rst:1, + psh:1, + ack:1, + urg:1, + ece:1, + cwr:1, + ns:1, + tcp_flags_mask:9, + reserved_0b:7; + + uint32_t l2_len:16, + ip_len:16; + + uint32_t tcp_seq_num; + + uint32_t ip_id:16, + ipv4_checksum_en:1, + udp_ipv4_checksum_en:1, + udp_ipv6_checksum_en:1, + tcp_ipv4_checksum_en:1, + tcp_ipv6_checksum_en:1, + partial_checksum_en:1, + reserved_3a:10; + + uint32_t checksum_offset:14, + reserved_4a:2, + payload_start_offset:14, + reserved_4b:2; + + uint32_t payload_end_offset:14, + reserved_5:18; +}; + +/** + * struct qdf_tso_seg_t - single TSO segment + * @tso_flags: TSO flags + * @num_frags: number of fragments + * @total_len: total length of the packet + * @tso_frags: array holding the fragments + * + * This structure holds the information of a single TSO segment of a jumbo + * TSO network buffer + */ +struct qdf_tso_seg_t { + struct qdf_tso_flags_t tso_flags; + uint32_t num_frags; + uint32_t total_len; + struct qdf_tso_frag_t tso_frags[FRAG_NUM_MAX]; +}; + +/** + * TSO seg elem action caller locations: goes into dbg.history below. + * Needed to be defined outside of the feature so that + * callers can be coded without ifdefs (even if they get + * resolved to nothing) + */ +enum tsoseg_dbg_caller_e { + TSOSEG_LOC_UNDEFINED, + TSOSEG_LOC_INIT1, + TSOSEG_LOC_INIT2, + TSOSEG_LOC_FREE, + TSOSEG_LOC_ALLOC, + TSOSEG_LOC_DEINIT, + TSOSEG_LOC_GETINFO, + TSOSEG_LOC_FILLHTTSEG, + TSOSEG_LOC_FILLCMNSEG, + TSOSEG_LOC_PREPARETSO, + TSOSEG_LOC_TXPREPLLFAST, + TSOSEG_LOC_UNMAPTSO, + TSOSEG_LOC_UNMAPLAST, + TSOSEG_LOC_FORCE_FREE, +}; +#ifdef TSOSEG_DEBUG + +/** + * WARNING: Don't change the history size without changing the wrap + * code in qdf_tso_seg_dbg_record function + */ +#define MAX_TSO_SEG_ACT_HISTORY 16 +struct qdf_tso_seg_dbg_history_t { + uint64_t ts; + short id; +}; +struct qdf_tso_seg_dbg_t { + void *txdesc; /* owner - (ol_txrx_tx_desc_t *) */ + qdf_atomic_t cur; /* index of last valid entry */ + struct qdf_tso_seg_dbg_history_t h[MAX_TSO_SEG_ACT_HISTORY]; +}; +#endif /* TSOSEG_DEBUG */ + +/** + * qdf_tso_seg_elem_t - tso segment element + * @next: pointer to the next segment + * @seg: instance of segment + */ +struct qdf_tso_seg_elem_t { + struct qdf_tso_seg_elem_t *next; + struct qdf_tso_seg_t seg; + uint32_t cookie:13, + on_freelist:1, + sent_to_target:1, + force_free:1; +#ifdef TSOSEG_DEBUG + struct qdf_tso_seg_dbg_t dbg; +#endif /* TSOSEG_DEBUG */ +}; + +/** + * struct qdf_tso_num_seg_t - single element to count for num of seg + * @tso_cmn_num_seg: num of seg in a jumbo skb + * + * This structure holds the information of num of segments of a jumbo + * TSO network buffer. + */ +struct qdf_tso_num_seg_t { + uint32_t tso_cmn_num_seg; +}; + +/** + * qdf_tso_num_seg_elem_t - num of tso segment element for jumbo skb + * @next: pointer to the next segment + * @num_seg: instance of num of seg + */ +struct qdf_tso_num_seg_elem_t { + struct qdf_tso_num_seg_elem_t *next; + struct qdf_tso_num_seg_t num_seg; +}; + +/** + * struct qdf_tso_info_t - TSO information extracted + * @is_tso: is this is a TSO frame + * @num_segs: number of segments + * @tso_seg_list: list of TSO segments for this jumbo packet + * @curr_seg: segment that is currently being processed + * @tso_num_seg_list: num of tso seg for this jumbo packet + * @msdu_stats_idx: msdu index for tso stats + * + * This structure holds the TSO information extracted after parsing the TSO + * jumbo network buffer. It contains a chain of the TSO segments belonging to + * the jumbo packet + */ +struct qdf_tso_info_t { + uint8_t is_tso; + uint32_t num_segs; + struct qdf_tso_seg_elem_t *tso_seg_list; + struct qdf_tso_seg_elem_t *curr_seg; + struct qdf_tso_num_seg_elem_t *tso_num_seg_list; + uint32_t msdu_stats_idx; +}; + +/** + * Used to set classify bit in CE desc. + */ +#define QDF_CE_TX_CLASSIFY_BIT_S 5 + +/** + * QDF_CE_TX_PKT_TYPE_BIT_S - 2 bits starting at bit 6 in CE desc. + */ +#define QDF_CE_TX_PKT_TYPE_BIT_S 6 + +/** + * QDF_CE_TX_PKT_OFFSET_BIT_S - 12 bits --> 16-27, in the CE desciptor + * the length of HTT/HTC descriptor + */ +#define QDF_CE_TX_PKT_OFFSET_BIT_S 16 + +/** + * QDF_CE_TX_PKT_OFFSET_BIT_M - Mask for packet offset in the CE descriptor. + */ +#define QDF_CE_TX_PKT_OFFSET_BIT_M 0x0fff0000 + +/** + * enum qdf_suspend_type - type of suspend + * @QDF_SYSTEM_SUSPEND: System suspend triggered wlan suspend + * @QDF_RUNTIME_SUSPEND: Runtime pm inactivity timer triggered wlan suspend + */ +enum qdf_suspend_type { + QDF_SYSTEM_SUSPEND, + QDF_RUNTIME_SUSPEND +}; + +/** + * enum qdf_hang_reason - host hang/ssr reason + * @QDF_REASON_UNSPECIFIED: Unspecified reason + * @QDF_RX_HASH_NO_ENTRY_FOUND: No Map for the MAC entry for the received frame + * @QDF_PEER_DELETION_TIMEDOUT: peer deletion timeout happened + * @QDF_PEER_UNMAP_TIMEDOUT: peer unmap timeout + * @QDF_SCAN_REQ_EXPIRED: Scan request timed out + * @QDF_SCAN_ATTEMPT_FAILURES: Consecutive Scan attempt failures + * @QDF_GET_MSG_BUFF_FAILURE: Unable to get the message buffer + * @QDF_ACTIVE_LIST_TIMEOUT: Current command processing is timedout + * @QDF_SUSPEND_TIMEOUT: Timeout for an ACK from FW for suspend request + * @QDF_RESUME_TIMEOUT: Timeout for an ACK from FW for resume request + * @QDF_WMI_EXCEED_MAX_PENDING_CMDS: wmi exceed max pending cmd + * @QDF_AP_STA_CONNECT_REQ_TIMEOUT: SAP peer assoc timeout from FW + * @QDF_STA_AP_CONNECT_REQ_TIMEOUT: STA peer assoc timeout from FW + * @QDF_MAC_HW_MODE_CHANGE_TIMEOUT: HW mode change timeout from FW + * @QDF_MAC_HW_MODE_CONFIG_TIMEOUT: HW dual mac cfg timeout from FW + * @QDF_VDEV_START_RESPONSE_TIMED_OUT: Start response timeout from FW + * @QDF_VDEV_RESTART_RESPONSE_TIMED_OUT: Restart response timeout from FW + * @QDF_VDEV_STOP_RESPONSE_TIMED_OUT: Stop response timeout from FW + * @QDF_VDEV_DELETE_RESPONSE_TIMED_OUT: Delete response timeout from FW + * @QDF_VDEV_PEER_DELETE_ALL_RESPONSE_TIMED_OUT: Peer delete all resp timeout + * @QDF_WMI_BUF_SEQUENCE_MISMATCH: WMI Tx completion buffer sequence mismatch + * @QDF_HAL_REG_WRITE_FAILURE: HAL register writing failures + * @QDF_SUSPEND_NO_CREDIT: host lack of credit after suspend + * @QCA_HANG_BUS_FAILURE: Bus failure + */ +enum qdf_hang_reason { + QDF_REASON_UNSPECIFIED, + QDF_RX_HASH_NO_ENTRY_FOUND, + QDF_PEER_DELETION_TIMEDOUT, + QDF_PEER_UNMAP_TIMEDOUT, + QDF_SCAN_REQ_EXPIRED, + QDF_SCAN_ATTEMPT_FAILURES, + QDF_GET_MSG_BUFF_FAILURE, + QDF_ACTIVE_LIST_TIMEOUT, + QDF_SUSPEND_TIMEOUT, + QDF_RESUME_TIMEOUT, + QDF_WMI_EXCEED_MAX_PENDING_CMDS, + QDF_AP_STA_CONNECT_REQ_TIMEOUT, + QDF_STA_AP_CONNECT_REQ_TIMEOUT, + QDF_MAC_HW_MODE_CHANGE_TIMEOUT, + QDF_MAC_HW_MODE_CONFIG_TIMEOUT, + QDF_VDEV_START_RESPONSE_TIMED_OUT, + QDF_VDEV_RESTART_RESPONSE_TIMED_OUT, + QDF_VDEV_STOP_RESPONSE_TIMED_OUT, + QDF_VDEV_DELETE_RESPONSE_TIMED_OUT, + QDF_VDEV_PEER_DELETE_ALL_RESPONSE_TIMED_OUT, + QDF_WMI_BUF_SEQUENCE_MISMATCH, + QDF_HAL_REG_WRITE_FAILURE, + QDF_SUSPEND_NO_CREDIT, + QCA_HANG_BUS_FAILURE, +}; + +/** + * enum qdf_stats_verbosity_level - Verbosity levels for stats + * for which want to have different levels + * @QDF_STATS_VERBOSITY_LEVEL_LOW: Stats verbosity level low + * @QDF_STATS_VERBOSITY_LEVEL_HIGH: Stats verbosity level high + */ +enum qdf_stats_verbosity_level { + QDF_STATS_VERBOSITY_LEVEL_LOW, + QDF_STATS_VERBOSITY_LEVEL_HIGH +}; + +/** + * enum qdf_clock_id - The clock IDs of the various system clocks + * @QDF_CLOCK_REALTIME: Clock is close to current time of day + * @QDF_CLOCK_MONOTONIC: Clock is absolute elapsed time + */ +enum qdf_clock_id { + QDF_CLOCK_REALTIME = __QDF_CLOCK_REALTIME, + QDF_CLOCK_MONOTONIC = __QDF_CLOCK_MONOTONIC +}; + +/** + * enum qdf_hrtimer_mode - Mode arguments of qdf_hrtimer_data_t + * related functions + * @QDF_HRTIMER_MODE_ABS: Time value is absolute + * @QDF_HRTIMER_MODE_REL: Time value is relative to now + * @QDF_HRTIMER_MODE_PINNED: Timer is bound to CPU + */ +enum qdf_hrtimer_mode { + QDF_HRTIMER_MODE_ABS = __QDF_HRTIMER_MODE_ABS, + QDF_HRTIMER_MODE_REL = __QDF_HRTIMER_MODE_REL, + QDF_HRTIMER_MODE_PINNED = __QDF_HRTIMER_MODE_PINNED, +}; + +/** + * enum qdf_hrtimer_restart_status - Return values for the + * qdf_hrtimer_data_t callback function + * @QDF_HRTIMER_NORESTART: Timer is not restarted + * @QDF_HRTIMER_RESTART: Timer must be restarted + */ +enum qdf_hrtimer_restart_status { + QDF_HRTIMER_NORESTART = __QDF_HRTIMER_NORESTART, + QDF_HRTIMER_RESTART = __QDF_HRTIMER_RESTART, +}; + +/** + * enum qdf_context_mode - Values for the + * hrtimer context + * @QDF_CONTEXT_HARDWARE: Runs in hw interrupt context + * @QDF_CONTEXT_TASKLET: Runs in tasklet context + */ +enum qdf_context_mode { + QDF_CONTEXT_HARDWARE = 0, + QDF_CONTEXT_TASKLET = 1, +}; + +/** + * enum qdf_dp_tx_rx_status - TX/RX packet status + * @QDF_TX_RX_STATUS_INVALID: default invalid status + * @QDF_TX_RX_STATUS_OK: successfully sent + acked + * @QDF_TX_RX_STATUS_FW_DISCARD: packet not sent + * @QDF_TX_RX_STATUS_NO_ACK: packet sent but no ack + * @QDF_TX_RX_STATUS_DROP: packet dropped in host + */ +enum qdf_dp_tx_rx_status { + QDF_TX_RX_STATUS_INVALID, + QDF_TX_RX_STATUS_OK, + QDF_TX_RX_STATUS_FW_DISCARD, + QDF_TX_RX_STATUS_NO_ACK, + QDF_TX_RX_STATUS_DROP, +}; + +#endif /* __QDF_TYPES_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_util.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_util.h new file mode 100644 index 0000000000000000000000000000000000000000..2710afefcdf6004dff9e5c9878d7fee0460a8734 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_util.h @@ -0,0 +1,787 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_util.h + * This file defines utility functions. + */ + +#ifndef _QDF_UTIL_H +#define _QDF_UTIL_H + +#include + +#ifdef QCA_CONFIG_SMP +#define QDF_MAX_AVAILABLE_CPU 8 +#else +#define QDF_MAX_AVAILABLE_CPU 1 +#endif + +typedef __qdf_wait_queue_head_t qdf_wait_queue_head_t; + +/** + * qdf_unlikely - Compiler-dependent macro denoting code likely to execute + * @_expr: expression to be checked + */ +#define qdf_unlikely(_expr) __qdf_unlikely(_expr) + +/** + * qdf_likely - Compiler-dependent macro denoting code unlikely to execute + * @_expr: expression to be checked + */ +#define qdf_likely(_expr) __qdf_likely(_expr) + +/** + * qdf_wmb - write memory barrier. + */ +#define qdf_wmb() __qdf_wmb() + +/** + * qdf_rmb - read memory barrier. + */ +#define qdf_rmb() __qdf_rmb() + +/** + * qdf_mb - read + write memory barrier. + */ +#define qdf_mb() __qdf_mb() + +/** + * qdf_ioread32 - read a register + * @offset: register address + */ +#define qdf_ioread32(offset) __qdf_ioread32(offset) +/** + * qdf_iowrite32 - write a register + * @offset: register address + * @value: value to write (32bit value) + */ +#define qdf_iowrite32(offset, value) __qdf_iowrite32(offset, value) + +/** + * qdf_assert - assert "expr" evaluates to false. + */ +#ifdef QDF_DEBUG +#define qdf_assert(expr) __qdf_assert(expr) +#else +#define qdf_assert(expr) +#endif /* QDF_DEBUG */ + +/** + * qdf_assert_always - alway assert "expr" evaluates to false. + */ +#define qdf_assert_always(expr) __qdf_assert(expr) + +/** + * qdf_target_assert_always - alway target assert "expr" evaluates to false. + */ +#define qdf_target_assert_always(expr) __qdf_target_assert(expr) + +/** + * QDF_MAX - get maximum of two values + * @_x: 1st argument + * @_y: 2nd argument + */ +#define QDF_MAX(_x, _y) (((_x) > (_y)) ? (_x) : (_y)) + +/** + * QDF_MIN - get minimum of two values + * @_x: 1st argument + * @_y: 2nd argument + */ +#define QDF_MIN(_x, _y) (((_x) < (_y)) ? (_x) : (_y)) + +/** + * QDF_IS_ADDR_BROADCAST - is mac address broadcast mac address + * @_a: pointer to mac address + */ +#define QDF_IS_ADDR_BROADCAST(_a) \ + ((_a)[0] == 0xff && \ + (_a)[1] == 0xff && \ + (_a)[2] == 0xff && \ + (_a)[3] == 0xff && \ + (_a)[4] == 0xff && \ + (_a)[5] == 0xff) + +#define QDF_DECLARE_EWMA(name, factor, weight) \ + __QDF_DECLARE_EWMA(name, factor, weight) + +#define qdf_ewma_tx_lag __qdf_ewma_tx_lag + +#define qdf_ewma_tx_lag_init(tx_lag) \ + __qdf_ewma_tx_lag_init(tx_lag) + +#define qdf_ewma_tx_lag_add(tx_lag, value) \ + __qdf_ewma_tx_lag_add(tx_lag, value) + +#define qdf_ewma_tx_lag_read(tx_lag) \ + __qdf_ewma_tx_lag_read(tx_lag) + +#define qdf_ewma_rx_rssi __qdf_ewma_rx_rssi + +#define qdf_ewma_rx_rssi_init(rx_rssi) \ + __qdf_ewma_rx_rssi_init(rx_rssi) + +#define qdf_ewma_rx_rssi_add(rx_rssi, value) \ + __qdf_ewma_rx_rssi_add(rx_rssi, value) + +#define qdf_ewma_rx_rssi_read(rx_rssi) \ + __qdf_ewma_rx_rssi_read(rx_rssi) +/** + * qdf_set_bit() - set bit in address + * @nr: bit number to be set + * @addr: address buffer pointer + * + * Return: none + */ +#define qdf_set_bit(nr, addr) __qdf_set_bit(nr, addr) + +/** + * qdf_clear_bit() - clear bit in address + * @nr: bit number to be clear + * @addr: address buffer pointer + * + * Return: none + */ +#define qdf_clear_bit(nr, addr) __qdf_clear_bit(nr, addr) + +/** + * qdf_test_bit() - test bit position in address + * @nr: bit number to be tested + * @addr: address buffer pointer + * + * Return: none + */ +#define qdf_test_bit(nr, addr) __qdf_test_bit(nr, addr) + +/** + * qdf_test_and_clear_bit() - test and clear bit position in address + * @nr: bit number to be tested + * @addr: address buffer pointer + * + * Return: none + */ +#define qdf_test_and_clear_bit(nr, addr) __qdf_test_and_clear_bit(nr, addr) + +/** + * qdf_find_first_bit() - find first bit position in address + * @addr: address buffer pointer + * @nbits: number of bits + * + * Return: position first set bit in addr + */ +#define qdf_find_first_bit(addr, nbits) __qdf_find_first_bit(addr, nbits) + +#define qdf_wait_queue_interruptible(wait_queue, condition) \ + __qdf_wait_queue_interruptible(wait_queue, condition) + +/** + * qdf_wait_queue_timeout() - wait for specified time on given condition + * @wait_queue: wait queue to wait on + * @condition: condition to wait on + * @timeout: timeout value in jiffies + * + * Return: 0 if condition becomes false after timeout + * 1 or remaining jiffies, if condition becomes true during timeout + */ +#define qdf_wait_queue_timeout(wait_queue, condition, timeout) \ + __qdf_wait_queue_timeout(wait_queue, \ + condition, timeout) + + +#define qdf_init_waitqueue_head(_q) __qdf_init_waitqueue_head(_q) + +#define qdf_wake_up_interruptible(_q) __qdf_wake_up_interruptible(_q) + +/** + * qdf_wake_up() - wakes up sleeping waitqueue + * @wait_queue: wait queue, which needs wake up + * + * Return: none + */ +#define qdf_wake_up(_q) __qdf_wake_up(_q) + +#define qdf_wake_up_completion(_q) __qdf_wake_up_completion(_q) + +/** + * qdf_container_of - cast a member of a structure out to the containing + * structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. + */ +#define qdf_container_of(ptr, type, member) \ + __qdf_container_of(ptr, type, member) + +/** + * qdf_is_pwr2 - test input value is power of 2 integer + * @value: input integer + */ +#define QDF_IS_PWR2(value) (((value) ^ ((value)-1)) == ((value) << 1) - 1) + +/** + * qdf_roundup() - roundup the input value + * @x: value to roundup + * @y: input value rounded to multiple of this + * + * Return: rounded value + */ +#define qdf_roundup(x, y) __qdf_roundup(x, y) + +/** + * qdf_is_macaddr_equal() - compare two QDF MacAddress + * @mac_addr1: Pointer to one qdf MacAddress to compare + * @mac_addr2: Pointer to the other qdf MacAddress to compare + * + * This function returns a bool that tells if a two QDF MacAddress' + * are equivalent. + * + * Return: true if the MacAddress's are equal + * not true if the MacAddress's are not equal + */ +static inline bool qdf_is_macaddr_equal(struct qdf_mac_addr *mac_addr1, + struct qdf_mac_addr *mac_addr2) +{ + return __qdf_is_macaddr_equal(mac_addr1, mac_addr2); +} + + +/** + * qdf_is_macaddr_zero() - check for a MacAddress of all zeros. + * @mac_addr: pointer to the struct qdf_mac_addr to check. + * + * This function returns a bool that tells if a MacAddress is made up of + * all zeros. + * + * Return: true if the MacAddress is all Zeros + * false if the MacAddress is not all Zeros. + */ +static inline bool qdf_is_macaddr_zero(struct qdf_mac_addr *mac_addr) +{ + struct qdf_mac_addr zero_mac_addr = QDF_MAC_ADDR_ZERO_INIT; + + return qdf_is_macaddr_equal(mac_addr, &zero_mac_addr); +} + +/** + * qdf_zero_macaddr() - zero out a MacAddress + * @mac_addr: pointer to the struct qdf_mac_addr to zero. + * + * This function zeros out a QDF MacAddress type. + * + * Return: none + */ +static inline void qdf_zero_macaddr(struct qdf_mac_addr *mac_addr) +{ + __qdf_zero_macaddr(mac_addr); +} + + +/** + * qdf_is_macaddr_group() - check for a MacAddress is a 'group' address + * @mac_addr1: pointer to the qdf MacAddress to check + * + * This function returns a bool that tells if a the input QDF MacAddress + * is a "group" address. Group addresses have the 'group address bit' turned + * on in the MacAddress. Group addresses are made up of Broadcast and + * Multicast addresses. + * + * Return: true if the input MacAddress is a Group address + * false if the input MacAddress is not a Group address + */ +static inline bool qdf_is_macaddr_group(struct qdf_mac_addr *mac_addr) +{ + return mac_addr->bytes[0] & 0x01; +} + + +/** + * qdf_is_macaddr_broadcast() - check for a MacAddress is a broadcast address + * @mac_addr: Pointer to the qdf MacAddress to check + * + * This function returns a bool that tells if a the input QDF MacAddress + * is a "broadcast" address. + * + * Return: true if the input MacAddress is a broadcast address + * flase if the input MacAddress is not a broadcast address + */ +static inline bool qdf_is_macaddr_broadcast(struct qdf_mac_addr *mac_addr) +{ + struct qdf_mac_addr broadcast_mac_addr = QDF_MAC_ADDR_BCAST_INIT; + return qdf_is_macaddr_equal(mac_addr, &broadcast_mac_addr); +} + +/** + * qdf_copy_macaddr() - copy a QDF MacAddress + * @dst_addr: pointer to the qdf MacAddress to copy TO (the destination) + * @src_addr: pointer to the qdf MacAddress to copy FROM (the source) + * + * This function copies a QDF MacAddress into another QDF MacAddress. + * + * Return: none + */ +static inline void qdf_copy_macaddr(struct qdf_mac_addr *dst_addr, + struct qdf_mac_addr *src_addr) +{ + *dst_addr = *src_addr; +} + +/** + * qdf_set_macaddr_broadcast() - set a QDF MacAddress to the 'broadcast' + * @mac_addr: pointer to the qdf MacAddress to set to broadcast + * + * This function sets a QDF MacAddress to the 'broadcast' MacAddress. Broadcast + * MacAddress contains all 0xFF bytes. + * + * Return: none + */ +static inline void qdf_set_macaddr_broadcast(struct qdf_mac_addr *mac_addr) +{ + __qdf_set_macaddr_broadcast(mac_addr); +} + +/** + * qdf_set_u16() - Assign 16-bit unsigned value to a byte array base on CPU's + * endianness. + * @ptr: Starting address of a byte array + * @value: The value to assign to the byte array + * + * Caller must validate the byte array has enough space to hold the vlaue + * + * Return: The address to the byte after the assignment. This may or may not + * be valid. Caller to verify. + */ +static inline uint8_t *qdf_set_u16(uint8_t *ptr, uint16_t value) +{ +#if defined(ANI_BIG_BYTE_ENDIAN) + *(ptr) = (uint8_t) (value >> 8); + *(ptr + 1) = (uint8_t) (value); +#else + *(ptr + 1) = (uint8_t) (value >> 8); + *(ptr) = (uint8_t) (value); +#endif + return ptr + 2; +} + +/** + * qdf_get_u16() - Retrieve a 16-bit unsigned value from a byte array base on + * CPU's endianness. + * @ptr: Starting address of a byte array + * @value: Pointer to a caller allocated buffer for 16 bit value. Value is to + * assign to this location. + * + * Caller must validate the byte array has enough space to hold the vlaue + * + * Return: The address to the byte after the assignment. This may or may not + * be valid. Caller to verify. + */ +static inline uint8_t *qdf_get_u16(uint8_t *ptr, uint16_t *value) +{ +#if defined(ANI_BIG_BYTE_ENDIAN) + *value = (((uint16_t) (*ptr << 8)) | ((uint16_t) (*(ptr + 1)))); +#else + *value = (((uint16_t) (*(ptr + 1) << 8)) | ((uint16_t) (*ptr))); +#endif + return ptr + 2; +} + +/** + * qdf_get_u32() - retrieve a 32-bit unsigned value from a byte array base on + * CPU's endianness. + * @ptr: Starting address of a byte array + * @value: Pointer to a caller allocated buffer for 32 bit value. Value is to + * assign to this location. + * + * Caller must validate the byte array has enough space to hold the vlaue + * + * Return: The address to the byte after the assignment. This may or may not + * be valid. Caller to verify. + */ +static inline uint8_t *qdf_get_u32(uint8_t *ptr, uint32_t *value) +{ +#if defined(ANI_BIG_BYTE_ENDIAN) + *value = ((uint32_t) (*(ptr) << 24) | + (uint32_t) (*(ptr + 1) << 16) | + (uint32_t) (*(ptr + 2) << 8) | (uint32_t) (*(ptr + 3))); +#else + *value = ((uint32_t) (*(ptr + 3) << 24) | + (uint32_t) (*(ptr + 2) << 16) | + (uint32_t) (*(ptr + 1) << 8) | (uint32_t) (*(ptr))); +#endif + return ptr + 4; +} + +/** + * qdf_ntohs - Convert a 16-bit value from network byte order to host byte order + */ +#define qdf_ntohs(x) __qdf_ntohs(x) + +/** + * qdf_ntohl - Convert a 32-bit value from network byte order to host byte order + */ +#define qdf_ntohl(x) __qdf_ntohl(x) + +/** + * qdf_htons - Convert a 16-bit value from host byte order to network byte order + */ +#define qdf_htons(x) __qdf_htons(x) + +/** + * qdf_htonl - Convert a 32-bit value from host byte order to network byte order + */ +#define qdf_htonl(x) __qdf_htonl(x) + +/** + * qdf_cpu_to_le16 - Convert a 16-bit value from CPU byte order to + * little-endian byte order + * + * @x: value to be converted + */ +#define qdf_cpu_to_le16(x) __qdf_cpu_to_le16(x) + +/** + * qdf_cpu_to_le32 - Convert a 32-bit value from CPU byte order to + * little-endian byte order + * + * @x: value to be converted + */ +#define qdf_cpu_to_le32(x) __qdf_cpu_to_le32(x) + +/** + * qdf_cpu_to_le64 - Convert a 64-bit value from CPU byte order to + * little-endian byte order + * + * @x: value to be converted + */ +#define qdf_cpu_to_le64(x) __qdf_cpu_to_le64(x) + +/** + * qdf_le16_to_cpu - Convert a 16-bit value from little-endian byte order + * to CPU byte order + * + * @x: value to be converted + */ +#define qdf_le16_to_cpu(x) __qdf_le16_to_cpu(x) + +/** + * qdf_le32_to_cpu - Convert a 32-bit value from little-endian byte + * order to CPU byte order + * + * @x: value to be converted + */ +#define qdf_le32_to_cpu(x) __qdf_le32_to_cpu(x) + +/** + * qdf_le64_to_cpu - Convert a 64-bit value from little-endian byte + * order to CPU byte order + * + * @x: value to be converted + */ +#define qdf_le64_to_cpu(x) __qdf_le64_to_cpu(x) + +/** + * qdf_cpu_to_be16 - Convert a 16-bit value from CPU byte order to + * big-endian byte order + * + * @x: value to be converted + */ +#define qdf_cpu_to_be16(x) __qdf_cpu_to_be16(x) + +/** + * qdf_cpu_to_be32 - Convert a 32-bit value from CPU byte order to + * big-endian byte order + * + * @x: value to be converted + */ +#define qdf_cpu_to_be32(x) __qdf_cpu_to_be32(x) + +/** + * qdf_cpu_to_be64 - Convert a 64-bit value from CPU byte order to + * big-endian byte order + * + * @x: value to be converted + */ +#define qdf_cpu_to_be64(x) __qdf_cpu_to_be64(x) + + +/** + * qdf_be16_to_cpu - Convert a 16-bit value from big-endian byte order + * to CPU byte order + * + * @x: value to be converted + */ +#define qdf_be16_to_cpu(x) __qdf_be16_to_cpu(x) + +/** + * qdf_be32_to_cpu - Convert a 32-bit value from big-endian byte order + * to CPU byte order + * + * @x: value to be converted + */ +#define qdf_be32_to_cpu(x) __qdf_be32_to_cpu(x) + +/** + * qdf_be64_to_cpu - Convert a 64-bit value from big-endian byte order + * to CPU byte order + * + * @x: value to be converted + */ +#define qdf_be64_to_cpu(x) __qdf_be64_to_cpu(x) + +/** + * qdf_function - replace with the name of the current function + */ +#define qdf_function __qdf_function + +/** + * qdf_min - minimum of two numbers + */ +#define qdf_min(a, b) __qdf_min(a, b) + +/** + * qdf_ffz() - find first (least significant) zero bit + * @mask: the bitmask to check + * + * Return: The zero-based index of the first zero bit, or -1 if none are found + */ +#define qdf_ffz(mask) __qdf_ffz(mask) + +/** + * qdf_prefetch - prefetches the cacheline for read + * + * @x: address to be prefetched + */ +#define qdf_prefetch(x) __qdf_prefetch(x) + +/** + * qdf_get_pwr2() - get next power of 2 integer from input value + * @value: input value to find next power of 2 integer + * + * Get next power of 2 integer from input value + * + * Return: Power of 2 integer + */ +static inline int qdf_get_pwr2(int value) +{ + int log2; + + if (QDF_IS_PWR2(value)) + return value; + + log2 = 0; + while (value) { + value >>= 1; + log2++; + } + return 1 << log2; +} + +static inline +int qdf_get_cpu(void) +{ + return __qdf_get_cpu(); +} + +/** + * qdf_get_hweight8() - count num of 1's in bitmap + * @value: input bitmap + * + * Count num of 1's set in the bitmap + * + * Return: num of 1's + */ +static inline +unsigned int qdf_get_hweight8(unsigned int w) +{ + unsigned int res = w - ((w >> 1) & 0x55); + res = (res & 0x33) + ((res >> 2) & 0x33); + return (res + (res >> 4)) & 0x0F; +} + +/** + * qdf_device_init_wakeup() - allow a device to wake up the aps system + * @qdf_dev: the qdf device context + * @enable: enable/disable the device as a wakup source + * + * Return: 0 or errno + */ +static inline int qdf_device_init_wakeup(qdf_device_t qdf_dev, bool enable) +{ + return __qdf_device_init_wakeup(qdf_dev, enable); +} + +static inline +uint64_t qdf_get_totalramsize(void) +{ + return __qdf_get_totalramsize(); +} + +/** + * qdf_get_lower_32_bits() - get lower 32 bits from an address. + * @addr: address + * + * This api returns the lower 32 bits of an address. + * + * Return: lower 32 bits. + */ +static inline +uint32_t qdf_get_lower_32_bits(qdf_dma_addr_t addr) +{ + return __qdf_get_lower_32_bits(addr); +} + +/** + * qdf_get_upper_32_bits() - get upper 32 bits from an address. + * @addr: address + * + * This api returns the upper 32 bits of an address. + * + * Return: upper 32 bits. + */ +static inline +uint32_t qdf_get_upper_32_bits(qdf_dma_addr_t addr) +{ + return __qdf_get_upper_32_bits(addr); +} + +/** + * qdf_rounddown_pow_of_two() - Round down to nearest power of two + * @n: number to be tested + * + * Test if the input number is power of two, and return the nearest power of two + * + * Return: number rounded down to the nearest power of two + */ +static inline +unsigned long qdf_rounddown_pow_of_two(unsigned long n) +{ + return __qdf_rounddown_pow_of_two(n); +} + +/** + * qdf_set_dma_coherent_mask() - set max number of bits allowed in dma addr + * @dev: device pointer + * @addr_bits: max number of bits allowed in dma address + * + * This API sets the maximum allowed number of bits in the dma address. + * + * Return: 0 - success, non zero - failure + */ +static inline +int qdf_set_dma_coherent_mask(struct device *dev, uint8_t addr_bits) +{ + return __qdf_set_dma_coherent_mask(dev, addr_bits); +} + +/** + * qdf_do_div() - wrapper function for kernel macro(do_div). + * @dividend: Dividend value + * @divisor : Divisor value + * + * Return: Quotient + */ +static inline +uint64_t qdf_do_div(uint64_t dividend, uint32_t divisor) +{ + return __qdf_do_div(dividend, divisor); +} + +/** + * qdf_do_div_rem() - wrapper function for kernel macro(do_div) + * to get remainder. + * @dividend: Dividend value + * @divisor : Divisor value + * + * Return: remainder + */ +static inline +uint64_t qdf_do_div_rem(uint64_t dividend, uint32_t divisor) +{ + return __qdf_do_div_rem(dividend, divisor); +} + +/** + * qdf_get_random_bytes() - returns nbytes bytes of random + * data + * + * Return: random bytes of data + */ +static inline +void qdf_get_random_bytes(void *buf, int nbytes) +{ + return __qdf_get_random_bytes(buf, nbytes); +} + +/** + * qdf_hex_to_bin() - QDF API to Convert hexa decimal ASCII character to + * unsigned integer value. + * @ch: hexa decimal ASCII character + * + * Return: For hexa decimal ASCII char return actual decimal value + * else -1 for bad input. + */ +static inline +int qdf_hex_to_bin(char ch) +{ + return __qdf_hex_to_bin(ch); +} + +/** + * qdf_hex_str_to_binary() - QDF API to Convert string of hexa decimal + * ASCII characters to array of unsigned integers. + * @dst: output array to hold converted values + * @src: input string of hexa decimal ASCII characters + * @count: size of dst string + * + * This function is used to convert string of hexa decimal characters to + * array of unsigned integers and caller should ensure: + * a) @dst, @src are not NULL, + * b) size of @dst should be (size of src / 2) + * + * Example 1: + * src = 11aa, means, src[0] = '1', src[1] = '2', src[2] = 'a', src[3] = 'a' + * count = (size of src / 2) = 2 + * after conversion, dst[0] = 0x11, dst[1] = oxAA and return (0). + * + * Example 2: + * src = 11az, means, src[0] = '1', src[1] = '2', src[2] = 'a', src[3] = 'z' + * src[3] is not ASCII hexa decimal character, return negative value (-1). + * + * Return: For a string of hexa decimal ASCII characters return 0 + * else -1 for bad input. + */ +static inline +int qdf_hex_str_to_binary(u8 *dst, const char *src, size_t count) +{ + return __qdf_hex_str_to_binary(dst, src, count); +} + +/** + * qdf_fls() - find last set bit in a given 32 bit input + * @x: 32 bit mask + * + * Return: zero if the input is zero, otherwise returns the bit + * position of the last set bit, where the LSB is 1 and MSB is 32. + */ +static inline +int qdf_fls(uint32_t x) +{ + return __qdf_fls(x); +} + +#endif /*_QDF_UTIL_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_vfs.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_vfs.h new file mode 100644 index 0000000000000000000000000000000000000000..4876bd975d391f8b2f81c41c49f60896272f7979 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_vfs.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_vfs + * QCA driver framework (QDF) virtual filesystem management APIs + */ + +#if !defined(__QDF_VFS_H) +#define __QDF_VFS_H + +/* Include Files */ +#include + +struct qdf_vfs_attr; +struct qdf_vf_bin_attr; +struct qdf_dev_obj; + +#ifdef ENHANCED_OS_ABSTRACTION +/** + * qdf_vfs_set_file_attributes() - set file attributes + * @devobj: Device object + * @attr: File attribute + * + * This function will set the attributes of a file + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qdf_vfs_set_file_attributes(struct qdf_dev_obj *devobj, + struct qdf_vfs_attr *attr); + +/** + * qdf_vfs_clear_file_attributes() - clear file attributes + * @devobj: Device object + * @attr: File attribute + * + * This function will clear the attributes of a file + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qdf_vfs_clear_file_attributes(struct qdf_dev_obj *devobj, + struct qdf_vfs_attr *attr); + +/** + * qdf_vfs_create_binfile() - create binfile + * @devobj: Device object + * @attr: File attribute + * + * This function will create a binary file + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qdf_vfs_create_binfile(struct qdf_dev_obj *devobj, + struct qdf_vf_bin_attr *attr); + +/** + * qdf_vfs_delete_binfile() - delete binfile + * @devobj: Device object + * @attr: File attribute + * + * This function will delete a binary file + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +qdf_vfs_delete_binfile(struct qdf_dev_obj *devobj, + struct qdf_vf_bin_attr *attr); +#else +static inline QDF_STATUS +qdf_vfs_set_file_attributes(struct qdf_dev_obj *devobj, + struct qdf_vfs_attr *attr) +{ + return __qdf_vfs_set_file_attributes(devobj, attr); +} + +static inline QDF_STATUS +qdf_vfs_clear_file_attributes(struct qdf_dev_obj *devobj, + struct qdf_vfs_attr *attr) +{ + return __qdf_vfs_clear_file_attributes(devobj, attr); +} + +statuc inline QDF_STATUS +qdf_vfs_create_binfile(struct qdf_dev_obj *devobj, + struct qdf_vf_bin_attr *attr) +{ + return __qdf_vfs_create_binfile(devobj, attr); +} + +static inline QDF_STATUS +qdf_vfs_delete_binfile(struct qdf_dev_obj *devobj, + struct qdf_vf_bin_attr *attr) +{ + return __qdf_vfs_delete_binfile(devobj, attr); +} +#endif +#endif /* __QDF_VFS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_osdep.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_osdep.h new file mode 100644 index 0000000000000000000000000000000000000000..877f43f757f2fa848f11db957d7a83b610611ce1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_osdep.h @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_osdep + * QCA driver framework OS dependent types + */ + +#ifndef _I_OSDEP_H +#define _I_OSDEP_H + +#include "queue.h" + +/* + * Byte Order stuff + */ +#define le16toh(_x) le16_to_cpu(_x) +#define htole16(_x) cpu_to_le16(_x) +#define htobe16(_x) cpu_to_be16(_x) +#define le32toh(_x) le32_to_cpu(_x) +#define htole32(_x) cpu_to_le32(_x) +#define be16toh(_x) be16_to_cpu(_x) +#define be32toh(_x) be32_to_cpu(_x) +#define htobe32(_x) cpu_to_be32(_x) + +#ifdef CONFIG_SMP +/* Undo the one provided by the kernel to debug spin locks */ +#undef spin_lock +#undef spin_unlock +#undef spin_trylock + +#define spin_lock(x) spin_lock_bh(x) + +#define spin_unlock(x) \ + do { \ + if (!spin_is_locked(x)) { \ + WARN_ON(1); \ + qdf_info("unlock addr=%pK, %s", x, \ + !spin_is_locked(x) ? "Not locked" : ""); \ + } \ + spin_unlock_bh(x); \ + } while (0) +#define spin_trylock(x) spin_trylock_bh(x) +#define OS_SUPPORT_ASYNC_Q 1 /* support for handling asyn function calls */ + +#else +#define OS_SUPPORT_ASYNC_Q 0 +#endif /* ifdef CONFIG_SMP */ + +/** + * struct os_mest_t - maintain attributes of message + * @mesg_next: pointer to the nexgt message + * @mest_type: type of message + * @mesg_len: length of the message + */ +typedef struct _os_mesg_t { + STAILQ_ENTRY(_os_mesg_t) mesg_next; + uint16_t mesg_type; + uint16_t mesg_len; +} os_mesg_t; + +/** + * struct qdf_bus_context - Bus to hal context handoff + * @bc_tag: bus context tag + * @cal_in_flash: calibration data stored in flash + * @bc_handle: bus context handle + * @bc_bustype: bus type + */ +typedef struct qdf_bus_context { + void *bc_tag; + int cal_in_flash; + char *bc_handle; + enum qdf_bus_type bc_bustype; +} QDF_BUS_CONTEXT; + +typedef struct _NIC_DEV *osdev_t; + +typedef void (*os_mesg_handler_t)(void *ctx, uint16_t mesg_type, + uint16_t mesg_len, + void *mesg); + + +/** + * typedef os_mesg_queue_t - Object to maintain message queue + * @dev_handle: OS handle + * @num_queued: number of queued messages + * @mesg_len: message length + * @mesg_queue_buf: pointer to message queue buffer + * @mesg_head: queued mesg buffers + * @mesg_free_head: free mesg buffers + * @lock: spinlock object + * @ev_handler_lock: spinlock object to event handler + * @task: pointer to task + * @_timer: instance of timer + * @handler: message handler + * @ctx: pointer to context + * @is_synchronous: bit to save synchronous status + * @del_progress: delete in progress + */ +typedef struct { + osdev_t dev_handle; + int32_t num_queued; + int32_t mesg_len; + uint8_t *mesg_queue_buf; + + STAILQ_HEAD(, _os_mesg_t) mesg_head; + STAILQ_HEAD(, _os_mesg_t) mesg_free_head; + spinlock_t lock; + spinlock_t ev_handler_lock; +#ifdef USE_SOFTINTR + void *_task; +#else + qdf_timer_t _timer; +#endif + os_mesg_handler_t handler; + void *ctx; + uint8_t is_synchronous:1; + uint8_t del_progress; +} os_mesg_queue_t; + +/** + * struct _NIC_DEV - Definition of OS-dependent device structure. + * It'll be opaque to the actual ATH layer. + * @qdf_dev: qdf device + * @bdev: bus device handle + * @netdev: net device handle (wifi%d) + * @intr_tq: tasklet + * @devstats: net device statistics + * @bc: hal bus context + * @device: generic device + * @event_queue: instance to wait queue + * @is_device_asleep: keep device status, sleep or awakei + * @acfg_event_list: event list + * @acfg_event_queue_lock: queue lock + * @acfg_event_os_work: schedule or create work + * @acfg_netlink_wq_init_done: Work queue ready + * @osdev_acfg_handle: acfg handle + * @vap_hardstart: Tx function specific to the radio + * initiailzed during VAP create + */ +struct _NIC_DEV { + qdf_device_t qdf_dev; + void *bdev; + struct net_device *netdev; + qdf_bh_t intr_tq; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) + struct rtnl_link_stats64 devstats; +#else + struct net_device_stats devstats; +#endif + QDF_BUS_CONTEXT bc; +#ifdef ATH_PERF_PWR_OFFLOAD + struct device *device; + wait_queue_head_t event_queue; +#endif /* PERF_PWR_OFFLOAD */ +#if OS_SUPPORT_ASYNC_Q + os_mesg_queue_t async_q; +#endif +#ifdef ATH_BUS_PM + uint8_t is_device_asleep; +#endif /* ATH_BUS_PM */ + qdf_nbuf_queue_t acfg_event_list; + qdf_spinlock_t acfg_event_queue_lock; + qdf_work_t acfg_event_os_work; + uint8_t acfg_netlink_wq_init_done; + +#ifdef UMAC_SUPPORT_ACFG +#ifdef ACFG_NETLINK_TX + void *osdev_acfg_handle; +#endif /* ACFG_NETLINK_TX */ +#endif /* UMAC_SUPPORT_ACFG */ + int (*vap_hardstart)(struct sk_buff *skb, struct net_device *dev); +}; + +#define __QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \ + proc_dointvec(ctl, write, buffer, lenp, ppos) + +#endif /* _I_OSDEP_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_atomic.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_atomic.h new file mode 100644 index 0000000000000000000000000000000000000000..1d2cb2b30a879d72883684e065ff672f30c632ba --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_atomic.h @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_atomic.h + * This file provides OS dependent atomic APIs. + */ + +#ifndef I_QDF_ATOMIC_H +#define I_QDF_ATOMIC_H + +#include /* QDF_STATUS */ +#include +#include + +typedef atomic_t __qdf_atomic_t; + +/** + * __qdf_atomic_init() - initialize an atomic type variable + * @v: A pointer to an opaque atomic variable + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS __qdf_atomic_init(__qdf_atomic_t *v) +{ + atomic_set(v, 0); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_atomic_read() - read the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: The current value of the variable + */ +static inline int32_t __qdf_atomic_read(__qdf_atomic_t *v) +{ + return atomic_read(v); +} + +/** + * __qdf_atomic_inc() - increment the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline void __qdf_atomic_inc(__qdf_atomic_t *v) +{ + atomic_inc(v); +} + +/** + * __qdf_atomic_dec() - decrement the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline void __qdf_atomic_dec(__qdf_atomic_t *v) +{ + atomic_dec(v); +} + +/** + * __qdf_atomic_add() - add a value to the value of an atomic variable + * @i: The amount by which to increase the atomic counter + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline void __qdf_atomic_add(int i, __qdf_atomic_t *v) +{ + atomic_add(i, v); +} + +/** + * __qdf_atomic_sub() - Subtract a value from an atomic variable + * @i: the amount by which to decrease the atomic counter + * @v: a pointer to an opaque atomic variable + * + * Return: none + */ +static inline void __qdf_atomic_sub(int i, __qdf_atomic_t *v) +{ + atomic_sub(i, v); +} + +/** + * __qdf_atomic_dec_and_test() - decrement an atomic variable and check if the + * new value is zero + * @v: A pointer to an opaque atomic variable + * + * Return: + * true (non-zero) if the new value is zero, + * false (0) if the new value is non-zero + */ +static inline int32_t __qdf_atomic_dec_and_test(__qdf_atomic_t *v) +{ + return atomic_dec_and_test(v); +} + +/** + * __qdf_atomic_set() - set a value to the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline void __qdf_atomic_set(__qdf_atomic_t *v, int i) +{ + atomic_set(v, i); +} + +/** + * __qdf_atomic_inc_return() - return the incremented value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: The current value of the variable + */ +static inline int32_t __qdf_atomic_inc_return(__qdf_atomic_t *v) +{ + return atomic_inc_return(v); +} + +/** + * __qdf_atomic_dec_return() - return the decremented value of an atomic + * variable + * @v: A pointer to an opaque atomic variable + * + * Return: The current value of the variable + */ +static inline int32_t __qdf_atomic_dec_return(__qdf_atomic_t *v) +{ + return atomic_dec_return(v); +} + +/** + * __qdf_atomic_inc_not_zero() - increment if not zero + * @v: A pointer to an opaque atomic variable + * + * Return: Returns non-zero on successful increment and zero otherwise + */ +static inline int32_t __qdf_atomic_inc_not_zero(__qdf_atomic_t *v) +{ + return atomic_inc_not_zero(v); +} + +/** + * __qdf_atomic_set_bit - Atomically set a bit in memory + * @nr: bit to set + * @addr: the address to start counting from + * + * Return: none + */ +static inline void __qdf_atomic_set_bit(int nr, volatile unsigned long *addr) +{ + set_bit(nr, addr); +} + +/** + * __qdf_atomic_clear_bit - Atomically clear a bit in memory + * @nr: bit to clear + * @addr: the address to start counting from + * + * Return: none + */ +static inline void __qdf_atomic_clear_bit(int nr, volatile unsigned long *addr) +{ + clear_bit(nr, addr); +} + +/** + * __qdf_atomic_change_bit - Atomically toggle a bit in memory + * from addr + * @nr: bit to change + * @addr: the address to start counting from + * + * Return: none + */ +static inline void __qdf_atomic_change_bit(int nr, volatile unsigned long *addr) +{ + change_bit(nr, addr); +} + +/** + * __qdf_atomic_test_and_set_bit - Atomically set a bit and return its old value + * @nr: Bit to set + * @addr: the address to start counting from + * + * Return: return nr bit old value + */ +static inline int __qdf_atomic_test_and_set_bit(int nr, + volatile unsigned long *addr) +{ + return test_and_set_bit(nr, addr); +} + +/** + * __qdf_atomic_test_and_clear_bit - Atomically clear a bit and return its old + * value + * @nr: bit to clear + * @addr: the address to start counting from + * + * Return: return nr bit old value + */ +static inline int __qdf_atomic_test_and_clear_bit(int nr, + volatile unsigned long *addr) +{ + return test_and_clear_bit(nr, addr); +} + +/** + * __qdf_atomic_test_and_change_bit - Atomically toggle a bit and return its old + * value + * @nr: bit to change + * @addr: the address to start counting from + * + * Return: return nr bit old value + */ +static inline int __qdf_atomic_test_and_change_bit(int nr, + volatile unsigned long *addr) +{ + return test_and_change_bit(nr, addr); +} + +/** + * __qdf_atomic_test_bit - Atomically get the nr-th bit value starting from addr + * @nr: bit to get + * @addr: the address to start counting from + * + * Return: return nr bit value + */ +static inline int __qdf_atomic_test_bit(int nr, volatile unsigned long *addr) +{ + return test_bit(nr, addr); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_cpuhp.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_cpuhp.h new file mode 100644 index 0000000000000000000000000000000000000000..7cee453c6502eaa5c1f87b94b564b54dbfe16b15 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_cpuhp.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_cpuhp.h (CPU hotplug) + * Linux-specific definitions for QDF CPU hotplug API's + */ + +#ifndef __I_QDF_CPUHP_H +#define __I_QDF_CPUHP_H + +#include "linux/types.h" + +typedef void (*__qdf_cpuhp_emit)(uint32_t cpu); + +void __qdf_cpuhp_os_init(__qdf_cpuhp_emit on_up, __qdf_cpuhp_emit on_down); +void __qdf_cpuhp_os_deinit(void); + +#endif /* __I_QDF_CPUHP_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_debugfs.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..506b6f04add108e8739562b427acf8212502779f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_debugfs.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_debugfs.h + * Linux specific implementation for debug filesystem APIs. + */ + + +#ifndef _I_QDF_DEBUGFS_H +#define _I_QDF_DEBUGFS_H + +#include +#include + +typedef struct dentry *__qdf_dentry_t; +typedef struct seq_file *__qdf_debugfs_file_t; + +#ifdef WLAN_DEBUGFS + +/** + * qdf_debugfs_get_root() - get debugfs root + * + * Return: dentry * or NULL in case of failure + */ +struct dentry *qdf_debugfs_get_root(void); + +/** + * qdf_debugfs_get_filemode() - get Linux specific file mode + * @mode: This is a bitmap of file modes, + * QDF_FILE_USR_READ + * QDF_FILE_USR_WRITE + * QDF_FILE_OTH_READ + * QDF_FILE_OTH_WRITE + * QDF_FILE_GRP_READ + * QDF_FILE_GRP_WRITE + * + * Return: Linux specific file mode + */ +umode_t qdf_debugfs_get_filemode(uint16_t mode); + +#endif /* WLAN_DEBUGFS */ +#endif /* _I_QDF_DEBUGFS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_defer.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_defer.h new file mode 100644 index 0000000000000000000000000000000000000000..f809ada9146c50e843c5a05dfb344c549cc4d7eb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_defer.h @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_defer.h + * This file provides OS dependent deferred API's. + */ + +#ifndef _I_QDF_DEFER_H +#define _I_QDF_DEFER_H + +#include +#include +#include +#include +#include + +typedef struct tasklet_struct __qdf_bh_t; +typedef struct workqueue_struct __qdf_workqueue_t; + +/** + * __qdf_work_t - wrapper around the real task func + * @work: Instance of work + * @fn: function pointer to the handler + * @arg: pointer to argument + */ +typedef struct { + struct work_struct work; + qdf_defer_fn_t fn; + void *arg; +} __qdf_work_t; + +extern void __qdf_defer_func(struct work_struct *work); + +typedef void (*__qdf_bh_fn_t)(unsigned long arg); + +/** + * __qdf_init_work - Initialize a work/task queue, This runs in non-interrupt + * context, so can be preempted by H/W & S/W intr + * @work: pointer to work + * @func: deferred function to run at bottom half non-interrupt context. + * @arg: argument for the deferred function + * Return: none + */ +static inline QDF_STATUS +__qdf_init_work(__qdf_work_t *work, qdf_defer_fn_t func, void *arg) +{ + work->fn = func; + work->arg = arg; + INIT_WORK(&work->work, __qdf_defer_func); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_queue_work - Queue the work/task + * @wqueue: pointer to workqueue + * @work: pointer to work + * Return: none + */ +static inline void +__qdf_queue_work(__qdf_workqueue_t *wqueue, __qdf_work_t *work) +{ + queue_work(wqueue, &work->work); +} + +/** + * __qdf_sched_work - Schedule a deferred task on non-interrupt context + * @work: pointer to work + * Retrun: none + */ +static inline QDF_STATUS __qdf_sched_work(__qdf_work_t *work) +{ + schedule_work(&work->work); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_cancel_work() - Cancel a work + * @work: pointer to work + * Return: true if work was pending, false otherwise + */ +static inline bool __qdf_cancel_work(__qdf_work_t *work) +{ + return cancel_work_sync(&work->work); +} + +/** + * __qdf_flush_work - Flush a deferred task on non-interrupt context + * @work: pointer to work + * Return: none + */ +static inline uint32_t __qdf_flush_work(__qdf_work_t *work) +{ + flush_work(&work->work); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_create_workqueue - create a workqueue, This runs in non-interrupt + * context, so can be preempted by H/W & S/W intr + * @name: string + * Return: pointer of type qdf_workqueue_t + */ +static inline __qdf_workqueue_t *__qdf_create_workqueue(char *name) +{ + return create_workqueue(name); +} + +/** + * __qdf_create_singlethread_workqueue() - create a single threaded workqueue + * @name: string + * + * This API creates a dedicated work queue with a single worker thread to avoid + * wasting unnecessary resources when works which needs to be submitted in this + * queue are not very critical and frequent. + * + * Return: pointer of type qdf_workqueue_t + */ +static inline __qdf_workqueue_t *__qdf_create_singlethread_workqueue(char *name) +{ + return create_singlethread_workqueue(name); +} + +/** + * __qdf_alloc_high_prior_ordered_workqueue - alloc high-prior ordered workqueue + * @name: string + * + * Return: pointer of type qdf_workqueue_t + */ +static inline +__qdf_workqueue_t *__qdf_alloc_high_prior_ordered_workqueue(char *name) +{ + return alloc_ordered_workqueue(name, WQ_HIGHPRI); +} + +/** + * __qdf_alloc_unbound_workqueue - alloc an unbound workqueue + * @name: string + * + * Return: pointer of type qdf_workqueue_t + */ +static inline __qdf_workqueue_t *__qdf_alloc_unbound_workqueue(char *name) +{ + return alloc_workqueue(name, WQ_UNBOUND, 0); +} + +/** + * __qdf_flush_workqueue - flush the workqueue + * @wqueue: pointer to workqueue + * Return: none + */ +static inline void __qdf_flush_workqueue(__qdf_workqueue_t *wqueue) +{ + flush_workqueue(wqueue); +} + +/** + * __qdf_destroy_workqueue - Destroy the workqueue + * @wqueue: pointer to workqueue + * Return: none + */ +static inline void __qdf_destroy_workqueue(__qdf_workqueue_t *wqueue) +{ + destroy_workqueue(wqueue); +} + +/** + * __qdf_init_bh - creates the Bottom half deferred handler + * @bh: pointer to bottom + * @func: deferred function to run at bottom half interrupt context. + * @arg: argument for the deferred function + * Return: none + */ +static inline QDF_STATUS +__qdf_init_bh(struct tasklet_struct *bh, qdf_defer_fn_t func, void *arg) +{ + tasklet_init(bh, (__qdf_bh_fn_t) func, (unsigned long)arg); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_sched_bh - schedule a bottom half (DPC) + * @bh: pointer to bottom + * Return: none + */ +static inline QDF_STATUS __qdf_sched_bh(struct tasklet_struct *bh) +{ + tasklet_schedule(bh); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_disable_work - disable the deferred task (synchronous) + * @work: pointer to work + * Return: unsigned int + */ +static inline QDF_STATUS __qdf_disable_work(__qdf_work_t *work) +{ + if (cancel_work_sync(&work->work)) + return QDF_STATUS_E_ALREADY; + + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_disable_bh - destroy the bh (synchronous) + * @bh: pointer to bottom + * Return: none + */ +static inline QDF_STATUS __qdf_disable_bh(struct tasklet_struct *bh) +{ + tasklet_kill(bh); + return QDF_STATUS_SUCCESS; +} + +#endif /*_I_QDF_DEFER_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_delayed_work.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_delayed_work.h new file mode 100644 index 0000000000000000000000000000000000000000..e72b8644b04463c5f26d03030b59e1369dcc1aaa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_delayed_work.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __I_QDF_DELAYED_WORK_H +#define __I_QDF_DELAYED_WORK_H + +#include "linux/workqueue.h" + +#define __qdf_opaque_delayed_work delayed_work + +#endif /* __I_QDF_DELAYED_WORK_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_dev.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..1b519822fd62d691da542c9d014aa62b3551ce9c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_dev.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_dev + * QCA driver framework (QDF) device management APIs + */ + +#if !defined(__I_QDF_DEV_H) +#define __I_QDF_DEV_H + +/* Include Files */ +#include +#include "qdf_util.h" +#include +#ifdef CONFIG_SCHED_CORE_CTL +#include +#endif + +struct qdf_cpu_mask; +struct qdf_devm; +struct qdf_dev; + +#define __qdf_cpumask_pr_args(maskp) cpumask_pr_args(maskp) +#define __qdf_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu) +#define __qdf_for_each_online_cpu(cpu) for_each_online_cpu(cpu) + +/** + * __qdf_dev_alloc_mem() - allocate memory + * @qdfdev: Device handle + * @mrptr: Pointer to the allocated memory + * @reqsize: Allocation request in bytes + * @mask: Property mask to be associated to the allocated memory + * + * This function will acquire memory to be associated with a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qdf_dev_alloc_mem(struct qdf_dev *qdfdev, struct qdf_devm **mrptr, + uint32_t reqsize, uint32_t mask) +{ + *mrptr = devm_kzalloc((struct device *)qdfdev, reqsize, mask); + + if (!*mrptr) + return QDF_STATUS_E_NOMEM; + + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_dev_release_mem() - release memory + * @qdfdev: Device handle + * @mrptr: Pointer to the allocated memory + * + * This function will acquire memory to be associated with a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qdf_dev_release_mem(struct qdf_dev *qdfdev, struct qdf_devm *mrptr) +{ + devm_kfree((struct device *)qdfdev, mrptr); + + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_dev_modify_irq() - modify irq + * @irnum: irq number + * @cmask: Bitmap to be cleared for the property mask + * @smask: Bitmap to be set for the property mask + * + * This function will modify the properties of the irq associated with a device + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qdf_dev_modify_irq_status(uint32_t irnum, unsigned long cmask, + unsigned long smask) +{ + irq_modify_status(irnum, cmask, smask); + + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_dev_set_irq_affinity() - set irq affinity + * @irnum: irq number + * @cpmask: cpu affinity bitmap + * + * This function will set the affinity level for an irq + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qdf_dev_set_irq_affinity(uint32_t irnum, struct qdf_cpu_mask *cpmask) +{ + int ret; + + ret = irq_set_affinity_hint(irnum, (struct cpumask *)cpmask); + return qdf_status_from_os_return(ret); +} + +/** + * __qdf_topology_physical_package_id() - API to retrieve the + * cluster info + * @cpu: cpu core + * + * This function returns the cluster information for give cpu + * core + * + * Return: 1 for perf and 0 for non-perf cluster + */ +static inline int __qdf_topology_physical_package_id(unsigned int cpu) +{ + return topology_physical_package_id(cpu); +} + +/** + * __qdf_cpumask_subset() - API to check for subset in cpumasks + * @srcp1: first cpu mask + * @srcp1: second cpu mask + * + * This checks for *srcp1 & ~*srcp2 + * + * Return: 1 if srcp1 is subset of srcp2 else 0 + */ +static inline int __qdf_cpumask_subset(qdf_cpu_mask *srcp1, + const qdf_cpu_mask *srcp2) +{ + return cpumask_subset(srcp1, srcp2); +} + +/** + * __qdf_cpumask_intersects() - API to check if cpumasks + * intersect + * @srcp1: first cpu mask + * @srcp1: second cpu mask + * + * This checks for (*srcp1 & *srcp2) != 0 + * + * Return: 1 if srcp1 and srcp2 intersect else 0 + */ +static inline int __qdf_cpumask_intersects(qdf_cpu_mask *srcp1, + const qdf_cpu_mask *srcp2) +{ + return cpumask_intersects(srcp1, srcp2); +} + +#ifdef CONFIG_SCHED_CORE_CTL +/** + * __qdf_core_ctl_set_boost() - This API is used to move tasks + * to CPUs with higher capacity + * + * This function moves tasks to higher capacity CPUs than those + * where the tasks would have normally ended up. This is + * applicable only to defconfig builds. + * + * Return: 0 on success + */ +static inline int __qdf_core_ctl_set_boost(bool boost) +{ + return core_ctl_set_boost(boost); +} +#else +static inline int __qdf_core_ctl_set_boost(bool boost) +{ + return 0; +} +#endif +#endif /* __I_QDF_DEV_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_event.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_event.h new file mode 100644 index 0000000000000000000000000000000000000000..5d26c7d9a36a94d0e0bc141708d4307c2934e009 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_event.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_event.h + * This file provides OS dependent event API's. + */ + +#if !defined(__I_QDF_EVENT_H) +#define __I_QDF_EVENT_H + +#include + +/** + * qdf_event_t - manages events + * @complete: instance to completion + * @cookie: unsigned int + * @done: indicate completion + * @force_set: indicate forceful completion + */ +typedef struct qdf_evt { + struct completion complete; + uint32_t cookie; + bool done; + bool force_set; +} __qdf_event_t; + +/* Preprocessor definitions and constants */ +#define LINUX_EVENT_COOKIE 0x12341234 + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) +#define INIT_COMPLETION(event) reinit_completion(&event) +#endif + +#endif /*__I_QDF_EVENT_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_hashtable.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_hashtable.h new file mode 100644 index 0000000000000000000000000000000000000000..f9ad3806e71b257d5853e515d5bddf6eebf4638e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_hashtable.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __I_QDF_HASHTABLE_H +#define __I_QDF_HASHTABLE_H + +#include "linux/hashtable.h" + +#define __qdf_ht hlist_head +#define __qdf_ht_entry hlist_node +#define __qdf_ht_declare(name, bits) DECLARE_HASHTABLE(name, bits) +#define __qdf_ht_init(table) hash_init(table) +#define __qdf_ht_deinit(table) do { } while (false) +#define __qdf_ht_empty(table) hash_empty(table) +#define __qdf_ht_add(table, entry, key) hash_add(table, entry, key) +#define __qdf_ht_remove(entry) hash_del(entry) + +#define __qdf_ht_for_each(table, i, cursor, entry_field) \ + hash_for_each(table, i, cursor, entry_field) + +#define __qdf_ht_for_each_in_bucket(table, cursor, entry_field, key) \ + hash_for_each_possible(table, cursor, entry_field, key) + +#define __qdf_ht_for_each_match(table, cursor, entry_field, key, key_field) \ + hash_for_each_possible(table, (cursor), entry_field, (key)) \ + if ((cursor)->key_field == (key)) + +#define __qdf_ht_get(table, cursor, entry_field, key, key_field) \ +do { \ + cursor = NULL; \ + __qdf_ht_for_each_match(table, cursor, entry_field, key, key_field) \ + break; \ +} while (false) + +#define __qdf_ht_for_each_safe(table, i, tmp, cursor, entry_field) \ + hash_for_each_safe(table, i, tmp, cursor, entry_field) + +#endif /* __I_QDF_HASHTABLE_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_hrtimer.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_hrtimer.h new file mode 100644 index 0000000000000000000000000000000000000000..00055036f9411949c5f2303c80dbb5e868441dc8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_hrtimer.h @@ -0,0 +1,377 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_hrtimer + * This file provides OS dependent timer API's. + */ + +#ifndef _I_QDF_HRTIMER_H +#define _I_QDF_HRTIMER_H + +#include +#include +#include +#include +#include + +/* hrtimer data type */ +typedef struct { + union { + struct hrtimer hrtimer; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) + struct tasklet_hrtimer tasklet_hrtimer; +#endif + } u; + enum qdf_context_mode ctx; +} __qdf_hrtimer_data_t; + +/** + * __qdf_hrtimer_get_mode() - Get hrtimer_mode with qdf mode + * @mode: mode of hrtimer + * + * Get hrtimer_mode with qdf hrtimer mode + * + * Return: void + */ +static inline +enum hrtimer_mode __qdf_hrtimer_get_mode(enum qdf_hrtimer_mode mode) +{ + return (enum hrtimer_mode)mode; +} + +/** + * __qdf_hrtimer_start() - Starts hrtimer in given context + * @timer: pointer to the hrtimer object + * @interval: interval to forward as qdf_ktime_t object + * @mode: mode of hrtimer + * + * Starts hrtimer in given context + * + * Return: void + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) +static inline +void __qdf_hrtimer_start(__qdf_hrtimer_data_t *timer, ktime_t interval, + enum qdf_hrtimer_mode mode) +{ + enum hrtimer_mode hrt_mode = __qdf_hrtimer_get_mode(mode); + + hrtimer_start(&timer->u.hrtimer, interval, hrt_mode); +} +#else +static inline +void __qdf_hrtimer_start(__qdf_hrtimer_data_t *timer, ktime_t interval, + enum qdf_hrtimer_mode mode) +{ + enum hrtimer_mode hrt_mode = __qdf_hrtimer_get_mode(mode); + + if (timer->ctx == QDF_CONTEXT_HARDWARE) + hrtimer_start(&timer->u.hrtimer, interval, hrt_mode); + else if (timer->ctx == QDF_CONTEXT_TASKLET) + tasklet_hrtimer_start(&timer->u.tasklet_hrtimer, + interval, hrt_mode); +} +#endif + +/** + * __qdf_hrtimer_cancel() - cancels hrtimer in given context + * @timer: pointer to the hrtimer object + * + * cancels hrtimer in given context + * + * Return: int + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) +static inline +int __qdf_hrtimer_cancel(__qdf_hrtimer_data_t *timer) +{ + if (timer->ctx == QDF_CONTEXT_HARDWARE) + return hrtimer_cancel(&timer->u.hrtimer); + + return 0; +} +#else +static inline +int __qdf_hrtimer_cancel(__qdf_hrtimer_data_t *timer) +{ + if (timer->ctx == QDF_CONTEXT_HARDWARE) + return hrtimer_cancel(&timer->u.hrtimer); + else if (timer->ctx == QDF_CONTEXT_TASKLET) + return hrtimer_cancel(&timer->u.tasklet_hrtimer.timer); + + return 0; +} +#endif + +/** + * __qdf_hrtimer_init() - init hrtimer in a given context + * @timer: pointer to the hrtimer object + * @cback: callback function to be fired + * @clock: clock id + * @hrtimer_mode: mode of hrtimer + * + * starts hrtimer in a context passed as per the context + * + * Return: void + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) +static inline void __qdf_hrtimer_init(__qdf_hrtimer_data_t *timer, + void *cback, + enum qdf_clock_id clock, + enum qdf_hrtimer_mode mode, + enum qdf_context_mode ctx) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + enum hrtimer_mode hrt_mode = __qdf_hrtimer_get_mode(mode); + + timer->ctx = ctx; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) { + hrtimer_init(hrtimer, clock, hrt_mode); + hrtimer->function = cback; + } else if (timer->ctx == QDF_CONTEXT_TASKLET) { + QDF_BUG(0); + } +} +#else +static inline void __qdf_hrtimer_init(__qdf_hrtimer_data_t *timer, + void *cback, + enum qdf_clock_id clock, + enum qdf_hrtimer_mode mode, + enum qdf_context_mode ctx) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + struct tasklet_hrtimer *tasklet_hrtimer = &timer->u.tasklet_hrtimer; + enum hrtimer_mode hrt_mode = __qdf_hrtimer_get_mode(mode); + + timer->ctx = ctx; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) { + hrtimer_init(hrtimer, clock, hrt_mode); + hrtimer->function = cback; + } else if (timer->ctx == QDF_CONTEXT_TASKLET) { + tasklet_hrtimer_init(tasklet_hrtimer, cback, clock, hrt_mode); + } +} +#endif + +/** + * __qdf_hrtimer_kill() - kills hrtimer in given context + * @timer: pointer to the hrtimer object + * + * kills hrtimer in given context + * + * Return: void + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) +static inline +void __qdf_hrtimer_kill(__qdf_hrtimer_data_t *timer) +{ + hrtimer_cancel(&timer->u.hrtimer); +} +#else +static inline +void __qdf_hrtimer_kill(__qdf_hrtimer_data_t *timer) +{ + if (timer->ctx == QDF_CONTEXT_HARDWARE) + hrtimer_cancel(&timer->u.hrtimer); + else if (timer->ctx == QDF_CONTEXT_TASKLET) + tasklet_hrtimer_cancel(&timer->u.tasklet_hrtimer); +} +#endif + +/** + * __qdf_hrtimer_get_remaining() - check remaining time in the timer + * @timer: pointer to the hrtimer object + * + * check whether the timer is on one of the queues + * + * Return: remaining time as ktime object + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) +static inline ktime_t __qdf_hrtimer_get_remaining(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + + return hrtimer_get_remaining(hrtimer); +} +#else +static inline ktime_t __qdf_hrtimer_get_remaining(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + struct tasklet_hrtimer *tasklet_hrtimer = &timer->u.tasklet_hrtimer; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) + return hrtimer_get_remaining(hrtimer); + else + return hrtimer_get_remaining(&tasklet_hrtimer->timer); +} +#endif + +/** + * __qdf_hrtimer_is_queued() - check whether the timer is on one of the queues + * @timer: pointer to the hrtimer object + * + * check whether the timer is on one of the queues + * + * Return: false when the timer was not in queue + * true when the timer was in queue + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) +static inline bool __qdf_hrtimer_is_queued(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + + return hrtimer_is_queued(hrtimer); +} +#else +static inline bool __qdf_hrtimer_is_queued(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + struct tasklet_hrtimer *tasklet_hrtimer = &timer->u.tasklet_hrtimer; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) + return hrtimer_is_queued(hrtimer); + else + return hrtimer_is_queued(&tasklet_hrtimer->timer); +} +#endif + +/** + * __qdf_hrtimer_callback_running() - check if callback is running + * @timer: pointer to the hrtimer object + * + * check whether the timer is running the callback function + * + * Return: false when callback is not running + * true when callback is running + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) +static inline bool __qdf_hrtimer_callback_running(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + + return hrtimer_callback_running(hrtimer); +} +#else +static inline bool __qdf_hrtimer_callback_running(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + struct tasklet_hrtimer *tasklet_hrtimer = &timer->u.tasklet_hrtimer; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) + return hrtimer_callback_running(hrtimer); + else + return hrtimer_callback_running(&tasklet_hrtimer->timer); +} +#endif + +/** + * __qdf_hrtimer_active() - check if timer is active + * @timer: pointer to the hrtimer object + * + * Check if timer is active. A timer is active, when it is enqueued into + * the rbtree or the callback function is running. + * + * Return: false if timer is not active + * true if timer is active + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) +static inline bool __qdf_hrtimer_active(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + + return hrtimer_active(hrtimer); +} +#else +static inline bool __qdf_hrtimer_active(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + struct tasklet_hrtimer *tasklet_hrtimer = &timer->u.tasklet_hrtimer; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) + return hrtimer_active(hrtimer); + else + return hrtimer_active(&tasklet_hrtimer->timer); +} +#endif + +/** + * __qdf_hrtimer_cb_get_time() - get remaining time in callback + * @timer: pointer to the hrtimer object + * + * Get remaining time in the hrtimer callback + * + * Return: time remaining as ktime object + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) +static inline ktime_t __qdf_hrtimer_cb_get_time(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + + return hrtimer_cb_get_time(hrtimer); +} +#else +static inline ktime_t __qdf_hrtimer_cb_get_time(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + struct tasklet_hrtimer *tasklet_hrtimer = &timer->u.tasklet_hrtimer; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) + return hrtimer_cb_get_time(hrtimer); + else + return hrtimer_cb_get_time(&tasklet_hrtimer->timer); +} +#endif + +/** + * __qdf_hrtimer_forward() - forward the hrtimer + * @timer: pointer to the hrtimer object + * @now: current ktime + * @interval: interval to forward as ktime object + * + * Forward the timer expiry so it will expire in the future + * + * Return:the number of overruns + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) +static inline uint64_t __qdf_hrtimer_forward(__qdf_hrtimer_data_t *timer, + ktime_t now, + ktime_t interval) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + + return hrtimer_forward(hrtimer, now, interval); +} +#else +static inline uint64_t __qdf_hrtimer_forward(__qdf_hrtimer_data_t *timer, + ktime_t now, + ktime_t interval) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + struct tasklet_hrtimer *tasklet_hrtimer = &timer->u.tasklet_hrtimer; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) + return hrtimer_forward(hrtimer, now, interval); + else + return hrtimer_forward(&tasklet_hrtimer->timer, now, interval); +} +#endif + +#endif /* _I_QDF_HRTIMER_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_idr.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_idr.h new file mode 100644 index 0000000000000000000000000000000000000000..57b56c031104db90637f3858cc93872ebf2f14cb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_idr.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_idr.h (ID Allocation) + * Linux-specific definitions for QDF ID Allocation API's + */ + +#if !defined(__I_QDF_IDR_H) +#define __I_QDF_IDR_H + +#include +#include + +/** + * struct __qdf_idr_s + * @lock: qdf spinlock + * @idr: idr handler + */ +struct __qdf_idr_s { + qdf_spinlock_t lock; + struct idr idr; +}; + +typedef struct __qdf_idr_s __qdf_idr; + +#endif /* __I_QDF_IDR_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ipa.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ipa.h new file mode 100644 index 0000000000000000000000000000000000000000..273f5a3748ccfe92a874a80a2a06372b87601820 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ipa.h @@ -0,0 +1,987 @@ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _I_QDF_IPA_H +#define _I_QDF_IPA_H + +#ifdef IPA_OFFLOAD + +#include +#include + +/** + * __qdf_ipa_wdi_meter_evt_type_t - type of event client callback is + * for AP+STA mode metering + * @IPA_GET_WDI_SAP_STATS: get IPA_stats betwen SAP and STA - + * use ipa_get_wdi_sap_stats structure + * @IPA_SET_WIFI_QUOTA: set quota limit on STA - + * use ipa_set_wifi_quota structure + */ +typedef enum ipa_wdi_meter_evt_type __qdf_ipa_wdi_meter_evt_type_t; + +typedef struct ipa_get_wdi_sap_stats __qdf_ipa_get_wdi_sap_stats_t; + +#define QDF_IPA_GET_WDI_SAP_STATS_RESET_STATS(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->reset_stats) +#define QDF_IPA_GET_WDI_SAP_STATS_STATS_VALID(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->stats_valid) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV4_TX_PACKETS(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv4_tx_packets) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV4_TX_BYTES(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv4_tx_bytes) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV4_RX_PACKETS(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv4_rx_packets) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV4_RX_BYTES(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv4_rx_bytes) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV6_TX_PACKETS(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv6_tx_packets) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV6_TX_BYTES(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv6_tx_bytes) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV6_RX_PACKETS(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv6_rx_packets) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV6_RX_BYTES(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv6_rx_bytes) + +/** + * __qdf_ipa_set_wifi_quota_t - structure used for + * IPA_SET_WIFI_QUOTA. + */ +typedef struct ipa_set_wifi_quota __qdf_ipa_set_wifi_quota_t; + +#define QDF_IPA_SET_WIFI_QUOTA_BYTES(ipa_set_quota) \ + (((struct ipa_set_wifi_quota *)(ipa_set_quota))->quota_bytes) +#define QDF_IPA_SET_WIFI_QUOTA_SET_QUOTA(ipa_set_quota) \ + (((struct ipa_set_wifi_quota *)(ipa_set_quota))->set_quota) +#define QDF_IPA_SET_WIFI_QUOTA_SET_VALID(ipa_set_quota) \ + (((struct ipa_set_wifi_quota *)(ipa_set_quota))->set_valid) + +/** + * __qdf_ipa_connect_params_t - low-level client connect input parameters. Either + * client allocates the data and desc FIFO and specifies that in data+desc OR + * specifies sizes and pipe_mem pref and IPA does the allocation. + */ +typedef struct ipa_connect_params __qdf_ipa_connect_params_t; + +/** + * __qdf_ipa_tx_meta_t - meta-data for the TX packet + */ +typedef struct ipa_tx_meta __qdf_ipa_tx_meta_t; + +/** + * __qdf_ipa_msg_free_fn_t - callback function + * + * Message callback registered by kernel client with IPA driver to + * free message payload after IPA driver processing is complete + */ +typedef void (*__qdf_ipa_msg_free_fn_t)(void *buff, u32 len, u32 type); + +/** + * __qdf_ipa_sps_params_t - SPS related output parameters resulting from + */ +typedef struct ipa_sps_params __qdf_ipa_sps_params_t; + +/** + * __qdf_ipa_tx_intf_t - interface tx properties + */ +typedef struct ipa_tx_intf __qdf_ipa_tx_intf_t; + +#define QDF_IPA_TX_INTF_PROP(tx_intf) \ + (((struct ipa_tx_intf *)(tx_intf))->prop) + +/** + * __qdf_ipa_rx_intf_t - interface rx properties + */ +typedef struct ipa_rx_intf __qdf_ipa_rx_intf_t; + +#define QDF_IPA_RX_INTF_PROP(rx_intf) \ + (((struct ipa_rx_intf *)(rx_intf))->prop) + +/** + * __qdf_ipa_ext_intf_t - interface ext properties + */ +typedef struct ipa_ext_intf __qdf_ipa_ext_intf_t; + +/** + * __qdf_ipa_sys_connect_params_t - information needed to setup an IPA end-point + * in system-BAM mode + */ +typedef struct ipa_sys_connect_params __qdf_ipa_sys_connect_params_t; + +#define QDF_IPA_SYS_PARAMS_NAT_EN(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->ipa_ep_cfg.nat.nat_en) +#define QDF_IPA_SYS_PARAMS_HDR_LEN(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->ipa_ep_cfg.hdr.hdr_len) +#define QDF_IPA_SYS_PARAMS_HDR_ADDITIONAL_CONST_LEN(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->ipa_ep_cfg.hdr.hdr_additional_const_len) +#define QDF_IPA_SYS_PARAMS_HDR_OFST_PKT_SIZE_VALID(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid) +#define QDF_IPA_SYS_PARAMS_HDR_OFST_PKT_SIZE(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->ipa_ep_cfg.hdr.hdr_ofst_pkt_size) +#define QDF_IPA_SYS_PARAMS_HDR_LITTLE_ENDIAN(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->ipa_ep_cfg.hdr_ext.hdr_little_endian) +#define QDF_IPA_SYS_PARAMS_MODE(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->ipa_ep_cfg.mode.mode) +#define QDF_IPA_SYS_PARAMS_CLIENT(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->client) +#define QDF_IPA_SYS_PARAMS_DESC_FIFO_SZ(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->desc_fifo_sz) +#define QDF_IPA_SYS_PARAMS_PRIV(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->priv) +#define QDF_IPA_SYS_PARAMS_NOTIFY(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->notify) +#define QDF_IPA_SYS_PARAMS_SKIP_EP_CFG(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->skip_ep_cfg) +#define QDF_IPA_SYS_PARAMS_KEEP_IPA_AWAKE(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->keep_ipa_awake) + +/** + * __qdf_pa_rm_event_t - IPA RM events + * + * Indicate the resource state change + */ +typedef enum ipa_rm_event __qdf_ipa_rm_event_t; + +/** + * struct __qdf_ipa_rm_register_params_t - information needed to + * register IPA RM client with IPA RM + */ +typedef struct ipa_rm_register_params __qdf_ipa_rm_register_params_t; + +/** + * struct __qdf_ipa_rm_create_params_t - information needed to initialize + * the resource + * + * IPA RM client is expected to perform non blocking operations only + * in request_resource and release_resource functions and + * release notification context as soon as possible. + */ +typedef struct ipa_rm_create_params __qdf_ipa_rm_create_params_t; + +#define QDF_IPA_RM_CREATE_PARAMS_NAME(create_params) \ + (((struct ipa_rm_create_params *)(create_params))->name) +#define QDF_IPA_RM_CREATE_PARAMS_USER_DATA(create_params) \ + (((struct ipa_rm_create_params *)(create_params))->reg_params.user_data) +#define QDF_IPA_RM_CREATE_PARAMS_NOTIFY_CB(create_params) \ + (((struct ipa_rm_create_params *)(create_params))->reg_params.notify_cb) +#define QDF_IPA_RM_CREATE_PARAMS_REQUEST_RESOURCE(create_params) \ + (((struct ipa_rm_create_params *)(create_params))->request_resource) +#define QDF_IPA_RM_CREATE_PARAMS_RELEASE_RESOURCE(create_params) \ + (((struct ipa_rm_create_params *)(create_params))->release_resource) +#define QDF_IPA_RM_CREATE_PARAMS_FLOOR_VOLTAGE(create_params) \ + (((struct ipa_rm_create_params *)(create_params))->floor_voltage) + +/** + * __qdf_ipa_rm_perf_profile_t - information regarding IPA RM client performance + * profile + */ +typedef struct ipa_rm_perf_profile __qdf_ipa_rm_perf_profile_t; + +#define QDF_IPA_RM_PERF_PROFILE_MAX_SUPPORTED_BANDWIDTH_MBPS(profile) \ + (((struct ipa_rm_perf_profile *)(profile))->max_supported_bandwidth_mbps) + +/** + * __qdf_ipa_tx_data_desc_t - information needed + * to send data packet to HW link: link to data descriptors + * priv: client specific private data + */ +typedef struct ipa_tx_data_desc __qdf_ipa_tx_data_desc_t; + +/** + * __qdf_ipa_rx_data_t - information needed + * to send to wlan driver on receiving data from ipa hw + */ +typedef struct ipa_rx_data __qdf_ipa_rx_data_t; + +#define QDF_IPA_RX_DATA_SKB(desc) \ + (((struct ipa_rx_data *)(desc))->skb) +#define QDF_IPA_RX_DATA_SKB_LEN(desc) \ + (((struct ipa_rx_data *)(desc))->skb->len) +#define QDF_IPA_RX_DATA_DMA_ADDR(desc) \ + (((struct ipa_rx_data *)(desc))->dma_addr) + +/** + * __qdf_ipa_wdi_ul_params_t - WDI_RX configuration + */ +typedef struct ipa_wdi_ul_params __qdf_ipa_wdi_ul_params_t; + +/** + * __qdf_ipa_wdi_ul_params_smmu_t - WDI_RX configuration (with WLAN SMMU) + */ +typedef struct ipa_wdi_ul_params_smmu __qdf_ipa_wdi_ul_params_smmu_t; + +/** + * __qdf_ipa_wdi_dl_params_t - WDI_TX configuration + */ +typedef struct ipa_wdi_dl_params __qdf_ipa_wdi_dl_params_t; + +/** + * __qdf_ipa_wdi_dl_params_smmu_t - WDI_TX configuration (with WLAN SMMU) + */ +typedef struct ipa_wdi_dl_params_smmu __qdf_ipa_wdi_dl_params_smmu_t; + +/** + * __qdf_ipa_wdi_in_params_t - information provided by WDI client + */ +typedef struct ipa_wdi_in_params __qdf_ipa_wdi_in_params_t; + +#define QDF_IPA_PIPE_IN_NAT_EN(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.nat.nat_en) +#define QDF_IPA_PIPE_IN_HDR_LEN(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.hdr.hdr_len) +#define QDF_IPA_PIPE_IN_HDR_OFST_METADATA_VALID(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid) +#define QDF_IPA_PIPE_IN_HDR_METADATA_REG_VALID(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.hdr.hdr_metadata_reg_valid) +#define QDF_IPA_PIPE_IN_HDR_OFST_PKT_SIZE_VALID(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid) +#define QDF_IPA_PIPE_IN_HDR_OFST_PKT_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.hdr.hdr_ofst_pkt_size) +#define QDF_IPA_PIPE_IN_HDR_ADDITIONAL_CONST_LEN(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.hdr.hdr_additional_const_len) +#define QDF_IPA_PIPE_IN_MODE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.mode.mode) +#define QDF_IPA_PIPE_IN_CLIENT(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.client) +#define QDF_IPA_PIPE_IN_DESC_FIFO_SZ(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.desc_fifo_sz) +#define QDF_IPA_PIPE_IN_PRIV(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.priv) +#define QDF_IPA_PIPE_IN_HDR_LITTLE_ENDIAN(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.hdr_ext.hdr_little_endian) +#define QDF_IPA_PIPE_IN_NOTIFY(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.notify) +#define QDF_IPA_PIPE_IN_KEEP_IPA_AWAKE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.keep_ipa_awake) +#define QDF_IPA_PIPE_IN_KEEP_IPA_AWAKE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.keep_ipa_awake) +#ifdef FEATURE_METERING +#define QDF_IPA_PIPE_IN_WDI_NOTIFY(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->wdi_notify) +#endif + +#ifdef ENABLE_SMMU_S1_TRANSLATION +#define QDF_IPA_PIPE_IN_SMMU_ENABLED(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->smmu_enabled) + +#define QDF_IPA_PIPE_IN_DL_SMMU_COMP_RING(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl_smmu.comp_ring) +#define QDF_IPA_PIPE_IN_DL_SMMU_CE_RING(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl_smmu.ce_ring) +#define QDF_IPA_PIPE_IN_DL_SMMU_COMP_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl_smmu.comp_ring_size) +#define QDF_IPA_PIPE_IN_DL_SMMU_CE_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl_smmu.ce_ring_size) +#define QDF_IPA_PIPE_IN_DL_SMMU_CE_DOOR_BELL_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl_smmu.ce_door_bell_pa) +#define QDF_IPA_PIPE_IN_DL_SMMU_NUM_TX_BUFFERS(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl_smmu.num_tx_buffers) + +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_RING(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_ring) +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_ring_size) +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_RING_RP_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_ring_rp_pa) +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_RING_RP_VA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_ring_rp_va) +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_COMP_RING(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_comp_ring) +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_COMP_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_comp_ring_size) +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_COMP_RING_WP_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_comp_ring_wp_pa) +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_COMP_RING_WP_VA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_comp_ring_wp_va) +#endif + +#define QDF_IPA_PIPE_IN_DL_COMP_RING_BASE_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl.comp_ring_base_pa) +#define QDF_IPA_PIPE_IN_DL_COMP_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl.comp_ring_size) +#define QDF_IPA_PIPE_IN_DL_CE_RING_BASE_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl.ce_ring_base_pa) +#define QDF_IPA_PIPE_IN_DL_CE_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl.ce_ring_size) +#define QDF_IPA_PIPE_IN_DL_CE_DOOR_BELL_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl.ce_door_bell_pa) +#define QDF_IPA_PIPE_IN_DL_NUM_TX_BUFFERS(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl.num_tx_buffers) + +#define QDF_IPA_PIPE_IN_UL_RDY_RING_BASE_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_ring_base_pa) +#define QDF_IPA_PIPE_IN_UL_RDY_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_ring_size) +#define QDF_IPA_PIPE_IN_UL_RDY_RING_RP_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_ring_rp_pa) +#define QDF_IPA_PIPE_IN_UL_RDY_RING_RP_VA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_ring_rp_va) +#define QDF_IPA_PIPE_IN_UL_RDY_COMP_RING(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_comp_ring_base_pa) +#define QDF_IPA_PIPE_IN_UL_RDY_COMP_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_comp_ring_size) +#define QDF_IPA_PIPE_IN_UL_RDY_COMP_RING_WP_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_comp_ring_wp_pa) +#define QDF_IPA_PIPE_IN_UL_RDY_COMP_RING_WP_VA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_comp_ring_wp_va) + +/** + * __qdf_ipa_wdi_out_params_t - information provided to WDI client + */ +typedef struct ipa_wdi_out_params __qdf_ipa_wdi_out_params_t; + +#define QDF_IPA_PIPE_OUT_UC_DOOR_BELL_PA(pipe_out) \ + (((struct ipa_wdi_out_params *)(pipe_out))->uc_door_bell_pa) +#define QDF_IPA_PIPE_OUT_CLNT_HDL(pipe_out) \ + (((struct ipa_wdi_out_params *)(pipe_out))->clnt_hdl) + +/** + * __qdf_ipa_wdi_db_params_t - information provided to retrieve + * physical address of uC doorbell + */ +typedef struct ipa_wdi_db_params __qdf_ipa_wdi_db_params_t; + +/** + * __qdf_ipa_wdi_uc_ready_params_t - uC ready CB parameters + */ +typedef void (*__qdf_ipa_uc_ready_cb)(void *priv); +typedef struct ipa_wdi_uc_ready_params __qdf_ipa_wdi_uc_ready_params_t; + +#define QDF_IPA_UC_READY_PARAMS_IS_UC_READY(uc_ready_param) \ + (((struct ipa_wdi_uc_ready_params *)(uc_ready_param))->is_uC_ready) +#define QDF_IPA_UC_READY_PARAMS_PRIV(uc_ready_param) \ + (((struct ipa_wdi_uc_ready_params *)(uc_ready_param))->priv) +#define QDF_IPA_UC_READY_PARAMS_NOTIFY(uc_ready_param) \ + (((struct ipa_wdi_uc_ready_params *)(uc_ready_param))->notify) + +/** + * __qdf_ipa_wdi_buffer_info_t - address info of a WLAN allocated buffer + * + * IPA driver will create/release IOMMU mapping in IPA SMMU from iova->pa + */ +typedef struct ipa_wdi_buffer_info __qdf_ipa_wdi_buffer_info_t; + +/** + * __qdf_ipa_gsi_ep_config_t - IPA GSI endpoint configurations + */ +typedef struct ipa_gsi_ep_config __qdf_ipa_gsi_ep_config_t; + +#ifdef WDI3_STATS_UPDATE +/** + * __qdf_ipa_wdi_tx_info_t - WLAN embedded TX information + */ +typedef struct ipa_wdi_tx_info __qdf_ipa_wdi_tx_info_t; + +#define QDF_IPA_WDI_TX_INFO_STA_TX_BYTES(stats_info) \ + (((struct ipa_wdi_tx_info *)stats_info)->sta_tx) +#define QDF_IPA_WDI_TX_INFO_SAP_TX_BYTES(stats_info) \ + (((struct ipa_wdi_tx_info *)stats_info)->ap_tx) +/** + * __qdf_ipa_wdi_bw_info_t - BW levels to be monitored by uC + */ +typedef struct ipa_wdi_bw_info __qdf_ipa_wdi_bw_info_t; + +#define QDF_IPA_WDI_BW_INFO_THRESHOLD_LEVEL_1(bw_info) \ + (((struct ipa_wdi_bw_info *)bw_info)->threshold[0]) +#define QDF_IPA_WDI_BW_INFO_THRESHOLD_LEVEL_2(bw_info) \ + (((struct ipa_wdi_bw_info *)bw_info)->threshold[1]) +#define QDF_IPA_WDI_BW_INFO_THRESHOLD_LEVEL_3(bw_info) \ + (((struct ipa_wdi_bw_info *)bw_info)->threshold[2]) +#define QDF_IPA_WDI_BW_INFO_START_STOP(bw_info) \ + (((struct ipa_wdi_bw_info *)bw_info)->stop) + +/** + * __qdf_ipa_inform_wlan_bw_t - BW information given by IPA driver + */ +typedef struct ipa_inform_wlan_bw __qdf_ipa_inform_wlan_bw_t; + +#define QDF_IPA_INFORM_WLAN_BW_INDEX(bw_inform) \ + (((struct ipa_inform_wlan_bw*)bw_inform)->index) +#define QDF_IPA_INFORM_WLAN_BW_THROUGHPUT(bw_inform) \ + (((struct ipa_inform_wlan_bw*)bw_inform)->throughput) + +#endif /* WDI3_STATS_UPDATE */ + +/** + * __qdf_ipa_dp_evt_type_t - type of event client callback is + * invoked for on data path + * @IPA_RECEIVE: data is struct sk_buff + * @IPA_WRITE_DONE: data is struct sk_buff + */ +typedef enum ipa_dp_evt_type __qdf_ipa_dp_evt_type_t; + +typedef struct ipa_hdr_add __qdf_ipa_hdr_add_t; +typedef struct ipa_hdr_del __qdf_ipa_hdr_del_t; +typedef struct ipa_ioc_add_hdr __qdf_ipa_ioc_add_hdr_t; + +#define QDF_IPA_IOC_ADD_HDR_COMMIT(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->commit) +#define QDF_IPA_IOC_ADD_HDR_NUM_HDRS(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->num_hdrs) +#define QDF_IPA_IOC_ADD_HDR_NAME(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].name) +#define QDF_IPA_IOC_ADD_HDR_HDR(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].hdr) +#define QDF_IPA_IOC_ADD_HDR_HDR_LEN(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].hdr_len) +#define QDF_IPA_IOC_ADD_HDR_TYPE(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].type) +#define QDF_IPA_IOC_ADD_HDR_IS_PARTIAL(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].is_partial) +#define QDF_IPA_IOC_ADD_HDR_HDR_HDL(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].hdr_hdl) +#define QDF_IPA_IOC_ADD_HDR_STATUS(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].status) +#define QDF_IPA_IOC_ADD_HDR_IS_ETH2_OFST_VALID(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].is_eth2_ofst_valid) +#define QDF_IPA_IOC_ADD_HDR_ETH2_OFST(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].eth2_ofst) + +typedef struct ipa_ioc_del_hdr __qdf_ipa_ioc_del_hdr_t; + +#define QDF_IPA_IOC_DEL_HDR_COMMIT(ipa_hdr) \ + (((struct ipa_ioc_del_hdr *)(ipa_hdr))->commit) +#define QDF_IPA_IOC_DEL_HDR_NUM_HDRS(ipa_hdr) \ + (((struct ipa_ioc_del_hdr *)(ipa_hdr))->num_hdls) +#define QDF_IPA_IOC_DEL_HDR_HDL(ipa_hdr) \ + (((struct ipa_ioc_del_hdr *)(ipa_hdr))->hdl[0].hdl) +#define QDF_IPA_IOC_DEL_HDR_STATUS(ipa_hdr) \ + (((struct ipa_ioc_del_hdr *)(ipa_hdr))->hdl[0].status) + +typedef struct ipa_ioc_get_hdr __qdf_ipa_ioc_get_hdr_t; + +#define QDF_IPA_IOC_GET_HDR_NAME(ipa_hdr) \ + (((struct ipa_ioc_get_hdr *)(ipa_hdr))->name) +#define QDF_IPA_IOC_GET_HDR_HDL(ipa_hdr) \ + (((struct ipa_ioc_get_hdr *)(ipa_hdr))->hdl) + +typedef struct ipa_ioc_copy_hdr __qdf_ipa_ioc_copy_hdr_t; +typedef struct ipa_ioc_add_hdr_proc_ctx __qdf_ipa_ioc_add_hdr_proc_ctx_t; +typedef struct ipa_ioc_del_hdr_proc_ctx __qdf_ipa_ioc_del_hdr_proc_ctx_t; +typedef struct ipa_msg_meta __qdf_ipa_msg_meta_t; + +#define QDF_IPA_MSG_META_MSG_TYPE(meta) \ + (((struct ipa_msg_meta *)(meta))->msg_type) +#define QDF_IPA_MSG_META_MSG_LEN(meta) \ + (((struct ipa_msg_meta *)(meta))->msg_len) + +typedef enum ipa_client_type __qdf_ipa_client_type_t; +typedef struct IpaHwStatsWDIInfoData_t __qdf_ipa_hw_stats_wdi_info_data_t; +typedef enum ipa_rm_resource_name __qdf_ipa_rm_resource_name_t; +typedef enum ipa_wlan_event __qdf_ipa_wlan_event_t; +typedef struct ipa_wlan_msg __qdf_ipa_wlan_msg_t; + +#define QDF_IPA_WLAN_MSG_NAME(ipa_msg) \ + (((struct ipa_wlan_msg *)(ipa_msg))->name) +#define QDF_IPA_WLAN_MSG_MAC_ADDR(ipa_msg) \ + (((struct ipa_wlan_msg *)(ipa_msg))->mac_addr) + +typedef struct ipa_wlan_msg_ex __qdf_ipa_wlan_msg_ex_t; + +#define QDF_IPA_WLAN_MSG_EX_NAME(ipa_msg) \ + (((struct ipa_wlan_msg_ex *)(ipa_msg))->name) +#define QDF_IPA_WLAN_MSG_EX_EXNUM_OF_ATTRIBS(ipa_msg) \ + (((struct ipa_wlan_msg_ex *)(ipa_msg))->num_of_attribs) +#define QDF_IPA_WLAN_MSG_EX_ATTRIB_TYPE(ipa_msg) \ + (((struct ipa_wlan_msg_ex *)(ipa_msg))->attribs.attrib_type) +#define QDF_IPA_WLAN_MSG_EX_OFFSET(ipa_msg) \ + (((struct ipa_wlan_msg_ex *)(ipa_msg))->attribs.offset) +#define QDF_IPA_WLAN_MSG_EX_MAC_ADDR(ipa_msg) \ + (((struct ipa_wlan_msg_ex *)(ipa_msg))->attribs.u.mac_addr) + +typedef struct ipa_ioc_tx_intf_prop __qdf_ipa_ioc_tx_intf_prop_t; + +#define QDF_IPA_IOC_TX_INTF_PROP_IP(tx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->ip) +#define QDF_IPA_IOC_TX_INTF_PROP_ATTRIB_MASK(tx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->attrib.attrib_mask) +#define QDF_IPA_IOC_TX_INTF_PROP_META_DATA(rx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->attrib.meta_data) +#define QDF_IPA_IOC_TX_INTF_PROP_META_DATA_MASK(rx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->attrib.meta_data_mask) +#define QDF_IPA_IOC_TX_INTF_PROP_DST_PIPE(tx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->dst_pipe) +#define QDF_IPA_IOC_TX_INTF_PROP_ALT_DST_PIPE(tx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->alt_dst_pipe) +#define QDF_IPA_IOC_TX_INTF_PROP_HDR_NAME(tx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->hdr_name) +#define QDF_IPA_IOC_TX_INTF_PROP_HDR_L2_TYPE(tx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->hdr_l2_type) + +typedef struct ipa_ioc_rx_intf_prop __qdf_ipa_ioc_rx_intf_prop_t; + +#define QDF_IPA_IOC_RX_INTF_PROP_IP(rx_prop) \ + (((struct ipa_ioc_rx_intf_prop *)(rx_prop))->ip) +#define QDF_IPA_IOC_RX_INTF_PROP_ATTRIB_MASK(rx_prop) \ + (((struct ipa_ioc_rx_intf_prop *)(rx_prop))->attrib.attrib_mask) +#define QDF_IPA_IOC_RX_INTF_PROP_META_DATA(rx_prop) \ + (((struct ipa_ioc_rx_intf_prop *)(rx_prop))->attrib.meta_data) +#define QDF_IPA_IOC_RX_INTF_PROP_META_DATA_MASK(rx_prop) \ + (((struct ipa_ioc_rx_intf_prop *)(rx_prop))->attrib.meta_data_mask) +#define QDF_IPA_IOC_RX_INTF_PROP_SRC_PIPE(rx_prop) \ + (((struct ipa_ioc_rx_intf_prop *)(rx_prop))->src_pipe) +#define QDF_IPA_IOC_RX_INTF_PROP_HDR_L2_TYPE(rx_prop) \ + (((struct ipa_ioc_rx_intf_prop *)(rx_prop))->hdr_l2_type) + +typedef struct ipa_wlan_hdr_attrib_val __qdf_ipa_wlan_hdr_attrib_val_t; + +#define __QDF_IPA_SET_META_MSG_TYPE(meta, msg_type) \ + __qdf_ipa_set_meta_msg_type(meta, msg_type) + +#define __QDF_IPA_RM_RESOURCE_GRANTED IPA_RM_RESOURCE_GRANTED +#define __QDF_IPA_RM_RESOURCE_RELEASED IPA_RM_RESOURCE_RELEASED + +#define __QDF_IPA_RM_RESOURCE_WLAN_PROD IPA_RM_RESOURCE_WLAN_PROD +#define __QDF_IPA_RM_RESOURCE_WLAN_CONS IPA_RM_RESOURCE_WLAN_CONS +#define __QDF_IPA_RM_RESOURCE_APPS_CONS IPA_RM_RESOURCE_APPS_CONS + +#define __QDF_IPA_VOLTAGE_LEVEL IPA_VOLTAGE_SVS + +#define __QDF_IPA_CLIENT_WLAN1_PROD IPA_CLIENT_WLAN1_PROD +#define __QDF_IPA_CLIENT_WLAN1_CONS IPA_CLIENT_WLAN1_CONS +#define __QDF_IPA_CLIENT_WLAN2_CONS IPA_CLIENT_WLAN2_CONS +#define __QDF_IPA_CLIENT_WLAN3_CONS IPA_CLIENT_WLAN3_CONS +#define __QDF_IPA_CLIENT_WLAN4_CONS IPA_CLIENT_WLAN4_CONS + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) +#define IPA_LAN_RX_NAPI_SUPPORT +#endif + +/* + * Resume / Suspend + */ +static inline int __qdf_ipa_reset_endpoint(u32 clnt_hdl) +{ + return ipa_reset_endpoint(clnt_hdl); +} + +/* + * Remove ep delay + */ +static inline int __qdf_ipa_clear_endpoint_delay(u32 clnt_hdl) +{ + return ipa_clear_endpoint_delay(clnt_hdl); +} + +/* + * Header removal / addition + */ +static inline int __qdf_ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs) +{ + return ipa_add_hdr(hdrs); +} + +static inline int __qdf_ipa_del_hdr(struct ipa_ioc_del_hdr *hdls) +{ + return ipa_del_hdr(hdls); +} + +static inline int __qdf_ipa_commit_hdr(void) +{ + return ipa_commit_hdr(); +} + +static inline int __qdf_ipa_get_hdr(struct ipa_ioc_get_hdr *lookup) +{ + return ipa_get_hdr(lookup); +} + +static inline int __qdf_ipa_put_hdr(u32 hdr_hdl) +{ + return ipa_put_hdr(hdr_hdl); +} + +static inline int __qdf_ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy) +{ + return ipa_copy_hdr(copy); +} + +/* + * Messaging + */ +static inline int __qdf_ipa_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback) +{ + return ipa_send_msg(meta, buff, callback); +} + +static inline int __qdf_ipa_register_pull_msg(struct ipa_msg_meta *meta, + ipa_msg_pull_fn callback) +{ + return ipa_register_pull_msg(meta, callback); +} + +static inline int __qdf_ipa_deregister_pull_msg(struct ipa_msg_meta *meta) +{ + return ipa_deregister_pull_msg(meta); +} + +/* + * Interface + */ +static inline int __qdf_ipa_register_intf(const char *name, + const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx) +{ + return ipa_register_intf(name, tx, rx); +} + +static inline int __qdf_ipa_register_intf_ext(const char *name, + const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext) +{ + return ipa_register_intf_ext(name, tx, rx, ext); +} + +static inline int __qdf_ipa_deregister_intf(const char *name) +{ + return ipa_deregister_intf(name); +} + +/* + * Data path + */ +static inline int __qdf_ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *metadata) +{ + return ipa_tx_dp(dst, skb, metadata); +} + +/* + * To transfer multiple data packets + */ +static inline int __qdf_ipa_tx_dp_mul( + enum ipa_client_type dst, + struct ipa_tx_data_desc *data_desc) +{ + return ipa_tx_dp_mul(dst, data_desc); +} + +static inline void __qdf_ipa_free_skb(struct ipa_rx_data *rx_in) +{ + return ipa_free_skb(rx_in);; +} + +/* + * System pipes + */ +static inline u16 __qdf_ipa_get_smem_restr_bytes(void) +{ + return ipa_get_smem_restr_bytes(); +} + +static inline int __qdf_ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, + u32 *clnt_hdl) +{ + return ipa_setup_sys_pipe(sys_in, clnt_hdl); +} + +static inline int __qdf_ipa_teardown_sys_pipe(u32 clnt_hdl) +{ + return ipa_teardown_sys_pipe(clnt_hdl); +} + +static inline int __qdf_ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out) +{ + return ipa_connect_wdi_pipe(in, out); +} + +static inline int __qdf_ipa_disconnect_wdi_pipe(u32 clnt_hdl) +{ + return ipa_disconnect_wdi_pipe(clnt_hdl); +} + +static inline int __qdf_ipa_enable_wdi_pipe(u32 clnt_hdl) +{ + return ipa_enable_wdi_pipe(clnt_hdl); +} + +static inline int __qdf_ipa_disable_wdi_pipe(u32 clnt_hdl) +{ + return ipa_disable_wdi_pipe(clnt_hdl); +} + +static inline int __qdf_ipa_resume_wdi_pipe(u32 clnt_hdl) +{ + return ipa_resume_wdi_pipe(clnt_hdl); +} + +static inline int __qdf_ipa_suspend_wdi_pipe(u32 clnt_hdl) +{ + return ipa_suspend_wdi_pipe(clnt_hdl); +} + +static inline int __qdf_ipa_uc_wdi_get_dbpa( + struct ipa_wdi_db_params *out) +{ + return ipa_uc_wdi_get_dbpa(out); +} + +static inline int __qdf_ipa_uc_reg_rdyCB( + struct ipa_wdi_uc_ready_params *param) +{ + return ipa_uc_reg_rdyCB(param); +} + +static inline int __qdf_ipa_uc_dereg_rdyCB(void) +{ + return ipa_uc_dereg_rdyCB(); +} + + +/* + * Resource manager + */ +static inline int __qdf_ipa_rm_create_resource( + struct ipa_rm_create_params *create_params) +{ + return ipa_rm_create_resource(create_params); +} + +static inline int __qdf_ipa_rm_delete_resource( + enum ipa_rm_resource_name resource_name) +{ + return ipa_rm_delete_resource(resource_name); +} + +static inline int __qdf_ipa_rm_register(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params) +{ + return ipa_rm_register(resource_name, reg_params); +} + +static inline int __qdf_ipa_rm_set_perf_profile( + enum ipa_rm_resource_name resource_name, + struct ipa_rm_perf_profile *profile) +{ + return ipa_rm_set_perf_profile(resource_name, profile); +} + +static inline int __qdf_ipa_rm_deregister(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params) +{ + return ipa_rm_deregister(resource_name, reg_params); +} + +static inline int __qdf_ipa_rm_add_dependency( + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return ipa_rm_add_dependency(resource_name, depends_on_name); +} + +static inline int __qdf_ipa_rm_add_dependency_sync( + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return ipa_rm_add_dependency_sync(resource_name, depends_on_name); +} + +static inline int __qdf_ipa_rm_delete_dependency( + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return ipa_rm_delete_dependency(resource_name, depends_on_name); +} + +static inline int __qdf_ipa_rm_request_resource( + enum ipa_rm_resource_name resource_name) +{ + return ipa_rm_request_resource(resource_name); +} + +static inline int __qdf_ipa_rm_release_resource( + enum ipa_rm_resource_name resource_name) +{ + return ipa_rm_release_resource(resource_name); +} + +static inline int __qdf_ipa_rm_notify_completion(enum ipa_rm_event event, + enum ipa_rm_resource_name resource_name) +{ + return ipa_rm_notify_completion(event, resource_name); +} + +static inline int __qdf_ipa_rm_inactivity_timer_init( + enum ipa_rm_resource_name resource_name, + unsigned long msecs) +{ + return ipa_rm_inactivity_timer_init(resource_name, msecs); +} + +static inline int __qdf_ipa_rm_inactivity_timer_destroy( + enum ipa_rm_resource_name resource_name) +{ + return ipa_rm_inactivity_timer_destroy(resource_name); +} + +static inline int __qdf_ipa_rm_inactivity_timer_request_resource( + enum ipa_rm_resource_name resource_name) +{ + return ipa_rm_inactivity_timer_request_resource(resource_name); +} + +static inline int __qdf_ipa_rm_inactivity_timer_release_resource( + enum ipa_rm_resource_name resource_name) +{ + return ipa_rm_inactivity_timer_release_resource(resource_name); +} + +/* + * Miscellaneous + */ +static inline void __qdf_ipa_bam_reg_dump(void) +{ + return ipa_bam_reg_dump(); +} + +static inline int __qdf_ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats) +{ + return ipa_get_wdi_stats(stats); +} + +static inline int __qdf_ipa_get_ep_mapping(enum ipa_client_type client) +{ + return ipa_get_ep_mapping(client); +} + +static inline bool __qdf_ipa_is_ready(void) +{ + return ipa_is_ready(); +} + +static inline void __qdf_ipa_proxy_clk_vote(void) +{ + return ipa_proxy_clk_vote(); +} + +static inline void __qdf_ipa_proxy_clk_unvote(void) +{ + return ipa_proxy_clk_unvote(); +} + +static inline bool __qdf_ipa_is_client_handle_valid(u32 clnt_hdl) +{ + return ipa_is_client_handle_valid(clnt_hdl); +} + +static inline enum ipa_client_type __qdf_ipa_get_client_mapping(int pipe_idx) +{ + return ipa_get_client_mapping(pipe_idx); +} + +static inline enum ipa_rm_resource_name __qdf_ipa_get_rm_resource_from_ep( + int pipe_idx) +{ + return ipa_get_rm_resource_from_ep(pipe_idx); +} + +static inline bool __qdf_ipa_get_modem_cfg_emb_pipe_flt(void) +{ + return ipa_get_modem_cfg_emb_pipe_flt(); +} + +static inline enum ipa_transport_type __qdf_ipa_get_transport_type(void) +{ + return ipa_get_transport_type(); +} + +static inline struct device *__qdf_ipa_get_dma_dev(void) +{ + return ipa_get_dma_dev(); +} + +static inline struct iommu_domain *__qdf_ipa_get_smmu_domain(void) +{ + return ipa_get_smmu_domain(); +} + +static inline int __qdf_ipa_create_wdi_mapping(u32 num_buffers, + __qdf_ipa_wdi_buffer_info_t *info) +{ + return ipa_create_wdi_mapping(num_buffers, info); +} + +static inline int __qdf_ipa_release_wdi_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info) +{ + return ipa_release_wdi_mapping(num_buffers, info); +} + +static inline int __qdf_ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count) +{ + return ipa_disable_apps_wan_cons_deaggr(agg_size, agg_count); +} + +static inline const struct ipa_gsi_ep_config *__qdf_ipa_get_gsi_ep_info(enum ipa_client_type client) +{ + return ipa_get_gsi_ep_info(client); +} + +static inline int __qdf_ipa_stop_gsi_channel(u32 clnt_hdl) +{ + return ipa_stop_gsi_channel(clnt_hdl); +} + +static inline int __qdf_ipa_register_ipa_ready_cb( + void (*ipa_ready_cb)(void *user_data), + void *user_data) +{ + return ipa_register_ipa_ready_cb(ipa_ready_cb, user_data); +} + +#ifdef FEATURE_METERING +static inline int __qdf_ipa_broadcast_wdi_quota_reach_ind(uint32_t index, + uint64_t quota_bytes) +{ + return ipa_broadcast_wdi_quota_reach_ind(index, quota_bytes); +} +#endif + +void __qdf_ipa_set_meta_msg_type(__qdf_ipa_msg_meta_t *meta, int type); + +#ifdef ENABLE_SMMU_S1_TRANSLATION +/** + * __qdf_get_ipa_smmu_enabled() - to get IPA SMMU enable status + * + * Return: true when IPA SMMU enabled, otherwise false + */ +static bool __qdf_get_ipa_smmu_enabled(void) +{ + struct ipa_smmu_in_params params_in; + struct ipa_smmu_out_params params_out; + + params_in.smmu_client = IPA_SMMU_WLAN_CLIENT; + ipa_get_smmu_params(¶ms_in, ¶ms_out); + + return params_out.smmu_enable; +} +#endif + +#ifdef IPA_LAN_RX_NAPI_SUPPORT +/** + * ipa_get_lan_rx_napi() - Check if NAPI is enabled in LAN RX DP + * + * Returns: true if enabled, false otherwise + */ +static inline bool __qdf_ipa_get_lan_rx_napi(void) +{ + return ipa_get_lan_rx_napi(); +} +#endif /* IPA_LAN_RX_NAPI_SUPPORT */ +#endif /* IPA_OFFLOAD */ +#endif /* _I_QDF_IPA_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ipa_wdi3.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ipa_wdi3.h new file mode 100644 index 0000000000000000000000000000000000000000..b7d8d79b002f7ec16ce970306b0ff1a74e8a112d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ipa_wdi3.h @@ -0,0 +1,629 @@ +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_ipa_wdi3.h + * This file provides OS dependent IPA WDI APIs. + */ + +#ifndef I_QDF_IPA_WDI_H +#define I_QDF_IPA_WDI_H + +#ifdef IPA_OFFLOAD + +#include /* QDF_STATUS */ +#include + +#ifdef CONFIG_IPA_WDI_UNIFIED_API + +/** + * __qdf_ipa_wdi_version_t - IPA WDI version + */ +typedef enum ipa_wdi_version __qdf_ipa_wdi_version_t; + +/** + * __qdf_ipa_wdi_init_in_params_t - wdi init input parameters + */ +typedef struct ipa_wdi_init_in_params __qdf_ipa_wdi_init_in_params_t; + +#define __QDF_IPA_WDI_INIT_IN_PARAMS_WDI_VERSION(in_params) \ + (((struct ipa_wdi_init_in_params *)(in_params))->wdi_version) +#define __QDF_IPA_WDI_INIT_IN_PARAMS_NOTIFY(in_params) \ + (((struct ipa_wdi_init_in_params *)(in_params))->notify) +#define __QDF_IPA_WDI_INIT_IN_PARAMS_PRIV(in_params) \ + (((struct ipa_wdi_init_in_params *)(in_params))->priv) +#define __QDF_IPA_WDI_INIT_IN_PARAMS_WDI_NOTIFY(in_params) \ + (((struct ipa_wdi_init_in_params *)(in_params))->wdi_notify) + +/** + * __qdf_ipa_wdi_init_out_params_t - wdi init output parameters + */ +typedef struct ipa_wdi_init_out_params __qdf_ipa_wdi_init_out_params_t; + +#define __QDF_IPA_WDI_INIT_OUT_PARAMS_IS_UC_READY(out_params) \ + (((struct ipa_wdi_init_out_params *)(out_params))->is_uC_ready) +#define __QDF_IPA_WDI_INIT_OUT_PARAMS_IS_SMMU_ENABLED(out_params) \ + (((struct ipa_wdi_init_out_params *)(out_params))->is_smmu_enabled) +#ifdef IPA_WDI3_GSI +#define QDF_IPA_WDI_INIT_OUT_PARAMS_IS_OVER_GSI(out_params) \ + (((struct ipa_wdi_init_out_params *)(out_params))->is_over_gsi) +#else +#define QDF_IPA_WDI_INIT_OUT_PARAMS_IS_OVER_GSI(out_params) \ + false +#endif + +/** + * __qdf_ipa_wdi_hdr_info_t - Header to install on IPA HW + */ +typedef struct ipa_wdi_hdr_info __qdf_ipa_wdi_hdr_info_t; + +#define __QDF_IPA_WDI_HDR_INFO_HDR(hdr_info) \ + (((struct ipa_wdi_hdr_info *)(hdr_info))->hdr) +#define __QDF_IPA_WDI_HDR_INFO_HDR_LEN(hdr_info) \ + (((struct ipa_wdi_hdr_info *)(hdr_info))->hdr_len) +#define __QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(hdr_info) \ + (((struct ipa_wdi_hdr_info *)(hdr_info))->dst_mac_addr_offset) +#define __QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) \ + (((struct ipa_wdi_hdr_info *)(hdr_info))->hdr_type) + +/** + * __qdf_ipa_wdi_reg_intf_in_params_t - parameters for uC offload + * interface registration + */ +typedef struct ipa_wdi_reg_intf_in_params __qdf_ipa_wdi_reg_intf_in_params_t; + +#define __QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(in) \ + (((struct ipa_wdi_reg_intf_in_params *)(in))->netdev_name) +#define __QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in) \ + (((struct ipa_wdi_reg_intf_in_params *)(in))->hdr_info) +#define __QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(in) \ + (((struct ipa_wdi_reg_intf_in_params *)(in))->alt_dst_pipe) +#define __QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(in) \ + (((struct ipa_wdi_reg_intf_in_params *)(in))->is_meta_data_valid) +#define __QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) \ + (((struct ipa_wdi_reg_intf_in_params *)(in))->meta_data) +#define __QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(in) \ + (((struct ipa_wdi_reg_intf_in_params *)(in))->meta_data_mask) + +typedef struct ipa_ep_cfg __qdf_ipa_ep_cfg_t; + +#define __QDF_IPA_EP_CFG_NAT_EN(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->nat.nat_en) +#define __QDF_IPA_EP_CFG_HDR_LEN(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->hdr.hdr_len) +#define __QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->hdr.hdr_ofst_metadata_valid) +#define __QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->hdr.hdr_metadata_reg_valid) +#define __QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->hdr.hdr_ofst_pkt_size_valid) +#define __QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->hdr.hdr_ofst_pkt_size) +#define __QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->hdr.hdr_additional_const_len) +#define __QDF_IPA_EP_CFG_MODE(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->mode.mode) +#define __QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->hdr_ext.hdr_little_endian) + +/** + * __qdf_ipa_wdi_pipe_setup_info_t - WDI TX/Rx configuration + */ +typedef struct ipa_wdi_pipe_setup_info __qdf_ipa_wdi_pipe_setup_info_t; + +#define __QDF_IPA_WDI_SETUP_INFO_EP_CFG(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->ipa_ep_cfg) + +#define __QDF_IPA_WDI_SETUP_INFO_CLIENT(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->client) +#define __QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->transfer_ring_base_pa) +#define __QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->transfer_ring_size) +#define __QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->transfer_ring_doorbell_pa) +#define __QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->is_txr_rn_db_pcie_addr) +#define __QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->event_ring_base_pa) +#define __QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->event_ring_size) +#define __QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->event_ring_doorbell_pa) +#define __QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->is_evt_rn_db_pcie_addr) +#define __QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->num_pkt_buffers) +#define __QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->pkt_offset) +#define __QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->desc_format_template) + +/** + * __qdf_ipa_wdi_pipe_setup_info_smmu_t - WDI TX/Rx configuration + */ +typedef struct ipa_wdi_pipe_setup_info_smmu __qdf_ipa_wdi_pipe_setup_info_smmu_t; + +#define __QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->ipa_ep_cfg) + +#define __QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->client) +#define __QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->transfer_ring_base) +#define __QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->transfer_ring_size) +#define __QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->transfer_ring_doorbell_pa) +#define __QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *) \ + (txrx))->is_txr_rn_db_pcie_addr) +#define __QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->event_ring_base) +#define __QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->event_ring_size) +#define __QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->event_ring_doorbell_pa) +#define __QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *) \ + (txrx))->is_evt_rn_db_pcie_addr) +#define __QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->num_pkt_buffers) +#define __QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->pkt_offset) +#define __QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->desc_format_template) + +/** + * __qdf_ipa_wdi_conn_in_params_t - information provided by + * uC offload client + */ +typedef struct ipa_wdi_conn_in_params __qdf_ipa_wdi_conn_in_params_t; + +#define __QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->notify) +#define __QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->priv) +#define __QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->is_smmu_enabled) +#define __QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->num_sys_pipe_needed) +#define __QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->sys_in) +#define __QDF_IPA_WDI_CONN_IN_PARAMS_TX(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->u_tx.tx) +#define __QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->u_tx.tx_smmu) +#define __QDF_IPA_WDI_CONN_IN_PARAMS_RX(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->u_rx.rx) +#define __QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->u_rx.rx_smmu) + +/** + * __qdf_ipa_wdi_conn_out_params_t - information provided + * to WLAN druver + */ +typedef struct ipa_wdi_conn_out_params __qdf_ipa_wdi_conn_out_params_t; + +#define __QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(pipe_out) \ + (((struct ipa_wdi_conn_out_params *)(pipe_out))->tx_uc_db_pa) +#define __QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(pipe_out) \ + (((struct ipa_wdi_conn_out_params *)(pipe_out))->rx_uc_db_pa) + +/** + * __qdf_ipa_wdi_perf_profile_t - To set BandWidth profile + */ +typedef struct ipa_wdi_perf_profile __qdf_ipa_wdi_perf_profile_t; + +#define __QDF_IPA_WDI_PERF_PROFILE_CLIENT(profile) \ + (((struct ipa_wdi_perf_profile *)(profile))->client) +#define __QDF_IPA_WDI_PERF_PROFILE_MAX_SUPPORTED_BW_MBPS(profile) \ + (((struct ipa_wdi_perf_profile *)(profile))->max_supported_bw_mbps) + +/** + * __qdf_ipa_wdi_init - Client should call this function to + * init WDI IPA offload data path + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_init(struct ipa_wdi_init_in_params *in, + struct ipa_wdi_init_out_params *out) +{ + return ipa_wdi_init(in, out); +} + +/** + * __qdf_ipa_wdi_cleanup - Client should call this function to + * clean up WDI IPA offload data path + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_cleanup(void) +{ + return ipa_wdi_cleanup(); +} + +/** + * __qdf_ipa_wdi_reg_intf - Client should call this function to + * init WDI IPA offload data path + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_reg_intf( + struct ipa_wdi_reg_intf_in_params *in) +{ + return ipa_wdi_reg_intf(in); +} + +/** + * __qdf_ipa_wdi_dereg_intf - Client Driver should call this + * function to deregister before unload and after disconnect + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_dereg_intf(const char *netdev_name) +{ + return ipa_wdi_dereg_intf(netdev_name); +} + +/** + * __qdf_ipa_wdi_conn_pipes - Client should call this + * function to connect pipes + * + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in, + struct ipa_wdi_conn_out_params *out) +{ + return ipa_wdi_conn_pipes(in, out); +} + +/** + * __qdf_ipa_wdi_disconn_pipes() - Client should call this + * function to disconnect pipes + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_disconn_pipes(void) +{ + return ipa_wdi_disconn_pipes(); +} + +/** + * __qdf_ipa_wdi_enable_pipes() - Client should call this + * function to enable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_enable_pipes(void) +{ + return ipa_wdi_enable_pipes(); +} + +/** + * __qdf_ipa_wdi_disable_pipes() - Client should call this + * function to disable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_disable_pipes(void) +{ + return ipa_wdi_disable_pipes(); +} + +/** + * __qdf_ipa_wdi_set_perf_profile() - Client should call this function to + * set IPA clock bandwidth based on data rates + * + * @profile: [in] BandWidth profile to use + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_set_perf_profile( + struct ipa_wdi_perf_profile *profile) +{ + return ipa_wdi_set_perf_profile(profile); +} + +/** + * __qdf_ipa_wdi_create_smmu_mapping() - Client should call this function to + * create smmu mapping + * + * @num_buffers: [in] number of buffers + * @info: [in] wdi buffer info + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_create_smmu_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info) +{ + return ipa_wdi_create_smmu_mapping(num_buffers, info); +} + +/** + * __qdf_ipa_wdi_release_smmu_mapping() - Client should call this function to + * release smmu mapping + * + * @num_buffers: [in] number of buffers + * @info: [in] wdi buffer info + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_release_smmu_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info) +{ + return ipa_wdi_release_smmu_mapping(num_buffers, info); +} + +#ifdef WDI3_STATS_UPDATE +/** + * __qdf_ipa_wdi_wlan_stats() - Client should call this function to + * send Tx byte counts to IPA driver + * @tx_stats: number of Tx bytes on STA and SAP + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_wlan_stats(struct ipa_wdi_tx_info *tx_stats) +{ + return ipa_wdi_sw_stats(tx_stats); +} + +/** + * ipa_uc_bw_monitor() - start/stop uc bw monitoring + * @bw_info: set bw info levels to monitor + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_uc_bw_monitor(struct ipa_wdi_bw_info *bw_info) +{ + return ipa_uc_bw_monitor(bw_info); +} +#endif +#else /* CONFIG_IPA_WDI_UNIFIED_API */ + +/** + * __qdf_ipa_wdi_hdr_info_t - Header to install on IPA HW + */ +typedef struct ipa_wdi3_hdr_info __qdf_ipa_wdi_hdr_info_t; + +#define __QDF_IPA_WDI_HDR_INFO_HDR(hdr_info) \ + (((struct ipa_wdi3_hdr_info *)(hdr_info))->hdr) +#define __QDF_IPA_WDI_HDR_INFO_HDR_LEN(hdr_info) \ + (((struct ipa_wdi3_hdr_info *)(hdr_info))->hdr_len) +#define __QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(hdr_info) \ + (((struct ipa_wdi3_hdr_info *)(hdr_info))->dst_mac_addr_offset) +#define __QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) \ + (((struct ipa_wdi3_hdr_info *)(hdr_info))->hdr_type) + +/** + * __qdf_ipa_wdi_reg_intf_in_params_t - parameters for uC offload + * interface registration + */ +typedef struct ipa_wdi3_reg_intf_in_params __qdf_ipa_wdi_reg_intf_in_params_t; + +#define __QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(in) \ + (((struct ipa_wdi3_reg_intf_in_params *)(in))->netdev_name) +#define __QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in) \ + (((struct ipa_wdi3_reg_intf_in_params *)(in))->hdr_info) +#define __QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(in) \ + (((struct ipa_wdi3_reg_intf_in_params *)(in))->is_meta_data_valid) +#define __QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) \ + (((struct ipa_wdi3_reg_intf_in_params *)(in))->meta_data) +#define __QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(in) \ + (((struct ipa_wdi3_reg_intf_in_params *)(in))->meta_data_mask) + +/** + * __qdf_ipa_wdi_setup_info_t - WDI3 TX/Rx configuration + */ +typedef struct ipa_wdi3_setup_info __qdf_ipa_wdi_pipe_setup_info_t; + +#define __QDF_IPA_WDI_SETUP_INFO_NAT_EN(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.nat.nat_en) +#define __QDF_IPA_WDI_SETUP_INFO_HDR_LEN(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.hdr.hdr_len) +#define __QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid) +#define __QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.hdr.hdr_metadata_reg_valid) +#define __QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid) +#define __QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.hdr.hdr_ofst_pkt_size) +#define __QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.hdr.hdr_additional_const_len) +#define __QDF_IPA_WDI_SETUP_INFO_MODE(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.mode.mode) +#define __QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.hdr_ext.hdr_little_endian) + +#define __QDF_IPA_WDI_SETUP_INFO_CLIENT(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->client) +#define __QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->transfer_ring_base_pa) +#define __QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->transfer_ring_size) +#define __QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->transfer_ring_doorbell_pa) +#define __QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->event_ring_base_pa) +#define __QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->event_ring_size) +#define __QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->event_ring_doorbell_pa) +#define __QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->num_pkt_buffers) +#define __QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->pkt_offset) +#define __QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->desc_format_template) + +/** + * __qdf_ipa_wdi_conn_in_params_t - information provided by + * uC offload client + */ +typedef struct ipa_wdi3_conn_in_params __qdf_ipa_wdi_conn_in_params_t; + +#define __QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(pipe_in) \ + (((struct ipa_wdi3_conn_in_params *)(pipe_in))->notify) +#define __QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(pipe_in) \ + (((struct ipa_wdi3_conn_in_params *)(pipe_in))->priv) +#define __QDF_IPA_WDI_CONN_IN_PARAMS_TX(pipe_in) \ + (((struct ipa_wdi3_conn_in_params *)(pipe_in))->tx) +#define __QDF_IPA_WDI_CONN_IN_PARAMS_RX(pipe_in) \ + (((struct ipa_wdi3_conn_in_params *)(pipe_in))->rx) + +/** + * __qdf_ipa_wdi_conn_out_params_t - information provided + * to WLAN druver + */ +typedef struct ipa_wdi3_conn_out_params __qdf_ipa_wdi_conn_out_params_t; + +#define __QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(pipe_out) \ + (((struct ipa_wdi3_conn_out_params *)(pipe_out))->tx_uc_db_pa) +#define __QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(pipe_out) \ + (((struct ipa_wdi3_conn_out_params *)(pipe_out))->tx_uc_db_va) +#define __QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(pipe_out) \ + (((struct ipa_wdi3_conn_out_params *)(pipe_out))->rx_uc_db_pa) + +/** + * __qdf_ipa_wdi_perf_profile_t - To set BandWidth profile + */ +typedef struct ipa_wdi3_perf_profile __qdf_ipa_wdi_perf_profile_t; + +#define __QDF_IPA_WDI_PERF_PROFILE_CLIENT(profile) \ + (((struct ipa_wdi3_perf_profile *)(profile))->client) +#define __QDF_IPA_WDI_PERF_PROFILE_MAX_SUPPORTED_BW_MBPS(profile) \ + (((struct ipa_wdi3_perf_profile *)(profile))->max_supported_bw_mbps) + +/** + * __qdf_ipa_wdi_reg_intf - Client should call this function to + * init WDI3 IPA offload data path + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_reg_intf( + struct ipa_wdi3_reg_intf_in_params *in) +{ + return ipa_wdi3_reg_intf(in); +} + +/** + * __qdf_ipa_wdi_dereg_intf - Client Driver should call this + * function to deregister before unload and after disconnect + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_dereg_intf(const char *netdev_name) +{ + return ipa_wdi3_dereg_intf(netdev_name); +} + +/** + * __qdf_ipa_wdi_conn_pipes - Client should call this + * function to connect pipes + * + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_conn_pipes(struct ipa_wdi3_conn_in_params *in, + struct ipa_wdi3_conn_out_params *out) +{ + return ipa_wdi3_conn_pipes(in, out); +} + +/** + * __qdf_ipa_wdi_disconn_pipes() - Client should call this + * function to disconnect pipes + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_disconn_pipes(void) +{ + return ipa_wdi3_disconn_pipes(); +} + +/** + * __qdf_ipa_wdi_enable_pipes() - Client should call this + * function to enable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_enable_pipes(void) +{ + return ipa_wdi3_enable_pipes(); +} + +/** + * __qdf_ipa_wdi_disable_pipes() - Client should call this + * function to disable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_disable_pipes(void) +{ + return ipa_wdi3_disable_pipes(); +} + +/** + * __qdf_ipa_wdi_set_perf_profile() - Client should call this function to + * set IPA clock bandwidth based on data rates + * + * @profile: [in] BandWidth profile to use + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_set_perf_profile( + struct ipa_wdi3_perf_profile *profile) +{ + return ipa_wdi3_set_perf_profile(profile); +} + +#endif /* CONFIG_IPA_WDI_UNIFIED_API */ + +#endif /* IPA_OFFLOAD */ +#endif /* I_QDF_IPA_WDI_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_list.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_list.h new file mode 100644 index 0000000000000000000000000000000000000000..0f5b0808d2c0b6c5ea5f83f68f1c8e9a2338fca6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_list.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2014-2016, 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_list.h + * This file provides OS dependent list API's. + */ + +#if !defined(__I_QDF_LIST_H) +#define __I_QDF_LIST_H + +#include + +/* Type declarations */ +typedef struct list_head __qdf_list_node_t; + +/* Preprocessor definitions and constants */ + +typedef struct qdf_list_s { + __qdf_list_node_t anchor; + uint32_t count; + uint32_t max_size; +} __qdf_list_t; + +/** + * __qdf_list_create() - Create qdf list and initialize list head + * @list: object of list + * @max_size: max size of the list + * + * Return: none + */ +static inline void __qdf_list_create(__qdf_list_t *list, uint32_t max_size) +{ + INIT_LIST_HEAD(&list->anchor); + list->count = 0; + list->max_size = max_size; +} + +/** + * __qdf_list_size() - gives the size of the list + * @list: object of list + * Return: size of the list + */ +static inline uint32_t __qdf_list_size(__qdf_list_t *list) +{ + return list->count; +} + +/** + * __qdf_list_max_size() - gives the max size of the list + * @list: object of list + * Return: max size of the list + */ +static inline uint32_t __qdf_list_max_size(__qdf_list_t *list) +{ + return list->max_size; +} + +#define __QDF_LIST_ANCHOR(list) ((list).anchor) + +#define __QDF_LIST_NODE_INIT(prev_node, next_node) \ + { .prev = &(prev_node), .next = &(next_node), } + +#define __QDF_LIST_NODE_INIT_SINGLE(node) \ + __QDF_LIST_NODE_INIT(node, node) + +#define __QDF_LIST_INIT(tail, head) \ + { .anchor = __QDF_LIST_NODE_INIT(tail, head), } + +#define __QDF_LIST_INIT_SINGLE(node) \ + __QDF_LIST_INIT(node, node) + +#define __QDF_LIST_INIT_EMPTY(list) \ + __QDF_LIST_INIT_SINGLE(list.anchor) + +#define __qdf_list_for_each(list_ptr, cursor, node_field) \ + list_for_each_entry(cursor, &(list_ptr)->anchor, node_field) + +#define __qdf_list_for_each_del(list_ptr, cursor, next, node_field) \ + list_for_each_entry_safe(cursor, next, &(list_ptr)->anchor, node_field) + +/** + * __qdf_init_list_head() - initialize list head + * @list_head: pointer to list head + * + * Return: none + */ +static inline void __qdf_init_list_head(__qdf_list_node_t *list_head) +{ + INIT_LIST_HEAD(list_head); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_lock.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_lock.h new file mode 100644 index 0000000000000000000000000000000000000000..ecc7114d5d54644999f1b62031364ce9f6552c17 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_lock.h @@ -0,0 +1,356 @@ +/* + * Copyright (c) 2014-2018, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_lock.h + * Linux-specific definitions for QDF Lock API's + */ + +#if !defined(__I_QDF_LOCK_H) +#define __I_QDF_LOCK_H + +/* Include Files */ +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) +#include +#else +#include +#endif +#include +#include + +/* define for flag */ +#define QDF_LINUX_UNLOCK_BH 1 + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +enum { + LOCK_RELEASED = 0x11223344, + LOCK_ACQUIRED, + LOCK_DESTROYED +}; + +/** + * typedef struct - __qdf_mutex_t + * @m_lock: Mutex lock + * @cookie: Lock cookie + * @process_id: Process ID to track lock + * @state: Lock status + * @refcount: Reference count for recursive lock + * @stats: a structure that contains usage statistics + */ +struct qdf_lock_s { + struct mutex m_lock; + uint32_t cookie; + int process_id; + uint32_t state; + uint8_t refcount; + struct lock_stats stats; +}; + +typedef struct qdf_lock_s __qdf_mutex_t; + +/** + * typedef struct - qdf_spinlock_t + * @spinlock: Spin lock + * @flags: Lock flag + */ +typedef struct __qdf_spinlock { + spinlock_t spinlock; + unsigned long flags; +} __qdf_spinlock_t; + +typedef struct semaphore __qdf_semaphore_t; + +/** + * typedef struct - qdf_wake_lock_t + * @lock: this lock needs to be used in kernel version < 5.4 + * @priv: this lock pointer needs to be used in kernel version >= 5.4 + */ +typedef struct qdf_wake_lock { + struct wakeup_source lock; + struct wakeup_source *priv; +} qdf_wake_lock_t; + +struct hif_pm_runtime_lock; +typedef struct qdf_runtime_lock { + struct hif_pm_runtime_lock *lock; +} qdf_runtime_lock_t; + +#define LINUX_LOCK_COOKIE 0x12345678 + +/* Function declarations and documenation */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37) +/** + * __qdf_semaphore_init() - initialize the semaphore + * @m: Semaphore object + * + * Return: QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m) +{ + init_MUTEX(m); + return QDF_STATUS_SUCCESS; +} +#else +static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m) +{ + sema_init(m, 1); + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * __qdf_semaphore_acquire() - acquire semaphore + * @m: Semaphore object + * + * Return: 0 + */ +static inline int __qdf_semaphore_acquire(struct semaphore *m) +{ + down(m); + return 0; +} + +/** + * __qdf_semaphore_acquire_intr() - down_interruptible allows a user-space + * process that is waiting on a semaphore to be interrupted by the user. + * If the operation is interrupted, the function returns a nonzero value, + * and the caller does not hold the semaphore. + * Always checking the return value and responding accordingly. + * @osdev: OS device handle + * @m: Semaphore object + * + * Return: int + */ +static inline int __qdf_semaphore_acquire_intr(struct semaphore *m) +{ + return down_interruptible(m); +} + +/** + * __qdf_semaphore_release() - release semaphore + * @m: Semaphore object + * + * Return: result of UP operation in integer + */ +static inline void __qdf_semaphore_release(struct semaphore *m) +{ + up(m); +} + +/** + * __qdf_semaphore_acquire_timeout() - Take the semaphore before timeout + * @m: semaphore to take + * @timeout: maximum time to try to take the semaphore + * Return: int + */ +static inline int __qdf_semaphore_acquire_timeout(struct semaphore *m, + unsigned long timeout) +{ + unsigned long jiffie_val = msecs_to_jiffies(timeout); + + return down_timeout(m, jiffie_val); +} + +/** + * __qdf_spinlock_create() - initialize spin lock + * @lock: Spin lock object + * + * Return: QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS __qdf_spinlock_create(__qdf_spinlock_t *lock) +{ + spin_lock_init(&lock->spinlock); + lock->flags = 0; + return QDF_STATUS_SUCCESS; +} + +#define __qdf_spinlock_destroy(lock) + +/** + * __qdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive) + * @lock: Lock object + * + * Return: none + */ +static inline void __qdf_spin_lock(__qdf_spinlock_t *lock) +{ + spin_lock(&lock->spinlock); +} + +/** + * __qdf_spin_unlock() - Unlock the spinlock and enables the Preemption + * @lock: Lock object + * + * Return: none + */ +static inline void __qdf_spin_unlock(__qdf_spinlock_t *lock) +{ + spin_unlock(&lock->spinlock); +} + +/** + * __qdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption + * (Preemptive) and disable IRQs + * @lock: Lock object + * + * Return: none + */ +static inline void __qdf_spin_lock_irqsave(__qdf_spinlock_t *lock) +{ + spin_lock_irqsave(&lock->spinlock, lock->flags); +} + +/** + * __qdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the + * Preemption and enable IRQ + * @lock: Lock object + * + * Return: none + */ +static inline void __qdf_spin_unlock_irqrestore(__qdf_spinlock_t *lock) +{ + spin_unlock_irqrestore(&lock->spinlock, lock->flags); +} + +/* + * Synchronous versions - only for OS' that have interrupt disable + */ +#define __qdf_spin_lock_irq(_p_lock, _flags) spin_lock_irqsave(_p_lock, _flags) +#define __qdf_spin_unlock_irq(_p_lock, _flags) \ + spin_unlock_irqrestore(_p_lock, _flags) + +/** + * __qdf_spin_is_locked(__qdf_spinlock_t *lock) + * @lock: spinlock object + * + * Return: nonzero if lock is held. + */ +static inline int __qdf_spin_is_locked(__qdf_spinlock_t *lock) +{ + return spin_is_locked(&lock->spinlock); +} + +/** + * __qdf_spin_trylock_bh() - spin trylock bottomhalf + * @lock: spinlock object + * + * Return: nonzero if lock is acquired + */ +static inline int __qdf_spin_trylock_bh(__qdf_spinlock_t *lock) +{ + if (likely(irqs_disabled() || in_irq() || in_softirq())) + return spin_trylock(&lock->spinlock); + + if (spin_trylock_bh(&lock->spinlock)) { + lock->flags |= QDF_LINUX_UNLOCK_BH; + return 1; + } + + return 0; +} + +/** + * __qdf_spin_trylock() - spin trylock + * @lock: spinlock object + * + * Return: int + */ +static inline int __qdf_spin_trylock(__qdf_spinlock_t *lock) +{ + return spin_trylock(&lock->spinlock); +} + +/** + * __qdf_spin_lock_bh() - Acquire the spinlock and disable bottom halves + * @lock: Lock object + * + * Return: none + */ +static inline void __qdf_spin_lock_bh(__qdf_spinlock_t *lock) +{ + if (likely(irqs_disabled() || in_irq() || in_softirq())) { + spin_lock(&lock->spinlock); + } else { + spin_lock_bh(&lock->spinlock); + lock->flags |= QDF_LINUX_UNLOCK_BH; + } +} + +/** + * __qdf_spin_unlock_bh() - Release the spinlock and enable bottom halves + * @lock: Lock object + * + * Return: none + */ +static inline void __qdf_spin_unlock_bh(__qdf_spinlock_t *lock) +{ + if (unlikely(lock->flags & QDF_LINUX_UNLOCK_BH)) { + lock->flags &= (unsigned long)~QDF_LINUX_UNLOCK_BH; + spin_unlock_bh(&lock->spinlock); + } else + spin_unlock(&lock->spinlock); +} + +/** + * __qdf_spinlock_irq_exec - Execute the input function with spinlock held and interrupt disabled. + * @hdl: OS handle + * @lock: spinlock to be held for the critical region + * @func: critical region function that to be executed + * @context: context of the critical region function + * @return - Boolean status returned by the critical region function + */ +static inline bool __qdf_spinlock_irq_exec(qdf_handle_t hdl, + __qdf_spinlock_t *lock, + qdf_irqlocked_func_t func, + void *arg) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&lock->spinlock, flags); + ret = func(arg); + spin_unlock_irqrestore(&lock->spinlock, flags); + + return ret; +} + +/** + * __qdf_in_softirq() - in soft irq context + * + * Return: true if in softirs context else false + */ +static inline bool __qdf_in_softirq(void) +{ + return in_softirq(); +} + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __I_QDF_LOCK_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_lro.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_lro.h new file mode 100644 index 0000000000000000000000000000000000000000..cf2ff42ce75af1c16600c353b1ad2ade3a4780ef --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_lro.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_lro.h + * This file provides OS dependent LRO API's. + */ + +#ifndef _I_QDF_LRO_H +#define _I_QDF_LRO_H + +#if defined(FEATURE_LRO) +#include +#include +#include + +#include + +/** + * qdf_lro_desc_entry - defines the LRO descriptor + * element stored in the list + * @lro_node: node of the list + * @lro_desc: the LRO descriptor contained in this list entry + */ +struct qdf_lro_desc_entry { + struct list_head lro_node; + struct net_lro_desc *lro_desc; +}; + +/** + * qdf_lro_desc_pool - pool of free LRO descriptors + * @lro_desc_array: array of LRO descriptors allocated + * @lro_free_list_head: head of the list + * @lro_pool_lock: lock to protect access to the list + */ +struct qdf_lro_desc_pool { + struct qdf_lro_desc_entry *lro_desc_array; + struct list_head lro_free_list_head; +}; + +/** + * qdf_lro_desc_table - defines each entry of the LRO hash table + * @lro_desc_list: list of LRO descriptors + */ +struct qdf_lro_desc_table { + struct list_head lro_desc_list; +}; + +/** + * qdf_lro_desc_info - structure containing the LRO descriptor + * information + * @lro_hash_table: hash table used for a quick desc. look-up + * @lro_hash_lock: lock to protect access to the hash table + * @lro_desc_pool: Free pool of LRO descriptors + */ +struct qdf_lro_desc_info { + struct qdf_lro_desc_table *lro_hash_table; + struct qdf_lro_desc_pool lro_desc_pool; +}; + +/** + * qdf_lro_info_s - LRO information + * @lro_mgr: LRO manager + * @lro_desc_info: LRO descriptor information + * @lro_mgr_arr_access_lock: Lock to access LRO manager array. + * @lro_stats: LRO statistics + */ +struct qdf_lro_s { + struct net_lro_mgr *lro_mgr; + struct qdf_lro_desc_info lro_desc_info; +}; + +typedef struct qdf_lro_s *__qdf_lro_ctx_t; + +/* LRO_DESC_TABLE_SZ must be a power of 2 */ +#define QDF_LRO_DESC_TABLE_SZ 16 +#define QDF_LRO_DESC_TABLE_SZ_MASK (QDF_LRO_DESC_TABLE_SZ - 1) +#define QDF_LRO_DESC_POOL_SZ 10 + +#define QDF_LRO_DESC_TABLE_SZ 16 +#define QDF_LRO_DESC_TABLE_SZ_MASK (QDF_LRO_DESC_TABLE_SZ - 1) +#define QDF_LRO_DESC_POOL_SZ 10 + +#define QDF_LRO_MAX_AGGR_SIZE 100 + +#else + +struct qdf_lro_s {}; + +typedef struct qdf_lro_s *__qdf_lro_ctx_t; + +#endif /* FEATURE_LRO */ +#endif /*_I_QDF_NET_BUF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mc_timer.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mc_timer.h new file mode 100644 index 0000000000000000000000000000000000000000..e0ccaf9ad2bdef1097b394c95eeafb1cde81e451 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mc_timer.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_mc_timer.h + * Linux-specific definitions for QDF timers serialized to MC thread + */ + +#if !defined(__I_QDF_MC_TIMER_H) +#define __I_QDF_MC_TIMER_H + +/* Include Files */ +#include +#include +#include +#include +#include +#include + +/* Preprocessor definitions and constants */ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ +/* Type declarations */ + +typedef struct qdf_mc_timer_platform_s { + struct timer_list timer; + int thread_id; + uint32_t cookie; + qdf_spinlock_t spinlock; +} qdf_mc_timer_platform_t; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __I_QDF_MC_TIMER_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mem.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mem.h new file mode 100644 index 0000000000000000000000000000000000000000..cdb5b3e8f870723adfd4ced1cc6390f6d1ad85d8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mem.h @@ -0,0 +1,511 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_mem.h + * Linux-specific definitions for QDF memory API's + */ + +#ifndef __I_QDF_MEM_H +#define __I_QDF_MEM_H + +#ifdef __KERNEL__ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33) +#include +#else +#include +#endif +#endif +#include +#include +#include +#include /* pci_alloc_consistent */ +#include /* L1_CACHE_BYTES */ + +#define __qdf_cache_line_sz L1_CACHE_BYTES +#include "queue.h" + +#else +/* + * Provide dummy defs for kernel data types, functions, and enums + * used in this header file. + */ +#define GFP_KERNEL 0 +#define GFP_ATOMIC 0 +#define kzalloc(size, flags) NULL +#define vmalloc(size) NULL +#define kfree(buf) +#define vfree(buf) +#define pci_alloc_consistent(dev, size, paddr) NULL +#define __qdf_mempool_t void* +#define QDF_RET_IP NULL +#endif /* __KERNEL__ */ +#include + +#if IS_ENABLED(CONFIG_ARM_SMMU) +#include +#ifdef ENABLE_SMMU_S1_TRANSLATION +#include +#endif +#include +#endif + +#ifdef __KERNEL__ +typedef struct mempool_elem { + STAILQ_ENTRY(mempool_elem) mempool_entry; +} mempool_elem_t; + +/** + * typedef __qdf_mempool_ctxt_t - Memory pool context + * @pool_id: pool identifier + * @flags: flags + * @elem_size: size of each pool element in bytes + * @pool_mem: pool_addr address of the pool created + * @mem_size: Total size of the pool in bytes + * @free_list: free pool list + * @lock: spinlock object + * @max_elem: Maximum number of elements in tha pool + * @free_cnt: Number of free elements available + */ +typedef struct __qdf_mempool_ctxt { + int pool_id; + u_int32_t flags; + size_t elem_size; + void *pool_mem; + u_int32_t mem_size; + + STAILQ_HEAD(, mempool_elem) free_list; + spinlock_t lock; + u_int32_t max_elem; + u_int32_t free_cnt; +} __qdf_mempool_ctxt_t; + +#endif /* __KERNEL__ */ + +#define __page_size ((size_t)PAGE_SIZE) +#define __qdf_align(a, mask) ALIGN(a, mask) + +#ifdef DISABLE_MEMDEBUG_PANIC +#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \ + do { \ + /* no-op */ \ + } while (false) +#else +#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \ + QDF_DEBUG_PANIC(reason_fmt, ## args) +#endif + +/* typedef for dma_data_direction */ +typedef enum dma_data_direction __dma_data_direction; + +/** + * __qdf_dma_dir_to_os() - Convert DMA data direction to OS specific enum + * @dir: QDF DMA data direction + * + * Return: + * enum dma_data_direction + */ +static inline +enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir) +{ + switch (qdf_dir) { + case QDF_DMA_BIDIRECTIONAL: + return DMA_BIDIRECTIONAL; + case QDF_DMA_TO_DEVICE: + return DMA_TO_DEVICE; + case QDF_DMA_FROM_DEVICE: + return DMA_FROM_DEVICE; + default: + return DMA_NONE; + } +} + + +/** + * __qdf_mem_map_nbytes_single - Map memory for DMA + * @osdev: pomter OS device context + * @buf: pointer to memory to be dma mapped + * @dir: DMA map direction + * @nbytes: number of bytes to be mapped. + * @phy_addr: ponter to recive physical address. + * + * Return: success/failure + */ +static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev, + void *buf, qdf_dma_dir_t dir, + int nbytes, + qdf_dma_addr_t *phy_addr) +{ + /* assume that the OS only provides a single fragment */ + *phy_addr = dma_map_single(osdev->dev, buf, nbytes, + __qdf_dma_dir_to_os(dir)); + return dma_mapping_error(osdev->dev, *phy_addr) ? + QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) +static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev, + qdf_dma_addr_t buf, + qdf_dma_dir_t dir, + int nbytes) +{ + dma_cache_sync(osdev->dev, buf, nbytes, __qdf_dma_dir_to_os(dir)); +} +#else +static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev, + qdf_dma_addr_t buf, + qdf_dma_dir_t dir, + int nbytes) +{ + dma_sync_single_for_cpu(osdev->dev, buf, nbytes, + __qdf_dma_dir_to_os(dir)); +} +#endif + +/** + * __qdf_mem_unmap_nbytes_single() - un_map memory for DMA + * + * @osdev: pomter OS device context + * @phy_addr: physical address of memory to be dma unmapped + * @dir: DMA unmap direction + * @nbytes: number of bytes to be unmapped. + * + * Return - none + */ +static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev, + qdf_dma_addr_t phy_addr, + qdf_dma_dir_t dir, int nbytes) +{ + dma_unmap_single(osdev->dev, phy_addr, nbytes, + __qdf_dma_dir_to_os(dir)); +} +#ifdef __KERNEL__ + +typedef __qdf_mempool_ctxt_t *__qdf_mempool_t; + +int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool, int pool_cnt, + size_t pool_entry_size, u_int32_t flags); +void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool); +void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool); +void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf); +#define QDF_RET_IP ((void *)_RET_IP_) + +#define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size) +#endif + +/** + * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status + * @osdev parent device instance + * + * Return: true if smmu s1 enabled, false if smmu s1 is bypassed + */ +static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev) +{ + return osdev->smmu_s1_enabled; +} + +#if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) +/** + * __qdf_dev_get_domain() - get iommu domain from osdev + * @osdev: parent device instance + * + * Return: iommu domain + */ +static inline struct iommu_domain * +__qdf_dev_get_domain(qdf_device_t osdev) +{ + return osdev->domain; +} +#else +static inline struct iommu_domain * +__qdf_dev_get_domain(qdf_device_t osdev) +{ + if (osdev->iommu_mapping) + return osdev->iommu_mapping->domain; + + return NULL; +} +#endif + +/** + * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr + * @osdev: parent device instance + * @dma_addr: dma_addr + * + * Get actual physical address from dma_addr based on SMMU enablement status. + * IF SMMU Stage 1 translation is enabled, DMA APIs return IO virtual address + * (IOVA) otherwise returns physical address. So get SMMU physical address + * mapping from IOVA. + * + * Return: dmaable physical address + */ +static inline unsigned long +__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev, + qdf_dma_addr_t dma_addr) +{ + struct iommu_domain *domain; + + if (__qdf_mem_smmu_s1_enabled(osdev)) { + domain = __qdf_dev_get_domain(osdev); + if (domain) + return iommu_iova_to_phys(domain, dma_addr); + } + + return dma_addr; +} +#else +static inline unsigned long +__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev, + qdf_dma_addr_t dma_addr) +{ + return dma_addr; +} +#endif + +/** + * __qdf_os_mem_dma_get_sgtable() - Returns DMA memory scatter gather table + * @dev: device instace + * @sgt: scatter gather table pointer + * @cpu_addr: HLOS virtual address + * @dma_addr: dma/iova + * @size: allocated memory size + * + * Return: physical address + */ +static inline int +__qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr, + qdf_dma_addr_t dma_addr, size_t size) +{ + return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr, + size); +} + +/** + * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table + * @sgt: the mapped sg table header + * + * Return: None + */ +static inline void +__qdf_os_mem_free_sgtable(struct sg_table *sgt) +{ + sg_free_table(sgt); +} + +/** + * __qdf_dma_get_sgtable_dma_addr()-Assigns DMA address to scatterlist elements + * @sgt: scatter gather table pointer + * + * Return: None + */ +static inline void +__qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + if (!sg) + break; + + sg->dma_address = sg_phys(sg); + } +} + +/** + * __qdf_mem_get_dma_addr() - Return dma addr based on SMMU translation status + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * + * Based on smmu stage 1 translation enablement status, return corresponding dma + * address from qdf_mem_info_t. If stage 1 translation enabled, return + * IO virtual address otherwise return physical address. + * + * Return: dma address + */ +static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev, + qdf_mem_info_t *mem_info) +{ + if (__qdf_mem_smmu_s1_enabled(osdev)) + return (qdf_dma_addr_t)mem_info->iova; + else + return (qdf_dma_addr_t)mem_info->pa; +} + +/** + * __qdf_mem_get_dma_addr_ptr() - Return DMA address storage pointer + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * + * Based on smmu stage 1 translation enablement status, return corresponding + * dma address pointer from qdf_mem_info_t structure. If stage 1 translation + * enabled, return pointer to IO virtual address otherwise return pointer to + * physical address + * + * Return: dma address storage pointer + */ +static inline qdf_dma_addr_t * +__qdf_mem_get_dma_addr_ptr(qdf_device_t osdev, + qdf_mem_info_t *mem_info) +{ + if (__qdf_mem_smmu_s1_enabled(osdev)) + return (qdf_dma_addr_t *)(&mem_info->iova); + else + return (qdf_dma_addr_t *)(&mem_info->pa); +} + +/** + * __qdf_update_mem_map_table() - Update DMA memory map info + * @osdev: Parent device instance + * @mem_info: Pointer to shared memory information + * @dma_addr: dma address + * @mem_size: memory size allocated + * + * Store DMA shared memory information + * + * Return: none + */ +static inline void __qdf_update_mem_map_table(qdf_device_t osdev, + qdf_mem_info_t *mem_info, + qdf_dma_addr_t dma_addr, + uint32_t mem_size) +{ + mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr); + mem_info->iova = dma_addr; + mem_info->size = mem_size; +} + +/** + * __qdf_mem_get_dma_size() - Return DMA memory size + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * + * Return: DMA memory size + */ +static inline uint32_t +__qdf_mem_get_dma_size(qdf_device_t osdev, + qdf_mem_info_t *mem_info) +{ + return mem_info->size; +} + +/** + * __qdf_mem_set_dma_size() - Set DMA memory size + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * @mem_size: memory size allocated + * + * Return: none + */ +static inline void +__qdf_mem_set_dma_size(qdf_device_t osdev, + qdf_mem_info_t *mem_info, + uint32_t mem_size) +{ + mem_info->size = mem_size; +} + +/** + * __qdf_mem_get_dma_size() - Return DMA physical address + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * + * Return: DMA physical address + */ +static inline qdf_dma_addr_t +__qdf_mem_get_dma_pa(qdf_device_t osdev, + qdf_mem_info_t *mem_info) +{ + return mem_info->pa; +} + +/** + * __qdf_mem_set_dma_size() - Set DMA physical address + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * @dma_pa: DMA phsical address + * + * Return: none + */ +static inline void +__qdf_mem_set_dma_pa(qdf_device_t osdev, + qdf_mem_info_t *mem_info, + qdf_dma_addr_t dma_pa) +{ + mem_info->pa = dma_pa; +} + +/** + * __qdf_mem_alloc_consistent() - allocates consistent qdf memory + * @osdev: OS device handle + * @dev: Pointer to device handle + * @size: Size to be allocated + * @paddr: Physical address + * @func: Function name of the call site + * @line: line numbe rof the call site + * + * Return: pointer of allocated memory or null if memory alloc fails + */ +void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev, + qdf_size_t size, qdf_dma_addr_t *paddr, + const char *func, uint32_t line); + +/** + * __qdf_mem_malloc() - allocates QDF memory + * @size: Number of bytes of memory to allocate. + * + * @func: Function name of the call site + * @line: line numbe rof the call site + * + * This function will dynamicallly allocate the specified number of bytes of + * memory. + * + * Return: + * Upon successful allocate, returns a non-NULL pointer to the allocated + * memory. If this function is unable to allocate the amount of memory + * specified (for any reason) it returns NULL. + */ +void *__qdf_mem_malloc(qdf_size_t size, const char *func, uint32_t line); + +/** + * __qdf_mem_free() - free QDF memory + * @ptr: Pointer to the starting address of the memory to be freed. + * + * This function will free the memory pointed to by 'ptr'. + * Return: None + */ +void __qdf_mem_free(void *ptr); + +/** + * __qdf_mem_free_consistent() - free consistent qdf memory + * @osdev: OS device handle + * @dev: Pointer to device handle + * @size: Size to be allocated + * @vaddr: virtual address + * @paddr: Physical address + * @memctx: Pointer to DMA context + * + * Return: none + */ +void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev, + qdf_size_t size, void *vaddr, + qdf_dma_addr_t paddr, qdf_dma_context_t memctx); + +#endif /* __I_QDF_MEM_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_module.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_module.h new file mode 100644 index 0000000000000000000000000000000000000000..1d049bd41a9ce974fa02bf76235d36e8191d1256 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_module.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_module.h + * Linux-specific definitions for QDF module API's + */ + +#ifndef _I_QDF_MODULE_H +#define _I_QDF_MODULE_H + +#include +#include +#include +#include + + +#define __qdf_virt_module_init(_x) \ + static int _x##_mod(void) \ + { \ + uint32_t st; \ + st = (_x)(); \ + if (st != QDF_STATUS_SUCCESS) \ + return QDF_STATUS_E_INVAL; \ + else \ + return 0; \ + } \ + module_init(_x##_mod); + +#define __qdf_virt_module_exit(_x) module_exit(_x) + +#define __qdf_virt_module_name(_name) MODULE_LICENSE("Dual BSD/GPL") + +#ifdef WLAN_DISABLE_EXPORT_SYMBOL +#define __qdf_export_symbol(_sym) +#else +#define __qdf_export_symbol(_sym) EXPORT_SYMBOL(_sym) +#endif + +#define __qdf_declare_param(_name, _type) \ + module_param(_name, _type, 0600) + +#define __qdf_declare_param_array(_name, _type, _num) \ + module_param_array(_name, _type, _num, 0600) + +#endif /* _I_QDF_MODULE_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf.h new file mode 100644 index 0000000000000000000000000000000000000000..5d6dc425abb43756abc0dab2746fae215244978d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf.h @@ -0,0 +1,2274 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_nbuf.h + * This file provides OS dependent nbuf API's. + */ + +#ifndef _I_QDF_NBUF_H +#define _I_QDF_NBUF_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Use socket buffer as the underlying implementation as skbuf . + * Linux use sk_buff to represent both packet and data, + * so we use sk_buffer to represent both skbuf . + */ +typedef struct sk_buff *__qdf_nbuf_t; + +/** + * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct + * + * This is used for skb queue management via linux skb buff head APIs + */ +typedef struct sk_buff_head __qdf_nbuf_queue_head_t; + +#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1 + +/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS - + * max tx fragments added by the driver + * The driver will always add one tx fragment (the tx descriptor) + */ +#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2 +#define QDF_NBUF_CB_PACKET_TYPE_EAPOL 1 +#define QDF_NBUF_CB_PACKET_TYPE_ARP 2 +#define QDF_NBUF_CB_PACKET_TYPE_WAPI 3 +#define QDF_NBUF_CB_PACKET_TYPE_DHCP 4 +#define QDF_NBUF_CB_PACKET_TYPE_ICMP 5 +#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6 + + +/* mark the first packet after wow wakeup */ +#define QDF_MARK_FIRST_WAKEUP_PACKET 0x80000000 + +/* + * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned + */ +typedef union { + uint64_t u64; + qdf_dma_addr_t dma_addr; +} qdf_paddr_t; + +/** + * struct qdf_nbuf_cb - network buffer control block contents (skb->cb) + * - data passed between layers of the driver. + * + * Notes: + * 1. Hard limited to 48 bytes. Please count your bytes + * 2. The size of this structure has to be easily calculatable and + * consistently so: do not use any conditional compile flags + * 3. Split into a common part followed by a tx/rx overlay + * 4. There is only one extra frag, which represents the HTC/HTT header + * 5. "ext_cb_pt" must be the first member in both TX and RX unions + * for the priv_cb_w since it must be at same offset for both + * TX and RX union + * 6. "ipa.owned" bit must be first member in both TX and RX unions + * for the priv_cb_m since it must be at same offset for both + * TX and RX union. + * + * @paddr : physical addressed retrieved by dma_map of nbuf->data + * + * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer + * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype + * @rx.dev.priv_cb_w.msdu_len: length of RX packet + * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet + * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type + * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd + * + * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer + * @rx.dev.priv_cb_m.flush_ind: flush indication + * @rx.dev.priv_cb_m.packet_buf_pool: packet buff bool + * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset + * @rx.dev.priv_cb_m.exc_frm: exception frame + * @rx.dev.priv_cb_m.reo_dest_ind: reo destination indication + * @rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map + * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number + * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number + * @rx.dev.priv_cb_m.lro_ctx: LRO context + * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet + * @rx.dev.priv_cb_m.dp.wifi3.peer_id: peer_id for RX packet + * @rx.dev.priv_cb_m.dp.wifi2.map_index: + * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA + * + * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible + * @rx.tcp_proto: L4 protocol is TCP + * @rx.tcp_pure_ack: A TCP ACK packet with no payload + * @rx.ipv6_proto: L3 protocol is IPV6 + * @rx.ip_offset: offset to IP header + * @rx.tcp_offset: offset to TCP header + * @rx_ctx_id: Rx context id + * @num_elements_in_list: number of elements in the nbuf list + * + * @rx.tcp_udp_chksum: L4 payload checksum + * @rx.tcp_wim: TCP window size + * + * @rx.flow_id: 32bit flow id + * + * @rx.flag_chfrag_start: first MSDU in an AMSDU + * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU + * @rx.flag_chfrag_end: last MSDU in an AMSDU + * @rx.flag_retry: flag to indicate MSDU is retried + * @rx.flag_da_mcbc: flag to indicate mulicast or broadcast packets + * @rx.flag_da_valid: flag to indicate DA is valid for RX packet + * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet + * @rx.flag_is_frag: flag to indicate skb has frag list + * @rx.rsrvd: reserved + * + * @rx.trace: combined structure for DP and protocol trace + * @rx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)| + * + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)] + * @rx.trace.dp_trace: flag (Datapath trace) + * @rx.trace.packet_track: RX_DATA packet + * @rx.trace.rsrvd: enable packet logging + * + * @rx.vdev_id: vdev_id for RX pkt + * @rx.is_raw_frame: RAW frame + * @rx.fcs_err: FCS error + * @rx.tid_val: tid value + * @rx.reserved: reserved + * @rx.ftype: mcast2ucast, TSO, SG, MESH + * + * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype + * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer + * + * @tx.dev.priv_cb_w.data_attr: value that is programmed in CE descr, includes + * + (1) CE classification enablement bit + * + (2) packet type (802.3 or Ethernet type II) + * + (3) packet offset (usually length of HTC/HTT descr) + * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA + * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA + * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw + * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb + * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map + * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use + * @tx.dev.priv_cb_m.reserved: reserved + * + * @tx.ftype: mcast2ucast, TSO, SG, MESH + * @tx.vdev_id: vdev (for protocol trace) + * @tx.len: length of efrag pointed by the above pointers + * + * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream) + * @tx.flags.bits.num: number of extra frags ( 0 or 1) + * @tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream) + * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU + * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU + * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU + * @tx.flags.bits.flag_ext_header: extended flags + * @tx.flags.bits.reserved: reserved + * @tx.trace: combined structure for DP and protocol trace + * @tx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)| + * + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)] + * @tx.trace.is_packet_priv: + * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK} + * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)| + * + (MGMT_ACTION)] - 4 bits + * @tx.trace.dp_trace: flag (Datapath trace) + * @tx.trace.is_bcast: flag (Broadcast packet) + * @tx.trace.is_mcast: flag (Multicast packet) + * @tx.trace.packet_type: flag (Packet type) + * @tx.trace.htt2_frm: flag (high-latency path only) + * @tx.trace.print: enable packet logging + * + * @tx.vaddr: virtual address of ~ + * @tx.paddr: physical/DMA address of ~ + */ +struct qdf_nbuf_cb { + /* common */ + qdf_paddr_t paddr; /* of skb->data */ + /* valid only in one direction */ + union { + /* Note: MAX: 40 bytes */ + struct { + union { + struct { + void *ext_cb_ptr; + void *fctx; + uint16_t msdu_len; + uint16_t peer_id; + uint16_t protocol_tag; + uint16_t flow_tag; + } priv_cb_w; + struct { + /* ipa_owned bit is common between rx + * control block and tx control block. + * Do not change location of this bit. + */ + uint32_t ipa_owned:1, + peer_cached_buf_frm:1, + flush_ind:1, + packet_buf_pool:1, + reserved:4, + l3_hdr_pad:8, + /* exception frame flag */ + exc_frm:1, + reo_dest_ind:5, + ipa_smmu_map:1, + reserved1:9; + uint32_t tcp_seq_num; + uint32_t tcp_ack_num; + union { + struct { + uint16_t msdu_len; + uint16_t peer_id; + } wifi3; + struct { + uint32_t map_index; + } wifi2; + } dp; + unsigned char *lro_ctx; + } priv_cb_m; + } dev; + uint32_t lro_eligible:1, + tcp_proto:1, + tcp_pure_ack:1, + ipv6_proto:1, + ip_offset:7, + tcp_offset:7, + rx_ctx_id:4, + fcs_err:1, + is_raw_frame:1, + num_elements_in_list:8; + uint32_t tcp_udp_chksum:16, + tcp_win:16; + uint32_t flow_id; + uint8_t flag_chfrag_start:1, + flag_chfrag_cont:1, + flag_chfrag_end:1, + flag_retry:1, + flag_da_mcbc:1, + flag_da_valid:1, + flag_sa_valid:1, + flag_is_frag:1; + union { + uint8_t packet_state; + uint8_t dp_trace:1, + packet_track:4, + rsrvd:3; + } trace; + uint16_t vdev_id:8, + tid_val:4, + ftype:4; + } rx; + + /* Note: MAX: 40 bytes */ + struct { + union { + struct { + void *ext_cb_ptr; + void *fctx; + } priv_cb_w; + struct { + /* ipa_owned bit is common between rx + * control block and tx control block. + * Do not change location of this bit. + */ + struct { + uint32_t owned:1, + priv:31; + } ipa; + uint32_t data_attr; + uint16_t desc_id; + uint16_t mgmt_desc_id; + struct { + uint8_t bi_map:1, + reserved:7; + } dma_option; + uint8_t reserved[3]; + } priv_cb_m; + } dev; + uint8_t ftype; + uint8_t vdev_id; + uint16_t len; + union { + struct { + uint8_t flag_efrag:1, + flag_nbuf:1, + num:1, + flag_chfrag_start:1, + flag_chfrag_cont:1, + flag_chfrag_end:1, + flag_ext_header:1, + flag_notify_comp:1; + } bits; + uint8_t u8; + } flags; + struct { + uint8_t packet_state:7, + is_packet_priv:1; + uint8_t packet_track:4, + proto_type:4; + uint8_t dp_trace:1, + is_bcast:1, + is_mcast:1, + packet_type:3, + /* used only for hl*/ + htt2_frm:1, + print:1; + } trace; + unsigned char *vaddr; + qdf_paddr_t paddr; + } tx; + } u; +}; /* struct qdf_nbuf_cb: MAX 48 bytes */ + +QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size, + (sizeof(struct qdf_nbuf_cb)) <= FIELD_SIZEOF(struct sk_buff, cb)); + +/** + * access macros to qdf_nbuf_cb + * Note: These macros can be used as L-values as well as R-values. + * When used as R-values, they effectively function as "get" macros + * When used as L_values, they effectively function as "set" macros + */ + +#define QDF_NBUF_CB_PADDR(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr) + +#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible) +#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto) +#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack) +#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto) +#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset) +#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset) +#define QDF_NBUF_CB_RX_CTX_ID(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id) +#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list) + +#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum) +#define QDF_NBUF_CB_RX_TCP_WIN(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win) + +#define QDF_NBUF_CB_RX_FLOW_ID(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id) + +#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state) +#define QDF_NBUF_CB_RX_DP_TRACE(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace) + +#define QDF_NBUF_CB_RX_FTYPE(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype) + +#define QDF_NBUF_CB_RX_VDEV_ID(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id) + +#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.flag_chfrag_start) +#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.flag_chfrag_cont) +#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.flag_chfrag_end) + +#define QDF_NBUF_CB_RX_DA_MCBC(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.flag_da_mcbc) + +#define QDF_NBUF_CB_RX_DA_VALID(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.flag_da_valid) + +#define QDF_NBUF_CB_RX_SA_VALID(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.flag_sa_valid) + +#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.flag_retry) + +#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.is_raw_frame) + +#define QDF_NBUF_CB_RX_TID_VAL(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.tid_val) + +#define QDF_NBUF_CB_RX_IS_FRAG(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.flag_is_frag) + +#define QDF_NBUF_CB_RX_FCS_ERR(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.fcs_err) + +#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \ + qdf_nbuf_set_state(skb, PACKET_STATE) + +#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr) + +#define QDF_NBUF_CB_TX_FTYPE(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype) + + +#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len) +#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id) + +/* Tx Flags Accessor Macros*/ +#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.flags.bits.flag_efrag) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.flags.bits.flag_nbuf) +#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.flag_notify_comp) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.flags.bits.flag_chfrag_start) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.flags.bits.flag_chfrag_end) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.flags.bits.flag_ext_header) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8) +/* End of Tx Flags Accessor Macros */ + +/* Tx trace accessor macros */ +#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.trace.packet_state) + +#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.trace.is_packet_priv) + +#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.trace.packet_track) + +#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.trace.packet_track) + +#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.trace.proto_type) + +#define QDF_NBUF_CB_TX_DP_TRACE(skb)\ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace) + +#define QDF_NBUF_CB_DP_TRACE_PRINT(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print) + +#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm) + +#define QDF_NBUF_CB_GET_IS_BCAST(skb)\ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast) + +#define QDF_NBUF_CB_GET_IS_MCAST(skb)\ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast) + +#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type) + +#define QDF_NBUF_CB_SET_BCAST(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.trace.is_bcast = true) + +#define QDF_NBUF_CB_SET_MCAST(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.trace.is_mcast = true) +/* End of Tx trace accessor macros */ + + +#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr) + +/* assume the OS provides a single fragment */ +#define __qdf_nbuf_get_num_frags(skb) \ + (QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1) + +#define __qdf_nbuf_reset_num_frags(skb) \ + (QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0) + +/** + * end of nbuf->cb access macros + */ + +typedef void (*qdf_nbuf_trace_update_t)(char *); +typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t); + +#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb) + +#define __qdf_nbuf_mapped_paddr_set(skb, paddr) \ + (QDF_NBUF_CB_PADDR(skb) = paddr) + +#define __qdf_nbuf_frag_push_head( \ + skb, frag_len, frag_vaddr, frag_paddr) \ + do { \ + QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1; \ + QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr; \ + QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr; \ + QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len; \ + } while (0) + +#define __qdf_nbuf_get_frag_vaddr(skb, frag_num) \ + ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \ + QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data)) + +#define __qdf_nbuf_get_frag_vaddr_always(skb) \ + QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) + +#define __qdf_nbuf_get_frag_paddr(skb, frag_num) \ + ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \ + QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) : \ + /* assume that the OS only provides a single fragment */ \ + QDF_NBUF_CB_PADDR(skb)) + +#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) + +#define __qdf_nbuf_get_frag_len(skb, frag_num) \ + ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \ + QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len) + +#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num) \ + ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) \ + ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb)) \ + : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb))) + +#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm) \ + do { \ + if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) \ + frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS; \ + if (frag_num) \ + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = \ + is_wstrm; \ + else \ + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = \ + is_wstrm; \ + } while (0) + +#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \ + do { \ + QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \ + } while (0) + +#define __qdf_nbuf_get_vdev_ctx(skb) \ + QDF_NBUF_CB_TX_VDEV_CTX((skb)) + +#define __qdf_nbuf_set_tx_ftype(skb, type) \ + do { \ + QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \ + } while (0) + +#define __qdf_nbuf_get_tx_ftype(skb) \ + QDF_NBUF_CB_TX_FTYPE((skb)) + + +#define __qdf_nbuf_set_rx_ftype(skb, type) \ + do { \ + QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \ + } while (0) + +#define __qdf_nbuf_get_rx_ftype(skb) \ + QDF_NBUF_CB_RX_FTYPE((skb)) + +#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \ + ((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val) + +#define __qdf_nbuf_is_rx_chfrag_start(skb) \ + (QDF_NBUF_CB_RX_CHFRAG_START((skb))) + +#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \ + do { \ + (QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \ + } while (0) + +#define __qdf_nbuf_is_rx_chfrag_cont(skb) \ + (QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) + +#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \ + ((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val) + +#define __qdf_nbuf_is_rx_chfrag_end(skb) \ + (QDF_NBUF_CB_RX_CHFRAG_END((skb))) + +#define __qdf_nbuf_set_da_mcbc(skb, val) \ + ((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val) + +#define __qdf_nbuf_is_da_mcbc(skb) \ + (QDF_NBUF_CB_RX_DA_MCBC((skb))) + +#define __qdf_nbuf_set_da_valid(skb, val) \ + ((QDF_NBUF_CB_RX_DA_VALID((skb))) = val) + +#define __qdf_nbuf_is_da_valid(skb) \ + (QDF_NBUF_CB_RX_DA_VALID((skb))) + +#define __qdf_nbuf_set_sa_valid(skb, val) \ + ((QDF_NBUF_CB_RX_SA_VALID((skb))) = val) + +#define __qdf_nbuf_is_sa_valid(skb) \ + (QDF_NBUF_CB_RX_SA_VALID((skb))) + +#define __qdf_nbuf_set_rx_retry_flag(skb, val) \ + ((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val) + +#define __qdf_nbuf_is_rx_retry_flag(skb) \ + (QDF_NBUF_CB_RX_RETRY_FLAG((skb))) + +#define __qdf_nbuf_set_raw_frame(skb, val) \ + ((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val) + +#define __qdf_nbuf_is_raw_frame(skb) \ + (QDF_NBUF_CB_RX_RAW_FRAME((skb))) + +#define __qdf_nbuf_get_tid_val(skb) \ + (QDF_NBUF_CB_RX_TID_VAL((skb))) + +#define __qdf_nbuf_set_tid_val(skb, val) \ + ((QDF_NBUF_CB_RX_TID_VAL((skb))) = val) + +#define __qdf_nbuf_set_is_frag(skb, val) \ + ((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val) + +#define __qdf_nbuf_is_frag(skb) \ + (QDF_NBUF_CB_RX_IS_FRAG((skb))) + +#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \ + ((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val) + +#define __qdf_nbuf_is_tx_chfrag_start(skb) \ + (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) + +#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \ + do { \ + (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \ + } while (0) + +#define __qdf_nbuf_is_tx_chfrag_cont(skb) \ + (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) + +#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \ + ((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val) + +#define __qdf_nbuf_is_tx_chfrag_end(skb) \ + (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) + +#define __qdf_nbuf_trace_set_proto_type(skb, proto_type) \ + (QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type)) + +#define __qdf_nbuf_trace_get_proto_type(skb) \ + QDF_NBUF_CB_TX_PROTO_TYPE(skb) + +#define __qdf_nbuf_data_attr_get(skb) \ + QDF_NBUF_CB_TX_DATA_ATTR(skb) +#define __qdf_nbuf_data_attr_set(skb, data_attr) \ + (QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr)) + +#define __qdf_nbuf_queue_walk_safe(queue, var, tvar) \ + skb_queue_walk_safe(queue, var, tvar) + +/** + * __qdf_nbuf_num_frags_init() - init extra frags + * @skb: sk buffer + * + * Return: none + */ +static inline +void __qdf_nbuf_num_frags_init(struct sk_buff *skb) +{ + QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0; +} + +/* + * prototypes. Implemented in qdf_nbuf.c + */ + +/** + * __qdf_nbuf_alloc() - Allocate nbuf + * @osdev: Device handle + * @size: Netbuf requested size + * @reserve: headroom to start with + * @align: Align + * @prio: Priority + * @func: Function name of the call site + * @line: line number of the call site + * + * This allocates an nbuf aligns if needed and reserves some space in the front, + * since the reserve is done after alignment the reserve value if being + * unaligned will result in an unaligned address. + * + * Return: nbuf or %NULL if no memory + */ +__qdf_nbuf_t +__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align, + int prio, const char *func, uint32_t line); + +void __qdf_nbuf_free(struct sk_buff *skb); +QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev, + struct sk_buff *skb, qdf_dma_dir_t dir); +void __qdf_nbuf_unmap(__qdf_device_t osdev, + struct sk_buff *skb, qdf_dma_dir_t dir); +QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev, + struct sk_buff *skb, qdf_dma_dir_t dir); +void __qdf_nbuf_unmap_single(__qdf_device_t osdev, + struct sk_buff *skb, qdf_dma_dir_t dir); +void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr); +void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr); + +QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap); +void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap); +void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg); +QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb, + qdf_dma_dir_t dir, int nbytes); +void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb, + qdf_dma_dir_t dir, int nbytes); + +void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb, + qdf_dma_dir_t dir); + +QDF_STATUS __qdf_nbuf_map_nbytes_single( + qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes); +void __qdf_nbuf_unmap_nbytes_single( + qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes); +void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg); +uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag); +void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg); +QDF_STATUS __qdf_nbuf_frag_map( + qdf_device_t osdev, __qdf_nbuf_t nbuf, + int offset, qdf_dma_dir_t dir, int cur_frag); +void qdf_nbuf_classify_pkt(struct sk_buff *skb); + +bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb); +bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb); +bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data); +bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf); +bool __qdf_nbuf_data_is_arp_req(uint8_t *data); +bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data); +uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data); +uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data); +uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len); +bool __qdf_nbuf_data_is_dns_query(uint8_t *data); +bool __qdf_nbuf_data_is_dns_response(uint8_t *data); +bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data); +bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data); +bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data); +uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data); +uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data); +bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data); +bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data); +uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data); +uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data); +enum qdf_proto_subtype __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data); +enum qdf_proto_subtype __qdf_nbuf_data_get_eapol_subtype(uint8_t *data); +enum qdf_proto_subtype __qdf_nbuf_data_get_arp_subtype(uint8_t *data); +enum qdf_proto_subtype __qdf_nbuf_data_get_icmp_subtype(uint8_t *data); +enum qdf_proto_subtype __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data); +uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data); +uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data); + +#ifdef QDF_NBUF_GLOBAL_COUNT +int __qdf_nbuf_count_get(void); +void __qdf_nbuf_count_inc(struct sk_buff *skb); +void __qdf_nbuf_count_dec(struct sk_buff *skb); +void __qdf_nbuf_mod_init(void); +void __qdf_nbuf_mod_exit(void); + +#else + +static inline int __qdf_nbuf_count_get(void) +{ + return 0; +} + +static inline void __qdf_nbuf_count_inc(struct sk_buff *skb) +{ + return; +} + +static inline void __qdf_nbuf_count_dec(struct sk_buff *skb) +{ + return; +} + +static inline void __qdf_nbuf_mod_init(void) +{ + return; +} + +static inline void __qdf_nbuf_mod_exit(void) +{ + return; +} +#endif + +/** + * __qdf_to_status() - OS to QDF status conversion + * @error : OS error + * + * Return: QDF status + */ +static inline QDF_STATUS __qdf_to_status(signed int error) +{ + switch (error) { + case 0: + return QDF_STATUS_SUCCESS; + case ENOMEM: + case -ENOMEM: + return QDF_STATUS_E_NOMEM; + default: + return QDF_STATUS_E_NOSUPPORT; + } +} + +/** + * __qdf_nbuf_len() - return the amount of valid data in the skb + * @skb: Pointer to network buffer + * + * This API returns the amount of valid data in the skb, If there are frags + * then it returns total length. + * + * Return: network buffer length + */ +static inline size_t __qdf_nbuf_len(struct sk_buff *skb) +{ + int i, extra_frag_len = 0; + + i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb); + if (i > 0) + extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb); + + return extra_frag_len + skb->len; +} + +/** + * __qdf_nbuf_cat() - link two nbufs + * @dst: Buffer to piggyback into + * @src: Buffer to put + * + * Concat two nbufs, the new buf(src) is piggybacked into the older one. + * It is callers responsibility to free the src skb. + * + * Return: QDF_STATUS (status of the call) if failed the src skb + * is released + */ +static inline QDF_STATUS +__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src) +{ + QDF_STATUS error = 0; + + qdf_assert(dst && src); + + /* + * Since pskb_expand_head unconditionally reallocates the skb->head + * buffer, first check whether the current buffer is already large + * enough. + */ + if (skb_tailroom(dst) < src->len) { + error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC); + if (error) + return __qdf_to_status(error); + } + + memcpy(skb_tail_pointer(dst), src->data, src->len); + skb_put(dst, src->len); + return __qdf_to_status(error); +} + +/* + * nbuf manipulation routines + */ +/** + * __qdf_nbuf_headroom() - return the amount of tail space available + * @buf: Pointer to network buffer + * + * Return: amount of tail room + */ +static inline int __qdf_nbuf_headroom(struct sk_buff *skb) +{ + return skb_headroom(skb); +} + +/** + * __qdf_nbuf_tailroom() - return the amount of tail space available + * @buf: Pointer to network buffer + * + * Return: amount of tail room + */ +static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb) +{ + return skb_tailroom(skb); +} + +/** + * __qdf_nbuf_put_tail() - Puts data in the end + * @skb: Pointer to network buffer + * @size: size to be pushed + * + * Return: data pointer of this buf where new data has to be + * put, or NULL if there is not enough room in this buf. + */ +static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size) +{ + if (skb_tailroom(skb) < size) { + if (unlikely(pskb_expand_head(skb, 0, + size - skb_tailroom(skb), GFP_ATOMIC))) { + dev_kfree_skb_any(skb); + return NULL; + } + } + return skb_put(skb, size); +} + +/** + * __qdf_nbuf_trim_tail() - trim data out from the end + * @skb: Pointer to network buffer + * @size: size to be popped + * + * Return: none + */ +static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size) +{ + return skb_trim(skb, skb->len - size); +} + + +/* + * prototypes. Implemented in qdf_nbuf.c + */ +qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb); +QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, + qdf_nbuf_rx_cksum_t *cksum); +uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb); +void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid); +uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb); +void __qdf_nbuf_ref(struct sk_buff *skb); +int __qdf_nbuf_shared(struct sk_buff *skb); + +/* + * qdf_nbuf_pool_delete() implementation - do nothing in linux + */ +#define __qdf_nbuf_pool_delete(osdev) + +/** + * __qdf_nbuf_clone() - clone the nbuf (copy is readonly) + * @skb: Pointer to network buffer + * + * if GFP_ATOMIC is overkill then we can check whether its + * called from interrupt context and then do it or else in + * normal case use GFP_KERNEL + * + * example use "in_irq() || irqs_disabled()" + * + * Return: cloned skb + */ +static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb) +{ + struct sk_buff *skb_new = NULL; + + skb_new = skb_clone(skb, GFP_ATOMIC); + if (skb_new) + __qdf_nbuf_count_inc(skb_new); + + return skb_new; +} + +/** + * __qdf_nbuf_copy() - returns a private copy of the skb + * @skb: Pointer to network buffer + * + * This API returns a private copy of the skb, the skb returned is completely + * modifiable by callers + * + * Return: skb or NULL + */ +static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb) +{ + struct sk_buff *skb_new = NULL; + + skb_new = skb_copy(skb, GFP_ATOMIC); + if (skb_new) + __qdf_nbuf_count_inc(skb_new); + + return skb_new; +} + +#define __qdf_nbuf_reserve skb_reserve + +/** + * __qdf_nbuf_set_data_pointer() - set buffer data pointer + * @skb: Pointer to network buffer + * @data: data pointer + * + * Return: none + */ +static inline void +__qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data) +{ + skb->data = data; +} + +/** + * __qdf_nbuf_set_len() - set buffer data length + * @skb: Pointer to network buffer + * @len: data length + * + * Return: none + */ +static inline void +__qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len) +{ + skb->len = len; +} + +/** + * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer + * @skb: Pointer to network buffer + * @len: skb data length + * + * Return: none + */ +static inline void +__qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len) +{ + skb_set_tail_pointer(skb, len); +} + +/** + * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue + * @skb: Pointer to network buffer + * @list: list to use + * + * This is a lockless version, driver must acquire locks if it + * needs to synchronize + * + * Return: none + */ +static inline void +__qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list) +{ + __skb_unlink(skb, list); +} + +/** + * __qdf_nbuf_reset() - reset the buffer data and pointer + * @buf: Network buf instance + * @reserve: reserve + * @align: align + * + * Return: none + */ +static inline void +__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align) +{ + int offset; + + skb_push(skb, skb_headroom(skb)); + skb_put(skb, skb_tailroom(skb)); + memset(skb->data, 0x0, skb->len); + skb_trim(skb, 0); + skb_reserve(skb, NET_SKB_PAD); + memset(skb->cb, 0x0, sizeof(skb->cb)); + + /* + * The default is for netbuf fragments to be interpreted + * as wordstreams rather than bytestreams. + */ + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1; + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1; + + /* + * Align & make sure that the tail & data are adjusted properly + */ + + if (align) { + offset = ((unsigned long)skb->data) % align; + if (offset) + skb_reserve(skb, align - offset); + } + + skb_reserve(skb, reserve); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) +/** + * qdf_nbuf_dev_scratch_is_supported() - dev_scratch support for network buffer + * in kernel + * + * Return: true if dev_scratch is supported + * false if dev_scratch is not supported + */ +static inline bool __qdf_nbuf_is_dev_scratch_supported(void) +{ + return true; +} + +/** + * qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer + * @skb: Pointer to network buffer + * + * Return: dev_scratch if dev_scratch supported + * 0 if dev_scratch not supported + */ +static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb) +{ + return skb->dev_scratch; +} + +/** + * qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer + * @skb: Pointer to network buffer + * @value: value to be set in dev_scratch of network buffer + * + * Return: void + */ +static inline void +__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value) +{ + skb->dev_scratch = value; +} +#else +static inline bool __qdf_nbuf_is_dev_scratch_supported(void) +{ + return false; +} + +static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb) +{ + return 0; +} + +static inline void +__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value) +{ +} +#endif /* KERNEL_VERSION(4, 14, 0) */ + +/** + * __qdf_nbuf_head() - return the pointer the skb's head pointer + * @skb: Pointer to network buffer + * + * Return: Pointer to head buffer + */ +static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb) +{ + return skb->head; +} + +/** + * __qdf_nbuf_data() - return the pointer to data header in the skb + * @skb: Pointer to network buffer + * + * Return: Pointer to skb data + */ +static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb) +{ + return skb->data; +} + +static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb) +{ + return (uint8_t *)&skb->data; +} + +/** + * __qdf_nbuf_get_protocol() - return the protocol value of the skb + * @skb: Pointer to network buffer + * + * Return: skb protocol + */ +static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb) +{ + return skb->protocol; +} + +/** + * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb + * @skb: Pointer to network buffer + * + * Return: skb ip_summed + */ +static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb) +{ + return skb->ip_summed; +} + +/** + * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb + * @skb: Pointer to network buffer + * @ip_summed: ip checksum + * + * Return: none + */ +static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb, + uint8_t ip_summed) +{ + skb->ip_summed = ip_summed; +} + +/** + * __qdf_nbuf_get_priority() - return the priority value of the skb + * @skb: Pointer to network buffer + * + * Return: skb priority + */ +static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb) +{ + return skb->priority; +} + +/** + * __qdf_nbuf_set_priority() - sets the priority value of the skb + * @skb: Pointer to network buffer + * @p: priority + * + * Return: none + */ +static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p) +{ + skb->priority = p; +} + +/** + * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb + * @skb: Current skb + * @next_skb: Next skb + * + * Return: void + */ +static inline void +__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next) +{ + skb->next = skb_next; +} + +/** + * __qdf_nbuf_next() - return the next skb pointer of the current skb + * @skb: Current skb + * + * Return: the next skb pointed to by the current skb + */ +static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb) +{ + return skb->next; +} + +/** + * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb + * @skb: Current skb + * @next_skb: Next skb + * + * This fn is used to link up extensions to the head skb. Does not handle + * linking to the head + * + * Return: none + */ +static inline void +__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next) +{ + skb->next = skb_next; +} + +/** + * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb + * @skb: Current skb + * + * Return: the next skb pointed to by the current skb + */ +static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb) +{ + return skb->next; +} + +/** + * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head + * @skb_head: head_buf nbuf holding head segment (single) + * @ext_list: nbuf list holding linked extensions to the head + * @ext_len: Total length of all buffers in the extension list + * + * This function is used to link up a list of packet extensions (seg1, 2,* ...) + * to the nbuf holding the head segment (seg0) + * + * Return: none + */ +static inline void +__qdf_nbuf_append_ext_list(struct sk_buff *skb_head, + struct sk_buff *ext_list, size_t ext_len) +{ + skb_shinfo(skb_head)->frag_list = ext_list; + skb_head->data_len = ext_len; + skb_head->len += skb_head->data_len; +} + +/** + * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list. + * @head_buf: Network buf holding head segment (single) + * + * This ext_list is populated when we have Jumbo packet, for example in case of + * monitor mode amsdu packet reception, and are stiched using frags_list. + * + * Return: Network buf list holding linked extensions from head buf. + */ +static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf) +{ + return (skb_shinfo(head_buf)->frag_list); +} + +/** + * __qdf_nbuf_get_age() - return the checksum value of the skb + * @skb: Pointer to network buffer + * + * Return: checksum value + */ +static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb) +{ + return skb->csum; +} + +/** + * __qdf_nbuf_set_age() - sets the checksum value of the skb + * @skb: Pointer to network buffer + * @v: Value + * + * Return: none + */ +static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v) +{ + skb->csum = v; +} + +/** + * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb + * @skb: Pointer to network buffer + * @adj: Adjustment value + * + * Return: none + */ +static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj) +{ + skb->csum -= adj; +} + +/** + * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb + * @skb: Pointer to network buffer + * @offset: Offset value + * @len: Length + * @to: Destination pointer + * + * Return: length of the copy bits for skb + */ +static inline int32_t +__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to) +{ + return skb_copy_bits(skb, offset, to, len); +} + +/** + * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail + * @skb: Pointer to network buffer + * @len: Packet length + * + * Return: none + */ +static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len) +{ + if (skb->len > len) { + skb_trim(skb, len); + } else { + if (skb_tailroom(skb) < len - skb->len) { + if (unlikely(pskb_expand_head(skb, 0, + len - skb->len - skb_tailroom(skb), + GFP_ATOMIC))) { + dev_kfree_skb_any(skb); + qdf_assert(0); + } + } + skb_put(skb, (len - skb->len)); + } +} + +/** + * __qdf_nbuf_set_protocol() - sets the protocol value of the skb + * @skb: Pointer to network buffer + * @protocol: Protocol type + * + * Return: none + */ +static inline void +__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol) +{ + skb->protocol = protocol; +} + +#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \ + (QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi)) + +#define __qdf_nbuf_get_tx_htt2_frm(skb) \ + QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) + +void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr, + uint32_t *lo, uint32_t *hi); + +uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb, + struct qdf_tso_info_t *tso_info); + +void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev, + struct qdf_tso_seg_elem_t *tso_seg, + bool is_last_seg); + +#ifdef FEATURE_TSO +/** + * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp + * payload len + * @skb: buffer + * + * Return: size + */ +size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb); +uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb); + +#else +static inline +size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb) +{ + return 0; +} + +static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb) +{ + return 0; +} + +#endif /* FEATURE_TSO */ + +static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb) +{ + if (skb_is_gso(skb) && + (skb_is_gso_v6(skb) || + (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))) + return true; + else + return false; +} + +struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb); + +int __qdf_nbuf_get_users(struct sk_buff *skb); + +/** + * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype, + * and get hw_classify by peeking + * into packet + * @nbuf: Network buffer (skb on Linux) + * @pkt_type: Pkt type (from enum htt_pkt_type) + * @pkt_subtype: Bit 4 of this field in HTT descriptor + * needs to be set in case of CE classification support + * Is set by this macro. + * @hw_classify: This is a flag which is set to indicate + * CE classification is enabled. + * Do not set this bit for VLAN packets + * OR for mcast / bcast frames. + * + * This macro parses the payload to figure out relevant Tx meta-data e.g. + * whether to enable tx_classify bit in CE. + * + * Overrides pkt_type only if required for 802.3 frames (original ethernet) + * If protocol is less than ETH_P_802_3_MIN (0x600), then + * it is the length and a 802.3 frame else it is Ethernet Type II + * (RFC 894). + * Bit 4 in pkt_subtype is the tx_classify bit + * + * Return: void + */ +#define __qdf_nbuf_tx_info_get(skb, pkt_type, \ + pkt_subtype, hw_classify) \ +do { \ + struct ethhdr *eh = (struct ethhdr *)skb->data; \ + uint16_t ether_type = ntohs(eh->h_proto); \ + bool is_mc_bc; \ + \ + is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) || \ + is_multicast_ether_addr((uint8_t *)eh); \ + \ + if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) { \ + hw_classify = 1; \ + pkt_subtype = 0x01 << \ + HTT_TX_CLASSIFY_BIT_S; \ + } \ + \ + if (unlikely(ether_type < ETH_P_802_3_MIN)) \ + pkt_type = htt_pkt_type_ethernet; \ + \ +} while (0) + +/** + * nbuf private buffer routines + */ + +/** + * __qdf_nbuf_peek_header() - return the header's addr & m_len + * @skb: Pointer to network buffer + * @addr: Pointer to store header's addr + * @m_len: network buffer length + * + * Return: none + */ +static inline void +__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len) +{ + *addr = skb->data; + *len = skb->len; +} + +/** + * typedef struct __qdf_nbuf_queue_t - network buffer queue + * @head: Head pointer + * @tail: Tail pointer + * @qlen: Queue length + */ +typedef struct __qdf_nbuf_qhead { + struct sk_buff *head; + struct sk_buff *tail; + unsigned int qlen; +} __qdf_nbuf_queue_t; + +/******************Functions *************/ + +/** + * __qdf_nbuf_queue_init() - initiallize the queue head + * @qhead: Queue head + * + * Return: QDF status + */ +static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead) +{ + memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead)); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_nbuf_queue_add() - add an skb in the tail of the queue + * @qhead: Queue head + * @skb: Pointer to network buffer + * + * This is a lockless version, driver must acquire locks if it + * needs to synchronize + * + * Return: none + */ +static inline void +__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb) +{ + skb->next = NULL; /*Nullify the next ptr */ + + if (!qhead->head) + qhead->head = skb; + else + qhead->tail->next = skb; + + qhead->tail = skb; + qhead->qlen++; +} + +/** + * __qdf_nbuf_queue_append() - Append src list at the end of dest list + * @dest: target netbuf queue + * @src: source netbuf queue + * + * Return: target netbuf queue + */ +static inline __qdf_nbuf_queue_t * +__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src) +{ + if (!dest) + return NULL; + else if (!src || !(src->head)) + return dest; + + if (!(dest->head)) + dest->head = src->head; + else + dest->tail->next = src->head; + + dest->tail = src->tail; + dest->qlen += src->qlen; + return dest; +} + +/** + * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue + * @qhead: Queue head + * @skb: Pointer to network buffer + * + * This is a lockless version, driver must acquire locks if it needs to + * synchronize + * + * Return: none + */ +static inline void +__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb) +{ + if (!qhead->head) { + /*Empty queue Tail pointer Must be updated */ + qhead->tail = skb; + } + skb->next = qhead->head; + qhead->head = skb; + qhead->qlen++; +} + +/** + * __qdf_nbuf_queue_remove() - remove a skb from the head of the queue + * @qhead: Queue head + * + * This is a lockless version. Driver should take care of the locks + * + * Return: skb or NULL + */ +static inline +struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead) +{ + __qdf_nbuf_t tmp = NULL; + + if (qhead->head) { + qhead->qlen--; + tmp = qhead->head; + if (qhead->head == qhead->tail) { + qhead->head = NULL; + qhead->tail = NULL; + } else { + qhead->head = tmp->next; + } + tmp->next = NULL; + } + return tmp; +} + +/** + * __qdf_nbuf_queue_free() - free a queue + * @qhead: head of queue + * + * Return: QDF status + */ +static inline QDF_STATUS +__qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead) +{ + __qdf_nbuf_t buf = NULL; + + while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL) + __qdf_nbuf_free(buf); + return QDF_STATUS_SUCCESS; +} + + +/** + * __qdf_nbuf_queue_first() - returns the first skb in the queue + * @qhead: head of queue + * + * Return: NULL if the queue is empty + */ +static inline struct sk_buff * +__qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead) +{ + return qhead->head; +} + +/** + * __qdf_nbuf_queue_last() - returns the last skb in the queue + * @qhead: head of queue + * + * Return: NULL if the queue is empty + */ +static inline struct sk_buff * +__qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead) +{ + return qhead->tail; +} + +/** + * __qdf_nbuf_queue_len() - return the queue length + * @qhead: Queue head + * + * Return: Queue length + */ +static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead) +{ + return qhead->qlen; +} + +/** + * __qdf_nbuf_queue_next() - return the next skb from packet chain + * @skb: Pointer to network buffer + * + * This API returns the next skb from packet chain, remember the skb is + * still in the queue + * + * Return: NULL if no packets are there + */ +static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb) +{ + return skb->next; +} + +/** + * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not + * @qhead: Queue head + * + * Return: true if length is 0 else false + */ +static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead) +{ + return qhead->qlen == 0; +} + +/* + * Use sk_buff_head as the implementation of qdf_nbuf_queue_t. + * Because the queue head will most likely put in some structure, + * we don't use pointer type as the definition. + */ + +/* + * Use sk_buff_head as the implementation of qdf_nbuf_queue_t. + * Because the queue head will most likely put in some structure, + * we don't use pointer type as the definition. + */ + +static inline void +__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag) +{ +} + +/** + * __qdf_nbuf_realloc_headroom() - This keeps the skb shell intact + * expands the headroom + * in the data region. In case of failure the skb is released. + * @skb: sk buff + * @headroom: size of headroom + * + * Return: skb or NULL + */ +static inline struct sk_buff * +__qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom) +{ + if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { + dev_kfree_skb_any(skb); + skb = NULL; + } + return skb; +} + +/** + * __qdf_nbuf_realloc_tailroom() - This keeps the skb shell intact + * exapnds the tailroom + * in data region. In case of failure it releases the skb. + * @skb: sk buff + * @tailroom: size of tailroom + * + * Return: skb or NULL + */ +static inline struct sk_buff * +__qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom) +{ + if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC))) + return skb; + /** + * unlikely path + */ + dev_kfree_skb_any(skb); + return NULL; +} + +/** + * __qdf_nbuf_linearize() - skb linearize + * @skb: sk buff + * + * create a version of the specified nbuf whose contents + * can be safely modified without affecting other + * users.If the nbuf is non-linear then this function + * linearize. if unable to linearize returns -ENOMEM on + * success 0 is returned + * + * Return: 0 on Success, -ENOMEM on failure is returned. + */ +static inline int +__qdf_nbuf_linearize(struct sk_buff *skb) +{ + return skb_linearize(skb); +} + +/** + * __qdf_nbuf_unshare() - skb unshare + * @skb: sk buff + * + * create a version of the specified nbuf whose contents + * can be safely modified without affecting other + * users.If the nbuf is a clone then this function + * creates a new copy of the data. If the buffer is not + * a clone the original buffer is returned. + * + * Return: skb or NULL + */ +static inline struct sk_buff * +__qdf_nbuf_unshare(struct sk_buff *skb) +{ + return skb_unshare(skb, GFP_ATOMIC); +} + +/** + * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not + *@buf: sk buff + * + * Return: true/false + */ +static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb) +{ + return skb_cloned(skb); +} + +/** + * __qdf_nbuf_pool_init() - init pool + * @net: net handle + * + * Return: QDF status + */ +static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net) +{ + return QDF_STATUS_SUCCESS; +} + +/* + * adf_nbuf_pool_delete() implementation - do nothing in linux + */ +#define __qdf_nbuf_pool_delete(osdev) + +/** + * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure + * release the skb. + * @skb: sk buff + * @headroom: size of headroom + * @tailroom: size of tailroom + * + * Return: skb or NULL + */ +static inline struct sk_buff * +__qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom) +{ + if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC))) + return skb; + + dev_kfree_skb_any(skb); + return NULL; +} + +/** + * __qdf_nbuf_copy_expand() - copy and expand nbuf + * @buf: Network buf instance + * @headroom: Additional headroom to be added + * @tailroom: Additional tailroom to be added + * + * Return: New nbuf that is a copy of buf, with additional head and tailroom + * or NULL if there is no memory + */ +static inline struct sk_buff * +__qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom) +{ + return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC); +} + +/** + * __qdf_nbuf_tx_cksum_info() - tx checksum info + * + * Return: true/false + */ +static inline bool +__qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off, + uint8_t **where) +{ + qdf_assert(0); + return false; +} + +/** + * __qdf_nbuf_reset_ctxt() - mem zero control block + * @nbuf: buffer + * + * Return: none + */ +static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf) +{ + qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb)); +} + +/** + * __qdf_nbuf_network_header() - get network header + * @buf: buffer + * + * Return: network header pointer + */ +static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf) +{ + return skb_network_header(buf); +} + +/** + * __qdf_nbuf_transport_header() - get transport header + * @buf: buffer + * + * Return: transport header pointer + */ +static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf) +{ + return skb_transport_header(buf); +} + +/** + * __qdf_nbuf_tcp_tso_size() - return the size of TCP segment size (MSS), + * passed as part of network buffer by network stack + * @skb: sk buff + * + * Return: TCP MSS size + * + */ +static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} + +/** + * __qdf_nbuf_init() - Re-initializes the skb for re-use + * @nbuf: sk buff + * + * Return: none + */ +void __qdf_nbuf_init(__qdf_nbuf_t nbuf); + +/* + * __qdf_nbuf_get_cb() - returns a pointer to skb->cb + * @nbuf: sk buff + * + * Return: void ptr + */ +static inline void * +__qdf_nbuf_get_cb(__qdf_nbuf_t nbuf) +{ + return (void *)nbuf->cb; +} + +/** + * __qdf_nbuf_headlen() - return the length of linear buffer of the skb + * @skb: sk buff + * + * Return: head size + */ +static inline size_t +__qdf_nbuf_headlen(struct sk_buff *skb) +{ + return skb_headlen(skb); +} + +/** + * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb, + * @skb: sk buff + * + * Return: number of fragments + */ +static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb) +{ + return skb_shinfo(skb)->nr_frags; +} + +/** + * __qdf_nbuf_tso_tcp_v4() - to check if the TSO TCP pkt is a IPv4 or not. + * @buf: sk buff + * + * Return: true/false + */ +static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0; +} + +/** + * __qdf_nbuf_tso_tcp_v6() - to check if the TSO TCP pkt is a IPv6 or not. + * @buf: sk buff + * + * Return: true/false + */ +static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0; +} + +/** + * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb + * @skb: sk buff + * + * Return: size of l2+l3+l4 header length + */ +static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb) +{ + return skb_transport_offset(skb) + tcp_hdrlen(skb); +} + +/** + * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not + * @buf: sk buff + * + * Return: true/false + */ +static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb) +{ + if (skb_is_nonlinear(skb)) + return true; + else + return false; +} + +/** + * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb + * @buf: sk buff + * + * Return: TCP sequence number + */ +static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb) +{ + return ntohl(tcp_hdr(skb)->seq); +} + +/** + * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf'f private space + *@buf: sk buff + * + * Return: data pointer to typecast into your priv structure + */ +static inline uint8_t * +__qdf_nbuf_get_priv_ptr(struct sk_buff *skb) +{ + return &skb->cb[8]; +} + +/** + * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame. + * @buf: Pointer to nbuf + * + * Return: None + */ +static inline void +__qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf) +{ + buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET; +} + +/** + * __qdf_nbuf_record_rx_queue() - set rx queue in skb + * + * @buf: sk buff + * @queue_id: Queue id + * + * Return: void + */ +static inline void +__qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id) +{ + skb_record_rx_queue(skb, queue_id); +} + +/** + * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel + * + * @buf: sk buff + * + * Return: Queue mapping + */ +static inline uint16_t +__qdf_nbuf_get_queue_mapping(struct sk_buff *skb) +{ + return skb->queue_mapping; +} + +/** + * __qdf_nbuf_set_timestamp() - set the timestamp for frame + * + * @buf: sk buff + * + * Return: void + */ +static inline void +__qdf_nbuf_set_timestamp(struct sk_buff *skb) +{ + __net_timestamp(skb); +} + +/** + * __qdf_nbuf_get_timestamp() - get the timestamp for frame + * + * @buf: sk buff + * + * Return: timestamp stored in skb in ms + */ +static inline uint64_t +__qdf_nbuf_get_timestamp(struct sk_buff *skb) +{ + return ktime_to_ms(skb_get_ktime(skb)); +} + +/** + * __qdf_nbuf_get_timedelta_ms() - get time difference in ms + * + * @buf: sk buff + * + * Return: time difference in ms + */ +static inline uint64_t +__qdf_nbuf_get_timedelta_ms(struct sk_buff *skb) +{ + return ktime_to_ms(net_timedelta(skb->tstamp)); +} + +/** + * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds + * + * @buf: sk buff + * + * Return: time difference in micro seconds + */ +static inline uint64_t +__qdf_nbuf_get_timedelta_us(struct sk_buff *skb) +{ + return ktime_to_us(net_timedelta(skb->tstamp)); +} + +/** + * __qdf_nbuf_orphan() - orphan a nbuf + * @skb: sk buff + * + * If a buffer currently has an owner then we call the + * owner's destructor function + * + * Return: void + */ +static inline void __qdf_nbuf_orphan(struct sk_buff *skb) +{ + return skb_orphan(skb); +} + +static inline struct sk_buff * +__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head) +{ + return skb_dequeue(skb_queue_head); +} + +static inline +uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head) +{ + return skb_queue_head->qlen; +} + +static inline +void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head, + struct sk_buff *skb) +{ + return skb_queue_tail(skb_queue_head, skb); +} + +static inline +void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head) +{ + return skb_queue_head_init(skb_queue_head); +} + +static inline +void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head) +{ + return skb_queue_purge(skb_queue_head); +} + +/** + * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock + * @head: skb list for which lock is to be acquired + * + * Return: void + */ +static inline +void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head) +{ + spin_lock_bh(&skb_queue_head->lock); +} + +/** + * __qdf_nbuf_queue_head_unlock() - Release the skb list lock + * @head: skb list for which lock is to be release + * + * Return: void + */ +static inline +void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head) +{ + spin_unlock_bh(&skb_queue_head->lock); +} + +#ifdef CONFIG_NBUF_AP_PLATFORM +#include +#else +#include +#endif +#endif /*_I_QDF_NET_BUF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf_m.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf_m.h new file mode 100644 index 0000000000000000000000000000000000000000..69f594883c380d98deb3695f90c760aaf9167e48 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf_m.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_nbuf_m.h + * + * This file provides platform specific nbuf API's. + * Included by i_qdf_nbuf.h and should not be included + * directly from other files. + */ + +#ifndef _I_QDF_NBUF_M_H +#define _I_QDF_NBUF_M_H + +#define QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.tcp_seq_num) +#define QDF_NBUF_CB_RX_TCP_ACK_NUM(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.tcp_ack_num) +#define QDF_NBUF_CB_RX_LRO_CTX(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.lro_ctx) + +#define QDF_NBUF_CB_TX_IPA_OWNED(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.ipa.owned) +#define QDF_NBUF_CB_TX_IPA_PRIV(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.ipa.priv) +#define QDF_NBUF_CB_TX_DESC_ID(skb)\ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.desc_id) +#define QDF_NBUF_CB_MGMT_TXRX_DESC_ID(skb)\ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.mgmt_desc_id) +#define QDF_NBUF_CB_TX_DMA_BI_MAP(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m. \ + dma_option.bi_map) + +#define QDF_NBUF_CB_RX_PEER_ID(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.dp. \ + wifi3.peer_id) + +#define QDF_NBUF_CB_RX_PKT_LEN(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.dp. \ + wifi3.msdu_len) + +#define QDF_NBUF_CB_RX_MAP_IDX(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.dp. \ + wifi2.map_index) + +#define QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \ + peer_cached_buf_frm) + +#define QDF_NBUF_CB_RX_FLUSH_IND(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.flush_ind) + +#define QDF_NBUF_CB_RX_PACKET_BUFF_POOL(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \ + packet_buf_pool) + +#define QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \ + l3_hdr_pad) + +#define QDF_NBUF_CB_RX_PACKET_EXC_FRAME(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \ + exc_frm) + +#define QDF_NBUF_CB_RX_PACKET_REO_DEST_IND(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \ + reo_dest_ind) + +#define QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \ + ipa_smmu_map) + +#define __qdf_nbuf_ipa_owned_get(skb) \ + QDF_NBUF_CB_TX_IPA_OWNED(skb) + +#define __qdf_nbuf_ipa_owned_set(skb) \ + (QDF_NBUF_CB_TX_IPA_OWNED(skb) = 1) + +#define __qdf_nbuf_ipa_owned_clear(skb) \ + (QDF_NBUF_CB_TX_IPA_OWNED(skb) = 0) + +#define __qdf_nbuf_ipa_priv_get(skb) \ + QDF_NBUF_CB_TX_IPA_PRIV(skb) + +#define __qdf_nbuf_ipa_priv_set(skb, priv) \ + (QDF_NBUF_CB_TX_IPA_PRIV(skb) = (priv)) + +/** + * qdf_nbuf_cb_update_vdev_id() - update vdev id in skb cb + * @skb: skb pointer whose cb is updated with vdev id information + * @vdev_id: vdev id to be updated in cb + * + * Return: void + */ +static inline void +qdf_nbuf_cb_update_vdev_id(struct sk_buff *skb, uint8_t vdev_id) +{ + QDF_NBUF_CB_RX_VDEV_ID(skb) = vdev_id; +} + +void __qdf_nbuf_init_replenish_timer(void); +void __qdf_nbuf_deinit_replenish_timer(void); + +/** + * __qdf_nbuf_push_head() - Push data in the front + * @skb: Pointer to network buffer + * @size: size to be pushed + * + * Return: New data pointer of this buf after data has been pushed, + * or NULL if there is not enough room in this buf. + */ +static inline uint8_t *__qdf_nbuf_push_head(struct sk_buff *skb, size_t size) +{ + if (QDF_NBUF_CB_PADDR(skb)) + QDF_NBUF_CB_PADDR(skb) -= size; + + return skb_push(skb, size); +} + + +/** + * __qdf_nbuf_pull_head() - pull data out from the front + * @skb: Pointer to network buffer + * @size: size to be popped + * + * Return: New data pointer of this buf after data has been popped, + * or NULL if there is not sufficient data to pull. + */ +static inline uint8_t *__qdf_nbuf_pull_head(struct sk_buff *skb, size_t size) +{ + if (QDF_NBUF_CB_PADDR(skb)) + QDF_NBUF_CB_PADDR(skb) += size; + + return skb_pull(skb, size); +} + +/** + * qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer + * + * This function initializes the nbuf alloc fail replenish timer. + * + * Return: void + */ +static inline void +qdf_nbuf_init_replenish_timer(void) +{ + __qdf_nbuf_init_replenish_timer(); +} + +/** + * qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer + * + * This function deinitializes the nbuf alloc fail replenish timer. + * + * Return: void + */ +static inline void +qdf_nbuf_deinit_replenish_timer(void) +{ + __qdf_nbuf_deinit_replenish_timer(); +} + +#endif /*_I_QDF_NBUF_M_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf_w.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf_w.h new file mode 100644 index 0000000000000000000000000000000000000000..4657186c71584fd38c009931db63df97d23fe97e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf_w.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_nbuf_w.h + * + * This file provides platform specific nbuf API's. + * Included by i_qdf_nbuf.h and should not be included + * directly from other files. + */ + +#ifndef _I_QDF_NBUF_W_H +#define _I_QDF_NBUF_W_H + +/* ext_cb accesor macros and internal API's */ + +#define QDF_NBUF_CB_EXT_CB(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_w.ext_cb_ptr) + +#define __qdf_nbuf_set_ext_cb(skb, ref) \ + do { \ + QDF_NBUF_CB_EXT_CB((skb)) = (ref); \ + } while (0) + +#define __qdf_nbuf_get_ext_cb(skb) \ + QDF_NBUF_CB_EXT_CB((skb)) + +/* fctx accesor macros and internal API's*/ + +#define QDF_NBUF_CB_RX_FCTX(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.fctx) + +#define QDF_NBUF_CB_TX_FCTX(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_w.fctx) + +#define QDF_NBUF_CB_RX_PEER_ID(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.peer_id) + +#define QDF_NBUF_CB_RX_PKT_LEN(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.msdu_len) + +#define __qdf_nbuf_set_rx_fctx_type(skb, ctx, type) \ + do { \ + QDF_NBUF_CB_RX_FCTX((skb)) = (ctx); \ + QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \ + } while (0) + +#define __qdf_nbuf_get_rx_fctx(skb) \ + QDF_NBUF_CB_RX_FCTX((skb)) + +#define __qdf_nbuf_set_tx_fctx_type(skb, ctx, type) \ + do { \ + QDF_NBUF_CB_TX_FCTX((skb)) = (ctx); \ + QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \ + } while (0) + +#define __qdf_nbuf_get_tx_fctx(skb) \ + QDF_NBUF_CB_TX_FCTX((skb)) + +#define QDF_NBUF_CB_RX_PROTOCOL_TAG(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.dev.priv_cb_w.protocol_tag) + +#define __qdf_nbuf_set_rx_protocol_tag(skb, val) \ + ((QDF_NBUF_CB_RX_PROTOCOL_TAG((skb))) = val) + +#define __qdf_nbuf_get_rx_protocol_tag(skb) \ + (QDF_NBUF_CB_RX_PROTOCOL_TAG((skb))) + +#define QDF_NBUF_CB_RX_FLOW_TAG(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.dev.priv_cb_w.flow_tag) + +#define __qdf_nbuf_set_rx_flow_tag(skb, val) \ + ((QDF_NBUF_CB_RX_FLOW_TAG((skb))) = val) + +#define __qdf_nbuf_get_rx_flow_tag(skb) \ + (QDF_NBUF_CB_RX_FLOW_TAG((skb))) + +/** + * qdf_nbuf_cb_update_vdev_id() - update vdev id in skb cb + * @skb: skb pointer whose cb is updated with vdev id information + * @vdev_id: vdev id to be updated in cb + * + * Return: void + */ +static inline void +qdf_nbuf_cb_update_vdev_id(struct sk_buff *skb, uint8_t vdev_id) +{ + /* Does not apply to WIN */ +} + +/** + * __qdf_nbuf_push_head() - Push data in the front + * @skb: Pointer to network buffer + * @size: size to be pushed + * + * Return: New data pointer of this buf after data has been pushed, + * or NULL if there is not enough room in this buf. + */ +static inline uint8_t *__qdf_nbuf_push_head(struct sk_buff *skb, size_t size) +{ + return skb_push(skb, size); +} + +/** + * __qdf_nbuf_pull_head() - pull data out from the front + * @skb: Pointer to network buffer + * @size: size to be popped + * + * Return: New data pointer of this buf after data has been popped, + * or NULL if there is not sufficient data to pull. + */ +static inline uint8_t *__qdf_nbuf_pull_head(struct sk_buff *skb, size_t size) +{ + return skb_pull(skb, size); +} + +static inline void qdf_nbuf_init_replenish_timer(void) {} +static inline void qdf_nbuf_deinit_replenish_timer(void) {} + +#endif /*_I_QDF_NBUF_W_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_net_if.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_net_if.h new file mode 100644 index 0000000000000000000000000000000000000000..d459e892daa738902ace1c2201ddfa732bd0fac0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_net_if.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_net_if + * QCA driver framework (QDF) network interface management APIs + */ + +#if !defined(__I_QDF_NET_IF_H) +#define __I_QDF_NET_IF_H + +/* Include Files */ +#include +#include +#include + +struct qdf_net_if; + +/** + * __qdf_net_if_create_dummy_if() - create dummy interface + * @nif: interface handle + * + * This function will create a dummy network interface + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qdf_net_if_create_dummy_if(struct qdf_net_if *nif) +{ + int ret; + + ret = init_dummy_netdev((struct net_device *)nif); + + return qdf_status_from_os_return(ret); +} +#endif /*__I_QDF_NET_IF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_net_types.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_net_types.h new file mode 100644 index 0000000000000000000000000000000000000000..796f8b7b1f404ad539ed4255e2e7378ce489c9a6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_net_types.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_net_types + * This file provides OS dependent net types API's. + */ + +#ifndef _I_QDF_NET_TYPES_H +#define _I_QDF_NET_TYPES_H + +#include /* uint8_t, etc. */ +#include +#include +#include + +typedef struct in6_addr __in6_addr_t; +typedef __wsum __wsum_t; + +static inline int32_t __qdf_csum_ipv6(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, unsigned short proto, + __wsum sum) +{ + return csum_ipv6_magic((struct in6_addr *)saddr, + (struct in6_addr *)daddr, len, proto, sum); +} + +#define __QDF_TCPHDR_FIN TCPHDR_FIN +#define __QDF_TCPHDR_SYN TCPHDR_SYN +#define __QDF_TCPHDR_RST TCPHDR_RST +#define __QDF_TCPHDR_PSH TCPHDR_PSH +#define __QDF_TCPHDR_ACK TCPHDR_ACK +#define __QDF_TCPHDR_URG TCPHDR_URG +#define __QDF_TCPHDR_ECE TCPHDR_ECE +#define __QDF_TCPHDR_CWR TCPHDR_CWR +#endif /* _I_QDF_NET_TYPES_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_notifier.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_notifier.h new file mode 100644 index 0000000000000000000000000000000000000000..331bfffb485e9ab8413fab9d0cecacd3439cd3ce --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_notifier.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: i_qdf_notifier.h + * + * Linux-specific definitions for use by QDF notifier APIs + */ + +#ifndef __I_QDF_NOTIFIER_H +#define __I_QDF_NOTIFIER_H + +#include + +typedef struct blocking_notifier_head __qdf_blocking_notif_head; +typedef struct atomic_notifier_head __qdf_atomic_notif_head; +typedef struct notifier_block __qdf_notifier_block; +#define qdf_blocking_notifier_init(p) BLOCKING_NOTIFIER_HEAD(p); +#define qdf_atomic_notifier_init(p) ATOMIC_NOTIFIER_HEAD(p); + +static inline int +__qdf_register_blocking_notifier_chain(__qdf_blocking_notif_head *head, + __qdf_notifier_block *qnb) +{ + return blocking_notifier_chain_register(head, qnb); +} + +static inline int +__qdf_unregister_blocking_notifier_chain(__qdf_blocking_notif_head *head, + __qdf_notifier_block *qnb) +{ + return blocking_notifier_chain_unregister(head, qnb); +} + +static inline int +__qdf_blocking_notfier_call(__qdf_blocking_notif_head *head, + unsigned long v, void *data) +{ + return blocking_notifier_call_chain(head, v, data); +} + +static inline int +__qdf_register_atomic_notifier_chain(__qdf_atomic_notif_head *head, + __qdf_notifier_block *qnb) +{ + return atomic_notifier_chain_register(head, qnb); +} + +static inline int +__qdf_unregister_atomic_notifier_chain(__qdf_atomic_notif_head *head, + __qdf_notifier_block *qnb) +{ + return atomic_notifier_chain_unregister(head, qnb); +} + +static inline int +__qdf_atomic_notifier_call(__qdf_atomic_notif_head *head, + unsigned long v, void *data) +{ + return atomic_notifier_call_chain(head, v, data); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_perf.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_perf.h new file mode 100644 index 0000000000000000000000000000000000000000..a57e068b686ccc33b8cde2f3bf5b41b67c0bea18 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_perf.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_perf + * This file provides OS dependent perf API's. + */ + +#ifndef _I_QDF_PERF_H +#define _I_QDF_PERF_H + +#ifdef QCA_PERF_PROFILING + +#if (QCA_MIPS74K_PERF_PROFILING || QCA_MIPS24KK_PERF_PROFILING) +#include +#endif + +/* #defines required for structures */ +#define MAX_SAMPLES_SHIFT 5 /* change this only*/ +#define MAX_SAMPLES (1 << MAX_SAMPLES_SHIFT) +#define INC_SAMPLES(x) ((x + 1) & (MAX_SAMPLES - 1)) +#define MAX_SAMPLE_SZ (sizeof(uint32_t) * MAX_SAMPLES) +#define PER_SAMPLE_SZ sizeof(uint32_t) + +/** + * typedef qdf_perf_entry_t - performance entry + * @list: pointer to next + * @child: pointer tochild + * @parent: pointer to top + * @type: perf cntr + * @name: string + * @proc: pointer to proc entry + * @start_tsc: array at start tsc + * @end_tsc: array at ent tsc + * @samples: array of samples + * @sample_idx: sample index + * @lock_irq: lock irq + */ +typedef struct qdf_os_perf_entry { + struct list_head list; + struct list_head child; + + struct qdf_perf_entry *parent; + + qdf_perf_cntr_t type; + uint8_t *name; + + struct proc_dir_entry *proc; + + uint64_t start_tsc[MAX_SAMPLES]; + uint64_t end_tsc[MAX_SAMPLES]; + + uint32_t samples[MAX_SAMPLES]; + uint32_t sample_idx; + + spinlock_t lock_irq; + +} qdf_perf_entry_t; + +/* typedefs */ +typedef void *__qdf_perf_id_t; + +#endif /* QCA_PERF_PROFILING */ +#endif /* _I_QDF_PERF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_periodic_work.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_periodic_work.h new file mode 100644 index 0000000000000000000000000000000000000000..40c9347ebf8c734a9e86c2dfaed0f3e12aa95eab --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_periodic_work.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __I_QDF_PERIODIC_WORK_H +#define __I_QDF_PERIODIC_WORK_H + +#include "linux/workqueue.h" + +#define __qdf_opaque_delayed_work delayed_work + +#endif /* __I_QDF_PERIODIC_WORK_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ptr_hash.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ptr_hash.h new file mode 100644 index 0000000000000000000000000000000000000000..86ae269e2373fd5b5d7522267ebbe1d26ea383b1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ptr_hash.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __I_QDF_PTR_HASH_H +#define __I_QDF_PTR_HASH_H + +#include "linux/hash.h" + +#define __qdf_ptr_hash_key(key, bits) hash_long(key, bits) + +#endif /* __I_QDF_PTR_HASH_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_str.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_str.h new file mode 100644 index 0000000000000000000000000000000000000000..53378e12ba0028e1053fb7f40bfc44fd25ae7527 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_str.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_str.h + * Linux-specific implementations for qdf_str + */ + +#ifndef __I_QDF_STR_H +#define __I_QDF_STR_H + +#include "linux/string.h" + +#define __qdf_is_space(c) isspace(c) +#define __qdf_str_cmp(left, right) strcmp(left, right) +#define __qdf_str_lcopy(dest, src, dest_size) strlcpy(dest, src, dest_size) +#define __qdf_str_left_trim(str) skip_spaces(str) +#define __qdf_str_len(str) strlen(str) +#define __qdf_str_trim(str) strim(str) +#define __qdf_str_nlen(str, limit) strnlen(str, limit) +#define __qdf_str_ncmp(left, right, limit) strncmp(left, right, limit) + +#endif /* __I_QDF_STR_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_streamfs.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_streamfs.h new file mode 100644 index 0000000000000000000000000000000000000000..d4b6e2edc1389ff922f245ca183e5559e7a10c34 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_streamfs.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_streamfs.h + * Linux specific implementation for stream filesystem APIs. + */ + +#ifndef _I_QDF_STREAMFS_H +#define _I_QDF_STREAMFS_H + +#include + +typedef struct rchan *__qdf_streamfs_chan_t; +typedef struct rchan_buf *__qdf_streamfs_chan_buf_t; + +#endif /* _I_QDF_STREAMFS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_talloc.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_talloc.h new file mode 100644 index 0000000000000000000000000000000000000000..3bc402dcbd43092eba1c7f418cf6da6ae2a08223 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_talloc.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_talloc.h + * + * Linux-specific definitions for use by QDF talloc APIs + */ + +#ifndef __I_QDF_TALLOC_H +#define __I_QDF_TALLOC_H + +#include "asm/page.h" +#include "linux/irqflags.h" +#include "linux/preempt.h" +#include "linux/slab.h" + +#define __can_sleep() \ + (!in_interrupt() && !irqs_disabled() && !in_atomic()) + +#define __zalloc_sleeps(size) kzalloc(size, GFP_KERNEL) +#define __zalloc_atomic(size) kzalloc(size, GFP_ATOMIC) +#define __zalloc_auto(size) \ + kzalloc(size, __can_sleep() ? GFP_KERNEL : GFP_ATOMIC) + +#define __free(ptr) kfree(ptr) + +#define __alloc_size(ptr) ksize(ptr) + +#endif /* __I_QDF_TALLOC_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_threads.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_threads.h new file mode 100644 index 0000000000000000000000000000000000000000..4f6e1a9f64e0afe3dca3bcf4171711a449b30c67 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_threads.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_threads + * Header file for linux-specific thead abstractions + */ + +#ifndef __I_QDF_THREADS_H +#define __I_QDF_THREADS_H + +typedef struct task_struct __qdf_thread_t; + +#endif /* __I_QDF_THREADS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_time.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_time.h new file mode 100644 index 0000000000000000000000000000000000000000..30d98860a755c6874a6737bd0406209c5a313b60 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_time.h @@ -0,0 +1,328 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_time + * This file provides OS dependent time API's. + */ + +#ifndef _I_QDF_TIME_H +#define _I_QDF_TIME_H + +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) +#include +#else +#include +#endif +#ifdef MSM_PLATFORM +#include +#endif + +typedef unsigned long __qdf_time_t; +typedef ktime_t __qdf_ktime_t; + +/** + * __qdf_ns_to_ktime() - Converts nanoseconds to a ktime object + * @ns: time in nanoseconds + * + * Return: nanoseconds as ktime object + */ +static inline ktime_t __qdf_ns_to_ktime(uint64_t ns) +{ + return ns_to_ktime(ns); +} + +/** + * __qdf_ktime_add() - Adds two ktime objects and returns + * a ktime object + * @time1: time as ktime object + * @time2: time as ktime object + * + * Return: sum of ktime objects as ktime object + */ +static inline ktime_t __qdf_ktime_add(ktime_t ktime1, ktime_t ktime2) +{ + return ktime_add(ktime1, ktime2); +} + +/** + * __qdf_ktime_get() - Gets the current time as ktime object + * + * Return: current time as ktime object + */ +static inline ktime_t __qdf_ktime_get(void) +{ + return ktime_get(); +} + +/** + * __qdf_ktime_add_ns() - Adds ktime object and nanoseconds value and + * returns the ktime object + * + * Return: ktime object + */ +static inline ktime_t __qdf_ktime_add_ns(ktime_t ktime, int64_t ns) +{ + return ktime_add_ns(ktime, ns); +} + +/** + * __qdf_ktime_to_ns() - convert ktime to nanoseconds + * @ktime: time as ktime object + * @ns: time in nanoseconds + * + * Return: ktime in nanoseconds + */ +static inline int64_t __qdf_ktime_to_ns(ktime_t ktime) +{ + return ktime_to_ns(ktime); +} + +/** + * __qdf_ktime_to_ms() - convert ktime to milliseconds + * @ktime: time as ktime object + * + * Return: ktime in milliseconds + */ +static inline int64_t __qdf_ktime_to_ms(ktime_t ktime) +{ + return ktime_to_ms(ktime); +} + + +/** + * __qdf_system_ticks() - get system ticks + * + * Return: system tick in jiffies + */ +static inline __qdf_time_t __qdf_system_ticks(void) +{ + return jiffies; +} + +#define __qdf_system_ticks_per_sec HZ + +/** + * __qdf_system_ticks_to_msecs() - convert system ticks into milli seconds + * @ticks: System ticks + * + * Return: system tick converted into milli seconds + */ +static inline uint32_t __qdf_system_ticks_to_msecs(unsigned long ticks) +{ + return jiffies_to_msecs(ticks); +} + +/** + * __qdf_system_msecs_to_ticks() - convert milli seconds into system ticks + * @msecs: Milli seconds + * + * Return: milli seconds converted into system ticks + */ +static inline __qdf_time_t __qdf_system_msecs_to_ticks(uint32_t msecs) +{ + return msecs_to_jiffies(msecs); +} + +/** + * __qdf_get_system_uptime() - get system uptime + * + * Return: system uptime in jiffies + */ +static inline __qdf_time_t __qdf_get_system_uptime(void) +{ + return jiffies; +} + +static inline unsigned long __qdf_get_system_timestamp(void) +{ + return (jiffies / HZ) * 1000 + (jiffies % HZ) * (1000 / HZ); +} + +#ifdef CONFIG_ARM +/** + * __qdf_udelay() - delay execution for given microseconds + * @usecs: Micro seconds to delay + * + * Return: none + */ +static inline void __qdf_udelay(uint32_t usecs) +{ + /* + * This is in support of XScale build. They have a limit on the udelay + * value, so we have to make sure we don't approach the limit + */ + uint32_t mticks; + uint32_t leftover; + int i; + /* slice into 1024 usec chunks (simplifies calculation) */ + mticks = usecs >> 10; + leftover = usecs - (mticks << 10); + for (i = 0; i < mticks; i++) + udelay(1024); + udelay(leftover); +} +#else +static inline void __qdf_udelay(uint32_t usecs) +{ + /* Normal Delay functions. Time specified in microseconds */ + udelay(usecs); +} +#endif + +/** + * __qdf_mdelay() - delay execution for given milliseconds + * @usecs: Milliseconds to delay + * + * Return: none + */ +static inline void __qdf_mdelay(uint32_t msecs) +{ + mdelay(msecs); +} + +/** + * __qdf_system_time_after() - Check if a is later than b + * @a: Time stamp value a + * @b: Time stamp value b + * + * Return: + * true if a < b else false + */ +static inline bool __qdf_system_time_after(__qdf_time_t a, __qdf_time_t b) +{ + return (long)(b) - (long)(a) < 0; +} + +/** + * __qdf_system_time_before() - Check if a is before b + * @a: Time stamp value a + * @b: Time stamp value b + * + * Return: + * true if a is before b else false + */ +static inline bool __qdf_system_time_before(__qdf_time_t a, __qdf_time_t b) +{ + return __qdf_system_time_after(b, a); +} + +/** + * __qdf_system_time_after_eq() - Check if a atleast as recent as b, if not + * later + * @a: Time stamp value a + * @b: Time stamp value b + * + * Return: + * true if a >= b else false + */ +static inline bool __qdf_system_time_after_eq(__qdf_time_t a, __qdf_time_t b) +{ + return (long)(a) - (long)(b) >= 0; +} + +/** + * __qdf_get_monotonic_boottime() - get monotonic kernel boot time + * This API is similar to qdf_get_system_boottime but it includes + * time spent in suspend. + * + * Return: Time in microseconds + */ +static inline uint64_t __qdf_get_monotonic_boottime(void) +{ + return (uint64_t)ktime_to_us(ktime_get_boottime()); +} + +#if defined (MSM_PLATFORM) + +/** + * __qdf_get_log_timestamp() - get msm timer ticks + * + * Returns QTIMER(19.2 MHz) clock ticks. To convert it into seconds + * divide it by 19200. + * + * Return: QTIMER(19.2 MHz) clock ticks + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) +static inline uint64_t __qdf_get_log_timestamp(void) +{ + return __arch_counter_get_cntvct(); +} +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +static inline uint64_t __qdf_get_log_timestamp(void) +{ + return arch_counter_get_cntvct(); +} +#else +static inline uint64_t __qdf_get_log_timestamp(void) +{ + return arch_counter_get_cntpct(); +} +#endif /* LINUX_VERSION_CODE */ +#else + +/** + * __qdf_get_log_timestamp - get time stamp for logging + * + * Return: system tick for non MSM platfroms + */ +static inline uint64_t __qdf_get_log_timestamp(void) +{ + struct timespec ts; + + ktime_get_ts(&ts); + + return ((uint64_t) ts.tv_sec * 1000000) + (ts.tv_nsec / 1000); +} +#endif + +/** + * __qdf_get_bootbased_boottime_ns() - Get the bootbased time in nanoseconds + * + * __qdf_get_bootbased_boottime_ns() function returns the number of nanoseconds + * that have elapsed since the system was booted. It also includes the time when + * system was suspended. + * + * Return: + * The time since system booted in nanoseconds + */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)) +static inline uint64_t __qdf_get_bootbased_boottime_ns(void) +{ + return ktime_get_boottime_ns(); +} + +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) +static inline uint64_t __qdf_get_bootbased_boottime_ns(void) +{ + return ktime_get_boot_ns(); +} + +#else +static inline uint64_t __qdf_get_bootbased_boottime_ns(void) +{ + return ktime_to_ns(ktime_get_boottime()); +} +#endif + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_timer.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_timer.h new file mode 100644 index 0000000000000000000000000000000000000000..29c7782a94306ad42c202ac30b1eb6cf6324f327 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_timer.h @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_timer + * This file provides OS dependent timer API's. + */ + +#ifndef _I_QDF_TIMER_H +#define _I_QDF_TIMER_H + +#include +#include +#include +#include +#include "qdf_mc_timer.h" +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) +#include +#endif + +typedef void (*qdf_timer_func_t)(void *); + + +struct __qdf_timer_t { + struct timer_list os_timer; + qdf_timer_func_t callback; + void *context; +}; + +#ifdef QDF_TIMER_MULTIPLIER_FRAC +#define __qdf_scaled_msecs_to_jiffies(msec) \ + (QDF_TIMER_MULTIPLIER_FRAC * msecs_to_jiffies(msec)) +#else +#define __qdf_scaled_msecs_to_jiffies(msec) \ + (qdf_timer_get_multiplier() * msecs_to_jiffies(msec)) +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) +static inline void __os_timer_shim(struct timer_list *os_timer) +{ + struct __qdf_timer_t *timer = from_timer(timer, os_timer, os_timer); + + timer->callback(timer->context); +} + +static inline QDF_STATUS __qdf_timer_init(struct __qdf_timer_t *timer, + qdf_timer_func_t func, void *arg, + QDF_TIMER_TYPE type) +{ + struct timer_list *os_timer = &timer->os_timer; + uint32_t flags = 0; + + timer->callback = func; + timer->context = arg; + + if (type == QDF_TIMER_TYPE_SW) + flags |= TIMER_DEFERRABLE; + + if (object_is_on_stack(os_timer)) + timer_setup_on_stack(os_timer, __os_timer_shim, flags); + else + timer_setup(os_timer, __os_timer_shim, flags); + + return QDF_STATUS_SUCCESS; +} + +#else + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) +#define setup_deferrable_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE) +#endif + +static inline void __os_timer_shim(unsigned long addr) +{ + struct __qdf_timer_t *timer = (void *)addr; + + timer->callback(timer->context); +} + +static inline QDF_STATUS __qdf_timer_init(struct __qdf_timer_t *timer, + qdf_timer_func_t func, void *arg, + QDF_TIMER_TYPE type) +{ + struct timer_list *os_timer = &timer->os_timer; + bool is_on_stack = object_is_on_stack(os_timer); + unsigned long addr = (unsigned long)timer; + + timer->callback = func; + timer->context = arg; + + if (type == QDF_TIMER_TYPE_SW) { + if (is_on_stack) + setup_deferrable_timer_on_stack(os_timer, + __os_timer_shim, + addr); + else + setup_deferrable_timer(os_timer, __os_timer_shim, addr); + } else { + if (is_on_stack) + setup_timer_on_stack(os_timer, __os_timer_shim, addr); + else + setup_timer(os_timer, __os_timer_shim, addr); + } + + return QDF_STATUS_SUCCESS; +} +#endif /* KERNEL_VERSION(4, 15, 0)*/ + +static inline void __qdf_timer_start(struct __qdf_timer_t *timer, uint32_t msec) +{ + struct timer_list *os_timer = &timer->os_timer; + + os_timer->expires = jiffies + __qdf_scaled_msecs_to_jiffies(msec); + add_timer(os_timer); +} + +static inline void __qdf_timer_mod(struct __qdf_timer_t *timer, uint32_t msec) +{ + mod_timer(&timer->os_timer, + jiffies + __qdf_scaled_msecs_to_jiffies(msec)); +} + +static inline bool __qdf_timer_stop(struct __qdf_timer_t *timer) +{ + return !!del_timer(&timer->os_timer); +} + +static inline void __qdf_timer_free(struct __qdf_timer_t *timer) +{ + struct timer_list *os_timer = &timer->os_timer; + + del_timer_sync(os_timer); + + if (object_is_on_stack(os_timer)) + destroy_timer_on_stack(os_timer); +} + +static inline bool __qdf_timer_sync_cancel(struct __qdf_timer_t *timer) +{ + return del_timer_sync(&timer->os_timer); +} + +#endif /* _I_QDF_TIMER_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_trace.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..ea0b7fda1fbf1605c67538a5f8b4f929c3892e88 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_trace.h @@ -0,0 +1,466 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_trace.h + * + * Linux-specific definitions for QDF trace + * + */ + +#if !defined(__I_QDF_TRACE_H) +#define __I_QDF_TRACE_H + +/* older kernels have a bug in kallsyms, so ensure module.h is included */ +#include +#include +#ifdef CONFIG_QCA_MINIDUMP +#include +#endif + +#if !defined(__printf) +#define __printf(a, b) +#endif + +/* QDF_TRACE is the macro invoked to add trace messages to code. See the + * documenation for qdf_trace_msg() for the parameters etc. for this function. + * + * NOTE: Code QDF_TRACE() macros into the source code. Do not code directly + * to the qdf_trace_msg() function. + * + * NOTE 2: qdf tracing is totally turned off if WLAN_DEBUG is *not* defined. + * This allows us to build 'performance' builds where we can measure performance + * without being bogged down by all the tracing in the code + */ +#if defined(QDF_TRACE_PRINT_ENABLE) +#define qdf_trace(log_level, args...) \ + do { \ + extern int qdf_dbg_mask; \ + if (qdf_dbg_mask >= log_level) { \ + printk(args); \ + printk("\n"); \ + } \ + } while (0) +#endif + +#if defined(WLAN_DEBUG) || defined(DEBUG) || defined(QDF_TRACE_PRINT_ENABLE) +#define QDF_TRACE qdf_trace_msg +#define QDF_VTRACE qdf_vtrace_msg +#define QDF_TRACE_HEX_DUMP qdf_trace_hex_dump +#else +#define QDF_TRACE(arg ...) __qdf_trace_dummy(arg) +#define QDF_VTRACE(arg ...) __qdf_vtrace_dummy(arg) +#define QDF_TRACE_HEX_DUMP(arg ...) __qdf_trace_hexdump_dummy(arg) +#endif + +#if defined(WLAN_DEBUG) || defined(DEBUG) || defined(QDF_TRACE_PRINT_ENABLE) +#define QDF_MAX_LOGS_PER_SEC 2 +/** + * __QDF_TRACE_RATE_LIMITED() - rate limited version of QDF_TRACE + * @params: parameters to pass through to QDF_TRACE + * + * This API prevents logging a message more than QDF_MAX_LOGS_PER_SEC times per + * second. This means any subsequent calls to this API from the same location + * within 1/QDF_MAX_LOGS_PER_SEC seconds will be dropped. + * + * Return: None + */ +#define __QDF_TRACE_RATE_LIMITED(params...)\ + do {\ + static ulong __last_ticks;\ + ulong __ticks = jiffies;\ + if (time_after(__ticks,\ + __last_ticks + HZ / QDF_MAX_LOGS_PER_SEC)) {\ + QDF_TRACE(params);\ + __last_ticks = __ticks;\ + } \ + } while (0) + +#define __QDF_TRACE_HEX_DUMP_RATE_LIMITED(params...)\ + do {\ + static ulong __last_ticks;\ + ulong __ticks = jiffies;\ + if (time_after(__ticks,\ + __last_ticks + HZ / QDF_MAX_LOGS_PER_SEC)) {\ + QDF_TRACE_HEX_DUMP(params);\ + __last_ticks = __ticks;\ + } \ + } while (0) +#else +#define __QDF_TRACE_RATE_LIMITED(arg ...) +#define __QDF_TRACE_HEX_DUMP_RATE_LIMITED(arg ...) +#endif + +#define __QDF_TRACE_NO_FL(log_level, module_id, format, args...) \ + QDF_TRACE(module_id, log_level, format, ## args) + +#define __QDF_TRACE_FL(log_level, module_id, format, args...) \ + QDF_TRACE(module_id, log_level, FL(format), ## args) + +#define __QDF_TRACE_RL(log_level, module_id, format, args...) \ + __QDF_TRACE_RATE_LIMITED(module_id, log_level, FL(format), ## args) + +#define __QDF_TRACE_RL_NO_FL(log_level, module_id, format, args...) \ + __QDF_TRACE_RATE_LIMITED(module_id, log_level, format, ## args) + +#define __QDF_TRACE_HEX_DUMP_RL(log_level, module_id, args...) \ + __QDF_TRACE_HEX_DUMP_RATE_LIMITED(module_id, log_level, ## args) + +static inline void __qdf_trace_noop(QDF_MODULE_ID module, + const char *format, ...) { } +static inline void __qdf_trace_dummy(QDF_MODULE_ID module, + QDF_TRACE_LEVEL level, + const char *format, ...) { } +static inline void __qdf_vtrace_dummy(QDF_MODULE_ID module, + QDF_TRACE_LEVEL level, + const char *str_format, va_list val) { } +static inline void __qdf_trace_hexdump_dummy(QDF_MODULE_ID module, + QDF_TRACE_LEVEL level, + void *data, int buf_len) { } + + +#ifdef WLAN_LOG_FATAL +#define QDF_TRACE_FATAL(params...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_FATAL, ## params) +#define QDF_TRACE_FATAL_NO_FL(params...) \ + __QDF_TRACE_NO_FL(QDF_TRACE_LEVEL_FATAL, ## params) +#define QDF_TRACE_FATAL_RL(params...) \ + __QDF_TRACE_RL(QDF_TRACE_LEVEL_FATAL, ## params) +#define QDF_TRACE_FATAL_RL_NO_FL(params...) \ + __QDF_TRACE_RL_NO_FL(QDF_TRACE_LEVEL_FATAL, ## params) +#define QDF_VTRACE_FATAL(module_id, fmt, args) \ + QDF_VTRACE(module_id, QDF_TRACE_LEVEL_FATAL, fmt, args) +#define QDF_TRACE_HEX_DUMP_FATAL_RL(params...) \ + __QDF_TRACE_HEX_DUMP_RL(QDF_TRACE_LEVEL_FATAL, ## params) +#else +#define QDF_TRACE_FATAL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_FATAL_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_FATAL_RL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_FATAL_RL_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_VTRACE_FATAL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_HEX_DUMP_FATAL_RL(params...) __qdf_trace_noop(params) +#endif + +#ifdef WLAN_LOG_ERROR +#define QDF_TRACE_ERROR(params...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_ERROR, ## params) +#define QDF_TRACE_ERROR_NO_FL(params...) \ + __QDF_TRACE_NO_FL(QDF_TRACE_LEVEL_ERROR, ## params) +#define QDF_TRACE_ERROR_RL(params...) \ + __QDF_TRACE_RL(QDF_TRACE_LEVEL_ERROR, ## params) +#define QDF_TRACE_ERROR_RL_NO_FL(params...) \ + __QDF_TRACE_RL_NO_FL(QDF_TRACE_LEVEL_ERROR, ## params) +#define QDF_VTRACE_ERROR(module_id, fmt, args) \ + QDF_VTRACE(module_id, QDF_TRACE_LEVEL_ERROR, fmt, args) +#define QDF_TRACE_HEX_DUMP_ERROR_RL(params...) \ + __QDF_TRACE_HEX_DUMP_RL(QDF_TRACE_LEVEL_ERROR, ## params) +#else +#define QDF_TRACE_ERROR(params...) __qdf_trace_noop(params) +#define QDF_TRACE_ERROR_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_ERROR_RL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_ERROR_RL_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_VTRACE_ERROR(params...) __qdf_trace_noop(params) +#define QDF_TRACE_HEX_DUMP_ERROR_RL(params...) __qdf_trace_noop(params) +#endif + +#ifdef WLAN_LOG_WARN +#define QDF_TRACE_WARN(params...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_WARN, ## params) +#define QDF_TRACE_WARN_NO_FL(params...) \ + __QDF_TRACE_NO_FL(QDF_TRACE_LEVEL_WARN, ## params) +#define QDF_TRACE_WARN_RL(params...) \ + __QDF_TRACE_RL(QDF_TRACE_LEVEL_WARN, ## params) +#define QDF_TRACE_WARN_RL_NO_FL(params...) \ + __QDF_TRACE_RL_NO_FL(QDF_TRACE_LEVEL_WARN, ## params) +#define QDF_VTRACE_WARN(module_id, fmt, args) \ + QDF_VTRACE(module_id, QDF_TRACE_LEVEL_WARN, fmt, args) +#define QDF_TRACE_HEX_DUMP_WARN_RL(params...) \ + __QDF_TRACE_HEX_DUMP_RL(QDF_TRACE_LEVEL_WARN, ## params) +#else +#define QDF_TRACE_WARN(params...) __qdf_trace_noop(params) +#define QDF_TRACE_WARN_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_WARN_RL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_WARN_RL_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_VTRACE_WARN(params...) __qdf_trace_noop(params) +#define QDF_TRACE_HEX_DUMP_WARN_RL(params...) __qdf_trace_noop(params) +#endif + +#ifdef WLAN_LOG_INFO +#define QDF_TRACE_INFO(params...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO, ## params) +#define QDF_TRACE_INFO_NO_FL(params...) \ + __QDF_TRACE_NO_FL(QDF_TRACE_LEVEL_INFO, ## params) +#define QDF_TRACE_INFO_RL(params...) \ + __QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO, ## params) +#define QDF_TRACE_INFO_RL_NO_FL(params...) \ + __QDF_TRACE_RL_NO_FL(QDF_TRACE_LEVEL_INFO, ## params) +#define QDF_VTRACE_INFO(module_id, fmt, args) \ + QDF_VTRACE(module_id, QDF_TRACE_LEVEL_INFO, fmt, args) +#define QDF_TRACE_HEX_DUMP_INFO_RL(params...) \ + __QDF_TRACE_HEX_DUMP_RL(QDF_TRACE_LEVEL_INFO, ## params) +#else +#define QDF_TRACE_INFO(params...) __qdf_trace_noop(params) +#define QDF_TRACE_INFO_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_INFO_RL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_INFO_RL_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_VTRACE_INFO(params...) __qdf_trace_noop(params) +#define QDF_TRACE_HEX_DUMP_INFO_RL(params...) __qdf_trace_noop(params) +#endif + +#ifdef WLAN_LOG_DEBUG +#define QDF_TRACE_DEBUG(params...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_DEBUG, ## params) +#define QDF_TRACE_DEBUG_NO_FL(params...) \ + __QDF_TRACE_NO_FL(QDF_TRACE_LEVEL_DEBUG, ## params) +#define QDF_TRACE_DEBUG_RL(params...) \ + __QDF_TRACE_RL(QDF_TRACE_LEVEL_DEBUG, ## params) +#define QDF_TRACE_DEBUG_RL_NO_FL(params...) \ + __QDF_TRACE_RL_NO_FL(QDF_TRACE_LEVEL_DEBUG, ## params) +#define QDF_VTRACE_DEBUG(module_id, fmt, args) \ + QDF_VTRACE(module_id, QDF_TRACE_LEVEL_DEBUG, fmt, args) +#define QDF_TRACE_HEX_DUMP_DEBUG_RL(params...) \ + __QDF_TRACE_HEX_DUMP_RL(QDF_TRACE_LEVEL_DEBUG, ## params) +#else +#define QDF_TRACE_DEBUG(params...) __qdf_trace_noop(params) +#define QDF_TRACE_DEBUG_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_DEBUG_RL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_DEBUG_RL_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_VTRACE_DEBUG(params...) __qdf_trace_noop(params) +#define QDF_TRACE_HEX_DUMP_DEBUG_RL(params...) __qdf_trace_noop(params) +#endif + +#ifdef WLAN_LOG_ENTER +#define QDF_TRACE_ENTER(params...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_DEBUG, ## params) +#else +#define QDF_TRACE_ENTER(params...) __qdf_trace_noop(params) +#endif + +#ifdef WLAN_LOG_EXIT +#define QDF_TRACE_EXIT(params...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_DEBUG, ## params) +#else +#define QDF_TRACE_EXIT(params...) __qdf_trace_noop(params) +#endif + +#define QDF_ENABLE_TRACING +#define qdf_scnprintf scnprintf + +#ifdef QDF_ENABLE_TRACING + +#ifdef WLAN_WARN_ON_ASSERT +#define QDF_ASSERT(_condition) \ + do { \ + if (!(_condition)) { \ + pr_err("QDF ASSERT in %s Line %d\n", \ + __func__, __LINE__); \ + WARN_ON(1); \ + } \ + } while (0) +#else +#define QDF_ASSERT(_condition) \ + do { \ + if (!(_condition)) { \ + /* no-op */ \ + } \ + } while (0) +#endif /* WLAN_WARN_ON_ASSERT */ +/** + * qdf_trace_msg()- logging API + * @module: Module identifier. A member of the QDF_MODULE_ID enumeration that + * identifies the module issuing the trace message. + * @level: Trace level. A member of the QDF_TRACE_LEVEL enumeration indicating + * the severity of the condition causing the trace message to be issued. + * More severe conditions are more likely to be logged. + * @str_format: Format string. The message to be logged. This format string + * contains printf-like replacement parameters, which follow this + * parameter in the variable argument list. + * + * Users wishing to add tracing information to their code should use + * QDF_TRACE. QDF_TRACE() will compile into a call to qdf_trace_msg() when + * tracing is enabled. + * + * Return: nothing + * + * implemented in qdf_trace.c + */ +void __printf(3, 4) qdf_trace_msg(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + const char *str_format, ...); + +/** + * qdf_vtrace_msg() - the va_list version of qdf_trace_msg + * @module: the calling module's Id + * @level: the logging level to log using + * @str_format: the log format string + * @val: the va_list containing the values to format according to str_format + * + * Return: None + */ +void qdf_vtrace_msg(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + const char *str_format, va_list val); + +#else + +/* This code will be used for compilation if tracing is to be compiled out */ +/* of the code so these functions/macros are 'do nothing' */ +static inline void qdf_trace_msg(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + const char *str_format, ...) +{ +} + +#define QDF_ASSERT(_condition) + +#endif + +#ifdef QDF_TRACE_PRINT_ENABLE +static inline void qdf_vprint(const char *fmt, va_list args) +{ + QDF_VTRACE_INFO(QDF_MODULE_ID_ANY, fmt, args); +} +#else /* QDF_TRACE_PRINT_ENABLE */ +static inline void qdf_vprint(const char *fmt, va_list args) +{ + QDF_VTRACE_ERROR(QDF_MODULE_ID_QDF, fmt, args); +} +#endif + +#ifdef PANIC_ON_BUG +#ifdef CONFIG_SLUB_DEBUG +/** + * __qdf_bug() - Calls BUG() when the PANIC_ON_BUG compilation option is enabled + * + * Note: Calling BUG() can cause a compiler to assume any following code is + * unreachable. Because these BUG's may or may not be enabled by the build + * configuration, this can cause developers some pain. Consider: + * + * bool bit; + * + * if (ptr) + * bit = ptr->returns_bool(); + * else + * __qdf_bug(); + * + * // do stuff with @bit + * + * return bit; + * + * In this case, @bit is potentially uninitialized when we return! However, the + * compiler can correctly assume this case is impossible when PANIC_ON_BUG is + * enabled. Because developers typically enable this feature, the "maybe + * uninitialized" warning will not be emitted, and the bug remains uncaught + * until someone tries to make a build without PANIC_ON_BUG. + * + * A simple workaround for this, is to put the definition of __qdf_bug in + * another compilation unit, which prevents the compiler from assuming + * subsequent code is unreachable. For CONFIG_SLUB_DEBUG, do this to catch more + * bugs. Otherwise, use the typical inlined approach. + * + * Return: None + */ +void __qdf_bug(void); +#else /* CONFIG_SLUB_DEBUG */ +static inline void __qdf_bug(void) +{ + BUG(); +} +#endif /* CONFIG_SLUB_DEBUG */ + +/** + * QDF_DEBUG_PANIC() - In debug builds, panic, otherwise do nothing + * @reason_fmt: a format string containing the reason for the panic + * @args: zero or more printf compatible logging arguments + * + * Return: None + */ +#define QDF_DEBUG_PANIC(reason_fmt, args...) \ + QDF_DEBUG_PANIC_FL(__func__, __LINE__, reason_fmt, ## args) + +/** + * QDF_DEBUG_PANIC_FL() - In debug builds, panic, otherwise do nothing + * @func: origin function name to be logged + * @line: origin line number to be logged + * @fmt: printf compatible format string to be logged + * @args: zero or more printf compatible logging arguments + * + * Return: None + */ +#define QDF_DEBUG_PANIC_FL(func, line, fmt, args...) \ + do { \ + pr_err("WLAN Panic @ %s:%d: " fmt "\n", func, line, ##args); \ + __qdf_bug(); \ + } while (false) + +#define QDF_BUG(_condition) \ + do { \ + if (!(_condition)) { \ + pr_err("QDF BUG in %s Line %d: Failed assertion '" \ + #_condition "'\n", __func__, __LINE__); \ + __qdf_bug(); \ + } \ + } while (0) + +#else /* PANIC_ON_BUG */ + +#define QDF_DEBUG_PANIC(reason...) \ + do { \ + /* no-op */ \ + } while (false) + +#define QDF_DEBUG_PANIC_FL(func, line, fmt, args...) \ + do { \ + /* no-op */ \ + } while (false) + +#define QDF_BUG(_condition) \ + do { \ + if (!(_condition)) { \ + /* no-op */ \ + } \ + } while (0) + +#endif /* PANIC_ON_BUG */ + +#ifdef KSYM_SYMBOL_LEN +#define __QDF_SYMBOL_LEN KSYM_SYMBOL_LEN +#else +#define __QDF_SYMBOL_LEN 1 +#endif + +#ifdef CONFIG_QCA_MINIDUMP +static inline void +__qdf_minidump_log(void *start_addr, size_t size, const char *name) +{ + if (fill_minidump_segments((uintptr_t)start_addr, size, + QCA_WDT_LOG_DUMP_TYPE_WLAN_MOD, (char *)name) < 0) + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, + "%s: failed to log %pK (%s)\n", + __func__, start_addr, name); +} + +static inline void +__qdf_minidump_remove(void *addr) +{ + remove_minidump_segments((uintptr_t)addr); +} +#else +static inline void +__qdf_minidump_log(void *start_addr, size_t size, const char *name) {} +static inline void +__qdf_minidump_remove(void *addr) {} +#endif +#endif /* __I_QDF_TRACE_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_types.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_types.h new file mode 100644 index 0000000000000000000000000000000000000000..b43f6c0ccb42dc1dd91240d9343a5106763df0ed --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_types.h @@ -0,0 +1,405 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_types.h + * This file provides OS dependent types API's. + */ + +#if !defined(__I_QDF_TYPES_H) +#define __I_QDF_TYPES_H + +#ifndef __KERNEL__ +#define __iomem +#endif +#include +#include +#include + +#ifndef __ahdecl +#ifdef __i386__ +#define __ahdecl __attribute__((regparm(0))) +#else +#define __ahdecl +#endif +#endif + +#ifdef __KERNEL__ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef IPA_OFFLOAD +#include +#endif + +#define __qdf_must_check __must_check + +typedef struct sg_table __sgtable_t; + +/* + * The IDs of the various system clocks + */ +#define __QDF_CLOCK_REALTIME CLOCK_REALTIME +#define __QDF_CLOCK_MONOTONIC CLOCK_MONOTONIC + +/* + * Return values for the qdf_hrtimer_data_t callback function + */ +#define __QDF_HRTIMER_NORESTART HRTIMER_NORESTART +#define __QDF_HRTIMER_RESTART HRTIMER_RESTART + +/* + * Mode arguments of qdf_hrtimer_data_t related functions + */ +#define __QDF_HRTIMER_MODE_ABS HRTIMER_MODE_ABS +#define __QDF_HRTIMER_MODE_REL HRTIMER_MODE_REL +#define __QDF_HRTIMER_MODE_PINNED HRTIMER_MODE_PINNED + +#else + +/* + * Hack - coexist with prior defs of dma_addr_t. + * Eventually all other defs of dma_addr_t should be removed. + * At that point, the "already_defined" wrapper can be removed. + */ +#ifndef __dma_addr_t_already_defined__ +#define __dma_addr_t_already_defined__ +typedef unsigned long dma_addr_t; +#endif + +typedef unsigned long phys_addr_t; +typedef unsigned long __sgtable_t; + +#ifndef SIOCGIWAP +#define SIOCGIWAP 0 +#endif + +#ifndef IWEVCUSTOM +#define IWEVCUSTOM 0 +#endif + +#ifndef IWEVREGISTERED +#define IWEVREGISTERED 0 +#endif + +#ifndef IWEVEXPIRED +#define IWEVEXPIRED 0 +#endif + +#ifndef SIOCGIWSCAN +#define SIOCGIWSCAN 0 +#endif + +#define DMA_TO_DEVICE 0 +#define DMA_BIDIRECTIONAL 0 +#define DMA_FROM_DEVICE 0 +#define __QDF_CLOCK_REALTIME 0 +#define __QDF_CLOCK_MONOTONIC 0 +#define __QDF_HRTIMER_MODE_ABS 0 +#define __QDF_HRTIMER_MODE_REL 0 +#define __QDF_HRTIMER_MODE_PINNED 0 +#define __QDF_HRTIMER_NORESTART 0 +#define __QDF_HRTIMER_RESTART 0 +#define __iomem +#endif /* __KERNEL__ */ + +/* + * max sg that we support + */ +#define __QDF_MAX_SCATTER 1 +#define __QDF_NSEC_PER_MSEC NSEC_PER_MSEC + +#if defined(__LITTLE_ENDIAN_BITFIELD) +#define QDF_LITTLE_ENDIAN_MACHINE +#elif defined(__BIG_ENDIAN_BITFIELD) +#define QDF_BIG_ENDIAN_MACHINE +#else +#error "Please fix " +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) || !defined(__KERNEL__) +#ifndef __bool_already_defined__ +#define __bool_already_defined__ + +/** + * bool - This is an enum for boolean + * @false: zero + * @true: one + */ +typedef enum bool { + false = 0, + true = 1, +} bool; +#endif /* __bool_already_defined__ */ +#endif + +#define __qdf_packed __attribute__((packed)) + +typedef int (*__qdf_os_intr)(void *); +/** + * Private definitions of general data types + */ +typedef dma_addr_t __qdf_dma_addr_t; +typedef size_t __qdf_dma_size_t; +typedef dma_addr_t __qdf_dma_context_t; +typedef struct net_device *__qdf_netdev_t; +typedef struct cpumask __qdf_cpu_mask; +typedef __le16 __qdf_le16_t; +typedef __le32 __qdf_le32_t; +typedef __le64 __qdf_le64_t; +typedef __be16 __qdf_be16_t; +typedef __be32 __qdf_be32_t; +typedef __be64 __qdf_be64_t; + +#ifdef IPA_OFFLOAD +typedef struct ipa_wdi_buffer_info __qdf_mem_info_t; +#else +/** + * struct __qdf_shared_mem_info - shared mem info struct + * @pa : physical address + * @iova: i/o virtual address + * @size: allocated memory size + * @result: status + */ +typedef struct __qdf_shared_mem_info { + phys_addr_t pa; + unsigned long iova; + size_t size; + int result; +} __qdf_mem_info_t; +#endif /* IPA_OFFLOAD */ + +#define qdf_dma_mem_context(context) dma_addr_t context +#define qdf_get_dma_mem_context(var, field) ((qdf_dma_context_t)(var->field)) + +/** + * typedef struct __qdf_resource_t - qdf resource type + * @paddr: Physical address + * @paddr: Virtual address + * @len: Length + */ +typedef struct __qdf_resource { + unsigned long paddr; + void __iomem *vaddr; + unsigned long len; +} __qdf_resource_t; + +struct __qdf_mempool_ctxt; + +#define MAX_MEM_POOLS 64 + +/** + * enum qdf_bus_type - Supported Bus types + * @QDF_BUS_TYPE_NONE: None Bus type for error check + * @QDF_BUS_TYPE_PCI: PCI Bus + * @QDF_BUS_TYPE_AHB: AHB Bus + * @QDF_BUS_TYPE_SNOC: SNOC Bus + * @QDF_BUS_TYPE_SIM: Simulator + * @QDF_BUS_TYPE_USB: USB Bus + * @QDF_BUS_TYPE_IPCI: IPCI Bus + */ +enum qdf_bus_type { + QDF_BUS_TYPE_NONE = -1, + QDF_BUS_TYPE_PCI = 0, + QDF_BUS_TYPE_AHB, + QDF_BUS_TYPE_SNOC, + QDF_BUS_TYPE_SIM, + QDF_BUS_TYPE_SDIO, + QDF_BUS_TYPE_USB, + QDF_BUS_TYPE_IPCI +}; + +/** + * struct __qdf_device - generic qdf device type + * @drv: Pointer to driver + * @drv_hdl: Pointer to driver handle + * @drv_name: Pointer to driver name + * @irq: IRQ + * @dev: Pointer to device + * @res: QDF resource + * @func: Interrupt handler + * @mem_pool: array of pointers to mem pool context + * @bus_type: Bus type + * @bid: Bus ID + * @smmu_s1_enabled: SMMU S1 enabled or not + * @iommu_mapping: DMA iommu mapping pointer + */ +struct __qdf_device { + void *drv; + void *drv_hdl; + char *drv_name; + int irq; + struct device *dev; + __qdf_resource_t res; + __qdf_os_intr func; + struct __qdf_mempool_ctxt *mem_pool[MAX_MEM_POOLS]; + enum qdf_bus_type bus_type; + const struct hif_bus_id *bid; + bool smmu_s1_enabled; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) + struct iommu_domain *domain; +#else +#ifdef ENABLE_SMMU_S1_TRANSLATION + struct dma_iommu_mapping *iommu_mapping; +#endif +#endif +}; +typedef struct __qdf_device *__qdf_device_t; + +typedef size_t __qdf_size_t; +typedef off_t __qdf_off_t; +typedef void __iomem* __qdf_iomem_t; + +typedef uint32_t ath_dma_addr_t; + +/** + * typedef __qdf_segment_t - segment of memory + * @daddr: dma address + * @len: length of segment + */ +typedef struct __qdf_segment { + dma_addr_t daddr; + uint32_t len; +} __qdf_segment_t; + +/** + * __qdf_dma_map - dma map of memory + * @mapped: mapped address + * @nsegs: number of segments + * @coherent: coherency status + * @seg: segment of memory + */ +struct __qdf_dma_map { + uint32_t mapped; + uint32_t nsegs; + uint32_t coherent; + __qdf_segment_t seg[__QDF_MAX_SCATTER]; +}; +typedef struct __qdf_dma_map *__qdf_dma_map_t; + +/** + * __qdf_net_wireless_evcode - enum for event code + * @__QDF_IEEE80211_ASSOC: association event code + * @__QDF_IEEE80211_REASSOC: reassociation event code + * @__QDF_IEEE80211_DISASSOC: disassociation event code + * @__QDF_IEEE80211_JOIN: join event code + * @__QDF_IEEE80211_LEAVE: leave event code + * @__QDF_IEEE80211_SCAN: scan event code + * @__QDF_IEEE80211_REPLAY: replay event code + * @__QDF_IEEE80211_MICHAEL:michael event code + * @__QDF_IEEE80211_REJOIN: rejoin event code + * @__QDF_CUSTOM_PUSH_BUTTON: push button event code + */ +enum __qdf_net_wireless_evcode { + __QDF_IEEE80211_ASSOC = SIOCGIWAP, + __QDF_IEEE80211_REASSOC = IWEVCUSTOM, + __QDF_IEEE80211_DISASSOC = SIOCGIWAP, + __QDF_IEEE80211_JOIN = IWEVREGISTERED, + __QDF_IEEE80211_LEAVE = IWEVEXPIRED, + __QDF_IEEE80211_SCAN = SIOCGIWSCAN, + __QDF_IEEE80211_REPLAY = IWEVCUSTOM, + __QDF_IEEE80211_MICHAEL = IWEVCUSTOM, + __QDF_IEEE80211_REJOIN = IWEVCUSTOM, + __QDF_CUSTOM_PUSH_BUTTON = IWEVCUSTOM, +}; + +#define __qdf_snprint snprintf +#define __qdf_vsnprint vsnprintf +#define __qdf_toupper toupper +#define qdf_kstrtoint __qdf_kstrtoint +#define qdf_kstrtouint __qdf_kstrtouint + +#define __qdf_kstrtoint kstrtoint +#define __qdf_kstrtouint kstrtouint + +#define __QDF_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL +#define __QDF_DMA_TO_DEVICE DMA_TO_DEVICE +#ifndef __ubicom32__ +#define __QDF_DMA_FROM_DEVICE DMA_FROM_DEVICE +#else +#define __QDF_DMA_FROM_DEVICE DMA_TO_DEVICE +#endif +#define __qdf_inline inline + +/* + * 1. GNU C/C++ Compiler + * + * How to detect gcc : __GNUC__ + * How to detect gcc version : + * major version : __GNUC__ (2 = 2.x, 3 = 3.x, 4 = 4.x) + * minor version : __GNUC_MINOR__ + * + * 2. Microsoft C/C++ Compiler + * + * How to detect msc : _MSC_VER + * How to detect msc version : + * _MSC_VER (1200 = MSVC 6.0, 1300 = MSVC 7.0, ...) + * + */ + +/* + * MACROs to help with compiler and OS specifics. May need to get a little + * more sophisticated than this and define these to specific 'VERSIONS' of + * the compiler and OS. Until we have a need for that, lets go with this + */ +#if defined(_MSC_VER) + +#define QDF_COMPILER_MSC +/* assuming that if we build with MSC, OS is WinMobile */ +#define QDF_OS_WINMOBILE + +#elif defined(__GNUC__) + +#define QDF_COMPILER_GNUC +#define QDF_OS_LINUX /* assuming if building with GNUC, OS is Linux */ + +#endif + +#if defined(QDF_COMPILER_MSC) + + +/* + * Does nothing on Windows. packing individual structs is not + * supported on the Windows compiler + */ +#define QDF_PACK_STRUCT_1 +#define QDF_PACK_STRUCT_2 +#define QDF_PACK_STRUCT_4 +#define QDF_PACK_STRUCT_8 +#define QDF_PACK_STRUCT_16 + +#elif defined(QDF_COMPILER_GNUC) + +#else +#error "Compiling with an unknown compiler!!" +#endif + +#endif /* __I_QDF_TYPES_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_util.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_util.h new file mode 100644 index 0000000000000000000000000000000000000000..67469c355e0917fedf5f9e6f73dccaae956f652a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_util.h @@ -0,0 +1,488 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_util.h + * This file provides OS dependent API's. + */ + +#ifndef _I_QDF_UTIL_H +#define _I_QDF_UTIL_H + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 3, 8) +#include +#else +#if defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__) +#include +#else +#endif +#endif + +#include +#include +#include + +#ifdef QCA_PARTNER_PLATFORM +#include "ath_carr_pltfrm.h" +#else +#include +#endif + +typedef wait_queue_head_t __qdf_wait_queue_head_t; + +/* Generic compiler-dependent macros if defined by the OS */ +#define __qdf_wait_queue_interruptible(wait_queue, condition) \ + wait_event_interruptible(wait_queue, condition) + +#define __qdf_wait_queue_timeout(wait_queue, condition, timeout) \ + wait_event_timeout(wait_queue, condition, timeout) + + +#define __qdf_init_waitqueue_head(_q) init_waitqueue_head(_q) + +#define __qdf_wake_up_interruptible(_q) wake_up_interruptible(_q) + +#define __qdf_wake_up(_q) wake_up(_q) + +#define __qdf_wake_up_completion(_q) wake_up_completion(_q) + +#define __qdf_unlikely(_expr) unlikely(_expr) +#define __qdf_likely(_expr) likely(_expr) + +/** + * __qdf_set_bit() - set bit in address + * @nr: bit number to be set + * @addr: address buffer pointer + * + * Return: none + */ +static inline void __qdf_set_bit(unsigned int nr, unsigned long *addr) +{ + __set_bit(nr, addr); +} + +static inline void __qdf_clear_bit(unsigned int nr, unsigned long *addr) +{ + __clear_bit(nr, addr); +} + +static inline bool __qdf_test_bit(unsigned int nr, unsigned long *addr) +{ + return test_bit(nr, addr); +} + +static inline bool __qdf_test_and_clear_bit(unsigned int nr, + unsigned long *addr) +{ + return __test_and_clear_bit(nr, addr); +} + +static inline unsigned long __qdf_find_first_bit(unsigned long *addr, + unsigned long nbits) +{ + return find_first_bit(addr, nbits); +} + +/** + * __qdf_set_macaddr_broadcast() - set a QDF MacAddress to the 'broadcast' + * @mac_addr: pointer to the qdf MacAddress to set to broadcast + * + * This function sets a QDF MacAddress to the 'broadcast' MacAddress. Broadcast + * MacAddress contains all 0xFF bytes. + * + * Return: none + */ +static inline void __qdf_set_macaddr_broadcast(struct qdf_mac_addr *mac_addr) +{ + memset(mac_addr, 0xff, QDF_MAC_ADDR_SIZE); +} + +/** + * __qdf_zero_macaddr() - zero out a MacAddress + * @mac_addr: pointer to the struct qdf_mac_addr to zero. + * + * This function zeros out a QDF MacAddress type. + * + * Return: none + */ +static inline void __qdf_zero_macaddr(struct qdf_mac_addr *mac_addr) +{ + memset(mac_addr, 0, QDF_MAC_ADDR_SIZE); +} + +/** + * __qdf_is_macaddr_equal() - compare two QDF MacAddress + * @mac_addr1: Pointer to one qdf MacAddress to compare + * @mac_addr2: Pointer to the other qdf MacAddress to compare + * + * This function returns a bool that tells if a two QDF MacAddress' + * are equivalent. + * + * Return: true if the MacAddress's are equal + * not true if the MacAddress's are not equal + */ +static inline bool __qdf_is_macaddr_equal(struct qdf_mac_addr *mac_addr1, + struct qdf_mac_addr *mac_addr2) +{ + return 0 == memcmp(mac_addr1, mac_addr2, QDF_MAC_ADDR_SIZE); +} + +/** + * qdf_in_interrupt - returns true if in interrupt context + */ +#define qdf_in_interrupt in_interrupt + +#define __qdf_min(_a, _b) min(_a, _b) +#define __qdf_max(_a, _b) max(_a, _b) + +/** + * Setting it to blank as feature is not intended to be supported + * on linux version less than 4.3 + */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#define __QDF_DECLARE_EWMA(name, _factor, _weight) + +#define __qdf_ewma_tx_lag int +#define __qdf_ewma_rx_rssi int +#else +#define __QDF_DECLARE_EWMA(name, _factor, _weight) \ + DECLARE_EWMA(name, _factor, _weight) + +#define __qdf_ewma_tx_lag struct ewma_tx_lag +#define __qdf_ewma_rx_rssi struct ewma_rx_rssi +#endif + +#define __qdf_ffz(mask) (~(mask) == 0 ? -1 : ffz(mask)) + +#define MEMINFO_KB(x) ((x) << (PAGE_SHIFT - 10)) /* In kilobytes */ + +/** + * @brief Assert + */ +#define __qdf_assert(expr) do { \ + if (unlikely(!(expr))) { \ + pr_err("Assertion failed! %s:%s %s:%d\n", \ + # expr, __func__, __FILE__, __LINE__); \ + dump_stack(); \ + QDF_BUG(0); \ + } \ +} while (0) + +/** + * @brief Assert + */ +#define __qdf_target_assert(expr) do { \ + if (unlikely(!(expr))) { \ + qdf_err("Assertion failed! %s:%s %s:%d", \ + #expr, __FUNCTION__, __FILE__, __LINE__); \ + dump_stack(); \ + QDF_DEBUG_PANIC("Take care of the TARGET ASSERT first\n"); \ + } \ +} while (0) + +/** + * @brief Compile time Assert + */ +#define QDF_COMPILE_TIME_ASSERT(assertion_name, predicate) \ + typedef char assertion_name[(predicate) ? 1 : -1] + +#define __qdf_container_of(ptr, type, member) container_of(ptr, type, member) + +#define __qdf_ntohs ntohs +#define __qdf_ntohl ntohl + +#define __qdf_htons htons +#define __qdf_htonl htonl + +#define __qdf_cpu_to_le16 cpu_to_le16 +#define __qdf_cpu_to_le32 cpu_to_le32 +#define __qdf_cpu_to_le64 cpu_to_le64 + +#define __qdf_le16_to_cpu le16_to_cpu +#define __qdf_le32_to_cpu le32_to_cpu +#define __qdf_le64_to_cpu le64_to_cpu + +#define __qdf_cpu_to_be16 cpu_to_be16 +#define __qdf_cpu_to_be32 cpu_to_be32 +#define __qdf_cpu_to_be64 cpu_to_be64 + +#define __qdf_be16_to_cpu be16_to_cpu +#define __qdf_be32_to_cpu be32_to_cpu +#define __qdf_be64_to_cpu be64_to_cpu + +/** + * @brief memory barriers. + */ +#define __qdf_wmb() wmb() +#define __qdf_rmb() rmb() +#define __qdf_mb() mb() +#define __qdf_ioread32(offset) ioread32(offset) +#define __qdf_iowrite32(offset, value) iowrite32(value, offset) + +#define __qdf_roundup(x, y) roundup(x, y) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#define __qdf_ewma_tx_lag_init(tx_lag) +#define __qdf_ewma_tx_lag_add(tx_lag, value) +#define __qdf_ewma_tx_lag_read(tx_lag) + +#define __qdf_ewma_rx_rssi_init(rx_rssi) +#define __qdf_ewma_rx_rssi_add(rx_rssi, value) +#define __qdf_ewma_rx_rssi_read(rx_rssi) +#else +#define __qdf_ewma_tx_lag_init(tx_lag) \ + ewma_tx_lag_init(tx_lag) + +#define __qdf_ewma_tx_lag_add(tx_lag, value) \ + ewma_tx_lag_add(tx_lag, value) + +#define __qdf_ewma_tx_lag_read(tx_lag) \ + ewma_tx_lag_read(tx_lag) + +#define __qdf_ewma_rx_rssi_init(rx_rssi) \ + ewma_rx_rssi_init(rx_rssi) + +#define __qdf_ewma_rx_rssi_add(rx_rssi, value) \ + ewma_rx_rssi_add(rx_rssi, value) + +#define __qdf_ewma_rx_rssi_read(rx_rssi) \ + ewma_rx_rssi_read(rx_rssi) +#endif + +#define __qdf_prefetch(x) prefetch(x) + +#ifdef QCA_CONFIG_SMP +/** + * __qdf_get_cpu() - get cpu_index + * + * Return: cpu_index + */ +static inline +int __qdf_get_cpu(void) +{ + int cpu_index = get_cpu(); + + put_cpu(); + return cpu_index; +} +#else +static inline +int __qdf_get_cpu(void) +{ + return 0; +} +#endif + +static inline int __qdf_device_init_wakeup(__qdf_device_t qdf_dev, bool enable) +{ + return device_init_wakeup(qdf_dev->dev, enable); +} + +/** + * __qdf_get_totalramsize() - Get total ram size in Kb + * + * Return: Total ram size in Kb + */ +static inline uint64_t +__qdf_get_totalramsize(void) +{ + struct sysinfo meminfo; + + si_meminfo(&meminfo); + return MEMINFO_KB(meminfo.totalram); +} + +/** + * __qdf_get_lower_32_bits() - get lower 32 bits from an address. + * @addr: address + * + * This api returns the lower 32 bits of an address. + * + * Return: lower 32 bits. + */ +static inline +uint32_t __qdf_get_lower_32_bits(__qdf_dma_addr_t addr) +{ + return lower_32_bits(addr); +} + +/** + * __qdf_get_upper_32_bits() - get upper 32 bits from an address. + * @addr: address + * + * This api returns the upper 32 bits of an address. + * + * Return: upper 32 bits. + */ +static inline +uint32_t __qdf_get_upper_32_bits(__qdf_dma_addr_t addr) +{ + return upper_32_bits(addr); +} + +/** + * __qdf_rounddown_pow_of_two() - Round down to nearest power of two + * @n: number to be tested + * + * Test if the input number is power of two, and return the nearest power of two + * + * Return: number rounded down to the nearest power of two + */ +static inline +unsigned long __qdf_rounddown_pow_of_two(unsigned long n) +{ + if (is_power_of_2(n)) + return n; /* already a power of 2 */ + + return __rounddown_pow_of_two(n); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) + +/** + * __qdf_set_dma_coherent_mask() - set max number of bits allowed in dma addr + * @dev: device pointer + * @addr_bits: max number of bits allowed in dma address + * + * This API sets the maximum allowed number of bits in the dma address. + * + * Return: 0 - success, non zero - failure + */ +static inline +int __qdf_set_dma_coherent_mask(struct device *dev, uint8_t addr_bits) +{ + return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(addr_bits)); +} + +#else + +/** + * __qdf_set_dma_coherent_mask() - set max number of bits allowed in dma addr + * @dev: device pointer + * @addr_bits: max number of bits allowed in dma address + * + * This API sets the maximum allowed number of bits in the dma address. + * + * Return: 0 - success, non zero - failure + */ +static inline +int __qdf_set_dma_coherent_mask(struct device *dev, uint8_t addr_bits) +{ + return dma_set_coherent_mask(dev, DMA_BIT_MASK(addr_bits)); +} +#endif +/** + * qdf_get_random_bytes() - returns nbytes bytes of random + * data + * + * Return: random bytes of data + */ +static inline +void __qdf_get_random_bytes(void *buf, int nbytes) +{ + return get_random_bytes(buf, nbytes); +} + +/** + * __qdf_do_div() - wrapper function for kernel macro(do_div). + * @dividend: Dividend value + * @divisor : Divisor value + * + * Return: Quotient + */ +static inline +uint64_t __qdf_do_div(uint64_t dividend, uint32_t divisor) +{ + do_div(dividend, divisor); + /*do_div macro updates dividend with Quotient of dividend/divisor */ + return dividend; +} + +/** + * __qdf_do_div_rem() - wrapper function for kernel macro(do_div) + * to get remainder. + * @dividend: Dividend value + * @divisor : Divisor value + * + * Return: remainder + */ +static inline +uint64_t __qdf_do_div_rem(uint64_t dividend, uint32_t divisor) +{ + return do_div(dividend, divisor); +} + +/** + * __qdf_hex_to_bin() - Wrapper function to kernel API to get unsigned + * integer from hexa decimal ASCII character. + * @ch: hexa decimal ASCII character + * + * Return: For hexa decimal ASCII char return actual decimal value + * else -1 for bad input. + */ +static inline +int __qdf_hex_to_bin(char ch) +{ + return hex_to_bin(ch); +} + +/** + * __qdf_hex_str_to_binary() - Wrapper function to get array of unsigned + * integers from string of hexa decimal ASCII characters. + * @dst: output array to hold converted values + * @src: input string of hexa decimal ASCII characters + * @count: size of dst string + * + * Return: For a string of hexa decimal ASCII characters return 0 + * else -1 for bad input. + */ +static inline +int __qdf_hex_str_to_binary(u8 *dst, const char *src, size_t count) +{ + return hex2bin(dst, src, count); +} + +/** + * __qdf_fls() - find last set bit in a given 32 bit input + * @x: 32 bit mask + * + * Return: zero if the input is zero, otherwise returns the bit + * position of the last set bit, where the LSB is 1 and MSB is 32. + */ +static inline +int __qdf_fls(uint32_t x) +{ + return fls(x); +} + +#endif /*_I_QDF_UTIL_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_vfs.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_vfs.h new file mode 100644 index 0000000000000000000000000000000000000000..e719c4d85b0b877aa965890f0634731e9af9a6c0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_vfs.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_vfs + * QCA driver framework (QDF) virtual filesystem management APIs + */ + +#if !defined(__I_QDF_VFS_H) +#define __I_QDF_VFS_H + +/* Include Files */ +#include + +struct qdf_vfs_attr; +struct qdf_vf_bin_attr; +struct qdf_dev_obj; + +/** + * __qdf_vfs_set_file_attributes() - set file attributes + * @devobj: Device object + * @attr: File attribute + * + * This function will set the attributes of a file + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qdf_vfs_set_file_attributes(struct qdf_dev_obj *devobj, + struct qdf_vfs_attr *attr) +{ + int ret; + + ret = sysfs_create_group((struct kobject *)devobj, + (struct attribute_group *)attr); + + return qdf_status_from_os_return(ret); +} + +/** + * __qdf_vfs_clear_file_attributes() - clear file attributes + * @devobj: Device object + * @attr: File attribute + * + * This function will clear the attributes of a file + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qdf_vfs_clear_file_attributes(struct qdf_dev_obj *devobj, + struct qdf_vfs_attr *attr) +{ + sysfs_remove_group((struct kobject *)devobj, + (struct attribute_group *)attr); + + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_vfs_create_binfile() - create binfile + * @devobj: Device object + * @attr: File attribute + * + * This function will create a binary file + * + * Return: QDF_STATUS_SUCCESS on success + */ +statuc inline QDF_STATUS +__qdf_vfs_create_binfile(struct qdf_dev_obj *devobj, + struct qdf_vf_bin_attr *attr) +{ + int ret; + + ret = sysfs_create_bin_file((struct kobject *)devobj, + (struct bin_attribute *)attr); + + return qdf_status_from_os_return(ret); +} + +/** + * __qdf_vfs_delete_binfile() - delete binfile + * @devobj: Device object + * @attr: File attribute + * + * This function will delete a binary file + * + * Return: QDF_STATUS_SUCCESS on success + */ +static inline QDF_STATUS +__qdf_vfs_delete_binfile(struct qdf_dev_obj *devobj, + struct qdf_vf_bin_attr *attr) +{ + sysfs_remove_bin_file((struct kobject *)devobj, + (struct bin_attribute *)attr); + + return QDF_STATUS_SUCCESS; +} +#endif /* __I_QDF_VFS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_cpuhp.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_cpuhp.c new file mode 100644 index 0000000000000000000000000000000000000000..e192055a2e7f024cdb00ffef5a674d07a59e2ca3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_cpuhp.c @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_cpuhp + * This file provides OS dependent QDF CPU hotplug APIs + */ + +#include "i_qdf_cpuhp.h" +#include "qdf_trace.h" +#include "linux/cpu.h" +#include "linux/notifier.h" +#include "linux/version.h" + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) +#include "linux/cpuhotplug.h" +#endif + +static __qdf_cpuhp_emit __qdf_cpuhp_on_up; +static __qdf_cpuhp_emit __qdf_cpuhp_on_down; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) +static int qdf_cpuhp_legacy_handler(struct notifier_block *block, + unsigned long state, + void *hcpu) +{ + unsigned long cpu = (unsigned long)hcpu; + + switch (state) { + case CPU_ONLINE: + __qdf_cpuhp_on_up(cpu); + break; + + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + __qdf_cpuhp_on_down(cpu); + break; + + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block qdf_cpuhp_notifier_block = { + .notifier_call = qdf_cpuhp_legacy_handler, +}; + +static inline void qdf_cpuhp_register_callbacks(void) +{ + register_hotcpu_notifier(&qdf_cpuhp_notifier_block); +} + +static inline void qdf_cpuhp_unregister_callbacks(void) +{ + unregister_hotcpu_notifier(&qdf_cpuhp_notifier_block); +} +#else +static enum cpuhp_state registered_hotplug_state; + +static int qdf_cpuhp_up_handler(unsigned int cpu) +{ + __qdf_cpuhp_on_up(cpu); + + return 0; +} + +static int qdf_cpuhp_down_handler(unsigned int cpu) +{ + __qdf_cpuhp_on_down(cpu); + + return 0; +} + +static inline void qdf_cpuhp_register_callbacks(void) +{ + registered_hotplug_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "wlan/qca-qdf:online", + qdf_cpuhp_up_handler, + qdf_cpuhp_down_handler); +} + +static inline void qdf_cpuhp_unregister_callbacks(void) +{ + QDF_BUG(registered_hotplug_state); + if (registered_hotplug_state) + cpuhp_remove_state(registered_hotplug_state); +} +#endif /* KERNEL_VERSION(4, 6, 0) */ + +void __qdf_cpuhp_os_init(__qdf_cpuhp_emit on_up, __qdf_cpuhp_emit on_down) +{ + __qdf_cpuhp_on_up = on_up; + __qdf_cpuhp_on_down = on_down; + + qdf_cpuhp_register_callbacks(); +} + +void __qdf_cpuhp_os_deinit(void) +{ + qdf_cpuhp_unregister_callbacks(); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_crypto.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_crypto.c new file mode 100644 index 0000000000000000000000000000000000000000..766221a20a50d80c990f1fc763054db7faf2edcc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_crypto.c @@ -0,0 +1,456 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_crypto.c + * + * This source file contains linux specific definitions for QDF crypto APIs + */ + +/* Include Files */ +#include "qdf_crypto.h" +#include +#include +#include +#include +#include +#include + +/* Function Definitions and Documentation */ +#define MAX_HMAC_ELEMENT_CNT 10 + +/* + * xor: API to calculate xor + * @a: first variable + * @b: second variable + * @len: length of variables + */ +static void xor(uint8_t *a, const uint8_t *b, size_t len) +{ + unsigned int i; + + for (i = 0; i < len; i++) + a[i] ^= b[i]; +} + +int qdf_get_hash(uint8_t *type, + uint8_t element_cnt, uint8_t *addr[], uint32_t *addr_len, + int8_t *hash) +{ + return qdf_get_hmac_hash(type, NULL, 0, element_cnt, + addr, addr_len, hash); +} + +int qdf_get_hmac_hash(uint8_t *type, uint8_t *key, + uint32_t keylen, + uint8_t element_cnt, uint8_t *addr[], uint32_t *addr_len, + int8_t *hash) +{ + int i; + size_t src_len[MAX_HMAC_ELEMENT_CNT]; + + if (element_cnt > MAX_HMAC_ELEMENT_CNT) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Invalid element count %d"), element_cnt); + return -EINVAL; + } + + for (i = 0; i < element_cnt; i++) + src_len[i] = addr_len[i]; + + return qdf_get_keyed_hash(type, key, keylen, (const uint8_t **)addr, + src_len, element_cnt, hash); +} + +/* qdf_update_dbl from RFC 5297. Length of d is AES_BLOCK_SIZE (128 bits) */ +void qdf_update_dbl(uint8_t *d) +{ + int i; + uint8_t msb, msb_prev = 0; + + /* left shift by 1 */ + for (i = AES_BLOCK_SIZE - 1; i >= 0; i--) { + msb = d[i] & 0x80; + d[i] = d[i] << 1; + d[i] += msb_prev ? 1 : 0; + msb_prev = msb; + } + + if (msb) + d[AES_BLOCK_SIZE - 1] ^= 0x87; +} + +/** + * set_desc_flags() - set flags variable in the shash_desc struct + * @desc: pointer to shash_desc struct + * @tfm: pointer to crypto_shash struct + * + * Set the flags variable in the shash_desc struct by getting the flag + * from the crypto_hash struct. The flag is not actually used, prompting + * its removal from kernel code in versions 5.2 and above. Thus, for + * versions 5.2 and above, do not set the flag variable of shash_desc. + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) +static void set_desc_flags(struct shash_desc *desc, struct crypto_shash *tfm) +{ + desc->flags = crypto_shash_get_flags(tfm); +} +#else +static void set_desc_flags(struct shash_desc *desc, struct crypto_shash *tfm) +{ +} +#endif + +int qdf_get_keyed_hash(const char *alg, const uint8_t *key, + unsigned int key_len, const uint8_t *src[], + size_t *src_len, size_t num_elements, uint8_t *out) +{ + struct crypto_shash *tfm; + int ret; + size_t i; + + tfm = crypto_alloc_shash(alg, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Failed to allocate transformation for %s: %ld"), + alg, PTR_ERR(tfm)); + return -EINVAL; + } + + if (key && key_len) { + ret = crypto_shash_setkey(tfm, key, key_len); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Set key failed for %s, ret:%d"), + alg, -ret); + goto error; + } + } + + do { + SHASH_DESC_ON_STACK(desc, tfm); + desc->tfm = tfm; + set_desc_flags(desc, tfm); + + ret = crypto_shash_init(desc); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Failed to init hash for %s, ret:%d"), + alg, -ret); + goto error; + } + + for (i = 0; i < num_elements; i++) { + ret = crypto_shash_update(desc, src[i], src_len[i]); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, + QDF_TRACE_LEVEL_ERROR, + FL("Failed to update hash for %s, ret:%d"), + alg, -ret); + goto error; + } + } + + ret = crypto_shash_final(desc, out); + if (ret) + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Failed to get digest for %s, ret:%d"), + alg, -ret); + } while (0); + +error: + crypto_free_shash(tfm); + return ret; +} + +/* AES String to Vector from RFC 5297, 'out' should be of length AES_BLOCK_SIZE + */ +int qdf_aes_s2v(const uint8_t *key, unsigned int key_len, const uint8_t *s[], + size_t s_len[], size_t num_s, uint8_t *out) +{ + const char *alg = "cmac(aes)"; + uint8_t d[AES_BLOCK_SIZE]; + uint8_t buf[AES_BLOCK_SIZE] = { 0 }; + size_t buf_len = AES_BLOCK_SIZE; + const uint8_t *a[1]; + unsigned int i; + uint8_t *t = NULL; + size_t t_len; + int ret; + + if (num_s == 0) { + /* V = AES-CMAC(K, ) */ + buf[0] = 0x01; + a[0] = buf; + ret = qdf_get_keyed_hash(alg, key, key_len, a, &buf_len, 1, + out); + return ret; + } + + /* D = AES-CMAC(K, ) */ + a[0] = buf; + ret = qdf_get_keyed_hash(alg, key, key_len, a, &buf_len, 1, d); + if (ret) + goto error; + + for (i = 0; i < num_s - 1; i++) { + /* D = qdf_update_dbl(D) xor AES-CMAC(K, Si) */ + qdf_update_dbl(d); + ret = qdf_get_keyed_hash(alg, key, key_len, &s[i], &s_len[i], 1, + buf); + if (ret) + goto error; + xor(d, buf, AES_BLOCK_SIZE); + } + + if (s_len[i] >= AES_BLOCK_SIZE) { + /* len(Sn) >= 128 */ + /* T = Sn xorend D */ + t = qdf_mem_malloc(s_len[i]); + if (!t) + return -EINVAL; + qdf_mem_copy(t, s[i], s_len[i]); + xor(t + s_len[i] - AES_BLOCK_SIZE, d, AES_BLOCK_SIZE); + t_len = s_len[i]; + } else { + /* len(Sn) < 128 */ + /* T = qdf_update_dbl(D) xor pad(Sn) */ + qdf_update_dbl(d); + qdf_mem_zero(buf, AES_BLOCK_SIZE); + qdf_mem_copy(buf, s[i], s_len[i]); + buf[s_len[i]] = 0x80; + xor(d, s[i], AES_BLOCK_SIZE); + t = d; + t_len = AES_BLOCK_SIZE; + } + + /* V = AES-CMAC(K, T) */ + a[0] = t; + ret = qdf_get_keyed_hash(alg, key, key_len, a, &t_len, 1, out); + +error: + if (t && t != d) + qdf_mem_free(t); + return ret; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)) +int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv, + const uint8_t *src, size_t src_len, uint8_t *dest, bool enc) +{ + struct crypto_skcipher *tfm; + struct skcipher_request *req = NULL; + struct scatterlist sg_in, sg_out; + int ret; + + if (!IS_VALID_CTR_KEY_LEN(key_len)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Invalid key length: %u"), key_len); + return -EINVAL; + } + + tfm = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Failed to alloc transformation for ctr(aes):%ld"), + PTR_ERR(tfm)); + return -EAGAIN; + } + + req = skcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Failed to allocate request for ctr(aes)")); + crypto_free_skcipher(tfm); + return -EAGAIN; + } + + ret = crypto_skcipher_setkey(tfm, key, key_len); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Set key failed for ctr(aes), ret:%d"), -ret); + skcipher_request_free(req); + crypto_free_skcipher(tfm); + return ret; + } + + sg_init_one(&sg_in, src, src_len); + sg_init_one(&sg_out, dest, src_len); + skcipher_request_set_crypt(req, &sg_in, &sg_out, src_len, siv); + + if (enc) + ret = crypto_skcipher_encrypt(req); + else + ret = crypto_skcipher_decrypt(req); + + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("%s failed for ctr(aes), ret:%d"), + enc ? "Encryption" : "Decryption", -ret); + } + + skcipher_request_free(req); + crypto_free_skcipher(tfm); + return ret; +} +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) +int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv, + const uint8_t *src, size_t src_len, uint8_t *dest, bool enc) +{ + struct crypto_ablkcipher *tfm; + struct ablkcipher_request *req = NULL; + struct scatterlist sg_in, sg_out; + int ret; + + if (!IS_VALID_CTR_KEY_LEN(key_len)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Invalid key length: %u"), key_len); + return -EINVAL; + } + + tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Failed to alloc transformation for ctr(aes):%ld"), + PTR_ERR(tfm)); + return -EAGAIN; + } + + req = ablkcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Failed to allocate request for ctr(aes)")); + crypto_free_ablkcipher(tfm); + return -EAGAIN; + } + + ret = crypto_ablkcipher_setkey(tfm, key, key_len); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Set key failed for ctr(aes), ret:%d"), -ret); + ablkcipher_request_free(req); + crypto_free_ablkcipher(tfm); + return ret; + } + + sg_init_one(&sg_in, src, src_len); + sg_init_one(&sg_out, dest, src_len); + ablkcipher_request_set_crypt(req, &sg_in, &sg_out, src_len, siv); + + if (enc) + ret = crypto_ablkcipher_encrypt(req); + else + ret = crypto_ablkcipher_decrypt(req); + + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("%s failed for ctr(aes), ret:%d"), + enc ? "Encryption" : "Decryption", -ret); + } + + ablkcipher_request_free(req); + crypto_free_ablkcipher(tfm); + + return ret; +} +#else +int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv, + const uint8_t *src, size_t src_len, uint8_t *dest, bool enc) +{ + return -EINVAL; +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +int qdf_crypto_aes_gmac(uint8_t *key, uint16_t key_length, + uint8_t *iv, uint8_t *aad, uint8_t *data, + uint16_t data_len, uint8_t *mic) +{ + struct crypto_aead *tfm; + int ret = 0; + struct scatterlist sg[4]; + uint16_t req_size; + struct aead_request *req = NULL; + uint8_t *aad_ptr, *input; + + tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { + ret = PTR_ERR(tfm); + tfm = NULL; + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: crypto_alloc_aead failed (%d)", __func__, ret); + goto err_tfm; + } + + ret = crypto_aead_setkey(tfm, key, key_length); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "crypto_aead_setkey failed (%d)", ret); + goto err_tfm; + } + + ret = crypto_aead_setauthsize(tfm, IEEE80211_MMIE_GMAC_MICLEN); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "crypto_aead_setauthsize failed (%d)", ret); + goto err_tfm; + } + + /* Prepare aead request */ + req_size = sizeof(*req) + crypto_aead_reqsize(tfm) + + IEEE80211_MMIE_GMAC_MICLEN + AAD_LEN; + req = qdf_mem_malloc(req_size); + if (!req) { + ret = -ENOMEM; + goto err_tfm; + } + + input = (uint8_t *)req + sizeof(*req) + crypto_aead_reqsize(tfm); + aad_ptr = input + IEEE80211_MMIE_GMAC_MICLEN; + qdf_mem_copy(aad_ptr, aad, AAD_LEN); + + /* Scatter list operations */ + sg_init_table(sg, 4); + sg_set_buf(&sg[0], aad_ptr, AAD_LEN); + sg_set_buf(&sg[1], data, data_len); + sg_set_buf(&sg[2], input, IEEE80211_MMIE_GMAC_MICLEN); + sg_set_buf(&sg[3], mic, IEEE80211_MMIE_GMAC_MICLEN); + + aead_request_set_tfm(req, tfm); + aead_request_set_crypt(req, sg, sg, 0, iv); + aead_request_set_ad(req, + AAD_LEN + data_len + IEEE80211_MMIE_GMAC_MICLEN); + crypto_aead_encrypt(req); + +err_tfm: + if (tfm) + crypto_free_aead(tfm); + + if (req) + qdf_mem_free(req); + + return ret; +} +#else +int qdf_crypto_aes_gmac(uint8_t *key, uint16_t key_length, + uint8_t *iv, uint8_t *aad, uint8_t *data, + uint16_t data_len, uint8_t *mic) +{ + return -EINVAL; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_debugfs.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..da98bb0bbe889d919925b6c26df2ed7a099436ee --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_debugfs.c @@ -0,0 +1,537 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_debugfs + * This file provides QDF debug file system APIs + */ + +#include +#include +#include +#include +#include + +/* A private structure definition to qdf sequence */ +struct qdf_debugfs_seq_priv { + bool stop; +}; + +/* entry for root debugfs directory*/ +static qdf_dentry_t qdf_debugfs_root; + +QDF_STATUS qdf_debugfs_init(void) +{ + qdf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + + if (!qdf_debugfs_root) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_debugfs_init); + +void qdf_debugfs_exit(void) +{ + if (!qdf_debugfs_root) + return; + + debugfs_remove_recursive(qdf_debugfs_root); + qdf_debugfs_root = NULL; +} +qdf_export_symbol(qdf_debugfs_exit); + +qdf_dentry_t qdf_debugfs_get_root(void) +{ + return qdf_debugfs_root; +} +qdf_export_symbol(qdf_debugfs_get_root); + +umode_t qdf_debugfs_get_filemode(uint16_t mode) +{ + umode_t ret = 0; + + if (mode & QDF_FILE_USR_READ) + ret |= 0400; + if (mode & QDF_FILE_USR_WRITE) + ret |= 0200; + + if (mode & QDF_FILE_GRP_READ) + ret |= 0040; + if (mode & QDF_FILE_GRP_WRITE) + ret |= 0020; + + if (mode & QDF_FILE_OTH_READ) + ret |= 0004; + if (mode & QDF_FILE_OTH_WRITE) + ret |= 0002; + + return ret; +} + +/** + * ---------------------- Implementation note --------------------------------- + * + * A read in debugfs file triggers seq_read() which calls seq_read api. A + * sequence begins with the call of the function start(). If the return is a non + * NULL value, the function next() is called. This function is an iterator, the + * goal is to go though all the data. Each time next() is called, the function + * show() is also called. It writes data values in the buffer read by the user. + * The function next() is called until it returns NULL. The sequence ends when + * next() returns NULL, then the function stop() is called. + * + * NOTE: When a sequence is finished, another one starts. That means that + * at the end of function stop(), the function start() is called again. This + * loop finishes when the function start() returns NULL. + * ---------------------------------------------------------------------------- + */ + +/* .seq_start() */ +static void *qdf_debugfs_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct qdf_debugfs_seq_priv *priv; + + priv = qdf_mem_malloc(sizeof(*priv)); + if (!priv) + return NULL; + + priv->stop = false; + + return priv; +} + +/* .seq_next() */ +static void *qdf_debugfs_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct qdf_debugfs_seq_priv *priv = v; + + if (priv) + ++*pos; + + if (priv && priv->stop) { + qdf_mem_free(priv); + priv = NULL; + } + + return priv; +} + +/* .seq_stop() */ +static void qdf_debugfs_seq_stop(struct seq_file *seq, void *v) +{ + qdf_mem_free(v); +} + +/* .seq_show() */ +static int qdf_debugfs_seq_show(struct seq_file *seq, void *v) +{ + struct qdf_debugfs_seq_priv *priv = v; + struct qdf_debugfs_fops *fops; + QDF_STATUS status; + + fops = seq->private; + + if (fops && fops->show) { + status = fops->show(seq, fops->priv); + + if (priv && (status != QDF_STATUS_E_AGAIN)) + priv->stop = true; + } + + return 0; +} + +void qdf_debugfs_printf(qdf_debugfs_file_t file, const char *f, ...) +{ + va_list args; + + va_start(args, f); + seq_vprintf(file, f, args); + va_end(args); +} + +qdf_export_symbol(qdf_debugfs_printf); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) + +void qdf_debugfs_hexdump(qdf_debugfs_file_t file, const uint8_t *buf, + qdf_size_t len, int rowsize, int groupsize) +{ + seq_hex_dump(file, "", DUMP_PREFIX_OFFSET, rowsize, groupsize, buf, len, + false); +} + +#else + +void qdf_debugfs_hexdump(qdf_debugfs_file_t file, const uint8_t *buf, + qdf_size_t len, int rowsize, int groupsize) +{ + char *dst; + size_t dstlen, readlen, remaining = len; + int prefix = 0; + size_t commitlen; + + while (remaining > 0 && (file->size > file->count)) { + seq_printf(file, "%.8x: ", prefix); + + readlen = qdf_min(remaining, (qdf_size_t)rowsize); + dstlen = seq_get_buf(file, &dst); + hex_dump_to_buffer(buf, readlen, rowsize, groupsize, dst, + dstlen, false); + commitlen = strnlen(dst, dstlen); + seq_commit(file, commitlen); + seq_putc(file, '\n'); + + remaining = (remaining > rowsize) ? remaining - rowsize : 0; + buf += readlen; + prefix += rowsize; + } +} + +#endif + +bool qdf_debugfs_overflow(qdf_debugfs_file_t file) +{ + return seq_has_overflowed(file); +} + +void qdf_debugfs_write(qdf_debugfs_file_t file, const uint8_t *buf, + qdf_size_t len) +{ + seq_write(file, buf, len); +} + +/* sequential file operation table */ +static const struct seq_operations __qdf_debugfs_seq_ops = { + .start = qdf_debugfs_seq_start, + .next = qdf_debugfs_seq_next, + .stop = qdf_debugfs_seq_stop, + .show = qdf_debugfs_seq_show, +}; + +/* .open() */ +static int qdf_seq_open(struct inode *inode, struct file *file) +{ + void *private = inode->i_private; + struct seq_file *seq; + int rc; + + /** + * Note: seq_open() will allocate a struct seq_file and store its + * pointer in @file->private_data. It warns if private_data is not NULL. + */ + + rc = seq_open(file, &__qdf_debugfs_seq_ops); + + if (rc == 0) { + seq = file->private_data; + seq->private = private; + } + + return rc; +} + +/* .write() */ +static ssize_t qdf_seq_write(struct file *filp, const char __user *ubuf, + size_t len, loff_t *ppos) +{ + struct qdf_debugfs_fops *fops; + struct seq_file *seq; + u8 *buf; + ssize_t rc = 0; + + if (len == 0) + return 0; + + seq = filp->private_data; + fops = seq->private; + if (fops && fops->write) { + buf = qdf_mem_malloc(len + 1); + if (buf) { + buf[len] = '\0'; + rc = simple_write_to_buffer(buf, len, ppos, ubuf, len); + fops->write(fops->priv, buf, len + 1); + qdf_mem_free(buf); + } + } + + return rc; +} + +/* debugfs file operation table */ +static const struct file_operations __qdf_debugfs_fops = { + .owner = THIS_MODULE, + .open = qdf_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, + .write = qdf_seq_write, +}; + +qdf_dentry_t qdf_debugfs_create_dir(const char *name, qdf_dentry_t parent) +{ + qdf_dentry_t dir; + + if (!name) + return NULL; + if (!parent) + parent = qdf_debugfs_get_root(); + + dir = debugfs_create_dir(name, parent); + + if (IS_ERR_OR_NULL(dir)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s creation failed", name); + dir = NULL; + } + + return dir; +} +qdf_export_symbol(qdf_debugfs_create_dir); + +qdf_dentry_t qdf_debugfs_create_file(const char *name, uint16_t mode, + qdf_dentry_t parent, + struct qdf_debugfs_fops *fops) +{ + qdf_dentry_t file; + umode_t filemode; + + if (!name || !fops) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + file = debugfs_create_file(name, filemode, parent, fops, + &__qdf_debugfs_fops); + + if (IS_ERR_OR_NULL(file)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s creation failed 0x%pK", name, file); + file = NULL; + } + + return file; +} +qdf_export_symbol(qdf_debugfs_create_file); + +qdf_dentry_t qdf_debugfs_create_u8(const char *name, uint16_t mode, + qdf_dentry_t parent, u8 *value) +{ + umode_t filemode; + + if (!name) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + return debugfs_create_u8(name, filemode, parent, value); +} + +qdf_dentry_t qdf_debugfs_create_u16(const char *name, uint16_t mode, + qdf_dentry_t parent, u16 *value) +{ + umode_t filemode; + + if (!name) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + return debugfs_create_u16(name, filemode, parent, value); +} +qdf_export_symbol(qdf_debugfs_create_u16); + +qdf_dentry_t qdf_debugfs_create_u32(const char *name, + uint16_t mode, + qdf_dentry_t parent, u32 *value) +{ + umode_t filemode; + + if (!name) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + return debugfs_create_u32(name, filemode, parent, value); +} +qdf_export_symbol(qdf_debugfs_create_u32); + +qdf_dentry_t qdf_debugfs_create_u64(const char *name, uint16_t mode, + qdf_dentry_t parent, u64 *value) +{ + umode_t filemode; + + if (!name) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + return debugfs_create_u64(name, filemode, parent, value); +} +qdf_export_symbol(qdf_debugfs_create_u64); + +qdf_dentry_t qdf_debugfs_create_atomic(const char *name, uint16_t mode, + qdf_dentry_t parent, qdf_atomic_t *value) +{ + umode_t filemode; + + if (!name) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + return debugfs_create_atomic_t(name, filemode, parent, value); +} +qdf_export_symbol(qdf_debugfs_create_atomic); + +static int qdf_debugfs_string_show(struct seq_file *seq, void *pos) +{ + char *str = seq->private; + + seq_puts(seq, str); + seq_putc(seq, '\n'); + + return 0; +} + +static int qdf_debugfs_string_open(struct inode *inode, struct file *file) +{ + return single_open(file, qdf_debugfs_string_show, inode->i_private); +} + +static const struct file_operations qdf_string_fops = { + .owner = THIS_MODULE, + .open = qdf_debugfs_string_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +qdf_dentry_t qdf_debugfs_create_string(const char *name, uint16_t mode, + qdf_dentry_t parent, char *str) +{ + umode_t filemode; + + if (!name) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + return debugfs_create_file(name, filemode, parent, str, + &qdf_string_fops); +} +qdf_export_symbol(qdf_debugfs_create_string); + +void qdf_debugfs_remove_dir_recursive(qdf_dentry_t d) +{ + debugfs_remove_recursive(d); +} +qdf_export_symbol(qdf_debugfs_remove_dir_recursive); + +void qdf_debugfs_remove_dir(qdf_dentry_t d) +{ + debugfs_remove(d); +} +qdf_export_symbol(qdf_debugfs_remove_dir); + +void qdf_debugfs_remove_file(qdf_dentry_t d) +{ + debugfs_remove(d); +} +qdf_export_symbol(qdf_debugfs_remove_file); + +static int qdf_debugfs_single_show(struct seq_file *seq, void *v) +{ + struct qdf_debugfs_fops *fops = seq->private; + + if (fops && fops->show) + fops->show(seq, fops->priv); + + return 0; +} + +/* .open() */ +static int qdf_debugfs_single_open(struct inode *inode, struct file *file) +{ + return single_open(file, qdf_debugfs_single_show, + inode->i_private); +} + +/* File operations for the simplified version */ +static const struct file_operations qdf_debugfs_fops_simple = { + .owner = THIS_MODULE, + .open = qdf_debugfs_single_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, +}; + +qdf_dentry_t qdf_debugfs_create_file_simplified( + const char *name, uint16_t mode, + qdf_dentry_t parent, struct qdf_debugfs_fops *fops) +{ + qdf_dentry_t file; + umode_t filemode; + + if (!name || !fops) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + file = debugfs_create_file(name, filemode, parent, fops, + &qdf_debugfs_fops_simple); + + if (IS_ERR_OR_NULL(file)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s creation failed 0x%pK", name, file); + file = NULL; + } + + return file; +} +qdf_export_symbol(qdf_debugfs_create_file_simplified); + +int qdf_debugfs_printer(void *priv, const char *fmt, ...) +{ + struct seq_file *file = priv; + va_list args; + + va_start(args, fmt); + seq_vprintf(file, fmt, args); + seq_puts(file, "\n"); + va_end(args); + + return 0; +} +qdf_export_symbol(qdf_debugfs_printer); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_defer.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_defer.c new file mode 100644 index 0000000000000000000000000000000000000000..42f5af794bcc0b4e45fef8cc0f45260637d139db --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_defer.c @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_defer.c + * This file provides OS dependent deferred API's. + */ + +#include +#include +#include + +#include "i_qdf_defer.h" +#include + +/** + * __qdf_defer_func() - defer work handler + * @work: Pointer to defer work + * + * Return: none + */ +void __qdf_defer_func(struct work_struct *work) +{ + __qdf_work_t *ctx = container_of(work, __qdf_work_t, work); + + if (!ctx->fn) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "No callback registered !!"); + return; + } + ctx->fn(ctx->arg); +} +qdf_export_symbol(__qdf_defer_func); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_delayed_work.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_delayed_work.c new file mode 100644 index 0000000000000000000000000000000000000000..fc7966eb228fe5d8d4fb77edf65c9cc19dad4dbf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_delayed_work.c @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_delayed_work.h" +#include "qdf_status.h" +#include "qdf_trace.h" +#include "qdf_types.h" + +#ifdef WLAN_DELAYED_WORK_DEBUG +#include "qdf_tracker.h" + +#define qdf_dwork_tracker_bits 2 /* 4 buckets */ +static qdf_tracker_declare(qdf_dwork_tracker, qdf_dwork_tracker_bits, + "delayed work leaks", "delayed work create", + "delayed work destroy"); + +void qdf_delayed_work_feature_init(void) +{ + qdf_tracker_init(&qdf_dwork_tracker); +} + +void qdf_delayed_work_feature_deinit(void) +{ + qdf_tracker_deinit(&qdf_dwork_tracker); +} + +void qdf_delayed_work_check_for_leaks(void) +{ + qdf_tracker_check_for_leaks(&qdf_dwork_tracker); +} + +static inline QDF_STATUS qdf_dwork_dbg_track(struct qdf_delayed_work *dwork, + const char *func, uint32_t line) +{ + return qdf_tracker_track(&qdf_dwork_tracker, dwork, func, line); +} + +static inline void qdf_dwork_dbg_untrack(struct qdf_delayed_work *dwork, + const char *func, uint32_t line) +{ + qdf_tracker_untrack(&qdf_dwork_tracker, dwork, func, line); +} +#else +static inline QDF_STATUS qdf_dwork_dbg_track(struct qdf_delayed_work *dwork, + const char *func, uint32_t line) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void qdf_dwork_dbg_untrack(struct qdf_delayed_work *dwork, + const char *func, uint32_t line) +{ } +#endif /* WLAN_DELAYED_WORK_DEBUG */ + +static void __qdf_delayed_work_handler(struct work_struct *work) +{ + struct qdf_delayed_work *dwork = + container_of(work, struct qdf_delayed_work, dwork.work); + + dwork->callback(dwork->context); +} + +QDF_STATUS __qdf_delayed_work_create(struct qdf_delayed_work *dwork, + qdf_delayed_work_cb callback, + void *context, + const char *func, uint32_t line) +{ + QDF_STATUS status; + + QDF_BUG(dwork); + QDF_BUG(callback); + if (!dwork || !callback) + return QDF_STATUS_E_INVAL; + + status = qdf_dwork_dbg_track(dwork, func, line); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + INIT_DELAYED_WORK(&dwork->dwork, __qdf_delayed_work_handler); + dwork->callback = callback; + dwork->context = context; + + return QDF_STATUS_SUCCESS; +} + +void __qdf_delayed_work_destroy(struct qdf_delayed_work *dwork, + const char *func, uint32_t line) +{ + qdf_delayed_work_stop_sync(dwork); + qdf_dwork_dbg_untrack(dwork, func, line); +} + +bool qdf_delayed_work_start(struct qdf_delayed_work *dwork, uint32_t msec) +{ + return schedule_delayed_work(&dwork->dwork, msecs_to_jiffies(msec)); +} + +bool qdf_delayed_work_stop_sync(struct qdf_delayed_work *dwork) +{ + return cancel_delayed_work_sync(&dwork->dwork); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_dev.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_dev.c new file mode 100644 index 0000000000000000000000000000000000000000..c94480767ad0e5fbdb3648ce4a8202856234860c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_dev.c @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_dev + * This file provides OS dependent device related APIs + */ + +#include "qdf_dev.h" +#include "qdf_mem.h" +#include "qdf_util.h" +#include "qdf_module.h" +#include + +QDF_STATUS +qdf_dev_alloc_mem(struct qdf_dev *qdfdev, struct qdf_devm **mrptr, + uint32_t reqsize, uint32_t mask) +{ + struct qdf_devm *mptr; + + if (!qdfdev) + return QDF_STATUS_E_INVAL; + + mptr = devm_kzalloc((struct device *)qdfdev, reqsize, mask); + + if (!mrptr) + return QDF_STATUS_E_NOMEM; + + *mrptr = mptr; + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_dev_alloc_mem); + +QDF_STATUS +qdf_dev_release_mem(struct qdf_dev *qdfdev, struct qdf_devm *mrptr) +{ + if (!mrptr) + return QDF_STATUS_E_INVAL; + + devm_kfree((struct device *)qdfdev, mrptr); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_dev_release_mem); + +QDF_STATUS +qdf_dev_modify_irq_status(uint32_t irnum, unsigned long cmask, + unsigned long smask) +{ + if (irnum <= 0) + return QDF_STATUS_E_INVAL; + + irq_modify_status(irnum, cmask, smask); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_dev_modify_irq_status); + +QDF_STATUS +qdf_dev_set_irq_affinity(uint32_t irnum, struct qdf_cpu_mask *cpmask) +{ + int ret; + + if (irnum <= 0) + return QDF_STATUS_E_INVAL; + + ret = irq_set_affinity_hint(irnum, (struct cpumask *)cpmask); + + return qdf_status_from_os_return(ret); +} + +qdf_export_symbol(qdf_dev_set_irq_affinity); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_event.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_event.c new file mode 100644 index 0000000000000000000000000000000000000000..7a9328a18dd9e0a9faa388d282862a68f4dc21ea --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_event.c @@ -0,0 +1,391 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_event.c + * + * This source file contains linux specific definitions for QDF event APIs + * The APIs mentioned in this file are used for initializing, setting, + * resetting, destroying an event and waiting on an occurrence of an event + * among multiple events. + */ + +/* Include Files */ +#include "qdf_event.h" +#include "qdf_mc_timer.h" +#include "qdf_timer.h" +#include + +struct qdf_evt_node { + qdf_list_node_t node; + qdf_event_t *pevent; +}; + +#define MAX_WAIT_EVENTS 10 + +static qdf_list_t qdf_wait_event_list; +static qdf_spinlock_t qdf_wait_event_lock; + +/* Function Definitions and Documentation */ + +/** + * qdf_event_create() - initializes a QDF event + * @event: Pointer to the opaque event object to initialize + * + * The qdf_event_create() function initializes the specified event. Upon + * successful initialization, the state of the event becomes initialized + * and not signalled. + * + * An event must be initialized before it may be used in any other event + * functions. + * Attempting to initialize an already initialized event results in + * a failure. + * + * Return: QDF status + */ +QDF_STATUS qdf_event_create(qdf_event_t *event) +{ + QDF_BUG(event); + if (!event) + return QDF_STATUS_E_FAULT; + + /* check for 'already initialized' event */ + QDF_BUG(event->cookie != LINUX_EVENT_COOKIE); + if (event->cookie == LINUX_EVENT_COOKIE) + return QDF_STATUS_E_BUSY; + + /* initialize new event */ + init_completion(&event->complete); + event->cookie = LINUX_EVENT_COOKIE; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_event_create); + +/** + * qdf_event_set() - sets a QDF event + * @event: The event to set to the signalled state + * + * The state of the specified event is set to signalled by calling + * qdf_event_set(). + * + * Any threads waiting on the event as a result of a qdf_event_wait() will + * be unblocked and available to be scheduled for execution when the event + * is signaled by a call to qdf_event_set(). + * + * Return: QDF status + */ +QDF_STATUS qdf_event_set(qdf_event_t *event) +{ + QDF_BUG(event); + if (!event) + return QDF_STATUS_E_FAULT; + + /* ensure event is initialized */ + QDF_BUG(event->cookie == LINUX_EVENT_COOKIE); + if (event->cookie != LINUX_EVENT_COOKIE) + return QDF_STATUS_E_INVAL; + + event->done = true; + complete(&event->complete); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_event_set); + +/** + * qdf_event_reset() - resets a QDF event + * @event: The event to set to the NOT signalled state + * + * This function isn't required for Linux. Therefore, it doesn't do much. + * + * The state of the specified event is set to 'NOT signalled' by calling + * qdf_event_reset(). The state of the event remains NOT signalled until an + * explicit call to qdf_event_set(). + * + * This function sets the event to a NOT signalled state even if the event was + * signalled multiple times before being signaled. + * + * Return: QDF status + */ +QDF_STATUS qdf_event_reset(qdf_event_t *event) +{ + QDF_BUG(event); + if (!event) + return QDF_STATUS_E_FAULT; + + /* ensure event is initialized */ + QDF_BUG(event->cookie == LINUX_EVENT_COOKIE); + if (event->cookie != LINUX_EVENT_COOKIE) + return QDF_STATUS_E_INVAL; + + /* (re)initialize event */ + event->done = false; + event->force_set = false; + INIT_COMPLETION(event->complete); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_event_reset); + +/** + * qdf_event_destroy() - Destroys a QDF event + * @event: The event object to be destroyed. + * + * This function doesn't do much in Linux. There is no need for the caller + * to explicitly destroy an event after use. + * + * The os_event_destroy() function shall destroy the event object + * referenced by event. After a successful return from qdf_event_destroy() + * the event object becomes, in effect, uninitialized. + * + * A destroyed event object can be reinitialized using qdf_event_create(); + * the results of otherwise referencing the object after it has been destroyed + * are undefined. Calls to QDF event functions to manipulate the lock such + * as qdf_event_set() will fail if the event is destroyed. Therefore, + * don't use the event after it has been destroyed until it has + * been re-initialized. + * + * Return: QDF status + */ +QDF_STATUS qdf_event_destroy(qdf_event_t *event) +{ + QDF_BUG(event); + if (!event) + return QDF_STATUS_E_FAULT; + + /* ensure event is initialized */ + QDF_BUG(event->cookie == LINUX_EVENT_COOKIE); + if (event->cookie != LINUX_EVENT_COOKIE) + return QDF_STATUS_E_INVAL; + + /* make sure nobody is waiting on the event */ + complete_all(&event->complete); + + /* destroy the event */ + memset(event, 0, sizeof(qdf_event_t)); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_event_destroy); + +/** + * qdf_wait_single_event() - Waits for a single event to be set. + * This API waits for the event to be set. + * + * @event: Pointer to an event to wait on. + * @timeout: Timeout value (in milliseconds). This function returns + * if this interval elapses, regardless if any of the events have + * been set. An input value of 0 for this timeout parameter means + * to wait infinitely, meaning a timeout will never occur. + * + * Return: QDF status + */ +QDF_STATUS qdf_wait_single_event(qdf_event_t *event, uint32_t timeout) +{ + QDF_BUG(!in_interrupt()); + if (in_interrupt()) + return QDF_STATUS_E_FAULT; + + QDF_BUG(event); + if (!event) + return QDF_STATUS_E_FAULT; + + /* ensure event is initialized */ + QDF_BUG(event->cookie == LINUX_EVENT_COOKIE); + if (event->cookie != LINUX_EVENT_COOKIE) + return QDF_STATUS_E_INVAL; + + if (timeout) { + long ret; + + ret = wait_for_completion_timeout( + &event->complete, + __qdf_scaled_msecs_to_jiffies(timeout)); + + if (ret <= 0) + return QDF_STATUS_E_TIMEOUT; + } else { + wait_for_completion(&event->complete); + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_wait_single_event); + +/** + * qdf_complete_wait_events() - Sets all the events which are in the list. + * + * This function traverses the list of events and sets all of them. It + * sets the flag force_set as TRUE to indicate that these events have + * been forcefully set. + * + * Return: None + */ +void qdf_complete_wait_events(void) +{ + struct qdf_evt_node *event_node = NULL; + qdf_list_node_t *list_node = NULL; + QDF_STATUS status; + + if (qdf_list_empty(&qdf_wait_event_list)) + return; + + qdf_spin_lock(&qdf_wait_event_lock); + qdf_list_peek_front(&qdf_wait_event_list, + &list_node); + + while (list_node) { + event_node = qdf_container_of(list_node, + struct qdf_evt_node, node); + + if (!event_node->pevent->done) { + event_node->pevent->force_set = true; + qdf_event_set(event_node->pevent); + } + + status = qdf_list_peek_next(&qdf_wait_event_list, + &event_node->node, &list_node); + + if (!QDF_IS_STATUS_SUCCESS(status)) + break; + } + qdf_spin_unlock(&qdf_wait_event_lock); +} +qdf_export_symbol(qdf_complete_wait_events); + +/** + * qdf_wait_for_event_completion() - Waits for an event to be set. + * + * @event: Pointer to an event to wait on. + * @timeout: Timeout value (in milliseconds). + * + * This function adds the event in a list and waits on it until it + * is set or the timeout duration elapses. The purpose of waiting + * is considered complete only if the event is set and the flag + * force_set is FALSE, it returns success in this case. In other + * cases it returns appropriate error status. + * + * Return: QDF status + */ +QDF_STATUS qdf_wait_for_event_completion(qdf_event_t *event, uint32_t timeout) +{ + struct qdf_evt_node *event_node; + QDF_STATUS status; + + QDF_BUG(!in_interrupt()); + if (in_interrupt()) + return QDF_STATUS_E_FAULT; + + QDF_BUG(event); + if (!event) + return QDF_STATUS_E_FAULT; + + /* ensure event is initialized */ + QDF_BUG(event->cookie == LINUX_EVENT_COOKIE); + if (event->cookie != LINUX_EVENT_COOKIE) + return QDF_STATUS_E_INVAL; + + event_node = qdf_mem_malloc(sizeof(*event_node)); + if (!event_node) + return QDF_STATUS_E_NOMEM; + + event_node->pevent = event; + + qdf_spin_lock(&qdf_wait_event_lock); + status = qdf_list_insert_back(&qdf_wait_event_list, &event_node->node); + qdf_spin_unlock(&qdf_wait_event_lock); + + if (QDF_STATUS_SUCCESS != status) { + qdf_err("Failed to insert event into tracking list"); + goto free_node; + } + + if (timeout) { + long ret; + + /* update the timeout if it's on an emulation platform */ + timeout *= qdf_timer_get_multiplier(); + ret = wait_for_completion_timeout(&event->complete, + msecs_to_jiffies(timeout)); + + if (ret <= 0) { + status = QDF_STATUS_E_TIMEOUT; + goto list_remove; + } + } else { + wait_for_completion(&event->complete); + } + + /* if event was forcefully completed, return failure */ + if (event->force_set) + status = QDF_STATUS_E_FAULT; + +list_remove: + qdf_spin_lock(&qdf_wait_event_lock); + qdf_list_remove_node(&qdf_wait_event_list, &event_node->node); + qdf_spin_unlock(&qdf_wait_event_lock); + +free_node: + qdf_mem_free(event_node); + + return status; +} +qdf_export_symbol(qdf_wait_for_event_completion); + +/** + * qdf_event_list_init() - Creates a list and spinlock for events. + * + * This function creates a list for maintaining events on which threads + * wait for completion. A spinlock is also created to protect related + * oprations. + * + * Return: None + */ +void qdf_event_list_init(void) +{ + qdf_list_create(&qdf_wait_event_list, MAX_WAIT_EVENTS); + qdf_spinlock_create(&qdf_wait_event_lock); +} +qdf_export_symbol(qdf_event_list_init); + +/** + * qdf_event_list_destroy() - Destroys list and spinlock created for events. + * + * This function destroys the list and spinlock created for events on which + * threads wait for completion. + * + * Return: None + */ +void qdf_event_list_destroy(void) +{ + qdf_list_destroy(&qdf_wait_event_list); + qdf_spinlock_destroy(&qdf_wait_event_lock); +} +qdf_export_symbol(qdf_event_list_destroy); + +QDF_STATUS qdf_exit_thread(QDF_STATUS status) +{ + if (status == QDF_STATUS_SUCCESS) + do_exit(0); + else + do_exit(SIGKILL); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_exit_thread); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_file.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_file.c new file mode 100644 index 0000000000000000000000000000000000000000..23935dbd53d691cad5ad4f0efcfc1a0707cbafbf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_file.c @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "qdf_file.h" +#include "qdf_mem.h" +#include "qdf_module.h" +#include "qdf_status.h" +#include "qdf_trace.h" +#include "qdf_types.h" + +QDF_STATUS qdf_file_read(const char *path, char **out_buf) +{ + int errno; + const struct firmware *fw; + char *buf; + + *out_buf = NULL; + + errno = request_firmware(&fw, path, NULL); + if (errno) { + qdf_err("Failed to read file %s", path); + return QDF_STATUS_E_FAILURE; + } + + /* qdf_mem_malloc zeros new memory; +1 size ensures null-termination */ + buf = qdf_mem_malloc(fw->size + 1); + if (!buf) { + release_firmware(fw); + return QDF_STATUS_E_NOMEM; + } + + qdf_mem_copy(buf, fw->data, fw->size); + release_firmware(fw); + *out_buf = buf; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_file_read); + +void qdf_file_buf_free(char *file_buf) +{ + QDF_BUG(file_buf); + if (!file_buf) + return; + + qdf_mem_free(file_buf); +} +qdf_export_symbol(qdf_file_buf_free); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_func_tracker.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_func_tracker.c new file mode 100644 index 0000000000000000000000000000000000000000..43fa3b44ee522c8262e0ac97ce7c7acfa1a4cf5d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_func_tracker.c @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include + +#ifdef FUNC_CALL_MAP +char qdf_func_call_map_buf[QDF_FUNCTION_CALL_MAP_BUF_LEN] = {0}; + +void cc_func(unsigned int track) +{ + unsigned int index = 0; + unsigned int bit = 0; + + index = track / 8; + bit = track % 8; + qdf_func_call_map_buf[index] |= (char)(1 << bit); +} + +void qdf_get_func_call_map(char *data) +{ + qdf_mem_copy(data, qdf_func_call_map_buf, + QDF_FUNCTION_CALL_MAP_BUF_LEN); +} + +void qdf_clear_func_call_map(void) +{ + qdf_mem_zero(qdf_func_call_map_buf, QDF_FUNCTION_CALL_MAP_BUF_LEN); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_func_tracker.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_func_tracker.h new file mode 100644 index 0000000000000000000000000000000000000000..5f4fb25dedd73eef3f34a3cc554ca49096ba2716 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_func_tracker.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef QDF_FUNC_TRACKER_H +#define QDF_FUNC_TRACKER_H + +#ifdef FUNC_CALL_MAP + +#define QDF_FUNCTION_CALL_MAP_BUF_LEN 4096 + +/** + * cc_func() - Inserts the function Id into the global + * function map + * @track: Function Id which needs to be inserted into the + * Global function map. + * + * Return: None + */ +void cc_func(unsigned int track); + +/** + * qdf_get_func_call_map() - Copies the global function call + * map into the given buffer + * @data: Buffer in which the function call map needs to be + * copied + * + * Return: None + */ +void qdf_get_func_call_map(char *data); + +/** + * qdf_clear_func_call_map() - Clears the global function + * call map + * + * Return: None + */ +void qdf_clear_func_call_map(void); +#else +static inline void cc_func(unsigned int track) +{ +} + +static inline void qdf_get_func_call_map(char *data) +{ +} + +static inline void qdf_clear_func_call_map(void) +{ +} + +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_idr.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_idr.c new file mode 100644 index 0000000000000000000000000000000000000000..b6341e2cb72f7c74bba30ea1ad04011e9973f207 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_idr.c @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_idr + * This file provides the ability to map an ID to a pointer + */ + +/* Include files */ +#include +#include + +#define QDF_IDR_START 0x100 +#define QDF_IDR_END 0 + +static int qdf_idr_gpf_flag(void) +{ + if (in_interrupt() || irqs_disabled() || in_atomic()) + return GFP_ATOMIC; + + return GFP_KERNEL; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) +/** + * __qdf_idr_alloc() - Allocates an unused ID + * @idp: pointer to qdf idr + * @ptr: pointer to be associated with the new ID + * @start: the minimum ID + * @end: the maximum ID + * + * Return: new ID + */ +static inline int32_t +__qdf_idr_alloc(qdf_idr *idp, void *ptr, int32_t start, int32_t end) +{ + int32_t id = 0; + + idr_get_new(&idp->idr, ptr, &id); + + return id; +} +#else +static inline int32_t +__qdf_idr_alloc(qdf_idr *idp, void *ptr, int32_t start, int32_t end) +{ + return idr_alloc(&idp->idr, ptr, start, end, qdf_idr_gpf_flag()); +} +#endif + +QDF_STATUS qdf_idr_create(qdf_idr *idp) +{ + if (!idp) + return QDF_STATUS_E_INVAL; + + qdf_spinlock_create(&idp->lock); + + idr_init(&idp->idr); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_idr_create); + +QDF_STATUS qdf_idr_destroy(qdf_idr *idp) +{ + if (!idp) + return QDF_STATUS_E_INVAL; + + qdf_spinlock_destroy(&idp->lock); + idr_destroy(&idp->idr); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_idr_destroy); + +QDF_STATUS qdf_idr_alloc(qdf_idr *idp, void *ptr, int32_t *id) +{ + int local_id; + + if (!idp || !ptr) + return QDF_STATUS_E_INVAL; + + qdf_spinlock_acquire(&idp->lock); + local_id = __qdf_idr_alloc(idp, ptr, QDF_IDR_START, QDF_IDR_END); + qdf_spinlock_release(&idp->lock); + if (local_id < QDF_IDR_START) + return QDF_STATUS_E_FAILURE; + + *id = local_id; + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_idr_alloc); + +QDF_STATUS qdf_idr_remove(qdf_idr *idp, int32_t id) +{ + if (!idp || id < QDF_IDR_START) + return QDF_STATUS_E_INVAL; + + qdf_spinlock_acquire(&idp->lock); + if (idr_find(&idp->idr, id)) + idr_remove(&idp->idr, id); + qdf_spinlock_release(&idp->lock); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_idr_remove); + +QDF_STATUS qdf_idr_find(qdf_idr *idp, int32_t id, void **ptr) +{ + if (!ptr || (id < QDF_IDR_START)) + return QDF_STATUS_E_INVAL; + + qdf_spinlock_acquire(&idp->lock); + *ptr = idr_find(&idp->idr, id); + qdf_spinlock_release(&idp->lock); + if (!(*ptr)) + return QDF_STATUS_E_INVAL; + else + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_idr_find); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_ipa.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_ipa.c new file mode 100644 index 0000000000000000000000000000000000000000..093c79901132ab83725c6ebfcfd7828d25d065db --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_ipa.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_ipa.c + * + * This source file contains linux specific definitions for QDF IPA APIs + */ + +/* Include Files */ +#include + +static uint8_t __qdf_to_ipa_wlan_event(int qdf_ipa_event) +{ + uint8_t ipa_event; + + switch (qdf_ipa_event) { + case QDF_IPA_CLIENT_CONNECT: + ipa_event = WLAN_CLIENT_CONNECT; + break; + case QDF_IPA_CLIENT_DISCONNECT: + ipa_event = WLAN_CLIENT_DISCONNECT; + break; + case QDF_IPA_AP_CONNECT: + ipa_event = WLAN_AP_CONNECT; + break; + case QDF_IPA_AP_DISCONNECT: + ipa_event = WLAN_AP_DISCONNECT; + break; + case QDF_IPA_STA_CONNECT: + ipa_event = WLAN_STA_CONNECT; + break; + case QDF_IPA_STA_DISCONNECT: + ipa_event = WLAN_STA_DISCONNECT; + break; + case QDF_IPA_CLIENT_CONNECT_EX: + ipa_event = WLAN_CLIENT_CONNECT_EX; + break; + case QDF_SWITCH_TO_SCC: + ipa_event = WLAN_SWITCH_TO_SCC; + break; + case QDF_SWITCH_TO_MCC: + ipa_event = WLAN_SWITCH_TO_MCC; + break; + case QDF_WDI_ENABLE: + ipa_event = WLAN_WDI_ENABLE; + break; + case QDF_WDI_DISABLE: + ipa_event = WLAN_WDI_DISABLE; + break; + case QDF_FWR_SSR_BEFORE_SHUTDOWN: + ipa_event = WLAN_FWR_SSR_BEFORE_SHUTDOWN; + break; + case QDF_IPA_WLAN_EVENT_MAX: + default: + ipa_event = IPA_WLAN_EVENT_MAX; + break; + } + + return ipa_event; +} + +void __qdf_ipa_set_meta_msg_type(__qdf_ipa_msg_meta_t *meta, int type) +{ + meta->msg_type = __qdf_to_ipa_wlan_event(type); +} diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_list.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_list.c new file mode 100644 index 0000000000000000000000000000000000000000..c8d1c3413f01a2a91b184bd65ab1732a0dc0adf7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_list.c @@ -0,0 +1,264 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_list.c + * + * QCA driver framework list manipulation APIs. QDF linked list + * APIs are NOT thread safe so make sure to use appropriate locking mechanisms + * to assure operations on the list are thread safe. + */ + +/* Include files */ +#include +#include + +/* Function declarations and documenation */ + +QDF_STATUS qdf_list_insert_before(qdf_list_t *list, + qdf_list_node_t *new_node, qdf_list_node_t *node) +{ + list_add_tail(new_node, node); + list->count++; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_insert_before); + +QDF_STATUS qdf_list_insert_after(qdf_list_t *list, + qdf_list_node_t *new_node, qdf_list_node_t *node) +{ + list_add(new_node, node); + list->count++; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_insert_after); + +/** + * qdf_list_insert_front() - insert input node at front of the list + * @list: Pointer to list + * @node: Pointer to input node + * + * Return: QDF status + */ +QDF_STATUS qdf_list_insert_front(qdf_list_t *list, qdf_list_node_t *node) +{ + list_add(node, &list->anchor); + list->count++; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_insert_front); + +/** + * qdf_list_insert_back() - insert input node at back of the list + * @list: Pointer to list + * @node: Pointer to input node + * + * Return: QDF status + */ +QDF_STATUS qdf_list_insert_back(qdf_list_t *list, qdf_list_node_t *node) +{ + list_add_tail(node, &list->anchor); + list->count++; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_insert_back); + +/** + * qdf_list_insert_back_size() - insert input node at back of list and save + * list size + * @list: Pointer to list + * @node: Pointer to input node + * @p_size: Pointer to store list size + * + * Return: QDF status + */ +QDF_STATUS qdf_list_insert_back_size(qdf_list_t *list, + qdf_list_node_t *node, uint32_t *p_size) +{ + list_add_tail(node, &list->anchor); + list->count++; + *p_size = list->count; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_insert_back_size); + +/** + * qdf_list_remove_front() - remove node from front of the list + * @list: Pointer to list + * @node2: Double pointer to store the node which is removed from list + * + * Return: QDF status + */ +QDF_STATUS qdf_list_remove_front(qdf_list_t *list, qdf_list_node_t **node2) +{ + struct list_head *listptr; + + if (list_empty(&list->anchor)) + return QDF_STATUS_E_EMPTY; + + listptr = list->anchor.next; + *node2 = listptr; + list_del_init(list->anchor.next); + list->count--; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_remove_front); + +/** + * qdf_list_remove_back() - remove node from end of the list + * @list: Pointer to list + * @node2: Double pointer to store node which is removed from list + * + * Return: QDF status + */ +QDF_STATUS qdf_list_remove_back(qdf_list_t *list, qdf_list_node_t **node2) +{ + struct list_head *listptr; + + if (list_empty(&list->anchor)) + return QDF_STATUS_E_EMPTY; + + listptr = list->anchor.prev; + *node2 = listptr; + list_del_init(list->anchor.prev); + list->count--; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_remove_back); + +bool qdf_list_has_node(qdf_list_t *list, qdf_list_node_t *node) +{ + qdf_list_node_t *tmp; + + list_for_each(tmp, &list->anchor) { + if (tmp == node) + return true; + } + + return false; +} + +/** + * qdf_list_remove_node() - remove input node from list + * @list: Pointer to list + * @node_to_remove: Pointer to node which needs to be removed + * + * verifies that the node is in the list before removing it. + * It is expected that the list being removed from is locked + * when this function is being called. + * + * Return: QDF status + */ +QDF_STATUS qdf_list_remove_node(qdf_list_t *list, + qdf_list_node_t *node_to_remove) +{ + if (list_empty(&list->anchor)) + return QDF_STATUS_E_EMPTY; + + list_del_init(node_to_remove); + list->count--; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_remove_node); + +/** + * qdf_list_peek_front() - peek front node from list + * @list: Pointer to list + * @node2: Double pointer to store peeked node pointer + * + * Return: QDF status + */ +QDF_STATUS qdf_list_peek_front(qdf_list_t *list, qdf_list_node_t **node2) +{ + struct list_head *listptr; + + if (list_empty(&list->anchor)) + return QDF_STATUS_E_EMPTY; + + listptr = list->anchor.next; + *node2 = listptr; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_peek_front); + +/** + * qdf_list_peek_next() - peek next node of input node in the list + * @list: Pointer to list + * @node: Pointer to input node + * @node2: Double pointer to store peeked node pointer + * + * Return: QDF status + */ +QDF_STATUS qdf_list_peek_next(qdf_list_t *list, + qdf_list_node_t *node, + qdf_list_node_t **node2) +{ + if (!list || !node || !node2) + return QDF_STATUS_E_FAULT; + + if (list_empty(&list->anchor)) + return QDF_STATUS_E_EMPTY; + + if (node->next == &list->anchor) + return QDF_STATUS_E_EMPTY; + + *node2 = node->next; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_peek_next); + +/** + * qdf_list_empty() - check if the list is empty + * @list: pointer to the list + * + * Return: true if the list is empty and false otherwise. + */ +bool qdf_list_empty(qdf_list_t *list) +{ + return list_empty(&list->anchor); +} +qdf_export_symbol(qdf_list_empty); + +bool qdf_list_node_in_any_list(const qdf_list_node_t *node) +{ + const struct list_head *linux_node = (const struct list_head *)node; + + if (!linux_node) + return false; + + /* if the node is an empty list, it is not tied to an anchor node */ + if (list_empty(linux_node)) + return false; + + if (!linux_node->prev || !linux_node->next) + return false; + + if (linux_node->prev->next != linux_node || + linux_node->next->prev != linux_node) + return false; + + return true; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_lock.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_lock.c new file mode 100644 index 0000000000000000000000000000000000000000..48deb01e93ea15571f10669488d43f8a5320c9bf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_lock.c @@ -0,0 +1,874 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include + +#include +#include +#ifdef FEATURE_RUNTIME_PM +#include +#include +#endif +#include +#include + +/** + * qdf_mutex_create() - Initialize a mutex + * @m: mutex to initialize + * + * Returns: QDF_STATUS + * =0 success + * else fail status + */ +#undef qdf_mutex_create +QDF_STATUS qdf_mutex_create(qdf_mutex_t *lock, const char *func, int line) +{ + /* check for invalid pointer */ + if (!lock) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: NULL pointer passed in", __func__); + return QDF_STATUS_E_FAULT; + } + /* check for 'already initialized' lock */ + if (LINUX_LOCK_COOKIE == lock->cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: already initialized lock", __func__); + return QDF_STATUS_E_BUSY; + } + + if (in_interrupt()) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s cannot be called from interrupt context!!!", + __func__); + return QDF_STATUS_E_FAULT; + } + + qdf_lock_stats_create(&lock->stats, func, line); + + /* initialize new lock */ + mutex_init(&lock->m_lock); + lock->cookie = LINUX_LOCK_COOKIE; + lock->state = LOCK_RELEASED; + lock->process_id = 0; + lock->refcount = 0; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mutex_create); + +/** + * qdf_mutex_acquire() - acquire a QDF lock + * @lock: Pointer to the opaque lock object to acquire + * + * A lock object is acquired by calling qdf_mutex_acquire(). If the lock + * is already locked, the calling thread shall block until the lock becomes + * available. This operation shall return with the lock object referenced by + * lock in the locked state with the calling thread as its owner. + * + * Return: + * QDF_STATUS_SUCCESS: lock was successfully initialized + * QDF failure reason codes: lock is not initialized and can't be used + */ +QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *lock) +{ + int rc; + /* check for invalid pointer */ + if (!lock) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: NULL pointer passed in", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + /* check if lock refers to an initialized object */ + if (LINUX_LOCK_COOKIE != lock->cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: uninitialized lock", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + + if (in_interrupt()) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s cannot be called from interrupt context!!!", + __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + if ((lock->process_id == current->pid) && + (lock->state == LOCK_ACQUIRED)) { + lock->refcount++; +#ifdef QDF_NESTED_LOCK_DEBUG + pe_err("%s: %x %d %d", __func__, lock, current->pid, + lock->refcount); +#endif + return QDF_STATUS_SUCCESS; + } + + BEFORE_LOCK(lock, mutex_is_locked(&lock->m_lock)); + /* acquire a Lock */ + mutex_lock(&lock->m_lock); + AFTER_LOCK(lock, __func__); + rc = mutex_is_locked(&lock->m_lock); + if (rc == 0) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: unable to lock mutex (rc = %d)", __func__, rc); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } +#ifdef QDF_NESTED_LOCK_DEBUG + pe_err("%s: %x %d", __func__, lock, current->pid); +#endif + if (LOCK_DESTROYED != lock->state) { + lock->process_id = current->pid; + lock->refcount++; + lock->state = LOCK_ACQUIRED; + return QDF_STATUS_SUCCESS; + } + + /* lock is already destroyed */ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Lock is already destroyed", __func__); + mutex_unlock(&lock->m_lock); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(qdf_mutex_acquire); + +/** + * qdf_mutex_release() - release a QDF lock + * @lock: Pointer to the opaque lock object to be released + * + * qdf_mutex_release() function shall release the lock object + * referenced by 'lock'. + * + * If a thread attempts to release a lock that it unlocked or is not + * initialized, an error is returned. + * + * Return: + * QDF_STATUS_SUCCESS: lock was successfully initialized + * QDF failure reason codes: lock is not initialized and can't be used + */ +QDF_STATUS qdf_mutex_release(qdf_mutex_t *lock) +{ + /* check for invalid pointer */ + if (!lock) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: NULL pointer passed in", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + + /* check if lock refers to an uninitialized object */ + if (LINUX_LOCK_COOKIE != lock->cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: uninitialized lock", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + + if (in_interrupt()) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s cannot be called from interrupt context!!!", + __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + + /* current_thread = get_current_thread_id(); + * Check thread ID of caller against thread ID + * of the thread which acquire the lock + */ + if (lock->process_id != current->pid) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: current task pid does not match original task pid!!", + __func__); +#ifdef QDF_NESTED_LOCK_DEBUG + pe_err("%s: Lock held by=%d being released by=%d", + __func__, lock->process_id, current->pid); +#endif + QDF_ASSERT(0); + return QDF_STATUS_E_PERM; + } + if ((lock->process_id == current->pid) && + (lock->state == LOCK_ACQUIRED)) { + if (lock->refcount > 0) + lock->refcount--; + } +#ifdef QDF_NESTED_LOCK_DEBUG + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, "%s: %x %d %d", __func__, lock, lock->process_id, + lock->refcount); +#endif + if (lock->refcount) + return QDF_STATUS_SUCCESS; + + lock->process_id = 0; + lock->refcount = 0; + lock->state = LOCK_RELEASED; + /* release a Lock */ + BEFORE_UNLOCK(lock, 0); + mutex_unlock(&lock->m_lock); +#ifdef QDF_NESTED_LOCK_DEBUG + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, "%s: Freeing lock %x %d %d", lock, lock->process_id, + lock->refcount); +#endif + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mutex_release); + +/** + * qdf_wake_lock_name() - This function returns the name of the wakelock + * @lock: Pointer to the wakelock + * + * This function returns the name of the wakelock + * + * Return: Pointer to the name if it is valid or a default string + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) +const char *qdf_wake_lock_name(qdf_wake_lock_t *lock) +{ + if (lock) + return lock->lock.name; + return "UNNAMED_WAKELOCK"; +} +#else +const char *qdf_wake_lock_name(qdf_wake_lock_t *lock) +{ + return "NO_WAKELOCK_SUPPORT"; +} +#endif +qdf_export_symbol(qdf_wake_lock_name); + +/** + * qdf_wake_lock_create() - initializes a wake lock + * @lock: The wake lock to initialize + * @name: Name of wake lock + * + * Return: + * QDF status success: if wake lock is initialized + * QDF status failure: if wake lock was not initialized + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 110)) || \ + defined(WAKEUP_SOURCE_DEV) +QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name) +{ + qdf_mem_zero(lock, sizeof(*lock)); + lock->priv = wakeup_source_register(lock->lock.dev, name); + if (!(lock->priv)) { + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + lock->lock = *(lock->priv); + + return QDF_STATUS_SUCCESS; +} +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) +QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name) +{ + wakeup_source_init(&(lock->lock), name); + lock->priv = &(lock->lock); + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name) +{ + return QDF_STATUS_SUCCESS; +} +#endif +qdf_export_symbol(qdf_wake_lock_create); + +/** + * qdf_wake_lock_acquire() - acquires a wake lock + * @lock: The wake lock to acquire + * @reason: Reason for wakelock + * + * Return: + * QDF status success: if wake lock is acquired + * QDF status failure: if wake lock was not acquired + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) +QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason) +{ + host_diag_log_wlock(reason, qdf_wake_lock_name(lock), + WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT, + WIFI_POWER_EVENT_WAKELOCK_TAKEN); + __pm_stay_awake(lock->priv); + + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason) +{ + return QDF_STATUS_SUCCESS; +} +#endif +qdf_export_symbol(qdf_wake_lock_acquire); + +/** + * qdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout + * @lock: The wake lock to acquire + * @reason: Reason for wakelock + * + * Return: + * QDF status success: if wake lock is acquired + * QDF status failure: if wake lock was not acquired + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) +QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec) +{ + pm_wakeup_ws_event(lock->priv, msec, true); + return QDF_STATUS_SUCCESS; +} +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0) +QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec) +{ + /* Wakelock for Rx is frequent. + * It is reported only during active debug + */ + __pm_wakeup_event(&(lock->lock), msec); + return QDF_STATUS_SUCCESS; +} +#else /* LINUX_VERSION_CODE */ +QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* LINUX_VERSION_CODE */ +qdf_export_symbol(qdf_wake_lock_timeout_acquire); + +/** + * qdf_wake_lock_release() - releases a wake lock + * @lock: the wake lock to release + * @reason: Reason for wakelock + * + * Return: + * QDF status success: if wake lock is acquired + * QDF status failure: if wake lock was not acquired + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) +QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason) +{ + host_diag_log_wlock(reason, qdf_wake_lock_name(lock), + WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT, + WIFI_POWER_EVENT_WAKELOCK_RELEASED); + __pm_relax(lock->priv); + + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason) +{ + return QDF_STATUS_SUCCESS; +} +#endif +qdf_export_symbol(qdf_wake_lock_release); + +/** + * qdf_wake_lock_destroy() - destroys a wake lock + * @lock: The wake lock to destroy + * + * Return: + * QDF status success: if wake lock is acquired + * QDF status failure: if wake lock was not acquired + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 110)) || \ + defined(WAKEUP_SOURCE_DEV) +QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock) +{ + wakeup_source_unregister(lock->priv); + return QDF_STATUS_SUCCESS; +} +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) +QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock) +{ + wakeup_source_trash(&(lock->lock)); + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock) +{ + return QDF_STATUS_SUCCESS; +} +#endif +qdf_export_symbol(qdf_wake_lock_destroy); + +/** + * qdf_pm_system_wakeup() - wakeup system + * + * Return: None + */ +void qdf_pm_system_wakeup(void) +{ + pm_system_wakeup(); +} + +qdf_export_symbol(qdf_pm_system_wakeup); + +#ifdef FEATURE_RUNTIME_PM +/** + * qdf_runtime_pm_get() - do a get opperation on the device + * + * A get opperation will prevent a runtime suspend until a + * corresponding put is done. This api should be used when sending + * data. + * + * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED, + * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!! + * + * return: success if the bus is up and a get has been issued + * otherwise an error code. + */ +QDF_STATUS qdf_runtime_pm_get(void) +{ + void *ol_sc; + int ret; + + ol_sc = cds_get_context(QDF_MODULE_ID_HIF); + + if (!ol_sc) { + QDF_ASSERT(0); + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: HIF context is null!", __func__); + return QDF_STATUS_E_INVAL; + } + + ret = hif_pm_runtime_get(ol_sc, RTPM_ID_RESVERD); + + if (ret) + return QDF_STATUS_E_FAILURE; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_get); + +/** + * qdf_runtime_pm_put() - do a put opperation on the device + * + * A put opperation will allow a runtime suspend after a corresponding + * get was done. This api should be used when sending data. + * + * This api will return a failure if the hif module hasn't been + * initialized + * + * return: QDF_STATUS_SUCCESS if the put is performed + */ +QDF_STATUS qdf_runtime_pm_put(void) +{ + void *ol_sc; + int ret; + + ol_sc = cds_get_context(QDF_MODULE_ID_HIF); + + if (!ol_sc) { + QDF_ASSERT(0); + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: HIF context is null!", __func__); + return QDF_STATUS_E_INVAL; + } + + ret = hif_pm_runtime_put(ol_sc, RTPM_ID_RESVERD); + + if (ret) + return QDF_STATUS_E_FAILURE; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_put); + +/** + * qdf_runtime_pm_prevent_suspend() - prevent a runtime bus suspend + * @lock: an opaque context for tracking + * + * The lock can only be acquired once per lock context and is tracked. + * + * return: QDF_STATUS_SUCCESS or failure code. + */ +QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock) +{ + void *ol_sc; + int ret; + + ol_sc = cds_get_context(QDF_MODULE_ID_HIF); + + if (!ol_sc) { + QDF_ASSERT(0); + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: HIF context is null!", __func__); + return QDF_STATUS_E_INVAL; + } + + ret = hif_pm_runtime_prevent_suspend(ol_sc, lock->lock); + + if (ret) + return QDF_STATUS_E_FAILURE; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_prevent_suspend); + +/** + * qdf_runtime_pm_allow_suspend() - prevent a runtime bus suspend + * @lock: an opaque context for tracking + * + * The lock can only be acquired once per lock context and is tracked. + * + * return: QDF_STATUS_SUCCESS or failure code. + */ +QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock) +{ + void *ol_sc; + int ret; + + ol_sc = cds_get_context(QDF_MODULE_ID_HIF); + if (!ol_sc) { + QDF_ASSERT(0); + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: HIF context is null!", __func__); + return QDF_STATUS_E_INVAL; + } + + ret = hif_pm_runtime_allow_suspend(ol_sc, lock->lock); + if (ret) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_allow_suspend); + +/** + * qdf_runtime_lock_init() - initialize runtime lock + * @name: name of the runtime lock + * + * Initialize a runtime pm lock. This lock can be used + * to prevent the runtime pm system from putting the bus + * to sleep. + * + * Return: runtime_pm_lock_t + */ +QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name) +{ + int ret = hif_runtime_lock_init(lock, name); + + if (ret) + return QDF_STATUS_E_NOMEM; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_runtime_lock_init); + +/** + * qdf_runtime_lock_deinit() - deinitialize runtime pm lock + * @lock: the lock to deinitialize + * + * Ensures the lock is released. Frees the runtime lock. + * + * Return: void + */ +void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock) +{ + void *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF); + hif_runtime_lock_deinit(hif_ctx, lock->lock); +} +qdf_export_symbol(qdf_runtime_lock_deinit); + +#else + +QDF_STATUS qdf_runtime_pm_get(void) +{ + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_get); + +QDF_STATUS qdf_runtime_pm_put(void) +{ + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_put); + +QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock) +{ + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_prevent_suspend); + +QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock) +{ + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_allow_suspend); + +QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name) +{ + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_runtime_lock_init); + +void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock) +{ +} +qdf_export_symbol(qdf_runtime_lock_deinit); + +#endif /* FEATURE_RUNTIME_PM */ + +/** + * qdf_spinlock_acquire() - acquires a spin lock + * @lock: Spin lock to acquire + * + * Return: + * QDF status success: if wake lock is acquired + */ +QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock) +{ + spin_lock(&lock->lock.spinlock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_spinlock_acquire); + + +/** + * qdf_spinlock_release() - release a spin lock + * @lock: Spin lock to release + * + * Return: + * QDF status success : if wake lock is acquired + */ +QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock) +{ + spin_unlock(&lock->lock.spinlock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_spinlock_release); + +/** + * qdf_mutex_destroy() - destroy a QDF lock + * @lock: Pointer to the opaque lock object to be destroyed + * + * function shall destroy the lock object referenced by lock. After a + * successful return from qdf_mutex_destroy() + * the lock object becomes, in effect, uninitialized. + * + * A destroyed lock object can be reinitialized using qdf_mutex_create(); + * the results of otherwise referencing the object after it has been destroyed + * are undefined. Calls to QDF lock functions to manipulate the lock such + * as qdf_mutex_acquire() will fail if the lock is destroyed. Therefore, + * don't use the lock after it has been destroyed until it has + * been re-initialized. + * + * Return: + * QDF_STATUS_SUCCESS: lock was successfully initialized + * QDF failure reason codes: lock is not initialized and can't be used + */ +QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock) +{ + /* check for invalid pointer */ + if (!lock) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: NULL pointer passed in", __func__); + return QDF_STATUS_E_FAULT; + } + + if (LINUX_LOCK_COOKIE != lock->cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: uninitialized lock", __func__); + return QDF_STATUS_E_INVAL; + } + + if (in_interrupt()) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s cannot be called from interrupt context!!!", + __func__); + return QDF_STATUS_E_FAULT; + } + + /* check if lock is released */ + if (!mutex_trylock(&lock->m_lock)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: lock is not released", __func__); + return QDF_STATUS_E_BUSY; + } + lock->cookie = 0; + lock->state = LOCK_DESTROYED; + lock->process_id = 0; + lock->refcount = 0; + + qdf_lock_stats_destroy(&lock->stats); + mutex_unlock(&lock->m_lock); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mutex_destroy); + +#if QDF_LOCK_STATS_LIST +struct qdf_lock_cookie { + union { + struct { + struct lock_stats *stats; + const char *func; + int line; + } cookie; + struct { + struct qdf_lock_cookie *next; + } empty_node; + } u; +}; + +#ifndef QDF_LOCK_STATS_LIST_SIZE +#define QDF_LOCK_STATS_LIST_SIZE 256 +#endif + +static qdf_spinlock_t qdf_lock_list_spinlock; +static struct qdf_lock_cookie lock_cookies[QDF_LOCK_STATS_LIST_SIZE]; +static struct qdf_lock_cookie *lock_cookie_freelist; +static qdf_atomic_t lock_cookie_get_failures; +static qdf_atomic_t lock_cookie_untracked_num; +/* dummy value */ +#define DUMMY_LOCK_COOKIE 0xc00c1e + +/** + * qdf_is_lock_cookie - check if memory is a valid lock cookie + * + * return true if the memory is within the range of the lock cookie + * memory. + */ +static bool qdf_is_lock_cookie(struct qdf_lock_cookie *lock_cookie) +{ + return lock_cookie >= &lock_cookies[0] && + lock_cookie <= &lock_cookies[QDF_LOCK_STATS_LIST_SIZE-1]; +} + +/** + * qdf_is_lock_cookie_free() - check if the lock cookie is on the freelist + * @lock_cookie: lock cookie to check + * + * Check that the next field of the lock cookie points to a lock cookie. + * currently this is only true if the cookie is on the freelist. + * + * Checking for the function and line being NULL and 0 should also have worked. + */ +static bool qdf_is_lock_cookie_free(struct qdf_lock_cookie *lock_cookie) +{ + struct qdf_lock_cookie *tmp = lock_cookie->u.empty_node.next; + + return qdf_is_lock_cookie(tmp) || (!tmp); +} + +static struct qdf_lock_cookie *qdf_get_lock_cookie(void) +{ + struct qdf_lock_cookie *lock_cookie; + + qdf_spin_lock_bh(&qdf_lock_list_spinlock); + lock_cookie = lock_cookie_freelist; + if (lock_cookie_freelist) + lock_cookie_freelist = lock_cookie_freelist->u.empty_node.next; + qdf_spin_unlock_bh(&qdf_lock_list_spinlock); + return lock_cookie; +} + +static void __qdf_put_lock_cookie(struct qdf_lock_cookie *lock_cookie) +{ + if (!qdf_is_lock_cookie(lock_cookie)) + QDF_BUG(0); + + lock_cookie->u.empty_node.next = lock_cookie_freelist; + lock_cookie_freelist = lock_cookie; +} + +static void qdf_put_lock_cookie(struct qdf_lock_cookie *lock_cookie) +{ + qdf_spin_lock_bh(&qdf_lock_list_spinlock); + __qdf_put_lock_cookie(lock_cookie); + qdf_spin_unlock_bh(&qdf_lock_list_spinlock); +} + +void qdf_lock_stats_init(void) +{ + int i; + + for (i = 0; i < QDF_LOCK_STATS_LIST_SIZE; i++) + __qdf_put_lock_cookie(&lock_cookies[i]); + + /* stats must be allocated for the spinlock before the cookie, + * otherwise this qdf_lock_list_spinlock wouldnt get initialized + * properly + */ + qdf_spinlock_create(&qdf_lock_list_spinlock); + qdf_atomic_init(&lock_cookie_get_failures); + qdf_atomic_init(&lock_cookie_untracked_num); +} + +void qdf_lock_stats_deinit(void) +{ + int i; + + qdf_spinlock_destroy(&qdf_lock_list_spinlock); + for (i = 0; i < QDF_LOCK_STATS_LIST_SIZE; i++) { + if (!qdf_is_lock_cookie_free(&lock_cookies[i])) + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, + "%s: lock_not_destroyed, fun: %s, line %d", + __func__, lock_cookies[i].u.cookie.func, + lock_cookies[i].u.cookie.line); + } + lock_cookie_freelist = NULL; +} + +/* allocated separate memory in case the lock memory is freed without + * running the deinitialization code. The cookie list will not be + * corrupted. + */ +void qdf_lock_stats_cookie_create(struct lock_stats *stats, + const char *func, int line) +{ + struct qdf_lock_cookie *cookie = qdf_get_lock_cookie(); + + if (!cookie) { + int count; + + qdf_atomic_inc(&lock_cookie_get_failures); + count = qdf_atomic_inc_return(&lock_cookie_untracked_num); + stats->cookie = (void *) DUMMY_LOCK_COOKIE; + return; + } + + stats->cookie = cookie; + stats->cookie->u.cookie.stats = stats; + stats->cookie->u.cookie.func = func; + stats->cookie->u.cookie.line = line; +} + +void qdf_lock_stats_cookie_destroy(struct lock_stats *stats) +{ + struct qdf_lock_cookie *cookie = stats->cookie; + + if (!cookie) { + QDF_DEBUG_PANIC("Lock destroyed twice or never created"); + return; + } + + stats->cookie = NULL; + if (cookie == (void *)DUMMY_LOCK_COOKIE) { + qdf_atomic_dec(&lock_cookie_untracked_num); + return; + } + + cookie->u.cookie.stats = NULL; + cookie->u.cookie.func = NULL; + cookie->u.cookie.line = 0; + + qdf_put_lock_cookie(cookie); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_lro.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_lro.c new file mode 100644 index 0000000000000000000000000000000000000000..0d83db8d39565879d8c7e65fd735d5ff79168e05 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_lro.c @@ -0,0 +1,494 @@ +/* + * Copyright (c) 2015-2017, 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_lro.c + * QCA driver framework(QDF) Large Receive Offload + */ + +#include +#include +#include + +#include +#include + +/** + * qdf_lro_desc_pool_init() - Initialize the free pool of LRO + * descriptors + * @lro_desc_pool: free pool of the LRO descriptors + * @lro_mgr: LRO manager + * + * Initialize a list that holds the free LRO descriptors + * + * Return: none + */ +static void qdf_lro_desc_pool_init(struct qdf_lro_desc_pool *lro_desc_pool, + struct net_lro_mgr *lro_mgr) +{ + int i; + + INIT_LIST_HEAD(&lro_desc_pool->lro_free_list_head); + + for (i = 0; i < QDF_LRO_DESC_POOL_SZ; i++) { + lro_desc_pool->lro_desc_array[i].lro_desc = + &lro_mgr->lro_arr[i]; + list_add_tail(&lro_desc_pool->lro_desc_array[i].lro_node, + &lro_desc_pool->lro_free_list_head); + } +} + +/** + * qdf_lro_desc_info_init() - Initialize the LRO descriptors + * @qdf_info: QDF LRO data structure + * + * Initialize the free pool of LRO descriptors and the entries + * of the hash table + * + * Return: none + */ +static void qdf_lro_desc_info_init(struct qdf_lro_s *qdf_info) +{ + int i; + + /* Initialize pool of free LRO desc.*/ + qdf_lro_desc_pool_init(&qdf_info->lro_desc_info.lro_desc_pool, + qdf_info->lro_mgr); + + /* Initialize the hash table of LRO desc.*/ + for (i = 0; i < QDF_LRO_DESC_TABLE_SZ; i++) { + /* initialize the flows in the hash table */ + INIT_LIST_HEAD(&qdf_info->lro_desc_info. + lro_hash_table[i].lro_desc_list); + } + +} + +/** + * qdf_lro_get_skb_header() - LRO callback function + * @skb: network buffer + * @ip_hdr: contains a pointer to the IP header + * @tcpudp_hdr: contains a pointer to the TCP header + * @hdr_flags: indicates if this is a TCP, IPV4 frame + * @priv: private driver specific opaque pointer + * + * Get the IP and TCP headers from the skb + * + * Return: 0 - success, < 0 - failure + */ +static int qdf_lro_get_skb_header(struct sk_buff *skb, void **ip_hdr, + void **tcpudp_hdr, u64 *hdr_flags, void *priv) +{ + if (QDF_NBUF_CB_RX_IPV6_PROTO(skb)) { + hdr_flags = 0; + return -EINVAL; + } + + *hdr_flags |= (LRO_IPV4 | LRO_TCP); + (*ip_hdr) = skb->data; + (*tcpudp_hdr) = skb->data + QDF_NBUF_CB_RX_TCP_OFFSET(skb); + return 0; +} + +/** + * qdf_lro_init() - LRO initialization function + * + * Return: LRO context + */ +qdf_lro_ctx_t qdf_lro_init(void) +{ + struct qdf_lro_s *lro_ctx; + size_t lro_info_sz, lro_mgr_sz, desc_arr_sz, desc_pool_sz; + size_t hash_table_sz; + uint8_t *lro_mem_ptr; + + /* + * Allocate all the LRO data structures at once and then carve + * them up as needed + */ + lro_info_sz = sizeof(struct qdf_lro_s); + lro_mgr_sz = sizeof(struct net_lro_mgr); + desc_arr_sz = + (QDF_LRO_DESC_POOL_SZ * sizeof(struct net_lro_desc)); + desc_pool_sz = + (QDF_LRO_DESC_POOL_SZ * sizeof(struct qdf_lro_desc_entry)); + hash_table_sz = + (sizeof(struct qdf_lro_desc_table) * QDF_LRO_DESC_TABLE_SZ); + + lro_mem_ptr = qdf_mem_malloc(lro_info_sz + lro_mgr_sz + desc_arr_sz + + desc_pool_sz + hash_table_sz); + + if (unlikely(!lro_mem_ptr)) + return NULL; + + lro_ctx = (struct qdf_lro_s *)lro_mem_ptr; + lro_mem_ptr += lro_info_sz; + /* LRO manager */ + lro_ctx->lro_mgr = (struct net_lro_mgr *)lro_mem_ptr; + lro_mem_ptr += lro_mgr_sz; + + /* LRO decriptor array */ + lro_ctx->lro_mgr->lro_arr = (struct net_lro_desc *)lro_mem_ptr; + lro_mem_ptr += desc_arr_sz; + + /* LRO descriptor pool */ + lro_ctx->lro_desc_info.lro_desc_pool.lro_desc_array = + (struct qdf_lro_desc_entry *)lro_mem_ptr; + lro_mem_ptr += desc_pool_sz; + + /* hash table to store the LRO descriptors */ + lro_ctx->lro_desc_info.lro_hash_table = + (struct qdf_lro_desc_table *)lro_mem_ptr; + + /* Initialize the LRO descriptors */ + qdf_lro_desc_info_init(lro_ctx); + + /* LRO TODO - NAPI or RX thread */ + lro_ctx->lro_mgr->features |= LRO_F_NAPI; + + lro_ctx->lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; + lro_ctx->lro_mgr->max_aggr = QDF_LRO_MAX_AGGR_SIZE; + lro_ctx->lro_mgr->get_skb_header = qdf_lro_get_skb_header; + lro_ctx->lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; + lro_ctx->lro_mgr->max_desc = QDF_LRO_DESC_POOL_SZ; + + return lro_ctx; +} + +/** + * qdf_lro_deinit() - LRO deinitialization function + * @lro_ctx: LRO context + * + * Return: nothing + */ +void qdf_lro_deinit(qdf_lro_ctx_t lro_ctx) +{ + if (likely(lro_ctx)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "LRO instance %pK is being freed", lro_ctx); + qdf_mem_free(lro_ctx); + } +} + +/** + * qdf_lro_tcp_flow_match() - function to check for a flow match + * @iph: IP header + * @tcph: TCP header + * @lro_desc: LRO decriptor + * + * Checks if the descriptor belongs to the same flow as the one + * indicated by the TCP and IP header. + * + * Return: true - flow match, false - flow does not match + */ +static inline bool qdf_lro_tcp_flow_match(struct net_lro_desc *lro_desc, + struct iphdr *iph, + struct tcphdr *tcph) +{ + if ((lro_desc->tcph->source != tcph->source) || + (lro_desc->tcph->dest != tcph->dest) || + (lro_desc->iph->saddr != iph->saddr) || + (lro_desc->iph->daddr != iph->daddr)) + return false; + + return true; + +} + +/** + * qdf_lro_desc_find() - LRO descriptor look-up function + * + * @lro_ctx: LRO context + * @skb: network buffer + * @iph: IP header + * @tcph: TCP header + * @flow_hash: toeplitz hash + * @lro_desc: LRO descriptor to be returned + * + * Look-up the LRO descriptor in the hash table based on the + * flow ID toeplitz. If the flow is not found, allocates a new + * LRO descriptor and places it in the hash table + * + * Return: 0 - success, < 0 - failure + */ +static int qdf_lro_desc_find(struct qdf_lro_s *lro_ctx, + struct sk_buff *skb, struct iphdr *iph, struct tcphdr *tcph, + uint32_t flow_hash, struct net_lro_desc **lro_desc) +{ + uint32_t i; + struct qdf_lro_desc_table *lro_hash_table; + struct list_head *ptr; + struct qdf_lro_desc_entry *entry; + struct qdf_lro_desc_pool *free_pool; + struct qdf_lro_desc_info *desc_info = &lro_ctx->lro_desc_info; + + *lro_desc = NULL; + i = flow_hash & QDF_LRO_DESC_TABLE_SZ_MASK; + + lro_hash_table = &desc_info->lro_hash_table[i]; + + if (unlikely(!lro_hash_table)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "Invalid hash entry"); + QDF_ASSERT(0); + return -EINVAL; + } + + /* Check if this flow exists in the descriptor list */ + list_for_each(ptr, &lro_hash_table->lro_desc_list) { + struct net_lro_desc *tmp_lro_desc = NULL; + + entry = list_entry(ptr, struct qdf_lro_desc_entry, lro_node); + tmp_lro_desc = entry->lro_desc; + if (qdf_lro_tcp_flow_match(entry->lro_desc, iph, tcph)) { + *lro_desc = entry->lro_desc; + return 0; + } + } + + /* no existing flow found, a new LRO desc needs to be allocated */ + free_pool = &lro_ctx->lro_desc_info.lro_desc_pool; + entry = list_first_entry_or_null( + &free_pool->lro_free_list_head, + struct qdf_lro_desc_entry, lro_node); + if (unlikely(!entry)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "Could not allocate LRO desc!"); + return -ENOMEM; + } + + list_del_init(&entry->lro_node); + + if (unlikely(!entry->lro_desc)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "entry->lro_desc is NULL!"); + return -EINVAL; + } + + memset(entry->lro_desc, 0, sizeof(struct net_lro_desc)); + + /* + * lro_desc->active should be 0 and lro_desc->tcp_rcv_tsval + * should be 0 for newly allocated lro descriptors + */ + list_add_tail(&entry->lro_node, + &lro_hash_table->lro_desc_list); + + *lro_desc = entry->lro_desc; + return 0; +} + +/** + * qdf_lro_get_info() - Update the LRO information + * + * @lro_ctx: LRO context + * @nbuf: network buffer + * @info: LRO related information passed in by the caller + * @plro_desc: lro information returned as output + * + * Look-up the LRO descriptor based on the LRO information and + * the network buffer provided. Update the skb cb with the + * descriptor found + * + * Return: true: LRO eligible false: LRO ineligible + */ +bool qdf_lro_get_info(qdf_lro_ctx_t lro_ctx, qdf_nbuf_t nbuf, + struct qdf_lro_info *info, + void **plro_desc) +{ + struct net_lro_desc *lro_desc; + struct iphdr *iph; + struct tcphdr *tcph; + int hw_lro_eligible = + QDF_NBUF_CB_RX_LRO_ELIGIBLE(nbuf) && + (!QDF_NBUF_CB_RX_TCP_PURE_ACK(nbuf)); + + if (unlikely(!lro_ctx)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "Invalid LRO context"); + return false; + } + + if (!hw_lro_eligible) + return false; + + iph = (struct iphdr *)info->iph; + tcph = (struct tcphdr *)info->tcph; + if (0 != qdf_lro_desc_find(lro_ctx, nbuf, iph, tcph, + QDF_NBUF_CB_RX_FLOW_ID(nbuf), + (struct net_lro_desc **)plro_desc)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "finding the LRO desc failed"); + return false; + } + + lro_desc = (struct net_lro_desc *)(*plro_desc); + if (unlikely(!lro_desc)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "finding the LRO desc failed"); + return false; + } + + /* if this is not the first skb, check the timestamp option */ + if (lro_desc->tcp_rcv_tsval) { + if (tcph->doff == 8) { + __be32 *topt = (__be32 *)(tcph + 1); + + if (*topt != htonl((TCPOPT_NOP << 24) + |(TCPOPT_NOP << 16) + | (TCPOPT_TIMESTAMP << 8) + | TCPOLEN_TIMESTAMP)) + return true; + + /* timestamp should be in right order */ + topt++; + if (after(ntohl(lro_desc->tcp_rcv_tsval), + ntohl(*topt))) + return false; + + /* timestamp reply should not be zero */ + topt++; + if (*topt == 0) + return false; + } + } + + return true; +} + +/** + * qdf_lro_desc_free() - Free the LRO descriptor + * @desc: LRO descriptor + * @lro_ctx: LRO context + * + * Return the LRO descriptor to the free pool + * + * Return: none + */ +void qdf_lro_desc_free(qdf_lro_ctx_t lro_ctx, + void *data) +{ + struct qdf_lro_desc_entry *entry; + struct net_lro_mgr *lro_mgr; + struct net_lro_desc *arr_base; + struct qdf_lro_desc_info *desc_info; + int i; + struct net_lro_desc *desc = (struct net_lro_desc *)data; + + qdf_assert(desc); + qdf_assert(lro_ctx); + + if (unlikely(!desc || !lro_ctx)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "invalid input"); + return; + } + + lro_mgr = lro_ctx->lro_mgr; + arr_base = lro_mgr->lro_arr; + i = desc - arr_base; + + if (unlikely(i >= QDF_LRO_DESC_POOL_SZ)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "invalid index %d", i); + return; + } + + desc_info = &lro_ctx->lro_desc_info; + entry = &desc_info->lro_desc_pool.lro_desc_array[i]; + + list_del_init(&entry->lro_node); + + list_add_tail(&entry->lro_node, &desc_info-> + lro_desc_pool.lro_free_list_head); +} + +/** + * qdf_lro_flush() - LRO flush API + * @lro_ctx: LRO context + * + * Flush all the packets aggregated in the LRO manager for all + * the flows + * + * Return: none + */ +void qdf_lro_flush(qdf_lro_ctx_t lro_ctx) +{ + struct net_lro_mgr *lro_mgr = lro_ctx->lro_mgr; + int i; + + for (i = 0; i < lro_mgr->max_desc; i++) { + if (lro_mgr->lro_arr[i].active) { + qdf_lro_desc_free(lro_ctx, &lro_mgr->lro_arr[i]); + lro_flush_desc(lro_mgr, &lro_mgr->lro_arr[i]); + } + } +} +/** + * qdf_lro_get_desc() - LRO descriptor look-up function + * @iph: IP header + * @tcph: TCP header + * @lro_arr: Array of LRO decriptors + * @lro_mgr: LRO manager + * + * Looks-up the LRO descriptor for a given flow + * + * Return: LRO descriptor + */ +static struct net_lro_desc *qdf_lro_get_desc(struct net_lro_mgr *lro_mgr, + struct net_lro_desc *lro_arr, + struct iphdr *iph, + struct tcphdr *tcph) +{ + int i; + + for (i = 0; i < lro_mgr->max_desc; i++) { + if (lro_arr[i].active) + if (qdf_lro_tcp_flow_match(&lro_arr[i], iph, tcph)) + return &lro_arr[i]; + } + + return NULL; +} + +/** + * qdf_lro_flush_pkt() - function to flush the LRO flow + * @info: LRO related information passed by the caller + * @lro_ctx: LRO context + * + * Flush all the packets aggregated in the LRO manager for the + * flow indicated by the TCP and IP header + * + * Return: none + */ +void qdf_lro_flush_pkt(qdf_lro_ctx_t lro_ctx, + struct qdf_lro_info *info) +{ + struct net_lro_desc *lro_desc; + struct net_lro_mgr *lro_mgr = lro_ctx->lro_mgr; + struct iphdr *iph = (struct iphdr *) info->iph; + struct tcphdr *tcph = (struct tcphdr *) info->tcph; + + lro_desc = qdf_lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph); + + if (lro_desc) { + /* statistics */ + qdf_lro_desc_free(lro_ctx, lro_desc); + lro_flush_desc(lro_mgr, lro_desc); + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mc_timer.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mc_timer.c new file mode 100644 index 0000000000000000000000000000000000000000..b2591b32e957ddd9a3ec1e81552b8ac0d199a7d9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mc_timer.c @@ -0,0 +1,939 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_mc_timer + * QCA driver framework timer APIs serialized to MC thread + */ + +/* Include Files */ +#include +#include +#include +#include "qdf_lock.h" +#include "qdf_list.h" +#include "qdf_mem.h" +#include +#include "qdf_timer.h" +#include + +/* Preprocessor definitions and constants */ +#define LINUX_TIMER_COOKIE 0x12341234 +#define LINUX_INVALID_TIMER_COOKIE 0xfeedface +#define TMR_INVALID_ID (0) + +static uint32_t g_qdf_timer_multiplier = 1; + +inline void qdf_timer_set_multiplier(uint32_t multiplier) +{ + g_qdf_timer_multiplier = multiplier; +} +qdf_export_symbol(qdf_timer_set_multiplier); + +inline uint32_t qdf_timer_get_multiplier(void) +{ + return g_qdf_timer_multiplier; +} +qdf_export_symbol(qdf_timer_get_multiplier); + +/* Type declarations */ + +/* Static Variable Definitions */ +static unsigned int persistent_timer_count; +static qdf_mutex_t persistent_timer_count_lock; + +static void (*scheduler_timer_callback)(qdf_mc_timer_t *); +void qdf_register_mc_timer_callback(void (*callback) (qdf_mc_timer_t *)) +{ + scheduler_timer_callback = callback; +} + +qdf_export_symbol(qdf_register_mc_timer_callback); + +/* Function declarations and documenation */ + +/** + * qdf_try_allowing_sleep() - clean up timer states after it has been deactivated + * @type: timer type + * + * Clean up timer states after it has been deactivated check and try to allow + * sleep after a timer has been stopped or expired. + * + * Return: none + */ +void qdf_try_allowing_sleep(QDF_TIMER_TYPE type) +{ + if (QDF_TIMER_TYPE_WAKE_APPS == type) { + + persistent_timer_count--; + if (0 == persistent_timer_count) { + /* since the number of persistent timers has + * decreased from 1 to 0, the timer should allow + * sleep + */ + } + } +} +qdf_export_symbol(qdf_try_allowing_sleep); + +/** + * qdf_mc_timer_get_current_state() - get the current state of the timer + * @timer: Pointer to timer object + * + * Return: + * QDF_TIMER_STATE - qdf timer state + */ +QDF_TIMER_STATE qdf_mc_timer_get_current_state(qdf_mc_timer_t *timer) +{ + QDF_TIMER_STATE timer_state = QDF_TIMER_STATE_UNUSED; + + if (!timer) { + QDF_ASSERT(0); + return timer_state; + } + + qdf_spin_lock_irqsave(&timer->platform_info.spinlock); + + switch (timer->state) { + case QDF_TIMER_STATE_STOPPED: + case QDF_TIMER_STATE_STARTING: + case QDF_TIMER_STATE_RUNNING: + case QDF_TIMER_STATE_UNUSED: + timer_state = timer->state; + break; + default: + QDF_ASSERT(0); + } + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + return timer_state; +} +qdf_export_symbol(qdf_mc_timer_get_current_state); + +/** + * qdf_timer_module_init() - initializes a QDF timer module. + * + * This API initializes the QDF timer module. This needs to be called + * exactly once prior to using any QDF timers. + * + * Return: none + */ +void qdf_timer_module_init(void) +{ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH, + "Initializing the QDF MC timer module"); + qdf_mutex_create(&persistent_timer_count_lock); +} +qdf_export_symbol(qdf_timer_module_init); + +#ifdef TIMER_MANAGER + +static qdf_list_t qdf_timer_domains[QDF_DEBUG_DOMAIN_COUNT]; +static qdf_spinlock_t qdf_timer_list_lock; + +static inline qdf_list_t *qdf_timer_list_get(enum qdf_debug_domain domain) +{ + return &qdf_timer_domains[domain]; +} + +/** + * qdf_mc_timer_manager_init() - initialize QDF debug timer manager + * + * This API initializes QDF timer debug functionality. + * + * Return: none + */ +void qdf_mc_timer_manager_init(void) +{ + int i; + + for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) + qdf_list_create(&qdf_timer_domains[i], 1000); + qdf_spinlock_create(&qdf_timer_list_lock); +} +qdf_export_symbol(qdf_mc_timer_manager_init); + +static void qdf_mc_timer_print_list(qdf_list_t *timers) +{ + QDF_STATUS status; + qdf_list_node_t *node; + + qdf_spin_lock_irqsave(&qdf_timer_list_lock); + status = qdf_list_peek_front(timers, &node); + while (QDF_IS_STATUS_SUCCESS(status)) { + qdf_mc_timer_node_t *timer_node = (qdf_mc_timer_node_t *)node; + const char *filename = kbasename(timer_node->file_name); + uint32_t line = timer_node->line_num; + + qdf_spin_unlock_irqrestore(&qdf_timer_list_lock); + qdf_err("timer Leak@ File %s, @Line %u", filename, line); + qdf_spin_lock_irqsave(&qdf_timer_list_lock); + + status = qdf_list_peek_next(timers, node, &node); + } + qdf_spin_unlock_irqrestore(&qdf_timer_list_lock); +} + +void qdf_mc_timer_check_for_leaks(void) +{ + enum qdf_debug_domain current_domain = qdf_debug_domain_get(); + qdf_list_t *timers = qdf_timer_list_get(current_domain); + + if (qdf_list_empty(timers)) + return; + + qdf_err("Timer leaks detected in %s domain!", + qdf_debug_domain_name(current_domain)); + qdf_mc_timer_print_list(timers); + QDF_DEBUG_PANIC("Previously reported timer leaks detected"); +} + +static void qdf_mc_timer_free_leaked_timers(qdf_list_t *timers) +{ + QDF_STATUS status; + qdf_list_node_t *node; + + qdf_spin_lock_irqsave(&qdf_timer_list_lock); + status = qdf_list_remove_front(timers, &node); + while (QDF_IS_STATUS_SUCCESS(status)) { + qdf_mem_free(node); + status = qdf_list_remove_front(timers, &node); + } + qdf_spin_unlock_irqrestore(&qdf_timer_list_lock); +} + +/** + * qdf_timer_clean() - clean up QDF timer debug functionality + * + * This API cleans up QDF timer debug functionality and prints which QDF timers + * are leaked. This is called during driver unload. + * + * Return: none + */ +static void qdf_timer_clean(void) +{ + bool leaks_detected = false; + int i; + + /* detect and print leaks */ + for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) { + qdf_list_t *timers = &qdf_timer_domains[i]; + + if (qdf_list_empty(timers)) + continue; + + leaks_detected = true; + + qdf_err("\nTimer leaks detected in the %s (Id %d) domain!", + qdf_debug_domain_name(i), i); + qdf_mc_timer_print_list(timers); + } + + /* we're done if there were no leaks */ + if (!leaks_detected) + return; + + /* panic, if enabled */ + QDF_DEBUG_PANIC("Previously reported timer leaks detected"); + + /* if we didn't crash, release the leaked timers */ + for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) + qdf_mc_timer_free_leaked_timers(&qdf_timer_domains[i]); +} +qdf_export_symbol(qdf_timer_clean); + +/** + * qdf_mc_timer_manager_exit() - exit QDF timer debug functionality + * + * This API exists QDF timer debug functionality + * + * Return: none + */ +void qdf_mc_timer_manager_exit(void) +{ + int i; + + qdf_timer_clean(); + + for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) + qdf_list_destroy(&qdf_timer_domains[i]); + + qdf_spinlock_destroy(&qdf_timer_list_lock); +} +qdf_export_symbol(qdf_mc_timer_manager_exit); +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) +static void __os_mc_timer_shim(struct timer_list *os_timer) +{ + qdf_mc_timer_platform_t *platform_info_ptr = + qdf_container_of(os_timer, + qdf_mc_timer_platform_t, + timer); + qdf_mc_timer_t *timer = qdf_container_of(platform_info_ptr, + qdf_mc_timer_t, + platform_info); + + scheduler_timer_callback(timer); +} + +static void qdf_mc_timer_setup(qdf_mc_timer_t *timer, + QDF_TIMER_TYPE timer_type) +{ + uint32_t flags = 0; + + if (QDF_TIMER_TYPE_SW == timer_type) + flags |= TIMER_DEFERRABLE; + + timer_setup(&timer->platform_info.timer, + __os_mc_timer_shim, flags); +} +#else +static void __os_mc_timer_shim(unsigned long data) +{ + qdf_mc_timer_t *timer = (qdf_mc_timer_t *)data; + + scheduler_timer_callback(timer); +} + +static void qdf_mc_timer_setup(qdf_mc_timer_t *timer, + QDF_TIMER_TYPE timer_type) +{ + if (QDF_TIMER_TYPE_SW == timer_type) + init_timer_deferrable(&timer->platform_info.timer); + else + init_timer(&timer->platform_info.timer); + + timer->platform_info.timer.function = __os_mc_timer_shim; + timer->platform_info.timer.data = (unsigned long)timer; +} +#endif +/** + * qdf_mc_timer_init() - initialize a QDF timer + * @timer: Pointer to timer object + * @timer_type: Type of timer + * @callback: Callback to be called after timer expiry + * @ser_data: User data which will be passed to callback function + * + * This API initializes a QDF timer object. + * + * qdf_mc_timer_init() initializes a QDF timer object. A timer must be + * initialized by calling qdf_mc_timer_initialize() before it may be used in + * any other timer functions. + * + * Attempting to initialize timer that is already initialized results in + * a failure. A destroyed timer object can be re-initialized with a call to + * qdf_mc_timer_init(). The results of otherwise referencing the object + * after it has been destroyed are undefined. + * + * Calls to QDF timer functions to manipulate the timer such + * as qdf_mc_timer_set() will fail if the timer is not initialized or has + * been destroyed. Therefore, don't use the timer after it has been + * destroyed until it has been re-initialized. + * + * All callback will be executed within the CDS main thread unless it is + * initialized from the Tx thread flow, in which case it will be executed + * within the tx thread flow. + * + * Return: + * QDF_STATUS_SUCCESS: timer is initialized successfully + * QDF failure status: timer initialization failed + */ +#ifdef TIMER_MANAGER +QDF_STATUS qdf_mc_timer_init_debug(qdf_mc_timer_t *timer, + QDF_TIMER_TYPE timer_type, + qdf_mc_timer_callback_t callback, + void *user_data, char *file_name, + uint32_t line_num) +{ + enum qdf_debug_domain current_domain = qdf_debug_domain_get(); + qdf_list_t *active_timers = qdf_timer_list_get(current_domain); + QDF_STATUS qdf_status; + + /* check for invalid pointer */ + if ((!timer) || (!callback)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Null params being passed", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + + timer->timer_node = qdf_mem_malloc(sizeof(qdf_mc_timer_node_t)); + + if (!timer->timer_node) { + QDF_ASSERT(0); + return QDF_STATUS_E_NOMEM; + } + + timer->timer_node->file_name = file_name; + timer->timer_node->line_num = line_num; + timer->timer_node->qdf_timer = timer; + + qdf_spin_lock_irqsave(&qdf_timer_list_lock); + qdf_status = qdf_list_insert_front(active_timers, + &timer->timer_node->node); + qdf_spin_unlock_irqrestore(&qdf_timer_list_lock); + if (QDF_STATUS_SUCCESS != qdf_status) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Unable to insert node into List qdf_status %d", + __func__, qdf_status); + } + + /* set the various members of the timer structure + * with arguments passed or with default values + */ + qdf_spinlock_create(&timer->platform_info.spinlock); + qdf_mc_timer_setup(timer, timer_type); + timer->callback = callback; + timer->user_data = user_data; + timer->type = timer_type; + timer->platform_info.cookie = LINUX_TIMER_COOKIE; + timer->platform_info.thread_id = 0; + timer->state = QDF_TIMER_STATE_STOPPED; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mc_timer_init_debug); +#else +QDF_STATUS qdf_mc_timer_init(qdf_mc_timer_t *timer, QDF_TIMER_TYPE timer_type, + qdf_mc_timer_callback_t callback, + void *user_data) +{ + /* check for invalid pointer */ + if ((!timer) || (!callback)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Null params being passed", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + + /* set the various members of the timer structure + * with arguments passed or with default values + */ + qdf_spinlock_create(&timer->platform_info.spinlock); + qdf_mc_timer_setup(timer, timer_type); + timer->callback = callback; + timer->user_data = user_data; + timer->type = timer_type; + timer->platform_info.cookie = LINUX_TIMER_COOKIE; + timer->platform_info.thread_id = 0; + timer->state = QDF_TIMER_STATE_STOPPED; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mc_timer_init); +#endif + +/** + * qdf_mc_timer_destroy() - destroy QDF timer + * @timer: Pointer to timer object + * + * qdf_mc_timer_destroy() function shall destroy the timer object. + * After a successful return from \a qdf_mc_timer_destroy() the timer + * object becomes, in effect, uninitialized. + * + * A destroyed timer object can be re-initialized by calling + * qdf_mc_timer_init(). The results of otherwise referencing the object + * after it has been destroyed are undefined. + * + * Calls to QDF timer functions to manipulate the timer, such + * as qdf_mc_timer_set() will fail if the lock is destroyed. Therefore, + * don't use the timer after it has been destroyed until it has + * been re-initialized. + * + * Return: + * QDF_STATUS_SUCCESS - timer is initialized successfully + * QDF failure status - timer initialization failed + */ +#ifdef TIMER_MANAGER +QDF_STATUS qdf_mc_timer_destroy(qdf_mc_timer_t *timer) +{ + enum qdf_debug_domain current_domain = qdf_debug_domain_get(); + qdf_list_t *active_timers = qdf_timer_list_get(current_domain); + QDF_STATUS v_status = QDF_STATUS_SUCCESS; + + /* check for invalid pointer */ + if (!timer) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Null timer pointer being passed", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + + /* Check if timer refers to an uninitialized object */ + if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Cannot destroy uninitialized timer", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + + qdf_spin_lock_irqsave(&qdf_timer_list_lock); + v_status = qdf_list_remove_node(active_timers, + &timer->timer_node->node); + qdf_spin_unlock_irqrestore(&qdf_timer_list_lock); + if (v_status != QDF_STATUS_SUCCESS) { + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + qdf_mem_free(timer->timer_node); + + qdf_spin_lock_irqsave(&timer->platform_info.spinlock); + + switch (timer->state) { + + case QDF_TIMER_STATE_STARTING: + v_status = QDF_STATUS_E_BUSY; + break; + + case QDF_TIMER_STATE_RUNNING: + /* Stop the timer first */ + del_timer(&(timer->platform_info.timer)); + v_status = QDF_STATUS_SUCCESS; + break; + case QDF_TIMER_STATE_STOPPED: + v_status = QDF_STATUS_SUCCESS; + break; + + case QDF_TIMER_STATE_UNUSED: + v_status = QDF_STATUS_E_ALREADY; + break; + + default: + v_status = QDF_STATUS_E_FAULT; + break; + } + + if (QDF_STATUS_SUCCESS == v_status) { + timer->platform_info.cookie = LINUX_INVALID_TIMER_COOKIE; + timer->state = QDF_TIMER_STATE_UNUSED; + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + qdf_spinlock_destroy(&timer->platform_info.spinlock); + return v_status; + } + + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH, + "%s: Cannot destroy timer in state = %d", __func__, + timer->state); + QDF_ASSERT(0); + + return v_status; +} +qdf_export_symbol(qdf_mc_timer_destroy); + +#else + +/** + * qdf_mc_timer_destroy() - destroy QDF timer + * @timer: Pointer to timer object + * + * qdf_mc_timer_destroy() function shall destroy the timer object. + * After a successful return from \a qdf_mc_timer_destroy() the timer + * object becomes, in effect, uninitialized. + * + * A destroyed timer object can be re-initialized by calling + * qdf_mc_timer_init(). The results of otherwise referencing the object + * after it has been destroyed are undefined. + * + * Calls to QDF timer functions to manipulate the timer, such + * as qdf_mc_timer_set() will fail if the lock is destroyed. Therefore, + * don't use the timer after it has been destroyed until it has + * been re-initialized. + * + * Return: + * QDF_STATUS_SUCCESS - timer is initialized successfully + * QDF failure status - timer initialization failed + */ +QDF_STATUS qdf_mc_timer_destroy(qdf_mc_timer_t *timer) +{ + QDF_STATUS v_status = QDF_STATUS_SUCCESS; + + /* check for invalid pointer */ + if (!timer) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Null timer pointer being passed", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + + /* check if timer refers to an uninitialized object */ + if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Cannot destroy uninitialized timer", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + qdf_spin_lock_irqsave(&timer->platform_info.spinlock); + + switch (timer->state) { + + case QDF_TIMER_STATE_STARTING: + v_status = QDF_STATUS_E_BUSY; + break; + + case QDF_TIMER_STATE_RUNNING: + /* Stop the timer first */ + del_timer(&(timer->platform_info.timer)); + v_status = QDF_STATUS_SUCCESS; + break; + + case QDF_TIMER_STATE_STOPPED: + v_status = QDF_STATUS_SUCCESS; + break; + + case QDF_TIMER_STATE_UNUSED: + v_status = QDF_STATUS_E_ALREADY; + break; + + default: + v_status = QDF_STATUS_E_FAULT; + break; + } + + if (QDF_STATUS_SUCCESS == v_status) { + timer->platform_info.cookie = LINUX_INVALID_TIMER_COOKIE; + timer->state = QDF_TIMER_STATE_UNUSED; + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + return v_status; + } + + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH, + "%s: Cannot destroy timer in state = %d", __func__, + timer->state); + QDF_ASSERT(0); + + return v_status; +} +qdf_export_symbol(qdf_mc_timer_destroy); +#endif + +/** + * qdf_mc_timer_start() - start a QDF timer object + * @timer: Pointer to timer object + * @expiration_time: Time to expire + * + * qdf_mc_timer_start() function starts a timer to expire after the + * specified interval, thus running the timer callback function when + * the interval expires. + * + * A timer only runs once (a one-shot timer). To re-start the + * timer, qdf_mc_timer_start() has to be called after the timer runs + * or has been cancelled. + * + * Return: + * QDF_STATUS_SUCCESS: timer is initialized successfully + * QDF failure status: timer initialization failed + */ +QDF_STATUS qdf_mc_timer_start(qdf_mc_timer_t *timer, uint32_t expiration_time) +{ + /* check for invalid pointer */ + if (!timer) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s Null timer pointer being passed", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + + /* check if timer refers to an uninitialized object */ + if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Cannot start uninitialized timer", __func__); + QDF_ASSERT(0); + + return QDF_STATUS_E_INVAL; + } + + /* check if timer has expiration time less than 10 ms */ + if (expiration_time < 10) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Cannot start a timer with expiration less than 10 ms", + __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + + /* make sure the remainer of the logic isn't interrupted */ + qdf_spin_lock_irqsave(&timer->platform_info.spinlock); + + /* ensure if the timer can be started */ + if (QDF_TIMER_STATE_STOPPED != timer->state) { + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Cannot start timer in state = %d %ps", + __func__, timer->state, (void *)timer->callback); + return QDF_STATUS_E_ALREADY; + } + + /* start the timer */ + mod_timer(&(timer->platform_info.timer), + jiffies + __qdf_scaled_msecs_to_jiffies(expiration_time)); + + timer->state = QDF_TIMER_STATE_RUNNING; + + /* get the thread ID on which the timer is being started */ + timer->platform_info.thread_id = current->pid; + + if (QDF_TIMER_TYPE_WAKE_APPS == timer->type) { + persistent_timer_count++; + if (1 == persistent_timer_count) { + /* since we now have one persistent timer, + * we need to disallow sleep + * sleep_negate_okts(sleep_client_handle); + */ + } + } + + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mc_timer_start); + +/** + * qdf_mc_timer_stop() - stop a QDF timer + * @timer: Pointer to timer object + * qdf_mc_timer_stop() function stops a timer that has been started but + * has not expired, essentially cancelling the 'start' request. + * + * After a timer is stopped, it goes back to the state it was in after it + * was created and can be started again via a call to qdf_mc_timer_start(). + * + * Return: + * QDF_STATUS_SUCCESS: timer is initialized successfully + * QDF failure status: timer initialization failed + */ +QDF_STATUS qdf_mc_timer_stop(qdf_mc_timer_t *timer) +{ + /* check for invalid pointer */ + if (!timer) { + QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_QDF, + "%s Null timer pointer", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + + /* check if timer refers to an uninitialized object */ + if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) { + QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_QDF, + "%s: Cannot stop uninit timer", __func__); + QDF_ASSERT(0); + + return QDF_STATUS_E_INVAL; + } + + /* ensure the timer state is correct */ + qdf_spin_lock_irqsave(&timer->platform_info.spinlock); + + if (QDF_TIMER_STATE_RUNNING != timer->state) { + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + return QDF_STATUS_SUCCESS; + } + + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + + del_timer(&(timer->platform_info.timer)); + + qdf_spin_lock_irqsave(&timer->platform_info.spinlock); + timer->state = QDF_TIMER_STATE_STOPPED; + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + + qdf_try_allowing_sleep(timer->type); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mc_timer_stop); + +QDF_STATUS qdf_mc_timer_stop_sync(qdf_mc_timer_t *timer) +{ + /* check for invalid pointer */ + if (!timer) { + QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_QDF, + "%s Null timer pointer", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + + /* check if timer refers to an uninitialized object */ + if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) { + QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_QDF, + "%s: Cannot stop uninit timer", __func__); + QDF_ASSERT(0); + + return QDF_STATUS_E_INVAL; + } + + /* ensure the timer state is correct */ + qdf_spin_lock_irqsave(&timer->platform_info.spinlock); + + if (QDF_TIMER_STATE_RUNNING != timer->state) { + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + return QDF_STATUS_SUCCESS; + } + + timer->state = QDF_TIMER_STATE_STOPPED; + + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + del_timer_sync(&(timer->platform_info.timer)); + + qdf_try_allowing_sleep(timer->type); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mc_timer_stop_sync); +/** + * qdf_mc_timer_get_system_ticks() - get the system time in 10ms ticks + + * qdf_mc_timer_get_system_ticks() function returns the current number + * of timer ticks in 10msec intervals. This function is suitable timestamping + * and calculating time intervals by calculating the difference between two + * timestamps. + * + * Return: + * The current system tick count (in 10msec intervals). This + * function cannot fail. + */ +unsigned long qdf_mc_timer_get_system_ticks(void) +{ + return jiffies_to_msecs(jiffies) / 10; +} +qdf_export_symbol(qdf_mc_timer_get_system_ticks); + +/** + * qdf_mc_timer_get_system_time() - Get the system time in milliseconds + * + * qdf_mc_timer_get_system_time() function returns the number of milliseconds + * that have elapsed since the system was started + * + * Return: + * The current system time in milliseconds + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) +unsigned long qdf_mc_timer_get_system_time(void) +{ + struct timespec64 tv; + + ktime_get_real_ts64(&tv); + return tv.tv_sec * 1000 + tv.tv_nsec / 1000000; +} +qdf_export_symbol(qdf_mc_timer_get_system_time); + +#else +unsigned long qdf_mc_timer_get_system_time(void) +{ + struct timeval tv; + + do_gettimeofday(&tv); + return tv.tv_sec * 1000 + tv.tv_usec / 1000; +} +qdf_export_symbol(qdf_mc_timer_get_system_time); +#endif + +s64 qdf_get_monotonic_boottime_ns(void) +{ + return ktime_to_ns(ktime_get_boottime()); +} +qdf_export_symbol(qdf_get_monotonic_boottime_ns); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) +qdf_time_t qdf_get_time_of_the_day_ms(void) +{ + struct timespec64 tv; + qdf_time_t local_time; + struct rtc_time tm; + + ktime_get_real_ts64(&tv); + local_time = (qdf_time_t)(tv.tv_sec - (sys_tz.tz_minuteswest * 60)); + rtc_time_to_tm(local_time, &tm); + + return (tm.tm_hour * 60 * 60 * 1000) + + (tm.tm_min * 60 * 1000) + (tm.tm_sec * 1000) + + (tv.tv_nsec / 1000000); +} +qdf_export_symbol(qdf_get_time_of_the_day_ms); + +#else +qdf_time_t qdf_get_time_of_the_day_ms(void) +{ + struct timeval tv; + qdf_time_t local_time; + struct rtc_time tm; + + do_gettimeofday(&tv); + local_time = (qdf_time_t)(tv.tv_sec - (sys_tz.tz_minuteswest * 60)); + rtc_time_to_tm(local_time, &tm); + + return (tm.tm_hour * 60 * 60 * 1000) + + (tm.tm_min * 60 * 1000) + (tm.tm_sec * 1000) + + (tv.tv_usec / 1000); +} +qdf_export_symbol(qdf_get_time_of_the_day_ms); +#endif + +/** + * qdf_timer_module_deinit() - Deinitializes a QDF timer module. + * + * This API deinitializes the QDF timer module. + * Return: none + */ +void qdf_timer_module_deinit(void) +{ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH, + "De-Initializing the QDF MC timer module"); + qdf_mutex_destroy(&persistent_timer_count_lock); +} +qdf_export_symbol(qdf_timer_module_deinit); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) +void qdf_get_time_of_the_day_in_hr_min_sec_usec(char *tbuf, int len) +{ + struct timespec64 tv; + struct rtc_time tm; + unsigned long local_time; + + /* Format the Log time R#: [hr:min:sec.microsec] */ + ktime_get_real_ts64(&tv); + /* Convert rtc to local time */ + local_time = (u32)(tv.tv_sec - (sys_tz.tz_minuteswest * 60)); + rtc_time_to_tm(local_time, &tm); + scnprintf(tbuf, len, + "[%02d:%02d:%02d.%06lu]", + tm.tm_hour, tm.tm_min, tm.tm_sec, tv.tv_nsec / 1000); +} +qdf_export_symbol(qdf_get_time_of_the_day_in_hr_min_sec_usec); + +#else +void qdf_get_time_of_the_day_in_hr_min_sec_usec(char *tbuf, int len) +{ + struct timeval tv; + struct rtc_time tm; + unsigned long local_time; + + /* Format the Log time R#: [hr:min:sec.microsec] */ + do_gettimeofday(&tv); + /* Convert rtc to local time */ + local_time = (u32)(tv.tv_sec - (sys_tz.tz_minuteswest * 60)); + rtc_time_to_tm(local_time, &tm); + scnprintf(tbuf, len, + "[%02d:%02d:%02d.%06lu]", + tm.tm_hour, tm.tm_min, tm.tm_sec, tv.tv_usec); +} +qdf_export_symbol(qdf_get_time_of_the_day_in_hr_min_sec_usec); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c new file mode 100644 index 0000000000000000000000000000000000000000..21576d9878412a3acd3b8b511cd965a60d740cd5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c @@ -0,0 +1,2319 @@ +/* + * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_mem + * This file provides OS dependent memory management APIs + */ + +#include "qdf_debugfs.h" +#include "qdf_mem.h" +#include "qdf_nbuf.h" +#include "qdf_lock.h" +#include "qdf_mc_timer.h" +#include "qdf_module.h" +#include +#include "qdf_atomic.h" +#include "qdf_str.h" +#include "qdf_talloc.h" +#include +#include +#include + +#if IS_ENABLED(CONFIG_WCNSS_MEM_PRE_ALLOC) +#include +#endif + +#if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG) +static bool mem_debug_disabled; +qdf_declare_param(mem_debug_disabled, bool); +qdf_export_symbol(mem_debug_disabled); +static bool is_initial_mem_debug_disabled; +#endif + +/* Preprocessor Definitions and Constants */ +#define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */ +#define QDF_MEM_WARN_THRESHOLD 300 /* ms */ +#define QDF_DEBUG_STRING_SIZE 512 + +/** + * struct __qdf_mem_stat - qdf memory statistics + * @kmalloc: total kmalloc allocations + * @dma: total dma allocations + * @skb: total skb allocations + */ +static struct __qdf_mem_stat { + qdf_atomic_t kmalloc; + qdf_atomic_t dma; + qdf_atomic_t skb; +} qdf_mem_stat; + +#ifdef MEMORY_DEBUG +#include "qdf_debug_domain.h" +#include + +enum list_type { + LIST_TYPE_MEM = 0, + LIST_TYPE_DMA = 1, + LIST_TYPE_MAX, +}; + +/** + * major_alloc_priv: private data registered to debugfs entry created to list + * the list major allocations + * @type: type of the list to be parsed + * @threshold: configured by user by overwriting the respective debugfs + * sys entry. This is to list the functions which requested + * memory/dma allocations more than threshold nubmer of times. + */ +struct major_alloc_priv { + enum list_type type; + uint32_t threshold; +}; + +static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT]; +static qdf_spinlock_t qdf_mem_list_lock; + +static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT]; +static qdf_spinlock_t qdf_mem_dma_list_lock; + +static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain) +{ + return &qdf_mem_domains[domain]; +} + +static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain) +{ + return &qdf_mem_dma_domains[domain]; +} + +/** + * struct qdf_mem_header - memory object to dubug + * @node: node to the list + * @domain: the active memory domain at time of allocation + * @freed: flag set during free, used to detect double frees + * Use uint8_t so we can detect corruption + * @func: name of the function the allocation was made from + * @line: line number of the file the allocation was made from + * @size: size of the allocation in bytes + * @caller: Caller of the function for which memory is allocated + * @header: a known value, used to detect out-of-bounds access + * @time: timestamp at which allocation was made + */ +struct qdf_mem_header { + qdf_list_node_t node; + enum qdf_debug_domain domain; + uint8_t freed; + char func[QDF_MEM_FUNC_NAME_SIZE]; + uint32_t line; + uint32_t size; + void *caller; + uint64_t header; + uint64_t time; +}; + +static uint64_t WLAN_MEM_HEADER = 0x6162636465666768; +static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687; + +static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr) +{ + return (struct qdf_mem_header *)ptr - 1; +} + +static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr, + qdf_size_t size) +{ + return (struct qdf_mem_header *) ((uint8_t *) ptr + size); +} + +static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header) +{ + return (uint64_t *)((void *)(header + 1) + header->size); +} + +static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header) +{ + return (void *)(header + 1); +} + +/* number of bytes needed for the qdf memory debug information */ +#define QDF_MEM_DEBUG_SIZE \ + (sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER)) + +/* number of bytes needed for the qdf dma memory debug information */ +#define QDF_DMA_MEM_DEBUG_SIZE \ + (sizeof(struct qdf_mem_header)) + +static void qdf_mem_trailer_init(struct qdf_mem_header *header) +{ + QDF_BUG(header); + if (!header) + return; + *qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER; +} + +static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size, + const char *func, uint32_t line, void *caller) +{ + QDF_BUG(header); + if (!header) + return; + + header->domain = qdf_debug_domain_get(); + header->freed = false; + + qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE); + + header->line = line; + header->size = size; + header->caller = caller; + header->header = WLAN_MEM_HEADER; + header->time = qdf_get_log_timestamp(); +} + +enum qdf_mem_validation_bitmap { + QDF_MEM_BAD_HEADER = 1 << 0, + QDF_MEM_BAD_TRAILER = 1 << 1, + QDF_MEM_BAD_SIZE = 1 << 2, + QDF_MEM_DOUBLE_FREE = 1 << 3, + QDF_MEM_BAD_FREED = 1 << 4, + QDF_MEM_BAD_NODE = 1 << 5, + QDF_MEM_BAD_DOMAIN = 1 << 6, + QDF_MEM_WRONG_DOMAIN = 1 << 7, +}; + +static enum qdf_mem_validation_bitmap +qdf_mem_trailer_validate(struct qdf_mem_header *header) +{ + enum qdf_mem_validation_bitmap error_bitmap = 0; + + if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER) + error_bitmap |= QDF_MEM_BAD_TRAILER; + return error_bitmap; +} + +static enum qdf_mem_validation_bitmap +qdf_mem_header_validate(struct qdf_mem_header *header, + enum qdf_debug_domain domain) +{ + enum qdf_mem_validation_bitmap error_bitmap = 0; + + if (header->header != WLAN_MEM_HEADER) + error_bitmap |= QDF_MEM_BAD_HEADER; + + if (header->size > QDF_MEM_MAX_MALLOC) + error_bitmap |= QDF_MEM_BAD_SIZE; + + if (header->freed == true) + error_bitmap |= QDF_MEM_DOUBLE_FREE; + else if (header->freed) + error_bitmap |= QDF_MEM_BAD_FREED; + + if (!qdf_list_node_in_any_list(&header->node)) + error_bitmap |= QDF_MEM_BAD_NODE; + + if (header->domain < QDF_DEBUG_DOMAIN_INIT || + header->domain >= QDF_DEBUG_DOMAIN_COUNT) + error_bitmap |= QDF_MEM_BAD_DOMAIN; + else if (header->domain != domain) + error_bitmap |= QDF_MEM_WRONG_DOMAIN; + + return error_bitmap; +} + +static void +qdf_mem_header_assert_valid(struct qdf_mem_header *header, + enum qdf_debug_domain current_domain, + enum qdf_mem_validation_bitmap error_bitmap, + const char *func, + uint32_t line) +{ + if (!error_bitmap) + return; + + if (error_bitmap & QDF_MEM_BAD_HEADER) + qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)", + header->header, WLAN_MEM_HEADER); + + if (error_bitmap & QDF_MEM_BAD_SIZE) + qdf_err("Corrupted memory size %u (expected < %d)", + header->size, QDF_MEM_MAX_MALLOC); + + if (error_bitmap & QDF_MEM_BAD_TRAILER) + qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)", + *qdf_mem_get_trailer(header), WLAN_MEM_TRAILER); + + if (error_bitmap & QDF_MEM_DOUBLE_FREE) + qdf_err("Memory has previously been freed"); + + if (error_bitmap & QDF_MEM_BAD_FREED) + qdf_err("Corrupted memory freed flag 0x%x", header->freed); + + if (error_bitmap & QDF_MEM_BAD_NODE) + qdf_err("Corrupted memory header node or double free"); + + if (error_bitmap & QDF_MEM_BAD_DOMAIN) + qdf_err("Corrupted memory domain 0x%x", header->domain); + + if (error_bitmap & QDF_MEM_WRONG_DOMAIN) + qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)", + qdf_debug_domain_name(header->domain), header->domain, + qdf_debug_domain_name(current_domain), current_domain); + + QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line); +} + +void qdf_mem_skb_inc(qdf_size_t size) +{ + qdf_atomic_add(size, &qdf_mem_stat.skb); +} + +void qdf_mem_skb_dec(qdf_size_t size) +{ + qdf_atomic_sub(size, &qdf_mem_stat.skb); +} + +/** + * struct __qdf_mem_info - memory statistics + * @func: the function which allocated memory + * @line: the line at which allocation happened + * @size: the size of allocation + * @caller: Address of the caller function + * @count: how many allocations of same type + * @time: timestamp at which allocation happened + */ +struct __qdf_mem_info { + char func[QDF_MEM_FUNC_NAME_SIZE]; + uint32_t line; + uint32_t size; + void *caller; + uint32_t count; + uint64_t time; +}; + +/* + * The table depth defines the de-duplication proximity scope. + * A deeper table takes more time, so choose any optimum value. + */ +#define QDF_MEM_STAT_TABLE_SIZE 8 + +/** + * qdf_mem_debug_print_header() - memory debug header print logic + * @print: the print adapter function + * @print_priv: the private data to be consumed by @print + * @threshold: the threshold value set by user to list top allocations + * + * Return: None + */ +static void qdf_mem_debug_print_header(qdf_abstract_print print, + void *print_priv, + uint32_t threshold) +{ + if (threshold) + print(print_priv, "APIs requested allocations >= %u no of time", + threshold); + print(print_priv, + "--------------------------------------------------------------"); + print(print_priv, + " count size total filename caller timestamp"); + print(print_priv, + "--------------------------------------------------------------"); +} + +/** + * qdf_mem_meta_table_insert() - insert memory metadata into the given table + * @table: the memory metadata table to insert into + * @meta: the memory metadata to insert + * + * Return: true if the table is full after inserting, false otherwise + */ +static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table, + struct qdf_mem_header *meta) +{ + int i; + + for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) { + if (!table[i].count) { + qdf_str_lcopy(table[i].func, meta->func, + QDF_MEM_FUNC_NAME_SIZE); + table[i].line = meta->line; + table[i].size = meta->size; + table[i].count = 1; + table[i].caller = meta->caller; + table[i].time = meta->time; + break; + } + + if (qdf_str_eq(table[i].func, meta->func) && + table[i].line == meta->line && + table[i].size == meta->size && + table[i].caller == meta->caller) { + table[i].count++; + break; + } + } + + /* return true if the table is now full */ + return i >= QDF_MEM_STAT_TABLE_SIZE - 1; +} + +/** + * qdf_mem_domain_print() - output agnostic memory domain print logic + * @domain: the memory domain to print + * @print: the print adapter function + * @print_priv: the private data to be consumed by @print + * @threshold: the threshold value set by uset to list top allocations + * @mem_print: pointer to function which prints the memory allocation data + * + * Return: None + */ +static void qdf_mem_domain_print(qdf_list_t *domain, + qdf_abstract_print print, + void *print_priv, + uint32_t threshold, + void (*mem_print)(struct __qdf_mem_info *, + qdf_abstract_print, + void *, uint32_t)) +{ + QDF_STATUS status; + struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE]; + qdf_list_node_t *node; + + qdf_mem_zero(table, sizeof(table)); + qdf_mem_debug_print_header(print, print_priv, threshold); + + /* hold lock while inserting to avoid use-after free of the metadata */ + qdf_spin_lock(&qdf_mem_list_lock); + status = qdf_list_peek_front(domain, &node); + while (QDF_IS_STATUS_SUCCESS(status)) { + struct qdf_mem_header *meta = (struct qdf_mem_header *)node; + bool is_full = qdf_mem_meta_table_insert(table, meta); + + qdf_spin_unlock(&qdf_mem_list_lock); + + if (is_full) { + (*mem_print)(table, print, print_priv, threshold); + qdf_mem_zero(table, sizeof(table)); + } + + qdf_spin_lock(&qdf_mem_list_lock); + status = qdf_list_peek_next(domain, node, &node); + } + qdf_spin_unlock(&qdf_mem_list_lock); + + (*mem_print)(table, print, print_priv, threshold); +} + +/** + * qdf_mem_meta_table_print() - memory metadata table print logic + * @table: the memory metadata table to print + * @print: the print adapter function + * @print_priv: the private data to be consumed by @print + * @threshold: the threshold value set by user to list top allocations + * + * Return: None + */ +static void qdf_mem_meta_table_print(struct __qdf_mem_info *table, + qdf_abstract_print print, + void *print_priv, + uint32_t threshold) +{ + int i; + char debug_str[QDF_DEBUG_STRING_SIZE]; + size_t len = 0; + char *debug_prefix = "WLAN_BUG_RCA: memory leak detected"; + + len += qdf_scnprintf(debug_str, sizeof(debug_str) - len, + "%s", debug_prefix); + + for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) { + if (!table[i].count) + break; + + print(print_priv, + "%6u x %5u = %7uB @ %s:%u %pS %llu", + table[i].count, + table[i].size, + table[i].count * table[i].size, + table[i].func, + table[i].line, table[i].caller, + table[i].time); + len += qdf_scnprintf(debug_str + len, + sizeof(debug_str) - len, + " @ %s:%u %pS", + table[i].func, + table[i].line, + table[i].caller); + } + print(print_priv, "%s", debug_str); +} + +static int qdf_err_printer(void *priv, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args); + va_end(args); + + return 0; +} + +#endif /* MEMORY_DEBUG */ + +u_int8_t prealloc_disabled = 1; +qdf_declare_param(prealloc_disabled, byte); +qdf_export_symbol(prealloc_disabled); + +#if defined WLAN_DEBUGFS + +void qdf_mem_kmalloc_inc(qdf_size_t size) +{ + qdf_atomic_add(size, &qdf_mem_stat.kmalloc); +} + +void qdf_mem_kmalloc_dec(qdf_size_t size) +{ + qdf_atomic_sub(size, &qdf_mem_stat.kmalloc); +} + +/* Debugfs root directory for qdf_mem */ +static struct dentry *qdf_mem_debugfs_root; + +#ifdef MEMORY_DEBUG +static int seq_printf_printer(void *priv, const char *fmt, ...) +{ + struct seq_file *file = priv; + va_list args; + + va_start(args, fmt); + seq_vprintf(file, fmt, args); + seq_puts(file, "\n"); + va_end(args); + + return 0; +} + +/** + * qdf_print_major_alloc() - memory metadata table print logic + * @table: the memory metadata table to print + * @print: the print adapter function + * @print_priv: the private data to be consumed by @print + * @threshold: the threshold value set by uset to list top allocations + * + * Return: None + */ +static void qdf_print_major_alloc(struct __qdf_mem_info *table, + qdf_abstract_print print, + void *print_priv, + uint32_t threshold) +{ + int i; + + for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) { + if (!table[i].count) + break; + if (table[i].count >= threshold) + print(print_priv, + "%6u x %5u = %7uB @ %s:%u %pS %llu", + table[i].count, + table[i].size, + table[i].count * table[i].size, + table[i].func, + table[i].line, table[i].caller, + table[i].time); + } +} + +/** + * qdf_mem_seq_start() - sequential callback to start + * @seq: seq_file handle + * @pos: The start position of the sequence + * + * Return: iterator pointer, or NULL if iteration is complete + */ +static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos) +{ + enum qdf_debug_domain domain = *pos; + + if (!qdf_debug_domain_valid(domain)) + return NULL; + + /* just use the current position as our iterator */ + return pos; +} + +/** + * qdf_mem_seq_next() - next sequential callback + * @seq: seq_file handle + * @v: the current iterator + * @pos: the current position + * + * Get the next node and release previous node. + * + * Return: iterator pointer, or NULL if iteration is complete + */ +static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + ++*pos; + + return qdf_mem_seq_start(seq, pos); +} + +/** + * qdf_mem_seq_stop() - stop sequential callback + * @seq: seq_file handle + * @v: current iterator + * + * Return: None + */ +static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { } + +/** + * qdf_mem_seq_show() - print sequential callback + * @seq: seq_file handle + * @v: current iterator + * + * Return: 0 - success + */ +static int qdf_mem_seq_show(struct seq_file *seq, void *v) +{ + enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v; + + seq_printf(seq, "\n%s Memory Domain (Id %d)\n", + qdf_debug_domain_name(domain_id), domain_id); + qdf_mem_domain_print(qdf_mem_list_get(domain_id), + seq_printf_printer, + seq, + 0, + qdf_mem_meta_table_print); + + return 0; +} + +/* sequential file operation table */ +static const struct seq_operations qdf_mem_seq_ops = { + .start = qdf_mem_seq_start, + .next = qdf_mem_seq_next, + .stop = qdf_mem_seq_stop, + .show = qdf_mem_seq_show, +}; + + +static int qdf_mem_debugfs_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &qdf_mem_seq_ops); +} + +/** + * qdf_major_alloc_show() - print sequential callback + * @seq: seq_file handle + * @v: current iterator + * + * Return: 0 - success + */ +static int qdf_major_alloc_show(struct seq_file *seq, void *v) +{ + enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v; + struct major_alloc_priv *priv; + qdf_list_t *list; + + priv = (struct major_alloc_priv *)seq->private; + seq_printf(seq, "\n%s Memory Domain (Id %d)\n", + qdf_debug_domain_name(domain_id), domain_id); + + switch (priv->type) { + case LIST_TYPE_MEM: + list = qdf_mem_list_get(domain_id); + break; + case LIST_TYPE_DMA: + list = qdf_mem_dma_list(domain_id); + break; + default: + list = NULL; + break; + } + + if (list) + qdf_mem_domain_print(list, + seq_printf_printer, + seq, + priv->threshold, + qdf_print_major_alloc); + + return 0; +} + +/* sequential file operation table created to track major allocs */ +static const struct seq_operations qdf_major_allocs_seq_ops = { + .start = qdf_mem_seq_start, + .next = qdf_mem_seq_next, + .stop = qdf_mem_seq_stop, + .show = qdf_major_alloc_show, +}; + +static int qdf_major_allocs_open(struct inode *inode, struct file *file) +{ + void *private = inode->i_private; + struct seq_file *seq; + int rc; + + rc = seq_open(file, &qdf_major_allocs_seq_ops); + if (rc == 0) { + seq = file->private_data; + seq->private = private; + } + return rc; +} + +static ssize_t qdf_major_alloc_set_threshold(struct file *file, + const char __user *user_buf, + size_t count, + loff_t *pos) +{ + char buf[32]; + ssize_t buf_size; + uint32_t threshold; + struct seq_file *seq = file->private_data; + struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private; + + buf_size = min(count, (sizeof(buf) - 1)); + if (buf_size <= 0) + return 0; + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = '\0'; + if (!kstrtou32(buf, 10, &threshold)) + priv->threshold = threshold; + return buf_size; +} + +/* file operation table for listing major allocs */ +static const struct file_operations fops_qdf_major_allocs = { + .owner = THIS_MODULE, + .open = qdf_major_allocs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, + .write = qdf_major_alloc_set_threshold, +}; + +/* debugfs file operation table */ +static const struct file_operations fops_qdf_mem_debugfs = { + .owner = THIS_MODULE, + .open = qdf_mem_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static struct major_alloc_priv mem_priv = { + /* List type set to mem */ + LIST_TYPE_MEM, + /* initial threshold to list APIs which allocates mem >= 50 times */ + 50 +}; + +static struct major_alloc_priv dma_priv = { + /* List type set to DMA */ + LIST_TYPE_DMA, + /* initial threshold to list APIs which allocates dma >= 50 times */ + 50 +}; + +static QDF_STATUS qdf_mem_debug_debugfs_init(void) +{ + if (is_initial_mem_debug_disabled) + return QDF_STATUS_SUCCESS; + + if (!qdf_mem_debugfs_root) + return QDF_STATUS_E_FAILURE; + + debugfs_create_file("list", + S_IRUSR, + qdf_mem_debugfs_root, + NULL, + &fops_qdf_mem_debugfs); + + debugfs_create_file("major_mem_allocs", + 0600, + qdf_mem_debugfs_root, + &mem_priv, + &fops_qdf_major_allocs); + + debugfs_create_file("major_dma_allocs", + 0600, + qdf_mem_debugfs_root, + &dma_priv, + &fops_qdf_major_allocs); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS qdf_mem_debug_debugfs_exit(void) +{ + return QDF_STATUS_SUCCESS; +} + +#else /* MEMORY_DEBUG */ + +static QDF_STATUS qdf_mem_debug_debugfs_init(void) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static QDF_STATUS qdf_mem_debug_debugfs_exit(void) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +#endif /* MEMORY_DEBUG */ + + +static void qdf_mem_debugfs_exit(void) +{ + debugfs_remove_recursive(qdf_mem_debugfs_root); + qdf_mem_debugfs_root = NULL; +} + +static QDF_STATUS qdf_mem_debugfs_init(void) +{ + struct dentry *qdf_debugfs_root = qdf_debugfs_get_root(); + + if (!qdf_debugfs_root) + return QDF_STATUS_E_FAILURE; + + qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root); + + if (!qdf_mem_debugfs_root) + return QDF_STATUS_E_FAILURE; + + + debugfs_create_atomic_t("kmalloc", + S_IRUSR, + qdf_mem_debugfs_root, + &qdf_mem_stat.kmalloc); + + debugfs_create_atomic_t("dma", + S_IRUSR, + qdf_mem_debugfs_root, + &qdf_mem_stat.dma); + + debugfs_create_atomic_t("skb", + S_IRUSR, + qdf_mem_debugfs_root, + &qdf_mem_stat.skb); + + return QDF_STATUS_SUCCESS; +} + +#else /* WLAN_DEBUGFS */ + +static QDF_STATUS qdf_mem_debugfs_init(void) +{ + return QDF_STATUS_E_NOSUPPORT; +} +static void qdf_mem_debugfs_exit(void) {} + + +static QDF_STATUS qdf_mem_debug_debugfs_init(void) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static QDF_STATUS qdf_mem_debug_debugfs_exit(void) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +#endif /* WLAN_DEBUGFS */ + +static void qdf_mem_dma_inc(qdf_size_t size) +{ + qdf_atomic_add(size, &qdf_mem_stat.dma); +} + +static inline void qdf_mem_dma_dec(qdf_size_t size) +{ + qdf_atomic_sub(size, &qdf_mem_stat.dma); +} + +/** + * __qdf_mempool_init() - Create and initialize memory pool + * + * @osdev: platform device object + * @pool_addr: address of the pool created + * @elem_cnt: no. of elements in pool + * @elem_size: size of each pool element in bytes + * @flags: flags + * + * return: Handle to memory pool or NULL if allocation failed + */ +int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr, + int elem_cnt, size_t elem_size, u_int32_t flags) +{ + __qdf_mempool_ctxt_t *new_pool = NULL; + u_int32_t align = L1_CACHE_BYTES; + unsigned long aligned_pool_mem; + int pool_id; + int i; + + if (prealloc_disabled) { + /* TBD: We can maintain a list of pools in qdf_device_t + * to help debugging + * when pre-allocation is not enabled + */ + new_pool = (__qdf_mempool_ctxt_t *) + kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL); + if (!new_pool) + return QDF_STATUS_E_NOMEM; + + memset(new_pool, 0, sizeof(*new_pool)); + /* TBD: define flags for zeroing buffers etc */ + new_pool->flags = flags; + new_pool->elem_size = elem_size; + new_pool->max_elem = elem_cnt; + *pool_addr = new_pool; + return 0; + } + + for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) { + if (!osdev->mem_pool[pool_id]) + break; + } + + if (pool_id == MAX_MEM_POOLS) + return -ENOMEM; + + new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *) + kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL); + if (!new_pool) + return -ENOMEM; + + memset(new_pool, 0, sizeof(*new_pool)); + /* TBD: define flags for zeroing buffers etc */ + new_pool->flags = flags; + new_pool->pool_id = pool_id; + + /* Round up the element size to cacheline */ + new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES); + new_pool->mem_size = elem_cnt * new_pool->elem_size + + ((align)?(align - 1):0); + + new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL); + if (!new_pool->pool_mem) { + /* TBD: Check if we need get_free_pages above */ + kfree(new_pool); + osdev->mem_pool[pool_id] = NULL; + return -ENOMEM; + } + + spin_lock_init(&new_pool->lock); + + /* Initialize free list */ + aligned_pool_mem = (unsigned long)(new_pool->pool_mem) + + ((align) ? (unsigned long)(new_pool->pool_mem)%align:0); + STAILQ_INIT(&new_pool->free_list); + + for (i = 0; i < elem_cnt; i++) + STAILQ_INSERT_TAIL(&(new_pool->free_list), + (mempool_elem_t *)(aligned_pool_mem + + (new_pool->elem_size * i)), mempool_entry); + + + new_pool->free_cnt = elem_cnt; + *pool_addr = new_pool; + return 0; +} +qdf_export_symbol(__qdf_mempool_init); + +/** + * __qdf_mempool_destroy() - Destroy memory pool + * @osdev: platform device object + * @Handle: to memory pool + * + * Returns: none + */ +void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool) +{ + int pool_id = 0; + + if (!pool) + return; + + if (prealloc_disabled) { + kfree(pool); + return; + } + + pool_id = pool->pool_id; + + /* TBD: Check if free count matches elem_cnt if debug is enabled */ + kfree(pool->pool_mem); + kfree(pool); + osdev->mem_pool[pool_id] = NULL; +} +qdf_export_symbol(__qdf_mempool_destroy); + +/** + * __qdf_mempool_alloc() - Allocate an element memory pool + * + * @osdev: platform device object + * @Handle: to memory pool + * + * Return: Pointer to the allocated element or NULL if the pool is empty + */ +void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool) +{ + void *buf = NULL; + + if (!pool) + return NULL; + + if (prealloc_disabled) + return qdf_mem_malloc(pool->elem_size); + + spin_lock_bh(&pool->lock); + + buf = STAILQ_FIRST(&pool->free_list); + if (buf) { + STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry); + pool->free_cnt--; + } + + /* TBD: Update free count if debug is enabled */ + spin_unlock_bh(&pool->lock); + + return buf; +} +qdf_export_symbol(__qdf_mempool_alloc); + +/** + * __qdf_mempool_free() - Free a memory pool element + * @osdev: Platform device object + * @pool: Handle to memory pool + * @buf: Element to be freed + * + * Returns: none + */ +void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf) +{ + if (!pool) + return; + + + if (prealloc_disabled) + return qdf_mem_free(buf); + + spin_lock_bh(&pool->lock); + pool->free_cnt++; + + STAILQ_INSERT_TAIL + (&pool->free_list, (mempool_elem_t *)buf, mempool_entry); + spin_unlock_bh(&pool->lock); +} +qdf_export_symbol(__qdf_mempool_free); + +#if IS_ENABLED(CONFIG_WCNSS_MEM_PRE_ALLOC) +/** + * qdf_mem_prealloc_get() - conditionally pre-allocate memory + * @size: the number of bytes to allocate + * + * If size if greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns + * a chunk of pre-allocated memory. If size if less than or equal to + * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead. + * + * Return: NULL on failure, non-NULL on success + */ +static void *qdf_mem_prealloc_get(size_t size) +{ + void *ptr; + + if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD) + return NULL; + + ptr = wcnss_prealloc_get(size); + if (!ptr) + return NULL; + + memset(ptr, 0, size); + + return ptr; +} + +static inline bool qdf_mem_prealloc_put(void *ptr) +{ + return wcnss_prealloc_put(ptr); +} +#else +static inline void *qdf_mem_prealloc_get(size_t size) +{ + return NULL; +} + +static inline bool qdf_mem_prealloc_put(void *ptr) +{ + return false; +} +#endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */ + +static int qdf_mem_malloc_flags(void) +{ + if (in_interrupt() || irqs_disabled() || in_atomic()) + return GFP_ATOMIC; + + return GFP_KERNEL; +} + +/* External Function implementation */ +#ifdef MEMORY_DEBUG +/** + * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled + * + * Return: value of mem_debug_disabled qdf module argument + */ +#ifdef DISABLE_MEM_DBG_LOAD_CONFIG +bool qdf_mem_debug_config_get(void) +{ + /* Return false if DISABLE_LOAD_MEM_DBG_CONFIG flag is enabled */ + return false; +} +#else +bool qdf_mem_debug_config_get(void) +{ + return mem_debug_disabled; +} +#endif /* DISABLE_MEM_DBG_LOAD_CONFIG */ + +/** + * qdf_mem_debug_init() - initialize qdf memory debug functionality + * + * Return: none + */ +static void qdf_mem_debug_init(void) +{ + int i; + + is_initial_mem_debug_disabled = qdf_mem_debug_config_get(); + + if (is_initial_mem_debug_disabled) + return; + + /* Initalizing the list with maximum size of 60000 */ + for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) + qdf_list_create(&qdf_mem_domains[i], 60000); + qdf_spinlock_create(&qdf_mem_list_lock); + + /* dma */ + for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) + qdf_list_create(&qdf_mem_dma_domains[i], 0); + qdf_spinlock_create(&qdf_mem_dma_list_lock); +} + +static uint32_t +qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain, + qdf_list_t *mem_list) +{ + if (is_initial_mem_debug_disabled) + return 0; + + if (qdf_list_empty(mem_list)) + return 0; + + qdf_err("Memory leaks detected in %s domain!", + qdf_debug_domain_name(domain)); + qdf_mem_domain_print(mem_list, + qdf_err_printer, + NULL, + 0, + qdf_mem_meta_table_print); + + return mem_list->count; +} + +static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains) +{ + uint32_t leak_count = 0; + int i; + + if (is_initial_mem_debug_disabled) + return; + + /* detect and print leaks */ + for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) + leak_count += qdf_mem_domain_check_for_leaks(i, domains + i); + + if (leak_count) + QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!", + leak_count); +} + +/** + * qdf_mem_debug_exit() - exit qdf memory debug functionality + * + * Return: none + */ +static void qdf_mem_debug_exit(void) +{ + int i; + + if (is_initial_mem_debug_disabled) + return; + + /* mem */ + qdf_mem_domain_set_check_for_leaks(qdf_mem_domains); + for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) + qdf_list_destroy(qdf_mem_list_get(i)); + + qdf_spinlock_destroy(&qdf_mem_list_lock); + + /* dma */ + qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains); + for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) + qdf_list_destroy(&qdf_mem_dma_domains[i]); + qdf_spinlock_destroy(&qdf_mem_dma_list_lock); +} + +void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line, + void *caller, uint32_t flag) +{ + QDF_STATUS status; + enum qdf_debug_domain current_domain = qdf_debug_domain_get(); + qdf_list_t *mem_list = qdf_mem_list_get(current_domain); + struct qdf_mem_header *header; + void *ptr; + unsigned long start, duration; + + if (is_initial_mem_debug_disabled) + return __qdf_mem_malloc(size, func, line); + + if (!size || size > QDF_MEM_MAX_MALLOC) { + qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line); + return NULL; + } + + ptr = qdf_mem_prealloc_get(size); + if (ptr) + return ptr; + + if (!flag) + flag = qdf_mem_malloc_flags(); + + start = qdf_mc_timer_get_system_time(); + header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag); + duration = qdf_mc_timer_get_system_time() - start; + + if (duration > QDF_MEM_WARN_THRESHOLD) + qdf_warn("Malloc slept; %lums, %zuB @ %s:%d", + duration, size, func, line); + + if (!header) { + qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line); + return NULL; + } + + qdf_mem_header_init(header, size, func, line, caller); + qdf_mem_trailer_init(header); + ptr = qdf_mem_get_ptr(header); + + qdf_spin_lock_irqsave(&qdf_mem_list_lock); + status = qdf_list_insert_front(mem_list, &header->node); + qdf_spin_unlock_irqrestore(&qdf_mem_list_lock); + if (QDF_IS_STATUS_ERROR(status)) + qdf_err("Failed to insert memory header; status %d", status); + + qdf_mem_kmalloc_inc(ksize(header)); + + return ptr; +} +qdf_export_symbol(qdf_mem_malloc_debug); + +void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line) +{ + enum qdf_debug_domain current_domain = qdf_debug_domain_get(); + struct qdf_mem_header *header; + enum qdf_mem_validation_bitmap error_bitmap; + + if (is_initial_mem_debug_disabled) { + __qdf_mem_free(ptr); + return; + } + + /* freeing a null pointer is valid */ + if (qdf_unlikely(!ptr)) + return; + + if (qdf_mem_prealloc_put(ptr)) + return; + + if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header))) + QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK", + ptr); + + qdf_talloc_assert_no_children_fl(ptr, func, line); + + qdf_spin_lock_irqsave(&qdf_mem_list_lock); + header = qdf_mem_get_header(ptr); + error_bitmap = qdf_mem_header_validate(header, current_domain); + error_bitmap |= qdf_mem_trailer_validate(header); + + if (!error_bitmap) { + header->freed = true; + qdf_list_remove_node(qdf_mem_list_get(header->domain), + &header->node); + } + qdf_spin_unlock_irqrestore(&qdf_mem_list_lock); + + qdf_mem_header_assert_valid(header, current_domain, error_bitmap, + func, line); + + qdf_mem_kmalloc_dec(ksize(header)); + kfree(header); +} +qdf_export_symbol(qdf_mem_free_debug); + +void qdf_mem_check_for_leaks(void) +{ + enum qdf_debug_domain current_domain = qdf_debug_domain_get(); + qdf_list_t *mem_list = qdf_mem_list_get(current_domain); + qdf_list_t *dma_list = qdf_mem_dma_list(current_domain); + uint32_t leaks_count = 0; + + if (is_initial_mem_debug_disabled) + return; + + leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list); + leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list); + + if (leaks_count) + QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!", + leaks_count); +} + +/** + * qdf_mem_multi_pages_alloc_debug() - Debug version of + * qdf_mem_multi_pages_alloc + * @osdev: OS device handle pointer + * @pages: Multi page information storage + * @element_size: Each element size + * @element_num: Total number of elements should be allocated + * @memctxt: Memory context + * @cacheable: Coherent memory or cacheable memory + * @func: Caller of this allocator + * @line: Line number of the caller + * @caller: Return address of the caller + * + * This function will allocate large size of memory over multiple pages. + * Large size of contiguous memory allocation will fail frequently, then + * instead of allocate large memory by one shot, allocate through multiple, non + * contiguous memory and combine pages when actual usage + * + * Return: None + */ +void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev, + struct qdf_mem_multi_page_t *pages, + size_t element_size, uint16_t element_num, + qdf_dma_context_t memctxt, bool cacheable, + const char *func, uint32_t line, + void *caller) +{ + uint16_t page_idx; + struct qdf_mem_dma_page_t *dma_pages; + void **cacheable_pages = NULL; + uint16_t i; + + if (!pages->page_size) + pages->page_size = qdf_page_size; + + pages->num_element_per_page = pages->page_size / element_size; + if (!pages->num_element_per_page) { + qdf_print("Invalid page %d or element size %d", + (int)pages->page_size, (int)element_size); + goto out_fail; + } + + pages->num_pages = element_num / pages->num_element_per_page; + if (element_num % pages->num_element_per_page) + pages->num_pages++; + + if (cacheable) { + /* Pages information storage */ + pages->cacheable_pages = qdf_mem_malloc_debug( + pages->num_pages * sizeof(pages->cacheable_pages), + func, line, caller, 0); + if (!pages->cacheable_pages) + goto out_fail; + + cacheable_pages = pages->cacheable_pages; + for (page_idx = 0; page_idx < pages->num_pages; page_idx++) { + cacheable_pages[page_idx] = qdf_mem_malloc_debug( + pages->page_size, func, line, caller, 0); + if (!cacheable_pages[page_idx]) + goto page_alloc_fail; + } + pages->dma_pages = NULL; + } else { + pages->dma_pages = qdf_mem_malloc_debug( + pages->num_pages * sizeof(struct qdf_mem_dma_page_t), + func, line, caller, 0); + if (!pages->dma_pages) + goto out_fail; + + dma_pages = pages->dma_pages; + for (page_idx = 0; page_idx < pages->num_pages; page_idx++) { + dma_pages->page_v_addr_start = + qdf_mem_alloc_consistent_debug( + osdev, osdev->dev, pages->page_size, + &dma_pages->page_p_addr, + func, line, caller); + if (!dma_pages->page_v_addr_start) { + qdf_print("dmaable page alloc fail pi %d", + page_idx); + goto page_alloc_fail; + } + dma_pages->page_v_addr_end = + dma_pages->page_v_addr_start + pages->page_size; + dma_pages++; + } + pages->cacheable_pages = NULL; + } + return; + +page_alloc_fail: + if (cacheable) { + for (i = 0; i < page_idx; i++) + qdf_mem_free_debug(pages->cacheable_pages[i], + func, line); + qdf_mem_free_debug(pages->cacheable_pages, func, line); + } else { + dma_pages = pages->dma_pages; + for (i = 0; i < page_idx; i++) { + qdf_mem_free_consistent_debug( + osdev, osdev->dev, + pages->page_size, dma_pages->page_v_addr_start, + dma_pages->page_p_addr, memctxt, func, line); + dma_pages++; + } + qdf_mem_free_debug(pages->dma_pages, func, line); + } + +out_fail: + pages->cacheable_pages = NULL; + pages->dma_pages = NULL; + pages->num_pages = 0; +} + +qdf_export_symbol(qdf_mem_multi_pages_alloc_debug); + +/** + * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free + * @osdev: OS device handle pointer + * @pages: Multi page information storage + * @memctxt: Memory context + * @cacheable: Coherent memory or cacheable memory + * @func: Caller of this allocator + * @line: Line number of the caller + * + * This function will free large size of memory over multiple pages. + * + * Return: None + */ +void qdf_mem_multi_pages_free_debug(qdf_device_t osdev, + struct qdf_mem_multi_page_t *pages, + qdf_dma_context_t memctxt, bool cacheable, + const char *func, uint32_t line) +{ + unsigned int page_idx; + struct qdf_mem_dma_page_t *dma_pages; + + if (!pages->page_size) + pages->page_size = qdf_page_size; + + if (cacheable) { + for (page_idx = 0; page_idx < pages->num_pages; page_idx++) + qdf_mem_free_debug(pages->cacheable_pages[page_idx], + func, line); + qdf_mem_free_debug(pages->cacheable_pages, func, line); + } else { + dma_pages = pages->dma_pages; + for (page_idx = 0; page_idx < pages->num_pages; page_idx++) { + qdf_mem_free_consistent_debug( + osdev, osdev->dev, pages->page_size, + dma_pages->page_v_addr_start, + dma_pages->page_p_addr, memctxt, func, line); + dma_pages++; + } + qdf_mem_free_debug(pages->dma_pages, func, line); + } + + pages->cacheable_pages = NULL; + pages->dma_pages = NULL; + pages->num_pages = 0; +} + +qdf_export_symbol(qdf_mem_multi_pages_free_debug); + +#else +static void qdf_mem_debug_init(void) {} + +static void qdf_mem_debug_exit(void) {} + +void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line) +{ + void *ptr; + + ptr = qdf_mem_prealloc_get(size); + if (ptr) + return ptr; + + ptr = kzalloc(size, GFP_ATOMIC); + if (!ptr) { + qdf_nofl_warn("Failed to malloc %zuB @ %s:%d", + size, func, line); + return NULL; + } + + qdf_mem_kmalloc_inc(ksize(ptr)); + + return ptr; +} +qdf_export_symbol(qdf_mem_malloc_atomic_fl); + +/** + * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory + * @osdev: OS device handle pointer + * @pages: Multi page information storage + * @element_size: Each element size + * @element_num: Total number of elements should be allocated + * @memctxt: Memory context + * @cacheable: Coherent memory or cacheable memory + * + * This function will allocate large size of memory over multiple pages. + * Large size of contiguous memory allocation will fail frequently, then + * instead of allocate large memory by one shot, allocate through multiple, non + * contiguous memory and combine pages when actual usage + * + * Return: None + */ +void qdf_mem_multi_pages_alloc(qdf_device_t osdev, + struct qdf_mem_multi_page_t *pages, + size_t element_size, uint16_t element_num, + qdf_dma_context_t memctxt, bool cacheable) +{ + uint16_t page_idx; + struct qdf_mem_dma_page_t *dma_pages; + void **cacheable_pages = NULL; + uint16_t i; + + if (!pages->page_size) + pages->page_size = qdf_page_size; + + pages->num_element_per_page = pages->page_size / element_size; + if (!pages->num_element_per_page) { + qdf_print("Invalid page %d or element size %d", + (int)pages->page_size, (int)element_size); + goto out_fail; + } + + pages->num_pages = element_num / pages->num_element_per_page; + if (element_num % pages->num_element_per_page) + pages->num_pages++; + + if (cacheable) { + /* Pages information storage */ + pages->cacheable_pages = qdf_mem_malloc( + pages->num_pages * sizeof(pages->cacheable_pages)); + if (!pages->cacheable_pages) + goto out_fail; + + cacheable_pages = pages->cacheable_pages; + for (page_idx = 0; page_idx < pages->num_pages; page_idx++) { + cacheable_pages[page_idx] = + qdf_mem_malloc(pages->page_size); + if (!cacheable_pages[page_idx]) + goto page_alloc_fail; + } + pages->dma_pages = NULL; + } else { + pages->dma_pages = qdf_mem_malloc( + pages->num_pages * sizeof(struct qdf_mem_dma_page_t)); + if (!pages->dma_pages) + goto out_fail; + + dma_pages = pages->dma_pages; + for (page_idx = 0; page_idx < pages->num_pages; page_idx++) { + dma_pages->page_v_addr_start = + qdf_mem_alloc_consistent(osdev, osdev->dev, + pages->page_size, + &dma_pages->page_p_addr); + if (!dma_pages->page_v_addr_start) { + qdf_print("dmaable page alloc fail pi %d", + page_idx); + goto page_alloc_fail; + } + dma_pages->page_v_addr_end = + dma_pages->page_v_addr_start + pages->page_size; + dma_pages++; + } + pages->cacheable_pages = NULL; + } + return; + +page_alloc_fail: + if (cacheable) { + for (i = 0; i < page_idx; i++) + qdf_mem_free(pages->cacheable_pages[i]); + qdf_mem_free(pages->cacheable_pages); + } else { + dma_pages = pages->dma_pages; + for (i = 0; i < page_idx; i++) { + qdf_mem_free_consistent( + osdev, osdev->dev, pages->page_size, + dma_pages->page_v_addr_start, + dma_pages->page_p_addr, memctxt); + dma_pages++; + } + qdf_mem_free(pages->dma_pages); + } + +out_fail: + pages->cacheable_pages = NULL; + pages->dma_pages = NULL; + pages->num_pages = 0; + return; +} +qdf_export_symbol(qdf_mem_multi_pages_alloc); + +/** + * qdf_mem_multi_pages_free() - free large size of kernel memory + * @osdev: OS device handle pointer + * @pages: Multi page information storage + * @memctxt: Memory context + * @cacheable: Coherent memory or cacheable memory + * + * This function will free large size of memory over multiple pages. + * + * Return: None + */ +void qdf_mem_multi_pages_free(qdf_device_t osdev, + struct qdf_mem_multi_page_t *pages, + qdf_dma_context_t memctxt, bool cacheable) +{ + unsigned int page_idx; + struct qdf_mem_dma_page_t *dma_pages; + + if (!pages->page_size) + pages->page_size = qdf_page_size; + + if (cacheable) { + for (page_idx = 0; page_idx < pages->num_pages; page_idx++) + qdf_mem_free(pages->cacheable_pages[page_idx]); + qdf_mem_free(pages->cacheable_pages); + } else { + dma_pages = pages->dma_pages; + for (page_idx = 0; page_idx < pages->num_pages; page_idx++) { + qdf_mem_free_consistent( + osdev, osdev->dev, pages->page_size, + dma_pages->page_v_addr_start, + dma_pages->page_p_addr, memctxt); + dma_pages++; + } + qdf_mem_free(pages->dma_pages); + } + + pages->cacheable_pages = NULL; + pages->dma_pages = NULL; + pages->num_pages = 0; + return; +} +qdf_export_symbol(qdf_mem_multi_pages_free); +#endif + +void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages, + bool cacheable) +{ + unsigned int page_idx; + struct qdf_mem_dma_page_t *dma_pages; + + if (!pages->page_size) + pages->page_size = qdf_page_size; + + if (cacheable) { + for (page_idx = 0; page_idx < pages->num_pages; page_idx++) + qdf_mem_zero(pages->cacheable_pages[page_idx], + pages->page_size); + } else { + dma_pages = pages->dma_pages; + for (page_idx = 0; page_idx < pages->num_pages; page_idx++) { + qdf_mem_zero(dma_pages->page_v_addr_start, + pages->page_size); + dma_pages++; + } + } +} + +qdf_export_symbol(qdf_mem_multi_pages_zero); + +void __qdf_mem_free(void *ptr) +{ + if (!ptr) + return; + + if (qdf_mem_prealloc_put(ptr)) + return; + + qdf_mem_kmalloc_dec(ksize(ptr)); + + kfree(ptr); +} + +qdf_export_symbol(__qdf_mem_free); + +void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line) +{ + void *ptr; + + if (!size || size > QDF_MEM_MAX_MALLOC) { + qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func, + line); + return NULL; + } + + ptr = qdf_mem_prealloc_get(size); + if (ptr) + return ptr; + + ptr = kzalloc(size, qdf_mem_malloc_flags()); + if (!ptr) + return NULL; + + qdf_mem_kmalloc_inc(ksize(ptr)); + + return ptr; +} + +qdf_export_symbol(__qdf_mem_malloc); + +void *qdf_aligned_malloc_fl(uint32_t *size, + void **vaddr_unaligned, + qdf_dma_addr_t *paddr_unaligned, + qdf_dma_addr_t *paddr_aligned, + uint32_t align, + const char *func, uint32_t line) +{ + void *vaddr_aligned; + uint32_t align_alloc_size; + + *vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func, + line); + if (!*vaddr_unaligned) { + qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line); + return NULL; + } + + *paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned); + + /* Re-allocate additional bytes to align base address only if + * above allocation returns unaligned address. Reason for + * trying exact size allocation above is, OS tries to allocate + * blocks of size power-of-2 pages and then free extra pages. + * e.g., of a ring size of 1MB, the allocation below will + * request 1MB plus 7 bytes for alignment, which will cause a + * 2MB block allocation,and that is failing sometimes due to + * memory fragmentation. + */ + if ((unsigned long)(*paddr_unaligned) & (align - 1)) { + align_alloc_size = *size + align - 1; + + qdf_mem_free(*vaddr_unaligned); + *vaddr_unaligned = qdf_mem_malloc_fl( + (qdf_size_t)align_alloc_size, func, line); + if (!*vaddr_unaligned) { + qdf_warn("Failed to alloc %uB @ %s:%d", + align_alloc_size, func, line); + return NULL; + } + + *paddr_unaligned = qdf_mem_virt_to_phys( + *vaddr_unaligned); + *size = align_alloc_size; + } + + *paddr_aligned = (qdf_dma_addr_t)qdf_align + ((unsigned long)(*paddr_unaligned), align); + + vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) + + ((unsigned long)(*paddr_aligned) - + (unsigned long)(*paddr_unaligned))); + + return vaddr_aligned; +} + +qdf_export_symbol(qdf_aligned_malloc_fl); + +/** + * qdf_mem_multi_page_link() - Make links for multi page elements + * @osdev: OS device handle pointer + * @pages: Multi page information storage + * @elem_size: Single element size + * @elem_count: elements count should be linked + * @cacheable: Coherent memory or cacheable memory + * + * This function will make links for multi page allocated structure + * + * Return: 0 success + */ +int qdf_mem_multi_page_link(qdf_device_t osdev, + struct qdf_mem_multi_page_t *pages, + uint32_t elem_size, uint32_t elem_count, uint8_t cacheable) +{ + uint16_t i, i_int; + void *page_info; + void **c_elem = NULL; + uint32_t num_link = 0; + + for (i = 0; i < pages->num_pages; i++) { + if (cacheable) + page_info = pages->cacheable_pages[i]; + else + page_info = pages->dma_pages[i].page_v_addr_start; + + if (!page_info) + return -ENOMEM; + + c_elem = (void **)page_info; + for (i_int = 0; i_int < pages->num_element_per_page; i_int++) { + if (i_int == (pages->num_element_per_page - 1)) { + if (cacheable) + *c_elem = pages-> + cacheable_pages[i + 1]; + else + *c_elem = pages-> + dma_pages[i + 1]. + page_v_addr_start; + num_link++; + break; + } else { + *c_elem = + (void *)(((char *)c_elem) + elem_size); + } + num_link++; + c_elem = (void **)*c_elem; + + /* Last link established exit */ + if (num_link == (elem_count - 1)) + break; + } + } + + if (c_elem) + *c_elem = NULL; + + return 0; +} +qdf_export_symbol(qdf_mem_multi_page_link); + +void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes) +{ + /* special case where dst_addr or src_addr can be NULL */ + if (!num_bytes) + return; + + QDF_BUG(dst_addr); + QDF_BUG(src_addr); + if (!dst_addr || !src_addr) + return; + + memcpy(dst_addr, src_addr, num_bytes); +} +qdf_export_symbol(qdf_mem_copy); + +qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size) +{ + qdf_shared_mem_t *shared_mem; + qdf_dma_addr_t dma_addr, paddr; + int ret; + + shared_mem = qdf_mem_malloc(sizeof(*shared_mem)); + if (!shared_mem) + return NULL; + + shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev, + size, qdf_mem_get_dma_addr_ptr(osdev, + &shared_mem->mem_info)); + if (!shared_mem->vaddr) { + qdf_err("Unable to allocate DMA memory for shared resource"); + qdf_mem_free(shared_mem); + return NULL; + } + + qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size); + size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info); + + qdf_mem_zero(shared_mem->vaddr, size); + dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info); + paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr); + + qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr); + ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable, + shared_mem->vaddr, dma_addr, size); + if (ret) { + qdf_err("Unable to get DMA sgtable"); + qdf_mem_free_consistent(osdev, osdev->dev, + shared_mem->mem_info.size, + shared_mem->vaddr, + dma_addr, + qdf_get_dma_mem_context(shared_mem, + memctx)); + qdf_mem_free(shared_mem); + return NULL; + } + + qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable); + + return shared_mem; +} + +qdf_export_symbol(qdf_mem_shared_mem_alloc); + +/** + * qdf_mem_copy_toio() - copy memory + * @dst_addr: Pointer to destination memory location (to copy to) + * @src_addr: Pointer to source memory location (to copy from) + * @num_bytes: Number of bytes to copy. + * + * Return: none + */ +void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes) +{ + if (0 == num_bytes) { + /* special case where dst_addr or src_addr can be NULL */ + return; + } + + if ((!dst_addr) || (!src_addr)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s called with NULL parameter, source:%pK destination:%pK", + __func__, src_addr, dst_addr); + QDF_ASSERT(0); + return; + } + memcpy_toio(dst_addr, src_addr, num_bytes); +} + +qdf_export_symbol(qdf_mem_copy_toio); + +/** + * qdf_mem_set_io() - set (fill) memory with a specified byte value. + * @ptr: Pointer to memory that will be set + * @value: Byte set in memory + * @num_bytes: Number of bytes to be set + * + * Return: None + */ +void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value) +{ + if (!ptr) { + qdf_print("%s called with NULL parameter ptr", __func__); + return; + } + memset_io(ptr, value, num_bytes); +} + +qdf_export_symbol(qdf_mem_set_io); + +void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value) +{ + QDF_BUG(ptr); + if (!ptr) + return; + + memset(ptr, value, num_bytes); +} +qdf_export_symbol(qdf_mem_set); + +void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes) +{ + /* special case where dst_addr or src_addr can be NULL */ + if (!num_bytes) + return; + + QDF_BUG(dst_addr); + QDF_BUG(src_addr); + if (!dst_addr || !src_addr) + return; + + memmove(dst_addr, src_addr, num_bytes); +} +qdf_export_symbol(qdf_mem_move); + +int qdf_mem_cmp(const void *left, const void *right, size_t size) +{ + QDF_BUG(left); + QDF_BUG(right); + + return memcmp(left, right, size); +} +qdf_export_symbol(qdf_mem_cmp); + +#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB) +/** + * qdf_mem_dma_alloc() - allocates memory for dma + * @osdev: OS device handle + * @dev: Pointer to device handle + * @size: Size to be allocated + * @phy_addr: Physical address + * + * Return: pointer of allocated memory or null if memory alloc fails + */ +static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, + qdf_size_t size, + qdf_dma_addr_t *phy_addr) +{ + void *vaddr; + + vaddr = qdf_mem_malloc(size); + *phy_addr = ((uintptr_t) vaddr); + /* using this type conversion to suppress "cast from pointer to integer + * of different size" warning on some platforms + */ + BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr)); + return vaddr; +} + +#elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \ + !defined(QCA_WIFI_QCN9000) + +#define QCA8074_RAM_BASE 0x50000000 +#define QDF_MEM_ALLOC_X86_MAX_RETRIES 10 +void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size, + qdf_dma_addr_t *phy_addr) +{ + void *vaddr = NULL; + int i; + + *phy_addr = 0; + + for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) { + vaddr = dma_alloc_coherent(dev, size, phy_addr, + qdf_mem_malloc_flags()); + + if (!vaddr) { + qdf_err("%s failed , size: %zu!", __func__, size); + return NULL; + } + + if (*phy_addr >= QCA8074_RAM_BASE) + return vaddr; + + dma_free_coherent(dev, size, vaddr, *phy_addr); + } + + return NULL; +} + +#else +static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, + qdf_size_t size, qdf_dma_addr_t *paddr) +{ + return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags()); +} +#endif + +#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB) +static inline void +qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr) +{ + qdf_mem_free(vaddr); +} +#else + +static inline void +qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr) +{ + dma_free_coherent(dev, size, vaddr, paddr); +} +#endif + +#ifdef MEMORY_DEBUG +void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev, + qdf_size_t size, qdf_dma_addr_t *paddr, + const char *func, uint32_t line, + void *caller) +{ + QDF_STATUS status; + enum qdf_debug_domain current_domain = qdf_debug_domain_get(); + qdf_list_t *mem_list = qdf_mem_dma_list(current_domain); + struct qdf_mem_header *header; + void *vaddr; + + if (is_initial_mem_debug_disabled) + return __qdf_mem_alloc_consistent(osdev, dev, + size, paddr, + func, line); + + if (!size || size > QDF_MEM_MAX_MALLOC) { + qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line); + return NULL; + } + + vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE, + paddr); + + if (!vaddr) { + qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line); + return NULL; + } + + header = qdf_mem_dma_get_header(vaddr, size); + /* For DMA buffers we only add trailers, this function will init + * the header structure at the tail + * Prefix the header into DMA buffer causes SMMU faults, so + * do not prefix header into the DMA buffers + */ + qdf_mem_header_init(header, size, func, line, caller); + + qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock); + status = qdf_list_insert_front(mem_list, &header->node); + qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock); + if (QDF_IS_STATUS_ERROR(status)) + qdf_err("Failed to insert memory header; status %d", status); + + qdf_mem_dma_inc(size); + + return vaddr; +} +qdf_export_symbol(qdf_mem_alloc_consistent_debug); + +void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev, + qdf_size_t size, void *vaddr, + qdf_dma_addr_t paddr, + qdf_dma_context_t memctx, + const char *func, uint32_t line) +{ + enum qdf_debug_domain domain = qdf_debug_domain_get(); + struct qdf_mem_header *header; + enum qdf_mem_validation_bitmap error_bitmap; + + if (is_initial_mem_debug_disabled) { + __qdf_mem_free_consistent( + osdev, dev, + size, vaddr, + paddr, memctx); + return; + } + + /* freeing a null pointer is valid */ + if (qdf_unlikely(!vaddr)) + return; + + qdf_talloc_assert_no_children_fl(vaddr, func, line); + + qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock); + /* For DMA buffers we only add trailers, this function will retrieve + * the header structure at the tail + * Prefix the header into DMA buffer causes SMMU faults, so + * do not prefix header into the DMA buffers + */ + header = qdf_mem_dma_get_header(vaddr, size); + error_bitmap = qdf_mem_header_validate(header, domain); + if (!error_bitmap) { + header->freed = true; + qdf_list_remove_node(qdf_mem_dma_list(header->domain), + &header->node); + } + qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock); + + qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line); + + qdf_mem_dma_dec(header->size); + qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr); +} +qdf_export_symbol(qdf_mem_free_consistent_debug); +#endif /* MEMORY_DEBUG */ + +void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev, + qdf_size_t size, void *vaddr, + qdf_dma_addr_t paddr, qdf_dma_context_t memctx) +{ + qdf_mem_dma_dec(size); + qdf_mem_dma_free(dev, size, vaddr, paddr); +} + +qdf_export_symbol(__qdf_mem_free_consistent); + +void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev, + qdf_size_t size, qdf_dma_addr_t *paddr, + const char *func, uint32_t line) +{ + void *vaddr; + + if (!size || size > QDF_MEM_MAX_MALLOC) { + qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", + size, func, line); + return NULL; + } + + vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr); + + if (vaddr) + qdf_mem_dma_inc(size); + + return vaddr; +} + +qdf_export_symbol(__qdf_mem_alloc_consistent); + +void *qdf_aligned_mem_alloc_consistent_fl( + qdf_device_t osdev, uint32_t *size, + void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned, + qdf_dma_addr_t *paddr_aligned, uint32_t align, + const char *func, uint32_t line) +{ + void *vaddr_aligned; + uint32_t align_alloc_size; + + *vaddr_unaligned = qdf_mem_alloc_consistent( + osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned); + if (!*vaddr_unaligned) { + qdf_warn("Failed to alloc %uB @ %s:%d", + *size, func, line); + return NULL; + } + + /* Re-allocate additional bytes to align base address only if + * above allocation returns unaligned address. Reason for + * trying exact size allocation above is, OS tries to allocate + * blocks of size power-of-2 pages and then free extra pages. + * e.g., of a ring size of 1MB, the allocation below will + * request 1MB plus 7 bytes for alignment, which will cause a + * 2MB block allocation,and that is failing sometimes due to + * memory fragmentation. + */ + if ((unsigned long)(*paddr_unaligned) & (align - 1)) { + align_alloc_size = *size + align - 1; + + qdf_mem_free_consistent(osdev, osdev->dev, *size, + *vaddr_unaligned, + *paddr_unaligned, 0); + + *vaddr_unaligned = qdf_mem_alloc_consistent( + osdev, osdev->dev, align_alloc_size, + paddr_unaligned); + if (!*vaddr_unaligned) { + qdf_warn("Failed to alloc %uB @ %s:%d", + align_alloc_size, func, line); + return NULL; + } + + *size = align_alloc_size; + } + + *paddr_aligned = (qdf_dma_addr_t)qdf_align( + (unsigned long)(*paddr_unaligned), align); + + vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) + + ((unsigned long)(*paddr_aligned) - + (unsigned long)(*paddr_unaligned))); + + return vaddr_aligned; +} +qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl); + +/** + * qdf_mem_dma_sync_single_for_device() - assign memory to device + * @osdev: OS device handle + * @bus_addr: dma address to give to the device + * @size: Size of the memory block + * @direction: direction data will be DMAed + * + * Assign memory to the remote device. + * The cache lines are flushed to ram or invalidated as needed. + * + * Return: none + */ +void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev, + qdf_dma_addr_t bus_addr, + qdf_size_t size, + enum dma_data_direction direction) +{ + dma_sync_single_for_device(osdev->dev, bus_addr, size, direction); +} +qdf_export_symbol(qdf_mem_dma_sync_single_for_device); + +/** + * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU + * @osdev: OS device handle + * @bus_addr: dma address to give to the cpu + * @size: Size of the memory block + * @direction: direction data will be DMAed + * + * Assign memory to the CPU. + * + * Return: none + */ +void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev, + qdf_dma_addr_t bus_addr, + qdf_size_t size, + enum dma_data_direction direction) +{ + dma_sync_single_for_cpu(osdev->dev, bus_addr, size, direction); +} +qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu); + +void qdf_mem_init(void) +{ + qdf_mem_debug_init(); + qdf_net_buf_debug_init(); + qdf_mem_debugfs_init(); + qdf_mem_debug_debugfs_init(); +} +qdf_export_symbol(qdf_mem_init); + +void qdf_mem_exit(void) +{ + qdf_mem_debug_debugfs_exit(); + qdf_mem_debugfs_exit(); + qdf_net_buf_debug_exit(); + qdf_mem_debug_exit(); +} +qdf_export_symbol(qdf_mem_exit); + +/** + * qdf_ether_addr_copy() - copy an Ethernet address + * + * @dst_addr: A six-byte array Ethernet address destination + * @src_addr: A six-byte array Ethernet address source + * + * Please note: dst & src must both be aligned to u16. + * + * Return: none + */ +void qdf_ether_addr_copy(void *dst_addr, const void *src_addr) +{ + if ((!dst_addr) || (!src_addr)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s called with NULL parameter, source:%pK destination:%pK", + __func__, src_addr, dst_addr); + QDF_ASSERT(0); + return; + } + ether_addr_copy(dst_addr, src_addr); +} +qdf_export_symbol(qdf_ether_addr_copy); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_module.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_module.c new file mode 100644 index 0000000000000000000000000000000000000000..9c1529206c050373d7d05a22b5d82abfa13fe9c6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_module.c @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2012-2018, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_module.h + * Linux-specific definitions for QDF module API's + */ + +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Qualcomm Atheros Inc."); +MODULE_DESCRIPTION("Qualcomm Atheros Device Framework Module"); +MODULE_LICENSE("Dual BSD/GPL"); + +#ifndef EXPORT_SYMTAB +#define EXPORT_SYMTAB +#endif + +/** + * qdf_mod_init() - module initialization + * + * Return: int + */ +#ifndef QCA_SINGLE_WIFI_3_0 +static int __init qdf_mod_init(void) +#else +int qdf_mod_init(void) +#endif +{ + qdf_shared_print_ctrl_init(); + qdf_debugfs_init(); + qdf_mem_init(); + qdf_talloc_feature_init(); + qdf_logging_init(); + qdf_perfmod_init(); + qdf_nbuf_mod_init(); + qdf_event_list_init(); + + return 0; +} + +#ifndef QCA_SINGLE_WIFI_3_0 +module_init(qdf_mod_init); +#endif +/** + * qdf_mod_exit() - module remove + * + * Return: int + */ +#ifndef QCA_SINGLE_WIFI_3_0 +static void __exit qdf_mod_exit(void) +#else +void qdf_mod_exit(void) +#endif +{ + qdf_event_list_destroy(); + qdf_nbuf_mod_exit(); + qdf_perfmod_exit(); + qdf_logging_exit(); + qdf_talloc_feature_deinit(); + qdf_mem_exit(); + qdf_debugfs_exit(); + qdf_shared_print_ctrl_cleanup(); +} + +#ifndef QCA_SINGLE_WIFI_3_0 +module_exit(qdf_mod_exit); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c new file mode 100644 index 0000000000000000000000000000000000000000..0ee852ee732791a22f4307ae751d2a82bcd313a6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c @@ -0,0 +1,4656 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_nbuf.c + * QCA driver framework(QDF) network buffer management APIs + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qdf_str.h" +#include +#include "qdf_tracker.h" +#include +#include +#include + +#if defined(FEATURE_TSO) +#include +#include +#include +#include +#include +#endif /* FEATURE_TSO */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) + +#define qdf_nbuf_users_inc atomic_inc +#define qdf_nbuf_users_dec atomic_dec +#define qdf_nbuf_users_set atomic_set +#define qdf_nbuf_users_read atomic_read +#else +#define qdf_nbuf_users_inc refcount_inc +#define qdf_nbuf_users_dec refcount_dec +#define qdf_nbuf_users_set refcount_set +#define qdf_nbuf_users_read refcount_read +#endif /* KERNEL_VERSION(4, 13, 0) */ + +#define IEEE80211_RADIOTAP_VHT_BW_20 0 +#define IEEE80211_RADIOTAP_VHT_BW_40 1 +#define IEEE80211_RADIOTAP_VHT_BW_80 2 +#define IEEE80211_RADIOTAP_VHT_BW_160 3 + +#define RADIOTAP_VHT_BW_20 0 +#define RADIOTAP_VHT_BW_40 1 +#define RADIOTAP_VHT_BW_80 4 +#define RADIOTAP_VHT_BW_160 11 + +/* channel number to freq conversion */ +#define CHANNEL_NUM_14 14 +#define CHANNEL_NUM_15 15 +#define CHANNEL_NUM_27 27 +#define CHANNEL_NUM_35 35 +#define CHANNEL_NUM_182 182 +#define CHANNEL_NUM_197 197 +#define CHANNEL_FREQ_2484 2484 +#define CHANNEL_FREQ_2407 2407 +#define CHANNEL_FREQ_2512 2512 +#define CHANNEL_FREQ_5000 5000 +#define CHANNEL_FREQ_4000 4000 +#define CHANNEL_FREQ_5150 5150 +#define FREQ_MULTIPLIER_CONST_5MHZ 5 +#define FREQ_MULTIPLIER_CONST_20MHZ 20 +#define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100 +#define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080 +#define RADIOTAP_CCK_CHANNEL 0x0020 +#define RADIOTAP_OFDM_CHANNEL 0x0040 + +#ifdef FEATURE_NBUFF_REPLENISH_TIMER +#include + +struct qdf_track_timer { + qdf_mc_timer_t track_timer; + qdf_atomic_t alloc_fail_cnt; +}; + +static struct qdf_track_timer alloc_track_timer; + +#define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS 5000 +#define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD 50 +#endif + +/* Packet Counter */ +static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX]; +static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX]; +#ifdef QDF_NBUF_GLOBAL_COUNT +#define NBUF_DEBUGFS_NAME "nbuf_counters" +static qdf_atomic_t nbuf_count; +#endif + +#if defined(NBUF_MEMORY_DEBUG) +static bool is_initial_mem_debug_disabled; +#endif + +/** + * __qdf_nbuf_get_ip_offset - Get IPV4/V6 header offset + * @data: Pointer to network data buffer + * + * Get the IP header offset in case of 8021Q and 8021AD + * tag is present in L2 header. + * + * Return: IP header offset + */ +static inline uint8_t __qdf_nbuf_get_ip_offset(uint8_t *data) +{ + uint16_t ether_type; + + ether_type = *(uint16_t *)(data + + QDF_NBUF_TRAC_ETH_TYPE_OFFSET); + + if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q))) + return QDF_NBUF_TRAC_VLAN_IP_OFFSET; + else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD))) + return QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET; + + return QDF_NBUF_TRAC_IP_OFFSET; +} + +qdf_export_symbol(__qdf_nbuf_get_ip_offset); + +/** + * __qdf_nbuf_get_ether_type - Get the ether type + * @data: Pointer to network data buffer + * + * Get the ether type in case of 8021Q and 8021AD tag + * is present in L2 header, e.g for the returned ether type + * value, if IPV4 data ether type 0x0800, return 0x0008. + * + * Return ether type. + */ +static inline uint16_t __qdf_nbuf_get_ether_type(uint8_t *data) +{ + uint16_t ether_type; + + ether_type = *(uint16_t *)(data + + QDF_NBUF_TRAC_ETH_TYPE_OFFSET); + + if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q))) + ether_type = *(uint16_t *)(data + + QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET); + else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD))) + ether_type = *(uint16_t *)(data + + QDF_NBUF_TRAC_DOUBLE_VLAN_ETH_TYPE_OFFSET); + + return ether_type; +} + +qdf_export_symbol(__qdf_nbuf_get_ether_type); + +/** + * qdf_nbuf_tx_desc_count_display() - Displays the packet counter + * + * Return: none + */ +void qdf_nbuf_tx_desc_count_display(void) +{ + qdf_debug("Current Snapshot of the Driver:"); + qdf_debug("Data Packets:"); + qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d", + nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] - + (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] + + nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] - + nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]), + nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] - + nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE], + nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] - + nbuf_tx_data[QDF_NBUF_TX_PKT_HTT], + nbuf_tx_data[QDF_NBUF_TX_PKT_HTT] - + nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]); + qdf_debug(" HTC %d HIF %d CE %d TX_COMP %d", + nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] - + nbuf_tx_data[QDF_NBUF_TX_PKT_HIF], + nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] - + nbuf_tx_data[QDF_NBUF_TX_PKT_CE], + nbuf_tx_data[QDF_NBUF_TX_PKT_CE] - + nbuf_tx_data[QDF_NBUF_TX_PKT_FREE], + nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]); + qdf_debug("Mgmt Packets:"); + qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d", + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] - + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE], + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] - + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT], + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] - + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC], + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] - + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF], + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] - + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE], + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] - + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE], + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]); +} +qdf_export_symbol(qdf_nbuf_tx_desc_count_display); + +/** + * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter + * @packet_type : packet type either mgmt/data + * @current_state : layer at which the packet currently present + * + * Return: none + */ +static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type, + uint8_t current_state) +{ + switch (packet_type) { + case QDF_NBUF_TX_PKT_MGMT_TRACK: + nbuf_tx_mgmt[current_state]++; + break; + case QDF_NBUF_TX_PKT_DATA_TRACK: + nbuf_tx_data[current_state]++; + break; + default: + break; + } +} +qdf_export_symbol(qdf_nbuf_tx_desc_count_update); + +/** + * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt + * + * Return: none + */ +void qdf_nbuf_tx_desc_count_clear(void) +{ + memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt)); + memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data)); +} +qdf_export_symbol(qdf_nbuf_tx_desc_count_clear); + +/** + * qdf_nbuf_set_state() - Updates the packet state + * @nbuf: network buffer + * @current_state : layer at which the packet currently is + * + * This function updates the packet state to the layer at which the packet + * currently is + * + * Return: none + */ +void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state) +{ + /* + * Only Mgmt, Data Packets are tracked. WMI messages + * such as scan commands are not tracked + */ + uint8_t packet_type; + + packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf); + + if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) && + (packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) { + return; + } + QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state; + qdf_nbuf_tx_desc_count_update(packet_type, + current_state); +} +qdf_export_symbol(qdf_nbuf_set_state); + +#ifdef FEATURE_NBUFF_REPLENISH_TIMER +/** + * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer + * + * This function starts the alloc fail replenish timer. + * + * Return: void + */ +static void __qdf_nbuf_start_replenish_timer(void) +{ + qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt); + if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) != + QDF_TIMER_STATE_RUNNING) + qdf_mc_timer_start(&alloc_track_timer.track_timer, + QDF_NBUF_ALLOC_EXPIRE_TIMER_MS); +} + +/** + * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer + * + * This function stops the alloc fail replenish timer. + * + * Return: void + */ +static void __qdf_nbuf_stop_replenish_timer(void) +{ + if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0) + return; + + qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0); + if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) == + QDF_TIMER_STATE_RUNNING) + qdf_mc_timer_stop(&alloc_track_timer.track_timer); +} + +/** + * qdf_replenish_expire_handler - Replenish expire handler + * + * This function triggers when the alloc fail replenish timer expires. + * + * Return: void + */ +static void qdf_replenish_expire_handler(void *arg) +{ + if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) > + QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) { + qdf_print("ERROR: NBUF allocation timer expired Fail count %d", + qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt)); + + /* Error handling here */ + } +} + +/** + * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer + * + * This function initializes the nbuf alloc fail replenish timer. + * + * Return: void + */ +void __qdf_nbuf_init_replenish_timer(void) +{ + qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW, + qdf_replenish_expire_handler, NULL); +} + +/** + * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer + * + * This function deinitializes the nbuf alloc fail replenish timer. + * + * Return: void + */ +void __qdf_nbuf_deinit_replenish_timer(void) +{ + __qdf_nbuf_stop_replenish_timer(); + qdf_mc_timer_destroy(&alloc_track_timer.track_timer); +} +#else + +static inline void __qdf_nbuf_start_replenish_timer(void) {} +static inline void __qdf_nbuf_stop_replenish_timer(void) {} +#endif + +/* globals do not need to be initialized to NULL/0 */ +qdf_nbuf_trace_update_t qdf_trace_update_cb; +qdf_nbuf_free_t nbuf_free_cb; + +#ifdef QDF_NBUF_GLOBAL_COUNT + +/** + * __qdf_nbuf_count_get() - get nbuf global count + * + * Return: nbuf global count + */ +int __qdf_nbuf_count_get(void) +{ + return qdf_atomic_read(&nbuf_count); +} +qdf_export_symbol(__qdf_nbuf_count_get); + +/** + * __qdf_nbuf_count_inc() - increment nbuf global count + * + * @buf: sk buff + * + * Return: void + */ +void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf) +{ + int num_nbuf = 1; + qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(nbuf); + + /* Take care to account for frag_list */ + while (ext_list) { + ++num_nbuf; + ext_list = qdf_nbuf_queue_next(ext_list); + } + + qdf_atomic_add(num_nbuf, &nbuf_count); +} +qdf_export_symbol(__qdf_nbuf_count_inc); + +/** + * __qdf_nbuf_count_dec() - decrement nbuf global count + * + * @buf: sk buff + * + * Return: void + */ +void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf) +{ + qdf_nbuf_t ext_list; + int num_nbuf; + + if (qdf_nbuf_get_users(nbuf) > 1) + return; + + num_nbuf = 1; + + /* Take care to account for frag_list */ + ext_list = qdf_nbuf_get_ext_list(nbuf); + while (ext_list) { + if (qdf_nbuf_get_users(ext_list) == 1) + ++num_nbuf; + ext_list = qdf_nbuf_queue_next(ext_list); + } + + qdf_atomic_sub(num_nbuf, &nbuf_count); +} +qdf_export_symbol(__qdf_nbuf_count_dec); +#endif + +#if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \ + !defined(QCA_WIFI_QCN9000) +struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve, + int align, int prio, const char *func, + uint32_t line) +{ + struct sk_buff *skb; + unsigned long offset; + uint32_t lowmem_alloc_tries = 0; + + if (align) + size += (align - 1); + +realloc: + skb = dev_alloc_skb(size); + + if (skb) + goto skb_alloc; + + skb = pld_nbuf_pre_alloc(size); + + if (!skb) { + qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d", + size, func, line); + return NULL; + } + +skb_alloc: + /* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040 + * Though we are trying to reserve low memory upfront to prevent this, + * we sometimes see SKBs allocated from low memory. + */ + if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) { + lowmem_alloc_tries++; + if (lowmem_alloc_tries > 100) { + qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d", + size, func, line); + return NULL; + } else { + /* Not freeing to make sure it + * will not get allocated again + */ + goto realloc; + } + } + memset(skb->cb, 0x0, sizeof(skb->cb)); + + /* + * The default is for netbuf fragments to be interpreted + * as wordstreams rather than bytestreams. + */ + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1; + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1; + + /* + * XXX:how about we reserve first then align + * Align & make sure that the tail & data are adjusted properly + */ + + if (align) { + offset = ((unsigned long)skb->data) % align; + if (offset) + skb_reserve(skb, align - offset); + } + + /* + * NOTE:alloc doesn't take responsibility if reserve unaligns the data + * pointer + */ + skb_reserve(skb, reserve); + qdf_nbuf_count_inc(skb); + + return skb; +} +#else +struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve, + int align, int prio, const char *func, + uint32_t line) +{ + struct sk_buff *skb; + unsigned long offset; + int flags = GFP_KERNEL; + + if (align) + size += (align - 1); + + if (in_interrupt() || irqs_disabled() || in_atomic()) { + flags = GFP_ATOMIC; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + /* + * Observed that kcompactd burns out CPU to make order-3 page. + *__netdev_alloc_skb has 4k page fallback option just in case of + * failing high order page allocation so we don't need to be + * hard. Make kcompactd rest in piece. + */ + flags = flags & ~__GFP_KSWAPD_RECLAIM; +#endif + } + + skb = __netdev_alloc_skb(NULL, size, flags); + + if (skb) + goto skb_alloc; + + skb = pld_nbuf_pre_alloc(size); + + if (!skb) { + qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d", + size, func, line); + __qdf_nbuf_start_replenish_timer(); + return NULL; + } else { + __qdf_nbuf_stop_replenish_timer(); + } + +skb_alloc: + memset(skb->cb, 0x0, sizeof(skb->cb)); + + /* + * The default is for netbuf fragments to be interpreted + * as wordstreams rather than bytestreams. + */ + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1; + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1; + + /* + * XXX:how about we reserve first then align + * Align & make sure that the tail & data are adjusted properly + */ + + if (align) { + offset = ((unsigned long)skb->data) % align; + if (offset) + skb_reserve(skb, align - offset); + } + + /* + * NOTE:alloc doesn't take responsibility if reserve unaligns the data + * pointer + */ + skb_reserve(skb, reserve); + qdf_nbuf_count_inc(skb); + + return skb; +} +#endif +qdf_export_symbol(__qdf_nbuf_alloc); + +/** + * __qdf_nbuf_free() - free the nbuf its interrupt safe + * @skb: Pointer to network buffer + * + * Return: none + */ + +void __qdf_nbuf_free(struct sk_buff *skb) +{ + if (pld_nbuf_pre_alloc_free(skb)) + return; + + qdf_nbuf_count_dec(skb); + if (nbuf_free_cb) + nbuf_free_cb(skb); + else + dev_kfree_skb_any(skb); +} + +qdf_export_symbol(__qdf_nbuf_free); + +#ifdef NBUF_MEMORY_DEBUG +enum qdf_nbuf_event_type { + QDF_NBUF_ALLOC, + QDF_NBUF_ALLOC_CLONE, + QDF_NBUF_ALLOC_COPY, + QDF_NBUF_ALLOC_FAILURE, + QDF_NBUF_FREE, + QDF_NBUF_MAP, + QDF_NBUF_UNMAP, + QDF_NBUF_ALLOC_COPY_EXPAND, +}; + +struct qdf_nbuf_event { + qdf_nbuf_t nbuf; + char func[QDF_MEM_FUNC_NAME_SIZE]; + uint32_t line; + enum qdf_nbuf_event_type type; + uint64_t timestamp; +}; + +#define QDF_NBUF_HISTORY_SIZE 4096 +static qdf_atomic_t qdf_nbuf_history_index; +static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE]; + +static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size) +{ + int32_t next = qdf_atomic_inc_return(index); + + if (next == size) + qdf_atomic_sub(size, index); + + return next % size; +} + +static void +qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line, + enum qdf_nbuf_event_type type) +{ + int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index, + QDF_NBUF_HISTORY_SIZE); + struct qdf_nbuf_event *event = &qdf_nbuf_history[idx]; + + event->nbuf = nbuf; + qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE); + event->line = line; + event->type = type; + event->timestamp = qdf_get_log_timestamp(); +} +#endif /* NBUF_MEMORY_DEBUG */ + +#ifdef NBUF_MAP_UNMAP_DEBUG +#define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */ +qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits, + "nbuf map-no-unmap events", "nbuf map", "nbuf unmap"); + +static void qdf_nbuf_map_tracking_init(void) +{ + qdf_tracker_init(&qdf_nbuf_map_tracker); +} + +static void qdf_nbuf_map_tracking_deinit(void) +{ + qdf_tracker_deinit(&qdf_nbuf_map_tracker); +} + +static QDF_STATUS +qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line) +{ + QDF_STATUS status; + + if (is_initial_mem_debug_disabled) + return QDF_STATUS_SUCCESS; + + status = qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP); + + return QDF_STATUS_SUCCESS; +} + +static void +qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line) +{ + if (is_initial_mem_debug_disabled) + return; + + qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP); + qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line); +} + +void qdf_nbuf_map_check_for_leaks(void) +{ + qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker); +} + +QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + const char *func, + uint32_t line) +{ + QDF_STATUS status; + + status = qdf_nbuf_track_map(buf, func, line); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = __qdf_nbuf_map(osdev, buf, dir); + if (QDF_IS_STATUS_ERROR(status)) + qdf_nbuf_untrack_map(buf, func, line); + else + qdf_net_buf_debug_update_map_node(buf, func, line); + + return status; +} + +qdf_export_symbol(qdf_nbuf_map_debug); + +void qdf_nbuf_unmap_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + const char *func, + uint32_t line) +{ + qdf_nbuf_untrack_map(buf, func, line); + __qdf_nbuf_unmap_single(osdev, buf, dir); + qdf_net_buf_debug_update_unmap_node(buf, func, line); +} + +qdf_export_symbol(qdf_nbuf_unmap_debug); + +QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + const char *func, + uint32_t line) +{ + QDF_STATUS status; + + status = qdf_nbuf_track_map(buf, func, line); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = __qdf_nbuf_map_single(osdev, buf, dir); + if (QDF_IS_STATUS_ERROR(status)) + qdf_nbuf_untrack_map(buf, func, line); + else + qdf_net_buf_debug_update_map_node(buf, func, line); + + return status; +} + +qdf_export_symbol(qdf_nbuf_map_single_debug); + +void qdf_nbuf_unmap_single_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + const char *func, + uint32_t line) +{ + qdf_nbuf_untrack_map(buf, func, line); + __qdf_nbuf_unmap_single(osdev, buf, dir); + qdf_net_buf_debug_update_unmap_node(buf, func, line); +} + +qdf_export_symbol(qdf_nbuf_unmap_single_debug); + +QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *func, + uint32_t line) +{ + QDF_STATUS status; + + status = qdf_nbuf_track_map(buf, func, line); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes); + if (QDF_IS_STATUS_ERROR(status)) + qdf_nbuf_untrack_map(buf, func, line); + else + qdf_net_buf_debug_update_map_node(buf, func, line); + + return status; +} + +qdf_export_symbol(qdf_nbuf_map_nbytes_debug); + +void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *func, + uint32_t line) +{ + qdf_nbuf_untrack_map(buf, func, line); + __qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes); + qdf_net_buf_debug_update_unmap_node(buf, func, line); +} + +qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug); + +QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *func, + uint32_t line) +{ + QDF_STATUS status; + + status = qdf_nbuf_track_map(buf, func, line); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes); + if (QDF_IS_STATUS_ERROR(status)) + qdf_nbuf_untrack_map(buf, func, line); + else + qdf_net_buf_debug_update_map_node(buf, func, line); + + return status; +} + +qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug); + +void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *func, + uint32_t line) +{ + qdf_nbuf_untrack_map(buf, func, line); + __qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes); + qdf_net_buf_debug_update_unmap_node(buf, func, line); +} + +qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug); + +static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf, + const char *func, + uint32_t line) +{ + char map_func[QDF_TRACKER_FUNC_SIZE]; + uint32_t map_line; + + if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf, + &map_func, &map_line)) + return; + + QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u", + func, line, map_func, map_line); +} +#else +static inline void qdf_nbuf_map_tracking_init(void) +{ +} + +static inline void qdf_nbuf_map_tracking_deinit(void) +{ +} + +static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf, + const char *func, + uint32_t line) +{ +} +#endif /* NBUF_MAP_UNMAP_DEBUG */ + +/** + * __qdf_nbuf_map() - map a buffer to local bus address space + * @osdev: OS device + * @bmap: Bitmap + * @skb: Pointer to network buffer + * @dir: Direction + * + * Return: QDF_STATUS + */ +#ifdef QDF_OS_DEBUG +QDF_STATUS +__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir) +{ + struct skb_shared_info *sh = skb_shinfo(skb); + + qdf_assert((dir == QDF_DMA_TO_DEVICE) + || (dir == QDF_DMA_FROM_DEVICE)); + + /* + * Assume there's only a single fragment. + * To support multiple fragments, it would be necessary to change + * qdf_nbuf_t to be a separate object that stores meta-info + * (including the bus address for each fragment) and a pointer + * to the underlying sk_buff. + */ + qdf_assert(sh->nr_frags == 0); + + return __qdf_nbuf_map_single(osdev, skb, dir); +} +qdf_export_symbol(__qdf_nbuf_map); + +#else +QDF_STATUS +__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir) +{ + return __qdf_nbuf_map_single(osdev, skb, dir); +} +qdf_export_symbol(__qdf_nbuf_map); +#endif +/** + * __qdf_nbuf_unmap() - to unmap a previously mapped buf + * @osdev: OS device + * @skb: Pointer to network buffer + * @dir: dma direction + * + * Return: none + */ +void +__qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb, + qdf_dma_dir_t dir) +{ + qdf_assert((dir == QDF_DMA_TO_DEVICE) + || (dir == QDF_DMA_FROM_DEVICE)); + + /* + * Assume there's a single fragment. + * If this is not true, the assertion in __qdf_nbuf_map will catch it. + */ + __qdf_nbuf_unmap_single(osdev, skb, dir); +} +qdf_export_symbol(__qdf_nbuf_unmap); + +/** + * __qdf_nbuf_map_single() - map a single buffer to local bus address space + * @osdev: OS device + * @skb: Pointer to network buffer + * @dir: Direction + * + * Return: QDF_STATUS + */ +#if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO) +QDF_STATUS +__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + qdf_dma_addr_t paddr; + + QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data; + BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data)); + BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data)); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_nbuf_map_single); +#else +QDF_STATUS +__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + qdf_dma_addr_t paddr; + + /* assume that the OS only provides a single fragment */ + QDF_NBUF_CB_PADDR(buf) = paddr = + dma_map_single(osdev->dev, buf->data, + skb_end_pointer(buf) - buf->data, + __qdf_dma_dir_to_os(dir)); + return dma_mapping_error(osdev->dev, paddr) + ? QDF_STATUS_E_FAILURE + : QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_nbuf_map_single); +#endif +/** + * __qdf_nbuf_unmap_single() - unmap a previously mapped buf + * @osdev: OS device + * @skb: Pointer to network buffer + * @dir: Direction + * + * Return: none + */ +#if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO) +void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf, + qdf_dma_dir_t dir) +{ +} +#else +void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf, + qdf_dma_dir_t dir) +{ + if (QDF_NBUF_CB_PADDR(buf)) + dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf), + skb_end_pointer(buf) - buf->data, + __qdf_dma_dir_to_os(dir)); +} +#endif +qdf_export_symbol(__qdf_nbuf_unmap_single); + +/** + * __qdf_nbuf_set_rx_cksum() - set rx checksum + * @skb: Pointer to network buffer + * @cksum: Pointer to checksum value + * + * Return: QDF_STATUS + */ +QDF_STATUS +__qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum) +{ + switch (cksum->l4_result) { + case QDF_NBUF_RX_CKSUM_NONE: + skb->ip_summed = CHECKSUM_NONE; + break; + case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY: + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + case QDF_NBUF_RX_CKSUM_TCP_UDP_HW: + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum = cksum->val; + break; + default: + pr_err("Unknown checksum type\n"); + qdf_assert(0); + return QDF_STATUS_E_NOSUPPORT; + } + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_nbuf_set_rx_cksum); + +/** + * __qdf_nbuf_get_tx_cksum() - get tx checksum + * @skb: Pointer to network buffer + * + * Return: TX checksum value + */ +qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb) +{ + switch (skb->ip_summed) { + case CHECKSUM_NONE: + return QDF_NBUF_TX_CKSUM_NONE; + case CHECKSUM_PARTIAL: + return QDF_NBUF_TX_CKSUM_TCP_UDP; + case CHECKSUM_COMPLETE: + return QDF_NBUF_TX_CKSUM_TCP_UDP_IP; + default: + return QDF_NBUF_TX_CKSUM_NONE; + } +} +qdf_export_symbol(__qdf_nbuf_get_tx_cksum); + +/** + * __qdf_nbuf_get_tid() - get tid + * @skb: Pointer to network buffer + * + * Return: tid + */ +uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb) +{ + return skb->priority; +} +qdf_export_symbol(__qdf_nbuf_get_tid); + +/** + * __qdf_nbuf_set_tid() - set tid + * @skb: Pointer to network buffer + * + * Return: none + */ +void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid) +{ + skb->priority = tid; +} +qdf_export_symbol(__qdf_nbuf_set_tid); + +/** + * __qdf_nbuf_set_tid() - set tid + * @skb: Pointer to network buffer + * + * Return: none + */ +uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb) +{ + return QDF_NBUF_EXEMPT_NO_EXEMPTION; +} +qdf_export_symbol(__qdf_nbuf_get_exemption_type); + +/** + * __qdf_nbuf_reg_trace_cb() - register trace callback + * @cb_func_ptr: Pointer to trace callback function + * + * Return: none + */ +void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr) +{ + qdf_trace_update_cb = cb_func_ptr; +} +qdf_export_symbol(__qdf_nbuf_reg_trace_cb); + +/** + * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype + * of DHCP packet. + * @data: Pointer to DHCP packet data buffer + * + * This func. returns the subtype of DHCP packet. + * + * Return: subtype of the DHCP packet. + */ +enum qdf_proto_subtype +__qdf_nbuf_data_get_dhcp_subtype(uint8_t *data) +{ + enum qdf_proto_subtype subtype = QDF_PROTO_INVALID; + + if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) && + (data[QDF_DHCP_OPTION53_LENGTH_OFFSET] == + QDF_DHCP_OPTION53_LENGTH)) { + + switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) { + case QDF_DHCP_DISCOVER: + subtype = QDF_PROTO_DHCP_DISCOVER; + break; + case QDF_DHCP_REQUEST: + subtype = QDF_PROTO_DHCP_REQUEST; + break; + case QDF_DHCP_OFFER: + subtype = QDF_PROTO_DHCP_OFFER; + break; + case QDF_DHCP_ACK: + subtype = QDF_PROTO_DHCP_ACK; + break; + case QDF_DHCP_NAK: + subtype = QDF_PROTO_DHCP_NACK; + break; + case QDF_DHCP_RELEASE: + subtype = QDF_PROTO_DHCP_RELEASE; + break; + case QDF_DHCP_INFORM: + subtype = QDF_PROTO_DHCP_INFORM; + break; + case QDF_DHCP_DECLINE: + subtype = QDF_PROTO_DHCP_DECLINE; + break; + default: + break; + } + } + + return subtype; +} + +/** + * __qdf_nbuf_data_get_eapol_subtype() - get the subtype + * of EAPOL packet. + * @data: Pointer to EAPOL packet data buffer + * + * This func. returns the subtype of EAPOL packet. + * + * Return: subtype of the EAPOL packet. + */ +enum qdf_proto_subtype +__qdf_nbuf_data_get_eapol_subtype(uint8_t *data) +{ + uint16_t eapol_key_info; + enum qdf_proto_subtype subtype = QDF_PROTO_INVALID; + uint16_t mask; + + eapol_key_info = (uint16_t)(*(uint16_t *) + (data + EAPOL_KEY_INFO_OFFSET)); + + mask = eapol_key_info & EAPOL_MASK; + switch (mask) { + case EAPOL_M1_BIT_MASK: + subtype = QDF_PROTO_EAPOL_M1; + break; + case EAPOL_M2_BIT_MASK: + subtype = QDF_PROTO_EAPOL_M2; + break; + case EAPOL_M3_BIT_MASK: + subtype = QDF_PROTO_EAPOL_M3; + break; + case EAPOL_M4_BIT_MASK: + subtype = QDF_PROTO_EAPOL_M4; + break; + default: + break; + } + + return subtype; +} + +/** + * __qdf_nbuf_data_get_arp_subtype() - get the subtype + * of ARP packet. + * @data: Pointer to ARP packet data buffer + * + * This func. returns the subtype of ARP packet. + * + * Return: subtype of the ARP packet. + */ +enum qdf_proto_subtype +__qdf_nbuf_data_get_arp_subtype(uint8_t *data) +{ + uint16_t subtype; + enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID; + + subtype = (uint16_t)(*(uint16_t *) + (data + ARP_SUB_TYPE_OFFSET)); + + switch (QDF_SWAP_U16(subtype)) { + case ARP_REQUEST: + proto_subtype = QDF_PROTO_ARP_REQ; + break; + case ARP_RESPONSE: + proto_subtype = QDF_PROTO_ARP_RES; + break; + default: + break; + } + + return proto_subtype; +} + +/** + * __qdf_nbuf_data_get_icmp_subtype() - get the subtype + * of IPV4 ICMP packet. + * @data: Pointer to IPV4 ICMP packet data buffer + * + * This func. returns the subtype of ICMP packet. + * + * Return: subtype of the ICMP packet. + */ +enum qdf_proto_subtype +__qdf_nbuf_data_get_icmp_subtype(uint8_t *data) +{ + uint8_t subtype; + enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID; + + subtype = (uint8_t)(*(uint8_t *) + (data + ICMP_SUBTYPE_OFFSET)); + + switch (subtype) { + case ICMP_REQUEST: + proto_subtype = QDF_PROTO_ICMP_REQ; + break; + case ICMP_RESPONSE: + proto_subtype = QDF_PROTO_ICMP_RES; + break; + default: + break; + } + + return proto_subtype; +} + +/** + * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype + * of IPV6 ICMPV6 packet. + * @data: Pointer to IPV6 ICMPV6 packet data buffer + * + * This func. returns the subtype of ICMPV6 packet. + * + * Return: subtype of the ICMPV6 packet. + */ +enum qdf_proto_subtype +__qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data) +{ + uint8_t subtype; + enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID; + + subtype = (uint8_t)(*(uint8_t *) + (data + ICMPV6_SUBTYPE_OFFSET)); + + switch (subtype) { + case ICMPV6_REQUEST: + proto_subtype = QDF_PROTO_ICMPV6_REQ; + break; + case ICMPV6_RESPONSE: + proto_subtype = QDF_PROTO_ICMPV6_RES; + break; + case ICMPV6_RS: + proto_subtype = QDF_PROTO_ICMPV6_RS; + break; + case ICMPV6_RA: + proto_subtype = QDF_PROTO_ICMPV6_RA; + break; + case ICMPV6_NS: + proto_subtype = QDF_PROTO_ICMPV6_NS; + break; + case ICMPV6_NA: + proto_subtype = QDF_PROTO_ICMPV6_NA; + break; + default: + break; + } + + return proto_subtype; +} + +/** + * __qdf_nbuf_data_get_ipv4_proto() - get the proto type + * of IPV4 packet. + * @data: Pointer to IPV4 packet data buffer + * + * This func. returns the proto type of IPV4 packet. + * + * Return: proto type of IPV4 packet. + */ +uint8_t +__qdf_nbuf_data_get_ipv4_proto(uint8_t *data) +{ + uint8_t proto_type; + + proto_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET)); + return proto_type; +} + +/** + * __qdf_nbuf_data_get_ipv6_proto() - get the proto type + * of IPV6 packet. + * @data: Pointer to IPV6 packet data buffer + * + * This func. returns the proto type of IPV6 packet. + * + * Return: proto type of IPV6 packet. + */ +uint8_t +__qdf_nbuf_data_get_ipv6_proto(uint8_t *data) +{ + uint8_t proto_type; + + proto_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET)); + return proto_type; +} + +/** + * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet + * @data: Pointer to network data + * + * This api is for Tx packets. + * + * Return: true if packet is ipv4 packet + * false otherwise + */ +bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data) +{ + uint16_t ether_type; + + ether_type = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_TRAC_ETH_TYPE_OFFSET)); + + if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE)) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt); + +/** + * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is DHCP packet + * false otherwise + */ +bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data) +{ + uint16_t sport; + uint16_t dport; + uint8_t ipv4_offset; + uint8_t ipv4_hdr_len; + struct iphdr *iphdr; + + if (__qdf_nbuf_get_ether_type(data) != + QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE)) + return false; + + ipv4_offset = __qdf_nbuf_get_ip_offset(data); + iphdr = (struct iphdr *)(data + ipv4_offset); + ipv4_hdr_len = iphdr->ihl * QDF_NBUF_IPV4_HDR_SIZE_UNIT; + + sport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len); + dport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len + + sizeof(uint16_t)); + + if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) && + (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) || + ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) && + (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)))) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt); + +/** + * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is EAPOL packet + * false otherwise. + */ +bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data) +{ + uint16_t ether_type; + + ether_type = __qdf_nbuf_get_ether_type(data); + + if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt); + +/** + * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet + * @skb: Pointer to network buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is WAPI packet + * false otherwise. + */ +bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb) +{ + uint16_t ether_type; + + ether_type = (uint16_t)(*(uint16_t *)(skb->data + + QDF_NBUF_TRAC_ETH_TYPE_OFFSET)); + + if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE)) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt); + +/** + * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet + * @skb: Pointer to network buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is tdls packet + * false otherwise. + */ +bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb) +{ + uint16_t ether_type; + + ether_type = *(uint16_t *)(skb->data + + QDF_NBUF_TRAC_ETH_TYPE_OFFSET); + + if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE)) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt); + +/** + * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is ARP packet + * false otherwise. + */ +bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data) +{ + uint16_t ether_type; + + ether_type = __qdf_nbuf_get_ether_type(data); + + if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE)) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt); + +/** + * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is ARP request + * false otherwise. + */ +bool __qdf_nbuf_data_is_arp_req(uint8_t *data) +{ + uint16_t op_code; + + op_code = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_ARP_OPCODE_OFFSET)); + + if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ)) + return true; + return false; +} + +/** + * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is ARP response + * false otherwise. + */ +bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data) +{ + uint16_t op_code; + + op_code = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_ARP_OPCODE_OFFSET)); + + if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY)) + return true; + return false; +} + +/** + * __qdf_nbuf_data_get_arp_src_ip() - get arp src IP + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: ARP packet source IP value. + */ +uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data) +{ + uint32_t src_ip; + + src_ip = (uint32_t)(*(uint32_t *)(data + + QDF_NBUF_PKT_ARP_SRC_IP_OFFSET)); + + return src_ip; +} + +/** + * __qdf_nbuf_data_get_arp_tgt_ip() - get arp target IP + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: ARP packet target IP value. + */ +uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data) +{ + uint32_t tgt_ip; + + tgt_ip = (uint32_t)(*(uint32_t *)(data + + QDF_NBUF_PKT_ARP_TGT_IP_OFFSET)); + + return tgt_ip; +} + +/** + * __qdf_nbuf_get_dns_domain_name() - get dns domain name + * @data: Pointer to network data buffer + * @len: length to copy + * + * This api is for dns domain name + * + * Return: dns domain name. + */ +uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len) +{ + uint8_t *domain_name; + + domain_name = (uint8_t *) + (data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET); + return domain_name; +} + + +/** + * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query + * @data: Pointer to network data buffer + * + * This api is for dns query packet. + * + * Return: true if packet is dns query packet. + * false otherwise. + */ +bool __qdf_nbuf_data_is_dns_query(uint8_t *data) +{ + uint16_t op_code; + uint16_t tgt_port; + + tgt_port = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_DNS_DST_PORT_OFFSET)); + /* Standard DNS query always happen on Dest Port 53. */ + if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) { + op_code = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET)); + if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) == + QDF_NBUF_PKT_DNSOP_STANDARD_QUERY) + return true; + } + return false; +} + +/** + * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response + * @data: Pointer to network data buffer + * + * This api is for dns query response. + * + * Return: true if packet is dns response packet. + * false otherwise. + */ +bool __qdf_nbuf_data_is_dns_response(uint8_t *data) +{ + uint16_t op_code; + uint16_t src_port; + + src_port = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET)); + /* Standard DNS response always comes on Src Port 53. */ + if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) { + op_code = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET)); + + if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) == + QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE) + return true; + } + return false; +} + +/** + * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn + * @data: Pointer to network data buffer + * + * This api is for tcp syn packet. + * + * Return: true if packet is tcp syn packet. + * false otherwise. + */ +bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data) +{ + uint8_t op_code; + + op_code = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_PKT_TCP_OPCODE_OFFSET)); + + if (op_code == QDF_NBUF_PKT_TCPOP_SYN) + return true; + return false; +} + +/** + * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack + * @data: Pointer to network data buffer + * + * This api is for tcp syn ack packet. + * + * Return: true if packet is tcp syn ack packet. + * false otherwise. + */ +bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data) +{ + uint8_t op_code; + + op_code = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_PKT_TCP_OPCODE_OFFSET)); + + if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK) + return true; + return false; +} + +/** + * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack + * @data: Pointer to network data buffer + * + * This api is for tcp ack packet. + * + * Return: true if packet is tcp ack packet. + * false otherwise. + */ +bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data) +{ + uint8_t op_code; + + op_code = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_PKT_TCP_OPCODE_OFFSET)); + + if (op_code == QDF_NBUF_PKT_TCPOP_ACK) + return true; + return false; +} + +/** + * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port + * @data: Pointer to network data buffer + * + * This api is for tcp packet. + * + * Return: tcp source port value. + */ +uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data) +{ + uint16_t src_port; + + src_port = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET)); + + return src_port; +} + +/** + * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port + * @data: Pointer to network data buffer + * + * This api is for tcp packet. + * + * Return: tcp destination port value. + */ +uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data) +{ + uint16_t tgt_port; + + tgt_port = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_TCP_DST_PORT_OFFSET)); + + return tgt_port; +} + +/** + * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is a icmpv4 request + * @data: Pointer to network data buffer + * + * This api is for ipv4 req packet. + * + * Return: true if packet is icmpv4 request + * false otherwise. + */ +bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data) +{ + uint8_t op_code; + + op_code = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET)); + + if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ) + return true; + return false; +} + +/** + * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is a icmpv4 res + * @data: Pointer to network data buffer + * + * This api is for ipv4 res packet. + * + * Return: true if packet is icmpv4 response + * false otherwise. + */ +bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data) +{ + uint8_t op_code; + + op_code = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET)); + + if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY) + return true; + return false; +} + +/** + * __qdf_nbuf_data_get_icmpv4_src_ip() - get icmpv4 src IP + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: icmpv4 packet source IP value. + */ +uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data) +{ + uint32_t src_ip; + + src_ip = (uint32_t)(*(uint32_t *)(data + + QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET)); + + return src_ip; +} + +/** + * __qdf_nbuf_data_get_icmpv4_tgt_ip() - get icmpv4 target IP + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: icmpv4 packet target IP value. + */ +uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data) +{ + uint32_t tgt_ip; + + tgt_ip = (uint32_t)(*(uint32_t *)(data + + QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET)); + + return tgt_ip; +} + + +/** + * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet. + * @data: Pointer to IPV6 packet data buffer + * + * This func. checks whether it is a IPV6 packet or not. + * + * Return: TRUE if it is a IPV6 packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data) +{ + uint16_t ether_type; + + ether_type = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_TRAC_ETH_TYPE_OFFSET)); + + if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE)) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt); + +/** + * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet + * @data: Pointer to network data buffer + * + * This api is for ipv6 packet. + * + * Return: true if packet is DHCP packet + * false otherwise + */ +bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data) +{ + uint16_t sport; + uint16_t dport; + uint8_t ipv6_offset; + + ipv6_offset = __qdf_nbuf_get_ip_offset(data); + sport = *(uint16_t *)(data + ipv6_offset + + QDF_NBUF_TRAC_IPV6_HEADER_SIZE); + dport = *(uint16_t *)(data + ipv6_offset + + QDF_NBUF_TRAC_IPV6_HEADER_SIZE + + sizeof(uint16_t)); + + if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) && + (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) || + ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) && + (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)))) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt); + +/** + * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is a mdns packet + * @data: Pointer to network data buffer + * + * This api is for ipv6 packet. + * + * Return: true if packet is MDNS packet + * false otherwise + */ +bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data) +{ + uint16_t sport; + uint16_t dport; + + sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET + + QDF_NBUF_TRAC_IPV6_HEADER_SIZE); + dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET + + QDF_NBUF_TRAC_IPV6_HEADER_SIZE + + sizeof(uint16_t)); + + if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) && + dport == sport) + return true; + else + return false; +} + +qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt); + +/** + * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet. + * @data: Pointer to IPV4 packet data buffer + * + * This func. checks whether it is a IPV4 multicast packet or not. + * + * Return: TRUE if it is a IPV4 multicast packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv4_pkt(data)) { + uint32_t *dst_addr = + (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET); + + /* + * Check first word of the IPV4 address and if it is + * equal to 0xE then it represents multicast IP. + */ + if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) == + QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet. + * @data: Pointer to IPV6 packet data buffer + * + * This func. checks whether it is a IPV6 multicast packet or not. + * + * Return: TRUE if it is a IPV6 multicast packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv6_pkt(data)) { + uint16_t *dst_addr; + + dst_addr = (uint16_t *) + (data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET); + + /* + * Check first byte of the IP address and if it + * 0xFF00 then it is a IPV6 mcast packet. + */ + if (*dst_addr == + QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR)) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet. + * @data: Pointer to IPV4 ICMP packet data buffer + * + * This func. checks whether it is a ICMP packet or not. + * + * Return: TRUE if it is a ICMP packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv4_pkt(data)) { + uint8_t pkt_type; + + pkt_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET)); + + if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE) + return true; + else + return false; + } else + return false; +} + +qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt); + +/** + * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet. + * @data: Pointer to IPV6 ICMPV6 packet data buffer + * + * This func. checks whether it is a ICMPV6 packet or not. + * + * Return: TRUE if it is a ICMPV6 packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv6_pkt(data)) { + uint8_t pkt_type; + + pkt_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET)); + + if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet. + * @data: Pointer to IPV4 UDP packet data buffer + * + * This func. checks whether it is a IPV4 UDP packet or not. + * + * Return: TRUE if it is a IPV4 UDP packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv4_pkt(data)) { + uint8_t pkt_type; + + pkt_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET)); + + if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet. + * @data: Pointer to IPV4 TCP packet data buffer + * + * This func. checks whether it is a IPV4 TCP packet or not. + * + * Return: TRUE if it is a IPV4 TCP packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv4_pkt(data)) { + uint8_t pkt_type; + + pkt_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET)); + + if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet. + * @data: Pointer to IPV6 UDP packet data buffer + * + * This func. checks whether it is a IPV6 UDP packet or not. + * + * Return: TRUE if it is a IPV6 UDP packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv6_pkt(data)) { + uint8_t pkt_type; + + pkt_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET)); + + if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet. + * @data: Pointer to IPV6 TCP packet data buffer + * + * This func. checks whether it is a IPV6 TCP packet or not. + * + * Return: TRUE if it is a IPV6 TCP packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv6_pkt(data)) { + uint8_t pkt_type; + + pkt_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET)); + + if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast + * @nbuf - sk buff + * + * Return: true if packet is broadcast + * false otherwise + */ +bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf) +{ + struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf); + return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest); +} +qdf_export_symbol(__qdf_nbuf_is_bcast_pkt); + +#ifdef NBUF_MEMORY_DEBUG +#define QDF_NET_BUF_TRACK_MAX_SIZE (1024) + +/** + * struct qdf_nbuf_track_t - Network buffer track structure + * + * @p_next: Pointer to next + * @net_buf: Pointer to network buffer + * @func_name: Function name + * @line_num: Line number + * @size: Size + * @map_func_name: nbuf mapping function name + * @map_line_num: mapping function line number + * @unmap_func_name: nbuf unmapping function name + * @unmap_line_num: mapping function line number + * @is_nbuf_mapped: indicate mapped/unmapped nbuf + */ +struct qdf_nbuf_track_t { + struct qdf_nbuf_track_t *p_next; + qdf_nbuf_t net_buf; + char func_name[QDF_MEM_FUNC_NAME_SIZE]; + uint32_t line_num; + size_t size; + char map_func_name[QDF_MEM_FUNC_NAME_SIZE]; + uint32_t map_line_num; + char unmap_func_name[QDF_MEM_FUNC_NAME_SIZE]; + uint32_t unmap_line_num; + bool is_nbuf_mapped; +}; + +static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE]; +typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK; + +static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE]; +static struct kmem_cache *nbuf_tracking_cache; +static QDF_NBUF_TRACK *qdf_net_buf_track_free_list; +static spinlock_t qdf_net_buf_track_free_list_lock; +static uint32_t qdf_net_buf_track_free_list_count; +static uint32_t qdf_net_buf_track_used_list_count; +static uint32_t qdf_net_buf_track_max_used; +static uint32_t qdf_net_buf_track_max_free; +static uint32_t qdf_net_buf_track_max_allocated; + +/** + * update_max_used() - update qdf_net_buf_track_max_used tracking variable + * + * tracks the max number of network buffers that the wlan driver was tracking + * at any one time. + * + * Return: none + */ +static inline void update_max_used(void) +{ + int sum; + + if (qdf_net_buf_track_max_used < + qdf_net_buf_track_used_list_count) + qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count; + sum = qdf_net_buf_track_free_list_count + + qdf_net_buf_track_used_list_count; + if (qdf_net_buf_track_max_allocated < sum) + qdf_net_buf_track_max_allocated = sum; +} + +/** + * update_max_free() - update qdf_net_buf_track_free_list_count + * + * tracks the max number tracking buffers kept in the freelist. + * + * Return: none + */ +static inline void update_max_free(void) +{ + if (qdf_net_buf_track_max_free < + qdf_net_buf_track_free_list_count) + qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count; +} + +/** + * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan + * + * This function pulls from a freelist if possible and uses kmem_cache_alloc. + * This function also ads fexibility to adjust the allocation and freelist + * scheems. + * + * Return: a pointer to an unused QDF_NBUF_TRACK structure may not be zeroed. + */ +static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void) +{ + int flags = GFP_KERNEL; + unsigned long irq_flag; + QDF_NBUF_TRACK *new_node = NULL; + + spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag); + qdf_net_buf_track_used_list_count++; + if (qdf_net_buf_track_free_list) { + new_node = qdf_net_buf_track_free_list; + qdf_net_buf_track_free_list = + qdf_net_buf_track_free_list->p_next; + qdf_net_buf_track_free_list_count--; + } + update_max_used(); + spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag); + + if (new_node) + return new_node; + + if (in_interrupt() || irqs_disabled() || in_atomic()) + flags = GFP_ATOMIC; + + return kmem_cache_alloc(nbuf_tracking_cache, flags); +} + +/* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */ +#define FREEQ_POOLSIZE 2048 + +/** + * qdf_nbuf_track_free() - free the nbuf tracking cookie. + * + * Matches calls to qdf_nbuf_track_alloc. + * Either frees the tracking cookie to kernel or an internal + * freelist based on the size of the freelist. + * + * Return: none + */ +static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node) +{ + unsigned long irq_flag; + + if (!node) + return; + + /* Try to shrink the freelist if free_list_count > than FREEQ_POOLSIZE + * only shrink the freelist if it is bigger than twice the number of + * nbufs in use. If the driver is stalling in a consistent bursty + * fasion, this will keep 3/4 of thee allocations from the free list + * while also allowing the system to recover memory as less frantic + * traffic occurs. + */ + + spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag); + + qdf_net_buf_track_used_list_count--; + if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE && + (qdf_net_buf_track_free_list_count > + qdf_net_buf_track_used_list_count << 1)) { + kmem_cache_free(nbuf_tracking_cache, node); + } else { + node->p_next = qdf_net_buf_track_free_list; + qdf_net_buf_track_free_list = node; + qdf_net_buf_track_free_list_count++; + } + update_max_free(); + spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag); +} + +/** + * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist + * + * Removes a 'warmup time' characteristic of the freelist. Prefilling + * the freelist first makes it performant for the first iperf udp burst + * as well as steady state. + * + * Return: None + */ +static void qdf_nbuf_track_prefill(void) +{ + int i; + QDF_NBUF_TRACK *node, *head; + + /* prepopulate the freelist */ + head = NULL; + for (i = 0; i < FREEQ_POOLSIZE; i++) { + node = qdf_nbuf_track_alloc(); + if (!node) + continue; + node->p_next = head; + head = node; + } + while (head) { + node = head->p_next; + qdf_nbuf_track_free(head); + head = node; + } + + /* prefilled buffers should not count as used */ + qdf_net_buf_track_max_used = 0; +} + +/** + * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies + * + * This initializes the memory manager for the nbuf tracking cookies. Because + * these cookies are all the same size and only used in this feature, we can + * use a kmem_cache to provide tracking as well as to speed up allocations. + * To avoid the overhead of allocating and freeing the buffers (including SLUB + * features) a freelist is prepopulated here. + * + * Return: None + */ +static void qdf_nbuf_track_memory_manager_create(void) +{ + spin_lock_init(&qdf_net_buf_track_free_list_lock); + nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache", + sizeof(QDF_NBUF_TRACK), + 0, 0, NULL); + + qdf_nbuf_track_prefill(); +} + +/** + * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies + * + * Empty the freelist and print out usage statistics when it is no longer + * needed. Also the kmem_cache should be destroyed here so that it can warn if + * any nbuf tracking cookies were leaked. + * + * Return: None + */ +static void qdf_nbuf_track_memory_manager_destroy(void) +{ + QDF_NBUF_TRACK *node, *tmp; + unsigned long irq_flag; + + spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag); + node = qdf_net_buf_track_free_list; + + if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4) + qdf_print("%s: unexpectedly large max_used count %d", + __func__, qdf_net_buf_track_max_used); + + if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated) + qdf_print("%s: %d unused trackers were allocated", + __func__, + qdf_net_buf_track_max_allocated - + qdf_net_buf_track_max_used); + + if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE && + qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4) + qdf_print("%s: check freelist shrinking functionality", + __func__); + + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, + "%s: %d residual freelist size", + __func__, qdf_net_buf_track_free_list_count); + + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, + "%s: %d max freelist size observed", + __func__, qdf_net_buf_track_max_free); + + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, + "%s: %d max buffers used observed", + __func__, qdf_net_buf_track_max_used); + + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, + "%s: %d max buffers allocated observed", + __func__, qdf_net_buf_track_max_allocated); + + while (node) { + tmp = node; + node = node->p_next; + kmem_cache_free(nbuf_tracking_cache, tmp); + qdf_net_buf_track_free_list_count--; + } + + if (qdf_net_buf_track_free_list_count != 0) + qdf_info("%d unfreed tracking memory lost in freelist", + qdf_net_buf_track_free_list_count); + + if (qdf_net_buf_track_used_list_count != 0) + qdf_info("%d unfreed tracking memory still in use", + qdf_net_buf_track_used_list_count); + + spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag); + kmem_cache_destroy(nbuf_tracking_cache); + qdf_net_buf_track_free_list = NULL; +} + +/** + * qdf_net_buf_debug_init() - initialize network buffer debug functionality + * + * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver + * in a hash table and when driver is unloaded it reports about leaked SKBs. + * WLAN driver module whose allocated SKB is freed by network stack are + * suppose to call qdf_net_buf_debug_release_skb() such that the SKB is not + * reported as memory leak. + * + * Return: none + */ +void qdf_net_buf_debug_init(void) +{ + uint32_t i; + + is_initial_mem_debug_disabled = qdf_mem_debug_config_get(); + + if (is_initial_mem_debug_disabled) + return; + + qdf_atomic_set(&qdf_nbuf_history_index, -1); + + qdf_nbuf_map_tracking_init(); + qdf_nbuf_track_memory_manager_create(); + + for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) { + gp_qdf_net_buf_track_tbl[i] = NULL; + spin_lock_init(&g_qdf_net_buf_track_lock[i]); + } +} +qdf_export_symbol(qdf_net_buf_debug_init); + +/** + * qdf_net_buf_debug_init() - exit network buffer debug functionality + * + * Exit network buffer tracking debug functionality and log SKB memory leaks + * As part of exiting the functionality, free the leaked memory and + * cleanup the tracking buffers. + * + * Return: none + */ +void qdf_net_buf_debug_exit(void) +{ + uint32_t i; + uint32_t count = 0; + unsigned long irq_flag; + QDF_NBUF_TRACK *p_node; + QDF_NBUF_TRACK *p_prev; + + if (is_initial_mem_debug_disabled) + return; + + for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) { + spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag); + p_node = gp_qdf_net_buf_track_tbl[i]; + while (p_node) { + p_prev = p_node; + p_node = p_node->p_next; + count++; + qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK", + p_prev->func_name, p_prev->line_num, + p_prev->size, p_prev->net_buf); + qdf_info( + "SKB leak map %s, line %d, unmap %s line %d mapped=%d", + p_prev->map_func_name, + p_prev->map_line_num, + p_prev->unmap_func_name, + p_prev->unmap_line_num, + p_prev->is_nbuf_mapped); + qdf_nbuf_track_free(p_prev); + } + spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag); + } + + qdf_nbuf_track_memory_manager_destroy(); + qdf_nbuf_map_tracking_deinit(); + +#ifdef CONFIG_HALT_KMEMLEAK + if (count) { + qdf_err("%d SKBs leaked .. please fix the SKB leak", count); + QDF_BUG(0); + } +#endif +} +qdf_export_symbol(qdf_net_buf_debug_exit); + +/** + * qdf_net_buf_debug_hash() - hash network buffer pointer + * + * Return: hash value + */ +static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf) +{ + uint32_t i; + + i = (uint32_t) (((uintptr_t) net_buf) >> 4); + i += (uint32_t) (((uintptr_t) net_buf) >> 14); + i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1); + + return i; +} + +/** + * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table + * + * Return: If skb is found in hash table then return pointer to network buffer + * else return %NULL + */ +static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf) +{ + uint32_t i; + QDF_NBUF_TRACK *p_node; + + i = qdf_net_buf_debug_hash(net_buf); + p_node = gp_qdf_net_buf_track_tbl[i]; + + while (p_node) { + if (p_node->net_buf == net_buf) + return p_node; + p_node = p_node->p_next; + } + + return NULL; +} + +/** + * qdf_net_buf_debug_add_node() - store skb in debug hash table + * + * Return: none + */ +void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size, + const char *func_name, uint32_t line_num) +{ + uint32_t i; + unsigned long irq_flag; + QDF_NBUF_TRACK *p_node; + QDF_NBUF_TRACK *new_node; + + if (is_initial_mem_debug_disabled) + return; + + new_node = qdf_nbuf_track_alloc(); + + i = qdf_net_buf_debug_hash(net_buf); + spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag); + + p_node = qdf_net_buf_debug_look_up(net_buf); + + if (p_node) { + qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d", + p_node->net_buf, p_node->func_name, p_node->line_num, + net_buf, func_name, line_num); + qdf_nbuf_track_free(new_node); + } else { + p_node = new_node; + if (p_node) { + p_node->net_buf = net_buf; + qdf_str_lcopy(p_node->func_name, func_name, + QDF_MEM_FUNC_NAME_SIZE); + p_node->line_num = line_num; + p_node->size = size; + qdf_mem_skb_inc(size); + p_node->p_next = gp_qdf_net_buf_track_tbl[i]; + gp_qdf_net_buf_track_tbl[i] = p_node; + } else + qdf_print( + "Mem alloc failed ! Could not track skb from %s %d of size %zu", + func_name, line_num, size); + } + + spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag); +} +qdf_export_symbol(qdf_net_buf_debug_add_node); + +void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name, + uint32_t line_num) +{ + uint32_t i; + unsigned long irq_flag; + QDF_NBUF_TRACK *p_node; + + if (is_initial_mem_debug_disabled) + return; + + i = qdf_net_buf_debug_hash(net_buf); + spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag); + + p_node = qdf_net_buf_debug_look_up(net_buf); + + if (p_node) { + qdf_str_lcopy(p_node->func_name, kbasename(func_name), + QDF_MEM_FUNC_NAME_SIZE); + p_node->line_num = line_num; + } + + spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag); +} + +qdf_export_symbol(qdf_net_buf_debug_update_node); + +void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf, + const char *func_name, + uint32_t line_num) +{ + uint32_t i; + unsigned long irq_flag; + QDF_NBUF_TRACK *p_node; + + if (is_initial_mem_debug_disabled) + return; + + i = qdf_net_buf_debug_hash(net_buf); + spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag); + + p_node = qdf_net_buf_debug_look_up(net_buf); + + if (p_node) { + qdf_str_lcopy(p_node->map_func_name, func_name, + QDF_MEM_FUNC_NAME_SIZE); + p_node->map_line_num = line_num; + p_node->is_nbuf_mapped = true; + } + spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag); +} + +qdf_export_symbol(qdf_net_buf_debug_update_map_node); + +void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf, + const char *func_name, + uint32_t line_num) +{ + uint32_t i; + unsigned long irq_flag; + QDF_NBUF_TRACK *p_node; + + if (is_initial_mem_debug_disabled) + return; + + i = qdf_net_buf_debug_hash(net_buf); + spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag); + + p_node = qdf_net_buf_debug_look_up(net_buf); + + if (p_node) { + qdf_str_lcopy(p_node->unmap_func_name, func_name, + QDF_MEM_FUNC_NAME_SIZE); + p_node->unmap_line_num = line_num; + p_node->is_nbuf_mapped = false; + } + spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag); +} + +qdf_export_symbol(qdf_net_buf_debug_update_unmap_node); + +/** + * qdf_net_buf_debug_delete_node() - remove skb from debug hash table + * + * Return: none + */ +void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf) +{ + uint32_t i; + QDF_NBUF_TRACK *p_head; + QDF_NBUF_TRACK *p_node = NULL; + unsigned long irq_flag; + QDF_NBUF_TRACK *p_prev; + + if (is_initial_mem_debug_disabled) + return; + + i = qdf_net_buf_debug_hash(net_buf); + spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag); + + p_head = gp_qdf_net_buf_track_tbl[i]; + + /* Unallocated SKB */ + if (!p_head) + goto done; + + p_node = p_head; + /* Found at head of the table */ + if (p_head->net_buf == net_buf) { + gp_qdf_net_buf_track_tbl[i] = p_node->p_next; + goto done; + } + + /* Search in collision list */ + while (p_node) { + p_prev = p_node; + p_node = p_node->p_next; + if ((p_node) && (p_node->net_buf == net_buf)) { + p_prev->p_next = p_node->p_next; + break; + } + } + +done: + spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag); + + if (p_node) { + qdf_mem_skb_dec(p_node->size); + qdf_nbuf_track_free(p_node); + } else { + qdf_print("Unallocated buffer ! Double free of net_buf %pK ?", + net_buf); + QDF_BUG(0); + } +} +qdf_export_symbol(qdf_net_buf_debug_delete_node); + +void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf, + const char *func_name, uint32_t line_num) +{ + qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf); + + if (is_initial_mem_debug_disabled) + return; + + while (ext_list) { + /* + * Take care to add if it is Jumbo packet connected using + * frag_list + */ + qdf_nbuf_t next; + + next = qdf_nbuf_queue_next(ext_list); + qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num); + ext_list = next; + } + qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num); +} +qdf_export_symbol(qdf_net_buf_debug_acquire_skb); + +/** + * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak + * @net_buf: Network buf holding head segment (single) + * + * WLAN driver module whose allocated SKB is freed by network stack are + * suppose to call this API before returning SKB to network stack such + * that the SKB is not reported as memory leak. + * + * Return: none + */ +void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf) +{ + qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf); + + if (is_initial_mem_debug_disabled) + return; + + while (ext_list) { + /* + * Take care to free if it is Jumbo packet connected using + * frag_list + */ + qdf_nbuf_t next; + + next = qdf_nbuf_queue_next(ext_list); + + if (qdf_nbuf_get_users(ext_list) > 1) { + ext_list = next; + continue; + } + + qdf_net_buf_debug_delete_node(ext_list); + ext_list = next; + } + + if (qdf_nbuf_get_users(net_buf) > 1) + return; + + qdf_net_buf_debug_delete_node(net_buf); +} +qdf_export_symbol(qdf_net_buf_debug_release_skb); + +qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size, + int reserve, int align, int prio, + const char *func, uint32_t line) +{ + qdf_nbuf_t nbuf; + + if (is_initial_mem_debug_disabled) + return __qdf_nbuf_alloc(osdev, size, + reserve, align, + prio, func, line); + + nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line); + + /* Store SKB in internal QDF tracking table */ + if (qdf_likely(nbuf)) { + qdf_net_buf_debug_add_node(nbuf, size, func, line); + qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC); + } else { + qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE); + } + + return nbuf; +} +qdf_export_symbol(qdf_nbuf_alloc_debug); + +void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line) +{ + qdf_nbuf_t ext_list; + + if (qdf_unlikely(!nbuf)) + return; + + if (is_initial_mem_debug_disabled) + goto free_buf; + + if (qdf_nbuf_get_users(nbuf) > 1) + goto free_buf; + + /* Remove SKB from internal QDF tracking table */ + qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line); + qdf_net_buf_debug_delete_node(nbuf); + qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE); + + /* Take care to delete the debug entries for frag_list */ + ext_list = qdf_nbuf_get_ext_list(nbuf); + while (ext_list) { + if (qdf_nbuf_get_users(ext_list) == 1) { + qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line); + qdf_net_buf_debug_delete_node(ext_list); + } + + ext_list = qdf_nbuf_queue_next(ext_list); + } + +free_buf: + __qdf_nbuf_free(nbuf); +} +qdf_export_symbol(qdf_nbuf_free_debug); + +qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line) +{ + qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf); + + if (is_initial_mem_debug_disabled) + return cloned_buf; + + if (qdf_unlikely(!cloned_buf)) + return NULL; + + /* Store SKB in internal QDF tracking table */ + qdf_net_buf_debug_add_node(cloned_buf, 0, func, line); + qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE); + + return cloned_buf; +} +qdf_export_symbol(qdf_nbuf_clone_debug); + +qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line) +{ + qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf); + + if (is_initial_mem_debug_disabled) + return copied_buf; + + if (qdf_unlikely(!copied_buf)) + return NULL; + + /* Store SKB in internal QDF tracking table */ + qdf_net_buf_debug_add_node(copied_buf, 0, func, line); + qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY); + + return copied_buf; +} +qdf_export_symbol(qdf_nbuf_copy_debug); + +qdf_nbuf_t +qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom, + const char *func, uint32_t line) +{ + qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom); + + if (qdf_unlikely(!copied_buf)) + return NULL; + + if (is_initial_mem_debug_disabled) + return copied_buf; + + /* Store SKB in internal QDF tracking table */ + qdf_net_buf_debug_add_node(copied_buf, 0, func, line); + qdf_nbuf_history_add(copied_buf, func, line, + QDF_NBUF_ALLOC_COPY_EXPAND); + + return copied_buf; +} + +qdf_export_symbol(qdf_nbuf_copy_expand_debug); + +#endif /* NBUF_MEMORY_DEBUG */ + +#if defined(FEATURE_TSO) + +/** + * struct qdf_tso_cmn_seg_info_t - TSO common info structure + * + * @ethproto: ethernet type of the msdu + * @ip_tcp_hdr_len: ip + tcp length for the msdu + * @l2_len: L2 length for the msdu + * @eit_hdr: pointer to EIT header + * @eit_hdr_len: EIT header length for the msdu + * @eit_hdr_dma_map_addr: dma addr for EIT header + * @tcphdr: pointer to tcp header + * @ipv4_csum_en: ipv4 checksum enable + * @tcp_ipv4_csum_en: TCP ipv4 checksum enable + * @tcp_ipv6_csum_en: TCP ipv6 checksum enable + * @ip_id: IP id + * @tcp_seq_num: TCP sequence number + * + * This structure holds the TSO common info that is common + * across all the TCP segments of the jumbo packet. + */ +struct qdf_tso_cmn_seg_info_t { + uint16_t ethproto; + uint16_t ip_tcp_hdr_len; + uint16_t l2_len; + uint8_t *eit_hdr; + uint32_t eit_hdr_len; + qdf_dma_addr_t eit_hdr_dma_map_addr; + struct tcphdr *tcphdr; + uint16_t ipv4_csum_en; + uint16_t tcp_ipv4_csum_en; + uint16_t tcp_ipv6_csum_en; + uint16_t ip_id; + uint32_t tcp_seq_num; +}; + +/** + * qdf_nbuf_adj_tso_frag() - adjustment for buffer address of tso fragment + * + * @skb: network buffer + * + * Return: byte offset length of 8 bytes aligned. + */ +#ifdef FIX_TXDMA_LIMITATION +static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb) +{ + uint32_t eit_hdr_len; + uint8_t *eit_hdr; + uint8_t byte_8_align_offset; + + eit_hdr = skb->data; + eit_hdr_len = (skb_transport_header(skb) + - skb_mac_header(skb)) + tcp_hdrlen(skb); + byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L; + if (qdf_unlikely(byte_8_align_offset)) { + TSO_DEBUG("%pK,Len %d %d", + eit_hdr, eit_hdr_len, byte_8_align_offset); + if (unlikely(skb_headroom(skb) < byte_8_align_offset)) { + TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]", + __LINE__, skb->head, skb->data, + byte_8_align_offset); + return 0; + } + qdf_nbuf_push_head(skb, byte_8_align_offset); + qdf_mem_move(skb->data, + skb->data + byte_8_align_offset, + eit_hdr_len); + skb->len -= byte_8_align_offset; + skb->mac_header -= byte_8_align_offset; + skb->network_header -= byte_8_align_offset; + skb->transport_header -= byte_8_align_offset; + } + return byte_8_align_offset; +} +#else +static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb) +{ + return 0; +} +#endif + +/** + * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common + * information + * @osdev: qdf device handle + * @skb: skb buffer + * @tso_info: Parameters common to all segements + * + * Get the TSO information that is common across all the TCP + * segments of the jumbo packet + * + * Return: 0 - success 1 - failure + */ +static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev, + struct sk_buff *skb, + struct qdf_tso_cmn_seg_info_t *tso_info) +{ + /* Get ethernet type and ethernet header length */ + tso_info->ethproto = vlan_get_protocol(skb); + + /* Determine whether this is an IPv4 or IPv6 packet */ + if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */ + /* for IPv4, get the IP ID and enable TCP and IP csum */ + struct iphdr *ipv4_hdr = ip_hdr(skb); + + tso_info->ip_id = ntohs(ipv4_hdr->id); + tso_info->ipv4_csum_en = 1; + tso_info->tcp_ipv4_csum_en = 1; + if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) { + qdf_err("TSO IPV4 proto 0x%x not TCP", + ipv4_hdr->protocol); + return 1; + } + } else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */ + /* for IPv6, enable TCP csum. No IP ID or IP csum */ + tso_info->tcp_ipv6_csum_en = 1; + } else { + qdf_err("TSO: ethertype 0x%x is not supported!", + tso_info->ethproto); + return 1; + } + tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb)); + tso_info->tcphdr = tcp_hdr(skb); + tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq); + /* get pointer to the ethernet + IP + TCP header and their length */ + tso_info->eit_hdr = skb->data; + tso_info->eit_hdr_len = (skb_transport_header(skb) + - skb_mac_header(skb)) + tcp_hdrlen(skb); + tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev, + tso_info->eit_hdr, + tso_info->eit_hdr_len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(osdev->dev, + tso_info->eit_hdr_dma_map_addr))) { + qdf_err("DMA mapping error!"); + qdf_assert(0); + return 1; + } + + if (tso_info->ethproto == htons(ETH_P_IP)) { + /* inlcude IPv4 header length for IPV4 (total length) */ + tso_info->ip_tcp_hdr_len = + tso_info->eit_hdr_len - tso_info->l2_len; + } else if (tso_info->ethproto == htons(ETH_P_IPV6)) { + /* exclude IPv6 header length for IPv6 (payload length) */ + tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb); + } + /* + * The length of the payload (application layer data) is added to + * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext + * descriptor. + */ + + TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u skb len %u\n", __func__, + tso_info->tcp_seq_num, + tso_info->eit_hdr_len, + tso_info->l2_len, + skb->len); + return 0; +} + + +/** + * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment + * + * @curr_seg: Segment whose contents are initialized + * @tso_cmn_info: Parameters common to all segements + * + * Return: None + */ +static inline void __qdf_nbuf_fill_tso_cmn_seg_info( + struct qdf_tso_seg_elem_t *curr_seg, + struct qdf_tso_cmn_seg_info_t *tso_cmn_info) +{ + /* Initialize the flags to 0 */ + memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg)); + + /* + * The following fields remain the same across all segments of + * a jumbo packet + */ + curr_seg->seg.tso_flags.tso_enable = 1; + curr_seg->seg.tso_flags.ipv4_checksum_en = + tso_cmn_info->ipv4_csum_en; + curr_seg->seg.tso_flags.tcp_ipv6_checksum_en = + tso_cmn_info->tcp_ipv6_csum_en; + curr_seg->seg.tso_flags.tcp_ipv4_checksum_en = + tso_cmn_info->tcp_ipv4_csum_en; + curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF; + + /* The following fields change for the segments */ + curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id; + tso_cmn_info->ip_id++; + + curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn; + curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst; + curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh; + curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack; + curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg; + curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece; + curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr; + + curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num; + + /* + * First fragment for each segment always contains the ethernet, + * IP and TCP header + */ + curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr; + curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len; + curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length; + curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr; + + TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n", + __func__, __LINE__, tso_cmn_info->eit_hdr, + tso_cmn_info->eit_hdr_len, + curr_seg->seg.tso_flags.tcp_seq_num, + curr_seg->seg.total_len); + qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG); +} + +/** + * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf + * into segments + * @nbuf: network buffer to be segmented + * @tso_info: This is the output. The information about the + * TSO segments will be populated within this. + * + * This function fragments a TCP jumbo packet into smaller + * segments to be transmitted by the driver. It chains the TSO + * segments created into a list. + * + * Return: number of TSO segments + */ +uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb, + struct qdf_tso_info_t *tso_info) +{ + /* common across all segments */ + struct qdf_tso_cmn_seg_info_t tso_cmn_info; + /* segment specific */ + void *tso_frag_vaddr; + qdf_dma_addr_t tso_frag_paddr = 0; + uint32_t num_seg = 0; + struct qdf_tso_seg_elem_t *curr_seg; + struct qdf_tso_num_seg_elem_t *total_num_seg; + skb_frag_t *frag = NULL; + uint32_t tso_frag_len = 0; /* tso segment's fragment length*/ + uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/ + uint32_t skb_proc = skb->len; /* bytes of skb pending processing */ + uint32_t tso_seg_size = skb_shinfo(skb)->gso_size; + int j = 0; /* skb fragment index */ + uint8_t byte_8_align_offset; + + memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info)); + total_num_seg = tso_info->tso_num_seg_list; + curr_seg = tso_info->tso_seg_list; + total_num_seg->num_seg.tso_cmn_num_seg = 0; + + byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb); + + if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev, + skb, &tso_cmn_info))) { + qdf_warn("TSO: error getting common segment info"); + return 0; + } + + /* length of the first chunk of data in the skb */ + skb_frag_len = skb_headlen(skb); + + /* the 0th tso segment's 0th fragment always contains the EIT header */ + /* update the remaining skb fragment length and TSO segment length */ + skb_frag_len -= tso_cmn_info.eit_hdr_len; + skb_proc -= tso_cmn_info.eit_hdr_len; + + /* get the address to the next tso fragment */ + tso_frag_vaddr = skb->data + + tso_cmn_info.eit_hdr_len + + byte_8_align_offset; + /* get the length of the next tso fragment */ + tso_frag_len = min(skb_frag_len, tso_seg_size); + + if (tso_frag_len != 0) { + tso_frag_paddr = dma_map_single(osdev->dev, + tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE); + } + + if (unlikely(dma_mapping_error(osdev->dev, + tso_frag_paddr))) { + qdf_err("DMA mapping error!"); + qdf_assert(0); + return 0; + } + TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__, + __LINE__, skb_frag_len, tso_frag_len); + num_seg = tso_info->num_segs; + tso_info->num_segs = 0; + tso_info->is_tso = 1; + + while (num_seg && curr_seg) { + int i = 1; /* tso fragment index */ + uint8_t more_tso_frags = 1; + + curr_seg->seg.num_frags = 0; + tso_info->num_segs++; + total_num_seg->num_seg.tso_cmn_num_seg++; + + __qdf_nbuf_fill_tso_cmn_seg_info(curr_seg, + &tso_cmn_info); + + if (unlikely(skb_proc == 0)) + return tso_info->num_segs; + + curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len; + curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len; + /* frag len is added to ip_len in while loop below*/ + + curr_seg->seg.num_frags++; + + while (more_tso_frags) { + if (tso_frag_len != 0) { + curr_seg->seg.tso_frags[i].vaddr = + tso_frag_vaddr; + curr_seg->seg.tso_frags[i].length = + tso_frag_len; + curr_seg->seg.total_len += tso_frag_len; + curr_seg->seg.tso_flags.ip_len += tso_frag_len; + curr_seg->seg.num_frags++; + skb_proc = skb_proc - tso_frag_len; + + /* increment the TCP sequence number */ + + tso_cmn_info.tcp_seq_num += tso_frag_len; + curr_seg->seg.tso_frags[i].paddr = + tso_frag_paddr; + } + + TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n", + __func__, __LINE__, + i, + tso_frag_len, + curr_seg->seg.total_len, + curr_seg->seg.tso_frags[i].vaddr); + + /* if there is no more data left in the skb */ + if (!skb_proc) + return tso_info->num_segs; + + /* get the next payload fragment information */ + /* check if there are more fragments in this segment */ + if (tso_frag_len < tso_seg_size) { + more_tso_frags = 1; + if (tso_frag_len != 0) { + tso_seg_size = tso_seg_size - + tso_frag_len; + i++; + if (curr_seg->seg.num_frags == + FRAG_NUM_MAX) { + more_tso_frags = 0; + /* + * reset i and the tso + * payload size + */ + i = 1; + tso_seg_size = + skb_shinfo(skb)-> + gso_size; + } + } + } else { + more_tso_frags = 0; + /* reset i and the tso payload size */ + i = 1; + tso_seg_size = skb_shinfo(skb)->gso_size; + } + + /* if the next fragment is contiguous */ + if ((tso_frag_len != 0) && (tso_frag_len < skb_frag_len)) { + tso_frag_vaddr = tso_frag_vaddr + tso_frag_len; + skb_frag_len = skb_frag_len - tso_frag_len; + tso_frag_len = min(skb_frag_len, tso_seg_size); + + } else { /* the next fragment is not contiguous */ + if (skb_shinfo(skb)->nr_frags == 0) { + qdf_info("TSO: nr_frags == 0!"); + qdf_assert(0); + return 0; + } + if (j >= skb_shinfo(skb)->nr_frags) { + qdf_info("TSO: nr_frags %d j %d", + skb_shinfo(skb)->nr_frags, j); + qdf_assert(0); + return 0; + } + frag = &skb_shinfo(skb)->frags[j]; + skb_frag_len = skb_frag_size(frag); + tso_frag_len = min(skb_frag_len, tso_seg_size); + tso_frag_vaddr = skb_frag_address_safe(frag); + j++; + } + + TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n", + __func__, __LINE__, skb_frag_len, tso_frag_len, + tso_seg_size); + + if (!(tso_frag_vaddr)) { + TSO_DEBUG("%s: Fragment virtual addr is NULL", + __func__); + return 0; + } + + tso_frag_paddr = + dma_map_single(osdev->dev, + tso_frag_vaddr, + tso_frag_len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(osdev->dev, + tso_frag_paddr))) { + qdf_err("DMA mapping error!"); + qdf_assert(0); + return 0; + } + } + TSO_DEBUG("%s tcp_seq_num: %u", __func__, + curr_seg->seg.tso_flags.tcp_seq_num); + num_seg--; + /* if TCP FIN flag was set, set it in the last segment */ + if (!num_seg) + curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin; + + qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO); + curr_seg = curr_seg->next; + } + return tso_info->num_segs; +} +qdf_export_symbol(__qdf_nbuf_get_tso_info); + +/** + * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element + * + * @osdev: qdf device handle + * @tso_seg: TSO segment element to be unmapped + * @is_last_seg: whether this is last tso seg or not + * + * Return: none + */ +void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev, + struct qdf_tso_seg_elem_t *tso_seg, + bool is_last_seg) +{ + uint32_t num_frags = 0; + + if (tso_seg->seg.num_frags > 0) + num_frags = tso_seg->seg.num_frags - 1; + + /*Num of frags in a tso seg cannot be less than 2 */ + if (num_frags < 1) { + /* + * If Num of frags is 1 in a tso seg but is_last_seg true, + * this may happen when qdf_nbuf_get_tso_info failed, + * do dma unmap for the 0th frag in this seg. + */ + if (is_last_seg && tso_seg->seg.num_frags == 1) + goto last_seg_free_first_frag; + + qdf_assert(0); + qdf_err("ERROR: num of frags in a tso segment is %d", + (num_frags + 1)); + return; + } + + while (num_frags) { + /*Do dma unmap the tso seg except the 0th frag */ + if (0 == tso_seg->seg.tso_frags[num_frags].paddr) { + qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL", + num_frags); + qdf_assert(0); + return; + } + dma_unmap_single(osdev->dev, + tso_seg->seg.tso_frags[num_frags].paddr, + tso_seg->seg.tso_frags[num_frags].length, + __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE)); + tso_seg->seg.tso_frags[num_frags].paddr = 0; + num_frags--; + qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO); + } + +last_seg_free_first_frag: + if (is_last_seg) { + /*Do dma unmap for the tso seg 0th frag */ + if (0 == tso_seg->seg.tso_frags[0].paddr) { + qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL"); + qdf_assert(0); + return; + } + dma_unmap_single(osdev->dev, + tso_seg->seg.tso_frags[0].paddr, + tso_seg->seg.tso_frags[0].length, + __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE)); + tso_seg->seg.tso_frags[0].paddr = 0; + qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST); + } +} +qdf_export_symbol(__qdf_nbuf_unmap_tso_segment); + +size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb) +{ + size_t packet_len; + + packet_len = skb->len - + ((skb_transport_header(skb) - skb_mac_header(skb)) + + tcp_hdrlen(skb)); + + return packet_len; +} + +qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len); + +/** + * __qdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf + * into segments + * @nbuf: network buffer to be segmented + * @tso_info: This is the output. The information about the + * TSO segments will be populated within this. + * + * This function fragments a TCP jumbo packet into smaller + * segments to be transmitted by the driver. It chains the TSO + * segments created into a list. + * + * Return: 0 - success, 1 - failure + */ +#ifndef BUILD_X86 +uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb) +{ + uint32_t tso_seg_size = skb_shinfo(skb)->gso_size; + uint32_t remainder, num_segs = 0; + uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags; + uint8_t frags_per_tso = 0; + uint32_t skb_frag_len = 0; + uint32_t eit_hdr_len = (skb_transport_header(skb) + - skb_mac_header(skb)) + tcp_hdrlen(skb); + skb_frag_t *frag = NULL; + int j = 0; + uint32_t temp_num_seg = 0; + + /* length of the first chunk of data in the skb minus eit header*/ + skb_frag_len = skb_headlen(skb) - eit_hdr_len; + + /* Calculate num of segs for skb's first chunk of data*/ + remainder = skb_frag_len % tso_seg_size; + num_segs = skb_frag_len / tso_seg_size; + /** + * Remainder non-zero and nr_frags zero implies end of skb data. + * In that case, one more tso seg is required to accommodate + * remaining data, hence num_segs++. If nr_frags is non-zero, + * then remaining data will be accomodated while doing the calculation + * for nr_frags data. Hence, frags_per_tso++. + */ + if (remainder) { + if (!skb_nr_frags) + num_segs++; + else + frags_per_tso++; + } + + while (skb_nr_frags) { + if (j >= skb_shinfo(skb)->nr_frags) { + qdf_info("TSO: nr_frags %d j %d", + skb_shinfo(skb)->nr_frags, j); + qdf_assert(0); + return 0; + } + /** + * Calculate the number of tso seg for nr_frags data: + * Get the length of each frag in skb_frag_len, add to + * remainder.Get the number of segments by dividing it to + * tso_seg_size and calculate the new remainder. + * Decrement the nr_frags value and keep + * looping all the skb_fragments. + */ + frag = &skb_shinfo(skb)->frags[j]; + skb_frag_len = skb_frag_size(frag); + temp_num_seg = num_segs; + remainder += skb_frag_len; + num_segs += remainder / tso_seg_size; + remainder = remainder % tso_seg_size; + skb_nr_frags--; + if (remainder) { + if (num_segs > temp_num_seg) + frags_per_tso = 0; + /** + * increment the tso per frags whenever remainder is + * positive. If frags_per_tso reaches the (max-1), + * [First frags always have EIT header, therefore max-1] + * increment the num_segs as no more data can be + * accomodated in the curr tso seg. Reset the remainder + * and frags per tso and keep looping. + */ + frags_per_tso++; + if (frags_per_tso == FRAG_NUM_MAX - 1) { + num_segs++; + frags_per_tso = 0; + remainder = 0; + } + /** + * If this is the last skb frag and still remainder is + * non-zero(frags_per_tso is not reached to the max-1) + * then increment the num_segs to take care of the + * remaining length. + */ + if (!skb_nr_frags && remainder) { + num_segs++; + frags_per_tso = 0; + } + } else { + /* Whenever remainder is 0, reset the frags_per_tso. */ + frags_per_tso = 0; + } + j++; + } + + return num_segs; +} +#elif !defined(QCA_WIFI_QCN9000) +uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb) +{ + uint32_t i, gso_size, tmp_len, num_segs = 0; + skb_frag_t *frag = NULL; + + /* + * Check if the head SKB or any of frags are allocated in < 0x50000000 + * region which cannot be accessed by Target + */ + if (virt_to_phys(skb->data) < 0x50000040) { + TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n", + __func__, __LINE__, skb_shinfo(skb)->nr_frags, + virt_to_phys(skb->data)); + goto fail; + + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + + if (!frag) + goto fail; + + if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040) + goto fail; + } + + + gso_size = skb_shinfo(skb)->gso_size; + tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb)) + + tcp_hdrlen(skb)); + while (tmp_len) { + num_segs++; + if (tmp_len > gso_size) + tmp_len -= gso_size; + else + break; + } + + return num_segs; + + /* + * Do not free this frame, just do socket level accounting + * so that this is not reused. + */ +fail: + if (skb->sk) + atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc)); + + return 0; +} +#else +uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb) +{ + uint32_t i, gso_size, tmp_len, num_segs = 0; + skb_frag_t *frag = NULL; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + + if (!frag) + goto fail; + } + + gso_size = skb_shinfo(skb)->gso_size; + tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb)) + + tcp_hdrlen(skb)); + while (tmp_len) { + num_segs++; + if (tmp_len > gso_size) + tmp_len -= gso_size; + else + break; + } + + return num_segs; + + /* + * Do not free this frame, just do socket level accounting + * so that this is not reused. + */ +fail: + if (skb->sk) + atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc)); + + return 0; +} +#endif +qdf_export_symbol(__qdf_nbuf_get_tso_num_seg); + +#endif /* FEATURE_TSO */ + +/** + * qdf_dmaaddr_to_32s - return high and low parts of dma_addr + * + * Returns the high and low 32-bits of the DMA addr in the provided ptrs + * + * Return: N/A + */ +void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr, + uint32_t *lo, uint32_t *hi) +{ + if (sizeof(dmaaddr) > sizeof(uint32_t)) { + *lo = lower_32_bits(dmaaddr); + *hi = upper_32_bits(dmaaddr); + } else { + *lo = dmaaddr; + *hi = 0; + } +} + +qdf_export_symbol(__qdf_dmaaddr_to_32s); + +struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb) +{ + qdf_nbuf_users_inc(&skb->users); + return skb; +} +qdf_export_symbol(__qdf_nbuf_inc_users); + +int __qdf_nbuf_get_users(struct sk_buff *skb) +{ + return qdf_nbuf_users_read(&skb->users); +} +qdf_export_symbol(__qdf_nbuf_get_users); + +/** + * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free. + * @skb: sk_buff handle + * + * Return: none + */ + +void __qdf_nbuf_ref(struct sk_buff *skb) +{ + skb_get(skb); +} +qdf_export_symbol(__qdf_nbuf_ref); + +/** + * __qdf_nbuf_shared() - Check whether the buffer is shared + * @skb: sk_buff buffer + * + * Return: true if more than one person has a reference to this buffer. + */ +int __qdf_nbuf_shared(struct sk_buff *skb) +{ + return skb_shared(skb); +} +qdf_export_symbol(__qdf_nbuf_shared); + +/** + * __qdf_nbuf_dmamap_create() - create a DMA map. + * @osdev: qdf device handle + * @dmap: dma map handle + * + * This can later be used to map networking buffers. They : + * - need space in adf_drv's software descriptor + * - are typically created during adf_drv_create + * - need to be created before any API(qdf_nbuf_map) that uses them + * + * Return: QDF STATUS + */ +QDF_STATUS +__qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap) +{ + QDF_STATUS error = QDF_STATUS_SUCCESS; + /* + * driver can tell its SG capablity, it must be handled. + * Bounce buffers if they are there + */ + (*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL); + if (!(*dmap)) + error = QDF_STATUS_E_NOMEM; + + return error; +} +qdf_export_symbol(__qdf_nbuf_dmamap_create); +/** + * __qdf_nbuf_dmamap_destroy() - delete a dma map + * @osdev: qdf device handle + * @dmap: dma map handle + * + * Return: none + */ +void +__qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap) +{ + kfree(dmap); +} +qdf_export_symbol(__qdf_nbuf_dmamap_destroy); + +/** + * __qdf_nbuf_map_nbytes_single() - map nbytes + * @osdev: os device + * @buf: buffer + * @dir: direction + * @nbytes: number of bytes + * + * Return: QDF_STATUS + */ +#ifdef A_SIMOS_DEVHOST +QDF_STATUS __qdf_nbuf_map_nbytes_single( + qdf_device_t osdev, struct sk_buff *buf, + qdf_dma_dir_t dir, int nbytes) +{ + qdf_dma_addr_t paddr; + + QDF_NBUF_CB_PADDR(buf) = paddr = buf->data; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_nbuf_map_nbytes_single); +#else +QDF_STATUS __qdf_nbuf_map_nbytes_single( + qdf_device_t osdev, struct sk_buff *buf, + qdf_dma_dir_t dir, int nbytes) +{ + qdf_dma_addr_t paddr; + + /* assume that the OS only provides a single fragment */ + QDF_NBUF_CB_PADDR(buf) = paddr = + dma_map_single(osdev->dev, buf->data, + nbytes, __qdf_dma_dir_to_os(dir)); + return dma_mapping_error(osdev->dev, paddr) ? + QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_nbuf_map_nbytes_single); +#endif +/** + * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes + * @osdev: os device + * @buf: buffer + * @dir: direction + * @nbytes: number of bytes + * + * Return: none + */ +#if defined(A_SIMOS_DEVHOST) +void +__qdf_nbuf_unmap_nbytes_single( + qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes) +{ +} +qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single); + +#else +void +__qdf_nbuf_unmap_nbytes_single( + qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes) +{ + if (0 == QDF_NBUF_CB_PADDR(buf)) { + qdf_err("ERROR: NBUF mapped physical address is NULL"); + return; + } + dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf), + nbytes, __qdf_dma_dir_to_os(dir)); +} +qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single); +#endif +/** + * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf + * @osdev: os device + * @skb: skb handle + * @dir: dma direction + * @nbytes: number of bytes to be mapped + * + * Return: QDF_STATUS + */ +#ifdef QDF_OS_DEBUG +QDF_STATUS +__qdf_nbuf_map_nbytes( + qdf_device_t osdev, + struct sk_buff *skb, + qdf_dma_dir_t dir, + int nbytes) +{ + struct skb_shared_info *sh = skb_shinfo(skb); + + qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE)); + + /* + * Assume there's only a single fragment. + * To support multiple fragments, it would be necessary to change + * adf_nbuf_t to be a separate object that stores meta-info + * (including the bus address for each fragment) and a pointer + * to the underlying sk_buff. + */ + qdf_assert(sh->nr_frags == 0); + + return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes); +} +qdf_export_symbol(__qdf_nbuf_map_nbytes); +#else +QDF_STATUS +__qdf_nbuf_map_nbytes( + qdf_device_t osdev, + struct sk_buff *skb, + qdf_dma_dir_t dir, + int nbytes) +{ + return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes); +} +qdf_export_symbol(__qdf_nbuf_map_nbytes); +#endif +/** + * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf + * @osdev: OS device + * @skb: skb handle + * @dir: direction + * @nbytes: number of bytes + * + * Return: none + */ +void +__qdf_nbuf_unmap_nbytes( + qdf_device_t osdev, + struct sk_buff *skb, + qdf_dma_dir_t dir, + int nbytes) +{ + qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE)); + + /* + * Assume there's a single fragment. + * If this is not true, the assertion in __adf_nbuf_map will catch it. + */ + __qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes); +} +qdf_export_symbol(__qdf_nbuf_unmap_nbytes); + +/** + * __qdf_nbuf_dma_map_info() - return the dma map info + * @bmap: dma map + * @sg: dma map info + * + * Return: none + */ +void +__qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg) +{ + qdf_assert(bmap->mapped); + qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER); + + memcpy(sg->dma_segs, bmap->seg, bmap->nsegs * + sizeof(struct __qdf_segment)); + sg->nsegs = bmap->nsegs; +} +qdf_export_symbol(__qdf_nbuf_dma_map_info); +/** + * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is + * specified by the index + * @skb: sk buff + * @sg: scatter/gather list of all the frags + * + * Return: none + */ +#if defined(__QDF_SUPPORT_FRAG_MEM) +void +__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg) +{ + qdf_assert(skb); + sg->sg_segs[0].vaddr = skb->data; + sg->sg_segs[0].len = skb->len; + sg->nsegs = 1; + + for (int i = 1; i <= sh->nr_frags; i++) { + skb_frag_t *f = &sh->frags[i - 1]; + + sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) + + f->page_offset); + sg->sg_segs[i].len = f->size; + + qdf_assert(i < QDF_MAX_SGLIST); + } + sg->nsegs += i; + +} +qdf_export_symbol(__qdf_nbuf_frag_info); +#else +#ifdef QDF_OS_DEBUG +void +__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg) +{ + + struct skb_shared_info *sh = skb_shinfo(skb); + + qdf_assert(skb); + sg->sg_segs[0].vaddr = skb->data; + sg->sg_segs[0].len = skb->len; + sg->nsegs = 1; + + qdf_assert(sh->nr_frags == 0); +} +qdf_export_symbol(__qdf_nbuf_frag_info); +#else +void +__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg) +{ + sg->sg_segs[0].vaddr = skb->data; + sg->sg_segs[0].len = skb->len; + sg->nsegs = 1; +} +qdf_export_symbol(__qdf_nbuf_frag_info); +#endif +#endif +/** + * __qdf_nbuf_get_frag_size() - get frag size + * @nbuf: sk buffer + * @cur_frag: current frag + * + * Return: frag size + */ +uint32_t +__qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag) +{ + struct skb_shared_info *sh = skb_shinfo(nbuf); + const skb_frag_t *frag = sh->frags + cur_frag; + + return skb_frag_size(frag); +} +qdf_export_symbol(__qdf_nbuf_get_frag_size); + +/** + * __qdf_nbuf_frag_map() - dma map frag + * @osdev: os device + * @nbuf: sk buff + * @offset: offset + * @dir: direction + * @cur_frag: current fragment + * + * Return: QDF status + */ +#ifdef A_SIMOS_DEVHOST +QDF_STATUS __qdf_nbuf_frag_map( + qdf_device_t osdev, __qdf_nbuf_t nbuf, + int offset, qdf_dma_dir_t dir, int cur_frag) +{ + int32_t paddr, frag_len; + + QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_nbuf_frag_map); +#else +QDF_STATUS __qdf_nbuf_frag_map( + qdf_device_t osdev, __qdf_nbuf_t nbuf, + int offset, qdf_dma_dir_t dir, int cur_frag) +{ + dma_addr_t paddr, frag_len; + struct skb_shared_info *sh = skb_shinfo(nbuf); + const skb_frag_t *frag = sh->frags + cur_frag; + + frag_len = skb_frag_size(frag); + + QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr = + skb_frag_dma_map(osdev->dev, frag, offset, frag_len, + __qdf_dma_dir_to_os(dir)); + return dma_mapping_error(osdev->dev, paddr) ? + QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_nbuf_frag_map); +#endif +/** + * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map + * @dmap: dma map + * @cb: callback + * @arg: argument + * + * Return: none + */ +void +__qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg) +{ + return; +} +qdf_export_symbol(__qdf_nbuf_dmamap_set_cb); + + +/** + * __qdf_nbuf_sync_single_for_cpu() - nbuf sync + * @osdev: os device + * @buf: sk buff + * @dir: direction + * + * Return: none + */ +#if defined(A_SIMOS_DEVHOST) +static void __qdf_nbuf_sync_single_for_cpu( + qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + return; +} +#else +static void __qdf_nbuf_sync_single_for_cpu( + qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + if (0 == QDF_NBUF_CB_PADDR(buf)) { + qdf_err("ERROR: NBUF mapped physical address is NULL"); + return; + } + dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf), + skb_end_offset(buf) - skb_headroom(buf), + __qdf_dma_dir_to_os(dir)); +} +#endif +/** + * __qdf_nbuf_sync_for_cpu() - nbuf sync + * @osdev: os device + * @skb: sk buff + * @dir: direction + * + * Return: none + */ +void +__qdf_nbuf_sync_for_cpu(qdf_device_t osdev, + struct sk_buff *skb, qdf_dma_dir_t dir) +{ + qdf_assert( + (dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE)); + + /* + * Assume there's a single fragment. + * If this is not true, the assertion in __adf_nbuf_map will catch it. + */ + __qdf_nbuf_sync_single_for_cpu(osdev, skb, dir); +} +qdf_export_symbol(__qdf_nbuf_sync_for_cpu); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) +/** + * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags + * @rx_status: Pointer to rx_status. + * @rtap_buf: Buf to which VHT info has to be updated. + * @rtap_len: Current length of radiotap buffer + * + * Return: Length of radiotap after VHT flags updated. + */ +static unsigned int qdf_nbuf_update_radiotap_vht_flags( + struct mon_rx_status *rx_status, + int8_t *rtap_buf, + uint32_t rtap_len) +{ + uint16_t vht_flags = 0; + + rtap_len = qdf_align(rtap_len, 2); + + /* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */ + vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | + IEEE80211_RADIOTAP_VHT_KNOWN_GI | + IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM | + IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED | + IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH | + IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID; + put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]); + rtap_len += 2; + + rtap_buf[rtap_len] |= + (rx_status->is_stbc ? + IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) | + (rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) | + (rx_status->ldpc ? + IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) | + (rx_status->beamformed ? + IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0); + rtap_len += 1; + switch (rx_status->vht_flag_values2) { + case IEEE80211_RADIOTAP_VHT_BW_20: + rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20; + break; + case IEEE80211_RADIOTAP_VHT_BW_40: + rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40; + break; + case IEEE80211_RADIOTAP_VHT_BW_80: + rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80; + break; + case IEEE80211_RADIOTAP_VHT_BW_160: + rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160; + break; + } + rtap_len += 1; + rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]); + rtap_len += 1; + rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]); + rtap_len += 1; + rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]); + rtap_len += 1; + rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]); + rtap_len += 1; + rtap_buf[rtap_len] = (rx_status->vht_flag_values4); + rtap_len += 1; + rtap_buf[rtap_len] = (rx_status->vht_flag_values5); + rtap_len += 1; + put_unaligned_le16(rx_status->vht_flag_values6, + &rtap_buf[rtap_len]); + rtap_len += 2; + + return rtap_len; +} + +/** + * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status + * @rx_status: Pointer to rx_status. + * @rtap_buf: buffer to which radiotap has to be updated + * @rtap_len: radiotap length + * + * API update high-efficiency (11ax) fields in the radiotap header + * + * Return: length of rtap_len updated. + */ +static unsigned int +qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status, + int8_t *rtap_buf, uint32_t rtap_len) +{ + /* + * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16 + * Enable all "known" HE radiotap flags for now + */ + rtap_len = qdf_align(rtap_len, 2); + + put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]); + rtap_len += 2; + qdf_rl_debug("he data %x %x %x %x %x %x", + rx_status->he_data1, + rx_status->he_data2, rx_status->he_data3, + rx_status->he_data4, rx_status->he_data5, + rx_status->he_data6); + return rtap_len; +} + + +/** + * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags + * @rx_status: Pointer to rx_status. + * @rtap_buf: buffer to which radiotap has to be updated + * @rtap_len: radiotap length + * + * API update HE-MU fields in the radiotap header + * + * Return: length of rtap_len updated. + */ +static unsigned int +qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status, + int8_t *rtap_buf, uint32_t rtap_len) +{ + rtap_len = qdf_align(rtap_len, 2); + + /* + * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4] + * Enable all "known" he-mu radiotap flags for now + */ + put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]); + rtap_len += 2; + + rtap_buf[rtap_len] = rx_status->he_RU[0]; + rtap_len += 1; + + rtap_buf[rtap_len] = rx_status->he_RU[1]; + rtap_len += 1; + + rtap_buf[rtap_len] = rx_status->he_RU[2]; + rtap_len += 1; + + rtap_buf[rtap_len] = rx_status->he_RU[3]; + rtap_len += 1; + qdf_debug("he_flags %x %x he-RU %x %x %x %x", + rx_status->he_flags1, + rx_status->he_flags2, rx_status->he_RU[0], + rx_status->he_RU[1], rx_status->he_RU[2], + rx_status->he_RU[3]); + + return rtap_len; +} + +/** + * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags + * @rx_status: Pointer to rx_status. + * @rtap_buf: buffer to which radiotap has to be updated + * @rtap_len: radiotap length + * + * API update he-mu-other fields in the radiotap header + * + * Return: length of rtap_len updated. + */ +static unsigned int +qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status, + int8_t *rtap_buf, uint32_t rtap_len) +{ + rtap_len = qdf_align(rtap_len, 2); + + /* + * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8 + * Enable all "known" he-mu-other radiotap flags for now + */ + put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]); + rtap_len += 2; + + rtap_buf[rtap_len] = rx_status->he_per_user_position; + rtap_len += 1; + + rtap_buf[rtap_len] = rx_status->he_per_user_known; + rtap_len += 1; + qdf_debug("he_per_user %x %x pos %x knwn %x", + rx_status->he_per_user_1, + rx_status->he_per_user_2, rx_status->he_per_user_position, + rx_status->he_per_user_known); + return rtap_len; +} + + +/** + * This is the length for radiotap, combined length + * (Mandatory part struct ieee80211_radiotap_header + RADIOTAP_HEADER_LEN) + * cannot be more than available headroom_sz. + * increase this when we add more radiotap elements. + * Number after '+' indicates maximum possible increase due to alignment + */ + +#define RADIOTAP_VHT_FLAGS_LEN (12 + 1) +#define RADIOTAP_HE_FLAGS_LEN (12 + 1) +#define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1) +#define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1) +#define RADIOTAP_FIXED_HEADER_LEN 17 +#define RADIOTAP_HT_FLAGS_LEN 3 +#define RADIOTAP_AMPDU_STATUS_LEN (8 + 3) +#define RADIOTAP_VENDOR_NS_LEN \ + (sizeof(struct qdf_radiotap_vendor_ns_ath) + 1) +#define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \ + RADIOTAP_FIXED_HEADER_LEN + \ + RADIOTAP_HT_FLAGS_LEN + \ + RADIOTAP_VHT_FLAGS_LEN + \ + RADIOTAP_AMPDU_STATUS_LEN + \ + RADIOTAP_HE_FLAGS_LEN + \ + RADIOTAP_HE_MU_FLAGS_LEN + \ + RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \ + RADIOTAP_VENDOR_NS_LEN) + +#define IEEE80211_RADIOTAP_HE 23 +#define IEEE80211_RADIOTAP_HE_MU 24 +#define IEEE80211_RADIOTAP_HE_MU_OTHER 25 +uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */ + +/** + * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags + * @rx_status: Pointer to rx_status. + * @rtap_buf: Buf to which AMPDU info has to be updated. + * @rtap_len: Current length of radiotap buffer + * + * Return: Length of radiotap after AMPDU flags updated. + */ +static unsigned int qdf_nbuf_update_radiotap_ampdu_flags( + struct mon_rx_status *rx_status, + uint8_t *rtap_buf, + uint32_t rtap_len) +{ + /* + * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 + * First 32 bits of AMPDU represents the reference number + */ + + uint32_t ampdu_reference_num = rx_status->ppdu_id; + uint16_t ampdu_flags = 0; + uint16_t ampdu_reserved_flags = 0; + + rtap_len = qdf_align(rtap_len, 4); + + put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]); + rtap_len += 4; + put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]); + rtap_len += 2; + put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]); + rtap_len += 2; + + return rtap_len; +} + +/** + * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status + * @rx_status: Pointer to rx_status. + * @nbuf: nbuf pointer to which radiotap has to be updated + * @headroom_sz: Available headroom size. + * + * Return: length of rtap_len updated. + */ +unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status, + qdf_nbuf_t nbuf, uint32_t headroom_sz) +{ + uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0}; + struct ieee80211_radiotap_header *rthdr = + (struct ieee80211_radiotap_header *)rtap_buf; + uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header); + uint32_t rtap_len = rtap_hdr_len; + uint8_t length = rtap_len; + struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath; + + /* IEEE80211_RADIOTAP_TSFT __le64 microseconds*/ + rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT); + put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]); + rtap_len += 8; + + /* IEEE80211_RADIOTAP_FLAGS u8 */ + rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS); + + if (rx_status->rs_fcs_err) + rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS; + + rtap_buf[rtap_len] = rx_status->rtap_flags; + rtap_len += 1; + + /* IEEE80211_RADIOTAP_RATE u8 500kb/s */ + if (!rx_status->ht_flags && !rx_status->vht_flags && + !rx_status->he_flags) { + rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE); + rtap_buf[rtap_len] = rx_status->rate; + } else + rtap_buf[rtap_len] = 0; + rtap_len += 1; + + /* IEEE80211_RADIOTAP_CHANNEL 2 x __le16 MHz, bitmap */ + rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL); + put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]); + rtap_len += 2; + /* Channel flags. */ + if (rx_status->chan_freq > CHANNEL_FREQ_5150) + rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL; + else + rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL; + if (rx_status->cck_flag) + rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL; + if (rx_status->ofdm_flag) + rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL; + put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]); + rtap_len += 2; + + /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8 decibels from one milliwatt + * (dBm) + */ + rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); + /* + * rssi_comb is int dB, need to convert it to dBm. + * normalize value to noise floor of -96 dBm + */ + rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor; + rtap_len += 1; + + /* RX signal noise floor */ + rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE); + rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor; + rtap_len += 1; + + /* IEEE80211_RADIOTAP_ANTENNA u8 antenna index */ + rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA); + rtap_buf[rtap_len] = rx_status->nr_ant; + rtap_len += 1; + + if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) { + qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN"); + return 0; + } + + if (rx_status->ht_flags) { + length = rtap_len; + /* IEEE80211_RADIOTAP_VHT u8, u8, u8 */ + rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS); + rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW | + IEEE80211_RADIOTAP_MCS_HAVE_MCS | + IEEE80211_RADIOTAP_MCS_HAVE_GI; + rtap_len += 1; + + if (rx_status->sgi) + rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI; + if (rx_status->bw) + rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40; + else + rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20; + rtap_len += 1; + + rtap_buf[rtap_len] = rx_status->ht_mcs; + rtap_len += 1; + + if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) { + qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN"); + return 0; + } + } + + if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) { + /* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */ + rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS); + rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status, + rtap_buf, + rtap_len); + } + + if (rx_status->vht_flags) { + length = rtap_len; + /* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */ + rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT); + rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status, + rtap_buf, + rtap_len); + + if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) { + qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN"); + return 0; + } + } + + if (rx_status->he_flags) { + length = rtap_len; + /* IEEE80211_RADIOTAP_HE */ + rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE); + rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status, + rtap_buf, + rtap_len); + + if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) { + qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN"); + return 0; + } + } + + if (rx_status->he_mu_flags) { + length = rtap_len; + /* IEEE80211_RADIOTAP_HE-MU */ + rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU); + rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status, + rtap_buf, + rtap_len); + + if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) { + qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN"); + return 0; + } + } + + if (rx_status->he_mu_other_flags) { + length = rtap_len; + /* IEEE80211_RADIOTAP_HE-MU-OTHER */ + rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER); + rtap_len = + qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status, + rtap_buf, + rtap_len); + + if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) { + qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN"); + return 0; + } + } + + rtap_len = qdf_align(rtap_len, 2); + /* + * Radiotap Vendor Namespace + */ + rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE); + radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *) + (rtap_buf + rtap_len); + /* + * Copy Atheros OUI - 3 bytes (4th byte is 0) + */ + qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI)); + /* + * Name space selector = 0 + * We only will have one namespace for now + */ + radiotap_vendor_ns_ath->hdr.selector = 0; + radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16( + sizeof(*radiotap_vendor_ns_ath) - + sizeof(radiotap_vendor_ns_ath->hdr)); + radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id); + radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info); + radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info); + radiotap_vendor_ns_ath->ppdu_start_timestamp = + cpu_to_le32(rx_status->ppdu_timestamp); + rtap_len += sizeof(*radiotap_vendor_ns_ath); + + rthdr->it_len = cpu_to_le16(rtap_len); + rthdr->it_present = cpu_to_le32(rthdr->it_present); + + if (headroom_sz < rtap_len) { + qdf_err("ERROR: not enough space to update radiotap"); + return 0; + } + qdf_nbuf_push_head(nbuf, rtap_len); + qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len); + return rtap_len; +} +#else +static unsigned int qdf_nbuf_update_radiotap_vht_flags( + struct mon_rx_status *rx_status, + int8_t *rtap_buf, + uint32_t rtap_len) +{ + qdf_err("ERROR: struct ieee80211_radiotap_header not supported"); + return 0; +} + +unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status, + int8_t *rtap_buf, uint32_t rtap_len) +{ + qdf_err("ERROR: struct ieee80211_radiotap_header not supported"); + return 0; +} + +static unsigned int qdf_nbuf_update_radiotap_ampdu_flags( + struct mon_rx_status *rx_status, + uint8_t *rtap_buf, + uint32_t rtap_len) +{ + qdf_err("ERROR: struct ieee80211_radiotap_header not supported"); + return 0; +} + +unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status, + qdf_nbuf_t nbuf, uint32_t headroom_sz) +{ + qdf_err("ERROR: struct ieee80211_radiotap_header not supported"); + return 0; +} +#endif +qdf_export_symbol(qdf_nbuf_update_radiotap); + +/** + * __qdf_nbuf_reg_free_cb() - register nbuf free callback + * @cb_func_ptr: function pointer to the nbuf free callback + * + * This function registers a callback function for nbuf free. + * + * Return: none + */ +void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr) +{ + nbuf_free_cb = cb_func_ptr; +} + +/** + * qdf_nbuf_classify_pkt() - classify packet + * @skb - sk buff + * + * Return: none + */ +void qdf_nbuf_classify_pkt(struct sk_buff *skb) +{ + struct ethhdr *eh = (struct ethhdr *)skb->data; + + /* check destination mac address is broadcast/multicast */ + if (is_broadcast_ether_addr((uint8_t *)eh)) + QDF_NBUF_CB_SET_BCAST(skb); + else if (is_multicast_ether_addr((uint8_t *)eh)) + QDF_NBUF_CB_SET_MCAST(skb); + + if (qdf_nbuf_is_ipv4_arp_pkt(skb)) + QDF_NBUF_CB_GET_PACKET_TYPE(skb) = + QDF_NBUF_CB_PACKET_TYPE_ARP; + else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb)) + QDF_NBUF_CB_GET_PACKET_TYPE(skb) = + QDF_NBUF_CB_PACKET_TYPE_DHCP; + else if (qdf_nbuf_is_ipv4_eapol_pkt(skb)) + QDF_NBUF_CB_GET_PACKET_TYPE(skb) = + QDF_NBUF_CB_PACKET_TYPE_EAPOL; + else if (qdf_nbuf_is_ipv4_wapi_pkt(skb)) + QDF_NBUF_CB_GET_PACKET_TYPE(skb) = + QDF_NBUF_CB_PACKET_TYPE_WAPI; +} +qdf_export_symbol(qdf_nbuf_classify_pkt); + +void __qdf_nbuf_init(__qdf_nbuf_t nbuf) +{ + qdf_nbuf_users_set(&nbuf->users, 1); + nbuf->data = nbuf->head + NET_SKB_PAD; + skb_reset_tail_pointer(nbuf); +} +qdf_export_symbol(__qdf_nbuf_init); + +#ifdef WLAN_FEATURE_FASTPATH +void qdf_nbuf_init_fast(qdf_nbuf_t nbuf) +{ + qdf_nbuf_users_set(&nbuf->users, 1); + nbuf->data = nbuf->head + NET_SKB_PAD; + skb_reset_tail_pointer(nbuf); +} +qdf_export_symbol(qdf_nbuf_init_fast); +#endif /* WLAN_FEATURE_FASTPATH */ + + +#ifdef QDF_NBUF_GLOBAL_COUNT +/** + * __qdf_nbuf_mod_init() - Intialization routine for qdf_nuf + * + * Return void + */ +void __qdf_nbuf_mod_init(void) +{ + qdf_atomic_init(&nbuf_count); + qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count); +} + +/** + * __qdf_nbuf_mod_exit() - Unintialization routine for qdf_nuf + * + * Return void + */ +void __qdf_nbuf_mod_exit(void) +{ +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_net_if.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_net_if.c new file mode 100644 index 0000000000000000000000000000000000000000..a247c74bf5d63bd706f1e888d076e26e8a45a20a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_net_if.c @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_net_if + * This file provides OS dependent network interface related APIs + */ + +#include "qdf_net_if.h" +#include "qdf_types.h" +#include "qdf_module.h" +#include "qdf_util.h" +#include + +QDF_STATUS +qdf_net_if_create_dummy_if(struct qdf_net_if *nif) +{ + int ret; + + if (!nif) + return QDF_STATUS_E_INVAL; + + ret = init_dummy_netdev((struct net_device *)nif); + + return qdf_status_from_os_return(ret); +} + +qdf_export_symbol(qdf_net_if_create_dummy_if); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_perf.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_perf.c new file mode 100644 index 0000000000000000000000000000000000000000..157cb832b272244b9e43ccd9882e83d1d7b27aad --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_perf.c @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_perf + * This file provides OS dependent perf API's. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#ifdef QCA_PERF_PROFILING + +qdf_perf_entry_t perf_root = {{0, 0} }; + +/** + * qdf_perfmod_init() - Module init + * + * return: int + */ +int +qdf_perfmod_init(void) +{ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, + "Perf Debug Module Init"); + INIT_LIST_HEAD(&perf_root.list); + INIT_LIST_HEAD(&perf_root.child); + perf_root.proc = proc_mkdir(PROCFS_PERF_DIRNAME, 0); + return 0; +} +qdf_export_symbol(qdf_perfmod_init); + +/** + * qdf_perfmod_exit() - Module exit + * + * Return: none + */ +void +qdf_perfmod_exit(void) +{ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, + "Perf Debug Module Exit"); + remove_proc_entry(PROCFS_PERF_DIRNAME, 0); +} +qdf_export_symbol(qdf_perfmod_exit); + +/** + * __qdf_perf_init() - Create the perf entry + * @parent: parent perf id + * @id_name: name of perf id + * @type: type of perf counter + * + * return: perf id + */ +qdf_perf_id_t +__qdf_perf_init(qdf_perf_id_t parent, uint8_t *id_name, + qdf_perf_cntr_t type) +{ + qdf_perf_entry_t *entry = NULL; + qdf_perf_entry_t *pentry = PERF_ENTRY(parent); + + if (type >= CNTR_LAST) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s:%s Invalid perf-type", __FILE__, __func__); + goto done; + } + + if (!pentry) + pentry = &perf_root; + entry = kmalloc(sizeof(struct qdf_perf_entry), GFP_ATOMIC); + + if (!entry) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + " Out of Memory,:%s", __func__); + return NULL; + } + + memset(entry, 0, sizeof(struct qdf_perf_entry)); + + INIT_LIST_HEAD(&entry->list); + INIT_LIST_HEAD(&entry->child); + + spin_lock_init(&entry->lock_irq); + + list_add_tail(&entry->list, &pentry->child); + + entry->name = id_name; + entry->type = type; + + if (type == CNTR_GROUP) { + entry->proc = proc_mkdir(id_name, pentry->proc); + goto done; + } + + entry->parent = pentry; + entry->proc = create_proc_entry(id_name, S_IFREG|S_IRUGO|S_IWUSR, + pentry->proc); + entry->proc->data = entry; + entry->proc->read_proc = api_tbl[type].proc_read; + entry->proc->write_proc = api_tbl[type].proc_write; + + /* + * Initialize the Event with default values + */ + api_tbl[type].init(entry, api_tbl[type].def_val); + +done: + return entry; +} +qdf_export_symbol(__qdf_perf_init); + +/** + * __qdf_perf_destroy - Destroy the perf entry + * @id: pointer to qdf_perf_id_t + * + * @return: bool + */ +bool __qdf_perf_destroy(qdf_perf_id_t id) +{ + qdf_perf_entry_t *entry = PERF_ENTRY(id), + *parent = entry->parent; + + if (!list_empty(&entry->child)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "Child's are alive, Can't delete"); + return A_FALSE; + } + + remove_proc_entry(entry->name, parent->proc); + + list_del(&entry->list); + + vfree(entry); + + return true; +} +qdf_export_symbol(__qdf_perf_destroy); + +/** + * __qdf_perf_start - Start the sampling + * @id: Instance of qdf_perf_id_t + * + * Returns: none + */ +void __qdf_perf_start(qdf_perf_id_t id) +{ + qdf_perf_entry_t *entry = PERF_ENTRY(id); + + api_tbl[entry->type].sample(entry, 0); +} +qdf_export_symbol(__qdf_perf_start); + +/** + * __qdf_perf_end - Stop sampling + * @id: Instance of qdf_perf_id_t + * + * Returns: none + */ +void __qdf_perf_end(qdf_perf_id_t id) +{ + qdf_perf_entry_t *entry = PERF_ENTRY(id); + + api_tbl[entry->type].sample(entry, 1); +} +qdf_export_symbol(__qdf_perf_end); + +#endif /* QCA_PERF_PROFILING */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_periodic_work.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_periodic_work.c new file mode 100644 index 0000000000000000000000000000000000000000..a845ed18ea3a4e892f3cad335738c71ca05c1406 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_periodic_work.c @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_periodic_work.h" +#include "qdf_status.h" +#include "qdf_trace.h" +#include "qdf_types.h" + +#ifdef WLAN_PERIODIC_WORK_DEBUG +#include "qdf_tracker.h" + +#define qdf_pwork_tracker_bits 2 /* 4 buckets */ +static qdf_tracker_declare(qdf_pwork_tracker, qdf_pwork_tracker_bits, + "periodic work leaks", "periodic work create", + "periodic work destroy"); + +void qdf_periodic_work_feature_init(void) +{ + qdf_tracker_init(&qdf_pwork_tracker); +} + +void qdf_periodic_work_feature_deinit(void) +{ + qdf_tracker_deinit(&qdf_pwork_tracker); +} + +void qdf_periodic_work_check_for_leaks(void) +{ + qdf_tracker_check_for_leaks(&qdf_pwork_tracker); +} + +static inline QDF_STATUS qdf_pwork_dbg_track(struct qdf_periodic_work *pwork, + const char *func, uint32_t line) +{ + return qdf_tracker_track(&qdf_pwork_tracker, pwork, func, line); +} + +static inline void qdf_pwork_dbg_untrack(struct qdf_periodic_work *pwork, + const char *func, uint32_t line) +{ + qdf_tracker_untrack(&qdf_pwork_tracker, pwork, func, line); +} +#else +static inline QDF_STATUS qdf_pwork_dbg_track(struct qdf_periodic_work *pwork, + const char *func, uint32_t line) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void qdf_pwork_dbg_untrack(struct qdf_periodic_work *pwork, + const char *func, uint32_t line) +{ } +#endif /* WLAN_PERIODIC_WORK_DEBUG */ + +static void __qdf_periodic_work_handler(struct work_struct *work) +{ + struct qdf_periodic_work *pwork = + container_of(work, struct qdf_periodic_work, dwork.work); + uint32_t msec; + + pwork->callback(pwork->context); + + /* this is intentionally racy; see qdf_periodic_work_stop_sync() */ + msec = pwork->msec; + if (msec) + schedule_delayed_work(&pwork->dwork, msecs_to_jiffies(msec)); +} + +QDF_STATUS __qdf_periodic_work_create(struct qdf_periodic_work *pwork, + qdf_periodic_work_cb callback, + void *context, + const char *func, uint32_t line) +{ + QDF_STATUS status; + + QDF_BUG(pwork); + QDF_BUG(callback); + if (!pwork || !callback) + return QDF_STATUS_E_INVAL; + + status = qdf_pwork_dbg_track(pwork, func, line); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + INIT_DEFERRABLE_WORK(&pwork->dwork, __qdf_periodic_work_handler); + pwork->callback = callback; + pwork->context = context; + pwork->msec = 0; + + return QDF_STATUS_SUCCESS; +} + +void __qdf_periodic_work_destroy(struct qdf_periodic_work *pwork, + const char *func, uint32_t line) +{ + qdf_periodic_work_stop_sync(pwork); + qdf_pwork_dbg_untrack(pwork, func, line); +} + +bool qdf_periodic_work_start(struct qdf_periodic_work *pwork, uint32_t msec) +{ + QDF_BUG(msec); + if (!msec) + return false; + + pwork->msec = msec; + + return schedule_delayed_work(&pwork->dwork, msecs_to_jiffies(msec)); +} + +bool qdf_periodic_work_stop_async(struct qdf_periodic_work *pwork) +{ + bool pending = pwork->msec != 0; + + pwork->msec = 0; + cancel_delayed_work(&pwork->dwork); + + return pending; +} + +bool qdf_periodic_work_stop_sync(struct qdf_periodic_work *pwork) +{ + bool pending = pwork->msec != 0; + + /* To avoid using a lock, signal that the work shouldn't be restarted, + * and cancel_sync in a loop. There is a very small race window, and + * thus the work may ocassionally need to be cancelled more than once. + */ + pwork->msec = 0; + while (cancel_delayed_work_sync(&pwork->dwork)) + ; /* no-op*/ + + return pending; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_status.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_status.c new file mode 100644 index 0000000000000000000000000000000000000000..f96e27392eeefdd8a6db63ab1db4e59631c5a445 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_status.c @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "linux/errno.h" +#include "qdf_module.h" +#include "qdf_status.h" + +int qdf_status_to_os_return(QDF_STATUS status) +{ + switch (status) { + case QDF_STATUS_SUCCESS: + return 0; + case QDF_STATUS_E_RESOURCES: + return -EBUSY; + case QDF_STATUS_E_NOMEM: + return -ENOMEM; + case QDF_STATUS_E_AGAIN: + return -EAGAIN; + case QDF_STATUS_E_INVAL: + return -EINVAL; + case QDF_STATUS_E_FAULT: + return -EFAULT; + case QDF_STATUS_E_ALREADY: + return -EALREADY; + case QDF_STATUS_E_BADMSG: + return -EBADMSG; + case QDF_STATUS_E_BUSY: + return -EBUSY; + case QDF_STATUS_E_CANCELED: + return -ECANCELED; + case QDF_STATUS_E_ABORTED: + return -ECONNABORTED; + case QDF_STATUS_E_PERM: + return -EPERM; + case QDF_STATUS_E_EXISTS: + return -EEXIST; + case QDF_STATUS_E_NOENT: + return -ENOENT; + case QDF_STATUS_E_E2BIG: + return -E2BIG; + case QDF_STATUS_E_NOSPC: + return -ENOSPC; + case QDF_STATUS_E_ADDRNOTAVAIL: + return -EADDRNOTAVAIL; + case QDF_STATUS_E_ENXIO: + return -ENXIO; + case QDF_STATUS_E_NETDOWN: + return -ENETDOWN; + case QDF_STATUS_E_IO: + return -EIO; + case QDF_STATUS_E_NETRESET: + return -ENETRESET; + case QDF_STATUS_E_PENDING: + return -EINPROGRESS; + case QDF_STATUS_E_TIMEOUT: + return -ETIMEDOUT; + default: + return -EPERM; + } +} +qdf_export_symbol(qdf_status_to_os_return); + +QDF_STATUS qdf_status_from_os_return(int rc) +{ + switch (rc) { + case 0: + return QDF_STATUS_SUCCESS; + case -ENOMEM: + return QDF_STATUS_E_NOMEM; + case -EAGAIN: + return QDF_STATUS_E_AGAIN; + case -EINVAL: + return QDF_STATUS_E_INVAL; + case -EFAULT: + return QDF_STATUS_E_FAULT; + case -EALREADY: + return QDF_STATUS_E_ALREADY; + case -EBADMSG: + return QDF_STATUS_E_BADMSG; + case -EBUSY: + return QDF_STATUS_E_BUSY; + case -ECANCELED: + return QDF_STATUS_E_CANCELED; + case -ECONNABORTED: + return QDF_STATUS_E_ABORTED; + case -EPERM: + return QDF_STATUS_E_PERM; + case -EEXIST: + return QDF_STATUS_E_EXISTS; + case -ENOENT: + return QDF_STATUS_E_NOENT; + case -E2BIG: + return QDF_STATUS_E_E2BIG; + case -ENOSPC: + return QDF_STATUS_E_NOSPC; + case -EADDRNOTAVAIL: + return QDF_STATUS_E_ADDRNOTAVAIL; + case -ENXIO: + return QDF_STATUS_E_ENXIO; + case -ENETDOWN: + return QDF_STATUS_E_NETDOWN; + case -EIO: + return QDF_STATUS_E_IO; + case -ENETRESET: + return QDF_STATUS_E_NETRESET; + case -EINPROGRESS: + return QDF_STATUS_E_PENDING; + case -ETIMEDOUT: + return QDF_STATUS_E_TIMEOUT; + default: + return QDF_STATUS_E_PERM; + } +} +qdf_export_symbol(qdf_status_from_os_return); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_streamfs.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_streamfs.c new file mode 100644 index 0000000000000000000000000000000000000000..25f58a33235aa60dad7fbc4dd9aae03a54da2a84 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_streamfs.c @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2018, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_streamfs + * This file provides QDF stream file system APIs + */ + +#include +#include +#include +#include + +/** + * qdf_create_buf_file_handler() - Create streamfs buffer file + * @filename: base name of files to create, NULL for buffering only + * @parent: dentry of parent directory, NULL for root directory + * @mode: filemode + * @buf: streamfs channel buf + * @is_global: pointer to set whether this buf file is global or not. + * + * Returns dentry if successful, NULL otherwise. + */ +static qdf_dentry_t +qdf_create_buf_file_handler(const char *filename, qdf_dentry_t parent, + uint16_t mode, qdf_streamfs_chan_buf_t buf, + int32_t *is_global) +{ + qdf_dentry_t buf_file; + *is_global = 1; + buf_file = qdf_streamfs_create_file(filename, mode, parent, buf); + + if (!buf_file) + return NULL; + + return buf_file; +} + +/** + * qdf_remove_buf_file_handler() - Remove streamfs buffer file + * @dentry:dentry + */ +static int qdf_remove_buf_file_handler(qdf_dentry_t dentry) +{ + qdf_streamfs_remove_file(dentry); + + return 0; +} + +static struct rchan_callbacks g_qdf_streamfs_cb = { + .create_buf_file = qdf_create_buf_file_handler, + .remove_buf_file = qdf_remove_buf_file_handler, +}; + +qdf_dentry_t +qdf_streamfs_create_file(const char *name, uint16_t mode, + qdf_dentry_t parent, + qdf_streamfs_chan_buf_t buf) +{ + qdf_dentry_t file = NULL; + + if (!name) + return NULL; + + file = debugfs_create_file(name, mode, + (struct dentry *)parent, + buf, &relay_file_operations); + + return file; +} + +qdf_export_symbol(qdf_streamfs_create_file); + +qdf_streamfs_chan_t +qdf_streamfs_open(const char *base_filename, qdf_dentry_t parent, + size_t subbuf_size, size_t n_subbufs, + void *private_data) +{ + qdf_streamfs_chan_t channel_ptr = NULL; + + channel_ptr = relay_open(base_filename, + (struct dentry *)parent, + subbuf_size, n_subbufs, + &g_qdf_streamfs_cb, + private_data); + + return channel_ptr; +} + +qdf_export_symbol(qdf_streamfs_open); + +void qdf_streamfs_close(qdf_streamfs_chan_t chan) +{ + if (chan) + relay_close(chan); +} + +qdf_export_symbol(qdf_streamfs_close); + +void qdf_streamfs_flush(qdf_streamfs_chan_t chan) +{ + if (chan) + relay_flush(chan); +} + +qdf_export_symbol(qdf_streamfs_flush); + +void qdf_streamfs_reset(qdf_streamfs_chan_t chan) +{ + if (chan) + relay_reset(chan); +} + +qdf_export_symbol(qdf_streamfs_reset); + +void qdf_streamfs_subbufs_consumed(qdf_streamfs_chan_t chan, + unsigned int cpu, + size_t consumed) +{ + if (chan) + relay_subbufs_consumed(chan, cpu, consumed); +} + +qdf_export_symbol(qdf_streamfs_subbufs_consumed); + +void qdf_streamfs_write(qdf_streamfs_chan_t chan, + const void *data, + size_t length) +{ + if (chan) + relay_write(chan, data, length); +} + +qdf_export_symbol(qdf_streamfs_write); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_threads.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_threads.c new file mode 100644 index 0000000000000000000000000000000000000000..2e6463e737133db456387eba84e2d8d3789e1a14 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_threads.c @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_threads + * QCA driver framework (QDF) thread APIs + */ + +/* Include Files */ +#include +#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) +#include +#else +#include +#endif /* KERNEL_VERSION(4, 11, 0) */ +#include +#include +#include +#include +#include +#include + +/* Function declarations and documenation */ + +typedef int (*qdf_thread_os_func)(void *data); + +/** + * qdf_sleep() - sleep + * @ms_interval : Number of milliseconds to suspend the current thread. + * A value of 0 may or may not cause the current thread to yield. + * + * This function suspends the execution of the current thread + * until the specified time out interval elapses. + * + * Return: none + */ +void qdf_sleep(uint32_t ms_interval) +{ + if (in_interrupt()) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s cannot be called from interrupt context!!!", + __func__); + return; + } + msleep_interruptible(ms_interval); +} +qdf_export_symbol(qdf_sleep); + +/** + * qdf_sleep_us() - sleep + * @us_interval : Number of microseconds to suspend the current thread. + * A value of 0 may or may not cause the current thread to yield. + * + * This function suspends the execution of the current thread + * until the specified time out interval elapses. + * + * Return : none + */ +void qdf_sleep_us(uint32_t us_interval) +{ + unsigned long timeout = usecs_to_jiffies(us_interval) + 1; + + if (in_interrupt()) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s cannot be called from interrupt context!!!", + __func__); + return; + } + + while (timeout && !signal_pending(current)) + timeout = schedule_timeout_interruptible(timeout); +} +qdf_export_symbol(qdf_sleep_us); + +/** + * qdf_busy_wait() - busy wait + * @us_interval : Number of microseconds to busy wait. + * + * This function places the current thread in busy wait until the specified + * time out interval elapses. If the interval is greater than 50us on WM, the + * behaviour is undefined. + * + * Return : none + */ +void qdf_busy_wait(uint32_t us_interval) +{ + udelay(us_interval); +} +qdf_export_symbol(qdf_busy_wait); + +#ifdef PF_WAKE_UP_IDLE +void qdf_set_wake_up_idle(bool idle) +{ + set_wake_up_idle(idle); +} +#else +void qdf_set_wake_up_idle(bool idle) +{ +} +#endif /* PF_WAKE_UP_IDLE */ + +qdf_export_symbol(qdf_set_wake_up_idle); + +void qdf_set_user_nice(qdf_thread_t *thread, long nice) +{ + set_user_nice(thread, nice); +} +qdf_export_symbol(qdf_set_user_nice); + +qdf_thread_t *qdf_create_thread(int (*thread_handler)(void *data), void *data, + const char thread_name[]) +{ + struct task_struct *task; + + task = kthread_create(thread_handler, data, thread_name); + + if (IS_ERR(task)) + return NULL; + + return task; +} +qdf_export_symbol(qdf_create_thread); + +static uint16_t qdf_thread_id; + +qdf_thread_t *qdf_thread_run(qdf_thread_func callback, void *context) +{ + struct task_struct *thread; + + thread = kthread_create((qdf_thread_os_func)callback, context, + "qdf %u", qdf_thread_id++); + if (IS_ERR(thread)) + return NULL; + + get_task_struct(thread); + wake_up_process(thread); + + return thread; +} +qdf_export_symbol(qdf_thread_run); + +QDF_STATUS qdf_thread_join(qdf_thread_t *thread) +{ + QDF_STATUS status; + + QDF_BUG(thread); + + status = (QDF_STATUS)kthread_stop(thread); + put_task_struct(thread); + + return status; +} +qdf_export_symbol(qdf_thread_join); + +bool qdf_thread_should_stop(void) +{ + return kthread_should_stop(); +} +qdf_export_symbol(qdf_thread_should_stop); + +int qdf_wake_up_process(qdf_thread_t *thread) +{ + return wake_up_process(thread); +} +qdf_export_symbol(qdf_wake_up_process); + +/* save_stack_trace_tsk() is exported for: + * 1) non-arm architectures + * 2) arm architectures in kernel versions >=4.14 + * 3) backported kernels defining BACKPORTED_EXPORT_SAVE_STACK_TRACE_TSK_ARM + */ +#if ((defined(WLAN_HOST_ARCH_ARM) && !WLAN_HOST_ARCH_ARM) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) || \ + defined(BACKPORTED_EXPORT_SAVE_STACK_TRACE_TSK_ARM)) && \ + defined(CONFIG_STACKTRACE) && !defined(CONFIG_ARCH_STACKWALK) +#define QDF_PRINT_TRACE_COUNT 32 + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) +void qdf_print_thread_trace(qdf_thread_t *thread) +{ + const int spaces = 4; + struct task_struct *task = thread; + unsigned long entries[QDF_PRINT_TRACE_COUNT] = {0}; + struct stack_trace trace = { + .nr_entries = 0, + .skip = 0, + .entries = &entries[0], + .max_entries = QDF_PRINT_TRACE_COUNT, + }; + + save_stack_trace_tsk(task, &trace); + stack_trace_print(entries, trace.nr_entries, spaces); +} +#else +void qdf_print_thread_trace(qdf_thread_t *thread) +{ + const int spaces = 4; + struct task_struct *task = thread; + unsigned long entries[QDF_PRINT_TRACE_COUNT] = {0}; + struct stack_trace trace = { + .nr_entries = 0, + .skip = 0, + .entries = &entries[0], + .max_entries = QDF_PRINT_TRACE_COUNT, + }; + + save_stack_trace_tsk(task, &trace); + print_stack_trace(&trace, spaces); +} +#endif + +#else +void qdf_print_thread_trace(qdf_thread_t *thread) { } +#endif /* KERNEL_VERSION(4, 14, 0) */ +qdf_export_symbol(qdf_print_thread_trace); + +qdf_thread_t *qdf_get_current_task(void) +{ + return current; +} +qdf_export_symbol(qdf_get_current_task); + +int qdf_get_current_pid(void) +{ + return current->pid; +} +qdf_export_symbol(qdf_get_current_pid); + +const char *qdf_get_current_comm(void) +{ + return current->comm; +} +qdf_export_symbol(qdf_get_current_comm); + +void +qdf_thread_set_cpus_allowed_mask(qdf_thread_t *thread, qdf_cpu_mask *new_mask) +{ + set_cpus_allowed_ptr(thread, new_mask); +} + +qdf_export_symbol(qdf_thread_set_cpus_allowed_mask); + +void qdf_cpumask_clear(qdf_cpu_mask *dstp) +{ + cpumask_clear(dstp); +} + +qdf_export_symbol(qdf_cpumask_clear); + +void qdf_cpumask_set_cpu(unsigned int cpu, qdf_cpu_mask *dstp) +{ + cpumask_set_cpu(cpu, dstp); +} + +qdf_export_symbol(qdf_cpumask_set_cpu); + +void qdf_cpumask_setall(qdf_cpu_mask *dstp) +{ + cpumask_setall(dstp); +} + +qdf_export_symbol(qdf_cpumask_setall); + +bool qdf_cpumask_empty(const struct cpumask *srcp) +{ + return cpumask_empty(srcp); +} + +qdf_export_symbol(qdf_cpumask_empty); + +void qdf_cpumask_copy(struct cpumask *dstp, + const struct cpumask *srcp) +{ + return cpumask_copy(dstp, srcp); +} + +qdf_export_symbol(qdf_cpumask_copy); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_trace.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_trace.c new file mode 100644 index 0000000000000000000000000000000000000000..681046ad804ac9e249605d85d3f99cf84512ea12 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_trace.c @@ -0,0 +1,3992 @@ +/* + * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_trace + * QCA driver framework (QDF) trace APIs + * Trace, logging, and debugging definitions and APIs + */ + +/* Include Files */ +#include "qdf_str.h" +#include +#include + +/* macro to map qdf trace levels into the bitmask */ +#define QDF_TRACE_LEVEL_TO_MODULE_BITMASK(_level) ((1 << (_level))) + +#include +#include +static int qdf_pidx = -1; +static bool qdf_log_dump_at_kernel_enable = true; +qdf_declare_param(qdf_log_dump_at_kernel_enable, bool); + +/* This value of 0 will disable the timer by default. */ +static uint32_t qdf_log_flush_timer_period; +qdf_declare_param(qdf_log_flush_timer_period, uint); + +#include "qdf_time.h" +#include "qdf_mc_timer.h" +#include + +/* Global qdf print id */ + +/* Preprocessor definitions and constants */ + +enum qdf_timestamp_unit qdf_log_timestamp_type = QDF_LOG_TIMESTAMP_UNIT; + +#define DP_TRACE_META_DATA_STRLEN 50 + +#ifdef TRACE_RECORD +/* Static and Global variables */ +static spinlock_t ltrace_lock; +/* global qdf trace data */ +static t_qdf_trace_data g_qdf_trace_data; +/* + * all the call back functions for dumping MTRACE messages from ring buffer + * are stored in qdf_trace_cb_table,these callbacks are initialized during init + * only so, we will make a copy of these call back functions and maintain in to + * qdf_trace_restore_cb_table. Incase if we make modifications to + * qdf_trace_cb_table, we can certainly retrieve all the call back functions + * back from Restore Table + */ +static tp_qdf_trace_cb qdf_trace_cb_table[QDF_MODULE_ID_MAX]; +static tp_qdf_trace_cb qdf_trace_restore_cb_table[QDF_MODULE_ID_MAX]; + +#ifdef WLAN_LOGGING_BUFFERS_DYNAMICALLY +static qdf_trace_record_t *g_qdf_trace_tbl; +#else +static qdf_trace_record_t g_qdf_trace_tbl[MAX_QDF_TRACE_RECORDS]; +#endif + +#endif + +#ifdef WLAN_FEATURE_MEMDUMP_ENABLE +static tp_qdf_state_info_cb qdf_state_info_table[QDF_MODULE_ID_MAX]; +#endif + +#ifdef CONFIG_DP_TRACE +/* Static and Global variables */ +#ifdef WLAN_LOGGING_BUFFERS_DYNAMICALLY +static struct qdf_dp_trace_record_s *g_qdf_dp_trace_tbl; +#else +static struct qdf_dp_trace_record_s + g_qdf_dp_trace_tbl[MAX_QDF_DP_TRACE_RECORDS]; +#endif +static spinlock_t l_dp_trace_lock; + +/* + * all the options to configure/control DP trace are + * defined in this structure + */ +static struct s_qdf_dp_trace_data g_qdf_dp_trace_data; +/* + * all the call back functions for dumping DPTRACE messages from ring buffer + * are stored in qdf_dp_trace_cb_table, callbacks are initialized during init + */ +static tp_qdf_dp_trace_cb qdf_dp_trace_cb_table[QDF_DP_TRACE_MAX + 1]; +#endif + +/** + * qdf_snprintf() - wrapper function to snprintf + * @str_buffer: string Buffer + * @size: defines the size of the data record + * @str_format: Format string in which the message to be logged. This format + * string contains printf-like replacement parameters, which follow + * this parameter in the variable argument list. + * + * Return: None + */ +void qdf_snprintf(char *str_buffer, unsigned int size, char *str_format, ...) +{ + snprintf(str_buffer, size, str_format); +} +qdf_export_symbol(qdf_snprintf); + +#ifdef QDF_ENABLE_TRACING + +/** + * qdf_trace_msg() - externally called trace function + * @module: Module identifier a member of the QDF_MODULE_ID + * enumeration that identifies the module issuing the trace message. + * @level: Trace level a member of the QDF_TRACE_LEVEL enumeration + * indicating the severity of the condition causing the trace message + * to be issued. More severe conditions are more likely to be logged. + * @str_format: Format string in which the message to be logged. This format + * string contains printf-like replacement parameters, which follow + * this parameter in the variable argument list. + * + * Checks the level of severity and accordingly prints the trace messages + * + * Return: None + */ +void qdf_trace_msg(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + const char *str_format, ...) +{ + va_list val; + + va_start(val, str_format); + qdf_trace_msg_cmn(qdf_pidx, module, level, str_format, val); + va_end(val); +} +qdf_export_symbol(qdf_trace_msg); + +void qdf_vtrace_msg(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + const char *str_format, va_list val) +{ + qdf_trace_msg_cmn(qdf_pidx, module, level, str_format, val); +} +qdf_export_symbol(qdf_vtrace_msg); + +#define ROW_SIZE 16 +/* Buffer size = data bytes(2 hex chars plus space) + NULL */ +#define BUFFER_SIZE ((QDF_DP_TRACE_RECORD_SIZE * 3) + 1) + +static void __qdf_trace_hex_dump(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + void *data, int buf_len, bool print_ascii) +{ + const u8 *ptr = data; + int i = 0; + + if (!qdf_print_is_verbose_enabled(qdf_pidx, module, level)) + return; + + while (buf_len > 0) { + unsigned char linebuf[BUFFER_SIZE] = {0}; + int linelen = min(buf_len, ROW_SIZE); + + buf_len -= ROW_SIZE; + + hex_dump_to_buffer(ptr, linelen, ROW_SIZE, 1, + linebuf, sizeof(linebuf), print_ascii); + + qdf_trace_msg(module, level, "%.8x: %s", i, linebuf); + ptr += ROW_SIZE; + i += ROW_SIZE; + } +} + +void qdf_trace_hex_dump(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + void *data, int buf_len) +{ + __qdf_trace_hex_dump(module, level, data, buf_len, false); +} + +qdf_export_symbol(qdf_trace_hex_dump); + +void qdf_trace_hex_ascii_dump(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + void *data, int buf_len) +{ + __qdf_trace_hex_dump(module, level, data, buf_len, true); +} + +qdf_export_symbol(qdf_trace_hex_ascii_dump); + +#endif + +#ifdef TRACE_RECORD + +#ifdef WLAN_LOGGING_BUFFERS_DYNAMICALLY +static inline QDF_STATUS allocate_g_qdf_trace_tbl_buffer(void) +{ + g_qdf_trace_tbl = vzalloc(MAX_QDF_TRACE_RECORDS * + sizeof(*g_qdf_trace_tbl)); + QDF_BUG(g_qdf_trace_tbl); + return g_qdf_trace_tbl ? QDF_STATUS_SUCCESS : QDF_STATUS_E_NOMEM; +} + +static inline void free_g_qdf_trace_tbl_buffer(void) +{ + vfree(g_qdf_trace_tbl); + g_qdf_trace_tbl = NULL; +} +#else +static inline QDF_STATUS allocate_g_qdf_trace_tbl_buffer(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void free_g_qdf_trace_tbl_buffer(void) +{ } +#endif +/** + * qdf_trace_enable() - Enable MTRACE for specific modules + * @bitmask_of_module_id: Bitmask according to enum of the modules. + * 32[dec] = 0010 0000 [bin] + * 64[dec] = 0100 0000 [bin] + * 128[dec] = 1000 0000 [bin] + * @enable: can be true or false true implies enabling MTRACE false implies + * disabling MTRACE. + * + * Enable MTRACE for specific modules whose bits are set in bitmask and enable + * is true. if enable is false it disables MTRACE for that module. set the + * bitmask according to enum value of the modules. + * This functions will be called when you issue ioctl as mentioned following + * [iwpriv wlan0 setdumplog ]. + * - Decimal number, i.e. 64 decimal value shows only SME module, + * 128 decimal value shows only PE module, 192 decimal value shows PE and SME. + * + * Return: None + */ +void qdf_trace_enable(uint32_t bitmask_of_module_id, uint8_t enable) +{ + int i; + + if (bitmask_of_module_id) { + for (i = 0; i < QDF_MODULE_ID_MAX; i++) { + if (((bitmask_of_module_id >> i) & 1)) { + if (enable) { + if (NULL != + qdf_trace_restore_cb_table[i]) { + qdf_trace_cb_table[i] = + qdf_trace_restore_cb_table[i]; + } + } else { + qdf_trace_restore_cb_table[i] = + qdf_trace_cb_table[i]; + qdf_trace_cb_table[i] = NULL; + } + } + } + } else { + if (enable) { + for (i = 0; i < QDF_MODULE_ID_MAX; i++) { + if (qdf_trace_restore_cb_table[i]) { + qdf_trace_cb_table[i] = + qdf_trace_restore_cb_table[i]; + } + } + } else { + for (i = 0; i < QDF_MODULE_ID_MAX; i++) { + qdf_trace_restore_cb_table[i] = + qdf_trace_cb_table[i]; + qdf_trace_cb_table[i] = NULL; + } + } + } +} +qdf_export_symbol(qdf_trace_enable); + +/** + * qdf_trace_init() - initializes qdf trace structures and variables + * + * Called immediately after cds_preopen, so that we can start recording HDD + * events ASAP. + * + * Return: None + */ +void qdf_trace_init(void) +{ + uint8_t i; + + if (allocate_g_qdf_trace_tbl_buffer() != QDF_STATUS_SUCCESS) + return; + g_qdf_trace_data.head = INVALID_QDF_TRACE_ADDR; + g_qdf_trace_data.tail = INVALID_QDF_TRACE_ADDR; + g_qdf_trace_data.num = 0; + g_qdf_trace_data.enable = true; + g_qdf_trace_data.dump_count = DEFAULT_QDF_TRACE_DUMP_COUNT; + g_qdf_trace_data.num_since_last_dump = 0; + + for (i = 0; i < QDF_MODULE_ID_MAX; i++) { + qdf_trace_cb_table[i] = NULL; + qdf_trace_restore_cb_table[i] = NULL; + } +} +qdf_export_symbol(qdf_trace_init); + +/** + * qdf_trace_deinit() - frees memory allocated dynamically + * + * Called from cds_deinit, so that we can free the memory and resets + * the variables + * + * Return: None + */ +void qdf_trace_deinit(void) +{ + g_qdf_trace_data.enable = false; + g_qdf_trace_data.num = 0; + g_qdf_trace_data.head = INVALID_QDF_TRACE_ADDR; + g_qdf_trace_data.tail = INVALID_QDF_TRACE_ADDR; + + free_g_qdf_trace_tbl_buffer(); +} + +qdf_export_symbol(qdf_trace_deinit); + +/** + * qdf_trace() - puts the messages in to ring-buffer + * @module: Enum of module, basically module id. + * @code: Code to be recorded + * @session: Session ID of the log + * @data: Actual message contents + * + * This function will be called from each module who wants record the messages + * in circular queue. Before calling this functions make sure you have + * registered your module with qdf through qdf_trace_register function. + * + * Return: None + */ +void qdf_trace(uint8_t module, uint8_t code, uint16_t session, uint32_t data) +{ + tp_qdf_trace_record rec = NULL; + unsigned long flags; + char time[18]; + + if (!g_qdf_trace_data.enable) + return; + + /* if module is not registered, don't record for that module */ + if (!qdf_trace_cb_table[module]) + return; + + qdf_get_time_of_the_day_in_hr_min_sec_usec(time, sizeof(time)); + /* Aquire the lock so that only one thread at a time can fill the ring + * buffer + */ + spin_lock_irqsave(<race_lock, flags); + + g_qdf_trace_data.num++; + + if (g_qdf_trace_data.num > MAX_QDF_TRACE_RECORDS) + g_qdf_trace_data.num = MAX_QDF_TRACE_RECORDS; + + if (INVALID_QDF_TRACE_ADDR == g_qdf_trace_data.head) { + /* first record */ + g_qdf_trace_data.head = 0; + g_qdf_trace_data.tail = 0; + } else { + /* queue is not empty */ + uint32_t tail = g_qdf_trace_data.tail + 1; + + if (MAX_QDF_TRACE_RECORDS == tail) + tail = 0; + + if (g_qdf_trace_data.head == tail) { + /* full */ + if (MAX_QDF_TRACE_RECORDS == ++g_qdf_trace_data.head) + g_qdf_trace_data.head = 0; + } + g_qdf_trace_data.tail = tail; + } + + rec = &g_qdf_trace_tbl[g_qdf_trace_data.tail]; + rec->code = code; + rec->session = session; + rec->data = data; + rec->qtime = qdf_get_log_timestamp(); + scnprintf(rec->time, sizeof(rec->time), "%s", time); + rec->module = module; + rec->pid = (in_interrupt() ? 0 : current->pid); + g_qdf_trace_data.num_since_last_dump++; + spin_unlock_irqrestore(<race_lock, flags); +} +qdf_export_symbol(qdf_trace); + +#ifdef ENABLE_MTRACE_LOG +void qdf_mtrace_log(QDF_MODULE_ID src_module, QDF_MODULE_ID dst_module, + uint16_t message_id, uint8_t vdev_id) +{ + uint32_t trace_log, payload; + static uint16_t counter; + + trace_log = (src_module << 23) | (dst_module << 15) | message_id; + payload = (vdev_id << 16) | counter++; + + QDF_TRACE(src_module, QDF_TRACE_LEVEL_TRACE, "%x %x", + trace_log, payload); +} + +qdf_export_symbol(qdf_mtrace_log); +#endif + +void qdf_mtrace(QDF_MODULE_ID src_module, QDF_MODULE_ID dst_module, + uint16_t message_id, uint8_t vdev_id, uint32_t data) +{ + qdf_trace(src_module, message_id, vdev_id, data); + qdf_mtrace_log(src_module, dst_module, message_id, vdev_id); +} + +qdf_export_symbol(qdf_mtrace); + +/** + * qdf_trace_spin_lock_init() - initializes the lock variable before use + * + * This function will be called from cds_alloc_global_context, we will have lock + * available to use ASAP + * + * Return: None + */ +QDF_STATUS qdf_trace_spin_lock_init(void) +{ + spin_lock_init(<race_lock); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_trace_spin_lock_init); + +/** + * qdf_trace_register() - registers the call back functions + * @module_iD: enum value of module + * @qdf_trace_callback: call back functions to display the messages in + * particular format. + * + * Registers the call back functions to display the messages in particular + * format mentioned in these call back functions. This functions should be + * called by interested module in their init part as we will be ready to + * register as soon as modules are up. + * + * Return: None + */ +void qdf_trace_register(QDF_MODULE_ID module_id, + tp_qdf_trace_cb qdf_trace_callback) +{ + qdf_trace_cb_table[module_id] = qdf_trace_callback; +} +qdf_export_symbol(qdf_trace_register); + +/** + * qdf_trace_dump_all() - Dump data from ring buffer via call back functions + * registered with QDF + * @p_mac: Context of particular module + * @code: Reason code + * @session: Session id of log + * @count: Number of lines to dump starting from tail to head + * + * This function will be called up on issueing ioctl call as mentioned following + * [iwpriv wlan0 dumplog 0 0 ] + * + * - number lines to dump starting from tail to head. + * + * - if anybody wants to know how many messages were + * recorded for particular module/s mentioned by setbit in bitmask from last + * messages. It is optional, if you don't provide then it will dump + * everything from buffer. + * + * Return: None + */ +void qdf_trace_dump_all(void *p_mac, uint8_t code, uint8_t session, + uint32_t count, uint32_t bitmask_of_module) +{ + qdf_trace_record_t p_record; + int32_t i, tail; + + if (!g_qdf_trace_data.enable) { + QDF_TRACE(QDF_MODULE_ID_SYS, + QDF_TRACE_LEVEL_ERROR, "Tracing Disabled"); + return; + } + + QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_INFO, + "DPT: Total Records: %d, Head: %d, Tail: %d", + g_qdf_trace_data.num, g_qdf_trace_data.head, + g_qdf_trace_data.tail); + + /* aquire the lock so that only one thread at a time can read + * the ring buffer + */ + spin_lock(<race_lock); + + if (g_qdf_trace_data.head != INVALID_QDF_TRACE_ADDR) { + i = g_qdf_trace_data.head; + tail = g_qdf_trace_data.tail; + + if (count) { + if (count > g_qdf_trace_data.num) + count = g_qdf_trace_data.num; + if (tail >= (count - 1)) + i = tail - count + 1; + else if (count != MAX_QDF_TRACE_RECORDS) + i = MAX_QDF_TRACE_RECORDS - ((count - 1) - + tail); + } + + p_record = g_qdf_trace_tbl[i]; + /* right now we are not using num_since_last_dump member but + * in future we might re-visit and use this member to track + * how many latest messages got added while we were dumping + * from ring buffer + */ + g_qdf_trace_data.num_since_last_dump = 0; + spin_unlock(<race_lock); + for (;; ) { + if ((code == 0 || (code == p_record.code)) && + (qdf_trace_cb_table[p_record.module])) { + if (0 == bitmask_of_module) { + qdf_trace_cb_table[p_record. + module] (p_mac, + &p_record, + (uint16_t) + i); + } else { + if (bitmask_of_module & + (1 << p_record.module)) { + qdf_trace_cb_table[p_record. + module] + (p_mac, &p_record, + (uint16_t) i); + } + } + } + + if (i == tail) + break; + i += 1; + + spin_lock(<race_lock); + if (MAX_QDF_TRACE_RECORDS == i) { + i = 0; + p_record = g_qdf_trace_tbl[0]; + } else { + p_record = g_qdf_trace_tbl[i]; + } + spin_unlock(<race_lock); + } + } else { + spin_unlock(<race_lock); + } +} +qdf_export_symbol(qdf_trace_dump_all); +#endif + +#ifdef WLAN_FEATURE_MEMDUMP_ENABLE +/** + * qdf_register_debugcb_init() - initializes debug callbacks + * to NULL + * + * Return: None + */ +void qdf_register_debugcb_init(void) +{ + uint8_t i; + + for (i = 0; i < QDF_MODULE_ID_MAX; i++) + qdf_state_info_table[i] = NULL; +} +qdf_export_symbol(qdf_register_debugcb_init); + +/** + * qdf_register_debug_callback() - stores callback handlers to print + * state information + * @module_id: module id of layer + * @qdf_state_infocb: callback to be registered + * + * This function is used to store callback handlers to print + * state information + * + * Return: None + */ +void qdf_register_debug_callback(QDF_MODULE_ID module_id, + tp_qdf_state_info_cb qdf_state_infocb) +{ + qdf_state_info_table[module_id] = qdf_state_infocb; +} +qdf_export_symbol(qdf_register_debug_callback); + +/** + * qdf_state_info_dump_all() - it invokes callback of layer which registered + * its callback to print its state information. + * @buf: buffer pointer to be passed + * @size: size of buffer to be filled + * @driver_dump_size: actual size of buffer used + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS qdf_state_info_dump_all(char *buf, uint16_t size, + uint16_t *driver_dump_size) +{ + uint8_t module, ret = QDF_STATUS_SUCCESS; + uint16_t buf_len = size; + char *buf_ptr = buf; + + for (module = 0; module < QDF_MODULE_ID_MAX; module++) { + if (qdf_state_info_table[module]) { + qdf_state_info_table[module](&buf_ptr, &buf_len); + if (!buf_len) { + ret = QDF_STATUS_E_NOMEM; + break; + } + } + } + + *driver_dump_size = size - buf_len; + return ret; +} +qdf_export_symbol(qdf_state_info_dump_all); +#endif + +#ifdef CONFIG_DP_TRACE + +#ifdef WLAN_LOGGING_BUFFERS_DYNAMICALLY +static inline QDF_STATUS allocate_g_qdf_dp_trace_tbl_buffer(void) +{ + g_qdf_dp_trace_tbl = vzalloc(MAX_QDF_DP_TRACE_RECORDS * + sizeof(*g_qdf_dp_trace_tbl)); + QDF_BUG(g_qdf_dp_trace_tbl); + return g_qdf_dp_trace_tbl ? QDF_STATUS_SUCCESS : QDF_STATUS_E_NOMEM; +} + +static inline void free_g_qdf_dp_trace_tbl_buffer(void) +{ + vfree(g_qdf_dp_trace_tbl); + g_qdf_dp_trace_tbl = NULL; +} +#else +static inline QDF_STATUS allocate_g_qdf_dp_trace_tbl_buffer(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void free_g_qdf_dp_trace_tbl_buffer(void) +{ } +#endif + +#define QDF_DP_TRACE_PREPEND_STR_SIZE 100 +/* + * one dp trace record can't be greater than 300 bytes. + * Max Size will be QDF_DP_TRACE_PREPEND_STR_SIZE(100) + BUFFER_SIZE(121). + * Always make sure to change this QDF_DP_TRACE_MAX_RECORD_SIZE + * value accordingly whenever above two mentioned MACRO value changes. + */ +#define QDF_DP_TRACE_MAX_RECORD_SIZE 300 + +static void qdf_dp_unused(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info) +{ + qdf_print("%s: QDF_DP_TRACE_MAX event should not be generated", + __func__); +} + +/** + * qdf_dp_trace_init() - enables the DP trace + * @live_mode_config: live mode configuration + * @thresh: high throughput threshold for disabling live mode + * @thresh_time_limit: max time to wait before deciding if thresh is crossed + * @verbosity: dptrace verbosity level + * @proto_bitmap: bitmap to enable/disable specific protocols + * + * Called during driver load to init dptrace + * + * A brief note on the 'thresh' param - + * Total # of packets received in a bandwidth timer interval beyond which + * DP Trace logging for data packets (including ICMP) will be disabled. + * In memory logging will still continue for these packets. Other packets for + * which proto.bitmap is set will continue to be recorded in logs and in memory. + + * Return: None + */ +void qdf_dp_trace_init(bool live_mode_config, uint8_t thresh, + uint16_t time_limit, uint8_t verbosity, + uint32_t proto_bitmap) +{ + uint8_t i; + + if (allocate_g_qdf_dp_trace_tbl_buffer() != QDF_STATUS_SUCCESS) { + QDF_TRACE_ERROR(QDF_MODULE_ID_QDF, + "Failed!!! DP Trace buffer allocation"); + return; + } + qdf_dp_trace_spin_lock_init(); + qdf_dp_trace_clear_buffer(); + g_qdf_dp_trace_data.enable = true; + g_qdf_dp_trace_data.no_of_record = 1; + + g_qdf_dp_trace_data.live_mode_config = live_mode_config; + g_qdf_dp_trace_data.live_mode = live_mode_config; + g_qdf_dp_trace_data.high_tput_thresh = thresh; + g_qdf_dp_trace_data.thresh_time_limit = time_limit; + g_qdf_dp_trace_data.proto_bitmap = proto_bitmap; + g_qdf_dp_trace_data.verbosity = verbosity; + g_qdf_dp_trace_data.ini_conf_verbosity = verbosity; + + for (i = 0; i < ARRAY_SIZE(qdf_dp_trace_cb_table); i++) + qdf_dp_trace_cb_table[i] = qdf_dp_display_record; + + qdf_dp_trace_cb_table[QDF_DP_TRACE_HDD_TX_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_HDD_RX_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_TX_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_RX_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_DROP_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_LI_DP_RX_PACKET_RECORD] = + qdf_dp_display_data_pkt_record; + + qdf_dp_trace_cb_table[QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_FREE_PACKET_PTR_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD] = + qdf_dp_display_ptr_record; + qdf_dp_trace_cb_table[QDF_DP_TRACE_EAPOL_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_DHCP_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_ARP_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_ICMP_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_ICMPv6_PACKET_RECORD] = + qdf_dp_display_proto_pkt; + qdf_dp_trace_cb_table[QDF_DP_TRACE_MGMT_PACKET_RECORD] = + qdf_dp_display_mgmt_pkt; + qdf_dp_trace_cb_table[QDF_DP_TRACE_TX_CREDIT_RECORD] = + qdf_dp_display_credit_record; + qdf_dp_trace_cb_table[QDF_DP_TRACE_EVENT_RECORD] = + qdf_dp_display_event_record; + + qdf_dp_trace_cb_table[QDF_DP_TRACE_MAX] = qdf_dp_unused; +} +qdf_export_symbol(qdf_dp_trace_init); + +void qdf_dp_trace_deinit(void) +{ + if (!g_qdf_dp_trace_data.enable) + return; + spin_lock_bh(&l_dp_trace_lock); + g_qdf_dp_trace_data.enable = false; + g_qdf_dp_trace_data.no_of_record = 0; + spin_unlock_bh(&l_dp_trace_lock); + + free_g_qdf_dp_trace_tbl_buffer(); +} +/** + * qdf_dp_trace_set_value() - Configure the value to control DP trace + * @proto_bitmap: defines the protocol to be tracked + * @no_of_records: defines the nth packet which is traced + * @verbosity: defines the verbosity level + * + * Return: None + */ +void qdf_dp_trace_set_value(uint32_t proto_bitmap, uint8_t no_of_record, + uint8_t verbosity) +{ + g_qdf_dp_trace_data.proto_bitmap = proto_bitmap; + g_qdf_dp_trace_data.no_of_record = no_of_record; + g_qdf_dp_trace_data.verbosity = verbosity; + g_qdf_dp_trace_data.dynamic_verbosity_modify = true; +} +qdf_export_symbol(qdf_dp_trace_set_value); + +/** + * qdf_dp_trace_set_verbosity() - set verbosity value + * + * @val: Value to set + * + * Return: Null + */ +void qdf_dp_trace_set_verbosity(uint32_t val) +{ + g_qdf_dp_trace_data.verbosity = val; +} +qdf_export_symbol(qdf_dp_trace_set_verbosity); + +/** + * qdf_dp_get_verbosity) - get verbosity value + * + * Return: int + */ +uint8_t qdf_dp_get_verbosity(void) +{ + return g_qdf_dp_trace_data.verbosity; +} +qdf_export_symbol(qdf_dp_get_verbosity); + +/** + * qdf_dp_set_proto_bitmap() - set dp trace proto bitmap + * + * @val : unsigned bitmap to set + * + * Return: proto bitmap + */ +void qdf_dp_set_proto_bitmap(uint32_t val) +{ + g_qdf_dp_trace_data.proto_bitmap = val; +} +qdf_export_symbol(qdf_dp_set_proto_bitmap); + +void qdf_dp_set_proto_event_bitmap(uint32_t value) +{ + g_qdf_dp_trace_data.proto_event_bitmap = value; +} + +qdf_export_symbol(qdf_dp_set_proto_event_bitmap); + +static uint32_t qdf_dp_get_proto_event_bitmap(void) +{ + return g_qdf_dp_trace_data.proto_event_bitmap; +} + +/** + * qdf_dp_set_no_of_record() - set dp trace no_of_record + * + * @val : unsigned no_of_record to set + * + * Return: null + */ +void qdf_dp_set_no_of_record(uint32_t val) +{ + g_qdf_dp_trace_data.no_of_record = val; +} +qdf_export_symbol(qdf_dp_set_no_of_record); + +/** + * qdf_dp_get_no_of_record() - get dp trace no_of_record + * + * Return: number of records + */ +uint8_t qdf_dp_get_no_of_record(void) +{ + return g_qdf_dp_trace_data.no_of_record; +} +qdf_export_symbol(qdf_dp_get_no_of_record); + + +/** + * qdf_dp_trace_verbosity_check() - check whether verbosity level is enabled + * @code: defines the event + * + * In High verbosity all codes are logged. + * For Med/Low and Default case code which has + * less value than corresponding verbosity codes + * are logged. + * + * Return: true or false depends on whether tracing enabled + */ +static bool qdf_dp_trace_verbosity_check(enum QDF_DP_TRACE_ID code) +{ + switch (g_qdf_dp_trace_data.verbosity) { + case QDF_DP_TRACE_VERBOSITY_HIGH: + return true; + case QDF_DP_TRACE_VERBOSITY_MEDIUM: + if (code <= QDF_DP_TRACE_MED_VERBOSITY) + return true; + return false; + case QDF_DP_TRACE_VERBOSITY_LOW: + if (code <= QDF_DP_TRACE_LOW_VERBOSITY) + return true; + return false; + case QDF_DP_TRACE_VERBOSITY_ULTRA_LOW: + if (code <= QDF_DP_TRACE_ULTRA_LOW_VERBOSITY) + return true; + return false; + case QDF_DP_TRACE_VERBOSITY_BASE: + if (code <= QDF_DP_TRACE_BASE_VERBOSITY) + return true; + return false; + default: + return false; + } +} + +/** + * qdf_dp_get_proto_bitmap() - get dp trace proto bitmap + * + * Return: proto bitmap + */ +uint32_t qdf_dp_get_proto_bitmap(void) +{ + if (g_qdf_dp_trace_data.enable) + return g_qdf_dp_trace_data.proto_bitmap; + else + return 0; +} + +/** + * qdf_dp_trace_set_track() - Marks whether the packet needs to be traced + * @nbuf: defines the netbuf + * @dir: direction + * + * Return: None + */ +void qdf_dp_trace_set_track(qdf_nbuf_t nbuf, enum qdf_proto_dir dir) +{ + uint32_t count = 0; + + if (!g_qdf_dp_trace_data.enable) + return; + + spin_lock_bh(&l_dp_trace_lock); + if (QDF_TX == dir) + count = ++g_qdf_dp_trace_data.tx_count; + else if (QDF_RX == dir) + count = ++g_qdf_dp_trace_data.rx_count; + + if ((g_qdf_dp_trace_data.no_of_record != 0) && + (count % g_qdf_dp_trace_data.no_of_record == 0)) { + if (QDF_TX == dir) + QDF_NBUF_CB_TX_DP_TRACE(nbuf) = 1; + else if (QDF_RX == dir) + QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1; + } + spin_unlock_bh(&l_dp_trace_lock); +} +qdf_export_symbol(qdf_dp_trace_set_track); + +/* Number of bytes to be grouped together while printing DP-Trace data */ +#define QDF_DUMP_DP_GROUP_SIZE 6 + +/** + * dump_dp_hex_trace() - Display the data in buffer + * @prepend_str: string to prepend the hexdump with. + * @inbuf: buffer which contains data to be displayed + * @inbuf_len: defines the size of the data to be displayed + * + * Return: None + */ +static void +dump_dp_hex_trace(char *prepend_str, uint8_t *inbuf, uint8_t inbuf_len) +{ + unsigned char outbuf[BUFFER_SIZE]; + const uint8_t *inbuf_ptr = inbuf; + char *outbuf_ptr = outbuf; + int outbytes_written = 0; + + qdf_mem_zero(outbuf, sizeof(outbuf)); + do { + outbytes_written += scnprintf(outbuf_ptr, + BUFFER_SIZE - outbytes_written, + "%02x", *inbuf_ptr); + outbuf_ptr = outbuf + outbytes_written; + + if ((inbuf_ptr - inbuf) && + (inbuf_ptr - inbuf + 1) % QDF_DUMP_DP_GROUP_SIZE == 0) { + outbytes_written += scnprintf(outbuf_ptr, + BUFFER_SIZE - outbytes_written, + " "); + outbuf_ptr = outbuf + outbytes_written; + } + inbuf_ptr++; + } while (inbuf_ptr < (inbuf + inbuf_len)); + DPTRACE_PRINT("%s %s", prepend_str, outbuf); +} + +/** + * qdf_dp_code_to_string() - convert dptrace code to string + * @code: dptrace code + * + * Return: string version of code + */ +static +const char *qdf_dp_code_to_string(enum QDF_DP_TRACE_ID code) +{ + switch (code) { + case QDF_DP_TRACE_DROP_PACKET_RECORD: + return "DROP:"; + case QDF_DP_TRACE_EAPOL_PACKET_RECORD: + return "EAPOL:"; + case QDF_DP_TRACE_DHCP_PACKET_RECORD: + return "DHCP:"; + case QDF_DP_TRACE_ARP_PACKET_RECORD: + return "ARP:"; + case QDF_DP_TRACE_ICMP_PACKET_RECORD: + return "ICMP:"; + case QDF_DP_TRACE_ICMPv6_PACKET_RECORD: + return "ICMPv6:"; + case QDF_DP_TRACE_MGMT_PACKET_RECORD: + return "MGMT:"; + case QDF_DP_TRACE_TX_CREDIT_RECORD: + return "CREDIT:"; + case QDF_DP_TRACE_EVENT_RECORD: + return "EVENT:"; + case QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD: + return "HDD: TX: PTR:"; + case QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD: + return "LI_DP: TX: PTR:"; + case QDF_DP_TRACE_HDD_TX_PACKET_RECORD: + return "HDD: TX: DATA:"; + case QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD: + case QDF_DP_TRACE_TX_PACKET_RECORD: + return "TX:"; + case QDF_DP_TRACE_CE_PACKET_PTR_RECORD: + return "CE: TX: PTR:"; + case QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD: + return "CE: TX: FAST: PTR:"; + case QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD: + return "CE: TX: FAST: ERR:"; + case QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD: + case QDF_DP_TRACE_FREE_PACKET_PTR_RECORD: + return "FREE: TX: PTR:"; + case QDF_DP_TRACE_RX_HTT_PACKET_PTR_RECORD: + return "HTT: RX: PTR:"; + case QDF_DP_TRACE_RX_OFFLOAD_HTT_PACKET_PTR_RECORD: + return "HTT: RX: OF: PTR:"; + case QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD: + return "HDD: RX: PTR:"; + case QDF_DP_TRACE_RX_LI_DP_PACKET_PTR_RECORD: + return "LI_DP: RX: PTR:"; + case QDF_DP_TRACE_HDD_RX_PACKET_RECORD: + return "HDD: RX: DATA:"; + case QDF_DP_TRACE_LI_DP_NULL_RX_PACKET_RECORD: + return "LI_DP_NULL: RX: DATA:"; + case QDF_DP_TRACE_LI_DP_RX_PACKET_RECORD: + case QDF_DP_TRACE_RX_PACKET_RECORD: + return "RX:"; + case QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD: + return "TXRX: TX: Q: PTR:"; + case QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD: + return "TXRX: TX: PTR:"; + case QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD: + return "TXRX: TX: FAST: PTR:"; + case QDF_DP_TRACE_HTT_PACKET_PTR_RECORD: + return "HTT: TX: PTR:"; + case QDF_DP_TRACE_HTC_PACKET_PTR_RECORD: + return "HTC: TX: PTR:"; + case QDF_DP_TRACE_HIF_PACKET_PTR_RECORD: + return "HIF: TX: PTR:"; + case QDF_DP_TRACE_RX_TXRX_PACKET_PTR_RECORD: + return "TXRX: RX: PTR:"; + case QDF_DP_TRACE_HDD_TX_TIMEOUT: + return "HDD: STA: TO:"; + case QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT: + return "HDD: SAP: TO:"; + default: + return "Invalid"; + } +} + +/** + * qdf_dp_dir_to_str() - convert direction to string + * @dir: direction + * + * Return: string version of direction + */ +static const char *qdf_dp_dir_to_str(enum qdf_proto_dir dir) +{ + switch (dir) { + case QDF_TX: + return " --> "; + case QDF_RX: + return " <-- "; + default: + return "invalid"; + } +} + +static const char *qdf_dp_credit_source_to_str( + enum QDF_CREDIT_UPDATE_SOURCE source) +{ + switch (source) { + case QDF_TX_SCHED: + return "TX SCHED"; + case QDF_TX_COMP: + return "TX COMP"; + case QDF_TX_CREDIT_UPDATE: + return "CREDIT UP"; + case QDF_TX_HTT_MSG: + return "HTT TX MSG"; + case QDF_HTT_ATTACH: + return "HTT ATTACH"; + default: + return "invalid"; + } +} + +static const char *qdf_dp_operation_to_str(enum QDF_CREDIT_OPERATION op) +{ + switch (op) { + case QDF_CREDIT_INC: + return "+"; + case QDF_CREDIT_DEC: + return "-"; + case QDF_CREDIT_ABS: + return "ABS"; + default: + return "invalid"; + } +} + +/** + * qdf_dp_type_to_str() - convert packet type to string + * @type: type + * + * Return: string version of packet type + */ +static const char *qdf_dp_type_to_str(enum qdf_proto_type type) +{ + switch (type) { + case QDF_PROTO_TYPE_DHCP: + return "DHCP"; + case QDF_PROTO_TYPE_EAPOL: + return "EAPOL"; + case QDF_PROTO_TYPE_ARP: + return "ARP"; + case QDF_PROTO_TYPE_ICMP: + return "ICMP"; + case QDF_PROTO_TYPE_ICMPv6: + return "ICMPv6"; + case QDF_PROTO_TYPE_MGMT: + return "MGMT"; + case QDF_PROTO_TYPE_EVENT: + return "EVENT"; + default: + return "invalid"; + } +} + +/** + * qdf_dp_subtype_to_str() - convert packet subtype to string + * @type: type + * + * Return: string version of packet subtype + */ +static const char *qdf_dp_subtype_to_str(enum qdf_proto_subtype subtype) +{ + switch (subtype) { + case QDF_PROTO_EAPOL_M1: + return "M1"; + case QDF_PROTO_EAPOL_M2: + return "M2"; + case QDF_PROTO_EAPOL_M3: + return "M3"; + case QDF_PROTO_EAPOL_M4: + return "M4"; + case QDF_PROTO_DHCP_DISCOVER: + return "DISC"; + case QDF_PROTO_DHCP_REQUEST: + return "REQ"; + case QDF_PROTO_DHCP_OFFER: + return "OFF"; + case QDF_PROTO_DHCP_ACK: + return "ACK"; + case QDF_PROTO_DHCP_NACK: + return "NACK"; + case QDF_PROTO_DHCP_RELEASE: + return "REL"; + case QDF_PROTO_DHCP_INFORM: + return "INFORM"; + case QDF_PROTO_DHCP_DECLINE: + return "DECL"; + case QDF_PROTO_ARP_REQ: + case QDF_PROTO_ICMP_REQ: + case QDF_PROTO_ICMPV6_REQ: + return "REQ"; + case QDF_PROTO_ARP_RES: + case QDF_PROTO_ICMP_RES: + case QDF_PROTO_ICMPV6_RES: + return "RSP"; + case QDF_PROTO_ICMPV6_RS: + return "RS"; + case QDF_PROTO_ICMPV6_RA: + return "RA"; + case QDF_PROTO_ICMPV6_NS: + return "NS"; + case QDF_PROTO_ICMPV6_NA: + return "NA"; + case QDF_PROTO_MGMT_ASSOC: + return "ASSOC"; + case QDF_PROTO_MGMT_DISASSOC: + return "DISASSOC"; + case QDF_PROTO_MGMT_AUTH: + return "AUTH"; + case QDF_PROTO_MGMT_DEAUTH: + return "DEAUTH"; + case QDF_ROAM_SYNCH: + return "ROAM SYNCH"; + case QDF_ROAM_COMPLETE: + return "ROAM COMP"; + case QDF_ROAM_EVENTID: + return "ROAM EVENTID"; + default: + return "invalid"; + } +} + +/** + * qdf_dp_enable_check() - check if dptrace, TX/RX tracing is enabled + * @nbuf: nbuf + * @code: dptrace code + * @dir: TX or RX direction + * + * Return: true/false + */ +static bool qdf_dp_enable_check(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code, + enum qdf_proto_dir dir) +{ + /* Return when Dp trace is not enabled */ + if (!g_qdf_dp_trace_data.enable) + return false; + + if (qdf_dp_trace_verbosity_check(code) == false) + return false; + + if (nbuf && (dir == QDF_TX && ((QDF_NBUF_CB_TX_DP_TRACE(nbuf) == 0) || + (QDF_NBUF_CB_TX_PACKET_TRACK(nbuf) != + QDF_NBUF_TX_PKT_DATA_TRACK)))) + return false; + + if (nbuf && (dir == QDF_RX && (QDF_NBUF_CB_RX_DP_TRACE(nbuf) == 0))) + return false; + + /* + * Special packets called with NULL nbuf and this API is expected to + * return true + */ + return true; +} + +/** + * qdf_dp_trace_fill_meta_str() - fill up a common meta string + * @prepend_str: pointer to string + * @size: size of prepend_str + * @rec_index: index of record + * @info: info related to the record + * @record: pointer to the record + * + * Return: ret value from scnprintf + */ +static inline +int qdf_dp_trace_fill_meta_str(char *prepend_str, int size, + int rec_index, uint8_t info, + struct qdf_dp_trace_record_s *record) +{ + char buffer[20]; + int ret = 0; + bool live = info & QDF_DP_TRACE_RECORD_INFO_LIVE ? true : false; + bool throttled = info & QDF_DP_TRACE_RECORD_INFO_THROTTLED ? + true : false; + + scnprintf(buffer, sizeof(buffer), "%llu", record->time); + ret = scnprintf(prepend_str, size, + "%s DPT: %04d:%02d%s %s", + throttled ? "*" : "", + rec_index, + record->pdev_id, + live ? "" : buffer, + qdf_dp_code_to_string(record->code)); + + return ret; +} + +/** + * qdf_dp_fill_record_data() - fill meta data and data into the record + * @rec: pointer to record data + * @data: pointer to data + * @data_size: size of the data + * @meta_data: pointer to metadata + * @metadata_size: size of metadata + * + * Should be called from within a spin_lock for the qdf record. + * Fills up rec->data with |metadata|data| + * + * Return: none + */ +static void qdf_dp_fill_record_data + (struct qdf_dp_trace_record_s *rec, + uint8_t *data, uint8_t data_size, + uint8_t *meta_data, uint8_t metadata_size) +{ + int32_t available = QDF_DP_TRACE_RECORD_SIZE; + uint8_t *rec_data = rec->data; + uint8_t data_to_copy = 0; + + qdf_mem_zero(rec_data, QDF_DP_TRACE_RECORD_SIZE); + + /* copy meta data */ + if (meta_data) { + if (metadata_size > available) { + QDF_TRACE_WARN(QDF_MODULE_ID_QDF, + "%s: meta data does not fit into the record", + __func__); + goto end; + } + qdf_mem_copy(rec_data, meta_data, metadata_size); + available = available - metadata_size; + } else { + metadata_size = 0; + } + + /* copy data */ + if (data && (data_size > 0) && (available > 0)) { + data_to_copy = data_size; + if (data_size > available) + data_to_copy = available; + qdf_mem_copy(&rec_data[metadata_size], data, data_to_copy); + } +end: + rec->size = data_to_copy; +} + +/** + * qdf_dp_add_record() - add dp trace record + * @code: dptrace code + * @pdev_id: pdev_id + * @print: true to print it in kmsg + * @data: data pointer + * @data_size: size of data to be copied + * @meta_data: meta data to be prepended to data + * @metadata_size: sizeof meta data + * @print: whether to print record + * + * Return: none + */ +static void qdf_dp_add_record(enum QDF_DP_TRACE_ID code, uint8_t pdev_id, + uint8_t *data, uint8_t data_size, + uint8_t *meta_data, uint8_t metadata_size, + bool print) + +{ + struct qdf_dp_trace_record_s *rec = NULL; + int index; + bool print_this_record = false; + u8 info = 0; + + if (code >= QDF_DP_TRACE_MAX) { + QDF_TRACE_ERROR(QDF_MODULE_ID_QDF, + "invalid record code %u, max code %u", + code, QDF_DP_TRACE_MAX); + return; + } + + spin_lock_bh(&l_dp_trace_lock); + + if (print || g_qdf_dp_trace_data.force_live_mode) { + print_this_record = true; + } else if (g_qdf_dp_trace_data.live_mode == 1) { + print_this_record = true; + g_qdf_dp_trace_data.print_pkt_cnt++; + if (g_qdf_dp_trace_data.print_pkt_cnt > + g_qdf_dp_trace_data.high_tput_thresh) { + g_qdf_dp_trace_data.live_mode = 0; + g_qdf_dp_trace_data.verbosity = + QDF_DP_TRACE_VERBOSITY_ULTRA_LOW; + info |= QDF_DP_TRACE_RECORD_INFO_THROTTLED; + } + } + + g_qdf_dp_trace_data.num++; + + if (g_qdf_dp_trace_data.num > MAX_QDF_DP_TRACE_RECORDS) + g_qdf_dp_trace_data.num = MAX_QDF_DP_TRACE_RECORDS; + + if (INVALID_QDF_DP_TRACE_ADDR == g_qdf_dp_trace_data.head) { + /* first record */ + g_qdf_dp_trace_data.head = 0; + g_qdf_dp_trace_data.tail = 0; + } else { + /* queue is not empty */ + g_qdf_dp_trace_data.tail++; + + if (MAX_QDF_DP_TRACE_RECORDS == g_qdf_dp_trace_data.tail) + g_qdf_dp_trace_data.tail = 0; + + if (g_qdf_dp_trace_data.head == g_qdf_dp_trace_data.tail) { + /* full */ + if (MAX_QDF_DP_TRACE_RECORDS == + ++g_qdf_dp_trace_data.head) + g_qdf_dp_trace_data.head = 0; + } + } + + rec = &g_qdf_dp_trace_tbl[g_qdf_dp_trace_data.tail]; + index = g_qdf_dp_trace_data.tail; + rec->code = code; + rec->pdev_id = pdev_id; + rec->size = 0; + qdf_dp_fill_record_data(rec, data, data_size, + meta_data, metadata_size); + rec->time = qdf_get_log_timestamp(); + rec->pid = (in_interrupt() ? 0 : current->pid); + + if (rec->code >= QDF_DP_TRACE_MAX) { + QDF_TRACE_ERROR(QDF_MODULE_ID_QDF, + "invalid record code %u, max code %u", + rec->code, QDF_DP_TRACE_MAX); + return; + } + + spin_unlock_bh(&l_dp_trace_lock); + + info |= QDF_DP_TRACE_RECORD_INFO_LIVE; + if (print_this_record) + qdf_dp_trace_cb_table[rec->code] (rec, index, + QDF_TRACE_DEFAULT_PDEV_ID, info); +} + +/** + * qdf_get_rate_limit_by_type() - Get the rate limit by pkt type + * @type: packet type + * + * Return: Rate limit value for a particular packet type + */ +static inline +uint8_t qdf_get_rate_limit_by_type(uint8_t type) +{ + switch (type) { + case QDF_PROTO_TYPE_DHCP: + return QDF_MAX_DHCP_PKTS_PER_SEC; + case QDF_PROTO_TYPE_EAPOL: + return QDF_MAX_EAPOL_PKTS_PER_SEC; + case QDF_PROTO_TYPE_ARP: + return QDF_MAX_ARP_PKTS_PER_SEC; + case QDF_PROTO_TYPE_DNS: + return QDF_MAX_DNS_PKTS_PER_SEC; + default: + return QDF_MAX_OTHER_PKTS_PER_SEC; + } +} + +/** + * qdf_get_pkt_type_string() - Get the string based on pkt type + * @type: packet type + * @subtype: packet subtype + * + * Return: String based on pkt type + */ +static +uint8_t *qdf_get_pkt_type_string(uint8_t type, uint8_t subtype) +{ + switch (subtype) { + case QDF_PROTO_EAPOL_M1: + return "EAPOL-1"; + case QDF_PROTO_EAPOL_M2: + return "EAPOL-2"; + case QDF_PROTO_EAPOL_M3: + return "EAPOL-3"; + case QDF_PROTO_EAPOL_M4: + return "EAPOL-4"; + case QDF_PROTO_DHCP_DISCOVER: + return "DHCP-D"; + case QDF_PROTO_DHCP_REQUEST: + return "DHCP-R"; + case QDF_PROTO_DHCP_OFFER: + return "DHCP-O"; + case QDF_PROTO_DHCP_ACK: + return "DHCP-A"; + case QDF_PROTO_DHCP_NACK: + return "DHCP-NA"; + case QDF_PROTO_DHCP_RELEASE: + return "DHCP-REL"; + case QDF_PROTO_DHCP_INFORM: + return "DHCP-IN"; + case QDF_PROTO_DHCP_DECLINE: + return "DHCP-DEC"; + case QDF_PROTO_ARP_REQ: + return "ARP-RQ"; + case QDF_PROTO_ARP_RES: + return "ARP-RS"; + case QDF_PROTO_DNS_QUERY: + return "DNS_Q"; + case QDF_PROTO_DNS_RES: + return "DNS_RS"; + default: + switch (type) { + case QDF_PROTO_TYPE_EAPOL: + return "EAP"; + case QDF_PROTO_TYPE_DHCP: + return "DHCP"; + case QDF_PROTO_TYPE_ARP: + return "ARP"; + case QDF_PROTO_TYPE_DNS: + return "DNS"; + default: + return "UNKNOWN"; + } + } +} + +/** + * qdf_get_pkt_status_string() - Get the string based on pkt status + * @status: packet status + * + * Return: String based on pkt status + */ +static +uint8_t *qdf_get_pkt_status_string(uint8_t status) +{ + switch (status) { + case QDF_TX_RX_STATUS_INVALID: + return "inv"; + case QDF_TX_RX_STATUS_OK: + return "succ"; + case QDF_TX_RX_STATUS_FW_DISCARD: + return "disc"; + case QDF_TX_RX_STATUS_NO_ACK: + return "nack"; + case QDF_TX_RX_STATUS_DROP: + return "drop"; + default: + return "unknown"; + } +} + +/** + * qdf_dp_log_proto_pkt_info() - Send diag log with pkt info + * @sa: Source MAC address + * @da: Destination MAC address + * @type: packet type + * @subtype: packet subtype + * @dir: tx or rx + * @msdu_id: MSDU id + * @status: status code + * + * Return: none + */ +void qdf_dp_log_proto_pkt_info(uint8_t *sa, uint8_t *da, uint8_t type, + uint8_t subtype, uint8_t dir, uint16_t msdu_id, + uint8_t status) +{ + uint8_t pkt_rate_limit; + static ulong last_ticks_tx[QDF_PROTO_SUBTYPE_MAX] = {0}; + static ulong last_ticks_rx[QDF_PROTO_SUBTYPE_MAX] = {0}; + ulong curr_ticks = jiffies; + + pkt_rate_limit = qdf_get_rate_limit_by_type(type); + + if ((dir == QDF_TX && + !time_after(curr_ticks, + last_ticks_tx[subtype] + HZ / pkt_rate_limit)) || + (dir == QDF_RX && + !time_after(curr_ticks, + last_ticks_rx[subtype] + HZ / pkt_rate_limit))) + return; + + if (dir == QDF_TX) + last_ticks_tx[subtype] = curr_ticks; + else + last_ticks_rx[subtype] = curr_ticks; + + if (status == QDF_TX_RX_STATUS_INVALID) + qdf_nofl_info("%s %s: SA:" QDF_MAC_ADDR_FMT " DA:" QDF_MAC_ADDR_FMT, + qdf_get_pkt_type_string(type, subtype), + dir ? "RX" : "TX", QDF_MAC_ADDR_REF(sa), + QDF_MAC_ADDR_REF(da)); + else + qdf_nofl_info("%s %s: SA:" QDF_MAC_ADDR_FMT " DA:" QDF_MAC_ADDR_FMT " msdu_id:%d status: %s", + qdf_get_pkt_type_string(type, subtype), + dir ? "RX" : "TX", QDF_MAC_ADDR_REF(sa), + QDF_MAC_ADDR_REF(da), msdu_id, + qdf_get_pkt_status_string(status)); +} + +qdf_export_symbol(qdf_dp_log_proto_pkt_info); + +/** + * qdf_log_icmpv6_pkt() - log ICMPv6 packet + * @vdev_id: ID of the vdev + * @skb: skb pointer + * @dir: direction + * @pdev_id: ID of the pdev + * + * Return: true/false + */ +static bool qdf_log_icmpv6_pkt(uint8_t vdev_id, struct sk_buff *skb, + enum qdf_proto_dir dir, uint8_t pdev_id) +{ + enum qdf_proto_subtype subtype; + + if ((qdf_dp_get_proto_bitmap() & QDF_NBUF_PKT_TRAC_TYPE_ICMPv6) && + ((dir == QDF_TX && QDF_NBUF_CB_PACKET_TYPE_ICMPv6 == + QDF_NBUF_CB_GET_PACKET_TYPE(skb)) || + (dir == QDF_RX && qdf_nbuf_is_icmpv6_pkt(skb) == true))) { + + subtype = qdf_nbuf_get_icmpv6_subtype(skb); + + QDF_NBUF_CB_DP_TRACE_PRINT(skb) = false; + if (dir == QDF_TX) + QDF_NBUF_CB_TX_DP_TRACE(skb) = 1; + else if (dir == QDF_RX) + QDF_NBUF_CB_RX_DP_TRACE(skb) = 1; + + DPTRACE(qdf_dp_trace_proto_pkt( + QDF_DP_TRACE_ICMPv6_PACKET_RECORD, + vdev_id, (skb->data + QDF_NBUF_SRC_MAC_OFFSET), + (skb->data + QDF_NBUF_DEST_MAC_OFFSET), + QDF_PROTO_TYPE_ICMPv6, subtype, dir, pdev_id, false)); + + switch (subtype) { + case QDF_PROTO_ICMPV6_REQ: + g_qdf_dp_trace_data.icmpv6_req++; + break; + case QDF_PROTO_ICMPV6_RES: + g_qdf_dp_trace_data.icmpv6_resp++; + break; + case QDF_PROTO_ICMPV6_RS: + g_qdf_dp_trace_data.icmpv6_rs++; + break; + case QDF_PROTO_ICMPV6_RA: + g_qdf_dp_trace_data.icmpv6_ra++; + break; + case QDF_PROTO_ICMPV6_NS: + g_qdf_dp_trace_data.icmpv6_ns++; + break; + case QDF_PROTO_ICMPV6_NA: + g_qdf_dp_trace_data.icmpv6_na++; + break; + default: + break; + } + return true; + } + + return false; +} + +/** + * qdf_log_icmp_pkt() - log ICMP packet + * @vdev_id: ID of the vdev + * @skb: skb pointer + * @dir: direction + * @pdev_id: ID of the pdev + * + * Return: true/false + */ +static bool qdf_log_icmp_pkt(uint8_t vdev_id, struct sk_buff *skb, + enum qdf_proto_dir dir, uint8_t pdev_id) +{ + enum qdf_proto_subtype proto_subtype; + + if ((qdf_dp_get_proto_bitmap() & QDF_NBUF_PKT_TRAC_TYPE_ICMP) && + (qdf_nbuf_is_icmp_pkt(skb) == true)) { + + QDF_NBUF_CB_DP_TRACE_PRINT(skb) = false; + proto_subtype = qdf_nbuf_get_icmp_subtype(skb); + + if (QDF_TX == dir) + QDF_NBUF_CB_TX_DP_TRACE(skb) = 1; + else if (QDF_RX == dir) + QDF_NBUF_CB_RX_DP_TRACE(skb) = 1; + + DPTRACE(qdf_dp_trace_proto_pkt(QDF_DP_TRACE_ICMP_PACKET_RECORD, + vdev_id, + skb->data + + QDF_NBUF_SRC_MAC_OFFSET, + skb->data + + QDF_NBUF_DEST_MAC_OFFSET, + QDF_PROTO_TYPE_ICMP, + proto_subtype, dir, pdev_id, + false)); + + if (proto_subtype == QDF_PROTO_ICMP_REQ) + g_qdf_dp_trace_data.icmp_req++; + else + g_qdf_dp_trace_data.icmp_resp++; + + return true; + } + return false; +} + +/** + * qdf_log_eapol_pkt() - log EAPOL packet + * @vdev_id: ID of the vdev + * @skb: skb pointer + * @dir: direction + * @pdev_id: ID of the pdev + * + * Return: true/false + */ +static bool qdf_log_eapol_pkt(uint8_t vdev_id, struct sk_buff *skb, + enum qdf_proto_dir dir, uint8_t pdev_id) +{ + enum qdf_proto_subtype subtype; + uint32_t dp_eap_trace; + uint32_t dp_eap_event; + + dp_eap_trace = qdf_dp_get_proto_bitmap() & QDF_NBUF_PKT_TRAC_TYPE_EAPOL; + dp_eap_event = qdf_dp_get_proto_event_bitmap() & + QDF_NBUF_PKT_TRAC_TYPE_EAPOL; + + if (!dp_eap_trace && !dp_eap_event) + return false; + + if (!((dir == QDF_TX && QDF_NBUF_CB_PACKET_TYPE_EAPOL == + QDF_NBUF_CB_GET_PACKET_TYPE(skb)) || + (dir == QDF_RX && qdf_nbuf_is_ipv4_eapol_pkt(skb) == true))) + return false; + + subtype = qdf_nbuf_get_eapol_subtype(skb); + + if (dp_eap_event && dir == QDF_RX) + qdf_dp_log_proto_pkt_info(skb->data + QDF_NBUF_SRC_MAC_OFFSET, + skb->data + QDF_NBUF_DEST_MAC_OFFSET, + QDF_PROTO_TYPE_EAPOL, subtype, dir, + QDF_TRACE_DEFAULT_MSDU_ID, + QDF_TX_RX_STATUS_INVALID); + + if (dp_eap_trace) { + QDF_NBUF_CB_DP_TRACE_PRINT(skb) = true; + if (QDF_TX == dir) + QDF_NBUF_CB_TX_DP_TRACE(skb) = 1; + else if (QDF_RX == dir) + QDF_NBUF_CB_RX_DP_TRACE(skb) = 1; + + DPTRACE(qdf_dp_trace_proto_pkt(QDF_DP_TRACE_EAPOL_PACKET_RECORD, + vdev_id, + skb->data + + QDF_NBUF_SRC_MAC_OFFSET, + skb->data + + QDF_NBUF_DEST_MAC_OFFSET, + QDF_PROTO_TYPE_EAPOL, subtype, + dir, pdev_id, true)); + + switch (subtype) { + case QDF_PROTO_EAPOL_M1: + g_qdf_dp_trace_data.eapol_m1++; + break; + case QDF_PROTO_EAPOL_M2: + g_qdf_dp_trace_data.eapol_m2++; + break; + case QDF_PROTO_EAPOL_M3: + g_qdf_dp_trace_data.eapol_m3++; + break; + case QDF_PROTO_EAPOL_M4: + g_qdf_dp_trace_data.eapol_m4++; + break; + default: + g_qdf_dp_trace_data.eapol_others++; + break; + } + } + + return true; +} + +/** + * qdf_log_dhcp_pkt() - log DHCP packet + * @vdev_id: ID of the vdev + * @skb: skb pointer + * @dir: direction + * @pdev_id: ID of the pdev + * + * Return: true/false + */ +static bool qdf_log_dhcp_pkt(uint8_t vdev_id, struct sk_buff *skb, + enum qdf_proto_dir dir, uint8_t pdev_id) +{ + enum qdf_proto_subtype subtype = QDF_PROTO_INVALID; + uint32_t dp_dhcp_trace; + uint32_t dp_dhcp_event; + + dp_dhcp_trace = qdf_dp_get_proto_bitmap() & QDF_NBUF_PKT_TRAC_TYPE_DHCP; + dp_dhcp_event = qdf_dp_get_proto_event_bitmap() & + QDF_NBUF_PKT_TRAC_TYPE_DHCP; + + if (!dp_dhcp_trace && !dp_dhcp_event) + return false; + + if (!((dir == QDF_TX && QDF_NBUF_CB_PACKET_TYPE_DHCP == + QDF_NBUF_CB_GET_PACKET_TYPE(skb)) || + (dir == QDF_RX && qdf_nbuf_is_ipv4_dhcp_pkt(skb) == true))) + return false; + + subtype = qdf_nbuf_get_dhcp_subtype(skb); + + if (dp_dhcp_event && dir == QDF_RX) + qdf_dp_log_proto_pkt_info(skb->data + QDF_NBUF_SRC_MAC_OFFSET, + skb->data + QDF_NBUF_DEST_MAC_OFFSET, + QDF_PROTO_TYPE_DHCP, subtype, dir, + QDF_TRACE_DEFAULT_MSDU_ID, + QDF_TX_RX_STATUS_INVALID); + + if (dp_dhcp_trace) { + QDF_NBUF_CB_DP_TRACE_PRINT(skb) = true; + if (QDF_TX == dir) + QDF_NBUF_CB_TX_DP_TRACE(skb) = 1; + else if (QDF_RX == dir) + QDF_NBUF_CB_RX_DP_TRACE(skb) = 1; + + DPTRACE(qdf_dp_trace_proto_pkt(QDF_DP_TRACE_DHCP_PACKET_RECORD, + vdev_id, + skb->data + + QDF_NBUF_SRC_MAC_OFFSET, + skb->data + + QDF_NBUF_DEST_MAC_OFFSET, + QDF_PROTO_TYPE_DHCP, subtype, + dir, pdev_id, true)); + + switch (subtype) { + case QDF_PROTO_DHCP_DISCOVER: + g_qdf_dp_trace_data.dhcp_disc++; + break; + case QDF_PROTO_DHCP_OFFER: + g_qdf_dp_trace_data.dhcp_off++; + break; + case QDF_PROTO_DHCP_REQUEST: + g_qdf_dp_trace_data.dhcp_req++; + break; + case QDF_PROTO_DHCP_ACK: + g_qdf_dp_trace_data.dhcp_ack++; + break; + case QDF_PROTO_DHCP_NACK: + g_qdf_dp_trace_data.dhcp_nack++; + break; + default: + g_qdf_dp_trace_data.eapol_others++; + break; + } + } + + return true; +} + +/** + * qdf_log_arp_pkt() - log ARP packet + * @vdev_id: ID of the vdev + * @skb: skb pointer + * @dir: direction + * @pdev_id: ID of the pdev + * + * Return: true/false + */ +static bool qdf_log_arp_pkt(uint8_t vdev_id, struct sk_buff *skb, + enum qdf_proto_dir dir, uint8_t pdev_id) +{ + enum qdf_proto_subtype proto_subtype; + + if ((qdf_dp_get_proto_bitmap() & QDF_NBUF_PKT_TRAC_TYPE_ARP) && + ((dir == QDF_TX && QDF_NBUF_CB_PACKET_TYPE_ARP == + QDF_NBUF_CB_GET_PACKET_TYPE(skb)) || + (dir == QDF_RX && qdf_nbuf_is_ipv4_arp_pkt(skb) == true))) { + + proto_subtype = qdf_nbuf_get_arp_subtype(skb); + QDF_NBUF_CB_DP_TRACE_PRINT(skb) = true; + if (QDF_TX == dir) + QDF_NBUF_CB_TX_DP_TRACE(skb) = 1; + else if (QDF_RX == dir) + QDF_NBUF_CB_RX_DP_TRACE(skb) = 1; + + DPTRACE(qdf_dp_trace_proto_pkt(QDF_DP_TRACE_ARP_PACKET_RECORD, + vdev_id, + skb->data + + QDF_NBUF_SRC_MAC_OFFSET, + skb->data + + QDF_NBUF_DEST_MAC_OFFSET, + QDF_PROTO_TYPE_ARP, + proto_subtype, dir, pdev_id, + true)); + + if (QDF_PROTO_ARP_REQ == proto_subtype) + g_qdf_dp_trace_data.arp_req++; + else + g_qdf_dp_trace_data.arp_resp++; + + return true; + } + return false; +} + + +bool qdf_dp_trace_log_pkt(uint8_t vdev_id, struct sk_buff *skb, + enum qdf_proto_dir dir, uint8_t pdev_id) +{ + if (!qdf_dp_get_proto_bitmap() && !qdf_dp_get_proto_event_bitmap()) + return false; + if (qdf_log_arp_pkt(vdev_id, skb, dir, pdev_id)) + return true; + if (qdf_log_dhcp_pkt(vdev_id, skb, dir, pdev_id)) + return true; + if (qdf_log_eapol_pkt(vdev_id, skb, dir, pdev_id)) + return true; + if (qdf_log_icmp_pkt(vdev_id, skb, dir, pdev_id)) + return true; + if (qdf_log_icmpv6_pkt(vdev_id, skb, dir, pdev_id)) + return true; + return false; +} +qdf_export_symbol(qdf_dp_trace_log_pkt); + +void qdf_dp_display_mgmt_pkt(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_mgmt_buf *buf = + (struct qdf_dp_trace_mgmt_buf *)record->data; + + qdf_mem_zero(prepend_str, sizeof(prepend_str)); + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, info, record); + + DPTRACE_PRINT("%s [%d] [%s %s]", + prepend_str, + buf->vdev_id, + qdf_dp_type_to_str(buf->type), + qdf_dp_subtype_to_str(buf->subtype)); +} +qdf_export_symbol(qdf_dp_display_mgmt_pkt); + + +void qdf_dp_trace_mgmt_pkt(enum QDF_DP_TRACE_ID code, uint8_t vdev_id, + uint8_t pdev_id, enum qdf_proto_type type, + enum qdf_proto_subtype subtype) +{ + struct qdf_dp_trace_mgmt_buf buf; + int buf_size = sizeof(struct qdf_dp_trace_mgmt_buf); + + if (qdf_dp_enable_check(NULL, code, QDF_NA) == false) + return; + + if (buf_size > QDF_DP_TRACE_RECORD_SIZE) + QDF_BUG(0); + + buf.type = type; + buf.subtype = subtype; + buf.vdev_id = vdev_id; + qdf_dp_add_record(code, pdev_id, (uint8_t *)&buf, buf_size, + NULL, 0, true); +} +qdf_export_symbol(qdf_dp_trace_mgmt_pkt); + +static void +qdf_dpt_display_credit_record_debugfs(qdf_debugfs_file_t file, + struct qdf_dp_trace_record_s *record, + uint32_t index) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_credit_record *buf = + (struct qdf_dp_trace_credit_record *)record->data; + + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, 0, record); + if (buf->operation == QDF_OP_NA) + qdf_debugfs_printf(file, "%s [%s] [T: %d G0: %d G1: %d]\n", + prepend_str, + qdf_dp_credit_source_to_str(buf->source), + buf->total_credits, buf->g0_credit, + buf->g1_credit); + else + qdf_debugfs_printf(file, + "%s [%s] [T: %d G0: %d G1: %d] [%s %d]\n", + prepend_str, + qdf_dp_credit_source_to_str(buf->source), + buf->total_credits, buf->g0_credit, + buf->g1_credit, + qdf_dp_operation_to_str(buf->operation), + buf->delta); +} + +void qdf_dp_display_credit_record(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_credit_record *buf = + (struct qdf_dp_trace_credit_record *)record->data; + + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, info, record); + if (buf->operation == QDF_OP_NA) + DPTRACE_PRINT("%s [%s] [T: %d G0: %d G1: %d]", + prepend_str, + qdf_dp_credit_source_to_str(buf->source), + buf->total_credits, buf->g0_credit, + buf->g1_credit); + else + DPTRACE_PRINT("%s [%s] [T: %d G0: %d G1: %d] [%s %d]", + prepend_str, + qdf_dp_credit_source_to_str(buf->source), + buf->total_credits, buf->g0_credit, + buf->g1_credit, + qdf_dp_operation_to_str(buf->operation), + buf->delta); +} + +void qdf_dp_trace_credit_record(enum QDF_CREDIT_UPDATE_SOURCE source, + enum QDF_CREDIT_OPERATION operation, + int delta, int total_credits, + int g0_credit, int g1_credit) +{ + struct qdf_dp_trace_credit_record buf; + int buf_size = sizeof(struct qdf_dp_trace_credit_record); + enum QDF_DP_TRACE_ID code = QDF_DP_TRACE_TX_CREDIT_RECORD; + + if (qdf_dp_enable_check(NULL, code, QDF_NA) == false) + return; + + if (!(qdf_dp_get_proto_bitmap() & QDF_HL_CREDIT_TRACKING)) + return; + + if (buf_size > QDF_DP_TRACE_RECORD_SIZE) + QDF_BUG(0); + + buf.source = source; + buf.operation = operation; + buf.delta = delta; + buf.total_credits = total_credits; + buf.g0_credit = g0_credit; + buf.g1_credit = g1_credit; + + qdf_dp_add_record(code, QDF_TRACE_DEFAULT_PDEV_ID, (uint8_t *)&buf, + buf_size, NULL, 0, false); +} +qdf_export_symbol(qdf_dp_trace_credit_record); + +void qdf_dp_display_event_record(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info) +{ + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_event_buf *buf = + (struct qdf_dp_trace_event_buf *)record->data; + + qdf_mem_zero(prepend_str, sizeof(prepend_str)); + qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, info, record); + + DPTRACE_PRINT("%s [%d] [%s %s]", + prepend_str, + buf->vdev_id, + qdf_dp_type_to_str(buf->type), + qdf_dp_subtype_to_str(buf->subtype)); +} +qdf_export_symbol(qdf_dp_display_event_record); + +/** + * qdf_dp_trace_record_event() - record events + * @code: dptrace code + * @vdev_id: vdev id + * @pdev_id: pdev_id + * @type: proto type + * @subtype: proto subtype + * + * Return: none + */ +void qdf_dp_trace_record_event(enum QDF_DP_TRACE_ID code, uint8_t vdev_id, + uint8_t pdev_id, enum qdf_proto_type type, + enum qdf_proto_subtype subtype) +{ + struct qdf_dp_trace_event_buf buf; + int buf_size = sizeof(struct qdf_dp_trace_event_buf); + + if (qdf_dp_enable_check(NULL, code, QDF_NA) == false) + return; + + if (buf_size > QDF_DP_TRACE_RECORD_SIZE) + QDF_BUG(0); + + buf.type = type; + buf.subtype = subtype; + buf.vdev_id = vdev_id; + qdf_dp_add_record(code, pdev_id, + (uint8_t *)&buf, buf_size, NULL, 0, true); +} +qdf_export_symbol(qdf_dp_trace_record_event); + + +void qdf_dp_display_proto_pkt(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_proto_buf *buf = + (struct qdf_dp_trace_proto_buf *)record->data; + + qdf_mem_zero(prepend_str, sizeof(prepend_str)); + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, info, record); + DPTRACE_PRINT("%s [%d] [%s] SA: " + QDF_MAC_ADDR_FMT " %s DA: " + QDF_MAC_ADDR_FMT, + prepend_str, + buf->vdev_id, + qdf_dp_subtype_to_str(buf->subtype), + QDF_MAC_ADDR_REF(buf->sa.bytes), + qdf_dp_dir_to_str(buf->dir), + QDF_MAC_ADDR_REF(buf->da.bytes)); +} +qdf_export_symbol(qdf_dp_display_proto_pkt); + +void qdf_dp_trace_proto_pkt(enum QDF_DP_TRACE_ID code, uint8_t vdev_id, + uint8_t *sa, uint8_t *da, enum qdf_proto_type type, + enum qdf_proto_subtype subtype, enum qdf_proto_dir dir, + uint8_t pdev_id, bool print) +{ + struct qdf_dp_trace_proto_buf buf; + int buf_size = sizeof(struct qdf_dp_trace_ptr_buf); + + if (qdf_dp_enable_check(NULL, code, dir) == false) + return; + + if (buf_size > QDF_DP_TRACE_RECORD_SIZE) + QDF_BUG(0); + + memcpy(&buf.sa, sa, QDF_NET_ETH_LEN); + memcpy(&buf.da, da, QDF_NET_ETH_LEN); + buf.dir = dir; + buf.type = type; + buf.subtype = subtype; + buf.vdev_id = vdev_id; + qdf_dp_add_record(code, pdev_id, + (uint8_t *)&buf, buf_size, NULL, 0, print); +} +qdf_export_symbol(qdf_dp_trace_proto_pkt); + +void qdf_dp_display_ptr_record(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_ptr_buf *buf = + (struct qdf_dp_trace_ptr_buf *)record->data; + bool is_free_pkt_ptr_record = false; + + if ((record->code == QDF_DP_TRACE_FREE_PACKET_PTR_RECORD) || + (record->code == QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD)) + is_free_pkt_ptr_record = true; + + qdf_mem_zero(prepend_str, sizeof(prepend_str)); + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, info, record); + + if (loc < sizeof(prepend_str)) + scnprintf(&prepend_str[loc], sizeof(prepend_str) - loc, + "[msdu id %d %s %d]", + buf->msdu_id, + is_free_pkt_ptr_record ? "status" : "vdev_id", + buf->status); + + if (info & QDF_DP_TRACE_RECORD_INFO_LIVE) { + /* In live mode donot dump the contents of the cookie */ + DPTRACE_PRINT("%s", prepend_str); + } else { + dump_dp_hex_trace(prepend_str, (uint8_t *)&buf->cookie, + sizeof(buf->cookie)); + } +} +qdf_export_symbol(qdf_dp_display_ptr_record); + +static +enum qdf_proto_type qdf_dp_get_pkt_proto_type(qdf_nbuf_t nbuf) +{ + uint8_t pkt_type; + + if (!nbuf) + return QDF_PROTO_TYPE_MAX; + + if (qdf_nbuf_data_is_dns_query(nbuf) || + qdf_nbuf_data_is_dns_response(nbuf)) + return QDF_PROTO_TYPE_DNS; + + pkt_type = QDF_NBUF_CB_GET_PACKET_TYPE(nbuf); + + switch (pkt_type) { + case QDF_NBUF_CB_PACKET_TYPE_EAPOL: + return QDF_PROTO_TYPE_EAPOL; + case QDF_NBUF_CB_PACKET_TYPE_ARP: + return QDF_PROTO_TYPE_ARP; + case QDF_NBUF_CB_PACKET_TYPE_DHCP: + return QDF_PROTO_TYPE_DHCP; + default: + return QDF_PROTO_TYPE_MAX; + } +} + +static +enum qdf_proto_subtype qdf_dp_get_pkt_subtype(qdf_nbuf_t nbuf, + enum qdf_proto_type pkt_type) +{ + switch (pkt_type) { + case QDF_PROTO_TYPE_EAPOL: + return qdf_nbuf_get_eapol_subtype(nbuf); + case QDF_PROTO_TYPE_ARP: + return qdf_nbuf_get_arp_subtype(nbuf); + case QDF_PROTO_TYPE_DHCP: + return qdf_nbuf_get_dhcp_subtype(nbuf); + case QDF_PROTO_TYPE_DNS: + return (qdf_nbuf_data_is_dns_query(nbuf)) ? + QDF_PROTO_DNS_QUERY : QDF_PROTO_DNS_RES; + default: + return QDF_PROTO_INVALID; + } +} + +static +bool qdf_dp_proto_log_enable_check(enum qdf_proto_type pkt_type, + uint16_t status) +{ + if (pkt_type == QDF_PROTO_TYPE_MAX) + return false; + + switch (pkt_type) { + case QDF_PROTO_TYPE_EAPOL: + return qdf_dp_get_proto_event_bitmap() & + QDF_NBUF_PKT_TRAC_TYPE_EAPOL; + case QDF_PROTO_TYPE_DHCP: + return qdf_dp_get_proto_event_bitmap() & + QDF_NBUF_PKT_TRAC_TYPE_DHCP; + case QDF_PROTO_TYPE_ARP: + if (status == QDF_TX_RX_STATUS_OK) + return false; + else + return qdf_dp_get_proto_event_bitmap() & + QDF_NBUF_PKT_TRAC_TYPE_ARP; + case QDF_PROTO_TYPE_DNS: + if (status == QDF_TX_RX_STATUS_OK) + return false; + else + return qdf_dp_get_proto_event_bitmap() & + QDF_NBUF_PKT_TRAC_TYPE_DNS; + default: + return false; + } +} + +void qdf_dp_track_noack_check(qdf_nbuf_t nbuf, enum qdf_proto_subtype *subtype) +{ + enum qdf_proto_type pkt_type = qdf_dp_get_pkt_proto_type(nbuf); + uint16_t dp_track = 0; + + switch (pkt_type) { + case QDF_PROTO_TYPE_EAPOL: + dp_track = qdf_dp_get_proto_bitmap() & + QDF_NBUF_PKT_TRAC_TYPE_EAPOL; + break; + case QDF_PROTO_TYPE_DHCP: + dp_track = qdf_dp_get_proto_bitmap() & + QDF_NBUF_PKT_TRAC_TYPE_DHCP; + break; + case QDF_PROTO_TYPE_ARP: + dp_track = qdf_dp_get_proto_bitmap() & + QDF_NBUF_PKT_TRAC_TYPE_ARP; + break; + case QDF_PROTO_TYPE_DNS: + dp_track = qdf_dp_get_proto_bitmap() & + QDF_NBUF_PKT_TRAC_TYPE_DNS; + break; + default: + break; + } + + if (!dp_track) { + *subtype = QDF_PROTO_INVALID; + return; + } + + *subtype = qdf_dp_get_pkt_subtype(nbuf, pkt_type); +} +qdf_export_symbol(qdf_dp_track_noack_check); + +/** + * qdf_dp_trace_ptr() - record dptrace + * @code: dptrace code + * @pdev_id: pdev_id + * @data: data + * @size: size of data + * @msdu_id: msdu_id + * @status: return status + * + * Return: none + */ +void qdf_dp_trace_ptr(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code, + uint8_t pdev_id, uint8_t *data, uint8_t size, + uint16_t msdu_id, uint16_t status) +{ + struct qdf_dp_trace_ptr_buf buf; + int buf_size = sizeof(struct qdf_dp_trace_ptr_buf); + enum qdf_proto_type pkt_type; + + pkt_type = qdf_dp_get_pkt_proto_type(nbuf); + if ((code == QDF_DP_TRACE_FREE_PACKET_PTR_RECORD || + code == QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD) && + qdf_dp_proto_log_enable_check(pkt_type, status + 1)) + qdf_dp_log_proto_pkt_info(nbuf->data + QDF_NBUF_SRC_MAC_OFFSET, + nbuf->data + QDF_NBUF_DEST_MAC_OFFSET, + pkt_type, + qdf_dp_get_pkt_subtype(nbuf, pkt_type), + QDF_TX, msdu_id, status + 1); + + if (qdf_dp_enable_check(nbuf, code, QDF_TX) == false) + return; + + if (buf_size > QDF_DP_TRACE_RECORD_SIZE) + QDF_BUG(0); + + qdf_mem_copy(&buf.cookie, data, size); + buf.msdu_id = msdu_id; + buf.status = status; + qdf_dp_add_record(code, pdev_id, (uint8_t *)&buf, buf_size, NULL, 0, + QDF_NBUF_CB_DP_TRACE_PRINT(nbuf)); +} +qdf_export_symbol(qdf_dp_trace_ptr); + +void qdf_dp_trace_data_pkt(qdf_nbuf_t nbuf, uint8_t pdev_id, + enum QDF_DP_TRACE_ID code, uint16_t msdu_id, + enum qdf_proto_dir dir) +{ + struct qdf_dp_trace_data_buf buf; + enum qdf_proto_type pkt_type; + + pkt_type = qdf_dp_get_pkt_proto_type(nbuf); + if (code == QDF_DP_TRACE_DROP_PACKET_RECORD && + qdf_dp_proto_log_enable_check(pkt_type, QDF_TX_RX_STATUS_DROP)) + qdf_dp_log_proto_pkt_info(nbuf->data + QDF_NBUF_SRC_MAC_OFFSET, + nbuf->data + QDF_NBUF_DEST_MAC_OFFSET, + pkt_type, + qdf_dp_get_pkt_subtype(nbuf, pkt_type), + QDF_TX, msdu_id, + QDF_TX_RX_STATUS_DROP); + + buf.msdu_id = msdu_id; + if (!qdf_dp_enable_check(nbuf, code, dir)) + return; + + qdf_dp_add_record(code, pdev_id, + nbuf ? qdf_nbuf_data(nbuf) : NULL, + nbuf ? nbuf->len - nbuf->data_len : 0, + (uint8_t *)&buf, sizeof(struct qdf_dp_trace_data_buf), + (nbuf) ? QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) : false); +} + +qdf_export_symbol(qdf_dp_trace_data_pkt); + +void qdf_dp_display_record(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + + if (!(pdev_id == QDF_TRACE_DEFAULT_PDEV_ID || + pdev_id == record->pdev_id)) + return; + + qdf_mem_zero(prepend_str, sizeof(prepend_str)); + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, info, record); + + switch (record->code) { + case QDF_DP_TRACE_HDD_TX_TIMEOUT: + DPTRACE_PRINT(" %s: HDD TX Timeout", prepend_str); + break; + case QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT: + DPTRACE_PRINT(" %s: HDD SoftAP TX Timeout", prepend_str); + break; + case QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD: + DPTRACE_PRINT(" %s: CE Fast Packet Error", prepend_str); + break; + case QDF_DP_TRACE_LI_DP_NULL_RX_PACKET_RECORD: + default: + dump_dp_hex_trace(prepend_str, record->data, record->size); + break; + }; +} +qdf_export_symbol(qdf_dp_display_record); + +void +qdf_dp_display_data_pkt_record(struct qdf_dp_trace_record_s *record, + uint16_t rec_index, uint8_t pdev_id, + uint8_t info) +{ + int loc; + char prepend_str[DP_TRACE_META_DATA_STRLEN + 10]; + struct qdf_dp_trace_data_buf *buf = + (struct qdf_dp_trace_data_buf *)record->data; + + qdf_mem_zero(prepend_str, sizeof(prepend_str)); + + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + rec_index, info, record); + if (loc < sizeof(prepend_str)) + loc += snprintf(&prepend_str[loc], sizeof(prepend_str) - loc, + "[%d]", buf->msdu_id); + dump_dp_hex_trace(prepend_str, + &record->data[sizeof(struct qdf_dp_trace_data_buf)], + record->size); +} + +/** + * qdf_dp_trace() - Stores the data in buffer + * @nbuf : defines the netbuf + * @code : defines the event + * @pdev_id: pdev_id + * @data : defines the data to be stored + * @size : defines the size of the data record + * + * Return: None + */ +void qdf_dp_trace(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code, uint8_t pdev_id, + uint8_t *data, uint8_t size, enum qdf_proto_dir dir) +{ + + if (qdf_dp_enable_check(nbuf, code, dir) == false) + return; + + qdf_dp_add_record(code, pdev_id, nbuf ? qdf_nbuf_data(nbuf) : NULL, + size, NULL, 0, + (nbuf) ? QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) : false); +} +qdf_export_symbol(qdf_dp_trace); + +/** + * qdf_dp_trace_spin_lock_init() - initializes the lock variable before use + * This function will be called from cds_alloc_global_context, we will have lock + * available to use ASAP + * + * Return: None + */ +void qdf_dp_trace_spin_lock_init(void) +{ + spin_lock_init(&l_dp_trace_lock); +} +qdf_export_symbol(qdf_dp_trace_spin_lock_init); + +/** + * qdf_dp_trace_disable_live_mode - disable live mode for dptrace + * + * Return: none + */ +void qdf_dp_trace_disable_live_mode(void) +{ + g_qdf_dp_trace_data.force_live_mode = 0; +} +qdf_export_symbol(qdf_dp_trace_disable_live_mode); + +/** + * qdf_dp_trace_enable_live_mode() - enable live mode for dptrace + * + * Return: none + */ +void qdf_dp_trace_enable_live_mode(void) +{ + g_qdf_dp_trace_data.force_live_mode = 1; +} +qdf_export_symbol(qdf_dp_trace_enable_live_mode); + +/** + * qdf_dp_trace_clear_buffer() - clear dp trace buffer + * + * Return: none + */ +void qdf_dp_trace_clear_buffer(void) +{ + g_qdf_dp_trace_data.head = INVALID_QDF_DP_TRACE_ADDR; + g_qdf_dp_trace_data.tail = INVALID_QDF_DP_TRACE_ADDR; + g_qdf_dp_trace_data.num = 0; + g_qdf_dp_trace_data.dump_counter = 0; + g_qdf_dp_trace_data.num_records_to_dump = MAX_QDF_DP_TRACE_RECORDS; + if (g_qdf_dp_trace_data.enable) + memset(g_qdf_dp_trace_tbl, 0, + MAX_QDF_DP_TRACE_RECORDS * + sizeof(struct qdf_dp_trace_record_s)); +} +qdf_export_symbol(qdf_dp_trace_clear_buffer); + +void qdf_dp_trace_dump_stats(void) +{ + DPTRACE_PRINT("STATS |DPT: tx %u rx %u icmp(%u %u) arp(%u %u) icmpv6(%u %u %u %u %u %u) dhcp(%u %u %u %u %u %u) eapol(%u %u %u %u %u)", + g_qdf_dp_trace_data.tx_count, + g_qdf_dp_trace_data.rx_count, + g_qdf_dp_trace_data.icmp_req, + g_qdf_dp_trace_data.icmp_resp, + g_qdf_dp_trace_data.arp_req, + g_qdf_dp_trace_data.arp_resp, + g_qdf_dp_trace_data.icmpv6_req, + g_qdf_dp_trace_data.icmpv6_resp, + g_qdf_dp_trace_data.icmpv6_ns, + g_qdf_dp_trace_data.icmpv6_na, + g_qdf_dp_trace_data.icmpv6_rs, + g_qdf_dp_trace_data.icmpv6_ra, + g_qdf_dp_trace_data.dhcp_disc, + g_qdf_dp_trace_data.dhcp_off, + g_qdf_dp_trace_data.dhcp_req, + g_qdf_dp_trace_data.dhcp_ack, + g_qdf_dp_trace_data.dhcp_nack, + g_qdf_dp_trace_data.dhcp_others, + g_qdf_dp_trace_data.eapol_m1, + g_qdf_dp_trace_data.eapol_m2, + g_qdf_dp_trace_data.eapol_m3, + g_qdf_dp_trace_data.eapol_m4, + g_qdf_dp_trace_data.eapol_others); +} +qdf_export_symbol(qdf_dp_trace_dump_stats); + +/** + * qdf_dpt_dump_hex_trace_debugfs() - read data in file + * @file: file to read + * @str: string to prepend the hexdump with. + * @buf: buffer which contains data to be written + * @buf_len: defines the size of the data to be written + * + * Return: None + */ +static void qdf_dpt_dump_hex_trace_debugfs(qdf_debugfs_file_t file, + char *str, uint8_t *buf, uint8_t buf_len) +{ + unsigned char linebuf[BUFFER_SIZE]; + const u8 *ptr = buf; + int i, linelen, remaining = buf_len; + + /* Dump the bytes in the last line */ + for (i = 0; i < buf_len; i += ROW_SIZE) { + linelen = min(remaining, ROW_SIZE); + remaining -= ROW_SIZE; + + hex_dump_to_buffer(ptr + i, linelen, ROW_SIZE, 1, + linebuf, sizeof(linebuf), false); + + qdf_debugfs_printf(file, "%s %s\n", str, linebuf); + } +} + +/** + * qdf_dpt_display_proto_pkt_debugfs() - display proto packet + * @file: file to read + * @record: dptrace record + * @index: index + * + * Return: none + */ +static void qdf_dpt_display_proto_pkt_debugfs(qdf_debugfs_file_t file, + struct qdf_dp_trace_record_s *record, + uint32_t index) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_proto_buf *buf = + (struct qdf_dp_trace_proto_buf *)record->data; + + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, 0, record); + qdf_debugfs_printf(file, "%s [%d] [%s] SA: " + QDF_MAC_ADDR_FMT " %s DA: " + QDF_MAC_ADDR_FMT, + prepend_str, + buf->vdev_id, + qdf_dp_subtype_to_str(buf->subtype), + QDF_MAC_ADDR_REF(buf->sa.bytes), + qdf_dp_dir_to_str(buf->dir), + QDF_MAC_ADDR_REF(buf->da.bytes)); + qdf_debugfs_printf(file, "\n"); +} + +/** + * qdf_dpt_display_mgmt_pkt_debugfs() - display mgmt packet + * @file: file to read + * @record: dptrace record + * @index: index + * + * Return: none + */ +static void qdf_dpt_display_mgmt_pkt_debugfs(qdf_debugfs_file_t file, + struct qdf_dp_trace_record_s *record, + uint32_t index) +{ + + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_mgmt_buf *buf = + (struct qdf_dp_trace_mgmt_buf *)record->data; + + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, 0, record); + + qdf_debugfs_printf(file, "%s [%d] [%s %s]\n", + prepend_str, + buf->vdev_id, + qdf_dp_type_to_str(buf->type), + qdf_dp_subtype_to_str(buf->subtype)); +} + +/** + * qdf_dpt_display_event_record_debugfs() - display event records + * @file: file to read + * @record: dptrace record + * @index: index + * + * Return: none + */ +static void qdf_dpt_display_event_record_debugfs(qdf_debugfs_file_t file, + struct qdf_dp_trace_record_s *record, + uint32_t index) +{ + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_event_buf *buf = + (struct qdf_dp_trace_event_buf *)record->data; + + qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, 0, record); + qdf_debugfs_printf(file, "%s [%d] [%s %s]\n", + prepend_str, + buf->vdev_id, + qdf_dp_type_to_str(buf->type), + qdf_dp_subtype_to_str(buf->subtype)); +} + +/** + * qdf_dpt_display_ptr_record_debugfs() - display record ptr + * @file: file to read + * @record: dptrace record + * @index: index + * + * Return: none + */ +static void qdf_dpt_display_ptr_record_debugfs(qdf_debugfs_file_t file, + struct qdf_dp_trace_record_s *record, + uint32_t index) +{ + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + int loc; + struct qdf_dp_trace_ptr_buf *buf = + (struct qdf_dp_trace_ptr_buf *)record->data; + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, 0, record); + + if (loc < sizeof(prepend_str)) + scnprintf(&prepend_str[loc], sizeof(prepend_str) - loc, + "[msdu id %d %s %d]", + buf->msdu_id, + (record->code == + QDF_DP_TRACE_FREE_PACKET_PTR_RECORD) ? + "status" : "vdev_id", + buf->status); + + qdf_dpt_dump_hex_trace_debugfs(file, prepend_str, + (uint8_t *)&buf->cookie, + sizeof(buf->cookie)); +} + +/** + * qdf_dpt_display_ptr_record_debugfs() - display record + * @file: file to read + * @record: dptrace record + * @index: index + * + * Return: none + */ +static void qdf_dpt_display_record_debugfs(qdf_debugfs_file_t file, + struct qdf_dp_trace_record_s *record, + uint32_t index) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_data_buf *buf = + (struct qdf_dp_trace_data_buf *)record->data; + + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, 0, record); + if (loc < sizeof(prepend_str)) + loc += snprintf(&prepend_str[loc], sizeof(prepend_str) - loc, + "[%d]", buf->msdu_id); + qdf_dpt_dump_hex_trace_debugfs(file, prepend_str, + record->data, record->size); +} + +uint32_t qdf_dpt_get_curr_pos_debugfs(qdf_debugfs_file_t file, + enum qdf_dpt_debugfs_state state) +{ + uint32_t i = 0; + uint32_t tail; + uint32_t count = g_qdf_dp_trace_data.num; + + if (!g_qdf_dp_trace_data.enable) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, + "%s: Tracing Disabled", __func__); + return QDF_STATUS_E_EMPTY; + } + + if (!count) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, + "%s: no packets", __func__); + return QDF_STATUS_E_EMPTY; + } + + if (state == QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS) + return g_qdf_dp_trace_data.curr_pos; + + qdf_debugfs_printf(file, + "DPT: config - bitmap 0x%x verb %u #rec %u rec_requested %u live_config %u thresh %u time_limit %u\n", + g_qdf_dp_trace_data.proto_bitmap, + g_qdf_dp_trace_data.verbosity, + g_qdf_dp_trace_data.no_of_record, + g_qdf_dp_trace_data.num_records_to_dump, + g_qdf_dp_trace_data.live_mode_config, + g_qdf_dp_trace_data.high_tput_thresh, + g_qdf_dp_trace_data.thresh_time_limit); + + qdf_debugfs_printf(file, + "STATS |DPT: icmp(%u %u) arp(%u %u) icmpv6(%u %u %u %u %u %u) dhcp(%u %u %u %u %u %u) eapol(%u %u %u %u %u)\n", + g_qdf_dp_trace_data.icmp_req, + g_qdf_dp_trace_data.icmp_resp, + g_qdf_dp_trace_data.arp_req, + g_qdf_dp_trace_data.arp_resp, + g_qdf_dp_trace_data.icmpv6_req, + g_qdf_dp_trace_data.icmpv6_resp, + g_qdf_dp_trace_data.icmpv6_ns, + g_qdf_dp_trace_data.icmpv6_na, + g_qdf_dp_trace_data.icmpv6_rs, + g_qdf_dp_trace_data.icmpv6_ra, + g_qdf_dp_trace_data.dhcp_disc, + g_qdf_dp_trace_data.dhcp_off, + g_qdf_dp_trace_data.dhcp_req, + g_qdf_dp_trace_data.dhcp_ack, + g_qdf_dp_trace_data.dhcp_nack, + g_qdf_dp_trace_data.dhcp_others, + g_qdf_dp_trace_data.eapol_m1, + g_qdf_dp_trace_data.eapol_m2, + g_qdf_dp_trace_data.eapol_m3, + g_qdf_dp_trace_data.eapol_m4, + g_qdf_dp_trace_data.eapol_others); + + qdf_debugfs_printf(file, + "DPT: Total Records: %d, Head: %d, Tail: %d\n", + g_qdf_dp_trace_data.num, g_qdf_dp_trace_data.head, + g_qdf_dp_trace_data.tail); + + spin_lock_bh(&l_dp_trace_lock); + if (g_qdf_dp_trace_data.head != INVALID_QDF_DP_TRACE_ADDR) { + i = g_qdf_dp_trace_data.head; + tail = g_qdf_dp_trace_data.tail; + + if (count > g_qdf_dp_trace_data.num) + count = g_qdf_dp_trace_data.num; + + if (tail >= (count - 1)) + i = tail - count + 1; + else if (count != MAX_QDF_DP_TRACE_RECORDS) + i = MAX_QDF_DP_TRACE_RECORDS - ((count - 1) - + tail); + g_qdf_dp_trace_data.curr_pos = 0; + g_qdf_dp_trace_data.saved_tail = tail; + } + spin_unlock_bh(&l_dp_trace_lock); + + return g_qdf_dp_trace_data.saved_tail; +} +qdf_export_symbol(qdf_dpt_get_curr_pos_debugfs); + +QDF_STATUS qdf_dpt_dump_stats_debugfs(qdf_debugfs_file_t file, + uint32_t curr_pos) +{ + struct qdf_dp_trace_record_s p_record; + uint32_t i = curr_pos; + uint16_t num_records_to_dump = g_qdf_dp_trace_data.num_records_to_dump; + + if (!g_qdf_dp_trace_data.enable) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Tracing Disabled", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (num_records_to_dump > g_qdf_dp_trace_data.num) + num_records_to_dump = g_qdf_dp_trace_data.num; + + /* + * Max dp trace record size should always be less than + * QDF_DP_TRACE_PREPEND_STR_SIZE(100) + BUFFER_SIZE(121). + */ + if (WARN_ON(QDF_DP_TRACE_MAX_RECORD_SIZE < + QDF_DP_TRACE_PREPEND_STR_SIZE + BUFFER_SIZE)) + return QDF_STATUS_E_FAILURE; + + spin_lock_bh(&l_dp_trace_lock); + p_record = g_qdf_dp_trace_tbl[i]; + spin_unlock_bh(&l_dp_trace_lock); + + for (;; ) { + /* + * Initially we get file as 1 page size, and + * if remaining size in file is less than one record max size, + * then return so that it gets an extra page. + */ + if ((file->size - file->count) < QDF_DP_TRACE_MAX_RECORD_SIZE) { + spin_lock_bh(&l_dp_trace_lock); + g_qdf_dp_trace_data.curr_pos = i; + spin_unlock_bh(&l_dp_trace_lock); + return QDF_STATUS_E_FAILURE; + } + + switch (p_record.code) { + case QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD: + case QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD: + case QDF_DP_TRACE_FREE_PACKET_PTR_RECORD: + qdf_dpt_display_ptr_record_debugfs(file, &p_record, i); + break; + + case QDF_DP_TRACE_EAPOL_PACKET_RECORD: + case QDF_DP_TRACE_DHCP_PACKET_RECORD: + case QDF_DP_TRACE_ARP_PACKET_RECORD: + case QDF_DP_TRACE_ICMP_PACKET_RECORD: + case QDF_DP_TRACE_ICMPv6_PACKET_RECORD: + qdf_dpt_display_proto_pkt_debugfs(file, &p_record, i); + break; + + case QDF_DP_TRACE_TX_CREDIT_RECORD: + qdf_dpt_display_credit_record_debugfs(file, &p_record, + i); + break; + + case QDF_DP_TRACE_MGMT_PACKET_RECORD: + qdf_dpt_display_mgmt_pkt_debugfs(file, &p_record, i); + break; + + case QDF_DP_TRACE_EVENT_RECORD: + qdf_dpt_display_event_record_debugfs(file, &p_record, + i); + break; + + case QDF_DP_TRACE_HDD_TX_TIMEOUT: + qdf_debugfs_printf( + file, "DPT: %04d: %llu %s\n", + i, p_record.time, + qdf_dp_code_to_string(p_record.code)); + qdf_debugfs_printf(file, "HDD TX Timeout\n"); + break; + + case QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT: + qdf_debugfs_printf( + file, "DPT: %04d: %llu %s\n", + i, p_record.time, + qdf_dp_code_to_string(p_record.code)); + qdf_debugfs_printf(file, "HDD SoftAP TX Timeout\n"); + break; + + case QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD: + qdf_debugfs_printf( + file, "DPT: %04d: %llu %s\n", + i, p_record.time, + qdf_dp_code_to_string(p_record.code)); + qdf_debugfs_printf(file, "CE Fast Packet Error\n"); + break; + + case QDF_DP_TRACE_MAX: + qdf_debugfs_printf(file, + "%s: QDF_DP_TRACE_MAX event should not be generated\n", + __func__); + break; + + case QDF_DP_TRACE_HDD_TX_PACKET_RECORD: + case QDF_DP_TRACE_HDD_RX_PACKET_RECORD: + case QDF_DP_TRACE_TX_PACKET_RECORD: + case QDF_DP_TRACE_RX_PACKET_RECORD: + case QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD: + case QDF_DP_TRACE_LI_DP_RX_PACKET_RECORD: + + default: + qdf_dpt_display_record_debugfs(file, &p_record, i); + break; + } + + if (++g_qdf_dp_trace_data.dump_counter == num_records_to_dump) + break; + + spin_lock_bh(&l_dp_trace_lock); + if (i == 0) + i = MAX_QDF_DP_TRACE_RECORDS; + + i -= 1; + p_record = g_qdf_dp_trace_tbl[i]; + spin_unlock_bh(&l_dp_trace_lock); + } + + g_qdf_dp_trace_data.dump_counter = 0; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_dpt_dump_stats_debugfs); + +/** + * qdf_dpt_set_value_debugfs() - Configure the value to control DP trace + * @proto_bitmap: defines the protocol to be tracked + * @no_of_records: defines the nth packet which is traced + * @verbosity: defines the verbosity level + * + * Return: None + */ +void qdf_dpt_set_value_debugfs(uint8_t proto_bitmap, uint8_t no_of_record, + uint8_t verbosity, uint16_t num_records_to_dump) +{ + if (g_qdf_dp_trace_data.enable) { + g_qdf_dp_trace_data.proto_bitmap = proto_bitmap; + g_qdf_dp_trace_data.no_of_record = no_of_record; + g_qdf_dp_trace_data.verbosity = verbosity; + g_qdf_dp_trace_data.num_records_to_dump = num_records_to_dump; + } +} +qdf_export_symbol(qdf_dpt_set_value_debugfs); + + +/** + * qdf_dp_trace_dump_all() - Dump data from ring buffer via call back functions + * registered with QDF + * @count: Number of lines to dump starting from tail to head + * @pdev_id: pdev_id + * + * Return: None + */ +void qdf_dp_trace_dump_all(uint32_t count, uint8_t pdev_id) +{ + struct qdf_dp_trace_record_s p_record; + int32_t i, tail; + + if (!g_qdf_dp_trace_data.enable) { + DPTRACE_PRINT("Tracing Disabled"); + return; + } + + DPTRACE_PRINT( + "DPT: config - bitmap 0x%x verb %u #rec %u live_config %u thresh %u time_limit %u", + g_qdf_dp_trace_data.proto_bitmap, + g_qdf_dp_trace_data.verbosity, + g_qdf_dp_trace_data.no_of_record, + g_qdf_dp_trace_data.live_mode_config, + g_qdf_dp_trace_data.high_tput_thresh, + g_qdf_dp_trace_data.thresh_time_limit); + + qdf_dp_trace_dump_stats(); + + DPTRACE_PRINT("DPT: Total Records: %d, Head: %d, Tail: %d", + g_qdf_dp_trace_data.num, g_qdf_dp_trace_data.head, + g_qdf_dp_trace_data.tail); + + /* aquire the lock so that only one thread at a time can read + * the ring buffer + */ + spin_lock_bh(&l_dp_trace_lock); + + if (g_qdf_dp_trace_data.head != INVALID_QDF_DP_TRACE_ADDR) { + i = g_qdf_dp_trace_data.head; + tail = g_qdf_dp_trace_data.tail; + + if (count) { + if (count > g_qdf_dp_trace_data.num) + count = g_qdf_dp_trace_data.num; + if (tail >= (count - 1)) + i = tail - count + 1; + else if (count != MAX_QDF_DP_TRACE_RECORDS) + i = MAX_QDF_DP_TRACE_RECORDS - ((count - 1) - + tail); + } + + p_record = g_qdf_dp_trace_tbl[i]; + spin_unlock_bh(&l_dp_trace_lock); + for (;; ) { + qdf_dp_trace_cb_table[p_record.code](&p_record, + (uint16_t)i, pdev_id, false); + if (i == tail) + break; + i += 1; + + spin_lock_bh(&l_dp_trace_lock); + if (MAX_QDF_DP_TRACE_RECORDS == i) + i = 0; + + p_record = g_qdf_dp_trace_tbl[i]; + spin_unlock_bh(&l_dp_trace_lock); + } + } else { + spin_unlock_bh(&l_dp_trace_lock); + } +} +qdf_export_symbol(qdf_dp_trace_dump_all); + +/** + * qdf_dp_trace_throttle_live_mode() - Throttle DP Trace live mode + * @high_bw_request: whether this is a high BW req or not + * + * The function tries to prevent excessive logging into the live buffer by + * having an upper limit on number of packets that can be logged per second. + * + * The intention is to allow occasional pings and data packets and really low + * throughput levels while suppressing bursts and higher throughput levels so + * that we donot hog the live buffer. + * + * If the number of packets printed in a particular second exceeds the thresh, + * disable printing in the next second. + * + * Return: None + */ +void qdf_dp_trace_throttle_live_mode(bool high_bw_request) +{ + static int bw_interval_counter; + + if (g_qdf_dp_trace_data.enable == false || + g_qdf_dp_trace_data.live_mode_config == false) + return; + + if (high_bw_request) { + g_qdf_dp_trace_data.live_mode = 0; + bw_interval_counter = 0; + return; + } + + bw_interval_counter++; + + if (0 == (bw_interval_counter % + g_qdf_dp_trace_data.thresh_time_limit)) { + + spin_lock_bh(&l_dp_trace_lock); + if (g_qdf_dp_trace_data.print_pkt_cnt <= + g_qdf_dp_trace_data.high_tput_thresh) + g_qdf_dp_trace_data.live_mode = 1; + + g_qdf_dp_trace_data.print_pkt_cnt = 0; + spin_unlock_bh(&l_dp_trace_lock); + } +} +qdf_export_symbol(qdf_dp_trace_throttle_live_mode); + +void qdf_dp_trace_apply_tput_policy(bool is_data_traffic) +{ + if (g_qdf_dp_trace_data.dynamic_verbosity_modify) { + goto check_live_mode; + return; + } + + if (is_data_traffic) { + g_qdf_dp_trace_data.verbosity = + QDF_DP_TRACE_VERBOSITY_ULTRA_LOW; + } else { + g_qdf_dp_trace_data.verbosity = + g_qdf_dp_trace_data.ini_conf_verbosity; + } +check_live_mode: + qdf_dp_trace_throttle_live_mode(is_data_traffic); +} +#endif + +struct qdf_print_ctrl print_ctrl_obj[MAX_PRINT_CONFIG_SUPPORTED]; + +struct category_name_info g_qdf_category_name[MAX_SUPPORTED_CATEGORY] = { + [QDF_MODULE_ID_TDLS] = {"tdls"}, + [QDF_MODULE_ID_ACS] = {"ACS"}, + [QDF_MODULE_ID_SCAN_SM] = {"scan state machine"}, + [QDF_MODULE_ID_SCANENTRY] = {"scan entry"}, + [QDF_MODULE_ID_WDS] = {"WDS"}, + [QDF_MODULE_ID_ACTION] = {"action"}, + [QDF_MODULE_ID_ROAM] = {"STA roaming"}, + [QDF_MODULE_ID_INACT] = {"inactivity"}, + [QDF_MODULE_ID_DOTH] = {"11h"}, + [QDF_MODULE_ID_IQUE] = {"IQUE"}, + [QDF_MODULE_ID_WME] = {"WME"}, + [QDF_MODULE_ID_ACL] = {"ACL"}, + [QDF_MODULE_ID_WPA] = {"WPA/RSN"}, + [QDF_MODULE_ID_RADKEYS] = {"dump 802.1x keys"}, + [QDF_MODULE_ID_RADDUMP] = {"dump radius packet"}, + [QDF_MODULE_ID_RADIUS] = {"802.1x radius client"}, + [QDF_MODULE_ID_DOT1XSM] = {"802.1x state machine"}, + [QDF_MODULE_ID_DOT1X] = {"802.1x authenticator"}, + [QDF_MODULE_ID_POWER] = {"power save"}, + [QDF_MODULE_ID_STATE] = {"state"}, + [QDF_MODULE_ID_OUTPUT] = {"output"}, + [QDF_MODULE_ID_SCAN] = {"scan"}, + [QDF_MODULE_ID_AUTH] = {"authentication"}, + [QDF_MODULE_ID_ASSOC] = {"association"}, + [QDF_MODULE_ID_NODE] = {"node"}, + [QDF_MODULE_ID_ELEMID] = {"element ID"}, + [QDF_MODULE_ID_XRATE] = {"rate"}, + [QDF_MODULE_ID_INPUT] = {"input"}, + [QDF_MODULE_ID_CRYPTO] = {"crypto"}, + [QDF_MODULE_ID_DUMPPKTS] = {"dump packet"}, + [QDF_MODULE_ID_DEBUG] = {"debug"}, + [QDF_MODULE_ID_MLME] = {"mlme"}, + [QDF_MODULE_ID_RRM] = {"rrm"}, + [QDF_MODULE_ID_WNM] = {"wnm"}, + [QDF_MODULE_ID_P2P_PROT] = {"p2p_prot"}, + [QDF_MODULE_ID_PROXYARP] = {"proxyarp"}, + [QDF_MODULE_ID_L2TIF] = {"l2tif"}, + [QDF_MODULE_ID_WIFIPOS] = {"wifipos"}, + [QDF_MODULE_ID_WRAP] = {"wrap"}, + [QDF_MODULE_ID_DFS] = {"dfs"}, + [QDF_MODULE_ID_ATF] = {"atf"}, + [QDF_MODULE_ID_SPLITMAC] = {"splitmac"}, + [QDF_MODULE_ID_IOCTL] = {"ioctl"}, + [QDF_MODULE_ID_NAC] = {"nac"}, + [QDF_MODULE_ID_MESH] = {"mesh"}, + [QDF_MODULE_ID_MBO] = {"mbo"}, + [QDF_MODULE_ID_EXTIOCTL_CHANSWITCH] = {"extchanswitch"}, + [QDF_MODULE_ID_EXTIOCTL_CHANSSCAN] = {"extchanscan"}, + [QDF_MODULE_ID_TLSHIM] = {"tlshim"}, + [QDF_MODULE_ID_WMI] = {"WMI"}, + [QDF_MODULE_ID_HTT] = {"HTT"}, + [QDF_MODULE_ID_HDD] = {"HDD"}, + [QDF_MODULE_ID_SME] = {"SME"}, + [QDF_MODULE_ID_PE] = {"PE"}, + [QDF_MODULE_ID_WMA] = {"WMA"}, + [QDF_MODULE_ID_SYS] = {"SYS"}, + [QDF_MODULE_ID_QDF] = {"QDF"}, + [QDF_MODULE_ID_SAP] = {"SAP"}, + [QDF_MODULE_ID_HDD_SOFTAP] = {"HDD_SAP"}, + [QDF_MODULE_ID_HDD_DATA] = {"DATA"}, + [QDF_MODULE_ID_HDD_SAP_DATA] = {"SAP_DATA"}, + [QDF_MODULE_ID_HIF] = {"HIF"}, + [QDF_MODULE_ID_HTC] = {"HTC"}, + [QDF_MODULE_ID_TXRX] = {"TXRX"}, + [QDF_MODULE_ID_QDF_DEVICE] = {"QDF_DEV"}, + [QDF_MODULE_ID_CFG] = {"CFG"}, + [QDF_MODULE_ID_BMI] = {"BMI"}, + [QDF_MODULE_ID_EPPING] = {"EPPING"}, + [QDF_MODULE_ID_QVIT] = {"QVIT"}, + [QDF_MODULE_ID_DP] = {"DP"}, + [QDF_MODULE_ID_HAL] = {"HAL"}, + [QDF_MODULE_ID_SOC] = {"SOC"}, + [QDF_MODULE_ID_OS_IF] = {"OSIF"}, + [QDF_MODULE_ID_TARGET_IF] = {"TIF"}, + [QDF_MODULE_ID_SCHEDULER] = {"SCH"}, + [QDF_MODULE_ID_MGMT_TXRX] = {"MGMT_TXRX"}, + [QDF_MODULE_ID_PMO] = {"PMO"}, + [QDF_MODULE_ID_POLICY_MGR] = {"POLICY_MGR"}, + [QDF_MODULE_ID_SA_API] = {"SA_API"}, + [QDF_MODULE_ID_NAN] = {"NAN"}, + [QDF_MODULE_ID_SPECTRAL] = {"SPECTRAL"}, + [QDF_MODULE_ID_P2P] = {"P2P"}, + [QDF_MODULE_ID_OFFCHAN_TXRX] = {"OFFCHAN"}, + [QDF_MODULE_ID_REGULATORY] = {"REGULATORY"}, + [QDF_MODULE_ID_OBJ_MGR] = {"OBJMGR"}, + [QDF_MODULE_ID_SERIALIZATION] = {"SER"}, + [QDF_MODULE_ID_NSS] = {"NSS"}, + [QDF_MODULE_ID_ROAM_DEBUG] = {"roam debug"}, + [QDF_MODULE_ID_DIRECT_BUF_RX] = {"DIRECT_BUF_RX"}, + [QDF_MODULE_ID_DISA] = {"disa"}, + [QDF_MODULE_ID_GREEN_AP] = {"GREEN_AP"}, + [QDF_MODULE_ID_EXTAP] = {"EXTAP"}, + [QDF_MODULE_ID_FD] = {"FILS discovery"}, + [QDF_MODULE_ID_FTM] = {"FTM"}, + [QDF_MODULE_ID_OCB] = {"OCB"}, + [QDF_MODULE_ID_CONFIG] = {"CONFIG"}, + [QDF_MODULE_ID_IPA] = {"IPA"}, + [QDF_MODULE_ID_CP_STATS] = {"CP_STATS"}, + [QDF_MODULE_ID_ACTION_OUI] = {"action_oui"}, + [QDF_MODULE_ID_TARGET] = {"TARGET"}, + [QDF_MODULE_ID_MBSSIE] = {"MBSSIE"}, + [QDF_MODULE_ID_FWOL] = {"fwol"}, + [QDF_MODULE_ID_SM_ENGINE] = {"SM_ENG"}, + [QDF_MODULE_ID_CMN_MLME] = {"CMN_MLME"}, + [QDF_MODULE_ID_BSSCOLOR] = {"BSSCOLOR"}, + [QDF_MODULE_ID_CFR] = {"CFR"}, + [QDF_MODULE_ID_TX_CAPTURE] = {"TX_CAPTURE_ENHANCE"}, + [QDF_MODULE_ID_INTEROP_ISSUES_AP] = {"INTEROP_ISSUES_AP"}, + [QDF_MODULE_ID_BLACKLIST_MGR] = {"blm"}, + [QDF_MODULE_ID_QLD] = {"QLD"}, + [QDF_MODULE_ID_DYNAMIC_MODE_CHG] = {"Dynamic Mode Change"}, + [QDF_MODULE_ID_COEX] = {"COEX"}, + [QDF_MODULE_ID_MON_FILTER] = {"Monitor Filter"}, + [QDF_MODULE_ID_ANY] = {"ANY"}, + [QDF_MODULE_ID_PKT_CAPTURE] = {"pkt_capture"}, + [QDF_MODULE_ID_GPIO] = {"GPIO_CFG"}, +}; +qdf_export_symbol(g_qdf_category_name); + +/** + * qdf_trace_display() - Display trace + * + * Return: None + */ +void qdf_trace_display(void) +{ + QDF_MODULE_ID module_id; + + pr_err(" 1)FATAL 2)ERROR 3)WARN 4)INFO 5)INFO_H 6)INFO_M 7)INFO_L 8)DEBUG\n"); + for (module_id = 0; module_id < QDF_MODULE_ID_MAX; ++module_id) { + pr_err("%2d)%s %s %s %s %s %s %s %s %s\n", + (int)module_id, + g_qdf_category_name[module_id].category_name_str, + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_FATAL) ? "X" : " ", + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_ERROR) ? "X" : " ", + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_WARN) ? "X" : " ", + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_INFO) ? "X" : " ", + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_INFO_HIGH) ? "X" : " ", + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_INFO_MED) ? "X" : " ", + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_INFO_LOW) ? "X" : " ", + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_DEBUG) ? "X" : " "); + } +} +qdf_export_symbol(qdf_trace_display); + +#ifdef QDF_TRACE_PRINT_ENABLE +static inline void print_to_console(char *str_buffer) +{ + pr_err("%s\n", str_buffer); +} +#else + +#define print_to_console(str) +#endif + +#ifdef MULTI_IF_NAME +static const char *qdf_trace_wlan_modname(void) +{ + return MULTI_IF_NAME; +} +#else +static const char *qdf_trace_wlan_modname(void) +{ + return "wlan"; +} +#endif + +void qdf_trace_msg_cmn(unsigned int idx, + QDF_MODULE_ID category, + QDF_TRACE_LEVEL verbose, + const char *str_format, va_list val) +{ + char str_buffer[QDF_TRACE_BUFFER_SIZE]; + int n; + + /* Check if index passed is valid */ + if (idx < 0 || idx >= MAX_PRINT_CONFIG_SUPPORTED) { + pr_info("%s: Invalid index - %d\n", __func__, idx); + return; + } + + /* Check if print control object is in use */ + if (!print_ctrl_obj[idx].in_use) { + pr_info("%s: Invalid print control object\n", __func__); + return; + } + + /* Check if category passed is valid */ + if (category < 0 || category >= MAX_SUPPORTED_CATEGORY) { + pr_info("%s: Invalid category: %d\n", __func__, category); + return; + } + + /* Check if verbose mask is valid */ + if (verbose < 0 || verbose >= QDF_TRACE_LEVEL_MAX) { + pr_info("%s: Invalid verbose level %d\n", __func__, verbose); + return; + } + + /* + * Print the trace message when the desired verbose level is set in + * the desired category for the print control object + */ + if (print_ctrl_obj[idx].cat_info[category].category_verbose_mask & + QDF_TRACE_LEVEL_TO_MODULE_BITMASK(verbose)) { + static const char * const VERBOSE_STR[] = { + [QDF_TRACE_LEVEL_NONE] = "", + [QDF_TRACE_LEVEL_FATAL] = "F", + [QDF_TRACE_LEVEL_ERROR] = "E", + [QDF_TRACE_LEVEL_WARN] = "W", + [QDF_TRACE_LEVEL_INFO] = "I", + [QDF_TRACE_LEVEL_INFO_HIGH] = "IH", + [QDF_TRACE_LEVEL_INFO_MED] = "IM", + [QDF_TRACE_LEVEL_INFO_LOW] = "IL", + [QDF_TRACE_LEVEL_DEBUG] = "D", + [QDF_TRACE_LEVEL_TRACE] = "T", + [QDF_TRACE_LEVEL_ALL] = "" }; + + /* print the prefix string into the string buffer... */ + n = scnprintf(str_buffer, QDF_TRACE_BUFFER_SIZE, + "%s: [%d:%s:%s] ", qdf_trace_wlan_modname(), + in_interrupt() ? 0 : current->pid, + VERBOSE_STR[verbose], + g_qdf_category_name[category].category_name_str); + + /* print the formatted log message after the prefix string */ + vscnprintf(str_buffer + n, QDF_TRACE_BUFFER_SIZE - n, + str_format, val); +#if defined(WLAN_LOGGING_SOCK_SVC_ENABLE) + wlan_log_to_user(verbose, (char *)str_buffer, + strlen(str_buffer)); + if (qdf_likely(qdf_log_dump_at_kernel_enable)) + print_to_console(str_buffer); +#else + pr_err("%s\n", str_buffer); +#endif + } +} +qdf_export_symbol(qdf_trace_msg_cmn); + +QDF_STATUS qdf_print_setup(void) +{ + int i; + + /* Loop through all print ctrl objects */ + for (i = 0; i < MAX_PRINT_CONFIG_SUPPORTED; i++) { + if (qdf_print_ctrl_cleanup(i)) + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_print_setup); + +QDF_STATUS qdf_print_ctrl_cleanup(unsigned int idx) +{ + int i = 0; + + if (idx < 0 || idx >= MAX_PRINT_CONFIG_SUPPORTED) { + pr_info("%s: Invalid index - %d\n", __func__, idx); + return QDF_STATUS_E_FAILURE; + } + + /* Clean up the print control object corresponding to that index + * If success, callee to change print control index to -1 + */ + + for (i = 0; i < MAX_SUPPORTED_CATEGORY; i++) { + print_ctrl_obj[idx].cat_info[i].category_verbose_mask = + QDF_TRACE_LEVEL_NONE; + } + print_ctrl_obj[idx].custom_print = NULL; + print_ctrl_obj[idx].custom_ctxt = NULL; + qdf_print_clean_node_flag(idx); + print_ctrl_obj[idx].in_use = false; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_print_ctrl_cleanup); + +int qdf_print_ctrl_register(const struct category_info *cinfo, + void *custom_print_handler, + void *custom_ctx, + const char *pctrl_name) +{ + int idx = -1; + int i = 0; + + for (i = 0; i < MAX_PRINT_CONFIG_SUPPORTED; i++) { + if (!print_ctrl_obj[i].in_use) { + idx = i; + break; + } + } + + /* Callee to handle idx -1 appropriately */ + if (idx == -1) { + pr_info("%s: Allocation failed! No print control object free\n", + __func__); + return idx; + } + + print_ctrl_obj[idx].in_use = true; + + /* + * In case callee does not pass category info, + * custom print handler, custom context and print control name, + * we do not set any value here. Clean up for the print control + * getting allocated would have taken care of initializing + * default values. + * + * We need to only set in_use to 1 in such a case + */ + + if (pctrl_name) { + qdf_str_lcopy(print_ctrl_obj[idx].name, pctrl_name, + sizeof(print_ctrl_obj[idx].name)); + } + + if (custom_print_handler) + print_ctrl_obj[idx].custom_print = custom_print_handler; + + if (custom_ctx) + print_ctrl_obj[idx].custom_ctxt = custom_ctx; + + if (cinfo) { + for (i = 0; i < MAX_SUPPORTED_CATEGORY; i++) { + if (cinfo[i].category_verbose_mask == + QDF_TRACE_LEVEL_ALL) { + print_ctrl_obj[idx].cat_info[i] + .category_verbose_mask = 0xFFFF; + } else if ((cinfo[i].category_verbose_mask == + QDF_TRACE_LEVEL_NONE) || + (cinfo[i].category_verbose_mask == + QDF_TRACE_LEVEL_TO_MODULE_BITMASK( + QDF_TRACE_LEVEL_NONE))) { + print_ctrl_obj[idx].cat_info[i] + .category_verbose_mask = 0; + } else { + print_ctrl_obj[idx].cat_info[i] + .category_verbose_mask = + cinfo[i].category_verbose_mask; + } + } + } + + return idx; +} +qdf_export_symbol(qdf_print_ctrl_register); + +#ifdef QDF_TRACE_PRINT_ENABLE +void qdf_shared_print_ctrl_cleanup(void) +{ + qdf_print_ctrl_cleanup(qdf_pidx); +} +qdf_export_symbol(qdf_shared_print_ctrl_cleanup); + +/* + * Set this to invalid value to differentiate with user-provided + * value. + */ +int qdf_dbg_mask = QDF_TRACE_LEVEL_MAX; +qdf_export_symbol(qdf_dbg_mask); +qdf_declare_param(qdf_dbg_mask, int); + +/* + * QDF can be passed parameters which indicate the + * debug level for each module. + * an array of string values are passed, each string hold the following form + * + * = + * + * The array qdf_dbg_arr will hold these module-string=value strings + * The variable qdf_dbg_arr_cnt will have the count of how many such + * string values were passed. + */ +static char *qdf_dbg_arr[QDF_MODULE_ID_MAX]; +static int qdf_dbg_arr_cnt; +qdf_declare_param_array(qdf_dbg_arr, charp, &qdf_dbg_arr_cnt); + +static uint16_t set_cumulative_verbose_mask(QDF_TRACE_LEVEL max_level) +{ + uint16_t category_verbose_mask = 0; + QDF_TRACE_LEVEL level; + + for (level = QDF_TRACE_LEVEL_FATAL; level <= max_level; level++) { + category_verbose_mask |= + QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level); + } + return category_verbose_mask; +} + +static QDF_MODULE_ID find_qdf_module_from_string(char *str) +{ + QDF_MODULE_ID mod_id; + + for (mod_id = 0; mod_id < QDF_MODULE_ID_MAX; mod_id++) { + if (strcasecmp(str, + g_qdf_category_name[mod_id].category_name_str) + == 0) { + break; + } + } + return mod_id; +} + +static void process_qdf_dbg_arr_param(struct category_info *cinfo, + int array_index) +{ + char *mod_val_str, *mod_str, *val_str; + unsigned long dbg_level; + QDF_MODULE_ID mod_id; + + mod_val_str = qdf_dbg_arr[array_index]; + mod_str = strsep(&mod_val_str, "="); + val_str = mod_val_str; + if (!val_str) { + pr_info("qdf_dbg_arr: %s not in the = form\n", + mod_str); + return; + } + + mod_id = find_qdf_module_from_string(mod_str); + if (mod_id >= QDF_MODULE_ID_MAX) { + pr_info("ERROR!!Module name %s not in the list of modules\n", + mod_str); + return; + } + + if (kstrtol(val_str, 10, &dbg_level) < 0) { + pr_info("ERROR!!Invalid debug level for module: %s\n", + mod_str); + return; + } + + if (dbg_level >= QDF_TRACE_LEVEL_MAX) { + pr_info("ERROR!!Debug level for %s too high", mod_str); + pr_info("max: %d given %lu\n", QDF_TRACE_LEVEL_MAX, + dbg_level); + return; + } + + pr_info("User passed setting module %s(%d) to level %lu\n", + mod_str, + mod_id, + dbg_level); + cinfo[mod_id].category_verbose_mask = + set_cumulative_verbose_mask((QDF_TRACE_LEVEL)dbg_level); +} + +static void set_default_trace_levels(struct category_info *cinfo) +{ + int i; + static QDF_TRACE_LEVEL module_trace_default_level[QDF_MODULE_ID_MAX] = { + [QDF_MODULE_ID_TDLS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ACS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SCAN_SM] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SCANENTRY] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WDS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ACTION] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ROAM] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_INACT] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DOTH] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_IQUE] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WME] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ACL] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WPA] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_RADKEYS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_RADDUMP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_RADIUS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DOT1XSM] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DOT1X] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_POWER] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_STATE] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_OUTPUT] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SCAN] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_AUTH] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ASSOC] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_NODE] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ELEMID] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_XRATE] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_INPUT] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_CRYPTO] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DUMPPKTS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DEBUG] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_MLME] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_RRM] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WNM] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_P2P_PROT] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_PROXYARP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_L2TIF] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WIFIPOS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WRAP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DFS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ATF] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_SPLITMAC] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_IOCTL] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_NAC] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_MESH] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_MBO] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_EXTIOCTL_CHANSWITCH] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_EXTIOCTL_CHANSSCAN] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_TLSHIM] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WMI] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_HTT] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_HDD] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SME] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_PE] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WMA] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SYS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_QDF] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_SAP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_HDD_SOFTAP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_HDD_DATA] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_HDD_SAP_DATA] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_HIF] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_HTC] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_TXRX] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_QDF_DEVICE] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_CFG] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_BMI] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_EPPING] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_QVIT] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DP] = QDF_TRACE_LEVEL_FATAL, + [QDF_MODULE_ID_HAL] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SOC] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_OS_IF] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_TARGET_IF] = QDF_TRACE_LEVEL_INFO, + [QDF_MODULE_ID_SCHEDULER] = QDF_TRACE_LEVEL_FATAL, + [QDF_MODULE_ID_MGMT_TXRX] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SERIALIZATION] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_PMO] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_P2P] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_POLICY_MGR] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_CONFIG] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_REGULATORY] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SA_API] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_NAN] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_OFFCHAN_TXRX] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SON] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SPECTRAL] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_OBJ_MGR] = QDF_TRACE_LEVEL_FATAL, + [QDF_MODULE_ID_NSS] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_ROAM_DEBUG] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_CDP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DIRECT_BUF_RX] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_DISA] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_GREEN_AP] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_FTM] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_EXTAP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_FD] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_OCB] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_IPA] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ACTION_OUI] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_CP_STATS] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_MBSSIE] = QDF_TRACE_LEVEL_INFO, + [QDF_MODULE_ID_FWOL] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SM_ENGINE] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_CMN_MLME] = QDF_TRACE_LEVEL_INFO, + [QDF_MODULE_ID_BSSCOLOR] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_CFR] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_TX_CAPTURE] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_INTEROP_ISSUES_AP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_BLACKLIST_MGR] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_QLD] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_DYNAMIC_MODE_CHG] = QDF_TRACE_LEVEL_INFO, + [QDF_MODULE_ID_COEX] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_MON_FILTER] = QDF_TRACE_LEVEL_INFO, + [QDF_MODULE_ID_ANY] = QDF_TRACE_LEVEL_INFO, + [QDF_MODULE_ID_PKT_CAPTURE] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_GPIO] = QDF_TRACE_LEVEL_NONE, + }; + + for (i = 0; i < MAX_SUPPORTED_CATEGORY; i++) { + cinfo[i].category_verbose_mask = set_cumulative_verbose_mask( + module_trace_default_level[i]); + } +} + +void qdf_shared_print_ctrl_init(void) +{ + int i; + struct category_info cinfo[MAX_SUPPORTED_CATEGORY]; + + set_default_trace_levels(cinfo); + + /* + * User specified across-module single debug level + */ + if ((qdf_dbg_mask >= 0) && (qdf_dbg_mask < QDF_TRACE_LEVEL_MAX)) { + pr_info("User specified module debug level of %d\n", + qdf_dbg_mask); + for (i = 0; i < MAX_SUPPORTED_CATEGORY; i++) { + cinfo[i].category_verbose_mask = + set_cumulative_verbose_mask(qdf_dbg_mask); + } + } else if (qdf_dbg_mask != QDF_TRACE_LEVEL_MAX) { + pr_info("qdf_dbg_mask value is invalid\n"); + pr_info("Using the default module debug levels instead\n"); + } + + /* + * Module ID-Level specified as array during module load + */ + for (i = 0; i < qdf_dbg_arr_cnt; i++) { + process_qdf_dbg_arr_param(cinfo, i); + } + qdf_pidx = qdf_print_ctrl_register(cinfo, NULL, NULL, + "LOG_SHARED_OBJ"); +} +qdf_export_symbol(qdf_shared_print_ctrl_init); +#endif + +QDF_STATUS qdf_print_set_category_verbose(unsigned int idx, + QDF_MODULE_ID category, + QDF_TRACE_LEVEL verbose, + bool is_set) +{ + /* Check if index passed is valid */ + if (idx < 0 || idx >= MAX_PRINT_CONFIG_SUPPORTED) { + pr_err("%s: Invalid index - %d\n", __func__, idx); + return QDF_STATUS_E_FAILURE; + } + + /* Check if print control object is in use */ + if (!print_ctrl_obj[idx].in_use) { + pr_err("%s: Invalid print control object\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + /* Check if category passed is valid */ + if (category < 0 || category >= MAX_SUPPORTED_CATEGORY) { + pr_err("%s: Invalid category: %d\n", __func__, category); + return QDF_STATUS_E_FAILURE; + } + + /* Check if verbose mask is valid */ + if (verbose < 0 || verbose >= QDF_TRACE_LEVEL_MAX) { + pr_err("%s: Invalid verbose level %d\n", __func__, verbose); + return QDF_STATUS_E_FAILURE; + } + + if (verbose == QDF_TRACE_LEVEL_ALL) { + print_ctrl_obj[idx].cat_info[category].category_verbose_mask = + 0xFFFF; + return QDF_STATUS_SUCCESS; + } + + if (verbose == QDF_TRACE_LEVEL_NONE) { + print_ctrl_obj[idx].cat_info[category].category_verbose_mask = + QDF_TRACE_LEVEL_NONE; + return QDF_STATUS_SUCCESS; + } + + if (!is_set) { + if (print_ctrl_obj[idx].cat_info[category].category_verbose_mask + & QDF_TRACE_LEVEL_TO_MODULE_BITMASK(verbose)) { + print_ctrl_obj[idx].cat_info[category] + .category_verbose_mask &= + ~QDF_TRACE_LEVEL_TO_MODULE_BITMASK(verbose); + } + } else { + print_ctrl_obj[idx].cat_info[category].category_verbose_mask |= + QDF_TRACE_LEVEL_TO_MODULE_BITMASK(verbose); + } + + pr_debug("%s: Print control object %d, Category %d, Verbose level %d\n", + __func__, + idx, + category, + print_ctrl_obj[idx].cat_info[category].category_verbose_mask); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_print_set_category_verbose); + +void qdf_log_dump_at_kernel_level(bool enable) +{ + if (qdf_log_dump_at_kernel_enable == enable) { + QDF_TRACE_INFO(QDF_MODULE_ID_QDF, + "qdf_log_dump_at_kernel_enable is already %d\n", + enable); + } + qdf_log_dump_at_kernel_enable = enable; +} + +qdf_export_symbol(qdf_log_dump_at_kernel_level); + +bool qdf_print_is_category_enabled(unsigned int idx, QDF_MODULE_ID category) +{ + QDF_TRACE_LEVEL verbose_mask; + + /* Check if index passed is valid */ + if (idx < 0 || idx >= MAX_PRINT_CONFIG_SUPPORTED) { + pr_info("%s: Invalid index - %d\n", __func__, idx); + return false; + } + + /* Check if print control object is in use */ + if (!print_ctrl_obj[idx].in_use) { + pr_info("%s: Invalid print control object\n", __func__); + return false; + } + + /* Check if category passed is valid */ + if (category < 0 || category >= MAX_SUPPORTED_CATEGORY) { + pr_info("%s: Invalid category: %d\n", __func__, category); + return false; + } + + verbose_mask = + print_ctrl_obj[idx].cat_info[category].category_verbose_mask; + + if (verbose_mask == QDF_TRACE_LEVEL_NONE) + return false; + else + return true; +} +qdf_export_symbol(qdf_print_is_category_enabled); + +bool qdf_print_is_verbose_enabled(unsigned int idx, QDF_MODULE_ID category, + QDF_TRACE_LEVEL verbose) +{ + bool verbose_enabled = false; + + /* Check if index passed is valid */ + if (idx < 0 || idx >= MAX_PRINT_CONFIG_SUPPORTED) { + pr_info("%s: Invalid index - %d\n", __func__, idx); + return verbose_enabled; + } + + /* Check if print control object is in use */ + if (!print_ctrl_obj[idx].in_use) { + pr_info("%s: Invalid print control object\n", __func__); + return verbose_enabled; + } + + /* Check if category passed is valid */ + if (category < 0 || category >= MAX_SUPPORTED_CATEGORY) { + pr_info("%s: Invalid category: %d\n", __func__, category); + return verbose_enabled; + } + + if ((verbose == QDF_TRACE_LEVEL_NONE) || + (verbose >= QDF_TRACE_LEVEL_MAX)) { + verbose_enabled = false; + } else if (verbose == QDF_TRACE_LEVEL_ALL) { + if (print_ctrl_obj[idx].cat_info[category] + .category_verbose_mask == 0xFFFF) + verbose_enabled = true; + } else { + verbose_enabled = + (print_ctrl_obj[idx].cat_info[category].category_verbose_mask & + QDF_TRACE_LEVEL_TO_MODULE_BITMASK(verbose)) ? true : false; + } + + return verbose_enabled; +} +qdf_export_symbol(qdf_print_is_verbose_enabled); + +#ifdef DBG_LVL_MAC_FILTERING + +QDF_STATUS qdf_print_set_node_flag(unsigned int idx, uint8_t enable) +{ + /* Check if index passed is valid */ + if (idx < 0 || idx >= MAX_PRINT_CONFIG_SUPPORTED) { + pr_info("%s: Invalid index - %d\n", __func__, idx); + return QDF_STATUS_E_FAILURE; + } + + /* Check if print control object is in use */ + if (!print_ctrl_obj[idx].in_use) { + pr_info("%s: Invalid print control object\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (enable > 1) { + pr_info("%s: Incorrect input: Use 1 or 0 to enable or disable\n", + __func__); + return QDF_STATUS_E_FAILURE; + } + + print_ctrl_obj[idx].dbglvlmac_on = enable; + pr_info("%s: DbgLVLmac feature %s\n", + __func__, + ((enable) ? "enabled" : "disabled")); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_print_set_node_flag); + +bool qdf_print_get_node_flag(unsigned int idx) +{ + bool node_flag = false; + + /* Check if index passed is valid */ + if (idx < 0 || idx >= MAX_PRINT_CONFIG_SUPPORTED) { + pr_info("%s: Invalid index - %d\n", __func__, idx); + return node_flag; + } + + /* Check if print control object is in use */ + if (!print_ctrl_obj[idx].in_use) { + pr_info("%s: Invalid print control object\n", __func__); + return node_flag; + } + + if (print_ctrl_obj[idx].dbglvlmac_on) + node_flag = true; + + return node_flag; +} +qdf_export_symbol(qdf_print_get_node_flag); + +void qdf_print_clean_node_flag(unsigned int idx) +{ + /* Disable dbglvlmac_on during cleanup */ + print_ctrl_obj[idx].dbglvlmac_on = 0; +} + +#else + +void qdf_print_clean_node_flag(unsigned int idx) +{ + /* No operation in case of no support for DBG_LVL_MAC_FILTERING */ + return; +} +#endif + +void QDF_PRINT_INFO(unsigned int idx, QDF_MODULE_ID module, + QDF_TRACE_LEVEL level, + char *str_format, ...) +{ + va_list args; + + /* Generic wrapper API will compile qdf_vprint in order to + * log the message. Once QDF converged debug framework is in + * place, this will be changed to adapt to the framework, compiling + * call to converged tracing API + */ + va_start(args, str_format); + qdf_vprint(str_format, args); + va_end(args); +} +qdf_export_symbol(QDF_PRINT_INFO); + +#ifdef WLAN_LOGGING_SOCK_SVC_ENABLE +void qdf_logging_init(void) +{ + wlan_logging_sock_init_svc(); + nl_srv_init(NULL, WLAN_NLINK_PROTO_FAMILY); + wlan_logging_set_flush_timer(qdf_log_flush_timer_period); +} + +void qdf_logging_exit(void) +{ + nl_srv_exit(); + wlan_logging_sock_deinit_svc(); +} + +int qdf_logging_set_flush_timer(uint32_t milliseconds) +{ + if (wlan_logging_set_flush_timer(milliseconds) == 0) + return QDF_STATUS_SUCCESS; + else + return QDF_STATUS_E_FAILURE; +} + +void qdf_logging_flush_logs(void) +{ + wlan_flush_host_logs_for_fatal(); +} + +#else +void qdf_logging_init(void) +{ + nl_srv_init(NULL, WLAN_NLINK_PROTO_FAMILY); +} + +void qdf_logging_exit(void) +{ + nl_srv_exit(); +} + +int qdf_logging_set_flush_timer(uint32_t milliseconds) +{ + return QDF_STATUS_E_FAILURE; +} + +void qdf_logging_flush_logs(void) +{ +} +#endif + +qdf_export_symbol(qdf_logging_set_flush_timer); +qdf_export_symbol(qdf_logging_flush_logs); + +#ifdef CONFIG_KALLSYMS +inline int qdf_sprint_symbol(char *buffer, void *addr) +{ + return sprint_symbol(buffer, (unsigned long)addr); +} +#else +int qdf_sprint_symbol(char *buffer, void *addr) +{ + if (!buffer) + return 0; + + buffer[0] = '\0'; + return 1; +} +#endif +qdf_export_symbol(qdf_sprint_symbol); + +void qdf_set_pidx(int pidx) +{ + qdf_pidx = pidx; +} +qdf_export_symbol(qdf_set_pidx); + +int qdf_get_pidx(void) +{ + return qdf_pidx; +} +qdf_export_symbol(qdf_get_pidx); + +#ifdef PANIC_ON_BUG +#ifdef CONFIG_SLUB_DEBUG +void __qdf_bug(void) +{ + BUG(); +} +qdf_export_symbol(__qdf_bug); +#endif /* CONFIG_SLUB_DEBUG */ +#endif /* PANIC_ON_BUG */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_vfs.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_vfs.c new file mode 100644 index 0000000000000000000000000000000000000000..aed94c608b767b52454e02f81d1ab68a45172c13 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_vfs.c @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_vfs + * This file provides OS dependent virtual fiesystem APIs + */ + +#include "qdf_vfs.h" +#include "qdf_util.h" +#include "qdf_module.h" +#include +#include + +QDF_STATUS +qdf_vfs_set_file_attributes(struct qdf_dev_obj *devobj, + struct qdf_vfs_attr *attr) +{ + int ret; + + if (!devobj || !attr) + return QDF_STATUS_E_INVAL; + + ret = sysfs_create_group((struct kobject *)devobj, + (struct attribute_group *)attr); + + return qdf_status_from_os_return(ret); +} + +qdf_export_symbol(qdf_vfs_set_file_attributes); + +QDF_STATUS +qdf_vfs_clear_file_attributes(struct qdf_dev_obj *devobj, + struct qdf_vfs_attr *attr) +{ + if (!devobj || !attr) + return QDF_STATUS_E_INVAL; + + sysfs_remove_group((struct kobject *)devobj, + (struct attribute_group *)attr); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_vfs_clear_file_attributes); + +QDF_STATUS +qdf_vfs_create_binfile(struct qdf_dev_obj *devobj, struct qdf_vf_bin_attr *attr) +{ + int ret; + + if (!devobj || !attr) + return QDF_STATUS_E_INVAL; + + ret = sysfs_create_bin_file((struct kobject *)devobj, + (struct bin_attribute *)attr); + + return qdf_status_from_os_return(ret); +} + +qdf_export_symbol(qdf_vfs_create_binfile); + +QDF_STATUS +qdf_vfs_delete_binfile(struct qdf_dev_obj *devobj, struct qdf_vf_bin_attr *attr) +{ + if (!devobj || !attr) + return QDF_STATUS_E_INVAL; + + sysfs_remove_bin_file((struct kobject *)devobj, + (struct bin_attribute *)attr); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_vfs_delete_binfile); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_cpuhp.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_cpuhp.c new file mode 100644 index 0000000000000000000000000000000000000000..0573583f3c9fdcfb1020d616280b99cbc770d93e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_cpuhp.c @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_cpuhp (CPU hotplug) + * QCA driver framework (QDF) CPU hotplug APIs + */ + +#include "qdf_cpuhp.h" +#include "i_qdf_cpuhp.h" +#include "qdf_list.h" +#include "qdf_lock.h" + +static qdf_mutex_t qdf_cpuhp_lock; +static qdf_list_t qdf_cpuhp_handlers; + +struct qdf_cpuhp_handler { + qdf_list_node_t node; + void *context; + qdf_cpuhp_callback up_callback; + qdf_cpuhp_callback down_callback; +}; + +static void qdf_cpuhp_on_up(uint32_t cpu) +{ + QDF_STATUS status; + qdf_list_node_t *node; + + qdf_mutex_acquire(&qdf_cpuhp_lock); + + status = qdf_list_peek_front(&qdf_cpuhp_handlers, &node); + while (QDF_IS_STATUS_SUCCESS(status)) { + struct qdf_cpuhp_handler *handler = + qdf_container_of(node, struct qdf_cpuhp_handler, node); + if (handler->up_callback) + handler->up_callback(handler->context, cpu); + + status = qdf_list_peek_next(&qdf_cpuhp_handlers, node, &node); + } + + qdf_mutex_release(&qdf_cpuhp_lock); +} + +static void qdf_cpuhp_on_down(uint32_t cpu) +{ + QDF_STATUS status; + qdf_list_node_t *node; + + qdf_mutex_acquire(&qdf_cpuhp_lock); + + status = qdf_list_peek_front(&qdf_cpuhp_handlers, &node); + while (QDF_IS_STATUS_SUCCESS(status)) { + struct qdf_cpuhp_handler *handler = + qdf_container_of(node, struct qdf_cpuhp_handler, node); + if (handler->down_callback) + handler->down_callback(handler->context, cpu); + + status = qdf_list_peek_next(&qdf_cpuhp_handlers, node, &node); + } + + qdf_mutex_release(&qdf_cpuhp_lock); +} + +QDF_STATUS qdf_cpuhp_init(void) +{ + QDF_STATUS status; + + status = qdf_mutex_create(&qdf_cpuhp_lock); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + qdf_list_create(&qdf_cpuhp_handlers, 0); + + __qdf_cpuhp_os_init(qdf_cpuhp_on_up, qdf_cpuhp_on_down); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS qdf_cpuhp_deinit(void) +{ + __qdf_cpuhp_os_deinit(); + qdf_list_destroy(&qdf_cpuhp_handlers); + return qdf_mutex_destroy(&qdf_cpuhp_lock); +} + +QDF_STATUS qdf_cpuhp_register(struct qdf_cpuhp_handler **out_handler, + void *context, + qdf_cpuhp_callback up_callback, + qdf_cpuhp_callback down_callback) +{ + QDF_STATUS status; + struct qdf_cpuhp_handler *handler; + + *out_handler = NULL; + + handler = qdf_mem_malloc(sizeof(*handler)); + if (!handler) + return QDF_STATUS_E_NOMEM; + + handler->context = context; + handler->up_callback = up_callback; + handler->down_callback = down_callback; + + status = qdf_mutex_acquire(&qdf_cpuhp_lock); + if (QDF_IS_STATUS_ERROR(status)) + goto free_handler; + + status = qdf_list_insert_back(&qdf_cpuhp_handlers, &handler->node); + if (QDF_IS_STATUS_ERROR(status)) + goto release_lock; + + /* this can fail, but there isn't a good way to recover... */ + qdf_mutex_release(&qdf_cpuhp_lock); + + *out_handler = handler; + + return QDF_STATUS_SUCCESS; + +release_lock: + qdf_mutex_release(&qdf_cpuhp_lock); + +free_handler: + qdf_mem_free(handler); + + return status; +} + +void qdf_cpuhp_unregister(struct qdf_cpuhp_handler **out_handler) +{ + struct qdf_cpuhp_handler *handler = *out_handler; + + QDF_BUG(handler); + if (!handler) + return; + + qdf_mutex_acquire(&qdf_cpuhp_lock); + qdf_list_remove_node(&qdf_cpuhp_handlers, &handler->node); + qdf_mutex_release(&qdf_cpuhp_lock); + + qdf_mem_free(handler); + *out_handler = NULL; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_debug_domain.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_debug_domain.c new file mode 100644 index 0000000000000000000000000000000000000000..0230f985fd5cb77c1278811b160e68d1838cfc54 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_debug_domain.c @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_debug_domain + * QCA driver framework (QDF) debug domain APIs. Debug domains are used to track + * resource allocations across different driver states, particularly for runtime + * leak detection. + */ + +#include "qdf_debug_domain.h" +#include "qdf_trace.h" + +static enum qdf_debug_domain qdf_debug_domain_current = QDF_DEBUG_DOMAIN_INIT; + +enum qdf_debug_domain qdf_debug_domain_get(void) +{ + return qdf_debug_domain_current; +} + +void qdf_debug_domain_set(enum qdf_debug_domain domain) +{ + QDF_BUG(qdf_debug_domain_valid(domain)); + if (!qdf_debug_domain_valid(domain)) + return; + + qdf_debug_domain_current = domain; +} + +const char *qdf_debug_domain_name(enum qdf_debug_domain domain) +{ + switch (domain) { + case QDF_DEBUG_DOMAIN_INIT: + return "Init"; + case QDF_DEBUG_DOMAIN_ACTIVE: + return "Active"; + default: + return "Invalid"; + } +} + +bool qdf_debug_domain_valid(enum qdf_debug_domain domain) +{ + return domain >= QDF_DEBUG_DOMAIN_INIT && + domain < QDF_DEBUG_DOMAIN_COUNT; +} diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_flex_mem.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_flex_mem.c new file mode 100644 index 0000000000000000000000000000000000000000..2fcdff6f0cc20883261674749a12b1601c613bc4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_flex_mem.c @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_flex_mem.h" +#include "qdf_list.h" +#include "qdf_lock.h" +#include "qdf_mem.h" +#include "qdf_module.h" +#include "qdf_talloc.h" +#include "qdf_trace.h" +#include "qdf_util.h" + +static struct qdf_flex_mem_segment * +qdf_flex_mem_seg_alloc(struct qdf_flex_mem_pool *pool) +{ + struct qdf_flex_mem_segment *seg; + size_t total_size = sizeof(struct qdf_flex_mem_segment) + + pool->item_size * QDF_FM_BITMAP_BITS; + + seg = qdf_talloc(pool, total_size); + if (!seg) + return NULL; + + seg->dynamic = true; + seg->bytes = (uint8_t *)(seg + 1); + seg->used_bitmap = 0; + qdf_list_insert_back(&pool->seg_list, &seg->node); + + return seg; +} + +void qdf_flex_mem_init(struct qdf_flex_mem_pool *pool) +{ + int i; + + qdf_spinlock_create(&pool->lock); + + for (i = 0; i < pool->reduction_limit; i++) + qdf_flex_mem_seg_alloc(pool); +} +qdf_export_symbol(qdf_flex_mem_init); + +void qdf_flex_mem_deinit(struct qdf_flex_mem_pool *pool) +{ + struct qdf_flex_mem_segment *seg, *next; + + qdf_spinlock_destroy(&pool->lock); + + qdf_list_for_each_del(&pool->seg_list, seg, next, node) { + QDF_BUG(!seg->used_bitmap); + if (seg->used_bitmap) + continue; + + qdf_list_remove_node(&pool->seg_list, &seg->node); + if (seg->dynamic) + qdf_tfree(seg); + } +} +qdf_export_symbol(qdf_flex_mem_deinit); + +static void *__qdf_flex_mem_alloc(struct qdf_flex_mem_pool *pool) +{ + struct qdf_flex_mem_segment *seg; + + qdf_list_for_each(&pool->seg_list, seg, node) { + int index; + void *ptr; + + index = qdf_ffz(seg->used_bitmap); + if (index < 0) + continue; + + QDF_BUG(index < QDF_FM_BITMAP_BITS); + + seg->used_bitmap ^= (QDF_FM_BITMAP)1 << index; + ptr = &seg->bytes[index * pool->item_size]; + qdf_mem_zero(ptr, pool->item_size); + + return ptr; + } + + seg = qdf_flex_mem_seg_alloc(pool); + if (!seg) + return NULL; + + seg->used_bitmap = 1; + + return seg->bytes; +} + +void *qdf_flex_mem_alloc(struct qdf_flex_mem_pool *pool) +{ + void *ptr; + + QDF_BUG(pool); + if (!pool) + return NULL; + + qdf_spin_lock_bh(&pool->lock); + ptr = __qdf_flex_mem_alloc(pool); + qdf_spin_unlock_bh(&pool->lock); + + return ptr; +} +qdf_export_symbol(qdf_flex_mem_alloc); + +static void qdf_flex_mem_seg_free(struct qdf_flex_mem_pool *pool, + struct qdf_flex_mem_segment *seg) +{ + if (!seg->dynamic) + return; + + if (qdf_list_size(&pool->seg_list) <= pool->reduction_limit) + return; + + qdf_list_remove_node(&pool->seg_list, &seg->node); + qdf_tfree(seg); +} + +static void __qdf_flex_mem_free(struct qdf_flex_mem_pool *pool, void *ptr) +{ + struct qdf_flex_mem_segment *seg; + void *low_addr; + void *high_addr; + unsigned long index; + + qdf_list_for_each(&pool->seg_list, seg, node) { + low_addr = seg->bytes; + high_addr = low_addr + pool->item_size * QDF_FM_BITMAP_BITS; + + if (ptr < low_addr || ptr > high_addr) + continue; + + index = (ptr - low_addr) / pool->item_size; + QDF_BUG(index < QDF_FM_BITMAP_BITS); + + seg->used_bitmap ^= (QDF_FM_BITMAP)1 << index; + if (!seg->used_bitmap) + qdf_flex_mem_seg_free(pool, seg); + + return; + } + + QDF_DEBUG_PANIC("Failed to find pointer in segment pool"); +} + +void qdf_flex_mem_free(struct qdf_flex_mem_pool *pool, void *ptr) +{ + QDF_BUG(pool); + if (!pool) + return; + + QDF_BUG(ptr); + if (!ptr) + return; + + qdf_spin_lock_bh(&pool->lock); + __qdf_flex_mem_free(pool, ptr); + qdf_spin_unlock_bh(&pool->lock); +} +qdf_export_symbol(qdf_flex_mem_free); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_hang_event_notifier.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_hang_event_notifier.c new file mode 100644 index 0000000000000000000000000000000000000000..09b1761b65179b47fc63bd3b27bfe2cd6011fb86 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_hang_event_notifier.c @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: qdf_hang_event_notifier + * This file provides OS dependent QDF notifier call for hang event + */ + +#include +#include +#include + +static qdf_atomic_notifier_init(qdf_hang_event_notif_head) + +QDF_STATUS qdf_hang_event_register_notifier(qdf_notif_block *nb) +{ + return qdf_register_atomic_notifier_chain(&qdf_hang_event_notif_head, + nb); +} + +QDF_STATUS qdf_hang_event_unregister_notifier(qdf_notif_block *nb) +{ + return qdf_unregister_atomic_notifier_chain(&qdf_hang_event_notif_head, + nb); +} + +QDF_STATUS qdf_hang_event_notifier_call(unsigned long v, void *data) +{ + return qdf_atomic_notfier_call(&qdf_hang_event_notif_head, + v, data); +} diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_notifier.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_notifier.c new file mode 100644 index 0000000000000000000000000000000000000000..6351831e98ff24d8baa06679d4bbd2e6686797bd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_notifier.c @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include +#include +#include + +QDF_STATUS qdf_register_blocking_notifier_chain(qdf_blocking_notif_head *head, + qdf_notif_block *qnb) +{ + int ret; + + ret = __qdf_register_blocking_notifier_chain(head, &qnb->notif_block); + + return qdf_status_from_os_return(ret); +} + +QDF_STATUS qdf_unregister_blocking_notifier_chain(qdf_blocking_notif_head *head, + qdf_notif_block *qnb) +{ + int ret; + + ret = __qdf_unregister_blocking_notifier_chain(head, + &qnb->notif_block); + + return qdf_status_from_os_return(ret); +} + +QDF_STATUS qdf_blocking_notfier_call(qdf_blocking_notif_head *head, + unsigned long state, void *data) +{ + int ret; + + ret = __qdf_blocking_notfier_call(head, state, data); + + return qdf_status_from_os_return(ret); +} + +QDF_STATUS qdf_register_atomic_notifier_chain(qdf_atomic_notif_head *head, + qdf_notif_block *qnb) +{ + int ret; + + ret = __qdf_register_atomic_notifier_chain(head, &qnb->notif_block); + + return qdf_status_from_os_return(ret); +} + +QDF_STATUS qdf_unregister_atomic_notifier_chain(qdf_atomic_notif_head *head, + qdf_notif_block *qnb) +{ + int ret; + + ret = __qdf_unregister_atomic_notifier_chain(head, &qnb->notif_block); + + return qdf_status_from_os_return(ret); +} + +QDF_STATUS qdf_atomic_notfier_call(qdf_atomic_notif_head *head, + unsigned long v, void *data) +{ + int ret; + + ret = __qdf_atomic_notifier_call(head, v, data); + + return qdf_status_from_os_return(ret); +} diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_parse.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_parse.c new file mode 100644 index 0000000000000000000000000000000000000000..469d59a4fdf10ce4feedecd0e657fe51d9bd0a0c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_parse.c @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_file.h" +#include "qdf_module.h" +#include "qdf_parse.h" +#include "qdf_status.h" +#include "qdf_str.h" +#include "qdf_trace.h" +#include "qdf_types.h" + +QDF_STATUS qdf_ini_parse(const char *ini_path, void *context, + qdf_ini_item_cb item_cb, qdf_ini_section_cb section_cb) +{ + QDF_STATUS status; + char *fbuf; + char *cursor; + int ini_read_count = 0; + + status = qdf_file_read(ini_path, &fbuf); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_err("Failed to read *.ini file @ %s", ini_path); + return status; + } + + /* foreach line */ + cursor = fbuf; + while (*cursor != '\0') { + char *key = cursor; + char *value = NULL; + bool comment = false; + bool eol = false; + + /* + * Look for the end of the line, while noting any + * value ('=') or comment ('#') indicators + */ + while (!eol) { + switch (*cursor) { + case '\r': + case '\n': + *cursor = '\0'; + cursor++; + /* fall through */ + case '\0': + eol = true; + break; + + case '=': + /* + * The first '=' is the value indicator. + * Subsequent '=' are valid value characters. + */ + if (!value && !comment) { + value = cursor + 1; + *cursor = '\0'; + } + + cursor++; + break; + + case '#': + /* + * We don't process comments, so we can null- + * terminate unconditionally here (unlike '='). + */ + comment = true; + *cursor = '\0'; + /* fall through */ + default: + cursor++; + break; + } + } + + key = qdf_str_trim(key); + + /* + * Ignoring comments, a valid ini line contains one of: + * 1) some 'key=value' config item + * 2) section header + * 3) a line containing whitespace + */ + if (value) { + status = item_cb(context, key, value); + if (QDF_IS_STATUS_ERROR(status)) + goto free_fbuf; + else + ini_read_count++; + } else if (key[0] == '[') { + qdf_size_t len = qdf_str_len(key); + + if (key[len - 1] != ']') { + qdf_err("Invalid *.ini syntax '%s'", key); + } else { + key[len - 1] = '\0'; + status = section_cb(context, key + 1); + if (QDF_IS_STATUS_ERROR(status)) + goto free_fbuf; + } + } else if (key[0] != '\0') { + qdf_err("Invalid *.ini syntax '%s'", key); + } + + /* skip remaining EoL characters */ + while (*cursor == '\n' || *cursor == '\r') + cursor++; + } + + qdf_debug("INI values read: %d", ini_read_count); + if (ini_read_count != 0) + status = QDF_STATUS_SUCCESS; + else + status = QDF_STATUS_E_FAILURE; + +free_fbuf: + qdf_file_buf_free(fbuf); + + return status; +} +qdf_export_symbol(qdf_ini_parse); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_platform.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_platform.c new file mode 100644 index 0000000000000000000000000000000000000000..f1e486da74203a9c7e22fe70b9a22583eaed748b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_platform.c @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" +#include "qdf_trace.h" +#include "qdf_platform.h" + +/** + * The following callbacks should be defined static to make sure they are + * initialized to NULL + */ +static qdf_self_recovery_callback self_recovery_cb; +static qdf_is_fw_down_callback is_fw_down_cb; +static qdf_is_driver_unloading_callback is_driver_unloading_cb; +static qdf_is_recovering_callback is_recovering_cb; +static qdf_is_drv_connected_callback is_drv_connected_cb; +static qdf_wmi_send_over_qmi_callback _wmi_send_recv_qmi_cb; +static qdf_is_drv_supported_callback is_drv_supported_cb; +static qdf_recovery_reason_update_callback update_recovery_reason_cb; +static qdf_bus_reg_dump get_bus_reg_dump; + + + +void qdf_register_fw_down_callback(qdf_is_fw_down_callback is_fw_down) +{ + is_fw_down_cb = is_fw_down; +} + +qdf_export_symbol(qdf_register_fw_down_callback); + +bool qdf_is_fw_down(void) +{ + if (!is_fw_down_cb) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "fw down callback is not registered"); + return false; + } + + return is_fw_down_cb(); +} +qdf_export_symbol(qdf_is_fw_down); + +void qdf_register_wmi_send_recv_qmi_callback(qdf_wmi_send_over_qmi_callback + wmi_send_recv_qmi_cb) +{ + _wmi_send_recv_qmi_cb = wmi_send_recv_qmi_cb; +} + +qdf_export_symbol(qdf_register_wmi_send_recv_qmi_callback); + +QDF_STATUS qdf_wmi_send_recv_qmi(void *buf, uint32_t len, void *cb_ctx, + qdf_wmi_recv_qmi_cb wmi_recv_qmi_cb) +{ + if (!_wmi_send_recv_qmi_cb) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "Platform callback for WMI over QMI not registered"); + return QDF_STATUS_E_INVAL; + } + + return _wmi_send_recv_qmi_cb(buf, len, cb_ctx, wmi_recv_qmi_cb); +} + +qdf_export_symbol(qdf_wmi_send_recv_qmi); + +void qdf_register_is_driver_unloading_callback( + qdf_is_driver_unloading_callback callback) +{ + is_driver_unloading_cb = callback; +} + +qdf_export_symbol(qdf_register_is_driver_unloading_callback); + +void qdf_register_self_recovery_callback(qdf_self_recovery_callback callback) +{ + self_recovery_cb = callback; +} + +qdf_export_symbol(qdf_register_self_recovery_callback); + +void __qdf_trigger_self_recovery(void *psoc, enum qdf_hang_reason reason, + const char *func, const uint32_t line) +{ + if (self_recovery_cb) + self_recovery_cb(psoc, reason, func, line); + else + QDF_DEBUG_PANIC_FL(func, line, ""); +} + +qdf_export_symbol(__qdf_trigger_self_recovery); + +void qdf_register_recovering_state_query_callback( + qdf_is_recovering_callback is_recovering) +{ + is_recovering_cb = is_recovering; +} + +bool qdf_is_driver_unloading(void) +{ + if (is_driver_unloading_cb) + return is_driver_unloading_cb(); + return false; +} + +qdf_export_symbol(qdf_is_driver_unloading); + +bool qdf_is_recovering(void) +{ + if (is_recovering_cb) + return is_recovering_cb(); + return false; +} + +qdf_export_symbol(qdf_is_recovering); + +static qdf_op_protect_cb __on_op_protect; +static qdf_op_unprotect_cb __on_op_unprotect; + +void qdf_op_callbacks_register(qdf_op_protect_cb on_protect, + qdf_op_unprotect_cb on_unprotect) +{ + __on_op_protect = on_protect; + __on_op_unprotect = on_unprotect; +} +qdf_export_symbol(qdf_op_callbacks_register); + +int __qdf_op_protect(struct qdf_op_sync **out_sync, const char *func) +{ + if (!__on_op_protect) + return 0; + + return __on_op_protect((void **)out_sync, func); +} +qdf_export_symbol(__qdf_op_protect); + +void __qdf_op_unprotect(struct qdf_op_sync *sync, const char *func) +{ + if (__on_op_unprotect) + __on_op_unprotect(sync, func); +} +qdf_export_symbol(__qdf_op_unprotect); + +void qdf_register_drv_connected_callback(qdf_is_drv_connected_callback + is_drv_connected) +{ + is_drv_connected_cb = is_drv_connected; +} +qdf_export_symbol(qdf_register_drv_connected_callback); + +bool qdf_is_drv_connected(void) +{ + if (!is_drv_connected_cb) { + qdf_err("drv connected callback is not registered"); + return false; + } + + return is_drv_connected_cb(); +} +qdf_export_symbol(qdf_is_drv_connected); + +void qdf_check_state_before_panic(void) +{ + if (!qdf_is_recovering() && !qdf_is_fw_down()) + QDF_BUG(0); +} + +qdf_export_symbol(qdf_check_state_before_panic); + +void qdf_register_drv_supported_callback(qdf_is_drv_supported_callback + is_drv_supported) +{ + is_drv_supported_cb = is_drv_supported; +} + +qdf_export_symbol(qdf_register_drv_supported_callback); + +bool qdf_is_drv_supported(void) +{ + if (!is_drv_supported_cb) { + qdf_err("drv supported callback is not registered"); + return false; + } + + return is_drv_supported_cb(); +} + +qdf_export_symbol(qdf_is_drv_supported); + +void qdf_register_recovery_reason_update(qdf_recovery_reason_update_callback + callback) +{ + update_recovery_reason_cb = callback; +} + +qdf_export_symbol(qdf_register_recovery_reason_update); + +void qdf_recovery_reason_update(enum qdf_hang_reason reason) +{ + if (!update_recovery_reason_cb) + return; + + update_recovery_reason_cb(reason); +} + +qdf_export_symbol(qdf_recovery_reason_update); + +void qdf_register_get_bus_reg_dump(qdf_bus_reg_dump callback) +{ + get_bus_reg_dump = callback; +} + +qdf_export_symbol(qdf_register_get_bus_reg_dump); + +void qdf_get_bus_reg_dump(struct device *dev, uint8_t *buf, uint32_t len) +{ + if (!get_bus_reg_dump) + return; + + get_bus_reg_dump(dev, buf, len); +} + +qdf_export_symbol(qdf_get_bus_reg_dump); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_str.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_str.c new file mode 100644 index 0000000000000000000000000000000000000000..f610397d6babca0a32cbf6deb8d3a71e038b6292 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_str.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_mem.h" +#include "qdf_module.h" +#include "qdf_str.h" +#include "qdf_trace.h" + +QDF_STATUS qdf_str_dup(char **dest, const char *src) +{ + qdf_size_t size; + char *dup; + + *dest = NULL; + + QDF_BUG(src); + if (!src) + return QDF_STATUS_E_INVAL; + + /* size = length + null-terminator */ + size = qdf_str_len(src) + 1; + dup = qdf_mem_malloc(size); + if (!dup) + return QDF_STATUS_E_NOMEM; + + qdf_mem_copy(dup, src, size); + *dest = dup; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_str_dup); + +void qdf_str_right_trim(char *str) +{ + char *end = str + qdf_str_len(str) - 1; + + while (end >= str && qdf_is_space(*end)) + end--; + + end[1] = '\0'; +} +qdf_export_symbol(qdf_str_right_trim); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_talloc.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_talloc.c new file mode 100644 index 0000000000000000000000000000000000000000..22cd11062368b19d66b222faecd44f745ee4b338 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_talloc.c @@ -0,0 +1,442 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_talloc.c + * + * OS-independent talloc implementation + */ + +#ifdef WLAN_TALLOC_DEBUG + +#include "i_qdf_talloc.h" +#include "qdf_hashtable.h" +#include "qdf_list.h" +#include "qdf_mc_timer.h" +#include "qdf_mem.h" +#include "qdf_module.h" +#include "qdf_status.h" +#include "qdf_str.h" +#include "qdf_talloc.h" + +#define QDF_TALLOC_MAX_BYTES __page_size +#define QDF_TALLOC_SLEEP_TIMEOUT_MS 300 +#define QDF_TALLOC_FUNC_NAME_SIZE 48 +#define QDF_TALLOC_HT_BITS 8 /* 256 buckets */ + +static void +__qdf_talloc_log_nomem(const size_t size, const char *func, const uint16_t line) +{ + qdf_nofl_info("Failed to alloc %zuB; via %s():%d", size, func, line); +} + +static void * +__qdf_zalloc_auto(const size_t size, const char *func, const uint16_t line) +{ + unsigned long start, duration; + void *ptr; + + start = qdf_mc_timer_get_system_time(); + ptr = __zalloc_auto(size); + duration = qdf_mc_timer_get_system_time() - start; + + if (duration > QDF_TALLOC_SLEEP_TIMEOUT_MS) + qdf_nofl_info("Alloc slept; %lums, %zuB; via %s():%d", + duration, size, func, line); + + if (!ptr) { + __qdf_talloc_log_nomem(size, func, line); + return NULL; + } + + qdf_mem_kmalloc_inc(__alloc_size(ptr)); + + return ptr; +} + +static void * +__qdf_zalloc_atomic(const size_t size, const char *func, const uint16_t line) +{ + void *ptr; + + ptr = __zalloc_atomic(size); + if (!ptr) { + __qdf_talloc_log_nomem(size, func, line); + return NULL; + } + + qdf_mem_kmalloc_inc(__alloc_size(ptr)); + + return ptr; +} + +static void __qdf_free(const void *ptr) +{ + qdf_mem_kmalloc_dec(__alloc_size(ptr)); + + __free(ptr); +} + +static qdf_ht_declare(__qdf_talloc_meta_ht, QDF_TALLOC_HT_BITS); +static qdf_spinlock_t __qdf_talloc_meta_lock; + +/** + * struct qdf_talloc_parent_meta - parent/children metadata for memory tracking + * @entry: entry for membership in the parent hashtable + * @children: list of associated children + */ +struct qdf_talloc_parent_meta { + struct qdf_ht_entry entry; + uintptr_t key; + qdf_list_t children; +}; + +static struct qdf_talloc_parent_meta * +qdf_talloc_parent_meta_alloc(const void *parent, + const char *func, const uint16_t line) +{ + struct qdf_talloc_parent_meta *pmeta; + + QDF_BUG(qdf_spin_is_locked(&__qdf_talloc_meta_lock)); + + pmeta = __qdf_zalloc_atomic(sizeof(*pmeta), func, line); + if (!pmeta) + return NULL; + + pmeta->key = (uintptr_t)parent; + qdf_list_create(&pmeta->children, 0); + qdf_ht_add(__qdf_talloc_meta_ht, &pmeta->entry, pmeta->key); + + return pmeta; +} + +static void qdf_talloc_parent_meta_free(struct qdf_talloc_parent_meta *pmeta) +{ + QDF_BUG(qdf_spin_is_locked(&__qdf_talloc_meta_lock)); + + qdf_ht_remove(&pmeta->entry); + qdf_list_destroy(&pmeta->children); + __free(pmeta); +} + +static struct qdf_talloc_parent_meta * +qdf_talloc_parent_meta_lookup(const void *parent) +{ + struct qdf_talloc_parent_meta *pmeta; + uintptr_t key = (uintptr_t)parent; + + QDF_BUG(qdf_spin_is_locked(&__qdf_talloc_meta_lock)); + + qdf_ht_get(__qdf_talloc_meta_ht, pmeta, entry, key, key); + + return pmeta; +} + +/** + * struct qdf_talloc_child_meta - talloc child debug information + * @parent: parent pointer used during allocation for leak tracking + * @node: list node for membership in @parent's children list + * @func: name of the function that requested the allocation + * @line: line number of the call site in @func + * @size: size of the allocation in bytes + */ +struct qdf_talloc_child_meta { + const void *parent; + qdf_list_node_t node; + char func[QDF_TALLOC_FUNC_NAME_SIZE]; + uint16_t line; + uint32_t size; + uint32_t guard; +}; + +/** + * struct qdf_talloc_header - talloc debug header information + * @meta: child allocation metadata + * @guard: a known value, used to detect out-of-bounds access + */ +struct qdf_talloc_header { + struct qdf_talloc_child_meta meta; + uint32_t guard; +}; + +/** + * struct qdf_talloc_trailer - talloc debug trailer information + * @guard: a known value, used to detect out-of-bounds access + */ +struct qdf_talloc_trailer { + uint32_t guard; +}; + +static uint32_t QDF_TALLOC_GUARD = 0xaabbeeff; + +#define QDF_TALLOC_DEBUG_SIZE \ + (sizeof(struct qdf_talloc_header) + sizeof(struct qdf_talloc_trailer)) + +static struct qdf_talloc_header *qdf_talloc_header(void *ptr) +{ + return (struct qdf_talloc_header *)ptr - 1; +} + +static void *qdf_talloc_ptr(struct qdf_talloc_header *header) +{ + return header + 1; +} + +static struct qdf_talloc_trailer * +qdf_talloc_trailer(struct qdf_talloc_header *header) +{ + void *ptr = qdf_talloc_ptr(header); + size_t size = header->meta.size; + + return (struct qdf_talloc_trailer *)((uint8_t *)ptr + size); +} + +static void qdf_talloc_meta_init(struct qdf_talloc_header *header, + const void *parent, const size_t size, + const char *func, const uint16_t line) +{ + struct qdf_talloc_trailer *trailer; + + /* copy the function name to support multi-*.ko configurations */ + qdf_str_lcopy(header->meta.func, func, sizeof(header->meta.func)); + header->meta.parent = parent; + header->meta.line = line; + header->meta.size = size; + header->guard = QDF_TALLOC_GUARD; + + trailer = qdf_talloc_trailer(header); + trailer->guard = QDF_TALLOC_GUARD; +} + +static bool qdf_talloc_meta_assert_valid(struct qdf_talloc_header *header, + const char *func, const uint16_t line) +{ + struct qdf_talloc_trailer *trailer = qdf_talloc_trailer(header); + bool is_valid = true; + + if (header->guard != QDF_TALLOC_GUARD) { + qdf_nofl_alert("Corrupted header guard 0x%x (expected 0x%x)", + header->guard, QDF_TALLOC_GUARD); + is_valid = false; + } + + if (header->meta.size > QDF_TALLOC_MAX_BYTES) { + qdf_nofl_alert("Corrupted allocation size %u (expected <= %zu)", + header->meta.size, QDF_TALLOC_MAX_BYTES); + is_valid = false; + } + + if (!qdf_list_node_in_any_list(&header->meta.node)) { + qdf_nofl_alert("Corrupted header node or double free"); + is_valid = false; + } + + if (trailer->guard != QDF_TALLOC_GUARD) { + qdf_nofl_alert("Corrupted trailer guard 0x%x (expected 0x%x)", + trailer->guard, QDF_TALLOC_GUARD); + is_valid = false; + } + + if (!is_valid) + QDF_DEBUG_PANIC("Fatal memory error detected @ %s():%d", + func, line); + + return is_valid; +} + +static void qdf_leaks_print_header(void) +{ + qdf_nofl_alert("-----------------------------------------------------"); + qdf_nofl_alert(" size function():line"); + qdf_nofl_alert("-----------------------------------------------------"); +} + +static uint32_t qdf_leaks_print(const struct qdf_talloc_parent_meta *pmeta) +{ + struct qdf_talloc_child_meta *cmeta; + uint32_t count = 0; + + qdf_list_for_each(&pmeta->children, cmeta, node) { + qdf_nofl_alert("%6uB @ %s():%u", + cmeta->size, cmeta->func, cmeta->line); + count++; + } + + return count; +} + +#define qdf_leaks_panic(count, func, line) \ + QDF_DEBUG_PANIC("%u fatal memory leaks detected @ %s():%u", \ + count, func, line) + +QDF_STATUS qdf_talloc_feature_init(void) +{ + qdf_spinlock_create(&__qdf_talloc_meta_lock); + qdf_ht_init(__qdf_talloc_meta_ht); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_talloc_feature_init); + +void qdf_talloc_feature_deinit(void) +{ + qdf_spin_lock_bh(&__qdf_talloc_meta_lock); + + if (!qdf_ht_empty(__qdf_talloc_meta_ht)) { + struct qdf_talloc_parent_meta *pmeta; + uint32_t count = 0; + int i; + + qdf_leaks_print_header(); + + qdf_ht_for_each(__qdf_talloc_meta_ht, i, pmeta, entry) + count += qdf_leaks_print(pmeta); + + qdf_leaks_panic(count, __func__, __LINE__); + } + + qdf_spin_unlock_bh(&__qdf_talloc_meta_lock); + + qdf_ht_deinit(__qdf_talloc_meta_ht); + qdf_spinlock_destroy(&__qdf_talloc_meta_lock); +} +qdf_export_symbol(qdf_talloc_feature_deinit); + +static QDF_STATUS qdf_talloc_meta_insert(struct qdf_talloc_header *header, + const char *func, const uint16_t line) +{ + struct qdf_talloc_child_meta *cmeta = &header->meta; + struct qdf_talloc_parent_meta *pmeta; + + QDF_BUG(qdf_spin_is_locked(&__qdf_talloc_meta_lock)); + + pmeta = qdf_talloc_parent_meta_lookup(cmeta->parent); + if (!pmeta) + pmeta = qdf_talloc_parent_meta_alloc(cmeta->parent, func, line); + if (!pmeta) + return QDF_STATUS_E_NOMEM; + + qdf_list_insert_back(&pmeta->children, &cmeta->node); + + return QDF_STATUS_SUCCESS; +} + +void *__qdf_talloc_fl(const void *parent, const size_t size, + const char *func, const uint16_t line) +{ + QDF_STATUS status; + struct qdf_talloc_header *header; + + QDF_BUG(parent); + if (!parent) + return NULL; + + QDF_BUG(size <= QDF_TALLOC_MAX_BYTES); + if (size > QDF_TALLOC_MAX_BYTES) + return NULL; + + header = __qdf_zalloc_auto(size + QDF_TALLOC_DEBUG_SIZE, func, line); + if (!header) + return NULL; + + qdf_talloc_meta_init(header, parent, size, func, line); + + qdf_spin_lock_bh(&__qdf_talloc_meta_lock); + status = qdf_talloc_meta_insert(header, func, line); + qdf_spin_unlock_bh(&__qdf_talloc_meta_lock); + + if (QDF_IS_STATUS_ERROR(status)) { + __qdf_free(header); + return NULL; + } + + return qdf_talloc_ptr(header); +} +qdf_export_symbol(__qdf_talloc_fl); + +static void +__qdf_talloc_assert_no_children(const void *parent, + const char *func, const uint16_t line) +{ + struct qdf_talloc_parent_meta *pmeta; + uint32_t count; + + QDF_BUG(qdf_spin_is_locked(&__qdf_talloc_meta_lock)); + + pmeta = qdf_talloc_parent_meta_lookup(parent); + if (!pmeta) + return; + + qdf_leaks_print_header(); + count = qdf_leaks_print(pmeta); + qdf_leaks_panic(count, func, line); +} + +static void qdf_talloc_meta_remove(struct qdf_talloc_header *header, + const char *func, const uint16_t line) +{ + struct qdf_talloc_child_meta *cmeta = &header->meta; + struct qdf_talloc_parent_meta *pmeta; + + QDF_BUG(qdf_spin_is_locked(&__qdf_talloc_meta_lock)); + + __qdf_talloc_assert_no_children(qdf_talloc_ptr(header), func, line); + + pmeta = qdf_talloc_parent_meta_lookup(cmeta->parent); + if (!pmeta) { + QDF_DEBUG_PANIC("double-free or free-no-allocate @ %s():%u", + func, line); + return; + } + + qdf_list_remove_node(&pmeta->children, &cmeta->node); + + if (qdf_list_empty(&pmeta->children)) + qdf_talloc_parent_meta_free(pmeta); +} + +void __qdf_tfree_fl(void *ptr, const char *func, const uint16_t line) +{ + struct qdf_talloc_header *header; + + QDF_BUG(ptr); + if (!ptr) + return; + + header = qdf_talloc_header(ptr); + qdf_talloc_meta_assert_valid(header, func, line); + + qdf_spin_lock_bh(&__qdf_talloc_meta_lock); + qdf_talloc_meta_remove(header, func, line); + qdf_spin_unlock_bh(&__qdf_talloc_meta_lock); + + __qdf_free(header); +} +qdf_export_symbol(__qdf_tfree_fl); + +void qdf_talloc_assert_no_children_fl(const void *parent, + const char *func, const uint16_t line) +{ + qdf_spin_lock_bh(&__qdf_talloc_meta_lock); + __qdf_talloc_assert_no_children(parent, func, line); + qdf_spin_unlock_bh(&__qdf_talloc_meta_lock); +} +qdf_export_symbol(qdf_talloc_assert_no_children_fl); + +#endif /* WLAN_TALLOC_DEBUG */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_tracker.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_tracker.c new file mode 100644 index 0000000000000000000000000000000000000000..2061a4b5537ababfae7434b1b75ae2478246a442 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_tracker.c @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_debug_domain.h" +#include "qdf_lock.h" +#include "qdf_mem.h" +#include "qdf_module.h" +#include "qdf_ptr_hash.h" +#include "qdf_status.h" +#include "qdf_str.h" +#include "qdf_tracker.h" +#include "qdf_types.h" + +struct qdf_tracker_node { + struct qdf_ptr_hash_entry entry; + enum qdf_debug_domain domain; + char func[QDF_TRACKER_FUNC_SIZE]; + uint32_t line; +}; + +void qdf_tracker_init(struct qdf_tracker *tracker) +{ + qdf_spinlock_create(&tracker->lock); + qdf_ptr_hash_init(tracker->ht); +} +qdf_export_symbol(qdf_tracker_init); + +void qdf_tracker_deinit(struct qdf_tracker *tracker) +{ + qdf_tracker_check_for_leaks(tracker); + + qdf_spin_lock_bh(&tracker->lock); + QDF_BUG(qdf_ptr_hash_empty(tracker->ht)); + qdf_spin_unlock_bh(&tracker->lock); + + qdf_ptr_hash_deinit(tracker->ht); + qdf_spinlock_destroy(&tracker->lock); +} +qdf_export_symbol(qdf_tracker_deinit); + +static inline void qdf_tracker_print_break(void) +{ + qdf_nofl_alert("-----------------------------------------------------"); +} + +static uint32_t qdf_tracker_leaks_print(struct qdf_tracker *tracker, + enum qdf_debug_domain domain) +{ + struct qdf_ptr_hash_bucket *bucket; + struct qdf_tracker_node *node; + bool print_header = true; + uint32_t count = 0; + + QDF_BUG(qdf_spin_is_locked(&tracker->lock)); + + qdf_ptr_hash_for_each(tracker->ht, bucket, node, entry) { + if (node->domain != domain) + continue; + + if (print_header) { + print_header = false; + qdf_nofl_alert("%s detected in %s domain!", + tracker->leak_title, + qdf_debug_domain_name(domain)); + qdf_tracker_print_break(); + } + + count++; + qdf_nofl_alert("0x%lx @ %s:%u", node->entry.key, + node->func, node->line); + } + + if (count) + qdf_tracker_print_break(); + + return count; +} + +void qdf_tracker_check_for_leaks(struct qdf_tracker *tracker) +{ + enum qdf_debug_domain domain = qdf_debug_domain_get(); + uint32_t leaks; + + qdf_spin_lock_bh(&tracker->lock); + leaks = qdf_tracker_leaks_print(tracker, domain); + if (leaks) + QDF_DEBUG_PANIC("%u fatal %s detected in %s domain!", + leaks, tracker->leak_title, + qdf_debug_domain_name(domain)); + qdf_spin_unlock_bh(&tracker->lock); +} +qdf_export_symbol(qdf_tracker_check_for_leaks); + +QDF_STATUS qdf_tracker_track(struct qdf_tracker *tracker, void *ptr, + const char *func, uint32_t line) +{ + struct qdf_tracker_node *node; + + QDF_BUG(ptr); + if (!ptr) + return QDF_STATUS_E_INVAL; + + qdf_spin_lock_bh(&tracker->lock); + node = qdf_ptr_hash_get(tracker->ht, ptr, node, entry); + if (node) + QDF_DEBUG_PANIC("Double %s (via %s:%u); last %s from %s:%u", + tracker->track_title, func, line, + tracker->track_title, node->func, node->line); + qdf_spin_unlock_bh(&tracker->lock); + + if (node) + return QDF_STATUS_E_ALREADY; + + node = qdf_mem_malloc(sizeof(*node)); + if (!node) + return QDF_STATUS_E_NOMEM; + + node->domain = qdf_debug_domain_get(); + qdf_str_lcopy(node->func, func, QDF_TRACKER_FUNC_SIZE); + node->line = line; + + qdf_spin_lock_bh(&tracker->lock); + qdf_ptr_hash_add(tracker->ht, ptr, node, entry); + qdf_spin_unlock_bh(&tracker->lock); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_tracker_track); + +void qdf_tracker_untrack(struct qdf_tracker *tracker, void *ptr, + const char *func, uint32_t line) +{ + enum qdf_debug_domain domain = qdf_debug_domain_get(); + struct qdf_tracker_node *node; + + QDF_BUG(ptr); + if (!ptr) + return; + + qdf_spin_lock_bh(&tracker->lock); + node = qdf_ptr_hash_remove(tracker->ht, ptr, node, entry); + if (!node) + QDF_DEBUG_PANIC("Double %s (via %s:%u)", + tracker->untrack_title, func, line); + else if (node->domain != domain) + QDF_DEBUG_PANIC("%s domain mismatch; tracked:%s, %s:%u; untracked:%s , %s:%u", + tracker->untrack_title, + qdf_debug_domain_name(node->domain), + node->func, node->line, + qdf_debug_domain_name(domain), + func, line); + qdf_spin_unlock_bh(&tracker->lock); + + if (node) + qdf_mem_free(node); +} +qdf_export_symbol(qdf_tracker_untrack); + +bool qdf_tracker_lookup(struct qdf_tracker *tracker, void *ptr, + char (*out_func)[QDF_TRACKER_FUNC_SIZE], + uint32_t *out_line) +{ + struct qdf_tracker_node *node; + + qdf_spin_lock_bh(&tracker->lock); + node = qdf_ptr_hash_get(tracker->ht, ptr, node, entry); + if (node) { + qdf_str_lcopy((char *)out_func, node->func, + QDF_TRACKER_FUNC_SIZE); + *out_line = node->line; + } + qdf_spin_unlock_bh(&tracker->lock); + + return !!node; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_types.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_types.c new file mode 100644 index 0000000000000000000000000000000000000000..86325250d25e8bd533676dfefd36316a358d7c30 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_types.c @@ -0,0 +1,765 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_mem.h" +#include "qdf_module.h" +#include "qdf_status.h" +#include "qdf_str.h" +#include "qdf_trace.h" +#include "qdf_types.h" + +const char *qdf_opmode_str(const enum QDF_OPMODE opmode) +{ + switch (opmode) { + case QDF_STA_MODE: + return "STA"; + case QDF_SAP_MODE: + return "SAP"; + case QDF_P2P_CLIENT_MODE: + return "P2P Client"; + case QDF_P2P_GO_MODE: + return "P2P GO"; + case QDF_FTM_MODE: + return "FTM"; + case QDF_IBSS_MODE: + return "IBSS"; + case QDF_MONITOR_MODE: + return "Monitor"; + case QDF_P2P_DEVICE_MODE: + return "P2P Device"; + case QDF_OCB_MODE: + return "OCB"; + case QDF_EPPING_MODE: + return "EPPing"; + case QDF_QVIT_MODE: + return "QVIT"; + case QDF_NDI_MODE: + return "NDI"; + case QDF_WDS_MODE: + return "WDS"; + case QDF_BTAMP_MODE: + return "BTAMP"; + case QDF_AHDEMO_MODE: + return "AHDEMO"; + case QDF_TDLS_MODE: + return "TDLS"; + case QDF_NAN_DISC_MODE: + return "NAN"; + default: + return "Invalid operating mode"; + } +} + +static QDF_STATUS qdf_consume_char(const char **str, char c) +{ + if ((*str)[0] != c) + return QDF_STATUS_E_FAILURE; + + (*str)++; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS qdf_consume_dec(const char **str, uint8_t *out_digit) +{ + uint8_t c = (*str)[0]; + + if (c >= '0' && c <= '9') + *out_digit = c - '0'; + else + return QDF_STATUS_E_FAILURE; + + (*str)++; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS qdf_consume_hex(const char **str, uint8_t *out_nibble) +{ + uint8_t c = (*str)[0]; + + if (c >= '0' && c <= '9') + *out_nibble = c - '0'; + else if (c >= 'a' && c <= 'f') + *out_nibble = c - 'a' + 10; + else if (c >= 'A' && c <= 'F') + *out_nibble = c - 'A' + 10; + else + return QDF_STATUS_E_FAILURE; + + (*str)++; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS qdf_consume_octet_dec(const char **str, uint8_t *out_octet) +{ + uint8_t len = 0; + uint16_t octet = 0; + int i; + + /* consume up to 3 decimal digits */ + for (i = 0; i < 3; i++) { + uint8_t digit; + + if (QDF_IS_STATUS_ERROR(qdf_consume_dec(str, &digit))) + break; + + len++; + octet = octet * 10 + digit; + } + + /* require at least 1 digit */ + if (!len) + return QDF_STATUS_E_FAILURE; + + if (octet > 255) { + (*str) -= len; + return QDF_STATUS_E_FAILURE; + } + + *out_octet = octet; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS qdf_consume_hex_pair(const char **str, uint8_t *out_byte) +{ + QDF_STATUS status; + uint8_t hi, low; + + status = qdf_consume_hex(str, &hi); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = qdf_consume_hex(str, &low); + if (QDF_IS_STATUS_ERROR(status)) { + (*str)--; + return status; + } + + *out_byte = hi << 4 | low; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS qdf_consume_hextet(const char **str, uint16_t *out_hextet) +{ + uint8_t len = 0; + uint16_t hextet = 0; + int i; + + /* consume up to 4 hex digits */ + for (i = 0; i < 4; i++) { + uint8_t digit; + + if (QDF_IS_STATUS_ERROR(qdf_consume_hex(str, &digit))) + break; + + len++; + hextet = (hextet << 4) + digit; + } + + /* require at least 1 digit */ + if (!len) + return QDF_STATUS_E_FAILURE; + + /* no need to check for overflow */ + + *out_hextet = hextet; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS qdf_consume_radix(const char **str, uint8_t *out_radix) +{ + if ((*str)[0] == '0') { + switch ((*str)[1]) { + case 'b': + *out_radix = 2; + *str += 2; + break; + case 'o': + *out_radix = 8; + *str += 2; + break; + case 'x': + *out_radix = 16; + *str += 2; + break; + default: + *out_radix = 10; + break; + } + + return QDF_STATUS_SUCCESS; + } + + if (*str[0] >= '0' && *str[0] <= '9') { + *out_radix = 10; + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +static QDF_STATUS +__qdf_int_parse_lazy(const char **int_str, uint64_t *out_int, bool *out_negate) +{ + QDF_STATUS status; + bool negate = false; + uint8_t radix; + uint8_t digit; + uint64_t value = 0; + uint64_t next_value; + const char *str = *int_str; + + str = qdf_str_left_trim(str); + + status = qdf_consume_char(&str, '-'); + if (QDF_IS_STATUS_SUCCESS(status)) + negate = true; + else + qdf_consume_char(&str, '+'); + + status = qdf_consume_radix(&str, &radix); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + while (QDF_IS_STATUS_SUCCESS(qdf_consume_hex(&str, &digit))) { + if (digit >= radix) + return QDF_STATUS_E_FAILURE; + + next_value = value * radix + digit; + if (next_value < value) + return QDF_STATUS_E_RANGE; + + value = next_value; + } + + *int_str = str; + *out_negate = negate; + *out_int = value; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +qdf_int_parse(const char *int_str, uint64_t *out_int, bool *out_negate) +{ + QDF_STATUS status; + bool negate; + uint64_t value; + + QDF_BUG(int_str); + if (!int_str) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_int); + if (!out_int) + return QDF_STATUS_E_INVAL; + + status = __qdf_int_parse_lazy(&int_str, &value, &negate); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + int_str = qdf_str_left_trim(int_str); + if (int_str[0] != '\0') + return QDF_STATUS_E_FAILURE; + + *out_negate = negate; + *out_int = value; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS qdf_int32_parse(const char *int_str, int32_t *out_int) +{ + QDF_STATUS status; + int64_t value; + + status = qdf_int64_parse(int_str, &value); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + if ((int32_t)value != value) + return QDF_STATUS_E_RANGE; + + *out_int = value; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_int32_parse); + +QDF_STATUS qdf_uint32_parse(const char *int_str, uint32_t *out_int) +{ + QDF_STATUS status; + uint64_t value; + + status = qdf_uint64_parse(int_str, &value); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + if ((uint32_t)value != value) + return QDF_STATUS_E_RANGE; + + *out_int = value; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_uint32_parse); + +QDF_STATUS qdf_int64_parse(const char *int_str, int64_t *out_int) +{ + QDF_STATUS status; + bool negate; + uint64_t value; + int64_t signed_value; + + status = qdf_int_parse(int_str, &value, &negate); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + if (negate) { + signed_value = -value; + if (signed_value > 0) + return QDF_STATUS_E_RANGE; + } else { + signed_value = value; + if (signed_value < 0) + return QDF_STATUS_E_RANGE; + } + + *out_int = signed_value; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_int64_parse); + +QDF_STATUS qdf_uint64_parse(const char *int_str, uint64_t *out_int) +{ + QDF_STATUS status; + bool negate; + uint64_t value; + + status = qdf_int_parse(int_str, &value, &negate); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + if (negate) + return QDF_STATUS_E_RANGE; + + *out_int = value; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_uint64_parse); + +QDF_STATUS qdf_bool_parse(const char *bool_str, bool *out_bool) +{ + bool value; + + QDF_BUG(bool_str); + if (!bool_str) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_bool); + if (!out_bool) + return QDF_STATUS_E_INVAL; + + bool_str = qdf_str_left_trim(bool_str); + + switch (bool_str[0]) { + case '1': + case 'y': + case 'Y': + value = true; + break; + case '0': + case 'n': + case 'N': + value = false; + break; + default: + return QDF_STATUS_E_FAILURE; + } + + bool_str++; + bool_str = qdf_str_left_trim(bool_str); + if (bool_str[0] != '\0') + return QDF_STATUS_E_FAILURE; + + *out_bool = value; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_bool_parse); + +QDF_STATUS qdf_mac_parse(const char *mac_str, struct qdf_mac_addr *out_addr) +{ + QDF_STATUS status; + struct qdf_mac_addr addr; + bool colons; + int i; + + QDF_BUG(mac_str); + if (!mac_str) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_addr); + if (!out_addr) + return QDF_STATUS_E_INVAL; + + mac_str = qdf_str_left_trim(mac_str); + + /* parse leading hex pair */ + status = qdf_consume_hex_pair(&mac_str, &addr.bytes[0]); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + /* dynamically detect colons */ + colons = mac_str[0] == ':'; + + for (i = 1; i < QDF_MAC_ADDR_SIZE; i++) { + /* ensure colon separator if previously detected */ + if (colons) { + status = qdf_consume_char(&mac_str, ':'); + if (QDF_IS_STATUS_ERROR(status)) + return status; + } + + /* parse next hex pair */ + status = qdf_consume_hex_pair(&mac_str, &addr.bytes[i]); + if (QDF_IS_STATUS_ERROR(status)) + return status; + } + + mac_str = qdf_str_left_trim(mac_str); + if (mac_str[0] != '\0') + return QDF_STATUS_E_FAILURE; + + *out_addr = addr; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mac_parse); + +QDF_STATUS qdf_ipv4_parse(const char *ipv4_str, struct qdf_ipv4_addr *out_addr) +{ + QDF_STATUS status; + struct qdf_ipv4_addr addr; + int i; + + QDF_BUG(ipv4_str); + if (!ipv4_str) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_addr); + if (!out_addr) + return QDF_STATUS_E_INVAL; + + ipv4_str = qdf_str_left_trim(ipv4_str); + + /* parse leading octet */ + status = qdf_consume_octet_dec(&ipv4_str, &addr.bytes[0]); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + for (i = 1; i < QDF_IPV4_ADDR_SIZE; i++) { + /* ensure dot separator */ + status = qdf_consume_char(&ipv4_str, '.'); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + /* parse next octet */ + status = qdf_consume_octet_dec(&ipv4_str, &addr.bytes[i]); + if (QDF_IS_STATUS_ERROR(status)) + return status; + } + + ipv4_str = qdf_str_left_trim(ipv4_str); + if (ipv4_str[0] != '\0') + return QDF_STATUS_E_FAILURE; + + *out_addr = addr; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_ipv4_parse); + +static inline void qdf_ipv6_apply_zero_comp(struct qdf_ipv6_addr *addr, + uint8_t hextets, + uint8_t zero_comp_index) +{ + /* Given the following hypothetical ipv6 address: + * |---------------------------------------| + * | 01 | ab | cd | ef | | | | | + * |---------------------------------------| + * ^--- zero_comp_index (2) + * from -----^ + * to ---------------------------^ + * | hextets (4) | + * | zero comp size | + * | to move | + * + * We need to apply the zero compression such that we get: + * |---------------------------------------| + * | 01 | ab | 00 | 00 | 00 | 00 | cd | ef | + * |---------------------------------------| + * | zero comp | + * | moved | + */ + + size_t zero_comp_size = (QDF_IPV6_ADDR_HEXTET_COUNT - hextets) * 2; + size_t bytes_to_move = (hextets - zero_comp_index) * 2; + uint8_t *from = &addr->bytes[zero_comp_index * 2]; + uint8_t *to = from + zero_comp_size; + + if (bytes_to_move) + qdf_mem_move(to, from, bytes_to_move); + + qdf_mem_zero(from, to - from); +} + +QDF_STATUS qdf_ipv6_parse(const char *ipv6_str, struct qdf_ipv6_addr *out_addr) +{ + QDF_STATUS status; + struct qdf_ipv6_addr addr; + int8_t zero_comp = -1; + uint8_t hextets_found = 0; + + QDF_BUG(ipv6_str); + if (!ipv6_str) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_addr); + if (!out_addr) + return QDF_STATUS_E_INVAL; + + ipv6_str = qdf_str_left_trim(ipv6_str); + + /* check for leading zero-compression ("::") */ + status = qdf_consume_char(&ipv6_str, ':'); + if (QDF_IS_STATUS_SUCCESS(status)) { + status = qdf_consume_char(&ipv6_str, ':'); + if (QDF_IS_STATUS_SUCCESS(status)) + zero_comp = 0; + else + return QDF_STATUS_E_FAILURE; + } + + while (hextets_found < QDF_IPV6_ADDR_HEXTET_COUNT) { + uint16_t hextet; + + /* parse hextet */ + status = qdf_consume_hextet(&ipv6_str, &hextet); + if (QDF_IS_STATUS_ERROR(status)) { + /* we must end with hextet or zero compression */ + if (hextets_found != zero_comp) + return QDF_STATUS_E_FAILURE; + + break; + } + + addr.bytes[hextets_found * 2] = hextet >> 8; + addr.bytes[hextets_found * 2 + 1] = hextet; + hextets_found++; + + /* parse ':' char */ + status = qdf_consume_char(&ipv6_str, ':'); + if (QDF_IS_STATUS_ERROR(status)) + break; + + /* check for zero compression ("::") */ + status = qdf_consume_char(&ipv6_str, ':'); + if (QDF_IS_STATUS_SUCCESS(status)) { + /* only one zero compression is allowed */ + if (zero_comp >= 0) + return QDF_STATUS_E_FAILURE; + + zero_comp = hextets_found; + } + } + + ipv6_str = qdf_str_left_trim(ipv6_str); + if (ipv6_str[0] != '\0') + return QDF_STATUS_E_FAILURE; + + /* we must have max hextets or a zero compression, but not both */ + if (hextets_found < QDF_IPV6_ADDR_HEXTET_COUNT) { + if (zero_comp < 0) + return QDF_STATUS_E_FAILURE; + + qdf_ipv6_apply_zero_comp(&addr, hextets_found, zero_comp); + } else if (zero_comp > -1) { + return QDF_STATUS_E_FAILURE; + } + + *out_addr = addr; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_ipv6_parse); + +QDF_STATUS qdf_uint32_array_parse(const char *in_str, uint32_t *out_array, + qdf_size_t array_size, qdf_size_t *out_size) +{ + QDF_STATUS status; + bool negate; + qdf_size_t size = 0; + uint64_t value; + + QDF_BUG(in_str); + if (!in_str) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_array); + if (!out_array) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_size); + if (!out_size) + return QDF_STATUS_E_INVAL; + + while (size < array_size) { + status = __qdf_int_parse_lazy(&in_str, &value, &negate); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + if ((uint32_t)value != value || negate) + return QDF_STATUS_E_RANGE; + + in_str = qdf_str_left_trim(in_str); + + switch (in_str[0]) { + case ',': + out_array[size++] = value; + in_str++; + break; + case '\0': + out_array[size++] = value; + *out_size = size; + return QDF_STATUS_SUCCESS; + default: + return QDF_STATUS_E_FAILURE; + } + } + + return QDF_STATUS_E_FAILURE; +} + +qdf_export_symbol(qdf_uint32_array_parse); + +QDF_STATUS qdf_uint16_array_parse(const char *in_str, uint16_t *out_array, + qdf_size_t array_size, qdf_size_t *out_size) +{ + QDF_STATUS status; + bool negate; + qdf_size_t size = 0; + uint64_t value; + + QDF_BUG(in_str); + if (!in_str) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_array); + if (!out_array) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_size); + if (!out_size) + return QDF_STATUS_E_INVAL; + + while (size < array_size) { + status = __qdf_int_parse_lazy(&in_str, &value, &negate); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + if ((uint16_t)value != value || negate) + return QDF_STATUS_E_RANGE; + + in_str = qdf_str_left_trim(in_str); + + switch (in_str[0]) { + case ',': + out_array[size++] = value; + in_str++; + break; + case '\0': + out_array[size++] = value; + *out_size = size; + return QDF_STATUS_SUCCESS; + default: + return QDF_STATUS_E_FAILURE; + } + } + + return QDF_STATUS_E_FAILURE; +} + +qdf_export_symbol(qdf_uint16_array_parse); + +QDF_STATUS qdf_uint8_array_parse(const char *in_str, uint8_t *out_array, + qdf_size_t array_size, qdf_size_t *out_size) +{ + QDF_STATUS status; + bool negate; + qdf_size_t size = 0; + uint64_t value; + + QDF_BUG(in_str); + if (!in_str) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_array); + if (!out_array) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_size); + if (!out_size) + return QDF_STATUS_E_INVAL; + + while (size < array_size) { + status = __qdf_int_parse_lazy(&in_str, &value, &negate); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + if ((uint8_t)value != value || negate) + return QDF_STATUS_E_RANGE; + + in_str = qdf_str_left_trim(in_str); + + switch (in_str[0]) { + case ',': + out_array[size++] = value; + in_str++; + break; + case '\0': + out_array[size++] = value; + *out_size = size; + return QDF_STATUS_SUCCESS; + default: + return QDF_STATUS_E_FAILURE; + } + } + + return QDF_STATUS_E_FAILURE; +} + +qdf_export_symbol(qdf_uint8_array_parse); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_delayed_work_test.c b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_delayed_work_test.c new file mode 100644 index 0000000000000000000000000000000000000000..7b56827ecde25132be954203a1ba0c86856f7125 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_delayed_work_test.c @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_delayed_work.h" +#include "qdf_delayed_work_test.h" +#include "qdf_trace.h" + +#define dwork_delay_ms 1 + +static void __qdf_dwork_cb(void *context) +{ + bool *flag = context; + + *flag = true; +} + +uint32_t qdf_delayed_work_unit_test(void) +{ + struct qdf_delayed_work dwork; + bool work_ran = false; + QDF_STATUS status; + + status = qdf_delayed_work_create(&dwork, __qdf_dwork_cb, &work_ran); + QDF_BUG(QDF_IS_STATUS_SUCCESS(status)); + + QDF_BUG(qdf_delayed_work_start(&dwork, dwork_delay_ms)); + + while (!work_ran) + schedule(); + + /* flush to avoid races with the next assert */ + qdf_delayed_work_stop_sync(&dwork); + QDF_BUG(!qdf_delayed_work_stop_sync(&dwork)); + + qdf_delayed_work_destroy(&dwork); + + return 0; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_delayed_work_test.h b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_delayed_work_test.h new file mode 100644 index 0000000000000000000000000000000000000000..9fa78a4238062472004aa4dffea2146036bc43d4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_delayed_work_test.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __QDF_DELAYED_WORK_TEST +#define __QDF_DELAYED_WORK_TEST + +#ifdef WLAN_DELAYED_WORK_TEST +/** + * qdf_delayed_work_unit_test() - run the qdf periodic work unit test suite + * + * Return: number of failed test cases + */ +uint32_t qdf_delayed_work_unit_test(void); +#else +static inline uint32_t qdf_delayed_work_unit_test(void) +{ + return 0; +} +#endif /* WLAN_DELAYED_WORK_TEST */ + +#endif /* __QDF_DELAYED_WORK_TEST */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_hashtable_test.c b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_hashtable_test.c new file mode 100644 index 0000000000000000000000000000000000000000..1d1e66ea0e596ea08e44a8ec8593846bf7016c18 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_hashtable_test.c @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_hashtable.h" +#include "qdf_hashtable_test.h" +#include "qdf_trace.h" + +/* 16 buckets */ +#define QDF_HT_HASH_BITS 4 + +struct qdf_ht_test_item { + struct qdf_ht_entry entry; + uintptr_t key; +}; + +static uint32_t qdf_ht_test_single(void) +{ + const int bits = QDF_HT_HASH_BITS; + struct qdf_ht_test_item item = { .key = (uintptr_t)&bits }; + struct qdf_ht_test_item *cursor; + int i, count; + + qdf_ht_declare(ht, QDF_HT_HASH_BITS); + + qdf_ht_init(ht); + qdf_ht_add(ht, &item.entry, item.key); + + qdf_ht_get(ht, cursor, entry, item.key, key); + QDF_BUG(cursor); + QDF_BUG(cursor->key == item.key); + + count = 0; + qdf_ht_for_each(ht, i, cursor, entry) { + QDF_BUG(cursor->key == item.key); + count++; + } + QDF_BUG(count == 1); + + count = 0; + qdf_ht_for_each_in_bucket(ht, cursor, entry, item.key) { + QDF_BUG(cursor->key == item.key); + count++; + } + QDF_BUG(count == 1); + + count = 0; + qdf_ht_for_each_match(ht, cursor, entry, item.key, key) { + QDF_BUG(cursor->key == item.key); + count++; + } + QDF_BUG(count == 1); + + qdf_ht_remove(&item.entry); + + QDF_BUG(qdf_ht_empty(ht)); + + qdf_ht_deinit(ht); + + return 0; +} + +uint32_t qdf_ht_unit_test(void) +{ + uint32_t errors = 0; + + errors += qdf_ht_test_single(); + + return errors; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_hashtable_test.h b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_hashtable_test.h new file mode 100644 index 0000000000000000000000000000000000000000..90dfc9ea0c78e2f58d3ed613ff9a510fa09c83b9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_hashtable_test.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __QDF_HASHTABLE_TEST +#define __QDF_HASHTABLE_TEST + +#ifdef WLAN_HASHTABLE_TEST +/** + * qdf_ht_unit_test() - run the qdf hashtable unit test suite + * + * Return: number of failed test cases + */ +uint32_t qdf_ht_unit_test(void); +#else +static inline uint32_t qdf_ht_unit_test(void) +{ + return 0; +} +#endif /* WLAN_HASHTABLE_TEST */ + +#endif /* __QDF_HASHTABLE_TEST */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_periodic_work_test.c b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_periodic_work_test.c new file mode 100644 index 0000000000000000000000000000000000000000..d3f14554fb2ef116400bef54ece53ce675e86639 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_periodic_work_test.c @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_periodic_work.h" +#include "qdf_periodic_work_test.h" +#include "qdf_trace.h" + +#define pwork_iterations 2 +#define pwork_delay_ms 1 + +struct qdf_pwork_ut_ctx { + struct qdf_periodic_work pwork; + uint32_t count; +}; + +static void __qdf_pwork_inside_cb(void *context) +{ + struct qdf_pwork_ut_ctx *ut_ctx = context; + + /* stop before incrementing; the main thread is looking at @count */ + if (ut_ctx->count + 1 == pwork_iterations) + qdf_periodic_work_stop_async(&ut_ctx->pwork); + + ut_ctx->count++; +} + +static uint32_t qdf_pwork_stop_inside_cb(void) +{ + struct qdf_pwork_ut_ctx ut_ctx = { .count = 0 }; + QDF_STATUS status; + + status = qdf_periodic_work_create(&ut_ctx.pwork, + __qdf_pwork_inside_cb, &ut_ctx); + QDF_BUG(QDF_IS_STATUS_SUCCESS(status)); + + QDF_BUG(qdf_periodic_work_start(&ut_ctx.pwork, pwork_delay_ms)); + + while (ut_ctx.count < pwork_iterations) + schedule(); + + QDF_BUG(!qdf_periodic_work_stop_sync(&ut_ctx.pwork)); + QDF_BUG(ut_ctx.count == pwork_iterations); + + qdf_periodic_work_destroy(&ut_ctx.pwork); + + return 0; +} + +static void __qdf_pwork_outside_cb(void *context) +{ + struct qdf_pwork_ut_ctx *ut_ctx = context; + + ut_ctx->count++; +} + +static uint32_t qdf_pwork_stop_outside_cb(void) +{ + struct qdf_pwork_ut_ctx ut_ctx = { .count = 0 }; + QDF_STATUS status; + + status = qdf_periodic_work_create(&ut_ctx.pwork, + __qdf_pwork_outside_cb, &ut_ctx); + QDF_BUG(QDF_IS_STATUS_SUCCESS(status)); + + QDF_BUG(qdf_periodic_work_start(&ut_ctx.pwork, pwork_delay_ms)); + + while (ut_ctx.count < pwork_iterations) + schedule(); + + QDF_BUG(qdf_periodic_work_stop_sync(&ut_ctx.pwork)); + QDF_BUG(ut_ctx.count >= pwork_iterations); + + qdf_periodic_work_destroy(&ut_ctx.pwork); + + return 0; +} + +uint32_t qdf_periodic_work_unit_test(void) +{ + uint32_t errors = 0; + + errors += qdf_pwork_stop_inside_cb(); + errors += qdf_pwork_stop_outside_cb(); + + return errors; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_periodic_work_test.h b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_periodic_work_test.h new file mode 100644 index 0000000000000000000000000000000000000000..8a768614080a9474247c6d11467c9bd92110a97c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_periodic_work_test.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __QDF_PERIODIC_WORK_TEST +#define __QDF_PERIODIC_WORK_TEST + +#ifdef WLAN_PERIODIC_WORK_TEST +/** + * qdf_periodic_work_unit_test() - run the qdf periodic work unit test suite + * + * Return: number of failed test cases + */ +uint32_t qdf_periodic_work_unit_test(void); +#else +static inline uint32_t qdf_periodic_work_unit_test(void) +{ + return 0; +} +#endif /* WLAN_PERIODIC_WORK_TEST */ + +#endif /* __QDF_PERIODIC_WORK_TEST */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_ptr_hash_test.c b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_ptr_hash_test.c new file mode 100644 index 0000000000000000000000000000000000000000..26c973e0cb8e313beace50979724070820265c9a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_ptr_hash_test.c @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_ptr_hash.h" +#include "qdf_ptr_hash_test.h" +#include "qdf_trace.h" + +#define qdf_ptr_hash_bits 4 /* 16 buckets */ +#define qdf_ptr_hash_entry_count 10 + +struct qdf_ptr_hash_test_item { + uint32_t id; + struct qdf_ptr_hash_entry entry; +}; + +static uint32_t __qdf_ptr_hash_test_empty(struct qdf_ptr_hash *ht) +{ + struct qdf_ptr_hash_test_item *item; + + /* a new ptr_hash should ... */ + + /* ... be empty */ + QDF_BUG(qdf_ptr_hash_empty(ht)); + + /* ... return NULL with get()'d */ + QDF_BUG(!qdf_ptr_hash_get(ht, NULL, item, entry)); + + return 0; +} + +static uint32_t qdf_ptr_hash_test_empty(void) +{ + qdf_ptr_hash_declare_ptr(ht, qdf_ptr_hash_bits); + int errors; + + qdf_ptr_hash_init(ht); + errors = __qdf_ptr_hash_test_empty(ht); + qdf_ptr_hash_deinit(ht); + + return errors; +} + +static uint32_t __qdf_ptr_hash_test_add_remove(struct qdf_ptr_hash *ht) +{ + struct qdf_ptr_hash_test_item items[qdf_ptr_hash_entry_count]; + struct qdf_ptr_hash_test_item *item; + int i; + + /* a ptr_hash with items should ... */ + for (i = 0; i < qdf_ptr_hash_entry_count; i++) { + items[i].id = i; + qdf_ptr_hash_add(ht, &items[i], &items[i], entry); + } + + /* ... not be empty */ + QDF_BUG(!qdf_ptr_hash_empty(ht)); + + /* ... be able to get() all items previously add()'d */ + for (i = 0; i < qdf_ptr_hash_entry_count; i++) { + QDF_BUG(qdf_ptr_hash_get(ht, &items[i], item, entry)); + QDF_BUG(item->id == items[i].id); + } + + /* ... be able to remove() all items previously add()'d */ + for (i = 0; i < qdf_ptr_hash_entry_count; i++) { + QDF_BUG(qdf_ptr_hash_remove(ht, &items[i], item, entry)); + QDF_BUG(item->id == items[i].id); + } + + /* ... be empty after remove()'ing all items */ + QDF_BUG(qdf_ptr_hash_empty(ht)); + + return 0; +} + +static uint32_t qdf_ptr_hash_test_add_remove(void) +{ + qdf_ptr_hash_declare_ptr(ht, qdf_ptr_hash_bits); + int errors; + + qdf_ptr_hash_init(ht); + errors = __qdf_ptr_hash_test_add_remove(ht); + qdf_ptr_hash_deinit(ht); + + return errors; +} + +static uint32_t __qdf_ptr_hash_test_for_each(struct qdf_ptr_hash *ht) +{ + struct qdf_ptr_hash_bucket *bucket; + struct qdf_ptr_hash_test_item items[qdf_ptr_hash_entry_count]; + struct qdf_ptr_hash_test_item *item; + int i; + int count; + + /* a ptr_hash with items should ... */ + for (i = 0; i < qdf_ptr_hash_entry_count; i++) { + items[i].id = i; + qdf_ptr_hash_add(ht, i, &items[i], entry); + } + + /* ... be able to iterate over each item */ + count = 0; + qdf_ptr_hash_for_each(ht, bucket, item, entry) { + QDF_BUG(item->id == items[item->id].id); + count++; + } + QDF_BUG(count == qdf_ptr_hash_entry_count); + + /* ... be able to interate by hash value */ + count = 0; + for (i = 0; i < qdf_ptr_hash_entry_count; i++) { + qdf_ptr_hash_for_each_by_hash(ht, i, item, entry) { + QDF_BUG(item->id == items[item->id].id); + count++; + } + } + QDF_BUG(count >= qdf_ptr_hash_entry_count); + + /* ... be able to interate by key value */ + for (i = 0; i < qdf_ptr_hash_entry_count; i++) { + count = 0; + qdf_ptr_hash_for_each_by_key(ht, i, item, entry) { + QDF_BUG(item->id == items[i].id); + count++; + } + QDF_BUG(count == 1); + } + + /* ... be able to remove each item */ + for (i = 0; i < qdf_ptr_hash_entry_count; i++) { + qdf_ptr_hash_remove(ht, i, item, entry); + QDF_BUG(item); + QDF_BUG(item->id == items[i].id); + } + + /* ... be empty after all items are removed */ + QDF_BUG(qdf_ptr_hash_empty(ht)); + + return 0; +} + +static uint32_t qdf_ptr_hash_test_for_each(void) +{ + qdf_ptr_hash_declare_ptr(ht, qdf_ptr_hash_bits); + int errors; + + qdf_ptr_hash_init(ht); + errors = __qdf_ptr_hash_test_for_each(ht); + qdf_ptr_hash_deinit(ht); + + return errors; +} + +static uint32_t qdf_ptr_hash_test_create_destroy(void) +{ + struct qdf_ptr_hash *ht = qdf_ptr_hash_create(qdf_ptr_hash_bits); + uint32_t errors = 0; + + QDF_BUG(ht); + errors += __qdf_ptr_hash_test_empty(ht); + errors += __qdf_ptr_hash_test_add_remove(ht); + errors += __qdf_ptr_hash_test_for_each(ht); + + qdf_ptr_hash_destroy(ht); + + return errors; +} + +uint32_t qdf_ptr_hash_unit_test(void) +{ + uint32_t errors = 0; + + errors += qdf_ptr_hash_test_empty(); + errors += qdf_ptr_hash_test_add_remove(); + errors += qdf_ptr_hash_test_for_each(); + errors += qdf_ptr_hash_test_create_destroy(); + + return errors; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_ptr_hash_test.h b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_ptr_hash_test.h new file mode 100644 index 0000000000000000000000000000000000000000..713e37d8d4e012b6148724c3ffea18bb42eaaa7c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_ptr_hash_test.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __QDF_PTR_HASH_TEST +#define __QDF_PTR_HASH_TEST + +#ifdef WLAN_PTR_HASH_TEST +/** + * qdf_ptr_hash_unit_test() - run the qdf hashtable unit test suite + * + * Return: number of failed test cases + */ +uint32_t qdf_ptr_hash_unit_test(void); +#else +static inline uint32_t qdf_ptr_hash_unit_test(void) +{ + return 0; +} +#endif /* WLAN_PTR_HASH_TEST */ + +#endif /* __QDF_PTR_HASH_TEST */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_slist_test.c b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_slist_test.c new file mode 100644 index 0000000000000000000000000000000000000000..ca3e987446b458da87668990985741cf695ce709 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_slist_test.c @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_slist.h" +#include "qdf_slist_test.h" +#include "qdf_trace.h" + +struct qdf_slist_test_item { + uint32_t id; + struct qdf_slist_node node; +}; + +#define qdf_slist_node_count 10 + +static uint32_t qdf_slist_test_empty(void) +{ + struct qdf_slist list; + struct qdf_slist_test_item *item; + + /* a new list should ... */ + qdf_slist_init(&list); + + /* ... be empty */ + QDF_BUG(qdf_slist_empty(&list)); + + /* ... return NULL when pop()'d */ + QDF_BUG(!qdf_slist_pop(&list, item, node)); + + qdf_slist_deinit(&list); + + return 0; +} + +static uint32_t qdf_slist_test_push_pop(void) +{ + struct qdf_slist list; + struct qdf_slist_test_item items[qdf_slist_node_count]; + struct qdf_slist_test_item *item; + int i; + + qdf_slist_init(&list); + + /* a list with items should ... */ + for (i = 0; i < qdf_slist_node_count; i++) + qdf_slist_push(&list, &items[i], node); + + /* ... not be empty */ + QDF_BUG(!qdf_slist_empty(&list)); + + /* ... be able to pop() all items previously push()'d */ + for (i = 0; i < qdf_slist_node_count; i++) + QDF_BUG(qdf_slist_pop(&list, item, node)); + + /* ... be empty after pop()'ing all items */ + QDF_BUG(qdf_slist_empty(&list)); + + qdf_slist_deinit(&list); + + return 0; +} + +static uint32_t qdf_slist_test_for_each(void) +{ + struct qdf_slist list; + struct qdf_slist_test_item items[qdf_slist_node_count]; + struct qdf_slist_test_item *prev; + struct qdf_slist_test_item *item; + int i; + + qdf_slist_init(&list); + + /* a list with items should ... */ + for (i = 0; i < qdf_slist_node_count; i++) + qdf_slist_push(&list, &items[i], node); + + /* ... be able to iterate over each item */ + i = 0; + qdf_slist_for_each(&list, item, node) { + item->id = i++; + } + QDF_BUG(i == qdf_slist_node_count); + + /* ... be able to remove each item in the same order */ + i = 0; + qdf_slist_for_each_del(&list, prev, item, node) { + QDF_BUG(item); + QDF_BUG(item->id == i++); + QDF_BUG(qdf_slist_remove(&list, prev, node)->id == item->id); + } + QDF_BUG(i == qdf_slist_node_count); + + /* ... be empty after all items are removed */ + QDF_BUG(!qdf_slist_pop(&list, item, node)); + QDF_BUG(qdf_slist_empty(&list)); + + qdf_slist_deinit(&list); + + return 0; +} + +uint32_t qdf_slist_unit_test(void) +{ + uint32_t errors = 0; + + errors += qdf_slist_test_empty(); + errors += qdf_slist_test_push_pop(); + errors += qdf_slist_test_for_each(); + + return errors; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_slist_test.h b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_slist_test.h new file mode 100644 index 0000000000000000000000000000000000000000..7e85a15c61f9e6ecd041ecb629e92a694af0ae9d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_slist_test.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __QDF_SLIST_TEST_H +#define __QDF_SLIST_TEST_H + +#ifdef WLAN_SLIST_TEST +/** + * qdf_slist_unit_test() - run the qdf slist unit test suite + * + * Return: number of failed test cases + */ +uint32_t qdf_slist_unit_test(void); +#else +static inline uint32_t qdf_slist_unit_test(void) +{ + return 0; +} +#endif /* WLAN_SLIST_TEST */ + +#endif /* __QDF_SLIST_TEST_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_talloc_test.c b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_talloc_test.c new file mode 100644 index 0000000000000000000000000000000000000000..2ff368fe71a9518156f863acc0eb32b02c0a6b8f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_talloc_test.c @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_status.h" +#include "qdf_talloc.h" +#include "qdf_talloc_test.h" +#include "qdf_trace.h" + +static uint32_t qdf_talloc_test_alloc_free(void) +{ + uint32_t value; + uint32_t *root; + uint32_t *child; + + root = &value; + + child = qdf_talloc_type(root, child); + QDF_BUG(child); + + qdf_tfree(child); + + return 0; +} + +static uint32_t qdf_talloc_test_parent_child(void) +{ + uint32_t value; + uint32_t *root; + uint32_t *parent; + uint32_t *child; + + root = &value; + + parent = qdf_talloc_type(root, parent); + QDF_BUG(parent); + + child = qdf_talloc_type(parent, child); + QDF_BUG(child); + + qdf_tfree(child); + qdf_tfree(parent); + + return 0; +} + +uint32_t qdf_talloc_unit_test(void) +{ + uint32_t errors = 0; + + errors += qdf_talloc_test_alloc_free(); + errors += qdf_talloc_test_parent_child(); + + return errors; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_talloc_test.h b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_talloc_test.h new file mode 100644 index 0000000000000000000000000000000000000000..16b42c33723ee7a82b0998041c9f4188a3504638 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_talloc_test.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __QDF_TALLOC_TEST +#define __QDF_TALLOC_TEST + +#ifdef WLAN_TALLOC_TEST +/** + * qdf_talloc_unit_test() - run the qdf talloc unit test suite + * + * Return: number of failed test cases + */ +uint32_t qdf_talloc_unit_test(void); +#else +static inline uint32_t qdf_talloc_unit_test(void) +{ + return 0; +} +#endif /* WLAN_TALLOC_TEST */ + +#endif /* __QDF_TALLOC_TEST */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_tracker_test.c b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_tracker_test.c new file mode 100644 index 0000000000000000000000000000000000000000..b1bd7a8968b8d23ba27506eaaac4e22bad563b10 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_tracker_test.c @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_tracker.h" +#include "qdf_tracker_test.h" +#include "qdf_trace.h" +#include "qdf_types.h" + +#define qdf_ut_tracker_bits 4 /* 16 buckets */ +#define qdf_ut_tracker_item_count 3 +#define qdf_ut_tracker_declare(name) \ + qdf_tracker_declare(name, qdf_ut_tracker_bits, "unit-test leak", \ + "unit-test alloc", "unit-test free") + +static uint32_t qdf_tracker_test_empty(void) +{ + qdf_ut_tracker_declare(tracker); + char func[QDF_TRACKER_FUNC_SIZE]; + uint32_t line; + + /* a new tracker should ... */ + qdf_tracker_init(&tracker); + + /* ... be empty */ + qdf_tracker_check_for_leaks(&tracker); + + /* ... not contain an arbitrary pointer */ + QDF_BUG(!qdf_tracker_lookup(&tracker, &tracker, &func, &line)); + + qdf_tracker_deinit(&tracker); + + return 0; +} + +static uint32_t qdf_tracker_test_add_remove(void) +{ + qdf_ut_tracker_declare(tracker); + bool items[qdf_ut_tracker_item_count]; + QDF_STATUS status; + int i; + + qdf_tracker_init(&tracker); + + /* an empty tracker should track items */ + for (i = 0; i < qdf_ut_tracker_item_count; i++) { + status = qdf_tracker_track(&tracker, items + i, + __func__, __LINE__); + items[i] = QDF_IS_STATUS_SUCCESS(status); + } + + /* a non-empty tracker should find previously added items */ + for (i = 0; i < qdf_ut_tracker_item_count; i++) { + char func[QDF_TRACKER_FUNC_SIZE]; + uint32_t line; + + if (!items[i]) + continue; + + QDF_BUG(qdf_tracker_lookup(&tracker, items + i, &func, &line)); + qdf_tracker_untrack(&tracker, items + i, __func__, __LINE__); + } + + /* a tracker should be empty after all items are untracked*/ + qdf_tracker_check_for_leaks(&tracker); + + qdf_tracker_deinit(&tracker); + + return 0; +} + +uint32_t qdf_tracker_unit_test(void) +{ + uint32_t errors = 0; + + errors += qdf_tracker_test_empty(); + errors += qdf_tracker_test_add_remove(); + + return errors; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_tracker_test.h b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_tracker_test.h new file mode 100644 index 0000000000000000000000000000000000000000..8500f51104390f8db4aa2ec5e5f18b0e919b0fb9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_tracker_test.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __QDF_TRACKER_TEST +#define __QDF_TRACKER_TEST + +#ifdef WLAN_TRACKER_TEST +/** + * qdf_tracker_unit_test() - run the qdf tracker unit test suite + * + * Return: number of failed test cases + */ +uint32_t qdf_tracker_unit_test(void); +#else +static inline uint32_t qdf_tracker_unit_test(void) +{ + return 0; +} +#endif /* WLAN_TRACKER_TEST */ + +#endif /* __QDF_TRACKER_TEST */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_types_test.c b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_types_test.c new file mode 100644 index 0000000000000000000000000000000000000000..d833a76e6515128d9fdaedc71af7663ffb804387 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_types_test.c @@ -0,0 +1,663 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_mem.h" +#include "qdf_trace.h" +#include "qdf_types.h" +#include "qdf_types_test.h" + +#define WHITESPACE "\t\n\r \x20" + +#define ut_bool_pass(str, exp) __ut_bool(str, QDF_STATUS_SUCCESS, exp) +#define ut_bool_fail(str) __ut_bool(str, QDF_STATUS_E_FAILURE, false) + +static uint32_t +__ut_bool(const char *str, QDF_STATUS exp_status, bool exp_value) +{ + bool value; + QDF_STATUS status = qdf_bool_parse(str, &value); + + if (status != exp_status) { + qdf_nofl_alert("FAIL: qdf_bool_parse(\"%s\") -> status %d; expected status %d", + str, status, exp_status); + return 1; + } + + if (QDF_IS_STATUS_ERROR(status)) + return 0; + + if (value != exp_value) { + qdf_nofl_alert("FAIL: qdf_bool_parse(\"%s\") -> %s; expected %s", + str, value ? "true" : "false", + exp_value ? "true" : "false"); + return 1; + } + + return 0; +} + +static uint32_t qdf_types_ut_bool_parse(void) +{ + uint32_t errors = 0; + + errors += ut_bool_pass("1", true); + errors += ut_bool_pass("y", true); + errors += ut_bool_pass("Y", true); + errors += ut_bool_pass("0", false); + errors += ut_bool_pass("n", false); + errors += ut_bool_pass("N", false); + errors += ut_bool_pass(WHITESPACE "1" WHITESPACE, true); + + errors += ut_bool_fail("true"); + errors += ut_bool_fail("false"); + errors += ut_bool_fail("日本"); + + return errors; +} + +#define ut_int32_pass(str, exp) __ut_int32(str, QDF_STATUS_SUCCESS, exp) +#define ut_int32_fail(str, exp_status) __ut_int32(str, exp_status, 0) + +static uint32_t +__ut_int32(const char *str, QDF_STATUS exp_status, int32_t exp_value) +{ + int32_t value; + QDF_STATUS status = qdf_int32_parse(str, &value); + + if (status != exp_status) { + qdf_nofl_alert("FAIL: qdf_int32_parse(\"%s\") -> status %d; expected status %d", + str, status, exp_status); + return 1; + } + + if (QDF_IS_STATUS_ERROR(status)) + return 0; + + if (value != exp_value) { + qdf_nofl_alert("FAIL: qdf_int32_parse(\"%s\") -> %d; expected %d", + str, value, exp_value); + return 1; + } + + return 0; +} + +static uint32_t qdf_types_ut_int32_parse(void) +{ + uint32_t errors = 0; + + errors += ut_int32_pass("1", 1); + errors += ut_int32_pass("+1", 1); + errors += ut_int32_pass("-1", -1); + errors += ut_int32_pass(WHITESPACE "1" WHITESPACE, 1); + errors += ut_int32_fail("1;", QDF_STATUS_E_FAILURE); + errors += ut_int32_pass(" 2147483647", 2147483647); + errors += ut_int32_fail(" 2147483648", QDF_STATUS_E_RANGE); + errors += ut_int32_pass("-2147483648", -2147483647 - 1); + errors += ut_int32_fail("-2147483649", QDF_STATUS_E_RANGE); + errors += ut_int32_fail("日本", QDF_STATUS_E_FAILURE); + + return errors; +} + +#define ut_int64_pass(str, exp) __ut_int64(str, QDF_STATUS_SUCCESS, exp) +#define ut_int64_fail(str, exp_status) __ut_int64(str, exp_status, 0) + +static uint32_t +__ut_int64(const char *str, QDF_STATUS exp_status, int64_t exp_value) +{ + int64_t value; + QDF_STATUS status = qdf_int64_parse(str, &value); + + if (status != exp_status) { + qdf_nofl_alert("FAIL: qdf_int64_parse(\"%s\") -> status %d; expected status %d", + str, status, exp_status); + return 1; + } + + if (QDF_IS_STATUS_ERROR(status)) + return 0; + + if (value != exp_value) { + qdf_nofl_alert("FAIL: qdf_int64_parse(\"%s\") -> %lld; expected %lld", + str, value, exp_value); + return 1; + } + + return 0; +} + +static uint32_t qdf_types_ut_int64_parse(void) +{ + uint32_t errors = 0; + + errors += ut_int64_pass("1", 1); + errors += ut_int64_pass("+1", 1); + errors += ut_int64_pass("-1", -1); + errors += ut_int64_pass(WHITESPACE "1" WHITESPACE, 1); + errors += ut_int64_fail("1;", QDF_STATUS_E_FAILURE); + errors += ut_int64_pass(" 9223372036854775807", 9223372036854775807ll); + errors += ut_int64_fail(" 9223372036854775808", QDF_STATUS_E_RANGE); + errors += ut_int64_pass("-9223372036854775808", + -9223372036854775807ll - 1); + errors += ut_int64_fail("-9223372036854775809", QDF_STATUS_E_RANGE); + errors += ut_int64_fail("日本", QDF_STATUS_E_FAILURE); + + return errors; +} + +#define ut_uint16_array_pass(str, max_size, exp_arr, exp_arr_size) \ +__ut_uint16_array(str, QDF_STATUS_SUCCESS, max_size, exp_arr, exp_arr_size) + +#define ut_uint16_array_fail(str, max_size, exp_status, exp_arr, exp_arr_size)\ +__ut_uint16_array(str, exp_status, max_size, exp_arr, exp_arr_size) + +static uint32_t +__ut_uint16_array(const char *str, QDF_STATUS exp_status, + uint8_t max_array_size, uint16_t *exp_array, + uint8_t exp_array_size) +{ + uint16_t parsed_array[10]; + qdf_size_t parsed_array_size; + QDF_STATUS status; + uint8_t i; + + status = qdf_uint16_array_parse(str, parsed_array, max_array_size, + &parsed_array_size); + + if (status != exp_status) { + qdf_nofl_alert("FAIL: qdf_uint16_array_parse(\"%s\") -> status %d; expected status %d", + str, status, exp_status); + return 1; + } + + if (QDF_IS_STATUS_ERROR(status)) + return 0; + + if (parsed_array_size != exp_array_size) { + qdf_nofl_alert("FAIL: qdf_uint16_array_parse(\"%s\") -> parsed_array_size %zu; exp_array_size %d", + str, parsed_array_size, exp_array_size); + return 1; + } + + for (i = 0; i < exp_array_size; i++) + if (parsed_array[i] != exp_array[i]) { + qdf_nofl_alert("FAIL: qdf_uint16_array_parse(\"%s\") -> parsed_array[%d] %d; exp_array[%d] %d", + str, i, parsed_array[i], i, + exp_array[i]); + return 1; + } + + return 0; +} + +static uint32_t qdf_types_ut_uint16_array_parse(void) +{ + uint32_t errors = 0; + uint16_t exp_array_value[10] = { + 1, 10, 2412, 2417, 100, 65535, 0, 5486, 5180, 9999}; + + errors += ut_uint16_array_pass( + "1, 10, 2412, 2417, 100, 65535, 0, 5486, 5180, 9999", + 10, exp_array_value, 10); + errors += ut_uint16_array_pass( + "+1, +10, +2412, +2417, +100, +65535, 0, +5486, +5180, +9999", + 10, exp_array_value, 10); + errors += ut_uint16_array_fail("1;", 10, QDF_STATUS_E_FAILURE, + exp_array_value, 0); + /* Out of range test where 65536 is out of range */ + errors += ut_uint16_array_fail( + "1, 10, 2412, 2417, 100, 65536, 0, 5486, 5180, 9999", + 10, QDF_STATUS_E_RANGE, exp_array_value, 0); + errors += ut_uint16_array_fail( + "-1, -10, -2412, -2417, -100, -65535, 0, -5486, -5180, -9999", + 10, QDF_STATUS_E_RANGE, exp_array_value, 0); + errors += ut_uint16_array_fail( + "1, 10, 2412, 2417, 100, 日本, 0, 5486, 5180, 9999", + 10, QDF_STATUS_E_FAILURE, exp_array_value, 0); + + return errors; +} + +#define ut_uint32_array_pass(str, max_size, exp_arr, exp_arr_size) \ +__ut_uint32_array(str, QDF_STATUS_SUCCESS, max_size, exp_arr, exp_arr_size) + +#define ut_uint32_array_fail(str, max_size, exp_status, exp_arr, exp_arr_size)\ +__ut_uint32_array(str, exp_status, max_size, exp_arr, exp_arr_size) + +static uint32_t +__ut_uint32_array(const char *str, QDF_STATUS exp_status, + uint8_t max_array_size, uint32_t *exp_array, + uint8_t exp_array_size) +{ + uint32_t parsed_array[10]; + qdf_size_t parsed_array_size; + QDF_STATUS status; + uint8_t i; + + status = qdf_uint32_array_parse(str, parsed_array, max_array_size, + &parsed_array_size); + + if (status != exp_status) { + qdf_nofl_alert("FAIL: qdf_uint32_array_parse(\"%s\") -> status %d; expected status %d", + str, status, exp_status); + return 1; + } + + if (QDF_IS_STATUS_ERROR(status)) + return 0; + + if (parsed_array_size != exp_array_size) { + qdf_nofl_alert("FAIL: qdf_uint32_array_parse(\"%s\") -> parsed_array_size %zu; exp_array_size %d", + str, parsed_array_size, exp_array_size); + return 1; + } + + for (i = 0; i < exp_array_size; i++) + if (parsed_array[i] != exp_array[i]) { + qdf_nofl_alert("FAIL: qdf_uint32_array_parse(\"%s\") -> parsed_array[%d] %d; exp_array[%d] %d", + str, i, parsed_array[i], i, + exp_array[i]); + return 1; + } + + return 0; +} + +static uint32_t qdf_types_ut_uint32_array_parse(void) +{ + uint32_t errors = 0; + uint32_t exp_array_value[10] = { 1, 100, 9997, 899965, 65536, 0, + 4294967295, 268435456, + 2164184149, 999999999}; + + errors += ut_uint32_array_pass( + "1, 100, 9997, 899965, 65536, 0, 4294967295, 268435456, 2164184149, 999999999", + 10, exp_array_value, 10); + errors += ut_uint32_array_pass( + "+1, +100, +9997, +899965, +65536, 0, +4294967295, +268435456, +2164184149, +999999999", + 10, exp_array_value, 10); + errors += ut_uint32_array_fail("1;", 10, QDF_STATUS_E_FAILURE, + exp_array_value, 0); + /* Out of range test where 4294967296 is out of range */ + errors += ut_uint32_array_fail( + "1, 100, 9997, 899965, 65536, 0, 4294967296, 268435456, 2164184149, 999999999", + 10, QDF_STATUS_E_RANGE, exp_array_value, 0); + errors += ut_uint32_array_fail( + "-1, -100, -9997, -899965, -65536, 0, -4294967295, -268435456, -2164184149, -999999999", + 10, QDF_STATUS_E_RANGE, exp_array_value, 0); + errors += ut_uint32_array_fail( + "1, 100, 9997, 899965, 65536, 日本, 0, 4294967295, 268435456, 999999999", + 10, QDF_STATUS_E_FAILURE, exp_array_value, 0); + + return errors; +} + +#define ut_uint32_pass(str, exp) __ut_uint32(str, QDF_STATUS_SUCCESS, exp) +#define ut_uint32_fail(str, exp_status) __ut_uint32(str, exp_status, 0) + +static uint32_t +__ut_uint32(const char *str, QDF_STATUS exp_status, uint32_t exp_value) +{ + uint32_t value; + QDF_STATUS status = qdf_uint32_parse(str, &value); + + if (status != exp_status) { + qdf_nofl_alert("FAIL: qdf_uint32_parse(\"%s\") -> status %d; expected status %d", + str, status, exp_status); + return 1; + } + + if (QDF_IS_STATUS_ERROR(status)) + return 0; + + if (value != exp_value) { + qdf_nofl_alert("FAIL: qdf_uint32_parse(\"%s\") -> %d; expected %d", + str, value, exp_value); + return 1; + } + + return 0; +} + +static uint32_t qdf_types_ut_uint32_parse(void) +{ + uint32_t errors = 0; + + errors += ut_uint32_pass("1", 1); + errors += ut_uint32_pass("+1", 1); + errors += ut_uint32_pass(WHITESPACE "1" WHITESPACE, 1); + errors += ut_uint32_fail("1;", QDF_STATUS_E_FAILURE); + errors += ut_uint32_pass("4294967295", 4294967295U); + errors += ut_uint32_fail("4294967296", QDF_STATUS_E_RANGE); + errors += ut_uint32_pass(" 0", 0); + errors += ut_uint32_fail("-1", QDF_STATUS_E_RANGE); + errors += ut_uint32_fail("日本", QDF_STATUS_E_FAILURE); + + return errors; +} + +#define ut_uint64_pass(str, exp) __ut_uint64(str, QDF_STATUS_SUCCESS, exp) +#define ut_uint64_fail(str, exp_status) __ut_uint64(str, exp_status, 0) + +static uint32_t +__ut_uint64(const char *str, QDF_STATUS exp_status, uint64_t exp_value) +{ + uint64_t value; + QDF_STATUS status = qdf_uint64_parse(str, &value); + + if (status != exp_status) { + qdf_nofl_alert("FAIL: qdf_uint64_parse(\"%s\") -> status %d; expected status %d", + str, status, exp_status); + return 1; + } + + if (QDF_IS_STATUS_ERROR(status)) + return 0; + + if (value != exp_value) { + qdf_nofl_alert("FAIL: qdf_uint64_parse(\"%s\") -> %llu; expected %llu", + str, value, exp_value); + return 1; + } + + return 0; +} + +static uint32_t qdf_types_ut_uint64_parse(void) +{ + uint32_t errors = 0; + + errors += ut_uint64_pass("1", 1); + errors += ut_uint64_pass("+1", 1); + errors += ut_uint64_pass(WHITESPACE "1" WHITESPACE, 1); + errors += ut_uint64_fail("1;", QDF_STATUS_E_FAILURE); + errors += ut_uint64_pass("18446744073709551615", + 18446744073709551615ull); + errors += ut_uint64_fail("18446744073709551616", QDF_STATUS_E_RANGE); + errors += ut_uint64_pass(" 0", 0); + errors += ut_uint64_fail("-1", QDF_STATUS_E_RANGE); + errors += ut_uint64_fail("日本", QDF_STATUS_E_FAILURE); + + return errors; +} + +static uint32_t qdf_types_ut_int_formats_parse(void) +{ + uint32_t errors = 0; + + errors += ut_uint64_pass("0b01", 1); + errors += ut_uint64_pass("0o01234567", 342391); + errors += ut_uint64_pass("0123456789", 123456789); + errors += ut_uint64_pass("0x0123456789abcdef", 81985529216486895ll); + + errors += ut_uint64_fail("0b012", QDF_STATUS_E_FAILURE); + errors += ut_uint64_fail("0o012345678", QDF_STATUS_E_FAILURE); + errors += ut_uint64_fail("0123456789a", QDF_STATUS_E_FAILURE); + errors += ut_uint64_fail("0x0123456789abcdefg", QDF_STATUS_E_FAILURE); + + return errors; +} + +#define ut_mac_pass(str, exp) __ut_mac(str, #str, QDF_STATUS_SUCCESS, &(exp)) +#define ut_mac_fail(str) __ut_mac(str, #str, QDF_STATUS_E_FAILURE, NULL) + +static uint32_t +__ut_mac(const char *str, const char *display_str, QDF_STATUS exp_status, + struct qdf_mac_addr *exp_value) +{ + struct qdf_mac_addr value; + QDF_STATUS status = qdf_mac_parse(str, &value); + + if (status != exp_status) { + qdf_nofl_alert("FAIL: qdf_mac_parse(%s) -> status %d; expected status %d", + display_str, status, exp_status); + return 1; + } + + if (QDF_IS_STATUS_ERROR(status)) + return 0; + + if (qdf_mem_cmp(&value, exp_value, sizeof(value))) { + qdf_nofl_alert("FAIL: qdf_mac_parse(%s) -> " QDF_FULL_MAC_FMT + "; expected " QDF_FULL_MAC_FMT, + display_str, + QDF_FULL_MAC_REF(value.bytes), + QDF_FULL_MAC_REF(exp_value->bytes)); + return 1; + } + + return 0; +} + +static uint32_t qdf_types_ut_mac_parse(void) +{ + uint32_t errors = 0; + struct qdf_mac_addr addr_aabbccddeeff = { { + 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff } }; + struct qdf_mac_addr addr_0123456789ab = { { + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab } }; + + errors += ut_mac_fail(""); + errors += ut_mac_fail("test"); + errors += ut_mac_fail("¥円"); + errors += ut_mac_pass("aabbccddeeff", addr_aabbccddeeff); + errors += ut_mac_pass("AABBCCDDEEFF", addr_aabbccddeeff); + errors += ut_mac_fail("aa:bbccddeeff"); + errors += ut_mac_fail("aabbccddee:ff"); + errors += ut_mac_pass("aa:bb:cc:dd:ee:ff", addr_aabbccddeeff); + errors += ut_mac_pass("01:23:45:67:89:ab", addr_0123456789ab); + errors += ut_mac_fail("01:23:45:67:89:ab:cd:ef"); + errors += ut_mac_fail("01:23:45\0:67:89:ab"); + errors += ut_mac_pass(WHITESPACE "01:23:45:67:89:ab" WHITESPACE, + addr_0123456789ab); + errors += ut_mac_pass("01:23:45:67:89:ab\n", addr_0123456789ab); + errors += ut_mac_fail("01:23:45:67:89:ab\t ,"); + + return errors; +} + +#define ut_ipv4_pass(str, exp) __ut_ipv4(str, #str, QDF_STATUS_SUCCESS, &(exp)) +#define ut_ipv4_fail(str) __ut_ipv4(str, #str, QDF_STATUS_E_FAILURE, NULL) + +static uint32_t +__ut_ipv4(const char *str, const char *display_str, QDF_STATUS exp_status, + struct qdf_ipv4_addr *exp_value) +{ + struct qdf_ipv4_addr value; + QDF_STATUS status = qdf_ipv4_parse(str, &value); + + if (status != exp_status) { + qdf_nofl_alert("FAIL: qdf_ipv4_parse(%s) -> status %d; expected status %d", + display_str, status, exp_status); + return 1; + } + + if (QDF_IS_STATUS_ERROR(status)) + return 0; + + if (qdf_mem_cmp(&value, exp_value, sizeof(value))) { + qdf_nofl_alert("FAIL: qdf_ipv4_parse(%s) -> " QDF_IPV4_ADDR_STR + "; expected " QDF_IPV4_ADDR_STR, + display_str, + QDF_IPV4_ADDR_ARRAY(value.bytes), + QDF_IPV4_ADDR_ARRAY(exp_value->bytes)); + return 1; + } + + return 0; +} + +static uint32_t qdf_types_ut_ipv4_parse(void) +{ + uint32_t errors = 0; + struct qdf_ipv4_addr addr_0000 = { { 0, 0, 0, 0 } }; + struct qdf_ipv4_addr addr_127001 = { { 127, 0, 0, 1 } }; + struct qdf_ipv4_addr addr_0112123 = { { 0, 1, 12, 123 } }; + struct qdf_ipv4_addr addr_255255255255 = { { 255, 255, 255, 255 } }; + + errors += ut_ipv4_fail(""); + errors += ut_ipv4_fail("test"); + errors += ut_ipv4_fail("¥円"); + errors += ut_ipv4_pass("0.0.0.0", addr_0000); + errors += ut_ipv4_pass("127.0.0.1", addr_127001); + errors += ut_ipv4_pass("255.255.255.255", addr_255255255255); + errors += ut_ipv4_fail(".0.0.1"); + errors += ut_ipv4_fail("127.0.0."); + errors += ut_ipv4_fail("abc.123.123.123"); + errors += ut_ipv4_fail("256.0.0.0"); + errors += ut_ipv4_pass("0.1.12.123", addr_0112123); + errors += ut_ipv4_pass(WHITESPACE "0.1.12.123" WHITESPACE, + addr_0112123); + errors += ut_ipv4_fail("0.1.12\0.123"); + errors += ut_ipv4_fail("0.1.12.123 ,"); + + return errors; +} + +#define ut_ipv6_pass(str, exp) __ut_ipv6(str, #str, QDF_STATUS_SUCCESS, &(exp)) +#define ut_ipv6_fail(str) __ut_ipv6(str, #str, QDF_STATUS_E_FAILURE, NULL) + +static uint32_t +__ut_ipv6(const char *str, const char *display_str, QDF_STATUS exp_status, + struct qdf_ipv6_addr *exp_value) +{ + struct qdf_ipv6_addr value; + QDF_STATUS status = qdf_ipv6_parse(str, &value); + + if (status != exp_status) { + qdf_nofl_alert("FAIL: qdf_ipv6_parse(%s) -> status %d; expected status %d", + display_str, status, exp_status); + return 1; + } + + if (QDF_IS_STATUS_ERROR(status)) + return 0; + + if (qdf_mem_cmp(&value, exp_value, sizeof(value))) { + qdf_nofl_alert("FAIL: qdf_ipv6_parse(%s) -> " QDF_IPV6_ADDR_STR + "; expected " QDF_IPV6_ADDR_STR, + display_str, + QDF_IPV6_ADDR_ARRAY(value.bytes), + QDF_IPV6_ADDR_ARRAY(exp_value->bytes)); + return 1; + } + + return 0; +} + +static uint32_t qdf_types_ut_ipv6_parse(void) +{ + uint32_t errors = 0; + struct qdf_ipv6_addr addr_00000000000000000000000000000000 = { { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + } }; + struct qdf_ipv6_addr addr_00000000000000000000000000000001 = { { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } }; + struct qdf_ipv6_addr addr_00010000000000000000000000000000 = { { + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + } }; + struct qdf_ipv6_addr addr_0123456789abcdefabcdef0123456789 = { { + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, + 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, + } }; + struct qdf_ipv6_addr addr_20010db885a3000000008a2e03707334 = { { + 0x20, 0x01, 0x0d, 0xb8, 0x85, 0xa3, 0x00, 0x00, + 0x00, 0x00, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x34, + } }; + struct qdf_ipv6_addr addr_ff020000000000000000000000000001 = { { + 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } }; + struct qdf_ipv6_addr addr_00000000000000000000ffffc0000280 = { { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xc0, 0x00, 0x02, 0x80, + } }; + struct qdf_ipv6_addr addr_00010000000000000000000000000001 = { { + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } }; + + errors += ut_ipv6_fail(""); + errors += ut_ipv6_fail("test"); + errors += ut_ipv6_fail("¥円"); + errors += ut_ipv6_pass("::", + addr_00000000000000000000000000000000); + errors += ut_ipv6_pass("::0", + addr_00000000000000000000000000000000); + errors += ut_ipv6_pass("0:0:0:0:0:0:0:0", + addr_00000000000000000000000000000000); + errors += ut_ipv6_pass("::1", + addr_00000000000000000000000000000001); + errors += ut_ipv6_pass("1::", + addr_00010000000000000000000000000000); + errors += ut_ipv6_pass("0:0:0:0:0:0:0:1", + addr_00000000000000000000000000000001); + errors += ut_ipv6_pass("0123:4567:89ab:cdef:ABCD:EF01:2345:6789", + addr_0123456789abcdefabcdef0123456789); + errors += ut_ipv6_fail("::0123:4567:89ab:cdef:ABCD:EF01:2345:6789"); + errors += ut_ipv6_fail("0123:4567:89ab:cdef:ABCD:EF01:2345:6789::"); + errors += ut_ipv6_pass("2001:0db8:85a3:0000:0000:8a2e:0370:7334", + addr_20010db885a3000000008a2e03707334); + errors += ut_ipv6_pass("2001:db8:85a3:0:0:8a2e:370:7334", + addr_20010db885a3000000008a2e03707334); + errors += ut_ipv6_pass("2001:db8:85a3::8a2e:370:7334", + addr_20010db885a3000000008a2e03707334); + errors += ut_ipv6_pass("ff02::1", + addr_ff020000000000000000000000000001); + errors += ut_ipv6_pass("::ffff:c000:0280", + addr_00000000000000000000ffffc0000280); + errors += ut_ipv6_fail(":0:0:0:0:0:0:1"); + errors += ut_ipv6_fail(":0:0::0:0:1"); + errors += ut_ipv6_fail("0:0:0:0:0:0:0:"); + errors += ut_ipv6_fail("0:0:0::0:0:"); + errors += ut_ipv6_fail("0:0::0:0::0:0"); + errors += ut_ipv6_fail("xyz::zyx"); + errors += ut_ipv6_pass(WHITESPACE "1::1" WHITESPACE, + addr_00010000000000000000000000000001); + errors += ut_ipv6_fail("1\0::1"); + errors += ut_ipv6_fail("1::1 ,"); + errors += ut_ipv6_fail("abcd"); + + return errors; +} + +uint32_t qdf_types_unit_test(void) +{ + uint32_t errors = 0; + + errors += qdf_types_ut_bool_parse(); + errors += qdf_types_ut_int32_parse(); + errors += qdf_types_ut_int64_parse(); + errors += qdf_types_ut_uint32_parse(); + errors += qdf_types_ut_uint64_parse(); + errors += qdf_types_ut_int_formats_parse(); + errors += qdf_types_ut_mac_parse(); + errors += qdf_types_ut_ipv4_parse(); + errors += qdf_types_ut_ipv6_parse(); + errors += qdf_types_ut_uint16_array_parse(); + errors += qdf_types_ut_uint32_array_parse(); + + return errors; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_types_test.h b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_types_test.h new file mode 100644 index 0000000000000000000000000000000000000000..198e0d71c6fc0f6429aef95ac203f1afe8018bb9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/test/qdf_types_test.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __QDF_TYPES_TEST +#define __QDF_TYPES_TEST + +#ifdef WLAN_TYPES_TEST +/** + * qdf_types_unit_test() - run the qdf types unit test suite + * + * Return: number of failed test cases + */ +uint32_t qdf_types_unit_test(void); +#else +static inline uint32_t qdf_types_unit_test(void) +{ + return 0; +} +#endif /* WLAN_TYPES_TEST */ + +#endif /* __QDF_TYPES_TEST */ + diff --git a/drivers/staging/qca-wifi-host-cmn/scheduler/inc/scheduler_api.h b/drivers/staging/qca-wifi-host-cmn/scheduler/inc/scheduler_api.h new file mode 100644 index 0000000000000000000000000000000000000000..fa1377575300d63ce8aaf58418d5a6530ddeed71 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/scheduler/inc/scheduler_api.h @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(__SCHEDULER_API_H) +#define __SCHEDULER_API_H + +#include +#include +#include +#include +#include + +/* Controller thread various event masks + * MC_POST_EVENT_MASK: wake up thread after posting message + * MC_SUSPEND_EVENT_MASK: signal thread to suspend during kernel pm suspend + * MC_SHUTDOWN_EVENT_MASK: signal thread to shutdown and exit during unload + */ +#define MC_POST_EVENT_MASK 0x001 +#define MC_SUSPEND_EVENT_MASK 0x002 +#define MC_SHUTDOWN_EVENT_MASK 0x010 + +/* + * Cookie for timer messages. Note that anyone posting a timer message + * has to write the COOKIE in the reserved field of the message. The + * timer queue handler relies on this COOKIE + */ +#define SYS_MSG_COOKIE 0xFACE + +#define scheduler_get_src_id(qid) (((qid) >> 20) & 0x3FF) +#define scheduler_get_dest_id(qid) (((qid) >> 10) & 0x3FF) +#define scheduler_get_que_id(qid) ((qid) & 0x3FF) +#define scheduler_get_qid(src, dest, que_id) ((que_id) | ((dest) << 10) |\ + ((src) << 20)) + +typedef enum { + SYS_MSG_ID_MC_TIMER, + SYS_MSG_ID_FTM_RSP, + SYS_MSG_ID_QVIT, + SYS_MSG_ID_DATA_STALL_MSG, + SYS_MSG_ID_UMAC_STOP, +} SYS_MSG_ID; + +/** + * struct scheduler_msg: scheduler message structure + * @type: message type + * @reserved: reserved field + * @bodyval: message body val + * @bodyptr: message body pointer based on the type either a bodyptr pointer + * into memory or bodyval as a 32 bit data is used. bodyptr is always a + * freeable pointer, one should always make sure that bodyptr is always + * freeable. + * Messages should use either bodyptr or bodyval; not both !!! + * @callback: callback to be called by scheduler thread once message is posted + * and scheduler thread has started processing the message. + * @flush_callback: flush callback which will be invoked during driver unload + * such that component can release the ref count of common global objects + * like PSOC, PDEV, VDEV and PEER. A component needs to populate flush + * callback in message body pointer for those messages which have taken ref + * count for above mentioned common objects. + * @node: list node for queue membership + * @queue_id: Id of the queue the message was added to + * @queue_depth: depth of the queue when the message was queued + * @queued_at_us: timestamp when the message was queued in microseconds + */ +struct scheduler_msg { + uint16_t type; + uint16_t reserved; + uint32_t bodyval; + void *bodyptr; + void *callback; + void *flush_callback; + qdf_list_node_t node; +#ifdef WLAN_SCHED_HISTORY_SIZE + QDF_MODULE_ID queue_id; + uint32_t queue_depth; + uint64_t queued_at_us; +#endif /* WLAN_SCHED_HISTORY_SIZE */ +}; + +typedef QDF_STATUS (*scheduler_msg_process_fn_t) (struct scheduler_msg *msg); +typedef void (*hdd_suspend_callback)(void); + +/** + * scheduler_init() - initialize control path scheduler + * + * This API initializes control path scheduler. + * + * Return: QDF status + */ +QDF_STATUS scheduler_init(void); + +/** + * scheduler_deinit() - de-initialize control path scheduler + * + * This API de-initializes control path scheduler. + * + * Return: QDF status + */ +QDF_STATUS scheduler_deinit(void); + +/** + * scheduler_enable() - start the scheduler module + * + * Ready the scheduler module to service requests, and start the scheduler's + * message processing thread. Must only be called after scheduler_init(). + * + * Return: QDF_STATUS + */ +QDF_STATUS scheduler_enable(void); + +/** + * scheduler_disable() - stop the scheduler module + * + * Stop the scheduler module from servicing requests, and terminate the + * scheduler's message processing thread. Must be called before + * scheduler_deinit(). + * + * Return: QDF_STATUS + */ +QDF_STATUS scheduler_disable(void); + +/** + * scheduler_register_module() - register input module/queue id + * @qid: queue id to get registered + * @callback: queue message to be called when a message is posted + * + * Return: QDF status + */ +QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid, + scheduler_msg_process_fn_t callback); + +/** + * scheduler_deregister_module() - deregister input module/queue id + * @qid: queue id to get deregistered + * + * Return: QDF status + */ +QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid); + +/** + * scheduler_post_msg_by_priority() - post messages by priority + * @qid: queue id to which the message has to be posted. + * @msg: message pointer + * @is_high_priority: set to true for high priority message else false + * + * Return: QDF status + */ +QDF_STATUS scheduler_post_msg_by_priority(uint32_t qid, + struct scheduler_msg *msg, + bool is_high_priority); + +/** + * scheduler_post_msg() - post normal messages(no priority) + * @qid: queue id to which the message has to be posted. + * @msg: message pointer + * + * Return: QDF status + */ +static inline QDF_STATUS scheduler_post_msg(uint32_t qid, + struct scheduler_msg *msg) +{ + return scheduler_post_msg_by_priority(qid, msg, false); +} + +/** + * scheduler_post_message() - post normal messages(no priority) + * @src_id: Source module of the message + * @dest_id: Destination module of the message + * @que_id: Queue to which the message has to posted. + * @msg: message pointer + * + * This function will mask the src_id, and destination id to qid of + * scheduler_post_msg + * Return: QDF status + */ +QDF_STATUS scheduler_post_message_debug(QDF_MODULE_ID src_id, + QDF_MODULE_ID dest_id, + QDF_MODULE_ID que_id, + struct scheduler_msg *msg, + int line, + const char *func); + +#define scheduler_post_message(src_id, dest_id, que_id, msg) \ + scheduler_post_message_debug(src_id, dest_id, que_id, msg, \ + __LINE__, __func__) + +/** + * scheduler_resume() - resume scheduler thread + * + * Complete scheduler thread resume wait event such that scheduler + * thread can wake up and process message queues + * + * Return: none + */ +void scheduler_resume(void); + +/** + * scheduler_register_hdd_suspend_callback() - suspend callback to hdd + * @callback: hdd callback to be called when controllred thread is suspended + * + * Return: none + */ +void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback); + +/** + * scheduler_wake_up_controller_thread() - wake up controller thread + * + * Wake up controller thread to process a critical message. + * + * Return: none + */ +void scheduler_wake_up_controller_thread(void); + +/** + * scheduler_set_event_mask() - set given event mask + * @event_mask: event mask to set + * + * Set given event mask such that controller scheduler thread can do + * specified work after wake up. + * + * Return: none + */ +void scheduler_set_event_mask(uint32_t event_mask); + +/** + * scheduler_clear_event_mask() - clear given event mask + * @event_mask: event mask to set + * + * Return: none + */ +void scheduler_clear_event_mask(uint32_t event_mask); + +/** + * scheduler_target_if_mq_handler() - top level message queue handler for + * target_if message queue + * @msg: pointer to actual message being handled + * + * Return: none + */ +QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg); + +/** + * scheduler_os_if_mq_handler() - top level message queue handler for + * os_if message queue + * @msg: pointer to actual message being handled + * + * Return: none + */ +QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg); + +/** + * scheduler_timer_q_mq_handler() - top level message queue handler for + * timer queue + * @msg: pointer to actual message being handled + * + * Return: none + */ +QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg); + +/** + * scheduler_mlme_mq_handler() - top level message queue handler for + * mlme queue + * @msg: pointer to actual message being handled + * + * Return: QDF status + */ +QDF_STATUS scheduler_mlme_mq_handler(struct scheduler_msg *msg); + +/** + * scheduler_scan_mq_handler() - top level message queue handler for + * scan queue + * @msg: pointer to actual message being handled + * + * Return: QDF status + */ +QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg); + +/** + * scheduler_register_wma_legacy_handler() - register legacy wma handler + * @callback: legacy wma handler to be called for WMA messages + * + * Return: QDF status + */ +QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t + callback); + +/** + * scheduler_register_sys_legacy_handler() - register legacy sys handler + * @callback: legacy sys handler to be called for sys messages + * + * Return: QDF status + */ +QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t + callback); +/** + * scheduler_deregister_sys_legacy_handler() - deregister legacy sys handler + * + * Return: QDF status + */ +QDF_STATUS scheduler_deregister_sys_legacy_handler(void); + +/** + * scheduler_deregister_wma_legacy_handler() - deregister legacy wma handler + * + * Return: QDF status + */ +QDF_STATUS scheduler_deregister_wma_legacy_handler(void); + +/** + * scheduler_mc_timer_callback() - timer callback, gets called at time out + * @timer: holds the mc timer object. + * + * Return: None + */ +void scheduler_mc_timer_callback(qdf_mc_timer_t *timer); + +/** + * scheduler_get_queue_size() - Get the current size of the scheduler queue + * @qid: Queue ID for which the size is requested + * @size: Pointer to size where the size would be returned to the caller + * + * This API finds the size of the scheduler queue for the given Queue ID + * + * Return: QDF Status + */ +QDF_STATUS scheduler_get_queue_size(QDF_MODULE_ID qid, uint32_t *size); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/scheduler/inc/scheduler_core.h b/drivers/staging/qca-wifi-host-cmn/scheduler/inc/scheduler_core.h new file mode 100644 index 0000000000000000000000000000000000000000..0fe8d4867562d3cf0aed93d4b946eb35ef98fded --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/scheduler/inc/scheduler_core.h @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(__SCHEDULER_CORE_H) +#define __SCHEDULER_CORE_H + +#include +#include +#include +#include + +#ifndef SCHEDULER_CORE_MAX_MESSAGES +#define SCHEDULER_CORE_MAX_MESSAGES 4000 +#endif +#ifndef WLAN_SCHED_REDUCTION_LIMIT +#define WLAN_SCHED_REDUCTION_LIMIT 32 +#endif +#define SCHEDULER_NUMBER_OF_MSG_QUEUE 6 +#define SCHEDULER_WRAPPER_MAX_FAIL_COUNT (SCHEDULER_CORE_MAX_MESSAGES * 3) +#define SCHEDULER_WATCHDOG_TIMEOUT (10 * 1000) /* 10s */ + +#define sched_fatal(params...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_SCHEDULER, params) +#define sched_err(params...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_SCHEDULER, params) +#define sched_warn(params...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_SCHEDULER, params) +#define sched_info(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_SCHEDULER, params) +#define sched_debug(params...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_SCHEDULER, params) + +#define sched_nofl_fatal(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_SCHEDULER, params) +#define sched_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_SCHEDULER, params) +#define sched_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_SCHEDULER, params) +#define sched_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_SCHEDULER, params) +#define sched_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_SCHEDULER, params) + +#define sched_enter() sched_debug("Enter") +#define sched_exit() sched_debug("Exit") + +/** + * struct scheduler_mq_type - scheduler message queue + * @mq_lock: message queue lock + * @mq_list: message queue list + * @qid: queue id + */ +struct scheduler_mq_type { + qdf_spinlock_t mq_lock; + qdf_list_t mq_list; + QDF_MODULE_ID qid; +}; + +/** + * struct scheduler_mq_ctx - scheduler message queue context + * @sch_msg_q: scheduler message queue + * @scheduler_msg_qid_to_qidx: message qid to qidx mapping + * @scheduler_msg_process_fn: array of message queue handler function pointers + */ +struct scheduler_mq_ctx { + struct scheduler_mq_type sch_msg_q[SCHEDULER_NUMBER_OF_MSG_QUEUE]; + uint8_t scheduler_msg_qid_to_qidx[QDF_MODULE_ID_MAX]; + QDF_STATUS (*scheduler_msg_process_fn[SCHEDULER_NUMBER_OF_MSG_QUEUE]) + (struct scheduler_msg *msg); +}; + +/** + * struct scheduler_ctx - scheduler context + * @queue_ctx: message queue context + * @sch_start_event: scheduler thread start wait event + * @sch_thread: scheduler thread + * @sch_shutdown: scheduler thread shutdown wait event + * @sch_wait_queue: scheduler wait queue + * @sch_event_flag: scheduler events flag + * @resume_sch_event: scheduler resume wait event + * @sch_thread_lock: scheduler thread lock + * @sch_last_qidx: scheduler last qidx allocation + * @watchdog_msg_type: 'type' of the current msg being processed + * @hdd_callback: os if suspend callback + * @legacy_wma_handler: legacy wma message handler + * @legacy_sys_handler: legacy sys message handler + * @watchdog_timer: timer for triggering a scheduler watchdog bite + * @watchdog_callback: the callback of the current msg being processed + */ +struct scheduler_ctx { + struct scheduler_mq_ctx queue_ctx; + qdf_event_t sch_start_event; + qdf_thread_t *sch_thread; + qdf_event_t sch_shutdown; + qdf_wait_queue_head_t sch_wait_queue; + unsigned long sch_event_flag; + qdf_event_t resume_sch_event; + qdf_spinlock_t sch_thread_lock; + uint8_t sch_last_qidx; + uint16_t watchdog_msg_type; + hdd_suspend_callback hdd_callback; + scheduler_msg_process_fn_t legacy_wma_handler; + scheduler_msg_process_fn_t legacy_sys_handler; + qdf_timer_t watchdog_timer; + void *watchdog_callback; +}; + +/** + * scheduler_core_msg_dup() duplicate the given scheduler message + * @msg: the message to duplicated + * + * Note: Duplicated messages must be freed using scheduler_core_msg_free(). + * + * Return: pointer to the duplicated message + */ +struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg); + +/** + * scheduler_core_msg_free() - free the given scheduler message + * @msg: the duplicated message to free + * + * Return: None + */ +void scheduler_core_msg_free(struct scheduler_msg *msg); + +/** + * scheduler_get_context() - to get scheduler context + * + * This routine is used retrieve scheduler context + * + * Return: Pointer to scheduler context + */ +struct scheduler_ctx *scheduler_get_context(void); + +/** + * scheduler_thread() - spawned thread will execute this routine + * @arg: pointer to scheduler context + * + * Newly created thread will use this routine to perform its duty + * + * Return: none + */ +int scheduler_thread(void *arg); + +/** + * scheduler_create_ctx() - to create scheduler context + * + * This routine is used to create scheduler context + * + * Return: QDF_STATUS based on success or failure + */ +QDF_STATUS scheduler_create_ctx(void); +/** + * scheduler_destroy_ctx() - to destroy scheduler context + * + * This routine is used to destroy scheduler context + * + * Return: QDF_STATUS based on success or failure + */ +QDF_STATUS scheduler_destroy_ctx(void); + +/** + * scheduler_mq_put() - put message in the back of queue + * @msg_q: Pointer to the message queue + * @msg: the message to enqueue + * + * This function is used to put message in back of provided message + * queue + * + * Return: none + */ +void scheduler_mq_put(struct scheduler_mq_type *msg_q, + struct scheduler_msg *msg); +/** + * scheduler_mq_put_front() - put message in the front of queue + * @msg_q: Pointer to the message queue + * @msg: the message to enqueue + * + * This function is used to put message in front of provided message + * queue + * + * Return: none + */ +void scheduler_mq_put_front(struct scheduler_mq_type *msg_q, + struct scheduler_msg *msg); +/** + * scheduler_mq_get() - to get message from message queue + * @msg_q: Pointer to the message queue + * + * This function is used to get message from given message queue + * + * Return: none + */ +struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q); + +/** + * scheduler_queues_init() - to initialize all the modules' queues + * @sched_ctx: pointer to scheduler context + * + * This function is used to initialize the queues for all the modules + * + * Return: QDF_STATUS based on success of failure + */ +QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx); + +/** + * scheduler_queues_deinit() - to de-initialize all the modules' queues + * @sched_ctx: pointer to scheduler context + * + * This function is used to de-initialize the queues for all the modules + * + * Return: QDF_STATUS based on success of failure + */ +QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *gp_sch_ctx); + +/** + * scheduler_queues_flush() - flush all of the scheduler queues + * @sch_ctx: pointer to scheduler context + * + * This routine is used to clean the module's queues + * + * Return: none + */ +void scheduler_queues_flush(struct scheduler_ctx *sched_ctx); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/scheduler/src/scheduler_api.c b/drivers/staging/qca-wifi-host-cmn/scheduler/src/scheduler_api.c new file mode 100644 index 0000000000000000000000000000000000000000..d50a88ac63aa597008a9ba84eec2428058eeb5ae --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/scheduler/src/scheduler_api.c @@ -0,0 +1,704 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include + +QDF_STATUS scheduler_disable(void) +{ + struct scheduler_ctx *sched_ctx; + + sched_debug("Disabling Scheduler"); + + sched_ctx = scheduler_get_context(); + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_INVAL; + + /* send shutdown signal to scheduler thread */ + qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag); + qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag); + qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue); + + /* wait for scheduler thread to shutdown */ + qdf_wait_single_event(&sched_ctx->sch_shutdown, 0); + sched_ctx->sch_thread = NULL; + + /* flush any unprocessed scheduler messages */ + scheduler_queues_flush(sched_ctx); + + return QDF_STATUS_SUCCESS; +} + +static inline void scheduler_watchdog_notify(struct scheduler_ctx *sched) +{ + char symbol[QDF_SYMBOL_LEN]; + + if (sched->watchdog_callback) + qdf_sprint_symbol(symbol, sched->watchdog_callback); + + sched_fatal("Callback %s (type 0x%x) exceeded its allotted time of %ds", + sched->watchdog_callback ? symbol : "", + sched->watchdog_msg_type, + SCHEDULER_WATCHDOG_TIMEOUT / 1000); +} + +static void scheduler_watchdog_timeout(void *arg) +{ + struct scheduler_ctx *sched = arg; + + if (qdf_is_recovering()) { + sched_debug("Recovery is in progress ignore timeout"); + return; + } + + scheduler_watchdog_notify(sched); + if (sched->sch_thread) + qdf_print_thread_trace(sched->sch_thread); + + /* avoid crashing during shutdown */ + if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag)) + return; + + QDF_DEBUG_PANIC("Going down for Scheduler Watchdog Bite!"); +} + +QDF_STATUS scheduler_enable(void) +{ + struct scheduler_ctx *sched_ctx; + + sched_debug("Enabling Scheduler"); + + sched_ctx = scheduler_get_context(); + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_INVAL; + + qdf_atomic_clear_bit(MC_SHUTDOWN_EVENT_MASK, + &sched_ctx->sch_event_flag); + qdf_atomic_clear_bit(MC_POST_EVENT_MASK, + &sched_ctx->sch_event_flag); + + /* create the scheduler thread */ + sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx, + "scheduler_thread"); + if (!sched_ctx->sch_thread) { + sched_fatal("Failed to create scheduler thread"); + return QDF_STATUS_E_RESOURCES; + } + + sched_debug("Scheduler thread created"); + + /* wait for the scheduler thread to startup */ + qdf_wake_up_process(sched_ctx->sch_thread); + qdf_wait_single_event(&sched_ctx->sch_start_event, 0); + + sched_debug("Scheduler thread started"); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_init(void) +{ + QDF_STATUS status; + struct scheduler_ctx *sched_ctx; + + sched_debug("Initializing Scheduler"); + + status = scheduler_create_ctx(); + if (QDF_IS_STATUS_ERROR(status)) { + sched_fatal("Failed to create context; status:%d", status); + return status; + } + + sched_ctx = scheduler_get_context(); + QDF_BUG(sched_ctx); + if (!sched_ctx) { + status = QDF_STATUS_E_FAILURE; + goto ctx_destroy; + } + + status = scheduler_queues_init(sched_ctx); + if (QDF_IS_STATUS_ERROR(status)) { + sched_fatal("Failed to init queues; status:%d", status); + goto ctx_destroy; + } + + status = qdf_event_create(&sched_ctx->sch_start_event); + if (QDF_IS_STATUS_ERROR(status)) { + sched_fatal("Failed to create start event; status:%d", status); + goto queues_deinit; + } + + status = qdf_event_create(&sched_ctx->sch_shutdown); + if (QDF_IS_STATUS_ERROR(status)) { + sched_fatal("Failed to create shutdown event; status:%d", + status); + goto start_event_destroy; + } + + status = qdf_event_create(&sched_ctx->resume_sch_event); + if (QDF_IS_STATUS_ERROR(status)) { + sched_fatal("Failed to create resume event; status:%d", status); + goto shutdown_event_destroy; + } + + qdf_spinlock_create(&sched_ctx->sch_thread_lock); + qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue); + sched_ctx->sch_event_flag = 0; + qdf_timer_init(NULL, + &sched_ctx->watchdog_timer, + &scheduler_watchdog_timeout, + sched_ctx, + QDF_TIMER_TYPE_SW); + + qdf_register_mc_timer_callback(scheduler_mc_timer_callback); + + return QDF_STATUS_SUCCESS; + +shutdown_event_destroy: + qdf_event_destroy(&sched_ctx->sch_shutdown); + +start_event_destroy: + qdf_event_destroy(&sched_ctx->sch_start_event); + +queues_deinit: + scheduler_queues_deinit(sched_ctx); + +ctx_destroy: + scheduler_destroy_ctx(); + + return status; +} + +QDF_STATUS scheduler_deinit(void) +{ + QDF_STATUS status; + struct scheduler_ctx *sched_ctx; + + sched_debug("Deinitializing Scheduler"); + + sched_ctx = scheduler_get_context(); + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_INVAL; + + qdf_timer_free(&sched_ctx->watchdog_timer); + qdf_spinlock_destroy(&sched_ctx->sch_thread_lock); + qdf_event_destroy(&sched_ctx->resume_sch_event); + qdf_event_destroy(&sched_ctx->sch_shutdown); + qdf_event_destroy(&sched_ctx->sch_start_event); + + status = scheduler_queues_deinit(sched_ctx); + if (QDF_IS_STATUS_ERROR(status)) + sched_err("Failed to deinit queues; status:%d", status); + + status = scheduler_destroy_ctx(); + if (QDF_IS_STATUS_ERROR(status)) + sched_err("Failed to destroy context; status:%d", status); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_post_msg_by_priority(uint32_t qid, + struct scheduler_msg *msg, + bool is_high_priority) +{ + uint8_t qidx; + struct scheduler_mq_type *target_mq; + struct scheduler_msg *queue_msg; + struct scheduler_ctx *sched_ctx; + uint16_t src_id; + uint16_t dest_id; + uint16_t que_id; + + QDF_BUG(msg); + if (!msg) + return QDF_STATUS_E_INVAL; + + sched_ctx = scheduler_get_context(); + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_INVAL; + + if (!sched_ctx->sch_thread) { + sched_err("Cannot post message; scheduler thread is stopped"); + return QDF_STATUS_E_FAILURE; + } + + if (msg->reserved != 0 && msg->reserved != SYS_MSG_COOKIE) { + QDF_DEBUG_PANIC("Scheduler messages must be initialized"); + return QDF_STATUS_E_FAILURE; + } + + dest_id = scheduler_get_dest_id(qid); + src_id = scheduler_get_src_id(qid); + que_id = scheduler_get_que_id(qid); + + if (que_id >= QDF_MODULE_ID_MAX || src_id >= QDF_MODULE_ID_MAX || + dest_id >= QDF_MODULE_ID_MAX) { + sched_err("Src_id/Dest_id invalid, cannot post message"); + return QDF_STATUS_E_FAILURE; + } + /* Target_If is a special message queue in phase 3 convergence beacause + * its used by both legacy WMA and as well as new UMAC components which + * directly populate callback handlers in message body. + * 1) WMA legacy messages should not have callback + * 2) New target_if message needs to have valid callback + * Clear callback handler for legacy WMA messages such that in case + * if someone is sending legacy WMA message from stack which has + * uninitialized callback then its handled properly. Also change + * legacy WMA message queue id to target_if queue such that its always + * handled in right order. + */ + if (QDF_MODULE_ID_WMA == que_id) { + msg->callback = NULL; + /* change legacy WMA message id to new target_if mq id */ + que_id = QDF_MODULE_ID_TARGET_IF; + } + qdf_mtrace(src_id, dest_id, msg->type, 0xFF, 0); + + qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[que_id]; + if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) { + sched_err("Scheduler is deinitialized ignore msg"); + return QDF_STATUS_E_FAILURE; + } + + if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) { + QDF_DEBUG_PANIC("callback not registered for qid[%d]", que_id); + return QDF_STATUS_E_FAILURE; + } + + target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]); + + queue_msg = scheduler_core_msg_dup(msg); + if (!queue_msg) + return QDF_STATUS_E_NOMEM; + + if (is_high_priority) + scheduler_mq_put_front(target_mq, queue_msg); + else + scheduler_mq_put(target_mq, queue_msg); + + qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag); + qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid, + scheduler_msg_process_fn_t callback) +{ + struct scheduler_mq_ctx *ctx; + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + sched_enter(); + + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_FAILURE; + + if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) { + sched_err("Already registered max %d no of message queues", + SCHEDULER_NUMBER_OF_MSG_QUEUE); + return QDF_STATUS_E_FAILURE; + } + + ctx = &sched_ctx->queue_ctx; + ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx; + ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid; + ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback; + sched_ctx->sch_last_qidx++; + + sched_exit(); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid) +{ + struct scheduler_mq_ctx *ctx; + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + uint8_t qidx; + + sched_enter(); + + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_FAILURE; + + ctx = &sched_ctx->queue_ctx; + qidx = ctx->scheduler_msg_qid_to_qidx[qid]; + ctx->scheduler_msg_process_fn[qidx] = NULL; + sched_ctx->sch_last_qidx--; + ctx->scheduler_msg_qid_to_qidx[qidx] = SCHEDULER_NUMBER_OF_MSG_QUEUE; + + sched_exit(); + + return QDF_STATUS_SUCCESS; +} + +void scheduler_resume(void) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + if (sched_ctx) + qdf_event_set(&sched_ctx->resume_sch_event); +} + +void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + if (sched_ctx) + sched_ctx->hdd_callback = callback; +} +void scheduler_wake_up_controller_thread(void) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + if (sched_ctx) + qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue); +} +void scheduler_set_event_mask(uint32_t event_mask) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + if (sched_ctx) + qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag); +} + +void scheduler_clear_event_mask(uint32_t event_mask) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + if (sched_ctx) + qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag); +} + +QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg) +{ + QDF_STATUS status; + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *); + + QDF_BUG(msg); + if (!msg) + return QDF_STATUS_E_FAILURE; + + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_FAILURE; + + target_if_msg_handler = msg->callback; + + /* Target_If is a special message queue in phase 3 convergence beacause + * its used by both legacy WMA and as well as new UMAC components. New + * UMAC components directly pass their message handlers as callback in + * message body. + * 1) All Legacy WMA messages do not contain message callback so invoke + * registered legacy WMA handler. Scheduler message posting APIs + * makes sure legacy WMA messages do not have callbacks. + * 2) For new messages which have valid callbacks invoke their callbacks + * directly. + */ + if (!target_if_msg_handler) + status = sched_ctx->legacy_wma_handler(msg); + else + status = target_if_msg_handler(msg); + + return status; +} + +QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg) +{ + QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *); + + QDF_BUG(msg); + if (!msg) + return QDF_STATUS_E_FAILURE; + + os_if_msg_handler = msg->callback; + + QDF_BUG(os_if_msg_handler); + if (!os_if_msg_handler) + return QDF_STATUS_E_FAILURE; + + os_if_msg_handler(msg); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + qdf_mc_timer_callback_t timer_callback; + + QDF_BUG(msg); + if (!msg) + return QDF_STATUS_E_FAILURE; + + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_FAILURE; + + /* legacy sys message handler? */ + if (msg->reserved != SYS_MSG_COOKIE || msg->type != SYS_MSG_ID_MC_TIMER) + return sched_ctx->legacy_sys_handler(msg); + + timer_callback = msg->callback; + QDF_BUG(timer_callback); + if (!timer_callback) + return QDF_STATUS_E_FAILURE; + + timer_callback(msg->bodyptr); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_mlme_mq_handler(struct scheduler_msg *msg) +{ + scheduler_msg_process_fn_t mlme_msg_handler; + + QDF_BUG(msg); + if (!msg) + return QDF_STATUS_E_FAILURE; + + mlme_msg_handler = msg->callback; + + QDF_BUG(mlme_msg_handler); + if (!mlme_msg_handler) + return QDF_STATUS_E_FAILURE; + + mlme_msg_handler(msg); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg) +{ + QDF_STATUS (*scan_q_msg_handler)(struct scheduler_msg *); + + QDF_BUG(msg); + if (!msg) + return QDF_STATUS_E_FAILURE; + + scan_q_msg_handler = msg->callback; + + QDF_BUG(scan_q_msg_handler); + if (!scan_q_msg_handler) + return QDF_STATUS_E_FAILURE; + + scan_q_msg_handler(msg); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t + wma_callback) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_FAILURE; + + sched_ctx->legacy_wma_handler = wma_callback; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t + sys_callback) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_FAILURE; + + sched_ctx->legacy_sys_handler = sys_callback; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_deregister_wma_legacy_handler(void) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_FAILURE; + + sched_ctx->legacy_wma_handler = NULL; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_deregister_sys_legacy_handler(void) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_FAILURE; + + sched_ctx->legacy_sys_handler = NULL; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS scheduler_msg_flush_noop(struct scheduler_msg *msg) +{ + return QDF_STATUS_SUCCESS; +} + +void scheduler_mc_timer_callback(qdf_mc_timer_t *timer) +{ + struct scheduler_msg msg = {0}; + QDF_STATUS status; + + qdf_mc_timer_callback_t callback = NULL; + void *user_data = NULL; + QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW; + + QDF_BUG(timer); + if (!timer) + return; + + qdf_spin_lock_irqsave(&timer->platform_info.spinlock); + + switch (timer->state) { + case QDF_TIMER_STATE_STARTING: + /* we are in this state because someone just started the timer, + * MC timer got started and expired, but the time content have + * not been updated this is a rare race condition! + */ + timer->state = QDF_TIMER_STATE_STOPPED; + status = QDF_STATUS_E_ALREADY; + break; + + case QDF_TIMER_STATE_STOPPED: + status = QDF_STATUS_E_ALREADY; + break; + + case QDF_TIMER_STATE_UNUSED: + status = QDF_STATUS_E_EXISTS; + break; + + case QDF_TIMER_STATE_RUNNING: + /* need to go to stop state here because the call-back function + * may restart timer (to emulate periodic timer) + */ + timer->state = QDF_TIMER_STATE_STOPPED; + /* copy the relevant timer information to local variables; + * once we exits from this critical section, the timer content + * may be modified by other tasks + */ + callback = timer->callback; + user_data = timer->user_data; + type = timer->type; + status = QDF_STATUS_SUCCESS; + break; + + default: + QDF_ASSERT(0); + status = QDF_STATUS_E_FAULT; + break; + } + + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + + if (QDF_IS_STATUS_ERROR(status)) { + sched_debug("MC timer fired but is not running; skip callback"); + return; + } + + qdf_try_allowing_sleep(type); + + QDF_BUG(callback); + if (!callback) + return; + + /* serialize to scheduler controller thread */ + msg.type = SYS_MSG_ID_MC_TIMER; + msg.reserved = SYS_MSG_COOKIE; + msg.callback = callback; + msg.bodyptr = user_data; + msg.bodyval = 0; + + /* bodyptr points to user data, do not free it during msg flush */ + msg.flush_callback = scheduler_msg_flush_noop; + + status = scheduler_post_message(QDF_MODULE_ID_SCHEDULER, + QDF_MODULE_ID_SCHEDULER, + QDF_MODULE_ID_SYS, &msg); + if (QDF_IS_STATUS_ERROR(status)) + sched_err("Could not enqueue timer to timer queue"); +} + +QDF_STATUS scheduler_get_queue_size(QDF_MODULE_ID qid, uint32_t *size) +{ + uint8_t qidx; + struct scheduler_mq_type *target_mq; + struct scheduler_ctx *sched_ctx; + + sched_ctx = scheduler_get_context(); + if (!sched_ctx) + return QDF_STATUS_E_INVAL; + + /* WMA also uses the target_if queue, so replace the QID */ + if (QDF_MODULE_ID_WMA == qid) + qid = QDF_MODULE_ID_TARGET_IF; + + qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid]; + if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) { + sched_err("Scheduler is deinitialized"); + return QDF_STATUS_E_FAILURE; + } + + target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]); + + *size = qdf_list_size(&target_mq->mq_list); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_post_message_debug(QDF_MODULE_ID src_id, + QDF_MODULE_ID dest_id, + QDF_MODULE_ID que_id, + struct scheduler_msg *msg, + int line, + const char *func) +{ + QDF_STATUS status; + + status = scheduler_post_msg(scheduler_get_qid(src_id, dest_id, que_id), + msg); + + if (QDF_IS_STATUS_ERROR(status)) + sched_err("couldn't post from %d to %d - called from %d, %s", + src_id, dest_id, line, func); + + return status; +} + +qdf_export_symbol(scheduler_post_message_debug); diff --git a/drivers/staging/qca-wifi-host-cmn/scheduler/src/scheduler_core.c b/drivers/staging/qca-wifi-host-cmn/scheduler/src/scheduler_core.c new file mode 100644 index 0000000000000000000000000000000000000000..055af4fff5e6ea05ba51fc9870e8aa608e34e452 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/scheduler/src/scheduler_core.c @@ -0,0 +1,455 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include "qdf_flex_mem.h" + +static struct scheduler_ctx g_sched_ctx; +static struct scheduler_ctx *gp_sched_ctx; + +DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg), + WLAN_SCHED_REDUCTION_LIMIT); + +#ifdef WLAN_SCHED_HISTORY_SIZE + +/** + * struct sched_history_item - metrics for a scheduler message + * @callback: the message's execution callback + * @type_id: the message's type_id + * @queue_id: Id of the queue the message was added to + * @queue_start_us: timestamp when the message was queued in microseconds + * @queue_duration_us: duration the message was queued in microseconds + * @queue_depth: depth of the queue when the message was queued + * @run_start_us: timesatmp when the message started execution in microseconds + * @run_duration_us: duration the message was executed in microseconds + */ +struct sched_history_item { + void *callback; + uint32_t type_id; + QDF_MODULE_ID queue_id; + uint64_t queue_start_us; + uint32_t queue_duration_us; + uint32_t queue_depth; + uint64_t run_start_us; + uint32_t run_duration_us; +}; + +static struct sched_history_item sched_history[WLAN_SCHED_HISTORY_SIZE]; +static uint32_t sched_history_index; + +static void sched_history_queue(struct scheduler_mq_type *queue, + struct scheduler_msg *msg) +{ + msg->queue_id = queue->qid; + msg->queue_depth = qdf_list_size(&queue->mq_list); + msg->queued_at_us = qdf_get_log_timestamp_usecs(); +} + +static void sched_history_start(struct scheduler_msg *msg) +{ + uint64_t started_at_us = qdf_get_log_timestamp_usecs(); + struct sched_history_item hist = { + .callback = msg->callback, + .type_id = msg->type, + .queue_start_us = msg->queued_at_us, + .queue_duration_us = started_at_us - msg->queued_at_us, + .queue_depth = msg->queue_depth, + .run_start_us = started_at_us, + }; + + sched_history[sched_history_index] = hist; +} + +static void sched_history_stop(void) +{ + struct sched_history_item *hist = &sched_history[sched_history_index]; + uint64_t stopped_at_us = qdf_get_log_timestamp_usecs(); + + hist->run_duration_us = stopped_at_us - hist->run_start_us; + + sched_history_index++; + sched_history_index %= WLAN_SCHED_HISTORY_SIZE; +} + +#else /* WLAN_SCHED_HISTORY_SIZE */ + +static inline void sched_history_queue(struct scheduler_mq_type *queue, + struct scheduler_msg *msg) { } +static inline void sched_history_start(struct scheduler_msg *msg) { } +static inline void sched_history_stop(void) { } + +#endif /* WLAN_SCHED_HISTORY_SIZE */ + +QDF_STATUS scheduler_create_ctx(void) +{ + qdf_flex_mem_init(&sched_pool); + gp_sched_ctx = &g_sched_ctx; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_destroy_ctx(void) +{ + gp_sched_ctx = NULL; + qdf_flex_mem_deinit(&sched_pool); + + return QDF_STATUS_SUCCESS; +} + +struct scheduler_ctx *scheduler_get_context(void) +{ + QDF_BUG(gp_sched_ctx); + + return gp_sched_ctx; +} + +static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q) +{ + sched_enter(); + + qdf_spinlock_create(&msg_q->mq_lock); + qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES); + + sched_exit(); + + return QDF_STATUS_SUCCESS; +} + +static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q) +{ + sched_enter(); + + qdf_list_destroy(&msg_q->mq_list); + qdf_spinlock_destroy(&msg_q->mq_lock); + + sched_exit(); +} + +static qdf_atomic_t __sched_queue_depth; +static qdf_atomic_t __sched_dup_fail_count; + +static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx) +{ + QDF_STATUS status; + int i; + + sched_enter(); + + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_FAILURE; + + qdf_atomic_set(&__sched_queue_depth, 0); + + /* Initialize all message queues */ + for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) { + status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]); + if (QDF_STATUS_SUCCESS != status) + return status; + } + + /* Initialize all qid to qidx mapping to invalid values */ + for (i = 0; i < QDF_MODULE_ID_MAX; i++) + sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] = + SCHEDULER_NUMBER_OF_MSG_QUEUE; + + sched_exit(); + + return status; +} + +static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx) +{ + int i; + + sched_enter(); + + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_FAILURE; + + /* De-Initialize all message queues */ + for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) + scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]); + + /* Initialize all qid to qidx mapping to invalid values */ + for (i = 0; i < QDF_MODULE_ID_MAX; i++) + sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] = + SCHEDULER_NUMBER_OF_MSG_QUEUE; + + sched_exit(); + + return QDF_STATUS_SUCCESS; +} + +void scheduler_mq_put(struct scheduler_mq_type *msg_q, + struct scheduler_msg *msg) +{ + qdf_spin_lock_irqsave(&msg_q->mq_lock); + sched_history_queue(msg_q, msg); + qdf_list_insert_back(&msg_q->mq_list, &msg->node); + qdf_spin_unlock_irqrestore(&msg_q->mq_lock); +} + +void scheduler_mq_put_front(struct scheduler_mq_type *msg_q, + struct scheduler_msg *msg) +{ + qdf_spin_lock_irqsave(&msg_q->mq_lock); + sched_history_queue(msg_q, msg); + qdf_list_insert_front(&msg_q->mq_list, &msg->node); + qdf_spin_unlock_irqrestore(&msg_q->mq_lock); +} + +struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q) +{ + QDF_STATUS status; + qdf_list_node_t *node; + + qdf_spin_lock_irqsave(&msg_q->mq_lock); + status = qdf_list_remove_front(&msg_q->mq_list, &node); + qdf_spin_unlock_irqrestore(&msg_q->mq_lock); + + if (QDF_IS_STATUS_ERROR(status)) + return NULL; + + return qdf_container_of(node, struct scheduler_msg, node); +} + +QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx) +{ + return scheduler_all_queues_deinit(sched_ctx); +} + +QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx) +{ + QDF_STATUS status; + + sched_enter(); + + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_FAILURE; + + status = scheduler_all_queues_init(sched_ctx); + if (QDF_IS_STATUS_ERROR(status)) { + scheduler_all_queues_deinit(sched_ctx); + sched_err("Failed to initialize the msg queues"); + return status; + } + + sched_debug("Queue init passed"); + + sched_exit(); + + return QDF_STATUS_SUCCESS; +} + +struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg) +{ + struct scheduler_msg *dup; + + if (qdf_atomic_inc_return(&__sched_queue_depth) > + SCHEDULER_CORE_MAX_MESSAGES) + goto buffer_full; + + dup = qdf_flex_mem_alloc(&sched_pool); + if (!dup) { + sched_err("out of memory"); + goto dec_queue_count; + } + + qdf_mem_copy(dup, msg, sizeof(*dup)); + + qdf_atomic_set(&__sched_dup_fail_count, 0); + + return dup; + +buffer_full: + if (qdf_atomic_inc_return(&__sched_dup_fail_count) > + SCHEDULER_WRAPPER_MAX_FAIL_COUNT) + QDF_DEBUG_PANIC("Scheduler buffer is full"); + + +dec_queue_count: + qdf_atomic_dec(&__sched_queue_depth); + + return NULL; +} + +void scheduler_core_msg_free(struct scheduler_msg *msg) +{ + qdf_flex_mem_free(&sched_pool, msg); + qdf_atomic_dec(&__sched_queue_depth); +} + +static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx, + bool *shutdown) +{ + int i; + QDF_STATUS status; + struct scheduler_msg *msg; + + if (!sch_ctx) { + QDF_DEBUG_PANIC("sch_ctx is null"); + return; + } + + /* start with highest priority queue : timer queue at index 0 */ + i = 0; + while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) { + /* Check if MC needs to shutdown */ + if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, + &sch_ctx->sch_event_flag)) { + sched_debug("scheduler thread signaled to shutdown"); + *shutdown = true; + + /* Check for any Suspend Indication */ + if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK, + &sch_ctx->sch_event_flag)) { + /* Unblock anyone waiting on suspend */ + if (gp_sched_ctx->hdd_callback) + gp_sched_ctx->hdd_callback(); + } + + break; + } + + msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]); + if (!msg) { + /* check next queue */ + i++; + continue; + } + + if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) { + sch_ctx->watchdog_msg_type = msg->type; + sch_ctx->watchdog_callback = msg->callback; + + sched_history_start(msg); + qdf_timer_start(&sch_ctx->watchdog_timer, + SCHEDULER_WATCHDOG_TIMEOUT); + status = sch_ctx->queue_ctx. + scheduler_msg_process_fn[i](msg); + qdf_timer_stop(&sch_ctx->watchdog_timer); + sched_history_stop(); + + if (QDF_IS_STATUS_ERROR(status)) + sched_err("Failed processing Qid[%d] message", + sch_ctx->queue_ctx.sch_msg_q[i].qid); + + scheduler_core_msg_free(msg); + } + + /* start again with highest priority queue at index 0 */ + i = 0; + } + + /* Check for any Suspend Indication */ + if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK, + &sch_ctx->sch_event_flag)) { + qdf_spin_lock(&sch_ctx->sch_thread_lock); + qdf_event_reset(&sch_ctx->resume_sch_event); + /* controller thread suspend completion callback */ + if (gp_sched_ctx->hdd_callback) + gp_sched_ctx->hdd_callback(); + qdf_spin_unlock(&sch_ctx->sch_thread_lock); + /* Wait for resume indication */ + qdf_wait_single_event(&sch_ctx->resume_sch_event, 0); + } + + return; /* Nothing to process wait on wait queue */ +} + +int scheduler_thread(void *arg) +{ + struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg; + int retWaitStatus = 0; + bool shutdown = false; + + if (!arg) { + QDF_DEBUG_PANIC("arg is null"); + return 0; + } + qdf_set_user_nice(current, -2); + + /* Ack back to the context from which the main controller thread + * has been created + */ + qdf_event_set(&sch_ctx->sch_start_event); + sched_debug("scheduler thread %d (%s) starting up", + current->pid, current->comm); + + while (!shutdown) { + /* This implements the execution model algorithm */ + retWaitStatus = qdf_wait_queue_interruptible( + sch_ctx->sch_wait_queue, + qdf_atomic_test_bit(MC_POST_EVENT_MASK, + &sch_ctx->sch_event_flag) || + qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK, + &sch_ctx->sch_event_flag)); + + if (retWaitStatus == -ERESTARTSYS) + QDF_DEBUG_PANIC("Scheduler received -ERESTARTSYS"); + + qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag); + scheduler_thread_process_queues(sch_ctx, &shutdown); + } + + /* If we get here the scheduler thread must exit */ + sched_debug("Scheduler thread exiting"); + qdf_event_set(&sch_ctx->sch_shutdown); + qdf_exit_thread(QDF_STATUS_SUCCESS); + + return 0; +} + +static void scheduler_flush_single_queue(struct scheduler_mq_type *mq) +{ + struct scheduler_msg *msg; + QDF_STATUS (*flush_cb)(struct scheduler_msg *); + + while ((msg = scheduler_mq_get(mq))) { + if (msg->flush_callback) { + sched_debug("Calling flush callback; type: %x", + msg->type); + flush_cb = msg->flush_callback; + flush_cb(msg); + } else if (msg->bodyptr) { + sched_debug("Freeing scheduler msg bodyptr; type: %x", + msg->type); + qdf_mem_free(msg->bodyptr); + } + + scheduler_core_msg_free(msg); + } +} + +void scheduler_queues_flush(struct scheduler_ctx *sched_ctx) +{ + struct scheduler_mq_type *mq; + int i; + + sched_debug("Flushing scheduler message queues"); + + for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) { + mq = &sched_ctx->queue_ctx.sch_msg_q[i]; + scheduler_flush_single_queue(mq); + } +} + diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/Kbuild b/drivers/staging/qca-wifi-host-cmn/spectral/Kbuild new file mode 100644 index 0000000000000000000000000000000000000000..117fc4f4ce248aab09a9690db24fac14487a14ef --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/Kbuild @@ -0,0 +1,253 @@ +ifeq ($(obj),) +obj := . +endif + +DEPTH := ../.. + +ifeq ($(strip ${QCA_PARTNER_MAKE_F_SUPPORT}),1) +export QCA_PARTNER_MAKE_F_INC=1 +endif + +include $(obj)/$(DEPTH)/os/linux/Makefile-linux.common + +INCS += -I$(HAL) -I$(HAL)/$(OS) -I$(ATH) -I$(ATH_RATE) -I$(ATH_PKTLOG) -I$(WLAN) -I$(IF_WLAN) -I$(ATH_SPECTRAL) -I$(ATHEROSPATH) -I$(obj)/$(DEPTH)/../../apps/spectral/common +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/inc -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/obj_mgr/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/cmn_defs/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/utils/nlink/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/scan/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/cmn_defs/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/global_umac_dispatcher/lmac_if/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/scheduler/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/global_lmac_if/inc +INCS += -I$(obj)/$(DEPTH)/umac/scan +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/mgmt_txrx/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/regulatory/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/umac/son/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/dfs/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/os_if/linux/spectral/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/os_if/linux/scan/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/spectral/core +INCS += -I$(obj)/$(DEPTH)/component_dev/direct_attach/spectral/core +INCS += -I$(obj)/$(DEPTH)/cmn_dev/target_if/direct_buf_rx/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/wbuff/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/cfg/inc +INCS += -I$(obj)/$(DEPTH)/ini_cfg/inc/ +INCS += -I$(obj)/$(DEPTH)/component_dev/wmi/inc +INCS += -I$(obj)/$(DEPTH)/pld/inc +INCS += -I$(obj)/$(DEPTH)/component_dev/dp/inc + +ifeq ($(WLAN_CONV_CRYPTO_SUPPORTED), 1) +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/crypto/inc +INCS += -I$(obj)/$(DEPTH)/component_dev/crypto/inc +endif + +ifeq ($(WLAN_SUPPORT_GREEN_AP), 1) +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/green_ap/dispatcher/inc +endif + +#Start of offload related deifines +HOST_CMN_CONVG_SRC := $(DEPTH)/cmn_dev +HOST_CMN_CONVG_HIF_SRC := $(DEPTH)/cmn_dev/hif/src +HOST_CMN_CONVG_HIF_INC1 := $(DEPTH)/cmn_dev/hif +HOST_CMN_CONVG_HTC_INC := $(DEPTH)/cmn_dev/htc +HOST_CMN_CONVG_DP_INC := $(DEPTH)/cmn_dev/dp/wifi3.0 +HOST_CMN_CONVG_CFG_INC := $(DEPTH)/cmn_dev/wlan_cfg +HOST_CMN_CONVG_HAL_INC := $(DEPTH)/cmn_dev/hal/inc +HOST_CMN_CONVG_HAL_WIFI_INC := $(DEPTH)/cmn_dev/hal/wifi3.0 + +INCS += -I$(obj)/$(DEPTH)/include -I$(obj)/$(DEPTH)/umac/include \ + -I$(obj)/$(DEPTH)/direct_attach/umac/include \ + -I$(obj)/$(DEPTH)/direct_attach/umac/if_lmac -I$(obj)/$(DEPTH)/umac/crypto \ + -I$(obj)/$(DEPTH)/umac/scan -I$(obj)/$(DEPTH)/umac/resmgr \ + -I$(obj)/$(DEPTH)/umac/pm -I$(obj)/$(DEPTH)/umac/txrx \ + -I$(obj)/$(DEPTH)/umac/acs -I$(obj)/$(DEPTH)/umac/txbf \ + -I$(obj)/$(DEPTH)/umac/wnm \ + -I$(obj)/$(DEPTH)/umac/tdls \ + -I$(obj)/$(DEPTH)/umac/rpt_placement \ + -I$(obj)/$(DEPTH)/umac/wifipos \ + -I$(obj)/$(DEPTH)/umac/wds -I$(obj)/$(DEPTH)/umac/ique \ + -I$(obj)/$(DEPTH)/direct_attach/hal -I$(obj)/$(DEPTH)/direct_attach/lmac/ath_dev \ + -I$(obj)/$(DEPTH)/direct_attach/hal/$(OS) \ + -I$(obj)/$(DEPTH)/umac/vi_dbg \ + -I$(obj)/$(DEPTH)/umac/smart_antenna \ + -I$(obj)/$(DEPTH)/umac/smart_ant \ + -I$(obj)/$(DEPTH)/umac/ald \ + -I$(obj)/$(DEPTH)/lmac/ath_pktlog \ + -I$(obj)/$(DEPTH)/direct_attach/lmac/ratectrl \ + -I$(obj)/$(DEPTH)/os/linux/mem/ \ + -I$(obj)/$(DEPTH)/umac/base \ + -I$(obj)/$(DEPTH)/qca_ol \ + -I$(obj)/$(DEPTH)/cmn_dev/qdf/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/qdf/linux/src \ + -I$(obj)/$(DEPTH)/cmn_dev/qal/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/qal/linux/src \ + -I$(obj)/$(DEPTH)/cmn_dev/hif \ + -I$(obj)/$(DEPTH)/cmn_dev/hif/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/hif/src \ + -I$(obj)/$(DEPTH)/cmn_dev/hif/src/ce \ + -I$(obj)/$(DEPTH)/cmn_dev/hif/src/pcie \ + -I$(obj)/$(DEPTH)/cmn_dev/hif/src/snoc \ + -I$(obj)/$(DEPTH)/cmn_dev/hif/src/dispatcher \ + -I$(obj)/$(DEPTH)/cmn_dev/pld_stub/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/hal/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/hal/wifi3.0 \ + -I$(obj)/$(DEPTH)/cmn_dev/dp/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/dp/wifi3.0 \ + -I$(obj)/$(DEPTH)/cmn_dev/wlan_cfg \ + -I$(obj)/$(HOST_CMN_CONVG_SRC)/htc \ + -I$(obj)/$(DEPTH)/cmn_dev/wmi/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/obj_mgr/inc \ + -I$(obj)/$(HOST_CMN_CONVG_SRC)/scheduler/inc \ + -I$(obj)/$(HOST_CMN_CONVG_SRC)/init_deinit/dispatcher/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/global_umac_dispatcher/lmac_if/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/mgmt_txrx/dispatcher/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/target_if/init_deinit/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/global_lmac_if/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/os_if/linux \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/cmn_defs/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/target_if/core/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/scan/dispatcher/inc \ + -I$(obj)/$(DEPTH)/umac/scan \ + -I$(obj)/$(DEPTH)/cmn_dev/ol_if \ + -I$(obj)/$(DEPTH)/cmn_dev/target_if/scan/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/serialization/core/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/regulatory/dispatcher/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/target_if/regulatory/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/mlme/mlme_objmgr/dispatcher/inc/ \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/mlme/vdev_mgr/dispatcher/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/mlme/pdev_mgr/dispatcher/inc \ + +PERF_PWR_OFFLOAD_INC += -I$(PERF_PWR_OFFLOAD_DIR_PATH)/wlan/include \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/wlan/ath_pktlog/include \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/htt/include \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/wlan/txrx/include \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/include \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/include \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/hif/pci \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/hif/pci/linux \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/os/linux/include \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/wlan/regdmn \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/wlan/lmac_offload_if \ + -I$(HOST_CMN_CONVG_HIF_INC1)/inc \ + -I$(HOST_CMN_CONVG_HIF_INC1)/src \ + -I$(HOST_CMN_CONVG_HIF_INC1)/src/pcie \ + -I$(HOST_CMN_CONVG_HIF_INC1)/src/snoc \ + -I$(HOST_CMN_CONVG_SRC)/pld_stub/inc \ + -I$(HOST_CMN_CONVG_HIF_SRC)/ce \ + -I$(HOST_CMN_CONVG_HTC_INC) \ + -I$(HOST_CMN_CONVG_CFG_INC) \ + -I$(HOST_CMN_CONVG_DP_INC) \ + -I$(HOST_CMN_CONVG_HAL_INC) \ + -I$(HOST_CMN_CONVG_HAL_WIFI_INC) \ + -I$(PERF_PWR_OFFLOAD_WMI_SRC)/inc \ + -I$(obj)/$(DEPTH)/offload/extra_include + +#Add HK/BL Fw header path required by common files +ifeq (1, ${WIFI_TARGET_3_0}) +PERF_PWR_OFFLOAD_INC += -I$(PERF_PWR_OFFLOAD_DIR_PATH)/include/fwcommon \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/include/fwcommon/htt_stats +else +PERF_PWR_OFFLOAD_INC += -I$(PERF_PWR_OFFLOAD_DIR_PATH)/include/legacy +endif + +INCS += $(PERF_PWR_OFFLOAD_INC) +INCS += -I$(obj)/$(DEPTH)/cmn_dev/target_if/spectral +INCS += -I$(obj)/$(DEPTH)/cmn_dev/wmi/inc +INCS += -I$(PERF_PWR_OFFLOAD_DIR_PATH)/hw/include +#end of offload related defines + +#Start of Legacy spectral related defines +INCS += -I$(HAL) -I$(HAL)/$(OS) -I$(ATH) -I$(ATH_RATE) -I$(ATH_PKTLOG) -I$(WLAN) -I$(IF_WLAN) -I$(ATH_SPECTRAL) -I$(ATHEROSPATH) -I$(obj)/$(DEPTH)/../../apps/spectral/common +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/inc -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/obj_mgr/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/cmn_defs/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/scan/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/cmn_defs/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/global_umac_dispatcher/lmac_if/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/scheduler/inc +INCS += -I$(obj)/$(DEPTH)/umac/scan +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/mgmt_txrx/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/regulatory/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/dfs/dispatcher/inc + +SPECTRAL_DA_OBJS := $(DEPTH)/direct_attach/lmac/spectral/spectral.o \ + $(DEPTH)/direct_attach/lmac/spectral/spectral_netlink.o \ + $(DEPTH)/direct_attach/lmac/spectral/spectral_cmds.o \ + $(DEPTH)/direct_attach/lmac/spectral/spectral_process_data.o \ + $(DEPTH)/direct_attach/lmac/spectral/spectral_phyerr.o +#End of legacy spectral defines + +ifeq ($(QCA_AIRTIME_FAIRNESS), 1) +ccflags-y+= -DWLAN_ATF_ENABLE +INCS += -I$(obj)/$(DEPTH)/umac/airtime_fairness/dispatcher/inc +endif + +ifeq ($(UNIFIED_SMARTANTENNA), 1) +ccflags-y+= -DWLAN_SA_API_ENABLE +INCS += -I$(obj)/$(DEPTH)/umac/sa_api/dispatcher/inc +endif + +ifeq ($(QCA_CFR_SUPPORT), 1) +ccflags-y+= -DWLAN_CFR_ENABLE=1 +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cfr/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/component_dev/qal/inc +endif + +ifeq ($(strip ${QCA_DFS_COMPONENT_ENABLE}),1) +ccflags-y+= -DDFS_COMPONENT_ENABLE +endif + +obj-m += qca_spectral.o + +ccflags-y+= $(INCS) $(COPTS) -DSPECTRAL_USE_NETLINK_SOCKETS=1 -DWLAN_SPECTRAL_ENABLE=1 +ccflags-y+= -DSPECTRAL_USE_NL_BCAST=1 + +ifeq ($(strip ${QCA_PARTNER_MAKE_F_SUPPORT}),1) +MOD_CFLAGS = -D"KBUILD_STR(s)=\#s" -D"KBUILD_BASENAME=KBUILD_STR(qca_spectral.mod)" -D"KBUILD_MODNAME=KBUILD_STR(qca_spectral)" +endif + +INCS += -I$(obj)/$(DEPTH)/spectral/dispatcher/inc + +SPECTRAL_TIF_OBJS += $(DEPTH)/cmn_dev/target_if/spectral/target_if_spectral.o \ + $(DEPTH)/cmn_dev/target_if/spectral/target_if_spectral_netlink.o \ + $(DEPTH)/cmn_dev/target_if/spectral/target_if_spectral_phyerr.o \ + $(DEPTH)/cmn_dev/target_if/spectral/target_if_spectral_sim.o + +SPECTRAL_CMN_OBJS += core/spectral_offload.o \ + core/spectral_common.o \ + dispatcher/src/wlan_spectral_utils_api.o \ + dispatcher/src/wlan_spectral_ucfg_api.o \ + dispatcher/src/wlan_spectral_tgt_api.o \ + core/spectral_module.o \ + $(DEPTH)/cmn_dev/os_if/linux/spectral/src/wlan_cfg80211_spectral.o \ + $(DEPTH)/cmn_dev/os_if/linux/spectral/src/os_if_spectral_netlink.o + +ifeq ($(strip ${DA_SUPPORT}),1) +SPECTRAL_CMN_OBJS += $(DEPTH)/component_dev/direct_attach/spectral/core/spectral_direct_attach.o +endif + +ifeq ($(strip ${DA_SUPPORT}),1) +qca_spectral-objs += ${SPECTRAL_CMN_OBJS} \ + ${SPECTRAL_TIF_OBJS} \ + ${SPECTRAL_DA_OBJS} +else +qca_spectral-objs += ${SPECTRAL_CMN_OBJS} \ + ${SPECTRAL_TIF_OBJS} +endif + +ifeq ($(strip ${QCA_PARTNER_MAKE_F_SUPPORT}),1) +all: qca_spectral.ko + +qca_spectral.mod.o: qca_spectral.mod.c + ${CC} -c -o $@ ${ccflags-y} ${MOD_CFLAGS} $< + +qca_spectral.o: ${qca_spectral-objs} + $(LD) -m elf32btsmip -r -o qca_spectral.o $(qca_spectral-objs) + $(KERNELPATH)/scripts/mod/modpost qca_spectral.o + +qca_spectral.ko: qca_spectral.o qca_spectral.mod.o + $(LD) $(LDOPTS) -o qca_spectral.ko qca_spectral.o qca_spectral.mod.o + +%.o: %.c + ${CC} -c -o $@ ${ccflags-y} $< +endif diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_cmn_api_i.h b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_cmn_api_i.h new file mode 100644 index 0000000000000000000000000000000000000000..02c90eeda0f95dca03ec2d5f445d90647a74e627 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_cmn_api_i.h @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SPECTRAL_CMN_API_I_H_ +#define _SPECTRAL_CMN_API_I_H_ + +#include "spectral_defs_i.h" + +/** + * wlan_spectral_psoc_obj_create_handler() - handler for psoc object create + * @psoc: reference to global psoc object + * @arg: reference to argument provided during registration of handler + * + * This is a handler to indicate psoc object created. Hence spectral_context + * object can be created and attached to psoc component list. + * + * Return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_FAILURE if psoc is null + * QDF_STATUS_E_NOMEM on failure of spectral object allocation + */ +QDF_STATUS wlan_spectral_psoc_obj_create_handler(struct wlan_objmgr_psoc *psoc, + void *arg); + +/** + * wlan_spectral_psoc_obj_destroy_handler() - handler for psoc object delete + * @psoc: reference to global psoc object + * @arg: reference to argument provided during registration of handler + * + * This is a handler to indicate psoc object going to be deleted. + * Hence spectral_context object can be detached from psoc component list. + * Then spectral_context object can be deleted. + * + * Return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_FAILURE on failure + */ +QDF_STATUS wlan_spectral_psoc_obj_destroy_handler(struct wlan_objmgr_psoc *psoc, + void *arg); + +/** + * wlan_spectral_pdev_obj_create_handler() - handler for pdev object create + * @pdev: reference to global pdev object + * @arg: reference to argument provided during registration of handler + * + * This is a handler to indicate pdev object created. Hence pdev specific + * spectral object can be created and attached to pdev component list. + * + * Return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_FAILURE if pdev is null + * QDF_STATUS_E_NOMEM on failure of spectral object allocation + */ +QDF_STATUS wlan_spectral_pdev_obj_create_handler(struct wlan_objmgr_pdev *pdev, + void *arg); + +/** + * wlan_spectral_pdev_obj_destroy_handler() - handler for pdev object delete + * @pdev: reference to global pdev object + * @arg: reference to argument provided during registration of handler + * + * This is a handler to indicate pdev object going to be deleted. + * Hence pdev specific spectral object can be detached from pdev component list. + * Then pdev_spectral object can be deleted. + * + * Return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_FAILURE on failure + */ +QDF_STATUS wlan_spectral_pdev_obj_destroy_handler(struct wlan_objmgr_pdev *pdev, + void *arg); + +/** + * spectral_control_cmn()- common handler for demultiplexing requests from + * higher layer + * @pdev: reference to global pdev object + * @sscan_req: pointer to Spectral scan request + * + * This function processes the spectral config command + * and appropriate handlers are invoked. + * + * Return: QDF_STATUS_SUCCESS/QDF_STATUS_E_FAILURE + */ +QDF_STATUS spectral_control_cmn(struct wlan_objmgr_pdev *pdev, + struct spectral_cp_request *sscan_req); + +/** + * spectral_control_ol(): Offload handler for demultiplexing requests from + * higher layer + * @pdev: reference to global pdev object + * @id: spectral config command id + * @indata: reference to input data + * @insize: input data size + * @outdata: reference to output data + * @outsize: reference to output data size + * + * This function processes the spectral config command + * and appropriate handlers are invoked. + * + * Return: 0 success else failure + */ +int spectral_control_ol( + struct wlan_objmgr_pdev *pdev, u_int id, + void *indata, uint32_t insize, void *outdata, uint32_t *outsize); + +/** + * spectral_get_spectral_ctx_from_pdev() - API to get spectral context object + * from pdev + * @pdev : Reference to pdev global object + * + * This API used to get spectral context object from global pdev reference. + * Null check should be done before invoking this inline function. + * + * Return : Reference to spectral_context object + */ +static inline struct spectral_context * +spectral_get_spectral_ctx_from_pdev(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct spectral_context *sc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + if (psoc) { + sc = wlan_objmgr_psoc_get_comp_private_obj( + psoc, + WLAN_UMAC_COMP_SPECTRAL); + } + + return sc; +} + +/** + * spectral_get_spectral_ctx_from_pscoc() - API to get spectral context object + * from psoc + * @psoc : Reference to psoc global object + * + * This API used to get spectral context object from global psoc reference. + * Null check should be done before invoking this inline function. + * + * Return : Reference to spectral_context object + */ +static inline struct spectral_context * +spectral_get_spectral_ctx_from_psoc(struct wlan_objmgr_psoc *psoc) +{ + struct spectral_context *sc = NULL; + + if (psoc) { + sc = wlan_objmgr_psoc_get_comp_private_obj( + psoc, + WLAN_UMAC_COMP_SPECTRAL); + } + + return sc; +} + +/** + * spectral_get_spectral_ctx_from_vdev() - API to get spectral context object + * from vdev + * @vdev : Reference to vdev global object + * + * This API used to get spectral context object from global vdev reference. + * Null check should be done before invoking this inline function. + * + * Return : Reference to spectral_context object + */ +static inline struct spectral_context * +spectral_get_spectral_ctx_from_vdev(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct spectral_context *sc = NULL; + + psoc = wlan_vdev_get_psoc(vdev); + if (psoc) { + sc = wlan_objmgr_psoc_get_comp_private_obj( + psoc, + WLAN_UMAC_COMP_SPECTRAL); + } + + return sc; +} +#endif /* _SPECTRAL_CMN_API_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_common.c b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_common.c new file mode 100644 index 0000000000000000000000000000000000000000..a460635da4d4883b80911085f4f1c8572a1c0d86 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_common.c @@ -0,0 +1,685 @@ +/* + * Copyright (c) 2011,2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "spectral_cmn_api_i.h" +#include "spectral_ol_api_i.h" +#include +#include +#ifdef DA_SUPPORT +#include "spectral_da_api_i.h" +#endif +#include +#include +#include + +/** + * spectral_get_vdev() - Get pointer to vdev to be used for Spectral + * operations + * @pdev: Pointer to pdev + * + * Spectral operates on pdev. However, in order to retrieve some WLAN + * properties, a vdev is required. To facilitate this, the function returns the + * first vdev in our pdev. The caller should release the reference to the vdev + * once it is done using it. Additionally, the caller should ensure it has a + * reference to the pdev at the time of calling this function, and should + * release the pdev reference either after this function returns or at a later + * time when the caller is done using pdev. + * TODO: + * - If the framework later provides an API to obtain the first active + * vdev, then it would be preferable to use this API. + * - Use a common get_vdev() handler for core and target_if using Rx ops. This + * is deferred till details emerge on framework providing API to get first + * active vdev. + * + * Return: Pointer to vdev on success, NULL on failure + */ +static struct wlan_objmgr_vdev* +spectral_get_vdev(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_vdev *vdev = NULL; + + qdf_assert_always(pdev); + + vdev = wlan_objmgr_pdev_get_first_vdev(pdev, WLAN_SPECTRAL_ID); + + if (!vdev) { + spectral_warn("Unable to get first vdev of pdev"); + return NULL; + } + + return vdev; +} + +#ifdef SPECTRAL_MODULIZED_ENABLE +/** + * spectral_register_cfg80211_handlers() - Register spectral cfg80211 handlers + * @pdev: Pointer to pdev + * + * Register spectral cfg80211 handlers + * Handlers can be different depending on whether spectral modulized or not + * + * Return: None + */ +static void +spectral_register_cfg80211_handlers(struct wlan_objmgr_pdev *pdev) +{ + wlan_cfg80211_register_spectral_cmd_handler( + pdev, + SPECTRAL_SCAN_START_HANDLER_IDX, + wlan_cfg80211_spectral_scan_config_and_start); + wlan_cfg80211_register_spectral_cmd_handler( + pdev, + SPECTRAL_SCAN_STOP_HANDLER_IDX, + wlan_cfg80211_spectral_scan_stop); + wlan_cfg80211_register_spectral_cmd_handler( + pdev, + SPECTRAL_SCAN_GET_CONFIG_HANDLER_IDX, + wlan_cfg80211_spectral_scan_get_config); + wlan_cfg80211_register_spectral_cmd_handler( + pdev, + SPECTRAL_SCAN_GET_DIAG_STATS_HANDLER_IDX, + wlan_cfg80211_spectral_scan_get_diag_stats); + wlan_cfg80211_register_spectral_cmd_handler( + pdev, + SPECTRAL_SCAN_GET_CAP_HANDLER_IDX, + wlan_cfg80211_spectral_scan_get_cap); + wlan_cfg80211_register_spectral_cmd_handler( + pdev, + SPECTRAL_SCAN_GET_STATUS_HANDLER_IDX, + wlan_cfg80211_spectral_scan_get_status); +} +#else +static void +spectral_register_cfg80211_handlers(struct wlan_objmgr_pdev *pdev) +{ +} +#endif + +QDF_STATUS +spectral_control_cmn(struct wlan_objmgr_pdev *pdev, + struct spectral_cp_request *sscan_req) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + int temp_debug; + struct spectral_config sp_out; + struct spectral_config *sp_in; + struct spectral_config *spectralparams; + struct spectral_context *sc; + struct wlan_objmgr_vdev *vdev = NULL; + uint8_t vdev_rxchainmask = 0; + enum spectral_scan_mode smode = sscan_req->ss_mode; + enum spectral_cp_error_code *err; + QDF_STATUS ret; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + goto bad; + } + sc = spectral_get_spectral_ctx_from_pdev(pdev); + if (!sc) { + spectral_err("Spectral context is NULL!"); + goto bad; + } + + switch (sscan_req->req_id) { + case SPECTRAL_SET_CONFIG: + err = &sscan_req->config_req.sscan_err_code; + sp_in = &sscan_req->config_req.sscan_config; + if (sp_in->ss_count != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_SCAN_COUNT, + sp_in->ss_count, smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_fft_period != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_FFT_PERIOD, + sp_in->ss_fft_period, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_period != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_SCAN_PERIOD, + sp_in->ss_period, smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_short_report != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_SHORT_REPORT, + (uint32_t) + sp_in->ss_short_report ? 1 : 0, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_spectral_pri != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_SPECT_PRI, + (uint32_t) + (sp_in->ss_spectral_pri), + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_fft_size != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_FFT_SIZE, + sp_in->ss_fft_size, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_gc_ena != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_GC_ENA, + sp_in->ss_gc_ena, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_restart_ena != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_RESTART_ENA, + sp_in->ss_restart_ena, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_noise_floor_ref != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_NOISE_FLOOR_REF, + sp_in->ss_noise_floor_ref, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_init_delay != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_INIT_DELAY, + sp_in->ss_init_delay, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_nb_tone_thr != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_NB_TONE_THR, + sp_in->ss_nb_tone_thr, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_str_bin_thr != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_STR_BIN_THR, + sp_in->ss_str_bin_thr, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_wb_rpt_mode != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_WB_RPT_MODE, + sp_in->ss_wb_rpt_mode, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_rssi_rpt_mode != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_RSSI_RPT_MODE, + sp_in->ss_rssi_rpt_mode, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_rssi_thr != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_RSSI_THR, + sp_in->ss_rssi_thr, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_pwr_format != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_PWR_FORMAT, + sp_in->ss_pwr_format, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_rpt_mode != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_RPT_MODE, + sp_in->ss_rpt_mode, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_bin_scale != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_BIN_SCALE, + sp_in->ss_bin_scale, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_dbm_adj != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_DBM_ADJ, + sp_in->ss_dbm_adj, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + if (sp_in->ss_chn_mask != SPECTRAL_PHYERR_PARAM_NOVAL) { + /* + * Check if any of the inactive Rx antenna + * chains is set active in spectral chainmask + */ + vdev = spectral_get_vdev(pdev); + if (!vdev) + goto bad; + + vdev_rxchainmask = + wlan_vdev_mlme_get_rxchainmask(vdev); + wlan_objmgr_vdev_release_ref(vdev, + WLAN_SPECTRAL_ID); + + if (!(sp_in->ss_chn_mask & vdev_rxchainmask)) { + spectral_err("Invalid Spectral Chainmask - Inactive Rx antenna chain cannot be an active spectral chain"); + goto bad; + } else { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_CHN_MASK, + sp_in->ss_chn_mask, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + } + + if (sp_in->ss_frequency != SPECTRAL_PHYERR_PARAM_NOVAL) { + ret = sc->sptrlc_set_spectral_config + (pdev, + SPECTRAL_PARAM_FREQUENCY, + sp_in->ss_frequency, + smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + } + + break; + + case SPECTRAL_GET_CONFIG: + sc->sptrlc_get_spectral_config(pdev, &sp_out, smode); + spectralparams = &sscan_req->config_req.sscan_config; + spectralparams->ss_fft_period = sp_out.ss_fft_period; + spectralparams->ss_period = sp_out.ss_period; + spectralparams->ss_count = sp_out.ss_count; + spectralparams->ss_short_report = + sp_out.ss_short_report; + spectralparams->ss_spectral_pri = + sp_out.ss_spectral_pri; + spectralparams->ss_fft_size = sp_out.ss_fft_size; + spectralparams->ss_gc_ena = sp_out.ss_gc_ena; + spectralparams->ss_restart_ena = sp_out.ss_restart_ena; + spectralparams->ss_noise_floor_ref = + sp_out.ss_noise_floor_ref; + spectralparams->ss_init_delay = sp_out.ss_init_delay; + spectralparams->ss_nb_tone_thr = sp_out.ss_nb_tone_thr; + spectralparams->ss_str_bin_thr = sp_out.ss_str_bin_thr; + spectralparams->ss_wb_rpt_mode = sp_out.ss_wb_rpt_mode; + spectralparams->ss_rssi_rpt_mode = + sp_out.ss_rssi_rpt_mode; + spectralparams->ss_rssi_thr = sp_out.ss_rssi_thr; + spectralparams->ss_pwr_format = sp_out.ss_pwr_format; + spectralparams->ss_rpt_mode = sp_out.ss_rpt_mode; + spectralparams->ss_bin_scale = sp_out.ss_bin_scale; + spectralparams->ss_dbm_adj = sp_out.ss_dbm_adj; + spectralparams->ss_chn_mask = sp_out.ss_chn_mask; + spectralparams->ss_frequency = sp_out.ss_frequency; + break; + + case SPECTRAL_IS_ACTIVE: + sscan_req->status_req.is_active = + sc->sptrlc_is_spectral_active(pdev, + smode); + break; + + case SPECTRAL_IS_ENABLED: + sscan_req->status_req.is_enabled = + sc->sptrlc_is_spectral_enabled(pdev, + smode); + break; + + case SPECTRAL_SET_DEBUG_LEVEL: + temp_debug = sscan_req->debug_req.spectral_dbg_level; + sc->sptrlc_set_debug_level(pdev, temp_debug); + break; + + case SPECTRAL_GET_DEBUG_LEVEL: + sscan_req->debug_req.spectral_dbg_level = + sc->sptrlc_get_debug_level(pdev); + break; + + case SPECTRAL_ACTIVATE_SCAN: + err = &sscan_req->action_req.sscan_err_code; + ret = sc->sptrlc_start_spectral_scan(pdev, smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + break; + + case SPECTRAL_STOP_SCAN: + err = &sscan_req->action_req.sscan_err_code; + ret = sc->sptrlc_stop_spectral_scan(pdev, smode, err); + if (QDF_IS_STATUS_ERROR(ret)) + goto bad; + break; + + case SPECTRAL_GET_CAPABILITY_INFO: + { + struct spectral_caps *caps; + + caps = &sscan_req->caps_req.sscan_caps; + sc->sptrlc_get_spectral_capinfo(pdev, caps); + } + break; + + case SPECTRAL_GET_DIAG_STATS: + { + struct spectral_diag_stats *diag; + + diag = &sscan_req->diag_req.sscan_diag; + sc->sptrlc_get_spectral_diagstats(pdev, diag); + } + break; + + case SPECTRAL_GET_CHAN_WIDTH: + { + uint32_t chan_width; + + vdev = spectral_get_vdev(pdev); + if (!vdev) + goto bad; + + chan_width = spectral_vdev_get_ch_width(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + sscan_req->chan_width_req.chan_width = + (uint32_t)chan_width; + } + break; + + case SPECTRAL_SET_DMA_DEBUG: + if (sc->sptrlc_set_dma_debug) + sc->sptrlc_set_dma_debug( + pdev, + sscan_req->dma_debug_req.dma_debug_type, + sscan_req->dma_debug_req.dma_debug_enable); + break; + + default: + goto bad; + break; + } + + status = QDF_STATUS_SUCCESS; +bad: + return status; +} + +/** + * spectral_ctx_deinit() - De-initialize function pointers from spectral context + * @sc - Reference to spectral_context object + * + * Return: None + */ +static void +spectral_ctx_deinit(struct spectral_context *sc) +{ + if (sc) { + sc->sptrlc_ucfg_phyerr_config = NULL; + sc->sptrlc_pdev_spectral_init = NULL; + sc->sptrlc_pdev_spectral_deinit = NULL; + sc->sptrlc_set_spectral_config = NULL; + sc->sptrlc_get_spectral_config = NULL; + sc->sptrlc_start_spectral_scan = NULL; + sc->sptrlc_stop_spectral_scan = NULL; + sc->sptrlc_is_spectral_active = NULL; + sc->sptrlc_is_spectral_enabled = NULL; + sc->sptrlc_set_debug_level = NULL; + sc->sptrlc_get_debug_level = NULL; + sc->sptrlc_get_spectral_capinfo = NULL; + sc->sptrlc_get_spectral_diagstats = NULL; + } +} + +#ifdef DA_SUPPORT +/** + * wlan_spectral_init_da() - init context of DA devices + * + * init context of DA device + * + * Return: void + */ +static void +wlan_spectral_init_da(struct spectral_context *sc) +{ + spectral_ctx_init_da(sc); +} +#else +static void +wlan_spectral_init_da(struct spectral_context *sc) +{ +} +#endif + +QDF_STATUS +wlan_spectral_psoc_obj_create_handler(struct wlan_objmgr_psoc *psoc, void *arg) +{ + struct spectral_context *sc = NULL; + + if (!psoc) { + spectral_err("PSOC is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (cfg_get(psoc, CFG_SPECTRAL_DISABLE)) { + wlan_psoc_nif_feat_cap_set(psoc, WLAN_SOC_F_SPECTRAL_DISABLE); + spectral_info("Spectral is disabled"); + return QDF_STATUS_COMP_DISABLED; + } + + sc = (struct spectral_context *) + qdf_mem_malloc(sizeof(struct spectral_context)); + if (!sc) + return QDF_STATUS_E_NOMEM; + + qdf_mem_zero(sc, sizeof(struct spectral_context)); + sc->psoc_obj = psoc; + if (wlan_objmgr_psoc_get_dev_type(psoc) == WLAN_DEV_OL) + spectral_ctx_init_ol(sc); + else if (wlan_objmgr_psoc_get_dev_type(psoc) == WLAN_DEV_DA) + wlan_spectral_init_da(sc); + wlan_objmgr_psoc_component_obj_attach(psoc, WLAN_UMAC_COMP_SPECTRAL, + (void *)sc, QDF_STATUS_SUCCESS); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_spectral_psoc_obj_destroy_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + struct spectral_context *sc = NULL; + + if (!psoc) { + spectral_err("PSOC is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (wlan_spectral_is_feature_disabled(psoc)) { + spectral_info("Spectral is disabled"); + return QDF_STATUS_COMP_DISABLED; + } + + sc = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_SPECTRAL); + if (sc) { + wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_SPECTRAL, + (void *)sc); + /* Deinitilise function pointers from spectral context */ + spectral_ctx_deinit(sc); + qdf_mem_free(sc); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_spectral_pdev_obj_create_handler(struct wlan_objmgr_pdev *pdev, void *arg) +{ + struct pdev_spectral *ps = NULL; + struct spectral_context *sc = NULL; + void *target_handle = NULL; + + if (!pdev) { + spectral_err("PDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (wlan_spectral_is_feature_disabled(wlan_pdev_get_psoc(pdev))) { + spectral_info("Spectral is disabled"); + return QDF_STATUS_COMP_DISABLED; + } + + ps = (struct pdev_spectral *) + qdf_mem_malloc(sizeof(struct pdev_spectral)); + if (!ps) + return QDF_STATUS_E_NOMEM; + + sc = spectral_get_spectral_ctx_from_pdev(pdev); + if (!sc) { + spectral_err("Spectral context is NULL!"); + goto cleanup; + } + + qdf_mem_zero(ps, sizeof(struct pdev_spectral)); + ps->psptrl_pdev = pdev; + + spectral_register_cfg80211_handlers(pdev); + if (sc->sptrlc_pdev_spectral_init) { + target_handle = sc->sptrlc_pdev_spectral_init(pdev); + if (!target_handle) { + spectral_err("Spectral lmac object is NULL!"); + goto cleanup; + } + ps->psptrl_target_handle = target_handle; + } + wlan_objmgr_pdev_component_obj_attach(pdev, WLAN_UMAC_COMP_SPECTRAL, + (void *)ps, QDF_STATUS_SUCCESS); + + return QDF_STATUS_SUCCESS; + cleanup: + qdf_mem_free(ps); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wlan_spectral_pdev_obj_destroy_handler(struct wlan_objmgr_pdev *pdev, + void *arg) +{ + struct pdev_spectral *ps = NULL; + struct spectral_context *sc = NULL; + + if (!pdev) { + spectral_err("PDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (wlan_spectral_is_feature_disabled(wlan_pdev_get_psoc(pdev))) { + spectral_info("Spectral is disabled"); + return QDF_STATUS_COMP_DISABLED; + } + + sc = spectral_get_spectral_ctx_from_pdev(pdev); + if (!sc) { + spectral_err("Spectral context is NULL!"); + return QDF_STATUS_E_FAILURE; + } + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + if (ps) { + if (sc->sptrlc_pdev_spectral_deinit) + sc->sptrlc_pdev_spectral_deinit(pdev); + ps->psptrl_target_handle = NULL; + wlan_objmgr_pdev_component_obj_detach(pdev, + WLAN_UMAC_COMP_SPECTRAL, + (void *)ps); + qdf_mem_free(ps); + } + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_defs_i.h b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_defs_i.h new file mode 100644 index 0000000000000000000000000000000000000000..716807d5afac9d3468df25d08048f468b062fdfd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_defs_i.h @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SPECTRAL_DEFS_I_H_ +#define _SPECTRAL_DEFS_I_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define spectral_fatal(format, args...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_err(format, args...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_warn(format, args...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_info(format, args...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_debug(format, args...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_SPECTRAL, format, ## args) + +#define spectral_fatal_nofl(format, args...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_err_nofl(format, args...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_warn_nofl(format, args...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_info_nofl(format, args...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_debug_nofl(format, args...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_SPECTRAL, format, ## args) + +#define spectral_fatal_rl(format, args...) \ + QDF_TRACE_FATAL_RL(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_err_rl(format, args...) \ + QDF_TRACE_ERROR_RL(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_warn_rl(format, args...) \ + QDF_TRACE_WARN_RL(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_info_rl(format, args...) \ + QDF_TRACE_INFO_RL(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_debug_rl(format, args...) \ + QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_SPECTRAL, format, ## args) + +#define spectral_fatal_rl_nofl(format, args...) \ + QDF_TRACE_FATAL_RL_NO_FL(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_err_rl_nofl(format, args...) \ + QDF_TRACE_ERROR_RL_NO_FL(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_warn_rl_nofl(format, args...) \ + QDF_TRACE_WARN_RL_NO_FL(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_info_rl_nofl(format, args...) \ + QDF_TRACE_INFO_RL_NO_FL(QDF_MODULE_ID_SPECTRAL, format, ## args) +#define spectral_debug_rl_nofl(format, args...) \ + QDF_TRACE_DEBUG_RL_NO_FL(QDF_MODULE_ID_SPECTRAL, format, ## args) + +/** + * struct pdev_spectral - Radio specific spectral object + * @psptrl_pdev: Back-pointer to struct wlan_objmgr_pdev + * @spectral_sock: Spectral Netlink socket for sending samples to + * applications + * @psptrl_target_handle: reference to spectral lmac object + * @skb: Socket buffer for sending samples to applications + * @spectral_pid : Spectral port ID + */ +struct pdev_spectral { + struct wlan_objmgr_pdev *psptrl_pdev; + struct sock *spectral_sock; + void *psptrl_target_handle; + struct sk_buff *skb[SPECTRAL_MSG_TYPE_MAX]; + uint32_t spectral_pid; +}; + +struct wmi_spectral_cmd_ops; +/** + * struct spectral_context - spectral global context + * @psoc_obj: Reference to psoc global object + * @spectral_legacy_cbacks: Spectral legacy callbacks + * + * Call back functions to invoke independent of OL/DA + * @sptrlc_ucfg_phyerr_config: ucfg handler for phyerr + * @sptrlc_pdev_spectral_init: Init spectral + * @sptrlc_pdev_spectral_deinit: Deinit spectral + * @sptrlc_set_spectral_config: Set spectral configurations + * @sptrlc_get_spectral_config: Get spectral configurations + * @sptrlc_start_spectral_scan: Start spectral scan + * @sptrlc_stop_spectral_scan: Stop spectral scan + * @sptrlc_is_spectral_active: Check if spectral scan is active + * @sptrlc_is_spectral_enabled: Check if spectral is enabled + * @sptrlc_set_debug_level: Set debug level + * @sptrlc_get_debug_level: Get debug level + * @sptrlc_get_spectral_capinfo: Get spectral capability info + * @sptrlc_get_spectral_diagstats: Get spectral diag status + * @sptrlc_register_wmi_spectral_cmd_ops: Register wmi_spectral_cmd operations + * @sptrlc_register_netlink_cb: Register Netlink callbacks + * @sptrlc_use_nl_bcast: Check whether to use Netlink broadcast/unicast + * @sptrlc_deregister_netlink_cb: De-register Netlink callbacks + * @sptrlc_process_spectral_report: Process spectral report + * @sptrlc_set_dma_debug: Set DMA debug + */ +struct spectral_context { + struct wlan_objmgr_psoc *psoc_obj; + struct spectral_legacy_cbacks legacy_cbacks; + QDF_STATUS (*sptrlc_spectral_control) + (struct wlan_objmgr_pdev *pdev, + struct spectral_cp_request *sscan_req); + int (*sptrlc_ucfg_phyerr_config)(struct wlan_objmgr_pdev *pdev, + void *ad); + void * (*sptrlc_pdev_spectral_init)(struct wlan_objmgr_pdev *pdev); + void (*sptrlc_pdev_spectral_deinit)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*sptrlc_set_spectral_config) + (struct wlan_objmgr_pdev *pdev, + const uint32_t threshtype, + const uint32_t value, + const enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + QDF_STATUS (*sptrlc_get_spectral_config) + (struct wlan_objmgr_pdev *pdev, + struct spectral_config *sptrl_config, + const enum spectral_scan_mode smode); + QDF_STATUS (*sptrlc_start_spectral_scan) + (struct wlan_objmgr_pdev *pdev, + const enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + QDF_STATUS (*sptrlc_stop_spectral_scan) + (struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + bool (*sptrlc_is_spectral_active)(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode); + bool (*sptrlc_is_spectral_enabled)(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode); + QDF_STATUS (*sptrlc_set_debug_level)(struct wlan_objmgr_pdev *pdev, + uint32_t debug_level); + uint32_t (*sptrlc_get_debug_level)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*sptrlc_get_spectral_capinfo)(struct wlan_objmgr_pdev *pdev, + struct spectral_caps *scaps); + QDF_STATUS (*sptrlc_get_spectral_diagstats) + (struct wlan_objmgr_pdev *pdev, + struct spectral_diag_stats *stats); + void (*sptrlc_register_wmi_spectral_cmd_ops)( + struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops); + void (*sptrlc_register_netlink_cb)( + struct wlan_objmgr_pdev *pdev, + struct spectral_nl_cb *nl_cb); + bool (*sptrlc_use_nl_bcast)(struct wlan_objmgr_pdev *pdev); + void (*sptrlc_deregister_netlink_cb)(struct wlan_objmgr_pdev *pdev); + int (*sptrlc_process_spectral_report)( + struct wlan_objmgr_pdev *pdev, + void *payload); + QDF_STATUS (*sptrlc_set_dma_debug)( + struct wlan_objmgr_pdev *pdev, + enum spectral_dma_debug dma_debug_type, + bool dma_debug_enable); +}; + +#endif /* _SPECTRAL_DEFS_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_module.c b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_module.c new file mode 100644 index 0000000000000000000000000000000000000000..eabff65f6b36b536b584a551b050e1e1cdfe2a3c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_module.c @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2011,2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include "spectral_defs_i.h" +#include + +MODULE_LICENSE("Dual BSD/GPL"); + +/** + * spectral_init_module() - Initialize Spectral module + * + * Return: None + */ + +#ifndef QCA_SINGLE_WIFI_3_0 +static int __init spectral_init_module(void) +#else +int spectral_init_module(void) +#endif +{ + spectral_info("qca_spectral module loaded"); + wlan_spectral_init(); + /* register spectral rxops */ + wlan_lmac_if_sptrl_set_rx_ops_register_cb + (wlan_lmac_if_sptrl_register_rx_ops); + /* register spectral pdev open handler */ + dispatcher_register_spectral_pdev_open_handler( + spectral_pdev_open); + + return 0; +} + +/** + * spectral_exit_module() - De-initialize and exit Spectral module + * + * Return: None + */ +#ifndef QCA_SINGLE_WIFI_3_0 +static void __exit spectral_exit_module(void) +#else +void spectral_exit_module(void) +#endif +{ + wlan_spectral_deinit(); + spectral_info("qca_spectral module unloaded"); +} + +#ifndef QCA_SINGLE_WIFI_3_0 +module_init(spectral_init_module); +module_exit(spectral_exit_module); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_offload.c b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_offload.c new file mode 100644 index 0000000000000000000000000000000000000000..7db9dd3861735521fed7fcf562c00bec2248ec4f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_offload.c @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "spectral_cmn_api_i.h" +#include "spectral_ol_api_i.h" +#include "../dispatcher/inc/wlan_spectral_tgt_api.h" + +#ifdef DIRECT_BUF_RX_DEBUG +static void +spectral_ctx_init_ol_dma_debug(struct spectral_context *sc) +{ + if (!sc) { + spectral_err("spectral context is null!"); + return; + } + sc->sptrlc_set_dma_debug = tgt_set_spectral_dma_debug; +} +#else +static void +spectral_ctx_init_ol_dma_debug(struct spectral_context *sc) +{ +} +#endif + +void +spectral_ctx_init_ol(struct spectral_context *sc) +{ + if (!sc) { + spectral_err("spectral context is null!"); + return; + } + sc->sptrlc_spectral_control = tgt_spectral_control; + sc->sptrlc_pdev_spectral_init = tgt_pdev_spectral_init; + sc->sptrlc_pdev_spectral_deinit = tgt_pdev_spectral_deinit; + sc->sptrlc_set_spectral_config = tgt_set_spectral_config; + sc->sptrlc_get_spectral_config = tgt_get_spectral_config; + sc->sptrlc_start_spectral_scan = tgt_start_spectral_scan; + sc->sptrlc_stop_spectral_scan = tgt_stop_spectral_scan; + sc->sptrlc_is_spectral_active = tgt_is_spectral_active; + sc->sptrlc_is_spectral_enabled = tgt_is_spectral_enabled; + sc->sptrlc_set_debug_level = tgt_set_debug_level; + sc->sptrlc_get_debug_level = tgt_get_debug_level; + sc->sptrlc_get_spectral_capinfo = tgt_get_spectral_capinfo; + sc->sptrlc_get_spectral_diagstats = tgt_get_spectral_diagstats; + sc->sptrlc_register_wmi_spectral_cmd_ops = + tgt_register_wmi_spectral_cmd_ops; + sc->sptrlc_register_netlink_cb = tgt_spectral_register_nl_cb; + sc->sptrlc_use_nl_bcast = tgt_spectral_use_nl_bcast; + sc->sptrlc_deregister_netlink_cb = tgt_spectral_deregister_nl_cb; + sc->sptrlc_process_spectral_report = tgt_spectral_process_report; + spectral_ctx_init_ol_dma_debug(sc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_ol_api_i.h b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_ol_api_i.h new file mode 100644 index 0000000000000000000000000000000000000000..0ac69348b2ecd6a758e2cee6de600f921e086a57 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_ol_api_i.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SPECTRAL_OL_API_I_H_ +#define _SPECTRAL_OL_API_I_H_ + +#include "spectral_defs_i.h" + +/** + * spectral_ctx_init_ol() - Internal function to initialize spectral context + * with offload specific functions + * @sc : spectral context + * + * Internal function to initialize spectral context with offload specific + * functions + * + * Return : None + */ +void spectral_ctx_init_ol(struct spectral_context *sc); + +#endif /* _SPECTRAL_OL_API_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/cfg_spectral.h b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/cfg_spectral.h new file mode 100644 index 0000000000000000000000000000000000000000..175d743b53a66fcc74eb05796b6fec6e13d1e3be --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/cfg_spectral.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains centralized cfg definitions of Spectral component + */ +#ifndef __CONFIG_SPECTRAL_H +#define __CONFIG_SPECTRAL_H + +/* + * + * spectral_disable - disable spectral feature + * @Min: 0 + * @Max: 1 + * @Default: 0 + * + * This ini is used to disable spectral feature. + * + * Related: None + * + * Supported Feature: Spectral + * + * Usage: External + * + * + */ +#define CFG_SPECTRAL_DISABLE \ + CFG_INI_BOOL("spectral_disable", false, \ + "Spectral disable") + +/* + * + * poison_spectral_bufs - enable poisoning of spectral buffers + * @Min: 0 + * @Max: 1 + * @Default: 0 + * + * This ini is used to enable the poisoning of spectral buffers. + * + * Related: None + * + * Supported Feature: Spectral + * + * Usage: Internal + * + * + */ +#define CFG_SPECTRAL_POISON_BUFS \ + CFG_INI_BOOL("poison_spectral_bufs", false, \ + "Enable spectral bufs poison at init") + +#define CFG_SPECTRAL_ALL \ + CFG(CFG_SPECTRAL_DISABLE) \ + CFG(CFG_SPECTRAL_POISON_BUFS) + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/spectral_ioctl.h b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/spectral_ioctl.h new file mode 100644 index 0000000000000000000000000000000000000000..4f99495f1ad5dd2c87e40f81fd0d3aca0c2e4bb4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/spectral_ioctl.h @@ -0,0 +1,480 @@ +/* + * Copyright (c) 2011, 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SPECTRAL_IOCTL_H_ +#define _SPECTRAL_IOCTL_H_ +#include + +#ifndef AH_MAX_CHAINS +#define AH_MAX_CHAINS 3 +#endif + +/* + * ioctl defines + */ + +#define SPECTRAL_SET_CONFIG (DFS_LAST_IOCTL + 1) +#define SPECTRAL_GET_CONFIG (DFS_LAST_IOCTL + 2) +#define SPECTRAL_SHOW_INTERFERENCE (DFS_LAST_IOCTL + 3) +#define SPECTRAL_ENABLE_SCAN (DFS_LAST_IOCTL + 4) +#define SPECTRAL_DISABLE_SCAN (DFS_LAST_IOCTL + 5) +#define SPECTRAL_ACTIVATE_SCAN (DFS_LAST_IOCTL + 6) +#define SPECTRAL_STOP_SCAN (DFS_LAST_IOCTL + 7) +#define SPECTRAL_SET_DEBUG_LEVEL (DFS_LAST_IOCTL + 8) +#define SPECTRAL_IS_ACTIVE (DFS_LAST_IOCTL + 9) +#define SPECTRAL_IS_ENABLED (DFS_LAST_IOCTL + 10) +#define SPECTRAL_CLASSIFY_SCAN (DFS_LAST_IOCTL + 11) +#define SPECTRAL_GET_CLASSIFIER_CONFIG (DFS_LAST_IOCTL + 12) +#define SPECTRAL_EACS (DFS_LAST_IOCTL + 13) +#define SPECTRAL_ACTIVATE_FULL_SCAN (DFS_LAST_IOCTL + 14) +#define SPECTRAL_STOP_FULL_SCAN (DFS_LAST_IOCTL + 15) +#define SPECTRAL_GET_CAPABILITY_INFO (DFS_LAST_IOCTL + 16) +#define SPECTRAL_GET_DIAG_STATS (DFS_LAST_IOCTL + 17) +#define SPECTRAL_GET_CHAN_WIDTH (DFS_LAST_IOCTL + 18) +#define SPECTRAL_GET_CHANINFO (DFS_LAST_IOCTL + 19) +#define SPECTRAL_CLEAR_CHANINFO (DFS_LAST_IOCTL + 20) +#define SPECTRAL_SET_ICM_ACTIVE (DFS_LAST_IOCTL + 21) +#define SPECTRAL_GET_NOMINAL_NOISEFLOOR (DFS_LAST_IOCTL + 22) +#define SPECTRAL_GET_DEBUG_LEVEL (DFS_LAST_IOCTL + 23) +#define SPECTRAL_SET_DMA_DEBUG (DFS_LAST_IOCTL + 24) + +/* + * ioctl parameter types + */ +enum spectral_params { + SPECTRAL_PARAM_FFT_PERIOD, + SPECTRAL_PARAM_SCAN_PERIOD, + SPECTRAL_PARAM_SCAN_COUNT, + SPECTRAL_PARAM_SHORT_REPORT, + SPECTRAL_PARAM_SPECT_PRI, + SPECTRAL_PARAM_FFT_SIZE, + SPECTRAL_PARAM_GC_ENA, + SPECTRAL_PARAM_RESTART_ENA, + SPECTRAL_PARAM_NOISE_FLOOR_REF, + SPECTRAL_PARAM_INIT_DELAY, + SPECTRAL_PARAM_NB_TONE_THR, + SPECTRAL_PARAM_STR_BIN_THR, + SPECTRAL_PARAM_WB_RPT_MODE, + SPECTRAL_PARAM_RSSI_RPT_MODE, + SPECTRAL_PARAM_RSSI_THR, + SPECTRAL_PARAM_PWR_FORMAT, + SPECTRAL_PARAM_RPT_MODE, + SPECTRAL_PARAM_BIN_SCALE, + SPECTRAL_PARAM_DBM_ADJ, + SPECTRAL_PARAM_CHN_MASK, + SPECTRAL_PARAM_ACTIVE, + SPECTRAL_PARAM_STOP, + SPECTRAL_PARAM_ENABLE, + SPECTRAL_PARAM_FREQUENCY, + SPECTRAL_PARAM_CHAN_FREQUENCY, + SPECTRAL_PARAM_CHAN_WIDTH, + SPECTRAL_PARAM_MAX, +}; + +/** + * enum spectral_scan_mode - Spectral scan mode + * @SPECTRAL_SCAN_MODE_NORMAL: Normal mode + * @SPECTRAL_SCAN_MODE_AGILE: Agile mode + */ +enum spectral_scan_mode { + SPECTRAL_SCAN_MODE_NORMAL, + SPECTRAL_SCAN_MODE_AGILE, + SPECTRAL_SCAN_MODE_MAX, +}; + +struct spectral_ioctl_params { + int16_t spectral_fft_period; + int16_t pectral_period; + int16_t spectral_count; + uint16_t spectral_short_report; + uint16_t spectral_pri; +}; + +/** + * spectral_cap_hw_gen: Definitions for the Spectral hardware generation. + * This corresponds to definitions in qca_wlan_vendor_spectral_scan_cap_hw_gen. + * @SPECTRAL_CAP_HW_GEN_1: Generation 1 + * @SPECTRAL_CAP_HW_GEN_2: Generation 2 + * @SPECTRAL_CAP_HW_GEN_3: Generation 3 + */ +enum spectral_cap_hw_gen { + SPECTRAL_CAP_HW_GEN_1 = 0, + SPECTRAL_CAP_HW_GEN_2 = 1, + SPECTRAL_CAP_HW_GEN_3 = 2, +}; + +/** + * struct spectral_config - spectral config parameters + * @ss_fft_period: Skip interval for FFT reports + * @ss_period: Spectral scan period + * @ss_count: # of reports to return from ss_active + * @ss_short_report: Set to report only 1 set of FFT results + * @radar_bin_thresh_sel: Select threshold to classify strong bin for FFT + * @ss_spectral_pri: Priority, and are we doing a noise power cal ? + * @ss_fft_size: Defines the number of FFT data points to compute, + * defined as a log index num_fft_pts = + * 2^ss_fft_size + * @ss_gc_ena: Set, to enable targeted gain change before + * starting the spectral scan FFT + * @ss_restart_ena: Set, to enable abort of receive frames when in high + * priority and a spectral scan is queued + * @ss_noise_floor_ref: Noise floor reference number (signed) for the + * calculation of bin power (dBm) Though stored as an + * unsigned this should be treated as a signed 8-bit int. + * @ss_init_delay: Disallow spectral scan triggers after tx/rx packets + * by setting this delay value to roughly SIFS time + * period or greater Delay timer count in units of 0.25us + * @ss_nb_tone_thr: Number of strong bins (inclusive) per sub-channel, + * below which a signal is declared a narrowband tone + * @ss_str_bin_thr: Bin/max_bin ratio threshold over which a bin is + * declared strong (for spectral scan bandwidth analysis) + * @ss_wb_rpt_mode: Set this bit to report spectral scans as EXT_BLOCKER + * (phy_error=36), if none of the sub-channels are + * deemed narrowband + * @ss_rssi_rpt_mode: Set this bit to report spectral scans as EXT_BLOCKER + * (phy_error=36), if the ADC RSSI is below the + * threshold ss_rssi_thr + * @ss_rssi_thr: ADC RSSI must be greater than or equal to this + * threshold (signed Db) to ensure spectral scan + * reporting with normal phy error codes (please see + * ss_rssi_rpt_mode above).Though stored as an unsigned + * value, this should be treated as a signed 8-bit int + * @ss_pwr_format: Format of frequency bin magnitude for spectral scan + * triggered FFTs 0: linear magnitude + * 1: log magnitude (20*log10(lin_mag), 1/2 dB step size) + * @ss_rpt_mode: Format of per-FFT reports to software for spectral + * scan triggered FFTs + * 0: No FFT report (only pulse end summary) + * 1: 2-dword summary of metrics for each completed FFT + * 2: 2-dword summary + 1x-oversampled bins(in-band) per + * FFT + * 3: 2-dword summary + 2x-oversampled bins (all) per FFT + * @ss_bin_scale: Number of LSBs to shift out to scale the FFT bins + * for spectral scan triggered FFTs + * @ss_dbm_adj: Set (with ss_pwr_format=1), to report bin + * magnitudes + * converted to dBm power using the noisefloor + * calibration results + * @ss_chn_mask: Per chain enable mask to select input ADC for search + * FFT + * @ss_nf_cal: nf calibrated values for ctl+ext + * @ss_nf_pwr: nf pwr values for ctl+ext + * @ss_nf_temp_data: temperature data taken during nf scan + * @ss_frequency: This specifies the frequency span over which Spectral + * scan would be carried out. Its value depends on the + * Spectral scan mode. + * Normal mode:- + * Not applicable. Spectral scan would happen in the + * operating span. + * Agile mode:- + * Center frequency (in MHz) of the interested span + * or center frequency (in MHz) of any WLAN channel + * in the interested span. + */ +struct spectral_config { + uint16_t ss_fft_period; + uint16_t ss_period; + uint16_t ss_count; + uint16_t ss_short_report; + uint8_t radar_bin_thresh_sel; + uint16_t ss_spectral_pri; + uint16_t ss_fft_size; + uint16_t ss_gc_ena; + uint16_t ss_restart_ena; + uint16_t ss_noise_floor_ref; + uint16_t ss_init_delay; + uint16_t ss_nb_tone_thr; + uint16_t ss_str_bin_thr; + uint16_t ss_wb_rpt_mode; + uint16_t ss_rssi_rpt_mode; + uint16_t ss_rssi_thr; + uint16_t ss_pwr_format; + uint16_t ss_rpt_mode; + uint16_t ss_bin_scale; + uint16_t ss_dbm_adj; + uint16_t ss_chn_mask; + int8_t ss_nf_cal[AH_MAX_CHAINS * 2]; + int8_t ss_nf_pwr[AH_MAX_CHAINS * 2]; + int32_t ss_nf_temp_data; + uint32_t ss_frequency; +}; + +/** + * struct spectral_caps - Spectral capabilities structure + * @phydiag_cap: Phydiag capability + * @radar_cap: Radar detection capability + * @spectral_cap: Spectral capability + * @advncd_spectral_cap: Advanced spectral capability + * @hw_gen: Spectral hw generation as defined in spectral_cap_hw_gen + * @is_scaling_params_populated: indicates whether scaling params is populated + * @formula_id: formula_id + * @low_level_offset: low_level_offset + * @high_level_offset: high_level_offset + * @rssi_thr: rssi_thr + * @default_agc_max_gain: default_agc_max_gain + * @agile_spectral_cap: agile Spectral capability for 20/40/80 + * @agile_spectral_cap_160: agile Spectral capability for 160 MHz + * @agile_spectral_cap_80p80: agile Spectral capability for 80p80 + */ +struct spectral_caps { + uint8_t phydiag_cap; + uint8_t radar_cap; + uint8_t spectral_cap; + uint8_t advncd_spectral_cap; + uint32_t hw_gen; + bool is_scaling_params_populated; + uint16_t formula_id; + int16_t low_level_offset; + int16_t high_level_offset; + int16_t rssi_thr; + uint8_t default_agc_max_gain; + bool agile_spectral_cap; + bool agile_spectral_cap_160; + bool agile_spectral_cap_80p80; +}; + +#define SPECTRAL_IOCTL_PARAM_NOVAL (65535) + +#define MAX_SPECTRAL_CHAINS (3) +#define MAX_NUM_BINS (1024) +#define MAX_NUM_BINS_PRI80 (1024) +#define MAX_NUM_BINS_SEC80 (520) +/* 5 categories x (lower + upper) bands */ +#define MAX_INTERF 10 + +/** + * enum dcs_int_type - Interference type indicated by DCS + * @SPECTRAL_DCS_INT_NONE: No interference + * @SPECTRAL_DCS_INT_CW: CW interference + * @SPECTRAL_DCS_INT_WIFI: WLAN interference + */ +enum dcs_int_type { + SPECTRAL_DCS_INT_NONE, + SPECTRAL_DCS_INT_CW, + SPECTRAL_DCS_INT_WIFI +}; + +/** + * struct interf_rsp - Interference record + * @interf_type: eINTERF_TYPE giving type of interference + * @interf_min_freq: Minimum frequency in MHz at which interference has been + * found + * @interf_max_freq: Maximum frequency in MHz at which interference has been + * found + * @advncd_spectral_cap: Advanced spectral capability + */ +struct interf_rsp { + uint8_t interf_type; + uint16_t interf_min_freq; + uint16_t interf_max_freq; +} __packed; + +/** + * struct interf_src_rsp - List of interference sources + * @count: Number of interference records + * @interf: Array of interference records + */ +struct interf_src_rsp { + uint16_t count; + struct interf_rsp interf[MAX_INTERF]; +} __packed; + +/** + * struct spectral_classifier_params - spectral classifier parameters + * @spectral_20_40_mode: Is AP in 20/40 mode? + * @spectral_dc_index: DC index + * @spectral_dc_in_mhz: DC in MHz + * @upper_chan_in_mhz: Upper channel in MHz + * @lower_chan_in_mhz: Lower channel in MHz + */ +struct spectral_classifier_params { + int spectral_20_40_mode; + int spectral_dc_index; + int spectral_dc_in_mhz; + int upper_chan_in_mhz; + int lower_chan_in_mhz; +} __packed; + +/** + * struct spectral_samp_data - Spectral Analysis Messaging Protocol Data format + * @spectral_data_len: Indicates the bin size + * @spectral_data_len_sec80: Indicates the bin size for secondary 80 segment + * @spectral_rssi: Indicates RSSI + * @spectral_rssi_sec80: Indicates RSSI for secondary 80 segment + * @spectral_combined_rssi: Indicates combined RSSI from all antennas + * @spectral_upper_rssi: Indicates RSSI of upper band + * @spectral_lower_rssi: Indicates RSSI of lower band + * @spectral_chain_ctl_rssi: RSSI for control channel, for all antennas + * @spectral_chain_ext_rssi: RSSI for extension channel, for all antennas + * @spectral_max_scale: Indicates scale factor + * @spectral_bwinfo: Indicates bandwidth info + * @spectral_tstamp: Indicates timestamp + * @spectral_max_index: Indicates the index of max magnitude + * @spectral_max_index_sec80: Indicates the index of max magnitude for secondary + * 80 segment + * @spectral_max_mag: Indicates the maximum magnitude + * @spectral_max_mag_sec80: Indicates the maximum magnitude for secondary 80 + * segment + * @spectral_max_exp: Indicates the max exp + * @spectral_last_tstamp: Indicates the last time stamp + * @spectral_upper_max_index: Indicates the index of max mag in upper band + * @spectral_lower_max_index: Indicates the index of max mag in lower band + * @spectral_nb_upper: Not Used + * @spectral_nb_lower: Not Used + * @classifier_params: Indicates classifier parameters + * @bin_pwr_count: Indicates the number of FFT bins + * @lb_edge_extrabins: Number of extra bins on left band edge + * @rb_edge_extrabins: Number of extra bins on right band edge + * @bin_pwr_count_sec80: Indicates the number of FFT bins in secondary 80 + * segment + * @bin_pwr: Contains FFT magnitudes + * @bin_pwr_sec80: Contains FFT magnitudes for the secondary 80 + * segment + * @interf_list: List of interfernce sources + * @noise_floor: Indicates the current noise floor + * @noise_floor_sec80: Indicates the current noise floor for secondary 80 + * segment + * @ch_width: Channel width 20/40/80/160 MHz + * @spectral_mode: Spectral scan mode + * @spectral_pri80ind: Indication from hardware that the sample was + * received on the primary 80 MHz segment. If this + * is set when smode = SPECTRAL_SCAN_MODE_AGILE, it + * indicates that Spectral was carried out on pri80 + * instead of the Agile frequency due to a + * channel switch - Software may choose + * to ignore the sample in this case. + * @spectral_pri80ind_sec80: Indication from hardware that the sample was + * received on the primary 80 MHz segment instead of + * the secondary 80 MHz segment due to a channel + * switch - Software may choose to ignore the sample + * if this is set. Applicable only if smode = + * SPECTRAL_SCAN_MODE_NORMAL and for 160/80+80 MHz + * Spectral operation. + * @last_raw_timestamp: Previous FFT report's raw timestamp. In case of + * 160Mhz it will be primary 80 segment's timestamp + * as both primary & secondary segment's timestamp + * are expected to be almost equal. + * @timestamp_war_offset: Offset calculated based on reset_delay and + * last_raw_timestamp. It will be added to + * raw_timestamp to get spectral_tstamp. + * @raw_timestamp: Actual FFT timestamp reported by HW on primary + * segment. + * @raw_timestamp_sec80: Actual FFT timestamp reported by HW on sec80 MHz + * segment. + * @reset_delay: Time gap between the last spectral report before + * reset and the end of reset. It is provided by FW + * via direct DMA framework. + * @target_reset_count: Indicates the number of times target went through + * reset routine after spectral was enabled. + */ +struct spectral_samp_data { + int16_t spectral_data_len; + int16_t spectral_data_len_sec80; + int16_t spectral_rssi; + int16_t spectral_rssi_sec80; + int8_t spectral_combined_rssi; + int8_t spectral_upper_rssi; + int8_t spectral_lower_rssi; + int8_t spectral_chain_ctl_rssi[MAX_SPECTRAL_CHAINS]; + int8_t spectral_chain_ext_rssi[MAX_SPECTRAL_CHAINS]; + uint8_t spectral_max_scale; + int16_t spectral_bwinfo; + int32_t spectral_tstamp; + int16_t spectral_max_index; + int16_t spectral_max_index_sec80; + int16_t spectral_max_mag; + int16_t spectral_max_mag_sec80; + uint8_t spectral_max_exp; + int32_t spectral_last_tstamp; + int16_t spectral_upper_max_index; + int16_t spectral_lower_max_index; + uint8_t spectral_nb_upper; + uint8_t spectral_nb_lower; + struct spectral_classifier_params classifier_params; + uint16_t bin_pwr_count; + /* + * For 11ac chipsets prior to AR900B version 2.0, a max of 512 bins are + * delivered. However, there can be additional bins reported for + * AR900B version 2.0 and QCA9984 as described next: + * + * AR900B version 2.0: An additional tone is processed on the right + * hand side in order to facilitate detection of radar pulses out to + * the extreme band-edge of the channel frequency. + * Since the HW design processes four tones at a time, + * this requires one additional Dword to be added to the + * search FFT report. + * + * QCA9984: When spectral_scan_rpt_mode=2, i.e 2-dword summary + + * 1x-oversampled bins (in-band) per FFT, + * then 8 more bins (4 more on left side and 4 more on right side) + * are added. + */ + uint8_t lb_edge_extrabins; + uint8_t rb_edge_extrabins; + uint16_t bin_pwr_count_sec80; + uint8_t bin_pwr[MAX_NUM_BINS_PRI80]; + uint8_t bin_pwr_sec80[MAX_NUM_BINS_SEC80]; + struct interf_src_rsp interf_list; + int16_t noise_floor; + int16_t noise_floor_sec80; + uint32_t ch_width; + uint8_t spectral_agc_total_gain; + uint8_t spectral_agc_total_gain_sec80; + uint8_t spectral_gainchange; + uint8_t spectral_gainchange_sec80; + enum spectral_scan_mode spectral_mode; + uint8_t spectral_pri80ind; + uint8_t spectral_pri80ind_sec80; + uint32_t last_raw_timestamp; + uint32_t timestamp_war_offset; + uint32_t raw_timestamp; + uint32_t raw_timestamp_sec80; + uint32_t reset_delay; + uint32_t target_reset_count; + uint32_t agile_ch_width; +} __packed; + +/** + * struct spectral_samp_msg - Spectral SAMP message + * @signature: Validates the SAMP message + * @freq: Operating frequency in MHz + * @vhtop_ch_freq_seg1: VHT Segment 1 centre frequency in MHz + * @vhtop_ch_freq_seg2: VHT Segment 2 centre frequency in MHz + * @agile_freq: Center frequency in MHz of the entire span across which + * Agile Spectral is carried out. Applicable only for Agile + * Spectral samples. + * @freq_loading: How busy was the channel + * @dcs_enabled: Whether DCS is enabled + * @int_type: Interference type indicated by DCS + * @macaddr: Indicates the device interface + * @samp_data: SAMP Data + */ +struct spectral_samp_msg { + uint32_t signature; + uint16_t freq; + uint16_t vhtop_ch_freq_seg1; + uint16_t vhtop_ch_freq_seg2; + uint16_t agile_freq; + uint16_t freq_loading; + uint16_t dcs_enabled; + enum dcs_int_type int_type; + uint8_t macaddr[6]; + struct spectral_samp_data samp_data; +} __packed; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_public_structs.h b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_public_structs.h new file mode 100644 index 0000000000000000000000000000000000000000..34cc4b1bc80ccee51a09bb5f5b61372af36ad28b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_public_structs.h @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2011,2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "wlan_dfs_ioctl.h" +#include + +#ifndef __KERNEL__ +#include +#endif /* __KERNEL__ */ + +#ifndef _WLAN_SPECTRAL_PUBLIC_STRUCTS_H_ +#define _WLAN_SPECTRAL_PUBLIC_STRUCTS_H_ + +#ifndef AH_MAX_CHAINS +#define AH_MAX_CHAINS 3 +#endif + +#define MAX_NUM_CHANNELS 255 +#define SPECTRAL_PHYERR_PARAM_NOVAL 65535 + +#ifdef SPECTRAL_USE_EMU_DEFAULTS +/* Use defaults from emulation */ +#define SPECTRAL_SCAN_ACTIVE_DEFAULT (0x0) +#define SPECTRAL_SCAN_ENABLE_DEFAULT (0x0) +#define SPECTRAL_SCAN_COUNT_DEFAULT (0x0) +#define SPECTRAL_SCAN_PERIOD_DEFAULT (250) +#define SPECTRAL_SCAN_PRIORITY_DEFAULT (0x1) +#define SPECTRAL_SCAN_FFT_SIZE_DEFAULT (0x7) +#define SPECTRAL_SCAN_GC_ENA_DEFAULT (0x1) +#define SPECTRAL_SCAN_RESTART_ENA_DEFAULT (0x0) +#define SPECTRAL_SCAN_NOISE_FLOOR_REF_DEFAULT (0xa0) +#define SPECTRAL_SCAN_INIT_DELAY_DEFAULT (0x50) +#define SPECTRAL_SCAN_NB_TONE_THR_DEFAULT (0xc) +#define SPECTRAL_SCAN_STR_BIN_THR_DEFAULT (0x7) +#define SPECTRAL_SCAN_WB_RPT_MODE_DEFAULT (0x0) +#define SPECTRAL_SCAN_RSSI_RPT_MODE_DEFAULT (0x1) +#define SPECTRAL_SCAN_RSSI_THR_DEFAULT (0xf) +#define SPECTRAL_SCAN_PWR_FORMAT_DEFAULT (0x1) +#define SPECTRAL_SCAN_RPT_MODE_DEFAULT (0x2) +#define SPECTRAL_SCAN_BIN_SCALE_DEFAULT (0x1) +#define SPECTRAL_SCAN_DBM_ADJ_DEFAULT (0x0) +#define SPECTRAL_SCAN_CHN_MASK_DEFAULT (0x1) +#else +/* + * Static default values for spectral state and configuration. + * These definitions should be treated as temporary. Ideally, + * we should get the defaults from firmware - this will be discussed. + * + * Use defaults from Spectral Hardware Micro-Architecture + * document (v1.0) + */ +#define SPECTRAL_SCAN_ACTIVE_DEFAULT (0) +#define SPECTRAL_SCAN_ENABLE_DEFAULT (0) +#define SPECTRAL_SCAN_COUNT_DEFAULT (0) +#define SPECTRAL_SCAN_PERIOD_GEN_I_DEFAULT (35) +#define SPECTRAL_SCAN_PERIOD_GEN_II_DEFAULT (35) +#define SPECTRAL_SCAN_PERIOD_GEN_III_DEFAULT (224) +#define SPECTRAL_SCAN_PRIORITY_DEFAULT (1) +#define SPECTRAL_SCAN_FFT_SIZE_DEFAULT (7) +#define SPECTRAL_SCAN_GC_ENA_DEFAULT (1) +#define SPECTRAL_SCAN_RESTART_ENA_DEFAULT (0) +#define SPECTRAL_SCAN_NOISE_FLOOR_REF_DEFAULT (-96) +#define SPECTRAL_SCAN_INIT_DELAY_DEFAULT (80) +#define SPECTRAL_SCAN_NB_TONE_THR_DEFAULT (12) +#define SPECTRAL_SCAN_STR_BIN_THR_DEFAULT (8) +#define SPECTRAL_SCAN_WB_RPT_MODE_DEFAULT (0) +#define SPECTRAL_SCAN_RSSI_RPT_MODE_DEFAULT (0) +#define SPECTRAL_SCAN_RSSI_THR_DEFAULT (0xf0) +#define SPECTRAL_SCAN_PWR_FORMAT_DEFAULT (0) +#define SPECTRAL_SCAN_RPT_MODE_DEFAULT (2) +#define SPECTRAL_SCAN_BIN_SCALE_DEFAULT (1) +#define SPECTRAL_SCAN_DBM_ADJ_DEFAULT (1) +#define SPECTRAL_SCAN_CHN_MASK_DEFAULT (1) +#define SPECTRAL_SCAN_FREQUENCY_DEFAULT (0) +#endif /* SPECTRAL_USE_EMU_DEFAULTS */ + +/* The below two definitions apply only to pre-11ac chipsets */ +#define SPECTRAL_SCAN_SHORT_REPORT_DEFAULT (1) +#define SPECTRAL_SCAN_FFT_PERIOD_DEFAULT (1) + +/* + * Definitions to help in scaling of gen3 linear format Spectral bins to values + * similar to those from gen2 chipsets. + */ + +/* + * Max gain for QCA9984. Since this chipset is a prime representative of gen2 + * chipsets, it is chosen for this value. + */ +#define SPECTRAL_QCA9984_MAX_GAIN (78) + +/* Temporary section for hard-coded values. These need to come from FW. */ + +/* Max gain for IPQ8074 */ +#define SPECTRAL_IPQ8074_DEFAULT_MAX_GAIN_HARDCODE (62) + +/* + * Section for values needing tuning per customer platform. These too may need + * to come from FW. To be considered as hard-coded for now. + */ + +/* + * If customers have a different gain line up than QCA reference designs for + * IPQ8074 and/or QCA9984, they may have to tune the low level threshold and + * the RSSI threshold. + */ +#define SPECTRAL_SCALING_LOW_LEVEL_OFFSET (7) +#define SPECTRAL_SCALING_RSSI_THRESH (5) + +/* + * If customers set the AGC backoff differently, they may have to tune the high + * level threshold. + */ +#define SPECTRAL_SCALING_HIGH_LEVEL_OFFSET (5) + +/* End of section for values needing fine tuning. */ +/* End of temporary section for hard-coded values */ + +/** + * enum spectral_msg_buf_type - Spectral message buffer type + * @SPECTRAL_MSG_BUF_NEW: Allocate new buffer + * @SPECTRAL_MSG_BUF_SAVED: Reuse last buffer, used for secondary segment report + * in case of 160 MHz. + */ +enum spectral_msg_buf_type { + SPECTRAL_MSG_BUF_NEW, + SPECTRAL_MSG_BUF_SAVED, + SPECTRAL_MSG_BUF_TYPE_MAX, +}; + +/** + * enum spectral_msg_type - Spectral SAMP message type + * @SPECTRAL_MSG_NORMAL_MODE: Normal mode Spectral SAMP message + * @SPECTRAL_MSG_AGILE_MODE: Agile mode Spectral SAMP message + * @SPECTRAL_MSG_INTERFERENCE_NOTIFICATION: Interference notification to + * external auto channel selection + * entity + * @SPECTRAL_MSG_TYPE_MAX: Spectral SAMP message type max + */ +enum spectral_msg_type { + SPECTRAL_MSG_NORMAL_MODE, + SPECTRAL_MSG_AGILE_MODE, + SPECTRAL_MSG_INTERFERENCE_NOTIFICATION, + SPECTRAL_MSG_TYPE_MAX, +}; + +/** + * enum wlan_cfg80211_spectral_vendorcmd_handler_idx - Indices to cfg80211 + * spectral vendor command handlers + * @SPECTRAL_SCAN_START_HANDLER_IDX: Index to SPECTRAL_SCAN_START handler + * @SPECTRAL_SCAN_STOP_HANDLER_IDX: Index to SPECTRAL_SCAN_STOP handler + * @SPECTRAL_SCAN_GET_CONFIG_HANDLER_IDX: Index to SPECTRAL_SCAN_GET_CONFIG + * handler + * @SPECTRAL_SCAN_GET_DIAG_STATS_HANDLER_IDX: Index to + * SPECTRAL_SCAN_GET_DIAG_STATS handler + * @SPECTRAL_SCAN_GET_CAP_HANDLER_IDX: Index to SPECTRAL_SCAN_GET_CAP handler + * @SPECTRAL_SCAN_GET_STATUS_HANDLER_IDX: Index to SPECTRAL_SCAN_GET_STATUS + * handler + * @SPECTRAL_SCAN_VENDOR_CMD_HANDLER_MAX: Number of cfg80211 spectral + * vendor command handlers supported + */ +enum wlan_cfg80211_spectral_vendorcmd_handler_idx { + SPECTRAL_SCAN_START_HANDLER_IDX, + SPECTRAL_SCAN_STOP_HANDLER_IDX, + SPECTRAL_SCAN_GET_CONFIG_HANDLER_IDX, + SPECTRAL_SCAN_GET_DIAG_STATS_HANDLER_IDX, + SPECTRAL_SCAN_GET_CAP_HANDLER_IDX, + SPECTRAL_SCAN_GET_STATUS_HANDLER_IDX, + SPECTRAL_SCAN_VENDOR_CMD_HANDLER_MAX, +}; + +/** + * enum spectral_debug - Spectral debug level + * @DEBUG_SPECTRAL: Minimal SPECTRAL debug + * @DEBUG_SPECTRAL1: Normal SPECTRAL debug + * @DEBUG_SPECTRAL2: Maximal SPECTRAL debug + * @DEBUG_SPECTRAL3: Matched filterID display + * @DEBUG_SPECTRAL4: One time dump of FFT report + */ +enum spectral_debug { + DEBUG_SPECTRAL = 0x00000100, + DEBUG_SPECTRAL1 = 0x00000200, + DEBUG_SPECTRAL2 = 0x00000400, + DEBUG_SPECTRAL3 = 0x00000800, + DEBUG_SPECTRAL4 = 0x00001000, +}; + +/** + * enum spectral_capability_type - Spectral capability type + * @SPECTRAL_CAP_PHYDIAG: Phydiag capability + * @SPECTRAL_CAP_RADAR: Radar detection capability + * @SPECTRAL_CAP_SPECTRAL_SCAN: Spectral capability + * @SPECTRAL_CAP_ADVNCD_SPECTRAL_SCAN: Advanced spectral capability + */ +enum spectral_capability_type { + SPECTRAL_CAP_PHYDIAG, + SPECTRAL_CAP_RADAR, + SPECTRAL_CAP_SPECTRAL_SCAN, + SPECTRAL_CAP_ADVNCD_SPECTRAL_SCAN, +}; + +/** + * enum spectral_cp_error_code - Spectral control path response code + * @SPECTRAL_SCAN_RESP_ERR_INVALID: Invalid error identifier + * @SPECTRAL_SCAN_RESP_ERR_PARAM_UNSUPPORTED: parameter unsupported + * @SPECTRAL_SCAN_RESP_ERR_MODE_UNSUPPORTED: mode unsupported + * @SPECTRAL_SCAN_RESP_ERR_PARAM_INVALID_VALUE: invalid parameter value + * @SPECTRAL_SCAN_RESP_ERR_PARAM_NOT_INITIALIZED: parameter uninitialized + */ +enum spectral_cp_error_code { + SPECTRAL_SCAN_ERR_INVALID, + SPECTRAL_SCAN_ERR_PARAM_UNSUPPORTED, + SPECTRAL_SCAN_ERR_MODE_UNSUPPORTED, + SPECTRAL_SCAN_ERR_PARAM_INVALID_VALUE, + SPECTRAL_SCAN_ERR_PARAM_NOT_INITIALIZED, +}; + +/** + * enum spectral_dma_debug - Spectral DMA debug + * @SPECTRAL_DMA_RING_DEBUG: Spectral DMA ring debug + * @SPECTRAL_DMA_BUFFER_DEBUG: Spectral DMA buffer debug + */ +enum spectral_dma_debug { + SPECTRAL_DMA_RING_DEBUG, + SPECTRAL_DMA_BUFFER_DEBUG, +}; + +/** + * struct spectral_chan_stats - channel status info + * @cycle_count: Cycle count + * @channel_load: Channel load + * @per: Period + * @noisefloor: Noise floor + * @comp_usablity: Computed usability + * @maxregpower: Maximum allowed regulatory power + * @comp_usablity_sec80: Computed usability of secondary 80 Mhz + * @maxregpower_sec80: Max regulatory power of secondary 80 Mhz + */ +struct spectral_chan_stats { + int cycle_count; + int channel_load; + int per; + int noisefloor; + uint16_t comp_usablity; + int8_t maxregpower; + uint16_t comp_usablity_sec80; + int8_t maxregpower_sec80; +}; + +/** + * struct spectral_diag_stats - spectral diag stats + * @spectral_mismatch: Spectral TLV signature mismatches + * @spectral_sec80_sfft_insufflen: Insufficient length when parsing for + * Secondary 80 Search FFT report + * @spectral_no_sec80_sfft: Secondary 80 Search FFT report + * TLV not found + * @spectral_vhtseg1id_mismatch: VHT Operation Segment 1 ID + * mismatches in Search FFT report + * @spectral_vhtseg2id_mismatch: VHT Operation Segment 2 ID + * mismatches in Search FFT report + * @spectral_invalid_detector_id: Invalid detector id + */ +struct spectral_diag_stats { + uint64_t spectral_mismatch; + uint64_t spectral_sec80_sfft_insufflen; + uint64_t spectral_no_sec80_sfft; + uint64_t spectral_vhtseg1id_mismatch; + uint64_t spectral_vhtseg2id_mismatch; + uint64_t spectral_invalid_detector_id; +}; + +/** + * struct spectral_scan_state - State of spectral scan + * @is_active: Is spectral scan active + * @is_enabled: Is spectral scan enabled + */ +struct spectral_scan_state { + uint8_t is_active; + uint8_t is_enabled; +}; + +/* Forward declarations */ +struct wlan_objmgr_pdev; + +/** + * struct spectral_nl_cb - Spectral Netlink callbacks + * @get_sbuff: Get the socket buffer to send the data to the application + * @send_nl_bcast: Send data to the application using netlink broadcast + * @send_nl_unicast: Send data to the application using netlink unicast + * @free_sbuff: Free the socket buffer for a particular message type + */ +struct spectral_nl_cb { + void *(*get_sbuff)(struct wlan_objmgr_pdev *pdev, + enum spectral_msg_type smsg_type, + enum spectral_msg_buf_type buf_type); + int (*send_nl_bcast)(struct wlan_objmgr_pdev *pdev, + enum spectral_msg_type smsg_type); + int (*send_nl_unicast)(struct wlan_objmgr_pdev *pdev, + enum spectral_msg_type smsg_type); + void (*free_sbuff)(struct wlan_objmgr_pdev *pdev, + enum spectral_msg_type smsg_type); +}; + +/** + * struct spectral_scan_config_request - Config request + * @sscan_config: Spectral parameters + * @sscan_err_code: Spectral scan error code + */ +struct spectral_scan_config_request { + struct spectral_config sscan_config; + enum spectral_cp_error_code sscan_err_code; +}; + +/** + * struct spectral_scan_action_request - Action request + * @sscan_err_code: Spectral scan error code + */ +struct spectral_scan_action_request { + enum spectral_cp_error_code sscan_err_code; +}; + +/** + * struct spectral_scan_get_caps_request - Get caps request + * @sscan_caps: Spectral capabilities + * @sscan_err_code: Spectral scan error code + */ +struct spectral_scan_get_caps_request { + struct spectral_caps sscan_caps; + enum spectral_cp_error_code sscan_err_code; +}; + +/** + * struct spectral_scan_get_diag_request - Get diag request + * @sscan_diag: Spectral diag stats + * @sscan_err_code: Spectral scan error code + */ +struct spectral_scan_get_diag_request { + struct spectral_diag_stats sscan_diag; + enum spectral_cp_error_code sscan_err_code; +}; + +/** + * struct spectral_scan_get_chan_width_request - Get channel width request + * @chan_width: Channel width + * @sscan_err_code: Spectral scan error code + */ +struct spectral_scan_get_chan_width_request { + uint32_t chan_width; + enum spectral_cp_error_code sscan_err_code; +}; + +/** + * struct spectral_scan_get_status_request - Get status request + * @is_active: is Spectral scan active + * @is_enabled: is Spectral scan enabled + * @sscan_err_code: Spectral scan error code + */ +struct spectral_scan_get_status_request { + bool is_active; + bool is_enabled; + enum spectral_cp_error_code sscan_err_code; +}; + +/** + * struct spectral_scan_debug_request - Get/set debug level request + * @spectral_dbg_level: Spectral debug level + * @sscan_err_code: Spectral scan error code + */ +struct spectral_scan_debug_request { + uint32_t spectral_dbg_level; + enum spectral_cp_error_code sscan_err_code; +}; + +/** + * struct spectral_scan_dma_debug_request - DMA debug request + * @dma_debug_enable: Enable/disable @dma_debug_type + * @dma_debug_type: Type of Spectral DMA debug i.e., ring or buffer debug + * @sscan_err_code: Spectral scan error code + */ +struct spectral_scan_dma_debug_request { + bool dma_debug_enable; + enum spectral_dma_debug dma_debug_type; + enum spectral_cp_error_code sscan_err_code; +}; + +/** + * struct spectral_cp_request - Spectral control path request + * Creating request and extracting response has to + * be atomic. + * @ss_mode: Spectral scan mode + * @req_id: Request identifier + * @dma_debug_req: Spectral DMA debug request + */ +struct spectral_cp_request { + enum spectral_scan_mode ss_mode; + uint8_t req_id; + union { + struct spectral_scan_config_request config_req; + struct spectral_scan_action_request action_req; + struct spectral_scan_get_caps_request caps_req; + struct spectral_scan_get_diag_request diag_req; + struct spectral_scan_get_chan_width_request chan_width_req; + struct spectral_scan_get_status_request status_req; + struct spectral_scan_debug_request debug_req; + struct spectral_scan_dma_debug_request dma_debug_req; + }; +}; + +#ifndef __KERNEL__ + +static inline int16_t +spectral_pwfactor_max(int16_t pwfactor1, + int16_t pwfactor2) +{ + return ((pwfactor1 > pwfactor2) ? pwfactor1 : pwfactor2); +} + +/** + * get_spectral_scale_rssi_corr() - Compute RSSI correction factor for scaling + * @agc_total_gain_db: AGC total gain in dB steps + * @gen3_defmaxgain: Default max gain value of the gen III chipset + * @gen2_maxgain: Max gain value used by the reference gen II chipset + * @lowlevel_offset: Low level offset for scaling + * @inband_pwr: In band power in dB steps + * @rssi_thr: RSSI threshold for scaling + * + * Helper function to compute RSSI correction factor for Gen III linear format + * Spectral scaling. It is the responsibility of the caller to ensure that + * correct values are passed. + * + * Return: RSSI correction factor + */ +static inline int16_t +get_spectral_scale_rssi_corr(u_int8_t agc_total_gain_db, + u_int8_t gen3_defmaxgain, u_int8_t gen2_maxgain, + int16_t lowlevel_offset, int16_t inband_pwr, + int16_t rssi_thr) +{ + return ((agc_total_gain_db < gen3_defmaxgain) ? + (gen2_maxgain - gen3_defmaxgain + lowlevel_offset) : + spectral_pwfactor_max((inband_pwr - rssi_thr), 0)); +} + +/** + * spectral_scale_linear_to_gen2() - Scale linear bin value to gen II equivalent + * @gen3_binmag: Captured FFT bin value from the Spectral Search FFT report + * generated by the Gen III chipset + * @gen2_maxgain: Max gain value used by the reference gen II chipset + * @gen3_defmaxgain: Default max gain value of the gen III chipset + * @lowlevel_offset: Low level offset for scaling + * @inband_pwr: In band power in dB steps + * @rssi_thr: RSSI threshold for scaling + * @agc_total_gain_db: AGC total gain in dB steps + * @highlevel_offset: High level offset for scaling + * @gen2_bin_scale: Bin scale value used on reference gen II chipset + * @gen3_bin_scale: Bin scale value used on gen III chipset + * + * Helper function to scale a given gen III linear format bin value into an + * approximately equivalent gen II value. The scaled value can possibly be + * higher than 8 bits. If the caller is incapable of handling values larger + * than 8 bits, the caller can saturate the value at 255. This function does not + * carry out this saturation for the sake of flexibility so that callers + * interested in the larger values can avail of this. Also note it is the + * responsibility of the caller to ensure that correct values are passed. + * + * Return: Scaled bin value + */ +static inline u_int32_t +spectral_scale_linear_to_gen2(u_int8_t gen3_binmag, + u_int8_t gen2_maxgain, u_int8_t gen3_defmaxgain, + int16_t lowlevel_offset, int16_t inband_pwr, + int16_t rssi_thr, u_int8_t agc_total_gain_db, + int16_t highlevel_offset, u_int8_t gen2_bin_scale, + u_int8_t gen3_bin_scale) +{ + return (gen3_binmag * + sqrt(pow(10, (((double)spectral_pwfactor_max(gen2_maxgain - + gen3_defmaxgain + lowlevel_offset - + get_spectral_scale_rssi_corr(agc_total_gain_db, + gen3_defmaxgain, + gen2_maxgain, + lowlevel_offset, + inband_pwr, + rssi_thr), + (agc_total_gain_db < gen3_defmaxgain) * + highlevel_offset)) / 10))) * + pow(2, (gen3_bin_scale - gen2_bin_scale))); +} + +#endif /* __KERNEL__ */ + +#endif /* _WLAN_SPECTRAL_PUBLIC_STRUCTS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..264ab28dc8617a31fb067f9babe3c78b1a2c4131 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_tgt_api.h @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_SPECTRAL_TGT_API_H_ +#define _WLAN_SPECTRAL_TGT_API_H_ + +#include +#include +#include "../../core/spectral_cmn_api_i.h" + +/** + * tgt_get_target_handle() - Get target_if handle + * @pdev: Pointer to pdev + * + * Get handle to target_if internal Spectral data + * + * Return: Handle to target_if internal Spectral data on success, NULL on + * failure + */ +void *tgt_get_target_handle(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_spectral_control()- handler for demultiplexing requests from higher layer + * @pdev: Reference to global pdev object + * @sscan_req: pointer to Spectral scan request + * + * This function processes the spectral config command + * and appropriate handlers are invoked. + * + * Return: QDF_STATUS_SUCCESS/QDF_STATUS_E_FAILURE + */ +QDF_STATUS tgt_spectral_control(struct wlan_objmgr_pdev *pdev, + struct spectral_cp_request *sscan_req); + +/** + * tgt_pdev_spectral_init() - implementation for spectral init + * @pdev: Pointer to pdev + * + * Return: On success, pointer to Spectral target_if internal private data, on + * failure, NULL + */ +void *tgt_pdev_spectral_init(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_pdev_spectral_deinit() - implementation for spectral de-init + * @pdev: Pointer to pdev + * + * Return: None + */ +void tgt_pdev_spectral_deinit(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_set_spectral_config() - Set spectral config + * @pdev: Pointer to pdev object + * @threshtype: spectral parameter type + * @value: Value to be configured for the given spectral parameter + * @smode: Spectral scan mode + * @err: Spectral control path error code + * + * Implementation for setting spectral config + * + * Return: QDF_STATUS_SUCCESS on success, else QDF_STATUS_E_FAILURE + */ +QDF_STATUS tgt_set_spectral_config(struct wlan_objmgr_pdev *pdev, + const u_int32_t threshtype, + const u_int32_t value, + const enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + +/** + * tgt_get_spectral_config() - Get spectral configuration + * @pdev: Pointer to pdev object + * @param: Pointer to spectral_config structure in which the configuration + * should be returned + * @smode: Spectral scan mode + * + * Implementation for getting the current spectral configuration + * + * Return: QDF_STATUS_SUCCESS on success, else QDF_STATUS_E_FAILURE + */ +QDF_STATUS tgt_get_spectral_config(struct wlan_objmgr_pdev *pdev, + struct spectral_config *sptrl_config, + const enum spectral_scan_mode smode); + +/** + * tgt_start_spectral_scan() - Start spectral scan + * @pdev: Pointer to pdev object + * @smode: Spectral scan mode + * @err: Spectral control path error code + * + * Implementation for starting spectral scan + * + * Return: QDF_STATUS_SUCCESS on success, else QDF_STATUS_E_FAILURE + */ +QDF_STATUS tgt_start_spectral_scan(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + +/** + * tgt_stop_spectral_scan() - Stop spectral scan + * @pdev: Pointer to pdev object + * @smode: Spectral scan mode + * @err: Spectral control path error code + * + * Implementation for stop spectral scan + * + * Return: QDF_STATUS_SUCCESS on success, else QDF_STATUS_E_FAILURE + */ +QDF_STATUS tgt_stop_spectral_scan(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + +/** + * tgt_is_spectral_active() - Get whether Spectral is active + * @pdev: Pointer to pdev object + * @smode: Spectral scan mode + * + * Implementation to get whether Spectral is active + * + * Return: True if Spectral is active, false if Spectral is not active + */ +bool tgt_is_spectral_active(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode); + +/** + * tgt_is_spectral_enabled() - Get whether Spectral is active + * @pdev: Pointer to pdev object + * @smode: Spectral scan mode + * + * Implementation to get whether Spectral is active + * + * Return: True if Spectral is active, false if Spectral is not active + */ +bool tgt_is_spectral_enabled(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode); + +/** + * tgt_set_debug_level() - Set debug level for Spectral + * @pdev: Pointer to pdev object + * @debug_level: Debug level + * + * Implementation to set the debug level for Spectral + * + * Return: QDF_STATUS_SUCCESS on success, else QDF_STATUS_E_FAILURE + */ +QDF_STATUS tgt_set_debug_level(struct wlan_objmgr_pdev *pdev, + u_int32_t debug_level); + +/** + * tgt_get_debug_level() - Get debug level for Spectral + * @pdev: Pointer to pdev object + * + * Implementation to get the debug level for Spectral + * + * Return: Current debug level + */ +uint32_t tgt_get_debug_level(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_get_spectral_capinfo() - Get Spectral capability information + * @pdev: Pointer to pdev object + * @scaps: Buffer into which data should be copied + * + * Implementation to get the spectral capability information + * + * Return: QDF_STATUS_SUCCESS on success, else QDF_STATUS_E_FAILURE + */ +QDF_STATUS tgt_get_spectral_capinfo(struct wlan_objmgr_pdev *pdev, + struct spectral_caps *scaps); + +/** + * tgt_get_spectral_diagstats() - Get Spectral diagnostic statistics + * @pdev: Pointer to pdev object + * @stats: Buffer into which data should be copied + * + * Implementation to get the spectral diagnostic statistics + * + * Return: QDF_STATUS_SUCCESS on success, else QDF_STATUS_E_FAILURE + */ +QDF_STATUS tgt_get_spectral_diagstats(struct wlan_objmgr_pdev *pdev, + struct spectral_diag_stats *stats); + +/** + * tgt_register_wmi_spectral_cmd_ops() - Register wmi_spectral_cmd_ops + * @cmd_ops: Pointer to the structure having wmi_spectral_cmd function pointers + * @pdev: Pointer to pdev object + * + * Implementation to register wmi_spectral_cmd_ops in spectral + * internal data structure + * + * Return: void + */ +void tgt_register_wmi_spectral_cmd_ops(struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops); + +/** + * tgt_spectral_register_nl_cb() - Register Netlink callbacks + * @pdev: Pointer to pdev object + * @nl_cb: Netlink callbacks to register + * + * Return: void + */ +void tgt_spectral_register_nl_cb(struct wlan_objmgr_pdev *pdev, + struct spectral_nl_cb *nl_cb); + +/** + * tgt_spectral_use_nl_bcast() - Get whether to use broadcast/unicast while + * sending Netlink messages to the application layer + * @pdev: Pointer to pdev object + * + * Return: true for broadcast, false for unicast + */ +bool tgt_spectral_use_nl_bcast(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_spectral_deregister_nl_cb() - De-register Netlink callbacks + * @pdev: Pointer to pdev object + * + * Return: void + */ +void tgt_spectral_deregister_nl_cb(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_spectral_process_report() - Process spectral report + * @pdev: Pointer to pdev object + * @payload: Pointer to spectral report buffer + * + * Return: status + */ +int +tgt_spectral_process_report(struct wlan_objmgr_pdev *pdev, + void *payload); + +/** + * tgt_spectral_register_to_dbr() - Register to direct DMA + * @pdev: Pointer to pdev object + * + * Return: QDF_STATUS + */ +QDF_STATUS +tgt_spectral_register_to_dbr(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_spectral_unregister_to_dbr() - Register to direct DMA + * @pdev: Pointer to pdev object + * + * Return: QDF_STATUS + */ +QDF_STATUS +tgt_spectral_unregister_to_dbr(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_spectral_get_target_type() - Get target type + * @psoc: Pointer to psoc object + * + * Return: target type + */ +uint32_t +tgt_spectral_get_target_type(struct wlan_objmgr_psoc *psoc); + +/** + * tgt_set_spectral_dma_debug() - Set DMA debug for Spectral + * @pdev: Pointer to pdev object + * @dma_debug_type: Type of Spectral DMA debug i.e., ring or buffer debug + * @dma_debug_enable: Value to be set for @dma_debug_type + * + * Return: QDF_STATUS of operation + */ +QDF_STATUS tgt_set_spectral_dma_debug(struct wlan_objmgr_pdev *pdev, + enum spectral_dma_debug dma_debug_type, + bool dma_debug_enable); +#endif /* _WLAN_SPECTRAL_TGT_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..b2e21838118ea27004663fc85f88b4898918211e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_ucfg_api.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_SPECTRAL_UCFG_API_H_ +#define _WLAN_SPECTRAL_UCFG_API_H_ + +#include +#include + +/* Spectral specific UCFG set operations */ + +/** + * ucfg_spectral_control() - Carry out Spectral control operations + * @pdev: Pointer to pdev + * @sscan_req: spectral related control request + * + * Carry out Spectral specific UCFG control get/set operations + * + * Return: 0 on success, negative value on failure + */ +QDF_STATUS ucfg_spectral_control(struct wlan_objmgr_pdev *pdev, + struct spectral_cp_request *sscan_req); + +/** + * ucfg_spectral_scan_set_ppid() - configure pid of spectral tool + * @pdev: Pointer to pdev + * @ppid: Spectral tool pid + * + * Configure pid of spectral tool + * + * Return: None + */ +void ucfg_spectral_scan_set_ppid(struct wlan_objmgr_pdev *pdev, + uint32_t ppid); + +/** + * ucfg_spectral_create_cp_req() - Create Spectral control path request + * @sscan_req: Pointer to Spectral scan request + * @indata: pointer input data + * @insize: Size of input data + * + * Create Spectral control path request structure + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure + */ +QDF_STATUS ucfg_spectral_create_cp_req(struct spectral_cp_request *sscan_req, + void *indata, u_int32_t insize); + +/** + * ucfg_spectral_create_cp_req() - Extract response from Spectral CP request + * @sscan_req: Pointer to Spectral scan request + * @outdata: pointer output data + * @outsize: Size of output data + * + * Extract response from Spectral control path request + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure + */ +QDF_STATUS ucfg_spectral_extract_response(struct spectral_cp_request *sscan_req, + void *outdata, u_int32_t *outsize); + +/** + * ucfg_spectral_register_to_dbr() - Register spectral to DBR + * @pdev: Pointer to pdev object + * + * Register spectral to Direct Buffer RX component + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure + */ +QDF_STATUS ucfg_spectral_register_to_dbr(struct wlan_objmgr_pdev *pdev); +#endif /* _WLAN_SPECTRAL_UCFG_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_utils_api.h b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_utils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..3d1b159e82179aa0aae79f52d3fa1b623b6b2b93 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_utils_api.h @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_SPECTRAL_UTILS_API_H_ +#define _WLAN_SPECTRAL_UTILS_API_H_ + +#include +#include + +/* Forward declaration */ +struct direct_buf_rx_data; +struct wmi_spectral_cmd_ops; + +/** + * wlan_spectral_is_feature_disabled() - Check if spectral feature is disabled + * @psoc - the physical device object. + * + * Return : true if spectral is disabled, else false. + */ +bool wlan_spectral_is_feature_disabled(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_spectral_init() - API to init spectral component + * + * This API is invoked from dispatcher init during all component init. + * This API will register all required handlers for pdev and peer object + * create/delete notification. + * + * Return: SUCCESS, + * Failure + */ +QDF_STATUS wlan_spectral_init(void); + +/** + * wlan_spectral_deinit() - API to deinit spectral component + * + * This API is invoked from dispatcher deinit during all component deinit. + * This API will unregister all registered handlers for pdev and peer object + * create/delete notification. + * + * Return: SUCCESS, + * Failure + */ +QDF_STATUS wlan_spectral_deinit(void); + +/** + * wlan_lmac_if_sptrl_register_rx_ops() - Register lmac interface Rx operations + * @rx_ops: Pointer to lmac interface Rx operations structure + * + * API to register spectral related lmac interface Rx operations + * + * Return: None + */ +void +wlan_lmac_if_sptrl_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops); + +/** +* wlan_register_wmi_spectral_cmd_ops() - Register operations related to wmi +* commands on spectral parameters +* @pdev - the physical device object +* @cmd_ops - pointer to the structure holding the operations +* related to wmi commands on spectral parameters +* +* API to register operations related to wmi commands on spectral parameters +* +* Return: None +*/ +void +wlan_register_wmi_spectral_cmd_ops(struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops); + +/** + * struct spectral_legacy_cbacks - Spectral legacy callbacks + * @vdev_get_chan_freq: Get channel frequency + * @vdev_get_chan_freq_seg2: Get secondary 80 center frequency + * @vdev_get_ch_width: Get channel width + * @vdev_get_sec20chan_freq_mhz: Get seconadry 20 frequency + */ +struct spectral_legacy_cbacks { + int16_t (*vdev_get_chan_freq)(struct wlan_objmgr_vdev *vdev); + int16_t (*vdev_get_chan_freq_seg2)(struct wlan_objmgr_vdev *vdev); + enum phy_ch_width (*vdev_get_ch_width)(struct wlan_objmgr_vdev *vdev); + int (*vdev_get_sec20chan_freq_mhz)(struct wlan_objmgr_vdev *vdev, + uint16_t *sec20chan_freq); +}; + +/** + * spectral_vdev_get_chan_freq - Get vdev channel frequency + * @vdev: vdev object + * + * Return: vdev operating frequency + */ +int16_t spectral_vdev_get_chan_freq(struct wlan_objmgr_vdev *vdev); + +/** + * spectral_vdev_get_chan_freq_seg2 - Get vdev's secondary 80 center frequency + * @vdev: vdev object + * + * Return: vdev secondary 80 center frequency + */ +int16_t spectral_vdev_get_chan_freq_seg2(struct wlan_objmgr_vdev *vdev); + +/** + * spectral_vdev_get_sec20chan_freq_mhz - Get vdev secondary channel frequency + * @vdev: vdev object + * @sec20chan_freq: secondary channel frequency + * + * Return: secondary channel freq + */ +int spectral_vdev_get_sec20chan_freq_mhz(struct wlan_objmgr_vdev *vdev, + uint16_t *sec20chan_freq); + +/** + * spectral_register_legacy_cb() - Register spectral legacy callbacks + * commands on spectral parameters + * @psoc - the physical device object + * @legacy_cbacks - Reference to struct spectral_legacy_cbacks from which + * function pointers need to be copied + * + * API to register spectral related legacy callbacks + * + * Return: QDF_STATUS_SUCCESS upon successful registration, + * QDF_STATUS_E_FAILURE upon failure + */ +QDF_STATUS spectral_register_legacy_cb( + struct wlan_objmgr_psoc *psoc, + struct spectral_legacy_cbacks *legacy_cbacks); + +/** + * spectral_vdev_get_ch_width() - Get the channel bandwidth + * @vdev - Pointer to vdev + * + * API to get the channel bandwidth of a given vdev + * + * Return: Enumeration corresponding to the channel bandwidth + */ +enum phy_ch_width +spectral_vdev_get_ch_width(struct wlan_objmgr_vdev *vdev); + +/** + * spectral_pdev_open() - Spectral pdev open handler + * @pdev: pointer to pdev object + * + * API to execute operations on pdev open + * + * Return: QDF_STATUS_SUCCESS upon successful registration, + * QDF_STATUS_E_FAILURE upon failure + */ +QDF_STATUS spectral_pdev_open(struct wlan_objmgr_pdev *pdev); + +/** + * spectral_register_dbr() - register Spectral event handler with DDMA + * @pdev: pointer to pdev object + * + * API to register event handler with Direct DMA + * + * Return: QDF_STATUS_SUCCESS upon successful registration, + * QDF_STATUS_E_FAILURE upon failure + */ + +QDF_STATUS spectral_register_dbr(struct wlan_objmgr_pdev *pdev); + +/** + * spectral_unregister_dbr() - unregister Spectral event handler with DDMA + * @pdev: pointer to pdev object + * + * API to unregister event handler with Direct DMA + * + * Return: QDF_STATUS_SUCCESS upon successful unregistration, + * QDF_STATUS_E_FAILURE upon failure + */ +QDF_STATUS spectral_unregister_dbr(struct wlan_objmgr_pdev *pdev); + +#ifdef DIRECT_BUF_RX_ENABLE +/** + * spectral_dbr_event_handler() - Spectral dbr event handler + * @pdev: pointer to pdev object + * @payload: dbr event buffer + * + * API to handle spectral dbr event + * + * Return: true to release buf + */ +bool spectral_dbr_event_handler(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_data *payload); +#endif +#endif /* _WLAN_SPECTRAL_UTILS_API_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..f416b47e9c325cc0ae53ef4a1fcc35ad22253fc5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_tgt_api.c @@ -0,0 +1,395 @@ +/* + * Copyright (c) 2011,2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include + +#ifdef DIRECT_BUF_RX_ENABLE +#include + +#define DBR_EVENT_TIMEOUT_IN_MS_SPECTRAL 1 +#define DBR_NUM_RESP_PER_EVENT_SPECTRAL 2 +#endif + +void * +tgt_get_target_handle(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_spectral *ps; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return NULL; + } + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + if (!ps) { + spectral_err("PDEV SPECTRAL object is NULL!"); + return NULL; + } + return ps->psptrl_target_handle; +} + +QDF_STATUS +tgt_spectral_control( + struct wlan_objmgr_pdev *pdev, + struct spectral_cp_request *sscan_req) +{ + struct spectral_context *sc; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return -EPERM; + } + sc = spectral_get_spectral_ctx_from_pdev(pdev); + if (!sc) { + spectral_err("spectral context is NULL!"); + return -EPERM; + } + return spectral_control_cmn(pdev, sscan_req); +} + +void * +tgt_pdev_spectral_init(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_pdev_spectral_init( + pdev); +} + +void +tgt_pdev_spectral_deinit(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_pdev_spectral_deinit(pdev); +} + +QDF_STATUS +tgt_set_spectral_config(struct wlan_objmgr_pdev *pdev, + const u_int32_t threshtype, const u_int32_t value, + const enum spectral_scan_mode smode, + enum spectral_cp_error_code *err) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_set_spectral_config( + pdev, threshtype, value, smode, err); +} + +QDF_STATUS +tgt_get_spectral_config(struct wlan_objmgr_pdev *pdev, + struct spectral_config *sptrl_config, + const enum spectral_scan_mode smode) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_get_spectral_config( + pdev, + sptrl_config, + smode); +} + +QDF_STATUS +tgt_start_spectral_scan(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode, + enum spectral_cp_error_code *err) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_start_spectral_scan( + pdev, smode, err); +} + +QDF_STATUS +tgt_stop_spectral_scan(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode, + enum spectral_cp_error_code *err) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_stop_spectral_scan( + pdev, smode, err); +} + +bool +tgt_is_spectral_active(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_is_spectral_active( + pdev, smode); +} + +bool +tgt_is_spectral_enabled(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_is_spectral_enabled( + pdev, smode); +} + +QDF_STATUS +tgt_set_debug_level(struct wlan_objmgr_pdev *pdev, u_int32_t debug_level) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_set_debug_level( + pdev, + debug_level); +} + +u_int32_t +tgt_get_debug_level(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_get_debug_level(pdev); +} + +QDF_STATUS +tgt_get_spectral_capinfo(struct wlan_objmgr_pdev *pdev, + struct spectral_caps *scaps) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_get_spectral_capinfo( + pdev, scaps); +} + +QDF_STATUS +tgt_get_spectral_diagstats(struct wlan_objmgr_pdev *pdev, + struct spectral_diag_stats *stats) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_get_spectral_diagstats( + pdev, stats); +} + +void +tgt_register_wmi_spectral_cmd_ops( + struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct wlan_lmac_if_sptrl_tx_ops *psptrl_tx_ops = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + + psptrl_tx_ops = &psoc->soc_cb.tx_ops.sptrl_tx_ops; + + return psptrl_tx_ops->sptrlto_register_wmi_spectral_cmd_ops(pdev, + cmd_ops); +} + +void +tgt_spectral_register_nl_cb( + struct wlan_objmgr_pdev *pdev, + struct spectral_nl_cb *nl_cb) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct wlan_lmac_if_sptrl_tx_ops *psptrl_tx_ops = NULL; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return; + } + psoc = wlan_pdev_get_psoc(pdev); + + psptrl_tx_ops = &psoc->soc_cb.tx_ops.sptrl_tx_ops; + + return psptrl_tx_ops->sptrlto_register_netlink_cb(pdev, + nl_cb); +} + +bool +tgt_spectral_use_nl_bcast(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct wlan_lmac_if_sptrl_tx_ops *psptrl_tx_ops = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + + psptrl_tx_ops = &psoc->soc_cb.tx_ops.sptrl_tx_ops; + + return psptrl_tx_ops->sptrlto_use_nl_bcast(pdev); +} + +void tgt_spectral_deregister_nl_cb(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct wlan_lmac_if_sptrl_tx_ops *psptrl_tx_ops = NULL; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return; + } + psoc = wlan_pdev_get_psoc(pdev); + + psptrl_tx_ops = &psoc->soc_cb.tx_ops.sptrl_tx_ops; + + psptrl_tx_ops->sptrlto_deregister_netlink_cb(pdev); +} + +int +tgt_spectral_process_report(struct wlan_objmgr_pdev *pdev, + void *payload) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct wlan_lmac_if_sptrl_tx_ops *psptrl_tx_ops = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + + psptrl_tx_ops = &psoc->soc_cb.tx_ops.sptrl_tx_ops; + + return psptrl_tx_ops->sptrlto_process_spectral_report(pdev, payload); +} + +uint32_t +tgt_spectral_get_target_type(struct wlan_objmgr_psoc *psoc) +{ + uint32_t target_type = 0; + struct wlan_lmac_if_target_tx_ops *target_type_tx_ops; + + target_type_tx_ops = &psoc->soc_cb.tx_ops.target_tx_ops; + + if (target_type_tx_ops->tgt_get_tgt_type) + target_type = target_type_tx_ops->tgt_get_tgt_type(psoc); + + return target_type; +} + +#ifdef DIRECT_BUF_RX_ENABLE +QDF_STATUS +tgt_spectral_register_to_dbr(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_direct_buf_rx_tx_ops *dbr_tx_ops = NULL; + struct wlan_lmac_if_sptrl_tx_ops *sptrl_tx_ops = NULL; + struct dbr_module_config dbr_config = {0}; + + psoc = wlan_pdev_get_psoc(pdev); + dbr_tx_ops = &psoc->soc_cb.tx_ops.dbr_tx_ops; + sptrl_tx_ops = &psoc->soc_cb.tx_ops.sptrl_tx_ops; + dbr_config.num_resp_per_event = DBR_NUM_RESP_PER_EVENT_SPECTRAL; + dbr_config.event_timeout_in_ms = DBR_EVENT_TIMEOUT_IN_MS_SPECTRAL; + + if ((sptrl_tx_ops->sptrlto_direct_dma_support) && + (sptrl_tx_ops->sptrlto_direct_dma_support(pdev))) { + if (sptrl_tx_ops->sptrlto_check_and_do_dbr_buff_debug) + sptrl_tx_ops->sptrlto_check_and_do_dbr_buff_debug(pdev); + if (dbr_tx_ops->direct_buf_rx_module_register) + dbr_tx_ops->direct_buf_rx_module_register + (pdev, 0, &dbr_config, + spectral_dbr_event_handler); + if (sptrl_tx_ops->sptrlto_check_and_do_dbr_ring_debug) + sptrl_tx_ops->sptrlto_check_and_do_dbr_ring_debug(pdev); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +tgt_spectral_unregister_to_dbr(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_direct_buf_rx_tx_ops *dbr_tx_ops = NULL; + struct wlan_lmac_if_sptrl_tx_ops *sptrl_tx_ops = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + dbr_tx_ops = &psoc->soc_cb.tx_ops.dbr_tx_ops; + sptrl_tx_ops = &psoc->soc_cb.tx_ops.sptrl_tx_ops; + + if ((sptrl_tx_ops->sptrlto_direct_dma_support) && + (sptrl_tx_ops->sptrlto_direct_dma_support(pdev))) { + /* Stop DBR debug as the buffers itself are freed now */ + if (dbr_tx_ops->direct_buf_rx_stop_ring_debug) + dbr_tx_ops->direct_buf_rx_stop_ring_debug(pdev, 0); + + /*No need to zero-out as buffers are anyway getting freed*/ + if (dbr_tx_ops->direct_buf_rx_stop_buffer_poisoning) + dbr_tx_ops->direct_buf_rx_stop_buffer_poisoning + (pdev, 0); + if (dbr_tx_ops->direct_buf_rx_module_unregister) + dbr_tx_ops->direct_buf_rx_module_unregister + (pdev, 0); + + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} +#else +QDF_STATUS +tgt_spectral_register_to_dbr(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +tgt_spectral_unregister_to_dbr(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* DIRECT_BUF_RX_ENABLE */ + +#ifdef DIRECT_BUF_RX_DEBUG +QDF_STATUS tgt_set_spectral_dma_debug(struct wlan_objmgr_pdev *pdev, + enum spectral_dma_debug dma_debug_type, + bool dma_debug_enable) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + spectral_err("psoc is NULL!"); + return QDF_STATUS_E_FAILURE; + } + + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_set_dma_debug( + pdev, + dma_debug_type, + dma_debug_enable); +} +#else +QDF_STATUS tgt_set_spectral_dma_debug(struct wlan_objmgr_pdev *pdev, + enum spectral_dma_debug dma_debug_type, + bool dma_debug_enable) +{ + return QDF_STATUS_SUCCESS; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..8f64615ef627223c56fb74f5d4490ca45ba8a08d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_ucfg_api.c @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "../../core/spectral_cmn_api_i.h" +#include +#include +#include + +QDF_STATUS +ucfg_spectral_control(struct wlan_objmgr_pdev *pdev, + struct spectral_cp_request *sscan_req) +{ + struct spectral_context *sc; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return -EPERM; + } + + if (wlan_spectral_is_feature_disabled(wlan_pdev_get_psoc(pdev))) { + spectral_info("Spectral is disabled"); + return -EPERM; + } + + sc = spectral_get_spectral_ctx_from_pdev(pdev); + if (!sc) { + spectral_err("spectral context is NULL!"); + return -EPERM; + } + + return sc->sptrlc_spectral_control(pdev, sscan_req); +} +qdf_export_symbol(ucfg_spectral_control); + +void ucfg_spectral_scan_set_ppid(struct wlan_objmgr_pdev *pdev, uint32_t ppid) +{ + struct pdev_spectral *ps = NULL; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return; + } + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + if (!ps) { + spectral_err("spectral context is NULL!"); + return; + } + ps->spectral_pid = ppid; + spectral_debug("spectral ppid: %d", ppid); + + return; +} + +QDF_STATUS ucfg_spectral_create_cp_req(struct spectral_cp_request *sscan_req, + void *indata, u_int32_t insize) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + switch (sscan_req->req_id) { + case SPECTRAL_SET_CONFIG: + { + if (insize < sizeof(struct spectral_config) || + !indata) { + status = QDF_STATUS_E_FAILURE; + goto bad; + } + qdf_mem_copy(&sscan_req->config_req.sscan_config, + indata, + sizeof(struct spectral_config)); + } + break; + + case SPECTRAL_SET_DEBUG_LEVEL: + { + if (insize < sizeof(uint32_t) || !indata) { + status = QDF_STATUS_E_FAILURE; + goto bad; + } + sscan_req->debug_req.spectral_dbg_level = + *(uint32_t *)indata; + } + break; + + default: + break; + } + +bad: + return status; +} + +qdf_export_symbol(ucfg_spectral_create_cp_req); + +QDF_STATUS ucfg_spectral_extract_response(struct spectral_cp_request *sscan_req, + void *outdata, u_int32_t *outsize) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + switch (sscan_req->req_id) { + case SPECTRAL_GET_CONFIG: + { + if (!outdata || !outsize || + (*outsize < sizeof(struct spectral_config))) { + status = QDF_STATUS_E_FAILURE; + goto bad; + } + *outsize = sizeof(struct spectral_config); + qdf_mem_copy(outdata, + &sscan_req->config_req.sscan_config, + sizeof(struct spectral_config)); + } + break; + + case SPECTRAL_IS_ACTIVE: + { + if (!outdata || !outsize || + *outsize < sizeof(uint32_t)) { + status = QDF_STATUS_E_FAILURE; + goto bad; + } + *outsize = sizeof(uint32_t); + *((uint32_t *)outdata) = + sscan_req->status_req.is_active; + } + break; + + case SPECTRAL_IS_ENABLED: + { + if (!outdata || !outsize || + *outsize < sizeof(uint32_t)) { + status = QDF_STATUS_E_FAILURE; + goto bad; + } + *outsize = sizeof(uint32_t); + *((uint32_t *)outdata) = + sscan_req->status_req.is_enabled; + } + break; + + case SPECTRAL_GET_DEBUG_LEVEL: + { + if (!outdata || !outsize || + *outsize < sizeof(uint32_t)) { + status = QDF_STATUS_E_FAILURE; + goto bad; + } + *outsize = sizeof(uint32_t); + *((uint32_t *)outdata) = + sscan_req->debug_req.spectral_dbg_level; + } + break; + + case SPECTRAL_GET_CAPABILITY_INFO: + { + if (!outdata || !outsize || + *outsize < sizeof(struct spectral_caps)) { + status = QDF_STATUS_E_FAILURE; + goto bad; + } + *outsize = sizeof(struct spectral_caps); + qdf_mem_copy(outdata, &sscan_req->caps_req.sscan_caps, + sizeof(struct spectral_caps)); + } + break; + + case SPECTRAL_GET_DIAG_STATS: + { + if (!outdata || !outsize || + (*outsize < sizeof(struct spectral_diag_stats))) { + status = QDF_STATUS_E_FAILURE; + goto bad; + } + *outsize = sizeof(struct spectral_diag_stats); + qdf_mem_copy(outdata, &sscan_req->diag_req.sscan_diag, + sizeof(struct spectral_diag_stats)); + } + break; + + case SPECTRAL_GET_CHAN_WIDTH: + { + if (!outdata || !outsize || + *outsize < sizeof(uint32_t)) { + status = QDF_STATUS_E_FAILURE; + goto bad; + } + *outsize = sizeof(uint32_t); + *((uint32_t *)outdata) = + sscan_req->chan_width_req.chan_width; + } + break; + + default: + break; + } + +bad: + return status; +} + +qdf_export_symbol(ucfg_spectral_extract_response); + +QDF_STATUS ucfg_spectral_register_to_dbr(struct wlan_objmgr_pdev *pdev) +{ + return spectral_pdev_open(pdev); +} diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_utils_api.c b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_utils_api.c new file mode 100644 index 0000000000000000000000000000000000000000..8a58ad65437880615b39860c6a5d5a0374cdc08b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_utils_api.c @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include "../../core/spectral_cmn_api_i.h" +#include +#include + +bool wlan_spectral_is_feature_disabled(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + spectral_err("PSOC is NULL!"); + return true; + } + + if (wlan_psoc_nif_feat_cap_get(psoc, WLAN_SOC_F_SPECTRAL_DISABLE)) + return true; + + return false; +} + +QDF_STATUS +wlan_spectral_init(void) +{ + if (wlan_objmgr_register_psoc_create_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_psoc_obj_create_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_register_psoc_destroy_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_psoc_obj_destroy_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_register_pdev_create_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_pdev_obj_create_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_register_pdev_destroy_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_pdev_obj_destroy_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_spectral_deinit(void) +{ + if (wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_psoc_obj_create_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_psoc_obj_destroy_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_unregister_pdev_create_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_pdev_obj_create_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_unregister_pdev_destroy_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_pdev_obj_destroy_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +spectral_register_legacy_cb(struct wlan_objmgr_psoc *psoc, + struct spectral_legacy_cbacks *legacy_cbacks) +{ + struct spectral_context *sc; + + sc = spectral_get_spectral_ctx_from_psoc(psoc); + if (!sc) { + spectral_err("Invalid Context"); + return QDF_STATUS_E_FAILURE; + } + + sc->legacy_cbacks.vdev_get_chan_freq = + legacy_cbacks->vdev_get_chan_freq; + sc->legacy_cbacks.vdev_get_chan_freq_seg2 = + legacy_cbacks->vdev_get_chan_freq_seg2; + sc->legacy_cbacks.vdev_get_ch_width = legacy_cbacks->vdev_get_ch_width; + sc->legacy_cbacks.vdev_get_sec20chan_freq_mhz = + legacy_cbacks->vdev_get_sec20chan_freq_mhz; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(spectral_register_legacy_cb); + +int16_t +spectral_vdev_get_chan_freq(struct wlan_objmgr_vdev *vdev) +{ + struct spectral_context *sc; + + sc = spectral_get_spectral_ctx_from_vdev(vdev); + if (!sc) { + spectral_err("spectral context is Null"); + return -EINVAL; + } + + if (!sc->legacy_cbacks.vdev_get_chan_freq) { + spectral_err("vdev_get_chan_freq is not supported"); + return -ENOTSUPP; + } + + return sc->legacy_cbacks.vdev_get_chan_freq(vdev); +} + +int16_t +spectral_vdev_get_chan_freq_seg2(struct wlan_objmgr_vdev *vdev) +{ + struct spectral_context *sc; + + sc = spectral_get_spectral_ctx_from_vdev(vdev); + if (!sc) { + spectral_err("spectral context is null"); + return -EINVAL; + } + + if (!sc->legacy_cbacks.vdev_get_chan_freq_seg2) { + spectral_err("vdev_get_chan_freq_seg2 is not supported"); + return -ENOTSUPP; + } + + return sc->legacy_cbacks.vdev_get_chan_freq_seg2(vdev); +} + +enum phy_ch_width +spectral_vdev_get_ch_width(struct wlan_objmgr_vdev *vdev) +{ + struct spectral_context *sc; + + sc = spectral_get_spectral_ctx_from_vdev(vdev); + if (!sc) { + spectral_err("spectral context is Null"); + return CH_WIDTH_INVALID; + } + + if (!sc->legacy_cbacks.vdev_get_ch_width) { + spectral_err("vdev_get_ch_width is not supported"); + return -ENOTSUPP; + } + + return sc->legacy_cbacks.vdev_get_ch_width(vdev); +} + +int +spectral_vdev_get_sec20chan_freq_mhz(struct wlan_objmgr_vdev *vdev, + uint16_t *sec20chan_freq) +{ + struct spectral_context *sc; + + sc = spectral_get_spectral_ctx_from_vdev(vdev); + if (!sc) { + spectral_err("spectral context is Null"); + return -EINVAL; + } + + if (!sc->legacy_cbacks.vdev_get_sec20chan_freq_mhz) { + spectral_err("vdev_get_sec20chan_freq_mhz is not supported"); + return -ENOTSUPP; + } + + return sc->legacy_cbacks.vdev_get_sec20chan_freq_mhz(vdev, + sec20chan_freq); +} + +void +wlan_lmac_if_sptrl_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_sptrl_rx_ops *sptrl_rx_ops = &rx_ops->sptrl_rx_ops; + + /* Spectral rx ops */ + sptrl_rx_ops->sptrlro_get_target_handle = tgt_get_target_handle; + sptrl_rx_ops->sptrlro_vdev_get_chan_freq = spectral_vdev_get_chan_freq; + sptrl_rx_ops->sptrlro_vdev_get_chan_freq_seg2 = + spectral_vdev_get_chan_freq_seg2; + sptrl_rx_ops->sptrlro_vdev_get_ch_width = spectral_vdev_get_ch_width; + sptrl_rx_ops->sptrlro_vdev_get_sec20chan_freq_mhz = + spectral_vdev_get_sec20chan_freq_mhz; + sptrl_rx_ops->sptrlro_spectral_is_feature_disabled = + wlan_spectral_is_feature_disabled; +} + +void +wlan_register_wmi_spectral_cmd_ops(struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops) +{ + struct spectral_context *sc; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return; + } + + sc = spectral_get_spectral_ctx_from_pdev(pdev); + if (!sc) { + spectral_err("spectral context is NULL!"); + return; + } + + return sc->sptrlc_register_wmi_spectral_cmd_ops(pdev, cmd_ops); +} +qdf_export_symbol(wlan_register_wmi_spectral_cmd_ops); + +#ifdef DIRECT_BUF_RX_ENABLE +bool spectral_dbr_event_handler(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_data *payload) +{ + struct spectral_context *sc; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return -EINVAL; + } + sc = spectral_get_spectral_ctx_from_pdev(pdev); + if (!sc) { + spectral_err("spectral context is NULL!"); + return -EINVAL; + } + + sc->sptrlc_process_spectral_report(pdev, payload); + + return true; +} +#endif + +QDF_STATUS spectral_pdev_open(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + QDF_STATUS status; + + psoc = wlan_pdev_get_psoc(pdev); + + if (wlan_spectral_is_feature_disabled(psoc)) { + spectral_info("Spectral is disabled"); + return QDF_STATUS_COMP_DISABLED; + } + + if (cfg_get(psoc, CFG_SPECTRAL_POISON_BUFS)) + tgt_set_spectral_dma_debug(pdev, SPECTRAL_DMA_BUFFER_DEBUG, 1); + + status = tgt_spectral_register_to_dbr(pdev); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS spectral_register_dbr(struct wlan_objmgr_pdev *pdev) +{ + return tgt_spectral_register_to_dbr(pdev); +} + +qdf_export_symbol(spectral_register_dbr); + +QDF_STATUS spectral_unregister_dbr(struct wlan_objmgr_pdev *pdev) +{ + QDF_STATUS status; + + status = tgt_spectral_unregister_to_dbr(pdev); + + return status; +} + +qdf_export_symbol(spectral_unregister_dbr); diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/cfr/inc/target_if_cfr.h b/drivers/staging/qca-wifi-host-cmn/target_if/cfr/inc/target_if_cfr.h new file mode 100644 index 0000000000000000000000000000000000000000..77d3c69bee7de2f84fa35cb9d2920a4b5f481465 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/cfr/inc/target_if_cfr.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _TARGET_IF_CFR_H_ +#define _TARGET_IF_CFR_H_ + +#include +#include +#include +#include +#include + +#include "wmi_unified_cfr_api.h" +#include "wmi_unified_param.h" +#include "wmi_unified_cfr_param.h" +#define PEER_CFR_CAPTURE_ENABLE 1 +#define PEER_CFR_CAPTURE_DISABLE 0 + +#define PEER_CFR_CAPTURE_EVT_STATUS_MASK 0x80000000 +#define PEER_CFR_CAPTURE_EVT_PS_STATUS_MASK 0x40000000 +#define CFR_TX_EVT_STATUS_MASK 0x00000003 + +/* Status codes used by correlate and relay function */ +#define STATUS_STREAM_AND_RELEASE 0 +#define STATUS_HOLD 1 +#define STATUS_ERROR -1 + +/* Module IDs using corrlation function */ +#define CORRELATE_DBR_MODULE_ID 0 +/* + * HKV2 - Tx completion event for one-shot capture + * Cypress - Tx completion event for one-shot capture (or) RXTLV event for RCC + */ +#define CORRELATE_TX_EV_MODULE_ID 1 + +/** + * target_if_cfr_init_pdev() - Inits cfr pdev and registers necessary handlers. + * @psoc: pointer to psoc object + * @pdev: pointer to pdev object + * + * Return: Registration status for necessary handlers + */ +int target_if_cfr_init_pdev(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); + +/** + * target_if_cfr_deinit_pdev() - De-inits corresponding pdev and handlers. + * @psoc: pointer to psoc object + * @pdev: pointer to pdev object + * + * Return: De-registration status for necessary handlers + */ +int target_if_cfr_deinit_pdev(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); + +/** + * target_if_cfr_tx_ops_register() - Registers tx ops for cfr module + * @tx_ops - pointer to tx_ops structure. + */ +void target_if_cfr_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_cfr_enable_cfr_timer() - Enables cfr timer + * @pdev: pointer to pdev object + * @cfr_timer: Amount of time this timer has to run + * + * Return: status of timer + */ +int target_if_cfr_enable_cfr_timer(struct wlan_objmgr_pdev *pdev, + uint32_t cfr_timer); + +/** + * target_if_cfr_pdev_set_param() - Function to set params for cfr config + * @pdev: pointer to pdev object + * @param_id: param id which has to be set + * @param_value: value of param being set + * + * Return: success/failure of setting param + */ +int target_if_cfr_pdev_set_param(struct wlan_objmgr_pdev *pdev, + uint32_t param_id, uint32_t param_value); +/** + * target_if_cfr_start_capture() - Function to start cfr capture for a peer + * @pdev: pointer to pdev object + * @peer: pointer to peer object + * @cfr_params: capture parameters for this peer + * + * Return: success/failure status of start capture + */ +int target_if_cfr_start_capture(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct cfr_capture_params *cfr_params); +/** + * target_if_cfr_stop_capture() - Function to stop cfr capture for a peer + * @pdev: pointer to pdev object + * @peer: pointer to peer object + * + * Return: success/failure status of stop capture + */ +int target_if_cfr_stop_capture(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer); + +/** + * target_if_cfr_get_target_type() - Function to get target type + * @psoc: pointer to psoc object + * + * Return: target type of target + */ +int target_if_cfr_get_target_type(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_cfr_set_cfr_support() - Function to set cfr support + * @psoc: pointer to psoc object + * @value: value to be set + */ +void target_if_cfr_set_cfr_support(struct wlan_objmgr_psoc *psoc, + uint8_t value); + +/** + * target_if_cfr_info_send() - Function to send cfr info to upper layers + * @pdev: pointer to pdev object + * @head: pointer to cfr info head + * @hlen: head len + * @data: pointer to cfr info data + * @dlen: data len + * @tail: pointer to cfr info tail + * @tlen: tail len + */ +void target_if_cfr_info_send(struct wlan_objmgr_pdev *pdev, void *head, + size_t hlen, void *data, size_t dlen, void *tail, + size_t tlen); + +/** + * cfr_wifi2_0_init_pdev() - Function to init legacy pdev + * @psoc: pointer to psoc object + * @pdev: pointer to pdev object + * + * Return: success/failure status of init + */ +QDF_STATUS cfr_wifi2_0_init_pdev(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); + +/** + * cfr_wifi2_0_deinit_pdev() - Function to deinit legacy pdev + * @psoc: pointer to psoc object + * @pdev: pointer to pdev object + * + * Return: success/failure status of deinit + */ +QDF_STATUS cfr_wifi2_0_deinit_pdev(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/cfr/inc/target_if_cfr_6018.h b/drivers/staging/qca-wifi-host-cmn/target_if/cfr/inc/target_if_cfr_6018.h new file mode 100644 index 0000000000000000000000000000000000000000..4255f85acac9e2a1fd03c5e2d6ba863d3dc58ce2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/cfr/inc/target_if_cfr_6018.h @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _TARGET_IF_CFR_6018_H_ +#define _TARGET_IF_CFR_6018_H_ + +#ifdef WLAN_ENH_CFR_ENABLE +/* + * Memory requirements : + * + * 1. DMA header : + * + * Legacy DMA header(QCA8074V2) : 2 words (length = 8 bytes) + * Enhanced DMA header(QCA6018) : Upto 16 words depending on no. of MU users + * in UL-MU-PPDU (Max length = 64 bytes) + * + * Fixed 4 words for whal_cfir_enhanced_hdr + freeze TLV + * + uplink_user_info TLV (MAX 4) + * + * mu_rx_num_users -> No. of words in CFR DMA header + * 0 -> 12 = 4 + 7(freeze TLV) + 1(for 64-bit alignment) + * 1 -> 12 = 4 + 7(freeze TLV) + 1(user1) + * 2 -> 14 = 4 + 7(freeze TLV) + 2(users 1,2) + 1(for 64-bit alignment) + * 3 -> 14 = 4 + 7(freeze TLV) + 3(users 1,2,3) + * 4 -> 16 = 4 + 7(freeze TLV) + 4(users 1,2,3,4) + 1(for 64-bit alignment) + * + * + * 2. CFR data size for max BW/Nss/Nrx + * + * Cypress : Max BW = 80 MHz + * NSS = 2 + * Nrx = 2 + * Size of one tone = 4 bytes + * + * a. RTT-H - 2048 bytes + * + * b. Debug-H (MIMO CFR) - 16016 bytes + * + * c. RTT-H + CIR - 10240 bytes = 2048(RTT-H) + 8192(CIR) + */ + +/* Max 4 users in MU case */ +#define CYP_CFR_MU_USERS 4 + +#define CYP_MAX_HEADER_LENGTH_WORDS 16 + +/* payload_len = Max(2048, 16016, 10240) = 16064 (64-bit alignment) */ +#define CYP_MAX_DATA_LENGTH_BYTES 16064 + +/* in ms */ +#define LUT_AGE_TIMER 3000 +#define LUT_AGE_THRESHOLD 3000 +#define NUM_LUT_ENTRIES 136 + +/* Max size : + * 16173 = 93 bytes(csi header) + 64 bytes(cfr header) + 16016 bytes(cfr + * payload) + */ +#define STREAMFS_MAX_SUBBUF_CYP 16173 + +#define STREAMFS_NUM_SUBBUF_CYP 255 + +/* + * @tag: ucode fills this with 0xBA + * + * @length: length of CFR header in words (32-bit) + * + * @upload_done: ucode sets this to 1 to indicate DMA completion + * + * @capture_type: + * + * 0 - None + * 1 - RTT-H (Nss = 1, Nrx) + * 2 - Debug-H (Nss, Nrx) + * 3 - Reserved + * 5 - RTT-H + CIR(Nss, Nrx) + * + * @preamble_type: + * + * 0 - Legacy + * 1 - HT + * 2 - VHT + * 3 - HE + * + * @nss: + * + * 0 - 1-stream + * 1 - 2-stream + * .. .. + * 7 - 8-stream + * + *@num_chains: + * + * 0 - 1-chain + * 1 - 2-chain + * .. .. + * 7 - 8-chain + * + *@upload_bw_pkt: + * + * 0 - 20 MHz + * 1 - 40 MHz + * 2 - 80 MHz + * 3 - 160 MHz + * + * @sw_peer_id_valid: Indicates whether sw_peer_id field is valid or not, + * sent from MAC to PHY via the MACRX_FREEZE_CAPTURE_CHANNEL TLV + * + * @sw_peer_id: Indicates peer id based on AST search, sent from MAC to PHY + * via the MACRX_FREEZE_CAPTURE_CHANNEL TLV + * + * @phy_ppdu_id: sent from PHY to MAC, copied to MACRX_FREEZE_CAPTURE_CHANNEL + * TLV + * + * @total_bytes: Total size of CFR payload (FFT bins) + * + * @header_version: + * + * 1 - HKV2/Hastings + * 2 - Cypress + * + * @target_id: + * + * 1 - Hastings + * 2 - Cypress + * 3 - Hastings Prime + * 4 - Pine + * + * @cfr_fmt: + * + * 0 - raw (32-bit format) + * 1 - compressed (24-bit format) + * + * @mu_rx_data_incl: Indicates whether CFR header contains UL-MU-MIMO info + * + * @freeze_data_incl: Indicates whether CFR header contains + * MACRX_FREEZE_CAPTURE_CHANNEL TLV + * + * @decimation_factor: FFT bins decimation + * @mu_rx_num_users: Number of users in UL-MU-PPDU + */ +struct whal_cfir_enhanced_hdr { + uint16_t tag : 8, + length : 6, + rsvd1 : 2; + + uint16_t upload_done : 1, + capture_type : 3, + preamble_type : 2, + nss : 3, + num_chains : 3, + upload_pkt_bw : 3, + sw_peer_id_valid : 1; + + uint16_t sw_peer_id : 16; + + uint16_t phy_ppdu_id : 16; + + uint16_t total_bytes; + + uint16_t header_version :4, + target_id :4, + cfr_fmt :1, + rsvd2 :1, + mu_rx_data_incl :1, + freeze_data_incl:1, + rsvd3 :4; + + uint16_t mu_rx_num_users :8, + decimation_factor :4, + rsvd4 :4; + + uint16_t rsvd5; +}; + +struct macrx_freeze_capture_channel { + uint16_t freeze : 1, //[0] + capture_reason : 3, //[3:1] + packet_type : 2, //[5:4] + packet_sub_type : 4, //[9:6] + reserved : 5, //[14:10] + sw_peer_id_valid : 1; //[15] + uint16_t sw_peer_id : 16; //[15:0] + uint16_t phy_ppdu_id : 16; //[15:0] + uint16_t packet_ta_lower_16 : 16; //[15:0] + uint16_t packet_ta_mid_16 : 16; //[15:0] + uint16_t packet_ta_upper_16 : 16; //[15:0] + uint16_t packet_ra_lower_16 : 16; //[15:0] + uint16_t packet_ra_mid_16 : 16; //[15:0] + uint16_t packet_ra_upper_16 : 16; //[15:0] + uint16_t tsf_timestamp_15_0 : 16; //[15:0] + uint16_t tsf_timestamp_31_16 : 16; //[15:0] + uint16_t tsf_timestamp_47_32 : 16; //[15:0] + uint16_t tsf_timestamp_63_48 : 16; //[15:0] + uint16_t user_index : 6, //[5:0] + directed : 1, //[6] + reserved_13 : 9; //[15:7] +}; + +struct uplink_user_setup_info { + uint32_t bw_info_valid : 1, //[0] + uplink_receive_type : 2, //[2:1] + reserved_0a : 1, //[3] + uplink_11ax_mcs : 4, //[7:4] + ru_width : 7, //[14:8] + reserved_0b : 1, //[15] + nss : 3, //[18:16] + stream_offset : 3, //[21:19] + sta_dcm : 1, //[22] + sta_coding : 1, //[23] + ru_start_index : 7, //[30:24] + reserved_0c : 1; //[31] +}; + +/** + * cfr_6018_init_pdev() - Inits cfr pdev and registers necessary handlers. + * @psoc: pointer to psoc object + * @pdev: pointer to pdev object + * + * Return: Registration status for necessary handlers + */ +QDF_STATUS cfr_6018_init_pdev( + struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); + +/** + * cfr_6018_deinit_pdev() - De-inits corresponding pdev and handlers. + * @psoc: pointer to psoc object + * @pdev: pointer to pdev object + * + * Return: De-registration status for necessary handlers + */ +QDF_STATUS cfr_6018_deinit_pdev( + struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); + +/** + * target_if_cfr_start_lut_age_timer() - Start timer to flush aged-out LUT + * entries + * @pdev: pointer to pdev object + * + * Return: None + */ +void target_if_cfr_start_lut_age_timer(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_cfr_stop_lut_age_timer() - Stop timer to flush aged-out LUT + * entries + * @pdev: pointer to pdev object + * + * Return: None + */ +void target_if_cfr_stop_lut_age_timer(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_cfr_dump_lut_enh() - Dump all valid LUT entries + * @pdev: objmgr PDEV + * + * Return: none + */ +void target_if_cfr_dump_lut_enh(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_cfr_config_rcc() - Start repetitive channel capture + * @pdev: pointer to pdev object + * @rcc_param: rcc configurations + * + * Return: Success/Failure status + */ +QDF_STATUS target_if_cfr_config_rcc(struct wlan_objmgr_pdev *pdev, + struct cfr_rcc_param *rcc_param); + +/** + * target_if_cfr_default_ta_ra_config() - Configure default values to all + * params(BW/NSS/TA/RA) in TA_RA mode + * @rcc_param: rcc configurations + * @allvalid: Indicates whether all TA_RA params are valid or not. + * It could be either 0 or 1. + * 1: should be sent to FW during CFR initialization + * 0: should be set, after a successful commit session. + * @reset_cfg: This bitmap is being used to determine which groups' + * parameters are needed to be reset to its default state. + */ +void target_if_cfr_default_ta_ra_config(struct cfr_rcc_param *rcc_param, + bool allvalid, uint16_t reset_cfg); + +/** + * target_if_cfr_rx_tlv_process() - Process PPDU status TLVs and store info in + * lookup table + * @pdev: PDEV object + * @nbuf: ppdu info + * + * Return: none + */ +void target_if_cfr_rx_tlv_process(struct wlan_objmgr_pdev *pdev, void *nbuf); + +/** + * target_if_cfr_update_global_cfg() - Update global config after a successful + * commit + * @pdev: pointer to pdev object + * + * Return: None + */ +void target_if_cfr_update_global_cfg(struct wlan_objmgr_pdev *pdev); +#else +static QDF_STATUS cfr_6018_init_pdev( + struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS cfr_6018_deinit_pdev( + struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/cfr/inc/target_if_cfr_6490.h b/drivers/staging/qca-wifi-host-cmn/target_if/cfr/inc/target_if_cfr_6490.h new file mode 100644 index 0000000000000000000000000000000000000000..94b149c84a7a6eee23bd6bf0e4cfac691d62ed6b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/cfr/inc/target_if_cfr_6490.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC : target_if_cfr_6490.h + * + * Target interface of CFR for QCA6490 implementation + * + */ + +#ifndef _TARGET_IF_CFR_6490_H +#define _TARGET_IF_CFR_6490_H + +#ifdef QCA_WIFI_QCA6490 +#define CFR_MAC_ID_24G 1 +#define CFR_MAC_ID_5G 0 +#endif /* QCA_WIFI_QCA6490 */ + +/** + * target_if_cfr_subscribe_ppdu_desc() - subscribe ppdu description + * for CFR component + * + * @pdev: pointer to pdev object + * @is_subscribe: subscribe or unsubscribe + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_cfr_subscribe_ppdu_desc(struct wlan_objmgr_pdev *pdev, + bool is_subscribe); + +/** + * cfr_6490_init_pdev() - Init pdev cfr for QCA6490 + * @psoc: pointer to psoc object + * @pdev: pointer to pdev object + * + * Registers to DBR component and init pdev cfr parameters + * + * Return: QDF status + */ +QDF_STATUS cfr_6490_init_pdev(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); + +/** + * cfr_6490_deinit_pdev() - De-inits pdev cfr for QCA6490 + * @pdev: pointer to pdev object + * + * Unregister to DBR and deinit pdev cfr parameters + * + * Return: QDF status + */ +QDF_STATUS cfr_6490_deinit_pdev(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); + +#endif /* _TARGET_IF_CFR_6490_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/cfr/src/target_if_cfr.c b/drivers/staging/qca-wifi-host-cmn/target_if/cfr/src/target_if_cfr.c new file mode 100644 index 0000000000000000000000000000000000000000..11e1c59062b43c3ba73900e550cd3423a5ce08b2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/cfr/src/target_if_cfr.c @@ -0,0 +1,490 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CFR_USE_FIXED_FOLDER +#include "target_if_cfr_6490.h" +#include "wlan_reg_services_api.h" +#else +#include +#endif + +int target_if_cfr_stop_capture(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer) +{ + struct peer_cfr *pe; + struct peer_cfr_params param = {0}; + struct wmi_unified *pdev_wmi_handle = NULL; + struct wlan_objmgr_vdev *vdev = {0}; + struct pdev_cfr *pdev_cfrobj; + int retv = 0; + + pe = wlan_objmgr_peer_get_comp_private_obj(peer, WLAN_UMAC_COMP_CFR); + if (pe == NULL) + return -EINVAL; + + pdev_wmi_handle = lmac_get_pdev_wmi_handle(pdev); + if (!pdev_wmi_handle) { + cfr_err("pdev wmi handle NULL"); + return -EINVAL; + } + vdev = wlan_peer_get_vdev(peer); + + qdf_mem_set(¶m, sizeof(param), 0); + + param.request = PEER_CFR_CAPTURE_DISABLE; + param.macaddr = wlan_peer_get_macaddr(peer); + param.vdev_id = wlan_vdev_get_id(vdev); + + param.periodicity = pe->period; + param.bandwidth = pe->bandwidth; + param.capture_method = pe->capture_method; + + retv = wmi_unified_send_peer_cfr_capture_cmd(pdev_wmi_handle, ¶m); + + pdev_cfrobj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CFR); + if (!pdev_cfrobj) { + cfr_err("pdev object for CFR is null"); + return -EINVAL; + } + cfr_err("CFR capture stats for this capture:"); + cfr_err("DBR event count = %llu, Tx event count = %llu " + "Release count = %llu", + pdev_cfrobj->dbr_evt_cnt, pdev_cfrobj->tx_evt_cnt, + pdev_cfrobj->release_cnt); + + pdev_cfrobj->dbr_evt_cnt = 0; + pdev_cfrobj->tx_evt_cnt = 0; + pdev_cfrobj->release_cnt = 0; + + return retv; +} + +int target_if_cfr_start_capture(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct cfr_capture_params *cfr_params) +{ + struct peer_cfr_params param = {0}; + struct wmi_unified *pdev_wmi_handle = NULL; + struct wlan_objmgr_vdev *vdev; + int retv = 0; + + pdev_wmi_handle = lmac_get_pdev_wmi_handle(pdev); + if (!pdev_wmi_handle) { + cfr_err("pdev wmi handle NULL"); + return -EINVAL; + } + vdev = wlan_peer_get_vdev(peer); + qdf_mem_set(¶m, sizeof(param), 0); + + param.request = PEER_CFR_CAPTURE_ENABLE; + param.macaddr = wlan_peer_get_macaddr(peer); + param.vdev_id = wlan_vdev_get_id(vdev); + + param.periodicity = cfr_params->period; + param.bandwidth = cfr_params->bandwidth; + param.capture_method = cfr_params->method; + + retv = wmi_unified_send_peer_cfr_capture_cmd(pdev_wmi_handle, ¶m); + return retv; +} + +int target_if_cfr_pdev_set_param(struct wlan_objmgr_pdev *pdev, + uint32_t param_id, uint32_t param_value) +{ + struct pdev_params pparam; + uint32_t pdev_id; + struct wmi_unified *pdev_wmi_handle = NULL; + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + if (pdev_id < 0) + return -EINVAL; + + pdev_wmi_handle = lmac_get_pdev_wmi_handle(pdev); + if (!pdev_wmi_handle) { + cfr_err("pdev wmi handle NULL"); + return -EINVAL; + } + qdf_mem_set(&pparam, sizeof(pparam), 0); + pparam.param_id = param_id; + pparam.param_value = param_value; + + return wmi_unified_pdev_param_send(pdev_wmi_handle, + &pparam, pdev_id); +} + +int target_if_cfr_enable_cfr_timer(struct wlan_objmgr_pdev *pdev, + uint32_t cfr_timer) +{ + struct pdev_cfr *pa; + int retval; + + pa = wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_CFR); + if (pa == NULL) + return QDF_STATUS_E_FAILURE; + + if (!cfr_timer) { + /* disable periodic cfr capture */ + retval = + target_if_cfr_pdev_set_param(pdev, + wmi_pdev_param_per_peer_prd_cfr_enable, + WMI_HOST_PEER_CFR_TIMER_DISABLE); + + if (retval == QDF_STATUS_SUCCESS) + pa->cfr_timer_enable = 0; + } else { + /* enable periodic cfr capture (default base timer is 10ms ) */ + retval = + target_if_cfr_pdev_set_param(pdev, + wmi_pdev_param_per_peer_prd_cfr_enable, + WMI_HOST_PEER_CFR_TIMER_ENABLE); + + if (retval == QDF_STATUS_SUCCESS) + pa->cfr_timer_enable = 1; + } + + return retval; +} + +int target_if_cfr_get_target_type(struct wlan_objmgr_psoc *psoc) +{ + uint32_t target_type = 0; + struct wlan_lmac_if_target_tx_ops *target_type_tx_ops; + + target_type_tx_ops = &psoc->soc_cb.tx_ops.target_tx_ops; + + if (target_type_tx_ops->tgt_get_tgt_type) + target_type = target_type_tx_ops->tgt_get_tgt_type(psoc); + + return target_type; +} + +#ifdef CFR_USE_FIXED_FOLDER +int target_if_cfr_init_pdev(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + uint32_t target_type; + QDF_STATUS status; + + target_type = target_if_cfr_get_target_type(psoc); + + if (target_type == TARGET_TYPE_QCA6490) { + status = cfr_6490_init_pdev(psoc, pdev); + } else { + cfr_info("unsupport chip"); + status = QDF_STATUS_SUCCESS; + } + + return qdf_status_to_os_return(status); +} + +int target_if_cfr_deinit_pdev(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + uint32_t target_type; + QDF_STATUS status; + + target_type = target_if_cfr_get_target_type(psoc); + + if (target_type == TARGET_TYPE_QCA6490) { + status = cfr_6490_deinit_pdev(psoc, pdev); + } else { + cfr_info("unsupport chip"); + status = QDF_STATUS_SUCCESS; + } + + return qdf_status_to_os_return(status); +} +#else +int target_if_cfr_init_pdev(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + uint32_t target_type; + struct pdev_cfr *pa; + struct psoc_cfr *cfr_sc; + + pa = wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_CFR); + if (pa == NULL) + return QDF_STATUS_E_FAILURE; + + /* Reset unassociated entries for every init */ + qdf_mem_zero(&pa->unassoc_pool[0], MAX_CFR_ENABLED_CLIENTS * + sizeof(struct unassoc_pool_entry)); + + cfr_sc = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_CFR); + + if (cfr_sc == NULL) + return QDF_STATUS_E_FAILURE; + + target_type = target_if_cfr_get_target_type(psoc); + + if (target_type == TARGET_TYPE_QCA8074V2) { + pa->is_cfr_capable = cfr_sc->is_cfr_capable; + return cfr_8074v2_init_pdev(psoc, pdev); + } else if ((target_type == TARGET_TYPE_IPQ4019) || + (target_type == TARGET_TYPE_QCA9984) || + (target_type == TARGET_TYPE_QCA9888)) { + + pa->is_cfr_capable = cfr_sc->is_cfr_capable; + + return cfr_wifi2_0_init_pdev(psoc, pdev); + } else if (target_type == TARGET_TYPE_QCA6018) { + pa->is_cfr_capable = cfr_sc->is_cfr_capable; + return cfr_6018_init_pdev(psoc, pdev); + } else + return QDF_STATUS_E_NOSUPPORT; +} + +int target_if_cfr_deinit_pdev(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + uint32_t target_type; + + target_type = target_if_cfr_get_target_type(psoc); + + if (target_type == TARGET_TYPE_QCA8074V2) { + return cfr_8074v2_deinit_pdev(psoc, pdev); + } else if ((target_type == TARGET_TYPE_IPQ4019) || + (target_type == TARGET_TYPE_QCA9984) || + (target_type == TARGET_TYPE_QCA9888)) { + + return cfr_wifi2_0_deinit_pdev(psoc, pdev); + } else if (target_type == TARGET_TYPE_QCA6018) { + return cfr_6018_deinit_pdev(psoc, pdev); + } else + return QDF_STATUS_E_NOSUPPORT; +} +#endif + +#ifdef WLAN_ENH_CFR_ENABLE +#ifdef QCA_WIFI_QCA6490 +static uint8_t target_if_cfr_get_mac_id(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_channel *bss_chan; + struct pdev_cfr *pcfr; + uint8_t mac_id = 0; + + if (!pdev) { + cfr_err("null pdev"); + return mac_id; + } + + mac_id = wlan_objmgr_pdev_get_pdev_id(pdev); + pcfr = wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_CFR); + if (!pcfr) { + cfr_err("null pcfr"); + return mac_id; + } + + if (pcfr->rcc_param.vdev_id == CFR_INVALID_VDEV_ID) + return mac_id; + + vdev = wlan_objmgr_get_vdev_by_id_from_pdev(pdev, + pcfr->rcc_param.vdev_id, + WLAN_CFR_ID); + if (!vdev) { + cfr_err("null vdev"); + return mac_id; + } + + bss_chan = wlan_vdev_mlme_get_bss_chan(vdev); + if (!bss_chan) { + cfr_info("null bss chan"); + wlan_objmgr_vdev_release_ref(vdev, WLAN_CFR_ID); + return mac_id; + } + + cfr_debug("bss freq %d", bss_chan->ch_freq); + if (wlan_reg_is_24ghz_ch_freq(bss_chan->ch_freq)) + mac_id = CFR_MAC_ID_24G; + else + mac_id = CFR_MAC_ID_5G; + + pcfr->rcc_param.srng_id = mac_id; + wlan_objmgr_vdev_release_ref(vdev, WLAN_CFR_ID); + + return mac_id; +} + +static uint8_t target_if_cfr_get_pdev_id(struct wlan_objmgr_pdev *pdev) +{ + return target_if_cfr_get_mac_id(pdev); +} +#else +static uint8_t target_if_cfr_get_pdev_id(struct wlan_objmgr_pdev *pdev) +{ + return wlan_objmgr_pdev_get_pdev_id(pdev); +} +#endif /* QCA_WIFI_QCA6490 */ + +QDF_STATUS target_if_cfr_config_rcc(struct wlan_objmgr_pdev *pdev, + struct cfr_rcc_param *rcc_info) +{ + QDF_STATUS status; + struct wmi_unified *pdev_wmi_handle = NULL; + + pdev_wmi_handle = lmac_get_pdev_wmi_handle(pdev); + if (!pdev_wmi_handle) { + cfr_err("pdev_wmi_handle is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + rcc_info->pdev_id = target_if_cfr_get_pdev_id(pdev); + rcc_info->num_grp_tlvs = + count_set_bits(rcc_info->modified_in_curr_session); + + status = wmi_unified_send_cfr_rcc_cmd(pdev_wmi_handle, rcc_info); + return status; +} + +void target_if_cfr_default_ta_ra_config(struct cfr_rcc_param *rcc_info, + bool allvalid, uint16_t reset_cfg) +{ + struct ta_ra_cfr_cfg *curr_cfg = NULL; + int grp_id; + unsigned long bitmap = reset_cfg; + uint8_t def_mac[QDF_MAC_ADDR_SIZE] = {0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF}; + uint8_t null_mac[QDF_MAC_ADDR_SIZE] = {0, 0, 0, 0, 0, 0}; + + for (grp_id = 0; grp_id < MAX_TA_RA_ENTRIES; grp_id++) { + if (qdf_test_bit(grp_id, &bitmap)) { + curr_cfg = &rcc_info->curr[grp_id]; + qdf_mem_copy(curr_cfg->tx_addr, + def_mac, QDF_MAC_ADDR_SIZE); + qdf_mem_copy(curr_cfg->tx_addr_mask, + null_mac, QDF_MAC_ADDR_SIZE); + qdf_mem_copy(curr_cfg->rx_addr, + def_mac, QDF_MAC_ADDR_SIZE); + qdf_mem_copy(curr_cfg->rx_addr_mask, + null_mac, QDF_MAC_ADDR_SIZE); + curr_cfg->bw = 0xf; + curr_cfg->nss = 0xff; + curr_cfg->mgmt_subtype_filter = 0xffff; + curr_cfg->ctrl_subtype_filter = 0xffff; + curr_cfg->data_subtype_filter = 0xffff; + if (!allvalid) { + curr_cfg->valid_ta = 0; + curr_cfg->valid_ta_mask = 0; + curr_cfg->valid_ra = 0; + curr_cfg->valid_ra_mask = 0; + curr_cfg->valid_bw_mask = 0; + curr_cfg->valid_nss_mask = 0; + curr_cfg->valid_mgmt_subtype = 0; + curr_cfg->valid_ctrl_subtype = 0; + curr_cfg->valid_data_subtype = 0; + } else { + curr_cfg->valid_ta = 1; + curr_cfg->valid_ta_mask = 1; + curr_cfg->valid_ra = 1; + curr_cfg->valid_ra_mask = 1; + curr_cfg->valid_bw_mask = 1; + curr_cfg->valid_nss_mask = 1; + curr_cfg->valid_mgmt_subtype = 1; + curr_cfg->valid_ctrl_subtype = 1; + curr_cfg->valid_data_subtype = 1; + } + } + } +} +#endif + +#ifdef WLAN_ENH_CFR_ENABLE +#ifdef CFR_USE_FIXED_FOLDER +static void target_if_enh_cfr_add_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + tx_ops->cfr_tx_ops.cfr_subscribe_ppdu_desc = + target_if_cfr_subscribe_ppdu_desc; +} +#else +static void target_if_enh_cfr_add_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* CFR_USE_FIXED_FOLDER */ +static void target_if_enh_cfr_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + tx_ops->cfr_tx_ops.cfr_config_rcc = + target_if_cfr_config_rcc; + tx_ops->cfr_tx_ops.cfr_start_lut_timer = + target_if_cfr_start_lut_age_timer; + tx_ops->cfr_tx_ops.cfr_stop_lut_timer = + target_if_cfr_stop_lut_age_timer; + tx_ops->cfr_tx_ops.cfr_default_ta_ra_cfg = + target_if_cfr_default_ta_ra_config; + tx_ops->cfr_tx_ops.cfr_dump_lut_enh = + target_if_cfr_dump_lut_enh; + tx_ops->cfr_tx_ops.cfr_rx_tlv_process = + target_if_cfr_rx_tlv_process; + tx_ops->cfr_tx_ops.cfr_update_global_cfg = + target_if_cfr_update_global_cfg; + target_if_enh_cfr_add_ops(tx_ops); +} +#else +static void target_if_enh_cfr_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif + +void target_if_cfr_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + tx_ops->cfr_tx_ops.cfr_init_pdev = + target_if_cfr_init_pdev; + tx_ops->cfr_tx_ops.cfr_deinit_pdev = + target_if_cfr_deinit_pdev; + tx_ops->cfr_tx_ops.cfr_enable_cfr_timer = + target_if_cfr_enable_cfr_timer; + tx_ops->cfr_tx_ops.cfr_start_capture = + target_if_cfr_start_capture; + tx_ops->cfr_tx_ops.cfr_stop_capture = + target_if_cfr_stop_capture; + target_if_enh_cfr_tx_ops(tx_ops); +} + +void target_if_cfr_set_cfr_support(struct wlan_objmgr_psoc *psoc, + uint8_t value) +{ + if (psoc->soc_cb.rx_ops.cfr_rx_ops.cfr_support_set) + psoc->soc_cb.rx_ops.cfr_rx_ops.cfr_support_set(psoc, value); +} + +void target_if_cfr_info_send(struct wlan_objmgr_pdev *pdev, void *head, + size_t hlen, void *data, size_t dlen, void *tail, + size_t tlen) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + + if (psoc->soc_cb.rx_ops.cfr_rx_ops.cfr_info_send) + psoc->soc_cb.rx_ops.cfr_rx_ops.cfr_info_send(pdev, head, hlen, + data, dlen, tail, + tlen); +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/cfr/src/target_if_cfr_6018.c b/drivers/staging/qca-wifi-host-cmn/target_if/cfr/src/target_if_cfr_6018.c new file mode 100644 index 0000000000000000000000000000000000000000..de491aa41e2ca1f8f3a97e5e999ec51eccd88787 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/cfr/src/target_if_cfr_6018.c @@ -0,0 +1,1638 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef DIRECT_BUF_RX_ENABLE +#include +#endif +#include +#include "cdp_txrx_ctrl.h" + +#define NUM_CHAINS_FW_TO_HOST(n) ((1 << ((n) + 1)) - 1) + +static u_int32_t end_magic = 0xBEAFDEAD; +/** + * get_lut_entry() - Retrieve LUT entry using cookie number + * @pcfr: PDEV CFR object + * @offset: cookie number + * + * Return: look up table entry + */ +static struct look_up_table *get_lut_entry(struct pdev_cfr *pcfr, + int offset) +{ + if (offset >= pcfr->lut_num) { + cfr_err("Invalid offset %d, lut_num %d", + offset, pcfr->lut_num); + return NULL; + } + + return pcfr->lut[offset]; +} + +/** + * release_lut_entry_enh() - Clear all params in an LUT entry + * @pdev: objmgr PDEV + * @lut: pointer to LUT + * + * Return: status + */ +static int release_lut_entry_enh(struct wlan_objmgr_pdev *pdev, + struct look_up_table *lut) +{ + lut->dbr_recv = false; + lut->tx_recv = false; + lut->data = NULL; + lut->data_len = 0; + lut->dbr_ppdu_id = 0; + lut->tx_ppdu_id = 0; + lut->dbr_tstamp = 0; + lut->txrx_tstamp = 0; + lut->tx_address1 = 0; + lut->tx_address2 = 0; + lut->dbr_address = 0; + qdf_mem_zero(&lut->header, sizeof(struct csi_cfr_header)); + + return 0; +} + +/** + * target_if_cfr_dump_lut_enh() - dump all valid lut entries + * @pdev: objmgr pdev + * + * return: none + */ +void target_if_cfr_dump_lut_enh(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_cfr *pcfr; + struct look_up_table *lut = NULL; + int i = 0; + uint64_t diff; + QDF_STATUS retval = 0; + + retval = wlan_objmgr_pdev_try_get_ref(pdev, WLAN_CFR_ID); + if (retval != QDF_STATUS_SUCCESS) { + cfr_err("failed to get pdev reference"); + return; + } + + pcfr = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CFR); + if (!pcfr) { + cfr_err("pdev object for CFR is null"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + return; + } + + for (i = 0; i < pcfr->lut_num; i++) { + lut = get_lut_entry(pcfr, i); + if (!lut) + continue; + if (lut->dbr_recv ^ lut->tx_recv) { + diff = (lut->dbr_tstamp > lut->txrx_tstamp) ? + (lut->dbr_tstamp - lut->txrx_tstamp) : + (lut->txrx_tstamp - lut->dbr_tstamp); + cfr_err("idx:%d dbrevnt: %d txrxevent: %d " + "dbrppdu:0x%x txrxppdu:0x%x dbr_tstamp: %llu " + "txrx_tstamp: %llu diff: %llu\n", + i, lut->dbr_recv, lut->tx_recv, + lut->dbr_ppdu_id, lut->tx_ppdu_id, + lut->dbr_tstamp, lut->txrx_tstamp, diff); + } + + } + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); +} + +/** + * cfr_free_pending_dbr_events() - Flush all pending DBR events. This is useful + * in cases where for RXTLV drops in host monitor status ring is huge. + * @pdev: objmgr pdev + * + * return: none + */ +static void cfr_free_pending_dbr_events(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_cfr *pcfr; + struct look_up_table *lut = NULL; + int i = 0; + QDF_STATUS retval = 0; + + retval = wlan_objmgr_pdev_try_get_ref(pdev, WLAN_CFR_ID); + if (retval != QDF_STATUS_SUCCESS) { + cfr_err("failed to get pdev reference"); + return; + } + + pcfr = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CFR); + if (!pcfr) { + cfr_err("pdev object for CFR is null"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + return; + } + + for (i = 0; i < pcfr->lut_num; i++) { + lut = get_lut_entry(pcfr, i); + if (!lut) + continue; + + if (lut->dbr_recv && !lut->tx_recv && + (lut->dbr_tstamp < pcfr->last_success_tstamp)) { + target_if_dbr_buf_release(pdev, DBR_MODULE_CFR, + lut->dbr_address, + i, 0); + pcfr->flush_dbr_cnt++; + release_lut_entry_enh(pdev, lut); + } + } + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); +} + +/** + * dump_freeze_tlv() - Dump freeze TLV sent in enhanced DMA header + * @freeze_tlv: Freeze TLV sent from MAC to PHY + * @cookie: Index into lookup table + * + * Return: none + */ +static void dump_freeze_tlv(void *freeze_tlv, uint32_t cookie) +{ + struct macrx_freeze_capture_channel *freeze = + (struct macrx_freeze_capture_channel *)freeze_tlv; + + cfr_debug("<%u>\n" + "freeze: %d capture_reason: %d packet_type: 0x%x\n" + "packet_subtype: 0x%x sw_peer_id_valid: %d sw_peer_id: %d\n" + "phy_ppdu_id: 0x%04x packet_ta_upper_16: 0x%04x\n" + "packet_ta_mid_16: 0x%04x packet_ta_lower_16: 0x%04x\n" + "packet_ra_upper_16: 0x%04x packet_ra_mid_16: 0x%04x\n" + "packet_ra_lower_16: 0x%04x tsf_timestamp_63_48: 0x%04x\n" + "tsf_timestamp_47_32: 0x%04x tsf_timestamp_31_16: 0x%04x\n" + "tsf_timestamp_15_0: 0x%04x user_index: %d directed: %d\n", + cookie, + freeze->freeze, + freeze->capture_reason, + freeze->packet_type, + freeze->packet_sub_type, + freeze->sw_peer_id_valid, + freeze->sw_peer_id, + freeze->phy_ppdu_id, + freeze->packet_ta_upper_16, + freeze->packet_ta_mid_16, + freeze->packet_ta_lower_16, + freeze->packet_ra_upper_16, + freeze->packet_ra_mid_16, + freeze->packet_ra_lower_16, + freeze->tsf_timestamp_63_48, + freeze->tsf_timestamp_47_32, + freeze->tsf_timestamp_31_16, + freeze->tsf_timestamp_15_0, + freeze->user_index, + freeze->directed); +} + +/** + * dump_mu_rx_info() - Dump MU info in enhanced DMA header + * @mu_rx_user_info: MU info sent by ucode + * @mu_rx_num_users: Number of MU users in UL-MU-PPDU + * @cookie: Index into lookup table + * + * Return: none + */ +static void dump_mu_rx_info(void *mu_rx_user_info, + uint8_t mu_rx_num_users, + uint32_t cookie) +{ + uint8_t i; + struct uplink_user_setup_info *ul_mu_user_info = + (struct uplink_user_setup_info *) mu_rx_user_info; + + for (i = 0 ; i < mu_rx_num_users; i++) { + cfr_debug("<%u>\n" + "\n" + "bw_info_valid = %d\n" + "uplink_receive_type = %d\n" + "uplink_11ax_mcs = %d\n" + "ru_width = %d\n" + "nss = %d\n" + "stream_offset = %d\n" + "sta_dcm = %d\n" + "sta_coding = %d\n" + "ru_start_index = %d\n", + cookie, + i, + ul_mu_user_info->bw_info_valid, + ul_mu_user_info->uplink_receive_type, + ul_mu_user_info->uplink_11ax_mcs, + ul_mu_user_info->ru_width, + ul_mu_user_info->nss, + ul_mu_user_info->stream_offset, + ul_mu_user_info->sta_dcm, + ul_mu_user_info->sta_coding, + ul_mu_user_info->ru_start_index); + ul_mu_user_info += sizeof(struct uplink_user_setup_info); + } +} + +static void dump_metadata(struct csi_cfr_header *header, uint32_t cookie) +{ + uint8_t user_id, chain_id; + struct cfr_metadata_version_3 *meta = &header->u.meta_v3; + uint8_t *usermac = NULL; + + cfr_debug("<%u>\n" + "start_magic_num = 0x%x\n" + "vendorid = 0x%x\n" + "cfr_metadata_version = %d\n" + "cfr_data_version = %d\n" + "chip_type = %d\n" + "platform_type = %d\n" + "status = %d\n" + "capture_bw = %d\n" + "channel_bw = %d\n" + "phy_mode = %d\n" + "prim20_chan = %d\n" + "center_freq1 = %d\n" + "center_freq2 = %d\n" + "ack_capture_mode = %d\n" + "cfr_capture_type = %d\n" + "sts_count = %d\n" + "num_rx_chain = %d\n" + "timestamp = %llu\n" + "length = %d\n" + "is_mu_ppdu = %d\n" + "num_users = %d\n", + cookie, + header->start_magic_num, + header->vendorid, + header->cfr_metadata_version, + header->cfr_data_version, + header->chip_type, + header->pltform_type, + meta->status, + meta->capture_bw, + meta->channel_bw, + meta->phy_mode, + meta->prim20_chan, + meta->center_freq1, + meta->center_freq2, + meta->capture_mode, + meta->capture_type, + meta->sts_count, + meta->num_rx_chain, + meta->timestamp, + meta->length, + meta->is_mu_ppdu, + meta->num_mu_users); + + if (meta->is_mu_ppdu) { + for (user_id = 0; user_id < meta->num_mu_users; user_id++) { + usermac = meta->peer_addr.mu_peer_addr[user_id]; + cfr_debug("peermac[%d]: " QDF_MAC_ADDR_FMT, + user_id, QDF_MAC_ADDR_REF(usermac)); + } + } else { + cfr_debug("peermac: " QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(meta->peer_addr.su_peer_addr)); + } + + for (chain_id = 0; chain_id < HOST_MAX_CHAINS; chain_id++) { + cfr_debug("chain_rssi[%d] = %d\n", + chain_id, + meta->chain_rssi[chain_id]); + } + + for (chain_id = 0; chain_id < HOST_MAX_CHAINS; chain_id++) { + cfr_debug("chain_phase[%d] = %d\n", + chain_id, + meta->chain_phase[chain_id]); + } +} +/** + * dump_enh_dma_hdr() - Dump enhanced DMA header populated by ucode + * @dma_hdr: pointer to enhanced DMA header + * @freeze_tlv: pointer to MACRX_FREEZE_CAPTURE_CHANNEL TLV + * @mu_rx_user_info: UPLINK_USER_SETUP_INFO TLV + * @header: pointer to metadata passed to userspace + * @error: Indicates whether it is an error + * @cookie: Index into lookup table + * + * Return: none + */ +static void dump_enh_dma_hdr(struct whal_cfir_enhanced_hdr *dma_hdr, + void *freeze_tlv, void *mu_rx_user_info, + struct csi_cfr_header *header, int error, + uint32_t cookie) +{ + if (!error) { + cfr_debug("<%u>\n" + "Tag: 0x%02x Length: %d udone: %d\n" + "ctype: %d preamble: %d Nss: %d\n" + "num_chains: %d bw: %d peervalid: %d\n" + "peer_id: %d ppdu_id: 0x%04x total_bytes: %d\n" + "header_version: %d target_id: %d cfr_fmt: %d\n" + "mu_rx_data_incl: %d freeze_data_incl: %d\n" + "mu_rx_num_users: %d decimation_factor: %d\n", + cookie, + dma_hdr->tag, + dma_hdr->length, + dma_hdr->upload_done, + dma_hdr->capture_type, + dma_hdr->preamble_type, + dma_hdr->nss, + dma_hdr->num_chains, + dma_hdr->upload_pkt_bw, + dma_hdr->sw_peer_id_valid, + dma_hdr->sw_peer_id, + dma_hdr->phy_ppdu_id, + dma_hdr->total_bytes, + dma_hdr->header_version, + dma_hdr->target_id, + dma_hdr->cfr_fmt, + dma_hdr->mu_rx_data_incl, + dma_hdr->freeze_data_incl, + dma_hdr->mu_rx_num_users, + dma_hdr->decimation_factor); + + if (dma_hdr->freeze_data_incl) + dump_freeze_tlv(freeze_tlv, cookie); + + if (dma_hdr->mu_rx_data_incl) + dump_mu_rx_info(mu_rx_user_info, + dma_hdr->mu_rx_num_users, + cookie); + } else { + cfr_err("<%u>\n" + "Tag: 0x%02x Length: %d udone: %d\n" + "ctype: %d preamble: %d Nss: %d\n" + "num_chains: %d bw: %d peervalid: %d\n" + "peer_id: %d ppdu_id: 0x%04x total_bytes: %d\n" + "header_version: %d target_id: %d cfr_fmt: %d\n" + "mu_rx_data_incl: %d freeze_data_incl: %d\n" + "mu_rx_num_users: %d decimation_factor: %d\n", + cookie, + dma_hdr->tag, + dma_hdr->length, + dma_hdr->upload_done, + dma_hdr->capture_type, + dma_hdr->preamble_type, + dma_hdr->nss, + dma_hdr->num_chains, + dma_hdr->upload_pkt_bw, + dma_hdr->sw_peer_id_valid, + dma_hdr->sw_peer_id, + dma_hdr->phy_ppdu_id, + dma_hdr->total_bytes, + dma_hdr->header_version, + dma_hdr->target_id, + dma_hdr->cfr_fmt, + dma_hdr->mu_rx_data_incl, + dma_hdr->freeze_data_incl, + dma_hdr->mu_rx_num_users, + dma_hdr->decimation_factor); + } +} + + +/** + * extract_peer_mac_from_freeze_tlv() - extract macaddr from freeze tlv + * @freeze_tlv: Freeze TLV sent from MAC to PHY + * @peermac: macaddr of the peer + * + * Return: none + */ +static void +extract_peer_mac_from_freeze_tlv(void *freeze_tlv, uint8_t *peermac) +{ + struct macrx_freeze_capture_channel *freeze = + (struct macrx_freeze_capture_channel *)freeze_tlv; + + peermac[0] = freeze->packet_ta_lower_16 & 0x00FF; + peermac[1] = (freeze->packet_ta_lower_16 & 0xFF00) >> 8; + peermac[2] = freeze->packet_ta_mid_16 & 0x00FF; + peermac[3] = (freeze->packet_ta_mid_16 & 0xFF00) >> 8; + peermac[4] = freeze->packet_ta_upper_16 & 0x00FF; + peermac[5] = (freeze->packet_ta_upper_16 & 0xFF00) >> 8; +} + +/** + * check_dma_length() - Sanity check DMA header and payload length + * @dma_hdr: pointer to enhanced DMA header + * + * Return: QDF_STATUS + */ +static QDF_STATUS check_dma_length(struct look_up_table *lut) +{ + if (lut->header_length <= CYP_MAX_HEADER_LENGTH_WORDS && + lut->payload_length <= CYP_MAX_DATA_LENGTH_BYTES) { + return QDF_STATUS_SUCCESS; + } else { + return QDF_STATUS_E_FAILURE; + } +} + +/** + * correlate_and_relay_enh() - Correlate TXRX and DBR events and stream CFR + * data to userspace + * @pdev: objmgr PDEV + * @cookie: Index into lookup table + * @lut: pointer to lookup table + * @module_id: ID of the event received + * 0 - DBR event + * 1 - TXRX event + * + * Return: + * - STATUS_ERROR + * - STATUS_HOLD + * - STATUS_STREAM_AND_RELEASE + */ +static int correlate_and_relay_enh(struct wlan_objmgr_pdev *pdev, + uint32_t cookie, + struct look_up_table *lut, + uint8_t module_id) +{ + struct pdev_cfr *pcfr; + uint64_t diff; + int status = STATUS_ERROR; + + if (module_id > 1) { + cfr_err("Received request with invalid mod id. Investigate!!"); + QDF_ASSERT(0); + status = STATUS_ERROR; + goto done; + } + + + pcfr = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CFR); + + if (module_id == CORRELATE_TX_EV_MODULE_ID) { + if (lut->tx_recv) + pcfr->cfr_dma_aborts++; + lut->tx_recv = true; + } else if (module_id == CORRELATE_DBR_MODULE_ID) { + pcfr->dbr_evt_cnt++; + lut->dbr_recv = true; + } + + if ((lut->dbr_recv == true) && (lut->tx_recv == true)) { + if (lut->dbr_ppdu_id == lut->tx_ppdu_id) { + + pcfr->last_success_tstamp = lut->dbr_tstamp; + if (lut->dbr_tstamp > lut->txrx_tstamp) { + diff = lut->dbr_tstamp - lut->txrx_tstamp; + cfr_debug("<%u>: " + "TXRX evt -> DBR evt" + "(delay = %llu ms)\n", cookie, diff); + } else if (lut->txrx_tstamp > lut->dbr_tstamp) { + diff = lut->txrx_tstamp - lut->dbr_tstamp; + cfr_debug("<%u>: " + "DBR evt -> TXRX evt" + "(delay = %llu ms)\n", cookie, diff); + } + + /* + * Flush pending dbr events, if newer PPDU TLV is + * received + */ + cfr_free_pending_dbr_events(pdev); + + if (check_dma_length(lut) == QDF_STATUS_SUCCESS) { + pcfr->release_cnt++; + cfr_debug("<%u>:Stream and release " + "CFR data for " + "ppdu_id:0x%04x\n", cookie, + lut->tx_ppdu_id); + status = STATUS_STREAM_AND_RELEASE; + goto done; + } else { + pcfr->invalid_dma_length_cnt++; + cfr_err("<%u>:CFR buffers " + "received with invalid length " + "header_length_words = %d " + "cfr_payload_length_bytes = %d " + "ppdu_id:0x%04x\n", + cookie, + lut->header_length, + lut->payload_length, + lut->tx_ppdu_id); + /* + * Assert here as length exceeding the allowed + * limit would anyway manifest as random crash + */ + QDF_ASSERT(0); + status = STATUS_ERROR; + goto done; + } + } else { + /* + * When there is a ppdu id mismatch, discard the TXRX + * event since multiple PPDUs are likely to have same + * dma addr, due to ucode aborts + */ + cfr_debug("Received new dbr event for same " + "cookie %u", + cookie); + lut->tx_recv = false; + lut->tx_ppdu_id = 0; + pcfr->clear_txrx_event++; + pcfr->cfr_dma_aborts++; + status = STATUS_HOLD; + } + } else { + status = STATUS_HOLD; + } +done: + return status; +} + +/** + * target_if_cfr_rx_tlv_process() - Process PPDU status TLVs and store info in + * lookup table + * @pdev_obj: PDEV object + * @nbuf: ppdu info + * + * Return: none + */ +void target_if_cfr_rx_tlv_process(struct wlan_objmgr_pdev *pdev, void *nbuf) +{ + struct cdp_rx_indication_ppdu *cdp_rx_ppdu; + struct cdp_rx_stats_ppdu_user *rx_stats_peruser; + struct cdp_rx_ppdu_cfr_info *cfr_info; + qdf_dma_addr_t buf_addr = 0, buf_addr_extn = 0; + struct pdev_cfr *pcfr; + struct look_up_table *lut = NULL; + struct csi_cfr_header *header = NULL; + uint32_t cookie; + struct wlan_objmgr_psoc *psoc; + struct wlan_channel *bss_chan; + enum wlan_phymode ch_phymode; + uint16_t ch_freq; + uint32_t ch_cfreq1; + uint32_t ch_cfreq2; + struct wlan_objmgr_vdev *vdev = NULL; + int i, status = 0; + QDF_STATUS retval = 0; + struct wlan_lmac_if_cfr_rx_ops *cfr_rx_ops = NULL; + struct cfr_metadata_version_3 *meta = NULL; + uint8_t srng_id = 0; + + if (qdf_unlikely(!pdev)) { + cfr_err("pdev is null\n"); + qdf_nbuf_free(nbuf); + return; + } + + retval = wlan_objmgr_pdev_try_get_ref(pdev, WLAN_CFR_ID); + if (qdf_unlikely(retval != QDF_STATUS_SUCCESS)) { + cfr_err("failed to get pdev reference"); + qdf_nbuf_free(nbuf); + return; + } + + pcfr = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CFR); + if (qdf_unlikely(!pcfr)) { + cfr_err("pdev object for CFR is NULL"); + goto done; + } + + cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)qdf_nbuf_data(nbuf); + cfr_info = &cdp_rx_ppdu->cfr_info; + + if (!cfr_info->bb_captured_channel) + goto done; + + psoc = wlan_pdev_get_psoc(pdev); + if (qdf_unlikely(!psoc)) { + cfr_err("psoc is null\n"); + goto done; + } + + cfr_rx_ops = &psoc->soc_cb.rx_ops.cfr_rx_ops; + buf_addr_extn = cfr_info->rtt_che_buffer_pointer_high8 & 0xF; + buf_addr = (cfr_info->rtt_che_buffer_pointer_low32 | + ((uint64_t)buf_addr_extn << 32)); + + srng_id = pcfr->rcc_param.srng_id; + if (target_if_dbr_cookie_lookup(pdev, DBR_MODULE_CFR, buf_addr, + &cookie, srng_id)) { + cfr_debug("Cookie lookup failure for addr: 0x%pK", + (void *)((uintptr_t)buf_addr)); + goto done; + } + + cfr_debug("<%u>:buffer address: 0x%pK \n" + " ppdu_id: 0x%04x\n" + " BB_CAPTURED_CHANNEL = %d\n" + " RX_LOCATION_INFO_VALID = %d\n" + " RTT_CHE_BUFFER_POINTER_LOW32 = %x\n" + " RTT_CHE_BUFFER_POINTER_HIGH8 = %x\n" + " CHAN_CAPTURE_STATUS = %d\n", + cookie, + (void *)((uintptr_t)buf_addr), + cdp_rx_ppdu->ppdu_id, + cfr_info->bb_captured_channel, + cfr_info->rx_location_info_valid, + cfr_info->rtt_che_buffer_pointer_low32, + cfr_info->rtt_che_buffer_pointer_high8, + cfr_info->chan_capture_status); + + lut = get_lut_entry(pcfr, cookie); + if (qdf_unlikely(!lut)) { + cfr_err("lut is NULL"); + goto done; + } + + if (pcfr->rcc_param.vdev_id == CFR_INVALID_VDEV_ID) + vdev = wlan_objmgr_pdev_get_first_vdev(pdev, WLAN_CFR_ID); + else + vdev = wlan_objmgr_get_vdev_by_id_from_pdev( + pdev, pcfr->rcc_param.vdev_id, WLAN_CFR_ID); + if (qdf_unlikely(!vdev)) { + cfr_debug("vdev is null\n"); + goto done; + } + + bss_chan = wlan_vdev_mlme_get_bss_chan(vdev); + ch_freq = bss_chan->ch_freq; + ch_cfreq1 = bss_chan->ch_cfreq1; + ch_cfreq2 = bss_chan->ch_cfreq2; + ch_phymode = bss_chan->ch_phymode; + wlan_objmgr_vdev_release_ref(vdev, WLAN_CFR_ID); + + pcfr->rx_tlv_evt_cnt++; + lut->tx_ppdu_id = cdp_rx_ppdu->ppdu_id; + lut->tx_address1 = cfr_info->rtt_che_buffer_pointer_low32; + lut->tx_address2 = cfr_info->rtt_che_buffer_pointer_high8; + lut->txrx_tstamp = qdf_ktime_to_ms(qdf_ktime_get()); + header = &lut->header; + meta = &header->u.meta_v3; + + header->start_magic_num = 0xDEADBEAF; + header->vendorid = 0x8cfdf0; + header->cfr_metadata_version = CFR_META_VERSION_3; + header->cfr_data_version = CFR_DATA_VERSION_1; + header->chip_type = pcfr->chip_type; + header->pltform_type = CFR_PLATFORM_TYPE_ARM; + header->Reserved = 0; + + meta->status = 1; + meta->phy_mode = ch_phymode; + meta->prim20_chan = ch_freq; + meta->center_freq1 = ch_cfreq1; + meta->center_freq2 = ch_cfreq2; + meta->capture_mode = 0; + + meta->timestamp = cdp_rx_ppdu->timestamp; + meta->is_mu_ppdu = (cdp_rx_ppdu->u.ppdu_type == CDP_RX_TYPE_SU) ? 0 : 1; + meta->num_mu_users = (meta->is_mu_ppdu) ? (cdp_rx_ppdu->num_users) : 0; + + if (meta->num_mu_users > CYP_CFR_MU_USERS) + meta->num_mu_users = CYP_CFR_MU_USERS; + + for (i = 0; i < MAX_CHAIN; i++) + meta->chain_rssi[i] = cdp_rx_ppdu->per_chain_rssi[i]; + + if (cdp_rx_ppdu->u.ppdu_type == CDP_RX_TYPE_SU) { + qdf_mem_copy(meta->peer_addr.su_peer_addr, + cdp_rx_ppdu->mac_addr, + QDF_MAC_ADDR_SIZE); + } else { + for (i = 0 ; i < meta->num_mu_users; i++) { + rx_stats_peruser = &cdp_rx_ppdu->user[i]; + qdf_mem_copy(meta->peer_addr.mu_peer_addr[i], + rx_stats_peruser->mac_addr, + QDF_MAC_ADDR_SIZE); + } + } + status = correlate_and_relay_enh(pdev, cookie, lut, + CORRELATE_TX_EV_MODULE_ID); + if (status == STATUS_STREAM_AND_RELEASE) { + if (cfr_rx_ops->cfr_info_send) + status = cfr_rx_ops->cfr_info_send(pdev, + &lut->header, + sizeof(struct + csi_cfr_header), + lut->data, + lut->data_len, + &end_magic, 4); + dump_metadata(header, cookie); + release_lut_entry_enh(pdev, lut); + target_if_dbr_buf_release(pdev, DBR_MODULE_CFR, buf_addr, + cookie, srng_id); + cfr_debug("Data sent to upper layers, release look up table"); + } else if (status == STATUS_HOLD) { + cfr_debug("HOLD for buffer address: 0x%pK cookie: %u", + (void *)((uintptr_t)buf_addr), cookie); + } else { + cfr_err("Correlation returned invalid status!!"); + } +done: + qdf_nbuf_free(nbuf); + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); +} + +/** + * freeze_reason_to_capture_type() - Convert capture type enum in freeze tlv + * to the cfr type enum shared with userspace + * @freeze_tlv: pointer to MACRX_FREEZE_CAPTURE_CHANNEL TLV + * + * Return: cfr type enum + */ +static uint8_t freeze_reason_to_capture_type(void *freeze_tlv) +{ + struct macrx_freeze_capture_channel *freeze = + (struct macrx_freeze_capture_channel *)freeze_tlv; + + switch (freeze->capture_reason) { + case FREEZE_REASON_TM: + return CFR_TYPE_METHOD_TM; + case FREEZE_REASON_FTM: + return CFR_TYPE_METHOD_FTM; + case FREEZE_REASON_TA_RA_TYPE_FILTER: + return CFR_TYPE_METHOD_TA_RA_TYPE_FILTER; + case FREEZE_REASON_NDPA_NDP: + return CFR_TYPE_METHOD_NDPA_NDP; + case FREEZE_REASON_ALL_PACKET: + return CFR_TYPE_METHOD_ALL_PACKET; + case FREEZE_REASON_ACK_RESP_TO_TM_FTM: + return CFR_TYPE_METHOD_ACK_RESP_TO_TM_FTM; + default: + return CFR_TYPE_METHOD_AUTO; + } + return CFR_TYPE_METHOD_AUTO; +} + +#ifdef DIRECT_BUF_RX_ENABLE +/** + * enh_cfr_dbr_event_handler() - Process DBR event for CFR data DMA completion + * @pdev: PDEV object + * @payload: pointer to CFR data + * + * Return: status + */ +static bool enh_cfr_dbr_event_handler(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_data *payload) +{ + uint8_t *data = NULL; + uint32_t cookie = 0; + struct whal_cfir_enhanced_hdr dma_hdr = {0}; + int length, status = 0; + struct wlan_objmgr_psoc *psoc; + struct pdev_cfr *pcfr; + struct look_up_table *lut = NULL; + struct csi_cfr_header *header = NULL; + void *mu_rx_user_info = NULL, *freeze_tlv = NULL; + uint8_t capture_type = CFR_TYPE_METHOD_AUTO; + uint8_t *peer_macaddr = NULL; + struct wlan_lmac_if_cfr_rx_ops *cfr_rx_ops = NULL; + struct cfr_metadata_version_3 *meta = NULL; + + if ((!pdev) || (!payload)) { + cfr_err("pdev or payload is null"); + return true; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + cfr_err("psoc is null"); + return true; + } + + cfr_rx_ops = &psoc->soc_cb.rx_ops.cfr_rx_ops; + + pcfr = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CFR); + if (!pcfr) { + cfr_err("pdev object for CFR is null"); + return true; + } + + data = payload->vaddr; + cookie = payload->cookie; + + cfr_debug("<%u>:bufferaddr: 0x%pK cookie: %u\n", cookie, + (void *)((uintptr_t)payload->paddr), cookie); + + qdf_mem_copy(&dma_hdr, &data[0], + sizeof(struct whal_cfir_enhanced_hdr)); + + if (dma_hdr.freeze_data_incl) { + freeze_tlv = data + sizeof(struct whal_cfir_enhanced_hdr); + capture_type = freeze_reason_to_capture_type(freeze_tlv); + } + + if (dma_hdr.mu_rx_data_incl) { + mu_rx_user_info = data + + sizeof(struct whal_cfir_enhanced_hdr) + + (dma_hdr.freeze_data_incl ? + sizeof(struct macrx_freeze_capture_channel) : 0); + } + + + length = dma_hdr.length * 4; + length += dma_hdr.total_bytes; /* size of cfr data */ + + lut = get_lut_entry(pcfr, cookie); + if (!lut) { + cfr_err("lut is NULL"); + return true; + } + + lut->data = data; + lut->data_len = length; + lut->dbr_ppdu_id = dma_hdr.phy_ppdu_id; + lut->dbr_address = payload->paddr; + lut->dbr_tstamp = qdf_ktime_to_ms(qdf_ktime_get()); + lut->header_length = dma_hdr.length; + lut->payload_length = dma_hdr.total_bytes; + qdf_mem_copy(&lut->dma_hdr, &dma_hdr, + sizeof(struct whal_cfir_dma_hdr)); + + header = &lut->header; + meta = &header->u.meta_v3; + meta->channel_bw = dma_hdr.upload_pkt_bw; + meta->num_rx_chain = NUM_CHAINS_FW_TO_HOST(dma_hdr.num_chains); + meta->length = length; + /* For Tx based captures, capture type is sent from FW */ + if (capture_type != CFR_TYPE_METHOD_ACK_RESP_TO_TM_FTM) { + meta->capture_type = capture_type; + meta->sts_count = (dma_hdr.nss + 1); + if (!dma_hdr.mu_rx_data_incl) { + /* extract peer addr from freeze tlv */ + peer_macaddr = meta->peer_addr.su_peer_addr; + if (dma_hdr.freeze_data_incl) { + extract_peer_mac_from_freeze_tlv(freeze_tlv, + peer_macaddr); + } + } + } + + if (dma_hdr.freeze_data_incl) { + dump_enh_dma_hdr(&dma_hdr, freeze_tlv, mu_rx_user_info, + header, 0, cookie); + } + + status = correlate_and_relay_enh(pdev, cookie, lut, + CORRELATE_DBR_MODULE_ID); + if (status == STATUS_STREAM_AND_RELEASE) { + /* + * Message format + * Meta data Header + actual payload + trailer + */ + if (cfr_rx_ops->cfr_info_send) + status = cfr_rx_ops->cfr_info_send(pdev, + &lut->header, + sizeof(struct + csi_cfr_header), + lut->data, + lut->data_len, + &end_magic, 4); + dump_metadata(header, cookie); + release_lut_entry_enh(pdev, lut); + cfr_debug("Data sent to upper layers, released look up table"); + status = true; + } else if (status == STATUS_HOLD) { + cfr_debug("TxRx event not received yet. " + "Buffer is not released"); + status = false; + } else { + cfr_err("Correlation returned invalid status!!"); + status = true; + } + + return status; +} + +/** + * target_if_register_to_dbr_enh() - Initialize DBR ring and register callback + * for DBR events + * @pdev: PDEV object + * + * Return: status + */ +static QDF_STATUS +target_if_register_to_dbr_enh(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_direct_buf_rx_tx_ops *dbr_tx_ops = NULL; + struct dbr_module_config dbr_config; + + psoc = wlan_pdev_get_psoc(pdev); + dbr_tx_ops = &psoc->soc_cb.tx_ops.dbr_tx_ops; + dbr_config.num_resp_per_event = DBR_NUM_RESP_PER_EVENT_CFR; + dbr_config.event_timeout_in_ms = DBR_EVENT_TIMEOUT_IN_MS_CFR; + if (dbr_tx_ops->direct_buf_rx_module_register) { + return dbr_tx_ops->direct_buf_rx_module_register + (pdev, DBR_MODULE_CFR, &dbr_config, + enh_cfr_dbr_event_handler); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_unregister_to_dbr_enh() - Unregister callback for DBR events + * @pdev: PDEV object + * + * Return: status + */ +static QDF_STATUS +target_if_unregister_to_dbr_enh(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_direct_buf_rx_tx_ops *dbr_tx_ops = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + dbr_tx_ops = &psoc->soc_cb.tx_ops.dbr_tx_ops; + if (dbr_tx_ops->direct_buf_rx_module_unregister) { + return dbr_tx_ops->direct_buf_rx_module_unregister + (pdev, DBR_MODULE_CFR); + } + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * dump_cfr_peer_tx_event_enh() - Dump TX completion event + * @event: ptr to WMI TX completion event for QOS frames sent during + * one-shot capture + * @cookie: Index into lookup table + * + * Return: none + */ +static void dump_cfr_peer_tx_event_enh(wmi_cfr_peer_tx_event_param *event, + uint32_t cookie) +{ + cfr_debug("<%u>CFR capture method: %d vdev_id: %d mac: " + QDF_MAC_ADDR_FMT, cookie, + event->capture_method, event->vdev_id, + QDF_MAC_ADDR_REF(event->peer_mac_addr.bytes)); + + cfr_debug("<%u>Chan: %d bw: %d phymode: %d cfreq1: %d cfrq2: %d " + "nss: %d\n", + cookie, + event->primary_20mhz_chan, event->bandwidth, + event->phy_mode, event->band_center_freq1, + event->band_center_freq2, event->spatial_streams); + + cfr_debug("<%u>Correlation_info1: 0x%08x " + "Correlation_info2: 0x%08x\n", + cookie, + event->correlation_info_1, event->correlation_info_2); + + cfr_debug("<%u>status: 0x%x ts: %d counter: %d rssi0: 0x%08x\n", + cookie, + event->status, event->timestamp_us, event->counter, + event->chain_rssi[0]); +} + +#ifdef DIRECT_BUF_RX_ENABLE +/** + * enh_prepare_cfr_header_txstatus() - Prepare CFR metadata for TX failures + * @tx_evt_param: ptr to WMI TX completion event + * @header: pointer to metadata + * + * Return: none + */ +static void enh_prepare_cfr_header_txstatus(wmi_cfr_peer_tx_event_param + *tx_evt_param, + struct csi_cfr_header *header) +{ + header->start_magic_num = 0xDEADBEAF; + header->vendorid = 0x8cfdf0; + header->cfr_metadata_version = CFR_META_VERSION_3; + header->cfr_data_version = CFR_DATA_VERSION_1; + header->chip_type = CFR_CAPTURE_RADIO_CYP; + header->pltform_type = CFR_PLATFORM_TYPE_ARM; + header->Reserved = 0; + header->u.meta_v3.status = 0; /* failure */ + header->u.meta_v3.length = 0; + + qdf_mem_copy(&header->u.meta_v2.peer_addr[0], + &tx_evt_param->peer_mac_addr.bytes[0], QDF_MAC_ADDR_SIZE); + +} + +/** + * target_if_peer_capture_event() - WMI TX completion event for one-shot + * capture + * @sc: pointer to offload soc object + * @data: WMI TX completion event buffer + * @datalen: WMI Tx completion event buffer length + * + * Return: status + */ +static int +target_if_peer_capture_event(ol_scn_t sc, uint8_t *data, uint32_t datalen) +{ + QDF_STATUS retval = 0; + struct wmi_unified *wmi_handle; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + uint32_t cookie; + struct pdev_cfr *pcfr; + struct look_up_table *lut = NULL; + struct csi_cfr_header *header = NULL; + struct csi_cfr_header header_error = {0}; + wmi_cfr_peer_tx_event_param tx_evt_param = {0}; + qdf_dma_addr_t buf_addr = 0, buf_addr_temp = 0; + int status; + struct wlan_channel *bss_chan; + struct wlan_lmac_if_cfr_rx_ops *cfr_rx_ops = NULL; + + if (!sc || !data) { + cfr_err("sc or data is null"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(sc); + if (!psoc) { + cfr_err("psoc is null"); + return -EINVAL; + } + + cfr_rx_ops = &psoc->soc_cb.rx_ops.cfr_rx_ops; + + retval = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_CFR_ID); + if (QDF_IS_STATUS_ERROR(retval)) { + cfr_err("unable to get psoc reference"); + return -EINVAL; + } + + wmi_handle = GET_WMI_HDL_FROM_PSOC(psoc); + if (!wmi_handle) { + cfr_err("wmi_handle is null"); + wlan_objmgr_psoc_release_ref(psoc, WLAN_CFR_ID); + return -EINVAL; + } + + + retval = wmi_extract_cfr_peer_tx_event_param(wmi_handle, data, + &tx_evt_param); + + if (retval != QDF_STATUS_SUCCESS) { + cfr_err("Failed to extract cfr tx event param"); + wlan_objmgr_psoc_release_ref(psoc, WLAN_CFR_ID); + return -EINVAL; + } + + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, tx_evt_param.vdev_id, + WLAN_CFR_ID); + if (!vdev) { + cfr_err("vdev is null"); + wlan_objmgr_psoc_release_ref(psoc, WLAN_CFR_ID); + return -EINVAL; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + cfr_err("pdev is null"); + wlan_objmgr_psoc_release_ref(psoc, WLAN_CFR_ID); + wlan_objmgr_vdev_release_ref(vdev, WLAN_CFR_ID); + return -EINVAL; + } + + retval = wlan_objmgr_pdev_try_get_ref(pdev, WLAN_CFR_ID); + if (retval != QDF_STATUS_SUCCESS) { + cfr_err("failed to get pdev reference"); + wlan_objmgr_psoc_release_ref(psoc, WLAN_CFR_ID); + wlan_objmgr_vdev_release_ref(vdev, WLAN_CFR_ID); + return -EINVAL; + } + + pcfr = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CFR); + if (!pcfr) { + cfr_err("pdev object for CFR is NULL"); + retval = -EINVAL; + goto end; + } + + if ((tx_evt_param.status & PEER_CFR_CAPTURE_EVT_PS_STATUS_MASK) == 1) { + cfr_err("CFR capture failed as peer is in powersave: " + QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(tx_evt_param.peer_mac_addr.bytes)); + + enh_prepare_cfr_header_txstatus(&tx_evt_param, &header_error); + if (cfr_rx_ops->cfr_info_send) + cfr_rx_ops->cfr_info_send(pdev, + &header_error, + sizeof(struct + csi_cfr_header), + NULL, 0, &end_magic, 4); + + retval = -EINVAL; + goto end; + } + + if ((tx_evt_param.status & PEER_CFR_CAPTURE_EVT_STATUS_MASK) == 0) { + cfr_debug("CFR capture failed for peer: " QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(tx_evt_param.peer_mac_addr.bytes)); + retval = -EINVAL; + goto end; + } + + if (tx_evt_param.status & CFR_TX_EVT_STATUS_MASK) { + cfr_debug("TX packet returned status %d for peer: " + QDF_MAC_ADDR_FMT, + tx_evt_param.status & CFR_TX_EVT_STATUS_MASK, + QDF_MAC_ADDR_REF(tx_evt_param.peer_mac_addr.bytes)); + retval = -EINVAL; + goto end; + } + + buf_addr_temp = (tx_evt_param.correlation_info_2 & 0x0f); + buf_addr = (tx_evt_param.correlation_info_1 | + ((uint64_t)buf_addr_temp << 32)); + + if (target_if_dbr_cookie_lookup(pdev, DBR_MODULE_CFR, buf_addr, + &cookie, 0)) { + cfr_debug("Cookie lookup failure for addr: 0x%pK status: 0x%x", + (void *)((uintptr_t)buf_addr), tx_evt_param.status); + retval = -EINVAL; + goto end; + } + + cfr_debug("buffer address: 0x%pK cookie: %u", + (void *)((uintptr_t)buf_addr), cookie); + + dump_cfr_peer_tx_event_enh(&tx_evt_param, cookie); + + lut = get_lut_entry(pcfr, cookie); + if (!lut) { + cfr_err("lut is NULL\n"); + retval = -EINVAL; + goto end; + } + + pcfr->tx_evt_cnt++; + pcfr->total_tx_evt_cnt++; + + lut->tx_ppdu_id = (tx_evt_param.correlation_info_2 >> 16); + lut->tx_address1 = tx_evt_param.correlation_info_1; + lut->tx_address2 = tx_evt_param.correlation_info_2; + lut->txrx_tstamp = qdf_ktime_to_ms(qdf_ktime_get()); + + header = &lut->header; + header->start_magic_num = 0xDEADBEAF; + header->vendorid = 0x8cfdf0; + header->cfr_metadata_version = CFR_META_VERSION_3; + header->cfr_data_version = CFR_DATA_VERSION_1; + header->chip_type = CFR_CAPTURE_RADIO_CYP; + header->pltform_type = CFR_PLATFORM_TYPE_ARM; + header->Reserved = 0; + header->u.meta_v3.status = (tx_evt_param.status & + PEER_CFR_CAPTURE_EVT_STATUS_MASK) ? + 1 : 0; + header->u.meta_v3.capture_bw = tx_evt_param.bandwidth; + + bss_chan = wlan_vdev_mlme_get_bss_chan(vdev); + header->u.meta_v3.phy_mode = bss_chan->ch_phymode; + + header->u.meta_v3.prim20_chan = tx_evt_param.primary_20mhz_chan; + header->u.meta_v3.center_freq1 = tx_evt_param.band_center_freq1; + header->u.meta_v3.center_freq2 = tx_evt_param.band_center_freq2; + + /* Currently CFR data is captured on ACK of a Qos NULL frame. + * For 20 MHz, ACK is Legacy and for 40/80/160, ACK is DUP Legacy. + */ + header->u.meta_v3.capture_mode = tx_evt_param.bandwidth ? + CFR_DUP_LEGACY_ACK : CFR_LEGACY_ACK; + header->u.meta_v3.capture_type = tx_evt_param.capture_method; + header->u.meta_v3.num_rx_chain = wlan_vdev_mlme_get_rxchainmask(vdev); + header->u.meta_v3.sts_count = tx_evt_param.spatial_streams; + header->u.meta_v3.timestamp = tx_evt_param.timestamp_us; + + qdf_mem_copy(&header->u.meta_v3.peer_addr.su_peer_addr[0], + &tx_evt_param.peer_mac_addr.bytes[0], QDF_MAC_ADDR_SIZE); + qdf_mem_copy(&header->u.meta_v3.chain_rssi[0], + &tx_evt_param.chain_rssi[0], + HOST_MAX_CHAINS * sizeof(tx_evt_param.chain_rssi[0])); + qdf_mem_copy(&header->u.meta_v3.chain_phase[0], + &tx_evt_param.chain_phase[0], + HOST_MAX_CHAINS * sizeof(tx_evt_param.chain_phase[0])); + + status = correlate_and_relay_enh(pdev, cookie, lut, + CORRELATE_TX_EV_MODULE_ID); + if (status == STATUS_STREAM_AND_RELEASE) { + if (cfr_rx_ops->cfr_info_send) + status = cfr_rx_ops->cfr_info_send(pdev, + &lut->header, + sizeof( + struct + csi_cfr_header), + lut->data, + lut->data_len, + &end_magic, 4); + dump_metadata(header, cookie); + release_lut_entry_enh(pdev, lut); + target_if_dbr_buf_release(pdev, DBR_MODULE_CFR, buf_addr, + cookie, 0); + cfr_debug("Data sent to upper layers, " + "releasing look up table"); + } else if (status == STATUS_HOLD) { + cfr_debug("HOLD for buffer address: 0x%pK cookie: %u", + (void *)((uintptr_t)buf_addr), cookie); + } else { + cfr_err("Correlation returned invalid status!!"); + retval = -EINVAL; + goto end; + } + +end: + + wlan_objmgr_psoc_release_ref(psoc, WLAN_CFR_ID); + wlan_objmgr_vdev_release_ref(vdev, WLAN_CFR_ID); + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return retval; +} +#else +static int +target_if_peer_capture_event(ol_scn_t sc, uint8_t *data, uint32_t datalen) +{ + return 0; +} +#endif + +/** + * target_if_register_tx_completion_enh_event_handler() - Register callback for + * WMI TX completion event + * @psoc: PSOC object + * + * Return: Success/Failure status + */ +static int +target_if_register_tx_completion_enh_event_handler(struct wlan_objmgr_psoc + *psoc) +{ + /* Register completion handler here */ + wmi_unified_t wmi_hdl; + int ret = 0; + + wmi_hdl = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_hdl) { + cfr_err("Unable to get wmi handle"); + return -EINVAL; + } + + ret = wmi_unified_register_event_handler(wmi_hdl, + wmi_peer_cfr_capture_event_id, + target_if_peer_capture_event, + WMI_RX_UMAC_CTX); + /* + * Event registration is called per pdev + * Ignore erorr if event is alreday registred. + */ + if (ret == QDF_STATUS_E_FAILURE) + ret = QDF_STATUS_SUCCESS; + + return ret; +} + +/** + * target_if_unregister_tx_completion_enh_event_handler() - Unregister callback + * for WMI TX completion event + * @psoc: PSOC object + * + * Return: Success/Failure status + */ +static int +target_if_unregister_tx_completion_enh_event_handler(struct wlan_objmgr_psoc + *psoc) +{ + /* Unregister completion handler here */ + wmi_unified_t wmi_hdl; + int status = 0; + + wmi_hdl = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_hdl) { + cfr_err("Unable to get wmi handle"); + return -EINVAL; + } + + status = wmi_unified_unregister_event(wmi_hdl, + wmi_peer_cfr_capture_event_id); + return status; +} + +/** + * lut_ageout_timer_task() - Timer to flush pending TXRX/DBR events + * + * Return: none + */ +static os_timer_func(lut_ageout_timer_task) +{ + int i = 0; + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + struct look_up_table *lut = NULL; + uint64_t diff, cur_tstamp; + uint8_t srng_id = 0; + + OS_GET_TIMER_ARG(pcfr, struct pdev_cfr*); + + if (!pcfr) { + cfr_err("pdev object for CFR is null"); + return; + } + + pdev = pcfr->pdev_obj; + if (!pdev) { + cfr_err("pdev is null"); + return; + } + + srng_id = pcfr->rcc_param.srng_id; + if (wlan_objmgr_pdev_try_get_ref(pdev, WLAN_CFR_ID) + != QDF_STATUS_SUCCESS) { + cfr_err("failed to get pdev reference"); + return; + } + + cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get()); + + for (i = 0; i < pcfr->lut_num; i++) { + lut = get_lut_entry(pcfr, i); + if (!lut) + continue; + + if (lut->dbr_recv && !lut->tx_recv) { + diff = cur_tstamp - lut->dbr_tstamp; + if (diff > LUT_AGE_THRESHOLD) { + cfr_debug("<%d>TXRX event not received for " + "%llu ms, release lut entry : " + "dma_addr = 0x%pK\n", i, diff, + (void *)((uintptr_t)lut->dbr_address)); + target_if_dbr_buf_release(pdev, DBR_MODULE_CFR, + lut->dbr_address, + i, srng_id); + pcfr->flush_timeout_dbr_cnt++; + release_lut_entry_enh(pdev, lut); + } + } + } + + if (pcfr->lut_timer_init) + qdf_timer_mod(&pcfr->lut_age_timer, LUT_AGE_TIMER); + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); +} + +/** + * target_if_cfr_start_lut_age_timer() - Start timer to flush aged-out LUT + * entries + * @pdev: pointer to pdev object + * + * Return: None + */ +void target_if_cfr_start_lut_age_timer(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_cfr *pcfr; + + pcfr = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CFR); + if (pcfr->lut_timer_init) + qdf_timer_mod(&pcfr->lut_age_timer, LUT_AGE_TIMER); +} + +/** + * target_if_cfr_stop_lut_age_timer() - Stop timer to flush aged-out LUT + * entries + * @pdev: pointer to pdev object + * + * Return: None + */ +void target_if_cfr_stop_lut_age_timer(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_cfr *pcfr; + + pcfr = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CFR); + if (pcfr->lut_timer_init) + qdf_timer_stop(&pcfr->lut_age_timer); +} + +/** + * target_if_cfr_update_global_cfg() - Update global config after a successful + * commit + * @pdev: pointer to pdev object + * + * Return: None + */ +void target_if_cfr_update_global_cfg(struct wlan_objmgr_pdev *pdev) +{ + int grp_id; + struct pdev_cfr *pcfr; + struct ta_ra_cfr_cfg *curr_cfg = NULL; + struct ta_ra_cfr_cfg *glbl_cfg = NULL; + unsigned long *modified_in_this_session; + + pcfr = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CFR); + modified_in_this_session = + (unsigned long *)&pcfr->rcc_param.modified_in_curr_session; + + for (grp_id = 0; grp_id < MAX_TA_RA_ENTRIES; grp_id++) { + if (qdf_test_bit(grp_id, modified_in_this_session)) { + /* Populating global config based on user's input */ + glbl_cfg = &pcfr->global[grp_id]; + curr_cfg = &pcfr->rcc_param.curr[grp_id]; + + if (curr_cfg->valid_ta) + qdf_mem_copy(glbl_cfg->tx_addr, + curr_cfg->tx_addr, + QDF_MAC_ADDR_SIZE); + + if (curr_cfg->valid_ra) + qdf_mem_copy(glbl_cfg->rx_addr, + curr_cfg->rx_addr, + QDF_MAC_ADDR_SIZE); + + if (curr_cfg->valid_ta_mask) + qdf_mem_copy(glbl_cfg->tx_addr_mask, + curr_cfg->tx_addr_mask, + QDF_MAC_ADDR_SIZE); + + if (curr_cfg->valid_ra_mask) + qdf_mem_copy(glbl_cfg->rx_addr_mask, + curr_cfg->rx_addr_mask, + QDF_MAC_ADDR_SIZE); + + if (curr_cfg->valid_bw_mask) + glbl_cfg->bw = curr_cfg->bw; + + if (curr_cfg->valid_nss_mask) + glbl_cfg->nss = curr_cfg->nss; + + if (curr_cfg->valid_mgmt_subtype) + glbl_cfg->mgmt_subtype_filter = + curr_cfg->mgmt_subtype_filter; + + if (curr_cfg->valid_ctrl_subtype) + glbl_cfg->ctrl_subtype_filter = + curr_cfg->ctrl_subtype_filter; + + if (curr_cfg->valid_data_subtype) + glbl_cfg->data_subtype_filter = + curr_cfg->data_subtype_filter; + } + } +} + +/** + * cfr_6018_init_pdev() - Inits cfr pdev and registers necessary handlers. + * @psoc: pointer to psoc object + * @pdev: pointer to pdev object + * + * Return: Registration status for necessary handlers + */ +QDF_STATUS cfr_6018_init_pdev(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct pdev_cfr *pcfr; + + if (!pdev) { + cfr_err("PDEV is NULL!"); + return QDF_STATUS_E_NULL_VALUE; + } + + pcfr = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CFR); + if (!pcfr) { + cfr_err("pcfr is NULL!"); + return QDF_STATUS_E_NULL_VALUE; + } + +#if DIRECT_BUF_RX_ENABLE + status = target_if_register_to_dbr_enh(pdev); + if (status != QDF_STATUS_SUCCESS) { + cfr_err("Failed to register with dbr"); + return status; + } +#endif + + status = target_if_register_tx_completion_enh_event_handler(psoc); + if (status != QDF_STATUS_SUCCESS) { + cfr_err("Failed to register with tx event handler"); + return status; + } + + pcfr->is_cfr_rcc_capable = 1; + pcfr->rcc_param.pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + pcfr->rcc_param.modified_in_curr_session = MAX_RESET_CFG_ENTRY; + pcfr->rcc_param.num_grp_tlvs = MAX_TA_RA_ENTRIES; + pcfr->rcc_param.vdev_id = CFR_INVALID_VDEV_ID; + pcfr->rcc_param.srng_id = 0; + + target_if_cfr_default_ta_ra_config(&pcfr->rcc_param, + true, MAX_RESET_CFG_ENTRY); + + status = target_if_cfr_config_rcc(pdev, &pcfr->rcc_param); + if (status == QDF_STATUS_SUCCESS) { + /* Update global configuration */ + target_if_cfr_update_global_cfg(pdev); + } else { + cfr_err("Sending WMI to configure default has failed\n"); + return status; + } + + pcfr->rcc_param.modified_in_curr_session = 0; + + pcfr->cfr_max_sta_count = MAX_CFR_ENABLED_CLIENTS; + pcfr->subbuf_size = STREAMFS_MAX_SUBBUF_CYP; + pcfr->num_subbufs = STREAMFS_NUM_SUBBUF_CYP; + pcfr->chip_type = CFR_CAPTURE_RADIO_CYP; + + if (!pcfr->lut_timer_init) { + qdf_timer_init(NULL, + &(pcfr->lut_age_timer), + lut_ageout_timer_task, (void *)pcfr, + QDF_TIMER_TYPE_WAKE_APPS); + pcfr->lut_timer_init = 1; + } + + return status; +} + +/** + * cfr_6018_deinit_pdev() - De-inits corresponding pdev and handlers. + * @psoc: pointer to psoc object + * @pdev: pointer to pdev object + * + * Return: De-registration status for necessary handlers + */ +QDF_STATUS cfr_6018_deinit_pdev(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + int status; + struct pdev_cfr *pcfr; + + pcfr = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CFR); + if (!pcfr) { + cfr_err("pcfr is NULL"); + return -EINVAL; + } + + if (pcfr->lut_timer_init) { + qdf_timer_stop(&pcfr->lut_age_timer); + qdf_timer_free(&(pcfr->lut_age_timer)); + pcfr->lut_timer_init = 0; + } + + pcfr->tx_evt_cnt = 0; + pcfr->dbr_evt_cnt = 0; + pcfr->release_cnt = 0; + pcfr->total_tx_evt_cnt = 0; + pcfr->rx_tlv_evt_cnt = 0; + pcfr->flush_dbr_cnt = 0; + pcfr->flush_timeout_dbr_cnt = 0; + pcfr->invalid_dma_length_cnt = 0; + pcfr->clear_txrx_event = 0; + pcfr->cfr_dma_aborts = 0; + qdf_mem_zero(&pcfr->rcc_param, sizeof(struct cfr_rcc_param)); + qdf_mem_zero(&pcfr->global, (sizeof(struct ta_ra_cfr_cfg) * + MAX_TA_RA_ENTRIES)); + +#ifdef DIRECT_BUF_RX_ENABLE + status = target_if_unregister_to_dbr_enh(pdev); + if (status != QDF_STATUS_SUCCESS) + cfr_err("Failed to register with dbr"); +#endif + + status = target_if_unregister_tx_completion_enh_event_handler(psoc); + if (status != QDF_STATUS_SUCCESS) + cfr_err("Failed to register with dbr"); + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/cfr/src/target_if_cfr_6490.c b/drivers/staging/qca-wifi-host-cmn/target_if/cfr/src/target_if_cfr_6490.c new file mode 100644 index 0000000000000000000000000000000000000000..0e25aff300ecfcb829e0ed450e6f94779a0d49b4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/cfr/src/target_if_cfr_6490.c @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC : target_if_cfr_6490.c + * + * Target interface of CFR for QCA6490 implementation + * + */ + +#include +#include "target_if_cfr.h" +#include +#include "wlan_cfr_utils_api.h" +#include "target_if_cfr_6490.h" +#include "target_if_cfr_6018.h" +#include "init_deinit_lmac.h" +#include "cfg_ucfg_api.h" +#include "cfr_cfg.h" + +static wdi_event_subscribe g_cfr_subscribe; + +static void target_cfr_callback(void *pdev_obj, enum WDI_EVENT event, + void *data, u_int16_t peer_id, + uint32_t status) +{ + struct wlan_objmgr_pdev *pdev; + qdf_nbuf_t nbuf = (qdf_nbuf_t)data; + qdf_nbuf_t data_clone; + + pdev = (struct wlan_objmgr_pdev *)pdev_obj; + if (qdf_unlikely((!pdev || !data))) { + cfr_err("Invalid pdev %pK or data %pK for event %d", + pdev, data, event); + qdf_nbuf_free(nbuf); + return; + } + + if (event != WDI_EVENT_RX_PPDU_DESC) { + cfr_debug("event is %d", event); + qdf_nbuf_free(nbuf); + return; + } + + data_clone = qdf_nbuf_clone(nbuf); + if (data_clone) + wlan_cfr_rx_tlv_process(pdev, (void *)data_clone); + + qdf_nbuf_free(nbuf); +} + +QDF_STATUS +target_if_cfr_subscribe_ppdu_desc(struct wlan_objmgr_pdev *pdev, + bool is_subscribe) +{ + ol_txrx_soc_handle soc; + struct wlan_objmgr_psoc *psoc; + struct pdev_cfr *pcfr; + + if (!pdev) { + cfr_err("Null pdev"); + return QDF_STATUS_E_INVAL; + } + + pcfr = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_CFR); + if (!pcfr) { + cfr_err("pcfr is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + cfr_err("Null psoc"); + return QDF_STATUS_E_INVAL; + } + + soc = wlan_psoc_get_dp_handle(psoc); + if (!soc) { + cfr_err("Null soc"); + return QDF_STATUS_E_INVAL; + } + + g_cfr_subscribe.callback = target_cfr_callback; + g_cfr_subscribe.context = pdev; + cdp_set_cfr_rcc(soc, 0, is_subscribe); + cdp_enable_mon_reap_timer(soc, 0, is_subscribe); + if (is_subscribe) { + if (cdp_wdi_event_sub(soc, 0, &g_cfr_subscribe, + WDI_EVENT_RX_PPDU_DESC)) { + cfr_err("wdi event sub fail"); + return QDF_STATUS_E_FAILURE; + } + } else { + if (cdp_wdi_event_unsub(soc, 0, &g_cfr_subscribe, + WDI_EVENT_RX_PPDU_DESC)) { + cfr_err("wdi event unsub fail"); + return QDF_STATUS_E_FAILURE; + } + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS cfr_6490_init_pdev(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + struct pdev_cfr *cfr_pdev; + struct psoc_cfr *cfr_psoc; + struct wmi_unified *wmi_handle = NULL; + bool is_cfr_disabled; + bool cfr_capable; + QDF_STATUS status; + + if (!psoc || !pdev) { + cfr_err("null pdev or psoc"); + return QDF_STATUS_E_FAILURE; + } + + cfr_pdev = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_CFR); + if (!cfr_pdev) { + cfr_err("null pdev cfr"); + return QDF_STATUS_E_FAILURE; + } + + cfr_psoc = wlan_objmgr_psoc_get_comp_private_obj( + psoc, WLAN_UMAC_COMP_CFR); + + if (!cfr_psoc) { + cfr_err("null psoc cfr"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle = lmac_get_pdev_wmi_handle(pdev); + if (!wmi_handle) { + cfr_err("null wmi handle"); + return QDF_STATUS_E_FAILURE; + } + + is_cfr_disabled = cfg_get(psoc, CFG_CFR_DISABLE); + if (is_cfr_disabled) { + cfr_pdev->is_cfr_capable = 0; + cfr_psoc->is_cfr_capable = 0; + cfr_info("cfr disabled"); + return QDF_STATUS_SUCCESS; + } + + cfr_capable = wmi_service_enabled(wmi_handle, + wmi_service_cfr_capture_support); + cfr_pdev->is_cfr_capable = cfr_capable; + cfr_psoc->is_cfr_capable = cfr_capable; + if (!cfr_capable) { + cfr_err("FW doesn't support CFR"); + return QDF_STATUS_SUCCESS; + } + + status = cfr_6018_init_pdev(psoc, pdev); + cfr_pdev->chip_type = CFR_CAPTURE_RADIO_HSP; + + return status; +} + +QDF_STATUS cfr_6490_deinit_pdev(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + struct pdev_cfr *pcfr; + + if (!psoc || !pdev) { + cfr_err("null pdev or psoc"); + return QDF_STATUS_E_FAILURE; + } + + pcfr = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_CFR); + if (!pcfr) { + cfr_err("null pdev cfr"); + return QDF_STATUS_E_FAILURE; + } + + if (!pcfr->is_cfr_capable) { + cfr_info("cfr disabled or FW not support"); + return QDF_STATUS_SUCCESS; + } + + return cfr_6018_deinit_pdev(psoc, pdev); +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/coex/inc/target_if_coex.h b/drivers/staging/qca-wifi-host-cmn/target_if/coex/inc/target_if_coex.h new file mode 100644 index 0000000000000000000000000000000000000000..d496efaa78ea7b371c5ad808ffec1f5839253b86 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/coex/inc/target_if_coex.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: contains coex target if declarations + */ +#ifndef __TARGET_IF_COEX_H__ +#define __TARGET_IF_COEX_H__ + +#include + +/** + * target_if_coex_register_tx_ops() - Register coex target_if tx ops + * @tx_ops: pointer to target if tx ops + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +target_if_coex_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/coex/src/target_if_coex.c b/drivers/staging/qca-wifi-host-cmn/target_if/coex/src/target_if_coex.c new file mode 100644 index 0000000000000000000000000000000000000000..6dd3c70b8092fc07d776aa9f57b540a597c56c5e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/coex/src/target_if_coex.c @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: contains coex target if functions + */ +#include +#include + +static QDF_STATUS +target_if_coex_config_send(struct wlan_objmgr_pdev *pdev, + struct coex_config_params *param) +{ + wmi_unified_t pdev_wmi_handle; + + pdev_wmi_handle = GET_WMI_HDL_FROM_PDEV(pdev); + if (!pdev_wmi_handle) { + coex_err("Invalid PDEV WMI handle"); + return QDF_STATUS_E_FAILURE; + } + + return wmi_unified_send_coex_config_cmd(pdev_wmi_handle, param); +} + +QDF_STATUS +target_if_coex_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_coex_tx_ops *coex_ops; + + if (!tx_ops) { + coex_err("target if tx ops is NULL!"); + return QDF_STATUS_E_INVAL; + } + + coex_ops = &tx_ops->coex_ops; + coex_ops->coex_config_send = target_if_coex_config_send; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/core/inc/target_if.h b/drivers/staging/qca-wifi-host-cmn/target_if/core/inc/target_if.h new file mode 100644 index 0000000000000000000000000000000000000000..3f229597393c4e015a86a8b94aa40d12fe0155cb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/core/inc/target_if.h @@ -0,0 +1,2314 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This target interface shall be used + * to communicate with target using WMI. + */ +#ifndef _WLAN_TARGET_IF_H_ +#define _WLAN_TARGET_IF_H_ + +#include "qdf_types.h" +#include "qdf_util.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wmi_unified_api.h" +#include "wmi_unified_priv.h" +#include "wmi_unified_param.h" + +#define TGT_WMI_PDEV_ID_SOC 0 /* WMI SOC ID */ + +/* ASCII "TGT\0" */ +#define TGT_MAGIC 0x54575400 + +#define target_if_fatal(params...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_TARGET_IF, params) +#define target_if_err(params...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_TARGET_IF, params) +#define target_if_warn(params...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_TARGET_IF, params) +#define target_if_info(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_TARGET_IF, params) +#define target_if_debug(params...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_TARGET_IF, params) +#define TARGET_IF_ENTER() \ + QDF_TRACE_ENTER(QDF_MODULE_ID_TARGET_IF, "enter") +#define TARGET_IF_EXIT() \ + QDF_TRACE_EXIT(QDF_MODULE_ID_TARGET_IF, "exit") +#define target_if_err_rl(params...) \ + QDF_TRACE_ERROR_RL(QDF_MODULE_ID_TARGET_IF, params) + + +#define targetif_nofl_fatal(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_TARGET_IF, params) +#define targetif_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_TARGET_IF, params) +#define targetif_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_TARGET_IF, params) +#define targetif_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_TARGET_IF, params) +#define targetif_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_TARGET_IF, params) + +typedef struct wlan_objmgr_psoc *(*get_psoc_handle_callback)( + void *scn_handle); + +typedef struct wlan_objmgr_pdev *(*get_pdev_handle_callback)( + void *scn_handle); + +typedef int (*wmi_legacy_service_ready_callback)(uint32_t event_id, + void *handle, + uint8_t *event_data, + uint32_t length); + +/** + * struct target_if_ctx - target_interface context + * @magic: magic for target if ctx + * @get_psoc_hdl_cb: function pointer to get psoc + * @get_pdev_hdl_cb: function pointer to get pdev + * @lock: spin lock for protecting the ctx + */ +struct target_if_ctx { + uint32_t magic; + get_psoc_handle_callback get_psoc_hdl_cb; + get_pdev_handle_callback get_pdev_hdl_cb; + wmi_legacy_service_ready_callback service_ready_cb; + qdf_spinlock_t lock; +}; + +struct target_psoc_info; +/** + * struct host_fw_ver - holds host fw version + * @host_ver: Host version + * @target_ver: Target version ID + * @target_rev: Target revision ID + * @wlan_ver: FW SW version + * @wlan_ver_1: FW SW version second dword + * @abi_ver: ABI version + */ +struct host_fw_ver { + uint32_t host_ver; + uint32_t target_ver; + uint32_t target_rev; + uint32_t wlan_ver; + uint32_t wlan_ver_1; + uint32_t abi_ver; +}; + +struct common_dbglog_handle; +struct common_accelerator_handle; + +/** + * struct comp_hdls - Non-umac/lower layer components handles, it is a sub + * structure of target psoc information + * @hif_hdl: HIF handle + * @htc_hdl: HTC handle + * @wmi_hdl: WMI handle + * @accelerator_hdl: NSS offload/IPA handle + * @dbglog_hdl: Debug log handle + */ +struct comp_hdls { + struct hif_opaque_softc *hif_hdl; + HTC_HANDLE htc_hdl; + struct wmi_unified *wmi_hdl; + struct common_accelerator_handle *accelerator_hdl; + struct common_dbglog_handle *dbglog_hdl; +}; + +/** + * struct target_supported_modes - List of HW modes supported by target. + * + * @num_modes: Number of modes supported + * @hw_mode_ids: List of HW mode ids + */ +struct target_supported_modes { + uint8_t num_modes; + uint32_t hw_mode_ids[WMI_HOST_HW_MODE_MAX]; +}; + +/** + * struct target_version_info - Target version information + * + * @reg_db_version_major: REG DB version major + * @reg_db_version_minor: REG DB version minor + * @bdf_reg_db_version_major: BDF REG DB version major + * @bdf_reg_db_version_minor: BDF REG DB version minor + */ +struct target_version_info { + uint8_t reg_db_version_major; + uint8_t reg_db_version_minor; + uint8_t bdf_reg_db_version_major; + uint8_t bdf_reg_db_version_minor; +}; + +/** + * struct tgt_info - FW or lower layer related info(required by target_if), + * it is a sub structure of taarget psoc information + * @version: Host FW version struct + * @wlan_res_cfg: target_resource_config info + * @wlan_ext_res_cfg: wmi_host_ext_resource_config info + * @wmi_service_ready: is service ready received + * @wmi_ready: is ready event received + * @total_mac_phy_cnt: num of mac phys + * @num_radios: number of radios + * @wlan_init_status: Target init status + * @target_type: Target type + * @max_descs: Max descriptors + * @preferred_hw_mode: preferred hw mode + * @wmi_timeout: wait timeout for target events + * @event: qdf_event for target events + * @service_bitmap: WMI service bitmap + * @target_cap: target capabilities + * @service_ext2_param: service ready ext2 event params + * @service_ext_param: ext service params + * @mac_phy_cap: phy caps array + * @dbr_ring_cap: dbr_ring capability info + * @reg_cap: regulatory caps array + * @scaling_params: Spectral bin scaling parameters + * @num_mem_chunks: number of mem chunks allocated + * @hw_mode_caps: HW mode caps of preferred mode + * @mem_chunks: allocated memory blocks for FW + */ +struct tgt_info { + struct host_fw_ver version; + target_resource_config wlan_res_cfg; + wmi_host_ext_resource_config wlan_ext_res_cfg; + bool wmi_service_ready; + bool wmi_ready; + uint8_t total_mac_phy_cnt; + uint8_t num_radios; + uint32_t wlan_init_status; + uint32_t target_type; + uint32_t max_descs; + uint32_t preferred_hw_mode; + uint32_t wmi_timeout; + qdf_event_t event; + uint32_t service_bitmap[PSOC_SERVICE_BM_SIZE]; + struct wlan_psoc_target_capability_info target_caps; + struct wlan_psoc_host_service_ext_param service_ext_param; + struct wlan_psoc_host_service_ext2_param service_ext2_param; + struct wlan_psoc_host_mac_phy_caps + mac_phy_cap[PSOC_MAX_MAC_PHY_CAP]; + struct wlan_psoc_host_dbr_ring_caps *dbr_ring_cap; + struct wlan_psoc_host_spectral_scaling_params *scaling_params; + uint32_t num_mem_chunks; + struct wmi_host_mem_chunk mem_chunks[MAX_MEM_CHUNKS]; + struct wlan_psoc_host_hw_mode_caps hw_mode_cap; + struct target_supported_modes hw_modes; +}; + +/** + * struct target_ops - Holds feature specific function pointers, which would be + * invoked as part of service ready or ext service ready + * @ext_resource_config_enable: Ext resource config + * @peer_config: Peer config enable + * @mesh_support_enable: Mesh support enable + * @smart_antenna_enable: Smart antenna enable + * @atf_config_enable: ATF config enable + * @qwrap_config_enable: QWRAP config enable + * @btcoex_config_enable: BTCOEX config enable + * @lteu_ext_support_enable: LTE-U Ext config enable + * @set_init_cmd_dev_based_params: Sets Init command params + * @alloc_pdevs: Allocates PDEVs + * @update_pdev_tgt_info: Updates PDEV target info + * @mem_mgr_alloc_chunk: Allocates memory through MEM manager + * @mem_mgr_free_chunks: Free memory chunks through MEM manager + * @print_svc_ready_ex_param: Print service ready ext params + * @add_11ax_modes: Adds 11ax modes to reg cap + * @set_default_tgt_config: Sets target config with default values + * @sw_version_check: Checks the SW version + * @smart_log_enable: Enable Smart Logs feature + * @cfr_support_enable: CFR support enable + * @set_pktlog_checksum: Set the pktlog checksum from FW ready event to pl_dev + */ +struct target_ops { + QDF_STATUS (*ext_resource_config_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*peer_config) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*mesh_support_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*smart_antenna_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*atf_config_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*qwrap_config_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*btcoex_config_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*lteu_ext_support_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*set_init_cmd_dev_based_params) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + QDF_STATUS (*alloc_pdevs) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + QDF_STATUS (*update_pdev_tgt_info) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + uint32_t (*mem_mgr_alloc_chunk)(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, + u_int32_t req_id, u_int32_t idx, u_int32_t num_units, + u_int32_t unit_len, u_int32_t num_unit_info); + QDF_STATUS (*mem_mgr_free_chunks)(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl); + void (*print_svc_ready_ex_param)( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + void (*add_11ax_modes)( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + void (*set_default_tgt_config)( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + QDF_STATUS (*sw_version_check)( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, + uint8_t *evt_buf); + void (*eapol_minrate_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*cfr_support_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*set_pktlog_checksum) + (struct wlan_objmgr_pdev *pdev, uint32_t checksum); +}; + + + +/** + * struct target_psoc_info - target psoc information + * @hdls: component handles (htc/htt/wmi) sub structure + * @info: target related info sub structure + * @feature_ptr: stores legacy pointer or few driver specific structures + * @tif_ops: holds driver specific function pointers + */ +struct target_psoc_info { + struct comp_hdls hdls; + struct tgt_info info; + void *feature_ptr; + struct target_ops *tif_ops; +}; + +/** + * struct target_pdev_info - target pdev information + * @wmi_handle: WMI handle + * @accelerator_hdl: NSS offload/IPA handles + * @pdev_idx: pdev id (of FW) + * @phy_idx: phy id (of FW) + * @feature_ptr: stores legacy pointer or few driver specific structures + */ +struct target_pdev_info { + struct wmi_unified *wmi_handle; + struct common_accelerator_handle *accelerator_hdl; + int32_t pdev_idx; + int32_t phy_idx; + void *feature_ptr; +}; + + +/** + * target_if_init() - target_if Initialization + * @get_wmi_handle: function pointer to get wmi handle + * + * + * Return: QDF_STATUS + */ +QDF_STATUS target_if_init(get_psoc_handle_callback psoc_hdl_cb); + +/** + * target_if_deinit() - Close target_if + * @scn_handle: scn handle + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS target_if_deinit(void); + +/** + * target_if_store_pdev_target_if_ctx() - stores objmgr pdev in target if ctx + * @pdev_hdl_cb: function pointer to get objmgr pdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS target_if_store_pdev_target_if_ctx( + get_pdev_handle_callback pdev_hdl_cb); + +/** + * wlan_get_tgt_if_ctx() -Get target if ctx + * + * Return: target if ctx + */ +struct target_if_ctx *target_if_get_ctx(void); + +/** + * target_if_get_psoc_from_scn_hdl() - get psoc from scn handle + * @scn_handle: scn handle + * + * This API is generally used while processing wmi event. + * In wmi event SCN handle will be passed by wmi hence + * using this API we can get psoc from scn handle. + * + * Return: index for matching scn handle + */ +struct wlan_objmgr_psoc *target_if_get_psoc_from_scn_hdl(void *scn_handle); + +/** + * target_if_get_pdev_from_scn_hdl() - get pdev from scn handle + * @scn_handle: scn handle + * + * This API is generally used while processing wmi event. + * In wmi event SCN handle will be passed by wmi hence + * using this API we can get pdev from scn handle. + * + * Return: pdev for matching scn handle + */ +struct wlan_objmgr_pdev *target_if_get_pdev_from_scn_hdl(void *scn_handle); + +/** target_if_register_tx_ops() - register tx_ops + * @tx_ops: tx_ops structure + * + * This function is to be used by components to populate + * the OL function pointers (tx_ops) required by the component + * for UMAC-LMAC interaction, with the appropriate handler + * + * Return: QDF STATUS + */ +QDF_STATUS target_if_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_get_psoc_legacy_service_ready_cb() - get psoc from scn handle + * + * This API is generally used while processing wmi event. + * In wmi event SCN handle will be passed by wmi hence + * using this API we can get psoc from scn handle. + * + * Return: wmi_legacy_service_ready_callback + */ +wmi_legacy_service_ready_callback + target_if_get_psoc_legacy_service_ready_cb(void); + +/** + * target_if_register_legacy_service_ready_cb() - get legacy + * service ready handler from scn handle + * + * @service_ready_cb: function pointer to service ready callback + * + * Return: QDF Status + */ +QDF_STATUS target_if_register_legacy_service_ready_cb( + wmi_legacy_service_ready_callback service_ready_cb); + +/** + * target_if_alloc_pdev_tgt_info() - alloc pdev tgt info + * @pdev: pointer to pdev + * + * API to allocate memory for target_pdev_info + * + * Return: SUCCESS on successful memory allocation or Failure + */ +QDF_STATUS target_if_alloc_pdev_tgt_info(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_free_pdev_tgt_info() - free pdev tgt info + * @pdev: pointer to pdev + * + * API to free allocated memory for target_pdev_info + * + * Return: SUCCESS on successful memory deallocation or Failure + */ +QDF_STATUS target_if_free_pdev_tgt_info(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_alloc_psoc_tgt_info() - alloc psoc tgt info + * @psoc: pointer to psoc + * + * API to allocate memory for target_psoc_info + * + * Return: SUCCESS on successful memory allocation or Failure + */ +QDF_STATUS target_if_alloc_psoc_tgt_info(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_free_psoc_tgt_info() - free psoc tgt info + * @psoc: pointer to psoc + * + * API to free allocated memory for target_psoc_info + * + * Return: SUCCESS on successful memory deallocation or Failure + */ +QDF_STATUS target_if_free_psoc_tgt_info(struct wlan_objmgr_psoc *psoc); + +/** + * target_is_tgt_type_ar900b() - Check if the target type is AR900B + * @target_type: target type to be checked. + * + * Return: true if the target_type is AR900B, else false. + */ +bool target_is_tgt_type_ar900b(uint32_t target_type); + +/** + * target_is_tgt_type_ipq4019() - Check if the target type is IPQ4019 + * @target_type: target type to be checked. + * + * Return: true if the target_type is IPQ4019, else false. + */ +bool target_is_tgt_type_ipq4019(uint32_t target_type); + +/** + * target_is_tgt_type_qca9984() - Check if the target type is QCA9984 + * @target_type: target type to be checked. + * + * Return: true if the target_type is QCA9984, else false. + */ +bool target_is_tgt_type_qca9984(uint32_t target_type); + +/** + * target_is_tgt_type_qca9888() - Check if the target type is QCA9888 + * @target_type: target type to be checked. + * + * Return: true if the target_type is QCA9888, else false. + */ +bool target_is_tgt_type_qca9888(uint32_t target_type); + +/** + * target_is_tgt_type_adrastea() - Check if the target type is QCS40X + * @target_type: target type to be checked. + * + * Return: true if the target_type is QCS40X, else false. + */ +bool target_is_tgt_type_adrastea(uint32_t target_type); + + +/** + * target_psoc_set_wlan_init_status() - set info wlan_init_status + * @psoc_info: pointer to structure target_psoc_info + * @wlan_init_status: FW init status + * + * API to set wlan_init_status + * + * Return: void + */ +static inline void target_psoc_set_wlan_init_status + (struct target_psoc_info *psoc_info, uint32_t wlan_init_status) +{ + if (!psoc_info) + return; + + psoc_info->info.wlan_init_status = wlan_init_status; +} + +/** + * target_psoc_get_wlan_init_status() - get info wlan_init_status + * @psoc_info: pointer to structure target_psoc_info + * + * API to get wlan_init_status + * + * Return: uint32_t + */ +static inline uint32_t target_psoc_get_wlan_init_status + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return (uint32_t)-1; + + return psoc_info->info.wlan_init_status; +} + +/** + * target_psoc_set_target_type() - set info target_type + * @psoc_info: pointer to structure target_psoc_info + * @target_type: Target type + * + * API to set target_type + * + * Return: void + */ +static inline void target_psoc_set_target_type + (struct target_psoc_info *psoc_info, uint32_t target_type) +{ + if (!psoc_info) + return; + + psoc_info->info.target_type = target_type; +} + +/** + * target_psoc_get_target_type() - get info target_type + * @psoc_info: pointer to structure target_psoc_info + * + * API to get target_type + * + * Return: unit32_t + */ +static inline uint32_t target_psoc_get_target_type + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return (uint32_t)-1; + + return psoc_info->info.target_type; +} + +/** + * target_psoc_set_max_descs() - set info max_descs + * @psoc_info: pointer to structure target_psoc_info + * @max_descs: Max descriptors + * + * API to set max_descs + * + * Return: void + */ +static inline void target_psoc_set_max_descs + (struct target_psoc_info *psoc_info, uint32_t max_descs) +{ + if (!psoc_info) + return; + + psoc_info->info.max_descs = max_descs; +} + +/** + * target_psoc_get_max_descs() - get info max_descs + * @psoc_info: pointer to structure target_psoc_info + * + * API to get max_descs + * + * Return: unint32_t + */ +static inline uint32_t target_psoc_get_max_descs + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return (uint32_t)-1; + + return psoc_info->info.max_descs; +} + +/** + * target_psoc_set_wmi_service_ready() - set info wmi_service_ready + * @psoc_info: pointer to structure target_psoc_info + * @wmi_service_ready: service ready flag + * + * API to set wmi_service_ready + * + * Return: void + */ +static inline void target_psoc_set_wmi_service_ready + (struct target_psoc_info *psoc_info, bool wmi_service_ready) +{ + if (!psoc_info) + return; + + psoc_info->info.wmi_service_ready = wmi_service_ready; +} + +/** + * target_psoc_get_wmi_service_ready() - get info wmi_service_ready + * @psoc_info: pointer to structure target_psoc_info + * + * API to get wmi_service_ready + * + * Return: bool + */ +static inline bool target_psoc_get_wmi_service_ready + (struct target_psoc_info *psoc_info) +{ + return psoc_info->info.wmi_service_ready; +} + +/** + * target_psoc_set_wmi_ready() - set info wmi_ready + * @psoc_info: pointer to structure target_psoc_info + * @wmi_ready: Ready event flag + * + * API to set wmi_ready + * + * Return: void + */ +static inline void target_psoc_set_wmi_ready + (struct target_psoc_info *psoc_info, bool wmi_ready) +{ + if (!psoc_info) + return; + + psoc_info->info.wmi_ready = wmi_ready; +} + +/** + * target_psoc_get_wmi_ready() - get info wmi_ready + * @psoc_info: pointer to structure target_psoc_info + * + * API to get wmi_ready + * + * Return: bool + */ +static inline bool target_psoc_get_wmi_ready + (struct target_psoc_info *psoc_info) +{ + return psoc_info->info.wmi_ready; +} + +/** + * target_psoc_set_preferred_hw_mode() - set preferred_hw_mode + * @psoc_info: pointer to structure target_psoc_info + * @preferred_hw_mode: Preferred HW mode + * + * API to set preferred_hw_mode + * + * Return: void + */ +static inline void target_psoc_set_preferred_hw_mode( + struct target_psoc_info *psoc_info, uint32_t preferred_hw_mode) +{ + if (!psoc_info) + return; + + psoc_info->info.preferred_hw_mode = preferred_hw_mode; +} + +/** + * target_psoc_get_preferred_hw_mode() - get preferred_hw_mode + * @psoc_info: pointer to structure target_psoc_info + * + * API to get preferred_hw_mode + * + * Return: unint32_t + */ +static inline uint32_t target_psoc_get_preferred_hw_mode + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return WMI_HOST_HW_MODE_MAX; + + return psoc_info->info.preferred_hw_mode; +} + +/** + * target_psoc_get_supported_hw_modes() - get supported_hw_mode in target + * @psoc_info: pointer to structure target_psoc_info + * + * API to get list of supported HW modes + * + * Return: pointer to target_supported_modes + */ +static inline struct target_supported_modes *target_psoc_get_supported_hw_modes + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return &psoc_info->info.hw_modes; +} + +/** + * target_psoc_set_wmi_timeout() - set wmi_timeout + * @psoc_info: pointer to structure target_psoc_info + * @wmi_timeout: WMI timeout value in sec + * + * API to set wmi_timeout + * + * Return: void + */ +static inline void target_psoc_set_wmi_timeout + (struct target_psoc_info *psoc_info, uint32_t wmi_timeout) +{ + if (!psoc_info) + return; + + psoc_info->info.wmi_timeout = wmi_timeout; +} + +/** + * target_psoc_get_wmi_timeout() - get wmi_timeout + * @psoc_info: pointer to structure target_psoc_info + * + * API to get wmi_timeout + * + * Return: unint32_t + */ +static inline uint32_t target_psoc_get_wmi_timeout + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return (uint32_t)-1; + + return psoc_info->info.wmi_timeout; +} + +/** + * target_psoc_set_total_mac_phy_cnt() - set total_mac_phy + * @psoc_info: pointer to structure target_psoc_infoa + * @total_mac_phy_cnt: Total MAC PHY cnt + * + * API to set total_mac_phy + * + * Return: void + */ +static inline void target_psoc_set_total_mac_phy_cnt + (struct target_psoc_info *psoc_info, uint8_t total_mac_phy_cnt) +{ + if (!psoc_info) + return; + + psoc_info->info.total_mac_phy_cnt = total_mac_phy_cnt; +} + +/** + * target_psoc_get_total_mac_phy_cnt() - get total_mac_phy + * @psoc_info: pointer to structure target_psoc_info + * + * API to get total_mac_phy + * + * Return: unint8_t + */ +static inline uint8_t target_psoc_get_total_mac_phy_cnt( + struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return 0; + + return psoc_info->info.total_mac_phy_cnt; +} + +/** + * target_psoc_set_num_radios() - set num of radios + * @psoc_info: pointer to structure target_psoc_info + * @num_radios: Number of radios + * + * API to set number of radios + * + * Return: number of radios + */ +static inline void target_psoc_set_num_radios( + struct target_psoc_info *psoc_info, uint8_t num_radios) +{ + if (!psoc_info) + return; + + psoc_info->info.num_radios = num_radios; +} + +/** + * target_psoc_get_num_radios() - get number of radios + * @psoc_info: pointer to structure target_psoc_info + * + * API to get number_of_radios + * + * Return: number of radios + */ +static inline uint8_t target_psoc_get_num_radios + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return 0; + + return psoc_info->info.num_radios; +} + +/** + * target_psoc_get_num_radios_for_mode() - get number of radios for a hw-mode + * @psoc_info: pointer to structure target_psoc_info + * + * API to get number_of_radios for a HW mode + * + * Return: number of radios + */ + +static inline uint8_t target_psoc_get_num_radios_for_mode + (struct target_psoc_info *psoc_info, uint8_t mode) +{ + uint8_t mac_phy_count; + uint8_t num_radios = 0; + struct tgt_info *info = &psoc_info->info; + + if (!psoc_info) + return 0; + + for (mac_phy_count = 0; + mac_phy_count < target_psoc_get_total_mac_phy_cnt(psoc_info); + mac_phy_count++) { + num_radios += + (info->mac_phy_cap[mac_phy_count].hw_mode_id == mode); + } + + return num_radios; +} + +/** + * target_psoc_set_service_bitmap() - set service_bitmap + * @psoc_info: pointer to structure target_psoc_info + * @service_bitmap: FW service bitmap + * + * API to set service_bitmap + * + * Return: void + */ +static inline void target_psoc_set_service_bitmap + (struct target_psoc_info *psoc_info, uint32_t *service_bitmap) +{ + qdf_mem_copy(psoc_info->info.service_bitmap, service_bitmap, + sizeof(psoc_info->info.service_bitmap)); +} + +/** + * target_psoc_get_service_bitmap() - get service_bitmap + * @psoc_info: pointer to structure target_psoc_info + * + * API to get service_bitmap + * + * Return: unint32_t + */ +static inline uint32_t *target_psoc_get_service_bitmap + (struct target_psoc_info *psoc_info) +{ + return psoc_info->info.service_bitmap; +} + +/** + * target_psoc_set_num_mem_chunks - set num_mem_chunks + * @psoc_info: pointer to structure target_psoc_info + & @num_mem_chunks: Num Memory chunks allocated for FW + * + * API to set num_mem_chunks + * + * Return: void + */ +static inline void target_psoc_set_num_mem_chunks( + struct target_psoc_info *psoc_info, uint32_t num_mem_chunks) +{ + if (!psoc_info) + return; + psoc_info->info.num_mem_chunks = num_mem_chunks; +} + +/** + * target_psoc_get_num_mem_chunks() - get num_mem_chunks + * @psoc_info: pointer to structure target_psoc_info + * + * API to get total_mac_phy + * + * Return: unint8_t + */ +static inline uint32_t target_psoc_get_num_mem_chunks + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return (uint32_t)-1; + + return psoc_info->info.num_mem_chunks; +} +/** + * target_psoc_set_hif_hdl - set hif_hdl + * @psoc_info: pointer to structure target_psoc_info + * @hif_hdl: HIF handle + * + * API to set hif_hdl + * + * Return: void + */ +static inline void target_psoc_set_hif_hdl + (struct target_psoc_info *psoc_info, + struct hif_opaque_softc *hif_hdl) +{ + if (!psoc_info) + return; + + psoc_info->hdls.hif_hdl = hif_hdl; +} + +/** + * target_psoc_get_hif_hdl() - get hif_hdl + * @psoc_info: pointer to structure target_psoc_info + * + * API to get hif_hdl + * + * Return: hif_hdl + */ +static inline struct hif_opaque_softc *target_psoc_get_hif_hdl + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return psoc_info->hdls.hif_hdl; +} + +/** + * target_psoc_set_hif_hdl - set htc_hdl + * @psoc_info: pointer to structure target_psoc_info + * @htc_hdl: HTC handle + * + * API to set htc_hdl + * + * Return: void + */ +static inline void target_psoc_set_htc_hdl( + struct target_psoc_info *psoc_info, + HTC_HANDLE htc_hdl) +{ + if (!psoc_info) + return; + + psoc_info->hdls.htc_hdl = htc_hdl; +} + +/** + * target_psoc_get_htc_hdl() - get htc_hdl + * @psoc_info: pointer to structure target_psoc_info + * + * API to get htc_hdl + * + * Return: htc_hdl + */ +static inline HTC_HANDLE target_psoc_get_htc_hdl + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return psoc_info->hdls.htc_hdl; +} +/** + * target_psoc_set_wmi_hdl - set wmi_hdl + * @psoc_info: pointer to structure target_psoc_info + * @wmi_hdl: WMI handle + * + * API to set wmi_hdl + * + * Return: void + */ +static inline void target_psoc_set_wmi_hdl + (struct target_psoc_info *psoc_info, + struct wmi_unified *wmi_hdl) +{ + if (!psoc_info) + return; + + psoc_info->hdls.wmi_hdl = wmi_hdl; +} + +/** + * target_psoc_get_wmi_hdl() - get wmi_hdl + * @psoc_info: pointer to structure target_psoc_info + * + * API to get wmi_hdl + * + * Return: wmi_hdl + */ +static inline struct wmi_unified *target_psoc_get_wmi_hdl + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return psoc_info->hdls.wmi_hdl; +} + +/** + * target_psoc_set_accelerator_hdl - set accelerator_hdl + * @psoc_info: pointer to structure target_psoc_info + * @accelerator_hdl: Accelator handle + * + * API to set accelerator_hdl + * + * Return: void + */ +static inline void target_psoc_set_accelerator_hdl + (struct target_psoc_info *psoc_info, + struct common_accelerator_handle *accelerator_hdl) +{ + if (!psoc_info) + return; + + psoc_info->hdls.accelerator_hdl = accelerator_hdl; +} + +/** + * target_psoc_get_accelerator_hdl() - get accelerator_hdl + * @psoc_info: pointer to structure target_psoc_info + * + * API to get accelerator_hdl + * + * Return: accelerator_hdl + */ +static inline +struct common_accelerator_handle *target_psoc_get_accelerator_hdl + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return psoc_info->hdls.accelerator_hdl; +} + +/** + * target_psoc_set_feature_ptr - set feature_ptr + * @psoc_info: pointer to structure target_psoc_info + * @feature_ptr: set feature pointer + * + * API to set feature_ptr + * + * Return: void + */ +static inline void target_psoc_set_feature_ptr + (struct target_psoc_info *psoc_info, void *feature_ptr) +{ + if (!psoc_info) + return; + + psoc_info->feature_ptr = feature_ptr; +} + +/** + * target_psoc_get_feature_ptr() - get feature_ptr + * @psoc_info: pointer to structure target_psoc_info + * + * API to get feature_ptr + * + * Return: feature_ptr + */ +static inline void *target_psoc_get_feature_ptr + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return psoc_info->feature_ptr; +} + +/** + * target_psoc_get_version()- get host_fw_ver version + * @psoc_info: pointer to structure target_psoc_info + * + * API to get host_fw_ver version + * + * Return: void + */ +static inline struct host_fw_ver *target_psoc_get_version + (struct target_psoc_info *psoc_info) +{ + return &psoc_info->info.version; +} + +/** + * target_psoc_get_target_ver()- get target version + * @psoc_info: pointer to structure target_psoc_info + * + * API to get target version + * + * Return: target version + */ +static inline uint32_t target_psoc_get_target_ver + (struct target_psoc_info *psoc_info) +{ + return psoc_info->info.version.target_ver; +} + +/** + * target_psoc_set_target_ver()- set target version + * @psoc_info: pointer to structure target_psoc_info + * @target_ver: Target version + * + * API to set target version + * + * Return: void + */ +static inline void target_psoc_set_target_ver + (struct target_psoc_info *psoc_info, uint32_t target_ver) +{ + if (!psoc_info) + return; + + psoc_info->info.version.target_ver = target_ver; +} + +/** + * target_psoc_set_target_rev()- set target revision + * @psoc_info: pointer to structure target_psoc_info + * @target_rev: Target revision + * + * API to get target version + * + * Return: void + */ +static inline void target_psoc_set_target_rev + (struct target_psoc_info *psoc_info, uint32_t target_rev) +{ + if (!psoc_info) + return; + + psoc_info->info.version.target_rev = target_rev; +} + +/** + * target_psoc_get_target_rev()- get target revision + * @psoc_info: pointer to structure target_psoc_info + * + * API to get target revision + * + * Return: target revision + */ +static inline uint32_t target_psoc_get_target_rev + (struct target_psoc_info *psoc_info) +{ + return psoc_info->info.version.target_rev; +} + +/** + * target_psoc_set_dbglog_hdl - set dbglog_hdl + * @psoc_info: pointer to structure target_psoc_info + * @dbglog_hdl: dbglog handle + * + * API to set dbglog_hdl + * + * Return: void + */ +static inline void target_psoc_set_dbglog_hdl + (struct target_psoc_info *psoc_info, + struct common_dbglog_handle *dbglog_hdl) +{ + if (!psoc_info) + return; + + psoc_info->hdls.dbglog_hdl = dbglog_hdl; +} + +/** + * target_psoc_get_dbglog_hdl() - get dbglog_hdl + * @psoc_info: pointer to structure target_psoc_info + * + * API to get dbglog_hdl + * + * Return: dbglog_hdl + */ +static inline struct common_dbglog_handle *target_psoc_get_dbglog_hdl + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return psoc_info->hdls.dbglog_hdl; +} + +/** + * target_psoc_get_wlan_res_cfg() - get wlan_res_cfg + * @psoc_info: pointer to structure target_psoc_info + * + * API to get wlan_res_cfg + * + * Return: structure pointer to host_fw_ver + */ +static inline target_resource_config *target_psoc_get_wlan_res_cfg + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return &psoc_info->info.wlan_res_cfg; +} + +/** + * target_psoc_get_wlan_ext_res_cfg() - get wlan_ext_res_cfg + * @psoc_info: pointer to structure target_psoc_info + * + * API to get wlan_ext_res_cfg + * + * Return: structure pointer to wmi_host_ext_resource_config + */ +static inline wmi_host_ext_resource_config *target_psoc_get_wlan_ext_res_cfg + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return &psoc_info->info.wlan_ext_res_cfg; +} + +/** + * target_psoc_get_event_queue() - get event_queue + * @psoc_info: pointer to structure target_psoc_info + * + * API to get event_queue + * + * Return: structure pointer to qdf_wait_queue_head_t + */ +static inline qdf_event_t *target_psoc_get_event + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return &psoc_info->info.event; +} + +/** + * target_psoc_get_target_caps() - get target_caps + * @psoc_info: pointer to structure target_psoc_info + * + * API to get target_caps + * + * Return: structure pointer to wlan_psoc_target_capability_info + */ +static inline struct wlan_psoc_target_capability_info + *target_psoc_get_target_caps(struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return &psoc_info->info.target_caps; +} + +/** + * target_psoc_get_service_ext_param() - get service_ext_param + * @psoc_info: pointer to structure target_psoc_info + * + * API to get service_ext_param + * + * Return: structure pointer to wlan_psoc_host_service_ext_param + */ +static inline struct wlan_psoc_host_service_ext_param + *target_psoc_get_service_ext_param + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return &psoc_info->info.service_ext_param; +} + +/** + * target_psoc_get_num_dbr_ring_caps() - get no of dbr_ring_caps + * @psoc_info: pointer to structure target_psoc_info + * + * API to get num_dbr_ring_caps + * + * Return: no of dbr_ring_caps + */ +static inline uint32_t target_psoc_get_num_dbr_ring_caps + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return 0; + + if (psoc_info->info.service_ext_param.num_dbr_ring_caps) + return psoc_info->info.service_ext_param.num_dbr_ring_caps; + + return psoc_info->info.service_ext2_param.num_dbr_ring_caps; +} + +/** + * target_psoc_get_mac_phy_cap_for_mode() - get mac_phy_cap for a hw-mode + * @psoc_info: pointer to structure target_psoc_info + * + * API to get mac_phy_cap for a specified hw-mode + * + * Return: structure pointer to wlan_psoc_host_mac_phy_caps + */ + +static inline struct wlan_psoc_host_mac_phy_caps + *target_psoc_get_mac_phy_cap_for_mode + (struct target_psoc_info *psoc_info, uint8_t mode) +{ + uint8_t mac_phy_idx; + struct tgt_info *info = &psoc_info->info; + + if (!psoc_info) + return NULL; + + for (mac_phy_idx = 0; + mac_phy_idx < PSOC_MAX_MAC_PHY_CAP; + mac_phy_idx++) + if (info->mac_phy_cap[mac_phy_idx].hw_mode_id == mode) + break; + + if (mac_phy_idx == PSOC_MAX_MAC_PHY_CAP) + return NULL; + + return &info->mac_phy_cap[mac_phy_idx]; +} + +/** + * target_psoc_get_mac_phy_cap() - get mac_phy_cap + * @psoc_info: pointer to structure target_psoc_info + * + * API to get mac_phy_cap + * + * Return: structure pointer to wlan_psoc_host_mac_phy_caps + */ +static inline struct wlan_psoc_host_mac_phy_caps *target_psoc_get_mac_phy_cap + (struct target_psoc_info *psoc_info) +{ + uint32_t preferred_hw_mode; + struct wlan_psoc_host_mac_phy_caps *mac_phy_cap; + + if (!psoc_info) + return NULL; + + preferred_hw_mode = + target_psoc_get_preferred_hw_mode(psoc_info); + + if (preferred_hw_mode < WMI_HOST_HW_MODE_MAX) { + mac_phy_cap = + target_psoc_get_mac_phy_cap_for_mode + (psoc_info, preferred_hw_mode); + } else { + mac_phy_cap = psoc_info->info.mac_phy_cap; + } + + return mac_phy_cap; +} + +/** + * target_psoc_get_dbr_ring_caps() - get dbr_ring_cap + * @psoc_info: pointer to structure target_psoc_info + * + * API to get dbr_ring_cap + * + * Return: structure pointer to wlan_psoc_host_dbr_ring_caps + */ +static inline struct wlan_psoc_host_dbr_ring_caps + *target_psoc_get_dbr_ring_caps(struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return psoc_info->info.dbr_ring_cap; +} + +/** + * target_psoc_get_spectral_scaling_params() - get Spectral scaling params + * @psoc_info: pointer to structure target_psoc_info + * + * API to get Spectral scaling params + * + * Return: structure pointer to wlan_psoc_host_spectral_scaling_params + */ +static inline struct wlan_psoc_host_spectral_scaling_params + *target_psoc_get_spectral_scaling_params( + struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return psoc_info->info.scaling_params; +} + +/** + * target_psoc_get_mem_chunks() - get mem_chunks + * @psoc_info: pointer to structure target_psoc_info + * + * API to get mem_chunks + * + * Return: structure pointer to wmi_host_mem_chunk + */ +static inline struct wmi_host_mem_chunk *target_psoc_get_mem_chunks + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return psoc_info->info.mem_chunks; +} + +/** + * target_psoc_get_tif_ops() - get tif_ops + * @psoc_info: pointer to structure target_psoc_info + * + * API to get tif_ops + * + * Return: structure pointer to target_ops + */ +static inline struct target_ops *target_psoc_get_tif_ops + (struct target_psoc_info *psoc_info) +{ + if (!psoc_info) + return NULL; + + return psoc_info->tif_ops; +} + +/** + * target_pdev_set_feature_ptr - set feature_ptr + * @pdev_info: pointer to structure target_pdev_info + * @feature_ptr: Feature pointer + * + * API to set feature_ptr + * + * Return: void + */ +static inline void target_pdev_set_feature_ptr + (struct target_pdev_info *pdev_info, void *feature_ptr) +{ + if (!pdev_info) + return; + + pdev_info->feature_ptr = feature_ptr; +} + +/** + * target_pdev_get_feature_ptr() - get feature_ptr + * @pdev_info: pointer to structure target_pdev_info + * + * API to get feature_ptr + * + * Return: feature_ptr + */ +static inline void *target_pdev_get_feature_ptr + (struct target_pdev_info *pdev_info) +{ + if (!pdev_info) + return NULL; + + return pdev_info->feature_ptr; +} + +/** + * target_pdev_set_wmi_handle - set wmi_handle + * @pdev_info: pointer to structure target_pdev_info + * @wmi_handle: WMI handle + * + * API to set wmi_handle + * + * Return: void + */ +static inline void target_pdev_set_wmi_handle + (struct target_pdev_info *pdev_info, + struct wmi_unified *wmi_handle) +{ + if (!pdev_info) + return; + + pdev_info->wmi_handle = wmi_handle; +} + +/** + * target_pdev_get_wmi_handle - get wmi_handle + * @pdev_info: pointer to structure target_dev_info + * + * API to get wmi_handle + * + * Return: wmi_handle + */ +static inline struct wmi_unified *target_pdev_get_wmi_handle + (struct target_pdev_info *pdev_info) +{ + if (!pdev_info) + return NULL; + + return pdev_info->wmi_handle; +} + +/** + * target_pdev_set_accelerator_hdl - set accelerator_hdl + * @pdev_info: pointer to structure target_pdev_info + * @accelerator_hdl: Accelator handle + * + * API to set accelerator_hdl + * + * Return: void + */ +static inline void target_pdev_set_accelerator_hdl + (struct target_pdev_info *pdev_info, + struct common_accelerator_handle *accelerator_hdl) +{ + if (!pdev_info) + return; + + pdev_info->accelerator_hdl = accelerator_hdl; +} + +/** + * target_pdev_get_accelerator_hdl - get accelerator_hdl + * @pdev_info: pointer to structure target_dev_info + * + * API to get accelerator_hdl + * + * Return: accelerator_hdl + */ +static inline struct common_accelerator_handle * +target_pdev_get_accelerator_hdl(struct target_pdev_info *pdev_info) +{ + if (!pdev_info) + return NULL; + + return pdev_info->accelerator_hdl; +} + +/** + * target_pdev_set_pdev_idx - set pdev_idx + * @pdev_info: pointer to structure target_pdev_info + * @pdev_idx: PDEV id of FW + * + * API to set pdev_idx + * + * Return: void + */ +static inline void target_pdev_set_pdev_idx + (struct target_pdev_info *pdev_info, int32_t pdev_idx) +{ + if (!pdev_info) + return; + + pdev_info->pdev_idx = pdev_idx; +} + +/** + * target_pdev_get_pdev_idx - get pdev_idx + * @pdev_info: pointer to structure target_dev_info + * + * API to get pdev_idx + * + * Return: int32_t + */ +static inline int32_t target_pdev_get_pdev_idx + (struct target_pdev_info *pdev_info) +{ + if (!pdev_info) + return -EINVAL; + + return pdev_info->pdev_idx; +} + +/** + * target_pdev_set_phy_idx - set phy_idx + * @pdev_info: pointer to structure target_pdev_info + * @phy_idx: phy ID of FW + * + * API to set phy_idx + * + * Return: void + */ +static inline void target_pdev_set_phy_idx + (struct target_pdev_info *pdev_info, int32_t phy_idx) +{ + if (!pdev_info) + return; + + pdev_info->phy_idx = phy_idx; +} + +/** + * target_pdev_get_phy_idx - get phy_idx + * @pdev_info: pointer to structure target_dev_info + * + * API to get phy_idx + * + * Return: int32_t + */ +static inline int32_t target_pdev_get_phy_idx + (struct target_pdev_info *pdev_info) +{ + if (!pdev_info) + return -EINVAL; + + return pdev_info->phy_idx; +} + +/** + * GET_WMI_HDL_FROM_PSOC - get wmi handle from psoc + * @psoc: psoc object + * + * API to get wmi_handle from psoc + * + * Return: wmi_handle on success + * if tgt handle is not initialized, it returns NULL + */ +static inline struct wmi_unified *GET_WMI_HDL_FROM_PSOC( + struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_if_handle; + + if (psoc) { + tgt_if_handle = psoc->tgt_if_handle; + + if (tgt_if_handle) + return target_psoc_get_wmi_hdl(tgt_if_handle); + else + return NULL; + } + + return NULL; +} + +/** + * GET_WMI_HDL_FROM_PDEV - get wmi handle from pdev + * @pdev: pdev object + * + * API to get wmi_handle from pdev + * + * Return: wmi_handle on success + * if tgt handle is not initialized, it returns NULL + */ +static inline struct wmi_unified *GET_WMI_HDL_FROM_PDEV( + struct wlan_objmgr_pdev *pdev) +{ + struct target_pdev_info *tgt_if_handle; + + if (pdev) { + tgt_if_handle = pdev->tgt_if_handle; + + if (tgt_if_handle) + return target_pdev_get_wmi_handle(tgt_if_handle); + else + return NULL; + } + + return NULL; +} + +/** + * get_wmi_unified_hdl_from_psoc - get wmi handle from psoc + * @psoc: psoc object + * + * API to get wmi_handle from psoc + * + * Return: wmi_handle on success + * if tgt handle is not initialized, it returns NULL + */ +static inline wmi_unified_t +get_wmi_unified_hdl_from_psoc(struct wlan_objmgr_psoc *psoc) +{ + return (wmi_unified_t)GET_WMI_HDL_FROM_PSOC(psoc); +} + +/** + * get_wmi_unified_hdl_from_pdev - get wmi handle from pdev + * @pdev: pdev object + * + * API to get wmi_handle from pdev + * + * Return: wmi_handle on success + * if tgt handle is not initialized, it returns NULL + */ +static inline wmi_unified_t +get_wmi_unified_hdl_from_pdev(struct wlan_objmgr_pdev *pdev) +{ + return (wmi_unified_t)GET_WMI_HDL_FROM_PDEV(pdev); +} + +/** + * target_if_ext_res_cfg_enable - Enable ext resource config + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable Ext resource config + * + * Return: none + */ +static inline void target_if_ext_res_cfg_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->ext_resource_config_enable)) + tgt_hdl->tif_ops->ext_resource_config_enable(psoc, + tgt_hdl, evt_buf); +} + +/** + * target_if_peer_cfg_enable - Enable peer config + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable peer config + * + * Return: none + */ +static inline void target_if_peer_cfg_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->peer_config)) + tgt_hdl->tif_ops->peer_config(psoc, tgt_hdl, evt_buf); +} + +/** + * target_if_mesh_support_enable - Enable MESH mode support + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable Mesh mode + * + * Return: none + */ +static inline void target_if_mesh_support_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->mesh_support_enable)) + tgt_hdl->tif_ops->mesh_support_enable(psoc, tgt_hdl, evt_buf); +} + +/** + * target_if_eapol_minrate_enable - Enable EAPOL Minrate in Tunnel Mode + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable eapol minrate + * + * Return: none + */ +static inline void target_if_eapol_minrate_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->eapol_minrate_enable)) + tgt_hdl->tif_ops->eapol_minrate_enable(psoc, tgt_hdl, evt_buf); +} + +/** + * target_if_smart_antenna_enable - Enable Smart antenna module + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable Smart antenna + * + * Return: none + */ +static inline void target_if_smart_antenna_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->smart_antenna_enable)) + tgt_hdl->tif_ops->smart_antenna_enable(psoc, tgt_hdl, evt_buf); +} + +/** + * target_if_cfr_support_enable - Enable cfr support + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable cfr support + * + * Return: none + */ +static inline void target_if_cfr_support_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->cfr_support_enable)) + tgt_hdl->tif_ops->cfr_support_enable(psoc, tgt_hdl, evt_buf); +} + +/** + * target_if_set_pktlog_checksum - Set pktlog checksum + * @pdev: pdev object + * @tgt_hdl: target_psoc_info pointer + * @checksum: checksum received from FW + * + * API to set pktlog checksum + * + * Return: none + */ +static inline void target_if_set_pktlog_checksum(struct wlan_objmgr_pdev *pdev, + struct target_psoc_info *tgt_hdl, uint32_t checksum) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->set_pktlog_checksum)) + tgt_hdl->tif_ops->set_pktlog_checksum(pdev, checksum); +} + +/** + * target_if_atf_cfg_enable - Enable ATF config + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable ATF config + * + * Return: none + */ +static inline void target_if_atf_cfg_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->atf_config_enable)) + tgt_hdl->tif_ops->atf_config_enable(psoc, tgt_hdl, evt_buf); +} + +/** + * target_if_qwrap_cfg_enable - Enable QWRAP config + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable QWRAP config + * + * Return: none + */ +static inline void target_if_qwrap_cfg_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->qwrap_config_enable)) + tgt_hdl->tif_ops->qwrap_config_enable(psoc, tgt_hdl, evt_buf); +} + +/** + * target_if_btcoex_cfg_enable - Enable BT coex config + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable BT coex config + * + * Return: none + */ +static inline void target_if_btcoex_cfg_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->btcoex_config_enable)) + tgt_hdl->tif_ops->btcoex_config_enable(psoc, tgt_hdl, evt_buf); +} + +/** + * target_if_lteu_cfg_enable - Enable LTEU config + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable LTEU coex config + * + * Return: none + */ +static inline void target_if_lteu_cfg_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->lteu_ext_support_enable)) + tgt_hdl->tif_ops->lteu_ext_support_enable(psoc, tgt_hdl, + evt_buf); +} + +/** + * target_if_set_init_cmd_dev_param - Set init command params + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * + * API to set init command param based on config + * + * Return: none + */ +static inline void target_if_set_init_cmd_dev_param( + struct wlan_objmgr_psoc *psoc, struct target_psoc_info *tgt_hdl) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->set_init_cmd_dev_based_params)) { + tgt_hdl->tif_ops->set_init_cmd_dev_based_params(psoc, + tgt_hdl); + } +} + +/** + * target_if_alloc_pdevs - Allocate PDEVs + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * + * API allocates PDEVs based on ext service ready param + * + * Return: SUCCESS on pdev allocation or PDEV allocation is not needed + * FAILURE, if allocation fails + */ +static inline QDF_STATUS target_if_alloc_pdevs(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + QDF_STATUS ret_val; + + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->alloc_pdevs)) + ret_val = tgt_hdl->tif_ops->alloc_pdevs(psoc, tgt_hdl); + else + ret_val = QDF_STATUS_SUCCESS; + + return ret_val; +} + +/** + * target_if_update_pdev_tgt_info - Update PDEVs info + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * + * API updates PDEVs info based on config + * + * Return: SUCCESS on pdev updation or PDEV updation is not needed + * FAILURE, if updation fails + */ +static inline QDF_STATUS target_if_update_pdev_tgt_info( + struct wlan_objmgr_psoc *psoc, struct target_psoc_info *tgt_hdl) +{ + QDF_STATUS ret_val; + + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->update_pdev_tgt_info)) + ret_val = tgt_hdl->tif_ops->update_pdev_tgt_info(psoc, + tgt_hdl); + else + ret_val = QDF_STATUS_SUCCESS; + + return ret_val; +} + +/** + * target_if_print_service_ready_ext_param - Print Service ready ext param + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * + * API to print service ready ext param + * + * Return: none + */ +static inline void target_if_print_service_ready_ext_param( + struct wlan_objmgr_psoc *psoc, struct target_psoc_info *tgt_hdl) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->print_svc_ready_ex_param)) { + tgt_hdl->tif_ops->print_svc_ready_ex_param(psoc, + tgt_hdl); + } +} + +/** + * target_if_add_11ax_modes - Add 11ax modes explicitly + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * + * API to adds 11ax modes + * + * Return: none + */ +static inline void target_if_add_11ax_modes(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->add_11ax_modes)) { + tgt_hdl->tif_ops->add_11ax_modes(psoc, tgt_hdl); + } +} + +/** + * target_if_set_default_config - Set default config in init command + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * + * API to set default config in init command + * + * Return: none + */ +static inline void target_if_set_default_config(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->set_default_tgt_config)) { + tgt_hdl->tif_ops->set_default_tgt_config(psoc, tgt_hdl); + } +} + +/** + * target_if_sw_version_check - SW version check + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API checks the SW version + * + * Return: SUCCESS on version matches or version check is not needed + * FAILURE, if check fails + */ +static inline QDF_STATUS target_if_sw_version_check( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + QDF_STATUS ret_val; + + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->sw_version_check)) + ret_val = tgt_hdl->tif_ops->sw_version_check(psoc, tgt_hdl, + evt_buf); + else + ret_val = QDF_STATUS_SUCCESS; + + return ret_val; +} + +/** + * target_if_get_phy_capability - get phy capability + * @target_psoc_info: pointer to structure target_psoc_info + * + * API to get phy capability from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_phy_capability + (struct target_psoc_info *target_psoc_info) +{ + if (!target_psoc_info) + return -EINVAL; + + return target_psoc_info->info.target_caps.phy_capability; +} + +/** + * target_if_set_phy_capability - set phy capability + * @target_psoc_info: pointer to structure target_psoc_info + * @phy_capab: PHY capabilities + * + * API to set phy capability in the target caps + * + * Return: None + */ +static inline void target_if_set_phy_capability + (struct target_psoc_info *target_psoc_info, int phy_capability) +{ + if (!target_psoc_info) + return; + + target_psoc_info->info.target_caps.phy_capability = phy_capability; +} + +/** + * target_if_set_max_frag_entry - set Maximum frag entries + * @target_psoc_info: pointer to structure target_psoc_info + * @max_frag_entry: Maximum frag entries + * + * API to set Maximum frag entries from the target caps + * + * Return: None + */ +static inline void target_if_set_max_frag_entry + (struct target_psoc_info *target_psoc_info, int max_frag_entry) +{ + if (!target_psoc_info) + return; + + target_psoc_info->info.target_caps.max_frag_entry = max_frag_entry; +} + +/** + * target_if_get_max_frag_entry - get Maximum frag entries + * @target_psoc_info: pointer to structure target_psoc_info + * + * API to get Maximum frag entries from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_max_frag_entry + (struct target_psoc_info *target_psoc_info) +{ + if (!target_psoc_info) + return -EINVAL; + + return target_psoc_info->info.target_caps.max_frag_entry; +} + +/** + * target_if_get_ht_cap_info - get ht capabilities info + * @target_psoc_info: pointer to structure target_psoc_info + * + * API to get ht capabilities info from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_ht_cap_info + (struct target_psoc_info *target_psoc_info) +{ + if (!target_psoc_info) + return -EINVAL; + + return target_psoc_info->info.target_caps.ht_cap_info; +} + +/** + * target_if_get_vht_cap_info - get vht capabilities info + * @target_psoc_info: pointer to structure target_psoc_info + * + * API to get vht capabilities info from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_vht_cap_info + (struct target_psoc_info *target_psoc_info) +{ + if (!target_psoc_info) + return -EINVAL; + + return target_psoc_info->info.target_caps.vht_cap_info; +} + +/** + * target_if_get_num_rf_chains - get Number of RF chains supported + * @target_psoc_info: pointer to structure target_psoc_info + * + * API to get Number of RF chains supported from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_num_rf_chains + (struct target_psoc_info *target_psoc_info) +{ + if (!target_psoc_info) + return -EINVAL; + + return target_psoc_info->info.target_caps.num_rf_chains; +} + +/** + * target_if_get_fw_version - get firmware version + * @target_psoc_info: pointer to structure target_psoc_info + * + * API to get firmware version from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_fw_version + (struct target_psoc_info *target_psoc_info) +{ + if (!target_psoc_info) + return 0; + + return target_psoc_info->info.target_caps.fw_version; +} + +/** + * target_if_get_wmi_fw_sub_feat_caps - FW sub feature capabilities + * @target_psoc_info: pointer to structure target_psoc_info + * + * API to get FW sub feature capabilities from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_wmi_fw_sub_feat_caps + (struct target_psoc_info *target_psoc_info) +{ + if (!target_psoc_info) + return -EINVAL; + + return target_psoc_info->info.target_caps.wmi_fw_sub_feat_caps; +} + +/** + * target_if_get_conc_scan_config_bits - Default concurrenct scan config + * @tgt_hdl: pointer to structure target_psoc_info + * + * API to get Default concurrenct scan config from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_conc_scan_config_bits + (struct target_psoc_info *tgt_hdl) +{ + if (!tgt_hdl) + return -EINVAL; + + return tgt_hdl->info.service_ext_param.default_conc_scan_config_bits; +} + +/** + * target_if_get_fw_config_bits - Default HW config bits + * @tgt_hdl: pointer to structure target_psoc_info + * + * API to get Default HW config bits from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_fw_config_bits + (struct target_psoc_info *tgt_hdl) +{ + if (!tgt_hdl) + return -EINVAL; + + return tgt_hdl->info.service_ext_param.default_fw_config_bits; +} + +/** + * target_psoc_get_num_hw_modes - get number of dbs hardware modes + * @tgt_hdl: pointer to structure target_psoc_info + * + * API to get Number of Dual Band Simultaneous (DBS) hardware modes + * + * Return: int32_t + */ +static inline int32_t target_psoc_get_num_hw_modes + (struct target_psoc_info *tgt_hdl) +{ + if (!tgt_hdl) + return -EINVAL; + + return tgt_hdl->info.service_ext_param.num_hw_modes; +} + +#ifdef WLAN_SUPPORT_TWT +static inline void target_if_set_twt_ap_pdev_count + (struct tgt_info *info, struct target_psoc_info *tgt_hdl) +{ + if (!tgt_hdl) + return; + + info->wlan_res_cfg.twt_ap_pdev_count = + target_psoc_get_num_radios(tgt_hdl); +} +#else +static inline void target_if_set_twt_ap_pdev_count + (struct tgt_info *info, struct target_psoc_info *tgt_hdl) +{ +} +#endif /* WLAN_SUPPORT_TWT */ + +/** + * target_psoc_get_version_info() - Get version info from tgt info + * @psoc_info: pointer to structure target_psoc_info + * @reg_major: reg db version major + * @reg_minor: reg db version minor + * @bdf_major: bdf reg db version major + * @bdf_minor: bdf reg db version minor + * + * API to get target version information. + * + * Return: void + */ +static inline void target_psoc_get_version_info( + struct target_psoc_info *psoc_info, + uint8_t *reg_major, uint8_t *reg_minor, + uint8_t *bdf_major, uint8_t *bdf_minor) +{ + if (!psoc_info) + return; + + *reg_major = psoc_info->info.service_ext2_param.reg_db_version_major; + *reg_minor = psoc_info->info.service_ext2_param.reg_db_version_minor; + *bdf_major = + psoc_info->info.service_ext2_param.bdf_reg_db_version_major; + *bdf_minor = + psoc_info->info.service_ext2_param.bdf_reg_db_version_minor; +} +#endif + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/core/src/target_if_main.c b/drivers/staging/qca-wifi-host-cmn/target_if/core/src/target_if_main.c new file mode 100644 index 0000000000000000000000000000000000000000..a565aab1d547719106fd24b8301812a4f9b9fee0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/core/src/target_if_main.c @@ -0,0 +1,645 @@ +/* + * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: API for interacting with target interface. + * + */ + +#include "target_if.h" +#include "target_type.h" +#ifdef WLAN_ATF_ENABLE +#include "target_if_atf.h" +#endif +#ifdef WLAN_SA_API_ENABLE +#include "target_if_sa_api.h" +#endif +#ifdef WLAN_CFR_ENABLE +#include "target_if_cfr.h" +#endif +#ifdef WLAN_CONV_SPECTRAL_ENABLE +#include "target_if_spectral.h" +#endif +#include +#include +#include +#ifdef DFS_COMPONENT_ENABLE +#include +#endif + +#ifdef CONVERGED_P2P_ENABLE +#include "target_if_p2p.h" +#endif + +#ifdef WIFI_POS_CONVERGED +#include "target_if_wifi_pos.h" +#endif + +#ifdef FEATURE_WLAN_TDLS +#include "target_if_tdls.h" +#endif +#ifdef QCA_SUPPORT_SON +#include +#endif +#ifdef WLAN_OFFCHAN_TXRX_ENABLE +#include +#endif +#ifdef WLAN_SUPPORT_GREEN_AP +#include +#endif +#include +#include + +#ifdef DIRECT_BUF_RX_ENABLE +#include +#endif + +#ifdef WLAN_SUPPORT_FILS +#include +#endif +#include "qdf_module.h" + +#include +#ifdef CRYPTO_SET_KEY_CONVERGED +#include +#endif +#include + +#ifdef FEATURE_COEX +#include +#endif + +#include + +static struct target_if_ctx *g_target_if_ctx; + +struct target_if_ctx *target_if_get_ctx(void) +{ + return g_target_if_ctx; +} + +struct wlan_objmgr_psoc *target_if_get_psoc_from_scn_hdl(void *scn_handle) +{ + struct wlan_objmgr_psoc *psoc; + + qdf_spin_lock_bh(&g_target_if_ctx->lock); + if (scn_handle && g_target_if_ctx->get_psoc_hdl_cb) + psoc = g_target_if_ctx->get_psoc_hdl_cb(scn_handle); + else + psoc = NULL; + qdf_spin_unlock_bh(&g_target_if_ctx->lock); + + return psoc; +} + +struct wlan_objmgr_pdev *target_if_get_pdev_from_scn_hdl(void *scn_handle) +{ + struct wlan_objmgr_pdev *pdev; + + qdf_spin_lock_bh(&g_target_if_ctx->lock); + if (scn_handle && g_target_if_ctx->get_pdev_hdl_cb) + pdev = g_target_if_ctx->get_pdev_hdl_cb(scn_handle); + else + pdev = NULL; + qdf_spin_unlock_bh(&g_target_if_ctx->lock); + + return pdev; +} + +#ifdef DIRECT_BUF_RX_ENABLE +static QDF_STATUS target_if_direct_buf_rx_init(void) +{ + return direct_buf_rx_init(); +} + +static QDF_STATUS target_if_direct_buf_rx_deinit(void) +{ + return direct_buf_rx_deinit(); +} +#else +static QDF_STATUS target_if_direct_buf_rx_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_direct_buf_rx_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* DIRECT_BUF_RX_ENABLE */ + +QDF_STATUS target_if_init(get_psoc_handle_callback psoc_hdl_cb) +{ + g_target_if_ctx = qdf_mem_malloc(sizeof(*g_target_if_ctx)); + if (!g_target_if_ctx) { + QDF_ASSERT(0); + return QDF_STATUS_E_NOMEM; + } + + qdf_spinlock_create(&g_target_if_ctx->lock); + + qdf_spin_lock_bh(&g_target_if_ctx->lock); + g_target_if_ctx->magic = TGT_MAGIC; + g_target_if_ctx->get_psoc_hdl_cb = psoc_hdl_cb; + qdf_spin_unlock_bh(&g_target_if_ctx->lock); + + target_if_direct_buf_rx_init(); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_deinit(void) +{ + if (!g_target_if_ctx) { + QDF_ASSERT(0); + target_if_err("target if ctx is null"); + return QDF_STATUS_E_INVAL; + } + + qdf_spin_lock_bh(&g_target_if_ctx->lock); + g_target_if_ctx->magic = 0; + g_target_if_ctx->get_psoc_hdl_cb = NULL; + g_target_if_ctx->get_pdev_hdl_cb = NULL; + g_target_if_ctx->service_ready_cb = NULL; + qdf_spin_unlock_bh(&g_target_if_ctx->lock); + + qdf_spinlock_destroy(&g_target_if_ctx->lock); + qdf_mem_free(g_target_if_ctx); + g_target_if_ctx = NULL; + + target_if_direct_buf_rx_deinit(); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(target_if_deinit); + +QDF_STATUS target_if_store_pdev_target_if_ctx( + get_pdev_handle_callback pdev_hdl_cb) +{ + if (!g_target_if_ctx) { + QDF_ASSERT(0); + target_if_err("target if ctx is null"); + return QDF_STATUS_E_INVAL; + } + + qdf_spin_lock_bh(&g_target_if_ctx->lock); + g_target_if_ctx->get_pdev_hdl_cb = pdev_hdl_cb; + qdf_spin_unlock_bh(&g_target_if_ctx->lock); + + return QDF_STATUS_SUCCESS; +} + +#ifndef WLAN_OFFCHAN_TXRX_ENABLE +static void target_if_offchan_txrx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* WLAN_OFFCHAN_TXRX_ENABLE */ + +#ifndef WLAN_ATF_ENABLE +static void target_if_atf_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* WLAN_ATF_ENABLE */ + +#ifndef WLAN_SA_API_ENABLE +static void target_if_sa_api_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* WLAN_SA_API_ENABLE */ + +#ifndef WLAN_CFR_ENABLE +static void target_if_cfr_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif + +#ifdef WLAN_SUPPORT_FILS +static void target_if_fd_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_fd_register_tx_ops(tx_ops); +} +#else +static void target_if_fd_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif + +#ifdef WIFI_POS_CONVERGED +static void target_if_wifi_pos_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_wifi_pos_register_tx_ops(tx_ops); +} +#else +static void target_if_wifi_pos_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif +#ifdef QCA_SUPPORT_SON +static void target_if_son_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_son_register_tx_ops(tx_ops); + return; +} +#else +static void target_if_son_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + return; +} +#endif + +#ifdef FEATURE_WLAN_TDLS +static void target_if_tdls_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_tdls_register_tx_ops(tx_ops); +} +#else +static void target_if_tdls_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* FEATURE_WLAN_TDLS */ + +#ifdef DFS_COMPONENT_ENABLE +static void target_if_dfs_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_register_dfs_tx_ops(tx_ops); +} +#else +static void target_if_dfs_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* DFS_COMPONENT_ENABLE */ + +#ifdef WLAN_CONV_SPECTRAL_ENABLE +static void target_if_sptrl_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_sptrl_register_tx_ops(tx_ops); +} +#else +static void target_if_sptrl_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ + +#ifdef DIRECT_BUF_RX_ENABLE +static void target_if_direct_buf_rx_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_direct_buf_rx_register_tx_ops(tx_ops); +} +#else +static void target_if_direct_buf_rx_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* DIRECT_BUF_RX_ENABLE */ + +#ifdef WLAN_SUPPORT_GREEN_AP +static QDF_STATUS target_if_green_ap_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + return target_if_register_green_ap_tx_ops(tx_ops); +} +#else +static QDF_STATUS target_if_green_ap_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUPPORT_GREEN_AP */ +#if defined(WLAN_CONV_CRYPTO_SUPPORTED) && defined(CRYPTO_SET_KEY_CONVERGED) +static void target_if_crypto_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_crypto_register_tx_ops(tx_ops); +} +#else +static inline void target_if_crypto_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif + +#ifdef FEATURE_COEX +static QDF_STATUS +target_if_coex_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + return target_if_coex_register_tx_ops(tx_ops); +} +#else +static inline QDF_STATUS +target_if_coex_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +static void target_if_target_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_target_tx_ops *target_tx_ops; + + if (!tx_ops) { + target_if_err("invalid tx_ops"); + return; + } + + target_tx_ops = &tx_ops->target_tx_ops; + + target_tx_ops->tgt_is_tgt_type_ar900b = + target_is_tgt_type_ar900b; + + target_tx_ops->tgt_is_tgt_type_ipq4019 = + target_is_tgt_type_ipq4019; + + target_tx_ops->tgt_is_tgt_type_qca9984 = + target_is_tgt_type_qca9984; + + target_tx_ops->tgt_is_tgt_type_qca9888 = + target_is_tgt_type_qca9888; + + target_tx_ops->tgt_is_tgt_type_adrastea = + target_is_tgt_type_adrastea; + + target_tx_ops->tgt_get_tgt_type = + lmac_get_tgt_type; + + target_tx_ops->tgt_get_tgt_version = + lmac_get_tgt_version; + + target_tx_ops->tgt_get_tgt_revision = + lmac_get_tgt_revision; +} + +static QDF_STATUS +target_if_cp_stats_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + return target_if_cp_stats_register_tx_ops(tx_ops); +} + +static QDF_STATUS +target_if_vdev_mgr_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + return target_if_vdev_mgr_register_tx_ops(tx_ops); +} + +#ifdef QCA_WIFI_FTM +static +void target_if_ftm_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_ftm_register_tx_ops(tx_ops); +} +#else +static +void target_if_ftm_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif + +#ifdef WLAN_FEATURE_GPIO_CFG +static +void target_if_gpio_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_gpio_register_tx_ops(tx_ops); +} +#else +static +void target_if_gpio_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif + +static +QDF_STATUS target_if_register_umac_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + /* call regulatory callback to register tx ops */ + target_if_register_regulatory_tx_ops(tx_ops); + + /* call umac callback to register legacy tx ops */ + wlan_lmac_if_umac_tx_ops_register(tx_ops); + + /* Register scan tx ops */ + target_if_scan_tx_ops_register(tx_ops); + + target_if_atf_tx_ops_register(tx_ops); + + target_if_sa_api_tx_ops_register(tx_ops); + + target_if_cfr_tx_ops_register(tx_ops); + + target_if_wifi_pos_tx_ops_register(tx_ops); + + target_if_dfs_tx_ops_register(tx_ops); + + target_if_son_tx_ops_register(tx_ops); + + target_if_tdls_tx_ops_register(tx_ops); + + target_if_fd_tx_ops_register(tx_ops); + + target_if_target_tx_ops_register(tx_ops); + + target_if_offchan_txrx_ops_register(tx_ops); + + target_if_green_ap_tx_ops_register(tx_ops); + + target_if_ftm_tx_ops_register(tx_ops); + + target_if_cp_stats_tx_ops_register(tx_ops); + + target_if_crypto_tx_ops_register(tx_ops); + + target_if_vdev_mgr_tx_ops_register(tx_ops); + + target_if_coex_tx_ops_register(tx_ops); + + target_if_gpio_tx_ops_register(tx_ops); + + /* Converged UMAC components to register their TX-ops here */ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + /* Converged UMAC components to register their TX-ops */ + target_if_register_umac_tx_ops(tx_ops); + + /* Components parallel to UMAC to register their TX-ops here */ + target_if_sptrl_tx_ops_register(tx_ops); + + /* Register direct buffer rx component tx ops here */ + target_if_direct_buf_rx_tx_ops_register(tx_ops); + +#ifdef CONVERGED_P2P_ENABLE + /* Converged UMAC components to register P2P TX-ops */ + target_if_p2p_register_tx_ops(tx_ops); +#endif + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(target_if_register_tx_ops); + +wmi_legacy_service_ready_callback +target_if_get_psoc_legacy_service_ready_cb(void) +{ + wmi_legacy_service_ready_callback service_ready_cb; + + qdf_spin_lock_bh(&g_target_if_ctx->lock); + if (g_target_if_ctx->service_ready_cb) + service_ready_cb = g_target_if_ctx->service_ready_cb; + else + service_ready_cb = NULL; + qdf_spin_unlock_bh(&g_target_if_ctx->lock); + + return service_ready_cb; +} +qdf_export_symbol(target_if_get_psoc_legacy_service_ready_cb); + +QDF_STATUS target_if_register_legacy_service_ready_cb( + wmi_legacy_service_ready_callback service_ready_cb) +{ + qdf_spin_lock_bh(&g_target_if_ctx->lock); + g_target_if_ctx->service_ready_cb = service_ready_cb; + qdf_spin_unlock_bh(&g_target_if_ctx->lock); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(target_if_register_legacy_service_ready_cb); + +QDF_STATUS target_if_alloc_pdev_tgt_info(struct wlan_objmgr_pdev *pdev) +{ + struct target_pdev_info *tgt_pdev_info; + + if (!pdev) { + target_if_err("pdev is null"); + return QDF_STATUS_E_INVAL; + } + + tgt_pdev_info = qdf_mem_malloc(sizeof(*tgt_pdev_info)); + + if (!tgt_pdev_info) + return QDF_STATUS_E_NOMEM; + + wlan_pdev_set_tgt_if_handle(pdev, tgt_pdev_info); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_free_pdev_tgt_info(struct wlan_objmgr_pdev *pdev) +{ + struct target_pdev_info *tgt_pdev_info; + + if (!pdev) { + target_if_err("pdev is null"); + return QDF_STATUS_E_INVAL; + } + + tgt_pdev_info = wlan_pdev_get_tgt_if_handle(pdev); + + wlan_pdev_set_tgt_if_handle(pdev, NULL); + + qdf_mem_free(tgt_pdev_info); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_alloc_psoc_tgt_info(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_psoc_info; + + if (!psoc) { + target_if_err("psoc is null"); + return QDF_STATUS_E_INVAL; + } + + tgt_psoc_info = qdf_mem_malloc(sizeof(*tgt_psoc_info)); + + if (!tgt_psoc_info) + return QDF_STATUS_E_NOMEM; + + wlan_psoc_set_tgt_if_handle(psoc, tgt_psoc_info); + target_psoc_set_preferred_hw_mode(tgt_psoc_info, WMI_HOST_HW_MODE_MAX); + + qdf_event_create(&tgt_psoc_info->info.event); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_free_psoc_tgt_info(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_psoc_info; + struct wlan_psoc_host_service_ext_param *ext_param; + + if (!psoc) { + target_if_err("psoc is null"); + return QDF_STATUS_E_INVAL; + } + + tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc); + + ext_param = target_psoc_get_service_ext_param(tgt_psoc_info); + if (!ext_param) { + target_if_err("tgt_psoc_info is NULL"); + return QDF_STATUS_E_INVAL; + } + init_deinit_chainmask_table_free(ext_param); + init_deinit_dbr_ring_cap_free(tgt_psoc_info); + init_deinit_spectral_scaling_params_free(tgt_psoc_info); + + qdf_event_destroy(&tgt_psoc_info->info.event); + + wlan_psoc_set_tgt_if_handle(psoc, NULL); + + qdf_mem_free(tgt_psoc_info); + + return QDF_STATUS_SUCCESS; +} + +bool target_is_tgt_type_ar900b(uint32_t target_type) +{ + return target_type == TARGET_TYPE_AR900B; +} + +bool target_is_tgt_type_ipq4019(uint32_t target_type) +{ + return target_type == TARGET_TYPE_IPQ4019; +} + +bool target_is_tgt_type_qca9984(uint32_t target_type) +{ + return target_type == TARGET_TYPE_QCA9984; +} + +bool target_is_tgt_type_qca9888(uint32_t target_type) +{ + return target_type == TARGET_TYPE_QCA9888; +} + +bool target_is_tgt_type_adrastea(uint32_t target_type) +{ + return target_type == TARGET_TYPE_ADRASTEA; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/inc/target_if_cp_stats.h b/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/inc/target_if_cp_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..58d940a6543908948869030bc339ac52936f2a7c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/inc/target_if_cp_stats.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_cp_stats.h + * + * This header file provide declarations required for Rx and Tx events from + * firmware + */ + +#ifndef __TARGET_IF_CP_STATS_H__ +#define __TARGET_IF_CP_STATS_H__ + +#include +#include +#include + +#ifdef QCA_SUPPORT_CP_STATS + +/** + * target_if_cp_stats_get_rx_ops() - get rx ops + * @tx_ops: pointer to lmac tx ops + * + * Return: pointer to rx ops + */ +static inline struct wlan_lmac_if_cp_stats_rx_ops * +target_if_cp_stats_get_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.rx_ops.cp_stats_rx_ops; +} + +/** + * target_if_cp_stats_get_tx_ops() - get tx ops + * @tx_ops: pointer to lmac tx ops + * + * Return: pointer to tx ops + */ +static inline struct wlan_lmac_if_cp_stats_tx_ops * +target_if_cp_stats_get_tx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.tx_ops.cp_stats_tx_ops; +} + +/** + * target_if_cp_stats_register_tx_ops() - define cp_stats lmac tx ops functions + * @tx_ops: pointer to lmac tx ops + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +target_if_cp_stats_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); +#else +static inline QDF_STATUS +target_if_cp_stats_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* QCA_SUPPORT_CP_STATS */ + +#endif /* __TARGET_IF_CP_STATS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_cp_stats.c b/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_cp_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..e0e99fedf8b7ef520b091001a3db49c3f46b1756 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_cp_stats.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_cp_stats.c + * + * This file provide definition for APIs registered through lmac Tx Ops + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static QDF_STATUS +target_if_cp_stats_register_event_handler(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + cp_stats_err("PSOC is NULL!"); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +target_if_cp_stats_unregister_event_handler(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + cp_stats_err("PSOC is NULL!"); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +target_if_cp_stats_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_cp_stats_tx_ops *cp_stats_tx_ops; + + if (!tx_ops) { + cp_stats_err("lmac tx ops is NULL!"); + return QDF_STATUS_E_INVAL; + } + + cp_stats_tx_ops = &tx_ops->cp_stats_tx_ops; + if (!cp_stats_tx_ops) { + cp_stats_err("lmac tx ops is NULL!"); + return QDF_STATUS_E_FAILURE; + } + + cp_stats_tx_ops->cp_stats_attach = + target_if_cp_stats_register_event_handler; + cp_stats_tx_ops->cp_stats_detach = + target_if_cp_stats_unregister_event_handler; + + return QDF_STATUS_SUCCESS; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_mc_cp_stats.c b/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_mc_cp_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..df77875133087d9ece2c6d1cfb2166629d3e6b84 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_mc_cp_stats.c @@ -0,0 +1,754 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_cp_stats.c + * + * This file provide definition for APIs registered through lmac Tx Ops + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../../../umac/cmn_services/utils/inc/wlan_utility.h" +#include +#include +#include +#include +#include + +#ifdef WLAN_FEATURE_MIB_STATS +static void target_if_cp_stats_free_mib_stats(struct stats_event *ev) +{ + qdf_mem_free(ev->mib_stats); + ev->mib_stats = NULL; +} +#else +static void target_if_cp_stats_free_mib_stats(struct stats_event *ev) +{ +} +#endif +static void target_if_cp_stats_free_stats_event(struct stats_event *ev) +{ + qdf_mem_free(ev->pdev_stats); + ev->pdev_stats = NULL; + qdf_mem_free(ev->peer_stats); + ev->peer_stats = NULL; + qdf_mem_free(ev->peer_adv_stats); + ev->peer_adv_stats = NULL; + qdf_mem_free(ev->peer_extended_stats); + ev->peer_extended_stats = NULL; + qdf_mem_free(ev->cca_stats); + ev->cca_stats = NULL; + qdf_mem_free(ev->vdev_summary_stats); + ev->vdev_summary_stats = NULL; + qdf_mem_free(ev->vdev_chain_rssi); + ev->vdev_chain_rssi = NULL; + target_if_cp_stats_free_mib_stats(ev); +} + +static QDF_STATUS target_if_cp_stats_extract_pdev_stats( + struct wmi_unified *wmi_hdl, + wmi_host_stats_event *stats_param, + struct stats_event *ev, + uint8_t *data) +{ + uint32_t i; + QDF_STATUS status; + wmi_host_pdev_stats pdev_stats; + + ev->num_pdev_stats = stats_param->num_pdev_stats; + if (!ev->num_pdev_stats) + return QDF_STATUS_SUCCESS; + + /* + * num_pdev_stats is validated within function wmi_extract_stats_param + * which is called to populated wmi_host_stats_event stats_param + */ + ev->pdev_stats = qdf_mem_malloc(sizeof(*ev->pdev_stats) * + ev->num_pdev_stats); + if (!ev->pdev_stats) + return QDF_STATUS_E_NOMEM; + + for (i = 0; i < ev->num_pdev_stats; i++) { + status = wmi_extract_pdev_stats(wmi_hdl, data, i, &pdev_stats); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("wmi_extract_pdev_stats failed"); + return status; + } + ev->pdev_stats[i].max_pwr = pdev_stats.chan_tx_pwr; + } + + return QDF_STATUS_SUCCESS; +} + +static void target_if_cp_stats_extract_peer_extd_stats( + struct wmi_unified *wmi_hdl, + wmi_host_stats_event *stats_param, + struct stats_event *ev, + uint8_t *data) + +{ + QDF_STATUS status; + uint32_t i; + wmi_host_peer_extd_stats peer_extd_stats; + void *soc = cds_get_context(QDF_MODULE_ID_SOC); + struct cdp_peer_stats *peer_stats; + + if (!stats_param->num_peer_extd_stats) + return; + + ev->peer_extended_stats = + qdf_mem_malloc(sizeof(*ev->peer_extended_stats) * + stats_param->num_peer_extd_stats); + if (!ev->peer_extended_stats) + return; + + ev->num_peer_extd_stats = stats_param->num_peer_extd_stats; + + for (i = 0; i < ev->num_peer_extd_stats; i++) { + status = wmi_extract_peer_extd_stats(wmi_hdl, data, i, + &peer_extd_stats); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("wmi_extract_peer_extd_stats failed"); + continue; + } + WMI_MAC_ADDR_TO_CHAR_ARRAY( + &peer_extd_stats.peer_macaddr, + ev->peer_extended_stats[i].peer_macaddr); + ev->peer_extended_stats[i].rx_mc_bc_cnt = + peer_extd_stats.rx_mc_bc_cnt; + + peer_stats = qdf_mem_malloc(sizeof(*peer_stats)); + if (!peer_stats) + continue; + + status = cdp_host_get_peer_stats(soc, VDEV_ALL, + ev->peer_extended_stats[i].peer_macaddr, + peer_stats); + if (status == QDF_STATUS_SUCCESS) + ev->peer_extended_stats[i].rx_mc_bc_cnt = + peer_stats->rx.multicast.num + + peer_stats->rx.bcast.num; + + qdf_mem_free(peer_stats); + } +} + +static QDF_STATUS target_if_cp_stats_extract_peer_stats( + struct wmi_unified *wmi_hdl, + wmi_host_stats_event *stats_param, + struct stats_event *ev, + uint8_t *data) +{ + uint32_t i; + QDF_STATUS status; + wmi_host_peer_stats peer_stats; + bool db2dbm_enabled; + struct wmi_host_peer_adv_stats *peer_adv_stats; + + /* Extract peer_stats */ + if (!stats_param->num_peer_stats) + return QDF_STATUS_SUCCESS; + + ev->peer_stats = qdf_mem_malloc(sizeof(*ev->peer_stats) * + stats_param->num_peer_stats); + if (!ev->peer_stats) + return QDF_STATUS_E_NOMEM; + ev->num_peer_stats = stats_param->num_peer_stats; + + db2dbm_enabled = wmi_service_enabled(wmi_hdl, + wmi_service_hw_db2dbm_support); + for (i = 0; i < ev->num_peer_stats; i++) { + status = wmi_extract_peer_stats(wmi_hdl, data, i, &peer_stats); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("wmi_extract_peer_stats failed"); + continue; + } + WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats.peer_macaddr, + ev->peer_stats[i].peer_macaddr); + ev->peer_stats[i].tx_rate = peer_stats.peer_tx_rate; + ev->peer_stats[i].rx_rate = peer_stats.peer_rx_rate; + if (db2dbm_enabled) + ev->peer_stats[i].peer_rssi = peer_stats.peer_rssi; + else + ev->peer_stats[i].peer_rssi = peer_stats.peer_rssi + + TGT_NOISE_FLOOR_DBM; + } + + target_if_cp_stats_extract_peer_extd_stats(wmi_hdl, stats_param, ev, + data); + + /* Extract peer_adv_stats */ + ev->num_peer_adv_stats = stats_param->num_peer_adv_stats; + if (!ev->num_peer_adv_stats) + return QDF_STATUS_SUCCESS; + + ev->peer_adv_stats = qdf_mem_malloc(sizeof(*ev->peer_adv_stats) * + ev->num_peer_adv_stats); + if (!ev->peer_adv_stats) + return QDF_STATUS_E_NOMEM; + + peer_adv_stats = qdf_mem_malloc(sizeof(*peer_adv_stats) * + ev->num_peer_adv_stats); + if (!peer_adv_stats) { + qdf_mem_free(ev->peer_adv_stats); + return QDF_STATUS_E_NOMEM; + } + + status = wmi_extract_peer_adv_stats(wmi_hdl, data, peer_adv_stats); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("wmi_extract_peer_stats failed"); + qdf_mem_free(peer_adv_stats); + qdf_mem_free(ev->peer_adv_stats); + ev->peer_adv_stats = NULL; + return QDF_STATUS_SUCCESS; + } + + for (i = 0; i < ev->num_peer_adv_stats; i++) { + qdf_mem_copy(&ev->peer_adv_stats[i].peer_macaddr, + &peer_adv_stats[i].peer_macaddr, + QDF_MAC_ADDR_SIZE); + ev->peer_adv_stats[i].fcs_count = peer_adv_stats[i].fcs_count; + ev->peer_adv_stats[i].rx_bytes = peer_adv_stats[i].rx_bytes; + ev->peer_adv_stats[i].rx_count = peer_adv_stats[i].rx_count; + } + qdf_mem_free(peer_adv_stats); + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_cp_stats_extract_cca_stats( + struct wmi_unified *wmi_hdl, + wmi_host_stats_event *stats_param, + struct stats_event *ev, uint8_t *data) +{ + QDF_STATUS status; + struct wmi_host_congestion_stats stats = {0}; + + status = wmi_extract_cca_stats(wmi_hdl, data, &stats); + if (QDF_IS_STATUS_ERROR(status)) + return QDF_STATUS_SUCCESS; + + ev->cca_stats = qdf_mem_malloc(sizeof(*ev->cca_stats)); + if (!ev->cca_stats) + return QDF_STATUS_E_NOMEM; + + ev->cca_stats->vdev_id = stats.vdev_id; + ev->cca_stats->congestion = stats.congestion; + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_MIB_STATS +static QDF_STATUS target_if_cp_stats_extract_mib_stats( + struct wmi_unified *wmi_hdl, + wmi_host_stats_event *stats_param, + struct stats_event *ev, uint8_t *data) +{ + QDF_STATUS status; + + if (!stats_param->num_mib_stats) + return QDF_STATUS_SUCCESS; + + if (stats_param->num_mib_stats != MAX_MIB_STATS || + (stats_param->num_mib_extd_stats && + stats_param->num_mib_extd_stats != MAX_MIB_STATS)) { + cp_stats_err("number of mib stats wrong, num_mib_stats %d, num_mib_extd_stats %d", + stats_param->num_mib_stats, + stats_param->num_mib_extd_stats); + return QDF_STATUS_E_INVAL; + } + + ev->num_mib_stats = stats_param->num_mib_stats; + + ev->mib_stats = qdf_mem_malloc(sizeof(*ev->mib_stats)); + if (!ev->mib_stats) + return QDF_STATUS_E_NOMEM; + + status = wmi_extract_mib_stats(wmi_hdl, data, ev->mib_stats); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("wmi_extract_mib_stats failed"); + return status; + } + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS target_if_cp_stats_extract_mib_stats( + struct wmi_unified *wmi_hdl, + wmi_host_stats_event *stats_param, + struct stats_event *ev, uint8_t *data) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +static QDF_STATUS target_if_cp_stats_extract_vdev_summary_stats( + struct wmi_unified *wmi_hdl, + wmi_host_stats_event *stats_param, + struct stats_event *ev, uint8_t *data) +{ + uint32_t i, j; + QDF_STATUS status; + int32_t bcn_snr, dat_snr; + wmi_host_vdev_stats vdev_stats; + bool db2dbm_enabled; + + ev->num_summary_stats = stats_param->num_vdev_stats; + if (!ev->num_summary_stats) + return QDF_STATUS_SUCCESS; + + ev->vdev_summary_stats = qdf_mem_malloc(sizeof(*ev->vdev_summary_stats) + * ev->num_summary_stats); + + if (!ev->vdev_summary_stats) + return QDF_STATUS_E_NOMEM; + + db2dbm_enabled = wmi_service_enabled(wmi_hdl, + wmi_service_hw_db2dbm_support); + for (i = 0; i < ev->num_summary_stats; i++) { + status = wmi_extract_vdev_stats(wmi_hdl, data, i, &vdev_stats); + if (QDF_IS_STATUS_ERROR(status)) + continue; + + bcn_snr = vdev_stats.vdev_snr.bcn_snr; + dat_snr = vdev_stats.vdev_snr.dat_snr; + ev->vdev_summary_stats[i].vdev_id = vdev_stats.vdev_id; + + cp_stats_debug("vdev %d SNR bcn: %d data: %d", + ev->vdev_summary_stats[i].vdev_id, bcn_snr, + dat_snr); + + for (j = 0; j < 4; j++) { + ev->vdev_summary_stats[i].stats.tx_frm_cnt[j] + = vdev_stats.tx_frm_cnt[j]; + ev->vdev_summary_stats[i].stats.fail_cnt[j] + = vdev_stats.fail_cnt[j]; + ev->vdev_summary_stats[i].stats.multiple_retry_cnt[j] + = vdev_stats.multiple_retry_cnt[j]; + } + + ev->vdev_summary_stats[i].stats.rx_frm_cnt = + vdev_stats.rx_frm_cnt; + ev->vdev_summary_stats[i].stats.rx_error_cnt = + vdev_stats.rx_err_cnt; + ev->vdev_summary_stats[i].stats.rx_discard_cnt = + vdev_stats.rx_discard_cnt; + ev->vdev_summary_stats[i].stats.ack_fail_cnt = + vdev_stats.ack_fail_cnt; + ev->vdev_summary_stats[i].stats.rts_succ_cnt = + vdev_stats.rts_succ_cnt; + ev->vdev_summary_stats[i].stats.rts_fail_cnt = + vdev_stats.rts_fail_cnt; + /* Update SNR and RSSI in SummaryStats */ + wlan_util_stats_get_rssi(db2dbm_enabled, bcn_snr, dat_snr, + &ev->vdev_summary_stats[i].stats.rssi); + ev->vdev_summary_stats[i].stats.snr = + ev->vdev_summary_stats[i].stats.rssi - + TGT_NOISE_FLOOR_DBM; + } + + return QDF_STATUS_SUCCESS; +} + + +static QDF_STATUS target_if_cp_stats_extract_vdev_chain_rssi_stats( + struct wmi_unified *wmi_hdl, + wmi_host_stats_event *stats_param, + struct stats_event *ev, uint8_t *data) +{ + uint32_t i, j; + QDF_STATUS status; + int32_t bcn_snr, dat_snr; + struct wmi_host_per_chain_rssi_stats rssi_stats; + bool db2dbm_enabled; + + ev->num_chain_rssi_stats = stats_param->num_rssi_stats; + if (!ev->num_chain_rssi_stats) + return QDF_STATUS_SUCCESS; + + ev->vdev_chain_rssi = qdf_mem_malloc(sizeof(*ev->vdev_chain_rssi) * + ev->num_chain_rssi_stats); + if (!ev->vdev_chain_rssi) + return QDF_STATUS_E_NOMEM; + + db2dbm_enabled = wmi_service_enabled(wmi_hdl, + wmi_service_hw_db2dbm_support); + for (i = 0; i < ev->num_chain_rssi_stats; i++) { + status = wmi_extract_per_chain_rssi_stats(wmi_hdl, data, i, + &rssi_stats); + if (QDF_IS_STATUS_ERROR(status)) + continue; + ev->vdev_chain_rssi[i].vdev_id = rssi_stats.vdev_id; + + for (j = 0; j < MAX_NUM_CHAINS; j++) { + dat_snr = rssi_stats.rssi_avg_data[j]; + bcn_snr = rssi_stats.rssi_avg_beacon[j]; + cp_stats_nofl_debug("Chain %d SNR bcn: %d data: %d", j, + bcn_snr, dat_snr); + /* + * Get the absolute rssi value from the current rssi + * value the snr value is hardcoded into 0 in the + * qcacld-new/CORE stack + */ + wlan_util_stats_get_rssi(db2dbm_enabled, bcn_snr, + dat_snr, + &ev->vdev_chain_rssi[i]. + chain_rssi[j]); + } + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_cp_stats_extract_event(struct wmi_unified *wmi_hdl, + struct stats_event *ev, + uint8_t *data) +{ + QDF_STATUS status; + wmi_host_stats_event stats_param = {0}; + + status = wmi_extract_stats_param(wmi_hdl, data, &stats_param); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("stats param extract failed: %d", status); + return status; + } + cp_stats_nofl_debug("num: pdev: %d, pdev_extd: %d, vdev: %d, peer: %d," + "peer_extd: %d rssi: %d, mib %d, mib_extd %d, " + "bcnflt: %d, channel: %d, bcn: %d, peer_extd2: %d," + "last_event: %x", + stats_param.num_pdev_stats, + stats_param.num_pdev_ext_stats, + stats_param.num_vdev_stats, + stats_param.num_peer_stats, + stats_param.num_peer_extd_stats, + stats_param.num_rssi_stats, + stats_param.num_mib_stats, + stats_param.num_mib_extd_stats, + stats_param.num_bcnflt_stats, + stats_param.num_chan_stats, + stats_param.num_bcn_stats, + stats_param.num_peer_adv_stats, stats_param.last_event); + + ev->last_event = stats_param.last_event; + status = target_if_cp_stats_extract_pdev_stats(wmi_hdl, &stats_param, + ev, data); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = target_if_cp_stats_extract_peer_stats(wmi_hdl, &stats_param, + ev, data); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = target_if_cp_stats_extract_cca_stats(wmi_hdl, &stats_param, + ev, data); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = target_if_cp_stats_extract_vdev_summary_stats(wmi_hdl, + &stats_param, + ev, data); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = target_if_cp_stats_extract_vdev_chain_rssi_stats(wmi_hdl, + &stats_param, + ev, data); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = target_if_cp_stats_extract_mib_stats(wmi_hdl, + &stats_param, + ev, data); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_mc_cp_stats_stats_event_handler() - function to handle stats event + * from firmware. + * @scn: scn handle + * @data: data buffer for event + * @datalen: data length + * + * Return: status of operation. + */ +static int target_if_mc_cp_stats_stats_event_handler(ol_scn_t scn, + uint8_t *data, + uint32_t datalen) +{ + QDF_STATUS status; + struct stats_event ev = {0}; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_cp_stats_rx_ops *rx_ops; + + if (!scn || !data) { + cp_stats_err("scn: 0x%pK, data: 0x%pK", scn, data); + return -EINVAL; + } + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + cp_stats_err("null psoc"); + return -EINVAL; + } + + rx_ops = target_if_cp_stats_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->process_stats_event) { + cp_stats_err("callback not registered"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + cp_stats_err("wmi_handle is null"); + return -EINVAL; + } + + status = target_if_cp_stats_extract_event(wmi_handle, &ev, data); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("extract event failed"); + goto end; + } + + status = rx_ops->process_stats_event(psoc, &ev); + +end: + target_if_cp_stats_free_stats_event(&ev); + + return qdf_status_to_os_return(status); +} + +static void target_if_cp_stats_inc_wake_lock_stats(uint32_t reason, + struct wake_lock_stats *stats, + uint32_t *unspecified_wake_count) +{ + switch (reason) { + case WOW_REASON_UNSPECIFIED: + (*unspecified_wake_count)++; + break; + + case WOW_REASON_ASSOC_REQ_RECV: + stats->mgmt_assoc++; + break; + + case WOW_REASON_DISASSOC_RECVD: + stats->mgmt_disassoc++; + break; + + case WOW_REASON_ASSOC_RES_RECV: + stats->mgmt_assoc_resp++; + break; + + case WOW_REASON_REASSOC_REQ_RECV: + stats->mgmt_reassoc++; + break; + + case WOW_REASON_REASSOC_RES_RECV: + stats->mgmt_reassoc_resp++; + break; + + case WOW_REASON_AUTH_REQ_RECV: + stats->mgmt_auth++; + break; + + case WOW_REASON_DEAUTH_RECVD: + stats->mgmt_deauth++; + break; + + case WOW_REASON_ACTION_FRAME_RECV: + stats->mgmt_action++; + break; + + case WOW_REASON_NLOD: + stats->pno_match_wake_up_count++; + break; + + case WOW_REASON_NLO_SCAN_COMPLETE: + stats->pno_complete_wake_up_count++; + break; + + case WOW_REASON_LOW_RSSI: + stats->low_rssi_wake_up_count++; + break; + + case WOW_REASON_EXTSCAN: + stats->gscan_wake_up_count++; + break; + + case WOW_REASON_RSSI_BREACH_EVENT: + stats->rssi_breach_wake_up_count++; + break; + + case WOW_REASON_OEM_RESPONSE_EVENT: + stats->oem_response_wake_up_count++; + break; + + case WOW_REASON_11D_SCAN: + stats->scan_11d++; + break; + + case WOW_REASON_CHIP_POWER_FAILURE_DETECT: + stats->pwr_save_fail_detected++; + break; + + default: + break; + } +} + +static QDF_STATUS +target_if_cp_stats_register_event_handler(struct wlan_objmgr_psoc *psoc) +{ + int ret_val; + struct wmi_unified *wmi_handle; + + if (!psoc) { + cp_stats_err("PSOC is NULL!"); + return QDF_STATUS_E_NULL_VALUE; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + cp_stats_err("wmi_handle is null"); + return QDF_STATUS_E_INVAL; + } + + ret_val = wmi_unified_register_event_handler( + wmi_handle, + wmi_update_stats_event_id, + target_if_mc_cp_stats_stats_event_handler, + WMI_RX_WORK_CTX); + if (ret_val) + cp_stats_err("Failed to register stats event cb"); + + return qdf_status_from_os_return(ret_val); +} + +static QDF_STATUS +target_if_cp_stats_unregister_event_handler(struct wlan_objmgr_psoc *psoc) +{ + struct wmi_unified *wmi_handle; + + if (!psoc) { + cp_stats_err("PSOC is NULL!"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + cp_stats_err("wmi_handle is null"); + return QDF_STATUS_E_INVAL; + } + wmi_unified_unregister_event_handler(wmi_handle, + wmi_update_stats_event_id); + + return QDF_STATUS_SUCCESS; +} + +static uint32_t get_stats_id(enum stats_req_type type) +{ + switch (type) { + default: + break; + case TYPE_CONNECTION_TX_POWER: + return WMI_REQUEST_PDEV_STAT; + case TYPE_PEER_STATS: + return WMI_REQUEST_PEER_STAT | WMI_REQUEST_PEER_EXTD_STAT; + case TYPE_STATION_STATS: + return (WMI_REQUEST_AP_STAT | + WMI_REQUEST_PEER_STAT | + WMI_REQUEST_VDEV_STAT | + WMI_REQUEST_PDEV_STAT | + WMI_REQUEST_PEER_EXTD2_STAT | + WMI_REQUEST_RSSI_PER_CHAIN_STAT); + case TYPE_MIB_STATS: + return (WMI_REQUEST_MIB_STAT | WMI_REQUEST_MIB_EXTD_STAT); + } + + return 0; +} + +/** + * target_if_cp_stats_send_stats_req() - API to send stats request to wmi + * @psoc: pointer to psoc object + * @req: pointer to object containing stats request parameters + * + * Return: status of operation. + */ +static QDF_STATUS target_if_cp_stats_send_stats_req( + struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *req) + +{ + struct wmi_unified *wmi_handle; + struct stats_request_params param = {0}; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + cp_stats_err("wmi_handle is null."); + return QDF_STATUS_E_NULL_VALUE; + } + /* refer (WMI_REQUEST_STATS_CMDID) */ + param.stats_id = get_stats_id(type); + param.vdev_id = req->vdev_id; + param.pdev_id = req->pdev_id; + + return wmi_unified_stats_request_send(wmi_handle, req->peer_mac_addr, + ¶m); +} + +QDF_STATUS +target_if_cp_stats_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_cp_stats_tx_ops *cp_stats_tx_ops; + + if (!tx_ops) { + cp_stats_err("lmac tx ops is NULL!"); + return QDF_STATUS_E_INVAL; + } + + cp_stats_tx_ops = &tx_ops->cp_stats_tx_ops; + if (!cp_stats_tx_ops) { + cp_stats_err("lmac tx ops is NULL!"); + return QDF_STATUS_E_FAILURE; + } + + cp_stats_tx_ops->cp_stats_attach = + target_if_cp_stats_register_event_handler; + cp_stats_tx_ops->cp_stats_detach = + target_if_cp_stats_unregister_event_handler; + cp_stats_tx_ops->inc_wake_lock_stats = + target_if_cp_stats_inc_wake_lock_stats; + cp_stats_tx_ops->send_req_stats = target_if_cp_stats_send_stats_req; + + return QDF_STATUS_SUCCESS; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/crypto/inc/target_if_crypto.h b/drivers/staging/qca-wifi-host-cmn/target_if/crypto/inc/target_if_crypto.h new file mode 100644 index 0000000000000000000000000000000000000000..4be54582b18d2c9f20286665d5dbf0505f1cf046 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/crypto/inc/target_if_crypto.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declares crypto functions interfacing with the target + */ + +#ifndef __TARGET_IF_CRYPTO_H__ +#define __TARGET_IF_CRYPTO_H__ +#include + +/** + * target_if_crypto_register_tx_ops() - lmac handler to register + * crypto tx_ops callback functions + * @tx_ops: wlan_lmac_if_tx_ops object + * + * Return: QDF_STATUS + */ +QDF_STATUS target_if_crypto_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_crypto_set_key() - lmac handler to set key + * @vdev: VDEV object pointer + * @req: Key parameters that are required to install the key + * @key_type: Pairwise or Group Key type + * + * Return: QDF_STATUS + */ +QDF_STATUS target_if_crypto_set_key(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_key *req, + enum wlan_crypto_key_type key_type); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/crypto/src/target_if_crypto.c b/drivers/staging/qca-wifi-host-cmn/target_if/crypto/src/target_if_crypto.c new file mode 100644 index 0000000000000000000000000000000000000000..53fc94a5f2515e45da3ea7ecde531c4a599949a6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/crypto/src/target_if_crypto.c @@ -0,0 +1,407 @@ +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: offload lmac interface APIs definitions for crypto + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_crypto_def_i.h" +#include "wlan_crypto_obj_mgr_i.h" + +#ifdef FEATURE_WLAN_WAPI +#ifdef FEATURE_WAPI_BIG_ENDIAN +/* + * All lithium firmware expects WAPI in big endian + * format , whereas helium firmware's expect otherwise + */ + +static void wlan_crypto_set_wapi_key(struct wlan_objmgr_vdev *vdev, + bool pairwise, + enum wlan_crypto_cipher_type cipher_type, + struct set_key_params *params) +{ + static const unsigned char tx_iv[16] = {0x5c, 0x36, 0x5c, 0x36, 0x5c, + 0x36, 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x36, 0x5c, + 0x36}; + + static const unsigned char rx_iv[16] = {0x5c, 0x36, 0x5c, 0x36, 0x5c, + 0x36, 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x36, 0x5c, + 0x37}; + + if (cipher_type != WLAN_CRYPTO_CIPHER_WAPI_SMS4 && + cipher_type != WLAN_CRYPTO_CIPHER_WAPI_GCM4) + return; + + if (vdev->vdev_mlme.vdev_opmode == QDF_SAP_MODE || + vdev->vdev_mlme.vdev_opmode == QDF_P2P_GO_MODE) { + qdf_mem_copy(¶ms->rx_iv, &tx_iv, + WLAN_CRYPTO_WAPI_IV_SIZE); + qdf_mem_copy(params->tx_iv, &rx_iv, + WLAN_CRYPTO_WAPI_IV_SIZE); + } else { + qdf_mem_copy(params->rx_iv, &rx_iv, + WLAN_CRYPTO_WAPI_IV_SIZE); + qdf_mem_copy(params->tx_iv, &tx_iv, + WLAN_CRYPTO_WAPI_IV_SIZE); + } + + params->key_txmic_len = WLAN_CRYPTO_MIC_LEN; + params->key_rxmic_len = WLAN_CRYPTO_MIC_LEN; +} +#else +static void wlan_crypto_set_wapi_key(struct wlan_objmgr_vdev *vdev, + bool pairwise, + enum wlan_crypto_cipher_type cipher_type, + struct set_key_params *params) +{ + static const unsigned char tx_iv[16] = {0x36, 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x36, 0x5c, + 0x36, 0x5c, 0x36, 0x5c, 0x36, + 0x5c}; + + static const unsigned char rx_iv[16] = {0x5c, 0x36, 0x5c, 0x36, 0x5c, + 0x36, 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x36, 0x5c, + 0x37}; + + if (cipher_type != WLAN_CRYPTO_CIPHER_WAPI_SMS4 && + cipher_type != WLAN_CRYPTO_CIPHER_WAPI_GCM4) + return; + + qdf_mem_copy(¶ms->rx_iv, &rx_iv, + WLAN_CRYPTO_WAPI_IV_SIZE); + qdf_mem_copy(¶ms->tx_iv, &tx_iv, + WLAN_CRYPTO_WAPI_IV_SIZE); + + if (vdev->vdev_mlme.vdev_opmode == QDF_SAP_MODE) { + if (pairwise) + params->tx_iv[0] = 0x37; + + params->rx_iv[WLAN_CRYPTO_WAPI_IV_SIZE - 1] = 0x36; + } else { + if (!pairwise) + params->rx_iv[WLAN_CRYPTO_WAPI_IV_SIZE - 1] = 0x36; + } + + params->key_txmic_len = WLAN_CRYPTO_MIC_LEN; + params->key_rxmic_len = WLAN_CRYPTO_MIC_LEN; +} +#endif /* FEATURE_WAPI_BIG_ENDIAN */ +#else +static inline void wlan_crypto_set_wapi_key(struct wlan_objmgr_vdev *vdev, + bool pairwise, + enum wlan_crypto_cipher_type cipher, + struct set_key_params *params) +{ +} +#endif /* FEATURE_WLAN_WAPI */ + +#ifdef BIG_ENDIAN_HOST +static void wlan_crypto_endianness_conversion(uint8_t *dest, uint8_t *src, + uint32_t keylen) +{ + int8_t i; + + for (i = 0; i < roundup(keylen, sizeof(uint32_t)) / 4; i++) { + *dest = le32_to_cpu(*src); + dest++; + src++; + } +} +#else +static void wlan_crypto_endianness_conversion(uint8_t *dest, uint8_t *src, + uint32_t keylen) +{ + qdf_mem_copy(dest, src, keylen); +} +#endif + +QDF_STATUS target_if_crypto_set_key(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_key *req, + enum wlan_crypto_key_type key_type) +{ + struct set_key_params params = {0}; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + enum cdp_sec_type sec_type = cdp_sec_type_none; + void *soc = cds_get_context(QDF_MODULE_ID_SOC); + uint32_t pn[4] = {0, 0, 0, 0}; + bool peer_exist = false; + uint8_t def_tx_idx; + wmi_unified_t pdev_wmi_handle; + bool pairwise; + QDF_STATUS status; + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + target_if_err("Invalid PDEV"); + return QDF_STATUS_E_FAILURE; + } + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + target_if_err("Invalid PSOC"); + return QDF_STATUS_E_FAILURE; + } + soc = wlan_psoc_get_dp_handle(psoc); + if (!soc) { + target_if_err("Invalid DP Handle"); + return QDF_STATUS_E_FAILURE; + } + params.vdev_id = wlan_vdev_get_id(vdev); + params.key_idx = req->keyix; + qdf_mem_copy(params.peer_mac, req->macaddr, QDF_MAC_ADDR_SIZE); + pdev_wmi_handle = GET_WMI_HDL_FROM_PDEV(pdev); + if (!pdev_wmi_handle) { + target_if_err("Invalid PDEV WMI handle"); + return QDF_STATUS_E_FAILURE; + } + + params.key_flags = req->flags; + if (key_type != WLAN_CRYPTO_KEY_TYPE_UNICAST) { + pairwise = false; + params.key_flags |= GROUP_USAGE; + + } else { + pairwise = true; + params.key_flags |= PAIRWISE_USAGE; + } + qdf_mem_copy(¶ms.key_rsc_ctr, + &req->keyrsc[0], sizeof(uint64_t)); + + peer_exist = cdp_find_peer_exist(soc, pdev->pdev_objmgr.wlan_pdev_id, + req->macaddr); + target_if_debug("key_type %d, mac: %02x:%02x:%02x:%02x:%02x:%02x", + key_type, req->macaddr[0], req->macaddr[1], + req->macaddr[2], req->macaddr[3], req->macaddr[4], + req->macaddr[5]); + + if ((key_type == WLAN_CRYPTO_KEY_TYPE_UNICAST) && !peer_exist) { + target_if_err("Invalid peer"); + return QDF_STATUS_E_FAILURE; + } + + params.key_cipher = wlan_crypto_cipher_to_wmi_cipher(req->cipher_type); + sec_type = wlan_crypto_cipher_to_cdp_sec_type(req->cipher_type); + wlan_crypto_set_wapi_key(vdev, pairwise, req->cipher_type, ¶ms); + + switch (req->cipher_type) { + case WLAN_CRYPTO_CIPHER_WEP: + case WLAN_CRYPTO_CIPHER_WEP_40: + case WLAN_CRYPTO_CIPHER_WEP_104: + def_tx_idx = wlan_crypto_get_default_key_idx(vdev, false); + if (pairwise && params.key_idx == def_tx_idx) + params.key_flags |= TX_USAGE; + else if ((vdev->vdev_mlme.vdev_opmode == QDF_SAP_MODE) && + (params.key_idx == def_tx_idx)) + params.key_flags |= TX_USAGE; + break; + case WLAN_CRYPTO_CIPHER_TKIP: + params.key_txmic_len = WLAN_CRYPTO_MIC_LEN; + params.key_rxmic_len = WLAN_CRYPTO_MIC_LEN; + break; + default: + break; + } + + wlan_crypto_endianness_conversion(¶ms.key_data[0], + &req->keyval[0], + req->keylen); + params.key_len = req->keylen; + + /* Set PN check & security type in data path */ + qdf_mem_copy(&pn[0], ¶ms.key_rsc_ctr, sizeof(pn)); + cdp_set_pn_check(soc, vdev->vdev_objmgr.vdev_id, req->macaddr, + sec_type, pn); + + cdp_set_key_sec_type(soc, vdev->vdev_objmgr.vdev_id, req->macaddr, + sec_type, pairwise); + + cdp_set_key(soc, vdev->vdev_objmgr.vdev_id, req->macaddr, pairwise, + (uint32_t *)(req->keyval + WLAN_CRYPTO_IV_SIZE + + WLAN_CRYPTO_MIC_LEN)); + + target_if_debug("vdev_id:%d, key: idx:%d,len:%d", params.vdev_id, + params.key_idx, params.key_len); + target_if_debug("peer mac "QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(params.peer_mac)); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_CRYPTO, QDF_TRACE_LEVEL_DEBUG, + ¶ms.key_rsc_ctr, sizeof(uint64_t)); + status = wmi_unified_setup_install_key_cmd(pdev_wmi_handle, ¶ms); + + /* Zero-out local key variables */ + qdf_mem_zero(¶ms, sizeof(struct set_key_params)); + + return status; +} + +/** + * target_if_crypto_install_key_comp_evt_handler() - install key complete + * handler + * @handle: wma handle + * @event: event data + * @len: data length + * + * This event is sent by fw once WPA/WPA2 keys are installed in fw. + * + * Return: 0 for success or error code + */ +static int +target_if_crypto_install_key_comp_evt_handler(void *handle, uint8_t *event, + uint32_t len) +{ + struct wlan_crypto_comp_priv *priv_obj; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev; + struct wmi_install_key_comp_event params; + QDF_STATUS status; + wmi_unified_t wmi_handle; + struct crypto_add_key_result result; + + if (!event || !handle) { + target_if_err("invalid param"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(handle); + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("invalid wmi handle"); + return -EINVAL; + } + + status = wmi_extract_install_key_comp_event(wmi_handle, event, + len, ¶ms); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("received invalid buf from target"); + return -EINVAL; + } + + target_if_debug("vdev %d mac " QDF_MAC_ADDR_FMT " ix %x flags %x status %d", + params.vdev_id, + QDF_MAC_ADDR_REF(params.peer_macaddr), + params.key_ix, params.key_flags, params.status); + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, params.vdev_id, + WLAN_CRYPTO_ID); + if (!vdev) { + target_if_err("vdev %d is null", params.vdev_id); + return -EINVAL; + } + + priv_obj = wlan_get_vdev_crypto_obj(vdev); + if (!priv_obj) { + target_if_err("priv_obj is null"); + wlan_objmgr_vdev_release_ref(vdev, WLAN_CRYPTO_ID); + return -EINVAL; + } + + result.vdev_id = params.vdev_id; + result.key_ix = params.key_ix; + result.key_flags = params.key_flags; + result.status = params.status; + qdf_mem_copy(result.peer_macaddr, params.peer_macaddr, + QDF_MAC_ADDR_SIZE); + + if (priv_obj->add_key_cb) + priv_obj->add_key_cb(priv_obj->add_key_ctx, &result); + + wlan_objmgr_vdev_release_ref(vdev, WLAN_CRYPTO_ID); + + return 0; +} + +static QDF_STATUS +target_if_crypto_register_events(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + + if (!psoc || !GET_WMI_HDL_FROM_PSOC(psoc)) { + target_if_err("psoc or psoc->tgt_if_handle is null"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_register_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_vdev_install_key_complete_event_id, + target_if_crypto_install_key_comp_evt_handler, + WMI_RX_WORK_CTX); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("register_event_handler failed: err %d", status); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +target_if_crypto_deregister_events(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc || !GET_WMI_HDL_FROM_PSOC(psoc)) { + target_if_err("psoc or psoc->tgt_if_handle is null"); + return QDF_STATUS_E_INVAL; + } + + wmi_unified_unregister_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_vdev_install_key_complete_event_id); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_crypto_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_crypto_tx_ops *crypto; + + if (!tx_ops) { + target_if_err("txops NULL"); + return QDF_STATUS_E_FAILURE; + } + crypto = &tx_ops->crypto_tx_ops; + + crypto->set_key = target_if_crypto_set_key; + crypto->register_events = target_if_crypto_register_events; + crypto->deregister_events = target_if_crypto_deregister_events; + + return QDF_STATUS_SUCCESS; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs.h b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs.h new file mode 100644 index 0000000000000000000000000000000000000000..c903cd0baa86decc8523f9b125d8b19b55fdde4c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_dfs.h + * This file contains dfs target interface + */ + +/** + * target_if_register_dfs_tx_ops() - register dfs tx ops + * @dfs_tx_ops: tx ops pointer + * + * Register dfs tx ops + * + * Return: QDF_STATUS + */ +QDF_STATUS target_if_register_dfs_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_dfs_get_rx_ops() - Get dfs_rx_ops + * @psoc: psoc handle. + * + * Return: dfs_rx_ops. + */ +static inline struct wlan_lmac_if_dfs_rx_ops * +target_if_dfs_get_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.rx_ops.dfs_rx_ops; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs_full_offload.h b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs_full_offload.h new file mode 100644 index 0000000000000000000000000000000000000000..513b8413d9873c696d3a1a9fc244beb20daab493 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs_full_offload.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_dfs_full_offload.h + * This file contains dfs target interface for full-offload. + */ + +#ifndef _TARGET_IF_DFS_FULL_OFFLOAD_H_ +#define _TARGET_IF_DFS_FULL_OFFLOAD_H_ + +/** + * target_if_dfs_reg_offload_events() - registers dfs events for full offload. + * @psoc: Pointer to psoc object. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_FULL_OFFLOAD) +QDF_STATUS target_if_dfs_reg_offload_events(struct wlan_objmgr_psoc *psoc); +#else +static QDF_STATUS +target_if_dfs_reg_offload_events(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * target_process_bang_radar_cmd() - fill unit test args and send bangradar + * command to firmware. + * @pdev: Pointer to DFS pdev object. + * @dfs_unit_test: Pointer to dfs_unit_test structure. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_FULL_OFFLOAD) +QDF_STATUS target_process_bang_radar_cmd(struct wlan_objmgr_pdev *pdev, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test); +#else +static QDF_STATUS target_process_bang_radar_cmd(struct wlan_objmgr_pdev *pdev, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined(QCA_SUPPORT_AGILE_DFS) +/** + * target_send_ocac_abort_cmd() - Send off channel CAC abort to target for + * to cancel current offchannel CAC + * @pdev: Pointer to DFS pdev object. + * + * Return: QDF_STATUS + */ +QDF_STATUS target_send_ocac_abort_cmd(struct wlan_objmgr_pdev *pdev); +/** + * target_send_agile_ch_cfg_cmd() - Send agile channel parameters to target for + * off channel precac. + * @pdev: Pointer to DFS pdev object. + * @adfs_param: Agile-DFS CAC parameters. + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_send_agile_ch_cfg_cmd(struct wlan_objmgr_pdev *pdev, + struct dfs_agile_cac_params *adfs_param); +#else +static inline QDF_STATUS +target_send_ocac_abort_cmd(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +target_send_agile_ch_cfg_cmd(struct wlan_objmgr_pdev *pdev, + struct dfs_agile_cac_params *adfs_param) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) +/** + * target_send_usenol_pdev_param - send usenol pdev param to FW. + * @pdev: Pointer to pdev object. + * @usenol: Value of user configured usenol. + * + * Return: QDF_STATUS + */ +QDF_STATUS target_send_usenol_pdev_param(struct wlan_objmgr_pdev *pdev, + bool usenol); + +/** + * target_send_subchan_marking_pdev_param - Send subchannel marking + * pdev param to FW. + * @pdev: Pointer to pdev object. + * @subchanmark: Value of user configured subchannel_marking. + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_send_subchan_marking_pdev_param(struct wlan_objmgr_pdev *pdev, + bool subchanmark); + +#else +static inline QDF_STATUS +target_send_usenol_pdev_param(struct wlan_objmgr_pdev *pdev, + bool usenol) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +target_send_subchan_marking_pdev_param(struct wlan_objmgr_pdev *pdev, + bool subchanmark) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif /* _TARGET_IF_DFS_FULL_OFFLOAD_H_ */ + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs_partial_offload.h b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs_partial_offload.h new file mode 100644 index 0000000000000000000000000000000000000000..f81cce5c0a363f59e53b05674cfc1f62db4bfb07 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs_partial_offload.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_dfs_partial_offload.h + * This file contains dfs target interface for partial offload. + */ + +#ifndef _TARGET_IF_DFS_PARTIAL_OFFLOAD_H_ +#define _TARGET_IF_DFS_PARTIAL_OFFLOAD_H_ + +/** + * target_if_dfs_reg_phyerr_events() - register phyerror events. + * @psoc: Pointer to psoc object. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +QDF_STATUS target_if_dfs_reg_phyerr_events(struct wlan_objmgr_psoc *psoc); +#else +static QDF_STATUS +target_if_dfs_reg_phyerr_events(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * target_if_dfs_get_caps() - get dfs caps. + * @pdev: Pointer to DFS pdev object. + * @dfs_caps: Pointer to dfs_caps structure. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +QDF_STATUS target_if_dfs_get_caps(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_caps *dfs_caps); +#else +static inline QDF_STATUS target_if_dfs_get_caps(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_caps *dfs_caps) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * target_if_dfs_status_check_event_handler() - Host dfs confirmation event + * handler. + * @scn: Handle to HIF context + * @data: radar event buffer + * @datalen: radar event buffer length + * + * Return: 0 on success + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +int target_if_dfs_status_check_event_handler(ol_scn_t scn, + uint8_t *data, + uint32_t datalen); +#else +static inline +int target_if_dfs_status_check_event_handler(ol_scn_t scn, + uint8_t *data, + uint32_t datalen) +{ + return 0; +} +#endif + +/** + * target_if_dfs_send_avg_params_to_fw() - Send average parameters to FW. + * @pdev: pdev pointer + * @params: Pointer to dfs_radar_found_params structure. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS target_if_dfs_send_avg_params_to_fw( + struct wlan_objmgr_pdev *pdev, + struct dfs_radar_found_params *params); +#else +static inline +QDF_STATUS target_if_dfs_send_avg_params_to_fw( + struct wlan_objmgr_pdev *pdev, + struct dfs_radar_found_params *params) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif /* _TARGET_IF_DFS_PARTIAL_OFFLOAD_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs.c b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs.c new file mode 100644 index 0000000000000000000000000000000000000000..e040aea2bcecf8020ab724f3e0e2d1baa26091c0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs.c @@ -0,0 +1,412 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_dfs.c + * This file contains dfs target interface + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_dfs_tgt_api.h" +#include "target_type.h" +#include +#include +#include +#include + +/** + * target_if_dfs_register_host_status_check_event() - Register host dfs + * confirmation event. + * @psoc: pointer to psoc. + * + * Return: QDF_STATUS. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +static QDF_STATUS target_if_dfs_register_host_status_check_event( + struct wlan_objmgr_psoc *psoc) + +{ + wmi_unified_t wmi_handle; + QDF_STATUS retval; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return QDF_STATUS_E_FAILURE; + } + + retval = wmi_unified_register_event(wmi_handle, + wmi_host_dfs_status_check_event_id, + target_if_dfs_status_check_event_handler); + if (QDF_IS_STATUS_ERROR(retval)) + target_if_err("wmi_dfs_radar_detection_event_id ret=%d", + retval); + + return retval; +} +#else +static QDF_STATUS target_if_dfs_register_host_status_check_event( + struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * target_if_is_dfs_3() - Is dfs3 support or not + * @target_type: target type being used. + * + * Return: true if dfs3 is supported, false otherwise. + */ +static bool target_if_is_dfs_3(uint32_t target_type) +{ + bool is_dfs_3; + + switch (target_type) { + case TARGET_TYPE_AR6320: + is_dfs_3 = false; + break; + case TARGET_TYPE_ADRASTEA: + is_dfs_3 = true; + break; + default: + is_dfs_3 = true; + } + + return is_dfs_3; +} + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * target_if_radar_event_handler() - handle radar event when + * phyerr filter offload is enabled. + * @scn: Handle to HIF context + * @data: radar event buffer + * @datalen: radar event buffer length + * + * Return: 0 on success; error code otherwise + */ +static int target_if_radar_event_handler( + ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + struct radar_event_info wlan_radar_event; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wlan_lmac_if_dfs_rx_ops *dfs_rx_ops; + struct wmi_unified *wmi_handle; + + if (!scn || !data) { + target_if_err("scn: %pK, data: %pK", scn, data); + return -EINVAL; + } + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + dfs_rx_ops = target_if_dfs_get_rx_ops(psoc); + + if (!dfs_rx_ops || !dfs_rx_ops->dfs_process_phyerr_filter_offload) { + target_if_err("Invalid dfs_rx_ops: %pK", dfs_rx_ops); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI context"); + return -EINVAL; + } + + if (QDF_IS_STATUS_ERROR(wmi_extract_wlan_radar_event_info( + wmi_handle, data, + &wlan_radar_event, datalen))) { + target_if_err("failed to extract wlan radar event"); + return -EFAULT; + } + pdev = wlan_objmgr_get_pdev_by_id(psoc, wlan_radar_event.pdev_id, + WLAN_DFS_ID); + if (!pdev) { + target_if_err("null pdev"); + return -EINVAL; + } + dfs_rx_ops->dfs_process_phyerr_filter_offload(pdev, + &wlan_radar_event); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + + return 0; +} + +/** + * target_if_reg_phyerr_events() - register dfs phyerr radar event. + * @psoc: pointer to psoc. + * @pdev: pointer to pdev. + * + * Return: QDF_STATUS. + */ +static QDF_STATUS target_if_reg_phyerr_events_dfs2( + struct wlan_objmgr_psoc *psoc) +{ + int ret; + wmi_unified_t wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return QDF_STATUS_E_INVAL; + } + + ret = wmi_unified_register_event(wmi_handle, + wmi_dfs_radar_event_id, + target_if_radar_event_handler); + if (ret) { + target_if_err("failed to register wmi_dfs_radar_event_id"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS target_if_reg_phyerr_events_dfs2( + struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +static bool target_if_dfs_offload(struct wlan_objmgr_psoc *psoc) +{ + wmi_unified_t wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return false; + } + + return wmi_service_enabled(wmi_handle, + wmi_service_dfs_phyerr_offload); +} + +static QDF_STATUS target_if_dfs_get_target_type(struct wlan_objmgr_pdev *pdev, + uint32_t *target_type) +{ + struct wlan_objmgr_psoc *psoc; + struct target_psoc_info *tgt_psoc_info; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + target_if_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + + tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_psoc_info) { + target_if_err("null tgt_psoc_info"); + return QDF_STATUS_E_FAILURE; + } + *target_type = target_psoc_get_target_type(tgt_psoc_info); + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_dfs_register_event_handler( + struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_psoc_info; + + if (!psoc) { + target_if_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + + if (!target_if_dfs_offload(psoc)) { + tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_psoc_info) { + target_if_err("null tgt_psoc_info"); + return QDF_STATUS_E_FAILURE; + } + + target_if_dfs_register_host_status_check_event(psoc); + + if (target_if_is_dfs_3( + target_psoc_get_target_type(tgt_psoc_info))) + return target_if_dfs_reg_phyerr_events(psoc); + else + return target_if_reg_phyerr_events_dfs2(psoc); + } else { + return target_if_dfs_reg_offload_events(psoc); + } +} + +static QDF_STATUS target_if_dfs_is_pdev_5ghz(struct wlan_objmgr_pdev *pdev, + bool *is_5ghz) +{ + struct wlan_objmgr_psoc *psoc; + uint8_t pdev_id; + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap_ptr; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + target_if_err("dfs: null psoc"); + return QDF_STATUS_E_FAILURE; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + reg_cap_ptr = ucfg_reg_get_hal_reg_cap(psoc); + if (!reg_cap_ptr) { + target_if_err("dfs: reg cap null"); + return QDF_STATUS_E_FAILURE; + } + + if (reg_cap_ptr[pdev_id].wireless_modes & + WMI_HOST_REGDMN_MODE_11A) + *is_5ghz = true; + else + *is_5ghz = false; + + return QDF_STATUS_SUCCESS; +} + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * target_if_dfs_set_phyerr_filter_offload() - config phyerr filter offload. + * @pdev: Pointer to DFS pdev object. + * @dfs_phyerr_filter_offload: Phyerr filter offload value. + * + * Return: QDF_STATUS + */ +static QDF_STATUS target_if_dfs_set_phyerr_filter_offload( + struct wlan_objmgr_pdev *pdev, + bool dfs_phyerr_filter_offload) +{ + QDF_STATUS status; + wmi_unified_t wmi_handle; + + if (!pdev) { + target_if_err("null pdev"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle = GET_WMI_HDL_FROM_PDEV(pdev); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_dfs_phyerr_filter_offload_en_cmd(wmi_handle, + dfs_phyerr_filter_offload); + if (QDF_IS_STATUS_ERROR(status)) + target_if_err("phyerr filter offload %d set fail: %d", + dfs_phyerr_filter_offload, status); + + return status; +} +#else +static QDF_STATUS target_if_dfs_set_phyerr_filter_offload( + struct wlan_objmgr_pdev *pdev, + bool dfs_phyerr_filter_offload) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +static QDF_STATUS target_send_dfs_offload_enable_cmd( + struct wlan_objmgr_pdev *pdev, bool enable) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint8_t pdev_id; + void *wmi_hdl; + + if (!pdev) { + target_if_err("null pdev"); + return QDF_STATUS_E_FAILURE; + } + + wmi_hdl = GET_WMI_HDL_FROM_PDEV(pdev); + if (!wmi_hdl) { + target_if_err("null wmi_hdl"); + return QDF_STATUS_E_FAILURE; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + if (enable) + status = wmi_unified_dfs_phyerr_offload_en_cmd(wmi_hdl, + pdev_id); + else + status = wmi_unified_dfs_phyerr_offload_dis_cmd(wmi_hdl, + pdev_id); + + if (QDF_IS_STATUS_ERROR(status)) + target_if_err("dfs: dfs offload cmd failed, enable:%d, pdev:%d", + enable, pdev_id); + else + target_if_debug("dfs: sent dfs offload cmd, enable:%d, pdev:%d", + enable, pdev_id); + + return status; +} + +QDF_STATUS target_if_register_dfs_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + if (!tx_ops) { + target_if_err("invalid tx_ops"); + return QDF_STATUS_E_FAILURE; + } + + dfs_tx_ops = &tx_ops->dfs_tx_ops; + dfs_tx_ops->dfs_reg_ev_handler = &target_if_dfs_register_event_handler; + + dfs_tx_ops->dfs_process_emulate_bang_radar_cmd = + &target_process_bang_radar_cmd; + dfs_tx_ops->dfs_agile_ch_cfg_cmd = + &target_send_agile_ch_cfg_cmd; + dfs_tx_ops->dfs_ocac_abort_cmd = + &target_send_ocac_abort_cmd; + dfs_tx_ops->dfs_is_pdev_5ghz = &target_if_dfs_is_pdev_5ghz; + dfs_tx_ops->dfs_send_offload_enable_cmd = + &target_send_dfs_offload_enable_cmd; + + dfs_tx_ops->dfs_set_phyerr_filter_offload = + &target_if_dfs_set_phyerr_filter_offload; + + dfs_tx_ops->dfs_get_caps = &target_if_dfs_get_caps; + dfs_tx_ops->dfs_send_avg_radar_params_to_fw = + &target_if_dfs_send_avg_params_to_fw; + dfs_tx_ops->dfs_is_tgt_offload = &target_if_dfs_offload; + + dfs_tx_ops->dfs_send_usenol_pdev_param = + &target_send_usenol_pdev_param; + dfs_tx_ops->dfs_send_subchan_marking_pdev_param = + &target_send_subchan_marking_pdev_param; + dfs_tx_ops->dfs_get_target_type = &target_if_dfs_get_target_type; + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs_full_offload.c b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs_full_offload.c new file mode 100644 index 0000000000000000000000000000000000000000..9c29ce31cb02cd6f71deeb9a30e4d8ac5107d6ac --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs_full_offload.c @@ -0,0 +1,492 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_dfs_full_offload.c + * This file contains dfs target interface for full offload + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(QCA_SUPPORT_AGILE_DFS) +#include +#define QUICK_OCAC_MODE 0 +#endif +/** + * target_if_dfs_cac_complete_event_handler() - CAC complete indication. + * @scn: scn handle. + * @data: Pointer to data buffer. + * @datalen: data length. + * + * Return: 0 on successful indication. + */ +static int target_if_dfs_cac_complete_event_handler( + ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + struct wlan_lmac_if_dfs_rx_ops *dfs_rx_ops; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + int ret = 0; + uint32_t vdev_id = 0; + struct wmi_unified *wmi_handle; + + if (!scn || !data) { + target_if_err("scn: %pK, data: %pK", scn, data); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + + dfs_rx_ops = target_if_dfs_get_rx_ops(psoc); + if (!dfs_rx_ops || !dfs_rx_ops->dfs_dfs_cac_complete_ind) { + target_if_err("Invalid dfs_rx_ops: %pK", dfs_rx_ops); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return -EINVAL; + } + + if (wmi_extract_dfs_cac_complete_event(wmi_handle, data, &vdev_id, + datalen) != QDF_STATUS_SUCCESS) { + target_if_err("failed to extract cac complete event"); + return -EFAULT; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, WLAN_DFS_ID); + if (!vdev) { + target_if_err("null vdev"); + return -EINVAL; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + target_if_err("null pdev"); + ret = -EINVAL; + } + + if (!ret && (QDF_STATUS_SUCCESS != + dfs_rx_ops->dfs_dfs_cac_complete_ind(pdev, vdev_id))) { + target_if_err("dfs_dfs_cac_complete_ind failed"); + ret = -EINVAL; + } + wlan_objmgr_vdev_release_ref(vdev, WLAN_DFS_ID); + + return ret; +} + +#if defined(QCA_SUPPORT_AGILE_DFS) +/** + * target_if_dfs_ocac_complete_event_handler() - Off Channel CAC complete + * indication. + * @scn: scn handle. + * @data: Pointer to data buffer. + * @datalen: data length. + * + * Return: 0 on successful indication. + */ +static int target_if_dfs_ocac_complete_event_handler( + ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + struct wlan_lmac_if_dfs_rx_ops *dfs_rx_ops; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + struct vdev_adfs_complete_status ocac_status; + int ret = 0; + struct wmi_unified *wmi_handle; + + if (!scn || !data) { + target_if_err("scn: %pK, data: %pK", scn, data); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + + dfs_rx_ops = target_if_dfs_get_rx_ops(psoc); + if (!dfs_rx_ops || !dfs_rx_ops->dfs_dfs_ocac_complete_ind) { + target_if_err("Invalid dfs_rx_ops: %pK", dfs_rx_ops); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return -EINVAL; + } + + if (wmi_extract_dfs_ocac_complete_event(wmi_handle, + data, + &ocac_status) + != QDF_STATUS_SUCCESS) { + target_if_err("failed to extract off channel cac complete event"); + return -EFAULT; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + ocac_status.vdev_id, + WLAN_DFS_ID); + if (!vdev) { + target_if_err("null vdev"); + return -EINVAL; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + target_if_err("null pdev"); + ret = -EINVAL; + goto free_vdevref; + } + + if (!ret && (QDF_STATUS_SUCCESS != + dfs_rx_ops->dfs_dfs_ocac_complete_ind(pdev, &ocac_status))) { + target_if_err("dfs_dfs_ocac_complete_ind failed"); + ret = -EINVAL; + } + +free_vdevref: + wlan_objmgr_vdev_release_ref(vdev, WLAN_DFS_ID); + + return ret; +} +#endif + +/** + * target_if_dfs_radar_detection_event_handler() - Indicate RADAR detection and + * process RADAR detection. + * @scn: scn handle. + * @data: pointer to data buffer. + * @datalen: data length. + * + * Return: 0 on successful indication. + */ +static int target_if_dfs_radar_detection_event_handler( + ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + struct radar_found_info radar; + struct wlan_objmgr_psoc *psoc = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_lmac_if_dfs_rx_ops *dfs_rx_ops; + int ret = 0; + struct wmi_unified *wmi_handle; + + if (!scn || !data) { + target_if_err("scn: %pK, data: %pK", scn, data); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + + dfs_rx_ops = target_if_dfs_get_rx_ops(psoc); + if (!dfs_rx_ops || !dfs_rx_ops->dfs_process_radar_ind) { + target_if_err("Invalid dfs_rx_ops: %pK", dfs_rx_ops); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return -EINVAL; + } + + if (wmi_extract_dfs_radar_detection_event(wmi_handle, data, &radar, + datalen) + != QDF_STATUS_SUCCESS) { + target_if_err("failed to extract cac complete event"); + return -EFAULT; + } + + pdev = wlan_objmgr_get_pdev_by_id(psoc, radar.pdev_id, WLAN_DFS_ID); + if (!pdev) { + target_if_err("null pdev"); + return -EINVAL; + } + + if (dfs_rx_ops->dfs_process_radar_ind(pdev, + &radar) != QDF_STATUS_SUCCESS) { + target_if_err("dfs_process_radar_ind failed pdev_id=%d", + radar.pdev_id); + ret = -EINVAL; + } + + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + + return ret; +} + +/** + * target_if_dfs_reg_ocac_event() - registers dfs off channel event + * for full offload. + * @psoc: Pointer to psoc object. + * + * Return: 0 on successful registration. + */ +#if defined(QCA_SUPPORT_AGILE_DFS) +static int target_if_dfs_reg_ocac_event(struct wlan_objmgr_psoc *psoc) +{ + return wmi_unified_register_event( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_vdev_ocac_complete_event_id, + target_if_dfs_ocac_complete_event_handler); +} +#else +static int target_if_dfs_reg_ocac_event(struct wlan_objmgr_psoc *psoc) +{ + return 0; +} +#endif + +#if defined(WLAN_DFS_FULL_OFFLOAD) +QDF_STATUS target_if_dfs_reg_offload_events( + struct wlan_objmgr_psoc *psoc) +{ + int ret1, ret2, ret3; + + ret1 = wmi_unified_register_event( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_dfs_radar_detection_event_id, + target_if_dfs_radar_detection_event_handler); + target_if_debug("wmi_dfs_radar_detection_event_id ret=%d", ret1); + + ret2 = wmi_unified_register_event( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_dfs_cac_complete_id, + target_if_dfs_cac_complete_event_handler); + target_if_debug("wmi_dfs_cac_complete_id ret=%d", ret2); + + ret3 = target_if_dfs_reg_ocac_event(psoc); + target_if_debug("wmi_vdev_ocac_complete_event_id ret=%d", ret3); + + if (ret1 || ret2 || ret3) + return QDF_STATUS_E_FAILURE; + else + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined(QCA_SUPPORT_AGILE_DFS) +QDF_STATUS target_send_ocac_abort_cmd(struct wlan_objmgr_pdev *pdev) +{ + wmi_unified_t wmi_handle; + struct vdev_adfs_abort_params param; + struct wlan_objmgr_vdev *vdev; + QDF_STATUS status; + + if (!pdev) { + target_if_err("null pdev"); + return QDF_STATUS_E_FAILURE; + } + + vdev = wlan_objmgr_pdev_get_first_vdev(pdev, WLAN_DFS_ID); + + if (!vdev) { + target_if_err("null vdev"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle = get_wmi_unified_hdl_from_pdev(pdev); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + status = QDF_STATUS_E_FAILURE; + goto free_vdevref; + } + + qdf_mem_set(¶m, sizeof(param), 0); + param.vdev_id = wlan_vdev_get_id(vdev); + utils_dfs_cancel_precac_timer(pdev); + + status = wmi_unified_send_vdev_adfs_ocac_abort_cmd(wmi_handle, ¶m); + if (QDF_IS_STATUS_ERROR(status)) + target_if_err("dfs: unit_test_cmd send failed %d", status); + +free_vdevref: + wlan_objmgr_vdev_release_ref(vdev, WLAN_DFS_ID); + + return status; +} + +QDF_STATUS target_send_agile_ch_cfg_cmd(struct wlan_objmgr_pdev *pdev, + struct dfs_agile_cac_params *adfs_param) +{ + wmi_unified_t wmi_handle; + struct vdev_adfs_ch_cfg_params param; + struct wlan_objmgr_vdev *vdev; + QDF_STATUS status; + + if (!pdev) { + target_if_err("null pdev"); + return QDF_STATUS_E_FAILURE; + } + + vdev = wlan_objmgr_pdev_get_first_vdev(pdev, WLAN_DFS_ID); + + if (!vdev) { + target_if_err("null vdev"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle = get_wmi_unified_hdl_from_pdev(pdev); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + status = QDF_STATUS_E_FAILURE; + goto free_vdevref; + } + + qdf_mem_set(¶m, sizeof(param), 0); + param.vdev_id = wlan_vdev_get_id(vdev); + param.ocac_mode = QUICK_OCAC_MODE; + param.min_duration_ms = adfs_param->min_precac_timeout; + param.max_duration_ms = adfs_param->max_precac_timeout; + param.chan_freq = adfs_param->precac_chan; + param.chan_width = adfs_param->precac_chwidth; + param.center_freq = adfs_param->precac_chan; + + status = wmi_unified_send_vdev_adfs_ch_cfg_cmd(wmi_handle, ¶m); + if (QDF_IS_STATUS_ERROR(status)) + target_if_err("dfs: unit_test_cmd send failed %d", status); + +free_vdevref: + wlan_objmgr_vdev_release_ref(vdev, WLAN_DFS_ID); + + return status; +} +#endif + +#if (defined(WLAN_DFS_FULL_OFFLOAD) || defined(QCA_WIFI_QCA8074) || \ + defined(QCA_WIFI_QCA6018)) +QDF_STATUS target_process_bang_radar_cmd( + struct wlan_objmgr_pdev *pdev, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test) +{ + QDF_STATUS status; + struct wmi_unit_test_cmd wmi_utest; + int i; + wmi_unified_t wmi_handle; + uint32_t target_pdev_id = 0; + + if (!pdev) { + target_if_err("null pdev"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle = get_wmi_unified_hdl_from_pdev(pdev); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return QDF_STATUS_E_FAILURE; + } + + wmi_utest.vdev_id = dfs_unit_test->vdev_id; + wmi_utest.module_id = WLAN_MODULE_PHYERR_DFS; + wmi_utest.num_args = dfs_unit_test->num_args; + + for (i = 0; i < dfs_unit_test->num_args; i++) + wmi_utest.args[i] = dfs_unit_test->args[i]; + /* + * Host to Target conversion for pdev id required + * before we send a wmi unit test command + */ + if (wmi_convert_pdev_id_host_to_target( + wmi_handle, pdev->pdev_objmgr.wlan_pdev_id, + &target_pdev_id) != QDF_STATUS_SUCCESS) { + target_if_err("failed to convert host pdev id to target"); + return QDF_STATUS_E_FAILURE; + } + + wmi_utest.args[IDX_PDEV_ID] = target_pdev_id; + + status = wmi_unified_unit_test_cmd(wmi_handle, &wmi_utest); + if (QDF_IS_STATUS_ERROR(status)) + target_if_err("dfs: unit_test_cmd send failed %d", status); + return status; +} +#endif + +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) +QDF_STATUS target_send_usenol_pdev_param(struct wlan_objmgr_pdev *pdev, + bool usenol) +{ + QDF_STATUS status; + wmi_unified_t wmi_handle; + + if (!pdev) { + target_if_err("null pdev"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle = get_wmi_unified_hdl_from_pdev(pdev); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return QDF_STATUS_E_FAILURE; + } + status = wmi_send_usenol_pdev_param(wmi_handle, usenol, pdev); + + if (QDF_IS_STATUS_ERROR(status)) + target_if_err("dfs: usenol_pdev_param send failed %d", status); + return status; +} + +QDF_STATUS +target_send_subchan_marking_pdev_param(struct wlan_objmgr_pdev *pdev, + bool subchanmark) +{ + QDF_STATUS status; + wmi_unified_t wmi_handle; + + if (!pdev) { + target_if_err("null pdev"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle = get_wmi_unified_hdl_from_pdev(pdev); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return QDF_STATUS_E_FAILURE; + } + status = wmi_send_subchan_marking_pdev_param(wmi_handle, + subchanmark, pdev); + + if (QDF_IS_STATUS_ERROR(status)) + target_if_err("dfs: subchan_marking_pdev_param send failed %d", + status); + + return status; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs_partial_offload.c b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs_partial_offload.c new file mode 100644 index 0000000000000000000000000000000000000000..af09b710a0219c7cbd197c4c0d28bd8d3b4c2079 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs_partial_offload.c @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_dfs_partial_offload.c + * This file contains dfs target interface for partial offload + */ + +#include +#include "target_type.h" +#include "target_if_dfs_partial_offload.h" +#include "target_if_dfs.h" + +QDF_STATUS target_if_dfs_reg_phyerr_events(struct wlan_objmgr_psoc *psoc) +{ + /* TODO: dfs non-offload case */ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_dfs_get_caps(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_caps *dfs_caps) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct target_psoc_info *tgt_psoc_info; + + if (!dfs_caps) { + target_if_err("null dfs_caps"); + return QDF_STATUS_E_FAILURE; + } + + dfs_caps->wlan_dfs_combined_rssi_ok = 0; + dfs_caps->wlan_dfs_ext_chan_ok = 0; + dfs_caps->wlan_dfs_use_enhancement = 0; + dfs_caps->wlan_strong_signal_diversiry = 0; + dfs_caps->wlan_fastdiv_val = 0; + dfs_caps->wlan_chip_is_bb_tlv = 1; + dfs_caps->wlan_chip_is_over_sampled = 0; + dfs_caps->wlan_chip_is_ht160 = 0; + dfs_caps->wlan_chip_is_false_detect = 0; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + target_if_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + + tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_psoc_info) { + target_if_err("null tgt_psoc_info"); + return QDF_STATUS_E_FAILURE; + } + + switch (target_psoc_get_target_type(tgt_psoc_info)) { + case TARGET_TYPE_AR900B: + break; + + case TARGET_TYPE_IPQ4019: + dfs_caps->wlan_chip_is_false_detect = 0; + break; + + case TARGET_TYPE_AR9888: + dfs_caps->wlan_chip_is_over_sampled = 1; + break; + + case TARGET_TYPE_QCA9984: + case TARGET_TYPE_QCA9888: + dfs_caps->wlan_chip_is_ht160 = 1; + break; + default: + break; + } + + return QDF_STATUS_SUCCESS; +} + +#if defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS target_if_dfs_send_avg_params_to_fw( + struct wlan_objmgr_pdev *pdev, + struct dfs_radar_found_params *params) +{ + QDF_STATUS status; + wmi_unified_t wmi_handle; + + if (!pdev) { + target_if_err("null pdev"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle = get_wmi_unified_hdl_from_pdev(pdev); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_dfs_send_avg_params_cmd(wmi_handle, + params); + if (QDF_IS_STATUS_ERROR(status)) + target_if_err("dfs radar found average parameters send failed: %d", + status); + + return status; +} + +int target_if_dfs_status_check_event_handler(ol_scn_t scn, + uint8_t *data, + uint32_t datalen) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wlan_lmac_if_dfs_rx_ops *dfs_rx_ops; + u_int32_t dfs_status_check; + wmi_unified_t wmi_hdl; + + if (!scn || !data) { + target_if_err("scn: %pK, data: %pK", scn, data); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + + /* Since Partial Offload chipsets have only one pdev per psoc, the first + * pdev from the pdev list is used. + */ + pdev = wlan_objmgr_get_pdev_by_id(psoc, 0, WLAN_DFS_ID); + if (!pdev) { + target_if_err("null pdev"); + return -EINVAL; + } + + dfs_rx_ops = target_if_dfs_get_rx_ops(psoc); + if (!dfs_rx_ops) { + target_if_err("null dfs_rx_ops"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + return -EINVAL; + } + + if (!dfs_rx_ops->dfs_action_on_status) { + target_if_err("dfs_rx_ops->dfs_action_on_status is NULL"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + return -EINVAL; + } + + wmi_hdl = get_wmi_unified_hdl_from_pdev(pdev); + if (!wmi_hdl) { + target_if_err("wmi_hdl is NULL"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + return -EINVAL; + } + + if (wmi_extract_dfs_status_from_fw(wmi_hdl, data, &dfs_status_check) != + QDF_STATUS_SUCCESS) { + target_if_err("failed to extract status response from FW"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + return -EINVAL; + } + + if (dfs_rx_ops->dfs_action_on_status(pdev, &dfs_status_check) != + QDF_STATUS_SUCCESS) { + target_if_err("dfs action on host dfs status from FW failed"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + return -EINVAL; + } + + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + + return 0; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/inc/target_if_direct_buf_rx_api.h b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/inc/target_if_direct_buf_rx_api.h new file mode 100644 index 0000000000000000000000000000000000000000..10882f2f2c2c0e99384ccb923d9631a3858e07b7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/inc/target_if_direct_buf_rx_api.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _TARGET_IF_DIRECT_BUF_RX_API_H_ +#define _TARGET_IF_DIRECT_BUF_RX_API_H_ + +#include "qdf_nbuf.h" +#include "qdf_atomic.h" +#include "wmi_unified_api.h" + +#ifdef WLAN_DEBUGFS +#ifdef DIRECT_BUF_RX_DEBUG +/* Base debugfs entry for DBR module */ +extern qdf_dentry_t dbr_debugfs_entry; +#endif /* DIRECT_BUF_RX_DEBUG */ +#endif /* WLAN_DEBUGFS */ + +#define direct_buf_rx_alert(params...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_DIRECT_BUF_RX, params) +#define direct_buf_rx_err(params...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_DIRECT_BUF_RX, params) +#define direct_buf_rx_warn(params...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_DIRECT_BUF_RX, params) +#define direct_buf_rx_notice(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_DIRECT_BUF_RX, params) +#define direct_buf_rx_info(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_DIRECT_BUF_RX, params) +#define direct_buf_rx_debug(params...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_DIRECT_BUF_RX, params) +#define direct_buf_rx_enter() \ + QDF_TRACE_ENTER(QDF_MODULE_ID_DIRECT_BUF_RX, "enter") +#define direct_buf_rx_exit() \ + QDF_TRACE_EXIT(QDF_MODULE_ID_DIRECT_BUF_RX, "exit") + +#define directbuf_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_DIRECT_BUF_RX, params) +#define directbuf_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_DIRECT_BUF_RX, params) +#define directbuf_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_DIRECT_BUF_RX, params) +#define directbuf_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_DIRECT_BUF_RX, params) +#define directbuf_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_DIRECT_BUF_RX, params) + +#define DBR_MAX_CHAINS (8) + +struct wlan_objmgr_psoc; +struct wlan_lmac_if_tx_ops; + +/** + * enum DBR_MODULE - Enum containing the modules supporting direct buf rx + * @DBR_MODULE_SPECTRAL: Module ID for Spectral + * @DBR_MODULE_CFR: Module ID for CFR + * @DBR_MODULE_MAX: Max module ID + */ +enum DBR_MODULE { + DBR_MODULE_SPECTRAL = 0, + DBR_MODULE_CFR = 1, + DBR_MODULE_MAX, +}; + +/** + * struct direct_buf_rx_data - direct buffer rx data + * @dbr_len: Length of the buffer DMAed + * @vaddr: Virtual address of the buffer that has DMAed data + * @cookie: Cookie for the buffer rxed from target + * @paddr: physical address of buffer corresponding to vaddr + * @meta_data_valid: Indicates that metadata is valid + * @meta_data: Meta data + */ +struct direct_buf_rx_data { + size_t dbr_len; + void *vaddr; + uint32_t cookie; + qdf_dma_addr_t paddr; + bool meta_data_valid; + struct direct_buf_rx_metadata meta_data; +}; + +/** + * struct dbr_module_config - module configuration for dbr + * @num_resp_per_event: Number of events to be packed together + * @event_timeout_in_ms: Timeout until which multiple events can be packed + */ +struct dbr_module_config { + uint32_t num_resp_per_event; + uint32_t event_timeout_in_ms; +}; + +/** + * direct_buf_rx_init() - Function to initialize direct buf rx module + * + * Return: QDF status of operation + */ +QDF_STATUS direct_buf_rx_init(void); + +/** + * direct_buf_rx_deinit() - Function to deinitialize direct buf rx module + * + * Return: QDF status of operation + */ +QDF_STATUS direct_buf_rx_deinit(void); + +/** + * direct_buf_rx_target_attach() - Attach hal_soc,osdev in direct buf rx psoc obj + * @psoc: pointer to psoc object + * @hal_soc: Opaque HAL SOC handle + * @osdev: QDF os device handle + * + * Return: QDF status of operation + */ +QDF_STATUS direct_buf_rx_target_attach(struct wlan_objmgr_psoc *psoc, + void *hal_soc, qdf_device_t osdev); + +/** + * target_if_direct_buf_rx_register_tx_ops() - Register tx ops for direct buffer + * rx module + * @tx_ops: pointer to lmac interface tx ops + * + * Return: None + */ +void target_if_direct_buf_rx_register_tx_ops( + struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_dbr_cookie_lookup() - Function to retrieve cookie from + * buffer address(paddr) + * @pdev: pointer to pdev object + * @mod_id: module id indicating the module using direct buffer rx framework + * @paddr: Physical address of buffer for which cookie info is required + * @cookie: cookie will be returned in this param + * @srng_id: srng ID + * + * Return: QDF status of operation + */ +QDF_STATUS target_if_dbr_cookie_lookup(struct wlan_objmgr_pdev *pdev, + uint8_t mod_id, qdf_dma_addr_t paddr, + uint32_t *cookie, uint8_t srng_id); + +/** + * target_if_dbr_buf_release() - Notify direct buf that a previously provided + * buffer can be released. + * @pdev: pointer to pdev object + * @mod_id: module id indicating the module using direct buffer rx framework + * @paddr: Physical address of buffer for which cookie info is required + * @cookie: cookie value corresponding to the paddr + * @srng_id: srng ID + * + * Return: QDF status of operation + */ +QDF_STATUS target_if_dbr_buf_release(struct wlan_objmgr_pdev *pdev, + uint8_t mod_id, qdf_dma_addr_t paddr, + uint32_t cookie, uint8_t srng_id); +#endif /* _TARGET_IF_DIRECT_BUF_RX_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_api.c b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_api.c new file mode 100644 index 0000000000000000000000000000000000000000..cf9e7eabf233e643347292d11ad8cca6675aff51 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_api.c @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include "target_if_direct_buf_rx_main.h" +#include + +#if defined(WLAN_DEBUGFS) && defined(DIRECT_BUF_RX_DEBUG) +/* Base debugfs entry for DBR module */ +qdf_dentry_t dbr_debugfs_entry; + +static inline void +target_if_direct_buf_rx_debugfs_init(void) +{ + dbr_debugfs_entry = qdf_debugfs_create_dir("dbr_ring_debug", NULL); + + if (!dbr_debugfs_entry) + direct_buf_rx_err("error while creating direct_buf rx debugfs dir"); +} + +static inline void +target_if_direct_buf_rx_debugfs_deinit(void) +{ + if (dbr_debugfs_entry) { + qdf_debugfs_remove_dir_recursive(dbr_debugfs_entry); + dbr_debugfs_entry = NULL; + } +} +#else +static inline void +target_if_direct_buf_rx_debugfs_init(void) +{ +} + +static inline void +target_if_direct_buf_rx_debugfs_deinit(void) +{ +} +#endif /* WLAN_DEBUGFS && DIRECT_BUF_RX_DEBUG */ + +QDF_STATUS direct_buf_rx_init(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_register_psoc_create_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_psoc_create_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("Failed to register psoc create handler"); + return status; + } + + status = wlan_objmgr_register_psoc_destroy_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_psoc_destroy_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("Failed to register psoc destroy handler"); + goto dbr_unreg_psoc_create; + } + + status = wlan_objmgr_register_pdev_create_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_pdev_create_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("Failed to register pdev create handler"); + goto dbr_unreg_psoc_destroy; + } + + status = wlan_objmgr_register_pdev_destroy_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_pdev_destroy_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("Failed to register pdev destroy handler"); + goto dbr_unreg_pdev_create; + } + + target_if_direct_buf_rx_debugfs_init(); + + direct_buf_rx_info("Direct Buffer RX pdev,psoc create and destroy handlers registered"); + + return QDF_STATUS_SUCCESS; + +dbr_unreg_pdev_create: + status = wlan_objmgr_unregister_pdev_create_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_pdev_create_handler, + NULL); + +dbr_unreg_psoc_destroy: + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_psoc_destroy_handler, + NULL); + +dbr_unreg_psoc_create: + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_psoc_create_handler, + NULL); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(direct_buf_rx_init); + +QDF_STATUS direct_buf_rx_deinit(void) +{ + QDF_STATUS status; + + target_if_direct_buf_rx_debugfs_deinit(); + + status = wlan_objmgr_unregister_pdev_destroy_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_pdev_destroy_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + direct_buf_rx_err("Failed to unregister pdev destroy handler"); + + status = wlan_objmgr_unregister_pdev_create_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_pdev_create_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + direct_buf_rx_err("Failed to unregister pdev create handler"); + + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_psoc_destroy_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + direct_buf_rx_err("Failed to unregister psoc destroy handler"); + + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_psoc_create_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + direct_buf_rx_err("Failed to unregister psoc create handler"); + + direct_buf_rx_debug("Direct Buffer RX pdev,psoc create and destroy handlers unregistered"); + + return status; +} +qdf_export_symbol(direct_buf_rx_deinit); + +QDF_STATUS direct_buf_rx_target_attach(struct wlan_objmgr_psoc *psoc, + void *hal_soc, qdf_device_t osdev) +{ + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + + if (!hal_soc || !osdev) { + direct_buf_rx_err("hal soc or osdev is null"); + return QDF_STATUS_E_INVAL; + } + + dbr_psoc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + direct_buf_rx_debug("Dbr psoc obj %pK", dbr_psoc_obj); + + if (!dbr_psoc_obj) { + direct_buf_rx_err("dir buf rx psoc obj is null"); + return QDF_STATUS_E_FAILURE; + } + + dbr_psoc_obj->hal_soc = hal_soc; + dbr_psoc_obj->osdev = osdev; + + return QDF_STATUS_SUCCESS; +} + +#ifdef DIRECT_BUF_RX_DEBUG +static inline void +target_if_direct_buf_rx_debug_register_tx_ops( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + tx_ops->dbr_tx_ops.direct_buf_rx_start_ring_debug = + target_if_dbr_start_ring_debug; + tx_ops->dbr_tx_ops.direct_buf_rx_stop_ring_debug = + target_if_dbr_stop_ring_debug; + tx_ops->dbr_tx_ops.direct_buf_rx_start_buffer_poisoning = + target_if_dbr_start_buffer_poisoning; + tx_ops->dbr_tx_ops.direct_buf_rx_stop_buffer_poisoning = + target_if_dbr_stop_buffer_poisoning; +} +#else +static inline void +target_if_direct_buf_rx_debug_register_tx_ops( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* DIRECT_BUF_RX_DEBUG */ + +void target_if_direct_buf_rx_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + tx_ops->dbr_tx_ops.direct_buf_rx_module_register = + target_if_direct_buf_rx_module_register; + tx_ops->dbr_tx_ops.direct_buf_rx_module_unregister = + target_if_direct_buf_rx_module_unregister; + tx_ops->dbr_tx_ops.direct_buf_rx_register_events = + target_if_direct_buf_rx_register_events; + tx_ops->dbr_tx_ops.direct_buf_rx_unregister_events = + target_if_direct_buf_rx_unregister_events; + tx_ops->dbr_tx_ops.direct_buf_rx_print_ring_stat = + target_if_direct_buf_rx_print_ring_stat; + tx_ops->dbr_tx_ops.direct_buf_rx_get_ring_params = + target_if_direct_buf_rx_get_ring_params; + target_if_direct_buf_rx_debug_register_tx_ops(tx_ops); +} +qdf_export_symbol(target_if_direct_buf_rx_register_tx_ops); diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.c b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.c new file mode 100644 index 0000000000000000000000000000000000000000..516f4b3002d2af1f5e961eb46d43d324538cf343 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.c @@ -0,0 +1,2149 @@ +/* + * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "target_if.h" +#include "wlan_lmac_if_def.h" +#include "target_if_direct_buf_rx_main.h" +#include +#include "hal_api.h" +#include +#include + +/** + * struct module_name : Module name information structure + * @module_name_str : Module name subscribing to DBR + */ +struct module_name { + unsigned char module_name_str[QDF_MAX_NAME_SIZE]; +}; + +static const struct module_name g_dbr_module_name[DBR_MODULE_MAX] = { + [DBR_MODULE_SPECTRAL] = {"SPECTRAL"}, + [DBR_MODULE_CFR] = {"CFR"}, +}; + +static uint8_t get_num_dbr_modules_per_pdev(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_psoc_host_dbr_ring_caps *dbr_ring_cap; + uint8_t num_dbr_ring_caps, cap_idx, pdev_id, num_modules; + struct target_psoc_info *tgt_psoc_info; + + psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return 0; + } + + tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_psoc_info) { + direct_buf_rx_err("target_psoc_info is null"); + return 0; + } + num_dbr_ring_caps = target_psoc_get_num_dbr_ring_caps(tgt_psoc_info); + dbr_ring_cap = target_psoc_get_dbr_ring_caps(tgt_psoc_info); + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + num_modules = 0; + + for (cap_idx = 0; cap_idx < num_dbr_ring_caps; cap_idx++) { + if (dbr_ring_cap[cap_idx].pdev_id == pdev_id) + num_modules++; + } + + return num_modules; +} + +static QDF_STATUS populate_dbr_cap_mod_param(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_psoc_host_dbr_ring_caps *dbr_ring_cap; + uint8_t cap_idx; + bool cap_found = false; + enum DBR_MODULE mod_id = mod_param->mod_id; + uint32_t num_dbr_ring_caps, pdev_id; + struct target_psoc_info *tgt_psoc_info; + + psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return QDF_STATUS_E_INVAL; + } + + tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_psoc_info) { + direct_buf_rx_err("target_psoc_info is null"); + return QDF_STATUS_E_INVAL; + } + + num_dbr_ring_caps = target_psoc_get_num_dbr_ring_caps(tgt_psoc_info); + dbr_ring_cap = target_psoc_get_dbr_ring_caps(tgt_psoc_info); + pdev_id = mod_param->pdev_id; + + for (cap_idx = 0; cap_idx < num_dbr_ring_caps; cap_idx++) { + if (dbr_ring_cap[cap_idx].pdev_id == pdev_id) { + if (dbr_ring_cap[cap_idx].mod_id == mod_id) { + mod_param->dbr_ring_cap->ring_elems_min = + dbr_ring_cap[cap_idx].ring_elems_min; + mod_param->dbr_ring_cap->min_buf_size = + dbr_ring_cap[cap_idx].min_buf_size; + mod_param->dbr_ring_cap->min_buf_align = + dbr_ring_cap[cap_idx].min_buf_align; + cap_found = true; + } + } + } + + if (!cap_found) { + direct_buf_rx_err("No cap found for module %d in pdev %d", + mod_id, pdev_id); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#ifdef DIRECT_BUF_RX_DEBUG +static inline struct direct_buf_rx_module_debug * +target_if_get_dbr_mod_debug_from_dbr_pdev_obj( + struct direct_buf_rx_pdev_obj *dbr_pdev_obj, + uint8_t mod_id) +{ + if (!dbr_pdev_obj) { + direct_buf_rx_err("dir buf rx object is null"); + return NULL; + } + + if (mod_id >= DBR_MODULE_MAX) { + direct_buf_rx_err("Invalid module id"); + return NULL; + } + + if (!dbr_pdev_obj->dbr_mod_debug) { + direct_buf_rx_err("dbr_pdev_obj->dbr_mod_debug is NULL"); + return NULL; + } + + if (mod_id >= dbr_pdev_obj->num_modules) { + direct_buf_rx_err("Module %d not supported in target", mod_id); + return NULL; + } + return &dbr_pdev_obj->dbr_mod_debug[mod_id]; +} + +static inline struct direct_buf_rx_module_debug * +target_if_get_dbr_mod_debug_from_pdev( + struct wlan_objmgr_pdev *pdev, + uint8_t mod_id) +{ + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + + if (!pdev) { + direct_buf_rx_err("pdev is null"); + return NULL; + } + + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + return target_if_get_dbr_mod_debug_from_dbr_pdev_obj( + dbr_pdev_obj, mod_id); +} +#endif + +#ifdef DIRECT_BUF_RX_DEBUG +#define RING_DEBUG_EVENT_NAME_SIZE 12 +static const unsigned char +g_dbr_ring_debug_event[DBR_RING_DEBUG_EVENT_MAX][RING_DEBUG_EVENT_NAME_SIZE] = { + [DBR_RING_DEBUG_EVENT_RX] = "Rx", + [DBR_RING_DEBUG_EVENT_REPLENISH_RING] = "Replenish", +}; + +/** + * target_if_dbr_print_ring_debug_entries() - Print ring debug entries + * @print: The print adapter function + * @print_priv: The private data to be consumed by @print + * @dbr_pdev_obj: Pdev object of the DBR module + * @mod_id: Module ID + * + * Print ring debug entries of the ring identified by @dbr_pdev_obj and @mod_id + * using the given print adapter function + * + * Return: QDF_STATUS of operation + */ +static QDF_STATUS target_if_dbr_print_ring_debug_entries( + qdf_abstract_print print, void *print_priv, + struct direct_buf_rx_pdev_obj *dbr_pdev_obj, + uint8_t mod_id, uint8_t srng_id) +{ + struct direct_buf_rx_module_debug *mod_debug; + struct direct_buf_rx_ring_debug *ring_debug; + int idx; + + mod_debug = target_if_get_dbr_mod_debug_from_dbr_pdev_obj(dbr_pdev_obj, + mod_id); + if (!mod_debug) + return QDF_STATUS_E_INVAL; + + mod_debug = &dbr_pdev_obj->dbr_mod_debug[mod_id]; + ring_debug = &mod_debug->dbr_ring_debug[srng_id]; + + if (ring_debug->entries) { + print(print_priv, "Current debug entry is %d", + ring_debug->ring_debug_idx); + print(print_priv, "---------------------------------------------------------"); + print(print_priv, "| Number | Head Idx | Tail Idx | Timestamp | event |"); + print(print_priv, "---------------------------------------------------------"); + for (idx = 0; idx < ring_debug->num_ring_debug_entries; ++idx) { + print(print_priv, "|%8u|%10u|%10u|%11llu|%12s|", idx, + ring_debug->entries[idx].head_idx, + ring_debug->entries[idx].tail_idx, + ring_debug->entries[idx].timestamp, + g_dbr_ring_debug_event[ + ring_debug->entries[idx].event]); + } + print(print_priv, "---------------------------------------------------------"); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_dbr_qdf_err_printer() - QDF error level printer for DBR module + * @print_priv: The private data + * @fmt: Format string + * + * This function should be passed in place of the 'print' argument to + * target_if_dbr_print_ring_debug_entries function for the logs that should be + * printed via QDF trace + * + * Return: QDF_STATUS of operation + */ +static int target_if_dbr_qdf_err_printer(void *priv, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + QDF_VTRACE(QDF_MODULE_ID_DIRECT_BUF_RX, QDF_TRACE_LEVEL_ERROR, + (char *)fmt, args); + va_end(args); + + return 0; +} + +static inline void target_if_direct_buf_rx_free_mod_debug( + struct direct_buf_rx_pdev_obj *dbr_pdev_obj) +{ + if (!dbr_pdev_obj) { + direct_buf_rx_err("dir buf rx object is null"); + return; + } + /* Free the debug data structures of all modules */ + if (dbr_pdev_obj->dbr_mod_debug) { + qdf_mem_free(dbr_pdev_obj->dbr_mod_debug); + dbr_pdev_obj->dbr_mod_debug = NULL; + } +} + +static inline QDF_STATUS target_if_direct_buf_rx_alloc_mod_debug( + struct direct_buf_rx_pdev_obj *dbr_pdev_obj) +{ + if (!dbr_pdev_obj) { + direct_buf_rx_err("dir buf rx object is null"); + return QDF_STATUS_E_FAILURE; + } + /* Allocate the debug data structure for each module */ + dbr_pdev_obj->dbr_mod_debug = qdf_mem_malloc( + dbr_pdev_obj->num_modules * + sizeof(struct direct_buf_rx_module_debug)); + + if (!dbr_pdev_obj->dbr_mod_debug) + return QDF_STATUS_E_NOMEM; + + return QDF_STATUS_SUCCESS; +} +#else +static inline QDF_STATUS target_if_direct_buf_rx_alloc_mod_debug( + struct direct_buf_rx_pdev_obj *dbr_pdev_obj) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void target_if_direct_buf_rx_free_mod_debug( + struct direct_buf_rx_pdev_obj *dbr_pdev_obj) +{ +} +#endif + +#if defined(WLAN_DEBUGFS) && defined(DIRECT_BUF_RX_DEBUG) +static inline void target_if_direct_buf_pdev_debugfs_init( + struct wlan_objmgr_pdev *pdev) +{ + char dir_name[32]; + struct wlan_objmgr_psoc *psoc; + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + + if (!pdev) { + direct_buf_rx_err("pdev is null"); + return; + } + + psoc = wlan_pdev_get_psoc(pdev); + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (!dbr_pdev_obj) { + direct_buf_rx_err("dir buf rx object is null"); + return; + } + + qdf_snprintf(dir_name, sizeof(dir_name), "SOC%u_PDEV%u", + wlan_psoc_get_id(psoc), + wlan_objmgr_pdev_get_pdev_id(pdev)); + + /* Create debugfs entry for this radio */ + dbr_pdev_obj->debugfs_entry = qdf_debugfs_create_dir( + dir_name, dbr_debugfs_entry); + + if (!dbr_pdev_obj->debugfs_entry) + direct_buf_rx_err("error while creating direct_buf debugfs dir"); +} + +static inline void target_if_direct_buf_pdev_debugfs_deinit( + struct direct_buf_rx_pdev_obj *dbr_pdev_obj) +{ + if (!dbr_pdev_obj) { + direct_buf_rx_err("dir buf rx object is null"); + return; + } + /* Remove the debugfs entry of the radio */ + if (dbr_pdev_obj->debugfs_entry) { + qdf_debugfs_remove_dir_recursive(dbr_pdev_obj->debugfs_entry); + dbr_pdev_obj->debugfs_entry = NULL; + } +} +#else +static inline void target_if_direct_buf_pdev_debugfs_init( + struct wlan_objmgr_pdev *pdev) +{ +} + +static inline void target_if_direct_buf_pdev_debugfs_deinit( + struct direct_buf_rx_pdev_obj *dbr_pdev_obj) +{ +} +#endif /* WLAN_DEBUGFS && DIRECT_BUF_RX_DEBUG */ + +QDF_STATUS target_if_direct_buf_rx_pdev_create_handler( + struct wlan_objmgr_pdev *pdev, void *data) +{ + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + struct wlan_objmgr_psoc *psoc; + uint8_t num_modules; + QDF_STATUS status; + + direct_buf_rx_enter(); + + if (!pdev) { + direct_buf_rx_err("pdev context passed is null"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return QDF_STATUS_E_INVAL; + } + + dbr_psoc_obj = + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (!dbr_psoc_obj) { + direct_buf_rx_err("dir buf rx psoc object is null"); + return QDF_STATUS_E_FAILURE; + } + + dbr_pdev_obj = qdf_mem_malloc(sizeof(*dbr_pdev_obj)); + + if (!dbr_pdev_obj) + return QDF_STATUS_E_NOMEM; + + status = wlan_objmgr_pdev_component_obj_attach(pdev, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + dbr_pdev_obj, QDF_STATUS_SUCCESS); + + if (status != QDF_STATUS_SUCCESS) { + direct_buf_rx_err("Failed to attach dir buf rx component %d", + status); + qdf_mem_free(dbr_pdev_obj); + return status; + } + + dbr_psoc_obj->dbr_pdev_obj[wlan_objmgr_pdev_get_pdev_id(pdev)] = + dbr_pdev_obj; + + num_modules = get_num_dbr_modules_per_pdev(pdev); + direct_buf_rx_debug("Number of modules = %d pdev %d DBR pdev obj %pK", + num_modules, wlan_objmgr_pdev_get_pdev_id(pdev), + dbr_pdev_obj); + dbr_pdev_obj->num_modules = num_modules; + + if (!dbr_pdev_obj->num_modules) { + direct_buf_rx_info("Number of modules = %d", num_modules); + return QDF_STATUS_SUCCESS; + } + + direct_buf_rx_debug("sring number = %d", DBR_SRNG_NUM); + dbr_pdev_obj->dbr_mod_param = qdf_mem_malloc(num_modules * + DBR_SRNG_NUM * + sizeof(struct direct_buf_rx_module_param)); + + if (!dbr_pdev_obj->dbr_mod_param) { + direct_buf_rx_err("alloc dbr mod param fail"); + goto dbr_mod_param_fail; + } + + if (target_if_direct_buf_rx_alloc_mod_debug(dbr_pdev_obj) != + QDF_STATUS_SUCCESS) + goto dbr_mod_debug_fail; + + target_if_direct_buf_pdev_debugfs_init(pdev); + + return QDF_STATUS_SUCCESS; + +dbr_mod_debug_fail: + qdf_mem_free(dbr_pdev_obj->dbr_mod_param); + +dbr_mod_param_fail: + wlan_objmgr_pdev_component_obj_detach( + pdev, WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + dbr_pdev_obj); + qdf_mem_free(dbr_pdev_obj); + + return QDF_STATUS_E_NOMEM; +} + +QDF_STATUS target_if_direct_buf_rx_pdev_destroy_handler( + struct wlan_objmgr_pdev *pdev, void *data) +{ + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + QDF_STATUS status; + uint8_t num_modules, mod_idx, srng_id; + + if (!pdev) { + direct_buf_rx_err("pdev context passed is null"); + return QDF_STATUS_E_INVAL; + } + + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (!dbr_pdev_obj) { + direct_buf_rx_err("dir buf rx object is null"); + return QDF_STATUS_E_FAILURE; + } + + num_modules = dbr_pdev_obj->num_modules; + for (mod_idx = 0; mod_idx < num_modules; mod_idx++) { + /* + * If the module didn't stop the ring debug by this time, + * it will result in memory leak of its ring debug entries. + * So, stop the ring debug + */ + target_if_dbr_stop_ring_debug(pdev, mod_idx); + for (srng_id = 0; srng_id < DBR_SRNG_NUM; srng_id++) + target_if_deinit_dbr_ring(pdev, dbr_pdev_obj, + mod_idx, srng_id); + } + + target_if_direct_buf_pdev_debugfs_deinit(dbr_pdev_obj); + target_if_direct_buf_rx_free_mod_debug(dbr_pdev_obj); + qdf_mem_free(dbr_pdev_obj->dbr_mod_param); + dbr_pdev_obj->dbr_mod_param = NULL; + + status = wlan_objmgr_pdev_component_obj_detach(pdev, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + dbr_pdev_obj); + + if (status != QDF_STATUS_SUCCESS) { + direct_buf_rx_err("failed to detach dir buf rx component %d", + status); + } + + qdf_mem_free(dbr_pdev_obj); + + return status; +} + +QDF_STATUS target_if_direct_buf_rx_psoc_create_handler( + struct wlan_objmgr_psoc *psoc, void *data) +{ + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + QDF_STATUS status; + + direct_buf_rx_enter(); + + if (!psoc) { + direct_buf_rx_err("psoc context passed is null"); + return QDF_STATUS_E_INVAL; + } + + dbr_psoc_obj = qdf_mem_malloc(sizeof(*dbr_psoc_obj)); + + if (!dbr_psoc_obj) + return QDF_STATUS_E_NOMEM; + + direct_buf_rx_debug("Dbr psoc obj %pK", dbr_psoc_obj); + + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, dbr_psoc_obj, + QDF_STATUS_SUCCESS); + + if (status != QDF_STATUS_SUCCESS) { + direct_buf_rx_err("Failed to attach dir buf rx component %d", + status); + goto attach_error; + } + + return status; + +attach_error: + qdf_mem_free(dbr_psoc_obj); + + return status; +} + +QDF_STATUS target_if_direct_buf_rx_psoc_destroy_handler( + struct wlan_objmgr_psoc *psoc, void *data) +{ + QDF_STATUS status; + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + + direct_buf_rx_enter(); + + dbr_psoc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (!dbr_psoc_obj) { + direct_buf_rx_err("dir buf rx psoc obj is null"); + return QDF_STATUS_E_FAILURE; + } + + status = wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + dbr_psoc_obj); + + if (status != QDF_STATUS_SUCCESS) { + direct_buf_rx_err("failed to detach dir buf rx component %d", + status); + } + + qdf_mem_free(dbr_psoc_obj); + + return status; +} + +#if defined(WLAN_DEBUGFS) && defined(DIRECT_BUF_RX_DEBUG) +/** + * target_if_dbr_debugfs_show_ring_debug() - Function to display ring debug + * entries in debugfs + * @file: qdf debugfs file handler + * @arg: pointer to DBR debugfs private object + * + * Return: QDF_STATUS of operation + */ +static QDF_STATUS target_if_dbr_debugfs_show_ring_debug( + qdf_debugfs_file_t file, void *arg) +{ + struct dbr_debugfs_priv *priv = arg; + + return target_if_dbr_print_ring_debug_entries(qdf_debugfs_printer, + file, priv->dbr_pdev_obj, + priv->mod_id, + priv->srng_id); +} + +/** + * target_if_dbr_mod_debugfs_init() - Init debugfs for a given module + * @dbr_pdev_obj: Pointer to the pdev obj of Direct buffer rx module + * @mod_id: Module ID corresponding to this ring + * + * Return: QDF_STATUS of operation + */ +static QDF_STATUS target_if_dbr_mod_debugfs_init( + struct direct_buf_rx_pdev_obj *dbr_pdev_obj, + enum DBR_MODULE mod_id) +{ + struct direct_buf_rx_module_debug *mod_debug; + + mod_debug = target_if_get_dbr_mod_debug_from_dbr_pdev_obj(dbr_pdev_obj, + mod_id); + + if (!mod_debug) + return QDF_STATUS_E_INVAL; + + if (mod_debug->debugfs_entry) { + direct_buf_rx_err("debugfs mod entry was already created for %s module", + g_dbr_module_name[mod_id].module_name_str); + return QDF_STATUS_SUCCESS; + } + + mod_debug->debugfs_entry = + qdf_debugfs_create_dir(g_dbr_module_name[mod_id].module_name_str, + dbr_pdev_obj->debugfs_entry); + + if (!mod_debug->debugfs_entry) { + direct_buf_rx_err("error while creating direct_buf debugfs entry for %s module", + g_dbr_module_name[mod_id].module_name_str); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_dbr_ring_debugfs_init() - Init debugfs for a given ring + * @dbr_pdev_obj: Pointer to the pdev obj of Direct buffer rx module + * @mod_id: Module ID corresponding to this ring + * @srng_id: srng ID corresponding to this ring + * + * Return: QDF_STATUS of operation + */ +static QDF_STATUS target_if_dbr_ring_debugfs_init( + struct direct_buf_rx_pdev_obj *dbr_pdev_obj, + enum DBR_MODULE mod_id, uint8_t srng_id) +{ + struct direct_buf_rx_module_debug *mod_debug; + struct direct_buf_rx_ring_debug *ring_debug; + struct dbr_debugfs_priv *priv; + char debug_file_name[32]; + + mod_debug = target_if_get_dbr_mod_debug_from_dbr_pdev_obj(dbr_pdev_obj, + mod_id); + + if (!mod_debug) + return QDF_STATUS_E_INVAL; + + ring_debug = &mod_debug->dbr_ring_debug[srng_id]; + + if (!mod_debug->debugfs_entry) { + direct_buf_rx_err("error mod_debug->debugfs_entry not created"); + return QDF_STATUS_E_FAILURE; + } + + if (ring_debug->debugfs_entry) { + direct_buf_rx_err("debugfs file for %d ring under %s module already created", + srng_id, + g_dbr_module_name[mod_id].module_name_str); + return QDF_STATUS_SUCCESS; + } + + qdf_snprintf(debug_file_name, sizeof(debug_file_name), + "ring_%d", srng_id); + + // Allocate debugfs ops + ring_debug->debugfs_fops = + qdf_mem_malloc(sizeof(*ring_debug->debugfs_fops)); + if (!ring_debug->debugfs_fops) { + direct_buf_rx_err("error in allocating debugfs ops"); + return QDF_STATUS_E_NOMEM; + } + + // Allocate private data + priv = qdf_mem_malloc(sizeof(*priv)); + if (!priv) { + direct_buf_rx_err("error in creating debugfs private data"); + goto priv_alloc_fail; + } + priv->dbr_pdev_obj = dbr_pdev_obj; + priv->mod_id = mod_id; + priv->srng_id = srng_id; + + /* Fill in the debugfs ops for this ring. + * When the output time comes, the 'show' function will be + * called with 'priv' as an argument. + */ + ring_debug->debugfs_fops->show = target_if_dbr_debugfs_show_ring_debug; + ring_debug->debugfs_fops->priv = priv; + + ring_debug->debugfs_entry = + qdf_debugfs_create_file_simplified( + debug_file_name, + (QDF_FILE_USR_READ | QDF_FILE_GRP_READ | + QDF_FILE_OTH_READ), + mod_debug->debugfs_entry, + ring_debug->debugfs_fops); + + if (!ring_debug->debugfs_entry) { + direct_buf_rx_err("error while creating direct_buf debugfs file for %d ring under %s module", + srng_id, + g_dbr_module_name[mod_id].module_name_str); + goto file_creation_fail; + } + + return QDF_STATUS_SUCCESS; + +file_creation_fail: + qdf_mem_free(ring_debug->debugfs_fops->priv); + +priv_alloc_fail: + qdf_mem_free(ring_debug->debugfs_fops); + ring_debug->debugfs_fops = NULL; + return QDF_STATUS_E_NOMEM; +} + +/** + * target_if_dbr_mod_debugfs_deinit() - De-init debugfs for a given module + * @mod_debug: Pointer to direct_buf_rx_module_debug structure + * + * Return: void + */ +static void target_if_dbr_mod_debugfs_deinit( + struct direct_buf_rx_module_debug *mod_debug) +{ + if (!mod_debug) { + direct_buf_rx_err("mod_debug is null"); + return; + } + + if (mod_debug->debugfs_entry) { + qdf_debugfs_remove_file(mod_debug->debugfs_entry); + mod_debug->debugfs_entry = NULL; + } +} + +/** + * target_if_dbr_ring_debugfs_deinit() - De-init debugfs for a given ring + * @ring_debug: Pointer to direct_buf_rx_ring_debug structure + * + * Return: void + */ +static void target_if_dbr_ring_debugfs_deinit( + struct direct_buf_rx_ring_debug *ring_debug) +{ + if (!ring_debug) { + direct_buf_rx_err("ring_debug is null"); + return; + } + + if (ring_debug->debugfs_entry) { + qdf_debugfs_remove_file(ring_debug->debugfs_entry); + ring_debug->debugfs_entry = NULL; + } + + // Free the private data and debugfs ops of this ring + if (ring_debug->debugfs_fops) { + qdf_mem_free(ring_debug->debugfs_fops->priv); + qdf_mem_free(ring_debug->debugfs_fops); + ring_debug->debugfs_fops = NULL; + } +} +#endif /* WLAN_DEBUGFS && DIRECT_BUF_RX_DEBUG */ + +#ifdef DIRECT_BUF_RX_DEBUG +QDF_STATUS target_if_dbr_stop_ring_debug(struct wlan_objmgr_pdev *pdev, + uint8_t mod_id) +{ + struct direct_buf_rx_module_debug *mod_debug; + struct direct_buf_rx_ring_debug *ring_debug; + uint8_t srng_id; + + mod_debug = target_if_get_dbr_mod_debug_from_pdev(pdev, mod_id); + if (!mod_debug) + return QDF_STATUS_E_INVAL; + + for (srng_id = 0; srng_id < DBR_SRNG_NUM; srng_id++) { + ring_debug = &mod_debug->dbr_ring_debug[srng_id]; + if (!ring_debug->entries) { + direct_buf_rx_debug("DBR ring debug for module %d srng %d was already disabled", + mod_id, srng_id); + continue; + } + /* De-init debugsfs for this ring */ + target_if_dbr_ring_debugfs_deinit(ring_debug); + qdf_mem_free(ring_debug->entries); + ring_debug->entries = NULL; + ring_debug->ring_debug_idx = 0; + ring_debug->num_ring_debug_entries = 0; + direct_buf_rx_info("DBR ring debug for module %d srng %d is now stopped", + mod_id, srng_id); + } + target_if_dbr_mod_debugfs_deinit(mod_debug); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_dbr_start_ring_debug(struct wlan_objmgr_pdev *pdev, + uint8_t mod_id, + uint32_t num_ring_debug_entries) +{ + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + struct direct_buf_rx_module_debug *mod_debug; + struct direct_buf_rx_ring_debug *ring_debug; + uint8_t srng_id; + + mod_debug = target_if_get_dbr_mod_debug_from_pdev(pdev, mod_id); + + if (!mod_debug) + return QDF_STATUS_E_INVAL; + + if (num_ring_debug_entries > DIRECT_BUF_RX_MAX_RING_DEBUG_ENTRIES) { + direct_buf_rx_err("Requested number of ring debug entries(%d) exceed the maximum entries allowed(%d)", + num_ring_debug_entries, + DIRECT_BUF_RX_MAX_RING_DEBUG_ENTRIES); + + return QDF_STATUS_E_FAILURE; + } + + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + target_if_dbr_mod_debugfs_init(dbr_pdev_obj, mod_id); + + for (srng_id = 0; srng_id < DBR_SRNG_NUM; srng_id++) { + ring_debug = &mod_debug->dbr_ring_debug[srng_id]; + + if (ring_debug->entries) { + direct_buf_rx_err("DBR ring debug for module %d srng %d was already enabled", + mod_id, srng_id); + continue; + } + + ring_debug->entries = qdf_mem_malloc( + num_ring_debug_entries * + sizeof(*ring_debug->entries)); + + if (!ring_debug->entries) + return QDF_STATUS_E_NOMEM; + + ring_debug->ring_debug_idx = 0; + ring_debug->num_ring_debug_entries = num_ring_debug_entries; + /* Init debugsfs for this ring */ + target_if_dbr_ring_debugfs_init( + dbr_pdev_obj, + mod_id, srng_id); + direct_buf_rx_info("DBR ring debug for module %d srng %d is now started", + mod_id, srng_id); + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_dbr_start_buffer_poisoning(struct wlan_objmgr_pdev *pdev, + uint8_t mod_id, uint32_t value) +{ + struct direct_buf_rx_module_debug *mod_debug; + + mod_debug = target_if_get_dbr_mod_debug_from_pdev(pdev, mod_id); + + if (!mod_debug) + return QDF_STATUS_E_INVAL; + + mod_debug->poisoning_enabled = true; + mod_debug->poison_value = value; /* Save the poison value */ + + direct_buf_rx_debug("DBR buffer poisoning for module %d is now started", + mod_id); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_dbr_stop_buffer_poisoning( + struct wlan_objmgr_pdev *pdev, + uint8_t mod_id) +{ + struct direct_buf_rx_module_debug *mod_debug; + + mod_debug = target_if_get_dbr_mod_debug_from_pdev(pdev, mod_id); + + if (!mod_debug) + return QDF_STATUS_E_INVAL; + + mod_debug->poisoning_enabled = false; + mod_debug->poison_value = 0; + + direct_buf_rx_debug("DBR buffer poisoning for module %d is now stopped", + mod_id); + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_dbr_fill_buffer_u32() - Fill buffer with an unsigned 32-bit value + * @buffer: pointer to the buffer + * @num_bytes: Size of the destination buffer in bytes + * @value: Unsigned 32-bit value to be copied + * + * Return : void + */ +static void +target_if_dbr_fill_buffer_u32(uint8_t *buffer, uint32_t num_bytes, + uint32_t value) +{ + uint32_t *bufp; + uint32_t idx; + uint32_t size = (num_bytes >> 2); + + if (!buffer) { + direct_buf_rx_err("buffer empty"); + return; + } + + bufp = (uint32_t *)buffer; + + for (idx = 0; idx < size; ++idx) { + *bufp = value; + ++bufp; + } +} + +/** + * target_if_dbr_debug_poison_buffer() - Poison a given DBR buffer + * @pdev: pointer to pdev object + * @mod_id: Module ID of the owner of the buffer + * @aligned_vaddr: Virtual address(aligned) of the buffer + * @size: Size of the buffer + * + * Value with which the buffers will be poisoned would have been saved + * while starting the buffer poisoning for the module, use that value. + * + * Return : QDF status of operation + */ +static QDF_STATUS target_if_dbr_debug_poison_buffer( + struct wlan_objmgr_pdev *pdev, + uint32_t mod_id, void *aligned_vaddr, uint32_t size) +{ + struct direct_buf_rx_module_debug *mod_debug; + + mod_debug = target_if_get_dbr_mod_debug_from_pdev(pdev, mod_id); + + if (!mod_debug) + return QDF_STATUS_E_INVAL; + + if (mod_debug->poisoning_enabled) { + target_if_dbr_fill_buffer_u32(aligned_vaddr, size, + mod_debug->poison_value); + } + + return QDF_STATUS_SUCCESS; +} + +static inline void target_if_dbr_qdf_show_ring_debug( + struct wlan_objmgr_pdev *pdev, + uint8_t mod_id, uint8_t srng_id) +{ + struct direct_buf_rx_pdev_obj *dbr_pdev_obj = + wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + target_if_dbr_print_ring_debug_entries( + target_if_dbr_qdf_err_printer, + NULL, dbr_pdev_obj, + mod_id, srng_id); +} +#else +QDF_STATUS target_if_dbr_stop_ring_debug(struct wlan_objmgr_pdev *pdev, + uint8_t mod_id) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_dbr_start_ring_debug(struct wlan_objmgr_pdev *pdev, + uint8_t mod_id, + uint32_t num_ring_debug_entries) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_dbr_start_buffer_poisoning(struct wlan_objmgr_pdev *pdev, + uint8_t mod_id, uint32_t value) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_dbr_stop_buffer_poisoning( + struct wlan_objmgr_pdev *pdev, + uint8_t mod_id) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_dbr_debug_poison_buffer( + struct wlan_objmgr_pdev *pdev, + uint32_t mod_id, void *aligned_vaddr, uint32_t size) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void target_if_dbr_qdf_show_ring_debug( + struct wlan_objmgr_pdev *pdev, + uint8_t mod_id, uint8_t srng_id) +{ +} +#endif /* DIRECT_BUF_RX_DEBUG */ + +static QDF_STATUS target_if_dbr_replenish_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param, + void *aligned_vaddr, uint32_t cookie) +{ + uint64_t *ring_entry; + uint32_t dw_lo, dw_hi = 0, map_status; + void *hal_soc, *srng; + qdf_dma_addr_t paddr; + struct wlan_objmgr_psoc *psoc; + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + struct direct_buf_rx_ring_cap *dbr_ring_cap; + struct direct_buf_rx_buf_info *dbr_buf_pool; + + dbr_ring_cfg = mod_param->dbr_ring_cfg; + dbr_ring_cap = mod_param->dbr_ring_cap; + dbr_buf_pool = mod_param->dbr_buf_pool; + + psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + if (cookie >= mod_param->dbr_ring_cfg->num_ptr) { + direct_buf_rx_err("invalid cookie %d", cookie); + return QDF_STATUS_E_INVAL; + } + + dbr_psoc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (!dbr_psoc_obj) { + direct_buf_rx_err("dir buf rx psoc object is null"); + return QDF_STATUS_E_FAILURE; + } + + hal_soc = dbr_psoc_obj->hal_soc; + srng = dbr_ring_cfg->srng; + if (!aligned_vaddr) { + direct_buf_rx_err("aligned vaddr is null"); + return QDF_STATUS_SUCCESS; + } + + target_if_dbr_debug_poison_buffer( + pdev, mod_param->mod_id, aligned_vaddr, + dbr_ring_cap->min_buf_size); + + map_status = qdf_mem_map_nbytes_single(dbr_psoc_obj->osdev, + aligned_vaddr, + QDF_DMA_FROM_DEVICE, + dbr_ring_cap->min_buf_size, + &paddr); + if (map_status) { + direct_buf_rx_err("mem map failed status = %d", map_status); + return QDF_STATUS_E_FAILURE; + } + + QDF_ASSERT(!((uint64_t)paddr % dbr_ring_cap->min_buf_align)); + dbr_buf_pool[cookie].paddr = paddr; + + hal_srng_access_start(hal_soc, srng); + ring_entry = hal_srng_src_get_next(hal_soc, srng); + + if (!ring_entry) { + target_if_dbr_qdf_show_ring_debug(pdev, mod_param->mod_id, + mod_param->srng_id); + QDF_BUG(0); + } + + dw_lo = (uint64_t)paddr & 0xFFFFFFFF; + WMI_HOST_DBR_RING_ADDR_HI_SET(dw_hi, (uint64_t)paddr >> 32); + WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA_SET(dw_hi, cookie); + *ring_entry = (uint64_t)dw_hi << 32 | dw_lo; + hal_srng_access_end(hal_soc, srng); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_dbr_fill_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param) +{ + uint32_t idx; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + struct direct_buf_rx_ring_cap *dbr_ring_cap; + struct direct_buf_rx_buf_info *dbr_buf_pool; + QDF_STATUS status; + + direct_buf_rx_enter(); + + dbr_ring_cfg = mod_param->dbr_ring_cfg; + dbr_ring_cap = mod_param->dbr_ring_cap; + dbr_buf_pool = mod_param->dbr_buf_pool; + + for (idx = 0; idx < dbr_ring_cfg->num_ptr - 1; idx++) { + void *buf_vaddr_unaligned = NULL, *buf_vaddr_aligned; + dma_addr_t buf_paddr_aligned, buf_paddr_unaligned; + + buf_vaddr_aligned = qdf_aligned_malloc( + &dbr_ring_cap->min_buf_size, &buf_vaddr_unaligned, + &buf_paddr_unaligned, &buf_paddr_aligned, + dbr_ring_cap->min_buf_align); + + if (!buf_vaddr_aligned) { + direct_buf_rx_err("dir buf rx ring alloc failed"); + return QDF_STATUS_E_NOMEM; + } + dbr_buf_pool[idx].vaddr = buf_vaddr_unaligned; + dbr_buf_pool[idx].offset = buf_vaddr_aligned - + buf_vaddr_unaligned; + dbr_buf_pool[idx].cookie = idx; + status = target_if_dbr_replenish_ring(pdev, mod_param, + buf_vaddr_aligned, idx); + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("replenish failed with status : %d", + status); + qdf_mem_free(buf_vaddr_unaligned); + return QDF_STATUS_E_FAILURE; + } + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_dbr_init_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param) +{ + void *srng; + uint32_t num_entries, ring_alloc_size, max_entries, entry_size; + qdf_dma_addr_t paddr; + struct hal_srng_params ring_params = {0}; + struct wlan_objmgr_psoc *psoc; + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + struct direct_buf_rx_ring_cap *dbr_ring_cap; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + QDF_STATUS status; + + direct_buf_rx_enter(); + + psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dbr_psoc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (!dbr_psoc_obj) { + direct_buf_rx_err("dir buf rx psoc object is null"); + return QDF_STATUS_E_FAILURE; + } + + if (!dbr_psoc_obj->hal_soc || + !dbr_psoc_obj->osdev) { + direct_buf_rx_err("dir buf rx target attach failed"); + return QDF_STATUS_E_FAILURE; + } + + max_entries = hal_srng_max_entries(dbr_psoc_obj->hal_soc, + DIR_BUF_RX_DMA_SRC); + entry_size = hal_srng_get_entrysize(dbr_psoc_obj->hal_soc, + DIR_BUF_RX_DMA_SRC); + direct_buf_rx_debug("Max Entries = %d", max_entries); + direct_buf_rx_debug("Entry Size = %d", entry_size); + + status = populate_dbr_cap_mod_param(pdev, mod_param); + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("Module cap population failed"); + return QDF_STATUS_E_FAILURE; + } + + dbr_ring_cap = mod_param->dbr_ring_cap; + dbr_ring_cfg = mod_param->dbr_ring_cfg; + num_entries = dbr_ring_cap->ring_elems_min > max_entries ? + max_entries : dbr_ring_cap->ring_elems_min; + direct_buf_rx_debug("Num entries = %d", num_entries); + dbr_ring_cfg->num_ptr = num_entries; + mod_param->dbr_buf_pool = qdf_mem_malloc(num_entries * sizeof( + struct direct_buf_rx_buf_info)); + if (!mod_param->dbr_buf_pool) + return QDF_STATUS_E_NOMEM; + + ring_alloc_size = (num_entries * entry_size) + DBR_RING_BASE_ALIGN - 1; + dbr_ring_cfg->ring_alloc_size = ring_alloc_size; + direct_buf_rx_debug("dbr_psoc_obj %pK", dbr_psoc_obj); + dbr_ring_cfg->base_vaddr_unaligned = qdf_mem_alloc_consistent( + dbr_psoc_obj->osdev, dbr_psoc_obj->osdev->dev, ring_alloc_size, + &paddr); + direct_buf_rx_debug("vaddr aligned allocated"); + dbr_ring_cfg->base_paddr_unaligned = paddr; + if (!dbr_ring_cfg->base_vaddr_unaligned) { + direct_buf_rx_err("dir buf rx vaddr alloc failed"); + qdf_mem_free(mod_param->dbr_buf_pool); + return QDF_STATUS_E_NOMEM; + } + + /* Alignment is defined to 8 for now. Will be advertised by FW */ + dbr_ring_cfg->base_vaddr_aligned = (void *)(uintptr_t)qdf_roundup( + (uint64_t)(uintptr_t)dbr_ring_cfg->base_vaddr_unaligned, + DBR_RING_BASE_ALIGN); + ring_params.ring_base_vaddr = dbr_ring_cfg->base_vaddr_aligned; + dbr_ring_cfg->base_paddr_aligned = qdf_roundup( + (uint64_t)dbr_ring_cfg->base_paddr_unaligned, + DBR_RING_BASE_ALIGN); + ring_params.ring_base_paddr = + (qdf_dma_addr_t)dbr_ring_cfg->base_paddr_aligned; + ring_params.num_entries = num_entries; + srng = hal_srng_setup(dbr_psoc_obj->hal_soc, DIR_BUF_RX_DMA_SRC, + mod_param->mod_id, + mod_param->pdev_id, &ring_params); + + if (!srng) { + direct_buf_rx_err("srng setup failed"); + qdf_mem_free(mod_param->dbr_buf_pool); + qdf_mem_free_consistent(dbr_psoc_obj->osdev, + dbr_psoc_obj->osdev->dev, + ring_alloc_size, + dbr_ring_cfg->base_vaddr_unaligned, + (qdf_dma_addr_t)dbr_ring_cfg->base_paddr_unaligned, 0); + return QDF_STATUS_E_FAILURE; + } + dbr_ring_cfg->srng = srng; + dbr_ring_cfg->tail_idx_addr = + hal_srng_get_tp_addr(dbr_psoc_obj->hal_soc, srng); + dbr_ring_cfg->head_idx_addr = + hal_srng_get_hp_addr(dbr_psoc_obj->hal_soc, srng); + dbr_ring_cfg->buf_size = dbr_ring_cap->min_buf_size; + + return target_if_dbr_fill_ring(pdev, mod_param); +} + +static QDF_STATUS target_if_dbr_init_srng(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param) +{ + QDF_STATUS status; + + direct_buf_rx_debug("Init DBR srng"); + + if (!mod_param) { + direct_buf_rx_err("dir buf rx module param is null"); + return QDF_STATUS_E_INVAL; + } + + mod_param->dbr_ring_cap = qdf_mem_malloc(sizeof( + struct direct_buf_rx_ring_cap)); + + if (!mod_param->dbr_ring_cap) + return QDF_STATUS_E_NOMEM; + + /* Allocate memory for DBR Ring Config */ + mod_param->dbr_ring_cfg = qdf_mem_malloc(sizeof( + struct direct_buf_rx_ring_cfg)); + + if (!mod_param->dbr_ring_cfg) { + qdf_mem_free(mod_param->dbr_ring_cap); + return QDF_STATUS_E_NOMEM; + } + + status = target_if_dbr_init_ring(pdev, mod_param); + + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("DBR ring init failed"); + qdf_mem_free(mod_param->dbr_ring_cfg); + qdf_mem_free(mod_param->dbr_ring_cap); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_dbr_cfg_tgt(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + wmi_unified_t wmi_hdl; + struct direct_buf_rx_cfg_req dbr_cfg_req = {0}; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + struct direct_buf_rx_ring_cap *dbr_ring_cap; + struct dbr_module_config *dbr_config; + + direct_buf_rx_enter(); + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dbr_ring_cfg = mod_param->dbr_ring_cfg; + dbr_ring_cap = mod_param->dbr_ring_cap; + dbr_config = &mod_param->dbr_config; + wmi_hdl = lmac_get_pdev_wmi_handle(pdev); + if (!wmi_hdl) { + direct_buf_rx_err("WMI handle null. Can't send WMI CMD"); + return QDF_STATUS_E_INVAL; + } + + direct_buf_rx_debug("Sending DBR Ring CFG to target"); + dbr_cfg_req.pdev_id = mod_param->pdev_id; + /* Module ID numbering starts from 1 in FW. need to fix it */ + dbr_cfg_req.mod_id = mod_param->mod_id; + dbr_cfg_req.base_paddr_lo = + qdf_get_lower_32_bits(dbr_ring_cfg->base_paddr_aligned); + dbr_cfg_req.base_paddr_hi = + qdf_get_upper_32_bits(dbr_ring_cfg->base_paddr_aligned); + dbr_cfg_req.head_idx_paddr_lo = + qdf_get_lower_32_bits(dbr_ring_cfg->head_idx_addr); + dbr_cfg_req.head_idx_paddr_hi = + qdf_get_upper_32_bits(dbr_ring_cfg->head_idx_addr); + dbr_cfg_req.tail_idx_paddr_lo = + qdf_get_lower_32_bits(dbr_ring_cfg->tail_idx_addr); + dbr_cfg_req.tail_idx_paddr_hi = + qdf_get_upper_32_bits(dbr_ring_cfg->tail_idx_addr); + dbr_cfg_req.num_elems = dbr_ring_cap->ring_elems_min; + dbr_cfg_req.buf_size = dbr_ring_cap->min_buf_size; + dbr_cfg_req.num_resp_per_event = dbr_config->num_resp_per_event; + dbr_cfg_req.event_timeout_ms = dbr_config->event_timeout_in_ms; + direct_buf_rx_debug("pdev id %d mod id %d base addr lo %x\n" + "base addr hi %x head idx addr lo %x\n" + "head idx addr hi %x tail idx addr lo %x\n" + "tail idx addr hi %x num ptr %d\n" + "num resp %d event timeout %d\n", + dbr_cfg_req.pdev_id, dbr_cfg_req.mod_id, + dbr_cfg_req.base_paddr_lo, + dbr_cfg_req.base_paddr_hi, + dbr_cfg_req.head_idx_paddr_lo, + dbr_cfg_req.head_idx_paddr_hi, + dbr_cfg_req.tail_idx_paddr_lo, + dbr_cfg_req.tail_idx_paddr_hi, + dbr_cfg_req.num_elems, + dbr_cfg_req.num_resp_per_event, + dbr_cfg_req.event_timeout_ms); + status = wmi_unified_dbr_ring_cfg(wmi_hdl, &dbr_cfg_req); + + return status; +} + +static QDF_STATUS target_if_init_dbr_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_pdev_obj *dbr_pdev_obj, + enum DBR_MODULE mod_id, uint8_t srng_id) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct direct_buf_rx_module_param *mod_param; + + direct_buf_rx_debug("Init DBR ring for module %d, srng %d", + mod_id, srng_id); + + if (!dbr_pdev_obj) { + direct_buf_rx_err("dir buf rx object is null"); + return QDF_STATUS_E_INVAL; + } + + mod_param = &(dbr_pdev_obj->dbr_mod_param[mod_id][srng_id]); + + if (!mod_param) { + direct_buf_rx_err("dir buf rx module param is null"); + return QDF_STATUS_E_FAILURE; + } + + direct_buf_rx_debug("mod_param %pK", mod_param); + + mod_param->mod_id = mod_id; + mod_param->pdev_id = dbr_get_pdev_id( + srng_id, wlan_objmgr_pdev_get_pdev_id(pdev)); + mod_param->srng_id = srng_id; + + /* Initialize DMA ring now */ + status = target_if_dbr_init_srng(pdev, mod_param); + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("DBR ring init failed %d", status); + return status; + } + + /* Send CFG request command to firmware */ + status = target_if_dbr_cfg_tgt(pdev, mod_param); + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("DBR config to target failed %d", status); + goto dbr_srng_init_failed; + } + + return QDF_STATUS_SUCCESS; + +dbr_srng_init_failed: + target_if_deinit_dbr_ring(pdev, dbr_pdev_obj, mod_id, srng_id); + return status; +} + +QDF_STATUS target_if_direct_buf_rx_module_register( + struct wlan_objmgr_pdev *pdev, uint8_t mod_id, + struct dbr_module_config *dbr_config, + bool (*dbr_rsp_handler) + (struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_data *dbr_data)) +{ + QDF_STATUS status; + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + struct dbr_module_config *config = NULL; + struct direct_buf_rx_module_param *mod_param; + uint8_t srng_id; + + if (!pdev) { + direct_buf_rx_err("pdev context passed is null"); + return QDF_STATUS_E_INVAL; + } + + if (!dbr_rsp_handler) { + direct_buf_rx_err("Response handler is null"); + return QDF_STATUS_E_INVAL; + } + + if (mod_id >= DBR_MODULE_MAX) { + direct_buf_rx_err("Invalid module id"); + return QDF_STATUS_E_INVAL; + } + + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (!dbr_pdev_obj) { + direct_buf_rx_err("dir buf rx object is null"); + return QDF_STATUS_E_FAILURE; + } + + direct_buf_rx_debug("Dbr pdev obj %pK", dbr_pdev_obj); + + if (!dbr_pdev_obj->dbr_mod_param) { + direct_buf_rx_err("dbr_pdev_obj->dbr_mod_param is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (mod_id >= dbr_pdev_obj->num_modules) { + direct_buf_rx_err("Module %d not supported in target", mod_id); + return QDF_STATUS_E_FAILURE; + } + + for (srng_id = 0; srng_id < DBR_SRNG_NUM; srng_id++) { + mod_param = &dbr_pdev_obj->dbr_mod_param[mod_id][srng_id]; + config = &mod_param->dbr_config; + mod_param->dbr_rsp_handler = dbr_rsp_handler; + *config = *dbr_config; + + status = target_if_init_dbr_ring(pdev, dbr_pdev_obj, + (enum DBR_MODULE)mod_id, + srng_id); + if (QDF_IS_STATUS_ERROR(status)) + direct_buf_rx_err("init dbr ring fail, srng_id %d, status %d", + srng_id, status); + } + + return status; +} + +QDF_STATUS target_if_direct_buf_rx_module_unregister( + struct wlan_objmgr_pdev *pdev, uint8_t mod_id) +{ + QDF_STATUS status; + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + uint8_t srng_id; + + if (!pdev) { + direct_buf_rx_err("pdev context passed is null"); + return QDF_STATUS_E_INVAL; + } + + if (mod_id >= DBR_MODULE_MAX) { + direct_buf_rx_err("Invalid module id"); + return QDF_STATUS_E_INVAL; + } + + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj + (pdev, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (!dbr_pdev_obj) { + direct_buf_rx_err("dir buf rx object is null"); + return QDF_STATUS_E_FAILURE; + } + + direct_buf_rx_debug("Dbr pdev obj %pK", dbr_pdev_obj); + + if (!dbr_pdev_obj->dbr_mod_param) { + direct_buf_rx_err("dbr_pdev_obj->dbr_mod_param is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (mod_id >= dbr_pdev_obj->num_modules) { + direct_buf_rx_err("Module %d not supported in target", mod_id); + return QDF_STATUS_E_FAILURE; + } + + for (srng_id = 0; srng_id < DBR_SRNG_NUM; srng_id++) { + status = target_if_deinit_dbr_ring(pdev, dbr_pdev_obj, + mod_id, srng_id); + direct_buf_rx_info("status %d", status); + } + + return status; +} + +static void *target_if_dbr_vaddr_lookup( + struct direct_buf_rx_module_param *mod_param, + qdf_dma_addr_t paddr, uint32_t cookie) +{ + struct direct_buf_rx_buf_info *dbr_buf_pool; + + dbr_buf_pool = mod_param->dbr_buf_pool; + + if (cookie >= mod_param->dbr_ring_cfg->num_ptr) { + direct_buf_rx_err("invalid cookie %d", cookie); + return NULL; + } + + if (dbr_buf_pool[cookie].paddr == paddr) { + return dbr_buf_pool[cookie].vaddr + + dbr_buf_pool[cookie].offset; + } + direct_buf_rx_debug("Invalid paddr, cookie %d, pool paddr %pK, paddr %pK", + cookie, (void *)dbr_buf_pool[cookie].paddr, + (void *)paddr); + + return NULL; +} + +QDF_STATUS target_if_dbr_cookie_lookup(struct wlan_objmgr_pdev *pdev, + uint8_t mod_id, qdf_dma_addr_t paddr, + uint32_t *cookie, uint8_t srng_id) +{ + struct direct_buf_rx_buf_info *dbr_buf_pool; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + struct direct_buf_rx_module_param *mod_param; + enum wlan_umac_comp_id dbr_comp_id = WLAN_TARGET_IF_COMP_DIRECT_BUF_RX; + uint32_t idx; + + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, dbr_comp_id); + if (!dbr_pdev_obj) { + direct_buf_rx_err("dir buf rx object is null"); + return QDF_STATUS_E_FAILURE; + } + + mod_param = &dbr_pdev_obj->dbr_mod_param[mod_id][srng_id]; + if (!mod_param) { + direct_buf_rx_err("dir buf rx module param is null"); + return QDF_STATUS_E_FAILURE; + } + + dbr_ring_cfg = mod_param->dbr_ring_cfg; + dbr_buf_pool = mod_param->dbr_buf_pool; + + for (idx = 0; idx < dbr_ring_cfg->num_ptr - 1; idx++) { + if (dbr_buf_pool[idx].paddr && + dbr_buf_pool[idx].paddr == paddr) { + *cookie = idx; + return QDF_STATUS_SUCCESS; + } + } + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS target_if_dbr_buf_release(struct wlan_objmgr_pdev *pdev, + uint8_t mod_id, qdf_dma_addr_t paddr, + uint32_t cookie, uint8_t srng_id) +{ + struct direct_buf_rx_module_param *mod_param; + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + enum wlan_umac_comp_id dbr_comp_id = WLAN_TARGET_IF_COMP_DIRECT_BUF_RX; + void *vaddr; + QDF_STATUS status; + + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, dbr_comp_id); + if (!dbr_pdev_obj) { + direct_buf_rx_err("dir buf rx object is null"); + return QDF_STATUS_E_FAILURE; + } + + mod_param = &dbr_pdev_obj->dbr_mod_param[mod_id][srng_id]; + if (!mod_param) { + direct_buf_rx_err("dir buf rx module param is null"); + return QDF_STATUS_E_FAILURE; + } + + vaddr = target_if_dbr_vaddr_lookup(mod_param, paddr, cookie); + if (!vaddr) + return QDF_STATUS_E_FAILURE; + + status = target_if_dbr_replenish_ring(pdev, mod_param, + vaddr, cookie); + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("Ring replenish failed"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_get_dbr_data(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param, + struct direct_buf_rx_rsp *dbr_rsp, + struct direct_buf_rx_data *dbr_data, + uint8_t idx, uint32_t *cookie) +{ + qdf_dma_addr_t paddr = 0; + uint32_t addr_hi; + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + struct direct_buf_rx_ring_cap *dbr_ring_cap; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dbr_psoc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (!dbr_psoc_obj) { + direct_buf_rx_err("dir buf rx psoc object is null"); + return QDF_STATUS_E_FAILURE; + } + + dbr_ring_cap = mod_param->dbr_ring_cap; + addr_hi = (uint64_t)WMI_HOST_DBR_DATA_ADDR_HI_GET( + dbr_rsp->dbr_entries[idx].paddr_hi); + paddr = (qdf_dma_addr_t)((uint64_t)addr_hi << 32 | + dbr_rsp->dbr_entries[idx].paddr_lo); + *cookie = WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA_GET( + dbr_rsp->dbr_entries[idx].paddr_hi); + dbr_data->vaddr = target_if_dbr_vaddr_lookup(mod_param, paddr, *cookie); + + if (!dbr_data->vaddr) { + direct_buf_rx_debug("dbr vaddr lookup failed, cookie %d, hi %x, lo %x", + *cookie, dbr_rsp->dbr_entries[idx].paddr_hi, + dbr_rsp->dbr_entries[idx].paddr_lo); + return QDF_STATUS_E_FAILURE; + } + + dbr_data->cookie = *cookie; + dbr_data->paddr = paddr; + direct_buf_rx_debug("Cookie = %d Vaddr look up = %pK", + dbr_data->cookie, dbr_data->vaddr); + dbr_data->dbr_len = dbr_rsp->dbr_entries[idx].len; + qdf_mem_unmap_nbytes_single(dbr_psoc_obj->osdev, (qdf_dma_addr_t)paddr, + QDF_DMA_FROM_DEVICE, + dbr_ring_cap->min_buf_size); + + return QDF_STATUS_SUCCESS; +} + +#ifdef DBR_MULTI_SRNG_ENABLE +/** + * dbr_get_pdev_and_srng_id() - get pdev object and srng id + * + * @psoc: pointer to psoc object + * @pdev_id: pdev id from wmi_pdev_dma_ring_buf_release eventid + * @srng_id: pointer to return srng id + * + * Return : pointer to pdev + */ +static struct wlan_objmgr_pdev * +dbr_get_pdev_and_srng_id(struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *srng_id) +{ + struct wlan_objmgr_pdev *pdev; + wlan_objmgr_ref_dbgid dbr_mod_id = WLAN_DIRECT_BUF_RX_ID; + + pdev = wlan_objmgr_get_pdev_by_id(psoc, pdev_id, dbr_mod_id); + if (!pdev) { + pdev = wlan_objmgr_get_pdev_by_id(psoc, TGT_WMI_PDEV_ID_SOC, + dbr_mod_id); + if (pdev) { + direct_buf_rx_debug("update srng id from %d to %d", + *srng_id, pdev_id); + *srng_id = pdev_id; + } + } + + return pdev; +} +#else +static struct wlan_objmgr_pdev * +dbr_get_pdev_and_srng_id(struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *srng_id) +{ + struct wlan_objmgr_pdev *pdev; + wlan_objmgr_ref_dbgid dbr_mod_id = WLAN_DIRECT_BUF_RX_ID; + + pdev = wlan_objmgr_get_pdev_by_id(psoc, pdev_id, dbr_mod_id); + + return pdev; +} +#endif + +#ifdef DIRECT_BUF_RX_DEBUG +/** + * target_if_dbr_add_ring_debug_entry() - Add a DBR ring debug entry + * @pdev: pointer to pdev object + * @mod_id: Module ID + * @event: ring debug event + * + * Log the given event, head and tail pointers of DBR ring of the given module + * into its ring debug data structure. + * Also, log the timestamp at the time of logging. + */ +static void target_if_dbr_add_ring_debug_entry( + struct wlan_objmgr_pdev *pdev, + uint32_t mod_id, + enum DBR_RING_DEBUG_EVENT event, + uint8_t srng_id) +{ + struct wlan_objmgr_psoc *psoc; + void *hal_soc, *srng; + uint32_t hp = 0, tp = 0; + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + struct direct_buf_rx_module_debug *mod_debug; + struct direct_buf_rx_module_param *mod_param; + struct direct_buf_rx_ring_debug *ring_debug; + struct direct_buf_rx_ring_debug_entry *entry; + + mod_debug = target_if_get_dbr_mod_debug_from_pdev(pdev, mod_id); + + if (!mod_debug) + return; + + psoc = wlan_pdev_get_psoc(pdev); + + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + dbr_psoc_obj = wlan_objmgr_psoc_get_comp_private_obj( + psoc, WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + mod_param = &dbr_pdev_obj->dbr_mod_param[mod_id][srng_id]; + if (!mod_param) { + direct_buf_rx_err("dir buf rx module param is null"); + return; + } + + hal_soc = dbr_psoc_obj->hal_soc; + dbr_ring_cfg = mod_param->dbr_ring_cfg; + srng = dbr_ring_cfg->srng; + ring_debug = &mod_debug->dbr_ring_debug[srng_id]; + + if (ring_debug->entries) { + if (hal_srng_access_start(hal_soc, srng)) { + direct_buf_rx_err("module %d - HAL srng access failed", + mod_id); + return; + } + hal_get_sw_hptp(hal_soc, srng, &tp, &hp); + hal_srng_access_end(hal_soc, srng); + entry = &ring_debug->entries[ring_debug->ring_debug_idx]; + + entry->head_idx = hp; + entry->tail_idx = tp; + entry->timestamp = qdf_get_log_timestamp(); + entry->event = event; + + ring_debug->ring_debug_idx++; + if (ring_debug->ring_debug_idx == + ring_debug->num_ring_debug_entries) + ring_debug->ring_debug_idx = 0; + } +} + +#else +static void target_if_dbr_add_ring_debug_entry( + struct wlan_objmgr_pdev *pdev, + uint32_t mod_id, + enum DBR_RING_DEBUG_EVENT event, + uint8_t srng_id) +{ +} +#endif /* DIRECT_BUF_RX_DEBUG */ + +static int target_if_direct_buf_rx_rsp_event_handler(ol_scn_t scn, + uint8_t *data_buf, + uint32_t data_len) +{ + int ret = 0; + uint8_t i = 0; + QDF_STATUS status; + uint32_t cookie = 0; + struct direct_buf_rx_rsp dbr_rsp = {0}; + struct direct_buf_rx_data dbr_data = {0}; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct direct_buf_rx_buf_info *dbr_buf_pool; + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + struct direct_buf_rx_module_param *mod_param; + struct wmi_unified *wmi_handle; + wlan_objmgr_ref_dbgid dbr_mod_id = WLAN_DIRECT_BUF_RX_ID; + uint8_t srng_id = 0; + + direct_buf_rx_enter(); + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle = GET_WMI_HDL_FROM_PSOC(psoc); + if (!wmi_handle) { + direct_buf_rx_err("WMI handle is null"); + return QDF_STATUS_E_FAILURE; + } + + if (wmi_extract_dbr_buf_release_fixed( + wmi_handle, data_buf, &dbr_rsp) != QDF_STATUS_SUCCESS) { + direct_buf_rx_err("unable to extract DBR rsp fixed param"); + return QDF_STATUS_E_FAILURE; + } + + direct_buf_rx_debug("Num buf release entry = %d", + dbr_rsp.num_buf_release_entry); + + pdev = dbr_get_pdev_and_srng_id(psoc, (uint8_t)dbr_rsp.pdev_id, + &srng_id); + if (!pdev || (srng_id >= DBR_SRNG_NUM)) { + direct_buf_rx_err("invalid pdev or srng, pdev %pK, srng %d", + pdev, srng_id); + return QDF_STATUS_E_INVAL; + } + + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (!dbr_pdev_obj) { + direct_buf_rx_err("dir buf rx object is null"); + wlan_objmgr_pdev_release_ref(pdev, dbr_mod_id); + return QDF_STATUS_E_FAILURE; + } + + if (dbr_rsp.mod_id >= dbr_pdev_obj->num_modules) { + direct_buf_rx_err("Invalid module id:%d", dbr_rsp.mod_id); + wlan_objmgr_pdev_release_ref(pdev, dbr_mod_id); + return QDF_STATUS_E_FAILURE; + } + mod_param = &(dbr_pdev_obj->dbr_mod_param[dbr_rsp.mod_id][srng_id]); + + if (!mod_param) { + direct_buf_rx_err("dir buf rx module param is null"); + wlan_objmgr_pdev_release_ref(pdev, dbr_mod_id); + return QDF_STATUS_E_FAILURE; + } + + dbr_buf_pool = mod_param->dbr_buf_pool; + dbr_rsp.dbr_entries = qdf_mem_malloc(dbr_rsp.num_buf_release_entry * + sizeof(struct direct_buf_rx_entry)); + if (!dbr_rsp.dbr_entries) { + direct_buf_rx_err("invalid dbr_entries"); + wlan_objmgr_pdev_release_ref(pdev, dbr_mod_id); + return QDF_STATUS_E_FAILURE; + } + + if (dbr_rsp.num_meta_data_entry > dbr_rsp.num_buf_release_entry) { + direct_buf_rx_err("More than expected number of metadata"); + wlan_objmgr_pdev_release_ref(pdev, dbr_mod_id); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < dbr_rsp.num_buf_release_entry; i++) { + if (wmi_extract_dbr_buf_release_entry( + wmi_handle, data_buf, i, + &dbr_rsp.dbr_entries[i]) != QDF_STATUS_SUCCESS) { + direct_buf_rx_err("Unable to extract DBR buf entry %d", + i+1); + qdf_mem_free(dbr_rsp.dbr_entries); + wlan_objmgr_pdev_release_ref(pdev, dbr_mod_id); + return QDF_STATUS_E_FAILURE; + } + status = target_if_get_dbr_data(pdev, mod_param, &dbr_rsp, + &dbr_data, i, &cookie); + + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("DBR data get failed"); + qdf_mem_free(dbr_rsp.dbr_entries); + wlan_objmgr_pdev_release_ref(pdev, dbr_mod_id); + return QDF_STATUS_E_FAILURE; + } + + dbr_data.meta_data_valid = false; + if (i < dbr_rsp.num_meta_data_entry) { + if (wmi_extract_dbr_buf_metadata( + wmi_handle, data_buf, i, + &dbr_data.meta_data) == QDF_STATUS_SUCCESS) + dbr_data.meta_data_valid = true; + } + + target_if_dbr_add_ring_debug_entry(pdev, dbr_rsp.mod_id, + DBR_RING_DEBUG_EVENT_RX, + srng_id); + if (mod_param->dbr_rsp_handler(pdev, &dbr_data)) { + status = target_if_dbr_replenish_ring(pdev, mod_param, + dbr_data.vaddr, + cookie); + + target_if_dbr_add_ring_debug_entry( + pdev, dbr_rsp.mod_id, + DBR_RING_DEBUG_EVENT_REPLENISH_RING, + srng_id); + + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("Ring replenish failed"); + qdf_mem_free(dbr_rsp.dbr_entries); + wlan_objmgr_pdev_release_ref(pdev, dbr_mod_id); + return QDF_STATUS_E_FAILURE; + } + } + } + + qdf_mem_free(dbr_rsp.dbr_entries); + wlan_objmgr_pdev_release_ref(pdev, dbr_mod_id); + + return ret; +} + +static QDF_STATUS target_if_dbr_empty_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_psoc_obj *dbr_psoc_obj, + struct direct_buf_rx_module_param *mod_param) +{ + uint32_t idx; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + struct direct_buf_rx_ring_cap *dbr_ring_cap; + struct direct_buf_rx_buf_info *dbr_buf_pool; + + direct_buf_rx_enter(); + dbr_ring_cfg = mod_param->dbr_ring_cfg; + dbr_ring_cap = mod_param->dbr_ring_cap; + dbr_buf_pool = mod_param->dbr_buf_pool; + + direct_buf_rx_debug("dbr_ring_cfg %pK, ring_cap %pK buf_pool %pK", + dbr_ring_cfg, dbr_ring_cap, dbr_buf_pool); + + for (idx = 0; idx < dbr_ring_cfg->num_ptr - 1; idx++) { + qdf_mem_unmap_nbytes_single(dbr_psoc_obj->osdev, + (qdf_dma_addr_t)dbr_buf_pool[idx].paddr, + QDF_DMA_FROM_DEVICE, + dbr_ring_cap->min_buf_size); + qdf_mem_free(dbr_buf_pool[idx].vaddr); + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_dbr_deinit_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param) +{ + struct wlan_objmgr_psoc *psoc; + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + + direct_buf_rx_enter(); + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dbr_psoc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (!dbr_psoc_obj) { + direct_buf_rx_err("dir buf rx psoc object is null"); + return QDF_STATUS_E_FAILURE; + } + direct_buf_rx_debug("dbr_psoc_obj %pK", dbr_psoc_obj); + + dbr_ring_cfg = mod_param->dbr_ring_cfg; + if (dbr_ring_cfg) { + target_if_dbr_empty_ring(pdev, dbr_psoc_obj, mod_param); + hal_srng_cleanup(dbr_psoc_obj->hal_soc, dbr_ring_cfg->srng); + qdf_mem_free_consistent(dbr_psoc_obj->osdev, + dbr_psoc_obj->osdev->dev, + dbr_ring_cfg->ring_alloc_size, + dbr_ring_cfg->base_vaddr_unaligned, + (qdf_dma_addr_t)dbr_ring_cfg->base_paddr_unaligned, 0); + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_dbr_deinit_srng( + struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param) +{ + struct direct_buf_rx_buf_info *dbr_buf_pool; + + direct_buf_rx_enter(); + dbr_buf_pool = mod_param->dbr_buf_pool; + direct_buf_rx_debug("dbr buf pool %pK", dbr_buf_pool); + target_if_dbr_deinit_ring(pdev, mod_param); + if (mod_param->dbr_buf_pool) + qdf_mem_free(dbr_buf_pool); + mod_param->dbr_buf_pool = NULL; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_deinit_dbr_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_pdev_obj *dbr_pdev_obj, + enum DBR_MODULE mod_id, uint8_t srng_id) +{ + struct direct_buf_rx_module_param *mod_param; + + direct_buf_rx_enter(); + mod_param = &(dbr_pdev_obj->dbr_mod_param[mod_id][srng_id]); + + if (!mod_param) { + direct_buf_rx_err("dir buf rx module param is null"); + return QDF_STATUS_E_FAILURE; + } + direct_buf_rx_debug("mod_param %pK, dbr_ring_cap %pK", + mod_param, mod_param->dbr_ring_cap); + target_if_dbr_deinit_srng(pdev, mod_param); + if (mod_param->dbr_ring_cap) + qdf_mem_free(mod_param->dbr_ring_cap); + mod_param->dbr_ring_cap = NULL; + if (mod_param->dbr_ring_cfg) + qdf_mem_free(mod_param->dbr_ring_cfg); + mod_param->dbr_ring_cfg = NULL; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_direct_buf_rx_register_events( + struct wlan_objmgr_psoc *psoc) +{ + int ret; + + if (!psoc || !GET_WMI_HDL_FROM_PSOC(psoc)) { + direct_buf_rx_err("psoc or psoc->tgt_if_handle is null"); + return QDF_STATUS_E_INVAL; + } + + ret = wmi_unified_register_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_dma_buf_release_event_id, + target_if_direct_buf_rx_rsp_event_handler, + WMI_RX_UMAC_CTX); + + if (ret) + direct_buf_rx_debug("event handler not supported, ret=%d", ret); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_direct_buf_rx_unregister_events( + struct wlan_objmgr_psoc *psoc) +{ + if (!psoc || !GET_WMI_HDL_FROM_PSOC(psoc)) { + direct_buf_rx_err("psoc or psoc->tgt_if_handle is null"); + return QDF_STATUS_E_INVAL; + } + + wmi_unified_unregister_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_dma_buf_release_event_id); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_direct_buf_rx_print_ring_stat( + struct wlan_objmgr_pdev *pdev) +{ + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + struct wlan_objmgr_psoc *psoc; + void *srng, *hal_soc; + uint32_t hp = 0, tp = 0; + struct direct_buf_rx_module_param *mod_param; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + uint8_t num_modules, mod_idx; + uint8_t srng_id; + + if (!pdev) { + direct_buf_rx_err("pdev is null"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + dbr_psoc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + hal_soc = dbr_psoc_obj->hal_soc; + num_modules = dbr_pdev_obj->num_modules; + direct_buf_rx_debug("--------------------------------------------------"); + direct_buf_rx_debug("| Module ID | Module | Head Idx | Tail Idx |"); + direct_buf_rx_debug("--------------------------------------------------"); + for (mod_idx = 0; mod_idx < num_modules; mod_idx++) { + for (srng_id = 0; srng_id < DBR_SRNG_NUM; srng_id++) { + mod_param = + &dbr_pdev_obj->dbr_mod_param[mod_idx][srng_id]; + dbr_ring_cfg = mod_param->dbr_ring_cfg; + srng = dbr_ring_cfg->srng; + hal_get_sw_hptp(hal_soc, srng, &tp, &hp); + direct_buf_rx_debug("|%11d|%14s|%10x|%10x|", + mod_idx, g_dbr_module_name[mod_idx]. + module_name_str, + hp, tp); + } + } + direct_buf_rx_debug("--------------------------------------------------"); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +target_if_direct_buf_rx_get_ring_params(struct wlan_objmgr_pdev *pdev, + struct module_ring_params *param, + uint8_t mod_id, uint8_t srng_id) +{ + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + struct direct_buf_rx_module_param *dbr_mod_param; + + if (!pdev) { + direct_buf_rx_err("pdev context passed is null"); + return QDF_STATUS_E_INVAL; + } + + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj + (pdev, WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (!dbr_pdev_obj) { + direct_buf_rx_err("dir buf rx object is null"); + return QDF_STATUS_E_FAILURE; + } + + if ((mod_id >= DBR_MODULE_MAX) || (srng_id >= DBR_SRNG_NUM)) { + direct_buf_rx_err("invalid params, mod id %d, srng id %d", + mod_id, srng_id); + return QDF_STATUS_E_INVAL; + } + + dbr_mod_param = &dbr_pdev_obj->dbr_mod_param[mod_id][srng_id]; + param->num_bufs = dbr_mod_param->dbr_ring_cfg->num_ptr; + param->buf_size = dbr_mod_param->dbr_ring_cfg->buf_size; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.h b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.h new file mode 100644 index 0000000000000000000000000000000000000000..a06375f9cc40fb9dbdca0aca2e5f13c686d04c82 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.h @@ -0,0 +1,403 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _TARGET_IF_DIRECT_BUF_RX_MAIN_H_ +#define _TARGET_IF_DIRECT_BUF_RX_MAIN_H_ + +#include "qdf_types.h" +#include "qdf_status.h" +#include + +struct wlan_objmgr_psoc; +struct wlan_lmac_if_tx_ops; +struct direct_buf_rx_data; + +#define DBR_RING_BASE_ALIGN 8 + +#ifdef DBR_MULTI_SRNG_ENABLE +#define DBR_SRNG_NUM 2 +#define dbr_get_pdev_id(srng_id, pdev_id) (srng_id) +#else +#define DBR_SRNG_NUM 1 +#define dbr_get_pdev_id(srng_id, pdev_id) (pdev_id) +#endif + +/** + * struct direct_buf_rx_info - direct buffer rx operation info struct + * @cookie: SW cookie used to get the virtual address + * @paddr: Physical address pointer for DMA operation + * @vaddr: Virtual address pointer + * @offset: Offset of aligned address from unaligned + */ +struct direct_buf_rx_buf_info { + uint32_t cookie; + qdf_dma_addr_t paddr; + void *vaddr; + uint8_t offset; +}; + +/** + * struct direct_buf_rx_ring_cfg - DMA ring config parameters + * @num_ptr: Depth or the number of physical address pointers in the ring + * @ring_alloc_size: Size of the HAL ring + * @base_paddr_unaligned: base physical addr unaligned + * @base_vaddr_unaligned: base virtual addr unaligned + * @base_paddr_aligned: base physical addr aligned + * @base_vaddr_aligned: base virtual addr unaligned + * @head_idx_addr: head index addr + * @tail_idx_addr: tail index addr + * @srng: HAL srng context + */ +struct direct_buf_rx_ring_cfg { + uint32_t num_ptr; + uint32_t ring_alloc_size; + qdf_dma_addr_t base_paddr_unaligned; + void *base_vaddr_unaligned; + qdf_dma_addr_t base_paddr_aligned; + void *base_vaddr_aligned; + qdf_dma_addr_t head_idx_addr; + qdf_dma_addr_t tail_idx_addr; + void *srng; + uint32_t buf_size; +}; + +/** + * struct direct_buf_rx_ring_cap - DMA ring capabilities + * @ring_elems_min: Minimum number of pointers in the ring + * @min_buf_size: Minimum size of each buffer entry in the ring + * @min_buf_align: Minimum alignment of the addresses in the ring + */ +struct direct_buf_rx_ring_cap { + uint32_t ring_elems_min; + uint32_t min_buf_size; + uint32_t min_buf_align; +}; + +/** + * enum DBR_RING_DEBUG_EVENT - DMA ring debug event + * @DBR_RING_DEBUG_EVENT_NONE: Not a real value, just a place holder for + * no entry + * @DBR_RING_DEBUG_EVENT_RX: DBR Rx event + * @DBR_RING_DEBUG_EVENT_REPLENISH_RING: DBR replenish event + * @DBR_RING_DEBUG_EVENT_MAX: Not a real value, just a place holder for max + */ +enum DBR_RING_DEBUG_EVENT { + DBR_RING_DEBUG_EVENT_NONE = 0, + DBR_RING_DEBUG_EVENT_RX, + DBR_RING_DEBUG_EVENT_REPLENISH_RING, + DBR_RING_DEBUG_EVENT_MAX, +}; + +#define DIRECT_BUF_RX_MAX_RING_DEBUG_ENTRIES (1024) +/** + * struct direct_buf_rx_ring_debug_entry - DBR ring debug entry + * @head_idx: Head index of the DMA ring + * @tail_idx: Tail index of the DMA ring + * @timestamp: Timestamp at the time of logging + * @event: Name of the event + */ +struct direct_buf_rx_ring_debug_entry { + uint32_t head_idx; + uint32_t tail_idx; + uint64_t timestamp; + enum DBR_RING_DEBUG_EVENT event; +}; + +#ifdef WLAN_DEBUGFS +/** + * struct dbr_debugfs_priv - Private data for DBR ring debugfs + * @dbr_pdev_obj: Pointer to the pdev obj of Direct buffer rx module + * @mod_id: Pointer to the registered module ID + * @srng_id: srng ID + */ +struct dbr_debugfs_priv { + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + enum DBR_MODULE mod_id; + uint8_t srng_id; +}; +#endif + +/** + * struct direct_buf_rx_ring_debug - DMA ring debug of a module + * @entries: Pointer to the array of ring debug entries + * @ring_debug_idx: Current index in the array of ring debug entries + * @num_ring_debug_entries: Total ring debug entries + * @debugfs_entry: Debugfs entry for this ring + * @debugfs_priv: Debugfs ops for this ring + */ +struct direct_buf_rx_ring_debug { + struct direct_buf_rx_ring_debug_entry *entries; + uint32_t ring_debug_idx; + uint32_t num_ring_debug_entries; +#ifdef WLAN_DEBUGFS + qdf_dentry_t debugfs_entry; + struct qdf_debugfs_fops *debugfs_fops; +#endif +}; + +/** + * struct direct_buf_rx_module_debug - Debug of a module subscribed to DBR + * @dbr_ring_debug: Array of ring debug structers corresponding to each srng + * @poisoning_enabled: Whether buffer poisoning is enabled for this module + * @poison_value: Value with which buffers should be poisoned + * @debugfs_entry: Debugfs entry for this module + */ +struct direct_buf_rx_module_debug { + struct direct_buf_rx_ring_debug dbr_ring_debug[DBR_SRNG_NUM]; + bool poisoning_enabled; + uint32_t poison_value; +#ifdef WLAN_DEBUGFS + qdf_dentry_t debugfs_entry; +#endif +}; + +/** + * struct direct_buf_rx_module_param - DMA module param + * @mod_id: Module ID + * @pdev_id: pdev ID + * @dbr_config: Pointer to dirct buf rx module configuration struct + * @dbr_ring_cap: Pointer to direct buf rx ring capabilities struct + * @dbr_ring_cfg: Pointer to direct buf rx ring config struct + * @dbr_buf_pool: Pointer to direct buf rx buffer pool struct + * @dbr_rsp_handler: Pointer to direct buf rx response handler for the module + */ +struct direct_buf_rx_module_param { + enum DBR_MODULE mod_id; + uint8_t pdev_id; + uint8_t srng_id; + struct dbr_module_config dbr_config; + struct direct_buf_rx_ring_cap *dbr_ring_cap; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + struct direct_buf_rx_buf_info *dbr_buf_pool; + bool (*dbr_rsp_handler)(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_data *dbr_data); +}; + +/** + * struct direct_buf_rx_pdev_obj - Direct Buf RX pdev object struct + * @num_modules: Number of modules registered to DBR for the pdev + * @dbr_mod_param: Pointer to direct buf rx module param struct + * @dbr_mod_debug: Pointer to the array of DBR module debug structures + * @debugfs_entry: DBR debugfs entry of this radio + */ +struct direct_buf_rx_pdev_obj { + uint32_t num_modules; + struct direct_buf_rx_module_param (*dbr_mod_param)[DBR_SRNG_NUM]; +#ifdef DIRECT_BUF_RX_DEBUG + struct direct_buf_rx_module_debug *dbr_mod_debug; +#ifdef WLAN_DEBUGFS + qdf_dentry_t debugfs_entry; +#endif +#endif +}; + +/** + * struct direct_buf_rx_psoc_obj - Direct Buf RX psoc object struct + * @hal_soc: Opaque HAL SOC handle + * @osdev: QDF os device handle + * @dbr_pdev_objs: array of DBR pdev objects + */ +struct direct_buf_rx_psoc_obj { + void *hal_soc; + qdf_device_t osdev; + struct direct_buf_rx_pdev_obj *dbr_pdev_obj[WLAN_UMAC_MAX_PDEVS]; +}; + +/** + * struct module_ring_params - Direct buf ring params for module + * @num_bufs: Number of buffers alloted to this module + * @buf_size: size of buffers + */ +struct module_ring_params { + uint32_t num_bufs; + uint32_t buf_size; +}; + +/** + * target_if_direct_buf_rx_register_events() - Register WMI events to direct + * buffer rx module + * @psoc: pointer to psoc object + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_register_events( + struct wlan_objmgr_psoc *psoc); + +/** + * target_if_direct_buf_rx_unregister_events() - Unregister WMI events to direct + * buffer rx module + * @psoc: pointer to psoc object + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_unregister_events( + struct wlan_objmgr_psoc *psoc); + +/** + * target_if_direct_buf_rx_print_ring_stat() - Print ring status for each + * module in the pdev + * @pdev: pointer to pdev object + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_print_ring_stat( + struct wlan_objmgr_pdev *pdev); + +/** + * target_if_direct_buf_rx_pdev_create_handler() - Handler to be invoked for + * direct buffer rx module + * during pdev object create + * @pdev: pointer to pdev object + * @data: pointer to data + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_pdev_create_handler( + struct wlan_objmgr_pdev *pdev, void *data); + +/** + * target_if_direct_buf_rx_pdev_destroy_handler() - Handler to be invoked for + * direct buffer rx module + * during pdev object destroy + * @pdev: pointer to pdev object + * @data: pointer to data + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_pdev_destroy_handler( + struct wlan_objmgr_pdev *pdev, void *data); + +/** + * target_if_direct_buf_rx_psoc_create_handler() - Handler invoked for + * direct buffer rx module + * during attach + * @pdev: pointer to psoc object + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_psoc_create_handler( + struct wlan_objmgr_psoc *psoc, void *data); + +/** + * target_if_direct_buf_rx_psoc_destroy_handler() - Handler invoked for + * direct buffer rx module + * during detach + * @pdev: pointer to psoc object + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_psoc_destroy_handler( + struct wlan_objmgr_psoc *psoc, void *data); + +/** + * target_if_deinit_dbr_ring() - Function to deinitialize buffers and ring + * allocated for direct buffer rx module + * @pdev: pointer to pdev object + * @dbr_pdev_obj: pointer to direct buffer rx module pdev obj + * @mod_id: module id indicating the module using direct buffer rx framework + * @srng_id: srng ID + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_deinit_dbr_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_pdev_obj *dbr_pdev_obj, + enum DBR_MODULE mod_id, uint8_t srng_id); +/** + * target_if_direct_buf_rx_module_register() - Function to register to direct + * buffer rx module + * @pdev: pointer to pdev object + * @mod_id: module id indicating the module using direct buffer rx framework + * @dbr_config: dbr module configuration params + * @dbr_rsp_handler: function pointer pointing to the response handler to be + * invoked for the module registering to direct buffer rx + * module + * + * Return: QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_module_register( + struct wlan_objmgr_pdev *pdev, uint8_t mod_id, + struct dbr_module_config *dbr_config, + bool (*dbr_rsp_handler) + (struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_data *dbr_data)); + +/** + * target_if_direct_buf_rx_module_unregister() - Function to unregister to + * direct buffer rx module + * @pdev: pointer to pdev object + * @mod_id: module id indicating the module using direct buffer rx framework + * + * Return: QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_module_unregister( + struct wlan_objmgr_pdev *pdev, uint8_t mod_id); + +/** + * target_if_direct_buf_rx_get_ring_params() - Function to get ring parameters + * for module_id + * @pdev: pointer to pdev object + * @module_ring_params: pointer to store ring params + * @mod_id: module idindicating module using direct buffer rx framework + * @srng_id: srng ID + */ +QDF_STATUS +target_if_direct_buf_rx_get_ring_params(struct wlan_objmgr_pdev *pdev, + struct module_ring_params *param, + uint8_t mod_id, uint8_t srng_id); + +/** + * target_if_dbr_start_ring_debug() - Start DBR ring debug + * @pdev: pointer to pdev object + * @mod_id: module ID indicating the module using direct buffer rx framework + * @num_ring_debug_entries: Size of the ring debug entries + */ +QDF_STATUS target_if_dbr_start_ring_debug(struct wlan_objmgr_pdev *pdev, + uint8_t mod_id, + uint32_t num_ring_debug_entries); + +/** + * target_if_dbr_stop_ring_debug() - Stop DBR ring debug + * @pdev: pointer to pdev object + * @mod_id: module ID indicating the module using direct buffer rx framework + */ +QDF_STATUS target_if_dbr_stop_ring_debug(struct wlan_objmgr_pdev *pdev, + uint8_t mod_id); + +/** + * target_if_dbr_start_buffer_poisoning() - Start DBR buffer poisoning + * @pdev: pointer to pdev object + * @mod_id: module ID indicating the module using direct buffer rx framework + * @value: Value with which buffers should be poisoned + * + * Only those buffers which are going to be mapped to the device after this + * API call are guaranteed to be poisoned. If user wants all the buffers in + * the ring to be poisoned from their creation time then this API should be + * called before module's registration to the DBR. + * + */ +QDF_STATUS target_if_dbr_start_buffer_poisoning(struct wlan_objmgr_pdev *pdev, + uint8_t mod_id, uint32_t value); + +/** + * target_if_dbr_stop_buffer_poisoning() - Stop DBR buffer poisoning + * @pdev: pointer to pdev object + * @mod_id: module ID indicating the module using direct buffer rx framework + */ +QDF_STATUS target_if_dbr_stop_buffer_poisoning(struct wlan_objmgr_pdev *pdev, + uint8_t mod_id); +#endif /* _TARGET_IF_DIRECT_BUF_RX_MAIN_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dispatcher/inc/target_if_pub.h b/drivers/staging/qca-wifi-host-cmn/target_if/dispatcher/inc/target_if_pub.h new file mode 100644 index 0000000000000000000000000000000000000000..84a24b7c264dcb4d51f2116e59d3ef8143cbb7d8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dispatcher/inc/target_if_pub.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the structure definitions of target interface + * used for the abstraction across various layers. + */ + +#ifndef _TARGET_IF_PUB_H_ +#define _TARGET_IF_PUB_H_ + +/** + * typedef target_pdev_info_t - Opaque definition of target pdev + * information structure + */ +struct target_pdev_info; +typedef struct target_pdev_info target_pdev_info_t; + +/** + * typedef target_psoc_info_t - Opaque definition of target psoc + * information structure + */ +struct target_psoc_info; +typedef struct target_psoc_info target_psoc_info_t; +#endif /* _TARGET_IF_PUB_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dp/inc/target_if_dp.h b/drivers/staging/qca-wifi-host-cmn/target_if/dp/inc/target_if_dp.h new file mode 100644 index 0000000000000000000000000000000000000000..dcb6327b425b251375686edf030b12f23e4a2223 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dp/inc/target_if_dp.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This target interface shall be used by DP + * to communicate with target using WMI. + */ + +#ifndef _WLAN_TARGET_IF_DP_H_ +#define _WLAN_TARGET_IF_DP_H_ + +#include +#include +#include +#include +#include +#include + +/** + * struct reorder_q_setup - reorder queue setup params + * @psoc: psoc + * @vdev_id: vdev id + * @pdev_id: pdev id + * @peer_macaddr: peer mac address + * @hw_qdesc: hw queue descriptor + * @tid: tid number + * @queue_no: queue number + * @ba_window_size_valid: BA window size validity flag + * @ba_window_size: BA window size + */ +struct reorder_q_setup { + struct cdp_ctrl_objmgr_psoc *psoc; + uint8_t vdev_id; + uint8_t pdev_id; + uint8_t peer_mac[QDF_MAC_ADDR_SIZE]; + qdf_dma_addr_t hw_qdesc_paddr; + uint8_t tid; + uint16_t queue_no; + uint8_t ba_window_size_valid; + uint16_t ba_window_size; +}; + +/** + * target_if_peer_set_default_routing() - set peer default routing + * @psoc: psoc pointer + * @pdev_id: pdev id + * @peer_macaddr: peer mac address + * @vdev_id: vdev id + * @hash_based: hash based routing + * @ring_num: ring number + * + * return: void + */ +void +target_if_peer_set_default_routing(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t pdev_id, + uint8_t *peer_macaddr, uint8_t vdev_id, + bool hash_based, uint8_t ring_num); +/** + * target_if_peer_rx_reorder_queue_setup() - setup rx reorder queue + * @pdev: pdev pointer + * @pdev_id: pdev id + * @vdev_id: vdev id + * @peer_macaddr: peer mac address + * @hw_qdesc: hw queue descriptor + * @tid: tid number + * @queue_no: queue number + * @ba_window_size_valid: BA window size validity flag + * @ba_window_size: BA window size + * + * return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +target_if_peer_rx_reorder_queue_setup(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t pdev_id, + uint8_t vdev_id, uint8_t *peer_macaddr, + qdf_dma_addr_t hw_qdesc, int tid, + uint16_t queue_no, + uint8_t ba_window_size_valid, + uint16_t ba_window_size); + +/** + * target_if_peer_rx_reorder_queue_remove() - remove rx reorder queue + * @psoc: psoc pointer + * @pdev_id: pdev id + * @vdev_id: vdev id + * @peer_macaddr: peer mac address + * @peer_tid_bitmap: peer tid bitmap + * + * return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +target_if_peer_rx_reorder_queue_remove(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t pdev_id, + uint8_t vdev_id, uint8_t *peer_macaddr, + uint32_t peer_tid_bitmap); + +/** + * target_if_lro_hash_config() - send LRO hash config to FW + * @psoc_handle: psoc handle pointer + * @lro_hash_cfg: LRO hash config parameters + * + * return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +target_if_lro_hash_config(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t pdev_id, + struct cdp_lro_hash_config *lro_hash_cfg); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dp/src/target_if_dp.c b/drivers/staging/qca-wifi-host-cmn/target_if/dp/src/target_if_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..e45352f887ec4b0e3274cfadebbccc303a6b4fc7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dp/src/target_if_dp.c @@ -0,0 +1,284 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: defines DP interaction with FW using WMI + */ + +#include +#include "target_if_dp.h" +#include + +void +target_if_peer_set_default_routing(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t pdev_id, uint8_t *peer_macaddr, + uint8_t vdev_id, + bool hash_based, uint8_t ring_num) +{ + uint32_t value; + struct peer_set_params param; + struct wmi_unified *pdev_wmi_handle; + struct wlan_objmgr_pdev *pdev = + wlan_objmgr_get_pdev_by_id((struct wlan_objmgr_psoc *)psoc, + pdev_id, WLAN_PDEV_TARGET_IF_ID); + + if (!pdev) { + target_if_err("pdev with id %d is NULL", pdev_id); + return; + } + + pdev_wmi_handle = lmac_get_pdev_wmi_handle(pdev); + if (!pdev_wmi_handle) { + wlan_objmgr_pdev_release_ref(pdev, WLAN_PDEV_TARGET_IF_ID); + target_if_err("pdev wmi handle NULL"); + return; + } + + qdf_mem_zero(¶m, sizeof(param)); + + /* TODO: Need bit definitions for ring number and hash based routing + * fields in common wmi header file + */ + value = ((hash_based) ? 1 : 0) | (ring_num << 1); + + param.param_id = WMI_HOST_PEER_SET_DEFAULT_ROUTING; + param.vdev_id = vdev_id; + param.param_value = value; + + if (wmi_set_peer_param_send(pdev_wmi_handle, peer_macaddr, ¶m)) { + target_if_err("Unable to set default routing for peer " + QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(peer_macaddr)); + } + wlan_objmgr_pdev_release_ref(pdev, WLAN_PDEV_TARGET_IF_ID); +} + +#ifdef SERIALIZE_QUEUE_SETUP +static QDF_STATUS +target_if_rx_reorder_queue_setup(struct scheduler_msg *msg) +{ + struct rx_reorder_queue_setup_params param; + struct wmi_unified *pdev_wmi_handle; + struct reorder_q_setup *q_params; + QDF_STATUS status; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_psoc *psoc; + + if (!(msg->bodyptr)) { + target_if_err("rx_reorder: Invalid message body"); + return QDF_STATUS_E_INVAL; + } + + q_params = msg->bodyptr; + psoc = (struct wlan_objmgr_psoc *)q_params->psoc; + + pdev = wlan_objmgr_get_pdev_by_id(psoc, q_params->pdev_id, + WLAN_PDEV_TARGET_IF_ID); + + if (!pdev) { + target_if_err("pdev with id %d is NULL", q_params->pdev_id); + return QDF_STATUS_E_INVAL; + } + + pdev_wmi_handle = lmac_get_pdev_wmi_handle(pdev); + if (!pdev_wmi_handle) { + target_if_err("pdev wmi handle NULL"); + status = QDF_STATUS_E_FAILURE; + goto out; + } + + param.tid = q_params->tid; + param.vdev_id = q_params->vdev_id; + param.peer_macaddr = q_params->peer_mac; + param.hw_qdesc_paddr_lo = q_params->hw_qdesc_paddr & 0xffffffff; + param.hw_qdesc_paddr_hi = (uint64_t)q_params->hw_qdesc_paddr >> 32; + param.queue_no = q_params->queue_no; + param.ba_window_size_valid = q_params->ba_window_size_valid; + param.ba_window_size = q_params->ba_window_size; + + status = wmi_unified_peer_rx_reorder_queue_setup_send(pdev_wmi_handle, + ¶m); +out: + wlan_objmgr_pdev_release_ref(pdev, WLAN_PDEV_TARGET_IF_ID); + qdf_mem_free(q_params); + + return status; +} + +QDF_STATUS +target_if_peer_rx_reorder_queue_setup(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t pdev_id, + uint8_t vdev_id, uint8_t *peer_macaddr, + qdf_dma_addr_t hw_qdesc, int tid, + uint16_t queue_no, + uint8_t ba_window_size_valid, + uint16_t ba_window_size) +{ + struct scheduler_msg msg = {0}; + struct reorder_q_setup *q_params; + QDF_STATUS status; + + q_params = qdf_mem_malloc(sizeof(*q_params)); + if (!q_params) + return QDF_STATUS_E_NOMEM; + + q_params->psoc = psoc; + q_params->vdev_id = vdev_id; + q_params->pdev_id = pdev_id; + q_params->hw_qdesc_paddr = hw_qdesc; + q_params->tid = tid; + q_params->queue_no = queue_no; + q_params->ba_window_size_valid = ba_window_size_valid; + q_params->ba_window_size = ba_window_size; + qdf_mem_copy(q_params->peer_mac, peer_macaddr, QDF_MAC_ADDR_SIZE); + + msg.bodyptr = q_params; + msg.callback = target_if_rx_reorder_queue_setup; + status = scheduler_post_message(QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, &msg); + + if (status != QDF_STATUS_SUCCESS) + qdf_mem_free(q_params); + + return status; +} + +#else + +QDF_STATUS +target_if_peer_rx_reorder_queue_setup(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t pdev_id, + uint8_t vdev_id, uint8_t *peer_macaddr, + qdf_dma_addr_t hw_qdesc, int tid, + uint16_t queue_no, + uint8_t ba_window_size_valid, + uint16_t ba_window_size) +{ + struct rx_reorder_queue_setup_params param; + struct wmi_unified *pdev_wmi_handle; + QDF_STATUS status; + struct wlan_objmgr_pdev *pdev = + wlan_objmgr_get_pdev_by_id((struct wlan_objmgr_psoc *)psoc, + pdev_id, WLAN_PDEV_TARGET_IF_ID); + + if (!pdev) { + target_if_err("pdev with id %d is NULL", pdev_id); + return QDF_STATUS_E_INVAL; + } + + pdev_wmi_handle = lmac_get_pdev_wmi_handle(pdev); + if (!pdev_wmi_handle) { + wlan_objmgr_pdev_release_ref(pdev, WLAN_PDEV_TARGET_IF_ID); + target_if_err("pdev wmi handle NULL"); + return QDF_STATUS_E_FAILURE; + } + param.tid = tid; + param.vdev_id = vdev_id; + param.peer_macaddr = peer_macaddr; + param.hw_qdesc_paddr_lo = hw_qdesc & 0xffffffff; + param.hw_qdesc_paddr_hi = (uint64_t)hw_qdesc >> 32; + param.queue_no = queue_no; + param.ba_window_size_valid = ba_window_size_valid; + param.ba_window_size = ba_window_size; + + status = wmi_unified_peer_rx_reorder_queue_setup_send(pdev_wmi_handle, + ¶m); + wlan_objmgr_pdev_release_ref(pdev, WLAN_PDEV_TARGET_IF_ID); + + return status; +} +#endif + +QDF_STATUS +target_if_peer_rx_reorder_queue_remove(struct cdp_ctrl_objmgr_psoc *psoc, + uint8_t pdev_id, + uint8_t vdev_id, uint8_t *peer_macaddr, + uint32_t peer_tid_bitmap) +{ + struct rx_reorder_queue_remove_params param; + struct wmi_unified *pdev_wmi_handle; + QDF_STATUS status; + struct wlan_objmgr_pdev *pdev = + wlan_objmgr_get_pdev_by_id((struct wlan_objmgr_psoc *)psoc, + pdev_id, WLAN_PDEV_TARGET_IF_ID); + + if (!pdev) { + target_if_err("pdev with id %d is NULL", pdev_id); + return QDF_STATUS_E_INVAL; + } + + pdev_wmi_handle = lmac_get_pdev_wmi_handle(pdev); + if (!pdev_wmi_handle) { + wlan_objmgr_pdev_release_ref(pdev, WLAN_PDEV_TARGET_IF_ID); + target_if_err("pdev wmi handle NULL"); + return QDF_STATUS_E_FAILURE; + } + param.vdev_id = vdev_id; + param.peer_macaddr = peer_macaddr; + param.peer_tid_bitmap = peer_tid_bitmap; + status = wmi_unified_peer_rx_reorder_queue_remove_send(pdev_wmi_handle, + ¶m); + wlan_objmgr_pdev_release_ref(pdev, WLAN_PDEV_TARGET_IF_ID); + + return status; +} + +QDF_STATUS +target_if_lro_hash_config(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t pdev_id, + struct cdp_lro_hash_config *lro_hash_cfg) +{ + struct wmi_lro_config_cmd_t wmi_lro_cmd = {0}; + struct wmi_unified *pdev_wmi_handle; + QDF_STATUS status; + struct wlan_objmgr_pdev *pdev = + wlan_objmgr_get_pdev_by_id((struct wlan_objmgr_psoc *)psoc, + pdev_id, WLAN_PDEV_TARGET_IF_ID); + + if (!pdev) { + target_if_err("pdev with id %d is NULL", pdev_id); + return QDF_STATUS_E_INVAL; + } + + pdev_wmi_handle = lmac_get_pdev_wmi_handle(pdev); + if (!lro_hash_cfg || !pdev_wmi_handle) { + wlan_objmgr_pdev_release_ref(pdev, WLAN_PDEV_TARGET_IF_ID); + target_if_err("wmi_handle: 0x%pK, lro_hash_cfg: 0x%pK", + pdev_wmi_handle, lro_hash_cfg); + return QDF_STATUS_E_FAILURE; + } + + wmi_lro_cmd.lro_enable = lro_hash_cfg->lro_enable; + wmi_lro_cmd.tcp_flag = lro_hash_cfg->tcp_flag; + wmi_lro_cmd.tcp_flag_mask = lro_hash_cfg->tcp_flag_mask; + wmi_lro_cmd.pdev_id = pdev_id; + + qdf_mem_copy(wmi_lro_cmd.toeplitz_hash_ipv4, + lro_hash_cfg->toeplitz_hash_ipv4, + LRO_IPV4_SEED_ARR_SZ * sizeof(uint32_t)); + + qdf_mem_copy(wmi_lro_cmd.toeplitz_hash_ipv6, + lro_hash_cfg->toeplitz_hash_ipv6, + LRO_IPV6_SEED_ARR_SZ * sizeof(uint32_t)); + + status = wmi_unified_lro_config_cmd(pdev_wmi_handle, + &wmi_lro_cmd); + wlan_objmgr_pdev_release_ref(pdev, WLAN_PDEV_TARGET_IF_ID); + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/ftm/inc/target_if_ftm.h b/drivers/staging/qca-wifi-host-cmn/target_if/ftm/inc/target_if_ftm.h new file mode 100644 index 0000000000000000000000000000000000000000..97ca6f5f84a58e55cc12196b1f1b4fdf4a8f7cd4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/ftm/inc/target_if_ftm.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declare the ftm service data structure and apis + */ +#ifndef _TARGET_IF_FTM_H_ +#define _TARGET_IF_FTM_H_ + +#include +#include +#include + +/** + * target_if_ftm_register_tx_ops() - register ftm tx ops + * @tx_ops: tx ops pointer + * + * Register ftm tx ops + * + * Return: QDF_STATUS + */ +QDF_STATUS target_if_ftm_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_ftm_attach() - Register FW event handler + * @psoc: psoc pointer + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +target_if_ftm_attach(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_ftm_detach() - De-Register FW event handler + * @psoc: psoc pointer + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +target_if_ftm_detach(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_ftm_cmd_send() - Send WMI command for FTM requests + * @pdev: pdev pointer + * buf: data to be sent to FW + * len: length of the data + * pdev_id: pdev id + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +target_if_ftm_cmd_send(struct wlan_objmgr_pdev *pdev, + uint8_t *buf, uint32_t len, uint8_t pdev_id); +#endif /* _TARGET_IF_FTM_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/ftm/src/target_if_ftm.c b/drivers/staging/qca-wifi-host-cmn/target_if/ftm/src/target_if_ftm.c new file mode 100644 index 0000000000000000000000000000000000000000..0e4e76563233230cdc3564b71c8821b304e237b9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/ftm/src/target_if_ftm.c @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: offload lmac interface APIs definitions for FTM + */ + +#include +#include +#include +#include +#include +#include +#include + +static inline struct wlan_lmac_if_ftm_rx_ops * +target_if_ftm_get_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.rx_ops.ftm_rx_ops; +} + +static int +target_if_ftm_process_utf_event(ol_scn_t sc, uint8_t *event_buf, uint32_t len) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wmi_host_pdev_utf_event event; + struct wlan_lmac_if_ftm_rx_ops *ftm_rx_ops; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + uint32_t pdev_id; + struct wmi_unified *wmi_handle; + + psoc = target_if_get_psoc_from_scn_hdl(sc); + if (!psoc) { + ftm_err("null psoc"); + return QDF_STATUS_E_INVAL; + } + + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_FTM_ID); + if (QDF_IS_STATUS_ERROR(status)) { + ftm_err("unable to get psoc reference"); + return QDF_STATUS_E_INVAL; + } + + event.datalen = len; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + ftm_err("Invalid WMI handle"); + wlan_objmgr_psoc_release_ref(psoc, WLAN_FTM_ID); + return QDF_STATUS_E_INVAL; + } + + if (wmi_extract_pdev_utf_event(wmi_handle, event_buf, &event) + != QDF_STATUS_SUCCESS) { + ftm_err("Extracting utf event failed"); + wlan_objmgr_psoc_release_ref(psoc, WLAN_FTM_ID); + return QDF_STATUS_E_INVAL; + } + + pdev_id = event.pdev_id; + pdev = wlan_objmgr_get_pdev_by_id(psoc, pdev_id, WLAN_FTM_ID); + if (!pdev) { + pdev_id = TGT_WMI_PDEV_ID_SOC; + ftm_debug("Can't find pdev by pdev_id %d, try soc_id", + event.pdev_id); + pdev = wlan_objmgr_get_pdev_by_id(psoc, pdev_id, WLAN_FTM_ID); + if (!pdev) { + ftm_err("null pdev"); + wlan_objmgr_psoc_release_ref(psoc, WLAN_FTM_ID); + return QDF_STATUS_E_INVAL; + } + } + + ftm_rx_ops = target_if_ftm_get_rx_ops(psoc); + + if (ftm_rx_ops->ftm_ev_handler) { + status = ftm_rx_ops->ftm_ev_handler(pdev, + event.data, event.datalen); + if (QDF_IS_STATUS_ERROR(status)) + status = QDF_STATUS_E_INVAL; + } else { + status = QDF_STATUS_E_INVAL; + } + + wlan_objmgr_pdev_release_ref(pdev, WLAN_FTM_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_FTM_ID); + + return status; +} + +QDF_STATUS target_if_ftm_cmd_send(struct wlan_objmgr_pdev *pdev, + uint8_t *buf, uint32_t len, + uint8_t pdev_id) +{ + QDF_STATUS ret; + wmi_unified_t handle; + struct pdev_utf_params param; + + if (!pdev) { + target_if_err("null pdev"); + return QDF_STATUS_E_FAILURE; + } + + handle = get_wmi_unified_hdl_from_pdev(pdev); + if (!handle) { + target_if_err("null handle"); + return QDF_STATUS_E_FAILURE; + } + param.utf_payload = buf; + param.len = len; + + ret = wmi_unified_pdev_utf_cmd_send(handle, ¶m, pdev_id); + if (QDF_IS_STATUS_ERROR(ret)) + ftm_err("wmi utf cmd send failed, ret: %d", ret); + + return ret; +} + +QDF_STATUS target_if_ftm_attach(struct wlan_objmgr_psoc *psoc) +{ + int ret; + wmi_unified_t handle; + + if (!psoc) { + target_if_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + + handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!handle) { + target_if_err("null handle"); + return QDF_STATUS_E_FAILURE; + } + ret = wmi_unified_register_event_handler(handle, + wmi_pdev_utf_event_id, + target_if_ftm_process_utf_event, + WMI_RX_UMAC_CTX); + if (ret) { + ftm_err("wmi event registration failed, ret: %d", ret); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_ftm_detach(struct wlan_objmgr_psoc *psoc) + +{ + int ret; + wmi_unified_t handle; + + if (!psoc) { + target_if_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + + handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!handle) { + target_if_err("null handle"); + return QDF_STATUS_E_FAILURE; + } + ret = wmi_unified_unregister_event_handler(handle, + wmi_pdev_utf_event_id); + + if (ret) { + ftm_err("wmi event deregistration failed, ret: %d", ret); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_ftm_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_ftm_tx_ops *ftm_tx_ops; + + if (!tx_ops) { + ftm_err("invalid tx_ops"); + return QDF_STATUS_E_FAILURE; + } + + ftm_tx_ops = &tx_ops->ftm_tx_ops; + ftm_tx_ops->ftm_attach = target_if_ftm_attach; + ftm_tx_ops->ftm_detach = target_if_ftm_detach; + ftm_tx_ops->ftm_cmd_send = target_if_ftm_cmd_send; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/gpio/target_if_gpio.c b/drivers/staging/qca-wifi-host-cmn/target_if/gpio/target_if_gpio.c new file mode 100644 index 0000000000000000000000000000000000000000..0b2cb5050645c2f8211bdfe2342626335b3d8578 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/gpio/target_if_gpio.c @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_gpio.c + * + * This file provide definition for APIs registered through lmac Tx Ops + */ + +#include +#include +#include +#include +#include + +/** + * target_if_set_gpio_config() - API to send gpio config request to wmi + * @psoc: pointer to psoc object + * @param: pointer to gpio info + * + * Return: status of operation. + */ +static QDF_STATUS +target_if_set_gpio_config(struct wlan_objmgr_psoc *psoc, + struct gpio_config_params *param) +{ + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("wmi_handle is null."); + return QDF_STATUS_E_NULL_VALUE; + } + + return wmi_unified_gpio_config_cmd_send(wmi_handle, param); +} + +/** + * target_if_set_gpio_output() - API to send gpio output request to wmi + * @psoc: pointer to psoc object + * @param: pointer to gpio info + * + * Return: status of operation. + */ +static QDF_STATUS +target_if_set_gpio_output(struct wlan_objmgr_psoc *psoc, + struct gpio_output_params *param) +{ + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("wmi_handle is null."); + return QDF_STATUS_E_NULL_VALUE; + } + + return wmi_unified_gpio_output_cmd_send(wmi_handle, param); +} + +QDF_STATUS +target_if_gpio_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_gpio_tx_ops *gpio_ops; + + if (!tx_ops) { + target_if_err("tx ops is NULL!"); + return QDF_STATUS_E_INVAL; + } + gpio_ops = &tx_ops->gpio_ops; + + gpio_ops->set_gpio_config = target_if_set_gpio_config; + gpio_ops->set_gpio_output = target_if_set_gpio_output; + + return QDF_STATUS_SUCCESS; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/gpio/target_if_gpio.h b/drivers/staging/qca-wifi-host-cmn/target_if/gpio/target_if_gpio.h new file mode 100644 index 0000000000000000000000000000000000000000..f0993511c645959beb6a3ca974b655295643ad28 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/gpio/target_if_gpio.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: offload lmac interface APIs for gpio cfg + */ +#ifndef __TARGET_IF_GPIO_CFG_H__ +#define __TARGET_IF_GPIO_CFG_H__ + +#ifdef WLAN_FEATURE_GPIO_CFG +#include +struct wlan_lmac_if_tx_ops; + +/** + * target_if_gpio_register_tx_ops() - register tx ops funcs + * @tx_ops: pointer to gpio tx ops + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +target_if_gpio_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +#endif /* WLAN_FEATURE_GPIO_CFG */ +#endif /* __TARGET_IF_GPIO_CFG_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/green_ap/inc/target_if_green_ap.h b/drivers/staging/qca-wifi-host-cmn/target_if/green_ap/inc/target_if_green_ap.h new file mode 100644 index 0000000000000000000000000000000000000000..824a66be415d2a93c0e2a8a3f374e98adc9ca7ef --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/green_ap/inc/target_if_green_ap.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: offload lmac interface APIs for green ap + */ +#ifndef __TARGET_IF_GREEN_AP_H__ +#define __TARGET_IF_GREEN_AP_H__ + +#include +#include +#include +#include + +struct wlan_green_ap_egap_params; + +/** + * target_if_register_green_ap_tx_ops() - lmac handler to register + * green ap tx_ops callback functions + * @tx_ops: wlan_lmac_if_tx_ops object + * + * Return: QDF_STATUS in case of success + */ +QDF_STATUS target_if_register_green_ap_tx_ops( + struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_green_ap_register_egap_event_handler() - registers enhanced + * green ap event handler + * @pdev: objmgr pdev + * + * Return: QDF_STATUS in case of success + */ +QDF_STATUS target_if_green_ap_register_egap_event_handler( + struct wlan_objmgr_pdev *pdev); + +/** + * target_if_green_ap_enable_egap() - enable enhanced green ap + * @pdev: pdev pointer + * @egap_params: enhanced green ap params + * + * @Return: QDF_STATUS_SUCCESS in case of success + */ +QDF_STATUS target_if_green_ap_enable_egap( + struct wlan_objmgr_pdev *pdev, + struct wlan_green_ap_egap_params *egap_params); + +/** + * target_if_green_ap_set_ps_on_off() - Green AP PS toggle + * @pdev: pdev pointer + * @value: Value to send PS on/off to FW + * @pdev_id: pdev id + * + * @Return: QDF_STATUS_SUCCESS in case of success + */ +QDF_STATUS target_if_green_ap_set_ps_on_off(struct wlan_objmgr_pdev *pdev, + bool value, uint8_t pdev_id); + +/** + * target_if_green_ap_get_current_channel() - Get current channel + * @pdev: pdev pointer + * + * @Return: current channel freq + */ +uint16_t target_if_green_ap_get_current_channel(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_green_ap_get_current_channel_flags() - Get current channel flags + * @pdev: pdev pointer + * + * @Return: current channel flags + */ +uint64_t target_if_green_ap_get_current_channel_flags( + struct wlan_objmgr_pdev *pdev); + +/** + * target_if_green_ap_reset_dev() - Reset dev + * @pdev: pdev pointer + * + * @Return: QDF_STATUS_SUCCESS if device resetted + */ +QDF_STATUS target_if_green_ap_reset_dev(struct wlan_objmgr_pdev *pdev); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/green_ap/src/target_if_green_ap.c b/drivers/staging/qca-wifi-host-cmn/target_if/green_ap/src/target_if_green_ap.c new file mode 100644 index 0000000000000000000000000000000000000000..56b70dbceeef4c20fa9dc70ae49782d5c03ec97d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/green_ap/src/target_if_green_ap.c @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: offload lmac interface APIs definitions for Green ap + */ + +#include +#include +#include <../../core/src/wlan_green_ap_main_i.h> +#include +#include + +QDF_STATUS target_if_register_green_ap_tx_ops( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_green_ap_tx_ops *green_ap_tx_ops; + + if (!tx_ops) { + target_if_err("invalid tx_ops"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_tx_ops = &tx_ops->green_ap_tx_ops; + + green_ap_tx_ops->enable_egap = target_if_green_ap_enable_egap; + green_ap_tx_ops->ps_on_off_send = target_if_green_ap_set_ps_on_off; + green_ap_tx_ops->reset_dev = NULL; + green_ap_tx_ops->get_current_channel = NULL; + green_ap_tx_ops->get_current_channel_flags = NULL; + green_ap_tx_ops->get_capab = NULL; + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_green_ap_egap_status_info_event() - egap status info event + * @scn: pointer to scn handle + * @evt_buf: pointer to event buffer + * @data_len: data len of the event buffer + * + * Return: 0 for success, otherwise appropriate error code + */ +static int target_if_green_ap_egap_status_info_event( + ol_scn_t scn, uint8_t *evt_buf, uint32_t data_len) +{ + struct wlan_objmgr_pdev *pdev; + struct wlan_green_ap_egap_status_info egap_status_info_params; + void *wmi_hdl; + + pdev = target_if_get_pdev_from_scn_hdl(scn); + if (!pdev) { + green_ap_err("pdev is null"); + return QDF_STATUS_E_FAILURE; + } + + wmi_hdl = GET_WMI_HDL_FROM_PDEV(pdev); + if (!wmi_hdl) { + green_ap_err("null wmi_hdl"); + return QDF_STATUS_E_FAILURE; + } + + if (wmi_extract_green_ap_egap_status_info(wmi_hdl, + evt_buf, + &egap_status_info_params) != + QDF_STATUS_SUCCESS) { + green_ap_err("unable to extract green ap egap status info"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_debug("mac_id: %d, status: %d, tx_mask: %x, rx_mask: %d", + egap_status_info_params.mac_id, + egap_status_info_params.status, + egap_status_info_params.tx_chainmask, + egap_status_info_params.rx_chainmask); + + return 0; +} + +QDF_STATUS target_if_green_ap_register_egap_event_handler( + struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + struct wlan_green_ap_egap_params *egap_params; + int ret; + void *wmi_hdl; + + if (!pdev) { + green_ap_err("pdev is null"); + return QDF_STATUS_E_INVAL; + } + + wmi_hdl = GET_WMI_HDL_FROM_PDEV(pdev); + if (!wmi_hdl) { + green_ap_err("null wmi_hdl"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + egap_params = &green_ap_ctx->egap_params; + + ret = wmi_unified_register_event_handler( + wmi_hdl, + wmi_ap_ps_egap_info_event_id, + target_if_green_ap_egap_status_info_event, + WMI_RX_UMAC_CTX); + if (ret < 0) { + green_ap_err("Failed to register Enhance Green AP event"); + egap_params->fw_egap_support = false; + } else { + green_ap_info("Set the Enhance Green AP event handler"); + egap_params->fw_egap_support = true; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_green_ap_enable_egap( + struct wlan_objmgr_pdev *pdev, + struct wlan_green_ap_egap_params *egap_params) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + wmi_unified_t wmi_hdl; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + wmi_hdl = GET_WMI_HDL_FROM_PDEV(pdev); + if (!wmi_hdl) { + green_ap_err("null wmi_hdl"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (!wlan_is_egap_enabled(green_ap_ctx)) { + green_ap_info("enhanced green ap support is not present"); + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return QDF_STATUS_SUCCESS; + } + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return wmi_unified_egap_conf_params_cmd(wmi_hdl, + egap_params); +} + +QDF_STATUS target_if_green_ap_set_ps_on_off(struct wlan_objmgr_pdev *pdev, + bool value, uint8_t pdev_id) +{ + wmi_unified_t wmi_hdl; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + wmi_hdl = GET_WMI_HDL_FROM_PDEV(pdev); + if (!wmi_hdl) { + green_ap_err("null wmi_hdl"); + return QDF_STATUS_E_FAILURE; + } + + return wmi_unified_green_ap_ps_send(wmi_hdl, + value, pdev_id); +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_cmd_api.h b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_cmd_api.h new file mode 100644 index 0000000000000000000000000000000000000000..a7c9adf1efc640d8c6e40f80a625335db6bbb998 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_cmd_api.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2018, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: init_cmd_api.h + * + * Public APIs to prepare and send init command + */ + +#ifndef _INIT_DEINIT_INIT_CMD_H_ +#define _INIT_DEINIT_INIT_CMD_H_ + +/* max size if 256k */ +#define HOST_MEM_CHUNK_MAX_SIZE (256 * 1024) +#define HOST_MEM_CHUNK_MAX_SIZE_POWER2 (8 + 10) +#define TXBF_CV_POOL0 2 +#define TXBF_CV_POOL1 3 +#define TXBF_CV_POOL2 4 +#define HOST_CONTIGUOUS_MEM_CHUNK_REQUIRED 0x8 + +/** + * init_deinit_handle_host_mem_req() - handle host memory request + * @psoc: PSOC object + * @tgt_info: PSOC_INFO object + * @event: Event buffer from FW + * + * API to handle memory request from FW and allocate memory chunks + * + * Return: SUCCESS on successful memory allocation + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS init_deinit_handle_host_mem_req( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + +/** + * init_deinit_free_num_units() - Free allocated mem chunks + * @psoc: PSOC object + * @tgt_info: PSOC_INFO object + * + * API to free memory + * + * Return: SUCCESS on successful memory free + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS init_deinit_free_num_units(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl); + +/** + * init_deinit_derive_band_to_mac_param() - Derive band to mac param + * @psoc: PSOC object + * @tgt_info: PSOC_INFO object + * @band_to_mac: BAND_TO_MAC object + * + * API to derive band to mac param + * + * Return: void + */ +void init_deinit_derive_band_to_mac_param( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, + struct wmi_host_pdev_band_to_mac *band_to_mac); + +/** + * init_deinit_prepare_send_init_cmd() - prepare send init cmd + * @psoc: PSOC object + * @tgt_info: PSOC_INFO object + * + * API to prepare send init command + * + * Return: void + */ +void init_deinit_prepare_send_init_cmd( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + +#endif /* _INIT_DEINIT_INIT_CMD_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_deinit_lmac.h b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_deinit_lmac.h new file mode 100644 index 0000000000000000000000000000000000000000..7d3b7104186255082dba560d5565a6914e45fb9f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_deinit_lmac.h @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: init_deinit_lmac.h + * + * Public APIs to get target_if info + */ + +#ifndef _INIT_DEINIT_LMAC_H_ +#define _INIT_DEINIT_LMAC_H_ + +/** + * lmac_get_tgt_res_cfg() - get target resource config + * @psoc: pointer to psoc + * + * API to get target resource config + * + * Return: target resource configuration + */ +target_resource_config *lmac_get_tgt_res_cfg(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_target_cap() - get target capability + * @psoc: pointer to psoc + * + * API to get target capability + * + * Return: target capability Information + */ +struct wlan_psoc_target_capability_info *lmac_get_target_cap( + struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_pdev_idx() - get pdev id + * @pdev: pointer to pdev + * + * API to get pdev id + * + * Return: pdev id + */ +int32_t lmac_get_pdev_idx(struct wlan_objmgr_pdev *pdev); + +/** + * lmac_get_pdev_target_type() - check pdev target type + * @pdev: pointer to pdev + * @target_type: target type ptr, it is assigned with pdev target_type + * target type stores the radio code + * + * API to check pdev target type + * + * Return: Success if found required target type else Failure + */ +QDF_STATUS lmac_get_pdev_target_type(struct wlan_objmgr_pdev *pdev, + uint32_t *target_type); + +/** + * lmac_get_tgt_type() - get target type + * @psoc: pointer to psoc + * + * API to get target type + * + * Return: target type (value to identify particular radio) + */ +uint32_t lmac_get_tgt_type(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_tgt_version() - get target version + * @psoc: pointer to psoc + * + * API to get target version + * + * Return: target version + */ +uint32_t lmac_get_tgt_version(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_tgt_revision() - get target revision + * @psoc: pointer to psoc + * + * API to get target revision + * + * Return: target revision + */ +uint32_t lmac_get_tgt_revision(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_is_target_ar900b() - checks the target type + * @psoc: pointer to psoc + * + * API to check target type + * + * Return: True on presence of required target type else false + */ +bool lmac_is_target_ar900b(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_wmi_hdl() - get wmi handle + * @psoc: pointer to psoc + * + * API to get wmi handle + * + * Return: wmi handler + */ +struct wmi_unified *lmac_get_wmi_hdl(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_wmi_unified_hdl() - get wmi handle + * @psoc: pointer to psoc + * + * API to get wmi handle + * + * Return: wmi handler + */ +wmi_unified_t lmac_get_wmi_unified_hdl(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_htc_hdl() - get htc handler + * @psoc: pointer to psoc + * + * API to get htc handle + * + * Return: htc handler + */ +HTC_HANDLE lmac_get_htc_hdl(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_set_htc_hdl() - set htc handle + * @psoc: pointer to psoc + * @htc_hdl: HTC handle + * + * API to set htc handle + * + * Return: void + */ +void lmac_set_htc_hdl(struct wlan_objmgr_psoc *psoc, + HTC_HANDLE htc_hdl); + +/** + * lmac_get_hif_hdl() - get hif handle + * @psoc: pointer to psoc + * + * API to get hif handle + * + * Return: hif handler + */ +struct hif_opaque_softc *lmac_get_hif_hdl(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_ol_hif_hdl() - get hif handle + * @psoc: pointer to psoc + * + * API to get hif handle + * + * Return: hif handler + */ +struct hif_opaque_softc *lmac_get_ol_hif_hdl(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_pdev_wmi_handle() - get pdev wmi handle + * @pdev: pointer to dev + * + * API to get wmi handle + * + * Return: wmi handle + */ +struct wmi_unified *lmac_get_pdev_wmi_handle( + struct wlan_objmgr_pdev *pdev); + +/** + * lmac_get_pdev_wmi_unified_handle() - get pdev wmi handle + * @pdev: pointer to dev + * + * API to get wmi handle + * + * Return: wmi handle + */ +wmi_unified_t lmac_get_pdev_wmi_unified_handle( + struct wlan_objmgr_pdev *pdev); + +/** + * lmac_get_psoc_feature_ptr() - get feature pointer + * @psoc: pointer to psoc + * + * API to get legacy pointer + * + * Return: feature pointer + */ +void *lmac_get_psoc_feature_ptr(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_pdev_feature_ptr() - get feature pointer + * @pdev: pointer to pdev + * + * API to get legacy pointer + * + * Return: pdev feature pointer + */ +void *lmac_get_pdev_feature_ptr(struct wlan_objmgr_pdev *pdev); + +/** + * lmac_get_num_radios() - get number of radios + * @psoc: pointer to psoc + * + * API to get number of radios + * + * Return: number of radios + */ +uint32_t lmac_get_num_radios(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_preferred_hw_mode() - get preferred hw mode + * @psoc: pointer to psoc + * + * API to get the preferred hw mode + * + * Return: preferred how mode + */ +enum wmi_host_hw_mode_config_type lmac_get_preferred_hw_mode( + struct wlan_objmgr_psoc *psoc); + +#endif /* _INIT_DEINIT_LMAC_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_event_handler.h b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_event_handler.h new file mode 100644 index 0000000000000000000000000000000000000000..6db62d87ea71eefaaaf3fe16e15a1da3eca5d239 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_event_handler.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: init_event_handler.h + * + * Public API file for common WMI event handlers + */ +#ifndef _INIT_EVENT_HANDLER_H_ +#define _INIT_EVENT_HANDLER_H_ + +/** + * init_deinit_register_tgt_psoc_ev_handlers() - register tgt if handlers + * @psoc: PSOC object + * + * API to register tgt handlers + * + * Return: SUCCESS on successful registration + */ +QDF_STATUS init_deinit_register_tgt_psoc_ev_handlers( + struct wlan_objmgr_psoc *psoc); + +#endif /* _INIT_EVENT_HANDLER_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/service_ready_param.h b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/service_ready_param.h new file mode 100644 index 0000000000000000000000000000000000000000..3a0f9986aebecd78fcd0d3a499feff67893994eb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/service_ready_param.h @@ -0,0 +1,382 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: service_ready_param.h + * + * Public structures to access (ext)service ready data + */ +#ifndef _SERVICE_READY_PARAM_H_ +#define _SERVICE_READY_PARAM_H_ + +#include "qdf_types.h" +#ifdef WLAN_SUPPORT_RF_CHARACTERIZATION +#include "wmi_unified_param.h" +#endif + + +/** + * struct wlan_psoc_hal_reg_capability - hal reg table in psoc + * @eeprom_rd: regdomain value specified in EEPROM + * @eeprom_rd_ext: regdomain + * @regcap1: CAP1 capabilities bit map + * @regcap2: REGDMN EEPROM CAP + * @wireless_modes: REGDMN MODE + * @low_2ghz_chan: lower 2.4GHz channels + * @high_2ghz_chan: higher 2.4 GHz channels + * @low_5ghz_chan: lower 5 GHz channels + * @high_5ghz_chan: higher 5 GHz channels + */ +struct wlan_psoc_hal_reg_capability { + uint32_t eeprom_rd; + uint32_t eeprom_rd_ext; + uint32_t regcap1; + uint32_t regcap2; + uint32_t wireless_modes; + uint32_t low_2ghz_chan; + uint32_t high_2ghz_chan; + uint32_t low_5ghz_chan; + uint32_t high_5ghz_chan; +}; + +/** + * struct wlan_psoc_target_capability_info - target capabilities in psoc + * @phy_capability: PHY capabilities + * @max_frag_entry: Maximum frag entries + * @num_rf_chains: Number of RF chains supported + * @ht_cap_info: HT cap info + * @vht_cap_info: VHT cap info + * @vht_supp_mcs: VHT Supported MCS + * @hw_min_tx_power: HW minimum tx power + * @hw_max_tx_power: HW maximum tx power + * @sys_cap_info: sys capability info + * @min_pkt_size_enable: Enterprise mode short pkt enable + * @max_bcn_ie_size: Max beacon and probe rsp IE offload size + * @max_num_scan_channels: Max scan channels + * @max_supported_macs: max supported MCS + * @wmi_fw_sub_feat_caps: FW sub feature capabilities + * @txrx_chainmask: TXRX chain mask + * @default_dbs_hw_mode_index: DBS hw mode index + * @num_msdu_desc: number of msdu desc + * @fw_version: FW build version + * @fw_version_1: Second dword of FW version (Valid for non-tlv FW) + */ +struct wlan_psoc_target_capability_info { + uint32_t phy_capability; + uint32_t max_frag_entry; + uint32_t num_rf_chains; + uint32_t ht_cap_info; + uint32_t vht_cap_info; + uint32_t vht_supp_mcs; + uint32_t hw_min_tx_power; + uint32_t hw_max_tx_power; + uint32_t sys_cap_info; + uint32_t min_pkt_size_enable; + uint32_t max_bcn_ie_size; + uint32_t max_num_scan_channels; + uint32_t max_supported_macs; + uint32_t wmi_fw_sub_feat_caps; + uint32_t txrx_chainmask; + uint32_t default_dbs_hw_mode_index; + uint32_t num_msdu_desc; + uint32_t fw_version; + uint32_t fw_version_1; +}; + +/** + * struct wlan_psoc_host_ppe_threshold - PPE threshold + * @numss_m1: NSS - 1 + * @ru_bit_mask: RU bit mask indicating the supported RU's + * @ppet16_ppet8_ru3_ru0: ppet8 and ppet16 for max num ss + */ +struct wlan_psoc_host_ppe_threshold { + uint32_t numss_m1; + uint32_t ru_bit_mask; + uint32_t ppet16_ppet8_ru3_ru0[PSOC_HOST_MAX_NUM_SS]; +}; + +/** + * struct wlan_psoc_host_hal_reg_cap_ext - extended regulatory capabilities + * recvd in EXT service + * @wireless_modes: REGDMN MODE + * @low_2ghz_chan: lower 2.4GHz channels + * @high_2ghz_chan: higher 2.4 GHz channels + * @low_5ghz_chan: lower 5 GHz channels + * @high_5ghz_chan: higher 5 GHz channels + */ +struct wlan_psoc_host_hal_reg_cap_ext { + uint32_t wireless_modes; + uint32_t low_2ghz_chan; + uint32_t high_2ghz_chan; + uint32_t low_5ghz_chan; + uint32_t high_5ghz_chan; +}; + +/** + * struct wlan_psoc_host_mac_phy_caps - Phy caps recvd in EXT service + * @hw_mode_id: identify a particular set of HW characteristics, + * as specified by the subsequent fields. WMI_MAC_PHY_CAPABILITIES + * element must be mapped to its parent WMI_HW_MODE_CAPABILITIES + * element using hw_mode_id. No particular ordering of + * WMI_MAC_PHY_CAPABILITIES elements should be + * assumed, though in practice the elements may always be ordered + * by hw_mode_id. + * @pdev_id: pdev_id starts with 1. pdev_id 1 => phy_id 0, pdev_id 2 => phy_id 1 + * @phy_id: Starts with 0 + * @hw_mode_config_type: holds the enum wmi_hw_mode_config_type + * @bitmap of supported modulations + * @supported_bands: supported bands, enum WLAN_BAND_CAPABILITY + * @ampdu_density: ampdu density 0 for no restriction, 1 for 1/4 us, + * 2 for 1/2 us, 3 for 1 us,4 for 2 us, 5 for 4 us, + * 6 for 8 us,7 for 16 us + * @max_bw_supported_2G: max bw supported 2G, enum wmi_channel_width + * @ht_cap_info_2G: WMI HT Capability, WMI_HT_CAP defines + * @vht_cap_info_2G: VHT capability info field of 802.11ac, WMI_VHT_CAP defines + * @vht_supp_mcs_2G: VHT Supported MCS Set field Rx/Tx same + * The max VHT-MCS for n SS subfield (where n = 1,...,8) is encoded as + * follows + * - 0 indicates support for VHT-MCS 0-7 for n spatial streams + * - 1 indicates support for VHT-MCS 0-8 for n spatial streams + * - 2 indicates support for VHT-MCS 0-9 for n spatial streams + * - 3 indicates that n spatial streams is not supported + * @he_cap_info_2G[]: HE capability info field of 802.11ax, WMI_HE_CAP defines + * @he_supp_mcs_2G: HE Supported MCS Set field Rx/Tx same + * @tx_chain_mask_2G: Valid Transmit chain mask + * @rx_chain_mask_2G: Valid Receive chain mask + * @max_bw_supported_5G: max bw supported 5G, enum wmi_channel_width + * @ht_cap_info_5G: WMI HT Capability, WMI_HT_CAP defines + * @vht_cap_info_5G: VHT capability info field of 802.11ac, WMI_VHT_CAP defines + * @vht_supp_mcs_5G: VHT Supported MCS Set field Rx/Tx same + * The max VHT-MCS for n SS subfield (where n = 1,...,8) is encoded as + * follows + * - 0 indicates support for VHT-MCS 0-7 for n spatial streams + * - 1 indicates support for VHT-MCS 0-8 for n spatial streams + * - 2 indicates support for VHT-MCS 0-9 for n spatial streams + * - 3 indicates that n spatial streams is not supported + * @he_cap_info_5G[]: HE capability info field of 802.11ax, WMI_HE_CAP defines + * @he_supp_mcs_5G: HE Supported MCS Set field Rx/Tx same + * @tx_chain_mask_5G: Valid Transmit chain mask + * @rx_chain_mask_5G: Valid Receive chain mask + * @he_cap_phy_info_2G: 2G HE capability phy field + * @he_cap_phy_info_5G: 5G HE capability phy field + * @he_cap_info_internal: HE PHY internal feature capability + * @he_ppet2G: 2G HE PPET info + * @he_ppet5G: 5G HE PPET info + * @chainmask_table_id: chain mask table id + * @lmac_id: hw mac id + * @reg_cap_ext: extended regulatory capabilities + * @tgt_pdev_id: target pdev id assigned and used by firmware + * @nss_ratio_enabled: This flag is set if nss ratio is received from FW as part + * of service ready ext event. + * @nss_ratio: nss ratio is used to calculate the NSS value for 160MHz. + */ +struct wlan_psoc_host_mac_phy_caps { + uint32_t hw_mode_id; + uint32_t pdev_id; + uint32_t phy_id; + int hw_mode_config_type; + uint32_t supports_11b:1, + supports_11g:1, + supports_11a:1, + supports_11n:1, + supports_11ac:1, + supports_11ax:1; + uint32_t supported_bands; + uint32_t ampdu_density; + uint32_t max_bw_supported_2G; + uint32_t ht_cap_info_2G; + uint32_t vht_cap_info_2G; + uint32_t vht_supp_mcs_2G; + uint32_t he_cap_info_2G[PSOC_HOST_MAX_MAC_SIZE]; + uint32_t he_supp_mcs_2G; + uint32_t tx_chain_mask_2G; + uint32_t rx_chain_mask_2G; + uint32_t max_bw_supported_5G; + uint32_t ht_cap_info_5G; + uint32_t vht_cap_info_5G; + uint32_t vht_supp_mcs_5G; + uint32_t he_cap_info_5G[PSOC_HOST_MAX_MAC_SIZE]; + uint32_t he_supp_mcs_5G; + uint32_t tx_chain_mask_5G; + uint32_t rx_chain_mask_5G; + uint32_t he_cap_phy_info_2G[PSOC_HOST_MAX_PHY_SIZE]; + uint32_t he_cap_phy_info_5G[PSOC_HOST_MAX_PHY_SIZE]; + uint32_t he_cap_info_internal; + struct wlan_psoc_host_ppe_threshold he_ppet2G; + struct wlan_psoc_host_ppe_threshold he_ppet5G; + uint32_t chainmask_table_id; + uint32_t lmac_id; + struct wlan_psoc_host_hal_reg_cap_ext reg_cap_ext; + uint32_t tgt_pdev_id; + bool nss_ratio_enabled; + uint8_t nss_ratio_info; +}; + +/** + * struct wlan_psoc_host_hw_mode_caps - HW mode capabilities in EXT event + * @hw_mode_id: identify a particular set of HW characteristics, + * as specified by the subsequent fields + * @phy_id_map: BIT0 represents phy_id 0, BIT1 represent phy_id 1 and so on + * @hw_mode_config_type: HW mode config type + */ +struct wlan_psoc_host_hw_mode_caps { + uint32_t hw_mode_id; + uint32_t phy_id_map; + uint32_t hw_mode_config_type; +}; + +/** + * struct wlan_psoc_host_dbr_ring_caps - Direct buffer rx module ring + * capability maintained by PSOC + * @pdev_id: Pdev id of the pdev + * @mod_id: Module id + * @ring_elems_min: Minimum number of pointers in the ring + * @min_buf_size: Minimum size of each buffer entry in the ring + * @min_buf_align: Minimum alignment of the addresses in the ring + */ +struct wlan_psoc_host_dbr_ring_caps { + uint32_t pdev_id; + uint32_t mod_id; + uint32_t ring_elems_min; + uint32_t min_buf_size; + uint32_t min_buf_align; +}; + +/** + * struct wlan_psoc_host_spectral_scaling_params - Spectral scaling params + * @pdev_id: Pdev id of the pdev + * @formula_id: Formula id + * @low_level_offset: Low level offset + * @high_level_offset: High level offset + * @rssi_thr: RSSI threshold + * @default_agc_max_gain: Default agc max gain + */ +struct wlan_psoc_host_spectral_scaling_params { + uint32_t pdev_id; + uint32_t formula_id; + uint32_t low_level_offset; + uint32_t high_level_offset; + uint32_t rssi_thr; + uint32_t default_agc_max_gain; +}; + +/** + * struct wlan_psoc_host_chainmask_capabilities - chain mask capabilities list + * @supports_chan_width_20: channel width 20 support for this chain mask. + * @supports_chan_width_40: channel width 40 support for this chain mask. + * @supports_chan_width_80: channel width 80 support for this chain mask. + * @supports_chan_width_160: channel width 160 support for this chain mask. + * @supports_chan_width_80P80: channel width 80P80 support for this chain mask. + * @supports_aSpectral: Agile Spectral support for this chain mask. + * @supports_aSpectral_160: Agile Spectral support in 160 MHz. + * @supports_aDFS_160: Agile DFS support in 160 MHz for this chain mask. + * @chain_mask_2G: 2G support for this chain mask. + * @chain_mask_5G: 5G support for this chain mask. + * @chain_mask_tx: Tx support for this chain mask. + * @chain_mask_rx: Rx support for this chain mask. + * @supports_aDFS: Agile DFS support for this chain mask. + * @chainmask: chain mask value. + */ +struct wlan_psoc_host_chainmask_capabilities { + uint32_t supports_chan_width_20:1, + supports_chan_width_40:1, + supports_chan_width_80:1, + supports_chan_width_160:1, + supports_chan_width_80P80:1, + supports_aSpectral:1, + supports_aSpectral_160:1, + supports_aDFS_160:1, + reserved:19, + chain_mask_2G:1, + chain_mask_5G:1, + chain_mask_tx:1, + chain_mask_rx:1, + supports_aDFS:1; + uint32_t chainmask; +}; + +/** + * struct wlan_psoc_host_chainmask_table - chain mask table + * @table_id: tableid. + * @num_valid_chainmasks: num valid chainmasks. + * @cap_list: pointer to wlan_psoc_host_chainmask_capabilities list. + */ +struct wlan_psoc_host_chainmask_table { + uint32_t table_id; + uint32_t num_valid_chainmasks; + struct wlan_psoc_host_chainmask_capabilities *cap_list; +}; + +/** + * struct wlan_psoc_host_service_ext_param - EXT service base params in event + * @default_conc_scan_config_bits: Default concurrenct scan config + * @default_fw_config_bits: Default HW config bits + * @wlan_psoc_host_ppe_threshold ppet: Host PPE threshold struct + * @he_cap_info: HE capabality info + * @mpdu_density: units are microseconds + * @max_bssid_rx_filters: Maximum no of BSSID based RX filters host can program + * Value 0 means FW hasn't given any limit to host. + * @fw_build_vers_ext: Extended FW build version info. + * bits 27:0 rsvd + * bits 31:28 CRM sub ID + * @num_hw_modes: Number of HW modes in event + * @num_phy: Number of Phy mode. + * @num_chainmask_tables: Number of chain mask tables. + * @num_dbr_ring_caps: Number of direct buf rx ring capabilities + * @max_bssid_indicator: Maximum number of VAPs in MBSS IE + * @num_bin_scaling_params: Number of Spectral bin scaling parameters + * @chainmask_table: Available chain mask tables. + * @sar_version: SAR version info + */ +struct wlan_psoc_host_service_ext_param { + uint32_t default_conc_scan_config_bits; + uint32_t default_fw_config_bits; + struct wlan_psoc_host_ppe_threshold ppet; + uint32_t he_cap_info; + uint32_t mpdu_density; + uint32_t max_bssid_rx_filters; + uint32_t fw_build_vers_ext; + uint32_t num_hw_modes; + uint32_t num_phy; + uint32_t num_chainmask_tables; + uint32_t num_dbr_ring_caps; + uint32_t max_bssid_indicator; + uint32_t num_bin_scaling_params; + struct wlan_psoc_host_chainmask_table + chainmask_table[PSOC_MAX_CHAINMASK_TABLES]; + uint32_t sar_version; +}; + +/** + * struct wlan_psoc_host_service_ext2_param - EXT service base params in event + * reg_db_version_major: REG DB version major number + * reg_db_version_minor: REG DB version minor number + * bdf_reg_db_version_major: BDF REG DB version major number + * bdf_reg_db_version_minor: BDF REG DB version minor number + * @num_dbr_ring_caps: Number of direct buf rx ring capabilities + * @max_ndp_sessions: Max number of ndp session fw supports + */ +struct wlan_psoc_host_service_ext2_param { + uint8_t reg_db_version_major; + uint8_t reg_db_version_minor; + uint8_t bdf_reg_db_version_major; + uint8_t bdf_reg_db_version_minor; + uint32_t num_dbr_ring_caps; + uint32_t max_ndp_sessions; +}; + +#endif /* _SERVICE_READY_PARAM_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/service_ready_util.h b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/service_ready_util.h new file mode 100644 index 0000000000000000000000000000000000000000..e78fa79ff6a130c9f756b7e1910551bae4b4b05c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/service_ready_util.h @@ -0,0 +1,318 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: service_ready_util.h + * + * Public APIs to access (ext)service ready data from psoc object + */ +#ifndef _SERVICE_READY_UTIL_H_ +#define _SERVICE_READY_UTIL_H_ + +#include "wlan_objmgr_psoc_obj.h" +#include "service_ready_param.h" +#include "target_if.h" + +/** + * init_deinit_chainmask_table_alloc() + * - allocate chainmask table capability list. + * @service_ext_param: pointer to server ext param. + * + * Allocates capability list based on num_valid_chainmasks for that table. + * + * Return: QDF Status. + */ +QDF_STATUS init_deinit_chainmask_table_alloc( + struct wlan_psoc_host_service_ext_param *service_ext_param); + +/** + * init_deinit_chainmask_table_free() + * -free chainmask table capability list. + * @service_ext_param: pointer to server ext param. + * + * free capability list based on num_valid_chainmasks for that table. + * + * Return: QDF Status. + */ +QDF_STATUS init_deinit_chainmask_table_free( + struct wlan_psoc_host_service_ext_param *service_ext_param); + +/** + * init_deinit_populate_service_bitmap() - populate service bitmap + * @wmi_handle: wmi handle + * @event: event buffer received from FW + * @service_bitmap: service bitmap information + * + * API to populate service bit map + * + * Return: zero on successful population of service bitmap or failure flag + */ +int init_deinit_populate_service_bitmap( + wmi_unified_t wmi_handle, uint8_t *event, + uint32_t *service_bitmap); + +/** + * init_deinit_populate_fw_version_cmd() - populate FW version + * @wmi_handle: wmi handle + * @event: event buffer received from FW + * + * API to populate FW version + * + * Return: zero on successful population of fw_version command or failure flag + */ +int +init_deinit_populate_fw_version_cmd(wmi_unified_t wmi_handle, uint8_t *event); + +/** + * init_deinit_populate_target_cap() - populate target cap + * @wmi_handle: wmi handle + * @event: event buffer received from FW + * @cap: target capability info object + * + * API to populate target cap + * + * Return: zero on successful population of target cap or failure flag + */ +int init_deinit_populate_target_cap( + wmi_unified_t wmi_handle, uint8_t *event, + struct wlan_psoc_target_capability_info *cap); + +/** + * init_deinit_populate_service_ready_ext_param() - populate service ready ext + * parameter + * @handle: WMI handle pointer + * @evt: event buffer received from FW + * @param: service ext param object + * + * API to populate service ready ext param + * + * Return: zero on successful parsing of service ready ext parameter or failure + */ +int init_deinit_populate_service_ready_ext_param( + wmi_unified_t handle, uint8_t *evt, + struct wlan_psoc_host_service_ext_param *param); + +/** + * init_deinit_populate_service_ready_ext2_param() - populate service ready ext2 + * parameter + * @handle: WMI handle pointer + * @evt: event buffer received from FW + * @info: Target info handle + * + * API to populate service ready ext2 param + * + * Return: zero on successful parsing of service ready ext parameter or failure + */ +int init_deinit_populate_service_ready_ext2_param( + wmi_unified_t handle, uint8_t *evt, + struct tgt_info *info); + +/** + * init_deinit_populate_chainmask_tables() - populate chainmaks tables + * @handle: WMI handle pointer + * @evt: event buffer received from FW + * @param: chainmaks_table object + * + * API to populate chainmaks tables + * + * Return: zero on successful parsing of chainmaks tables or failure flag + */ +int init_deinit_populate_chainmask_tables( + wmi_unified_t handle, uint8_t *evt, + struct wlan_psoc_host_chainmask_table *param); + +/** + * init_deinit_populate_mac_phy_capability() - populate mac phy capability + * @handle: WMI handle pointer + * @evt: event buffer received from FW + * @hw_cap: hw_mode_caps object + * @info: tgt_info object + * + * API to populate mac phy capability + * + * Return: zero on successful population of mac physical capability or failure + */ +int init_deinit_populate_mac_phy_capability( + wmi_unified_t handle, uint8_t *evt, + struct wlan_psoc_host_hw_mode_caps *hw_cap, struct tgt_info *info); + +/** + * init_deinit_populate_hw_mode_capability() - populate hw mode capability + * @wmi_handle: WMI handle pointer + * @event: event buffer received from FW + * @tgt_hdl: target_psoc_info object + * + * API to populate hw mode capability + * + * Return: zero on successful parsing of hw mode capability or failure + */ +int init_deinit_populate_hw_mode_capability( + wmi_unified_t wmi_handle, + uint8_t *event, struct target_psoc_info *tgt_hdl); + +/** + * init_deinit_populate_dbr_ring_cap() - populate dbr ring capability + * @psoc: PSOC object + * @handle: WMI handle pointer + * @event: event buffer received from FW + * @info: tgt_info object + * + * API to populate dbr ring capability + * + * Return: zero on successful parsing of dbr ring capability or failure + */ +int init_deinit_populate_dbr_ring_cap(struct wlan_objmgr_psoc *psoc, + wmi_unified_t handle, uint8_t *event, + struct tgt_info *info); + +/** + * init_deinit_populate_dbr_ring_cap_ext2() - populate dbr ring capability + * from ext2 event + * @psoc: PSOC object + * @handle: WMI handle pointer + * @event: event buffer received from FW + * @info: tgt_info object + * + * API to populate dbr ring capability + * + * Return: zero on successful parsing of dbr ring capability or failure + */ +int init_deinit_populate_dbr_ring_cap_ext2(struct wlan_objmgr_psoc *psoc, + wmi_unified_t handle, uint8_t *event, + struct tgt_info *info); + +/** + * init_deinit_populate_spectral_bin_scale_params() - populate Spectral scaling + * @psoc: PSOC object + * @handle: WMI handle pointer + * @event: event buffer received from FW + * @info: tgt_info object + * + * API to populate Spectral bin scaling parameters + * + * Return: zero on successful parsing of scaling params or failure + */ +int init_deinit_populate_spectral_bin_scale_params( + struct wlan_objmgr_psoc *psoc, + wmi_unified_t handle, uint8_t *event, + struct tgt_info *info); + +/** + * init_deinit_dbr_ring_cap_free() - free dbr ring capability + * @tgt_psoc_info: target psoc info object + * + * API to free dbr ring capability + * + * Return: QDF_STATUS + */ +QDF_STATUS init_deinit_dbr_ring_cap_free( + struct target_psoc_info *tgt_psoc_info); + +/** + * init_deinit_spectral_scaling_params_free() - free Spectral scaling params + * @tgt_psoc_info: target psoc info object + * + * API to free Spectral scaling params + * + * Return: QDF_STATUS + */ +QDF_STATUS init_deinit_spectral_scaling_params_free( + struct target_psoc_info *tgt_psoc_info); + +/** + * init_deinit_populate_phy_reg_cap() - populate phy reg capability + * @psoc: PSOC object + * @wmi_handle: WMI handle pointer + * @event: event buffer received from FW + * @info: tgt_info object + * @service_ready: service ready determiner + * + * API to populate phy reg capability + * + * Return: zero on successful parsing of physical reg capability or failure flag + */ +int init_deinit_populate_phy_reg_cap(struct wlan_objmgr_psoc *psoc, + wmi_unified_t wmi_handle, uint8_t *event, + struct tgt_info *info, + bool service_ready); + +/** + * init_deinit_validate_160_80p80_fw_caps() - validate 160 80p80 fw caps + * @psoc: PSOC object + * @tgt_info: target_psoc_info object + * + * API to validate 160 80p80 fw caps + * + * Return: SUCCESS on successful validation of 160 80p80 forward caps or Failure + */ +QDF_STATUS init_deinit_validate_160_80p80_fw_caps( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + +/** + * init_deinit_chainmask_config() - config chainmask + * @psoc: PSOC object + * @tgt_info: target_psoc_info object + * + * API to config chainmask + * + * Return: none + */ +void init_deinit_chainmask_config( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + +/** + * init_deinit_is_service_ext_msg() - check service ext message + * @psoc: PSOC object + * @tgt_info: target_psoc_info object + * + * API to check whether service ext message is enabled + * + * Return: SUCCESS on successful check of service_ext message or Failure + */ +QDF_STATUS init_deinit_is_service_ext_msg( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); +/** + * init_deinit_is_preferred_hw_mode_supported() - check support of preferred + * hw mode + * @psoc: PSOC object + * @tgt_info: target_psoc_info object + * + * API to check whether preferred hardware mode is enabled + * + * Return: True on support of preferred hardware support or False + */ +bool init_deinit_is_preferred_hw_mode_supported( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + +/** + * init_deinit_wakeup_host_wait() - wakeup host wait + * @psoc: PSOC object + * @tgt_info: target_psoc_info object + * + * API to wakeup FW ready wait queue + * + * Return: None + */ +void init_deinit_wakeup_host_wait( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + +#endif /* _SERVICE_READY_UTIL_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_cmd_api.c b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_cmd_api.c new file mode 100644 index 0000000000000000000000000000000000000000..ebc0b3b7c8ab2739e8ce0dffb25085908bfd1d45 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_cmd_api.c @@ -0,0 +1,434 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: init_cmd_api.c + * + * WMI Init command prepare & send APIs + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * init_deinit_alloc_host_mem_chunk() - allocates chunk of memory requested + * by FW. + * @psoc: PSOC object + * @tgt_hdl: Target PSOC info + * @req_id: request id + * @idx: chunk id + * @num_units: Number of units + * @unit_len: Unit length + * @num_unit_info: Num unit info + * + * API to allocate host memory chunk requested by FW + * + * Return: num_units on successful allocation + * 0 on failure + */ +static uint32_t init_deinit_alloc_host_mem_chunk(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, + u_int32_t req_id, u_int32_t idx, u_int32_t num_units, + u_int32_t unit_len, u_int32_t num_unit_info) +{ + qdf_dma_addr_t paddr; + uint32_t ichunk = 0; + struct tgt_info *info; + qdf_device_t qdf_dev; + + info = (&tgt_hdl->info); + + if (!num_units || !unit_len) + return 0; + + qdf_dev = wlan_psoc_get_qdf_dev(psoc); + if (!qdf_dev) + return 0; + + /* + * We have skip smaller chunks memory allocation for TXBF_CV buffer + * as Firmware is expecting continuous memory + */ + if (!((num_unit_info & HOST_CONTIGUOUS_MEM_CHUNK_REQUIRED) && + (req_id == TXBF_CV_POOL0 || req_id == TXBF_CV_POOL1 || + req_id == TXBF_CV_POOL2))) { + ichunk = ((num_units * unit_len) >> + HOST_MEM_CHUNK_MAX_SIZE_POWER2); + if (ichunk) + num_units = num_units / (ichunk + 1); + } + + info->mem_chunks[idx].vaddr = NULL; + /* reduce the requested allocation by half until allocation succeeds */ + while (!info->mem_chunks[idx].vaddr && num_units) { + info->mem_chunks[idx].vaddr = qdf_mem_alloc_consistent(qdf_dev, + qdf_dev->dev, num_units * unit_len, &paddr); + if (!info->mem_chunks[idx].vaddr) { + if (num_unit_info & + HOST_CONTIGUOUS_MEM_CHUNK_REQUIRED) { + num_units = 0; + target_if_err("mem chink alloc failed for %d", + idx); + break; + } + /* reduce length by half */ + num_units = (num_units >> 1); + } else { + info->mem_chunks[idx].paddr = paddr; + info->mem_chunks[idx].len = num_units*unit_len; + info->mem_chunks[idx].req_id = req_id; + } + } + target_if_debug("req_id %d idx %d num_units %d unit_len %d", + req_id, idx, num_units, unit_len); + + return num_units; +} + +/* Host mem size units, it is used for round-off */ +#define HOST_MEM_SIZE_UNIT 4 + +/** + * init_deinit_alloc_host_mem() - allocates amount of memory requested by FW. + * @psoc: PSOC object + * @tgt_hdl: Target PSOC info + * @req_id: request id + * @num_units: Number of units + * @unit_len: Unit length + * @num_unit_info: Num unit info + * + * API to allocate host memory requested by FW + * + * Return: QDF_STATUS_SUCCESS on successful allocation + * QDF_STATUS_E_FAILURE on failure + */ +static QDF_STATUS init_deinit_alloc_host_mem(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, u_int32_t req_id, + u_int32_t num_units, u_int32_t unit_len, + u_int32_t num_unit_info) +{ + struct tgt_info *info; + uint32_t remaining_units; + uint32_t allocated_units = 0; + uint32_t idx; + + info = (&tgt_hdl->info); + /* adjust the length to nearest multiple of unit size */ + unit_len = (unit_len + (HOST_MEM_SIZE_UNIT - 1)) & + (~(HOST_MEM_SIZE_UNIT - 1)); + idx = info->num_mem_chunks; + remaining_units = num_units; + + while (remaining_units) { + if (idx == MAX_MEM_CHUNKS) { + target_if_err( + "REACHED MAX CHUNK LIMIT for mem units %d", + num_units); + target_if_err( + "unit len %d requested by FW, only allocated %d", + unit_len, (num_units - remaining_units)); + info->num_mem_chunks = idx; + return QDF_STATUS_E_FAILURE; + } + + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->mem_mgr_alloc_chunk)) + allocated_units = tgt_hdl->tif_ops->mem_mgr_alloc_chunk( + psoc, tgt_hdl, req_id, idx, + remaining_units, + unit_len, num_unit_info); + else + allocated_units = init_deinit_alloc_host_mem_chunk( + psoc, tgt_hdl, req_id, idx, + remaining_units, + unit_len, num_unit_info); + if (allocated_units == 0) { + target_if_err("FAILED TO ALLOC mem unit len %d", + unit_len); + target_if_err("units requested %d units allocated %d", + num_units, (num_units - remaining_units)); + info->num_mem_chunks = idx; + return QDF_STATUS_E_NOMEM; + } + remaining_units -= allocated_units; + ++idx; + } + info->num_mem_chunks = idx; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS init_deinit_free_num_units(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + struct tgt_info *info; + qdf_device_t qdf_dev; + uint32_t idx; + QDF_STATUS status; + + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return QDF_STATUS_E_INVAL; + } + + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->mem_mgr_free_chunks)) { + status = tgt_hdl->tif_ops->mem_mgr_free_chunks(psoc, tgt_hdl); + } else { + qdf_dev = wlan_psoc_get_qdf_dev(psoc); + if (!qdf_dev) { + target_if_err("qdf_dev is null"); + QDF_BUG(0); + return QDF_STATUS_E_INVAL; + } + info = (&tgt_hdl->info); + for (idx = 0; idx < info->num_mem_chunks; idx++) { + qdf_mem_free_consistent( + qdf_dev, qdf_dev->dev, + info->mem_chunks[idx].len, + info->mem_chunks[idx].vaddr, + info->mem_chunks[idx].paddr, + qdf_get_dma_mem_context( + (&info->mem_chunks[idx]), memctx)); + + info->mem_chunks[idx].vaddr = NULL; + info->mem_chunks[idx].paddr = 0; + info->mem_chunks[idx].len = 0; + } + info->num_mem_chunks = 0; + status = QDF_STATUS_SUCCESS; + } + + return status; +} + +QDF_STATUS init_deinit_handle_host_mem_req( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *event) +{ + uint32_t num_mem_reqs; + host_mem_req mem_reqs; + uint32_t i; + uint32_t idx; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct wmi_unified *wmi_handle; + struct tgt_info *info; + + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + info = (&tgt_hdl->info); + + num_mem_reqs = wmi_extract_num_mem_reqs_from_service_ready( + wmi_handle, event); + if (!num_mem_reqs) + return QDF_STATUS_SUCCESS; + + if (num_mem_reqs > MAX_MEM_CHUNKS) { + target_if_err_rl("num_mem_reqs:%u is out of bounds", + num_mem_reqs); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < WMI_FW_PRIORITY_MAX; i++) { + for (idx = 0; idx < num_mem_reqs; idx++) { + status = wmi_extract_host_mem_req_from_service_ready( + wmi_handle, event, &mem_reqs, + info->wlan_res_cfg.num_active_peers, + info->wlan_res_cfg.num_peers, i, idx); + if (mem_reqs.tgt_num_units) { + status = init_deinit_alloc_host_mem( + psoc, + tgt_hdl, + mem_reqs.req_id, + mem_reqs.tgt_num_units, + mem_reqs.unit_size, + mem_reqs.num_unit_info); + if (status == QDF_STATUS_E_FAILURE) { + target_if_err("num_mem_chunk exceeds supp number"); + } else if (status == QDF_STATUS_E_NOMEM) { + target_if_err("mem alloc failure"); + } + } + + if (status != QDF_STATUS_SUCCESS) + return status; + } + } + + return status; +} + +void init_deinit_derive_band_to_mac_param( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, + struct wmi_host_pdev_band_to_mac *band_to_mac) +{ + uint8_t i; + struct wlan_psoc_host_mac_phy_caps *mac_phy_cap; + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap; + struct tgt_info *info; + + if (!tgt_hdl) { + target_if_err("target_psoc_info is null "); + return; + } + + info = (&tgt_hdl->info); + + reg_cap = ucfg_reg_get_hal_reg_cap(psoc); + if (!reg_cap) { + target_if_err("reg cap is NULL"); + return; + } + + mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl); + if (!mac_phy_cap) { + target_if_err("mac_phy_cap is NULL"); + return; + } + for (i = 0; i < target_psoc_get_num_radios(tgt_hdl); i++) { + if (mac_phy_cap->supported_bands == + (WMI_HOST_WLAN_5G_CAPABILITY | + WMI_HOST_WLAN_2G_CAPABILITY)) { + /*Supports both 5G and 2G. Use freq from both radios*/ + target_if_debug("Supports both 2G and 5G"); + band_to_mac[i].pdev_id = mac_phy_cap->pdev_id; + band_to_mac[i].start_freq = + reg_cap[i].low_2ghz_chan; + band_to_mac[i].end_freq = + reg_cap[i].high_5ghz_chan; + + } else if (mac_phy_cap->supported_bands == + WMI_HOST_WLAN_2G_CAPABILITY) { + band_to_mac[i].pdev_id = mac_phy_cap->pdev_id; + band_to_mac[i].start_freq = + reg_cap[i].low_2ghz_chan; + band_to_mac[i].end_freq = + reg_cap[i].high_2ghz_chan; + + reg_cap[mac_phy_cap->phy_id].low_5ghz_chan = 0; + reg_cap[mac_phy_cap->phy_id].high_5ghz_chan = 0; + + target_if_debug("2G radio - pdev_id = %d start_freq = %d end_freq= %d", + band_to_mac[i].pdev_id, + band_to_mac[i].start_freq, + band_to_mac[i].end_freq); + + } else if (mac_phy_cap->supported_bands == + WMI_HOST_WLAN_5G_CAPABILITY) { + band_to_mac[i].pdev_id = mac_phy_cap->pdev_id; + band_to_mac[i].start_freq = + reg_cap[i].low_5ghz_chan; + band_to_mac[i].end_freq = + reg_cap[i].high_5ghz_chan; + + reg_cap[mac_phy_cap->phy_id].low_2ghz_chan = 0; + reg_cap[mac_phy_cap->phy_id].high_2ghz_chan = 0; + + target_if_debug("5G radio -pdev_id = %d start_freq = %d end_freq =%d\n", + band_to_mac[i].pdev_id, + band_to_mac[i].start_freq, + band_to_mac[i].end_freq); + } + mac_phy_cap++; + } +} + +void init_deinit_prepare_send_init_cmd( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + struct wmi_init_cmd_param init_param = {0}; + struct tgt_info *info; + struct wmi_unified *wmi_handle; + QDF_STATUS ret_val; + + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return; + } + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + info = (&tgt_hdl->info); + + init_param.res_cfg = &info->wlan_res_cfg; + init_param.num_mem_chunks = info->num_mem_chunks; + init_param.mem_chunks = info->mem_chunks; + + if (init_deinit_is_service_ext_msg(psoc, tgt_hdl) == + QDF_STATUS_SUCCESS) { + init_param.hw_mode_id = info->preferred_hw_mode; + /* Temp change, until FW submits support for handling this TLV + * For single mode, skip sending hw_mode + */ + if (info->preferred_hw_mode == WMI_HOST_HW_MODE_SINGLE) + init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX; + + init_param.num_band_to_mac = target_psoc_get_num_radios( + tgt_hdl); + + init_deinit_derive_band_to_mac_param(psoc, tgt_hdl, + init_param.band_to_mac); + } else { + ret_val = tgt_if_regulatory_modify_freq_range(psoc); + if (QDF_IS_STATUS_ERROR(ret_val)) { + target_if_err("Modify freq range is failed"); + return; + } + } + + ret_val = target_if_alloc_pdevs(psoc, tgt_hdl); + if (ret_val != QDF_STATUS_SUCCESS) + return; + + ret_val = target_if_update_pdev_tgt_info(psoc, tgt_hdl); + if (ret_val != QDF_STATUS_SUCCESS) + return; + + info->wlan_res_cfg.max_ndp_sessions = + QDF_MIN(info->wlan_res_cfg.max_ndp_sessions, + info->service_ext2_param.max_ndp_sessions); + + target_if_debug("FW version 0x%x ", info->target_caps.fw_version); + if (init_deinit_is_service_ext_msg(psoc, tgt_hdl) == QDF_STATUS_SUCCESS) + target_if_debug("0x%x\n", + info->service_ext_param.fw_build_vers_ext); + else + target_if_debug("0x%x\n", info->target_caps.fw_version_1); + + wmi_unified_init_cmd_send(wmi_handle, &init_param); + + /* Set Max scans allowed */ + target_if_scan_set_max_active_scans(psoc, + WLAN_MAX_ACTIVE_SCANS_ALLOWED); + + if (wmi_service_enabled(wmi_handle, wmi_service_hw_db2dbm_support)) + wlan_psoc_nif_fw_ext_cap_set(psoc, WLAN_SOC_CEXT_HW_DB2DBM); +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_deinit_lmac.c b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_deinit_lmac.c new file mode 100644 index 0000000000000000000000000000000000000000..4da2a989350f1d144f35412dd972ae4d466ef9b3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_deinit_lmac.c @@ -0,0 +1,367 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: init_deinit_lmac.c + * + * APIs to get/set target_if params + */ +#include +#include +#include +#include +#include +#include +#include + +struct wlan_psoc_target_capability_info *lmac_get_target_cap( + struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return NULL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return NULL; + } + + return target_psoc_get_target_caps(tgt_hdl); +} + +target_resource_config *lmac_get_tgt_res_cfg(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return NULL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return NULL; + } + + return target_psoc_get_wlan_res_cfg(tgt_hdl); +} + +int32_t lmac_get_pdev_idx(struct wlan_objmgr_pdev *pdev) +{ + if (!pdev) { + target_if_err("pdev is null"); + return 0xffffffff; + } + + return wlan_objmgr_pdev_get_pdev_id(pdev); +} + +uint32_t lmac_get_tgt_type(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return 0; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return 0; + } + + return target_psoc_get_target_type(tgt_hdl); +} +qdf_export_symbol(lmac_get_tgt_type); + +QDF_STATUS lmac_get_pdev_target_type(struct wlan_objmgr_pdev *pdev, + uint32_t *target_type) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + target_if_err("psoc is NULL"); + return QDF_STATUS_E_FAILURE; + } + + *target_type = lmac_get_tgt_type(psoc); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(lmac_get_pdev_target_type); + +uint32_t lmac_get_tgt_version(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return -EINVAL; + } + + return target_psoc_get_target_ver(tgt_hdl); +} +qdf_export_symbol(lmac_get_tgt_version); + +uint32_t lmac_get_tgt_revision(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return -EINVAL; + } + + return target_psoc_get_target_rev(tgt_hdl); +} +qdf_export_symbol(lmac_get_tgt_revision); + +bool lmac_is_target_ar900b(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + uint32_t target_type; + + if (!psoc) { + target_if_err("psoc is null\n"); + return false; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return false; + } + target_type = tgt_hdl->info.target_type; + + switch (target_type) { + case TARGET_TYPE_AR900B: + case TARGET_TYPE_QCA9984: + case TARGET_TYPE_IPQ4019: + case TARGET_TYPE_QCA9888: + return true; + default: + return false; + } + return false; +} +qdf_export_symbol(lmac_is_target_ar900b); + +struct wmi_unified *lmac_get_wmi_hdl(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return NULL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return NULL; + } + + return target_psoc_get_wmi_hdl(tgt_hdl); +} +qdf_export_symbol(lmac_get_wmi_hdl); + +wmi_unified_t lmac_get_wmi_unified_hdl(struct wlan_objmgr_psoc *psoc) +{ + return (wmi_unified_t)lmac_get_wmi_hdl(psoc); +} +qdf_export_symbol(lmac_get_wmi_unified_hdl); + +HTC_HANDLE lmac_get_htc_hdl(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return NULL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return NULL; + } + + return target_psoc_get_htc_hdl(tgt_hdl); +} +qdf_export_symbol(lmac_get_htc_hdl); + +void lmac_set_htc_hdl(struct wlan_objmgr_psoc *psoc, + HTC_HANDLE htc_hdl) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return; + } + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return; + } + + target_psoc_set_htc_hdl(tgt_hdl, htc_hdl); +} + +struct hif_opaque_softc *lmac_get_hif_hdl(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return NULL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return NULL; + } + + return target_psoc_get_hif_hdl(tgt_hdl); +} +qdf_export_symbol(lmac_get_hif_hdl); + +struct hif_opaque_softc *lmac_get_ol_hif_hdl(struct wlan_objmgr_psoc *psoc) +{ + return (struct hif_opaque_softc *)lmac_get_hif_hdl(psoc); +} +qdf_export_symbol(lmac_get_ol_hif_hdl); + +struct wmi_unified *lmac_get_pdev_wmi_handle( + struct wlan_objmgr_pdev *pdev) +{ + struct target_pdev_info *tgt_hdl; + + if (!pdev) { + target_if_err("pdev is null"); + return NULL; + } + + tgt_hdl = wlan_pdev_get_tgt_if_handle(pdev); + if (!tgt_hdl) { + target_if_err("target_pdev_info is null"); + return NULL; + } + + return target_pdev_get_wmi_handle(tgt_hdl); +} +qdf_export_symbol(lmac_get_pdev_wmi_handle); + +wmi_unified_t +lmac_get_pdev_wmi_unified_handle(struct wlan_objmgr_pdev *pdev) +{ + return (wmi_unified_t)lmac_get_pdev_wmi_handle(pdev); +} + +uint32_t lmac_get_num_radios(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return 0; + } + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return 0; + } + + return target_psoc_get_num_radios(tgt_hdl); +} +qdf_export_symbol(lmac_get_num_radios); + +void *lmac_get_psoc_feature_ptr(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return NULL; + } + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return NULL; + } + + return target_psoc_get_feature_ptr(tgt_hdl); +} +qdf_export_symbol(lmac_get_psoc_feature_ptr); + +void *lmac_get_pdev_feature_ptr(struct wlan_objmgr_pdev *pdev) +{ + struct target_pdev_info *tgt_hdl; + + if (!pdev) { + target_if_err("pdev is null"); + return NULL; + } + tgt_hdl = wlan_pdev_get_tgt_if_handle(pdev); + if (!tgt_hdl) { + target_if_err("target_pdev_info is null"); + return NULL; + } + + return target_pdev_get_feature_ptr(tgt_hdl); +} +qdf_export_symbol(lmac_get_pdev_feature_ptr); + +enum wmi_host_hw_mode_config_type lmac_get_preferred_hw_mode( + struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return WMI_HOST_HW_MODE_MAX; + } + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return WMI_HOST_HW_MODE_MAX; + } + + return target_psoc_get_preferred_hw_mode(tgt_hdl); +} + +qdf_export_symbol(lmac_get_preferred_hw_mode); diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_event_handler.c b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_event_handler.c new file mode 100644 index 0000000000000000000000000000000000000000..5868c1a84d05a1e137cacffc3a6f92242a45deae --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_event_handler.c @@ -0,0 +1,661 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: init_event_handler.c + * + * WMI common event handler implementation source file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void init_deinit_set_send_init_cmd(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + tgt_hdl->info.wmi_service_ready = TRUE; + /* send init command */ + init_deinit_prepare_send_init_cmd(psoc, tgt_hdl); +} + +static int init_deinit_service_ready_event_handler(ol_scn_t scn_handle, + uint8_t *event, + uint32_t data_len) +{ + int err_code; + struct wlan_objmgr_psoc *psoc; + struct target_psoc_info *tgt_hdl; + wmi_legacy_service_ready_callback legacy_callback; + struct wmi_unified *wmi_handle; + QDF_STATUS ret_val; + + if (!scn_handle) { + target_if_err("scn handle NULL in service ready handler"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn_handle); + if (!psoc) { + target_if_err("psoc is null in service ready handler"); + return -EINVAL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null in service ready ev"); + return -EINVAL; + } + + ret_val = target_if_sw_version_check(psoc, tgt_hdl, event); + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + + err_code = init_deinit_populate_service_bitmap(wmi_handle, event, + tgt_hdl->info.service_bitmap); + if (err_code) + goto exit; + + err_code = init_deinit_populate_fw_version_cmd(wmi_handle, event); + if (err_code) + goto exit; + + err_code = init_deinit_populate_target_cap(wmi_handle, event, + &(tgt_hdl->info.target_caps)); + if (err_code) + goto exit; + + err_code = init_deinit_populate_phy_reg_cap(psoc, wmi_handle, event, + &(tgt_hdl->info), true); + if (err_code) + goto exit; + + if (init_deinit_validate_160_80p80_fw_caps(psoc, tgt_hdl) != + QDF_STATUS_SUCCESS) { + wlan_psoc_nif_op_flag_set(psoc, WLAN_SOC_OP_VHT_INVALID_CAP); + } + + target_if_ext_res_cfg_enable(psoc, tgt_hdl, event); + + if (wmi_service_enabled(wmi_handle, wmi_service_tt)) + wlan_psoc_nif_fw_ext_cap_set(psoc, WLAN_SOC_CEXT_TT_SUPPORT); + + if (wmi_service_enabled(wmi_handle, wmi_service_widebw_scan)) + wlan_psoc_nif_fw_ext_cap_set(psoc, WLAN_SOC_CEXT_WIDEBAND_SCAN); + + if (wmi_service_enabled(wmi_handle, wmi_service_check_cal_version)) + wlan_psoc_nif_fw_ext_cap_set(psoc, WLAN_SOC_CEXT_SW_CAL); + + if (wmi_service_enabled(wmi_handle, wmi_service_twt_requestor)) + wlan_psoc_nif_fw_ext_cap_set(psoc, WLAN_SOC_CEXT_TWT_REQUESTER); + + if (wmi_service_enabled(wmi_handle, wmi_service_twt_responder)) + wlan_psoc_nif_fw_ext_cap_set(psoc, WLAN_SOC_CEXT_TWT_RESPONDER); + + if (wmi_service_enabled(wmi_handle, wmi_service_bss_color_offload)) + target_if_debug(" BSS COLOR OFFLOAD supported"); + + if (wmi_service_enabled(wmi_handle, wmi_service_ul_ru26_allowed)) + wlan_psoc_nif_fw_ext_cap_set(psoc, WLAN_SOC_CEXT_OBSS_NBW_RU); + + if (wmi_service_enabled(wmi_handle, wmi_service_infra_mbssid)) + wlan_psoc_nif_fw_ext_cap_set(psoc, WLAN_SOC_CEXT_MBSS_IE); + + if (wmi_service_enabled(wmi_handle, wmi_service_dynamic_hw_mode)) + wlan_psoc_nif_fw_ext_cap_set(psoc, WLAN_SOC_CEXT_DYNAMIC_HW_MODE); + + if (wmi_service_enabled(wmi_handle, wmi_service_bw_165mhz_support)) + wlan_psoc_nif_fw_ext_cap_set(psoc, + WLAN_SOC_RESTRICTED_80P80_SUPPORT); + + if (wmi_service_enabled(wmi_handle, + wmi_service_nss_ratio_to_host_support)) + wlan_psoc_nif_fw_ext_cap_set( + psoc, WLAN_SOC_NSS_RATIO_TO_HOST_SUPPORT); + + target_if_debug(" TT support %d, Wide BW Scan %d, SW cal %d", + wlan_psoc_nif_fw_ext_cap_get(psoc, WLAN_SOC_CEXT_TT_SUPPORT), + wlan_psoc_nif_fw_ext_cap_get(psoc, WLAN_SOC_CEXT_WIDEBAND_SCAN), + wlan_psoc_nif_fw_ext_cap_get(psoc, WLAN_SOC_CEXT_SW_CAL)); + + target_if_mesh_support_enable(psoc, tgt_hdl, event); + + target_if_eapol_minrate_enable(psoc, tgt_hdl, event); + + target_if_smart_antenna_enable(psoc, tgt_hdl, event); + + target_if_cfr_support_enable(psoc, tgt_hdl, event); + + target_if_peer_cfg_enable(psoc, tgt_hdl, event); + + target_if_atf_cfg_enable(psoc, tgt_hdl, event); + + if (!wmi_service_enabled(wmi_handle, wmi_service_ext_msg)) + target_if_qwrap_cfg_enable(psoc, tgt_hdl, event); + + target_if_lteu_cfg_enable(psoc, tgt_hdl, event); + + if (wmi_service_enabled(wmi_handle, wmi_service_rx_fse_support)) + wlan_psoc_nif_fw_ext_cap_set(psoc, + WLAN_SOC_CEXT_RX_FSE_SUPPORT); + + /* override derived value, if it exceeds max peer count */ + if ((wlan_psoc_get_max_peer_count(psoc) > + tgt_hdl->info.wlan_res_cfg.num_active_peers) && + (wlan_psoc_get_max_peer_count(psoc) < + (tgt_hdl->info.wlan_res_cfg.num_peers - + tgt_hdl->info.wlan_res_cfg.num_vdevs))) { + tgt_hdl->info.wlan_res_cfg.num_peers = + wlan_psoc_get_max_peer_count(psoc) + + tgt_hdl->info.wlan_res_cfg.num_vdevs; + } + legacy_callback = target_if_get_psoc_legacy_service_ready_cb(); + if (!legacy_callback) { + err_code = -EINVAL; + goto exit; + } + + err_code = legacy_callback(wmi_service_ready_event_id, + scn_handle, event, data_len); + init_deinit_chainmask_config(psoc, tgt_hdl); + + if (wmi_service_enabled(wmi_handle, wmi_service_mgmt_tx_wmi)) { + wlan_psoc_nif_fw_ext_cap_set(psoc, WLAN_SOC_CEXT_WMI_MGMT_REF); + target_if_debug("WMI mgmt service enabled"); + } else { + wlan_psoc_nif_fw_ext_cap_clear(psoc, + WLAN_SOC_CEXT_WMI_MGMT_REF); + target_if_debug("WMI mgmt service disabled"); + } + + err_code = init_deinit_handle_host_mem_req(psoc, tgt_hdl, event); + if (err_code != QDF_STATUS_SUCCESS) + goto exit; + + target_if_reg_set_offloaded_info(psoc); + target_if_reg_set_6ghz_info(psoc); + + if (wmi_service_enabled(wmi_handle, wmi_service_ext_msg)) { + target_if_debug("Wait for EXT message"); + } else { + target_if_debug("No EXT message, send init command"); + target_psoc_set_num_radios(tgt_hdl, 1); + init_deinit_set_send_init_cmd(psoc, tgt_hdl); + } + +exit: + return err_code; +} + +static int init_deinit_service_ext2_ready_event_handler(ol_scn_t scn_handle, + uint8_t *event, + uint32_t data_len) +{ + int err_code = 0; + struct wlan_objmgr_psoc *psoc; + struct target_psoc_info *tgt_hdl; + struct wmi_unified *wmi_handle; + struct tgt_info *info; + + if (!scn_handle) { + target_if_err("scn handle NULL in service ready handler"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn_handle); + if (!psoc) { + target_if_err("psoc is null in service ready handler"); + return -EINVAL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null in service ready ev"); + return -EINVAL; + } + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + info = (&tgt_hdl->info); + + err_code = init_deinit_populate_service_ready_ext2_param(wmi_handle, + event, info); + if (err_code) + goto exit; + + /* dbr_ring_caps could have already come as part of EXT event */ + if (info->service_ext2_param.num_dbr_ring_caps) { + err_code = init_deinit_populate_dbr_ring_cap_ext2(psoc, + wmi_handle, + event, info); + if (err_code) + goto exit; + } + + /* send init command */ + init_deinit_set_send_init_cmd(psoc, tgt_hdl); + +exit: + return err_code; +} + +static int init_deinit_service_ext_ready_event_handler(ol_scn_t scn_handle, + uint8_t *event, + uint32_t data_len) +{ + int err_code; + uint8_t num_radios; + struct wlan_objmgr_psoc *psoc; + struct target_psoc_info *tgt_hdl; + struct wmi_unified *wmi_handle; + struct tgt_info *info; + wmi_legacy_service_ready_callback legacy_callback; + + if (!scn_handle) { + target_if_err("scn handle NULL in service ready handler"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn_handle); + if (!psoc) { + target_if_err("psoc is null in service ready handler"); + return -EINVAL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null in service ready ev"); + return -EINVAL; + } + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + info = (&tgt_hdl->info); + + err_code = init_deinit_populate_service_ready_ext_param(wmi_handle, + event, &(info->service_ext_param)); + if (err_code) + goto exit; + + target_psoc_set_num_radios(tgt_hdl, 0); + err_code = init_deinit_populate_hw_mode_capability(wmi_handle, + event, tgt_hdl); + if (err_code) + goto exit; + + if (init_deinit_is_preferred_hw_mode_supported(psoc, tgt_hdl) + == FALSE) { + target_if_err("Preferred mode %d not supported", + info->preferred_hw_mode); + return -EINVAL; + } + + num_radios = target_psoc_get_num_radios_for_mode(tgt_hdl, + info->preferred_hw_mode); + + /* set number of radios based on current mode */ + target_psoc_set_num_radios(tgt_hdl, num_radios); + + target_if_print_service_ready_ext_param(psoc, tgt_hdl); + + err_code = init_deinit_populate_phy_reg_cap(psoc, wmi_handle, + event, info, false); + if (err_code) + goto exit; + + target_if_add_11ax_modes(psoc, tgt_hdl); + + if (init_deinit_chainmask_table_alloc( + &(info->service_ext_param)) == + QDF_STATUS_SUCCESS) { + err_code = init_deinit_populate_chainmask_tables(wmi_handle, + event, + &(info->service_ext_param.chainmask_table[0])); + if (err_code) + goto exit; + } + + /* dbr_ring_caps can be absent if enough space is not available */ + if (info->service_ext_param.num_dbr_ring_caps) { + err_code = init_deinit_populate_dbr_ring_cap(psoc, wmi_handle, + event, info); + if (err_code) + goto exit; + } + + err_code = init_deinit_populate_spectral_bin_scale_params(psoc, + wmi_handle, + event, info); + if (err_code) + goto exit; + + legacy_callback = target_if_get_psoc_legacy_service_ready_cb(); + if (legacy_callback) + legacy_callback(wmi_service_ready_ext_event_id, + scn_handle, event, data_len); + + target_if_qwrap_cfg_enable(psoc, tgt_hdl, event); + + target_if_set_twt_ap_pdev_count(info, tgt_hdl); + + info->wlan_res_cfg.max_bssid_indicator = + info->service_ext_param.max_bssid_indicator; + + if (wmi_service_enabled(wmi_handle, wmi_service_ext2_msg)) { + target_if_debug("Wait for EXT2 message"); + } else { + target_if_debug("No EXT2 message, send init command"); + init_deinit_set_send_init_cmd(psoc, tgt_hdl); + } + +exit: + return err_code; +} + +static int init_deinit_service_available_handler(ol_scn_t scn_handle, + uint8_t *event, + uint32_t data_len) +{ + struct wlan_objmgr_psoc *psoc; + struct target_psoc_info *tgt_hdl; + struct wmi_unified *wmi_handle; + + if (!scn_handle) { + target_if_err("scn handle NULL"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn_handle); + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return -EINVAL; + } + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + + if (wmi_save_ext_service_bitmap(wmi_handle, event, NULL) != + QDF_STATUS_SUCCESS) { + target_if_err("Failed to save ext service bitmap"); + return -EINVAL; + } + + return 0; +} + +/* MAC address fourth byte index */ +#define MAC_BYTE_4 4 + +static int init_deinit_ready_event_handler(ol_scn_t scn_handle, + uint8_t *event, + uint32_t data_len) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct target_psoc_info *tgt_hdl; + struct wmi_unified *wmi_handle; + struct wmi_host_fw_abi_ver fw_ver; + uint8_t myaddr[QDF_MAC_ADDR_SIZE]; + struct tgt_info *info; + struct wmi_host_ready_ev_param ready_ev; + wmi_legacy_service_ready_callback legacy_callback; + uint8_t num_radios, i; + uint32_t max_peers; + uint32_t max_ast_index; + target_resource_config *tgt_cfg; + + if (!scn_handle) { + target_if_err("scn handle NULL"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn_handle); + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return -EINVAL; + } + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + info = (&tgt_hdl->info); + + if (wmi_extract_fw_abi_version(wmi_handle, event, &fw_ver) == + QDF_STATUS_SUCCESS) { + info->version.wlan_ver = fw_ver.sw_version; + info->version.wlan_ver = fw_ver.abi_version; + } + + if (wmi_check_and_update_fw_version(wmi_handle, event) < 0) { + target_if_err("Version mismatch with FW"); + return -EINVAL; + } + + if (wmi_extract_ready_event_params(wmi_handle, event, &ready_ev) != + QDF_STATUS_SUCCESS) { + target_if_err("Failed to extract ready event"); + return -EINVAL; + } + + if (!ready_ev.agile_capability) + target_if_err("agile capability disabled in HW"); + else + info->wlan_res_cfg.agile_capability = ready_ev.agile_capability; + + /* Indicate to the waiting thread that the ready + * event was received + */ + info->wlan_init_status = wmi_ready_extract_init_status( + wmi_handle, event); + + legacy_callback = target_if_get_psoc_legacy_service_ready_cb(); + if (legacy_callback) + if (legacy_callback(wmi_ready_event_id, + scn_handle, event, data_len)) { + target_if_err("Legacy callback returned error!"); + tgt_hdl->info.wmi_ready = FALSE; + goto exit; + } + + num_radios = target_psoc_get_num_radios(tgt_hdl); + + if ((ready_ev.num_total_peer != 0) && + (info->wlan_res_cfg.num_peers != ready_ev.num_total_peer)) { + uint16_t num_peers = 0; + /* FW allocated number of peers is different than host + * requested. Update host max with FW reported value. + */ + target_if_err("Host Requested %d peers. FW Supports %d peers", + info->wlan_res_cfg.num_peers, + ready_ev.num_total_peer); + info->wlan_res_cfg.num_peers = ready_ev.num_total_peer; + num_peers = info->wlan_res_cfg.num_peers / num_radios; + + for (i = 0; i < num_radios; i++) { + pdev = wlan_objmgr_get_pdev_by_id(psoc, i, + WLAN_INIT_DEINIT_ID); + if (!pdev) { + target_if_err(" PDEV %d is NULL", i); + return -EINVAL; + } + + wlan_pdev_set_max_peer_count(pdev, num_peers); + wlan_objmgr_pdev_release_ref(pdev, WLAN_INIT_DEINIT_ID); + } + + wlan_psoc_set_max_peer_count(psoc, + info->wlan_res_cfg.num_peers); + } + + /* for non legacy num_total_peer will be non zero + * allocate peer memory in this case + */ + if (ready_ev.num_total_peer != 0) { + tgt_cfg = &info->wlan_res_cfg; + max_peers = tgt_cfg->num_peers + ready_ev.num_extra_peer + 1; + max_ast_index = ready_ev.max_ast_index + 1; + + if (cdp_peer_map_attach(wlan_psoc_get_dp_handle(psoc), + max_peers, max_ast_index, + tgt_cfg->peer_map_unmap_v2) != + QDF_STATUS_SUCCESS) { + target_if_err("DP peer map attach failed"); + return -EINVAL; + } + } + + + if (ready_ev.pktlog_defs_checksum) { + for (i = 0; i < num_radios; i++) { + pdev = wlan_objmgr_get_pdev_by_id(psoc, i, + WLAN_INIT_DEINIT_ID); + if (!pdev) { + target_if_err(" PDEV %d is NULL", i); + return -EINVAL; + } + target_if_set_pktlog_checksum(pdev, tgt_hdl, + ready_ev. + pktlog_defs_checksum); + wlan_objmgr_pdev_release_ref(pdev, WLAN_INIT_DEINIT_ID); + } + } + + /* + * For non-legacy HW, MAC addr list is extracted. + */ + if (num_radios > 1) { + uint8_t num_mac_addr; + wmi_host_mac_addr *addr_list; + int i; + + addr_list = wmi_ready_extract_mac_addr_list(wmi_handle, event, + &num_mac_addr); + if ((num_mac_addr >= num_radios) && (addr_list)) { + for (i = 0; i < num_radios; i++) { + WMI_HOST_MAC_ADDR_TO_CHAR_ARRAY(&addr_list[i], + myaddr); + pdev = wlan_objmgr_get_pdev_by_id(psoc, i, + WLAN_INIT_DEINIT_ID); + if (!pdev) { + target_if_err(" PDEV %d is NULL", i); + return -EINVAL; + } + wlan_pdev_set_hw_macaddr(pdev, myaddr); + wlan_objmgr_pdev_release_ref(pdev, + WLAN_INIT_DEINIT_ID); + + /* assign 1st radio addr to psoc */ + if (i == 0) + wlan_psoc_set_hw_macaddr(psoc, myaddr); + } + goto out; + } else { + target_if_err("Using default MAC addr for all radios.."); + } + } + + /* + * We extract single MAC address in two scenarios: + * 1. In non-legacy case, if addr list is NULL or num_mac_addr < num_radios + * 2. In all legacy cases + */ + for (i = 0; i < num_radios; i++) { + wmi_ready_extract_mac_addr(wmi_handle, event, myaddr); + myaddr[MAC_BYTE_4] += i; + pdev = wlan_objmgr_get_pdev_by_id(psoc, i, WLAN_INIT_DEINIT_ID); + if (!pdev) { + target_if_err(" PDEV %d is NULL", i); + return -EINVAL; + } + wlan_pdev_set_hw_macaddr(pdev, myaddr); + wlan_objmgr_pdev_release_ref(pdev, WLAN_INIT_DEINIT_ID); + /* assign 1st radio addr to psoc */ + if (i == 0) + wlan_psoc_set_hw_macaddr(psoc, myaddr); + } + +out: + target_if_btcoex_cfg_enable(psoc, tgt_hdl, event); + tgt_hdl->info.wmi_ready = TRUE; +exit: + init_deinit_wakeup_host_wait(psoc, tgt_hdl); + + return 0; +} + + +QDF_STATUS init_deinit_register_tgt_psoc_ev_handlers( + struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + wmi_unified_t wmi_handle; + QDF_STATUS retval = QDF_STATUS_SUCCESS; + + if (!psoc) { + target_if_err("psoc is null in register wmi handler"); + return QDF_STATUS_E_FAILURE; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info null in register wmi hadler"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle = (wmi_unified_t)target_psoc_get_wmi_hdl(tgt_hdl); + + retval = wmi_unified_register_event_handler(wmi_handle, + wmi_service_ready_event_id, + init_deinit_service_ready_event_handler, + WMI_RX_WORK_CTX); + retval = wmi_unified_register_event_handler(wmi_handle, + wmi_service_ready_ext_event_id, + init_deinit_service_ext_ready_event_handler, + WMI_RX_WORK_CTX); + retval = wmi_unified_register_event_handler(wmi_handle, + wmi_service_available_event_id, + init_deinit_service_available_handler, + WMI_RX_UMAC_CTX); + retval = wmi_unified_register_event_handler(wmi_handle, + wmi_ready_event_id, + init_deinit_ready_event_handler, + WMI_RX_WORK_CTX); + retval = wmi_unified_register_event_handler( + wmi_handle, + wmi_service_ready_ext2_event_id, + init_deinit_service_ext2_ready_event_handler, + WMI_RX_WORK_CTX); + + + return retval; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/service_ready_util.c b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/service_ready_util.c new file mode 100644 index 0000000000000000000000000000000000000000..21bc668947b8dbfaf7f43256c2715f28236aa2a8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/service_ready_util.c @@ -0,0 +1,865 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: service_ready_util.c + * + * Public APIs implementation source file for accessing (ext)service ready + * data from psoc object + */ +#include "service_ready_util.h" +#include +#include +#include + +QDF_STATUS init_deinit_chainmask_table_alloc( + struct wlan_psoc_host_service_ext_param *ser_ext_par) +{ + int i; + uint32_t alloc_size; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (ser_ext_par->num_chainmask_tables == 0) + return QDF_STATUS_E_NOSUPPORT; + + for (i = 0; i < ser_ext_par->num_chainmask_tables; i++) { + if (ser_ext_par->chainmask_table[i].num_valid_chainmasks > + (UINT_MAX / sizeof( + struct wlan_psoc_host_chainmask_capabilities))) { + target_if_err("invalid valid chanmask num %d", + ser_ext_par->chainmask_table[i]. + num_valid_chainmasks); + status = QDF_STATUS_E_FAILURE; + break; + } + alloc_size = + (sizeof(struct wlan_psoc_host_chainmask_capabilities) * + ser_ext_par->chainmask_table[i].num_valid_chainmasks); + + ser_ext_par->chainmask_table[i].cap_list = + qdf_mem_malloc(alloc_size); + if (!ser_ext_par->chainmask_table[i].cap_list) { + init_deinit_chainmask_table_free(ser_ext_par); + status = QDF_STATUS_E_NOMEM; + break; + } + } + + return status; +} + +qdf_export_symbol(init_deinit_chainmask_table_alloc); + +QDF_STATUS init_deinit_chainmask_table_free( + struct wlan_psoc_host_service_ext_param *ser_ext_par) +{ + struct wlan_psoc_host_chainmask_table *table; + int i; + + for (i = 0; i < ser_ext_par->num_chainmask_tables; i++) { + table = &(ser_ext_par->chainmask_table[i]); + if (table->cap_list) { + qdf_mem_free(table->cap_list); + table->cap_list = NULL; + } + } + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(init_deinit_chainmask_table_free); + +int init_deinit_populate_service_bitmap( + wmi_unified_t wmi_handle, uint8_t *event, + uint32_t *service_bitmap) +{ + QDF_STATUS status; + + status = wmi_save_service_bitmap(wmi_handle, event, service_bitmap); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse service bitmap"); + return qdf_status_to_os_return(status); + } + + return 0; +} + +int init_deinit_populate_fw_version_cmd(wmi_unified_t wmi_handle, + uint8_t *event) +{ + QDF_STATUS status; + + status = wmi_unified_save_fw_version_cmd(wmi_handle, event); + if (QDF_IS_STATUS_ERROR(status)) + target_if_err("failed to save fw version"); + + return 0; +} + +int init_deinit_populate_target_cap( + wmi_unified_t wmi_handle, uint8_t *event, + struct wlan_psoc_target_capability_info *cap) +{ + QDF_STATUS status; + + status = wmi_get_target_cap_from_service_ready(wmi_handle, event, cap); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse target cap"); + return qdf_status_to_os_return(status); + } + + return 0; +} + +int init_deinit_populate_service_ready_ext_param( + wmi_unified_t handle, uint8_t *evt, + struct wlan_psoc_host_service_ext_param *param) +{ + QDF_STATUS status; + + status = wmi_extract_service_ready_ext(handle, evt, param); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse wmi service ready ext param"); + return qdf_status_to_os_return(status); + } + + return 0; +} + +int init_deinit_populate_service_ready_ext2_param( + wmi_unified_t handle, uint8_t *evt, + struct tgt_info *info) +{ + QDF_STATUS status; + + status = wmi_extract_service_ready_ext2(handle, evt, + &info->service_ext2_param); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse wmi service ready ext param"); + return qdf_status_to_os_return(status); + } + + return 0; +} + +int init_deinit_populate_chainmask_tables( + wmi_unified_t handle, uint8_t *evt, + struct wlan_psoc_host_chainmask_table *param) +{ + QDF_STATUS status; + + status = wmi_extract_chainmask_tables(handle, evt, param); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse wmi service ready ext param"); + return qdf_status_to_os_return(status); + } + + return 0; +} + +int init_deinit_populate_mac_phy_capability( + wmi_unified_t handle, uint8_t *evt, + struct wlan_psoc_host_hw_mode_caps *hw_cap, struct tgt_info *info) +{ + QDF_STATUS status; + uint32_t hw_mode_id; + uint32_t phy_bit_map; + uint8_t mac_phy_id; + + hw_mode_id = hw_cap->hw_mode_id; + phy_bit_map = hw_cap->phy_id_map; + target_if_debug("hw_mode_id %d phy_bit_map 0x%x", + hw_mode_id, phy_bit_map); + + mac_phy_id = 0; + while (phy_bit_map) { + if (info->total_mac_phy_cnt >= PSOC_MAX_MAC_PHY_CAP) { + target_if_err("total mac phy exceeds max limit %d", + info->total_mac_phy_cnt); + return -EINVAL; + } + + status = wmi_extract_mac_phy_cap_service_ready_ext(handle, + evt, hw_mode_id, mac_phy_id, + &(info->mac_phy_cap[info->total_mac_phy_cnt])); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse mac phy capability"); + return qdf_status_to_os_return(status); + } + info->mac_phy_cap[info->total_mac_phy_cnt].hw_mode_config_type + = hw_cap->hw_mode_config_type; + info->total_mac_phy_cnt++; + phy_bit_map &= (phy_bit_map - 1); + mac_phy_id++; + } + target_if_debug("total_mac_phy_cnt %d", info->total_mac_phy_cnt); + + return 0; +} + +static int get_hw_mode(wmi_unified_t handle, uint8_t *evt, uint8_t hw_idx, + struct wlan_psoc_host_hw_mode_caps *cap) +{ + QDF_STATUS status; + + status = wmi_extract_hw_mode_cap_service_ready_ext(handle, evt, + hw_idx, cap); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse hw mode capability"); + return qdf_status_to_os_return(status); + } + + return 0; +} + +static int get_sar_version(wmi_unified_t handle, uint8_t *evt, + struct wlan_psoc_host_service_ext_param *ext_param) +{ + QDF_STATUS status; + + status = wmi_extract_sar_cap_service_ready_ext(handle, evt, ext_param); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse sar capability"); + return qdf_status_to_os_return(status); + } + + return 0; +} + +static bool new_hw_mode_preferred(uint32_t current_hw_mode, + uint32_t new_hw_mode) +{ + uint8_t hw_mode_id_precedence[WMI_HOST_HW_MODE_MAX + 1] = { 5, 1, 4, + 3, 0, 2, + 6 }; + + if (current_hw_mode > WMI_HOST_HW_MODE_MAX || + new_hw_mode > WMI_HOST_HW_MODE_MAX) + return false; + + /* Above precedence is defined by low to high, lower the value + * higher the precedence + */ + if (hw_mode_id_precedence[current_hw_mode] > + hw_mode_id_precedence[new_hw_mode]) + return true; + + return false; +} + +/** + * select_preferred_mode() - Select preferred hw mode based on current mode. + * @tgt_hdl: target_psoc_info object + * @hw_mode_caps: HW mode caps of new mode id that needs to checked for + * selection. + * @current_mode: Current mode. + * + * API to select preferred hw mode based on the current config. + * Based on host config for preferred mode, final mode selected as follows- + * 1) If preferred_mode == WMI_HOST_HW_MODE_DETECT, Then select mode from FW + * supported modes such that it is a super set of all modes FW advertises. + * For e.g., If FW supports DBS(2 radio) and DBS_SBS(3 radio)- Choose DBS_SBS + * 2) If preferred_mode == WMI_HOST_HW_MODE_MAX, Then do not select any mode + * from FW advertised modes. Host needs to maintain all modes supported in FW + * and can switch dynamically. + * 3) Else, A valid preferred_mode is set, Hence check if this is part of FW + * supported modes. If it is found, then use it to bring up the device. + * + * Return: selected_mode based on the above criteria. + */ +static uint32_t +select_preferred_hw_mode(struct target_psoc_info *tgt_hdl, + struct wlan_psoc_host_hw_mode_caps *hw_mode_caps, + uint32_t current_mode) +{ + uint32_t preferred_mode, selected_mode = current_mode; + struct tgt_info *info; + + info = &tgt_hdl->info; + preferred_mode = target_psoc_get_preferred_hw_mode(tgt_hdl); + if (preferred_mode == WMI_HOST_HW_MODE_DETECT) { + uint32_t new_mode = hw_mode_caps->hw_mode_id; + + /* Choose hw_mode_id based on precedence */ + if (new_hw_mode_preferred(selected_mode, new_mode)) { + selected_mode = new_mode; + qdf_mem_copy(&info->hw_mode_cap, hw_mode_caps, + sizeof(info->hw_mode_cap)); + } + } else if ((preferred_mode != WMI_HOST_HW_MODE_MAX) && + (preferred_mode == hw_mode_caps->hw_mode_id)) { + selected_mode = preferred_mode; + qdf_mem_copy(&info->hw_mode_cap, hw_mode_caps, + sizeof(info->hw_mode_cap)); + } + + return selected_mode; +} + +int init_deinit_populate_hw_mode_capability( + wmi_unified_t wmi_handle, uint8_t *event, + struct target_psoc_info *tgt_hdl) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint8_t hw_idx; + uint32_t num_hw_modes; + struct wlan_psoc_host_hw_mode_caps hw_mode_caps[PSOC_MAX_HW_MODE]; + uint32_t preferred_mode, selected_mode = WMI_HOST_HW_MODE_MAX; + struct tgt_info *info; + + info = &tgt_hdl->info; + num_hw_modes = info->service_ext_param.num_hw_modes; + if (num_hw_modes > PSOC_MAX_HW_MODE) { + target_if_err("invalid num_hw_modes %d", num_hw_modes); + return -EINVAL; + } + target_if_debug("num_hw_modes %d", num_hw_modes); + + qdf_mem_zero(&hw_mode_caps, sizeof(hw_mode_caps)); + info->hw_modes.num_modes = 0; + info->hw_mode_cap.hw_mode_id = WMI_HOST_HW_MODE_MAX; + + preferred_mode = target_psoc_get_preferred_hw_mode(tgt_hdl); + for (hw_idx = 0; hw_idx < num_hw_modes; hw_idx++) { + status = get_hw_mode(wmi_handle, event, hw_idx, + &hw_mode_caps[hw_idx]); + if (status) + goto return_exit; + + if (hw_idx < WMI_HOST_HW_MODE_MAX) { + info->hw_modes.hw_mode_ids[hw_idx] = + hw_mode_caps[hw_idx].hw_mode_id; + info->hw_modes.num_modes++; + } + + status = init_deinit_populate_mac_phy_capability(wmi_handle, + event, &hw_mode_caps[hw_idx], info); + if (status) + goto return_exit; + + selected_mode = select_preferred_hw_mode(tgt_hdl, + &hw_mode_caps[hw_idx], + selected_mode); + } + + if (preferred_mode == WMI_HOST_HW_MODE_DETECT) { + target_if_info("Preferred mode is not set, use mode id %d\n", + selected_mode); + target_psoc_set_preferred_hw_mode(tgt_hdl, selected_mode); + } + + status = get_sar_version(wmi_handle, event, &info->service_ext_param); + target_if_debug("sar version %d", info->service_ext_param.sar_version); + +return_exit: + return qdf_status_to_os_return(status); +} + +int init_deinit_populate_dbr_ring_cap(struct wlan_objmgr_psoc *psoc, + wmi_unified_t handle, uint8_t *event, + struct tgt_info *info) + +{ + uint8_t cap_idx; + uint32_t num_dbr_ring_caps; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + num_dbr_ring_caps = info->service_ext_param.num_dbr_ring_caps; + target_if_debug("Num DMA Capabilities = %d", num_dbr_ring_caps); + + if (!num_dbr_ring_caps) + return 0; + + info->dbr_ring_cap = qdf_mem_malloc( + sizeof(struct wlan_psoc_host_dbr_ring_caps) * + num_dbr_ring_caps); + + if (!info->dbr_ring_cap) + return -EINVAL; + + for (cap_idx = 0; cap_idx < num_dbr_ring_caps; cap_idx++) { + status = wmi_extract_dbr_ring_cap_service_ready_ext(handle, + event, cap_idx, + &(info->dbr_ring_cap[cap_idx])); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("Extraction of DMA cap failed"); + goto free_and_return; + } + } + + return 0; + +free_and_return: + qdf_mem_free(info->dbr_ring_cap); + info->dbr_ring_cap = NULL; + + return qdf_status_to_os_return(status); +} + +int init_deinit_populate_dbr_ring_cap_ext2(struct wlan_objmgr_psoc *psoc, + wmi_unified_t handle, uint8_t *event, + struct tgt_info *info) + +{ + uint8_t cap_idx; + uint32_t num_dbr_ring_caps; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct wlan_psoc_host_dbr_ring_caps *param; + + /* + * If FW had already sent this info as part of EXT event, + * we need to discard the same and use the info from EXT2. + */ + if (info->service_ext_param.num_dbr_ring_caps) { + target_if_debug("dbr_ring_caps already populated"); + info->service_ext_param.num_dbr_ring_caps = 0; + qdf_mem_free(info->dbr_ring_cap); + info->dbr_ring_cap = NULL; + } + + num_dbr_ring_caps = info->service_ext2_param.num_dbr_ring_caps; + target_if_debug("Num DMA Capabilities = %d", num_dbr_ring_caps); + + if (!num_dbr_ring_caps) + return 0; + + info->dbr_ring_cap = qdf_mem_malloc( + sizeof(struct wlan_psoc_host_dbr_ring_caps) * + num_dbr_ring_caps); + + if (!info->dbr_ring_cap) + return -EINVAL; + + for (cap_idx = 0; cap_idx < num_dbr_ring_caps; cap_idx++) { + param = &info->dbr_ring_cap[cap_idx]; + status = wmi_extract_dbr_ring_cap_service_ready_ext2(handle, + event, + cap_idx, + param); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("Extraction of DMA cap failed"); + goto free_and_return; + } + } + + return 0; + +free_and_return: + qdf_mem_free(info->dbr_ring_cap); + info->dbr_ring_cap = NULL; + + return qdf_status_to_os_return(status); +} +int init_deinit_populate_spectral_bin_scale_params( + struct wlan_objmgr_psoc *psoc, wmi_unified_t handle, + uint8_t *event, struct tgt_info *info) + +{ + uint8_t param_idx; + uint32_t num_bin_scaling_params; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + num_bin_scaling_params = info->service_ext_param.num_bin_scaling_params; + + if (!num_bin_scaling_params) + return 0; + + info->scaling_params = qdf_mem_malloc( + sizeof(struct wlan_psoc_host_spectral_scaling_params) * + num_bin_scaling_params); + + if (!info->scaling_params) { + target_if_err("Mem alloc for bin scaling params failed"); + return -EINVAL; + } + + for (param_idx = 0; param_idx < num_bin_scaling_params; param_idx++) { + status = wmi_extract_spectral_scaling_params_service_ready_ext( + handle, + event, param_idx, + &info->scaling_params[param_idx]); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("Extraction of scaling params failed"); + goto free_and_return; + } + } + + return 0; + +free_and_return: + qdf_mem_free(info->scaling_params); + info->scaling_params = NULL; + + return qdf_status_to_os_return(status); +} + +QDF_STATUS init_deinit_dbr_ring_cap_free( + struct target_psoc_info *tgt_psoc_info) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (tgt_psoc_info->info.dbr_ring_cap) { + qdf_mem_free(tgt_psoc_info->info.dbr_ring_cap); + tgt_psoc_info->info.dbr_ring_cap = NULL; + } + + return status; +} +qdf_export_symbol(init_deinit_dbr_ring_cap_free); + +QDF_STATUS init_deinit_spectral_scaling_params_free( + struct target_psoc_info *tgt_psoc_info) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (tgt_psoc_info->info.scaling_params) { + qdf_mem_free(tgt_psoc_info->info.scaling_params); + tgt_psoc_info->info.scaling_params = NULL; + status = QDF_STATUS_SUCCESS; + } + + return status; +} + +qdf_export_symbol(init_deinit_spectral_scaling_params_free); + +#ifdef DBS_SBS_BAND_LIMITATION_WAR +#define phy0 0 +#define phy2 2 +#define NUM_RF_MODES 2 /* (DBS + DBS_SBS) */ +/** + * init_deinit_update_phy_reg_cap() - Update the low/high frequency for phy0. + * @psoc: PSOC common object + * @info: FW or lower layer related info + * @wlan_psoc_host_hal_reg_capabilities_ext: Reg caps per PHY + * + * For the DBS_SBS capable board, update the low or high frequency + * for phy0 by leveraging the frequency populated for phy2 + * depending on whether it is mapped to upper or lower 5G band by + * FW/HAL-PHY. + */ +static void init_deinit_update_phy_reg_cap(struct wlan_objmgr_psoc *psoc, + struct tgt_info *info, + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap) +{ + struct target_psoc_info *tgt_hdl; + enum wmi_host_hw_mode_config_type mode; + uint32_t num_hw_modes; + uint8_t idx; + + tgt_hdl = (struct target_psoc_info *)wlan_psoc_get_tgt_if_handle( + psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null in service ready ev"); + return; + } + + mode = target_psoc_get_preferred_hw_mode(tgt_hdl); + + num_hw_modes = info->hw_modes.num_modes; + + if ((mode != WMI_HOST_HW_MODE_DBS) || (num_hw_modes < NUM_RF_MODES)) + return; + + for (idx = 0; idx < num_hw_modes; idx++) + if (info->hw_modes.hw_mode_ids[idx] == + WMI_HOST_HW_MODE_DBS_SBS) { + if (reg_cap[phy0].low_5ghz_chan > + reg_cap[phy2].low_5ghz_chan) + reg_cap[phy0].low_5ghz_chan = + reg_cap[phy2].low_5ghz_chan; + else if (reg_cap[phy0].high_5ghz_chan < + reg_cap[phy2].high_5ghz_chan) + reg_cap[phy0].high_5ghz_chan = + reg_cap[phy2].high_5ghz_chan; + break; + } +} +#else +static void init_deinit_update_phy_reg_cap(struct wlan_objmgr_psoc *psoc, + struct tgt_info *info, + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap) +{ +} +#endif + +int init_deinit_populate_phy_reg_cap(struct wlan_objmgr_psoc *psoc, + wmi_unified_t handle, uint8_t *event, + struct tgt_info *info, + bool service_ready) +{ + uint8_t reg_idx; + uint32_t num_phy_reg_cap; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct wlan_psoc_hal_reg_capability cap; + struct wlan_psoc_host_hal_reg_capabilities_ext + reg_cap[PSOC_MAX_PHY_REG_CAP] = {{0} }; + + if (service_ready) { + status = wmi_extract_hal_reg_cap(handle, event, &cap); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse hal reg cap"); + return qdf_status_to_os_return(status); + } + info->service_ext_param.num_phy = 1; + num_phy_reg_cap = 1; + reg_cap[0].phy_id = 0; + qdf_mem_copy(&(reg_cap[0].eeprom_reg_domain), &cap, + sizeof(struct wlan_psoc_hal_reg_capability)); + target_if_debug("FW wireless modes 0x%x", + reg_cap[0].wireless_modes); + } else { + num_phy_reg_cap = info->service_ext_param.num_phy; + if (num_phy_reg_cap > PSOC_MAX_PHY_REG_CAP) { + target_if_err("Invalid num_phy_reg_cap %d", + num_phy_reg_cap); + return -EINVAL; + } + target_if_debug("num_phy_reg_cap %d", num_phy_reg_cap); + + for (reg_idx = 0; reg_idx < num_phy_reg_cap; reg_idx++) { + status = wmi_extract_reg_cap_service_ready_ext(handle, + event, reg_idx, &(reg_cap[reg_idx])); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse reg cap"); + return qdf_status_to_os_return(status); + } + } + } + + init_deinit_update_phy_reg_cap(psoc, info, reg_cap); + status = ucfg_reg_set_hal_reg_cap(psoc, reg_cap, num_phy_reg_cap); + + return qdf_status_to_os_return(status); +} + +static bool init_deinit_regdmn_160mhz_support( + struct wlan_psoc_host_hal_reg_capabilities_ext *hal_cap) +{ + return ((hal_cap->wireless_modes & + WMI_HOST_REGDMN_MODE_11AC_VHT160) != 0); +} + +static bool init_deinit_regdmn_80p80mhz_support( + struct wlan_psoc_host_hal_reg_capabilities_ext *hal_cap) +{ + return ((hal_cap->wireless_modes & + WMI_HOST_REGDMN_MODE_11AC_VHT80_80) != 0); +} + +static bool init_deinit_vht_160mhz_is_supported(uint32_t vhtcap) +{ + return ((vhtcap & WLAN_VHTCAP_SUP_CHAN_WIDTH_160) != 0); +} + +static bool init_deinit_vht_80p80mhz_is_supported(uint32_t vhtcap) +{ + return ((vhtcap & WLAN_VHTCAP_SUP_CHAN_WIDTH_80_160) != 0); +} + +static bool init_deinit_vht_160mhz_shortgi_is_supported(uint32_t vhtcap) +{ + return ((vhtcap & WLAN_VHTCAP_SHORTGI_160) != 0); +} + +QDF_STATUS init_deinit_validate_160_80p80_fw_caps( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + bool wireless_mode_160mhz = false; + bool wireless_mode_80p80mhz = false; + bool vhtcap_160mhz = false; + bool vhtcap_80p80_160mhz = false; + bool vhtcap_160mhz_sgi = false; + bool valid = false; + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap; + struct wmi_unified *wmi_handle; + + if (!tgt_hdl) { + target_if_err( + "target_psoc_info is null in validate 160n80p80 cap check"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + + if ((tgt_hdl->info.target_type == TARGET_TYPE_QCA8074) || + (tgt_hdl->info.target_type == TARGET_TYPE_QCA8074V2) || + (tgt_hdl->info.target_type == TARGET_TYPE_QCA6290)) { + /** + * Return true for now. This is not available in + * qca8074 fw yet + */ + return QDF_STATUS_SUCCESS; + } + + reg_cap = ucfg_reg_get_hal_reg_cap(psoc); + if (!reg_cap) { + target_if_err("reg cap is NULL"); + return QDF_STATUS_E_FAILURE; + } + + /* NOTE: Host driver gets vht capability and supported channel + * width / channel frequency range from FW/HALPHY and obeys it. + * Host driver is unaware of any physical filters or any other + * hardware factors that can impact these capabilities. + * These need to be correctly determined by firmware. + */ + + /*This table lists all valid and invalid combinations + * WMODE160 WMODE80_80 VHTCAP_160 VHTCAP_80+80_160 IsCombinationvalid? + * 0 0 0 0 YES + * 0 0 0 1 NO + * 0 0 1 0 NO + * 0 0 1 1 NO + * 0 1 0 0 NO + * 0 1 0 1 NO + * 0 1 1 0 NO + * 0 1 1 1 NO + * 1 0 0 0 NO + * 1 0 0 1 NO + * 1 0 1 0 YES + * 1 0 1 1 NO + * 1 1 0 0 NO + * 1 1 0 1 YES + * 1 1 1 0 NO + * 1 1 1 1 NO + */ + + /* NOTE: Last row in above table is invalid because value corresponding + * to both VHTCAP_160 and VHTCAP_80+80_160 being set is reserved as per + * 802.11ac. Only one of them can be set at a time. + */ + + wireless_mode_160mhz = init_deinit_regdmn_160mhz_support(reg_cap); + wireless_mode_80p80mhz = init_deinit_regdmn_80p80mhz_support(reg_cap); + vhtcap_160mhz = init_deinit_vht_160mhz_is_supported( + tgt_hdl->info.target_caps.vht_cap_info); + vhtcap_80p80_160mhz = init_deinit_vht_80p80mhz_is_supported( + tgt_hdl->info.target_caps.vht_cap_info); + vhtcap_160mhz_sgi = init_deinit_vht_160mhz_shortgi_is_supported( + tgt_hdl->info.target_caps.vht_cap_info); + + if (!(wireless_mode_160mhz || wireless_mode_80p80mhz || + vhtcap_160mhz || vhtcap_80p80_160mhz)) { + valid = QDF_STATUS_SUCCESS; + } else if (wireless_mode_160mhz && !wireless_mode_80p80mhz && + vhtcap_160mhz && !vhtcap_80p80_160mhz) { + valid = QDF_STATUS_SUCCESS; + } else if (wireless_mode_160mhz && wireless_mode_80p80mhz && + !vhtcap_160mhz && vhtcap_160mhz_sgi) { + valid = QDF_STATUS_SUCCESS; + } + + if (valid == QDF_STATUS_SUCCESS) { + /* + * Ensure short GI for 160 MHz is enabled + * only if 160/80+80 is supported. + */ + if (vhtcap_160mhz_sgi && + !(vhtcap_160mhz || vhtcap_80p80_160mhz)) { + valid = QDF_STATUS_E_FAILURE; + } + } + + /* Invalid config specified by FW */ + if (valid != QDF_STATUS_SUCCESS) { + target_if_err("Invalid 160/80+80 MHz config specified by FW. Take care of it first"); + target_if_err("wireless_mode_160mhz: %d, wireless_mode_80p80mhz: %d", + wireless_mode_160mhz, wireless_mode_80p80mhz); + target_if_err("vhtcap_160mhz: %d, vhtcap_80p80_160mhz: %d,vhtcap_160mhz_sgi: %d", + vhtcap_160mhz, vhtcap_80p80_160mhz, + vhtcap_160mhz_sgi); + } + return valid; +} + +void init_deinit_chainmask_config( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + tgt_hdl->info.wlan_res_cfg.tx_chain_mask = + ((1 << tgt_hdl->info.target_caps.num_rf_chains) - 1); + tgt_hdl->info.wlan_res_cfg.rx_chain_mask = + ((1 << tgt_hdl->info.target_caps.num_rf_chains) - 1); +} + +QDF_STATUS init_deinit_is_service_ext_msg( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + struct wmi_unified *wmi_handle; + + if (!tgt_hdl) { + target_if_err( + "psoc target_psoc_info is null in service ext msg"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + + if (wmi_service_enabled(wmi_handle, wmi_service_ext_msg)) + return QDF_STATUS_SUCCESS; + else + return QDF_STATUS_E_FAILURE; +} + +bool init_deinit_is_preferred_hw_mode_supported( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + uint16_t i; + struct tgt_info *info; + + if (!tgt_hdl) { + target_if_err( + "psoc target_psoc_info is null in service ext msg"); + return FALSE; + } + + info = &tgt_hdl->info; + + if (info->preferred_hw_mode == WMI_HOST_HW_MODE_MAX) + return TRUE; + + if (wlan_psoc_nif_feat_cap_get(psoc, WLAN_SOC_F_DYNAMIC_HW_MODE)) { + if (!wlan_psoc_nif_fw_ext_cap_get(psoc, + WLAN_SOC_CEXT_DYNAMIC_HW_MODE)) { + target_if_err( + "WMI service bit for DYNAMIC HW mode is not set!"); + return FALSE; + } + } + + for (i = 0; i < target_psoc_get_total_mac_phy_cnt(tgt_hdl); i++) { + if (info->mac_phy_cap[i].hw_mode_id == info->preferred_hw_mode) + return TRUE; + } + + return FALSE; +} + +void init_deinit_wakeup_host_wait( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + if (!tgt_hdl) { + target_if_err("psoc target_psoc_info is null in target ready"); + return; + } + qdf_event_set(&tgt_hdl->info.event); +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/mlme/psoc/inc/target_if_psoc_timer_tx_ops.h b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/psoc/inc/target_if_psoc_timer_tx_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ad781db7fb60fd90a152db1f52ce527df8a7cb30 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/psoc/inc/target_if_psoc_timer_tx_ops.h @@ -0,0 +1,94 @@ + +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_psoc_timer_tx_ops.h + * + * This file provide declaration for APIs registered through lmac Tx Ops + */ + +#ifndef __TARGET_IF_PSOC_TIMER_TX_OPS_H__ +#define __TARGET_IF_PSOC_TIMER_TX_OPS_H__ + +/** + * target_if_psoc_vdev_rsp_timer_inuse() - API to check if the response timer + * for vdev is inuse + * @psoc: Psoc object + * @vdev_id: Vdev object id + * + * Return: QDF_STATUS_E_ALREADY in case the timer is inuse or QDF_STATUS_SUCCESS + */ +QDF_STATUS target_if_psoc_vdev_rsp_timer_inuse(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id); + +/** + * target_if_flush_psoc_vdev_timers() - API to flush target_if response timers + * for vdev + * @psoc: pointer to psoc object + * + * This API is used to flush target_if response timer. This API used while + * wlan driver shutdown. + * + * Return: none + */ +void target_if_flush_psoc_vdev_timers(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_psoc_vdev_rsp_timer_init() - API to initialize response timers for + * vdev from psoc + * @psoc: pointer to psoc object + * @vdev_id: vdev id for which response timer to be retrieved + * + * This API is used to initialize vdev response timer for vdev-id. + * + * Return: QDF_STATUS + */ +QDF_STATUS target_if_psoc_vdev_rsp_timer_init(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id); + +/** + * target_if_psoc_vdev_rsp_timer_deinit() - API to de-initialize response timers + * for vdev from psoc + * @psoc: pointer to psoc object + * @vdev_id: vdev id for which response timer to be retrieved + * + * This API is used to de-initialize vdev response timer from vdev-id. + * + * Return: none + */ +void target_if_psoc_vdev_rsp_timer_deinit(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id); + +/** + * target_if_vdev_mgr_rsp_timer_mod() - API to modify time of response timers + * for vdev from psoc + * @psoc: pointer to psoc object + * @vdev_id: vdev id for which response timer to be retrieved + * @mseconds: milli seconds + * + * This API is used to modify vdev response timer for vdev-id. + * + * Return: none + */ +QDF_STATUS target_if_vdev_mgr_rsp_timer_mod( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + int mseconds); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/mlme/psoc/inc/target_if_psoc_wake_lock.h b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/psoc/inc/target_if_psoc_wake_lock.h new file mode 100644 index 0000000000000000000000000000000000000000..7451dd5cb5646a0d686b3296c493141151d7e9d2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/psoc/inc/target_if_psoc_wake_lock.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_psoc_wake_lock.h + * + * This file provides declaration for wakelock APIs + */ + +#ifndef __TARGET_IF_PSOC_WAKE_LOCK_H__ +#define __TARGET_IF_PSOC_WAKE_LOCK_H__ + +#include +#include + +#ifdef FEATURE_VDEV_RSP_WAKELOCK +/** + * struct wlan_vdev_wakelock - vdev wake lock sub structure + * @start_wakelock: wakelock for vdev start + * @stop_wakelock: wakelock for vdev stop + * @delete_wakelock: wakelock for vdev delete + * @wmi_cmd_rsp_runtime_lock: run time lock + */ +struct psoc_mlme_wakelock { + qdf_wake_lock_t start_wakelock; + qdf_wake_lock_t stop_wakelock; + qdf_wake_lock_t delete_wakelock; + qdf_runtime_lock_t wmi_cmd_rsp_runtime_lock; +}; +#endif + +enum wakelock_mode { + START_WAKELOCK, + STOP_WAKELOCK, + DELETE_WAKELOCK +}; + +#ifdef FEATURE_VDEV_RSP_WAKELOCK + +/** + * target_if_wake_lock_init() - API to initialize + wakelocks:start, + stop and delete. + * @psoc: pointer to psoc + * + * This also initialize the runtime lock + * + * Return: None + */ +void target_if_wake_lock_init(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_wake_lock_deinit() - API to destroy + wakelocks:start, stop and delete. + * @psoc: pointer to psoc + * + * This also destroy the runtime lock + * + * Return: None + */ +void target_if_wake_lock_deinit(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_start_wake_lock_timeout_acquire() - acquire the + vdev start wakelock + * @psoc: pointer to psoc + * + * This also acquires the target_if runtime pm lock. + * + * Return: Success/Failure + */ +QDF_STATUS target_if_wake_lock_timeout_acquire(struct wlan_objmgr_psoc *psoc, + enum wakelock_mode mode); +/** + * target_if_start_wake_lock_timeout_release() - release the + start wakelock + * @psoc: pointer to psoc + * + * This also release the target_if runtime pm lock. + * + * Return: Success/Failure + */ +QDF_STATUS target_if_wake_lock_timeout_release(struct wlan_objmgr_psoc *psoc, + enum wakelock_mode mode); +#else +static inline void target_if_wake_lock_init(struct wlan_objmgr_psoc *psoc) +{ +} + +static inline void target_if_wake_lock_deinit(struct wlan_objmgr_psoc *psoc) +{ +} + +static inline QDF_STATUS target_if_wake_lock_timeout_acquire( + struct wlan_objmgr_psoc *psoc, + enum wakelock_mode mode) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS target_if_wake_lock_timeout_release( + struct wlan_objmgr_psoc *psoc, + enum wakelock_mode mode) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/mlme/psoc/src/target_if_psoc_timer_tx_ops.c b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/psoc/src/target_if_psoc_timer_tx_ops.c new file mode 100644 index 0000000000000000000000000000000000000000..7936e1f83415b3c5cb28905ce8a1cee00dab1dac --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/psoc/src/target_if_psoc_timer_tx_ops.c @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_psoc_timer_tx_ops.c + * + * This file provide definition for APIs registered through lmac Tx Ops + */ + +#include +#include +#include +#include +#include +#include + +QDF_STATUS target_if_psoc_vdev_rsp_timer_inuse(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id) +{ + struct vdev_response_timer *vdev_rsp; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + + if (vdev_id >= WLAN_UMAC_PSOC_MAX_VDEVS) { + mlme_err("Invalid vdev id passed VDEV_%d", vdev_id); + return QDF_STATUS_E_INVAL; + } + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops && !rx_ops->psoc_get_vdev_response_timer_info) { + mlme_err("VDEV_%d PSOC_%d No Rx Ops", vdev_id, + wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, vdev_id); + if (!vdev_rsp) { + mlme_err("vdev response is NULL for VDEV_%d PSOC_%d", + vdev_id, wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + if (qdf_atomic_read(&vdev_rsp->rsp_timer_inuse)) { + mlme_err("vdev response timer still inuse VDEV_%d PSOC_%d", + vdev_id, wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_ALREADY; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_psoc_vdev_rsp_timer_init(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id) +{ + struct vdev_response_timer *vdev_rsp; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + + if (vdev_id >= WLAN_UMAC_PSOC_MAX_VDEVS) { + mlme_err("Invalid vdev id passed VDEV_%d PSOC_%d", vdev_id, + wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops && !rx_ops->psoc_get_vdev_response_timer_info) { + mlme_err("VDEV_%d PSOC_%d No Rx Ops", vdev_id, + wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, vdev_id); + if (!vdev_rsp) { + mlme_err("vdev response is NULL for VDEV_%d PSOC_%d", + vdev_id, wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + vdev_rsp->psoc = psoc; + vdev_rsp->vdev_id = vdev_id; + qdf_timer_init(NULL, &vdev_rsp->rsp_timer, + target_if_vdev_mgr_rsp_timer_mgmt_cb, + vdev_rsp, QDF_TIMER_TYPE_WAKE_APPS); + qdf_atomic_init(&vdev_rsp->rsp_timer_inuse); + + return QDF_STATUS_SUCCESS; +} + +void target_if_psoc_vdev_rsp_timer_deinit(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id) +{ + struct vdev_response_timer *vdev_rsp; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + + if (vdev_id >= WLAN_UMAC_PSOC_MAX_VDEVS) { + mlme_err("Invalid vdev id passed VDEV_%d PSOC_%d", vdev_id, + wlan_psoc_get_id(psoc)); + return; + } + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops && !rx_ops->psoc_get_vdev_response_timer_info) { + mlme_err("VDEV_%d PSOC_%d No Rx Ops", vdev_id, + wlan_psoc_get_id(psoc)); + return; + } + + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, vdev_id); + if (!vdev_rsp) { + mlme_err("vdev response is NULL for VDEV_%d PSOC_%d", + vdev_id, wlan_psoc_get_id(psoc)); + return; + } + + qdf_timer_free(&vdev_rsp->rsp_timer); + qdf_atomic_set(&vdev_rsp->rsp_timer_inuse, 0); + vdev_rsp->psoc = NULL; +} + +void target_if_flush_psoc_vdev_timers(struct wlan_objmgr_psoc *psoc) +{ + struct vdev_response_timer *vdev_rsp; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + int i; + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops && !rx_ops->psoc_get_vdev_response_timer_info) { + mlme_err("PSOC_%d No Rx Ops", wlan_psoc_get_id(psoc)); + return; + } + + for (i = 0; i < WLAN_UMAC_PSOC_MAX_VDEVS; i++) { + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, + i); + if (vdev_rsp && qdf_timer_sync_cancel(&vdev_rsp->rsp_timer)) + target_if_vdev_mgr_rsp_timer_cb(vdev_rsp); + } +} + +QDF_STATUS target_if_vdev_mgr_rsp_timer_mod( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + int mseconds) +{ + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + struct vdev_response_timer *vdev_rsp; + + if (!psoc) { + mlme_err("Invalid input"); + return QDF_STATUS_E_FAILURE; + } + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops && !rx_ops->psoc_get_vdev_response_timer_info) { + mlme_err("VDEV_%d PSOC_%d No Rx Ops", vdev_id, + wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_FAILURE; + } + + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, vdev_id); + qdf_timer_mod(&vdev_rsp->rsp_timer, mseconds); + return QDF_STATUS_SUCCESS; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/mlme/psoc/src/target_if_psoc_wake_lock.c b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/psoc/src/target_if_psoc_wake_lock.c new file mode 100644 index 0000000000000000000000000000000000000000..6028911d5a2e38bd7077247fb7ac254c2ff01e1c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/psoc/src/target_if_psoc_wake_lock.c @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_psoc_wake_lock.c + * + * This file provide definition for APIs related to wake lock + */ + +#include "qdf_lock.h" +#include +#include +#include +#include +#include +#include + +void target_if_wake_lock_init(struct wlan_objmgr_psoc *psoc) +{ + struct psoc_mlme_wakelock *psoc_wakelock; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->psoc_get_wakelock_info) { + mlme_err("psoc_id:%d No Rx Ops", wlan_psoc_get_id(psoc)); + return; + } + + psoc_wakelock = rx_ops->psoc_get_wakelock_info(psoc); + + qdf_wake_lock_create(&psoc_wakelock->start_wakelock, "vdev_start"); + qdf_wake_lock_create(&psoc_wakelock->stop_wakelock, "vdev_stop"); + qdf_wake_lock_create(&psoc_wakelock->delete_wakelock, "vdev_delete"); + + qdf_runtime_lock_init(&psoc_wakelock->wmi_cmd_rsp_runtime_lock); +} + +void target_if_wake_lock_deinit(struct wlan_objmgr_psoc *psoc) +{ + struct psoc_mlme_wakelock *psoc_wakelock; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->psoc_get_wakelock_info) { + mlme_err("psoc_id:%d No Rx Ops", wlan_psoc_get_id(psoc)); + return; + } + + psoc_wakelock = rx_ops->psoc_get_wakelock_info(psoc); + + qdf_wake_lock_destroy(&psoc_wakelock->start_wakelock); + qdf_wake_lock_destroy(&psoc_wakelock->stop_wakelock); + qdf_wake_lock_destroy(&psoc_wakelock->delete_wakelock); + + qdf_runtime_lock_deinit(&psoc_wakelock->wmi_cmd_rsp_runtime_lock); +} + +QDF_STATUS target_if_wake_lock_timeout_acquire( + struct wlan_objmgr_psoc *psoc, + enum wakelock_mode mode) +{ + struct psoc_mlme_wakelock *psoc_wakelock; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops && !rx_ops->psoc_get_wakelock_info) { + mlme_err("psoc_id:%d No Rx Ops", wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + psoc_wakelock = rx_ops->psoc_get_wakelock_info(psoc); + switch (mode) { + case START_WAKELOCK: + qdf_wake_lock_timeout_acquire(&psoc_wakelock->start_wakelock, + START_RESPONSE_TIMER); + break; + case STOP_WAKELOCK: + qdf_wake_lock_timeout_acquire(&psoc_wakelock->stop_wakelock, + STOP_RESPONSE_TIMER); + break; + case DELETE_WAKELOCK: + qdf_wake_lock_timeout_acquire(&psoc_wakelock->delete_wakelock, + DELETE_RESPONSE_TIMER); + break; + default: + target_if_err("operation mode is invalid"); + return QDF_STATUS_E_FAILURE; + } + + qdf_runtime_pm_prevent_suspend( + &psoc_wakelock->wmi_cmd_rsp_runtime_lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_wake_lock_timeout_release( + struct wlan_objmgr_psoc *psoc, + enum wakelock_mode mode) +{ + struct psoc_mlme_wakelock *psoc_wakelock; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->psoc_get_wakelock_info) { + mlme_err("psoc_id:%d No Rx Ops", wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + psoc_wakelock = rx_ops->psoc_get_wakelock_info(psoc); + switch (mode) { + case START_WAKELOCK: + qdf_wake_lock_release(&psoc_wakelock->start_wakelock, + WIFI_POWER_EVENT_WAKELOCK_WMI_CMD_RSP); + break; + case STOP_WAKELOCK: + qdf_wake_lock_release(&psoc_wakelock->stop_wakelock, + WIFI_POWER_EVENT_WAKELOCK_WMI_CMD_RSP); + break; + case DELETE_WAKELOCK: + qdf_wake_lock_release(&psoc_wakelock->delete_wakelock, + WIFI_POWER_EVENT_WAKELOCK_WMI_CMD_RSP); + break; + default: + target_if_err("operation mode is invalid"); + return QDF_STATUS_E_FAILURE; + } + + qdf_runtime_pm_allow_suspend(&psoc_wakelock->wmi_cmd_rsp_runtime_lock); + + return QDF_STATUS_SUCCESS; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/mlme/vdev_mgr/inc/target_if_vdev_mgr_rx_ops.h b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/vdev_mgr/inc/target_if_vdev_mgr_rx_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7cda52628d90b9b29e743b9b043e7fea7633adc7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/vdev_mgr/inc/target_if_vdev_mgr_rx_ops.h @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_vdev_mgr_rx_ops.h + * + * This file provides declarations for APIs registered for wmi events + */ + +#ifndef __TARGET_IF_VDEV_MGR_RX_OPS_H__ +#define __TARGET_IF_VDEV_MGR_RX_OPS_H__ + +#include +#include +#include +#include + +/** + * target_if_vdev_mgr_is_panic_allowed: API to get if panic is allowed on + * timeout + * + * Return: TRUE or FALSE when VDEV_ASSERT_MANAGEMENT is disabled else FALSE + */ +#ifdef VDEV_ASSERT_MANAGEMENT +static inline bool target_if_vdev_mgr_is_panic_allowed(void) +{ + return false; +} +#else +static inline bool target_if_vdev_mgr_is_panic_allowed(void) +{ + if (qdf_is_recovering() || qdf_is_fw_down()) + return false; + + return true; +} +#endif + + +/** + * target_if_vdev_mgr_offload_bcn_tx_status_handler() - API to handle beacon + * tx status event + * @scn: pointer to scan object + * @data: pointer to data + * @datalen: length of data + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +int target_if_vdev_mgr_offload_bcn_tx_status_handler(ol_scn_t scn, + uint8_t *data, + uint32_t datalen); + +/** + * target_if_vdev_mgr_tbttoffset_update_handler() - API to handle tbtt offset + * update event + * @scn: pointer to scan object + * @data: pointer to data + * @datalen: length of data + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +int target_if_vdev_mgr_tbttoffset_update_handler(ol_scn_t scn, + uint8_t *data, + uint32_t datalen); + +/** + * target_if_vdev_mgr_ext_tbttoffset_update_handler() - API to handle ext tbtt + * offset update event + * @scn: pointer to scan object + * @data: pointer to data + * @datalen: length of data + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +int target_if_vdev_mgr_ext_tbttoffset_update_handler(ol_scn_t scn, + uint8_t *data, + uint32_t datalen); + +/** + * target_if_vdev_mgr_is_panic_on_bug: API to get panic on bug + * + * Return: TRUE or FALSE + */ +static inline bool target_if_vdev_mgr_is_panic_on_bug(void) +{ +#ifdef PANIC_ON_BUG + return true; +#else + return false; +#endif +} + +/** + * target_if_vdev_mgr_get_rx_ops() - get rx ops + * @psoc: pointer to psoc object + * + * Return: pointer to rx ops + */ +static inline struct wlan_lmac_if_mlme_rx_ops * +target_if_vdev_mgr_get_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.rx_ops.mops; +} + +/** + * target_if_vdev_mgr_rsp_timer_mgmt_cb() - function to handle response timer + * @arg: pointer to argument + * + * Callback timer triggered when response timer expires which pass + * vdev as argument + * + * Return: status of operation. + */ +void target_if_vdev_mgr_rsp_timer_mgmt_cb(void *arg); + +/** + * target_if_vdev_mgr_wmi_event_register() - function to handle register + * events from WMI + * @psoc: pointer to psoc object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS target_if_vdev_mgr_wmi_event_register( + struct wlan_objmgr_psoc *psoc); + +/** + * target_if_vdev_mgr_wmi_event_unregister() - function to handle unregister + * events from WMI + * @psoc: pointer to psoc object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS target_if_vdev_mgr_wmi_event_unregister( + struct wlan_objmgr_psoc *psoc); + +/** + * target_if_vdev_mgr_rsp_timer_cb() - function to handle vdev related timeouts + * @vdev_rsp: pointer to vdev response timer + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on error + */ +QDF_STATUS +target_if_vdev_mgr_rsp_timer_cb(struct vdev_response_timer *vdev_rsp); + +#endif /* __TARGET_IF_VDEV_MGR_RX_OPS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/mlme/vdev_mgr/inc/target_if_vdev_mgr_tx_ops.h b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/vdev_mgr/inc/target_if_vdev_mgr_tx_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..14fd51c639d044e071c008c790a023c86b4f1e58 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/vdev_mgr/inc/target_if_vdev_mgr_tx_ops.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_vdev_mgmt_tx_ops.h + * + * This file provides declaration for APIs to send WMI commands + */ + +#ifndef __TARGET_IF_VDEV_MGR_TX_OPS_H__ +#define __TARGET_IF_VDEV_MGR_TX_OPS_H__ + +#include +#include +#include +#include + +/** + * target_if_vdev_mgr_wmi_handle_get() - API to get wmi handle from vdev + * @vdev: pointer to vdev + * + * Return: pointer to wmi_unified handle or NULL + */ +struct wmi_unified *target_if_vdev_mgr_wmi_handle_get( + struct wlan_objmgr_vdev *vdev); + +/** + * target_if_vdev_mgr_get_tx_ops() - get tx ops + * @psoc: pointer to psoc obj + * + * Return: pointer to tx ops + */ +static inline struct wlan_lmac_if_mlme_tx_ops * +target_if_vdev_mgr_get_tx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.tx_ops.mops; +} + +/** + * target_if_vdev_mgr_vdev_mlme_register_tx_ops() - define mlme lmac + * tx ops functions + * @tx_ops: pointer to lmac tx ops + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +target_if_vdev_mgr_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_vdev_mgr_assert_mgmt() - vdev assert mgmt api + * @PSOC: pointer to objmgr psoc + * @vdev_id: vdev id + * + * Return: NA + */ +#ifdef VDEV_ASSERT_MANAGEMENT +static inline void target_if_vdev_mgr_assert_mgmt( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id) +{ +} +#else +static inline void target_if_vdev_mgr_assert_mgmt( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id) + +{ + QDF_ASSERT(0); +} +#endif + +/** + * target_if_vdev_mgr_rsp_timer_stop() - API to stop response timer for + * vdev manager operations + * @psoc: pointer to psoc object + * @vdev_rsp: vdev response timer + * @clear_bit: enum of wlan_vdev_mgr_tgt_if_rsp_bit + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS target_if_vdev_mgr_rsp_timer_stop( + struct wlan_objmgr_psoc *psoc, + struct vdev_response_timer *vdev_rsp, + enum wlan_vdev_mgr_tgt_if_rsp_bit clear_bit); + +#endif /* __TARGET_IF_VDEV_MGR_TX_OPS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/mlme/vdev_mgr/src/target_if_vdev_mgr_rx_ops.c b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/vdev_mgr/src/target_if_vdev_mgr_rx_ops.c new file mode 100644 index 0000000000000000000000000000000000000000..2edff715a23e5dfdd193b18dacb147db7cca3b1b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/vdev_mgr/src/target_if_vdev_mgr_rx_ops.c @@ -0,0 +1,816 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_vdev_mgr_rx_ops.c + * + * This file provide definition for APIs registered through events received + * from FW + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static inline +void target_if_vdev_mgr_handle_recovery(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + enum qdf_hang_reason recovery_reason, + uint16_t rsp_pos) +{ + mlme_nofl_err("PSOC_%d VDEV_%d: %s rsp timeout", wlan_psoc_get_id(psoc), + vdev_id, string_from_rsp_bit(rsp_pos)); + if (target_if_vdev_mgr_is_panic_allowed()) + qdf_trigger_self_recovery(psoc, recovery_reason); + else + mlme_nofl_debug("PSOC_%d VDEV_%d: Panic not allowed", + wlan_psoc_get_id(psoc), vdev_id); +} + +QDF_STATUS target_if_vdev_mgr_rsp_timer_cb(struct vdev_response_timer *vdev_rsp) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + struct vdev_start_response start_rsp = {0}; + struct vdev_stop_response stop_rsp = {0}; + struct vdev_delete_response del_rsp = {0}; + struct peer_delete_all_response peer_del_all_rsp = {0}; + enum qdf_hang_reason recovery_reason; + uint8_t vdev_id; + uint16_t rsp_pos = RESPONSE_BIT_MAX; + + if (!vdev_rsp) { + mlme_err("Vdev response timer is NULL"); + return QDF_STATUS_E_FAILURE; + } + + psoc = vdev_rsp->psoc; + if (!psoc) { + mlme_err("PSOC is NULL"); + return QDF_STATUS_E_FAILURE; + } + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->psoc_get_vdev_response_timer_info) { + mlme_err("No Rx Ops"); + return QDF_STATUS_E_FAILURE; + } + + if (!qdf_atomic_test_bit(START_RESPONSE_BIT, &vdev_rsp->rsp_status) && + !qdf_atomic_test_bit(RESTART_RESPONSE_BIT, &vdev_rsp->rsp_status) && + !qdf_atomic_test_bit(STOP_RESPONSE_BIT, &vdev_rsp->rsp_status) && + !qdf_atomic_test_bit(DELETE_RESPONSE_BIT, &vdev_rsp->rsp_status) && + !qdf_atomic_test_bit( + PEER_DELETE_ALL_RESPONSE_BIT, + &vdev_rsp->rsp_status)) { + mlme_debug("No response bit is set, ignoring actions :%d", + vdev_rsp->vdev_id); + return QDF_STATUS_E_FAILURE; + } + + vdev_id = vdev_rsp->vdev_id; + if (vdev_id >= WLAN_UMAC_PSOC_MAX_VDEVS) { + mlme_err("Invalid VDEV_%d PSOC_%d", vdev_id, + wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_FAILURE; + } + + vdev_rsp->timer_status = QDF_STATUS_E_TIMEOUT; + if (qdf_atomic_test_bit(START_RESPONSE_BIT, + &vdev_rsp->rsp_status) || + qdf_atomic_test_bit(RESTART_RESPONSE_BIT, + &vdev_rsp->rsp_status)) { + start_rsp.vdev_id = vdev_id; + start_rsp.status = WLAN_MLME_HOST_VDEV_START_TIMEOUT; + if (qdf_atomic_test_bit(START_RESPONSE_BIT, + &vdev_rsp->rsp_status)) { + start_rsp.resp_type = + WMI_HOST_VDEV_START_RESP_EVENT; + rsp_pos = START_RESPONSE_BIT; + recovery_reason = QDF_VDEV_START_RESPONSE_TIMED_OUT; + } else { + start_rsp.resp_type = + WMI_HOST_VDEV_RESTART_RESP_EVENT; + rsp_pos = RESTART_RESPONSE_BIT; + recovery_reason = QDF_VDEV_RESTART_RESPONSE_TIMED_OUT; + } + + target_if_vdev_mgr_rsp_timer_stop(psoc, vdev_rsp, rsp_pos); + target_if_vdev_mgr_handle_recovery(psoc, vdev_id, + recovery_reason, rsp_pos); + rx_ops->vdev_mgr_start_response(psoc, &start_rsp); + } else if (qdf_atomic_test_bit(STOP_RESPONSE_BIT, + &vdev_rsp->rsp_status)) { + rsp_pos = STOP_RESPONSE_BIT; + stop_rsp.vdev_id = vdev_id; + recovery_reason = QDF_VDEV_STOP_RESPONSE_TIMED_OUT; + + target_if_vdev_mgr_rsp_timer_stop(psoc, vdev_rsp, rsp_pos); + target_if_vdev_mgr_handle_recovery(psoc, vdev_id, + recovery_reason, rsp_pos); + rx_ops->vdev_mgr_stop_response(psoc, &stop_rsp); + } else if (qdf_atomic_test_bit(DELETE_RESPONSE_BIT, + &vdev_rsp->rsp_status)) { + del_rsp.vdev_id = vdev_id; + rsp_pos = DELETE_RESPONSE_BIT; + recovery_reason = QDF_VDEV_DELETE_RESPONSE_TIMED_OUT; + target_if_vdev_mgr_rsp_timer_stop(psoc, vdev_rsp, rsp_pos); + target_if_vdev_mgr_handle_recovery(psoc, vdev_id, + recovery_reason, rsp_pos); + rx_ops->vdev_mgr_delete_response(psoc, &del_rsp); + } else if (qdf_atomic_test_bit(PEER_DELETE_ALL_RESPONSE_BIT, + &vdev_rsp->rsp_status)) { + peer_del_all_rsp.vdev_id = vdev_id; + rsp_pos = PEER_DELETE_ALL_RESPONSE_BIT; + recovery_reason = QDF_VDEV_PEER_DELETE_ALL_RESPONSE_TIMED_OUT; + target_if_vdev_mgr_rsp_timer_stop(psoc, vdev_rsp, rsp_pos); + target_if_vdev_mgr_handle_recovery(psoc, vdev_id, + recovery_reason, rsp_pos); + rx_ops->vdev_mgr_peer_delete_all_response(psoc, + &peer_del_all_rsp); + } else { + mlme_err("PSOC_%d VDEV_%d: Unknown error", + wlan_psoc_get_id(psoc), vdev_id); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef SERIALIZE_VDEV_RESP +static QDF_STATUS target_if_vdev_mgr_rsp_flush_cb(struct scheduler_msg *msg) +{ + struct vdev_response_timer *vdev_rsp; + struct wlan_objmgr_psoc *psoc; + + if (!msg->bodyptr) { + mlme_err("Message bodyptr is NULL"); + return QDF_STATUS_E_INVAL; + } + + vdev_rsp = msg->bodyptr; + if (!vdev_rsp) { + mlme_err("vdev response timer is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = vdev_rsp->psoc; + if (!psoc) { + mlme_err("PSOC is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (vdev_rsp->rsp_status) + wlan_objmgr_psoc_release_ref(psoc, WLAN_PSOC_TARGET_IF_ID); + + return QDF_STATUS_SUCCESS; +} + +static void +target_if_vdev_mgr_rsp_cb_mc_ctx(void *arg) +{ + struct scheduler_msg msg = {0}; + struct vdev_response_timer *vdev_rsp = arg; + struct wlan_objmgr_psoc *psoc; + + psoc = vdev_rsp->psoc; + if (!psoc) { + mlme_err("PSOC is NULL"); + return; + } + + msg.type = SYS_MSG_ID_MC_TIMER; + msg.reserved = SYS_MSG_COOKIE; + msg.callback = target_if_vdev_mgr_rsp_timer_cb; + msg.bodyptr = vdev_rsp; + msg.bodyval = 0; + msg.flush_callback = target_if_vdev_mgr_rsp_flush_cb; + + if (scheduler_post_message(QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_SYS, &msg) == + QDF_STATUS_SUCCESS) + return; + + mlme_err("Could not enqueue timer to timer queue"); + if (psoc) + wlan_objmgr_psoc_release_ref(psoc, WLAN_PSOC_TARGET_IF_ID); +} + +void target_if_vdev_mgr_rsp_timer_mgmt_cb(void *arg) +{ + target_if_vdev_mgr_rsp_cb_mc_ctx(arg); +} + +#define VDEV_RSP_RX_CTX WMI_RX_SERIALIZER_CTX +#else +void target_if_vdev_mgr_rsp_timer_mgmt_cb(void *arg) +{ + target_if_vdev_mgr_rsp_timer_cb(arg); +} + +#define VDEV_RSP_RX_CTX WMI_RX_UMAC_CTX +#endif + +static int target_if_vdev_mgr_start_response_handler(ol_scn_t scn, + uint8_t *data, + uint32_t datalen) +{ + QDF_STATUS status = QDF_STATUS_E_INVAL; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + struct vdev_start_response rsp = {0}; + wmi_host_vdev_start_resp vdev_start_resp; + uint8_t vdev_id; + struct vdev_response_timer *vdev_rsp; + + if (!scn || !data) { + mlme_err("Invalid input"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + mlme_err("PSOC is NULL"); + return -EINVAL; + } + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->vdev_mgr_start_response) { + mlme_err("No Rx Ops"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + mlme_err("wmi_handle is null"); + return -EINVAL; + } + + if (wmi_extract_vdev_start_resp(wmi_handle, data, &vdev_start_resp)) { + mlme_err("WMI extract failed"); + return -EINVAL; + } + + vdev_id = vdev_start_resp.vdev_id; + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, vdev_id); + if (!vdev_rsp) { + mlme_err("vdev response timer is null VDEV_%d PSOC_%d", + vdev_id, wlan_psoc_get_id(psoc)); + return -EINVAL; + } + + if (vdev_start_resp.resp_type == WMI_HOST_VDEV_RESTART_RESP_EVENT) + status = target_if_vdev_mgr_rsp_timer_stop( + psoc, vdev_rsp, + RESTART_RESPONSE_BIT); + else + status = target_if_vdev_mgr_rsp_timer_stop(psoc, vdev_rsp, + START_RESPONSE_BIT); + + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("PSOC_%d VDEV_%d: VDE MGR RSP Timer stop failed", + psoc->soc_objmgr.psoc_id, vdev_id); + goto err; + } + + rsp.vdev_id = vdev_start_resp.vdev_id; + rsp.requestor_id = vdev_start_resp.requestor_id; + rsp.status = vdev_start_resp.status; + rsp.resp_type = vdev_start_resp.resp_type; + rsp.chain_mask = vdev_start_resp.chain_mask; + rsp.smps_mode = vdev_start_resp.smps_mode; + rsp.mac_id = vdev_start_resp.mac_id; + rsp.cfgd_tx_streams = vdev_start_resp.cfgd_tx_streams; + rsp.cfgd_rx_streams = vdev_start_resp.cfgd_rx_streams; + rsp.max_allowed_tx_power = vdev_start_resp.max_allowed_tx_power; + + status = rx_ops->vdev_mgr_start_response(psoc, &rsp); + +err: + return qdf_status_to_os_return(status); +} + +static int target_if_vdev_mgr_stop_response_handler(ol_scn_t scn, + uint8_t *data, + uint32_t datalen) +{ + QDF_STATUS status = QDF_STATUS_E_INVAL; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + struct vdev_stop_response rsp = {0}; + uint32_t vdev_id; + struct vdev_response_timer *vdev_rsp; + + if (!scn || !data) { + mlme_err("Invalid input"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + mlme_err("PSOC is NULL"); + return -EINVAL; + } + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->vdev_mgr_stop_response) { + mlme_err("No Rx Ops"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + mlme_err("wmi_handle is null"); + return -EINVAL; + } + + if (wmi_extract_vdev_stopped_param(wmi_handle, data, &vdev_id)) { + mlme_err("WMI extract failed"); + return -EINVAL; + } + + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, vdev_id); + if (!vdev_rsp) { + mlme_err("vdev response timer is null VDEV_%d PSOC_%d", + vdev_id, wlan_psoc_get_id(psoc)); + return -EINVAL; + } + + status = target_if_vdev_mgr_rsp_timer_stop(psoc, vdev_rsp, + STOP_RESPONSE_BIT); + + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("PSOC_%d VDEV_%d: VDE MGR RSP Timer stop failed", + psoc->soc_objmgr.psoc_id, vdev_id); + goto err; + } + + rsp.vdev_id = vdev_id; + status = rx_ops->vdev_mgr_stop_response(psoc, &rsp); + +err: + return qdf_status_to_os_return(status); +} + +static int target_if_vdev_mgr_delete_response_handler(ol_scn_t scn, + uint8_t *data, + uint32_t datalen) +{ + QDF_STATUS status = QDF_STATUS_E_INVAL; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + struct vdev_delete_response rsp = {0}; + struct wmi_host_vdev_delete_resp vdev_del_resp; + struct vdev_response_timer *vdev_rsp; + + if (!scn || !data) { + mlme_err("Invalid input"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + mlme_err("PSOC is NULL"); + return -EINVAL; + } + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->vdev_mgr_delete_response) { + mlme_err("No Rx Ops"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + mlme_err("wmi_handle is null"); + return -EINVAL; + } + + if (wmi_extract_vdev_delete_resp(wmi_handle, data, &vdev_del_resp)) { + mlme_err("WMI extract failed"); + return -EINVAL; + } + + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, + vdev_del_resp.vdev_id); + if (!vdev_rsp) { + mlme_err("vdev response timer is null VDEV_%d PSOC_%d", + vdev_del_resp.vdev_id, wlan_psoc_get_id(psoc)); + return -EINVAL; + } + + status = target_if_vdev_mgr_rsp_timer_stop( + psoc, vdev_rsp, + DELETE_RESPONSE_BIT); + + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("PSOC_%d VDEV_%d: VDE MGR RSP Timer stop failed", + wlan_psoc_get_id(psoc), vdev_del_resp.vdev_id); + goto err; + } + + rsp.vdev_id = vdev_del_resp.vdev_id; + status = rx_ops->vdev_mgr_delete_response(psoc, &rsp); + target_if_wake_lock_timeout_release(psoc, DELETE_WAKELOCK); +err: + return qdf_status_to_os_return(status); +} + +static int target_if_vdev_mgr_peer_delete_all_response_handler( + ol_scn_t scn, + uint8_t *data, + uint32_t datalen) +{ + QDF_STATUS status = QDF_STATUS_E_INVAL; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + struct peer_delete_all_response rsp = {0}; + struct wmi_host_vdev_peer_delete_all_response_event + vdev_peer_del_all_resp; + struct vdev_response_timer *vdev_rsp; + + if (!scn || !data) { + mlme_err("Invalid input"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + mlme_err("PSOC is NULL"); + return -EINVAL; + } + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->vdev_mgr_peer_delete_all_response) { + mlme_err("No Rx Ops"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + mlme_err("wmi_handle is null"); + return -EINVAL; + } + + if (wmi_extract_vdev_peer_delete_all_response_event( + wmi_handle, data, + &vdev_peer_del_all_resp)) { + mlme_err("WMI extract failed"); + return -EINVAL; + } + + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, + vdev_peer_del_all_resp.vdev_id); + if (!vdev_rsp) { + mlme_err("vdev response timer is null VDEV_%d PSOC_%d", + vdev_peer_del_all_resp.vdev_id, + wlan_psoc_get_id(psoc)); + return -EINVAL; + } + + status = target_if_vdev_mgr_rsp_timer_stop( + psoc, + vdev_rsp, + PEER_DELETE_ALL_RESPONSE_BIT); + + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("PSOC_%d VDEV_%d: VDE MGR RSP Timer stop failed", + psoc->soc_objmgr.psoc_id, + vdev_peer_del_all_resp.vdev_id); + goto err; + } + + rsp.vdev_id = vdev_peer_del_all_resp.vdev_id; + rsp.status = vdev_peer_del_all_resp.status; + status = rx_ops->vdev_mgr_peer_delete_all_response(psoc, &rsp); + +err: + return qdf_status_to_os_return(status); +} + +int target_if_vdev_mgr_offload_bcn_tx_status_handler( + ol_scn_t scn, + uint8_t *data, + uint32_t datalen) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + uint32_t vdev_id, tx_status; + + if (!scn || !data) { + mlme_err("Invalid input"); + return -EINVAL; + } + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + mlme_err("PSOC is NULL"); + return -EINVAL; + } + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->vdev_mgr_offload_bcn_tx_status_event_handle) { + mlme_err("No Rx Ops"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + mlme_err("wmi_handle is null"); + return -EINVAL; + } + + if (wmi_extract_offload_bcn_tx_status_evt(wmi_handle, data, + &vdev_id, &tx_status)) { + mlme_err("WMI extract failed"); + return -EINVAL; + } + + status = rx_ops->vdev_mgr_offload_bcn_tx_status_event_handle( + vdev_id, + tx_status); + + return qdf_status_to_os_return(status); +} + +int target_if_vdev_mgr_tbttoffset_update_handler( + ol_scn_t scn, uint8_t *data, + uint32_t datalen) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + uint32_t num_vdevs = 0; + + if (!scn || !data) { + mlme_err("Invalid input"); + return -EINVAL; + } + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + mlme_err("PSOC is NULL"); + return -EINVAL; + } + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->vdev_mgr_tbttoffset_update_handle) { + mlme_err("No Rx Ops"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + mlme_err("wmi_handle is null"); + return -EINVAL; + } + + if (wmi_extract_tbttoffset_num_vdevs(wmi_handle, data, &num_vdevs)) { + mlme_err("WMI extract failed"); + return -EINVAL; + } + + status = rx_ops->vdev_mgr_tbttoffset_update_handle(num_vdevs, + false); + + return qdf_status_to_os_return(status); +} + +int target_if_vdev_mgr_ext_tbttoffset_update_handler( + ol_scn_t scn, + uint8_t *data, + uint32_t datalen) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + uint32_t num_vdevs = 0; + + if (!scn || !data) { + mlme_err("Invalid input"); + return -EINVAL; + } + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + mlme_err("PSOC is NULL"); + return -EINVAL; + } + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->vdev_mgr_tbttoffset_update_handle) { + mlme_err("No Rx Ops"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + mlme_err("wmi_handle is null"); + return -EINVAL; + } + + if (wmi_extract_ext_tbttoffset_num_vdevs(wmi_handle, data, + &num_vdevs)) { + mlme_err("WMI extract failed"); + return -EINVAL; + } + + status = rx_ops->vdev_mgr_tbttoffset_update_handle(num_vdevs, + true); + + return qdf_status_to_os_return(status); +} + +static int target_if_vdev_mgr_multi_vdev_restart_resp_handler( + ol_scn_t scn, + uint8_t *data, + uint32_t datalen) +{ + QDF_STATUS status = QDF_STATUS_E_INVAL; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + struct multi_vdev_restart_resp restart_resp; + struct vdev_response_timer *vdev_rsp; + uint8_t max_vdevs, vdev_idx; + + if (!scn || !data) { + mlme_err("Invalid input"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + mlme_err("PSOC is NULL"); + return -EINVAL; + } + + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->vdev_mgr_multi_vdev_restart_resp || + !rx_ops->psoc_get_vdev_response_timer_info) { + mlme_err("No Rx Ops"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + mlme_err("wmi_handle is null"); + return -EINVAL; + } + + qdf_mem_zero(&restart_resp, sizeof(restart_resp)); + if (wmi_extract_multi_vdev_restart_resp_event(wmi_handle, data, + &restart_resp)) { + mlme_err("WMI extract failed"); + return -EINVAL; + } + + max_vdevs = wlan_psoc_get_max_vdev_count(psoc); + for (vdev_idx = 0; vdev_idx < max_vdevs; vdev_idx++) { + if (!qdf_test_bit(vdev_idx, restart_resp.vdev_id_bmap)) + continue; + + mlme_debug("PSOC_%d VDEV_%d: Restart resp received", + wlan_psoc_get_id(psoc), vdev_idx); + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, + vdev_idx); + if (!vdev_rsp) { + mlme_err("PSOC_%d VDEV_%d: VDEV RSP is NULL", + wlan_psoc_get_id(psoc), vdev_idx); + continue; + } + + status = target_if_vdev_mgr_rsp_timer_stop( + psoc, vdev_rsp, RESTART_RESPONSE_BIT); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("PSOC_%d VDEV_%d: VDE MGR RSP Timer stop failed", + wlan_psoc_get_id(psoc), vdev_idx); + } + + status = rx_ops->vdev_mgr_multi_vdev_restart_resp(psoc, &restart_resp); + + return qdf_status_to_os_return(status); +} + +QDF_STATUS target_if_vdev_mgr_wmi_event_register( + struct wlan_objmgr_psoc *psoc) +{ + int retval = 0; + struct wmi_unified *wmi_handle; + + if (!psoc) { + mlme_err("PSOC is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + mlme_err("wmi_handle is null"); + return QDF_STATUS_E_INVAL; + } + + retval = wmi_unified_register_event_handler( + wmi_handle, + wmi_vdev_stopped_event_id, + target_if_vdev_mgr_stop_response_handler, + VDEV_RSP_RX_CTX); + if (retval) + mlme_err("failed to register for stop response"); + + retval = wmi_unified_register_event_handler( + wmi_handle, + wmi_vdev_delete_resp_event_id, + target_if_vdev_mgr_delete_response_handler, + VDEV_RSP_RX_CTX); + if (retval) + mlme_err("failed to register for delete response"); + + retval = wmi_unified_register_event_handler( + wmi_handle, + wmi_vdev_start_resp_event_id, + target_if_vdev_mgr_start_response_handler, + VDEV_RSP_RX_CTX); + if (retval) + mlme_err("failed to register for start response"); + + retval = wmi_unified_register_event_handler( + wmi_handle, + wmi_peer_delete_all_response_event_id, + target_if_vdev_mgr_peer_delete_all_response_handler, + VDEV_RSP_RX_CTX); + if (retval) + mlme_err("failed to register for peer delete all response"); + + retval = wmi_unified_register_event_handler( + wmi_handle, + wmi_pdev_multi_vdev_restart_response_event_id, + target_if_vdev_mgr_multi_vdev_restart_resp_handler, + VDEV_RSP_RX_CTX); + if (retval) + mlme_err("failed to register for multivdev restart response"); + + return qdf_status_from_os_return(retval); +} + +QDF_STATUS target_if_vdev_mgr_wmi_event_unregister( + struct wlan_objmgr_psoc *psoc) +{ + struct wmi_unified *wmi_handle; + + if (!psoc) { + mlme_err("PSOC is NULL"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + mlme_err("wmi_handle is null"); + return QDF_STATUS_E_INVAL; + } + + wmi_unified_unregister_event_handler( + wmi_handle, + wmi_pdev_multi_vdev_restart_response_event_id); + + wmi_unified_unregister_event_handler( + wmi_handle, + wmi_peer_delete_all_response_event_id); + + wmi_unified_unregister_event_handler(wmi_handle, + wmi_vdev_start_resp_event_id); + + wmi_unified_unregister_event_handler(wmi_handle, + wmi_vdev_delete_resp_event_id); + + wmi_unified_unregister_event_handler(wmi_handle, + wmi_vdev_stopped_event_id); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/mlme/vdev_mgr/src/target_if_vdev_mgr_tx_ops.c b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/vdev_mgr/src/target_if_vdev_mgr_tx_ops.c new file mode 100644 index 0000000000000000000000000000000000000000..83cc320070e44d13d2386c7371aae1db1fcddb34 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/mlme/vdev_mgr/src/target_if_vdev_mgr_tx_ops.c @@ -0,0 +1,1197 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_vdev_mgr_tx_ops.c + * + * This file provide definition for APIs registered through lmac Tx Ops + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static QDF_STATUS target_if_vdev_mgr_register_event_handler( + struct wlan_objmgr_psoc *psoc) +{ + return target_if_vdev_mgr_wmi_event_register(psoc); +} + +static QDF_STATUS target_if_vdev_mgr_unregister_event_handler( + struct wlan_objmgr_psoc *psoc) +{ + return target_if_vdev_mgr_wmi_event_unregister(psoc); +} + +QDF_STATUS +target_if_vdev_mgr_rsp_timer_stop(struct wlan_objmgr_psoc *psoc, + struct vdev_response_timer *vdev_rsp, + enum wlan_vdev_mgr_tgt_if_rsp_bit clear_bit) +{ + struct wlan_lmac_if_mlme_tx_ops *txops; + + txops = target_if_vdev_mgr_get_tx_ops(psoc); + if (!txops || !txops->psoc_vdev_rsp_timer_deinit) { + mlme_err("Failed to get mlme txrx_ops VDEV_%d PSOC_%d", + vdev_rsp->vdev_id, wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_FAILURE; + } + + if (qdf_atomic_test_and_clear_bit(clear_bit, &vdev_rsp->rsp_status)) { + /* + * This is triggered from timer expiry case only for + * which timer stop is not required + */ + if (vdev_rsp->timer_status != QDF_STATUS_E_TIMEOUT) + qdf_timer_stop(&vdev_rsp->rsp_timer); + + vdev_rsp->timer_status = QDF_STATUS_SUCCESS; + if (clear_bit == DELETE_RESPONSE_BIT) + txops->psoc_vdev_rsp_timer_deinit(psoc, + vdev_rsp->vdev_id); + + /* + * Releasing reference taken at the time of + * starting response timer + */ + wlan_objmgr_psoc_release_ref(psoc, WLAN_PSOC_TARGET_IF_ID); + return QDF_STATUS_SUCCESS; + } + return QDF_STATUS_E_FAILURE; +} + +static QDF_STATUS target_if_vdev_mgr_rsp_timer_start( + struct wlan_objmgr_psoc *psoc, + struct vdev_response_timer *vdev_rsp, + enum wlan_vdev_mgr_tgt_if_rsp_bit set_bit) +{ + uint8_t rsp_pos; + uint8_t vdev_id; + + /* it is expected to be only one command with FW at a time */ + for (rsp_pos = START_RESPONSE_BIT; rsp_pos <= RESPONSE_BIT_MAX; + rsp_pos++) { + if (rsp_pos != set_bit) { + if (qdf_atomic_test_bit(rsp_pos, + &vdev_rsp->rsp_status)) { + vdev_id = vdev_rsp->vdev_id; + mlme_err("PSOC_%d VDEV_%d: %s requested, waiting for %s response", + wlan_psoc_get_id(psoc), + vdev_id, + string_from_rsp_bit(set_bit), + string_from_rsp_bit(rsp_pos)); + target_if_vdev_mgr_assert_mgmt(psoc, + vdev_id); + target_if_vdev_mgr_rsp_timer_stop(psoc, + vdev_rsp, + rsp_pos); + } + } + } + + if (qdf_atomic_test_and_set_bit(set_bit, &vdev_rsp->rsp_status)) { + mlme_err("PSOC_%d VDEV_%d: %s requested, waiting for %s response", + wlan_psoc_get_id(psoc), + vdev_rsp->vdev_id, string_from_rsp_bit(set_bit), + string_from_rsp_bit(set_bit)); + target_if_vdev_mgr_assert_mgmt(psoc, vdev_rsp->vdev_id); + target_if_vdev_mgr_rsp_timer_stop(psoc, vdev_rsp, set_bit); + + qdf_atomic_set_bit(set_bit, &vdev_rsp->rsp_status); + } + + /* reference taken for timer start, will be released with stop */ + wlan_objmgr_psoc_get_ref(psoc, WLAN_PSOC_TARGET_IF_ID); + qdf_timer_start(&vdev_rsp->rsp_timer, vdev_rsp->expire_time); + + return QDF_STATUS_SUCCESS; +} + + +struct wmi_unified +*target_if_vdev_mgr_wmi_handle_get(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev; + struct wmi_unified *wmi_handle; + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + mlme_err("PDEV is NULL"); + return NULL; + } + + wmi_handle = get_wmi_unified_hdl_from_pdev(pdev); + if (!wmi_handle) { + mlme_err("wmi_handle is null"); + return NULL; + } + + return wmi_handle; +} + +static inline uint32_t +target_if_vdev_mlme_build_txbf_caps(struct wlan_objmgr_vdev *vdev) +{ + uint32_t txbf_cap; + uint32_t subfer; + uint32_t mubfer; + uint32_t subfee; + uint32_t mubfee; + uint32_t implicit_bf; + uint32_t sounding_dimension; + uint32_t bfee_sts_cap; + + txbf_cap = 0; + /* + * ensure to set these after mlme component is attached to objmgr + */ + wlan_util_vdev_get_param(vdev, WLAN_MLME_CFG_SUBFEE, &subfee); + wlan_util_vdev_get_param(vdev, WLAN_MLME_CFG_MUBFEE, &mubfee); + wlan_util_vdev_get_param(vdev, WLAN_MLME_CFG_SUBFER, &subfer); + wlan_util_vdev_get_param(vdev, WLAN_MLME_CFG_MUBFER, &mubfer); + wlan_util_vdev_get_param(vdev, WLAN_MLME_CFG_BFEE_STS_CAP, + &bfee_sts_cap); + wlan_util_vdev_get_param(vdev, WLAN_MLME_CFG_IMLICIT_BF, + &implicit_bf); + wlan_util_vdev_get_param(vdev, WLAN_MLME_CFG_SOUNDING_DIM, + &sounding_dimension); + + WMI_HOST_TXBF_CONF_SU_TX_BFEE_SET(txbf_cap, subfee); + WMI_HOST_TXBF_CONF_MU_TX_BFEE_SET(txbf_cap, mubfee); + WMI_HOST_TXBF_CONF_SU_TX_BFER_SET(txbf_cap, subfer); + WMI_HOST_TXBF_CONF_MU_TX_BFER_SET(txbf_cap, mubfer); + WMI_HOST_TXBF_CONF_STS_CAP_SET(txbf_cap, bfee_sts_cap); + WMI_HOST_TXBF_CONF_IMPLICIT_BF_SET(txbf_cap, implicit_bf); + WMI_HOST_TXBF_CONF_BF_SND_DIM_SET(txbf_cap, sounding_dimension); + + mlme_debug("VHT su bfee:%d mu bfee:%d su bfer:%d " + "mu bfer:%d impl bf:%d sounding dim:%d", + WMI_HOST_TXBF_CONF_SU_TX_BFEE_GET(txbf_cap), + WMI_HOST_TXBF_CONF_MU_TX_BFEE_GET(txbf_cap), + WMI_HOST_TXBF_CONF_SU_TX_BFER_GET(txbf_cap), + WMI_HOST_TXBF_CONF_MU_TX_BFER_GET(txbf_cap), + WMI_HOST_TXBF_CONF_IMPLICIT_BF_GET(txbf_cap), + WMI_HOST_TXBF_CONF_BF_SND_DIM_GET(txbf_cap)); + + return txbf_cap; +} + +static inline uint32_t +target_if_vdev_mlme_id_2_wmi(uint32_t cfg_id) +{ + int wmi_id; + + switch (cfg_id) { + case WLAN_MLME_CFG_DTIM_PERIOD: + wmi_id = wmi_vdev_param_dtim_period; + break; + case WLAN_MLME_CFG_SLOT_TIME: + wmi_id = wmi_vdev_param_slot_time; + break; + case WLAN_MLME_CFG_PROTECTION_MODE: + wmi_id = wmi_vdev_param_protection_mode; + break; + case WLAN_MLME_CFG_BEACON_INTERVAL: + wmi_id = wmi_vdev_param_beacon_interval; + break; + case WLAN_MLME_CFG_LDPC: + wmi_id = wmi_vdev_param_ldpc; + break; + case WLAN_MLME_CFG_NSS: + wmi_id = wmi_vdev_param_nss; + break; + case WLAN_MLME_CFG_SUBFER: + case WLAN_MLME_CFG_MUBFER: + case WLAN_MLME_CFG_SUBFEE: + case WLAN_MLME_CFG_MUBFEE: + case WLAN_MLME_CFG_IMLICIT_BF: + case WLAN_MLME_CFG_SOUNDING_DIM: + case WLAN_MLME_CFG_TXBF_CAPS: + wmi_id = wmi_vdev_param_txbf; + break; + case WLAN_MLME_CFG_HE_OPS: + wmi_id = wmi_vdev_param_set_heop; + break; + case WLAN_MLME_CFG_RTS_THRESHOLD: + wmi_id = wmi_vdev_param_rts_threshold; + break; + case WLAN_MLME_CFG_FRAG_THRESHOLD: + wmi_id = wmi_vdev_param_fragmentation_threshold; + break; + case WLAN_MLME_CFG_DROP_UNENCRY: + wmi_id = wmi_vdev_param_drop_unencry; + break; + case WLAN_MLME_CFG_TX_POWER: + wmi_id = wmi_vdev_param_tx_power; + break; + case WLAN_MLME_CFG_AMPDU: + wmi_id = wmi_vdev_param_ampdu_subframe_size_per_ac; + break; + case WLAN_MLME_CFG_AMSDU: + wmi_id = wmi_vdev_param_amsdu_subframe_size_per_ac; + break; + case WLAN_MLME_CFG_MIN_IDLE_INACTIVE_TIME: + wmi_id = + wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs; + break; + case WLAN_MLME_CFG_MAX_IDLE_INACTIVE_TIME: + wmi_id = + wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs; + break; + case WLAN_MLME_CFG_MAX_UNRESPONSIVE_INACTIVE_TIME: + wmi_id = + wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs; + break; + case WLAN_MLME_CFG_UAPSD: + wmi_id = WMI_HOST_STA_PS_PARAM_UAPSD; + break; + case WLAN_MLME_CFG_BCN_TX_RATE_CODE: + wmi_id = wmi_vdev_param_beacon_rate; + break; + case WLAN_MLME_CFG_TX_MGMT_RATE_CODE: + wmi_id = wmi_vdev_param_mgmt_rate; + break; + case WLAN_MLME_CFG_LISTEN_INTERVAL: + wmi_id = wmi_vdev_param_listen_interval; + break; + case WLAN_MLME_CFG_ENABLE_MULTI_GROUP_KEY: + wmi_id = wmi_vdev_param_enable_multi_group_key; + break; + case WLAN_MLME_CFG_MAX_GROUP_KEYS: + wmi_id = wmi_vdev_param_max_group_keys; + break; + case WLAN_MLME_CFG_TX_ENCAP_TYPE: + wmi_id = wmi_vdev_param_tx_encap_type; + break; + case WLAN_MLME_CFG_RX_DECAP_TYPE: + wmi_id = wmi_vdev_param_rx_decap_type; + break; + default: + wmi_id = cfg_id; + break; + } + + return wmi_id; +} + +static +QDF_STATUS target_if_vdev_set_tx_rx_decap_type(struct wlan_objmgr_vdev *vdev, + enum wlan_mlme_cfg_id param_id, + uint32_t value) +{ + ol_txrx_soc_handle soc_txrx_handle; + struct wlan_objmgr_psoc *psoc; + uint8_t vdev_id = wlan_vdev_get_id(vdev); + cdp_config_param_type val = {0}; + + psoc = wlan_vdev_get_psoc(vdev); + soc_txrx_handle = wlan_psoc_get_dp_handle(psoc); + + if (!soc_txrx_handle) + return QDF_STATUS_E_INVAL; + + if (param_id == WLAN_MLME_CFG_TX_ENCAP_TYPE) { + val.cdp_vdev_param_tx_encap = value; + return cdp_txrx_set_vdev_param(soc_txrx_handle, + vdev_id, CDP_TX_ENCAP_TYPE, + val); + } else if (param_id == WLAN_MLME_CFG_RX_DECAP_TYPE) { + val.cdp_vdev_param_rx_decap = value; + return cdp_txrx_set_vdev_param(soc_txrx_handle, + vdev_id, CDP_RX_DECAP_TYPE, + val); + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_vdev_mgr_set_param_send( + struct wlan_objmgr_vdev *vdev, + struct vdev_set_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + int param_id; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + param_id = target_if_vdev_mlme_id_2_wmi(param->param_id); + param->param_id = param_id; + if (param->param_id == wmi_vdev_param_txbf) + param->param_value = target_if_vdev_mlme_build_txbf_caps(vdev); + + status = wmi_unified_vdev_set_param_send(wmi_handle, param); + + return status; +} + +static QDF_STATUS target_if_vdev_mgr_create_send( + struct wlan_objmgr_vdev *vdev, + struct vdev_create_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + uint8_t vap_addr[QDF_MAC_ADDR_SIZE] = {0}; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_psoc *psoc; + uint8_t vdev_id; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + mlme_err("Failed to get psoc for VDEV_%d", + wlan_vdev_get_id(vdev)); + return QDF_STATUS_E_INVAL; + } + + txops = wlan_mlme_get_lmac_tx_ops(psoc); + if (!txops || !txops->psoc_vdev_rsp_timer_init) { + mlme_err("Failed to get mlme txrx_ops for VDEV_%d PSOC_%d", + wlan_vdev_get_id(vdev), wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + WLAN_ADDR_COPY(vap_addr, wlan_vdev_mlme_get_macaddr(vdev)); + status = wmi_unified_vdev_create_send(wmi_handle, vap_addr, + param); + + vdev_id = wlan_vdev_get_id(vdev); + if (QDF_IS_STATUS_SUCCESS(status)) + status = txops->psoc_vdev_rsp_timer_init(psoc, vdev_id); + + return status; +} + +static QDF_STATUS target_if_vdev_mgr_start_send( + struct wlan_objmgr_vdev *vdev, + struct vdev_start_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + uint8_t vdev_id; + struct vdev_response_timer *vdev_rsp; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + vdev_id = wlan_vdev_get_id(vdev); + psoc = wlan_vdev_get_psoc(vdev); + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->psoc_get_vdev_response_timer_info) { + mlme_err("VEV_%d: PSOC_%d No Rx Ops", vdev_id, + wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, vdev_id); + if (!vdev_rsp) { + mlme_err("VDEV_%d: PSOC_%d No vdev rsp timer", vdev_id, + wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + vdev_rsp->expire_time = START_RESPONSE_TIMER; + target_if_wake_lock_timeout_acquire(psoc, START_WAKELOCK); + + if (param->is_restart) + target_if_vdev_mgr_rsp_timer_start(psoc, vdev_rsp, + RESTART_RESPONSE_BIT); + else + target_if_vdev_mgr_rsp_timer_start(psoc, vdev_rsp, + START_RESPONSE_BIT); + + status = wmi_unified_vdev_start_send(wmi_handle, param); + if (QDF_IS_STATUS_ERROR(status)) { + vdev_rsp->timer_status = QDF_STATUS_E_CANCELED; + vdev_rsp->expire_time = 0; + target_if_wake_lock_timeout_release(psoc, START_WAKELOCK); + if (param->is_restart) + target_if_vdev_mgr_rsp_timer_stop(psoc, vdev_rsp, + RESTART_RESPONSE_BIT); + else + target_if_vdev_mgr_rsp_timer_stop(psoc, vdev_rsp, + START_RESPONSE_BIT); + } + return status; +} + +static QDF_STATUS target_if_vdev_mgr_delete_response_send( + struct wlan_objmgr_vdev *vdev, + struct wlan_lmac_if_mlme_rx_ops *rx_ops) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct wlan_objmgr_psoc *psoc = wlan_vdev_get_psoc(vdev); + struct vdev_delete_response rsp = {0}; + + rsp.vdev_id = wlan_vdev_get_id(vdev); + status = rx_ops->vdev_mgr_delete_response(psoc, &rsp); + target_if_wake_lock_timeout_release(psoc, DELETE_WAKELOCK); + + return status; +} + +static QDF_STATUS target_if_vdev_mgr_delete_send( + struct wlan_objmgr_vdev *vdev, + struct vdev_delete_params *param) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + uint8_t vdev_id; + struct vdev_response_timer *vdev_rsp; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + vdev_id = wlan_vdev_get_id(vdev); + psoc = wlan_vdev_get_psoc(vdev); + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->psoc_get_vdev_response_timer_info) { + mlme_err("VDEV_%d PSOC_%d No Rx Ops", vdev_id, + wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, vdev_id); + if (!vdev_rsp) { + mlme_err("VDEV_%d: PSOC_%d No vdev rsp timer", vdev_id, + wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + vdev_rsp->expire_time = DELETE_RESPONSE_TIMER; + target_if_vdev_mgr_rsp_timer_start(psoc, vdev_rsp, + DELETE_RESPONSE_BIT); + target_if_wake_lock_timeout_acquire(psoc, DELETE_WAKELOCK); + + status = wmi_unified_vdev_delete_send(wmi_handle, param->vdev_id); + if (QDF_IS_STATUS_SUCCESS(status)) { + /* + * Simulate delete response if target doesn't support + */ + if (!wmi_service_enabled(wmi_handle, + wmi_service_sync_delete_cmds) || + wlan_psoc_nif_feat_cap_get(psoc, + WLAN_SOC_F_TESTMODE_ENABLE)) { + target_if_vdev_mgr_rsp_timer_stop(psoc, vdev_rsp, + DELETE_RESPONSE_BIT); + target_if_vdev_mgr_delete_response_send(vdev, rx_ops); + } + } else { + vdev_rsp->expire_time = 0; + vdev_rsp->timer_status = QDF_STATUS_E_CANCELED; + target_if_vdev_mgr_rsp_timer_stop(psoc, vdev_rsp, + DELETE_RESPONSE_BIT); + target_if_wake_lock_timeout_release(psoc, DELETE_WAKELOCK); + } + return status; +} + +static QDF_STATUS target_if_vdev_mgr_stop_send( + struct wlan_objmgr_vdev *vdev, + struct vdev_stop_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + struct wlan_objmgr_psoc *psoc; + uint8_t vdev_id; + struct vdev_response_timer *vdev_rsp; + + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + vdev_id = wlan_vdev_get_id(vdev); + psoc = wlan_vdev_get_psoc(vdev); + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->psoc_get_vdev_response_timer_info) { + mlme_err("VDEV_%d PSOC_%d No Rx Ops", vdev_id, + wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, vdev_id); + if (!vdev_rsp) { + mlme_err("VDEV_%d: PSOC_%d No vdev rsp timer", vdev_id, + wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + vdev_rsp->expire_time = STOP_RESPONSE_TIMER; + target_if_vdev_mgr_rsp_timer_start(psoc, vdev_rsp, STOP_RESPONSE_BIT); + /* + * START wakelock is acquired before sending the start command and + * released after sending up command to fw. This is to prevent the + * system to go into suspend state during the connection. + * In auth/assoc failure scenario UP command is not sent + * so release the START wakelock here. + */ + target_if_wake_lock_timeout_release(psoc, START_WAKELOCK); + target_if_wake_lock_timeout_acquire(psoc, STOP_WAKELOCK); + + status = wmi_unified_vdev_stop_send(wmi_handle, param->vdev_id); + if (QDF_IS_STATUS_ERROR(status)) { + vdev_rsp->expire_time = 0; + vdev_rsp->timer_status = QDF_STATUS_E_CANCELED; + target_if_vdev_mgr_rsp_timer_stop(psoc, vdev_rsp, + STOP_RESPONSE_BIT); + target_if_wake_lock_timeout_release(psoc, STOP_WAKELOCK); + } + return status; +} + +static QDF_STATUS target_if_vdev_mgr_down_send( + struct wlan_objmgr_vdev *vdev, + struct vdev_down_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + struct wlan_objmgr_psoc *psoc; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + mlme_err("Failed to get PSOC Object"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_vdev_down_send(wmi_handle, param->vdev_id); + target_if_wake_lock_timeout_release(psoc, STOP_WAKELOCK); + + return status; +} + +static QDF_STATUS target_if_vdev_mgr_up_send( + struct wlan_objmgr_vdev *vdev, + struct vdev_up_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + uint8_t bssid[QDF_MAC_ADDR_SIZE]; + struct wlan_objmgr_psoc *psoc; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + mlme_err("Failed to get PSOC Object"); + return QDF_STATUS_E_INVAL; + } + ucfg_wlan_vdev_mgr_get_param_bssid(vdev, bssid); + + status = wmi_unified_vdev_up_send(wmi_handle, bssid, param); + target_if_wake_lock_timeout_release(psoc, START_WAKELOCK); + + return status; +} + +static QDF_STATUS target_if_vdev_mgr_beacon_tmpl_send( + struct wlan_objmgr_vdev *vdev, + struct beacon_tmpl_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_beacon_tmpl_send_cmd(wmi_handle, param); + return status; +} + +static QDF_STATUS target_if_vdev_mgr_set_nac_rssi_send( + struct wlan_objmgr_vdev *vdev, + struct vdev_scan_nac_rssi_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_vdev_set_nac_rssi_send(wmi_handle, param); + + return status; +} + +static QDF_STATUS target_if_vdev_mgr_set_neighbour_rx_cmd_send( + struct wlan_objmgr_vdev *vdev, + struct set_neighbour_rx_params *param, + uint8_t *mac) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_vdev_set_neighbour_rx_cmd_send(wmi_handle, + mac, param); + + return status; +} + +static QDF_STATUS target_if_vdev_mgr_sifs_trigger_send( + struct wlan_objmgr_vdev *vdev, + struct sifs_trigger_param *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_sifs_trigger_send(wmi_handle, param); + + return status; +} + +static QDF_STATUS target_if_vdev_mgr_set_custom_aggr_size_cmd_send( + struct wlan_objmgr_vdev *vdev, + struct set_custom_aggr_size_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_vdev_set_custom_aggr_size_cmd_send(wmi_handle, + param); + + return status; +} + +static QDF_STATUS target_if_vdev_mgr_config_ratemask_cmd_send( + struct wlan_objmgr_vdev *vdev, + struct config_ratemask_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_vdev_config_ratemask_cmd_send(wmi_handle, + param); + return status; +} + +static QDF_STATUS target_if_vdev_mgr_peer_flush_tids_send( + struct wlan_objmgr_vdev *vdev, + struct peer_flush_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_peer_flush_tids_send(wmi_handle, param->peer_mac, + param); + + return status; +} + +static int32_t target_if_vdev_mgr_multi_vdev_restart_get_ref( + struct wlan_objmgr_pdev *pdev, + struct multiple_vdev_restart_params *param, + struct wlan_objmgr_vdev **vdev_list, + bool *vdev_timer_started) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *tvdev; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + int32_t vdev_idx = -1; + int32_t last_vdev_idx = -1; + struct vdev_response_timer *vdev_rsp; + + psoc = wlan_pdev_get_psoc(pdev); + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + + if (!rx_ops || !rx_ops->psoc_get_vdev_response_timer_info) { + mlme_err("VDEV_%d: No Rx Ops", vdev_idx); + return last_vdev_idx; + } + + for (vdev_idx = 0; vdev_idx < param->num_vdevs ; vdev_idx++) { + vdev_list[vdev_idx] = wlan_objmgr_get_vdev_by_id_from_pdev( + pdev, + param->vdev_ids[vdev_idx], + WLAN_VDEV_TARGET_IF_ID); + tvdev = vdev_list[vdev_idx]; + if (!tvdev) { + mlme_err("VDEV_%d is NULL", vdev_idx); + return last_vdev_idx; + } + + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info( + psoc, + wlan_vdev_get_id(tvdev)); + if (!vdev_rsp) { + mlme_err("VDEV_%d PSOC_%d No vdev rsp timer", + vdev_idx, wlan_psoc_get_id(psoc)); + return last_vdev_idx; + } + + last_vdev_idx = vdev_idx; + target_if_vdev_mgr_rsp_timer_start(psoc, vdev_rsp, + RESTART_RESPONSE_BIT); + vdev_timer_started[vdev_idx] = true; + } + + return last_vdev_idx; +} + +static void target_if_vdev_mgr_multi_vdev_restart_rel_ref( + struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev **vdev_list, + bool *vdev_timer_started, + int32_t last_vdev_idx, + QDF_STATUS status) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *tvdev; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + uint32_t vdev_idx; + struct vdev_response_timer *vdev_rsp; + + psoc = wlan_pdev_get_psoc(pdev); + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->psoc_get_vdev_response_timer_info) { + mlme_err("VDEV_%d: No Rx Ops", last_vdev_idx); + return; + } + + for (vdev_idx = 0; vdev_idx <= last_vdev_idx; vdev_idx++) { + tvdev = vdev_list[vdev_idx]; + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, + vdev_idx); + if (!vdev_rsp) { + mlme_err("VDEV_%d: PSOC_%d No vdev rsp timer", + vdev_idx, wlan_psoc_get_id(psoc)); + return; + } + + if (QDF_IS_STATUS_ERROR(status)) { + if (vdev_timer_started[vdev_idx]) { + target_if_vdev_mgr_rsp_timer_stop( + psoc, vdev_rsp, + RESTART_RESPONSE_BIT); + vdev_timer_started[vdev_idx] = false; + } + } + wlan_objmgr_vdev_release_ref(tvdev, + WLAN_VDEV_TARGET_IF_ID); + } +} + +static QDF_STATUS target_if_vdev_mgr_multiple_vdev_restart_req_cmd( + struct wlan_objmgr_pdev *pdev, + struct multiple_vdev_restart_params *param) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct wmi_unified *wmi_handle; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev_list[WLAN_UMAC_PDEV_MAX_VDEVS] = {NULL}; + bool vdev_timer_started[WLAN_UMAC_PDEV_MAX_VDEVS] = {false}; + int32_t last_vdev_idx = -1; + + if (!pdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + mlme_err("PSOC is NULL"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_pdev(pdev); + if (!wmi_handle) { + mlme_err("PDEV WMI Handle is NULL!"); + return QDF_STATUS_E_INVAL; + } + + last_vdev_idx = target_if_vdev_mgr_multi_vdev_restart_get_ref( + pdev, param, + vdev_list, + vdev_timer_started); + if (last_vdev_idx < 0 || (last_vdev_idx != (param->num_vdevs - 1))) { + target_if_vdev_mgr_multi_vdev_restart_rel_ref( + pdev, vdev_list, + vdev_timer_started, + last_vdev_idx, + QDF_STATUS_E_FAILURE); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_send_multiple_vdev_restart_req_cmd(wmi_handle, + param); + + target_if_vdev_mgr_multi_vdev_restart_rel_ref( + pdev, vdev_list, + vdev_timer_started, + last_vdev_idx, status); + + return status; +} + +static QDF_STATUS target_if_vdev_mgr_beacon_send( + struct wlan_objmgr_vdev *vdev, + struct beacon_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_beacon_send_cmd(wmi_handle, param); + + return status; +} + +static QDF_STATUS target_if_vdev_mgr_sta_ps_param_send( + struct wlan_objmgr_vdev *vdev, + struct sta_ps_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + int param_id; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + param_id = target_if_vdev_mlme_id_2_wmi(param->param_id); + param->param_id = param_id; + + status = wmi_unified_sta_ps_cmd_send(wmi_handle, param); + + return status; +} + +static QDF_STATUS target_if_vdev_mgr_peer_delete_all_send( + struct wlan_objmgr_vdev *vdev, + struct peer_delete_all_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_mlme_rx_ops *rx_ops; + struct wlan_objmgr_psoc *psoc; + uint8_t vdev_id; + struct vdev_response_timer *vdev_rsp; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + vdev_id = wlan_vdev_get_id(vdev); + psoc = wlan_vdev_get_psoc(vdev); + rx_ops = target_if_vdev_mgr_get_rx_ops(psoc); + + if (!rx_ops || !rx_ops->psoc_get_vdev_response_timer_info) { + mlme_err("VDEV_%d PSOC_%d No Rx Ops", vdev_id, + wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + vdev_rsp = rx_ops->psoc_get_vdev_response_timer_info(psoc, vdev_id); + if (!vdev_rsp) { + mlme_err("VDEV_%d: PSOC_%d No vdev rsp timer", vdev_id, + wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_INVAL; + } + + vdev_rsp->expire_time = PEER_DELETE_ALL_RESPONSE_TIMER; + target_if_vdev_mgr_rsp_timer_start(psoc, vdev_rsp, + PEER_DELETE_ALL_RESPONSE_BIT); + + status = wmi_unified_peer_delete_all_send(wmi_handle, param); + if (QDF_IS_STATUS_ERROR(status)) { + vdev_rsp->expire_time = 0; + vdev_rsp->timer_status = QDF_STATUS_E_CANCELED; + target_if_vdev_mgr_rsp_timer_stop(psoc, vdev_rsp, + PEER_DELETE_ALL_RESPONSE_BIT); + } + return status; +} + +#if defined(WLAN_SUPPORT_FILS) || defined(CONFIG_BAND_6GHZ) +static QDF_STATUS target_if_vdev_mgr_fils_enable_send( + struct wlan_objmgr_vdev *vdev, + struct config_fils_params *param) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + if (!vdev || !param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_if_vdev_mgr_wmi_handle_get(vdev); + if (!wmi_handle) { + mlme_err("Failed to get WMI handle!"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_vdev_fils_enable_cmd_send(wmi_handle, param); + + return status; +} + +static void target_if_vdev_register_tx_fils( + struct wlan_lmac_if_mlme_tx_ops *mlme_tx_ops) +{ + mlme_tx_ops->vdev_fils_enable_send = + target_if_vdev_mgr_fils_enable_send; +} +#else +static void target_if_vdev_register_tx_fils( + struct wlan_lmac_if_mlme_tx_ops *mlme_tx_ops) +{ +} +#endif + +QDF_STATUS +target_if_vdev_mgr_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_mlme_tx_ops *mlme_tx_ops; + + if (!tx_ops) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + mlme_tx_ops = &tx_ops->mops; + if (!mlme_tx_ops) { + mlme_err("No Tx Ops"); + return QDF_STATUS_E_FAILURE; + } + + mlme_tx_ops->vdev_mlme_attach = + target_if_vdev_mgr_register_event_handler; + mlme_tx_ops->vdev_mlme_detach = + target_if_vdev_mgr_unregister_event_handler; + mlme_tx_ops->vdev_create_send = target_if_vdev_mgr_create_send; + mlme_tx_ops->vdev_start_send = target_if_vdev_mgr_start_send; + mlme_tx_ops->vdev_up_send = target_if_vdev_mgr_up_send; + mlme_tx_ops->vdev_delete_send = target_if_vdev_mgr_delete_send; + mlme_tx_ops->vdev_stop_send = target_if_vdev_mgr_stop_send; + mlme_tx_ops->vdev_down_send = target_if_vdev_mgr_down_send; + mlme_tx_ops->vdev_set_nac_rssi_send = + target_if_vdev_mgr_set_nac_rssi_send; + mlme_tx_ops->vdev_set_neighbour_rx_cmd_send = + target_if_vdev_mgr_set_neighbour_rx_cmd_send; + mlme_tx_ops->vdev_sifs_trigger_send = + target_if_vdev_mgr_sifs_trigger_send; + mlme_tx_ops->vdev_set_custom_aggr_size_cmd_send = + target_if_vdev_mgr_set_custom_aggr_size_cmd_send; + mlme_tx_ops->vdev_config_ratemask_cmd_send = + target_if_vdev_mgr_config_ratemask_cmd_send; + mlme_tx_ops->peer_flush_tids_send = + target_if_vdev_mgr_peer_flush_tids_send; + mlme_tx_ops->multiple_vdev_restart_req_cmd = + target_if_vdev_mgr_multiple_vdev_restart_req_cmd; + mlme_tx_ops->beacon_cmd_send = target_if_vdev_mgr_beacon_send; + mlme_tx_ops->beacon_tmpl_send = target_if_vdev_mgr_beacon_tmpl_send; + mlme_tx_ops->vdev_set_param_send = + target_if_vdev_mgr_set_param_send; + mlme_tx_ops->vdev_set_tx_rx_decap_type = + target_if_vdev_set_tx_rx_decap_type; + mlme_tx_ops->vdev_sta_ps_param_send = + target_if_vdev_mgr_sta_ps_param_send; + mlme_tx_ops->psoc_vdev_rsp_timer_mod = + target_if_vdev_mgr_rsp_timer_mod; + mlme_tx_ops->peer_delete_all_send = + target_if_vdev_mgr_peer_delete_all_send; + target_if_vdev_register_tx_fils(mlme_tx_ops); + + mlme_tx_ops->psoc_vdev_rsp_timer_init = + target_if_psoc_vdev_rsp_timer_init; + mlme_tx_ops->psoc_vdev_rsp_timer_deinit = + target_if_psoc_vdev_rsp_timer_deinit; + mlme_tx_ops->psoc_vdev_rsp_timer_inuse = + target_if_psoc_vdev_rsp_timer_inuse; + mlme_tx_ops->psoc_wake_lock_init = + target_if_wake_lock_init; + mlme_tx_ops->psoc_wake_lock_deinit = + target_if_wake_lock_deinit; + mlme_tx_ops->vdev_mgr_rsp_timer_stop = + target_if_vdev_mgr_rsp_timer_stop; + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/inc/target_if_reg.h b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/inc/target_if_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..a1da2388bb905f18ab75f96957d0aabf02b3d7dc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/inc/target_if_reg.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_reg.h + * This file contains regulatory target interfaces. + */ +#ifndef __TARGET_IF_REG_H__ +#define __TARGET_IF_REG_H__ + +/** + * tgt_if_regulatory_modify_freq_range() - Modify low and high freq ranges based + * on wireless mode. + * @psoc: psoc pointer + * + * Return: Success or Failure + */ +QDF_STATUS tgt_if_regulatory_modify_freq_range(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_register_regulatory_tx_ops() - register regulatory tx ops + * + * @tx_ops: tx_ops pointer + * Return: Success or Failure + */ +QDF_STATUS target_if_register_regulatory_tx_ops( + struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_reg_set_offloaded_info() - populate regulatory offloaded info + * + * @psoc: psoc pointer + * Return: Success or Failure + */ +QDF_STATUS target_if_reg_set_offloaded_info(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_reg_set_6ghz_info() - populate 6ghz enablement info + * @psoc: psoc pointer + * Return: Success or Failure + */ +QDF_STATUS target_if_reg_set_6ghz_info(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_regulatory_get_rx_ops() - Get regdb rx ops + * @psoc: pointer to psoc object + * + * Return: Reg rx_ops + */ +struct wlan_lmac_if_reg_rx_ops * +target_if_regulatory_get_rx_ops(struct wlan_objmgr_psoc *psoc); +#endif /* __TARGET_IF_REG_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/inc/target_if_reg_11d.h b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/inc/target_if_reg_11d.h new file mode 100644 index 0000000000000000000000000000000000000000..c69962ffcd2e6a310eb602cabfebe597ab62bf13 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/inc/target_if_reg_11d.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_reg_11d.h + * This file contains regulatory target 11d scan interface. + */ + +#ifndef __TARGET_IF_REG_11D_H__ +#define __TARGET_IF_REG_11D_H__ + +#include "qdf_types.h" +#include "target_if.h" +#include +#include "target_if_reg.h" +#include "wmi_unified_api.h" +#include "wmi_unified_reg_api.h" + +#ifdef HOST_11D_SCAN +/** + * tgt_if_regulatory_is_11d_offloaded() - Check if reg 11d is offloaded. + * @psoc: psoc pointer + * + * Return: true if 11d is offloaded, else false. + */ +bool tgt_if_regulatory_is_11d_offloaded(struct wlan_objmgr_psoc *psoc); + +/** + * tgt_if_regulatory_register_11d_new_cc_handler() - Register for 11d country + * code event ID. + * @psoc: psoc pointer + * @args: Pointer to args. + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_if_regulatory_register_11d_new_cc_handler( + struct wlan_objmgr_psoc *psoc, void *arg); + +/** + * tgt_if_regulatory_unregister_11d_new_cc_handler() - Unregister 11d country + * code event ID. + * @psoc: psoc pointer + * @args: Pointer to args. + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_if_regulatory_unregister_11d_new_cc_handler( + struct wlan_objmgr_psoc *psoc, void *arg); + +/** + * tgt_if_regulatory_start_11d_scan() - Start 11d scan. + * @psoc: psoc pointer + * @reg_start_11d_scan_req: Pointer to 11d scan start request. + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_if_regulatory_start_11d_scan( + struct wlan_objmgr_psoc *psoc, + struct reg_start_11d_scan_req *reg_start_11d_scan_req); + +/** + * tgt_if_regulatory_stop_11d_scan() - Stop 11d scan. + * @psoc: psoc pointer + * @reg_stop_11d_scan_req: Pointer to 11d scan stop request. + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_if_regulatory_stop_11d_scan( + struct wlan_objmgr_psoc *psoc, + struct reg_stop_11d_scan_req *reg_stop_11d_scan_req); + +#else + +static inline bool +tgt_if_regulatory_is_11d_offloaded(struct wlan_objmgr_psoc *psoc) +{ + return false; +} + +static inline QDF_STATUS +tgt_if_regulatory_register_11d_new_cc_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +tgt_if_regulatory_unregister_11d_new_cc_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS tgt_if_regulatory_start_11d_scan( + struct wlan_objmgr_psoc *psoc, + struct reg_start_11d_scan_req *reg_start_11d_scan_req) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS tgt_if_regulatory_stop_11d_scan( + struct wlan_objmgr_psoc *psoc, + struct reg_stop_11d_scan_req *reg_stop_11d_scan_req) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/inc/target_if_reg_lte.h b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/inc/target_if_reg_lte.h new file mode 100644 index 0000000000000000000000000000000000000000..3683b8e0c8efe0ac9df778dbcd99623128ed0512 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/inc/target_if_reg_lte.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_reg_lte.h + * This file contains regulatory target LTE interface + */ + +#ifndef __TARGET_IF_REG_LTE_H__ +#define __TARGET_IF_REG_LTE_H__ + +#include "qdf_types.h" +#include "target_if.h" +#include +#include "target_if_reg.h" +#include "wmi_unified_api.h" +#include "wmi_unified_reg_api.h" + +#ifdef LTE_COEX +/** + * tgt_if_regulatory_register_ch_avoid_event_handler() - Register avoid channel + * list event handler + * @psoc: Pointer to psoc + * @arg: Pointer to argumemt list + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_if_regulatory_register_ch_avoid_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg); + +/** + * tgt_if_regulatory_unregister_ch_avoid_event_handler() - Unregister avoid + * channel list event handler + * @psoc: Pointer to psoc + * @arg: Pointer to argumemt list + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_if_regulatory_unregister_ch_avoid_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg); + +#else + +static inline QDF_STATUS tgt_if_regulatory_register_ch_avoid_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS tgt_if_regulatory_unregister_ch_avoid_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif /* __TARGET_IF_REG_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/src/target_if_reg.c b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/src/target_if_reg.c new file mode 100644 index 0000000000000000000000000000000000000000..aa52d61bd60dda72eedd6fcfe3c65ec5d2a5928e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/src/target_if_reg.c @@ -0,0 +1,423 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_reg.c + * This file contains regulatory target interfaces. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * get_chan_list_cc_event_id() - Get chan_list_cc event i + * + * Return: Event id + */ +static inline uint32_t get_chan_list_cc_event_id(void) +{ + return wmi_reg_chan_list_cc_event_id; +} + +/** + * tgt_if_regulatory_is_regdb_offloaded() - Check if regdb is offloaded + * @psoc: Pointer to psoc + * + * Return: true if regdb if offloaded, else false + */ +static bool tgt_if_regulatory_is_regdb_offloaded(struct wlan_objmgr_psoc *psoc) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + struct wlan_lmac_if_reg_rx_ops *reg_rx_ops; + + reg_rx_ops = target_if_regulatory_get_rx_ops(psoc); + + if (!wmi_handle) + return false; + + if (reg_rx_ops->reg_ignore_fw_reg_offload_ind && + reg_rx_ops->reg_ignore_fw_reg_offload_ind(psoc)) { + target_if_debug("User disabled regulatory offload from ini"); + return 0; + } + + return wmi_service_enabled(wmi_handle, wmi_service_regulatory_db); +} + +/** + * tgt_if_regulatory_is_6ghz_supported() - Check if 6ghz is supported + * @psoc: Pointer to psoc + * + * Return: true if regdb if offloaded, else false + */ +static bool tgt_if_regulatory_is_6ghz_supported(struct wlan_objmgr_psoc *psoc) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return false; + + return wmi_service_enabled(wmi_handle, wmi_service_6ghz_support); +} + +/** + * tgt_if_regulatory_is_there_serv_ready_extn() - Check for service ready + * extension + * @psoc: Pointer to psoc object + * + * Return: true if service ready extension is present, else false. + */ +static bool tgt_if_regulatory_is_there_serv_ready_extn( + struct wlan_objmgr_psoc *psoc) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return false; + + return wmi_service_enabled(wmi_handle, wmi_service_ext_msg); +} + +/** + * target_if_regulatory_get_rx_ops() - Get regdb rx ops + * @psoc: Pointer to psoc object + * + * Return: Reg rx_ops + */ +struct wlan_lmac_if_reg_rx_ops * +target_if_regulatory_get_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.rx_ops.reg_rx_ops; +} + +QDF_STATUS target_if_reg_set_offloaded_info(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_reg_rx_ops *reg_rx_ops; + + reg_rx_ops = target_if_regulatory_get_rx_ops(psoc); + if (!reg_rx_ops) { + target_if_err("reg_rx_ops is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (reg_rx_ops->reg_set_regdb_offloaded) + reg_rx_ops->reg_set_regdb_offloaded( + psoc, + tgt_if_regulatory_is_regdb_offloaded(psoc)); + + if (reg_rx_ops->reg_set_11d_offloaded) + reg_rx_ops->reg_set_11d_offloaded( + psoc, tgt_if_regulatory_is_11d_offloaded(psoc)); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_reg_set_6ghz_info(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_reg_rx_ops *reg_rx_ops; + + reg_rx_ops = target_if_regulatory_get_rx_ops(psoc); + if (!reg_rx_ops) { + target_if_err("reg_rx_ops is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (reg_rx_ops->reg_set_6ghz_supported) + reg_rx_ops->reg_set_6ghz_supported( + psoc, + tgt_if_regulatory_is_6ghz_supported(psoc)); + + return QDF_STATUS_SUCCESS; +} + +/** + * tgt_reg_chan_list_update_handler() - Channel list update handler + * @handle: scn handle + * @event_buf: pointer to event buffer + * @len: buffer length + * + * Return: 0 on success + */ +static int tgt_reg_chan_list_update_handler(ol_scn_t handle, uint8_t *event_buf, + uint32_t len) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_reg_rx_ops *reg_rx_ops; + struct cur_regulatory_info *reg_info; + QDF_STATUS status; + struct wmi_unified *wmi_handle; + int ret_val = 0; + + TARGET_IF_ENTER(); + + psoc = target_if_get_psoc_from_scn_hdl(handle); + if (!psoc) { + target_if_err("psoc ptr is NULL"); + return -EINVAL; + } + + reg_rx_ops = target_if_regulatory_get_rx_ops(psoc); + if (!reg_rx_ops->master_list_handler) { + target_if_err("master_list_handler is NULL"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("invalid wmi handle"); + return -EINVAL; + } + + reg_info = qdf_mem_malloc(sizeof(*reg_info)); + if (!reg_info) + return -ENOMEM; + + if (wmi_extract_reg_chan_list_update_event(wmi_handle, + event_buf, reg_info, len) + != QDF_STATUS_SUCCESS) { + target_if_err("Extraction of channel list event failed"); + ret_val = -EFAULT; + goto clean; + } + + if (reg_info->phy_id >= PSOC_MAX_PHY_REG_CAP) { + target_if_err_rl("phy_id %d is out of bounds", + reg_info->phy_id); + ret_val = -EFAULT; + goto clean; + } + + reg_info->psoc = psoc; + + status = reg_rx_ops->master_list_handler(reg_info); + if (status != QDF_STATUS_SUCCESS) { + target_if_err("Failed to process master channel list handler"); + ret_val = -EFAULT; + } + +clean: + qdf_mem_free(reg_info->reg_rules_2g_ptr); + qdf_mem_free(reg_info->reg_rules_5g_ptr); + qdf_mem_free(reg_info); + + TARGET_IF_EXIT(); + + return ret_val; +} + +/** + * tgt_if_regulatory_register_master_list_handler() - Register master channel + * list + * @psoc: Pointer to psoc + * @arg: Pointer to argument list + * + * Return: QDF_STATUS + */ +static QDF_STATUS tgt_if_regulatory_register_master_list_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_register_event_handler( + wmi_handle, wmi_reg_chan_list_cc_event_id, + tgt_reg_chan_list_update_handler, WMI_RX_WORK_CTX); +} + +/** + * tgt_if_regulatory_unregister_master_list_handler() - Unregister master + * channel list + * @psoc: Pointer to psoc + * @arg: Pointer to argument list + * + * Return: QDF_STATUS + */ +static QDF_STATUS tgt_if_regulatory_unregister_master_list_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_unregister_event_handler( + wmi_handle, wmi_reg_chan_list_cc_event_id); +} + +/** + * tgt_if_regulatory_set_country_code() - Set country code + * @psoc: Pointer to psoc + * @arg: Pointer to argument list + * + * Return: QDF_STATUS + */ +static QDF_STATUS tgt_if_regulatory_set_country_code( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_set_country_cmd_send(wmi_handle, arg); +} + +/** + * tgt_if_regulatory_set_user_country_code() - Set user country code + * @psoc: Pointer to psoc + * @pdev_id: Pdev id + * @rd: Pointer to regdomain structure + * + * Return: QDF_STATUS + */ +static QDF_STATUS tgt_if_regulatory_set_user_country_code( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, struct cc_regdmn_s *rd) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + if (wmi_unified_set_user_country_code_cmd_send( + wmi_handle, pdev_id, rd) != QDF_STATUS_SUCCESS + ) { + target_if_err("Set user country code failed"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_if_regulatory_modify_freq_range(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap; + + reg_cap = ucfg_reg_get_hal_reg_cap(psoc); + if (!reg_cap) { + target_if_err("reg cap is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!(reg_cap->wireless_modes & WMI_HOST_REGDMN_MODE_11A)) { + reg_cap->low_5ghz_chan = 0; + reg_cap->high_5ghz_chan = 0; + } + + if (!(reg_cap->wireless_modes & + (WMI_HOST_REGDMN_MODE_11B | WMI_HOST_REGDMN_MODE_PUREG))) { + reg_cap->low_2ghz_chan = 0; + reg_cap->high_2ghz_chan = 0; + } + + target_if_debug("phy_id = %d - low_2ghz_chan = %d high_2ghz_chan = %d low_5ghz_chan = %d high_5ghz_chan = %d", + reg_cap->phy_id, + reg_cap->low_2ghz_chan, + reg_cap->high_2ghz_chan, + reg_cap->low_5ghz_chan, + reg_cap->high_5ghz_chan); + + return QDF_STATUS_SUCCESS; +} + +#ifdef CONFIG_REG_CLIENT +/** + * tgt_if_regulatory_send_ctl_info() - Send CTL info to firmware + * @psoc: Pointer to psoc + * @params: Pointer to reg control params + * + * Return: QDF_STATUS + */ +static QDF_STATUS +tgt_if_regulatory_send_ctl_info(struct wlan_objmgr_psoc *psoc, + struct reg_ctl_params *params) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_send_regdomain_info_to_fw_cmd(wmi_handle, + params->regd, + params->regd_2g, + params->regd_5g, + params->ctl_2g, + params->ctl_5g); +} +#else +static QDF_STATUS +tgt_if_regulatory_send_ctl_info(struct wlan_objmgr_psoc *psoc, + struct reg_ctl_params *params) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +QDF_STATUS target_if_register_regulatory_tx_ops( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_reg_tx_ops *reg_ops = &tx_ops->reg_ops; + + reg_ops->register_master_handler = + tgt_if_regulatory_register_master_list_handler; + + reg_ops->unregister_master_handler = + tgt_if_regulatory_unregister_master_list_handler; + + reg_ops->set_country_code = tgt_if_regulatory_set_country_code; + + reg_ops->fill_umac_legacy_chanlist = NULL; + + reg_ops->set_country_failed = NULL; + + reg_ops->register_11d_new_cc_handler = + tgt_if_regulatory_register_11d_new_cc_handler; + + reg_ops->unregister_11d_new_cc_handler = + tgt_if_regulatory_unregister_11d_new_cc_handler; + + reg_ops->start_11d_scan = tgt_if_regulatory_start_11d_scan; + + reg_ops->stop_11d_scan = tgt_if_regulatory_stop_11d_scan; + + reg_ops->is_there_serv_ready_extn = + tgt_if_regulatory_is_there_serv_ready_extn; + + reg_ops->set_user_country_code = + tgt_if_regulatory_set_user_country_code; + + reg_ops->register_ch_avoid_event_handler = + tgt_if_regulatory_register_ch_avoid_event_handler; + + reg_ops->unregister_ch_avoid_event_handler = + tgt_if_regulatory_unregister_ch_avoid_event_handler; + + reg_ops->send_ctl_info = tgt_if_regulatory_send_ctl_info; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/src/target_if_reg_11d.c b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/src/target_if_reg_11d.c new file mode 100644 index 0000000000000000000000000000000000000000..c857868d380b38572ea4ddbbb820fa27d52b95e1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/src/target_if_reg_11d.c @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_reg_11d.c + * This file contains regulatory target interface + */ + +#include "target_if_reg_11d.h" + +#ifdef HOST_11D_SCAN +bool tgt_if_regulatory_is_11d_offloaded(struct wlan_objmgr_psoc *psoc) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + struct wlan_lmac_if_reg_rx_ops *reg_rx_ops; + + reg_rx_ops = target_if_regulatory_get_rx_ops(psoc); + + if (!wmi_handle) + return false; + + if (reg_rx_ops->reg_ignore_fw_reg_offload_ind && + reg_rx_ops->reg_ignore_fw_reg_offload_ind(psoc)) { + target_if_debug("Ignore fw reg 11d offload indication"); + return 0; + } + + return wmi_service_enabled(wmi_handle, wmi_service_11d_offload); +} + +/** + * tgt_reg_11d_new_cc_handler() - 11d country code event handler + * @handle: scn handle + * @event_buf: event buffer + * @len: legth of @event_buf + * + * Return: 0 on success + */ +static int tgt_reg_11d_new_cc_handler(ol_scn_t handle, uint8_t *event_buf, + uint32_t len) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_reg_rx_ops *reg_rx_ops; + struct reg_11d_new_country reg_11d_new_cc; + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + TARGET_IF_ENTER(); + + psoc = target_if_get_psoc_from_scn_hdl(handle); + if (!psoc) { + target_if_err("psoc ptr is NULL"); + return -EINVAL; + } + + reg_rx_ops = target_if_regulatory_get_rx_ops(psoc); + + if (!reg_rx_ops->reg_11d_new_cc_handler) { + target_if_err("reg_11d_new_cc_handler is NULL"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return -EINVAL; + } + if (wmi_extract_reg_11d_new_cc_event(wmi_handle, event_buf, + ®_11d_new_cc, len) + != QDF_STATUS_SUCCESS) { + target_if_err("Extraction of new country event failed"); + return -EFAULT; + } + + status = reg_rx_ops->reg_11d_new_cc_handler(psoc, ®_11d_new_cc); + if (status != QDF_STATUS_SUCCESS) { + target_if_err("Failed to process new country code event"); + return -EFAULT; + } + + target_if_debug("processed 11d new country code event"); + + return 0; +} + +QDF_STATUS tgt_if_regulatory_register_11d_new_cc_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_register_event(wmi_handle, + wmi_11d_new_country_event_id, + tgt_reg_11d_new_cc_handler); +} + +QDF_STATUS tgt_if_regulatory_unregister_11d_new_cc_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_unregister_event(wmi_handle, + wmi_11d_new_country_event_id); +} + +QDF_STATUS tgt_if_regulatory_start_11d_scan( + struct wlan_objmgr_psoc *psoc, + struct reg_start_11d_scan_req *reg_start_11d_scan_req) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_send_start_11d_scan_cmd(wmi_handle, + reg_start_11d_scan_req); +} + +QDF_STATUS tgt_if_regulatory_stop_11d_scan( + struct wlan_objmgr_psoc *psoc, + struct reg_stop_11d_scan_req *reg_stop_11d_scan_req) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_send_stop_11d_scan_cmd(wmi_handle, + reg_stop_11d_scan_req); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/src/target_if_reg_lte.c b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/src/target_if_reg_lte.c new file mode 100644 index 0000000000000000000000000000000000000000..9f85e8a642b192fd6bfa68c5752baa2a159cd174 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/src/target_if_reg_lte.c @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_reg_lte.c + * This file contains regulatory target LTE interface + */ + +#ifdef LTE_COEX + +#include "target_if_reg_lte.h" + +/** + * tgt_reg_ch_avoid_event_handler() - Avoid channel list event handler. + * @handle: Pointer to scn handler. + * @event_buf: Pointer to event buffer. + * @len: Buffer length. + * + * Return: Error code. + */ +static int tgt_reg_ch_avoid_event_handler(ol_scn_t handle, uint8_t *event_buf, + uint32_t len) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_reg_rx_ops *reg_rx_ops; + struct ch_avoid_ind_type ch_avoid_event; + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + TARGET_IF_ENTER(); + + psoc = target_if_get_psoc_from_scn_hdl(handle); + if (!psoc) { + target_if_err("psoc ptr is NULL"); + return -EINVAL; + } + + reg_rx_ops = target_if_regulatory_get_rx_ops(psoc); + + if (!reg_rx_ops->reg_ch_avoid_event_handler) { + target_if_err("reg_ch_avoid_event_handler is NULL"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return -EINVAL; + } + + if (wmi_extract_reg_ch_avoid_event( + wmi_handle, event_buf, &ch_avoid_event, len) + != QDF_STATUS_SUCCESS) { + target_if_err("Extraction of CH avoid event failed"); + return -EFAULT; + } + + status = reg_rx_ops->reg_ch_avoid_event_handler(psoc, &ch_avoid_event); + if (status != QDF_STATUS_SUCCESS) { + target_if_err("Failed to process CH avoid event"); + return -EFAULT; + } + + TARGET_IF_EXIT(); + + return 0; +} + +QDF_STATUS tgt_if_regulatory_register_ch_avoid_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_register_event(wmi_handle, + wmi_wlan_freq_avoid_event_id, + tgt_reg_ch_avoid_event_handler); +} + +QDF_STATUS tgt_if_regulatory_unregister_ch_avoid_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_unregister_event(wmi_handle, + wmi_wlan_freq_avoid_event_id); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/scan/inc/target_if_scan.h b/drivers/staging/qca-wifi-host-cmn/target_if/scan/inc/target_if_scan.h new file mode 100644 index 0000000000000000000000000000000000000000..5e1830c45a6a1401d9a609704305111b04a908b2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/scan/inc/target_if_scan.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: offload lmac interface APIs for scan + */ +#ifndef __TARGET_SCAN_IF_H__ +#define __TARGET_SCAN_IF_H__ + +#include + +struct scan_req_params; +struct scan_cancel_param; +struct wlan_objmgr_psoc; + +#define WLAN_MAX_ACTIVE_SCANS_ALLOWED 8 + +#ifdef FEATURE_WLAN_SCAN_PNO +/** + * target_if_nlo_match_event_handler() - nlo match event handler + * @scn: scn handle + * @event: event data + * @len: data length + * + * Record NLO match event comes from FW. It's a indication that + * one of the profile is matched. + * + * Return: 0 for success or error code. + */ +int target_if_nlo_match_event_handler(ol_scn_t scn, uint8_t *data, + uint32_t len); + +/** + * target_if_nlo_complete_handler() - nlo complete event handler + * @scn: scn handle + * @event: event data + * @len: data length + * + * Record NLO match event comes from FW. It's a indication that + * one of the profile is matched. + * + * Return: 0 for success or error code. + */ +int target_if_nlo_complete_handler(ol_scn_t scn, uint8_t *data, + uint32_t len); +#endif + +/** + * target_if_scan_register_event_handler() - lmac handler API + * to register for scan events + * @psoc: psoc object + * @arg: argument to lmac + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_scan_register_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg); + +/** + * target_if_scan_unregister_event_handler() - lmac handler API + * to unregister for scan events + * @psoc: psoc object + * @arg: argument to lmac + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_scan_unregister_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg); + +/** + * target_if_scan_start() - lmac handler API to start scan + * @pdev: pdev object + * @req: scan_req_params object + * + * Return: QDF_STATUS + */ + +QDF_STATUS +target_if_scan_start(struct wlan_objmgr_pdev *pdev, + struct scan_start_request *req); + +/** + * target_if_scan_cancel() - lmac handler API to cancel a previous active scan + * @pdev: pdev object + * @req: scan_cancel_param object + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_scan_cancel(struct wlan_objmgr_pdev *pdev, + struct scan_cancel_param *req); + +/** + * target_if_scan_tx_ops_register() - lmac handler to register scan tx_ops + * callback functions + * @tx_ops: wlan_lmac_if_tx_ops object + * + * Return: QDF_STATUS + */ + +QDF_STATUS +target_if_scan_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_scan_set_max_active_scans() - lmac handler to set max active scans + * @psoc: psoc object + * @max_active_scans: maximum active scans allowed on underlying psoc + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_scan_set_max_active_scans(struct wlan_objmgr_psoc *psoc, + uint32_t max_active_scans); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/scan/src/target_if_scan.c b/drivers/staging/qca-wifi-host-cmn/target_if/scan/src/target_if_scan.c new file mode 100644 index 0000000000000000000000000000000000000000..a3b825bfac3e2e1d81218a26d51ff167be98f616 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/scan/src/target_if_scan.c @@ -0,0 +1,456 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: offload lmac interface APIs definitions for scan + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static inline struct wlan_lmac_if_scan_rx_ops * +target_if_scan_get_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.rx_ops.scan; +} + +static int +target_if_scan_event_handler(ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + struct scan_event_info *event_info; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_scan_rx_ops *scan_rx_ops; + QDF_STATUS status; + + if (!scn || !data) { + target_if_err("scn: 0x%pK, data: 0x%pK\n", scn, data); + return -EINVAL; + } + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc\n"); + return -EINVAL; + } + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) { + target_if_err("wmi_handle is NULL"); + return -EINVAL; + } + + event_info = qdf_mem_malloc(sizeof(*event_info)); + + if (!event_info) + return -ENOMEM; + + if (wmi_extract_vdev_scan_ev_param(wmi_handle, data, + &(event_info->event))) { + target_if_err("Failed to extract wmi scan event"); + qdf_mem_free(event_info); + return -EINVAL; + } + + scan_rx_ops = target_if_scan_get_rx_ops(psoc); + if (scan_rx_ops->scan_ev_handler) { + status = scan_rx_ops->scan_ev_handler(psoc, event_info); + if (status != QDF_STATUS_SUCCESS) { + qdf_mem_free(event_info); + return -EINVAL; + } + } else { + qdf_mem_free(event_info); + return -EINVAL; + } + + return 0; +} + +#ifdef FEATURE_WLAN_SCAN_PNO + +int target_if_nlo_complete_handler(ol_scn_t scn, uint8_t *data, + uint32_t len) +{ + struct scan_event_info *event_info; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_scan_rx_ops *scan_rx_ops; + QDF_STATUS status; + + if (!scn || !data) { + target_if_err("scn: 0x%pK, data: 0x%pK", scn, data); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("wmi_handle is NULL"); + return -EINVAL; + } + + event_info = qdf_mem_malloc(sizeof(*event_info)); + if (!event_info) + return -ENOMEM; + + if (wmi_extract_nlo_complete_ev_param(wmi_handle, data, + &event_info->event)) { + target_if_err("Failed to extract WMI PNO complete event"); + qdf_mem_free(event_info); + return -EINVAL; + } + + target_if_debug("PNO complete event received for vdev %d", + event_info->event.vdev_id); + + scan_rx_ops = target_if_scan_get_rx_ops(psoc); + if (scan_rx_ops->scan_ev_handler) { + status = scan_rx_ops->scan_ev_handler(psoc, event_info); + if (status != QDF_STATUS_SUCCESS) { + qdf_mem_free(event_info); + return -EINVAL; + } + } else { + qdf_mem_free(event_info); + return -EINVAL; + } + + return 0; +} + +int target_if_nlo_match_event_handler(ol_scn_t scn, uint8_t *data, + uint32_t len) +{ + struct scan_event_info *event_info; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_scan_rx_ops *scan_rx_ops; + QDF_STATUS status; + + if (!scn || !data) { + target_if_err("scn: 0x%pK, data: 0x%pK", scn, data); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("wmi_handle is NULL"); + return -EINVAL; + } + + event_info = qdf_mem_malloc(sizeof(*event_info)); + if (!event_info) + return -ENOMEM; + + if (wmi_extract_nlo_match_ev_param(wmi_handle, data, + &event_info->event)) { + target_if_err("Failed to extract WMI PNO match event"); + qdf_mem_free(event_info); + return -EINVAL; + } + + target_if_debug("PNO match event received for vdev %d", + event_info->event.vdev_id); + + scan_rx_ops = target_if_scan_get_rx_ops(psoc); + if (scan_rx_ops->scan_ev_handler) { + status = scan_rx_ops->scan_ev_handler(psoc, event_info); + if (status != QDF_STATUS_SUCCESS) { + qdf_mem_free(event_info); + return -EINVAL; + } + } else { + qdf_mem_free(event_info); + return -EINVAL; + } + + return 0; +} + +static QDF_STATUS +target_if_scan_register_pno_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_register_event( + wmi_handle, + wmi_nlo_match_event_id, + target_if_nlo_match_event_handler); + if (status) { + target_if_err("Failed to register nlo match event cb"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_register_event( + wmi_handle, + wmi_nlo_scan_complete_event_id, + target_if_nlo_complete_handler); + if (status) { + target_if_err("Failed to register nlo scan comp event cb"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +target_if_scan_unregister_pno_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_unregister_event( + wmi_handle, + wmi_nlo_match_event_id); + if (status) { + target_if_err("Failed to unregister nlo match event cb"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_unregister_event( + wmi_handle, + wmi_nlo_scan_complete_event_id); + if (status) { + target_if_err("Failed to unregister nlo scan comp event cb"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +target_if_pno_start(struct wlan_objmgr_psoc *psoc, + struct pno_scan_req_params *req) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_pno_start_cmd(wmi_handle, req); + if (status == QDF_STATUS_SUCCESS) { + if (req->mawc_params.enable) + status = wmi_unified_nlo_mawc_cmd(wmi_handle, + &req->mawc_params); + } + + return status; +} + +static QDF_STATUS +target_if_pno_stop(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id) +{ + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return QDF_STATUS_E_FAILURE; + } + + return wmi_unified_pno_stop_cmd(wmi_handle, vdev_id); +} + +#else + +static inline QDF_STATUS +target_if_scan_register_pno_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +target_if_scan_unregister_pno_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +target_if_pno_start(struct wlan_objmgr_psoc *psoc, + struct pno_scan_req_params *req) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +target_if_pno_stop(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id) +{ + return QDF_STATUS_SUCCESS; +} +#endif + + +QDF_STATUS +target_if_scan_register_event_handler(struct wlan_objmgr_psoc *psoc, void *arg) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_register_event( + wmi_handle, + wmi_scan_event_id, + target_if_scan_event_handler); + if (status) { + target_if_err("Failed to register Scan match event cb"); + return QDF_STATUS_E_FAILURE; + } + + status = target_if_scan_register_pno_event_handler(psoc, arg); + + return status; +} + +QDF_STATUS +target_if_scan_unregister_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_unregister_event( + wmi_handle, + wmi_scan_event_id); + if (status) { + target_if_err("Failed to unregister Scan match event cb"); + return QDF_STATUS_E_FAILURE; + } + + status = target_if_scan_unregister_pno_event_handler(psoc, arg); + + return status; +} + +QDF_STATUS +target_if_scan_start(struct wlan_objmgr_pdev *pdev, + struct scan_start_request *req) +{ + wmi_unified_t pdev_wmi_handle; + + pdev_wmi_handle = GET_WMI_HDL_FROM_PDEV(pdev); + if (!pdev_wmi_handle) { + target_if_err("Invalid PDEV WMI handle"); + return QDF_STATUS_E_FAILURE; + } + return wmi_unified_scan_start_cmd_send(pdev_wmi_handle, &req->scan_req); +} + +QDF_STATUS +target_if_scan_cancel(struct wlan_objmgr_pdev *pdev, + struct scan_cancel_param *req) +{ + wmi_unified_t pdev_wmi_handle; + + pdev_wmi_handle = GET_WMI_HDL_FROM_PDEV(pdev); + if (!pdev_wmi_handle) { + target_if_err("Invalid PDEV WMI handle"); + return QDF_STATUS_E_NULL_VALUE; + } + return wmi_unified_scan_stop_cmd_send(pdev_wmi_handle, req); +} + +QDF_STATUS +target_if_scan_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_scan_tx_ops *scan; + + scan = &tx_ops->scan; + if (!scan) { + target_if_err("Scan txops NULL"); + return QDF_STATUS_E_FAILURE; + } + + scan->scan_start = target_if_scan_start; + scan->scan_cancel = target_if_scan_cancel; + scan->pno_start = target_if_pno_start; + scan->pno_stop = target_if_pno_stop; + scan->scan_reg_ev_handler = target_if_scan_register_event_handler; + scan->scan_unreg_ev_handler = target_if_scan_unregister_event_handler; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +target_if_scan_set_max_active_scans(struct wlan_objmgr_psoc *psoc, + uint32_t max_active_scans) +{ + struct wlan_lmac_if_scan_rx_ops *scan_rx_ops; + QDF_STATUS status; + + scan_rx_ops = target_if_scan_get_rx_ops(psoc); + if (scan_rx_ops->scan_set_max_active_scans) { + status = scan_rx_ops->scan_set_max_active_scans(psoc, + max_active_scans); + } else { + target_if_err("scan_set_max_active_scans uninitialized"); + status = QDF_STATUS_E_FAULT; + } + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/son/inc/target_if_son.h b/drivers/staging/qca-wifi-host-cmn/target_if/son/inc/target_if_son.h new file mode 100644 index 0000000000000000000000000000000000000000..f63807da05fef93a69c8efd25cbf6b6d16147450 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/son/inc/target_if_son.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include +#include +#include +#include +#include +#include + +void target_if_son_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +bool son_ol_is_peer_inact(struct wlan_objmgr_peer *); + +u_int32_t son_ol_get_peer_rate(struct wlan_objmgr_peer *peer, u_int8_t type); + +int8_t son_ol_sanitize_util_invtl(struct wlan_objmgr_pdev *pdev, + u_int32_t *sample_period, + u_int32_t *num_of_sample); + +bool son_ol_enable(struct wlan_objmgr_pdev *pdev, bool enable); + +/* Function pointer to set overload status */ + +void son_ol_set_overload(struct wlan_objmgr_pdev *pdev, bool overload); + +/* Function pointer to set band steering parameters */ + +bool son_ol_set_params(struct wlan_objmgr_pdev *pdev, + u_int32_t inactivity_check_period, + u_int32_t inactivity_threshold_normal, + u_int32_t inactivity_threshold_overload); + +QDF_STATUS son_ol_send_null(struct wlan_objmgr_pdev *pdev, + u_int8_t *macaddr, + struct wlan_objmgr_vdev *vdev); + +int son_ol_lmac_create(struct wlan_objmgr_pdev *pdev); + + +int son_ol_lmac_destroy(struct wlan_objmgr_pdev *pdev); + + +void son_ol_rx_rssi_update(struct wlan_objmgr_pdev *pdev, u_int8_t *macaddres, + u_int8_t status, int8_t rssi, u_int8_t subtype); + +void son_ol_rx_rate_update(struct wlan_objmgr_pdev *pdev, u_int8_t *macaddres, + u_int8_t status, u_int32_t rateKbps); diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/son/src/target_if_son.c b/drivers/staging/qca-wifi-host-cmn/target_if/son/src/target_if_son.c new file mode 100644 index 0000000000000000000000000000000000000000..d41ab7104187369e767f052bf7e54b3cb1cadb27 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/son/src/target_if_son.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#if QCA_SUPPORT_SON + +u_int32_t son_ol_get_peer_rate(struct wlan_objmgr_peer *peer, u_int8_t type) +{ + return ol_if_peer_get_rate(peer, type); +} + + +bool son_ol_enable(struct wlan_objmgr_pdev *pdev, bool enable) +{ + return true; +} + +int8_t son_ol_sanitize_util_invtl(struct wlan_objmgr_pdev *pdev, + u_int32_t *sample_period, + u_int32_t *num_of_sample) +{ + return 0; +} + +QDF_STATUS son_ol_send_null(struct wlan_objmgr_pdev *pdev, + u_int8_t *macaddr, + struct wlan_objmgr_vdev *vdev) +{ + struct stats_request_params param = {0}; + struct wlan_objmgr_psoc *psoc = NULL; + wmi_unified_t wmi_handle; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) + return QDF_STATUS_E_FAILURE; + + param.vdev_id = wlan_vdev_get_id(vdev); + param.stats_id = WMI_HOST_REQUEST_INST_STAT; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_stats_request_send(wmi_handle, macaddr, ¶m); +} + +int son_ol_lmac_create(struct wlan_objmgr_pdev *pdev) +{ + return 0; +} + +int son_ol_lmac_destroy(struct wlan_objmgr_pdev *pdev) +{ + return 0; + +} + +void son_ol_rx_rssi_update(struct wlan_objmgr_pdev *pdev, u_int8_t *macaddres, + u_int8_t status, int8_t rssi, u_int8_t subtype) +{ + return; + +} + +void son_ol_rx_rate_update(struct wlan_objmgr_pdev *pdev, u_int8_t *macaddres, + u_int8_t status, u_int32_t rateKbps) +{ + return; +} + +void target_if_son_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + /* wlan son related function handler */ + tx_ops->son_tx_ops.son_enable = son_ol_enable; + tx_ops->son_tx_ops.lmac_create = son_ol_lmac_create; + tx_ops->son_tx_ops.lmac_destroy = son_ol_lmac_destroy; + tx_ops->son_tx_ops.son_send_null = son_ol_send_null; + tx_ops->son_tx_ops.son_rssi_update = son_ol_rx_rssi_update; + tx_ops->son_tx_ops.son_rate_update = son_ol_rx_rate_update; + tx_ops->son_tx_ops.son_sanity_util_intvl = son_ol_sanitize_util_invtl; + tx_ops->son_tx_ops.get_peer_rate = son_ol_get_peer_rate; + return; +} +#else +void target_if_son_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + return; +} +int8_t son_ol_sanitize_util_intvl(struct wlan_objmgr_pdev *pdev, + u_int32_t *sample_period, + u_int32_t *num_of_sample) +{ + return -EINVAL; + +} + +u_int32_t son_ol_get_peer_rate(struct wlan_objmgr_peer *peer, u_int8_t type) +{ + return 0; +} + + +bool son_ol_enable(struct wlan_objmgr_pdev *pdev, bool enable) +{ + return -EINVAL; + +} + +QDF_STATUS son_ol_send_null(struct wlan_objmgr_pdev *pdev, + u_int8_t *macaddr, + struct wlan_objmgr_vdev *vdev) +{ + return QDF_STATUS_SUCCESS; +} +int8_t son_ol_sanitize_util_invtl(struct wlan_objmgr_pdev *pdev, + u_int32_t *sample_period, + u_int32_t *num_of_sample) +{ + return 0; +} + +int son_ol_lmac_create(struct wlan_objmgr_pdev *pdev) +{ + return 0; +} + + +int son_ol_lmac_destroy(struct wlan_objmgr_pdev *pdev) +{ + return 0; + +} + + +void son_ol_rx_rssi_update(struct wlan_objmgr_pdev *pdev, u_int8_t *macaddres, + u_int8_t status, int8_t rssi, u_int8_t subtype) +{ + return; + +} + +void son_ol_rx_rate_update(struct wlan_objmgr_pdev *pdev, u_int8_t *macaddres, + u_int8_t status, u_int32_t rateKbps) +{ + return; +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral.c b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral.c new file mode 100644 index 0000000000000000000000000000000000000000..6dc413934eca84cf7157083f176d792d05e533de --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral.c @@ -0,0 +1,4522 @@ +/* + * Copyright (c) 2011,2017-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * @spectral_ops - Spectral function table, holds the Spectral functions that + * depend on whether the architecture is Direct Attach or Offload. This is used + * to populate the actual Spectral function table present in the Spectral + * module. + */ +struct target_if_spectral_ops spectral_ops; +int spectral_debug_level = DEBUG_SPECTRAL; + +static void target_if_spectral_get_firstvdev_pdev(struct wlan_objmgr_pdev *pdev, + void *obj, void *arg) +{ + struct wlan_objmgr_vdev *vdev = obj; + struct wlan_objmgr_vdev **first_vdev = arg; + + if (!(*first_vdev)) + *first_vdev = vdev; +} + +struct wlan_objmgr_vdev * +target_if_spectral_get_vdev(struct target_if_spectral *spectral) +{ + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_objmgr_vdev *first_vdev = NULL; + + qdf_assert_always(spectral); + pdev = spectral->pdev_obj; + qdf_assert_always(pdev); + + if (wlan_objmgr_pdev_try_get_ref(pdev, WLAN_SPECTRAL_ID) != + QDF_STATUS_SUCCESS) { + spectral_err("Unable to get pdev reference."); + return NULL; + } + + wlan_objmgr_pdev_iterate_obj_list(pdev, WLAN_VDEV_OP, + target_if_spectral_get_firstvdev_pdev, + &first_vdev, 0, WLAN_SPECTRAL_ID); + + wlan_objmgr_pdev_release_ref(pdev, WLAN_SPECTRAL_ID); + + if (!first_vdev) + return NULL; + + if (wlan_objmgr_vdev_try_get_ref(first_vdev, WLAN_SPECTRAL_ID) != + QDF_STATUS_SUCCESS) + first_vdev = NULL; + + + return first_vdev; +} + +/** + * target_if_send_vdev_spectral_configure_cmd() - Send WMI command to configure + * spectral parameters + * @spectral: Pointer to Spectral target_if internal private data + * @smode: Spectral scan mode + * @param: Pointer to spectral_config giving the Spectral configuration + * + * Return: QDF_STATUS_SUCCESS on success, negative error code on failure + */ +static int +target_if_send_vdev_spectral_configure_cmd(struct target_if_spectral *spectral, + enum spectral_scan_mode smode, + struct spectral_config *param) +{ + struct vdev_spectral_configure_params sparam; + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_objmgr_vdev *vdev = NULL; + + qdf_assert_always(spectral && param); + + pdev = spectral->pdev_obj; + + qdf_assert_always(pdev); + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) + return QDF_STATUS_E_NOENT; + + qdf_mem_zero(&sparam, sizeof(sparam)); + + sparam.vdev_id = wlan_vdev_get_id(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + sparam.count = param->ss_count; + sparam.period = param->ss_period; + sparam.spectral_pri = param->ss_spectral_pri; + sparam.fft_size = param->ss_fft_size; + sparam.gc_enable = param->ss_gc_ena; + sparam.restart_enable = param->ss_restart_ena; + sparam.noise_floor_ref = param->ss_noise_floor_ref; + sparam.init_delay = param->ss_init_delay; + sparam.nb_tone_thr = param->ss_nb_tone_thr; + sparam.str_bin_thr = param->ss_str_bin_thr; + sparam.wb_rpt_mode = param->ss_wb_rpt_mode; + sparam.rssi_rpt_mode = param->ss_rssi_rpt_mode; + sparam.rssi_thr = param->ss_rssi_thr; + sparam.pwr_format = param->ss_pwr_format; + sparam.rpt_mode = param->ss_rpt_mode; + sparam.bin_scale = param->ss_bin_scale; + sparam.dbm_adj = param->ss_dbm_adj; + sparam.chn_mask = param->ss_chn_mask; + sparam.mode = smode; + sparam.center_freq = param->ss_frequency; + + return spectral->param_wmi_cmd_ops.wmi_spectral_configure_cmd_send( + GET_WMI_HDL_FROM_PDEV(pdev), &sparam); +} + +/** + * target_if_send_vdev_spectral_enable_cmd() - Send WMI command to + * enable/disable Spectral + * @spectral: Pointer to Spectral target_if internal private data + * @smode: Spectral scan mode + * @is_spectral_active_valid: Flag to indicate if spectral activate (trigger) is + * valid + * @is_spectral_active: Value of spectral activate + * @is_spectral_enabled_valid: Flag to indicate if spectral enable is valid + * @is_spectral_enabled: Value of spectral enable + * + * Return: QDF_STATUS_SUCCESS on success, negative error code on failure + */ +static int +target_if_send_vdev_spectral_enable_cmd(struct target_if_spectral *spectral, + enum spectral_scan_mode smode, + uint8_t is_spectral_active_valid, + uint8_t is_spectral_active, + uint8_t is_spectral_enabled_valid, + uint8_t is_spectral_enabled) +{ + struct vdev_spectral_enable_params param; + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_objmgr_vdev *vdev = NULL; + + qdf_assert_always(spectral); + + pdev = spectral->pdev_obj; + + qdf_assert_always(pdev); + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) + return QDF_STATUS_E_NOENT; + + qdf_mem_zero(¶m, sizeof(param)); + + param.vdev_id = wlan_vdev_get_id(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + param.active_valid = is_spectral_active_valid; + param.enabled_valid = is_spectral_enabled_valid; + param.active = is_spectral_active; + param.enabled = is_spectral_enabled; + param.mode = smode; + + return spectral->param_wmi_cmd_ops.wmi_spectral_enable_cmd_send( + GET_WMI_HDL_FROM_PDEV(pdev), ¶m); +} + +/** + * target_if_spectral_info_init_defaults() - Helper function to load defaults + * for Spectral information (parameters and state) into cache. + * @spectral: Pointer to Spectral target_if internal private data + * @smode: Spectral scan mode + * + * It is assumed that the caller has obtained the requisite lock if applicable. + * Note that this is currently treated as a temporary function. Ideally, we + * would like to get defaults from the firmware. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure + */ +static QDF_STATUS +target_if_spectral_info_init_defaults(struct target_if_spectral *spectral, + enum spectral_scan_mode smode) +{ + struct target_if_spectral_param_state_info *info; + struct wlan_objmgr_vdev *vdev = NULL; + + if (smode >= SPECTRAL_SCAN_MODE_MAX) { + spectral_err("Invalid Spectral mode %u", smode); + return QDF_STATUS_E_FAILURE; + } + + info = &spectral->param_info[smode]; + + /* State */ + info->osps_cache.osc_spectral_active = SPECTRAL_SCAN_ACTIVE_DEFAULT; + + info->osps_cache.osc_spectral_enabled = SPECTRAL_SCAN_ENABLE_DEFAULT; + + /* Parameters */ + info->osps_cache.osc_params.ss_count = SPECTRAL_SCAN_COUNT_DEFAULT; + + if (spectral->spectral_gen == SPECTRAL_GEN3) + info->osps_cache.osc_params.ss_period = + SPECTRAL_SCAN_PERIOD_GEN_III_DEFAULT; + else + info->osps_cache.osc_params.ss_period = + SPECTRAL_SCAN_PERIOD_GEN_II_DEFAULT; + + info->osps_cache.osc_params.ss_spectral_pri = + SPECTRAL_SCAN_PRIORITY_DEFAULT; + + info->osps_cache.osc_params.ss_fft_size = + SPECTRAL_SCAN_FFT_SIZE_DEFAULT; + + info->osps_cache.osc_params.ss_gc_ena = SPECTRAL_SCAN_GC_ENA_DEFAULT; + + info->osps_cache.osc_params.ss_restart_ena = + SPECTRAL_SCAN_RESTART_ENA_DEFAULT; + + info->osps_cache.osc_params.ss_noise_floor_ref = + SPECTRAL_SCAN_NOISE_FLOOR_REF_DEFAULT; + + info->osps_cache.osc_params.ss_init_delay = + SPECTRAL_SCAN_INIT_DELAY_DEFAULT; + + info->osps_cache.osc_params.ss_nb_tone_thr = + SPECTRAL_SCAN_NB_TONE_THR_DEFAULT; + + info->osps_cache.osc_params.ss_str_bin_thr = + SPECTRAL_SCAN_STR_BIN_THR_DEFAULT; + + info->osps_cache.osc_params.ss_wb_rpt_mode = + SPECTRAL_SCAN_WB_RPT_MODE_DEFAULT; + + info->osps_cache.osc_params.ss_rssi_rpt_mode = + SPECTRAL_SCAN_RSSI_RPT_MODE_DEFAULT; + + info->osps_cache.osc_params.ss_rssi_thr = + SPECTRAL_SCAN_RSSI_THR_DEFAULT; + + info->osps_cache.osc_params.ss_pwr_format = + SPECTRAL_SCAN_PWR_FORMAT_DEFAULT; + + info->osps_cache.osc_params.ss_rpt_mode = + SPECTRAL_SCAN_RPT_MODE_DEFAULT; + + info->osps_cache.osc_params.ss_bin_scale = + SPECTRAL_SCAN_BIN_SCALE_DEFAULT; + + info->osps_cache.osc_params.ss_dbm_adj = SPECTRAL_SCAN_DBM_ADJ_DEFAULT; + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) + return QDF_STATUS_E_NOENT; + + info->osps_cache.osc_params.ss_chn_mask = + wlan_vdev_mlme_get_rxchainmask(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + info->osps_cache.osc_params.ss_short_report = + SPECTRAL_SCAN_SHORT_REPORT_DEFAULT; + + info->osps_cache.osc_params.ss_fft_period = + SPECTRAL_SCAN_FFT_PERIOD_DEFAULT; + + info->osps_cache.osc_params.ss_frequency = + SPECTRAL_SCAN_FREQUENCY_DEFAULT; + + /* The cache is now valid */ + info->osps_cache.osc_is_valid = 1; + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_log_read_spectral_active() - Helper function to log whether + * spectral is active after reading cache + * @function_name: Function name + * @output: whether spectral is active or not + * + * Helper function to log whether spectral is active after reading cache + * + * Return: none + */ +static void +target_if_log_read_spectral_active( + const char *function_name, + unsigned char output) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_ACTIVE. Returning val=%u", + function_name, output); +} + +/** + * target_if_log_read_spectral_enabled() - Helper function to log whether + * spectral is enabled after reading cache + * @function_name: Function name + * @output: whether spectral is enabled or not + * + * Helper function to log whether spectral is enabled after reading cache + * + * Return: none + */ +static void +target_if_log_read_spectral_enabled( + const char *function_name, + unsigned char output) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_ENABLED. Returning val=%u", + function_name, output); +} + +/** + * target_if_log_read_spectral_enabled() - Helper function to log spectral + * parameters after reading cache + * @function_name: Function name + * @pparam: Spectral parameters + * + * Helper function to log spectral parameters after reading cache + * + * Return: none + */ +static void +target_if_log_read_spectral_params( + const char *function_name, + struct spectral_config *pparam) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_PARAMS. Returning following params:\nss_count = %u\nss_period = %u\nss_spectral_pri = %u\nss_fft_size = %u\nss_gc_ena = %u\nss_restart_ena = %u\nss_noise_floor_ref = %d\nss_init_delay = %u\nss_nb_tone_thr = %u\nss_str_bin_thr = %u\nss_wb_rpt_mode = %u\nss_rssi_rpt_mode = %u\nss_rssi_thr = %d\nss_pwr_format = %u\nss_rpt_mode = %u\nss_bin_scale = %u\nss_dbm_adj = %u\nss_chn_mask = %u\nss_frequency=%u\n", + function_name, + pparam->ss_count, + pparam->ss_period, + pparam->ss_spectral_pri, + pparam->ss_fft_size, + pparam->ss_gc_ena, + pparam->ss_restart_ena, + (int8_t)pparam->ss_noise_floor_ref, + pparam->ss_init_delay, + pparam->ss_nb_tone_thr, + pparam->ss_str_bin_thr, + pparam->ss_wb_rpt_mode, + pparam->ss_rssi_rpt_mode, + (int8_t)pparam->ss_rssi_thr, + pparam->ss_pwr_format, + pparam->ss_rpt_mode, + pparam->ss_bin_scale, + pparam->ss_dbm_adj, + pparam->ss_chn_mask, + pparam->ss_frequency); +} + +/** + * target_if_log_read_spectral_active_catch_validate() - Helper function to + * log whether spectral is active after intializing the cache + * @function_name: Function name + * @output: whether spectral is active or not + * + * Helper function to log whether spectral is active after intializing cache + * + * Return: none + */ +static void +target_if_log_read_spectral_active_catch_validate( + const char *function_name, + unsigned char output) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_ACTIVE on initial cache validation\nReturning val=%u", + function_name, output); +} + +/** + * target_if_log_read_spectral_enabled_catch_validate() - Helper function to + * log whether spectral is enabled after intializing the cache + * @function_name: Function name + * @output: whether spectral is enabled or not + * + * Helper function to log whether spectral is enabled after intializing cache + * + * Return: none + */ +static void +target_if_log_read_spectral_enabled_catch_validate( + const char *function_name, + unsigned char output) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_ENABLED on initial cache validation\nReturning val=%u\n", + function_name, output); +} + +/** + * target_if_log_read_spectral_params_catch_validate() - Helper function to + * log spectral parameters after intializing the cache + * @function_name: Function name + * @pparam: Spectral parameters + * + * Helper function to log spectral parameters after intializing the cache + * + * Return: none + */ +static void +target_if_log_read_spectral_params_catch_validate( + const char *function_name, + struct spectral_config *pparam) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_PARAMS on initial cache validation\nReturning following params:\nss_count = %u\nss_period = %u\nss_spectral_pri = %u\nss_fft_size = %u\nss_gc_ena = %u\nss_restart_ena = %u\nss_noise_floor_ref = %d\nss_init_delay = %u\nss_nb_tone_thr = %u\nss_str_bin_thr = %u\nss_wb_rpt_mode = %u\nss_rssi_rpt_mode = %u\nss_rssi_thr = %d\nss_pwr_format = %u\nss_rpt_mode = %u\nss_bin_scale = %u\nss_dbm_adj = %u\nss_chn_mask = %u", + function_name, + pparam->ss_count, + pparam->ss_period, + pparam->ss_spectral_pri, + pparam->ss_fft_size, + pparam->ss_gc_ena, + pparam->ss_restart_ena, + (int8_t)pparam->ss_noise_floor_ref, + pparam->ss_init_delay, + pparam->ss_nb_tone_thr, + pparam->ss_str_bin_thr, + pparam->ss_wb_rpt_mode, + pparam->ss_rssi_rpt_mode, + (int8_t)pparam->ss_rssi_thr, + pparam->ss_pwr_format, + pparam->ss_rpt_mode, + pparam->ss_bin_scale, + pparam->ss_dbm_adj, pparam->ss_chn_mask); +} + +/** + * target_if_spectral_info_read() - Read spectral information from the cache. + * @spectral: Pointer to Spectral target_if internal private data + * @smode: Spectral scan mode + * @specifier: target_if_spectral_info enumeration specifying which + * information is required + * @output: Void output pointer into which the information will be read + * @output_len: size of object pointed to by output pointer + * + * Read spectral parameters or the desired state information from the cache. + * + * Return: 0 on success, negative error code on failure + */ +static int +target_if_spectral_info_read( + struct target_if_spectral *spectral, + enum spectral_scan_mode smode, + enum target_if_spectral_info specifier, + void *output, int output_len) +{ + /* + * Note: This function is designed to be able to accommodate + * WMI reads for defaults, non-cacheable information, etc + * if required. + */ + struct target_if_spectral_param_state_info *info; + int is_cacheable = 0; + int init_def_retval = 0; + + if (smode >= SPECTRAL_SCAN_MODE_MAX) { + spectral_err("Invalid Spectral mode %u", smode); + return -EINVAL; + } + info = &spectral->param_info[smode]; + + if (!output) + return -EINVAL; + + switch (specifier) { + case TARGET_IF_SPECTRAL_INFO_ACTIVE: + if (output_len != sizeof(info->osps_cache.osc_spectral_active)) + return -EINVAL; + is_cacheable = 1; + break; + + case TARGET_IF_SPECTRAL_INFO_ENABLED: + if (output_len != sizeof(info->osps_cache.osc_spectral_enabled)) + return -EINVAL; + is_cacheable = 1; + break; + + case TARGET_IF_SPECTRAL_INFO_PARAMS: + if (output_len != sizeof(info->osps_cache.osc_params)) + return -EINVAL; + is_cacheable = 1; + break; + + default: + spectral_err("Unknown target_if_spectral_info specifier"); + return -EINVAL; + } + + qdf_spin_lock(&info->osps_lock); + + if (is_cacheable) { + if (info->osps_cache.osc_is_valid) { + switch (specifier) { + case TARGET_IF_SPECTRAL_INFO_ACTIVE: + qdf_mem_copy( + output, + &info->osps_cache.osc_spectral_active, + sizeof(info->osps_cache.osc_spectral_active)); + + target_if_log_read_spectral_active( + __func__, + *((unsigned char *)output)); + break; + + case TARGET_IF_SPECTRAL_INFO_ENABLED: + qdf_mem_copy( + output, + &info->osps_cache.osc_spectral_enabled, + sizeof( + info->osps_cache.osc_spectral_enabled)); + + target_if_log_read_spectral_enabled( + __func__, + *((unsigned char *)output)); + break; + + case TARGET_IF_SPECTRAL_INFO_PARAMS: + qdf_mem_copy( + output, + &info->osps_cache.osc_params, + sizeof(info->osps_cache.osc_params)); + + target_if_log_read_spectral_params( + __func__, + (struct spectral_config *)output); + break; + + default: + /* We can't reach this point */ + break; + } + qdf_spin_unlock(&info->osps_lock); + return 0; + } + } + + /* Cache is invalid */ + + /* + * If WMI Reads are implemented to fetch defaults/non-cacheable info, + * then the below implementation will change + */ + init_def_retval = + target_if_spectral_info_init_defaults(spectral, smode); + if (init_def_retval != QDF_STATUS_SUCCESS) { + qdf_spin_unlock(&info->osps_lock); + if (init_def_retval == QDF_STATUS_E_NOENT) + return -ENOENT; + else + return -EINVAL; + } + /* target_if_spectral_info_init_defaults() has set cache to valid */ + + switch (specifier) { + case TARGET_IF_SPECTRAL_INFO_ACTIVE: + qdf_mem_copy(output, + &info->osps_cache.osc_spectral_active, + sizeof(info->osps_cache.osc_spectral_active)); + + target_if_log_read_spectral_active_catch_validate( + __func__, + *((unsigned char *)output)); + break; + + case TARGET_IF_SPECTRAL_INFO_ENABLED: + qdf_mem_copy(output, + &info->osps_cache.osc_spectral_enabled, + sizeof(info->osps_cache.osc_spectral_enabled)); + + target_if_log_read_spectral_enabled_catch_validate( + __func__, + *((unsigned char *)output)); + break; + + case TARGET_IF_SPECTRAL_INFO_PARAMS: + qdf_mem_copy(output, + &info->osps_cache.osc_params, + sizeof(info->osps_cache.osc_params)); + + target_if_log_read_spectral_params_catch_validate( + __func__, + (struct spectral_config *)output); + + break; + + default: + /* We can't reach this point */ + break; + } + + qdf_spin_unlock(&info->osps_lock); + + return 0; +} + +/** + * target_if_log_write_spectral_active() - Helper function to log inputs and + * return value of call to configure the Spectral 'active' configuration, + * TARGET_IF_SPECTRAL_INFO_ACTIVE into firmware + * @function_name: Function name in which this is called + * @pval: whether spectral is active or not + * @ret: return value of the firmware write function + * + * Return: none + */ +static void +target_if_log_write_spectral_active( + const char *function_name, + uint8_t pval, + int ret) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_ACTIVE with val=%u status=%d", + function_name, pval, ret); +} + +/** + * target_if_log_write_spectral_enabled() - Helper function to log inputs and + * return value of call to configure the Spectral 'enabled' configuration, + * TARGET_IF_SPECTRAL_INFO_ENABLED into firmware + * @function_name: Function name in which this is called + * @pval: whether spectral is enabled or not + * @ret: return value of the firmware write function + * + * Return: none + */ +static void +target_if_log_write_spectral_enabled( + const char *function_name, + uint8_t pval, + int ret) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_ENABLED with val=%u status=%d", + function_name, pval, ret); +} + +/** + * target_if_log_write_spectral_params() - Helper function to log inputs and + * return value of call to configure Spectral parameters, + * TARGET_IF_SPECTRAL_INFO_PARAMS into firmware + * @param: Spectral parameters + * @function_name: Function name in which this is called + * @ret: return value of the firmware write function + * + * Return: none + */ +static void +target_if_log_write_spectral_params( + struct spectral_config *param, + const char *function_name, + int ret) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_PARAMS. Params:\nss_count = %u\nss_period = %u\nss_spectral_pri = %u\nss_fft_size = %u\nss_gc_ena = %u\nss_restart_ena = %u\nss_noise_floor_ref = %d\nss_init_delay = %u\nss_nb_tone_thr = %u\nss_str_bin_thr = %u\nss_wb_rpt_mode = %u\nss_rssi_rpt_mode = %u\nss_rssi_thr = %d\nss_pwr_format = %u\nss_rpt_mode = %u\nss_bin_scale = %u\nss_dbm_adj = %u\nss_chn_mask = %u\nss_frequency=%u\nstatus = %d", + function_name, + param->ss_count, + param->ss_period, + param->ss_spectral_pri, + param->ss_fft_size, + param->ss_gc_ena, + param->ss_restart_ena, + (int8_t)param->ss_noise_floor_ref, + param->ss_init_delay, + param->ss_nb_tone_thr, + param->ss_str_bin_thr, + param->ss_wb_rpt_mode, + param->ss_rssi_rpt_mode, + (int8_t)param->ss_rssi_thr, + param->ss_pwr_format, + param->ss_rpt_mode, + param->ss_bin_scale, + param->ss_dbm_adj, + param->ss_chn_mask, + param->ss_frequency, + ret); +} + +/** + * target_if_spectral_info_write() - Write Spectral information to the + * firmware, and update cache + * @spectral: Pointer to Spectral target_if internal private data + * @smode: Spectral scan mode + * @specifier: target_if_spectral_info enumeration specifying which + * information is involved + * @input: void input pointer containing the information to be written + * @input_len: size of object pointed to by input pointer + * + * Write Spectral parameters or the desired state information to + * the firmware, and update cache + * + * Return: 0 on success, negative error code on failure + */ +static int +target_if_spectral_info_write( + struct target_if_spectral *spectral, + enum spectral_scan_mode smode, + enum target_if_spectral_info specifier, + void *input, int input_len) +{ + struct target_if_spectral_param_state_info *info; + int ret; + uint8_t *pval = NULL; + struct spectral_config *param = NULL; + + if (smode >= SPECTRAL_SCAN_MODE_MAX) { + spectral_err("Invalid Spectral mode %u", smode); + return -EINVAL; + } + info = &spectral->param_info[smode]; + + if (!input) + return -EINVAL; + + switch (specifier) { + case TARGET_IF_SPECTRAL_INFO_ACTIVE: + if (input_len != sizeof(info->osps_cache.osc_spectral_active)) + return -EINVAL; + + pval = (uint8_t *)input; + + qdf_spin_lock(&info->osps_lock); + ret = target_if_send_vdev_spectral_enable_cmd(spectral, smode, + 1, *pval, 0, 0); + + target_if_log_write_spectral_active( + __func__, + *pval, + ret); + + if (ret < 0) { + spectral_err("target_if_send_vdev_spectral_enable_cmd failed with error=%d", + ret); + qdf_spin_unlock(&info->osps_lock); + return ret; + } + + info->osps_cache.osc_spectral_active = *pval; + + /* The cache is now valid */ + info->osps_cache.osc_is_valid = 1; + + qdf_spin_unlock(&info->osps_lock); + break; + + case TARGET_IF_SPECTRAL_INFO_ENABLED: + if (input_len != sizeof(info->osps_cache.osc_spectral_enabled)) + return -EINVAL; + + pval = (uint8_t *)input; + + qdf_spin_lock(&info->osps_lock); + ret = target_if_send_vdev_spectral_enable_cmd(spectral, smode, + 0, 0, 1, *pval); + + target_if_log_write_spectral_enabled( + __func__, + *pval, + ret); + + if (ret < 0) { + spectral_err("target_if_send_vdev_spectral_enable_cmd failed with error=%d", + ret); + qdf_spin_unlock(&info->osps_lock); + return ret; + } + + info->osps_cache.osc_spectral_enabled = *pval; + + /* The cache is now valid */ + info->osps_cache.osc_is_valid = 1; + + qdf_spin_unlock(&info->osps_lock); + break; + + case TARGET_IF_SPECTRAL_INFO_PARAMS: + if (input_len != sizeof(info->osps_cache.osc_params)) + return -EINVAL; + + param = (struct spectral_config *)input; + + qdf_spin_lock(&info->osps_lock); + ret = target_if_send_vdev_spectral_configure_cmd(spectral, + smode, param); + + target_if_log_write_spectral_params( + param, + __func__, + ret); + + if (ret < 0) { + spectral_err("target_if_send_vdev_spectral_configure_cmd failed with error=%d", + ret); + qdf_spin_unlock(&info->osps_lock); + return ret; + } + + qdf_mem_copy(&info->osps_cache.osc_params, + param, sizeof(info->osps_cache.osc_params)); + + /* The cache is now valid */ + info->osps_cache.osc_is_valid = 1; + + qdf_spin_unlock(&info->osps_lock); + break; + + default: + spectral_err("Unknown target_if_spectral_info specifier"); + return -EINVAL; + } + + return 0; +} + +/** + * target_if_spectral_get_tsf64() - Function to get the TSF value + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Get the last TSF received in WMI buffer + * + * Return: TSF value + */ +static uint64_t +target_if_spectral_get_tsf64(void *arg) +{ + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + + return spectral->tsf64; +} + +/** + * target_if_spectral_get_capability() - Function to get whether a + * given Spectral hardware capability is available + * @arg: Pointer to handle for Spectral target_if internal private data + * @type: Spectral hardware capability type + * + * Get whether a given Spectral hardware capability is available + * + * Return: True if the capability is available, false if the capability is not + * available + */ +uint32_t +target_if_spectral_get_capability(void *arg, enum spectral_capability_type type) +{ + int status = STATUS_FAIL; + + switch (type) { + case SPECTRAL_CAP_PHYDIAG: + case SPECTRAL_CAP_RADAR: + case SPECTRAL_CAP_SPECTRAL_SCAN: + case SPECTRAL_CAP_ADVNCD_SPECTRAL_SCAN: + status = STATUS_PASS; + break; + default: + status = STATUS_FAIL; + } + return status; +} + +/** + * target_if_spectral_set_rxfilter() - Set the RX Filter before Spectral start + * @arg: Pointer to handle for Spectral target_if internal private data + * @rxfilter: Rx filter to be used + * + * Note: This is only a placeholder function. It is not currently required since + * FW should be taking care of setting the required filters. + * + * Return: 0 + */ +uint32_t +target_if_spectral_set_rxfilter(void *arg, int rxfilter) +{ + /* + * Will not be required since enabling of spectral in firmware + * will take care of this + */ + return 0; +} + +/** + * target_if_spectral_get_rxfilter() - Get the current RX Filter settings + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Note: This is only a placeholder function. It is not currently required since + * FW should be taking care of setting the required filters. + * + * Return: 0 + */ +uint32_t +target_if_spectral_get_rxfilter(void *arg) +{ + /* + * Will not be required since enabling of spectral in firmware + * will take care of this + */ + return 0; +} + +/** + * target_if_sops_is_spectral_active() - Get whether Spectral is active + * @arg: Pointer to handle for Spectral target_if internal private data + * @smode: Spectral scan mode + * + * Function to check whether Spectral is active + * + * Return: True if Spectral is active, false if Spectral is not active + */ +uint32_t +target_if_sops_is_spectral_active(void *arg, enum spectral_scan_mode smode) +{ + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + uint8_t val = 0; + int ret; + + ret = target_if_spectral_info_read( + spectral, + smode, + TARGET_IF_SPECTRAL_INFO_ACTIVE, + &val, sizeof(val)); + + if (ret != 0) { + /* + * Could not determine if Spectral is active. + * Return false as a safe value. + * XXX: Consider changing the function prototype + * to be able to indicate failure to fetch value. + */ + return 0; + } + + return val; +} + +/** + * target_if_sops_is_spectral_enabled() - Get whether Spectral is enabled + * @arg: Pointer to handle for Spectral target_if internal private data + * @smode: Spectral scan mode + * + * Function to check whether Spectral is enabled + * + * Return: True if Spectral is enabled, false if Spectral is not enabled + */ +uint32_t +target_if_sops_is_spectral_enabled(void *arg, enum spectral_scan_mode smode) +{ + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + uint8_t val = 0; + int ret; + + ret = target_if_spectral_info_read( + spectral, + smode, + TARGET_IF_SPECTRAL_INFO_ENABLED, + &val, sizeof(val)); + + if (ret != 0) { + /* + * Could not determine if Spectral is enabled. + * Return false as a safe value. + * XXX: Consider changing the function prototype + * to be able to indicate failure to fetch value. + */ + return 0; + } + + return val; +} + +/** + * target_if_sops_start_spectral_scan() - Start Spectral scan + * @arg: Pointer to handle for Spectral target_if internal private data + * @smode: Spectral scan mode + * @err: Spectral error code + * + * Function to start spectral scan + * + * Return: 0 on success else failure + */ +uint32_t +target_if_sops_start_spectral_scan(void *arg, enum spectral_scan_mode smode, + enum spectral_cp_error_code *err) +{ + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + uint8_t val = 1; + uint8_t enabled = 0; + int ret; + + ret = target_if_spectral_info_read( + spectral, + smode, + TARGET_IF_SPECTRAL_INFO_ENABLED, + &enabled, sizeof(enabled)); + + if (ret != 0) { + /* + * Could not determine if Spectral is enabled. Assume we need + * to enable it + */ + enabled = 0; + } + + if (!enabled) { + ret = target_if_spectral_info_write( + spectral, + smode, + TARGET_IF_SPECTRAL_INFO_ENABLED, + &val, sizeof(val)); + + if (ret != 0) + return ret; + } + + ret = target_if_spectral_info_write( + spectral, + smode, + TARGET_IF_SPECTRAL_INFO_ACTIVE, + &val, sizeof(val)); + + if (ret != 0) + return ret; + + return 0; +} + +/** + * target_if_sops_stop_spectral_scan() - Stop Spectral scan + * @arg: Pointer to handle for Spectral target_if internal private data + * @smode: Spectral scan mode + * + * Function to stop spectral scan + * + * Return: 0 on success else failure + */ +uint32_t +target_if_sops_stop_spectral_scan(void *arg, enum spectral_scan_mode smode) +{ + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + uint8_t val = 0; + int tempret, ret = 0; + uint8_t enabled = 0; + + tempret = target_if_spectral_info_read( + spectral, + smode, + TARGET_IF_SPECTRAL_INFO_ENABLED, + &enabled, sizeof(enabled)); + + if (tempret) + /* + * Could not determine if Spectral is enabled. Assume scan is + * not in progress + */ + enabled = 0; + + /* if scan is not enabled, no need to send stop to FW */ + if (!enabled) + return -EPERM; + + tempret = target_if_spectral_info_write( + spectral, + smode, + TARGET_IF_SPECTRAL_INFO_ACTIVE, + &val, sizeof(val)); + + if (tempret != 0) + ret = tempret; + + tempret = target_if_spectral_info_write( + spectral, + smode, + TARGET_IF_SPECTRAL_INFO_ENABLED, + &val, sizeof(val)); + + if (tempret != 0) + ret = tempret; + + return ret; +} + +/** + * target_if_spectral_get_extension_channel() - Get the Extension channel + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Function to get the current Extension channel (in MHz) + * + * Return: Current Extension channel (in MHz) on success, 0 on failure or if + * extension channel is not present. + */ +uint32_t +target_if_spectral_get_extension_channel(void *arg) +{ + /* + * XXX: Once we expand to use cases where Spectral could be activated + * without a channel being set to VDEV, we need to consider returning a + * negative value in case of failure and having all callers handle this. + */ + + struct target_if_spectral *spectral = NULL; + struct wlan_objmgr_vdev *vdev = NULL; + uint16_t sec20chan_freq = 0; + + qdf_assert_always(arg); + spectral = (struct target_if_spectral *)arg; + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) + return 0; + + if (target_if_vdev_get_sec20chan_freq_mhz(vdev, &sec20chan_freq) < 0) { + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + return 0; + } + + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + return sec20chan_freq; +} + +/** + * target_if_spectral_get_current_channel() - Get the current channel + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Function to get the current channel (in MHz) + * + * Return: Current channel (in MHz) on success, 0 on failure + */ +uint32_t +target_if_spectral_get_current_channel(void *arg) +{ + /* + * XXX: Once we expand to use cases where Spectral could be activated + * without a channel being set to VDEV, we need to consider returning a + * negative value in case of failure and having all callers handle this. + */ + + struct target_if_spectral *spectral = NULL; + int16_t chan_freq = 0; + struct wlan_objmgr_vdev *vdev = NULL; + + qdf_assert_always(arg); + spectral = (struct target_if_spectral *)arg; + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) + return 0; + + chan_freq = target_if_vdev_get_chan_freq(vdev); + if (chan_freq < 0) { + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + return 0; + } + + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + return chan_freq; +} + +/** + * target_if_spectral_reset_hw() - Reset the hardware + * @arg: Pointer to handle for Spectral target_if internal private data + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +uint32_t +target_if_spectral_reset_hw(void *arg) +{ + not_yet_implemented(); + return 0; +} + +/** + * target_if_spectral_get_chain_noise_floor() - Get the Chain noise floor from + * Noisefloor history buffer + * @arg: Pointer to handle for Spectral target_if internal private data + * @nf_buf: Pointer to buffer into which chain Noise Floor data should be copied + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +uint32_t +target_if_spectral_get_chain_noise_floor(void *arg, int16_t *nf_buf) +{ + not_yet_implemented(); + return 0; +} + +/** + * target_if_spectral_get_ext_noisefloor() - Get the extension channel + * noisefloor + * @arg: Pointer to handle for Spectral target_if internal private data + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +int8_t +target_if_spectral_get_ext_noisefloor(void *arg) +{ + not_yet_implemented(); + return 0; +} + +/** + * target_if_spectral_get_ctl_noisefloor() - Get the control channel noisefloor + * @arg: Pointer to handle for Spectral target_if internal private data + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +int8_t +target_if_spectral_get_ctl_noisefloor(void *arg) +{ + not_yet_implemented(); + return 0; +} + +/** + * target_if_spectral_sops_configure_params() - Configure user supplied Spectral + * parameters + * @arg: Pointer to handle for Spectral target_if internal private data + * @params: Spectral parameters + * @smode: Spectral scan mode + * + * Function to configure spectral parameters + * + * Return: 0 on success else failure + */ +uint32_t +target_if_spectral_sops_configure_params( + void *arg, struct spectral_config *params, + enum spectral_scan_mode smode) +{ + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + + return target_if_spectral_info_write( + spectral, + smode, + TARGET_IF_SPECTRAL_INFO_PARAMS, + params, sizeof(*params)); +} + +/** + * target_if_spectral_sops_get_params() - Get user configured Spectral + * parameters + * @arg: Pointer to handle for Spectral target_if internal private data + * @params: Pointer to buffer into which Spectral parameters should be copied + * @smode: Spectral scan mode + * + * Function to get the configured spectral parameters + * + * Return: 0 on success else failure + */ +uint32_t +target_if_spectral_sops_get_params(void *arg, struct spectral_config *params, + enum spectral_scan_mode smode) +{ + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + + return target_if_spectral_info_read( + spectral, + smode, + TARGET_IF_SPECTRAL_INFO_PARAMS, + params, sizeof(*params)); +} + +/** + * target_if_spectral_get_ent_mask() - Get enterprise mask + * @arg: Pointer to handle for Spectral target_if internal private data + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +static uint32_t +target_if_spectral_get_ent_mask(void *arg) +{ + not_yet_implemented(); + return 0; +} + +/** + * target_if_spectral_get_macaddr() - Get radio MAC address + * @arg: Pointer to handle for Spectral target_if internal private data + * @addr: Pointer to buffer into which MAC address should be copied + * + * Function to get the MAC address of the pdev + * + * Return: 0 on success, -1 on failure + */ +static uint32_t +target_if_spectral_get_macaddr(void *arg, char *addr) +{ + uint8_t *myaddr = NULL; + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + struct wlan_objmgr_pdev *pdev = NULL; + + pdev = spectral->pdev_obj; + + wlan_pdev_obj_lock(pdev); + myaddr = wlan_pdev_get_hw_macaddr(pdev); + wlan_pdev_obj_unlock(pdev); + qdf_mem_copy(addr, myaddr, QDF_MAC_ADDR_SIZE); + + return 0; +} + +/** + * target_if_init_spectral_param_min_max() - Initialize Spectral parameter + * min and max values + * + * @param_min_max: Pointer to Spectral parameter min and max structure + * @gen: Spectral HW generation + * @target_type: Target type + * + * Initialize Spectral parameter min and max values + * + * Return: QDF_STATUS + */ +static QDF_STATUS +target_if_init_spectral_param_min_max( + struct spectral_param_min_max *param_min_max, + enum spectral_gen gen, uint32_t target_type) +{ + switch (gen) { + case SPECTRAL_GEN3: + param_min_max->fft_size_min = SPECTRAL_PARAM_FFT_SIZE_MIN_GEN3; + param_min_max->fft_size_max[CH_WIDTH_20MHZ] = + SPECTRAL_PARAM_FFT_SIZE_MAX_GEN3_DEFAULT; + if (target_type == TARGET_TYPE_QCN9000) { + param_min_max->fft_size_max[CH_WIDTH_40MHZ] = + SPECTRAL_PARAM_FFT_SIZE_MAX_GEN3_QCN9000; + param_min_max->fft_size_max[CH_WIDTH_80MHZ] = + SPECTRAL_PARAM_FFT_SIZE_MAX_GEN3_QCN9000; + param_min_max->fft_size_max[CH_WIDTH_160MHZ] = + SPECTRAL_PARAM_FFT_SIZE_MAX_GEN3_QCN9000; + param_min_max->fft_size_max[CH_WIDTH_80P80MHZ] = + SPECTRAL_PARAM_FFT_SIZE_MAX_GEN3_QCN9000; + } else { + param_min_max->fft_size_max[CH_WIDTH_40MHZ] = + SPECTRAL_PARAM_FFT_SIZE_MAX_GEN3_DEFAULT; + param_min_max->fft_size_max[CH_WIDTH_80MHZ] = + SPECTRAL_PARAM_FFT_SIZE_MAX_GEN3_DEFAULT; + param_min_max->fft_size_max[CH_WIDTH_160MHZ] = + SPECTRAL_PARAM_FFT_SIZE_MAX_GEN3_DEFAULT; + param_min_max->fft_size_max[CH_WIDTH_80P80MHZ] = + SPECTRAL_PARAM_FFT_SIZE_MAX_GEN3_DEFAULT; + } + break; + + case SPECTRAL_GEN2: + param_min_max->fft_size_min = SPECTRAL_PARAM_FFT_SIZE_MIN_GEN2; + param_min_max->fft_size_max[CH_WIDTH_20MHZ] = + SPECTRAL_PARAM_FFT_SIZE_MAX_GEN2; + param_min_max->fft_size_max[CH_WIDTH_40MHZ] = + SPECTRAL_PARAM_FFT_SIZE_MAX_GEN2; + param_min_max->fft_size_max[CH_WIDTH_80MHZ] = + SPECTRAL_PARAM_FFT_SIZE_MAX_GEN2; + param_min_max->fft_size_max[CH_WIDTH_80P80MHZ] = + SPECTRAL_PARAM_FFT_SIZE_MAX_GEN2; + param_min_max->fft_size_max[CH_WIDTH_160MHZ] = + SPECTRAL_PARAM_FFT_SIZE_MAX_GEN2; + break; + + default: + spectral_err("Invalid spectral generation %d", gen); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_init_spectral_param_properties() - Initialize Spectral parameter + * properties + * @spectral: Pointer to Spectral target_if internal private data + * + * Initialize Spectral parameter properties + * + * Return: QDF_STATUS + */ +static QDF_STATUS +target_if_init_spectral_param_properties(struct target_if_spectral *spectral) +{ + enum spectral_scan_mode smode = SPECTRAL_SCAN_MODE_NORMAL; + int param; + + /* Initialize default values for properties. + * Default values are supported for all the parameters for all modes + * and allows different values for each mode for all the parameters . + */ + for (; smode < SPECTRAL_SCAN_MODE_MAX; smode++) { + for (param = 0; param < SPECTRAL_PARAM_MAX; param++) { + spectral->properties[smode][param].supported = true; + spectral->properties[smode][param].common_all_modes = + false; + } + } + + /* Once FW advertisement is in place remove this hard coding */ + smode = SPECTRAL_SCAN_MODE_NORMAL; + spectral->properties[SPECTRAL_SCAN_MODE_NORMAL] + [SPECTRAL_PARAM_FREQUENCY].supported = false; + for (; smode < SPECTRAL_SCAN_MODE_MAX; smode++) { + spectral->properties[smode] + [SPECTRAL_PARAM_SPECT_PRI].common_all_modes = true; + spectral->properties[smode] + [SPECTRAL_PARAM_SCAN_PERIOD].common_all_modes = true; + spectral->properties[smode] + [SPECTRAL_PARAM_INIT_DELAY].common_all_modes = true; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_init_spectral_capability() - Initialize Spectral capability + * @spectral: Pointer to Spectral target_if internal private data + * + * This is a workaround. + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_init_spectral_capability(struct target_if_spectral *spectral) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wlan_psoc_host_spectral_scaling_params *scaling_params; + uint8_t num_bin_scaling_params, param_idx, pdev_id; + struct target_psoc_info *tgt_psoc_info; + struct wlan_psoc_host_service_ext_param *ext_svc_param; + struct spectral_caps *pcap = &spectral->capability; + struct wlan_psoc_host_mac_phy_caps *mac_phy_cap_arr = NULL; + struct wlan_psoc_host_mac_phy_caps *mac_phy_cap = NULL; + struct wlan_psoc_host_chainmask_table *table; + int j; + uint32_t table_id; + + pdev = spectral->pdev_obj; + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + spectral_err("psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_psoc_info) { + spectral_err("target_psoc_info is null"); + return QDF_STATUS_E_FAILURE; + } + + ext_svc_param = target_psoc_get_service_ext_param(tgt_psoc_info); + num_bin_scaling_params = ext_svc_param->num_bin_scaling_params; + scaling_params = target_psoc_get_spectral_scaling_params(tgt_psoc_info); + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + /* XXX : Workaround: Set Spectral capability */ + pcap = &spectral->capability; + pcap->phydiag_cap = 1; + pcap->radar_cap = 1; + pcap->spectral_cap = 1; + pcap->advncd_spectral_cap = 1; + pcap->hw_gen = spectral->spectral_gen; + if (spectral->spectral_gen >= SPECTRAL_GEN3) { + mac_phy_cap_arr = target_psoc_get_mac_phy_cap(tgt_psoc_info); + if (!mac_phy_cap_arr) { + spectral_err("mac phy cap array is null"); + return QDF_STATUS_E_FAILURE; + } + + mac_phy_cap = &mac_phy_cap_arr[pdev_id]; + if (!mac_phy_cap) { + spectral_err("mac phy cap is null"); + return QDF_STATUS_E_FAILURE; + } + + table_id = mac_phy_cap->chainmask_table_id; + table = &ext_svc_param->chainmask_table[table_id]; + if (!table) { + spectral_err("chainmask table not found"); + return QDF_STATUS_E_FAILURE; + } + + for (j = 0; j < table->num_valid_chainmasks; j++) { + pcap->agile_spectral_cap |= + table->cap_list[j].supports_aSpectral; + pcap->agile_spectral_cap_160 |= + table->cap_list[j].supports_aSpectral_160; + } + pcap->agile_spectral_cap_80p80 = pcap->agile_spectral_cap_160; + } else { + pcap->agile_spectral_cap = false; + pcap->agile_spectral_cap_160 = false; + pcap->agile_spectral_cap_80p80 = false; + } + + if (scaling_params) { + for (param_idx = 0; param_idx < num_bin_scaling_params; + param_idx++) { + if (scaling_params[param_idx].pdev_id == pdev_id) { + pcap->is_scaling_params_populated = true; + pcap->formula_id = + scaling_params[param_idx].formula_id; + pcap->low_level_offset = + scaling_params[param_idx].low_level_offset; + pcap->high_level_offset = + scaling_params[param_idx].high_level_offset; + pcap->rssi_thr = + scaling_params[param_idx].rssi_thr; + pcap->default_agc_max_gain = + scaling_params[param_idx].default_agc_max_gain; + break; + } + } + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef QCA_SUPPORT_SPECTRAL_SIMULATION +/** + * target_if_init_spectral_simulation_ops() - Initialize spectral target_if + * internal operations with functions related to spectral simulation + * @p_sops: spectral low level ops table + * + * Initialize spectral target_if internal operations with functions + * related to spectral simulation + * + * Return: None + */ +static void +target_if_init_spectral_simulation_ops(struct target_if_spectral_ops *p_sops) +{ + /* + * Spectral simulation is currently intended for platform transitions + * where underlying HW support may not be available for some time. + * Hence, we do not currently provide a runtime switch to turn the + * simulation on or off. + * In case of future requirements where runtime switches are required, + * this can be added. But it is suggested to use application layer + * simulation as far as possible in such cases, since the main + * use of record and replay of samples would concern higher + * level sample processing rather than lower level delivery. + */ + p_sops->is_spectral_enabled = target_if_spectral_sops_sim_is_enabled; + p_sops->is_spectral_active = target_if_spectral_sops_sim_is_active; + p_sops->start_spectral_scan = target_if_spectral_sops_sim_start_scan; + p_sops->stop_spectral_scan = target_if_spectral_sops_sim_stop_scan; + p_sops->configure_spectral = + target_if_spectral_sops_sim_configure_params; + p_sops->get_spectral_config = target_if_spectral_sops_sim_get_params; +} + +#else +/** + * target_if_init_spectral_simulation_ops() - Initialize spectral target_if + * internal operations + * @p_sops: spectral low level ops table + * + * Return: None + */ +static void +target_if_init_spectral_simulation_ops(struct target_if_spectral_ops *p_sops) +{ + p_sops->is_spectral_enabled = target_if_sops_is_spectral_enabled; + p_sops->is_spectral_active = target_if_sops_is_spectral_active; + p_sops->start_spectral_scan = target_if_sops_start_spectral_scan; + p_sops->stop_spectral_scan = target_if_sops_stop_spectral_scan; + p_sops->configure_spectral = target_if_spectral_sops_configure_params; + p_sops->get_spectral_config = target_if_spectral_sops_get_params; +} +#endif + +/** + * target_if_init_spectral_ops_common() - Initialize Spectral target_if internal + * operations common to all Spectral chipset generations + * + * Initializes target_if_spectral_ops common to all chipset generations + * + * Return: None + */ +static void +target_if_init_spectral_ops_common(void) +{ + struct target_if_spectral_ops *p_sops = &spectral_ops; + + p_sops->get_tsf64 = target_if_spectral_get_tsf64; + p_sops->get_capability = target_if_spectral_get_capability; + p_sops->set_rxfilter = target_if_spectral_set_rxfilter; + p_sops->get_rxfilter = target_if_spectral_get_rxfilter; + + target_if_init_spectral_simulation_ops(p_sops); + + p_sops->get_extension_channel = + target_if_spectral_get_extension_channel; + p_sops->get_ctl_noisefloor = target_if_spectral_get_ctl_noisefloor; + p_sops->get_ext_noisefloor = target_if_spectral_get_ext_noisefloor; + p_sops->get_ent_spectral_mask = target_if_spectral_get_ent_mask; + p_sops->get_mac_address = target_if_spectral_get_macaddr; + p_sops->get_current_channel = target_if_spectral_get_current_channel; + p_sops->reset_hw = target_if_spectral_reset_hw; + p_sops->get_chain_noise_floor = + target_if_spectral_get_chain_noise_floor; +} + +/** + * target_if_init_spectral_ops_gen2() - Initialize Spectral target_if internal + * operations specific to Spectral chipset generation 2. + * + * Initializes target_if_spectral_ops specific to Spectral chipset generation 2. + * + * Return: None + */ +static void +target_if_init_spectral_ops_gen2(void) +{ + struct target_if_spectral_ops *p_sops = &spectral_ops; + + p_sops->spectral_process_phyerr = target_if_process_phyerr_gen2; +} + +/** + * target_if_init_spectral_ops_gen3() - Initialize Spectral target_if internal + * operations specific to Spectral chipset generation 3. + * + * Initializes target_if_spectral_ops specific to Spectral chipset generation 3. + * + * Return: None + */ +static void +target_if_init_spectral_ops_gen3(void) +{ + struct target_if_spectral_ops *p_sops = &spectral_ops; + + p_sops->process_spectral_report = + target_if_spectral_process_report_gen3; + return; +} + +/** + * target_if_init_spectral_ops() - Initialize target_if internal Spectral + * operations. + * @spectral: Pointer to Spectral target_if internal private data + * + * Initializes all function pointers in target_if_spectral_ops for + * all generations + * + * Return: None + */ +static void +target_if_init_spectral_ops(struct target_if_spectral *spectral) +{ + target_if_init_spectral_ops_common(); + if (spectral->spectral_gen == SPECTRAL_GEN2) + target_if_init_spectral_ops_gen2(); + else if (spectral->spectral_gen == SPECTRAL_GEN3) + target_if_init_spectral_ops_gen3(); + else + spectral_err("Invalid Spectral generation"); +} + +/* + * Dummy Functions: + * These functions are initially registered to avoid any crashes due to + * invocation of spectral functions before they are registered. + */ + +static uint64_t +null_get_tsf64(void *arg) +{ + spectral_ops_not_registered("get_tsf64"); + return 0; +} + +static uint32_t +null_get_capability(void *arg, enum spectral_capability_type type) +{ + /* + * TODO : We should have conditional compilation to get the capability + * : We have not yet attahced ATH layer here, so there is no + * : way to check the HAL capbalities + */ + spectral_ops_not_registered("get_capability"); + + /* TODO : For the time being, we are returning TRUE */ + return true; +} + +static uint32_t +null_set_rxfilter(void *arg, int rxfilter) +{ + spectral_ops_not_registered("set_rxfilter"); + return 1; +} + +static uint32_t +null_get_rxfilter(void *arg) +{ + spectral_ops_not_registered("get_rxfilter"); + return 0; +} + +static uint32_t +null_is_spectral_active(void *arg, enum spectral_scan_mode smode) +{ + spectral_ops_not_registered("is_spectral_active"); + return 1; +} + +static uint32_t +null_is_spectral_enabled(void *arg, enum spectral_scan_mode smode) +{ + spectral_ops_not_registered("is_spectral_enabled"); + return 1; +} + +static uint32_t +null_start_spectral_scan(void *arg, enum spectral_scan_mode smode, + enum spectral_cp_error_code *err) +{ + spectral_ops_not_registered("start_spectral_scan"); + return 1; +} + +static uint32_t +null_stop_spectral_scan(void *arg, enum spectral_scan_mode smode) +{ + spectral_ops_not_registered("stop_spectral_scan"); + return 1; +} + +static uint32_t +null_get_extension_channel(void *arg) +{ + spectral_ops_not_registered("get_extension_channel"); + return 1; +} + +static int8_t +null_get_ctl_noisefloor(void *arg) +{ + spectral_ops_not_registered("get_ctl_noisefloor"); + return 1; +} + +static int8_t +null_get_ext_noisefloor(void *arg) +{ + spectral_ops_not_registered("get_ext_noisefloor"); + return 0; +} + +static uint32_t +null_configure_spectral(void *arg, struct spectral_config *params, + enum spectral_scan_mode smode) +{ + spectral_ops_not_registered("configure_spectral"); + return 0; +} + +static uint32_t +null_get_spectral_config(void *arg, struct spectral_config *params, + enum spectral_scan_mode smode) +{ + spectral_ops_not_registered("get_spectral_config"); + return 0; +} + +static uint32_t +null_get_ent_spectral_mask(void *arg) +{ + spectral_ops_not_registered("get_ent_spectral_mask"); + return 0; +} + +static uint32_t +null_get_mac_address(void *arg, char *addr) +{ + spectral_ops_not_registered("get_mac_address"); + return 0; +} + +static uint32_t +null_get_current_channel(void *arg) +{ + spectral_ops_not_registered("get_current_channel"); + return 0; +} + +static uint32_t +null_reset_hw(void *arg) +{ + spectral_ops_not_registered("get_current_channel"); + return 0; +} + +static uint32_t +null_get_chain_noise_floor(void *arg, int16_t *nf_buf) +{ + spectral_ops_not_registered("get_chain_noise_floor"); + return 0; +} + +static int +null_spectral_process_phyerr(struct target_if_spectral *spectral, + uint8_t *data, + uint32_t datalen, + struct target_if_spectral_rfqual_info *p_rfqual, + struct target_if_spectral_chan_info *p_chaninfo, + uint64_t tsf64, + struct target_if_spectral_acs_stats *acs_stats) +{ + spectral_ops_not_registered("spectral_process_phyerr"); + return 0; +} + +static int +null_process_spectral_report(struct wlan_objmgr_pdev *pdev, + void *payload) +{ + spectral_ops_not_registered("process_spectral_report"); + return 0; +} +/** + * target_if_spectral_init_dummy_function_table() - + * Initialize target_if internal + * Spectral operations to dummy functions + * @ps: Pointer to Spectral target_if internal private data + * + * Initialize all the function pointers in target_if_spectral_ops with + * dummy functions. + * + * Return: None + */ +static void +target_if_spectral_init_dummy_function_table(struct target_if_spectral *ps) +{ + struct target_if_spectral_ops *p_sops = GET_TARGET_IF_SPECTRAL_OPS(ps); + + p_sops->get_tsf64 = null_get_tsf64; + p_sops->get_capability = null_get_capability; + p_sops->set_rxfilter = null_set_rxfilter; + p_sops->get_rxfilter = null_get_rxfilter; + p_sops->is_spectral_enabled = null_is_spectral_enabled; + p_sops->is_spectral_active = null_is_spectral_active; + p_sops->start_spectral_scan = null_start_spectral_scan; + p_sops->stop_spectral_scan = null_stop_spectral_scan; + p_sops->get_extension_channel = null_get_extension_channel; + p_sops->get_ctl_noisefloor = null_get_ctl_noisefloor; + p_sops->get_ext_noisefloor = null_get_ext_noisefloor; + p_sops->configure_spectral = null_configure_spectral; + p_sops->get_spectral_config = null_get_spectral_config; + p_sops->get_ent_spectral_mask = null_get_ent_spectral_mask; + p_sops->get_mac_address = null_get_mac_address; + p_sops->get_current_channel = null_get_current_channel; + p_sops->reset_hw = null_reset_hw; + p_sops->get_chain_noise_floor = null_get_chain_noise_floor; + p_sops->spectral_process_phyerr = null_spectral_process_phyerr; + p_sops->process_spectral_report = null_process_spectral_report; +} + +/** + * target_if_spectral_register_funcs() - Initialize target_if internal Spectral + * operations + * @spectral: Pointer to Spectral target_if internal private data + * @p: Pointer to Spectral function table + * + * Return: None + */ +static void +target_if_spectral_register_funcs(struct target_if_spectral *spectral, + struct target_if_spectral_ops *p) +{ + struct target_if_spectral_ops *p_sops = + GET_TARGET_IF_SPECTRAL_OPS(spectral); + + p_sops->get_tsf64 = p->get_tsf64; + p_sops->get_capability = p->get_capability; + p_sops->set_rxfilter = p->set_rxfilter; + p_sops->get_rxfilter = p->get_rxfilter; + p_sops->is_spectral_enabled = p->is_spectral_enabled; + p_sops->is_spectral_active = p->is_spectral_active; + p_sops->start_spectral_scan = p->start_spectral_scan; + p_sops->stop_spectral_scan = p->stop_spectral_scan; + p_sops->get_extension_channel = p->get_extension_channel; + p_sops->get_ctl_noisefloor = p->get_ctl_noisefloor; + p_sops->get_ext_noisefloor = p->get_ext_noisefloor; + p_sops->configure_spectral = p->configure_spectral; + p_sops->get_spectral_config = p->get_spectral_config; + p_sops->get_ent_spectral_mask = p->get_ent_spectral_mask; + p_sops->get_mac_address = p->get_mac_address; + p_sops->get_current_channel = p->get_current_channel; + p_sops->reset_hw = p->reset_hw; + p_sops->get_chain_noise_floor = p->get_chain_noise_floor; + p_sops->spectral_process_phyerr = p->spectral_process_phyerr; + p_sops->process_spectral_report = p->process_spectral_report; +} + +/** + * target_if_spectral_clear_stats() - Clear Spectral stats + * @spectral: Pointer to Spectral target_if internal private data + * + * Function to clear spectral stats + * + * Return: None + */ +static void +target_if_spectral_clear_stats(struct target_if_spectral *spectral) +{ + struct target_if_spectral_ops *p_sops = + GET_TARGET_IF_SPECTRAL_OPS(spectral); + + qdf_mem_zero(&spectral->spectral_stats, + sizeof(struct target_if_spectral_stats)); + spectral->spectral_stats.last_reset_tstamp = + p_sops->get_tsf64(spectral); +} + +/** + * target_if_spectral_check_hw_capability() - Check whether HW supports spectral + * @spectral: Pointer to Spectral target_if internal private data + * + * Function to check whether hardware supports spectral + * + * Return: True if HW supports Spectral, false if HW does not support Spectral + */ +static int +target_if_spectral_check_hw_capability(struct target_if_spectral *spectral) +{ + struct target_if_spectral_ops *p_sops = NULL; + struct spectral_caps *pcap = NULL; + int is_spectral_supported = true; + + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + pcap = &spectral->capability; + + if (p_sops->get_capability(spectral, SPECTRAL_CAP_PHYDIAG) == false) { + is_spectral_supported = false; + spectral_info("SPECTRAL : No PHYDIAG support"); + return is_spectral_supported; + } + pcap->phydiag_cap = 1; + + if (p_sops->get_capability(spectral, SPECTRAL_CAP_RADAR) == false) { + is_spectral_supported = false; + spectral_info("SPECTRAL : No RADAR support"); + return is_spectral_supported; + } + pcap->radar_cap = 1; + + if (p_sops->get_capability(spectral, + SPECTRAL_CAP_SPECTRAL_SCAN) == false) { + is_spectral_supported = false; + spectral_info("SPECTRAL : No SPECTRAL SUPPORT"); + return is_spectral_supported; + } + pcap->spectral_cap = 1; + + if (p_sops->get_capability(spectral, SPECTRAL_CAP_ADVNCD_SPECTRAL_SCAN) + == false) { + spectral_info("SPECTRAL : No ADVANCED SPECTRAL SUPPORT"); + } else { + pcap->advncd_spectral_cap = 1; + } + + return is_spectral_supported; +} + +#ifdef QCA_SUPPORT_SPECTRAL_SIMULATION +/** + * target_if_spectral_detach_simulation() - De-initialize Spectral + * Simulation functionality + * @spectral: Pointer to Spectral target_if internal private data + * + * Function to de-initialize Spectral Simulation functionality + * + * Return: None + */ +static void +target_if_spectral_detach_simulation(struct target_if_spectral *spectral) +{ + target_if_spectral_sim_detach(spectral); +} + +#else +static void +target_if_spectral_detach_simulation(struct target_if_spectral *spectral) +{ +} +#endif + +/** + * target_if_spectral_detach() - De-initialize target_if Spectral + * @pdev: Pointer to pdev object + * + * Function to detach target_if spectral + * + * Return: None + */ +static void +target_if_spectral_detach(struct target_if_spectral *spectral) +{ + enum spectral_scan_mode smode = SPECTRAL_SCAN_MODE_NORMAL; + spectral_info("spectral detach"); + + if (spectral) { + for (; smode < SPECTRAL_SCAN_MODE_MAX; smode++) + qdf_spinlock_destroy + (&spectral->param_info[smode].osps_lock); + + target_if_spectral_detach_simulation(spectral); + + qdf_spinlock_destroy(&spectral->spectral_lock); + qdf_spinlock_destroy(&spectral->noise_pwr_reports_lock); + + qdf_mem_free(spectral); + spectral = NULL; + } +} + +#ifdef QCA_SUPPORT_SPECTRAL_SIMULATION +/** + * target_if_spectral_attach_simulation() - Initialize Spectral Simulation + * functionality + * @spectral: Pointer to Spectral target_if internal private data + * + * Function to initialize spectral simulation functionality + * + * Return: 0 on success, negative error code on failure + */ +static int +target_if_spectral_attach_simulation(struct target_if_spectral *spectral) +{ + if (target_if_spectral_sim_attach(spectral)) { + qdf_mem_free(spectral); + return -EPERM; + } + return 0; +} + +#else +static int +target_if_spectral_attach_simulation(struct target_if_spectral *spectral) +{ + return 0; +} +#endif + +/** + * target_if_spectral_len_adj_swar_init() - Initialize FFT bin length adjustment + * related info + * @swar: Pointer to Spectral FFT bin length adjustment SWAR params + * @target_type: Target type + * + * Function to Initialize parameters related to Spectral FFT bin + * length adjustment SWARs. + * + * Return: void + */ +static void +target_if_spectral_len_adj_swar_init(struct spectral_fft_bin_len_adj_swar *swar, + uint32_t target_type) +{ + if (target_type == TARGET_TYPE_QCA8074V2 || + target_type == TARGET_TYPE_QCN9000) + swar->fftbin_size_war = SPECTRAL_FFTBIN_SIZE_WAR_2BYTE_TO_1BYTE; + else if (target_type == TARGET_TYPE_QCA8074 || + target_type == TARGET_TYPE_QCA6018 || + target_type == TARGET_TYPE_QCA6390) + swar->fftbin_size_war = SPECTRAL_FFTBIN_SIZE_WAR_4BYTE_TO_1BYTE; + else + swar->fftbin_size_war = SPECTRAL_FFTBIN_SIZE_NO_WAR; + + if (target_type == TARGET_TYPE_QCA8074 || + target_type == TARGET_TYPE_QCA8074V2 || + target_type == TARGET_TYPE_QCA6018 || + target_type == TARGET_TYPE_QCN9000) { + swar->inband_fftbin_size_adj = 1; + swar->null_fftbin_adj = 1; + } else { + swar->inband_fftbin_size_adj = 0; + swar->null_fftbin_adj = 0; + } + + if (target_type == TARGET_TYPE_QCA8074V2) + swar->packmode_fftbin_size_adj = 1; + else + swar->packmode_fftbin_size_adj = 0; +} + +/** + * target_if_spectral_report_params_init() - Initialize parameters which + * describes the structure of Spectral reports + * + * @rparams: Pointer to Spectral report parameter object + * @target_type: target type + * + * Function to Initialize parameters related to the structure of Spectral + * reports. + * + * Return: void + */ +static void +target_if_spectral_report_params_init( + struct spectral_report_params *rparams, + uint32_t target_type) +{ + /* This entries are currently used by gen3 chipsets only. Hence + * initialization is done for gen3 alone. In future if other generations + * needs to use them they have to add proper initial values. + */ + if (target_type == TARGET_TYPE_QCN9000) + rparams->version = SPECTRAL_REPORT_FORMAT_VERSION_2; + else + rparams->version = SPECTRAL_REPORT_FORMAT_VERSION_1; + + switch (rparams->version) { + case SPECTRAL_REPORT_FORMAT_VERSION_1: + rparams->ssumaary_padding_bytes = + NUM_PADDING_BYTES_SSCAN_SUMARY_REPORT_GEN3_V1; + rparams->fft_report_hdr_len = + FFT_REPORT_HEADER_LENGTH_GEN3_V1; + break; + case SPECTRAL_REPORT_FORMAT_VERSION_2: + rparams->ssumaary_padding_bytes = + NUM_PADDING_BYTES_SSCAN_SUMARY_REPORT_GEN3_V2; + rparams->fft_report_hdr_len = + FFT_REPORT_HEADER_LENGTH_GEN3_V2; + break; + default: + qdf_assert_always(0); + } +} + +/** + * target_if_spectral_timestamp_war_init() - Initialize Spectral timestamp WAR + * related info + * @twar: Pointer to Spectral timstamp WAR related info + * + * Function to Initialize parameters related to Spectral timestamp WAR + * + * Return: void + */ +static void +target_if_spectral_timestamp_war_init(struct spectral_timestamp_war *twar) +{ + enum spectral_scan_mode smode; + + smode = SPECTRAL_SCAN_MODE_NORMAL; + for (; smode < SPECTRAL_SCAN_MODE_MAX; smode++) { + twar->last_fft_timestamp[smode] = 0; + twar->timestamp_war_offset[smode] = 0; + } + twar->target_reset_count = 0; +} + +/** + * target_if_pdev_spectral_init() - Initialize target_if Spectral + * functionality for the given pdev + * @pdev: Pointer to pdev object + * + * Function to initialize pointer to spectral target_if internal private data + * + * Return: On success, pointer to Spectral target_if internal private data, on + * failure, NULL + */ +void * +target_if_pdev_spectral_init(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral_ops *p_sops = NULL; + struct target_if_spectral *spectral = NULL; + uint32_t target_type; + uint32_t target_revision; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_target_tx_ops *tx_ops; + enum spectral_scan_mode smode = SPECTRAL_SCAN_MODE_NORMAL; + QDF_STATUS status; + + if (!pdev) { + spectral_err("SPECTRAL: pdev is NULL!"); + return NULL; + } + spectral = (struct target_if_spectral *)qdf_mem_malloc( + sizeof(struct target_if_spectral)); + if (!spectral) + return spectral; + + qdf_mem_zero(spectral, sizeof(struct target_if_spectral)); + /* Store pdev in Spectral */ + spectral->pdev_obj = pdev; + + psoc = wlan_pdev_get_psoc(pdev); + + tx_ops = &psoc->soc_cb.tx_ops.target_tx_ops; + + if (tx_ops->tgt_get_tgt_type) { + target_type = tx_ops->tgt_get_tgt_type(psoc); + } else { + qdf_mem_free(spectral); + return NULL; + } + + if (tx_ops->tgt_get_tgt_revision) { + target_revision = tx_ops->tgt_get_tgt_revision(psoc); + } else { + qdf_mem_free(spectral); + return NULL; + } + + /* init the function ptr table */ + target_if_spectral_init_dummy_function_table(spectral); + + /* get spectral function table */ + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + /* TODO : Should this be called here of after ath_attach ? */ + if (p_sops->get_capability(spectral, SPECTRAL_CAP_PHYDIAG)) + spectral_info("HAL_CAP_PHYDIAG : Capable"); + + /* TODO: Need to fix the capablity check for RADAR */ + if (p_sops->get_capability(spectral, SPECTRAL_CAP_RADAR)) + spectral_info("HAL_CAP_RADAR : Capable"); + + /* TODO : Need to fix the capablity check for SPECTRAL */ + /* TODO : Should this be called here of after ath_attach ? */ + if (p_sops->get_capability(spectral, SPECTRAL_CAP_SPECTRAL_SCAN)) + spectral_info("HAL_CAP_SPECTRAL_SCAN : Capable"); + + qdf_spinlock_create(&spectral->spectral_lock); + qdf_spinlock_create(&spectral->noise_pwr_reports_lock); + target_if_spectral_clear_stats(spectral); + + if (target_type == TARGET_TYPE_QCA8074 || + target_type == TARGET_TYPE_QCA8074V2 || + target_type == TARGET_TYPE_QCA6018 || + target_type == TARGET_TYPE_QCA6390 || + target_type == TARGET_TYPE_QCN9000) + spectral->direct_dma_support = true; + + target_if_spectral_len_adj_swar_init(&spectral->len_adj_swar, + target_type); + target_if_spectral_report_params_init(&spectral->rparams, target_type); + + if ((target_type == TARGET_TYPE_QCA8074) || + (target_type == TARGET_TYPE_QCA8074V2) || + (target_type == TARGET_TYPE_QCA6018) || + (target_type == TARGET_TYPE_QCN9000) || + (target_type == TARGET_TYPE_QCA6290) || + (target_type == TARGET_TYPE_QCA6390)) { + spectral->spectral_gen = SPECTRAL_GEN3; + spectral->hdr_sig_exp = SPECTRAL_PHYERR_SIGNATURE_GEN3; + spectral->tag_sscan_summary_exp = + TLV_TAG_SPECTRAL_SUMMARY_REPORT_GEN3; + spectral->tag_sscan_fft_exp = TLV_TAG_SEARCH_FFT_REPORT_GEN3; + spectral->tlvhdr_size = SPECTRAL_PHYERR_TLVSIZE_GEN3; + } else { + spectral->spectral_gen = SPECTRAL_GEN2; + spectral->hdr_sig_exp = SPECTRAL_PHYERR_SIGNATURE_GEN2; + spectral->tag_sscan_summary_exp = + TLV_TAG_SPECTRAL_SUMMARY_REPORT_GEN2; + spectral->tag_sscan_fft_exp = TLV_TAG_SEARCH_FFT_REPORT_GEN2; + spectral->tlvhdr_size = sizeof(struct spectral_phyerr_tlv_gen2); + } + + status = target_if_init_spectral_param_min_max( + &spectral->param_min_max, + spectral->spectral_gen, target_type); + if (QDF_IS_STATUS_ERROR(status)) { + spectral_err("Failed to initialize parameter min max values"); + goto fail; + } + + target_if_init_spectral_param_properties(spectral); + /* Init spectral capability */ + if (target_if_init_spectral_capability(spectral) != + QDF_STATUS_SUCCESS) { + qdf_mem_free(spectral); + return NULL; + } + if (target_if_spectral_attach_simulation(spectral) < 0) + return NULL; + + target_if_init_spectral_ops(spectral); + target_if_spectral_timestamp_war_init(&spectral->timestamp_war); + + /* Spectral mode specific init */ + for (; smode < SPECTRAL_SCAN_MODE_MAX; smode++) { + spectral->params_valid[smode] = false; + qdf_spinlock_create(&spectral->param_info[smode].osps_lock); + spectral->param_info[smode].osps_cache.osc_is_valid = 0; + } + + target_if_spectral_register_funcs(spectral, &spectral_ops); + + if (target_if_spectral_check_hw_capability(spectral) == false) { + goto fail; + } else { + /* + * TODO: Once the driver architecture transitions to chipset + * versioning based checks, reflect this here. + */ + spectral->is_160_format = false; + spectral->is_lb_edge_extrabins_format = false; + spectral->is_rb_edge_extrabins_format = false; + + if (target_type == TARGET_TYPE_QCA9984 || + target_type == TARGET_TYPE_QCA9888) { + spectral->is_160_format = true; + spectral->is_lb_edge_extrabins_format = true; + spectral->is_rb_edge_extrabins_format = true; + } else if ((target_type == TARGET_TYPE_AR900B) && + (target_revision == AR900B_REV_2)) { + spectral->is_rb_edge_extrabins_format = true; + } + + if (target_type == TARGET_TYPE_QCA9984 || + target_type == TARGET_TYPE_QCA9888) + spectral->is_sec80_rssi_war_required = true; + + spectral->use_nl_bcast = SPECTRAL_USE_NL_BCAST; + + if (spectral->spectral_gen == SPECTRAL_GEN3) + init_160mhz_delivery_state_machine(spectral); + } + + return spectral; + +fail: + target_if_spectral_detach(spectral); + return NULL; +} + +/** + * target_if_pdev_spectral_deinit() - De-initialize target_if Spectral + * functionality for the given pdev + * @pdev: Pointer to pdev object + * + * Function to de-initialize pointer to spectral target_if internal private data + * + * Return: None + */ +void +target_if_pdev_spectral_deinit(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("SPECTRAL : Module doesn't exist"); + return; + } + target_if_spectral_detach(spectral); + + return; +} + +/* target_if_spectral_find_agile_width() - Given a channel width enum, find the + * corresponding translation for Agile channel width. + * Translation schema of different operating modes: + * 20 -> 20, 40 -> 40, (80 & 160 & 80_80) -> 80. + * @chwidth: Channel width enum. + * + * Return: The translated channel width enum. + */ +static enum phy_ch_width +target_if_spectral_find_agile_width(enum phy_ch_width chwidth) +{ + switch (chwidth) { + case CH_WIDTH_20MHZ: + return CH_WIDTH_20MHZ; + case CH_WIDTH_40MHZ: + return CH_WIDTH_40MHZ; + case CH_WIDTH_80MHZ: + case CH_WIDTH_80P80MHZ: + case CH_WIDTH_160MHZ: + return CH_WIDTH_80MHZ; + default: + spectral_err("Invalid chwidth enum %d", chwidth); + return CH_WIDTH_INVALID; + } +} + +/** + * target_if_calculate_center_freq() - Helper routine to + * check whether given frequency is center frequency of a + * WLAN channel + * + * @spectral: Pointer to Spectral object + * @chan_freq: Center frequency of a WLAN channel + * @is_valid: Indicates whether given frequency is valid + * + * Return: QDF_STATUS + */ +static QDF_STATUS +target_if_is_center_freq_of_any_chan(struct wlan_objmgr_pdev *pdev, + uint32_t chan_freq, + bool *is_valid) +{ + struct regulatory_channel *cur_chan_list; + int i; + + if (!pdev) { + spectral_err("pdev object is null"); + return QDF_STATUS_E_FAILURE; + } + + if (!is_valid) { + spectral_err("is valid argument is null"); + return QDF_STATUS_E_FAILURE; + } + + cur_chan_list = qdf_mem_malloc(NUM_CHANNELS * sizeof(*cur_chan_list)); + if (!cur_chan_list) + return QDF_STATUS_E_FAILURE; + + if (wlan_reg_get_current_chan_list( + pdev, cur_chan_list) != QDF_STATUS_SUCCESS) { + spectral_err("Failed to get cur_chan list"); + qdf_mem_free(cur_chan_list); + return QDF_STATUS_E_FAILURE; + } + + *is_valid = false; + for (i = 0; i < NUM_CHANNELS; i++) { + uint32_t flags; + uint32_t center_freq; + + flags = cur_chan_list[i].chan_flags; + center_freq = cur_chan_list[i].center_freq; + + if (!(flags & REGULATORY_CHAN_DISABLED) && + (center_freq == chan_freq)) { + *is_valid = true; + break; + } + } + + qdf_mem_free(cur_chan_list); + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_calculate_center_freq() - Helper routine to + * find the center frequency of the agile span from a + * WLAN channel center frequency + * + * @spectral: Pointer to Spectral object + * @chan_freq: Center frequency of a WLAN channel + * @center_freq: Pointer to center frequency + * + * Return: QDF_STATUS + */ +static QDF_STATUS +target_if_calculate_center_freq(struct target_if_spectral *spectral, + uint16_t chan_freq, + uint16_t *center_freq) +{ + struct wlan_objmgr_vdev *vdev; + enum phy_ch_width ch_width; + enum phy_ch_width agile_ch_width; + + if (!spectral) { + spectral_err("spectral target if object is null"); + return QDF_STATUS_E_FAILURE; + } + + if (!center_freq) { + spectral_err("center_freq argument is null"); + return QDF_STATUS_E_FAILURE; + } + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) { + spectral_err("vdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + ch_width = target_if_vdev_get_ch_width(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + agile_ch_width = target_if_spectral_find_agile_width(ch_width); + + if (agile_ch_width == CH_WIDTH_20MHZ) { + *center_freq = chan_freq; + } else { + uint16_t start_freq; + uint16_t end_freq; + const struct bonded_channel_freq *bonded_chan_ptr = NULL; + + wlan_reg_get_5g_bonded_channel_and_state_for_freq + (spectral->pdev_obj, chan_freq, agile_ch_width, + &bonded_chan_ptr); + if (!bonded_chan_ptr) { + spectral_err("Bonded channel is not found"); + return QDF_STATUS_E_FAILURE; + } + start_freq = bonded_chan_ptr->start_freq; + end_freq = bonded_chan_ptr->end_freq; + *center_freq = (start_freq + end_freq) >> 1; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_validate_center_freq() - Helper routine to + * validate user provided agile center frequency + * + * @spectral: Pointer to Spectral object + * @center_freq: User provided agile span center frequency + * @is_valid: Indicates whether agile span center frequency is valid + * + * Return: QDF_STATUS + */ +static QDF_STATUS +target_if_validate_center_freq(struct target_if_spectral *spectral, + uint16_t center_freq, + bool *is_valid) +{ + struct wlan_objmgr_vdev *vdev; + enum phy_ch_width ch_width; + enum phy_ch_width agile_ch_width; + struct wlan_objmgr_pdev *pdev; + QDF_STATUS status; + + if (!spectral) { + spectral_err("spectral target if object is null"); + return QDF_STATUS_E_FAILURE; + } + + if (!is_valid) { + spectral_err("is_valid argument is null"); + return QDF_STATUS_E_FAILURE; + } + + pdev = spectral->pdev_obj; + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) { + spectral_err("vdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + ch_width = target_if_vdev_get_ch_width(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + agile_ch_width = target_if_spectral_find_agile_width(ch_width); + + if (agile_ch_width == CH_WIDTH_20MHZ) { + status = target_if_is_center_freq_of_any_chan + (pdev, center_freq, is_valid); + if (QDF_IS_STATUS_ERROR(status)) + return QDF_STATUS_E_FAILURE; + } else { + uint16_t start_freq; + uint16_t end_freq; + const struct bonded_channel_freq *bonded_chan_ptr = NULL; + bool is_chan; + + status = target_if_is_center_freq_of_any_chan + (pdev, center_freq + FREQ_OFFSET_10MHZ, + &is_chan); + if (QDF_IS_STATUS_ERROR(status)) + return QDF_STATUS_E_FAILURE; + + if (is_chan) { + uint32_t calulated_center_freq; + + wlan_reg_get_5g_bonded_channel_and_state_for_freq + (pdev, center_freq + FREQ_OFFSET_10MHZ, + agile_ch_width, + &bonded_chan_ptr); + if (!bonded_chan_ptr) { + spectral_err("Bonded channel is not found"); + return QDF_STATUS_E_FAILURE; + } + start_freq = bonded_chan_ptr->start_freq; + end_freq = bonded_chan_ptr->end_freq; + calulated_center_freq = (start_freq + end_freq) >> 1; + *is_valid = (center_freq == calulated_center_freq); + } else { + *is_valid = false; + } + } + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_is_agile_span_overlap_with_operating_span() - Helper routine to + * check whether agile span overlaps with current operating band. + * + * @spectral: Pointer to Spectral object + * @ss_frequency: Agile span center frequency + * @is_overlapping: Indicates whether Agile span overlaps with operating span + * + * Helper routine to check whether agile span overlaps with current + * operating band. + * + * Return: QDF_STATUS + */ +static QDF_STATUS +target_if_is_agile_span_overlap_with_operating_span + (struct target_if_spectral *spectral, + uint32_t ss_frequency, + bool *is_overlapping) +{ + enum phy_ch_width ch_width; + enum phy_ch_width agile_ch_width; + const struct bonded_channel_freq *bonded_chan_ptr = NULL; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + int16_t chan_freq; + uint32_t op_start_freq; + uint32_t op_end_freq; + uint32_t agile_start_freq; + uint32_t agile_end_freq; + uint32_t cfreq2; + + if (!spectral) { + spectral_err("Spectral object is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pdev = spectral->pdev_obj; + if (!pdev) { + spectral_err("pdev object is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!is_overlapping) { + spectral_err("Argument(is_overlapping) is NULL"); + return QDF_STATUS_E_FAILURE; + } + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) { + spectral_err("vdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + ch_width = target_if_vdev_get_ch_width(vdev); + chan_freq = target_if_vdev_get_chan_freq(vdev); + cfreq2 = target_if_vdev_get_chan_freq_seg2(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + if (cfreq2 < 0) + return QDF_STATUS_E_FAILURE; + + if (ch_width == CH_WIDTH_20MHZ) { + op_start_freq = chan_freq - FREQ_OFFSET_10MHZ; + op_end_freq = chan_freq + FREQ_OFFSET_10MHZ; + } else { + wlan_reg_get_5g_bonded_channel_and_state_for_freq + (pdev, chan_freq, ch_width, &bonded_chan_ptr); + if (!bonded_chan_ptr) { + spectral_err("Bonded channel is not found"); + return QDF_STATUS_E_FAILURE; + } + op_start_freq = bonded_chan_ptr->start_freq - FREQ_OFFSET_10MHZ; + op_end_freq = bonded_chan_ptr->end_freq - FREQ_OFFSET_10MHZ; + } + + agile_ch_width = target_if_spectral_find_agile_width(ch_width); + if (agile_ch_width == CH_WIDTH_INVALID) + return QDF_STATUS_E_FAILURE; + agile_start_freq = ss_frequency - + (wlan_reg_get_bw_value(agile_ch_width) >> 1); + agile_end_freq = ss_frequency + + (wlan_reg_get_bw_value(agile_ch_width) >> 1); + if (agile_end_freq <= op_start_freq || op_end_freq <= agile_start_freq) + *is_overlapping = false; + else + *is_overlapping = true; + + /* Use non zero cfreq2 to identify 80p80 */ + if (cfreq2) { + uint32_t sec80_start_feq; + uint32_t sec80_end_freq; + + sec80_start_feq = cfreq2 - 40; + sec80_end_freq = cfreq2 + 40; + + if ((agile_end_freq > sec80_start_feq) && + (sec80_end_freq > agile_start_freq)) + *is_overlapping = true; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_spectral_populate_chwidth() - Helper routine to + * populate channel width for different Spectral modes + * + * @spectral: Pointer to Spectral object + * + * Helper routine to populate channel width for different Spectral modes + * + * Return: QDF_STATUS + */ +static QDF_STATUS +target_if_spectral_populate_chwidth(struct target_if_spectral *spectral) { + struct wlan_objmgr_vdev *vdev; + enum phy_ch_width vdev_ch_with; + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) { + spectral_err("vdev is null"); + return QDF_STATUS_E_FAILURE; + } + + vdev_ch_with = target_if_vdev_get_ch_width(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + if (vdev_ch_with == CH_WIDTH_INVALID) { + spectral_err("Invalid channel width %d", vdev_ch_with); + return QDF_STATUS_E_FAILURE; + } + spectral->ch_width[SPECTRAL_SCAN_MODE_NORMAL] = vdev_ch_with; + spectral->ch_width[SPECTRAL_SCAN_MODE_AGILE] = + target_if_spectral_find_agile_width(vdev_ch_with); + + return QDF_STATUS_SUCCESS; +} + +/** + * _target_if_set_spectral_config() - Set spectral config + * @spectral: Pointer to spectral object + * @threshtype: config type + * @value: config value + * @smode: Spectral scan mode + * @err: Spectral error code + * + * API to set spectral configurations + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure + */ +static QDF_STATUS +_target_if_set_spectral_config(struct target_if_spectral *spectral, + const uint32_t threshtype, const uint32_t value, + const enum spectral_scan_mode smode, + enum spectral_cp_error_code *err) +{ + struct spectral_config params; + struct target_if_spectral_ops *p_sops; + struct spectral_config *sparams; + QDF_STATUS status; + bool is_overlapping; + uint16_t agile_cfreq; + bool is_valid_chan; + struct spectral_param_min_max *param_min_max; + + if (!err) { + spectral_err("Error code argument is null"); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + *err = SPECTRAL_SCAN_ERR_INVALID; + + if (!spectral) { + spectral_err("spectral object is NULL"); + return QDF_STATUS_E_FAILURE; + } + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + param_min_max = &spectral->param_min_max; + + if (smode >= SPECTRAL_SCAN_MODE_MAX) { + spectral_err("Invalid Spectral mode %u", smode); + *err = SPECTRAL_SCAN_ERR_MODE_UNSUPPORTED; + return QDF_STATUS_E_FAILURE; + } + + sparams = &spectral->params[smode]; + + if (!spectral->params_valid[smode]) { + target_if_spectral_info_read(spectral, + smode, + TARGET_IF_SPECTRAL_INFO_PARAMS, + &spectral->params[smode], + sizeof(spectral->params[smode])); + spectral->params_valid[smode] = true; + } + + switch (threshtype) { + case SPECTRAL_PARAM_FFT_PERIOD: + sparams->ss_fft_period = value; + break; + case SPECTRAL_PARAM_SCAN_PERIOD: + sparams->ss_period = value; + break; + case SPECTRAL_PARAM_SCAN_COUNT: + sparams->ss_count = value; + break; + case SPECTRAL_PARAM_SHORT_REPORT: + sparams->ss_short_report = (!!value) ? true : false; + break; + case SPECTRAL_PARAM_SPECT_PRI: + sparams->ss_spectral_pri = (!!value) ? true : false; + break; + case SPECTRAL_PARAM_FFT_SIZE: + status = target_if_spectral_populate_chwidth(spectral); + if (QDF_IS_STATUS_ERROR(status)) + return QDF_STATUS_E_FAILURE; + if ((value < param_min_max->fft_size_min) || + (value > param_min_max->fft_size_max + [spectral->ch_width[smode]])) { + *err = SPECTRAL_SCAN_ERR_PARAM_INVALID_VALUE; + return QDF_STATUS_E_FAILURE; + } + sparams->ss_fft_size = value; + break; + case SPECTRAL_PARAM_GC_ENA: + sparams->ss_gc_ena = !!value; + break; + case SPECTRAL_PARAM_RESTART_ENA: + sparams->ss_restart_ena = !!value; + break; + case SPECTRAL_PARAM_NOISE_FLOOR_REF: + sparams->ss_noise_floor_ref = value; + break; + case SPECTRAL_PARAM_INIT_DELAY: + sparams->ss_init_delay = value; + break; + case SPECTRAL_PARAM_NB_TONE_THR: + sparams->ss_nb_tone_thr = value; + break; + case SPECTRAL_PARAM_STR_BIN_THR: + sparams->ss_str_bin_thr = value; + break; + case SPECTRAL_PARAM_WB_RPT_MODE: + sparams->ss_wb_rpt_mode = !!value; + break; + case SPECTRAL_PARAM_RSSI_RPT_MODE: + sparams->ss_rssi_rpt_mode = !!value; + break; + case SPECTRAL_PARAM_RSSI_THR: + sparams->ss_rssi_thr = value; + break; + case SPECTRAL_PARAM_PWR_FORMAT: + sparams->ss_pwr_format = !!value; + break; + case SPECTRAL_PARAM_RPT_MODE: + if ((value < SPECTRAL_PARAM_RPT_MODE_MIN) || + (value > SPECTRAL_PARAM_RPT_MODE_MAX)) { + *err = SPECTRAL_SCAN_ERR_PARAM_INVALID_VALUE; + return QDF_STATUS_E_FAILURE; + } + sparams->ss_rpt_mode = value; + break; + case SPECTRAL_PARAM_BIN_SCALE: + sparams->ss_bin_scale = value; + break; + case SPECTRAL_PARAM_DBM_ADJ: + sparams->ss_dbm_adj = !!value; + break; + case SPECTRAL_PARAM_CHN_MASK: + sparams->ss_chn_mask = value; + break; + case SPECTRAL_PARAM_FREQUENCY: + status = target_if_is_center_freq_of_any_chan + (spectral->pdev_obj, value, &is_valid_chan); + if (QDF_IS_STATUS_ERROR(status)) + return QDF_STATUS_E_FAILURE; + + if (is_valid_chan) { + status = target_if_calculate_center_freq(spectral, + value, + &agile_cfreq); + if (QDF_IS_STATUS_ERROR(status)) { + *err = SPECTRAL_SCAN_ERR_PARAM_INVALID_VALUE; + return QDF_STATUS_E_FAILURE; + } + } else { + bool is_valid_agile_cfreq; + + status = target_if_validate_center_freq + (spectral, value, &is_valid_agile_cfreq); + if (QDF_IS_STATUS_ERROR(status)) + return QDF_STATUS_E_FAILURE; + + if (!is_valid_agile_cfreq) { + *err = SPECTRAL_SCAN_ERR_PARAM_INVALID_VALUE; + spectral_err("Invalid agile center frequency"); + return QDF_STATUS_E_FAILURE; + } + + agile_cfreq = value; + } + + status = target_if_is_agile_span_overlap_with_operating_span + (spectral, agile_cfreq, &is_overlapping); + if (QDF_IS_STATUS_ERROR(status)) + return QDF_STATUS_E_FAILURE; + + if (is_overlapping) { + spectral_err("Agile span overlapping with current BW"); + *err = SPECTRAL_SCAN_ERR_PARAM_INVALID_VALUE; + return QDF_STATUS_E_FAILURE; + } + sparams->ss_frequency = agile_cfreq; + break; + } + + p_sops->configure_spectral(spectral, sparams, smode); + /* only to validate the writes */ + p_sops->get_spectral_config(spectral, ¶ms, smode); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +target_if_set_spectral_config(struct wlan_objmgr_pdev *pdev, + const uint32_t threshtype, const uint32_t value, + const enum spectral_scan_mode smode, + enum spectral_cp_error_code *err) +{ + enum spectral_scan_mode mode = SPECTRAL_SCAN_MODE_NORMAL; + struct target_if_spectral *spectral; + QDF_STATUS status; + + if (!err) { + spectral_err("Error code argument is null"); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + *err = SPECTRAL_SCAN_ERR_INVALID; + + if (!pdev) { + spectral_err("pdev object is NULL"); + return QDF_STATUS_E_FAILURE; + } + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("spectral object is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (smode >= SPECTRAL_SCAN_MODE_MAX) { + spectral_err("Invalid Spectral mode %u", smode); + *err = SPECTRAL_SCAN_ERR_MODE_UNSUPPORTED; + return QDF_STATUS_E_FAILURE; + } + + if (!spectral->properties[smode][threshtype].supported) { + spectral_err("Spectral parameter(%u) unsupported for mode %u", + threshtype, smode); + *err = SPECTRAL_SCAN_ERR_PARAM_UNSUPPORTED; + return QDF_STATUS_E_FAILURE; + } + + if (spectral->properties[smode][threshtype].common_all_modes) { + spectral_warn("Setting Spectral parameter %u for all modes", + threshtype); + for (; mode < SPECTRAL_SCAN_MODE_MAX; mode++) { + status = _target_if_set_spectral_config + (spectral, threshtype, value, + mode, err); + if (QDF_IS_STATUS_ERROR(status)) + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; + } + + return _target_if_set_spectral_config(spectral, threshtype, + value, smode, err); +} + +/** + * target_if_get_fft_bin_count() - Get fft bin count for a given fft length + * @fft_len: FFT length + * @pdev: Pointer to pdev object + * + * API to get fft bin count for a given fft length + * + * Return: FFt bin count + */ +static int +target_if_get_fft_bin_count(int fft_len) +{ + int bin_count = 0; + + switch (fft_len) { + case 5: + bin_count = 16; + break; + case 6: + bin_count = 32; + break; + case 7: + bin_count = 64; + break; + case 8: + bin_count = 128; + break; + case 9: + bin_count = 256; + break; + default: + break; + } + + return bin_count; +} + +/** + * target_if_init_upper_lower_flags() - Initializes control and extension + * segment flags + * @fft_len: FFT length + * @pdev: Pointer to pdev object + * + * API to initialize the control and extension flags with the lower/upper + * segment based on the HT mode + * + * Return: FFt bin count + */ +static void +target_if_init_upper_lower_flags(struct target_if_spectral *spectral) +{ + int current_channel = 0; + int ext_channel = 0; + struct target_if_spectral_ops *p_sops = + GET_TARGET_IF_SPECTRAL_OPS(spectral); + + current_channel = p_sops->get_current_channel(spectral); + ext_channel = p_sops->get_extension_channel(spectral); + + if ((current_channel == 0) || (ext_channel == 0)) + return; + + if (spectral->sc_spectral_20_40_mode) { + /* HT40 mode */ + if (ext_channel < current_channel) { + spectral->lower_is_extension = 1; + spectral->upper_is_control = 1; + spectral->lower_is_control = 0; + spectral->upper_is_extension = 0; + } else { + spectral->lower_is_extension = 0; + spectral->upper_is_control = 0; + spectral->lower_is_control = 1; + spectral->upper_is_extension = 1; + } + } else { + /* HT20 mode, lower is always control */ + spectral->lower_is_extension = 0; + spectral->upper_is_control = 0; + spectral->lower_is_control = 1; + spectral->upper_is_extension = 0; + } +} + +/** + * target_if_get_spectral_config() - Get spectral configuration + * @pdev: Pointer to pdev object + * @param: Pointer to spectral_config structure in which the configuration + * should be returned + * @smode: Spectral scan mode + * + * API to get the current spectral configuration + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure + */ +QDF_STATUS +target_if_get_spectral_config(struct wlan_objmgr_pdev *pdev, + struct spectral_config *param, + enum spectral_scan_mode smode) +{ + struct target_if_spectral_ops *p_sops = NULL; + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + + if (!spectral) { + spectral_err("SPECTRAL : Module doesn't exist"); + return QDF_STATUS_E_FAILURE; + } + + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + + if (!p_sops) { + spectral_err("p_sops is null"); + return QDF_STATUS_E_FAILURE; + } + + if (smode >= SPECTRAL_SCAN_MODE_MAX) { + spectral_err("Invalid Spectral mode %u", smode); + return QDF_STATUS_E_FAILURE; + } + + qdf_mem_zero(param, sizeof(struct spectral_config)); + p_sops->get_spectral_config(spectral, param, smode); + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_spectral_scan_enable_params() - Enable use of desired Spectral + * parameters + * @spectral: Pointer to Spectral target_if internal private data + * @spectral_params: Pointer to Spectral parameters + * @smode: Spectral scan mode + * @err: Spectral error code + * + * Enable use of desired Spectral parameters by configuring them into HW, and + * starting Spectral scan + * + * Return: 0 on success, 1 on failure + */ +int +target_if_spectral_scan_enable_params(struct target_if_spectral *spectral, + struct spectral_config *spectral_params, + enum spectral_scan_mode smode, + enum spectral_cp_error_code *err) +{ + int extension_channel = 0; + int current_channel = 0; + struct target_if_spectral_ops *p_sops = NULL; + QDF_STATUS status; + + if (!spectral) { + spectral_err("Spectral LMAC object is NULL"); + return 1; + } + + if (smode >= SPECTRAL_SCAN_MODE_MAX) { + spectral_err("Invalid Spectral mode %u", smode); + return 1; + } + + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + + if (!p_sops) { + spectral_err("p_sops is NULL"); + return 1; + } + + spectral->sc_spectral_noise_pwr_cal = + spectral_params->ss_spectral_pri ? 1 : 0; + + /* check if extension channel is present */ + extension_channel = p_sops->get_extension_channel(spectral); + current_channel = p_sops->get_current_channel(spectral); + + status = target_if_spectral_populate_chwidth(spectral); + if (QDF_IS_STATUS_ERROR(status)) { + spectral_err("Failed to get channel widths"); + return 1; + } + + if (spectral->capability.advncd_spectral_cap) { + spectral->lb_edge_extrabins = 0; + spectral->rb_edge_extrabins = 0; + + if (spectral->is_lb_edge_extrabins_format && + spectral->params[smode].ss_rpt_mode == 2) { + spectral->lb_edge_extrabins = 4; + } + + if (spectral->is_rb_edge_extrabins_format && + spectral->params[smode].ss_rpt_mode == 2) { + spectral->rb_edge_extrabins = 4; + } + + if (spectral->ch_width[smode] == CH_WIDTH_20MHZ) { + spectral->sc_spectral_20_40_mode = 0; + + spectral->spectral_numbins = + target_if_get_fft_bin_count( + spectral->params[smode].ss_fft_size); + spectral->spectral_fft_len = + target_if_get_fft_bin_count( + spectral->params[smode].ss_fft_size); + spectral->spectral_data_len = + target_if_get_fft_bin_count( + spectral->params[smode].ss_fft_size); + /* + * Initialize classifier params to be sent to user + * space classifier + */ + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = 0; + + } else if (spectral->ch_width[smode] == CH_WIDTH_40MHZ) { + /* TODO : Remove this variable */ + spectral->sc_spectral_20_40_mode = 1; + spectral->spectral_numbins = + target_if_get_fft_bin_count( + spectral->params[smode].ss_fft_size); + spectral->spectral_fft_len = + target_if_get_fft_bin_count( + spectral->params[smode].ss_fft_size); + spectral->spectral_data_len = + target_if_get_fft_bin_count( + spectral->params[smode].ss_fft_size); + + /* + * Initialize classifier params to be sent to user + * space classifier + */ + if (extension_channel < current_channel) { + spectral->classifier_params.lower_chan_in_mhz = + extension_channel; + spectral->classifier_params.upper_chan_in_mhz = + current_channel; + } else { + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = + extension_channel; + } + + } else if (spectral->ch_width[smode] == CH_WIDTH_80MHZ) { + /* Set the FFT Size */ + /* TODO : Remove this variable */ + spectral->sc_spectral_20_40_mode = 0; + spectral->spectral_numbins = + target_if_get_fft_bin_count( + spectral->params[smode].ss_fft_size); + spectral->spectral_fft_len = + target_if_get_fft_bin_count( + spectral->params[smode].ss_fft_size); + spectral->spectral_data_len = + target_if_get_fft_bin_count( + spectral->params[smode].ss_fft_size); + + /* + * Initialize classifier params to be sent to user + * space classifier + */ + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = 0; + + /* + * Initialize classifier params to be sent to user + * space classifier + */ + if (extension_channel < current_channel) { + spectral->classifier_params.lower_chan_in_mhz = + extension_channel; + spectral->classifier_params.upper_chan_in_mhz = + current_channel; + } else { + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = + extension_channel; + } + + } else if (spectral->ch_width[smode] == CH_WIDTH_160MHZ) { + /* Set the FFT Size */ + + /* The below applies to both 160 and 80+80 cases */ + + /* TODO : Remove this variable */ + spectral->sc_spectral_20_40_mode = 0; + spectral->spectral_numbins = + target_if_get_fft_bin_count( + spectral->params[smode].ss_fft_size); + spectral->spectral_fft_len = + target_if_get_fft_bin_count( + spectral->params[smode].ss_fft_size); + spectral->spectral_data_len = + target_if_get_fft_bin_count( + spectral->params[smode].ss_fft_size); + + /* + * Initialize classifier params to be sent to user + * space classifier + */ + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = 0; + + /* + * Initialize classifier params to be sent to user + * space classifier + */ + if (extension_channel < current_channel) { + spectral->classifier_params.lower_chan_in_mhz = + extension_channel; + spectral->classifier_params.upper_chan_in_mhz = + current_channel; + } else { + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = + extension_channel; + } + } + + if (spectral->spectral_numbins) { + spectral->spectral_numbins += + spectral->lb_edge_extrabins; + spectral->spectral_numbins += + spectral->rb_edge_extrabins; + } + + if (spectral->spectral_fft_len) { + spectral->spectral_fft_len += + spectral->lb_edge_extrabins; + spectral->spectral_fft_len += + spectral->rb_edge_extrabins; + } + + if (spectral->spectral_data_len) { + spectral->spectral_data_len += + spectral->lb_edge_extrabins; + spectral->spectral_data_len += + spectral->rb_edge_extrabins; + } + } else { + /* + * The decision to find 20/40 mode is found based on the + * presence of extension channel + * instead of channel width, as the channel width can + * dynamically change + */ + + if (extension_channel == 0) { + spectral->spectral_numbins = SPECTRAL_HT20_NUM_BINS; + spectral->spectral_dc_index = SPECTRAL_HT20_DC_INDEX; + spectral->spectral_fft_len = SPECTRAL_HT20_FFT_LEN; + spectral->spectral_data_len = + SPECTRAL_HT20_TOTAL_DATA_LEN; + /* only valid in 20-40 mode */ + spectral->spectral_lower_max_index_offset = -1; + /* only valid in 20-40 mode */ + spectral->spectral_upper_max_index_offset = -1; + spectral->spectral_max_index_offset = + spectral->spectral_fft_len + 2; + spectral->sc_spectral_20_40_mode = 0; + + /* + * Initialize classifier params to be sent to user + * space classifier + */ + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = 0; + + } else { + spectral->spectral_numbins = + SPECTRAL_HT40_TOTAL_NUM_BINS; + spectral->spectral_fft_len = SPECTRAL_HT40_FFT_LEN; + spectral->spectral_data_len = + SPECTRAL_HT40_TOTAL_DATA_LEN; + spectral->spectral_dc_index = SPECTRAL_HT40_DC_INDEX; + /* only valid in 20 mode */ + spectral->spectral_max_index_offset = -1; + spectral->spectral_lower_max_index_offset = + spectral->spectral_fft_len + 2; + spectral->spectral_upper_max_index_offset = + spectral->spectral_fft_len + 5; + spectral->sc_spectral_20_40_mode = 1; + + /* + * Initialize classifier params to be sent to user + * space classifier + */ + if (extension_channel < current_channel) { + spectral->classifier_params.lower_chan_in_mhz = + extension_channel; + spectral->classifier_params.upper_chan_in_mhz = + current_channel; + } else { + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = + extension_channel; + } + } + } + + spectral->send_single_packet = 0; + spectral->classifier_params.spectral_20_40_mode = + spectral->sc_spectral_20_40_mode; + spectral->classifier_params.spectral_dc_index = + spectral->spectral_dc_index; + spectral->spectral_sent_msg = 0; + spectral->classify_scan = 0; + spectral->num_spectral_data = 0; + + if (!p_sops->is_spectral_active(spectral, smode)) { + p_sops->configure_spectral(spectral, spectral_params, smode); + p_sops->start_spectral_scan(spectral, smode, err); + spectral->timestamp_war.timestamp_war_offset[smode] = 0; + spectral->timestamp_war.last_fft_timestamp[smode] = 0; + } + + /* get current spectral configuration */ + p_sops->get_spectral_config(spectral, &spectral->params[smode], smode); + + target_if_init_upper_lower_flags(spectral); + + return 0; +} + +/** + * target_if_is_aspectral_prohibited_by_adfs() - Is Agile Spectral prohibited by + * Agile DFS + * @psoc: Pointer to psoc + * @object: Pointer to pdev + * @arg: Pointer to flag which indicates whether Agile Spectral is prohibited + * + * This API checks whether Agile DFS is running on any of the pdevs. If so, it + * indicates that Agile Spectral scan is prohibited by Agile DFS. + * + * Return: void + */ +static void +target_if_is_aspectral_prohibited_by_adfs(struct wlan_objmgr_psoc *psoc, + void *object, void *arg) +{ + bool *is_aspectral_prohibited = arg; + struct wlan_objmgr_pdev *cur_pdev = object; + bool is_agile_dfs_enabled_cur_pdev = false; + QDF_STATUS status; + + qdf_assert_always(is_aspectral_prohibited); + if (*is_aspectral_prohibited) + return; + + qdf_assert_always(psoc); + qdf_assert_always(cur_pdev); + + status = ucfg_dfs_get_agile_precac_enable + (cur_pdev, + &is_agile_dfs_enabled_cur_pdev); + if (QDF_IS_STATUS_ERROR(status)) { + spectral_err("Get agile precac failed, prohibiting aSpectral"); + *is_aspectral_prohibited = true; + return; + } + + if (is_agile_dfs_enabled_cur_pdev) { + spectral_err("aDFS is in progress on one of the pdevs"); + *is_aspectral_prohibited = true; + } +} + +/** + * target_if_get_curr_band() - Get current operating band of pdev + * + * @spectral: pointer to spectral object + * + * API to get current operating band of a given pdev. + * + * Return: if success enum reg_wifi_band, REG_BAND_UNKNOWN in case of failure + */ +static enum reg_wifi_band +target_if_get_curr_band(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_vdev *vdev; + int16_t chan_freq; + enum reg_wifi_band cur_band; + + if (!pdev) { + spectral_err("pdev is NULL"); + return REG_BAND_UNKNOWN; + } + + vdev = wlan_objmgr_pdev_get_first_vdev(pdev, WLAN_SPECTRAL_ID); + if (!vdev) { + spectral_debug("vdev is NULL"); + return REG_BAND_UNKNOWN; + } + chan_freq = target_if_vdev_get_chan_freq(vdev); + cur_band = wlan_reg_freq_to_band(chan_freq); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + return cur_band; +} + +/** + * target_if_is_agile_scan_active_in_5g() - Is Agile Spectral scan active on + * any of the 5G pdevs + * @psoc: Pointer to psoc + * @object: Pointer to pdev + * @arg: Pointer to flag which indicates whether Agile Spectral scan is in + * progress in any 5G pdevs + * + * Return: void + */ +static void +target_if_is_agile_scan_active_in_5g(struct wlan_objmgr_psoc *psoc, + void *object, void *arg) +{ + enum reg_wifi_band band; + bool *is_agile_scan_inprog_5g_pdev = arg; + struct target_if_spectral *spectral; + struct wlan_objmgr_pdev *cur_pdev = object; + struct target_if_spectral_ops *p_sops; + + if (*is_agile_scan_inprog_5g_pdev) + return; + + spectral = get_target_if_spectral_handle_from_pdev(cur_pdev); + if (!spectral) { + spectral_err("target if spectral handle is NULL"); + return; + } + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + + band = target_if_get_curr_band(cur_pdev); + if (band == REG_BAND_UNKNOWN) { + spectral_debug("Failed to get current band"); + return; + } + + if (band == REG_BAND_5G && + p_sops->is_spectral_active(spectral, SPECTRAL_SCAN_MODE_AGILE)) + *is_agile_scan_inprog_5g_pdev = true; +} + +/** + * target_if_is_agile_supported_cur_chmask() - Is Agile Spectral scan supported + * for current vdev rx chainmask. + * + * @spectral: Pointer to Spectral object + * @is_supported: Pointer to is_supported + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure + */ +static QDF_STATUS +target_if_is_agile_supported_cur_chmask(struct target_if_spectral *spectral, + bool *is_supported) +{ + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_rxchainmask; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct target_psoc_info *tgt_psoc_info; + struct wlan_psoc_host_service_ext_param *ext_svc_param; + struct wlan_psoc_host_mac_phy_caps *mac_phy_cap_arr = NULL; + struct wlan_psoc_host_mac_phy_caps *mac_phy_cap = NULL; + struct wlan_psoc_host_chainmask_table *table; + int j; + uint32_t table_id; + enum phy_ch_width ch_width; + uint8_t pdev_id; + + if (!spectral) { + spectral_err("spectral target if object is null"); + return QDF_STATUS_E_FAILURE; + } + + if (!is_supported) { + spectral_err("is supported argument is null"); + return QDF_STATUS_E_FAILURE; + } + + if (spectral->spectral_gen <= SPECTRAL_GEN2) { + spectral_err("HW Agile mode is not supported up to gen 2"); + return QDF_STATUS_E_FAILURE; + } + + pdev = spectral->pdev_obj; + if (!pdev) { + spectral_err("pdev is null"); + return QDF_STATUS_E_FAILURE; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + spectral_err("psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) { + spectral_err("First vdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + + vdev_rxchainmask = wlan_vdev_mlme_get_rxchainmask(vdev); + if (!vdev_rxchainmask) { + spectral_err("vdev rx chainmask is zero"); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + return QDF_STATUS_E_FAILURE; + } + + ch_width = target_if_vdev_get_ch_width(vdev); + if (ch_width == CH_WIDTH_INVALID) { + spectral_err("Invalid channel width"); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + return QDF_STATUS_E_FAILURE; + } + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_psoc_info) { + spectral_err("target_psoc_info is null"); + return QDF_STATUS_E_FAILURE; + } + + ext_svc_param = target_psoc_get_service_ext_param(tgt_psoc_info); + if (!ext_svc_param) { + spectral_err("Extended service ready param null"); + return QDF_STATUS_E_FAILURE; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + mac_phy_cap_arr = target_psoc_get_mac_phy_cap(tgt_psoc_info); + if (!mac_phy_cap_arr) { + spectral_err("mac phy cap array is null"); + return QDF_STATUS_E_FAILURE; + } + + mac_phy_cap = &mac_phy_cap_arr[pdev_id]; + if (!mac_phy_cap) { + spectral_err("mac phy cap is null"); + return QDF_STATUS_E_FAILURE; + } + + table_id = mac_phy_cap->chainmask_table_id; + table = &ext_svc_param->chainmask_table[table_id]; + if (!table) { + spectral_err("chainmask table not found"); + return QDF_STATUS_E_FAILURE; + } + + for (j = 0; j < table->num_valid_chainmasks; j++) { + if (table->cap_list[j].chainmask == vdev_rxchainmask) { + if (ch_width <= CH_WIDTH_80MHZ) + *is_supported = + table->cap_list[j].supports_aSpectral; + else + *is_supported = + table->cap_list[j].supports_aSpectral_160; + break; + } + } + + if (j == table->num_valid_chainmasks) { + spectral_err("vdev rx chainmask %u not found in table id = %u", + vdev_rxchainmask, table_id); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +target_if_start_spectral_scan(struct wlan_objmgr_pdev *pdev, + const enum spectral_scan_mode smode, + enum spectral_cp_error_code *err) +{ + struct target_if_spectral_ops *p_sops; + struct target_if_spectral *spectral; + struct wlan_objmgr_psoc *psoc; + enum reg_wifi_band band; + + if (!err) { + spectral_err("Error code argument is null"); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + *err = SPECTRAL_SCAN_ERR_INVALID; + + if (!pdev) { + spectral_err("pdev object is NUll"); + return QDF_STATUS_E_FAILURE; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + spectral_err("psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + if (smode >= SPECTRAL_SCAN_MODE_MAX) { + *err = SPECTRAL_SCAN_ERR_MODE_UNSUPPORTED; + spectral_err("Invalid Spectral mode %u", smode); + return QDF_STATUS_E_FAILURE; + } + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("Spectral LMAC object is NUll"); + return QDF_STATUS_E_FAILURE; + } + + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + if (!p_sops) { + spectral_err("p_sops is null"); + return QDF_STATUS_E_FAILURE; + } + + if (smode == SPECTRAL_SCAN_MODE_AGILE) { + QDF_STATUS status; + bool is_supported = false; + + status = target_if_is_agile_supported_cur_chmask(spectral, + &is_supported); + if (QDF_IS_STATUS_ERROR(status)) { + *err = SPECTRAL_SCAN_ERR_MODE_UNSUPPORTED; + return QDF_STATUS_E_FAILURE; + } + + if (!is_supported) { + spectral_err("aSpectral unsupported for cur chainmask"); + *err = SPECTRAL_SCAN_ERR_MODE_UNSUPPORTED; + return QDF_STATUS_E_FAILURE; + } + } + + band = target_if_get_curr_band(spectral->pdev_obj); + if (band == REG_BAND_UNKNOWN) { + spectral_err("Failed to get current band"); + return QDF_STATUS_E_FAILURE; + } + if ((band == REG_BAND_5G) && (smode == SPECTRAL_SCAN_MODE_AGILE)) { + struct target_psoc_info *tgt_hdl; + enum wmi_host_hw_mode_config_type mode; + bool is_agile_scan_inprog_5g_pdev; + + if (p_sops->is_spectral_active(spectral, + SPECTRAL_SCAN_MODE_AGILE)) { + spectral_err("Agile Scan in progress in current pdev"); + return QDF_STATUS_E_FAILURE; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return QDF_STATUS_E_FAILURE; + } + + mode = target_psoc_get_preferred_hw_mode(tgt_hdl); + switch (mode) { + case WMI_HOST_HW_MODE_SBS_PASSIVE: + case WMI_HOST_HW_MODE_SBS: + case WMI_HOST_HW_MODE_DBS_SBS: + case WMI_HOST_HW_MODE_DBS_OR_SBS: + is_agile_scan_inprog_5g_pdev = false; + wlan_objmgr_iterate_obj_list + (psoc, WLAN_PDEV_OP, + target_if_is_agile_scan_active_in_5g, + &is_agile_scan_inprog_5g_pdev, 0, + WLAN_SPECTRAL_ID); + break; + default: + is_agile_scan_inprog_5g_pdev = false; + break; + } + + if (is_agile_scan_inprog_5g_pdev) { + spectral_err("Agile Scan in progress in one of the SBS 5G pdev"); + *err = SPECTRAL_SCAN_ERR_MODE_UNSUPPORTED; + return QDF_STATUS_E_FAILURE; + } + } + + if (smode == SPECTRAL_SCAN_MODE_AGILE) { + bool is_aspectral_prohibited = false; + QDF_STATUS status; + + status = wlan_objmgr_iterate_obj_list + (psoc, WLAN_PDEV_OP, + target_if_is_aspectral_prohibited_by_adfs, + &is_aspectral_prohibited, 0, + WLAN_SPECTRAL_ID); + if (QDF_IS_STATUS_ERROR(status)) { + spectral_err("Failed to iterate over pdevs"); + *err = SPECTRAL_SCAN_ERR_MODE_UNSUPPORTED; + return QDF_STATUS_E_FAILURE; + } + + if (is_aspectral_prohibited) { + *err = SPECTRAL_SCAN_ERR_MODE_UNSUPPORTED; + return QDF_STATUS_E_FAILURE; + } + } + + if (!spectral->params_valid[smode]) { + target_if_spectral_info_read(spectral, + smode, + TARGET_IF_SPECTRAL_INFO_PARAMS, + &spectral->params[smode], + sizeof(spectral->params[smode])); + spectral->params_valid[smode] = true; + } + + qdf_spin_lock(&spectral->spectral_lock); + if (smode == SPECTRAL_SCAN_MODE_AGILE && + !spectral->params[smode].ss_frequency) { + *err = SPECTRAL_SCAN_ERR_PARAM_NOT_INITIALIZED; + qdf_spin_unlock(&spectral->spectral_lock); + return QDF_STATUS_E_FAILURE; + } + + if (smode == SPECTRAL_SCAN_MODE_AGILE) { + QDF_STATUS status; + bool is_overlapping; + + status = target_if_is_agile_span_overlap_with_operating_span + (spectral, + spectral->params[smode].ss_frequency, + &is_overlapping); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_spin_unlock(&spectral->spectral_lock); + return QDF_STATUS_E_FAILURE; + } + + if (is_overlapping) { + *err = SPECTRAL_SCAN_ERR_PARAM_INVALID_VALUE; + qdf_spin_unlock(&spectral->spectral_lock); + return QDF_STATUS_E_FAILURE; + } + } + + target_if_spectral_scan_enable_params(spectral, + &spectral->params[smode], smode, + err); + qdf_spin_unlock(&spectral->spectral_lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +target_if_stop_spectral_scan(struct wlan_objmgr_pdev *pdev, + const enum spectral_scan_mode smode, + enum spectral_cp_error_code *err) +{ + struct target_if_spectral_ops *p_sops; + struct target_if_spectral *spectral; + + if (!err) { + spectral_err("Error code argument is null"); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + *err = SPECTRAL_SCAN_ERR_INVALID; + + if (smode >= SPECTRAL_SCAN_MODE_MAX) { + *err = SPECTRAL_SCAN_ERR_MODE_UNSUPPORTED; + spectral_err("Invalid Spectral mode %u", smode); + return QDF_STATUS_E_FAILURE; + } + + if (!pdev) { + spectral_err("pdev object is NUll "); + return QDF_STATUS_E_FAILURE; + } + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("Spectral LMAC object is NUll "); + return QDF_STATUS_E_FAILURE; + } + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + + qdf_spin_lock(&spectral->spectral_lock); + p_sops->stop_spectral_scan(spectral, smode); + if (spectral->classify_scan) { + /* TODO : Check if this logic is necessary */ + spectral->detects_control_channel = 0; + spectral->detects_extension_channel = 0; + spectral->detects_above_dc = 0; + spectral->detects_below_dc = 0; + spectral->classify_scan = 0; + } + + spectral->send_single_packet = 0; + spectral->sc_spectral_scan = 0; + + qdf_spin_unlock(&spectral->spectral_lock); + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_is_spectral_active() - Get whether Spectral is active + * @pdev: Pointer to pdev object + * @smode: Spectral scan mode + * + * API to get whether Spectral is active + * + * Return: True if Spectral is active, false if Spectral is not active + */ +bool +target_if_is_spectral_active(struct wlan_objmgr_pdev *pdev, + const enum spectral_scan_mode smode) +{ + struct target_if_spectral *spectral = NULL; + struct target_if_spectral_ops *p_sops = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + + if (!spectral) { + spectral_err("SPECTRAL : Module doesn't exist"); + return QDF_STATUS_E_FAILURE; + } + + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + + if (!p_sops) { + spectral_err("p_sops is null"); + return QDF_STATUS_E_FAILURE; + } + + if (smode >= SPECTRAL_SCAN_MODE_MAX) { + spectral_err("Invalid Spectral mode %u", smode); + return QDF_STATUS_E_FAILURE; + } + + return p_sops->is_spectral_active(spectral, smode); +} + +/** + * target_if_is_spectral_enabled() - Get whether Spectral is enabled + * @pdev: Pointer to pdev object + * @smode: Spectral scan mode + * + * API to get whether Spectral is enabled + * + * Return: True if Spectral is enabled, false if Spectral is not enabled + */ +bool +target_if_is_spectral_enabled(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode) +{ + struct target_if_spectral *spectral = NULL; + struct target_if_spectral_ops *p_sops = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + + if (!spectral) { + spectral_err("SPECTRAL : Module doesn't exist"); + return QDF_STATUS_E_FAILURE; + } + + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + + if (!p_sops) { + spectral_err("p_sops is null"); + return QDF_STATUS_E_FAILURE; + } + + if (smode >= SPECTRAL_SCAN_MODE_MAX) { + spectral_err("Invalid Spectral mode %u", smode); + return QDF_STATUS_E_FAILURE; + } + + return p_sops->is_spectral_enabled(spectral, smode); +} + +#ifdef DIRECT_BUF_RX_DEBUG +/** + * target_if_spectral_do_dbr_ring_debug() - Start/Stop Spectral DMA ring debug + * @pdev: Pointer to pdev object + * @enable: Enable/Disable Spectral DMA ring debug + * + * Start/stop Spectral DMA ring debug based on @enable. + * Also save the state for future use. + * + * Return: QDF_STATUS of operation + */ +static QDF_STATUS +target_if_spectral_do_dbr_ring_debug(struct wlan_objmgr_pdev *pdev, bool enable) +{ + struct target_if_spectral *spectral; + struct wlan_lmac_if_tx_ops *tx_ops; + struct wlan_objmgr_psoc *psoc; + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + spectral_err("psoc is null"); + return QDF_STATUS_E_INVAL; + } + tx_ops = &psoc->soc_cb.tx_ops; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("Spectal LMAC object is NULL"); + return QDF_STATUS_E_INVAL; + } + + /* Save the state */ + spectral->dbr_ring_debug = enable; + + if (enable) + return tx_ops->dbr_tx_ops.direct_buf_rx_start_ring_debug( + pdev, 0, SPECTRAL_DBR_RING_DEBUG_SIZE); + else + return tx_ops->dbr_tx_ops.direct_buf_rx_stop_ring_debug( + pdev, 0); + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_spectral_do_dbr_buff_debug() - Start/Stop Spectral DMA buffer debug + * @pdev: Pointer to pdev object + * @enable: Enable/Disable Spectral DMA buffer debug + * + * Start/stop Spectral DMA buffer debug based on @enable. + * Also save the state for future use. + * + * Return: QDF_STATUS of operation + */ +static QDF_STATUS +target_if_spectral_do_dbr_buff_debug(struct wlan_objmgr_pdev *pdev, bool enable) +{ + struct target_if_spectral *spectral; + struct wlan_lmac_if_tx_ops *tx_ops; + struct wlan_objmgr_psoc *psoc; + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + spectral_err("psoc is null"); + return QDF_STATUS_E_INVAL; + } + tx_ops = &psoc->soc_cb.tx_ops; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("Spectal LMAC object is NULL"); + return QDF_STATUS_E_INVAL; + } + + /* Save the state */ + spectral->dbr_buff_debug = enable; + + if (enable) + return tx_ops->dbr_tx_ops.direct_buf_rx_start_buffer_poisoning( + pdev, 0, MEM_POISON_SIGNATURE); + else + return tx_ops->dbr_tx_ops.direct_buf_rx_stop_buffer_poisoning( + pdev, 0); +} + +/** + * target_if_spectral_check_and_do_dbr_buff_debug() - Start/Stop Spectral buffer + * debug based on the previous state + * @pdev: Pointer to pdev object + * + * Return: QDF_STATUS of operation + */ +static QDF_STATUS +target_if_spectral_check_and_do_dbr_buff_debug(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral *spectral; + + if (!pdev) { + spectral_err("pdev is NULL!"); + return QDF_STATUS_E_FAILURE; + } + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("Spectal LMAC object is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (spectral->dbr_buff_debug) + return target_if_spectral_do_dbr_buff_debug(pdev, true); + else + return target_if_spectral_do_dbr_buff_debug(pdev, false); +} + +/** + * target_if_spectral_check_and_do_dbr_ring_debug() - Start/Stop Spectral ring + * debug based on the previous state + * @pdev: Pointer to pdev object + * + * Return: QDF_STATUS of operation + */ +static QDF_STATUS +target_if_spectral_check_and_do_dbr_ring_debug(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral *spectral; + + if (!pdev) { + spectral_err("pdev is NULL!"); + return QDF_STATUS_E_FAILURE; + } + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("Spectal LMAC object is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (spectral->dbr_ring_debug) + return target_if_spectral_do_dbr_ring_debug(pdev, true); + else + return target_if_spectral_do_dbr_ring_debug(pdev, false); +} + +/** + * target_if_spectral_set_dma_debug() - Set DMA debug for Spectral + * @pdev: Pointer to pdev object + * @dma_debug_type: Type of Spectral DMA debug i.e., ring or buffer debug + * @debug_value: Value to be set for @dma_debug_type + * + * Set DMA debug for Spectral and start/stop Spectral DMA debug function + * based on @debug_value + * + * Return: QDF_STATUS of operation + */ +static QDF_STATUS +target_if_spectral_set_dma_debug( + struct wlan_objmgr_pdev *pdev, + enum spectral_dma_debug dma_debug_type, + bool debug_value) +{ + struct target_if_spectral_ops *p_sops; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_tx_ops *tx_ops; + struct target_if_spectral *spectral; + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + spectral_err("psoc is null"); + return QDF_STATUS_E_INVAL; + } + tx_ops = &psoc->soc_cb.tx_ops; + + if (!tx_ops->target_tx_ops.tgt_get_tgt_type) { + spectral_err("Unable to fetch target type"); + return QDF_STATUS_E_FAILURE; + } + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("Spectal LMAC object is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (spectral->direct_dma_support) { + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + if (p_sops->is_spectral_active(spectral, + SPECTRAL_SCAN_MODE_NORMAL) || + p_sops->is_spectral_active(spectral, + SPECTRAL_SCAN_MODE_AGILE)) { + spectral_err("Altering DBR debug config isn't allowed during an ongoing scan"); + return QDF_STATUS_E_FAILURE; + } + + switch (dma_debug_type) { + case SPECTRAL_DMA_RING_DEBUG: + target_if_spectral_do_dbr_ring_debug(pdev, debug_value); + break; + + case SPECTRAL_DMA_BUFFER_DEBUG: + target_if_spectral_do_dbr_buff_debug(pdev, debug_value); + break; + + default: + spectral_err("Unsupported DMA debug type : %d", + dma_debug_type); + return QDF_STATUS_E_FAILURE; + } + } + return QDF_STATUS_SUCCESS; +} +#endif /* DIRECT_BUF_RX_DEBUG */ + +/** + * target_if_spectral_direct_dma_support() - Get Direct-DMA support + * @pdev: Pointer to pdev object + * + * Return: Whether Direct-DMA is supported on this radio + */ +static bool +target_if_spectral_direct_dma_support(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral *spectral; + + if (!pdev) { + spectral_err("pdev is NULL!"); + return false; + } + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("Spectral LMAC object is NULL"); + return false; + } + return spectral->direct_dma_support; +} + +/** + * target_if_set_debug_level() - Set debug level for Spectral + * @pdev: Pointer to pdev object + * @debug_level: Debug level + * + * API to set the debug level for Spectral + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure + */ +QDF_STATUS +target_if_set_debug_level(struct wlan_objmgr_pdev *pdev, uint32_t debug_level) +{ + spectral_debug_level = (DEBUG_SPECTRAL << debug_level); + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_get_debug_level() - Get debug level for Spectral + * @pdev: Pointer to pdev object + * + * API to get the debug level for Spectral + * + * Return: Current debug level + */ +uint32_t +target_if_get_debug_level(struct wlan_objmgr_pdev *pdev) +{ + return spectral_debug_level; +} + +/** + * target_if_get_spectral_capinfo() - Get Spectral capability information + * @pdev: Pointer to pdev object + * @scaps: Buffer into which data should be copied + * + * API to get the spectral capability information + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure + */ +QDF_STATUS +target_if_get_spectral_capinfo(struct wlan_objmgr_pdev *pdev, + struct spectral_caps *scaps) +{ + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("SPECTRAL : Module doesn't exist"); + return QDF_STATUS_E_FAILURE; + } + + qdf_mem_copy(scaps, &spectral->capability, + sizeof(struct spectral_caps)); + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_get_spectral_diagstats() - Get Spectral diagnostic statistics + * @pdev: Pointer to pdev object + * @stats: Buffer into which data should be copied + * + * API to get the spectral diagnostic statistics + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure + */ +QDF_STATUS +target_if_get_spectral_diagstats(struct wlan_objmgr_pdev *pdev, + struct spectral_diag_stats *stats) +{ + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("SPECTRAL : Module doesn't exist"); + return QDF_STATUS_E_FAILURE; + } + + qdf_mem_copy(stats, &spectral->diag_stats, + sizeof(struct spectral_diag_stats)); + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_register_wmi_spectral_cmd_ops() - Register wmi_spectral_cmd_ops + * @cmd_ops: Pointer to the structure having wmi_spectral_cmd function pointers + * @pdev: Pointer to pdev object + * + * API for register wmi_spectral_cmd_ops in spectral internal data structure + * + * Return: void + */ +void +target_if_register_wmi_spectral_cmd_ops(struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops) +{ + struct target_if_spectral *spectral = + get_target_if_spectral_handle_from_pdev(pdev); + + if (!spectral) { + spectral_err("Spectral LMAC object is null"); + return; + } + spectral->param_wmi_cmd_ops = *cmd_ops; +} + +/** + * target_if_register_netlink_cb() - Register Netlink callbacks + * @pdev: Pointer to pdev object + * @nl_cb: Netlink callbacks to register + * + * Return: void + */ +static void +target_if_register_netlink_cb( + struct wlan_objmgr_pdev *pdev, + struct spectral_nl_cb *nl_cb) +{ + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + + if (!spectral) { + spectral_err("SPECTRAL : Module doesn't exist"); + return; + } + + qdf_mem_copy(&spectral->nl_cb, nl_cb, sizeof(struct spectral_nl_cb)); + + if (spectral->use_nl_bcast) + spectral->send_phy_data = spectral->nl_cb.send_nl_bcast; + else + spectral->send_phy_data = spectral->nl_cb.send_nl_unicast; +} + +/** + * target_if_use_nl_bcast() - Get whether to use broadcast/unicast while sending + * Netlink messages to the application layer + * @pdev: Pointer to pdev object + * + * Return: true for broadcast, false for unicast + */ +static bool +target_if_use_nl_bcast(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + + if (!spectral) { + spectral_err("SPECTRAL : Module doesn't exist"); + return false; + } + + return spectral->use_nl_bcast; +} + +/** + * target_if_deregister_netlink_cb() - De-register Netlink callbacks + * @pdev: Pointer to pdev object + * + * Return: void + */ +static void +target_if_deregister_netlink_cb(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("SPECTRAL : Module doesn't exist"); + return; + } + + qdf_mem_zero(&spectral->nl_cb, sizeof(struct spectral_nl_cb)); +} + +static int +target_if_process_spectral_report(struct wlan_objmgr_pdev *pdev, + void *payload) +{ + struct target_if_spectral *spectral = NULL; + struct target_if_spectral_ops *p_sops = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("SPECTRAL : Module doesn't exist"); + return -EPERM; + } + + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + + if (!p_sops) { + spectral_err("p_sops is null"); + return -EPERM; + } + + return p_sops->process_spectral_report(pdev, payload); +} + +#ifdef DIRECT_BUF_RX_DEBUG +static inline void +target_if_sptrl_debug_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + if (!tx_ops) + return; + + tx_ops->sptrl_tx_ops.sptrlto_set_dma_debug = + target_if_spectral_set_dma_debug; + tx_ops->sptrl_tx_ops.sptrlto_check_and_do_dbr_ring_debug = + target_if_spectral_check_and_do_dbr_ring_debug; + tx_ops->sptrl_tx_ops.sptrlto_check_and_do_dbr_buff_debug = + target_if_spectral_check_and_do_dbr_buff_debug; +} +#else +static inline void +target_if_sptrl_debug_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif + +void +target_if_sptrl_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + tx_ops->sptrl_tx_ops.sptrlto_pdev_spectral_init = + target_if_pdev_spectral_init; + tx_ops->sptrl_tx_ops.sptrlto_pdev_spectral_deinit = + target_if_pdev_spectral_deinit; + tx_ops->sptrl_tx_ops.sptrlto_set_spectral_config = + target_if_set_spectral_config; + tx_ops->sptrl_tx_ops.sptrlto_get_spectral_config = + target_if_get_spectral_config; + tx_ops->sptrl_tx_ops.sptrlto_start_spectral_scan = + target_if_start_spectral_scan; + tx_ops->sptrl_tx_ops.sptrlto_stop_spectral_scan = + target_if_stop_spectral_scan; + tx_ops->sptrl_tx_ops.sptrlto_is_spectral_active = + target_if_is_spectral_active; + tx_ops->sptrl_tx_ops.sptrlto_is_spectral_enabled = + target_if_is_spectral_enabled; + tx_ops->sptrl_tx_ops.sptrlto_set_debug_level = + target_if_set_debug_level; + tx_ops->sptrl_tx_ops.sptrlto_get_debug_level = + target_if_get_debug_level; + tx_ops->sptrl_tx_ops.sptrlto_get_spectral_capinfo = + target_if_get_spectral_capinfo; + tx_ops->sptrl_tx_ops.sptrlto_get_spectral_diagstats = + target_if_get_spectral_diagstats; + tx_ops->sptrl_tx_ops.sptrlto_register_wmi_spectral_cmd_ops = + target_if_register_wmi_spectral_cmd_ops; + tx_ops->sptrl_tx_ops.sptrlto_register_netlink_cb = + target_if_register_netlink_cb; + tx_ops->sptrl_tx_ops.sptrlto_use_nl_bcast = + target_if_use_nl_bcast; + tx_ops->sptrl_tx_ops.sptrlto_deregister_netlink_cb = + target_if_deregister_netlink_cb; + tx_ops->sptrl_tx_ops.sptrlto_process_spectral_report = + target_if_process_spectral_report; + tx_ops->sptrl_tx_ops.sptrlto_direct_dma_support = + target_if_spectral_direct_dma_support; + target_if_sptrl_debug_register_tx_ops(tx_ops); +} +qdf_export_symbol(target_if_sptrl_register_tx_ops); + +void +target_if_spectral_send_intf_found_msg(struct wlan_objmgr_pdev *pdev, + uint16_t cw_int, uint32_t dcs_enabled) +{ + struct spectral_samp_msg *msg = NULL; + struct target_if_spectral_ops *p_sops = NULL; + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + + if (!spectral) { + spectral_err("SPECTRAL : Module doesn't exist"); + return; + } + + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + if (!p_sops) { + spectral_err("p_sops is null"); + return; + } + + msg = (struct spectral_samp_msg *)spectral->nl_cb.get_sbuff( + spectral->pdev_obj, + SPECTRAL_MSG_INTERFERENCE_NOTIFICATION, + SPECTRAL_MSG_BUF_NEW); + + if (msg) { + msg->int_type = cw_int ? + SPECTRAL_DCS_INT_CW : SPECTRAL_DCS_INT_WIFI; + msg->dcs_enabled = dcs_enabled; + msg->signature = SPECTRAL_SIGNATURE; + p_sops->get_mac_address(spectral, msg->macaddr); + if (spectral->send_phy_data + (pdev, + SPECTRAL_MSG_INTERFERENCE_NOTIFICATION) == 0) + spectral->spectral_sent_msg++; + } +} +qdf_export_symbol(target_if_spectral_send_intf_found_msg); diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral.h b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral.h new file mode 100644 index 0000000000000000000000000000000000000000..7d651c883f78fcb3b2faf2934a0e5dca397294fd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral.h @@ -0,0 +1,2083 @@ +/* + * Copyright (c) 2011,2017-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _TARGET_IF_SPECTRAL_H_ +#define _TARGET_IF_SPECTRAL_H_ + +#include +#include +#include +#include +#include +#include +#include +#ifdef DIRECT_BUF_RX_ENABLE +#include +#endif +#ifdef WIN32 +#pragma pack(push, target_if_spectral, 1) +#define __ATTRIB_PACK +#else +#ifndef __ATTRIB_PACK +#define __ATTRIB_PACK __attribute__ ((packed)) +#endif +#endif + +#include + +#define FREQ_OFFSET_10MHZ 10 +#ifndef SPECTRAL_USE_NL_BCAST +#define SPECTRAL_USE_NL_BCAST (0) +#endif + +#define STATUS_PASS 1 +#define STATUS_FAIL 0 +#undef spectral_dbg_line +#define spectral_dbg_line() \ + spectral_debug("----------------------------------------------------") + +#undef spectral_ops_not_registered +#define spectral_ops_not_registered(str) \ + spectral_info("SPECTRAL : %s not registered\n", (str)) +#undef not_yet_implemented +#define not_yet_implemented() \ + spectral_info("SPECTRAL : %s : %d Not yet implemented\n", \ + __func__, __LINE__) + +#define SPECTRAL_HT20_NUM_BINS 56 +#define SPECTRAL_HT20_FFT_LEN 56 +#define SPECTRAL_HT20_DC_INDEX (SPECTRAL_HT20_FFT_LEN / 2) +#define SPECTRAL_HT20_DATA_LEN 60 +#define SPECTRAL_HT20_TOTAL_DATA_LEN (SPECTRAL_HT20_DATA_LEN + 3) +#define SPECTRAL_HT40_TOTAL_NUM_BINS 128 +#define SPECTRAL_HT40_DATA_LEN 135 +#define SPECTRAL_HT40_TOTAL_DATA_LEN (SPECTRAL_HT40_DATA_LEN + 3) +#define SPECTRAL_HT40_FFT_LEN 128 +#define SPECTRAL_HT40_DC_INDEX (SPECTRAL_HT40_FFT_LEN / 2) + +/* + * Used for the SWAR to obtain approximate combined rssi + * in secondary 80Mhz segment + */ +#define OFFSET_CH_WIDTH_20 65 +#define OFFSET_CH_WIDTH_40 62 +#define OFFSET_CH_WIDTH_80 56 +#define OFFSET_CH_WIDTH_160 50 + +/* Min and max for relevant Spectral params */ +#define SPECTRAL_PARAM_FFT_SIZE_MIN_GEN2 (1) +#define SPECTRAL_PARAM_FFT_SIZE_MAX_GEN2 (9) +#define SPECTRAL_PARAM_FFT_SIZE_MIN_GEN3 (5) +#define SPECTRAL_PARAM_FFT_SIZE_MAX_GEN3_DEFAULT (9) +#define SPECTRAL_PARAM_FFT_SIZE_MAX_GEN3_QCN9000 (10) +#define SPECTRAL_PARAM_RPT_MODE_MIN (0) +#define SPECTRAL_PARAM_RPT_MODE_MAX (3) + +/* DBR ring debug size for Spectral */ +#define SPECTRAL_DBR_RING_DEBUG_SIZE 512 + +#ifdef BIG_ENDIAN_HOST +#define SPECTRAL_MESSAGE_COPY_CHAR_ARRAY(destp, srcp, len) do { \ + int j; \ + uint32_t *src, *dest; \ + src = (uint32_t *)(srcp); \ + dest = (uint32_t *)(destp); \ + for (j = 0; j < roundup((len), sizeof(uint32_t)) / 4; j++) { \ + *(dest + j) = qdf_le32_to_cpu(*(src + j)); \ + } \ + } while (0) +#else +#define SPECTRAL_MESSAGE_COPY_CHAR_ARRAY(destp, srcp, len) \ + OS_MEMCPY((destp), (srcp), (len)); +#endif + +#define DUMMY_NF_VALUE (-123) +/* 5 categories x (lower + upper) bands */ +#define MAX_INTERF 10 +#define HOST_MAX_ANTENNA 3 +/* Mask for time stamp from descriptor */ +#define SPECTRAL_TSMASK 0xFFFFFFFF +#define SPECTRAL_SIGNATURE 0xdeadbeef +/* Signature to write onto spectral buffer and then later validate */ +#define MEM_POISON_SIGNATURE (htobe32(0xdeadbeef)) + +/* START of spectral GEN II HW specific details */ +#define SPECTRAL_PHYERR_SIGNATURE_GEN2 0xbb +#define TLV_TAG_SPECTRAL_SUMMARY_REPORT_GEN2 0xF9 +#define TLV_TAG_ADC_REPORT_GEN2 0xFA +#define TLV_TAG_SEARCH_FFT_REPORT_GEN2 0xFB + +/** + * enum spectral_160mhz_report_delivery_state - 160 MHz state machine states + * @SPECTRAL_REPORT_WAIT_PRIMARY80: Wait for primary80 report + * @SPECTRAL_REPORT_RX_PRIMARY80: Receive primary 80 report + * @SPECTRAL_REPORT_WAIT_SECONDARY80: Wait for secondory 80 report + * @SPECTRAL_REPORT_RX_SECONDARY80: Receive secondary 80 report + */ +enum spectral_160mhz_report_delivery_state { + SPECTRAL_REPORT_WAIT_PRIMARY80, + SPECTRAL_REPORT_RX_PRIMARY80, + SPECTRAL_REPORT_WAIT_SECONDARY80, + SPECTRAL_REPORT_RX_SECONDARY80, +}; + +/** + * enum spectral_detector_id - Spectral detector id + * @SPECTRAL_DETECTOR_PRIMARY: Primary detector + * @SPECTRAL_DETECTOR_SECONDARY: Secondary detector + * @SPECTRAL_DETECTOR_AGILE: Agile detector + * @SPECTRAL_DETECTOR_INVALID: Invalid detector + */ +enum spectral_detector_id { + SPECTRAL_DETECTOR_PRIMARY, + SPECTRAL_DETECTOR_SECONDARY, + SPECTRAL_DETECTOR_AGILE, + SPECTRAL_DETECTOR_INVALID, +}; + +/** + * struct spectral_search_fft_info_gen2 - spectral search fft report for gen2 + * @relpwr_db: Total bin power in db + * @num_str_bins_ib: Number of strong bins + * @base_pwr: Base power + * @total_gain_info: Total gain + * @fft_chn_idx: FFT chain on which report is originated + * @avgpwr_db: Average power in db + * @peak_mag: Peak power seen in the bins + * @peak_inx: Index of bin holding peak power + */ +struct spectral_search_fft_info_gen2 { + uint32_t relpwr_db; + uint32_t num_str_bins_ib; + uint32_t base_pwr; + uint32_t total_gain_info; + uint32_t fft_chn_idx; + uint32_t avgpwr_db; + uint32_t peak_mag; + int16_t peak_inx; +}; + +/* + * XXX Check if we should be handling the endinness difference in some + * other way opaque to the host + */ +#ifdef BIG_ENDIAN_HOST + +/** + * struct spectral_phyerr_tlv_gen2 - phyerr tlv info for big endian host + * @signature: signature + * @tag: tag + * @length: length + */ +struct spectral_phyerr_tlv_gen2 { + uint8_t signature; + uint8_t tag; + uint16_t length; +} __ATTRIB_PACK; + +#else + +/** + * struct spectral_phyerr_tlv_gen2 - phyerr tlv info for little endian host + * @length: length + * @tag: tag + * @signature: signature + */ +struct spectral_phyerr_tlv_gen2 { + uint16_t length; + uint8_t tag; + uint8_t signature; +} __ATTRIB_PACK; + +#endif /* BIG_ENDIAN_HOST */ + +/** + * struct spectral_phyerr_hdr_gen2 - phyerr header for gen2 HW + * @hdr_a: Header[0:31] + * @hdr_b: Header[32:63] + */ +struct spectral_phyerr_hdr_gen2 { + uint32_t hdr_a; + uint32_t hdr_b; +}; + +/* + * Segment ID information for 80+80. + * + * If the HW micro-architecture specification extends this DWORD for other + * purposes, then redefine+rename accordingly. For now, the specification + * mentions only segment ID (though this doesn't require an entire DWORD) + * without mention of any generic terminology for the DWORD, or any reservation. + * We use nomenclature accordingly. + */ +typedef uint32_t SPECTRAL_SEGID_INFO; + +/** + * struct spectral_phyerr_fft_gen2 - fft info in phyerr event + * @buf: fft report + */ +struct spectral_phyerr_fft_gen2 { + uint8_t buf[0]; +}; +/* END of spectral GEN II HW specific details */ + +/* START of spectral GEN III HW specific details */ + +#define get_bitfield(value, size, pos) \ + (((value) >> (pos)) & ((1 << (size)) - 1)) +#define unsigned_to_signed(value, width) \ + (((value) >= (1 << ((width) - 1))) ? \ + (value - (1 << (width))) : (value)) + +#define SSCAN_SUMMARY_REPORT_HDR_A_DETECTOR_ID_POS_GEN3 (29) +#define SSCAN_SUMMARY_REPORT_HDR_A_DETECTOR_ID_SIZE_GEN3 (2) +#define SSCAN_SUMMARY_REPORT_HDR_A_AGC_TOTAL_GAIN_POS_GEN3 (0) +#define SSCAN_SUMMARY_REPORT_HDR_A_AGC_TOTAL_GAIN_SIZE_GEN3 (8) +#define SSCAN_SUMMARY_REPORT_HDR_A_INBAND_PWR_DB_POS_GEN3 (18) +#define SSCAN_SUMMARY_REPORT_HDR_A_INBAND_PWR_DB_SIZE_GEN3 (10) +#define SSCAN_SUMMARY_REPORT_HDR_A_PRI80_POS_GEN3 (31) +#define SSCAN_SUMMARY_REPORT_HDR_A_PRI80_SIZE_GEN3 (1) +#define SSCAN_SUMMARY_REPORT_HDR_B_GAINCHANGE_POS_GEN3_V1 (30) +#define SSCAN_SUMMARY_REPORT_HDR_B_GAINCHANGE_SIZE_GEN3_V1 (1) +#define SSCAN_SUMMARY_REPORT_HDR_C_GAINCHANGE_POS_GEN3_V2 (16) +#define SSCAN_SUMMARY_REPORT_HDR_C_GAINCHANGE_SIZE_GEN3_V2 (1) + +#define SPECTRAL_PHYERR_SIGNATURE_GEN3 (0xFA) +#define TLV_TAG_SPECTRAL_SUMMARY_REPORT_GEN3 (0x02) +#define TLV_TAG_SEARCH_FFT_REPORT_GEN3 (0x03) +#define SPECTRAL_PHYERR_TLVSIZE_GEN3 (4) + +#define FFT_REPORT_HEADER_LENGTH_GEN3_V2 (24) +#define FFT_REPORT_HEADER_LENGTH_GEN3_V1 (16) +#define NUM_PADDING_BYTES_SSCAN_SUMARY_REPORT_GEN3_V1 (0) +#define NUM_PADDING_BYTES_SSCAN_SUMARY_REPORT_GEN3_V2 (16) + +#define PHYERR_HDR_SIG_POS \ + (offsetof(struct spectral_phyerr_fft_report_gen3, fft_hdr_sig)) +#define PHYERR_HDR_TAG_POS \ + (offsetof(struct spectral_phyerr_fft_report_gen3, fft_hdr_tag)) +#define SPECTRAL_FFT_BINS_POS \ + (offsetof(struct spectral_phyerr_fft_report_gen3, buf)) + +/** + * struct phyerr_info - spectral search fft report for gen3 + * @data: handle to phyerror buffer + * @datalen: length of phyerror bufer + * @p_rfqual: rf quality matrices + * @p_chaninfo: pointer to chaninfo + * @tsf64: 64 bit TSF + * @acs_stats: acs stats + */ +struct phyerr_info { + uint8_t *data; + uint32_t datalen; + struct target_if_spectral_rfqual_info *p_rfqual; + struct target_if_spectral_chan_info *p_chaninfo; + uint64_t tsf64; + struct target_if_spectral_acs_stats *acs_stats; +}; + +/** + * struct spectral_search_fft_info_gen3 - spectral search fft report for gen3 + * @timestamp: Timestamp at which fft report was generated + * @fft_detector_id: Which radio generated this report + * @fft_num: The FFT count number. Set to 0 for short FFT. + * @fft_radar_check: NA for spectral + * @fft_peak_sidx: Index of bin with maximum power + * @fft_chn_idx: Rx chain index + * @fft_base_pwr_db: Base power in dB + * @fft_total_gain_db: Total gain in dB + * @fft_num_str_bins_ib: Number of strong bins in the report + * @fft_peak_mag: Peak magnitude + * @fft_avgpwr_db: Average power in dB + * @fft_relpwr_db: Relative power in dB + */ +struct spectral_search_fft_info_gen3 { + uint32_t timestamp; + uint32_t fft_detector_id; + uint32_t fft_num; + uint32_t fft_radar_check; + int32_t fft_peak_sidx; + uint32_t fft_chn_idx; + uint32_t fft_base_pwr_db; + uint32_t fft_total_gain_db; + uint32_t fft_num_str_bins_ib; + int32_t fft_peak_mag; + uint32_t fft_avgpwr_db; + uint32_t fft_relpwr_db; +}; + +/** + * struct spectral_phyerr_sfftreport_gen3 - fft info in phyerr event + * @fft_timestamp: Timestamp at which fft report was generated + * @fft_hdr_sig: signature + * @fft_hdr_tag: tag + * @fft_hdr_length: length + * @hdr_a: Header[0:31] + * @hdr_b: Header[32:63] + * @hdr_c: Header[64:95] + * @resv: Header[96:127] + * @buf: fft bins + */ +struct spectral_phyerr_fft_report_gen3 { + uint32_t fft_timestamp; +#ifdef BIG_ENDIAN_HOST + uint8_t fft_hdr_sig; + uint8_t fft_hdr_tag; + uint16_t fft_hdr_length; +#else + uint16_t fft_hdr_length; + uint8_t fft_hdr_tag; + uint8_t fft_hdr_sig; +#endif /* BIG_ENDIAN_HOST */ + uint32_t hdr_a; + uint32_t hdr_b; + uint32_t hdr_c; + uint32_t resv; + uint8_t buf[0]; +} __ATTRIB_PACK; + +/** + * struct sscan_report_fields_gen3 - Fields of spectral report + * @sscan_agc_total_gain: The AGC total gain in DB. + * @inband_pwr_db: The in-band power of the signal in 1/2 DB steps + * @sscan_gainchange: This bit is set to 1 if a gainchange occurred during + * the spectral scan FFT. Software may choose to + * disregard the results. + * @sscan_pri80: This is set to 1 to indicate that the Spectral scan was + * performed on the pri80 segment. Software may choose to + * disregard the FFT sample if this is set to 1 but detector ID + * does not correspond to the ID for the pri80 segment. + */ +struct sscan_report_fields_gen3 { + uint8_t sscan_agc_total_gain; + int16_t inband_pwr_db; + uint8_t sscan_gainchange; + uint8_t sscan_pri80; +}; + +/** + * struct spectral_sscan_summary_report_gen3 - Spectral summary report + * event + * @sscan_timestamp: Timestamp at which fft report was generated + * @sscan_hdr_sig: signature + * @sscan_hdr_tag: tag + * @sscan_hdr_length: length + * @hdr_a: Header[0:31] + * @resv: Header[32:63] + * @hdr_b: Header[64:95] + * @hdr_c: Header[96:127] + */ +struct spectral_sscan_summary_report_gen3 { + u_int32_t sscan_timestamp; +#ifdef BIG_ENDIAN_HOST + u_int8_t sscan_hdr_sig; + u_int8_t sscan_hdr_tag; + u_int16_t sscan_hdr_length; +#else + u_int16_t sscan_hdr_length; + u_int8_t sscan_hdr_tag; + u_int8_t sscan_hdr_sig; +#endif /* BIG_ENDIAN_HOST */ + u_int32_t hdr_a; + u_int32_t res1; + u_int32_t hdr_b; + u_int32_t hdr_c; +} __ATTRIB_PACK; + +#ifdef DIRECT_BUF_RX_ENABLE +/** + * struct Spectral_report - spectral report + * @data: Report buffer + * @noisefloor: Noise floor values + * @reset_delay: Time taken for warm reset in us + */ +struct spectral_report { + uint8_t *data; + int32_t noisefloor[DBR_MAX_CHAINS]; + uint32_t reset_delay; +}; +#endif +/* END of spectral GEN III HW specific details */ + +typedef signed char pwr_dbm; + +/** + * enum spectral_gen - spectral hw generation + * @SPECTRAL_GEN1 : spectral hw gen 1 + * @SPECTRAL_GEN2 : spectral hw gen 2 + * @SPECTRAL_GEN3 : spectral hw gen 3 + */ +enum spectral_gen { + SPECTRAL_GEN1, + SPECTRAL_GEN2, + SPECTRAL_GEN3, +}; + +/** + * enum spectral_fftbin_size_war - spectral fft bin size war + * @SPECTRAL_FFTBIN_SIZE_NO_WAR : No WAR applicable for Spectral FFT bin size + * @SPECTRAL_FFTBIN_SIZE_2BYTE_TO_1BYTE : Spectral FFT bin size: Retain only + * least significant byte from 2 byte + * FFT bin transferred by HW + * @SPECTRAL_FFTBIN_SIZE_4BYTE_TO_1BYTE : Spectral FFT bin size: Retain only + * least significant byte from 4 byte + * FFT bin transferred by HW + */ +enum spectral_fftbin_size_war { + SPECTRAL_FFTBIN_SIZE_NO_WAR = 0, + SPECTRAL_FFTBIN_SIZE_WAR_2BYTE_TO_1BYTE = 1, + SPECTRAL_FFTBIN_SIZE_WAR_4BYTE_TO_1BYTE = 2, +}; + +/** + * enum spectral_report_format_version - This represents the report format + * version number within each Spectral generation. + * @SPECTRAL_REPORT_FORMAT_VERSION_1 : version 1 + * @SPECTRAL_REPORT_FORMAT_VERSION_2 : version 2 + */ +enum spectral_report_format_version { + SPECTRAL_REPORT_FORMAT_VERSION_1, + SPECTRAL_REPORT_FORMAT_VERSION_2, +}; + +/** + * struct spectral_fft_bin_len_adj_swar - Encapsulate information required for + * Spectral FFT bin length adjusting software WARS. + * @inband_fftbin_size_adj: Whether to carry out FFT bin size adjustment for + * in-band report format. This would be required on some chipsets under the + * following circumstances: In report mode 2 only the in-band bins are DMA'ed. + * Scatter/gather is used. However, the HW generates all bins, not just in-band, + * and reports the number of bins accordingly. The subsystem arranging for the + * DMA cannot change this value. On such chipsets the adjustment required at the + * host driver is to check if report format is 2, and if so halve the number of + * bins reported to get the number actually DMA'ed. + * @null_fftbin_adj: Whether to remove NULL FFT bins for report mode (1) in + * which only summary of metrics for each completed FFT + spectral scan summary + * report are to be provided. This would be required on some chipsets under the + * following circumstances: In report mode 1, HW reports a length corresponding + * to all bins, and provides bins with value 0. This is because the subsystem + * arranging for the FFT information does not arrange for DMA of FFT bin values + * (as expected), but cannot arrange for a smaller length to be reported by HW. + * In these circumstances, the driver would have to disregard the NULL bins and + * report a bin count of 0 to higher layers. + * @packmode_fftbin_size_adj: Pack mode in HW refers to packing of each Spectral + * FFT bin into 2 bytes. But due to a bug HW reports 2 times the expected length + * when packmode is enabled. This SWAR compensates this bug by dividing the + * length with 2. + * @fftbin_size_war: Type of FFT bin size SWAR + */ +struct spectral_fft_bin_len_adj_swar { + u_int8_t inband_fftbin_size_adj; + u_int8_t null_fftbin_adj; + uint8_t packmode_fftbin_size_adj; + enum spectral_fftbin_size_war fftbin_size_war; +}; + +/** + * struct spectral_report_params - Parameters related to format of Spectral + * report. + * @version: This represents the report format version number within each + * Spectral generation. + * @ssumaary_padding_bytes: Number of bytes of padding after Spectral summary + * report + * @fft_report_hdr_len: Number of bytes in the header of the FFT report. This + * has to be subtracted from the length field of FFT report to find the length + * of FFT bins. + */ +struct spectral_report_params { + enum spectral_report_format_version version; + uint8_t ssumaary_padding_bytes; + uint8_t fft_report_hdr_len; +}; + +/** + * struct spectral_param_min_max - Spectral parameter minimum and maximum values + * @fft_size_min: Minimum value of fft_size + * @fft_size_max: Maximum value of fft_size for each BW + */ +struct spectral_param_min_max { + uint16_t fft_size_min; + uint16_t fft_size_max[CH_WIDTH_MAX]; +}; + +/** + * struct spectral_timestamp_swar - Spectral time stamp WAR related parameters + * @timestamp_war_offset: Offset to be added to correct timestamp + * @target_reset_count: Number of times target exercised the reset routine + * @last_fft_timestamp: last fft report timestamp + */ +struct spectral_timestamp_war { + uint32_t timestamp_war_offset[SPECTRAL_SCAN_MODE_MAX]; + uint64_t target_reset_count; + uint32_t last_fft_timestamp[SPECTRAL_SCAN_MODE_MAX]; +}; + +#if ATH_PERF_PWR_OFFLOAD +/** + * enum target_if_spectral_info - Enumerations for specifying which spectral + * information (among parameters and states) + * is desired. + * @TARGET_IF_SPECTRAL_INFO_ACTIVE: Indicated whether spectral is active + * @TARGET_IF_SPECTRAL_INFO_ENABLED: Indicated whether spectral is enabled + * @TARGET_IF_SPECTRAL_INFO_PARAMS: Config params + */ +enum target_if_spectral_info { + TARGET_IF_SPECTRAL_INFO_ACTIVE, + TARGET_IF_SPECTRAL_INFO_ENABLED, + TARGET_IF_SPECTRAL_INFO_PARAMS, +}; +#endif /* ATH_PERF_PWR_OFFLOAD */ + +/* forward declaration */ +struct target_if_spectral; + +/** + * struct target_if_spectral_chan_info - Channel information + * @center_freq1: center frequency 1 in MHz + * @center_freq2: center frequency 2 in MHz -valid only for + * 11ACVHT 80PLUS80 mode + * @chan_width: channel width in MHz + */ +struct target_if_spectral_chan_info { + uint16_t center_freq1; + uint16_t center_freq2; + uint8_t chan_width; +}; + +/** + * struct target_if_spectral_acs_stats - EACS stats from spectral samples + * @nfc_ctl_rssi: Control chan rssi + * @nfc_ext_rssi: Extension chan rssi + * @ctrl_nf: Control chan Noise Floor + * @ext_nf: Extension chan Noise Floor + */ +struct target_if_spectral_acs_stats { + int8_t nfc_ctl_rssi; + int8_t nfc_ext_rssi; + int8_t ctrl_nf; + int8_t ext_nf; +}; + +/** + * struct target_if_spectral_perchain_rssi_info - per chain rssi info + * @rssi_pri20: Rssi of primary 20 Mhz + * @rssi_sec20: Rssi of secondary 20 Mhz + * @rssi_sec40: Rssi of secondary 40 Mhz + * @rssi_sec80: Rssi of secondary 80 Mhz + */ +struct target_if_spectral_perchain_rssi_info { + int8_t rssi_pri20; + int8_t rssi_sec20; + int8_t rssi_sec40; + int8_t rssi_sec80; +}; + +/** + * struct target_if_spectral_rfqual_info - RF measurement information + * @rssi_comb: RSSI Information + * @pc_rssi_info: XXX : For now, we know we are getting information + * for only 4 chains at max. For future extensions + * use a define + * @noise_floor: Noise floor information + */ +struct target_if_spectral_rfqual_info { + int8_t rssi_comb; + struct target_if_spectral_perchain_rssi_info pc_rssi_info[4]; + int16_t noise_floor[4]; +}; + +#define GET_TARGET_IF_SPECTRAL_OPS(spectral) \ + ((struct target_if_spectral_ops *)(&((spectral)->spectral_ops))) + +/** + * struct target_if_spectral_ops - spectral low level ops table + * @get_tsf64: Get 64 bit TSF value + * @get_capability: Get capability info + * @set_rxfilter: Set rx filter + * @get_rxfilter: Get rx filter + * @is_spectral_active: Check whether icm is active + * @is_spectral_enabled: Check whether spectral is enabled + * @start_spectral_scan: Start spectral scan + * @stop_spectral_scan: Stop spectral scan + * @get_extension_channel: Get extension channel + * @get_ctl_noisefloor: Get control noise floor + * @get_ext_noisefloor: Get extension noise floor + * @configure_spectral: Set spectral configurations + * @get_spectral_config: Get spectral configurations + * @get_ent_spectral_mask: Get spectral mask + * @get_mac_address: Get mac address + * @get_current_channel: Get current channel + * @reset_hw: Reset HW + * @get_chain_noise_floor: Get Channel noise floor + * @spectral_process_phyerr: Process phyerr event + * @process_spectral_report: Process spectral report + */ +struct target_if_spectral_ops { + uint64_t (*get_tsf64)(void *arg); + uint32_t (*get_capability)( + void *arg, enum spectral_capability_type type); + uint32_t (*set_rxfilter)(void *arg, int rxfilter); + uint32_t (*get_rxfilter)(void *arg); + uint32_t (*is_spectral_active)(void *arg, + enum spectral_scan_mode smode); + uint32_t (*is_spectral_enabled)(void *arg, + enum spectral_scan_mode smode); + uint32_t (*start_spectral_scan)(void *arg, + enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + uint32_t (*stop_spectral_scan)(void *arg, + enum spectral_scan_mode smode); + uint32_t (*get_extension_channel)(void *arg); + int8_t (*get_ctl_noisefloor)(void *arg); + int8_t (*get_ext_noisefloor)(void *arg); + uint32_t (*configure_spectral)( + void *arg, + struct spectral_config *params, + enum spectral_scan_mode smode); + uint32_t (*get_spectral_config)( + void *arg, + struct spectral_config *params, + enum spectral_scan_mode smode); + uint32_t (*get_ent_spectral_mask)(void *arg); + uint32_t (*get_mac_address)(void *arg, char *addr); + uint32_t (*get_current_channel)(void *arg); + uint32_t (*reset_hw)(void *arg); + uint32_t (*get_chain_noise_floor)(void *arg, int16_t *nf_buf); + int (*spectral_process_phyerr)(struct target_if_spectral *spectral, + uint8_t *data, uint32_t datalen, + struct target_if_spectral_rfqual_info *p_rfqual, + struct target_if_spectral_chan_info *p_chaninfo, + uint64_t tsf64, + struct target_if_spectral_acs_stats *acs_stats); + int (*process_spectral_report)(struct wlan_objmgr_pdev *pdev, + void *payload); +}; + +/** + * struct target_if_spectral_stats - spectral stats info + * @num_spectral_detects: Total num. of spectral detects + * @total_phy_errors: Total number of phyerrors + * @owl_phy_errors: Indicated phyerrors in old gen1 chipsets + * @pri_phy_errors: Phyerrors in primary channel + * @ext_phy_errors: Phyerrors in secondary channel + * @dc_phy_errors: Phyerrors due to dc + * @early_ext_phy_errors: Early secondary channel phyerrors + * @bwinfo_errors: Bandwidth info errors + * @datalen_discards: Invalid data length errors, seen in gen1 chipsets + * @rssi_discards bw: Indicates reports dropped due to RSSI threshold + * @last_reset_tstamp: Last reset time stamp + */ +struct target_if_spectral_stats { + uint32_t num_spectral_detects; + uint32_t total_phy_errors; + uint32_t owl_phy_errors; + uint32_t pri_phy_errors; + uint32_t ext_phy_errors; + uint32_t dc_phy_errors; + uint32_t early_ext_phy_errors; + uint32_t bwinfo_errors; + uint32_t datalen_discards; + uint32_t rssi_discards; + uint64_t last_reset_tstamp; +}; + +/** + * struct target_if_spectral_event - spectral event structure + * @se_ts: Original 15 bit recv timestamp + * @se_full_ts: 64-bit full timestamp from interrupt time + * @se_rssi: Rssi of spectral event + * @se_bwinfo: Rssi of spectral event + * @se_dur: Duration of spectral pulse + * @se_chanindex: Channel of event + * @se_list: List of spectral events + */ +struct target_if_spectral_event { + uint32_t se_ts; + uint64_t se_full_ts; + uint8_t se_rssi; + uint8_t se_bwinfo; + uint8_t se_dur; + uint8_t se_chanindex; + + STAILQ_ENTRY(spectral_event) se_list; +}; + +/** + * struct target_if_chain_noise_pwr_info - Noise power info for each channel + * @rptcount: Count of reports in pwr array + * @un_cal_nf: Uncalibrated noise floor + * @factory_cal_nf: Noise floor as calibrated at the factory for module + * @median_pwr: Median power (median of pwr array) + * @pwr: Power reports + */ +struct target_if_chain_noise_pwr_info { + int rptcount; + pwr_dbm un_cal_nf; + pwr_dbm factory_cal_nf; + pwr_dbm median_pwr; + pwr_dbm pwr[]; +} __ATTRIB_PACK; + +/** + * struct target_if_spectral_chan_stats - Channel information + * @cycle_count: Cycle count + * @channel_load: Channel load + * @per: Period + * @noisefloor: Noise floor + * @comp_usablity: Computed usability + * @maxregpower: Maximum allowed regulatary power + * @comp_usablity_sec80: Computed usability of secondary 80 Mhz + * @maxregpower_sec80: Max regulatory power in secondary 80 Mhz + */ +struct target_if_spectral_chan_stats { + int cycle_count; + int channel_load; + int per; + int noisefloor; + uint16_t comp_usablity; + int8_t maxregpower; + uint16_t comp_usablity_sec80; + int8_t maxregpower_sec80; +}; + +#if ATH_PERF_PWR_OFFLOAD + +/** + * struct target_if_spectral_cache - Cache used to minimize WMI operations + * in offload architecture + * @osc_spectral_enabled: Whether Spectral is enabled + * @osc_spectral_active: Whether spectral is active + * XXX: Ideally, we should NOT cache this + * since the hardware can self clear the bit, + * the firmware can possibly stop spectral due to + * intermittent off-channel activity, etc + * A WMI read command should be introduced to handle + * this This will be discussed. + * @osc_params: Spectral parameters + * @osc_is_valid: Whether the cache is valid + */ +struct target_if_spectral_cache { + uint8_t osc_spectral_enabled; + uint8_t osc_spectral_active; + struct spectral_config osc_params; + uint8_t osc_is_valid; +}; + +/** + * struct target_if_spectral_param_state_info - Structure used to represent and + * manage spectral information + * (parameters and states) + * @osps_lock: Lock to synchronize accesses to information + * @osps_cache: Cacheable' information + */ +struct target_if_spectral_param_state_info { + qdf_spinlock_t osps_lock; + struct target_if_spectral_cache osps_cache; + /* XXX - Non-cacheable information goes here, in the future */ +}; +#endif /* ATH_PERF_PWR_OFFLOAD */ + +struct vdev_spectral_configure_params; +struct vdev_spectral_enable_params; + +/** + * struct wmi_spectral_cmd_ops - structure used holding the operations + * related to wmi commands on spectral parameters. + * @wmi_spectral_configure_cmd_send: Configure Spectral parameters + * @wmi_spectral_enable_cmd_send: Enable/Disable Spectral + * @wmi_spectral_crash_inject: Inject FW crash + */ +struct wmi_spectral_cmd_ops { + QDF_STATUS (*wmi_spectral_configure_cmd_send)( + wmi_unified_t wmi_hdl, + struct vdev_spectral_configure_params *param); + QDF_STATUS (*wmi_spectral_enable_cmd_send)( + wmi_unified_t wmi_hdl, + struct vdev_spectral_enable_params *param); + QDF_STATUS(*wmi_spectral_crash_inject)( + wmi_unified_t wmi_handle, struct crash_inject *param); +}; + +/** + * struct spectral_param_properties - structure holding Spectral + * parameter properties + * @supported: Parameter is supported or not + * @common_all_modes: Parameter should be common for all modes or not + */ +struct spectral_param_properties { + bool supported; + bool common_all_modes; +}; + +/** + * struct target_if_spectral - main spectral structure + * @pdev: Pointer to pdev + * @spectral_ops: Target if internal Spectral low level operations table + * @capability: Spectral capabilities structure + * @properties: Spectral parameter properties per mode + * @spectral_lock: Lock used for internal Spectral operations + * @spectral_curchan_radindex: Current channel spectral index + * @spectral_extchan_radindex: Extension channel spectral index + * @spectraldomain: Current Spectral domain + * @spectral_proc_phyerr: Flags to process for PHY errors + * @spectral_defaultparams: Default PHY params per Spectral stat + * @spectral_stats: Spectral related stats + * @events: Events structure + * @sc_spectral_ext_chan_ok: Can spectral be detected on the extension channel? + * @sc_spectral_combined_rssi_ok: Can use combined spectral RSSI? + * @sc_spectral_20_40_mode: Is AP in 20-40 mode? + * @sc_spectral_noise_pwr_cal: Noise power cal required? + * @sc_spectral_non_edma: Is the spectral capable device Non-EDMA? + * @upper_is_control: Upper segment is primary + * @upper_is_extension: Upper segment is secondary + * @lower_is_control: Lower segment is primary + * @lower_is_extension: Lower segment is secondary + * @sc_spectraltest_ieeechan: IEEE channel number to return to after a spectral + * mute test + * @spectral_numbins: Number of bins + * @spectral_fft_len: FFT length + * @spectral_data_len: Total phyerror report length + * @lb_edge_extrabins: Number of extra bins on left band edge + * @rb_edge_extrabins: Number of extra bins on right band edge + * @spectral_max_index_offset: Max FFT index offset (20 MHz mode) + * @spectral_upper_max_index_offset: Upper max FFT index offset (20/40 MHz mode) + * @spectral_lower_max_index_offset: Lower max FFT index offset (20/40 MHz mode) + * @spectral_dc_index: At which index DC is present + * @send_single_packet: Deprecated + * @spectral_sent_msg: Indicates whether we send report to upper layers + * @params: Spectral parameters + * @last_capture_time: Indicates timestamp of previouse report + * @num_spectral_data: Number of Spectral samples received in current session + * @total_spectral_data: Total number of Spectral samples received + * @max_rssi: Maximum RSSI + * @detects_control_channel: NA + * @detects_extension_channel: NA + * @detects_below_dc: NA + * @detects_above_dc: NA + * @sc_scanning: Indicates active wifi scan + * @sc_spectral_scan: Indicates active specral scan + * @sc_spectral_full_scan: Deprecated + * @scan_start_tstamp: Deprecated + * @last_tstamp: Deprecated + * @first_tstamp: Deprecated + * @spectral_samp_count: Deprecated + * @sc_spectral_samp_count: Deprecated + * @noise_pwr_reports_reqd: Number of noise power reports required + * @noise_pwr_reports_recv: Number of noise power reports received + * @noise_pwr_reports_lock: Lock used for Noise power report processing + * @noise_pwr_chain_ctl: Noise power report - control channel + * @noise_pwr_chain_ext: Noise power report - extension channel + * @chaninfo: Channel statistics + * @tsf64: Latest TSF Value + * @param_info: Offload architecture Spectral parameter cache information + * @ch_width: Indicates Channel Width 20/40/80/160 MHz for each Spectral mode + * @diag_stats: Diagnostic statistics + * @is_160_format: Indicates whether information provided by HW is in altered + * format for 802.11ac 160/80+80 MHz support (QCA9984 onwards) + * @is_lb_edge_extrabins_format: Indicates whether information provided by + * HW has 4 extra bins, at left band edge, for report mode 2 + * @is_rb_edge_extrabins_format: Indicates whether information provided + * by HW has 4 extra bins, at right band edge, for report mode 2 + * @is_sec80_rssi_war_required: Indicates whether the software workaround is + * required to obtain approximate combined RSSI for secondary 80Mhz segment + * @simctx: Spectral Simulation context + * @spectral_gen: Spectral hardware generation + * @hdr_sig_exp: Expected signature in PHYERR TLV header, for the given hardware + * generation + * @tag_sscan_summary_exp: Expected Spectral Scan Summary tag in PHYERR TLV + * header, for the given hardware generation + * @tag_sscan_fft_exp: Expected Spectral Scan FFT report tag in PHYERR TLV + * header, for the given hardware generation + * @tlvhdr_size: Expected PHYERR TLV header size, for the given hardware + * generation + * @nl_cb: Netlink callbacks + * @use_nl_bcast: Whether to use Netlink broadcast/unicast + * @send_phy_data: Send data to the application layer for a particular msg type + * @len_adj_swar: Spectral fft bin length adjustment SWAR related info + * @timestamp_war: Spectral time stamp WAR related info + * @dbr_ring_debug: Whether Spectral DBR ring debug is enabled + * @dbr_buff_debug: Whether Spectral DBR buffer debug is enabled + * @direct_dma_support: Whether Direct-DMA is supported on the current radio + * @prev_tstamp: Timestamp of the previously received sample, which has to be + * compared with the current tstamp to check descrepancy + * @rparams: Parameters related to Spectral report structure + * @param_min_max: Spectral parameter's minimum and maximum values + */ +struct target_if_spectral { + struct wlan_objmgr_pdev *pdev_obj; + struct target_if_spectral_ops spectral_ops; + struct spectral_caps capability; + struct spectral_param_properties + properties[SPECTRAL_SCAN_MODE_MAX][SPECTRAL_PARAM_MAX]; + qdf_spinlock_t spectral_lock; + int16_t spectral_curchan_radindex; + int16_t spectral_extchan_radindex; + uint32_t spectraldomain; + uint32_t spectral_proc_phyerr; + struct spectral_config spectral_defaultparams; + struct target_if_spectral_stats spectral_stats; + struct target_if_spectral_event *events; + unsigned int sc_spectral_ext_chan_ok:1, + sc_spectral_combined_rssi_ok:1, + sc_spectral_20_40_mode:1, + sc_spectral_noise_pwr_cal:1, + sc_spectral_non_edma:1; + int upper_is_control; + int upper_is_extension; + int lower_is_control; + int lower_is_extension; + uint8_t sc_spectraltest_ieeechan; + int spectral_numbins; + int spectral_fft_len; + int spectral_data_len; + + /* + * For 11ac chipsets prior to AR900B version 2.0, a max of 512 bins are + * delivered. However, there can be additional bins reported for + * AR900B version 2.0 and QCA9984 as described next: + * + * AR900B version 2.0: An additional tone is processed on the right + * hand side in order to facilitate detection of radar pulses out to + * the extreme band-edge of the channel frequency. Since the HW design + * processes four tones at a time, this requires one additional Dword + * to be added to the search FFT report. + * + * QCA9984: When spectral_scan_rpt_mode = 2, i.e 2-dword summary + + * 1x-oversampled bins (in-band) per FFT, then 8 more bins + * (4 more on left side and 4 more on right side)are added. + */ + + int lb_edge_extrabins; + int rb_edge_extrabins; + int spectral_max_index_offset; + int spectral_upper_max_index_offset; + int spectral_lower_max_index_offset; + int spectral_dc_index; + int send_single_packet; + int spectral_sent_msg; + int classify_scan; + qdf_timer_t classify_timer; + struct spectral_config params[SPECTRAL_SCAN_MODE_MAX]; + bool params_valid[SPECTRAL_SCAN_MODE_MAX]; + struct spectral_classifier_params classifier_params; + int last_capture_time; + int num_spectral_data; + int total_spectral_data; + int max_rssi; + int detects_control_channel; + int detects_extension_channel; + int detects_below_dc; + int detects_above_dc; + int sc_scanning; + int sc_spectral_scan; + int sc_spectral_full_scan; + uint64_t scan_start_tstamp; + uint32_t last_tstamp; + uint32_t first_tstamp; + uint32_t spectral_samp_count; + uint32_t sc_spectral_samp_count; + int noise_pwr_reports_reqd; + int noise_pwr_reports_recv; + qdf_spinlock_t noise_pwr_reports_lock; + struct target_if_chain_noise_pwr_info + *noise_pwr_chain_ctl[HOST_MAX_ANTENNA]; + struct target_if_chain_noise_pwr_info + *noise_pwr_chain_ext[HOST_MAX_ANTENNA]; + uint64_t tsf64; +#if ATH_PERF_PWR_OFFLOAD + struct target_if_spectral_param_state_info + param_info[SPECTRAL_SCAN_MODE_MAX]; +#endif + enum phy_ch_width ch_width[SPECTRAL_SCAN_MODE_MAX]; + struct spectral_diag_stats diag_stats; + bool is_160_format; + bool is_lb_edge_extrabins_format; + bool is_rb_edge_extrabins_format; + bool is_sec80_rssi_war_required; +#ifdef QCA_SUPPORT_SPECTRAL_SIMULATION + void *simctx; +#endif + enum spectral_gen spectral_gen; + uint8_t hdr_sig_exp; + uint8_t tag_sscan_summary_exp; + uint8_t tag_sscan_fft_exp; + uint8_t tlvhdr_size; + struct wmi_spectral_cmd_ops param_wmi_cmd_ops; + struct spectral_nl_cb nl_cb; + bool use_nl_bcast; + int (*send_phy_data)(struct wlan_objmgr_pdev *pdev, + enum spectral_msg_type smsg_type); + struct spectral_fft_bin_len_adj_swar len_adj_swar; + struct spectral_timestamp_war timestamp_war; + enum spectral_160mhz_report_delivery_state state_160mhz_delivery; + bool dbr_ring_debug; + bool dbr_buff_debug; + bool direct_dma_support; + uint32_t prev_tstamp; + struct spectral_report_params rparams; + struct spectral_param_min_max param_min_max; +}; + +/** + * struct target_if_samp_msg_params - Spectral Analysis Messaging Protocol + * data format + * @rssi: RSSI (except for secondary 80 segment) + * @rssi_sec80: RSSI for secondary 80 segment + * @lower_rssi: RSSI of lower band + * @upper_rssi: RSSI of upper band + * @chain_ctl_rssi: RSSI for control channel, for all antennas + * @chain_ext_rssi: RSSI for extension channel, for all antennas + * @bwinfo: bandwidth info + * @data_len: length of FFT data (except for secondary 80 segment) + * @data_len_sec80: length of FFT data for secondary 80 segment + * @tstamp: timestamp + * @last_tstamp: last time stamp + * @max_mag: maximum magnitude (except for secondary 80 segment) + * @max_mag_sec80: maximum magnitude for secondary 80 segment + * @max_index: index of max magnitude (except for secondary 80 segment) + * @max_index_sec80: index of max magnitude for secondary 80 segment + * @max_exp: max exp + * @peak: peak frequency (obsolete) + * @pwr_count: number of FFT bins (except for secondary 80 segment) + * @pwr_count_sec80: number of FFT bins in secondary 80 segment + * @nb_lower: This is deprecated + * @nb_upper: This is deprecated + * @max_upper_index: index of max mag in upper band + * @max_lower_index: index of max mag in lower band + * @bin_pwr_data: Contains FFT magnitudes (except for secondary 80 segment) + * @bin_pwr_data_sec80: Contains FFT magnitudes for the secondary 80 segment + * @freq: Center frequency of primary 20MHz channel in MHz + * @vhtop_ch_freq_seg1: VHT operation first segment center frequency in MHz + * @vhtop_ch_freq_seg2: VHT operation second segment center frequency in MHz + * @agile_freq: Center frequency in MHz of the entire span across which Agile + * Spectral is carried out. Applicable only for Agile Spectral samples. + * @freq_loading: spectral control duty cycles + * @noise_floor: current noise floor (except for secondary 80 segment) + * @noise_floor_sec80: current noise floor for secondary 80 segment + * @interf_list: List of interfernce sources + * @classifier_params: classifier parameters + * @sc: classifier parameters + * @pri80ind: Indication from hardware that the sample was received on the + * primary 80 MHz segment. If this is set when smode = + * SPECTRAL_SCAN_MODE_AGILE, it indicates that Spectral was carried out on + * pri80 instead of the Agile frequency due to a channel switch - Software may + * choose to ignore the sample in this case. + * @pri80ind_sec80: Indication from hardware that the sample was received on the + * primary 80 MHz segment instead of the secondary 80 MHz segment due to a + * channel switch - Software may choose to ignore the sample if this is set. + * Applicable only if smode = SPECTRAL_SCAN_MODE_NORMAL and for 160/80+80 MHz + * Spectral operation. + * @last_raw_timestamp: Previous FFT report's raw timestamp. In case of 160MHz + * it will be primary 80 segment's timestamp as both primary & secondary + * segment's timestamps are expected to be almost equal + * @timestamp_war_offset: Offset calculated based on reset_delay and + * last_raw_stamp. It will be added to raw_timestamp to get tstamp. + * @raw_timestamp: FFT timestamp reported by HW on primary segment. + * @raw_timestamp_sec80: FFT timestamp reported by HW on secondary 80 segment. + * @reset_delay: Time gap between the last spectral report before reset and the + * end of reset. + * @target_reset_count: Indicates the the number of times the target went + * through reset routine after spectral was enabled. + */ +struct target_if_samp_msg_params { + int8_t rssi; + int8_t rssi_sec80; + int8_t lower_rssi; + int8_t upper_rssi; + int8_t chain_ctl_rssi[HOST_MAX_ANTENNA]; + int8_t chain_ext_rssi[HOST_MAX_ANTENNA]; + uint16_t bwinfo; + uint16_t datalen; + uint16_t datalen_sec80; + uint32_t tstamp; + uint32_t last_tstamp; + uint16_t max_mag; + uint16_t max_mag_sec80; + uint16_t max_index; + uint16_t max_index_sec80; + uint8_t max_exp; + int peak; + int pwr_count; + int pwr_count_sec80; + int8_t nb_lower; + int8_t nb_upper; + uint16_t max_lower_index; + uint16_t max_upper_index; + uint8_t *bin_pwr_data; + uint8_t *bin_pwr_data_sec80; + uint16_t freq; + uint16_t vhtop_ch_freq_seg1; + uint16_t vhtop_ch_freq_seg2; + uint16_t agile_freq; + uint16_t freq_loading; + int16_t noise_floor; + int16_t noise_floor_sec80; + struct interf_src_rsp interf_list; + struct spectral_classifier_params classifier_params; + struct ath_softc *sc; + uint8_t agc_total_gain; + uint8_t agc_total_gain_sec80; + uint8_t gainchange; + uint8_t gainchange_sec80; + enum spectral_scan_mode smode; + uint8_t pri80ind; + uint8_t pri80ind_sec80; + uint32_t last_raw_timestamp; + uint32_t timestamp_war_offset; + uint32_t raw_timestamp; + uint32_t raw_timestamp_sec80; + uint32_t reset_delay; + uint32_t target_reset_count; +}; + +#ifdef WLAN_CONV_SPECTRAL_ENABLE +/** + * target_if_spectral_dump_fft() - Dump Spectral FFT + * @pfft: Pointer to Spectral Phyerr FFT + * @fftlen: FFT length + * + * Return: Success or failure + */ +int target_if_spectral_dump_fft(uint8_t *pfft, int fftlen); + +/** + * target_if_dbg_print_samp_param() - Print contents of SAMP struct + * @p: Pointer to SAMP message + * + * Return: Void + */ +void target_if_dbg_print_samp_param(struct target_if_samp_msg_params *p); + +/** + * target_if_get_offset_swar_sec80() - Get offset for SWAR according to + * the channel width + * @channel_width: Channel width + * + * Return: Offset for SWAR + */ +uint32_t target_if_get_offset_swar_sec80(uint32_t channel_width); + +/** + * target_if_sptrl_register_tx_ops() - Register Spectral target_if Tx Ops + * @tx_ops: Tx Ops + * + * Return: void + */ +void target_if_sptrl_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_spectral_create_samp_msg() - Create the spectral samp message + * @spectral : Pointer to spectral internal structure + * @params : spectral samp message parameters + * + * API to create the spectral samp message + * + * Return: void + */ +void target_if_spectral_create_samp_msg( + struct target_if_spectral *spectral, + struct target_if_samp_msg_params *params); + +/** + * target_if_spectral_process_phyerr_gen3() - Process phyerror event for gen3 + * @pdev: Pointer to pdev object + * @payload: Pointer to spectral report + * + * Process phyerror event for gen3 + * + * Return: Success/Failure + */ +int target_if_spectral_process_report_gen3( + struct wlan_objmgr_pdev *pdev, + void *buf); + +/** + * target_if_process_phyerr_gen2() - Process PHY Error for gen2 + * @spectral: Pointer to Spectral object + * @data: Pointer to phyerror event buffer + * @datalen: Data length + * @p_rfqual: RF quality info + * @p_chaninfo: Channel info + * @tsf64: 64 bit tsf timestamp + * @acs_stats: ACS stats + * + * Process PHY Error for gen2 + * + * Return: Success/Failure + */ +int target_if_process_phyerr_gen2( + struct target_if_spectral *spectral, + uint8_t *data, + uint32_t datalen, struct target_if_spectral_rfqual_info *p_rfqual, + struct target_if_spectral_chan_info *p_chaninfo, + uint64_t tsf64, + struct target_if_spectral_acs_stats *acs_stats); + +/** + * target_if_spectral_send_intf_found_msg() - Indicate to application layer that + * interference has been found + * @pdev: Pointer to pdev + * @cw_int: 1 if CW interference is found, 0 if WLAN interference is found + * @dcs_enabled: 1 if DCS is enabled, 0 if DCS is disabled + * + * Send message to application layer + * indicating that interference has been found + * + * Return: None + */ +void target_if_spectral_send_intf_found_msg( + struct wlan_objmgr_pdev *pdev, + uint16_t cw_int, uint32_t dcs_enabled); + +/** + * target_if_stop_spectral_scan() - Stop spectral scan + * @pdev: Pointer to pdev object + * @smode: Spectral scan mode + * @err: Pointer to error code + * + * API to stop the current on-going spectral scan + * + * Return: QDF_STATUS_SUCCESS in case of success, else QDF_STATUS_E_FAILURE + */ +QDF_STATUS target_if_stop_spectral_scan(struct wlan_objmgr_pdev *pdev, + const enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + +/** + * target_if_spectral_get_vdev() - Get pointer to vdev to be used for Spectral + * operations + * @spectral: Pointer to Spectral target_if internal private data + * + * Spectral operates on pdev. However, in order to retrieve some WLAN + * properties, a vdev is required. To facilitate this, the function returns the + * first vdev in our pdev. The caller should release the reference to the vdev + * once it is done using it. + * TODO: If the framework later provides an API to obtain the first active + * vdev, then it would be preferable to use this API. + * + * Return: Pointer to vdev on success, NULL on failure + */ +struct wlan_objmgr_vdev *target_if_spectral_get_vdev( + struct target_if_spectral *spectral); + +/** + * target_if_spectral_dump_hdr_gen2() - Dump Spectral header for gen2 + * @phdr: Pointer to Spectral Phyerr Header + * + * Dump Spectral header + * + * Return: Success/Failure + */ +int target_if_spectral_dump_hdr_gen2(struct spectral_phyerr_hdr_gen2 *phdr); + +/** + * target_if_get_combrssi_sec80_seg_gen2() - Get approximate combined RSSI + * for Secondary 80 segment + * @spectral: Pointer to spectral object + * @p_sfft_sec80: Pointer to search fft info of secondary 80 segment + * + * Get approximate combined RSSI for Secondary 80 segment + * + * Return: Combined RSSI for secondary 80Mhz segment + */ +int8_t target_if_get_combrssi_sec80_seg_gen2( + struct target_if_spectral *spectral, + struct spectral_search_fft_info_gen2 *p_sfft_sec80); + +/** + * target_if_spectral_dump_tlv_gen2() - Dump Spectral TLV for gen2 + * @ptlv: Pointer to Spectral Phyerr TLV + * @is_160_format: Indicates 160 format + * + * Dump Spectral TLV for gen2 + * + * Return: Success/Failure + */ +int target_if_spectral_dump_tlv_gen2( + struct spectral_phyerr_tlv_gen2 *ptlv, bool is_160_format); + +/** + * target_if_spectral_dump_phyerr_data_gen2() - Dump Spectral + * related PHY Error for gen2 + * @data: Pointer to phyerror buffer + * @datalen: Data length + * @is_160_format: Indicates 160 format + * + * Dump Spectral related PHY Error for gen2 + * + * Return: Success/Failure + */ +int target_if_spectral_dump_phyerr_data_gen2( + uint8_t *data, + uint32_t datalen, + bool is_160_format); + +/** + * target_if_dbg_print_samp_msg() - Print contents of SAMP Message + * @p: Pointer to SAMP message + * + * Print contents of SAMP Message + * + * Return: Void + */ +void target_if_dbg_print_samp_msg(struct spectral_samp_msg *pmsg); + +/** + * get_target_if_spectral_handle_from_pdev() - Get handle to target_if internal + * Spectral data + * @pdev: Pointer to pdev + * + * Return: Handle to target_if internal Spectral data on success, NULL on + * failure + */ +static inline +struct target_if_spectral *get_target_if_spectral_handle_from_pdev( + struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral *spectral; + struct wlan_objmgr_psoc *psoc; + + if (!pdev) { + spectral_err("pdev is null"); + return NULL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + spectral_err("psoc is null"); + return NULL; + } + + spectral = (struct target_if_spectral *) + psoc->soc_cb.rx_ops.sptrl_rx_ops.sptrlro_get_target_handle( + pdev); + return spectral; +} + +/** + * target_if_vdev_get_chan_freq() - Get the operating channel frequency of a + * given vdev + * @pdev: Pointer to vdev + * + * Get the operating channel frequency of a given vdev + * + * Return: Operating channel frequency of a vdev + */ +static inline +int16_t target_if_vdev_get_chan_freq(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + spectral_err("psoc is NULL"); + return -EINVAL; + } + + return psoc->soc_cb.rx_ops.sptrl_rx_ops.sptrlro_vdev_get_chan_freq( + vdev); +} + +/** + * target_if_vdev_get_chan_freq_seg2() - Get center frequency of secondary 80 of + * given vdev + * @vdev: Pointer to vdev + * + * Get the center frequency of secondary 80 of given vdev + * + * Return: center frequency of secondary 80 + */ +static inline +int16_t target_if_vdev_get_chan_freq_seg2(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + spectral_err("psoc is NULL"); + return -EINVAL; + } + + return psoc->soc_cb.rx_ops.sptrl_rx_ops.sptrlro_vdev_get_chan_freq_seg2( + vdev); +} + +/** + * target_if_vdev_get_ch_width() - Get the operating channel bandwidth of a + * given vdev + * @pdev: Pointer to vdev + * + * Get the operating channel bandwidth of a given vdev + * + * Return: channel bandwidth enumeration corresponding to the vdev + */ +static inline +enum phy_ch_width target_if_vdev_get_ch_width(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + spectral_err("psoc is NULL"); + return CH_WIDTH_INVALID; + } + + return psoc->soc_cb.rx_ops.sptrl_rx_ops.sptrlro_vdev_get_ch_width( + vdev); +} + +/** + * target_if_vdev_get_sec20chan_freq_mhz() - Get the frequency of secondary + * 20 MHz channel for a given vdev + * @pdev: Pointer to vdev + * + * Get the frequency of secondary 20Mhz channel for a given vdev + * + * Return: Frequency of secondary 20Mhz channel for a given vdev + */ +static inline +int target_if_vdev_get_sec20chan_freq_mhz( + struct wlan_objmgr_vdev *vdev, + uint16_t *sec20chan_freq) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + spectral_err("psoc is NULL"); + return -EINVAL; + } + + return psoc->soc_cb.rx_ops.sptrl_rx_ops. + sptrlro_vdev_get_sec20chan_freq_mhz(vdev, sec20chan_freq); +} + +/** + * target_if_spectral_set_rxchainmask() - Set Spectral Rx chainmask + * @pdev: Pointer to pdev + * @spectral_rx_chainmask: Spectral Rx chainmask + * + * Return: None + */ +static inline +void target_if_spectral_set_rxchainmask(struct wlan_objmgr_pdev *pdev, + uint8_t spectral_rx_chainmask) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct target_if_spectral *spectral = NULL; + enum spectral_scan_mode smode = SPECTRAL_SCAN_MODE_NORMAL; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + spectral_err("psoc is NULL"); + return; + } + + if (smode >= SPECTRAL_SCAN_MODE_MAX) { + spectral_err("Invalid Spectral mode %u", smode); + return; + } + + if (psoc->soc_cb.rx_ops.sptrl_rx_ops. + sptrlro_spectral_is_feature_disabled(psoc)) { + spectral_info("Spectral is disabled"); + return; + } + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("Spectral target if object is null"); + return; + } + + /* set chainmask for all the modes */ + for (; smode < SPECTRAL_SCAN_MODE_MAX; smode++) + spectral->params[smode].ss_chn_mask = spectral_rx_chainmask; +} + +/** + * target_if_spectral_process_phyerr() - Process Spectral PHY error + * @pdev: Pointer to pdev + * @data: PHY error data received from FW + * @datalen: Length of data + * @p_rfqual: Pointer to RF Quality information + * @p_chaninfo: Pointer to channel information + * @tsf: TSF time instance at which the Spectral sample was received + * @acs_stats: ACS stats + * + * Process Spectral PHY error by extracting necessary information from the data + * sent by FW, and send the extracted information to application layer. + * + * Return: None + */ +static inline +void target_if_spectral_process_phyerr( + struct wlan_objmgr_pdev *pdev, + uint8_t *data, uint32_t datalen, + struct target_if_spectral_rfqual_info *p_rfqual, + struct target_if_spectral_chan_info *p_chaninfo, + uint64_t tsf64, + struct target_if_spectral_acs_stats *acs_stats) +{ + struct target_if_spectral *spectral = NULL; + struct target_if_spectral_ops *p_sops = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("Spectral target if object is null"); + return; + } + + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + if (!p_sops->spectral_process_phyerr) { + spectral_err("null spectral_process_phyerr"); + return; + } + p_sops->spectral_process_phyerr(spectral, data, datalen, + p_rfqual, p_chaninfo, + tsf64, acs_stats); +} + +static QDF_STATUS +target_if_get_spectral_msg_type(enum spectral_scan_mode smode, + enum spectral_msg_type *msg_type) { + + switch (smode) { + case SPECTRAL_SCAN_MODE_NORMAL: + *msg_type = SPECTRAL_MSG_NORMAL_MODE; + break; + + case SPECTRAL_SCAN_MODE_AGILE: + *msg_type = SPECTRAL_MSG_AGILE_MODE; + break; + + default: + spectral_err("Invalid spectral mode"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * init_160mhz_delivery_state_machine() - Initialize 160MHz Spectral + * state machine + * @spectral: Pointer to Spectral + * + * Initialize 160MHz Spectral state machine + * + * Return: void + */ +static inline void +init_160mhz_delivery_state_machine(struct target_if_spectral *spectral) +{ + spectral->state_160mhz_delivery = + SPECTRAL_REPORT_WAIT_PRIMARY80; +} + +/** + * reset_160mhz_delivery_state_machine() - Reset 160MHz Spectral state machine + * @spectral: Pointer to Spectral + * + * Reset 160MHz Spectral state machine + * + * Return: void + */ +static inline void +reset_160mhz_delivery_state_machine(struct target_if_spectral *spectral, + enum spectral_scan_mode smode) +{ + enum spectral_msg_type smsg_type; + QDF_STATUS ret; + + if (spectral->ch_width[SPECTRAL_SCAN_MODE_NORMAL] == CH_WIDTH_160MHZ) { + spectral->state_160mhz_delivery = + SPECTRAL_REPORT_WAIT_PRIMARY80; + + ret = target_if_get_spectral_msg_type(smode, &smsg_type); + if (QDF_IS_STATUS_ERROR(ret)) { + spectral_err("Failed to reset 160 MHz state machine"); + return; + } + + spectral->nl_cb.free_sbuff(spectral->pdev_obj, + smsg_type); + } +} + +/** + * is_secondaryseg_expected() - Is waiting for secondary 80 report + * @spectral: Pointer to Spectral + * + * Return true if secondary 80 report expected and mode is 160 MHz + * + * Return: true or false + */ +static inline +bool is_secondaryseg_expected(struct target_if_spectral *spectral) +{ + return + ((spectral->ch_width[SPECTRAL_SCAN_MODE_NORMAL] == CH_WIDTH_160MHZ) && + (spectral->state_160mhz_delivery == SPECTRAL_REPORT_WAIT_SECONDARY80)); +} + +/** + * is_primaryseg_expected() - Is waiting for primary 80 report + * @spectral: Pointer to Spectral + * + * Return true if mode is 160 Mhz and primary 80 report expected or + * mode is not 160 Mhz + * + * Return: true or false + */ +static inline +bool is_primaryseg_expected(struct target_if_spectral *spectral) +{ + return + ((spectral->ch_width[SPECTRAL_SCAN_MODE_NORMAL] != CH_WIDTH_160MHZ) || + ((spectral->ch_width[SPECTRAL_SCAN_MODE_NORMAL] == CH_WIDTH_160MHZ) && + (spectral->state_160mhz_delivery == SPECTRAL_REPORT_WAIT_PRIMARY80))); +} + +/** + * is_primaryseg_rx_inprog() - Is primary 80 report processing is in progress + * @spectral: Pointer to Spectral + * + * Is primary 80 report processing is in progress + * + * Return: true or false + */ +static inline +bool is_primaryseg_rx_inprog(struct target_if_spectral *spectral) +{ + return + ((spectral->ch_width[SPECTRAL_SCAN_MODE_NORMAL] != CH_WIDTH_160MHZ) || + ((spectral->ch_width[SPECTRAL_SCAN_MODE_NORMAL] == CH_WIDTH_160MHZ) && + ((spectral->spectral_gen == SPECTRAL_GEN2) || + ((spectral->spectral_gen == SPECTRAL_GEN3) && + (spectral->state_160mhz_delivery == SPECTRAL_REPORT_RX_PRIMARY80))))); +} + +/** + * is_secondaryseg_rx_inprog() - Is secondary80 report processing is in progress + * @spectral: Pointer to Spectral + * + * Is secondary 80 report processing is in progress + * + * Return: true or false + */ +static inline +bool is_secondaryseg_rx_inprog(struct target_if_spectral *spectral) +{ + return + ((spectral->ch_width[SPECTRAL_SCAN_MODE_NORMAL] == CH_WIDTH_160MHZ) && + ((spectral->spectral_gen == SPECTRAL_GEN2) || + ((spectral->spectral_gen == SPECTRAL_GEN3) && + (spectral->state_160mhz_delivery == SPECTRAL_REPORT_RX_SECONDARY80)))); +} + +/** + * target_if_160mhz_delivery_state_change() - State transition for 160Mhz + * Spectral + * @spectral: Pointer to spectral object + * @detector_id: Detector id + * + * Move the states of state machine for 160MHz spectral scan report receive + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_160mhz_delivery_state_change(struct target_if_spectral *spectral, + uint8_t detector_id); + +/** + * target_if_sops_is_spectral_enabled() - Get whether Spectral is enabled + * @arg: Pointer to handle for Spectral target_if internal private data + * @smode: Spectral scan mode + * + * Function to check whether Spectral is enabled + * + * Return: True if Spectral is enabled, false if Spectral is not enabled + */ +uint32_t target_if_sops_is_spectral_enabled(void *arg, + enum spectral_scan_mode smode); + +/** + * target_if_sops_is_spectral_active() - Get whether Spectral is active + * @arg: Pointer to handle for Spectral target_if internal private data + * @smode: Spectral scan mode + * + * Function to check whether Spectral is active + * + * Return: True if Spectral is active, false if Spectral is not active + */ +uint32_t target_if_sops_is_spectral_active(void *arg, + enum spectral_scan_mode smode); + +/** + * target_if_sops_start_spectral_scan() - Start Spectral scan + * @arg: Pointer to handle for Spectral target_if internal private data + * @smode: Spectral scan mode + * @err: Pointer to error code + * + * Function to start spectral scan + * + * Return: 0 on success else failure + */ +uint32_t target_if_sops_start_spectral_scan(void *arg, + enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + +/** + * target_if_sops_stop_spectral_scan() - Stop Spectral scan + * @arg: Pointer to handle for Spectral target_if internal private data + * @smode: Spectral scan mode + * + * Function to stop spectral scan + * + * Return: 0 in case of success, -1 on failure + */ +uint32_t target_if_sops_stop_spectral_scan(void *arg, + enum spectral_scan_mode smode); + +/** + * target_if_spectral_get_extension_channel() - Get the current Extension + * channel (in MHz) + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Return: Current Extension channel (in MHz) on success, 0 on failure or if + * extension channel is not present. + */ +uint32_t target_if_spectral_get_extension_channel(void *arg); + +/** + * target_if_spectral_get_current_channel() - Get the current channel (in MHz) + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Return: Current channel (in MHz) on success, 0 on failure + */ +uint32_t target_if_spectral_get_current_channel(void *arg); + + +/** + * target_if_spectral_reset_hw() - Reset the hardware + * @arg: Pointer to handle for Spectral target_if internal private data + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +uint32_t target_if_spectral_reset_hw(void *arg); + +/** + * target_if_spectral_get_chain_noise_floor() - Get the Chain noise floor from + * Noisefloor history buffer + * @arg: Pointer to handle for Spectral target_if internal private data + * @nf_buf: Pointer to buffer into which chain Noise Floor data should be copied + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +uint32_t target_if_spectral_get_chain_noise_floor(void *arg, int16_t *nf_buf); + +/** + * target_if_spectral_get_ext_noisefloor() - Get the extension channel + * noisefloor + * @arg: Pointer to handle for Spectral target_if internal private data + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +int8_t target_if_spectral_get_ext_noisefloor(void *arg); + +/** + * target_if_spectral_get_ctl_noisefloor() - Get the control channel noisefloor + * @arg: Pointer to handle for Spectral target_if internal private data + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +int8_t target_if_spectral_get_ctl_noisefloor(void *arg); + +/** + * target_if_spectral_get_capability() - Get whether a given Spectral hardware + * capability is available + * @arg: Pointer to handle for Spectral target_if internal private data + * @type: Spectral hardware capability type + * + * Return: True if the capability is available, false if the capability is not + * available + */ +uint32_t target_if_spectral_get_capability( + void *arg, enum spectral_capability_type type); + +/** + * target_if_spectral_set_rxfilter() - Set the RX Filter before Spectral start + * @arg: Pointer to handle for Spectral target_if internal private data + * @rxfilter: Rx filter to be used + * + * Note: This is only a placeholder function. It is not currently required since + * FW should be taking care of setting the required filters. + * + * Return: 0 + */ +uint32_t target_if_spectral_set_rxfilter(void *arg, int rxfilter); + +/** + * target_if_spectral_sops_configure_params() - Configure user supplied Spectral + * parameters + * @arg: Pointer to handle for Spectral target_if internal private data + * @params: Spectral parameters + * @smode: Spectral scan mode + * + * Return: 0 in case of success, -1 on failure + */ +uint32_t target_if_spectral_sops_configure_params( + void *arg, struct spectral_config *params, + enum spectral_scan_mode smode); + +/** + * target_if_spectral_get_rxfilter() - Get the current RX Filter settings + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Note: This is only a placeholder function. It is not currently required since + * FW should be taking care of setting the required filters. + * + * Return: 0 + */ +uint32_t target_if_spectral_get_rxfilter(void *arg); + +/** + * target_if_pdev_spectral_deinit() - De-initialize target_if Spectral + * functionality for the given pdev + * @pdev: Pointer to pdev object + * + * Return: None + */ +void target_if_pdev_spectral_deinit(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_set_spectral_config() - Set spectral config + * @pdev: Pointer to pdev object + * @threshtype: config type + * @value: config value + * @smode: Spectral scan mode + * @err: Pointer to Spectral error code + * + * API to set spectral configurations + * + * Return: QDF_STATUS_SUCCESS in case of success, else QDF_STATUS_E_FAILURE + */ +QDF_STATUS target_if_set_spectral_config(struct wlan_objmgr_pdev *pdev, + const uint32_t threshtype, + const uint32_t value, + const enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + +/** + * target_if_pdev_spectral_init() - Initialize target_if Spectral + * functionality for the given pdev + * @pdev: Pointer to pdev object + * + * Return: On success, pointer to Spectral target_if internal private data, on + * failure, NULL + */ +void *target_if_pdev_spectral_init(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_spectral_sops_get_params() - Get user configured Spectral + * parameters + * @arg: Pointer to handle for Spectral target_if internal private data + * @params: Pointer to buffer into which Spectral parameters should be copied + * @smode: Spectral scan mode + * + * Return: 0 in case of success, -1 on failure + */ +uint32_t target_if_spectral_sops_get_params( + void *arg, struct spectral_config *params, + enum spectral_scan_mode smode); + +/** + * target_if_init_spectral_capability() - Initialize Spectral capability + * @spectral: Pointer to Spectral target_if internal private data + * + * This is a workaround. + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_init_spectral_capability(struct target_if_spectral *spectral); + +/** + * target_if_start_spectral_scan() - Start spectral scan + * @pdev: Pointer to pdev object + * @smode: Spectral scan mode + * @err: Spectral error code + * + * API to start spectral scan + * + * Return: QDF_STATUS_SUCCESS in case of success, else QDF_STATUS_E_FAILURE + */ +QDF_STATUS target_if_start_spectral_scan(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + +/** + * target_if_get_spectral_config() - Get spectral configuration + * @pdev: Pointer to pdev object + * @param: Pointer to spectral_config structure in which the configuration + * should be returned + * @smode: Spectral scan mode + * + * API to get the current spectral configuration + * + * Return: QDF_STATUS_SUCCESS in case of success, else QDF_STATUS_E_FAILURE + */ +QDF_STATUS target_if_get_spectral_config(struct wlan_objmgr_pdev *pdev, + struct spectral_config *param, + enum spectral_scan_mode smode); + +/** + * target_if_spectral_scan_enable_params() - Enable use of desired Spectral + * parameters + * @spectral: Pointer to Spectral target_if internal private data + * @spectral_params: Pointer to Spectral parameters + * @smode: Spectral scan mode + * @err: Spectral error code + * + * Enable use of desired Spectral parameters by configuring them into HW, and + * starting Spectral scan + * + * Return: 0 on success, 1 on failure + */ +int target_if_spectral_scan_enable_params( + struct target_if_spectral *spectral, + struct spectral_config *spectral_params, + enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + +/** + * target_if_is_spectral_active() - Get whether Spectral is active + * @pdev: Pointer to pdev object + * @smode: Spectral scan mode + * + * Return: True if Spectral is active, false if Spectral is not active + */ +bool target_if_is_spectral_active(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode); + +/** + * target_if_is_spectral_enabled() - Get whether Spectral is enabled + * @pdev: Pointer to pdev object + * @smode: Spectral scan mode + * + * Return: True if Spectral is enabled, false if Spectral is not enabled + */ +bool target_if_is_spectral_enabled(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode); + +/** + * target_if_set_debug_level() - Set debug level for Spectral + * @pdev: Pointer to pdev object + * @debug_level: Debug level + * + * Return: QDF_STATUS_SUCCESS in case of success, else QDF_STATUS_E_FAILURE + * + */ +QDF_STATUS target_if_set_debug_level(struct wlan_objmgr_pdev *pdev, + uint32_t debug_level); + +/** + * target_if_get_debug_level() - Get debug level for Spectral + * @pdev: Pointer to pdev object + * + * Return: Current debug level + */ +uint32_t target_if_get_debug_level(struct wlan_objmgr_pdev *pdev); + + +/** + * target_if_get_spectral_capinfo() - Get Spectral capability information + * @pdev: Pointer to pdev object + * @scaps: Buffer into which data should be copied + * + * Return: QDF_STATUS_SUCCESS in case of success, else QDF_STATUS_E_FAILURE + */ +QDF_STATUS target_if_get_spectral_capinfo(struct wlan_objmgr_pdev *pdev, + struct spectral_caps *scaps); + + +/** + * target_if_get_spectral_diagstats() - Get Spectral diagnostic statistics + * @pdev: Pointer to pdev object + * @stats: Buffer into which data should be copied + * + * Return: QDF_STATUS_SUCCESS in case of success, else QDF_STATUS_E_FAILURE + */ +QDF_STATUS target_if_get_spectral_diagstats(struct wlan_objmgr_pdev *pdev, + struct spectral_diag_stats *stats); + +void target_if_register_wmi_spectral_cmd_ops( + struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops); + +QDF_STATUS +target_if_160mhz_delivery_state_change(struct target_if_spectral *spectral, + uint8_t detector_id); +#ifdef DIRECT_BUF_RX_ENABLE +/** + * target_if_consume_sfft_report_gen3() - Process fft report for gen3 + * @spectral: Pointer to spectral object + * @report: Pointer to spectral report + * + * Process fft report for gen3 + * + * Return: Success/Failure + */ +int +target_if_consume_spectral_report_gen3( + struct target_if_spectral *spectral, + struct spectral_report *report); +#endif + +/** + * target_if_spectral_fw_hang() - Crash the FW from Spectral module + * @spectral: Pointer to Spectral LMAC object + * + * Return: QDF_STATUS of operation + */ +QDF_STATUS target_if_spectral_fw_hang(struct target_if_spectral *spectral); + +#ifdef WIN32 +#pragma pack(pop, target_if_spectral) +#endif +#ifdef __ATTRIB_PACK +#undef __ATTRIB_PACK +#endif + +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ +#endif /* _TARGET_IF_SPECTRAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_netlink.c b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_netlink.c new file mode 100644 index 0000000000000000000000000000000000000000..a2be35823b39648447730db5eebeed8946ecfc95 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_netlink.c @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2011,2017-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +void +target_if_spectral_create_samp_msg(struct target_if_spectral *spectral, + struct target_if_samp_msg_params *params) +{ + /* + * XXX : Non-Rentrant. Will be an issue with dual concurrent + * operation on multi-processor system + */ + + struct spectral_samp_msg *spec_samp_msg = NULL; + + uint8_t *bin_pwr_data = NULL; + struct spectral_classifier_params *cp = NULL; + struct spectral_classifier_params *pcp = NULL; + struct target_if_spectral_ops *p_sops = NULL; + uint32_t *binptr_32 = NULL; + uint16_t *binptr_16 = NULL; + int idx = 0; + struct spectral_samp_data *samp_data; + static int samp_msg_index; + size_t pwr_count = 0; + size_t pwr_count_sec80 = 0; + enum spectral_msg_type msg_type; + QDF_STATUS ret; + struct spectral_fft_bin_len_adj_swar *swar = &spectral->len_adj_swar; + + ret = target_if_get_spectral_msg_type(params->smode, &msg_type); + if (QDF_IS_STATUS_ERROR(ret)) + return; + + if ((params->smode == SPECTRAL_SCAN_MODE_AGILE) || + is_primaryseg_rx_inprog(spectral)) { + spec_samp_msg = (struct spectral_samp_msg *) + spectral->nl_cb.get_sbuff(spectral->pdev_obj, + msg_type, + SPECTRAL_MSG_BUF_NEW); + + if (!spec_samp_msg) + return; + + samp_data = &spec_samp_msg->samp_data; + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + bin_pwr_data = params->bin_pwr_data; + + spec_samp_msg->signature = SPECTRAL_SIGNATURE; + spec_samp_msg->freq = params->freq; + if (params->smode == SPECTRAL_SCAN_MODE_AGILE) + spec_samp_msg->agile_freq = params->agile_freq; + spec_samp_msg->freq_loading = params->freq_loading; + samp_data->spectral_mode = params->smode; + samp_data->spectral_data_len = params->datalen; + samp_data->spectral_rssi = params->rssi; + samp_data->ch_width = + spectral->ch_width[SPECTRAL_SCAN_MODE_NORMAL]; + samp_data->agile_ch_width = + spectral->ch_width[SPECTRAL_SCAN_MODE_AGILE]; + samp_data->spectral_agc_total_gain = params->agc_total_gain; + samp_data->spectral_gainchange = params->gainchange; + samp_data->spectral_pri80ind = params->pri80ind; + samp_data->last_raw_timestamp = params->last_raw_timestamp; + samp_data->timestamp_war_offset = params->timestamp_war_offset; + samp_data->raw_timestamp = params->raw_timestamp; + samp_data->reset_delay = params->reset_delay; + samp_data->target_reset_count = params->target_reset_count; + + samp_data->spectral_combined_rssi = + (uint8_t)params->rssi; + samp_data->spectral_upper_rssi = params->upper_rssi; + samp_data->spectral_lower_rssi = params->lower_rssi; + + qdf_mem_copy(samp_data->spectral_chain_ctl_rssi, + params->chain_ctl_rssi, + sizeof(params->chain_ctl_rssi)); + qdf_mem_copy(samp_data->spectral_chain_ext_rssi, + params->chain_ext_rssi, + sizeof(params->chain_ext_rssi)); + + samp_data->spectral_bwinfo = params->bwinfo; + samp_data->spectral_tstamp = params->tstamp; + samp_data->spectral_max_index = params->max_index; + + /* Classifier in user space needs access to these */ + samp_data->spectral_lower_max_index = + params->max_lower_index; + samp_data->spectral_upper_max_index = + params->max_upper_index; + samp_data->spectral_nb_lower = params->nb_lower; + samp_data->spectral_nb_upper = params->nb_upper; + samp_data->spectral_last_tstamp = params->last_tstamp; + samp_data->spectral_max_mag = params->max_mag; + + /* + * Currently, we compute pwr_count considering the size of the + * samp_data->bin_pwr array rather than the number of elements + * in this array. The reasons are that + * SPECTRAL_MESSAGE_COPY_CHAR_ARRAY() where pwr_count will be + * used maps directly to OS_MEMCPY() on little endian platforms, + * and that samp_data->bin_pwr is an array of u_int8_t elements + * due to which the number of elements in the array == the size + * of the array. In case FFT bin size is increased from 8 bits + * in the future, this code would have to be changed along with + * rest of framework on which it depends. + */ + pwr_count = qdf_min((size_t)params->pwr_count, + sizeof(samp_data->bin_pwr)); + + samp_data->bin_pwr_count = pwr_count; + samp_data->lb_edge_extrabins = + spectral->lb_edge_extrabins; + samp_data->rb_edge_extrabins = + spectral->rb_edge_extrabins; + samp_data->spectral_combined_rssi = params->rssi; + samp_data->spectral_max_scale = params->max_exp; + + samp_data->noise_floor = params->noise_floor; + + /* Classifier in user space needs access to these */ + cp = &samp_data->classifier_params; + pcp = ¶ms->classifier_params; + + qdf_mem_copy(cp, pcp, + sizeof(struct spectral_classifier_params)); + + if (swar->fftbin_size_war == + SPECTRAL_FFTBIN_SIZE_WAR_4BYTE_TO_1BYTE) { + binptr_32 = (uint32_t *)bin_pwr_data; + for (idx = 0; idx < pwr_count; idx++) + samp_data->bin_pwr[idx] = *(binptr_32++); + } else if (swar->fftbin_size_war == + SPECTRAL_FFTBIN_SIZE_WAR_2BYTE_TO_1BYTE) { + binptr_16 = (uint16_t *)bin_pwr_data; + for (idx = 0; idx < pwr_count; idx++) + samp_data->bin_pwr[idx] = *(binptr_16++); + } else { + SPECTRAL_MESSAGE_COPY_CHAR_ARRAY( + &samp_data->bin_pwr[0], bin_pwr_data, + pwr_count); + } + + p_sops->get_mac_address(spectral, spec_samp_msg->macaddr); + } + + if (is_secondaryseg_rx_inprog(spectral)) { + spec_samp_msg = (struct spectral_samp_msg *) + spectral->nl_cb.get_sbuff(spectral->pdev_obj, + msg_type, + SPECTRAL_MSG_BUF_SAVED); + + if (!spec_samp_msg) { + spectral_err("Spectral SAMP message is NULL"); + return; + } + + samp_data = &spec_samp_msg->samp_data; + spec_samp_msg->vhtop_ch_freq_seg1 = params->vhtop_ch_freq_seg1; + spec_samp_msg->vhtop_ch_freq_seg2 = params->vhtop_ch_freq_seg2; + samp_data->spectral_rssi_sec80 = + params->rssi_sec80; + samp_data->noise_floor_sec80 = + params->noise_floor_sec80; + spec_samp_msg->samp_data.spectral_agc_total_gain_sec80 = + params->agc_total_gain_sec80; + spec_samp_msg->samp_data.spectral_gainchange_sec80 = + params->gainchange_sec80; + spec_samp_msg->samp_data.spectral_pri80ind_sec80 = + params->pri80ind_sec80; + + samp_data->spectral_data_len_sec80 = + params->datalen_sec80; + samp_data->spectral_max_index_sec80 = + params->max_index_sec80; + samp_data->spectral_max_mag_sec80 = + params->max_mag_sec80; + + samp_data->raw_timestamp_sec80 = params->raw_timestamp_sec80; + + /* + * Currently, we compute pwr_count_sec80 considering the size of + * the samp_data->bin_pwr_sec80 array rather than the number of + * elements in this array. The reasons are that + * SPECTRAL_MESSAGE_COPY_CHAR_ARRAY() where pwr_count_sec80 will + * be used maps directly to OS_MEMCPY() on little endian + * platforms, and that samp_data->bin_pwr_sec80 is an array of + * u_int8_t elements due to which the number of elements in the + * array == the size of the array. In case FFT bin size is + * increased from 8 bits in the future, this code would have to + * be changed along with rest of framework on which it depends. + */ + pwr_count_sec80 = qdf_min((size_t)params->pwr_count_sec80, + sizeof(samp_data->bin_pwr_sec80)); + + samp_data->bin_pwr_count_sec80 = pwr_count_sec80; + + bin_pwr_data = params->bin_pwr_data_sec80; + if (swar->fftbin_size_war == + SPECTRAL_FFTBIN_SIZE_WAR_4BYTE_TO_1BYTE) { + binptr_32 = (uint32_t *)bin_pwr_data; + for (idx = 0; idx < pwr_count_sec80; idx++) + samp_data->bin_pwr_sec80[idx] = *(binptr_32++); + } else if (swar->fftbin_size_war == + SPECTRAL_FFTBIN_SIZE_WAR_2BYTE_TO_1BYTE) { + binptr_16 = (uint16_t *)bin_pwr_data; + for (idx = 0; idx < pwr_count_sec80; idx++) + samp_data->bin_pwr_sec80[idx] = *(binptr_16++); + } else { + SPECTRAL_MESSAGE_COPY_CHAR_ARRAY( + &samp_data->bin_pwr_sec80[0], + params->bin_pwr_data_sec80, + pwr_count_sec80); + } + } + + if (spectral->ch_width[SPECTRAL_SCAN_MODE_NORMAL] != CH_WIDTH_160MHZ || + (params->smode == SPECTRAL_SCAN_MODE_AGILE) || + is_secondaryseg_rx_inprog(spectral)) { + if (spectral->send_phy_data(spectral->pdev_obj, + msg_type) == 0) + spectral->spectral_sent_msg++; + samp_msg_index++; + } + + /* Take care of state transitions for 160MHz/ 80p80 */ + if ((spectral->spectral_gen == SPECTRAL_GEN3) && + (params->smode != SPECTRAL_SCAN_MODE_AGILE)) + target_if_160mhz_delivery_state_change( + spectral, + SPECTRAL_DETECTOR_INVALID); +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_phyerr.c b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_phyerr.c new file mode 100644 index 0000000000000000000000000000000000000000..e235149110cbf8252134aedbf155322cdfdc7cf4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_phyerr.c @@ -0,0 +1,2155 @@ +/* + * Copyright (c) 2011,2017-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef DIRECT_BUF_RX_ENABLE +#include +#endif +extern int spectral_debug_level; + +#ifdef WLAN_CONV_SPECTRAL_ENABLE + +#define SPECTRAL_HEXDUMP_OCTET_PRINT_SIZE (3) +#define SPECTRAL_HEXDUMP_NUM_OCTETS_PER_LINE (16) +#define SPECTRAL_HEXDUMP_EXTRA_BUFFER_PER_LINE (16) + +/* + * Provision for the expected hexdump line size as follows: + * + * Size per octet multiplied by number of octets per line + * + + * ASCII representation which is equivalent in print size to number of octets + * per line + * + + * Some extra buffer + */ +#define SPECTRAL_HEXDUMP_LINESIZE \ + ((SPECTRAL_HEXDUMP_OCTET_PRINT_SIZE * \ + SPECTRAL_HEXDUMP_NUM_OCTETS_PER_LINE) + \ + SPECTRAL_HEXDUMP_NUM_OCTETS_PER_LINE + \ + SPECTRAL_HEXDUMP_EXTRA_BUFFER_PER_LINE) + +/** + * target_if_spectral_hexdump() - Print hexdump of the given buffer + * @_buf: Pointer to buffer + * @_len: Length of the buffer + * + * Print the hexdump of buffer upto given length. Print upto + * SPECTRAL_HEXDUMP_NUM_OCTETS_PER_LINE per line, followed by the ASCII + * representation of these octets. + */ +static inline void target_if_spectral_hexdump(unsigned char *_buf, int _len) +{ + int i, mod; + unsigned char ascii[SPECTRAL_HEXDUMP_NUM_OCTETS_PER_LINE + 1]; + unsigned char *pc = (_buf); + char hexdump_line[SPECTRAL_HEXDUMP_LINESIZE + 1]; + int loc = 0; + + qdf_mem_zero(hexdump_line, sizeof(hexdump_line)); + + if (_len <= 0) { + spectral_err("buffer len is %d, too short", _len); + return; + } + + for (i = 0; i < _len; i++) { + mod = i % SPECTRAL_HEXDUMP_NUM_OCTETS_PER_LINE; + + if (!mod) { + if (i) { + qdf_assert_always(loc < sizeof(hexdump_line)); + loc += snprintf(&hexdump_line[loc], + sizeof(hexdump_line) - loc, + " %s", ascii); + spectral_debug("%s", hexdump_line); + qdf_mem_zero(hexdump_line, + sizeof(hexdump_line)); + loc = 0; + } + } + + qdf_assert_always(loc < sizeof(hexdump_line)); + loc += snprintf(&hexdump_line[loc], sizeof(hexdump_line) - loc, + " %02x", pc[i]); + + if ((pc[i] < 0x20) || (pc[i] > 0x7e)) + ascii[mod] = '.'; + else + ascii[mod] = pc[i]; + ascii[(mod) + 1] = '\0'; + } + + while ((i % SPECTRAL_HEXDUMP_NUM_OCTETS_PER_LINE) != 0) { + qdf_assert_always(loc < sizeof(hexdump_line)); + loc += snprintf(&hexdump_line[loc], sizeof(hexdump_line) - loc, + " "); + i++; + } + + qdf_assert_always(loc < sizeof(hexdump_line)); + snprintf(&hexdump_line[loc], sizeof(hexdump_line) - loc, " %s", ascii); + spectral_debug("%s", hexdump_line); +} + +/** + * target_if_print_buf() - Prints given buffer for given length + * @pbuf: Pointer to buffer + * @len: length + * + * Prints given buffer for given length + * + * Return: void + */ +static void +target_if_print_buf(uint8_t *pbuf, int len) +{ + int i = 0; + + for (i = 0; i < len; i++) { + spectral_debug("%02X ", pbuf[i]); + if (i % 32 == 31) + spectral_debug("\n"); + } +} + +int +target_if_spectral_dump_fft(uint8_t *pfft, int fftlen) +{ + int i = 0; + + /* + * TODO : Do not delete the following print + * The scripts used to validate Spectral depend on this Print + */ + spectral_debug("SPECTRAL : FFT Length is 0x%x (%d)", fftlen, fftlen); + + spectral_debug("fft_data # "); + for (i = 0; i < fftlen; i++) + spectral_debug("%d ", pfft[i]); + spectral_debug("\n"); + return 0; +} + +QDF_STATUS target_if_spectral_fw_hang(struct target_if_spectral *spectral) +{ + struct crash_inject param; + + if (!spectral) { + spectral_err("Spectral LMAC object is null"); + return QDF_STATUS_E_INVAL; + } + qdf_mem_set(¶m, sizeof(param), 0); + param.type = 1; //RECOVERY_SIM_ASSERT + + return spectral->param_wmi_cmd_ops.wmi_spectral_crash_inject( + GET_WMI_HDL_FROM_PDEV(spectral->pdev_obj), ¶m); +} + +void +target_if_dbg_print_samp_param(struct target_if_samp_msg_params *p) +{ + spectral_debug("\nSAMP Packet : -------------------- START --------------------"); + spectral_debug("Freq = %d", p->freq); + spectral_debug("RSSI = %d", p->rssi); + spectral_debug("Bin Count = %d", p->pwr_count); + spectral_debug("Timestamp = %d", p->tstamp); + spectral_debug("SAMP Packet : -------------------- END -----------------------"); +} + +void +target_if_dbg_print_samp_msg(struct spectral_samp_msg *ss_msg) +{ + int i = 0; + + struct spectral_samp_data *p = &ss_msg->samp_data; + struct spectral_classifier_params *pc = &p->classifier_params; + struct interf_src_rsp *pi = &p->interf_list; + + spectral_dbg_line(); + spectral_debug("Spectral Message"); + spectral_dbg_line(); + spectral_debug("Signature : 0x%x", ss_msg->signature); + spectral_debug("Freq : %d", ss_msg->freq); + spectral_debug("Freq load : %d", ss_msg->freq_loading); + spectral_debug("Intfnc type : %d", ss_msg->int_type); + spectral_dbg_line(); + spectral_debug("Spectral Data info"); + spectral_dbg_line(); + spectral_debug("data length : %d", p->spectral_data_len); + spectral_debug("rssi : %d", p->spectral_rssi); + spectral_debug("combined rssi : %d", p->spectral_combined_rssi); + spectral_debug("upper rssi : %d", p->spectral_upper_rssi); + spectral_debug("lower rssi : %d", p->spectral_lower_rssi); + spectral_debug("bw info : %d", p->spectral_bwinfo); + spectral_debug("timestamp : %d", p->spectral_tstamp); + spectral_debug("max index : %d", p->spectral_max_index); + spectral_debug("max exp : %d", p->spectral_max_exp); + spectral_debug("max mag : %d", p->spectral_max_mag); + spectral_debug("last timstamp : %d", p->spectral_last_tstamp); + spectral_debug("upper max idx : %d", p->spectral_upper_max_index); + spectral_debug("lower max idx : %d", p->spectral_lower_max_index); + spectral_debug("bin power count : %d", p->bin_pwr_count); + spectral_dbg_line(); + spectral_debug("Classifier info"); + spectral_dbg_line(); + spectral_debug("20/40 Mode : %d", pc->spectral_20_40_mode); + spectral_debug("dc index : %d", pc->spectral_dc_index); + spectral_debug("dc in MHz : %d", pc->spectral_dc_in_mhz); + spectral_debug("upper channel : %d", pc->upper_chan_in_mhz); + spectral_debug("lower channel : %d", pc->lower_chan_in_mhz); + spectral_dbg_line(); + spectral_debug("Interference info"); + spectral_dbg_line(); + spectral_debug("inter count : %d", pi->count); + + for (i = 0; i < pi->count; i++) { + spectral_debug("inter type : %d", + pi->interf[i].interf_type); + spectral_debug("min freq : %d", + pi->interf[i].interf_min_freq); + spectral_debug("max freq : %d", + pi->interf[i].interf_max_freq); + } +} + +uint32_t +target_if_get_offset_swar_sec80(uint32_t channel_width) +{ + uint32_t offset = 0; + + switch (channel_width) { + case CH_WIDTH_20MHZ: + offset = OFFSET_CH_WIDTH_20; + break; + case CH_WIDTH_40MHZ: + offset = OFFSET_CH_WIDTH_40; + break; + case CH_WIDTH_80MHZ: + offset = OFFSET_CH_WIDTH_80; + break; + case CH_WIDTH_160MHZ: + offset = OFFSET_CH_WIDTH_160; + break; + default: + offset = OFFSET_CH_WIDTH_80; + break; + } + return offset; +} + +/** + * target_if_dump_summary_report_gen2() - Dump Spectral Summary Report for gen2 + * @ptlv: Pointer to Spectral Phyerr TLV + * @tlvlen: length + * @is_160_format: Indicates whether information provided by HW is in altered + * format for 802.11ac 160/80+80 MHz support (QCA9984 onwards) + * + * Dump Spectral Summary Report for gen2 + * + * Return: Success/Failure + */ +static int +target_if_dump_summary_report_gen2(struct spectral_phyerr_tlv_gen2 *ptlv, + int tlvlen, bool is_160_format) +{ + /* + * For simplicity, everything is defined as uint32_t (except one). + * Proper code will later use the right sizes. + */ + + /* + * For easy comparision between MDK team and OS team, the MDK script + * variable names have been used + */ + + uint32_t agc_mb_gain; + uint32_t sscan_gidx; + uint32_t agc_total_gain; + uint32_t recent_rfsat; + uint32_t ob_flag; + uint32_t nb_mask; + uint32_t peak_mag; + int16_t peak_inx; + + uint32_t ss_summary_A = 0; + uint32_t ss_summary_B = 0; + uint32_t ss_summary_C = 0; + uint32_t ss_summary_D = 0; + uint32_t ss_summary_E = 0; + struct spectral_phyerr_hdr_gen2 *phdr = + (struct spectral_phyerr_hdr_gen2 *)( + (uint8_t *)ptlv + + sizeof(struct spectral_phyerr_tlv_gen2)); + + spectral_debug("SPECTRAL : SPECTRAL SUMMARY REPORT"); + + if (is_160_format) { + if (tlvlen != 20) { + spectral_err("Unexpected TLV length %d for Spectral Summary Report! Hexdump follows", + tlvlen); + target_if_print_buf((uint8_t *)ptlv, tlvlen + 4); + return -EPERM; + } + + /* Doing copy as the contents may not be aligned */ + qdf_mem_copy(&ss_summary_A, (uint8_t *)phdr, sizeof(int)); + qdf_mem_copy(&ss_summary_B, + (uint8_t *)((uint8_t *)phdr + sizeof(int)), + sizeof(int)); + qdf_mem_copy(&ss_summary_C, + (uint8_t *)((uint8_t *)phdr + 2 * sizeof(int)), + sizeof(int)); + qdf_mem_copy(&ss_summary_D, + (uint8_t *)((uint8_t *)phdr + 3 * sizeof(int)), + sizeof(int)); + qdf_mem_copy(&ss_summary_E, + (uint8_t *)((uint8_t *)phdr + 4 * sizeof(int)), + sizeof(int)); + + /* + * The following is adapted from MDK scripts for + * easier comparability + */ + + recent_rfsat = ((ss_summary_A >> 8) & 0x1); + sscan_gidx = (ss_summary_A & 0xff); + spectral_debug("sscan_gidx=%d, is_recent_rfsat=%d", + sscan_gidx, recent_rfsat); + + /* First segment */ + agc_mb_gain = ((ss_summary_B >> 10) & 0x7f); + agc_total_gain = (ss_summary_B & 0x3ff); + nb_mask = ((ss_summary_C >> 22) & 0xff); + ob_flag = ((ss_summary_B >> 17) & 0x1); + peak_inx = (ss_summary_C & 0xfff); + if (peak_inx > 2047) + peak_inx = peak_inx - 4096; + peak_mag = ((ss_summary_C >> 12) & 0x3ff); + + spectral_debug("agc_total_gain_segid0 = 0x%.2x, agc_mb_gain_segid0=%d", + agc_total_gain, agc_mb_gain); + spectral_debug("nb_mask_segid0 = 0x%.2x, ob_flag_segid0=%d, peak_index_segid0=%d, peak_mag_segid0=%d", + nb_mask, ob_flag, peak_inx, peak_mag); + + /* Second segment */ + agc_mb_gain = ((ss_summary_D >> 10) & 0x7f); + agc_total_gain = (ss_summary_D & 0x3ff); + nb_mask = ((ss_summary_E >> 22) & 0xff); + ob_flag = ((ss_summary_D >> 17) & 0x1); + peak_inx = (ss_summary_E & 0xfff); + if (peak_inx > 2047) + peak_inx = peak_inx - 4096; + peak_mag = ((ss_summary_E >> 12) & 0x3ff); + + spectral_debug("agc_total_gain_segid1 = 0x%.2x, agc_mb_gain_segid1=%d", + agc_total_gain, agc_mb_gain); + spectral_debug("nb_mask_segid1 = 0x%.2x, ob_flag_segid1=%d, peak_index_segid1=%d, peak_mag_segid1=%d", + nb_mask, ob_flag, peak_inx, peak_mag); + } else { + if (tlvlen != 8) { + spectral_err("Unexpected TLV length %d for Spectral Summary Report! Hexdump follows", + tlvlen); + target_if_print_buf((uint8_t *)ptlv, tlvlen + 4); + return -EPERM; + } + + /* Doing copy as the contents may not be aligned */ + qdf_mem_copy(&ss_summary_A, (uint8_t *)phdr, sizeof(int)); + qdf_mem_copy(&ss_summary_B, + (uint8_t *)((uint8_t *)phdr + sizeof(int)), + sizeof(int)); + + nb_mask = ((ss_summary_B >> 22) & 0xff); + ob_flag = ((ss_summary_B >> 30) & 0x1); + peak_inx = (ss_summary_B & 0xfff); + + if (peak_inx > 2047) + peak_inx = peak_inx - 4096; + + peak_mag = ((ss_summary_B >> 12) & 0x3ff); + agc_mb_gain = ((ss_summary_A >> 24) & 0x7f); + agc_total_gain = (ss_summary_A & 0x3ff); + sscan_gidx = ((ss_summary_A >> 16) & 0xff); + recent_rfsat = ((ss_summary_B >> 31) & 0x1); + + spectral_debug("nb_mask = 0x%.2x, ob_flag=%d, peak_index=%d, peak_mag=%d, agc_mb_gain=%d, agc_total_gain=%d, sscan_gidx=%d, recent_rfsat=%d", + nb_mask, ob_flag, peak_inx, peak_mag, + agc_mb_gain, agc_total_gain, sscan_gidx, + recent_rfsat); + } + + return 0; +} + +/** + * target_if_process_sfft_report_gen2() - Process Search FFT Report + * @ptlv: Pointer to Spectral Phyerr TLV + * @tlvlen: length + * @p_fft_info: Pointer to search fft info + * + * Dump Spectral Summary Report for gen2 + * + * Return: Success/Failure + */ +static int +target_if_process_sfft_report_gen2( + struct spectral_phyerr_tlv_gen2 *ptlv, + int tlvlen, + struct spectral_search_fft_info_gen2 *p_fft_info) +{ + /* + * For simplicity, everything is defined as uint32_t (except one). + * Proper code will later use the right sizes. + */ + /* + * For easy comparision between MDK team and OS team, the MDK script + * variable names have been used + */ + uint32_t relpwr_db; + uint32_t num_str_bins_ib; + uint32_t base_pwr; + uint32_t total_gain_info; + + uint32_t fft_chn_idx; + int16_t peak_inx; + uint32_t avgpwr_db; + uint32_t peak_mag; + + uint32_t fft_summary_A = 0; + uint32_t fft_summary_B = 0; + uint8_t *tmp = (uint8_t *)ptlv; + struct spectral_phyerr_hdr_gen2 *phdr = + (struct spectral_phyerr_hdr_gen2 *)( + tmp + + sizeof(struct spectral_phyerr_tlv_gen2)); + + /* Relook this */ + if (tlvlen < 8) { + spectral_err("Unexpected TLV length %d for Spectral Summary Report! Hexdump follows", + tlvlen); + target_if_print_buf((uint8_t *)ptlv, tlvlen + 4); + return -EPERM; + } + + /* Doing copy as the contents may not be aligned */ + qdf_mem_copy(&fft_summary_A, (uint8_t *)phdr, sizeof(int)); + qdf_mem_copy(&fft_summary_B, + (uint8_t *)((uint8_t *)phdr + sizeof(int)), + sizeof(int)); + + relpwr_db = ((fft_summary_B >> 26) & 0x3f); + num_str_bins_ib = fft_summary_B & 0xff; + base_pwr = ((fft_summary_A >> 14) & 0x1ff); + total_gain_info = ((fft_summary_A >> 23) & 0x1ff); + + fft_chn_idx = ((fft_summary_A >> 12) & 0x3); + peak_inx = fft_summary_A & 0xfff; + + if (peak_inx > 2047) + peak_inx = peak_inx - 4096; + + avgpwr_db = ((fft_summary_B >> 18) & 0xff); + peak_mag = ((fft_summary_B >> 8) & 0x3ff); + + /* Populate the Search FFT Info */ + if (p_fft_info) { + p_fft_info->relpwr_db = relpwr_db; + p_fft_info->num_str_bins_ib = num_str_bins_ib; + p_fft_info->base_pwr = base_pwr; + p_fft_info->total_gain_info = total_gain_info; + p_fft_info->fft_chn_idx = fft_chn_idx; + p_fft_info->peak_inx = peak_inx; + p_fft_info->avgpwr_db = avgpwr_db; + p_fft_info->peak_mag = peak_mag; + } + + return 0; +} + +/** + * target_if_dump_adc_report_gen2() - Dump ADC Reports for gen2 + * @ptlv: Pointer to Spectral Phyerr TLV + * @tlvlen: length + * + * Dump ADC Reports for gen2 + * + * Return: Success/Failure + */ +static int +target_if_dump_adc_report_gen2( + struct spectral_phyerr_tlv_gen2 *ptlv, int tlvlen) +{ + int i; + uint32_t *pdata; + uint32_t data; + + /* + * For simplicity, everything is defined as uint32_t (except one). + * Proper code will later use the right sizes. + */ + uint32_t samp_fmt; + uint32_t chn_idx; + uint32_t recent_rfsat; + uint32_t agc_mb_gain; + uint32_t agc_total_gain; + + uint32_t adc_summary = 0; + + uint8_t *ptmp = (uint8_t *)ptlv; + + spectral_debug("SPECTRAL : ADC REPORT"); + + /* Relook this */ + if (tlvlen < 4) { + spectral_err("Unexpected TLV length %d for ADC Report! Hexdump follows", + tlvlen); + target_if_print_buf((uint8_t *)ptlv, tlvlen + 4); + return -EPERM; + } + + qdf_mem_copy(&adc_summary, (uint8_t *)(ptlv + 4), sizeof(int)); + + samp_fmt = ((adc_summary >> 28) & 0x1); + chn_idx = ((adc_summary >> 24) & 0x3); + recent_rfsat = ((adc_summary >> 23) & 0x1); + agc_mb_gain = ((adc_summary >> 16) & 0x7f); + agc_total_gain = adc_summary & 0x3ff; + + spectral_debug("samp_fmt= %u, chn_idx= %u, recent_rfsat= %u, agc_mb_gain=%u agc_total_gain=%u", + samp_fmt, chn_idx, recent_rfsat, agc_mb_gain, + agc_total_gain); + + for (i = 0; i < (tlvlen / 4); i++) { + pdata = (uint32_t *)(ptmp + 4 + i * 4); + data = *pdata; + + /* Interpreting capture format 1 */ + if (1) { + uint8_t i1; + uint8_t q1; + uint8_t i2; + uint8_t q2; + int8_t si1; + int8_t sq1; + int8_t si2; + int8_t sq2; + + i1 = data & 0xff; + q1 = (data >> 8) & 0xff; + i2 = (data >> 16) & 0xff; + q2 = (data >> 24) & 0xff; + + if (i1 > 127) + si1 = i1 - 256; + else + si1 = i1; + + if (q1 > 127) + sq1 = q1 - 256; + else + sq1 = q1; + + if (i2 > 127) + si2 = i2 - 256; + else + si2 = i2; + + if (q2 > 127) + sq2 = q2 - 256; + else + sq2 = q2; + + spectral_debug("SPECTRAL ADC : Interpreting capture format 1"); + spectral_debug("adc_data_format_1 # %d %d %d", + 2 * i, si1, sq1); + spectral_debug("adc_data_format_1 # %d %d %d", + 2 * i + 1, si2, sq2); + } + + /* Interpreting capture format 0 */ + if (1) { + uint16_t i1; + uint16_t q1; + int16_t si1; + int16_t sq1; + + i1 = data & 0xffff; + q1 = (data >> 16) & 0xffff; + if (i1 > 32767) + si1 = i1 - 65536; + else + si1 = i1; + + if (q1 > 32767) + sq1 = q1 - 65536; + else + sq1 = q1; + spectral_debug("SPECTRAL ADC : Interpreting capture format 0"); + spectral_debug("adc_data_format_2 # %d %d %d", + i, si1, sq1); + } + } + + spectral_debug("\n"); + + return 0; +} + +/** + * target_if_dump_sfft_report_gen2() - Process Search FFT Report for gen2 + * @ptlv: Pointer to Spectral Phyerr TLV + * @tlvlen: length + * @is_160_format: Indicates 160 format + * + * Process Search FFT Report for gen2 + * + * Return: Success/Failure + */ +static int +target_if_dump_sfft_report_gen2(struct spectral_phyerr_tlv_gen2 *ptlv, + int tlvlen, bool is_160_format) +{ + int i; + uint32_t fft_mag; + + /* + * For simplicity, everything is defined as uint32_t (except one). + * Proper code will later use the right sizes. + */ + /* + * For easy comparision between MDK team and OS team, the MDK script + * variable names have been used + */ + uint32_t relpwr_db; + uint32_t num_str_bins_ib; + uint32_t base_pwr; + uint32_t total_gain_info; + + uint32_t fft_chn_idx; + int16_t peak_inx; + uint32_t avgpwr_db; + uint32_t peak_mag; + uint8_t segid; + + uint32_t fft_summary_A = 0; + uint32_t fft_summary_B = 0; + uint32_t fft_summary_C = 0; + uint8_t *tmp = (uint8_t *)ptlv; + struct spectral_phyerr_hdr_gen2 *phdr = + (struct spectral_phyerr_hdr_gen2 *)( + tmp + + sizeof(struct spectral_phyerr_tlv_gen2)); + uint32_t segid_skiplen = 0; + + if (is_160_format) + segid_skiplen = sizeof(SPECTRAL_SEGID_INFO); + + spectral_debug("SPECTRAL : SEARCH FFT REPORT"); + + /* Relook this */ + if (tlvlen < (8 + segid_skiplen)) { + spectral_err("Unexpected TLV length %d for Spectral Summary Report! Hexdump follows", + tlvlen); + target_if_print_buf((uint8_t *)ptlv, tlvlen + 4); + return -EPERM; + } + + /* Doing copy as the contents may not be aligned */ + qdf_mem_copy(&fft_summary_A, (uint8_t *)phdr, sizeof(int)); + qdf_mem_copy(&fft_summary_B, + (uint8_t *)((uint8_t *)phdr + sizeof(int)), + sizeof(int)); + if (is_160_format) + qdf_mem_copy(&fft_summary_C, + (uint8_t *)((uint8_t *)phdr + 2 * sizeof(int)), + sizeof(int)); + + relpwr_db = ((fft_summary_B >> 26) & 0x3f); + num_str_bins_ib = fft_summary_B & 0xff; + base_pwr = ((fft_summary_A >> 14) & 0x1ff); + total_gain_info = ((fft_summary_A >> 23) & 0x1ff); + + fft_chn_idx = ((fft_summary_A >> 12) & 0x3); + peak_inx = fft_summary_A & 0xfff; + + if (peak_inx > 2047) + peak_inx = peak_inx - 4096; + + avgpwr_db = ((fft_summary_B >> 18) & 0xff); + peak_mag = ((fft_summary_B >> 8) & 0x3ff); + + spectral_debug("Header A = 0x%x Header B = 0x%x", + phdr->hdr_a, phdr->hdr_b); + spectral_debug("Base Power= 0x%x, Total Gain= %d, relpwr_db=%d, num_str_bins_ib=%d fft_chn_idx=%d peak_inx=%d avgpwr_db=%d peak_mag=%d", + base_pwr, total_gain_info, relpwr_db, num_str_bins_ib, + fft_chn_idx, peak_inx, avgpwr_db, peak_mag); + if (is_160_format) { + segid = fft_summary_C & 0x1; + spectral_debug("Segment ID: %hhu", segid); + } + + spectral_debug("FFT bins:"); + for (i = 0; i < (tlvlen - 8 - segid_skiplen); i++) { + fft_mag = ((uint8_t *)ptlv)[12 + segid_skiplen + i]; + spectral_debug("%d %d, ", i, fft_mag); + } + + spectral_debug("\n"); + + return 0; +} + +#ifdef SPECTRAL_DEBUG_SAMP_MSG +/** + * target_if_spectral_log_SAMP_param() - Log SAMP parameters + * @params: Reference to target_if_samp_msg_params + * + * API to log spectral SAMP message parameters + * + * Return: None + */ +static void +target_if_spectral_log_SAMP_param(struct target_if_samp_msg_params *params) +{ + target_if_dbg_print_samp_param(params); +} + +#else +static void +target_if_spectral_log_SAMP_param(struct target_if_samp_msg_params *params) +{ +} +#endif + +int +target_if_process_phyerr_gen2(struct target_if_spectral *spectral, + uint8_t *data, + uint32_t datalen, + struct target_if_spectral_rfqual_info *p_rfqual, + struct target_if_spectral_chan_info *p_chaninfo, + uint64_t tsf64, + struct target_if_spectral_acs_stats *acs_stats) +{ + /* + * XXX : The classifier do not use all the members of the SAMP + * message data format. + * The classifier only depends upon the following parameters + * + * 1. Frequency (freq, msg->freq) + * 2. Spectral RSSI (spectral_rssi, + * msg->samp_data.spectral_rssi) + * 3. Bin Power Count (bin_pwr_count, + * msg->samp_data.bin_pwr_count) + * 4. Bin Power values (bin_pwr, msg->samp_data.bin_pwr[0] + * 5. Spectral Timestamp (spectral_tstamp, + * msg->samp_data.spectral_tstamp) + * 6. MAC Address (macaddr, msg->macaddr) + * + * This function prepares the params structure and populates it + * with + * relevant values, this is in turn passed to + * spectral_create_samp_msg() + * to prepare fully formatted Spectral SAMP message + * + * XXX : Need to verify + * 1. Order of FFT bin values + * + */ + + struct target_if_samp_msg_params params; + struct spectral_search_fft_info_gen2 search_fft_info; + struct spectral_search_fft_info_gen2 *p_sfft = &search_fft_info; + struct spectral_search_fft_info_gen2 search_fft_info_sec80; + struct spectral_search_fft_info_gen2 *p_sfft_sec80 = + &search_fft_info_sec80; + uint32_t segid_skiplen = 0; + + int8_t rssi_up = 0; + int8_t rssi_low = 0; + + int8_t chn_idx_highest_enabled = 0; + int8_t chn_idx_lowest_enabled = 0; + + uint8_t control_rssi = 0; + uint8_t extension_rssi = 0; + uint8_t combined_rssi = 0; + + uint32_t tstamp = 0; + + struct target_if_spectral_ops *p_sops = + GET_TARGET_IF_SPECTRAL_OPS(spectral); + + struct spectral_phyerr_tlv_gen2 *ptlv = + (struct spectral_phyerr_tlv_gen2 *)data; + struct spectral_phyerr_tlv_gen2 *ptlv_sec80 = NULL; + struct spectral_phyerr_fft_gen2 *pfft = NULL; + struct spectral_phyerr_fft_gen2 *pfft_sec80 = NULL; + + uint8_t segid = 0; + uint8_t segid_sec80 = 0; + + if (spectral->is_160_format) + segid_skiplen = sizeof(SPECTRAL_SEGID_INFO); + + pfft = (struct spectral_phyerr_fft_gen2 *)( + data + + sizeof(struct spectral_phyerr_tlv_gen2) + + sizeof(struct spectral_phyerr_hdr_gen2) + + segid_skiplen); + + /* + * XXX Extend SPECTRAL_DPRINTK() to use spectral_debug_level, + * and use this facility inside spectral_dump_phyerr_data() + * and supporting functions. + */ + if (spectral_debug_level & DEBUG_SPECTRAL2) + target_if_spectral_dump_phyerr_data_gen2( + data, datalen, + spectral->is_160_format); + + if (spectral_debug_level & DEBUG_SPECTRAL4) { + target_if_spectral_dump_phyerr_data_gen2( + data, datalen, + spectral->is_160_format); + spectral_debug_level = DEBUG_SPECTRAL; + } + + if (ptlv->signature != SPECTRAL_PHYERR_SIGNATURE_GEN2) { + /* + * EV# 118023: We tentatively disable the below print + * and provide stats instead. + */ + spectral->diag_stats.spectral_mismatch++; + return -EPERM; + } + + OS_MEMZERO(¶ms, sizeof(params)); + /* Gen 2 only supports normal Spectral scan currently */ + params.smode = SPECTRAL_SCAN_MODE_NORMAL; + + if (ptlv->tag == TLV_TAG_SEARCH_FFT_REPORT_GEN2) { + if (spectral->is_160_format) { + segid = *((SPECTRAL_SEGID_INFO *)( + (uint8_t *)ptlv + + sizeof(struct spectral_phyerr_tlv_gen2) + + sizeof(struct spectral_phyerr_hdr_gen2))); + + if (segid != 0) { + struct spectral_diag_stats *p_diag_stats = + &spectral->diag_stats; + p_diag_stats->spectral_vhtseg1id_mismatch++; + return -EPERM; + } + } + + target_if_process_sfft_report_gen2(ptlv, ptlv->length, + &search_fft_info); + + tstamp = p_sops->get_tsf64(spectral) & SPECTRAL_TSMASK; + + combined_rssi = p_rfqual->rssi_comb; + + if (spectral->upper_is_control) + rssi_up = control_rssi; + else + rssi_up = extension_rssi; + + if (spectral->lower_is_control) + rssi_low = control_rssi; + else + rssi_low = extension_rssi; + + params.rssi = p_rfqual->rssi_comb; + params.lower_rssi = rssi_low; + params.upper_rssi = rssi_up; + + if (spectral->sc_spectral_noise_pwr_cal) { + params.chain_ctl_rssi[0] = + p_rfqual->pc_rssi_info[0].rssi_pri20; + params.chain_ctl_rssi[1] = + p_rfqual->pc_rssi_info[1].rssi_pri20; + params.chain_ctl_rssi[2] = + p_rfqual->pc_rssi_info[2].rssi_pri20; + params.chain_ext_rssi[0] = + p_rfqual->pc_rssi_info[0].rssi_sec20; + params.chain_ext_rssi[1] = + p_rfqual->pc_rssi_info[1].rssi_sec20; + params.chain_ext_rssi[2] = + p_rfqual->pc_rssi_info[2].rssi_sec20; + } + + /* + * XXX : This actually depends on the programmed chain mask + * This value decides the per-chain enable mask to select + * the input ADC for search FTT. + * For modes upto VHT80, if more than one chain is + * enabled, the max valid chain + * is used. LSB corresponds to chain zero. + * For VHT80_80 and VHT160, the lowest enabled chain is + * used for primary + * detection and highest enabled chain is used for + * secondary detection. + * + * XXX : The current algorithm do not use these control and + * extension channel + * Instead, it just relies on the combined RSSI values + * only. + * For fool-proof detection algorithm, we should take + * these RSSI values in to account. + * This is marked for future enhancements. + */ + chn_idx_highest_enabled = + ((spectral->params[params.smode].ss_chn_mask & 0x8) ? 3 : + (spectral->params[params.smode].ss_chn_mask & 0x4) ? 2 : + (spectral->params[params.smode].ss_chn_mask & 0x2) ? 1 : 0); + chn_idx_lowest_enabled = + ((spectral->params[params.smode].ss_chn_mask & 0x1) ? 0 : + (spectral->params[params.smode].ss_chn_mask & 0x2) ? 1 : + (spectral->params[params.smode].ss_chn_mask & 0x4) ? 2 : 3); + control_rssi = (uint8_t) + p_rfqual->pc_rssi_info[chn_idx_highest_enabled].rssi_pri20; + extension_rssi = (uint8_t) + p_rfqual->pc_rssi_info[chn_idx_highest_enabled].rssi_sec20; + + params.bwinfo = 0; + params.tstamp = 0; + params.max_mag = p_sfft->peak_mag; + + params.max_index = p_sfft->peak_inx; + params.max_exp = 0; + params.peak = 0; + params.bin_pwr_data = (uint8_t *)pfft; + params.freq = p_sops->get_current_channel(spectral); + params.freq_loading = 0; + + params.interf_list.count = 0; + params.max_lower_index = 0; + params.max_upper_index = 0; + params.nb_lower = 0; + params.nb_upper = 0; + /* + * For modes upto VHT80, the noise floor is populated with the + * one corresponding + * to the highest enabled antenna chain + */ + params.noise_floor = + p_rfqual->noise_floor[chn_idx_highest_enabled]; + params.datalen = ptlv->length; + params.pwr_count = ptlv->length - + sizeof(struct spectral_phyerr_hdr_gen2) - segid_skiplen; + params.tstamp = (tsf64 & SPECTRAL_TSMASK); + + acs_stats->ctrl_nf = params.noise_floor; + acs_stats->ext_nf = params.noise_floor; + acs_stats->nfc_ctl_rssi = control_rssi; + acs_stats->nfc_ext_rssi = extension_rssi; + + if (spectral->is_160_format && spectral->ch_width + [SPECTRAL_SCAN_MODE_NORMAL] == CH_WIDTH_160MHZ) { + /* + * We expect to see one more Search FFT report, and it + * should be equal in size to the current one. + */ + if (datalen < ( + 2 * ( + sizeof(struct spectral_phyerr_tlv_gen2) + + ptlv->length))) { + struct spectral_diag_stats *p_diag_stats = + &spectral->diag_stats; + p_diag_stats->spectral_sec80_sfft_insufflen++; + return -EPERM; + } + + ptlv_sec80 = (struct spectral_phyerr_tlv_gen2 *)( + data + + sizeof(struct spectral_phyerr_tlv_gen2) + + ptlv->length); + + if (ptlv_sec80->signature != + SPECTRAL_PHYERR_SIGNATURE_GEN2) { + spectral->diag_stats.spectral_mismatch++; + return -EPERM; + } + + if (ptlv_sec80->tag != TLV_TAG_SEARCH_FFT_REPORT_GEN2) { + spectral->diag_stats.spectral_no_sec80_sfft++; + return -EPERM; + } + + segid_sec80 = *((SPECTRAL_SEGID_INFO *)( + (uint8_t *)ptlv_sec80 + + sizeof(struct spectral_phyerr_tlv_gen2) + + sizeof(struct spectral_phyerr_hdr_gen2))); + + if (segid_sec80 != 1) { + struct spectral_diag_stats *p_diag_stats = + &spectral->diag_stats; + p_diag_stats->spectral_vhtseg2id_mismatch++; + return -EPERM; + } + + params.vhtop_ch_freq_seg1 = p_chaninfo->center_freq1; + params.vhtop_ch_freq_seg2 = p_chaninfo->center_freq2; + + target_if_process_sfft_report_gen2( + ptlv_sec80, + ptlv_sec80->length, + &search_fft_info_sec80); + + pfft_sec80 = (struct spectral_phyerr_fft_gen2 *)( + ((uint8_t *)ptlv_sec80) + + sizeof(struct spectral_phyerr_tlv_gen2) + + sizeof(struct spectral_phyerr_hdr_gen2) + + segid_skiplen); + + /* XXX: Confirm. TBD at SoD. */ + params.rssi_sec80 = p_rfqual->rssi_comb; + if (spectral->is_sec80_rssi_war_required) + params.rssi_sec80 = + target_if_get_combrssi_sec80_seg_gen2 + (spectral, &search_fft_info_sec80); + /* XXX: Determine dynamically. TBD at SoD. */ + /* + * For VHT80_80/VHT160, the noise floor for primary + * 80MHz segment is populated with the + * lowest enabled antenna chain and the noise floor for + * secondary 80MHz segment is populated + * with the highest enabled antenna chain + */ + params.noise_floor_sec80 = + p_rfqual->noise_floor[chn_idx_highest_enabled]; + params.noise_floor = + p_rfqual->noise_floor[chn_idx_lowest_enabled]; + + params.max_mag_sec80 = p_sfft_sec80->peak_mag; + params.max_index_sec80 = p_sfft_sec80->peak_inx; + /* XXX Does this definition of datalen *still hold? */ + params.datalen_sec80 = ptlv_sec80->length; + params.pwr_count_sec80 = + ptlv_sec80->length - + sizeof(struct spectral_phyerr_hdr_gen2) - + segid_skiplen; + params.bin_pwr_data_sec80 = (uint8_t *)pfft_sec80; + } + qdf_mem_copy(¶ms.classifier_params, + &spectral->classifier_params, + sizeof(struct spectral_classifier_params)); + + target_if_spectral_log_SAMP_param(¶ms); + target_if_spectral_create_samp_msg(spectral, ¶ms); + } + + return 0; +} + +int +target_if_spectral_dump_hdr_gen2(struct spectral_phyerr_hdr_gen2 *phdr) +{ + uint32_t a = 0; + uint32_t b = 0; + + qdf_mem_copy(&a, (uint8_t *)phdr, sizeof(int)); + qdf_mem_copy(&b, + (uint8_t *)((uint8_t *)phdr + sizeof(int)), + sizeof(int)); + + spectral_debug("SPECTRAL : HEADER A 0x%x (%d)", a, a); + spectral_debug("SPECTRAL : HEADER B 0x%x (%d)", b, b); + return 0; +} + +int8_t +target_if_get_combrssi_sec80_seg_gen2( + struct target_if_spectral *spectral, + struct spectral_search_fft_info_gen2 *p_sfft_sec80) +{ + uint32_t avgpwr_db = 0; + uint32_t total_gain_db = 0; + uint32_t offset = 0; + int8_t comb_rssi = 0; + + /* Obtain required parameters for algorithm from search FFT report */ + avgpwr_db = p_sfft_sec80->avgpwr_db; + total_gain_db = p_sfft_sec80->total_gain_info; + + /* Calculate offset */ + offset = target_if_get_offset_swar_sec80( + spectral->ch_width[SPECTRAL_SCAN_MODE_NORMAL]); + + /* Calculate RSSI */ + comb_rssi = ((avgpwr_db - total_gain_db) + offset); + + return comb_rssi; +} + +int +target_if_spectral_dump_tlv_gen2( + struct spectral_phyerr_tlv_gen2 *ptlv, bool is_160_format) +{ + int ret = 0; + + /* + * TODO : Do not delete the following print + * The scripts used to validate Spectral depend on this Print + */ + spectral_debug("SPECTRAL : TLV Length is 0x%x (%d)", + ptlv->length, ptlv->length); + + switch (ptlv->tag) { + case TLV_TAG_SPECTRAL_SUMMARY_REPORT_GEN2: + ret = + target_if_dump_summary_report_gen2( + ptlv, ptlv->length, is_160_format); + break; + + case TLV_TAG_SEARCH_FFT_REPORT_GEN2: + ret = + target_if_dump_sfft_report_gen2(ptlv, ptlv->length, + is_160_format); + break; + + case TLV_TAG_ADC_REPORT_GEN2: + ret = target_if_dump_adc_report_gen2(ptlv, ptlv->length); + break; + + default: + spectral_warn("INVALID TLV"); + ret = -1; + break; + } + + return ret; +} + +int +target_if_spectral_dump_phyerr_data_gen2(uint8_t *data, uint32_t datalen, + bool is_160_format) +{ + struct spectral_phyerr_tlv_gen2 *ptlv = NULL; + uint32_t bytes_processed = 0; + uint32_t bytes_remaining = datalen; + uint32_t curr_tlv_complete_size = 0; + + if (datalen < sizeof(struct spectral_phyerr_tlv_gen2)) { + spectral_err("Total PHY error data length %u too short to contain any TLVs", + datalen); + return -EPERM; + } + + while (bytes_processed < datalen) { + if (bytes_remaining < sizeof(struct spectral_phyerr_tlv_gen2)) { + spectral_err("Remaining PHY error data length %u too short to contain a TLV", + bytes_remaining); + return -EPERM; + } + + ptlv = (struct spectral_phyerr_tlv_gen2 *)(data + + bytes_processed); + + if (ptlv->signature != SPECTRAL_PHYERR_SIGNATURE_GEN2) { + spectral_err("Invalid signature 0x%x!", + ptlv->signature); + return -EPERM; + } + + curr_tlv_complete_size = + sizeof(struct spectral_phyerr_tlv_gen2) + + ptlv->length; + + if (curr_tlv_complete_size > bytes_remaining) { + spectral_err("TLV size %d greater than number of bytes remaining %d", + curr_tlv_complete_size, bytes_remaining); + return -EPERM; + } + + if (target_if_spectral_dump_tlv_gen2(ptlv, is_160_format) == -1) + return -EPERM; + + bytes_processed += curr_tlv_complete_size; + bytes_remaining = datalen - bytes_processed; + } + + return 0; +} + +#ifdef DIRECT_BUF_RX_ENABLE +/** + * target_if_spectral_get_bin_count_after_len_adj() - Get number of FFT bins in + * Spectral FFT report + * @fft_bin_len: FFT bin length reported by target + * @rpt_mode: Spectral report mode + * @swar: Spectral FFT bin length adjustments SWAR parameters + * + * Get actual number of FFT bins in the FFT report after adjusting the length + * by applying the SWARs for getting correct length. + * + * Return: FFT bin count + */ +static size_t +target_if_spectral_get_bin_count_after_len_adj( + size_t fft_bin_len, uint8_t rpt_mode, + struct spectral_fft_bin_len_adj_swar *swar) +{ + size_t fft_bin_count = fft_bin_len; + + if (rpt_mode == 1 && swar->null_fftbin_adj) { + /* + * No FFT bins are expected. Explicitly set FFT bin + * count to 0. + */ + fft_bin_count = 0; + } else { + /* + * Divide fft bin length by appropriate factor depending + * on the value of fftbin_size_war. + */ + switch (swar->fftbin_size_war) { + case SPECTRAL_FFTBIN_SIZE_WAR_4BYTE_TO_1BYTE: + fft_bin_count >>= 2; + break; + case SPECTRAL_FFTBIN_SIZE_WAR_2BYTE_TO_1BYTE: + fft_bin_count >>= 1; + /* Ideally we should be dividing fft bin length + * by 2. Due to a HW bug, actual length is two + * times the expected length. + */ + if (swar->packmode_fftbin_size_adj) + fft_bin_count >>= 1; + break; + case SPECTRAL_FFTBIN_SIZE_NO_WAR: + /* No length adjustment */ + break; + default: + qdf_assert_always(0); + } + + if (rpt_mode == 2 && swar->inband_fftbin_size_adj) + fft_bin_count >>= 1; + } + + return fft_bin_count; +} + +/** + * target_if_process_sfft_report_gen3() - Process Search FFT Report for gen3 + * @p_fft_report: Pointer to fft report + * @p_sfft: Pointer to search fft report + * + * Process Search FFT Report for gen3 + * + * Return: Success/Failure + */ +static int +target_if_process_sfft_report_gen3( + struct spectral_phyerr_fft_report_gen3 *p_fft_report, + struct spectral_search_fft_info_gen3 *p_sfft) +{ + /* + * For simplicity, everything is defined as uint32_t (except one). + * Proper code will later use the right sizes. + */ + /* + * For easy comparision between MDK team and OS team, the MDK script + * variable names have been used + */ + int32_t peak_sidx; + int32_t peak_mag; + + /* Populate the Search FFT Info */ + if (p_sfft) { + p_sfft->timestamp = p_fft_report->fft_timestamp; + + p_sfft->fft_detector_id = get_bitfield(p_fft_report->hdr_a, + 2, 0); + p_sfft->fft_num = get_bitfield(p_fft_report->hdr_a, 3, 2); + p_sfft->fft_radar_check = get_bitfield(p_fft_report->hdr_a, + 12, 5); + + peak_sidx = get_bitfield(p_fft_report->hdr_a, 11, 17); + p_sfft->fft_peak_sidx = unsigned_to_signed(peak_sidx, 11); + p_sfft->fft_chn_idx = get_bitfield(p_fft_report->hdr_a, 3, 28); + + p_sfft->fft_base_pwr_db = get_bitfield(p_fft_report->hdr_b, + 9, 0); + p_sfft->fft_total_gain_db = get_bitfield(p_fft_report->hdr_b, + 8, 9); + + p_sfft->fft_num_str_bins_ib = get_bitfield(p_fft_report->hdr_c, + 8, 0); + peak_mag = get_bitfield(p_fft_report->hdr_c, 10, 8); + p_sfft->fft_peak_mag = unsigned_to_signed(peak_mag, 10); + p_sfft->fft_avgpwr_db = get_bitfield(p_fft_report->hdr_c, + 7, 18); + p_sfft->fft_relpwr_db = get_bitfield(p_fft_report->hdr_c, + 7, 25); + } + + return 0; +} + +/** + * target_if_dump_fft_report_gen3() - Dump FFT Report for gen3 + * @spectral: Pointer to Spectral object + * @smode: Spectral scan mode + * @p_fft_report: Pointer to fft report + * @p_sfft: Pointer to search fft report + * + * Dump FFT Report for gen3 + * + * Return: void + */ +static void +target_if_dump_fft_report_gen3(struct target_if_spectral *spectral, + enum spectral_scan_mode smode, + struct spectral_phyerr_fft_report_gen3 *p_fft_report, + struct spectral_search_fft_info_gen3 *p_sfft) +{ + size_t fft_hdr_length = (p_fft_report->fft_hdr_length * 4); + size_t report_len = (fft_hdr_length + 8); + size_t fft_bin_len; + size_t fft_bin_count; + size_t fft_bin_len_inband_tfer = 0; + uint8_t *fft_bin_buf = NULL; + + fft_bin_len = fft_hdr_length - spectral->rparams.fft_report_hdr_len; + fft_bin_count = target_if_spectral_get_bin_count_after_len_adj( + fft_bin_len, + spectral->params[smode].ss_rpt_mode, + &spectral->len_adj_swar); + + if ((spectral->params[smode].ss_rpt_mode == 2) && + spectral->len_adj_swar.inband_fftbin_size_adj) + fft_bin_len_inband_tfer = fft_bin_len >> 1; + + spectral_debug("Spectral FFT Report"); + spectral_debug("fft_timestamp = 0x%x", p_fft_report->fft_timestamp); + spectral_debug("fft_hdr_length = %u(32 bit words)", + p_fft_report->fft_hdr_length); + spectral_debug("fft_hdr_tag = 0x%x", p_fft_report->fft_hdr_tag); + spectral_debug("fft_hdr_sig = 0x%x", p_fft_report->fft_hdr_sig); + + spectral_debug("Length field in search fft report is %zu(0x%zx) bytes", + fft_hdr_length, fft_hdr_length); + spectral_debug("Total length of search fft report is %zu(0x%zx) bytes", + report_len, report_len); + spectral_debug("Target reported fftbins in report is %zu(0x%zx)", + fft_bin_len, fft_bin_len); + + if ((spectral->params[smode].ss_rpt_mode == 1) && + spectral->len_adj_swar.null_fftbin_adj) + spectral_debug("WAR: Considering number of FFT bins as 0"); + else if ((spectral->params[smode].ss_rpt_mode == 2) && + spectral->len_adj_swar.inband_fftbin_size_adj) { + spectral_debug("FW fftbins actually transferred (in-band report mode) %zu(0x%zx)", + fft_bin_len_inband_tfer, + fft_bin_len_inband_tfer); + } + + spectral_debug("Actual number of fftbins in report is %zu(0x%zx)", + fft_bin_count, fft_bin_count); + + spectral_debug("fft_detector_id = %u", p_sfft->fft_detector_id); + spectral_debug("fft_num = %u", p_sfft->fft_num); + spectral_debug("fft_radar_check = %u", p_sfft->fft_radar_check); + spectral_debug("fft_peak_sidx = %d", p_sfft->fft_peak_sidx); + spectral_debug("fft_chn_idx = %u", p_sfft->fft_chn_idx); + spectral_debug("fft_base_pwr_db = %u", p_sfft->fft_base_pwr_db); + spectral_debug("fft_total_gain_db = %u", p_sfft->fft_total_gain_db); + spectral_debug("fft_num_str_bins_ib = %u", p_sfft->fft_num_str_bins_ib); + spectral_debug("fft_peak_mag = %d", p_sfft->fft_peak_mag); + spectral_debug("fft_avgpwr_db = %u", p_sfft->fft_avgpwr_db); + spectral_debug("fft_relpwr_db = %u", p_sfft->fft_relpwr_db); + + if (fft_bin_count > 0) { + int idx; + + spectral_debug("FFT bins:"); + if (spectral->len_adj_swar.fftbin_size_war == + SPECTRAL_FFTBIN_SIZE_WAR_4BYTE_TO_1BYTE) { + uint32_t *binptr_32 = (uint32_t *)&p_fft_report->buf; + + fft_bin_buf = (uint8_t *)qdf_mem_malloc(MAX_NUM_BINS); + if (!fft_bin_buf) { + spectral_err("Failed to allocate memory"); + return; + } + for (idx = 0; idx < fft_bin_count; idx++) + fft_bin_buf[idx] = *(binptr_32++); + } else if (spectral->len_adj_swar.fftbin_size_war == + SPECTRAL_FFTBIN_SIZE_WAR_2BYTE_TO_1BYTE) { + uint16_t *binptr_16 = (uint16_t *)&p_fft_report->buf; + + fft_bin_buf = (uint8_t *)qdf_mem_malloc(MAX_NUM_BINS); + if (!fft_bin_buf) { + spectral_err("Failed to allocate memory"); + return; + } + for (idx = 0; idx < fft_bin_count; idx++) + fft_bin_buf[idx] = *(binptr_16++); + } else { + fft_bin_buf = (uint8_t *)&p_fft_report->buf; + } + target_if_spectral_hexdump(fft_bin_buf, fft_bin_count); + if ((spectral->len_adj_swar.fftbin_size_war != + SPECTRAL_FFTBIN_SIZE_NO_WAR) && fft_bin_buf) + qdf_mem_free(fft_bin_buf); + } +} +#endif + +QDF_STATUS +target_if_160mhz_delivery_state_change(struct target_if_spectral *spectral, + uint8_t detector_id) { + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (spectral->ch_width[SPECTRAL_SCAN_MODE_NORMAL] != CH_WIDTH_160MHZ) + return QDF_STATUS_E_FAILURE; + + /* agile reports should not be coupled with 160 MHz state machine + * for normal Spectral + */ + if (detector_id == SPECTRAL_DETECTOR_AGILE) + return QDF_STATUS_SUCCESS; + + switch (spectral->state_160mhz_delivery) { + case SPECTRAL_REPORT_WAIT_PRIMARY80: + if (detector_id == SPECTRAL_DETECTOR_PRIMARY) + spectral->state_160mhz_delivery = + SPECTRAL_REPORT_RX_PRIMARY80; + else { + status = QDF_STATUS_E_FAILURE; + spectral->diag_stats.spectral_vhtseg1id_mismatch++; + } + break; + + case SPECTRAL_REPORT_WAIT_SECONDARY80: + if (detector_id == SPECTRAL_DETECTOR_SECONDARY) + spectral->state_160mhz_delivery = + SPECTRAL_REPORT_RX_SECONDARY80; + else { + spectral->state_160mhz_delivery = + SPECTRAL_REPORT_WAIT_PRIMARY80; + status = QDF_STATUS_E_FAILURE; + spectral->diag_stats.spectral_vhtseg2id_mismatch++; + } + break; + + case SPECTRAL_REPORT_RX_SECONDARY80: + /* We don't care about detector id in this state. */ + reset_160mhz_delivery_state_machine(spectral, + SPECTRAL_SCAN_MODE_NORMAL); + break; + + case SPECTRAL_REPORT_RX_PRIMARY80: + /* We don't care about detector id in this state */ + spectral->state_160mhz_delivery = + SPECTRAL_REPORT_WAIT_SECONDARY80; + break; + + default: + break; + } + + return status; +} + +#ifdef DIRECT_BUF_RX_ENABLE +/** + * target_if_get_detector_id_sscan_summary_report_gen3() - Get Spectral detector + * ID from Spectral summary report + * @data: Pointer to Spectral summary report + * + * Return: Detector ID + */ +static uint8_t +target_if_get_detector_id_sscan_summary_report_gen3(uint8_t *data) { + struct spectral_sscan_summary_report_gen3 *psscan_summary_report; + uint8_t detector_id; + + qdf_assert_always(data); + + psscan_summary_report = + (struct spectral_sscan_summary_report_gen3 *)data; + + detector_id = get_bitfield( + psscan_summary_report->hdr_a, + SSCAN_SUMMARY_REPORT_HDR_A_DETECTOR_ID_SIZE_GEN3, + SSCAN_SUMMARY_REPORT_HDR_A_DETECTOR_ID_POS_GEN3); + + return detector_id; +} + +/** + * target_if_consume_sscan_summary_report_gen3() - Consume Spectral summary + * report + * @data: Pointer to Spectral summary report + * @fields: Pointer to structure to be populated with extracted fields + * @rparams: Pointer to structure with Spectral report params + * + * Consume Spectral summary report for gen3 + * + * Return: void + */ +static void +target_if_consume_sscan_summary_report_gen3( + uint8_t *data, + struct sscan_report_fields_gen3 *fields, + struct spectral_report_params *rparams) { + struct spectral_sscan_summary_report_gen3 *psscan_summary_report; + + qdf_assert_always(data); + qdf_assert_always(fields); + qdf_assert_always(rparams); + + psscan_summary_report = + (struct spectral_sscan_summary_report_gen3 *)data; + + fields->sscan_agc_total_gain = get_bitfield( + psscan_summary_report->hdr_a, + SSCAN_SUMMARY_REPORT_HDR_A_AGC_TOTAL_GAIN_SIZE_GEN3, + SSCAN_SUMMARY_REPORT_HDR_A_AGC_TOTAL_GAIN_POS_GEN3); + fields->inband_pwr_db = get_bitfield( + psscan_summary_report->hdr_a, + SSCAN_SUMMARY_REPORT_HDR_A_INBAND_PWR_DB_SIZE_GEN3, + SSCAN_SUMMARY_REPORT_HDR_A_INBAND_PWR_DB_POS_GEN3); + fields->sscan_pri80 = get_bitfield( + psscan_summary_report->hdr_a, + SSCAN_SUMMARY_REPORT_HDR_A_PRI80_SIZE_GEN3, + SSCAN_SUMMARY_REPORT_HDR_A_PRI80_POS_GEN3); + + switch (rparams->version) { + case SPECTRAL_REPORT_FORMAT_VERSION_1: + fields->sscan_gainchange = get_bitfield( + psscan_summary_report->hdr_b, + SSCAN_SUMMARY_REPORT_HDR_B_GAINCHANGE_SIZE_GEN3_V1, + SSCAN_SUMMARY_REPORT_HDR_B_GAINCHANGE_POS_GEN3_V1); + break; + case SPECTRAL_REPORT_FORMAT_VERSION_2: + fields->sscan_gainchange = get_bitfield( + psscan_summary_report->hdr_c, + SSCAN_SUMMARY_REPORT_HDR_C_GAINCHANGE_SIZE_GEN3_V2, + SSCAN_SUMMARY_REPORT_HDR_C_GAINCHANGE_POS_GEN3_V2); + break; + default: + qdf_assert_always(0); + } +} + +/** + * target_if_verify_sig_and_tag_gen3() - Verify tag and signature + * of spectral report + * @spectral: Pointer to spectral object + * @data: Pointer to spectral summary report + * @exp_tag: iexpected tag value + * + * Process fft report for gen3 + * + * Return: SUCCESS/FAILURE + */ +static int +target_if_verify_sig_and_tag_gen3(struct target_if_spectral *spectral, + uint8_t *data, uint8_t exp_tag) +{ + uint8_t tag = 0; + uint8_t signature = 0; + + /* Peek into the data to figure out whether + * 1) Signature matches the expected value + * 2) What is inside the package (TAG ID is used for finding this) + */ + tag = *(data + PHYERR_HDR_TAG_POS); + signature = *(data + PHYERR_HDR_SIG_POS); + + if (signature != SPECTRAL_PHYERR_SIGNATURE_GEN3) { + spectral->diag_stats.spectral_mismatch++; + return -EINVAL; + } + + if (tag != exp_tag) { + spectral->diag_stats.spectral_mismatch++; + return -EINVAL; + } + + return 0; +} + +static uint8_t +target_if_spectral_get_lowest_chn_idx(uint8_t chainmask) +{ + uint8_t idx; + + for (idx = 0; idx < DBR_MAX_CHAINS; idx++) { + if (chainmask & 0x1) + break; + chainmask >>= 1; + } + return idx; +} + +static QDF_STATUS +target_if_get_spectral_mode(enum spectral_detector_id detector_id, + enum spectral_scan_mode *smode) { + switch (detector_id) { + case SPECTRAL_DETECTOR_PRIMARY: + case SPECTRAL_DETECTOR_SECONDARY: + *smode = SPECTRAL_SCAN_MODE_NORMAL; + break; + + case SPECTRAL_DETECTOR_AGILE: + *smode = SPECTRAL_SCAN_MODE_AGILE; + break; + + default: + spectral_err("Invalid Spectral detector id"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef DIRECT_BUF_RX_DEBUG +static void target_if_spectral_check_buffer_poisoning( + struct target_if_spectral *spectral, + struct spectral_report *report, + int num_fft_bins, enum spectral_scan_mode smode) +{ + uint32_t *data; + size_t len; + size_t words_to_check = + sizeof(struct spectral_sscan_summary_report_gen3) >> 2; + bool poisoned_words_found = false; + + if (!spectral) { + spectral_err_rl("Spectral LMAC object is null"); + return; + } + + if (!spectral->dbr_buff_debug) + return; + + if (!report) { + spectral_err_rl("Spectral report is null"); + return; + } + + /* Add search FFT report */ + if (spectral->params[smode].ss_rpt_mode > 0) + words_to_check += + sizeof(struct spectral_phyerr_fft_report_gen3) >> 2; + + /* Now add the number of FFT bins */ + if (spectral->params[smode].ss_rpt_mode > 1) { + /* Caller should take care to pass correct number of FFT bins */ + if (spectral->len_adj_swar.fftbin_size_war == + SPECTRAL_FFTBIN_SIZE_WAR_4BYTE_TO_1BYTE) + words_to_check += num_fft_bins; + else if (spectral->len_adj_swar.fftbin_size_war == + SPECTRAL_FFTBIN_SIZE_WAR_2BYTE_TO_1BYTE) + words_to_check += (num_fft_bins >> 1); + } + + data = (uint32_t *)report->data; + for (len = 0; len < words_to_check; ++len) { + if (*data == MEM_POISON_SIGNATURE) { + spectral_err("Pattern(%x) found in Spectral search FFT report at position %zu in the buffer %pK", + MEM_POISON_SIGNATURE, + (len << 2), report->data); + poisoned_words_found = true; + break; + } + ++data; + } + + /* Crash the FW even if one word is poisoned */ + if (poisoned_words_found) { + spectral_err("Pattern(%x) found in Spectral report, Hex dump of the sfft follows", + MEM_POISON_SIGNATURE); + target_if_spectral_hexdump((unsigned char *)report->data, + words_to_check << 2); + spectral_err("Asserting the FW"); + target_if_spectral_fw_hang(spectral); + } +} + +static void target_if_spectral_verify_ts(struct target_if_spectral *spectral, + uint8_t *buf, uint32_t current_ts) +{ + if (!spectral) { + spectral_err_rl("Spectral LMAC object is null"); + return; + } + + if (!spectral->dbr_buff_debug) + return; + + if (spectral->prev_tstamp) { + if (current_ts == spectral->prev_tstamp) { + spectral_err("Spectral timestamp(%u) in the current buffer(%pK) is equal to the previous timestamp, same report DMAed twice? Asserting the FW", + current_ts, buf); + target_if_spectral_fw_hang(spectral); + } + } + spectral->prev_tstamp = current_ts; +} +#else +static void target_if_spectral_check_buffer_poisoning( + struct target_if_spectral *spectral, + struct spectral_report *report, + int num_fft_bins, enum spectral_scan_mode smode) +{ +} + +static void target_if_spectral_verify_ts(struct target_if_spectral *spectral, + uint8_t *buf, uint32_t current_ts) +{ +} +#endif + +/** + * target_if_spectral_get_adjusted_timestamp() - Adjust Spectral time + * stamp to account for reset in time stamp due to target reset + * @twar: Spectral time stamp WAR related information + * @raw_timestamp: Spectral time stamp reported by target + * @reset_delay: Reset delay at target + * @smode: Spectral scan mode + * + * Correct time stamp to account for reset in time stamp due to target reset + * + * Return: Adjusted time stamp + */ +static uint32_t +target_if_spectral_get_adjusted_timestamp(struct spectral_timestamp_war *twar, + uint32_t raw_timestamp, + uint32_t reset_delay, + enum spectral_scan_mode smode) { + qdf_assert_always(smode < SPECTRAL_SCAN_MODE_MAX); + + if (reset_delay) { + enum spectral_scan_mode m = + SPECTRAL_SCAN_MODE_NORMAL; + + /* Adjust the offset for all the Spectral modes. + * Target will be sending the non zero reset delay for + * the first Spectral report after reset. This delay is + * common for all the Spectral modes. + */ + for (; m < SPECTRAL_SCAN_MODE_MAX; m++) + twar->timestamp_war_offset[m] += (reset_delay + + twar->last_fft_timestamp[m]); + twar->target_reset_count++; + } + twar->last_fft_timestamp[smode] = raw_timestamp; + + return raw_timestamp + twar->timestamp_war_offset[smode]; +} + +int +target_if_consume_spectral_report_gen3( + struct target_if_spectral *spectral, + struct spectral_report *report) +{ + /* + * XXX : The classifier do not use all the members of the SAMP + * message data format. + * The classifier only depends upon the following parameters + * + * 1. Frequency (freq, msg->freq) + * 2. Spectral RSSI (spectral_rssi, + * msg->samp_data.spectral_rssi) + * 3. Bin Power Count (bin_pwr_count, + * msg->samp_data.bin_pwr_count) + * 4. Bin Power values (bin_pwr, msg->samp_data.bin_pwr[0] + * 5. Spectral Timestamp (spectral_tstamp, + * msg->samp_data.spectral_tstamp) + * 6. MAC Address (macaddr, msg->macaddr) + * + * This function prepares the params structure and populates it + * with + * relevant values, this is in turn passed to + * spectral_create_samp_msg() + * to prepare fully formatted Spectral SAMP message + * + * XXX : Need to verify + * 1. Order of FFT bin values + * + */ + struct target_if_samp_msg_params params = {0}; + struct spectral_search_fft_info_gen3 search_fft_info; + struct spectral_search_fft_info_gen3 *p_sfft = &search_fft_info; + int8_t chn_idx_lowest_enabled = 0; + int fft_hdr_length = 0; + int report_len = 0; + size_t fft_bin_count; + struct target_if_spectral_ops *p_sops = + GET_TARGET_IF_SPECTRAL_OPS(spectral); + struct spectral_phyerr_fft_report_gen3 *p_fft_report; + int8_t rssi; + uint8_t *data = report->data; + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_rxchainmask; + struct sscan_report_fields_gen3 sscan_report_fields = {0}; + enum spectral_detector_id detector_id; + QDF_STATUS ret; + + params.smode = SPECTRAL_SCAN_MODE_NORMAL; + + /* Process Spectral scan summary report */ + if (target_if_verify_sig_and_tag_gen3( + spectral, data, + TLV_TAG_SPECTRAL_SUMMARY_REPORT_GEN3) != 0) { + spectral_err_rl("Wrong tag/sig in sscan summary"); + goto fail; + } + + detector_id = target_if_get_detector_id_sscan_summary_report_gen3(data); + if (detector_id > SPECTRAL_DETECTOR_AGILE) { + spectral->diag_stats.spectral_invalid_detector_id++; + spectral_err("Invalid detector id %u, expected is 0/1/2", + detector_id); + goto fail; + } + target_if_consume_sscan_summary_report_gen3(data, &sscan_report_fields, + &spectral->rparams); + /* Advance buf pointer to the search fft report */ + data += sizeof(struct spectral_sscan_summary_report_gen3); + data += spectral->rparams.ssumaary_padding_bytes; + + if ((detector_id == SPECTRAL_DETECTOR_AGILE) || + is_primaryseg_expected(spectral)) { + /* RSSI is in 1/2 dBm steps, Covert it to dBm scale */ + rssi = (sscan_report_fields.inband_pwr_db) >> 1; + params.agc_total_gain = + sscan_report_fields.sscan_agc_total_gain; + params.gainchange = sscan_report_fields.sscan_gainchange; + params.pri80ind = sscan_report_fields.sscan_pri80; + + /* Process Spectral search FFT report */ + if (target_if_verify_sig_and_tag_gen3( + spectral, data, + TLV_TAG_SEARCH_FFT_REPORT_GEN3) != 0) { + spectral_err_rl("Unexpected tag/sig in sfft, detid= %u", + detector_id); + goto fail; + } + p_fft_report = (struct spectral_phyerr_fft_report_gen3 *)data; + fft_hdr_length = p_fft_report->fft_hdr_length * 4; + if (fft_hdr_length < 16) { + spectral_err("Wrong TLV length %u, detector id = %d", + fft_hdr_length, detector_id); + goto fail; + } + + report_len = (fft_hdr_length + 8); + + target_if_process_sfft_report_gen3(p_fft_report, p_sfft); + /* It is expected to have same detector id for + * summary and fft report + */ + if (detector_id != p_sfft->fft_detector_id) { + spectral_err_rl + ("Different detid in ssummary(%u) and sfft(%u)", + detector_id, p_sfft->fft_detector_id); + goto fail; + } + + if (detector_id > SPECTRAL_DETECTOR_AGILE) { + spectral->diag_stats.spectral_invalid_detector_id++; + spectral_err("Invalid detector id %u, expected is 0/2", + detector_id); + goto fail; + } + + ret = target_if_get_spectral_mode(detector_id, ¶ms.smode); + if (QDF_IS_STATUS_ERROR(ret)) { + spectral_err_rl("Failed to get mode from detid= %u", + detector_id); + goto fail; + } + + fft_bin_count = target_if_spectral_get_bin_count_after_len_adj( + fft_hdr_length - spectral->rparams.fft_report_hdr_len, + spectral->params[params.smode].ss_rpt_mode, + &spectral->len_adj_swar); + + params.last_raw_timestamp = spectral->timestamp_war. + last_fft_timestamp[params.smode]; + params.reset_delay = report->reset_delay; + params.raw_timestamp = p_sfft->timestamp; + params.tstamp = target_if_spectral_get_adjusted_timestamp( + &spectral->timestamp_war, + p_sfft->timestamp, report->reset_delay, + params.smode); + params.timestamp_war_offset = spectral->timestamp_war. + timestamp_war_offset[params.smode]; + params.target_reset_count = spectral->timestamp_war. + target_reset_count; + + /* Take care of state transitions for 160 MHz and 80p80 */ + if (spectral->ch_width[SPECTRAL_SCAN_MODE_NORMAL] == + CH_WIDTH_160MHZ) { + ret = target_if_160mhz_delivery_state_change( + spectral, + detector_id); + if (ret != QDF_STATUS_SUCCESS) + goto fail; + } + + if (spectral_debug_level & (DEBUG_SPECTRAL2 | DEBUG_SPECTRAL4)) + target_if_dump_fft_report_gen3(spectral, params.smode, + p_fft_report, p_sfft); + + params.rssi = rssi; + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) { + spectral_info("First vdev is NULL"); + reset_160mhz_delivery_state_machine + (spectral, + SPECTRAL_SCAN_MODE_NORMAL); + return -EPERM; + } + vdev_rxchainmask = wlan_vdev_mlme_get_rxchainmask(vdev); + QDF_ASSERT(vdev_rxchainmask != 0); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + chn_idx_lowest_enabled = + target_if_spectral_get_lowest_chn_idx(vdev_rxchainmask); + if (chn_idx_lowest_enabled >= DBR_MAX_CHAINS) { + spectral_err("Invalid chain index, detector id = %u", + detector_id); + goto fail; + } + + params.max_mag = p_sfft->fft_peak_mag; + + params.bin_pwr_data = (uint8_t *)((uint8_t *)p_fft_report + + SPECTRAL_FFT_BINS_POS); + params.freq = p_sops->get_current_channel(spectral); + + if (params.smode == SPECTRAL_SCAN_MODE_AGILE) + params.agile_freq = + spectral->params[params.smode].ss_frequency; + + /* + * For modes upto VHT80, the noise floor is populated with + * the one corresponding + * to the highest enabled antenna chain + */ + /* TODO: Fill proper values once FW provides them*/ + params.noise_floor = + report->noisefloor[chn_idx_lowest_enabled]; + params.datalen = (fft_hdr_length * 4); + params.pwr_count = fft_bin_count; + + target_if_spectral_verify_ts(spectral, report->data, + params.tstamp); + } else if (is_secondaryseg_expected(spectral)) { + /* RSSI is in 1/2 dBm steps, Covert it to dBm scale */ + rssi = (sscan_report_fields.inband_pwr_db) >> 1; + params.agc_total_gain_sec80 = + sscan_report_fields.sscan_agc_total_gain; + params.gainchange_sec80 = sscan_report_fields.sscan_gainchange; + params.pri80ind_sec80 = sscan_report_fields.sscan_pri80; + + /* Process Spectral search FFT report */ + if (target_if_verify_sig_and_tag_gen3( + spectral, data, + TLV_TAG_SEARCH_FFT_REPORT_GEN3) != 0) { + spectral_err_rl("Unexpected tag/sig in sfft, detid= %u", + detector_id); + goto fail; + } + p_fft_report = (struct spectral_phyerr_fft_report_gen3 *)data; + fft_hdr_length = p_fft_report->fft_hdr_length * 4; + if (fft_hdr_length < 16) { + spectral_err("Wrong TLV length %u, detector id = %u", + fft_hdr_length, detector_id); + goto fail; + } + + report_len = (fft_hdr_length + 8); + + target_if_process_sfft_report_gen3(p_fft_report, p_sfft); + /* It is expected to have same detector id for + * summary and fft report + */ + if (detector_id != p_sfft->fft_detector_id) { + spectral_err_rl + ("Different detid in ssummary(%u) and sfft(%u)", + detector_id, p_sfft->fft_detector_id); + goto fail; + } + + if (detector_id > SPECTRAL_DETECTOR_AGILE) { + spectral->diag_stats.spectral_invalid_detector_id++; + spectral_err("Invalid detector id %u, expected is 1", + detector_id); + goto fail; + } + + ret = target_if_get_spectral_mode(detector_id, ¶ms.smode); + if (QDF_IS_STATUS_ERROR(ret)) { + spectral_err("Failed to get mode from detid= %u", + detector_id); + goto fail; + } + + fft_bin_count = target_if_spectral_get_bin_count_after_len_adj( + fft_hdr_length - spectral->rparams.fft_report_hdr_len, + spectral->params[params.smode].ss_rpt_mode, + &spectral->len_adj_swar); + params.raw_timestamp_sec80 = p_sfft->timestamp; + + /* Take care of state transitions for 160 MHz and 80p80 */ + if (spectral->ch_width[SPECTRAL_SCAN_MODE_NORMAL] == + CH_WIDTH_160MHZ) { + ret = target_if_160mhz_delivery_state_change( + spectral, + detector_id); + if (ret != QDF_STATUS_SUCCESS) + goto fail; + } + + if (spectral_debug_level & (DEBUG_SPECTRAL2 | DEBUG_SPECTRAL4)) + target_if_dump_fft_report_gen3(spectral, params.smode, + p_fft_report, p_sfft); + + params.vhtop_ch_freq_seg1 = 0; + params.vhtop_ch_freq_seg2 = 0; + + params.rssi_sec80 = rssi; + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) { + spectral_info("First vdev is NULL"); + reset_160mhz_delivery_state_machine + (spectral, + SPECTRAL_SCAN_MODE_NORMAL); + return -EPERM; + } + vdev_rxchainmask = wlan_vdev_mlme_get_rxchainmask(vdev); + QDF_ASSERT(vdev_rxchainmask != 0); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + chn_idx_lowest_enabled = + target_if_spectral_get_lowest_chn_idx(vdev_rxchainmask); + if (chn_idx_lowest_enabled >= DBR_MAX_CHAINS) { + spectral_err("Invalid chain index"); + goto fail; + } + + /* Need to change this as per FW team's inputs */ + params.noise_floor_sec80 = + report->noisefloor[chn_idx_lowest_enabled]; + + params.max_mag_sec80 = p_sfft->fft_peak_mag; + /* params.max_index_sec80 = p_sfft->peak_inx; */ + /* XXX Does this definition of datalen *still hold? */ + params.datalen_sec80 = fft_hdr_length * 4; + params.pwr_count_sec80 = fft_bin_count; + params.bin_pwr_data_sec80 = + (uint8_t *)((uint8_t *)p_fft_report + + SPECTRAL_FFT_BINS_POS); + } else { + spectral_err("Spectral state machine in undefined state"); + goto fail; + } + + target_if_spectral_check_buffer_poisoning(spectral, report, + fft_bin_count, params.smode); + qdf_mem_copy(¶ms.classifier_params, + &spectral->classifier_params, + sizeof(struct spectral_classifier_params)); + + target_if_spectral_log_SAMP_param(¶ms); + target_if_spectral_create_samp_msg(spectral, ¶ms); + + return 0; + fail: + spectral_err_rl("Error while processing Spectral report"); + reset_160mhz_delivery_state_machine(spectral, + SPECTRAL_SCAN_MODE_NORMAL); + return -EPERM; +} + +int target_if_spectral_process_report_gen3( + struct wlan_objmgr_pdev *pdev, + void *buf) +{ + int ret = 0; + struct direct_buf_rx_data *payload = buf; + struct target_if_spectral *spectral; + struct spectral_report report; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("Spectral target object is null"); + return -EINVAL; + } + + report.data = payload->vaddr; + if (payload->meta_data_valid) { + qdf_mem_copy(report.noisefloor, payload->meta_data.noisefloor, + qdf_min(sizeof(report.noisefloor), + sizeof(payload->meta_data.noisefloor))); + report.reset_delay = payload->meta_data.reset_delay; + } + + if (spectral_debug_level & (DEBUG_SPECTRAL2 | DEBUG_SPECTRAL4)) { + spectral_debug("Printing the spectral phyerr buffer for debug"); + spectral_debug("Datalength of buffer = 0x%zx(%zd) bufptr = 0x%pK", + payload->dbr_len, + payload->dbr_len, + payload->vaddr); + target_if_spectral_hexdump((unsigned char *)payload->vaddr, + 1024); + } + + ret = target_if_consume_spectral_report_gen3(spectral, &report); + + if (spectral_debug_level & DEBUG_SPECTRAL4) + spectral_debug_level = DEBUG_SPECTRAL; + + return ret; +} +#else +int target_if_spectral_process_report_gen3( + struct wlan_objmgr_pdev *pdev, + void *buf) +{ + spectral_err("Direct dma support is not enabled"); + return -EINVAL; +} +#endif +qdf_export_symbol(target_if_spectral_process_report_gen3); +/* END of spectral GEN III HW specific functions */ + +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim.c b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim.c new file mode 100644 index 0000000000000000000000000000000000000000..40378bbaae7c86e644a3820383572d654925c0ae --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim.c @@ -0,0 +1,970 @@ +/* + * Copyright (c) 2015,2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifdef QCA_SUPPORT_SPECTRAL_SIMULATION +#include "target_if_spectral.h" +#include "target_if_spectral_sim.h" +#include "target_if_spectral_sim_int.h" +#include "_ieee80211.h" +#include "ieee80211_api.h" +#include "ieee80211_defines.h" +#include "qdf_types.h" +#include "ieee80211_var.h" +#include +#include + +/* Helper functions */ + +static int target_if_populate_report_static_gen2( + struct spectralsim_report *report, + enum phy_ch_width width, bool is_80_80); +static int target_if_populate_report_static_gen3( + struct spectralsim_report *report, + enum phy_ch_width width, bool is_80_80); +static void target_if_depopulate_report( + struct spectralsim_report *report); + +static int target_if_populate_reportset_static( + struct spectralsim_context *simctx, + struct spectralsim_reportset *reportset, + enum phy_ch_width width, bool is_80_80); +static void target_if_depopulate_reportset( + struct spectralsim_reportset * + reportset); + +static int target_if_populate_simdata(struct spectralsim_context *simctx); +static void target_if_depopulate_simdata(struct spectralsim_context *simctx); +static OS_TIMER_FUNC(target_if_spectral_sim_phyerrdelivery_handler); + +/* + * Static configuration. + * For now, we will be having a single configuration per BW, and a single + * report per configuration (since we need the data only for ensuring correct + * format handling). + * + * Extend this for more functionality if required in the future. + */ + +/** + * target_if_populate_report_static_gen2() - Statically populate simulation + * data for one report for generation 2 chipsets + * @report: Pointer to spectral report data instance + * @width : Channel bandwidth enumeration + * @is_80_80: Whether the channel is operating in 80-80 mode + * + * Statically populate simulation data for one report for generation 2 chipsets + * + * Return: 0 on success, negative error code on failure + */ +static int +target_if_populate_report_static_gen2( + struct spectralsim_report *report, + enum phy_ch_width width, bool is_80_80) +{ + qdf_assert_always(report); + + switch (width) { + case CH_WIDTH_20MHZ: + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_20_gen2)); + + if (!report->data) + goto bad; + + report->datasize = sizeof(reportdata_20_gen2); + qdf_mem_copy(report->data, + reportdata_20_gen2, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_20, sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_20, sizeof(report->chan_info)); + + break; + case CH_WIDTH_40MHZ: + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_40_gen2)); + + if (!report->data) + goto bad; + + report->datasize = sizeof(reportdata_40_gen2); + qdf_mem_copy(report->data, + reportdata_40_gen2, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_40, sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_40, sizeof(report->chan_info)); + + break; + case CH_WIDTH_80MHZ: + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_80_gen2)); + + if (!report->data) + goto bad; + + report->datasize = sizeof(reportdata_80_gen2); + qdf_mem_copy(report->data, + reportdata_80_gen2, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_80, sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_80, sizeof(report->chan_info)); + + break; + case CH_WIDTH_160MHZ: + if (is_80_80) { + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_80_80_gen2)); + + if (!report->data) + goto bad; + + report->datasize = sizeof(reportdata_80_80_gen2); + qdf_mem_copy(report->data, + reportdata_80_80_gen2, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_80_80, + sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_80_80, + sizeof(report->chan_info)); + + } else { + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_160_gen2)); + + if (!report->data) + goto bad; + + report->datasize = sizeof(reportdata_160_gen2); + qdf_mem_copy(report->data, + reportdata_160_gen2, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_160, + sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_160, sizeof(report->chan_info)); + } + break; + default: + spectral_err("Unhandled width. Please correct. Asserting"); + qdf_assert_always(0); + } + + return 0; + + bad: + return -EPERM; +} + +/** + * target_if_populate_report_static_gen3() - Statically populate simulation + * data for one report for generation 3 chipsets + * @report: Pointer to spectral report data instance + * @width : Channel bandwidth enumeration + * @is_80_80: Whether the channel is operating in 80-80 mode + * + * Statically populate simulation data for one report for generation 3 chipsets + * + * Return: 0 on success, negative error code on failure + */ +static int +target_if_populate_report_static_gen3( + struct spectralsim_report *report, + enum phy_ch_width width, bool is_80_80) +{ + qdf_assert_always(report); + + switch (width) { + case CH_WIDTH_20MHZ: + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_20_gen3)); + + if (!report->data) + goto bad; + + report->datasize = sizeof(reportdata_20_gen3); + qdf_mem_copy(report->data, + reportdata_20_gen3, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_20, sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_20, sizeof(report->chan_info)); + + break; + case CH_WIDTH_40MHZ: + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_40_gen3)); + + if (!report->data) + goto bad; + + report->datasize = sizeof(reportdata_40_gen3); + qdf_mem_copy(report->data, + reportdata_40_gen3, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_40, sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_40, sizeof(report->chan_info)); + + break; + case CH_WIDTH_80MHZ: + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_80_gen3)); + + if (!report->data) + goto bad; + + report->datasize = sizeof(reportdata_80_gen3); + qdf_mem_copy(report->data, + reportdata_80_gen3, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_80, sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_80, sizeof(report->chan_info)); + + break; + case CH_WIDTH_160MHZ: + if (is_80_80) { + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_80_80_gen3)); + + if (!report->data) + goto bad; + + report->datasize = sizeof(reportdata_80_80_gen3); + qdf_mem_copy(report->data, + reportdata_80_80_gen3, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_80_80, + sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_80_80, + sizeof(report->chan_info)); + + } else { + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_160_gen3)); + + if (!report->data) + goto bad; + + report->datasize = sizeof(reportdata_160_gen3); + qdf_mem_copy(report->data, + reportdata_160_gen3, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_160, + sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_160, sizeof(report->chan_info)); + } + break; + default: + spectral_err("Unhandled width. Please correct. Asserting"); + qdf_assert_always(0); + } + + return 0; + + bad: + return -EPERM; +} + +/** + * target_if_depopulate_report() - Free the given instances of + * struct spectralsim_report + * @report: instance of struct spectralsim_report + * + * Free the given instances of struct spectralsim_report + * + * Return: None + */ +static void +target_if_depopulate_report( + struct spectralsim_report *report) +{ + if (!report) + return; + + if (report->data) { + qdf_mem_free(report->data); + report->data = NULL; + report->datasize = 0; + } +} + +/** + * target_if_populate_reportset_static() - Statically populate simulation data + * for a given configuration + * @simctx: Pointer to struct spectralsim_context + * @reportset: Set of spectral report data instances + * @width : Channel bandwidth enumeration + * @is_80_80: Whether the channel is operating in 80+80 mode + * + * Statically populate simulation data for a given configuration + * + * Return: 0 on success, negative error code on failure + */ +static int +target_if_populate_reportset_static( + struct spectralsim_context *simctx, + struct spectralsim_reportset *reportset, + enum phy_ch_width width, bool is_80_80) +{ + int ret = 0; + struct spectralsim_report *report = NULL; + + qdf_assert_always(reportset); + + reportset->headreport = NULL; + reportset->curr_report = NULL; + + /* For now, we populate only one report */ + report = (struct spectralsim_report *) + qdf_mem_malloc(sizeof(struct spectralsim_report)); + + if (!report) + goto bad; + + qdf_mem_zero(report, sizeof(*report)); + + switch (width) { + case CH_WIDTH_20MHZ: + qdf_mem_copy(&reportset->config, + &config_20_1, sizeof(reportset->config)); + + ret = simctx->populate_report_static(report, CH_WIDTH_20MHZ, 0); + if (ret != 0) + goto bad; + + report->next = NULL; + reportset->headreport = report; + break; + case CH_WIDTH_40MHZ: + qdf_mem_copy(&reportset->config, + &config_40_1, sizeof(reportset->config)); + + ret = simctx->populate_report_static(report, CH_WIDTH_40MHZ, 0); + if (ret != 0) + goto bad; + + report->next = NULL; + reportset->headreport = report; + break; + case CH_WIDTH_80MHZ: + qdf_mem_copy(&reportset->config, + &config_80_1, sizeof(reportset->config)); + + ret = simctx->populate_report_static(report, CH_WIDTH_80MHZ, 0); + if (ret != 0) + goto bad; + + report->next = NULL; + reportset->headreport = report; + break; + case CH_WIDTH_160MHZ: + if (is_80_80) { + qdf_mem_copy(&reportset->config, + &config_80_80_1, + sizeof(reportset->config)); + + ret = simctx->populate_report_static(report, + CH_WIDTH_160MHZ, + 1); + if (ret != 0) + goto bad; + + report->next = NULL; + reportset->headreport = report; + } else { + qdf_mem_copy(&reportset->config, + &config_160_1, sizeof(reportset->config)); + + ret = simctx->populate_report_static(report, + CH_WIDTH_160MHZ, + 0); + if (ret != 0) + goto bad; + + report->next = NULL; + reportset->headreport = report; + } + break; + default: + spectral_err("Unhandled width. Please rectify."); + qdf_assert_always(0); + }; + + reportset->curr_report = reportset->headreport; + + return 0; + + bad: + target_if_depopulate_reportset(reportset); + return -EPERM; +} + +/** + * target_if_depopulate_reportset() - Free all the instances of + * struct spectralsim_reportset + * @report: head pointer to struct spectralsim_reportset linked list + * + * Free all the instances of struct spectralsim_reportset + * + * Return: None + */ +static void +target_if_depopulate_reportset( + struct spectralsim_reportset *reportset) +{ + struct spectralsim_report *curr_report = NULL; + struct spectralsim_report *next_report = NULL; + + if (!reportset) + return; + + curr_report = reportset->headreport; + + while (curr_report) { + next_report = curr_report->next; + target_if_depopulate_report(curr_report); + qdf_mem_free(curr_report); + curr_report = next_report; + } +} + +/** + * target_if_populate_simdata() - Populate simulation data + * @simctx: Pointer to struct spectralsim_context + * + * Populate simulation data + * + * Return: 0 on success, negative error code on failure + */ +static int +target_if_populate_simdata( + struct spectralsim_context *simctx) +{ + /* + * For now, we use static population. Switch to loading from a file if + * needed in the future. + */ + + simctx->bw20_headreportset = NULL; + SPECTRAL_SIM_REPORTSET_ALLOCPOPL_SINGLE(simctx, + simctx->bw20_headreportset, + CH_WIDTH_20MHZ, 0); + + simctx->bw40_headreportset = NULL; + SPECTRAL_SIM_REPORTSET_ALLOCPOPL_SINGLE(simctx, + simctx->bw40_headreportset, + CH_WIDTH_40MHZ, 0); + + simctx->bw80_headreportset = NULL; + SPECTRAL_SIM_REPORTSET_ALLOCPOPL_SINGLE(simctx, + simctx->bw80_headreportset, + CH_WIDTH_80MHZ, 0); + + simctx->bw160_headreportset = NULL; + SPECTRAL_SIM_REPORTSET_ALLOCPOPL_SINGLE(simctx, + simctx->bw160_headreportset, + CH_WIDTH_160MHZ, 0); + + simctx->bw80_80_headreportset = NULL; + SPECTRAL_SIM_REPORTSET_ALLOCPOPL_SINGLE(simctx, + simctx->bw80_80_headreportset, + CH_WIDTH_160MHZ, 1); + + simctx->curr_reportset = NULL; + + simctx->is_enabled = false; + simctx->is_active = false; + + simctx->ssim_starting_tsf64 = 0; + simctx->ssim_count = 0; + simctx->ssim_period_ms = 0; + + return 0; +} + +/** + * target_if_depopulate_simdata() - De-populate simulation data + * @simctx: Pointer to struct spectralsim_context + * + * De-populate simulation data + * + * Return: none + */ +static void +target_if_depopulate_simdata( + struct spectralsim_context *simctx) +{ + if (!simctx) + return; + + SPECTRAL_SIM_REPORTSET_DEPOPLFREE_LIST(simctx->bw20_headreportset); + SPECTRAL_SIM_REPORTSET_DEPOPLFREE_LIST(simctx->bw40_headreportset); + SPECTRAL_SIM_REPORTSET_DEPOPLFREE_LIST(simctx->bw80_headreportset); + SPECTRAL_SIM_REPORTSET_DEPOPLFREE_LIST(simctx->bw160_headreportset); + SPECTRAL_SIM_REPORTSET_DEPOPLFREE_LIST(simctx->bw80_80_headreportset); +} + +/** + * target_if_spectral_sim_phyerrdelivery_handler() - Phyerr delivery handler + * + * Return: none + */ +static +OS_TIMER_FUNC(target_if_spectral_sim_phyerrdelivery_handler) +{ + struct target_if_spectral *spectral = NULL; + struct spectralsim_context *simctx = NULL; + struct spectralsim_reportset *curr_reportset = NULL; + struct spectralsim_report *curr_report = NULL; + struct target_if_spectral_acs_stats acs_stats; + uint64_t curr_tsf64 = 0; + struct target_if_spectral_ops *p_sops; + + OS_GET_TIMER_ARG(spectral, struct target_if_spectral *); + qdf_assert_always(spectral); + + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + if (!simctx->is_active) + return; + + curr_reportset = simctx->curr_reportset; + qdf_assert_always(curr_reportset); + + curr_report = curr_reportset->curr_report; + qdf_assert_always(curr_report); + + qdf_assert_always(curr_reportset->headreport); + + /* + * We use a simulation TSF since in offload architectures we can't + * expect to + * get an accurate current TSF from HW. + * In case of TSF wrap over, we'll use it as-is for now since the + * simulation + * is intended only for format verification. + */ + curr_tsf64 = simctx->ssim_starting_tsf64 + + ((simctx->ssim_period_ms * simctx->ssim_count) * 1000); + + p_sops->spectral_process_phyerr(spectral, + curr_report->data, + curr_report->datasize, + &curr_report->rfqual_info, + &curr_report->chan_info, + curr_tsf64, &acs_stats); + + simctx->ssim_count++; + + if (curr_report->next) + curr_reportset->curr_report = curr_report->next; + else + curr_reportset->curr_report = curr_reportset->headreport; + + if (curr_reportset->config.ss_count != 0 && + simctx->ssim_count == curr_reportset->config.ss_count) { + target_if_spectral_sops_sim_stop_scan(spectral); + } else { + qdf_timer_start(&simctx->ssim_pherrdelivery_timer, + simctx->ssim_period_ms); + } +} + +/* Module services */ + +int +target_if_spectral_sim_attach(struct target_if_spectral *spectral) +{ + struct spectralsim_context *simctx = NULL; + + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *) + qdf_mem_malloc(sizeof(struct spectralsim_context)); + + if (!simctx) + return -EPERM; + + qdf_mem_zero(simctx, sizeof(*simctx)); + + spectral->simctx = simctx; + + if (spectral->spectral_gen == SPECTRAL_GEN2) + simctx->populate_report_static = + target_if_populate_report_static_gen2; + else if (spectral->spectral_gen == SPECTRAL_GEN3) + simctx->populate_report_static = + target_if_populate_report_static_gen3; + + if (target_if_populate_simdata(simctx) != 0) { + qdf_mem_free(simctx); + spectral->simctx = NULL; + spectral_err("Spectral simulation attach failed"); + return -EPERM; + } + + qdf_timer_init(NULL, + &simctx->ssim_pherrdelivery_timer, + target_if_spectral_sim_phyerrdelivery_handler, + (void *)(spectral), QDF_TIMER_TYPE_WAKE_APPS); + + spectral_info("Spectral simulation attached"); + + return 0; +} + +void +target_if_spectral_sim_detach(struct target_if_spectral *spectral) +{ + struct spectralsim_context *simctx = NULL; + + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + qdf_timer_free(&simctx->ssim_pherrdelivery_timer); + + target_if_depopulate_simdata(simctx); + qdf_mem_free(simctx); + spectral->simctx = NULL; + + spectral_info("Spectral simulation detached"); +} + +uint32_t +target_if_spectral_sops_sim_is_active(void *arg) +{ + struct target_if_spectral *spectral = NULL; + struct spectralsim_context *simctx = NULL; + + spectral = (struct target_if_spectral *)arg; + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + return simctx->is_active; +} +qdf_export_symbol(target_if_spectral_sops_sim_is_active); + +uint32_t +target_if_spectral_sops_sim_is_enabled(void *arg) +{ + struct target_if_spectral *spectral = NULL; + struct spectralsim_context *simctx = NULL; + + spectral = (struct target_if_spectral *)arg; + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + return simctx->is_enabled; +} +qdf_export_symbol(target_if_spectral_sops_sim_is_enabled); + +uint32_t +target_if_spectral_sops_sim_start_scan(void *arg) +{ + struct target_if_spectral *spectral = NULL; + struct spectralsim_context *simctx = NULL; + + spectral = (struct target_if_spectral *)arg; + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + if (!simctx->curr_reportset) { + spectral_err("Spectral simulation: No current report set configured - unable to start simulated Spectral scan"); + return 0; + } + + if (!simctx->curr_reportset->curr_report) { + spectral_err("Spectral simulation: No report data instances populated - unable to start simulated Spectral scan"); + return 0; + } + + if (!simctx->is_enabled) + simctx->is_enabled = true; + + simctx->is_active = true; + + /* Hardcoding current time as zero since it is simulation */ + simctx->ssim_starting_tsf64 = 0; + simctx->ssim_count = 0; + + /* + * TODO: Support high resolution timer in microseconds if required, so + * that + * we can support default periods such as ~200 us. For now, we use 1 + * millisecond since the current use case for the simulation is to + * validate + * formats rather than have a time dependent classification. + */ + simctx->ssim_period_ms = 1; + + qdf_timer_start(&simctx->ssim_pherrdelivery_timer, + simctx->ssim_period_ms); + + return 1; +} +qdf_export_symbol(target_if_spectral_sops_sim_start_scan); + +uint32_t +target_if_spectral_sops_sim_stop_scan(void *arg) +{ + struct target_if_spectral *spectral = NULL; + struct spectralsim_context *simctx = NULL; + + spectral = (struct target_if_spectral *)arg; + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + qdf_timer_stop(&simctx->ssim_pherrdelivery_timer); + + simctx->is_active = false; + simctx->is_enabled = false; + + simctx->ssim_starting_tsf64 = 0; + simctx->ssim_count = 0; + simctx->ssim_period_ms = 0; + + return 1; +} +qdf_export_symbol(target_if_spectral_sops_sim_stop_scan); + +#ifdef SPECTRAL_SIM_DUMP_PARAM_DATA +static void +target_if_log_sim_spectral_params(struct spectral_config *params) +{ + int i = 0; + + spectral_debug("\n"); + + spectral_debug("Spectral simulation: Param data dump:\nss_fft_period=%hu\nss_period=%hu\nss_count=%hu\nss_short_report=%hu\nradar_bin_thresh_sel=%hhu\nss_spectral_pri=%hu\nss_fft_size=%hu\nss_gc_ena=%hu\nss_restart_ena=%hu\nss_noise_floor_ref=%hu\nss_init_delay=%hu\nss_nb_tone_thr=%hu\nss_str_bin_thr=%hu\nss_wb_rpt_mode=%hu\nss_rssi_rpt_mode=%hu\nss_rssi_thr=%hu\nss_pwr_format=%hu\nss_rpt_mode=%hu\nss_bin_scale=%hu\nss_dbm_adj=%hu\nss_chn_mask=%hu\nss_nf_temp_data=%d", + params->ss_fft_period, + params->ss_period, + params->ss_count, + params->ss_short_report, + params->radar_bin_thresh_sel, + params->ss_spectral_pri, + params->ss_fft_size, + params->ss_gc_ena, + params->ss_restart_ena, + params->ss_noise_floor_ref, + params->ss_init_delay, + params->ss_nb_tone_thr, + params->ss_str_bin_thr, + params->ss_wb_rpt_mode, + params->ss_rssi_rpt_mode, + params->ss_rssi_thr, + params->ss_pwr_format, + params->ss_rpt_mode, + params->ss_bin_scale, + params->ss_dbm_adj, + params->ss_chn_mask, params->ss_nf_temp_data); + + for (i = 0; i < AH_MAX_CHAINS * 2; i++) + spectral_debug("ss_nf_cal[%d]=%hhd", i, params->ss_nf_cal[i]); + + for (i = 0; i < AH_MAX_CHAINS * 2; i++) + spectral_debug("ss_nf_pwr[%d]=%hhd", i, params->ss_nf_pwr[i]); + + spectral_info("\n"); +} +#else + +static void +target_if_log_sim_spectral_params(struct spectral_config *params) +{ +} +#endif /* SPECTRAL_SIM_DUMP_PARAM_DATA */ + +uint32_t +target_if_spectral_sops_sim_configure_params( + void *arg, + struct spectral_config *params) +{ + struct target_if_spectral *spectral = NULL; + struct spectralsim_context *simctx = NULL; + enum wlan_phymode phymode; + uint8_t bw; + struct spectralsim_reportset *des_headreportset = NULL; + struct spectralsim_reportset *temp_reportset = NULL; + bool is_invalid_width = false; + struct wlan_objmgr_vdev *vdev = NULL; + + qdf_assert_always(params); + target_if_log_sim_spectral_params(params); + spectral = (struct target_if_spectral *)arg; + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) { + spectral_warn("Spectral simulation: No VAPs found - not proceeding with param config."); + return 0; + } + + bw = target_if_vdev_get_ch_width(vdev); + + switch (bw) { + case CH_WIDTH_20MHZ: + des_headreportset = simctx->bw20_headreportset; + break; + case CH_WIDTH_40MHZ: + des_headreportset = simctx->bw40_headreportset; + break; + case CH_WIDTH_80MHZ: + des_headreportset = simctx->bw80_headreportset; + break; + case CH_WIDTH_160MHZ: + phymode = wlan_vdev_get_phymode(vdev); + if (phymode == WLAN_PHYMODE_11AC_VHT160) { + des_headreportset = simctx->bw160_headreportset; + } else if (phymode == WLAN_PHYMODE_11AC_VHT80_80) { + des_headreportset = simctx->bw80_80_headreportset; + } else { + spectral_err("Spectral simulation: Unexpected PHY mode %u found for width 160 MHz...asserting.", + phymode); + qdf_assert_always(0); + } + break; + + case IEEE80211_CWM_WIDTHINVALID: + spectral_err("Spectral simulation: Invalid width configured - not proceeding with param config."); + is_invalid_width = true; + default: + spectral_err("Spectral simulation: Unknown width %u...asserting", + bw); + qdf_assert_always(0); + break; + } + + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + if (is_invalid_width) + return 0; + + if (!des_headreportset) { + spectral_warn("Spectral simulation: No simulation data present for configured bandwidth/PHY mode - unable to proceed with param config."); + return 0; + } + + simctx->curr_reportset = NULL; + temp_reportset = des_headreportset; + + while (temp_reportset) { + if (qdf_mem_cmp(&temp_reportset->config, + params, sizeof(struct spectral_config)) == 0) { + /* Found a matching config. We are done. */ + simctx->curr_reportset = temp_reportset; + break; + } + + temp_reportset = temp_reportset->next; + } + + if (!simctx->curr_reportset) { + spectral_warn("Spectral simulation: No simulation data present for desired Spectral configuration - unable to proceed with param config."); + return 0; + } + + if (!simctx->curr_reportset->curr_report) { + spectral_warn("Spectral simulation: No report data instances populated for desired Spectral configuration - unable to proceed with param config"); + return 0; + } + + return 1; +} +qdf_export_symbol(target_if_spectral_sops_sim_configure_params); + +uint32_t +target_if_spectral_sops_sim_get_params( + void *arg, struct spectral_config *params) +{ + struct target_if_spectral *spectral = NULL; + struct spectralsim_context *simctx = NULL; + + qdf_assert_always(params); + + spectral = (struct target_if_spectral *)arg; + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + if (!simctx->curr_reportset) { + spectral_warn("Spectral simulation: No configured reportset found."); + return 0; + } + + qdf_mem_copy(params, &simctx->curr_reportset->config, sizeof(*params)); + + return 1; +} +qdf_export_symbol(target_if_spectral_sops_sim_get_params); + +#endif /* QCA_SUPPORT_SPECTRAL_SIMULATION */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim.h b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim.h new file mode 100644 index 0000000000000000000000000000000000000000..308186c1be3ed65e08d3c9203afb8a5a4694cef7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2015,2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SPECTRAL_SIM_H_ +#define _SPECTRAL_SIM_H_ + +#ifdef QCA_SUPPORT_SPECTRAL_SIMULATION +#include "target_if_spectral.h" + +/** + * target_if_spectral_sim_attach() - Initialize Spectral Simulation + * functionality + * @spectral - pointer to spectral internal data structure + * + * Setup data structures to be used for serving out data corresponding to + * various bandwidths and configurations. + * + * Return: Integer status value. 0:Success, -1:Failure + */ +int target_if_spectral_sim_attach(struct target_if_spectral *spectral); + +/** + * target_if_spectral_sim_detach() - De-initialize Spectral Simulation + * functionality + * @spectral - pointer to spectral internal data structure + * + * Free up data structures used for serving out data corresponding to various + * bandwidths and configurations. + * + * Return: None + */ +void target_if_spectral_sim_detach(struct target_if_spectral *spectral); + +/** + * target_if_spectral_sops_sim_is_active() - Check if Spectral(simulated) is + * active + * @arg - pointer to spectral internal data structure + * + * Check if Spectral (simulated) is active + * + * Return: Integer status value. 0: Not active, 1: Active + */ +uint32_t target_if_spectral_sops_sim_is_active(void *arg); + +/** + * target_if_spectral_sops_sim_is_enabled() - Check if Spectral(simulated) is + * enabled + * @arg - pointer to spectral internal data structure + * + * Check if Spectral(simulated) is enabled + * + * Return: Integer status value. 0: Not enabled, 1: Enabled + */ +uint32_t target_if_spectral_sops_sim_is_enabled(void *arg); + +/** + * target_if_spectral_sops_sim_start_scan() - Start Spectral simulation + * @arg - pointer to spectral internal data structure + * + * Start Spectral simulation + * + * Return: Integer status value. 0: Failure, 1: Success + */ +uint32_t target_if_spectral_sops_sim_start_scan(void *arg); + +/** + * target_if_spectral_sops_sim_stop_scan() - Stop Spectral simulation + * @arg - pointer to spectral internal data structure + * + * Stop Spectral simulation + * + * Return: Integer status value. 0: Failure, 1: Success + */ +uint32_t target_if_spectral_sops_sim_stop_scan(void *arg); + +/** + * target_if_spectral_sops_sim_configure_params() - Configure Spectral + * parameters into simulation + * arg - pointer to ath_spectral structure + * params - pointer to struct spectral_config structure bearing Spectral + * configuration + * + * Internally, this function actually searches if a record set with the desired + * configuration has been loaded. If so, it points to the record set for + * later usage when the simulation is started. If not, it returns an error. + * + * Return: Integer status value. 0: Failure, 1: Success + */ +uint32_t target_if_spectral_sops_sim_configure_params( + void *arg, + struct spectral_config *params); + +/** + * target_if_spectral_sops_sim_get_params() - Get Spectral parameters configured + * into simulation + * arg - pointer to ath_spectral structure + * params - pointer to struct spectral_config structure which should be + * populated with Spectral configuration + * + * Get Spectral parameters configured into simulation + * + * Return: Integer status value. 0: Failure, 1: Success + */ +uint32_t target_if_spectral_sops_sim_get_params( + void *arg, + struct spectral_config *params); + +#endif /* QCA_SUPPORT_SPECTRAL_SIMULATION */ +#endif /* _SPECTRAL_SIM_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim_int.h b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim_int.h new file mode 100644 index 0000000000000000000000000000000000000000..aa0fa00957ac8ceec4e5f4ae80630496e1b28edf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim_int.h @@ -0,0 +1,1014 @@ +/* + * Copyright (c) 2015,2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SPECTRAL_SIM_INTERNAL_H_ +#define _SPECTRAL_SIM_INTERNAL_H_ + +#ifdef QCA_SUPPORT_SPECTRAL_SIMULATION +#include "target_if_spectral.h" + +/* #define SPECTRAL_SIM_DUMP_PARAM_DATA 1 */ +/** + * struct spectralsim_report - Linked list node of spectal simulation report + * Spectral report data instance. Usable in a linked list. + * In the case of Direct Attach chipsets, one instance should correspond to + * one PHY Data Error frame received from the HW. + * XXX Direct Attach support to be implemented if needed. Any modifications + * required here can be made at the time of implementation. + * In the case of 802.11ac offload chipsets, one instance should correspond to + * one report received from HW, inclusive of all TLVs. + * + * @rfqual_info: RF measurement information + * @chan_info: Channel information + * @datasize: Length of report data + * @data: Pointer to report data + * @next: Pointer to next node in the struct spectralsim_report + */ +struct spectralsim_report { + /* 11ac onwards only */ + struct target_if_spectral_rfqual_info rfqual_info; + /* 11ac onwards only */ + struct target_if_spectral_chan_info chan_info; + uint32_t datasize; + uint8_t *data; + struct spectralsim_report *next; +}; + +/** + * struct spectralsim_reportset - Set of Spectral report data instances + * corresponding to one particular configuration. Usable in a linked list. + * @config: Spectral config parameters + * @headreport: Pointer to the linked list of struct spectralsim_report + * @curr_report: Pointer to current node in the linked list of + * struct spectralsim_report + * @next: Pointer to next node in the struct spectralsim_reportset + */ +struct spectralsim_reportset { + struct spectral_config config; + struct spectralsim_report *headreport; + struct spectralsim_report *curr_report; + struct spectralsim_reportset *next; +}; + +/* + * struct spectralsim_context - Main structure for Spectral simulation. + * All data and controls get linked here. + * + * For each width (20/40/80/160/80+80), we will have a linked list of + * spectralsim_reportset nodes. Each struct spectralsim_reportset will have a + * linked list of struct spectralsim_report nodes. When the user requests for a + * given PHY mode and Spectral configuration, we find the appropriate + * spectralsim_reportset, and then serve struct spectralsim_report instances + * from the linked list. If required report count is higher than size of linked + * list (or infinite), we repeatedly cycle through the linked list. There can + * be more elaborate data structures devised taking care of a large number of + * possibilities, but we stick to a simple scheme given limited simulation + * needs. + * + * @bw20_headreportset : Linked list of spectralsim_reportset for 20MHz width + * @bw20_headreportset : Linked list of spectralsim_reportset for 40MHz width + * @bw20_headreportset : Linked list of spectralsim_reportset for 80MHz width + * @bw20_headreportset : Linked list of spectralsim_reportset for 160MHz width + * @bw20_headreportset : Linked list of spectralsim_reportset for 80_80MHz width + * @curr_reportset : Pointer to current node in the linked list of + * struct spectralsim_reportset + * @is_enabled : Whether the simulated spectral scan is set as enabled + * @is_active : Whether the simulated spectral scan is set as active + * @ssim_pherrdelivery_timer : Simulated Phyerr delivery timer + * @ssim_starting_tsf64 : Starting 64-bit TSF value for spectral simulation + * @ssim_period_ms : Simulated Phyerr delivery period in ms + * @ssim_count : Number of simulated spectral samples to deliver + * @populate_report_static : Pointer to function to populate static spectral + * report data + */ +struct spectralsim_context { + struct spectralsim_reportset *bw20_headreportset; + struct spectralsim_reportset *bw40_headreportset; + struct spectralsim_reportset *bw80_headreportset; + struct spectralsim_reportset *bw160_headreportset; + struct spectralsim_reportset *bw80_80_headreportset; + + struct spectralsim_reportset *curr_reportset; + bool is_enabled; + bool is_active; + + qdf_timer_t ssim_pherrdelivery_timer; + uint64_t ssim_starting_tsf64; + uint32_t ssim_period_ms; /* TODO: Support in microseconds */ + uint32_t ssim_count; + int (*populate_report_static)(struct spectralsim_report *report, + enum phy_ch_width width, bool is_80_80); +}; + +/* Helper Macros */ + +/* Allocate and populate reportset for a single configuration */ +#define SPECTRAL_SIM_REPORTSET_ALLOCPOPL_SINGLE(simctx, reportset, width, \ + is_80_80) \ + { \ + (reportset) = (struct spectralsim_reportset *) \ + qdf_mem_malloc(sizeof(struct spectralsim_reportset)); \ + \ + if ((reportset) == NULL) { \ + target_if_depopulate_simdata((simctx)); \ + return -EPERM; \ + } \ + \ + qdf_mem_zero((reportset), sizeof(struct spectralsim_reportset)); \ + \ + if (target_if_populate_reportset_static( \ + (simctx), (reportset), (width), (is_80_80)) != 0) { \ + target_if_depopulate_simdata((simctx)); \ + return -EPERM; \ + } \ + \ + (reportset)->next = NULL; \ + } + +/* Depopulate and free list of report sets */ +#define SPECTRAL_SIM_REPORTSET_DEPOPLFREE_LIST(reportset) \ + { \ + struct spectralsim_reportset *curr_reportset = NULL; \ + struct spectralsim_reportset *next_reportset = NULL; \ + \ + curr_reportset = (reportset); \ + \ + while (curr_reportset) { \ + next_reportset = curr_reportset->next; \ + target_if_depopulate_reportset(curr_reportset); \ + qdf_mem_free(curr_reportset); \ + curr_reportset = next_reportset; \ + } \ + \ + (reportset) = NULL; \ + } + +/* Values for static population */ + +/* 20 MHz */ + +/* Report data for 20MHz bandwidth for generation 2 chipsets */ +static uint8_t reportdata_20_gen2[] = { +#ifdef BIG_ENDIAN_HOST + 0xbb, /* Signature */ + 0xfb, /* Tag */ + 0x00, /* Size */ + 0x54, + 0x2e, 0x60, 0x0f, 0xe8, /* FFT Summary A */ + 0x00, 0x00, 0x04, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#else + 0x54, /* Length */ + 0x00, + 0xfb, /* Tag */ + 0xbb, /* Signature */ + 0xe8, 0x0f, 0x60, 0x2e, /* FFT Summary A */ + 0x00, 0x04, 0x00, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, 2, 0, 1, 1, 1, 0, + 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, + 1, 1, 0, 2, 1, 2, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, +}; + +/* Report data for 20MHz bandwidth for generation 3 chipsets */ +static uint8_t reportdata_20_gen3[] = { +#ifdef BIG_ENDIAN_HOST + 0x12, 0x34, 0x56, 0x78, /* fft_timestamp */ + 0xfa, /* fft_hdr_sig */ + 0x03, /* fft_hdr_tag */ + 0x00, /* fft_hdr_length */ + 0x14, + 0x0f, 0xf6, 0x00, 0xe0, + 0x00, 0x00, 0x2f, 0xba, + 0x20, 0xb4, 0x2c, 0x01, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#else + 0x78, 0x56, 0x34, 0x12, /* fft_timestamp */ + 0x14, /* fft_hdr_length */ + 0x00, + 0x03, /* fft_hdr_tag */ + 0xfa, /* fft_hdr_sig */ + 0xe0, 0x00, 0xf6, 0x0f, + 0xba, 0x2f, 0x00, 0x00, + 0x01, 0x2c, 0xb4, 0x20, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, 2, 0, 1, 1, 1, 0, + 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, + 1, 1, 0, 2, 1, 2, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, +}; + +/* RF measurement information for 20 MHz bandwidth */ +static struct target_if_spectral_rfqual_info rfqual_info_20 = { + .rssi_comb = 1, + + .pc_rssi_info[0].rssi_pri20 = 1, + .pc_rssi_info[0].rssi_sec20 = 128, + .pc_rssi_info[0].rssi_sec40 = 128, + .pc_rssi_info[0].rssi_sec80 = 128, + + .pc_rssi_info[1].rssi_pri20 = 128, + .pc_rssi_info[1].rssi_sec20 = 128, + .pc_rssi_info[1].rssi_sec40 = 128, + .pc_rssi_info[1].rssi_sec80 = 128, + + .pc_rssi_info[2].rssi_pri20 = 128, + .pc_rssi_info[2].rssi_sec20 = 128, + .pc_rssi_info[2].rssi_sec40 = 128, + .pc_rssi_info[2].rssi_sec80 = 128, + + .pc_rssi_info[3].rssi_pri20 = 128, + .pc_rssi_info[3].rssi_sec20 = 128, + .pc_rssi_info[3].rssi_sec40 = 128, + .pc_rssi_info[3].rssi_sec80 = 128, + + .noise_floor[0] = -90, + .noise_floor[1] = -90, + .noise_floor[2] = -90, + .noise_floor[3] = -90, +}; + +/* Channel information for 20 MHz bandwidth */ +static struct target_if_spectral_chan_info chan_info_20 = { + .center_freq1 = 5180, + .center_freq2 = 0, + .chan_width = 20, +}; + +/* Spectral config parameters for 20 MHz bandwidth */ +static struct spectral_config config_20_1 = { + .ss_fft_period = 1, + .ss_period = 35, + .ss_count = 0, + .ss_short_report = 1, + .radar_bin_thresh_sel = 0, + .ss_spectral_pri = 1, + .ss_fft_size = 7, + .ss_gc_ena = 1, + .ss_restart_ena = 0, + .ss_noise_floor_ref = 65440, + .ss_init_delay = 80, + .ss_nb_tone_thr = 12, + .ss_str_bin_thr = 8, + .ss_wb_rpt_mode = 0, + .ss_rssi_rpt_mode = 0, + .ss_rssi_thr = 240, + .ss_pwr_format = 0, + .ss_rpt_mode = 2, + .ss_bin_scale = 1, + .ss_dbm_adj = 1, + .ss_chn_mask = 1, + .ss_nf_cal[0] = 0, + .ss_nf_cal[1] = 0, + .ss_nf_cal[2] = 0, + .ss_nf_cal[3] = 0, + .ss_nf_cal[4] = 0, + .ss_nf_cal[5] = 0, + .ss_nf_pwr[0] = 0, + .ss_nf_pwr[1] = 0, + .ss_nf_pwr[2] = 0, + .ss_nf_pwr[3] = 0, + .ss_nf_pwr[4] = 0, + .ss_nf_pwr[5] = 0, + .ss_nf_temp_data = 0, +}; + +/* 40 MHz */ + +/* Report data for 40MHz bandwidth for generation 2 chipsets */ +static uint8_t reportdata_40_gen2[] = { +#ifdef BIG_ENDIAN_HOST + 0xbb, /* Signature */ + 0xfb, /* Tag */ + 0x00, /* Size */ + 0x94, + 0x2e, 0x61, 0x0f, 0x80, /* FFT Summary A */ + 0x00, 0x00, 0x06, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#else + 0x94, /* Length */ + 0x00, + 0xfb, /* Tag */ + 0xbb, /* Signature */ + 0x80, 0x0f, 0x61, 0x2e, /* FFT Summary A */ + 0x00, 0x06, 0x00, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, + 0, 0, 0, 1, 0, 0, 0, 0, 2, 1, 0, 2, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, + 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, +}; + +/* Report data for 40MHz bandwidth for generation 3 chipsets */ +static uint8_t reportdata_40_gen3[] = { +#ifdef BIG_ENDIAN_HOST + 0x12, 0x34, 0x56, 0x78, /* fft_timestamp */ + 0xfa, /* fft_hdr_sig */ + 0x03, /* fft_hdr_tag */ + 0x00, /* fft_hdr_length */ + 0x24, + 0x0f, 0xf6, 0x00, 0xe0, + 0x00, 0x00, 0x2f, 0xba, + 0x20, 0xb4, 0x2c, 0x01, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#else + 0x78, 0x56, 0x34, 0x12, /* fft_timestamp */ + 0x24, /* fft_hdr_length */ + 0x00, + 0x03, /* fft_hdr_tag */ + 0xfa, /* fft_hdr_sig */ + 0xe0, 0x00, 0xf6, 0x0f, + 0xba, 0x2f, 0x00, 0x00, + 0x01, 0x2c, 0xb4, 0x20, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, + 0, 0, 0, 1, 0, 0, 0, 0, 2, 1, 0, 2, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, + 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 1, 0, 0, 0, 0, +}; + +/* RF measurement information for 40 MHz bandwidth */ +static struct target_if_spectral_rfqual_info rfqual_info_40 = { + .rssi_comb = 1, + + .pc_rssi_info[0].rssi_pri20 = 1, + .pc_rssi_info[0].rssi_sec20 = 2, + .pc_rssi_info[0].rssi_sec40 = 128, + .pc_rssi_info[0].rssi_sec80 = 128, + + .pc_rssi_info[1].rssi_pri20 = 128, + .pc_rssi_info[1].rssi_sec20 = 128, + .pc_rssi_info[1].rssi_sec40 = 128, + .pc_rssi_info[1].rssi_sec80 = 128, + + .pc_rssi_info[2].rssi_pri20 = 128, + .pc_rssi_info[2].rssi_sec20 = 128, + .pc_rssi_info[2].rssi_sec40 = 128, + .pc_rssi_info[2].rssi_sec80 = 128, + + .pc_rssi_info[3].rssi_pri20 = 128, + .pc_rssi_info[3].rssi_sec20 = 128, + .pc_rssi_info[3].rssi_sec40 = 128, + .pc_rssi_info[3].rssi_sec80 = 128, + + .noise_floor[0] = -90, + .noise_floor[1] = -90, + .noise_floor[2] = -90, + .noise_floor[3] = -90, +}; + +/* Channel information for 40 MHz bandwidth */ +static struct target_if_spectral_chan_info chan_info_40 = { + .center_freq1 = 5180, + .center_freq2 = 0, + .chan_width = 40, +}; + +/* Spectral config parameters for 40 MHz bandwidth */ +static struct spectral_config config_40_1 = { + .ss_fft_period = 1, + .ss_period = 35, + .ss_count = 0, + .ss_short_report = 1, + .radar_bin_thresh_sel = 0, + .ss_spectral_pri = 1, + .ss_fft_size = 8, + .ss_gc_ena = 1, + .ss_restart_ena = 0, + .ss_noise_floor_ref = 65440, + .ss_init_delay = 80, + .ss_nb_tone_thr = 12, + .ss_str_bin_thr = 8, + .ss_wb_rpt_mode = 0, + .ss_rssi_rpt_mode = 0, + .ss_rssi_thr = 240, + .ss_pwr_format = 0, + .ss_rpt_mode = 2, + .ss_bin_scale = 1, + .ss_dbm_adj = 1, + .ss_chn_mask = 1, + .ss_nf_cal[0] = 0, + .ss_nf_cal[1] = 0, + .ss_nf_cal[2] = 0, + .ss_nf_cal[3] = 0, + .ss_nf_cal[4] = 0, + .ss_nf_cal[5] = 0, + .ss_nf_pwr[0] = 0, + .ss_nf_pwr[1] = 0, + .ss_nf_pwr[2] = 0, + .ss_nf_pwr[3] = 0, + .ss_nf_pwr[4] = 0, + .ss_nf_pwr[5] = 0, + .ss_nf_temp_data = 0, +}; + +/* 80 MHz */ + +/* Report data for 80MHz bandwidth for generation 2 chipsets */ +static uint8_t reportdata_80_gen2[] = { +#ifdef BIG_ENDIAN_HOST + 0xbb, /* Signature */ + 0xfb, /* Tag */ + 0x01, /* Size */ + 0x14, + 0x19, 0xeb, 0x80, 0x40, /* FFT Summary A */ + 0x00, 0x00, 0x10, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#else + 0x14, /* Length */ + 0x01, + 0xfb, /* Tag */ + 0xbb, /* Signature */ + 0x40, 0x80, 0xeb, 0x19, /* FFT Summary A */ + 0x00, 0x10, 0x00, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +/* Report data for 80MHz bandwidth for generation 3 chipsets */ +static uint8_t reportdata_80_gen3[] = { +#ifdef BIG_ENDIAN_HOST + 0x12, 0x34, 0x56, 0x78, /* fft_timestamp */ + 0xfa, /* fft_hdr_sig */ + 0x03, /* fft_hdr_tag */ + 0x00, /* fft_hdr_length */ + 0x44, + 0x0f, 0xf6, 0x00, 0xe0, + 0x00, 0x00, 0x2f, 0xba, + 0x20, 0xb4, 0x2c, 0x01, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#else + 0x78, 0x56, 0x34, 0x12, /* fft_timestamp */ + 0x44, /* fft_hdr_length */ + 0x00, + 0x03, /* fft_hdr_tag */ + 0xfa, /* fft_hdr_sig */ + 0xe0, 0x00, 0xf6, 0x0f, + 0xba, 0x2f, 0x00, 0x00, + 0x01, 0x2c, 0xb4, 0x20, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +/* RF measurement information for 80 MHz bandwidth */ +static struct target_if_spectral_rfqual_info rfqual_info_80 = { + .rssi_comb = 16, + + .pc_rssi_info[0].rssi_pri20 = 16, + .pc_rssi_info[0].rssi_sec20 = 17, + .pc_rssi_info[0].rssi_sec40 = 0, + .pc_rssi_info[0].rssi_sec80 = 128, + + .pc_rssi_info[1].rssi_pri20 = 128, + .pc_rssi_info[1].rssi_sec20 = 128, + .pc_rssi_info[1].rssi_sec40 = 128, + .pc_rssi_info[1].rssi_sec80 = 128, + + .pc_rssi_info[2].rssi_pri20 = 128, + .pc_rssi_info[2].rssi_sec20 = 128, + .pc_rssi_info[2].rssi_sec40 = 128, + .pc_rssi_info[2].rssi_sec80 = 128, + + .pc_rssi_info[3].rssi_pri20 = 128, + .pc_rssi_info[3].rssi_sec20 = 128, + .pc_rssi_info[3].rssi_sec40 = 128, + .pc_rssi_info[3].rssi_sec80 = 128, + + .noise_floor[0] = -90, + .noise_floor[1] = -90, + .noise_floor[2] = -90, + .noise_floor[3] = -90, +}; + +/* Channel information for 80 MHz bandwidth */ +static struct target_if_spectral_chan_info chan_info_80 = { + .center_freq1 = 5210, + .center_freq2 = 0, + .chan_width = 80, +}; + +/* Spectral config parameters for 80 MHz bandwidth */ +static struct spectral_config config_80_1 = { + .ss_fft_period = 1, + .ss_period = 35, + .ss_count = 0, + .ss_short_report = 1, + .radar_bin_thresh_sel = 0, + .ss_spectral_pri = 1, + .ss_fft_size = 9, + .ss_gc_ena = 1, + .ss_restart_ena = 0, + .ss_noise_floor_ref = 65440, + .ss_init_delay = 80, + .ss_nb_tone_thr = 12, + .ss_str_bin_thr = 8, + .ss_wb_rpt_mode = 0, + .ss_rssi_rpt_mode = 0, + .ss_rssi_thr = 240, + .ss_pwr_format = 0, + .ss_rpt_mode = 2, + .ss_bin_scale = 1, + .ss_dbm_adj = 1, + .ss_chn_mask = 1, + .ss_nf_cal[0] = 0, + .ss_nf_cal[1] = 0, + .ss_nf_cal[2] = 0, + .ss_nf_cal[3] = 0, + .ss_nf_cal[4] = 0, + .ss_nf_cal[5] = 0, + .ss_nf_pwr[0] = 0, + .ss_nf_pwr[1] = 0, + .ss_nf_pwr[2] = 0, + .ss_nf_pwr[3] = 0, + .ss_nf_pwr[4] = 0, + .ss_nf_pwr[5] = 0, + .ss_nf_temp_data = 0, +}; + +/* 160 MHz */ + +/* Report data for 160MHz bandwidth for generation 2 chipsets */ +static uint8_t reportdata_160_gen2[] = { + /* Segment 1 */ +#ifdef BIG_ENDIAN_HOST + 0xbb, /* Signature */ + 0xfb, /* Tag */ + 0x01, /* Size */ + 0x14, + 0x23, 0x66, 0x00, 0x40, /* FFT Summary A */ + 0x5c, 0x5c, 0x78, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#else + 0x14, /* Length */ + 0x01, + 0xfb, /* Tag */ + 0xbb, /* Signature */ + 0x40, 0x00, 0x66, 0x23, /* FFT Summary A */ + 0x00, 0x78, 0x5c, 0x5c, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 2, 4, 60, 4, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + + /* Segment 2 */ +#ifdef BIG_ENDIAN_HOST + 0xbb, /* Signature */ + 0xfb, /* Tag */ + 0x01, /* Size */ + 0x14, + 0x23, 0x66, 0x00, 0x40, /* FFT Summary A */ + 0x5c, 0x5c, 0x78, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x01, /* Segment ID */ +#else + 0x14, /* Length */ + 0x01, + 0xfb, /* Tag */ + 0xbb, /* Signature */ + 0x40, 0x00, 0x66, 0x23, /* FFT Summary A */ + 0x00, 0x78, 0x5c, 0x5c, /* FFT Summary B */ + 0x01, 0x00, 0x00, 0x00, /* Segment ID */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 2, 4, 60, 4, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, +}; + +/* Report data for 160MHz bandwidth for generation 3 chipsets */ +static uint8_t reportdata_160_gen3[] = { + /* Segment 1 */ +#ifdef BIG_ENDIAN_HOST + 0x12, 0x34, 0x56, 0x78, /* fft_timestamp */ + 0xfa, /* fft_hdr_sig */ + 0x03, /* fft_hdr_tag */ + 0x00, /* fft_hdr_length */ + 0x44, + 0x0f, 0xf6, 0x00, 0xe0, + 0x00, 0x00, 0x2f, 0xba, + 0x20, 0xb4, 0x2c, 0x01, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#else + 0x78, 0x56, 0x34, 0x12, /* fft_timestamp */ + 0x44, /* fft_hdr_length */ + 0x00, + 0x03, /* fft_hdr_tag */ + 0xfa, /* fft_hdr_sig */ + 0xe0, 0x00, 0xf6, 0x0f, + 0xba, 0x2f, 0x00, 0x00, + 0x01, 0x2c, 0xb4, 0x20, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 2, 4, 60, 4, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + + /* Segment 2 */ +#ifdef BIG_ENDIAN_HOST + 0x12, 0x34, 0x56, 0x78, /* fft_timestamp */ + 0xfa, /* fft_hdr_sig */ + 0x03, /* fft_hdr_tag */ + 0x00, /* fft_hdr_length */ + 0x44, + 0x0f, 0xf6, 0x00, 0xe1, + 0x00, 0x00, 0x2f, 0xba, + 0x20, 0xb4, 0x2c, 0x01, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#else + 0x78, 0x56, 0x34, 0x12, /* fft_timestamp */ + 0x44, /* fft_hdr_length */ + 0x00, + 0x03, /* fft_hdr_tag */ + 0xfa, /* fft_hdr_sig */ + 0xe1, 0x00, 0xf6, 0x0f, + 0xba, 0x2f, 0x00, 0x00, + 0x01, 0x2c, 0xb4, 0x20, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 2, 4, 60, 4, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +/* RF measurement information for 160 MHz bandwidth */ +static struct target_if_spectral_rfqual_info rfqual_info_160 = { + .rssi_comb = 3, + + .pc_rssi_info[0].rssi_pri20 = 3, + .pc_rssi_info[0].rssi_sec20 = 12, + .pc_rssi_info[0].rssi_sec40 = 41, + .pc_rssi_info[0].rssi_sec80 = 128, + + .pc_rssi_info[1].rssi_pri20 = 128, + .pc_rssi_info[1].rssi_sec20 = 128, + .pc_rssi_info[1].rssi_sec40 = 128, + .pc_rssi_info[1].rssi_sec80 = 128, + + .pc_rssi_info[2].rssi_pri20 = 128, + .pc_rssi_info[2].rssi_sec20 = 128, + .pc_rssi_info[2].rssi_sec40 = 128, + .pc_rssi_info[2].rssi_sec80 = 128, + + .pc_rssi_info[3].rssi_pri20 = 128, + .pc_rssi_info[3].rssi_sec20 = 128, + .pc_rssi_info[3].rssi_sec40 = 128, + .pc_rssi_info[3].rssi_sec80 = 128, + + .noise_floor[0] = -90, + .noise_floor[1] = -90, + .noise_floor[2] = -90, + .noise_floor[3] = -90, +}; + +/* Channel information for 160 MHz bandwidth */ +static struct target_if_spectral_chan_info chan_info_160 = { + .center_freq1 = 5250, + .center_freq2 = 0, + .chan_width = 160, +}; + +/* Spectral config parameters for 160 MHz bandwidth */ +static struct spectral_config config_160_1 = { + .ss_fft_period = 1, + .ss_period = 35, + .ss_count = 0, + .ss_short_report = 1, + .radar_bin_thresh_sel = 0, + .ss_spectral_pri = 1, + .ss_fft_size = 9, + .ss_gc_ena = 1, + .ss_restart_ena = 0, + .ss_noise_floor_ref = 65440, + .ss_init_delay = 80, + .ss_nb_tone_thr = 12, + .ss_str_bin_thr = 8, + .ss_wb_rpt_mode = 0, + .ss_rssi_rpt_mode = 0, + .ss_rssi_thr = 240, + .ss_pwr_format = 0, + .ss_rpt_mode = 2, + .ss_bin_scale = 1, + .ss_dbm_adj = 1, + .ss_chn_mask = 1, + .ss_nf_cal[0] = 0, + .ss_nf_cal[1] = 0, + .ss_nf_cal[2] = 0, + .ss_nf_cal[3] = 0, + .ss_nf_cal[4] = 0, + .ss_nf_cal[5] = 0, + .ss_nf_pwr[0] = 0, + .ss_nf_pwr[1] = 0, + .ss_nf_pwr[2] = 0, + .ss_nf_pwr[3] = 0, + .ss_nf_pwr[4] = 0, + .ss_nf_pwr[5] = 0, + .ss_nf_temp_data = 0, +}; + +/* 80+80 MHz */ + +/* Report data for 80_80MHz bandwidth for generation 2 chipsets */ +static uint8_t reportdata_80_80_gen2[] = { + /* Segment 1 */ +#ifdef BIG_ENDIAN_HOST + 0xbb, /* Signature */ + 0xfb, /* Tag */ + 0x01, /* Size */ + 0x14, + 0x23, 0x66, 0x00, 0x40, /* FFT Summary A */ + 0x64, 0x64, 0x89, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#else + 0x14, /* Length */ + 0x01, + 0xfb, /* Tag */ + 0xbb, /* Signature */ + 0x40, 0x00, 0x66, 0x23, /* FFT Summary A */ + 0x00, 0x89, 0x64, 0x64, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 2, 6, 68, 5, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + + /* Segment 2 */ +#ifdef BIG_ENDIAN_HOST + 0xbb, /* Signature */ + 0xfb, /* Tag */ + 0x01, /* Size */ + 0x14, + 0x23, 0x66, 0x00, 0x40, /* FFT Summary A */ + 0x64, 0x64, 0x89, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x01, /* Segment ID */ +#else + 0x14, /* Length */ + 0x01, + 0xfb, /* Tag */ + 0xbb, /* Signature */ + 0x40, 0x00, 0x66, 0x23, /* FFT Summary A */ + 0x00, 0x89, 0x64, 0x64, /* FFT Summary B */ + 0x01, 0x00, 0x00, 0x00, /* Segment ID */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 2, 6, 68, 5, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, +}; + +/* Report data for 80_80MHz bandwidth for generation 3 chipsets */ +static uint8_t reportdata_80_80_gen3[] = { + /* Segment 1 */ +#ifdef BIG_ENDIAN_HOST + 0x12, 0x34, 0x56, 0x78, /* fft_timestamp */ + 0xfa, /* fft_hdr_sig */ + 0x03, /* fft_hdr_tag */ + 0x00, /* fft_hdr_length */ + 0x44, + 0x0f, 0xf6, 0x00, 0xe0, + 0x00, 0x00, 0x2f, 0xba, + 0x20, 0xb4, 0x2c, 0x01, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#else + 0x78, 0x56, 0x34, 0x12, /* fft_timestamp */ + 0x44, /* fft_hdr_length */ + 0x00, + 0x03, /* fft_hdr_tag */ + 0xfa, /* fft_hdr_sig */ + 0xe0, 0x00, 0xf6, 0x0f, + 0xba, 0x2f, 0x00, 0x00, + 0x01, 0x2c, 0xb4, 0x20, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 2, 6, 68, 5, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + + /* Segment 2 */ +#ifdef BIG_ENDIAN_HOST + 0x12, 0x34, 0x56, 0x78, /* fft_timestamp */ + 0xfa, /* fft_hdr_sig */ + 0x03, /* fft_hdr_tag */ + 0x00, /* fft_hdr_length */ + 0x44, + 0x0f, 0xf6, 0x00, 0xe1, + 0x00, 0x00, 0x2f, 0xba, + 0x20, 0xb4, 0x2c, 0x01, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#else + 0x78, 0x56, 0x34, 0x12, /* fft_timestamp */ + 0x44, /* fft_hdr_length */ + 0x00, + 0x03, /* fft_hdr_tag */ + 0xfa, /* fft_hdr_sig */ + 0xe1, 0x00, 0xf6, 0x0f, + 0xba, 0x2f, 0x00, 0x00, + 0x01, 0x2c, 0xb4, 0x20, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 2, 6, 68, 5, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +/* RF measurement information for 80_80 MHz bandwidth */ +static struct target_if_spectral_rfqual_info rfqual_info_80_80 = { + .rssi_comb = 1, + + .pc_rssi_info[0].rssi_pri20 = 1, + .pc_rssi_info[0].rssi_sec20 = 17, + .pc_rssi_info[0].rssi_sec40 = 40, + .pc_rssi_info[0].rssi_sec80 = 128, + + .pc_rssi_info[1].rssi_pri20 = 128, + .pc_rssi_info[1].rssi_sec20 = 128, + .pc_rssi_info[1].rssi_sec40 = 128, + .pc_rssi_info[1].rssi_sec80 = 128, + + .pc_rssi_info[2].rssi_pri20 = 128, + .pc_rssi_info[2].rssi_sec20 = 128, + .pc_rssi_info[2].rssi_sec40 = 128, + .pc_rssi_info[2].rssi_sec80 = 128, + + .pc_rssi_info[3].rssi_pri20 = 128, + .pc_rssi_info[3].rssi_sec20 = 128, + .pc_rssi_info[3].rssi_sec40 = 128, + .pc_rssi_info[3].rssi_sec80 = 128, + + .noise_floor[0] = -90, + .noise_floor[1] = -90, + .noise_floor[2] = -90, + .noise_floor[3] = -90, +}; + +/* Channel information for 80_80 MHz bandwidth */ +static struct target_if_spectral_chan_info chan_info_80_80 = { + .center_freq1 = 5210, + .center_freq2 = 5530, + .chan_width = 160, +}; + +/* Spectral config parameters for 80_80 MHz bandwidth */ +static struct spectral_config config_80_80_1 = { + .ss_fft_period = 1, + .ss_period = 35, + .ss_count = 0, + .ss_short_report = 1, + .radar_bin_thresh_sel = 0, + .ss_spectral_pri = 1, + .ss_fft_size = 9, + .ss_gc_ena = 1, + .ss_restart_ena = 0, + .ss_noise_floor_ref = 65440, + .ss_init_delay = 80, + .ss_nb_tone_thr = 12, + .ss_str_bin_thr = 8, + .ss_wb_rpt_mode = 0, + .ss_rssi_rpt_mode = 0, + .ss_rssi_thr = 240, + .ss_pwr_format = 0, + .ss_rpt_mode = 2, + .ss_bin_scale = 1, + .ss_dbm_adj = 1, + .ss_chn_mask = 1, + .ss_nf_cal[0] = 0, + .ss_nf_cal[1] = 0, + .ss_nf_cal[2] = 0, + .ss_nf_cal[3] = 0, + .ss_nf_cal[4] = 0, + .ss_nf_cal[5] = 0, + .ss_nf_pwr[0] = 0, + .ss_nf_pwr[1] = 0, + .ss_nf_pwr[2] = 0, + .ss_nf_pwr[3] = 0, + .ss_nf_pwr[4] = 0, + .ss_nf_pwr[5] = 0, + .ss_nf_temp_data = 0, +}; + +#endif /* QCA_SUPPORT_SPECTRAL_SIMULATION */ +#endif /* _SPECTRAL_SIM_INTERNAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/wifi_pos/inc/target_if_wifi_pos.h b/drivers/staging/qca-wifi-host-cmn/target_if/wifi_pos/inc/target_if_wifi_pos.h new file mode 100644 index 0000000000000000000000000000000000000000..e136e099611e4595bf00990e5f1df0f5e4159faa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/wifi_pos/inc/target_if_wifi_pos.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_wifi_pos.h + * This file declares the functions pertinent to wifi positioning component's + * target if layer. + */ +#ifndef _WIFI_POS_TGT_IF_H_ +#define _WIFI_POS_TGT_IF_H_ + +#include "qdf_types.h" +#include "qdf_status.h" +#include "wlan_cmn.h" +struct oem_data_req; +struct oem_data_rsp; +struct wlan_objmgr_psoc; +struct wlan_soc_southbound_cb; +struct wlan_lmac_if_tx_ops; +struct wlan_lmac_if_rx_ops; + +#ifdef WIFI_POS_CONVERGED + +/** + * target_if_wifi_pos_get_rxops: api to get rx ops + * @psoc: pointer to psoc object + * + * Return: rx ops + */ +struct wlan_lmac_if_wifi_pos_rx_ops *target_if_wifi_pos_get_rxops( + struct wlan_objmgr_psoc *psoc); + +/** + * target_if_wifi_pos_register_events: function to register with wmi event + * @psoc: pointer to psoc object + * + * Return: status of operation + */ +QDF_STATUS target_if_wifi_pos_register_events(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_wifi_pos_deregister_events: function to deregister wmi event + * @psoc: pointer to psoc object + * + * Return: status of operation + */ +QDF_STATUS target_if_wifi_pos_deregister_events(struct wlan_objmgr_psoc *psoc); + + +/** + * target_if_wifi_pos_get_vht_ch_width: function to get vht channel width + * @psoc: pointer to psoc object + * @ch_width: pointer to the variable in which output value needs to be filled + * + * Return: status of operation + */ +QDF_STATUS target_if_wifi_pos_get_vht_ch_width(struct wlan_objmgr_psoc *psoc, + enum phy_ch_width *ch_width); + +/** + * target_if_wifi_pos_register_tx_ops: function to register with lmac tx ops + * @tx_ops: lmac tx ops struct object + * + * Return: none + */ +void target_if_wifi_pos_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +#else +static inline struct wlan_lmac_if_wifi_pos_rx_ops *target_if_wifi_pos_get_rxops( + struct wlan_objmgr_psoc *psoc) +{ + return NULL; +} + +static inline void target_if_wifi_pos_register_tx_ops( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} + +#endif + +#if defined(WLAN_FEATURE_CIF_CFR) && defined(WIFI_POS_CONVERGED) +/** + * target_if_wifi_pos_init_cir_cfr_rings: set DMA ring cap in psoc private + * object + * @psoc: pointer to psoc object + * @hal_soc: pointer to hal soc + * @num_mac: number of mac + * @buf: buffer containing DMA ring cap + * + * Return: status of operation + */ +QDF_STATUS target_if_wifi_pos_init_cir_cfr_rings(struct wlan_objmgr_psoc *psoc, + void *hal_soc, uint8_t num_mac, + void *buf); +/** + * target_if_wifi_pos_deinit_dma_rings: frees up DMA rings + * @psoc: pointer to psoc + * + * Return: status of operation + */ +QDF_STATUS target_if_wifi_pos_deinit_dma_rings(struct wlan_objmgr_psoc *psoc); +#else +static inline QDF_STATUS target_if_wifi_pos_init_cir_cfr_rings( + struct wlan_objmgr_psoc *psoc, void *hal_soc, + uint8_t num_mac, void *buf) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS target_if_wifi_pos_deinit_dma_rings( + struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#endif /* _WIFI_POS_TGT_IF_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/wifi_pos/src/target_if_wifi_pos.c b/drivers/staging/qca-wifi-host-cmn/target_if/wifi_pos/src/target_if_wifi_pos.c new file mode 100644 index 0000000000000000000000000000000000000000..6f9b00f4b8069c25853af801a6072599dbcc2fd3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/wifi_pos/src/target_if_wifi_pos.c @@ -0,0 +1,703 @@ +/* + * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_wifi_pos.c + * This file defines the functions pertinent to wifi positioning component's + * target if layer. + */ +#include "../../../../umac/wifi_pos/src/wifi_pos_utils_i.h" +#include "wifi_pos_utils_pub.h" + +#include "wmi_unified_api.h" +#include "wlan_lmac_if_def.h" +#include "target_if_wifi_pos.h" +#include "../../../../umac/wifi_pos/src/wifi_pos_main_i.h" +#include "target_if.h" +#ifdef WLAN_FEATURE_CIF_CFR +#include "hal_api.h" + +#define RING_BASE_ALIGN 8 + +static void *target_if_wifi_pos_vaddr_lookup( + struct wifi_pos_psoc_priv_obj *priv, + void *paddr, uint8_t ring_num, uint32_t cookie) +{ + if (priv->dma_buf_pool[ring_num][cookie].paddr == paddr) { + return priv->dma_buf_pool[ring_num][cookie].vaddr + + priv->dma_buf_pool[ring_num][cookie].offset; + } else { + target_if_err("incorrect paddr found on cookie slot"); + return NULL; + } +} + +static QDF_STATUS target_if_wifi_pos_replenish_ring( + struct wifi_pos_psoc_priv_obj *priv, uint8_t ring_idx, + void *alinged_vaddr, uint32_t cookie) +{ + uint64_t *ring_entry; + uint32_t dw_lo, dw_hi = 0, map_status; + void *hal_soc = priv->hal_soc; + void *srng = priv->dma_cfg[ring_idx].srng; + void *paddr; + + if (!alinged_vaddr) { + target_if_debug("NULL alinged_vaddr provided"); + return QDF_STATUS_SUCCESS; + } + + map_status = qdf_mem_map_nbytes_single(NULL, alinged_vaddr, + QDF_DMA_FROM_DEVICE, + priv->dma_cap[ring_idx].min_buf_size, + (qdf_dma_addr_t *)&paddr); + if (map_status) { + target_if_err("mem map failed status: %d", map_status); + return QDF_STATUS_E_FAILURE; + } + QDF_ASSERT(!((uint64_t)paddr % priv->dma_cap[ring_idx].min_buf_align)); + priv->dma_buf_pool[ring_idx][cookie].paddr = paddr; + + hal_srng_access_start(hal_soc, srng); + ring_entry = hal_srng_src_get_next(hal_soc, srng); + dw_lo = (uint64_t)paddr & 0xFFFFFFFF; + WMI_OEM_DMA_DATA_ADDR_HI_SET(dw_hi, (uint64_t)paddr >> 32); + WMI_OEM_DMA_DATA_ADDR_HI_HOST_DATA_SET(dw_hi, cookie); + *ring_entry = (uint64_t)dw_hi << 32 | dw_lo; + hal_srng_access_end(hal_soc, srng); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_wifi_pos_get_indirect_data( + struct wifi_pos_psoc_priv_obj *priv_obj, + struct wmi_host_oem_indirect_data *indirect, + struct oem_data_rsp *rsp, uint32_t *cookie) +{ + void *paddr = NULL; + uint32_t addr_hi; + uint8_t ring_idx = 0, num_rings; + uint32_t allocated_len; + + if (!indirect) { + target_if_debug("no indirect data. regular event received"); + return QDF_STATUS_SUCCESS; + } + + ring_idx = indirect->pdev_id - 1; + num_rings = priv_obj->num_rings; + if (ring_idx >= num_rings) { + target_if_err("incorrect pdev_id: %d", indirect->pdev_id); + return QDF_STATUS_E_INVAL; + } + + allocated_len = priv_obj->dma_cap[ring_idx].min_buf_size + + (priv_obj->dma_cap[ring_idx].min_buf_align - 1); + if (indirect->len > allocated_len || + indirect->len > OEM_DATA_DMA_BUFF_SIZE) { + target_if_err("Invalid indirect len: %d, allocated_len:%d", + indirect->len, allocated_len); + return QDF_STATUS_E_INVAL; + } + + addr_hi = (uint64_t)WMI_OEM_DMA_DATA_ADDR_HI_GET( + indirect->addr_hi); + paddr = (void *)((uint64_t)addr_hi << 32 | indirect->addr_lo); + *cookie = WMI_OEM_DMA_DATA_ADDR_HI_HOST_DATA_GET( + indirect->addr_hi); + rsp->vaddr = target_if_wifi_pos_vaddr_lookup(priv_obj, + paddr, ring_idx, *cookie); + rsp->dma_len = indirect->len; + qdf_mem_unmap_nbytes_single(NULL, (qdf_dma_addr_t)paddr, + QDF_DMA_FROM_DEVICE, + priv_obj->dma_cap[ring_idx].min_buf_size); + + return QDF_STATUS_SUCCESS; +} + +#else +static QDF_STATUS target_if_wifi_pos_replenish_ring( + struct wifi_pos_psoc_priv_obj *priv, uint8_t ring_idx, + void *vaddr, uint32_t cookie) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_wifi_pos_get_indirect_data( + struct wifi_pos_psoc_priv_obj *priv_obj, + struct wmi_host_oem_indirect_data *indirect, + struct oem_data_rsp *rsp, uint32_t *cookie) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * target_if_wifi_pos_oem_rsp_ev_handler: handler registered with + * WMI_OEM_RESPONSE_EVENTID + * @scn: scn handle + * @data_buf: event buffer + * @data_len: event buffer length + * + * Return: status of operation + */ +static int target_if_wifi_pos_oem_rsp_ev_handler(ol_scn_t scn, + uint8_t *data_buf, + uint32_t data_len) +{ + int ret; + uint8_t ring_idx = 0; + QDF_STATUS status; + uint32_t cookie = 0; + struct wmi_host_oem_indirect_data *indirect; + struct oem_data_rsp oem_rsp = {0}; + struct wifi_pos_psoc_priv_obj *priv_obj; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_wifi_pos_rx_ops *wifi_pos_rx_ops; + struct wmi_oem_response_param oem_resp_param = {0}; + wmi_unified_t wmi_handle; + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("psoc is null"); + return QDF_STATUS_NOT_INITIALIZED; + } + + wlan_objmgr_psoc_get_ref(psoc, WLAN_WIFI_POS_TGT_IF_ID); + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("wmi_handle is null"); + wlan_objmgr_psoc_release_ref(psoc, WLAN_WIFI_POS_TGT_IF_ID); + return QDF_STATUS_NOT_INITIALIZED; + } + + priv_obj = wifi_pos_get_psoc_priv_obj(psoc); + if (!priv_obj) { + target_if_err("priv_obj is null"); + wlan_objmgr_psoc_release_ref(psoc, WLAN_WIFI_POS_TGT_IF_ID); + return QDF_STATUS_NOT_INITIALIZED; + } + + wifi_pos_rx_ops = target_if_wifi_pos_get_rxops(psoc); + if (!wifi_pos_rx_ops || !wifi_pos_rx_ops->oem_rsp_event_rx) { + wlan_objmgr_psoc_release_ref(psoc, WLAN_WIFI_POS_TGT_IF_ID); + target_if_err("lmac callbacks not registered"); + return QDF_STATUS_NOT_INITIALIZED; + } + + ret = wmi_extract_oem_response_param(wmi_handle, + data_buf, + &oem_resp_param); + + oem_rsp.rsp_len_1 = oem_resp_param.num_data1; + oem_rsp.data_1 = oem_resp_param.data_1; + + if (oem_resp_param.num_data2) { + oem_rsp.rsp_len_2 = oem_resp_param.num_data2; + oem_rsp.data_2 = oem_resp_param.data_2; + } + + indirect = &oem_resp_param.indirect_data; + status = target_if_wifi_pos_get_indirect_data(priv_obj, indirect, + &oem_rsp, &cookie); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("get indirect data failed status: %d", status); + wlan_objmgr_psoc_release_ref(psoc, WLAN_WIFI_POS_TGT_IF_ID); + return QDF_STATUS_E_INVAL; + } + + ret = wifi_pos_rx_ops->oem_rsp_event_rx(psoc, &oem_rsp); + if (indirect) + ring_idx = indirect->pdev_id - 1; + status = target_if_wifi_pos_replenish_ring(priv_obj, ring_idx, + oem_rsp.vaddr, cookie); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("replenish failed status: %d", status); + ret = QDF_STATUS_E_FAILURE; + } + + wlan_objmgr_psoc_release_ref(psoc, WLAN_WIFI_POS_TGT_IF_ID); + + return ret; +} + +/** + * wifi_pos_oem_cap_ev_handler: handler registered with wmi_oem_cap_event_id + * @scn: scn handle + * @buf: event buffer + * @len: event buffer length + * + * Return: status of operation + */ +static int wifi_pos_oem_cap_ev_handler(ol_scn_t scn, uint8_t *buf, uint32_t len) +{ + /* TBD */ + return 0; +} + +/** + * wifi_pos_oem_meas_rpt_ev_handler: handler registered with + * wmi_oem_meas_report_event_id + * @scn: scn handle + * @buf: event buffer + * @len: event buffer length + * + * Return: status of operation + */ +static int wifi_pos_oem_meas_rpt_ev_handler(ol_scn_t scn, uint8_t *buf, + uint32_t len) +{ + /* TBD */ + return 0; +} + +/** + * wifi_pos_oem_err_rpt_ev_handler: handler registered with + * wmi_oem_err_report_event_id + * @scn: scn handle + * @buf: event buffer + * @len: event buffer length + * + * Return: status of operation + */ +static int wifi_pos_oem_err_rpt_ev_handler(ol_scn_t scn, uint8_t *buf, + uint32_t len) +{ + /* TBD */ + return 0; +} + +/** + * target_if_wifi_pos_oem_data_req() - start OEM data request to target + * @psoc: pointer to psoc object mgr + * @req: start request params + * + * Return: QDF_STATUS + */ +static QDF_STATUS +target_if_wifi_pos_oem_data_req(struct wlan_objmgr_pdev *pdev, + struct oem_data_req *req) +{ + QDF_STATUS status; + wmi_unified_t wmi_hdl = get_wmi_unified_hdl_from_pdev(pdev); + + target_if_debug("Send oem data req to target"); + + if (!req || !req->data) { + target_if_err("oem_data_req is null"); + return QDF_STATUS_E_INVAL; + } + + if (!wmi_hdl) { + target_if_err("WMA closed, can't send oem data req cmd"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_start_oem_data_cmd(wmi_hdl, req->data_len, + req->data); + + if (!QDF_IS_STATUS_SUCCESS(status)) + target_if_err("wmi cmd send failed"); + + return status; +} + +void target_if_wifi_pos_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_wifi_pos_tx_ops *wifi_pos_tx_ops; + wifi_pos_tx_ops = &tx_ops->wifi_pos_tx_ops; + wifi_pos_tx_ops->data_req_tx = target_if_wifi_pos_oem_data_req; + wifi_pos_tx_ops->wifi_pos_register_events = + target_if_wifi_pos_register_events; + wifi_pos_tx_ops->wifi_pos_deregister_events = + target_if_wifi_pos_deregister_events; + wifi_pos_tx_ops->wifi_pos_get_vht_ch_width = + target_if_wifi_pos_get_vht_ch_width; + +} + +inline struct wlan_lmac_if_wifi_pos_rx_ops *target_if_wifi_pos_get_rxops( + struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + target_if_err("passed psoc is NULL"); + return NULL; + } + + return &psoc->soc_cb.rx_ops.wifi_pos_rx_ops; +} + +QDF_STATUS target_if_wifi_pos_register_events(struct wlan_objmgr_psoc *psoc) +{ + int ret; + + if (!psoc || !GET_WMI_HDL_FROM_PSOC(psoc)) { + target_if_err("psoc or psoc->tgt_if_handle is null"); + return QDF_STATUS_E_INVAL; + } + + /* wmi_oem_response_event_id is not defined for legacy targets. + * So do not check for error for this event. + */ + wmi_unified_register_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_response_event_id, + target_if_wifi_pos_oem_rsp_ev_handler, + WMI_RX_WORK_CTX); + + ret = wmi_unified_register_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_cap_event_id, + wifi_pos_oem_cap_ev_handler, + WMI_RX_WORK_CTX); + if (ret) { + target_if_err("register_event_handler failed: err %d", ret); + return QDF_STATUS_E_INVAL; + } + + ret = wmi_unified_register_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_meas_report_event_id, + wifi_pos_oem_meas_rpt_ev_handler, + WMI_RX_WORK_CTX); + if (ret) { + target_if_err("register_event_handler failed: err %d", ret); + return QDF_STATUS_E_INVAL; + } + + ret = wmi_unified_register_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_report_event_id, + wifi_pos_oem_err_rpt_ev_handler, + WMI_RX_WORK_CTX); + if (ret) { + target_if_err("register_event_handler failed: err %d", ret); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_wifi_pos_deregister_events(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc || !GET_WMI_HDL_FROM_PSOC(psoc)) { + target_if_err("psoc or psoc->tgt_if_handle is null"); + return QDF_STATUS_E_INVAL; + } + + wmi_unified_unregister_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_response_event_id); + wmi_unified_unregister_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_cap_event_id); + wmi_unified_unregister_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_meas_report_event_id); + wmi_unified_unregister_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_report_event_id); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_wifi_pos_get_vht_ch_width(struct wlan_objmgr_psoc *psoc, + enum phy_ch_width *ch_width) +{ + struct target_psoc_info *tgt_hdl; + int vht_cap_info; + + *ch_width = CH_WIDTH_INVALID; + + if (!psoc) + return QDF_STATUS_E_INVAL; + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) + return QDF_STATUS_E_INVAL; + + *ch_width = CH_WIDTH_80MHZ; + + vht_cap_info = target_if_get_vht_cap_info(tgt_hdl); + + if (vht_cap_info & WLAN_VHTCAP_SUP_CHAN_WIDTH_80_160) + *ch_width = CH_WIDTH_80P80MHZ; + else if (vht_cap_info & WLAN_VHTCAP_SUP_CHAN_WIDTH_160) + *ch_width = CH_WIDTH_160MHZ; + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_CIF_CFR +static QDF_STATUS target_if_wifi_pos_fill_ring(uint8_t ring_idx, + struct hal_srng *srng, + struct wifi_pos_psoc_priv_obj *priv) +{ + uint32_t i; + void *buf, *buf_aligned; + + for (i = 0; i < priv->dma_cfg[ring_idx].num_ptr; i++) { + buf = qdf_mem_malloc(priv->dma_cap[ring_idx].min_buf_size + + priv->dma_cap[ring_idx].min_buf_align - 1); + if (!buf) + return QDF_STATUS_E_NOMEM; + + priv->dma_buf_pool[ring_idx][i].vaddr = buf; + buf_aligned = (void *)qdf_roundup((uint64_t)buf, + priv->dma_cap[ring_idx].min_buf_align); + priv->dma_buf_pool[ring_idx][i].offset = buf_aligned - buf; + priv->dma_buf_pool[ring_idx][i].cookie = i; + target_if_wifi_pos_replenish_ring(priv, ring_idx, + buf_aligned, i); + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_wifi_pos_empty_ring(uint8_t ring_idx, + struct wifi_pos_psoc_priv_obj *priv) +{ + uint32_t i; + + for (i = 0; i < priv->dma_cfg[ring_idx].num_ptr; i++) { + qdf_mem_unmap_nbytes_single(NULL, + (qdf_dma_addr_t)priv->dma_buf_pool[ring_idx][i].vaddr, + QDF_DMA_FROM_DEVICE, + priv->dma_cap[ring_idx].min_buf_size); + qdf_mem_free(priv->dma_buf_pool[ring_idx][i].vaddr); + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_wifi_pos_init_ring(uint8_t ring_idx, + struct wifi_pos_psoc_priv_obj *priv) +{ + void *srng; + uint32_t num_entries; + qdf_dma_addr_t paddr; + uint32_t ring_alloc_size; + void *hal_soc = priv->hal_soc; + struct hal_srng_params ring_params = {0}; + uint32_t max_entries = hal_srng_max_entries(hal_soc, WIFI_POS_SRC); + uint32_t entry_size = hal_srng_get_entrysize(hal_soc, WIFI_POS_SRC); + + num_entries = priv->dma_cap[ring_idx].min_num_ptr > max_entries ? + max_entries : priv->dma_cap[ring_idx].min_num_ptr; + priv->dma_cfg[ring_idx].num_ptr = num_entries; + priv->dma_buf_pool[ring_idx] = qdf_mem_malloc(num_entries * + sizeof(struct wifi_pos_dma_buf_info)); + if (!priv->dma_buf_pool[ring_idx]) + return QDF_STATUS_E_NOMEM; + + ring_alloc_size = (num_entries * entry_size) + RING_BASE_ALIGN - 1; + priv->dma_cfg[ring_idx].ring_alloc_size = ring_alloc_size; + priv->dma_cfg[ring_idx].base_vaddr_unaligned = + qdf_mem_alloc_consistent(NULL, NULL, ring_alloc_size, &paddr); + priv->dma_cfg[ring_idx].base_paddr_unaligned = (void *)paddr; + if (!priv->dma_cfg[ring_idx].base_vaddr_unaligned) { + target_if_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + + priv->dma_cfg[ring_idx].base_vaddr_aligned = (void *)qdf_roundup( + (uint64_t)priv->dma_cfg[ring_idx].base_vaddr_unaligned, + RING_BASE_ALIGN); + ring_params.ring_base_vaddr = + priv->dma_cfg[ring_idx].base_vaddr_aligned; + priv->dma_cfg[ring_idx].base_paddr_aligned = (void *)qdf_roundup( + (uint64_t)priv->dma_cfg[ring_idx].base_paddr_unaligned, + RING_BASE_ALIGN); + ring_params.ring_base_paddr = + (qdf_dma_addr_t)priv->dma_cfg[ring_idx].base_paddr_aligned; + ring_params.num_entries = num_entries; + srng = hal_srng_setup(hal_soc, WIFI_POS_SRC, 0, + priv->dma_cap[ring_idx].pdev_id, &ring_params); + if (!srng) { + target_if_err("srng setup failed"); + return QDF_STATUS_E_FAILURE; + } + priv->dma_cfg[ring_idx].srng = srng; + priv->dma_cfg[ring_idx].tail_idx_addr = + (void *)hal_srng_get_tp_addr(hal_soc, srng); + priv->dma_cfg[ring_idx].head_idx_addr = + (void *)hal_srng_get_tp_addr(hal_soc, srng); + + return target_if_wifi_pos_fill_ring(ring_idx, srng, priv); +} + +static QDF_STATUS target_if_wifi_pos_deinit_ring(uint8_t ring_idx, + struct wifi_pos_psoc_priv_obj *priv) +{ + target_if_wifi_pos_empty_ring(ring_idx, priv); + priv->dma_buf_pool[ring_idx] = NULL; + hal_srng_cleanup(priv->hal_soc, priv->dma_cfg[ring_idx].srng); + qdf_mem_free_consistent(NULL, NULL, + priv->dma_cfg[ring_idx].ring_alloc_size, + priv->dma_cfg[ring_idx].base_vaddr_unaligned, + (qdf_dma_addr_t)priv->dma_cfg[ring_idx].base_paddr_unaligned, + 0); + qdf_mem_free(priv->dma_buf_pool[ring_idx]); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_wifi_pos_init_srngs( + struct wifi_pos_psoc_priv_obj *priv) +{ + uint8_t i; + QDF_STATUS status; + + /* allocate memory for num_rings pointers */ + priv->dma_cfg = qdf_mem_malloc(priv->num_rings * + sizeof(struct wifi_pos_dma_rings_cap)); + if (!priv->dma_cfg) + return QDF_STATUS_E_NOMEM; + + priv->dma_buf_pool = qdf_mem_malloc(priv->num_rings * + sizeof(struct wifi_pos_dma_buf_info *)); + if (!priv->dma_buf_pool) + return QDF_STATUS_E_NOMEM; + + for (i = 0; i < priv->num_rings; i++) { + status = target_if_wifi_pos_init_ring(i, priv); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("init for ring[%d] failed", i); + return status; + } + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_wifi_pos_deinit_srngs( + struct wifi_pos_psoc_priv_obj *priv) +{ + uint8_t i; + + for (i = 0; i < priv->num_rings; i++) + target_if_wifi_pos_deinit_ring(i, priv); + + qdf_mem_free(priv->dma_buf_pool); + priv->dma_buf_pool = NULL; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_wifi_pos_cfg_fw(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_psoc_priv_obj *priv) +{ + uint8_t i; + QDF_STATUS status; + wmi_unified_t wmi_hdl = GET_WMI_HDL_FROM_PSOC(psoc); + wmi_oem_dma_ring_cfg_req_fixed_param cfg = {0}; + + if (!wmi_hdl) { + target_if_err("WMA closed, can't send oem data req cmd"); + return QDF_STATUS_E_INVAL; + } + + target_if_debug("Sending oem dma ring cfg to target"); + + for (i = 0; i < priv->num_rings; i++) { + cfg.pdev_id = priv->dma_cfg[i].pdev_id; + cfg.base_addr_lo = (uint64_t)priv->dma_cfg[i].base_paddr_aligned + & 0xFFFFFFFF; + cfg.base_addr_hi = (uint64_t)priv->dma_cfg[i].base_paddr_aligned + & 0xFFFFFFFF00000000; + cfg.head_idx_addr_lo = (uint64_t)priv->dma_cfg[i].head_idx_addr + & 0xFFFFFFFF; + cfg.head_idx_addr_hi = (uint64_t)priv->dma_cfg[i].head_idx_addr + & 0xFFFFFFFF00000000; + cfg.tail_idx_addr_lo = (uint64_t)priv->dma_cfg[i].tail_idx_addr + & 0xFFFFFFFF; + cfg.tail_idx_addr_hi = (uint64_t)priv->dma_cfg[i].tail_idx_addr + & 0xFFFFFFFF00000000; + cfg.num_ptr = priv->dma_cfg[i].num_ptr; + status = wmi_unified_oem_dma_ring_cfg(wmi_hdl, &cfg); + if (!QDF_IS_STATUS_SUCCESS(status)) { + target_if_err("wmi cmd send failed"); + return status; + } + } + + return status; +} + +QDF_STATUS target_if_wifi_pos_deinit_dma_rings(struct wlan_objmgr_psoc *psoc) +{ + struct wifi_pos_psoc_priv_obj *priv = wifi_pos_get_psoc_priv_obj(psoc); + + target_if_wifi_pos_deinit_srngs(priv); + qdf_mem_free(priv->dma_cap); + priv->dma_cap = NULL; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_wifi_pos_init_cir_cfr_rings(struct wlan_objmgr_psoc *psoc, + void *hal_soc, uint8_t num_mac, + void *buf) +{ + uint8_t i; + QDF_STATUS status = QDF_STATUS_SUCCESS; + WMI_OEM_DMA_RING_CAPABILITIES *dma_cap = buf; + struct wifi_pos_psoc_priv_obj *priv = wifi_pos_get_psoc_priv_obj(psoc); + + if (!priv) { + target_if_err("unable to get wifi_pos psoc obj"); + return QDF_STATUS_E_NULL_VALUE; + } + + priv->hal_soc = hal_soc; + priv->num_rings = num_mac; + priv->dma_cap = qdf_mem_malloc(priv->num_rings * + sizeof(struct wifi_pos_dma_rings_cap)); + if (!priv->dma_cap) + return QDF_STATUS_E_NOMEM; + + for (i = 0; i < num_mac; i++) { + priv->dma_cap[i].pdev_id = dma_cap[i].pdev_id; + priv->dma_cap[i].min_num_ptr = dma_cap[i].min_num_ptr; + priv->dma_cap[i].min_buf_size = dma_cap[i].min_buf_size; + priv->dma_cap[i].min_buf_align = dma_cap[i].min_buf_align; + } + + /* initialize DMA rings now */ + status = target_if_wifi_pos_init_srngs(priv); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("dma init failed: %d", status); + goto dma_init_failed; + } + + /* send cfg req cmd to firmware */ + status = target_if_wifi_pos_cfg_fw(psoc, priv); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("configure to FW failed: %d", status); + goto dma_init_failed; + } + + return QDF_STATUS_SUCCESS; + +dma_init_failed: + target_if_wifi_pos_deinit_dma_rings(psoc); + return status; +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cfr/core/inc/cfr_defs_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cfr/core/inc/cfr_defs_i.h new file mode 100644 index 0000000000000000000000000000000000000000..196071670a69c74dfc4b9648b1eec6c57d92fae4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cfr/core/inc/cfr_defs_i.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _CFR_DEFS_I_H_ +#define _CFR_DEFS_I_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CFR_STOP_STR "CFR-CAPTURE-STOPPED" + +/** + * wlan_cfr_psoc_obj_create_handler() - psoc object create handler for cfr + * @psoc - pointer to psoc object + * @args - void pointer in case it needs arguments + * + * Return: status of object creation + */ +QDF_STATUS +wlan_cfr_psoc_obj_create_handler(struct wlan_objmgr_psoc *psoc, void *arg); + +/** + * wlan_cfr_psoc_obj_destroy_handler() - psoc object destroy handler for cfr + * @psoc - pointer to psoc object + * @args - void pointer in case it needs arguments + * + * Return: status of destroy object + */ +QDF_STATUS +wlan_cfr_psoc_obj_destroy_handler(struct wlan_objmgr_psoc *psoc, void *arg); + +/** + * wlan_cfr_pdev_obj_create_handler() - pdev object create handler for cfr + * @pdev - pointer to pdev object + * @args - void pointer in case it needs arguments + * + * Return: status of object creation + */ +QDF_STATUS +wlan_cfr_pdev_obj_create_handler(struct wlan_objmgr_pdev *pdev, void *arg); + +/** + * wlan_cfr_pdev_obj_destroy_handler() - pdev object destroy handler for cfr + * @pdev - pointer to pdev object + * @args - void pointer in case it needs arguments + * + * Return: status of destroy object + */ +QDF_STATUS +wlan_cfr_pdev_obj_destroy_handler(struct wlan_objmgr_pdev *pdev, void *arg); + +/** + * wlan_cfr_peer_obj_create_handler() - peer object create handler for cfr + * @peer - pointer to peer object + * @args - void pointer in case it needs arguments + * + * Return: status of object creation + */ +QDF_STATUS +wlan_cfr_peer_obj_create_handler(struct wlan_objmgr_peer *peer, void *arg); + +/** + * wlan_cfr_peer_obj_destroy_handler() - peer object destroy handler for cfr + * @peer - pointer to peer object + * @args - void pointer in case it needs arguments + * + * Return: status ofi destry object + */ +QDF_STATUS +wlan_cfr_peer_obj_destroy_handler(struct wlan_objmgr_peer *peer, void *arg); + +/** + * cfr_streamfs_init() - stream filesystem init + * @pdev - pointer to pdev object + * + * Return: status of fs init + */ +QDF_STATUS +cfr_streamfs_init(struct wlan_objmgr_pdev *pdev); + +/** + * cfr_streamfs_remove() - stream filesystem remove + * @pdev - pointer to pdev object + * + * Return: status of fs remove + */ +QDF_STATUS +cfr_streamfs_remove(struct wlan_objmgr_pdev *pdev); + +/** + * cfr_streamfs_write() - write to stream filesystem + * @pa - pointer to pdev_cfr object + * @write_data - Pointer to data + * @write_len - data len + * + * Return: status of fs write + */ +QDF_STATUS +cfr_streamfs_write(struct pdev_cfr *pa, const void *write_data, + size_t write_len); + +/** + * cfr_streamfs_flush() - flush the write to streamfs + * @pa - pointer to pdev_cfr object + * + * Return: status of fs flush + */ +QDF_STATUS +cfr_streamfs_flush(struct pdev_cfr *pa); + +/** + * cfr_stop_indication() - write cfr stop string + * @vdev - pointer to vdev object + * + * Write stop string and indicate to up layer. + * + * Return: status of write CFR stop string + */ +QDF_STATUS cfr_stop_indication(struct wlan_objmgr_vdev *vdev); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cfr/core/src/cfr_common.c b/drivers/staging/qca-wifi-host-cmn/umac/cfr/core/src/cfr_common.c new file mode 100644 index 0000000000000000000000000000000000000000..58a614b0a06f8c33a72f14f17d4e0e0aeef29530 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cfr/core/src/cfr_common.c @@ -0,0 +1,391 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef CFR_USE_FIXED_FOLDER +#include +#endif + +/** + * wlan_cfr_get_dbr_num_entries() - Get entry number of DBR ring + * @pdev - the physical device object. + * + * Return : Entry number of DBR ring. + */ +static uint32_t +wlan_cfr_get_dbr_num_entries(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_psoc_host_dbr_ring_caps *dbr_ring_cap; + uint8_t num_dbr_ring_caps, cap_idx, pdev_id; + struct target_psoc_info *tgt_psoc_info; + uint32_t num_entries = MAX_LUT_ENTRIES; + + if (!pdev) { + cfr_err("Invalid pdev"); + return num_entries; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + cfr_err("psoc is null"); + return num_entries; + } + + tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_psoc_info) { + cfr_err("target_psoc_info is null"); + return num_entries; + } + + num_dbr_ring_caps = target_psoc_get_num_dbr_ring_caps(tgt_psoc_info); + dbr_ring_cap = target_psoc_get_dbr_ring_caps(tgt_psoc_info); + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + for (cap_idx = 0; cap_idx < num_dbr_ring_caps; cap_idx++) { + if (dbr_ring_cap[cap_idx].pdev_id == pdev_id && + dbr_ring_cap[cap_idx].mod_id == DBR_MODULE_CFR) + num_entries = dbr_ring_cap[cap_idx].ring_elems_min; + } + + num_entries = QDF_MIN(num_entries, MAX_LUT_ENTRIES); + cfr_debug("pdev id %d, num_entries %d", pdev_id, num_entries); + + return num_entries; +} + +QDF_STATUS +wlan_cfr_psoc_obj_create_handler(struct wlan_objmgr_psoc *psoc, void *arg) +{ + struct psoc_cfr *cfr_sc = NULL; + + cfr_sc = (struct psoc_cfr *)qdf_mem_malloc(sizeof(struct psoc_cfr)); + if (!cfr_sc) { + cfr_err("Failed to allocate cfr_ctx object\n"); + return QDF_STATUS_E_NOMEM; + } + + cfr_sc->psoc_obj = psoc; + + wlan_objmgr_psoc_component_obj_attach(psoc, WLAN_UMAC_COMP_CFR, + (void *)cfr_sc, + QDF_STATUS_SUCCESS); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_cfr_psoc_obj_destroy_handler(struct wlan_objmgr_psoc *psoc, void *arg) +{ + struct psoc_cfr *cfr_sc = NULL; + + cfr_sc = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_CFR); + if (cfr_sc) { + wlan_objmgr_psoc_component_obj_detach(psoc, WLAN_UMAC_COMP_CFR, + (void *)cfr_sc); + qdf_mem_free(cfr_sc); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_cfr_pdev_obj_create_handler(struct wlan_objmgr_pdev *pdev, void *arg) +{ + struct pdev_cfr *pa = NULL; + uint32_t idx; + + if (!pdev) { + cfr_err("PDEV is NULL\n"); + return QDF_STATUS_E_FAILURE; + } + + pa = (struct pdev_cfr *)qdf_mem_malloc(sizeof(struct pdev_cfr)); + if (!pa) { + cfr_err("Failed to allocate pdev_cfr object\n"); + return QDF_STATUS_E_NOMEM; + } + pa->pdev_obj = pdev; + pa->lut_num = wlan_cfr_get_dbr_num_entries(pdev); + if (!pa->lut_num) { + cfr_err("lut num is 0"); + return QDF_STATUS_E_INVAL; + } + pa->lut = (struct look_up_table **)qdf_mem_malloc(pa->lut_num * + sizeof(struct look_up_table *)); + if (!pa->lut) { + cfr_err("Failed to allocate lut, lut num %d", pa->lut_num); + qdf_mem_free(pa); + return QDF_STATUS_E_NOMEM; + } + for (idx = 0; idx < pa->lut_num; idx++) + pa->lut[idx] = (struct look_up_table *)qdf_mem_malloc( + sizeof(struct look_up_table)); + + wlan_objmgr_pdev_component_obj_attach(pdev, WLAN_UMAC_COMP_CFR, + (void *)pa, QDF_STATUS_SUCCESS); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_cfr_pdev_obj_destroy_handler(struct wlan_objmgr_pdev *pdev, void *arg) +{ + struct pdev_cfr *pa = NULL; + uint32_t idx; + + if (!pdev) { + cfr_err("PDEV is NULL\n"); + return QDF_STATUS_E_FAILURE; + } + + pa = wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_CFR); + if (pa) { + wlan_objmgr_pdev_component_obj_detach(pdev, WLAN_UMAC_COMP_CFR, + (void *)pa); + if (pa->lut) { + for (idx = 0; idx < pa->lut_num; idx++) + qdf_mem_free(pa->lut[idx]); + qdf_mem_free(pa->lut); + } + qdf_mem_free(pa); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_cfr_peer_obj_create_handler(struct wlan_objmgr_peer *peer, void *arg) +{ + struct peer_cfr *pe = NULL; + + if (!peer) { + cfr_err("PEER is NULL\n"); + return QDF_STATUS_E_FAILURE; + } + + pe = (struct peer_cfr *)qdf_mem_malloc(sizeof(struct peer_cfr)); + if (!pe) { + cfr_err("Failed to allocate peer_cfr object\n"); + return QDF_STATUS_E_FAILURE; + } + + pe->peer_obj = peer; + + /* Remaining will be populated when we give CFR capture command */ + wlan_objmgr_peer_component_obj_attach(peer, WLAN_UMAC_COMP_CFR, + (void *)pe, QDF_STATUS_SUCCESS); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_cfr_peer_obj_destroy_handler(struct wlan_objmgr_peer *peer, void *arg) +{ + struct peer_cfr *pe = NULL; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev = NULL; + struct pdev_cfr *pa = NULL; + + if (!peer) { + cfr_err("PEER is NULL\n"); + return QDF_STATUS_E_FAILURE; + } + + vdev = wlan_peer_get_vdev(peer); + if (vdev) + pdev = wlan_vdev_get_pdev(vdev); + + if (pdev) + pa = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CFR); + + pe = wlan_objmgr_peer_get_comp_private_obj(peer, WLAN_UMAC_COMP_CFR); + + if (pa && pe) { + if (pe->period && pe->request) + pa->cfr_current_sta_count--; + } + + if (pe) { + wlan_objmgr_peer_component_obj_detach(peer, WLAN_UMAC_COMP_CFR, + (void *)pe); + qdf_mem_free(pe); + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef CFR_USE_FIXED_FOLDER +static const char *cfr_get_dev_name(struct wlan_objmgr_pdev *pdev) +{ + const char *default_name = "wlan"; + + return default_name; +} +#else +static const char *cfr_get_dev_name(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_cfr *pa = NULL; + char folder[32]; + struct net_device *pdev_netdev; + struct ol_ath_softc_net80211 *scn; + struct target_pdev_info *tgt_hdl; + const char *default_name = "wlan"; + + if (!pdev) { + cfr_err("PDEV is NULL\n"); + return default_name; + } + + tgt_hdl = wlan_pdev_get_tgt_if_handle(pdev); + + if (!tgt_hdl) { + cfr_err("target_pdev_info is NULL\n"); + return default_name; + } + + scn = target_pdev_get_feature_ptr(tgt_hdl); + pdev_netdev = scn->netdev; + + return pdev_netdev->name; +} +#endif + +QDF_STATUS cfr_streamfs_init(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_cfr *pa = NULL; + char folder[32]; + + if (!pdev) { + cfr_err("PDEV is NULL\n"); + return QDF_STATUS_E_FAILURE; + } + + pa = wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_CFR); + + if (pa == NULL) { + cfr_err("pdev_cfr is NULL\n"); + return QDF_STATUS_E_FAILURE; + } + + if (!pa->is_cfr_capable) { + cfr_err("CFR IS NOT SUPPORTED\n"); + return QDF_STATUS_E_FAILURE; + } + + snprintf(folder, sizeof(folder), "cfr%s", cfr_get_dev_name(pdev)); + + pa->dir_ptr = qdf_streamfs_create_dir((const char *)folder, NULL); + + if (!pa->dir_ptr) { + cfr_err("Directory create failed"); + return QDF_STATUS_E_FAILURE; + } + + pa->chan_ptr = qdf_streamfs_open("cfr_dump", pa->dir_ptr, + pa->subbuf_size, + pa->num_subbufs, NULL); + + if (!pa->chan_ptr) { + cfr_err("Chan create failed"); + qdf_streamfs_remove_dir_recursive(pa->dir_ptr); + pa->dir_ptr = NULL; + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS cfr_streamfs_remove(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_cfr *pa = NULL; + + pa = wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_CFR); + if (pa) { + if (pa->chan_ptr) { + qdf_streamfs_close(pa->chan_ptr); + pa->chan_ptr = NULL; + } + + if (pa->dir_ptr) { + qdf_streamfs_remove_dir_recursive(pa->dir_ptr); + pa->dir_ptr = NULL; + } + + } else + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS cfr_streamfs_write(struct pdev_cfr *pa, const void *write_data, + size_t write_len) +{ + if (pa->chan_ptr) { + + /* write to channel buffer */ + qdf_streamfs_write(pa->chan_ptr, (const void *)write_data, + write_len); + } else + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS cfr_streamfs_flush(struct pdev_cfr *pa) +{ + if (pa->chan_ptr) { + + /* Flush the data write to channel buffer */ + qdf_streamfs_flush(pa->chan_ptr); + } else + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS cfr_stop_indication(struct wlan_objmgr_vdev *vdev) +{ + struct pdev_cfr *pa; + uint32_t status; + struct wlan_objmgr_pdev *pdev; + + pdev = wlan_vdev_get_pdev(vdev); + pa = wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_CFR); + if (!pa) { + cfr_err("pdev_cfr is NULL\n"); + return QDF_STATUS_E_INVAL; + } + + status = cfr_streamfs_write(pa, (const void *)CFR_STOP_STR, + sizeof(CFR_STOP_STR)); + + status = cfr_streamfs_flush(pa); + cfr_debug("stop indication done"); + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/cfr_cfg.h b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/cfr_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..f7e3417a0a4a7c6160a3aa168e39121bf18ef2d5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/cfr_cfg.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains centralized cfg definitions of CFR component + */ +#ifndef __CFR_CONFIG_H +#define __CFR_CONFIG_H + +/* + * + * cfr_disable - disable channel frequence response(CFR) feature + * @Min: 0 + * @Max: 1 + * @Default: 0 + * + * This ini is used to disable cfr feature. + * + * Related: None + * + * Supported Feature: cfr + * + * Usage: External + * + * + */ +#define CFG_CFR_DISABLE \ + CFG_INI_BOOL("cfr_disable", false, \ + "CFR disable") + +#define CFG_CFR_ALL \ + CFG(CFG_CFR_DISABLE) + +#endif /* __CFR_CONFIG_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/wlan_cfr_public_structs.h b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/wlan_cfr_public_structs.h new file mode 100644 index 0000000000000000000000000000000000000000..60803097a4ccfc1677240b34e20ddb9c25f21e9e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/wlan_cfr_public_structs.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declare the data structure to hold CFR specific configurations + */ +#ifndef _WLAN_CFR_PUBLIC_STRUCTS_H_ +#define _WLAN_CFR_PUBLIC_STRUCTS_H_ + +#ifdef WLAN_CFR_ENABLE +#include "qdf_types.h" + +/** + * cfr_cwm_width : Capture bandwidth + * 0 : 20MHz, 1 : 40MHz, 2 : 80MHz, 3 : 160MHz, 4 : 80+80MHz + */ +enum cfr_cwm_width { + CFR_CWM_WIDTH20, + CFR_CWM_WIDTH40, + CFR_CWM_WIDTH80, + CFR_CWM_WIDTH160, + CFR_CWM_WIDTH80_80, + + CFR_CWM_WIDTH_MAX, + CFR_CWM_WIDTHINVALID = 0xff +}; + +/** + * cfr_capture_method : Tx based CFR capture method + * @CFR_CAPTURE_METHOD_QOS_NULL : Send QOS Null frame and capture CFR on ACK + * @CFR_CAPTURE_METHOD_QOS_NULL_WITH_PHASE: Send QoS Null frame with phase + * @CFR_CAPTURE_METHOD_PROBE_RESPONSE : Capture is enabled on probe response + * If node is not found, trigger unassociated capture. + */ +enum cfr_capture_method { + CFR_CAPTURE_METHOD_QOS_NULL = 0, + CFR_CAPTURE_METHOD_QOS_NULL_WITH_PHASE = 1, + CFR_CAPTURE_METHOD_PROBE_RESPONSE = 2, + CFR_CAPTURE_METHOD_LAST_VALID, + CFR_CAPTURE_METHOD_AUTO = 0xff, + CFR_CAPTURE_METHOD_MAX, +}; + +/** + * cfr_wlanconfig_param : CFR params used to store user provided inputs + * @bandwidth : CFR capture bandwidth + * @periodicity : CFR capture periodicity in milli seconds + * @capture_method : CFR capture method + * @mac : peer mac address + * @ta : Tx address + * @ra : Rx Address + * @ta_mask: Tx address mask + * @ra_mask; Rx address mask + * *** Controls for different capture modes in RCC *** + * @en_directed_ftm: Enable capture for directed RTT FTM Packet + * @en_directed_ndpa_ndp: Enable NDPA filter followed by directed NDP capture + * @en_ta_ra_filter: Enable MAC TA/RA/type filtering channel capture + * @en_all_ftm_ack: Enable all FTM and ACK capture + * @en_ndpa_ndp_all: Enable NDPA filter followed by NDP capture, + * capture includes both directed and non-directed packets. + * @en_all_pkt: Enable capture mode to filter in all packets + * @dis_directed_ftm: Drop directed RTT FTM packets + * @dis_directed_ndpa_ndp: Drop directed NDPA and NDP packets + * @dis_ta_ra_filter: Disable MAC TA/RA/type filtering channel capture + * @dis_all_ftm_ack: Drop all FTM and ACK capture + * @dis_ndpa_ndp_all: Drop all NDPA and NDP packets + * @dis_all_pkt: Do not filter in any packet + * + * **** Fixed parameters **** + * @cap_dur: Capture duration + * @cap_intvl: Capture interval + * FW may limit the interval and duration during which HW may attempt + * to capture by programming the user provided values. + * These values(cap_dur, cap_intvl) range from 1 us to roughly 16.8 in 1 us + * units. Max value is 0xFFFFFF, i.e., 16.777215 s + * @bw: Bandwidth: 20, 40, 80, 160, 320MHz + * @nss: 8 bits are allotted for NSS mask. Specifies which numbers of + * spatial streams (MIMO factor) are permitted + * @grp_id: Group id could of any value between 0 and 15 + * @expected_mgmt_subtype/ expected_ctrl_subtype / expected_data_subtype: + * corresponds to mgmt/ ctrl/ data, all are bitmasks, in which each bit + * represents the corresponding type/ subtype value as per IEEE80211. + * + * @en_cfg and reset_cfg: This bitmap of 16 bits, indicates 16 groups. + * Valid entry should be in between 0 to 0xFFFF. + * Turning on a bit in en_cfg will enable MAC TA_RA filter + * for corresponding group; whereas turning on a bit in reset_cfg + * will reset all 9 params in the corresponding group to default values. + * + * @ul_mu_user_mask_lower, ul_mu_user_mask_upper : + * Since Cypress supports max bandwidth of 80Mhz, maximum number + * of users in a UL MU-MIMO transmission would be 37. + * mask_lower_32: Bits from 31 to 0 indicate user indices for 32 users. + * mask_upper_32: Bits from 0 to 4 indicate user indices from 33 to 37. + * + * @ freeze_tlv_delay_cnt_en, freeze_tlv_delay_cnt_thr : + * freeze_tlv_delay_cnt_thr will decide the threshold for MAC to drop the + * freeze TLV. freeze_tlv_delay_cnt_thr will only be applicable if + * freeze_tlv_delay_cnt_en is enabled. + */ +struct cfr_wlanconfig_param { + enum cfr_cwm_width bandwidth; + uint32_t periodicity; + enum cfr_capture_method capture_method; + uint8_t mac[QDF_MAC_ADDR_SIZE]; +#ifdef WLAN_ENH_CFR_ENABLE + uint8_t ta[QDF_MAC_ADDR_SIZE]; + uint8_t ra[QDF_MAC_ADDR_SIZE]; + uint8_t ta_mask[QDF_MAC_ADDR_SIZE]; + uint8_t ra_mask[QDF_MAC_ADDR_SIZE]; + uint16_t en_directed_ftm :1, + en_directed_ndpa_ndp :1, + en_ta_ra_filter :1, + en_all_ftm_ack :1, + en_ndpa_ndp_all :1, + en_all_pkt :1, + dis_directed_ftm :1, + dis_directed_ndpa_ndp :1, + dis_ta_ra_filter :1, + dis_all_ftm_ack :1, + dis_ndpa_ndp_all :1, + dis_all_pkt :1, + rsvd0 :4; + + uint32_t cap_dur :24, + rsvd1 :8; + uint32_t cap_intvl :24, + rsvd2 :8; + uint32_t bw :5, + nss :8, + grp_id :4, + rsvd3 :15; + + uint32_t expected_mgmt_subtype :16, + expected_ctrl_subtype :16; + + uint32_t expected_data_subtype :16, + rsvd5 :16; + + uint32_t en_cfg :16, + reset_cfg :16; + + uint32_t ul_mu_user_mask_lower; + uint32_t ul_mu_user_mask_upper; + + uint32_t freeze_tlv_delay_cnt_en :1, + freeze_tlv_delay_cnt_thr :8, + rsvd6 :23; +#endif +}; + +#endif /* WLAN_CFR_ENABLE */ +#endif /* _WLAN_CFR_PUBLIC_STRUCTS_H_ */ + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/wlan_cfr_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/wlan_cfr_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..907524ba49b79b8a4f58eac66f72ea495072866a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/wlan_cfr_tgt_api.h @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_CFR_TGT_API_H_ +#define _WLAN_CFR_TGT_API_H_ + +#include +#include +#include +#include + +/* tgt layer has APIs in application, to access functions in target + * through tx_ops. + */ + +/** + * tgt_cfr_init_pdev() - API that registers CFR to handlers. + * @pdev: pointer to pdev_object + * + * Return: success/failure of init + */ +int tgt_cfr_init_pdev(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_cfr_deinit_pdev() - API that de-registers CFR to handlers. + * @pdev: pointer to pdev_object + * + * Return: success/failure of de-init + */ +int tgt_cfr_deinit_pdev(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_cfr_get_target_type() - API to determine target type. + * @psoc: pointer to psoc_object + * + * Return: enum value of target type + */ +int tgt_cfr_get_target_type(struct wlan_objmgr_psoc *psoc); + +/** + * tgt_cfr_start_capture() - API to start cfr capture on a peer. + * @pdev: pointer to pdev_object + * @peer: pointer to peer_object + * @cfr_params: pointer to config cfr_params + * + * Return: success/failure of start capture + */ +int tgt_cfr_start_capture(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct cfr_capture_params *cfr_params); + +/** + * tgt_cfr_stop_capture() - API to stop cfr capture on a peer. + * @pdev: pointer to pdev_object + * @peer: pointer to peer_object + * + * Return: success/failure of stop capture + */ +int tgt_cfr_stop_capture(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer); + +/** + * tgt_cfr_enable_cfr_timer() - API to enable cfr timer + * @pdev: pointer to pdev_object + * @cfr_timer: Amount of time this timer has to run. If 0, it disables timer. + * + * Return: success/failure of timer enable + */ +int +tgt_cfr_enable_cfr_timer(struct wlan_objmgr_pdev *pdev, uint32_t cfr_timer); + +/** + * tgt_cfr_support_set() - API to set cfr support + * @psoc: pointer to psoc_object + * @value: value to be set + */ +void tgt_cfr_support_set(struct wlan_objmgr_psoc *psoc, uint32_t value); + +/** + * tgt_cfr_info_send() - API to send cfr info + * @pdev: pointer to pdev_object + * @head: pointer to cfr info head + * @hlen: head len + * @data: pointer to cfr info data + * @dlen: data len + * @tail: pointer to cfr info tail + * @tlen: tail len + * + * Return: success/failure of cfr info send + */ +uint32_t tgt_cfr_info_send(struct wlan_objmgr_pdev *pdev, void *head, + size_t hlen, void *data, size_t dlen, void *tail, + size_t tlen); + +#ifdef WLAN_ENH_CFR_ENABLE +/** + * tgt_cfr_config_rcc() - API to set RCC + * @pdev: pointer to pdev_object + * @rcc_param: rcc configurations + * + * Return: succcess / failure + */ +QDF_STATUS tgt_cfr_config_rcc(struct wlan_objmgr_pdev *pdev, + struct cfr_rcc_param *rcc_param); + +/** + * tgt_cfr_start_lut_age_timer() - API to start timer to flush aged out LUT + * entries + * @pdev: pointer to pdev_object + * + * Return: None + */ +void tgt_cfr_start_lut_age_timer(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_cfr_stop_lut_age_timer() - API to stop timer to flush aged out LUT + * entries + * @pdev: pointer to pdev_object + * + * Return: None + */ +void tgt_cfr_stop_lut_age_timer(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_cfr_default_ta_ra_cfg() - API to configure default values in TA_RA mode + * entries + * @pdev: pointer to pdev_object + * + * Return: none + */ +void tgt_cfr_default_ta_ra_cfg(struct wlan_objmgr_pdev *pdev, + struct cfr_rcc_param *rcc_param, + bool allvalid, uint16_t reset_cfg); + +/** + * tgt_cfr_dump_lut_enh() - Print all LUT entries + * @pdev: pointer to pdev_object + */ +void tgt_cfr_dump_lut_enh(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_cfr_rx_tlv_process() - Process PPDU status TLVs + * @pdev_obj: pointer to pdev_object + * @nbuf: pointer to cdp_rx_indication_ppdu + */ +void tgt_cfr_rx_tlv_process(struct wlan_objmgr_pdev *pdev, void *nbuf); + +/** + * tgt_cfr_update_global_cfg() - Update global config after successful commit + * @pdev: pointer to pdev_object + */ +void tgt_cfr_update_global_cfg(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_cfr_subscribe_ppdu_desc() - Target interface to + * subscribe/unsubscribe WDI PPDU desc event + * @pdev: pointer to pdev_object + * @is_subscribe: subscribe or unsubscribei + * + * return QDF status + */ +QDF_STATUS tgt_cfr_subscribe_ppdu_desc(struct wlan_objmgr_pdev *pdev, + bool is_subscribe); +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/wlan_cfr_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/wlan_cfr_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..50f2889da0d50e0efe8cfaaea511dc35999c6010 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/wlan_cfr_ucfg_api.h @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_CFR_UCFG_API_H_ +#define _WLAN_CFR_UCFG_API_H_ + +#include +#include +#include + +#define MAX_CFR_PRD (10*60*1000) /* 10 minutes */ + +/** + * ucfg_cfr_start_capture() - function to start cfr capture for connected client + * @pdev: pointer to pdev object + * @peer: pointer to peer object + * @cfr_params: config params to cfr capture + * + * Return: status of start capture. + */ +int ucfg_cfr_start_capture(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct cfr_capture_params *cfr_params); + +/** + * ucfg_cfr_stop_capture() - function to stop cfr capture for connected client + * @pdev: pointer to pdev object + * @peer: pointer to peer object + * + * Return: status of stop capture. + */ +int ucfg_cfr_stop_capture(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer); + +/** + * ucfg_cfr_start_capture_probe_req() - function to start cfr capture for + * unassociated clients + * @pdev: pointer to pdev object + * @unassoc_mac: mac address of un-associated client + * @cfr_params: config params to cfr capture + * + * Return: status of start capture. + */ +int ucfg_cfr_start_capture_probe_req(struct wlan_objmgr_pdev *pdev, + struct qdf_mac_addr *unassoc_mac, + struct cfr_capture_params *params); + +/** + * ucfg_cfr_stop_capture_probe_req() - function to stop cfr capture for + * unassociated cleints + * @pdev: pointer to pdev object + * @unassoc_mac: mac address of un-associated client + * + * Return: status of stop capture. + */ +int ucfg_cfr_stop_capture_probe_req(struct wlan_objmgr_pdev *pdev, + struct qdf_mac_addr *unassoc_mac); + +/** + * ucfg_cfr_list_peers() - Lists total number of peers with cfr capture enabled + * @pdev: pointer to pdev object + * + * Return: number of peers with cfr capture enabled + */ +int ucfg_cfr_list_peers(struct wlan_objmgr_pdev *pdev); + +/** + * ucfg_cfr_set_timer() - function to enable cfr timer + * @pdev: pointer to pdev object + * @value: value to be set + * + * Return: status of timer enable + */ +int ucfg_cfr_set_timer(struct wlan_objmgr_pdev *pdev, uint32_t value); + +/** + * ucfg_cfr_get_timer() - function to get cfr_timer_enable + * @pdev: pointer to pdev object + * + * Return: value of cfr_timer_enable + */ +int ucfg_cfr_get_timer(struct wlan_objmgr_pdev *pdev); + +/** + * ucfg_cfr_stop_indication() - User space API to write cfr stop string + * @vdev - pointer to vdev object + * + * Write stop string and indicate to up layer. + * + * Return: status of write CFR stop string + */ +QDF_STATUS ucfg_cfr_stop_indication(struct wlan_objmgr_vdev *vdev); + +#ifdef WLAN_ENH_CFR_ENABLE +/* Channel capture recipe filters */ +enum capture_type { + RCC_DIRECTED_FTM_FILTER, + RCC_ALL_FTM_ACK_FILTER, + RCC_DIRECTED_NDPA_NDP_FILTER, + RCC_NDPA_NDP_ALL_FILTER, + RCC_TA_RA_FILTER, + RCC_ALL_PACKET_FILTER, + RCC_DIS_ALL_MODE, +}; + +/** + * ucfg_cfr_set_rcc_mode() - function to set RCC mode + * @vdev: pointer to vdev object + * @mode: capture type passed by user + * @value: Enable/Disable capture mode + * + * Return: status if the mode is set or not + */ +QDF_STATUS ucfg_cfr_set_rcc_mode(struct wlan_objmgr_vdev *vdev, + enum capture_type mode, uint8_t value); + +/** + * ucfg_cfr_get_rcc_enabled() - function to get RCC mode + * @vdev: pointer to vdev object + * + * Return: if the rcc is enabled or not + */ +bool ucfg_cfr_get_rcc_enabled(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_cfr_set_tara_config() - function to configure TA/RA address and mask + * @vdev: pointer to vdev object + * @params: user config + * + * Return: status + */ +QDF_STATUS ucfg_cfr_set_tara_config(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params); + +/** + * ucfg_cfr_set_bw_nss() - function to configure nss and bandwidth + * @vdev: pointer to vdev object + * @params: user config + * + * Return: status + */ +QDF_STATUS ucfg_cfr_set_bw_nss(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params); + +/** + * ucfg_cfr_set_frame_type_subtype() - function to configure frame type/subtype + * @vdev: pointer to vdev object + * @params: user config + * + * Return: status + */ +QDF_STATUS +ucfg_cfr_set_frame_type_subtype(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params); + +/** + * ucfg_cfr_set_capture_duration() - function to configure capture duration + * @vdev: pointer to vdev object + * @params: user config + * + * Return: status + */ +QDF_STATUS +ucfg_cfr_set_capture_duration(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params); + +/** + * ucfg_cfr_set_capture_interval() - function to configure capture interval + * @vdev: pointer to vdev object + * @params: user config + * + * Return: status + */ +QDF_STATUS +ucfg_cfr_set_capture_interval(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params); + +/** + * ucfg_cfr_set_en_bitmap() - function to configure 16-bit bitmap in TA_RA mode + * @vdev: pointer to vdev object + * @params: user config + * + * Return: status + */ +QDF_STATUS ucfg_cfr_set_en_bitmap(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params); + +/** + * ucfg_cfr_set_reset_bitmap() - function to clear all 9 params for all 16 + * groups in TA_RA mode + * @vdev: pointer to vdev object + * @params: user config + * + * Return: status + */ +QDF_STATUS ucfg_cfr_set_reset_bitmap(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params); + +/** + * ucfg_cfr_set_ul_mu_user_mask() - function to configure UL MU user mask + * @vdev: pointer to vdev object + * @params: user config + * + * Return: status + */ +QDF_STATUS +ucfg_cfr_set_ul_mu_user_mask(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params); + +/** + * ucfg_cfr_set_freeze_tlv_delay_cnt() - function to configure freeze TLV delay + * count threshold + * @vdev: pointer to vdev object + * @params: user config + * + * Return: status + */ +QDF_STATUS +ucfg_cfr_set_freeze_tlv_delay_cnt(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params); + +/** + * ucfg_cfr_committed_rcc_config() - function to commit user config + * @vdev: pointer to vdev object + * + * Return: status + */ +QDF_STATUS ucfg_cfr_committed_rcc_config(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_cfr_get_cfg() - function to display user config + * @vdev: pointer to vdev object + * + * Return: status + */ +QDF_STATUS ucfg_cfr_get_cfg(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_cfr_rcc_dump_dbg_counters() - function to display PPDU counters + * @vdev: pointer to vdev object + * + * Return: status + */ +QDF_STATUS ucfg_cfr_rcc_dump_dbg_counters(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_cfr_rcc_clr_dbg_counters() - function to clear CFR PPDU counters + * @vdev: pointer to vdev object + * + * Return: status + */ +QDF_STATUS ucfg_cfr_rcc_clr_dbg_counters(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_cfr_rcc_dump_lut() - function to display lookup table + * @vdev: pointer to vdev object + * + * Return: status + */ +QDF_STATUS ucfg_cfr_rcc_dump_lut(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_cfr_subscribe_ppdu_desc() - User space interface to + * subscribe/unsubscribe WDI PPDU desc event + * @pdev: pointer to pdev_object + * @is_subscribe: subscribe or unsubscribei + * + * return QDF status + */ +QDF_STATUS ucfg_cfr_subscribe_ppdu_desc(struct wlan_objmgr_pdev *pdev, + bool is_subscribe); +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/wlan_cfr_utils_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/wlan_cfr_utils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..24eaf26f710e5e76ee18053b052adb6cf67edf78 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/inc/wlan_cfr_utils_api.h @@ -0,0 +1,593 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_CFR_UTILS_API_H_ +#define _WLAN_CFR_UTILS_API_H_ + +#include +#include +#ifdef WLAN_ENH_CFR_ENABLE +#include +#endif + +#define cfr_alert(format, args...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_CFR, format, ## args) + +#define cfr_err(format, args...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_CFR, format, ## args) + +#define cfr_warn(format, args...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_CFR, format, ## args) + +#define cfr_info(format, args...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_CFR, format, ## args) + +#define cfr_debug(format, args...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_CFR, format, ## args) + +#define DBR_EVENT_TIMEOUT_IN_MS_CFR 1 +#define DBR_NUM_RESP_PER_EVENT_CFR 1 +#define MAX_CFR_ENABLED_CLIENTS 10 +#ifdef WLAN_ENH_CFR_ENABLE +#define MAX_CFR_MU_USERS 4 +#define NUM_CHAN_CAPTURE_STATUS 4 +#define NUM_CHAN_CAPTURE_REASON 6 +#define MAX_TA_RA_ENTRIES 16 +#define MAX_RESET_CFG_ENTRY 0xFFFF +#define CFR_INVALID_VDEV_ID 0xff +#endif + +enum cfrmetaversion { + CFR_META_VERSION_NONE, + CFR_META_VERSION_1, + CFR_META_VERSION_2, + CFR_META_VERSION_3, + CFR_META_VERSION_MAX = 0xFF, +}; + +enum cfrdataversion { + CFR_DATA_VERSION_NONE, + CFR_DATA_VERSION_1, + CFR_DATA_VERSION_MAX = 0xFF, +}; + +enum cfrplatformtype { + CFR_PLATFORM_TYPE_NONE, + CFR_PLATFORM_TYPE_MIPS, + CFR_PLATFORM_TYPE_ARM, + CFR_PLATFFORM_TYPE_MAX = 0xFF, +}; + +enum cfrradiotype { + CFR_CAPTURE_RADIO_NONE, + CFR_CAPTURE_RADIO_OSPREY, + CFR_CAPTURE_RADIO_PEAKCOCK, + CFR_CAPTURE_RADIO_SCORPION, + CFR_CAPTURE_RADIO_HONEYBEE, + CFR_CAPTURE_RADIO_DRAGONFLY, + CFR_CAPTURE_RADIO_JET, + CFR_CAPTURE_RADIO_PEREGRINE = 17, + CFR_CAPTURE_RADIO_SWIFT, + CFR_CAPTURE_RADIO_BEELINER, + CFR_CAPTURE_RADIO_CASCADE, + CFR_CAPTURE_RADIO_DAKOTA, + CFR_CAPTURE_RADIO_BESRA, + CFR_CAPTURE_RADIO_HKV2, + CFR_CAPTURE_RADIO_CYP, + CFR_CAPTURE_RADIO_HSP, + CFR_CAPTURE_RADIO_MAX = 0xFF, +}; + +enum ack_capture_mode { + CFR_LEGACY_ACK = 0, + CFR_DUP_LEGACY_ACK = 1, + CFR_HT_ACK = 2, + CFR_VHT_ACK = 3, + CFR_INVALID_ACK, /*Always keep this at last*/ +}; + +/* Similar to WMI_PEER_CFR_CAPTURE_METHOD used in one-shot capture */ +enum cfr_capture_type { + CFR_TYPE_METHOD_NULL_FRAME = 0, + CFR_TYPE_METHOD_NULL_FRAME_WITH_PHASE = 1, + CFR_TYPE_METHOD_PROBE_RESP = 2, + CFR_TYPE_METHOD_TM = 3, + CFR_TYPE_METHOD_FTM = 4, + CFR_TYPE_METHOD_ACK_RESP_TO_TM_FTM = 5, + CFR_TYPE_METHOD_TA_RA_TYPE_FILTER = 6, + CFR_TYPE_METHOD_NDPA_NDP = 7, + CFR_TYPE_METHOD_ALL_PACKET = 8, + /* Add new capture methods before this line */ + CFR_TYPE_METHOD_LAST_VALID, + CFR_TYPE_METHOD_AUTO = 0xff, + CFR_TYPE_METHOD_MAX, +}; + +struct cfr_metadata_version_1 { + u_int8_t peer_addr[QDF_MAC_ADDR_SIZE]; + u_int8_t status; + u_int8_t capture_bw; + u_int8_t channel_bw; + u_int8_t phy_mode; + u_int16_t prim20_chan; + u_int16_t center_freq1; + u_int16_t center_freq2; + u_int8_t capture_mode; + u_int8_t capture_type; + u_int8_t sts_count; + u_int8_t num_rx_chain; + u_int32_t timestamp; + u_int32_t length; +} __attribute__ ((__packed__)); + +#define HOST_MAX_CHAINS 8 + +struct cfr_metadata_version_2 { + u_int8_t peer_addr[QDF_MAC_ADDR_SIZE]; + u_int8_t status; + u_int8_t capture_bw; + u_int8_t channel_bw; + u_int8_t phy_mode; + u_int16_t prim20_chan; + u_int16_t center_freq1; + u_int16_t center_freq2; + u_int8_t capture_mode; + u_int8_t capture_type; + u_int8_t sts_count; + u_int8_t num_rx_chain; + u_int32_t timestamp; + u_int32_t length; + u_int32_t chain_rssi[HOST_MAX_CHAINS]; + u_int16_t chain_phase[HOST_MAX_CHAINS]; +} __attribute__ ((__packed__)); + +#ifdef WLAN_ENH_CFR_ENABLE +struct cfr_metadata_version_3 { + u_int8_t status; + u_int8_t capture_bw; + u_int8_t channel_bw; + u_int8_t phy_mode; + u_int16_t prim20_chan; + u_int16_t center_freq1; + u_int16_t center_freq2; + u_int8_t capture_mode; /* ack_capture_mode */ + u_int8_t capture_type; /* cfr_capture_type */ + u_int8_t sts_count; + u_int8_t num_rx_chain; + u_int64_t timestamp; + u_int32_t length; + u_int8_t is_mu_ppdu; + u_int8_t num_mu_users; + union { + u_int8_t su_peer_addr[QDF_MAC_ADDR_SIZE]; + u_int8_t mu_peer_addr[MAX_CFR_MU_USERS][QDF_MAC_ADDR_SIZE]; + } peer_addr; + u_int32_t chain_rssi[HOST_MAX_CHAINS]; + u_int16_t chain_phase[HOST_MAX_CHAINS]; +} __attribute__ ((__packed__)); +#endif + +struct csi_cfr_header { + u_int32_t start_magic_num; + u_int32_t vendorid; + u_int8_t cfr_metadata_version; + u_int8_t cfr_data_version; + u_int8_t chip_type; + u_int8_t pltform_type; + u_int32_t Reserved; + + union { + struct cfr_metadata_version_1 meta_v1; + struct cfr_metadata_version_2 meta_v2; +#ifdef WLAN_ENH_CFR_ENABLE + struct cfr_metadata_version_3 meta_v3; +#endif + } u; +} __attribute__ ((__packed__)); + +/** + * struct cfr_capture_params - structure to store cfr config param + * bandwidth: bandwitdh of capture + * period: period of capture + * method: enum of method being followed to capture cfr data. 0-QoS null data + */ +struct cfr_capture_params { + u_int8_t bandwidth; + u_int32_t period; + u_int8_t method; +}; + +/** + * struct psoc_cfr - private psoc object for cfr + * psoc_obj: pointer to psoc object + * is_cfr_capable: flag to determine if cfr is enabled or not + */ +struct psoc_cfr { + struct wlan_objmgr_psoc *psoc_obj; + uint8_t is_cfr_capable; +}; + +/** + * struct cfr_wmi_host_mem_chunk - wmi mem chunk related + * vaddr: pointer to virtual address + * paddr: physical address + * len: len of the mem chunk allocated + * req_id: reqid related to the mem chunk + */ +struct cfr_wmi_host_mem_chunk { + uint32_t *vaddr; + uint32_t paddr; + uint32_t len; + uint32_t req_id; +}; + +struct whal_cfir_dma_hdr { + uint16_t + // 'BA' + tag : 8, + // '02', length of header in 4 octet units + length : 6, + // 00 + reserved : 2; + uint16_t + // [16] + upload_done : 1, + // [17:18], 0: invalid, 1: CFR, 2: CIR, 3: DebugH + capture_type : 3, + // [19:20], 0: Legacy, 1: HT, 2: VHT, 3: HE + preamble_type : 2, + // [21:23], 0: 1-stream, 1: 2-stream, ..., 7: 8-stream + nss : 3, + // [24:27], 0: invalid, 1: 1-chain, 2: 2-chain, etc. + num_chains : 3, + // [28:30], 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 MHz + upload_pkt_bw : 3, // [31] + sw_peer_id_valid : 1; + uint16_t + sw_peer_id : 16; // [15:0] + uint16_t + phy_ppdu_id : 16; // [15:0] +}; + +#define MAX_LUT_ENTRIES 140 /* For HKv2 136 is max */ + +/** + * struct look_up_table - Placeholder for 2 asynchronous events (DBR and + * TXRX event) + * dbr_recv: Indicates whether WMI for DBR completion is received or not + * tx_recv: Indicates whether WMI for TX completion (or) WDI event for RX + * status is received or not + * data: pointer to CFR data that ucode DMAs to host memory + * data_len: length of CFR data DMAed by ucode + * dbr_ppdu_id: PPDU id retrieved from DBR completion WMI event + * tx_ppdu_id: PPDU id retrieved from WMI TX completion event (or) PPDU status + * TLV + * dbr_address: Physical address of the CFR data dump retrieved from DBR + * completion WMI event + * tx_address1: Physical address of the CFR data from TX/RX event + * tx_address2: Physical address of the CFR data from TX/RX event + * csi_cfr_header: CFR header constructed by host + * whal_cfir_enhanced_hdr: CFR header constructed by ucode + * tx_tstamp: Timestamp when TX/RX event was received + * dbr_tstamp: Timestamp when DBR completion event was received + * header_length: Length of header DMAed by ucode in words + * payload_length: Length of CFR payload + */ +struct look_up_table { + bool dbr_recv; + bool tx_recv; + uint8_t *data; /* capture payload */ + uint32_t data_len; /* capture len */ + uint16_t dbr_ppdu_id; /* ppdu id from dbr */ + uint16_t tx_ppdu_id; /* ppdu id from TX event */ + qdf_dma_addr_t dbr_address; /* capture len */ + uint32_t tx_address1; /* capture len */ + uint32_t tx_address2; /* capture len */ + struct csi_cfr_header header; + struct whal_cfir_dma_hdr dma_hdr; + uint64_t txrx_tstamp; + uint64_t dbr_tstamp; + uint32_t header_length; + uint32_t payload_length; +}; + +struct unassoc_pool_entry { + struct qdf_mac_addr mac; + struct cfr_capture_params cfr_params; + bool is_valid; +}; + +#ifdef WLAN_ENH_CFR_ENABLE +/** + * struct ta_ra_cfr_cfg - structure to store configuration of 16 groups in + * M_TA_RA mode + * filter_group_id: Filter group number for which the below filters needs to be + * applied + * bw: CFR capture will be done for packets matching the bandwidths specified + * within this bitmask + * nss: CFR capture will be done for packets matching the Nss specified within + * this bitmask + * valid_ta: Ta_addr is valid if set + * valid_ta_mask: Ta_addr_mask is valid if set + * valid_ra: Ra_addr is valid if set + * valid_ra_mask: Ra_addr_mask is valid if set + * valid_bw_mask: Bandwidth is valid if set + * valid_nss_mask: NSS is valid if set + * valid_mgmt_subtype: Mgmt_subtype is valid if set + * valid_ctrl_subtype: Ctrl_subtype is valid if set + * valid_data_subtype: Data_subtype is valid if set + * mgmt_subtype_filter: Managments Packets matching the subtype filter + * categories will be filtered in by MAC for CFR capture. + * ctrl_subtype_filter: Control Packets matching the subtype filter + * categories will be filtered in by MAC for CFR capture. + * data_subtype_filter: Data Packets matching the subtype filter + * categories will be filtered in by MAC for CFR capture. + * tx_addr: Packets whose transmitter address matches (tx_addr & tx_addr_mask) + * will be filtered in by MAC + * tx_addr_mask: Packets whose transmitter address matches (tx_addr & + * tx_addr_mask) will be filtered in by MAC + * rx_addr: Packets whose receiver address matches (rx_addr & rx_addr_mask) + * will be filtered in by MAC + * rx_addr_mask: Packets whose receiver address matches (rx_addr & + * rx_addr_mask) will be filtered in by MAC + */ +struct ta_ra_cfr_cfg { + uint8_t filter_group_id; + uint16_t bw :5, + nss :8, + rsvd0 :3; + uint16_t valid_ta :1, + valid_ta_mask :1, + valid_ra :1, + valid_ra_mask :1, + valid_bw_mask :1, + valid_nss_mask :1, + valid_mgmt_subtype :1, + valid_ctrl_subtype :1, + valid_data_subtype :1, + rsvd1 :7; + uint16_t mgmt_subtype_filter; + uint16_t ctrl_subtype_filter; + uint16_t data_subtype_filter; + uint8_t tx_addr[QDF_MAC_ADDR_SIZE]; + uint8_t rx_addr[QDF_MAC_ADDR_SIZE]; + uint8_t tx_addr_mask[QDF_MAC_ADDR_SIZE]; + uint8_t rx_addr_mask[QDF_MAC_ADDR_SIZE]; + +} qdf_packed; + +/** + * struct cfr_rcc_param - structure to store cfr config param + * pdev_id: pdev_id for identifying the MAC + * vdev_id: vdev_id of current rcc configures + * srng_id: srng id of current rcc configures + * capture_duration: Capture Duration field for which CFR capture has to happen, + * in microsecond units + * capture_interval: Capture interval field which is time in between + * consecutive CFR capture, in microsecond units + * ul_mu_user_mask_lower: Bitfields indicates which of the users in the current + * UL MU tranmission are enabled for CFR capture. + * ul_mu_user_mask_upper: This is contiuation of the above lower mask. + * freeze_tlv_delay_cnt_en: Enable Freeze TLV delay counter in MAC + * freeze_tlv_delay_cnt_thr: Indicates the number of consecutive Rx packets to + * be skipped before CFR capture is enabled again. + * filter_group_bitmap: Bitfields set indicates which of the CFR group config is + * enabled + * m_directed_ftm: Filter Directed FTM ACK frames for CFR capture + * m_all_ftm_ack: Filter All FTM ACK frames for CFR capture + * m_ndpa_ndp_directed: Filter NDPA NDP Directed Frames for CFR capture + * m_ndpa_ndp_all: Filter all NDPA NDP for CFR capture + * m_ta_ra_filter: Filter Frames based on TA/RA/Subtype as provided in CFR Group + * config + * m_all_packet: Filter in All packets for CFR Capture + * num_grp_tlvs: Indicates the number of groups in M_TA_RA mode, that have + * changes in the current commit session, use to construct WMI group TLV(s) + * curr: Placeholder for M_TA_RA group config in current commit session + * modified_in_curr_session: Bitmap indicating number of groups in M_TA_RA mode + * that have changed in current commit session. + */ +struct cfr_rcc_param { + uint8_t pdev_id; + uint8_t vdev_id; + uint8_t srng_id; + uint32_t capture_duration; + uint32_t capture_interval; + uint32_t ul_mu_user_mask_lower; + uint32_t ul_mu_user_mask_upper; + uint16_t freeze_tlv_delay_cnt_en :1, + freeze_tlv_delay_cnt_thr :8, + rsvd0 :7; + uint16_t filter_group_bitmap; + uint8_t m_directed_ftm : 1, + m_all_ftm_ack : 1, + m_ndpa_ndp_directed : 1, + m_ndpa_ndp_all : 1, + m_ta_ra_filter : 1, + m_all_packet : 1, + rsvd1 : 2; + uint8_t num_grp_tlvs; + + struct ta_ra_cfr_cfg curr[MAX_TA_RA_ENTRIES]; + uint16_t modified_in_curr_session; +}; +#endif /* WLAN_ENH_CFR_ENABLE */ + +/** + * struct pdev_cfr - private pdev object for cfr + * pdev_obj: pointer to pdev object + * is_cfr_capable: flag to determine if cfr is enabled or not + * cfr_timer_enable: flag to enable/disable timer + * chip_type: chip type which is defined in enum cfrradiotype + * cfr_mem_chunk: Region of memory used for storing cfr data + * cfr_max_sta_count: Maximum stations supported in one-shot capture mode + * num_subbufs: No. of sub-buffers used in relayfs + * subbuf_size: Size of sub-buffer used in relayfs + * chan_ptr: Channel in relayfs + * dir_ptr: Parent directory of relayfs file + * lut: lookup table used to store asynchronous DBR and TX/RX events for + * correlation + * lut_num: Number of lut + * dbr_buf_size: Size of DBR completion buffer + * dbr_num_bufs: No. of DBR completions + * tx_evt_cnt: No. of TX completion events till CFR stop was issued + * total_tx_evt_cnt: No. of Tx completion events since wifi was up + * dbr_evt_cnt: No. of WMI DBR completion events + * release_cnt: No. of CFR data buffers relayed to userspace + * rcc_param: Structure to store CFR config for the current commit session + * global: Structure to store accumulated CFR config + * rx_tlv_evt_cnt: Number of CFR WDI events from datapath + * lut_age_timer: Timer to flush pending TXRX/DBR events in lookup table + * lut_timer_init: flag to determine if lut_age_timer is initialized or not + * is_cfr_rcc_capable: Flag to determine if RCC is enabled or not. + * flush_dbr_cnt: No. of un-correlated DBR completions flushed when a newer PPDU + * is correlated successfully with newer DBR completion + * invalid_dma_length_cnt: No. of buffers for which CFR DMA header length (or) + * data length was invalid + * flush_timeout_dbr_cnt: No. of DBR completion flushed out in ageout logic + * clear_txrx_event: No. of PPDU status TLVs over-written in LUT + * unassoc_pool: Pool of un-associated clients used when capture method is + * CFR_CAPTURE_METHOD_PROBE_RESPONSE + * last_success_tstamp: DBR timestamp which indicates that both DBR and TX/RX + * events have been received successfully. + * cfr_dma_aborts: No. of CFR DMA aborts in ucode + */ +/* + * To be extended if we get more capbality info + * from FW's extended service ready event. + */ +struct pdev_cfr { + struct wlan_objmgr_pdev *pdev_obj; + uint8_t is_cfr_capable; + uint8_t cfr_timer_enable; + uint8_t chip_type; + struct cfr_wmi_host_mem_chunk cfr_mem_chunk; + uint16_t cfr_max_sta_count; + uint16_t cfr_current_sta_count; + uint32_t num_subbufs; + uint32_t subbuf_size; + qdf_streamfs_chan_t chan_ptr; + qdf_dentry_t dir_ptr; + struct look_up_table **lut; + uint32_t lut_num; + uint32_t dbr_buf_size; + uint32_t dbr_num_bufs; + uint64_t tx_evt_cnt; + uint64_t total_tx_evt_cnt; + uint64_t dbr_evt_cnt; + uint64_t release_cnt; +#ifdef WLAN_ENH_CFR_ENABLE + struct cfr_rcc_param rcc_param; + struct ta_ra_cfr_cfg global[MAX_TA_RA_ENTRIES]; + uint64_t rx_tlv_evt_cnt; + qdf_timer_t lut_age_timer; + uint8_t lut_timer_init; + uint8_t is_cfr_rcc_capable; + uint64_t flush_dbr_cnt; + uint64_t invalid_dma_length_cnt; + uint64_t flush_timeout_dbr_cnt; + uint64_t clear_txrx_event; + uint64_t last_success_tstamp; + uint64_t cfr_dma_aborts; +#endif + struct unassoc_pool_entry unassoc_pool[MAX_CFR_ENABLED_CLIENTS]; +}; + +#define PEER_CFR_CAPTURE_ENABLE 1 +#define PEER_CFR_CAPTURE_DISABLE 0 +/** + * struct peer_cfr - private peer object for cfr + * peer_obj: pointer to peer_obj + * request: Type of request (start/stop) + * bandwidth: bandwitdth of capture for this peer + * capture_method: enum determining type of cfr data capture. + * 0-Qos null data + */ +struct peer_cfr { + struct wlan_objmgr_peer *peer_obj; + u_int8_t request; /* start/stop */ + u_int8_t bandwidth; + u_int32_t period; + u_int8_t capture_method; +}; + +/** + * cfr_initialize_pdev() - cfr initialize pdev + * @pdev: Pointer to pdev_obj + * + * Return: status of cfr pdev init + */ +QDF_STATUS cfr_initialize_pdev(struct wlan_objmgr_pdev *pdev); + +/** + * cfr_deinitialize_pdev() - cfr deinitialize pdev + * @pdev: Pointer to pdev_obj + * + * Return: status of cfr pdev deinit + */ +QDF_STATUS cfr_deinitialize_pdev(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_cfr_init() - Global init for cfr. + * + * Return: status of global init pass/fail + */ +QDF_STATUS wlan_cfr_init(void); + +/** + * wlan_cfr_deinit() - Global de-init for cfr. + * + * Return: status of global de-init pass/fail + */ +QDF_STATUS wlan_cfr_deinit(void); + +/** + * wlan_cfr_pdev_open() - pdev_open function for cfr. + * @pdev: pointer to pdev object + * + * Return: status of pdev_open pass/fail + */ +QDF_STATUS wlan_cfr_pdev_open(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_cfr_pdev_close() - pdev_close function for cfr. + * @pdev: pointer to pdev object + * + * Return: status of pdev_close pass/fail + */ +QDF_STATUS wlan_cfr_pdev_close(struct wlan_objmgr_pdev *pdev); + +/** + * count_set_bits() - function to count set bits in a bitmap + * @value: input bitmap + * + * Return: No. of set bits + */ +uint8_t count_set_bits(uint32_t value); + +#ifdef WLAN_ENH_CFR_ENABLE +/** + * wlan_cfr_rx_tlv_process() - Process PPDU status TLVs and store info in + * lookup table + * @pdev_obj: PDEV object + * @nbuf: ppdu info + * + * Return: none + */ +void wlan_cfr_rx_tlv_process(struct wlan_objmgr_pdev *pdev, void *nbuf); +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/src/wlan_cfr_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/src/wlan_cfr_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..3504ee5abd95af7f0377dc14dd5c36c4c78c0719 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/src/wlan_cfr_tgt_api.c @@ -0,0 +1,307 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Layer b/w umac and target_if (ol) txops + * It contains wrapers for txops + */ + +#include +#include +#include +#include + +uint32_t tgt_cfr_info_send(struct wlan_objmgr_pdev *pdev, void *head, + size_t hlen, void *data, size_t dlen, void *tail, + size_t tlen) +{ + struct pdev_cfr *pa; + uint32_t status; + + pa = wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_CFR); + + if (pa == NULL) { + cfr_err("pdev_cfr is NULL\n"); + return -1; + } + + if (head) + status = cfr_streamfs_write(pa, (const void *)head, hlen); + + if (data) + status = cfr_streamfs_write(pa, (const void *)data, dlen); + + if (tail) + status = cfr_streamfs_write(pa, (const void *)tail, tlen); + + + /* finalise the write */ + status = cfr_streamfs_flush(pa); + + return status; +} + +void tgt_cfr_support_set(struct wlan_objmgr_psoc *psoc, uint32_t value) +{ + struct psoc_cfr *cfr_sc; + + if (psoc == NULL) + return; + + cfr_sc = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_CFR); + if (cfr_sc == NULL) + return; + + cfr_sc->is_cfr_capable = !!value; + cfr_debug("CFR:%s FW support advert=%d\n", __func__, + cfr_sc->is_cfr_capable); +} + +static inline struct wlan_lmac_if_cfr_tx_ops * + wlan_psoc_get_cfr_txops(struct wlan_objmgr_psoc *psoc) +{ + return &((psoc->soc_cb.tx_ops.cfr_tx_ops)); +} + +int tgt_cfr_get_target_type(struct wlan_objmgr_psoc *psoc) +{ + uint32_t target_type = 0; + struct wlan_lmac_if_target_tx_ops *target_type_tx_ops; + + target_type_tx_ops = &psoc->soc_cb.tx_ops.target_tx_ops; + + if (target_type_tx_ops->tgt_get_tgt_type) + target_type = target_type_tx_ops->tgt_get_tgt_type(psoc); + + return target_type; +} + +int tgt_cfr_init_pdev(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_lmac_if_cfr_tx_ops *cfr_tx_ops = NULL; + int status = 0; + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + cfr_tx_ops = wlan_psoc_get_cfr_txops(psoc); + + if (cfr_tx_ops->cfr_init_pdev) + status = cfr_tx_ops->cfr_init_pdev(psoc, pdev); + + if (status != 0) + cfr_err("Error occurred with exit code %d\n", status); + + return status; +} + +int tgt_cfr_deinit_pdev(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_lmac_if_cfr_tx_ops *cfr_tx_ops = NULL; + int status = 0; + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + cfr_tx_ops = wlan_psoc_get_cfr_txops(psoc); + + if (cfr_tx_ops->cfr_deinit_pdev) + status = cfr_tx_ops->cfr_deinit_pdev(psoc, pdev); + + if (status != 0) + cfr_err("Error occurred with exit code %d\n", status); + + return status; +} + +int tgt_cfr_start_capture(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct cfr_capture_params *cfr_params) +{ + struct wlan_lmac_if_cfr_tx_ops *cfr_tx_ops = NULL; + int status = 0; + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + cfr_tx_ops = wlan_psoc_get_cfr_txops(psoc); + + if (cfr_tx_ops->cfr_start_capture) + status = cfr_tx_ops->cfr_start_capture(pdev, peer, cfr_params); + + if (status != 0) + cfr_err("Error occurred with exit code %d\n", status); + + return status; +} + +int tgt_cfr_stop_capture(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer) +{ + struct wlan_lmac_if_cfr_tx_ops *cfr_tx_ops = NULL; + int status = 0; + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + cfr_tx_ops = wlan_psoc_get_cfr_txops(psoc); + + if (cfr_tx_ops->cfr_stop_capture) + status = cfr_tx_ops->cfr_stop_capture(pdev, peer); + + if (status != 0) + cfr_err("Error occurred with exit code %d\n", status); + + return status; +} + +int +tgt_cfr_enable_cfr_timer(struct wlan_objmgr_pdev *pdev, uint32_t cfr_timer) +{ + int status = 0; + struct wlan_lmac_if_cfr_tx_ops *cfr_tx_ops = NULL; + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + cfr_tx_ops = wlan_psoc_get_cfr_txops(psoc); + + if (cfr_tx_ops->cfr_enable_cfr_timer) + status = cfr_tx_ops->cfr_enable_cfr_timer(pdev, cfr_timer); + + if (status != 0) + cfr_err("Error occurred with exit code %d\n", status); + + return status; +} + +#ifdef WLAN_ENH_CFR_ENABLE +QDF_STATUS +tgt_cfr_config_rcc(struct wlan_objmgr_pdev *pdev, + struct cfr_rcc_param *rcc_param) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct wlan_lmac_if_cfr_tx_ops *cfr_tx_ops = NULL; + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + cfr_tx_ops = wlan_psoc_get_cfr_txops(psoc); + + if (cfr_tx_ops->cfr_config_rcc) + status = cfr_tx_ops->cfr_config_rcc(pdev, rcc_param); + + if (status != QDF_STATUS_SUCCESS) + cfr_err("Error occurred with exit code %d\n", status); + + return status; +} + +void tgt_cfr_start_lut_age_timer(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_lmac_if_cfr_tx_ops *cfr_tx_ops = NULL; + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + cfr_err("Invalid PSOC: Flush LUT Timer cannot be started\n"); + return; + } + + cfr_tx_ops = wlan_psoc_get_cfr_txops(psoc); + + if (cfr_tx_ops->cfr_start_lut_timer) + cfr_tx_ops->cfr_start_lut_timer(pdev); +} + +void tgt_cfr_stop_lut_age_timer(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_lmac_if_cfr_tx_ops *cfr_tx_ops = NULL; + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + cfr_err("Invalid PSOC: Flush LUT Timer cannot be stopped\n"); + return; + } + + cfr_tx_ops = wlan_psoc_get_cfr_txops(psoc); + + if (cfr_tx_ops->cfr_stop_lut_timer) + cfr_tx_ops->cfr_stop_lut_timer(pdev); +} + +void tgt_cfr_default_ta_ra_cfg(struct wlan_objmgr_pdev *pdev, + struct cfr_rcc_param *rcc_param, + bool allvalid, uint16_t reset_cfg) +{ + struct wlan_lmac_if_cfr_tx_ops *cfr_tx_ops = NULL; + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + cfr_tx_ops = wlan_psoc_get_cfr_txops(psoc); + + if (cfr_tx_ops->cfr_default_ta_ra_cfg) + cfr_tx_ops->cfr_default_ta_ra_cfg(rcc_param, + allvalid, reset_cfg); +} + +void tgt_cfr_dump_lut_enh(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_lmac_if_cfr_tx_ops *cfr_tx_ops = NULL; + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + cfr_tx_ops = wlan_psoc_get_cfr_txops(psoc); + + if (cfr_tx_ops->cfr_dump_lut_enh) + cfr_tx_ops->cfr_dump_lut_enh(pdev); +} + +void tgt_cfr_rx_tlv_process(struct wlan_objmgr_pdev *pdev, void *nbuf) +{ + struct wlan_lmac_if_cfr_tx_ops *cfr_tx_ops = NULL; + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + cfr_tx_ops = wlan_psoc_get_cfr_txops(psoc); + + if (cfr_tx_ops->cfr_rx_tlv_process) + cfr_tx_ops->cfr_rx_tlv_process(pdev, nbuf); +} + +void tgt_cfr_update_global_cfg(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_lmac_if_cfr_tx_ops *cfr_tx_ops = NULL; + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + cfr_err("Invalid PSOC:Cannot update global config.\n"); + return; + } + + cfr_tx_ops = wlan_psoc_get_cfr_txops(psoc); + + if (cfr_tx_ops->cfr_update_global_cfg) + cfr_tx_ops->cfr_update_global_cfg(pdev); +} + +QDF_STATUS tgt_cfr_subscribe_ppdu_desc(struct wlan_objmgr_pdev *pdev, + bool is_subscribe) +{ + struct wlan_lmac_if_cfr_tx_ops *cfr_tx_ops = NULL; + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + cfr_err("Invalid psoc\n"); + return QDF_STATUS_E_INVAL; + } + + cfr_tx_ops = wlan_psoc_get_cfr_txops(psoc); + + if (cfr_tx_ops->cfr_subscribe_ppdu_desc) + return cfr_tx_ops->cfr_subscribe_ppdu_desc(pdev, + is_subscribe); + + return QDF_STATUS_SUCCESS; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/src/wlan_cfr_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/src/wlan_cfr_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..7ae271105e2d8928627b19cc65aa5ae51cd9db24 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/src/wlan_cfr_ucfg_api.c @@ -0,0 +1,1124 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "cfr_defs_i.h" +#include +#include +#include +#include +#include +#ifdef WLAN_ENH_CFR_ENABLE +#include "cdp_txrx_ctrl.h" +#endif + +int ucfg_cfr_start_capture(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct cfr_capture_params *params) +{ + int status; + struct pdev_cfr *pa; + struct peer_cfr *pe; + + pa = wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_CFR); + if (NULL == pa) { + cfr_err("PDEV cfr object is NULL!\n"); + return -EINVAL; + } + + if (!(pa->is_cfr_capable)) { + qdf_info("cfr is not supported on this chip\n"); + return -EINVAL; + } + + /* Get peer private object */ + pe = wlan_objmgr_peer_get_comp_private_obj(peer, WLAN_UMAC_COMP_CFR); + if (NULL == pe) { + cfr_err("PEER cfr object is NULL!\n"); + return -EINVAL; + } + + if ((params->period < 0) || (params->period > MAX_CFR_PRD) || + (params->period % 10)) { + cfr_err("Invalid period value: %d\n", params->period); + return -EINVAL; + } + + if (!(params->period) && (pa->cfr_timer_enable)) { + cfr_err("Single shot capture is not allowed during periodic capture\n"); + return -EINVAL; + } + + if ((params->period) && !(pa->cfr_timer_enable)) { + cfr_err("Global periodic timer is not enabled, configure global cfr timer\n"); + } + + if (params->period) { + if (pa->cfr_current_sta_count == pa->cfr_max_sta_count) { + qdf_info("max periodic cfr clients reached\n"); + return -EINVAL; + } + if (!(pe->request)) + pa->cfr_current_sta_count++; + } + + status = tgt_cfr_start_capture(pdev, peer, params); + + if (status == 0) { + pe->bandwidth = params->bandwidth; + pe->period = params->period; + pe->capture_method = params->method; + pe->request = PEER_CFR_CAPTURE_ENABLE; + } else + pa->cfr_current_sta_count--; + + return status; +} + +int ucfg_cfr_start_capture_probe_req(struct wlan_objmgr_pdev *pdev, + struct qdf_mac_addr *unassoc_mac, + struct cfr_capture_params *params) +{ + int idx, idx_to_insert = -1; + struct pdev_cfr *pa; + + pa = wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_CFR); + if (!pa) { + cfr_err("Pdev cfr object is null!"); + return -EINVAL; + } + + if (!(pa->is_cfr_capable)) { + cfr_err("CFR is not supported on this chip"); + return -EINVAL; + } + + if (pa->cfr_current_sta_count == pa->cfr_max_sta_count) { + cfr_err("max cfr cleint reached"); + return -EINVAL; + } + + for (idx = 0; idx < MAX_CFR_ENABLED_CLIENTS; idx++) { + /* Store first invalid entry's index, to add mac entry if not + * already present. + */ + if (idx_to_insert < 0) { + if (pa->unassoc_pool[idx].is_valid != true) + idx_to_insert = idx; + } + + /* Add new mac entry only if it is not present. If already + * present, update the capture parameters + */ + if (qdf_mem_cmp(&pa->unassoc_pool[idx].mac, unassoc_mac, + sizeof(struct qdf_mac_addr)) == 0) { + cfr_info("Node already present. Updating params"); + qdf_mem_copy(&pa->unassoc_pool[idx].cfr_params, + params, + sizeof(struct cfr_capture_params)); + pa->unassoc_pool[idx].is_valid = true; + return 0; + } + } + + if (idx_to_insert < 0) { + /* All the entries in the table are valid. So we have reached + * max client capacity. To add a new client, capture on one of + * the clients in table has to be stopped. + */ + cfr_err("Maximum client capacity reached"); + return -EINVAL; + } + + /* If control reaches here, we did not find mac in the table + * and we have atleast one free entry in table. + * Add the entry at index = idx_to_insert + */ + qdf_mem_copy(&pa->unassoc_pool[idx_to_insert].mac, + unassoc_mac, sizeof(struct qdf_mac_addr)); + qdf_mem_copy(&pa->unassoc_pool[idx_to_insert].cfr_params, + params, sizeof(struct cfr_capture_params)); + pa->unassoc_pool[idx_to_insert].is_valid = true; + pa->cfr_current_sta_count++; + + return 0; +} + +int ucfg_cfr_stop_capture_probe_req(struct wlan_objmgr_pdev *pdev, + struct qdf_mac_addr *unassoc_mac) +{ + struct pdev_cfr *pa; + int idx; + + pa = wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_CFR); + if (!pa) { + cfr_err("Pdev cfr object is NULL!\n"); + return -EINVAL; + } + + if (!(pa->is_cfr_capable)) { + cfr_err("CFR is not supported on this chip\n"); + return -EINVAL; + } + + for (idx = 0; idx < MAX_CFR_ENABLED_CLIENTS; idx++) { + /* Remove mac only if it is present */ + if (qdf_mem_cmp(&pa->unassoc_pool[idx].mac, unassoc_mac, + sizeof(struct qdf_mac_addr)) == 0) { + qdf_mem_zero(&pa->unassoc_pool[idx], + sizeof(struct unassoc_pool_entry)); + pa->cfr_current_sta_count--; + return 0; + } + } + + /* If mac was present in pool it would have been deleted in the + * above loop and returned from there. + * If control reached here, mac was not found. So, ignore the request. + */ + cfr_err("Trying to delete mac not present in pool. Ignoring request."); + return 0; +} + +int ucfg_cfr_set_timer(struct wlan_objmgr_pdev *pdev, uint32_t value) +{ + struct pdev_cfr *pa; + + pa = wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_CFR); + if (pa == NULL) { + cfr_err("PDEV cfr object is NULL!\n"); + return -EINVAL; + } + + if (!(pa->is_cfr_capable)) { + qdf_info("cfr is not supported on this chip\n"); + return -EINVAL; + } + + return tgt_cfr_enable_cfr_timer(pdev, value); +} +qdf_export_symbol(ucfg_cfr_set_timer); + +int ucfg_cfr_get_timer(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_cfr *pa; + + pa = wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_CFR); + if (pa == NULL) { + cfr_err("PDEV cfr object is NULL!\n"); + return -EINVAL; + } + + if (!(pa->is_cfr_capable)) { + qdf_info("cfr is not supported on this chip\n"); + return -EINVAL; + } + + return pa->cfr_timer_enable; +} +qdf_export_symbol(ucfg_cfr_get_timer); + +int ucfg_cfr_stop_capture(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer) +{ + int status; + struct peer_cfr *pe; + struct pdev_cfr *pa; + + pa = wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_CFR); + if (pa == NULL) { + cfr_err("PDEV cfr object is NULL!\n"); + return -EINVAL; + } + + if (!(pa->is_cfr_capable)) { + qdf_info("cfr is not supported on this chip\n"); + return -EINVAL; + } + + pe = wlan_objmgr_peer_get_comp_private_obj(peer, WLAN_UMAC_COMP_CFR); + if (pe == NULL) { + cfr_err("PEER cfr object is NULL!\n"); + return -EINVAL; + } + + if ((pe->period) && (pe->request)) + status = tgt_cfr_stop_capture(pdev, peer); + else { + qdf_info("periodic cfr not started for the client\n"); + return -EINVAL; + } + + if (status == 0) { + pe->request = PEER_CFR_CAPTURE_DISABLE; + pa->cfr_current_sta_count--; + } + + return status; +} + +int ucfg_cfr_list_peers(struct wlan_objmgr_pdev *pdev) +{ + return 0; +} + +QDF_STATUS ucfg_cfr_stop_indication(struct wlan_objmgr_vdev *vdev) +{ + if (!vdev) { + cfr_err("null vdev"); + return QDF_STATUS_E_INVAL; + } + + return cfr_stop_indication(vdev); +} + +#ifdef WLAN_ENH_CFR_ENABLE + +static inline +QDF_STATUS dev_sanity_check(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_pdev **ppdev, + struct pdev_cfr **ppcfr) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!vdev) { + cfr_err("vdev is NULL\n"); + return QDF_STATUS_E_NULL_VALUE; + } + + *ppdev = wlan_vdev_get_pdev(vdev); + + if (!*ppdev) { + cfr_err("pdev is NULL\n"); + return QDF_STATUS_E_NULL_VALUE; + } + + status = wlan_objmgr_pdev_try_get_ref(*ppdev, WLAN_CFR_ID); + if (status != QDF_STATUS_SUCCESS) { + cfr_err("Failed to get pdev reference\n"); + return status; + } + + *ppcfr = wlan_objmgr_pdev_get_comp_private_obj(*ppdev, + WLAN_UMAC_COMP_CFR); + + if (!(*ppcfr)) { + cfr_err("pdev object for CFR is null"); + wlan_objmgr_pdev_release_ref(*ppdev, WLAN_CFR_ID); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!(*ppcfr)->is_cfr_rcc_capable) { + cfr_err("cfr is not supported on this chip\n"); + wlan_objmgr_pdev_release_ref(*ppdev, WLAN_CFR_ID); + return QDF_STATUS_E_NOSUPPORT; + } + + return status; +} + +/* + * This is needed only in case of m_ta_ra_filter mode. + * If user wants to reset the group configurations to default values, + * then this handler will come into action. + * + * If user wants to reset the configurations of 0th, 1st and 3rd group, + * then the input should be : + * + * wlanconfig ath0 cfr reset_cfg 0xb + * + */ + +QDF_STATUS ucfg_cfr_set_reset_bitmap(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return status; + + pcfr->rcc_param.modified_in_curr_session |= params->reset_cfg; + tgt_cfr_default_ta_ra_cfg(pdev, &pcfr->rcc_param, + true, params->reset_cfg); + + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +/* + * This is needed only in case of m_ta_ra_filter mode. + * After providing all the group configurations, user should provide + * the information about which groups need to be enabled. + * Based on that FW will enable the configurations for CFR groups. + * If user has to enable only 0th group, then input should be : + * + * wlanconfig ath0 cfr en_cfg 0x1 + * + * Enable the bitmap from user provided configuration into cfr_rcc_param. + */ + +QDF_STATUS ucfg_cfr_set_en_bitmap(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return status; + + pcfr->rcc_param.filter_group_bitmap = params->en_cfg; + + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +/* + * Copy user provided input for ul_mu_user_mask into cfr_rcc_param. + */ + +QDF_STATUS +ucfg_cfr_set_ul_mu_user_mask(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return status; + + pcfr->rcc_param.ul_mu_user_mask_lower = params->ul_mu_user_mask_lower; + pcfr->rcc_param.ul_mu_user_mask_upper = params->ul_mu_user_mask_upper; + + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +/* + * FREEZE_TLV_DELAY_CNT_* registers are used for FREEZE TLV timeout mechanism + * in MAC side. In case MAC send FREEZE TLV to PHY too late due to + * long AST delay, PHY ucode may not handle it well or it will impact + * next frame’s normal processing, then MAC needs to drop FREEZE TLV + * sending process after reaching the threshold. + * + * This handler will copy user provided input for freeze_tlv_delay_cnt + * into cfr_rcc_param. + */ + +QDF_STATUS +ucfg_cfr_set_freeze_tlv_delay_cnt(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return status; + + pcfr->rcc_param.freeze_tlv_delay_cnt_en = + params->freeze_tlv_delay_cnt_en; + + pcfr->rcc_param.freeze_tlv_delay_cnt_thr = + params->freeze_tlv_delay_cnt_thr; + + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +/* + * Set capture interval from the provided configuration into cfr_rcc_param. + * All fixed parameters are needed to be stored into cfr_rcc_param. + */ + +QDF_STATUS +ucfg_cfr_set_capture_interval(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return status; + + pcfr->rcc_param.capture_interval = params->cap_intvl; + + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +/* + * Set capture duration from the provided configuration into cfr_rcc_param. + * All fixed parameters are needed to be stored into cfr_rcc_param. + */ + +QDF_STATUS +ucfg_cfr_set_capture_duration(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return status; + + pcfr->rcc_param.capture_duration = params->cap_dur; + + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +/* + * Copy user provided group parameters( type/ subtype of mgmt, ctrl, data ) + * into curr_cfg instance of ta_ra_cfr_cfg. + * Set valid mask for the provided configuration. + * Set modified_in_this_session for the particular group. + */ + +QDF_STATUS +ucfg_cfr_set_frame_type_subtype(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + struct ta_ra_cfr_cfg *curr_cfg = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return status; + + /* Populating current config based on user's input */ + curr_cfg = &pcfr->rcc_param.curr[params->grp_id]; + curr_cfg->mgmt_subtype_filter = params->expected_mgmt_subtype; + curr_cfg->ctrl_subtype_filter = params->expected_ctrl_subtype; + curr_cfg->data_subtype_filter = params->expected_data_subtype; + + curr_cfg->valid_mgmt_subtype = 1; + curr_cfg->valid_ctrl_subtype = 1; + curr_cfg->valid_data_subtype = 1; + + qdf_set_bit(params->grp_id, + (unsigned long *) + &pcfr->rcc_param.modified_in_curr_session); + + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +/* + * Copy user provided group parameters( BW and NSS ) + * into curr_cfg instance of ta_ra_cfr_cfg. + * Set valid mask for the provided configuration. + * Set modified_in_this_session for the particular group. + */ + +QDF_STATUS ucfg_cfr_set_bw_nss(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + struct ta_ra_cfr_cfg *curr_cfg = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return status; + + /* Populating current config based on user's input */ + curr_cfg = &pcfr->rcc_param.curr[params->grp_id]; + curr_cfg->bw = params->bw; + curr_cfg->nss = params->nss; + + curr_cfg->valid_bw_mask = 1; + curr_cfg->valid_nss_mask = 1; + + qdf_set_bit(params->grp_id, + (unsigned long *)&pcfr->rcc_param.modified_in_curr_session); + + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +/* + * Copy user provided group parameters( TA, RA, TA_MASK, RA_MASK ) + * into curr_cfg instance of ta_ra_cfr_cfg. + * Set valid mask for the provided configuration. + * Set modified_in_this_session for the particular group. + */ + +QDF_STATUS ucfg_cfr_set_tara_config(struct wlan_objmgr_vdev *vdev, + struct cfr_wlanconfig_param *params) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + struct ta_ra_cfr_cfg *curr_cfg = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return status; + + curr_cfg = &pcfr->rcc_param.curr[params->grp_id]; + qdf_mem_copy(curr_cfg->tx_addr, params->ta, QDF_MAC_ADDR_SIZE); + qdf_mem_copy(curr_cfg->rx_addr, params->ra, QDF_MAC_ADDR_SIZE); + qdf_mem_copy(curr_cfg->tx_addr_mask, + params->ta_mask, QDF_MAC_ADDR_SIZE); + qdf_mem_copy(curr_cfg->rx_addr_mask, + params->ra_mask, QDF_MAC_ADDR_SIZE); + + curr_cfg->valid_ta = 1; + curr_cfg->valid_ta_mask = 1; + curr_cfg->valid_ra = 1; + curr_cfg->valid_ra_mask = 1; + + qdf_set_bit(params->grp_id, + (unsigned long *) + &pcfr->rcc_param.modified_in_curr_session); + + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +static bool cfr_is_filter_enabled(struct cfr_rcc_param *rcc_param) +{ + if (rcc_param->m_directed_ftm || + rcc_param->m_all_ftm_ack || + rcc_param->m_ndpa_ndp_directed || + rcc_param->m_ndpa_ndp_all || + rcc_param->m_ta_ra_filter || + rcc_param->m_all_packet) + return true; + else + return false; +} + +QDF_STATUS ucfg_cfr_get_cfg(struct wlan_objmgr_vdev *vdev) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + struct ta_ra_cfr_cfg *glbl_cfg = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint8_t grp_id; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return status; + if (!cfr_is_filter_enabled(&pcfr->rcc_param)) { + cfr_err(" All RCC modes are disabled.\n"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + return status; + } + + cfr_err("CAPTURE MODE:\n"); + + cfr_err("m_directed_ftm is : %s\n", + pcfr->rcc_param.m_directed_ftm ? + "enabled" : "disabled"); + cfr_err("m_all_ftm_ack is : %s\n", + pcfr->rcc_param.m_all_ftm_ack ? + "enabled" : "disabled"); + cfr_err("m_ndpa_ndp_directed is: %s\n", + pcfr->rcc_param.m_ndpa_ndp_directed ? + "enabled" : "disabled"); + cfr_err("m_ndpa_ndp_all is : %s\n", + pcfr->rcc_param.m_ndpa_ndp_all ? + "enabled" : "disabled"); + cfr_err("m_ta_ra_filter is : %s\n", + pcfr->rcc_param.m_ta_ra_filter ? + "enabled" : "disabled"); + cfr_err("m_all_packet is : %s\n", + pcfr->rcc_param.m_all_packet ? + "enabled" : "disabled"); + + cfr_err("capture duration : %u usec\n", + pcfr->rcc_param.capture_duration); + cfr_err("capture interval : %u usec\n", + pcfr->rcc_param.capture_interval); + cfr_err("UL MU User mask lower : %u\n", + pcfr->rcc_param.ul_mu_user_mask_lower); + cfr_err("UL MU User mask upper : %u\n", + pcfr->rcc_param.ul_mu_user_mask_upper); + cfr_err("Freeze TLV delay count is : %s\n", + pcfr->rcc_param.freeze_tlv_delay_cnt_en ? + "enabled" : "disabled"); + cfr_err("Freeze TLV delay count threshold : %u\n", + pcfr->rcc_param.freeze_tlv_delay_cnt_thr); + cfr_err("Enabled CFG id bitmap : 0x%x\n", + pcfr->rcc_param.filter_group_bitmap); + cfr_err(" Modified cfg id bitmap : 0x%x\n", + pcfr->rcc_param.modified_in_curr_session); + + cfr_err("TARA_CONFIG details:\n"); + + for (grp_id = 0; grp_id < MAX_TA_RA_ENTRIES; grp_id++) { + glbl_cfg = &pcfr->global[grp_id]; + + cfr_err("Config ID: %d\n", grp_id); + cfr_err("Bandwidth :0x%x\n", glbl_cfg->bw); + cfr_err("NSS : 0x%x\n", glbl_cfg->nss); + cfr_err("valid_ta: %d\n", glbl_cfg->valid_ta); + cfr_err("valid_ta_mask: %d\n", glbl_cfg->valid_ta_mask); + cfr_err("valid_ra: %d\n", glbl_cfg->valid_ra); + cfr_err("valid_ra_mask: %d\n", glbl_cfg->valid_ra_mask); + cfr_err("valid_bw_mask: %d\n", glbl_cfg->valid_bw_mask); + cfr_err("valid_nss_mask: %d\n", glbl_cfg->valid_nss_mask); + cfr_err("valid_mgmt_subtype: %d\n", + glbl_cfg->valid_mgmt_subtype); + cfr_err("valid_ctrl_subtype: %d\n", + glbl_cfg->valid_ctrl_subtype); + cfr_err("valid_data_subtype: %d\n", + glbl_cfg->valid_data_subtype); + cfr_err("Mgmt subtype : 0x%x\n", + glbl_cfg->mgmt_subtype_filter); + cfr_err("CTRL subtype : 0x%x\n", + glbl_cfg->ctrl_subtype_filter); + cfr_err("Data subtype : 0x%x\n", + glbl_cfg->data_subtype_filter); + cfr_err("TX Addr: " QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(glbl_cfg->tx_addr)); + cfr_err("TX Addr Mask: " QDF_FULL_MAC_FMT, + QDF_FULL_MAC_REF(glbl_cfg->tx_addr_mask)); + cfr_err("RX Addr: " QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(glbl_cfg->rx_addr)); + cfr_err("RX Addr Mask: " QDF_FULL_MAC_FMT, + QDF_FULL_MAC_REF(glbl_cfg->rx_addr_mask)); + } + + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +static const char *chan_capture_status_to_str(enum chan_capture_status type) +{ + switch (type) { + case CAPTURE_IDLE: + return "CAPTURE_IDLE"; + case CAPTURE_BUSY: + return "CAPTURE_BUSY"; + case CAPTURE_ACTIVE: + return "CAPTURE_ACTIVE"; + case CAPTURE_NO_BUFFER: + return "CAPTURE_NO_BUFFER"; + default: + return "INVALID"; + } +} + +static const +char *mac_freeze_reason_to_str(enum mac_freeze_capture_reason type) +{ + switch (type) { + case FREEZE_REASON_TM: + return "FREEZE_REASON_TM"; + case FREEZE_REASON_FTM: + return "FREEZE_REASON_FTM"; + case FREEZE_REASON_ACK_RESP_TO_TM_FTM: + return "FREEZE_REASON_ACK_RESP_TO_TM_FTM"; + case FREEZE_REASON_TA_RA_TYPE_FILTER: + return "FREEZE_REASON_TA_RA_TYPE_FILTER"; + case FREEZE_REASON_NDPA_NDP: + return "FREEZE_REASON_NDPA_NDP"; + case FREEZE_REASON_ALL_PACKET: + return "FREEZE_REASON_ALL_PACKET"; + default: + return "INVALID"; + } +} + +QDF_STATUS ucfg_cfr_rcc_dump_dbg_counters(struct wlan_objmgr_vdev *vdev) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_objmgr_psoc *psoc = NULL; + struct cdp_cfr_rcc_stats *cfr_rcc_stats = NULL; + uint8_t stats_cnt; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return status; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + cfr_err("psoc is null!"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + return QDF_STATUS_E_NULL_VALUE; + } + + cfr_err("total_tx_evt_cnt = %llu\n", + pcfr->total_tx_evt_cnt); + cfr_err("dbr_evt_cnt = %llu\n", + pcfr->dbr_evt_cnt); + cfr_err("rx_tlv_evt_cnt = %llu\n", + pcfr->rx_tlv_evt_cnt); + cfr_err("release_cnt = %llu\n", + pcfr->release_cnt); + cfr_err("Error cnt:\n"); + cfr_err("flush_dbr_cnt = %llu\n", + pcfr->flush_dbr_cnt); + cfr_err("invalid_dma_length_cnt = %llu\n", + pcfr->invalid_dma_length_cnt); + cfr_err("flush_timeout_dbr_cnt = %llu\n", + pcfr->flush_timeout_dbr_cnt); + cfr_err("PPDU id mismatch for same cookie:\n"); + cfr_err("clear_txrx_event = %llu\n", + pcfr->clear_txrx_event); + cfr_err("cfr_dma_aborts = %llu\n", + pcfr->cfr_dma_aborts); + + cfr_rcc_stats = qdf_mem_malloc(sizeof(struct cdp_cfr_rcc_stats)); + if (!cfr_rcc_stats) { + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + return QDF_STATUS_E_NOMEM; + } + + cdp_get_cfr_dbg_stats(wlan_psoc_get_dp_handle(psoc), + wlan_objmgr_pdev_get_pdev_id(pdev), + cfr_rcc_stats); + + cfr_err("bb_captured_channel_cnt: %llu\n", + cfr_rcc_stats->bb_captured_channel_cnt); + cfr_err("bb_captured_timeout_cnt: %llu\n", + cfr_rcc_stats->bb_captured_timeout_cnt); + cfr_err("rx_loc_info_valid_cnt: %llu\n", + cfr_rcc_stats->rx_loc_info_valid_cnt); + + cfr_err("Channel capture status:\n"); + for (stats_cnt = 0; stats_cnt < CAPTURE_MAX; stats_cnt++) { + cfr_err("%s = %llu\n", + chan_capture_status_to_str(stats_cnt), + cfr_rcc_stats->chan_capture_status[stats_cnt]); + } + + cfr_err("Freeze reason:\n"); + for (stats_cnt = 0; stats_cnt < FREEZE_REASON_MAX; stats_cnt++) { + cfr_err("%s = %llu\n", + mac_freeze_reason_to_str(stats_cnt), + cfr_rcc_stats->reason_cnt[stats_cnt]); + } + + qdf_mem_free(cfr_rcc_stats); + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +QDF_STATUS ucfg_cfr_rcc_clr_dbg_counters(struct wlan_objmgr_vdev *vdev) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_objmgr_psoc *psoc = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return status; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + cfr_err("psoc is null!"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + return QDF_STATUS_E_NULL_VALUE; + } + cdp_cfr_clr_dbg_stats(wlan_psoc_get_dp_handle(psoc), + wlan_objmgr_pdev_get_pdev_id(pdev)); + + pcfr->dbr_evt_cnt = 0; + pcfr->release_cnt = 0; + pcfr->total_tx_evt_cnt = 0; + pcfr->rx_tlv_evt_cnt = 0; + pcfr->flush_dbr_cnt = 0; + pcfr->flush_timeout_dbr_cnt = 0; + pcfr->invalid_dma_length_cnt = 0; + pcfr->clear_txrx_event = 0; + pcfr->cfr_dma_aborts = 0; + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +QDF_STATUS ucfg_cfr_rcc_dump_lut(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!vdev) { + cfr_err("vdev is NULL\n"); + return QDF_STATUS_E_INVAL; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + cfr_err("pdev is NULL\n"); + return QDF_STATUS_E_INVAL; + } + + if (wlan_objmgr_pdev_try_get_ref(pdev, WLAN_CFR_ID) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_INVAL; + } + + cfr_err("LUT table:\n"); + tgt_cfr_dump_lut_enh(pdev); + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +static void cfr_set_filter(struct wlan_objmgr_pdev *pdev, bool enable, + struct cdp_monitor_filter *filter_val) +{ + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + cfr_info("pdev_id=%d\n", wlan_objmgr_pdev_get_pdev_id(pdev)); + + cdp_cfr_filter(wlan_psoc_get_dp_handle(psoc), + wlan_objmgr_pdev_get_pdev_id(pdev), + enable, + filter_val); +} + +/* + * With the initiation of commit command, this handler will be triggered. + * + * Starts the procedure of forming the TLVs. + * If Host succeeds to send WMI command to FW, after TLV processing, then it + * will save the previous CFR configurations into one instance ta_ra_cfr_cfg, + * called glbl_cfg and update the current config to default state for the + * next commit session. + * + * Finally, reset the counter (modified_in_this_session) to 0 before moving to + * next commit session. + * + */ + +QDF_STATUS ucfg_cfr_committed_rcc_config(struct wlan_objmgr_vdev *vdev) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_objmgr_psoc *psoc = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct cdp_monitor_filter filter_val = {0}; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return status; + + psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + cfr_err("psoc is null!"); + return QDF_STATUS_E_NULL_VALUE; + } + + pcfr->rcc_param.vdev_id = wlan_vdev_get_id(vdev); + + /* + * If capture mode is valid, then Host: + * Subscribes for PPDU status TLVs in monitor status ring. + * Sets filter type to either FP or MO, based on the capture mode. + * Starts the LUT_AGE_TIMER of 1sec. + * + * If capture mode is disabled, then Host: + * unsubscribes for PPDU status TLVs in monitor status ring. + * Sets filter type to 0. + * Stops the LUT_AGE_TIMER. + * + */ + + if (cfr_is_filter_enabled(&pcfr->rcc_param)) { + if (pcfr->rcc_param.m_all_ftm_ack) { + filter_val.mode |= MON_FILTER_PASS | + MON_FILTER_OTHER; + filter_val.fp_mgmt |= FILTER_MGMT_ACTION; + filter_val.mo_mgmt |= FILTER_MGMT_ACTION; + } + + if (pcfr->rcc_param.m_ndpa_ndp_all) { + filter_val.mode |= MON_FILTER_PASS | + MON_FILTER_OTHER; + filter_val.fp_ctrl |= FILTER_CTRL_VHT_NDP; + filter_val.mo_ctrl |= FILTER_CTRL_VHT_NDP; + } + + if (pcfr->rcc_param.m_all_packet) { + filter_val.mode |= MON_FILTER_PASS | + MON_FILTER_OTHER; + filter_val.fp_mgmt |= FILTER_MGMT_ALL; + filter_val.mo_mgmt |= FILTER_MGMT_ALL; + filter_val.fp_ctrl |= FILTER_CTRL_ALL; + filter_val.mo_ctrl |= FILTER_CTRL_ALL; + filter_val.fp_data |= FILTER_DATA_ALL; + filter_val.mo_data |= FILTER_DATA_ALL; + } + + /* + * M_TA_RA in monitor other is as intensive as M_ALL pkt + * Support only FP in M_TA_RA mode + */ + if (pcfr->rcc_param.m_ta_ra_filter) { + filter_val.mode |= MON_FILTER_PASS | + MON_FILTER_OTHER; + filter_val.fp_mgmt |= FILTER_MGMT_ALL; + filter_val.mo_mgmt |= FILTER_MGMT_ALL; + filter_val.fp_ctrl |= FILTER_CTRL_ALL; + filter_val.mo_ctrl |= FILTER_CTRL_ALL; + filter_val.fp_data |= FILTER_DATA_ALL; + filter_val.mo_data |= FILTER_DATA_ALL; + } + + if (pcfr->rcc_param.m_directed_ftm) { + filter_val.mode |= MON_FILTER_PASS; + filter_val.fp_mgmt |= FILTER_MGMT_ACTION; + } + + if (pcfr->rcc_param.m_ndpa_ndp_directed) { + filter_val.mode |= MON_FILTER_PASS; + filter_val.fp_ctrl |= FILTER_CTRL_VHT_NDP; + } + + if (!cdp_get_cfr_rcc(wlan_psoc_get_dp_handle(psoc), + wlan_objmgr_pdev_get_pdev_id(pdev))) + tgt_cfr_start_lut_age_timer(pdev); + cfr_set_filter(pdev, 1, &filter_val); + } else { + if (cdp_get_cfr_rcc(wlan_psoc_get_dp_handle(psoc), + wlan_objmgr_pdev_get_pdev_id(pdev))) + tgt_cfr_stop_lut_age_timer(pdev); + cfr_set_filter(pdev, 0, &filter_val); + } + + /* Trigger wmi to start the TLV processing. */ + status = tgt_cfr_config_rcc(pdev, &pcfr->rcc_param); + if (status == QDF_STATUS_SUCCESS) { + cfr_info("CFR commit done\n"); + /* Update global config */ + tgt_cfr_update_global_cfg(pdev); + + /* Bring curr_cfg to default state for next commit session */ + tgt_cfr_default_ta_ra_cfg(pdev, &pcfr->rcc_param, + false, MAX_RESET_CFG_ENTRY); + } else { + cfr_err("CFR commit failed\n"); + } + + pcfr->rcc_param.num_grp_tlvs = 0; + pcfr->rcc_param.modified_in_curr_session = 0; + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +/* + * This handler is used to enable / disable the capture mode. + * + */ +QDF_STATUS ucfg_cfr_set_rcc_mode(struct wlan_objmgr_vdev *vdev, + enum capture_type mode, uint8_t value) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return status; + + switch (mode) { + case RCC_DIRECTED_FTM_FILTER: + pcfr->rcc_param.m_directed_ftm = value; + break; + case RCC_ALL_FTM_ACK_FILTER: + pcfr->rcc_param.m_all_ftm_ack = value; + break; + case RCC_DIRECTED_NDPA_NDP_FILTER: + pcfr->rcc_param.m_ndpa_ndp_directed = value; + break; + case RCC_NDPA_NDP_ALL_FILTER: + pcfr->rcc_param.m_ndpa_ndp_all = value; + break; + case RCC_TA_RA_FILTER: + pcfr->rcc_param.m_ta_ra_filter = value; + break; + case RCC_ALL_PACKET_FILTER: + pcfr->rcc_param.m_all_packet = value; + break; + case RCC_DIS_ALL_MODE: + pcfr->rcc_param.m_directed_ftm = value; + pcfr->rcc_param.m_all_ftm_ack = value; + pcfr->rcc_param.m_ndpa_ndp_directed = value; + pcfr->rcc_param.m_ndpa_ndp_all = value; + pcfr->rcc_param.m_ta_ra_filter = value; + pcfr->rcc_param.m_all_packet = value; + break; + + default: + break; + } + + cfr_debug(" Capture mode set by user: 0x%x\n", value); + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return status; +} + +bool ucfg_cfr_get_rcc_enabled(struct wlan_objmgr_vdev *vdev) +{ + struct pdev_cfr *pcfr = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + bool rcc_enabled = false; + + status = dev_sanity_check(vdev, &pdev, &pcfr); + if (status != QDF_STATUS_SUCCESS) + return false; + + rcc_enabled = cfr_is_filter_enabled(&pcfr->rcc_param); + wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID); + + return rcc_enabled; +} + +QDF_STATUS ucfg_cfr_subscribe_ppdu_desc(struct wlan_objmgr_pdev *pdev, + bool is_subscribe) +{ + return tgt_cfr_subscribe_ppdu_desc(pdev, is_subscribe); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/src/wlan_cfr_utils_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/src/wlan_cfr_utils_api.c new file mode 100644 index 0000000000000000000000000000000000000000..4cb8ea2298e2710d78df20ba9bd0f71524ff0863 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cfr/dispatcher/src/wlan_cfr_utils_api.c @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include "../../core/inc/cfr_defs_i.h" +#include +#include + +QDF_STATUS wlan_cfr_init(void) +{ + if (wlan_objmgr_register_psoc_create_handler(WLAN_UMAC_COMP_CFR, + wlan_cfr_psoc_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_register_psoc_destroy_handler(WLAN_UMAC_COMP_CFR, + wlan_cfr_psoc_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_register_pdev_create_handler(WLAN_UMAC_COMP_CFR, + wlan_cfr_pdev_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_register_pdev_destroy_handler(WLAN_UMAC_COMP_CFR, + wlan_cfr_pdev_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_register_peer_create_handler(WLAN_UMAC_COMP_CFR, + wlan_cfr_peer_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_register_peer_destroy_handler(WLAN_UMAC_COMP_CFR, + wlan_cfr_peer_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cfr_deinit(void) +{ + if (wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_CFR, + wlan_cfr_psoc_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_CFR, + wlan_cfr_psoc_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_CFR, + wlan_cfr_pdev_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_unregister_pdev_destroy_handler(WLAN_UMAC_COMP_CFR, + wlan_cfr_pdev_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_unregister_peer_create_handler(WLAN_UMAC_COMP_CFR, + wlan_cfr_peer_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_unregister_peer_destroy_handler(WLAN_UMAC_COMP_CFR, + wlan_cfr_peer_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cfr_pdev_open(struct wlan_objmgr_pdev *pdev) +{ + int status; + + /* chip specific init */ + status = tgt_cfr_init_pdev(pdev); + + if (status != QDF_STATUS_SUCCESS) { + cfr_err("tgt_cfr_init_pdev failed with %d\n", status); + return QDF_STATUS_SUCCESS; + } + + /* RealyFS init */ + status = cfr_streamfs_init(pdev); + + if (status != QDF_STATUS_SUCCESS) { + cfr_err("cfr_streamfs_init failed with %d\n", status); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cfr_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + int status = QDF_STATUS_SUCCESS; + /* + * DBR does not have close as of now; + * but this is getting added as part for new gerrit + * Once we have that support we will add it. + */ + status = cfr_streamfs_remove(pdev); + + return status; +} + +QDF_STATUS cfr_initialize_pdev(struct wlan_objmgr_pdev *pdev) +{ + int status = QDF_STATUS_SUCCESS; + + /* chip specific init */ + + status = tgt_cfr_init_pdev(pdev); + + if (status != QDF_STATUS_SUCCESS) + cfr_err("cfr_initialize_pdev status=%d\n", status); + + return status; +} +qdf_export_symbol(cfr_initialize_pdev); + +QDF_STATUS cfr_deinitialize_pdev(struct wlan_objmgr_pdev *pdev) +{ + int status = QDF_STATUS_SUCCESS; + + /* chip specific deinit */ + + status = tgt_cfr_deinit_pdev(pdev); + + if (status != QDF_STATUS_SUCCESS) + cfr_err("cfr_deinitialize_pdev status=%d\n", status); + + return status; +} +qdf_export_symbol(cfr_deinitialize_pdev); + +uint8_t count_set_bits(uint32_t value) +{ + uint8_t count = 0; + + while (value) { + value &= (value - 1); + count++; + } + + return count; +} + +qdf_export_symbol(count_set_bits); + +#ifdef WLAN_ENH_CFR_ENABLE +void wlan_cfr_rx_tlv_process(struct wlan_objmgr_pdev *pdev, void *nbuf) +{ + tgt_cfr_rx_tlv_process(pdev, nbuf); +} + +qdf_export_symbol(wlan_cfr_rx_tlv_process); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/cmn_defs/inc/wlan_cmn_ieee80211.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/cmn_defs/inc/wlan_cmn_ieee80211.h new file mode 100644 index 0000000000000000000000000000000000000000..88b5129764ecfce122edba347ffe88303be01597 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/cmn_defs/inc/wlan_cmn_ieee80211.h @@ -0,0 +1,2086 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: contains commnon ieee80211 definitions + */ + +#ifndef _WLAN_CMN_IEEE80211_H_ +#define _WLAN_CMN_IEEE80211_H_ +#include +#include + +#define IEEE80211_CCMP_HEADERLEN 8 +#define IEEE80211_CCMP_MICLEN 8 +#define WLAN_IEEE80211_GCMP_HEADERLEN 8 +#define WLAN_IEEE80211_GCMP_MICLEN 16 +#define IEEE80211_FC1_WEP 0x40 +#define WLAN_HDR_IV_LEN 3 +#define WLAN_HDR_EXT_IV_BIT 0x20 +#define WLAN_HDR_EXT_IV_LEN 4 + +#define WLAN_SEQ_SEQ_SHIFT 4 + +#define P2P_WFA_OUI {0x50, 0x6f, 0x9a} +#define P2P_WFA_VER 0x09 + +#define WSC_OUI 0x0050f204 +#define MBO_OCE_OUI 0x506f9a16 +#define MBO_OCE_OUI_SIZE 4 +#define REDUCED_WAN_METRICS_ATTR 103 + +/* WCN IE */ +/* Microsoft OUI */ +#define WCN_OUI 0xf25000 +/* WCN */ +#define WCN_OUI_TYPE 0x04 +#define WME_OUI 0xf25000 +#define WME_OUI_TYPE 0x02 +#define WME_PARAM_OUI_SUBTYPE 0x01 +#define WME_INFO_OUI_SUBTYPE 0x00 + /* Atheros OUI */ +#define ATH_OUI 0x7f0300 +#define ATH_OUI_TYPE 0x01 +/* Atheros Extended Cap Type */ +#define ATH_OUI_EXTCAP_TYPE 0x04 +/* QCA Bandwidth NSS Mapping Type */ +#define ATH_OUI_BW_NSS_MAP_TYPE 0x05 +#define SFA_OUI 0x964000 +#define SFA_OUI_TYPE 0x14 +/* QCA OUI (in little endian) */ +#define QCA_OUI 0xf0fd8c +#define QCA_OUI_WHC_TYPE 0x00 + +/* Extender vendor specific IE */ +#define QCA_OUI_EXTENDER_TYPE 0x03 + +#define ADAPTIVE_11R_OUI 0x964000 +#define ADAPTIVE_11R_OUI_TYPE 0x2C + +#define OUI_LENGTH 4 +#define OUI_TYPE_BITS 24 +#define MAX_ADAPTIVE_11R_IE_LEN 8 + +/* + * sae single pmk vendor specific IE details + * Category Data + * Type 0xDD + * Length 0x05 + * OUI 0x00 40 96 + * Type 0x03 + * Data Don’t care (EX, 0x05) + */ +#define SAE_SINGLE_PMK_OUI 0x964000 +#define SAE_SINGLE_PMK_TYPE 0x03 +#define MAX_SAE_SINGLE_PMK_IE_LEN 8 + +/* Temporary vendor specific IE for 11n pre-standard interoperability */ +#define VENDOR_HT_OUI 0x00904c +#define VENDOR_HT_CAP_ID 51 +#define VENDOR_HT_INFO_ID 52 + +#define VHT_INTEROP_OUI 0x00904c +#define VHT_INTEROP_TYPE 0x04 +#define VHT_INTEROP_OUI_SUBTYPE 0x08 +#define VHT_INTEROP_OUI_SUBTYPE_VENDORSPEC 0x18 + +/* ATH HE OUI ( in little endian) */ +#define ATH_HE_OUI 0x741300 +#define ATH_HE_CAP_SUBTYPE 0x01 +#define ATH_HE_OP_SUBTYPE 0x02 + +/* EPR information element flags */ +#define ERP_NON_ERP_PRESENT 0x01 +#define ERP_USE_PROTECTION 0x02 +#define ERP_LONG_PREAMBLE 0x04 + +#define QCA_OUI_WHC_AP_INFO_SUBTYPE 0x00 + +#define WLAN_MAX_IE_LEN 255 +#define WLAN_RSN_IE_LEN 22 + +/* Individual element IEs length checks */ + +#define WLAN_SUPPORTED_RATES_IE_MAX_LEN 12 +#define WLAN_FH_PARAM_IE_MAX_LEN 5 +#define WLAN_DS_PARAM_IE_MAX_LEN 1 +#define WLAN_CF_PARAM_IE_MAX_LEN 6 +#define WLAN_COUNTRY_IE_MIN_LEN 3 +#define WLAN_QUIET_IE_MAX_LEN 6 +#define WLAN_CSA_IE_MAX_LEN 3 +#define WLAN_XCSA_IE_MAX_LEN 4 +#define WLAN_SECCHANOFF_IE_MAX_LEN 1 +#define WLAN_EXT_SUPPORTED_RATES_IE_MAX_LEN 12 +#define WLAN_EXTCAP_IE_MAX_LEN 15 +#define WLAN_FILS_INDICATION_IE_MIN_LEN 2 +#define WLAN_MOBILITY_DOMAIN_IE_MAX_LEN 3 +#define WLAN_OPMODE_IE_MAX_LEN 1 +#define WLAN_IBSSDFS_IE_MIN_LEN 7 +#define WLAN_IBSS_IE_MAX_LEN 2 +#define WLAN_REQUEST_IE_MAX_LEN 255 +#define WLAN_RM_CAPABILITY_IE_MAX_LEN 5 +#define WLAN_RNR_IE_MIN_LEN 5 + +/* Wide band channel switch IE length */ +#define WLAN_WIDE_BW_CHAN_SWITCH_IE_LEN 3 + +/* Number of max TX power elements supported plus size of Transmit Power + * Information element. + */ +#define WLAN_TPE_IE_MAX_LEN 9 + +/* Max channel switch time IE length */ +#define WLAN_MAX_CHAN_SWITCH_TIME_IE_LEN 4 + +/* HT capability flags */ +#define WLAN_HTCAP_C_ADVCODING 0x0001 +#define WLAN_HTCAP_C_CHWIDTH40 0x0002 +/* Capable of SM Power Save (Static) */ +#define WLAN_HTCAP_C_SMPOWERSAVE_STATIC 0x0000 +/* Capable of SM Power Save (Dynamic) */ +#define WLAN_HTCAP_C_SMPOWERSAVE_DYNAMIC 0x0004 +/* Reserved */ +#define WLAN_HTCAP_C_SM_RESERVED 0x0008 +/* SM enabled, no SM Power Save */ +#define WLAN_HTCAP_C_SMPOWERSAVE_DISABLED 0x000c +#define WLAN_HTCAP_C_GREENFIELD 0x0010 +#define WLAN_HTCAP_C_SHORTGI20 0x0020 +#define WLAN_HTCAP_C_SHORTGI40 0x0040 +#define WLAN_HTCAP_C_TXSTBC 0x0080 +#define WLAN_HTCAP_C_TXSTBC_S 7 +/* 2 bits */ +#define WLAN_HTCAP_C_RXSTBC 0x0300 +#define WLAN_HTCAP_C_RXSTBC_S 8 +#define WLAN_HTCAP_C_DELAYEDBLKACK 0x0400 +/* 1 = 8K, 0 = 3839B */ +#define WLAN_HTCAP_C_MAXAMSDUSIZE 0x0800 +#define WLAN_HTCAP_C_DSSSCCK40 0x1000 +#define WLAN_HTCAP_C_PSMP 0x2000 +#define WLAN_HTCAP_C_INTOLERANT40 0x4000 +#define WLAN_HTCAP_C_LSIGTXOPPROT 0x8000 +/* Spatial Multiplexing (SM) capabitlity bitmask */ +#define WLAN_HTCAP_C_SM_MASK 0x000c + +/* VHT Operation */ +/* 20/40 MHz Operating Channel */ +#define WLAN_VHTOP_CHWIDTH_2040 0 +/* 80 MHz Operating Channel */ +#define WLAN_VHTOP_CHWIDTH_80 1 +/* 160 MHz Operating Channel */ +#define WLAN_VHTOP_CHWIDTH_160 2 +/* 80 + 80 MHz Operating Channel */ +#define WLAN_VHTOP_CHWIDTH_80_80 3 +/* 160 MHz Operating Channel (revised signalling) */ +#define WLAN_VHTOP_CHWIDTH_REVSIG_160 1 +/* 80 + 80 MHz Operating Channel (revised signalling) */ +#define WLAN_VHTOP_CHWIDTH_REVSIG_80_80 1 + +#define WLAN_HEOP_FIXED_PARAM_LENGTH 7 +#define WLAN_HEOP_VHTOP_LENGTH 3 +#define WLAN_HEOP_CO_LOCATED_BSS_LENGTH 1 + +#define WLAN_HEOP_VHTOP_PRESENT_MASK 0x00004000 /* B14 */ +#define WLAN_HEOP_CO_LOCATED_BSS_MASK 0x00008000 /* B15 */ +#define WLAN_HEOP_6GHZ_INFO_PRESENT_MASK 0X00020000 /* B17 */ + +#define WLAN_HE_6GHZ_CHWIDTH_20 0 /* 20MHz Oper Ch width */ +#define WLAN_HE_6GHZ_CHWIDTH_40 1 /* 40MHz Oper Ch width */ +#define WLAN_HE_6GHZ_CHWIDTH_80 2 /* 80MHz Oper Ch width */ +#define WLAN_HE_6GHZ_CHWIDTH_160_80_80 3 /* 160/80+80 MHz Oper Ch width */ + +#define WLAN_RATE_VAL 0x7f +#define WLAN_BASIC_RATE_MASK 0x80 + +#define WLAN_RV(v) ((v) & WLAN_RATE_VAL) + +#define WLAN_BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 +#define WLAN_BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126 +#define WLAN_BSS_MEMBERSHIP_SELECTOR_GLK 125 +#define WLAN_BSS_MEMBERSHIP_SELECTOR_EPD 124 +#define WLAN_BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123 +#define WLAN_BSS_MEMBERSHIP_SELECTOR_HE_PHY 122 + +#define WLAN_CHAN_IS_5GHZ(chanidx) \ + ((chanidx > 30) ? true : false) +#define WLAN_CHAN_IS_2GHZ(chanidx) \ + (((chanidx > 0) && (chanidx < 15)) ? true : false) + +/* Check if revised signalling is being used for VHT160 in vhtop */ +#define WLAN_IS_REVSIG_VHT160(vhtop) (((vhtop)->vht_op_chwidth == \ + WLAN_VHTOP_CHWIDTH_REVSIG_160) && \ + ((vhtop)->vht_op_ch_freq_seg2 != 0) && \ + (abs((vhtop)->vht_op_ch_freq_seg2 - (vhtop)->vht_op_ch_freq_seg1) == 8)) + +/* Check if revised signalling is being used for VHT80p80 in vhtop */ +#define WLAN_IS_REVSIG_VHT80_80(vhtop) (((vhtop)->vht_op_chwidth == \ + WLAN_VHTOP_CHWIDTH_REVSIG_80_80) && \ + ((vhtop)->vht_op_ch_freq_seg2 != 0) && \ + (abs((vhtop)->vht_op_ch_freq_seg2 - (vhtop)->vht_op_ch_freq_seg1) > 8)) + +/* Check if channel width is HE160 in HE 6ghz params */ +#define WLAN_IS_HE160(he_6g_param) (((he_6g_param)->width == \ + WLAN_HE_6GHZ_CHWIDTH_160_80_80) && \ + ((he_6g_param)->chan_freq_seg1 != 0) && \ + (abs((he_6g_param)->chan_freq_seg1 - \ + (he_6g_param)->chan_freq_seg0) == 8)) + +/* Check if channel width is HE80p80 in HE 6ghz params */ +#define WLAN_IS_HE80_80(he_6g_param) (((he_6g_param)->width == \ + WLAN_HE_6GHZ_CHWIDTH_160_80_80) && \ + ((he_6g_param)->chan_freq_seg1 != 0) && \ + (abs((he_6g_param)->chan_freq_seg1 - \ + (he_6g_param)->chan_freq_seg0) > 8)) + +#define LE_READ_2(p) \ + ((uint16_t)\ + ((((const uint8_t *)(p))[0]) |\ + (((const uint8_t *)(p))[1] << 8))) + +#define LE_READ_4(p) \ + ((uint32_t)\ + ((((const uint8_t *)(p))[0]) |\ + (((const uint8_t *)(p))[1] << 8) | \ + (((const uint8_t *)(p))[2] << 16) |\ + (((const uint8_t *)(p))[3] << 24))) + +#define BE_READ_4(p) \ + ((uint32_t)\ + ((((const uint8_t *)(p))[0] << 24) |\ + (((const uint8_t *)(p))[1] << 16) |\ + (((const uint8_t *)(p))[2] << 8) |\ + (((const uint8_t *)(p))[3]))) + +/** + * enum ext_chan_offset: extension channel offset + * @WLAN_HTINFO_EXTOFFSET_NA: no extension channel is present + * @WLAN_HTINFO_EXTOFFSET_ABOVE: above control channel + * @WLAN_HTINFO_EXTOFFSET_UNDEF: undefined + * @WLAN_HTINFO_EXTOFFSET_BELOW: below control channel + */ +enum ext_chan_offset { + WLAN_HTINFO_EXTOFFSET_NA = 0, + WLAN_HTINFO_EXTOFFSET_ABOVE = 1, + WLAN_HTINFO_EXTOFFSET_UNDEF = 2, + WLAN_HTINFO_EXTOFFSET_BELOW = 3 +}; + +/** + * enum element_ie :- Management information element + * @WLAN_ELEMID_SSID: ssid IE + * @WLAN_ELEMID_RATES: Rates IE + * @WLAN_ELEMID_FHPARMS: FH param IE + * @WLAN_ELEMID_DSPARMS: DS Param IE + * @WLAN_ELEMID_CFPARMS : CF Param IE + * @WLAN_ELEMID_TIM: TIM IE + * @WLAN_ELEMID_IBSSPARMS: Ibss params IE + * @WLAN_ELEMID_COUNTRY: Country code IE + * @WLAN_ELEMID_REQINFO: Req Info IE + * @WLAN_ELEMID_QBSS_LOAD: Qbss load IE + * @WLAN_ELEMID_TCLAS: TCLAS IE + * @WLAN_ELEMID_CHALLENGE: Challenge IE + * @WLAN_ELEMID_PWRCNSTR: Power cn IE + * @WLAN_ELEMID_PWRCAP: power cap IE + * @WLAN_ELEMID_TPCREQ: TPC req IE + * @WLAN_ELEMID_TPCREP: TPC rsp IE + * @WLAN_ELEMID_SUPPCHAN: Supported channel IE + * @WLAN_ELEMID_CHANSWITCHANN: Channel switch IE + * @WLAN_ELEMID_MEASREQ: Measurement request IE + * @WLAN_ELEMID_MEASREP: Measurement Resp IE + * @WLAN_ELEMID_QUIET: Quiet IE + * @WLAN_ELEMID_IBSSDFS: IBSS DFS IE + * @WLAN_ELEMID_ERP: ERP IE + * @WLAN_ELEMID_TCLAS_PROCESS: TCLAS process IE + * @WLAN_ELEMID_HTCAP_ANA: HTT Capability IE + * @WLAN_ELEMID_RSN: RSN IE + * @WLAN_ELEMID_XRATES: Extended rate IE + * @WLAN_ELEMID_HTCAP_VENDOR: HT cap vendor IE + * @WLAN_ELEMID_HTINFO_VENDOR: HT info vendor IE + * @WLAN_ELEMID_MOBILITY_DOMAIN: MD IE + * @WLAN_ELEMID_FT: FT IE + * @WLAN_ELEMID_TIMEOUT_INTERVAL: Timeout interval IE + * @WLAN_ELEMID_SUPP_OP_CLASS: OP class IE + * @WLAN_ELEMID_EXTCHANSWITCHANN: Extended Channel switch IE + * @WLAN_ELEMID_HTINFO_ANA: HT info IE + * @WLAN_ELEMID_SECCHANOFFSET: Sec channel Offset IE + * @WLAN_ELEMID_WAPI: WAPI IE + * @WLAN_ELEMID_TIME_ADVERTISEMENT: Time IE + * @WLAN_ELEMID_RRM: Radio resource measurement IE + * @WLAN_ELEMID_MULTIPLE_BSSID: Multiple BSSID IE + * @WLAN_ELEMID_2040_COEXT: 20-40 COext ext IE + * @WLAN_ELEMID_2040_INTOL:20-40 INT OL IE + * @WLAN_ELEMID_OBSS_SCAN: OBSS scan IE + * @WLAN_ELEMID_MMIE: 802.11w Management MIC IE + * @WLAN_ELEMID_NONTX_BSSID_CAP: Nontransmitted BSSID Capability IE + * @WLAN_ELEMID_MULTI_BSSID_IDX: Multiple BSSID index + * @WLAN_ELEMID_FMS_DESCRIPTOR: 802.11v FMS descriptor IE + * @WLAN_ELEMID_FMS_REQUEST: 802.11v FMS request IE + * @WLAN_ELEMID_FMS_RESPONSE: 802.11v FMS response IE + * @WLAN_ELEMID_BSSMAX_IDLE_PERIOD = 90: BSS MAX IDLE PERIOD + * @WLAN_ELEMID_TFS_REQUEST: TFS req IE + * @WLAN_ELEMID_TFS_RESPONSE: TFS resp IE + * @WLAN_ELEMID_TIM_BCAST_REQUEST: TIM bcast req IE + * @WLAN_ELEMID_TIM_BCAST_RESPONSE: TIM bcast resp IE + * @WLAN_ELEMID_INTERWORKING: Interworking IE + * @WLAN_ELEMID_QOS_MAP: QOS MAP IE + * @WLAN_ELEMID_XCAPS: Extended capability IE + * @WLAN_ELEMID_TPC: TPC IE + * @WLAN_ELEMID_CCKM: CCKM IE + * @WLAN_ELEMID_VHTCAP: VHT Capabilities + * @WLAN_ELEMID_VHTOP: VHT Operation + * @WLAN_ELEMID_EXT_BSS_LOAD: Extended BSS Load + * @WLAN_ELEMID_WIDE_BAND_CHAN_SWITCH: Wide Band Channel Switch + * @WLAN_ELEMID_VHT_TX_PWR_ENVLP: VHT Transmit Power Envelope + * @WLAN_ELEMID_CHAN_SWITCH_WRAP: Channel Switch Wrapper + * @WLAN_ELEMID_AID: AID + * @WLAN_ELEMID_QUIET_CHANNEL: Quiet Channel + * @WLAN_ELEMID_OP_MODE_NOTIFY: Operating Mode Notification + * @WLAN_ELEMID_VENDOR: vendor private + * @WLAN_ELEMID_EXTN_ELEM: extended IE + */ +enum element_ie { + WLAN_ELEMID_SSID = 0, + WLAN_ELEMID_RATES = 1, + WLAN_ELEMID_FHPARMS = 2, + WLAN_ELEMID_DSPARMS = 3, + WLAN_ELEMID_CFPARMS = 4, + WLAN_ELEMID_TIM = 5, + WLAN_ELEMID_IBSSPARMS = 6, + WLAN_ELEMID_COUNTRY = 7, + /* 8-9 reserved */ + WLAN_ELEMID_REQINFO = 10, + WLAN_ELEMID_QBSS_LOAD = 11, + WLAN_ELEMID_EDCAPARMS = 12, + WLAN_ELEMID_TCLAS = 14, + WLAN_ELEMID_CHALLENGE = 16, + /* 17-31 reserved for challenge text extension */ + WLAN_ELEMID_PWRCNSTR = 32, + WLAN_ELEMID_PWRCAP = 33, + WLAN_ELEMID_TPCREQ = 34, + WLAN_ELEMID_TPCREP = 35, + WLAN_ELEMID_SUPPCHAN = 36, + WLAN_ELEMID_CHANSWITCHANN = 37, + WLAN_ELEMID_MEASREQ = 38, + WLAN_ELEMID_MEASREP = 39, + WLAN_ELEMID_QUIET = 40, + WLAN_ELEMID_IBSSDFS = 41, + WLAN_ELEMID_ERP = 42, + WLAN_ELEMID_TCLAS_PROCESS = 44, + WLAN_ELEMID_HTCAP_ANA = 45, + WLAN_ELEMID_QOS_CAPABILITY = 46, + WLAN_ELEMID_RSN = 48, + WLAN_ELEMID_XRATES = 50, + WLAN_ELEMID_HTCAP_VENDOR = 51, + WLAN_ELEMID_HTINFO_VENDOR = 52, + WLAN_ELEMID_MOBILITY_DOMAIN = 54, + WLAN_ELEMID_FT = 55, + WLAN_ELEMID_TIMEOUT_INTERVAL = 56, + WLAN_ELEMID_SUPP_OP_CLASS = 59, + WLAN_ELEMID_EXTCHANSWITCHANN = 60, + WLAN_ELEMID_HTINFO_ANA = 61, + WLAN_ELEMID_SECCHANOFFSET = 62, + WLAN_ELEMID_WAPI = 68, + WLAN_ELEMID_TIME_ADVERTISEMENT = 69, + WLAN_ELEMID_RRM = 70, + WLAN_ELEMID_MULTIPLE_BSSID = 71, + WLAN_ELEMID_2040_COEXT = 72, + WLAN_ELEMID_2040_INTOL = 73, + WLAN_ELEMID_OBSS_SCAN = 74, + WLAN_ELEMID_MMIE = 76, + WLAN_ELEMID_NONTX_BSSID_CAP = 83, + WLAN_ELEMID_MULTI_BSSID_IDX = 85, + WLAN_ELEMID_FMS_DESCRIPTOR = 86, + WLAN_ELEMID_FMS_REQUEST = 87, + WLAN_ELEMID_FMS_RESPONSE = 88, + WLAN_ELEMID_BSSMAX_IDLE_PERIOD = 90, + WLAN_ELEMID_TFS_REQUEST = 91, + WLAN_ELEMID_TFS_RESPONSE = 92, + WLAN_ELEMID_TIM_BCAST_REQUEST = 94, + WLAN_ELEMID_TIM_BCAST_RESPONSE = 95, + WLAN_ELEMID_INTERWORKING = 107, + WLAN_ELEMID_QOS_MAP = 110, + WLAN_ELEMID_XCAPS = 127, + WLAN_ELEMID_TPC = 150, + WLAN_ELEMID_CCKM = 156, + WLAN_ELEMID_VHTCAP = 191, + WLAN_ELEMID_VHTOP = 192, + WLAN_ELEMID_EXT_BSS_LOAD = 193, + WLAN_ELEMID_WIDE_BAND_CHAN_SWITCH = 194, + WLAN_ELEMID_VHT_TX_PWR_ENVLP = 195, + WLAN_ELEMID_CHAN_SWITCH_WRAP = 196, + WLAN_ELEMID_AID = 197, + WLAN_ELEMID_QUIET_CHANNEL = 198, + WLAN_ELEMID_OP_MODE_NOTIFY = 199, + WLAN_ELEMID_REDUCED_NEIGHBOR_REPORT = 201, + WLAN_ELEMID_VENDOR = 221, + WLAN_ELEMID_FILS_INDICATION = 240, + WLAN_ELEMID_RSNXE = 244, + WLAN_ELEMID_EXTN_ELEM = 255, +}; + +/** + * enum extn_element_ie :- extended management information element + * @WLAN_EXTN_ELEMID_MAX_CHAN_SWITCH_TIME: Maximum Channel Switch Time IE + * @WLAN_EXTN_ELEMID_HECAP: HE capabilities IE + * @WLAN_EXTN_ELEMID_HEOP: HE Operation IE + * @WLAN_EXTN_ELEMID_MUEDCA: MU-EDCA IE + * @WLAN_EXTN_ELEMID_HE_6G_CAP: HE 6GHz Band Capabilities IE + * @WLAN_EXTN_ELEMID_SRP: spatial reuse parameter IE + */ +enum extn_element_ie { + WLAN_EXTN_ELEMID_MAX_CHAN_SWITCH_TIME = 34, + WLAN_EXTN_ELEMID_HECAP = 35, + WLAN_EXTN_ELEMID_HEOP = 36, + WLAN_EXTN_ELEMID_MUEDCA = 38, + WLAN_EXTN_ELEMID_SRP = 39, + WLAN_EXTN_ELEMID_HE_6G_CAP = 59, + WLAN_EXTN_ELEMID_ESP = 11, +}; + +/** + * enum wlan_status_code - wlan status codes + * (IEEE Std 802.11-2016, 9.4.1.9, Table 9-46) + * @STATUS_SUCCESS: Success full + * @STATUS_UNSPECIFIED_FAILURE: Unspecified failure. + * @STATUS_TDLS_WAKEUP_REJECT: TDLS wakeup schedule rejected but alternative + * schedule provided. + * @STATUS_SECURITY_DISABLED: Security disabled. + * @STATUS_UNACCEPTABLE_LIFETIME: Unacceptable lifetime. + * @STATUS_NOT_IN_SAME_BSS: Not in same BSS. + * @STATUS_CAPS_UNSUPPORTED: Cannot support all requested capabilities in the + * Capability Information field. + * @STATUS_REASSOC_NO_ASSOC: Reassociation denied due to inability to confirm + * that association exists. + * @STATUS_ASSOC_DENIED_UNSPEC: Association denied due to reason outside the + * scope of this standard. + * @STATUS_NOT_SUPPORTED_AUTH_ALG: Responding STA does not support the specified + * authentication algorithm. + * @STATUS_UNKNOWN_AUTH_TRANSACTION: Received an Authentication frame with + * authentication transaction sequence number out of expected sequence. + * @STATUS_CHALLENGE_FAIL: Authentication rejected because of challenge failure. + * @STATUS_AUTH_TIMEOUT: Authentication rejected due to timeout waiting for next + * frame in sequence. + * @STATUS_AP_UNABLE_TO_HANDLE_NEW_STA: Association denied because AP is unable + * to handle additional associated STAs. + * @STATUS_ASSOC_DENIED_RATES: Association denied due to requesting STA not + * supporting all of the data rates in the BSSBasicRateSet parameter, + * the Basic HT-MCS Set field of the HT Operation parameter, or the Basic + * VHT-MCS and NSS Set field in the VHT Operation parameter. + * @STATUS_ASSOC_DENIED_NOSHORT: Association denied due to requesting + * STA not supporting the short preamble option. + * @STATUS_SPEC_MGMT_REQUIRED: Association request rejected because Spectrum + * Management capability is required. + * @STATUS_PWR_CAPABILITY_NOT_VALID: Association request rejected because the + * information in the Power Capability element is unacceptable. + * @STATUS_SUPPORTED_CHANNEL_NOT_VALID: Association request rejected because + * the information in the Supported Channels element is unacceptable. + * @STATUS_ASSOC_DENIED_NO_SHORT_SLOT_TIME: Association denied due to requesting + * STA not supporting the Short Slot Time option. + * @STATUS_ASSOC_DENIED_NO_HT: Association denied because the requesting STA + * does not support HT features. + * @STATUS_R0KH_UNREACHABLE: R0KH unreachable. + * @STATUS_ASSOC_DENIED_NO_PCO: Association denied because the requesting STA + * does not support the phased coexistence operation (PCO) transition time + * required by the AP. + * @STATUS_ASSOC_REJECTED_TEMPORARILY: Association request rejected temporarily, + * try again later. + * @STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION: Robust management frame policy + * violation. + * @STATUS_UNSPECIFIED_QOS_FAILURE: Unspecified, QoS-related failure. + * @STATUS_DENIED_INSUFFICIENT_BANDWIDTH: Association denied because QoS AP or + * PCP has insufficient bandwidth to handle another QoS STA. + * @STATUS_DENIED_POOR_CHANNEL_CONDITIONS: Association denied due to excessive + * frame loss rates and/or poor conditions on current operating channel. + * @STATUS_DENIED_QOS_NOT_SUPPORTED: Association (with QoS BSS) denied because + * the requesting STA does not support the QoS facility. + * @STATUS_REQUEST_DECLINED: The request has been declined. + * @STATUS_INVALID_PARAMETERS: The request has not been successful as one + * or more parameters have invalid values. + * @STATUS_REJECTED_WITH_SUGGESTED_CHANGES: The allocation or TS has not been + * created because the request cannot be honored; however, a suggested TSPEC/DMG + * TSPEC is provided so that the initiating STA can attempt to set another + * allocation or TS with the suggested changes to the TSPEC/DMG TSPEC + * @STATUS_INVALID_IE: Invalid element, i.e., an element defined in this + * standard for which the content does not meet the specifications in Clause 9. + * @STATUS_GROUP_CIPHER_NOT_VALID: Invalid group cipher. + * @STATUS_PAIRWISE_CIPHER_NOT_VALID: Invalid pairwise cipher. + * @STATUS_AKMP_NOT_VALID: Invalid AKMP. + * @STATUS_UNSUPPORTED_RSN_IE_VERSION: Unsupported RSNE version. + * @STATUS_INVALID_RSN_IE_CAPAB: Invalid RSNE capabilities. + * @STATUS_CIPHER_REJECTED_PER_POLICY: Cipher suite rejected because of security + * policy. + * @STATUS_TS_NOT_CREATED: The TS or allocation has not been created; however, + * the HC or PCP might be capable of creating a TS or allocation, in response to + * a request, after the time indicated in the TS Delay element. + * @STATUS_DIRECT_LINK_NOT_ALLOWED: Direct link is not allowed in the BSS by + * policy. + * @STATUS_DEST_STA_NOT_PRESENT: The Destination STA is not present within this + * BSS. + * @STATUS_DEST_STA_NOT_QOS_STA: The Destination STA is not a QoS STA. + * @STATUS_ASSOC_DENIED_LISTEN_INT_TOO_LARGE: Association denied because the + * listen interval is too large. + * @STATUS_INVALID_FT_ACTION_FRAME_COUNT: Invalid FT Action frame count. + * @STATUS_INVALID_PMKID: Invalid pairwise master key identifier (PMKID). + * + * Internal status codes: Add any internal status code just after + * STATUS_PROP_START and decrease the value of STATUS_PROP_START + * accordingly. + * + * @STATUS_PROP_START: Start of prop status codes. + * @STATUS_NO_NETWORK_FOUND: No network found + * @STATUS_AUTH_TX_FAIL: Failed to sent AUTH on air + * @STATUS_AUTH_NO_ACK_RECEIVED: No ack received for Auth tx + * @STATUS_AUTH_NO_RESP_RECEIVED: No Auth response for Auth tx + * @STATUS_ASSOC_TX_FAIL: Failed to sent Assoc on air + * @STATUS_ASSOC_NO_ACK_RECEIVED: No ack received for Assoc tx + * @STATUS_ASSOC_NO_RESP_RECEIVED: No Assoc response for Assoc tx + */ +enum wlan_status_code { + STATUS_SUCCESS = 0, + STATUS_UNSPECIFIED_FAILURE = 1, + STATUS_TDLS_WAKEUP_REJECT = 3, + STATUS_SECURITY_DISABLED = 5, + STATUS_UNACCEPTABLE_LIFETIME = 6, + STATUS_NOT_IN_SAME_BSS = 7, + STATUS_CAPS_UNSUPPORTED = 10, + STATUS_REASSOC_NO_ASSOC = 11, + STATUS_ASSOC_DENIED_UNSPEC = 12, + STATUS_NOT_SUPPORTED_AUTH_ALG = 13, + STATUS_UNKNOWN_AUTH_TRANSACTION = 14, + STATUS_CHALLENGE_FAIL = 15, + STATUS_AUTH_TIMEOUT = 16, + STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17, + STATUS_ASSOC_DENIED_RATES = 18, + STATUS_ASSOC_DENIED_NOSHORT = 19, + STATUS_SPEC_MGMT_REQUIRED = 22, + STATUS_PWR_CAPABILITY_NOT_VALID = 23, + STATUS_SUPPORTED_CHANNEL_NOT_VALID = 24, + STATUS_ASSOC_DENIED_NO_SHORT_SLOT_TIME = 25, + STATUS_ASSOC_DENIED_NO_HT = 27, + STATUS_R0KH_UNREACHABLE = 28, + STATUS_ASSOC_DENIED_NO_PCO = 29, + STATUS_ASSOC_REJECTED_TEMPORARILY = 30, + STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION = 31, + STATUS_UNSPECIFIED_QOS_FAILURE = 32, + STATUS_DENIED_INSUFFICIENT_BANDWIDTH = 33, + STATUS_DENIED_POOR_CHANNEL_CONDITIONS = 34, + STATUS_DENIED_QOS_NOT_SUPPORTED = 35, + STATUS_REQUEST_DECLINED = 37, + STATUS_INVALID_PARAMETERS = 38, + STATUS_REJECTED_WITH_SUGGESTED_CHANGES = 39, + STATUS_INVALID_IE = 40, + STATUS_GROUP_CIPHER_NOT_VALID = 41, + STATUS_PAIRWISE_CIPHER_NOT_VALID = 42, + STATUS_AKMP_NOT_VALID = 43, + STATUS_UNSUPPORTED_RSN_IE_VERSION = 44, + STATUS_INVALID_RSN_IE_CAPAB = 45, + STATUS_CIPHER_REJECTED_PER_POLICY = 46, + STATUS_TS_NOT_CREATED = 47, + STATUS_DIRECT_LINK_NOT_ALLOWED = 48, + STATUS_DEST_STA_NOT_PRESENT = 49, + STATUS_DEST_STA_NOT_QOS_STA = 50, + STATUS_ASSOC_DENIED_LISTEN_INT_TOO_LARGE = 51, + STATUS_INVALID_FT_ACTION_FRAME_COUNT = 52, + STATUS_INVALID_PMKID = 53, + + /* Error STATUS code for intenal usage*/ + STATUS_PROP_START = 65528, + STATUS_NO_NETWORK_FOUND = 65528, + STATUS_AUTH_TX_FAIL = 65529, + STATUS_AUTH_NO_ACK_RECEIVED = 65530, + STATUS_AUTH_NO_RESP_RECEIVED = 65531, + STATUS_ASSOC_TX_FAIL = 65532, + STATUS_ASSOC_NO_ACK_RECEIVED = 65533, + STATUS_ASSOC_NO_RESP_RECEIVED = 65534, +}; + +#define WLAN_OUI_SIZE 4 +#define WLAN_MAX_CIPHER 6 +#define WLAN_RSN_SELECTOR_LEN 4 +#define WLAN_WPA_SELECTOR_LEN 4 +#define PMKID_LEN 16 +#define MAX_PMK_LEN 64 +#define MAX_PMKID 4 + +#define WLAN_WPA_OUI 0xf25000 +#define WLAN_WPA_OUI_TYPE 0x01 +#define WPA_VERSION 1 +#define WLAN_WPA_SEL(x) (((x) << 24) | WLAN_WPA_OUI) + +#define WLAN_RSN_OUI 0xac0f00 +#define WLAN_CCKM_OUI 0x964000 +#define WLAN_CCKM_ASE_UNSPEC 0 +#define WLAN_WPA_CCKM_AKM 0x00964000 +#define WLAN_RSN_CCKM_AKM 0x00964000 +#define WLAN_RSN_DPP_AKM 0x029A6F50 +#define WLAN_RSN_OSEN_AKM 0x019A6F50 + +#define RSN_VERSION 1 +#define WLAN_RSN_SEL(x) (((x) << 24) | WLAN_RSN_OUI) +#define WLAN_CCKM_SEL(x) (((x) << 24) | WLAN_CCKM_OUI) + +#define WLAN_CSE_NONE 0x00 +#define WLAN_CSE_WEP40 0x01 +#define WLAN_CSE_TKIP 0x02 +#define WLAN_CSE_RESERVED 0x03 +#define WLAN_CSE_CCMP 0x04 +#define WLAN_CSE_WEP104 0x05 +#define WLAN_CSE_AES_CMAC 0x06 +#define WLAN_CSE_GCMP_128 0x08 +#define WLAN_CSE_GCMP_256 0x09 +#define WLAN_CSE_CCMP_256 0x0A +#define WLAN_CSE_BIP_GMAC_128 0x0B +#define WLAN_CSE_BIP_GMAC_256 0x0C +#define WLAN_CSE_BIP_CMAC_256 0x0D + +#define WLAN_AKM_IEEE8021X 0x01 +#define WLAN_AKM_PSK 0x02 +#define WLAN_AKM_FT_IEEE8021X 0x03 +#define WLAN_AKM_FT_PSK 0x04 +#define WLAN_AKM_SHA256_IEEE8021X 0x05 +#define WLAN_AKM_SHA256_PSK 0x06 +#define WLAN_AKM_SAE 0x08 +#define WLAN_AKM_FT_SAE 0x09 +#define WLAN_AKM_SUITEB_EAP_SHA256 0x0B +#define WLAN_AKM_SUITEB_EAP_SHA384 0x0C +#define WLAN_AKM_FT_SUITEB_EAP_SHA384 0x0D +#define WLAN_AKM_FILS_SHA256 0x0E +#define WLAN_AKM_FILS_SHA384 0x0F +#define WLAN_AKM_FILS_FT_SHA256 0x10 +#define WLAN_AKM_FILS_FT_SHA384 0x11 +#define WLAN_AKM_OWE 0x12 + +#define WLAN_ASE_NONE 0x00 +#define WLAN_ASE_8021X_UNSPEC 0x01 +#define WLAN_ASE_8021X_PSK 0x02 +#define WLAN_ASE_FT_IEEE8021X 0x20 +#define WLAN_ASE_FT_PSK 0x40 +#define WLAN_ASE_SHA256_IEEE8021X 0x80 +#define WLAN_ASE_SHA256_PSK 0x100 +#define WLAN_ASE_WPS 0x200 + +#define RSN_CAP_MFP_CAPABLE 0x80 +#define RSN_CAP_MFP_REQUIRED 0x40 + +/** + * struct wlan_rsn_ie_hdr: rsn ie header + * @elem_id: RSN element id WLAN_ELEMID_RSN. + * @len: rsn ie length + * @version: RSN ver + */ +struct wlan_rsn_ie_hdr { + u8 elem_id; + u8 len; + u8 version[2]; +}; + +#define WLAN_RSN_IE_MIN_LEN 2 + +/** + * struct wlan_rsn_ie: rsn ie info + * @ver: RSN ver + * @gp_cipher_suite: group cipher + * @pwise_cipher_count: number of pw cipher + * @pwise_cipher_suites: pair wise cipher list + * @akm_suite_count: Number of akm suite + * @akm_suites: akm suites list + * @cap: RSN capability + * @pmkid_count: number of PMKID + * @pmkid: PMKID list + * @mgmt_cipher_suite: management (11w) cipher suite + */ +struct wlan_rsn_ie { + uint16_t ver; + uint32_t gp_cipher_suite; + uint16_t pwise_cipher_count; + uint32_t pwise_cipher_suites[WLAN_MAX_CIPHER]; + uint16_t akm_suite_count; + uint32_t akm_suites[WLAN_MAX_CIPHER]; + uint16_t cap; + uint16_t pmkid_count; + uint8_t pmkid[MAX_PMKID][PMKID_LEN]; + uint32_t mgmt_cipher_suite; +}; + +#define WLAN_WAPI_IE_MIN_LEN 20 + +/** + * struct wlan_wpa_ie_hdr: wpa ie header + * @elem_id: Wpa element id, vender specific. + * @len: wpa ie length + * @oui: 24-bit OUI followed by 8-bit OUI type + * @version: wpa ver + */ +struct wlan_wpa_ie_hdr { + u8 elem_id; + u8 len; + u8 oui[4]; + u8 version[2]; +}; + +/** + * struct wlan_wpa_ie: WPA ie info + * @ver: WPA ver + * @mc_cipher: multicast cipher + * @uc_cipher_count: number of unicast cipher + * @uc_ciphers: unicast cipher list + * @auth_suite_count: Number of akm suite + * @auth_suites: akm suites list + * @cap: WPA capability + */ +struct wlan_wpa_ie { + uint16_t ver; + uint32_t mc_cipher; + uint16_t uc_cipher_count; + uint32_t uc_ciphers[WLAN_MAX_CIPHER]; + uint16_t auth_suite_count; + uint32_t auth_suites[WLAN_MAX_CIPHER]; + uint16_t cap; +}; + +#define WAPI_VERSION 1 +#define WLAN_WAPI_OUI 0x721400 + +#define WLAN_WAPI_SEL(x) (((x) << 24) | WLAN_WAPI_OUI) + +#define WLAN_WAI_CERT_OR_SMS4 0x01 +#define WLAN_WAI_PSK 0x02 + +/** + * struct wlan_wapi_ie: WAPI ie info + * @ver: WAPI ver + * @akm_suite_count: Number of akm suite + * @akm_suites: akm suites list + * @uc_cipher_suites:unicast cipher count + * @uc_cipher_suites: unicast cipher suite + * @mc_cipher_suite: mc cipher suite + */ +struct wlan_wapi_ie { + uint16_t ver; + uint16_t akm_suite_count; + uint32_t akm_suites[WLAN_MAX_CIPHER]; + uint16_t uc_cipher_count; + uint32_t uc_cipher_suites[WLAN_MAX_CIPHER]; + uint32_t mc_cipher_suite; +}; + +/** + * struct wlan_frame_hdr: generic IEEE 802.11 frames + * @i_fc: frame control + * @i_dur: duration field + * @i_addr1: mac address 1 + * @i_addr2: mac address 2 + * @i_addr3: mac address 3 + * @i_seq: seq info + */ +struct wlan_frame_hdr { + uint8_t i_fc[2]; + uint8_t i_dur[2]; + union { + struct { + uint8_t i_addr1[QDF_MAC_ADDR_SIZE]; + uint8_t i_addr2[QDF_MAC_ADDR_SIZE]; + uint8_t i_addr3[QDF_MAC_ADDR_SIZE]; + }; + uint8_t i_addr_all[3 * QDF_MAC_ADDR_SIZE]; + }; + uint8_t i_seq[2]; +} qdf_packed; + +struct wlan_frame_hdr_qos { + uint8_t i_fc[2]; + uint8_t i_dur[2]; + union { + struct { + uint8_t i_addr1[QDF_MAC_ADDR_SIZE]; + uint8_t i_addr2[QDF_MAC_ADDR_SIZE]; + uint8_t i_addr3[QDF_MAC_ADDR_SIZE]; + }; + uint8_t i_addr_all[3 * QDF_MAC_ADDR_SIZE]; + }; + uint8_t i_seq[2]; + uint8_t i_qos[2]; +} qdf_packed; + +struct wlan_frame_hdr_qos_addr4 { + uint8_t i_fc[2]; + uint8_t i_dur[2]; + union { + struct { + uint8_t i_addr1[QDF_MAC_ADDR_SIZE]; + uint8_t i_addr2[QDF_MAC_ADDR_SIZE]; + uint8_t i_addr3[QDF_MAC_ADDR_SIZE]; + }; + uint8_t i_addr_all[3 * QDF_MAC_ADDR_SIZE]; + }; + uint8_t i_seq[2]; + uint8_t i_addr4[QDF_MAC_ADDR_SIZE]; + uint8_t i_qos[2]; +} qdf_packed; + +/* sequence number offset base on begin of mac header */ +#define WLAN_SEQ_CTL_OFFSET 22 +#define WLAN_LOW_SEQ_NUM_MASK 0x000F +#define WLAN_HIGH_SEQ_NUM_MASK 0x0FF0 +#define WLAN_HIGH_SEQ_NUM_OFFSET 4 + +/** + * struct wlan_seq_ctl: sequence number control + * @frag_num: frag number + * @seq_num_lo: sequence number low byte + * @seq_num_hi: sequence number high byte + */ +struct wlan_seq_ctl { + uint8_t frag_num:4; + uint8_t seq_num_lo:4; + uint8_t seq_num_hi:8; +} qdf_packed; + +/** + * union wlan_capability : wlan_capability info + * @value: capability value + */ +union wlan_capability { + struct caps { + uint16_t ess:1; + uint16_t ibss:1; + uint16_t cf_pollable:1; + uint16_t cf_poll_request:1; + uint16_t privacy:1; + uint16_t short_preamble:1; + uint16_t pbcc:1; + uint16_t channel_agility:1; + uint16_t spectrum_management:1; + uint16_t qos:1; + uint16_t short_slot_time:1; + uint16_t apsd:1; + uint16_t reserved2:1; + uint16_t dsss_ofdm:1; + uint16_t del_block_ack:1; + uint16_t immed_block_ack:1; + } wlan_caps; + uint16_t value; +} qdf_packed; + +/** + * struct ie_header : IE header + * @ie_id: Element Id + * @ie_len: IE Length + */ +struct ie_header { + uint8_t ie_id; + uint8_t ie_len; +} qdf_packed; + +/** + * struct extn_ie_header : Extension IE header + * @ie_id: Element Id + * @ie_len: IE Length + * @ie_extn_id: extension id + */ +struct extn_ie_header { + uint8_t ie_id; + uint8_t ie_len; + uint8_t ie_extn_id; +} qdf_packed; + + +/** + * struct ie_ssid : ssid IE + * @ssid_id: SSID Element Id + * @ssid_len: SSID IE Length + * @ssid: ssid value + */ +struct ie_ssid { + uint8_t ssid_id; + uint8_t ssid_len; + uint8_t ssid[WLAN_SSID_MAX_LEN]; +} qdf_packed; + +/** + * struct ds_ie : ds IE + * @ie: DS Element Id + * @len: DS IE Length + * @cur_chan: channel info + */ +struct ds_ie { + uint8_t ie; + uint8_t len; + uint8_t cur_chan; +} qdf_packed; + +/** + * struct erp_ie: ERP IE + * @ie: ERP Element Id + * @len: ERP IE Length + * @value: EP Info + */ +struct erp_ie { + uint8_t ie; + uint8_t len; + uint8_t value; +} qdf_packed; + +/** + * struct htcap_cmn_ie: HT common IE info + * @hc_cap: HT capabilities + * @ampdu_param: ampdu params + * @mcsset: supported MCS set + * @extcap: extended HT capabilities + * @txbf_cap: txbf capabilities + * @antenna: antenna capabilities + */ +struct htcap_cmn_ie { + uint16_t hc_cap; + uint8_t ampdu_param; + uint8_t mcsset[16]; + uint16_t extcap; + uint32_t txbf_cap; + uint8_t antenna; +} qdf_packed; + +/** + * struct htcap_ie: HT Capability IE + * @id: HT IE + * @len: HT IE LEN + * @ie: HT cap info + */ +struct htcap_ie { + uint8_t id; + uint8_t len; + struct htcap_cmn_ie ie; +} qdf_packed; + +/** + * struct fils_indication_ie: FILS indication IE element + * @id: id + * @len: len + * @public_key_identifiers_cnt: public key identifiers count + * @realm_identifiers_cnt: realm identifiers count + * @is_ip_config_supported: whether ip config is supported in AP + * @is_cache_id_present: whether cache identifier is present + * @is_hessid_present: whether hessid is present + * @is_fils_sk_auth_supported: FILS shared key authentication is supported + * @is_fils_sk_auth_pfs_supported: FILS shared key auth with PFS is supported + * @is_pk_auth_supported: FILS public key authentication is supported + * @reserved: reserved + * @variable_data: pointer to data depends on initial variables + */ +struct fils_indication_ie { + uint8_t id; + uint8_t len; + uint16_t public_key_identifiers_cnt:3; + uint16_t realm_identifiers_cnt:3; + uint16_t is_ip_config_supported:1; + uint16_t is_cache_id_present:1; + uint16_t is_hessid_present:1; + uint16_t is_fils_sk_auth_supported:1; + uint16_t is_fils_sk_auth_pfs_supported:1; + uint16_t is_pk_auth_supported:1; + uint16_t reserved:4; + uint8_t variable_data[253]; +} qdf_packed; + +#define WLAN_VENDOR_HT_IE_OFFSET_LEN 4 + +/** + * struct wlan_vendor_ie_htcap: vendor private HT Capability IE + * @id: HT IE + * @hlen: HT IE len + * @oui: vendor OUI + * @oui_type: Oui type + * @ie: HT cap info + */ +struct wlan_vendor_ie_htcap { + uint8_t id; + uint8_t hlen; + uint8_t oui[3]; + uint8_t oui_type; + struct htcap_cmn_ie ie; +} qdf_packed; + +/** + * struct wlan_ie_htinfo_cmn: ht info comman + * @hi_ctrlchannel: control channel + * @hi_extchoff: B0-1 extension channel offset + * @hi_txchwidth: B2 recommended xmiss width set + * @hi_rifsmode: rifs mode + * @hi_ctrlaccess: controlled access only + * @hi_serviceinterval: B5-7 svc interval granularity + * @uhi_opmode: B0-1 operating mode + * @hi_nongfpresent: B2 non greenfield devices present + * @hi_txburstlimit: B3 transmit burst limit + * @hi_obssnonhtpresent: B4 OBSS non-HT STA present + * @hi_reserved0: B5-15 reserved + * @hi_reserved2: B0-5 reserved + * @hi_dualbeacon: B6 dual beacon + * @hi_dualctsprot: B7 dual CTS protection + * @hi_stbcbeacon: B8 STBC beacon + * @hi_lsigtxopprot: B9 l-sig txop protection full support + * @hi_pcoactive: B10 pco active + * @hi_pcophase: B11 pco phase + * @hi_reserved1: B12-15 reserved + * @hi_basicmcsset[16]: basic MCS set + */ +struct wlan_ie_htinfo_cmn { + uint8_t hi_ctrlchannel; + uint8_t hi_extchoff:2, + hi_txchwidth:1, + hi_rifsmode:1, + hi_ctrlaccess:1, + hi_serviceinterval:3; + uint16_t hi_opmode:2, + hi_nongfpresent:1, + hi_txburstlimit:1, + hi_obssnonhtpresent:1, + hi_reserved0:11; + uint16_t hi_reserved2:6, + hi_dualbeacon:1, + hi_dualctsprot:1, + hi_stbcbeacon:1, + hi_lsigtxopprot:1, + hi_pcoactive:1, + hi_pcophase:1, + hi_reserved1:4; + uint8_t hi_basicmcsset[16]; +} qdf_packed; + +/** + * struct wlan_ie_htinfo: HT info IE + * @hi_id: HT info IE + * @hi_len: HT info IE len + * @hi_ie: HT info info + */ +struct wlan_ie_htinfo { + uint8_t hi_id; + uint8_t hi_len; + struct wlan_ie_htinfo_cmn hi_ie; +} qdf_packed; + +/** + * struct wlan_ie_htinfo: vendor private HT info IE + * @hi_id: HT info IE + * @hi_len: HT info IE len + * @hi_oui: vendor OUI + * @hi_ouitype: Oui type + * @hi_ie: HT info info + */ +struct wlan_vendor_ie_htinfo { + uint8_t hi_id; + uint8_t hi_len; + uint8_t hi_oui[3]; + uint8_t hi_ouitype; + struct wlan_ie_htinfo_cmn hi_ie; +} qdf_packed; + +#define WLAN_VENDOR_VHTCAP_IE_OFFSET 7 +#define WLAN_VENDOR_VHTOP_IE_OFFSET 21 + +/** + * struct wlan_ie_vhtcaps - VHT capabilities + * @elem_id: VHT caps IE + * @elem_len: VHT caps IE len + * @max_mpdu_len: MPDU length + * @supported_channel_widthset: channel width set + * @ldpc_coding: LDPC coding capability + * @shortgi80: short GI 80 support + * @shortgi160and80plus80: short Gi 160 & 80+80 support + * @tx_stbc; Tx STBC cap + * @tx_stbc: Rx STBC cap + * @su_beam_former: SU beam former cap + * @su_beam_formee: SU beam formee cap + * @csnof_beamformer_antSup: Antenna support for beamforming + * @num_soundingdim: Sound dimensions + * @mu_beam_former: MU beam former cap + * @mu_beam_formee: MU beam formee cap + * @vht_txops: TXOP power save + * @htc_vhtcap: HTC VHT capability + * @max_ampdu_lenexp: AMPDU length + * @vht_link_adapt: VHT link adapatation capable + * @rx_antpattern: Rx Antenna pattern + * @tx_antpattern: Tx Antenna pattern + * @rx_mcs_map: RX MCS map + * @rx_high_sup_data_rate : highest RX supported data rate + * @tx_mcs_map: TX MCS map + * @tx_sup_data_rate: highest TX supported data rate + */ +struct wlan_ie_vhtcaps { + uint8_t elem_id; + uint8_t elem_len; + uint32_t max_mpdu_len:2; + uint32_t supported_channel_widthset:2; + uint32_t ldpc_coding:1; + uint32_t shortgi80:1; + uint32_t shortgi160and80plus80:1; + uint32_t tx_stbc:1; + uint32_t rx_stbc:3; + uint32_t su_beam_former:1; + uint32_t su_beam_formee:1; + uint32_t csnof_beamformer_antSup:3; + uint32_t num_soundingdim:3; + uint32_t mu_beam_former:1; + uint32_t mu_beam_formee:1; + uint32_t vht_txops:1; + uint32_t htc_vhtcap:1; + uint32_t max_ampdu_lenexp:3; + uint32_t vht_link_adapt:2; + uint32_t rx_antpattern:1; + uint32_t tx_antpattern:1; + uint32_t unused:2; + uint16_t rx_mcs_map; + uint16_t rx_high_sup_data_rate:13; + uint16_t reserved2:3; + uint16_t tx_mcs_map; + uint16_t tx_sup_data_rate:13; + uint16_t reserved3:3; +} qdf_packed; + +/** + * struct wlan_ie_vhtop: VHT op IE + * @elem_id: VHT op IE + * @elem_len: VHT op IE len + * @vht_op_chwidth: BSS Operational Channel width + * @vht_op_ch_freq_seg1: Channel Center frequency + * @vht_op_ch_freq_seg2: Channel Center frequency for 80+80MHz + * @vhtop_basic_mcs_set: Basic MCS set + */ +struct wlan_ie_vhtop { + uint8_t elem_id; + uint8_t elem_len; + uint8_t vht_op_chwidth; + uint8_t vht_op_ch_freq_seg1; + uint8_t vht_op_ch_freq_seg2; + uint16_t vhtop_basic_mcs_set; +} qdf_packed; + +/** + * struct he_oper_6g_param: 6 Ghz params for HE + * @primary_channel: HE 6GHz Primary channel number + * @width: HE 6GHz BSS Channel Width + * @duplicate_beacon: HE 6GHz Duplicate beacon field + * @reserved: Reserved bits + * @chan_freq_seg0: HE 6GHz Channel Centre Frequency Segment 0 + * @chan_freq_seg1: HE 6GHz Channel Centre Frequency Segment 1 + * @minimum_rate: HE 6GHz Minimum Rate + */ +struct he_oper_6g_param { + uint8_t primary_channel; + uint8_t width:2, + duplicate_beacon:1, + reserved:5; + uint8_t chan_freq_seg0; + uint8_t chan_freq_seg1; + uint8_t minimum_rate; +} qdf_packed; + +/** + * struct wlan_country_ie: country IE + * @ie: country IE + * @len: IE len + * @cc: country code + */ +struct wlan_country_ie { + uint8_t ie; + uint8_t len; + uint8_t cc[3]; +} qdf_packed; + +/** + * struct wlan_country_ie: country IE + * @ie: QBSS IE + * @len: IE len + * @station_count: number of station associated + * @qbss_chan_load: qbss channel load + * @qbss_load_avail: qbss_load_avail + */ +struct qbss_load_ie { + uint8_t ie; + uint8_t len; + uint16_t station_count; + uint8_t qbss_chan_load; + uint16_t qbss_load_avail; +} qdf_packed; + +/** + * struct wlan_bcn_frame: beacon frame fixed params + * @timestamp: the value of sender's TSFTIMER + * @beacon_interval: beacon interval + * @capability: capability + * @ie: variable IE + */ +struct wlan_bcn_frame { + uint8_t timestamp[8]; + uint16_t beacon_interval; + union wlan_capability capability; + struct ie_header ie; +} qdf_packed; + +#define WLAN_TIM_IE_MIN_LENGTH 4 + +/** + * struct wlan_tim_ie: tim IE + * @tim_ie: Time IE + * @tim_len: TIM IE len + * @tim_count: dtim count + * @tim_period: dtim period + * @tim_bitctl: bitmap control + * @tim_bitmap: variable length bitmap + */ +struct wlan_tim_ie { + uint8_t tim_ie; /* WLAN_ELEMID_TIM */ + uint8_t tim_len; + uint8_t tim_count; /* DTIM count */ + uint8_t tim_period; /* DTIM period */ + uint8_t tim_bitctl; /* bitmap control */ + uint8_t tim_bitmap[251]; /* variable-length bitmap */ +} qdf_packed; + +/** + * struct rsn_mdie: mobility domain IE + * @rsn_id: RSN IE id + * @rsn_len: RSN IE len + * @mobility_domain: mobility domain info + * @ft_capab: ft capability + * + * Reference 9.4.2.47 Mobility Domain element (MDE) of 802.11-2016 + */ +struct rsn_mdie { + uint8_t rsn_id; + uint8_t rsn_len; + uint8_t mobility_domain[2]; + uint8_t ft_capab; +} qdf_packed; + +/** + * struct srp_ie: Spatial reuse parameter IE + * @srp_id: SRP IE id + * @srp_len: SRP IE len + * @srp_id_extn: SRP Extension ID + * @sr_control: sr control + * @non_srg_obsspd_max_offset: non srg obsspd max offset + * @srg_obss_pd_min_offset: srg obss pd min offset + * @srg_obss_pd_max_offset: srg obss pd max offset + * @srg_bss_color_bitmap: srg bss color bitmap + * @srg_partial_bssid_bitmap: srg partial bssid bitmap + */ +struct wlan_srp_ie { + uint8_t srp_id; + uint8_t srp_len; + uint8_t srp_id_extn; + uint8_t sr_control; + union { + struct { + uint8_t non_srg_obsspd_max_offset; + uint8_t srg_obss_pd_min_offset; + uint8_t srg_obss_pd_max_offset; + uint8_t srg_bss_color_bitmap[8]; + uint8_t srg_partial_bssid_bitmap[8]; + } qdf_packed nonsrg_srg_info; + struct { + uint8_t non_srg_obsspd_max_offset; + } qdf_packed nonsrg_info; + struct { + uint8_t srg_obss_pd_min_offset; + uint8_t srg_obss_pd_max_offset; + uint8_t srg_bss_color_bitmap[8]; + uint8_t srg_partial_bssid_bitmap[8]; + } qdf_packed srg_info; + }; +} qdf_packed; + +#define ESP_INFORMATION_LIST_LENGTH 3 +#define MAX_ESP_INFORMATION_FIELD 4 +/* + * enum access_category: tells about access category in ESP paramameter + * @ESP_AC_BK: ESP access category for background + * @ESP_AC_BE: ESP access category for best effort + * @ESP_AC_VI: ESP access category for video + * @ESP_AC_VO: ESP access category for Voice + */ +enum access_category { + ESP_AC_BK, + ESP_AC_BE, + ESP_AC_VI, + ESP_AC_VO, + +}; +/* + * struct wlan_esp_info: structure for Esp information parameter + * @access_category: access category info + * @reserved: reserved + * @data_format: two bits in length and tells about data format + * i.e. 0 = No aggregation is expected to be performed for MSDUs or MPDUs with + * the Type subfield equal to Data for the corresponding AC + * 1 = A-MSDU aggregation is expected to be performed for MSDUs for the + * corresponding AC, but A-MPDU aggregation is not expected to be performed + * for MPDUs with the Type subfield equal to Data for the corresponding AC + * 2 = A-MPDU aggregation is expected to be performed for MPDUs with the Type + * subfield equal to Data for the corresponding AC, but A-MSDU aggregation is + * not expected to be performed for MSDUs for the corresponding AC + * 3 = A-MSDU aggregation is expected to be performed for MSDUs for the + * corresponding AC and A-MPDU aggregation is expected to be performed for + * MPDUs with the Type subfield equal to Data for the corresponding AC + * @ba_window_size: BA Window Size subfield is three bits in length and + * indicates the size of the Block Ack window that is + * expected for the corresponding access category + * @estimated_air_fraction: Estimated Air Time Fraction subfield is 8 bits in + * length and contains an unsigned integer that represents + * the predicted percentage of time, linearly scaled with 255 representing + * 100%, that a new STA joining the + * BSS will be allocated for PPDUs that contain only + * MPDUs with the Type + * subfield equal to Data of the + * corresponding access category for that STA. + * @ppdu_duration: Data PPDU Duration Target field + * is 8 bits in length and is + * an unsigned integer that indicates the + * expected target duration of PPDUs that contain only MPDUs with the Type + * subfield equal to Data for the + * corresponding access category in units of 50 μs + */ +struct wlan_esp_info { + uint8_t access_category:2; + uint8_t reserved:1; + uint8_t data_format:2; + uint8_t ba_window_size:3; + uint8_t estimated_air_fraction; + uint8_t ppdu_duration; +}; + +/** + * struct wlan_esp_ie: struct for ESP information + * @esp_id: ESP IE id + * @esp_len: ESP IE len + * @esp_id_extn: ESP Extension ID + * @esp_info_AC_BK: ESP information related to BK category + * @esp_info_AC_BE: ESP information related to BE category + * @esp_info_AC_VI: ESP information related to VI category + * @esp_info_AC_VO: ESP information related to VO category + */ +struct wlan_esp_ie { + uint8_t esp_id; + uint8_t esp_len; + uint8_t esp_id_extn; + struct wlan_esp_info esp_info_AC_BK; + struct wlan_esp_info esp_info_AC_BE; + struct wlan_esp_info esp_info_AC_VI; + struct wlan_esp_info esp_info_AC_VO; +} qdf_packed; + +/** + * struct wlan_ext_cap_ie - struct for extended capabilities information + * @ext_cap_id: Extended capabilities id + * @ext_cap_len: Extended capabilities IE len + * @ext_caps: Variable length extended capabilities information + */ +struct wlan_ext_cap_ie { + uint8_t ext_cap_id; + uint8_t ext_cap_len; + uint8_t ext_caps[]; +} qdf_packed; + +/** + * struct oce_reduced_wan_metrics: struct for oce wan metrics + * @downlink_av_cap: Download available capacity + * @uplink_av_cap: Upload available capacity + */ +struct oce_reduced_wan_metrics { + uint8_t downlink_av_cap:4; + uint8_t uplink_av_cap:4; +}; + +/** + * is_wpa_oui() - If vendor IE is WPA type + * @frm: vendor IE pointer + * + * API to check if vendor IE is WPA + * + * Return: true if its WPA IE + */ +static inline bool +is_wpa_oui(uint8_t *frm) +{ + return (frm[1] > 3) && (LE_READ_4(frm + 2) == + ((WLAN_WPA_OUI_TYPE << 24) | WLAN_WPA_OUI)); +} + +/** + * is_wps_oui() - If vendor IE is WPS type + * @frm: vendor IE pointer + * + * API to check if vendor IE is WPS + * + * Return: true if its WPS IE + */ +static inline bool +is_wps_oui(const uint8_t *frm) +{ + return frm[1] > 3 && BE_READ_4(frm + 2) == WSC_OUI; +} + +/** + * is_mbo_oce_oui() - If vendor IE is MBO/OCE type + * @frm: vendor IE pointer + * + * API to check if vendor IE is MBO/OCE + * + * Return: true if its MBO/OCE IE + */ +static inline bool +is_mbo_oce_oui(const uint8_t *frm) +{ + return frm[1] > 3 && BE_READ_4(frm + 2) == MBO_OCE_OUI; +} + +/** + * is_wcn_oui() - If vendor IE is WCN type + * @frm: vendor IE pointer + * + * API to check if vendor IE is WCN + * + * Return: true if its WCN IE + */ +static inline bool +is_wcn_oui(uint8_t *frm) +{ + return (frm[1] > 4) && (LE_READ_4(frm + 2) == + ((WCN_OUI_TYPE << 24) | WCN_OUI)); +} + +/** + * is_wme_param() - If vendor IE is WME param type + * @frm: vendor IE pointer + * + * API to check if vendor IE is WME param + * + * Return: true if its WME param IE + */ +static inline bool +is_wme_param(const uint8_t *frm) +{ + return (frm[1] > 5) && (LE_READ_4(frm + 2) == + ((WME_OUI_TYPE << 24) | WME_OUI)) && + (frm[6] == WME_PARAM_OUI_SUBTYPE); +} + +/** + * is_wme_info() - If vendor IE is WME info type + * @frm: vendor IE pointer + * + * API to check if vendor IE is WME info + * + * Return: true if its WME info IE + */ +static inline bool +is_wme_info(const uint8_t *frm) +{ + return (frm[1] > 5) && (LE_READ_4(frm + 2) == + ((WME_OUI_TYPE << 24) | WME_OUI)) && + (frm[6] == WME_INFO_OUI_SUBTYPE); +} + +/** + * is_atheros_oui() - If vendor IE is Atheros type + * @frm: vendor IE pointer + * + * API to check if vendor IE is Atheros + * + * Return: true if its Atheros IE + */ +static inline bool +is_atheros_oui(const uint8_t *frm) +{ + return (frm[1] > 3) && LE_READ_4(frm + 2) == + ((ATH_OUI_TYPE << 24) | ATH_OUI); +} + +/** + * is_atheros_extcap_oui() - If vendor IE is Atheros ext cap + * @frm: vendor IE pointer + * + * API to check if vendor IE is Atheros ext cap + * + * Return: true if its Atheros ext cap IE + */ +static inline int +is_atheros_extcap_oui(uint8_t *frm) +{ + return (frm[1] > 3) && (LE_READ_4(frm + 2) == + ((ATH_OUI_EXTCAP_TYPE << 24) | ATH_OUI)); +} + +/** + * is_sfa_oui() - If vendor IE is SFA type + * @frm: vendor IE pointer + * + * API to check if vendor IE is SFA + * + * Return: true if its SFA IE + */ +static inline bool +is_sfa_oui(uint8_t *frm) +{ + return (frm[1] > 4) && (LE_READ_4(frm + 2) == + ((SFA_OUI_TYPE << 24) | SFA_OUI)); +} + +/** + * is_p2p_oui() - If vendor IE is P2P type + * @frm: vendor IE pointer + * + * API to check if vendor IE is P2P + * + * Return: true if its P2P IE + */ +static inline bool +is_p2p_oui(const uint8_t *frm) +{ + const uint8_t wfa_oui[3] = P2P_WFA_OUI; + + return (frm[1] >= 4) && + (frm[2] == wfa_oui[0]) && + (frm[3] == wfa_oui[1]) && + (frm[4] == wfa_oui[2]) && + (frm[5] == P2P_WFA_VER); +} + +/** + * is_qca_son_oui() - If vendor IE is QCA WHC type + * @frm: vendor IE pointer + * @whc_subtype: subtype + * + * API to check if vendor IE is QCA WHC + * + * Return: true if its QCA WHC IE + */ +static inline bool +is_qca_son_oui(uint8_t *frm, uint8_t whc_subtype) +{ + return (frm[1] > 4) && (LE_READ_4(frm + 2) == + ((QCA_OUI_WHC_TYPE << 24) | QCA_OUI)) && + (*(frm + 6) == whc_subtype); +} + +/** + * is_ht_cap() - If vendor IE is vendor HT cap type + * @frm: vendor IE pointer + * + * API to check if vendor IE is vendor HT cap + * + * Return: true if its vendor HT cap IE + */ +static inline bool +is_ht_cap(uint8_t *frm) +{ + return (frm[1] > 3) && (BE_READ_4(frm + 2) == + ((VENDOR_HT_OUI << 8) | VENDOR_HT_CAP_ID)); +} + +/** + * is_ht_info() - If vendor IE is vendor HT info type + * @frm: vendor IE pointer + * + * API to check if vendor IE is vendor HT info + * + * Return: true if its vendor HT info IE + */ +static inline bool +is_ht_info(uint8_t *frm) +{ + return (frm[1] > 3) && (BE_READ_4(frm + 2) == + ((VENDOR_HT_OUI << 8) | VENDOR_HT_INFO_ID)); +} + +/** + * is_interop_vht() - If vendor IE is VHT interop + * @frm: vendor IE pointer + * + * API to check if vendor IE is VHT interop + * + * Return: true if its VHT interop IE + */ +static inline bool +is_interop_vht(uint8_t *frm) +{ + return (frm[1] > 12) && (BE_READ_4(frm + 2) == + ((VHT_INTEROP_OUI << 8) | VHT_INTEROP_TYPE)) && + ((*(frm + 6) == VHT_INTEROP_OUI_SUBTYPE) || + (*(frm + 6) == VHT_INTEROP_OUI_SUBTYPE_VENDORSPEC)); +} + +/** + * is_bwnss_oui() - If vendor IE is BW NSS type + * @frm: vendor IE pointer + * + * API to check if vendor IE is BW NSS + * + * Return: true if its BW NSS IE + */ +static inline bool +is_bwnss_oui(uint8_t *frm) +{ + return (frm[1] > 3) && (LE_READ_4(frm + 2) == + ((ATH_OUI_BW_NSS_MAP_TYPE << 24) | ATH_OUI)); +} + +#define WLAN_BWNSS_MAP_OFFSET 6 + +/** + * is_he_cap_oui() - If vendor IE is HE CAP OUI + * @frm: vendor IE pointer + * + * API to check if vendor IE is HE CAP + * + * Return: true if its HE CAP IE + */ +static inline bool +is_he_cap_oui(uint8_t *frm) +{ + return (frm[1] > 4) && (LE_READ_4(frm + 2) == + ((ATH_HE_CAP_SUBTYPE << 24) | ATH_HE_OUI)); +} + +/** + * is_he_op_oui() - If vendor IE is HE OP OUI + * @frm: vendor IE pointer + * + * API to check if vendor IE is HE OP OUT + * + * Return: true if its HE OP OUI + */ +static inline bool +is_he_op_oui(uint8_t *frm) +{ + return (frm[1] > 4) && (LE_READ_4(frm + 2) == + ((ATH_HE_OP_SUBTYPE << 24) | ATH_HE_OUI)); +} + +/** + * is_extender_oui() - If vendor IE is EXTENDER OUI + * @frm: vendor IE pointer + * + * API to check if vendor IE is EXTENDER OUI + * + * Return: true if its EXTENDER OUI + */ +static inline bool +is_extender_oui(uint8_t *frm) +{ + return (frm[1] > 4) && (LE_READ_4(frm + 2) == + ((QCA_OUI_EXTENDER_TYPE << 24) | QCA_OUI)); +} + +/** + * is_adaptive_11r_oui() - Function to check if vendor IE is ADAPTIVE 11R OUI + * @frm: vendor IE pointer + * + * API to check if vendor IE is ADAPTIVE 11R OUI + * + * Return: true if its ADAPTIVE 11r OUI + */ +static inline bool +is_adaptive_11r_oui(uint8_t *frm) +{ + return (frm[1] > OUI_LENGTH) && (LE_READ_4(frm + 2) == + ((ADAPTIVE_11R_OUI_TYPE << OUI_TYPE_BITS) | ADAPTIVE_11R_OUI)); +} + +/** + * is_sae_single_pmk_oui() - Fun to check if vendor IE is sae single pmk OUI + * @frm: vendor IE pointer + * + * API to check if vendor IE is sae single pmk OUI + * + * Return: true if its sae single pmk OUI + */ +static inline bool +is_sae_single_pmk_oui(uint8_t *frm) +{ + return (frm[1] > OUI_LENGTH) && (LE_READ_4(frm + 2) == + ((SAE_SINGLE_PMK_TYPE << OUI_TYPE_BITS) | SAE_SINGLE_PMK_OUI)); +} + +/** + * wlan_parse_rsn_ie() - parse rsn ie + * @rsn_ie: rsn ie ptr + * @rsn: out structure for the parsed ie + * + * API, function to parse rsn ie, if optional fields are not present use the + * default values defined by standard. + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS wlan_parse_rsn_ie(uint8_t *rsn_ie, + struct wlan_rsn_ie *rsn) +{ + uint8_t rsn_ie_len, i; + uint8_t *ie; + int rem_len; + const struct wlan_rsn_ie_hdr *hdr; + + if (!rsn_ie) + return QDF_STATUS_E_NULL_VALUE; + + ie = rsn_ie; + rsn_ie_len = ie[1] + 2; + + /* + * Check the length once for fixed parts: + * element id, len and version. Other, variable-length data, + * must be checked separately. + */ + if (rsn_ie_len < sizeof(struct wlan_rsn_ie_hdr)) + return QDF_STATUS_E_INVAL; + + hdr = (struct wlan_rsn_ie_hdr *) rsn_ie; + + if (hdr->elem_id != WLAN_ELEMID_RSN || + LE_READ_2(hdr->version) != RSN_VERSION) + return QDF_STATUS_E_INVAL; + + /* Set default values for optional field. */ + rsn->gp_cipher_suite = WLAN_RSN_SEL(WLAN_CSE_CCMP); + rsn->pwise_cipher_count = 1; + rsn->pwise_cipher_suites[0] = WLAN_RSN_SEL(WLAN_CSE_CCMP); + rsn->akm_suite_count = 1; + rsn->akm_suites[0] = WLAN_RSN_SEL(WLAN_AKM_IEEE8021X); + + rsn->ver = LE_READ_2(hdr->version); + + ie = (uint8_t *) (hdr + 1); + rem_len = rsn_ie_len - sizeof(*hdr); + + /* Check if optional group cipher is present */ + if (rem_len >= WLAN_RSN_SELECTOR_LEN) { + rsn->gp_cipher_suite = LE_READ_4(ie); + ie += WLAN_RSN_SELECTOR_LEN; + rem_len -= WLAN_RSN_SELECTOR_LEN; + } else if (rem_len > 0) { + /* RSN IE is invalid as group cipher is of invalid length */ + return QDF_STATUS_E_INVAL; + } + + /* Check if optional pairwise cipher is present */ + if (rem_len >= 2) { + rsn->pwise_cipher_count = LE_READ_2(ie); + ie += 2; + rem_len -= 2; + if (rsn->pwise_cipher_count == 0 || + rsn->pwise_cipher_count > WLAN_MAX_CIPHER || + rsn->pwise_cipher_count > rem_len / WLAN_RSN_SELECTOR_LEN) + return QDF_STATUS_E_INVAL; + for (i = 0; i < rsn->pwise_cipher_count; i++) { + rsn->pwise_cipher_suites[i] = LE_READ_4(ie); + ie += WLAN_RSN_SELECTOR_LEN; + rem_len -= WLAN_RSN_SELECTOR_LEN; + } + } else if (rem_len == 1) { + /* RSN IE is invalid as pairwise cipher is of invalid length */ + return QDF_STATUS_E_INVAL; + } + + /* Check if optional akm suite is present */ + if (rem_len >= 2) { + rsn->akm_suite_count = LE_READ_2(ie); + ie += 2; + rem_len -= 2; + if (rsn->akm_suite_count == 0 || + rsn->akm_suite_count > WLAN_MAX_CIPHER || + rsn->akm_suite_count > rem_len / WLAN_RSN_SELECTOR_LEN) + return QDF_STATUS_E_INVAL; + for (i = 0; i < rsn->akm_suite_count; i++) { + rsn->akm_suites[i] = LE_READ_4(ie); + ie += WLAN_RSN_SELECTOR_LEN; + rem_len -= WLAN_RSN_SELECTOR_LEN; + } + } else if (rem_len == 1) { + /* RSN IE is invalid as akm suite is of invalid length */ + return QDF_STATUS_E_INVAL; + } + + /* Update capabilty if present */ + if (rem_len >= 2) { + rsn->cap = LE_READ_2(ie); + ie += 2; + rem_len -= 2; + } else if (rem_len == 1) { + /* RSN IE is invalid as cap field is truncated */ + return QDF_STATUS_E_INVAL; + } + + /* Update PMKID if present */ + if (rem_len >= 2) { + rsn->pmkid_count = LE_READ_2(ie); + ie += 2; + rem_len -= 2; + if (rsn->pmkid_count > MAX_PMKID || + rsn->pmkid_count > (unsigned int)rem_len / PMKID_LEN) { + rsn->pmkid_count = 0; + return QDF_STATUS_E_INVAL; + } + + qdf_mem_copy(rsn->pmkid, ie, + rsn->pmkid_count * PMKID_LEN); + ie += rsn->pmkid_count * PMKID_LEN; + rem_len -= rsn->pmkid_count * PMKID_LEN; + } else if (rem_len == 1) { + /* RSN IE is invalid as pmkid count field is truncated */ + return QDF_STATUS_E_INVAL; + } + + /* Update mgmt cipher if present */ + if (rem_len >= WLAN_RSN_SELECTOR_LEN) { + rsn->mgmt_cipher_suite = LE_READ_4(ie); + ie += WLAN_RSN_SELECTOR_LEN; + rem_len -= WLAN_RSN_SELECTOR_LEN; + } else if (rem_len > 0) { + /* RSN IE is invalid as mgmt cipher is truncated */ + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_parse_wpa_ie() - parse wpa ie + * @wpa_ie: wpa ie ptr + * @wpa: out structure for the parsed ie + * + * API, function to parse wpa ie, if optional fields are not present use the + * default values defined by standard. + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS wlan_parse_wpa_ie(uint8_t *wpa_ie, + struct wlan_wpa_ie *wpa) +{ + uint8_t wpa_ie_len, i; + uint8_t *ie; + int rem_len; + struct wlan_wpa_ie_hdr *hdr; + + if (!wpa_ie) + return QDF_STATUS_E_NULL_VALUE; + + ie = wpa_ie; + wpa_ie_len = ie[1] + 2; + + /* + * Check the length once for fixed parts: + * element id, len, oui and version. Other, variable-length data, + * must be checked separately. + */ + if (wpa_ie_len < sizeof(struct wlan_wpa_ie_hdr)) + return QDF_STATUS_E_INVAL; + + hdr = (struct wlan_wpa_ie_hdr *) wpa_ie; + + if (hdr->elem_id != WLAN_ELEMID_VENDOR || + !is_wpa_oui(wpa_ie) || + LE_READ_2(hdr->version) != WPA_VERSION) + return QDF_STATUS_E_INVAL; + + /* Set default values for optional field. */ + wpa->mc_cipher = WLAN_WPA_SEL(WLAN_CSE_TKIP); + wpa->uc_cipher_count = 1; + wpa->uc_ciphers[0] = WLAN_WPA_SEL(WLAN_CSE_TKIP); + wpa->auth_suite_count = 1; + wpa->auth_suites[0] = WLAN_WPA_SEL(WLAN_ASE_8021X_UNSPEC); + + wpa->ver = LE_READ_2(hdr->version); + ie = (uint8_t *) (hdr + 1); + rem_len = wpa_ie_len - sizeof(*hdr); + + /* Check if optional group cipher is present */ + if (rem_len >= WLAN_WPA_SELECTOR_LEN) { + wpa->mc_cipher = LE_READ_4(ie); + ie += WLAN_WPA_SELECTOR_LEN; + rem_len -= WLAN_WPA_SELECTOR_LEN; + } else if (rem_len > 0) { + /* WPA IE is invalid as group cipher is of invalid length */ + return QDF_STATUS_E_INVAL; + } + + /* Check if optional pairwise cipher is present */ + if (rem_len >= 2) { + wpa->uc_cipher_count = LE_READ_2(ie); + ie += 2; + rem_len -= 2; + if (wpa->uc_cipher_count == 0 || + wpa->uc_cipher_count > WLAN_MAX_CIPHER || + wpa->uc_cipher_count > rem_len / WLAN_WPA_SELECTOR_LEN) + return QDF_STATUS_E_INVAL; + for (i = 0; i < wpa->uc_cipher_count; i++) { + wpa->uc_ciphers[i] = LE_READ_4(ie); + ie += WLAN_WPA_SELECTOR_LEN; + rem_len -= WLAN_WPA_SELECTOR_LEN; + } + } else if (rem_len == 1) { + /* WPA IE is invalid as pairwise cipher is of invalid length */ + return QDF_STATUS_E_INVAL; + } + + /* Check if optional akm suite is present */ + if (rem_len >= 2) { + wpa->auth_suite_count = LE_READ_2(ie); + ie += 2; + rem_len -= 2; + if (wpa->auth_suite_count == 0 || + wpa->auth_suite_count > WLAN_MAX_CIPHER || + wpa->auth_suite_count > rem_len / WLAN_WPA_SELECTOR_LEN) + return QDF_STATUS_E_INVAL; + for (i = 0; i < wpa->auth_suite_count; i++) { + wpa->auth_suites[i] = LE_READ_4(ie); + ie += WLAN_WPA_SELECTOR_LEN; + rem_len -= WLAN_WPA_SELECTOR_LEN; + } + } else if (rem_len == 1) { + /* WPA IE is invalid as akm suite is of invalid length */ + return QDF_STATUS_E_INVAL; + } + + /* Update capabilty if optional capabilty is present */ + if (rem_len >= 2) { + wpa->cap = LE_READ_2(ie); + ie += 2; + rem_len -= 2; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_parse_wapi_ie() - parse wapi ie + * @wapi_ie: wpa ie ptr + * @wapi: out structure for the parsed IE + * + * API, function to parse wapi ie + * + * Return: void + */ +static inline void wlan_parse_wapi_ie(uint8_t *wapi_ie, + struct wlan_wapi_ie *wapi) +{ + uint8_t len, i; + uint8_t *ie; + + if (!wapi_ie) + return; + + ie = wapi_ie; + len = ie[1]; + /* + * Check the length once for fixed parts: OUI, type, + * version, mcast cipher, and 2 selector counts. + * Other, variable-length data, must be checked separately. + */ + if (len < 20) + return; + + ie += 2; + + wapi->ver = LE_READ_2(ie); + if (wapi->ver != WAPI_VERSION) + return; + + ie += 2; + len -= 2; + + /* akm */ + wapi->akm_suite_count = LE_READ_2(ie); + + ie += 2; + len -= 2; + + if ((wapi->akm_suite_count > WLAN_MAX_CIPHER) || + len < (wapi->akm_suite_count * WLAN_OUI_SIZE)) + return; + for (i = 0 ; i < wapi->akm_suite_count; i++) { + wapi->akm_suites[i] = LE_READ_4(ie); + ie += WLAN_OUI_SIZE; + len -= WLAN_OUI_SIZE; + } + + if (len < 2) + return; + wapi->uc_cipher_count = LE_READ_2(ie); + ie += 2; + len -= 2; + + if ((wapi->uc_cipher_count > WLAN_MAX_CIPHER) || + len < (wapi->uc_cipher_count * WLAN_OUI_SIZE + 2)) + return; + for (i = 0 ; i < wapi->uc_cipher_count; i++) { + wapi->uc_cipher_suites[i] = LE_READ_4(ie); + ie += WLAN_OUI_SIZE; + len -= WLAN_OUI_SIZE; + } + + if (len >= WLAN_OUI_SIZE) + wapi->mc_cipher_suite = LE_READ_4(ie); +} + +/** + * wlan_parse_oce_reduced_wan_metrics_ie() - parse oce wan metrics + * @mbo_oce_ie: MBO/OCE ie ptr + * @wan_metrics: out structure for the reduced wan metric + * + * API, function to parse reduced wan metric + * + * Return: true if oce wan metrics is present + */ +static inline bool +wlan_parse_oce_reduced_wan_metrics_ie(uint8_t *mbo_oce_ie, + struct oce_reduced_wan_metrics *wan_metrics) +{ + uint8_t len, attribute_len, attribute_id; + uint8_t *ie; + + if (!mbo_oce_ie) + return false; + + ie = mbo_oce_ie; + len = ie[1]; + ie += 2; + + if (len <= MBO_OCE_OUI_SIZE) + return false; + + ie += MBO_OCE_OUI_SIZE; + len -= MBO_OCE_OUI_SIZE; + + while (len > 2) { + attribute_id = ie[0]; + attribute_len = ie[1]; + len -= 2; + if (attribute_len > len) + return false; + + if (attribute_id == REDUCED_WAN_METRICS_ATTR) { + wan_metrics->downlink_av_cap = ie[2] & 0xff; + wan_metrics->uplink_av_cap = ie[2] >> 4; + return true; + } + + ie += (attribute_len + 2); + len -= attribute_len; + } + + return false; +} + +#endif /* _WLAN_CMN_IEEE80211_DEFS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_global_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_global_api.h new file mode 100644 index 0000000000000000000000000000000000000000..161fd0158c55b249e2a8b700c0d0e93f778412cc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_global_api.h @@ -0,0 +1,1017 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public APIs for crypto service + */ +#ifndef _WLAN_CRYPTO_GLOBAL_API_H_ +#define _WLAN_CRYPTO_GLOBAL_API_H_ + +#include "wlan_crypto_global_def.h" +/** + * wlan_crypto_set_vdev_param - called by ucfg to set crypto param + * @vdev: vdev + * @param: param to be set. + * @value: value + * + * This function gets called from ucfg to set param + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_vdev_param(struct wlan_objmgr_vdev *vdev, + wlan_crypto_param_type param, + uint32_t value); + +/** + * wlan_crypto_set_peer_param - called by ucfg to set crypto param + * + * @peer: peer + * @param: param to be set. + * @value: value + * + * This function gets called from ucfg to set param + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_peer_param(struct wlan_objmgr_peer *peer, + wlan_crypto_param_type param, + uint32_t value); + +/** + * wlan_crypto_get_param - called by ucfg to get crypto param + * @vdev: vdev + * @param: param to be get. + * + * This function gets called from ucfg to get param + * + * Return: value or -1 for failure + */ +int32_t wlan_crypto_get_param(struct wlan_objmgr_vdev *vdev, + wlan_crypto_param_type param); +/** + * wlan_crypto_get_peer_param - called by ucfg to get crypto peer param + * @peer: peer + * @param: param to be get. + * + * This function gets called from ucfg to get peer param + * + * Return: value or -1 for failure + */ +int32_t wlan_crypto_get_peer_param(struct wlan_objmgr_peer *peer, + wlan_crypto_param_type param); + +/** + * wlan_crypto_is_htallowed - called by ucfg to check if HT rates is allowed + * @vdev: Vdev + * @peer: Peer + * + * This function is called to check if HT rates is allowed + * + * Return: 0 for not allowed and +ve for allowed + */ +uint8_t wlan_crypto_is_htallowed(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer); +/** + * wlan_crypto_setkey - called by ucfg to setkey + * @vdev: vdev + * @req_key: req_key with cipher type, key macaddress + * + * This function gets called from ucfg to sey key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_setkey(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_req_key *req_key); + +/** + * wlan_crypto_getkey - called by ucfg to get key + * @vdev: vdev + * @req_key: key value will be copied in this req_key + * @mac_address: mac address of the peer for unicast key + * or broadcast address if group key is requested. + * + * This function gets called from ucfg to get key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_getkey(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_req_key *req_key, + uint8_t *mac_addr); + +/** + * wlan_crypto_delkey - called by ucfg to delete key + * @vdev: vdev + * @mac_address: mac address of the peer for unicast key + * or broadcast address if group key is deleted. + * @key_idx: key index to be deleted + * + * This function gets called from ucfg to delete key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_delkey(struct wlan_objmgr_vdev *vdev, + uint8_t *macaddr, + uint8_t key_idx); + +/** + * wlan_crypto_default_key - called by ucfg to set default tx key + * @vdev: vdev + * @mac_address: mac address of the peer for unicast key + * or broadcast address if group key need to made default. + * @key_idx: key index to be made as default key + * @unicast: is key was unicast or group key. + * + * This function gets called from ucfg to set default key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_default_key(struct wlan_objmgr_vdev *vdev, + uint8_t *macaddr, + uint8_t key_idx, + bool unicast); + +/** + * wlan_crypto_encap - called by mgmt for encap the frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @encapdone: is encapdone already or not. + * + * This function gets called from mgmt txrx to encap frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_encap(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *macaddr, + uint8_t encapdone); + +/** + * wlan_crypto_decap - called by mgmt for decap the frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @tid: tid of the packet. + * + * This function gets called from mgmt txrx to decap frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_decap(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *macaddr, + uint8_t tid); + +/** + * wlan_crypto_enmic - called by mgmt for adding mic in frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @encapdone: is encapdone already or not. + * + * This function gets called from mgmt txrx to adding mic to the frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_enmic(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *macaddr, + uint8_t encapdone); + +/** + * wlan_crypto_demic - called by mgmt for remove and check mic for + * the frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @tid: tid of the frame + * @keyid: keyid in the received frame + * + * This function gets called from mgmt txrx to decap frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_demic(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *macaddr, + uint8_t tid, + uint8_t keyid); + +/** + * wlan_crypto_vdev_is_pmf_enabled - called to check is pmf enabled in vdev + * @vdev: vdev + * + * This function gets called to check is pmf enabled or not in vdev. + * + * Return: true or false + */ +bool wlan_crypto_vdev_is_pmf_enabled(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_crypto_vdev_is_pmf_required - called to check is pmf required in vdev + * @vdev: vdev + * + * This function gets called to check is pmf required or not in vdev. + * + * Return: true or false + */ +bool wlan_crypto_vdev_is_pmf_required(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_crypto_is_pmf_enabled - called by mgmt txrx to check is pmf enabled + * @vdev: vdev + * @peer: peer + * + * This function gets called by mgmt txrx to check is pmf enabled or not. + * + * Return: true or false + */ +bool wlan_crypto_is_pmf_enabled(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer); + +/** + * wlan_crypto_is_key_valid - called by mgmt txrx to check if key is valid + * @vdev: vdev + * @peer: peer + * @keyidx : key index + * + * This function gets called by mgmt txrx to check if key is valid + * + * Return: true or false + */ +bool wlan_crypto_is_key_valid(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer, + uint16_t keyidx); + +/** + * wlan_crypto_add_mmie - called by mgmt txrx to add mmie in frame + * @vdev: vdev + * @frm: frame starting pointer + * @len: length of the frame + * + * This function gets called by mgmt txrx to add mmie in frame + * + * Return: end of frame or NULL in case failure + */ +uint8_t *wlan_crypto_add_mmie(struct wlan_objmgr_vdev *vdev, + uint8_t *frm, + uint32_t len); + +/** + * wlan_crypto_is_mmie_valid - called by mgmt txrx to check mmie of the frame + * @vdev: vdev + * @frm: frame starting pointer + * @efrm: end of frame pointer + * + * This function gets called by mgmt txrx to check mmie of the frame + * + * Return: true or false + */ +bool wlan_crypto_is_mmie_valid(struct wlan_objmgr_vdev *vdev, + uint8_t *frm, + uint8_t *efrm); + +/** + * wlan_crypto_wpaie_check - called by mlme to check the wpaie + * @crypto params: crypto params + * @iebuf: ie buffer + * + * This function gets called by mlme to check the contents of wpa is + * matching with given crypto params + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_wpaie_check(struct wlan_crypto_params *, uint8_t *frm); + +/** + * wlan_crypto_rsnie_check - called by mlme to check the rsnie + * @crypto params: crypto params + * @iebuf: ie buffer + * + * This function gets called by mlme to check the contents of rsn is + * matching with given crypto params + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_rsnie_check(struct wlan_crypto_params *, uint8_t *frm); +/** + * wlan_crypto_build_wpaie - called by mlme to build wpaie + * @vdev: vdev + * @iebuf: ie buffer + * + * This function gets called by mlme to build wpaie from given vdev + * + * Return: end of buffer + */ +uint8_t *wlan_crypto_build_wpaie(struct wlan_objmgr_vdev *vdev, + uint8_t *iebuf); + +/** + * wlan_crypto_build_rsnie_with_pmksa() - called by mlme to build rsnie + * @vdev: vdev + * @iebuf: ie buffer + * @pmksa: pmksa struct + * + * This function gets called by mlme to build rsnie from given vdev + * + * Return: end of buffer + */ +uint8_t *wlan_crypto_build_rsnie_with_pmksa(struct wlan_objmgr_vdev *vdev, + uint8_t *iebuf, + struct wlan_crypto_pmksa *pmksa); + +/** + * wlan_crypto_build_rsnie - called by mlme to build rsnie + * @vdev: vdev + * @iebuf: ie buffer + * @bssid: bssid mac address to add pmkid in rsnie + * + * This function gets called by mlme to build rsnie from given vdev + * + * Return: end of buffer + */ +uint8_t *wlan_crypto_build_rsnie(struct wlan_objmgr_vdev *vdev, + uint8_t *iebuf, + struct qdf_mac_addr *bssid); + +/** + * wlan_crypto_wapiie_check - called by mlme to check the wapiie + * @crypto params: crypto params + * @iebuf: ie buffer + * + * This function gets called by mlme to check the contents of wapi is + * matching with given crypto params + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_wapiie_check(struct wlan_crypto_params *crypto_params, + uint8_t *frm); + +/** + * wlan_crypto_build_wapiie - called by mlme to build wapi ie + * @vdev: vdev + * @iebuf: ie buffer + * + * This function gets called by mlme to build wapi ie from given vdev + * + * Return: end of buffer + */ +uint8_t *wlan_crypto_build_wapiie(struct wlan_objmgr_vdev *vdev, + uint8_t *iebuf); +/** + * wlan_crypto_rsn_info - check is given params matching with vdev params. + * @vdev: vdev + * @crypto params: crypto params + * + * This function gets called by mlme to check is given params matching with + * vdev params. + * + * Return: true success or false for failure. + */ +bool wlan_crypto_rsn_info(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_params *crypto_params); +/** + * wlan_crypto_pn_check - called by data patch for PN check + * @vdev: vdev + * @wbuf: wbuf + * + * This function gets called by data patch for PN check + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_crypto_pn_check(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf); +/** + * wlan_crypto_vdev_get_crypto_params - called by mlme to get crypto params + * @vdev:vdev + * + * This function gets called by mlme to get crypto params + * + * Return: wlan_crypto_params or NULL in case of failure + */ +struct wlan_crypto_params *wlan_crypto_vdev_get_crypto_params( + struct wlan_objmgr_vdev *vdev); +/** + * wlan_crypto_peer_get_crypto_params - called by mlme to get crypto params + * @peer:peer + * + * This function gets called by mlme to get crypto params + * + * Return: wlan_crypto_params or NULL in case of failure + */ +struct wlan_crypto_params *wlan_crypto_peer_get_crypto_params( + struct wlan_objmgr_peer *peer); + +/** + * wlan_crypto_set_peer_wep_keys - set wep keys into peer entries + * @vdev:vdev + * @peer:peer + * + * This function gets called by mlme, when auth frame is received. + * this helps in setting wep keys into peer data structure. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_crypto_set_peer_wep_keys(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer); + +/** + * wlan_crypto_register_crypto_rx_ops - set crypto_rx_ops + * @crypto_rx_ops: crypto_rx_ops + * + * This function gets called by object manger to register crypto rx ops. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_crypto_register_crypto_rx_ops( + struct wlan_lmac_if_crypto_rx_ops *crypto_rx_ops); + +/** + * wlan_crypto_get_crypto_rx_ops - get crypto_rx_ops from psoc + * @psoc: psoc + * + * This function gets called by umac to get the crypto_rx_ops + * + * Return: crypto_rx_ops + */ +struct wlan_lmac_if_crypto_rx_ops *wlan_crypto_get_crypto_rx_ops( + struct wlan_objmgr_psoc *psoc); +/** + * wlan_crypto_vdev_has_auth_mode - check authmode for vdev + * @vdev: vdev + * @authvalue: authvalue to be checked + * + * This function check is authvalue passed is set in vdev or not + * + * Return: true or false + */ +bool wlan_crypto_vdev_has_auth_mode(struct wlan_objmgr_vdev *vdev, + wlan_crypto_auth_mode authmode); + +/** + * wlan_crypto_peer_has_auth_mode - check authmode for peer + * @peer: peer + * @authvalue: authvalue to be checked + * + * This function check is authvalue passed is set in peer or not + * + * Return: true or false + */ +bool wlan_crypto_peer_has_auth_mode(struct wlan_objmgr_peer *peer, + wlan_crypto_auth_mode authvalue); + +/** + * wlan_crypto_vdev_has_ucastcipher - check ucastcipher for vdev + * @vdev: vdev + * @ucastcipher: ucastcipher to be checked + * + * This function check is ucastcipher passed is set in vdev or not + * + * Return: true or false + */ +bool wlan_crypto_vdev_has_ucastcipher(struct wlan_objmgr_vdev *vdev, + wlan_crypto_cipher_type ucastcipher); + +/** + * wlan_crypto_peer_has_ucastcipher - check ucastcipher for peer + * @peer: peer + * @ucastcipher: ucastcipher to be checked + * + * This function check is ucastcipher passed is set in peer or not + * + * Return: true or false + */ +bool wlan_crypto_peer_has_ucastcipher(struct wlan_objmgr_peer *peer, + wlan_crypto_cipher_type ucastcipher); + + +/** + * wlan_crypto_vdev_has_mcastcipher - check mcastcipher for vdev + * @vdev: vdev + * @mcastcipher: mcastcipher to be checked + * + * This function check is mcastcipher passed is set in vdev or not + * + * Return: true or false + */ +bool wlan_crypto_vdev_has_mcastcipher(struct wlan_objmgr_vdev *vdev, + wlan_crypto_cipher_type mcastcipher); + +/** + * wlan_crypto_peer_has_mcastcipher - check mcastcipher for peer + * @peer: peer + * @mcastcipher: mcastcipher to be checked + * + * This function check is mcastcipher passed is set in peer or not + * + * Return: true or false + */ +bool wlan_crypto_peer_has_mcastcipher(struct wlan_objmgr_peer *peer, + wlan_crypto_cipher_type mcastcipher); + +/** + * wlan_crypto_vdev_has_mgmtcipher - check mgmtcipher for vdev + * @vdev: vdev + * @mgmtcipher: mgmtcipher to be checked + * + * This function checks any one of mgmtciphers are supported by vdev or not. + * + * Return: true or false + */ +bool wlan_crypto_vdev_has_mgmtcipher(struct wlan_objmgr_vdev *vdev, + uint32_t mgmtcipher); + +/** + * wlan_crypto_peer_has_mgmtcipher - check mgmtcipher for peer + * @peer: peer + * @mgmtcipher: mgmtcipher to be checked + * + * This function checks any one of mgmtciphers are supported by peer or not. + * + * Return: true or false + */ +bool wlan_crypto_peer_has_mgmtcipher(struct wlan_objmgr_peer *peer, + uint32_t mgmtcipher); + +/** + * wlan_crypto_get_keytype - get keytype + * @key: key + * + * This function gets keytype from key + * + * Return: keytype + */ +wlan_crypto_cipher_type wlan_crypto_get_key_type( + struct wlan_crypto_key *key); + +/** + * wlan_crypto_vdev_getkey - get key from vdev + * @vdev: vdev + * @keyix: keyix + * + * This function gets key from vdev + * + * Return: key or NULL + */ +struct wlan_crypto_key *wlan_crypto_vdev_getkey(struct wlan_objmgr_vdev *vdev, + uint16_t keyix); +/** + * wlan_crypto_peer_getkey - get key from peer + * @peer: peer + * @keyix: keyix + * + * This function gets key from peer + * + * Return: key or NULL + */ +struct wlan_crypto_key *wlan_crypto_peer_getkey(struct wlan_objmgr_peer *peer, + uint16_t keyix); +/** + * wlan_crypto_get_peer_fils_aead - Get peer fils aead set flag + * @peer: Peer object + * + * This function returns the peer fils aead set flag value. + * + * Return: 1 for enabled, 0 for disabled + */ +uint8_t wlan_crypto_get_peer_fils_aead(struct wlan_objmgr_peer *peer); + +/** + * wlan_crypto_set_peer_fils_aead - Set peer fils aead set flag + * @peer: Peer object + * @value: Value to set the flag + * + * This function set the peer fils aead set flag once FILS AUTH received. + * + * Return: None + */ +void wlan_crypto_set_peer_fils_aead( + struct wlan_objmgr_peer *peer, uint8_t value); + +/** + * wlan_crypto_get_key_header - get header length + * @key: key + * + * This function gets header length based on keytype + * + * Return: header length + */ +uint8_t wlan_crypto_get_key_header(struct wlan_crypto_key *key); + +/** + * wlan_crypto_get_key_trailer - get cipher trailer length + * @key: key + * + * This function gets cipher trailer length based on keytype + * + * Return: cipher trailer length + */ +uint8_t wlan_crypto_get_key_trailer(struct wlan_crypto_key *key); + +/** + * wlan_crypto_get_key_miclen - get cipher miclen length + * @key: key + * + * This function gets cipher miclen length based on keytype + * + * Return: cipher miclen length + */ +uint8_t wlan_crypto_get_key_miclen(struct wlan_crypto_key *key); + +/** + * wlan_crypto_get_keyid - get keyid from frame + * @data: frame + * @hdrlen: 802.11 header length + * + * This function parse frame and returns keyid + * + * Return: keyid + */ +uint16_t wlan_crypto_get_keyid(uint8_t *data, int hdrlen); + +/** + * wlan_crypto_restore_keys - restore crypto keys in hw keycache + * @vdev: vdev + * + * This function restores keys in hw keycache + * + * Return: void + */ +void wlan_crypto_restore_keys(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_crypto_check_open_none - called by ucfg to check for open security + * @psoc: psoc pointer + * @vdev_id: vdev id + * + * This function gets called from ucfg to check open security. + * + * Return: true or false + */ +bool wlan_crypto_check_open_none(struct wlan_objmgr_psoc *psoc, + uint8_t vedv_id); + +/** + * wlan_crypto_check_wep - called by ucfg to check for WEP security + * @psoc: psoc pointer + * @vdev_id: vdev id + * + * This function gets called from ucfg to check WEP security. + * + * Return: true or false + */ +bool wlan_crypto_check_wep(struct wlan_objmgr_psoc *psoc, uint8_t vedv_id); + +/** + * wlan_crypto_check_rsn_match - called by ucfg to check for RSN match + * @psoc: psoc pointer + * @vdev_id: vdev id + * @ie_ptr: pointer to IEs + * @ie_len: IE length + * @peer_crypto_params: return peer crypto parameters + * + * This function gets called from ucfg to check RSN match. + * + * Return: true or false + */ +bool wlan_crypto_check_rsn_match(struct wlan_objmgr_psoc *psoc, + uint8_t vedv_id, uint8_t *ie_ptr, + uint16_t ie_len, struct wlan_crypto_params * + peer_crypto_params); + +/** + * wlan_crypto_check_rsn_match - called by ucfg to check for WPA match + * @psoc: psoc pointer + * @vdev_id: vdev id + * @ie_ptr: pointer to IEs + * @ie_len: IE length + * @peer_crypto_params: return peer crypto parameters + * + * This function gets called from ucfg to check WPA match. + * + * Return: true or false + */ +bool wlan_crypto_check_wpa_match(struct wlan_objmgr_psoc *psoc, + uint8_t vedv_id, uint8_t *ie_ptr, + uint16_t ie_len, struct wlan_crypto_params * + peer_crypto_params); + +/** + * wlan_crypto_parse_rsnxe_ie() - parse RSNXE IE + * @rsnxe_ie: RSNXE IE pointer + * @cap_len: pointer to hold len of ext capability + * + * Return: pointer to RSNXE capability or NULL + */ +uint8_t * +wlan_crypto_parse_rsnxe_ie(uint8_t *rsnxe_ie, uint8_t *cap_len); + +/** + * wlan_set_vdev_crypto_prarams_from_ie - Sets vdev crypto params from IE info + * @vdev: vdev pointer + * @ie_ptr: pointer to IE + * @ie_len: IE length + * + * This function gets called from ucfg to set crypto params from IE data. + * + * Return: QDF_STATUS_SUCCESS or error code + */ +QDF_STATUS wlan_set_vdev_crypto_prarams_from_ie(struct wlan_objmgr_vdev *vdev, + uint8_t *ie_ptr, + uint16_t ie_len); +#ifdef WLAN_CRYPTO_GCM_OS_DERIVATIVE +static inline int wlan_crypto_aes_gmac(const uint8_t *key, size_t key_len, + const uint8_t *iv, size_t iv_len, + const uint8_t *aad, size_t aad_len, + uint8_t *tag) +{ + return 0; +} +#endif +#ifdef WLAN_CRYPTO_OMAC1_OS_DERIVATIVE +static inline int omac1_aes_128(const uint8_t *key, const uint8_t *data, + size_t data_len, uint8_t *mac) +{ + return 0; +} + +static inline int omac1_aes_256(const uint8_t *key, const uint8_t *data, + size_t data_len, uint8_t *mac) +{ + return 0; +} +#endif + +/** + * ucfg_crypto_set_key_req() - Set key request to UCFG + * @vdev: vdev object + * @req: key request information + * @key_type: indicates the type of key to be set, unicast or group key + * + * Return: None + */ +QDF_STATUS ucfg_crypto_set_key_req(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_key *req, + enum wlan_crypto_key_type key_type); + +/** + * wlan_crypto_get_default_key_idx() - Get the default key index + * @vdev: vdev object + * @igtk: denotes if the request is for igtk key type or not + * + * Return: Index of the requested key + */ +int8_t wlan_crypto_get_default_key_idx(struct wlan_objmgr_vdev *vdev, + bool igtk); + +/** + * wlan_crypto_get_cipher() - Get the cipher type for the vdev + * @vdev: vdev object + * @pairwise: denotes if the request is for pairwise cipher or not + * @key_index: Index of the key whose cipher type has to be returned + * + * Return: enum wlan_crypto_cipher_type + */ +enum wlan_crypto_cipher_type +wlan_crypto_get_cipher(struct wlan_objmgr_vdev *vdev, + bool pairwise, uint8_t key_index); + +#ifdef CRYPTO_SET_KEY_CONVERGED +/** + * wlan_crypto_update_set_key_peer() - Update the peer for set key + * @vdev: vdev object + * @pairwise: denotes if the request is for pairwise cipher or not + * @key_index: Index of the key whose peer has to be set + * @peer_mac: MAC address of the peer + * + * Return: None + */ +void wlan_crypto_update_set_key_peer(struct wlan_objmgr_vdev *vdev, + bool pairwise, uint8_t key_index, + struct qdf_mac_addr *peer_mac); + +/** + * wlan_crypto_validate_key_params() - validates key parameters + * @cipher: cipher type + * @key_index: the index of the key + * @key_len: key length + * @seq_len: sequence counter length + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_crypto_validate_key_params(enum wlan_crypto_cipher_type cipher, + uint8_t key_index, uint8_t key_len, + uint8_t seq_len); + +/** + * wlan_crypto_save_key() - Allocate memory for storing key + * @vdev: vdev object + * @key_index: the index of the key that needs to be allocated + * @crypto_key: Pointer to crypto key + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_crypto_save_key(struct wlan_objmgr_vdev *vdev, + uint8_t key_index, + struct wlan_crypto_key *crypto_key); + +/** + * wlan_crypto_get_key() - Get the stored key information + * @vdev: vdev object + * @key_index: the index of the key that needs to be retrieved + * + * Return: Key material + */ +struct wlan_crypto_key *wlan_crypto_get_key(struct wlan_objmgr_vdev *vdev, + uint8_t key_index); + +/** + * wlan_crypto_set_key_req() - Set key request + * @vdev: vdev object + * @req: key request information + * @key_type: indicates the type of key to be set, unicast or group key + * + * Return: QDF status + */ +QDF_STATUS wlan_crypto_set_key_req(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_key *req, + enum wlan_crypto_key_type key_type); + +/** + * wlan_crypto_free_vdev_key - Free keys for vdev + * @vdev: vdev object + * + * This function frees keys stored in vdev crypto object. + * + * Return: None + */ +void wlan_crypto_free_vdev_key(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_crypto_reset_vdev_params - Reset params for vdev + * @vdev: vdev object + * + * This function reset params stored in vdev crypto object. + * + * Return: None + */ +void wlan_crypto_reset_vdev_params(struct wlan_objmgr_vdev *vdev); +#else +static inline void wlan_crypto_update_set_key_peer( + struct wlan_objmgr_vdev *vdev, + bool pairwise, + uint8_t key_index, + struct qdf_mac_addr *peer_mac) +{ +} + +static inline QDF_STATUS +wlan_crypto_save_key(struct wlan_objmgr_vdev *vdev, uint8_t key_index, + struct wlan_crypto_key *crypto_key) +{ + return QDF_STATUS_SUCCESS; +} + +static inline struct wlan_crypto_key * +wlan_crypto_get_key(struct wlan_objmgr_vdev *vdev, uint8_t key_index) +{ + return NULL; +} + +static inline +QDF_STATUS wlan_crypto_set_key_req(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_key *req, + enum wlan_crypto_key_type key_type) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void wlan_crypto_free_vdev_key(struct wlan_objmgr_vdev *vdev) +{ +} + +static inline void wlan_crypto_reset_vdev_prarams(struct wlan_objmgr_vdev *vdev) +{ +} +#endif /* CRYPTO_SET_KEY_CONVERGED */ + +/** + * wlan_crypto_get_peer_pmksa() - called to get pmksa based on pmksa parameter + * @vdev: vdev + * @pmksa: bssid + * + * This function is to get pmksa based on pmksa parameter + * + * Return: wlan_crypto_pmksa when match found else NULL. + */ +struct wlan_crypto_pmksa * +wlan_crypto_get_peer_pmksa(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_pmksa *pmksa); + +/** + * wlan_crypto_get_pmksa - called to get pmksa of bssid passed. + * @vdev: vdev + * @bssid: bssid + * + * This function gets called from to get pmksa for the bssid. + * + * Return: wlan_crypto_pmksa when match found else NULL. + */ +struct wlan_crypto_pmksa * +wlan_crypto_get_pmksa(struct wlan_objmgr_vdev *vdev, + struct qdf_mac_addr *bssid); + +/** + * wlan_crypto_get_fils_pmksa - Get the PMKSA for FILS + * SSID, if the SSID and cache id matches + * @vdev: Pointer with VDEV object + * @cache_id: Cache id + * @ssid: Pointer to ssid + * @ssid_len: SSID length + * + * Return: PMKSA entry if the cache id and SSID matches + */ +struct wlan_crypto_pmksa * +wlan_crypto_get_fils_pmksa(struct wlan_objmgr_vdev *vdev, + uint8_t *cache_id, uint8_t *ssid, + uint8_t ssid_len); + +/** + * wlan_crypto_pmksa_flush - called to flush saved pmksa + * @crypto_params: crypto_params + * + * This function flush saved pmksa from crypto params. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_pmksa_flush(struct wlan_crypto_params *crypto_params); + +/** + * wlan_crypto_set_vdev_param - called by ucfg to set crypto param + * @vdev: vdev + * @pmksa: pmksa to be set/del. + * @set: set(set=1) or del(set=0) pmksa from the list. + * + * This function gets called from ucfg to set or del pmksa. + * when given pmksa is NULL and set is 0, it is for flush all entries. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_del_pmksa(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_pmksa *pmksa, + bool set); + +#if defined(WLAN_SAE_SINGLE_PMK) && defined(WLAN_FEATURE_ROAM_OFFLOAD) +/** + * wlan_crypto_selective_clear_sae_single_pmk_entries - Clear the PMK entries + * for BSS which have the single PMK flag set other than the current connected + * AP + * @vdev: Vdev + * @conn_bssid: Connected bssid + */ +void +wlan_crypto_selective_clear_sae_single_pmk_entries( + struct wlan_objmgr_vdev *vdev, struct qdf_mac_addr *conn_bssid); + +/** + * wlan_crypto_set_sae_single_pmk_bss_cap - Set the peer SAE sinlge pmk + * feature supported status + * @vdev: Vdev + * @bssid: BSSID for which the flag is to be set + * @single_pmk_capable_bss: Flag to indicate Sae single pmk supported BSSID or + * not + */ +void wlan_crypto_set_sae_single_pmk_bss_cap(struct wlan_objmgr_vdev *vdev, + struct qdf_mac_addr *bssid, + bool single_pmk_capable_bss); +#else +static inline void +wlan_crypto_selective_clear_sae_single_pmk_entries( + struct wlan_objmgr_vdev *vdev, struct qdf_mac_addr *conn_bssid) +{ +} + +static inline +void wlan_crypto_set_sae_single_pmk_bss_cap(struct wlan_objmgr_vdev *vdev, + struct qdf_mac_addr *bssid, + bool single_pmk_capable_bss) +{ +} +#endif + +#endif /* end of _WLAN_CRYPTO_GLOBAL_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_global_def.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_global_def.h new file mode 100644 index 0000000000000000000000000000000000000000..a5f489b530a6ec9087123e131761919ad35a3d7e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_global_def.h @@ -0,0 +1,432 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public definations for crypto service + */ + +#ifndef _WLAN_CRYPTO_GLOBAL_DEF_H_ +#define _WLAN_CRYPTO_GLOBAL_DEF_H_ + +#include +#ifdef WLAN_CRYPTO_SUPPORT_FILS +#include "wlan_crypto_fils_def.h" +#endif +#include +#include + +#define WLAN_CRYPTO_TID_SIZE (17) +#define WLAN_CRYPTO_RSC_SIZE (16) +#define WLAN_CRYPTO_KEYBUF_SIZE (32) +#define WLAN_CRYPTO_MICBUF_SIZE (16) +#define WLAN_CRYPTO_MIC_LEN (8) +#define WLAN_CRYPTO_IV_SIZE (16) +#define WLAN_CRYPTO_MIC256_LEN (16) +#define WLAN_CRYPTO_TXMIC_OFFSET (0) +#define WLAN_CRYPTO_RXMIC_OFFSET (WLAN_CRYPTO_TXMIC_OFFSET + \ + WLAN_CRYPTO_MIC_LEN) +#define WLAN_CRYPTO_WAPI_IV_SIZE (16) +#define WLAN_CRYPTO_CRC_LEN (4) +#define WLAN_CRYPTO_IV_LEN (3) +#define WLAN_CRYPTO_KEYID_LEN (1) +#define WLAN_CRYPTO_EXT_IV_LEN (4) +#define WLAN_CRYPTO_EXT_IV_BIT (0x20) +#define WLAN_CRYPTO_KEYIX_NONE ((uint16_t)-1) +#define WLAN_CRYPTO_MAXKEYIDX (4) +#define WLAN_CRYPTO_MAXIGTKKEYIDX (2) +#ifndef WLAN_CRYPTO_MAX_VLANKEYIX +#define WLAN_CRYPTO_MAX_VLANKEYIX WLAN_CRYPTO_MAXKEYIDX +#endif +#define WLAN_CRYPTO_MAX_PMKID (16) + +/* 40 bit wep key len */ +#define WLAN_CRYPTO_KEY_WEP40_LEN (5) +/* 104 bit wep key len */ +#define WLAN_CRYPTO_KEY_WEP104_LEN (13) +/* 128 bit wep key len */ +#define WLAN_CRYPTO_KEY_WEP128_LEN (16) + +#define WLAN_CRYPTO_KEY_TKIP_LEN (32) +#define WLAN_CRYPTO_KEY_CCMP_LEN (16) +#define WLAN_CRYPTO_KEY_CCMP_256_LEN (32) +#define WLAN_CRYPTO_KEY_GCMP_LEN (16) +#define WLAN_CRYPTO_KEY_GCMP_256_LEN (32) +#define WLAN_CRYPTO_KEY_WAPI_LEN (32) +#define WLAN_CRYPTO_KEY_GMAC_LEN (16) +#define WLAN_CRYPTO_KEY_GMAC_256_LEN (32) +#define WLAN_CRYPTO_WPI_SMS4_IVLEN (16) +#define WLAN_CRYPTO_WPI_SMS4_KIDLEN (1) +#define WLAN_CRYPTO_WPI_SMS4_PADLEN (1) +#define WLAN_CRYPTO_WPI_SMS4_MICLEN (16) + +/* key used for xmit */ +#define WLAN_CRYPTO_KEY_XMIT (0x01) +/* key used for recv */ +#define WLAN_CRYPTO_KEY_RECV (0x02) +/* key used for WPA group operation */ +#define WLAN_CRYPTO_KEY_GROUP (0x04) +/* key also used for management frames */ +#define WLAN_CRYPTO_KEY_MFP (0x08) +/* host-based encryption */ +#define WLAN_CRYPTO_KEY_SWENCRYPT (0x10) +/* host-based enmic */ +#define WLAN_CRYPTO_KEY_SWENMIC (0x20) +/* do not remove unless OS commands us to do so */ +#define WLAN_CRYPTO_KEY_PERSISTENT (0x40) +/* per STA default key */ +#define WLAN_CRYPTO_KEY_DEFAULT (0x80) +/* host-based decryption */ +#define WLAN_CRYPTO_KEY_SWDECRYPT (0x100) +/* host-based demic */ +#define WLAN_CRYPTO_KEY_SWDEMIC (0x200) +/* get pn from fw for key */ +#define WLAN_CRYPTO_KEY_GET_PN (0x400) + +#define WLAN_CRYPTO_KEY_SWCRYPT (WLAN_CRYPTO_KEY_SWENCRYPT \ + | WLAN_CRYPTO_KEY_SWDECRYPT) + +#define WLAN_CRYPTO_KEY_SWMIC (WLAN_CRYPTO_KEY_SWENMIC \ + | WLAN_CRYPTO_KEY_SWDEMIC) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#define WLAN_AKM_SUITE_FT_8021X 0x000FAC03 +#define WLAN_AKM_SUITE_FT_PSK 0x000FAC04 +#endif + +/* + * Cipher types + */ +typedef enum wlan_crypto_cipher_type { + WLAN_CRYPTO_CIPHER_WEP = 0, + WLAN_CRYPTO_CIPHER_TKIP = 1, + WLAN_CRYPTO_CIPHER_AES_OCB = 2, + WLAN_CRYPTO_CIPHER_AES_CCM = 3, + WLAN_CRYPTO_CIPHER_WAPI_SMS4 = 4, + WLAN_CRYPTO_CIPHER_CKIP = 5, + WLAN_CRYPTO_CIPHER_AES_CMAC = 6, + WLAN_CRYPTO_CIPHER_AES_CCM_256 = 7, + WLAN_CRYPTO_CIPHER_AES_CMAC_256 = 8, + WLAN_CRYPTO_CIPHER_AES_GCM = 9, + WLAN_CRYPTO_CIPHER_AES_GCM_256 = 10, + WLAN_CRYPTO_CIPHER_AES_GMAC = 11, + WLAN_CRYPTO_CIPHER_AES_GMAC_256 = 12, + WLAN_CRYPTO_CIPHER_WAPI_GCM4 = 13, + WLAN_CRYPTO_CIPHER_FILS_AEAD = 14, + WLAN_CRYPTO_CIPHER_WEP_40 = 15, + WLAN_CRYPTO_CIPHER_WEP_104 = 16, + WLAN_CRYPTO_CIPHER_NONE = 17, + WLAN_CRYPTO_CIPHER_MAX = (WLAN_CRYPTO_CIPHER_NONE + 1), + WLAN_CRYPTO_CIPHER_INVALID, +} wlan_crypto_cipher_type; + +/* Auth types */ +typedef enum wlan_crypto_auth_mode { + WLAN_CRYPTO_AUTH_NONE = 0, + WLAN_CRYPTO_AUTH_OPEN = 1, + WLAN_CRYPTO_AUTH_SHARED = 2, + WLAN_CRYPTO_AUTH_8021X = 3, + WLAN_CRYPTO_AUTH_AUTO = 4, + WLAN_CRYPTO_AUTH_WPA = 5, + WLAN_CRYPTO_AUTH_RSNA = 6, + WLAN_CRYPTO_AUTH_CCKM = 7, + WLAN_CRYPTO_AUTH_WAPI = 8, + WLAN_CRYPTO_AUTH_SAE = 9, + WLAN_CRYPTO_AUTH_FILS_SK = 10, + /** Keep WLAN_CRYPTO_AUTH_MAX at the end. */ + WLAN_CRYPTO_AUTH_MAX = WLAN_CRYPTO_AUTH_FILS_SK, +} wlan_crypto_auth_mode; + +/* crypto capabilities */ +typedef enum wlan_crypto_cap { + WLAN_CRYPTO_CAP_PRIVACY = 0, + WLAN_CRYPTO_CAP_WPA1 = 1, + WLAN_CRYPTO_CAP_WPA2 = 2, + WLAN_CRYPTO_CAP_WPA = 3, + WLAN_CRYPTO_CAP_AES = 4, + WLAN_CRYPTO_CAP_WEP = 5, + WLAN_CRYPTO_CAP_CKIP = 6, + WLAN_CRYPTO_CAP_TKIP_MIC = 7, + WLAN_CRYPTO_CAP_CCM256 = 8, + WLAN_CRYPTO_CAP_GCM = 9, + WLAN_CRYPTO_CAP_GCM_256 = 10, + WLAN_CRYPTO_CAP_WAPI_SMS4 = 11, + WLAN_CRYPTO_CAP_WAPI_GCM4 = 12, + WLAN_CRYPTO_CAP_KEY_MGMT_OFFLOAD = 13, + WLAN_CRYPTO_CAP_PMF_OFFLOAD = 14, + WLAN_CRYPTO_CAP_PN_TID_BASED = 15, + WLAN_CRYPTO_CAP_FILS_AEAD = 16, +} wlan_crypto_cap; + +typedef enum wlan_crypto_rsn_cap { + WLAN_CRYPTO_RSN_CAP_PREAUTH = 0x01, + WLAN_CRYPTO_RSN_CAP_MFP_ENABLED = 0x80, + WLAN_CRYPTO_RSN_CAP_MFP_REQUIRED = 0x40, +} wlan_crypto_rsn_cap; + +enum wlan_crypto_rsnx_cap { + WLAN_CRYPTO_RSNX_CAP_PROTECTED_TWT = 0x10, + WLAN_CRYPTO_RSNX_CAP_SAE_H2E = 0x20, + WLAN_CRYPTO_RSNX_CAP_SAE_PK = 0x40, +}; + +typedef enum wlan_crypto_key_mgmt { + WLAN_CRYPTO_KEY_MGMT_IEEE8021X = 0, + WLAN_CRYPTO_KEY_MGMT_PSK = 1, + WLAN_CRYPTO_KEY_MGMT_NONE = 2, + WLAN_CRYPTO_KEY_MGMT_IEEE8021X_NO_WPA = 3, + WLAN_CRYPTO_KEY_MGMT_WPA_NONE = 4, + WLAN_CRYPTO_KEY_MGMT_FT_IEEE8021X = 5, + WLAN_CRYPTO_KEY_MGMT_FT_PSK = 6, + WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SHA256 = 7, + WLAN_CRYPTO_KEY_MGMT_PSK_SHA256 = 8, + WLAN_CRYPTO_KEY_MGMT_WPS = 9, + WLAN_CRYPTO_KEY_MGMT_SAE = 10, + WLAN_CRYPTO_KEY_MGMT_FT_SAE = 11, + WLAN_CRYPTO_KEY_MGMT_WAPI_PSK = 12, + WLAN_CRYPTO_KEY_MGMT_WAPI_CERT = 13, + WLAN_CRYPTO_KEY_MGMT_CCKM = 14, + WLAN_CRYPTO_KEY_MGMT_OSEN = 15, + WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B = 16, + WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B_192 = 17, + WLAN_CRYPTO_KEY_MGMT_FILS_SHA256 = 18, + WLAN_CRYPTO_KEY_MGMT_FILS_SHA384 = 19, + WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA256 = 20, + WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA384 = 21, + WLAN_CRYPTO_KEY_MGMT_OWE = 22, + WLAN_CRYPTO_KEY_MGMT_DPP = 23, + WLAN_CRYPTO_KEY_MGMT_FT_IEEE8021X_SHA384 = 24, + /** Keep WLAN_CRYPTO_KEY_MGMT_MAX at the end. */ + WLAN_CRYPTO_KEY_MGMT_MAX = WLAN_CRYPTO_KEY_MGMT_FT_IEEE8021X_SHA384, +} wlan_crypto_key_mgmt; + +enum wlan_crypto_key_type { + WLAN_CRYPTO_KEY_TYPE_UNICAST, + WLAN_CRYPTO_KEY_TYPE_GROUP, +}; + +#define IS_WEP_CIPHER(_c) ((_c == WLAN_CRYPTO_CIPHER_WEP) || \ + (_c == WLAN_CRYPTO_CIPHER_WEP_40) || \ + (_c == WLAN_CRYPTO_CIPHER_WEP_104)) +/** + * struct wlan_crypto_pmksa - structure of crypto to contain pmkid + * @bssid: bssid for which pmkid is saved + * @pmkid: pmkid info + * @pmk: pmk info + * @pmk_len: pmk len + * @single_pmk_supported: SAE single pmk supported BSS + */ +struct wlan_crypto_pmksa { + struct qdf_mac_addr bssid; + uint8_t pmkid[PMKID_LEN]; + uint8_t pmk[MAX_PMK_LEN]; + uint8_t pmk_len; + uint8_t ssid_len; + uint8_t ssid[WLAN_SSID_MAX_LEN]; + uint8_t cache_id[WLAN_CACHE_ID_LEN]; +#if defined(WLAN_SAE_SINGLE_PMK) && defined(WLAN_FEATURE_ROAM_OFFLOAD) + bool single_pmk_supported; +#endif +}; + +/** + * struct wlan_crypto_params - holds crypto params + * @authmodeset: authentication mode + * @ucastcipherset: unicast ciphers + * @mcastcipherset: multicast cipher + * @mgmtcipherset: mgmt cipher + * @cipher_caps: cipher capability + * @key_mgmt: key mgmt + * @pmksa: pmksa + * @rsn_caps: rsn_capability + * + * This structure holds crypto params for peer or vdev + */ +struct wlan_crypto_params { + uint32_t authmodeset; + uint32_t ucastcipherset; + uint32_t mcastcipherset; + uint32_t mgmtcipherset; + uint32_t cipher_caps; + uint32_t key_mgmt; + struct wlan_crypto_pmksa *pmksa[WLAN_CRYPTO_MAX_PMKID]; + uint16_t rsn_caps; +}; + +typedef enum wlan_crypto_param_type { + WLAN_CRYPTO_PARAM_AUTH_MODE, + WLAN_CRYPTO_PARAM_UCAST_CIPHER, + WLAN_CRYPTO_PARAM_MCAST_CIPHER, + WLAN_CRYPTO_PARAM_MGMT_CIPHER, + WLAN_CRYPTO_PARAM_CIPHER_CAP, + WLAN_CRYPTO_PARAM_RSN_CAP, + WLAN_CRYPTO_PARAM_KEY_MGMT, + WLAN_CRYPTO_PARAM_PMKSA, +} wlan_crypto_param_type; + +/** + * struct wlan_crypto_key - key structure + * @keylen: length of the key + * @valid: is key valid or not + * @flags: key flags + * @keyix: key id + * @cipher_type: cipher type being used for this key + * @mac_addr: MAC address of the peer + * @cipher_table: table which stores cipher related info + * @private: private pointer to save cipher context + * @keylock: spin lock + * @recviv: WAPI key receive sequence counter + * @txiv: WAPI key transmit sequence counter + * @keytsc: key transmit sequence counter + * @keyrsc: key receive sequence counter + * @keyrsc_suspect: key receive sequence counter under + * suspect when pN jump is detected + * @keyglobal: key receive global sequence counter used with suspect + * @keyval: key value buffer + * + * This key structure to key related details. + */ +struct wlan_crypto_key { + uint8_t keylen; + bool valid; + uint16_t flags; + uint16_t keyix; + enum wlan_crypto_cipher_type cipher_type; + uint8_t macaddr[QDF_MAC_ADDR_SIZE]; + void *cipher_table; + void *private; + qdf_spinlock_t keylock; + uint8_t recviv[WLAN_CRYPTO_WAPI_IV_SIZE]; + uint8_t txiv[WLAN_CRYPTO_WAPI_IV_SIZE]; + uint64_t keytsc; + uint64_t keyrsc[WLAN_CRYPTO_TID_SIZE]; + uint64_t keyrsc_suspect[WLAN_CRYPTO_TID_SIZE]; + uint64_t keyglobal; + uint8_t keyval[WLAN_CRYPTO_KEYBUF_SIZE + + WLAN_CRYPTO_MICBUF_SIZE]; +#define txmic (keyval + WLAN_CRYPTO_KEYBUF_SIZE \ + + WLAN_CRYPTO_TXMIC_OFFSET) +#define rxmic (keyval + WLAN_CRYPTO_KEYBUF_SIZE \ + + WLAN_CRYPTO_RXMIC_OFFSET) +}; + +/** + * struct wlan_crypto_req_key - key request structure + * @type: key/cipher type + * @pad: padding member + * @keyix: key index + * @keylen: length of the key value + * @flags: key flags + * @macaddr: macaddr of the key + * @keyrsc: key receive sequence counter + * @keytsc: key transmit sequence counter + * @keydata: key value + * @txiv: wapi key tx iv + * @rxiv: wapi key rx iv + * @filsaad: FILS AEAD data + * + * Key request structure used for setkey, getkey or delkey + */ +struct wlan_crypto_req_key { + uint8_t type; + uint8_t pad; + uint16_t keyix; + uint8_t keylen; + uint16_t flags; + uint8_t macaddr[QDF_MAC_ADDR_SIZE]; + uint64_t keyrsc; + uint64_t keytsc; + uint8_t keydata[WLAN_CRYPTO_KEYBUF_SIZE + WLAN_CRYPTO_MICBUF_SIZE]; + uint8_t txiv[WLAN_CRYPTO_WAPI_IV_SIZE]; + uint8_t recviv[WLAN_CRYPTO_WAPI_IV_SIZE]; +#ifdef WLAN_CRYPTO_SUPPORT_FILS + struct wlan_crypto_fils_aad_key filsaad; +#endif +}; + +/** + * struct wlan_lmac_if_crypto_tx_ops - structure of crypto function + * pointers + * @allockey: function pointer to alloc key in hw + * @setkey: function pointer to setkey in hw + * @delkey: function pointer to delkey in hw + * @defaultkey: function pointer to set default key + * @set_key: converged function pointer to set key in hw + * @getpn: function pointer to get current pn value of peer + * @register_events: function pointer to register wmi event handler + * @deregister_events: function pointer to deregister wmi event handler + */ + +struct wlan_lmac_if_crypto_tx_ops { + QDF_STATUS (*allockey)(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_key *key, + uint8_t *macaddr, uint32_t key_type); + QDF_STATUS (*setkey)(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_key *key, + uint8_t *macaddr, uint32_t key_type); + QDF_STATUS (*delkey)(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_key *key, + uint8_t *macaddr, uint32_t key_type); + QDF_STATUS (*defaultkey)(struct wlan_objmgr_vdev *vdev, + uint8_t keyix, uint8_t *macaddr); + QDF_STATUS (*set_key)(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_key *key, + enum wlan_crypto_key_type key_type); + QDF_STATUS(*getpn)(struct wlan_objmgr_vdev *vdev, + uint8_t *macaddr, uint32_t key_type); + QDF_STATUS (*register_events)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*deregister_events)(struct wlan_objmgr_psoc *psoc); +}; + +/** + * struct wlan_lmac_if_crypto_rx_ops - structure of crypto rx function + * pointers + * @encap: function pointer to encap tx frame + * @decap: function pointer to decap rx frame in hw + * @enmic: function pointer to enmic tx frame + * @demic: function pointer to demic rx frame + */ + +struct wlan_lmac_if_crypto_rx_ops { + QDF_STATUS(*crypto_encap)(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, uint8_t *macaddr, + uint8_t encapdone); + QDF_STATUS(*crypto_decap)(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, uint8_t *macaddr, + uint8_t tid); + QDF_STATUS(*crypto_enmic)(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, uint8_t *macaddr, + uint8_t encapdone); + QDF_STATUS(*crypto_demic)(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, uint8_t *macaddr, + uint8_t tid, uint8_t keyid); + QDF_STATUS(*set_peer_wep_keys)(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer); +}; + +#define WLAN_CRYPTO_RX_OPS_ENCAP(crypto_rx_ops) \ + (crypto_rx_ops->crypto_encap) +#define WLAN_CRYPTO_RX_OPS_DECAP(crypto_rx_ops) \ + (crypto_rx_ops->crypto_decap) +#define WLAN_CRYPTO_RX_OPS_ENMIC(crypto_rx_ops) \ + (crypto_rx_ops->crypto_enmic) +#define WLAN_CRYPTO_RX_OPS_DEMIC(crypto_rx_ops) \ + (crypto_rx_ops->crypto_demic) +#define WLAN_CRYPTO_RX_OPS_SET_PEER_WEP_KEYS(crypto_rx_ops) \ + (crypto_rx_ops->set_peer_wep_keys) + +#endif /* end of _WLAN_CRYPTO_GLOBAL_DEF_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_main.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_main.h new file mode 100644 index 0000000000000000000000000000000000000000..20c1be62ab92b9fd2f9ffda9592625ae62b0a45f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_main.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2017-2018, 2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Private API for crypto service with object manager handler + */ +#ifndef _WLAN_CRYPTO_MAIN_H_ +#define _WLAN_CRYPTO_MAIN_H_ +#include "wlan_crypto_global_def.h" + +/** + * wlan_crypto_init - Init the crypto service with object manager + * Called from umac init context. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_init(void); + +/** + * wlan_crypto_deinit - Deinit the crypto service with object manager + * Called from umac deinit context. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_deinit(void); + +#ifdef CRYPTO_SET_KEY_CONVERGED +/** + * wlan_crypto_psoc_enable: psoc enable API for wlan crypto component + * @psoc: pointer to PSOC + * + * Return: status of operation + */ +QDF_STATUS wlan_crypto_psoc_enable(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_crypto_psoc_disable: psoc disable API for wlan crypto component + * @psoc: pointer to PSOC + * + * Return: status of operation + */ +QDF_STATUS wlan_crypto_psoc_disable(struct wlan_objmgr_psoc *psoc); +#else +static inline QDF_STATUS wlan_crypto_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS wlan_crypto_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#endif /* end of _WLAN_CRYPTO_MAIN_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_def_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_def_i.h new file mode 100644 index 0000000000000000000000000000000000000000..b82e848ff9c0433c4bf5ee21e05b49e0a05cc576 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_def_i.h @@ -0,0 +1,608 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Private definations for handling crypto params + */ +#ifndef _WLAN_CRYPTO_DEF_I_H_ +#define _WLAN_CRYPTO_DEF_I_H_ + +#include +#ifdef WLAN_CRYPTO_AES +#include "wlan_crypto_aes_i.h" +#endif + +/* IEEE 802.11 defines */ +#define WLAN_FC0_PVER 0x0003 +#define WLAN_FC1_DIR_MASK 0x03 +#define WLAN_FC1_TODS 0x01 +#define WLAN_FC1_FROMDS 0x02 +#define WLAN_FC1_DSTODS 0x03 +#define WLAN_FC1_MOREFRAG 0x04 +#define WLAN_FC1_RETRY 0x08 +#define WLAN_FC1_PWRMGT 0x10 +#define WLAN_FC1_MOREDATA 0x20 +#define WLAN_FC1_ISWEP 0x40 +#define WLAN_FC1_ORDER 0x80 + +#define WLAN_FC0_GET_TYPE(fc) (((fc) & 0x0c) >> 2) +#define WLAN_FC0_GET_STYPE(fc) (((fc) & 0xf0) >> 4) + +#define WLAN_INVALID_MGMT_SEQ 0xffff +#define WLAN_SEQ_MASK 0x0fff +#define WLAN_QOS_TID_MASK 0x0f +#define WLAN_GET_SEQ_FRAG(seq) ((seq) & (BIT(3) | BIT(2) | BIT(1) | BIT(0))) +#define WLAN_GET_SEQ_SEQ(seq) \ + (((seq) & (~(BIT(3) | BIT(2) | BIT(1) | BIT(0)))) >> 4) + +#define WLAN_FC0_TYPE_MGMT 0 +#define WLAN_FC0_TYPE_CTRL 1 +#define WLAN_FC0_TYPE_DATA 2 + +/* management */ +#define WLAN_FC0_STYPE_ASSOC_REQ 0 +#define WLAN_FC0_STYPE_ASSOC_RESP 1 +#define WLAN_FC0_STYPE_REASSOC_REQ 2 +#define WLAN_FC0_STYPE_REASSOC_RESP 3 +#define WLAN_FC0_STYPE_PROBE_REQ 4 +#define WLAN_FC0_STYPE_PROBE_RESP 5 +#define WLAN_FC0_STYPE_BEACON 8 +#define WLAN_FC0_STYPE_ATIM 9 +#define WLAN_FC0_STYPE_DISASSOC 10 +#define WLAN_FC0_STYPE_AUTH 11 +#define WLAN_FC0_STYPE_DEAUTH 12 +#define WLAN_FC0_STYPE_ACTION 13 + +/* control */ +#define WLAN_FC0_STYPE_PSPOLL 10 +#define WLAN_FC0_STYPE_RTS 11 +#define WLAN_FC0_STYPE_CTS 12 +#define WLAN_FC0_STYPE_ACK 13 +#define WLAN_FC0_STYPE_CFEND 14 +#define WLAN_FC0_STYPE_CFENDACK 15 + +/* data */ +#define WLAN_FC0_STYPE_DATA 0 +#define WLAN_FC0_STYPE_DATA_CFACK 1 +#define WLAN_FC0_STYPE_DATA_CFPOLL 2 +#define WLAN_FC0_STYPE_DATA_CFACKPOLL 3 +#define WLAN_FC0_STYPE_NULLFUNC 4 +#define WLAN_FC0_STYPE_CFACK 5 +#define WLAN_FC0_STYPE_CFPOLL 6 +#define WLAN_FC0_STYPE_CFACKPOLL 7 +#define WLAN_FC0_STYPE_QOS_DATA 8 +#define WLAN_FC0_STYPE_QOS_DATA_CFACK 9 +#define WLAN_FC0_STYPE_QOS_DATA_CFPOLL 10 +#define WLAN_FC0_STYPE_QOS_DATA_CFACKPOLL 11 +#define WLAN_FC0_STYPE_QOS_NULL 12 +#define WLAN_FC0_STYPE_QOS_CFPOLL 14 +#define WLAN_FC0_STYPE_QOS_CFACKPOLL 15 + +#define WLAN_TID_SIZE 17 +#define WLAN_NONQOS_SEQ 16 + +/* Number of bits per byte */ +#define CRYPTO_NBBY 8 + +/* Macros for handling unaligned memory accesses */ + +static inline uint16_t wlan_crypto_get_be16(const uint8_t *a) +{ + return (a[0] << 8) | a[1]; +} + +static inline void wlan_crypto_put_be16(uint8_t *a, uint16_t val) +{ + a[0] = val >> 8; + a[1] = val & 0xff; +} + +static inline uint16_t wlan_crypto_get_le16(const uint8_t *a) +{ + return (a[1] << 8) | a[0]; +} + +static inline void wlan_crypto_put_le16(uint8_t *a, uint16_t val) +{ + a[1] = val >> 8; + a[0] = val & 0xff; +} + +static inline uint32_t wlan_crypto_get_be32(const uint8_t *a) +{ + return ((u32) a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]; +} + +static inline void wlan_crypto_put_be32(uint8_t *a, uint32_t val) +{ + a[0] = (val >> 24) & 0xff; + a[1] = (val >> 16) & 0xff; + a[2] = (val >> 8) & 0xff; + a[3] = val & 0xff; +} + +static inline uint32_t wlan_crypto_get_le32(const uint8_t *a) +{ + return ((u32) a[3] << 24) | (a[2] << 16) | (a[1] << 8) | a[0]; +} + +static inline void wlan_crypto_put_le32(uint8_t *a, uint32_t val) +{ + a[3] = (val >> 24) & 0xff; + a[2] = (val >> 16) & 0xff; + a[1] = (val >> 8) & 0xff; + a[0] = val & 0xff; +} + +static inline void wlan_crypto_put_be64(u8 *a, u64 val) +{ + a[0] = val >> 56; + a[1] = val >> 48; + a[2] = val >> 40; + a[3] = val >> 32; + a[4] = val >> 24; + a[5] = val >> 16; + a[6] = val >> 8; + a[7] = val & 0xff; +} + +#define WLAN_CRYPTO_TX_OPS_ALLOCKEY(psoc) \ + (psoc->soc_cb.tx_ops.crypto_tx_ops.allockey) +#define WLAN_CRYPTO_TX_OPS_SETKEY(psoc) \ + (psoc->soc_cb.tx_ops.crypto_tx_ops.setkey) +#define WLAN_CRYPTO_TX_OPS_DELKEY(psoc) \ + (psoc->soc_cb.tx_ops.crypto_tx_ops.delkey) +#define WLAN_CRYPTO_TX_OPS_DEFAULTKEY(psoc) \ + (psoc->soc_cb.tx_ops.crypto_tx_ops.defaultkey) +#define WLAN_CRYPTO_TX_OPS_SET_KEY(psoc) \ + ((psoc)->soc_cb.tx_ops.crypto_tx_ops.set_key) +#define WLAN_CRYPTO_TX_OPS_GETPN(psoc) \ + ((psoc)->soc_cb.tx_ops.crypto_tx_ops.getpn) +#define WLAN_CRYPTO_TX_OPS_REGISTER_EVENTS(psoc) \ + ((psoc)->soc_cb.tx_ops.crypto_tx_ops.register_events) +#define WLAN_CRYPTO_TX_OPS_DEREGISTER_EVENTS(psoc) \ + ((psoc)->soc_cb.tx_ops.crypto_tx_ops.deregister_events) + +/* unalligned little endian access */ +#ifndef LE_READ_2 +#define LE_READ_2(p) \ + ((uint16_t) \ + ((((const uint8_t *)(p))[0]) | \ + (((const uint8_t *)(p))[1] << 8))) +#endif + +#ifndef LE_READ_4 +#define LE_READ_4(p) \ + ((uint32_t) \ + ((((const uint8_t *)(p))[0]) | \ + (((const uint8_t *)(p))[1] << 8) | \ + (((const uint8_t *)(p))[2] << 16) | \ + (((const uint8_t *)(p))[3] << 24))) +#endif + +#ifndef BE_READ_4 +#define BE_READ_4(p) \ + ((uint32_t) \ + ((((const uint8_t *)(p))[0] << 24) | \ + (((const uint8_t *)(p))[1] << 16) | \ + (((const uint8_t *)(p))[2] << 8) | \ + (((const uint8_t *)(p))[3]))) +#endif + +#ifndef READ_6 +#define READ_6(b0, b1, b2, b3, b4, b5) ({ \ + uint32_t iv32 = (b0 << 0) | (b1 << 8) | (b2 << 16) | (b3 << 24);\ + uint16_t iv16 = (b4 << 0) | (b5 << 8);\ + (((uint64_t)iv16) << 32) | iv32;\ +}) +#endif + +#define OUI_SIZE (4) +#define WLAN_CRYPTO_ADDSHORT(frm, v) \ + do {frm[0] = (v) & 0xff; frm[1] = (v) >> 8; frm += 2; } while (0) + +#define WLAN_CRYPTO_ADDSELECTOR(frm, sel) \ + do { \ + uint32_t value = sel;\ + qdf_mem_copy(frm, (uint8_t *)&value, OUI_SIZE); \ + frm += OUI_SIZE; } while (0) + +#define WLAN_CRYPTO_SELECTOR(a, b, c, d) \ + ((((uint32_t) (a)) << 24) | \ + (((uint32_t) (b)) << 16) | \ + (((uint32_t) (c)) << 8) | \ + (uint32_t) (d)) + +#define WPA_TYPE_OUI WLAN_WPA_SEL(WLAN_WPA_OUI_TYPE) + +#define WLAN_CRYPTO_WAPI_IE_LEN 20 +#define WLAN_CRYPTO_WAPI_SMS4_CIPHER 0x01 + +#define WPA_AUTH_KEY_MGMT_NONE WLAN_WPA_SEL(WLAN_ASE_NONE) +#define WPA_AUTH_KEY_MGMT_UNSPEC_802_1X WLAN_WPA_SEL(WLAN_ASE_8021X_UNSPEC) +#define WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X \ + WLAN_WPA_SEL(WLAN_ASE_8021X_PSK) +#define WPA_AUTH_KEY_MGMT_CCKM WLAN_WPA_CCKM_AKM + +#define WPA_CIPHER_SUITE_NONE WLAN_WPA_SEL(WLAN_CSE_NONE) +#define WPA_CIPHER_SUITE_WEP40 WLAN_WPA_SEL(WLAN_CSE_WEP40) +#define WPA_CIPHER_SUITE_WEP104 WLAN_WPA_SEL(WLAN_CSE_WEP104) +#define WPA_CIPHER_SUITE_TKIP WLAN_WPA_SEL(WLAN_CSE_TKIP) +#define WPA_CIPHER_SUITE_CCMP WLAN_WPA_SEL(WLAN_CSE_CCMP) + +#define RSN_AUTH_KEY_MGMT_NONE WLAN_RSN_SEL(0) +#define RSN_AUTH_KEY_MGMT_UNSPEC_802_1X WLAN_RSN_SEL(1) +#define RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X\ + WLAN_RSN_SEL(2) +#define RSN_AUTH_KEY_MGMT_FT_802_1X WLAN_RSN_SEL(3) +#define RSN_AUTH_KEY_MGMT_FT_PSK WLAN_RSN_SEL(4) +#define RSN_AUTH_KEY_MGMT_802_1X_SHA256\ + WLAN_RSN_SEL(5) +#define RSN_AUTH_KEY_MGMT_PSK_SHA256 WLAN_RSN_SEL(6) +#define RSN_AUTH_KEY_MGMT_WPS WLAN_RSN_SEL(7) +#define RSN_AUTH_KEY_MGMT_SAE WLAN_RSN_SEL(8) +#define RSN_AUTH_KEY_MGMT_FT_SAE WLAN_RSN_SEL(9) +#define RSN_AUTH_KEY_MGMT_802_1X_SUITE_B\ + WLAN_RSN_SEL(11) +#define RSN_AUTH_KEY_MGMT_802_1X_SUITE_B_192\ + WLAN_RSN_SEL(12) +#define RSN_AUTH_KEY_MGMT_FT_802_1X_SUITE_B_384\ + WLAN_RSN_SEL(13) +#define RSN_AUTH_KEY_MGMT_FILS_SHA256 WLAN_RSN_SEL(14) +#define RSN_AUTH_KEY_MGMT_FILS_SHA384 WLAN_RSN_SEL(15) +#define RSN_AUTH_KEY_MGMT_FT_FILS_SHA256\ + WLAN_RSN_SEL(16) +#define RSN_AUTH_KEY_MGMT_FT_FILS_SHA384\ + WLAN_RSN_SEL(17) +#define RSN_AUTH_KEY_MGMT_OWE WLAN_RSN_SEL(18) + +#define RSN_AUTH_KEY_MGMT_CCKM (WLAN_RSN_CCKM_AKM) +#define RSN_AUTH_KEY_MGMT_OSEN (0x019a6f50) +#define RSN_AUTH_KEY_MGMT_DPP (WLAN_RSN_DPP_AKM) + +#define RSN_CIPHER_SUITE_NONE WLAN_RSN_SEL(WLAN_CSE_NONE) +#define RSN_CIPHER_SUITE_WEP40 WLAN_RSN_SEL(WLAN_CSE_WEP40) +#define RSN_CIPHER_SUITE_TKIP WLAN_RSN_SEL(WLAN_CSE_TKIP) +#define RSN_CIPHER_SUITE_WEP104 WLAN_RSN_SEL(WLAN_CSE_WEP104) +#define RSN_CIPHER_SUITE_CCMP WLAN_RSN_SEL(WLAN_CSE_CCMP) +#define RSN_CIPHER_SUITE_AES_CMAC WLAN_RSN_SEL(WLAN_CSE_AES_CMAC) +#define RSN_CIPHER_SUITE_GCMP WLAN_RSN_SEL(WLAN_CSE_GCMP_128) +#define RSN_CIPHER_SUITE_GCMP_256 WLAN_RSN_SEL(WLAN_CSE_GCMP_256) +#define RSN_CIPHER_SUITE_CCMP_256 WLAN_RSN_SEL(WLAN_CSE_CCMP_256) +#define RSN_CIPHER_SUITE_BIP_GMAC_128 WLAN_RSN_SEL(WLAN_CSE_BIP_GMAC_128) +#define RSN_CIPHER_SUITE_BIP_GMAC_256 WLAN_RSN_SEL(WLAN_CSE_BIP_GMAC_256) +#define RSN_CIPHER_SUITE_BIP_CMAC_256 WLAN_RSN_SEL(WLAN_CSE_BIP_CMAC_256) + +#define RESET_PARAM(__param) ((__param) = 0) +#define SET_PARAM(__param, __val) ((__param) |= (1 << (__val))) +#define HAS_PARAM(__param, __val) ((__param) & (1 << (__val))) +#define CLEAR_PARAM(__param, __val) ((__param) &= ((~1) << (__val))) + + +#define RESET_AUTHMODE(_param) ((_param)->authmodeset = \ + (1 << WLAN_CRYPTO_AUTH_OPEN)) + +#define SET_AUTHMODE(_param, _mode) ((_param)->authmodeset |= (1 << (_mode))) +#define HAS_AUTHMODE(_param, _mode) ((_param)->authmodeset & (1 << (_mode))) + +#define AUTH_IS_OPEN(_param) HAS_AUTHMODE((_param), WLAN_CRYPTO_AUTH_OPEN) +#define AUTH_IS_SHARED_KEY(_param) \ + HAS_AUTHMODE((_param), WLAN_CRYPTO_AUTH_SHARED) +#define AUTH_IS_8021X(_param) HAS_AUTHMODE((_param), WLAN_CRYPTO_AUTH_8021X) +#define AUTH_IS_WPA(_param) HAS_AUTHMODE((_param), WLAN_CRYPTO_AUTH_WPA) +#define AUTH_IS_RSNA(_param) HAS_AUTHMODE((_param), WLAN_CRYPTO_AUTH_RSNA) +#define AUTH_IS_CCKM(_param) HAS_AUTHMODE((_param), WLAN_CRYPTO_AUTH_CCKM) +#define AUTH_IS_WAI(_param) HAS_AUTHMODE((_param), WLAN_CRYPTO_AUTH_WAPI) +#define AUTH_IS_WPA2(_param) AUTH_IS_RSNA(_param) + +#define AUTH_MATCH(_param1, _param2) \ + (((_param1)->authmodeset & (_param2)->authmodeset) != 0) + + +#define RESET_UCAST_CIPHERS(_param) ((_param)->ucastcipherset =\ + (1 << WLAN_CRYPTO_CIPHER_NONE)) +#define SET_UCAST_CIPHER(_param, _c) ((_param)->ucastcipherset |= (1 << (_c))) +#define HAS_UCAST_CIPHER(_param, _c) ((_param)->ucastcipherset & (1 << (_c))) + +#define UCIPHER_IS_CLEAR(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_NONE) +#define UCIPHER_IS_WEP(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_WEP) +#define UCIPHER_IS_TKIP(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_TKIP) +#define UCIPHER_IS_CCMP128(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_CCM) +#define UCIPHER_IS_CCMP256(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_CCM_256) +#define UCIPHER_IS_GCMP128(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_GCM) +#define UCIPHER_IS_GCMP256(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_GCM_256) +#define UCIPHER_IS_SMS4(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_WAPI_SMS4) + +#define RESET_MCAST_CIPHERS(_param) ((_param)->mcastcipherset = \ + (1 << WLAN_CRYPTO_CIPHER_NONE)) +#define SET_MCAST_CIPHER(_param, _c) ((_param)->mcastcipherset |= (1 << (_c))) +#define HAS_MCAST_CIPHER(_param, _c) ((_param)->mcastcipherset & (1 << (_c))) +#define HAS_ANY_MCAST_CIPHER(_param) ((_param)->mcastcipherset) +#define CLEAR_MCAST_CIPHER(_param, _c) \ + ((_param)->mcastcipherset &= (~(1)<<(_c))) + +#define MCIPHER_IS_CLEAR(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_NONE) +#define MCIPHER_IS_WEP(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_WEP) +#define MCIPHER_IS_TKIP(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_TKIP) +#define MCIPHER_IS_CCMP128(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_CCM) +#define MCIPHER_IS_CCMP256(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_CCM_256) +#define MCIPHER_IS_GCMP128(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_GCM) +#define MCIPHER_IS_GCMP256(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_GCM_256) +#define MCIPHER_IS_SMS4(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_WAPI_SMS4) + +#define RESET_MGMT_CIPHERS(_param) ((_param)->mgmtcipherset = \ + (1 << WLAN_CRYPTO_CIPHER_NONE)) +#define SET_MGMT_CIPHER(_param, _c) ((_param)->mgmtcipherset |= (1 << (_c))) +#define HAS_MGMT_CIPHER(_param, _c) ((_param)->mgmtcipherset & (1 << (_c))) +#define IS_MGMT_CIPHER(_c) ((_c == WLAN_CRYPTO_CIPHER_AES_CMAC) || \ + (_c == WLAN_CRYPTO_CIPHER_AES_CMAC_256) || \ + (_c == WLAN_CRYPTO_CIPHER_AES_GMAC) || \ + (_c == WLAN_CRYPTO_CIPHER_AES_GMAC_256)) + +#define IS_FILS_CIPHER(_c) ((_c) == WLAN_CRYPTO_CIPHER_FILS_AEAD) + +#define MGMT_CIPHER_IS_CMAC(_param) \ + HAS_MGMT_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_CMAC) +#define MGMT_CIPHER_IS_CMAC256(_param) \ + HAS_MGMT_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_CMAC_256) +#define MGMT_CIPHER_IS_GMAC(_param) \ + HAS_MGMT_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_GMAC) +#define MGMT_CIPHER_IS_GMAC256(_param) \ + HAS_MGMT_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_GMAC_256) + +#define RESET_KEY_MGMT(_param) ((_param)->key_mgmt = \ + (1 << WLAN_CRYPTO_KEY_MGMT_NONE)) +#define SET_KEY_MGMT(_param, _c) ((_param)->key_mgmt |= (1 << (_c))) +#define HAS_KEY_MGMT(_param, _c) ((_param)->key_mgmt & (1 << (_c))) + +#define UCAST_CIPHER_MATCH(_param1, _param2) \ + (((_param1)->ucastcipherset & (_param2)->ucastcipherset) != 0) + +#define MCAST_CIPHER_MATCH(_param1, _param2) \ + (((_param1)->mcastcipherset & (_param2)->mcastcipherset) != 0) + +#define MGMT_CIPHER_MATCH(_param1, _param2) \ + (((_param1)->mgmtcipherset & (_param2)->mgmtcipherset) != 0) + +#define KEY_MGMTSET_MATCH(_param1, _param2) \ + (((_param1)->key_mgmt & (_param2)->key_mgmt) != 0 || \ + (!(_param1)->key_mgmt && !(_param2)->key_mgmt)) + +#define RESET_CIPHER_CAP(_param) ((_param)->cipher_caps = 0) +#define SET_CIPHER_CAP(_param, _c) ((_param)->cipher_caps |= (1 << (_c))) +#define HAS_CIPHER_CAP(_param, _c) ((_param)->cipher_caps & (1 << (_c))) +#define HAS_ANY_CIPHER_CAP(_param) ((_param)->cipher_caps) + +#define crypto_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_CRYPTO, params) +#define crypto_info(params...) QDF_TRACE_INFO(QDF_MODULE_ID_CRYPTO, params) +#define crypto_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_CRYPTO, params) + +/** + * struct wlan_crypto_mmie - MMIE IE + * @element_id: element id + * @length: length of the ie + * @key_id: igtk key_id used + * @sequence_number: igtk PN number + * @mic: MIC for the frame + * + * This structure represents Management MIC information element (IEEE 802.11w) + */ +struct wlan_crypto_mmie { + uint8_t element_id; + uint8_t length; + uint16_t key_id; + uint8_t sequence_number[6]; + uint8_t mic[16]; +} __packed; + +/** + * struct crypto_add_key_result - add key result structure + * @vdev_id: unique id identifying the VDEV + * @key_ix: key index + * @key_flags: key flags + * @status: status of add key + * @peer_macaddr: MAC address of the peer + * + * Structure used for add key result. + */ +struct crypto_add_key_result { + uint32_t vdev_id; + uint32_t key_ix; + uint32_t key_flags; + uint32_t status; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; +}; + +/** + * typedef crypto_add_key_callback - add key callback + * @context: opaque context that the client can use to associate the + * callback with the request + * @result: result of add key + */ +typedef void (*crypto_add_key_callback)(void *context, + struct crypto_add_key_result *result); + +/** + * struct wlan_crypto_comp_priv - crypto component private structure + * @crypto_params: crypto params for the peer + * @key: key buffers for this peer + * @igtk_key: igtk key buffer for this peer + * @igtk_key_type: igtk key type + * @def_tx_keyid: default key used for this peer + * @def_igtk_tx_keyid default igtk key used for this peer + * @fils_aead_set fils params for this peer + * @add_key_ctx: Opaque context to be used by the caller to associate the + * add key request with the response + * @add_key_cb: Callback function to be called with the add key result + * + */ +struct wlan_crypto_comp_priv { + struct wlan_crypto_params crypto_params; + struct wlan_crypto_key *key[WLAN_CRYPTO_MAX_VLANKEYIX]; + struct wlan_crypto_key *igtk_key[WLAN_CRYPTO_MAXIGTKKEYIDX]; + enum wlan_crypto_cipher_type igtk_key_type; + uint8_t def_tx_keyid; + uint8_t def_igtk_tx_keyid; + uint8_t fils_aead_set; + void *add_key_ctx; + crypto_add_key_callback add_key_cb; +}; + +/** + * struct wlan_crypto_cipher - crypto cipher table + * @cipher_name: printable name + * @cipher: cipher type WLAN_CRYPTO_CIPHER_* + * @header: size of privacy header (bytes) + * @trailer: size of privacy trailer (bytes) + * @miclen: size of mic trailer (bytes) + * @keylen: max key length + * @setkey: function pointer for setkey + * @encap: function pointer for encap + * @decap: function pointer for decap + * @enmic: function pointer for enmic + * @demic: function pointer for demic + * + */ +struct wlan_crypto_cipher { + const char *cipher_name; + wlan_crypto_cipher_type cipher; + const uint8_t header; + const uint8_t trailer; + const uint8_t miclen; + const uint32_t keylen; + QDF_STATUS(*setkey)(struct wlan_crypto_key *); + QDF_STATUS(*encap)(struct wlan_crypto_key *, + qdf_nbuf_t, uint8_t, uint8_t); + QDF_STATUS(*decap)(struct wlan_crypto_key *, + qdf_nbuf_t, uint8_t, uint8_t); + QDF_STATUS(*enmic)(struct wlan_crypto_key *, + qdf_nbuf_t, uint8_t, uint8_t); + QDF_STATUS(*demic)(struct wlan_crypto_key *, + qdf_nbuf_t, uint8_t, uint8_t); +}; + + +/** + * wlan_crypto_is_data_protected - check is frame is protected or not + * @data: frame + * + * This function check is frame is protected or not + * + * Return: TRUE/FALSE + */ +static inline bool wlan_crypto_is_data_protected(const void *data) +{ + const struct wlan_frame_hdr *hdr = (const struct wlan_frame_hdr *)data; + if (hdr->i_fc[1] & WLAN_FC1_ISWEP) + return true; + else + return false; +} + +/** + * ieee80211_hdrsize - calculate frame header size + * @data: frame + * + * This function calculate frame header size + * + * Return: header size of the frame + */ +static inline uint8_t ieee80211_hdrsize(const void *data) +{ + const struct wlan_frame_hdr *hdr = (const struct wlan_frame_hdr *)data; + uint8_t size = sizeof(struct wlan_frame_hdr); + + if ((hdr->i_fc[1] & WLAN_FC1_DIR_MASK) + == (WLAN_FC1_DSTODS)) { + size += QDF_MAC_ADDR_SIZE; + } + + if (((WLAN_FC0_GET_STYPE(hdr->i_fc[0]) + == WLAN_FC0_STYPE_QOS_DATA))) { + size += sizeof(uint16_t); + /* Qos frame with Order bit set indicates an HTC frame */ + if (hdr->i_fc[1] & WLAN_FC1_ORDER) + size += (sizeof(uint8_t)*4); + } + return size; +} + +/** + * ieee80211_hdrspace - calculate frame header size with padding + * @pdev: pdev + * @data: frame header + * + * This function returns the space occupied by the 802.11 header + * and any padding required by the driver. This works for a management + * or data frame. + * + * Return: header size of the frame with padding + */ +static inline uint8_t +ieee80211_hdrspace(struct wlan_objmgr_pdev *pdev, const void *data) +{ + uint8_t size = ieee80211_hdrsize(data); + + if (wlan_pdev_nif_feat_cap_get(pdev, WLAN_PDEV_F_DATAPAD)) + size = roundup(size, sizeof(u_int32_t)); + + return size; +} + +/** + * wlan_get_tid - get tid of the frame + * @data: frame + * + * This function get tid of the frame + * + * Return: tid of the frame + */ +static inline int wlan_get_tid(const void *data) +{ + const struct wlan_frame_hdr *hdr = (const struct wlan_frame_hdr *)data; + + if (((WLAN_FC0_GET_STYPE(hdr->i_fc[0]) + == WLAN_FC0_STYPE_QOS_DATA))) { + if ((hdr->i_fc[1] & WLAN_FC1_DIR_MASK) + == (WLAN_FC1_DSTODS)) { + return ((struct wlan_frame_hdr_qos_addr4 *)data)->i_qos[0] + & WLAN_QOS_TID_MASK; + } else { + return ((struct wlan_frame_hdr_qos *)data)->i_qos[0] + & WLAN_QOS_TID_MASK; + } + } else + return WLAN_NONQOS_SEQ; +} +#endif /* end of _WLAN_CRYPTO_DEF_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_global_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_global_api.c new file mode 100644 index 0000000000000000000000000000000000000000..15874754c0cd3e796b76e46ee8964cd810a1eb1d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_global_api.c @@ -0,0 +1,4377 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public APIs for crypto service + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "wlan_crypto_global_def.h" +#include "wlan_crypto_global_api.h" +#include "wlan_crypto_def_i.h" +#include "wlan_crypto_param_handling_i.h" +#include "wlan_crypto_obj_mgr_i.h" +#include "wlan_crypto_main.h" +#include + +const struct wlan_crypto_cipher *wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_MAX]; + +#define WPA_ADD_CIPHER_TO_SUITE(frm, cipher) \ + WLAN_CRYPTO_ADDSELECTOR(frm,\ + wlan_crypto_wpa_cipher_to_suite(cipher)) + +#define RSN_ADD_CIPHER_TO_SUITE(frm, cipher) \ + WLAN_CRYPTO_ADDSELECTOR(frm,\ + wlan_crypto_rsn_cipher_to_suite(cipher)) + +#define WPA_ADD_KEYMGMT_TO_SUITE(frm, keymgmt)\ + WLAN_CRYPTO_ADDSELECTOR(frm,\ + wlan_crypto_wpa_keymgmt_to_suite(keymgmt)) + +#define RSN_ADD_KEYMGMT_TO_SUITE(frm, keymgmt)\ + WLAN_CRYPTO_ADDSELECTOR(frm,\ + wlan_crypto_rsn_keymgmt_to_suite(keymgmt)) + +/** + * wlan_crypto_vdev_get_crypto_params - called by mlme to get crypto params + * @vdev:vdev + * + * This function gets called by mlme to get crypto params + * + * Return: wlan_crypto_params or NULL in case of failure + */ +static struct wlan_crypto_params *wlan_crypto_vdev_get_comp_params( + struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_comp_priv **crypto_priv){ + *crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + if (!(*crypto_priv)) { + crypto_err("crypto_priv NULL"); + return NULL; + } + + return &((*crypto_priv)->crypto_params); +} + +/** + * wlan_crypto_peer_get_crypto_params - called by mlme to get crypto params + * @peer:peer + * + * This function gets called by mlme to get crypto params + * + * Return: wlan_crypto_params or NULL in case of failure + */ +static struct wlan_crypto_params *wlan_crypto_peer_get_comp_params( + struct wlan_objmgr_peer *peer, + struct wlan_crypto_comp_priv **crypto_priv){ + + *crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_peer_crypto_obj(peer); + if (!*crypto_priv) { + crypto_err("crypto_priv NULL"); + return NULL; + } + + return &((*crypto_priv)->crypto_params); +} + +static QDF_STATUS wlan_crypto_set_igtk_key(struct wlan_crypto_key *key) +{ + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_set_param - called by ucfg to set crypto param + * @crypto_params: crypto_params + * @param: param to be set. + * @value: value + * + * This function gets called from ucfg to set param + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_crypto_set_param(struct wlan_crypto_params *crypto_params, + wlan_crypto_param_type param, + uint32_t value){ + QDF_STATUS status = QDF_STATUS_E_INVAL; + + crypto_debug("param %d, value %d", param, value); + switch (param) { + case WLAN_CRYPTO_PARAM_AUTH_MODE: + status = wlan_crypto_set_authmode(crypto_params, value); + break; + case WLAN_CRYPTO_PARAM_UCAST_CIPHER: + status = wlan_crypto_set_ucastciphers(crypto_params, value); + break; + case WLAN_CRYPTO_PARAM_MCAST_CIPHER: + status = wlan_crypto_set_mcastcipher(crypto_params, value); + break; + case WLAN_CRYPTO_PARAM_MGMT_CIPHER: + status = wlan_crypto_set_mgmtcipher(crypto_params, value); + break; + case WLAN_CRYPTO_PARAM_CIPHER_CAP: + status = wlan_crypto_set_cipher_cap(crypto_params, value); + break; + case WLAN_CRYPTO_PARAM_RSN_CAP: + status = wlan_crypto_set_rsn_cap(crypto_params, value); + break; + case WLAN_CRYPTO_PARAM_KEY_MGMT: + status = wlan_crypto_set_key_mgmt(crypto_params, value); + break; + default: + status = QDF_STATUS_E_INVAL; + } + return status; +} + +/** + * wlan_crypto_set_vdev_param - called by ucfg to set crypto param + * @vdev: vdev + * @param: param to be set. + * @value: value + * + * This function gets called from ucfg to set param + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_vdev_param(struct wlan_objmgr_vdev *vdev, + wlan_crypto_param_type param, + uint32_t value){ + QDF_STATUS status = QDF_STATUS_E_INVAL; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + crypto_params = &(crypto_priv->crypto_params); + + status = wlan_crypto_set_param(crypto_params, param, value); + + return status; +} + +/** + * wlan_crypto_set_param - called by ucfg to set crypto param + * + * @peer: peer + * @param: param to be set. + * @value: value + * + * This function gets called from ucfg to set param + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_peer_param(struct wlan_objmgr_peer *peer, + wlan_crypto_param_type param, + uint32_t value){ + QDF_STATUS status = QDF_STATUS_E_INVAL; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + crypto_params = &(crypto_priv->crypto_params); + + status = wlan_crypto_set_param(crypto_params, param, value); + + return status; +} + +/** + * wlan_crypto_get_param_value - called by crypto APIs to get value for param + * @param: Crypto param type + * @crypto_params: Crypto params struct + * + * This function gets called from in-within crypto layer + * + * Return: value or -1 for failure + */ +static int32_t wlan_crypto_get_param_value(wlan_crypto_param_type param, + struct wlan_crypto_params *crypto_params) +{ + int32_t value = -1; + + switch (param) { + case WLAN_CRYPTO_PARAM_AUTH_MODE: + value = wlan_crypto_get_authmode(crypto_params); + break; + case WLAN_CRYPTO_PARAM_UCAST_CIPHER: + value = wlan_crypto_get_ucastciphers(crypto_params); + break; + case WLAN_CRYPTO_PARAM_MCAST_CIPHER: + value = wlan_crypto_get_mcastcipher(crypto_params); + break; + case WLAN_CRYPTO_PARAM_MGMT_CIPHER: + value = wlan_crypto_get_mgmtciphers(crypto_params); + break; + case WLAN_CRYPTO_PARAM_CIPHER_CAP: + value = wlan_crypto_get_cipher_cap(crypto_params); + break; + case WLAN_CRYPTO_PARAM_RSN_CAP: + value = wlan_crypto_get_rsn_cap(crypto_params); + break; + case WLAN_CRYPTO_PARAM_KEY_MGMT: + value = wlan_crypto_get_key_mgmt(crypto_params); + break; + default: + value = QDF_STATUS_E_INVAL; + } + + return value; +} + +/** + * wlan_crypto_get_param - called to get value for param from vdev + * @vdev: vdev + * @param: Crypto param type + * + * This function gets called to get value for param from vdev + * + * Return: value or -1 for failure + */ +int32_t wlan_crypto_get_param(struct wlan_objmgr_vdev *vdev, + wlan_crypto_param_type param) +{ + int32_t value = -1; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + crypto_params = &(crypto_priv->crypto_params); + value = wlan_crypto_get_param_value(param, crypto_params); + + return value; +} +/** + * wlan_crypto_get_peer_param - called to get value for param from peer + * @peer: peer + * @param: Crypto param type + * + * This function gets called to get value for param from peer + * + * Return: value or -1 for failure + */ +int32_t wlan_crypto_get_peer_param(struct wlan_objmgr_peer *peer, + wlan_crypto_param_type param) +{ + int32_t value = -1; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + + if (!crypto_params) { + crypto_err("crypto_params NULL"); + return QDF_STATUS_E_INVAL; + } + value = wlan_crypto_get_param_value(param, crypto_params); + + return value; +} +qdf_export_symbol(wlan_crypto_get_peer_param); + +static +QDF_STATUS wlan_crypto_del_pmksa(struct wlan_crypto_params *crypto_params, + struct wlan_crypto_pmksa *pmksa); + +static +QDF_STATUS wlan_crypto_set_pmksa(struct wlan_crypto_params *crypto_params, + struct wlan_crypto_pmksa *pmksa) +{ + uint8_t i, first_available_slot = 0; + bool slot_found = false; + + /* Delete the old entry and then Add new entry */ + wlan_crypto_del_pmksa(crypto_params, pmksa); + + /* find the empty slot as duplicate is already deleted */ + for (i = 0; i < WLAN_CRYPTO_MAX_PMKID; i++) { + if (!crypto_params->pmksa[i]) { + slot_found = true; + first_available_slot = i; + break; + } + } + + if (i == WLAN_CRYPTO_MAX_PMKID && !slot_found) { + crypto_err("no entry available for pmksa"); + return QDF_STATUS_E_INVAL; + } + crypto_params->pmksa[first_available_slot] = pmksa; + + return QDF_STATUS_SUCCESS; +} + +static +QDF_STATUS wlan_crypto_del_pmksa(struct wlan_crypto_params *crypto_params, + struct wlan_crypto_pmksa *pmksa) +{ + uint8_t i, j; + bool match_found = false; + u8 del_pmk[MAX_PMK_LEN] = {0}; + + /* find slot with same bssid */ + for (i = 0; i < WLAN_CRYPTO_MAX_PMKID; i++) { + if (!crypto_params->pmksa[i]) + continue; + if (qdf_is_macaddr_equal(&pmksa->bssid, + &crypto_params->pmksa[i]->bssid)) { + match_found = true; + } else if (pmksa->ssid_len && + !qdf_mem_cmp(pmksa->ssid, + crypto_params->pmksa[i]->ssid, + pmksa->ssid_len) && + !qdf_mem_cmp(pmksa->cache_id, + crypto_params->pmksa[i]->cache_id, + WLAN_CACHE_ID_LEN)) { + match_found = true; + } + + if (match_found) { + qdf_mem_copy(del_pmk, crypto_params->pmksa[i]->pmk, + crypto_params->pmksa[i]->pmk_len); + /* Free matching entry */ + qdf_mem_zero(crypto_params->pmksa[i], + sizeof(struct wlan_crypto_pmksa)); + qdf_mem_free(crypto_params->pmksa[i]); + crypto_params->pmksa[i] = NULL; + + /* Find and remove the entries matching the pmk */ + for (j = 0; j < WLAN_CRYPTO_MAX_PMKID; j++) { + if (!crypto_params->pmksa[j]) + continue; + if (crypto_params->pmksa[j]->pmk_len && + (!qdf_mem_cmp(crypto_params->pmksa[j]->pmk, + del_pmk, + crypto_params->pmksa[j]->pmk_len))) { + qdf_mem_zero(crypto_params->pmksa[j], + sizeof(struct wlan_crypto_pmksa)); + qdf_mem_free(crypto_params->pmksa[j]); + crypto_params->pmksa[j] = NULL; + } + } + /* reset stored pmk */ + qdf_mem_zero(del_pmk, MAX_PMK_LEN); + + return QDF_STATUS_SUCCESS; + } + } + + if (i == WLAN_CRYPTO_MAX_PMKID && !match_found) + crypto_debug("No such pmksa entry exists"); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_crypto_pmksa_flush(struct wlan_crypto_params *crypto_params) +{ + uint8_t i; + + for (i = 0; i < WLAN_CRYPTO_MAX_PMKID; i++) { + if (!crypto_params->pmksa[i]) + continue; + qdf_mem_zero(crypto_params->pmksa[i], + sizeof(struct wlan_crypto_pmksa)); + qdf_mem_free(crypto_params->pmksa[i]); + crypto_params->pmksa[i] = NULL; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_crypto_set_del_pmksa(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_pmksa *pmksa, + bool set) +{ + QDF_STATUS status = QDF_STATUS_E_INVAL; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + enum QDF_OPMODE op_mode; + + op_mode = wlan_vdev_mlme_get_opmode(vdev); + + if (op_mode != QDF_STA_MODE && op_mode != QDF_SAP_MODE) + return QDF_STATUS_E_NOSUPPORT; + + if (!pmksa && set) { + crypto_err("pmksa is NULL for set operation"); + return QDF_STATUS_E_INVAL; + } + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + crypto_params = &crypto_priv->crypto_params; + if (set) { + status = wlan_crypto_set_pmksa(crypto_params, pmksa); + /* Set pmksa */ + } else { + /* del pmksa */ + if (!pmksa) + status = wlan_crypto_pmksa_flush(crypto_params); + else + status = wlan_crypto_del_pmksa(crypto_params, pmksa); + } + + return status; +} + +struct wlan_crypto_pmksa * +wlan_crypto_get_peer_pmksa(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_pmksa *pmksa) +{ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + uint8_t i; + + if (!pmksa) { + crypto_err("pmksa is NULL"); + return NULL; + } + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return NULL; + } + + crypto_params = &crypto_priv->crypto_params; + + for (i = 0; i < WLAN_CRYPTO_MAX_PMKID; i++) { + if (!crypto_params->pmksa[i]) + continue; + if (qdf_is_macaddr_equal(&pmksa->bssid, + &crypto_params->pmksa[i]->bssid)) { + return crypto_params->pmksa[i]; + } else if (pmksa->ssid_len && + !qdf_mem_cmp(pmksa->ssid, + crypto_params->pmksa[i]->ssid, + pmksa->ssid_len) && + !qdf_mem_cmp(pmksa->cache_id, + crypto_params->pmksa[i]->cache_id, + WLAN_CACHE_ID_LEN)){ + return crypto_params->pmksa[i]; + } + } + + return NULL; +} + +struct wlan_crypto_pmksa * +wlan_crypto_get_pmksa(struct wlan_objmgr_vdev *vdev, struct qdf_mac_addr *bssid) +{ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + uint8_t i; + + if (!bssid) { + crypto_err("bssid is NULL"); + return NULL; + } + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return NULL; + } + + crypto_params = &crypto_priv->crypto_params; + + for (i = 0; i < WLAN_CRYPTO_MAX_PMKID; i++) { + if (!crypto_params->pmksa[i]) + continue; + if (qdf_is_macaddr_equal(bssid, + &crypto_params->pmksa[i]->bssid)) { + return crypto_params->pmksa[i]; + } + } + + return NULL; +} + +struct wlan_crypto_pmksa * +wlan_crypto_get_fils_pmksa(struct wlan_objmgr_vdev *vdev, + uint8_t *cache_id, uint8_t *ssid, + uint8_t ssid_len) +{ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + uint8_t i; + + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return NULL; + } + + crypto_params = &crypto_priv->crypto_params; + for (i = 0; i < WLAN_CRYPTO_MAX_PMKID; i++) { + if (!crypto_params->pmksa[i]) + continue; + + if (!qdf_mem_cmp(cache_id, + crypto_params->pmksa[i]->cache_id, + WLAN_CACHE_ID_LEN) && + !qdf_mem_cmp(ssid, crypto_params->pmksa[i]->ssid, + ssid_len) && + ssid_len == crypto_params->pmksa[i]->ssid_len) + return crypto_params->pmksa[i]; + } + + return NULL; +} + +/** + * wlan_crypto_is_htallowed - called to check is HT allowed for cipher + * @vdev: vdev + * @peer: peer + * + * This function gets called to check is HT allowed for cipher. + * HT is not allowed for wep and tkip. + * + * Return: 0 - not allowed or 1 - allowed + */ +uint8_t wlan_crypto_is_htallowed(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer) +{ + int32_t ucast_cipher; + + if (!(vdev || peer)) { + crypto_err("Invalid params"); + return 0; + } + + if (vdev) + ucast_cipher = wlan_crypto_get_param(vdev, + WLAN_CRYPTO_PARAM_UCAST_CIPHER); + else + ucast_cipher = wlan_crypto_get_peer_param(peer, + WLAN_CRYPTO_PARAM_UCAST_CIPHER); + + return (ucast_cipher & (1 << WLAN_CRYPTO_CIPHER_WEP)) || + ((ucast_cipher & (1 << WLAN_CRYPTO_CIPHER_TKIP)) && + !(ucast_cipher & (1 << WLAN_CRYPTO_CIPHER_AES_CCM)) && + !(ucast_cipher & (1 << WLAN_CRYPTO_CIPHER_AES_GCM)) && + !(ucast_cipher & (1 << WLAN_CRYPTO_CIPHER_AES_GCM_256)) && + !(ucast_cipher & (1 << WLAN_CRYPTO_CIPHER_AES_CCM_256))); +} +qdf_export_symbol(wlan_crypto_is_htallowed); + +/** + * wlan_crypto_setkey - called by ucfg to setkey + * @vdev: vdev + * @req_key: req_key with cipher type, key macaddress + * + * This function gets called from ucfg to sey key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_setkey(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_req_key *req_key){ + + QDF_STATUS status = QDF_STATUS_E_INVAL; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_peer *peer; + struct wlan_crypto_key *key = NULL; + const struct wlan_crypto_cipher *cipher; + uint8_t macaddr[QDF_MAC_ADDR_SIZE] = + {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + bool isbcast; + enum QDF_OPMODE vdev_mode; + uint8_t igtk_idx = 0; + + if (!vdev || !req_key || req_key->keylen > (sizeof(req_key->keydata))) { + crypto_err("Invalid params vdev%pK, req_key%pK", vdev, req_key); + return QDF_STATUS_E_INVAL; + } + + isbcast = qdf_is_macaddr_group( + (struct qdf_mac_addr *)req_key->macaddr); + if ((req_key->keylen == 0) && !IS_FILS_CIPHER(req_key->type)) { + /* zero length keys, only set default key id if flags are set*/ + if ((req_key->flags & WLAN_CRYPTO_KEY_DEFAULT) + && (req_key->keyix != WLAN_CRYPTO_KEYIX_NONE) + && (!IS_MGMT_CIPHER(req_key->type))) { + wlan_crypto_default_key(vdev, + req_key->macaddr, + req_key->keyix, + !isbcast); + return QDF_STATUS_SUCCESS; + } + crypto_err("req_key len zero"); + return QDF_STATUS_E_INVAL; + } + + cipher = wlan_crypto_cipher_ops[req_key->type]; + + if (!cipher && !IS_MGMT_CIPHER(req_key->type)) { + crypto_err("cipher invalid"); + return QDF_STATUS_E_INVAL; + } + + if (cipher && (!IS_FILS_CIPHER(req_key->type)) && + (!IS_MGMT_CIPHER(req_key->type)) && + ((req_key->keylen != (cipher->keylen / CRYPTO_NBBY)) && + (req_key->type != WLAN_CRYPTO_CIPHER_WEP))) { + crypto_err("cipher invalid"); + return QDF_STATUS_E_INVAL; + } else if ((req_key->type == WLAN_CRYPTO_CIPHER_WEP) && + !((req_key->keylen == WLAN_CRYPTO_KEY_WEP40_LEN) + || (req_key->keylen == WLAN_CRYPTO_KEY_WEP104_LEN) + || (req_key->keylen == WLAN_CRYPTO_KEY_WEP128_LEN))) { + crypto_err("wep key len invalid. keylen: %d", req_key->keylen); + return QDF_STATUS_E_INVAL; + } + + if (req_key->keyix == WLAN_CRYPTO_KEYIX_NONE) { + if (req_key->flags != (WLAN_CRYPTO_KEY_XMIT + | WLAN_CRYPTO_KEY_RECV)) { + req_key->flags |= (WLAN_CRYPTO_KEY_XMIT + | WLAN_CRYPTO_KEY_RECV); + } + } else { + if ((req_key->keyix >= WLAN_CRYPTO_MAX_VLANKEYIX) + && (!IS_MGMT_CIPHER(req_key->type))) { + return QDF_STATUS_E_INVAL; + } + + req_key->flags |= (WLAN_CRYPTO_KEY_XMIT + | WLAN_CRYPTO_KEY_RECV); + if (isbcast) + req_key->flags |= WLAN_CRYPTO_KEY_GROUP; + } + + vdev_mode = wlan_vdev_mlme_get_opmode(vdev); + + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(macaddr, wlan_vdev_mlme_get_macaddr(vdev), + QDF_MAC_ADDR_SIZE); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + crypto_err("psoc NULL"); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + if (req_key->type == WLAN_CRYPTO_CIPHER_WEP) { + if (wlan_crypto_vdev_has_auth_mode(vdev, + (1 << WLAN_CRYPTO_AUTH_8021X))) { + req_key->flags |= WLAN_CRYPTO_KEY_DEFAULT; + } + } + + if (isbcast) { + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + if (IS_MGMT_CIPHER(req_key->type)) { + igtk_idx = req_key->keyix - WLAN_CRYPTO_MAXKEYIDX; + if (igtk_idx >= WLAN_CRYPTO_MAXIGTKKEYIDX) { + crypto_err("igtk key invalid keyid %d", + igtk_idx); + return QDF_STATUS_E_INVAL; + } + key = qdf_mem_malloc(sizeof(struct wlan_crypto_key)); + if (!key) + return QDF_STATUS_E_NOMEM; + + if (crypto_priv->igtk_key[igtk_idx]) + qdf_mem_free(crypto_priv->igtk_key[igtk_idx]); + + crypto_priv->igtk_key[igtk_idx] = key; + crypto_priv->igtk_key_type = req_key->type; + crypto_priv->def_igtk_tx_keyid = igtk_idx; + } else { + if (IS_FILS_CIPHER(req_key->type)) { + crypto_err("FILS key is not for BroadCast pkt"); + return QDF_STATUS_E_INVAL; + } + if (!HAS_MCAST_CIPHER(crypto_params, req_key->type) + && (req_key->type != WLAN_CRYPTO_CIPHER_WEP)) { + return QDF_STATUS_E_INVAL; + } + if (!crypto_priv->key[req_key->keyix]) { + crypto_priv->key[req_key->keyix] + = qdf_mem_malloc( + sizeof(struct wlan_crypto_key)); + if (!crypto_priv->key[req_key->keyix]) + return QDF_STATUS_E_NOMEM; + } + key = crypto_priv->key[req_key->keyix]; + } + if (vdev_mode == QDF_STA_MODE) { + peer = wlan_objmgr_vdev_try_get_bsspeer(vdev, + WLAN_CRYPTO_ID); + if (!peer) { + crypto_err("peer NULL"); + if (IS_MGMT_CIPHER(req_key->type)) { + crypto_priv->igtk_key[igtk_idx] = NULL; + crypto_priv->igtk_key_type + = WLAN_CRYPTO_CIPHER_NONE; + } else + crypto_priv->key[req_key->keyix] = NULL; + if (key) + qdf_mem_free(key); + return QDF_STATUS_E_INVAL; + } + qdf_mem_copy(macaddr, wlan_peer_get_macaddr(peer), + QDF_MAC_ADDR_SIZE); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + } + } else { + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev( + psoc, + pdev_id, + macaddr, + req_key->macaddr, + WLAN_CRYPTO_ID); + + if (!peer) { + crypto_err("peer NULL"); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_copy(macaddr, req_key->macaddr, QDF_MAC_ADDR_SIZE); + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + if (IS_MGMT_CIPHER(req_key->type)) { + igtk_idx = req_key->keyix - WLAN_CRYPTO_MAXKEYIDX; + if (igtk_idx >= WLAN_CRYPTO_MAXIGTKKEYIDX) { + crypto_err("igtk key invalid keyid %d", + igtk_idx); + return QDF_STATUS_E_INVAL; + } + key = qdf_mem_malloc(sizeof(struct wlan_crypto_key)); + if (!key) + return QDF_STATUS_E_NOMEM; + + if (crypto_priv->igtk_key[igtk_idx]) + qdf_mem_free(crypto_priv->igtk_key[igtk_idx]); + + crypto_priv->igtk_key[igtk_idx] = key; + crypto_priv->igtk_key_type = req_key->type; + crypto_priv->def_igtk_tx_keyid = igtk_idx; + } else { + uint16_t kid = req_key->keyix; + if (kid == WLAN_CRYPTO_KEYIX_NONE) + kid = 0; + if (kid >= WLAN_CRYPTO_MAX_VLANKEYIX) { + crypto_err("invalid keyid %d", kid); + return QDF_STATUS_E_INVAL; + } + if (!crypto_priv->key[kid]) { + crypto_priv->key[kid] + = qdf_mem_malloc( + sizeof(struct wlan_crypto_key)); + if (!crypto_priv->key[kid]) + return QDF_STATUS_E_NOMEM; + } + key = crypto_priv->key[kid]; + } + } + + /* alloc key might not required as it is already there */ + key->cipher_table = (void *)cipher; + key->keylen = req_key->keylen; + key->flags = req_key->flags; + + if (req_key->keyix == WLAN_CRYPTO_KEYIX_NONE) + key->keyix = 0; + else + key->keyix = req_key->keyix; + + if (req_key->flags & WLAN_CRYPTO_KEY_DEFAULT + && (!IS_MGMT_CIPHER(req_key->type))) { + crypto_priv->def_tx_keyid = key->keyix; + key->flags |= WLAN_CRYPTO_KEY_DEFAULT; + } + if ((req_key->type == WLAN_CRYPTO_CIPHER_WAPI_SMS4) + || (req_key->type == WLAN_CRYPTO_CIPHER_WAPI_GCM4)) { + uint8_t iv_AP[16] = { 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x37}; + uint8_t iv_STA[16] = { 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x36}; + + /* During Tx PN should be increment and + * send but as per our implementation we increment only after + * Tx complete. So First packet PN check will be failed. + * To compensate increment the PN here by 2 + */ + if (vdev_mode == QDF_SAP_MODE) { + iv_AP[15] += 2; + qdf_mem_copy(key->recviv, iv_STA, + WLAN_CRYPTO_WAPI_IV_SIZE); + qdf_mem_copy(key->txiv, iv_AP, + WLAN_CRYPTO_WAPI_IV_SIZE); + } else { + iv_STA[15] += 2; + qdf_mem_copy(key->recviv, iv_AP, + WLAN_CRYPTO_WAPI_IV_SIZE); + qdf_mem_copy(key->txiv, iv_STA, + WLAN_CRYPTO_WAPI_IV_SIZE); + } + } else { + uint8_t i = 0; + qdf_mem_copy((uint8_t *)(&key->keytsc), + (uint8_t *)(&req_key->keytsc), sizeof(key->keytsc)); + for (i = 0; i < WLAN_CRYPTO_TID_SIZE; i++) { + qdf_mem_copy((uint8_t *)(&key->keyrsc[i]), + (uint8_t *)(&req_key->keyrsc), + sizeof(key->keyrsc[0])); + } + } + + qdf_mem_copy(key->keyval, req_key->keydata, sizeof(key->keyval)); + key->valid = 1; + if ((IS_MGMT_CIPHER(req_key->type))) { + if (HAS_CIPHER_CAP(crypto_params, + WLAN_CRYPTO_CAP_PMF_OFFLOAD)) { + if (WLAN_CRYPTO_TX_OPS_SETKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_SETKEY(psoc)(vdev, + key, macaddr, req_key->type); + } + } + wlan_crypto_set_mgmtcipher(crypto_params, req_key->type); + status = wlan_crypto_set_igtk_key(key); + return status; + } else if (IS_FILS_CIPHER(req_key->type)) { + /* Take request key object to FILS setkey */ + key->private = req_key; + } else { + if (WLAN_CRYPTO_TX_OPS_SETKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_SETKEY(psoc)(vdev, key, + macaddr, req_key->type); + } + } + status = cipher->setkey(key); + + if ((req_key->flags & WLAN_CRYPTO_KEY_DEFAULT) && + (req_key->keyix != WLAN_CRYPTO_KEYIX_NONE) && + (!IS_MGMT_CIPHER(req_key->type))) { + /* default xmit key */ + wlan_crypto_default_key(vdev, + req_key->macaddr, + req_key->keyix, + !isbcast); + } + + return status; +} + +/** + * wlan_crypto_get_keytype - get keytype + * @key: key + * + * This function gets keytype from key + * + * Return: keytype + */ +wlan_crypto_cipher_type wlan_crypto_get_key_type( + struct wlan_crypto_key *key){ + if (key && key->cipher_table) { + return ((struct wlan_crypto_cipher *) + (key->cipher_table))->cipher; + } + return WLAN_CRYPTO_CIPHER_NONE; +} +qdf_export_symbol(wlan_crypto_get_key_type); +/** + * wlan_crypto_vdev_getkey - get key from vdev + * @vdev: vdev + * @keyix: keyix + * + * This function gets key from vdev + * + * Return: key or NULL + */ +struct wlan_crypto_key *wlan_crypto_vdev_getkey(struct wlan_objmgr_vdev *vdev, + uint16_t keyix){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key = NULL; + + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, &crypto_priv); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return NULL; + } + /* for keyix 4,5 we return the igtk keys for keyix more than 5 + * we return the default key, for all other keyix we return the + * key accordingly. + */ + if (keyix == WLAN_CRYPTO_KEYIX_NONE || + keyix >= (WLAN_CRYPTO_MAXKEYIDX + WLAN_CRYPTO_MAXIGTKKEYIDX)) + key = crypto_priv->key[crypto_priv->def_tx_keyid]; + else if (keyix >= WLAN_CRYPTO_MAXKEYIDX) + key = crypto_priv->igtk_key[keyix - WLAN_CRYPTO_MAXKEYIDX]; + else + key = crypto_priv->key[keyix]; + + if (key && key->valid) + return key; + + return NULL; +} +qdf_export_symbol(wlan_crypto_vdev_getkey); + +/** + * wlan_crypto_peer_getkey - get key from peer + * @peer: peer + * @keyix: keyix + * + * This function gets key from peer + * + * Return: key or NULL + */ +struct wlan_crypto_key *wlan_crypto_peer_getkey(struct wlan_objmgr_peer *peer, + uint16_t keyix){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key = NULL; + + crypto_params = wlan_crypto_peer_get_comp_params(peer, &crypto_priv); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return NULL; + } + + /* for keyix 4,5 we return the igtk keys for keyix more than 5 + * we return the default key, for all other keyix we return the + * key accordingly. + */ + if (keyix == WLAN_CRYPTO_KEYIX_NONE || + keyix >= (WLAN_CRYPTO_MAXKEYIDX + WLAN_CRYPTO_MAXIGTKKEYIDX)) + key = crypto_priv->key[crypto_priv->def_tx_keyid]; + else if (keyix >= WLAN_CRYPTO_MAXKEYIDX) + key = crypto_priv->igtk_key[keyix - WLAN_CRYPTO_MAXKEYIDX]; + else + key = crypto_priv->key[keyix]; + + if (key && key->valid) + return key; + + return NULL; +} +qdf_export_symbol(wlan_crypto_peer_getkey); + +/** + * wlan_crypto_getkey - called by ucfg to get key + * @vdev: vdev + * @req_key: key value will be copied in this req_key + * @mac_address: mac address of the peer for unicast key + * or broadcast address if group key is requested. + * + * This function gets called from ucfg to get key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_getkey(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_req_key *req_key, + uint8_t *mac_addr){ + struct wlan_crypto_cipher *cipher_table; + struct wlan_crypto_key *key; + struct wlan_objmgr_psoc *psoc; + uint8_t macaddr[QDF_MAC_ADDR_SIZE] = + {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(macaddr, wlan_vdev_mlme_get_macaddr(vdev), + QDF_MAC_ADDR_SIZE); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + crypto_err("psoc NULL"); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + if (qdf_is_macaddr_broadcast((struct qdf_mac_addr *)mac_addr)) { + key = wlan_crypto_vdev_getkey(vdev, req_key->keyix); + if (!key) + return QDF_STATUS_E_INVAL; + } else { + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev( + psoc, + pdev_id, + macaddr, + mac_addr, + WLAN_CRYPTO_ID); + if (!peer) { + crypto_err("peer NULL"); + return QDF_STATUS_E_NOENT; + } + key = wlan_crypto_peer_getkey(peer, req_key->keyix); + if (WLAN_CRYPTO_TX_OPS_GETPN(psoc) && + (req_key->flags & WLAN_CRYPTO_KEY_GET_PN)) + WLAN_CRYPTO_TX_OPS_GETPN(psoc)(vdev, mac_addr, + req_key->type); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + if (!key) + return QDF_STATUS_E_INVAL; + } + + if (key->valid) { + qdf_mem_copy(req_key->keydata, + key->keyval, key->keylen); + qdf_mem_copy((uint8_t *)(&req_key->keytsc), + (uint8_t *)(&key->keytsc), + sizeof(req_key->keytsc)); + qdf_mem_copy((uint8_t *)(&req_key->keyrsc), + (uint8_t *)(&key->keyrsc[0]), + sizeof(req_key->keyrsc)); + req_key->keylen = key->keylen; + req_key->flags = key->flags; + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + + if (!cipher_table) + return QDF_STATUS_SUCCESS; + + req_key->type = cipher_table->cipher; + if (req_key->type == WLAN_CRYPTO_CIPHER_WAPI_SMS4) { + qdf_mem_copy((uint8_t *)(&req_key->txiv), + (uint8_t *)(key->txiv), + sizeof(req_key->txiv)); + qdf_mem_copy((uint8_t *)(&req_key->recviv), + (uint8_t *)(key->recviv), + sizeof(req_key->recviv)); + } + } + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_delkey - called by ucfg to delete key + * @vdev: vdev + * @mac_address: mac address of the peer for unicast key + * or broadcast address if group key is deleted. + * @key_idx: key index to be deleted + * + * This function gets called from ucfg to delete key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_delkey(struct wlan_objmgr_vdev *vdev, + uint8_t *macaddr, + uint8_t key_idx){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + struct wlan_crypto_cipher *cipher_table; + struct wlan_objmgr_psoc *psoc; + uint8_t bssid_mac[QDF_MAC_ADDR_SIZE]; + + if (!vdev || !macaddr || + (key_idx >= + (WLAN_CRYPTO_MAXKEYIDX + WLAN_CRYPTO_MAXIGTKKEYIDX))) { + crypto_err("Invalid param vdev %pK macaddr %pK keyidx %d", + vdev, macaddr, key_idx); + return QDF_STATUS_E_INVAL; + } + + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(bssid_mac, wlan_vdev_mlme_get_macaddr(vdev), + QDF_MAC_ADDR_SIZE); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + crypto_err("psoc NULL"); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + if (qdf_is_macaddr_broadcast((struct qdf_mac_addr *)macaddr)) { + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + } else { + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev( + psoc, pdev_id, + bssid_mac, + macaddr, + WLAN_CRYPTO_ID); + if (!peer) { + return QDF_STATUS_E_INVAL; + } + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + } + + if (key_idx >= WLAN_CRYPTO_MAXKEYIDX) { + uint8_t igtk_idx = key_idx - WLAN_CRYPTO_MAXKEYIDX; + if (igtk_idx >= WLAN_CRYPTO_MAXIGTKKEYIDX) { + crypto_err("Igtk key invalid keyid %d", igtk_idx); + return QDF_STATUS_E_INVAL; + } + key = crypto_priv->igtk_key[igtk_idx]; + crypto_priv->igtk_key[igtk_idx] = NULL; + if (key) + key->valid = 0; + } else { + key = crypto_priv->key[key_idx]; + crypto_priv->key[key_idx] = NULL; + } + + if (!key) + return QDF_STATUS_E_INVAL; + + if (key->valid) { + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + qdf_mem_zero(key->keyval, sizeof(key->keyval)); + + if (!IS_FILS_CIPHER(cipher_table->cipher) && + WLAN_CRYPTO_TX_OPS_DELKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_DELKEY(psoc)(vdev, key, + macaddr, cipher_table->cipher); + } else if (IS_FILS_CIPHER(cipher_table->cipher)) { + if (key->private) + qdf_mem_free(key->private); + } + } + + /* Zero-out local key variables */ + qdf_mem_zero(key, sizeof(struct wlan_crypto_key)); + qdf_mem_free(key); + + return QDF_STATUS_SUCCESS; +} + +#ifdef CRYPTO_SET_KEY_CONVERGED +static QDF_STATUS wlan_crypto_set_default_key(struct wlan_objmgr_vdev *vdev, + uint8_t key_idx, uint8_t *macaddr) +{ + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS wlan_crypto_set_default_key(struct wlan_objmgr_vdev *vdev, + uint8_t key_idx, uint8_t *macaddr) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + crypto_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + if (WLAN_CRYPTO_TX_OPS_DEFAULTKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_DEFAULTKEY(psoc)(vdev, key_idx, + macaddr); + } + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * wlan_crypto_default_key - called by ucfg to set default tx key + * @vdev: vdev + * @mac_address: mac address of the peer for unicast key + * or broadcast address if group key need to made default. + * @key_idx: key index to be made as default key + * @unicast: is key was unicast or group key. + * + * This function gets called from ucfg to set default key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_default_key(struct wlan_objmgr_vdev *vdev, + uint8_t *macaddr, + uint8_t key_idx, + bool unicast){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + struct wlan_objmgr_psoc *psoc; + uint8_t bssid_mac[QDF_MAC_ADDR_SIZE]; + + if (!vdev || !macaddr || (key_idx >= WLAN_CRYPTO_MAXKEYIDX)) { + crypto_err("Invalid param vdev %pK macaddr %pK keyidx %d", + vdev, macaddr, key_idx); + return QDF_STATUS_E_INVAL; + } + + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(bssid_mac, wlan_vdev_mlme_get_macaddr(vdev), + QDF_MAC_ADDR_SIZE); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + crypto_err("psoc NULL"); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + if (qdf_is_macaddr_broadcast((struct qdf_mac_addr *)macaddr)) { + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[key_idx]; + if (!key) + return QDF_STATUS_E_INVAL; + } else { + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev( + psoc, pdev_id, + bssid_mac, + macaddr, + WLAN_CRYPTO_ID); + + if (!peer) { + crypto_err("peer NULL"); + return QDF_STATUS_E_INVAL; + } + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[key_idx]; + if (!key) + return QDF_STATUS_E_INVAL; + } + if (!key->valid) + return QDF_STATUS_E_INVAL; + + if (wlan_crypto_set_default_key(vdev, key_idx, macaddr) != + QDF_STATUS_SUCCESS) + return QDF_STATUS_E_INVAL; + crypto_priv->def_tx_keyid = key_idx; + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_encap - called by mgmt for encap the frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @encapdone: is encapdone already or not. + * + * This function gets called from mgmt txrx to encap frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_encap(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *mac_addr, + uint8_t encapdone){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + QDF_STATUS status; + struct wlan_crypto_cipher *cipher_table; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_peer *peer; + uint8_t bssid_mac[QDF_MAC_ADDR_SIZE]; + uint8_t pdev_id; + uint8_t hdrlen; + enum QDF_OPMODE opmode; + + opmode = wlan_vdev_mlme_get_opmode(vdev); + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(bssid_mac, wlan_vdev_mlme_get_macaddr(vdev), + QDF_MAC_ADDR_SIZE); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + crypto_err("psoc NULL"); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + pdev_id = wlan_objmgr_pdev_get_pdev_id(wlan_vdev_get_pdev(vdev)); + /* FILS Encap required only for (Re-)Assoc response */ + peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr, WLAN_CRYPTO_ID); + + if (!wlan_crypto_is_data_protected((uint8_t *)qdf_nbuf_data(wbuf)) && + peer && !wlan_crypto_get_peer_fils_aead(peer)) { + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + return QDF_STATUS_E_INVAL; + } + + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + if (qdf_is_macaddr_group((struct qdf_mac_addr *)mac_addr)) { + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[crypto_priv->def_tx_keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + + } else { + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev(psoc, pdev_id, + bssid_mac, mac_addr, + WLAN_CRYPTO_ID); + + if (!peer) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[crypto_priv->def_tx_keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + } + if (opmode == QDF_MONITOR_MODE) + hdrlen = ieee80211_hdrsize((uint8_t *)qdf_nbuf_data(wbuf)); + else + hdrlen = ieee80211_hdrspace(wlan_vdev_get_pdev(vdev), + (uint8_t *)qdf_nbuf_data(wbuf)); + + /* if tkip, is counter measures enabled, then drop the frame */ + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + status = cipher_table->encap(key, wbuf, encapdone, + hdrlen); + + return status; +} +qdf_export_symbol(wlan_crypto_encap); + +/** + * wlan_crypto_decap - called by mgmt for decap the frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @tid: tid of the frame + * + * This function gets called from mgmt txrx to decap frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_decap(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *mac_addr, + uint8_t tid){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + QDF_STATUS status; + struct wlan_crypto_cipher *cipher_table; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_peer *peer; + uint8_t bssid_mac[QDF_MAC_ADDR_SIZE]; + uint8_t keyid; + uint8_t pdev_id; + uint8_t hdrlen; + enum QDF_OPMODE opmode; + + opmode = wlan_vdev_mlme_get_opmode(vdev); + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(bssid_mac, wlan_vdev_mlme_get_macaddr(vdev), + QDF_MAC_ADDR_SIZE); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + crypto_err("psoc NULL"); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + if (opmode == QDF_MONITOR_MODE) + hdrlen = ieee80211_hdrsize((uint8_t *)qdf_nbuf_data(wbuf)); + else + hdrlen = ieee80211_hdrspace(wlan_vdev_get_pdev(vdev), + (uint8_t *)qdf_nbuf_data(wbuf)); + + keyid = wlan_crypto_get_keyid((uint8_t *)qdf_nbuf_data(wbuf), hdrlen); + + if (keyid >= WLAN_CRYPTO_MAXKEYIDX) + return QDF_STATUS_E_INVAL; + + pdev_id = wlan_objmgr_pdev_get_pdev_id(wlan_vdev_get_pdev(vdev)); + /* FILS Decap required only for (Re-)Assoc request */ + peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr, WLAN_CRYPTO_ID); + + if (!wlan_crypto_is_data_protected((uint8_t *)qdf_nbuf_data(wbuf)) && + peer && !wlan_crypto_get_peer_fils_aead(peer)) { + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + return QDF_STATUS_E_INVAL; + } + + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + if (qdf_is_macaddr_group((struct qdf_mac_addr *)mac_addr)) { + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + + } else { + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev( + psoc, pdev_id, bssid_mac, + mac_addr, WLAN_CRYPTO_ID); + if (!peer) { + crypto_err("peer NULL"); + return QDF_STATUS_E_INVAL; + } + + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + } + /* if tkip, is counter measures enabled, then drop the frame */ + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + status = cipher_table->decap(key, wbuf, tid, hdrlen); + + return status; +} +qdf_export_symbol(wlan_crypto_decap); +/** + * wlan_crypto_enmic - called by mgmt for adding mic in frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @encapdone: is encapdone already or not. + * + * This function gets called from mgmt txrx to adding mic to the frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_enmic(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *mac_addr, + uint8_t encapdone){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + QDF_STATUS status; + struct wlan_crypto_cipher *cipher_table; + struct wlan_objmgr_psoc *psoc; + uint8_t bssid_mac[QDF_MAC_ADDR_SIZE]; + uint8_t hdrlen; + enum QDF_OPMODE opmode; + + opmode = wlan_vdev_mlme_get_opmode(vdev); + + + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(bssid_mac, wlan_vdev_mlme_get_macaddr(vdev), + QDF_MAC_ADDR_SIZE); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + crypto_err("psoc NULL"); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + if (qdf_is_macaddr_broadcast((struct qdf_mac_addr *)mac_addr)) { + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[crypto_priv->def_tx_keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + + } else { + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev( + psoc, pdev_id, bssid_mac, + mac_addr, WLAN_CRYPTO_ID); + if (!peer) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[crypto_priv->def_tx_keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + } + if (opmode == QDF_MONITOR_MODE) + hdrlen = ieee80211_hdrsize((uint8_t *)qdf_nbuf_data(wbuf)); + else + hdrlen = ieee80211_hdrspace(wlan_vdev_get_pdev(vdev), + (uint8_t *)qdf_nbuf_data(wbuf)); + + /* if tkip, is counter measures enabled, then drop the frame */ + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + status = cipher_table->enmic(key, wbuf, encapdone, hdrlen); + + return status; +} + +/** + * wlan_crypto_demic - called by mgmt for remove and check mic for + * the frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @tid: tid of the frame + * @keyid: keyid in the received frame + * This function gets called from mgmt txrx to decap frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_demic(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *mac_addr, + uint8_t tid, + uint8_t keyid){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + QDF_STATUS status; + struct wlan_crypto_cipher *cipher_table; + struct wlan_objmgr_psoc *psoc; + uint8_t bssid_mac[QDF_MAC_ADDR_SIZE]; + uint8_t hdrlen; + enum QDF_OPMODE opmode; + + opmode = wlan_vdev_mlme_get_opmode(vdev); + + if (opmode == QDF_MONITOR_MODE) + hdrlen = ieee80211_hdrsize((uint8_t *)qdf_nbuf_data(wbuf)); + else + hdrlen = ieee80211_hdrspace(wlan_vdev_get_pdev(vdev), + (uint8_t *)qdf_nbuf_data(wbuf)); + + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(bssid_mac, wlan_vdev_mlme_get_macaddr(vdev), + QDF_MAC_ADDR_SIZE); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + crypto_err("psoc NULL"); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + if (qdf_is_macaddr_broadcast((struct qdf_mac_addr *)mac_addr)) { + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + + } else { + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev( + psoc, pdev_id, bssid_mac, + mac_addr, WLAN_CRYPTO_ID); + if (!peer) { + crypto_err("peer NULL"); + return QDF_STATUS_E_INVAL; + } + + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + } + /* if tkip, is counter measures enabled, then drop the frame */ + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + status = cipher_table->demic(key, wbuf, tid, hdrlen); + + return status; +} + +/** + * wlan_crypto_vdev_is_pmf_enabled - called to check is pmf enabled in vdev + * @vdev: vdev + * + * This function gets called to check is pmf enabled or not in vdev. + * + * Return: true or false + */ +bool wlan_crypto_vdev_is_pmf_enabled(struct wlan_objmgr_vdev *vdev) +{ + + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *vdev_crypto_params; + + if (!vdev) + return false; + vdev_crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + if ((vdev_crypto_params->rsn_caps & + WLAN_CRYPTO_RSN_CAP_MFP_ENABLED) + || (vdev_crypto_params->rsn_caps & + WLAN_CRYPTO_RSN_CAP_MFP_REQUIRED)) { + return true; + } + + return false; +} + +/** + * wlan_crypto_vdev_is_pmf_required - called to check is pmf required in vdev + * @vdev: vdev + * + * This function gets called to check is pmf required or not in vdev. + * + * Return: true or false + */ +bool wlan_crypto_vdev_is_pmf_required(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *vdev_crypto_params; + + if (!vdev) + return false; + + vdev_crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + if (vdev_crypto_params->rsn_caps & WLAN_CRYPTO_RSN_CAP_MFP_REQUIRED) + return true; + + return false; +} + +/** + * wlan_crypto_is_pmf_enabled - called by mgmt txrx to check is pmf enabled + * @vdev: vdev + * @peer: peer + * + * This function gets called by mgmt txrx to check is pmf enabled or not. + * + * Return: true or false + */ +bool wlan_crypto_is_pmf_enabled(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer){ + + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *vdev_crypto_params; + struct wlan_crypto_params *peer_crypto_params; + + if (!vdev || !peer) + return false; + vdev_crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + peer_crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + if (((vdev_crypto_params->rsn_caps & + WLAN_CRYPTO_RSN_CAP_MFP_ENABLED) && + (peer_crypto_params->rsn_caps & + WLAN_CRYPTO_RSN_CAP_MFP_ENABLED)) + || (vdev_crypto_params->rsn_caps & + WLAN_CRYPTO_RSN_CAP_MFP_REQUIRED)) { + return true; + } + + return false; +} + +/** + * wlan_crypto_is_key_valid - called by mgmt txrx to check if key is valid + * @vdev: vdev + * @peer: peer + * @keyidx : key index + * + * This function gets called by mgmt txrx to check if key is valid + * + * Return: true or false + */ +bool wlan_crypto_is_key_valid(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer, + uint16_t keyidx) +{ + struct wlan_crypto_key *key = NULL; + + if (!vdev && !peer) + return false; + + if (peer) + key = wlan_crypto_peer_getkey(peer, keyidx); + else if (vdev) + key = wlan_crypto_vdev_getkey(vdev, keyidx); + + if ((key) && key->valid) + return true; + + return false; +} + +static void wlan_crypto_gmac_pn_swap(uint8_t *a, uint8_t *b) +{ + a[0] = b[5]; + a[1] = b[4]; + a[2] = b[3]; + a[3] = b[2]; + a[4] = b[1]; + a[5] = b[0]; +} + +/** + * wlan_crypto_add_mmie - called by mgmt txrx to add mmie in frame + * @vdev: vdev + * @bfrm: frame starting pointer + * @len: length of the frame + * + * This function gets called by mgmt txrx to add mmie in frame + * + * Return: end of frame or NULL in case failure + */ +uint8_t *wlan_crypto_add_mmie(struct wlan_objmgr_vdev *vdev, + uint8_t *bfrm, + uint32_t len) { + struct wlan_crypto_key *key; + struct wlan_crypto_mmie *mmie; + uint8_t *pn, *aad, *buf, *efrm, nounce[12]; + struct wlan_frame_hdr *hdr; + uint32_t i, hdrlen, mic_len, aad_len; + uint8_t mic[16]; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + int32_t ret = -1; + + if (!bfrm) { + crypto_err("frame is NULL"); + return NULL; + } + + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return NULL; + } + + if (crypto_priv->def_igtk_tx_keyid >= WLAN_CRYPTO_MAXIGTKKEYIDX) { + crypto_err("igtk key invalid keyid %d", + crypto_priv->def_igtk_tx_keyid); + return NULL; + } + + key = crypto_priv->igtk_key[crypto_priv->def_igtk_tx_keyid]; + if (!key) { + crypto_err("No igtk key present"); + return NULL; + } + mic_len = (crypto_priv->igtk_key_type + == WLAN_CRYPTO_CIPHER_AES_CMAC) ? 8 : 16; + + efrm = bfrm + len; + aad_len = 20; + hdrlen = sizeof(struct wlan_frame_hdr); + len += sizeof(struct wlan_crypto_mmie); + + mmie = (struct wlan_crypto_mmie *) efrm; + qdf_mem_zero((unsigned char *)mmie, sizeof(*mmie)); + mmie->element_id = WLAN_ELEMID_MMIE; + mmie->length = sizeof(*mmie) - 2; + mmie->key_id = qdf_cpu_to_le16(key->keyix); + + mic_len = (crypto_priv->igtk_key_type + == WLAN_CRYPTO_CIPHER_AES_CMAC) ? 8 : 16; + if (mic_len == 8) { + mmie->length -= 8; + len -= 8; + } + /* PN = PN + 1 */ + pn = (uint8_t *)&key->keytsc; + + for (i = 0; i <= 5; i++) { + pn[i]++; + if (pn[i]) + break; + } + + /* Copy IPN */ + qdf_mem_copy(mmie->sequence_number, pn, 6); + + hdr = (struct wlan_frame_hdr *) bfrm; + + buf = qdf_mem_malloc(len - hdrlen + 20); + if (!buf) + return NULL; + + qdf_mem_zero(buf, len - hdrlen + 20); + aad = buf; + /* generate BIP AAD: FC(masked) || A1 || A2 || A3 */ + + /* FC type/subtype */ + aad[0] = hdr->i_fc[0]; + /* Mask FC Retry, PwrMgt, MoreData flags to zero */ + aad[1] = (hdr->i_fc[1] & ~(WLAN_FC1_RETRY | WLAN_FC1_PWRMGT + | WLAN_FC1_MOREDATA)); + /* A1 || A2 || A3 */ + qdf_mem_copy(aad + 2, hdr->i_addr1, QDF_MAC_ADDR_SIZE); + qdf_mem_copy(aad + 8, hdr->i_addr2, QDF_MAC_ADDR_SIZE); + qdf_mem_copy(aad + 14, hdr->i_addr3, QDF_MAC_ADDR_SIZE); + qdf_mem_zero(mic, 16); + + /* + * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) + */ + + qdf_mem_copy(buf + aad_len, bfrm + hdrlen, len - hdrlen); + if (crypto_priv->igtk_key_type == WLAN_CRYPTO_CIPHER_AES_CMAC) { + + ret = omac1_aes_128(key->keyval, buf, + len + aad_len - hdrlen, mic); + qdf_mem_copy(mmie->mic, mic, 8); + + } else if (crypto_priv->igtk_key_type + == WLAN_CRYPTO_CIPHER_AES_CMAC_256) { + + ret = omac1_aes_256(key->keyval, buf, + len + aad_len - hdrlen, mmie->mic); + } else if ((crypto_priv->igtk_key_type == WLAN_CRYPTO_CIPHER_AES_GMAC) + || (crypto_priv->igtk_key_type + == WLAN_CRYPTO_CIPHER_AES_GMAC_256)) { + + qdf_mem_copy(nounce, hdr->i_addr2, QDF_MAC_ADDR_SIZE); + wlan_crypto_gmac_pn_swap(nounce + 6, pn); + ret = wlan_crypto_aes_gmac(key->keyval, key->keylen, nounce, + sizeof(nounce), buf, + len + aad_len - hdrlen, mmie->mic); + } + qdf_mem_free(buf); + if (ret < 0) { + crypto_err("add mmie failed"); + return NULL; + } + + return bfrm + len; +} + +/** + * wlan_crypto_is_mmie_valid - called by mgmt txrx to check mmie of the frame + * @vdev: vdev + * @frm: frame starting pointer + * @efrm: end of frame pointer + * + * This function gets called by mgmt txrx to check mmie of the frame + * + * Return: true or false + */ +bool wlan_crypto_is_mmie_valid(struct wlan_objmgr_vdev *vdev, + uint8_t *frm, + uint8_t *efrm){ + struct wlan_crypto_mmie *mmie = NULL; + uint8_t *ipn, *aad, *buf, mic[16], nounce[12]; + struct wlan_crypto_key *key; + struct wlan_frame_hdr *hdr; + uint16_t mic_len, hdrlen, len; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + uint8_t aad_len = 20; + int32_t ret = -1; + + /* check if frame is illegal length */ + if (!frm || !efrm || (efrm < frm) + || ((efrm - frm) < sizeof(struct wlan_frame_hdr))) { + crypto_err("Invalid params"); + return false; + } + len = efrm - frm; + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return false; + } + + crypto_params = &(crypto_priv->crypto_params); + + + mic_len = (crypto_priv->igtk_key_type + == WLAN_CRYPTO_CIPHER_AES_CMAC) ? 8 : 16; + hdrlen = sizeof(struct wlan_frame_hdr); + + if (mic_len == 8) + mmie = (struct wlan_crypto_mmie *)(efrm - sizeof(*mmie) + 8); + else + mmie = (struct wlan_crypto_mmie *)(efrm - sizeof(*mmie)); + + + /* check Elem ID*/ + if ((!mmie) || (mmie->element_id != WLAN_ELEMID_MMIE)) { + crypto_err("IE is not MMIE"); + return false; + } + + if (mmie->key_id >= (WLAN_CRYPTO_MAXKEYIDX + + WLAN_CRYPTO_MAXIGTKKEYIDX) || + (mmie->key_id < WLAN_CRYPTO_MAXKEYIDX)) { + crypto_err("keyid not valid"); + return false; + } + + key = crypto_priv->igtk_key[mmie->key_id - WLAN_CRYPTO_MAXKEYIDX]; + if (!key) { + crypto_err("No igtk key present"); + return false; + } + + /* validate ipn */ + ipn = mmie->sequence_number; + if (qdf_mem_cmp(ipn, key->keyrsc, 6) <= 0) { + uint8_t *su = (uint8_t *)key->keyrsc; + uint8_t *end = ipn + 6; + + crypto_err("replay error :"); + while (ipn < end) { + crypto_err("expected pn = %x received pn = %x", + *ipn++, *su++); + } + return false; + } + + buf = qdf_mem_malloc(len - hdrlen + 20); + if (!buf) + return false; + + aad = buf; + + /* construct AAD */ + hdr = (struct wlan_frame_hdr *)frm; + /* generate BIP AAD: FC(masked) || A1 || A2 || A3 */ + + /* FC type/subtype */ + aad[0] = hdr->i_fc[0]; + /* Mask FC Retry, PwrMgt, MoreData flags to zero */ + aad[1] = (hdr->i_fc[1] & ~(WLAN_FC1_RETRY | WLAN_FC1_PWRMGT + | WLAN_FC1_MOREDATA)); + /* A1 || A2 || A3 */ + qdf_mem_copy(aad + 2, hdr->i_addr1, 3 * QDF_MAC_ADDR_SIZE); + + /* + * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) + */ + qdf_mem_copy(buf + 20, frm + hdrlen, len - hdrlen); + qdf_mem_zero(buf + (len - hdrlen + 20 - mic_len), mic_len); + qdf_mem_zero(mic, 16); + if (crypto_priv->igtk_key_type == WLAN_CRYPTO_CIPHER_AES_CMAC) { + ret = omac1_aes_128(key->keyval, buf, + len - hdrlen + aad_len, mic); + } else if (crypto_priv->igtk_key_type + == WLAN_CRYPTO_CIPHER_AES_CMAC_256) { + ret = omac1_aes_256(key->keyval, buf, + len + aad_len - hdrlen, mic); + } else if ((crypto_priv->igtk_key_type == WLAN_CRYPTO_CIPHER_AES_GMAC) + || (crypto_priv->igtk_key_type + == WLAN_CRYPTO_CIPHER_AES_GMAC_256)) { + qdf_mem_copy(nounce, hdr->i_addr2, QDF_MAC_ADDR_SIZE); + wlan_crypto_gmac_pn_swap(nounce + 6, ipn); + ret = wlan_crypto_aes_gmac(key->keyval, key->keylen, nounce, + sizeof(nounce), buf, + len + aad_len - hdrlen, mic); + } + + qdf_mem_free(buf); + + if (ret < 0) { + crypto_err("genarate mmie failed"); + return false; + } + + if (qdf_mem_cmp(mic, mmie->mic, mic_len) != 0) { + crypto_err("mmie mismatch"); + /* MMIE MIC mismatch */ + return false; + } + + /* Update the receive sequence number */ + qdf_mem_copy(key->keyrsc, ipn, 6); + crypto_debug("mmie matched"); + + return true; +} + + +static int32_t wlan_crypto_wpa_cipher_to_suite(uint32_t cipher) +{ + int32_t status = -1; + + switch (cipher) { + case WLAN_CRYPTO_CIPHER_TKIP: + return WPA_CIPHER_SUITE_TKIP; + case WLAN_CRYPTO_CIPHER_AES_CCM: + return WPA_CIPHER_SUITE_CCMP; + case WLAN_CRYPTO_CIPHER_NONE: + return WPA_CIPHER_SUITE_NONE; + } + + return status; +} + +static int32_t wlan_crypto_rsn_cipher_to_suite(uint32_t cipher) +{ + int32_t status = -1; + + switch (cipher) { + case WLAN_CRYPTO_CIPHER_TKIP: + return RSN_CIPHER_SUITE_TKIP; + case WLAN_CRYPTO_CIPHER_AES_CCM: + return RSN_CIPHER_SUITE_CCMP; + case WLAN_CRYPTO_CIPHER_AES_CCM_256: + return RSN_CIPHER_SUITE_CCMP_256; + case WLAN_CRYPTO_CIPHER_AES_GCM: + return RSN_CIPHER_SUITE_GCMP; + case WLAN_CRYPTO_CIPHER_AES_GCM_256: + return RSN_CIPHER_SUITE_GCMP_256; + case WLAN_CRYPTO_CIPHER_AES_CMAC: + return RSN_CIPHER_SUITE_AES_CMAC; + case WLAN_CRYPTO_CIPHER_AES_CMAC_256: + return RSN_CIPHER_SUITE_BIP_CMAC_256; + case WLAN_CRYPTO_CIPHER_AES_GMAC: + return RSN_CIPHER_SUITE_BIP_GMAC_128; + case WLAN_CRYPTO_CIPHER_AES_GMAC_256: + return RSN_CIPHER_SUITE_BIP_GMAC_256; + case WLAN_CRYPTO_CIPHER_NONE: + return RSN_CIPHER_SUITE_NONE; + } + + return status; +} + +/* + * Convert an RSN key management/authentication algorithm + * to an internal code. + */ +static int32_t +wlan_crypto_rsn_keymgmt_to_suite(uint32_t keymgmt) +{ + int32_t status = -1; + + switch (keymgmt) { + case WLAN_CRYPTO_KEY_MGMT_NONE: + return RSN_AUTH_KEY_MGMT_NONE; + case WLAN_CRYPTO_KEY_MGMT_IEEE8021X: + return RSN_AUTH_KEY_MGMT_UNSPEC_802_1X; + case WLAN_CRYPTO_KEY_MGMT_PSK: + return RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X; + case WLAN_CRYPTO_KEY_MGMT_FT_IEEE8021X: + return RSN_AUTH_KEY_MGMT_FT_802_1X; + case WLAN_CRYPTO_KEY_MGMT_FT_PSK: + return RSN_AUTH_KEY_MGMT_FT_PSK; + case WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SHA256: + return RSN_AUTH_KEY_MGMT_802_1X_SHA256; + case WLAN_CRYPTO_KEY_MGMT_PSK_SHA256: + return RSN_AUTH_KEY_MGMT_PSK_SHA256; + case WLAN_CRYPTO_KEY_MGMT_SAE: + return RSN_AUTH_KEY_MGMT_SAE; + case WLAN_CRYPTO_KEY_MGMT_FT_SAE: + return RSN_AUTH_KEY_MGMT_FT_SAE; + case WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B: + return RSN_AUTH_KEY_MGMT_802_1X_SUITE_B; + case WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B_192: + return RSN_AUTH_KEY_MGMT_802_1X_SUITE_B_192; + case WLAN_CRYPTO_KEY_MGMT_CCKM: + return RSN_AUTH_KEY_MGMT_CCKM; + case WLAN_CRYPTO_KEY_MGMT_OSEN: + return RSN_AUTH_KEY_MGMT_OSEN; + case WLAN_CRYPTO_KEY_MGMT_FILS_SHA256: + return RSN_AUTH_KEY_MGMT_FILS_SHA256; + case WLAN_CRYPTO_KEY_MGMT_FILS_SHA384: + return RSN_AUTH_KEY_MGMT_FILS_SHA384; + case WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA256: + return RSN_AUTH_KEY_MGMT_FT_FILS_SHA256; + case WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA384: + return RSN_AUTH_KEY_MGMT_FT_FILS_SHA384; + case WLAN_CRYPTO_KEY_MGMT_OWE: + return RSN_AUTH_KEY_MGMT_OWE; + case WLAN_CRYPTO_KEY_MGMT_DPP: + return RSN_AUTH_KEY_MGMT_DPP; + } + + return status; +} + +/* + * Convert an RSN key management/authentication algorithm + * to an internal code. + */ +static int32_t +wlan_crypto_wpa_keymgmt_to_suite(uint32_t keymgmt) +{ + int32_t status = -1; + + switch (keymgmt) { + case WLAN_CRYPTO_KEY_MGMT_NONE: + return WPA_AUTH_KEY_MGMT_NONE; + case WLAN_CRYPTO_KEY_MGMT_IEEE8021X: + return WPA_AUTH_KEY_MGMT_UNSPEC_802_1X; + case WLAN_CRYPTO_KEY_MGMT_PSK: + return WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X; + case WLAN_CRYPTO_KEY_MGMT_CCKM: + return WPA_AUTH_KEY_MGMT_CCKM; + } + + return status; +} +/** + * Convert a WPA cipher selector OUI to an internal + * cipher algorithm. Where appropriate we also + * record any key length. + */ +static int32_t wlan_crypto_wpa_suite_to_cipher(uint8_t *sel) +{ + uint32_t w = LE_READ_4(sel); + int32_t status = -1; + + switch (w) { + case WPA_CIPHER_SUITE_TKIP: + return WLAN_CRYPTO_CIPHER_TKIP; + case WPA_CIPHER_SUITE_CCMP: + return WLAN_CRYPTO_CIPHER_AES_CCM; + case WPA_CIPHER_SUITE_NONE: + return WLAN_CRYPTO_CIPHER_NONE; + } + + return status; +} + +/* + * Convert a WPA key management/authentication algorithm + * to an internal code. + */ +static int32_t wlan_crypto_wpa_suite_to_keymgmt(uint8_t *sel) +{ + uint32_t w = LE_READ_4(sel); + int32_t status = -1; + + switch (w) { + case WPA_AUTH_KEY_MGMT_UNSPEC_802_1X: + return WLAN_CRYPTO_KEY_MGMT_IEEE8021X; + case WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X: + return WLAN_CRYPTO_KEY_MGMT_PSK; + case WPA_AUTH_KEY_MGMT_CCKM: + return WLAN_CRYPTO_KEY_MGMT_CCKM; + case WPA_AUTH_KEY_MGMT_NONE: + return WLAN_CRYPTO_KEY_MGMT_NONE; + } + return status; +} + +/* + * Convert a RSN cipher selector OUI to an internal + * cipher algorithm. Where appropriate we also + * record any key length. + */ +static int32_t wlan_crypto_rsn_suite_to_cipher(uint8_t *sel) +{ + uint32_t w = LE_READ_4(sel); + int32_t status = -1; + + switch (w) { + case RSN_CIPHER_SUITE_TKIP: + return WLAN_CRYPTO_CIPHER_TKIP; + case RSN_CIPHER_SUITE_CCMP: + return WLAN_CRYPTO_CIPHER_AES_CCM; + case RSN_CIPHER_SUITE_CCMP_256: + return WLAN_CRYPTO_CIPHER_AES_CCM_256; + case RSN_CIPHER_SUITE_GCMP: + return WLAN_CRYPTO_CIPHER_AES_GCM; + case RSN_CIPHER_SUITE_GCMP_256: + return WLAN_CRYPTO_CIPHER_AES_GCM_256; + case RSN_CIPHER_SUITE_AES_CMAC: + return WLAN_CRYPTO_CIPHER_AES_CMAC; + case RSN_CIPHER_SUITE_BIP_CMAC_256: + return WLAN_CRYPTO_CIPHER_AES_CMAC_256; + case RSN_CIPHER_SUITE_BIP_GMAC_128: + return WLAN_CRYPTO_CIPHER_AES_GMAC; + case RSN_CIPHER_SUITE_BIP_GMAC_256: + return WLAN_CRYPTO_CIPHER_AES_GMAC_256; + case RSN_CIPHER_SUITE_NONE: + return WLAN_CRYPTO_CIPHER_NONE; + } + + return status; +} +/* + * Convert an RSN key management/authentication algorithm + * to an internal code. + */ +static int32_t wlan_crypto_rsn_suite_to_keymgmt(uint8_t *sel) +{ + uint32_t w = LE_READ_4(sel); + int32_t status = -1; + + switch (w) { + case RSN_AUTH_KEY_MGMT_UNSPEC_802_1X: + return WLAN_CRYPTO_KEY_MGMT_IEEE8021X; + case RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X: + return WLAN_CRYPTO_KEY_MGMT_PSK; + case RSN_AUTH_KEY_MGMT_FT_802_1X: + return WLAN_CRYPTO_KEY_MGMT_FT_IEEE8021X; + case RSN_AUTH_KEY_MGMT_FT_PSK: + return WLAN_CRYPTO_KEY_MGMT_FT_PSK; + case RSN_AUTH_KEY_MGMT_802_1X_SHA256: + return WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SHA256; + case RSN_AUTH_KEY_MGMT_PSK_SHA256: + return WLAN_CRYPTO_KEY_MGMT_PSK_SHA256; + case RSN_AUTH_KEY_MGMT_SAE: + return WLAN_CRYPTO_KEY_MGMT_SAE; + case RSN_AUTH_KEY_MGMT_FT_SAE: + return WLAN_CRYPTO_KEY_MGMT_FT_SAE; + case RSN_AUTH_KEY_MGMT_802_1X_SUITE_B: + return WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B; + case RSN_AUTH_KEY_MGMT_802_1X_SUITE_B_192: + return WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B_192; + case RSN_AUTH_KEY_MGMT_CCKM: + return WLAN_CRYPTO_KEY_MGMT_CCKM; + case RSN_AUTH_KEY_MGMT_OSEN: + return WLAN_CRYPTO_KEY_MGMT_OSEN; + case RSN_AUTH_KEY_MGMT_FILS_SHA256: + return WLAN_CRYPTO_KEY_MGMT_FILS_SHA256; + case RSN_AUTH_KEY_MGMT_FILS_SHA384: + return WLAN_CRYPTO_KEY_MGMT_FILS_SHA384; + case RSN_AUTH_KEY_MGMT_FT_FILS_SHA256: + return WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA256; + case RSN_AUTH_KEY_MGMT_FT_FILS_SHA384: + return WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA384; + case RSN_AUTH_KEY_MGMT_OWE: + return WLAN_CRYPTO_KEY_MGMT_OWE; + case RSN_AUTH_KEY_MGMT_DPP: + return WLAN_CRYPTO_KEY_MGMT_DPP; + case RSN_AUTH_KEY_MGMT_FT_802_1X_SUITE_B_384: + return WLAN_CRYPTO_KEY_MGMT_FT_IEEE8021X_SHA384; + } + + return status; +} + +/** + * wlan_crypto_wpaie_check - called by mlme to check the wpaie + * @crypto params: crypto params + * @iebuf: ie buffer + * + * This function gets called by mlme to check the contents of wpa is + * matching with given crypto params + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_wpaie_check(struct wlan_crypto_params *crypto_params, + uint8_t *frm){ + uint8_t len = frm[1]; + int32_t w; + int n; + + /* + * Check the length once for fixed parts: OUI, type, + * version, mcast cipher, and 2 selector counts. + * Other, variable-length data, must be checked separately. + */ + SET_AUTHMODE(crypto_params, WLAN_CRYPTO_AUTH_WPA); + + if (len < 14) + return QDF_STATUS_E_INVAL; + + frm += 6, len -= 4; + + w = LE_READ_2(frm); + if (w != WPA_VERSION) + return QDF_STATUS_E_INVAL; + + frm += 2, len -= 2; + + /* multicast/group cipher */ + w = wlan_crypto_wpa_suite_to_cipher(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + SET_MCAST_CIPHER(crypto_params, w); + frm += 4, len -= 4; + + /* unicast ciphers */ + n = LE_READ_2(frm); + frm += 2, len -= 2; + if (len < n*4+2) + return QDF_STATUS_E_INVAL; + + for (; n > 0; n--) { + w = wlan_crypto_wpa_suite_to_cipher(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + SET_UCAST_CIPHER(crypto_params, w); + frm += 4, len -= 4; + } + + if (!crypto_params->ucastcipherset) + return QDF_STATUS_E_INVAL; + + /* key management algorithms */ + n = LE_READ_2(frm); + frm += 2, len -= 2; + if (len < n*4) + return QDF_STATUS_E_INVAL; + + w = 0; + for (; n > 0; n--) { + w = wlan_crypto_wpa_suite_to_keymgmt(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + SET_KEY_MGMT(crypto_params, w); + frm += 4, len -= 4; + } + + /* optional capabilities */ + if (len >= 2) { + crypto_params->rsn_caps = LE_READ_2(frm); + frm += 2, len -= 2; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_rsnie_check - called by mlme to check the rsnie + * @crypto params: crypto params + * @iebuf: ie buffer + * + * This function gets called by mlme to check the contents of wpa is + * matching with given crypto params + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_rsnie_check(struct wlan_crypto_params *crypto_params, + uint8_t *frm){ + uint8_t len = frm[1]; + int32_t w; + int n; + + /* Check the length once for fixed parts: OUI, type & version */ + if (len < 2) + return QDF_STATUS_E_INVAL; + + /* initialize crypto params */ + qdf_mem_zero(crypto_params, sizeof(struct wlan_crypto_params)); + + SET_AUTHMODE(crypto_params, WLAN_CRYPTO_AUTH_RSNA); + + frm += 2; + /* NB: iswapoui already validated the OUI and type */ + w = LE_READ_2(frm); + if (w != RSN_VERSION) + return QDF_STATUS_E_INVAL; + + frm += 2, len -= 2; + + if (!len) { + /* set defaults */ + /* default group cipher CCMP-128 */ + SET_MCAST_CIPHER(crypto_params, WLAN_CRYPTO_CIPHER_AES_CCM); + /* default ucast cipher CCMP-128 */ + SET_UCAST_CIPHER(crypto_params, WLAN_CRYPTO_CIPHER_AES_CCM); + /* default key mgmt 8021x */ + SET_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_IEEE8021X); + return QDF_STATUS_SUCCESS; + } else if (len < 4) { + return QDF_STATUS_E_INVAL; + } + + /* multicast/group cipher */ + w = wlan_crypto_rsn_suite_to_cipher(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + else { + SET_MCAST_CIPHER(crypto_params, w); + frm += 4, len -= 4; + } + + if (crypto_params->mcastcipherset == 0) + return QDF_STATUS_E_INVAL; + + if (!len) { + /* default ucast cipher CCMP-128 */ + SET_UCAST_CIPHER(crypto_params, WLAN_CRYPTO_CIPHER_AES_CCM); + /* default key mgmt 8021x */ + SET_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_IEEE8021X); + return QDF_STATUS_SUCCESS; + } else if (len < 2) { + return QDF_STATUS_E_INVAL; + } + + /* unicast ciphers */ + n = LE_READ_2(frm); + frm += 2, len -= 2; + if (n) { + if (len < n * 4) + return QDF_STATUS_E_INVAL; + + for (; n > 0; n--) { + w = wlan_crypto_rsn_suite_to_cipher(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + SET_UCAST_CIPHER(crypto_params, w); + frm += 4, len -= 4; + } + } else { + /* default ucast cipher CCMP-128 */ + SET_UCAST_CIPHER(crypto_params, WLAN_CRYPTO_CIPHER_AES_CCM); + } + + if (crypto_params->ucastcipherset == 0) + return QDF_STATUS_E_INVAL; + + if (!len) { + /* default key mgmt 8021x */ + SET_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_IEEE8021X); + return QDF_STATUS_SUCCESS; + } else if (len < 2) { + return QDF_STATUS_E_INVAL; + } + + /* key management algorithms */ + n = LE_READ_2(frm); + frm += 2, len -= 2; + + if (n) { + if (len < n * 4) + return QDF_STATUS_E_INVAL; + + for (; n > 0; n--) { + w = wlan_crypto_rsn_suite_to_keymgmt(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + SET_KEY_MGMT(crypto_params, w); + frm += 4, len -= 4; + } + } else { + /* default key mgmt 8021x */ + SET_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_IEEE8021X); + } + + if (crypto_params->key_mgmt == 0) + return QDF_STATUS_E_INVAL; + + /* optional capabilities */ + if (len >= 2) { + crypto_params->rsn_caps = LE_READ_2(frm); + frm += 2, len -= 2; + } else if (len && len < 2) { + return QDF_STATUS_E_INVAL; + } + + + /* PMKID */ + if (len >= 2) { + n = LE_READ_2(frm); + frm += 2, len -= 2; + if (n && len) { + if (len >= n * PMKID_LEN) + frm += (n * PMKID_LEN), len -= (n * PMKID_LEN); + else + return QDF_STATUS_E_INVAL; + } else if (n && !len) { + return QDF_STATUS_E_INVAL; + } + /*TODO: Save pmkid in params for further reference */ + } + + /* BIP */ + if (!len && + (crypto_params->rsn_caps & WLAN_CRYPTO_RSN_CAP_MFP_ENABLED)) { + /* when no BIP mentioned and MFP capable use CMAC as default*/ + SET_MGMT_CIPHER(crypto_params, WLAN_CRYPTO_CIPHER_AES_CMAC); + return QDF_STATUS_SUCCESS; + } else if (len >= 4) { + w = wlan_crypto_rsn_suite_to_cipher(frm); + frm += 4, len -= 4; + SET_MGMT_CIPHER(crypto_params, w); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_build_wpaie - called by mlme to build wpaie + * @vdev: vdev + * @iebuf: ie buffer + * + * This function gets called by mlme to build wpaie from given vdev + * + * Return: end of buffer + */ +uint8_t *wlan_crypto_build_wpaie(struct wlan_objmgr_vdev *vdev, + uint8_t *iebuf){ + uint8_t *frm = iebuf; + uint8_t *selcnt; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + + if (!frm) + return NULL; + + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, &crypto_priv); + + if (!crypto_params) + return NULL; + + *frm++ = WLAN_ELEMID_VENDOR; + *frm++ = 0; + WLAN_CRYPTO_ADDSELECTOR(frm, WPA_TYPE_OUI); + WLAN_CRYPTO_ADDSHORT(frm, WPA_VERSION); + + + /* multicast cipher */ + if (MCIPHER_IS_TKIP(crypto_params)) + WPA_ADD_CIPHER_TO_SUITE(frm, WLAN_CRYPTO_CIPHER_TKIP); + else if (MCIPHER_IS_CCMP128(crypto_params)) + WPA_ADD_CIPHER_TO_SUITE(frm, WLAN_CRYPTO_CIPHER_AES_CCM); + + /* unicast cipher list */ + selcnt = frm; + WLAN_CRYPTO_ADDSHORT(frm, 0); + /* do not use CCMP unicast cipher in WPA mode */ + if (UCIPHER_IS_CCMP128(crypto_params)) { + selcnt[0]++; + WPA_ADD_CIPHER_TO_SUITE(frm, WLAN_CRYPTO_CIPHER_AES_CCM); + } + if (UCIPHER_IS_TKIP(crypto_params)) { + selcnt[0]++; + WPA_ADD_CIPHER_TO_SUITE(frm, WLAN_CRYPTO_CIPHER_TKIP); + } + + /* authenticator selector list */ + selcnt = frm; + WLAN_CRYPTO_ADDSHORT(frm, 0); + + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_IEEE8021X)) { + selcnt[0]++; + WPA_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_IEEE8021X); + } else if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_PSK)) { + selcnt[0]++; + WPA_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_PSK); + } else if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_CCKM)) { + selcnt[0]++; + WPA_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_CCKM); + } else { + selcnt[0]++; + WPA_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_NONE); + } + + /* optional capabilities */ + if (crypto_params->rsn_caps != 0 && + crypto_params->rsn_caps != WLAN_CRYPTO_RSN_CAP_PREAUTH) { + WLAN_CRYPTO_ADDSHORT(frm, crypto_params->rsn_caps); + } + + /* calculate element length */ + iebuf[1] = frm - iebuf - 2; + + return frm; +} + +uint8_t *wlan_crypto_build_rsnie_with_pmksa(struct wlan_objmgr_vdev *vdev, + uint8_t *iebuf, + struct wlan_crypto_pmksa *pmksa) +{ + uint8_t *frm = iebuf; + uint8_t *selcnt; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + + if (!frm) { + return NULL; + } + + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, &crypto_priv); + + if (!crypto_params) { + return NULL; + } + + *frm++ = WLAN_ELEMID_RSN; + *frm++ = 0; + WLAN_CRYPTO_ADDSHORT(frm, RSN_VERSION); + + + /* multicast cipher */ + if (MCIPHER_IS_TKIP(crypto_params)) + RSN_ADD_CIPHER_TO_SUITE(frm, WLAN_CRYPTO_CIPHER_TKIP); + else if (MCIPHER_IS_CCMP128(crypto_params)) + RSN_ADD_CIPHER_TO_SUITE(frm, WLAN_CRYPTO_CIPHER_AES_CCM); + else if (MCIPHER_IS_CCMP256(crypto_params)) + RSN_ADD_CIPHER_TO_SUITE(frm, WLAN_CRYPTO_CIPHER_AES_CCM_256); + else if (MCIPHER_IS_GCMP128(crypto_params)) + RSN_ADD_CIPHER_TO_SUITE(frm, WLAN_CRYPTO_CIPHER_AES_GCM); + else if (MCIPHER_IS_GCMP256(crypto_params)) + RSN_ADD_CIPHER_TO_SUITE(frm, WLAN_CRYPTO_CIPHER_AES_GCM_256); + + /* unicast cipher list */ + selcnt = frm; + WLAN_CRYPTO_ADDSHORT(frm, 0); + + if (UCIPHER_IS_CCMP256(crypto_params)) { + selcnt[0]++; + RSN_ADD_CIPHER_TO_SUITE(frm, WLAN_CRYPTO_CIPHER_AES_CCM_256); + } + if (UCIPHER_IS_GCMP256(crypto_params)) { + selcnt[0]++; + RSN_ADD_CIPHER_TO_SUITE(frm, WLAN_CRYPTO_CIPHER_AES_GCM_256); + } + if (UCIPHER_IS_CCMP128(crypto_params)) { + selcnt[0]++; + RSN_ADD_CIPHER_TO_SUITE(frm, WLAN_CRYPTO_CIPHER_AES_CCM); + } + if (UCIPHER_IS_GCMP128(crypto_params)) { + selcnt[0]++; + RSN_ADD_CIPHER_TO_SUITE(frm, WLAN_CRYPTO_CIPHER_AES_GCM); + } + if (UCIPHER_IS_TKIP(crypto_params)) { + selcnt[0]++; + RSN_ADD_CIPHER_TO_SUITE(frm, WLAN_CRYPTO_CIPHER_TKIP); + } + + /* authenticator selector list */ + selcnt = frm; + WLAN_CRYPTO_ADDSHORT(frm, 0); + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_CCKM)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_CCKM); + /* Other key mgmt should not be added after CCKM */ + goto add_rsn_caps; + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_IEEE8021X)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_IEEE8021X); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_PSK)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_PSK); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_FT_IEEE8021X)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, + WLAN_CRYPTO_KEY_MGMT_FT_IEEE8021X); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_FT_PSK)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_FT_PSK); + } + if (HAS_KEY_MGMT(crypto_params, + WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SHA256)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, + WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SHA256); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_PSK_SHA256)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_PSK_SHA256); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_SAE)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_SAE); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_FT_SAE)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_FT_SAE); + } + if (HAS_KEY_MGMT(crypto_params, + WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B)) { + uint32_t kmgmt = WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B; + + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, kmgmt); + } + if (HAS_KEY_MGMT(crypto_params, + WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B_192)) { + uint32_t kmgmt = WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B_192; + + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, kmgmt); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_FILS_SHA256)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_FILS_SHA256); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_FILS_SHA384)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_FILS_SHA384); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA256)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, + WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA256); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA384)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, + WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA384); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_OWE)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_OWE); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_DPP)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_DPP); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_OSEN)) { + selcnt[0]++; + RSN_ADD_KEYMGMT_TO_SUITE(frm, WLAN_CRYPTO_KEY_MGMT_OSEN); + } +add_rsn_caps: + WLAN_CRYPTO_ADDSHORT(frm, crypto_params->rsn_caps); + /* optional capabilities */ + if (crypto_params->rsn_caps & WLAN_CRYPTO_RSN_CAP_MFP_ENABLED) { + /* PMK list */ + if (pmksa) { + WLAN_CRYPTO_ADDSHORT(frm, 1); + qdf_mem_copy(frm, pmksa->pmkid, PMKID_LEN); + frm += PMKID_LEN; + } else { + WLAN_CRYPTO_ADDSHORT(frm, 0); + } + + if (HAS_MGMT_CIPHER(crypto_params, + WLAN_CRYPTO_CIPHER_AES_CMAC)) { + RSN_ADD_CIPHER_TO_SUITE(frm, + WLAN_CRYPTO_CIPHER_AES_CMAC); + } + if (HAS_MGMT_CIPHER(crypto_params, + WLAN_CRYPTO_CIPHER_AES_GMAC)) { + RSN_ADD_CIPHER_TO_SUITE(frm, + WLAN_CRYPTO_CIPHER_AES_GMAC); + } + if (HAS_MGMT_CIPHER(crypto_params, + WLAN_CRYPTO_CIPHER_AES_CMAC_256)) { + RSN_ADD_CIPHER_TO_SUITE(frm, + WLAN_CRYPTO_CIPHER_AES_CMAC_256 + ); + } + + if (HAS_MGMT_CIPHER(crypto_params, + WLAN_CRYPTO_CIPHER_AES_GMAC_256)) { + RSN_ADD_CIPHER_TO_SUITE(frm, + WLAN_CRYPTO_CIPHER_AES_GMAC_256 + ); + } + } else { + /* PMK list */ + if (pmksa) { + WLAN_CRYPTO_ADDSHORT(frm, 1); + qdf_mem_copy(frm, pmksa->pmkid, PMKID_LEN); + frm += PMKID_LEN; + } + } + + /* calculate element length */ + iebuf[1] = frm - iebuf - 2; + + return frm; +} + +uint8_t *wlan_crypto_build_rsnie(struct wlan_objmgr_vdev *vdev, + uint8_t *iebuf, + struct qdf_mac_addr *bssid) +{ + struct wlan_crypto_pmksa *pmksa = NULL; + + if (bssid) + pmksa = wlan_crypto_get_pmksa(vdev, bssid); + + return wlan_crypto_build_rsnie_with_pmksa(vdev, iebuf, pmksa); +} + +bool wlan_crypto_rsn_info(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_params *crypto_params){ + struct wlan_crypto_params *my_crypto_params; + my_crypto_params = wlan_crypto_vdev_get_crypto_params(vdev); + + if (!my_crypto_params) { + crypto_debug("vdev crypto params is NULL"); + return false; + } + /* + * Check peer's pairwise ciphers. + * At least one must match with our unicast cipher + */ + if (!UCAST_CIPHER_MATCH(crypto_params, my_crypto_params)) { + crypto_debug("Unicast cipher match failed"); + return false; + } + /* + * Check peer's group cipher is our enabled multicast cipher. + */ + if (!MCAST_CIPHER_MATCH(crypto_params, my_crypto_params)) { + crypto_debug("Multicast cipher match failed"); + return false; + } + /* + * Check peer's key management class set (PSK or UNSPEC) + */ + if (!KEY_MGMTSET_MATCH(crypto_params, my_crypto_params)) { + crypto_debug("Key mgmt match failed"); + return false; + } + if (wlan_crypto_vdev_is_pmf_required(vdev) && + !(crypto_params->rsn_caps & WLAN_CRYPTO_RSN_CAP_MFP_ENABLED)) { + crypto_debug("Peer is not PMF capable"); + return false; + } + if (!wlan_crypto_vdev_is_pmf_enabled(vdev) && + (crypto_params->rsn_caps & WLAN_CRYPTO_RSN_CAP_MFP_REQUIRED)) { + crypto_debug("Peer needs PMF, but vdev is not capable"); + return false; + } + + return true; +} + +/* + * Convert an WAPI CIPHER suite to to an internal code. + */ +static int32_t wlan_crypto_wapi_suite_to_cipher(uint8_t *sel) +{ + uint32_t w = LE_READ_4(sel); + int32_t status = -1; + + switch (w) { + case (WLAN_WAPI_SEL(WLAN_CRYPTO_WAPI_SMS4_CIPHER)): + return WLAN_CRYPTO_CIPHER_WAPI_SMS4; + } + + return status; +} + +/* + * Convert an WAPI key management/authentication algorithm + * to an internal code. + */ +static int32_t wlan_crypto_wapi_keymgmt(u_int8_t *sel) +{ + uint32_t w = LE_READ_4(sel); + int32_t status = -1; + + switch (w) { + case (WLAN_WAPI_SEL(WLAN_WAI_PSK)): + return WLAN_CRYPTO_KEY_MGMT_WAPI_PSK; + case (WLAN_WAPI_SEL(WLAN_WAI_CERT_OR_SMS4)): + return WLAN_CRYPTO_KEY_MGMT_WAPI_CERT; + } + + return status; +} +/** + * wlan_crypto_wapiie_check - called by mlme to check the wapiie + * @crypto params: crypto params + * @iebuf: ie buffer + * + * This function gets called by mlme to check the contents of wapi is + * matching with given crypto params + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_wapiie_check(struct wlan_crypto_params *crypto_params, + uint8_t *frm) +{ + uint8_t len = frm[1]; + int32_t w; + int n; + + /* + * Check the length once for fixed parts: OUI, type, + * version, mcast cipher, and 2 selector counts. + * Other, variable-length data, must be checked separately. + */ + SET_AUTHMODE(crypto_params, WLAN_CRYPTO_AUTH_WAPI); + + if (len < WLAN_CRYPTO_WAPI_IE_LEN) + return QDF_STATUS_E_INVAL; + + + frm += 2; + + w = LE_READ_2(frm); + frm += 2, len -= 2; + if (w != WAPI_VERSION) + return QDF_STATUS_E_INVAL; + + n = LE_READ_2(frm); + frm += 2, len -= 2; + if (len < n*4+2) + return QDF_STATUS_E_INVAL; + + for (; n > 0; n--) { + w = wlan_crypto_wapi_keymgmt(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + + SET_KEY_MGMT(crypto_params, w); + frm += 4, len -= 4; + } + + /* unicast ciphers */ + n = LE_READ_2(frm); + frm += 2, len -= 2; + if (len < n*4+2) + return QDF_STATUS_E_INVAL; + + for (; n > 0; n--) { + w = wlan_crypto_wapi_suite_to_cipher(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + SET_UCAST_CIPHER(crypto_params, w); + frm += 4, len -= 4; + } + + if (!crypto_params->ucastcipherset) + return QDF_STATUS_E_INVAL; + + /* multicast/group cipher */ + w = wlan_crypto_wapi_suite_to_cipher(frm); + + if (w < 0) + return QDF_STATUS_E_INVAL; + + SET_MCAST_CIPHER(crypto_params, w); + frm += 4, len -= 4; + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_build_wapiie - called by mlme to build wapi ie + * @vdev: vdev + * @iebuf: ie buffer + * + * This function gets called by mlme to build wapi ie from given vdev + * + * Return: end of buffer + */ +uint8_t *wlan_crypto_build_wapiie(struct wlan_objmgr_vdev *vdev, + uint8_t *iebuf) +{ + uint8_t *frm; + uint8_t *selcnt; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + + frm = iebuf; + if (!frm) { + crypto_err("ie buffer NULL"); + return NULL; + } + + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, &crypto_priv); + + if (!crypto_params) { + crypto_err("crypto_params NULL"); + return NULL; + } + + *frm++ = WLAN_ELEMID_WAPI; + *frm++ = 0; + + WLAN_CRYPTO_ADDSHORT(frm, WAPI_VERSION); + + /* authenticator selector list */ + selcnt = frm; + WLAN_CRYPTO_ADDSHORT(frm, 0); + + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_WAPI_PSK)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + WLAN_WAPI_SEL(WLAN_WAI_PSK)); + } + + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_WAPI_CERT)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + WLAN_WAPI_SEL(WLAN_WAI_CERT_OR_SMS4)); + } + + /* unicast cipher list */ + selcnt = frm; + WLAN_CRYPTO_ADDSHORT(frm, 0); + + if (UCIPHER_IS_SMS4(crypto_params)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + WLAN_WAPI_SEL(WLAN_CRYPTO_WAPI_SMS4_CIPHER)); + } + + WLAN_CRYPTO_ADDSELECTOR(frm, + WLAN_WAPI_SEL(WLAN_CRYPTO_WAPI_SMS4_CIPHER)); + + /* optional capabilities */ + WLAN_CRYPTO_ADDSHORT(frm, crypto_params->rsn_caps); + + /* bkid count */ + if (vdev->vdev_mlme.vdev_opmode == QDF_STA_MODE || + vdev->vdev_mlme.vdev_opmode == QDF_P2P_CLIENT_MODE) + WLAN_CRYPTO_ADDSHORT(frm, 0); + + /* calculate element length */ + iebuf[1] = frm - iebuf - 2; + + return frm; + +} + +/** + * wlan_crypto_pn_check - called by data patch for PN check + * @vdev: vdev + * @wbuf: wbuf + * + * This function gets called by data patch for PN check + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_crypto_pn_check(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf){ + /* Need to check is there real requirement for this function + * as PN check is already handled in decap function. + */ + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_vdev_get_crypto_params - called by mlme to get crypto params + * @vdev:vdev + * + * This function gets called by mlme to get crypto params + * + * Return: wlan_crypto_params or NULL in case of failure + */ +struct wlan_crypto_params *wlan_crypto_vdev_get_crypto_params( + struct wlan_objmgr_vdev *vdev){ + struct wlan_crypto_comp_priv *crypto_priv; + + return wlan_crypto_vdev_get_comp_params(vdev, &crypto_priv); +} + +/** + * wlan_crypto_peer_get_crypto_params - called by mlme to get crypto params + * @peer:peer + * + * This function gets called by mlme to get crypto params + * + * Return: wlan_crypto_params or NULL in case of failure + */ +struct wlan_crypto_params *wlan_crypto_peer_get_crypto_params( + struct wlan_objmgr_peer *peer){ + struct wlan_crypto_comp_priv *crypto_priv; + + return wlan_crypto_peer_get_comp_params(peer, &crypto_priv); +} + + +QDF_STATUS wlan_crypto_set_peer_wep_keys(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer) +{ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_comp_priv *sta_crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + struct wlan_crypto_key *sta_key; + struct wlan_crypto_cipher *cipher_table; + struct wlan_objmgr_psoc *psoc; + uint8_t *mac_addr; + int i; + enum QDF_OPMODE opmode; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!vdev) + return QDF_STATUS_E_NULL_VALUE; + + if (!peer) { + crypto_debug("peer NULL"); + return QDF_STATUS_E_INVAL; + } + + opmode = wlan_vdev_mlme_get_opmode(vdev); + psoc = wlan_vdev_get_psoc(vdev); + + if (!psoc) { + crypto_err("psoc NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_peer_obj_lock(peer); + mac_addr = wlan_peer_get_macaddr(peer); + wlan_peer_obj_unlock(peer); + + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + /* push only valid static WEP keys from vap */ + if (AUTH_IS_8021X(crypto_params)) + return QDF_STATUS_E_INVAL; + + if (opmode == QDF_STA_MODE) { + peer = wlan_objmgr_vdev_try_get_bsspeer(vdev, WLAN_CRYPTO_ID); + if (!peer) { + crypto_debug("peer NULL"); + return QDF_STATUS_E_INVAL; + } + } + + wlan_crypto_peer_get_comp_params(peer, &sta_crypto_priv); + if (!sta_crypto_priv) { + crypto_err("sta priv is null"); + status = QDF_STATUS_E_INVAL; + goto exit; + } + + for (i = 0; i < WLAN_CRYPTO_MAXKEYIDX; i++) { + if (crypto_priv->key[i]) { + key = crypto_priv->key[i]; + if (!key || !key->valid) + continue; + + cipher_table = (struct wlan_crypto_cipher *) + key->cipher_table; + + if (cipher_table->cipher == WLAN_CRYPTO_CIPHER_WEP) { + sta_key = qdf_mem_malloc( + sizeof(struct wlan_crypto_key)); + if (!sta_key) { + status = QDF_STATUS_E_NOMEM; + goto exit; + } + + sta_crypto_priv->key[i] = sta_key; + qdf_mem_copy(sta_key, key, + sizeof(struct wlan_crypto_key)); + + sta_key->flags &= ~WLAN_CRYPTO_KEY_DEFAULT; + + if (crypto_priv->def_tx_keyid == i) { + sta_key->flags + |= WLAN_CRYPTO_KEY_DEFAULT; + sta_crypto_priv->def_tx_keyid = + crypto_priv->def_tx_keyid; + } + /* setting the broadcast/multicast key for sta*/ + if (opmode == QDF_STA_MODE || + opmode == QDF_IBSS_MODE){ + if (WLAN_CRYPTO_TX_OPS_SETKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_SETKEY(psoc)( + vdev, sta_key, mac_addr, + cipher_table->cipher); + } + } + + /* setting unicast key */ + sta_key->flags &= ~WLAN_CRYPTO_KEY_GROUP; + if (WLAN_CRYPTO_TX_OPS_SETKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_SETKEY(psoc)(vdev, + sta_key, mac_addr, + cipher_table->cipher); + } + } + } + } + +exit: + if (opmode == QDF_STA_MODE) + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + return status; +} + +/** + * wlan_crypto_register_crypto_rx_ops - set crypto_rx_ops + * @crypto_rx_ops: crypto_rx_ops + * + * This function gets called by object manger to register crypto rx ops. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_crypto_register_crypto_rx_ops( + struct wlan_lmac_if_crypto_rx_ops *crypto_rx_ops){ + crypto_rx_ops->crypto_encap = wlan_crypto_encap; + crypto_rx_ops->crypto_decap = wlan_crypto_decap; + crypto_rx_ops->crypto_enmic = wlan_crypto_enmic; + crypto_rx_ops->crypto_demic = wlan_crypto_demic; + crypto_rx_ops->set_peer_wep_keys = wlan_crypto_set_peer_wep_keys; + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_get_crypto_rx_ops - get crypto_rx_ops from psoc + * @psoc: psoc + * + * This function gets called by umac to get the crypto_rx_ops + * + * Return: crypto_rx_ops + */ +struct wlan_lmac_if_crypto_rx_ops *wlan_crypto_get_crypto_rx_ops( + struct wlan_objmgr_psoc *psoc) +{ + + return &(psoc->soc_cb.rx_ops.crypto_rx_ops); +} +qdf_export_symbol(wlan_crypto_get_crypto_rx_ops); + +/** + * wlan_crypto_vdev_has_auth_mode - check authmode for vdev + * @vdev: vdev + * @authvalue: authvalue to be checked + * + * This function check is authvalue passed is set in vdev or not + * + * Return: true or false + */ +bool wlan_crypto_vdev_has_auth_mode(struct wlan_objmgr_vdev *vdev, + wlan_crypto_auth_mode authvalue) +{ + return wlan_crypto_get_param(vdev, WLAN_CRYPTO_PARAM_AUTH_MODE) + & authvalue; +} +qdf_export_symbol(wlan_crypto_vdev_has_auth_mode); + +/** + * wlan_crypto_peer_has_auth_mode - check authmode for peer + * @peer: peer + * @authvalue: authvalue to be checked + * + * This function check is authvalue passed is set in peer or not + * + * Return: true or false + */ +bool wlan_crypto_peer_has_auth_mode(struct wlan_objmgr_peer *peer, + wlan_crypto_auth_mode authvalue) +{ + return wlan_crypto_get_peer_param(peer, WLAN_CRYPTO_PARAM_AUTH_MODE) + & authvalue; +} +qdf_export_symbol(wlan_crypto_peer_has_auth_mode); + +/** + * wlan_crypto_vdev_has_ucastcipher - check ucastcipher for vdev + * @vdev: vdev + * @ucastcipher: ucastcipher to be checked + * + * This function check is ucastcipher passed is set in vdev or not + * + * Return: true or false + */ +bool wlan_crypto_vdev_has_ucastcipher(struct wlan_objmgr_vdev *vdev, + wlan_crypto_cipher_type ucastcipher) +{ + return wlan_crypto_get_param(vdev, WLAN_CRYPTO_PARAM_UCAST_CIPHER) + & ucastcipher; +} +qdf_export_symbol(wlan_crypto_vdev_has_ucastcipher); + +/** + * wlan_crypto_peer_has_ucastcipher - check ucastcipher for peer + * @peer: peer + * @ucastcipher: ucastcipher to be checked + * + * This function check is ucastcipher passed is set in peer or not + * + * Return: true or false + */ +bool wlan_crypto_peer_has_ucastcipher(struct wlan_objmgr_peer *peer, + wlan_crypto_cipher_type ucastcipher) +{ + return wlan_crypto_get_peer_param(peer, WLAN_CRYPTO_PARAM_UCAST_CIPHER) + & ucastcipher; +} +qdf_export_symbol(wlan_crypto_peer_has_ucastcipher); + +/** + * wlan_crypto_vdev_has_mcastcipher - check mcastcipher for vdev + * @vdev: vdev + * @mcastcipher: mcastcipher to be checked + * + * This function check is mcastcipher passed is set in vdev or not + * + * Return: true or false + */ +bool wlan_crypto_vdev_has_mcastcipher(struct wlan_objmgr_vdev *vdev, + wlan_crypto_cipher_type mcastcipher) +{ + return wlan_crypto_get_param(vdev, WLAN_CRYPTO_PARAM_MCAST_CIPHER) + & mcastcipher; +} +qdf_export_symbol(wlan_crypto_vdev_has_mcastcipher); + +/** + * wlan_crypto_peer_has_mcastcipher - check mcastcipher for peer + * @peer: peer + * @mcastcipher: mcastcipher to be checked + * + * This function check is mcastcipher passed is set in peer or not + * + * Return: true or false + */ +bool wlan_crypto_peer_has_mcastcipher(struct wlan_objmgr_peer *peer, + wlan_crypto_cipher_type mcastcipher) +{ + return wlan_crypto_get_peer_param(peer, WLAN_CRYPTO_PARAM_MCAST_CIPHER) + & mcastcipher; +} +qdf_export_symbol(wlan_crypto_peer_has_mcastcipher); + +/** + * wlan_crypto_vdev_has_mgmtcipher - check mgmtcipher for vdev + * @vdev: vdev + * @mgmtcipher: mgmtcipher to be checked + * + * This function checks any one of mgmtciphers are supported by vdev or not. + * + * Return: true or false + */ +bool wlan_crypto_vdev_has_mgmtcipher(struct wlan_objmgr_vdev *vdev, + uint32_t mgmtcipher) +{ + return (wlan_crypto_get_param(vdev, WLAN_CRYPTO_PARAM_MGMT_CIPHER) + & mgmtcipher) != 0; +} + +qdf_export_symbol(wlan_crypto_vdev_has_mgmtcipher); + +/** + * wlan_crypto_peer_has_mgmtcipher - check mgmtcipher for peer + * @peer: peer + * @mgmtcipher: mgmtcipher to be checked + * + * This function checks any one of mgmtciphers are supported by peer or not + * + * Return: true or false + */ +bool wlan_crypto_peer_has_mgmtcipher(struct wlan_objmgr_peer *peer, + uint32_t mgmtcipher) +{ + return (wlan_crypto_get_peer_param(peer, WLAN_CRYPTO_PARAM_MGMT_CIPHER) + & mgmtcipher) != 0; +} + +qdf_export_symbol(wlan_crypto_peer_has_mgmtcipher); + +uint8_t wlan_crypto_get_peer_fils_aead(struct wlan_objmgr_peer *peer) +{ + struct wlan_crypto_comp_priv *crypto_priv = NULL; + + if (!peer) { + crypto_err("Invalid Input"); + return 0; + } + + crypto_priv = wlan_get_peer_crypto_obj(peer); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return 0; + } + + return crypto_priv->fils_aead_set; +} + +void +wlan_crypto_set_peer_fils_aead(struct wlan_objmgr_peer *peer, uint8_t value) +{ + struct wlan_crypto_comp_priv *crypto_priv = NULL; + + if (!peer) { + crypto_err("Invalid Input"); + return; + } + + crypto_priv = wlan_get_peer_crypto_obj(peer); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return; + } + + crypto_priv->fils_aead_set = value; +} + +/** + * wlan_crypto_get_key_header - get header length + * @key: key + * + * This function gets header length based on keytype + * + * Return: header length + */ +uint8_t wlan_crypto_get_key_header(struct wlan_crypto_key *key) +{ + struct wlan_crypto_cipher *cipher_table; + + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + if (cipher_table) + return cipher_table->header; + else + return 0; +} + +qdf_export_symbol(wlan_crypto_get_key_header); + +/** + * wlan_crypto_get_key_trailer - get cipher trailer length + * @key: key + * + * This function gets cipher trailer length based on keytype + * + * Return: cipher trailer length + */ +uint8_t wlan_crypto_get_key_trailer(struct wlan_crypto_key *key) +{ + struct wlan_crypto_cipher *cipher_table; + + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + if (cipher_table) + return cipher_table->trailer; + else + return 0; +} + +qdf_export_symbol(wlan_crypto_get_key_trailer); + +/** + * wlan_crypto_get_key_miclen - get cipher miclen length + * @key: key + * + * This function gets cipher miclen length based on keytype + * + * Return: cipher miclen length + */ +uint8_t wlan_crypto_get_key_miclen(struct wlan_crypto_key *key) +{ + struct wlan_crypto_cipher *cipher_table; + + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + if (cipher_table) + return cipher_table->miclen; + else + return 0; +} + +qdf_export_symbol(wlan_crypto_get_key_miclen); + +/** + * wlan_crypto_get_keyid - get keyid from frame + * @data: frame + * + * This function parse frame and returns keyid + * + * Return: keyid + */ +uint16_t wlan_crypto_get_keyid(uint8_t *data, int hdrlen) +{ + struct wlan_frame_hdr *hdr = (struct wlan_frame_hdr *)data; + uint8_t *iv; + uint8_t stype = WLAN_FC0_GET_STYPE(hdr->i_fc[0]); + + /* + * In FILS SK (Re)Association request/response frame has + * to be decrypted + */ + if ((stype == WLAN_FC0_STYPE_ASSOC_REQ) || + (stype == WLAN_FC0_STYPE_REASSOC_REQ) || + (stype == WLAN_FC0_STYPE_ASSOC_RESP) || + (stype == WLAN_FC0_STYPE_REASSOC_RESP)) { + return 0; + } + + if (hdr->i_fc[1] & WLAN_FC1_ISWEP) { + iv = data + hdrlen; + /* + * iv[3] is the Key ID octet in the CCMP/TKIP/WEP headers + * Bits 6–7 of the Key ID octet are for the Key ID subfield + */ + return ((iv[3] >> 6) & 0x3); + } else { + return WLAN_CRYPTO_KEYIX_NONE; + } +} + +qdf_export_symbol(wlan_crypto_get_keyid); + +/** + * crypto_plumb_peer_keys - called during radio reset + * @vdev: vdev + * @object: peer + * @arg: psoc + * + * Restore unicast and persta hardware keys + * + * Return: void + */ +static void crypto_plumb_peer_keys(struct wlan_objmgr_vdev *vdev, + void *object, void *arg) { + struct wlan_objmgr_peer *peer = (struct wlan_objmgr_peer *)object; + struct wlan_objmgr_psoc *psoc = (struct wlan_objmgr_psoc *)arg; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key = NULL; + int i; + + if ((!peer) || (!vdev) || (!psoc)) { + crypto_err("Peer or vdev or psoc objects are null!"); + return; + } + + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return; + } + + for (i = 0; i < WLAN_CRYPTO_MAXKEYIDX; i++) { + key = crypto_priv->key[i]; + if (key && key->valid) { + if (WLAN_CRYPTO_TX_OPS_SETKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_SETKEY(psoc) + ( + vdev, + key, + wlan_peer_get_macaddr(peer), + wlan_crypto_get_key_type(key) + ); + } + } + } +} + +/** + * wlan_crypto_restore_keys - called during radio reset + * @vdev: vdev + * + * Clear and restore keycache, needed for some DA chipsets which put + * random values in keycache when phy reset is triggered + * + * Return: void + */ +void wlan_crypto_restore_keys(struct wlan_objmgr_vdev *vdev) +{ + int i; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + uint8_t macaddr[QDF_MAC_ADDR_SIZE] = + {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_objmgr_psoc *psoc = NULL; + + pdev = wlan_vdev_get_pdev(vdev); + psoc = wlan_vdev_get_psoc(vdev); + if (!pdev) { + crypto_err("pdev is NULL"); + return; + } + if (!psoc) { + crypto_err("psoc is NULL"); + return; + } + + /* TBD: QWRAP key restore*/ + /* crypto is on */ + if (wlan_vdev_mlme_feat_cap_get(vdev, WLAN_VDEV_F_PRIVACY)) { + /* restore static shared keys */ + for (i = 0; i < WLAN_CRYPTO_MAXKEYIDX; i++) { + crypto_params = wlan_crypto_vdev_get_comp_params + ( + vdev, + &crypto_priv + ); + if (!crypto_priv) { + crypto_err("crypto_priv is NULL"); + return; + } + key = crypto_priv->key[i]; + if (key && key->valid) { + if (WLAN_CRYPTO_TX_OPS_SETKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_SETKEY(psoc) + ( + vdev, + key, + macaddr, + wlan_crypto_get_key_type(key) + ); + } + } + } + + wlan_objmgr_iterate_peerobj_list(vdev, + crypto_plumb_peer_keys, + psoc, + WLAN_CRYPTO_ID); + } +} + +/** + * wlan_crypto_check_open_none - called by ucfg to check for open security + * @psoc: psoc pointer + * @vdev_id: vdev id + * + * This function gets called from ucfg to check open security. + * + * Return: true or false + */ +bool wlan_crypto_check_open_none(struct wlan_objmgr_psoc *psoc, uint8_t vdev_id) +{ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_objmgr_vdev *vdev; + bool match = true; + + if (!psoc) { + crypto_err("PSOC is NULL"); + return false; + } + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, + WLAN_CRYPTO_ID); + if (!vdev) { + crypto_err("vdev is NULL"); + return false; + } + + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + match = false; + goto send_res; + } + + crypto_params = &crypto_priv->crypto_params; + + if (crypto_params->mcastcipherset != WLAN_CRYPTO_CIPHER_NONE) { + match = false; + goto send_res; + } + + if ((crypto_params->authmodeset != WLAN_CRYPTO_AUTH_AUTO) && + (crypto_params->authmodeset != WLAN_CRYPTO_AUTH_NONE)) + match = false; + +send_res: + wlan_objmgr_vdev_release_ref(vdev, WLAN_CRYPTO_ID); + + return match; +} + +/** + * wlan_crypto_check_wep - called by ucfg to check for WEP security + * @psoc: psoc pointer + * @vdev_id: vdev id + * + * This function gets called from ucfg to check WEP security. + * + * Return: true or false + */ +bool wlan_crypto_check_wep(struct wlan_objmgr_psoc *psoc, uint8_t vdev_id) +{ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_objmgr_vdev *vdev; + bool match = true; + + if (!psoc) { + crypto_err("PSOC is NULL"); + return false; + } + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, + WLAN_CRYPTO_ID); + if (!vdev) { + crypto_err("vdev is NULL"); + return false; + } + + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + match = false; + goto send_res; + } + + crypto_params = &crypto_priv->crypto_params; + + if ((crypto_params->ucastcipherset != WLAN_CRYPTO_CIPHER_WEP) && + (crypto_params->ucastcipherset != WLAN_CRYPTO_CIPHER_WEP_40) && + (crypto_params->ucastcipherset != WLAN_CRYPTO_CIPHER_WEP_104)) { + match = false; + goto send_res; + } + if ((crypto_params->mcastcipherset != WLAN_CRYPTO_CIPHER_WEP) && + (crypto_params->mcastcipherset != WLAN_CRYPTO_CIPHER_WEP_40) && + (crypto_params->mcastcipherset != WLAN_CRYPTO_CIPHER_WEP_104)) { + match = false; + goto send_res; + } + if (crypto_params->ucastcipherset != crypto_params->mcastcipherset) { + match = false; + goto send_res; + } + if ((crypto_params->authmodeset != WLAN_CRYPTO_AUTH_AUTO) && + (crypto_params->authmodeset != WLAN_CRYPTO_AUTH_OPEN) && + (crypto_params->authmodeset != WLAN_CRYPTO_AUTH_SHARED)) { + match = false; + } +send_res: + wlan_objmgr_vdev_release_ref(vdev, WLAN_CRYPTO_ID); + + return match; +} + +static QDF_STATUS +wlan_get_crypto_params_from_rsn_ie(struct wlan_crypto_params *crypto_params, + uint8_t *ie_ptr, uint16_t ie_len) +{ + const uint8_t *rsn_ie = NULL; + QDF_STATUS status; + + qdf_mem_zero(crypto_params, sizeof(struct wlan_crypto_params)); + rsn_ie = wlan_get_ie_ptr_from_eid(WLAN_ELEMID_RSN, ie_ptr, ie_len); + if (!rsn_ie) { + crypto_debug("RSN IE not present"); + return QDF_STATUS_E_INVAL; + } + + status = wlan_crypto_rsnie_check(crypto_params, (uint8_t *)rsn_ie); + if (QDF_STATUS_SUCCESS != status) { + crypto_err("RSN IE check failed"); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +wlan_get_crypto_params_from_wpa_ie(struct wlan_crypto_params *crypto_params, + uint8_t *ie_ptr, uint16_t ie_len) +{ + const uint8_t *wpa_ie = NULL; + uint32_t wpa_oui; + QDF_STATUS status; + + qdf_mem_zero(crypto_params, sizeof(struct wlan_crypto_params)); + + wpa_oui = WLAN_WPA_SEL(WLAN_WPA_OUI_TYPE); + wpa_ie = wlan_get_vendor_ie_ptr_from_oui((uint8_t *)&wpa_oui, + WLAN_OUI_SIZE, ie_ptr, ie_len); + if (!wpa_ie) { + crypto_debug("WPA IE not present"); + return QDF_STATUS_E_INVAL; + } + + status = wlan_crypto_wpaie_check(crypto_params, (uint8_t *)wpa_ie); + if (QDF_STATUS_SUCCESS != status) { + crypto_err("WPA IE check failed"); + return status; + } + + return QDF_STATUS_SUCCESS; +} +/** + * wlan_crypto_check_rsn_match - called by ucfg to check for RSN match + * @psoc: psoc pointer + * @vdev_id: vdev id + * @ie_ptr: pointer to IEs + * @ie_len: IE length + * @peer_crypto_params: return peer crypto parameters + * + * This function gets called from ucfg to check RSN match. + * + * Return: true or false + */ +bool wlan_crypto_check_rsn_match(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint8_t *ie_ptr, + uint16_t ie_len, struct wlan_crypto_params * + peer_crypto_params) +{ + struct wlan_objmgr_vdev *vdev; + bool match = true; + QDF_STATUS status; + + if (!psoc) { + crypto_err("PSOC is NULL"); + return false; + } + status = wlan_get_crypto_params_from_rsn_ie(peer_crypto_params, + ie_ptr, ie_len); + if (QDF_STATUS_SUCCESS != status) { + crypto_err("get crypto prarams from RSN IE failed"); + return false; + } + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, + WLAN_CRYPTO_ID); + if (!vdev) { + crypto_err("vdev is NULL"); + return false; + } + + match = wlan_crypto_rsn_info(vdev, peer_crypto_params); + + wlan_objmgr_vdev_release_ref(vdev, WLAN_CRYPTO_ID); + + return match; +} + +/** + * wlan_crypto_check_wpa_match - called by ucfg to check for WPA match + * @psoc: psoc pointer + * @vdev_id: vdev id + * @ie_ptr: pointer to IEs + * @ie_len: IE length + * @peer_crypto_params: return peer crypto parameters + * + * This function gets called from ucfg to check WPA match. + * + * Return: true or false + */ +bool wlan_crypto_check_wpa_match(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint8_t *ie_ptr, + uint16_t ie_len, struct wlan_crypto_params * + peer_crypto_params) +{ + struct wlan_objmgr_vdev *vdev; + bool match = true; + QDF_STATUS status; + + if (!psoc) { + crypto_err("PSOC is NULL"); + return false; + } + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, + WLAN_CRYPTO_ID); + if (!vdev) { + crypto_err("vdev is NULL"); + return false; + } + + status = wlan_get_crypto_params_from_wpa_ie(peer_crypto_params, + ie_ptr, ie_len); + if (QDF_STATUS_SUCCESS != status) { + crypto_err("get crypto prarams from WPA IE failed"); + match = false; + goto send_res; + } + match = wlan_crypto_rsn_info(vdev, peer_crypto_params); + +send_res: + wlan_objmgr_vdev_release_ref(vdev, WLAN_CRYPTO_ID); + + return match; +} + + +static void +wlan_crypto_merge_prarams(struct wlan_crypto_params *dst_params, + struct wlan_crypto_params *src_params) +{ + dst_params->authmodeset |= src_params->authmodeset; + dst_params->ucastcipherset |= src_params->ucastcipherset; + dst_params->mcastcipherset |= src_params->mcastcipherset; + dst_params->mgmtcipherset |= src_params->mgmtcipherset; + dst_params->cipher_caps |= src_params->cipher_caps; + dst_params->key_mgmt |= src_params->key_mgmt; + dst_params->rsn_caps |= src_params->rsn_caps; +} + +static void +wlan_crypto_reset_prarams(struct wlan_crypto_params *params) +{ + params->authmodeset = 0; + params->ucastcipherset = 0; + params->mcastcipherset = 0; + params->mgmtcipherset = 0; + params->key_mgmt = 0; + params->rsn_caps = 0; +} + +uint8_t * +wlan_crypto_parse_rsnxe_ie(uint8_t *rsnxe_ie, uint8_t *cap_len) +{ + uint8_t len; + uint8_t *ie; + + if (!rsnxe_ie) + return NULL; + + ie = rsnxe_ie; + len = ie[1]; + ie += 2; + + if (!len) + return NULL; + + *cap_len = ie[0] & 0xf; + + return ie; +} + +QDF_STATUS wlan_set_vdev_crypto_prarams_from_ie(struct wlan_objmgr_vdev *vdev, + uint8_t *ie_ptr, + uint16_t ie_len) +{ + struct wlan_crypto_params crypto_params; + QDF_STATUS status; + struct wlan_crypto_params *vdev_crypto_params; + struct wlan_crypto_comp_priv *crypto_priv; + bool send_fail = false; + + if (!vdev) { + crypto_err("VDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!ie_ptr) { + crypto_err("IE ptr is NULL"); + return QDF_STATUS_E_FAILURE; + } + + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_FAILURE; + } + + vdev_crypto_params = &crypto_priv->crypto_params; + + wlan_crypto_reset_prarams(vdev_crypto_params); + status = wlan_get_crypto_params_from_rsn_ie(&crypto_params, + ie_ptr, ie_len); + if (QDF_IS_STATUS_SUCCESS(status)) + wlan_crypto_merge_prarams(vdev_crypto_params, &crypto_params); + else + send_fail = true; + + status = wlan_get_crypto_params_from_wpa_ie(&crypto_params, + ie_ptr, ie_len); + if (QDF_IS_STATUS_SUCCESS(status)) { + wlan_crypto_merge_prarams(vdev_crypto_params, &crypto_params); + send_fail = false; + } + + return send_fail ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS; +} + +int8_t wlan_crypto_get_default_key_idx(struct wlan_objmgr_vdev *vdev, bool igtk) +{ + struct wlan_crypto_comp_priv *crypto_priv; + + crypto_priv = wlan_get_vdev_crypto_obj(vdev); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (igtk) + return crypto_priv->def_igtk_tx_keyid; + else + return crypto_priv->def_tx_keyid; +} + +enum wlan_crypto_cipher_type +wlan_crypto_get_cipher(struct wlan_objmgr_vdev *vdev, + bool pairwise, uint8_t key_index) +{ + struct wlan_crypto_key *crypto_key; + + crypto_key = wlan_crypto_get_key(vdev, key_index); + + if (crypto_key) + return crypto_key->cipher_type; + else + return WLAN_CRYPTO_CIPHER_INVALID; +} + +#ifdef CRYPTO_SET_KEY_CONVERGED +QDF_STATUS wlan_crypto_validate_key_params(enum wlan_crypto_cipher_type cipher, + uint8_t key_index, uint8_t key_len, + uint8_t seq_len) +{ + if (key_index >= (WLAN_CRYPTO_MAXKEYIDX + WLAN_CRYPTO_MAXIGTKKEYIDX)) { + crypto_err("Invalid Key index %d", key_index); + return QDF_STATUS_E_INVAL; + } + if (cipher == WLAN_CRYPTO_CIPHER_INVALID) { + crypto_err("Invalid Cipher %d", cipher); + return QDF_STATUS_E_INVAL; + } + if ((!(cipher == WLAN_CRYPTO_CIPHER_AES_CMAC || + cipher == WLAN_CRYPTO_CIPHER_AES_CMAC_256 || + cipher == WLAN_CRYPTO_CIPHER_AES_GMAC || + cipher == WLAN_CRYPTO_CIPHER_AES_GMAC_256)) && + (key_index >= WLAN_CRYPTO_MAXKEYIDX)) { + crypto_err("Invalid key index %d for cipher %d", + key_index, cipher); + return QDF_STATUS_E_INVAL; + } + if (key_len > (WLAN_CRYPTO_KEYBUF_SIZE + WLAN_CRYPTO_MICBUF_SIZE)) { + crypto_err("Invalid key length %d", key_len); + return QDF_STATUS_E_INVAL; + } + + if (seq_len > WLAN_CRYPTO_RSC_SIZE) { + crypto_err("Invalid seq length %d", seq_len); + return QDF_STATUS_E_INVAL; + } + + crypto_debug("key: idx:%d, len:%d, seq len:%d", + key_index, key_len, seq_len); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_crypto_save_key(struct wlan_objmgr_vdev *vdev, + uint8_t key_index, + struct wlan_crypto_key *crypto_key) +{ + struct wlan_crypto_comp_priv *crypto_priv; + + crypto_priv = wlan_get_vdev_crypto_obj(vdev); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_FAILURE; + } + if (key_index >= (WLAN_CRYPTO_MAXKEYIDX + WLAN_CRYPTO_MAXIGTKKEYIDX)) { + crypto_err("Invalid Key index %d", key_index); + return QDF_STATUS_E_FAILURE; + } + if (key_index < WLAN_CRYPTO_MAXKEYIDX) { + crypto_priv->key[key_index] = crypto_key; + } else { + crypto_priv->igtk_key[key_index - WLAN_CRYPTO_MAXKEYIDX] = + crypto_key; + crypto_priv->def_igtk_tx_keyid = + key_index - WLAN_CRYPTO_MAXKEYIDX; + crypto_priv->igtk_key_type = crypto_key->cipher_type; + } + + return QDF_STATUS_SUCCESS; +} + +struct wlan_crypto_key *wlan_crypto_get_key(struct wlan_objmgr_vdev *vdev, + uint8_t key_index) +{ + struct wlan_crypto_comp_priv *crypto_priv; + + crypto_priv = wlan_get_vdev_crypto_obj(vdev); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return NULL; + } + if (key_index >= (WLAN_CRYPTO_MAXKEYIDX + WLAN_CRYPTO_MAXIGTKKEYIDX)) { + crypto_err("Invalid Key index %d", key_index); + return NULL; + } + if (key_index < WLAN_CRYPTO_MAXKEYIDX) + return crypto_priv->key[key_index]; + + return crypto_priv->igtk_key[key_index - WLAN_CRYPTO_MAXKEYIDX]; +} + +QDF_STATUS wlan_crypto_set_key_req(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_key *req, + enum wlan_crypto_key_type key_type) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_vdev_get_psoc(vdev); + if (psoc && WLAN_CRYPTO_TX_OPS_SET_KEY(psoc)) + WLAN_CRYPTO_TX_OPS_SET_KEY(psoc)(vdev, req, key_type); + else + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +void wlan_crypto_update_set_key_peer(struct wlan_objmgr_vdev *vdev, + bool pairwise, uint8_t key_index, + struct qdf_mac_addr *peer_mac) +{ + struct wlan_crypto_key *crypto_key; + + crypto_key = wlan_crypto_get_key(vdev, key_index); + if (!crypto_key) { + crypto_err("crypto_key not present for key_idx %d", key_index); + return; + } + + qdf_mem_copy(crypto_key->macaddr, peer_mac, QDF_MAC_ADDR_SIZE); +} + +#if defined(WLAN_SAE_SINGLE_PMK) && defined(WLAN_FEATURE_ROAM_OFFLOAD) +void wlan_crypto_selective_clear_sae_single_pmk_entries( + struct wlan_objmgr_vdev *vdev, + struct qdf_mac_addr *conn_bssid) +{ + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_comp_priv *crypto_priv; + int i; + + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return; + } + + crypto_params = &crypto_priv->crypto_params; + + for (i = 0; i < WLAN_CRYPTO_MAX_PMKID; i++) { + if (!crypto_params->pmksa[i]) + continue; + + if (crypto_params->pmksa[i]->single_pmk_supported && + !qdf_is_macaddr_equal(conn_bssid, + &crypto_params->pmksa[i]->bssid)) { + qdf_mem_zero(crypto_params->pmksa[i], + sizeof(struct wlan_crypto_pmksa)); + qdf_mem_free(crypto_params->pmksa[i]); + crypto_params->pmksa[i] = NULL; + } + } +} + +void wlan_crypto_set_sae_single_pmk_bss_cap(struct wlan_objmgr_vdev *vdev, + struct qdf_mac_addr *bssid, + bool single_pmk_capable_bss) +{ + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_comp_priv *crypto_priv; + int i; + + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return; + } + + crypto_params = &crypto_priv->crypto_params; + + for (i = 0; i < WLAN_CRYPTO_MAX_PMKID; i++) { + if (!crypto_params->pmksa[i]) + continue; + + if (qdf_is_macaddr_equal(bssid, + &crypto_params->pmksa[i]->bssid)) + crypto_params->pmksa[i]->single_pmk_supported = + single_pmk_capable_bss; + } +} +#endif + +void wlan_crypto_reset_vdev_params(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_crypto_comp_priv *crypto_priv; + + crypto_debug("reset params for vdev %d", wlan_vdev_get_id(vdev)); + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return; + } + + wlan_crypto_reset_prarams(&crypto_priv->crypto_params); +} + +QDF_STATUS wlan_crypto_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + if (psoc && WLAN_CRYPTO_TX_OPS_REGISTER_EVENTS(psoc)) + return WLAN_CRYPTO_TX_OPS_REGISTER_EVENTS(psoc)(psoc); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wlan_crypto_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + if (psoc && WLAN_CRYPTO_TX_OPS_DEREGISTER_EVENTS(psoc)) + return WLAN_CRYPTO_TX_OPS_DEREGISTER_EVENTS(psoc)(psoc); + + return QDF_STATUS_E_FAILURE; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_main.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_main.c new file mode 100644 index 0000000000000000000000000000000000000000..2ae5adca43547cf752067a9b920661808d318a2d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_main.c @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public API intialization of crypto service with object manager + */ +#include +#include "wlan_crypto_main_i.h" +#include "wlan_crypto_main.h" + +/** + * wlan_crypto_init - Init the crypto service with object manager + * Called from umac init context. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_init(void) +{ + return __wlan_crypto_init(); +} + +/** + * wlan_crypto_deinit - Deinit the crypto service with object manager + * Called from umac deinit context. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_deinit(void) +{ + return __wlan_crypto_deinit(); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_main_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_main_i.h new file mode 100644 index 0000000000000000000000000000000000000000..49aa80a19bba9d9086d4ddbc17503e939326e08f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_main_i.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Private API for crypto service with object manager handler + */ +#ifndef _WLAN_CRYPTO_MAIN_I_H_ +#define _WLAN_CRYPTO_MAIN_I_H_ + +/** + * wlan_crypto_init - Init the crypto service with object manager + * Called from umac init context. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS __wlan_crypto_init(void); + +/** + * wlan_crypto_deinit - Deinit the crypto service with object manager + * Called from umac deinit context. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS __wlan_crypto_deinit(void); + + +#endif /* end of _WLAN_CRYPTO_MAIN_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_obj_mgr.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_obj_mgr.c new file mode 100644 index 0000000000000000000000000000000000000000..91d163b7ff44c5ffd0e0531d2674fbfddaddf281 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_obj_mgr.c @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public API intialization of crypto service with object manager + */ +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "wlan_crypto_global_def.h" +#include "wlan_crypto_global_api.h" +#include "wlan_crypto_def_i.h" +#include "wlan_crypto_main_i.h" +#include "wlan_crypto_obj_mgr_i.h" +#ifdef WLAN_CRYPTO_SUPPORT_FILS +#include "wlan_crypto_fils_api.h" +#endif + + +extern const struct wlan_crypto_cipher + *wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_MAX]; + +static QDF_STATUS wlan_crypto_register_all_ciphers( + struct wlan_crypto_params *crypto_param) +{ + + if (HAS_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_WEP)) { + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_WEP] + = wep_register(); + } + if (HAS_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_TKIP_MIC)) { + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_TKIP] + = tkip_register(); + } + if (HAS_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_AES)) { + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_AES_CCM] + = ccmp_register(); + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_AES_CCM_256] + = ccmp256_register(); + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_AES_GCM] + = gcmp_register(); + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_AES_GCM_256] + = gcmp256_register(); + } + if (HAS_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_WAPI_SMS4)) { + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_WAPI_SMS4] + = wapi_register(); + } + if (HAS_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_FILS_AEAD)) { + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_FILS_AEAD] + = fils_register(); + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wlan_crypto_vdev_obj_create_handler( + struct wlan_objmgr_vdev *vdev, + void *arg) +{ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_objmgr_pdev *pdev; + struct wlan_crypto_params *crypto_param; + QDF_STATUS status; + + if (!vdev) + return QDF_STATUS_E_INVAL; + + crypto_priv = qdf_mem_malloc(sizeof(struct wlan_crypto_comp_priv)); + if (!crypto_priv) + return QDF_STATUS_E_NOMEM; + + crypto_param = &(crypto_priv->crypto_params); + + RESET_AUTHMODE(crypto_param); + RESET_UCAST_CIPHERS(crypto_param); + RESET_MCAST_CIPHERS(crypto_param); + RESET_MGMT_CIPHERS(crypto_param); + RESET_KEY_MGMT(crypto_param); + RESET_CIPHER_CAP(crypto_param); + + pdev = wlan_vdev_get_pdev(vdev); + wlan_pdev_obj_lock(pdev); + if (wlan_pdev_nif_fw_cap_get(pdev, WLAN_SOC_C_WEP)) + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_WEP); + if (wlan_pdev_nif_fw_cap_get(pdev, WLAN_SOC_C_TKIP)) + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_TKIP_MIC); + if (wlan_pdev_nif_fw_cap_get(pdev, WLAN_SOC_C_AES)) { + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_AES); + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_CCM256); + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_GCM); + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_GCM_256); + } + if (wlan_pdev_nif_fw_cap_get(pdev, WLAN_SOC_C_CKIP)) + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_CKIP); + if (wlan_pdev_nif_fw_cap_get(pdev, WLAN_SOC_C_WAPI)) + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_WAPI_SMS4); + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_FILS_AEAD); + wlan_pdev_obj_unlock(pdev); + /* update the crypto cipher table based on the fw caps*/ + /* update the fw_caps into ciphercaps then attach to objmgr*/ + wlan_crypto_register_all_ciphers(crypto_param); + + status = wlan_objmgr_vdev_component_obj_attach(vdev, + WLAN_UMAC_COMP_CRYPTO, + (void *)crypto_priv, + QDF_STATUS_SUCCESS); + if (status != QDF_STATUS_SUCCESS) + qdf_mem_free(crypto_priv); + + return status; +} + +static QDF_STATUS wlan_crypto_peer_obj_create_handler( + struct wlan_objmgr_peer *peer, + void *arg) +{ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_param; + QDF_STATUS status; + + if (!peer) + return QDF_STATUS_E_INVAL; + + crypto_priv = qdf_mem_malloc(sizeof(struct wlan_crypto_comp_priv)); + if (!crypto_priv) + return QDF_STATUS_E_NOMEM; + + status = wlan_objmgr_peer_component_obj_attach(peer, + WLAN_UMAC_COMP_CRYPTO, (void *)crypto_priv, + QDF_STATUS_SUCCESS); + + if (status == QDF_STATUS_SUCCESS) { + crypto_param = &crypto_priv->crypto_params; + RESET_AUTHMODE(crypto_param); + RESET_UCAST_CIPHERS(crypto_param); + RESET_MCAST_CIPHERS(crypto_param); + RESET_MGMT_CIPHERS(crypto_param); + RESET_KEY_MGMT(crypto_param); + RESET_CIPHER_CAP(crypto_param); + if (wlan_vdev_get_selfpeer(peer->peer_objmgr.vdev) != peer) { + wlan_crypto_set_peer_wep_keys( + wlan_peer_get_vdev(peer), peer); + } + } else { + crypto_err("peer obj failed status %d", status); + qdf_mem_free(crypto_priv); + } + + return status; +} + +static void wlan_crypto_free_key(struct wlan_crypto_comp_priv *crypto_priv) +{ + uint8_t i; + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return; + } + + for (i = 0; i < WLAN_CRYPTO_MAXKEYIDX; i++) { + if (crypto_priv->key[i]) { + qdf_mem_free(crypto_priv->key[i]); + crypto_priv->key[i] = NULL; + } + } + + for (i = 0; i < WLAN_CRYPTO_MAXIGTKKEYIDX; i++) { + if (crypto_priv->igtk_key[i]) { + qdf_mem_free(crypto_priv->igtk_key[i]); + crypto_priv->igtk_key[i] = NULL; + } + } + +} + +#ifdef CRYPTO_SET_KEY_CONVERGED +void wlan_crypto_free_vdev_key(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_crypto_comp_priv *crypto_priv; + + crypto_debug("free key for vdev %d", wlan_vdev_get_id(vdev)); + crypto_priv = wlan_get_vdev_crypto_obj(vdev); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return; + } + + wlan_crypto_free_key(crypto_priv); +} +#endif + +static QDF_STATUS wlan_crypto_vdev_obj_destroy_handler( + struct wlan_objmgr_vdev *vdev, + void *arg){ + struct wlan_crypto_comp_priv *crypto_priv; + + if (!vdev) { + crypto_err("Vdev NULL"); + return QDF_STATUS_E_INVAL; + } + + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + wlan_objmgr_vdev_component_obj_detach(vdev, + WLAN_UMAC_COMP_CRYPTO, + (void *)crypto_priv); + + wlan_crypto_pmksa_flush(&crypto_priv->crypto_params); + wlan_crypto_free_key(crypto_priv); + qdf_mem_free(crypto_priv); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wlan_crypto_peer_obj_destroy_handler( + struct wlan_objmgr_peer *peer, + void *arg){ + struct wlan_crypto_comp_priv *crypto_priv; + + if (!peer) { + crypto_err("Peer NULL"); + return QDF_STATUS_E_INVAL; + } + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_peer_crypto_obj(peer); + if (!crypto_priv) { + crypto_err("crypto_priv NULL"); + return QDF_STATUS_E_INVAL; + } + + wlan_objmgr_peer_component_obj_detach(peer, + WLAN_UMAC_COMP_CRYPTO, + (void *)crypto_priv); + wlan_crypto_free_key(crypto_priv); + qdf_mem_free(crypto_priv); + + return QDF_STATUS_SUCCESS; +} +/** + * __wlan_crypto_init - Init the crypto service with object manager + * Called from crypto init context. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS __wlan_crypto_init(void) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_register_vdev_create_handler( + WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_vdev_obj_create_handler, NULL); + if (status != QDF_STATUS_SUCCESS) + return status; + + status = wlan_objmgr_register_peer_create_handler( + WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_peer_obj_create_handler, NULL); + if (status != QDF_STATUS_SUCCESS) + goto err_peer_create; + + status = wlan_objmgr_register_vdev_destroy_handler( + WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_vdev_obj_destroy_handler, NULL); + if (status != QDF_STATUS_SUCCESS) + goto err_vdev_delete; + + status = wlan_objmgr_register_peer_destroy_handler( + WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_peer_obj_destroy_handler, NULL); + if (status != QDF_STATUS_SUCCESS) + goto err_peer_delete; + + goto register_success; +err_peer_delete: + wlan_objmgr_unregister_vdev_destroy_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_vdev_obj_destroy_handler, NULL); +err_vdev_delete: + wlan_objmgr_unregister_peer_create_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_peer_obj_create_handler, NULL); +err_peer_create: + wlan_objmgr_unregister_vdev_create_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_vdev_obj_create_handler, NULL); + +register_success: + return status; +} + +/** + * __wlan_crypto_deinit - Deinit the crypto service with object manager + * Called from crypto context. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS __wlan_crypto_deinit(void) +{ + + if (wlan_objmgr_unregister_vdev_create_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_vdev_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + if (wlan_objmgr_unregister_peer_create_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_peer_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + if (wlan_objmgr_unregister_vdev_destroy_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_vdev_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + if (wlan_objmgr_unregister_peer_destroy_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_peer_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_obj_mgr_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_obj_mgr_i.h new file mode 100644 index 0000000000000000000000000000000000000000..f10d50d55e654a8bb148d6b7c6446d556357a889 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_obj_mgr_i.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public API intialization of crypto service with object manager + */ + +#ifndef __WLAN_CRYPTO_OBJ_MGR_I_ +#define __WLAN_CRYPTO_OBJ_MGR_I_ + +#ifdef WLAN_CRYPTO_WEP_OS_DERIVATIVE +static inline const struct wlan_crypto_cipher *wep_register(void) +{ + return NULL; +} +#else +const struct wlan_crypto_cipher *wep_register(void); +#endif + +#ifdef WLAN_CRYPTO_TKIP_OS_DERIVATIVE +static inline const struct wlan_crypto_cipher *tkip_register(void) +{ + return NULL; +} +#else +const struct wlan_crypto_cipher *tkip_register(void); +#endif + +#ifdef WLAN_CRYPTO_CCMP_OS_DERIVATIVE +static inline const struct wlan_crypto_cipher *ccmp_register(void) +{ + return NULL; +} + +static inline const struct wlan_crypto_cipher *ccmp256_register(void) +{ + return NULL; +} +#else +const struct wlan_crypto_cipher *ccmp_register(void); +const struct wlan_crypto_cipher *ccmp256_register(void); +#endif + +#ifdef WLAN_CRYPTO_GCMP_OS_DERIVATIVE +static inline const struct wlan_crypto_cipher *gcmp_register(void) +{ + return NULL; +} + +static inline const struct wlan_crypto_cipher *gcmp256_register(void) +{ + return NULL; +} +#else +const struct wlan_crypto_cipher *gcmp_register(void); +const struct wlan_crypto_cipher *gcmp256_register(void); +#endif + +#ifdef WLAN_CRYPTO_WAPI_OS_DERIVATIVE +static inline const struct wlan_crypto_cipher *wapi_register(void) +{ + return NULL; +} +#else +const struct wlan_crypto_cipher *wapi_register(void); +#endif + +#ifdef WLAN_CRYPTO_FILS_OS_DERIVATIVE +static inline const struct wlan_crypto_cipher *fils_register(void) +{ + return NULL; +} +#else +/** + * fils_register() - Register all callback functions to Crypto manager + * + * This function is invoked from crypto object manager to register + * FILS specific callbacks. + * + * Return: Pointer to wlan_crypto_cipher Object + */ +const struct wlan_crypto_cipher *fils_register(void); +#endif + + +static inline void *wlan_get_vdev_crypto_obj(struct wlan_objmgr_vdev *vdev) +{ + void *crypto_priv; + crypto_priv = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_CRYPTO); + + return crypto_priv; +} + +static inline void *wlan_get_peer_crypto_obj(struct wlan_objmgr_peer *peer) +{ + void *crypto_priv; + crypto_priv = wlan_objmgr_peer_get_comp_private_obj(peer, + WLAN_UMAC_COMP_CRYPTO); + + return crypto_priv; +} +#endif /* end of __WLAN_CRYPTO_OBJ_MGR_I_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_param_handling.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_param_handling.c new file mode 100644 index 0000000000000000000000000000000000000000..33d24db046d315ba770476665ba9db9ba40e6a9b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_param_handling.c @@ -0,0 +1,304 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public APIs for crypto service + */ +/* include files */ +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "wlan_crypto_global_def.h" +#include "wlan_crypto_global_api.h" +#include "wlan_crypto_def_i.h" +#include "wlan_crypto_param_handling_i.h" + +static uint32_t +cipher2cap(int cipher) +{ + switch (cipher) { + case WLAN_CRYPTO_CIPHER_WEP: return WLAN_CRYPTO_CAP_WEP; + case WLAN_CRYPTO_CIPHER_WEP_40: return WLAN_CRYPTO_CAP_WEP; + case WLAN_CRYPTO_CIPHER_WEP_104: return WLAN_CRYPTO_CAP_WEP; + case WLAN_CRYPTO_CIPHER_AES_OCB: return WLAN_CRYPTO_CAP_AES; + case WLAN_CRYPTO_CIPHER_AES_CCM: return WLAN_CRYPTO_CAP_AES; + case WLAN_CRYPTO_CIPHER_AES_CCM_256: return WLAN_CRYPTO_CAP_AES; + case WLAN_CRYPTO_CIPHER_AES_GCM: return WLAN_CRYPTO_CAP_AES; + case WLAN_CRYPTO_CIPHER_AES_GCM_256: return WLAN_CRYPTO_CAP_AES; + case WLAN_CRYPTO_CIPHER_CKIP: return WLAN_CRYPTO_CAP_CKIP; + case WLAN_CRYPTO_CIPHER_TKIP: return WLAN_CRYPTO_CAP_TKIP_MIC; + case WLAN_CRYPTO_CIPHER_WAPI_SMS4: return WLAN_CRYPTO_CAP_WAPI_SMS4; + case WLAN_CRYPTO_CIPHER_WAPI_GCM4: return WLAN_CRYPTO_CAP_WAPI_GCM4; + case WLAN_CRYPTO_CIPHER_FILS_AEAD: return WLAN_CRYPTO_CAP_FILS_AEAD; + } + return 0; +} + +/** + * wlan_crypto_set_authmode - called by ucfg to configure authmode for vdev + * @vdev: vdev + * @authmode: authmode + * + * This function gets called from ucfg to configure authmode for vdev. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_authmode(struct wlan_crypto_params *crypto_params, + uint32_t authmode) +{ + crypto_params->authmodeset = authmode; + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_get_authmode - called by ucfg to get authmode of particular vdev + * @vdev: vdev + * + * This function gets called from ucfg to get authmode of particular vdev + * + * Return: authmode + */ +int32_t wlan_crypto_get_authmode(struct wlan_crypto_params *crypto_params) +{ + return crypto_params->authmodeset; +} + +/** + * wlan_crypto_set_mcastcipher - called by ucfg to configure mcastcipher in vdev + * @vdev: vdev + * @wlan_crypto_cipher_type: mcast cipher value. + * + * This function gets called from ucfg to configure mcastcipher in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_mcastcipher(struct wlan_crypto_params *crypto_params, + wlan_crypto_cipher_type cipher) +{ + uint16_t i; + uint32_t cap; + QDF_STATUS status = QDF_STATUS_E_INVAL; + + RESET_MCAST_CIPHERS(crypto_params); + + for (i = 0; i < WLAN_CRYPTO_CIPHER_MAX; i++) { + if (HAS_PARAM(cipher, i)) { + cap = cipher2cap(i); + if (cap && HAS_CIPHER_CAP(crypto_params, cap)) { + SET_MCAST_CIPHER(crypto_params, i); + status = QDF_STATUS_SUCCESS; + } + } + CLEAR_PARAM(cipher, i); + } + return status; +} +/** + * wlan_crypto_get_mcastcipher - called by ucfg to get mcastcipher from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get mcastcipher of particular vdev + * + * Return: mcast cipher + */ +int32_t wlan_crypto_get_mcastcipher(struct wlan_crypto_params *crypto_params) +{ + return crypto_params->mcastcipherset; +} + +/** + * wlan_crypto_set_ucastciphers - called by ucfg to configure + * unicast ciphers in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_ucastciphers( + struct wlan_crypto_params *crypto_params, + uint32_t cipher) +{ + uint16_t i; + uint32_t cap; + QDF_STATUS status = QDF_STATUS_E_INVAL; + + RESET_UCAST_CIPHERS(crypto_params); + + for (i = 0; i < WLAN_CRYPTO_CIPHER_MAX ; i++) { + if (HAS_PARAM(cipher, i)) { + cap = cipher2cap(i); + if (cap && HAS_CIPHER_CAP(crypto_params, cap)) { + SET_UCAST_CIPHER(crypto_params, i); + status = QDF_STATUS_SUCCESS; + } + } + CLEAR_PARAM(cipher, i); + } + + return status; +} + +/** + * wlan_crypto_get_ucastciphers - called by ucfg to get ucastcipher from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_ucastciphers(struct wlan_crypto_params *crypto_params) +{ + return crypto_params->ucastcipherset; +} + +/** + * wlan_crypto_set_mgmtcipher - called by ucfg to configure + * mgmt ciphers in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_mgmtcipher( + struct wlan_crypto_params *crypto_params, + uint32_t value) +{ + SET_MGMT_CIPHER(crypto_params, value); + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_get_mgmtciphers - called by ucfg to get mgmtcipher from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_mgmtciphers(struct wlan_crypto_params *crypto_params) +{ + return crypto_params->mgmtcipherset; +} + +/** + * wlan_crypto_set_cipher_cap - called by ucfg to configure + * cipher cap in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_cipher_cap( + struct wlan_crypto_params *crypto_params, + uint32_t value) +{ + crypto_params->cipher_caps = value; + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_get_cipher_cap - called by ucfg to get cipher caps from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_cipher_cap(struct wlan_crypto_params *crypto_params) +{ + return crypto_params->cipher_caps; +} + +/** + * wlan_crypto_set_rsn_cap - called by ucfg to configure + * cipher cap in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_rsn_cap( + struct wlan_crypto_params *crypto_params, + uint32_t value) +{ + crypto_params->rsn_caps = value; + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_get_rsn_cap - called by ucfg to get rsn caps from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_rsn_cap(struct wlan_crypto_params *crypto_params) +{ + return crypto_params->rsn_caps; +} + + +/** + * wlan_crypto_set_key_mgmt - called by ucfg to configure + * key_mgmt in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_key_mgmt( + struct wlan_crypto_params *crypto_params, + uint32_t value) +{ + crypto_params->key_mgmt = value; + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_get_key_mgmt - called by ucfg to get key mgmt from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_key_mgmt(struct wlan_crypto_params *crypto_params) +{ + return crypto_params->key_mgmt; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_param_handling_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_param_handling_i.h new file mode 100644 index 0000000000000000000000000000000000000000..d6198b4b2aa1256e4806791eeec21589a23f7a7f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_param_handling_i.h @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public APIs for crypto service + */ +/* include files */ +#ifndef __WLAN_CRYPTO_PARAM_HANDLING_I_H_ +#define __WLAN_CRYPTO_PARAM_HANDLING_I_H_ +/** + * wlan_crypto_set_authmode - called by ucfg to configure authmode for vdev + * @vdev: vdev + * @authmode: authmode + * + * This function gets called from ucfg to configure authmode for vdev. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_authmode(struct wlan_crypto_params *crypto_params, + uint32_t authmode); + +/** + * wlan_crypto_get_authmode - called by ucfg to get authmode of particular vdev + * @vdev: vdev + * + * This function gets called from ucfg to get authmode of particular vdev + * + * Return: authmode + */ +int32_t wlan_crypto_get_authmode(struct wlan_crypto_params *crypto_params); + +/** + * wlan_crypto_set_mcastcipher - called by ucfg to configure mcastcipher in vdev + * @vdev: vdev + * @wlan_crypto_cipher_type: mcast cipher value. + * + * This function gets called from ucfg to configure mcastcipher in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_mcastcipher(struct wlan_crypto_params *crypto_params, + wlan_crypto_cipher_type cipher); +/** + * wlan_crypto_get_mcastcipher - called by ucfg to get mcastcipher from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get mcastcipher of particular vdev + * + * Return: mcast cipher + */ +int32_t wlan_crypto_get_mcastcipher(struct wlan_crypto_params *crypto_params); + +/** + * wlan_crypto_set_ucastciphers - called by ucfg to configure + * unicast ciphers in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_ucastciphers(struct wlan_crypto_params *, + uint32_t ciphers); +/** + * wlan_crypto_get_ucastciphers - called by ucfg to get ucastcipher from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_ucastciphers(struct wlan_crypto_params *crypto_params); + +/** + * wlan_crypto_set_mgmtcipher - called by ucfg to configure + * mgmt ciphers in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_mgmtcipher(struct wlan_crypto_params *crypto_params, + uint32_t ciphers); + +/** + * wlan_crypto_get_mgmtciphers - called by ucfg to get mgmtcipher from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_mgmtciphers(struct wlan_crypto_params *crypto_params); + +/** + * wlan_crypto_set_cipher_cap - called by ucfg to configure + * cipher cap in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_cipher_cap(struct wlan_crypto_params *crypto_params, + uint32_t ciphers); + +/** + * wlan_crypto_get_cipher_cap - called by ucfg to get cipher caps from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_cipher_cap(struct wlan_crypto_params *crypto_params); + +/** + * wlan_crypto_set_rsn_cap - called by ucfg to configure + * cipher cap in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_rsn_cap(struct wlan_crypto_params *crypto_params, + uint32_t ciphers); + +/** + * wlan_crypto_get_rsn_cap - called by ucfg to get rsn caps from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_rsn_cap(struct wlan_crypto_params *crypto_params); + + +/** + * wlan_crypto_set_key_mgmt - called by ucfg to configure + * key_mgmt in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_key_mgmt(struct wlan_crypto_params *crypto_params, + uint32_t ciphers); + +/** + * wlan_crypto_get_key_mgmt - called by ucfg to get key mgmt from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_key_mgmt(struct wlan_crypto_params *crypto_params); +#endif /* __WLAN_CRYPTO_PARAM_HANDLING_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..8e6e64a0c7303b04750834306fdd1c35340fa227 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_ucfg_api.c @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains crypto north bound interface definitions + */ + +#include +#include +#include +#include +QDF_STATUS ucfg_crypto_set_key_req(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_key *req, + enum wlan_crypto_key_type key_type) +{ + /* + * It is the job of dispatcher to decide whether the + * request has to be sent to scheduler or should be + * sent as a offload request or process directly. + * + * Current requirement is to process set key request + * as run to completion without posting any messages. + * Hence the request handler is directly called from + * here. + */ + return wlan_crypto_set_key_req(vdev, req, key_type); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/inc/wlan_cmn.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/inc/wlan_cmn.h new file mode 100644 index 0000000000000000000000000000000000000000..1a346c79c10a9fc19fe414ff00386a6d65cc6122 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/inc/wlan_cmn.h @@ -0,0 +1,697 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: This file provides the common definitions for object manager + */ + +#ifndef _WLAN_CMN_H_ +#define _WLAN_CMN_H_ + +#include + +/* Max no of UMAC components */ +#define WLAN_UMAC_MAX_COMPONENTS WLAN_UMAC_COMP_ID_MAX + +/* Max no. of radios, a pSoc/Device can support */ +#ifdef WLAN_MAX_PDEVS +#define WLAN_UMAC_MAX_PDEVS WLAN_MAX_PDEVS +#else +#define WLAN_UMAC_MAX_PDEVS 3 +#endif + +/* Max no. of VDEV per PSOC */ +#ifdef WLAN_PSOC_MAX_VDEVS +#define WLAN_UMAC_PSOC_MAX_VDEVS WLAN_PSOC_MAX_VDEVS +#else +#define WLAN_UMAC_PSOC_MAX_VDEVS 51 +#endif + +/* Max no. of VDEVs, a PDEV can support */ +#ifdef WLAN_PDEV_MAX_VDEVS +#define WLAN_UMAC_PDEV_MAX_VDEVS WLAN_PDEV_MAX_VDEVS +#else +#define WLAN_UMAC_PDEV_MAX_VDEVS 17 +#endif + +/* Max no. of Peers, a device can support */ +#define WLAN_UMAC_PSOC_MAX_PEERS (1536 + WLAN_UMAC_PSOC_MAX_VDEVS) + +/* Max no. of Temporary Peers, a pdev can support */ +#define WLAN_MAX_PDEV_TEMP_PEERS 128 + +/* Max no. of Temporary Peers, a psoc can support */ +#define WLAN_MAX_PSOC_TEMP_PEERS \ + (WLAN_MAX_PDEV_TEMP_PEERS * WLAN_UMAC_MAX_PDEVS) + +/* Max length of a SSID */ +#define WLAN_SSID_MAX_LEN 32 + +#define WLAN_CACHE_ID_LEN 2 + +/* Max sequence number */ +#define WLAN_MAX_SEQ_NUM 4096 + +/* Max no. of peers for STA vap */ +#define WLAN_UMAC_MAX_STA_PEERS 2 + +/* Max vdev_id */ +#define WLAN_UMAC_VDEV_ID_MAX 0xFF + +/* Invalid pdev_id */ +#define WLAN_INVALID_PDEV_ID 0xFFFFFFFF + +/* Invalid free descriptor count */ +#define WLAN_INVALID_MGMT_DESC_COUNT 0xFFFFFFFF + +/* 802.11 cap info */ +#define WLAN_CAPINFO_ESS 0x0001 +#define WLAN_CAPINFO_IBSS 0x0002 +#define WLAN_CAPINFO_CF_POLLABLE 0x0004 +#define WLAN_CAPINFO_CF_POLLREQ 0x0008 +#define WLAN_CAPINFO_PRIVACY 0x0010 +#define WLAN_CAPINFO_SHORT_PREAMBLE 0x0020 +#define WLAN_CAPINFO_PBCC 0x0040 +#define WLAN_CAPINFO_CHNL_AGILITY 0x0080 +#define WLAN_CAPINFO_SPECTRUM_MGMT 0x0100 +#define WLAN_CAPINFO_QOS 0x0200 +#define WLAN_CAPINFO_SHORT_SLOTTIME 0x0400 +#define WLAN_CAPINFO_APSD 0x0800 +#define WLAN_CAPINFO_RADIOMEAS 0x1000 +#define WLAN_CAPINFO_DSSSOFDM 0x2000 + +/* Allowed time to wait for Object creation */ +#define WLAN_VDEV_CREATE_TIMEOUT_CNT 300 +/* 25 msec */ +#define WLAN_VDEV_CREATE_TIMEOUT 25 + +#define WLAN_PDEV_CREATE_TIMEOUT_CNT 300 +/* 25 msec */ +#define WLAN_PDEV_CREATE_TIMEOUT 25 + +#define WLAN_PSOC_CREATE_TIMEOUT_CNT 300 +/* 25 msec */ +#define WLAN_PSOC_CREATE_TIMEOUT 25 +#define WLAN_24_GHZ_BASE_FREQ (2407) +#define WLAN_5_GHZ_BASE_FREQ (5000) +#define WLAN_24_GHZ_CHANNEL_6 (6) +#define WLAN_24_GHZ_CHANNEL_14 (14) +#define WLAN_24_GHZ_CHANNEL_15 (15) +#define WLAN_24_GHZ_CHANNEL_27 (27) +#define WLAN_5_GHZ_CHANNEL_170 (170) +#define WLAN_CHAN_SPACING_5MHZ (5) +#define WLAN_CHAN_SPACING_20MHZ (20) +#define WLAN_CHAN_14_FREQ (2484) +#define WLAN_CHAN_15_FREQ (2512) +#define WLAN_CHAN_170_FREQ (5852) + +#define WLAN_MAC_EID_VENDOR 221 +#define WLAN_MAC_EID_EXT 255 + +/* VHT capability flags */ +/* B0-B1 Maximum MPDU Length */ +/* A-MSDU Length 3839 octets */ +#define WLAN_VHTCAP_MAX_MPDU_LEN_3839 0x00000000 + /* A-MSDU Length 7991 octets */ +#define WLAN_VHTCAP_MAX_MPDU_LEN_7935 0x00000001 +/* A-MSDU Length 11454 octets */ +#define WLAN_VHTCAP_MAX_MPDU_LEN_11454 0x00000002 + +/* B2-B3 Supported Channel Width */ +/* Does not support 160 or 80+80 */ +#define WLAN_VHTCAP_SUP_CHAN_WIDTH_80 0x00000000 +/* Supports 160 */ +#define WLAN_VHTCAP_SUP_CHAN_WIDTH_160 0x00000004 +/* Support both 160 or 80+80 */ +#define WLAN_VHTCAP_SUP_CHAN_WIDTH_80_160 0x00000008 +/* B2-B3 */ +#define WLAN_VHTCAP_SUP_CHAN_WIDTH_S 2 +#define WLAN_VHTCAP_SUP_CHAN_WIDTH_MASK 0x0000000C +/* B4 RX LDPC */ +#define WLAN_VHTCAP_RX_LDPC 0x00000010 +/* B5 Short GI for 80MHz */ +#define WLAN_VHTCAP_SHORTGI_80 0x00000020 +/* B6 Short GI for 160 and 80+80 MHz */ +#define WLAN_VHTCAP_SHORTGI_160 0x00000040 +/* B7 Tx STBC */ +#define WLAN_VHTCAP_TX_STBC 0x00000080 +#define WLAN_VHTCAP_TX_STBC_S 7 +/* B8-B10 Rx STBC */ +#define WLAN_VHTCAP_RX_STBC 0x00000700 +#define WLAN_VHTCAP_RX_STBC_S 8 +/* B11 SU Beam former capable */ +#define WLAN_VHTCAP_SU_BFORMER 0x00000800 +#define WLAN_VHTCAP_SU_BFORMER_S 11 +/* B12 SU Beam formee capable */ +#define WLAN_VHTCAP_SU_BFORMEE 0x00001000 +#define WLAN_VHTCAP_SU_BFORMEE_S 12 + +/* B13-B15 Compressed steering number of beacomformer Antennas supported */ +#define WLAN_VHTCAP_BF_MAX_ANT 0x0000E000 +#define WLAN_VHTCAP_BF_MAX_ANT_S 13 +/* B13-B15 Beamformee STS Capability */ +#define WLAN_VHTCAP_STS_CAP_S 13 +#define WLAN_VHTCAP_STS_CAP_M 0x7 + +/* B16-B18 Sounding Dimensions */ +#define WLAN_VHTCAP_SOUND_DIM 0x00070000 +#define WLAN_VHTCAP_SOUND_DIM_S 16 +/* B19 MU Beam Former */ +#define WLAN_VHTCAP_MU_BFORMER 0x00080000 +#define WLAN_VHTCAP_MU_BFORMER_S 19 +/* B20 MU Beam Formee */ +#define WLAN_VHTCAP_MU_BFORMEE 0x00100000 +#define WLAN_VHTCAP_MU_BFORMEE_S 20 +/* B21 VHT TXOP PS */ +#define WLAN_VHTCAP_TXOP_PS 0x00200000 +/* B22 +HTC-VHT capable */ +#define WLAN_VHTCAP_PLUS_HTC_VHT 0x00400000 + +#define WLAN_VHTCAP_MAX_AMPDU_LEN_FACTOR 13 +/* B23-B25 maximum AMPDU Length Exponent */ +#define WLAN_VHTCAP_MAX_AMPDU_LEN_EXP 0x03800000 +#define WLAN_VHTCAP_MAX_AMPDU_LEN_EXP_S 23 +/* B26-B27 VHT Link Adaptation capable */ +#define WLAN_VHTCAP_LINK_ADAPT 0x0C000000 +/* Rx Antenna Pattern Consistency Supported */ +#define WLAN_VHTCAP_RX_ANTENNA_PATTERN 0x10000000 +/* Tx Antenna Pattern Consistency Supported */ +#define WLAN_VHTCAP_TX_ANTENNA_PATTERN 0x20000000 +/* B30-B31 Extended NSS Bandwidth Support */ +#define WLAN_VHTCAP_NO_EXT_NSS_BW_SUPPORT 0x00000000 +/* B30-B31 Extended NSS Bandwidth Support */ +#define WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_1 0x40000000 +/* B30-B31 Extended NSS Bandwidth Support */ +#define WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_2 0x80000000 +/* B30-B31 Extended NSS Bandwidth Support */ +#define WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_3 0xC0000000 +#define WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_S 30 +#define WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_MASK 0xC0000000 + +#define WLAN_VHTCAP_EXT_NSS_MASK (WLAN_VHTCAP_SUP_CHAN_WIDTH_MASK |\ + WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_MASK) +/* VHTCAP combinations of "supported channel width" and "ext nss support" + * which determine the NSS value supported by STA for <=80 MHz, 160 MHz + * and 80+80 MHz. The macros to be read as combination of + * "supported channel width" and "ext nss support" followed by NSS for 80MHz, + * 160MHz and 80+80MHz defined as a function of Max VHT NSS supported. + * Ex: WLAN_EXTNSS_MAP_01_80F1_160FDOT5_80P80NONE - To be reas as + * supported channel width = 0 + * ext nss support = 1 + * NSS value for <=80MHz = max_vht_nss * 1 + * NSS value for 160MHz = max_vht_nss * (.5) + * NSS value for 80+80MHz = not supported + */ +#define WLAN_EXTNSS_MAP_00_80F1_160NONE_80P80NONE \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_80 | WLAN_VHTCAP_NO_EXT_NSS_BW_SUPPORT) +#define WLAN_EXTNSS_MAP_01_80F1_160FDOT5_80P80NONE \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_80 | WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_1) +#define WLAN_EXTNSS_MAP_02_80F1_160FDOT5_80P80FDOT5 \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_80 | WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_2) +#define WLAN_EXTNSS_MAP_03_80F1_160FDOT75_80P80FDOT75 \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_80 | WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_3) +#define WLAN_EXTNSS_MAP_10_80F1_160F1_80P80NONE \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_160 | WLAN_VHTCAP_NO_EXT_NSS_BW_SUPPORT) +#define WLAN_EXTNSS_MAP_11_80F1_160F1_80P80FDOT5 \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_160 | WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_1) +#define WLAN_EXTNSS_MAP_12_80F1_160F1_80P80FDOT75 \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_160 | WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_2) +#define WLAN_EXTNSS_MAP_13_80F2_160F2_80P80F1 \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_160 | WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_3) +#define WLAN_EXTNSS_MAP_20_80F1_160F1_80P80F1 \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_80_160 | WLAN_VHTCAP_NO_EXT_NSS_BW_SUPPORT) +#define WLAN_EXTNSS_MAP_23_80F2_160F1_80P80F1 \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_80_160 | WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_3) + + +/** + * enum wlan_umac_comp_id - UMAC component id + * @WLAN_UMAC_COMP_MLME: MLME + * @WLAN_UMAC_COMP_MGMT_TXRX: MGMT Tx/Rx + * @WLAN_UMAC_COMP_SERIALIZATION: Serialization + * @WLAN_UMAC_COMP_SCAN: SCAN - as scan module uses services provided by + * MLME, MGMT_TXRX and SERIALIZATION, SCAN module + * must be initializes after above modules. + * @WLAN_UMAC_COMP_PMO: PMO component + * @WLAN_UMAC_COMP_P2P: P2P + * @WLAN_UMAC_COMP_POLICY_MGR: Policy Manager + * @WLAN_UMAC_COMP_CONFIG: Configuration + * @WLAN_UMAC_COMP_WIFI_POS: WIFI Positioning + * @WLAN_UMAC_COMP_TDLS: TDLS + * @WLAN_UMAC_COMP_ATF: Airtime Fairness + * @WLAN_UMAC_COMP_SA_API: Smart Antenna API + * @WLAN_UMAC_COMP_REGULATORY: REGULATORY + * @WLAN_UMAC_COMP_CRYPTO: CRYPTO + * @WLAN_UMAC_COMP_NAN: Neighbor Aware Networking + * @WLAN_UMAC_COMP_DFS: DFS + * @WLAN_UMAC_COMP_SPECTRAL: Spectral + * @WLAN_UMAC_COMP_OFFCHAN_TXRX: Offchan TxRx + * @WLAN_UMAC_COMP_SON: SON + * @WLAN_UMAC_COMP_SPECTRAL: Spectral + * @WLAN_UMAC_COMP_SPLITMAC: SplitMAC + * @WLAN_UMAC_COMP_DISA: DISA encryption test + * @WLAN_UMAC_COMP_GREEN_AP: Green AP + * @WLAN_UMAC_COMP_FTM: FTM component + * @WLAN_UMAC_COMP_FD: FILS Discovery + * @WLAN_UMAC_COMP_OCB: OCB + * @WLAN_UMAC_COMP_IPA: IPA + * @WLAN_UMAC_COMP_CP_STATS: Control Plane Statistics + * @WLAN_UMAC_COMP_ACTION_OUI: ACTION OUI + * @WLAN_UMAC_COMP_FWOL FW Offload + * @WLAN_UMAC_COMP_INTEROP_ISSUES_AP interop issues ap component + * @WLAN_UMAC_COMP_BLACKLIST_MGR: Blacklist mgr component + * @WLAN_UMAC_COMP_COEX: Coex config component + * @WLAN_UMAC_COMP_FTM_TIME_SYNC: WLAN FTM TIMESYNC + * @WLAN_UMAC_COMP_PKT_CAPTURE: Packet capture component + * @WLAN_UMAC_COMP_GPIO: GPIO Configuration + * @WLAN_UMAC_COMP_ID_MAX: Maximum components in UMAC + * + * This id is static. + * On Adding new component, new id has to be assigned + */ +enum wlan_umac_comp_id { + WLAN_UMAC_COMP_MLME = 0, + WLAN_UMAC_COMP_MGMT_TXRX = 1, + WLAN_UMAC_COMP_SERIALIZATION = 2, + WLAN_UMAC_COMP_SCAN = 3, + WLAN_UMAC_COMP_PMO = 4, + WLAN_UMAC_COMP_P2P = 5, + WLAN_UMAC_COMP_POLICY_MGR = 6, + WLAN_UMAC_COMP_CONFIG = 7, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX = 8, + WLAN_UMAC_COMP_WIFI_POS = 9, + WLAN_UMAC_COMP_TDLS = 10, + WLAN_UMAC_COMP_ATF = 11, + WLAN_UMAC_COMP_SA_API = 12, + WLAN_UMAC_COMP_REGULATORY = 13, + WLAN_UMAC_COMP_CRYPTO = 14, + WLAN_UMAC_COMP_NAN = 15, + WLAN_UMAC_COMP_DFS = 16, + WLAN_UMAC_COMP_OFFCHAN_TXRX = 17, + WLAN_UMAC_COMP_SON = 18, + WLAN_UMAC_COMP_SPECTRAL = 19, + WLAN_UMAC_COMP_SPLITMAC = 20, + WLAN_UMAC_COMP_DISA = 21, + WLAN_UMAC_COMP_GREEN_AP = 22, + WLAN_UMAC_COMP_FTM = 23, + WLAN_UMAC_COMP_FD = 24, + WLAN_UMAC_COMP_OCB = 25, + WLAN_UMAC_COMP_IPA = 26, + WLAN_UMAC_COMP_CP_STATS = 27, + WLAN_UMAC_COMP_ACTION_OUI = 28, + WLAN_UMAC_COMP_FWOL = 29, + WLAN_UMAC_COMP_CFR = 30, + WLAN_UMAC_COMP_INTEROP_ISSUES_AP = 31, + WLAN_UMAC_COMP_BLACKLIST_MGR = 32, + WLAN_UMAC_COMP_COEX = 33, + WLAN_UMAC_COMP_FTM_TIME_SYNC = 34, + WLAN_UMAC_COMP_PKT_CAPTURE = 35, + WLAN_UMAC_COMP_GPIO = 39, + WLAN_UMAC_COMP_ID_MAX, +}; + +/** + * enum WLAN_DEV_TYPE - for DA or OL architecture types + * @WLAN_DEV_DA: Direct attach + * @WLAN_DEV_OL: Partial offload + * @WLAN_DEV_INVALID: Invalid dev type + */ +typedef enum { + WLAN_DEV_DA = 0, + WLAN_DEV_OL = 1, + WLAN_DEV_INVALID = 3, +} WLAN_DEV_TYPE; + +/** + * enum wlan_phymode - phy mode + * @WLAN_PHYMODE_AUTO: autoselect + * @WLAN_PHYMODE_11A: 5GHz, OFDM + * @WLAN_PHYMODE_11B: 2GHz, CCK + * @WLAN_PHYMODE_11G: 2GHz, OFDM + * @WLAN_PHYMODE_11G_ONLY: 2GHz only + * @WLAN_PHYMODE_11NA_HT20: 5Ghz, HT20 + * @WLAN_PHYMODE_11NG_HT20: 2Ghz, HT20 + * @WLAN_PHYMODE_11NA_HT40: 5Ghz, Auto HT40 + * @WLAN_PHYMODE_11NG_HT40PLUS: 2Ghz, HT40 (ext ch +1) + * @WLAN_PHYMODE_11NG_HT40MINUS: 2Ghz, HT40 (ext ch -1) + * @WLAN_PHYMODE_11NG_HT40: 2Ghz, Auto HT40 + * @WLAN_PHYMODE_11AC_VHT20: 5Ghz, VHT20 + * @WLAN_PHYMODE_11AC_VHT20_2G: 2Ghz, VHT20 + * @WLAN_PHYMODE_11AC_VHT40: 5Ghz, VHT40 + * @WLAN_PHYMODE_11AC_VHT40PLUS_2G: 2Ghz, VHT40 (ext ch +1) + * @WLAN_PHYMODE_11AC_VHT40MINUS_2G: 2Ghz, VHT40 (ext ch -1) + * @WLAN_PHYMODE_11AC_VHT40_2G: 2Ghz, VHT40 + * @WLAN_PHYMODE_11AC_VHT80: 5Ghz, VHT80 + * @WLAN_PHYMODE_11AC_VHT80_2G: 2Ghz, VHT80 + * @WLAN_PHYMODE_11AC_VHT160: 5Ghz, VHT160 + * @WLAN_PHYMODE_11AC_VHT80_80: 5Ghz, VHT80_80 + * @WLAN_PHYMODE_11AXA_HE20: 5GHz, HE20 + * @WLAN_PHYMODE_11AXG_HE20: 2GHz, HE20 + * @WLAN_PHYMODE_11AXA_HE40: 5GHz, HE40 + * @WLAN_PHYMODE_11AXG_HE40PLUS: 2GHz, HE40 (ext ch +1) + * @WLAN_PHYMODE_11AXG_HE40MINUS:2GHz, HE40 (ext ch -1) + * @WLAN_PHYMODE_11AXG_HE40: 2GHz, HE40 + * @WLAN_PHYMODE_11AXA_HE80: 5GHz, HE80 + * @WLAN_PHYMODE_11AXG_HE80: 2GHz, HE80 + * @WLAN_PHYMODE_11AXA_HE160: 5GHz, HE160 + * @WLAN_PHYMODE_11AXA_HE80_80: 5GHz, HE80_80 + * @WLAN_PHYMODE_MAX: Max phymode + */ +enum wlan_phymode { + WLAN_PHYMODE_AUTO = 0, + WLAN_PHYMODE_11A = 1, + WLAN_PHYMODE_11B = 2, + WLAN_PHYMODE_11G = 3, + WLAN_PHYMODE_11G_ONLY = 4, + WLAN_PHYMODE_11NA_HT20 = 5, + WLAN_PHYMODE_11NG_HT20 = 6, + WLAN_PHYMODE_11NA_HT40 = 7, + WLAN_PHYMODE_11NG_HT40PLUS = 8, + WLAN_PHYMODE_11NG_HT40MINUS = 9, + WLAN_PHYMODE_11NG_HT40 = 10, + WLAN_PHYMODE_11AC_VHT20 = 11, + WLAN_PHYMODE_11AC_VHT20_2G = 12, + WLAN_PHYMODE_11AC_VHT40 = 13, + WLAN_PHYMODE_11AC_VHT40PLUS_2G = 14, + WLAN_PHYMODE_11AC_VHT40MINUS_2G = 15, + WLAN_PHYMODE_11AC_VHT40_2G = 16, + WLAN_PHYMODE_11AC_VHT80 = 17, + WLAN_PHYMODE_11AC_VHT80_2G = 18, + WLAN_PHYMODE_11AC_VHT160 = 19, + WLAN_PHYMODE_11AC_VHT80_80 = 20, + WLAN_PHYMODE_11AXA_HE20 = 21, + WLAN_PHYMODE_11AXG_HE20 = 22, + WLAN_PHYMODE_11AXA_HE40 = 23, + WLAN_PHYMODE_11AXG_HE40PLUS = 24, + WLAN_PHYMODE_11AXG_HE40MINUS = 25, + WLAN_PHYMODE_11AXG_HE40 = 26, + WLAN_PHYMODE_11AXA_HE80 = 27, + WLAN_PHYMODE_11AXG_HE80 = 28, + WLAN_PHYMODE_11AXA_HE160 = 29, + WLAN_PHYMODE_11AXA_HE80_80 = 30, + WLAN_PHYMODE_MAX +}; + +#define IS_WLAN_PHYMODE_160MHZ(_mode) ({typeof(_mode) mode = (_mode); \ + ((mode) == WLAN_PHYMODE_11AC_VHT80_80) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT160) || \ + ((mode) == WLAN_PHYMODE_11AXA_HE80_80) || \ + ((mode) == WLAN_PHYMODE_11AXA_HE160); }) + +#define IS_WLAN_PHYMODE_80MHZ(_mode) ({typeof(_mode) mode = (_mode); \ + ((mode) == WLAN_PHYMODE_11AC_VHT80) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT80_2G) || \ + ((mode) == WLAN_PHYMODE_11AXA_HE80) || \ + ((mode) == WLAN_PHYMODE_11AXG_HE80); }) + +#define IS_WLAN_PHYMODE_40MHZ(_mode) ({typeof(_mode) mode = (_mode); \ + ((mode) == WLAN_PHYMODE_11NG_HT40) || \ + ((mode) == WLAN_PHYMODE_11NG_HT40PLUS) || \ + ((mode) == WLAN_PHYMODE_11NG_HT40MINUS) || \ + ((mode) == WLAN_PHYMODE_11NA_HT40) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT40) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT40_2G) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT40PLUS_2G) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT40MINUS_2G) || \ + ((mode) == WLAN_PHYMODE_11AXA_HE40) || \ + ((mode) == WLAN_PHYMODE_11AXG_HE40) || \ + ((mode) == WLAN_PHYMODE_11AXG_HE40PLUS) || \ + ((mode) == WLAN_PHYMODE_11AXG_HE40MINUS); }) + +#define IS_WLAN_PHYMODE_HT(_mode) ({typeof(_mode) mode = (_mode); \ + ((mode) == WLAN_PHYMODE_11NA_HT20) || \ + ((mode) == WLAN_PHYMODE_11NG_HT20) || \ + ((mode) == WLAN_PHYMODE_11NA_HT40) || \ + ((mode) == WLAN_PHYMODE_11NG_HT40PLUS) || \ + ((mode) == WLAN_PHYMODE_11NG_HT40MINUS) || \ + ((mode) == WLAN_PHYMODE_11NG_HT40); }) + +#define IS_WLAN_PHYMODE_VHT(_mode) ({typeof(_mode) mode = (_mode); \ + ((mode) == WLAN_PHYMODE_11AC_VHT20) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT20_2G) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT40) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT40PLUS_2G) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT40MINUS_2G) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT40_2G) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT80) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT80_2G) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT160) || \ + ((mode) == WLAN_PHYMODE_11AC_VHT80_80); }) + +#define IS_WLAN_PHYMODE_HE(_mode) ({typeof(_mode) mode = (_mode); \ + ((mode) == WLAN_PHYMODE_11AXA_HE20) || \ + ((mode) == WLAN_PHYMODE_11AXG_HE20) || \ + ((mode) == WLAN_PHYMODE_11AXA_HE40) || \ + ((mode) == WLAN_PHYMODE_11AXG_HE40) || \ + ((mode) == WLAN_PHYMODE_11AXG_HE40PLUS) || \ + ((mode) == WLAN_PHYMODE_11AXG_HE40MINUS) || \ + ((mode) == WLAN_PHYMODE_11AXA_HE80) || \ + ((mode) == WLAN_PHYMODE_11AXG_HE80) || \ + ((mode) == WLAN_PHYMODE_11AXA_HE160) || \ + ((mode) == WLAN_PHYMODE_11AXA_HE80_80); }) + +/** + * enum phy_ch_width - channel width + * @CH_WIDTH_20MHZ: 20 mhz width + * @CH_WIDTH_40MHZ: 40 mhz width + * @CH_WIDTH_80MHZ: 80 mhz width + * @CH_WIDTH_160MHZ: 160 mhz width + * @CH_WIDTH_80P80HZ: 80+80 mhz width + * @CH_WIDTH_5MHZ: 5 mhz width + * @CH_WIDTH_10MHZ: 10 mhz width + * @CH_WIDTH_INVALID: invalid width + * @CH_WIDTH_MAX: max possible width + */ +enum phy_ch_width { + CH_WIDTH_20MHZ = 0, + CH_WIDTH_40MHZ, + CH_WIDTH_80MHZ, + CH_WIDTH_160MHZ, + CH_WIDTH_80P80MHZ, + CH_WIDTH_5MHZ, + CH_WIDTH_10MHZ, + CH_WIDTH_INVALID, + CH_WIDTH_MAX +}; + +/** + * enum wifi_traffic_ac - access category type + * @WIFI_AC_VO: Voice AC + * @WIFI_AC_VI: Video AC + * @WIFI_AC_BE: Best effort AC + * @WIFI_AC_BK: Background AC + * @WIFI_AC_MAX: MAX access category + */ +enum wifi_traffic_ac { + WIFI_AC_VO = 0, + WIFI_AC_VI = 1, + WIFI_AC_BE = 2, + WIFI_AC_BK = 3, + WIFI_AC_MAX = 4, +}; + +/** + * enum wlan_peer_type - peer type + * @WLAN_PEER_SELF: for AP mode, SELF PEER or AP PEER are same + * @WLAN_PEER_AP: BSS peer for STA mode, Self peer for AP mode + * @WLAN_PEER_P2P_GO: BSS peer for P2P CLI mode, Self peer for P2P GO mode + * @WLAN_PEER_STA: Self Peer for STA mode, STA peer for AP mode + * @WLAN_PEER_P2P_CLI: Self peer for P2P CLI mode, P2P CLI peer for P2P GO mode + * @WLAN_PEER_TDLS: TDLS Peer + * @WLAN_PEER_NAWDS: NAWDS Peer + * @WLAN_PEER_STA_TEMP: STA Peer Temp (its host only node) + * @WLAN_PEER_IBSS: IBSS Peer + * @WLAN_PEER_NDP: NDP Peer + */ +enum wlan_peer_type { + WLAN_PEER_SELF = 1, + WLAN_PEER_AP = 2, + WLAN_PEER_P2P_GO = 3, + WLAN_PEER_STA = 4, + WLAN_PEER_P2P_CLI = 5, + WLAN_PEER_TDLS = 6, + WLAN_PEER_NAWDS = 7, + WLAN_PEER_STA_TEMP = 8, + WLAN_PEER_IBSS = 9, + WLAN_PEER_NDP = 10, +}; + +/** + * enum wlan_band - specifies operating channel band + * @WLAN_BAND_ALL: Any band + * @WLAN_BAND_2_4_GHZ: 2.4 GHz band + * @WLAN_BAND_5_GHZ: 5 GHz band + * @WLAN_BAND_4_9_GHZ: 4.9 GHz band + * @WLAN_BAND_NUM_MAX: Max num band + */ +enum wlan_band { + WLAN_BAND_ALL, + WLAN_BAND_2_4_GHZ, + WLAN_BAND_5_GHZ, + WLAN_BAND_4_9_GHZ, + WLAN_BAND_NUM_MAX, +}; + +/** + * enum wlan_bss_type - type of network + * @WLAN_TYPE_ANY: Default value + * @WLAN_TYPE_BSS: Type BSS + * @WLAN_TYPE_IBSS: Type IBSS + */ +enum wlan_bss_type { + WLAN_TYPE_ANY, + WLAN_TYPE_BSS, + WLAN_TYPE_IBSS, +}; + +/** + * enum wlan_pmf_cap: pmf capability + * @PMF_DISABLED: PMF is disabled + * @PMF_CAPABLE: PMF is supported + * @PMF_REQUIRED: PMF is mandatory + */ +enum wlan_pmf_cap { + WLAN_PMF_DISABLED, + WLAN_PMF_CAPABLE, + WLAN_PMF_REQUIRED, +}; + +/** + * enum wlan_auth_type - Enumeration of the various Auth types + * @WLAN_AUTH_TYPE_OPEN_SYSTEM: Open auth type + * @WLAN_AUTH_TYPE_SHARED_KEY: Shared Key Auth type + * @WLAN_AUTH_TYPE_AUTOSWITCH: Auto switch Open/Shared + * @WLAN_AUTH_TYPE_SAE: SAE auth type + * @WLAN_AUTH_TYPE_WPA: WPA Enterprise + * @WLAN_AUTH_TYPE_WPA_PSK: WPA PSK + * @WLAN_AUTH_TYPE_WPA_NONE: WPA None + * @WLAN_AUTH_TYPE_RSN: RSN Enterprise + * @WLAN_AUTH_TYPE_RSN_PSK: RSN PSK + * @WLAN_AUTH_TYPE_FT_RSN: FT RSN Enterprise + * @WLAN_AUTH_TYPE_FT_RSN_PSK: FT RSN PSK + * @WLAN_AUTH_TYPE_WAPI_WAI_CERTIFICATE: WAPI certificate + * @WLAN_AUTH_TYPE_WAPI_WAI_PSK: WAPI PSK + * @WLAN_AUTH_TYPE_CCKM_WPA: CCKM WPA + * @WLAN_AUTH_TYPE_CCKM_RSN: CCKM RSN + * @WLAN_AUTH_TYPE_RSN_PSK_SHA256: SHA256 PSK + * @WLAN_AUTH_TYPE_RSN_8021X_SHA256: SHA256 Enterprise + * @WLAN_AUTH_TYPE_FILS_SHA256: FILS SHA256 + * @WLAN_AUTH_TYPE_FILS_SHA384: FILS SHA384 + * @WLAN_AUTH_TYPE_FT_FILS_SHA256: FILS SHA256 for 11r + * @WLAN_AUTH_TYPE_FT_FILS_SHA384: FILS SHA384 for 11r + * @WLAN_AUTH_TYPE_DPP_RSN: DPP RSN + * @WLAN_AUTH_TYPE_OWE: OWE + * @WLAN_AUTH_TYPE_SUITEB_EAP_SHA256: EAP SHA256 + * @WLAN_AUTH_TYPE_SUITEB_EAP_SHA384: EAP SHA384 + * @WLAN_AUTH_TYPE_FT_SAE: FT SAE + * @WLAN_AUTH_TYPE_FT_SUITEB_EAP_SHA384: FT suiteb SHA384 + * @WLAN_AUTH_TYPE_ANY: To match any auth type + * @WLAN_NUM_OF_SUPPORT_AUTH_TYPE: Max no of Auth type + */ +enum wlan_auth_type { + WLAN_AUTH_TYPE_OPEN_SYSTEM, + WLAN_AUTH_TYPE_SHARED_KEY, + WLAN_AUTH_TYPE_AUTOSWITCH, + WLAN_AUTH_TYPE_SAE, + WLAN_AUTH_TYPE_WPA, + WLAN_AUTH_TYPE_WPA_PSK, + WLAN_AUTH_TYPE_WPA_NONE, + WLAN_AUTH_TYPE_RSN, + WLAN_AUTH_TYPE_RSN_PSK, + WLAN_AUTH_TYPE_FT_RSN, + WLAN_AUTH_TYPE_FT_RSN_PSK, + WLAN_AUTH_TYPE_WAPI_WAI_CERTIFICATE, + WLAN_AUTH_TYPE_WAPI_WAI_PSK, + WLAN_AUTH_TYPE_CCKM_WPA, + WLAN_AUTH_TYPE_CCKM_RSN, + WLAN_AUTH_TYPE_RSN_PSK_SHA256, + WLAN_AUTH_TYPE_RSN_8021X_SHA256, + WLAN_AUTH_TYPE_FILS_SHA256, + WLAN_AUTH_TYPE_FILS_SHA384, + WLAN_AUTH_TYPE_FT_FILS_SHA256, + WLAN_AUTH_TYPE_FT_FILS_SHA384, + WLAN_AUTH_TYPE_DPP_RSN, + WLAN_AUTH_TYPE_OWE, + WLAN_AUTH_TYPE_SUITEB_EAP_SHA256, + WLAN_AUTH_TYPE_SUITEB_EAP_SHA384, + WLAN_AUTH_TYPE_OSEN, + WLAN_AUTH_TYPE_FT_SAE, + WLAN_AUTH_TYPE_FT_SUITEB_EAP_SHA384, + WLAN_AUTH_TYPE_ANY, + WLAN_NUM_OF_SUPPORT_AUTH_TYPE = WLAN_AUTH_TYPE_ANY, +}; + +/** + * enum wlan_enc_type - Enumeration of the various Enc types + * @WLAN_ENCRYPT_TYPE_NONE: No encryption + * @WLAN_ENCRYPT_TYPE_WEP40_STATICKEY: WEP 40 Static key + * @WLAN_ENCRYPT_TYPE_WEP104_STATICKEY: WEP 104 Static key + * @WLAN_ENCRYPT_TYPE_WEP40: WEP 40 + * @WLAN_ENCRYPT_TYPE_WEP104: WEP 104 + * @WLAN_ENCRYPT_TYPE_TKIP: TKIP + * @WLAN_ENCRYPT_TYPE_AES: AES + * @WLAN_ENCRYPT_TYPE_WPI: WAPI + * @WLAN_ENCRYPT_TYPE_KRK: KRK + * @WLAN_ENCRYPT_TYPE_BTK: BTK + * @WLAN_ENCRYPT_TYPE_AES_CMAC: 11W BIP + * @WLAN_ENCRYPT_TYPE_ANY: Any + * @WLAN_NUM_OF_ENCRYPT_TYPE: Max value + */ +enum wlan_enc_type { + WLAN_ENCRYPT_TYPE_NONE, + WLAN_ENCRYPT_TYPE_WEP40_STATICKEY, + WLAN_ENCRYPT_TYPE_WEP104_STATICKEY, + WLAN_ENCRYPT_TYPE_WEP40, + WLAN_ENCRYPT_TYPE_WEP104, + WLAN_ENCRYPT_TYPE_TKIP, + WLAN_ENCRYPT_TYPE_AES, + WLAN_ENCRYPT_TYPE_WPI, + WLAN_ENCRYPT_TYPE_KRK, + WLAN_ENCRYPT_TYPE_BTK, + WLAN_ENCRYPT_TYPE_AES_CMAC, + WLAN_ENCRYPT_TYPE_AES_GCMP, + WLAN_ENCRYPT_TYPE_AES_GCMP_256, + WLAN_ENCRYPT_TYPE_ANY, + WLAN_NUM_OF_ENCRYPT_TYPE = WLAN_ENCRYPT_TYPE_ANY, +}; + +/** + * struct wlan_ssid - SSID info + * @length: ssid length of bss excluding null + * @ssid: ssid character array potentially non null terminated + */ +struct wlan_ssid { + uint8_t length; + uint8_t ssid[WLAN_SSID_MAX_LEN]; +}; + +/* Util API to copy the MAC address */ +#define WLAN_ADDR_COPY(dst, src) qdf_mem_copy(dst, src, QDF_MAC_ADDR_SIZE) +/* Util API to compare the MAC address */ +#define WLAN_ADDR_EQ(a1, a2) qdf_mem_cmp(a1, a2, QDF_MAC_ADDR_SIZE) + +#define PSOC_SERVICE_BM_SIZE ((128 + sizeof(uint32_t) - 1) / sizeof(uint32_t)) +#define PSOC_HOST_MAX_NUM_SS (8) +#define PSOC_HOST_MAX_PHY_SIZE (3) +#define PSOC_HOST_MAX_MAC_SIZE (2) +#define PSOC_MAX_HW_MODE (3) +#define PSOC_MAX_MAC_PHY_CAP (5) +#define PSOC_MAX_PHY_REG_CAP (3) +#define PSOC_MAX_CHAINMASK_TABLES (5) + + +#endif /* _WLAN_OBJMGR_CMN_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_main.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_main.c new file mode 100644 index 0000000000000000000000000000000000000000..95ddbb9e47e08c63b0ab9fabbae909350c43da8f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_main.c @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_mgmt_txrx_main.c + * This file contains mgmt txrx private API definitions for + * mgmt txrx component. + */ + +#include "wlan_mgmt_txrx_main_i.h" +#include "qdf_nbuf.h" + +QDF_STATUS wlan_mgmt_txrx_desc_pool_init( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx) +{ + uint32_t i; + + mgmt_txrx_debug( + "mgmt_txrx ctx: %pK pdev: %pK mgmt desc pool size %d", + mgmt_txrx_pdev_ctx, mgmt_txrx_pdev_ctx->pdev, + MGMT_DESC_POOL_MAX); + mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool = qdf_mem_malloc( + MGMT_DESC_POOL_MAX * + sizeof(struct mgmt_txrx_desc_elem_t)); + + if (!mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool) + return QDF_STATUS_E_NOMEM; + + qdf_list_create(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list, + MGMT_DESC_POOL_MAX); + + for (i = 0; i < MGMT_DESC_POOL_MAX; i++) { + mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[i].desc_id = i; + mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[i].in_use = false; + qdf_list_insert_front( + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list, + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[i].entry); + } + + qdf_spinlock_create( + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); + + return QDF_STATUS_SUCCESS; +} + +void wlan_mgmt_txrx_desc_pool_deinit( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx) +{ + uint32_t i; + uint32_t pool_size; + QDF_STATUS status; + + if (!mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool) { + mgmt_txrx_err("Empty mgmt descriptor pool"); + qdf_assert_always(0); + return; + } + + pool_size = mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list.max_size; + for (i = 0; i < pool_size; i++) { + status = qdf_list_remove_node( + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list, + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[i].entry); + if (status != QDF_STATUS_SUCCESS) + mgmt_txrx_err( + "Failed to get mgmt desc from freelist, desc id: %d: status %d", + i, status); + } + + qdf_list_destroy(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list); + qdf_mem_free(mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool); + mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool = NULL; + + qdf_spinlock_destroy( + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); +} + +struct mgmt_txrx_desc_elem_t *wlan_mgmt_txrx_desc_get( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx) +{ + QDF_STATUS status; + qdf_list_node_t *desc_node; + struct mgmt_txrx_desc_elem_t *mgmt_txrx_desc; + + qdf_spin_lock_bh(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); + if (qdf_list_peek_front(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list, + &desc_node) + != QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh( + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); + mgmt_txrx_err("Descriptor freelist empty for mgmt_txrx_ctx %pK", + mgmt_txrx_pdev_ctx); + return NULL; + } + + status = qdf_list_remove_node( + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list, + desc_node); + if (status != QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh( + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); + mgmt_txrx_err("Failed to get descriptor from list: status %d", + status); + qdf_assert_always(0); + } + + mgmt_txrx_desc = qdf_container_of(desc_node, + struct mgmt_txrx_desc_elem_t, + entry); + mgmt_txrx_desc->in_use = true; + + qdf_spin_unlock_bh(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); + + /* acquire the wakelock when there are pending mgmt tx frames */ + qdf_wake_lock_timeout_acquire(&mgmt_txrx_pdev_ctx->wakelock_tx_cmp, + MGMT_TXRX_WAKELOCK_TIMEOUT_TX_CMP); + qdf_runtime_pm_prevent_suspend( + &mgmt_txrx_pdev_ctx->wakelock_tx_runtime_cmp); + + + return mgmt_txrx_desc; +} + +void wlan_mgmt_txrx_desc_put( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx, + uint32_t desc_id) +{ + struct mgmt_txrx_desc_elem_t *desc; + bool release_wakelock = false; + + desc = &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[desc_id]; + qdf_spin_lock_bh(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); + if (!desc->in_use) { + qdf_spin_unlock_bh(&mgmt_txrx_pdev_ctx->mgmt_desc_pool. + desc_pool_lock); + mgmt_txrx_err("desc %d is freed", desc_id); + return; + } + desc->in_use = false; + desc->context = NULL; + desc->peer = NULL; + desc->nbuf = NULL; + desc->tx_dwnld_cmpl_cb = NULL; + desc->tx_ota_cmpl_cb = NULL; + desc->vdev_id = WLAN_UMAC_VDEV_ID_MAX; + + qdf_list_insert_front(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list, + &desc->entry); + + /* release the wakelock if there are no pending mgmt tx frames */ + if (mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list.count == + mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list.max_size) + release_wakelock = true; + + qdf_spin_unlock_bh(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); + + if (release_wakelock) { + qdf_runtime_pm_allow_suspend( + &mgmt_txrx_pdev_ctx->wakelock_tx_runtime_cmp); + qdf_wake_lock_release(&mgmt_txrx_pdev_ctx->wakelock_tx_cmp, + MGMT_TXRX_WAKELOCK_REASON_TX_CMP); + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_main_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_main_i.h new file mode 100644 index 0000000000000000000000000000000000000000..8aea6aff8eb88d004fec7e036b29084ead82dbb5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_main_i.h @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_MGMT_TXRX_MAIN_I_H_ +#define _WLAN_MGMT_TXRX_MAIN_I_H_ + +/** + * DOC: wlan_mgmt_txrx_main_i.h + * + * management tx/rx layer private API and structures + * + */ + +#include "wlan_mgmt_txrx_utils_api.h" +#include "wlan_objmgr_cmn.h" +#include "qdf_list.h" + + +#define IEEE80211_FC0_TYPE_MASK 0x0c +#define IEEE80211_FC0_SUBTYPE_MASK 0xf0 +#define IEEE80211_FC0_TYPE_MGT 0x00 + +/** + * mgmt_wakelock_reason - reasons mgmt_txrx might hold a wakelock + * @MGMT_TXRX_WAKELOCK_REASON_TX_CMP - wait for mgmt_tx_complete event + */ +enum mgmt_txrx_wakelock_reason { + MGMT_TXRX_WAKELOCK_REASON_TX_CMP +}; + +/* timeout to wait for management_tx_complete event from firmware */ +#define MGMT_TXRX_WAKELOCK_TIMEOUT_TX_CMP 300 + +/* + * generic definitions for IEEE 802.11 frames + */ +struct ieee80211_frame { + uint8_t i_fc[2]; + uint8_t i_dur[2]; + union { + struct { + uint8_t i_addr1[QDF_MAC_ADDR_SIZE]; + uint8_t i_addr2[QDF_MAC_ADDR_SIZE]; + uint8_t i_addr3[QDF_MAC_ADDR_SIZE]; + }; + uint8_t i_addr_all[3 * QDF_MAC_ADDR_SIZE]; + }; + uint8_t i_seq[2]; + /* possibly followed by addr4[QDF_MAC_ADDR_SIZE]; */ + /* see below */ +} __packed; + + +/** + * struct mgmt_txrx_desc_elem_t - element in mgmt desc pool linked list + * @entry: list entry + * @tx_dwnld_cmpl_cb: dma completion callback function pointer + * @tx_ota_cmpl_cb: ota completion callback function pointer + * @nbuf: frame buffer + * @desc_id: descriptor id + * @peer: peer who wants to send this frame + * @context: caller component specific context + * @vdev_id: vdev id + * @in_use: flag to denote whether desc is in use + */ +struct mgmt_txrx_desc_elem_t { + qdf_list_node_t entry; + mgmt_tx_download_comp_cb tx_dwnld_cmpl_cb; + mgmt_ota_comp_cb tx_ota_cmpl_cb; + qdf_nbuf_t nbuf; + uint32_t desc_id; + struct wlan_objmgr_peer *peer; + void *context; + uint8_t vdev_id; + bool in_use; +}; + +/** + * struct mgmt_desc_pool_t - linked list mgmt desc pool + * @free_list: linked list of free descriptors + * @pool: pool of descriptors in use + * @desc_pool_lock: mgmt. descriptor free pool spinlock + */ +struct mgmt_desc_pool_t { + qdf_list_t free_list; + struct mgmt_txrx_desc_elem_t *pool; + qdf_spinlock_t desc_pool_lock; +}; + +/** + * struct mgmt_rx_handler - structure for storing rx cb + * @comp_id: component id + * @rx_cb: rx callback for the mgmt. frame + * @next: pointer to next rx cb structure + */ +struct mgmt_rx_handler { + enum wlan_umac_comp_id comp_id; + mgmt_frame_rx_callback rx_cb; + struct mgmt_rx_handler *next; +}; + +/** + * struct txrx_stats - txrx stats for mgmt frames + * @pkts_success: no. of packets successfully txed/rcvd + * @pkts_fail: no. of packets unsuccessfully txed/rcvd + * @bytes_success: no. of bytes successfully txed/rcvd + * @bytes_fail: no. of bytes successfully txed/rcvd + * @assoc_req_rcvd: no. of assoc requests rcvd + * @assoc_rsp_rcvd: no. of assoc responses rcvd + * @reassoc_req_rcvd: no. of reassoc requests rcvd + * @reassoc_rsp_rcvd: no. of reassoc responses rcvd + * @probe_req_rcvd: no. of probe requests rcvd + * @prob_resp_rcvd: no. of probe responses rcvd + * @beacon_rcvd: no. of beacons rcvd + * @atim_rcvd: no. of ATIMs rcvd + * @disassoc_rcvd: no. of disassocs rcvd + * @auth_rcvd: no. of auths rcvd + * @deauth_rcvd: no. of deauths rcvd + * @action_rcvd: no. of action frames rcvd + * @action_no_ack_rcvd: no. of action frames with no ack rcvd + */ +struct txrx_stats { + uint64_t pkts_success; + uint64_t pkts_fail; + uint64_t bytes_success; + uint64_t bytes_fail; + uint64_t assoc_req_rcvd; + uint64_t assoc_rsp_rcvd; + uint64_t reassoc_req_rcvd; + uint64_t reassoc_rsp_rcvd; + uint64_t probe_req_rcvd; + uint64_t prob_resp_rcvd; + uint64_t beacon_rcvd; + uint64_t atim_rcvd; + uint64_t disassoc_rcvd; + uint64_t auth_rcvd; + uint64_t deauth_rcvd; + uint64_t action_rcvd; + uint64_t action_no_ack_rcvd; +}; + +/** + * struct mgmt_txrx_stats_t - mgmt txrx stats + * @mgmt_tx_stats: mgmt tx stats + * @mgmt_rx_stats: mgmt rx stats + * @ota_comp: no. of ota completions rcvd + * @dma_comp: no. of dma completions rcvd + */ +struct mgmt_txrx_stats_t { + struct txrx_stats mgmt_tx_stats; + struct txrx_stats mgmt_rx_stats; + uint64_t ota_comp; + uint64_t dma_comp; +}; + +/** + * struct mgmt_txrx_priv_psoc_context - mgmt txrx private psoc context + * @psoc: psoc context + * @mgmt_rx_comp_cb: array of pointers of mgmt rx cbs + * @mgmt_txrx_psoc_ctx_lock: mgmt txrx psoc ctx lock + */ +struct mgmt_txrx_priv_psoc_context { + struct wlan_objmgr_psoc *psoc; + struct mgmt_rx_handler *mgmt_rx_comp_cb[MGMT_MAX_FRAME_TYPE]; + qdf_spinlock_t mgmt_txrx_psoc_ctx_lock; +}; + +/** + * struct mgmt_txrx_priv_context_dev - mgmt txrx private context + * @pdev: pdev context + * @mgmt_desc_pool: pointer to mgmt desc. pool + * @mgmt_txrx_stats: pointer to mgmt txrx stats + * @wakelock_tx_cmp: mgmt tx complete wake lock + * @wakelock_tx_runtime_cmp: mgmt tx runtime complete wake lock + */ +struct mgmt_txrx_priv_pdev_context { + struct wlan_objmgr_pdev *pdev; + struct mgmt_desc_pool_t mgmt_desc_pool; + struct mgmt_txrx_stats_t *mgmt_txrx_stats; + qdf_wake_lock_t wakelock_tx_cmp; + qdf_runtime_lock_t wakelock_tx_runtime_cmp; +}; + + +/** + * wlan_mgmt_txrx_desc_pool_init() - initializes mgmt. desc. pool + * @mgmt_txrx_pdev_ctx: mgmt txrx pdev context + * + * This function initializes the mgmt descriptor pool. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_desc_pool_init( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx); + +/** + * wlan_mgmt_txrx_desc_pool_deinit() - deinitializes mgmt. desc. pool + * @mgmt_txrx_pdev_ctx: mgmt txrx pdev context + * + * This function deinitializes the mgmt descriptor pool. + * + * Return: void + */ +void wlan_mgmt_txrx_desc_pool_deinit( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx); + +/** + * wlan_mgmt_txrx_desc_get() - gets mgmt. descriptor from freelist + * @mgmt_txrx_pdev_ctx: mgmt txrx pdev context + * + * This function retrieves the mgmt. descriptor for mgmt. tx frames + * from the mgmt. descriptor freelist. + * + * Return: mgmt. descriptor retrieved. + */ +struct mgmt_txrx_desc_elem_t *wlan_mgmt_txrx_desc_get( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx); + +/** + * wlan_mgmt_txrx_desc_put() - puts mgmt. descriptor back in freelist + * @mgmt_txrx_pdev_ctx: mgmt txrx pdev context + * @desc_id: mgmt txrx descriptor id + * + * This function puts the mgmt. descriptor back in to the freelist. + * + * Return: void + */ +void wlan_mgmt_txrx_desc_put( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx, + uint32_t desc_id); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/inc/wlan_mgmt_txrx_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/inc/wlan_mgmt_txrx_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..9505ceac4fc3d9121d5cf644b9da65d5847974c5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/inc/wlan_mgmt_txrx_tgt_api.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_MGMT_TXRX_TGT_API_H_ +#define _WLAN_MGMT_TXRX_TGT_API_H_ + +/** + * DOC: wlan_mgmt_txrx_tgt_api.h + * + * management tx/rx layer public API and structures for + * umac southbound interface. + * + */ + +#include "wlan_objmgr_cmn.h" +#include "wlan_mgmt_txrx_utils_api.h" +#include "qdf_nbuf.h" + + +/** + * tgt_mgmt_txrx_rx_frame_handler() - handles rx mgmt. frames + * @psoc: psoc context + * @buf: buffer + * @mgmt_rx_params: rx event params + * + * This function handles mgmt. rx frames and is registered to southbound + * interface through rx ops. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS tgt_mgmt_txrx_rx_frame_handler( + struct wlan_objmgr_psoc *psoc, + qdf_nbuf_t buf, + struct mgmt_rx_event_params *mgmt_rx_params); + +/** + * tgt_mgmt_txrx_tx_completion_handler() - handles mgmt. tx completions + * @pdev: pdev context + * @desc_id: mgmt desc. id + * @status: status of download of tx packet + * @tx_compl_params: tx completion params + * + * This function handles tx completions of mgmt. frames and is registered to + * LMAC_if layer through lmac_if cbs.The cb needs to free the nbuf. In case no + * callback is registered, this function will free the nbuf. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS tgt_mgmt_txrx_tx_completion_handler( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id, uint32_t status, + void *tx_compl_params); + +/** + * tgt_mgmt_txrx_get_nbuf_from_desc_id() - extracts nbuf from mgmt desc + * @pdev: pdev context + * @desc_id: desc_id + * + * This function extracts nbuf from mgmt desc extracted from desc id. + * + * Return: nbuf - in case of success + * NULL - in case of failure + */ +qdf_nbuf_t tgt_mgmt_txrx_get_nbuf_from_desc_id( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id); + +/** + * tgt_mgmt_txrx_get_peer_from_desc_id() - extracts peer from mgmt desc + * @pdev: pdev context + * @desc_id: desc_id + * + * This function extracts peer from mgmt desc extracted from desc id. + * + * Return: peer - in case of success + * NULL - in case of failure + */ +struct wlan_objmgr_peer * +tgt_mgmt_txrx_get_peer_from_desc_id( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id); + +/** + * tgt_mgmt_txrx_get_vdev_id_from_desc_id() - extracts vdev id from mgmt desc + * @pdev: pdev context + * @desc_id: desc_id + * + * This function extracts vdev id from mgmt desc extracted from desc id. + * + * Return: vdev_id - in case of success + * WLAN_UMAC_VDEV_ID_MAX - in case of failure + */ +uint8_t tgt_mgmt_txrx_get_vdev_id_from_desc_id( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id); + +/** + * tgt_mgmt_txrx_get_free_desc_pool_count() - get free mgmt desc count + * @pdev: pdev context + * + * This function returns the count of free mgmt descriptors. + * + * Return: free descpriptor count + */ +uint32_t tgt_mgmt_txrx_get_free_desc_pool_count( + struct wlan_objmgr_pdev *pdev); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/inc/wlan_mgmt_txrx_utils_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/inc/wlan_mgmt_txrx_utils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..1044b04a7233f769e2f5e5a4559ee9ebdf94dfc2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/inc/wlan_mgmt_txrx_utils_api.h @@ -0,0 +1,968 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_MGMT_TXRX_UTILS_API_H_ +#define _WLAN_MGMT_TXRX_UTILS_API_H_ + +/** + * DOC: wlan_mgmt_txrx_utils_api.h + * + * management tx/rx layer public API and structures + * for umac converged components. + * + */ + +#include "wlan_objmgr_cmn.h" +#include "qdf_nbuf.h" + +#define mgmt_txrx_alert(params...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_MGMT_TXRX, params) +#define mgmt_txrx_err(params...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_MGMT_TXRX, params) +#define mgmt_txrx_warn(params...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_MGMT_TXRX, params) +#define mgmt_txrx_notice(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_MGMT_TXRX, params) +#define mgmt_txrx_info(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_MGMT_TXRX, params) +#define mgmt_txrx_debug(params...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_MGMT_TXRX, params) +#define mgmt_txrx_err_rl(params...) \ + QDF_TRACE_ERROR_RL(QDF_MODULE_ID_MGMT_TXRX, params) +#define mgmt_txrx_debug_rl(params...) \ + QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_MGMT_TXRX, params) + +#define mgmttxrx_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_MGMT_TXRX, params) +#define mgmttxrx_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_MGMT_TXRX, params) +#define mgmttxrx_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_MGMT_TXRX, params) +#define mgmttxrx_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_MGMT_TXRX, params) +#define mgmttxrx_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_MGMT_TXRX, params) + +/** + * enum mgmt_subtype - enum of mgmt. subtypes + * @MGMT_SUBTYPE_ASSOC_REQ: association request frame + * @MGMT_SUBTYPE_ASSOC_RESP: association response frame + * @MGMT_SUBTYPE_REASSOC_REQ: reassociation request frame + * @MGMT_SUBTYPE_REASSOC_RESP: reassociation response frame + * @MGMT_SUBTYPE_PROBE_REQ: probe request frame + * @MGMT_SUBTYPE_PROBE_RESP: probe response frame + * @MGMT_SUBTYPE_BEACON: beacon frame + * @MGMT_SUBTYPE_ATIM: ATIM frame + * @MGMT_SUBTYPE_DISASSOC: disassociation frame + * @MGMT_SUBTYPE_AUTH: authentication frame + * @MGMT_SUBTYPE_DEAUTH: deauthentication frame + * @MGMT_SUBTYPE_ACTION: action frame + * @MGMT_SUBTYPE_ACTION_NO_ACK: action no ack frame + */ +enum mgmt_subtype { + MGMT_SUBTYPE_ASSOC_REQ = 0x00, + MGMT_SUBTYPE_ASSOC_RESP = 0x10, + MGMT_SUBTYPE_REASSOC_REQ = 0x20, + MGMT_SUBTYPE_REASSOC_RESP = 0x30, + MGMT_SUBTYPE_PROBE_REQ = 0x40, + MGMT_SUBTYPE_PROBE_RESP = 0x50, + MGMT_SUBTYPE_BEACON = 0x80, + MGMT_SUBTYPE_ATIM = 0x90, + MGMT_SUBTYPE_DISASSOC = 0xa0, + MGMT_SUBTYPE_AUTH = 0xb0, + MGMT_SUBTYPE_DEAUTH = 0xc0, + MGMT_SUBTYPE_ACTION = 0xd0, + MGMT_SUBTYPE_ACTION_NO_ACK = 0xe0, +}; + +/** + * enum mgmt_action_category - mgmt. action categories + * @ACTION_CATEGORY_SPECTRUM_MGMT: spectrum mgmt. action category + * @ACTION_CATEGORY_QOS: qos action category + * @ACTION_CATEGORY_DLS: dls action category + * @ACTION_CATEGORY_BACK: block ack action category + * @ACTION_CATEGORY_PUBLIC: public action category + * @ACTION_CATEGORY_RRM: rrm action category + * @ACTION_FAST_BSS_TRNST: trnst action category + * @ACTION_CATEGORY_HT: ht actipon category + * @ACTION_CATEGORY_SA_QUERY: sa query action category + * @ACTION_CATEGORY_PROTECTED_DUAL_OF_PUBLIC_ACTION: protected + * public action category + * @ACTION_CATEGORY_WNM: wnm action category + * @ACTION_CATEGORY_WNM_UNPROTECTED: wnm protected action category + * @ACTION_CATEGORY_TDLS: tdls action category + * @ACTION_CATEGORY_MESH_ACTION: mesh action category + * @ACTION_CATEGORY_MULTIHOP_ACTION: multihop action category + * @ACTION_CATEGORY_SELF_PROTECTED: self protected action category + * @ACTION_CATEGORY_DMG: unprotected dmg action category + * @ACTION_CATEGORY_WMM: wmm action category + * @ACTION_CATEGORY_FST: fst action category + * @ACTION_CATEGORY_UNPROT_DMG: dmg action category + * @ACTION_CATEGORY_VHT: vht action category + * @ACTION_CATEGORY_VENDOR_SPECIFIC_PROTECTED: vendor specific protected + * action category + * @ACTION_CATEGORY_VENDOR_SPECIFIC: vendor specific action category + */ +enum mgmt_action_category { + ACTION_CATEGORY_SPECTRUM_MGMT = 0, + ACTION_CATEGORY_QOS = 1, + ACTION_CATEGORY_DLS = 2, + ACTION_CATEGORY_BACK = 3, + ACTION_CATEGORY_PUBLIC = 4, + ACTION_CATEGORY_RRM = 5, + ACTION_FAST_BSS_TRNST = 6, + ACTION_CATEGORY_HT = 7, + ACTION_CATEGORY_SA_QUERY = 8, + ACTION_CATEGORY_PROTECTED_DUAL_OF_PUBLIC_ACTION = 9, + ACTION_CATEGORY_WNM = 10, + ACTION_CATEGORY_WNM_UNPROTECTED = 11, + ACTION_CATEGORY_TDLS = 12, + ACTION_CATEGORY_MESH_ACTION = 13, + ACTION_CATEGORY_MULTIHOP_ACTION = 14, + ACTION_CATEGORY_SELF_PROTECTED = 15, + ACTION_CATEGORY_DMG = 16, + ACTION_CATEGORY_WMM = 17, + ACTION_CATEGORY_FST = 18, + ACTION_CATEGORY_UNPROT_DMG = 20, + ACTION_CATEGORY_VHT = 21, + ACTION_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, + ACTION_CATEGORY_VENDOR_SPECIFIC = 127, +}; + +/** + * enum spectrum_mgmt_actioncode - spectrum mgmt. action frms + * @ACTION_SPCT_MSR_REQ: spectrum measurement request frame + * @ACTION_SPCT_MSR_RPRT: spectrum measurement report frame + * @ACTION_SPCT_TPC_REQ: spectrum tpc request frame + * @ACTION_SPCT_TPC_RPRT: spectrum tpc report frame + * @ACTION_SPCT_CHL_SWITCH: spectrum channel switch frame + */ +enum spectrum_mgmt_actioncode { + ACTION_SPCT_MSR_REQ, + ACTION_SPCT_MSR_RPRT, + ACTION_SPCT_TPC_REQ, + ACTION_SPCT_TPC_RPRT, + ACTION_SPCT_CHL_SWITCH, +}; + +/** + * enum qos_actioncode - qos action frames + * @QOS_ADD_TS_REQ: qos add ts request frame + * @QOS_ADD_TS_RSP: qos add ts response frame + * @QOS_DEL_TS_REQ: qos del ts request frame + * @QOS_SCHEDULE: qos schecule frame + * @QOS_MAP_CONFIGURE: qos map configure frame + */ +enum qos_actioncode { + QOS_ADD_TS_REQ, + QOS_ADD_TS_RSP, + QOS_DEL_TS_REQ, + QOS_SCHEDULE, + QOS_MAP_CONFIGURE, +}; + +/** + * enum dls_actioncode - dls action frames + * @DLS_REQUEST: dls request frame + * @DLS_RESPONSE: dls response frame + * @DLS_TEARDOWN: dls teardown frame + */ +enum dls_actioncode { + DLS_REQUEST, + DLS_RESPONSE, + DLS_TEARDOWN, +}; + +/** + * enum block_ack_actioncode - block ack action frames + * @ADDBA_REQUEST: add block ack request frame + * @ADDBA_RESPONSE: add block ack response frame + * @DELBA: delete block ack frame + */ +enum block_ack_actioncode { + ADDBA_REQUEST, + ADDBA_RESPONSE, + DELBA, +}; + +/** + * enum pub_actioncode - public action frames + * @PUB_ACTION_2040_BSS_COEXISTENCE: public 20-40 bss coex action frame + * @PUB_ACTION_EXT_CHANNEL_SWITCH_ID: public ext channel switch id action frame + * @PUB_ACTION_VENDOR_SPECIFIC: vendor specific public action frame + * @PUB_ACTION_GAS_INITIAL_REQUEST: GAS initial request action frame + * @PUB_ACTION_GAS_INITIAL_RESPONSE: GAS initial response action frame + * @PUB_ACTION_GAS_COMEBACK_REQUEST: GAS comeback request action frame + * @PUB_ACTION_GAS_COMEBACK_RESPONSE: GAS comeback respose action frame + * @PUB_ACTION_TDLS_DISCRESP: tdls discovery response public action frame + */ +enum pub_actioncode { + PUB_ACTION_2040_BSS_COEXISTENCE = 0, + PUB_ACTION_EXT_CHANNEL_SWITCH_ID = 4, + PUB_ACTION_VENDOR_SPECIFIC = 9, + PUB_ACTION_GAS_INITIAL_REQUEST = 10, + PUB_ACTION_GAS_INITIAL_RESPONSE = 11, + PUB_ACTION_GAS_COMEBACK_REQUEST = 12, + PUB_ACTION_GAS_COMEBACK_RESPONSE = 13, + PUB_ACTION_TDLS_DISCRESP = 14, +}; + +/** + * enum rrm_actioncode - rrm action frames + * @RRM_RADIO_MEASURE_REQ: rrm radio meas. request frame + * @RRM_RADIO_MEASURE_RPT: rrm radio meas. report frame + * @RRM_LINK_MEASUREMENT_REQ: rrm link meas. request frmae + * @RRM_LINK_MEASUREMENT_RPT: rrm link meas. report frame + * @RRM_NEIGHBOR_REQ: rrm neighbor request frame + * @RRM_NEIGHBOR_RPT: rrm neighbor report frame + */ +enum rrm_actioncode { + RRM_RADIO_MEASURE_REQ, + RRM_RADIO_MEASURE_RPT, + RRM_LINK_MEASUREMENT_REQ, + RRM_LINK_MEASUREMENT_RPT, + RRM_NEIGHBOR_REQ, + RRM_NEIGHBOR_RPT, +}; + +/** + * enum ft_actioncode - ft action frames + * @FT_FAST_BSS_TRNST_REQ: ft request frame + * @FT_FAST_BSS_TRNST_RES: ft response frame + * @FT_FAST_BSS_TRNST_CONFIRM: ft confirm frame + * @FT_FAST_BSS_TRNST_ACK: ft ACK frame + */ +enum ft_actioncode { + FT_FAST_BSS_TRNST_REQ = 1, + FT_FAST_BSS_TRNST_RES, + FT_FAST_BSS_TRNST_CONFIRM, + FT_FAST_BSS_TRNST_ACK, +}; + +/** + * enum ht_actioncode - ht action frames + * @HT_ACTION_NOTIFY_CHANWIDTH: ht notify bw action frame + * @HT_ACTION_SMPS: ht smps action frame + * @HT_ACTION_PSMP: ht psmp action frame + * @HT_ACTION_PCO_PHASE: ht pco phase action frame + * @HT_ACTION_CSI: ht csi action frame + * @HT_ACTION_NONCOMPRESSED_BF: ht noncompressed bf action frame + * @HT_ACTION_COMPRESSED_BF: ht compressed bf action frame + * @HT_ACTION_ASEL_IDX_FEEDBACK: ht asel idx feedback action frame + */ +enum ht_actioncode { + HT_ACTION_NOTIFY_CHANWIDTH, + HT_ACTION_SMPS, + HT_ACTION_PSMP, + HT_ACTION_PCO_PHASE, + HT_ACTION_CSI, + HT_ACTION_NONCOMPRESSED_BF, + HT_ACTION_COMPRESSED_BF, + HT_ACTION_ASEL_IDX_FEEDBACK, +}; + +/** + * enum sa_query_action - sa query action frames + * @SA_QUERY_REQUEST: sa query request frame + * @SA_QUERY_RESPONSE: sa query response frame + */ +enum sa_query_action { + SA_QUERY_REQUEST, + SA_QUERY_RESPONSE, +}; + +/** + * enum protected_dual_actioncode - protected dual action frames + * @PDPA_GAS_INIT_REQ: pdpa gas init request frame + * @PDPA_GAS_INIT_RSP: pdpa gas init response frame + * @PDPA_GAS_COMEBACK_REQ: pdpa gas comeback request frame + * @PDPA_GAS_COMEBACK_RSP: pdpa gas comeback response frame + */ +enum protected_dual_actioncode { + PDPA_GAS_INIT_REQ = 10, + PDPA_GAS_INIT_RSP = 11, + PDPA_GAS_COMEBACK_REQ = 12, + PDPA_GAS_COMEBACK_RSP = 13, +}; + +/** + * enum wnm_actioncode - wnm action frames + * @WNM_BSS_TM_QUERY: wnm bss tm query frame + * @WNM_BSS_TM_REQUEST: wnm bss tm request frame + * @WNM_BSS_TM_RESPONSE: wnm bss tm response frame + * @WNM_FMS_REQ: wnm fms request frame + * @WNM_FMS_RESP: wnm fms response frame + * @WNM_TFS_REQ: wnm tfs request frame + * @WNM_TFS_RESP: wnm tfs response frame + * @WNM_TFS_NOTIFY: wnm tfs notify frame + * @WNM_SLEEP_REQ: wnm sleep request frame + * @WNM_SLEEP_RESP: wnm sleep response frame + * @WNM_TIM_REQ: wnm Tim broadcast request frame + * @WNM_TIM_RESP: wnm Tim broadcast response frame + * @WNM_NOTIF_REQUEST: wnm notify request frame + * @WNM_NOTIF_RESPONSE: wnm notify response frame + */ +enum wnm_actioncode { + WNM_BSS_TM_QUERY = 6, + WNM_BSS_TM_REQUEST = 7, + WNM_BSS_TM_RESPONSE = 8, + WNM_FMS_REQ = 9, + WNM_FMS_RESP = 10, + WNM_TFS_REQ = 13, + WNM_TFS_RESP = 14, + WNM_TFS_NOTIFY = 15, + WNM_SLEEP_REQ = 16, + WNM_SLEEP_RESP = 17, + WNM_TIM_REQ = 18, + WNM_TIM_RESP = 19, + WNM_NOTIF_REQUEST = 26, + WNM_NOTIF_RESPONSE = 27, +}; + +/** + * enum tdls_actioncode - tdls action frames + * @TDLS_SETUP_REQUEST: tdls setup request frame + * @TDLS_SETUP_RESPONSE: tdls setup response frame + * @TDLS_SETUP_CONFIRM: tdls setup confirm frame + * @TDLS_TEARDOWN: tdls teardown frame + * @TDLS_PEER_TRAFFIC_INDICATION: tdls peer traffic indication frame + * @TDLS_CHANNEL_SWITCH_REQUEST: tdls channel switch req. frame + * @TDLS_CHANNEL_SWITCH_RESPONSE: tdls channel switch response frame + * @TDLS_PEER_PSM_REQUEST: tdls peer psm request frame + * @TDLS_PEER_PSM_RESPONSE: tdls peer psm response frame + * @TDLS_PEER_TRAFFIC_RESPONSE: tdls peer traffic response frame + * @TDLS_DISCOVERY_REQUEST: tdls discovery request frame + */ +enum tdls_actioncode { + TDLS_SETUP_REQUEST = 0, + TDLS_SETUP_RESPONSE = 1, + TDLS_SETUP_CONFIRM = 2, + TDLS_TEARDOWN = 3, + TDLS_PEER_TRAFFIC_INDICATION = 4, + TDLS_CHANNEL_SWITCH_REQUEST = 5, + TDLS_CHANNEL_SWITCH_RESPONSE = 6, + TDLS_PEER_PSM_REQUEST = 7, + TDLS_PEER_PSM_RESPONSE = 8, + TDLS_PEER_TRAFFIC_RESPONSE = 9, + TDLS_DISCOVERY_REQUEST = 10, + TDLS_DISCOVERY_RESPONSE = 14, +}; + +/** + * enum mesh_actioncode - mesh action frames + * @MESH_ACTION_LINK_METRIC_REPORT: mesh link metric report action frame + * @MESH_ACTION_HWMP_PATH_SELECTION: mesh hwmp path selection action frame + * @MESH_ACTION_GATE_ANNOUNCEMENT: mesh gate announcement action frame + * @MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION: mesh congestion control frame + * @MESH_ACTION_MCCA_SETUP_REQUEST: mesh mcca setup request action frame + * @MESH_ACTION_MCCA_SETUP_REPLY: mesh mcca setup reply action frame + * @MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST: mesh mcca advertisement req. frame + * @MESH_ACTION_MCCA_ADVERTISEMENT: mesh mcca advertisement action frame + * @MESH_ACTION_MCCA_TEARDOWN: mesh mcca teardown action frame + * @MESH_ACTION_TBTT_ADJUSTMENT_REQUEST: mesh tbtt adjustment req. frame + * @MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE: mesh tbtt adjustment rsp. frame + */ +enum mesh_actioncode { + MESH_ACTION_LINK_METRIC_REPORT, + MESH_ACTION_HWMP_PATH_SELECTION, + MESH_ACTION_GATE_ANNOUNCEMENT, + MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION, + MESH_ACTION_MCCA_SETUP_REQUEST, + MESH_ACTION_MCCA_SETUP_REPLY, + MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST, + MESH_ACTION_MCCA_ADVERTISEMENT, + MESH_ACTION_MCCA_TEARDOWN, + MESH_ACTION_TBTT_ADJUSTMENT_REQUEST, + MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE, +}; + +/** + * enum self_protected_actioncode - self protected action frames + * @SP_RESERVED: self protected reserved + * @SP_MESH_PEERING_OPEN: self protected mesh peering open frame + * @SP_MESH_PEERING_CONFIRM: self protected mesh peering confirm frame + * @SP_MESH_PEERING_CLOSE: self protected mesh peering close frame + * @SP_MGK_INFORM: self protected mgk inform frame + * @SP_MGK_ACK: self protected mgk ack frame + */ +enum self_protected_actioncode { + SP_RESERVED, + SP_MESH_PEERING_OPEN, + SP_MESH_PEERING_CONFIRM, + SP_MESH_PEERING_CLOSE, + SP_MGK_INFORM, + SP_MGK_ACK, +}; + +/** + * enum wmm_actioncode - wmm action frames + * @WMM_QOS_SETUP_REQ: wmm qos setup request frame + * @WMM_QOS_SETUP_RESP: q wmm qos setup response frame + * @WMM_QOS_TEARDOWN: wmm qos teardown frame + */ +enum wmm_actioncode { + WMM_QOS_SETUP_REQ, + WMM_QOS_SETUP_RESP, + WMM_QOS_TEARDOWN, +}; + +/** + * enum fst_actioncode - fst action frames + * @FST_SETUP_REQ: fst setup request frame + * @FST_SETUP_RSP: fst setup response frame + * @FST_TEAR_DOWN: fst qos teardown frame + * @FST_ACK_REQ: fst ack frame for request + * @FST_ACK_RSP: fst ack frame for response + * @FST_ON_CHANNEL_TUNNEL: fst on channel tunnel frame + */ +enum fst_actioncode { + FST_SETUP_REQ, + FST_SETUP_RSP, + FST_TEAR_DOWN, + FST_ACK_REQ, + FST_ACK_RSP, + FST_ON_CHANNEL_TUNNEL, +}; + +/** + * enum vht_actioncode - vht action frames + * @VHT_ACTION_COMPRESSED_BF: vht compressed bf action frame + * @VHT_ACTION_GID_NOTIF: vht gid notification action frame + * @VHT_ACTION_OPMODE_NOTIF: vht opmode notification action frame + */ +enum vht_actioncode { + VHT_ACTION_COMPRESSED_BF, + VHT_ACTION_GID_NOTIF, + VHT_ACTION_OPMODE_NOTIF, +}; + +/** + * struct action_frm_hdr - action frame header + * @action_category: action category + * @action_code: action code + */ +struct action_frm_hdr { + uint8_t action_category; + uint8_t action_code; +}; + +/** + * enum mgmt_frame_type - enum of mgmt. frames + * @MGMT_FRM_UNSPECIFIED: unspecified + * @MGMT_ASSOC_REQ: association request frame + * @MGMT_ASSOC_RESP: association response frame + * @MGMT_REASSOC_REQ: reassociation request frame + * @MGMT_REASSOC_RESP: reassociation response frame + * @MGMT_PROBE_REQ: probe request frame + * @MGMT_PROBE_RESP: probe response frame + * @MGMT_BEACON: beacon frame + * @MGMT_ATIM: ATIM frame + * @MGMT_DISASSOC: disassociation frame + * @MGMT_AUTH: authentication frame + * @MGMT_DEAUTH: deauthentication frame + * @MGMT_ACTION_MEAS_REQUEST: measure channels request action frame + * @MGMT_ACTION_MEAS_REPORT: measure channels response action frame + * @MGMT_ACTION_TPC_REQUEST: transmit power control request action frame + * @MGMT_ACTION_TPC_REPORT: transmit power control response action frame + * @MGMT_ACTION_CHAN_SWITCH: 802.11 channel switch announcement frame + * @MGMT_ACTION_QOS_ADD_TS_REQ: qos add ts request frame + * @MGMT_ACTION_QOS_ADD_TS_RSP: qos add ts response frame + * @MGMT_ACTION_QOS_DEL_TS_REQ: qos del ts request frame + * @MGMT_ACTION_QOS_SCHEDULE: qos schedule frame + * @MGMT_ACTION_QOS_MAP_CONFIGURE: qos map configure frame + * @MGMT_ACTION_DLS_REQUEST: DLS request action frame + * @MGMT_ACTION_DLS_RESPONSE: DLS response action frame + * @MGMT_ACTION_DLS_TEARDOWN: DLS taerdown action frame + * @MGMT_ACTION_BA_ADDBA_REQUEST: ADDBA request action frame + * @MGMT_ACTION_BA_ADDBA_RESPONSE: ADDBA response action frame + * @MGMT_ACTION_BA_DELBA: DELBA action frame + * @MGMT_ACTION_2040_BSS_COEXISTENCE: 20-40 bss coex action frame + * @MGMT_ACTION_CATEGORY_VENDOR_SPECIFIC: category vendor spcific action frame + * @MGMT_ACTION_EXT_CHANNEL_SWITCH_ID: ext channel switch id action frame + * @MGMT_ACTION_VENDOR_SPECIFIC: vendor specific action frame + * @MGMT_ACTION_TDLS_DISCRESP: TDLS discovery response frame + * @MGMT_ACTION_RRM_RADIO_MEASURE_REQ: rrm radio meas. req. action frame + * @MGMT_ACTION_RRM_RADIO_MEASURE_RPT: rrm radio meas. report action frame + * @MGMT_ACTION_RRM_LINK_MEASUREMENT_REQ: rrm link meas. req. action frame + * @MGMT_ACTION_RRM_LINK_MEASUREMENT_RPT: rrm link meas. report action frame + * @MGMT_ACTION_RRM_NEIGHBOR_REQ: rrm neighbor request action frame + * @MGMT_ACTION_RRM_NEIGHBOR_RPT: rrm neighbor response action frame + * @MGMT_ACTION_HT_NOTIFY_CHANWIDTH: notify channel width action frame + * @MGMT_ACTION_HT_SMPS: spatial multiplexing power save action frame + * @MGMT_ACTION_HT_PSMP: psmp action frame + * @MGMT_ACTION_HT_PCO_PHASE: pco phase action frame + * @MGMT_ACTION_HT_CSI: CSI action frame + * @MGMT_ACTION_HT_NONCOMPRESSED_BF: non-compressed beamforming action frame + * @MGMT_ACTION_HT_COMPRESSED_BF: compressed beamforming action frame + * @MGMT_ACTION_HT_ASEL_IDX_FEEDBACK: asel idx feedback action frame + * @MGMT_ACTION_SA_QUERY_REQUEST: SA query request frame + * @MGMT_ACTION_SA_QUERY_RESPONSE: SA query response frame + * @MGMT_ACTION_PDPA_GAS_INIT_REQ: pdpa gas init request action frame + * @MGMT_ACTION_PDPA_GAS_INIT_RSP: pdpa gas init response frame + * @MGMT_ACTION_PDPA_GAS_COMEBACK_REQ: pdpa gas comeback req. action frame + * @MGMT_ACTION_PDPA_GAS_COMEBACK_RSP: pdpa gas comeback rsp. action frame + * @MGMT_ACTION_WNM_BSS_TM_QUERY: wnm bss tm query action frame + * @MGMT_ACTION_WNM_BSS_TM_REQUEST: wnm bss tm request action frame + * @MGMT_ACTION_WNM_BSS_TM_RESPONSE: wnm bss tm response action frame + * @MGMT_ACTION_WNM_NOTIF_REQUEST: wnm notification request action frame + * @MGMT_ACTION_WNM_NOTIF_RESPONSE: wnm notification response action frame + * @MGMT_ACTION_WNM_FMS_REQ: wnm fms request frame + * @MGMT_ACTION_WNM_FMS_RESP: wnm fms response frame + * @MGMT_ACTION_WNM_TFS_REQ: wnm tfs request frame + * @MGMT_ACTION_WNM_TFS_RESP: wnm tfs response frame + * @MGMT_ACTION_WNM_TFS_NOTIFY: wnm tfs notify frame + * @MGMT_ACTION_WNM_SLEEP_REQ: wnm sleep request frame + * @MGMT_ACTION_WNM_SLEEP_RESP: wnm sleep response frame + * @MGMT_ACTION_WNM_TIM_REQ: wnm Tim broadcast request frame + * @MGMT_ACTION_WNM_TIM_RESP: wnm Tim broadcast response frame + * @MGMT_ACTION_TDLS_SETUP_REQ: tdls setup request action frame + * @MGMT_ACTION_TDLS_SETUP_RSP: tdls setup response frame + * @MGMT_ACTION_TDLS_SETUP_CNF: tdls setup confirm frame + * @MGMT_ACTION_TDLS_TEARDOWN: tdls teardown frame + * @MGMT_ACTION_TDLS_PEER_TRAFFIC_IND: tdls peer traffic indication frame + * @MGMT_ACTION_TDLS_CH_SWITCH_REQ: tdls channel switch req. frame + * @MGMT_ACTION_TDLS_CH_SWITCH_RSP: tdls channel switch response frame + * @MGMT_ACTION_TDLS_PEER_PSM_REQUEST: tdls peer psm request frame + * @MGMT_ACTION_TDLS_PEER_PSM_RESPONSE: tdls peer psm response frame + * @MGMT_ACTION_TDLS_PEER_TRAFFIC_RSP: tdls peer traffic response frame + * @MGMT_ACTION_TDLS_DIS_REQ: tdls discovery request frame + * @MGMT_ACTION_MESH_LINK_METRIC_REPORT: mesh link metric report action frame + * @MGMT_ACTION_MESH_HWMP_PATH_SELECTION: mesh hwmp path selection action frame + * @MGMT_ACTION_MESH_GATE_ANNOUNCEMENT: mesh gate announcement action frame + * @MGMT_ACTION_MESH_CONGESTION_CONTROL_NOTIFICATION: mesh congestion control + * @MGMT_ACTION_MESH_MCCA_SETUP_REQUEST: mesh mcca setup request action frame + * @MGMT_ACTION_MESH_MCCA_SETUP_REPLY: mesh mcca setup reply action frame + * @MGMT_ACTION_MESH_MCCA_ADVERTISEMENT_REQUEST: mesh mcca advertisement req. + * @MGMT_ACTION_MESH_MCCA_ADVERTISEMENT: mesh mcca advertisement action frame + * @MGMT_ACTION_MESH_MCCA_TEARDOWN: mesh mcca teardown action fram + * @MGMT_ACTION_MESH_TBTT_ADJUSTMENT_REQUEST: mesh tbtt adjustment req. frame + * @MGMT_ACTION_MESH_TBTT_ADJUSTMENT_RESPONSE: mesh tbtt adjustment rsp. frame + * @MGMT_ACTION_SP_MESH_PEERING_OPEN: self protected mesh peering open frame + * @MGMT_ACTION_SP_MESH_PEERING_CONFIRM: self protected mesh peering confirm + * @MGMT_ACTION_SP_MESH_PEERING_CLOSE: self protected mesh peering close frame + * @MGMT_ACTION_SP_MGK_INFORM: self protected mgk inform frame + * @MGMT_ACTION_SP_MGK_ACK: self protected mgk ack frame + * @MGMT_ACTION_WMM_QOS_SETUP_REQ: WMM qos setup request action frame + * @MGMT_ACTION_WMM_QOS_SETUP_RESP: WMM qos setup response action frame + * @MGMT_ACTION_WMM_QOS_TEARDOWN: WMM qos teardown action frame + * @MGMT_ACTION_VHT_COMPRESSED_BF: vht compressed bf action frame + * @MGMT_ACTION_VHT_GID_NOTIF: vht gid notification action frame + * @MGMT_ACTION_VHT_OPMODE_NOTIF: vht opmode notification action frame + * @MGMT_ACTION_GAS_INITIAL_REQUEST: GAS Initial request action frame + * @MGMT_ACTION_GAS_INITIAL_RESPONSE: GAS Initial response action frame + * @MGMT_ACTION_GAS_COMEBACK_REQUEST: GAS Comeback request action frame + * @MGMT_ACTION_GAS_COMEBACK_RESPONSE: GAS Comeback response action frame + * @MGMT_ACTION_FST_SETUP_REQ: FST setup request frame + * @MGMT_ACTION_FST_SETUP_RSPA: FST setup response frame + * @MGMT_ACTION_FST_TEAR_DOWN: FST qos teardown frame + * @MGMT_ACTION_FST_ACK_REQ: FST ack frame for request + * @MGMT_ACTION_FST_ACK_RSP: FST ack frame for response + * @MGMT_ACTION_FST_ON_CHANNEL_TUNNEL: FST on channel tunnel frame + * @MGMT_FRAME_TYPE_ALL: mgmt frame type for all type of frames + * @MGMT_MAX_FRAME_TYPE: max. mgmt frame types + */ +enum mgmt_frame_type { + MGMT_FRM_UNSPECIFIED = -1, + MGMT_ASSOC_REQ, + MGMT_ASSOC_RESP, + MGMT_REASSOC_REQ, + MGMT_REASSOC_RESP, + MGMT_PROBE_REQ, + MGMT_PROBE_RESP, + MGMT_BEACON, + MGMT_ATIM, + MGMT_DISASSOC, + MGMT_AUTH, + MGMT_DEAUTH, + MGMT_ACTION_MEAS_REQUEST, + MGMT_ACTION_MEAS_REPORT, + MGMT_ACTION_TPC_REQUEST, + MGMT_ACTION_TPC_REPORT, + MGMT_ACTION_CHAN_SWITCH, + MGMT_ACTION_QOS_ADD_TS_REQ, + MGMT_ACTION_QOS_ADD_TS_RSP, + MGMT_ACTION_QOS_DEL_TS_REQ, + MGMT_ACTION_QOS_SCHEDULE, + MGMT_ACTION_QOS_MAP_CONFIGURE, + MGMT_ACTION_DLS_REQUEST, + MGMT_ACTION_DLS_RESPONSE, + MGMT_ACTION_DLS_TEARDOWN, + MGMT_ACTION_BA_ADDBA_REQUEST, + MGMT_ACTION_BA_ADDBA_RESPONSE, + MGMT_ACTION_BA_DELBA, + MGMT_ACTION_2040_BSS_COEXISTENCE, + MGMT_ACTION_CATEGORY_VENDOR_SPECIFIC, + MGMT_ACTION_EXT_CHANNEL_SWITCH_ID, + MGMT_ACTION_VENDOR_SPECIFIC, + MGMT_ACTION_TDLS_DISCRESP, + MGMT_ACTION_RRM_RADIO_MEASURE_REQ, + MGMT_ACTION_RRM_RADIO_MEASURE_RPT, + MGMT_ACTION_RRM_LINK_MEASUREMENT_REQ, + MGMT_ACTION_RRM_LINK_MEASUREMENT_RPT, + MGMT_ACTION_RRM_NEIGHBOR_REQ, + MGMT_ACTION_RRM_NEIGHBOR_RPT, + MGMT_ACTION_FT_REQUEST, + MGMT_ACTION_FT_RESPONSE, + MGMT_ACTION_FT_CONFIRM, + MGMT_ACTION_FT_ACK, + MGMT_ACTION_HT_NOTIFY_CHANWIDTH, + MGMT_ACTION_HT_SMPS, + MGMT_ACTION_HT_PSMP, + MGMT_ACTION_HT_PCO_PHASE, + MGMT_ACTION_HT_CSI, + MGMT_ACTION_HT_NONCOMPRESSED_BF, + MGMT_ACTION_HT_COMPRESSED_BF, + MGMT_ACTION_HT_ASEL_IDX_FEEDBACK, + MGMT_ACTION_SA_QUERY_REQUEST, + MGMT_ACTION_SA_QUERY_RESPONSE, + MGMT_ACTION_PDPA_GAS_INIT_REQ, + MGMT_ACTION_PDPA_GAS_INIT_RSP, + MGMT_ACTION_PDPA_GAS_COMEBACK_REQ, + MGMT_ACTION_PDPA_GAS_COMEBACK_RSP, + MGMT_ACTION_WNM_BSS_TM_QUERY, + MGMT_ACTION_WNM_BSS_TM_REQUEST, + MGMT_ACTION_WNM_BSS_TM_RESPONSE, + MGMT_ACTION_WNM_NOTIF_REQUEST, + MGMT_ACTION_WNM_NOTIF_RESPONSE, + MGMT_ACTION_WNM_FMS_REQ, + MGMT_ACTION_WNM_FMS_RESP, + MGMT_ACTION_WNM_TFS_REQ, + MGMT_ACTION_WNM_TFS_RESP, + MGMT_ACTION_WNM_TFS_NOTIFY, + MGMT_ACTION_WNM_SLEEP_REQ, + MGMT_ACTION_WNM_SLEEP_RESP, + MGMT_ACTION_WNM_TIM_REQ, + MGMT_ACTION_WNM_TIM_RESP, + MGMT_ACTION_TDLS_SETUP_REQ, + MGMT_ACTION_TDLS_SETUP_RSP, + MGMT_ACTION_TDLS_SETUP_CNF, + MGMT_ACTION_TDLS_TEARDOWN, + MGMT_ACTION_TDLS_PEER_TRAFFIC_IND, + MGMT_ACTION_TDLS_CH_SWITCH_REQ, + MGMT_ACTION_TDLS_CH_SWITCH_RSP, + MGMT_ACTION_TDLS_PEER_PSM_REQUEST, + MGMT_ACTION_TDLS_PEER_PSM_RESPONSE, + MGMT_ACTION_TDLS_PEER_TRAFFIC_RSP, + MGMT_ACTION_TDLS_DIS_REQ, + MGMT_ACTION_MESH_LINK_METRIC_REPORT, + MGMT_ACTION_MESH_HWMP_PATH_SELECTION, + MGMT_ACTION_MESH_GATE_ANNOUNCEMENT, + MGMT_ACTION_MESH_CONGESTION_CONTROL_NOTIFICATION, + MGMT_ACTION_MESH_MCCA_SETUP_REQUEST, + MGMT_ACTION_MESH_MCCA_SETUP_REPLY, + MGMT_ACTION_MESH_MCCA_ADVERTISEMENT_REQUEST, + MGMT_ACTION_MESH_MCCA_ADVERTISEMENT, + MGMT_ACTION_MESH_MCCA_TEARDOWN, + MGMT_ACTION_MESH_TBTT_ADJUSTMENT_REQUEST, + MGMT_ACTION_MESH_TBTT_ADJUSTMENT_RESPONSE, + MGMT_ACTION_SP_MESH_PEERING_OPEN, + MGMT_ACTION_SP_MESH_PEERING_CONFIRM, + MGMT_ACTION_SP_MESH_PEERING_CLOSE, + MGMT_ACTION_SP_MGK_INFORM, + MGMT_ACTION_SP_MGK_ACK, + MGMT_ACTION_WMM_QOS_SETUP_REQ, + MGMT_ACTION_WMM_QOS_SETUP_RESP, + MGMT_ACTION_WMM_QOS_TEARDOWN, + MGMT_ACTION_VHT_COMPRESSED_BF, + MGMT_ACTION_VHT_GID_NOTIF, + MGMT_ACTION_VHT_OPMODE_NOTIF, + MGMT_ACTION_GAS_INITIAL_REQUEST, + MGMT_ACTION_GAS_INITIAL_RESPONSE, + MGMT_ACTION_GAS_COMEBACK_REQUEST, + MGMT_ACTION_GAS_COMEBACK_RESPONSE, + MGMT_ACTION_FST_SETUP_REQ, + MGMT_ACTION_FST_SETUP_RSP, + MGMT_ACTION_FST_TEAR_DOWN, + MGMT_ACTION_FST_ACK_REQ, + MGMT_ACTION_FST_ACK_RSP, + MGMT_ACTION_FST_ON_CHANNEL_TUNNEL, + MGMT_FRAME_TYPE_ALL, + MGMT_MAX_FRAME_TYPE, +}; + +#define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4 +#define WLAN_INVALID_PER_CHAIN_RSSI 0xFF +#define WLAN_INVALID_PER_CHAIN_SNR 0x80 +#define WLAN_NOISE_FLOOR_DBM_DEFAULT -96 +/** + * struct mgmt_rx_event_params - host mgmt header params + * @chan_freq: channel frequency on which this frame is received + * @channel: channel on which this frame is received + * @snr: snr information used to call rssi + * @rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA]: RSSI of PRI 20MHz for each chain + * @rate: Rate kbps + * @phy_mode: rx phy mode + * @buf_len: length of the frame + * @status: rx status + * @flags: information about the management frame e.g. can give a + * scan source for a scan result mgmt frame + * @rssi: combined RSSI, i.e. the sum of the snr + noise floor (dBm units) + * @tsf_delta: tsf delta + * @pdev_id: pdev id + * @rx_params: pointer to other rx params + * (win specific, will be removed in phase 4) + */ +struct mgmt_rx_event_params { + uint32_t chan_freq; + uint32_t channel; + uint32_t snr; + uint8_t rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA]; + uint32_t rate; + enum wlan_phymode phy_mode; + uint32_t buf_len; + QDF_STATUS status; + uint32_t flags; + int32_t rssi; + uint32_t tsf_delta; + uint8_t pdev_id; + void *rx_params; +}; + +/** + * mgmt_tx_download_comp_cb - function pointer for tx download completions. + * @context: caller component specific context + * @buf: buffer + * @free: to free/not free the buffer + * + * This is the function pointer to be called on tx download completion + * if download complete is required. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +typedef QDF_STATUS (*mgmt_tx_download_comp_cb)(void *context, + qdf_nbuf_t buf, bool free); + +/** + * mgmt_ota_comp_cb - function pointer for tx ota completions. + * @context: caller component specific context + * @buf: buffer + * @status: tx completion status + * @tx_compl_params: tx completion params + * + * This is the function pointer to be called on tx ota completion. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +typedef QDF_STATUS (*mgmt_ota_comp_cb)(void *context, qdf_nbuf_t buf, + uint32_t status, void *tx_compl_params); + +/** + * mgmt_frame_rx_callback - function pointer for receiving mgmt rx frames + * @psoc: psoc context + * @peer: peer + * @buf: buffer + * @mgmt_rx_params: rx params + * @frm_type: mgmt rx frame type + * + * This is the function pointer to be called on receiving mgmt rx frames. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +typedef QDF_STATUS (*mgmt_frame_rx_callback)( + struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer, + qdf_nbuf_t buf, + struct mgmt_rx_event_params *mgmt_rx_params, + enum mgmt_frame_type frm_type); + +/** + * mgmt_frame_fill_peer_cb - Function pointer to fill peer in the buf + * @peer: peer + * @buf: buffer + * + * This is the function pointer to be called during drain to fill the + * peer into the buf's cb structure. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +typedef QDF_STATUS (*mgmt_frame_fill_peer_cb)( + struct wlan_objmgr_peer *peer, + qdf_nbuf_t buf); + +/** + * struct mgmt_txrx_mgmt_frame_cb_info - frm and corresponding rx cb info + * @frm_type: mgmt frm type + * @mgmt_rx_cb: corresponding rx callback + */ +struct mgmt_txrx_mgmt_frame_cb_info { + enum mgmt_frame_type frm_type; + mgmt_frame_rx_callback mgmt_rx_cb; +}; + + +/** + * wlan_mgmt_txrx_init() - initialize mgmt txrx context. + * + * This function initializes the mgmt txrx context, + * mgmt descriptor pool, etc. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_init(void); + +/** + * wlan_mgmt_txrx_deinit() - deinitialize mgmt txrx context. + * + * This function deinitializes the mgmt txrx context, + * mgmt descriptor pool, etc. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_deinit(void); + +/** + * wlan_mgmt_txrx_mgmt_frame_tx() - transmits mgmt. frame + * @peer: peer + * @context: caller component specific context + * @buf: buffer to be transmitted + * @comp_cb: download completion cb function + * @ota_cb: post processing cb function + * @comp_id: umac component id + * @mgmt_tx_params: mgmt tx params + * + * This function transmits the mgmt. frame to southbound interface. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_mgmt_frame_tx(struct wlan_objmgr_peer *peer, + void *context, + qdf_nbuf_t buf, + mgmt_tx_download_comp_cb tx_comp_cb, + mgmt_ota_comp_cb tx_ota_comp_cb, + enum wlan_umac_comp_id comp_id, + void *mgmt_tx_params); + +/** + * wlan_mgmt_txrx_beacon_frame_tx() - transmits mgmt. beacon + * @psoc: psoc context + * @buf: buffer to be transmitted + * @comp_id: umac component id + * + * This function transmits the mgmt. beacon to southbound interface. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_beacon_frame_tx(struct wlan_objmgr_peer *peer, + qdf_nbuf_t buf, + enum wlan_umac_comp_id comp_id); + +#ifdef WLAN_SUPPORT_FILS +/** + * wlan_mgmt_txrx_fd_action_frame_tx() - transmits mgmt. FD Action frame + * @vdev: vdev object + * @buf: buffer to be transmitted + * @comp_id: umac component id + * + * This function transmits the FILS Dicovery Action frame to + * southbound interface. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_fd_action_frame_tx(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t buf, + enum wlan_umac_comp_id comp_id); +#endif /* WLAN_SUPPORT_FILS */ + +/** + * wlan_mgmt_txrx_register_rx_cb() - registers the rx cb for mgmt. frames + * @psoc: psoc context + * @comp_id: umac component id + * @frm_cb_info: pointer to array of structure containing frm type and callback + * @num_entries: num of frames for which cb to be registered + * + * This function registers rx callback for mgmt. frames for + * the corresponding umac component passed in the func. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_register_rx_cb( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + struct mgmt_txrx_mgmt_frame_cb_info *frm_cb_info, + uint8_t num_entries); + +/** + * wlan_mgmt_txrx_vdev_drain() - Function to drain all mgmt packets + * specific to a vdev + * @vdev: vdev context + * @mgmt_fill_peer_cb: callback func to UMAC to fill peer into buf + * @status: opaque pointer about the status of the pkts passed to UMAC + * + * This function drains all mgmt packets of a vdev. This can be used in the + * event of target going down without sending completions. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_vdev_drain( + struct wlan_objmgr_vdev *vdev, + mgmt_frame_fill_peer_cb mgmt_fill_peer_cb, + void *status); + +/** + * wlan_mgmt_txrx_deregister_rx_cb() - deregisters the rx cb for mgmt. frames + * @psoc: psoc context + * @comp_id: umac component id + * @frm_cb_info: pointer to array of structure containing frm type and callback + * @num_entries: num of frames for which cb to be deregistered + * + * This function deregisters rx callback for mgmt. frames for + * the corresponding umac component passed in the func. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_deregister_rx_cb( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + struct mgmt_txrx_mgmt_frame_cb_info *frm_cb_info, + uint8_t num_entries); + +/** + * wlan_mgmt_txrx_psoc_open() - mgmt txrx module psoc open API + * @psoc: psoc context + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_psoc_open(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_mgmt_txrx_psoc_close() - mgmt txrx module psoc close API + * @psoc: psoc context + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_psoc_close(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_mgmt_txrx_pdev_open() - mgmt txrx module pdev open API + * @pdev: pdev context + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_pdev_open(struct wlan_objmgr_pdev *pdev); + + +/** + * wlan_mgmt_txrx_pdev_close() - mgmt txrx module pdev close API + * @pdev: pdev context + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_pdev_close(struct wlan_objmgr_pdev *pdev); +#endif + + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/src/wlan_mgmt_txrx_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/src/wlan_mgmt_txrx_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..3aadfd8cc5097270e569ff3858716a9c3c724e19 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/src/wlan_mgmt_txrx_tgt_api.c @@ -0,0 +1,1323 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_mgmt_txrx_tgt_api.c + * This file contains mgmt txrx public API definitions for + * southbound interface. + */ + +#include "wlan_mgmt_txrx_tgt_api.h" +#include "wlan_mgmt_txrx_utils_api.h" +#include "../../core/src/wlan_mgmt_txrx_main_i.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_peer_obj.h" +#include "wlan_objmgr_pdev_obj.h" + + +/** + * mgmt_get_spec_mgmt_action_subtype() - gets spec mgmt action subtype + * @action_code: action code + * + * This function returns the subtype for spectrum management action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_spec_mgmt_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case ACTION_SPCT_MSR_REQ: + frm_type = MGMT_ACTION_MEAS_REQUEST; + break; + case ACTION_SPCT_MSR_RPRT: + frm_type = MGMT_ACTION_MEAS_REPORT; + break; + case ACTION_SPCT_TPC_REQ: + frm_type = MGMT_ACTION_TPC_REQUEST; + break; + case ACTION_SPCT_TPC_RPRT: + frm_type = MGMT_ACTION_TPC_REPORT; + break; + case ACTION_SPCT_CHL_SWITCH: + frm_type = MGMT_ACTION_CHAN_SWITCH; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_qos_action_subtype() - gets qos action subtype + * @action_code: action code + * + * This function returns the subtype for qos action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_qos_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case QOS_ADD_TS_REQ: + frm_type = MGMT_ACTION_QOS_ADD_TS_REQ; + break; + case QOS_ADD_TS_RSP: + frm_type = MGMT_ACTION_QOS_ADD_TS_RSP; + break; + case QOS_DEL_TS_REQ: + frm_type = MGMT_ACTION_QOS_DEL_TS_REQ; + break; + case QOS_SCHEDULE: + frm_type = MGMT_ACTION_QOS_SCHEDULE; + break; + case QOS_MAP_CONFIGURE: + frm_type = MGMT_ACTION_QOS_MAP_CONFIGURE; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_dls_action_subtype() - gets dls action subtype + * @action_code: action code + * + * This function returns the subtype for dls action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_dls_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case DLS_REQUEST: + frm_type = MGMT_ACTION_DLS_REQUEST; + break; + case DLS_RESPONSE: + frm_type = MGMT_ACTION_DLS_RESPONSE; + break; + case DLS_TEARDOWN: + frm_type = MGMT_ACTION_DLS_TEARDOWN; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_back_action_subtype() - gets block ack action subtype + * @action_code: action code + * + * This function returns the subtype for block ack action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_back_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case ADDBA_REQUEST: + frm_type = MGMT_ACTION_BA_ADDBA_REQUEST; + break; + case ADDBA_RESPONSE: + frm_type = MGMT_ACTION_BA_ADDBA_RESPONSE; + break; + case DELBA: + frm_type = MGMT_ACTION_BA_DELBA; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_public_action_subtype() - gets public action subtype + * @action_code: action code + * + * This function returns the subtype for public action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_public_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case PUB_ACTION_2040_BSS_COEXISTENCE: + frm_type = MGMT_ACTION_2040_BSS_COEXISTENCE; + break; + case PUB_ACTION_EXT_CHANNEL_SWITCH_ID: + frm_type = MGMT_ACTION_EXT_CHANNEL_SWITCH_ID; + break; + case PUB_ACTION_VENDOR_SPECIFIC: + frm_type = MGMT_ACTION_VENDOR_SPECIFIC; + break; + case PUB_ACTION_TDLS_DISCRESP: + frm_type = MGMT_ACTION_TDLS_DISCRESP; + break; + case PUB_ACTION_GAS_INITIAL_REQUEST: + frm_type = MGMT_ACTION_GAS_INITIAL_REQUEST; + break; + case PUB_ACTION_GAS_INITIAL_RESPONSE: + frm_type = MGMT_ACTION_GAS_INITIAL_RESPONSE; + break; + case PUB_ACTION_GAS_COMEBACK_REQUEST: + frm_type = MGMT_ACTION_GAS_COMEBACK_REQUEST; + break; + case PUB_ACTION_GAS_COMEBACK_RESPONSE: + frm_type = MGMT_ACTION_GAS_COMEBACK_RESPONSE; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_rrm_action_subtype() - gets rrm action subtype + * @action_code: action code + * + * This function returns the subtype for rrm action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_rrm_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case RRM_RADIO_MEASURE_REQ: + frm_type = MGMT_ACTION_RRM_RADIO_MEASURE_REQ; + break; + case RRM_RADIO_MEASURE_RPT: + frm_type = MGMT_ACTION_RRM_RADIO_MEASURE_RPT; + break; + case RRM_LINK_MEASUREMENT_REQ: + frm_type = MGMT_ACTION_RRM_LINK_MEASUREMENT_REQ; + break; + case RRM_LINK_MEASUREMENT_RPT: + frm_type = MGMT_ACTION_RRM_LINK_MEASUREMENT_RPT; + break; + case RRM_NEIGHBOR_REQ: + frm_type = MGMT_ACTION_RRM_NEIGHBOR_REQ; + break; + case RRM_NEIGHBOR_RPT: + frm_type = MGMT_ACTION_RRM_NEIGHBOR_RPT; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +static enum mgmt_frame_type +mgmt_get_ft_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case FT_FAST_BSS_TRNST_REQ: + frm_type = MGMT_ACTION_FT_REQUEST; + break; + case FT_FAST_BSS_TRNST_RES: + frm_type = MGMT_ACTION_FT_RESPONSE; + break; + case FT_FAST_BSS_TRNST_CONFIRM: + frm_type = MGMT_ACTION_FT_CONFIRM; + break; + case FT_FAST_BSS_TRNST_ACK: + frm_type = MGMT_ACTION_FT_ACK; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_ht_action_subtype() - gets ht action subtype + * @action_code: action code + * + * This function returns the subtype for ht action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_ht_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case HT_ACTION_NOTIFY_CHANWIDTH: + frm_type = MGMT_ACTION_HT_NOTIFY_CHANWIDTH; + break; + case HT_ACTION_SMPS: + frm_type = MGMT_ACTION_HT_SMPS; + break; + case HT_ACTION_PSMP: + frm_type = MGMT_ACTION_HT_PSMP; + break; + case HT_ACTION_PCO_PHASE: + frm_type = MGMT_ACTION_HT_PCO_PHASE; + break; + case HT_ACTION_CSI: + frm_type = MGMT_ACTION_HT_CSI; + break; + case HT_ACTION_NONCOMPRESSED_BF: + frm_type = MGMT_ACTION_HT_NONCOMPRESSED_BF; + break; + case HT_ACTION_COMPRESSED_BF: + frm_type = MGMT_ACTION_HT_COMPRESSED_BF; + break; + case HT_ACTION_ASEL_IDX_FEEDBACK: + frm_type = MGMT_ACTION_HT_ASEL_IDX_FEEDBACK; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_sa_query_action_subtype() - gets sa query action subtype + * @action_code: action code + * + * This function returns the subtype for sa query action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_sa_query_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case SA_QUERY_REQUEST: + frm_type = MGMT_ACTION_SA_QUERY_REQUEST; + break; + case SA_QUERY_RESPONSE: + frm_type = MGMT_ACTION_SA_QUERY_RESPONSE; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_pdpa_action_subtype() - gets pdpa action subtype + * @action_code: action code + * + * This function returns the subtype for protected dual public + * action category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_pdpa_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case PDPA_GAS_INIT_REQ: + frm_type = MGMT_ACTION_PDPA_GAS_INIT_REQ; + break; + case PDPA_GAS_INIT_RSP: + frm_type = MGMT_ACTION_PDPA_GAS_INIT_RSP; + break; + case PDPA_GAS_COMEBACK_REQ: + frm_type = MGMT_ACTION_PDPA_GAS_COMEBACK_REQ; + break; + case PDPA_GAS_COMEBACK_RSP: + frm_type = MGMT_ACTION_PDPA_GAS_COMEBACK_RSP; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_wnm_action_subtype() - gets wnm action subtype + * @action_code: action code + * + * This function returns the subtype for wnm action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_wnm_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case WNM_BSS_TM_QUERY: + frm_type = MGMT_ACTION_WNM_BSS_TM_QUERY; + break; + case WNM_BSS_TM_REQUEST: + frm_type = MGMT_ACTION_WNM_BSS_TM_REQUEST; + break; + case WNM_BSS_TM_RESPONSE: + frm_type = MGMT_ACTION_WNM_BSS_TM_RESPONSE; + break; + case WNM_NOTIF_REQUEST: + frm_type = MGMT_ACTION_WNM_NOTIF_REQUEST; + break; + case WNM_NOTIF_RESPONSE: + frm_type = MGMT_ACTION_WNM_NOTIF_RESPONSE; + break; + case WNM_FMS_REQ: + frm_type = MGMT_ACTION_WNM_FMS_REQ; + break; + case WNM_FMS_RESP: + frm_type = MGMT_ACTION_WNM_FMS_RESP; + break; + case WNM_TFS_REQ: + frm_type = MGMT_ACTION_WNM_TFS_REQ; + break; + case WNM_TFS_RESP: + frm_type = MGMT_ACTION_WNM_TFS_RESP; + break; + case WNM_TFS_NOTIFY: + frm_type = MGMT_ACTION_WNM_TFS_NOTIFY; + break; + case WNM_SLEEP_REQ: + frm_type = MGMT_ACTION_WNM_SLEEP_REQ; + break; + case WNM_SLEEP_RESP: + frm_type = MGMT_ACTION_WNM_SLEEP_RESP; + break; + case WNM_TIM_REQ: + frm_type = MGMT_ACTION_WNM_TFS_REQ; + break; + case WNM_TIM_RESP: + frm_type = MGMT_ACTION_WNM_TFS_RESP; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_wnm_action_subtype() - gets tdls action subtype + * @action_code: action code + * + * This function returns the subtype for tdls action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_tdls_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case TDLS_SETUP_REQUEST: + frm_type = MGMT_ACTION_TDLS_SETUP_REQ; + break; + case TDLS_SETUP_RESPONSE: + frm_type = MGMT_ACTION_TDLS_SETUP_RSP; + break; + case TDLS_SETUP_CONFIRM: + frm_type = MGMT_ACTION_TDLS_SETUP_CNF; + break; + case TDLS_TEARDOWN: + frm_type = MGMT_ACTION_TDLS_TEARDOWN; + break; + case TDLS_PEER_TRAFFIC_INDICATION: + frm_type = MGMT_ACTION_TDLS_PEER_TRAFFIC_IND; + break; + case TDLS_CHANNEL_SWITCH_REQUEST: + frm_type = MGMT_ACTION_TDLS_CH_SWITCH_REQ; + break; + case TDLS_CHANNEL_SWITCH_RESPONSE: + frm_type = MGMT_ACTION_TDLS_CH_SWITCH_RSP; + break; + case TDLS_PEER_PSM_REQUEST: + frm_type = MGMT_ACTION_TDLS_PEER_PSM_REQUEST; + break; + case TDLS_PEER_PSM_RESPONSE: + frm_type = MGMT_ACTION_TDLS_PEER_PSM_RESPONSE; + break; + case TDLS_PEER_TRAFFIC_RESPONSE: + frm_type = MGMT_ACTION_TDLS_PEER_TRAFFIC_RSP; + break; + case TDLS_DISCOVERY_REQUEST: + frm_type = MGMT_ACTION_TDLS_DIS_REQ; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_mesh_action_subtype() - gets mesh action subtype + * @action_code: action code + * + * This function returns the subtype for mesh action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_mesh_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case MESH_ACTION_LINK_METRIC_REPORT: + frm_type = MGMT_ACTION_MESH_LINK_METRIC_REPORT; + break; + case MESH_ACTION_HWMP_PATH_SELECTION: + frm_type = MGMT_ACTION_MESH_HWMP_PATH_SELECTION; + break; + case MESH_ACTION_GATE_ANNOUNCEMENT: + frm_type = MGMT_ACTION_MESH_GATE_ANNOUNCEMENT; + break; + case MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION: + frm_type = MGMT_ACTION_MESH_CONGESTION_CONTROL_NOTIFICATION; + break; + case MESH_ACTION_MCCA_SETUP_REQUEST: + frm_type = MGMT_ACTION_MESH_MCCA_SETUP_REQUEST; + break; + case MESH_ACTION_MCCA_SETUP_REPLY: + frm_type = MGMT_ACTION_MESH_MCCA_SETUP_REPLY; + break; + case MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST: + frm_type = MGMT_ACTION_MESH_MCCA_ADVERTISEMENT_REQUEST; + break; + case MESH_ACTION_MCCA_ADVERTISEMENT: + frm_type = MGMT_ACTION_MESH_MCCA_ADVERTISEMENT; + break; + case MESH_ACTION_MCCA_TEARDOWN: + frm_type = MGMT_ACTION_MESH_MCCA_TEARDOWN; + break; + case MESH_ACTION_TBTT_ADJUSTMENT_REQUEST: + frm_type = MGMT_ACTION_MESH_TBTT_ADJUSTMENT_REQUEST; + break; + case MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE: + frm_type = MGMT_ACTION_MESH_TBTT_ADJUSTMENT_RESPONSE; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_self_prot_action_subtype() - gets self prot. action subtype + * @action_code: action code + * + * This function returns the subtype for self protected action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_self_prot_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case SP_MESH_PEERING_OPEN: + frm_type = MGMT_ACTION_SP_MESH_PEERING_OPEN; + break; + case SP_MESH_PEERING_CONFIRM: + frm_type = MGMT_ACTION_SP_MESH_PEERING_CONFIRM; + break; + case SP_MESH_PEERING_CLOSE: + frm_type = MGMT_ACTION_SP_MESH_PEERING_CLOSE; + break; + case SP_MGK_INFORM: + frm_type = MGMT_ACTION_SP_MGK_INFORM; + break; + case SP_MGK_ACK: + frm_type = MGMT_ACTION_SP_MGK_ACK; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_wmm_action_subtype() - gets wmm action subtype + * @action_code: action code + * + * This function returns the subtype for wmm action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_wmm_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case WMM_QOS_SETUP_REQ: + frm_type = MGMT_ACTION_WMM_QOS_SETUP_REQ; + break; + case WMM_QOS_SETUP_RESP: + frm_type = MGMT_ACTION_WMM_QOS_SETUP_RESP; + break; + case WMM_QOS_TEARDOWN: + frm_type = MGMT_ACTION_WMM_QOS_TEARDOWN; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_vht_action_subtype() - gets vht action subtype + * @action_code: action code + * + * This function returns the subtype for vht action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_vht_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case VHT_ACTION_COMPRESSED_BF: + frm_type = MGMT_ACTION_VHT_COMPRESSED_BF; + break; + case VHT_ACTION_GID_NOTIF: + frm_type = MGMT_ACTION_VHT_GID_NOTIF; + break; + case VHT_ACTION_OPMODE_NOTIF: + frm_type = MGMT_ACTION_VHT_OPMODE_NOTIF; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_fst_action_subtype() - gets fst action subtype + * @action_code: action code + * + * This function returns the subtype for fst action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_fst_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case FST_SETUP_REQ: + frm_type = MGMT_ACTION_FST_SETUP_REQ; + break; + case FST_SETUP_RSP: + frm_type = MGMT_ACTION_FST_SETUP_RSP; + break; + case FST_TEAR_DOWN: + frm_type = MGMT_ACTION_FST_TEAR_DOWN; + break; + case FST_ACK_REQ: + frm_type = MGMT_ACTION_FST_ACK_REQ; + break; + case FST_ACK_RSP: + frm_type = MGMT_ACTION_FST_ACK_RSP; + break; + case FST_ON_CHANNEL_TUNNEL: + frm_type = MGMT_ACTION_FST_ON_CHANNEL_TUNNEL; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_txrx_get_action_frm_subtype() - gets action frm subtype + * @mpdu_data_ptr: pointer to mpdu data + * + * This function determines the action category of the frame + * and calls respective function to get mgmt frame type. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_txrx_get_action_frm_subtype(uint8_t *mpdu_data_ptr) +{ + struct action_frm_hdr *action_hdr = + (struct action_frm_hdr *)mpdu_data_ptr; + enum mgmt_frame_type frm_type; + + switch (action_hdr->action_category) { + case ACTION_CATEGORY_SPECTRUM_MGMT: + frm_type = mgmt_get_spec_mgmt_action_subtype( + action_hdr->action_code); + break; + case ACTION_FAST_BSS_TRNST: + frm_type = mgmt_get_ft_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_QOS: + frm_type = mgmt_get_qos_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_DLS: + frm_type = mgmt_get_dls_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_BACK: + frm_type = mgmt_get_back_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_PUBLIC: + frm_type = mgmt_get_public_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_RRM: + frm_type = mgmt_get_rrm_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_HT: + frm_type = mgmt_get_ht_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_SA_QUERY: + frm_type = mgmt_get_sa_query_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_PROTECTED_DUAL_OF_PUBLIC_ACTION: + frm_type = mgmt_get_pdpa_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_WNM: + frm_type = mgmt_get_wnm_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_TDLS: + frm_type = mgmt_get_tdls_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_MESH_ACTION: + frm_type = mgmt_get_mesh_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_SELF_PROTECTED: + frm_type = mgmt_get_self_prot_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_WMM: + frm_type = mgmt_get_wmm_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_VHT: + frm_type = mgmt_get_vht_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_VENDOR_SPECIFIC: + frm_type = MGMT_ACTION_CATEGORY_VENDOR_SPECIFIC; + break; + case ACTION_CATEGORY_FST: + frm_type = mgmt_get_fst_action_subtype(action_hdr->action_code); + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_txrx_get_frm_type() - gets mgmt frm type + * @mgmt_subtype: mgmt subtype + * @mpdu_data_ptr: pointer to mpdu data + * + * This function returns mgmt frame type of the frame + * based on the mgmt subtype. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_txrx_get_frm_type(uint8_t mgmt_subtype, uint8_t *mpdu_data_ptr) +{ + enum mgmt_frame_type frm_type; + + switch (mgmt_subtype) { + case MGMT_SUBTYPE_ASSOC_REQ: + frm_type = MGMT_ASSOC_REQ; + break; + case MGMT_SUBTYPE_ASSOC_RESP: + frm_type = MGMT_ASSOC_RESP; + break; + case MGMT_SUBTYPE_REASSOC_REQ: + frm_type = MGMT_ASSOC_REQ; + break; + case MGMT_SUBTYPE_REASSOC_RESP: + frm_type = MGMT_REASSOC_RESP; + break; + case MGMT_SUBTYPE_PROBE_REQ: + frm_type = MGMT_PROBE_REQ; + break; + case MGMT_SUBTYPE_PROBE_RESP: + frm_type = MGMT_PROBE_RESP; + break; + case MGMT_SUBTYPE_BEACON: + frm_type = MGMT_BEACON; + break; + case MGMT_SUBTYPE_ATIM: + frm_type = MGMT_ATIM; + break; + case MGMT_SUBTYPE_DISASSOC: + frm_type = MGMT_DISASSOC; + break; + case MGMT_SUBTYPE_AUTH: + frm_type = MGMT_AUTH; + break; + case MGMT_SUBTYPE_DEAUTH: + frm_type = MGMT_DEAUTH; + break; + case MGMT_SUBTYPE_ACTION: + case MGMT_SUBTYPE_ACTION_NO_ACK: + frm_type = mgmt_txrx_get_action_frm_subtype(mpdu_data_ptr); + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * wlan_mgmt_txrx_rx_handler_list_copy() - copies rx handler list + * @rx_handler: pointer to rx handler list + * @rx_handler_head: pointer to head of the copies list + * @rx_handler_tail: pointer to tail of the copies list + * + * This function copies the rx handler linked list into a local + * linked list. + * + * Return: QDF_STATUS_SUCCESS in case of success + */ +static QDF_STATUS wlan_mgmt_txrx_rx_handler_list_copy( + struct mgmt_rx_handler *rx_handler, + struct mgmt_rx_handler **rx_handler_head, + struct mgmt_rx_handler **rx_handler_tail) +{ + struct mgmt_rx_handler *rx_handler_node; + + while (rx_handler) { + rx_handler_node = + qdf_mem_malloc_atomic(sizeof(*rx_handler_node)); + if (!rx_handler_node) { + mgmt_txrx_err_rl("Couldn't allocate memory for rx handler node"); + return QDF_STATUS_E_NOMEM; + } + + rx_handler_node->comp_id = rx_handler->comp_id; + rx_handler_node->rx_cb = rx_handler->rx_cb; + rx_handler_node->next = NULL; + + if (!(*rx_handler_head)) { + *rx_handler_head = rx_handler_node; + *rx_handler_tail = *rx_handler_head; + } else { + (*rx_handler_tail)->next = rx_handler_node; + *rx_handler_tail = (*rx_handler_tail)->next; + } + rx_handler = rx_handler->next; + } + + return QDF_STATUS_SUCCESS; +} + +static bool +mgmt_rx_is_bssid_valid(struct qdf_mac_addr *mac_addr) +{ + if (qdf_is_macaddr_group(mac_addr) || + qdf_is_macaddr_zero(mac_addr)) + return false; + + return true; +} + +QDF_STATUS tgt_mgmt_txrx_rx_frame_handler( + struct wlan_objmgr_psoc *psoc, + qdf_nbuf_t buf, + struct mgmt_rx_event_params *mgmt_rx_params) +{ + struct mgmt_txrx_priv_psoc_context *mgmt_txrx_psoc_ctx; + struct ieee80211_frame *wh; + qdf_nbuf_t copy_buf; + struct wlan_objmgr_peer *peer = NULL; + uint8_t mgmt_type, mgmt_subtype; + uint8_t *mac_addr, *mpdu_data_ptr; + enum mgmt_frame_type frm_type; + struct mgmt_rx_handler *rx_handler; + struct mgmt_rx_handler *rx_handler_head = NULL, *rx_handler_tail = NULL; + u_int8_t *data, *ivp = NULL; + uint16_t buflen; + QDF_STATUS status = QDF_STATUS_SUCCESS; + bool is_from_addr_valid, is_bssid_valid; + + if (!buf) { + mgmt_txrx_err("buffer passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (!psoc) { + mgmt_txrx_err("psoc_ctx passed is NULL"); + qdf_nbuf_free(buf); + return QDF_STATUS_E_INVAL; + } + + data = (uint8_t *)qdf_nbuf_data(buf); + wh = (struct ieee80211_frame *)data; + buflen = qdf_nbuf_len(buf); + + /** + * TO DO (calculate pdev) + * Waiting for a new parameter: pdev id to get added in rx event + */ + + mgmt_type = (wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK; + mgmt_subtype = (wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; + + if (mgmt_type != IEEE80211_FC0_TYPE_MGT) { + mgmt_txrx_err("Rx event doesn't conatin a mgmt. packet, %d", + mgmt_type); + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + is_from_addr_valid = mgmt_rx_is_bssid_valid((struct qdf_mac_addr *) + wh->i_addr2); + is_bssid_valid = mgmt_rx_is_bssid_valid((struct qdf_mac_addr *) + wh->i_addr3); + + if (!is_from_addr_valid && !is_bssid_valid) { + mgmt_txrx_debug_rl("from addr "QDF_MAC_ADDR_FMT" bssid addr "QDF_MAC_ADDR_FMT" both not valid, dropping them", + QDF_MAC_ADDR_REF(wh->i_addr2), + QDF_MAC_ADDR_REF(wh->i_addr3)); + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + if ((mgmt_subtype == MGMT_SUBTYPE_BEACON || + mgmt_subtype == MGMT_SUBTYPE_PROBE_RESP) && + !(is_from_addr_valid && is_bssid_valid)) { + mgmt_txrx_debug_rl("from addr "QDF_MAC_ADDR_FMT" bssid addr "QDF_MAC_ADDR_FMT" not valid, modifying them", + QDF_MAC_ADDR_REF(wh->i_addr2), + QDF_MAC_ADDR_REF(wh->i_addr3)); + if (!is_from_addr_valid) + qdf_mem_copy(wh->i_addr2, wh->i_addr3, + QDF_MAC_ADDR_SIZE); + else + qdf_mem_copy(wh->i_addr3, wh->i_addr2, + QDF_MAC_ADDR_SIZE); + } + + /* mpdu_data_ptr is pointer to action header */ + mpdu_data_ptr = (uint8_t *)qdf_nbuf_data(buf) + + sizeof(struct ieee80211_frame); + if ((wh->i_fc[1] & IEEE80211_FC1_WEP) && + !qdf_is_macaddr_group((struct qdf_mac_addr *)wh->i_addr1) && + !qdf_is_macaddr_broadcast((struct qdf_mac_addr *)wh->i_addr1)) { + + if (buflen > (sizeof(struct ieee80211_frame) + + WLAN_HDR_EXT_IV_LEN)) + ivp = data + sizeof(struct ieee80211_frame); + + /* Set mpdu_data_ptr based on EXT IV bit + * if EXT IV bit set, CCMP using PMF 8 bytes of IV is present + * else for WEP using PMF, 4 bytes of IV is present + */ + if (ivp && (ivp[WLAN_HDR_IV_LEN] & WLAN_HDR_EXT_IV_BIT)) { + if (buflen <= (sizeof(struct ieee80211_frame) + + IEEE80211_CCMP_HEADERLEN)) { + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } + mpdu_data_ptr += IEEE80211_CCMP_HEADERLEN; + } else { + if (buflen <= (sizeof(struct ieee80211_frame) + + WLAN_HDR_EXT_IV_LEN)) { + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } + mpdu_data_ptr += WLAN_HDR_EXT_IV_LEN; + } + } + + frm_type = mgmt_txrx_get_frm_type(mgmt_subtype, mpdu_data_ptr); + if (frm_type == MGMT_FRM_UNSPECIFIED) { + mgmt_txrx_debug_rl("Unspecified mgmt frame type fc: %x %x", + wh->i_fc[0], wh->i_fc[1]); + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + if (!(mgmt_subtype == MGMT_SUBTYPE_BEACON || + mgmt_subtype == MGMT_SUBTYPE_PROBE_RESP || + mgmt_subtype == MGMT_SUBTYPE_PROBE_REQ)) + mgmt_txrx_debug("Rcvd mgmt frame subtype %x (frame type %u) from "QDF_MAC_ADDR_FMT", seq_num = %d, rssi = %d tsf_delta: %u", + mgmt_subtype, frm_type, + QDF_MAC_ADDR_REF(wh->i_addr2), + (le16toh(*(uint16_t *)wh->i_seq) >> + WLAN_SEQ_SEQ_SHIFT), mgmt_rx_params->rssi, + mgmt_rx_params->tsf_delta); + + mgmt_txrx_psoc_ctx = (struct mgmt_txrx_priv_psoc_context *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_MGMT_TXRX); + + qdf_spin_lock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + rx_handler = mgmt_txrx_psoc_ctx->mgmt_rx_comp_cb[frm_type]; + if (rx_handler) { + status = wlan_mgmt_txrx_rx_handler_list_copy(rx_handler, + &rx_handler_head, &rx_handler_tail); + if (status != QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + qdf_nbuf_free(buf); + goto rx_handler_mem_free; + } + } + + rx_handler = mgmt_txrx_psoc_ctx->mgmt_rx_comp_cb[MGMT_FRAME_TYPE_ALL]; + if (rx_handler) { + status = wlan_mgmt_txrx_rx_handler_list_copy(rx_handler, + &rx_handler_head, &rx_handler_tail); + if (status != QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + qdf_nbuf_free(buf); + goto rx_handler_mem_free; + } + } + + if (!rx_handler_head) { + qdf_spin_unlock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + mgmt_txrx_debug("No rx callback registered for frm_type: %d", + frm_type); + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } + qdf_spin_unlock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + + mac_addr = (uint8_t *)wh->i_addr2; + /* + * peer can be NULL in following 2 scenarios: + * 1. broadcast frame received + * 2. operating in monitor mode + * + * and in both scenarios, the receiver of frame + * is expected to do processing accordingly considerng + * the fact that peer = NULL can be received and is a valid + * scenario. + */ + peer = wlan_objmgr_get_peer(psoc, mgmt_rx_params->pdev_id, + mac_addr, WLAN_MGMT_SB_ID); + if (!peer && !qdf_is_macaddr_broadcast( + (struct qdf_mac_addr *)wh->i_addr1)) { + mac_addr = (uint8_t *)wh->i_addr1; + peer = wlan_objmgr_get_peer(psoc, + mgmt_rx_params->pdev_id, + mac_addr, WLAN_MGMT_SB_ID); + } + + rx_handler = rx_handler_head; + while (rx_handler->next) { + copy_buf = qdf_nbuf_clone(buf); + + if (!copy_buf) { + rx_handler = rx_handler->next; + continue; + } + + rx_handler->rx_cb(psoc, peer, copy_buf, + mgmt_rx_params, frm_type); + rx_handler = rx_handler->next; + } + rx_handler->rx_cb(psoc, peer, buf, + mgmt_rx_params, frm_type); + + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_SB_ID); + +rx_handler_mem_free: + while (rx_handler_head) { + rx_handler = rx_handler_head; + rx_handler_head = rx_handler_head->next; + qdf_mem_free(rx_handler); + } + + return status; +} + +QDF_STATUS tgt_mgmt_txrx_tx_completion_handler( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id, uint32_t status, + void *tx_compl_params) +{ + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + struct mgmt_txrx_desc_elem_t *mgmt_desc; + void *cb_context; + mgmt_tx_download_comp_cb tx_compl_cb; + mgmt_ota_comp_cb ota_comp_cb; + qdf_nbuf_t nbuf; + + mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("Mgmt txrx context empty for pdev %pK", pdev); + return QDF_STATUS_E_NULL_VALUE; + } + if (desc_id >= MGMT_DESC_POOL_MAX) { + mgmt_txrx_err("desc_id:%u is out of bounds", desc_id); + return QDF_STATUS_E_INVAL; + } + mgmt_desc = &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[desc_id]; + if (!mgmt_desc || !mgmt_desc->in_use) { + mgmt_txrx_err("Mgmt desc empty for id %d pdev %pK ", + desc_id, pdev); + return QDF_STATUS_E_NULL_VALUE; + } + tx_compl_cb = mgmt_desc->tx_dwnld_cmpl_cb; + ota_comp_cb = mgmt_desc->tx_ota_cmpl_cb; + nbuf = mgmt_desc->nbuf; + + /* + * TO DO + * Make the API more generic to handle tx download completion as well + * as OTA completion separately. + */ + + /* + * 1. If the tx frame is sent by any UMAC converged component then it + * passes the context as NULL while calling mgmt txrx API for + * sending mgmt frame. If context is NULL, peer will be passed as + * cb_context in completion callbacks. + * 2. If the tx frame is sent by legacy MLME then it passes the context + * as its specific context (for ex- mac context in case of MCL) while + * calling mgmt txrx API for sending mgmt frame. This caller specific + * context is passed as cb_context in completion callbacks. + */ + if (mgmt_desc->context) + cb_context = mgmt_desc->context; + else + cb_context = (void *)mgmt_desc->peer; + + if (!tx_compl_cb && !ota_comp_cb) { + qdf_nbuf_free(nbuf); + goto no_registered_cb; + } + + if (tx_compl_cb) + tx_compl_cb(cb_context, nbuf, status); + + if (ota_comp_cb) + ota_comp_cb(cb_context, nbuf, status, tx_compl_params); + +no_registered_cb: + /* + * decrementing the peer ref count that was incremented while + * accessing peer in wlan_mgmt_txrx_mgmt_frame_tx + */ + wlan_objmgr_peer_release_ref(mgmt_desc->peer, WLAN_MGMT_NB_ID); + wlan_mgmt_txrx_desc_put(mgmt_txrx_pdev_ctx, desc_id); + return QDF_STATUS_SUCCESS; +} + +qdf_nbuf_t tgt_mgmt_txrx_get_nbuf_from_desc_id( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id) +{ + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + struct mgmt_txrx_desc_elem_t *mgmt_desc; + qdf_nbuf_t buf; + + mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("Mgmt txrx context empty for pdev %pK", pdev); + goto fail; + } + if (desc_id >= MGMT_DESC_POOL_MAX) { + mgmt_txrx_err("desc_id:%u is out of bounds", desc_id); + goto fail; + } + + mgmt_desc = &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[desc_id]; + if (!mgmt_desc || !mgmt_desc->in_use) { + mgmt_txrx_err("Mgmt descriptor unavailable for id %d pdev %pK", + desc_id, pdev); + goto fail; + } + buf = mgmt_desc->nbuf; + return buf; + +fail: + return NULL; +} + +struct wlan_objmgr_peer * +tgt_mgmt_txrx_get_peer_from_desc_id( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id) +{ + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + struct mgmt_txrx_desc_elem_t *mgmt_desc; + struct wlan_objmgr_peer *peer; + + mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("Mgmt txrx context empty for pdev %pK", pdev); + goto fail; + } + + mgmt_desc = &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[desc_id]; + if (!mgmt_desc || !mgmt_desc->in_use) { + mgmt_txrx_err("Mgmt descriptor unavailable for id %d pdev %pK", + desc_id, pdev); + goto fail; + } + + peer = mgmt_desc->peer; + return peer; + +fail: + return NULL; +} + +uint8_t tgt_mgmt_txrx_get_vdev_id_from_desc_id( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id) +{ + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + struct mgmt_txrx_desc_elem_t *mgmt_desc; + uint8_t vdev_id; + + mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("Mgmt txrx context empty for pdev %pK", pdev); + goto fail; + } + if (desc_id >= MGMT_DESC_POOL_MAX) { + mgmt_txrx_err("desc_id:%u is out of bounds", desc_id); + goto fail; + } + + mgmt_desc = &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[desc_id]; + if (!mgmt_desc || !mgmt_desc->in_use) { + mgmt_txrx_err("Mgmt descriptor unavailable for id %d pdev %pK", + desc_id, pdev); + goto fail; + } + + vdev_id = mgmt_desc->vdev_id; + return vdev_id; + +fail: + return WLAN_UMAC_VDEV_ID_MAX; +} + +uint32_t tgt_mgmt_txrx_get_free_desc_pool_count( + struct wlan_objmgr_pdev *pdev) +{ + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + uint32_t free_desc_count = WLAN_INVALID_MGMT_DESC_COUNT; + + mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("Mgmt txrx context empty for pdev %pK", pdev); + goto fail; + } + + free_desc_count = qdf_list_size( + &(mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list)); + +fail: + return free_desc_count; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/src/wlan_mgmt_txrx_utils_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/src/wlan_mgmt_txrx_utils_api.c new file mode 100644 index 0000000000000000000000000000000000000000..5889f146fa7d655995aa3b067ef63a80d37505bd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/src/wlan_mgmt_txrx_utils_api.c @@ -0,0 +1,838 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_mgmt_txrx_utils_api.c + * This file contains mgmt txrx public API definitions for umac + * converged components. + */ + +#include "wlan_mgmt_txrx_utils_api.h" +#include "../../core/src/wlan_mgmt_txrx_main_i.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_global_obj.h" +#include "wlan_objmgr_pdev_obj.h" +#include "wlan_objmgr_vdev_obj.h" +#include "wlan_objmgr_peer_obj.h" +#include "qdf_nbuf.h" +#include "wlan_lmac_if_api.h" + +/** + * wlan_mgmt_txrx_psoc_obj_create_notification() - called from objmgr when psoc + * is created + * @psoc: psoc context + * @arg: argument + * + * This function gets called from object manager when psoc is being created and + * creates mgmt_txrx context, mgmt desc pool. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_mgmt_txrx_psoc_obj_create_notification( + struct wlan_objmgr_psoc *psoc, + void *arg) +{ + struct mgmt_txrx_priv_psoc_context *mgmt_txrx_psoc_ctx; + QDF_STATUS status; + + if (!psoc) { + mgmt_txrx_err("psoc context passed is NULL"); + status = QDF_STATUS_E_INVAL; + goto err_return; + } + + mgmt_txrx_psoc_ctx = qdf_mem_malloc(sizeof(*mgmt_txrx_psoc_ctx)); + if (!mgmt_txrx_psoc_ctx) { + status = QDF_STATUS_E_NOMEM; + goto err_return; + } + + mgmt_txrx_psoc_ctx->psoc = psoc; + + qdf_spinlock_create(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + + if (wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_MGMT_TXRX, + mgmt_txrx_psoc_ctx, QDF_STATUS_SUCCESS) + != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to attach mgmt txrx ctx in psoc ctx"); + status = QDF_STATUS_E_FAILURE; + goto err_psoc_attach; + } + + mgmt_txrx_debug("Mgmt txrx creation successful, mgmt txrx ctx: %pK, psoc: %pK", + mgmt_txrx_psoc_ctx, psoc); + + return QDF_STATUS_SUCCESS; + +err_psoc_attach: + qdf_spinlock_destroy(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + qdf_mem_free(mgmt_txrx_psoc_ctx); +err_return: + return status; +} + +/** + * wlan_mgmt_txrx_psoc_obj_destroy_notification() - called from objmgr when + * psoc is destroyed + * @psoc: psoc context + * @arg: argument + * + * This function gets called from object manager when psoc is being destroyed + * psoc deletes mgmt_txrx context, mgmt desc pool. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_mgmt_txrx_psoc_obj_destroy_notification( + struct wlan_objmgr_psoc *psoc, + void *arg) +{ + struct mgmt_txrx_priv_psoc_context *mgmt_txrx_psoc_ctx; + + if (!psoc) { + mgmt_txrx_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + mgmt_txrx_psoc_ctx = wlan_objmgr_psoc_get_comp_private_obj( + psoc, WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_psoc_ctx) { + mgmt_txrx_err("mgmt txrx context is already NULL"); + return QDF_STATUS_E_FAILURE; + } + + mgmt_txrx_debug("deleting mgmt txrx psoc obj, mgmt txrx ctx: %pK, psoc: %pK", + mgmt_txrx_psoc_ctx, psoc); + if (wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_MGMT_TXRX, mgmt_txrx_psoc_ctx) + != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to detach mgmt txrx ctx in psoc ctx"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spinlock_destroy(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + qdf_mem_free(mgmt_txrx_psoc_ctx); + + mgmt_txrx_debug("mgmt txrx deletion successful"); + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_mgmt_txrx_pdev_obj_create_notification() - called from objmgr when pdev + * is created + * @pdev: pdev context + * @arg: argument + * + * This function gets called from object manager when pdev is being created and + * creates mgmt_txrx context, mgmt desc pool. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_mgmt_txrx_pdev_obj_create_notification( + struct wlan_objmgr_pdev *pdev, + void *arg) +{ + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + struct mgmt_txrx_stats_t *mgmt_txrx_stats; + QDF_STATUS status; + + if (!pdev) { + mgmt_txrx_err("pdev context passed is NULL"); + status = QDF_STATUS_E_INVAL; + goto err_return; + + } + + mgmt_txrx_pdev_ctx = qdf_mem_malloc(sizeof(*mgmt_txrx_pdev_ctx)); + if (!mgmt_txrx_pdev_ctx) { + status = QDF_STATUS_E_NOMEM; + goto err_return; + } + + mgmt_txrx_pdev_ctx->pdev = pdev; + + status = wlan_mgmt_txrx_desc_pool_init(mgmt_txrx_pdev_ctx); + if (status != QDF_STATUS_SUCCESS) { + mgmt_txrx_err( + "Failed to initialize mgmt desc. pool with status: %u", + status); + goto err_desc_pool_init; + } + + mgmt_txrx_stats = qdf_mem_malloc(sizeof(*mgmt_txrx_stats)); + if (!mgmt_txrx_stats) { + status = QDF_STATUS_E_NOMEM; + goto err_mgmt_txrx_stats; + } + mgmt_txrx_pdev_ctx->mgmt_txrx_stats = mgmt_txrx_stats; + + qdf_wake_lock_create(&mgmt_txrx_pdev_ctx->wakelock_tx_cmp, + "mgmt_txrx tx_cmp"); + qdf_runtime_lock_init(&mgmt_txrx_pdev_ctx->wakelock_tx_runtime_cmp); + + if (wlan_objmgr_pdev_component_obj_attach(pdev, + WLAN_UMAC_COMP_MGMT_TXRX, + mgmt_txrx_pdev_ctx, QDF_STATUS_SUCCESS) + != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to attach mgmt txrx ctx in pdev ctx"); + status = QDF_STATUS_E_FAILURE; + goto err_pdev_attach; + } + + mgmt_txrx_debug( + "Mgmt txrx creation successful, mgmt txrx ctx: %pK, pdev: %pK", + mgmt_txrx_pdev_ctx, pdev); + + return QDF_STATUS_SUCCESS; + +err_pdev_attach: + qdf_runtime_lock_deinit(&mgmt_txrx_pdev_ctx->wakelock_tx_runtime_cmp); + qdf_wake_lock_destroy(&mgmt_txrx_pdev_ctx->wakelock_tx_cmp); + qdf_mem_free(mgmt_txrx_stats); +err_mgmt_txrx_stats: + wlan_mgmt_txrx_desc_pool_deinit(mgmt_txrx_pdev_ctx); +err_desc_pool_init: + qdf_mem_free(mgmt_txrx_pdev_ctx); +err_return: + return status; +} + +/** + * wlan_mgmt_txrx_pdev_obj_destroy_notification() - called from objmgr when + * pdev is destroyed + * @pdev: pdev context + * @arg: argument + * + * This function gets called from object manager when pdev is being destroyed + * pdev deletes mgmt_txrx context, mgmt desc pool. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_mgmt_txrx_pdev_obj_destroy_notification( + struct wlan_objmgr_pdev *pdev, + void *arg) +{ + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + + if (!pdev) { + mgmt_txrx_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + mgmt_txrx_pdev_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("mgmt txrx context is already NULL"); + return QDF_STATUS_E_FAILURE; + } + + mgmt_txrx_debug("deleting mgmt txrx pdev obj, mgmt txrx ctx: %pK, pdev: %pK", + mgmt_txrx_pdev_ctx, pdev); + if (wlan_objmgr_pdev_component_obj_detach(pdev, + WLAN_UMAC_COMP_MGMT_TXRX, mgmt_txrx_pdev_ctx) + != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to detach mgmt txrx ctx in pdev ctx"); + return QDF_STATUS_E_FAILURE; + } + + wlan_mgmt_txrx_desc_pool_deinit(mgmt_txrx_pdev_ctx); + qdf_mem_free(mgmt_txrx_pdev_ctx->mgmt_txrx_stats); + qdf_runtime_lock_deinit(&mgmt_txrx_pdev_ctx->wakelock_tx_runtime_cmp); + qdf_wake_lock_destroy(&mgmt_txrx_pdev_ctx->wakelock_tx_cmp); + qdf_mem_free(mgmt_txrx_pdev_ctx); + + mgmt_txrx_debug("mgmt txrx deletion successful, pdev: %pK", pdev); + + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS wlan_mgmt_txrx_init(void) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_register_psoc_create_handler( + WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_psoc_obj_create_notification, + NULL); + if (status != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to register mgmt txrx psoc create handler"); + goto err_psoc_create; + } + + status = wlan_objmgr_register_psoc_destroy_handler( + WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_psoc_obj_destroy_notification, + NULL); + if (status != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to register mgmt txrx psoc destroy handler"); + goto err_psoc_delete; + } + + status = wlan_objmgr_register_pdev_create_handler( + WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_pdev_obj_create_notification, + NULL); + if (status != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to register mgmt txrx pdev obj create handler"); + goto err_pdev_create; + } + + status = wlan_objmgr_register_pdev_destroy_handler( + WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_pdev_obj_destroy_notification, + NULL); + if (status != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to register mgmt txrx obj destroy handler"); + goto err_pdev_delete; + } + + mgmt_txrx_debug("Successfully registered create and destroy handlers with objmgr"); + return QDF_STATUS_SUCCESS; + +err_pdev_delete: + wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_pdev_obj_create_notification, NULL); +err_pdev_create: + wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_psoc_obj_destroy_notification, NULL); +err_psoc_delete: + wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_psoc_obj_create_notification, NULL); +err_psoc_create: + return status; +} + +QDF_STATUS wlan_mgmt_txrx_deinit(void) +{ + if (wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_psoc_obj_create_notification, + NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + if (wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_psoc_obj_destroy_notification, + NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + if (wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_pdev_obj_create_notification, + NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + if (wlan_objmgr_unregister_pdev_destroy_handler( + WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_pdev_obj_destroy_notification, + NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + + mgmt_txrx_debug("Successfully unregistered create and destroy handlers with objmgr"); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_mgmt_frame_tx(struct wlan_objmgr_peer *peer, + void *context, + qdf_nbuf_t buf, + mgmt_tx_download_comp_cb tx_comp_cb, + mgmt_ota_comp_cb tx_ota_comp_cb, + enum wlan_umac_comp_id comp_id, + void *mgmt_tx_params) +{ + struct mgmt_txrx_desc_elem_t *desc; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct mgmt_txrx_priv_pdev_context *txrx_ctx; + struct wlan_objmgr_vdev *vdev; + QDF_STATUS status; + + if (!peer) { + mgmt_txrx_err("peer passed is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + status = wlan_objmgr_peer_try_get_ref(peer, WLAN_MGMT_NB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + mgmt_txrx_err("failed to get ref count for peer %pK", peer); + return QDF_STATUS_E_NULL_VALUE; + } + + vdev = wlan_peer_get_vdev(peer); + if (!vdev) { + mgmt_txrx_err("vdev unavailable for peer %pK", peer); + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + mgmt_txrx_err("psoc unavailable for peer %pK vdev %pK", + peer, vdev); + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); + return QDF_STATUS_E_NULL_VALUE; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + mgmt_txrx_err("pdev unavailable for peer %pK vdev %pK", + peer, vdev); + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); + return QDF_STATUS_E_NULL_VALUE; + } + + txrx_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!txrx_ctx) { + mgmt_txrx_err("No txrx context for peer %pK pdev %pK", + peer, pdev); + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); + return QDF_STATUS_E_NULL_VALUE; + } + + desc = wlan_mgmt_txrx_desc_get(txrx_ctx); + if (!desc) { + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); + return QDF_STATUS_E_RESOURCES; + } + + desc->nbuf = buf; + desc->tx_ota_cmpl_cb = tx_ota_comp_cb; + desc->tx_dwnld_cmpl_cb = tx_comp_cb; + desc->peer = peer; + desc->vdev_id = wlan_vdev_get_id(vdev); + desc->context = context; + + if (!psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops.mgmt_tx_send) { + mgmt_txrx_err( + "mgmt txrx txop to send mgmt frame is NULL for psoc: %pK", + psoc); + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); + desc->nbuf = NULL; + wlan_mgmt_txrx_desc_put(txrx_ctx, desc->desc_id); + return QDF_STATUS_E_FAILURE; + } + + if (psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops.mgmt_tx_send( + vdev, buf, desc->desc_id, mgmt_tx_params)) { + mgmt_txrx_err("Mgmt send fail for peer %pK psoc %pK pdev: %pK", + peer, psoc, pdev); + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); + desc->nbuf = NULL; + wlan_mgmt_txrx_desc_put(txrx_ctx, desc->desc_id); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_beacon_frame_tx(struct wlan_objmgr_peer *peer, + qdf_nbuf_t buf, + enum wlan_umac_comp_id comp_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_psoc *psoc; + + vdev = wlan_peer_get_vdev(peer); + if (!vdev) { + mgmt_txrx_err("vdev unavailable for peer %pK", peer); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + mgmt_txrx_err("psoc unavailable for peer %pK", peer); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops.beacon_send) { + mgmt_txrx_err("mgmt txrx tx op to send beacon frame is NULL for psoc: %pK", + psoc); + return QDF_STATUS_E_FAILURE; + } + + if (psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops.beacon_send(vdev, buf)) { + mgmt_txrx_err("Beacon send fail for peer %pK psoc %pK", + peer, psoc); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_SUPPORT_FILS +QDF_STATUS +wlan_mgmt_txrx_fd_action_frame_tx(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t buf, + enum wlan_umac_comp_id comp_id) +{ + struct wlan_objmgr_psoc *psoc; + uint32_t vdev_id; + + if (!vdev) { + mgmt_txrx_err("Invalid vdev"); + return QDF_STATUS_E_NULL_VALUE; + } + vdev_id = wlan_vdev_get_id(vdev); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + mgmt_txrx_err("psoc unavailable for vdev %d", vdev_id); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops.fd_action_frame_send) { + mgmt_txrx_err("mgmt txrx txop to send fd action frame is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops.fd_action_frame_send( + vdev, buf)) { + mgmt_txrx_err("FD send fail for vdev %d", vdev_id); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUPPORT_FILS */ + +/** + * wlan_mgmt_txrx_create_rx_handler() - creates rx handler node for umac comp. + * @mgmt_txrx_psoc_ctx: mgmt txrx context + * @mgmt_rx_cb: mgmt rx callback to be registered + * @comp_id: umac component id + * @frm_type: mgmt. frame for which cb to be registered. + * + * This function creates rx handler node for frame type and + * umac component passed in the func. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_mgmt_txrx_create_rx_handler( + struct mgmt_txrx_priv_psoc_context *mgmt_txrx_psoc_ctx, + mgmt_frame_rx_callback mgmt_rx_cb, + enum wlan_umac_comp_id comp_id, + enum mgmt_frame_type frm_type) +{ + struct mgmt_rx_handler *rx_handler; + + rx_handler = qdf_mem_malloc(sizeof(*rx_handler)); + if (!rx_handler) + return QDF_STATUS_E_NOMEM; + + rx_handler->comp_id = comp_id; + rx_handler->rx_cb = mgmt_rx_cb; + + qdf_spin_lock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + rx_handler->next = mgmt_txrx_psoc_ctx->mgmt_rx_comp_cb[frm_type]; + mgmt_txrx_psoc_ctx->mgmt_rx_comp_cb[frm_type] = rx_handler; + qdf_spin_unlock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + + mgmt_txrx_debug("Callback registered for comp_id: %d, frm_type: %d", + comp_id, frm_type); + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_mgmt_txrx_delete_rx_handler() - deletes rx handler node for umac comp. + * @mgmt_txrx_psoc_ctx: mgmt txrx context + * @mgmt_rx_cb: mgmt rx callback to be deregistered + * @comp_id: umac component id + * @frm_type: mgmt. frame for which cb to be registered. + * + * This function deletes rx handler node for frame type and + * umac component passed in the func. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_mgmt_txrx_delete_rx_handler( + struct mgmt_txrx_priv_psoc_context *mgmt_txrx_psoc_ctx, + mgmt_frame_rx_callback mgmt_rx_cb, + enum wlan_umac_comp_id comp_id, + enum mgmt_frame_type frm_type) +{ + struct mgmt_rx_handler *rx_handler = NULL, *rx_handler_prev = NULL; + bool delete = false; + + qdf_spin_lock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + rx_handler = mgmt_txrx_psoc_ctx->mgmt_rx_comp_cb[frm_type]; + while (rx_handler) { + if (rx_handler->comp_id == comp_id && + rx_handler->rx_cb == mgmt_rx_cb) { + if (rx_handler_prev) + rx_handler_prev->next = + rx_handler->next; + else + mgmt_txrx_psoc_ctx->mgmt_rx_comp_cb[frm_type] = + rx_handler->next; + + qdf_mem_free(rx_handler); + delete = true; + break; + } + + rx_handler_prev = rx_handler; + rx_handler = rx_handler->next; + } + qdf_spin_unlock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + + if (!delete) { + mgmt_txrx_err("No callback registered for comp_id: %d, frm_type: %d", + comp_id, frm_type); + return QDF_STATUS_E_FAILURE; + } + + mgmt_txrx_debug("Callback deregistered for comp_id: %d, frm_type: %d", + comp_id, frm_type); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_register_rx_cb( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + struct mgmt_txrx_mgmt_frame_cb_info *frm_cb_info, + uint8_t num_entries) +{ + struct mgmt_txrx_priv_psoc_context *mgmt_txrx_psoc_ctx; + QDF_STATUS status; + uint8_t i, j; + + if (!psoc) { + mgmt_txrx_err("psoc context is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (comp_id >= WLAN_UMAC_MAX_COMPONENTS) { + mgmt_txrx_err("Invalid component id %d passed", comp_id); + return QDF_STATUS_E_INVAL; + } + + if (!num_entries || num_entries >= MGMT_MAX_FRAME_TYPE) { + mgmt_txrx_err("Invalid value for num_entries: %d passed", + num_entries); + return QDF_STATUS_E_INVAL; + } + + if (!frm_cb_info) { + mgmt_txrx_err("frame cb info pointer is NULL"); + return QDF_STATUS_E_INVAL; + } + + mgmt_txrx_psoc_ctx = (struct mgmt_txrx_priv_psoc_context *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_psoc_ctx) { + mgmt_txrx_err("mgmt txrx context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < num_entries; i++) { + status = wlan_mgmt_txrx_create_rx_handler(mgmt_txrx_psoc_ctx, + frm_cb_info[i].mgmt_rx_cb, comp_id, + frm_cb_info[i].frm_type); + if (status != QDF_STATUS_SUCCESS) { + for (j = 0; j < i; j++) { + wlan_mgmt_txrx_delete_rx_handler( + mgmt_txrx_psoc_ctx, + frm_cb_info[j].mgmt_rx_cb, + comp_id, frm_cb_info[j].frm_type); + } + return status; + } + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_deregister_rx_cb( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + struct mgmt_txrx_mgmt_frame_cb_info *frm_cb_info, + uint8_t num_entries) +{ + struct mgmt_txrx_priv_psoc_context *mgmt_txrx_psoc_ctx; + uint8_t i; + + if (!psoc) { + mgmt_txrx_err("psoc context is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (comp_id >= WLAN_UMAC_MAX_COMPONENTS) { + mgmt_txrx_err("Invalid component id %d passed", comp_id); + return QDF_STATUS_E_INVAL; + } + + if (!num_entries || num_entries >= MGMT_MAX_FRAME_TYPE) { + mgmt_txrx_err("Invalid value for num_entries: %d passed", + num_entries); + return QDF_STATUS_E_INVAL; + } + + if (!frm_cb_info) { + mgmt_txrx_err("frame cb info pointer is NULL"); + return QDF_STATUS_E_INVAL; + } + + mgmt_txrx_psoc_ctx = (struct mgmt_txrx_priv_psoc_context *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_psoc_ctx) { + mgmt_txrx_err("mgmt txrx context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < num_entries; i++) { + wlan_mgmt_txrx_delete_rx_handler(mgmt_txrx_psoc_ctx, + frm_cb_info[i].mgmt_rx_cb, comp_id, + frm_cb_info[i].frm_type); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_pdev_open(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + struct mgmt_txrx_desc_elem_t *mgmt_desc; + uint32_t pool_size; + uint32_t index; + + if (!pdev) { + mgmt_txrx_err("pdev context is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + mgmt_txrx_err("psoc unavailable for pdev %pK", pdev); + return QDF_STATUS_E_NULL_VALUE; + } + + mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("mgmt txrx context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pool_size = mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list.max_size; + if (!pool_size) { + mgmt_txrx_err("pool size is 0"); + return QDF_STATUS_E_FAILURE; + } + + for (index = 0; index < pool_size; index++) { + if (mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[index].in_use) { + mgmt_txrx_debug( + "mgmt descriptor with desc id: %d not in freelist", + index); + mgmt_desc = &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[index]; + if (psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops. + tx_drain_nbuf_op) + psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops. + tx_drain_nbuf_op(pdev, mgmt_desc->nbuf); + qdf_nbuf_free(mgmt_desc->nbuf); + wlan_objmgr_peer_release_ref(mgmt_desc->peer, + WLAN_MGMT_NB_ID); + wlan_mgmt_txrx_desc_put(mgmt_txrx_pdev_ctx, index); + } + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_vdev_drain(struct wlan_objmgr_vdev *vdev, + mgmt_frame_fill_peer_cb mgmt_fill_peer_cb, + void *status) +{ + struct wlan_objmgr_pdev *pdev; + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + struct mgmt_txrx_desc_elem_t *mgmt_desc; + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_vdev *peer_vdev; + uint32_t pool_size; + int i; + + if (!vdev) { + mgmt_txrx_err("vdev context is NULL"); + return QDF_STATUS_E_INVAL; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + mgmt_txrx_err("pdev context is NULL"); + return QDF_STATUS_E_INVAL; + } + mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("mgmt txrx context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pool_size = mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list.max_size; + if (!pool_size) { + mgmt_txrx_err("pool size is 0"); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < pool_size; i++) { + if (mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[i].in_use) { + mgmt_desc = &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[i]; + peer = mgmt_txrx_get_peer(pdev, mgmt_desc->desc_id); + if (peer) { + peer_vdev = wlan_peer_get_vdev(peer); + if (peer_vdev == vdev) { + if (mgmt_fill_peer_cb) + mgmt_fill_peer_cb(peer, mgmt_desc->nbuf); + mgmt_txrx_tx_completion_handler(pdev, + mgmt_desc->desc_id, 0, status); + } + } + } + } + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_cmn.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_cmn.h new file mode 100644 index 0000000000000000000000000000000000000000..fd758ccd079795d1f8da565ccdc9d59f6da29ce4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_cmn.h @@ -0,0 +1,503 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: This file provides the common definitions for object manager + */ + +#ifndef _WLAN_OBJMGR_CMN_H_ +#define _WLAN_OBJMGR_CMN_H_ + +#include "qdf_lock.h" +#include "qdf_list.h" +#include "qdf_status.h" +#include "wlan_cmn.h" +#include "qdf_atomic.h" + +/* No. of PSOCs can be supported */ +#define WLAN_OBJMGR_MAX_DEVICES 3 + +/* size of Hash */ +#define WLAN_PEER_HASHSIZE 64 + +/* simple hash is enough for variation of macaddr */ +#define WLAN_PEER_HASH(addr) \ +(((const uint8_t *)(addr))[QDF_MAC_ADDR_SIZE - 1] % WLAN_PEER_HASHSIZE) + +#define obj_mgr_log(level, args...) \ + QDF_TRACE(QDF_MODULE_ID_OBJ_MGR, level, ## args) +#define obj_mgr_logfl(level, format, args...) \ + obj_mgr_log(level, FL(format), ## args) +#define obj_mgr_log_level(level, format, args...)\ + obj_mgr_logfl(level, format, ## args) + +#define obj_mgr_alert(params...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_OBJ_MGR, params) +#define obj_mgr_err(params...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_OBJ_MGR, params) +#define obj_mgr_warn(params...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_OBJ_MGR, params) +#define obj_mgr_info(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_OBJ_MGR, params) +#define obj_mgr_debug(params...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_OBJ_MGR, params) + +#define objmgr_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_OBJ_MGR, params) +#define objmgr_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_OBJ_MGR, params) +#define objmgr_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_OBJ_MGR, params) +#define objmgr_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_OBJ_MGR, params) +#define objmgr_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_OBJ_MGR, params) + +/** + * enum WLAN_OBJ_STATE - State of Object + * @WLAN_OBJ_STATE_ALLOCATED: Common object is allocated, but not + * fully initialized + * @WLAN_OBJ_STATE_CREATED: All component objects are created + * @WLAN_OBJ_STATE_DELETED: All component objects are destroyed + * @WLAN_OBJ_STATE_PARTIALLY_CREATED: Few/All component objects creation is + * in progress + * @WLAN_OBJ_STATE_PARTIALLY_DELETED: Component objects deletion is triggered, + * they are yet to be destroyed + * @WLAN_OBJ_STATE_COMP_DEL_PROGRESS: If a component is disabled run time, + * and this state is used to represent the + * deletion in progress after that + * component object is destroyed, object + * state would be moved to CREATED state + * @WLAN_OBJ_STATE_LOGICALLY_DELETED: Object deletion has been initiated, + * object destroy invoked once references + * are released + * @WLAN_OBJ_STATE_CREATION_FAILED: any component object is failed to be + * created + * @WLAN_OBJ_STATE_DELETION_FAILED: any component object is failed to be + * destroyed + */ +typedef enum { + WLAN_OBJ_STATE_ALLOCATED = 0, + WLAN_OBJ_STATE_CREATED = 1, + WLAN_OBJ_STATE_DELETED = 2, + WLAN_OBJ_STATE_PARTIALLY_CREATED = 3, + WLAN_OBJ_STATE_PARTIALLY_DELETED = 4, + WLAN_OBJ_STATE_COMP_DEL_PROGRESS = 5, + WLAN_OBJ_STATE_LOGICALLY_DELETED = 6, + WLAN_OBJ_STATE_CREATION_FAILED = 7, + WLAN_OBJ_STATE_DELETION_FAILED = 8, +} WLAN_OBJ_STATE; + +/* Object type is assigned with value */ +enum wlan_objmgr_obj_type { + WLAN_PSOC_OP = 0, + WLAN_PDEV_OP = 1, + WLAN_VDEV_OP = 2, + WLAN_PEER_OP = 3, + WLAN_OBJ_TYPE_MAX = 4, +}; + +/** + * struct wlan_peer_list { + * @peer_hash[]: peer sub lists + * @peer_list_lock: List lock, this has to be acquired on + * accessing/updating the list + * + * Peer list, it maintains sublists based on the MAC address as hash + * Note: For DA WDS similar peer list has to be maintained + * This peer list will not have WDS nodes + */ +struct wlan_peer_list { + qdf_list_t peer_hash[WLAN_PEER_HASHSIZE]; + qdf_spinlock_t peer_list_lock; +}; + +struct wlan_objmgr_psoc; +struct wlan_objmgr_pdev; +struct wlan_objmgr_vdev; +struct wlan_objmgr_peer; + +/* Create handler would return the following status + QDF_STATUS_SUCCESS-- + For synchronous handler:- this is returned on successful + component object creation + + QDF_STATUS_COMP_DISABLED-- + For synchronous handler:- this is returned on if component + doesn't want to allocate + + QDF_STATUS_COMP_ASYNC-- + For asynchronous handler:- this is returned on if component + needs a context break + + QDF_STATUS_E_NOMEM-- + For synchronous handler:- this is returned on if component + can't allocate + QDF_STATUS_E_FAILURE-- + For synchronous handler:- If it is failed, + For asynchronous handler:- If it is failed to post message + (means, not required)/feature is not supported +*/ +typedef QDF_STATUS (*wlan_objmgr_psoc_create_handler)( + struct wlan_objmgr_psoc *psoc, void *arg); +typedef QDF_STATUS (*wlan_objmgr_psoc_destroy_handler)( + struct wlan_objmgr_psoc *psoc, void *arg); +typedef void (*wlan_objmgr_psoc_status_handler)(struct wlan_objmgr_psoc *psoc, + void *arg, QDF_STATUS status); + +typedef QDF_STATUS (*wlan_objmgr_pdev_create_handler)( + struct wlan_objmgr_pdev *pdev, void *arg); +typedef QDF_STATUS (*wlan_objmgr_pdev_destroy_handler)( + struct wlan_objmgr_pdev *pdev, void *arg); +typedef void (*wlan_objmgr_pdev_status_handler)( + struct wlan_objmgr_pdev *pdev, void *arg, + QDF_STATUS status); + +typedef QDF_STATUS (*wlan_objmgr_vdev_create_handler)( + struct wlan_objmgr_vdev *vdev, void *arg); +typedef QDF_STATUS (*wlan_objmgr_vdev_destroy_handler)( + struct wlan_objmgr_vdev *vdev, void *arg); +typedef void (*wlan_objmgr_vdev_status_handler)( + struct wlan_objmgr_vdev *vdev, void *arg, + QDF_STATUS status); +typedef void (*wlan_objmgr_vdev_peer_free_notify_handler)( + struct wlan_objmgr_vdev *vdev); + + +typedef QDF_STATUS (*wlan_objmgr_peer_create_handler)( + struct wlan_objmgr_peer *peer, void *arg); +typedef QDF_STATUS (*wlan_objmgr_peer_destroy_handler)( + struct wlan_objmgr_peer *peer, void *arg); +typedef void (*wlan_objmgr_peer_status_handler)( + struct wlan_objmgr_peer *peer, void *arg, + QDF_STATUS status); + +/** + * enum wlan_objmgr_ref_dbgid - ref count debug id + * @WLAN_OBJMGR_ID: Object manager internal operations + * @WLAN_MLME_SB_ID: MLME Southbound operations + * @WLAN_MLME_NB_ID: MLME Northbound operations + * @WLAN_MGMT_SB_ID: MGMT Northbound operations + * @WLAN_MGMT_NB_ID: MGMT Southbound operations + * @WLAN_HDD_ID_OBJ_MGR: HDD Object Manager operations + * @WLAN_OSIF_ID: New component's OS IF ID + * @WLAN_LEGACY_MAC_ID: Legacy MAC operations + * @WLAN_LEGACY_WMA_ID: Legacy WMA operations + * @WLAN_SERIALIZATION_ID: Serialization operations + * @WLAN_PMO_ID: power manager offload (PMO) ID + * @WLAN_LEGACY_SME_ID: Legacy SME operations + * @WLAN_SCAN_ID: scan operations + * @WLAN_WIFI_POS_CORE_ID: wifi positioning (CORE) + * @WLAN_DFS_ID: DFS operations + * @WLAN_P2P_ID: P2P operations + * @WLAN_TDLS_SB_ID: TDLS Southbound operations + * @WLAN_TDLS_NB_ID: TDLS Northbound operations + * @WLAN_ATF_ID: Airtime Fairness operations + * @WLAN_CRYPTO_ID: Crypto service operation + * @WLAN_NAN_ID: nan operations + * @WLAN_REGULATORY_SB_ID: SB regulatory operations + * @WLAN_REGULATORY_NB_ID: NB regulatory operations + * @WLAN_POLICY_MGR_ID: Policy Manager operations + * @WLAN_SON_ID: SON + * @WLAN_SA_API_ID: SA PAI + * @WLAN_SPECTRAL_ID: Spectral operations + * @WLAN_SPLITMAC_ID: SplitMac + * @WLAN_DEBUG_ID: Debug operations + * @WLAN_DIRECT_BUF_RX_ID: Direct Buffer Receive operations + * @WLAN_DISA_ID: DISA (encryption test) operations + * @WLAN_FTM_ID: FTM module + * @WLAN_FD_ID: FILS Discovery + * @WLAN_OCB_NB_ID: OCB Northbound operations + * @WLAN_OCB_SB_ID: OCB Southbound operations + * @WLAN_INIT_DEINIT_ID: Init deinit module + * @WLAN_IPA_ID: IPA operations + * @WLAN_CP_STATS_ID: Control Plane Statistics Module + * @WLAN_GREEN_AP_ID: Green AP operations + * @WLAN_WIFI_POS_OSIF_ID: wifi positioning (OSID) + * @WLAN_WIFI_POS_TGT_IF_ID: wifi positioning (Target IF) + * @WLAN_MLME_OBJ_DEL_ID: Object delete req/resp tracking with FW + * @WLAN_ACTION_OUI_ID: action oui operations + * @WLAN_LEGACY_SAP_ID: legacy sap fsm + * @WLAN_PDEV_TARGET_IF_ID: Target interface layer for pdev APIs + * @WLAN_MLME_SER_IF_ID: mlme serialization interface layer + * @WLAN_SCHEDULER_ID: mlme scheduler + * @WLAN_CFR_ID: CFG Capture method + * @WLAN_VDEV_TARGET_IF_ID: Target interface layer + * @WLAN_RX_PKT_TAG_ID: RX protocol tag operations + * @WLAN_INTEROP_ISSUES_AP_ID: interop issues ap operation + * @WLAN_WDS_ID: WDS operations + * @WLAN_PROXY_ARP_ID: AP proxy ARP + * @WLAN_WNM_ID: wireless network management operations + * @WLAN_RRM_ID: Radio resource management operations + * @WLAN_TR69_ID: TR69 operations + * @WLAN_MGMT_RX_ID: Legacy offload management frame input handler + * @WLAN_MGMT_TX_ID: Legacy offload management frame output handler + * @WLAN_NSS_IF_ID: NSS offload interface operations + * @WLAN_MBO_ID: MBO operations + * @WLAN_RTT_ID: RTT operations + * @WLAN_ALD_ID: Ath Link Diagnostic operations + * @WLAN_ME_ID: Multicast enhancement operations + * @WLAN_MGMT_HANDLER_ID: Management frame handler + * @WLAN_MLME_HANDLER_ID: MLME handler + * @WLAN_DBDC_ID: Dual Band Dual Concurrent mode operations + * @WLAN_MLME_OBJMGR_ID: MLME object manager operations VAP, Node + * @WLAN_OFFCHAN_TX_ID: Offchannel Tx operations + * @WLAN_MISC_ID: power manager, PAPI, rate set, etc. + * @WLAN_FWOL_NB_ID: fw offload northbound operations + * @WLAN_FWOL_SB_ID: fw offload southbound operations + * @WLAN_PSOC_TARGET_IF_ID PSOC related target_if operations + * @FTM_TIME_SYNC_ID: ftm time sync operations + * @WLAN_PKT_CAPTURE_ID Packet capture operations + * @WLAN_REF_ID_MAX: Max id used to generate ref count tracking array + */ + /* New value added to the enum must also be reflected in function + * string_from_dbgid() + */ +typedef enum { + WLAN_OBJMGR_ID = 0, + WLAN_MLME_SB_ID = 1, + WLAN_MLME_NB_ID = 2, + WLAN_MGMT_SB_ID = 3, + WLAN_MGMT_NB_ID = 4, + WLAN_HDD_ID_OBJ_MGR = 5, + WLAN_OSIF_ID = 6, + WLAN_LEGACY_MAC_ID = 7, + WLAN_LEGACY_WMA_ID = 8, + WLAN_SERIALIZATION_ID = 9, + WLAN_PMO_ID = 10, + WLAN_LEGACY_SME_ID = 11, + WLAN_SCAN_ID = 12, + WLAN_WIFI_POS_CORE_ID = 13, + WLAN_DFS_ID = 14, + WLAN_P2P_ID = 15, + WLAN_TDLS_SB_ID = 16, + WLAN_TDLS_NB_ID = 17, + WLAN_ATF_ID = 18, + WLAN_CRYPTO_ID = 19, + WLAN_NAN_ID = 20, + WLAN_REGULATORY_SB_ID = 21, + WLAN_REGULATORY_NB_ID = 22, + WLAN_OFFCHAN_TXRX_ID = 23, + WLAN_POLICY_MGR_ID = 24, + WLAN_SON_ID = 25, + WLAN_SA_API_ID = 26, + WLAN_SPECTRAL_ID = 27, + WLAN_SPLITMAC_ID = 28, + WLAN_DEBUG_ID = 29, + WLAN_DIRECT_BUF_RX_ID = 30, + WLAN_DISA_ID = 31, + WLAN_FTM_ID = 32, + WLAN_FD_ID = 33, + WLAN_OCB_NB_ID = 34, + WLAN_OCB_SB_ID = 35, + WLAN_INIT_DEINIT_ID = 36, + WLAN_IPA_ID = 37, + WLAN_CP_STATS_ID = 38, + WLAN_GREEN_AP_ID = 39, + WLAN_WIFI_POS_OSIF_ID = 40, + WLAN_WIFI_POS_TGT_IF_ID = 41, + WLAN_MLME_OBJ_DEL_ID = 42, + WLAN_ACTION_OUI_ID = 43, + WLAN_LEGACY_SAP_ID = 44, + WLAN_PDEV_TARGET_IF_ID = 45, + WLAN_MLME_SER_IF_ID = 46, + WLAN_SCHEDULER_ID = 47, + WLAN_CFR_ID = 48, + WLAN_VDEV_TARGET_IF_ID = 49, + WLAN_RX_PKT_TAG_ID = 50, + WLAN_INTEROP_ISSUES_AP_ID = 51, + WLAN_WDS_ID = 52, + WLAN_PROXY_ARP_ID = 53, + WLAN_WNM_ID = 54, + WLAN_RRM_ID = 55, + WLAN_TR69_ID = 56, + WLAN_MGMT_RX_ID = 57, + WLAN_MGMT_TX_ID = 58, + WLAN_NSS_IF_ID = 59, + WLAN_MBO_ID = 60, + WLAN_RTT_ID = 61, + WLAN_ALD_ID = 62, + WLAN_ME_ID = 63, + WLAN_MGMT_HANDLER_ID = 64, + WLAN_MLME_HANDLER_ID = 65, + WLAN_DBDC_ID = 66, + WLAN_MLME_OBJMGR_ID = 67, + WLAN_OFFCHAN_TX_ID = 68, + WLAN_MISC_ID = 69, + WLAN_FWOL_NB_ID = 70, + WLAN_FWOL_SB_ID = 71, + WLAN_PSOC_TARGET_IF_ID = 72, + FTM_TIME_SYNC_ID = 73, + WLAN_PKT_CAPTURE_ID = 74, + WLAN_REF_ID_MAX, +} wlan_objmgr_ref_dbgid; + +/** + * string_from_dbgid() - Convert Refcnt dbgid to respective string + * @id - Reference count debug id + * + * Debug support function to convert refcnt dbgid to string. + * Please note to add new string in the array at index equal to + * its enum value in wlan_objmgr_ref_dbgid. + */ +static inline char *string_from_dbgid(wlan_objmgr_ref_dbgid id) +{ + static const char *strings[] = { "WLAN_OBJMGR_ID", + "WLAN_MLME_SB_ID", + "WLAN_MLME_NB_ID", + "WLAN_MGMT_SB_ID", + "WLAN_MGMT_NB_ID", + "WLAN_HDD_ID_OBJ_MGR", + "WLAN_OSIF_ID", + "WLAN_LEGACY_MAC_ID", + "WLAN_LEGACY_WMA_ID", + "WLAN_SERIALIZATION_ID", + "WLAN_PMO_ID", + "WLAN_LEGACY_SME_ID", + "WLAN_SCAN_ID", + "WLAN_WIFI_POS_CORE_ID", + "WLAN_DFS_ID", + "WLAN_P2P_ID", + "WLAN_TDLS_SB_ID", + "WLAN_TDLS_NB_ID", + "WLAN_ATF_ID", + "WLAN_CRYPTO_ID", + "WLAN_NAN_ID", + "WLAN_REGULATORY_SB_ID", + "WLAN_REGULATORY_NB_ID", + "WLAN_OFFCHAN_TXRX_ID", + "WLAN_POLICY_MGR_ID", + "WLAN_SON_ID", + "WLAN_SA_API_ID", + "WLAN_SPECTRAL_ID", + "WLAN_SPLITMAC_ID", + "WLAN_DEBUG_ID", + "WLAN_DIRECT_BUF_RX_ID", + "WLAN_DISA_ID", + "WLAN_FTM_ID", + "WLAN_FD_ID", + "WLAN_OCB_NB_ID", + "WLAN_OCB_SB_ID", + "WLAN_INIT_DEINIT_ID", + "WLAN_IPA_ID", + "WLAN_CP_STATS_ID", + "WLAN_GREEN_AP_ID", + "WLAN_WIFI_POS_OSIF_ID", + "WLAN_WIFI_POS_TGT_IF_ID", + "WLAN_MLME_OBJ_DEL_ID", + "WLAN_ACTION_OUI_ID", + "WLAN_LEGACY_SAP_ID", + "WLAN_PDEV_TARGET_IF_ID", + "WLAN_MLME_SER_IF_ID", + "WLAN_SCHEDULER_ID", + "WLAN_CFR_ID", + "WLAN_VDEV_TARGET_IF_ID", + "WLAN_RX_PKT_TAG_ID", + "WLAN_INTEROP_ISSUES_AP_ID", + "WLAN_WDS_ID", + "WLAN_PROXY_ARP_ID", + "WLAN_WNM_ID", + "WLAN_RRM_ID", + "WLAN_TR69_ID", + "WLAN_MGMT_RX_ID", + "WLAN_MGMT_TX_ID", + "WLAN_NSS_IF_ID", + "WLAN_MBO_ID", + "WLAN_RTT_ID", + "WLAN_ALD_ID", + "WLAN_ME_ID", + "WLAN_MGMT_HANDLER_ID", + "WLAN_MLME_HANDLER_ID", + "WLAN_DBDC_ID", + "WLAN_MLME_OBJMGR_ID", + "WLAN_OFFCHAN_TX_ID", + "WLAN_MISC_ID", + "WLAN_FWOL_NB_ID", + "WLAN_FWOL_SB_ID", + "WLAN_PSOC_TARGET_IF_ID", + "FTM_TIME_SYNC_ID", + "WLAN_PKT_CAPTURE_ID", + "WLAN_REF_ID_MAX"}; + + return (char *)strings[id]; +} + +#ifdef WLAN_OBJMGR_DEBUG +#define WLAN_OBJMGR_BUG(val) QDF_BUG(val) +#else +#define WLAN_OBJMGR_BUG(val) +#endif +#define WLAN_OBJMGR_RATELIMIT_THRESH 2 + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +#define WLAN_OBJMGR_TRACE_FUNC_SIZE 30 +/** + * struct wlan_objmgr_line_ref - line reference data + * @line: line number + * @cnt: line reference count + */ +struct wlan_objmgr_line_ref { + uint32_t line; + qdf_atomic_t cnt; +}; + +/** + * struct wlan_objmgr_line_ref_node - line reference node + * @line_ref: line reference data + * @next: pointer to next line reference + */ +struct wlan_objmgr_line_ref_node { + struct wlan_objmgr_line_ref line_ref; + struct wlan_objmgr_line_ref_node *next; +}; + +/** + * struct wlan_objmgr_trace_func - trace function data + * @func: function pointer + * @line_head: pointer to head line trace reference + * @next: pointer to next function reference + */ +struct wlan_objmgr_trace_func { + char func[WLAN_OBJMGR_TRACE_FUNC_SIZE]; + struct wlan_objmgr_line_ref_node *line_head; + struct wlan_objmgr_trace_func *next; +}; + +/** + * struct wlan_objmgr_trace_id - trace reference data + * @num_func: num of functions + * @head: head pointer to function reference + */ +struct wlan_objmgr_trace_id { + uint32_t num_func; + struct wlan_objmgr_trace_func *head; +}; + +/** + * struct wlan_objmgr_trace - trace reference data + * @references: reference data + * @dereferences: dereference data + * @trace_lock: lock + */ +struct wlan_objmgr_trace { + struct wlan_objmgr_trace_id references[WLAN_REF_ID_MAX]; + struct wlan_objmgr_trace_id dereferences[WLAN_REF_ID_MAX]; + qdf_spinlock_t trace_lock; +}; +#endif /*WLAN_OBJMGR_REF_ID_TRACE*/ + +#endif /* _WLAN_OBJMGR_CMN_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_debug.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..ee5c5430a6472a532a45e3732271061d1ff77e53 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_debug.h @@ -0,0 +1,145 @@ + /* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Public Data Structures to perform debug operations + * on object manager + */ + +#ifndef _WLAN_OBJMGR_DEBUG_H_ +#define _WLAN_OBJMGR_DEBUG_H_ + +#include + +#ifdef WLAN_OBJMGR_DEBUG + +/** + * wlan_objmgr_notify_log_delete()- insert + * logically deleted object into list + * @obj: object to be inserted + * @obj_type: type of object to be inserted + * + * Return: void + */ +void wlan_objmgr_notify_log_delete(void *obj, + enum wlan_objmgr_obj_type obj_type); + +/** + * wlan_objmgr_notify_destroy() - remove + * logically deleted object from list + * @obj: object to be removed + * @obj_type: type of object to be removed + * + * Return: void + */ +void wlan_objmgr_notify_destroy(void *obj, + enum wlan_objmgr_obj_type obj_type); + +/** + * wlan_objmgr_debug_info_init() - initialize + * the logically deleted list object + * Caller need to protect with global object lock + * + * Return: void + */ +void wlan_objmgr_debug_info_init(void); + +/** + * wlan_objmgr_debug_info_deinit() - deinitialize + * the logically deleted list object + * + * Return: void + */ +void wlan_objmgr_debug_info_deinit(void); + + +#else + +static inline void +wlan_objmgr_notify_log_delete(void *obj, + enum wlan_objmgr_obj_type obj_type) +{ +} + +static inline void +wlan_objmgr_notify_destroy(void *obj, + enum wlan_objmgr_obj_type obj_typ) +{ +} + +static inline void +wlan_objmgr_debug_info_init(void) +{ +} + +static inline void +wlan_objmgr_debug_info_deinit(void) +{ +} + +#endif /*WLAN_OBJMGR_DEBUG*/ + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +/** + * wlan_objmgr_trace_ref() - Save trace info to list + * @func_head: head object of function list + * @trace: trace object + * @func: function name + * @line: line number + * + * API to trace func and line information for reference + * and dereference + * + * Return: void + */ +void +wlan_objmgr_trace_ref(struct wlan_objmgr_trace_func **func_head, + struct wlan_objmgr_trace *trace, + const char *func, int line); + +/** + * wlan_objmgr_trace_init_lock() - Initialize trace spinlock + * @trace: trace object + * + * API to initialize trace spin lock + * + * Return: void + */ +void wlan_objmgr_trace_init_lock(struct wlan_objmgr_trace *trace); + +/** + * wlan_objmgr_trace_deinit_lock() - Deinitialize trace spinlock + * @trace: trace object + * + * API to deinitialize trace spin lock + * + * Return: void + */ +void wlan_objmgr_trace_deinit_lock(struct wlan_objmgr_trace *trace); + +/** + * wlan_objmgr_trace_del_ref_list() - Delete reference trace list + * @trace: trace object + * + * API to delete trace list + * + * Return: void + */ +void wlan_objmgr_trace_del_ref_list(struct wlan_objmgr_trace *trace); +#endif /*WLAN_OBJMGR_REF_ID_TRACE*/ + +#endif /*_WLAN_OBJMGR_DEBUG_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_global_obj.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_global_obj.h new file mode 100644 index 0000000000000000000000000000000000000000..64b1daead5a80a145f6c0460fe88fb0a122f53ab --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_global_obj.h @@ -0,0 +1,541 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Public APIs to perform operations on Global objects + */ +#ifndef _WLAN_OBJMGR_GLOBAL_OBJ_H_ +#define _WLAN_OBJMGR_GLOBAL_OBJ_H_ + +/** + * wlan_objmgr_global_obj_init() - global object initialization + * + * Creates global object, intializes with default values + * + * Return: SUCCESS on successful creation, + * FAILURE on Mem alloc failure or allocated already + * + */ +QDF_STATUS wlan_objmgr_global_obj_init(void); + +/** + * wlan_objmgr_global_obj_deinit() - global object deinitialization + * + * Deletes global object + * + * Return: SUCCESS on successful deletion, + * FAILURE on object is not found + * + */ +QDF_STATUS wlan_objmgr_global_obj_deinit(void); + +/** + * wlan_objmgr_global_obj_can_destroyed() - Checks whether global object + * can be destroyed + * + * Checks the psoc table of global object, if psoc table is empty + * returns the SUCCESS + * + * Return: SUCCESS on can be destroyed, + * FAILURE on can't be destroyed + * + */ +QDF_STATUS wlan_objmgr_global_obj_can_destroyed(void); + +/** + * wlan_objmgr_register_psoc_create_handler() - register psoc create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PSOC creation + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PSOC creation + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_psoc_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_create_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_psoc_create_handler() - unregister psoc create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PSOC creation + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_psoc_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_create_handler handler, + void *args); + +/** + * wlan_objmgr_register_psoc_destroy_handler() - register destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PSOC deletion + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PSOC deletion + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_psoc_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_psoc_destroy_handler() - unregister destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PSOC deletion + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_psoc_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_register_psoc_status_handler() - register status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PSOC status + * change + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PSOC object status change + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_psoc_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_status_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_psoc_status_handler() - unregister status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PSOC status + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_psoc_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_status_handler handler, + void *args); + +/** + * wlan_objmgr_register_pdev_create_handler() - register pdev create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PDEV creation + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PDEV creation + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_pdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_create_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_pdev_create_handler() - unregister pdev create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PDEV creation + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_pdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_create_handler handler, + void *args); + +/** + * wlan_objmgr_register_pdev_destroy_handler() - register pdev destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PDEV deletion + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PDEV deletion + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_pdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_pdev_destroy_handler() - unregister pdev destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PDEV deletion + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_pdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_register_pdev_status_handler() - register pdev status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PDEV status + * change + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PDEV object status change + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_pdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_status_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_pdev_status_handler() - unregister pdev status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PDEV status + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_pdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_status_handler handler, + void *args); + +/** + * wlan_objmgr_register_vdev_create_handler() - register vdev create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on VDEV creation + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on VDEV creation + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_vdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_create_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_vdev_create_handler() - unregister vdev create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on VDEV creation + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_vdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_create_handler handler, + void *args); + +/** + * wlan_objmgr_register_vdev_destroy_handler() - register vdev destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on VDEV deletion + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on VDEV deletion + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_vdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_vdev_destroy_handler() - unregister vdev destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on VDEV deletion + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_vdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_register_vdev_status_handler() - register vdev status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on VDEV status + * change + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on VDEV object status change + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_vdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_status_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_vdev_status_handler() - unregister vdev status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on VDEV status + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_vdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_status_handler handler, + void *args); + +/** + * wlan_objmgr_register_vdev_peer_free_notify_handler() - register vdev peer + * free handler + * @id: component id + * @handler: function pointer of the component + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on VDEV Peer gets freed + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_vdev_peer_free_notify_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_peer_free_notify_handler handler); + +/** + * wlan_objmgr_unregister_vdev_peer_free_notify_handler() - unregister vdev + * peer free handler + * @id: component id + * @handler: function pointer of the component + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_vdev_peer_free_notify_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_peer_free_notify_handler handler); + +/** + * wlan_objmgr_register_peer_create_handler() - register peer create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PEER creation + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PEER creation + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_peer_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_create_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_peer_create_handler() - unregister peer create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PEER creation + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_peer_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_create_handler handler, + void *args); + +/** + * wlan_objmgr_register_peer_destroy_handler() - register peer destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PEER deletion + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PEER deletion + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_peer_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_peer_destroy_handler() - unregister peer destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PEER deletion + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_peer_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_register_peer_status_handler() - register peer status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PEER status + * change + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PEER object status change + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_peer_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_status_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_peer_status_handler() - unregister peer status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PEER status + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_peer_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_status_handler handler, + void *args); + +/** + * APIs to operations on psoc + */ +typedef void (*wlan_objmgr_psoc_handler)(struct wlan_objmgr_psoc *psoc, + void *arg, + uint8_t index); + +/** + * wlan_objmgr_iterate_psoc_list() - iterate through all psocs + * + * @handler: the handler will be called for each psoc + * the handler should be implemented to perform required operation + * @arg: agruments passed by caller + * @dbg_id: id of the caller + * + * API to be used for performing the operations on all psoc + * The "handler" here shouldn't take g_umac_glb_obj->global_lock lock when + * processing + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_iterate_psoc_list( + wlan_objmgr_psoc_handler handler, + void *arg, wlan_objmgr_ref_dbgid dbg_id); + +#endif /* _WLAN_OBJMGR_GLOBAL_OBJ_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_pdev_obj.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_pdev_obj.h new file mode 100644 index 0000000000000000000000000000000000000000..a8c532a963f7fe754ce4e2a0d4897832239fb043 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_pdev_obj.h @@ -0,0 +1,1085 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Define the pdev data structure of UMAC + * Public APIs to perform operations on Global objects + */ + +#ifndef _WLAN_OBJMGR_PDEV_OBJ_H_ +#define _WLAN_OBJMGR_PDEV_OBJ_H_ + +#include +#include "wlan_objmgr_psoc_obj.h" +#include + +/* STATUS: scanning */ +#define WLAN_PDEV_F_SCAN 0x00000001 +/* STATUS: use short slot time*/ +#define WLAN_PDEV_F_SHSLOT 0x00000002 + /* STATUS: channel switch event pending after DFS RADAR */ +#define WLAN_PDEV_F_DFS_CHANSWITCH_PENDING 0x00000004 + /* TX Power: fixed rate */ +#define WLAN_PDEV_F_TXPOW_FIXED 0x00000008 + /* STATUS: use short preamble */ +#define WLAN_PDEV_F_SHPREAMBLE 0x00000010 + /* CONF: do alignment pad */ +#define WLAN_PDEV_F_DATAPAD 0x00000020 + /* STATUS: protection enabled */ +#define WLAN_PDEV_F_USEPROT 0x00000040 + /* STATUS: use barker preamble*/ +#define WLAN_PDEV_F_USEBARKER 0x00000080 + /* CONF: DISABLE 2040 coexistence */ +#define WLAN_PDEV_F_COEXT_DISABLE 0x00000100 + /* STATE: scan pending */ +#define WLAN_PDEV_F_SCAN_PENDING 0x00000200 + /* CONF: send regclassids in country ie */ +#define WLAN_PDEV_F_REGCLASS 0x00000400 + /* CONF: block the use of DFS channels */ +#define WLAN_PDEV_F_BLKDFSCHAN 0x00000800 + /* STATUS: 11D in used */ +#define WLAN_PDEV_F_DOT11D 0x00001000 + /* STATUS: 11D channel-switch detected */ +#define WLAN_PDEV_F_RADAR 0x00002000 + /* CONF: A-MPDU supported */ +#define WLAN_PDEV_F_AMPDU 0x00004000 + /* CONF: A-MSDU supported */ +#define WLAN_PDEV_F_AMSDU 0x00008000 + /* CONF: HT traffic protected */ +#define WLAN_PDEV_F_HTPROT 0x00010000 + /* CONF: Reset once */ +#define WLAN_PDEV_F_RESET 0x00020000 + /* CONF: ignore 11d beacon */ +#define WLAN_PDEV_F_IGNORE_11D_BEACON 0x00040000 + /* HT CAP IE present */ +#define WLAN_PDEV_F_HTVIE 0x00080000 + /* radio in middle of CSA */ +#define WLAN_PDEV_F_CSA_WAIT 0x00100000 + /* wnm support flag */ +#define WLAN_PDEV_F_WNM 0x00200000 +#define WLAN_PDEV_F_2G_CSA 0x00400000 + /* enhanced independent repeater */ +#define WLAN_PDEV_F_ENH_REP_IND 0x00800000 + /* Disable Tx AMSDU for station vap */ +#define WLAN_PDEV_F_STA_AMPDU_DIS 0x01000000 +/* do not send probe request in passive channel */ +#define WLAN_PDEV_F_STRICT_PSCAN_EN 0x02000000 + /* dupie (ANA,pre ANA ) */ +/*#define WLAN_PDEV_F_DUPIE 0x00200000*/ + /* QWRAP enable flag */ +#define WLAN_PDEV_F_WRAP_EN 0x04000000 +/* Chan concurrency enabled */ +#define WLAN_PDEV_F_CHAN_CONCURRENCY 0x08000000 +/* Multivdev restart enabled */ +#define WLAN_PDEV_F_MULTIVDEV_RESTART 0x10000000 +/* MBSS IE enable */ +#define WLAN_PDEV_F_MBSS_IE_ENABLE 0x20000000 +/* VDEV Peer delete all */ +#define WLAN_PDEV_F_DELETE_ALL_PEER 0x40000000 + +/* PDEV op flags */ + /* Enable htrate for wep and tkip */ +#define WLAN_PDEV_OP_WEP_TKIP_HTRATE 0x00000001 + /* non HT AP found flag */ +#define WLAN_PDEV_OP_NON_HT_AP 0x00000002 + /* block the use of DFS channels flag */ +#define WLAN_PDEV_OP_BLK_DFS_CHAN 0x00000004 + /* 11.h flag */ +#define WLAN_PDEV_OP_DOTH 0x00000008 + /* Off-channel support enabled */ +#define WLAN_PDEV_OP_OFFCHAN 0x00000010 +#define WLAN_PDEV_OP_HT20ADHOC 0x00000020 +#define WLAN_PDEV_OP_HT40ADHOC 0x00000040 +#define WLAN_PDEV_OP_HTADHOC_AGGR 0x00000080 + /* disallow CC change when assoc completes */ +#define WLAN_PDEV_OP_DISALLOW_AUTO_CC 0x00000100 + /* Is P2P Enabled? */ +#define WLAN_PDEV_OP_P2P 0x00000200 + /* disallowed */ +#define WLAN_PDEV_OP_IGNORE_DYNHALT 0x00000400 + /* overwrite probe response IE with beacon IE */ +#define WLAN_PDEV_OP_OVERRIDE_PROBERESP 0x00000800 +#define WLAN_PDEV_OP_DROPSTA_QUERY 0x00001000 +#define WLAN_PDEV_OP_BLK_REPORT_FLOOD 0x00002000 + /* Offchan scan */ +#define WLAN_PDEV_OP_OFFCHAN_SCAN 0x00004000 + /*Consider OBSS non-erp to change to long slot*/ +#define WLAN_PDEV_OP_OBSS_LONGSLOT 0x00008000 + /* enable/disable min rssi cli block */ +#define WLAN_PDEV_OP_MIN_RSSI_ENABLE 0x00010000 + /* PDEV VDEV restart is in progress */ +#define WLAN_PDEV_OP_RESTART_INPROGRESS 0x00020000 + /* PDEV MBSSID VDEV restart trigger */ +#define WLAN_PDEV_OP_MBSSID_RESTART 0x00040000 + /* RADAR DETECT Defer */ +#define WLAN_PDEV_OP_RADAR_DETECT_DEFER 0x00080000 + + +struct osif_pdev_priv; + +/** + * struct wlan_objmgr_pdev_nif - pdev object nif structure + * @pdev_fw_caps: radio specific FW capabilities + * @pdev_feature_caps: radio specific feature capabilities + * @pdev_ospriv: OS specific pointer + * @macaddr[]: MAC address + * @notified_ap_vdev: ap vdev + */ +struct wlan_objmgr_pdev_nif { + uint32_t pdev_fw_caps; + uint32_t pdev_feature_caps; + struct pdev_osif_priv *pdev_ospriv; + uint8_t macaddr[QDF_MAC_ADDR_SIZE]; + uint8_t notified_ap_vdev; +}; + +/** + * struct wlan_objmgr_pdev_mlme - pdev object mlme structure + * @pdev_op_flags: PDEV operation flags, can be used to know the + * operation status (deletion progress, etc) + */ +struct wlan_objmgr_pdev_mlme { + uint32_t pdev_op_flags; +}; + +/** + * struct wlan_objmgr_pdev_objmgr - pdev object object manager structure + * @wlan_pdev_id: PDEV id + * @wlan_vdev_count: VDEVs count + * @max_vdev_count: Max no. of VDEVs supported by this PDEV + * @print_cnt: Count to throttle Logical delete prints + * @wlan_vdev_list: List maintains the VDEVs created on this PDEV + * @wlan_peer_count: Peer count + * @max_peer_count: Max Peer count + * @temp_peer_count: Temporary peer count + * @max_monitor_vdev_count: Max monitor vdev count + * @wlan_psoc: back pointer to PSOC, its attached to + * @ref_cnt: Ref count + * @ref_id_dbg: Array to track Ref count + */ +struct wlan_objmgr_pdev_objmgr { + uint8_t wlan_pdev_id; + uint8_t wlan_vdev_count; + uint8_t max_vdev_count; + uint8_t print_cnt; + qdf_list_t wlan_vdev_list; + uint16_t wlan_peer_count; + uint16_t max_peer_count; + uint16_t temp_peer_count; + uint8_t max_monitor_vdev_count; + struct wlan_objmgr_psoc *wlan_psoc; + qdf_atomic_t ref_cnt; + qdf_atomic_t ref_id_dbg[WLAN_REF_ID_MAX]; +}; + +/** + * struct wlan_objmgr_pdev - PDEV common object + * @current_chan_list: Active/current Channel list of the radio + * @pdev_nif: pdev nif structure + * @pdev_objmgr: pdev object manager structure + * @pdev_mlme: pdev MLME structure + * @pdev_comp_priv_obj[]: component's private object array + * @obj_status[]: object status of each component object + * @obj_state: object state + * @tgt_if_handle: Target interface handle + * @pdev_lock: lock to protect object +*/ +struct wlan_objmgr_pdev { + struct wlan_chan_list *current_chan_list; + struct wlan_objmgr_pdev_nif pdev_nif; + struct wlan_objmgr_pdev_objmgr pdev_objmgr; + struct wlan_objmgr_pdev_mlme pdev_mlme; + void *pdev_comp_priv_obj[WLAN_UMAC_MAX_COMPONENTS]; + QDF_STATUS obj_status[WLAN_UMAC_MAX_COMPONENTS]; + WLAN_OBJ_STATE obj_state; + target_pdev_info_t *tgt_if_handle; + qdf_spinlock_t pdev_lock; +}; + +/** + ** APIs to Create/Delete Global object APIs + */ +/** + * wlan_objmgr_pdev_obj_create() - pdev create + * @psoc: PSOC object + * @scn: os private object + * + * Creates PDEV object, intializes with default values + * Invokes the registered notifiers to create component object + * + * Return: Handle to struct wlan_objmgr_psoc on successful creation, + * NULL on Failure (on Mem alloc failure and Component objects + * Failure) + */ +struct wlan_objmgr_pdev *wlan_objmgr_pdev_obj_create( + struct wlan_objmgr_psoc *psoc, struct pdev_osif_priv *osif_priv); + +/** + * wlan_objmgr_pdev_obj_delete() - pdev delete + * @psoc: PDEV object + * + * Logically deletes PDEV object, + * Once all the references are released, object manager invokes the registered + * notifiers to destroy component objects + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_pdev_obj_delete(struct wlan_objmgr_pdev *pdev); + +/** + ** APIs to attach/detach component objects + */ +/** + * wlan_objmgr_pdev_component_obj_attach() - pdev comp object attach + * @psoc: PDEV object + * @id: Component id + * @comp_priv_obj: component's private object pointer + * @status: Component's private object creation status + * + * API to be used for attaching component object with PDEV common object + * + * Return: SUCCESS on successful storing of component's object in common object + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_pdev_component_obj_attach( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status); + +/** + * wlan_objmgr_pdev_component_obj_detach() - pdev comp object detach + * @psoc: PDEV object + * @id: Component id + * @comp_priv_obj: component's private object pointer + * + * API to be used for detaching component object with PDEV common object + * + * Return: SUCCESS on successful removal of component's object from common + * object + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_pdev_component_obj_detach( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj); + +/** + ** APIs to operations on pdev objects + */ + +typedef void (*wlan_objmgr_pdev_op_handler)(struct wlan_objmgr_pdev *pdev, + void *object, + void *arg); + +/** + * wlan_objmgr_pdev_iterate_obj_list() - operate on all objects of pdev + * @pdev: PDEV object + * @obj_type: VDEV_OP/PEER_OP + * @handler: the handler will be called for each object of requested type + * the handler should be implemented to perform required operation + * @arg: agruments passed by caller + * @lock_free_op: its obsolete + * @dbg_id: id of the caller + * + * API to be used for performing the operations on all VDEV/PEER objects + * of pdev + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_pdev_iterate_obj_list( + struct wlan_objmgr_pdev *pdev, + enum wlan_objmgr_obj_type obj_type, + wlan_objmgr_pdev_op_handler handler, + void *arg, uint8_t lock_free_op, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_trigger_pdev_comp_priv_object_creation() - create + * comp object of pdev + * @pdev: PDEV object + * @id: Component id + * + * API to create component private object in run time, this would be + * used for features which gets enabled in run time + * + * Return: SUCCESS on successful creation + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_trigger_pdev_comp_priv_object_creation( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id); + +/** + * wlan_objmgr_trigger_pdev_comp_priv_object_deletion() - destroy + * comp object of pdev + * @pdev: PDEV object + * @id: Component id + * + * API to destroy component private object in run time, this would + * be used for features which gets disabled in run time + * + * Return: SUCCESS on successful deletion + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_trigger_pdev_comp_priv_object_deletion( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id); + +/** + * wlan_objmgr_get_vdev_by_id_from_pdev() - find vdev using id from pdev + * @pdev: PDEV object + * @vdev_id: vdev id + * @dbg_id: id of the caller + * + * API to find vdev object pointer by vdev id from pdev's vdev list + * + * This API increments the ref count of the vdev object internally, the + * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement + * ref count + * + * Return: vdev pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_pdev_debug( + struct wlan_objmgr_pdev *pdev, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_get_vdev_by_id_from_pdev(pdev, vdev_id, dbgid) \ + wlan_objmgr_get_vdev_by_id_from_pdev_debug(pdev, \ + vdev_id, dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_pdev( + struct wlan_objmgr_pdev *pdev, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_get_vdev_by_id_from_pdev_no_state() - find vdev using id + * from pdev + * @pdev: PDEV object + * @vdev_id: vdev id + * @dbg_id: id of the caller + * + * API to find vdev object pointer by vdev id from pdev's vdev list + * + * This API increments the ref count of the vdev object internally, the + * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement + * ref count + * + * Return: vdev pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_pdev_no_state_debug( + struct wlan_objmgr_pdev *pdev, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_get_vdev_by_id_from_pdev_no_state(pdev, \ + vdev_id, dbgid) \ + wlan_objmgr_get_vdev_by_id_from_pdev_no_state_debug(pdev, \ + vdev_id, dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_pdev_no_state( + struct wlan_objmgr_pdev *pdev, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_get_vdev_by_macaddr_from_pdev() - find vdev using macaddr + * @pdev: PDEV object + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find vdev object pointer by vdev mac addr from pdev's vdev list + * + * This API increments the ref count of the vdev object internally, the + * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement + * ref count + * + * Return: vdev pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_pdev_debug( + struct wlan_objmgr_pdev *pdev, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *fnc, int ln); + +#define wlan_objmgr_get_vdev_by_macaddr_from_pdev(pdev, macaddr, dbgid) \ + wlan_objmgr_get_vdev_by_macaddr_from_pdev_debug(pdev, macaddr, \ + dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_pdev( + struct wlan_objmgr_pdev *pdev, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_get_vdev_by_macaddr_from_pdev_no_state() - find vdev using + * macaddr + * @pdev: PDEV object + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find vdev object pointer by vdev mac addr from pdev's vdev list + * + * This API increments the ref count of the vdev object internally, the + * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement + * ref count + * + * Return: vdev pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev + *wlan_objmgr_get_vdev_by_macaddr_from_pdev_no_state_debug( + struct wlan_objmgr_pdev *pdev, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_get_vdev_by_macaddr_from_pdev_no_state(pdev, macaddr, \ + dbgid) \ + wlan_objmgr_get_vdev_by_macaddr_from_pdev_no_state_debug(pdev, \ + macaddr, dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_pdev_no_state( + struct wlan_objmgr_pdev *pdev, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_pdev_get_first_vdev() - Get first vdev of pdev + * @pdev: PDEV object + * @dbg_id: Object Manager ref debug id + * + * API to get reference to first vdev of pdev. + * + * Return: reference to first vdev + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_pdev_get_first_vdev_debug( + struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_pdev_get_first_vdev(pdev, dbgid) \ + wlan_objmgr_pdev_get_first_vdev_debug(pdev, dbgid, \ + __func__, __LINE__) +#else +struct wlan_objmgr_vdev *wlan_objmgr_pdev_get_first_vdev( + struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_pdev_get_comp_private_obj() - get pdev component private object + * @pdev: PDEV object + * @id: Component id + * + * API to get component private object + * + * Return: void *ptr on SUCCESS + * NULL on Failure + */ +void *wlan_objmgr_pdev_get_comp_private_obj( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id); + +/** + * wlan_pdev_obj_lock() - Acquire PDEV spinlock + * @pdev: PDEV object + * + * API to acquire PDEV lock + * Parent lock should not be taken in child lock context + * but child lock can be taken in parent lock context + * (for ex: psoc lock can't be invoked in pdev/vdev/peer lock context) + * + * Return: void + */ +static inline void wlan_pdev_obj_lock(struct wlan_objmgr_pdev *pdev) +{ + qdf_spin_lock_bh(&pdev->pdev_lock); +} + +/** + * wlan_pdev_obj_unlock() - Release PDEV spinlock + * @pdev: PDEV object + * + * API to Release PDEV lock + * + * Return: void + */ +static inline void wlan_pdev_obj_unlock(struct wlan_objmgr_pdev *pdev) +{ + qdf_spin_unlock_bh(&pdev->pdev_lock); +} + +/** + * wlan_pdev_get_psoc() - get psoc + * @pdev: PDEV object + * + * API to get the psoc object from PDEV + * + * Return: + * @psoc: PSOC object + */ +static inline struct wlan_objmgr_psoc *wlan_pdev_get_psoc( + struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.wlan_psoc; +} + +/** + * wlan_pdev_set_psoc() - set psoc + * @pdev: PDEV object + * @psoc: PSOC object + * + * API to set the psoc object from PDEV + * + * Return: void + */ +static inline void wlan_pdev_set_psoc(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_psoc *psoc) +{ + pdev->pdev_objmgr.wlan_psoc = psoc; +} + +/** + * wlan_pdev_nif_fw_cap_set() - set fw caps + * @pdev: PDEV object + * @cap: capability flag to be set + * + * API to set fw caps in pdev + * + * Return: void + */ +static inline void wlan_pdev_nif_fw_cap_set(struct wlan_objmgr_pdev *pdev, + uint32_t cap) +{ + pdev->pdev_nif.pdev_fw_caps |= cap; +} + +/** + * wlan_pdev_nif_fw_cap_clear() - clear fw cap + * @pdev: PDEV object + * @cap: capability flag to be cleared + * + * API to clear fw caps in pdev + * + * Return: void + */ +static inline void wlan_pdev_nif_fw_cap_clear(struct wlan_objmgr_pdev *pdev, + uint32_t cap) +{ + pdev->pdev_nif.pdev_fw_caps &= ~cap; +} + +/** + * wlan_pdev_nif_fw_cap_get() - get fw caps + * @pdev: PDEV object + * @cap: capability flag to be checked + * + * API to know, whether particular fw caps flag is set in pdev + * + * Return: 1 (for set) or 0 (for not set) + */ +static inline uint8_t wlan_pdev_nif_fw_cap_get(struct wlan_objmgr_pdev *pdev, + uint32_t cap) +{ + return (pdev->pdev_nif.pdev_fw_caps & cap) ? 1 : 0; +} + +/** + * wlan_pdev_nif_feat_cap_set() - set feature caps + * @pdev: PDEV object + * @cap: capability flag to be set + * + * API to set feat caps in pdev + * + * Return: void + */ +static inline void wlan_pdev_nif_feat_cap_set(struct wlan_objmgr_pdev *pdev, + uint32_t cap) +{ + pdev->pdev_nif.pdev_feature_caps |= cap; +} + +/** + * wlan_pdev_nif_feat_cap_clear() - clear feature caps + * @pdev: PDEV object + * @cap: capability flag to be cleared + * + * API to clear feat caps in pdev + * + * Return: void + */ +static inline void wlan_pdev_nif_feat_cap_clear(struct wlan_objmgr_pdev *pdev, + uint32_t cap) +{ + pdev->pdev_nif.pdev_feature_caps &= ~cap; +} + +/** + * wlan_pdev_nif_feat_cap_get() - get feature caps + * @pdev: PDEV object + * @cap: capability flag to be checked + * + * API to know, whether particular feat caps flag is set in pdev + * + * Return: 1 (for set) or 0 (for not set) + */ +static inline uint8_t wlan_pdev_nif_feat_cap_get(struct wlan_objmgr_pdev *pdev, + uint32_t cap) +{ + return (pdev->pdev_nif.pdev_feature_caps & cap) ? 1 : 0; +} + +/** + * wlan_pdev_mlme_op_set() - set operation flags + * @pdev: PDEV object + * @op: Operation flag to be set + * + * API to set operation flag in pdev + * + * Return: void + */ +static inline void wlan_pdev_mlme_op_set(struct wlan_objmgr_pdev *pdev, + uint32_t op) +{ + pdev->pdev_mlme.pdev_op_flags |= op; +} + +/** + * wlan_pdev_mlme_op_clear() - clear op flags + * @pdev: PDEV object + * @op: Operation flag to be cleared + * + * API to clear op flag in pdev + * + * Return: void + */ +static inline void wlan_pdev_mlme_op_clear(struct wlan_objmgr_pdev *pdev, + uint32_t op) +{ + pdev->pdev_mlme.pdev_op_flags &= ~op; +} + +/** + * wlan_pdev_mlme_op_get() - get op flag + * @pdev: PDEV object + * @op: Operation flag to be checked + * + * API to know, whether particular operation flag is set in pdev + * + * Return: 1 (for set) or 0 (for not set) + */ +static inline uint8_t wlan_pdev_mlme_op_get(struct wlan_objmgr_pdev *pdev, + uint32_t op) +{ + return (pdev->pdev_mlme.pdev_op_flags & op) ? 1 : 0; +} + +/** + * wlan_pdev_get_hw_macaddr() - get hw macaddr + * @pdev: PDEV object + * + * API to get HW MAC address form PDEV + * + * Caller need to acquire lock with wlan_pdev_obj_lock() + * + * Return: @macaddr -MAC address + */ +static inline uint8_t *wlan_pdev_get_hw_macaddr(struct wlan_objmgr_pdev *pdev) +{ + if (!pdev) + return NULL; + + /* This API is invoked with lock acquired, do not add log prints */ + return pdev->pdev_nif.macaddr; +} + +/** + * wlan_pdev_set_hw_macaddr() - set hw macaddr + * @pdev: PDEV object + * @macaddr: MAC address + * + * API to set HW MAC address form PDEV + * + * Caller need to acquire lock with wlan_pdev_obj_lock() + * + * Return: void + */ +static inline void wlan_pdev_set_hw_macaddr(struct wlan_objmgr_pdev *pdev, + uint8_t *macaddr) +{ + /* This API is invoked with lock acquired, do not add log prints */ + WLAN_ADDR_COPY(pdev->pdev_nif.macaddr, macaddr); +} + +/** + * wlan_pdev_get_ospriv() - get os priv pointer + * @pdev: PDEV object + * + * API to get OS private pointer from PDEV + * + * Return: ospriv - private pointer + */ +static inline struct pdev_osif_priv *wlan_pdev_get_ospriv( + struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_nif.pdev_ospriv; +} + +/** + * wlan_pdev_reset_ospriv() - reset os priv pointer + * @pdev: PDEV object + * + * API to reset OS private pointer in PDEV + * + * Return: void + */ +static inline void wlan_pdev_reset_ospriv(struct wlan_objmgr_pdev *pdev) +{ + pdev->pdev_nif.pdev_ospriv = NULL; +} + +/** + * wlan_pdev_set_max_vdev_count() - set pdev max vdev count + * @pdev: PDEV object + * @vdev count: Max vdev count + * + * API to set Max vdev count + * + * Return: void + */ +static inline void wlan_pdev_set_max_vdev_count(struct wlan_objmgr_pdev *pdev, + uint8_t max_vdev_count) +{ + pdev->pdev_objmgr.max_vdev_count = max_vdev_count; +} + +/** + * wlan_pdev_get_max_vdev_count() - get pdev max vdev count + * @pdev: PDEV object + * + * API to set Max vdev count + * + * Return: @vdev count: Max vdev count + */ +static inline uint8_t wlan_pdev_get_max_vdev_count( + struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.max_vdev_count; +} + +/** + * DOC: Examples to use PDEV ref count APIs + * + * In all the scenarios, the pair of API should be followed + * otherwise it lead to memory leak + * + * scenario 1: + * + * wlan_objmgr_pdev_obj_create() + * ---- + * wlan_objmgr_pdev_obj_delete() + * + * scenario 2: + * + * wlan_objmgr_pdev_get_ref() + * ---- + * the operations which are done on + * pdev object + * ---- + * wlan_objmgr_pdev_release_ref() + * + * scenario 3: + * + * wlan_objmgr_get_pdev_by_id[_no_state]() + * ---- + * the operations which are done on + * pdev object + * ---- + * wlan_objmgr_pdev_release_ref() + * + * scenario 4: + * + * wlan_objmgr_get_pdev_by_macaddr[_no_state]() + * ---- + * the operations which are done on + * pdev object + * ---- + * wlan_objmgr_pdev_release_ref() + */ + +/** + * wlan_objmgr_pdev_get_ref() - increment ref count + * @pdev: PDEV object + * @id: Object Manager ref debug id + * + * API to increment ref count of pdev + * + * Return: void + */ +void wlan_objmgr_pdev_get_ref(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_objmgr_pdev_try_get_ref() - increment ref count, if allowed + * @pdev: PDEV object + * @id: Object Manager ref debug id + * + * API to increment ref count of pdev after checking valid object state + * + * Return: void + */ +QDF_STATUS wlan_objmgr_pdev_try_get_ref(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_objmgr_pdev_release_ref() - decrement ref count + * @pdev: PDEV object + * @id: Object Manager ref debug id + * + * API to decrement ref count of pdev, if ref count is 1, it initiates the + * PDEV deletion + * + * Return: void + */ +void wlan_objmgr_pdev_release_ref(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_objmgr_pdev_get_pdev_id() - get pdev id + * @pdev: PDEV object + * + * API to get pdev id from pdev object + * + * Return: @pdev id + */ +static inline +uint8_t wlan_objmgr_pdev_get_pdev_id(struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.wlan_pdev_id; +} + +/** + * wlan_pdev_set_tgt_if_handle(): API to set target if handle in pdev object + * @pdev: Pdev pointer + * @tgt_if_handle: target interface handle + * + * API to set target interface handle in pdev object + * + * Caller needs to acquire lock with wlan_pdev_obj_lock() + * + * Return: None + */ +static inline +void wlan_pdev_set_tgt_if_handle(struct wlan_objmgr_pdev *pdev, + target_pdev_info_t *tgt_if_handle) +{ + /* This API is invoked with lock acquired, do not add log prints */ + if (!pdev) + return; + + pdev->tgt_if_handle = tgt_if_handle; +} + +/** + * wlan_pdev_get_tgt_if_handle(): API to get target interface handle + * @pdev: Pdev pointer + * + * API to get target interface handle from pdev object + * + * Return: target interface handle + */ +static inline +target_pdev_info_t *wlan_pdev_get_tgt_if_handle(struct wlan_objmgr_pdev *pdev) +{ + if (!pdev) + return NULL; + + return pdev->tgt_if_handle; +} + +/** + * wlan_pdev_set_max_peer_count() - set max peer count + * @vdev: PDEV object + * @count: Max peer count + * + * API to set max peer count of PDEV + * + * Return: void + */ +static inline void wlan_pdev_set_max_peer_count(struct wlan_objmgr_pdev *pdev, + uint16_t count) +{ + pdev->pdev_objmgr.max_peer_count = count; +} + +/** + * wlan_pdev_get_max_peer_count() - get max peer count + * @pdev: PDEV object + * + * API to get max peer count of PDEV + * + * Return: max peer count + */ +static inline uint16_t wlan_pdev_get_max_peer_count( + struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.max_peer_count; +} + +/** + * wlan_pdev_set_max_monitor_vdev_count() - set max monitor vdev count + * @pdev: PDEV object + * @count: Max monitor vdev count + * + * API to set max monitor vdev count of PDEV + * + * Return: void + */ +static inline void wlan_pdev_set_max_monitor_vdev_count( + struct wlan_objmgr_pdev *pdev, + uint16_t count) +{ + pdev->pdev_objmgr.max_monitor_vdev_count = count; +} + +/** + * wlan_pdev_get_max_monitor_vdev_count() - get max monitor vdev count + * @pdev: PDEV object + * + * API to get max monitor vdev count of PDEV + * + * Return: max monitor vdev count + */ +static inline uint16_t wlan_pdev_get_max_monitor_vdev_count( + struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.max_monitor_vdev_count; +} + +/** + * wlan_pdev_get_peer_count() - get pdev peer count + * @pdev: PDEV object + * + * API to get peer count from PDEV + * + * Return: peer_count - pdev's peer count + */ +static inline uint16_t wlan_pdev_get_peer_count(struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.wlan_peer_count; +} + +/** + * wlan_pdev_get_temp_peer_count() - get pdev temporary peer count + * @pdev: PDEV object + * + * API to get temporary peer count from PDEV + * + * Return: temp_peer_count - pdev's temporary peer count + */ +static inline uint16_t wlan_pdev_get_temp_peer_count(struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.temp_peer_count; +} + + +/** + * wlan_pdev_incr_peer_count() - increment pdev peer count + * @pdev: PDEV object + * + * API to increment peer count of PDEV by 1 + * + * Return: void + */ +static inline void wlan_pdev_incr_peer_count(struct wlan_objmgr_pdev *pdev) +{ + pdev->pdev_objmgr.wlan_peer_count++; +} + +/** + * wlan_pdev_decr_peer_count() - decrement pdev peer count + * @pdev: PDEV object + * + * API to decrement peer count of PDEV by 1 + * + * Return: void + */ +static inline void wlan_pdev_decr_peer_count(struct wlan_objmgr_pdev *pdev) +{ + pdev->pdev_objmgr.wlan_peer_count--; +} + +/** + * wlan_pdev_incr_temp_peer_count() - increment temporary pdev peer count + * @pdev: PDEV object + * + * API to increment temporary peer count of PDEV by 1 + * + * Return: void + */ +static inline void wlan_pdev_incr_temp_peer_count(struct wlan_objmgr_pdev *pdev) +{ + pdev->pdev_objmgr.temp_peer_count++; +} + +/** + * wlan_pdev_decr_temp_peer_count() - decrement pdev temporary peer count + * @pdev: PDEV object + * + * API to decrement temporary peer count of PDEV by 1 + * + * Return: void + */ +static inline void wlan_pdev_decr_temp_peer_count(struct wlan_objmgr_pdev *pdev) +{ + pdev->pdev_objmgr.temp_peer_count--; +} + +/** + * wlan_pdev_get_vdev_count() - get PDEV vdev count + * @pdev: PDEV object + * + * API to get vdev count from PDEV + * + * Return: vdev_count - pdev's vdev count + */ +static inline uint8_t wlan_pdev_get_vdev_count(struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.wlan_vdev_count; +} + +/** + * wlan_print_pdev_info() - print pdev members + * @pdev: pdev object pointer + * + * Return: void + */ +#ifdef WLAN_OBJMGR_DEBUG +void wlan_print_pdev_info(struct wlan_objmgr_pdev *pdev); +#else +static inline void wlan_print_pdev_info(struct wlan_objmgr_pdev *pdev) {} +#endif + +#endif /* _WLAN_OBJMGR_PDEV_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_peer_obj.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_peer_obj.h new file mode 100644 index 0000000000000000000000000000000000000000..2324b37667faff0914995efbe240f3f7c9cdddfc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_peer_obj.h @@ -0,0 +1,1182 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Define the peer data structure of UMAC + * Public APIs to perform operations on Global objects + */ +#ifndef _WLAN_OBJMGR_PEER_OBJ_H_ +#define _WLAN_OBJMGR_PEER_OBJ_H_ + +#include +#include +#include "wlan_objmgr_vdev_obj.h" + +/* peer flags */ +/* authorized for data */ +#define WLAN_PEER_F_AUTH 0x00000001 +/* QoS enabled */ +#define WLAN_PEER_F_QOS 0x00000002 +/* ERP enabled */ +#define WLAN_PEER_F_ERP 0x00000004 +/* HT enabled */ +#define WLAN_PEER_F_HT 0x00000008 +/* NB: tWLANhave the same value as IEEE80211_FC1_PWR_MGT */ +/* power save mode enabled */ +#define WLAN_PEER_F_PWR_MGT 0x00000010 +/* keytsc for node has already been updated */ +#define WLAN_PEER_F_TSC_SET 0x00000020 +/* U-APSD power save enabled */ +#define WLAN_PEER_F_UAPSD 0x00000040 +/* U-APSD triggerable state */ +#define WLAN_PEER_F_UAPSD_TRIG 0x00000080 +/* U-APSD SP in progress */ +#define WLAN_PEER_F_UAPSD_SP 0x00000100 +/* Atheros Owl or follow-on device */ +#define WLAN_PEER_F_ATH 0x00000200 +/* Owl WDS workaround needed*/ +#define WLAN_PEER_F_OWL_WDSWAR 0x00000400 +/* WDS link */ +#define WLAN_PEER_F_WDS 0x00000800 +/* No AMPDU support */ +#define WLAN_PEER_F_NOAMPDU 0x00001000 +/* wep/tkip aggregation support */ +#define WLAN_PEER_F_WEPTKIPAGGR 0x00002000 +#define WLAN_PEER_F_WEPTKIP 0x00004000 +/* temp node (not in the node table) */ +#define WLAN_PEER_F_TEMP 0x00008000 +/* 2.4ng VHT interop AMSDU disabled */ +#define WLAN_PEER_F_11NG_VHT_INTEROP_AMSDU_DISABLE 0x00010000 +/* 40 MHz Intolerant */ +#define WLAN_PEER_F_40MHZ_INTOLERANT 0x00020000 +/* node is paused*/ +#define WLAN_PEER_F_PAUSED 0x00040000 +#define WLAN_PEER_F_EXTRADELIMWAR 0x00080000 +/* 20 MHz requesting node */ +#define WLAN_PEER_F_REQ_20MHZ 0x00100000 +/* all the tid queues in ath layer are paused*/ +#define WLAN_PEER_F_ATH_PAUSED 0x00200000 +/*Require credit update*/ +#define WLAN_PEER_F_UAPSD_CREDIT_UPDATE 0x00400000 +/*Require send deauth when h/w queue no data*/ +#define WLAN_PEER_F_KICK_OUT_DEAUTH 0x00800000 +/* RRM enabled node */ +#define WLAN_PEER_F_RRM 0x01000000 +/* Wakeup node */ +#define WLAN_PEER_F_WAKEUP 0x02000000 +/* VHT enabled node */ +#define WLAN_PEER_F_VHT 0x04000000 +/* deauth/Disassoc wait for node cleanup till frame goes on + air and tx feedback received */ +#define WLAN_PEER_F_DELAYED_CLEANUP 0x08000000 +/* Extended stats enabled node */ +#define WLAN_PEER_F_EXT_STATS 0x10000000 +/* Prevent _ieee80211_node_leave() from reentry */ +#define WLAN_PEER_F_LEAVE_ONGOING 0x20000000 +/* band steering is enabled for this node */ +#define WLAN_PEER_F_BSTEERING_CAPABLE 0x40000000 +/* node is a local mesh peer */ +#define WLAN_PEER_F_LOCAL_MESH_PEER 0x80000000 + +/** + * enum wlan_peer_state - peer state + * @WLAN_INIT_STATE: Default state + * @WLAN_JOIN_STATE: Station mode, STA is waiting for Join + * @WLAN_AUTH_STATE: AUTH in progress + * @WLAN_ASSOC_STATE: ASSOC in progress + * @WLAN_WAITKEY_STATE: 4-way KEY handshake is in progress + * @WLAN_CONNECTED_STATE: Connected state + * @WLAN_PREAUTH_STATE: Station mode: Preauth + * @WLAN_DISCONNECT_STATE: Disconnect is in progress + */ +enum wlan_peer_state { + WLAN_INIT_STATE = 1, + WLAN_JOIN_STATE = 2, + WLAN_AUTH_STATE = 3, + WLAN_ASSOC_STATE = 4, + WLAN_WAITKEY_STATE = 5, + WLAN_CONNECTED_STATE = 6, + WLAN_PREAUTH_STATE = 7, + WLAN_DISCONNECT_STATE = 8, +}; + +/** + * struct wlan_objmgr_peer_mlme - mlme common data of peer + * @peer_capinfo: protocol cap info + * @peer_flags: PEER OP flags + * @peer_type: Type of PEER, (STA/AP/etc.) + * @phymode: phy mode of station + * @max_rate: Max Rate supported + * @state: State of the peer + * @seq_num: Sequence number + * @rssi: Last received RSSI value + */ +struct wlan_objmgr_peer_mlme { + uint32_t peer_capinfo; + uint32_t peer_flags; + enum wlan_peer_type peer_type; + enum wlan_phymode phymode; + uint32_t max_rate; + enum wlan_peer_state state; + uint16_t seq_num; + int8_t rssi; + bool is_authenticated; +}; + +/** + * struct wlan_objmgr_peer_objmgr - object manager data of peer + * @vdev: VDEV pointer to which it is associated + * @ref_cnt: Ref count + * @ref_id_dbg: Array to track Ref count + * @print_cnt: Count to throttle Logical delete prints + * @wlan_objmgr_trace: Trace ref and deref + */ +struct wlan_objmgr_peer_objmgr { + struct wlan_objmgr_vdev *vdev; + qdf_atomic_t ref_cnt; +#ifdef WLAN_OBJMGR_REF_ID_DEBUG + qdf_atomic_t ref_id_dbg[WLAN_REF_ID_MAX]; +#endif + uint8_t print_cnt; +#ifdef WLAN_OBJMGR_REF_ID_TRACE + struct wlan_objmgr_trace trace; +#endif +}; + +/** + * struct wlan_objmgr_peer - PEER common object + * @psoc_peer: peer list node for psoc's qdf list + * @vdev_peer: peer list node for vdev's qdf list + * @macaddr[]: Peer MAC address + * @peer_mlme: Peer MLME common structure + * @peer_objmgr: Peer Object manager common structure + * @peer_comp_priv_obj[]: Component's private object pointers + * @obj_status[]: status of each component object + * @obj_state: Status of Peer object + * @pdev_id: Pdev ID + * @peer_lock: Lock for access/update peer contents + */ +struct wlan_objmgr_peer { + qdf_list_node_t psoc_peer; + qdf_list_node_t vdev_peer; + uint8_t macaddr[QDF_MAC_ADDR_SIZE]; + uint8_t pdev_id; + struct wlan_objmgr_peer_mlme peer_mlme; + struct wlan_objmgr_peer_objmgr peer_objmgr; + void *peer_comp_priv_obj[WLAN_UMAC_MAX_COMPONENTS]; + QDF_STATUS obj_status[WLAN_UMAC_MAX_COMPONENTS]; + WLAN_OBJ_STATE obj_state; + qdf_spinlock_t peer_lock; +}; + +/** + ** APIs to Create/Delete Global object APIs + */ +/** + * wlan_objmgr_peer_obj_create() - peer object create + * @vdev: VDEV object on which this peer gets created + * @peer_type: peer type (AP/STA) + * @macaddr: MAC address + * + * Creates Peer object, intializes with default values + * Attaches to psoc and vdev objects + * Invokes the registered notifiers to create component object + * + * Return: Handle to struct wlan_objmgr_peer on successful creation, + * NULL on Failure (on Mem alloc failure and Component objects + * Failure) + */ +struct wlan_objmgr_peer *wlan_objmgr_peer_obj_create( + struct wlan_objmgr_vdev *vdev, + enum wlan_peer_type type, + uint8_t macaddr[]); + +/** + * wlan_objmgr_peer_obj_delete() - peer object delete + * @peer: PEER object + * + * Deletes PEER object, removes it from PSOC's, VDEV's peer list + * Invokes the registered notifiers to destroy component objects + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_peer_obj_delete(struct wlan_objmgr_peer *peer); + +/** + ** APIs to attach/detach component objects + */ +/** + * wlan_objmgr_peer_component_obj_attach() - attach comp object to peer + * @peer: PEER object + * @id: Component id + * @comp_priv_obj: component's private object pointer + * @status: Component's private object creation status + * + * API to be used for attaching component object with PEER common object + * + * Return: SUCCESS on successful storing of component's object in common object + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_peer_component_obj_attach( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status); + +/** + * wlan_objmgr_peer_component_obj_detach() - detach comp object from peer + * @peer: PEER object + * @id: Component id + * @comp_priv_obj: component's private object pointer + * + * API to be used for detaching component object with PEER common object + * + * Return: SUCCESS on successful removal of component's object from common + * object + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_peer_component_obj_detach( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id, + void *comp_priv_obj); + +/** + ** APIs to operations on peer objects + */ + +/** + * wlan_objmgr_trigger_peer_comp_priv_object_creation() - create + * peer comp object + * @peer: PEER object + * @id: Component id + * + * API to create component private object in run time, this would + * be used for features which gets enabled in run time + * + * Return: SUCCESS on successful creation + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_trigger_peer_comp_priv_object_creation( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id); + +/** + * wlan_objmgr_trigger_peer_comp_priv_object_deletion() - destroy + * peer comp object + * @peer: PEER object + * @id: Component id + * + * API to destroy component private object in run time, this would + * be used for features which gets disabled in run time + * + * Return: SUCCESS on successful deletion + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_trigger_peer_comp_priv_object_deletion( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id); + +/** + * wlan_objmgr_peer_get_comp_private_obj() - get peer component private object + * @peer: PEER object + * @id: Component id + * + * API to get component private object + * + * Return: void *ptr on SUCCESS + * NULL on Failure + */ +void *wlan_objmgr_peer_get_comp_private_obj( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id); + +/** + * wlan_peer_obj_lock() - Acquire PEER spinlock + * @psoc: PEER object + * + * API to acquire PEER spin lock + * Parent lock should not be taken in child lock context + * but child lock can be taken in parent lock context + * (for ex: psoc lock can't be invoked in pdev/vdev/peer lock context) + * + * Return: void + */ +static inline void wlan_peer_obj_lock(struct wlan_objmgr_peer *peer) +{ + qdf_spin_lock_bh(&peer->peer_lock); +} + +/** + * wlan_peer_obj_unlock() - Release PEER spinlock + * @peer: PEER object + * + * API to Release PEER spin lock + * + * Return: void + */ +static inline void wlan_peer_obj_unlock(struct wlan_objmgr_peer *peer) +{ + qdf_spin_unlock_bh(&peer->peer_lock); +} + +/** + * DOC: Examples to use PEER ref count APIs + * + * In all the scenarios, the pair of API should be followed + * other it lead to memory leak + * + * scenario 1: + * + * wlan_objmgr_peer_obj_create() + * ---- + * wlan_objmgr_peer_obj_delete() + * + * scenario 2: + * + * wlan_objmgr_peer_get_ref() + * ---- + * the operations which are done on + * peer object + * ---- + * wlan_objmgr_peer_release_ref() + * + * scenario 3: + * + * API to retrieve peer (xxx_get_peer_xxx()) + * ---- + * the operations which are done on + * peer object + * ---- + * wlan_objmgr_peer_release_ref() + */ + +/** + * wlan_objmgr_peer_get_ref() - increment ref count + * @peer: PEER object + * @id: Object Manager ref debug id + * + * API to increment ref count of peer + * + * Return: void + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +void wlan_objmgr_peer_get_ref_debug(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id, + const char *func, int line); + +#define wlan_objmgr_peer_get_ref(peer, dbgid) \ + wlan_objmgr_peer_get_ref_debug(peer, dbgid, __func__, __LINE__) +#else +void wlan_objmgr_peer_get_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id); +#endif + +/** + * wlan_objmgr_peer_try_get_ref() - increment ref count, if allowed + * @peer: PEER object + * @id: Object Manager ref debug id + * + * API to increment ref count of peer, if object state is valid + * + * Return: void + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +QDF_STATUS wlan_objmgr_peer_try_get_ref_debug(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id, + const char *func, int line); + +#define wlan_objmgr_peer_try_get_ref(peer, dbgid) \ + wlan_objmgr_peer_try_get_ref_debug(peer, dbgid, \ + __func__, __LINE__) +#else +QDF_STATUS wlan_objmgr_peer_try_get_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id); +#endif + +/** + * wlan_objmgr_peer_release_ref() - decrement ref count + * @peer: PEER object + * @id: Object Manager ref debug id + * + * API to decrement ref count of peer, if ref count is 1, it initiates the + * peer deletion + * + * Return: void + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +void wlan_objmgr_peer_release_ref_debug(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id, + const char *func, int line); + +#define wlan_objmgr_peer_release_ref(peer, dbgid) \ + wlan_objmgr_peer_release_ref_debug(peer, dbgid, \ + __func__, __LINE__) +#else +void wlan_objmgr_peer_release_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id); +#endif + +/** + * wlan_peer_get_next_peer_of_psoc_ref() - get next peer to psoc peer list + * with lock and ref taken + * @peer_list: Peer list + * @hash_index: peer list hash index + * @peer: PEER object + * @dbg_id: Ref count debug module id + * + * API to get the next peer of given peer (of psoc's peer list) + * + * Return: + * @next_peer: PEER object + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_peer_get_next_peer_of_psoc_ref_debug( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_peer_get_next_peer_of_psoc_ref(peer_list, hash_index, peer, \ + dbgid) \ + wlan_peer_get_next_peer_of_psoc_ref_debug(peer_list, \ + hash_index, peer, dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_peer *wlan_peer_get_next_peer_of_psoc_ref( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_peer_get_next_active_peer_of_psoc() - get next active peer to psoc peer + * list + * @peer_list: Peer list + * @hash_index: peer list hash index + * @peer: PEER object + * @dbg_id: Ref count debug module id + * + * API to get the next peer of given peer (of psoc's peer list) + * + * Return: + * @next_peer: PEER object + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_peer_get_next_active_peer_of_psoc_debug( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_peer_get_next_active_peer_of_psoc(peer_list, hash_index, \ + peer, dbgid) \ + wlan_peer_get_next_active_peer_of_psoc_debug(peer_list, \ + hash_index, peer, dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_peer *wlan_peer_get_next_active_peer_of_psoc( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_peer_get_next_active_peer_of_vdev() - get next active_peer of vdev list + * @vdev: VDEV object + * @peer_list: Peer object list + * @peer: PEER object + * @dbg_id: Ref count debug module id + * + * API to get the next active peer of given peer (of vdev's peer list) + * + * Return: + * @next_peer: PEER object + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_peer_get_next_active_peer_of_vdev_debug( + struct wlan_objmgr_vdev *vdev, + qdf_list_t *peer_list, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_peer_get_next_active_peer_of_vdev(vdev, peer_list, peer, dbgid) \ + wlan_peer_get_next_active_peer_of_vdev_debug(vdev, peer_list, \ + peer, dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_peer *wlan_peer_get_next_active_peer_of_vdev( + struct wlan_objmgr_vdev *vdev, + qdf_list_t *peer_list, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_vdev_peer_list_peek_active_head() - get active head of vdev peer list + * @vdev: VDEV object + * @peer_list: qdf_list_t + * @dbg_id: Ref count debug module id + * + * API to get the active head peer of given peer (of vdev's peer list) + * + * Return: + * @peer: active head peer + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_vdev_peer_list_peek_active_head_debug( + struct wlan_objmgr_vdev *vdev, + qdf_list_t *peer_list, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_vdev_peer_list_peek_active_head(vdev, peer_list, dbgid) \ + wlan_vdev_peer_list_peek_active_head_debug(vdev, peer_list, \ + dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_peer *wlan_vdev_peer_list_peek_active_head( + struct wlan_objmgr_vdev *vdev, + qdf_list_t *peer_list, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_psoc_peer_list_peek_head_ref() - get head of psoc peer list + * with ref and lock protected + * @peer_list: wlan_peer_list + * @hash_index: peer list hash index + * @dbg_id: Ref count debug module id + * + * API to get the head peer of given peer (of psoc's peer list) + * + * Return: + * @peer: head peer + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_psoc_peer_list_peek_head_ref_debug( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_psoc_peer_list_peek_head_ref(peer_list, hash_index, dbgid) \ + wlan_psoc_peer_list_peek_head_ref_debug(peer_list, hash_index, \ + dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_peer *wlan_psoc_peer_list_peek_head_ref( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_psoc_peer_list_peek_active_head() - get active head of psoc peer list + * @peer_list: wlan_peer_list + * @hash_index: peer list hash index + * @dbg_id: Ref count debug module id + * + * API to get the head peer of given peer (of psoc's peer list) + * + * Return: + * @peer: head peer + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_psoc_peer_list_peek_active_head_debug( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_psoc_peer_list_peek_active_head(peer_list, hash_index, dbgid) \ + wlan_psoc_peer_list_peek_active_head_debug(peer_list, \ + hash_index, dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_peer *wlan_psoc_peer_list_peek_active_head( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_psoc_peer_list_peek_head() - get head of psoc peer list + * @peer_list: qdf_list_t + * + * API to get the head peer of given peer (of psoc's peer list) + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: + * @peer: head peer + */ +static inline struct wlan_objmgr_peer *wlan_psoc_peer_list_peek_head( + qdf_list_t *peer_list) +{ + struct wlan_objmgr_peer *peer; + qdf_list_node_t *psoc_node = NULL; + + /* This API is invoked with lock acquired, do not add log prints */ + if (qdf_list_peek_front(peer_list, &psoc_node) != QDF_STATUS_SUCCESS) + return NULL; + + peer = qdf_container_of(psoc_node, struct wlan_objmgr_peer, psoc_peer); + return peer; +} + +/** + * wlan_vdev_peer_list_peek_head() - get head of vdev peer list + * @peer_list: qdf_list_t + * + * API to get the head peer of given peer (of vdev's peer list) + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: + * @peer: head peer + */ +static inline struct wlan_objmgr_peer *wlan_vdev_peer_list_peek_head( + qdf_list_t *peer_list) +{ + struct wlan_objmgr_peer *peer; + qdf_list_node_t *vdev_node = NULL; + + /* This API is invoked with lock acquired, do not add log prints */ + if (qdf_list_peek_front(peer_list, &vdev_node) != QDF_STATUS_SUCCESS) + return NULL; + + peer = qdf_container_of(vdev_node, struct wlan_objmgr_peer, vdev_peer); + return peer; +} + +/** + * wlan_peer_get_next_peer_of_vdev() - get next peer of vdev list + * @peer: PEER object + * + * API to get the next peer of given peer (of vdev's peer list) + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: + * @next_peer: PEER object + */ +static inline struct wlan_objmgr_peer *wlan_peer_get_next_peer_of_vdev( + qdf_list_t *peer_list, struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_peer *peer_next; + qdf_list_node_t *node; + qdf_list_node_t *next_node = NULL; + + /* This API is invoked with lock acquired, do not add log prints */ + if (!peer) + return NULL; + + node = &peer->vdev_peer; + if (qdf_list_peek_next(peer_list, node, &next_node) != + QDF_STATUS_SUCCESS) + return NULL; + + peer_next = qdf_container_of(next_node, struct wlan_objmgr_peer, + vdev_peer); + return peer_next; +} + +/** + * wlan_peer_set_next_peer_of_vdev() - add peer to vdev peer list + * @peer: PEER object + * @new_peer: PEER object + * + * API to set as the next peer to given peer (of vdev's peer list) + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: void + */ +static inline void wlan_peer_set_next_peer_of_vdev(qdf_list_t *peer_list, + struct wlan_objmgr_peer *new_peer) +{ + /* This API is invoked with lock acquired, do not add log prints */ + /* set next peer with new peer */ + qdf_list_insert_back(peer_list, &new_peer->vdev_peer); + return; +} + +/** + * wlan_peer_get_next_peer_of_psoc() - get next peer to psoc peer list + * @peer_list: Peer list + * @peer: PEER object + * + * API to get the next peer of given peer (of psoc's peer list) + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: + * @next_peer: PEER object + */ +static inline struct wlan_objmgr_peer *wlan_peer_get_next_peer_of_psoc( + qdf_list_t *peer_list, struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_peer *peer_next; + qdf_list_node_t *node = NULL; + qdf_list_node_t *next_node = NULL; + + /* This API is invoked with lock acquired, do not add log prints */ + if (!peer) + return NULL; + + node = &peer->psoc_peer; + if (qdf_list_peek_next(peer_list, node, &next_node) != + QDF_STATUS_SUCCESS) + return NULL; + + peer_next = qdf_container_of(next_node, struct wlan_objmgr_peer, + psoc_peer); + return peer_next; +} + +/** + * wlan_peer_set_next_peer_of_psoc() - add peer to psoc peer list + * @peer: PEER object + * @new_peer: PEER object + * + * API to set as the next peer to given peer (of psoc's peer list) + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: void + */ +static inline void wlan_peer_set_next_peer_of_psoc(qdf_list_t *peer_list, + struct wlan_objmgr_peer *new_peer) +{ + /* This API is invoked with lock acquired, do not add log prints */ + /* set next peer with new peer */ + qdf_list_insert_back(peer_list, &new_peer->psoc_peer); + return; +} + +/** + * wlan_peer_set_peer_type() - set peer type + * @peer: PEER object + * @peer_type: type of PEER + * + * API to set peer type + * + * Return: void + */ +static inline void wlan_peer_set_peer_type(struct wlan_objmgr_peer *peer, + enum wlan_peer_type type) +{ + peer->peer_mlme.peer_type = type; +} + +/** + * wlan_peer_get_peer_type() - get peer type + * @peer: PEER object + * + * API to get peer type + * + * Return: + * @peer_type: type of PEER + */ +static inline enum wlan_peer_type wlan_peer_get_peer_type( + struct wlan_objmgr_peer *peer) +{ + return peer->peer_mlme.peer_type; +} + +/** + * wlan_peer_set_phymode() - set phymode + * @peer: PEER object + * @phymode: phymode of peer + * + * API to set phymode + * + * Return: void + */ +static inline void wlan_peer_set_phymode(struct wlan_objmgr_peer *peer, + enum wlan_phymode phymode) +{ + peer->peer_mlme.phymode = phymode; +} + +/** + * wlan_peer_get_phymode() - get phymode + * @peer: PEER object + * + * API to get phymode + * + * Return: + * @phymode: phymode of PEER + */ +static inline enum wlan_phymode wlan_peer_get_phymode( + struct wlan_objmgr_peer *peer) +{ + return peer->peer_mlme.phymode; +} + + +/** + * wlan_peer_set_macaddr() - set mac addr + * @peer: PEER object + * @macaddr: MAC address + * + * API to set peer mac address + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: void + */ +static inline void wlan_peer_set_macaddr(struct wlan_objmgr_peer *peer, + uint8_t *macaddr) +{ + /* This API is invoked with lock acquired, do not add log prints */ + WLAN_ADDR_COPY(peer->macaddr, macaddr); +} + +/** + * wlan_peer_get_macaddr() - get mac addr + * @peer: PEER object + * + * API to get peer mac address + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: + * @macaddr: MAC address + */ +static inline uint8_t *wlan_peer_get_macaddr(struct wlan_objmgr_peer *peer) +{ + /* This API is invoked with lock acquired, do not add log prints */ + return peer->macaddr; +} + +/** + * wlan_peer_get_vdev() - get vdev + * @peer: PEER object + * + * API to get peer's vdev + * + * Return: + * @vdev: VDEV object + */ +static inline struct wlan_objmgr_vdev *wlan_peer_get_vdev( + struct wlan_objmgr_peer *peer) +{ + return peer->peer_objmgr.vdev; +} + +/** + * wlan_peer_set_vdev() - set vdev + * @peer: PEER object + * @vdev: VDEV object + * + * API to set peer's vdev + * + * Return: void + */ +static inline void wlan_peer_set_vdev(struct wlan_objmgr_peer *peer, + struct wlan_objmgr_vdev *vdev) +{ + peer->peer_objmgr.vdev = vdev; +} + +/** + * wlan_peer_mlme_flag_set() - mlme flag set + * @peer: PEER object + * @flag: flag to be set + * + * API to set flag in peer + * + * Return: void + */ +static inline void wlan_peer_mlme_flag_set(struct wlan_objmgr_peer *peer, + uint32_t flag) +{ + peer->peer_mlme.peer_flags |= flag; +} + +/** + * wlan_peer_mlme_flag_clear() - mlme flag clear + * @peer: PEER object + * @flag: flag to be cleared + * + * API to clear flag in peer + * + * Return: void + */ +static inline void wlan_peer_mlme_flag_clear(struct wlan_objmgr_peer *peer, + uint32_t flag) +{ + peer->peer_mlme.peer_flags &= ~flag; +} + +/** + * wlan_peer_mlme_flag_get() - mlme flag get + * @peer: PEER object + * @flag: flag to be checked + * + * API to know, whether particular flag is set in peer + * + * Return: 1 (for set) or 0 (for not set) + */ +static inline uint8_t wlan_peer_mlme_flag_get(struct wlan_objmgr_peer *peer, + uint32_t flag) +{ + return (peer->peer_mlme.peer_flags & flag) ? 1 : 0; +} + +/** + * wlan_peer_mlme_set_state() - peer mlme state + * @peer: PEER object + * @state: enum wlan_peer_state + * + * API to update the current peer state + * + * Return: void + */ +static inline void wlan_peer_mlme_set_state( + struct wlan_objmgr_peer *peer, + enum wlan_peer_state state) +{ + peer->peer_mlme.state = state; +} + +/** + * wlan_peer_mlme_set_auth_state() - peer mlme auth state + * @peer: PEER object + * @is_authenticated: true or false + * + * API to update the current peer auth state + * + * Return: void + */ +static inline void wlan_peer_mlme_set_auth_state( + struct wlan_objmgr_peer *peer, + bool is_authenticated) +{ + peer->peer_mlme.is_authenticated = is_authenticated; +} + +/** + * wlan_peer_mlme_get_state() - peer mlme state + * @peer: PEER object + * + * API to get peer state + * + * Return: enum wlan_peer_state + */ +static inline enum wlan_peer_state wlan_peer_mlme_get_state( + struct wlan_objmgr_peer *peer) +{ + return peer->peer_mlme.state; +} + +/** + * wlan_peer_mlme_get_auth_state() - peer mlme auth state + * @peer: PEER object + * + * API to get peer auth state + * + * Return: auth state true/false + */ +static inline bool wlan_peer_mlme_get_auth_state( + struct wlan_objmgr_peer *peer) +{ + return peer->peer_mlme.is_authenticated; +} + +/** + * wlan_peer_mlme_get_next_seq_num() - get peer mlme next sequence number + * @peer: PEER object + * + * API to get mlme peer next sequence number + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: peer mlme next sequence number + */ +static inline uint32_t wlan_peer_mlme_get_next_seq_num( + struct wlan_objmgr_peer *peer) +{ + /* This API is invoked with lock acquired, do not add log prints */ + if (peer->peer_mlme.seq_num < WLAN_MAX_SEQ_NUM) + peer->peer_mlme.seq_num++; + else + peer->peer_mlme.seq_num = 0; + + return peer->peer_mlme.seq_num; +} + +/** + * wlan_peer_mlme_get_seq_num() - get peer mlme sequence number + * @peer: PEER object + * + * API to get mlme peer sequence number + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: peer mlme sequence number + */ +static inline uint32_t wlan_peer_mlme_get_seq_num( + struct wlan_objmgr_peer *peer) +{ + /* This API is invoked with lock acquired, do not add log prints */ + return peer->peer_mlme.seq_num; +} + +/** + * wlan_peer_mlme_reset_seq_num() - reset peer mlme sequence number + * @peer: PEER object + * + * API to reset peer sequence number + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: void + */ +static inline void wlan_peer_mlme_reset_seq_num( + struct wlan_objmgr_peer *peer) +{ + /* This API is invoked with lock acquired, do not add log prints */ + peer->peer_mlme.seq_num = 0; +} + +/** + * wlan_peer_get_psoc() - get psoc + * @peer: PEER object + * + * API to get peer's psoc + * + * Return: PSOC object or NULL if the psoc can not be found + */ +static inline struct wlan_objmgr_psoc *wlan_peer_get_psoc( + struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_psoc *psoc; + + vdev = wlan_peer_get_vdev(peer); + if (!vdev) + return NULL; + + psoc = wlan_vdev_get_psoc(vdev); + + return psoc; +} + +/* + * wlan_peer_get_pdev_id() - get pdev id + * @peer: peer object pointer + * + * Return: pdev id + */ +static inline uint8_t wlan_peer_get_pdev_id(struct wlan_objmgr_peer *peer) +{ + return peer->pdev_id; +} + +/** + * wlan_peer_set_pdev_id() - set pdev id + * @peer: peer object pointer + * @pdev_id: pdev id + * + * Return: void + */ +static inline void wlan_peer_set_pdev_id(struct wlan_objmgr_peer *peer, + uint8_t pdev_id) +{ + peer->pdev_id = pdev_id; +} + +/** + * wlan_objmgr_print_peer_ref_ids() - print peer object refs + * @peer: peer object pointer + * @log_level: log level + * + * Return: void + */ +void wlan_objmgr_print_peer_ref_ids(struct wlan_objmgr_peer *peer, + QDF_TRACE_LEVEL log_level); + +/** + * wlan_objmgr_peer_get_comp_ref_cnt() - get component ref count for a peer + * @peer: peer object pointer + * @id: component id + * + * Return: uint32_t + */ +uint32_t +wlan_objmgr_peer_get_comp_ref_cnt(struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id); + +/** + * wlan_objmgr_peer_trace_init_lock() - Initialize peer trace lock + * @peer: peer object pointer + * + * Return: void + */ +#ifdef WLAN_OBJMGR_TRACE +static inline void +wlan_objmgr_peer_trace_init_lock(struct wlan_objmgr_peer *peer) +{ + wlan_objmgr_trace_init_lock(&peer->peer_objmgr.trace); +} +#else +static inline void +wlan_objmgr_peer_trace_init_lock(struct wlan_objmgr_peer *peer) +{ +} +#endif + +/** + * wlan_objmgr_peer_trace_deinit_lock() - Deinitialize peer trace lock + * @peer: peer object pointer + * + * Return: void + */ +#ifdef WLAN_OBJMGR_TRACE +static inline void +wlan_objmgr_peer_trace_deinit_lock(struct wlan_objmgr_peer *peer) +{ + wlan_objmgr_trace_deinit_lock(&peer->peer_objmgr.trace); +} +#else +static inline void +wlan_objmgr_peer_trace_deinit_lock(struct wlan_objmgr_peer *peer) +{ +} +#endif + +/** + * wlan_objmgr_peer_trace_del_ref_list() - Delete peer trace reference list + * @peer: peer object pointer + * + * Return: void + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +static inline void +wlan_objmgr_peer_trace_del_ref_list(struct wlan_objmgr_peer *peer) +{ + wlan_objmgr_trace_del_ref_list(&peer->peer_objmgr.trace); +} +#else +static inline void +wlan_objmgr_peer_trace_del_ref_list(struct wlan_objmgr_peer *peer) +{ +} +#endif + +#endif /* _WLAN_OBJMGR_PEER_OBJ_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_psoc_obj.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_psoc_obj.h new file mode 100644 index 0000000000000000000000000000000000000000..c2c60313d699404ea2b2fc91171978820f84aaba --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_psoc_obj.h @@ -0,0 +1,1758 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Define the pSoc data structure of UMAC + * Public APIs to perform operations on Global objects + */ +#ifndef _WLAN_OBJMGR_PSOC_OBJ_H_ +#define _WLAN_OBJMGR_PSOC_OBJ_H_ + +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_debug.h" +#include "wlan_lmac_if_def.h" +#include + +#define REG_DMN_CH144 0x0001 +#define REG_DMN_ENTREPRISE 0x0002 + + +/* fw_caps */ + /* CAPABILITY: WEP available */ +#define WLAN_SOC_C_WEP 0x00000001 + /* CAPABILITY: TKIP available */ +#define WLAN_SOC_C_TKIP 0x00000002 + /* CAPABILITY: AES OCB avail */ +#define WLAN_SOC_C_AES 0x00000004 + /* CAPABILITY: AES CCM avail */ +#define WLAN_SOC_C_AES_CCM 0x00000008 + /* CAPABILITY: 11n HT available */ +#define WLAN_SOC_C_HT 0x00000010 + /* CAPABILITY: CKIP available */ +#define WLAN_SOC_C_CKIP 0x00000020 + /* CAPABILITY: ATH FF avail */ +#define WLAN_SOC_C_FF 0x00000040 + /* CAPABILITY: ATH Turbo avail*/ +#define WLAN_SOC_C_TURBOP 0x00000080 + /* CAPABILITY: IBSS available */ +#define WLAN_SOC_C_IBSS 0x00000100 + /* CAPABILITY: Power mgmt */ +#define WLAN_SOC_C_PMGT 0x00000200 + /* CAPABILITY: HOSTAP avail */ +#define WLAN_SOC_C_HOSTAP 0x00000400 + /* CAPABILITY: Old Adhoc Demo */ +#define WLAN_SOC_C_AHDEMO 0x00000800 + /* CAPABILITY: tx power mgmt */ +#define WLAN_SOC_C_TXPMGT 0x00001000 + /* CAPABILITY: short slottime */ +#define WLAN_SOC_C_SHSLOT 0x00002000 + /* CAPABILITY: short preamble */ +#define WLAN_SOC_C_SHPREAMBLE 0x00004000 + /* CAPABILITY: monitor mode */ +#define WLAN_SOC_C_MONITOR 0x00008000 + /* CAPABILITY: TKIP MIC avail */ +#define WLAN_SOC_C_TKIPMIC 0x00010000 + /* CAPABILITY: ATH WAPI avail */ +#define WLAN_SOC_C_WAPI 0x00020000 + /* CONF: WDS auto Detect/DELBA */ +#define WLAN_SOC_C_WDS_AUTODETECT 0x00040000 + /* CAPABILITY: WPA1 avail */ +#define WLAN_SOC_C_WPA1 0x00080000 + /* CAPABILITY: WPA2 avail */ +#define WLAN_SOC_C_WPA2 0x00100000 + /* CAPABILITY: WPA1+WPA2 avail*/ +#define WLAN_SOC_C_WPA 0x00180000 + /* CAPABILITY: frame bursting */ +#define WLAN_SOC_C_BURST 0x00200000 + /* CAPABILITY: WME avail */ +#define WLAN_SOC_C_WME 0x00400000 + /* CAPABILITY: 4-addr support */ +#define WLAN_SOC_C_WDS 0x00800000 + /* CAPABILITY: TKIP MIC for QoS frame */ +#define WLAN_SOC_C_WME_TKIPMIC 0x01000000 + /* CAPABILITY: bg scanning */ +#define WLAN_SOC_C_BGSCAN 0x02000000 + /* CAPABILITY: UAPSD */ +#define WLAN_SOC_C_UAPSD 0x04000000 + /* CAPABILITY: enabled 11.h */ +#define WLAN_SOC_C_DOTH 0x08000000 + +/* XXX protection/barker? */ + /* CAPABILITY: crypto alg's */ +#define WLAN_SOC_C_CRYPTO 0x0000002f + +/* fw_caps_ext */ + /* CAPABILITY: fast channel change */ +#define WLAN_SOC_CEXT_FASTCC 0x00000001 + /* CAPABILITY: P2P */ +#define WLAN_SOC_CEXT_P2P 0x00000002 + /* CAPABILITY: Multi-Channel Operations */ +#define WLAN_SOC_CEXT_MULTICHAN 0x00000004 + /* CAPABILITY: the device supports perf and power offload */ +#define WLAN_SOC_CEXT_PERF_PWR_OFLD 0x00000008 + /* CAPABILITY: the device supports 11ac */ +#define WLAN_SOC_CEXT_11AC 0x00000010 + /* CAPABILITY: the device support acs channel hopping */ +#define WLAN_SOC_CEXT_ACS_CHAN_HOP 0x00000020 + /* CAPABILITY: the device support STA DFS */ +#define WLAN_SOC_CEXT_STADFS 0x00000040 + /* NSS offload capability */ +#define WLAN_SOC_CEXT_NSS_OFFLOAD 0x00000080 + /* SW cal support capability */ +#define WLAN_SOC_CEXT_SW_CAL 0x00000100 + /* Hybrid mode */ +#define WLAN_SOC_CEXT_HYBRID_MODE 0x00000200 + /* TT support */ +#define WLAN_SOC_CEXT_TT_SUPPORT 0x00000400 + /* WMI MGMT REF */ +#define WLAN_SOC_CEXT_WMI_MGMT_REF 0x00000800 + /* Wideband scan */ +#define WLAN_SOC_CEXT_WIDEBAND_SCAN 0x00001000 + /* TWT Requester capable */ +#define WLAN_SOC_CEXT_TWT_REQUESTER 0x00002000 + /* TWT Responder capable */ +#define WLAN_SOC_CEXT_TWT_RESPONDER 0x00004000 + /* HW DB2DBM CAPABLE */ +#define WLAN_SOC_CEXT_HW_DB2DBM 0x00008000 + /* OBSS Narrow Bandwidth RU Tolerance */ +#define WLAN_SOC_CEXT_OBSS_NBW_RU 0x00010000 + /* MBSS IE support */ +#define WLAN_SOC_CEXT_MBSS_IE 0x00020000 + /* RXOLE Flow Search Support */ +#define WLAN_SOC_CEXT_RX_FSE_SUPPORT 0x00040000 + /* Dynamic HW Mode Switch Support */ +#define WLAN_SOC_CEXT_DYNAMIC_HW_MODE 0x00080000 + /* Restricted 80+80 MHz support */ +#define WLAN_SOC_RESTRICTED_80P80_SUPPORT 0x00100000 + /* Indicates Firmware supports sending NSS ratio info to host */ +#define WLAN_SOC_NSS_RATIO_TO_HOST_SUPPORT 0x00200000 + +/* feature_flags */ + /* CONF: ATH FF enabled */ +#define WLAN_SOC_F_FF 0x00000001 + /* CONF: ATH Turbo enabled*/ +#define WLAN_SOC_F_TURBOP 0x00000002 + /* STATUS: promiscuous mode */ +#define WLAN_SOC_F_PROMISC 0x00000004 + /* STATUS: all multicast mode */ +#define WLAN_SOC_F_ALLMULTI 0x00000008 +/* NB: this is intentionally setup to be IEEE80211_CAPINFO_PRIVACY */ + /* STATUS: start IBSS */ +#define WLAN_SOC_F_SIBSS 0x00000010 +/* NB: this is intentionally setup to be IEEE80211_CAPINFO_SHORT_SLOTTIME */ + /* CONF: Power mgmt enable */ +#define WLAN_SOC_F_PMGTON 0x00000020 + /* CONF: IBSS creation enable */ +#define WLAN_SOC_F_IBSSON 0x00000040 + /* force chanswitch */ +#define WLAN_SOC_F_CHANSWITCH 0x00000080 + +/* ic_flags_ext and/or iv_flags_ext */ + /* CONF: enable country IE */ +#define WLAN_SOC_F_COUNTRYIE 0x00000100 + /* STATE: enable full bgscan completion */ +#define WLAN_SOC_F_BGSCAN 0x00000200 + /* CONF: enable U-APSD */ +#define WLAN_SOC_F_UAPSD 0x00000400 + /* STATUS: sleeping */ +#define WLAN_SOC_F_SLEEP 0x00000800 + /* Enable marking of dfs interfernce */ +#define WLAN_SOC_F_MARKDFS 0x00001000 + /* enable or disable s/w ccmp encrypt decrypt support */ +#define WLAN_SOC_F_CCMPSW_ENCDEC 0x00002000 + /* STATE: hibernating */ +#define WLAN_SOC_F_HIBERNATION 0x00004000 + /* CONF: desired country has been set */ +#define WLAN_SOC_F_DESCOUNTRY 0x00008000 + /* CONF: enable power capability or contraint IE */ +#define WLAN_SOC_F_PWRCNSTRIE 0x00010000 + /* STATUS: 11D in used */ +#define WLAN_SOC_F_DOT11D 0x00020000 + /* Beacon offload */ +#define WLAN_SOC_F_BCN_OFFLOAD 0x00040000 + /* QWRAP enable */ +#define WLAN_SOC_F_QWRAP_ENABLE 0x00080000 + /* LTEU support */ +#define WLAN_SOC_F_LTEU_SUPPORT 0x00100000 + /* BT coext support */ +#define WLAN_SOC_F_BTCOEX_SUPPORT 0x00200000 + /* HOST 80211 enable*/ +#define WLAN_SOC_F_HOST_80211_ENABLE 0x00400000 + /* Spectral disable */ +#define WLAN_SOC_F_SPECTRAL_DISABLE 0x00800000 + /* FTM testmode enable */ +#define WLAN_SOC_F_TESTMODE_ENABLE 0x01000000 + /* Dynamic HW mode swithch enable */ +#define WLAN_SOC_F_DYNAMIC_HW_MODE 0x02000000 + +/* PSOC op flags */ + + /* Invalid VHT cap */ +#define WLAN_SOC_OP_VHT_INVALID_CAP 0x00000001 + +/* enum wlan_nss_ratio - NSS ratio received from FW during service ready ext + * event. + * WLAN_NSS_RATIO_1BY2_NSS : Max nss of 160MHz is equals to half of the max nss + * of 80MHz + * WLAN_NSS_RATIO_3BY4_NSS : Max nss of 160MHz is equals to 3/4 of the max nss + * of 80MHz + * WLAN_NSS_RATIO_1_NSS : Max nss of 160MHz is equals to the max nss of 80MHz + * WLAN_NSS_RATIO_2_NSS : Max nss of 160MHz is equals to two times the max + * nss of 80MHz + * Values of this enum should be in sync with WMI_NSS_RATIO_INFO value provided + * in wmi_unified.h. + */ +enum wlan_nss_ratio { + WLAN_NSS_RATIO_1BY2_NSS = 0x0, + WLAN_NSS_RATIO_3BY4_NSS = 0x1, + WLAN_NSS_RATIO_1_NSS = 0x2, + WLAN_NSS_RATIO_2_NSS = 0x3, +}; + +/** + * struct wlan_objmgr_psoc_regulatory - Regulatory sub structure of PSOC + * @country_code: Country code + * @reg_dmn: Regulatory Domain + * @reg_flags: Regulatory flags + */ +struct wlan_objmgr_psoc_regulatory { + uint16_t country_code; + uint16_t reg_dmn; + uint16_t reg_flags; +}; + +/** + * struct wlan_objmgr_psoc_user_config - user configurations to + * be used by common modules + * @is_11d_support_enabled: Enable/disable 11d feature + * @is_11h_support_enabled: Enable/disable 11h feature + * @dot11_mode: Phy mode + * @skip_dfs_chnl_in_p2p_search: Skip Dfs Channel in case of P2P + * Search + * @band_capability: Preferred band (0:Both, 1:2G only, 2:5G only) + */ +struct wlan_objmgr_psoc_user_config { + bool is_11d_support_enabled; + bool is_11h_support_enabled; + uint8_t dot11_mode; + uint8_t band_capability; +}; + +/** + * struct wlan_objmgr_psoc_nif - HDD/OSIF specific sub structure of PSOC + * @phy_version: phy version, read in device probe + * @phy_type: OL/DA type + * @soc_fw_caps: FW capabilities + * @soc_fw_ext_caps: FW ext capabilities + * @soc_feature_caps:Feature capabilities + * @soc_op_flags: Flags to set/reset during operation + * @soc_hw_macaddr[]:HW MAC address + * @user_config: user config from OS layer + */ +struct wlan_objmgr_psoc_nif { + uint32_t phy_version; + WLAN_DEV_TYPE phy_type; + uint32_t soc_fw_caps; + uint32_t soc_fw_ext_caps; + uint32_t soc_feature_caps; + uint32_t soc_op_flags; + uint8_t soc_hw_macaddr[QDF_MAC_ADDR_SIZE]; + struct wlan_objmgr_psoc_user_config user_config; +}; + +/** + * struct wlan_objmgr_psoc_objmgr - psoc object manager sub structure + * @psoc_id: The PSOC's numeric Id + * @wlan_pdev_count: PDEV count + * @wlan_pdev_id_map: PDEV id map, to allocate free ids + * @wlan_vdev_count: VDEV count + * @max_vdev_count: Max no. of VDEVs supported by this PSOC + * @print_cnt: Count to throttle Logical delete prints + * @wlan_peer_count: PEER count + * @max_peer_count: Max no. of peers supported by this PSOC + * @temp_peer_count: Temporary peer count + * @wlan_pdev_list[]: PDEV list + * @wlan_vdev_list[]: VDEV list + * @wlan_vdev_id_map[]: VDEV id map, to allocate free ids + * @peer_list: Peer list + * @ref_cnt: Ref count + * @ref_id_dbg: Array to track Ref count + * @qdf_dev: QDF Device + */ +struct wlan_objmgr_psoc_objmgr { + uint8_t psoc_id; + uint8_t wlan_pdev_count; + uint8_t wlan_pdev_id_map; + uint8_t wlan_vdev_count; + uint8_t max_vdev_count; + uint8_t print_cnt; + uint16_t wlan_peer_count; + uint16_t max_peer_count; + uint16_t temp_peer_count; + struct wlan_objmgr_pdev *wlan_pdev_list[WLAN_UMAC_MAX_PDEVS]; + struct wlan_objmgr_vdev *wlan_vdev_list[WLAN_UMAC_PSOC_MAX_VDEVS]; + uint32_t wlan_vdev_id_map[2]; + struct wlan_peer_list peer_list; + qdf_atomic_t ref_cnt; + qdf_atomic_t ref_id_dbg[WLAN_REF_ID_MAX]; + qdf_device_t qdf_dev; +}; + +/** + * struct wlan_soc_southbound_cb - Southbound callbacks + * @tx_ops: contains southbound tx callbacks + * @rx_ops: contains southbound rx callbacks + */ +struct wlan_soc_southbound_cb { + struct wlan_lmac_if_tx_ops tx_ops; + struct wlan_lmac_if_rx_ops rx_ops; +}; + +/** + * struct wlan_concurrency_info - structure for concurrency info + * + */ +struct wlan_concurrency_info { +}; + +/** + * struct wlan_soc_timer - structure for soc timer + * + */ +struct wlan_soc_timer { +}; + +/** + * struct wlan_objmgr_psoc - PSOC common object + * @soc_reg: regulatory sub structure + * @soc_nif: nif sub strucutre + * @soc_objmgr: object manager sub structure + * @soc_cb: south bound callbacks + * @soc_timer: soc timer for inactivity + * @soc_concurrency: concurrency info + * @wlan_active_vdevs[]: List of active VDEVs + * @soc_comp_priv_obj[]: component private object pointers + * @obj_status[]: component object status + * @obj_state: object state + * @tgt_if_handle: target interface handle + * @dp_handle: DP module handle + * @psoc_lock: psoc lock + */ +struct wlan_objmgr_psoc { + struct wlan_objmgr_psoc_regulatory soc_reg; + struct wlan_objmgr_psoc_nif soc_nif; + struct wlan_objmgr_psoc_objmgr soc_objmgr; + struct wlan_soc_southbound_cb soc_cb; + struct wlan_soc_timer soc_timer; + struct wlan_concurrency_info soc_concurrency; /*TODO */ + uint8_t wlan_active_vdevs[WLAN_UMAC_PSOC_MAX_VDEVS]; + void *soc_comp_priv_obj[WLAN_UMAC_MAX_COMPONENTS]; + QDF_STATUS obj_status[WLAN_UMAC_MAX_COMPONENTS]; + WLAN_OBJ_STATE obj_state; + target_psoc_info_t *tgt_if_handle; + void *dp_handle; + qdf_spinlock_t psoc_lock; +}; + +/** + * struct wlan_psoc_host_hal_reg_capabilities_ext: Below are Reg caps per PHY. + * Please note PHY ID starts with 0. + * @phy_id: phy id starts with 0. + * @eeprom_reg_domain: regdomain value specified in EEPROM + * @eeprom_reg_domain_ext: regdomain + * @regcap1: CAP1 capabilities bit map, see REGDMN_CAP1_ defines + * @regcap2: REGDMN EEPROM CAP, see REGDMN_EEPROM_EEREGCAP_ defines + * @wireless_modes: REGDMN MODE, see REGDMN_MODE_ enum + * @low_2ghz_chan: 2G channel low + * @high_2ghz_chan: 2G channel High + * @low_5ghz_chan: 5G channel low + * @high_5ghz_chan: 5G channel High + */ +struct wlan_psoc_host_hal_reg_capabilities_ext { + uint32_t phy_id; + uint32_t eeprom_reg_domain; + uint32_t eeprom_reg_domain_ext; + uint32_t regcap1; + uint32_t regcap2; + uint32_t wireless_modes; + uint32_t low_2ghz_chan; + uint32_t high_2ghz_chan; + uint32_t low_5ghz_chan; + uint32_t high_5ghz_chan; +}; + +/** + ** APIs to Create/Delete Global object APIs + */ +/** + * wlan_objmgr_psoc_obj_create() - psoc object create + * @phy_version: device id (from probe) + * @dev_type: Offload/DA + * + * Creates PSOC object, intializes with default values + * Invokes the registered notifiers to create component object + * + * Return: Handle to struct wlan_objmgr_psoc on successful creation, + * NULL on Failure (on Mem alloc failure and Component objects + * Failure) + */ +struct wlan_objmgr_psoc *wlan_objmgr_psoc_obj_create(uint32_t phy_version, + WLAN_DEV_TYPE dev_type); + +/** + * wlan_objmgr_psoc_obj_delete() - psoc object delete + * @psoc: PSOC object + * + * Logically deletes PSOC object, + * Once all the references are released, object manager invokes the registered + * notifiers to destroy component objects + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_psoc_obj_delete(struct wlan_objmgr_psoc *psoc); + +/** + ** APIs to attach/detach component objects + */ + +/** + * wlan_objmgr_psoc_component_obj_attach() - psoc comp object attach + * @psoc: PSOC object + * @id: Component id + * @comp_priv_obj: component's private object pointer + * @status: Component's private object creation status + * + * API to be used for attaching component object with PSOC common object + * + * Return: SUCCESS on successful storing of component's object in common object + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_psoc_component_obj_attach( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status); + +/** + * wlan_objmgr_psoc_component_obj_detach() - psoc comp object detach + * @psoc: PSOC object + * @id: Component id + * @comp_priv_obj: component's private object pointer + * + * API to be used for detaching component object with PSOC common object + * + * Return: SUCCESS on successful removal of component's object from common + * object + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_psoc_component_obj_detach( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id, + void *comp_priv_obj); + +/** + ** APIs to operations on psoc objects + */ +typedef void (*wlan_objmgr_op_handler)(struct wlan_objmgr_psoc *psoc, + void *object, + void *arg); + +/** + * wlan_objmgr_iterate_obj_list() - iterate through all psoc objects + * (CREATED state) + * @psoc: PSOC object + * @obj_type: PDEV_OP/VDEV_OP/PEER_OP + * @handler: the handler will be called for each object of requested type + * the handler should be implemented to perform required operation + * @arg: agruments passed by caller + * @lock_free_op: its obsolete + * @dbg_id: id of the caller + * + * API to be used for performing the operations on all PDEV/VDEV/PEER objects + * of psoc + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_iterate_obj_list( + struct wlan_objmgr_psoc *psoc, + enum wlan_objmgr_obj_type obj_type, + wlan_objmgr_op_handler handler, + void *arg, uint8_t lock_free_op, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_iterate_obj_list_all() - iterate through all psoc objects + * @psoc: PSOC object + * @obj_type: PDEV_OP/VDEV_OP/PEER_OP + * @handler: the handler will be called for each object of requested type + * the handler should be implemented to perform required operation + * @arg: agruments passed by caller + * @lock_free_op: its obsolete + * @dbg_id: id of the caller + * + * API to be used for performing the operations on all PDEV/VDEV/PEER objects + * of psoc + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_iterate_obj_list_all( + struct wlan_objmgr_psoc *psoc, + enum wlan_objmgr_obj_type obj_type, + wlan_objmgr_op_handler handler, + void *arg, uint8_t lock_free_op, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_free_all_objects_per_psoc() - free all psoc objects + * @psoc: PSOC object + * + * API to be used free all the objects(pdev/vdev/peer) of psoc + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_free_all_objects_per_psoc( + struct wlan_objmgr_psoc *psoc); + +/** + * wlan_objmgr_trigger_psoc_comp_priv_object_creation() - create + * psoc comp object + * @psoc: PSOC object + * @id: Component id + * + * API to create component private object in run time, this would + * be used for features which gets enabled in run time + * + * Return: SUCCESS on successful creation + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_trigger_psoc_comp_priv_object_creation( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id); + +/** + * wlan_objmgr_trigger_psoc_comp_priv_object_deletion() - destroy + * psoc comp object + * @psoc: PSOC object + * @id: Component id + * + * API to destroy component private object in run time, this would + * be used for features which gets disabled in run time + * + * Return: SUCCESS on successful deletion + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_trigger_psoc_comp_priv_object_deletion( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id); + +/** + * wlan_objmgr_get_peer_by_mac() - find peer from psoc's peer list + * @psoc: PSOC object + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer by MAC addr + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: peer pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac_debug( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, const char *func, int line); + +#define wlan_objmgr_get_peer_by_mac(psoc, macaddr, dbgid) \ + wlan_objmgr_get_peer_by_mac_debug(psoc, macaddr, dbgid, \ + __func__, __LINE__) +#else +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_get_peer() - find peer from psoc's peer list + * @psoc: PSOC object + * @pdev_id: Pdev id + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer by MAC addr and pdev id + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: peer pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_objmgr_get_peer_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_get_peer(psoc, pdev_id, macaddr, dbgid) \ + wlan_objmgr_get_peer_debug(psoc, pdev_id, macaddr, dbgid, \ + __func__, __LINE__) +#else +struct wlan_objmgr_peer *wlan_objmgr_get_peer( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_get_peer_nolock() - find peer from psoc's peer list (lock free) + * @psoc: PSOC object + * @pdev_id: Pdev id + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer by MAC addr + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: peer pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_objmgr_get_peer_nolock_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_get_peer_nolock(psoc, pdev_id, macaddr, dbgid) \ + wlan_objmgr_get_peer_nolock_debug(psoc, pdev_id, macaddr, \ + dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_peer *wlan_objmgr_get_peer_nolock( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_get_peer_logically_deleted() - find peer + * from psoc's peer list + * @psoc: PSOC object + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer of logically deleted peer + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: peer pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_objmgr_get_peer_logically_deleted_debug( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_get_peer_logically_deleted(psoc, macaddr, dbgid) \ + wlan_objmgr_get_peer_logically_deleted_debug(psoc, macaddr, \ + dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_peer *wlan_objmgr_get_peer_logically_deleted( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_get_peer_no_state() - find peer from psoc's peer list + * @psoc: PSOC object + * @pdev_id: Pdev id + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer by MAC addr and pdev id, + * ignores the state check + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: peer pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_objmgr_get_peer_no_state_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_get_peer_no_state(psoc, pdev_id, macaddr, dbgid) \ + wlan_objmgr_get_peer_no_state_debug(psoc, pdev_id, macaddr, \ + dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_peer *wlan_objmgr_get_peer_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev() - get peer from + * psoc peer list using + * mac and vdev + * self mac + * @psoc: PSOC object + * @pdev_id: Pdev id + * @bssid: BSSID address + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer by MAC addr, vdev self mac + * address and pdev id for a node that is logically in deleted state + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: List of peer pointers + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +qdf_list_t *wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev( \ + psoc, pdev_id, bssid, macaddr, dbgid) \ + wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev_debug( \ + psoc, pdev_id, bssid, macaddr, dbgid, __func__, __LINE__) +#else +qdf_list_t *wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_get_peer_by_mac_n_vdev() - find peer from psoc's peer list + * using mac address and bssid + * @psoc: PSOC object + * @pdev_id: Pdev id + * @bssid: MAC address of AP its associated + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer by MAC addr and vdev self mac address + * and pdev id + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: peer pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac_n_vdev_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_get_peer_by_mac_n_vdev(psoc, pdevid, bssid, macaddr, \ + dbgid) \ + wlan_objmgr_get_peer_by_mac_n_vdev_debug(psoc, pdevid, \ + bssid, macaddr, dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac_n_vdev( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_get_peer_by_mac_n_vdev_no_state() - find peer from psoc's peer + * list using mac address and bssid + * @psoc: PSOC object + * @pdev_id: Pdev id + * @bssid: MAC address of AP its associated + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer by MAC addr, vdev self mac address, + * and pdev id ,ignores the state + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: peer pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac_n_vdev_no_state_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_get_peer_by_mac_n_vdev_no_state(psoc, pdevid, bssid, \ + macaddr, dbgid) \ + wlan_objmgr_get_peer_by_mac_n_vdev_no_state_debug(psoc, \ + pdevid, bssid, macaddr, dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac_n_vdev_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_get_pdev_by_id() - retrieve pdev by id + * @psoc: PSOC object + * @id: pdev id + * @dbg_id: id of the caller + * + * API to find pdev object pointer by pdev id + * + * This API increments the ref count of the pdev object internally, the + * caller has to invoke the wlan_objmgr_pdev_release_ref() to decrement + * ref count + * + * Return: pdev pointer + * NULL on FAILURE + */ +struct wlan_objmgr_pdev *wlan_objmgr_get_pdev_by_id( + struct wlan_objmgr_psoc *psoc, uint8_t id, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_pdev_by_id_no_state() - retrieve pdev by id + * @psoc: PSOC object + * @id: pdev id + * @dbg_id: id of the caller + * + * API to find pdev object pointer by pdev id, Ignores the state check + * + * This API increments the ref count of the pdev object internally, the + * caller has to invoke the wlan_objmgr_pdev_release_ref() to decrement + * ref count + * + * Return: pdev pointer + * NULL on FAILURE + */ +struct wlan_objmgr_pdev *wlan_objmgr_get_pdev_by_id_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t id, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_pdev_by_macaddr() - retrieve pdev by macaddr + * @psoc: PSOC object + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find pdev object pointer by pdev macaddr + * + * This API increments the ref count of the pdev object internally, the + * caller has to invoke the wlan_objmgr_pdev_release_ref() to decrement + * ref count + * + * Return: pdev pointer + * NULL on FAILURE + */ +struct wlan_objmgr_pdev *wlan_objmgr_get_pdev_by_macaddr( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_pdev_by_macaddr_no_state() - retrieve pdev by macaddr + * @psoc: PSOC object + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find pdev object pointer by pdev macaddr, ignores the state check + * + * This API increments the ref count of the pdev object internally, the + * caller has to invoke the wlan_objmgr_pdev_release_ref() to decrement + * ref count + * + * Return: pdev pointer + * NULL on FAILURE + */ +struct wlan_objmgr_pdev *wlan_objmgr_get_pdev_by_macaddr_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_vdev_by_opmode_from_psoc() - retrieve vdev by opmode + * @psoc: PSOC object + * @opmode: vdev operating mode + * @dbg_id: id of the caller + * + * API to find vdev object pointer by vdev operating mode from psoc + * + * This API increments the ref count of the vdev object internally, the + * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement + * ref count + * + * Return: vdev pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_opmode_from_psoc_debug( + struct wlan_objmgr_psoc *psoc, + enum QDF_OPMODE opmode, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_get_vdev_by_opmode_from_psoc(psoc, opmode, dbgid) \ + wlan_objmgr_get_vdev_by_opmode_from_psoc_debug(psoc, opmode, \ + dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_opmode_from_psoc( + struct wlan_objmgr_psoc *psoc, + enum QDF_OPMODE opmode, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_get_vdev_by_id_from_psoc() - retrieve vdev by id + * @psoc: PSOC object + * @id: vdev id + * @dbg_id: id of the caller + * + * API to find vdev object pointer by vdev id from psoc + * + * This API increments the ref count of the vdev object internally, the + * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement + * ref count + * + * Return: vdev pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_psoc_debug( + struct wlan_objmgr_psoc *psoc, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, dbgid) \ + wlan_objmgr_get_vdev_by_id_from_psoc_debug(psoc, vdev_id, \ + dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_psoc( + struct wlan_objmgr_psoc *psoc, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_get_vdev_by_id_from_psoc_no_state() - retrieve vdev by id + * @psoc: PSOC object + * @id: vdev id + * @dbg_id: id of the caller + * + * API to find vdev object pointer by vdev id from psoc, ignores the + * state check + * + * This API increments the ref count of the vdev object internally, the + * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement + * ref count + * + * Return: vdev pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_psoc_no_state_debug( + struct wlan_objmgr_psoc *psoc, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_get_vdev_by_id_from_psoc_no_state(psoc, vdev_id, dbgid) \ + wlan_objmgr_get_vdev_by_id_from_psoc_no_state_debug(psoc, \ + vdev_id, dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_psoc_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_get_vdev_by_macaddr_from_psoc() - retrieve vdev by macaddr + * @psoc: PSOC object + * @pdev_id: Pdev id + * @macaddr: macaddr + * @dbg_id: id of the caller + * + * API to find vdev object pointer by vdev macaddr from pdev + * + * This API increments the ref count of the vdev object internally, the + * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement + * ref count + * + * Return: vdev pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_psoc_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_get_vdev_by_macaddr_from_psoc(psoc, pdev_id, macaddr, \ + dbgid) \ + wlan_objmgr_get_vdev_by_macaddr_from_psoc_debug(psoc, pdev_id, \ + macaddr, dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_psoc( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_get_vdev_by_macaddr_from_psoc_no_state() - retrieve vdev by + * macaddr + * @psoc: PSOC object + * @pdev_id: Pdev id + * @macaddr: macaddr + * @dbg_id: id of the caller + * + * API to find vdev object pointer by vdev macaddr from psoc, ignores the state + * check + * + * This API increments the ref count of the vdev object internally, the + * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement + * ref count + * + * Return: vdev pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev + *wlan_objmgr_get_vdev_by_macaddr_from_psoc_no_state_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_objmgr_get_vdev_by_macaddr_from_psoc_no_state(psoc, pdev_id, \ + macaddr, dbgid) \ + wlan_objmgr_get_vdev_by_macaddr_from_psoc_no_state_debug(psoc, \ + pdev_id, macaddr, dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_psoc_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_psoc_obj_lock() - Acquire PSOC spinlock + * @psoc: PSOC object + * + * API to acquire PSOC lock + * Parent lock should not be taken in child lock context + * but child lock can be taken in parent lock context + * (for ex: psoc lock can't be invoked in pdev/vdev/peer lock context) + * + * Return: void + */ +static inline void wlan_psoc_obj_lock(struct wlan_objmgr_psoc *psoc) +{ + qdf_spin_lock_bh(&psoc->psoc_lock); +} + +/** + * wlan_psoc_obj_unlock() - Release PSOC spinlock + * @psoc: PSOC object + * + * API to Release PSOC lock + * + * Return: void + */ +static inline void wlan_psoc_obj_unlock(struct wlan_objmgr_psoc *psoc) +{ + qdf_spin_unlock_bh(&psoc->psoc_lock); +} + +/** + * wlan_psoc_set_nif_phy_version() - set nif phy version + * @psoc: PSOC object + * @phy_ver: phy version + * + * API to set nif phy version in psoc + * + * Return: void + */ +static inline void wlan_psoc_set_nif_phy_version(struct wlan_objmgr_psoc *psoc, + uint32_t phy_ver) +{ + psoc->soc_nif.phy_version = phy_ver; +} + +/** + * wlan_psoc_get_nif_phy_version() - get nif phy version + * @psoc: PSOC object + * + * API to set nif phy version in psoc + * + * Return: @phy_ver: phy version + */ +static inline uint32_t wlan_psoc_get_nif_phy_version( + struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return (uint32_t)-1; + + return psoc->soc_nif.phy_version; +} + +/** + * wlan_psoc_set_dev_type() - set dev type + * @psoc: PSOC object + * @phy_type: phy type (OL/DA) + * + * API to set dev type in psoc + * + * Return: void + */ +static inline void wlan_psoc_set_dev_type(struct wlan_objmgr_psoc *psoc, + WLAN_DEV_TYPE phy_type) +{ + psoc->soc_nif.phy_type = phy_type; +} + +/** + * wlan_objmgr_psoc_get_dev_type - get dev type + * @psoc: PSOC object + * + * API to get dev type in psoc + * + * Return: phy type (OL/DA) + */ +static inline WLAN_DEV_TYPE wlan_objmgr_psoc_get_dev_type( + struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return (uint32_t)-1; + + return psoc->soc_nif.phy_type; +} + +/** + * wlan_psoc_nif_fw_cap_set() - set fw caps + * @psoc: PSOC object + * @cap: capability flag to be set + * + * API to set fw caps in psoc + * + * Return: void + */ +static inline void wlan_psoc_nif_fw_cap_set(struct wlan_objmgr_psoc *psoc, + uint32_t cap) +{ + psoc->soc_nif.soc_fw_caps |= cap; +} + +/** + * wlan_psoc_nif_fw_cap_clear() - clear fw caps + * @psoc: PSOC object + * @cap: capability flag to be cleared + * + * API to clear fw caps in psoc + * + * Return: void + */ +static inline void wlan_psoc_nif_fw_cap_clear(struct wlan_objmgr_psoc *psoc, + uint32_t cap) +{ + psoc->soc_nif.soc_fw_caps &= ~cap; +} + +/** + * wlan_psoc_nif_fw_cap_get() - get fw caps + * @psoc: PSOC object + * @cap: capability flag to be checked + * + * API to know, whether particular fw caps flag is set in psoc + * + * Return: 1 (for set) or 0 (for not set) + */ +static inline uint8_t wlan_psoc_nif_fw_cap_get(struct wlan_objmgr_psoc *psoc, + uint32_t cap) +{ + return (psoc->soc_nif.soc_fw_caps & cap) ? 1 : 0; +} + +/** + * wlan_psoc_nif_fw_ext_cap_set() - set fw ext caps + * @psoc: PSOC object + * @ext_cap: capability flag to be set + * + * API to set fw ext caps in psoc + * + * Return: void + */ +static inline void wlan_psoc_nif_fw_ext_cap_set(struct wlan_objmgr_psoc *psoc, + uint32_t ext_cap) +{ + psoc->soc_nif.soc_fw_ext_caps |= ext_cap; +} + +/** + * wlan_psoc_nif_fw_ext_cap_clear() - clear fw ext caps + * @psoc: PSOC object + * @ext_cap: capability flag to be cleared + * + * API to clear fw ext caps in psoc + * + * Return: void + */ +static inline void wlan_psoc_nif_fw_ext_cap_clear(struct wlan_objmgr_psoc *psoc, + uint32_t ext_cap) +{ + psoc->soc_nif.soc_fw_ext_caps &= ~ext_cap; +} + +/** + * wlan_psoc_nif_fw_ext_cap_get() - get fw caps + * @psoc: PSOC object + * @ext_cap: capability flag to be checked + * + * API to know, whether particular fw caps flag is set in psoc + * + * Return: 1 (for set) or 0 (for not set) + */ +static inline uint8_t wlan_psoc_nif_fw_ext_cap_get( + struct wlan_objmgr_psoc *psoc, uint32_t ext_cap) +{ + return (psoc->soc_nif.soc_fw_ext_caps & ext_cap) ? 1 : 0; +} + +/** + * wlan_psoc_nif_feat_cap_set() - set feature caps + * @psoc: PSOC object + * @cap: feature flag to be set + * + * API to set feature caps in psoc + * + * Return: void + */ +static inline void wlan_psoc_nif_feat_cap_set(struct wlan_objmgr_psoc *psoc, + uint32_t feat_cap) +{ + psoc->soc_nif.soc_feature_caps |= feat_cap; +} + +/** + * wlan_psoc_nif_feat_cap_clear() - clear feature caps + * @psoc: PSOC object + * @cap: feature flag to be cleared + * + * API to clear feature caps in psoc + * + * Return: void + */ +static inline void wlan_psoc_nif_feat_cap_clear(struct wlan_objmgr_psoc *psoc, + uint32_t feat_cap) +{ + psoc->soc_nif.soc_feature_caps &= ~feat_cap; +} + +/** + * wlan_psoc_nif_feat_cap_get() - get feature caps + * @psoc: PSOC object + * @cap: feature flag to be checked + * + * API to know, whether particular feature cap flag is set in psoc + * + * Return: 1 (for set) or 0 (for not set) + */ +static inline uint8_t wlan_psoc_nif_feat_cap_get(struct wlan_objmgr_psoc *psoc, + uint32_t feat_cap) +{ + return (psoc->soc_nif.soc_feature_caps & feat_cap) ? 1 : 0; +} + +/** + * wlan_psoc_nif_op_flag_get() - get op flags + * @psoc: PSOC object + * @flag: op flag to be checked + * + * API to know, whether particular op flag is set in psoc + * + * Return: 1 (for set) or 0 (for not set) + */ +static inline uint8_t wlan_psoc_nif_op_flag_get(struct wlan_objmgr_psoc *psoc, + uint32_t flag) +{ + return (psoc->soc_nif.soc_op_flags & flag) ? 1 : 0; +} + +/** + * wlan_psoc_nif_op_flag_set() - set op flag + * @psoc: PSOC object + * @flag: op flag to be set + * + * API to set op flag in psoc + * + * Return: void + */ +static inline void wlan_psoc_nif_op_flag_set(struct wlan_objmgr_psoc *psoc, + uint32_t flag) +{ + psoc->soc_nif.soc_op_flags |= flag; +} + +/** + * wlan_psoc_nif_op_flag_clear() - clear op flag + * @psoc: PSOC object + * @flag: op flag to be cleared + * + * API to clear op flag in psoc + * + * Return: void + */ +static inline void wlan_psoc_nif_op_flag_clear(struct wlan_objmgr_psoc *psoc, + uint32_t flag) +{ + psoc->soc_nif.soc_op_flags &= ~flag; +} + +/** + * wlan_psoc_set_hw_macaddr() - set hw mac addr + * @psoc: PSOC object + * @macaddr: hw macaddr + * + * API to set hw macaddr of psoc + * + * Caller need to acquire lock with wlan_psoc_obj_lock() + * + * Return: void + */ +static inline void wlan_psoc_set_hw_macaddr(struct wlan_objmgr_psoc *psoc, + uint8_t *macaddr) +{ + /* This API is invoked with lock acquired, do not add log prints */ + if (psoc) + WLAN_ADDR_COPY(psoc->soc_nif.soc_hw_macaddr, macaddr); +} + +/** + * wlan_psoc_get_hw_macaddr() - get hw macaddr + * @psoc: PSOC object + * + * API to set hw macaddr of psoc + * + * Return: hw macaddr + */ +static inline uint8_t *wlan_psoc_get_hw_macaddr(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return NULL; + + return psoc->soc_nif.soc_hw_macaddr; +} + +/** + * wlan_objmgr_psoc_get_comp_private_obj(): API to retrieve component object + * @psoc: Psoc pointer + * @id: component id + * + * This API is used to get the component private object pointer tied to the + * corresponding psoc object + * + * Return: Component private object + */ +void *wlan_objmgr_psoc_get_comp_private_obj(struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id); +/** + * wlan_psoc_get_pdev_count() - get pdev count for psoc + * @psoc: PSOC object + * + * API to get number of pdev's attached to the psoc + * + * Return: number of pdev's + */ +static inline uint8_t wlan_psoc_get_pdev_count(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return 0; + + return psoc->soc_objmgr.wlan_pdev_count; +} + +/** + * wlan_psoc_set_tgt_if_handle(): API to set target if handle in psoc object + * @psoc: Psoc pointer + * @tgt_if_handle: target interface handle + * + * API to set target interface handle in psoc object + * + * Return: None + */ +static inline +void wlan_psoc_set_tgt_if_handle(struct wlan_objmgr_psoc *psoc, + target_psoc_info_t *tgt_if_handle) +{ + if (!psoc) + return; + + psoc->tgt_if_handle = tgt_if_handle; +} + +/** + * wlan_psoc_get_tgt_if_handle(): API to get target interface handle + * @psoc: Psoc pointer + * + * API to get target interface handle from psoc object + * + * Return: target interface handle + */ +static inline +target_psoc_info_t *wlan_psoc_get_tgt_if_handle(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return NULL; + + return psoc->tgt_if_handle; +} + +/** + * wlan_psoc_get_qdf_dev(): API to get qdf device + * @psoc: Psoc pointer + * + * API to get qdf device from psoc object + * + * Return: qdf_device_t + */ +static inline qdf_device_t wlan_psoc_get_qdf_dev( + struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return NULL; + + return psoc->soc_objmgr.qdf_dev; +} + +/** + * wlan_psoc_set_qdf_dev(): API to get qdf device + * @psoc: Psoc pointer + * dev: qdf device + * + * API to set qdf device from psoc object + * + * Return: None + */ +static inline void wlan_psoc_set_qdf_dev( + struct wlan_objmgr_psoc *psoc, + qdf_device_t dev) +{ + if (!psoc) + return; + + psoc->soc_objmgr.qdf_dev = dev; +} + +/** + * wlan_psoc_set_max_vdev_count() - set psoc max vdev count + * @psoc: PSOC object + * @vdev count: Max vdev count + * + * API to set Max vdev count + * + * Return: void + */ +static inline void wlan_psoc_set_max_vdev_count(struct wlan_objmgr_psoc *psoc, + uint8_t max_vdev_count) +{ + psoc->soc_objmgr.max_vdev_count = max_vdev_count; +} + +/** + * wlan_psoc_get_max_vdev_count() - get psoc max vdev count + * @psoc: PSOC object + * + * API to set Max vdev count + * + * Return: @vdev count: Max vdev count + */ +static inline uint8_t wlan_psoc_get_max_vdev_count( + struct wlan_objmgr_psoc *psoc) +{ + return psoc->soc_objmgr.max_vdev_count; +} + +/** + * wlan_psoc_set_max_peer_count() - set psoc max peer count + * @psoc: PSOC object + * @peer count: Max peer count + * + * API to set Max peer count + * + * Return: void + */ +static inline void wlan_psoc_set_max_peer_count(struct wlan_objmgr_psoc *psoc, + uint16_t max_peer_count) +{ + psoc->soc_objmgr.max_peer_count = max_peer_count; +} + +/** + * wlan_psoc_get_max_peer_count() - get psoc max peer count + * @psoc: PSOC object + * + * API to set Max peer count + * + * Return: @peer count: Max peer count + */ +static inline uint16_t wlan_psoc_get_max_peer_count( + struct wlan_objmgr_psoc *psoc) +{ + return psoc->soc_objmgr.max_peer_count; +} + +/** + * wlan_psoc_get_peer_count() - get psoc peer count + * @psoc: PSOC object + * + * API to get peer count + * + * Return: @peer count: peer count + */ +static inline uint16_t wlan_psoc_get_peer_count( + struct wlan_objmgr_psoc *psoc) +{ + return psoc->soc_objmgr.wlan_peer_count; +} + + +/** + * DOC: Examples to use PSOC ref count APIs + * + * In all the scenarios, the pair of API should be followed + * other it lead to memory leak + * + * scenario 1: + * + * wlan_objmgr_psoc_obj_create() + * ---- + * wlan_objmgr_psoc_obj_delete() + * + * scenario 2: + * + * wlan_objmgr_psoc_get_ref() + * ---- + * the operations which are done on + * psoc object + * ---- + * wlan_objmgr_psoc_release_ref() + */ + +/** + * wlan_objmgr_psoc_get_ref() - increment ref count + * @psoc: PSOC object + * @id: Object Manager ref debug id + * + * API to increment ref count of psoc + * + * Return: void + */ +void wlan_objmgr_psoc_get_ref(struct wlan_objmgr_psoc *psoc, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_objmgr_psoc_try_get_ref() - increment ref count, if allowed + * @psoc: PSOC object + * @id: Object Manager ref debug id + * + * API to increment ref count after checking valid object state + * + * Return: void + */ +QDF_STATUS wlan_objmgr_psoc_try_get_ref(struct wlan_objmgr_psoc *psoc, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_objmgr_psoc_release_ref() - decrement ref count + * @psoc: PSOC object + * @id: Object Manager ref debug id + * + * API to decrement ref count of psoc, if ref count is 1, it initiates the + * PSOC deletion + * + * Return: void + */ +void wlan_objmgr_psoc_release_ref(struct wlan_objmgr_psoc *psoc, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_objmgr_print_ref_all_objects_per_psoc() - print all psoc objects' + * ref counts + * @psoc: PSOC object + * + * API to be used for printing all the objects(pdev/vdev/peer) ref counts + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_print_ref_all_objects_per_psoc( + struct wlan_objmgr_psoc *psoc); + +/** +* wlan_objmgr_psoc_set_user_config () - populate user config +* data in psoc +* @psoc: psoc object pointer +* @user_config_data: pointer to user config data filled up by os +* dependent component +* it is intended to set all elements by OSIF/HDD and it not +* intended to modify a single element +* Return: QDF status +*/ +QDF_STATUS wlan_objmgr_psoc_set_user_config(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_psoc_user_config *user_config_data); + +/** + * wlan_objmgr_psoc_check_for_pdev_leaks() - Assert no pdevs attached to @psoc + * @psoc: The psoc to check + * + * Return: No. of psoc leaks + */ +uint32_t wlan_objmgr_psoc_check_for_pdev_leaks(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_objmgr_psoc_check_for_vdev_leaks() - Assert no vdevs attached to @psoc + * @psoc: The psoc to check + * + * Return: No. of vdev leaks + */ +uint32_t wlan_objmgr_psoc_check_for_vdev_leaks(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_objmgr_psoc_check_for_peer_leaks() - Assert no peers attached to @psoc + * @psoc: The psoc to check + * + * Return: No. of peer leaks + */ +uint32_t wlan_objmgr_psoc_check_for_peer_leaks(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_objmgr_psoc_check_for_leaks() - Assert on leak + * @psoc: The psoc to check + * + * Return: None + */ +void wlan_objmgr_psoc_check_for_leaks(struct wlan_objmgr_psoc *psoc); + +/** +* wlan_objmgr_psoc_get_band_capability () - get user config +* data for band capability +* @psoc: psoc object pointer +* +* Return: band_capability +*/ +static inline uint8_t wlan_objmgr_psoc_get_band_capability( + struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return 0; + + return psoc->soc_nif.user_config.band_capability; +} + +/** + * wlan_psoc_set_dp_handle() - set dp handle + * @psoc: psoc object pointer + * @dp_handle: Data path module handle + * + * Return: void + */ +static inline void wlan_psoc_set_dp_handle(struct wlan_objmgr_psoc *psoc, + void *dp_handle) +{ + if (qdf_unlikely(!psoc)) { + QDF_BUG(0); + return; + } + + psoc->dp_handle = dp_handle; +} + +/** + * wlan_psoc_get_dp_handle() - get dp handle + * @psoc: psoc object pointer + * + * Return: dp handle + */ +static inline void *wlan_psoc_get_dp_handle(struct wlan_objmgr_psoc *psoc) +{ + if (qdf_unlikely(!psoc)) { + QDF_BUG(0); + return NULL; + } + + return psoc->dp_handle; +} + +struct wlan_logically_del_peer { + qdf_list_node_t list; + struct wlan_objmgr_peer *peer; +}; + +/** + * wlan_psoc_get_lmac_if_txops() - get lmac if txops for the psoc + * @psoc: psoc object pointer + * + * Return: Pointer to wlan_lmac_if_tx_ops + */ +static inline struct wlan_lmac_if_tx_ops * +wlan_psoc_get_lmac_if_txops(struct wlan_objmgr_psoc *psoc) +{ + return &((psoc->soc_cb.tx_ops)); +} + +/** + * wlan_psoc_get_id() - get psoc id + * @psoc: PSOC object + * + * API to get psoc id + * + * Return: @psoc_id: psoc id + */ +static inline uint8_t wlan_psoc_get_id( + struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return (uint8_t)-1; + + return psoc->soc_objmgr.psoc_id; +} + +/** + * wlan_print_psoc_info() - print psoc members + * @psoc: psoc object pointer + * + * Return: void + */ +#ifdef WLAN_OBJMGR_DEBUG +void wlan_print_psoc_info(struct wlan_objmgr_psoc *psoc); +#else +static inline void wlan_print_psoc_info(struct wlan_objmgr_psoc *psoc) {} +#endif + +#endif /* _WLAN_OBJMGR_PSOC_OBJ_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_vdev_obj.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_vdev_obj.h new file mode 100644 index 0000000000000000000000000000000000000000..ce4637ef16e8f6c33c13ddf7cbce45881875eb8f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_vdev_obj.h @@ -0,0 +1,1476 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Define the vdev data structure of UMAC + */ + +#ifndef _WLAN_OBJMGR_VDEV_OBJ_H_ +#define _WLAN_OBJMGR_VDEV_OBJ_H_ + +#include "qdf_atomic.h" +#include "qdf_list.h" +#include "qdf_lock.h" +#include "qdf_types.h" +#include "wlan_cmn.h" +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_pdev_obj.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_vdev_mlme_main.h" +#include "include/wlan_vdev_mlme.h" +#include "wlan_vdev_mlme_api.h" +#include "wlan_mlme_dbg.h" + + /* CONF: privacy enabled */ +#define WLAN_VDEV_F_PRIVACY 0x00000001 + /* CONF: 11g w/o 11b sta's */ +#define WLAN_VDEV_F_PUREG 0x00000002 + /* CONF: des_bssid is set */ +#define WLAN_VDEV_F_DESBSSID 0x00000004 + /* CONF: bg scan enabled */ +#define WLAN_VDEV_F_BGSCAN 0x00000008 + /* CONF: sw tx retry enabled */ +#define WLAN_VDEV_F_SWRETRY 0x00000010 + /* STATUS: update beacon tim */ +#define WLAN_VDEV_F_TIMUPDATE 0x00000020 + /* CONF: WPA enabled */ +#define WLAN_VDEV_F_WPA1 0x00000040 + /* CONF: WPA2 enabled */ +#define WLAN_VDEV_F_WPA2 0x00000080 + /* CONF: WPA/WPA2 enabled */ +#define WLAN_VDEV_F_WPA 0x000000c0 + /* CONF: drop unencrypted */ +#define WLAN_VDEV_F_DROPUNENC 0x00000100 + /* CONF: TKIP countermeasures */ +#define WLAN_VDEV_F_COUNTERM 0x00000200 + /* CONF: hide SSID in beacon */ /*TODO PDEV/PSOC */ +#define WLAN_VDEV_F_HIDESSID 0x00000400 + /* CONF: disable internal bridge */ /*TODO PDEV/PSOC */ +#define WLAN_VDEV_F_NOBRIDGE 0x00000800 + /* STATUS: update beacon wme */ +#define WLAN_VDEV_F_WMEUPDATE 0x00001000 + /* CONF: 4 addr allowed */ +#define WLAN_VDEV_F_WDS 0x00002000 + /* CONF: enable U-APSD */ +#define WLAN_VDEV_F_UAPSD 0x00004000 + /* STATUS: sleeping */ +#define WLAN_VDEV_F_SLEEP 0x00008000 + /* drop uapsd EOSP frames for test */ +#define WLAN_VDEV_F_EOSPDROP 0x00010000 + /* CONF: A-MPDU supported */ +#define WLAN_VDEV_F_AMPDU 0x00020000 + /* STATE: beacon APP IE updated */ +#define WLAN_VDEV_F_APPIE_UPDATE 0x00040000 + /* CONF: WDS auto Detect/DELBA */ +#define WLAN_VDEV_F_WDS_AUTODETECT 0x00080000 + /* 11b only without 11g stations */ +#define WLAN_VDEV_F_PUREB 0x00100000 + /* disable HT rates */ +#define WLAN_VDEV_F_HTRATES 0x00200000 + /* Extender AP */ +#define WLAN_VDEV_F_AP 0x00400000 + /* CONF: deliver rx frames with 802.11 header */ +#define WLAN_VDEV_F_DELIVER_80211 0x00800000 + /* CONF: os sends down tx frames with 802.11 header */ +#define WLAN_VDEV_F_SEND_80211 0x01000000 + /* CONF: statically configured WDS */ +#define WLAN_VDEV_F_WDS_STATIC 0x02000000 + /* CONF: pure 11n mode */ +#define WLAN_VDEV_F_PURE11N 0x04000000 + /* CONF: pure 11ac mode */ +#define WLAN_VDEV_F_PURE11AC 0x08000000 + /* Basic Rates Update */ +#define WLAN_VDEV_F_BR_UPDATE 0x10000000 + /* CONF: restrict bw ont top of per 11ac/n */ +#define WLAN_VDEV_F_STRICT_BW 0x20000000 + /* Wi-Fi SON mode (with APS) */ +#define WLAN_VDEV_F_SON 0x40000000 + /* Wi-Fi SON mode (with APS) */ +#define WLAN_VDEV_F_MBO 0x80000000 + +/* Feature extension flags */ + /* CONF: MSFT safe mode */ +#define WLAN_VDEV_FEXT_SAFEMODE 0x00000001 + /* if the vap can sleep*/ +#define WLAN_VDEV_FEXT_CANSLEEP 0x00000002 + /* use sw bmiss timer */ +#define WLAN_VDEV_FEXT_SWBMISS 0x00000004 + /* enable beacon copy */ +#define WLAN_VDEV_FEXT_COPY_BEACON 0x00000008 +#define WLAN_VDEV_FEXT_WAPI 0x00000010 + /* 802.11h enabled */ +#define WLAN_VDEV_FEXT_DOTH 0x00000020 + /* if the vap has wds independance set */ +#define WLAN_VDEV_FEXT_VAPIND 0x00000040 + /* QBSS load IE enabled */ +#define WLAN_VDEV_FEXT_BSSLOAD 0x00000080 + /* Short Guard Interval Enable:1 Disable:0 */ +#define WLAN_VDEV_FEXT_SGI 0x00000100 + /* Short Guard Interval Enable:1 Disable:0 for VHT fixed rates */ +#define WLAN_VDEV_FEXT_DATASGI 0x00000200 + /* LDPC Enable Rx:1 TX: 2 ; Disable:0 */ +#define WLAN_VDEV_FEXT_LDPC_TX 0x00000400 +#define WLAN_VDEV_FEXT_LDPC_RX 0x00000800 +#define WLAN_VDEV_FEXT_LDPC 0x00000c00 + /* wme enabled */ +#define WLAN_VDEV_FEXT_WME 0x00001000 + /* WNM Capabilities */ +#define WLAN_VDEV_FEXT_WNM 0x00002000 + /* RRM Capabilities */ +#define WLAN_VDEV_FEXT_RRM 0x00004000 + /* WNM Proxy ARP Capabilities */ +#define WLAN_VDEV_FEXT_PROXYARP 0x00008000 + /* 256 QAM support in 2.4GHz mode Enable:1 Disable:0 */ +#define WLAN_VDEV_FEXT_256QAM 0x00010000 + /* 2.4NG 256 QAM Interop mode Enable:1 Disable:0 */ +#define WLAN_VDEV_FEXT_256QAM_INTEROP 0x00020000 + /* static mimo ps enabled */ +#define WLAN_VDEV_FEXT_STATIC_MIMOPS 0x00040000 + /* dynamic mimo ps enabled */ +#define WLAN_VDEV_FEXT_DYN_MIMOPS 0x00080000 + /* Country IE enabled */ +#define WLAN_VDEV_FEXT_CNTRY_IE 0x00100000 + /*does not want to trigger multi channel operation + instead follow master vaps channel (for AP/GO Vaps) */ +#define WLAN_VDEV_FEXT_NO_MULCHAN 0x00200000 + /*non-beaconing AP VAP*/ +#define WLAN_VDEV_FEXT_NON_BEACON 0x00400000 + /* SPL repeater enabled for SON*/ +#define WLAN_VDEV_FEXT_SON_SPL_RPT 0x00800000 + /* SON IE update in MGMT frame */ +#define WLAN_VDEV_FEXT_SON_INFO_UPDATE 0x01000000 + /* CONF: A-MSDU supported */ +#define WLAN_VDEV_FEXT_AMSDU 0x02000000 + /* VDEV is PSTA*/ +#define WLAN_VDEV_FEXT_PSTA 0x04000000 + /* VDEV is MPSTA*/ +#define WLAN_VDEV_FEXT_MPSTA 0x08000000 + /* VDEV is WRAP*/ +#define WLAN_VDEV_FEXT_WRAP 0x10000000 + /* VDEV has MAT enabled*/ +#define WLAN_VDEV_FEXT_MAT 0x20000000 + /* VDEV is wired PSTA*/ +#define WLAN_VDEV_FEXT_WIRED_PSTA 0x40000000 + /* Fils discovery on 6G SAP*/ +#define WLAN_VDEV_FEXT_FILS_DISC_6G_SAP 0x80000000 + +/* VDEV OP flags */ + /* if the vap destroyed by user */ +#define WLAN_VDEV_OP_DELETE_PROGRESS 0x00000001 + /* set to enable sta-fws fweature */ +#define WLAN_VDEV_OP_STAFWD 0x00000002 + /* Off-channel support enabled */ +#define WLAN_VDEV_OP_OFFCHAN 0x00000004 + /* if the vap has erp update set */ +#define WLAN_VDEV_OP_ERPUPDATE 0x00000008 + /* this vap needs scheduler for off channel operation */ +#define WLAN_VDEV_OP_NEEDS_SCHED 0x00000010 + /*STA in forced sleep set PS bit for all outgoing frames */ +#define WLAN_VDEV_OP_FORCED_SLEEP 0x00000020 + /* update bssload IE in beacon */ +#define WLAN_VDEV_OP_BSSLOAD_UPDATE 0x00000040 + /* Hotspot 2.0 DGAF Disable bit */ +#define WLAN_VDEV_OP_DGAF_DISABLE 0x00000080 + /* STA SmartNet enabled */ +#define WLAN_VDEV_OP_SMARTNET_EN 0x00000100 + /* SoftAP to reject resuming in DFS channels */ +#define WLAN_VDEV_OP_REJ_DFS_CHAN 0x00000200 + /* Trigger mlme response */ +#define WLAN_VDEV_OP_TRIGGER_MLME_RESP 0x00000400 + /* test flag for MFP */ +#define WLAN_VDEV_OP_MFP_TEST 0x00000800 + /* flag to indicate using default ratemask */ +#define WLAN_VDEV_OP_DEF_RATEMASK 0x00001000 +/*For wakeup AP VAP when wds-sta connect to the AP only use when + export (UMAC_REPEATER_DELAYED_BRINGUP || DBDC_REPEATER_SUPPORT)=1*/ +#define WLAN_VDEV_OP_KEYFLAG 0x00002000 + /* if performe the iwlist scanning */ +#define WLAN_VDEV_OP_LIST_SCANNING 0x00004000 + /*Set when VAP down*/ +#define WLAN_VDEV_OP_IS_DOWN 0x00008000 + /* if vap may require acs when another vap is brought down */ +#define WLAN_VDEV_OP_NEEDS_UP_ACS 0x00010000 + /* Block data traffic tx for this vap */ +#define WLAN_VDEV_OP_BLOCK_TX_TRAFFIC 0x00020000 + /* for mbo functionality */ +#define WLAN_VDEV_OP_MBO 0x00040000 + + /* CAPABILITY: IBSS available */ +#define WLAN_VDEV_C_IBSS 0x00000001 +/* CAPABILITY: HOSTAP avail */ +#define WLAN_VDEV_C_HOSTAP 0x00000002 + /* CAPABILITY: Old Adhoc Demo */ +#define WLAN_VDEV_C_AHDEMO 0x00000004 + /* CAPABILITY: sw tx retry */ +#define WLAN_VDEV_C_SWRETRY 0x00000008 + /* CAPABILITY: monitor mode */ +#define WLAN_VDEV_C_MONITOR 0x00000010 + /* CAPABILITY: TKIP MIC avail */ +#define WLAN_VDEV_C_TKIPMIC 0x00000020 + /* CAPABILITY: 4-addr support */ +#define WLAN_VDEV_C_WDS 0x00000040 + /* CAPABILITY: TKIP MIC for QoS frame */ +#define WLAN_VDEV_C_WME_TKIPMIC 0x00000080 + /* CAPABILITY: bg scanning */ +#define WLAN_VDEV_C_BGSCAN 0x00000100 + /* CAPABILITY: Restrict offchannel */ +#define WLAN_VDEV_C_RESTRICT_OFFCHAN 0x00000200 + +/* Invalid VDEV identifier */ +#define WLAN_INVALID_VDEV_ID 255 + +/** + * struct wlan_vdev_create_params - Create params, HDD/OSIF passes this + * structure While creating VDEV + * @opmode: Opmode of VDEV + * @flags: create flags + * @size_vdev_priv: Size of vdev private + * @legacy_osif: Legacy os_if private member + * @macaddr[]: MAC address + * @mataddr[]: MAT address + */ +struct wlan_vdev_create_params { + enum QDF_OPMODE opmode; + uint32_t flags; + size_t size_vdev_priv; + void *legacy_osif; + uint8_t macaddr[QDF_MAC_ADDR_SIZE]; + uint8_t mataddr[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct wlan_channel - channel structure + * @ch_freq: Channel in Mhz. + * @ch_ieee: IEEE channel number. + * @ch_freq_seg1: Channel Center frequeny for VHT80/160 and HE80/160. + * @ch_freq_seg2: Second channel Center frequency applicable for 80+80MHz mode. + * @ch_maxpower: Maximum tx power in dBm. + * @ch_flagext: Channel extension flags. + * @ch_flags: Channel flags. + * @ch_cfreq1: channel center frequency for primary + * @ch_cfreq2: channel center frequency for secondary + * @ch_width: Channel width. + * @ch_phymode: Channel phymode. + */ +struct wlan_channel { + uint16_t ch_freq; + uint8_t ch_ieee; + uint8_t ch_freq_seg1; + uint8_t ch_freq_seg2; + int8_t ch_maxpower; + uint16_t ch_flagext; + uint64_t ch_flags; + uint32_t ch_cfreq1; + uint32_t ch_cfreq2; + enum phy_ch_width ch_width; + enum wlan_phymode ch_phymode; +}; + +/** + * struct wlan_objmgr_vdev_mlme - VDEV MLME specific sub structure + * @vdev_opmode: Opmode of VDEV + * @mlme_state: VDEV MLME SM state + * @mlme_state: VDEV MLME SM substate + * @bss_chan: BSS channel + * @des_chan: Desired channel, for STA Desired may not be used + * @vdev_caps: VDEV capabilities + * @vdev_feat_caps: VDEV feature caps + * @vdev_feat_ext_caps: VDEV Extended feature caps + * @vdev_op_flags: Operation flags + * @mataddr[]: MAT address + * @macaddr[]: VDEV self MAC address + * @ssid[]: SSID + * @ssid_len: SSID length + * @nss: Num. Spatial streams + * @tx_chainmask: Tx Chainmask + * @rx_chainmask: Rx Chainmask + * @tx_power: Tx power + * @max_rate: MAX rate + * @tx_mgmt_rate: TX Mgmt. Rate + * @per_band_mgmt_rate: Per-band TX Mgmt. Rate + */ +struct wlan_objmgr_vdev_mlme { + enum QDF_OPMODE vdev_opmode; + enum wlan_vdev_state mlme_state; + enum wlan_vdev_state mlme_substate; + struct wlan_channel *bss_chan; + struct wlan_channel *des_chan; + uint32_t vdev_caps; + uint32_t vdev_feat_caps; + uint32_t vdev_feat_ext_caps; + uint32_t vdev_op_flags; + uint8_t mataddr[QDF_MAC_ADDR_SIZE]; + uint8_t macaddr[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct wlan_objmgr_vdev_nif - VDEV HDD specific sub structure + * @osdev: OS specific pointer + */ +struct wlan_objmgr_vdev_nif { + struct vdev_osif_priv *osdev; +}; + +/** + * struct wlan_objmgr_vdev_objmgr - vdev object manager sub structure + * @vdev_id: VDEV id + * @print_cnt: Count to throttle Logical delete prints + * @self_peer: Self PEER + * @bss_peer: BSS PEER + * @wlan_peer_list: PEER list + * @wlan_pdev: PDEV pointer + * @wlan_peer_count: Peer count + * @max_peer_count: Max Peer count + * @c_flags: creation specific flags + * @ref_cnt: Ref count + * @ref_id_dbg: Array to track Ref count + * @wlan_objmgr_trace: Trace ref and deref + */ +struct wlan_objmgr_vdev_objmgr { + uint8_t vdev_id; + uint8_t print_cnt; + struct wlan_objmgr_peer *self_peer; + struct wlan_objmgr_peer *bss_peer; + qdf_list_t wlan_peer_list; + struct wlan_objmgr_pdev *wlan_pdev; + uint16_t wlan_peer_count; + uint16_t max_peer_count; + uint32_t c_flags; + qdf_atomic_t ref_cnt; + qdf_atomic_t ref_id_dbg[WLAN_REF_ID_MAX]; +#ifdef WLAN_OBJMGR_REF_ID_TRACE + struct wlan_objmgr_trace trace; +#endif +}; + +/** + * struct wlan_objmgr_vdev - VDEV common object + * @vdev_node: qdf list of pdev's vdev list + * @vdev_mlme: VDEV MLME substructure + * @vdev_objmgr: VDEV Object Mgr substructure + * @vdev_nif: VDEV HDD substructure + * @vdev_comp_priv_obj[]:Component's private objects list + * @obj_status[]: Component object status + * @obj_state: VDEV object state + * @vdev_lock: VDEV lock + */ +struct wlan_objmgr_vdev { + qdf_list_node_t vdev_node; + struct wlan_objmgr_vdev_mlme vdev_mlme; + struct wlan_objmgr_vdev_objmgr vdev_objmgr; + struct wlan_objmgr_vdev_nif vdev_nif; + void *vdev_comp_priv_obj[WLAN_UMAC_MAX_COMPONENTS]; + QDF_STATUS obj_status[WLAN_UMAC_MAX_COMPONENTS]; + WLAN_OBJ_STATE obj_state; + qdf_spinlock_t vdev_lock; +}; + +/** + ** APIs to Create/Delete Global object APIs + */ +/** + * wlan_objmgr_vdev_obj_create() - vdev object create + * @pdev: PDEV object on which this vdev gets created + * @params: VDEV create params from HDD + * + * Creates vdev object, intializes with default values + * Attaches to psoc and pdev objects + * Invokes the registered notifiers to create component object + * + * Return: Handle to struct wlan_objmgr_vdev on successful creation, + * NULL on Failure (on Mem alloc failure and Component objects + * Failure) + */ +struct wlan_objmgr_vdev *wlan_objmgr_vdev_obj_create( + struct wlan_objmgr_pdev *pdev, + struct wlan_vdev_create_params *params); + +/** + * wlan_objmgr_vdev_obj_delete() - vdev object delete + * @vdev: vdev object + * + * Logically deletes VDEV object, + * Once all the references are released, object manager invokes the registered + * notifiers to destroy component objects + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_vdev_obj_delete(struct wlan_objmgr_vdev *vdev); + +/** + ** APIs to attach/detach component objects + */ +/** + * wlan_objmgr_vdev_component_obj_attach() - vdev comp object attach + * @vdev: VDEV object + * @id: Component id + * @comp_priv_obj: component's private object pointer + * @status: Component's private object creation status + * + * API to be used for attaching component object with VDEV common object + * + * Return: SUCCESS on successful storing of component's object in common object + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_vdev_component_obj_attach( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status); + +/** + * wlan_objmgr_vdev_component_obj_detach() - vdev comp object detach + * @vdev: VDEV object + * @id: Component id + * @comp_priv_obj: component's private object pointer + * + * API to be used for detaching component object with VDEV common object + * + * Return: SUCCESS on successful removal of component's object from common + * object + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_vdev_component_obj_detach( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj); +/* + ** APIs to operations on vdev objects +*/ + +typedef void (*wlan_objmgr_vdev_op_handler)(struct wlan_objmgr_vdev *vdev, + void *object, + void *arg); + +/** + * wlan_objmgr_iterate_peerobj_list() - iterate vdev's peer list + * @vdev: vdev object + * @handler: the handler will be called for each object of requested type + * the handler should be implemented to perform required operation + * @arg: agruments passed by caller + * @dbg_id: id of the caller + * + * API to be used for performing the operations on all PEER objects + * of vdev + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_iterate_peerobj_list( + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_vdev_op_handler handler, + void *arg, wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_vdev_get_log_del_peer_list() - vdev logically deleted peer list + * @vdev: vdev object + * @dbg_id: id of the caller + * + * API to be used for populating the list of logically deleted peers from the + * vdev's peer list + * + * The caller of this function should free the memory allocated for the + * peerlist and the peer member in the list + * Also the peer ref release is handled by the caller + * + * Return: list of peer pointers + * NULL on FAILURE + */ +qdf_list_t *wlan_objmgr_vdev_get_log_del_peer_list( + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_trigger_vdev_comp_priv_object_creation() - vdev + * comp object creation + * @vdev: VDEV object + * @id: Component id + * + * API to create component private object in run time, this would + * be used for features which gets enabled in run time + * + * Return: SUCCESS on successful creation + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_trigger_vdev_comp_priv_object_creation( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id); + +/** + * wlan_objmgr_trigger_vdev_comp_priv_object_deletion() - vdev comp + * object deletion + * @vdev: VDEV object + * @id: Component id + * + * API to destroy component private object in run time, this would + * be used for features which gets disabled in run time + * + * Return: SUCCESS on successful deletion + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_trigger_vdev_comp_priv_object_deletion( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id); + +/** + * wlan_objmgr_vdev_get_comp_private_obj() - get vdev component private object + * @vdev: VDEV object + * @id: Component id + * + * API to get component private object + * + * Return: void *ptr on SUCCESS + * NULL on Failure + */ +void *wlan_objmgr_vdev_get_comp_private_obj( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id); + +/* Util APIs */ + +/** + * wlan_vdev_get_pdev() - get pdev + * @vdev: VDEV object + * + * API to get pdev object pointer from vdev + * + * Return: pdev object pointer + */ +static inline struct wlan_objmgr_pdev *wlan_vdev_get_pdev( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_objmgr.wlan_pdev; +} + +/** + * wlan_pdev_vdev_list_peek_head() - get first vdev from pdev list + * @peer_list: qdf_list_t + * + * API to get the head vdev of given vdev (of pdev's vdev list) + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: + * @peer: head peer + */ +static inline struct wlan_objmgr_vdev *wlan_pdev_vdev_list_peek_head( + qdf_list_t *vdev_list) +{ + struct wlan_objmgr_vdev *vdev; + qdf_list_node_t *vdev_node = NULL; + + /* This API is invoked with lock acquired, do not add log prints */ + if (qdf_list_peek_front(vdev_list, &vdev_node) != QDF_STATUS_SUCCESS) + return NULL; + + vdev = qdf_container_of(vdev_node, struct wlan_objmgr_vdev, vdev_node); + return vdev; +} + + +/** + * wlan_vdev_get_next_vdev_of_pdev() - get next vdev + * @vdev: VDEV object + * + * API to get next vdev object pointer of vdev + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: + * @vdev_next: VDEV object + */ +static inline struct wlan_objmgr_vdev *wlan_vdev_get_next_vdev_of_pdev( + qdf_list_t *vdev_list, + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_vdev *vdev_next; + qdf_list_node_t *node = &vdev->vdev_node; + qdf_list_node_t *next_node = NULL; + + /* This API is invoked with lock acquired, do not add log prints */ + if (!node) + return NULL; + + if (qdf_list_peek_next(vdev_list, node, &next_node) != + QDF_STATUS_SUCCESS) + return NULL; + + vdev_next = qdf_container_of(next_node, struct wlan_objmgr_vdev, + vdev_node); + return vdev_next; +} + + + +/** + * wlan_vdev_set_pdev() - set pdev + * @vdev: VDEV object + * @pdev: PDEV object + * + * API to get pdev object pointer from vdev + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: void + */ +static inline void wlan_vdev_set_pdev(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_pdev *pdev) +{ + /* This API is invoked with lock acquired, do not add log prints */ + vdev->vdev_objmgr.wlan_pdev = pdev; +} + +/** + * wlan_vdev_get_psoc() - get psoc + * @vdev: VDEV object + * + * API to get pdev object pointer from vdev + * + * Return: psoc object pointer + */ +static inline struct wlan_objmgr_psoc *wlan_vdev_get_psoc( + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_psoc *psoc = NULL; + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) + return NULL; + + psoc = wlan_pdev_get_psoc(pdev); + + return psoc; +} + +/** + * wlan_vdev_mlme_set_opmode() - set vdev opmode + * @vdev: VDEV object + * @mode: VDEV op mode + * + * API to set opmode in vdev object + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_opmode(struct wlan_objmgr_vdev *vdev, + enum QDF_OPMODE mode) +{ + vdev->vdev_mlme.vdev_opmode = mode; +} + +/** + * wlan_vdev_mlme_get_opmode() - get vdev opmode + * @vdev: VDEV object + * + * API to set opmode of vdev object + * + * Return: + * @mode: VDEV op mode + */ +static inline enum QDF_OPMODE wlan_vdev_mlme_get_opmode( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_mlme.vdev_opmode; +} + +/** + * wlan_vdev_mlme_set_macaddr() - set vdev macaddr + * @vdev: VDEV object + * @macaddr: MAC address + * + * API to set macaddr in vdev object + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_macaddr(struct wlan_objmgr_vdev *vdev, + uint8_t *macaddr) +{ + /* This API is invoked with lock acquired, do not add log prints */ + WLAN_ADDR_COPY(vdev->vdev_mlme.macaddr, macaddr); +} + +/** + * wlan_vdev_mlme_get_macaddr() - get vdev macaddr + * @vdev: VDEV object + * + * API to get MAC address from vdev object + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: + * @macaddr: MAC address + */ +static inline uint8_t *wlan_vdev_mlme_get_macaddr(struct wlan_objmgr_vdev *vdev) +{ + /* This API is invoked with lock acquired, do not add log prints */ + return vdev->vdev_mlme.macaddr; +} + +/** + * wlan_vdev_mlme_set_mataddr() - set vdev mataddr + * @vdev: VDEV object + * @mataddr: MAT address + * + * API to set mataddr in vdev object + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_mataddr(struct wlan_objmgr_vdev *vdev, + uint8_t *mataddr) +{ + /* This API is invoked with lock acquired, do not add log prints */ + WLAN_ADDR_COPY(vdev->vdev_mlme.mataddr, mataddr); +} + +/** + * wlan_vdev_mlme_get_mataddr() - get mataddr + * @vdev: VDEV object + * + * API to get MAT address from vdev object + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: + * @mataddr: MAT address + */ +static inline uint8_t *wlan_vdev_mlme_get_mataddr(struct wlan_objmgr_vdev *vdev) +{ + /* This API is invoked with lock acquired, do not add log prints */ + return vdev->vdev_mlme.mataddr; +} + +/** + * wlan_vdev_get_id() - get vdev id + * @vdev: VDEV object + * + * API to get vdev id + * + * Return: + * @id: vdev id + */ +static inline uint8_t wlan_vdev_get_id(struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_objmgr.vdev_id; +} + +/** + * wlan_vdev_get_hw_macaddr() - get hw macaddr + * @vdev: VDEV object + * + * API to retrieve the HW MAC address from PDEV + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: + * @macaddr: HW MAC address + */ +static inline uint8_t *wlan_vdev_get_hw_macaddr(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + + /* This API is invoked with lock acquired, do not add log prints */ + if (pdev) + return wlan_pdev_get_hw_macaddr(pdev); + else + return NULL; +} + +/** + * wlan_vdev_obj_lock() - Acquire VDEV spinlock + * @vdev: VDEV object + * + * API to acquire VDEV lock + * Parent lock should not be taken in child lock context + * but child lock can be taken in parent lock context + * (for ex: psoc lock can't be invoked in pdev/vdev/peer lock context) + * + * Return: void + */ +static inline void wlan_vdev_obj_lock(struct wlan_objmgr_vdev *vdev) +{ + qdf_spin_lock_bh(&vdev->vdev_lock); +} + +/** + * wlan_vdev_obj_unlock() - Release VDEV spinlock + * @vdev: VDEV object + * + * API to Release VDEV lock + * + * Return: void + */ +static inline void wlan_vdev_obj_unlock(struct wlan_objmgr_vdev *vdev) +{ + qdf_spin_unlock_bh(&vdev->vdev_lock); +} + +/** + * wlan_vdev_mlme_set_bss_chan() - set bss chan + * @vdev: VDEV object + * @bss_chan: Channel + * + * API to set the BSS channel + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_bss_chan( + struct wlan_objmgr_vdev *vdev, + struct wlan_channel *bss_chan) +{ + vdev->vdev_mlme.bss_chan = bss_chan; +} + +/** + * wlan_vdev_mlme_get_bss_chan() - get bss chan + * @vdev: VDEV object + * + * API to get the BSS channel + * + * Return: + * @bss_chan: Channel + */ +static inline struct wlan_channel *wlan_vdev_mlme_get_bss_chan( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_mlme.bss_chan; +} + +/** + * wlan_vdev_mlme_set_des_chan() - set desired chan + * @vdev: VDEV object + * @des_chan: Channel configured by user + * + * API to set the desired channel + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_des_chan( + struct wlan_objmgr_vdev *vdev, + struct wlan_channel *des_chan) +{ + vdev->vdev_mlme.des_chan = des_chan; +} + +/** + * wlan_vdev_mlme_get_des_chan() - get desired chan + * @vdev: VDEV object + * + * API to get the desired channel + * + * Return: + * @des_chan: Channel configured by user + */ +static inline struct wlan_channel *wlan_vdev_mlme_get_des_chan( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_mlme.des_chan; +} + +/** + * wlan_vdev_mlme_feat_cap_set() - set feature caps + * @vdev: VDEV object + * @cap: capabilities to be set + * + * API to set MLME feature capabilities + * + * Return: void + */ +static inline void wlan_vdev_mlme_feat_cap_set(struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + vdev->vdev_mlme.vdev_feat_caps |= cap; +} + +/** + * wlan_vdev_mlme_feat_cap_clear() - clear feature caps + * @vdev: VDEV object + * @cap: capabilities to be cleared + * + * API to clear MLME feature capabilities + * + * Return: void + */ +static inline void wlan_vdev_mlme_feat_cap_clear(struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + vdev->vdev_mlme.vdev_feat_caps &= ~cap; +} + +/** + * wlan_vdev_mlme_feat_cap_get() - get feature caps + * @vdev: VDEV object + * @cap: capabilities to be checked + * + * API to know MLME feature capability is set or not + * + * Return: 1 -- if capabilities set + * 0 -- if capabilities clear + */ +static inline uint8_t wlan_vdev_mlme_feat_cap_get(struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + return (vdev->vdev_mlme.vdev_feat_caps & cap) ? 1 : 0; +} + +/** + * wlan_vdev_mlme_feat_ext_cap_set() - set ext feature caps + * @vdev: VDEV object + * @cap: capabilities to be set + * + * API to set the MLME extensive feature capabilities + * + * Return: void + */ +static inline void wlan_vdev_mlme_feat_ext_cap_set( + struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + vdev->vdev_mlme.vdev_feat_ext_caps |= cap; +} + +/** + * wlan_vdev_mlme_feat_ext_cap_clear() - clear ext feature caps + * @vdev: VDEV object + * @cap: capabilities to be cleared + * + * API to clear the MLME extensive feature capabilities + * + * Return: void + */ +static inline void wlan_vdev_mlme_feat_ext_cap_clear( + struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + vdev->vdev_mlme.vdev_feat_ext_caps &= ~cap; +} + +/** + * wlan_vdev_mlme_feat_ext_cap_get() - get feature ext caps + * @vdev: VDEV object + * @cap: capabilities to be checked + * + * API to know MLME ext feature capability is set or not + * + * Return: 1 -- if capabilities set + * 0 -- if capabilities clear + */ +static inline uint8_t wlan_vdev_mlme_feat_ext_cap_get( + struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + return (vdev->vdev_mlme.vdev_feat_ext_caps & cap) ? 1 : 0; +} + +/** + * wlan_vdev_mlme_cap_set() - mlme caps set + * @vdev: VDEV object + * @cap: capabilities to be set + * + * API to set the MLME capabilities + * + * Return: void + */ +static inline void wlan_vdev_mlme_cap_set(struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + vdev->vdev_mlme.vdev_caps |= cap; +} + +/** + * wlan_vdev_mlme_cap_clear() - mlme caps clear + * @vdev: VDEV object + * @cap: capabilities to be cleared + * + * API to clear the MLME capabilities + * + * Return: void + */ +static inline void wlan_vdev_mlme_cap_clear(struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + vdev->vdev_mlme.vdev_caps &= ~cap; +} + +/** + * wlan_vdev_mlme_cap_get() - get mlme caps + * @vdev: VDEV object + * @cap: capabilities to be checked + * + * API to know MLME capability is set or not + * + * Return: 1 -- if capabilities set + * 0 -- if capabilities clear + */ +static inline uint8_t wlan_vdev_mlme_cap_get(struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + return (vdev->vdev_mlme.vdev_caps & cap) ? 1 : 0; +} + +/** + * wlan_vdev_mlme_get_state() - get mlme state + * @vdev: VDEV object + * + * API to get MLME state + * + * Return: state of MLME + */ +static inline enum wlan_vdev_state wlan_vdev_mlme_get_state( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_mlme.mlme_state; +} + +/** + * wlan_vdev_mlme_get_substate() - get mlme substate + * @vdev: VDEV object + * + * API to get VDEV MLME substate + * + * Return: substate of VDEV MLME + */ +static inline enum wlan_vdev_state wlan_vdev_mlme_get_substate( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_mlme.mlme_substate; +} + +/** + * wlan_vdev_set_selfpeer() - set self peer + * @vdev: VDEV object + * @peer: peer pointer + * + * API to set the self peer of VDEV + * + * Return: void + */ +static inline void wlan_vdev_set_selfpeer(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer) +{ + vdev->vdev_objmgr.self_peer = peer; +} + +/** + * wlan_vdev_get_selfpeer() - get self peer + * @vdev: VDEV object + * + * API to get the self peer of VDEV + * + * Return: + * @peer: peer pointer + */ +static inline struct wlan_objmgr_peer *wlan_vdev_get_selfpeer( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_objmgr.self_peer; +} + +/** + * wlan_vdev_set_bsspeer() - set bss peer + * @vdev: VDEV object + * @peer: BSS peer pointer + * + * API to set the BSS peer of VDEV + * + * Return: void + */ +static inline void wlan_vdev_set_bsspeer(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer) +{ + vdev->vdev_objmgr.bss_peer = peer; +} + +/** + * wlan_vdev_get_bsspeer() - get bss peer + * @vdev: VDEV object + * + * API to get the BSS peer of VDEV, wlan_objmgr_vdev_try_get_bsspeer API + * preferred to use outside obj manager to take and handle ref count of + * bss_peer with ref debug ID. + * + * Return: + * @peer: BSS peer pointer + */ +static inline struct wlan_objmgr_peer *wlan_vdev_get_bsspeer( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_objmgr.bss_peer; +} + +/** + * wlan_objmgr_vdev_find_peer_by_mac() - get a peer with given mac from vdev + * @vdev: VDEV object + * @peer_mac: mac address of the peer to be found + * @dbg_id: dbg_id of the module + * + * API to get and increment ref count of BSS peer of VDEV + * + * Return: + * @peer: peer pointer to the peer of the mac address + */ +struct wlan_objmgr_peer * +wlan_objmgr_vdev_find_peer_by_mac(struct wlan_objmgr_vdev *vdev, + uint8_t *peer_mac, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_vdev_try_get_bsspeer() - get and increment ref count of BSS peer + * of VDEV + * @vdev: VDEV object + * @id: Object Manager ref debug id + * + * API to get and increment ref count of BSS peer of VDEV + * + * Return: + * @peer: BSS peer pointer if bss peer is present and valid else NULL + */ +struct wlan_objmgr_peer *wlan_objmgr_vdev_try_get_bsspeer( + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id); +/** + * wlan_vdev_get_ospriv() - get os priv pointer + * @vdev: VDEV object + * + * API to get OS private pointer from VDEV + * + * Return: ospriv - private pointer + */ +static inline struct vdev_osif_priv *wlan_vdev_get_ospriv( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_nif.osdev; +} + +/** + * wlan_vdev_reset_ospriv() - reset os priv pointer + * @vdev: VDEV object + * + * API to reset OS private pointer in VDEV + * + * Return: void + */ +static inline void wlan_vdev_reset_ospriv(struct wlan_objmgr_vdev *vdev) +{ + vdev->vdev_nif.osdev = NULL; +} + +/** + * wlan_vdev_get_peer_count() - get vdev peer count + * @vdev: VDEV object + * + * API to get peer count from VDEV + * + * Return: peer_count - vdev's peer count + */ +static inline uint16_t wlan_vdev_get_peer_count(struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_objmgr.wlan_peer_count; +} + +/** + * DOC: Examples to use VDEV ref count APIs + * + * In all the scenarios, the pair of API should be followed + * other it lead to memory leak + * + * scenario 1: + * + * wlan_objmgr_vdev_obj_create() + * ---- + * wlan_objmgr_vdev_obj_delete() + * + * scenario 2: + * + * wlan_objmgr_vdev_get_ref() + * ---- + * the operations which are done on + * vdev object + * ---- + * wlan_objmgr_vdev_release_ref() + * + * scenario 3: + * + * API to retrieve vdev (xxx_get_vdev_xxx()) + * ---- + * the operations which are done on + * vdev object + * ---- + * wlan_objmgr_vdev_release_ref() + */ + +/** + * wlan_objmgr_vdev_get_ref() - increment ref count + * @vdev: VDEV object + * @id: Object Manager ref debug id + * + * API to increment ref count of vdev + * + * Return: void + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +void wlan_objmgr_vdev_get_ref_debug(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id, + const char *func, int line); + +#define wlan_objmgr_vdev_get_ref(vdev, dbgid) \ + wlan_objmgr_vdev_get_ref_debug(vdev, dbgid, __func__, __LINE__) +#else +void wlan_objmgr_vdev_get_ref(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id); +#endif + +/** + * wlan_objmgr_vdev_try_get_ref() - increment ref count, if allowed + * @vdev: VDEV object + * @id: Object Manager ref debug id + * + * API to increment ref count of vdev after checking valid object state + * + * Return: void + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +QDF_STATUS wlan_objmgr_vdev_try_get_ref_debug(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id, + const char *func, int line); + +#define wlan_objmgr_vdev_try_get_ref(vdev, dbgid) \ + wlan_objmgr_vdev_try_get_ref_debug(vdev, dbgid, \ + __func__, __LINE__) +#else +QDF_STATUS wlan_objmgr_vdev_try_get_ref(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id); +#endif + +/** + * wlan_objmgr_vdev_release_ref() - decrement ref count + * @vdev: VDEV object + * @id: Object Manager ref debug id + * + * API to decrement ref count of vdev, if ref count is 1, it initiates the + * VDEV deletion + * + * Return: void + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +void wlan_objmgr_vdev_release_ref_debug(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id, + const char *func, int line); + +#define wlan_objmgr_vdev_release_ref(vdev, dbgid)\ + wlan_objmgr_vdev_release_ref_debug(vdev, dbgid, \ + __func__, __LINE__) +#else +void wlan_objmgr_vdev_release_ref(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id); +#endif + +/** + * wlan_vdev_get_next_active_vdev_of_pdev() - get next active vdev + * @pdev: PDEV object + * @vdev_list: qdf_list_t + * @vdev: VDEV object + * @dbg_id: id of the caller + * + * API to get next active vdev object pointer of vdev + * + * Return: + * @vdev_next: VDEV object + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_vdev_get_next_active_vdev_of_pdev_debug( + struct wlan_objmgr_pdev *pdev, + qdf_list_t *vdev_list, + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_vdev_get_next_active_vdev_of_pdev(pdev, vdev_list, vdev, dbgid) \ + wlan_vdev_get_next_active_vdev_of_pdev_debug(pdev, vdev_list, \ + vdev, dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_vdev *wlan_vdev_get_next_active_vdev_of_pdev( + struct wlan_objmgr_pdev *pdev, + qdf_list_t *vdev_list, + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_pdev_peek_active_first_vdev() - get first active vdev from pdev list + * @pdev: PDEV object + * @dbg_id: id of the caller + * + * API to get the head active vdev of given pdev (of pdev's vdev list) + * + * Return: + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_pdev_peek_active_first_vdev_debug( + struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_pdev_peek_active_first_vdev(pdev, dbgid) \ + wlan_pdev_peek_active_first_vdev_debug(pdev, dbgid, \ + __func__, __LINE__) +#else +struct wlan_objmgr_vdev *wlan_pdev_peek_active_first_vdev( + struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_pdev_vdev_list_peek_active_head() - get first active vdev from pdev list + * @vdev: VDEV object + * @vdev_list: qdf_list_t + * @dbg_id: id of the caller + * + * API to get the head active vdev of given vdev (of pdev's vdev list) + * + * Return: + * @peer: head peer + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_pdev_vdev_list_peek_active_head_debug( + struct wlan_objmgr_pdev *pdev, + qdf_list_t *vdev_list, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line); + +#define wlan_pdev_vdev_list_peek_active_head(pdev, vdev_list, dbgid) \ + wlan_pdev_vdev_list_peek_active_head_debug(pdev, vdev_list, \ + dbgid, __func__, __LINE__) +#else +struct wlan_objmgr_vdev *wlan_pdev_vdev_list_peek_active_head( + struct wlan_objmgr_pdev *pdev, + qdf_list_t *vdev_list, + wlan_objmgr_ref_dbgid dbg_id); +#endif + +/** + * wlan_objmgr_vdev_peer_freed_notify() - Notifies modules about peer freed + * @vdev: VDEV object + * + * API to invokes registered callbacks to notify about peer freed + * + * Return: void + */ +void wlan_objmgr_vdev_peer_freed_notify(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_vdev_set_max_peer_count() - set max peer count + * @vdev: VDEV object + * @count: Max peer count + * + * API to set max peer count of VDEV + * + * Return: void + */ +static inline void wlan_vdev_set_max_peer_count(struct wlan_objmgr_vdev *vdev, + uint16_t count) +{ + vdev->vdev_objmgr.max_peer_count = count; +} + +/** + * wlan_vdev_get_max_peer_count() - get max peer count + * @vdev: VDEV object + * + * API to get max peer count of VDEV + * + * Return: max peer count + */ +static inline uint16_t wlan_vdev_get_max_peer_count( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_objmgr.max_peer_count; +} + +/** + * wlan_print_vdev_info() - print vdev members + * @vdev: vdev object pointer + * + * Return: void + */ +#ifdef WLAN_OBJMGR_DEBUG +void wlan_print_vdev_info(struct wlan_objmgr_vdev *vdev); +#else +static inline void wlan_print_vdev_info(struct wlan_objmgr_vdev *vdev) {} +#endif + +/** + * wlan_objmgr_vdev_trace_init_lock() - Initialize trace lock + * @vdev: vdev object pointer + * + * Return: void + */ +#ifdef WLAN_OBJMGR_TRACE +static inline void +wlan_objmgr_vdev_trace_init_lock(struct wlan_objmgr_vdev *vdev) +{ + wlan_objmgr_trace_init_lock(&vdev->vdev_objmgr.trace); +} +#else +static inline void +wlan_objmgr_vdev_trace_init_lock(struct wlan_objmgr_vdev *vdev) +{ +} +#endif + +/** + * wlan_objmgr_vdev_trace_deinit_lock() - Deinitialize trace lock + * @vdev: vdev object pointer + * + * Return: void + */ +#ifdef WLAN_OBJMGR_TRACE +static inline void +wlan_objmgr_vdev_trace_deinit_lock(struct wlan_objmgr_vdev *vdev) +{ + wlan_objmgr_trace_deinit_lock(&vdev->vdev_objmgr.trace); +} +#else +static inline void +wlan_objmgr_vdev_trace_deinit_lock(struct wlan_objmgr_vdev *vdev) +{ +} +#endif + +/** + * wlan_objmgr_vdev_trace_del_ref_list() - Delete trace ref list + * @vdev: vdev object pointer + * + * Return: void + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +static inline void +wlan_objmgr_vdev_trace_del_ref_list(struct wlan_objmgr_vdev *vdev) +{ + wlan_objmgr_trace_del_ref_list(&vdev->vdev_objmgr.trace); +} +#else +static inline void +wlan_objmgr_vdev_trace_del_ref_list(struct wlan_objmgr_vdev *vdev) +{ +} +#endif + +#endif /* _WLAN_OBJMGR_VDEV_OBJ_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_debug.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_debug.c new file mode 100644 index 0000000000000000000000000000000000000000..a6d9b242b374073b43cb1b0c9b883a9801d50e46 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_debug.c @@ -0,0 +1,754 @@ +/* + * + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/* + * DOC: Public APIs to perform debug operations on object manager + */ + +#include +#include +#include +#include +#include "wlan_objmgr_global_obj_i.h" +#include +#include +#include + +/* + * Default TTL (of FW) for mgmt frames is 5 sec, by considering all the other + * delays, arrived with this value + */ +#define LOG_DEL_OBJ_TIMEOUT_VALUE_MSEC 8000 +#define LOG_DEL_OBJ_DESTROY_DURATION_SEC 8 +/* + * The max duration for which a obj can be allowed to remain in L-state + * The duration should be higher than the psoc idle timeout. + */ +#define LOG_DEL_OBJ_DESTROY_ASSERT_DURATION_SEC 32 +#define LOG_DEL_OBJ_LIST_MAX_COUNT (3 + 5 + 48 + 4096) + +union wlan_objmgr_del_obj { + struct wlan_objmgr_psoc *obj_psoc; + struct wlan_objmgr_pdev *obj_pdev; + struct wlan_objmgr_vdev *obj_vdev; + struct wlan_objmgr_peer *obj_peer; +}; + +/** + * struct log_del_obj - Logically deleted Object + * @obj: Represents peer/vdev/pdev/psoc + * @node: List node from Logically deleted list + * @obj_type: Object type for peer/vdev/pdev/psoc + * @tstamp: Timestamp when node entered logically + * deleted state + */ +struct log_del_obj { + union wlan_objmgr_del_obj obj; + qdf_list_node_t node; + enum wlan_objmgr_obj_type obj_type; + qdf_time_t tstamp; +}; + +/** + * struct wlan_objmgr_debug_info - Objmgr debug info + * for Logically deleted object + * @obj_timer: Timer object + * @obj_list: list object having linking logically + * deleted nodes + * @list_lock: lock to protect list + */ +struct wlan_objmgr_debug_info { + qdf_timer_t obj_timer; + qdf_list_t obj_list; + qdf_spinlock_t list_lock; +}; + +static const char * +wlan_obj_type_get_obj_name(enum wlan_objmgr_obj_type obj_type) +{ + static const struct wlan_obj_type_to_name { + enum wlan_objmgr_obj_type obj_type; + const char *name; + } obj_type_name[WLAN_OBJ_TYPE_MAX] = { + {WLAN_PSOC_OP, "psoc"}, + {WLAN_PDEV_OP, "pdev"}, + {WLAN_VDEV_OP, "vdev"}, + {WLAN_PEER_OP, "peer"} + }; + uint8_t idx; + + for (idx = 0; idx < WLAN_OBJ_TYPE_MAX; idx++) { + if (obj_type == obj_type_name[idx].obj_type) + return obj_type_name[idx].name; + } + + return NULL; +} + +static uint8_t* +wlan_objmgr_debug_get_macaddr(union wlan_objmgr_del_obj *obj, + enum wlan_objmgr_obj_type obj_type) +{ + switch (obj_type) { + case WLAN_PSOC_OP: + return wlan_psoc_get_hw_macaddr(obj->obj_psoc); + case WLAN_PDEV_OP: + return wlan_pdev_get_hw_macaddr(obj->obj_pdev); + case WLAN_VDEV_OP: + return wlan_vdev_mlme_get_macaddr(obj->obj_vdev); + case WLAN_PEER_OP: + return wlan_peer_get_macaddr(obj->obj_peer); + default: + obj_mgr_err("invalid obj_type"); + return NULL; + } +} + +static void +wlan_objmgr_insert_ld_obj_to_list(struct wlan_objmgr_debug_info *debug_info, + qdf_list_node_t *node) +{ + /* Insert object to list with lock being held*/ + qdf_spin_lock_bh(&debug_info->list_lock); + + /* Start timer only when list is empty */ + if (qdf_list_empty(&debug_info->obj_list)) + qdf_timer_start(&debug_info->obj_timer, + LOG_DEL_OBJ_TIMEOUT_VALUE_MSEC); + + qdf_list_insert_back(&debug_info->obj_list, node); + qdf_spin_unlock_bh(&debug_info->list_lock); +} + +static void wlan_obj_type_get_obj(union wlan_objmgr_del_obj *obj, + union wlan_objmgr_del_obj *del_obj, + enum wlan_objmgr_obj_type obj_type) +{ + switch (obj_type) { + case WLAN_PSOC_OP: + del_obj->obj_psoc = obj->obj_psoc; + return; + case WLAN_PDEV_OP: + del_obj->obj_pdev = obj->obj_pdev; + return; + case WLAN_VDEV_OP: + del_obj->obj_vdev = obj->obj_vdev; + return; + case WLAN_PEER_OP: + del_obj->obj_peer = obj->obj_peer; + return; + default: + obj_mgr_err("invalid obj_type"); + return; + } +} + +void wlan_objmgr_notify_log_delete(void *obj, + enum wlan_objmgr_obj_type obj_type) +{ + struct wlan_objmgr_debug_info *debug_info; + const char *obj_name; + uint8_t *macaddr; + qdf_time_t tstamp; + struct log_del_obj *node; + union wlan_objmgr_del_obj *del_obj = (union wlan_objmgr_del_obj *)&obj; + + if (!obj) { + obj_mgr_err("object is null"); + return; + } + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + debug_info = g_umac_glb_obj->debug_info; + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + if (!debug_info) { + obj_mgr_err("debug_info is null"); + return; + } + + macaddr = wlan_objmgr_debug_get_macaddr(del_obj, obj_type); + if (!macaddr) { + obj_mgr_err("macaddr is null"); + return; + } + + obj_name = wlan_obj_type_get_obj_name(obj_type); + if (!obj_name) { + obj_mgr_err("obj_name is null"); + return; + } + + tstamp = qdf_system_ticks_to_msecs(qdf_system_ticks()) / 1000; + node = qdf_mem_malloc(sizeof(*node)); + if (!node) + return; + + wlan_obj_type_get_obj(del_obj, &node->obj, obj_type); + node->obj_type = obj_type; + node->tstamp = tstamp; + obj_mgr_debug("#%s : mac_addr: "QDF_MAC_ADDR_FMT" entered L-state", + obj_name, QDF_MAC_ADDR_REF(macaddr)); + wlan_objmgr_insert_ld_obj_to_list(debug_info, &node->node); +} + +static bool wlan_objmgr_del_obj_match(union wlan_objmgr_del_obj *obj, + union wlan_objmgr_del_obj *del_obj, + enum wlan_objmgr_obj_type obj_type) +{ + switch (obj_type) { + case WLAN_PSOC_OP: + if (del_obj->obj_psoc == obj->obj_psoc) + return true; + case WLAN_PDEV_OP: + if (del_obj->obj_pdev == obj->obj_pdev) + return true; + case WLAN_VDEV_OP: + if (del_obj->obj_vdev == obj->obj_vdev) + return true; + case WLAN_PEER_OP: + if (del_obj->obj_peer == obj->obj_peer) + return true; + default: + return false; + } +} + +static void +wlan_objmgr_rem_ld_obj_from_list(union wlan_objmgr_del_obj *obj, + struct wlan_objmgr_debug_info *debug_info, + enum wlan_objmgr_obj_type obj_type) +{ + qdf_list_node_t *node = NULL; + struct log_del_obj *obj_to_remove = NULL; + qdf_list_t *list; + QDF_STATUS status; + + list = &debug_info->obj_list; + qdf_spin_lock_bh(&debug_info->list_lock); + status = qdf_list_peek_front(list, &node); + + while (QDF_IS_STATUS_SUCCESS(status)) { + obj_to_remove = qdf_container_of(node, + struct log_del_obj, node); + if (wlan_objmgr_del_obj_match(obj, &obj_to_remove->obj, + obj_type) && + obj_to_remove->obj_type == obj_type) { + status = qdf_list_remove_node(list, + &obj_to_remove->node); + /* Stop timer if list is empty */ + if (QDF_IS_STATUS_SUCCESS(status)) { + if (qdf_list_empty(&debug_info->obj_list)) + qdf_timer_stop(&debug_info->obj_timer); + qdf_mem_free(obj_to_remove); + } + break; + } + status = qdf_list_peek_next(list, node, &node); + }; + qdf_spin_unlock_bh(&debug_info->list_lock); +} + +void wlan_objmgr_notify_destroy(void *obj, + enum wlan_objmgr_obj_type obj_type) +{ + struct wlan_objmgr_debug_info *debug_info; + uint8_t *macaddr; + const char *obj_name; + union wlan_objmgr_del_obj *del_obj = (union wlan_objmgr_del_obj *)&obj; + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + debug_info = g_umac_glb_obj->debug_info; + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + if (!debug_info) { + obj_mgr_err("debug_info is null"); + return; + } + macaddr = wlan_objmgr_debug_get_macaddr(del_obj, obj_type); + if (!macaddr) { + obj_mgr_err("macaddr is null"); + return; + } + obj_name = wlan_obj_type_get_obj_name(obj_type); + if (!obj_name) { + obj_mgr_err("obj_name is null"); + return; + } + obj_mgr_debug("#%s : macaddr: "QDF_MAC_ADDR_FMT" exited L-state", + obj_name, QDF_MAC_ADDR_REF(macaddr)); + + wlan_objmgr_rem_ld_obj_from_list(del_obj, + debug_info, obj_type); +} + +/** + * wlan_objmgr_debug_obj_destroyed_panic() - Panic in case obj is in L-state + * for long + * @obj_name: The name of the module ID + * + * This will invoke panic in the case that the obj is in logically destroyed + * state for a long time. The panic is invoked only in case feature flag + * WLAN_OBJMGR_PANIC_ON_BUG is enabled + * + * Return: None + */ +#ifdef CONFIG_LEAK_DETECTION +static inline void wlan_objmgr_debug_obj_destroyed_panic(const char *obj_name) +{ + obj_mgr_alert("#%s in L-state for too long!", obj_name); + QDF_BUG(0); +} +#else +static inline void wlan_objmgr_debug_obj_destroyed_panic(const char *obj_name) +{ +} +#endif + +/* + * wlan_objmgr_print_pending_refs() - Print pending refs according to the obj + * @obj: Represents peer/vdev/pdev/psoc + * @obj_type: Object type for peer/vdev/pdev/psoc + * + * Return: None + */ +static void wlan_objmgr_print_pending_refs(union wlan_objmgr_del_obj *obj, + enum wlan_objmgr_obj_type obj_type) +{ + switch (obj_type) { + case WLAN_PSOC_OP: + wlan_objmgr_print_ref_ids(obj->obj_psoc->soc_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + break; + case WLAN_PDEV_OP: + wlan_objmgr_print_ref_ids(obj->obj_pdev->pdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + break; + case WLAN_VDEV_OP: + wlan_objmgr_print_ref_ids(obj->obj_vdev->vdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + break; + case WLAN_PEER_OP: + wlan_objmgr_print_ref_ids(obj->obj_peer->peer_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + break; + default: + obj_mgr_debug("invalid obj_type"); + } +} + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +static void +wlan_objmgr_print_ref_func_line(struct wlan_objmgr_trace_func *func_head, + uint32_t id) +{ + uint32_t ref_cnt; + struct wlan_objmgr_line_ref_node *tmp_ln_node; + + obj_mgr_debug("ID: %s", string_from_dbgid(id)); + while (func_head) { + obj_mgr_debug("Func: %s", func_head->func); + tmp_ln_node = func_head->line_head; + while (tmp_ln_node) { + ref_cnt = qdf_atomic_read(&tmp_ln_node->line_ref.cnt); + obj_mgr_debug("line: %d cnt: %d", + tmp_ln_node->line_ref.line, + ref_cnt); + tmp_ln_node = tmp_ln_node->next; + } + func_head = func_head->next; + } +} + +static void +wlan_objmgr_trace_print_ref(union wlan_objmgr_del_obj *obj, + enum wlan_objmgr_obj_type obj_type) +{ + uint32_t id; + struct wlan_objmgr_trace_func *func_head; + struct wlan_objmgr_trace *trace; + struct wlan_objmgr_vdev_objmgr *vdev_obj; + struct wlan_objmgr_peer_objmgr *peer_obj; + + switch (obj_type) { + case WLAN_VDEV_OP: + vdev_obj = &obj->obj_vdev->vdev_objmgr; + trace = &vdev_obj->trace; + for (id = 0; id < WLAN_REF_ID_MAX; id++) { + if (qdf_atomic_read(&vdev_obj->ref_id_dbg[id])) { + obj_mgr_debug("Reference:"); + + func_head = trace->references[id].head; + wlan_objmgr_print_ref_func_line(func_head, id); + + obj_mgr_debug("Dereference:"); + func_head = trace->dereferences[id].head; + wlan_objmgr_print_ref_func_line(func_head, id); + } + } + break; + case WLAN_PEER_OP: + peer_obj = &obj->obj_peer->peer_objmgr; + trace = &peer_obj->trace; + for (id = 0; id < WLAN_REF_ID_MAX; id++) { + if (qdf_atomic_read(&vdev_obj->ref_id_dbg[id])) { + obj_mgr_debug("Reference:"); + + func_head = trace->references[id].head; + wlan_objmgr_print_ref_func_line(func_head, id); + + obj_mgr_debug("Dereference:"); + func_head = trace->dereferences[id].head; + wlan_objmgr_print_ref_func_line(func_head, id); + } + } + break; + default: + break; + } +} +#else +static void +wlan_objmgr_trace_print_ref(union wlan_objmgr_del_obj *obj, + enum wlan_objmgr_obj_type obj_type) +{ +} +#endif + +/* timeout handler for iterating logically deleted object */ + +static void wlan_objmgr_iterate_log_del_obj_handler(void *timer_arg) +{ + enum wlan_objmgr_obj_type obj_type; + uint8_t *macaddr; + const char *obj_name; + struct wlan_objmgr_debug_info *debug_info; + qdf_list_node_t *node; + qdf_list_t *log_del_obj_list = NULL; + struct log_del_obj *del_obj = NULL; + qdf_time_t cur_tstamp; + QDF_STATUS status; + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + debug_info = g_umac_glb_obj->debug_info; + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + if (!debug_info) { + obj_mgr_err("debug_info is not initialized"); + return; + } + + log_del_obj_list = &debug_info->obj_list; + qdf_spin_lock_bh(&debug_info->list_lock); + + status = qdf_list_peek_front(log_del_obj_list, &node); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_spin_unlock_bh(&debug_info->list_lock); + return; + } + + /* compute the current timestamp in seconds + * need to compare with destroy duration of object + */ + cur_tstamp = (qdf_system_ticks_to_msecs(qdf_system_ticks()) / 1000); + + do { + del_obj = qdf_container_of(node, struct log_del_obj, node); + obj_type = del_obj->obj_type; + macaddr = wlan_objmgr_debug_get_macaddr(&del_obj->obj, + obj_type); + obj_name = wlan_obj_type_get_obj_name(obj_type); + + /* If object is in logically deleted state for time more than + * destroy duration, print the object type and MAC + */ + if (cur_tstamp < (del_obj->tstamp + + LOG_DEL_OBJ_DESTROY_DURATION_SEC)) { + break; + } + if (!macaddr) { + obj_mgr_err("macaddr is null"); + QDF_BUG(0); + break; + } + if (!obj_name) { + obj_mgr_err("obj_name is null"); + QDF_BUG(0); + break; + } + + obj_mgr_alert("#%s in L-state,MAC: " QDF_MAC_ADDR_FMT, + obj_name, QDF_MAC_ADDR_REF(macaddr)); + wlan_objmgr_print_pending_refs(&del_obj->obj, obj_type); + + wlan_objmgr_trace_print_ref(&del_obj->obj, obj_type); + if (cur_tstamp > del_obj->tstamp + + LOG_DEL_OBJ_DESTROY_ASSERT_DURATION_SEC) { + if (!qdf_is_recovering() && !qdf_is_fw_down()) + wlan_objmgr_debug_obj_destroyed_panic(obj_name); + } + + status = qdf_list_peek_next(log_del_obj_list, node, &node); + + } while (QDF_IS_STATUS_SUCCESS(status)); + + qdf_timer_mod(&debug_info->obj_timer, LOG_DEL_OBJ_TIMEOUT_VALUE_MSEC); + qdf_spin_unlock_bh(&debug_info->list_lock); +} + +void wlan_objmgr_debug_info_deinit(void) +{ + struct log_del_obj *obj_to_remove; + struct wlan_objmgr_debug_info *debug_info; + qdf_list_node_t *node = NULL; + qdf_list_t *list; + bool is_child_alive = false; + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + debug_info = g_umac_glb_obj->debug_info; + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + if (!debug_info) { + obj_mgr_err("debug_info is not initialized"); + return; + } + list = &debug_info->obj_list; + + qdf_spin_lock_bh(&debug_info->list_lock); + + /* Check if any child of global object is in L-state and remove it, + * ideally it shouldn't be + */ + while (qdf_list_remove_front(list, &node) == QDF_STATUS_SUCCESS) { + is_child_alive = true; + obj_to_remove = qdf_container_of(node, + struct log_del_obj, node); + if (qdf_list_empty(&debug_info->obj_list)) + qdf_timer_stop(&debug_info->obj_timer); + /* free the object */ + qdf_mem_free(obj_to_remove); + } + qdf_spin_unlock_bh(&debug_info->list_lock); + + if (is_child_alive) { + obj_mgr_alert("This shouldn't happen!!, No child of global" + "object should be in L-state, as global obj" + "is going to destroy"); + QDF_BUG(0); + } + + /* free timer, destroy spinlock, list and debug_info object as + * global object is going to free + */ + qdf_list_destroy(list); + qdf_timer_free(&debug_info->obj_timer); + qdf_spinlock_destroy(&debug_info->list_lock); + qdf_mem_free(debug_info); + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + g_umac_glb_obj->debug_info = NULL; + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); +} + +void wlan_objmgr_debug_info_init(void) +{ + struct wlan_objmgr_debug_info *debug_info; + + debug_info = qdf_mem_malloc(sizeof(*debug_info)); + if (!debug_info) { + g_umac_glb_obj->debug_info = NULL; + return; + } + + /* Initialize timer with timeout handler */ + qdf_timer_init(NULL, &debug_info->obj_timer, + wlan_objmgr_iterate_log_del_obj_handler, + NULL, QDF_TIMER_TYPE_WAKE_APPS); + + /* Initialze the node_count to 0 and create list*/ + qdf_list_create(&debug_info->obj_list, + LOG_DEL_OBJ_LIST_MAX_COUNT); + + /* Initialize the spin_lock to protect list */ + qdf_spinlock_create(&debug_info->list_lock); + + /* attach debug_info object to global object */ + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + g_umac_glb_obj->debug_info = debug_info; + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); +} + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +void +wlan_objmgr_trace_init_lock(struct wlan_objmgr_trace *trace) +{ + qdf_spinlock_create(&trace->trace_lock); +} + +void +wlan_objmgr_trace_deinit_lock(struct wlan_objmgr_trace *trace) +{ + qdf_spinlock_destroy(&trace->trace_lock); +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +static inline struct wlan_objmgr_line_ref_node* +wlan_objmgr_trace_line_node_alloc(int line) +{ + struct wlan_objmgr_line_ref_node *line_node; + + line_node = qdf_mem_malloc_atomic(sizeof(*line_node)); + if (!line_node) + return NULL; + + line_node->line_ref.line = line; + qdf_atomic_set(&line_node->line_ref.cnt, 1); + line_node->next = NULL; + + return line_node; +} + +static inline struct wlan_objmgr_trace_func* +wlan_objmgr_trace_ref_node_alloc(const char *func, int line) +{ + struct wlan_objmgr_trace_func *func_node; + struct wlan_objmgr_line_ref_node *line_node; + + func_node = qdf_mem_malloc_atomic(sizeof(*func_node)); + if (!func_node) + return NULL; + + line_node = wlan_objmgr_trace_line_node_alloc(line); + if (!line_node) { + qdf_mem_free(func_node); + return NULL; + } + + func_node->line_head = line_node; + qdf_str_lcopy(func_node->func, func, WLAN_OBJMGR_TRACE_FUNC_SIZE); + func_node->next = NULL; + + return func_node; +} + +static inline void +wlan_objmgr_trace_check_line(struct wlan_objmgr_trace_func *tmp_func_node, + struct wlan_objmgr_trace *trace, int line) +{ + struct wlan_objmgr_line_ref_node *line_node; + struct wlan_objmgr_line_ref_node *tmp_ln_node; + + tmp_ln_node = tmp_func_node->line_head; + while (tmp_ln_node) { + line_node = tmp_ln_node; + if (tmp_ln_node->line_ref.line == line) { + qdf_atomic_inc(&tmp_ln_node->line_ref.cnt); + break; + } + tmp_ln_node = tmp_ln_node->next; + } + if (!tmp_ln_node) { + tmp_ln_node = wlan_objmgr_trace_line_node_alloc(line); + if (tmp_ln_node) + line_node->next = tmp_ln_node; + } +} + +void +wlan_objmgr_trace_ref(struct wlan_objmgr_trace_func **func_head, + struct wlan_objmgr_trace *trace, + const char *func, int line) +{ + struct wlan_objmgr_trace_func *tmp_func_node; + struct wlan_objmgr_trace_func *func_node; + + qdf_spin_lock_bh(&trace->trace_lock); + if (!*func_head) { + tmp_func_node = wlan_objmgr_trace_ref_node_alloc(func, line); + if (tmp_func_node) + *func_head = tmp_func_node; + } else { + tmp_func_node = *func_head; + while (tmp_func_node) { + func_node = tmp_func_node; + if (!qdf_str_ncmp(tmp_func_node->func, func, + WLAN_OBJMGR_TRACE_FUNC_SIZE - 1)) { + wlan_objmgr_trace_check_line(tmp_func_node, + trace, line); + break; + } + tmp_func_node = tmp_func_node->next; + } + + if (!tmp_func_node) { + tmp_func_node = wlan_objmgr_trace_ref_node_alloc(func, + line); + if (tmp_func_node) + func_node->next = tmp_func_node; + } + } + qdf_spin_unlock_bh(&trace->trace_lock); +} + +void +wlan_objmgr_trace_del_line(struct wlan_objmgr_line_ref_node **line_head) +{ + struct wlan_objmgr_line_ref_node *del_tmp_node; + struct wlan_objmgr_line_ref_node *line_node; + + line_node = *line_head; + while (line_node) { + del_tmp_node = line_node; + line_node = line_node->next; + qdf_mem_free(del_tmp_node); + } + *line_head = NULL; +} + +void +wlan_objmgr_trace_del_ref_list(struct wlan_objmgr_trace *trace) +{ + struct wlan_objmgr_trace_func *func_node; + struct wlan_objmgr_trace_func *del_tmp_node; + uint32_t id; + + qdf_spin_lock_bh(&trace->trace_lock); + for (id = 0; id < WLAN_REF_ID_MAX; id++) { + func_node = trace->references[id].head; + while (func_node) { + del_tmp_node = func_node; + wlan_objmgr_trace_del_line(&del_tmp_node->line_head); + func_node = func_node->next; + qdf_mem_free(del_tmp_node); + } + trace->references[id].head = NULL; + } + for (id = 0; id < WLAN_REF_ID_MAX; id++) { + func_node = trace->dereferences[id].head; + while (func_node) { + del_tmp_node = func_node; + wlan_objmgr_trace_del_line(&del_tmp_node->line_head); + func_node = func_node->next; + qdf_mem_free(del_tmp_node); + } + trace->dereferences[id].head = NULL; + } + qdf_spin_unlock_bh(&trace->trace_lock); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_global_obj.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_global_obj.c new file mode 100644 index 0000000000000000000000000000000000000000..f307d213bba6238c33f510250917d6398d953c3a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_global_obj.c @@ -0,0 +1,860 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Public APIs to perform operations on Global objects + */ + +#include "wlan_objmgr_global_obj_i.h" +#include +#include "wlan_objmgr_psoc_obj.h" +#include "qdf_mem.h" +#include + +/* Global object, it is declared globally */ +struct wlan_objmgr_global *g_umac_glb_obj; + +/* +** APIs to Create/Delete Global object APIs +*/ +QDF_STATUS wlan_objmgr_global_obj_init(void) +{ + struct wlan_objmgr_global *umac_global_obj; + + /* If it is already created, ignore */ + if (g_umac_glb_obj) { + obj_mgr_err("Global object is already created"); + return QDF_STATUS_E_FAILURE; + } + + /* Allocation of memory for Global object */ + umac_global_obj = (struct wlan_objmgr_global *)qdf_mem_malloc( + sizeof(*umac_global_obj)); + if (!umac_global_obj) + return QDF_STATUS_E_NOMEM; + + /* Store Global object pointer in Global variable */ + g_umac_glb_obj = umac_global_obj; + /* Initialize spinlock */ + qdf_spinlock_create(&g_umac_glb_obj->global_lock); + wlan_objmgr_debug_info_init(); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_global_obj_init); + +QDF_STATUS wlan_objmgr_global_obj_deinit(void) +{ + /* If it is already destroyed */ + if (!g_umac_glb_obj) { + obj_mgr_err("Global object is not allocated"); + return QDF_STATUS_E_FAILURE; + } + + wlan_objmgr_debug_info_deinit(); + + if (QDF_STATUS_SUCCESS == wlan_objmgr_global_obj_can_destroyed()) { + qdf_spinlock_destroy(&g_umac_glb_obj->global_lock); + qdf_mem_free(g_umac_glb_obj); + g_umac_glb_obj = NULL; + } else { + obj_mgr_err("PSOCs are leaked can't free global objmgr ctx"); + WLAN_OBJMGR_BUG(0); + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_global_obj_deinit); + +/** + ** APIs to register/unregister handlers + */ +QDF_STATUS wlan_objmgr_register_psoc_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->psoc_create_handler[id]) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->psoc_create_handler[id] = handler; + g_umac_glb_obj->psoc_create_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_register_psoc_create_handler); + +QDF_STATUS wlan_objmgr_unregister_psoc_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->psoc_create_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->psoc_create_handler[id] = NULL; + g_umac_glb_obj->psoc_create_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_unregister_psoc_create_handler); + +QDF_STATUS wlan_objmgr_register_psoc_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->psoc_destroy_handler[id]) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->psoc_destroy_handler[id] = handler; + g_umac_glb_obj->psoc_destroy_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_register_psoc_destroy_handler); + +QDF_STATUS wlan_objmgr_unregister_psoc_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->psoc_destroy_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->psoc_destroy_handler[id] = NULL; + g_umac_glb_obj->psoc_destroy_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_unregister_psoc_destroy_handler); + +QDF_STATUS wlan_objmgr_register_psoc_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->psoc_status_handler[id]) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->psoc_status_handler[id] = handler; + g_umac_glb_obj->psoc_status_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_psoc_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->psoc_status_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->psoc_status_handler[id] = NULL; + g_umac_glb_obj->psoc_status_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS wlan_objmgr_register_pdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->pdev_create_handler[id]) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->pdev_create_handler[id] = handler; + g_umac_glb_obj->pdev_create_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_register_pdev_create_handler); + +QDF_STATUS wlan_objmgr_unregister_pdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->pdev_create_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->pdev_create_handler[id] = NULL; + g_umac_glb_obj->pdev_create_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_unregister_pdev_create_handler); + +QDF_STATUS wlan_objmgr_register_pdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->pdev_destroy_handler[id]) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->pdev_destroy_handler[id] = handler; + g_umac_glb_obj->pdev_destroy_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_register_pdev_destroy_handler); + +QDF_STATUS wlan_objmgr_unregister_pdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->pdev_destroy_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for Component %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->pdev_destroy_handler[id] = NULL; + g_umac_glb_obj->pdev_destroy_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_unregister_pdev_destroy_handler); + +QDF_STATUS wlan_objmgr_register_pdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->pdev_status_handler[id]) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->pdev_status_handler[id] = handler; + g_umac_glb_obj->pdev_status_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_pdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->pdev_status_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for Component %d is not registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->pdev_status_handler[id] = NULL; + g_umac_glb_obj->pdev_status_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS wlan_objmgr_register_vdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->vdev_create_handler[id]) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->vdev_create_handler[id] = handler; + g_umac_glb_obj->vdev_create_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_vdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->vdev_create_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->vdev_create_handler[id] = NULL; + g_umac_glb_obj->vdev_create_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_register_vdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->vdev_destroy_handler[id]) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->vdev_destroy_handler[id] = handler; + g_umac_glb_obj->vdev_destroy_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_vdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->vdev_destroy_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->vdev_destroy_handler[id] = NULL; + g_umac_glb_obj->vdev_destroy_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_register_vdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->vdev_status_handler[id]) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->vdev_status_handler[id] = handler; + g_umac_glb_obj->vdev_status_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_vdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->vdev_status_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for Component %d is not registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->vdev_status_handler[id] = NULL; + g_umac_glb_obj->vdev_status_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_register_vdev_peer_free_notify_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_peer_free_notify_handler handler) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + WLAN_OBJMGR_BUG(0); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->vdev_peer_free_notify_handler[id]) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Store handler in Global object table */ + g_umac_glb_obj->vdev_peer_free_notify_handler[id] = handler; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_vdev_peer_free_notify_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_peer_free_notify_handler handler) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + WLAN_OBJMGR_BUG(0); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->vdev_peer_free_notify_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for Component %d is not registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers to NULL */ + g_umac_glb_obj->vdev_peer_free_notify_handler[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_register_peer_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->peer_create_handler[id]) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->peer_create_handler[id] = handler; + g_umac_glb_obj->peer_create_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS wlan_objmgr_unregister_peer_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->peer_create_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->peer_create_handler[id] = NULL; + g_umac_glb_obj->peer_create_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_register_peer_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->peer_destroy_handler[id]) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->peer_destroy_handler[id] = handler; + g_umac_glb_obj->peer_destroy_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_peer_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->peer_destroy_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->peer_destroy_handler[id] = NULL; + g_umac_glb_obj->peer_destroy_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_register_peer_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->peer_status_handler[id]) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->peer_status_handler[id] = handler; + g_umac_glb_obj->peer_status_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_peer_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->peer_status_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->peer_status_handler[id] = NULL; + g_umac_glb_obj->peer_status_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_psoc_object_attach(struct wlan_objmgr_psoc *psoc) +{ + uint8_t index = 0; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* Find free slot in PSOC table, store the PSOC */ + while (index < WLAN_OBJMGR_MAX_DEVICES) { + if (!g_umac_glb_obj->psoc[index]) { + /* Found free slot, store psoc */ + g_umac_glb_obj->psoc[index] = psoc; + psoc->soc_objmgr.psoc_id = index; + status = QDF_STATUS_SUCCESS; + break; + } + index++; + } + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return status; +} + +QDF_STATUS wlan_objmgr_psoc_object_detach(struct wlan_objmgr_psoc *psoc) +{ + uint8_t psoc_id; + + psoc_id = psoc->soc_objmgr.psoc_id; + QDF_BUG(psoc_id < WLAN_OBJMGR_MAX_DEVICES); + if (psoc_id >= WLAN_OBJMGR_MAX_DEVICES) + return QDF_STATUS_E_INVAL; + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + g_umac_glb_obj->psoc[psoc_id] = NULL; + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_global_obj_can_destroyed(void) +{ + uint8_t index = 0; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* Check whether all PSOCs are freed */ + while (index < WLAN_OBJMGR_MAX_DEVICES) { + if (g_umac_glb_obj->psoc[index]) { + status = QDF_STATUS_E_FAILURE; + break; + } + index++; + } + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + return status; +} +qdf_export_symbol(wlan_objmgr_global_obj_can_destroyed); + +void wlan_objmgr_print_ref_ids(qdf_atomic_t *id, + QDF_TRACE_LEVEL log_level) +{ + uint32_t i; + uint32_t pending_ref; + + obj_mgr_log_level(log_level, "Pending references of object"); + for (i = 0; i < WLAN_REF_ID_MAX; i++) { + pending_ref = qdf_atomic_read(&id[i]); + if (pending_ref) + obj_mgr_log_level(log_level, "%s -- %d", + string_from_dbgid(i), pending_ref); + } + + return; +} + +QDF_STATUS wlan_objmgr_iterate_psoc_list( + wlan_objmgr_psoc_handler handler, + void *arg, wlan_objmgr_ref_dbgid dbg_id) +{ + uint8_t index = 0; + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + + while (index < WLAN_OBJMGR_MAX_DEVICES) { + if (g_umac_glb_obj->psoc[index]) { + handler((void *)g_umac_glb_obj->psoc[index], + arg, index); + } + index++; + } + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(wlan_objmgr_iterate_psoc_list); + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_global_obj_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_global_obj_i.h new file mode 100644 index 0000000000000000000000000000000000000000..b9e24673a6878a757c0eb4f836e1bf38a4ce072b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_global_obj_i.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Define the global data structure of UMAC + */ +#ifndef _WLAN_OBJMGR_GLOBAL_OBJ_I_H_ +#define _WLAN_OBJMGR_GLOBAL_OBJ_I_H_ + +#include "wlan_objmgr_cmn.h" + +struct wlan_objmgr_debug_info; +/** + * struct wlan_objmgr_global - Global object definition + * @psoc[]: Array of PSOCs to maintain PSOC's list, + * its optional + * @psoc_create_handler[]: PSOC create handler array + * @psoc_create_handler_arg[]: PSOC create handler args array + * @psoc_destroy_handler[]: PSOC destroy handler array + * @psoc_destroy_handler_arg[]: PSOC destroy handler args array + * @psoc_status_handler[]: PSOC status handler array + * @psoc_status_handler_arg[]: PSOC status handler args array + * @pdev_create_handler[]: PDEV create handler array + * @pdev_create_handler_arg[]: PDEV create handler args array + * @pdev_destroy_handler[]: PDEV destroy handler array + * @pdev_destroy_handler_arg[]: PDEV destroy handler args array + * @pdev_status_handler[]: PDEV status handler array + * @pdev_status_handler_arg[]: PDEV status handler args array + * @vdev_create_handler[]: VDEV create handler array + * @vdev_create_handler_arg[]: VDEV create handler args array + * @vdev_destroy_handler[]: VDEV destroy handler array + * @vdev_destroy_handler_arg[]: VDEV destroy handler args array + * @vdev_peer_free_notify_handler[]: VDEV peer free notify handler array + * @vdev_status_handler[]: VDEV status handler array + * @vdev_status_handler_arg[]: VDEV status handler args array + * @peer_create_handler[]: PEER create handler array + * @peer_create_handler_arg[]: PEER create handler args array + * @peer_destroy_handler[]: PEER destroy handler array + * @peer_destroy_handler_arg[]: PEER destroy handler args array + * @peer_status_handler[]: PEER status handler array + * @peer_status_handler_arg[]: PEER status handler args array + * @debug_info: Objmgr debug information + * @global_lock: Global lock + */ +struct wlan_objmgr_global { + struct wlan_objmgr_psoc *psoc[WLAN_OBJMGR_MAX_DEVICES]; + wlan_objmgr_psoc_create_handler + psoc_create_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *psoc_create_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_psoc_destroy_handler + psoc_destroy_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *psoc_destroy_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_psoc_status_handler + psoc_status_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *psoc_status_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_pdev_create_handler + pdev_create_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *pdev_create_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_pdev_destroy_handler + pdev_destroy_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *pdev_destroy_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_pdev_status_handler + pdev_status_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *pdev_status_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_vdev_create_handler + vdev_create_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *vdev_create_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_vdev_destroy_handler + vdev_destroy_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *vdev_destroy_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_vdev_peer_free_notify_handler + vdev_peer_free_notify_handler[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_vdev_status_handler + vdev_status_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *vdev_status_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_peer_create_handler + peer_create_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *peer_create_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_peer_destroy_handler + peer_destroy_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *peer_destroy_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_peer_status_handler + peer_status_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *peer_status_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + struct wlan_objmgr_debug_info *debug_info; + qdf_spinlock_t global_lock; +}; + +#define MAX_SLEEP_ITERATION 5 + +extern struct wlan_objmgr_global *g_umac_glb_obj; + +/** + * wlan_objmgr_psoc_object_attach() - attach psoc to global object + * @psoc - PSOC object + * + * attaches PSOC to global psoc list + * + * Return: SUCCESS + * Failure (Max supported PSOCs exceeded) + */ +QDF_STATUS wlan_objmgr_psoc_object_attach( + struct wlan_objmgr_psoc *psoc); + +/** + * wlan_objmgr_psoc_object_detach() - detach psoc from global object + * @psoc - PSOC object + * + * detaches PSOC from global psoc list + * + * Return: SUCCESS + * Failure (if list is empty and PSOC is not present) + */ +QDF_STATUS wlan_objmgr_psoc_object_detach( + struct wlan_objmgr_psoc *psoc); + +/** + * wlan_objmgr_print_ref_ids() - Print ref counts of modules + * @id - array of ref debug + * @log_level - log level + * + * Itertes through array, and prints the ref count debug + * + * Return: nothing + */ +void wlan_objmgr_print_ref_ids(qdf_atomic_t *id, + QDF_TRACE_LEVEL log_level); +#endif /* _WLAN_OBJMGR_GLOBAL_OBJ_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_pdev_obj.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_pdev_obj.c new file mode 100644 index 0000000000000000000000000000000000000000..06311d9c1127f5ac6d3c62c19bf57d15312a309e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_pdev_obj.c @@ -0,0 +1,1160 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Public APIs to perform operations on Global objects + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_objmgr_global_obj_i.h" +#include "wlan_objmgr_psoc_obj_i.h" +#include "wlan_objmgr_pdev_obj_i.h" + + +/** + ** APIs to Create/Delete Global object APIs + */ +static QDF_STATUS wlan_objmgr_pdev_object_status( + struct wlan_objmgr_pdev *pdev) +{ + uint8_t id; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + wlan_pdev_obj_lock(pdev); + /* Iterate through all components to derive the object status */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + /* If component disabled, Ignore */ + if (pdev->obj_status[id] == QDF_STATUS_COMP_DISABLED) { + continue; + /* If component operates in Async, status is Partially created, + break */ + } else if (pdev->obj_status[id] == QDF_STATUS_COMP_ASYNC) { + if (!pdev->pdev_comp_priv_obj[id]) { + status = QDF_STATUS_COMP_ASYNC; + break; + } + /* If component failed to allocate its object, treat it as + failure, complete object need to be cleaned up */ + } else if ((pdev->obj_status[id] == QDF_STATUS_E_NOMEM) || + (pdev->obj_status[id] == QDF_STATUS_E_FAILURE)) { + status = QDF_STATUS_E_FAILURE; + break; + } + } + wlan_pdev_obj_unlock(pdev); + return status; +} + +static QDF_STATUS wlan_objmgr_pdev_obj_free(struct wlan_objmgr_pdev *pdev) +{ + + uint8_t pdev_id; + + if (!pdev) { + obj_mgr_err("pdev obj is NULL"); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + /* Detach PDEV from PSOC PDEV's list */ + if (wlan_objmgr_psoc_pdev_detach(pdev->pdev_objmgr.wlan_psoc, pdev) == + QDF_STATUS_E_FAILURE) { + obj_mgr_err("PSOC PDEV detach failed: pdev-id: %d", pdev_id); + return QDF_STATUS_E_FAILURE; + } + qdf_spinlock_destroy(&pdev->pdev_lock); + qdf_mem_free(pdev); + + return QDF_STATUS_SUCCESS; +} + +struct wlan_objmgr_pdev *wlan_objmgr_pdev_obj_create( + struct wlan_objmgr_psoc *psoc, + struct pdev_osif_priv *osdev_priv) +{ + struct wlan_objmgr_pdev *pdev; + uint8_t id; + wlan_objmgr_pdev_create_handler handler; + wlan_objmgr_pdev_status_handler s_handler; + void *arg; + QDF_STATUS obj_status; + + if (!psoc) { + obj_mgr_err("psoc is NULL"); + return NULL; + } + /* Allocate PDEV object's memory */ + pdev = qdf_mem_malloc(sizeof(*pdev)); + if (!pdev) + return NULL; + + pdev->obj_state = WLAN_OBJ_STATE_ALLOCATED; + /* Initialize PDEV spinlock */ + qdf_spinlock_create(&pdev->pdev_lock); + /* Attach PDEV with PSOC */ + if (wlan_objmgr_psoc_pdev_attach(psoc, pdev) + != QDF_STATUS_SUCCESS) { + obj_mgr_err("pdev psoc attach failed"); + qdf_spinlock_destroy(&pdev->pdev_lock); + qdf_mem_free(pdev); + return NULL; + } + /* Save PSOC object pointer in PDEV */ + wlan_pdev_set_psoc(pdev, psoc); + /* Initialize PDEV's VDEV list, assign default values */ + qdf_list_create(&pdev->pdev_objmgr.wlan_vdev_list, + WLAN_UMAC_PDEV_MAX_VDEVS); + pdev->pdev_objmgr.wlan_vdev_count = 0; + pdev->pdev_objmgr.max_vdev_count = WLAN_UMAC_PDEV_MAX_VDEVS; + pdev->pdev_objmgr.wlan_peer_count = 0; + pdev->pdev_objmgr.temp_peer_count = 0; + pdev->pdev_objmgr.max_peer_count = wlan_psoc_get_max_peer_count(psoc); + /* Save HDD/OSIF pointer */ + pdev->pdev_nif.pdev_ospriv = osdev_priv; + qdf_atomic_init(&pdev->pdev_objmgr.ref_cnt); + pdev->pdev_objmgr.print_cnt = 0; + wlan_objmgr_pdev_get_ref(pdev, WLAN_OBJMGR_ID); + /* Invoke registered create handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->pdev_create_handler[id]; + arg = g_umac_glb_obj->pdev_create_handler_arg[id]; + if (handler) + pdev->obj_status[id] = handler(pdev, arg); + else + pdev->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + /* Derive object status */ + obj_status = wlan_objmgr_pdev_object_status(pdev); + + if (obj_status == QDF_STATUS_SUCCESS) { + /* Object status is SUCCESS, Object is created */ + pdev->obj_state = WLAN_OBJ_STATE_CREATED; + /* Invoke component registered status handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + s_handler = g_umac_glb_obj->pdev_status_handler[id]; + arg = g_umac_glb_obj->pdev_status_handler_arg[id]; + if (s_handler) { + s_handler(pdev, arg, + QDF_STATUS_SUCCESS); + } + } + /* Few components operates in Asynchrous communction, Object state + partially created */ + } else if (obj_status == QDF_STATUS_COMP_ASYNC) { + pdev->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + /* Component object failed to be created, clean up the object */ + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /* Clean up the psoc */ + obj_mgr_err("PDEV component objects allocation failed"); + wlan_objmgr_pdev_obj_delete(pdev); + return NULL; + } + + obj_mgr_debug("Created pdev %d", pdev->pdev_objmgr.wlan_pdev_id); + + return pdev; +} +qdf_export_symbol(wlan_objmgr_pdev_obj_create); + +static QDF_STATUS wlan_objmgr_pdev_obj_destroy(struct wlan_objmgr_pdev *pdev) +{ + uint8_t id; + wlan_objmgr_pdev_destroy_handler handler; + QDF_STATUS obj_status; + void *arg; + uint8_t pdev_id; + + if (!pdev) { + obj_mgr_err("pdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + wlan_objmgr_notify_destroy(pdev, WLAN_PDEV_OP); + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + wlan_print_pdev_info(pdev); + obj_mgr_debug("Physically deleting pdev %d", pdev_id); + + if (pdev->obj_state != WLAN_OBJ_STATE_LOGICALLY_DELETED) { + obj_mgr_err("PDEV object delete is not invoked pdevid:%d objstate:%d", + pdev_id, pdev->obj_state); + WLAN_OBJMGR_BUG(0); + } + + /* Invoke registered destroy handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->pdev_destroy_handler[id]; + arg = g_umac_glb_obj->pdev_destroy_handler_arg[id]; + if (handler && + (pdev->obj_status[id] == QDF_STATUS_SUCCESS || + pdev->obj_status[id] == QDF_STATUS_COMP_ASYNC)) + pdev->obj_status[id] = handler(pdev, arg); + else + pdev->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + /* Derive object status */ + obj_status = wlan_objmgr_pdev_object_status(pdev); + + if (obj_status == QDF_STATUS_E_FAILURE) { + obj_mgr_err("PDEV component objects destroy failed: pdev-id:%d", + pdev_id); + /* Ideally should not happen */ + /* This leads to memleak ??? how to handle */ + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + /* Deletion is in progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + pdev->obj_state = WLAN_OBJ_STATE_PARTIALLY_DELETED; + return QDF_STATUS_COMP_ASYNC; + } + + /* Free PDEV object */ + return wlan_objmgr_pdev_obj_free(pdev); +} + +QDF_STATUS wlan_objmgr_pdev_obj_delete(struct wlan_objmgr_pdev *pdev) +{ + uint8_t print_idx; + + if (!pdev) { + obj_mgr_err("pdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + + obj_mgr_debug("Logically deleting pdev %d", + pdev->pdev_objmgr.wlan_pdev_id); + + print_idx = qdf_get_pidx(); + wlan_objmgr_print_ref_ids(pdev->pdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + /* + * Update PDEV object state to LOGICALLY DELETED + * It prevents further access of this object + */ + wlan_pdev_obj_lock(pdev); + pdev->obj_state = WLAN_OBJ_STATE_LOGICALLY_DELETED; + wlan_pdev_obj_unlock(pdev); + wlan_objmgr_notify_log_delete(pdev, WLAN_PDEV_OP); + wlan_objmgr_pdev_release_ref(pdev, WLAN_OBJMGR_ID); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_pdev_obj_delete); + +/** + ** APIs to attach/detach component objects + */ +QDF_STATUS wlan_objmgr_pdev_component_obj_attach( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status) +{ + uint8_t i; + wlan_objmgr_pdev_status_handler s_hlr; + void *a; + QDF_STATUS obj_status; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("component-id %d is not supported", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + wlan_pdev_obj_lock(pdev); + /* If there is a valid entry, return failure */ + if (pdev->pdev_comp_priv_obj[id]) { + obj_mgr_err("component-%d already have valid pointer", id); + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + /* Save component's pointer and status */ + pdev->pdev_comp_priv_obj[id] = comp_priv_obj; + pdev->obj_status[id] = status; + + wlan_pdev_obj_unlock(pdev); + + if (pdev->obj_state != WLAN_OBJ_STATE_PARTIALLY_CREATED) + return QDF_STATUS_SUCCESS; + /** + * If PDEV object status is partially created means, this API is + * invoked with differnt context, this block should be executed for + * async components only + */ + /* Derive status */ + obj_status = wlan_objmgr_pdev_object_status(pdev); + /* STATUS_SUCCESS means, object is CREATED */ + if (obj_status == QDF_STATUS_SUCCESS) + pdev->obj_state = WLAN_OBJ_STATE_CREATED; + /* update state as CREATION failed, caller has to delete the + PDEV object */ + else if (obj_status == QDF_STATUS_E_FAILURE) + pdev->obj_state = WLAN_OBJ_STATE_CREATION_FAILED; + /* Notify components about the CREATION success/failure */ + if ((obj_status == QDF_STATUS_SUCCESS) || + (obj_status == QDF_STATUS_E_FAILURE)) { + /* nofity object status */ + for (i = 0; i < WLAN_UMAC_MAX_COMPONENTS; i++) { + s_hlr = g_umac_glb_obj->pdev_status_handler[i]; + a = g_umac_glb_obj->pdev_status_handler_arg[i]; + if (s_hlr) + s_hlr(pdev, a, obj_status); + } + } + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_pdev_component_obj_attach); + +QDF_STATUS wlan_objmgr_pdev_component_obj_detach( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj) +{ + QDF_STATUS obj_status; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_pdev_obj_lock(pdev); + /* If there is a invalid entry, return failure */ + if (pdev->pdev_comp_priv_obj[id] != comp_priv_obj) { + pdev->obj_status[id] = QDF_STATUS_E_FAILURE; + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + /* Reset pointers to NULL, update the status*/ + pdev->pdev_comp_priv_obj[id] = NULL; + pdev->obj_status[id] = QDF_STATUS_SUCCESS; + wlan_pdev_obj_unlock(pdev); + + /* If PDEV object status is partially destroyed means, this API is + invoked with differnt context, this block should be executed for async + components only */ + if ((pdev->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) || + (pdev->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS)) { + /* Derive object status */ + obj_status = wlan_objmgr_pdev_object_status(pdev); + if (obj_status == QDF_STATUS_SUCCESS) { + /*Update the status as Deleted, if full object + deletion is in progress */ + if (pdev->obj_state == + WLAN_OBJ_STATE_PARTIALLY_DELETED) + pdev->obj_state = WLAN_OBJ_STATE_DELETED; + /* Move to creation state, since this component + deletion alone requested */ + if (pdev->obj_state == + WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + pdev->obj_state = WLAN_OBJ_STATE_CREATED; + /* Object status is failure */ + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /*Update the status as Deletion failed, if full object + deletion is in progress */ + if (pdev->obj_state == + WLAN_OBJ_STATE_PARTIALLY_DELETED) + pdev->obj_state = + WLAN_OBJ_STATE_DELETION_FAILED; + /* Move to creation state, since this component + deletion alone requested (do not block other + components)*/ + if (pdev->obj_state == + WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + pdev->obj_state = WLAN_OBJ_STATE_CREATED; + } + + /* Delete pdev object */ + if ((obj_status == QDF_STATUS_SUCCESS) && + (pdev->obj_state == WLAN_OBJ_STATE_DELETED)) { + /* Free PDEV object */ + return wlan_objmgr_pdev_obj_free(pdev); + } + } + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_pdev_component_obj_detach); + +/** + ** APIs to operations on pdev objects + */ +static void wlan_objmgr_pdev_vdev_iterate_peers(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_pdev_op_handler handler, + void *arg, uint8_t lock_free_op, + wlan_objmgr_ref_dbgid dbg_id) +{ + qdf_list_t *peer_list = NULL; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_objmgr_peer *peer_next = NULL; + + /* Iterating through vdev's peer list, so lock is + needed */ + /* Get peer list of the vdev */ + peer_list = &vdev->vdev_objmgr.wlan_peer_list; + if (peer_list) { + peer = wlan_vdev_peer_list_peek_active_head(vdev, peer_list, + dbg_id); + while (peer) { + /* Invoke the handler */ + handler(pdev, (void *)peer, arg); + /* Get next peer pointer, increments the ref count */ + peer_next = wlan_peer_get_next_active_peer_of_vdev(vdev, + peer_list, peer, dbg_id); + wlan_objmgr_peer_release_ref(peer, dbg_id); + peer = peer_next; + } + } +} + +QDF_STATUS wlan_objmgr_pdev_iterate_obj_list( + struct wlan_objmgr_pdev *pdev, + enum wlan_objmgr_obj_type obj_type, + wlan_objmgr_pdev_op_handler handler, + void *arg, uint8_t lock_free_op, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_pdev_objmgr *objmgr = &pdev->pdev_objmgr; + qdf_list_t *vdev_list = NULL; + struct wlan_objmgr_vdev *vdev = NULL; + struct wlan_objmgr_vdev *vdev_next = NULL; + + /* VDEV list */ + vdev_list = &objmgr->wlan_vdev_list; + + switch (obj_type) { + case WLAN_VDEV_OP: + /* Iterate through all VDEV object, and invoke handler for each + VDEV object */ + vdev = wlan_pdev_vdev_list_peek_active_head(pdev, vdev_list, + dbg_id); + while (vdev) { + handler(pdev, (void *)vdev, arg); + /* Get next vdev, it increments ref of next vdev */ + vdev_next = wlan_vdev_get_next_active_vdev_of_pdev( + pdev, vdev_list, vdev, dbg_id); + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + vdev = vdev_next; + } + break; + case WLAN_PEER_OP: + vdev = wlan_pdev_vdev_list_peek_active_head(pdev, vdev_list, + dbg_id); + while (vdev) { + wlan_objmgr_pdev_vdev_iterate_peers(pdev, vdev, handler, + arg, lock_free_op, dbg_id); + /* Get next vdev, it increments ref of next vdev */ + vdev_next = wlan_vdev_get_next_active_vdev_of_pdev( + pdev, vdev_list, vdev, dbg_id); + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + vdev = vdev_next; + } + break; + default: + break; + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_pdev_iterate_obj_list); + +QDF_STATUS wlan_objmgr_trigger_pdev_comp_priv_object_creation( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_pdev_create_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* Component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_pdev_obj_lock(pdev); + /* If component object is already created, delete old + component object, then invoke creation */ + if (pdev->pdev_comp_priv_obj[id]) { + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + wlan_pdev_obj_unlock(pdev); + + /* Invoke registered create handlers */ + handler = g_umac_glb_obj->pdev_create_handler[id]; + arg = g_umac_glb_obj->pdev_create_handler_arg[id]; + if (handler) + pdev->obj_status[id] = handler(pdev, arg); + else + return QDF_STATUS_E_FAILURE; + /* If object status is created, then only handle this object status */ + if (pdev->obj_state == WLAN_OBJ_STATE_CREATED) { + /* Derive object status */ + obj_status = wlan_objmgr_pdev_object_status(pdev); + /* Move PDEV object state to Partially created state */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + /*TODO atomic */ + pdev->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + } + } + return obj_status; +} + +QDF_STATUS wlan_objmgr_trigger_pdev_comp_priv_object_deletion( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_pdev_destroy_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_pdev_obj_lock(pdev); + /* Component object was never created, invalid operation */ + if (!pdev->pdev_comp_priv_obj[id]) { + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + wlan_pdev_obj_unlock(pdev); + + /* Invoke registered create handlers */ + handler = g_umac_glb_obj->pdev_destroy_handler[id]; + arg = g_umac_glb_obj->pdev_destroy_handler_arg[id]; + if (handler) + pdev->obj_status[id] = handler(pdev, arg); + else + return QDF_STATUS_E_FAILURE; + + /* If object status is created, then only handle this object status */ + if (pdev->obj_state == WLAN_OBJ_STATE_CREATED) { + obj_status = wlan_objmgr_pdev_object_status(pdev); + /* move object state to DEL progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) + pdev->obj_state = WLAN_OBJ_STATE_COMP_DEL_PROGRESS; + } + return obj_status; +} + +static void wlan_obj_pdev_vdevlist_add_tail(qdf_list_t *obj_list, + struct wlan_objmgr_vdev *obj) +{ + qdf_list_insert_back(obj_list, &obj->vdev_node); +} + +static QDF_STATUS wlan_obj_pdev_vdevlist_remove_vdev( + qdf_list_t *obj_list, + struct wlan_objmgr_vdev *vdev) +{ + qdf_list_node_t *vdev_node = NULL; + + if (!vdev) + return QDF_STATUS_E_FAILURE; + /* get vdev list node element */ + vdev_node = &vdev->vdev_node; + /* list is empty, return failure */ + if (qdf_list_remove_node(obj_list, vdev_node) != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_pdev_vdev_attach(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev_objmgr *objmgr = &pdev->pdev_objmgr; + + wlan_pdev_obj_lock(pdev); + /* If Max vdev count exceeds, return failure */ + if (objmgr->wlan_vdev_count >= objmgr->max_vdev_count) { + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + /* Add vdev to pdev's vdev list */ + wlan_obj_pdev_vdevlist_add_tail(&objmgr->wlan_vdev_list, vdev); + /* Increment pdev ref count to make sure it won't be destroyed before */ + wlan_objmgr_pdev_get_ref(pdev, WLAN_OBJMGR_ID); + /* Increment vdev count of pdev */ + objmgr->wlan_vdev_count++; + wlan_pdev_obj_unlock(pdev); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_pdev_vdev_detach(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev_objmgr *objmgr = &pdev->pdev_objmgr; + + wlan_pdev_obj_lock(pdev); + /* if vdev count is 0, return failure */ + if (objmgr->wlan_vdev_count == 0) { + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + /* remove vdev from pdev's vdev list */ + wlan_obj_pdev_vdevlist_remove_vdev(&objmgr->wlan_vdev_list, vdev); + /* decrement vdev count */ + objmgr->wlan_vdev_count--; + wlan_pdev_obj_unlock(pdev); + /* Decrement pdev ref count since vdev is releasing reference */ + wlan_objmgr_pdev_release_ref(pdev, WLAN_OBJMGR_ID); + return QDF_STATUS_SUCCESS; +} + +void *wlan_objmgr_pdev_get_comp_private_obj( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id) +{ + void *comp_priv_obj; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + QDF_BUG(0); + return NULL; + } + + if (!pdev) { + QDF_BUG(0); + return NULL; + } + + comp_priv_obj = pdev->pdev_comp_priv_obj[id]; + + return comp_priv_obj; +} + +qdf_export_symbol(wlan_objmgr_pdev_get_comp_private_obj); + +void wlan_objmgr_pdev_get_ref(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid id) +{ + if (!pdev) { + obj_mgr_err("pdev obj is NULL"); + QDF_ASSERT(0); + return; + } + qdf_atomic_inc(&pdev->pdev_objmgr.ref_cnt); + qdf_atomic_inc(&pdev->pdev_objmgr.ref_id_dbg[id]); +} + +qdf_export_symbol(wlan_objmgr_pdev_get_ref); + +QDF_STATUS wlan_objmgr_pdev_try_get_ref(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid id) +{ + uint8_t pdev_id; + + if (!pdev) { + obj_mgr_err("pdev obj is NULL"); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + wlan_pdev_obj_lock(pdev); + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + if (pdev->obj_state != WLAN_OBJ_STATE_CREATED) { + wlan_pdev_obj_unlock(pdev); + if (pdev->pdev_objmgr.print_cnt++ <= + WLAN_OBJMGR_RATELIMIT_THRESH) + obj_mgr_err( + "[Ref id: %d] pdev [%d] is not in Created(st:%d)", + id, pdev_id, pdev->obj_state); + return QDF_STATUS_E_RESOURCES; + } + + wlan_objmgr_pdev_get_ref(pdev, id); + wlan_pdev_obj_unlock(pdev); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(wlan_objmgr_pdev_try_get_ref); + +void wlan_objmgr_pdev_release_ref(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid id) +{ + uint8_t pdev_id; + + if (!pdev) { + obj_mgr_err("pdev obj is NULL"); + QDF_ASSERT(0); + return; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + if (!qdf_atomic_read(&pdev->pdev_objmgr.ref_id_dbg[id])) { + obj_mgr_err("pdev (id:%d)ref cnt was not taken by %d", + pdev_id, id); + wlan_objmgr_print_ref_ids(pdev->pdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); + WLAN_OBJMGR_BUG(0); + return; + } + + if (!qdf_atomic_read(&pdev->pdev_objmgr.ref_cnt)) { + obj_mgr_err("pdev ref cnt is 0: pdev-id:%d", pdev_id); + WLAN_OBJMGR_BUG(0); + return; + } + + qdf_atomic_dec(&pdev->pdev_objmgr.ref_id_dbg[id]); + /* Decrement ref count, free pdev, if ref count == 0 */ + if (qdf_atomic_dec_and_test(&pdev->pdev_objmgr.ref_cnt)) + wlan_objmgr_pdev_obj_destroy(pdev); +} + +qdf_export_symbol(wlan_objmgr_pdev_release_ref); + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_pdev_get_first_vdev_debug( + struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_pdev_objmgr *objmgr = &pdev->pdev_objmgr; + qdf_list_t *vdev_list = NULL; + struct wlan_objmgr_vdev *vdev; + qdf_list_node_t *node = NULL; + qdf_list_node_t *prev_node = NULL; + + wlan_pdev_obj_lock(pdev); + + /* VDEV list */ + vdev_list = &objmgr->wlan_vdev_list; + if (qdf_list_peek_front(vdev_list, &node) != QDF_STATUS_SUCCESS) { + wlan_pdev_obj_unlock(pdev); + return NULL; + } + + do { + vdev = qdf_container_of(node, struct wlan_objmgr_vdev, + vdev_node); + if (wlan_objmgr_vdev_try_get_ref_debug(vdev, + dbg_id, func, line) == + QDF_STATUS_SUCCESS) { + wlan_pdev_obj_unlock(pdev); + return vdev; + } + + prev_node = node; + } while (qdf_list_peek_next(vdev_list, prev_node, &node) == + QDF_STATUS_SUCCESS); + + wlan_pdev_obj_unlock(pdev); + + return NULL; +} + +qdf_export_symbol(wlan_objmgr_pdev_get_first_vdev_debug); +#else +struct wlan_objmgr_vdev *wlan_objmgr_pdev_get_first_vdev( + struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_pdev_objmgr *objmgr = &pdev->pdev_objmgr; + qdf_list_t *vdev_list = NULL; + struct wlan_objmgr_vdev *vdev; + qdf_list_node_t *node = NULL; + qdf_list_node_t *prev_node = NULL; + + wlan_pdev_obj_lock(pdev); + + /* VDEV list */ + vdev_list = &objmgr->wlan_vdev_list; + if (qdf_list_peek_front(vdev_list, &node) != QDF_STATUS_SUCCESS) { + wlan_pdev_obj_unlock(pdev); + return NULL; + } + + do { + vdev = qdf_container_of(node, struct wlan_objmgr_vdev, + vdev_node); + if (wlan_objmgr_vdev_try_get_ref(vdev, dbg_id) == + QDF_STATUS_SUCCESS) { + wlan_pdev_obj_unlock(pdev); + return vdev; + } + + prev_node = node; + } while (qdf_list_peek_next(vdev_list, prev_node, &node) == + QDF_STATUS_SUCCESS); + + wlan_pdev_obj_unlock(pdev); + + return NULL; +} + +qdf_export_symbol(wlan_objmgr_pdev_get_first_vdev); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_pdev_debug( + struct wlan_objmgr_pdev *pdev, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_vdev *vdev_next; + struct wlan_objmgr_pdev_objmgr *objmgr; + qdf_list_t *vdev_list; + + wlan_pdev_obj_lock(pdev); + + objmgr = &pdev->pdev_objmgr; + vdev_list = &objmgr->wlan_vdev_list; + /* Get first vdev */ + vdev = wlan_pdev_vdev_list_peek_head(vdev_list); + /** + * Iterate through pdev's vdev list, till vdev id matches with + * entry of vdev list + */ + while (vdev) { + if (wlan_vdev_get_id(vdev) == vdev_id) { + if (wlan_objmgr_vdev_try_get_ref_debug(vdev, dbg_id, + func, line) != + QDF_STATUS_SUCCESS) + vdev = NULL; + + wlan_pdev_obj_unlock(pdev); + return vdev; + } + /* get next vdev */ + vdev_next = wlan_vdev_get_next_vdev_of_pdev(vdev_list, vdev); + vdev = vdev_next; + } + wlan_pdev_obj_unlock(pdev); + return NULL; +} + +qdf_export_symbol(wlan_objmgr_get_vdev_by_id_from_pdev_debug); +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_pdev( + struct wlan_objmgr_pdev *pdev, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_vdev *vdev_next; + struct wlan_objmgr_pdev_objmgr *objmgr; + qdf_list_t *vdev_list; + + wlan_pdev_obj_lock(pdev); + + objmgr = &pdev->pdev_objmgr; + vdev_list = &objmgr->wlan_vdev_list; + /* Get first vdev */ + vdev = wlan_pdev_vdev_list_peek_head(vdev_list); + /** + * Iterate through pdev's vdev list, till vdev id matches with + * entry of vdev list + */ + while (vdev) { + if (wlan_vdev_get_id(vdev) == vdev_id) { + if (wlan_objmgr_vdev_try_get_ref(vdev, dbg_id) != + QDF_STATUS_SUCCESS) + vdev = NULL; + + wlan_pdev_obj_unlock(pdev); + return vdev; + } + /* get next vdev */ + vdev_next = wlan_vdev_get_next_vdev_of_pdev(vdev_list, vdev); + vdev = vdev_next; + } + wlan_pdev_obj_unlock(pdev); + return NULL; +} + +qdf_export_symbol(wlan_objmgr_get_vdev_by_id_from_pdev); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_pdev_no_state_debug( + struct wlan_objmgr_pdev *pdev, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_vdev *vdev_next; + struct wlan_objmgr_pdev_objmgr *objmgr; + qdf_list_t *vdev_list; + + wlan_pdev_obj_lock(pdev); + + objmgr = &pdev->pdev_objmgr; + vdev_list = &objmgr->wlan_vdev_list; + /* Get first vdev */ + vdev = wlan_pdev_vdev_list_peek_head(vdev_list); + /** + * Iterate through pdev's vdev list, till vdev id matches with + * entry of vdev list + */ + while (vdev) { + if (wlan_vdev_get_id(vdev) == vdev_id) { + wlan_objmgr_vdev_get_ref_debug(vdev, dbg_id, + func, line); + wlan_pdev_obj_unlock(pdev); + + return vdev; + } + /* get next vdev */ + vdev_next = wlan_vdev_get_next_vdev_of_pdev(vdev_list, vdev); + vdev = vdev_next; + } + wlan_pdev_obj_unlock(pdev); + + return NULL; +} + +qdf_export_symbol(wlan_objmgr_get_vdev_by_id_from_pdev_no_state_debug); +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_pdev_no_state( + struct wlan_objmgr_pdev *pdev, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_vdev *vdev_next; + struct wlan_objmgr_pdev_objmgr *objmgr; + qdf_list_t *vdev_list; + + wlan_pdev_obj_lock(pdev); + + objmgr = &pdev->pdev_objmgr; + vdev_list = &objmgr->wlan_vdev_list; + /* Get first vdev */ + vdev = wlan_pdev_vdev_list_peek_head(vdev_list); + /** + * Iterate through pdev's vdev list, till vdev id matches with + * entry of vdev list + */ + while (vdev) { + if (wlan_vdev_get_id(vdev) == vdev_id) { + wlan_objmgr_vdev_get_ref(vdev, dbg_id); + wlan_pdev_obj_unlock(pdev); + + return vdev; + } + /* get next vdev */ + vdev_next = wlan_vdev_get_next_vdev_of_pdev(vdev_list, vdev); + vdev = vdev_next; + } + wlan_pdev_obj_unlock(pdev); + + return NULL; +} + +qdf_export_symbol(wlan_objmgr_get_vdev_by_id_from_pdev_no_state); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_pdev_debug( + struct wlan_objmgr_pdev *pdev, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *fnc, int ln) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_vdev *vdev_next; + struct wlan_objmgr_pdev_objmgr *objmgr; + qdf_list_t *vdev_list; + + wlan_pdev_obj_lock(pdev); + objmgr = &pdev->pdev_objmgr; + vdev_list = &objmgr->wlan_vdev_list; + /* Get first vdev */ + vdev = wlan_pdev_vdev_list_peek_head(vdev_list); + /** + * Iterate through pdev's vdev list, till vdev macaddr matches with + * entry of vdev list + */ + while (vdev) { + if (QDF_IS_STATUS_SUCCESS( + WLAN_ADDR_EQ(wlan_vdev_mlme_get_macaddr(vdev), macaddr))) { + if (QDF_IS_STATUS_SUCCESS( + wlan_objmgr_vdev_try_get_ref_debug(vdev, dbg_id, + fnc, ln))) { + wlan_pdev_obj_unlock(pdev); + return vdev; + } + } + /* get next vdev */ + vdev_next = wlan_vdev_get_next_vdev_of_pdev(vdev_list, vdev); + vdev = vdev_next; + } + wlan_pdev_obj_unlock(pdev); + + return NULL; +} +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_pdev( + struct wlan_objmgr_pdev *pdev, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_vdev *vdev_next; + struct wlan_objmgr_pdev_objmgr *objmgr; + qdf_list_t *vdev_list; + + wlan_pdev_obj_lock(pdev); + objmgr = &pdev->pdev_objmgr; + vdev_list = &objmgr->wlan_vdev_list; + /* Get first vdev */ + vdev = wlan_pdev_vdev_list_peek_head(vdev_list); + /** + * Iterate through pdev's vdev list, till vdev macaddr matches with + * entry of vdev list + */ + while (vdev) { + if (QDF_IS_STATUS_SUCCESS( + WLAN_ADDR_EQ(wlan_vdev_mlme_get_macaddr(vdev), macaddr))) { + if (QDF_IS_STATUS_SUCCESS( + wlan_objmgr_vdev_try_get_ref(vdev, dbg_id))) { + wlan_pdev_obj_unlock(pdev); + return vdev; + } + } + /* get next vdev */ + vdev_next = wlan_vdev_get_next_vdev_of_pdev(vdev_list, vdev); + vdev = vdev_next; + } + wlan_pdev_obj_unlock(pdev); + + return NULL; +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev + *wlan_objmgr_get_vdev_by_macaddr_from_pdev_no_state_debug( + struct wlan_objmgr_pdev *pdev, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_vdev *vdev_next; + struct wlan_objmgr_pdev_objmgr *objmgr; + qdf_list_t *vdev_list; + + wlan_pdev_obj_lock(pdev); + objmgr = &pdev->pdev_objmgr; + vdev_list = &objmgr->wlan_vdev_list; + /* Get first vdev */ + vdev = wlan_pdev_vdev_list_peek_head(vdev_list); + /** + * Iterate through pdev's vdev list, till vdev macaddr matches with + * entry of vdev list + */ + while (vdev) { + if (WLAN_ADDR_EQ(wlan_vdev_mlme_get_macaddr(vdev), macaddr) + == QDF_STATUS_SUCCESS) { + wlan_objmgr_vdev_get_ref_debug(vdev, dbg_id, + func, line); + wlan_pdev_obj_unlock(pdev); + + return vdev; + } + /* get next vdev */ + vdev_next = wlan_vdev_get_next_vdev_of_pdev(vdev_list, vdev); + vdev = vdev_next; + } + wlan_pdev_obj_unlock(pdev); + + return NULL; +} +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_pdev_no_state( + struct wlan_objmgr_pdev *pdev, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_vdev *vdev_next; + struct wlan_objmgr_pdev_objmgr *objmgr; + qdf_list_t *vdev_list; + + wlan_pdev_obj_lock(pdev); + objmgr = &pdev->pdev_objmgr; + vdev_list = &objmgr->wlan_vdev_list; + /* Get first vdev */ + vdev = wlan_pdev_vdev_list_peek_head(vdev_list); + /** + * Iterate through pdev's vdev list, till vdev macaddr matches with + * entry of vdev list + */ + while (vdev) { + if (WLAN_ADDR_EQ(wlan_vdev_mlme_get_macaddr(vdev), macaddr) + == QDF_STATUS_SUCCESS) { + wlan_objmgr_vdev_get_ref(vdev, dbg_id); + wlan_pdev_obj_unlock(pdev); + + return vdev; + } + /* get next vdev */ + vdev_next = wlan_vdev_get_next_vdev_of_pdev(vdev_list, vdev); + vdev = vdev_next; + } + wlan_pdev_obj_unlock(pdev); + + return NULL; +} +#endif + +#ifdef WLAN_OBJMGR_DEBUG +void wlan_print_pdev_info(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_pdev_objmgr *pdev_objmgr; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_vdev *vdev_next; + qdf_list_t *vdev_list; + uint16_t index = 0; + + pdev_objmgr = &pdev->pdev_objmgr; + + obj_mgr_debug("pdev: %pK", pdev); + obj_mgr_debug("wlan_pdev_id: %d", pdev_objmgr->wlan_pdev_id); + obj_mgr_debug("wlan_vdev_count: %d", pdev_objmgr->wlan_vdev_count); + obj_mgr_debug("max_vdev_count: %d", pdev_objmgr->max_vdev_count); + obj_mgr_debug("wlan_peer_count: %d", pdev_objmgr->wlan_peer_count); + obj_mgr_debug("max_peer_count: %d", pdev_objmgr->max_peer_count); + obj_mgr_debug("temp_peer_count: %d", pdev_objmgr->temp_peer_count); + obj_mgr_debug("wlan_psoc: %pK", pdev_objmgr->wlan_psoc); + obj_mgr_debug("ref_cnt: %d", qdf_atomic_read(&pdev_objmgr->ref_cnt)); + + wlan_pdev_obj_lock(pdev); + vdev_list = &pdev_objmgr->wlan_vdev_list; + /* Get first vdev */ + vdev = wlan_pdev_vdev_list_peek_head(vdev_list); + + while (vdev) { + obj_mgr_debug("wlan_vdev_list[%d]: %pK", index, vdev); + wlan_print_vdev_info(vdev); + index++; + /* get next vdev */ + vdev_next = wlan_vdev_get_next_vdev_of_pdev(vdev_list, vdev); + vdev = vdev_next; + } + wlan_pdev_obj_unlock(pdev); +} + +qdf_export_symbol(wlan_print_pdev_info); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_pdev_obj_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_pdev_obj_i.h new file mode 100644 index 0000000000000000000000000000000000000000..8c0bc8e09688327cb3e290b09df2094ef553df03 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_pdev_obj_i.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Public APIs to perform operations on PDEV object + */ +#ifndef _WLAN_OBJMGR_PDEV_OBJ_I_H_ +#define _WLAN_OBJMGR_PDEV_OBJ_I_H_ + +/** + * wlan_objmgr_pdev_vdev_attach() - attach vdev to pdev + * @pdev: PDEV object + * @vdev: VDEV object + * + * API to be used for adding the VDEV object in PDEV's VDEV object list + * + * Return: SUCCESS on successful storing of VDEV object + * FAILURE + */ +QDF_STATUS wlan_objmgr_pdev_vdev_attach(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev); + +/** + * wlan_objmgr_pdev_vdev_detach() - detach vdev from pdev + * @pdev: PDEV object + * @vdev: VDEV object + * + * API to be used for removing the VDEV object from PDEV's VDEV object list + * + * Return: SUCCESS on successful removal of VDEV object + * FAILURE + */ +QDF_STATUS wlan_objmgr_pdev_vdev_detach(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev); + +#endif /* _WLAN_OBJMGR_PDEV_OBJ_I_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_peer_obj.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_peer_obj.c new file mode 100644 index 0000000000000000000000000000000000000000..c99f3ac372f32d97219c1ce649f9477a722a08ee --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_peer_obj.c @@ -0,0 +1,1282 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Public APIs to perform operations on Peer object + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_objmgr_global_obj_i.h" +#include "wlan_objmgr_psoc_obj_i.h" +#include "wlan_objmgr_pdev_obj_i.h" +#include "wlan_objmgr_vdev_obj_i.h" + + +/** + ** APIs to Create/Delete Peer object APIs + */ +static QDF_STATUS wlan_objmgr_peer_object_status( + struct wlan_objmgr_peer *peer) +{ + uint8_t id; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + wlan_peer_obj_lock(peer); + /* Iterate through all components to derive the object status */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + /* If component disabled, Ignore */ + if (peer->obj_status[id] == QDF_STATUS_COMP_DISABLED) + continue; + /* If component operates in Async, status is Partially created, + break */ + else if (peer->obj_status[id] == QDF_STATUS_COMP_ASYNC) { + if (!peer->peer_comp_priv_obj[id]) { + status = QDF_STATUS_COMP_ASYNC; + break; + } + /* If component failed to allocate its object, treat it as + failure, complete object need to be cleaned up */ + } else if ((peer->obj_status[id] == QDF_STATUS_E_NOMEM) || + (peer->obj_status[id] == QDF_STATUS_E_FAILURE)) { + obj_mgr_err("Peer comp object(id:%d) alloc fail", id); + status = QDF_STATUS_E_FAILURE; + break; + } + } + wlan_peer_obj_unlock(peer); + return status; +} + +static QDF_STATUS wlan_objmgr_peer_obj_free(struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev; + uint8_t *macaddr; + uint8_t vdev_id; + + if (!peer) { + obj_mgr_err("PEER is NULL"); + return QDF_STATUS_E_FAILURE; + } + + macaddr = wlan_peer_get_macaddr(peer); + + vdev = wlan_peer_get_vdev(peer); + if (!vdev) { + obj_mgr_err( + "VDEV is NULL for peer("QDF_MAC_ADDR_FMT")", + QDF_MAC_ADDR_REF(macaddr)); + return QDF_STATUS_E_FAILURE; + } + + vdev_id = wlan_vdev_get_id(vdev); + + /* get PSOC from VDEV, if it is NULL, return */ + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + obj_mgr_err( + "PSOC is NULL for peer("QDF_MAC_ADDR_FMT")", + QDF_MAC_ADDR_REF(macaddr)); + return QDF_STATUS_E_FAILURE; + } + + /* Decrement ref count for BSS peer, so that BSS peer deletes last*/ + if ((wlan_peer_get_peer_type(peer) == WLAN_PEER_STA) || + (wlan_peer_get_peer_type(peer) == WLAN_PEER_STA_TEMP) || + (wlan_peer_get_peer_type(peer) == WLAN_PEER_P2P_CLI)) + wlan_objmgr_peer_release_ref(wlan_vdev_get_bsspeer(vdev), + WLAN_OBJMGR_ID); + + wlan_objmgr_vdev_get_ref(vdev, WLAN_OBJMGR_ID); + + /* Detach peer from VDEV's peer list */ + if (wlan_objmgr_vdev_peer_detach(vdev, peer) == QDF_STATUS_E_FAILURE) { + obj_mgr_err( + "Peer("QDF_MAC_ADDR_FMT") VDEV detach fail, vdev id: %d", + QDF_MAC_ADDR_REF(macaddr), vdev_id); + wlan_objmgr_vdev_release_ref(vdev, WLAN_OBJMGR_ID); + return QDF_STATUS_E_FAILURE; + } + /* Detach peer from PSOC's peer list */ + if (wlan_objmgr_psoc_peer_detach(psoc, peer) == QDF_STATUS_E_FAILURE) { + obj_mgr_err( + "Peer("QDF_MAC_ADDR_FMT") PSOC detach failure", + QDF_MAC_ADDR_REF(macaddr)); + wlan_objmgr_vdev_release_ref(vdev, WLAN_OBJMGR_ID); + return QDF_STATUS_E_FAILURE; + } + wlan_objmgr_peer_trace_del_ref_list(peer); + wlan_objmgr_peer_trace_deinit_lock(peer); + qdf_spinlock_destroy(&peer->peer_lock); + qdf_mem_free(peer); + + wlan_objmgr_vdev_peer_freed_notify(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_OBJMGR_ID); + + return QDF_STATUS_SUCCESS; + +} + +#ifdef WLAN_OBJMGR_REF_ID_DEBUG +static void +wlan_objmgr_peer_init_ref_id_debug(struct wlan_objmgr_peer *peer) +{ + uint8_t id; + + for (id = 0; id < WLAN_REF_ID_MAX; id++) + qdf_atomic_init(&peer->peer_objmgr.ref_id_dbg[id]); +} +#else +static inline void +wlan_objmgr_peer_init_ref_id_debug(struct wlan_objmgr_peer *peer) {} +#endif + +struct wlan_objmgr_peer *wlan_objmgr_peer_obj_create( + struct wlan_objmgr_vdev *vdev, + enum wlan_peer_type type, + uint8_t *macaddr) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_psoc *psoc; + wlan_objmgr_peer_create_handler handler; + wlan_objmgr_peer_status_handler stat_handler; + void *arg; + QDF_STATUS obj_status; + uint8_t id; + + if (!vdev) { + obj_mgr_err( + "VDEV is NULL for peer ("QDF_MAC_ADDR_FMT")", + QDF_MAC_ADDR_REF(macaddr)); + return NULL; + } + /* Get psoc, if psoc is NULL, return */ + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + obj_mgr_err( + "PSOC is NULL for peer ("QDF_MAC_ADDR_FMT")", + QDF_MAC_ADDR_REF(macaddr)); + return NULL; + } + /* Allocate memory for peer object */ + peer = qdf_mem_malloc(sizeof(*peer)); + if (!peer) + return NULL; + + peer->obj_state = WLAN_OBJ_STATE_ALLOCATED; + qdf_atomic_init(&peer->peer_objmgr.ref_cnt); + wlan_objmgr_peer_init_ref_id_debug(peer); + wlan_objmgr_peer_get_ref(peer, WLAN_OBJMGR_ID); + /* set vdev to peer */ + wlan_peer_set_vdev(peer, vdev); + /* set peer type */ + wlan_peer_set_peer_type(peer, type); + /* set mac address of peer */ + wlan_peer_set_macaddr(peer, macaddr); + /* initialize peer state */ + wlan_peer_mlme_set_state(peer, WLAN_INIT_STATE); + wlan_peer_mlme_reset_seq_num(peer); + peer->peer_objmgr.print_cnt = 0; + + qdf_spinlock_create(&peer->peer_lock); + wlan_objmgr_peer_trace_init_lock(peer); + /* Attach peer to psoc, psoc maintains the node table for the device */ + if (wlan_objmgr_psoc_peer_attach(psoc, peer) != + QDF_STATUS_SUCCESS) { + obj_mgr_warn( + "Peer("QDF_MAC_ADDR_FMT") PSOC attach failure", + QDF_MAC_ADDR_REF(macaddr)); + qdf_spinlock_destroy(&peer->peer_lock); + wlan_objmgr_peer_trace_deinit_lock(peer); + qdf_mem_free(peer); + return NULL; + } + /* Attach peer to vdev peer table */ + if (wlan_objmgr_vdev_peer_attach(vdev, peer) != + QDF_STATUS_SUCCESS) { + obj_mgr_warn( + "Peer("QDF_MAC_ADDR_FMT") VDEV attach failure", + QDF_MAC_ADDR_REF(macaddr)); + /* if attach fails, detach from psoc table before free */ + wlan_objmgr_psoc_peer_detach(psoc, peer); + qdf_spinlock_destroy(&peer->peer_lock); + wlan_objmgr_peer_trace_deinit_lock(peer); + qdf_mem_free(peer); + return NULL; + } + wlan_peer_set_pdev_id(peer, wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev))); + /* Increment ref count for BSS peer, so that BSS peer deletes last*/ + if ((type == WLAN_PEER_STA) || (type == WLAN_PEER_STA_TEMP) + || (type == WLAN_PEER_P2P_CLI)) + wlan_objmgr_peer_get_ref(wlan_vdev_get_bsspeer(vdev), + WLAN_OBJMGR_ID); + /* TODO init other parameters */ + /* Invoke registered create handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->peer_create_handler[id]; + arg = g_umac_glb_obj->peer_create_handler_arg[id]; + if (handler) + peer->obj_status[id] = handler(peer, arg); + else + peer->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + /* derive the object status */ + obj_status = wlan_objmgr_peer_object_status(peer); + /* If SUCCESS, Object is created */ + if (obj_status == QDF_STATUS_SUCCESS) { + peer->obj_state = WLAN_OBJ_STATE_CREATED; + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + stat_handler = g_umac_glb_obj->peer_status_handler[id]; + arg = g_umac_glb_obj->peer_status_handler_arg[id]; + if (stat_handler) + stat_handler(peer, arg, + QDF_STATUS_SUCCESS); + } + } else if (obj_status == QDF_STATUS_COMP_ASYNC) { + /* If any component operates in different context, update it + as partially created */ + peer->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /* Clean up the peer */ + obj_mgr_err( + "Peer("QDF_MAC_ADDR_FMT") comp object alloc fail", + QDF_MAC_ADDR_REF(macaddr)); + wlan_objmgr_peer_obj_delete(peer); + return NULL; + } + + obj_mgr_debug("Created peer " QDF_MAC_ADDR_FMT " type %d", + QDF_MAC_ADDR_REF(macaddr), type); + + return peer; +} + +static QDF_STATUS wlan_objmgr_peer_obj_destroy(struct wlan_objmgr_peer *peer) +{ + uint8_t id; + wlan_objmgr_peer_destroy_handler handler; + QDF_STATUS obj_status; + void *arg; + uint8_t *macaddr; + + if (!peer) { + obj_mgr_err("PEER is NULL"); + return QDF_STATUS_E_FAILURE; + } + wlan_objmgr_notify_destroy(peer, WLAN_PEER_OP); + + macaddr = wlan_peer_get_macaddr(peer); + + obj_mgr_debug("Physically deleting peer " QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(macaddr)); + + if (peer->obj_state != WLAN_OBJ_STATE_LOGICALLY_DELETED) { + obj_mgr_err("PEER object del is not invoked obj_state:%d peer " + QDF_MAC_ADDR_FMT, peer->obj_state, + QDF_MAC_ADDR_REF(macaddr)); + WLAN_OBJMGR_BUG(0); + } + + /* Invoke registered destroy handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->peer_destroy_handler[id]; + arg = g_umac_glb_obj->peer_destroy_handler_arg[id]; + if (handler && + (peer->obj_status[id] == QDF_STATUS_SUCCESS || + peer->obj_status[id] == QDF_STATUS_COMP_ASYNC)) + peer->obj_status[id] = handler(peer, arg); + else + peer->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + /* Derive the object status */ + obj_status = wlan_objmgr_peer_object_status(peer); + if (obj_status == QDF_STATUS_E_FAILURE) { + /* If it status is failure, memory will not be freed */ + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + /* few components deletion is in progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + peer->obj_state = WLAN_OBJ_STATE_PARTIALLY_DELETED; + return QDF_STATUS_COMP_ASYNC; + } + + /* Free the peer object */ + return wlan_objmgr_peer_obj_free(peer); +} + +QDF_STATUS wlan_objmgr_peer_obj_delete(struct wlan_objmgr_peer *peer) +{ + uint8_t print_idx; + uint8_t *macaddr; + + if (!peer) { + obj_mgr_err("PEER is NULL"); + return QDF_STATUS_E_FAILURE; + } + + wlan_peer_obj_lock(peer); + macaddr = wlan_peer_get_macaddr(peer); + wlan_peer_obj_unlock(peer); + + obj_mgr_debug("Logically deleting peer " QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(macaddr)); + + print_idx = qdf_get_pidx(); + wlan_objmgr_print_peer_ref_ids(peer, QDF_TRACE_LEVEL_DEBUG); + /** + * Update VDEV object state to LOGICALLY DELETED + * It prevents further access of this object + */ + wlan_peer_obj_lock(peer); + peer->obj_state = WLAN_OBJ_STATE_LOGICALLY_DELETED; + wlan_peer_obj_unlock(peer); + wlan_objmgr_notify_log_delete(peer, WLAN_PEER_OP); + wlan_objmgr_peer_release_ref(peer, WLAN_OBJMGR_ID); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_peer_obj_delete); +/** + ** APIs to attach/detach component objects + */ +QDF_STATUS wlan_objmgr_peer_component_obj_attach( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status) +{ + wlan_objmgr_peer_status_handler s_hler; + void *arg; + uint8_t i; + QDF_STATUS obj_status; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_peer_obj_lock(peer); + /* If there is a valid entry, return failure, + valid object needs to be freed first */ + if (peer->peer_comp_priv_obj[id]) { + wlan_peer_obj_unlock(peer); + return QDF_STATUS_E_FAILURE; + } + /* Assign component object private pointer(can be NULL also), status */ + peer->peer_comp_priv_obj[id] = comp_priv_obj; + peer->obj_status[id] = status; + wlan_peer_obj_unlock(peer); + + if (peer->obj_state != WLAN_OBJ_STATE_PARTIALLY_CREATED) + return QDF_STATUS_SUCCESS; + + /* If PEER object status is partially created means, this API is + invoked with differnt context. this block should be executed for async + components only */ + /* Derive status */ + obj_status = wlan_objmgr_peer_object_status(peer); + /* STATUS_SUCCESS means, object is CREATED */ + if (obj_status == QDF_STATUS_SUCCESS) + peer->obj_state = WLAN_OBJ_STATE_CREATED; + /* update state as CREATION failed, caller has to delete the + PEER object */ + else if (obj_status == QDF_STATUS_E_FAILURE) + peer->obj_state = WLAN_OBJ_STATE_CREATION_FAILED; + /* Notify components about the CREATION success/failure */ + if ((obj_status == QDF_STATUS_SUCCESS) || + (obj_status == QDF_STATUS_E_FAILURE)) { + /* nofity object status */ + for (i = 0; i < WLAN_UMAC_MAX_COMPONENTS; i++) { + s_hler = g_umac_glb_obj->peer_status_handler[i]; + arg = g_umac_glb_obj->peer_status_handler_arg[i]; + if (s_hler) + s_hler(peer, arg, obj_status); + } + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_peer_component_obj_detach( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id, + void *comp_priv_obj) +{ + QDF_STATUS obj_status; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_peer_obj_lock(peer); + /* If there is a invalid entry, return failure */ + if (peer->peer_comp_priv_obj[id] != comp_priv_obj) { + peer->obj_status[id] = QDF_STATUS_E_FAILURE; + wlan_peer_obj_unlock(peer); + return QDF_STATUS_E_FAILURE; + } + /* Reset the pointer to NULL */ + peer->peer_comp_priv_obj[id] = NULL; + peer->obj_status[id] = QDF_STATUS_SUCCESS; + wlan_peer_obj_unlock(peer); + + /* If PEER object status is partially destroyed means, this API is + invoked with differnt context, this block should be executed for async + components only */ + if ((peer->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) || + (peer->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS)) { + /* Derive object status */ + obj_status = wlan_objmgr_peer_object_status(peer); + if (obj_status == QDF_STATUS_SUCCESS) { + /*Update the status as Deleted, if full object + deletion is in progress */ + if (peer->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) + peer->obj_state = WLAN_OBJ_STATE_DELETED; + /* Move to creation state, since this component + deletion alone requested */ + if (peer->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + peer->obj_state = WLAN_OBJ_STATE_CREATED; + /* Object status is failure */ + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /*Update the status as Deletion failed, if full object + deletion is in progress */ + if (peer->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) + peer->obj_state = + WLAN_OBJ_STATE_DELETION_FAILED; + /* Move to creation state, since this component + deletion alone requested (do not block other + components) */ + if (peer->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + peer->obj_state = WLAN_OBJ_STATE_CREATED; + } + + /* Delete peer object */ + if ((obj_status == QDF_STATUS_SUCCESS) && + (peer->obj_state == WLAN_OBJ_STATE_DELETED)) { + /* Free the peer object */ + return wlan_objmgr_peer_obj_free(peer); + } + } + + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS wlan_objmgr_trigger_peer_comp_priv_object_creation( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_peer_create_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* Component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_peer_obj_lock(peer); + /* If component object is already created, delete old + component object, then invoke creation */ + if (peer->peer_comp_priv_obj[id]) { + wlan_peer_obj_unlock(peer); + return QDF_STATUS_E_FAILURE; + } + wlan_peer_obj_unlock(peer); + + /* Invoke registered create handlers */ + handler = g_umac_glb_obj->peer_create_handler[id]; + arg = g_umac_glb_obj->peer_create_handler_arg[id]; + if (handler) + peer->obj_status[id] = handler(peer, arg); + else + return QDF_STATUS_E_FAILURE; + + /* If object status is created, then only handle this object status */ + if (peer->obj_state == WLAN_OBJ_STATE_CREATED) { + /* Derive object status */ + obj_status = wlan_objmgr_peer_object_status(peer); + /* Move PDEV object state to Partially created state */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + /*TODO atomic */ + peer->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + } + } + + return obj_status; +} + + +QDF_STATUS wlan_objmgr_trigger_peer_comp_priv_object_deletion( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_peer_destroy_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_peer_obj_lock(peer); + /* Component object was never created, invalid operation */ + if (!peer->peer_comp_priv_obj[id]) { + wlan_peer_obj_unlock(peer); + return QDF_STATUS_E_FAILURE; + } + + wlan_peer_obj_unlock(peer); + + /* Invoke registered destroy handlers */ + handler = g_umac_glb_obj->peer_destroy_handler[id]; + arg = g_umac_glb_obj->peer_destroy_handler_arg[id]; + if (handler) + peer->obj_status[id] = handler(peer, arg); + else + return QDF_STATUS_E_FAILURE; + + /* If object status is created, then only handle this object status */ + if (peer->obj_state == WLAN_OBJ_STATE_CREATED) { + obj_status = wlan_objmgr_peer_object_status(peer); + /* move object state to DEL progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) + peer->obj_state = WLAN_OBJ_STATE_COMP_DEL_PROGRESS; + } + return obj_status; +} + +void *wlan_objmgr_peer_get_comp_private_obj( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id) +{ + void *comp_priv_obj; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + QDF_BUG(0); + return NULL; + } + + if (!peer) { + QDF_BUG(0); + return NULL; + } + + comp_priv_obj = peer->peer_comp_priv_obj[id]; + return comp_priv_obj; +} +qdf_export_symbol(wlan_objmgr_peer_get_comp_private_obj); + +#ifdef WLAN_OBJMGR_REF_ID_DEBUG +static inline void +wlan_objmgr_peer_get_debug_id_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id) +{ + qdf_atomic_inc(&peer->peer_objmgr.ref_id_dbg[id]); +} +#else +static inline void +wlan_objmgr_peer_get_debug_id_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id) {} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_DEBUG +static QDF_STATUS +wlan_objmgr_peer_release_debug_id_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id) +{ + if (!qdf_atomic_read(&peer->peer_objmgr.ref_id_dbg[id])) { + uint8_t *macaddr; + + macaddr = wlan_peer_get_macaddr(peer); + obj_mgr_err( + "peer("QDF_MAC_ADDR_FMT") ref was not taken by %d", + QDF_MAC_ADDR_REF(macaddr), id); + wlan_objmgr_print_ref_ids(peer->peer_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); + WLAN_OBJMGR_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + qdf_atomic_dec(&peer->peer_objmgr.ref_id_dbg[id]); + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS +wlan_objmgr_peer_release_debug_id_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +static inline void +wlan_objmgr_peer_ref_trace(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id, + const char *func, int line) +{ + struct wlan_objmgr_trace *trace; + + trace = &peer->peer_objmgr.trace; + + if (func) + wlan_objmgr_trace_ref(&trace->references[id].head, + trace, func, line); +} + +static inline void +wlan_objmgr_peer_deref_trace(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id, + const char *func, int line) +{ + struct wlan_objmgr_trace *trace; + + trace = &peer->peer_objmgr.trace; + if (func) + wlan_objmgr_trace_ref(&trace->dereferences[id].head, + trace, func, line); +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +void wlan_objmgr_peer_get_ref_debug(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id, + const char *func, int line) +{ + if (!peer) { + obj_mgr_err("peer obj is NULL for %d", id); + QDF_ASSERT(0); + return; + } + /* Increment ref count */ + qdf_atomic_inc(&peer->peer_objmgr.ref_cnt); + wlan_objmgr_peer_get_debug_id_ref(peer, id); + + wlan_objmgr_peer_ref_trace(peer, id, func, line); + return; +} + +qdf_export_symbol(wlan_objmgr_peer_get_ref_debug); +#else +void wlan_objmgr_peer_get_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id) +{ + if (!peer) { + obj_mgr_err("peer obj is NULL for %d", id); + QDF_ASSERT(0); + return; + } + /* Increment ref count */ + qdf_atomic_inc(&peer->peer_objmgr.ref_cnt); + wlan_objmgr_peer_get_debug_id_ref(peer, id); +} + +qdf_export_symbol(wlan_objmgr_peer_get_ref); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +QDF_STATUS wlan_objmgr_peer_try_get_ref_debug(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id, + const char *func, int line) +{ + if (!peer) { + obj_mgr_err("peer obj is NULL for %d", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + wlan_peer_obj_lock(peer); + if (peer->obj_state != WLAN_OBJ_STATE_CREATED) { + wlan_peer_obj_unlock(peer); + if (peer->peer_objmgr.print_cnt++ <= + WLAN_OBJMGR_RATELIMIT_THRESH) { + uint8_t *macaddr; + + macaddr = wlan_peer_get_macaddr(peer); + obj_mgr_debug( + "peer(" QDF_MAC_ADDR_FMT ") not in Created st(%d)", + QDF_MAC_ADDR_REF(macaddr), + peer->obj_state); + } + return QDF_STATUS_E_RESOURCES; + } + + wlan_objmgr_peer_get_ref_debug(peer, id, func, line); + wlan_peer_obj_unlock(peer); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(wlan_objmgr_peer_try_get_ref_debug); +#else +QDF_STATUS wlan_objmgr_peer_try_get_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id) +{ + if (!peer) { + obj_mgr_err("peer obj is NULL for %d", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + wlan_peer_obj_lock(peer); + if (peer->obj_state != WLAN_OBJ_STATE_CREATED) { + wlan_peer_obj_unlock(peer); + if (peer->peer_objmgr.print_cnt++ <= + WLAN_OBJMGR_RATELIMIT_THRESH) { + uint8_t *macaddr; + + macaddr = wlan_peer_get_macaddr(peer); + obj_mgr_debug( + "peer(" QDF_MAC_ADDR_FMT ") not in Created st(%d)", + QDF_MAC_ADDR_REF(macaddr), + peer->obj_state); + } + return QDF_STATUS_E_RESOURCES; + } + + wlan_objmgr_peer_get_ref(peer, id); + wlan_peer_obj_unlock(peer); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(wlan_objmgr_peer_try_get_ref); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_peer_get_next_active_peer_of_psoc_debug( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_peer *peer_next = NULL; + qdf_list_node_t *psoc_node = NULL; + qdf_list_node_t *prev_psoc_node = NULL; + qdf_list_t *obj_list; + + qdf_spin_lock_bh(&peer_list->peer_list_lock); + obj_list = &peer_list->peer_hash[hash_index]; + + prev_psoc_node = &peer->psoc_peer; + while (qdf_list_peek_next(obj_list, prev_psoc_node, &psoc_node) == + QDF_STATUS_SUCCESS) { + peer_next = qdf_container_of(psoc_node, struct wlan_objmgr_peer, + psoc_peer); + + if (wlan_objmgr_peer_try_get_ref_debug(peer_next, dbg_id, + func, line) == + QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + return peer_next; + } + + prev_psoc_node = psoc_node; + } + + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + + return NULL; +} +#else +struct wlan_objmgr_peer *wlan_peer_get_next_active_peer_of_psoc( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer_next = NULL; + qdf_list_node_t *psoc_node = NULL; + qdf_list_node_t *prev_psoc_node = NULL; + qdf_list_t *obj_list; + + qdf_spin_lock_bh(&peer_list->peer_list_lock); + obj_list = &peer_list->peer_hash[hash_index]; + + prev_psoc_node = &peer->psoc_peer; + while (qdf_list_peek_next(obj_list, prev_psoc_node, &psoc_node) == + QDF_STATUS_SUCCESS) { + peer_next = qdf_container_of(psoc_node, struct wlan_objmgr_peer, + psoc_peer); + + if (wlan_objmgr_peer_try_get_ref(peer_next, dbg_id) == + QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + return peer_next; + } + + prev_psoc_node = psoc_node; + } + + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + + return NULL; +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_vdev_peer_list_peek_active_head_debug( + struct wlan_objmgr_vdev *vdev, + qdf_list_t *peer_list, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_peer *peer; + qdf_list_node_t *vdev_node = NULL; + qdf_list_node_t *prev_vdev_node = NULL; + + wlan_vdev_obj_lock(vdev); + + if (qdf_list_peek_front(peer_list, &vdev_node) != QDF_STATUS_SUCCESS) { + wlan_vdev_obj_unlock(vdev); + return NULL; + } + + do { + peer = qdf_container_of(vdev_node, struct wlan_objmgr_peer, + vdev_peer); + + if (wlan_objmgr_peer_try_get_ref_debug(peer, dbg_id, + func, line) == + QDF_STATUS_SUCCESS) { + wlan_vdev_obj_unlock(vdev); + return peer; + } + + prev_vdev_node = vdev_node; + } while (qdf_list_peek_next(peer_list, prev_vdev_node, &vdev_node) == + QDF_STATUS_SUCCESS); + + wlan_vdev_obj_unlock(vdev); + + return NULL; +} +#else +struct wlan_objmgr_peer *wlan_vdev_peer_list_peek_active_head( + struct wlan_objmgr_vdev *vdev, + qdf_list_t *peer_list, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + qdf_list_node_t *vdev_node = NULL; + qdf_list_node_t *prev_vdev_node = NULL; + + wlan_vdev_obj_lock(vdev); + + if (qdf_list_peek_front(peer_list, &vdev_node) != QDF_STATUS_SUCCESS) { + wlan_vdev_obj_unlock(vdev); + return NULL; + } + + do { + peer = qdf_container_of(vdev_node, struct wlan_objmgr_peer, + vdev_peer); + + if (wlan_objmgr_peer_try_get_ref(peer, dbg_id) == + QDF_STATUS_SUCCESS) { + wlan_vdev_obj_unlock(vdev); + return peer; + } + + prev_vdev_node = vdev_node; + } while (qdf_list_peek_next(peer_list, prev_vdev_node, &vdev_node) == + QDF_STATUS_SUCCESS); + + wlan_vdev_obj_unlock(vdev); + + return NULL; +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_peer_get_next_active_peer_of_vdev_debug( + struct wlan_objmgr_vdev *vdev, + qdf_list_t *peer_list, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_peer *peer_next; + qdf_list_node_t *vdev_node = NULL; + qdf_list_node_t *prev_vdev_node = NULL; + + if (!peer) + return NULL; + + wlan_vdev_obj_lock(vdev); + + prev_vdev_node = &peer->vdev_peer; + while (qdf_list_peek_next(peer_list, prev_vdev_node, &vdev_node) == + QDF_STATUS_SUCCESS) { + peer_next = qdf_container_of(vdev_node, struct wlan_objmgr_peer, + vdev_peer); + + if (wlan_objmgr_peer_try_get_ref_debug(peer_next, dbg_id, + func, line) == + QDF_STATUS_SUCCESS) { + wlan_vdev_obj_unlock(vdev); + return peer_next; + } + + prev_vdev_node = vdev_node; + } + + wlan_vdev_obj_unlock(vdev); + + return NULL; +} +#else +struct wlan_objmgr_peer *wlan_peer_get_next_active_peer_of_vdev( + struct wlan_objmgr_vdev *vdev, + qdf_list_t *peer_list, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer_next; + qdf_list_node_t *vdev_node = NULL; + qdf_list_node_t *prev_vdev_node = NULL; + + if (!peer) + return NULL; + + wlan_vdev_obj_lock(vdev); + + prev_vdev_node = &peer->vdev_peer; + while (qdf_list_peek_next(peer_list, prev_vdev_node, &vdev_node) == + QDF_STATUS_SUCCESS) { + peer_next = qdf_container_of(vdev_node, struct wlan_objmgr_peer, + vdev_peer); + + if (wlan_objmgr_peer_try_get_ref(peer_next, dbg_id) == + QDF_STATUS_SUCCESS) { + wlan_vdev_obj_unlock(vdev); + return peer_next; + } + + prev_vdev_node = vdev_node; + } + + wlan_vdev_obj_unlock(vdev); + + return NULL; +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_psoc_peer_list_peek_active_head_debug( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_peer *peer; + qdf_list_node_t *psoc_node = NULL; + qdf_list_node_t *prev_psoc_node = NULL; + qdf_list_t *obj_list; + + qdf_spin_lock_bh(&peer_list->peer_list_lock); + obj_list = &peer_list->peer_hash[hash_index]; + + if (qdf_list_peek_front(obj_list, &psoc_node) != QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + return NULL; + } + + do { + peer = qdf_container_of(psoc_node, struct wlan_objmgr_peer, + psoc_peer); + if (wlan_objmgr_peer_try_get_ref_debug(peer, dbg_id, + func, line) == + QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + return peer; + } + + prev_psoc_node = psoc_node; + } while (qdf_list_peek_next(obj_list, prev_psoc_node, &psoc_node) == + QDF_STATUS_SUCCESS); + + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + return NULL; +} +#else +struct wlan_objmgr_peer *wlan_psoc_peer_list_peek_active_head( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + qdf_list_node_t *psoc_node = NULL; + qdf_list_node_t *prev_psoc_node = NULL; + qdf_list_t *obj_list; + + qdf_spin_lock_bh(&peer_list->peer_list_lock); + obj_list = &peer_list->peer_hash[hash_index]; + + if (qdf_list_peek_front(obj_list, &psoc_node) != QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + return NULL; + } + + do { + peer = qdf_container_of(psoc_node, struct wlan_objmgr_peer, + psoc_peer); + if (wlan_objmgr_peer_try_get_ref(peer, dbg_id) == + QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + return peer; + } + + prev_psoc_node = psoc_node; + } while (qdf_list_peek_next(obj_list, prev_psoc_node, &psoc_node) == + QDF_STATUS_SUCCESS); + + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + return NULL; +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_psoc_peer_list_peek_head_ref_debug( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_peer *peer; + qdf_list_t *obj_list; + + qdf_spin_lock_bh(&peer_list->peer_list_lock); + obj_list = &peer_list->peer_hash[hash_index]; + + peer = wlan_psoc_peer_list_peek_head(obj_list); + + /* This API is invoked by caller, only when caller need to access the + * peer object, though object is not in active state, this API should be + * used carefully, where multiple object frees are not triggered + */ + if (peer) + wlan_objmgr_peer_get_ref_debug(peer, dbg_id, func, line); + + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + + return peer; +} +#else +struct wlan_objmgr_peer *wlan_psoc_peer_list_peek_head_ref( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + qdf_list_t *obj_list; + + qdf_spin_lock_bh(&peer_list->peer_list_lock); + obj_list = &peer_list->peer_hash[hash_index]; + + peer = wlan_psoc_peer_list_peek_head(obj_list); + + /* This API is invoked by caller, only when caller need to access the + * peer object, though object is not in active state, this API should be + * used carefully, where multiple object frees are not triggered + */ + if (peer) + wlan_objmgr_peer_get_ref(peer, dbg_id); + + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + + return peer; +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_peer_get_next_peer_of_psoc_ref_debug( + struct wlan_peer_list *peer_list, uint8_t hash_index, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + qdf_list_t *obj_list; + struct wlan_objmgr_peer *peer_next; + + qdf_spin_lock_bh(&peer_list->peer_list_lock); + obj_list = &peer_list->peer_hash[hash_index]; + + peer_next = wlan_peer_get_next_peer_of_psoc(obj_list, peer); + /* This API is invoked by caller, only when caller need to access the + * peer object, though object is not in active state, this API should be + * used carefully, where multiple free on object are not triggered + */ + if (peer_next) + wlan_objmgr_peer_get_ref_debug(peer_next, dbg_id, func, line); + + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + + return peer_next; +} +#else +struct wlan_objmgr_peer *wlan_peer_get_next_peer_of_psoc_ref( + struct wlan_peer_list *peer_list, uint8_t hash_index, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id) +{ + qdf_list_t *obj_list; + struct wlan_objmgr_peer *peer_next; + + qdf_spin_lock_bh(&peer_list->peer_list_lock); + obj_list = &peer_list->peer_hash[hash_index]; + + peer_next = wlan_peer_get_next_peer_of_psoc(obj_list, peer); + /* This API is invoked by caller, only when caller need to access the + * peer object, though object is not in active state, this API should be + * used carefully, where multiple free on object are not triggered + */ + if (peer_next) + wlan_objmgr_peer_get_ref(peer_next, dbg_id); + + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + + return peer_next; +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +void wlan_objmgr_peer_release_ref_debug(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id, + const char *func, int line) +{ + QDF_STATUS status; + + if (!peer) { + obj_mgr_err("peer obj is NULL for %d", id); + QDF_ASSERT(0); + return; + } + + if (!qdf_atomic_read(&peer->peer_objmgr.ref_cnt)) { + uint8_t *macaddr; + + macaddr = wlan_peer_get_macaddr(peer); + obj_mgr_err("peer("QDF_MAC_ADDR_FMT") ref cnt is 0", + QDF_MAC_ADDR_REF(macaddr)); + WLAN_OBJMGR_BUG(0); + return; + } + + status = wlan_objmgr_peer_release_debug_id_ref(peer, id); + if (QDF_IS_STATUS_ERROR(status)) + return; + + wlan_objmgr_peer_deref_trace(peer, id, func, line); + /* Provide synchronization from the access to add peer + * to logically deleted peer list. + */ + wlan_peer_obj_lock(peer); + /* Decrement ref count, free peer object, if ref count == 0 */ + if (qdf_atomic_dec_and_test(&peer->peer_objmgr.ref_cnt)) { + wlan_peer_obj_unlock(peer); + wlan_objmgr_peer_obj_destroy(peer); + } else { + wlan_peer_obj_unlock(peer); + } + + return; +} + +qdf_export_symbol(wlan_objmgr_peer_release_ref_debug); +#else +void wlan_objmgr_peer_release_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id) +{ + QDF_STATUS status; + + if (!peer) { + obj_mgr_err("peer obj is NULL for %d", id); + QDF_ASSERT(0); + return; + } + + if (!qdf_atomic_read(&peer->peer_objmgr.ref_cnt)) { + uint8_t *macaddr; + + macaddr = wlan_peer_get_macaddr(peer); + obj_mgr_err("peer("QDF_MAC_ADDR_FMT") ref cnt is 0", + QDF_MAC_ADDR_REF(macaddr)); + WLAN_OBJMGR_BUG(0); + return; + } + + status = wlan_objmgr_peer_release_debug_id_ref(peer, id); + if (QDF_IS_STATUS_ERROR(status)) + return; + + /* Provide synchronization from the access to add peer + * to logically deleted peer list. + */ + wlan_peer_obj_lock(peer); + /* Decrement ref count, free peer object, if ref count == 0 */ + if (qdf_atomic_dec_and_test(&peer->peer_objmgr.ref_cnt)) { + wlan_peer_obj_unlock(peer); + wlan_objmgr_peer_obj_destroy(peer); + } else { + wlan_peer_obj_unlock(peer); + } +} + +qdf_export_symbol(wlan_objmgr_peer_release_ref); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_DEBUG +void +wlan_objmgr_print_peer_ref_ids(struct wlan_objmgr_peer *peer, + QDF_TRACE_LEVEL log_level) +{ + wlan_objmgr_print_ref_ids(peer->peer_objmgr.ref_id_dbg, log_level); +} + +uint32_t +wlan_objmgr_peer_get_comp_ref_cnt(struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id) +{ + return qdf_atomic_read(&peer->peer_objmgr.ref_id_dbg[id]); +} +#else +void +wlan_objmgr_print_peer_ref_ids(struct wlan_objmgr_peer *peer, + QDF_TRACE_LEVEL log_level) +{ + uint32_t pending_ref; + + pending_ref = qdf_atomic_read(&peer->peer_objmgr.ref_cnt); + obj_mgr_log_level(log_level, "Pending refs -- %d", pending_ref); +} + +uint32_t +wlan_objmgr_peer_get_comp_ref_cnt(struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id) +{ + return 0; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_psoc_obj.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_psoc_obj.c new file mode 100644 index 0000000000000000000000000000000000000000..d5d2ce5d12fd39325122af4e181fc267f4683a5e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_psoc_obj.c @@ -0,0 +1,3016 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Public APIs to perform operations on Global objects + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_objmgr_global_obj_i.h" +#include "wlan_objmgr_psoc_obj_i.h" +#include "wlan_objmgr_pdev_obj_i.h" +#include "wlan_objmgr_vdev_obj_i.h" + +/** + ** APIs to Create/Delete Global object APIs + */ +static QDF_STATUS wlan_objmgr_psoc_object_status( + struct wlan_objmgr_psoc *psoc) +{ + uint8_t id; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + wlan_psoc_obj_lock(psoc); + /* Iterate through all components to derive the object status */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + /* If component disabled, Ignore */ + if (psoc->obj_status[id] == QDF_STATUS_COMP_DISABLED) + continue; + /* If component operates in Async, status is Partially created, + * break + */ + else if (psoc->obj_status[id] == QDF_STATUS_COMP_ASYNC) { + if (!psoc->soc_comp_priv_obj[id]) { + status = QDF_STATUS_COMP_ASYNC; + break; + } + /* + * If component failed to allocate its object, treat it as + * failure, complete object need to be cleaned up + */ + } else if ((psoc->obj_status[id] == QDF_STATUS_E_NOMEM) || + (psoc->obj_status[id] == QDF_STATUS_E_FAILURE)) { + status = QDF_STATUS_E_FAILURE; + break; + } + } + wlan_psoc_obj_unlock(psoc); + + return status; +} + +static void wlan_objmgr_psoc_peer_list_init(struct wlan_peer_list *peer_list) +{ + uint8_t i; + + qdf_spinlock_create(&peer_list->peer_list_lock); + for (i = 0; i < WLAN_PEER_HASHSIZE; i++) + qdf_list_create(&peer_list->peer_hash[i], + WLAN_UMAC_PSOC_MAX_PEERS + + WLAN_MAX_PSOC_TEMP_PEERS); +} + +static void wlan_objmgr_psoc_peer_list_deinit(struct wlan_peer_list *peer_list) +{ + uint8_t i; + + /* deinit the lock */ + qdf_spinlock_destroy(&peer_list->peer_list_lock); + for (i = 0; i < WLAN_PEER_HASHSIZE; i++) + qdf_list_destroy(&peer_list->peer_hash[i]); +} + +static QDF_STATUS wlan_objmgr_psoc_obj_free(struct wlan_objmgr_psoc *psoc) +{ + /* Detach PSOC from global object's psoc list */ + if (wlan_objmgr_psoc_object_detach(psoc) == QDF_STATUS_E_FAILURE) { + obj_mgr_err("PSOC object detach failed"); + return QDF_STATUS_E_FAILURE; + } + wlan_objmgr_psoc_peer_list_deinit(&psoc->soc_objmgr.peer_list); + + qdf_spinlock_destroy(&psoc->psoc_lock); + qdf_mem_free(psoc); + + return QDF_STATUS_SUCCESS; +} + +struct wlan_objmgr_psoc *wlan_objmgr_psoc_obj_create(uint32_t phy_version, + WLAN_DEV_TYPE dev_type) +{ + uint8_t id; + struct wlan_objmgr_psoc *psoc = NULL; + wlan_objmgr_psoc_create_handler handler; + wlan_objmgr_psoc_status_handler stat_handler; + struct wlan_objmgr_psoc_objmgr *objmgr; + QDF_STATUS obj_status; + void *arg; + + psoc = qdf_mem_malloc(sizeof(*psoc)); + if (!psoc) + return NULL; + + psoc->obj_state = WLAN_OBJ_STATE_ALLOCATED; + qdf_spinlock_create(&psoc->psoc_lock); + /* Initialize with default values */ + objmgr = &psoc->soc_objmgr; + objmgr->wlan_pdev_count = 0; + objmgr->wlan_vdev_count = 0; + objmgr->max_vdev_count = WLAN_UMAC_PSOC_MAX_VDEVS; + objmgr->wlan_peer_count = 0; + objmgr->temp_peer_count = 0; + objmgr->max_peer_count = WLAN_UMAC_PSOC_MAX_PEERS; + qdf_atomic_init(&objmgr->ref_cnt); + objmgr->print_cnt = 0; + /* set phy version, dev_type in psoc */ + wlan_psoc_set_nif_phy_version(psoc, phy_version); + wlan_psoc_set_dev_type(psoc, dev_type); + /* Initialize peer list */ + wlan_objmgr_psoc_peer_list_init(&objmgr->peer_list); + wlan_objmgr_psoc_get_ref(psoc, WLAN_OBJMGR_ID); + /* Invoke registered create handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->psoc_create_handler[id]; + arg = g_umac_glb_obj->psoc_create_handler_arg[id]; + if (handler) + psoc->obj_status[id] = handler(psoc, arg); + else + psoc->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + /* Derive object status */ + obj_status = wlan_objmgr_psoc_object_status(psoc); + + if (obj_status == QDF_STATUS_SUCCESS) { + /* Object status is SUCCESS, Object is created */ + psoc->obj_state = WLAN_OBJ_STATE_CREATED; + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + stat_handler = g_umac_glb_obj->psoc_status_handler[id]; + arg = g_umac_glb_obj->psoc_status_handler_arg[id]; + if (stat_handler) + stat_handler(psoc, arg, + QDF_STATUS_SUCCESS); + } + } else if (obj_status == QDF_STATUS_COMP_ASYNC) { + /* + * Few components operates in Asynchrous communction + * Object state partially created + */ + psoc->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /* Component object failed to be created, clean up the object */ + obj_mgr_err("PSOC component objects allocation failed"); + /* Clean up the psoc */ + wlan_objmgr_psoc_obj_delete(psoc); + return NULL; + } + + if (wlan_objmgr_psoc_object_attach(psoc) != + QDF_STATUS_SUCCESS) { + obj_mgr_err("PSOC object attach failed"); + wlan_objmgr_psoc_obj_delete(psoc); + return NULL; + } + + obj_mgr_info("Created psoc %d", psoc->soc_objmgr.psoc_id); + + return psoc; +} +qdf_export_symbol(wlan_objmgr_psoc_obj_create); + +static QDF_STATUS wlan_objmgr_psoc_obj_destroy(struct wlan_objmgr_psoc *psoc) +{ + uint8_t id; + wlan_objmgr_psoc_destroy_handler handler; + QDF_STATUS obj_status; + void *arg; + + if (!psoc) { + obj_mgr_err("psoc is NULL"); + return QDF_STATUS_E_FAILURE; + } + wlan_objmgr_notify_destroy(psoc, WLAN_PSOC_OP); + + wlan_print_psoc_info(psoc); + obj_mgr_info("Physically deleting psoc %d", psoc->soc_objmgr.psoc_id); + + if (psoc->obj_state != WLAN_OBJ_STATE_LOGICALLY_DELETED) { + obj_mgr_err("PSOC object delete is not invoked obj_state:%d", + psoc->obj_state); + WLAN_OBJMGR_BUG(0); + } + + /* Invoke registered create handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->psoc_destroy_handler[id]; + arg = g_umac_glb_obj->psoc_destroy_handler_arg[id]; + if (handler && + (psoc->obj_status[id] == QDF_STATUS_SUCCESS || + psoc->obj_status[id] == QDF_STATUS_COMP_ASYNC)) + psoc->obj_status[id] = handler(psoc, arg); + else + psoc->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + /* Derive object status */ + obj_status = wlan_objmgr_psoc_object_status(psoc); + + if (obj_status == QDF_STATUS_E_FAILURE) { + obj_mgr_err("PSOC component object free failed"); + /* Ideally should not happen + * This leads to memleak, BUG_ON to find which component + * delete notification failed and fix it. + */ + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + /* Deletion is in progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + psoc->obj_state = WLAN_OBJ_STATE_PARTIALLY_DELETED; + return QDF_STATUS_COMP_ASYNC; + } + + /* Free psoc object */ + return wlan_objmgr_psoc_obj_free(psoc); +} + + +QDF_STATUS wlan_objmgr_psoc_obj_delete(struct wlan_objmgr_psoc *psoc) +{ + uint8_t print_idx; + + if (!psoc) { + obj_mgr_err("psoc is NULL"); + return QDF_STATUS_E_FAILURE; + } + + obj_mgr_info("Logically deleting psoc %d", psoc->soc_objmgr.psoc_id); + + print_idx = qdf_get_pidx(); + wlan_objmgr_print_ref_ids(psoc->soc_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + /* + * Update PSOC object state to LOGICALLY DELETED + * It prevents further access of this object + */ + wlan_psoc_obj_lock(psoc); + psoc->obj_state = WLAN_OBJ_STATE_LOGICALLY_DELETED; + wlan_psoc_obj_unlock(psoc); + wlan_objmgr_notify_log_delete(psoc, WLAN_PSOC_OP); + wlan_objmgr_psoc_release_ref(psoc, WLAN_OBJMGR_ID); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_psoc_obj_delete); + +QDF_STATUS wlan_objmgr_psoc_component_obj_attach( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status) +{ + wlan_objmgr_psoc_status_handler stat_handler; + void *arg = NULL; + QDF_STATUS obj_status; + uint8_t i; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_psoc_obj_lock(psoc); + /* If there is a valid entry, return failure */ + if (psoc->soc_comp_priv_obj[id]) { + wlan_psoc_obj_unlock(psoc); + return QDF_STATUS_E_FAILURE; + } + /* Save component's pointer and status */ + psoc->soc_comp_priv_obj[id] = comp_priv_obj; + psoc->obj_status[id] = status; + + wlan_psoc_obj_unlock(psoc); + + if (psoc->obj_state != WLAN_OBJ_STATE_PARTIALLY_CREATED) + return QDF_STATUS_SUCCESS; + /* If PSOC object status is partially created means, this API is + * invoked with differnt context, this block should be executed for + * async components only + */ + /* Derive status */ + obj_status = wlan_objmgr_psoc_object_status(psoc); + /* STATUS_SUCCESS means, object is CREATED */ + if (obj_status == QDF_STATUS_SUCCESS) + psoc->obj_state = WLAN_OBJ_STATE_CREATED; + /* update state as CREATION failed, caller has to delete the + * PSOC object + */ + else if (obj_status == QDF_STATUS_E_FAILURE) + psoc->obj_state = WLAN_OBJ_STATE_CREATION_FAILED; + + /* Notify components about the CREATION success/failure */ + if ((obj_status == QDF_STATUS_SUCCESS) || + (obj_status == QDF_STATUS_E_FAILURE)) { + /* nofity object status */ + for (i = 0; i < WLAN_UMAC_MAX_COMPONENTS; i++) { + stat_handler = g_umac_glb_obj->psoc_status_handler[i]; + arg = g_umac_glb_obj->psoc_status_handler_arg[i]; + if (stat_handler) + stat_handler(psoc, arg, obj_status); + } + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_psoc_component_obj_attach); + +QDF_STATUS wlan_objmgr_psoc_component_obj_detach( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id, + void *comp_priv_obj) +{ + QDF_STATUS obj_status; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_psoc_obj_lock(psoc); + /* If there is a valid entry, return failure */ + if (psoc->soc_comp_priv_obj[id] != comp_priv_obj) { + psoc->obj_status[id] = QDF_STATUS_E_FAILURE; + wlan_psoc_obj_unlock(psoc); + return QDF_STATUS_E_FAILURE; + } + /* Reset pointers to NULL, update the status*/ + psoc->soc_comp_priv_obj[id] = NULL; + psoc->obj_status[id] = QDF_STATUS_SUCCESS; + wlan_psoc_obj_unlock(psoc); + + /* If PSOC object status is partially created means, this API is + * invoked with differnt context, this block should be executed for + * async components only + */ + if ((psoc->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) || + (psoc->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS)) { + /* Derive object status */ + obj_status = wlan_objmgr_psoc_object_status(psoc); + if (obj_status == QDF_STATUS_SUCCESS) { + /* Update the status as Deleted, if full object + * deletion is in progress + */ + if (psoc->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) + psoc->obj_state = WLAN_OBJ_STATE_DELETED; + + /* Move to creation state, since this component + * deletion alone requested + */ + if (psoc->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + psoc->obj_state = WLAN_OBJ_STATE_CREATED; + /* Object status is failure */ + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /* Update the status as Deletion failed, if full object + * deletion is in progress + */ + if (psoc->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) + psoc->obj_state = + WLAN_OBJ_STATE_DELETION_FAILED; + + /* Move to creation state, since this component + * deletion alone requested (do not block other + * components) + */ + if (psoc->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + psoc->obj_state = WLAN_OBJ_STATE_CREATED; + } + + /* Delete psoc object */ + if ((obj_status == QDF_STATUS_SUCCESS) && + (psoc->obj_state == WLAN_OBJ_STATE_DELETED)) { + /* Free psoc object */ + return wlan_objmgr_psoc_obj_free(psoc); + } + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_psoc_component_obj_detach); + +QDF_STATUS wlan_objmgr_iterate_obj_list( + struct wlan_objmgr_psoc *psoc, + enum wlan_objmgr_obj_type obj_type, + wlan_objmgr_op_handler handler, + void *arg, uint8_t lock_free_op, + wlan_objmgr_ref_dbgid dbg_id) +{ + uint16_t obj_id; + uint8_t i; + struct wlan_objmgr_psoc_objmgr *objmgr = &psoc->soc_objmgr; + struct wlan_peer_list *peer_list; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_next; + uint16_t max_vdev_cnt; + + switch (obj_type) { + case WLAN_PDEV_OP: + /* Iterate through PDEV list, invoke handler for each pdev */ + for (obj_id = 0; obj_id < WLAN_UMAC_MAX_PDEVS; obj_id++) { + pdev = wlan_objmgr_get_pdev_by_id(psoc, obj_id, dbg_id); + if (pdev) { + handler(psoc, (void *)pdev, arg); + wlan_objmgr_pdev_release_ref(pdev, dbg_id); + } + } + break; + case WLAN_VDEV_OP: + /* Iterate through VDEV list, invoke handler for each vdev */ + max_vdev_cnt = wlan_psoc_get_max_vdev_count(psoc); + for (obj_id = 0; obj_id < max_vdev_cnt; obj_id++) { + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + obj_id, dbg_id); + if (vdev) { + handler(psoc, vdev, arg); + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + } + } + break; + case WLAN_PEER_OP: + /* Iterate through PEER list, invoke handler for each peer */ + peer_list = &objmgr->peer_list; + /* Since peer list has sublist, iterate through sublists */ + for (i = 0; i < WLAN_PEER_HASHSIZE; i++) { + peer = wlan_psoc_peer_list_peek_active_head(peer_list, + i, dbg_id); + while (peer) { + handler(psoc, (void *)peer, arg); + /* Get next peer */ + peer_next = + wlan_peer_get_next_active_peer_of_psoc( + peer_list, i, peer, dbg_id); + wlan_objmgr_peer_release_ref(peer, dbg_id); + peer = peer_next; + } + } + break; + default: + break; + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_iterate_obj_list); + +QDF_STATUS wlan_objmgr_iterate_obj_list_all( + struct wlan_objmgr_psoc *psoc, + enum wlan_objmgr_obj_type obj_type, + wlan_objmgr_op_handler handler, + void *arg, uint8_t lock_free_op, + wlan_objmgr_ref_dbgid dbg_id) +{ + uint16_t obj_id; + uint8_t i; + struct wlan_objmgr_psoc_objmgr *objmgr = &psoc->soc_objmgr; + struct wlan_peer_list *peer_list; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_next; + uint16_t max_vdev_cnt; + + /* If caller requests for lock free opeation, do not acquire, + * handler will handle the synchronization + */ + + switch (obj_type) { + case WLAN_PDEV_OP: + /* Iterate through PDEV list, invoke handler for each pdev */ + for (obj_id = 0; obj_id < WLAN_UMAC_MAX_PDEVS; obj_id++) { + pdev = wlan_objmgr_get_pdev_by_id_no_state(psoc, + obj_id, dbg_id); + if (pdev) { + handler(psoc, (void *)pdev, arg); + wlan_objmgr_pdev_release_ref(pdev, dbg_id); + } + } + break; + case WLAN_VDEV_OP: + /* Iterate through VDEV list, invoke handler for each vdev */ + max_vdev_cnt = wlan_psoc_get_max_vdev_count(psoc); + for (obj_id = 0; obj_id < max_vdev_cnt; obj_id++) { + vdev = wlan_objmgr_get_vdev_by_id_from_psoc_no_state( + psoc, obj_id, dbg_id); + if (vdev) { + handler(psoc, vdev, arg); + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + } + } + break; + case WLAN_PEER_OP: + /* Iterate through PEER list, invoke handler for each peer */ + peer_list = &objmgr->peer_list; + /* Since peer list has sublist, iterate through sublists */ + for (i = 0; i < WLAN_PEER_HASHSIZE; i++) { + peer = wlan_psoc_peer_list_peek_head_ref(peer_list, i, + dbg_id); + + while (peer) { + handler(psoc, (void *)peer, arg); + /* Get next peer */ + peer_next = wlan_peer_get_next_peer_of_psoc_ref( + peer_list, i, + peer, dbg_id); + wlan_objmgr_peer_release_ref(peer, dbg_id); + peer = peer_next; + } + } + break; + default: + break; + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_iterate_obj_list_all); + +/** + * wlan_objmgr_iterate_obj_list_all_noref() - iterate through all psoc objects + * without taking ref + * @psoc: PSOC object + * @obj_type: PDEV_OP/VDEV_OP/PEER_OP + * @handler: the handler will be called for each object of requested type + * the handler should be implemented to perform required operation + * @arg: agruments passed by caller + * + * API to be used for performing the operations on all PDEV/VDEV/PEER objects + * of psoc with lock protected + * + * Return: SUCCESS/FAILURE + */ +static QDF_STATUS wlan_objmgr_iterate_obj_list_all_noref( + struct wlan_objmgr_psoc *psoc, + enum wlan_objmgr_obj_type obj_type, + wlan_objmgr_op_handler handler, + void *arg) +{ + uint16_t obj_id; + uint8_t i; + struct wlan_objmgr_psoc_objmgr *objmgr = &psoc->soc_objmgr; + struct wlan_peer_list *peer_list; + qdf_list_t *obj_list; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_next; + uint16_t max_vdev_cnt; + + /* If caller requests for lock free opeation, do not acquire, + * handler will handle the synchronization + */ + wlan_psoc_obj_lock(psoc); + + switch (obj_type) { + case WLAN_PDEV_OP: + /* Iterate through PDEV list, invoke handler for each pdev */ + for (obj_id = 0; obj_id < WLAN_UMAC_MAX_PDEVS; obj_id++) { + pdev = objmgr->wlan_pdev_list[obj_id]; + if (pdev) + handler(psoc, (void *)pdev, arg); + } + break; + case WLAN_VDEV_OP: + /* Iterate through VDEV list, invoke handler for each vdev */ + max_vdev_cnt = wlan_psoc_get_max_vdev_count(psoc); + for (obj_id = 0; obj_id < max_vdev_cnt; obj_id++) { + vdev = objmgr->wlan_vdev_list[obj_id]; + if (vdev) + handler(psoc, vdev, arg); + } + break; + case WLAN_PEER_OP: + /* Iterate through PEER list, invoke handler for each peer */ + peer_list = &objmgr->peer_list; + /* psoc lock should be taken before list lock */ + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Since peer list has sublist, iterate through sublists */ + for (i = 0; i < WLAN_PEER_HASHSIZE; i++) { + obj_list = &peer_list->peer_hash[i]; + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + /* Get next peer */ + peer_next = wlan_peer_get_next_peer_of_psoc( + obj_list, peer); + handler(psoc, (void *)peer, arg); + peer = peer_next; + } + } + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + break; + default: + break; + } + wlan_psoc_obj_unlock(psoc); + + return QDF_STATUS_SUCCESS; +} + +static void wlan_objmgr_psoc_peer_delete(struct wlan_objmgr_psoc *psoc, + void *obj, void *args) +{ + struct wlan_objmgr_peer *peer = (struct wlan_objmgr_peer *)obj; + + wlan_objmgr_peer_obj_delete(peer); +} + +static void wlan_objmgr_psoc_vdev_delete(struct wlan_objmgr_psoc *psoc, + void *obj, void *args) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)obj; + + wlan_objmgr_vdev_obj_delete(vdev); +} + +static void wlan_objmgr_psoc_pdev_delete(struct wlan_objmgr_psoc *psoc, + void *obj, void *args) +{ + struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)obj; + + wlan_objmgr_pdev_obj_delete(pdev); +} + +QDF_STATUS wlan_objmgr_free_all_objects_per_psoc( + struct wlan_objmgr_psoc *psoc) +{ + /* Free all peers */ + wlan_objmgr_iterate_obj_list(psoc, WLAN_PEER_OP, + wlan_objmgr_psoc_peer_delete, NULL, 1, + WLAN_OBJMGR_ID); + /* Free all vdevs */ + wlan_objmgr_iterate_obj_list(psoc, WLAN_VDEV_OP, + wlan_objmgr_psoc_vdev_delete, NULL, 1, + WLAN_OBJMGR_ID); + /* Free all PDEVs */ + wlan_objmgr_iterate_obj_list(psoc, WLAN_PDEV_OP, + wlan_objmgr_psoc_pdev_delete, NULL, 1, + WLAN_OBJMGR_ID); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_trigger_psoc_comp_priv_object_creation( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_psoc_create_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* Component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_psoc_obj_lock(psoc); + /* If component object is already created, delete old + * component object, then invoke creation + */ + if (psoc->soc_comp_priv_obj[id]) { + wlan_psoc_obj_unlock(psoc); + return QDF_STATUS_E_FAILURE; + } + wlan_psoc_obj_unlock(psoc); + /* Invoke registered create handlers */ + handler = g_umac_glb_obj->psoc_create_handler[id]; + arg = g_umac_glb_obj->psoc_create_handler_arg[id]; + if (handler) + psoc->obj_status[id] = handler(psoc, arg); + else + return QDF_STATUS_E_FAILURE; + + /* If object status is created, then only handle this object status */ + if (psoc->obj_state == WLAN_OBJ_STATE_CREATED) { + /* Derive object status */ + obj_status = wlan_objmgr_psoc_object_status(psoc); + /* Move PSOC object state to Partially created state */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + /*TODO atomic */ + psoc->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + } + } + + return obj_status; +} + +QDF_STATUS wlan_objmgr_trigger_psoc_comp_priv_object_deletion( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_psoc_destroy_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_psoc_obj_lock(psoc); + /* Component object was never created, invalid operation */ + if (!psoc->soc_comp_priv_obj[id]) { + wlan_psoc_obj_unlock(psoc); + return QDF_STATUS_E_FAILURE; + } + wlan_psoc_obj_unlock(psoc); + /* Invoke registered create handlers */ + handler = g_umac_glb_obj->psoc_destroy_handler[id]; + arg = g_umac_glb_obj->psoc_destroy_handler_arg[id]; + if (handler) + psoc->obj_status[id] = handler(psoc, arg); + else + return QDF_STATUS_E_FAILURE; + + /* If object status is created, then only handle this object status */ + if (psoc->obj_state == WLAN_OBJ_STATE_CREATED) { + obj_status = wlan_objmgr_psoc_object_status(psoc); + /* move object state to DEL progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) + psoc->obj_state = WLAN_OBJ_STATE_COMP_DEL_PROGRESS; + } + + return obj_status; +} + +/* Util APIs */ + +QDF_STATUS wlan_objmgr_psoc_pdev_attach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t id = 0; + QDF_STATUS status; + + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* + * Derive pdev id from pdev map + * First free pdev id is assigned + */ + while ((id < WLAN_UMAC_MAX_PDEVS) && + (objmgr->wlan_pdev_id_map & (1<wlan_pdev_id_map |= (1<wlan_pdev_list[id] = pdev; + /* Increment pdev count */ + objmgr->wlan_pdev_count++; + /* save pdev id */ + pdev->pdev_objmgr.wlan_pdev_id = id; + status = QDF_STATUS_SUCCESS; + /* Inrement psoc ref count to block its free before pdev */ + wlan_objmgr_psoc_get_ref(psoc, WLAN_OBJMGR_ID); + } + wlan_psoc_obj_unlock(psoc); + + return status; +} + +QDF_STATUS wlan_objmgr_psoc_pdev_detach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t id; + + id = pdev->pdev_objmgr.wlan_pdev_id; + /* If id is invalid, return */ + if (id >= WLAN_UMAC_MAX_PDEVS) + return QDF_STATUS_E_FAILURE; + + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* Free pdev id slot */ + objmgr->wlan_pdev_id_map &= ~(1<wlan_pdev_list[id] = NULL; + objmgr->wlan_pdev_count--; + pdev->pdev_objmgr.wlan_pdev_id = 0xff; + wlan_psoc_obj_unlock(psoc); + /* Release ref count of psoc */ + wlan_objmgr_psoc_release_ref(psoc, WLAN_OBJMGR_ID); + + return QDF_STATUS_SUCCESS; +} + +struct wlan_objmgr_pdev *wlan_objmgr_get_pdev_by_id( + struct wlan_objmgr_psoc *psoc, uint8_t id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + struct wlan_objmgr_pdev *pdev = NULL; + + /* If id is invalid, return */ + if (id >= WLAN_UMAC_MAX_PDEVS) + return NULL; + + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* get pdev from pdev list */ + pdev = objmgr->wlan_pdev_list[id]; + /* Do not return object, if it is not CREATED state */ + if (pdev) { + if (wlan_objmgr_pdev_try_get_ref(pdev, dbg_id) != + QDF_STATUS_SUCCESS) + pdev = NULL; + } + + wlan_psoc_obj_unlock(psoc); + + return pdev; +} +qdf_export_symbol(wlan_objmgr_get_pdev_by_id); + +struct wlan_objmgr_pdev *wlan_objmgr_get_pdev_by_id_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + struct wlan_objmgr_pdev *pdev = NULL; + + /* If id is invalid, return */ + if (id >= WLAN_UMAC_MAX_PDEVS) + return NULL; + + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* get pdev from pdev list */ + pdev = objmgr->wlan_pdev_list[id]; + /* Do not return object, if it is not CREATED state */ + if (pdev) + wlan_objmgr_pdev_get_ref(pdev, dbg_id); + + wlan_psoc_obj_unlock(psoc); + + return pdev; +} +QDF_STATUS wlan_objmgr_psoc_vdev_attach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t id = 0; + uint8_t map_index = 0; + uint8_t map_entry_size = 32; + uint8_t adjust_ix = 0; + QDF_STATUS status; + + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* Find first free vdev id */ + while ((id < objmgr->max_vdev_count) && + (objmgr->wlan_vdev_id_map[map_index] & (1<<(id - adjust_ix)))) { + id++; + /* + * The map is two DWORDS(32 bits), so, map_index + * adjust_ix derived based on the id value + */ + if (id == ((map_index + 1) * map_entry_size)) { + map_index++; + adjust_ix = map_index * map_entry_size; + } + } + /* If no free slot, return failure */ + if (id == objmgr->max_vdev_count) { + status = QDF_STATUS_E_FAILURE; + } else { + /* set free vdev id index */ + objmgr->wlan_vdev_id_map[map_index] |= (1<<(id-adjust_ix)); + /* store vdev pointer in vdev list */ + objmgr->wlan_vdev_list[id] = vdev; + /* increment vdev counter */ + objmgr->wlan_vdev_count++; + /* save vdev id */ + vdev->vdev_objmgr.vdev_id = id; + status = QDF_STATUS_SUCCESS; + } + wlan_psoc_obj_unlock(psoc); + + return status; +} + +QDF_STATUS wlan_objmgr_psoc_vdev_detach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t id = 0; + uint8_t map_index = 0; + uint8_t map_entry_size = 32; + uint8_t adjust_ix = 0; + + id = vdev->vdev_objmgr.vdev_id; + /* Invalid vdev id */ + if (id >= wlan_psoc_get_max_vdev_count(psoc)) + return QDF_STATUS_E_FAILURE; + /* + * Derive map_index and adjust_ix to find actual DWORD + * the id map is present + */ + while ((id - adjust_ix) >= map_entry_size) { + map_index++; + adjust_ix = map_index * map_entry_size; + } + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* unset bit, to free the slot */ + objmgr->wlan_vdev_id_map[map_index] &= ~(1<<(id-adjust_ix)); + /* reset VDEV pointer to NULL in VDEV list array */ + objmgr->wlan_vdev_list[id] = NULL; + /* decrement vdev count */ + objmgr->wlan_vdev_count--; + vdev->vdev_objmgr.vdev_id = 0xff; + wlan_psoc_obj_unlock(psoc); + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_psoc_debug( + struct wlan_objmgr_psoc *psoc, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_vdev *vdev; + + /* if PSOC is NULL, return */ + if (!psoc) + return NULL; + /* vdev id is invalid */ + if (vdev_id >= wlan_psoc_get_max_vdev_count(psoc)) + return NULL; + + wlan_psoc_obj_lock(psoc); + /* retrieve vdev pointer from vdev list */ + vdev = psoc->soc_objmgr.wlan_vdev_list[vdev_id]; + if (vdev) { + if (wlan_objmgr_vdev_try_get_ref_debug(vdev, dbg_id, + func, line) != + QDF_STATUS_SUCCESS) + vdev = NULL; + } + wlan_psoc_obj_unlock(psoc); + + return vdev; +} + +qdf_export_symbol(wlan_objmgr_get_vdev_by_id_from_psoc_debug); +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_psoc( + struct wlan_objmgr_psoc *psoc, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + + /* if PSOC is NULL, return */ + if (!psoc) + return NULL; + /* vdev id is invalid */ + if (vdev_id >= wlan_psoc_get_max_vdev_count(psoc)) + return NULL; + + wlan_psoc_obj_lock(psoc); + /* retrieve vdev pointer from vdev list */ + vdev = psoc->soc_objmgr.wlan_vdev_list[vdev_id]; + if (vdev) { + if (wlan_objmgr_vdev_try_get_ref(vdev, dbg_id) != + QDF_STATUS_SUCCESS) + vdev = NULL; + } + wlan_psoc_obj_unlock(psoc); + + return vdev; +} + +qdf_export_symbol(wlan_objmgr_get_vdev_by_id_from_psoc); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_psoc_no_state_debug( + struct wlan_objmgr_psoc *psoc, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_vdev *vdev; + + /* if PSOC is NULL, return */ + if (!psoc) + return NULL; + /* vdev id is invalid */ + if (vdev_id >= wlan_psoc_get_max_vdev_count(psoc)) + return NULL; + + wlan_psoc_obj_lock(psoc); + /* retrieve vdev pointer from vdev list */ + vdev = psoc->soc_objmgr.wlan_vdev_list[vdev_id]; + if (vdev) + wlan_objmgr_vdev_get_ref_debug(vdev, dbg_id, func, line); + + wlan_psoc_obj_unlock(psoc); + + return vdev; +} + +qdf_export_symbol(wlan_objmgr_get_vdev_by_id_from_psoc_no_state_debug); +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_psoc_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + + /* if PSOC is NULL, return */ + if (!psoc) + return NULL; + /* vdev id is invalid */ + if (vdev_id >= wlan_psoc_get_max_vdev_count(psoc)) + return NULL; + + wlan_psoc_obj_lock(psoc); + /* retrieve vdev pointer from vdev list */ + vdev = psoc->soc_objmgr.wlan_vdev_list[vdev_id]; + if (vdev) + wlan_objmgr_vdev_get_ref(vdev, dbg_id); + + wlan_psoc_obj_unlock(psoc); + + return vdev; +} + +qdf_export_symbol(wlan_objmgr_get_vdev_by_id_from_psoc_no_state); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_opmode_from_psoc_debug( + struct wlan_objmgr_psoc *psoc, + enum QDF_OPMODE opmode, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_vdev *vdev = NULL; + int vdev_cnt = 0; + uint16_t max_vdev_cnt; + + /* if PSOC is NULL, return */ + if (!psoc) + return NULL; + + wlan_psoc_obj_lock(psoc); + + max_vdev_cnt = wlan_psoc_get_max_vdev_count(psoc); + /* retrieve vdev pointer from vdev list */ + while (vdev_cnt < max_vdev_cnt) { + vdev = psoc->soc_objmgr.wlan_vdev_list[vdev_cnt]; + vdev_cnt++; + if (!vdev) + continue; + wlan_vdev_obj_lock(vdev); + if (vdev->vdev_mlme.vdev_opmode == opmode) { + wlan_vdev_obj_unlock(vdev); + if (wlan_objmgr_vdev_try_get_ref_debug(vdev, dbg_id, + func, line) != + QDF_STATUS_SUCCESS) { + vdev = NULL; + continue; + } + break; + } + wlan_vdev_obj_unlock(vdev); + } + wlan_psoc_obj_unlock(psoc); + + return vdev; +} +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_opmode_from_psoc( + struct wlan_objmgr_psoc *psoc, + enum QDF_OPMODE opmode, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev = NULL; + int vdev_cnt = 0; + uint16_t max_vdev_cnt; + + /* if PSOC is NULL, return */ + if (!psoc) + return NULL; + + wlan_psoc_obj_lock(psoc); + + max_vdev_cnt = wlan_psoc_get_max_vdev_count(psoc); + /* retrieve vdev pointer from vdev list */ + while (vdev_cnt < max_vdev_cnt) { + vdev = psoc->soc_objmgr.wlan_vdev_list[vdev_cnt]; + vdev_cnt++; + if (!vdev) + continue; + wlan_vdev_obj_lock(vdev); + if (vdev->vdev_mlme.vdev_opmode == opmode) { + wlan_vdev_obj_unlock(vdev); + if (wlan_objmgr_vdev_try_get_ref(vdev, dbg_id) != + QDF_STATUS_SUCCESS) { + vdev = NULL; + continue; + } + break; + } + wlan_vdev_obj_unlock(vdev); + } + wlan_psoc_obj_unlock(psoc); + + return vdev; +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_psoc_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + + /* if PSOC is NULL, return */ + if (!psoc) + return NULL; + + if (!macaddr) + return NULL; + + pdev = wlan_objmgr_get_pdev_by_id(psoc, pdev_id, dbg_id); + if (!pdev) { + obj_mgr_err("pdev is null"); + return NULL; + } + vdev = wlan_objmgr_get_vdev_by_macaddr_from_pdev_debug(pdev, macaddr, + dbg_id, + func, line); + wlan_objmgr_pdev_release_ref(pdev, dbg_id); + + return vdev; +} + +qdf_export_symbol(wlan_objmgr_get_vdev_by_macaddr_from_psoc_debug); +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_psoc( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + + /* if PSOC is NULL, return */ + if (!psoc) + return NULL; + + if (!macaddr) + return NULL; + + pdev = wlan_objmgr_get_pdev_by_id(psoc, pdev_id, dbg_id); + if (!pdev) { + obj_mgr_err("pdev is null"); + return NULL; + } + vdev = wlan_objmgr_get_vdev_by_macaddr_from_pdev(pdev, macaddr, dbg_id); + wlan_objmgr_pdev_release_ref(pdev, dbg_id); + + return vdev; +} + +qdf_export_symbol(wlan_objmgr_get_vdev_by_macaddr_from_psoc); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev + *wlan_objmgr_get_vdev_by_macaddr_from_psoc_no_state_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + + /* if PSOC is NULL, return */ + if (!psoc) + return NULL; + + if (!macaddr) + return NULL; + + pdev = wlan_objmgr_get_pdev_by_id(psoc, pdev_id, dbg_id); + if (!pdev) { + obj_mgr_err("pdev is null"); + return NULL; + } + vdev = wlan_objmgr_get_vdev_by_macaddr_from_pdev_no_state_debug(pdev, + macaddr, + dbg_id, + func, + line); + wlan_objmgr_pdev_release_ref(pdev, dbg_id); + + return vdev; +} + +qdf_export_symbol(wlan_objmgr_get_vdev_by_macaddr_from_psoc_no_state_debug); +#else +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_psoc_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + + /* if PSOC is NULL, return */ + if (!psoc) + return NULL; + + if (!macaddr) + return NULL; + + pdev = wlan_objmgr_get_pdev_by_id(psoc, pdev_id, dbg_id); + if (!pdev) { + obj_mgr_err("pdev is null"); + return NULL; + } + vdev = wlan_objmgr_get_vdev_by_macaddr_from_pdev_no_state(pdev, macaddr, dbg_id); + wlan_objmgr_pdev_release_ref(pdev, dbg_id); + + return vdev; +} + +qdf_export_symbol(wlan_objmgr_get_vdev_by_macaddr_from_psoc_no_state); +#endif + +static void wlan_obj_psoc_peerlist_add_tail(qdf_list_t *obj_list, + struct wlan_objmgr_peer *obj) +{ + qdf_list_insert_back(obj_list, &obj->psoc_peer); +} + +static QDF_STATUS wlan_obj_psoc_peerlist_remove_peer( + qdf_list_t *obj_list, + struct wlan_objmgr_peer *peer) +{ + qdf_list_node_t *psoc_node = NULL; + + if (!peer) + return QDF_STATUS_E_FAILURE; + /* get vdev list node element */ + psoc_node = &peer->psoc_peer; + /* list is empty, return failure */ + if (qdf_list_remove_node(obj_list, psoc_node) != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wlan_peer_bssid_match(struct wlan_objmgr_peer *peer, + uint8_t *bssid) +{ + struct wlan_objmgr_vdev *vdev = wlan_peer_get_vdev(peer); + uint8_t *peer_bssid = wlan_vdev_mlme_get_macaddr(vdev); + + if (WLAN_ADDR_EQ(peer_bssid, bssid) == QDF_STATUS_SUCCESS) + return QDF_STATUS_SUCCESS; + else + return QDF_STATUS_E_FAILURE; +} + +/** + * wlan_obj_psoc_peerlist_get_peer_by_pdev_id() - get peer from + * psoc peer list + * @psoc: PSOC object + * @macaddr: MAC address + * #pdev_id: Pdev id + * + * API to finds peer object pointer by MAC addr and pdev id from hash list + * + * Return: peer pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +static struct wlan_objmgr_peer + *wlan_obj_psoc_peerlist_get_peer_by_pdev_id_debug( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t pdev_id, wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + /* For peer, macaddr is key */ + if ((WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + if (wlan_objmgr_peer_try_get_ref_debug(peer, dbg_id, + func, line) == + QDF_STATUS_SUCCESS) { + return peer; + } + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + /* Not found, return NULL */ + return NULL; +} +#else +static struct wlan_objmgr_peer *wlan_obj_psoc_peerlist_get_peer_by_pdev_id( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t pdev_id, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + /* For peer, macaddr is key */ + if ((WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + if (wlan_objmgr_peer_try_get_ref(peer, dbg_id) == + QDF_STATUS_SUCCESS) { + return peer; + } + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + /* Not found, return NULL */ + return NULL; +} +#endif + +/** + * wlan_obj_psoc_peerlist_get_peer() - get peer from psoc peer list + * @psoc: PSOC object + * @macaddr: MAC address + * + * API to finds peer object pointer by MAC addr from hash list + * + * Return: peer pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +static struct wlan_objmgr_peer *wlan_obj_psoc_peerlist_get_peer_debug( + qdf_list_t *obj_list, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + /* For peer, macaddr is key */ + if (WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) { + if (wlan_objmgr_peer_try_get_ref_debug(peer, dbg_id, + func, line) == + QDF_STATUS_SUCCESS) { + return peer; + } + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + /* Not found, return NULL */ + return NULL; +} +#else +static struct wlan_objmgr_peer *wlan_obj_psoc_peerlist_get_peer( + qdf_list_t *obj_list, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + /* For peer, macaddr is key */ + if (WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) { + if (wlan_objmgr_peer_try_get_ref(peer, dbg_id) == + QDF_STATUS_SUCCESS) { + return peer; + } + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + /* Not found, return NULL */ + return NULL; +} +#endif + +/** + * wlan_obj_psoc_peerlist_get_peer_logically_deleted() - get peer + * from psoc peer list + * @psoc: PSOC object + * @macaddr: MAC address + * + * API to finds peer object pointer of logically deleted peer + * + * Return: peer pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +static struct wlan_objmgr_peer * + wlan_obj_psoc_peerlist_get_peer_logically_deleted_debug( + qdf_list_t *obj_list, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + /* For peer, macaddr is key */ + if (WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) { + /* Return peer in logically deleted state */ + if (peer->obj_state == + WLAN_OBJ_STATE_LOGICALLY_DELETED) { + wlan_objmgr_peer_get_ref_debug(peer, dbg_id, + func, line); + + return peer; + } + + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + /* Not found, return NULL */ + return NULL; +} +#else +static struct wlan_objmgr_peer * + wlan_obj_psoc_peerlist_get_peer_logically_deleted( + qdf_list_t *obj_list, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + /* For peer, macaddr is key */ + if (WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) { + /* Return peer in logically deleted state */ + if (peer->obj_state == + WLAN_OBJ_STATE_LOGICALLY_DELETED) { + wlan_objmgr_peer_get_ref(peer, dbg_id); + + return peer; + } + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + /* Not found, return NULL */ + return NULL; +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +static struct wlan_objmgr_peer + *wlan_obj_psoc_peerlist_get_peer_by_mac_n_bssid_no_state_debug( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t *bssid, + uint8_t pdev_id, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + /* For peer, macaddr is key */ + if (WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) { + /* + * BSSID match is requested by caller, check BSSID + * (vdev mac == bssid) -- return peer + * (vdev mac != bssid) -- perform next iteration + */ + if ((wlan_peer_bssid_match(peer, bssid) == + QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + wlan_objmgr_peer_get_ref_debug(peer, dbg_id, + func, line); + + return peer; + } + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + /* Not found, return NULL */ + return NULL; +} +#else +static struct wlan_objmgr_peer + *wlan_obj_psoc_peerlist_get_peer_by_mac_n_bssid_no_state( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t *bssid, + uint8_t pdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + /* For peer, macaddr is key */ + if (WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) { + /* + * BSSID match is requested by caller, check BSSID + * (vdev mac == bssid) -- return peer + * (vdev mac != bssid) -- perform next iteration + */ + if ((wlan_peer_bssid_match(peer, bssid) == + QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + wlan_objmgr_peer_get_ref(peer, dbg_id); + + return peer; + } + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + /* Not found, return NULL */ + return NULL; +} +#endif + +/** + * wlan_obj_psoc_peerlist_get_peer_by_mac_n_bssid() - get peer + * from psoc peer + * list using mac and vdev + * self mac + * @psoc: PSOC object + * @macaddr: MAC address + * @bssid: BSSID address + * + * API to finds peer object pointer by MAC addr and BSSID from + * peer hash list, bssid check is done on matching peer + * + * Return: peer pointer + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +static struct wlan_objmgr_peer + *wlan_obj_psoc_peerlist_get_peer_by_mac_n_bssid_debug( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t *bssid, uint8_t pdev_id, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + /* For peer, macaddr is key */ + if (WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) { + /* + * BSSID match is requested by caller, check BSSID + * (vdev mac == bssid) -- return peer + * (vdev mac != bssid) -- perform next iteration + */ + if ((wlan_peer_bssid_match(peer, bssid) == + QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + if (wlan_objmgr_peer_try_get_ref_debug(peer, + dbg_id, + func, + line) + == QDF_STATUS_SUCCESS) { + return peer; + } + } + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + /* Not found, return NULL */ + return NULL; +} +#else +static struct wlan_objmgr_peer *wlan_obj_psoc_peerlist_get_peer_by_mac_n_bssid( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t *bssid, uint8_t pdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + /* For peer, macaddr is key */ + if (WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) { + /* + * BSSID match is requested by caller, check BSSID + * (vdev mac == bssid) -- return peer + * (vdev mac != bssid) -- perform next iteration + */ + if ((wlan_peer_bssid_match(peer, bssid) == + QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + if (wlan_objmgr_peer_try_get_ref(peer, dbg_id) + == QDF_STATUS_SUCCESS) { + return peer; + } + } + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + /* Not found, return NULL */ + return NULL; +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +static struct wlan_objmgr_peer *wlan_obj_psoc_peerlist_get_peer_no_state_debug( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t pdev_id, wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + /* For peer, macaddr and pdev_id is key */ + if ((WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + wlan_objmgr_peer_get_ref_debug(peer, dbg_id, func, + line); + + return peer; + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + /* Not found, return NULL */ + return NULL; +} +#else +static struct wlan_objmgr_peer *wlan_obj_psoc_peerlist_get_peer_no_state( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t pdev_id, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + /* For peer, macaddr and pdev_id is key */ + if ((WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + wlan_objmgr_peer_get_ref(peer, dbg_id); + + return peer; + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + /* Not found, return NULL */ + return NULL; +} +#endif + +/** + * wlan_obj_psoc_populate_logically_del_peerlist_by_mac_n_bssid() - + * get peer + * from psoc peer list using + * mac and vdev self mac + * @obj_list: peer object list + * @macaddr: MAC address + * @bssid: BSSID address + * @dbg_id: id of the caller + * @func: function name + * @line: line number + * + * API to finds peer object pointer by MAC addr and BSSID from + * peer hash list for a node which is in logically deleted state, + * bssid check is done on matching peer + * + * Caller to free the list allocated in this function + * + * Return: list of peer pointers + * NULL on FAILURE + */ +#ifdef WLAN_OBJMGR_REF_ID_TRACE +static qdf_list_t + *wlan_obj_psoc_populate_logically_del_peerlist_by_mac_n_bssid_debug( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t *bssid, uint8_t pdev_id, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + struct wlan_logically_del_peer *peer_list = NULL; + qdf_list_t *logical_del_peer_list = NULL; + bool lock_released = false; + + logical_del_peer_list = qdf_mem_malloc(sizeof(*logical_del_peer_list)); + if (!logical_del_peer_list) + return NULL; + + qdf_list_create(logical_del_peer_list, WLAN_UMAC_PSOC_MAX_PEERS); + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + wlan_peer_obj_lock(peer); + /* For peer, macaddr and pdev id are keys */ + if ((WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + /* + * if BSSID not NULL, + * then match is requested by caller, check BSSID + * (vdev mac == bssid) -- return peer + * (vdev mac != bssid) -- perform next iteration + */ + if ((!bssid) || + (wlan_peer_bssid_match(peer, bssid) == + QDF_STATUS_SUCCESS)) { + /* Return peer in logically deleted state */ + if ((peer->obj_state == + WLAN_OBJ_STATE_LOGICALLY_DELETED) && + qdf_atomic_read( + &peer->peer_objmgr.ref_cnt)) { + wlan_objmgr_peer_get_ref_debug(peer, + dbg_id, + func, + line); + wlan_peer_obj_unlock(peer); + lock_released = true; + + peer_list = + qdf_mem_malloc( + sizeof(struct wlan_logically_del_peer)); + if (!peer_list) { + wlan_objmgr_peer_release_ref(peer, dbg_id); + /* Lock is already released */ + WLAN_OBJMGR_BUG(0); + break; + } + + peer_list->peer = peer; + + qdf_list_insert_front( + logical_del_peer_list, + &peer_list->list); + } + } + } + + if (!lock_released) + wlan_peer_obj_unlock(peer); + + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + lock_released = false; + } + + /* Not found, return NULL */ + if (qdf_list_empty(logical_del_peer_list)) { + qdf_mem_free(logical_del_peer_list); + return NULL; + } else { + return logical_del_peer_list; + } +} +#else +static qdf_list_t + *wlan_obj_psoc_populate_logically_del_peerlist_by_mac_n_bssid( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t *bssid, uint8_t pdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + struct wlan_logically_del_peer *peer_list = NULL; + qdf_list_t *logical_del_peer_list = NULL; + bool lock_released = false; + + logical_del_peer_list = qdf_mem_malloc(sizeof(*logical_del_peer_list)); + if (!logical_del_peer_list) + return NULL; + + qdf_list_create(logical_del_peer_list, WLAN_UMAC_PSOC_MAX_PEERS); + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + wlan_peer_obj_lock(peer); + /* For peer, macaddr and pdev id are keys */ + if ((WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + /* + * if BSSID not NULL, + * then match is requested by caller, check BSSID + * (vdev mac == bssid) -- return peer + * (vdev mac != bssid) -- perform next iteration + */ + if ((!bssid) || + (wlan_peer_bssid_match(peer, bssid) == + QDF_STATUS_SUCCESS)) { + /* Return peer in logically deleted state */ + if ((peer->obj_state == + WLAN_OBJ_STATE_LOGICALLY_DELETED) && + qdf_atomic_read( + &peer->peer_objmgr.ref_cnt)) { + wlan_objmgr_peer_get_ref(peer, dbg_id); + wlan_peer_obj_unlock(peer); + lock_released = true; + + peer_list = + qdf_mem_malloc( + sizeof(struct wlan_logically_del_peer)); + if (!peer_list) { + wlan_objmgr_peer_release_ref(peer, dbg_id); + /* Lock is already released */ + WLAN_OBJMGR_BUG(0); + break; + } + + peer_list->peer = peer; + + qdf_list_insert_front( + logical_del_peer_list, + &peer_list->list); + } + } + } + + if (!lock_released) + wlan_peer_obj_unlock(peer); + + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + lock_released = false; + } + + /* Not found, return NULL */ + if (qdf_list_empty(logical_del_peer_list)) { + qdf_mem_free(logical_del_peer_list); + return NULL; + } else { + return logical_del_peer_list; + } +} +#endif + +QDF_STATUS wlan_objmgr_psoc_peer_attach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_peer_list *peer_list; + + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* Max temporary peer limit is reached, return failure */ + if (peer->peer_mlme.peer_type == WLAN_PEER_STA_TEMP) { + if (objmgr->temp_peer_count >= WLAN_MAX_PSOC_TEMP_PEERS) { + wlan_psoc_obj_unlock(psoc); + return QDF_STATUS_E_FAILURE; + } + } else { + /* Max peer limit is reached, return failure */ + if (objmgr->wlan_peer_count + >= wlan_psoc_get_max_peer_count(psoc)) { + wlan_psoc_obj_unlock(psoc); + return QDF_STATUS_E_FAILURE; + } + } + + /* Derive hash index from mac address */ + hash_index = WLAN_PEER_HASH(peer->macaddr); + peer_list = &objmgr->peer_list; + /* psoc lock should be taken before list lock */ + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* add peer to hash peer list */ + wlan_obj_psoc_peerlist_add_tail( + &peer_list->peer_hash[hash_index], + peer); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + /* Increment peer count */ + if (peer->peer_mlme.peer_type == WLAN_PEER_STA_TEMP) + objmgr->temp_peer_count++; + else + objmgr->wlan_peer_count++; + + wlan_psoc_obj_unlock(psoc); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_psoc_peer_detach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_peer_list *peer_list; + + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* if list is empty, return */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return QDF_STATUS_E_FAILURE; + } + /* Get hash index, to locate the actual peer list */ + hash_index = WLAN_PEER_HASH(peer->macaddr); + peer_list = &objmgr->peer_list; + /* psoc lock should be taken before list lock */ + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* removes the peer from peer_list */ + if (wlan_obj_psoc_peerlist_remove_peer( + &peer_list->peer_hash[hash_index], + peer) == + QDF_STATUS_E_FAILURE) { + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + obj_mgr_err("Failed to detach peer"); + return QDF_STATUS_E_FAILURE; + } + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + /* Decrement peer count */ + if (peer->peer_mlme.peer_type == WLAN_PEER_STA_TEMP) + objmgr->temp_peer_count--; + else + objmgr->wlan_peer_count--; + wlan_psoc_obj_unlock(psoc); + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_objmgr_get_peer_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + if (pdev_id >= WLAN_UMAC_MAX_PDEVS) + QDF_ASSERT(0); + + if (!macaddr) + return NULL; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_by_pdev_id_debug( + &peer_list->peer_hash[hash_index], macaddr, + pdev_id, dbg_id, func, line); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} + +qdf_export_symbol(wlan_objmgr_get_peer_debug); +#else +struct wlan_objmgr_peer *wlan_objmgr_get_peer( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + if (pdev_id >= WLAN_UMAC_MAX_PDEVS) + QDF_ASSERT(0); + + if (!macaddr) + return NULL; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_by_pdev_id( + &peer_list->peer_hash[hash_index], macaddr, pdev_id, dbg_id); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} + +qdf_export_symbol(wlan_objmgr_get_peer); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac_debug( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + if (!macaddr) + return NULL; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_debug( + &peer_list->peer_hash[hash_index], + macaddr, dbg_id, func, line); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} + +qdf_export_symbol(wlan_objmgr_get_peer_by_mac_debug); +#else +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + if (!macaddr) + return NULL; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer( + &peer_list->peer_hash[hash_index], macaddr, dbg_id); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} + +qdf_export_symbol(wlan_objmgr_get_peer_by_mac); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_objmgr_get_peer_logically_deleted_debug( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_logically_deleted_debug( + &peer_list->peer_hash[hash_index], macaddr, dbg_id, + func, line); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} +#else +struct wlan_objmgr_peer *wlan_objmgr_get_peer_logically_deleted( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_logically_deleted( + &peer_list->peer_hash[hash_index], macaddr, dbg_id); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac_n_vdev_no_state_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_by_mac_n_bssid_no_state_debug( + &peer_list->peer_hash[hash_index], macaddr, bssid, + pdev_id, dbg_id, func, line); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} + +qdf_export_symbol(wlan_objmgr_get_peer_by_mac_n_vdev_no_state_debug); +#else +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac_n_vdev_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_by_mac_n_bssid_no_state( + &peer_list->peer_hash[hash_index], macaddr, bssid, + pdev_id, dbg_id); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} + +qdf_export_symbol(wlan_objmgr_get_peer_by_mac_n_vdev_no_state); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac_n_vdev_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_by_mac_n_bssid_debug( + &peer_list->peer_hash[hash_index], macaddr, bssid, + pdev_id, dbg_id, func, line); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} + +qdf_export_symbol(wlan_objmgr_get_peer_by_mac_n_vdev_debug); +#else +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac_n_vdev( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_by_mac_n_bssid( + &peer_list->peer_hash[hash_index], macaddr, bssid, + pdev_id, dbg_id); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} + +qdf_export_symbol(wlan_objmgr_get_peer_by_mac_n_vdev); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_objmgr_get_peer_nolock_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) + return NULL; + + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_by_pdev_id_debug( + &peer_list->peer_hash[hash_index], macaddr, + pdev_id, dbg_id, func, line); + + return peer; +} + +qdf_export_symbol(wlan_objmgr_get_peer_nolock_debug); +#else +struct wlan_objmgr_peer *wlan_objmgr_get_peer_nolock( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) + return NULL; + + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_by_pdev_id( + &peer_list->peer_hash[hash_index], macaddr, pdev_id, dbg_id); + + return peer; +} + +qdf_export_symbol(wlan_objmgr_get_peer_nolock); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_peer *wlan_objmgr_get_peer_no_state_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_no_state_debug( + &peer_list->peer_hash[hash_index], macaddr, + pdev_id, dbg_id, func, line); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} + +qdf_export_symbol(wlan_objmgr_get_peer_no_state_debug); +#else +struct wlan_objmgr_peer *wlan_objmgr_get_peer_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_no_state( + &peer_list->peer_hash[hash_index], macaddr, pdev_id, dbg_id); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} + +qdf_export_symbol(wlan_objmgr_get_peer_no_state); +#endif + +/** + * wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev() - + * get peer from psoc + * peer list using + * mac and vdev + * self mac + * @psoc: PSOC object + * @pdev_id: Pdev id + * @macaddr: MAC address + * @bssid: BSSID address. NULL mac means search all. + * @dbg_id: id of the caller + * @func: function name + * @line: line number + * + * API to finds peer object pointer by MAC addr and BSSID from + * peer hash list, bssid check is done on matching peer + * + * Return: list of peer pointer pointers + * NULL on FAILURE + */ + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +qdf_list_t *wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev_debug( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_peer_list *peer_list = NULL; + qdf_list_t *logical_del_peer_list = NULL; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + + /* Iterate through peer list, get peer */ + logical_del_peer_list = + wlan_obj_psoc_populate_logically_del_peerlist_by_mac_n_bssid_debug( + &peer_list->peer_hash[hash_index], macaddr, + bssid, pdev_id, dbg_id, func, line); + + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return logical_del_peer_list; +} + +qdf_export_symbol(wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev_debug); +#else +qdf_list_t *wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_peer_list *peer_list = NULL; + qdf_list_t *logical_del_peer_list = NULL; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + + /* Iterate through peer list, get peer */ + logical_del_peer_list = + wlan_obj_psoc_populate_logically_del_peerlist_by_mac_n_bssid( + &peer_list->peer_hash[hash_index], macaddr, + bssid, pdev_id, dbg_id); + + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return logical_del_peer_list; +} + +qdf_export_symbol(wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev); +#endif + +void *wlan_objmgr_psoc_get_comp_private_obj(struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id) +{ + void *comp_private_obj; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + QDF_BUG(0); + return NULL; + } + + if (!psoc) { + QDF_BUG(0); + return NULL; + } + + comp_private_obj = psoc->soc_comp_priv_obj[id]; + + return comp_private_obj; +} +qdf_export_symbol(wlan_objmgr_psoc_get_comp_private_obj); + +void wlan_objmgr_psoc_get_ref(struct wlan_objmgr_psoc *psoc, + wlan_objmgr_ref_dbgid id) +{ + if (!psoc) { + obj_mgr_err("psoc obj is NULL for id:%d", id); + QDF_ASSERT(0); + return; + } + /* Increment ref count */ + qdf_atomic_inc(&psoc->soc_objmgr.ref_cnt); + qdf_atomic_inc(&psoc->soc_objmgr.ref_id_dbg[id]); + return; +} +qdf_export_symbol(wlan_objmgr_psoc_get_ref); + +QDF_STATUS wlan_objmgr_psoc_try_get_ref(struct wlan_objmgr_psoc *psoc, + wlan_objmgr_ref_dbgid id) +{ + if (!psoc) { + obj_mgr_err("psoc obj is NULL for id:%d", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + wlan_psoc_obj_lock(psoc); + if (psoc->obj_state != WLAN_OBJ_STATE_CREATED) { + wlan_psoc_obj_unlock(psoc); + if (psoc->soc_objmgr.print_cnt++ <= + WLAN_OBJMGR_RATELIMIT_THRESH) + obj_mgr_err( + "[Ref id: %d] psoc is not in Created state(%d)", + id, psoc->obj_state); + + return QDF_STATUS_E_RESOURCES; + } + + /* Increment ref count */ + wlan_objmgr_psoc_get_ref(psoc, id); + wlan_psoc_obj_unlock(psoc); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_psoc_try_get_ref); + +void wlan_objmgr_psoc_release_ref(struct wlan_objmgr_psoc *psoc, + wlan_objmgr_ref_dbgid id) +{ + if (!psoc) { + obj_mgr_err("psoc obj is NULL for id:%d", id); + QDF_ASSERT(0); + return; + } + + if (!qdf_atomic_read(&psoc->soc_objmgr.ref_id_dbg[id])) { + obj_mgr_err("psoc ref cnt was not taken by %d", id); + wlan_objmgr_print_ref_ids(psoc->soc_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); + WLAN_OBJMGR_BUG(0); + } + + if (!qdf_atomic_read(&psoc->soc_objmgr.ref_cnt)) { + obj_mgr_err("psoc ref cnt is 0"); + WLAN_OBJMGR_BUG(0); + return; + } + + qdf_atomic_dec(&psoc->soc_objmgr.ref_id_dbg[id]); + /* Decrement ref count, free psoc, if ref count == 0 */ + if (qdf_atomic_dec_and_test(&psoc->soc_objmgr.ref_cnt)) + wlan_objmgr_psoc_obj_destroy(psoc); + + return; +} +qdf_export_symbol(wlan_objmgr_psoc_release_ref); + +static void wlan_objmgr_psoc_peer_ref_print(struct wlan_objmgr_psoc *psoc, + void *obj, void *args) +{ + struct wlan_objmgr_peer *peer = (struct wlan_objmgr_peer *)obj; + WLAN_OBJ_STATE obj_state; + uint8_t vdev_id; + uint8_t *macaddr; + + wlan_peer_obj_lock(peer); + macaddr = wlan_peer_get_macaddr(peer); + obj_state = peer->obj_state; + vdev_id = wlan_vdev_get_id(wlan_peer_get_vdev(peer)); + wlan_peer_obj_unlock(peer); + + obj_mgr_alert("Peer MAC:%02x:%02x:%02x:%02x:%02x:%02x state:%d vdev_id:%d", + macaddr[0], macaddr[1], macaddr[2], macaddr[3], + macaddr[4], macaddr[5], obj_state, vdev_id); + wlan_objmgr_print_peer_ref_ids(peer, QDF_TRACE_LEVEL_FATAL); +} + +static void wlan_objmgr_psoc_vdev_ref_print(struct wlan_objmgr_psoc *psoc, + void *obj, void *args) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)obj; + WLAN_OBJ_STATE obj_state; + uint8_t id; + + wlan_vdev_obj_lock(vdev); + id = wlan_vdev_get_id(vdev); + obj_state = vdev->obj_state; + wlan_vdev_obj_unlock(vdev); + obj_mgr_alert("Vdev ID is %d, state %d", id, obj_state); + + wlan_objmgr_print_ref_ids(vdev->vdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); +} + +static void wlan_objmgr_psoc_pdev_ref_print(struct wlan_objmgr_psoc *psoc, + void *obj, void *args) +{ + struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)obj; + uint8_t id; + + wlan_pdev_obj_lock(pdev); + id = wlan_objmgr_pdev_get_pdev_id(pdev); + wlan_pdev_obj_unlock(pdev); + obj_mgr_alert("pdev ID is %d", id); + + wlan_objmgr_print_ref_ids(pdev->pdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); +} + +QDF_STATUS wlan_objmgr_print_ref_all_objects_per_psoc( + struct wlan_objmgr_psoc *psoc) +{ + obj_mgr_alert("Ref counts of PEER"); + wlan_objmgr_iterate_obj_list_all_noref(psoc, WLAN_PEER_OP, + wlan_objmgr_psoc_peer_ref_print, NULL); + obj_mgr_alert("Ref counts of VDEV"); + wlan_objmgr_iterate_obj_list_all_noref(psoc, WLAN_VDEV_OP, + wlan_objmgr_psoc_vdev_ref_print, NULL); + obj_mgr_alert("Ref counts of PDEV"); + wlan_objmgr_iterate_obj_list_all_noref(psoc, WLAN_PDEV_OP, + wlan_objmgr_psoc_pdev_ref_print, NULL); + + obj_mgr_alert(" Ref counts of PSOC"); + wlan_objmgr_print_ref_ids(psoc->soc_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_print_ref_all_objects_per_psoc); + +QDF_STATUS wlan_objmgr_psoc_set_user_config(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_psoc_user_config *user_config_data) +{ + if (!user_config_data) { + obj_mgr_err("user_config_data is NULL"); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + wlan_psoc_obj_lock(psoc); + qdf_mem_copy(&psoc->soc_nif.user_config, user_config_data, + sizeof(psoc->soc_nif.user_config)); + wlan_psoc_obj_unlock(psoc); + + return QDF_STATUS_SUCCESS; +} + +uint32_t wlan_objmgr_psoc_check_for_pdev_leaks(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_psoc_objmgr *_psoc; + struct wlan_objmgr_pdev *pdev; + int pdev_id; + uint32_t leaks = 0; + + QDF_BUG(psoc); + if (!psoc) + return leaks; + + wlan_psoc_obj_lock(psoc); + _psoc = &psoc->soc_objmgr; + if (!_psoc->wlan_pdev_count) { + wlan_psoc_obj_unlock(psoc); + return leaks; + } + + obj_mgr_alert("objmgr pdev leaks detected for psoc %u!", + _psoc->psoc_id); + obj_mgr_alert("----------------------------------------------------"); + obj_mgr_alert("Pdev Id Refs Module"); + obj_mgr_alert("----------------------------------------------------"); + + wlan_objmgr_for_each_psoc_pdev(psoc, pdev_id, pdev) { + qdf_atomic_t *ref_id_dbg; + int ref_id; + int32_t refs; + + wlan_pdev_obj_lock(pdev); + ref_id_dbg = pdev->pdev_objmgr.ref_id_dbg; + wlan_objmgr_for_each_refs(ref_id_dbg, ref_id, refs) { + leaks++; + obj_mgr_alert("%7u %4u %s", + pdev_id, refs, + string_from_dbgid(ref_id)); + } + wlan_pdev_obj_unlock(pdev); + } + + wlan_psoc_obj_unlock(psoc); + return leaks; +} +qdf_export_symbol(wlan_objmgr_psoc_check_for_pdev_leaks); + +uint32_t wlan_objmgr_psoc_check_for_vdev_leaks(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_psoc_objmgr *_psoc; + struct wlan_objmgr_vdev *vdev; + int vdev_id; + uint32_t leaks = 0; + + QDF_BUG(psoc); + if (!psoc) + return leaks; + + wlan_psoc_obj_lock(psoc); + _psoc = &psoc->soc_objmgr; + if (!_psoc->wlan_vdev_count) { + wlan_psoc_obj_unlock(psoc); + return leaks; + } + + obj_mgr_alert("objmgr vdev leaks detected for psoc %u!", + _psoc->psoc_id); + obj_mgr_alert("----------------------------------------------------"); + obj_mgr_alert("Vdev Id Refs Module"); + obj_mgr_alert("----------------------------------------------------"); + + wlan_objmgr_for_each_psoc_vdev(psoc, vdev_id, vdev) { + qdf_atomic_t *ref_id_dbg; + int ref_id; + int32_t refs; + + wlan_vdev_obj_lock(vdev); + ref_id_dbg = vdev->vdev_objmgr.ref_id_dbg; + wlan_objmgr_for_each_refs(ref_id_dbg, ref_id, refs) { + leaks++; + obj_mgr_alert("%7u %4u %s", + vdev_id, refs, string_from_dbgid(ref_id)); + } + wlan_vdev_obj_unlock(vdev); + } + + wlan_psoc_obj_unlock(psoc); + return leaks; +} +qdf_export_symbol(wlan_objmgr_psoc_check_for_vdev_leaks); + +#ifdef WLAN_OBJMGR_REF_ID_DEBUG +static void +wlan_objmgr_print_peer_ref_leaks(struct wlan_objmgr_peer *peer, int vdev_id) +{ + qdf_atomic_t *ref_id_dbg; + int32_t refs; + int ref_id; + + ref_id_dbg = peer->peer_objmgr.ref_id_dbg; + wlan_objmgr_for_each_refs(ref_id_dbg, ref_id, refs) { + obj_mgr_alert(QDF_MAC_ADDR_FMT " %7u %4u %s", + QDF_MAC_ADDR_REF(peer->macaddr), + vdev_id, + refs, + string_from_dbgid(ref_id)); + } +} +#else +static inline void +wlan_objmgr_print_peer_ref_leaks(struct wlan_objmgr_peer *peer, int vdev_id) +{ + obj_mgr_alert(QDF_MAC_ADDR_FMT " %7u %4u %s", + QDF_MAC_ADDR_REF(peer->macaddr), + vdev_id, + qdf_atomic_read(&peer->peer_objmgr.ref_cnt), + "TOTAL_REF_COUNT"); +} +#endif + +uint32_t wlan_objmgr_psoc_check_for_peer_leaks(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_psoc_objmgr *_psoc; + struct wlan_objmgr_vdev *vdev; + int vdev_id; + uint32_t leaks = 0; + + QDF_BUG(psoc); + if (!psoc) + return leaks; + + wlan_psoc_obj_lock(psoc); + _psoc = &psoc->soc_objmgr; + if (!_psoc->temp_peer_count && !_psoc->wlan_peer_count) { + wlan_psoc_obj_unlock(psoc); + return leaks; + } + + obj_mgr_alert("objmgr peer leaks detected for psoc %u!", + _psoc->psoc_id); + obj_mgr_alert("----------------------------------------------------"); + obj_mgr_alert("Peer MAC Vdev Id Refs Module"); + obj_mgr_alert("----------------------------------------------------"); + + wlan_objmgr_for_each_psoc_vdev(psoc, vdev_id, vdev) { + struct wlan_objmgr_peer *peer; + + wlan_vdev_obj_lock(vdev); + wlan_objmgr_for_each_vdev_peer(vdev, peer) { + wlan_peer_obj_lock(peer); + leaks += qdf_atomic_read(&peer->peer_objmgr.ref_cnt); + wlan_objmgr_print_peer_ref_leaks(peer, vdev_id); + wlan_peer_obj_unlock(peer); + } + wlan_vdev_obj_unlock(vdev); + } + + wlan_psoc_obj_unlock(psoc); + return leaks; +} +qdf_export_symbol(wlan_objmgr_psoc_check_for_peer_leaks); + +void wlan_objmgr_psoc_check_for_leaks(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_psoc_objmgr *_psoc; + uint32_t peer_leaks = 0; + uint32_t vdev_leaks = 0; + uint32_t pdev_leaks = 0; + + _psoc = &psoc->soc_objmgr; + + peer_leaks = wlan_objmgr_psoc_check_for_peer_leaks(psoc); + vdev_leaks = wlan_objmgr_psoc_check_for_vdev_leaks(psoc); + pdev_leaks = wlan_objmgr_psoc_check_for_pdev_leaks(psoc); + + if (peer_leaks || vdev_leaks || pdev_leaks) { + QDF_DEBUG_PANIC("%u objmgr peer leaks %u objmgr vdev leaks" + "%u objmgr pdev leaks detected for psoc %u!", + peer_leaks, vdev_leaks, pdev_leaks, + _psoc->psoc_id); + } +} + +qdf_export_symbol(wlan_objmgr_psoc_check_for_leaks); + +#ifdef WLAN_OBJMGR_DEBUG +void wlan_print_psoc_info(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_psoc_objmgr *psoc_objmgr; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + uint16_t index = 0; + + psoc_objmgr = &psoc->soc_objmgr; + + obj_mgr_debug("psoc: %pK", psoc); + obj_mgr_debug("psoc_id: %d", psoc_objmgr->psoc_id); + obj_mgr_debug("wlan_pdev_count: %d", psoc_objmgr->wlan_pdev_count); + obj_mgr_debug("wlan_pdev_id_map: 0x%x", psoc_objmgr->wlan_pdev_id_map); + obj_mgr_debug("wlan_vdev_count: %d", psoc_objmgr->wlan_vdev_count); + obj_mgr_debug("max_vdev_count: %d", psoc_objmgr->max_vdev_count); + obj_mgr_debug("wlan_peer_count: %d", psoc_objmgr->wlan_peer_count); + obj_mgr_debug("max_peer_count: %d", psoc_objmgr->max_peer_count); + obj_mgr_debug("temp_peer_count: %d", psoc_objmgr->temp_peer_count); + obj_mgr_debug("ref_cnt: %d", qdf_atomic_read(&psoc_objmgr->ref_cnt)); + obj_mgr_debug("qdf_dev: %pK", psoc_objmgr->qdf_dev); + + obj_mgr_debug("wlan_vdev_id_map[%d]: 0x%x", + index, psoc_objmgr->wlan_vdev_id_map[index]); + index++; + obj_mgr_debug("wlan_vdev_id_map[%d]: 0x%x", + index, psoc_objmgr->wlan_vdev_id_map[index]); + + wlan_objmgr_for_each_psoc_pdev(psoc, index, pdev) { + obj_mgr_debug("wlan_pdev_list[%d]: %pK", index, pdev); + wlan_print_pdev_info(pdev); + } + + wlan_objmgr_for_each_psoc_vdev(psoc, index, vdev) { + obj_mgr_debug("wlan_vdev_list[%d]: %pK", index, vdev); + wlan_print_vdev_info(vdev); + } +} + +qdf_export_symbol(wlan_print_psoc_info); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_psoc_obj_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_psoc_obj_i.h new file mode 100644 index 0000000000000000000000000000000000000000..81fcfb24752bf9fdaee7e0d2fd3f0a32aa57bf1f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_psoc_obj_i.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Public APIs to perform operations on Global objects + */ +#ifndef _WLAN_OBJMGR_PSOC_OBJ_I_H_ +#define _WLAN_OBJMGR_PSOC_OBJ_I_H_ + +/** + * wlan_objmgr_for_each_psoc_pdev() - iterate over each pdev for @psoc + * @psoc: the psoc whose pdevs should be iterated + * @pdev_id: pdev Id index cursor + * @pdev: pdev object cursor + * + * Note: The caller is responsible for grabbing @psoc's object lock before + * using this iterator + */ +#define wlan_objmgr_for_each_psoc_pdev(psoc, pdev_id, pdev) \ + for (pdev_id = 0; pdev_id < WLAN_UMAC_MAX_PDEVS; pdev_id++) \ + if ((pdev = (psoc)->soc_objmgr.wlan_pdev_list[pdev_id])) + +/** + * wlan_objmgr_for_each_psoc_vdev() - iterate over each vdev for @psoc + * @psoc: the psoc whose vdevs should be iterated + * @vdev_id: vdev Id index cursor + * @vdev: vdev object cursor + * + * Note: The caller is responsible for grabbing @psoc's object lock before + * using this iterator + */ +#define wlan_objmgr_for_each_psoc_vdev(psoc, vdev_id, vdev) \ + for (vdev_id = 0; vdev_id < WLAN_UMAC_PSOC_MAX_VDEVS; vdev_id++) \ + if ((vdev = (psoc)->soc_objmgr.wlan_vdev_list[vdev_id])) + +/** + * wlan_objmgr_for_each_refs() - iterate non-zero ref counts in @ref_id_dbg + * @ref_id_dbg: the ref count array to iterate + * @ref_id: the reference Id index cursor + * @refs: the ref count cursor + * + * Note: The caller is responsible for grabbing @ref_id_dbg's parent object lock + * before using this iterator + */ +#define wlan_objmgr_for_each_refs(ref_id_dbg, ref_id, refs) \ + for (ref_id = 0; ref_id < WLAN_REF_ID_MAX; ref_id++) \ + if ((refs = qdf_atomic_read(&(ref_id_dbg)[ref_id])) > 0) + +/** + * wlan_objmgr_psoc_pdev_attach() - store pdev in psoc's pdev list + * @psoc - PSOC object + * @pdev - PDEV object + * + * Attaches PDEV to PSOC, allocates PDEV id + * + * Return: SUCCESS + * Failure (Max PDEVs are exceeded) + */ +QDF_STATUS wlan_objmgr_psoc_pdev_attach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); + +/** + * wlan_objmgr_psoc_pdev_detach() - remove pdev from psoc's pdev list + * @psoc - PSOC object + * @pdev - PDEV object + * + * detaches PDEV to PSOC, frees PDEV id + * + * Return: SUCCESS + * Failure (No PDEVs are present) + */ +QDF_STATUS wlan_objmgr_psoc_pdev_detach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); + +/** + * wlan_objmgr_psoc_vdev_attach() - store vdev in psoc's vdev list + * @psoc - PSOC object + * @vdev - VDEV object + * + * Attaches VDEV to PSOC, allocates VDEV id + * + * Return: SUCCESS + * Failure (Max VDEVs are exceeded) + */ +QDF_STATUS wlan_objmgr_psoc_vdev_attach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev); + +/** + * wlan_objmgr_psoc_vdev_detach() - remove vdev from psoc's vdev list + * @psoc - PSOC object + * @vdev - VDEV object + * + * detaches VDEV to PSOC, frees VDEV id + * + * Return: SUCCESS + * Failure (No VDEVs are present) + */ +QDF_STATUS wlan_objmgr_psoc_vdev_detach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev); + +/** + * wlan_objmgr_psoc_peer_attach() - store peer in psoc's peer table + * @psoc - PSOC object + * @peer - PEER object + * + * Attaches PEER to PSOC, derives the HASH, add peer to its peer list + * + * Return: SUCCESS + * Failure (Max PEERs are exceeded) + */ +QDF_STATUS wlan_objmgr_psoc_peer_attach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer); + +/** + * wlan_objmgr_psoc_peer_detach() - remove peer from psoc's peer table + * @psoc - PSOC object + * @peer - PEER object + * + * detaches PEER to PSOC, removes the peer from the peer list + * + * Return: SUCCESS + * Failure (PEER is not present) + */ +QDF_STATUS wlan_objmgr_psoc_peer_detach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer); +#endif /* _WLAN_OBJMGR_PSOC_OBJ_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_vdev_obj.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_vdev_obj.c new file mode 100644 index 0000000000000000000000000000000000000000..7646a2a7a28a57eeb404e7cff72c8c14b006c7af --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_vdev_obj.c @@ -0,0 +1,1396 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + */ + /** + * DOC: Public APIs to perform operations on Global objects + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_objmgr_global_obj_i.h" +#include "wlan_objmgr_psoc_obj_i.h" +#include "wlan_objmgr_pdev_obj_i.h" +#include "wlan_objmgr_vdev_obj_i.h" +#include + +/** + ** APIs to Create/Delete Global object APIs + */ + +static QDF_STATUS wlan_objmgr_vdev_object_status( + struct wlan_objmgr_vdev *vdev) +{ + uint8_t id; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + wlan_vdev_obj_lock(vdev); + + /* Iterate through all components to derive the object status */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + /* If component disabled, Ignore */ + if (vdev->obj_status[id] == QDF_STATUS_COMP_DISABLED) { + continue; + /* + * If component operates in Async, status is Partially created, + * break + */ + } else if (vdev->obj_status[id] == QDF_STATUS_COMP_ASYNC) { + if (!vdev->vdev_comp_priv_obj[id]) { + status = QDF_STATUS_COMP_ASYNC; + break; + } + /* + * If component failed to allocate its object, treat it as + * failure, complete object need to be cleaned up + */ + } else if ((vdev->obj_status[id] == QDF_STATUS_E_NOMEM) || + (vdev->obj_status[id] == QDF_STATUS_E_FAILURE)) { + status = QDF_STATUS_E_FAILURE; + break; + } + } + wlan_vdev_obj_unlock(vdev); + + return status; +} + +static QDF_STATUS wlan_objmgr_vdev_obj_free(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_psoc *psoc; + + if (!vdev) { + obj_mgr_err("vdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + /* if PDEV is NULL, return */ + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + obj_mgr_err("pdev is NULL for vdev-id: %d", + vdev->vdev_objmgr.vdev_id); + return QDF_STATUS_E_FAILURE; + } + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + obj_mgr_err("psoc is NULL in pdev"); + return QDF_STATUS_E_FAILURE; + } + + /* Detach VDEV from PDEV VDEV's list */ + if (wlan_objmgr_pdev_vdev_detach(pdev, vdev) == + QDF_STATUS_E_FAILURE) + return QDF_STATUS_E_FAILURE; + + /* Detach VDEV from PSOC VDEV's list */ + if (wlan_objmgr_psoc_vdev_detach(psoc, vdev) == + QDF_STATUS_E_FAILURE) + return QDF_STATUS_E_FAILURE; + + wlan_objmgr_vdev_trace_del_ref_list(vdev); + wlan_objmgr_vdev_trace_deinit_lock(vdev); + qdf_spinlock_destroy(&vdev->vdev_lock); + + qdf_mem_free(vdev->vdev_mlme.bss_chan); + qdf_mem_free(vdev->vdev_mlme.des_chan); + qdf_mem_free(vdev); + + return QDF_STATUS_SUCCESS; + +} + +static struct vdev_osif_priv *wlan_objmgr_vdev_get_osif_priv( + struct wlan_objmgr_vdev *vdev) +{ + struct vdev_osif_priv *osif_priv; + + /* private data area immediately follows the struct wlan_objmgr_vdev */ + osif_priv = (struct vdev_osif_priv *)(vdev + 1); + + return osif_priv; +} + +struct wlan_objmgr_vdev *wlan_objmgr_vdev_obj_create( + struct wlan_objmgr_pdev *pdev, + struct wlan_vdev_create_params *params) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_psoc *psoc; + uint8_t id; + wlan_objmgr_vdev_create_handler handler; + wlan_objmgr_vdev_status_handler stat_handler; + void *arg; + QDF_STATUS obj_status; + + if (!pdev) { + obj_mgr_err("pdev is NULL"); + return NULL; + } + psoc = wlan_pdev_get_psoc(pdev); + /* PSOC is NULL */ + if (!psoc) { + obj_mgr_err("psoc is NULL for pdev-id:%d", + pdev->pdev_objmgr.wlan_pdev_id); + return NULL; + } + /* Allocate vdev object memory */ + vdev = qdf_mem_malloc(sizeof(*vdev) + params->size_vdev_priv); + if (!vdev) + return NULL; + + vdev->obj_state = WLAN_OBJ_STATE_ALLOCATED; + + vdev->vdev_mlme.bss_chan = qdf_mem_malloc(sizeof(struct wlan_channel)); + if (!vdev->vdev_mlme.bss_chan) { + qdf_mem_free(vdev); + return NULL; + } + + vdev->vdev_mlme.des_chan = qdf_mem_malloc(sizeof(struct wlan_channel)); + if (!vdev->vdev_mlme.des_chan) { + qdf_mem_free(vdev->vdev_mlme.bss_chan); + qdf_mem_free(vdev); + return NULL; + } + + wlan_objmgr_vdev_trace_init_lock(vdev); + /* Initialize spinlock */ + qdf_spinlock_create(&vdev->vdev_lock); + /* Attach VDEV to PSOC VDEV's list */ + if (wlan_objmgr_psoc_vdev_attach(psoc, vdev) != + QDF_STATUS_SUCCESS) { + obj_mgr_err("psoc vdev attach failed for vdev-id:%d", + vdev->vdev_objmgr.vdev_id); + qdf_mem_free(vdev->vdev_mlme.bss_chan); + qdf_mem_free(vdev->vdev_mlme.des_chan); + qdf_spinlock_destroy(&vdev->vdev_lock); + wlan_objmgr_vdev_trace_deinit_lock(vdev); + qdf_mem_free(vdev); + return NULL; + } + /* Store pdev in vdev */ + wlan_vdev_set_pdev(vdev, pdev); + /* Attach vdev to PDEV */ + if (wlan_objmgr_pdev_vdev_attach(pdev, vdev) != + QDF_STATUS_SUCCESS) { + obj_mgr_err("pdev vdev attach failed for vdev-id:%d", + vdev->vdev_objmgr.vdev_id); + wlan_objmgr_psoc_vdev_detach(psoc, vdev); + qdf_mem_free(vdev->vdev_mlme.bss_chan); + qdf_mem_free(vdev->vdev_mlme.des_chan); + qdf_spinlock_destroy(&vdev->vdev_lock); + wlan_objmgr_vdev_trace_deinit_lock(vdev); + qdf_mem_free(vdev); + return NULL; + } + /* set opmode */ + wlan_vdev_mlme_set_opmode(vdev, params->opmode); + /* set MAC address */ + wlan_vdev_mlme_set_macaddr(vdev, params->macaddr); + /* set MAT address */ + wlan_vdev_mlme_set_mataddr(vdev, params->mataddr); + /* Set create flags */ + vdev->vdev_objmgr.c_flags = params->flags; + /* store os-specific pointer */ + vdev->vdev_nif.osdev = wlan_objmgr_vdev_get_osif_priv(vdev); + /* peer count to 0 */ + vdev->vdev_objmgr.wlan_peer_count = 0; + qdf_atomic_init(&vdev->vdev_objmgr.ref_cnt); + vdev->vdev_objmgr.print_cnt = 0; + wlan_objmgr_vdev_get_ref(vdev, WLAN_OBJMGR_ID); + /* Initialize max peer count based on opmode type */ + if (wlan_vdev_mlme_get_opmode(vdev) == QDF_STA_MODE) + vdev->vdev_objmgr.max_peer_count = WLAN_UMAC_MAX_STA_PEERS; + else + vdev->vdev_objmgr.max_peer_count = + wlan_pdev_get_max_peer_count(pdev); + + if (params->legacy_osif) + vdev->vdev_nif.osdev->legacy_osif_priv = params->legacy_osif; + + /* Initialize peer list */ + qdf_list_create(&vdev->vdev_objmgr.wlan_peer_list, + vdev->vdev_objmgr.max_peer_count + + WLAN_MAX_PDEV_TEMP_PEERS); + /* TODO init other parameters */ + + /* Invoke registered create handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->vdev_create_handler[id]; + arg = g_umac_glb_obj->vdev_create_handler_arg[id]; + if (handler) + vdev->obj_status[id] = handler(vdev, arg); + else + vdev->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + + /* Derive object status */ + obj_status = wlan_objmgr_vdev_object_status(vdev); + + if (obj_status == QDF_STATUS_SUCCESS) { + /* Object status is SUCCESS, Object is created */ + vdev->obj_state = WLAN_OBJ_STATE_CREATED; + /* Invoke component registered status handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + stat_handler = g_umac_glb_obj->vdev_status_handler[id]; + arg = g_umac_glb_obj->vdev_status_handler_arg[id]; + if (stat_handler) { + stat_handler(vdev, arg, + QDF_STATUS_SUCCESS); + } + } + /* + * Few components operates in Asynchrous communction, Object state + * partially created + */ + } else if (obj_status == QDF_STATUS_COMP_ASYNC) { + vdev->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + /* Component object failed to be created, clean up the object */ + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /* Clean up the psoc */ + obj_mgr_err("VDEV comp objects creation failed for vdev-id:%d", + vdev->vdev_objmgr.vdev_id); + wlan_objmgr_vdev_obj_delete(vdev); + return NULL; + } + + obj_mgr_debug("Created vdev %d", vdev->vdev_objmgr.vdev_id); + + return vdev; +} +qdf_export_symbol(wlan_objmgr_vdev_obj_create); + +static QDF_STATUS wlan_objmgr_vdev_obj_destroy(struct wlan_objmgr_vdev *vdev) +{ + uint8_t id; + wlan_objmgr_vdev_destroy_handler handler; + QDF_STATUS obj_status; + void *arg; + uint8_t vdev_id; + + if (!vdev) { + obj_mgr_err("vdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + wlan_objmgr_notify_destroy(vdev, WLAN_VDEV_OP); + + vdev_id = wlan_vdev_get_id(vdev); + + obj_mgr_debug("Physically deleting vdev %d", vdev_id); + + if (vdev->obj_state != WLAN_OBJ_STATE_LOGICALLY_DELETED) { + obj_mgr_err("VDEV object delete is not invoked vdevid:%d objstate:%d", + wlan_vdev_get_id(vdev), vdev->obj_state); + WLAN_OBJMGR_BUG(0); + } + + /* Invoke registered destroy handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->vdev_destroy_handler[id]; + arg = g_umac_glb_obj->vdev_destroy_handler_arg[id]; + if (handler && + (vdev->obj_status[id] == QDF_STATUS_SUCCESS || + vdev->obj_status[id] == QDF_STATUS_COMP_ASYNC)) + vdev->obj_status[id] = handler(vdev, arg); + else + vdev->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + /* Derive object status */ + obj_status = wlan_objmgr_vdev_object_status(vdev); + + if (obj_status == QDF_STATUS_E_FAILURE) { + obj_mgr_err("VDEV object deletion failed: vdev-id: %d", + vdev_id); + /* Ideally should not happen */ + /* This leads to memleak ??? how to handle */ + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + /* Deletion is in progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + vdev->obj_state = WLAN_OBJ_STATE_PARTIALLY_DELETED; + return QDF_STATUS_COMP_ASYNC; + } + + /* Free VDEV object */ + return wlan_objmgr_vdev_obj_free(vdev); +} + +QDF_STATUS wlan_objmgr_vdev_obj_delete(struct wlan_objmgr_vdev *vdev) +{ + uint8_t print_idx; + + if (!vdev) { + obj_mgr_err("vdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + + obj_mgr_debug("Logically deleting vdev %d", vdev->vdev_objmgr.vdev_id); + + print_idx = qdf_get_pidx(); + wlan_objmgr_print_ref_ids(vdev->vdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + /* + * Update VDEV object state to LOGICALLY DELETED + * It prevents further access of this object + */ + wlan_vdev_obj_lock(vdev); + vdev->obj_state = WLAN_OBJ_STATE_LOGICALLY_DELETED; + wlan_vdev_obj_unlock(vdev); + wlan_objmgr_notify_log_delete(vdev, WLAN_VDEV_OP); + wlan_objmgr_vdev_release_ref(vdev, WLAN_OBJMGR_ID); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_vdev_obj_delete); + +/** + ** APIs to attach/detach component objects + */ +QDF_STATUS wlan_objmgr_vdev_component_obj_attach( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status) +{ + wlan_objmgr_vdev_status_handler stat_handler; + void *arg; + uint8_t i; + QDF_STATUS obj_status; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_vdev_obj_lock(vdev); + /* If there is a valid entry, return failure */ + if (vdev->vdev_comp_priv_obj[id]) { + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_E_FAILURE; + } + /* Save component's pointer and status */ + vdev->vdev_comp_priv_obj[id] = comp_priv_obj; + vdev->obj_status[id] = status; + wlan_vdev_obj_unlock(vdev); + if (vdev->obj_state != WLAN_OBJ_STATE_PARTIALLY_CREATED) + return QDF_STATUS_SUCCESS; + /* + * If VDEV object status is partially created means, this API is + * invoked with differnt context, this block should be executed for + * async components only + */ + /* Derive status */ + obj_status = wlan_objmgr_vdev_object_status(vdev); + /* STATUS_SUCCESS means, object is CREATED */ + if (obj_status == QDF_STATUS_SUCCESS) + vdev->obj_state = WLAN_OBJ_STATE_CREATED; + /* + * update state as CREATION failed, caller has to delete the + * VDEV object + */ + else if (obj_status == QDF_STATUS_E_FAILURE) + vdev->obj_state = WLAN_OBJ_STATE_CREATION_FAILED; + /* Notify components about the CREATION success/failure */ + if ((obj_status == QDF_STATUS_SUCCESS) || + (obj_status == QDF_STATUS_E_FAILURE)) { + for (i = 0; i < WLAN_UMAC_MAX_COMPONENTS; i++) { + stat_handler = g_umac_glb_obj->vdev_status_handler[i]; + arg = g_umac_glb_obj->vdev_status_handler_arg[i]; + if (stat_handler) + stat_handler(vdev, arg, obj_status); + } + } + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_vdev_component_obj_attach); + +QDF_STATUS wlan_objmgr_vdev_component_obj_detach( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj) +{ + QDF_STATUS obj_status; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_vdev_obj_lock(vdev); + /* If there is a valid entry, return failure */ + if (vdev->vdev_comp_priv_obj[id] != comp_priv_obj) { + vdev->obj_status[id] = QDF_STATUS_E_FAILURE; + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_E_FAILURE; + } + /* Reset pointers to NULL, update the status*/ + vdev->vdev_comp_priv_obj[id] = NULL; + vdev->obj_status[id] = QDF_STATUS_SUCCESS; + wlan_vdev_obj_unlock(vdev); + + /** + *If VDEV object status is partially destroyed means, this API is + * invoked with differnt context, this block should be executed for + * async components only + */ + if ((vdev->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) || + (vdev->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS)) { + /* Derive object status */ + obj_status = wlan_objmgr_vdev_object_status(vdev); + if (obj_status == QDF_STATUS_SUCCESS) { + /* + * Update the status as Deleted, if full object + * deletion is in progress + */ + if (vdev->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) + vdev->obj_state = WLAN_OBJ_STATE_DELETED; + /* + * Move to creation state, since this component + * deletion alone requested + */ + else if (vdev->obj_state == + WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + vdev->obj_state = WLAN_OBJ_STATE_CREATED; + /* Object status is failure */ + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /* + * Update the status as Deletion failed, if full object + * deletion is in progress + */ + if (vdev->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) + vdev->obj_state = + WLAN_OBJ_STATE_DELETION_FAILED; + /* Move to creation state, since this component + deletion alone requested (do not block other + components) */ + else if (vdev->obj_state == + WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + vdev->obj_state = WLAN_OBJ_STATE_CREATED; + } + /* Delete vdev object */ + if ((obj_status == QDF_STATUS_SUCCESS) && + (vdev->obj_state == WLAN_OBJ_STATE_DELETED)) { + /* Free VDEV object */ + return wlan_objmgr_vdev_obj_free(vdev); + } + } + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_vdev_component_obj_detach); + +/** + ** APIs to operations on vdev objects + */ +QDF_STATUS wlan_objmgr_iterate_peerobj_list( + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_vdev_op_handler handler, + void *arg, wlan_objmgr_ref_dbgid dbg_id) +{ + qdf_list_t *peer_list = NULL; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_objmgr_peer *peer_next = NULL; + uint8_t vdev_id; + + if (!vdev) { + obj_mgr_err("VDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + wlan_vdev_obj_lock(vdev); + vdev_id = wlan_vdev_get_id(vdev); + + if (vdev->obj_state != WLAN_OBJ_STATE_CREATED) { + wlan_vdev_obj_unlock(vdev); + obj_mgr_err("VDEV is not in create state:%d: vdev-id:%d", + vdev->obj_state, vdev_id); + return QDF_STATUS_E_FAILURE; + } + wlan_objmgr_vdev_get_ref(vdev, dbg_id); + peer_list = &vdev->vdev_objmgr.wlan_peer_list; + if (peer_list) { + /* Iterate through VDEV's peer list */ + peer = wlan_vdev_peer_list_peek_head(peer_list); + while (peer) { + peer_next = wlan_peer_get_next_peer_of_vdev(peer_list, + peer); + if (wlan_objmgr_peer_try_get_ref(peer, dbg_id) == + QDF_STATUS_SUCCESS) { + /* Invoke handler for operation */ + handler(vdev, (void *)peer, arg); + wlan_objmgr_peer_release_ref(peer, dbg_id); + } + peer = peer_next; + } + } + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_SUCCESS; +} + +/** + ** APIs to get a peer with given mac in a vdev + */ +struct wlan_objmgr_peer * +wlan_objmgr_vdev_find_peer_by_mac(struct wlan_objmgr_vdev *vdev, + uint8_t *peer_mac, + wlan_objmgr_ref_dbgid dbg_id) +{ + qdf_list_t *peer_list; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_objmgr_peer *peer_next = NULL; + uint8_t vdev_id; + + if (!vdev) { + obj_mgr_err("VDEV is NULL"); + return NULL; + } + wlan_vdev_obj_lock(vdev); + vdev_id = wlan_vdev_get_id(vdev); + + if (vdev->obj_state != WLAN_OBJ_STATE_CREATED) { + wlan_vdev_obj_unlock(vdev); + obj_mgr_err("VDEV is not in create state:%d: vdev-id:%d", + vdev->obj_state, vdev_id); + return NULL; + } + wlan_objmgr_vdev_get_ref(vdev, dbg_id); + peer_list = &vdev->vdev_objmgr.wlan_peer_list; + /* Iterate through VDEV's peer list */ + peer = wlan_vdev_peer_list_peek_head(peer_list); + while (peer) { + peer_next = wlan_peer_get_next_peer_of_vdev(peer_list, + peer); + if (wlan_objmgr_peer_try_get_ref(peer, dbg_id) == + QDF_STATUS_SUCCESS) { + if (!WLAN_ADDR_EQ(peer_mac, + wlan_peer_get_macaddr(peer))) { + wlan_objmgr_vdev_release_ref(vdev, + dbg_id); + wlan_vdev_obj_unlock(vdev); + return peer; + } + wlan_objmgr_peer_release_ref(peer, dbg_id); + } + peer = peer_next; + } + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + wlan_vdev_obj_unlock(vdev); + return NULL; +} + +qdf_export_symbol(wlan_objmgr_vdev_find_peer_by_mac); + +/** + * wlan_obj_vdev_populate_logically_del_peerlist() - get peer + * from vdev peer list + * @obj_list: peer object list + * @vdev_obj: vdev object mgr substructure + * @dbg_id: id of the caller + * + * API to finds peer object pointer by vdev from peer hash list for a node + * which is in logically deleted state + * + * Caller to free the list allocated in this function + * + * Return: list of peer pointers + * NULL on FAILURE + */ +static qdf_list_t *wlan_obj_vdev_populate_logically_del_peerlist( + qdf_list_t *obj_list, + struct wlan_objmgr_vdev_objmgr *vdev_obj, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_next; + struct wlan_logically_del_peer *peer_list; + qdf_list_t *logical_del_peerlist; + bool lock_released = false; + + logical_del_peerlist = qdf_mem_malloc(sizeof(*logical_del_peerlist)); + if (!logical_del_peerlist) + return NULL; + + qdf_list_create(logical_del_peerlist, vdev_obj->max_peer_count); + + peer = wlan_vdev_peer_list_peek_head(obj_list); + while (peer) { + wlan_peer_obj_lock(peer); + peer_next = wlan_peer_get_next_peer_of_vdev(obj_list, peer); + if (peer->obj_state == WLAN_OBJ_STATE_LOGICALLY_DELETED && + qdf_atomic_read(&peer->peer_objmgr.ref_cnt)) { + wlan_objmgr_peer_get_ref(peer, dbg_id); + wlan_peer_obj_unlock(peer); + lock_released = true; + + peer_list = qdf_mem_malloc(sizeof(*peer_list)); + if (!peer_list) { + wlan_objmgr_peer_release_ref(peer, dbg_id); + WLAN_OBJMGR_BUG(0); + break; + } + + peer_list->peer = peer; + qdf_list_insert_front(logical_del_peerlist, + &peer_list->list); + } + + if (!lock_released) + wlan_peer_obj_unlock(peer); + + peer = peer_next; + lock_released = false; + } + + /* Not found, return NULL */ + if (qdf_list_empty(logical_del_peerlist)) { + qdf_mem_free(logical_del_peerlist); + return NULL; + } + + return logical_del_peerlist; +} + +qdf_list_t *wlan_objmgr_vdev_get_log_del_peer_list( + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid dbg_id) +{ + qdf_list_t *peer_list; + qdf_list_t *log_del_peer_list = NULL; + + if (vdev->obj_state != WLAN_OBJ_STATE_CREATED) { + obj_mgr_err("Invalid state vdev:%d state:%d", + wlan_vdev_get_id(vdev), vdev->obj_state); + return NULL; + } + + wlan_vdev_obj_lock(vdev); + if (vdev->vdev_objmgr.wlan_peer_count == 0) { + wlan_vdev_obj_unlock(vdev); + return NULL; + } + + wlan_objmgr_vdev_get_ref(vdev, dbg_id); + peer_list = &vdev->vdev_objmgr.wlan_peer_list; + if (peer_list) { + log_del_peer_list = + wlan_obj_vdev_populate_logically_del_peerlist( + peer_list, &vdev->vdev_objmgr, + dbg_id); + } + + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + wlan_vdev_obj_unlock(vdev); + + return log_del_peer_list; +} + +QDF_STATUS wlan_objmgr_trigger_vdev_comp_priv_object_creation( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_vdev_create_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* Component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_vdev_obj_lock(vdev); + /* + * If component object is already created, delete old + * component object, then invoke creation + */ + if (vdev->vdev_comp_priv_obj[id]) { + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_E_FAILURE; + } + wlan_vdev_obj_unlock(vdev); + + /* Invoke registered create handlers */ + handler = g_umac_glb_obj->vdev_create_handler[id]; + arg = g_umac_glb_obj->vdev_create_handler_arg[id]; + if (handler) + vdev->obj_status[id] = handler(vdev, arg); + else + return QDF_STATUS_E_FAILURE; + + /* If object status is created, then only handle this object status */ + if (vdev->obj_state == WLAN_OBJ_STATE_CREATED) { + /* Derive object status */ + obj_status = wlan_objmgr_vdev_object_status(vdev); + /* Move PDEV object state to Partially created state */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + /*TODO atomic */ + vdev->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + } + } + return obj_status; +} + +QDF_STATUS wlan_objmgr_trigger_vdev_comp_priv_object_deletion( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_vdev_destroy_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_vdev_obj_lock(vdev); + /* Component object was never created, invalid operation */ + if (!vdev->vdev_comp_priv_obj[id]) { + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_E_FAILURE; + } + wlan_vdev_obj_unlock(vdev); + + /* Invoke registered create handlers */ + handler = g_umac_glb_obj->vdev_destroy_handler[id]; + arg = g_umac_glb_obj->vdev_destroy_handler_arg[id]; + if (handler) + vdev->obj_status[id] = handler(vdev, arg); + else + return QDF_STATUS_E_FAILURE; + + /* If object status is created, then only handle this object status */ + if (vdev->obj_state == WLAN_OBJ_STATE_CREATED) { + obj_status = wlan_objmgr_vdev_object_status(vdev); + /* move object state to DEL progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) + vdev->obj_state = WLAN_OBJ_STATE_COMP_DEL_PROGRESS; + } + return obj_status; +} + + + +static void wlan_obj_vdev_peerlist_add_tail(qdf_list_t *obj_list, + struct wlan_objmgr_peer *obj) +{ + qdf_list_insert_back(obj_list, &obj->vdev_peer); +} + +static QDF_STATUS wlan_obj_vdev_peerlist_remove_peer(qdf_list_t *obj_list, + struct wlan_objmgr_peer *peer) +{ + qdf_list_node_t *vdev_node = NULL; + + if (!peer) + return QDF_STATUS_E_FAILURE; + /* get vdev list node element */ + vdev_node = &peer->vdev_peer; + /* list is empty, return failure */ + if (qdf_list_remove_node(obj_list, vdev_node) != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_vdev_peer_attach(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_vdev_objmgr *objmgr = &vdev->vdev_objmgr; + struct wlan_objmgr_pdev *pdev; + enum QDF_OPMODE opmode; + + wlan_vdev_obj_lock(vdev); + pdev = wlan_vdev_get_pdev(vdev); + /* If Max VDEV peer count exceeds, return failure */ + if (peer->peer_mlme.peer_type != WLAN_PEER_STA_TEMP) { + if (objmgr->wlan_peer_count >= objmgr->max_peer_count) { + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_E_FAILURE; + } + } + wlan_vdev_obj_unlock(vdev); + + /* If Max PDEV peer count exceeds, return failure */ + wlan_pdev_obj_lock(pdev); + if (peer->peer_mlme.peer_type == WLAN_PEER_STA_TEMP) { + if (wlan_pdev_get_temp_peer_count(pdev) >= + WLAN_MAX_PDEV_TEMP_PEERS) { + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + } else { + if (wlan_pdev_get_peer_count(pdev) >= + wlan_pdev_get_max_peer_count(pdev)) { + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + } + + if (peer->peer_mlme.peer_type == WLAN_PEER_STA_TEMP) + wlan_pdev_incr_temp_peer_count(wlan_vdev_get_pdev(vdev)); + else + wlan_pdev_incr_peer_count(wlan_vdev_get_pdev(vdev)); + wlan_pdev_obj_unlock(pdev); + + wlan_vdev_obj_lock(vdev); + /* Add peer to vdev's peer list */ + wlan_obj_vdev_peerlist_add_tail(&objmgr->wlan_peer_list, peer); + objmgr->wlan_peer_count++; + + if (WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), + wlan_vdev_mlme_get_macaddr(vdev)) == + QDF_STATUS_SUCCESS) { + /* + * if peer mac address and vdev mac address match, set + * this peer as self peer + */ + wlan_vdev_set_selfpeer(vdev, peer); + opmode = wlan_vdev_mlme_get_opmode(vdev); + /* For AP mode, self peer and BSS peer are same */ + if ((opmode == QDF_SAP_MODE) || + (opmode == QDF_P2P_GO_MODE) || + (opmode == QDF_NDI_MODE)) + wlan_vdev_set_bsspeer(vdev, peer); + } + /* set BSS peer for sta */ + if ((wlan_vdev_mlme_get_opmode(vdev) == QDF_STA_MODE || + wlan_vdev_mlme_get_opmode(vdev) == QDF_P2P_CLIENT_MODE) && + (wlan_peer_get_peer_type(peer) == WLAN_PEER_AP || + wlan_peer_get_peer_type(peer) == WLAN_PEER_P2P_GO)) + wlan_vdev_set_bsspeer(vdev, peer); + + /* Increment vdev ref count to make sure it won't be destroyed before */ + wlan_objmgr_vdev_get_ref(vdev, WLAN_OBJMGR_ID); + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_vdev_peer_detach(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_vdev_objmgr *objmgr = &vdev->vdev_objmgr; + struct wlan_objmgr_pdev *pdev; + + wlan_vdev_obj_lock(vdev); + /* if peer count is 0, return failure */ + if (objmgr->wlan_peer_count == 0) { + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_E_FAILURE; + } + + if (wlan_vdev_get_selfpeer(vdev) == peer) { + /* + * There might be instances where new node is created + * before deleting existing node, in which case selfpeer + * will be pointing to the new node. So set selfpeer to + * NULL only if vdev->vdev_objmgr.self_peer is pointing + * to the peer processed for deletion + */ + wlan_vdev_set_selfpeer(vdev, NULL); + } + + if (wlan_vdev_get_bsspeer(vdev) == peer) { + /* + * There might be instances where new node is created + * before deleting existing node, in which case bsspeer + * in vdev will be pointing to the new node. So set + * bsspeer to NULL only if vdev->vdev_objmgr.bss_peer is + * pointing to the peer processed for deletion + */ + wlan_vdev_set_bsspeer(vdev, NULL); + } + + /* remove peer from vdev's peer list */ + if (wlan_obj_vdev_peerlist_remove_peer(&objmgr->wlan_peer_list, peer) + == QDF_STATUS_E_FAILURE) { + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_E_FAILURE; + } + /* decrement peer count */ + objmgr->wlan_peer_count--; + /* decrement pdev peer count */ + pdev = wlan_vdev_get_pdev(vdev); + wlan_vdev_obj_unlock(vdev); + + wlan_pdev_obj_lock(pdev); + if (peer->peer_mlme.peer_type == WLAN_PEER_STA_TEMP) + wlan_pdev_decr_temp_peer_count(pdev); + else + wlan_pdev_decr_peer_count(pdev); + wlan_pdev_obj_unlock(pdev); + + /* decrement vdev ref count after peer released its reference */ + wlan_objmgr_vdev_release_ref(vdev, WLAN_OBJMGR_ID); + return QDF_STATUS_SUCCESS; +} + +struct wlan_objmgr_peer *wlan_objmgr_vdev_try_get_bsspeer( + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id) +{ + struct wlan_objmgr_peer *peer; + QDF_STATUS status = QDF_STATUS_E_EMPTY; + + if (!vdev) + return NULL; + + wlan_vdev_obj_lock(vdev); + peer = wlan_vdev_get_bsspeer(vdev); + if (peer) + status = wlan_objmgr_peer_try_get_ref(peer, id); + wlan_vdev_obj_unlock(vdev); + + if (QDF_IS_STATUS_SUCCESS(status)) + return peer; + + return NULL; +} + +void *wlan_objmgr_vdev_get_comp_private_obj( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id) +{ + void *comp_priv_obj; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + QDF_BUG(0); + return NULL; + } + + if (!vdev) { + QDF_BUG(0); + return NULL; + } + + comp_priv_obj = vdev->vdev_comp_priv_obj[id]; + + return comp_priv_obj; +} +qdf_export_symbol(wlan_objmgr_vdev_get_comp_private_obj); + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +static inline void +wlan_objmgr_vdev_ref_trace(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id, + const char *func, int line) +{ + struct wlan_objmgr_trace *trace; + + trace = &vdev->vdev_objmgr.trace; + + if (func) + wlan_objmgr_trace_ref(&trace->references[id].head, + trace, func, line); +} + +static inline void +wlan_objmgr_vdev_deref_trace(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id, + const char *func, int line) +{ + struct wlan_objmgr_trace *trace; + + trace = &vdev->vdev_objmgr.trace; + + if (func) + wlan_objmgr_trace_ref(&trace->dereferences[id].head, + trace, func, line); +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +void wlan_objmgr_vdev_get_ref_debug(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id, + const char *func, int line) +{ + if (!vdev) { + obj_mgr_err("vdev obj is NULL for id:%d", id); + QDF_ASSERT(0); + return; + } + /* Increment ref count */ + qdf_atomic_inc(&vdev->vdev_objmgr.ref_cnt); + qdf_atomic_inc(&vdev->vdev_objmgr.ref_id_dbg[id]); + + wlan_objmgr_vdev_ref_trace(vdev, id, func, line); + return; +} + +qdf_export_symbol(wlan_objmgr_vdev_get_ref_debug); +#else +void wlan_objmgr_vdev_get_ref(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id) +{ + if (!vdev) { + obj_mgr_err("vdev obj is NULL for id:%d", id); + QDF_ASSERT(0); + return; + } + /* Increment ref count */ + qdf_atomic_inc(&vdev->vdev_objmgr.ref_cnt); + qdf_atomic_inc(&vdev->vdev_objmgr.ref_id_dbg[id]); +} + +qdf_export_symbol(wlan_objmgr_vdev_get_ref); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +QDF_STATUS wlan_objmgr_vdev_try_get_ref_debug(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id, + const char *func, int line) +{ + uint8_t vdev_id; + + if (!vdev) { + obj_mgr_err("vdev obj is NULL for id:%d", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + wlan_vdev_obj_lock(vdev); + vdev_id = wlan_vdev_get_id(vdev); + if (vdev->obj_state != WLAN_OBJ_STATE_CREATED) { + wlan_vdev_obj_unlock(vdev); + if (vdev->vdev_objmgr.print_cnt++ <= + WLAN_OBJMGR_RATELIMIT_THRESH) + obj_mgr_err( + "[Ref id: %d] vdev(%d) is not in Created state(%d)", + id, vdev_id, vdev->obj_state); + + return QDF_STATUS_E_RESOURCES; + } + + /* Increment ref count */ + wlan_objmgr_vdev_get_ref_debug(vdev, id, func, line); + wlan_vdev_obj_unlock(vdev); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(wlan_objmgr_vdev_try_get_ref_debug); +#else +QDF_STATUS wlan_objmgr_vdev_try_get_ref(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id) +{ + uint8_t vdev_id; + + if (!vdev) { + obj_mgr_err("vdev obj is NULL for id:%d", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + wlan_vdev_obj_lock(vdev); + vdev_id = wlan_vdev_get_id(vdev); + if (vdev->obj_state != WLAN_OBJ_STATE_CREATED) { + wlan_vdev_obj_unlock(vdev); + if (vdev->vdev_objmgr.print_cnt++ <= + WLAN_OBJMGR_RATELIMIT_THRESH) + obj_mgr_err( + "[Ref id: %d] vdev(%d) is not in Created state(%d)", + id, vdev_id, vdev->obj_state); + + return QDF_STATUS_E_RESOURCES; + } + + /* Increment ref count */ + wlan_objmgr_vdev_get_ref(vdev, id); + wlan_vdev_obj_unlock(vdev); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(wlan_objmgr_vdev_try_get_ref); +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_vdev_get_next_active_vdev_of_pdev_debug( + struct wlan_objmgr_pdev *pdev, + qdf_list_t *vdev_list, + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_vdev *vdev_next; + qdf_list_node_t *node = &vdev->vdev_node; + qdf_list_node_t *prev_node = NULL; + + if (!node) + return NULL; + + wlan_pdev_obj_lock(pdev); + prev_node = node; + while (qdf_list_peek_next(vdev_list, prev_node, &node) == + QDF_STATUS_SUCCESS) { + vdev_next = qdf_container_of(node, struct wlan_objmgr_vdev, + vdev_node); + if (wlan_objmgr_vdev_try_get_ref_debug(vdev_next, dbg_id, + func, line) == + QDF_STATUS_SUCCESS) { + wlan_pdev_obj_unlock(pdev); + return vdev_next; + } + + prev_node = node; + } + wlan_pdev_obj_unlock(pdev); + + return NULL; +} +#else +struct wlan_objmgr_vdev *wlan_vdev_get_next_active_vdev_of_pdev( + struct wlan_objmgr_pdev *pdev, + qdf_list_t *vdev_list, + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev_next; + qdf_list_node_t *node = &vdev->vdev_node; + qdf_list_node_t *prev_node = NULL; + + if (!node) + return NULL; + + wlan_pdev_obj_lock(pdev); + prev_node = node; + while (qdf_list_peek_next(vdev_list, prev_node, &node) == + QDF_STATUS_SUCCESS) { + vdev_next = qdf_container_of(node, struct wlan_objmgr_vdev, + vdev_node); + if (wlan_objmgr_vdev_try_get_ref(vdev_next, dbg_id) == + QDF_STATUS_SUCCESS) { + wlan_pdev_obj_unlock(pdev); + return vdev_next; + } + + prev_node = node; + } + wlan_pdev_obj_unlock(pdev); + + return NULL; +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_pdev_vdev_list_peek_active_head_debug( + struct wlan_objmgr_pdev *pdev, + qdf_list_t *vdev_list, wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_vdev *vdev; + qdf_list_node_t *node = NULL; + qdf_list_node_t *prev_node = NULL; + + wlan_pdev_obj_lock(pdev); + + if (qdf_list_peek_front(vdev_list, &node) != QDF_STATUS_SUCCESS) { + wlan_pdev_obj_unlock(pdev); + return NULL; + } + + do { + vdev = qdf_container_of(node, struct wlan_objmgr_vdev, + vdev_node); + if (wlan_objmgr_vdev_try_get_ref_debug(vdev, dbg_id, + func, line) == + QDF_STATUS_SUCCESS) { + wlan_pdev_obj_unlock(pdev); + return vdev; + } + + prev_node = node; + } while (qdf_list_peek_next(vdev_list, prev_node, &node) == + QDF_STATUS_SUCCESS); + + wlan_pdev_obj_unlock(pdev); + + return NULL; +} +#else +struct wlan_objmgr_vdev *wlan_pdev_vdev_list_peek_active_head( + struct wlan_objmgr_pdev *pdev, + qdf_list_t *vdev_list, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + qdf_list_node_t *node = NULL; + qdf_list_node_t *prev_node = NULL; + + wlan_pdev_obj_lock(pdev); + + if (qdf_list_peek_front(vdev_list, &node) != QDF_STATUS_SUCCESS) { + wlan_pdev_obj_unlock(pdev); + return NULL; + } + + do { + vdev = qdf_container_of(node, struct wlan_objmgr_vdev, + vdev_node); + if (wlan_objmgr_vdev_try_get_ref(vdev, dbg_id) == + QDF_STATUS_SUCCESS) { + wlan_pdev_obj_unlock(pdev); + return vdev; + } + + prev_node = node; + } while (qdf_list_peek_next(vdev_list, prev_node, &node) == + QDF_STATUS_SUCCESS); + + wlan_pdev_obj_unlock(pdev); + + return NULL; +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +struct wlan_objmgr_vdev *wlan_pdev_peek_active_first_vdev_debug( + struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id, + const char *func, int line) +{ + struct wlan_objmgr_pdev_objmgr *objmgr = &pdev->pdev_objmgr; + qdf_list_t *vdev_list; + + /* VDEV list */ + vdev_list = &objmgr->wlan_vdev_list; + + return wlan_pdev_vdev_list_peek_active_head_debug(pdev, vdev_list, + dbg_id, func, line); +} +#else +struct wlan_objmgr_vdev *wlan_pdev_peek_active_first_vdev( + struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_pdev_objmgr *objmgr = &pdev->pdev_objmgr; + qdf_list_t *vdev_list; + + /* VDEV list */ + vdev_list = &objmgr->wlan_vdev_list; + + return wlan_pdev_vdev_list_peek_active_head(pdev, vdev_list, + dbg_id); +} +#endif + +#ifdef WLAN_OBJMGR_REF_ID_TRACE +void wlan_objmgr_vdev_release_ref_debug(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id, + const char *func, int line) +{ + uint8_t vdev_id; + + if (!vdev) { + obj_mgr_err("vdev obj is NULL for id:%d", id); + QDF_ASSERT(0); + return; + } + + vdev_id = wlan_vdev_get_id(vdev); + + if (!qdf_atomic_read(&vdev->vdev_objmgr.ref_id_dbg[id])) { + obj_mgr_alert("vdev (id:%d)ref cnt was not taken by %d", + vdev_id, id); + wlan_objmgr_print_ref_ids(vdev->vdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); + WLAN_OBJMGR_BUG(0); + return; + } + + if (!qdf_atomic_read(&vdev->vdev_objmgr.ref_cnt)) { + obj_mgr_alert("vdev ref cnt is 0"); + WLAN_OBJMGR_BUG(0); + return; + } + qdf_atomic_dec(&vdev->vdev_objmgr.ref_id_dbg[id]); + wlan_objmgr_vdev_deref_trace(vdev, id, func, line); + + /* Decrement ref count, free vdev, if ref count == 0 */ + if (qdf_atomic_dec_and_test(&vdev->vdev_objmgr.ref_cnt)) + wlan_objmgr_vdev_obj_destroy(vdev); +} + +qdf_export_symbol(wlan_objmgr_vdev_release_ref_debug); +#else +void wlan_objmgr_vdev_release_ref(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id) +{ + uint8_t vdev_id; + + if (!vdev) { + obj_mgr_err("vdev obj is NULL for id:%d", id); + QDF_ASSERT(0); + return; + } + + vdev_id = wlan_vdev_get_id(vdev); + + if (!qdf_atomic_read(&vdev->vdev_objmgr.ref_id_dbg[id])) { + obj_mgr_alert("vdev (id:%d)ref cnt was not taken by %d", + vdev_id, id); + wlan_objmgr_print_ref_ids(vdev->vdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); + WLAN_OBJMGR_BUG(0); + return; + } + + if (!qdf_atomic_read(&vdev->vdev_objmgr.ref_cnt)) { + obj_mgr_alert("vdev ref cnt is 0"); + WLAN_OBJMGR_BUG(0); + return; + } + qdf_atomic_dec(&vdev->vdev_objmgr.ref_id_dbg[id]); + + /* Decrement ref count, free vdev, if ref count == 0 */ + if (qdf_atomic_dec_and_test(&vdev->vdev_objmgr.ref_cnt)) + wlan_objmgr_vdev_obj_destroy(vdev); +} + +qdf_export_symbol(wlan_objmgr_vdev_release_ref); +#endif + +#ifdef WLAN_OBJMGR_DEBUG +void wlan_print_vdev_info(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_vdev_objmgr *vdev_objmgr; + uint32_t ref_cnt; + + vdev_objmgr = &vdev->vdev_objmgr; + + ref_cnt = qdf_atomic_read(&vdev_objmgr->ref_cnt); + + obj_mgr_debug("vdev: %pK", vdev); + obj_mgr_debug("vdev_id: %d", vdev_objmgr->vdev_id); + obj_mgr_debug("print_cnt: %d", vdev_objmgr->print_cnt); + obj_mgr_debug("wlan_pdev: %pK", vdev_objmgr->wlan_pdev); + obj_mgr_debug("ref_cnt: %d", ref_cnt); +} + +qdf_export_symbol(wlan_print_vdev_info); +#endif + +void wlan_objmgr_vdev_peer_freed_notify(struct wlan_objmgr_vdev *vdev) +{ + wlan_objmgr_vdev_peer_free_notify_handler stat_handler; + uint8_t i; + + for (i = 0; i < WLAN_UMAC_MAX_COMPONENTS; i++) { + stat_handler = g_umac_glb_obj->vdev_peer_free_notify_handler[i]; + if (stat_handler) + stat_handler(vdev); + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_vdev_obj_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_vdev_obj_i.h new file mode 100644 index 0000000000000000000000000000000000000000..460d79f3283b238434782ad04063cf7721c5b0b3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_vdev_obj_i.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2016,2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Public APIs to perform operations on VDEV objects + */ +#ifndef _WLAN_OBJMGR_VDEV_OBJ_I_H_ +#define _WLAN_OBJMGR_VDEV_OBJ_I_H_ + +/** + * wlan_objmgr_for_each_vdev_peer() - iterate over each peer for @vdev + * @vdev: the vdev whose peers should be iterated + * @peer: peer object cursor + * + * Note: The caller is responsible for grabbing @vdev's object lock before + * using this iterator + */ +#define wlan_objmgr_for_each_vdev_peer(vdev, peer) \ + qdf_list_for_each(&(vdev)->vdev_objmgr.wlan_peer_list, peer, vdev_peer) + +/** + * wlan_objmgr_vdev_peer_attach() - attach peer to vdev peer list + * @vdev: VDEV object + * @peer: PEER object + * + * Attaches PEER to VDEV, stores it in VDEV's peer list + * + * Return: SUCCESS + * Failure (Max PEERs are exceeded) + */ +QDF_STATUS wlan_objmgr_vdev_peer_attach(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer); + +/** + * wlan_objmgr_vdev_peer_detach() - detach peer from vdev peer list + * @vdev: VDEV object + * @peer: PEER object + * + * detaches PEER from VDEV's peer list + * + * Return: SUCCESS + * Failure (No PEERs are present) + */ +QDF_STATUS wlan_objmgr_vdev_peer_detach(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer); + +#endif /* _WLAN_OBJMGR_VDEV_OBJ_I_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_api.h new file mode 100644 index 0000000000000000000000000000000000000000..2d33bd0beb015427ae1ac696c8f988bdc554b029 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_api.h @@ -0,0 +1,704 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_serialization_api.h + * This file provides prototypes of the routines needed for the + * external components to utilize the services provided by the + * serialization component. + */ + +/* Include files */ +#ifndef __WLAN_SERIALIZATION_API_H +#define __WLAN_SERIALIZATION_API_H + +#include +#include + +/* Preprocessor Definitions and Constants */ + +/** + * enum ser_queue_reason- reason for changes to serialization queue + * @: SER_REQUEST: queue updated for serialization request + * @: SER_REMOVE : queue updated for serialization remove request + * @: SER_CANCEL : queue updated for serialization cancel request + * @: SER_TIMEOUT : queue updated for command timeout + * @: SER_ACTIVATION_FAILED : queue updated since command activation failed + * @: SER_PENDING_TO_ACTIVE : queue updated for pending to active movement + */ +enum ser_queue_reason { + SER_REQUEST, + SER_REMOVE, + SER_CANCEL, + SER_TIMEOUT, + SER_ACTIVATION_FAILED, + SER_PENDING_TO_ACTIVE, + SER_QUEUE_ACTION_MAX, +}; + +/* + * struct wlan_serialization_queued_cmd_info member queue_type specifies the + * below values to cancel the commands in these queues. Setting both the + * bits will cancel the commands in both the queues. + */ +#define WLAN_SERIALIZATION_ACTIVE_QUEUE 0x1 +#define WLAN_SERIALIZATION_PENDING_QUEUE 0x2 + +/** + * enum wlan_serialization_cb_reason - reason for calling the callback + * @WLAN_SERIALIZATION_REASON_ACTIVATE_CMD: activate the cmd by sending it to FW + * @WLAN_SERIALIZATION_REASON_CANCEL_CMD: Cancel the cmd in the pending list + * @WLAN_SERIALIZATION_REASON_RELEASE_MEM_CMD:cmd execution complete. Release + * the memory allocated while + * building the command + * @WLAN_SER_CB_ACTIVE_CMD_TIMEOUT: active cmd has been timeout. + */ +enum wlan_serialization_cb_reason { + WLAN_SER_CB_ACTIVATE_CMD, + WLAN_SER_CB_CANCEL_CMD, + WLAN_SER_CB_RELEASE_MEM_CMD, + WLAN_SER_CB_ACTIVE_CMD_TIMEOUT, +}; + +/** + * struct wlan_serialization_scan_info - Information needed for scan cmd + * @is_cac_in_progress: boolean to check the cac status + * @is_tdls_in_progress: boolean to check the tdls status + * @is_mlme_op_in_progress: boolean to check the mlme op status + * + * This information is needed for scan command from other components + * to apply the rules and check whether the cmd is allowed or not + */ +struct wlan_serialization_scan_info { + bool is_cac_in_progress; + bool is_tdls_in_progress; + bool is_mlme_op_in_progress; +}; + +/** + * union wlan_serialization_rules_info - union of all rules info structures + * @scan_info: information needed to apply rules on scan command + */ +union wlan_serialization_rules_info { + struct wlan_serialization_scan_info scan_info; +}; + +struct wlan_serialization_command; + +/** + * wlan_serialization_cmd_callback() - Callback registered by the component + * @wlan_cmd: Command passed by the component for serialization + * @reason: Reason code for which the callback is being called + * + * Reason specifies the reason for which the callback is being called. callback + * should return success or failure based up on overall success of callback. + * if callback returns failure then serialization will remove the command from + * active queue and proceed for next pending command. + * + * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_FAILURE + */ +typedef QDF_STATUS +(*wlan_serialization_cmd_callback)(struct wlan_serialization_command *wlan_cmd, + enum wlan_serialization_cb_reason reason); + +/** + * wlan_serialization_comp_info_cb() - callback to fill the rules information + * @vdev: VDEV object for which the command has been received + * @comp_info: Information filled by the component + * + * This callback is registered dynamically by the component with the + * serialization component. Serialization component invokes the callback + * while applying the rules for a particular command and the component + * fills in the required information to apply the rules + * + * Return: None + */ +typedef void (*wlan_serialization_comp_info_cb)(struct wlan_objmgr_vdev *vdev, + union wlan_serialization_rules_info *comp_info); + +/** + * wlan_serialization_apply_rules_cb() - callback per command to apply rules + * @comp_info: information needed to apply the rules + * + * The rules are applied using this callback and decided whether to + * allow or deny the command + * + * Return: true, if rules are successful and cmd can be queued + * false, if rules failed and cmd should not be queued + */ +typedef bool (*wlan_serialization_apply_rules_cb)( + union wlan_serialization_rules_info *comp_info, + uint8_t comp_id); + +/** + * wlan_ser_umac_cmd_cb() - callback to validate umac_cmd + * @umac_cmd: umac data associated with the serialization cmd + * + * This callback can be called at run time for a command in active queue to + * fetch the required information from the umac cmd data stored in serialization + * command buffer. + * + * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_FAILURE + */ +typedef QDF_STATUS (*wlan_ser_umac_cmd_cb)(void *umac_cmd); + +/** + * enum wlan_umac_cmd_id - Command Type + * @WLAN_SER_CMD_SCAN: Scan command + * @WLAN_SER_CMD_NONSCAN: Non-scan command + * @WLAN_SER_CMD_HDD_ISSUE_REASSOC_SAME_AP: HDD Reassoc cmd + * @WLAN_SER_CMD_SME_ISSUE_REASSOC_SAME_AP: SME Reassoc cmd + * @WLAN_SER_CMD_SME_ISSUE_DISASSOC_FOR_HANDOFF: SME Disassoc cmd + * @WLAN_SER_CMD_SME_ISSUE_ASSOC_TO_SIMILAR_AP: SME Assoc cmd + * @WLAN_SER_CMD_FORCE_IBSS_LEAVE: IBSS leave AP cmd + * @WLAN_SER_CMD_SME_ISSUE_FT_REASSOC: SME reassoc cmd + * @WLAN_SER_CMD_FORCE_DISASSOC_STA: Force diassoc for STA vap + * @WLAN_SER_CMD_FORCE_DEAUTH_STA: Force deauth for STA vap + * @WLAN_SER_CMD_PERFORM_PRE_AUTH: Pre auth ops cmd + * @WLAN_SER_CMD_WM_STATUS_CHANGE: WM status modification cmd + * @WLAN_SER_CMD_NDP_INIT_REQ: NDP init request cmd + * @WLAN_SER_CMD_NDP_RESP_REQ: NDP response to request cmd + * @WLAN_SER_CMD_NDP_DATA_END_INIT_REQ: NDP data end init request + * @WLAN_SER_CMD_NDP_END_ALL_REQ: NDP close all request + * @WLAN_SER_CMD_ADDTS: ADD Ts cmd + * @WLAN_SER_CMD_DELTS: Del Ts cmd + * @WLAN_SER_CMD_TDLS_SEND_MGMT: TDLS mgmt send cmd + * @WLAN_SER_CMD_TDLS_ADD_PEER: TDLS cmd to add peer + * @WLAN_SER_CMD_TDLS_DEL_PEER: TDLS cmd to del peer + * @WLAN_SER_CMD_SET_HW_MODE: Cmd to set hardware mode change + * @WLAN_SER_CMD_NSS_UPDATE: Cmd to update NSS config + * @WLAN_SER_CMD_SET_DUAL_MAC_CONFIG: Cmd to set dual mac + * @WLAN_SER_CMD_SET_ANTENNA_MODE: Set antenna mode + * @WLAN_SER_CMD_VDEV_DELETE: Cmd to del vdev + * @WLAN_SER_CMD_VDEV_START_BSS: Cmd to start a AP VDEV + * @WLAN_SER_CMD_VDEV_STOP_BSS: Cmd to stop a AP VDEV + * @WLAN_SER_CMD_VDEV_CONNECT: Cmd to start a STA VDEV + * @WLAN_SER_CMD_VDEV_DISCONNECT: Cmd to stop a STA VDEV + * @WLAN_SER_CMD_VDEV_RESTART: Cmd to restart a VDEV + * @WLAN_SER_CMD_PDEV_RESTART: Cmd to restart all VDEVs of a PDEV + * @WLAN_SER_CMD_PDEV_CSA_RESTART: Cmd to CSA restart all AP VDEVs of a PDEV + * @WLAN_SER_CMD_GET_DISCONNECT_STATS: Cmd to get peer stats on disconnection + */ +enum wlan_serialization_cmd_type { + /* all scan command before non-scan */ + WLAN_SER_CMD_SCAN, + /* all non-scan command below */ + WLAN_SER_CMD_NONSCAN, + WLAN_SER_CMD_HDD_ISSUE_REASSOC_SAME_AP, + WLAN_SER_CMD_SME_ISSUE_REASSOC_SAME_AP, + WLAN_SER_CMD_SME_ISSUE_DISASSOC_FOR_HANDOFF, + WLAN_SER_CMD_SME_ISSUE_ASSOC_TO_SIMILAR_AP, + WLAN_SER_CMD_FORCE_IBSS_LEAVE, + WLAN_SER_CMD_SME_ISSUE_FT_REASSOC, + WLAN_SER_CMD_FORCE_DISASSOC_STA, + WLAN_SER_CMD_FORCE_DEAUTH_STA, + WLAN_SER_CMD_PERFORM_PRE_AUTH, + WLAN_SER_CMD_WM_STATUS_CHANGE, + WLAN_SER_CMD_NDP_INIT_REQ, + WLAN_SER_CMD_NDP_RESP_REQ, + WLAN_SER_CMD_NDP_DATA_END_INIT_REQ, + WLAN_SER_CMD_NDP_END_ALL_REQ, + WLAN_SER_CMD_ADDTS, + WLAN_SER_CMD_DELTS, + WLAN_SER_CMD_TDLS_SEND_MGMT, + WLAN_SER_CMD_TDLS_ADD_PEER, + WLAN_SER_CMD_TDLS_DEL_PEER, + WLAN_SER_CMD_SET_HW_MODE, + WLAN_SER_CMD_NSS_UPDATE, + WLAN_SER_CMD_SET_DUAL_MAC_CONFIG, + WLAN_SER_CMD_SET_ANTENNA_MODE, + WLAN_SER_CMD_VDEV_DELETE, + WLAN_SER_CMD_VDEV_START_BSS, + WLAN_SER_CMD_VDEV_STOP_BSS, + WLAN_SER_CMD_VDEV_CONNECT, + WLAN_SER_CMD_VDEV_DISCONNECT, + WLAN_SER_CMD_VDEV_RESTART, + WLAN_SER_CMD_PDEV_RESTART, + WLAN_SER_CMD_PDEV_CSA_RESTART, + WLAN_SER_CMD_GET_DISCONNECT_STATS, + WLAN_SER_CMD_MAX +}; + +/** + * enum wlan_serialization_cancel_type - Type of commands to be cancelled + * @WLAN_SER_CANCEL_SINGLE_SCAN: Cancel a single scan with a given ID + * @WLAN_SER_CANCEL_PDEV_SCANS: Cancel all the scans on a given pdev + * @WLAN_SER_CANCEL_VDEV_SCANS: Cancel all the scans on given vdev + * @WLAN_SER_CANCEL_VDEV_HOST_SCANS: Cancel all host scans on given vdev + * @WLAN_SER_CANCEL_PDEV_NON_SCAN_CMD: Cancel all non scans on a given pdev + * @WLAN_SER_CANCEL_VDEV_NON_SCAN_CMD: Cancel all non scans on a given vdev + * @WLAN_SER_CANCEL_VDEV_NON_SCAN_CMD_TYPE: Cancel all non scans on a given vdev + * and matching cmd type + * @WLAN_SER_CANCEL_VDEV_NON_SCAN_NB_CMD: Cancel all non-blocking, + * non-scan commands of a given vdev + * @WLAN_SER_CANCEL_NON_SCAN_CMD: Cancel the given non scan command + */ +enum wlan_serialization_cancel_type { + WLAN_SER_CANCEL_SINGLE_SCAN, + WLAN_SER_CANCEL_PDEV_SCANS, + WLAN_SER_CANCEL_VDEV_SCANS, + WLAN_SER_CANCEL_VDEV_HOST_SCANS, + WLAN_SER_CANCEL_PDEV_NON_SCAN_CMD, + WLAN_SER_CANCEL_VDEV_NON_SCAN_CMD, + WLAN_SER_CANCEL_VDEV_NON_SCAN_CMD_TYPE, + WLAN_SER_CANCEL_VDEV_NON_SCAN_NB_CMD, + WLAN_SER_CANCEL_NON_SCAN_CMD, + WLAN_SER_CANCEL_MAX, +}; + +/** + * enum wlan_serialization_status - Return status of cmd serialization request + * @WLAN_SER_CMD_PENDING: Command is put into the pending queue + * @WLAN_SER_CMD_ACTIVE: Command is activated and put in active queue + * @WLAN_SER_CMD_DENIED_RULES_FAILED: Command denied as the rules fail + * @WLAN_SER_CMD_DENIED_LIST_FULL: Command denied as the pending list is full + * @WLAN_SER_CMD_QUEUE_DISABLED: Command denied as the queue is disabled + * @WLAN_SER_CMD_ALREADY_EXISTS: Command already exists in the queue + * @WLAN_SER_CMD_DENIED_UNSPECIFIED: Command denied due to unknown reason + */ +enum wlan_serialization_status { + WLAN_SER_CMD_PENDING, + WLAN_SER_CMD_ACTIVE, + WLAN_SER_CMD_DENIED_RULES_FAILED, + WLAN_SER_CMD_DENIED_LIST_FULL, + WLAN_SER_CMD_QUEUE_DISABLED, + WLAN_SER_CMD_ALREADY_EXISTS, + WLAN_SER_CMD_DENIED_UNSPECIFIED, +}; + +/** + * enum wlan_serialization_cmd_status - Return status for a cancel request + * @WLAN_SER_CMD_IN_PENDING_LIST: Command cancelled from pending list + * @WLAN_SER_CMD_IN_ACTIVE_LIST: Command cancelled from active list + * @WLAN_SER_CMDS_IN_ALL_LISTS: Command cancelled from all lists + * @WLAN_SER_CMD_NOT_FOUND: Specified command to be cancelled + * not found in the lists + */ +enum wlan_serialization_cmd_status { + WLAN_SER_CMD_IN_PENDING_LIST, + WLAN_SER_CMD_IN_ACTIVE_LIST, + WLAN_SER_CMDS_IN_ALL_LISTS, + WLAN_SER_CMD_MARKED_FOR_ACTIVATION, + WLAN_SER_CMD_NOT_FOUND, +}; + +/** + * enum wlan_ser_cmd_attr - Serialization cmd attribute + * @WLAN_SER_CMD_ATTR_NONE - No attribuate associated + * @WLAN_SER_CMD_ATTR_BLOCK - Blocking attribute + * @WLAN_SER_CMD_ATTR_NONBLOCK - Non-blocking attribute + */ +enum wlan_ser_cmd_attr { + WLAN_SER_CMD_ATTR_NONE, + WLAN_SER_CMD_ATTR_BLOCK, + WLAN_SER_CMD_ATTR_NONBLOCK, +}; + +/** + * struct wlan_serialization_command - Command to be serialized + * @wlan_serialization_cmd_type: Type of command + * @cmd_id: Command Identifier + * @cmd_cb: Command callback + * @source: component ID of the source of the command + * @is_high_priority: Normal/High Priority at which the cmd has to be queued + * @is_blocking: Is the command blocking + * @queue_disable: Should the command disable the queues + * @activation_reason: reason the activation cb was called + * @cmd_timeout_cb: Command timeout callback + * @cmd_timeout_duration: Timeout duration in milliseconds + * @vdev: VDEV object associated to the command + * @umac_cmd: Actual command that needs to be sent to WMI/firmware + * + * Note: Unnamed union has been used in this structure, so that in future if + * somebody wants to add pdev or psoc structure then that person can add without + * modifying existing code. + */ +struct wlan_serialization_command { + enum wlan_serialization_cmd_type cmd_type; + uint32_t cmd_id; + wlan_serialization_cmd_callback cmd_cb; + enum wlan_umac_comp_id source; + uint8_t is_high_priority:1, + is_blocking:1, + queue_disable:1, + activation_reason:3; + uint32_t cmd_timeout_duration; + union { + struct wlan_objmgr_vdev *vdev; + }; + void *umac_cmd; +}; + +/** + * struct wlan_serialization_queued_cmd_info - cmd that has to be cancelled + * @requestor: component ID of the source requesting this action + * @cmd_type: Command type + * @cmd_id: Command ID + * @req_type: Commands that need to be cancelled + * @vdev: VDEV object associated to the command + * @queue_type: Queues from which the command to be cancelled + */ +struct wlan_serialization_queued_cmd_info { + enum wlan_umac_comp_id requestor; + enum wlan_serialization_cmd_type cmd_type; + uint32_t cmd_id; + enum wlan_serialization_cancel_type req_type; + union { + struct wlan_objmgr_vdev *vdev; + }; + uint8_t queue_type; +}; + +/** + * wlan_serialization_cancel_request() - Request to cancel a command + * @req: Request information + * + * This API is used by external components to cancel a command + * that is either in the pending or active queue. Based on the + * req_type, it is decided whether to use pdev or vdev + * object. For all non-scan commands, it will be pdev. + * + * Return: Status specifying the removal of a command from a certain queue + */ +enum wlan_serialization_cmd_status +wlan_serialization_cancel_request( + struct wlan_serialization_queued_cmd_info *req); + +/** + * wlan_serialization_remove_cmd() - Request to release a command + * @cmd: Command information + * + * This API is used to release a command sitting in the active + * queue upon successful completion of the command + * + * Return: None + */ +void wlan_serialization_remove_cmd( + struct wlan_serialization_queued_cmd_info *cmd); + +/** + * wlan_serialization_update_timer() -Update timer for an active command + * @cmd: Command information + * + * Return: Status of the timer update + */ +QDF_STATUS +wlan_serialization_update_timer(struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_request() - Request to serialize a command + * @cmd: Command information + * + * Return: Status of the serialization request + */ +enum wlan_serialization_status +wlan_serialization_request(struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_register_comp_info_cb() - Register component's info cb + * @psoc: PSOC object information + * @comp_id: Component ID + * @cmd_type: Command Type + * @cb: Callback + * + * This is called from component during its initialization.It initializes + * callback handler for given comp_id/cmd_id in a 2-D array. + * + * Return: QDF Status + */ +QDF_STATUS +wlan_serialization_register_comp_info_cb( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + enum wlan_serialization_cmd_type cmd_type, + wlan_serialization_comp_info_cb cb); + +/** + * wlan_serialization_deregister_comp_info_cb() - Deregister component's info + * callback + * @psoc: PSOC object information + * @comp_id: Component ID + * @cmd_type: Command Type + * + * This routine is called from other component during its de-initialization. + * + * Return: QDF Status + */ +QDF_STATUS +wlan_serialization_deregister_comp_info_cb( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + enum wlan_serialization_cmd_type cmd_type); + +/** + * wlan_serialization_register_apply_rules_cb() - Register component's rules + * callback + * @psoc: PSOC object information + * @cmd_type: Command Type + * @cb: Callback + * + * This is called from component during its initialization.It initializes + * callback handler for given cmd_type in a 1-D array. + * + * Return: QDF Status + */ +QDF_STATUS +wlan_serialization_register_apply_rules_cb( + struct wlan_objmgr_psoc *psoc, + enum wlan_serialization_cmd_type cmd_type, + wlan_serialization_apply_rules_cb apply_rules_cb); + +/** + * wlan_serialization_deregister_apply_rules_cb() - Deregister component's rules + * callback + * @psoc: PSOC object information + * @cmd_type: Command Type + * + * This routine is called from other component during its de-initialization. + * + * Return: QDF Status + */ +QDF_STATUS +wlan_serialization_deregister_apply_rules_cb( + struct wlan_objmgr_psoc *psoc, + enum wlan_serialization_cmd_type cmd_type); + +/** + * @wlan_serialization_init() - Serialization component initialization routine + * + * Return - QDF Status + */ +QDF_STATUS wlan_serialization_init(void); + +/** + * @wlan_serialization_deinit() - Serialization component de-init routine + * + * Return - QDF Status + */ +QDF_STATUS wlan_serialization_deinit(void); + +/** + * @wlan_serialization_psoc_enable() - Serialization component enable routine + * + * Return - QDF Status + */ +QDF_STATUS wlan_serialization_psoc_enable(struct wlan_objmgr_psoc *psoc); + +/** + * @wlan_serialization_psoc_disable() - Serialization component disable routine + * + * Return - QDF Status + */ +QDF_STATUS wlan_serialization_psoc_disable(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_serialization_vdev_scan_status() - Return the status of the vdev scan + * @vdev: VDEV Object + * + * Return: Status of the scans for the corresponding vdev + */ +enum wlan_serialization_cmd_status +wlan_serialization_vdev_scan_status(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_serialization_pdev_scan_status() - Return the status of the pdev scan + * @pdev: PDEV Object + * + * Return: Status of the scans for the corresponding pdev + */ +enum wlan_serialization_cmd_status +wlan_serialization_pdev_scan_status(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_serialization_non_scan_cmd_status() - Return status of pdev non-scan cmd + * @pdev: PDEV Object + * @cmd_id: ID of the command for which the status has to be checked + * + * Return: Status of the command for the corresponding pdev + */ +enum wlan_serialization_cmd_status +wlan_serialization_non_scan_cmd_status(struct wlan_objmgr_pdev *pdev, + enum wlan_serialization_cmd_type cmd_id); + +/** + * wlan_serialization_is_cmd_present_in_pending_queue() - Return if the command + * is already present in pending queue + * @cmd: pointer to serialization command to check + * + * This API will check if command is present in pending queue. If present + * then return true, so use know that it is duplicated command + * + * Return: true or false + */ +bool wlan_serialization_is_cmd_present_in_pending_queue( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd); +/** + * wlan_serialization_is_cmd_present_in_active_queue() - Return if the command + * is already present in active queue + * @cmd: pointer to serialization command to check + * + * This API will check if command is present in active queue. If present + * then return true, so use know that it is duplicated command + * + * Return: true or false + */ +bool wlan_serialization_is_cmd_present_in_active_queue( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_get_scan_cmd_using_scan_id() - Return command which + * matches vdev_id and scan_id + * @psoc: pointer to soc + * @vdev_id: vdev id to pull vdev object + * @scan_id: scan id to match + * @is_scan_cmd_from_active_queue: to indicate active or pending queue + * + * This API fetches vdev/pdev object based on vdev_id, loops through scan + * command queue and find the command which matches scan id as well as vdev + * object. + * + * Return: pointer to serialization command + */ +struct wlan_serialization_command* +wlan_serialization_get_scan_cmd_using_scan_id( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint16_t scan_id, + uint8_t is_scan_cmd_from_active_queue); +/** + * wlan_serialization_get_active_cmd() - Return active umac command which + * matches vdev and cmd type + * @psoc: pointer to soc + * @vdev_id: vdev id to pull vdev object + * @cmd_type: cmd type to match + * + * This API fetches vdev/pdev object based on vdev_id, loops through active + * command queue and find the active command which matches cmd_type as well + * as vdev object. + * + * Return: Pointer to umac command. NULL is returned if active command of given + * type is not found. + */ +void *wlan_serialization_get_active_cmd( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + enum wlan_serialization_cmd_type cmd_type); + +/** + * wlan_serialization_get_vdev_active_cmd_type() - Return cmd type of the + * active command for the given vdev + * @vdev: vdev object + * + * This API fetches command type of the command in the vdev active queue + * + * Return: command type of the command in the vdev active queue + */ + +enum wlan_serialization_cmd_type +wlan_serialization_get_vdev_active_cmd_type(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_ser_get_cmd_activation_status() - Return active command status + * @vdev: vdev object + * + * This API fetches active command state in the vdev active queue + * + * Return: success if CMD_MARKED_FOR_ACTIVATION bit is set, else fail + */ + +QDF_STATUS +wlan_ser_get_cmd_activation_status(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_ser_is_vdev_queue_enabled() - Return vdev queue status + * @vdev: vdev object + * + * This API return vdev queue enable status + * + * Return: true if vdev queue is enabled + */ +bool wlan_ser_is_vdev_queue_enabled(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_ser_validate_umac_cmd() - validate umac cmd data + * @vdev: objmgr vdev pointer + * @cmd_type: cmd type to match + * @umac_cmd_cb: Callback to be called to validate the data + * + * This API returns the validation status of the umac cmd cb. + * The umac_cmd_cb callback is called with serialization lock held, and hence + * only atomic operations are allowed in the callback. + * + * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_FAILURE + */ +QDF_STATUS +wlan_ser_validate_umac_cmd(struct wlan_objmgr_vdev *vdev, + enum wlan_serialization_cmd_type cmd_type, + wlan_ser_umac_cmd_cb umac_cmd_cb); + +/** + * wlan_serialization_purge_all_pdev_cmd() - purge all command for given pdev + * @pdev: objmgr pdev pointer + * + * Return: void + */ +void wlan_serialization_purge_all_pdev_cmd(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_serialization_purge_all_cmd() - purge all command for psoc + * @psoc: objmgr psoc pointer + * + * Return: void + */ +void wlan_serialization_purge_all_cmd(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_serialization_purge_all_pending_cmd_by_vdev_id() - Purge all pending + * scan and non scan commands for vdev id + * @pdev: pointer to pdev + * @vdev_id: vdev_id variable + * + * Return: none + */ +void wlan_serialization_purge_all_pending_cmd_by_vdev_id( + struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id); + +/** + * wlan_serialization_purge_all_scan_cmd_by_vdev_id() - Purge all pending/active + * scan commands for vdev id + * @pdev: pointer to pdev + * @vdev_id: vdev_id variable + * + * Return: none + */ +void wlan_serialization_purge_all_scan_cmd_by_vdev_id( + struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id); + +/** + * wlan_ser_vdev_queue_disable -Disable vdev specific serialization queue + * @vdev: Vdev Object + * + * This function disables the serialization for the vdev queue + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_ser_vdev_queue_disable(struct wlan_objmgr_vdev *vdev); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_legacy_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_legacy_api.h new file mode 100644 index 0000000000000000000000000000000000000000..0c48da459aed79ec45969339b4ef357b667165ee --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_legacy_api.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_legacy_api.h + * This file provides prototypes of the routines needed for the + * legacy mcl serialization to utilize the services provided by the + * serialization component. + */ +#ifndef __WLAN_SERIALIZATION_LEGACY_API_H +#define __WLAN_SERIALIZATION_LEGACY_API_H + +#include "wlan_serialization_api.h" + +/** + * wlan_serialization_peek_head_pending_cmd_using_psoc() - Return command from + * scan or non-scan pending queue based on flag + * @psoc: pointer to psoc + * @is_cmd_from_pending_scan_queue: flag to determine whether command needed + * from scan or non-scan pending queue + * + * This API finds the first active pdev, and loops through scan or non-scan + * pending queue (based on is_cmd_from_pending_scan_queue flag) and fetches + * first pending command from queue + * + * Return: pointer to serialization command + */ +struct wlan_serialization_command* +wlan_serialization_peek_head_pending_cmd_using_psoc( + struct wlan_objmgr_psoc *psoc, + uint8_t is_cmd_from_pending_scan_queue); +/** + * wlan_serialization_peek_head_active_cmd_using_psoc() - Return command from + * scan or non-scan active queue based on flag + * @psoc: pointer to psoc + * @is_cmd_from_active_scan_queue: flag to determine whether command needed + * from scan or non-scan active queue + * + * This API finds the first active pdev, and loops through scan or non-scan + * active queue (based on is_cmd_from_active_scan_queue flag) and fetches + * first active command from queue + * + * Return: pointer to serialization command + */ +struct wlan_serialization_command* +wlan_serialization_peek_head_active_cmd_using_psoc( + struct wlan_objmgr_psoc *psoc, + uint8_t is_cmd_from_active_scan_queue); + +/** + * wlan_serialization_get_pending_list_next_node_using_psoc() - Return next + * scan or non-scan pending command from queue + * @psoc: pointer to psoc + * @prev_cmd: previous command given by caller, find next command after this + * @is_cmd_for_pending_scan_queue: to find from scan or non-scan pending queue + * + * This API finds the first active pdev, and loops through scan or non-scan + * pending queue (based on is_cmd_from_pending_scan_queue flag) and fetches + * next pending command after prev_cmd + * + * Return: pointer to serialization command + */ +struct wlan_serialization_command* +wlan_serialization_get_pending_list_next_node_using_psoc( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *prev_cmd, + uint8_t is_cmd_for_pending_scan_queue); +/** + * wlan_serialization_get_pending_list_count() - Return pending list count + * @psoc: pointer to soc + * @is_cmd_from_pending_scan_queue: flag to determine whether command needed + * from scan or non-scan pending queue + * + * Get the number of nodes present in pending list + * + * Return: count number of pending commands in queue + */ +uint32_t wlan_serialization_get_pending_list_count( + struct wlan_objmgr_psoc *psoc, + uint8_t is_cmd_from_pending_scan_queue); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_api.c new file mode 100644 index 0000000000000000000000000000000000000000..6cf61b7cf3cbcf9389e39c8aea52154b844b5d6e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_api.c @@ -0,0 +1,920 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_serialization_api.c + * This file provides an interface for the external components + * to utilize the services provided by the serialization + * component. + */ + +#include +#include +#include +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_utils_i.h" +#include "wlan_serialization_queue_i.h" +#include "wlan_serialization_scan_i.h" +#include "wlan_serialization_internal_i.h" + +bool wlan_serialization_is_cmd_present_in_pending_queue( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd) +{ + bool status = false; + + if (!cmd) { + ser_err("invalid cmd"); + goto error; + } + + status = wlan_serialization_is_cmd_present_queue(cmd, false); + +error: + return status; +} + +bool wlan_serialization_is_cmd_present_in_active_queue( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd) +{ + bool status; + + if (!cmd) { + ser_err("invalid cmd"); + status = false; + goto error; + } + + status = wlan_serialization_is_cmd_present_queue(cmd, true); + +error: + return status; +} + +QDF_STATUS +wlan_serialization_register_apply_rules_cb( + struct wlan_objmgr_psoc *psoc, + enum wlan_serialization_cmd_type cmd_type, + wlan_serialization_apply_rules_cb cb) +{ + struct wlan_ser_psoc_obj *ser_soc_obj; + QDF_STATUS status; + + status = wlan_serialization_validate_cmdtype(cmd_type); + if (QDF_IS_STATUS_ERROR(status)) { + ser_err("invalid cmd_type %d", cmd_type); + goto error; + } + + ser_soc_obj = wlan_serialization_get_psoc_obj(psoc); + if (!ser_soc_obj) { + ser_err("invalid ser_soc_obj"); + status = QDF_STATUS_E_FAILURE; + goto error; + } + + ser_soc_obj->apply_rules_cb[cmd_type] = cb; + status = QDF_STATUS_SUCCESS; + +error: + return status; +} + +QDF_STATUS +wlan_serialization_deregister_apply_rules_cb( + struct wlan_objmgr_psoc *psoc, + enum wlan_serialization_cmd_type cmd_type) +{ + struct wlan_ser_psoc_obj *ser_soc_obj; + QDF_STATUS status; + + status = wlan_serialization_validate_cmdtype(cmd_type); + if (QDF_IS_STATUS_ERROR(status)) { + ser_err("invalid cmd_type %d", cmd_type); + goto error; + } + ser_soc_obj = wlan_serialization_get_psoc_obj(psoc); + if (!ser_soc_obj) { + ser_err("invalid ser_soc_obj"); + status = QDF_STATUS_E_FAILURE; + goto error; + } + ser_soc_obj->apply_rules_cb[cmd_type] = NULL; + status = QDF_STATUS_SUCCESS; + +error: + return status; +} + +QDF_STATUS +wlan_serialization_register_comp_info_cb( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + enum wlan_serialization_cmd_type cmd_type, + wlan_serialization_comp_info_cb cb) +{ + struct wlan_ser_psoc_obj *ser_soc_obj; + QDF_STATUS status; + + status = wlan_serialization_validate_cmd(comp_id, cmd_type); + if (QDF_IS_STATUS_ERROR(status)) { + ser_err("invalid comp_id %d or cmd_type %d", + comp_id, cmd_type); + goto error; + } + ser_soc_obj = wlan_serialization_get_psoc_obj(psoc); + if (!ser_soc_obj) { + ser_err("invalid ser_soc_obj"); + status = QDF_STATUS_E_FAILURE; + goto error; + } + ser_soc_obj->comp_info_cb[cmd_type][comp_id] = cb; + status = QDF_STATUS_SUCCESS; + +error: + return status; +} + +QDF_STATUS +wlan_serialization_deregister_comp_info_cb(struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + enum wlan_serialization_cmd_type cmd_type) +{ + struct wlan_ser_psoc_obj *ser_soc_obj; + QDF_STATUS status; + + status = wlan_serialization_validate_cmd(comp_id, cmd_type); + if (QDF_IS_STATUS_ERROR(status)) { + ser_err("invalid comp_id %d or cmd_type %d", + comp_id, cmd_type); + goto error; + } + ser_soc_obj = wlan_serialization_get_psoc_obj(psoc); + if (!ser_soc_obj) { + ser_err("invalid ser_soc_obj"); + status = QDF_STATUS_E_FAILURE; + goto error; + } + ser_soc_obj->comp_info_cb[cmd_type][comp_id] = NULL; + status = QDF_STATUS_SUCCESS; + +error: + return status; +} + +enum wlan_serialization_cmd_status +wlan_serialization_non_scan_cmd_status( + struct wlan_objmgr_pdev *pdev, + enum wlan_serialization_cmd_type cmd_type) +{ + bool cmd_in_active = 0; + bool cmd_in_pending = 0; + struct wlan_ser_pdev_obj *ser_pdev_obj = + wlan_serialization_get_pdev_obj(pdev); + enum wlan_serialization_cmd_status cmd_status = WLAN_SER_CMD_NOT_FOUND; + struct wlan_serialization_pdev_queue *pdev_q; + qdf_list_node_t *node = NULL; + qdf_list_t *queue = NULL; + + pdev_q = &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_NON_SCAN]; + + /* Look in the pdev non scan active queue */ + queue = &pdev_q->active_list; + + wlan_serialization_acquire_lock(&pdev_q->pdev_queue_lock); + + node = wlan_serialization_find_cmd( + queue, WLAN_SER_MATCH_CMD_TYPE, + NULL, cmd_type, NULL, NULL, WLAN_SER_PDEV_NODE); + + if (node) + cmd_in_active = true; + + node = NULL; + + /* Look in the pdev non scan pending queue */ + queue = &pdev_q->pending_list; + + node = wlan_serialization_find_cmd( + queue, WLAN_SER_MATCH_CMD_TYPE, + NULL, cmd_type, NULL, NULL, WLAN_SER_PDEV_NODE); + + if (node) + cmd_in_pending = true; + + cmd_status = wlan_serialization_is_cmd_in_active_pending( + cmd_in_active, cmd_in_pending); + + wlan_serialization_release_lock(&pdev_q->pdev_queue_lock); + + return cmd_status; +} + +enum wlan_serialization_cmd_status +wlan_serialization_cancel_request( + struct wlan_serialization_queued_cmd_info *req) +{ + QDF_STATUS status; + enum wlan_serialization_cmd_status cmd_status; + + struct wlan_serialization_command cmd; + struct wlan_objmgr_pdev *pdev; + struct wlan_ser_pdev_obj *ser_pdev_obj; + struct wlan_serialization_pdev_queue *pdev_queue; + + if (!req) { + ser_err("given request is empty"); + cmd_status = WLAN_SER_CMD_NOT_FOUND; + goto error; + } + + status = wlan_serialization_validate_cmd(req->requestor, req->cmd_type); + if (QDF_IS_STATUS_ERROR(status)) { + ser_err("req is not valid"); + cmd_status = WLAN_SER_CMD_NOT_FOUND; + goto error; + } + + cmd.cmd_type = req->cmd_type; + cmd.cmd_id = req->cmd_id; + cmd.source = req->requestor; + cmd.vdev = req->vdev; + + ser_debug("Type %d id %d source %d req type %d queue type %d", + cmd.cmd_type, cmd.cmd_id, cmd.source, req->req_type, + req->queue_type); + pdev = wlan_serialization_get_pdev_from_cmd(&cmd); + if (!pdev) { + ser_err("pdev is invalid"); + cmd_status = WLAN_SER_CMD_NOT_FOUND; + goto error; + } + + ser_pdev_obj = wlan_serialization_get_pdev_obj(pdev); + + pdev_queue = wlan_serialization_get_pdev_queue_obj(ser_pdev_obj, + cmd.cmd_type); + + if (!pdev_queue) { + ser_err("pdev_queue is invalid"); + cmd_status = WLAN_SER_CMD_NOT_FOUND; + goto error; + } + + cmd_status = wlan_serialization_find_and_cancel_cmd( + &cmd, req->req_type, req->queue_type); + +error: + + return cmd_status; +} + +void wlan_serialization_remove_cmd( + struct wlan_serialization_queued_cmd_info *cmd_info) +{ + QDF_STATUS status; + enum wlan_serialization_cmd_status ser_status; + struct wlan_serialization_command cmd = {0}; + + if (!cmd_info) { + ser_err("given request is empty"); + QDF_ASSERT(0); + return; + } + status = wlan_serialization_validate_cmd(cmd_info->requestor, + cmd_info->cmd_type); + if (QDF_IS_STATUS_ERROR(status)) { + ser_err("cmd type %d is not valid", cmd_info->cmd_type); + QDF_ASSERT(0); + return; + } + + cmd.cmd_type = cmd_info->cmd_type; + cmd.cmd_id = cmd_info->cmd_id; + cmd.source = cmd_info->requestor; + cmd.vdev = cmd_info->vdev; + + ser_status = wlan_serialization_dequeue_cmd( + &cmd, SER_REMOVE, true); + + if (ser_status != WLAN_SER_CMD_IN_ACTIVE_LIST) { + if (ser_status != WLAN_SER_CMD_MARKED_FOR_ACTIVATION) + ser_debug("Can't dequeue requested id %d type %d requestor %d", + cmd.cmd_id, cmd.cmd_type, + cmd_info->requestor); + } +} + +enum wlan_serialization_status +wlan_serialization_request(struct wlan_serialization_command *cmd) +{ + QDF_STATUS status; + enum wlan_serialization_status serialization_status; + uint8_t comp_id; + struct wlan_ser_psoc_obj *ser_soc_obj; + union wlan_serialization_rules_info info; + struct wlan_objmgr_psoc *psoc; + + serialization_status = WLAN_SER_CMD_DENIED_UNSPECIFIED; + + if (!cmd) { + ser_err("serialization cmd is null"); + goto error; + } + status = wlan_serialization_validate_cmd(cmd->source, cmd->cmd_type); + if (QDF_IS_STATUS_ERROR(status)) + goto error; + + psoc = wlan_serialization_get_psoc_from_cmd(cmd); + if (!psoc) { + ser_err("psoc _obj is invalid"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + ser_soc_obj = wlan_serialization_get_psoc_obj(psoc); + + if (!ser_soc_obj) { + ser_err("ser_soc_obj is invalid"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + + /* + * Get Component Info callback by calling + * each registered module + */ + for (comp_id = 0; comp_id < WLAN_UMAC_COMP_ID_MAX; comp_id++) { + if (!ser_soc_obj->comp_info_cb[cmd->cmd_type][comp_id]) + continue; + ser_soc_obj->comp_info_cb[cmd->cmd_type][comp_id](cmd->vdev, + &info); + if (!ser_soc_obj->apply_rules_cb[cmd->cmd_type]) + continue; + if (!ser_soc_obj->apply_rules_cb[cmd->cmd_type](&info, comp_id)) + return WLAN_SER_CMD_DENIED_RULES_FAILED; + } + + serialization_status = wlan_serialization_enqueue_cmd(cmd, SER_REQUEST); + +error: + return serialization_status; +} + +QDF_STATUS +wlan_serialization_update_timer(struct wlan_serialization_command *cmd) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_psoc *psoc; + + if (!cmd) { + ser_err("NULL command"); + goto error; + } + + pdev = wlan_serialization_get_pdev_from_cmd(cmd); + if (!pdev) { + ser_err("invalid pdev"); + goto error; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + ser_err("invalid psoc"); + goto error; + } + + status = wlan_serialization_find_and_update_timer(psoc, cmd); + +error: + return status; +} + +enum wlan_serialization_cmd_status +wlan_serialization_vdev_scan_status(struct wlan_objmgr_vdev *vdev) +{ + bool cmd_in_active = 0, cmd_in_pending = 0; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct wlan_ser_pdev_obj *ser_pdev_obj = + wlan_serialization_get_pdev_obj(pdev); + struct wlan_serialization_pdev_queue *pdev_q; + enum wlan_serialization_cmd_status status; + + pdev_q = &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_SCAN]; + + wlan_serialization_acquire_lock(&pdev_q->pdev_queue_lock); + + cmd_in_active = + wlan_serialization_is_cmd_in_vdev_list( + vdev, &pdev_q->active_list, WLAN_SER_PDEV_NODE); + + cmd_in_pending = + wlan_serialization_is_cmd_in_vdev_list( + vdev, &pdev_q->pending_list, WLAN_SER_PDEV_NODE); + + status = wlan_serialization_is_cmd_in_active_pending( + cmd_in_active, cmd_in_pending); + + wlan_serialization_release_lock(&pdev_q->pdev_queue_lock); + + return status; +} + +enum wlan_serialization_cmd_status +wlan_serialization_pdev_scan_status(struct wlan_objmgr_pdev *pdev) +{ + bool cmd_in_active, cmd_in_pending; + struct wlan_ser_pdev_obj *ser_pdev_obj = + wlan_serialization_get_pdev_obj(pdev); + struct wlan_serialization_pdev_queue *pdev_q; + enum wlan_serialization_cmd_status status; + + pdev_q = &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_SCAN]; + + wlan_serialization_acquire_lock(&pdev_q->pdev_queue_lock); + + cmd_in_active = !qdf_list_empty(&pdev_q->active_list); + cmd_in_pending = !qdf_list_empty(&pdev_q->pending_list); + + status = wlan_serialization_is_cmd_in_active_pending( + cmd_in_active, cmd_in_pending); + + wlan_serialization_release_lock(&pdev_q->pdev_queue_lock); + + return status; +} + +struct wlan_serialization_command* +wlan_serialization_get_scan_cmd_using_scan_id( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint16_t scan_id, + uint8_t is_scan_cmd_from_active_queue) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + struct wlan_ser_pdev_obj *ser_pdev_obj; + struct wlan_serialization_command cmd = {0}; + struct wlan_serialization_command *pcmd = NULL; + struct wlan_serialization_command_list *cmd_list; + qdf_list_node_t *node = NULL; + qdf_list_t *queue; + struct wlan_serialization_pdev_queue *pdev_q; + + if (!psoc) { + ser_err("invalid psoc"); + goto error; + } + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, + WLAN_SERIALIZATION_ID); + if (!vdev) { + ser_err("invalid vdev"); + goto error; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + ser_err("invalid pdev"); + goto release_vdev_ref; + } + + ser_pdev_obj = wlan_serialization_get_pdev_obj(pdev); + if (!ser_pdev_obj) { + ser_err("invalid ser_pdev_obj"); + goto release_vdev_ref; + } + + pdev_q = &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_SCAN]; + + wlan_serialization_acquire_lock(&pdev_q->pdev_queue_lock); + + if (is_scan_cmd_from_active_queue) + queue = &pdev_q->active_list; + else + queue = &pdev_q->pending_list; + + cmd.cmd_type = WLAN_SER_CMD_SCAN; + cmd.cmd_id = scan_id; + cmd.vdev = vdev; + + node = wlan_serialization_find_cmd( + queue, WLAN_SER_MATCH_CMD_ID_VDEV, + &cmd, 0, NULL, vdev, WLAN_SER_PDEV_NODE); + + if (node) { + cmd_list = qdf_container_of( + node, + struct wlan_serialization_command_list, + pdev_node); + + pcmd = &cmd_list->cmd; + } + + wlan_serialization_release_lock(&pdev_q->pdev_queue_lock); + +release_vdev_ref: + wlan_objmgr_vdev_release_ref(vdev, WLAN_SERIALIZATION_ID); +error: + return pcmd; +} + +void *wlan_serialization_get_active_cmd( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + enum wlan_serialization_cmd_type cmd_type) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + struct wlan_ser_pdev_obj *ser_pdev_obj; + struct wlan_serialization_command_list *cmd_list = NULL; + void *umac_cmd = NULL; + qdf_list_node_t *node = NULL; + qdf_list_t *queue; + struct wlan_serialization_pdev_queue *pdev_q; + + if (!psoc) { + ser_err("invalid psoc"); + goto error; + } + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, + WLAN_SERIALIZATION_ID); + if (!vdev) { + ser_err("invalid vdev"); + goto error; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + ser_err("invalid pdev"); + goto release_vdev_ref; + } + + ser_pdev_obj = wlan_serialization_get_pdev_obj(pdev); + if (!ser_pdev_obj) { + ser_err("invalid ser_pdev_obj"); + goto release_vdev_ref; + } + + pdev_q = wlan_serialization_get_pdev_queue_obj(ser_pdev_obj, cmd_type); + + wlan_serialization_acquire_lock(&pdev_q->pdev_queue_lock); + + queue = &pdev_q->active_list; + + node = wlan_serialization_find_cmd( + queue, WLAN_SER_MATCH_CMD_TYPE_VDEV, + NULL, cmd_type, NULL, vdev, WLAN_SER_PDEV_NODE); + + if (node) { + cmd_list = qdf_container_of( + node, + struct wlan_serialization_command_list, + pdev_node); + + umac_cmd = cmd_list->cmd.umac_cmd; + } + + wlan_serialization_release_lock(&pdev_q->pdev_queue_lock); + +release_vdev_ref: + wlan_objmgr_vdev_release_ref(vdev, WLAN_SERIALIZATION_ID); +error: + + return umac_cmd; +} + +enum wlan_serialization_cmd_type +wlan_serialization_get_vdev_active_cmd_type(struct wlan_objmgr_vdev *vdev) +{ + enum wlan_serialization_cmd_type cmd_type = WLAN_SER_CMD_MAX; + struct wlan_ser_pdev_obj *ser_pdev_obj; + struct wlan_ser_vdev_obj *ser_vdev_obj; + struct wlan_serialization_pdev_queue *pdev_queue; + struct wlan_serialization_vdev_queue *vdev_queue; + struct wlan_serialization_command_list *cmd_list = NULL; + qdf_list_node_t *node; + + ser_pdev_obj = wlan_serialization_get_pdev_obj( + wlan_vdev_get_pdev(vdev)); + + if (!ser_pdev_obj) { + ser_err("invalid ser_pdev_obj"); + goto error; + } + pdev_queue = wlan_serialization_get_pdev_queue_obj( + ser_pdev_obj, cmd_type); + + ser_vdev_obj = wlan_serialization_get_vdev_obj(vdev); + if (!ser_vdev_obj) { + ser_err("invalid ser_vdev_obj"); + goto error; + } + vdev_queue = wlan_serialization_get_vdev_queue_obj( + ser_vdev_obj, WLAN_SER_CMD_NONSCAN); + + wlan_serialization_acquire_lock(&pdev_queue->pdev_queue_lock); + + if (wlan_serialization_peek_front( + &vdev_queue->active_list, &node) == QDF_STATUS_SUCCESS) { + cmd_list = qdf_container_of( + node, + struct wlan_serialization_command_list, + vdev_node); + + cmd_type = cmd_list->cmd.cmd_type; + } + + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + +error: + return cmd_type; +} + +bool wlan_ser_is_vdev_queue_enabled(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_ser_vdev_obj *ser_vdev_obj; + struct wlan_serialization_vdev_queue *vdev_queue; + + ser_vdev_obj = wlan_serialization_get_vdev_obj(vdev); + if (!ser_vdev_obj) { + ser_err("invalid ser_vdev_obj"); + return false; + } + + vdev_queue = wlan_serialization_get_vdev_queue_obj( + ser_vdev_obj, WLAN_SER_CMD_NONSCAN); + if (vdev_queue->queue_disable) + return false; + else + return true; +} + +QDF_STATUS +wlan_ser_get_cmd_activation_status(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_ser_pdev_obj *ser_pdev_obj; + struct wlan_ser_vdev_obj *ser_vdev_obj; + struct wlan_serialization_pdev_queue *pdev_queue; + struct wlan_serialization_vdev_queue *vdev_queue; + struct wlan_serialization_command_list *cmd_list = NULL; + qdf_list_node_t *node; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + ser_pdev_obj = wlan_serialization_get_pdev_obj( + wlan_vdev_get_pdev(vdev)); + + if (!ser_pdev_obj) { + ser_err("invalid ser_pdev_obj"); + return QDF_STATUS_E_FAILURE; + } + + pdev_queue = wlan_serialization_get_pdev_queue_obj( + ser_pdev_obj, WLAN_SER_CMD_NONSCAN); + + ser_vdev_obj = wlan_serialization_get_vdev_obj(vdev); + if (!ser_vdev_obj) { + ser_err("invalid ser_vdev_obj"); + return QDF_STATUS_E_FAILURE; + } + vdev_queue = wlan_serialization_get_vdev_queue_obj( + ser_vdev_obj, WLAN_SER_CMD_NONSCAN); + + wlan_serialization_acquire_lock(&pdev_queue->pdev_queue_lock); + + if (wlan_serialization_peek_front( + &vdev_queue->active_list, &node) == QDF_STATUS_SUCCESS) { + cmd_list = qdf_container_of( + node, + struct wlan_serialization_command_list, + vdev_node); + + if (qdf_atomic_test_bit(CMD_MARKED_FOR_ACTIVATION, + &cmd_list->cmd_in_use)) + status = QDF_STATUS_SUCCESS; + } + + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + + return status; +} + +QDF_STATUS +wlan_ser_validate_umac_cmd(struct wlan_objmgr_vdev *vdev, + enum wlan_serialization_cmd_type cmd_type, + wlan_ser_umac_cmd_cb umac_cmd_cb) +{ + struct wlan_objmgr_pdev *pdev; + struct wlan_ser_pdev_obj *ser_pdev_obj; + struct wlan_serialization_command_list *cmd_list = NULL; + void *umac_cmd = NULL; + qdf_list_node_t *node = NULL; + qdf_list_t *queue; + struct wlan_serialization_pdev_queue *pdev_q; + QDF_STATUS status = QDF_STATUS_E_INVAL; + + if (!vdev) { + ser_err("invalid vdev"); + return QDF_STATUS_E_INVAL; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + ser_err("invalid pdev"); + return QDF_STATUS_E_INVAL; + } + + ser_pdev_obj = wlan_serialization_get_pdev_obj(pdev); + if (!ser_pdev_obj) { + ser_err("invalid ser_pdev_obj"); + return QDF_STATUS_E_INVAL; + } + + pdev_q = wlan_serialization_get_pdev_queue_obj(ser_pdev_obj, cmd_type); + + wlan_serialization_acquire_lock(&pdev_q->pdev_queue_lock); + + queue = &pdev_q->active_list; + node = wlan_serialization_find_cmd( + queue, WLAN_SER_MATCH_CMD_TYPE_VDEV, + NULL, cmd_type, NULL, vdev, WLAN_SER_PDEV_NODE); + if (node) { + cmd_list = qdf_container_of( + node, + struct wlan_serialization_command_list, + pdev_node); + + umac_cmd = cmd_list->cmd.umac_cmd; + status = umac_cmd_cb(umac_cmd); + } + + wlan_serialization_release_lock(&pdev_q->pdev_queue_lock); + + return status; +} + +void wlan_serialization_purge_all_pdev_cmd(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_ser_pdev_obj *ser_pdev_obj; + + if (!pdev) { + ser_err("NULL pdev"); + return; + } + + ser_pdev_obj = wlan_serialization_get_pdev_obj(pdev); + if (!ser_pdev_obj) { + ser_err("invalid ser_pdev_obj"); + return; + } + + wlan_ser_cancel_scan_cmd(ser_pdev_obj, pdev, NULL, NULL, + WLAN_SER_CMD_SCAN, false); + wlan_ser_cancel_scan_cmd(ser_pdev_obj, pdev, NULL, NULL, + WLAN_SER_CMD_SCAN, true); + wlan_ser_cancel_non_scan_cmd(ser_pdev_obj, pdev, NULL, NULL, + WLAN_SER_CMD_NONSCAN, false, + WLAN_SER_CMD_ATTR_NONE); + wlan_ser_cancel_non_scan_cmd(ser_pdev_obj, pdev, NULL, NULL, + WLAN_SER_CMD_NONSCAN, true, + WLAN_SER_CMD_ATTR_NONE); +} + +static inline +void wlan_ser_purge_pdev_cmd_cb(struct wlan_objmgr_psoc *psoc, + void *object, void *arg) +{ + struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)object; + + wlan_serialization_purge_all_pdev_cmd(pdev); +} + +void wlan_serialization_purge_all_cmd(struct wlan_objmgr_psoc *psoc) +{ + wlan_objmgr_iterate_obj_list(psoc, WLAN_PDEV_OP, + wlan_ser_purge_pdev_cmd_cb, NULL, 1, + WLAN_SERIALIZATION_ID); +} + +void wlan_serialization_purge_all_pending_cmd_by_vdev_id( + struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_ser_pdev_obj *ser_pdev_obj; + + if (!pdev) { + ser_err("Invalid pdev"); + return; + } + + ser_pdev_obj = wlan_serialization_get_pdev_obj(pdev); + if (!ser_pdev_obj) { + ser_err("invalid ser_pdev_obj"); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_pdev(pdev, vdev_id, + WLAN_SERIALIZATION_ID); + if (!vdev) { + ser_err("Invalid vdev"); + return; + } + + wlan_ser_cancel_scan_cmd(ser_pdev_obj, pdev, vdev, NULL, + WLAN_SER_CMD_SCAN, false); + wlan_ser_cancel_non_scan_cmd(ser_pdev_obj, pdev, vdev, NULL, + WLAN_SER_CMD_NONSCAN, false, + WLAN_SER_CMD_ATTR_NONE); + + wlan_objmgr_vdev_release_ref(vdev, WLAN_SERIALIZATION_ID); +} + +void wlan_serialization_purge_all_scan_cmd_by_vdev_id( + struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_ser_pdev_obj *ser_pdev_obj; + + if (!pdev) { + ser_err("Invalid pdev"); + return; + } + + ser_pdev_obj = wlan_serialization_get_pdev_obj(pdev); + if (!ser_pdev_obj) { + ser_err("invalid ser_pdev_obj"); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_pdev(pdev, vdev_id, + WLAN_SERIALIZATION_ID); + if (!vdev) { + ser_err("Invalid vdev"); + return; + } + + wlan_ser_cancel_scan_cmd(ser_pdev_obj, pdev, vdev, NULL, + WLAN_SER_CMD_SCAN, false); + wlan_ser_cancel_scan_cmd(ser_pdev_obj, pdev, vdev, NULL, + WLAN_SER_CMD_SCAN, true); + + wlan_objmgr_vdev_release_ref(vdev, WLAN_SERIALIZATION_ID); +} + +QDF_STATUS wlan_ser_vdev_queue_disable(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev; + struct wlan_ser_vdev_obj *ser_vdev_obj; + struct wlan_serialization_vdev_queue *vdev_queue; + struct wlan_ser_pdev_obj *ser_pdev_obj; + struct wlan_serialization_pdev_queue *pdev_q; + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + ser_err("invalid PDEV object"); + return QDF_STATUS_E_INVAL; + } + + ser_pdev_obj = wlan_serialization_get_pdev_obj(pdev); + if (!ser_pdev_obj) { + ser_err("invalid ser_pdev_obj"); + return QDF_STATUS_E_INVAL; + } + + ser_vdev_obj = wlan_serialization_get_vdev_obj(vdev); + if (!ser_vdev_obj) { + ser_err("invalid ser_vdev_obj"); + return QDF_STATUS_E_INVAL; + } + + pdev_q = &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_NON_SCAN]; + + vdev_queue = wlan_serialization_get_vdev_queue_obj( + ser_vdev_obj, WLAN_SER_CMD_NONSCAN); + if (!vdev_queue) { + ser_err("invalid vdev_queue object"); + return QDF_STATUS_E_INVAL; + } + + wlan_serialization_acquire_lock(&pdev_q->pdev_queue_lock); + vdev_queue->queue_disable = true; + wlan_serialization_release_lock(&pdev_q->pdev_queue_lock); + ser_debug("Disabling the serialization for vdev:%d", + wlan_vdev_get_id(vdev)); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_debug.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_debug.c new file mode 100644 index 0000000000000000000000000000000000000000..6e43592012c7dde2059d651c0fe9e958d9f4ac82 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_debug.c @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_debug.c + * This file defines the debug functions for serialization component. + */ + +#include +#include +#include +#include "wlan_serialization_utils_i.h" +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_queue_i.h" +#include "wlan_serialization_debug_i.h" + +#ifdef WLAN_SER_DEBUG +const char *ser_reason_string[SER_QUEUE_ACTION_MAX] = { + "REQUEST", + "REMOVE", + "CANCEL", + "TIMEOUT", + "ACTIVATION_FAILED", + "PENDING_TO_ACTIVE", +}; + +static void wlan_ser_print_queues( + qdf_list_t *queue, + enum wlan_serialization_node node_type, + bool is_active_queue) +{ + struct wlan_serialization_command_list *cmd_list = NULL; + uint32_t queuelen; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + qdf_list_node_t *nnode = NULL; + bool is_pdev_queue = false; + + if (node_type == WLAN_SER_PDEV_NODE) + is_pdev_queue = true; + + ser_err_no_fl(WLAN_SER_LINE); + ser_err_no_fl("%s %s Queue", (is_pdev_queue) ? "PDEV" : "VDEV", + (is_active_queue ? "Active" : "Pending")); + + ser_err_no_fl(WLAN_SER_LINE); + ser_err_no_fl("|CMD_TYPE|CMD_ID|BLOCKING|PRIORITY|"); + ser_err_no_fl(WLAN_SER_LINE); + + queuelen = wlan_serialization_list_size(queue); + while (queuelen--) { + status = wlan_serialization_get_cmd_from_queue(queue, &nnode); + if (status != QDF_STATUS_SUCCESS) + break; + + if (node_type == WLAN_SER_PDEV_NODE) + cmd_list = qdf_container_of( + nnode, + struct wlan_serialization_command_list, + pdev_node); + else + cmd_list = qdf_container_of( + nnode, + struct wlan_serialization_command_list, + vdev_node); + + ser_err_no_fl("|%8u|%6u|%8u|%8u|", + cmd_list->cmd.cmd_type, + cmd_list->cmd.cmd_id, + cmd_list->cmd.is_blocking, + cmd_list->cmd.is_high_priority); + } +} + +static void wlan_ser_print_pdev_queue( + struct wlan_serialization_pdev_queue *ser_pdev_q_obj, + enum wlan_serialization_node node_type) +{ + /*Dump the active queue*/ + wlan_ser_print_queues(&ser_pdev_q_obj->active_list, + node_type, true); + + /*Dump the pending queue*/ + wlan_ser_print_queues(&ser_pdev_q_obj->pending_list, + node_type, false); +} + +static void wlan_ser_print_vdev_queue( + struct wlan_serialization_vdev_queue *ser_vdev_q_obj, + enum wlan_serialization_node node_type) +{ + /*Dump the active queue*/ + wlan_ser_print_queues(&ser_vdev_q_obj->active_list, + node_type, true); + + /*Dump the pending queue*/ + wlan_ser_print_queues(&ser_vdev_q_obj->pending_list, + node_type, false); +} + +static void wlan_ser_print_all_history( + struct wlan_serialization_pdev_queue *pdev_queue, + bool for_vdev_queue, + uint32_t vdev_id) +{ + uint8_t idx; + uint8_t data_idx; + struct ser_history *history_info; + struct ser_data *data; + + history_info = &pdev_queue->history; + + ser_err_no_fl(WLAN_SER_LINE WLAN_SER_LINE); + ser_err_no_fl("Queue Commands History"); + ser_err_no_fl(WLAN_SER_LINE WLAN_SER_LINE); + ser_err_no_fl(WLAN_SER_HISTORY_HEADER); + ser_err_no_fl(WLAN_SER_LINE WLAN_SER_LINE); + + for (idx = 0; idx < SER_MAX_HISTORY_CMDS; idx++) { + data_idx = (history_info->index + idx) % SER_MAX_HISTORY_CMDS; + + data = &history_info->data[data_idx]; + + if (data->ser_reason >= SER_QUEUE_ACTION_MAX) { + ser_debug("Invalid Serialization Reason"); + continue; + } + + if (!data->data_updated) + continue; + + if (for_vdev_queue) { + if (vdev_id != data->vdev_id) + continue; + } + ser_err_no_fl( + "%8d|%6d|%7d|%8d|%8d|%6s|%7s|%17s|", + data->cmd_type, + data->cmd_id, + data->vdev_id, + data->is_blocking, + data->is_high_priority, + data->add_remove ? "ADD" : "REMOVE", + data->active_pending ? "ACTIVE" : "PENDING", + ser_reason_string[data->ser_reason]); + } +} + +QDF_STATUS wlan_ser_print_history( + struct wlan_objmgr_vdev *vdev, uint8_t val, + uint32_t sub_val) +{ + struct wlan_ser_pdev_obj *ser_pdev; + struct wlan_ser_vdev_obj *ser_vdev; + struct wlan_serialization_pdev_queue *pdev_q; + struct wlan_serialization_vdev_queue *vdev_q; + bool for_vdev_queue = false; + uint32_t vdev_id = WLAN_INVALID_VDEV_ID; + + ser_pdev = wlan_serialization_get_pdev_obj( + wlan_vdev_get_pdev(vdev)); + + ser_vdev = wlan_serialization_get_vdev_obj(vdev); + + switch (val) { + /* + * Print scan pdev queues + */ + case SER_PDEV_QUEUE_COMP_SCAN: + ser_err_no_fl("Serialization SCAN Queues(LIVE)"); + pdev_q = &ser_pdev->pdev_q[SER_PDEV_QUEUE_COMP_SCAN]; + wlan_ser_print_pdev_queue(pdev_q, WLAN_SER_PDEV_NODE); + break; + /* + * Print non scan queues + */ + case SER_PDEV_QUEUE_COMP_NON_SCAN: + pdev_q = &ser_pdev->pdev_q[SER_PDEV_QUEUE_COMP_NON_SCAN]; + ser_err_no_fl("Serialization NON SCAN Queues(LIVE)"); + switch (sub_val) { + /* + * Print non scan pdev queues + */ + case SER_PDEV_QUEUE_TYPE: + wlan_ser_print_pdev_queue(pdev_q, WLAN_SER_PDEV_NODE); + break; + /* + * Print non scan pdev queues + */ + case SER_VDEV_QUEUE_TYPE: + vdev_q = + &ser_vdev->vdev_q[SER_VDEV_QUEUE_COMP_NON_SCAN]; + for_vdev_queue = true; + vdev_id = wlan_vdev_get_id(vdev); + wlan_ser_print_vdev_queue(vdev_q, WLAN_SER_VDEV_NODE); + break; + default: + ser_err("Invalid parameter for queue type(pdev/vdev)"); + } + break; + default: + ser_err("Invalid pramater for queue type(scan/non_scan"); + goto error; + } + + wlan_ser_print_all_history(pdev_q, for_vdev_queue, vdev_id); +error: + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(wlan_ser_print_history); + +void wlan_ser_update_cmd_history( + struct wlan_serialization_pdev_queue *pdev_queue, + struct wlan_serialization_command *cmd, + enum ser_queue_reason ser_reason, + bool add_remove, + bool active_queue) +{ + struct ser_data *ser_data_info; + struct ser_history *ser_history_info; + + ser_history_info = &pdev_queue->history; + ser_history_info->index %= SER_MAX_HISTORY_CMDS; + + ser_data_info = &ser_history_info->data[ser_history_info->index]; + + ser_data_info->cmd_type = cmd->cmd_type; + ser_data_info->cmd_id = cmd->cmd_id; + ser_data_info->is_blocking = cmd->is_blocking; + ser_data_info->is_high_priority = cmd->is_high_priority; + ser_data_info->add_remove = add_remove; + ser_data_info->active_pending = active_queue; + ser_data_info->ser_reason = ser_reason; + ser_data_info->vdev_id = wlan_vdev_get_id(cmd->vdev); + ser_data_info->data_updated = true; + + ser_history_info->index++; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_debug_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_debug_i.h new file mode 100644 index 0000000000000000000000000000000000000000..a07d1e02cbbc3075caa2961b7570de3823add57d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_debug_i.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_debug_i.h + * This file defines the prototypes for the debug functions + * for the serialization component. + */ + +#ifndef __WLAN_SERIALIZATION_DEBUG_I_H +#define __WLAN_SERIALIZATION_DEBUG_I_H + +#ifdef WLAN_SER_DEBUG + +#define SER_MAX_HISTORY_CMDS 50 + +#define WLAN_SER_LINE "--------------------"\ + "--------------------" + +#define WLAN_SER_HISTORY_HEADER "CMD_TYPE|CMD_ID|VDEV_ID|"\ + "BLOCKING|PRIORITY|ACTION|"\ + " QUEUE| REASON|" + +enum ser_queue_type { + SER_PDEV_QUEUE_TYPE, + SER_VDEV_QUEUE_TYPE, +}; + +struct ser_data { + /* + * Serialization Actions that modifies the serialization queues + * 0: SER_REQUEST + * 1: SER_REMOVE + * 2: SER_CANCEL + * 3: SER_TIMEOUT + * 4: SER_ACTIVATION_FAILED + * 5: SER_PENDING_TO_ACTIVE + */ + uint32_t cmd_type:6, /* max 2^6 = 64 types of commands */ + cmd_id:16, /* max cmd_id = 2^16 */ + is_blocking:1, + is_high_priority:1, + add_remove:1, + active_pending:1, + data_updated:1, + ser_reason:5; + + uint16_t vdev_id; +}; + +struct ser_history { + struct ser_data data[SER_MAX_HISTORY_CMDS]; + uint16_t index; +}; +#endif /* WLAN_SER_DEBUG */ +#endif + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_internal.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_internal.c new file mode 100644 index 0000000000000000000000000000000000000000..88dfc763614710aff9711adf174244183b703f6e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_internal.c @@ -0,0 +1,966 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_internal.c + * This file defines the functions which are called + * from serialization public API's and are internal + * to serialization. + */ + +#include +#include +#include +#include +#include +#include +#include "wlan_serialization_api.h" +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_utils_i.h" +#include "wlan_serialization_non_scan_i.h" +#include "wlan_serialization_scan_i.h" +#include "wlan_serialization_internal_i.h" + +bool wlan_serialization_is_cmd_present_queue( + struct wlan_serialization_command *cmd, + uint8_t is_active_queue) +{ + qdf_list_t *queue; + bool status = false; + enum wlan_serialization_node node_type; + struct wlan_ser_pdev_obj *ser_pdev_obj; + struct wlan_ser_vdev_obj *ser_vdev_obj; + enum wlan_serialization_cmd_type cmd_type; + + if (!cmd) { + ser_err("invalid cmd"); + goto error; + } + + cmd_type = cmd->cmd_type; + + ser_pdev_obj = wlan_serialization_get_pdev_obj( + wlan_serialization_get_pdev_from_cmd(cmd)); + + if (!ser_pdev_obj) { + ser_err("invalid ser vdev obj"); + goto error; + } + + ser_vdev_obj = wlan_serialization_get_vdev_obj( + wlan_serialization_get_vdev_from_cmd(cmd)); + if (!ser_vdev_obj) { + ser_err("invalid ser pdev obj"); + goto error; + } + + if (cmd_type < WLAN_SER_CMD_NONSCAN) { + queue = wlan_serialization_get_list_from_pdev_queue( + ser_pdev_obj, cmd_type, is_active_queue); + node_type = WLAN_SER_PDEV_NODE; + } else { + queue = wlan_serialization_get_list_from_vdev_queue( + ser_vdev_obj, cmd_type, is_active_queue); + node_type = WLAN_SER_VDEV_NODE; + } + + status = wlan_serialization_is_cmd_present_in_given_queue(queue, cmd, + node_type); + +error: + return status; +} + +enum wlan_serialization_status +wlan_serialization_enqueue_cmd(struct wlan_serialization_command *cmd, + enum ser_queue_reason ser_reason) +{ + enum wlan_serialization_status status = WLAN_SER_CMD_DENIED_UNSPECIFIED; + struct wlan_serialization_command_list *cmd_list; + qdf_list_node_t *nnode; + struct wlan_objmgr_pdev *pdev; + struct wlan_ser_pdev_obj *ser_pdev_obj; + struct wlan_serialization_pdev_queue *pdev_queue; + struct wlan_ser_vdev_obj *ser_vdev_obj; + struct wlan_serialization_vdev_queue *vdev_queue; + bool active_queue; + + /* Enqueue process + * 1) peek through command structure and see what is the command type + * 2) two main types of commands to process + * a) SCAN + * b) NON-SCAN + * 3) for each command there are separate command queues per pdev + * 4) pull pdev from vdev structure and get the command queue associated + * with that pdev and try to enqueue on those queue + * 5) Thumb rule: + * a) There could be only 1 active non-scan command at a + * time including all total non-scan commands of all pdevs. + * + * example: pdev1 has 1 non-scan active command and + * pdev2 got 1 non-scan command then that command should go to + * pdev2's pending queue + * + * b) There could be only N number of scan commands at a time + * including all total scan commands of all pdevs + * + * example: Let's say N=8, + * pdev1's vdev1 has 5 scan command, pdev2's vdev1 has 3 + * scan commands, if we get scan request on vdev2 then it will go + * to pending queue of vdev2 as we reached max allowed scan active + * command. + */ + + if (!cmd) { + ser_err("NULL command"); + goto error; + } + + if (!cmd->cmd_cb) { + ser_err("no cmd_cb for cmd type:%d, id: %d", + cmd->cmd_type, + cmd->cmd_id); + goto error; + } + + pdev = wlan_serialization_get_pdev_from_cmd(cmd); + if (!pdev) { + ser_err("pdev is invalid"); + goto error; + } + + ser_pdev_obj = + wlan_objmgr_pdev_get_comp_private_obj( + pdev, + WLAN_UMAC_COMP_SERIALIZATION); + if (!ser_pdev_obj) { + ser_err("Invalid ser_pdev_obj"); + goto error; + } + + pdev_queue = wlan_serialization_get_pdev_queue_obj(ser_pdev_obj, + cmd->cmd_type); + if (!pdev_queue) { + ser_err("pdev_queue is invalid"); + goto error; + } + + wlan_serialization_acquire_lock(&pdev_queue->pdev_queue_lock); + + /* Before queuing any non scan command, + * as part of wlan_serialization_request, + * we check if the vdev queues are disabled. + * + * The serialization command structure has an + * attribute, where after a given command is queued, + * we can block the vdev queues. + * + * For example, after VDEV_DOWN command is queued as + * part of a vdev deletion, no other commands should be queued + * until the deletion is complete, so with VDEV_DOWN(in case of + * vdev deletion) with pass the attribute to disable vdev queues + */ + if (cmd->cmd_type > WLAN_SER_CMD_SCAN && + ser_reason == SER_REQUEST) { + ser_vdev_obj = + wlan_serialization_get_vdev_obj( + wlan_serialization_get_vdev_from_cmd(cmd)); + + if (!ser_vdev_obj) { + wlan_serialization_release_lock( + &pdev_queue->pdev_queue_lock); + goto error; + } + + vdev_queue = + wlan_serialization_get_vdev_queue_obj( + ser_vdev_obj, + cmd->cmd_type); + + if (!vdev_queue) { + wlan_serialization_release_lock( + &pdev_queue->pdev_queue_lock); + goto error; + } + + if (vdev_queue->queue_disable) { + wlan_serialization_release_lock( + &pdev_queue->pdev_queue_lock); + ser_err_rl("VDEV queue is disabled, reject cmd id %d type %d", + cmd->cmd_id, cmd->cmd_type); + status = WLAN_SER_CMD_QUEUE_DISABLED; + goto error; + } + } + + active_queue = wlan_serialization_is_active_cmd_allowed(cmd); + + if (wlan_serialization_is_cmd_present_queue(cmd, active_queue)) { + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + ser_err("duplicate command, reject cmd id %d type %d", + cmd->cmd_id, cmd->cmd_type); + goto error; + } + + if (wlan_serialization_remove_front( + &pdev_queue->cmd_pool_list, + &nnode) != QDF_STATUS_SUCCESS) { + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + ser_err("Failed to get cmd buffer from global pool cmd id %d type %d", + cmd->cmd_id, cmd->cmd_type); + status = WLAN_SER_CMD_DENIED_LIST_FULL; + goto error; + } + + ser_debug("Type %d id %d high_priority %d blocking %d timeout %d allowed %d", + cmd->cmd_type, cmd->cmd_id, cmd->is_high_priority, + cmd->is_blocking, cmd->cmd_timeout_duration, active_queue); + + cmd_list = + qdf_container_of(nnode, + struct wlan_serialization_command_list, + pdev_node); + + qdf_mem_copy(&cmd_list->cmd, cmd, + sizeof(struct wlan_serialization_command)); + + if (cmd->cmd_type < WLAN_SER_CMD_NONSCAN) { + status = wlan_ser_add_scan_cmd(ser_pdev_obj, + cmd_list, + active_queue); + } else { + status = wlan_ser_add_non_scan_cmd(ser_pdev_obj, + cmd_list, + active_queue); + } + + if (status != WLAN_SER_CMD_PENDING && status != WLAN_SER_CMD_ACTIVE) { + qdf_mem_zero(&cmd_list->cmd, + sizeof(struct wlan_serialization_command)); + cmd_list->cmd_in_use = 0; + wlan_serialization_insert_back( + &pdev_queue->cmd_pool_list, + &cmd_list->pdev_node); + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + ser_err("Failed to add cmd id %d type %d to active/pending queue", + cmd->cmd_id, cmd->cmd_type); + goto error; + } + + if (WLAN_SER_CMD_ACTIVE == status) { + qdf_atomic_set_bit(CMD_MARKED_FOR_ACTIVATION, + &cmd_list->cmd_in_use); + } + + wlan_ser_update_cmd_history(pdev_queue, &cmd_list->cmd, + ser_reason, true, active_queue); + + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + + if (WLAN_SER_CMD_ACTIVE == status) + wlan_serialization_activate_cmd(cmd_list, + ser_pdev_obj, ser_reason); + +error: + + return status; +} + +QDF_STATUS wlan_serialization_activate_cmd( + struct wlan_serialization_command_list *cmd_list, + struct wlan_ser_pdev_obj *ser_pdev_obj, + enum ser_queue_reason ser_reason) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct wlan_objmgr_psoc *psoc = NULL; + struct wlan_serialization_pdev_queue *pdev_queue; + + pdev_queue = wlan_serialization_get_pdev_queue_obj( + ser_pdev_obj, cmd_list->cmd.cmd_type); + + psoc = wlan_vdev_get_psoc(cmd_list->cmd.vdev); + if (!psoc) { + ser_err("invalid psoc"); + goto error; + } + + /* + * command is already pushed to active queue above + * now start the timer and notify requestor + */ + + status = wlan_serialization_find_and_start_timer(psoc, &cmd_list->cmd, + ser_reason); + if (QDF_IS_STATUS_ERROR(status)) { + ser_err("Failed to start timer cmd type[%d] id[%d] vdev[%d]", + cmd_list->cmd.cmd_type, + cmd_list->cmd.cmd_id, + wlan_vdev_get_id(cmd_list->cmd.vdev)); + goto timer_failed; + } + + /* + * Remember that serialization module may send + * this callback in same context through which it + * received the serialization request. Due to which + * it is caller's responsibility to ensure acquiring + * and releasing its own lock appropriately. + */ + + ser_debug("Activate type %d id %d", cmd_list->cmd.cmd_type, + cmd_list->cmd.cmd_id); + + cmd_list->cmd.activation_reason = ser_reason; + + status = cmd_list->cmd.cmd_cb(&cmd_list->cmd, + WLAN_SER_CB_ACTIVATE_CMD); +timer_failed: + wlan_serialization_acquire_lock(&pdev_queue->pdev_queue_lock); + + qdf_atomic_clear_bit(CMD_MARKED_FOR_ACTIVATION, + &cmd_list->cmd_in_use); + qdf_atomic_set_bit(CMD_IS_ACTIVE, + &cmd_list->cmd_in_use); + + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + + if (QDF_IS_STATUS_ERROR(status)) { + wlan_serialization_dequeue_cmd(&cmd_list->cmd, + SER_ACTIVATION_FAILED, + true); + return status; + } + + /* + * Cmd was marked for activation and delete or cancel + * is received before activation completed, then the command + * should be immediately removed after activation + */ + if (qdf_atomic_test_bit(CMD_ACTIVE_MARKED_FOR_REMOVAL, + &cmd_list->cmd_in_use)) { + wlan_serialization_dequeue_cmd(&cmd_list->cmd, + SER_REMOVE, + true); + return status; + } + + if (qdf_atomic_test_bit(CMD_ACTIVE_MARKED_FOR_CANCEL, + &cmd_list->cmd_in_use)) + wlan_serialization_cmd_cancel_handler( + ser_pdev_obj, &cmd_list->cmd, + NULL, NULL, cmd_list->cmd.cmd_type, + WLAN_SERIALIZATION_ACTIVE_QUEUE, + WLAN_SER_CMD_ATTR_NONE); +error: + return status; +} + +bool +wlan_serialization_is_active_cmd_allowed(struct wlan_serialization_command *cmd) +{ + struct wlan_objmgr_pdev *pdev; + bool active_cmd_allowed = 0; + + pdev = wlan_serialization_get_pdev_from_cmd(cmd); + if (!pdev) { + ser_err("NULL pdev"); + goto error; + } + + if (cmd->cmd_type < WLAN_SER_CMD_NONSCAN) + active_cmd_allowed = + (wlan_serialization_is_active_scan_cmd_allowed(cmd) && + wlan_serialization_is_scan_pending_queue_empty(cmd)); + else + active_cmd_allowed = + (wlan_serialization_is_active_non_scan_cmd_allowed(cmd) && + wlan_serialization_is_non_scan_pending_queue_empty(cmd)); + +error: + return active_cmd_allowed; +} + +enum wlan_serialization_status +wlan_serialization_move_pending_to_active( + enum wlan_serialization_cmd_type cmd_type, + struct wlan_ser_pdev_obj *ser_pdev_obj, + struct wlan_objmgr_vdev *vdev, + bool blocking_cmd_removed) +{ + enum wlan_serialization_status status; + + if (cmd_type < WLAN_SER_CMD_NONSCAN) { + status = + wlan_ser_move_scan_pending_to_active( + ser_pdev_obj); + } else { + status = + wlan_ser_move_non_scan_pending_to_active( + ser_pdev_obj, + vdev, + blocking_cmd_removed); + } + + return status; +} + +enum wlan_serialization_cmd_status +wlan_serialization_dequeue_cmd(struct wlan_serialization_command *cmd, + enum ser_queue_reason ser_reason, + uint8_t active_cmd) +{ + enum wlan_serialization_cmd_status status = + WLAN_SER_CMD_NOT_FOUND; + enum wlan_serialization_status ser_status = + WLAN_SER_CMD_DENIED_UNSPECIFIED; + + QDF_STATUS qdf_status; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_psoc *psoc; + struct wlan_ser_pdev_obj *ser_pdev_obj; + struct wlan_serialization_command cmd_bkup; + struct wlan_serialization_command_list *cmd_list; + struct wlan_serialization_pdev_queue *pdev_queue; + bool blocking_cmd_removed = 0; + + if (!cmd) { + ser_err("NULL command"); + goto error; + } + + pdev = wlan_serialization_get_pdev_from_cmd(cmd); + if (!pdev) { + ser_err("invalid pdev"); + goto error; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + ser_err("invalid psoc"); + goto error; + } + + ser_pdev_obj = wlan_serialization_get_pdev_obj(pdev); + if (!ser_pdev_obj) { + ser_err("ser_pdev_obj is empty"); + goto error; + } + + pdev_queue = wlan_serialization_get_pdev_queue_obj( + ser_pdev_obj, cmd->cmd_type); + + ser_debug("Type %d id %d blocking %d reason %d active %d", + cmd->cmd_type, cmd->cmd_id, cmd->is_blocking, + ser_reason, active_cmd); + + wlan_serialization_acquire_lock(&pdev_queue->pdev_queue_lock); + + if (cmd->cmd_type < WLAN_SER_CMD_NONSCAN) + qdf_status = wlan_ser_remove_scan_cmd( + ser_pdev_obj, &cmd_list, cmd, active_cmd); + else { + qdf_status = wlan_ser_remove_non_scan_cmd( + ser_pdev_obj, &cmd_list, cmd, active_cmd); + } + + if (qdf_status == QDF_STATUS_E_PENDING) { + status = WLAN_SER_CMD_MARKED_FOR_ACTIVATION; + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + goto error; + } + + if (qdf_status != QDF_STATUS_SUCCESS) { + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + status = WLAN_SER_CMD_NOT_FOUND; + goto error; + } + + if (active_cmd) { + if (cmd_list->cmd.cmd_type >= WLAN_SER_CMD_NONSCAN) + blocking_cmd_removed = cmd_list->cmd.is_blocking; + } + + if (active_cmd) + wlan_serialization_find_and_stop_timer( + psoc, &cmd_list->cmd, + ser_reason); + + qdf_mem_copy(&cmd_bkup, &cmd_list->cmd, + sizeof(struct wlan_serialization_command)); + qdf_mem_zero(&cmd_list->cmd, + sizeof(struct wlan_serialization_command)); + cmd_list->cmd_in_use = 0; + qdf_status = wlan_serialization_insert_back( + &pdev_queue->cmd_pool_list, + &cmd_list->pdev_node); + + wlan_ser_update_cmd_history(pdev_queue, &cmd_bkup, ser_reason, + false, active_cmd); + + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + + /* Call cmd cb for remove request*/ + if (cmd_bkup.cmd_cb) { + /* caller should release the memory */ + ser_debug("Release memory for type %d id %d", + cmd_bkup.cmd_type, cmd_bkup.cmd_id); + cmd_bkup.cmd_cb(&cmd_bkup, + WLAN_SER_CB_RELEASE_MEM_CMD); + } + + if (active_cmd) { + ser_status = wlan_serialization_move_pending_to_active( + cmd_bkup.cmd_type, ser_pdev_obj, + cmd_bkup.vdev, + blocking_cmd_removed); + } + + if (active_cmd) + status = WLAN_SER_CMD_IN_ACTIVE_LIST; + else + status = WLAN_SER_CMD_IN_PENDING_LIST; + +error: + return status; +} + +QDF_STATUS wlan_serialization_generic_timer_cb(void *arg) +{ + struct wlan_serialization_timer *timer = arg; + struct wlan_serialization_command *cmd = timer->cmd; + struct wlan_objmgr_vdev *vdev = NULL; + enum wlan_serialization_cmd_status status; + + + if (!cmd) { + ser_err("Command not found"); + return QDF_STATUS_E_FAILURE; + } + + vdev = cmd->vdev; + if (!vdev) { + ser_err("Invalid vdev"); + return QDF_STATUS_E_FAILURE; + } + + ser_err("Active cmd timeout for cmd_type[%d] vdev[%d]", + cmd->cmd_type, wlan_vdev_get_id(cmd->vdev)); + + if (cmd->cmd_cb) + cmd->cmd_cb(cmd, WLAN_SER_CB_ACTIVE_CMD_TIMEOUT); + + /* + * dequeue cmd API will cleanup and destroy the timer. If it fails to + * dequeue command then we have to destroy the timer. + */ + status = wlan_serialization_dequeue_cmd(cmd, SER_TIMEOUT, true); + + /* Release the ref taken before the timer was started */ + if (status == WLAN_SER_CMD_IN_ACTIVE_LIST) + wlan_objmgr_vdev_release_ref(vdev, WLAN_SERIALIZATION_ID); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wlan_serialization_mc_flush_noop(struct scheduler_msg *msg) +{ + return QDF_STATUS_SUCCESS; +} + +static void +wlan_serialization_timer_cb_mc_ctx(void *arg) +{ + struct scheduler_msg msg = {0}; + + msg.type = SYS_MSG_ID_MC_TIMER; + msg.reserved = SYS_MSG_COOKIE; + msg.callback = wlan_serialization_generic_timer_cb; + msg.bodyptr = arg; + msg.bodyval = 0; + msg.flush_callback = wlan_serialization_mc_flush_noop; + + if (scheduler_post_message(QDF_MODULE_ID_SERIALIZATION, + QDF_MODULE_ID_SERIALIZATION, + QDF_MODULE_ID_SYS, &msg) == + QDF_STATUS_SUCCESS) + return; + + ser_err("Could not enqueue timer to timer queue"); +} + +static void wlan_serialization_timer_handler(void *arg) +{ + struct wlan_serialization_timer *timer = arg; + struct wlan_serialization_command *cmd = timer->cmd; + + if (!cmd) { + ser_err("Command not found"); + return; + } + + ser_err("Active cmd timeout for cmd_type %d vdev %d", + cmd->cmd_type, wlan_vdev_get_id(cmd->vdev)); + + wlan_serialization_timer_cb_mc_ctx(arg); + +} + +QDF_STATUS +wlan_serialization_find_and_update_timer( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd) +{ + struct wlan_ser_psoc_obj *psoc_ser_obj; + struct wlan_serialization_timer *ser_timer; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + int i = 0; + + if (!psoc || !cmd) { + ser_err("invalid param"); + goto exit; + } + + psoc_ser_obj = wlan_serialization_get_psoc_obj(psoc); + /* + * Here cmd_id and cmd_type are used to locate the timer being + * associated with command. + */ + wlan_serialization_acquire_lock(&psoc_ser_obj->timer_lock); + + for (i = 0; psoc_ser_obj->max_active_cmds > i; i++) { + ser_timer = &psoc_ser_obj->timers[i]; + if (!(ser_timer->cmd) || + (ser_timer->cmd->cmd_id != cmd->cmd_id) || + (ser_timer->cmd->cmd_type != cmd->cmd_type) || + (ser_timer->cmd->vdev != cmd->vdev)) + continue; + + qdf_timer_mod(&ser_timer->timer, + cmd->cmd_timeout_duration); + status = QDF_STATUS_SUCCESS; + break; + } + + wlan_serialization_release_lock(&psoc_ser_obj->timer_lock); + + if (QDF_IS_STATUS_ERROR(status)) + ser_debug("Can't find timer for cmd_type %d", cmd->cmd_type); + +exit: + return status; +} + +QDF_STATUS +wlan_serialization_find_and_stop_timer(struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd, + enum ser_queue_reason ser_reason) +{ + struct wlan_ser_psoc_obj *psoc_ser_obj; + struct wlan_serialization_timer *ser_timer; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + int i = 0; + uint32_t phy_version; + struct wlan_objmgr_vdev *vdev; + + if (!psoc || !cmd) { + ser_err("invalid param"); + goto exit; + } + + if (cmd->cmd_timeout_duration == 0) { + phy_version = wlan_psoc_get_nif_phy_version(psoc); + if (wlan_is_emulation_platform(phy_version)) { + ser_err("[SCAN-EMULATION]: Not performing timer funcs"); + status = QDF_STATUS_SUCCESS; + goto exit; + } + } + + psoc_ser_obj = wlan_serialization_get_psoc_obj(psoc); + /* + * Here cmd_id and cmd_type are used to locate the timer being + * associated with command. + */ + wlan_serialization_acquire_lock(&psoc_ser_obj->timer_lock); + + for (i = 0; psoc_ser_obj->max_active_cmds > i; i++) { + ser_timer = &psoc_ser_obj->timers[i]; + if (!(ser_timer->cmd) || + (ser_timer->cmd->cmd_id != cmd->cmd_id) || + (ser_timer->cmd->cmd_type != cmd->cmd_type) || + (ser_timer->cmd->vdev != cmd->vdev)) + continue; + + vdev = ser_timer->cmd->vdev; + status = wlan_serialization_stop_timer(ser_timer); + /* + * Release the vdev reference when the active cmd is removed + * through remove/cancel request. + * + * In case the command removal is because of timer expiry, + * the vdev is released when the timer handler completes. + */ + if (vdev && ser_reason != SER_TIMEOUT) + wlan_objmgr_vdev_release_ref( + vdev, WLAN_SERIALIZATION_ID); + + break; + + } + + wlan_serialization_release_lock(&psoc_ser_obj->timer_lock); + + if (QDF_IS_STATUS_ERROR(status)) + ser_err("Can't find timer for cmd_type %d cmd id %d", + cmd->cmd_type, cmd->cmd_id); + +exit: + return status; +} + +QDF_STATUS +wlan_serialization_find_and_start_timer(struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd, + enum ser_queue_reason ser_reason) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct wlan_ser_psoc_obj *psoc_ser_obj; + struct wlan_serialization_timer *ser_timer; + int i = 0; + uint32_t nif_phy_ver; + + if (!psoc || !cmd) { + ser_err("invalid param"); + goto error; + } + + nif_phy_ver = wlan_psoc_get_nif_phy_version(psoc); + if ((cmd->cmd_timeout_duration == 0) && + (wlan_is_emulation_platform(nif_phy_ver))) { + ser_err("[SCAN-EMULATION]: Not performing timer functions\n"); + status = QDF_STATUS_SUCCESS; + goto error; + } + + psoc_ser_obj = wlan_serialization_get_psoc_obj(psoc); + + wlan_serialization_acquire_lock(&psoc_ser_obj->timer_lock); + + for (i = 0; psoc_ser_obj->max_active_cmds > i; i++) { + /* Keep trying timer */ + ser_timer = &psoc_ser_obj->timers[i]; + if (ser_timer->cmd) + continue; + + /* Remember timer is pointing to command */ + ser_timer->cmd = cmd; + status = QDF_STATUS_SUCCESS; + + /* + * Get vdev reference before starting the timer + * Remove the reference before removing the command + * in any one of the cases: + * 1. Active command is removed through remove/cancel request + * 2. Timer expiry handler is completed. + */ + + status = wlan_objmgr_vdev_try_get_ref(ser_timer->cmd->vdev, + WLAN_SERIALIZATION_ID); + if (QDF_IS_STATUS_ERROR(status)) { + /* + * Set cmd to null so that ref release is not tried for + * vdev when timer is flushed. + */ + ser_timer->cmd = NULL; + wlan_serialization_release_lock( + &psoc_ser_obj->timer_lock); + ser_err("Unbale to get vdev reference"); + status = QDF_STATUS_E_FAILURE; + goto error; + } + break; + } + + wlan_serialization_release_lock(&psoc_ser_obj->timer_lock); + + if (QDF_IS_STATUS_SUCCESS(status)) { + qdf_timer_init(NULL, &ser_timer->timer, + wlan_serialization_timer_handler, + ser_timer, QDF_TIMER_TYPE_SW); + qdf_timer_mod(&ser_timer->timer, cmd->cmd_timeout_duration); + } else { + ser_err("Failed to start timer for cmd: type[%d] id[%d] high_priority[%d] blocking[%d]", + cmd->cmd_type, cmd->cmd_id, cmd->is_high_priority, + cmd->is_blocking); + } + +error: + return status; +} + +enum wlan_serialization_cmd_status +wlan_serialization_cmd_cancel_handler( + struct wlan_ser_pdev_obj *ser_obj, + struct wlan_serialization_command *cmd, + struct wlan_objmgr_pdev *pdev, struct wlan_objmgr_vdev *vdev, + enum wlan_serialization_cmd_type cmd_type, uint8_t queue_type, + enum wlan_ser_cmd_attr cmd_attr) +{ + enum wlan_serialization_cmd_status active_status = + WLAN_SER_CMD_NOT_FOUND; + enum wlan_serialization_cmd_status pending_status = + WLAN_SER_CMD_NOT_FOUND; + enum wlan_serialization_cmd_status status = + WLAN_SER_CMD_NOT_FOUND; + + if (!ser_obj) { + ser_err("invalid serial object"); + goto error; + } + + if (queue_type & WLAN_SERIALIZATION_ACTIVE_QUEUE) { + if (cmd_type < WLAN_SER_CMD_NONSCAN) + active_status = wlan_ser_cancel_scan_cmd( + ser_obj, pdev, vdev, cmd, + cmd_type, true); + else + active_status = wlan_ser_cancel_non_scan_cmd( + ser_obj, pdev, vdev, cmd, + cmd_type, true, cmd_attr); + } + + if (queue_type & WLAN_SERIALIZATION_PENDING_QUEUE) { + if (cmd_type < WLAN_SER_CMD_NONSCAN) + pending_status = wlan_ser_cancel_scan_cmd( + ser_obj, pdev, vdev, cmd, + cmd_type, false); + else + pending_status = wlan_ser_cancel_non_scan_cmd( + ser_obj, pdev, vdev, cmd, + cmd_type, false, cmd_attr); + } + + if (active_status == WLAN_SER_CMD_IN_ACTIVE_LIST && + pending_status == WLAN_SER_CMD_IN_PENDING_LIST) + status = WLAN_SER_CMDS_IN_ALL_LISTS; + else if (active_status == WLAN_SER_CMD_IN_ACTIVE_LIST) + status = active_status; + else if (pending_status == WLAN_SER_CMD_IN_PENDING_LIST) + status = pending_status; + +error: + return status; +} + +enum wlan_serialization_cmd_status +wlan_serialization_find_and_cancel_cmd( + struct wlan_serialization_command *cmd, + enum wlan_serialization_cancel_type req_type, + uint8_t queue_type) +{ + enum wlan_serialization_cmd_status status = WLAN_SER_CMD_NOT_FOUND; + struct wlan_ser_pdev_obj *ser_obj = NULL; + struct wlan_objmgr_pdev *pdev; + + if (!cmd) { + ser_err("Invalid cmd"); + goto error; + } + + pdev = wlan_serialization_get_pdev_from_cmd(cmd); + if (!pdev) { + ser_err("Invalid pdev"); + goto error; + } + ser_obj = wlan_serialization_get_pdev_obj(pdev); + if (!ser_obj) { + ser_err("Invalid ser_obj"); + goto error; + } + + switch (req_type) { + case WLAN_SER_CANCEL_SINGLE_SCAN: + /* remove scan cmd which matches the given cmd struct */ + status = wlan_serialization_cmd_cancel_handler( + ser_obj, cmd, NULL, NULL, + WLAN_SER_CMD_SCAN, queue_type, + WLAN_SER_CMD_ATTR_NONE); + break; + case WLAN_SER_CANCEL_PDEV_SCANS: + /* remove all scan cmds which matches the pdev object */ + status = wlan_serialization_cmd_cancel_handler( + ser_obj, NULL, pdev, NULL, + WLAN_SER_CMD_SCAN, queue_type, + WLAN_SER_CMD_ATTR_NONE); + break; + case WLAN_SER_CANCEL_VDEV_SCANS: + case WLAN_SER_CANCEL_VDEV_HOST_SCANS: + /* remove all scan cmds which matches the vdev object */ + status = wlan_serialization_cmd_cancel_handler( + ser_obj, NULL, NULL, cmd->vdev, + WLAN_SER_CMD_SCAN, queue_type, + WLAN_SER_CMD_ATTR_NONE); + break; + case WLAN_SER_CANCEL_NON_SCAN_CMD: + /* remove nonscan cmd which matches the given cmd */ + status = wlan_serialization_cmd_cancel_handler( + ser_obj, cmd, NULL, NULL, + WLAN_SER_CMD_NONSCAN, queue_type, + WLAN_SER_CMD_ATTR_NONE); + break; + case WLAN_SER_CANCEL_PDEV_NON_SCAN_CMD: + /* remove all non scan cmds which matches the pdev object */ + status = wlan_serialization_cmd_cancel_handler( + ser_obj, NULL, pdev, NULL, + WLAN_SER_CMD_NONSCAN, queue_type, + WLAN_SER_CMD_ATTR_NONE); + break; + case WLAN_SER_CANCEL_VDEV_NON_SCAN_CMD: + /* remove all non scan cmds which matches the vdev object */ + status = wlan_serialization_cmd_cancel_handler( + ser_obj, NULL, NULL, cmd->vdev, + WLAN_SER_CMD_NONSCAN, queue_type, + WLAN_SER_CMD_ATTR_NONE); + break; + case WLAN_SER_CANCEL_VDEV_NON_SCAN_CMD_TYPE: + /* + * remove all non scan cmds which matches the vdev + * and given cmd type + */ + status = wlan_serialization_cmd_cancel_handler( + ser_obj, NULL, NULL, cmd->vdev, + cmd->cmd_type, queue_type, + WLAN_SER_CMD_ATTR_NONE); + break; + case WLAN_SER_CANCEL_VDEV_NON_SCAN_NB_CMD: + /* + * remove all non-blocking non-scan cmds which matches the given + * vdev + */ + status = wlan_serialization_cmd_cancel_handler( + ser_obj, NULL, NULL, cmd->vdev, + WLAN_SER_CMD_NONSCAN, queue_type, + WLAN_SER_CMD_ATTR_NONBLOCK); + break; + default: + ser_err("Invalid request"); + } + +error: + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_internal_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_internal_i.h new file mode 100644 index 0000000000000000000000000000000000000000..a1cb902fad3b1afd5089769c86e0e54d8732747d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_internal_i.h @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_internal_i.h + * This file defines the prototypes of functions which are called + * from serialization public API's and are internal + * to serialization. + */ +#ifndef __WLAN_SERIALIZATION_PVT_I_H +#define __WLAN_SERIALIZATION_PVT_I_H + +#include +#include +#include +#include +#include +#include "wlan_serialization_api.h" +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_utils_i.h" +#include "wlan_serialization_non_scan_i.h" + +/** + * wlan_serialization_is_cmd_present_queue() - Check if same command + * is already present active or pending queue + * @cmd: pointer to command which we need to find + * @is_active_queue: flag to find the command in active or pending queue + * + * This API will check the given command is already present in active or + * pending queue based on flag + * If present then return true otherwise false + * + * Return: true or false + */ +bool +wlan_serialization_is_cmd_present_queue( + struct wlan_serialization_command *cmd, + uint8_t is_active_queue); + +/** + * wlan_serialization_is_active_cmd_allowed() - Check if the given command + * can be moved to active queue + * @cmd: Serialization command information + * + * Return: true or false + */ +bool +wlan_serialization_is_active_cmd_allowed( + struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_enqueue_cmd() - Enqueue the cmd to pending/active Queue + * @cmd: Command information + * @ser_reason: action for dequeue + * + * Return: Status of the serialization request + */ +enum wlan_serialization_status +wlan_serialization_enqueue_cmd(struct wlan_serialization_command *cmd, + enum ser_queue_reason ser_reason); + +/** + * wlan_serialization_activate_cmd() - activate cmd in active queue + * @cmd_list: Command needs to be activated + * @ser_pdev_obj: Serialization private pdev object + * @ser_reason: reason the activation cb would be called + * + * Return: Status of activation of the command + */ +QDF_STATUS +wlan_serialization_activate_cmd( + struct wlan_serialization_command_list *cmd_list, + struct wlan_ser_pdev_obj *ser_pdev_obj, + enum ser_queue_reason ser_reason); + +/** + * wlan_serialization_move_pending_to_active() - Move a cmd from pending + * queue to active queue + * @cmd_type: Type of command to be moved i.e scan or non scan + * @pcmd_list: Pointer to command list containing the command + * @ser_pdev_obj: Serialization private pdev object + * @vdev: Pointer to vdev object manager + * @blocking_cmd_removed: If a blocking cmd is removed from active queue + * @blocking_cmd_waiting: If a blocking cmd is waiting in pending queue + * + * Return: Status of command request + */ +enum wlan_serialization_status +wlan_serialization_move_pending_to_active( + enum wlan_serialization_cmd_type cmd_type, + struct wlan_ser_pdev_obj *ser_pdev_obj, + struct wlan_objmgr_vdev *vdev, + bool blocking_cmd_removed); + +/** + * wlan_serialization_dequeue_cmd() - dequeue the cmd to pending/active Queue + * @cmd: Command information + * @ser_reason: action for dequeue + * @active_cmd: whether command is for active queue + * + * Return: Status of the serialization request + */ +enum wlan_serialization_cmd_status +wlan_serialization_dequeue_cmd(struct wlan_serialization_command *cmd, + enum ser_queue_reason ser_reason, + uint8_t active_cmd); + +/** + * wlan_serialization_generic_timer_cb() - timer callback when timer fire + * @arg: argument that timer passes to this callback + * + * All the timers in serialization module calls this callback when they fire, + * and this API in turn calls command specific timeout callback and remove + * timed-out command from active queue and move any pending command to active + * queue of same cmd_type. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_serialization_generic_timer_cb(void *arg); + +/** + * wlan_serialization_find_and_start_timer() - to find and start the timer + * @psoc: pointer to psoc + * @cmd: pointer to actual command + * @ser_reason: serialization reason + * + * find the free timer, initialize it, and start it + * + * Return: QDF_STATUS + */ +QDF_STATUS +wlan_serialization_find_and_start_timer(struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd, + enum ser_queue_reason ser_reason); + +/** + * wlan_serialization_find_and_update_timer() - to find and update the timer + * @psoc: pointer to psoc + * @cmd: pointer to command attributes + * + * Find the timer associated with command, and update it + * + * Return: QDF_STATUS + */ +QDF_STATUS +wlan_serialization_find_and_update_timer( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_find_and_stop_timer() - to find and stop the timer + * @psoc: pointer to psoc + * @cmd: pointer to actual command + * @ser_reason: serialization reason + * + * find the timer associated with command, stop it and destroy it + * + * Return: QDF_STATUS + */ +QDF_STATUS +wlan_serialization_find_and_stop_timer(struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd, + enum ser_queue_reason ser_reason); + + +/** + * wlan_serialization_find_and_cancel_cmd() - to find cmd from queue and cancel + * @cmd: pointer to serialization command + * @req_type: Command cancel request type + * @queue_type: Bitmask for member queue type i.e active or pending or both + * + * This api will find command from active queue and pending queue and + * removes the command. If it is in active queue then it will notifies the + * requester that it is in active queue and from there it expects requester + * to send remove command + * + * Return: wlan_serialization_cmd_status + */ + +enum wlan_serialization_cmd_status +wlan_serialization_find_and_cancel_cmd( + struct wlan_serialization_command *cmd, + enum wlan_serialization_cancel_type req_type, + uint8_t queue_type); + +/** + * wlan_serialization_cmd_cancel_handler() - helper func to cancel cmd + * @ser_obj: private pdev ser obj + * @cmd: pointer to command + * @pdev: pointer to pdev + * @vdev: pointer to vdev + * @cmd_type: pointer to cmd_type + * @queue_type: If active queue or pending queue + * @cmd_attr: Attrbute to indicate a blocking or a non-blocking command + * + * This API will decide from which queue, command needs to be cancelled + * and pass that queue and other parameter required to cancel the command + * to helper function. + * + * Return: wlan_serialization_cmd_status + */ +enum wlan_serialization_cmd_status +wlan_serialization_cmd_cancel_handler( + struct wlan_ser_pdev_obj *ser_obj, + struct wlan_serialization_command *cmd, + struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev, + enum wlan_serialization_cmd_type cmd_type, + uint8_t queue_type, + enum wlan_ser_cmd_attr cmd_attr); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_legacy_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_legacy_api.c new file mode 100644 index 0000000000000000000000000000000000000000..1469630d49231fb0a834311bde839de37f4dcfd6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_legacy_api.c @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_legacy_api.c + * This file provides prototypes of the routines needed for the + * legacy mcl serialization to utilize the services provided by the + * serialization component. + */ + +#include "wlan_serialization_legacy_api.h" +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_utils_i.h" +#include "wlan_objmgr_vdev_obj.h" +#include "wlan_serialization_internal_i.h" +#include "wlan_serialization_scan_i.h" +#include "wlan_serialization_non_scan_i.h" + +static struct wlan_objmgr_pdev *wlan_serialization_get_first_pdev( + struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_pdev *pdev; + uint8_t i = 0; + + if (!psoc) { + ser_err("invalid psoc"); + return NULL; + } + for (i = 0; i < WLAN_UMAC_MAX_PDEVS; i++) { + pdev = wlan_objmgr_get_pdev_by_id(psoc, i, + WLAN_SERIALIZATION_ID); + if (pdev) + break; + } + + return pdev; +} + +static struct wlan_ser_pdev_obj * +wlan_serialization_get_pdev_priv_obj_using_psoc(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_ser_pdev_obj *ser_pdev_obj; + + if (!psoc) { + ser_err("invalid psoc"); + return NULL; + } + + pdev = wlan_serialization_get_first_pdev(psoc); + if (!pdev) { + ser_err("invalid pdev"); + return NULL; + } + + ser_pdev_obj = wlan_serialization_get_pdev_obj(pdev); + wlan_objmgr_pdev_release_ref(pdev, WLAN_SERIALIZATION_ID); + if (!ser_pdev_obj) { + ser_err("invalid ser_pdev_obj"); + return NULL; + } + + return ser_pdev_obj; +} + +uint32_t wlan_serialization_get_pending_list_count( + struct wlan_objmgr_psoc *psoc, + uint8_t is_cmd_from_pending_scan_queue) +{ + struct wlan_ser_pdev_obj *ser_pdev_obj; + qdf_list_t *queue; + uint32_t count = 0; + struct wlan_serialization_pdev_queue *pdev_queue; + + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj_using_psoc(psoc); + if (!ser_pdev_obj) { + ser_err("invalid ser_pdev_obj"); + return 0; + } + + if (is_cmd_from_pending_scan_queue) + pdev_queue = &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_SCAN]; + else + pdev_queue = + &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_NON_SCAN]; + queue = &pdev_queue->pending_list; + wlan_serialization_acquire_lock(&pdev_queue->pdev_queue_lock); + count = qdf_list_size(queue); + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + + return count; +} + +struct wlan_serialization_command* +wlan_serialization_peek_head_active_cmd_using_psoc( + struct wlan_objmgr_psoc *psoc, + uint8_t is_cmd_from_active_scan_queue) +{ + struct wlan_ser_pdev_obj *ser_pdev_obj; + struct wlan_serialization_command_list *cmd_list = NULL; + struct wlan_serialization_command *cmd = NULL; + qdf_list_node_t *nnode = NULL; + qdf_list_t *queue; + struct wlan_serialization_pdev_queue *pdev_queue; + + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj_using_psoc(psoc); + if (!ser_pdev_obj) { + ser_err("invalid ser_pdev_obj"); + return NULL; + } + + if (is_cmd_from_active_scan_queue) + pdev_queue = &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_SCAN]; + else + pdev_queue = + &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_NON_SCAN]; + queue = &pdev_queue->active_list; + if (wlan_serialization_list_empty(queue)) { + ser_debug_rl("Empty Queue"); + goto end; + } + + if (QDF_STATUS_SUCCESS != wlan_serialization_get_cmd_from_queue(queue, + &nnode)) { + ser_err("Can't get command from queue"); + goto end; + } + + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, pdev_node); + cmd = &cmd_list->cmd; + +end: + return cmd; +} + +struct wlan_serialization_command* +wlan_serialization_peek_head_pending_cmd_using_psoc( + struct wlan_objmgr_psoc *psoc, + uint8_t is_cmd_from_pending_scan_queue) +{ + struct wlan_ser_pdev_obj *ser_pdev_obj; + struct wlan_serialization_command_list *cmd_list = NULL; + struct wlan_serialization_command *cmd = NULL; + qdf_list_node_t *nnode = NULL; + qdf_list_t *queue; + struct wlan_serialization_pdev_queue *pdev_queue; + + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj_using_psoc(psoc); + if (!ser_pdev_obj) { + ser_err("invalid ser_pdev_obj"); + return NULL; + } + if (is_cmd_from_pending_scan_queue) + pdev_queue = &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_SCAN]; + else + pdev_queue = + &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_NON_SCAN]; + queue = &pdev_queue->pending_list; + if (wlan_serialization_list_empty(queue)) + goto end; + + wlan_serialization_acquire_lock(&pdev_queue->pdev_queue_lock); + if (QDF_STATUS_SUCCESS != wlan_serialization_get_cmd_from_queue( + queue, + &nnode)) { + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + ser_err("Can't get command from queue"); + goto end; + } + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, pdev_node); + cmd = &cmd_list->cmd; + ser_debug("cmd_type[%d] cmd_id[%d]matched", + cmd_list->cmd.cmd_type, cmd_list->cmd.cmd_id); + +end: + return cmd; +} + +static struct wlan_serialization_command* +wlan_serialization_get_list_next_node(qdf_list_t *queue, + struct wlan_serialization_command *cmd, + struct wlan_ser_pdev_obj *ser_pdev_obj) +{ + struct wlan_serialization_command_list *cmd_list = NULL; + qdf_list_node_t *pnode = NULL, *nnode = NULL; + bool found = false; + uint32_t i = 0; + QDF_STATUS status; + struct wlan_serialization_command *ret_cmd = NULL; + + i = wlan_serialization_list_size(queue); + if (i == 0) { + ser_err("Empty Queue"); + return NULL; + } + while (i--) { + if (!cmd_list) + status = wlan_serialization_peek_front(queue, &nnode); + else + status = wlan_serialization_peek_next(queue, pnode, + &nnode); + + if ((status != QDF_STATUS_SUCCESS) || found) + break; + + pnode = nnode; + cmd_list = qdf_container_of( + nnode, + struct wlan_serialization_command_list, + pdev_node); + if (wlan_serialization_match_cmd_id_type( + nnode, cmd, WLAN_SER_PDEV_NODE) && + wlan_serialization_match_cmd_vdev(nnode, + cmd->vdev, + WLAN_SER_PDEV_NODE)) { + found = true; + } + nnode = NULL; + } + if (nnode && found) { + cmd_list = qdf_container_of( + nnode, + struct wlan_serialization_command_list, + pdev_node); + ret_cmd = &cmd_list->cmd; + } + if (!found) { + ser_err("Can't locate next command"); + return NULL; + } + if (!nnode) { + ser_debug("next node is empty, so fine"); + return NULL; + } + + return ret_cmd; +} + +struct wlan_serialization_command* +wlan_serialization_get_pending_list_next_node_using_psoc( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *prev_cmd, + uint8_t is_cmd_for_pending_scan_queue) +{ + struct wlan_ser_pdev_obj *ser_pdev_obj; + qdf_list_t *queue; + struct wlan_serialization_pdev_queue *pdev_queue; + struct wlan_serialization_command *cmd; + + if (!prev_cmd) { + ser_err("invalid prev_cmd"); + return NULL; + } + + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj_using_psoc(psoc); + if (!ser_pdev_obj) { + ser_err("invalid ser_pdev_obj"); + return NULL; + } + if (is_cmd_for_pending_scan_queue) + pdev_queue = &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_SCAN]; + else + pdev_queue = + &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_NON_SCAN]; + queue = &pdev_queue->pending_list; + + wlan_serialization_acquire_lock(&pdev_queue->pdev_queue_lock); + cmd = wlan_serialization_get_list_next_node(queue, prev_cmd, + ser_pdev_obj); + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + + return cmd; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_main.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_main.c new file mode 100644 index 0000000000000000000000000000000000000000..683d00074434a9219ce7647e25aa18dec536e3b7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_main.c @@ -0,0 +1,624 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_serialization_main.c + * This file defines the important functions pertinent to + * serialization to initialize and de-initialize the + * component. + */ +#include +#include +#include +#include +#include +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_rules_i.h" +#include "wlan_serialization_utils_i.h" + +QDF_STATUS wlan_serialization_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct wlan_ser_psoc_obj *ser_soc_obj = + wlan_serialization_get_psoc_obj(psoc); + + if (!ser_soc_obj) { + ser_err("invalid ser_soc_obj"); + goto error; + } + + /* + * purge all serialization command if there are any pending to make + * sure memory and vdev ref are freed. + */ + wlan_serialization_purge_all_cmd(psoc); + /* clean up all timers before exiting */ + status = wlan_serialization_cleanup_all_timers(ser_soc_obj); + if (status != QDF_STATUS_SUCCESS) + ser_err("ser cleanning up all timer failed"); + + /* Use lock to free to avoid any race where timer is still in use */ + wlan_serialization_acquire_lock(&ser_soc_obj->timer_lock); + qdf_mem_free(ser_soc_obj->timers); + ser_soc_obj->timers = NULL; + ser_soc_obj->max_active_cmds = 0; + wlan_serialization_release_lock(&ser_soc_obj->timer_lock); +error: + return status; +} + +QDF_STATUS wlan_serialization_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + uint8_t pdev_count; + struct wlan_ser_psoc_obj *ser_soc_obj = + wlan_serialization_get_psoc_obj(psoc); + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!ser_soc_obj) { + ser_err("invalid ser_soc_obj"); + goto error; + } + + pdev_count = wlan_psoc_get_pdev_count(psoc); + ser_soc_obj->max_active_cmds = WLAN_SER_MAX_ACTIVE_SCAN_CMDS + + (pdev_count * WLAN_SER_MAX_VDEVS); + + ser_debug("max_active_cmds %d", ser_soc_obj->max_active_cmds); + + ser_soc_obj->timers = + qdf_mem_malloc(sizeof(struct wlan_serialization_timer) * + ser_soc_obj->max_active_cmds); + if (!ser_soc_obj->timers) { + status = QDF_STATUS_E_NOMEM; + goto error; + } + + status = QDF_STATUS_SUCCESS; + +error: + return status; +} + +/** + * wlan_serialization_psoc_create_handler() - PSOC obj create callback + * @psoc: PSOC object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization and + * when obj manager gets its turn to create the object, it would notify each + * component with the corresponding callback registered to inform the + * completion of the creation of the respective object. + * + * Return: QDF Status + */ +static QDF_STATUS wlan_serialization_psoc_create_handler( + struct wlan_objmgr_psoc *psoc, void *arg_list) +{ + struct wlan_ser_psoc_obj *soc_ser_obj; + QDF_STATUS status = QDF_STATUS_E_NOMEM; + + soc_ser_obj = + qdf_mem_malloc(sizeof(*soc_ser_obj)); + if (!soc_ser_obj) + goto error; + + status = wlan_objmgr_psoc_component_obj_attach( + psoc, + WLAN_UMAC_COMP_SERIALIZATION, + soc_ser_obj, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(soc_ser_obj); + ser_err("Obj attach failed"); + goto error; + } + wlan_serialization_create_lock(&soc_ser_obj->timer_lock); + ser_debug("ser psoc obj created"); + status = QDF_STATUS_SUCCESS; + +error: + return status; +} + +/** + * wlan_serialization_destroy_cmd_pool() - Destroy the global cmd pool + * @ser_pdev_obj: Serialization private pdev object + * + * Return: None + */ +static void wlan_serialization_destroy_cmd_pool( + struct wlan_serialization_pdev_queue *pdev_queue) +{ + qdf_list_node_t *node = NULL; + + ser_debug("Destroy cmd pool list %pK, size %d", + &pdev_queue->cmd_pool_list, + qdf_list_size(&pdev_queue->cmd_pool_list)); + while (!qdf_list_empty(&pdev_queue->cmd_pool_list)) { + qdf_list_remove_front(&pdev_queue->cmd_pool_list, + &node); + qdf_mem_free(node); + } + + qdf_list_destroy(&pdev_queue->cmd_pool_list); + +} + +/** + * wlan_serialization_create_cmd_pool() - Create the global cmd pool + * @pdev: PDEV Object + * @ser_pdev_obj: Serialization private pdev object + * + * Global command pool of memory is created here. + * It is safe to allocate memory individually for each command rather than + * requesting for a huge chunk of memory at once. + * + * The individual command nodes allocated above will keep moving between + * the active, pending and global pool lists dynamically, but all the + * memory will be freed during driver unload only. + * + * Return: QDF Status + */ +static QDF_STATUS +wlan_serialization_create_cmd_pool( + struct wlan_serialization_pdev_queue *pdev_queue, + uint16_t cmd_pool_size) +{ + struct wlan_serialization_command_list *cmd_list_ptr; + uint8_t i; + QDF_STATUS status = QDF_STATUS_E_NOMEM; + + qdf_list_create(&pdev_queue->cmd_pool_list, cmd_pool_size); + + for (i = 0; i < cmd_pool_size; i++) { + cmd_list_ptr = qdf_mem_malloc(sizeof(*cmd_list_ptr)); + if (!cmd_list_ptr) { + wlan_serialization_destroy_cmd_pool(pdev_queue); + goto error; + } + + qdf_mem_zero(cmd_list_ptr, sizeof(*cmd_list_ptr)); + qdf_list_insert_back( + &pdev_queue->cmd_pool_list, + &cmd_list_ptr->pdev_node); + cmd_list_ptr->cmd_in_use = 0; + } + + ser_debug("Create cmd pool list %pK, size %d", + &pdev_queue->cmd_pool_list, + qdf_list_size(&pdev_queue->cmd_pool_list)); + + status = QDF_STATUS_SUCCESS; + +error: + return status; +} + +/** + * wlan_serialization_pdev_create_handler() - PDEV obj create callback + * @pdev: PDEV object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization and + * when obj manager gets its turn to create the object, it would notify each + * component with the corresponding callback registered to inform the + * completion of the creation of the respective object. + * + * Return: QDF Status + */ +static QDF_STATUS wlan_serialization_pdev_create_handler( + struct wlan_objmgr_pdev *pdev, void *arg_list) +{ + struct wlan_ser_pdev_obj *ser_pdev_obj; + struct wlan_serialization_pdev_queue *pdev_queue; + QDF_STATUS status = QDF_STATUS_E_NOMEM; + uint8_t index; + uint8_t free_index; + uint8_t max_active_cmds; + uint8_t max_pending_cmds; + uint16_t cmd_pool_size; + + ser_pdev_obj = + qdf_mem_malloc(sizeof(*ser_pdev_obj)); + if (!ser_pdev_obj) + goto error; + + for (index = 0; index < SER_PDEV_QUEUE_COMP_MAX; index++) { + pdev_queue = &ser_pdev_obj->pdev_q[index]; + + wlan_serialization_create_lock(&pdev_queue->pdev_queue_lock); + + switch (index) { + case SER_PDEV_QUEUE_COMP_SCAN: + max_active_cmds = WLAN_SER_MAX_ACTIVE_SCAN_CMDS; + max_pending_cmds = WLAN_SER_MAX_PENDING_SCAN_CMDS; + cmd_pool_size = max_active_cmds + max_pending_cmds; + break; + + case SER_PDEV_QUEUE_COMP_NON_SCAN: + max_active_cmds = WLAN_SER_MAX_ACTIVE_CMDS; + max_pending_cmds = WLAN_SER_MAX_PENDING_CMDS; + cmd_pool_size = max_active_cmds + max_pending_cmds; + ser_debug("max_active_cmds %d max_pending_cmds %d", + max_active_cmds, max_pending_cmds); + break; + } + qdf_list_create(&pdev_queue->active_list, + max_active_cmds); + qdf_list_create(&pdev_queue->pending_list, + max_pending_cmds); + + status = wlan_serialization_create_cmd_pool(pdev_queue, + cmd_pool_size); + if (status != QDF_STATUS_SUCCESS) { + ser_err("Create cmd pool failed, status %d", status); + goto error_free; + } + + pdev_queue->vdev_active_cmd_bitmap = 0; + pdev_queue->blocking_cmd_active = 0; + pdev_queue->blocking_cmd_waiting = 0; + } + + status = wlan_objmgr_pdev_component_obj_attach( + pdev, WLAN_UMAC_COMP_SERIALIZATION, + ser_pdev_obj, QDF_STATUS_SUCCESS); + + if (status != QDF_STATUS_SUCCESS) { + ser_err("Pdev obj attach failed, status %d", status); + goto error_free; + } + + return QDF_STATUS_SUCCESS; + +error_free: + for (free_index = 0; free_index <= index; free_index++) { + pdev_queue = &ser_pdev_obj->pdev_q[free_index]; + + wlan_serialization_destroy_cmd_pool(pdev_queue); + qdf_list_destroy(&pdev_queue->pending_list); + qdf_list_destroy(&pdev_queue->active_list); + wlan_serialization_destroy_lock(&pdev_queue->pdev_queue_lock); + } +error: + return status; +} + +/** + * wlan_serialization_psoc_destroy_handler() - PSOC obj delete callback + * @psoc: PSOC object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization and + * when obj manager gets its turn to delete the object, it would notify each + * component with the corresponding callback registered to inform the + * completion of the deletion of the respective object. + * + * Return: QDF Status + */ +static QDF_STATUS +wlan_serialization_psoc_destroy_handler(struct wlan_objmgr_psoc *psoc, + void *arg_list) +{ + QDF_STATUS status = QDF_STATUS_E_FAULT; + struct wlan_ser_psoc_obj *ser_soc_obj = + wlan_serialization_get_psoc_obj(psoc); + + if (!ser_soc_obj) { + ser_err("invalid ser_soc_obj"); + goto error; + } + status = wlan_objmgr_psoc_component_obj_detach( + psoc, WLAN_UMAC_COMP_SERIALIZATION, ser_soc_obj); + if (status != QDF_STATUS_SUCCESS) + ser_err("ser psoc private obj detach failed"); + + wlan_serialization_destroy_lock(&ser_soc_obj->timer_lock); + ser_debug("ser psoc obj deleted with status %d", status); + qdf_mem_free(ser_soc_obj); + +error: + return status; +} + +/** + * wlan_serialization_pdev_destroy_handler() - PDEV obj delete callback + * @pdev: PDEV object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization and + * when obj manager gets its turn to delete the object, it would notify each + * component with the corresponding callback registered to inform the + * completion of the deletion of the respective object. + * + * Return: QDF Status + */ +static QDF_STATUS wlan_serialization_pdev_destroy_handler( + struct wlan_objmgr_pdev *pdev, void *arg_list) +{ + QDF_STATUS status; + struct wlan_serialization_pdev_queue *pdev_queue; + struct wlan_ser_pdev_obj *ser_pdev_obj = + wlan_serialization_get_pdev_obj(pdev); + uint8_t index; + + if (!ser_pdev_obj) { + ser_err("ser_pdev_obj NULL"); + return QDF_STATUS_E_INVAL; + } + status = wlan_objmgr_pdev_component_obj_detach( + pdev, WLAN_UMAC_COMP_SERIALIZATION, ser_pdev_obj); + + for (index = 0; index < SER_PDEV_QUEUE_COMP_MAX; index++) { + pdev_queue = &ser_pdev_obj->pdev_q[index]; + + wlan_serialization_destroy_pdev_list(pdev_queue); + wlan_serialization_destroy_cmd_pool(pdev_queue); + + wlan_serialization_destroy_lock(&pdev_queue->pdev_queue_lock); + } + qdf_mem_free(ser_pdev_obj); + + return status; +} + +/** + * wlan_serialization_vdev_create_handler() - VDEV obj create callback + * @vdev: VDEV object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization and + * when obj manager gets its turn to create the object, it would notify each + * component with the corresponding callback registered to inform the + * completion of the creation of the respective object. + * + * Return: QDF Status + */ +static QDF_STATUS +wlan_serialization_vdev_create_handler(struct wlan_objmgr_vdev *vdev, + void *arg_list) +{ + struct wlan_ser_vdev_obj *ser_vdev_obj; + struct wlan_serialization_vdev_queue *vdev_q; + QDF_STATUS status = QDF_STATUS_E_NOMEM; + uint8_t index; + uint8_t max_active_cmds; + uint8_t max_pending_cmds; + + ser_vdev_obj = qdf_mem_malloc(sizeof(*ser_vdev_obj)); + if (!ser_vdev_obj) + goto error; + + for (index = 0; index < SER_VDEV_QUEUE_COMP_MAX; index++) { + vdev_q = &ser_vdev_obj->vdev_q[index]; + + switch (index) { + case SER_VDEV_QUEUE_COMP_NON_SCAN: + max_active_cmds = WLAN_SER_MAX_ACTIVE_CMDS / + WLAN_SER_MAX_VDEVS; + if (wlan_vdev_mlme_get_opmode(vdev) == QDF_SAP_MODE || + wlan_vdev_mlme_get_opmode(vdev) == QDF_P2P_GO_MODE) + max_pending_cmds = WLAN_SER_MAX_PENDING_CMDS_AP; + else + max_pending_cmds = + WLAN_SER_MAX_PENDING_CMDS_STA; + + ser_debug("Vdev type %d max_pending_cmds %d", + wlan_vdev_mlme_get_opmode(vdev), + max_pending_cmds); + break; + } + + qdf_list_create(&vdev_q->active_list, + max_active_cmds); + qdf_list_create(&vdev_q->pending_list, + max_pending_cmds); + } + + status = wlan_objmgr_vdev_component_obj_attach( + vdev, WLAN_UMAC_COMP_SERIALIZATION, ser_vdev_obj, + QDF_STATUS_SUCCESS); + + if (status != QDF_STATUS_SUCCESS) { + for (index = 0; index < SER_VDEV_QUEUE_COMP_MAX; index++) { + vdev_q = &ser_vdev_obj->vdev_q[index]; + qdf_list_destroy(&vdev_q->pending_list); + qdf_list_destroy(&vdev_q->active_list); + } + qdf_mem_free(ser_vdev_obj); + ser_err("serialization vdev obj attach failed"); + } +error: + return status; +} + +/** + * wlan_serialization_vdev_destroy_handler() - vdev obj delete callback + * @vdev: VDEV object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization and + * when obj manager gets its turn to delete the object, it would notify each + * component with the corresponding callback registered to inform the + * completion of the deletion of the respective object. + * + * Return: QDF Status + */ +static QDF_STATUS wlan_serialization_vdev_destroy_handler( + struct wlan_objmgr_vdev *vdev, void *arg_list) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct wlan_serialization_vdev_queue *vdev_q; + struct wlan_ser_vdev_obj *ser_vdev_obj = + wlan_serialization_get_vdev_obj(vdev); + uint8_t index; + + if (!ser_vdev_obj) { + ser_err("ser_vdev_obj NULL"); + return QDF_STATUS_E_INVAL; + } + + status = wlan_objmgr_vdev_component_obj_detach( + vdev, WLAN_UMAC_COMP_SERIALIZATION, ser_vdev_obj); + + /*Clean up serialization timers if any for this vdev*/ + wlan_serialization_cleanup_vdev_timers(vdev); + + for (index = 0; index < SER_VDEV_QUEUE_COMP_MAX; index++) { + vdev_q = &ser_vdev_obj->vdev_q[index]; + wlan_serialization_destroy_vdev_list(&vdev_q->pending_list); + wlan_serialization_destroy_vdev_list(&vdev_q->active_list); + } + + qdf_mem_free(ser_vdev_obj); + + return status; +} + +QDF_STATUS wlan_serialization_init(void) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_register_psoc_create_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_psoc_create_handler, NULL); + if (status != QDF_STATUS_SUCCESS) { + ser_err("Failed to reg soc ser obj create handler"); + goto err_psoc_create; + } + + status = wlan_objmgr_register_psoc_destroy_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_psoc_destroy_handler, NULL); + if (status != QDF_STATUS_SUCCESS) { + ser_err("Failed to reg soc ser obj delete handler"); + goto err_psoc_delete; + } + + status = wlan_objmgr_register_pdev_create_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_pdev_create_handler, NULL); + if (status != QDF_STATUS_SUCCESS) { + ser_err("Failed to reg pdev ser obj create handler"); + goto err_pdev_create; + } + + status = wlan_objmgr_register_pdev_destroy_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_pdev_destroy_handler, NULL); + if (status != QDF_STATUS_SUCCESS) { + ser_err("Failed to reg pdev ser obj delete handler"); + goto err_pdev_delete; + } + + status = wlan_objmgr_register_vdev_create_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_vdev_create_handler, NULL); + if (status != QDF_STATUS_SUCCESS) { + ser_err("Failed to reg vdev ser obj create handler"); + goto err_vdev_create; + } + + status = wlan_objmgr_register_vdev_destroy_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_vdev_destroy_handler, NULL); + if (status != QDF_STATUS_SUCCESS) { + ser_err("Failed to reg vdev ser obj delete handler"); + goto err_vdev_delete; + } + + status = QDF_STATUS_SUCCESS; + goto exit; + +err_vdev_delete: + wlan_objmgr_unregister_vdev_create_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_vdev_create_handler, + NULL); +err_vdev_create: + wlan_objmgr_unregister_pdev_destroy_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_pdev_destroy_handler, + NULL); +err_pdev_delete: + wlan_objmgr_unregister_pdev_create_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_pdev_create_handler, + NULL); +err_pdev_create: + wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_psoc_destroy_handler, + NULL); +err_psoc_delete: + wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_psoc_create_handler, + NULL); +err_psoc_create: +exit: + return status; +} + +QDF_STATUS wlan_serialization_deinit(void) +{ + QDF_STATUS status; + QDF_STATUS ret_status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_psoc_create_handler, + NULL); + + if (status != QDF_STATUS_SUCCESS) { + ser_err("unreg fail for psoc ser obj create notf:%d", status); + ret_status = QDF_STATUS_E_FAILURE; + } + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_psoc_destroy_handler, + NULL); + + if (status != QDF_STATUS_SUCCESS) { + ser_err("unreg fail for psoc ser obj destroy notf:%d", status); + ret_status = QDF_STATUS_E_FAILURE; + } + + status = wlan_objmgr_unregister_pdev_create_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_pdev_create_handler, + NULL); + if (status != QDF_STATUS_SUCCESS) { + ser_err("unreg fail for pdev ser obj create notf:%d", status); + ret_status = QDF_STATUS_E_FAILURE; + } + + status = wlan_objmgr_unregister_pdev_destroy_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_pdev_destroy_handler, + NULL); + + if (status != QDF_STATUS_SUCCESS) { + ser_err("unreg fail for pdev ser destroy notf:%d", status); + ret_status = QDF_STATUS_E_FAILURE; + } + + ser_alert("deregistered callbacks with obj mgr successfully"); + + return ret_status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_main_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_main_i.h new file mode 100644 index 0000000000000000000000000000000000000000..eabfa895dc8cec74971582baed127bac33c47898 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_main_i.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_main.h + * This file contains all the prototype definitions necessary for the + * serialization component's internal functions + */ +#ifndef __WLAN_SERIALIZATION_MAIN_I_H +#define __WLAN_SERIALIZATION_MAIN_I_H + +#include +#include +#include +#include + +#define WLAN_SER_MAX_VDEVS WLAN_UMAC_PDEV_MAX_VDEVS + +#define WLAN_SER_MAX_ACTIVE_CMDS WLAN_SER_MAX_VDEVS + +#ifndef WLAN_SER_MAX_PENDING_CMDS +#define WLAN_SER_MAX_PENDING_CMDS (WLAN_SER_MAX_VDEVS * 4) +#endif + +#ifndef WLAN_SER_MAX_PENDING_CMDS_AP +#define WLAN_SER_MAX_PENDING_CMDS_AP \ + (WLAN_SER_MAX_PENDING_CMDS / WLAN_SER_MAX_VDEVS) +#endif +#ifndef WLAN_SER_MAX_PENDING_CMDS_STA +#define WLAN_SER_MAX_PENDING_CMDS_STA \ + (WLAN_SER_MAX_PENDING_CMDS / WLAN_SER_MAX_VDEVS) +#endif + +#define WLAN_SER_MAX_ACTIVE_SCAN_CMDS 8 +#define WLAN_SER_MAX_PENDING_SCAN_CMDS 24 + +#define WLAN_SERIALIZATION_MAX_GLOBAL_POOL_CMDS \ + (WLAN_SER_MAX_ACTIVE_CMDS + \ + WLAN_SER_MAX_PENDING_CMDS + \ + WLAN_SER_MAX_ACTIVE_SCAN_CMDS + \ + WLAN_SER_MAX_PENDING_SCAN_CMDS) + +#define ser_alert(params...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_SERIALIZATION, params) +#define ser_err(params...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_SERIALIZATION, params) +#define ser_warn(params...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_SERIALIZATION, params) +#define ser_info(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_SERIALIZATION, params) +#define ser_debug(params...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_SERIALIZATION, params) +#define ser_enter() \ + QDF_TRACE_ENTER(QDF_MODULE_ID_SERIALIZATION, "enter") +#define ser_exit() \ + QDF_TRACE_EXIT(QDF_MODULE_ID_SERIALIZATION, "exit") + +#define ser_err_no_fl(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_SERIALIZATION, params) + +/* + * Rate limited serialization logging api + */ +#define ser_err_rl(params...) \ + QDF_TRACE_ERROR_RL(QDF_MODULE_ID_SERIALIZATION, params) +#define ser_debug_rl(params...) \ + QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_SERIALIZATION, params) + +/** + * struct serialization_legacy_callback - to handle legacy serialization cb + * @serialization_purge_cmd_list: function ptr to be filled by serialization + * module + * + * Some of the legacy modules wants to call API to purge the commands in + * order to handle backward compatibility. + */ +struct serialization_legacy_callback { + void (*serialization_purge_cmd_list)(struct wlan_objmgr_psoc *, + struct wlan_objmgr_vdev *, + bool, bool, bool, bool, bool); +}; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_non_scan.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_non_scan.c new file mode 100644 index 0000000000000000000000000000000000000000..cd41cb7a6a395201557562705cdbc0285eeb6827 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_non_scan.c @@ -0,0 +1,671 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_non_scan.c + * This file defines the functions which deals with + * serialization non scan commands. + */ + +#include +#include +#include +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_utils_i.h" +#include "wlan_serialization_non_scan_i.h" + +bool +wlan_serialization_is_non_scan_pending_queue_empty( + struct wlan_serialization_command *cmd) +{ + struct wlan_objmgr_vdev *vdev = NULL; + struct wlan_ser_vdev_obj *ser_vdev_obj = NULL; + struct wlan_serialization_vdev_queue *vdev_q; + bool status = false; + + vdev = wlan_serialization_get_vdev_from_cmd(cmd); + + if (!vdev) { + ser_err("vdev object is invalid"); + goto error; + } + + ser_vdev_obj = wlan_serialization_get_vdev_obj(vdev); + vdev_q = &ser_vdev_obj->vdev_q[SER_VDEV_QUEUE_COMP_NON_SCAN]; + + if (qdf_list_empty(&vdev_q->pending_list)) + status = true; + +error: + return status; +} + +/** + * wlan_serialization_is_active_nonscan_cmd_allowed() - find if cmd allowed + * @pdev: pointer to pdev object + * + * This API will be called to find out if non scan cmd is allowed. + * + * Return: true or false + */ + +bool +wlan_serialization_is_active_non_scan_cmd_allowed( + struct wlan_serialization_command *cmd) +{ + struct wlan_serialization_pdev_queue *pdev_queue; + struct wlan_ser_pdev_obj *ser_pdev_obj; + uint32_t vdev_active_cmd_bitmap; + bool blocking_cmd_active = 0; + uint8_t blocking_cmd_waiting = 0; + bool status = false; + uint32_t vdev_id; + + ser_pdev_obj = wlan_serialization_get_pdev_obj( + wlan_serialization_get_pdev_from_cmd(cmd)); + + pdev_queue = wlan_serialization_get_pdev_queue_obj(ser_pdev_obj, + cmd->cmd_type); + + vdev_active_cmd_bitmap = pdev_queue->vdev_active_cmd_bitmap; + blocking_cmd_active = pdev_queue->blocking_cmd_active; + blocking_cmd_waiting = pdev_queue->blocking_cmd_waiting; + + /* + * Command is blocking + */ + if (cmd->is_blocking) { + /* + * For blocking commands, no other + * commands from any vdev should be active + */ + if (vdev_active_cmd_bitmap) { + status = false; + pdev_queue->blocking_cmd_waiting++; + } else { + status = true; + } + } else { + /* + * Command is non blocking + * For activating non blocking commands, if there any blocking + * commands, waiting or active, put it to pending queue + */ + if (blocking_cmd_active || blocking_cmd_waiting) { + status = false; + } else { + /* + * For non blocking command, and no blocking commands + * waiting or active, check if a cmd for that vdev is active + * If not active, put to active else pending queue + */ + vdev_id = wlan_vdev_get_id(cmd->vdev); + status = vdev_active_cmd_bitmap & (1 << vdev_id) + ? false : true; + } + } + return status; +} + +enum wlan_serialization_status wlan_ser_add_non_scan_cmd( + struct wlan_ser_pdev_obj *ser_pdev_obj, + struct wlan_serialization_command_list *cmd_list, + uint8_t is_cmd_for_active_queue) +{ + enum wlan_serialization_status pdev_status, vdev_status; + enum wlan_serialization_status status = WLAN_SER_CMD_DENIED_UNSPECIFIED; + struct wlan_serialization_command_list *pcmd_list; + uint8_t vdev_id; + struct wlan_serialization_pdev_queue *pdev_queue; + + vdev_status = wlan_serialization_add_cmd_to_vdev_queue( + ser_pdev_obj, cmd_list, is_cmd_for_active_queue); + + if (vdev_status == WLAN_SER_CMD_DENIED_LIST_FULL) { + ser_err_rl("List is full cannot add type %d cmd id %d", + cmd_list->cmd.cmd_type, cmd_list->cmd.cmd_id); + status = vdev_status; + goto vdev_error; + } + + if (is_cmd_for_active_queue) { + if (vdev_status != WLAN_SER_CMD_ACTIVE) { + ser_err("Failed to add type %d cmd id %d to vdev active queue", + cmd_list->cmd.cmd_type, cmd_list->cmd.cmd_id); + QDF_ASSERT(0); + goto vdev_error; + } + } else { + if (vdev_status != WLAN_SER_CMD_PENDING) { + ser_err("Failed to add type %d cmd id %d to vdev pending queue", + cmd_list->cmd.cmd_type, cmd_list->cmd.cmd_id); + QDF_ASSERT(0); + goto vdev_error; + } + } + + pdev_status = wlan_serialization_add_cmd_to_pdev_queue( + ser_pdev_obj, cmd_list, is_cmd_for_active_queue); + + if (pdev_status == WLAN_SER_CMD_DENIED_LIST_FULL) { + ser_err_rl("pdev List is full cannot add type %d cmd id %d", + cmd_list->cmd.cmd_type, cmd_list->cmd.cmd_id); + status = pdev_status; + goto pdev_error; + } + + if (is_cmd_for_active_queue) { + if (pdev_status != WLAN_SER_CMD_ACTIVE) { + ser_err("Failed to add type %d cmd id %d to pdev active queue", + cmd_list->cmd.cmd_type, cmd_list->cmd.cmd_id); + QDF_ASSERT(0); + goto pdev_error; + } + } else { + if (pdev_status != WLAN_SER_CMD_PENDING) { + ser_err("Failed to add type %d cmd id %d to pdev pending queue", + cmd_list->cmd.cmd_type, cmd_list->cmd.cmd_id); + QDF_ASSERT(0); + goto pdev_error; + } + } +pdev_error: + /* + * If cmd added to vdev queue, but failed while + * adding to pdev queue, remove cmd from vdev queue as well + */ + if (pdev_status != vdev_status) { + wlan_serialization_remove_cmd_from_vdev_queue( + ser_pdev_obj, &pcmd_list, + &cmd_list->cmd, + is_cmd_for_active_queue); + } else { + status = pdev_status; + } + + if (is_cmd_for_active_queue) { + pdev_queue = wlan_serialization_get_pdev_queue_obj( + ser_pdev_obj, cmd_list->cmd.cmd_type); + vdev_id = wlan_vdev_get_id(cmd_list->cmd.vdev); + pdev_queue->vdev_active_cmd_bitmap |= (1 << vdev_id); + + if (cmd_list->cmd.is_blocking) + pdev_queue->blocking_cmd_active = 1; + } + +vdev_error: + return status; +} + +enum wlan_serialization_status +wlan_ser_move_non_scan_pending_to_active( + struct wlan_ser_pdev_obj *ser_pdev_obj, + struct wlan_objmgr_vdev *vdev, + bool blocking_cmd_removed) +{ + struct wlan_serialization_command_list *pending_cmd_list = NULL; + struct wlan_serialization_command_list *active_cmd_list; + struct wlan_serialization_command cmd_to_remove; + enum wlan_serialization_status status = WLAN_SER_CMD_DENIED_UNSPECIFIED; + struct wlan_serialization_pdev_queue *pdev_queue; + struct wlan_serialization_vdev_queue *vdev_queue; + + struct wlan_ser_vdev_obj *ser_vdev_obj; + + qdf_list_t *pending_queue; + qdf_list_node_t *pending_node = NULL; + QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE; + uint32_t blocking_cmd_waiting = 0; + uint32_t vdev_id; + uint32_t qsize; + bool vdev_cmd_active = 0; + bool vdev_queue_lookup = false; + + pdev_queue = &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_NON_SCAN]; + + ser_vdev_obj = wlan_serialization_get_vdev_obj(vdev); + vdev_queue = &ser_vdev_obj->vdev_q[SER_VDEV_QUEUE_COMP_NON_SCAN]; + + if (!ser_pdev_obj) { + ser_err("Can't find ser_pdev_obj"); + goto error; + } + + wlan_serialization_acquire_lock(&pdev_queue->pdev_queue_lock); + + blocking_cmd_waiting = pdev_queue->blocking_cmd_waiting; + + if (!blocking_cmd_removed && !blocking_cmd_waiting) { + pending_queue = &vdev_queue->pending_list; + vdev_queue_lookup = true; + } else { + pending_queue = &pdev_queue->pending_list; + } + + qsize = wlan_serialization_list_size(pending_queue); + if (!qsize) { + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + goto error; + } + + while (qsize--) { + qdf_status = wlan_serialization_get_cmd_from_queue( + pending_queue, &pending_node); + if (qdf_status != QDF_STATUS_SUCCESS) { + ser_err("can't peek cmd"); + break; + } + + if (vdev_queue_lookup) { + pending_cmd_list = + qdf_container_of( + pending_node, + struct wlan_serialization_command_list, + vdev_node); + } else { + pending_cmd_list = + qdf_container_of( + pending_node, + struct wlan_serialization_command_list, + pdev_node); + } + + if (!pending_cmd_list) { + wlan_serialization_release_lock( + &pdev_queue->pdev_queue_lock); + ser_debug( + "non scan cmd cannot move frm pendin to actv"); + goto error; + } + + vdev_id = wlan_vdev_get_id(pending_cmd_list->cmd.vdev); + vdev_cmd_active = + pdev_queue->vdev_active_cmd_bitmap & + (1 << vdev_id); + + if (!vdev_queue_lookup) { + if (pending_cmd_list->cmd.is_blocking && + pdev_queue->vdev_active_cmd_bitmap) { + break; + } + if (vdev_cmd_active) + continue; + } else { + if (vdev_cmd_active) + break; + } + + qdf_mem_copy(&cmd_to_remove, &pending_cmd_list->cmd, + sizeof(struct wlan_serialization_command)); + + qdf_status = wlan_ser_remove_non_scan_cmd(ser_pdev_obj, + &pending_cmd_list, + &cmd_to_remove, + false); + + wlan_ser_update_cmd_history( + pdev_queue, &pending_cmd_list->cmd, + SER_PENDING_TO_ACTIVE, + false, false); + + if (QDF_STATUS_SUCCESS != qdf_status) { + wlan_serialization_release_lock( + &pdev_queue->pdev_queue_lock); + ser_err("Can't remove cmd from pendingQ id-%d type-%d", + pending_cmd_list->cmd.cmd_id, + pending_cmd_list->cmd.cmd_type); + QDF_ASSERT(0); + status = WLAN_SER_CMD_DENIED_UNSPECIFIED; + goto error; + } + + active_cmd_list = pending_cmd_list; + + status = wlan_ser_add_non_scan_cmd( + ser_pdev_obj, active_cmd_list, true); + + if (WLAN_SER_CMD_ACTIVE != status) { + wlan_serialization_release_lock( + &pdev_queue->pdev_queue_lock); + ser_err("Can't move cmd to activeQ id-%d type-%d", + pending_cmd_list->cmd.cmd_id, + pending_cmd_list->cmd.cmd_type); + wlan_serialization_insert_back( + &pdev_queue->cmd_pool_list, + &active_cmd_list->pdev_node); + status = WLAN_SER_CMD_DENIED_UNSPECIFIED; + QDF_ASSERT(0); + goto error; + } + + wlan_ser_update_cmd_history( + pdev_queue, &active_cmd_list->cmd, + SER_PENDING_TO_ACTIVE, + true, true); + + qdf_atomic_set_bit(CMD_MARKED_FOR_ACTIVATION, + &active_cmd_list->cmd_in_use); + + if (active_cmd_list->cmd.is_blocking) + pdev_queue->blocking_cmd_waiting--; + + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + + wlan_serialization_activate_cmd(active_cmd_list, ser_pdev_obj, + SER_PENDING_TO_ACTIVE); + + wlan_serialization_acquire_lock(&pdev_queue->pdev_queue_lock); + + if (vdev_queue_lookup || pdev_queue->blocking_cmd_active) + break; + + pending_node = NULL; + + } + + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); +error: + + return status; +} + +QDF_STATUS wlan_ser_remove_non_scan_cmd( + struct wlan_ser_pdev_obj *ser_pdev_obj, + struct wlan_serialization_command_list **pcmd_list, + struct wlan_serialization_command *cmd, + uint8_t is_active_cmd) +{ + QDF_STATUS pdev_status, vdev_status; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + uint32_t vdev_id; + bool blocking_cmd_removed = 0; + struct wlan_serialization_pdev_queue *pdev_queue; + + vdev_status = + wlan_serialization_remove_cmd_from_vdev_queue(ser_pdev_obj, + pcmd_list, + cmd, + is_active_cmd); + + /* Here command removal can fail for 2 reasons + * 1. The cmd is not present + * 2. The command had not returned from activation + * and will not be removed now. + * + * In the second case, we should not flag it as error + * since it will removed after the activation completes. + */ + + if (vdev_status != QDF_STATUS_SUCCESS) { + status = vdev_status; + if (vdev_status != QDF_STATUS_E_PENDING) + ser_debug("Failed to remove type %d id %d from vdev queue", + cmd->cmd_type, cmd->cmd_id); + goto error; + } + + pdev_status = + wlan_serialization_remove_cmd_from_pdev_queue(ser_pdev_obj, + pcmd_list, + cmd, + is_active_cmd); + + if (pdev_status != QDF_STATUS_SUCCESS) { + ser_debug("Failed to remove type %d id %d from pdev active/pending queue", + cmd->cmd_type, cmd->cmd_id); + goto error; + } + + if (is_active_cmd) { + blocking_cmd_removed = (*pcmd_list)->cmd.is_blocking; + pdev_queue = wlan_serialization_get_pdev_queue_obj( + ser_pdev_obj, (*pcmd_list)->cmd.cmd_type); + + if (blocking_cmd_removed) + pdev_queue->blocking_cmd_active = 0; + + vdev_id = wlan_vdev_get_id(cmd->vdev); + pdev_queue->vdev_active_cmd_bitmap &= ~(1 << vdev_id); + } + + status = QDF_STATUS_SUCCESS; + +error: + return status; +} + +enum wlan_serialization_cmd_status +wlan_ser_cancel_non_scan_cmd( + struct wlan_ser_pdev_obj *ser_pdev_obj, + struct wlan_objmgr_pdev *pdev, struct wlan_objmgr_vdev *vdev, + struct wlan_serialization_command *cmd, + enum wlan_serialization_cmd_type cmd_type, + uint8_t is_active_queue, enum wlan_ser_cmd_attr cmd_attr) +{ + qdf_list_t *pdev_queue; + qdf_list_t *vdev_queue; + struct wlan_serialization_pdev_queue *pdev_q; + uint32_t qsize; + uint8_t vdev_id; + bool is_blocking; + struct wlan_serialization_command_list *cmd_list = NULL; + struct wlan_serialization_command cmd_bkup; + qdf_list_node_t *nnode = NULL, *pnode = NULL; + enum wlan_serialization_cmd_status status = WLAN_SER_CMD_NOT_FOUND; + struct wlan_objmgr_psoc *psoc = NULL; + QDF_STATUS qdf_status; + QDF_STATUS pdev_status, vdev_status; + struct wlan_ser_vdev_obj *ser_vdev_obj; + + pdev_q = wlan_serialization_get_pdev_queue_obj(ser_pdev_obj, cmd_type); + + pdev_queue = wlan_serialization_get_list_from_pdev_queue( + ser_pdev_obj, cmd_type, is_active_queue); + + if (pdev) + psoc = wlan_pdev_get_psoc(pdev); + else if (vdev) + psoc = wlan_vdev_get_psoc(vdev); + else if (cmd && cmd->vdev) + psoc = wlan_vdev_get_psoc(cmd->vdev); + else + ser_debug("Can't find psoc"); + + wlan_serialization_acquire_lock(&pdev_q->pdev_queue_lock); + + qsize = wlan_serialization_list_size(pdev_queue); + while (!wlan_serialization_list_empty(pdev_queue) && qsize--) { + if (wlan_serialization_get_cmd_from_queue(pdev_queue, &nnode) + != QDF_STATUS_SUCCESS) { + ser_err("can't read cmd from queue"); + status = WLAN_SER_CMD_NOT_FOUND; + break; + } + cmd_list = + qdf_container_of(nnode, + struct wlan_serialization_command_list, + pdev_node); + if (cmd && !wlan_serialization_match_cmd_id_type( + nnode, cmd, + WLAN_SER_PDEV_NODE)) { + pnode = nnode; + continue; + } + + if (vdev && + !wlan_serialization_match_cmd_vdev(nnode, + vdev, + WLAN_SER_PDEV_NODE)) { + pnode = nnode; + continue; + } + + if (pdev && + !wlan_serialization_match_cmd_pdev(nnode, + pdev, + WLAN_SER_PDEV_NODE)) { + pnode = nnode; + continue; + } + + if (cmd_type > WLAN_SER_CMD_NONSCAN && vdev && + (!wlan_serialization_match_cmd_type(nnode, cmd_type, + WLAN_SER_PDEV_NODE) || + !wlan_serialization_match_cmd_vdev(nnode, vdev, + WLAN_SER_PDEV_NODE))) { + pnode = nnode; + continue; + } + + /* + * If a non-blocking cmd is required to be cancelled, but + * the nnode cmd is a blocking cmd then continue with the + * next command in the list else proceed with cmd cancel. + */ + if ((cmd_attr == WLAN_SER_CMD_ATTR_NONBLOCK) && + wlan_serialization_match_cmd_blocking(nnode, + WLAN_SER_PDEV_NODE)) { + pnode = nnode; + continue; + } + + /* + * active queue can't be removed directly, requester needs to + * wait for active command response and send remove request for + * active command separately + */ + if (is_active_queue) { + if (!psoc || !cmd_list) { + ser_err("psoc:0x%pK, cmd_list:0x%pK", + psoc, cmd_list); + status = WLAN_SER_CMD_NOT_FOUND; + break; + } + + /* Cancel request received for a cmd in active + * queue which has not been activated yet, we mark + * it as CMD_ACTIVE_MARKED_FOR_CANCEL and remove + * the cmd after activation + */ + if (qdf_atomic_test_bit(CMD_MARKED_FOR_ACTIVATION, + &cmd_list->cmd_in_use)) { + qdf_atomic_set_bit(CMD_ACTIVE_MARKED_FOR_CANCEL, + &cmd_list->cmd_in_use); + status = WLAN_SER_CMD_MARKED_FOR_ACTIVATION; + continue; + } + + qdf_status = wlan_serialization_find_and_stop_timer( + psoc, &cmd_list->cmd, + SER_CANCEL); + if (QDF_IS_STATUS_ERROR(qdf_status)) { + ser_err("Can't find timer for active cmd"); + status = WLAN_SER_CMD_NOT_FOUND; + /* + * This should not happen, as an active command + * should always have the timer. + */ + QDF_BUG(0); + break; + } + + status = WLAN_SER_CMD_IN_ACTIVE_LIST; + } + + qdf_mem_copy(&cmd_bkup, &cmd_list->cmd, + sizeof(struct wlan_serialization_command)); + + pdev_status = + wlan_serialization_remove_node(pdev_queue, + &cmd_list->pdev_node); + + ser_vdev_obj = wlan_serialization_get_vdev_obj( + cmd_list->cmd.vdev); + + vdev_queue = wlan_serialization_get_list_from_vdev_queue( + ser_vdev_obj, cmd_type, is_active_queue); + + vdev_status = + wlan_serialization_remove_node(vdev_queue, + &cmd_list->vdev_node); + + if (pdev_status != QDF_STATUS_SUCCESS || + vdev_status != QDF_STATUS_SUCCESS) { + ser_err("can't remove cmd from pdev/vdev queue"); + status = WLAN_SER_CMD_NOT_FOUND; + break; + } + + qdf_mem_zero(&cmd_list->cmd, + sizeof(struct wlan_serialization_command)); + cmd_list->cmd_in_use = 0; + qdf_status = wlan_serialization_insert_back( + &pdev_q->cmd_pool_list, + &cmd_list->pdev_node); + + if (QDF_STATUS_SUCCESS != qdf_status) { + ser_err("can't remove cmd from queue"); + status = WLAN_SER_CMD_NOT_FOUND; + break; + } + nnode = pnode; + + vdev_id = wlan_vdev_get_id(cmd_bkup.vdev); + is_blocking = cmd_bkup.is_blocking; + + wlan_ser_update_cmd_history(pdev_q, &cmd_bkup, + SER_CANCEL, false, is_active_queue); + + wlan_serialization_release_lock(&pdev_q->pdev_queue_lock); + /* + * call pending cmd's callback to notify that + * it is being removed + */ + if (cmd_bkup.cmd_cb) { + /* caller should now do necessary clean up */ + ser_debug("Cancel command: type %d id %d and Release memory", + cmd_bkup.cmd_type, cmd_bkup.cmd_id); + cmd_bkup.cmd_cb(&cmd_bkup, WLAN_SER_CB_CANCEL_CMD); + /* caller should release the memory */ + cmd_bkup.cmd_cb(&cmd_bkup, WLAN_SER_CB_RELEASE_MEM_CMD); + } + + wlan_serialization_acquire_lock(&pdev_q->pdev_queue_lock); + + if (is_active_queue) { + if (is_blocking) + pdev_q->blocking_cmd_active = 0; + pdev_q->vdev_active_cmd_bitmap &= ~(1 << vdev_id); + ser_debug("pdev_q->vdev_active_cmd_bitmap %x after reseting for vdev %d", + pdev_q->vdev_active_cmd_bitmap, + vdev_id); + } else { + if (is_blocking) + pdev_q->blocking_cmd_waiting--; + + status = WLAN_SER_CMD_IN_PENDING_LIST; + } + + + if (!vdev && !pdev) + break; + } + + wlan_serialization_release_lock(&pdev_q->pdev_queue_lock); + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_non_scan_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_non_scan_i.h new file mode 100644 index 0000000000000000000000000000000000000000..d52402bf8efb1b2b723ddc98be46798fcebf9dbb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_non_scan_i.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_serialization_non_scan_i.h + * This file defines the prototypes for functions which deals with + * serialization non scan commands. + */ + +#ifndef __WLAN_SERIALIZATION_NON_SCAN_I_H +#define __WLAN_SERIALIZATION_NON_SCAN_I_H + +#include +#include +#include +#include +#include "wlan_serialization_utils_i.h" +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_api.h" +#include "wlan_serialization_internal_i.h" +#include "wlan_serialization_queue_i.h" + +/** + * wlan_serialization_is_non_scan_pending_queue_empty() + * + * @cmd: Serialization command information + * + * This API will be find out if non scan cmd pending queue is empty. + * + * Return: true or false + */ +bool +wlan_serialization_is_non_scan_pending_queue_empty( + struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_is_active_nonscan_cmd_allowed() - find if cmd allowed + * to be enqueued in active queue + * @cmd: Serialization command information + * + * This API will be called to find out if non scan cmd is allowed. + * + * Return: true or false + */ +bool +wlan_serialization_is_active_non_scan_cmd_allowed( + struct wlan_serialization_command *cmd); + +/** + * wlan_ser_add_non_scan_cmd() - Add a non-scan cmd to serialization queue + * @ser_pdev_obj: Serialization private pdev object + * @cmd_list: Command list with command info that is to be queued + * @is_cmd_for_active_queue: If the cmd to be enqueued in active queue or + * pending queue + * + * Return: Status of the cmd's serialization request + */ +enum wlan_serialization_status +wlan_ser_add_non_scan_cmd( + struct wlan_ser_pdev_obj *ser_pdev_obj, + struct wlan_serialization_command_list *cmd_list, + uint8_t is_cmd_for_active_queue); +/** + * wlan_ser_move_non_scan_pending_to_active() - Move a non-scan cmd from pending + * queue to active queue + * @pcmd_list: Pointer to command list containing the command + * @ser_pdev_obj: Serialization private pdev object + * @vdev: Pointer to object manager vdev + * + * Return: Status of the cmd's serialization request + */ +enum wlan_serialization_status +wlan_ser_move_non_scan_pending_to_active( + struct wlan_ser_pdev_obj *ser_pdev_obj, + struct wlan_objmgr_vdev *vdev, + bool blocking_cmd_removed); + +/** + * wlan_ser_remove_non_scan_cmd() - Remove a non-scan cmd from the given queue + * @ser_pdev_obj: Serialization private pdev object + * @pcmd_list: Pointer to command list containing the command + * @cmd: Serialization command information + * @is_active_cmd: If the cmd has to be removed from active queue or pending + * queue + * + * Return: QDF_STATUS_SUCCESS on successfully removing the cmd else + * QDF_STATUS_E_FAILURE + */ +QDF_STATUS +wlan_ser_remove_non_scan_cmd(struct wlan_ser_pdev_obj *ser_pdev_obj, + struct wlan_serialization_command_list **pcmd_list, + struct wlan_serialization_command *cmd, + uint8_t is_active_cmd); +/** + * wlan_ser_cancel_non_scan_cmd() - Cancel a non-scan cmd from the given queue + * @ser_obj: Serialization private pdev object + * @pdev: Pointer to object manager pdev + * @vdev: Pointer to object manager vdev + * @cmd: Serialization command information + * @cmd_type: Serialization command type to be cancelled + * @is_active_queue: If the cmd has to be removed from active queue or pending + * queue + * @cmd_attr: Indicate the attribute of the cmd to be cancelled + * i.e blocking/non-blocking + * + * Return: Status specifying the cancel of a command from the given queue + */ +enum wlan_serialization_cmd_status +wlan_ser_cancel_non_scan_cmd(struct wlan_ser_pdev_obj *ser_obj, + struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev, + struct wlan_serialization_command *cmd, + enum wlan_serialization_cmd_type cmd_type, + uint8_t is_active_queue, + enum wlan_ser_cmd_attr cmd_attr); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_queue.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_queue.c new file mode 100644 index 0000000000000000000000000000000000000000..d4d23679267a678a1e6d0d51fbfee7b4f10b9b6d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_queue.c @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_queue.c + * This file defines the functions which deals with the + * serialization queue objects. + */ +#include +#include +#include +#include +#include +#include "wlan_serialization_api.h" +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_utils_i.h" +#include "wlan_serialization_queue_i.h" + +struct wlan_serialization_pdev_queue *wlan_serialization_get_pdev_queue_obj( + struct wlan_ser_pdev_obj *pdev_obj, + enum wlan_serialization_cmd_type cmd_type) +{ + struct wlan_serialization_pdev_queue *pdev_queue = NULL; + + if (cmd_type < WLAN_SER_CMD_NONSCAN) + pdev_queue = &pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_SCAN]; + else + pdev_queue = &pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_NON_SCAN]; + + return pdev_queue; +} + +struct wlan_serialization_vdev_queue *wlan_serialization_get_vdev_queue_obj( + struct wlan_ser_vdev_obj *vdev_obj, + enum wlan_serialization_cmd_type cmd_type) +{ + struct wlan_serialization_vdev_queue *vdev_queue = NULL; + + vdev_queue = &vdev_obj->vdev_q[SER_VDEV_QUEUE_COMP_NON_SCAN]; + + return vdev_queue; +} + +qdf_list_t *wlan_serialization_get_list_from_pdev_queue( + struct wlan_ser_pdev_obj *pdev_obj, + enum wlan_serialization_cmd_type cmd_type, + uint8_t is_active_cmd) +{ + struct wlan_serialization_pdev_queue *pdev_queue; + qdf_list_t *queue = NULL; + + pdev_queue = wlan_serialization_get_pdev_queue_obj(pdev_obj, cmd_type); + if (is_active_cmd) + queue = &pdev_queue->active_list; + else + queue = &pdev_queue->pending_list; + + return queue; +} + +qdf_list_t *wlan_serialization_get_list_from_vdev_queue( + struct wlan_ser_vdev_obj *vdev_obj, + enum wlan_serialization_cmd_type cmd_type, + uint8_t is_active_cmd) +{ + struct wlan_serialization_vdev_queue *vdev_queue; + qdf_list_t *queue = NULL; + + vdev_queue = wlan_serialization_get_vdev_queue_obj(vdev_obj, cmd_type); + if (is_active_cmd) + queue = &vdev_queue->active_list; + else + queue = &vdev_queue->pending_list; + + return queue; +} + +enum wlan_serialization_status +wlan_serialization_add_cmd_to_pdev_queue( + struct wlan_ser_pdev_obj *pdev_obj, + struct wlan_serialization_command_list *cmd_list, + uint8_t for_active_queue) +{ + qdf_list_t *queue; + enum wlan_serialization_status status = WLAN_SER_CMD_DENIED_UNSPECIFIED; + + if (!pdev_obj) { + ser_err("invalid serialization pdev"); + status = WLAN_SER_CMD_DENIED_UNSPECIFIED; + goto error; + } + + queue = wlan_serialization_get_list_from_pdev_queue( + pdev_obj, cmd_list->cmd.cmd_type, for_active_queue); + + status = wlan_serialization_add_cmd_to_queue(queue, cmd_list, + pdev_obj, + for_active_queue, + WLAN_SER_PDEV_NODE); + +error: + return status; +} + +enum wlan_serialization_status +wlan_serialization_add_cmd_to_vdev_queue( + struct wlan_ser_pdev_obj *pdev_obj, + struct wlan_serialization_command_list *cmd_list, + uint8_t for_active_queue) +{ + qdf_list_t *queue; + enum wlan_serialization_status status; + struct wlan_serialization_command *cmd; + struct wlan_ser_vdev_obj *vdev_obj; + struct wlan_serialization_vdev_queue *vdev_queue_obj; + + cmd = &cmd_list->cmd; + + vdev_obj = wlan_serialization_get_vdev_obj( + wlan_serialization_get_vdev_from_cmd(cmd)); + + vdev_queue_obj = + wlan_serialization_get_vdev_queue_obj( + vdev_obj, + cmd->cmd_type); + + queue = wlan_serialization_get_list_from_vdev_queue(vdev_obj, + cmd->cmd_type, + for_active_queue); + + status = wlan_serialization_add_cmd_to_queue(queue, cmd_list, + pdev_obj, + for_active_queue, + WLAN_SER_VDEV_NODE); + + if (cmd->queue_disable) + vdev_queue_obj->queue_disable = true; + + return status; +} + +QDF_STATUS +wlan_serialization_remove_cmd_from_pdev_queue( + struct wlan_ser_pdev_obj *pdev_obj, + struct wlan_serialization_command_list **pcmd_list, + struct wlan_serialization_command *cmd, + uint8_t is_active_queue) +{ + qdf_list_t *queue; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!pdev_obj) { + ser_err("Invalid pdev"); + return status; + } + + queue = wlan_serialization_get_list_from_pdev_queue( + pdev_obj, cmd->cmd_type, is_active_queue); + + status = wlan_serialization_remove_cmd_from_queue(queue, cmd, + pcmd_list, + pdev_obj, + WLAN_SER_PDEV_NODE); + + return status; +} + +QDF_STATUS +wlan_serialization_remove_cmd_from_vdev_queue( + struct wlan_ser_pdev_obj *pdev_obj, + struct wlan_serialization_command_list **pcmd_list, + struct wlan_serialization_command *cmd, + uint8_t is_active_queue) +{ + qdf_list_t *queue; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct wlan_ser_vdev_obj *vdev_obj; + + vdev_obj = wlan_serialization_get_vdev_obj( + wlan_serialization_get_vdev_from_cmd(cmd)); + + queue = wlan_serialization_get_list_from_vdev_queue(vdev_obj, + cmd->cmd_type, + is_active_queue); + + status = wlan_serialization_remove_cmd_from_queue(queue, cmd, + pcmd_list, + pdev_obj, + WLAN_SER_VDEV_NODE); + + return status; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_queue_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_queue_i.h new file mode 100644 index 0000000000000000000000000000000000000000..c5845447a1c51e5ed45b1767cd743f8b6763cacf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_queue_i.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_serialization_queue_i.h + * This file defines the prototpye for functions which deals with the + * serialization queue objects. + */ +#ifndef __WLAN_SERIALIZATION_QUEUE_I_H +#define __WLAN_SERIALIZATION_QUEUE_I_H + +#include +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_utils_i.h" +#include +#include +#include +#include +#include + +/** + * wlan_serialization_get_pdev_queue_obj() - Get serialization pdev queue for + * the given cmd_type + * @pdev_obj: Serialization private pdev object + * @cmd_type: Serialization command type i.e scan or non-scan + * + * Return: Pointer to serialization pdev queue + */ +struct wlan_serialization_pdev_queue *wlan_serialization_get_pdev_queue_obj( + struct wlan_ser_pdev_obj *pdev_obj, + enum wlan_serialization_cmd_type cmd_type); + +/** + * wlan_serialization_get_vdev_queue_obj() - Get serialization vdev queue for + * the given cmd_type + * @vdev_obj: Serialization private vdev object + * @cmd_type: Serialization command type i.e scan or non-scan + * + * Return: Pointer to serialization vdev queue + */ +struct wlan_serialization_vdev_queue *wlan_serialization_get_vdev_queue_obj( + struct wlan_ser_vdev_obj *vdev_obj, + enum wlan_serialization_cmd_type cmd_type); + +/** + * wlan_serialization_get_list_from_pdev_queue() - Get list member from the pdev + * queue for the given cmd type + * @pdev_obj: Serialization private pdev object + * @cmd_type: Serialization command type i.e scan or non-scan + * @is_active_cmd: Get list from active queue or pending queue + * + * Return: Pointer to the obtained list member + */ +qdf_list_t *wlan_serialization_get_list_from_pdev_queue( + struct wlan_ser_pdev_obj *pdev_obj, + enum wlan_serialization_cmd_type cmd_type, + uint8_t is_active_cmd); + +/** + * wlan_serialization_get_list_from_vdev_queue() - Get list member from the vdev + * queue for the given cmd type + * @vdev_obj: Serialization private vdev object + * @cmd_type: Serialization command type i.e scan or non-scan + * @is_active_cmd: Get list from active queue or pending queue + * + * Return: Pointer to the obtained list member + */ +qdf_list_t *wlan_serialization_get_list_from_vdev_queue( + struct wlan_ser_vdev_obj *vdev_obj, + enum wlan_serialization_cmd_type cmd_type, + uint8_t is_active_cmd); + +/** + * wlan_serialization_add_cmd_to_pdev_queue() - Add given cmd to the pdev + * queue for the given cmd type + * @pdev_obj: Serialization private pdev object + * @cmd_list: Pointer to command list containing the command + * @is_cmd_for_active_queue: Add to active queue or pending queue + * + * Return: Status of the cmd's serialization request + */ +enum wlan_serialization_status wlan_serialization_add_cmd_to_pdev_queue( + struct wlan_ser_pdev_obj *pdev_obj, + struct wlan_serialization_command_list *cmd_list, + uint8_t is_cmd_for_active_queue); + +/** + * wlan_serialization_add_cmd_to_vdev_queue() - Add given cmd to the vdev + * queue for the given cmd type + * @pdev_obj: Serialization private pdev object + * @cmd_list: Pointer to command list containing the command + * @is_cmd_for_active_queue: Add to active queue or pending queue + * + * Return: Status of the cmd's serialization request + */ +enum wlan_serialization_status wlan_serialization_add_cmd_to_vdev_queue( + struct wlan_ser_pdev_obj *pdev_obj, + struct wlan_serialization_command_list *cmd_list, + uint8_t is_cmd_for_active_queue); + +/** + * wlan_serialization_remove_cmd_from_pdev_queue() - Remove given cmd from + * the pdev queue for the given cmd type + * @pdev_obj: Serialization private pdev object + * @pcmd_list: Pointer to command list containing the command + * @cmd: Serialization command information + * @is_active_queue: Remove from active queue or pending queue + * + * Return: QDF_STATUS_SUCCESS on success, error code on failure + */ +QDF_STATUS wlan_serialization_remove_cmd_from_pdev_queue( + struct wlan_ser_pdev_obj *pdev_obj, + struct wlan_serialization_command_list **pcmd_list, + struct wlan_serialization_command *cmd, + uint8_t is_active_queue); + +/** + * wlan_serialization_remove_cmd_from_vdev_queue() - Remove given cmd from + * the vdev queue for the given cmd type + * @pdev_obj: Serialization private pdev object + * @pcmd_list: Pointer to command list containing the command + * @cmd: Serialization command information + * @is_active_queue: Remove from active queue or pending queue + * + * Return: QDF_STATUS_SUCCESS on success, error code on failure + */ +QDF_STATUS wlan_serialization_remove_cmd_from_vdev_queue( + struct wlan_ser_pdev_obj *pdev_obj, + struct wlan_serialization_command_list **pcmd_list, + struct wlan_serialization_command *cmd, + uint8_t is_active_queue); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_rules.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_rules.c new file mode 100644 index 0000000000000000000000000000000000000000..97f3024ba8db1f129f5f7ebb87b8f2a71d3f3ef2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_rules.c @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wlan_serialization_rules_i.h" + +bool +wlan_serialization_apply_scan_rules( + union wlan_serialization_rules_info *info, uint8_t comp_id) +{ + switch (comp_id) { + default: + return false; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_rules_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_rules_i.h new file mode 100644 index 0000000000000000000000000000000000000000..5035ef27eb04e2b3b863e4bd3b3c535a7587253a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_rules_i.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_rules_i.h + * This file defines the prototypes for the rules related data + * pertinent to the serialization component. + */ +#ifndef __WLAN_SERIALIZATION_RULES_I_H +#define __WLAN_SERIALIZATION_RULES_I_H + +#include +#include + +/** + * wlan_serialization_apply_scan_rules() - apply scan rules callback + * @info: rules info structure + * @comp_id: component Identifier + * + * This callback is registered with object manager during initialization and + * when serialization request is called by component, this callback handler + * applies rules depending on component. + * There will be many apply rules callback handlers in future + * + * Return: boolean + */ +bool +wlan_serialization_apply_scan_rules( + union wlan_serialization_rules_info *info, uint8_t comp_id); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_scan.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_scan.c new file mode 100644 index 0000000000000000000000000000000000000000..9c44ef211c6cfed19e15add086f99e0a6ad6043d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_scan.c @@ -0,0 +1,431 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_serialization_scan.c + * This file defines the functions which deals with + * serialization scan commands. + */ + +#include "wlan_serialization_utils_i.h" +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_api.h" +#include "wlan_serialization_scan_i.h" +#include +#include +#include +#include + +void +wlan_serialization_active_scan_cmd_count_handler(struct wlan_objmgr_psoc *psoc, + void *obj, void *arg) +{ + struct wlan_objmgr_pdev *pdev = obj; + struct wlan_ser_pdev_obj *ser_pdev_obj; + struct wlan_serialization_pdev_queue *pdev_q; + uint32_t *count = arg; + + if (!pdev) { + ser_err("invalid pdev"); + return; + } + + ser_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_SERIALIZATION); + + pdev_q = &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_SCAN]; + *count += wlan_serialization_list_size(&pdev_q->active_list); +} + +bool +wlan_serialization_is_scan_pending_queue_empty( + struct wlan_serialization_command *cmd) +{ + struct wlan_objmgr_pdev *pdev; + struct wlan_ser_pdev_obj *ser_pdev_obj = NULL; + struct wlan_serialization_pdev_queue *pdev_q; + bool status = false; + + pdev = wlan_serialization_get_pdev_from_cmd(cmd); + ser_pdev_obj = wlan_serialization_get_pdev_obj(pdev); + + pdev_q = &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_SCAN]; + + if (qdf_list_empty(&pdev_q->pending_list)) + status = true; + + return status; +} + +bool +wlan_serialization_is_active_scan_cmd_allowed( + struct wlan_serialization_command *cmd) +{ + uint32_t count = 0; + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_objmgr_psoc *psoc; + bool status = false; + + pdev = wlan_serialization_get_pdev_from_cmd(cmd); + if (!pdev) { + ser_err("invalid pdev"); + goto error; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + ser_err("invalid psoc"); + goto error; + } + + wlan_objmgr_iterate_obj_list( + psoc, WLAN_PDEV_OP, + wlan_serialization_active_scan_cmd_count_handler, + &count, 1, WLAN_SERIALIZATION_ID); + if (count < ucfg_scan_get_max_active_scans(psoc)) + status = true; + +error: + return status; +} + +bool wlan_ser_match_cmd_scan_id( + qdf_list_node_t *nnode, + struct wlan_serialization_command **cmd, + uint16_t scan_id, struct wlan_objmgr_vdev *vdev) +{ + struct wlan_serialization_command_list *cmd_list = NULL; + bool match_found = false; + + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, + pdev_node); + if ((cmd_list->cmd.cmd_id == scan_id) && + (cmd_list->cmd.vdev == vdev)) { + *cmd = &cmd_list->cmd; + match_found = true; + }; + + ser_debug("match found: %d", match_found); + + return match_found; +} + +enum wlan_serialization_status +wlan_ser_add_scan_cmd( + struct wlan_ser_pdev_obj *ser_pdev_obj, + struct wlan_serialization_command_list *cmd_list, + uint8_t is_cmd_for_active_queue) +{ + enum wlan_serialization_status status; + + status = wlan_serialization_add_cmd_to_pdev_queue( + ser_pdev_obj, cmd_list, + is_cmd_for_active_queue); + + return status; +} + +QDF_STATUS +wlan_ser_remove_scan_cmd( + struct wlan_ser_pdev_obj *ser_pdev_obj, + struct wlan_serialization_command_list **pcmd_list, + struct wlan_serialization_command *cmd, + uint8_t is_active_cmd) +{ + QDF_STATUS status; + + status = wlan_serialization_remove_cmd_from_pdev_queue( + ser_pdev_obj, pcmd_list, cmd, is_active_cmd); + + return status; +} + +enum wlan_serialization_cmd_status +wlan_ser_cancel_scan_cmd( + struct wlan_ser_pdev_obj *ser_obj, + struct wlan_objmgr_pdev *pdev, struct wlan_objmgr_vdev *vdev, + struct wlan_serialization_command *cmd, + enum wlan_serialization_cmd_type cmd_type, + uint8_t is_active_queue) +{ + qdf_list_t *queue; + struct wlan_serialization_pdev_queue *pdev_q; + uint32_t qsize; + struct wlan_serialization_command_list *cmd_list = NULL; + struct wlan_serialization_command cmd_bkup; + qdf_list_node_t *nnode = NULL, *pnode = NULL; + enum wlan_serialization_cmd_status status = WLAN_SER_CMD_NOT_FOUND; + struct wlan_objmgr_psoc *psoc = NULL; + QDF_STATUS qdf_status; + + pdev_q = &ser_obj->pdev_q[SER_PDEV_QUEUE_COMP_SCAN]; + + if (is_active_queue) + queue = &pdev_q->active_list; + else + queue = &pdev_q->pending_list; + + if (pdev) + psoc = wlan_pdev_get_psoc(pdev); + else if (vdev) + psoc = wlan_vdev_get_psoc(vdev); + else if (cmd && cmd->vdev) + psoc = wlan_vdev_get_psoc(cmd->vdev); + else + ser_debug("Can't find psoc"); + + wlan_serialization_acquire_lock(&pdev_q->pdev_queue_lock); + + qsize = wlan_serialization_list_size(queue); + while (!wlan_serialization_list_empty(queue) && qsize--) { + if (wlan_serialization_get_cmd_from_queue( + queue, &nnode) != QDF_STATUS_SUCCESS) { + ser_err("can't read cmd from queue"); + status = WLAN_SER_CMD_NOT_FOUND; + break; + } + cmd_list = + qdf_container_of(nnode, + struct wlan_serialization_command_list, + pdev_node); + + if (cmd && !wlan_serialization_match_cmd_id_type( + nnode, cmd, + WLAN_SER_PDEV_NODE)) { + pnode = nnode; + continue; + } + if (vdev && + !wlan_serialization_match_cmd_vdev(nnode, + vdev, + WLAN_SER_PDEV_NODE)) { + pnode = nnode; + continue; + } + + if (pdev && + !wlan_serialization_match_cmd_pdev(nnode, + pdev, + WLAN_SER_PDEV_NODE)) { + pnode = nnode; + continue; + } + + /* + * active queue can't be removed directly, requester needs to + * wait for active command response and send remove request for + * active command separately + */ + if (is_active_queue) { + if (!psoc || !cmd_list) { + ser_err("psoc:0x%pK, cmd_list:0x%pK", + psoc, cmd_list); + status = WLAN_SER_CMD_NOT_FOUND; + break; + } + + /* Cancel request received for a cmd in active + * queue which has not been activated yet, we mark + * it as CMD_ACTIVE_MARKED_FOR_CANCEL and remove + * the cmd after activation + */ + if (qdf_atomic_test_bit(CMD_MARKED_FOR_ACTIVATION, + &cmd_list->cmd_in_use)) { + qdf_atomic_set_bit(CMD_ACTIVE_MARKED_FOR_CANCEL, + &cmd_list->cmd_in_use); + status = WLAN_SER_CMD_MARKED_FOR_ACTIVATION; + continue; + } + + qdf_status = wlan_serialization_find_and_stop_timer( + psoc, &cmd_list->cmd, + SER_CANCEL); + if (QDF_IS_STATUS_ERROR(qdf_status)) { + ser_err("Can't fix timer for active cmd"); + status = WLAN_SER_CMD_NOT_FOUND; + /* + * This should not happen, as an active command + * should always have the timer. + */ + QDF_BUG(0); + break; + } + + status = WLAN_SER_CMD_IN_ACTIVE_LIST; + } + + qdf_mem_copy(&cmd_bkup, &cmd_list->cmd, + sizeof(struct wlan_serialization_command)); + + qdf_status = + wlan_serialization_remove_node(queue, + &cmd_list->pdev_node); + + if (qdf_status != QDF_STATUS_SUCCESS) { + ser_err("can't remove cmd from pdev queue"); + status = WLAN_SER_CMD_NOT_FOUND; + break; + } + + qdf_mem_zero(&cmd_list->cmd, + sizeof(struct wlan_serialization_command)); + cmd_list->cmd_in_use = 0; + qdf_status = wlan_serialization_insert_back( + &pdev_q->cmd_pool_list, + &cmd_list->pdev_node); + + if (QDF_STATUS_SUCCESS != qdf_status) { + ser_err("can't remove cmd from queue"); + status = WLAN_SER_CMD_NOT_FOUND; + break; + } + nnode = pnode; + + wlan_ser_update_cmd_history(pdev_q, &cmd_bkup, + SER_CANCEL, false, is_active_queue); + + wlan_serialization_release_lock(&pdev_q->pdev_queue_lock); + /* + * call pending cmd's callback to notify that + * it is being removed + */ + if (cmd_bkup.cmd_cb) { + ser_debug("Cancel command: type %d id %d and Release memory", + cmd_bkup.cmd_type, cmd_bkup.cmd_id); + cmd_bkup.cmd_cb(&cmd_bkup, WLAN_SER_CB_CANCEL_CMD); + cmd_bkup.cmd_cb(&cmd_bkup, WLAN_SER_CB_RELEASE_MEM_CMD); + } + + wlan_serialization_acquire_lock(&pdev_q->pdev_queue_lock); + + if (!is_active_queue) + status = WLAN_SER_CMD_IN_PENDING_LIST; + } + + wlan_serialization_release_lock(&pdev_q->pdev_queue_lock); + + return status; +} + +enum wlan_serialization_status wlan_ser_move_scan_pending_to_active( + struct wlan_ser_pdev_obj *ser_pdev_obj) +{ + struct wlan_serialization_command_list *pending_cmd_list = NULL; + struct wlan_serialization_command_list *active_cmd_list; + struct wlan_serialization_command cmd_to_remove; + enum wlan_serialization_status status = WLAN_SER_CMD_DENIED_UNSPECIFIED; + QDF_STATUS qdf_status; + struct wlan_serialization_pdev_queue *pdev_queue; + qdf_list_t *pending_queue; + qdf_list_node_t *pending_node = NULL; + + pdev_queue = &ser_pdev_obj->pdev_q[SER_PDEV_QUEUE_COMP_SCAN]; + + if (!ser_pdev_obj) { + ser_err("Can't find ser_pdev_obj"); + goto error; + } + + wlan_serialization_acquire_lock(&pdev_queue->pdev_queue_lock); + + pending_queue = &pdev_queue->pending_list; + + if (wlan_serialization_list_empty(pending_queue)) { + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + goto error; + } + + qdf_status = wlan_serialization_peek_front(pending_queue, + &pending_node); + if (QDF_STATUS_SUCCESS != qdf_status) { + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + ser_err("can't read from pending queue"); + goto error; + } + + pending_cmd_list = + qdf_container_of(pending_node, + struct wlan_serialization_command_list, + pdev_node); + + if (!pending_cmd_list) { + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + goto error; + } + + qdf_mem_copy(&cmd_to_remove, &pending_cmd_list->cmd, + sizeof(struct wlan_serialization_command)); + + if (!wlan_serialization_is_active_scan_cmd_allowed(&cmd_to_remove)) { + ser_debug("active scan command not allowed"); + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + goto error; + } + + qdf_status = + wlan_ser_remove_scan_cmd(ser_pdev_obj, + &pending_cmd_list, + &cmd_to_remove, false); + + wlan_ser_update_cmd_history(pdev_queue, &pending_cmd_list->cmd, + SER_PENDING_TO_ACTIVE, + false, false); + + if (QDF_STATUS_SUCCESS != qdf_status) { + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + ser_err("Can't remove from pendingQ id %d type %d", + pending_cmd_list->cmd.cmd_id, + pending_cmd_list->cmd.cmd_type); + QDF_ASSERT(0); + status = WLAN_SER_CMD_DENIED_UNSPECIFIED; + goto error; + } + + active_cmd_list = pending_cmd_list; + + status = wlan_ser_add_scan_cmd(ser_pdev_obj, + active_cmd_list, true); + + if (WLAN_SER_CMD_ACTIVE != status) { + wlan_serialization_insert_back( + &pdev_queue->cmd_pool_list, + &active_cmd_list->pdev_node); + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + status = WLAN_SER_CMD_DENIED_UNSPECIFIED; + ser_err("Can't add cmd to activeQ id-%d type-%d", + active_cmd_list->cmd.cmd_id, + active_cmd_list->cmd.cmd_type); + QDF_ASSERT(0); + goto error; + } + + qdf_atomic_set_bit(CMD_MARKED_FOR_ACTIVATION, + &active_cmd_list->cmd_in_use); + + wlan_ser_update_cmd_history(pdev_queue, &active_cmd_list->cmd, + SER_PENDING_TO_ACTIVE, + true, true); + + wlan_serialization_release_lock(&pdev_queue->pdev_queue_lock); + + wlan_serialization_activate_cmd(active_cmd_list, ser_pdev_obj, + SER_PENDING_TO_ACTIVE); +error: + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_scan_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_scan_i.h new file mode 100644 index 0000000000000000000000000000000000000000..375f3aaea3bf94b76d4e32b3571a166167874ca3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_scan_i.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_scan.h + * This file defines the prototypes for functions which deals with + * serialization non scan commands. + */ + +#ifndef __WLAN_SERIALIZATION_SCAN_I_H +#define __WLAN_SERIALIZATION_SCAN_I_H + +#include +#include +#include +#include +#include "wlan_serialization_utils_i.h" +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_api.h" +#include "wlan_serialization_queue_i.h" +#include "wlan_serialization_internal_i.h" + +/** + * wlan_serialization_active_scan_cmd_count_handler() - count active scan cmds + * @psoc: pointer to soc strucutre + * @obj : pointer to pdev object + * @arg: pointer to argument + * + * This API will be called while iterating each pdev object and it will count + * number of scan commands present in that pdev object's active queue. count + * will be updated in *arg + * + * Return: none + */ +void +wlan_serialization_active_scan_cmd_count_handler(struct wlan_objmgr_psoc *psoc, + void *obj, void *arg); + +/** + * wlan_serialization_is_scan_pending_queue_empty() + * + * @cmd: Serialization command information + * + * This API will be find out if scan cmd pending queue is empty. + * + * Return: true or false + */ +bool +wlan_serialization_is_scan_pending_queue_empty( + struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_is_active_scan_cmd_allowed() - find if scan cmd allowed + * @pdev: pointer to pdev object + * + * This API will be called to find out if active scan cmd is allowed. It has + * to iterate through all pdev to find out total number of active scan cmds. + * If total number of active scan cmds reach to allowed threshold then don't + * allow more scan cmd. + * + * Return: true or false + */ +bool +wlan_serialization_is_active_scan_cmd_allowed( + struct wlan_serialization_command *cmd); + +/** + * wlan_ser_match_cmd_scan_id() - Compare the scan id and the vdev to the given + * command + * @cmd: Serialization command information + * @scan_id: Scan id to be compared + * @vdev: Pointer to object manager vdev that needs to compared + * + * Return: true if match found, else false + */ +bool +wlan_ser_match_cmd_scan_id(qdf_list_node_t *nnode, + struct wlan_serialization_command **cmd, + uint16_t scan_id, + struct wlan_objmgr_vdev *vdev); + +/** + * wlan_ser_add_scan_cmd() - Add a scan cmd to serialization queue + * @ser_pdev_obj: Serialization private pdev object + * @cmd_list: Command list with command info that is to be queued + * @is_cmd_for_active_queue: If the cmd to be enqueued in active queue or + * pending queue + * + * Return: Status of the cmd's serialization request + */ +enum wlan_serialization_status +wlan_ser_add_scan_cmd(struct wlan_ser_pdev_obj *ser_pdev_obj, + struct wlan_serialization_command_list *cmd_list, + uint8_t is_cmd_for_active_queue); + +/** + * wlan_ser_remove_scan_cmd() - Remove a scan cmd from the given queue + * @ser_pdev_obj: Serialization private pdev object + * @pcmd_list: Pointer to command list containing the command + * @cmd: Serialization command information + * @is_active_cmd: If the cmd has to be removed from active queue or pending + * queue + * + * Return: QDF_STATUS_SUCCESS on successfully removing the cmd else + * QDF_STATUS_E_FAILURE + */ +QDF_STATUS +wlan_ser_remove_scan_cmd(struct wlan_ser_pdev_obj *ser_pdev_obj, + struct wlan_serialization_command_list **pcmd_list, + struct wlan_serialization_command *cmd, + uint8_t is_active_cmd); + +/** + * wlan_ser_cancel_scan_cmd() - Cancel a scan cmd from the given queue + * @ser_obj: Serialization private pdev object + * @pdev: Pointer to object manager pdev + * @vdev: Pointer to object manager vdev + * @cmd: Serialization command information + * @cmd_type: Serialization command type to be cancelled + * @is_active_queue: If the cmd has to be removed from active queue or pending + * queue + * + * Return: Status specifying the cancel of a command from the given queue + */ +enum wlan_serialization_cmd_status +wlan_ser_cancel_scan_cmd(struct wlan_ser_pdev_obj *ser_obj, + struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev, + struct wlan_serialization_command *cmd, + enum wlan_serialization_cmd_type cmd_type, + uint8_t is_active_queue); + +/** + * wlan_ser_move_scan_pending_to_active() - Move a scan cmd from pending + * queue to active queue + * @ser_pdev_obj: Serialization private pdev object + * + * Return: Status of the cmd's serialization request + */ +enum wlan_serialization_status +wlan_ser_move_scan_pending_to_active( + struct wlan_ser_pdev_obj *ser_pdev_obj); +#endif + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utf.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utf.c new file mode 100644 index 0000000000000000000000000000000000000000..959e353687fc382fc52659b835c29a9694d576e8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utf.c @@ -0,0 +1,1009 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implements the unit test framework for serialization module + */ + +#include +#include +#include +#include +#include +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_utf_i.h" + +struct wlan_ser_utf_vdev_info ser_utf_vdev[WLAN_SER_UTF_MAX_VDEVS]; + +struct wlan_ser_utf_data * +wlan_ser_utf_data_alloc(struct wlan_ser_utf_data **ser_data, + struct wlan_objmgr_vdev *vdev, + uint8_t cmd_id) +{ + struct wlan_ser_utf_data *data; + + data = qdf_mem_malloc(sizeof(*data)); + + if (!data) { + QDF_ASSERT(0); + return data; + } + + data->id = cmd_id; + WLAN_SER_DATA_STR(data->str, wlan_vdev_get_id(vdev), cmd_id); + + *ser_data = data; + return data; +} + +enum wlan_serialization_status +wlan_ser_utf_add_cmd(struct wlan_serialization_command *cmd) +{ + enum wlan_serialization_status status; + struct wlan_ser_utf_data *data = cmd->umac_cmd; + + cmd->cmd_timeout_duration = WLAN_SER_UTF_TEST_CMD_TIMEOUT_MS; + cmd->source = WLAN_UMAC_COMP_SERIALIZATION; + cmd->cmd_cb = wlan_ser_utf_cb; + + status = wlan_serialization_request(cmd); + ser_debug("ADD : cmd_type:%d %9s %s status: %s", + cmd->cmd_type, SER_UTF_BLOCK_STR(cmd->is_blocking), data->str, + wlan_serialization_status_strings[status]); + + return status; +} + +enum wlan_serialization_status +wlan_ser_utf_add_scan_cmd(struct wlan_objmgr_vdev *vdev, + uint32_t cmd_id, void *umac_cmd, + bool is_high_priority) +{ + struct wlan_serialization_command cmd; + + cmd.vdev = vdev; + cmd.cmd_id = cmd_id; + cmd.umac_cmd = umac_cmd; + cmd.is_high_priority = is_high_priority; + cmd.is_blocking = false; + cmd.cmd_type = WLAN_SER_CMD_SCAN; + + return wlan_ser_utf_add_cmd(&cmd); +} + +enum wlan_serialization_status +wlan_ser_utf_add_nonscan_cmd(struct wlan_objmgr_vdev *vdev, + uint32_t cmd_id, void *umac_cmd, + bool is_high_priority, bool is_blocking) +{ + struct wlan_serialization_command cmd; + + cmd.vdev = vdev; + cmd.cmd_id = cmd_id; + cmd.umac_cmd = umac_cmd; + cmd.is_blocking = is_blocking; + cmd.is_high_priority = is_high_priority; + cmd.cmd_type = WLAN_SER_CMD_NONSCAN; + + return wlan_ser_utf_add_cmd(&cmd); +} + +void wlan_ser_utf_remove_scan_cmd(struct wlan_objmgr_vdev *vdev, + uint32_t cmd_id) +{ + struct wlan_serialization_queued_cmd_info cmd; + + cmd.vdev = vdev; + cmd.cmd_id = cmd_id; + cmd.cmd_type = WLAN_SER_CMD_SCAN; + cmd.requestor = WLAN_UMAC_COMP_SERIALIZATION; + + wlan_serialization_remove_cmd(&cmd); +} + +void wlan_ser_utf_remove_nonscan_cmd(struct wlan_objmgr_vdev *vdev, + uint32_t cmd_id) +{ + struct wlan_serialization_queued_cmd_info cmd; + + cmd.vdev = vdev; + cmd.cmd_id = cmd_id; + cmd.cmd_type = WLAN_SER_CMD_NONSCAN; + cmd.requestor = WLAN_UMAC_COMP_SERIALIZATION; + + wlan_serialization_remove_cmd(&cmd); +} + +enum wlan_serialization_cmd_status +wlan_ser_utf_cancel_scan_cmd(struct wlan_objmgr_vdev *vdev, + uint32_t cmd_id, uint8_t queue_type, + enum wlan_serialization_cancel_type req_type) +{ + struct wlan_serialization_queued_cmd_info cmd; + + cmd.vdev = vdev; + cmd.cmd_id = cmd_id; + cmd.queue_type = queue_type; + cmd.req_type = req_type; + cmd.cmd_type = WLAN_SER_CMD_SCAN; + cmd.requestor = WLAN_UMAC_COMP_SERIALIZATION; + + return wlan_serialization_cancel_request(&cmd); +} + +enum wlan_serialization_cmd_status +wlan_ser_utf_cancel_nonscan_cmd(struct wlan_objmgr_vdev *vdev, + uint32_t cmd_id, uint8_t queue_type, + enum wlan_serialization_cancel_type req_type) +{ + struct wlan_serialization_queued_cmd_info cmd; + + cmd.vdev = vdev; + cmd.cmd_id = cmd_id; + cmd.queue_type = queue_type; + cmd.req_type = req_type; + cmd.cmd_type = WLAN_SER_CMD_NONSCAN; + cmd.requestor = WLAN_UMAC_COMP_SERIALIZATION; + + return wlan_serialization_cancel_request(&cmd); +} + +void wlan_ser_utf_remove_start_bss_cmd(struct wlan_objmgr_vdev *vdev, + uint32_t cmd_id) +{ + struct wlan_serialization_queued_cmd_info cmd; + + cmd.vdev = vdev; + cmd.cmd_id = cmd_id; + cmd.cmd_type = WLAN_SER_CMD_VDEV_START_BSS; + cmd.requestor = WLAN_UMAC_COMP_SERIALIZATION; + + wlan_serialization_remove_cmd(&cmd); +} + +void wlan_ser_utf_remove_stop_bss_cmd(struct wlan_objmgr_vdev *vdev, + uint32_t cmd_id) +{ + struct wlan_serialization_queued_cmd_info cmd; + + cmd.vdev = vdev; + cmd.cmd_id = cmd_id; + cmd.cmd_type = WLAN_SER_CMD_VDEV_STOP_BSS; + cmd.requestor = WLAN_UMAC_COMP_SERIALIZATION; + + wlan_serialization_remove_cmd(&cmd); +} + +enum wlan_serialization_cmd_status +wlan_ser_utf_cancel_start_bss_cmd(struct wlan_objmgr_vdev *vdev, + uint32_t cmd_id, uint8_t queue_type, + enum wlan_serialization_cancel_type req_type) +{ + struct wlan_serialization_queued_cmd_info cmd; + + cmd.vdev = vdev; + cmd.cmd_id = cmd_id; + cmd.queue_type = queue_type; + cmd.req_type = req_type; + cmd.cmd_type = WLAN_SER_CMD_VDEV_START_BSS; + cmd.requestor = WLAN_UMAC_COMP_SERIALIZATION; + + return wlan_serialization_cancel_request(&cmd); +} + +enum wlan_serialization_cmd_status +wlan_ser_utf_cancel_stop_bss_cmd(struct wlan_objmgr_vdev *vdev, + uint32_t cmd_id, uint8_t queue_type, + enum wlan_serialization_cancel_type req_type) +{ + struct wlan_serialization_queued_cmd_info cmd; + + cmd.vdev = vdev; + cmd.cmd_id = cmd_id; + cmd.queue_type = queue_type; + cmd.req_type = req_type; + cmd.cmd_type = WLAN_SER_CMD_VDEV_STOP_BSS; + cmd.requestor = WLAN_UMAC_COMP_SERIALIZATION; + + return wlan_serialization_cancel_request(&cmd); +} + +enum wlan_serialization_status +wlan_ser_utf_add_vdev_stop_bss_cmd(struct wlan_objmgr_vdev *vdev, + uint32_t cmd_id, void *umac_cmd, + bool is_high_priority, bool is_blocking) +{ + struct wlan_serialization_command cmd; + uint8_t queue_type; + enum wlan_serialization_cancel_type req_type; + + cmd.vdev = vdev; + cmd.cmd_id = cmd_id; + cmd.umac_cmd = umac_cmd; + cmd.is_blocking = is_blocking; + cmd.is_high_priority = is_high_priority; + cmd.cmd_type = WLAN_SER_CMD_VDEV_STOP_BSS; + + /* Command filtering logic */ + req_type = WLAN_SER_CANCEL_NON_SCAN_CMD; + queue_type = WLAN_SERIALIZATION_PENDING_QUEUE; + wlan_ser_utf_cancel_start_bss_cmd(vdev, cmd_id, + queue_type, req_type); + + wlan_ser_utf_cancel_stop_bss_cmd(vdev, cmd_id, + queue_type, req_type); + + if (wlan_serialization_is_cmd_present_in_active_queue(NULL, &cmd)) + return WLAN_SER_CMD_ACTIVE; + + return wlan_ser_utf_add_cmd(&cmd); +} + +enum wlan_serialization_status +wlan_ser_utf_add_vdev_start_bss_cmd(struct wlan_objmgr_vdev *vdev, + uint32_t cmd_id, void *umac_cmd, + bool is_high_priority, bool is_blocking) +{ + struct wlan_serialization_command cmd; + uint8_t queue_type; + enum wlan_serialization_cancel_type req_type; + struct wlan_ser_utf_data *data; + + cmd.vdev = vdev; + cmd.cmd_id = cmd_id; + cmd.umac_cmd = umac_cmd; + cmd.is_blocking = is_blocking; + cmd.is_high_priority = is_high_priority; + cmd.cmd_type = WLAN_SER_CMD_VDEV_START_BSS; + + /* Command filtering logic */ + req_type = WLAN_SER_CANCEL_NON_SCAN_CMD; + queue_type = WLAN_SERIALIZATION_PENDING_QUEUE; + wlan_ser_utf_cancel_start_bss_cmd(vdev, cmd_id, + queue_type, req_type); + + if (wlan_serialization_is_cmd_present_in_active_queue(NULL, &cmd)) { + cmd.cmd_type = WLAN_SER_CMD_VDEV_STOP_BSS; + if (!wlan_serialization_is_cmd_present_in_pending_queue( + NULL, &cmd)) { + if (!wlan_ser_utf_data_alloc(&data, vdev, cmd_id)) + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + wlan_ser_utf_add_vdev_stop_bss_cmd( + vdev, cmd_id, (void *)data, + is_high_priority, is_blocking); + } + } + + cmd.cmd_type = WLAN_SER_CMD_VDEV_START_BSS; + return wlan_ser_utf_add_cmd(&cmd); +} + +QDF_STATUS wlan_ser_utf_cb(struct wlan_serialization_command *cmd, + enum wlan_serialization_cb_reason reason) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint8_t vdev_id; + struct wlan_ser_utf_data *data; + + if (!cmd) { + ser_err("Error: reason:%d", reason); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + vdev_id = wlan_vdev_get_id(cmd->vdev); + data = cmd->umac_cmd; + if (!data) { + ser_err("Error: reason:%d", reason); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + switch (reason) { + case WLAN_SER_CB_ACTIVATE_CMD: + ser_debug("ACTIVATE: cmd_type:%d %9s %s\n", cmd->cmd_type, + SER_UTF_BLOCK_STR(cmd->is_blocking), data->str); + break; + + case WLAN_SER_CB_CANCEL_CMD: + ser_debug("CANCEL : cmd_type:%d %9s %s", cmd->cmd_type, + SER_UTF_BLOCK_STR(cmd->is_blocking), data->str); + break; + + case WLAN_SER_CB_ACTIVE_CMD_TIMEOUT: + ser_debug("TIMEOUT : cmd_type:%d %9s %s", cmd->cmd_type, + SER_UTF_BLOCK_STR(cmd->is_blocking), data->str); + qdf_mem_free(data); + QDF_ASSERT(0); + status = QDF_STATUS_E_INVAL; + break; + + case WLAN_SER_CB_RELEASE_MEM_CMD: + ser_debug("RELEASE : cmd_type:%d %9s %s", cmd->cmd_type, + SER_UTF_BLOCK_STR(cmd->is_blocking), data->str); + qdf_mem_free(data); + break; + + default: + status = QDF_STATUS_E_INVAL; + break; + } + + return status; +} + +static void wlan_ser_utf_scan_timer_cb(void *arg) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)arg; + uint8_t vdev_id = wlan_vdev_get_id(vdev); + + wlan_ser_utf_remove_scan_cmd(vdev, ser_utf_vdev[vdev_id].ser_count++); +} + +static void wlan_ser_utf_nonscan_timer_cb(void *arg) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)arg; + uint8_t vdev_id = wlan_vdev_get_id(vdev); + + wlan_ser_utf_remove_nonscan_cmd(vdev, + ser_utf_vdev[vdev_id].ser_count++); +} + +void wlan_ser_utf_run(struct wlan_objmgr_vdev *vdev, uint8_t scan_cmd, + uint8_t max_cmds, + bool is_high_priority, bool is_blocking) +{ + struct wlan_ser_utf_data *data; + uint8_t id; + uint8_t vdev_id = wlan_vdev_get_id(vdev); + enum wlan_serialization_status ret; + + if (!max_cmds) + max_cmds = WLAN_SER_UTF_SCAN_CMD_TESTS; + + ser_utf_vdev[vdev_id].ser_count = 0; + for (id = 0; id < max_cmds; id++) { + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + return; + + if (scan_cmd) + ret = wlan_ser_utf_add_scan_cmd(vdev, id, data, + is_high_priority); + else + ret = wlan_ser_utf_add_nonscan_cmd(vdev, id, + data, + is_high_priority, + is_blocking); + } + + for (id = 0; id < max_cmds; id++) { + if (scan_cmd) + qdf_timer_mod( + &ser_utf_vdev[vdev_id].utf_scan_timer[id], + WLAN_SER_UTF_TIMER_TIMEOUT_MS); + else + qdf_timer_mod( + &ser_utf_vdev[vdev_id].utf_nonscan_timer[id], + WLAN_SER_UTF_TIMER_TIMEOUT_MS); + } +} + +static void wlan_ser_utf_init_iter_op(struct wlan_objmgr_pdev *pdev, + void *obj, void *args) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)obj; + uint8_t vdev_id = wlan_vdev_get_id(vdev); + uint8_t id; + + ser_utf_vdev[vdev_id].vdev = vdev; + for (id = 0; id < WLAN_SER_UTF_SCAN_CMD_TESTS; id++) { + qdf_timer_init(NULL, + &ser_utf_vdev[vdev_id].utf_scan_timer[id], + wlan_ser_utf_scan_timer_cb, + (void *)vdev, QDF_TIMER_TYPE_WAKE_APPS); + qdf_timer_init(NULL, + &ser_utf_vdev[vdev_id].utf_nonscan_timer[id], + wlan_ser_utf_nonscan_timer_cb, + (void *)vdev, QDF_TIMER_TYPE_WAKE_APPS); + } +} + +static void wlan_ser_utf_deinit_iter_op(struct wlan_objmgr_pdev *pdev, + void *obj, void *args) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)obj; + uint8_t vdev_id = wlan_vdev_get_id(vdev); + uint8_t id; + + for (id = 0; id < WLAN_SER_UTF_SCAN_CMD_TESTS; id++) { + qdf_timer_free( + &ser_utf_vdev[vdev_id].utf_nonscan_timer[id]); + qdf_timer_free( + &ser_utf_vdev[vdev_id].utf_scan_timer[id]); + } +} + +static void wlan_ser_utf_vdev_iter_op(struct wlan_objmgr_pdev *pdev, + void *obj, void *args) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)obj; + uint8_t is_blocking = *(uint8_t *)args; + + wlan_ser_utf_run(vdev, false, 2, false, is_blocking); +} + +/* + * List of available APIs + * 1. wlan_serialization_request( + * struct wlan_serialization_command *cmd) + * 2. wlan_serialization_remove_cmd( + * struct wlan_serialization_queued_cmd_info *cmd_info) + * 3. wlan_serialization_cancel_request( + * struct wlan_serialization_queued_cmd_info *cmd_info) + * sub_val: + * 1st byte : cmd_id + * 2nd byte : scan_cmd + * 3rd byte : queue_type + * 4th byte : req_type + */ +int wlan_ser_utf_main(struct wlan_objmgr_vdev *vdev, uint8_t val, + uint32_t sub_val) +{ + uint8_t id; + uint8_t vdev_id; + static uint8_t wlan_ser_utf_init; + struct wlan_ser_utf_data *data; + bool is_blocking; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + uint8_t cmd_id = (uint8_t)sub_val; + uint8_t scan_cmd = (uint8_t)(sub_val >> 8); + uint8_t queue_type = (uint8_t)(sub_val >> 16); + enum wlan_serialization_cancel_type req_type = (uint8_t)(sub_val >> 24); + + if (wlan_ser_utf_init == 0 && val != 1) { + ser_err("Init UTF before running test cases"); + return 0; + } + + switch (val) { + case SER_UTF_TC_DEINIT: + if (wlan_objmgr_pdev_try_get_ref(pdev, WLAN_SERIALIZATION_ID) == + QDF_STATUS_SUCCESS) { + wlan_objmgr_pdev_iterate_obj_list( + pdev, WLAN_VDEV_OP, + wlan_ser_utf_deinit_iter_op, + NULL, 0, WLAN_SERIALIZATION_ID); + wlan_objmgr_pdev_release_ref(pdev, + WLAN_SERIALIZATION_ID); + ser_err("Serialization Timer Deinit Done"); + } + break; + case SER_UTF_TC_INIT: + if (wlan_objmgr_pdev_try_get_ref(pdev, WLAN_SERIALIZATION_ID) == + QDF_STATUS_SUCCESS) { + wlan_objmgr_pdev_iterate_obj_list( + pdev, WLAN_VDEV_OP, + wlan_ser_utf_init_iter_op, + NULL, 0, WLAN_SERIALIZATION_ID); + wlan_objmgr_pdev_release_ref(pdev, + WLAN_SERIALIZATION_ID); + wlan_ser_utf_init = 1; + ser_err("Serialization Timer Init Done"); + } + break; + case SER_UTF_TC_ADD: + ser_err("Add:%s, id:%d", scan_cmd ? "SCAN" : "NONSCAN", cmd_id); + if (!wlan_ser_utf_data_alloc(&data, vdev, cmd_id)) + break; + + if (scan_cmd) + wlan_ser_utf_add_scan_cmd(vdev, cmd_id, data, false); + else + wlan_ser_utf_add_nonscan_cmd(vdev, cmd_id, data, + false, false); + break; + case SER_UTF_TC_REMOVE: + ser_err("Remove:%s, id:%d", scan_cmd ? "SCAN" : "NONSCAN", + cmd_id); + if (scan_cmd) + wlan_ser_utf_remove_scan_cmd(vdev, cmd_id); + else + wlan_ser_utf_remove_nonscan_cmd(vdev, cmd_id); + break; + case SER_UTF_TC_CANCEL: + ser_err("Cancel:%s, id:%d", scan_cmd ? "SCAN" : "NONSCAN", + cmd_id); + if (scan_cmd) + wlan_ser_utf_cancel_scan_cmd(vdev, cmd_id, queue_type, + req_type); + else + wlan_ser_utf_cancel_nonscan_cmd(vdev, cmd_id, + queue_type, req_type); + break; + case SER_UTF_TC_SINGLE_SCAN: + id = 1; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_scan_cmd(vdev, id, data, false); + wlan_ser_utf_remove_scan_cmd(vdev, id); + break; + case SER_UTF_TC_MULTI_SCAN: + wlan_ser_utf_run(vdev, true, 10, false, false); + break; + case SER_UTF_TC_MAX_SCAN: + wlan_ser_utf_run(vdev, true, 0, false, false); + break; + case SER_UTF_TC_SINGLE_NONSCAN: + id = 1; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + wlan_ser_utf_remove_nonscan_cmd(vdev, id); + break; + case SER_UTF_TC_MULTI_NONSCAN: + wlan_ser_utf_run(vdev, false, 10, false, false); + break; + case SER_UTF_TC_MAX_NONSCAN: + wlan_ser_utf_run(vdev, false, 0, false, false); + break; + case SER_UTF_TC_MULTI_VDEV_NONSCAN: + is_blocking = false; + if (wlan_objmgr_pdev_try_get_ref(pdev, WLAN_SERIALIZATION_ID) == + QDF_STATUS_SUCCESS) { + wlan_objmgr_pdev_iterate_obj_list( + pdev, WLAN_VDEV_OP, + wlan_ser_utf_vdev_iter_op, + &is_blocking, 0, WLAN_SERIALIZATION_ID); + wlan_objmgr_pdev_release_ref(pdev, + WLAN_SERIALIZATION_ID); + } + break; + case SER_UTF_TC_CANCEL_SCAN_AC_SINGLE: + id = 1; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_scan_cmd(vdev, id, data, false); + req_type = WLAN_SER_CANCEL_SINGLE_SCAN; + queue_type = WLAN_SERIALIZATION_ACTIVE_QUEUE; + wlan_ser_utf_cancel_scan_cmd(vdev, id, queue_type, + req_type); + break; + case SER_UTF_TC_CANCEL_SCAN_AC_PDEV: + wlan_ser_utf_run(vdev, true, 15, false, false); + req_type = WLAN_SER_CANCEL_PDEV_SCANS; + queue_type = WLAN_SERIALIZATION_ACTIVE_QUEUE; + wlan_ser_utf_cancel_scan_cmd(vdev, cmd_id, queue_type, + req_type); + break; + case SER_UTF_TC_CANCEL_SCAN_AC_VDEV: + wlan_ser_utf_run(vdev, true, 15, false, false); + req_type = WLAN_SER_CANCEL_VDEV_SCANS; + queue_type = WLAN_SERIALIZATION_ACTIVE_QUEUE; + wlan_ser_utf_cancel_scan_cmd(vdev, cmd_id, queue_type, + req_type); + break; + case SER_UTF_TC_CANCEL_SCAN_PD_SINGLE: + wlan_ser_utf_run(vdev, true, 15, false, false); + req_type = WLAN_SER_CANCEL_SINGLE_SCAN; + queue_type = WLAN_SERIALIZATION_PENDING_QUEUE; + wlan_ser_utf_cancel_scan_cmd(vdev, cmd_id, queue_type, + req_type); + break; + case SER_UTF_TC_CANCEL_SCAN_PD_PDEV: + wlan_ser_utf_run(vdev, true, 15, false, false); + req_type = WLAN_SER_CANCEL_PDEV_SCANS; + queue_type = WLAN_SERIALIZATION_PENDING_QUEUE; + wlan_ser_utf_cancel_scan_cmd(vdev, cmd_id, queue_type, + req_type); + break; + case SER_UTF_TC_CANCEL_SCAN_PD_VDEV: + wlan_ser_utf_run(vdev, true, 15, false, false); + req_type = WLAN_SER_CANCEL_VDEV_SCANS; + queue_type = WLAN_SERIALIZATION_PENDING_QUEUE; + wlan_ser_utf_cancel_scan_cmd(vdev, cmd_id, queue_type, + req_type); + break; + case SER_UTF_TC_CANCEL_NONSCAN_AC_SINGLE: + req_type = WLAN_SER_CANCEL_NON_SCAN_CMD; + queue_type = WLAN_SERIALIZATION_ACTIVE_QUEUE; + wlan_ser_utf_cancel_nonscan_cmd(vdev, cmd_id, queue_type, + req_type); + break; + case SER_UTF_TC_CANCEL_NONSCAN_AC_PDEV: + req_type = WLAN_SER_CANCEL_PDEV_NON_SCAN_CMD; + queue_type = WLAN_SERIALIZATION_ACTIVE_QUEUE; + wlan_ser_utf_cancel_nonscan_cmd(vdev, cmd_id, queue_type, + req_type); + break; + case SER_UTF_TC_CANCEL_NONSCAN_AC_VDEV: + req_type = WLAN_SER_CANCEL_VDEV_NON_SCAN_CMD; + queue_type = WLAN_SERIALIZATION_ACTIVE_QUEUE; + wlan_ser_utf_cancel_nonscan_cmd(vdev, cmd_id, queue_type, + req_type); + break; + case SER_UTF_TC_CANCEL_NONSCAN_PD_SINGLE: + req_type = WLAN_SER_CANCEL_NON_SCAN_CMD; + queue_type = WLAN_SERIALIZATION_PENDING_QUEUE; + wlan_ser_utf_cancel_nonscan_cmd(vdev, cmd_id, queue_type, + req_type); + break; + case SER_UTF_TC_CANCEL_NONSCAN_PD_PDEV: + req_type = WLAN_SER_CANCEL_PDEV_NON_SCAN_CMD; + queue_type = WLAN_SERIALIZATION_PENDING_QUEUE; + wlan_ser_utf_cancel_nonscan_cmd(vdev, cmd_id, queue_type, + req_type); + break; + case SER_UTF_TC_CANCEL_NONSCAN_PD_VDEV: + req_type = WLAN_SER_CANCEL_VDEV_NON_SCAN_CMD; + queue_type = WLAN_SERIALIZATION_PENDING_QUEUE; + wlan_ser_utf_cancel_nonscan_cmd(vdev, cmd_id, queue_type, + req_type); + break; + case SER_UTF_TC_START_BSS_FILTERING: + id = 1; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_vdev_start_bss_cmd(vdev, id, data, + false, false); + + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_vdev_start_bss_cmd(vdev, id, data, + false, false); + + wlan_ser_utf_remove_start_bss_cmd(vdev, id); + wlan_ser_utf_remove_stop_bss_cmd(vdev, id); + wlan_ser_utf_remove_start_bss_cmd(vdev, id); + break; + case SER_UTF_TC_STOP_BSS_FILTERING: + id = 1; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_vdev_start_bss_cmd(vdev, id, data, + false, false); + + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_vdev_start_bss_cmd(vdev, id, data, + false, false); + + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_vdev_stop_bss_cmd(vdev, id, data, + false, false); + + wlan_ser_utf_remove_start_bss_cmd(vdev, id); + wlan_ser_utf_remove_stop_bss_cmd(vdev, id); + break; + case SER_UTF_TC_ADD_BLOCKING_NONSCAN_AC_1: + id = 1; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, true); + wlan_ser_utf_remove_nonscan_cmd(vdev, id); + break; + case SER_UTF_TC_ADD_BLOCKING_NONSCAN_PD_1: + id = 1; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, true); + + id = 2; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, true); + + wlan_ser_utf_remove_nonscan_cmd(vdev, 1); + wlan_ser_utf_remove_nonscan_cmd(vdev, 2); + break; + case SER_UTF_TC_ADD_BLOCKING_NONSCAN_PD_2: + id = 1; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + id = 2; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, true); + + wlan_ser_utf_remove_nonscan_cmd(vdev, 1); + wlan_ser_utf_remove_nonscan_cmd(vdev, 2); + break; + case SER_UTF_TC_ADD_BLOCKING_NONSCAN_PD_3: + id = 1; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + id = 2; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + id = 3; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + id = 4; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, true); + + wlan_ser_utf_remove_nonscan_cmd(vdev, 1); + wlan_ser_utf_remove_nonscan_cmd(vdev, 2); + wlan_ser_utf_remove_nonscan_cmd(vdev, 3); + wlan_ser_utf_remove_nonscan_cmd(vdev, 4); + break; + case SER_UTF_TC_ADD_BLOCKING_NONSCAN_PD_4: + id = 1; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + id = 2; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + id = 3; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + id = 4; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, true); + + id = 5; + if (!wlan_ser_utf_data_alloc(&data, vdev, id)) + break; + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + wlan_ser_utf_remove_nonscan_cmd(vdev, 1); + wlan_ser_utf_remove_nonscan_cmd(vdev, 2); + wlan_ser_utf_remove_nonscan_cmd(vdev, 3); + wlan_ser_utf_remove_nonscan_cmd(vdev, 4); + wlan_ser_utf_remove_nonscan_cmd(vdev, 5); + break; + case SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_1: + if (wlan_pdev_get_vdev_count(pdev) < WLAN_SER_UTF_MAX_VDEVS) { + ser_err("Requires atleast %d vdevs for the given pdev", + WLAN_SER_UTF_MAX_VDEVS); + break; + } + is_blocking = true; + if (wlan_objmgr_pdev_try_get_ref(pdev, WLAN_SERIALIZATION_ID) == + QDF_STATUS_SUCCESS) { + wlan_objmgr_pdev_iterate_obj_list( + pdev, WLAN_VDEV_OP, + wlan_ser_utf_vdev_iter_op, + &is_blocking, 0, WLAN_SERIALIZATION_ID); + wlan_objmgr_pdev_release_ref(pdev, + WLAN_SERIALIZATION_ID); + } + break; + case SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_2: + if (wlan_pdev_get_vdev_count(pdev) < WLAN_SER_UTF_MAX_VDEVS) { + ser_err("Requires atleast %d vdevs for the given pdev", + WLAN_SER_UTF_MAX_VDEVS); + break; + } + id = 1; + wlan_ser_utf_data_alloc(&data, ser_utf_vdev[0].vdev, id); + wlan_ser_utf_add_nonscan_cmd(ser_utf_vdev[0].vdev, id, + data, false, false); + + wlan_ser_utf_data_alloc(&data, ser_utf_vdev[1].vdev, id); + wlan_ser_utf_add_nonscan_cmd(ser_utf_vdev[1].vdev, id, + data, false, false); + + wlan_ser_utf_data_alloc(&data, ser_utf_vdev[2].vdev, id); + wlan_ser_utf_add_nonscan_cmd(ser_utf_vdev[2].vdev, id, + data, false, false); + + wlan_ser_utf_data_alloc(&data, ser_utf_vdev[2].vdev, id); + wlan_ser_utf_add_nonscan_cmd(ser_utf_vdev[2].vdev, id, + data, false, true); + + wlan_ser_utf_remove_nonscan_cmd(ser_utf_vdev[0].vdev, id); + wlan_ser_utf_remove_nonscan_cmd(ser_utf_vdev[1].vdev, id); + wlan_ser_utf_remove_nonscan_cmd(ser_utf_vdev[2].vdev, id); + wlan_ser_utf_remove_nonscan_cmd(ser_utf_vdev[2].vdev, id); + break; + case SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_3: + if (wlan_pdev_get_vdev_count(pdev) < WLAN_SER_UTF_MAX_VDEVS) { + ser_err("Requires atleast %d vdevs for the given pdev", + WLAN_SER_UTF_MAX_VDEVS); + break; + } + id = 1; + wlan_ser_utf_data_alloc(&data, ser_utf_vdev[0].vdev, id); + wlan_ser_utf_add_nonscan_cmd(ser_utf_vdev[0].vdev, id, + data, false, true); + + wlan_ser_utf_data_alloc(&data, ser_utf_vdev[0].vdev, id); + wlan_ser_utf_add_nonscan_cmd(ser_utf_vdev[0].vdev, id, + data, false, false); + + wlan_ser_utf_data_alloc(&data, ser_utf_vdev[1].vdev, id); + wlan_ser_utf_add_nonscan_cmd(ser_utf_vdev[1].vdev, id, + data, false, false); + + wlan_ser_utf_data_alloc(&data, ser_utf_vdev[2].vdev, id); + wlan_ser_utf_add_nonscan_cmd(ser_utf_vdev[2].vdev, id, + data, false, false); + + wlan_ser_utf_remove_nonscan_cmd(ser_utf_vdev[0].vdev, id); + wlan_ser_utf_remove_nonscan_cmd(ser_utf_vdev[0].vdev, id); + wlan_ser_utf_remove_nonscan_cmd(ser_utf_vdev[1].vdev, id); + wlan_ser_utf_remove_nonscan_cmd(ser_utf_vdev[2].vdev, id); + break; + case SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_4: + if (wlan_pdev_get_vdev_count(pdev) < WLAN_SER_UTF_MAX_VDEVS) { + ser_err("Requires atleast %d vdevs for the given pdev", + WLAN_SER_UTF_MAX_VDEVS); + break; + } + for (id = 1; id <= 2; id++) { + for (vdev_id = 0; vdev_id < WLAN_SER_UTF_MAX_VDEVS; + vdev_id++) { + wlan_ser_utf_data_alloc( + &data, + ser_utf_vdev[vdev_id].vdev, id); + wlan_ser_utf_add_nonscan_cmd( + ser_utf_vdev[vdev_id].vdev, + id, data, false, false); + } + } + + id = 3; + for (vdev_id = 0; vdev_id < WLAN_SER_UTF_MAX_VDEVS; vdev_id++) { + wlan_ser_utf_data_alloc( + &data, ser_utf_vdev[vdev_id].vdev, id); + wlan_ser_utf_add_nonscan_cmd( + ser_utf_vdev[vdev_id].vdev, id, + data, false, true); + } + + for (id = 1; id <= 3; id++) { + for (vdev_id = 0; vdev_id < WLAN_SER_UTF_MAX_VDEVS; + vdev_id++) + wlan_ser_utf_remove_nonscan_cmd( + ser_utf_vdev[vdev_id].vdev, id); + } + break; + case SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_5: + if (wlan_pdev_get_vdev_count(pdev) < WLAN_SER_UTF_MAX_VDEVS) { + ser_err("Requires atleast %d vdevs for the given pdev", + WLAN_SER_UTF_MAX_VDEVS); + break; + } + id = 1; + for (vdev_id = 0; vdev_id < WLAN_SER_UTF_MAX_VDEVS; + vdev_id++) { + wlan_ser_utf_data_alloc( + &data, + ser_utf_vdev[vdev_id].vdev, id); + wlan_ser_utf_add_nonscan_cmd( + ser_utf_vdev[vdev_id].vdev, + id, data, false, false); + } + id = 2; + for (vdev_id = 0; vdev_id < WLAN_SER_UTF_MAX_VDEVS; + vdev_id++) { + wlan_ser_utf_data_alloc( + &data, + ser_utf_vdev[vdev_id].vdev, id); + wlan_ser_utf_add_nonscan_cmd( + ser_utf_vdev[vdev_id].vdev, + id, data, false, true); + } + id = 3; + for (vdev_id = 0; vdev_id < WLAN_SER_UTF_MAX_VDEVS; + vdev_id++) { + wlan_ser_utf_data_alloc( + &data, + ser_utf_vdev[vdev_id].vdev, id); + wlan_ser_utf_add_nonscan_cmd( + ser_utf_vdev[vdev_id].vdev, + id, data, false, false); + } + + for (id = 1; id <= 3; id++) { + for (vdev_id = 0; vdev_id < WLAN_SER_UTF_MAX_VDEVS; + vdev_id++) + wlan_ser_utf_remove_nonscan_cmd( + ser_utf_vdev[vdev_id].vdev, id); + } + break; + case SER_UTF_TC_HIGH_PRIO_NONSCAN_WO_BL: + id = 1; + wlan_ser_utf_data_alloc(&data, vdev, id); + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + id = 2; + wlan_ser_utf_data_alloc(&data, vdev, id); + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + id = 3; + wlan_ser_utf_data_alloc(&data, vdev, id); + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + id = 4; + wlan_ser_utf_data_alloc(&data, vdev, id); + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, true, false); + + wlan_ser_utf_remove_nonscan_cmd(vdev, 1); + wlan_ser_utf_remove_nonscan_cmd(vdev, 4); + wlan_ser_utf_remove_nonscan_cmd(vdev, 2); + wlan_ser_utf_remove_nonscan_cmd(vdev, 3); + break; + case SER_UTF_TC_HIGH_PRIO_NONSCAN_W_BL: + id = 1; + wlan_ser_utf_data_alloc(&data, vdev, id); + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + id = 2; + wlan_ser_utf_data_alloc(&data, vdev, id); + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, true); + + id = 3; + wlan_ser_utf_data_alloc(&data, vdev, id); + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, true, false); + + id = 4; + wlan_ser_utf_data_alloc(&data, vdev, id); + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + wlan_ser_utf_remove_nonscan_cmd(vdev, 1); + wlan_ser_utf_remove_nonscan_cmd(vdev, 3); + wlan_ser_utf_remove_nonscan_cmd(vdev, 2); + wlan_ser_utf_remove_nonscan_cmd(vdev, 4); + break; + case SER_UTF_TC_HIGH_PRIO_BL_NONSCAN: + id = 1; + wlan_ser_utf_data_alloc(&data, vdev, id); + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + id = 2; + wlan_ser_utf_data_alloc(&data, vdev, id); + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + id = 3; + wlan_ser_utf_data_alloc(&data, vdev, id); + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, false, false); + + id = 4; + wlan_ser_utf_data_alloc(&data, vdev, id); + wlan_ser_utf_add_nonscan_cmd(vdev, id, data, true, true); + + wlan_ser_utf_remove_nonscan_cmd(vdev, 1); + wlan_ser_utf_remove_nonscan_cmd(vdev, 4); + wlan_ser_utf_remove_nonscan_cmd(vdev, 2); + wlan_ser_utf_remove_nonscan_cmd(vdev, 3); + break; + default: + ser_err("Error: Unknown val"); + break; + } + + return 0; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utf_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utf_i.h new file mode 100644 index 0000000000000000000000000000000000000000..0ccac18abf23838597159ebfb2647195387392e5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utf_i.h @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Defines the data structures used by the unit test framework for + * serialization module + */ + +#ifndef _WLAN_SERIALIZATION_UTF_I_H_ +#define _WLAN_SERIALIZATION_UTF_I_H_ + +#define WLAN_SER_UTF_MAX_VDEVS 4 +#define WLAN_SER_UTF_SCAN_CMD_TESTS 33 +#define WLAN_SER_UTF_TIMER_TIMEOUT_MS 5000 +#define WLAN_SER_UTF_TEST_CMD_TIMEOUT_MS 30000 + +/* Sample string: SER_Vxx_Cxx */ +#define WLAN_SER_UTF_STR_SIZE 15 +#define WLAN_SER_DATA_STR(_s, _v, _i) \ + scnprintf(_s, WLAN_SER_UTF_STR_SIZE, "SER_V%u_C%u", _v, _i) + +#define SER_UTF_BLOCK_STR(_x) (_x ? "BLOCK" : "NON-BLOCK") + +char *wlan_serialization_status_strings[] = { + "WLAN_SER_CMD_PENDING", + "WLAN_SER_CMD_ACTIVE", + "WLAN_SER_CMD_DENIED_RULES_FAILED", + "WLAN_SER_CMD_DENIED_LIST_FULL", + "WLAN_SER_CMD_DENIED_UNSPECIFIED", +}; + +/** + * enum wlan_ser_utf_tc_id - Test case id + * @SER_UTF_TC_DEINIT: Deinit UTF + * @SER_UTF_TC_INIT: Init UTF + * @SER_UTF_TC_ADD: Add a custom cmd to queue + * @SER_UTF_TC_REMOVE: Remove a custom cmd from queue + * @SER_UTF_TC_CANCEL: Cancel a custom cmd from queue + * @SER_UTF_TC_SINGLE_SCAN: Add and remove a single scan cmd + * @SER_UTF_TC_MULTI_SCAN: Add and remove a multiple scan cmd + * @SER_UTF_TC_MAX_SCAN: Add and remove a maximum scan cmd + * @SER_UTF_TC_SINGLE_NONSCAN: Add and remove a single nonscan cmd + * @SER_UTF_TC_MULTI_NONSCAN: Add and remove a multiple nonscan cmd + * @SER_UTF_TC_MAX_NONSCAN: Add and remove a maximum nonscan cmd + * @SER_UTF_TC_MULTI_VDEV_NONSCAN: Add nonscan cmd across multiple vdev + * @SER_UTF_TC_CANCEL_SCAN_AC_SINGLE: Cancel single scan from active queue + * @SER_UTF_TC_CANCEL_SCAN_AC_PDEV: Cancel pdev scan from active queue + * @SER_UTF_TC_CANCEL_SCAN_AC_VDEV: Cancel vdev scan from active queue + * @SER_UTF_TC_CANCEL_SCAN_PD_SINGLE: Cancel single scan from pending queue + * @SER_UTF_TC_CANCEL_SCAN_PD_PDEV: Cancel pdev scan from pending queue + * @SER_UTF_TC_CANCEL_SCAN_PD_VDEV: Cancel vdev scan from pending queue + * @SER_UTF_TC_CANCEL_NONSCAN_AC_SINGLE: Cancel single nonscan from active queue + * @SER_UTF_TC_CANCEL_NONSCAN_AC_PDEV: Cancel pdev nonscan from active queue + * @SER_UTF_TC_CANCEL_NONSCAN_AC_VDEV: Cancel vdev nonscan from active queue + * @SER_UTF_TC_CANCEL_NONSCAN_PD_SINGLE: Cancel nonscan from pending queue + * @SER_UTF_TC_CANCEL_NONSCAN_PD_PDEV: Cancel pdev nonscan from pending queue + * @SER_UTF_TC_CANCEL_NONSCAN_PD_VDEV: Cancel vdev nonscan from pending queue + * @SER_UTF_TC_START_BSS_FILTERING: Test start_bss filtering logic + * @SER_UTF_TC_STOP_BSS_FILTERING: Test stop_bss filtering logic + * @SER_UTF_TC_ADD_BLOCKING_NONSCAN_AC_1: Add blocking cmd to active queue + * @SER_UTF_TC_ADD_BLOCKING_NONSCAN_PD_1: Add blocking cmd to pending queue with + * blocking cmd in active queue + * @SER_UTF_TC_ADD_BLOCKING_NONSCAN_PD_2: Add blocking cmd to pending queue with + * non-blocking cmd in active queue + * @SER_UTF_TC_ADD_BLOCKING_NONSCAN_PD_3: Add blocking cmd to tail of + * pending queue with non-blocking cmd in active queue + * @SER_UTF_TC_ADD_BLOCKING_NONSCAN_PD_4: Add blocking cmd to pending between + * non-blocking cmd in pending and active queue + * @SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_1: Add blocking nonscan cmd + * across multiple vdev + * @SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_2: Add blocking nonscan cmd + * to a pending queue of vdev with non-blocking across multi vdev + * @SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_3: Add blocking nonscan cmd + * to a active queue of vdev with non-blocking across multiple vdev + * @SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_4: Add blocking nonscan cmd to the + * pending queue of multi vdev with non-blocking across multi vdev + * @SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_5: Add blocking nonscan cmd to the + * pending queue of multi vdev with non-blocking across multi vdev + * in pending and active queue + * @SER_UTF_TC_HIGH_PRIO_NONSCAN_WO_BL: Add high priority nonscan cmd + * to the tail of pending queue + * @SER_UTF_TC_HIGH_PRIO_NONSCAN_W_BL: Add high priority nonscan cmd + * to the pending queue between normal priority command + * @SER_UTF_TC_HIGH_PRIO_BL_NONSCAN: Add high priority blocking + * nonscan cmd to the tail of pending queue + */ +enum wlan_ser_utf_tc_id { + SER_UTF_TC_DEINIT, + SER_UTF_TC_INIT, + SER_UTF_TC_ADD, + SER_UTF_TC_REMOVE, + SER_UTF_TC_CANCEL, + SER_UTF_TC_SINGLE_SCAN, + SER_UTF_TC_MULTI_SCAN, + SER_UTF_TC_MAX_SCAN, + SER_UTF_TC_SINGLE_NONSCAN, + SER_UTF_TC_MULTI_NONSCAN, + SER_UTF_TC_MAX_NONSCAN, + SER_UTF_TC_MULTI_VDEV_NONSCAN, + SER_UTF_TC_CANCEL_SCAN_AC_SINGLE, + SER_UTF_TC_CANCEL_SCAN_AC_PDEV, + SER_UTF_TC_CANCEL_SCAN_AC_VDEV, + SER_UTF_TC_CANCEL_SCAN_PD_SINGLE, + SER_UTF_TC_CANCEL_SCAN_PD_PDEV, + SER_UTF_TC_CANCEL_SCAN_PD_VDEV, + SER_UTF_TC_CANCEL_NONSCAN_AC_SINGLE, + SER_UTF_TC_CANCEL_NONSCAN_AC_PDEV, + SER_UTF_TC_CANCEL_NONSCAN_AC_VDEV, + SER_UTF_TC_CANCEL_NONSCAN_PD_SINGLE, + SER_UTF_TC_CANCEL_NONSCAN_PD_PDEV, + SER_UTF_TC_CANCEL_NONSCAN_PD_VDEV, + SER_UTF_TC_START_BSS_FILTERING, + SER_UTF_TC_STOP_BSS_FILTERING, + SER_UTF_TC_ADD_BLOCKING_NONSCAN_AC_1, + SER_UTF_TC_ADD_BLOCKING_NONSCAN_PD_1, + SER_UTF_TC_ADD_BLOCKING_NONSCAN_PD_2, + SER_UTF_TC_ADD_BLOCKING_NONSCAN_PD_3, + SER_UTF_TC_ADD_BLOCKING_NONSCAN_PD_4, + SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_1, + SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_2, + SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_3, + SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_4, + SER_UTF_TC_MULTI_VDEV_BL_NONSCAN_5, + SER_UTF_TC_HIGH_PRIO_NONSCAN_WO_BL, + SER_UTF_TC_HIGH_PRIO_NONSCAN_W_BL, + SER_UTF_TC_HIGH_PRIO_BL_NONSCAN, +}; + +/** + * struct wlan_ser_utf_data - Test data + * @id - Test variable + * @str - String tag associated with the command + */ +struct wlan_ser_utf_data { + uint8_t id; + char str[WLAN_SER_UTF_STR_SIZE + 1]; +}; + +/** + * struct wlan_ser_utf_vdev_info - Information used by the vdevs + * @vdev: Vdev object manager information + * @ser_count: Serialization command count for the vdev + * @utf_scan_timer: Timer for scan commands + * @utf_nonscan_timer: Timer for non-scan commands + */ +struct wlan_ser_utf_vdev_info { + struct wlan_objmgr_vdev *vdev; + uint8_t ser_count; + qdf_timer_t utf_scan_timer[WLAN_SER_UTF_SCAN_CMD_TESTS]; + qdf_timer_t utf_nonscan_timer[WLAN_SER_UTF_SCAN_CMD_TESTS]; +}; + +/** + * wlan_ser_utf_cb() - Serialization callback function + * @cmd: Serialization command info + * @reason: Serialization reason for callback execution + * + * Return: Status of callback execution + */ +QDF_STATUS wlan_ser_utf_cb(struct wlan_serialization_command *cmd, + enum wlan_serialization_cb_reason reason); + +#endif /* _WLAN_SERIALIZATION_UTF_I_H_ */ + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utils.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utils.c new file mode 100644 index 0000000000000000000000000000000000000000..4ec739408f03b2c70a2a02ee8a11c0b9f19309fb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utils.c @@ -0,0 +1,904 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_serialization_utils.c + * This file defines the utility helper functions for serialization component. + */ + +#include +#include +#include +#include +#include "wlan_serialization_utils_i.h" +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_queue_i.h" +#include "wlan_serialization_api.h" + +#ifndef WLAN_SER_DEBUG +void wlan_ser_update_cmd_history( + struct wlan_serialization_pdev_queue *pdev_queue, + struct wlan_serialization_command *cmd, + enum ser_queue_reason ser_reason, + bool add_remove, + bool active_queue){ } +#endif + +struct wlan_objmgr_pdev* +wlan_serialization_get_pdev_from_cmd(struct wlan_serialization_command *cmd) +{ + struct wlan_objmgr_pdev *pdev = NULL; + + if (!cmd) { + ser_err("invalid cmd"); + return pdev; + } + if (!cmd->vdev) { + ser_err("invalid cmd->vdev"); + return pdev; + } + pdev = wlan_vdev_get_pdev(cmd->vdev); + + return pdev; +} + +struct wlan_objmgr_psoc* +wlan_serialization_get_psoc_from_cmd(struct wlan_serialization_command *cmd) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + if (!cmd) { + ser_err("invalid cmd"); + return psoc; + } + if (!cmd->vdev) { + ser_err("invalid cmd->vdev"); + return psoc; + } + psoc = wlan_vdev_get_psoc(cmd->vdev); + + return psoc; +} + +struct wlan_objmgr_vdev* +wlan_serialization_get_vdev_from_cmd(struct wlan_serialization_command *cmd) +{ + struct wlan_objmgr_vdev *vdev = NULL; + + if (!cmd) { + ser_err("invalid cmd"); + goto error; + } + + vdev = cmd->vdev; + +error: + return vdev; +} + +QDF_STATUS +wlan_serialization_get_cmd_from_queue(qdf_list_t *queue, + qdf_list_node_t **nnode) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + qdf_list_node_t *pnode; + + if (!queue) { + ser_err("input parameters are invalid"); + goto error; + } + + pnode = *nnode; + if (!pnode) + status = wlan_serialization_peek_front(queue, nnode); + else + status = wlan_serialization_peek_next(queue, pnode, nnode); + + if (status != QDF_STATUS_SUCCESS) + ser_err("can't get next node from queue"); + +error: + return status; +} + +QDF_STATUS wlan_serialization_timer_destroy( + struct wlan_serialization_timer *ser_timer) +{ + QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; + + if (!ser_timer || !ser_timer->cmd) { + ser_debug("Invalid ser_timer"); + qdf_status = QDF_STATUS_E_FAILURE; + goto error; + } + + qdf_timer_stop(&ser_timer->timer); + ser_timer->cmd = NULL; + +error: + return qdf_status; +} + +/** + * wlan_serialization_stop_timer() - to stop particular timer + * @ser_timer: pointer to serialization timer + * + * This API stops the particular timer + * + * Return: QDF_STATUS + */ +QDF_STATUS +wlan_serialization_stop_timer(struct wlan_serialization_timer *ser_timer) +{ + wlan_serialization_timer_destroy(ser_timer); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_serialization_cleanup_vdev_timers( + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_ser_psoc_obj *psoc_ser_obj; + struct wlan_serialization_timer *ser_timer; + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint32_t i = 0; + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_objmgr_psoc *psoc = NULL; + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + QDF_BUG(0); + ser_err("pdev is null"); + status = QDF_STATUS_E_FAILURE; + goto error; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + QDF_BUG(0); + ser_err("psoc is null"); + status = QDF_STATUS_E_FAILURE; + goto error; + } + + psoc_ser_obj = wlan_serialization_get_psoc_obj(psoc); + + if (!psoc_ser_obj) { + ser_err("Invalid psoc_ser_obj"); + status = QDF_STATUS_E_FAILURE; + goto error; + } + + wlan_serialization_acquire_lock(&psoc_ser_obj->timer_lock); + + for (i = 0; psoc_ser_obj->max_active_cmds > i; i++) { + ser_timer = &psoc_ser_obj->timers[i]; + if (!ser_timer->cmd) + continue; + /* + * Check if the timer is for the given vdev + */ + if (ser_timer->cmd->vdev != vdev) + continue; + + ser_debug("Stopping the timer for vdev id[%d]", + wlan_vdev_get_id(vdev)); + + status = wlan_serialization_stop_timer(ser_timer); + if (QDF_STATUS_SUCCESS != status) { + /* lets not break the loop but report error */ + ser_err("some error in stopping timer"); + } + } + + wlan_serialization_release_lock(&psoc_ser_obj->timer_lock); +error: + return status; +} + +QDF_STATUS wlan_serialization_cleanup_all_timers( + struct wlan_ser_psoc_obj *psoc_ser_obj) +{ + struct wlan_serialization_timer *ser_timer; + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint32_t i = 0; + + if (!psoc_ser_obj) { + ser_err("Invalid psoc_ser_obj"); + status = QDF_STATUS_E_FAILURE; + goto error; + } + + wlan_serialization_acquire_lock(&psoc_ser_obj->timer_lock); + + for (i = 0; psoc_ser_obj->max_active_cmds > i; i++) { + ser_timer = &psoc_ser_obj->timers[i]; + if (!ser_timer->cmd) + continue; + status = wlan_serialization_stop_timer(ser_timer); + if (QDF_STATUS_SUCCESS != status) { + /* lets not break the loop but report error */ + ser_err("some error in stopping timer"); + } + } + + wlan_serialization_release_lock(&psoc_ser_obj->timer_lock); +error: + + return status; +} + +QDF_STATUS wlan_serialization_validate_cmdtype( + enum wlan_serialization_cmd_type cmd_type) +{ + if (cmd_type < 0 || cmd_type >= WLAN_SER_CMD_MAX) { + ser_err("Invalid cmd %d passed", cmd_type); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_serialization_validate_cmd( + enum wlan_umac_comp_id comp_id, + enum wlan_serialization_cmd_type cmd_type) +{ + QDF_STATUS status = QDF_STATUS_E_INVAL; + + if (cmd_type < 0 || comp_id < 0 || cmd_type >= WLAN_SER_CMD_MAX || + comp_id >= WLAN_UMAC_COMP_ID_MAX) { + ser_err("Invalid cmd or comp passed comp %d type %d", + comp_id, cmd_type); + goto error; + } + + status = QDF_STATUS_SUCCESS; +error: + return status; +} + +QDF_STATUS wlan_serialization_validate_cmd_list( + struct wlan_serialization_command_list *cmd_list) +{ + QDF_STATUS status = QDF_STATUS_E_INVAL; + + if (!cmd_list->cmd.cmd_cb) { + ser_err("no cmd_cb for cmd type:%d, id: %d", + cmd_list->cmd.cmd_type, cmd_list->cmd.cmd_id); + QDF_ASSERT(0); + goto error; + } + + if (!cmd_list->cmd.vdev) { + ser_err("invalid cmd.vdev"); + goto error; + } + + status = QDF_STATUS_SUCCESS; + +error: + return status; +} + +static void wlan_serialization_release_pdev_list_cmds( + struct wlan_serialization_pdev_queue *pdev_queue) +{ + qdf_list_node_t *node = NULL; + + while (!wlan_serialization_list_empty(&pdev_queue->active_list)) { + wlan_serialization_remove_front( + &pdev_queue->active_list, &node); + wlan_serialization_insert_back( + &pdev_queue->cmd_pool_list, node); + } + + while (!wlan_serialization_list_empty(&pdev_queue->pending_list)) { + wlan_serialization_remove_front( + &pdev_queue->pending_list, &node); + wlan_serialization_insert_back( + &pdev_queue->cmd_pool_list, node); + } + +} + +static void wlan_serialization_release_vdev_list_cmds(qdf_list_t *list) +{ + qdf_list_node_t *node = NULL; + + + while (!wlan_serialization_list_empty(list)) + wlan_serialization_remove_front(list, &node); + +} + +void wlan_serialization_destroy_pdev_list( + struct wlan_serialization_pdev_queue *pdev_queue) +{ + + wlan_serialization_release_pdev_list_cmds(pdev_queue); + qdf_list_destroy(&pdev_queue->pending_list); + qdf_list_destroy(&pdev_queue->active_list); + +} + +void wlan_serialization_destroy_vdev_list(qdf_list_t *list) +{ + + wlan_serialization_release_vdev_list_cmds(list); + qdf_list_destroy(list); + +} + +struct wlan_ser_psoc_obj *wlan_serialization_get_psoc_obj( + struct wlan_objmgr_psoc *psoc) +{ + struct wlan_ser_psoc_obj *ser_soc_obj; + + ser_soc_obj = + wlan_objmgr_psoc_get_comp_private_obj( + psoc, WLAN_UMAC_COMP_SERIALIZATION); + + return ser_soc_obj; +} + +struct wlan_ser_pdev_obj *wlan_serialization_get_pdev_obj( + struct wlan_objmgr_pdev *pdev) +{ + struct wlan_ser_pdev_obj *obj; + + obj = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_SERIALIZATION); + + return obj; +} + +struct wlan_ser_vdev_obj *wlan_serialization_get_vdev_obj( + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_ser_vdev_obj *obj; + + obj = wlan_objmgr_vdev_get_comp_private_obj( + vdev, WLAN_UMAC_COMP_SERIALIZATION); + + return obj; +} + +bool wlan_serialization_is_cmd_in_vdev_list( + struct wlan_objmgr_vdev *vdev, + qdf_list_t *queue, + enum wlan_serialization_node node_type) +{ + qdf_list_node_t *node = NULL; + bool cmd_found = false; + + node = wlan_serialization_find_cmd( + queue, WLAN_SER_MATCH_VDEV, + NULL, 0, NULL, vdev, node_type); + + if (node) + cmd_found = true; + + return cmd_found; +} + +bool wlan_serialization_is_cmd_in_pdev_list( + struct wlan_objmgr_pdev *pdev, + qdf_list_t *queue) +{ + qdf_list_node_t *node = NULL; + bool cmd_found = false; + + node = wlan_serialization_find_cmd( + queue, WLAN_SER_MATCH_PDEV, + NULL, 0, pdev, NULL, WLAN_SER_PDEV_NODE); + + if (node) + cmd_found = true; + + return cmd_found; +} + +enum wlan_serialization_cmd_status +wlan_serialization_is_cmd_in_active_pending(bool cmd_in_active, + bool cmd_in_pending) +{ + enum wlan_serialization_cmd_status status; + + if (cmd_in_active && cmd_in_pending) + status = WLAN_SER_CMDS_IN_ALL_LISTS; + else if (cmd_in_active) + status = WLAN_SER_CMD_IN_ACTIVE_LIST; + else if (cmd_in_pending) + status = WLAN_SER_CMD_IN_PENDING_LIST; + else + status = WLAN_SER_CMD_NOT_FOUND; + + return status; +} + +bool +wlan_serialization_is_cmd_present_in_given_queue( + qdf_list_t *queue, + struct wlan_serialization_command *cmd, + enum wlan_serialization_node node_type) +{ + qdf_list_node_t *node = NULL; + bool found = false; + + node = wlan_serialization_find_cmd( + queue, WLAN_SER_MATCH_CMD_ID_VDEV, + cmd, 0, NULL, cmd->vdev, node_type); + + if (node) + found = true; + + return found; +} + +/** + * wlan_serialization_remove_cmd_from_queue() - to remove command from + * given queue + * @queue: queue from which command needs to be removed + * @cmd: command to match in the queue + * @ser_pdev_obj: pointer to private pdev serialization object + * + * This API takes the queue, it matches the provided command from this queue + * and removes it. Before removing the command, it will notify the caller + * that if it needs to remove any memory allocated by caller. + * + * Return: none + */ +QDF_STATUS +wlan_serialization_remove_cmd_from_queue( + qdf_list_t *queue, + struct wlan_serialization_command *cmd, + struct wlan_serialization_command_list **pcmd_list, + struct wlan_ser_pdev_obj *ser_pdev_obj, + enum wlan_serialization_node node_type) +{ + struct wlan_serialization_command_list *cmd_list; + qdf_list_node_t *node = NULL; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!cmd) + goto error; + + if (!queue || wlan_serialization_list_empty(queue)) { + ser_debug("Empty queue"); + goto error; + } + + node = wlan_serialization_find_cmd(queue, WLAN_SER_MATCH_CMD_ID_VDEV, + cmd, 0, NULL, cmd->vdev, node_type); + + if (!node) { + ser_info("fail to find node %d for removal", node_type); + goto error; + } + + if (node_type == WLAN_SER_PDEV_NODE) + cmd_list = + qdf_container_of(node, + struct wlan_serialization_command_list, + pdev_node); + else + cmd_list = + qdf_container_of(node, + struct wlan_serialization_command_list, + vdev_node); + + if (qdf_atomic_test_bit(CMD_MARKED_FOR_ACTIVATION, + &cmd_list->cmd_in_use)) { + qdf_atomic_set_bit(CMD_ACTIVE_MARKED_FOR_REMOVAL, + &cmd_list->cmd_in_use); + status = QDF_STATUS_E_PENDING; + goto error; + } + + status = wlan_serialization_remove_node(queue, node); + + if (QDF_STATUS_SUCCESS != status) + ser_err("Fail to add to free pool type %d", + cmd->cmd_type); + + *pcmd_list = cmd_list; + +error: + return status; +} + +enum wlan_serialization_status +wlan_serialization_add_cmd_to_queue( + qdf_list_t *queue, + struct wlan_serialization_command_list *cmd_list, + struct wlan_ser_pdev_obj *ser_pdev_obj, + uint8_t is_cmd_for_active_queue, + enum wlan_serialization_node node_type) +{ + enum wlan_serialization_status status = WLAN_SER_CMD_DENIED_UNSPECIFIED; + QDF_STATUS qdf_status; + qdf_list_node_t *node; + + if (!cmd_list || !queue || !ser_pdev_obj) { + ser_err("Input arguments are not valid"); + goto error; + } + + if (node_type == WLAN_SER_PDEV_NODE) + node = &cmd_list->pdev_node; + else + node = &cmd_list->vdev_node; + + if (qdf_list_size(queue) == qdf_list_max_size(queue)) { + status = WLAN_SER_CMD_DENIED_LIST_FULL; + ser_err("Queue size reached max %d, fail to add type %d id %d", + qdf_list_max_size(queue), cmd_list->cmd.cmd_type, + cmd_list->cmd.cmd_id); + goto error; + } + + if (cmd_list->cmd.is_high_priority) + qdf_status = wlan_serialization_insert_front(queue, node); + else + qdf_status = wlan_serialization_insert_back(queue, node); + + if (QDF_IS_STATUS_ERROR(qdf_status)) + goto error; + + if (is_cmd_for_active_queue) + status = WLAN_SER_CMD_ACTIVE; + else + status = WLAN_SER_CMD_PENDING; + +error: + return status; +} + +bool wlan_serialization_list_empty(qdf_list_t *queue) +{ + bool is_empty; + + if (qdf_list_empty(queue)) + is_empty = true; + else + is_empty = false; + + return is_empty; +} + +uint32_t wlan_serialization_list_size(qdf_list_t *queue) +{ + uint32_t size; + + size = qdf_list_size(queue); + + return size; +} + +QDF_STATUS wlan_serialization_remove_front(qdf_list_t *list, + qdf_list_node_t **node) +{ + QDF_STATUS status; + + if (wlan_serialization_list_empty(list)) { + ser_err("The list is empty"); + status = QDF_STATUS_E_EMPTY; + goto error; + } + + status = qdf_list_remove_front(list, node); +error: + return status; +} + +QDF_STATUS wlan_serialization_remove_node(qdf_list_t *list, + qdf_list_node_t *node) +{ + QDF_STATUS status; + + if (wlan_serialization_list_empty(list)) { + ser_err("The list is empty"); + status = QDF_STATUS_E_EMPTY; + goto error; + } + status = qdf_list_remove_node(list, node); + +error: + return status; +} + +QDF_STATUS wlan_serialization_insert_front(qdf_list_t *list, + qdf_list_node_t *node) +{ + QDF_STATUS status; + + status = qdf_list_insert_front(list, node); + + return status; +} + +QDF_STATUS wlan_serialization_insert_back(qdf_list_t *list, + qdf_list_node_t *node) +{ + QDF_STATUS status; + + status = qdf_list_insert_back(list, node); + + return status; +} + +QDF_STATUS wlan_serialization_peek_front(qdf_list_t *list, + qdf_list_node_t **node) +{ + QDF_STATUS status; + + status = qdf_list_peek_front(list, node); + + return status; +} + +QDF_STATUS wlan_serialization_peek_next(qdf_list_t *list, + qdf_list_node_t *node1, + qdf_list_node_t **node2) +{ + QDF_STATUS status; + + status = qdf_list_peek_next(list, node1, node2); + + return status; +} + +bool +wlan_serialization_match_cmd_type(qdf_list_node_t *nnode, + enum wlan_serialization_cmd_type cmd_type, + enum wlan_serialization_node node_type) +{ + struct wlan_serialization_command_list *cmd_list = NULL; + bool match_found = true; + + if (node_type == WLAN_SER_PDEV_NODE) + cmd_list = + qdf_container_of(nnode, + struct wlan_serialization_command_list, + pdev_node); + else + cmd_list = + qdf_container_of(nnode, + struct wlan_serialization_command_list, + vdev_node); + + if (cmd_list->cmd.cmd_type != cmd_type) + match_found = false; + + return match_found; +} + +bool +wlan_serialization_match_cmd_id_type(qdf_list_node_t *nnode, + struct wlan_serialization_command *cmd, + enum wlan_serialization_node node_type) +{ + struct wlan_serialization_command_list *cmd_list = NULL; + bool match_found = true; + + if (!cmd) { + match_found = false; + goto error; + } + + if (node_type == WLAN_SER_PDEV_NODE) + cmd_list = + qdf_container_of(nnode, + struct wlan_serialization_command_list, + pdev_node); + else + cmd_list = + qdf_container_of(nnode, + struct wlan_serialization_command_list, + vdev_node); + + if ((cmd_list->cmd.cmd_id != cmd->cmd_id) || + (cmd_list->cmd.cmd_type != cmd->cmd_type)) { + match_found = false; + }; + +error: + return match_found; +} + +bool wlan_serialization_match_cmd_vdev(qdf_list_node_t *nnode, + struct wlan_objmgr_vdev *vdev, + enum wlan_serialization_node node_type) +{ + struct wlan_serialization_command_list *cmd_list = NULL; + bool match_found = false; + + if (node_type == WLAN_SER_PDEV_NODE) + cmd_list = + qdf_container_of(nnode, + struct wlan_serialization_command_list, + pdev_node); + else + cmd_list = + qdf_container_of(nnode, + struct wlan_serialization_command_list, + vdev_node); + + if (cmd_list->cmd.vdev == vdev) + match_found = true; + + if (!match_found) + ser_debug("matching cmd not found for (vdev:%pK)", vdev); + + return match_found; +} + +bool wlan_serialization_match_cmd_pdev(qdf_list_node_t *nnode, + struct wlan_objmgr_pdev *pdev, + enum wlan_serialization_node node_type) +{ + struct wlan_serialization_command_list *cmd_list = NULL; + bool match_found = false; + struct wlan_objmgr_pdev *node_pdev = NULL; + + if (node_type == WLAN_SER_PDEV_NODE) + cmd_list = + qdf_container_of(nnode, + struct wlan_serialization_command_list, + pdev_node); + else + cmd_list = + qdf_container_of(nnode, + struct wlan_serialization_command_list, + vdev_node); + + node_pdev = wlan_vdev_get_pdev(cmd_list->cmd.vdev); + if (node_pdev == pdev) + match_found = true; + + return match_found; +} + +bool wlan_serialization_match_cmd_blocking( + qdf_list_node_t *nnode, + enum wlan_serialization_node node_type) +{ + struct wlan_serialization_command_list *cmd_list = NULL; + bool match_found = false; + + if (node_type == WLAN_SER_PDEV_NODE) + cmd_list = + qdf_container_of(nnode, + struct wlan_serialization_command_list, + pdev_node); + else + cmd_list = + qdf_container_of(nnode, + struct wlan_serialization_command_list, + vdev_node); + + if (cmd_list->cmd.is_blocking) + match_found = true; + + return match_found; +} + +qdf_list_node_t * +wlan_serialization_find_cmd(qdf_list_t *queue, + enum wlan_serialization_match_type match_type, + struct wlan_serialization_command *cmd, + enum wlan_serialization_cmd_type cmd_type, + struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev, + enum wlan_serialization_node node_type) +{ + qdf_list_node_t *cmd_node = NULL; + uint32_t queuelen; + qdf_list_node_t *nnode = NULL; + QDF_STATUS status; + bool node_found = 0; + + queuelen = wlan_serialization_list_size(queue); + + if (!queuelen) + goto error; + + while (queuelen--) { + status = wlan_serialization_get_cmd_from_queue(queue, &nnode); + if (status != QDF_STATUS_SUCCESS) + break; + + switch (match_type) { + case WLAN_SER_MATCH_PDEV: + if (wlan_serialization_match_cmd_pdev( + nnode, pdev, WLAN_SER_PDEV_NODE)) + node_found = 1; + break; + case WLAN_SER_MATCH_VDEV: + if (wlan_serialization_match_cmd_vdev( + nnode, vdev, node_type)) + node_found = 1; + break; + case WLAN_SER_MATCH_CMD_TYPE: + if (wlan_serialization_match_cmd_type( + nnode, cmd_type, node_type)) + node_found = 1; + break; + case WLAN_SER_MATCH_CMD_ID: + if (wlan_serialization_match_cmd_id_type( + nnode, cmd, node_type)) + node_found = 1; + break; + case WLAN_SER_MATCH_CMD_TYPE_VDEV: + if (wlan_serialization_match_cmd_type( + nnode, cmd_type, node_type) && + wlan_serialization_match_cmd_vdev( + nnode, vdev, node_type)) + node_found = 1; + break; + case WLAN_SER_MATCH_CMD_ID_VDEV: + if (wlan_serialization_match_cmd_id_type( + nnode, cmd, node_type) && + wlan_serialization_match_cmd_vdev( + nnode, vdev, node_type)) + node_found = 1; + break; + default: + break; + } + + if (node_found) { + cmd_node = nnode; + break; + } + } +error: + return cmd_node; +} + +QDF_STATUS +wlan_serialization_acquire_lock(qdf_spinlock_t *lock) +{ + qdf_spin_lock_bh(lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_serialization_release_lock(qdf_spinlock_t *lock) +{ + qdf_spin_unlock_bh(lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_serialization_create_lock(qdf_spinlock_t *lock) +{ + qdf_spinlock_create(lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_serialization_destroy_lock(qdf_spinlock_t *lock) +{ + qdf_spinlock_destroy(lock); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utils_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utils_i.h new file mode 100644 index 0000000000000000000000000000000000000000..6b9c82f0bfbfc1e88a207f96ec36c784f4bff50c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utils_i.h @@ -0,0 +1,702 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_utils_i.h + * This file defines the prototypes for the utility helper functions + * for the serialization component. + */ +#ifndef __WLAN_SERIALIZATION_UTILS_I_H +#define __WLAN_SERIALIZATION_UTILS_I_H + +#include +#include +#include +#include +#include +#include +#include +#include "wlan_serialization_rules_i.h" +#ifdef WLAN_SER_DEBUG +#include "wlan_serialization_debug_i.h" +#endif + +/* + * Below bit positions are used to identify if a + * serialization command is in use or marked for + * deletion. + * CMD_MARKED_FOR_ACTIVATION - The command is about to be activated + * CMD_IS_ACTIVE - The command is active and currently in use + */ +#define CMD_MARKED_FOR_ACTIVATION 1 +#define CMD_IS_ACTIVE 2 +#define CMD_ACTIVE_MARKED_FOR_CANCEL 3 +#define CMD_ACTIVE_MARKED_FOR_REMOVAL 4 +/** + * struct wlan_serialization_timer - Timer used for serialization + * @cmd: Cmd to which the timer is linked + * @timer: Timer associated with the command + * + * Timers are allocated statically during init, one each for the + * maximum active commands permitted in the system. Once a cmd is + * moved from pending list to active list, the timer is activated + * and once the cmd is completed, the timer is cancelled. Timer is + * also cancelled if the command is aborted + * + * The timers are maintained per psoc. A timer is associated to + * unique combination of pdev, cmd_type and cmd_id. + */ +struct wlan_serialization_timer { + struct wlan_serialization_command *cmd; + qdf_timer_t timer; +}; + +/** + * enum wlan_serialization_node - Types of available nodes in serialization list + * @WLAN_SER_PDEV_NODE: pdev node from the pdev queue + * @WLAN_SER_VDEV_NODE: vdev node from the vdev queue + */ +enum wlan_serialization_node { + WLAN_SER_PDEV_NODE, + WLAN_SER_VDEV_NODE, +}; + +/** + * struct wlan_serialization_command_list - List of commands to be serialized + * @pdev_node: PDEV node identifier in the list + * @vdev_node: VDEV node identifier in the list + * @cmd: Command to be serialized + * @cmd_in_use: flag to check if the node/entry is logically active + */ +struct wlan_serialization_command_list { + qdf_list_node_t pdev_node; + qdf_list_node_t vdev_node; + struct wlan_serialization_command cmd; + unsigned long cmd_in_use; +}; + +/** + * struct wlan_serialization_pdev_queue - queue data related to pdev + * @active_list: list to hold the commands currently being executed + * @pending_list: list to hold the commands currently pending + * @cmd_pool_list: list to hold the global command pool + * @vdev_active_cmd_bitmap: Active cmd bitmap of vdev for the given pdev + * @blocking_cmd_active: Indicate if a blocking cmd is in active execution + * @blocking_cmd_waiting: Indicate if a blocking cmd is in pending queue + * @pdev_queue_lock: pdev lock to protect concurrent operations on the queues + */ +struct wlan_serialization_pdev_queue { + qdf_list_t active_list; + qdf_list_t pending_list; + qdf_list_t cmd_pool_list; + uint32_t vdev_active_cmd_bitmap; + bool blocking_cmd_active; + uint16_t blocking_cmd_waiting; + qdf_spinlock_t pdev_queue_lock; +#ifdef WLAN_SER_DEBUG + struct ser_history history; +#endif +}; + +/** + * struct wlan_serialization_vdev_queue - queue data related to vdev + * @active_list: list to hold the commands currently being executed + * @pending_list list: to hold the commands currently pending + * @queue_disable: is the queue disabled + */ +struct wlan_serialization_vdev_queue { + qdf_list_t active_list; + qdf_list_t pending_list; + bool queue_disable; +}; + +/** + * enum wlan_serialization_pdev_queue_type - Types of available pdev queues + * @QUEUE_COMP_SCAN: Scan queue + * @QUEUE_COMP_NON_SCAN: Non Scan queue + */ +enum serialization_pdev_queue_type { + SER_PDEV_QUEUE_COMP_SCAN, + SER_PDEV_QUEUE_COMP_NON_SCAN, + SER_PDEV_QUEUE_COMP_MAX, +}; + +/** + * enum wlan_serialization_vdev_queue_type - Types of available vdev queues + * @QUEUE_COMP_NON_SCAN: Non Scan queue + */ +enum serialization_vdev_queue_type { + SER_VDEV_QUEUE_COMP_NON_SCAN, + SER_VDEV_QUEUE_COMP_MAX, +}; + +/** + * enum wlan_serialization_match_type - Comparison options for a command + * @WLAN_SER_MATCH_VDEV: Compare vdev + * @WLAN_SER_MATCH_PDEV: Compare pdev + * @WLAN_SER_MATCH_CMD_TYPE: Compare command type + * @WLAN_SER_MATCH_CMD_TYPE_VDEV: Compare command type and vdev + * @WLAN_SER_MATCH_CMD_ID: Compare command id + * @WLAN_SER_MATCH_CMD_ID_VDEV: Compare command id and vdev + */ +enum wlan_serialization_match_type { + WLAN_SER_MATCH_VDEV, + WLAN_SER_MATCH_PDEV, + WLAN_SER_MATCH_CMD_TYPE, + WLAN_SER_MATCH_CMD_TYPE_VDEV, + WLAN_SER_MATCH_CMD_ID, + WLAN_SER_MATCH_CMD_ID_VDEV, + WLAN_SER_MATCH_MAX, +}; + +/** + * struct wlan_ser_pdev_obj - pdev obj data for serialization + * @pdev_q: Array of pdev queues + */ +struct wlan_ser_pdev_obj { + struct wlan_serialization_pdev_queue pdev_q[SER_PDEV_QUEUE_COMP_MAX]; +}; + +/** + * struct wlan_ser_vdev_priv_obj - Serialization private object of vdev + * @vdev_q: Array of vdev queues + */ +struct wlan_ser_vdev_obj { + struct wlan_serialization_vdev_queue vdev_q[SER_VDEV_QUEUE_COMP_MAX]; +}; + +/** + * struct wlan_ser_psoc_obj - psoc obj data for serialization + * @comp_info_cb - module level callback + * @apply_rules_cb - pointer to apply rules on the cmd + * @timers - Timers associated with the active commands + * @max_axtive_cmds - Maximum active commands allowed + * + * Serialization component takes a command as input and checks whether to + * allow/deny the command. It will use the module level callback registered + * by each component to fetch the information needed to apply the rules. + * Once the information is available, the rules callback registered for each + * command internally by serialization will be applied to determine the + * checkpoint for the command. If allowed, command will be put into active/ + * pending list and each active command is associated with a timer. + */ +struct wlan_ser_psoc_obj { + wlan_serialization_comp_info_cb comp_info_cb[ + WLAN_SER_CMD_MAX][WLAN_UMAC_COMP_ID_MAX]; + wlan_serialization_apply_rules_cb apply_rules_cb[WLAN_SER_CMD_MAX]; + struct wlan_serialization_timer *timers; + uint8_t max_active_cmds; + qdf_spinlock_t timer_lock; +}; + +/** + * wlan_serialization_remove_cmd_from_queue() - to remove command from + * given queue + * @queue: queue from which command needs to be removed + * @cmd: command to match in the queue + * @pcmd_list: Pointer to command list containing the command + * @ser_pdev_obj: pointer to private pdev serialization object + * @node_type: Pdev node or vdev node + * + * This API takes the queue, it matches the provided command from this queue + * and removes it. Before removing the command, it will notify the caller + * that if it needs to remove any memory allocated by caller. + * + * Return: QDF_STATUS_SUCCESS on success, error code on failure + */ +QDF_STATUS +wlan_serialization_remove_cmd_from_queue( + qdf_list_t *queue, + struct wlan_serialization_command *cmd, + struct wlan_serialization_command_list **pcmd_list, + struct wlan_ser_pdev_obj *ser_pdev_obj, + enum wlan_serialization_node node_type); + +/** + * wlan_serialization_add_cmd_from_queue() - Add a cmd to + * given queue + * @queue: queue from which command needs to be removed + * @cmd_list: Pointer to command list containing the command + * @ser_pdev_obj: pointer to private pdev serialization object + * @is_cmd_for_active_queue: Add cmd to active or pending queue + * @node_type: Pdev node or vdev node + * + * Return: Status of the serialization request + */ +enum wlan_serialization_status +wlan_serialization_add_cmd_to_queue( + qdf_list_t *queue, + struct wlan_serialization_command_list *cmd_list, + struct wlan_ser_pdev_obj *ser_pdev_obj, + uint8_t is_cmd_for_active_queue, + enum wlan_serialization_node node_type); + +/** + * wlan_serialization_get_psoc_from_cmd() - get psoc from provided cmd + * @cmd: pointer to actual command + * + * This API will get the pointer to psoc through checking type of cmd + * + * Return: pointer to psoc + */ +struct wlan_objmgr_psoc* +wlan_serialization_get_psoc_from_cmd(struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_get_pdev_from_cmd() - get pdev from provided cmd + * @cmd: pointer to actual command + * + * This API will get the pointer to pdev through checking type of cmd + * + * Return: pointer to pdev + */ +struct wlan_objmgr_pdev* +wlan_serialization_get_pdev_from_cmd(struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_get_vdev_from_cmd() - get vdev from provided cmd + * @cmd: pointer to actual command + * + * This API will get the pointer to vdev through checking type of cmd + * + * Return: pointer to vdev + */ +struct wlan_objmgr_vdev* +wlan_serialization_get_vdev_from_cmd(struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_get_cmd_from_queue() - to extract command from given queue + * @queue: pointer to queue + * @nnode: next node to extract + * + * This API will try to extract node from queue which is next to prev node. If + * no previous node is given then take out the front node of the queue. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_serialization_get_cmd_from_queue( + qdf_list_t *queue, qdf_list_node_t **nnode); + +/** + * wlan_serialization_stop_timer() - to stop particular timer + * @ser_timer: pointer to serialization timer + * + * This API stops the particular timer + * + * Return: QDF_STATUS + */ +QDF_STATUS +wlan_serialization_stop_timer(struct wlan_serialization_timer *ser_timer); +/** + * wlan_serialization_cleanup_vdev_timers() - clean-up all timers for a vdev + * + * @vdev: pointer to vdev object + * + * This API is to cleanup all the timers for a vdev. + * It can be used when serialization vdev destroy is called. + * It will make sure that if timer is running then it will + * stop and destroys the timer + * + * Return: QDF_STATUS + */ + +QDF_STATUS wlan_serialization_cleanup_vdev_timers( + struct wlan_objmgr_vdev *vdev); + +/** + * wlan_serialization_cleanup_all_timers() - to clean-up all timers + * + * @psoc_ser_ob: pointer to serialization psoc private object + * + * This API is to cleanup all the timers. it can be used when serialization + * module is exiting. it will make sure that if timer is running then it will + * stop and destroys the timer + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_serialization_cleanup_all_timers( + struct wlan_ser_psoc_obj *psoc_ser_ob); + +/** + * wlan_serialization_validate_cmd() - Validate the command + * @comp_id: Component ID + * @cmd_type: Command Type + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_serialization_validate_cmd( + enum wlan_umac_comp_id comp_id, + enum wlan_serialization_cmd_type cmd_type); + +/** + * wlan_serialization_validate_cmd_list() - Validate the command list + * @cmd_list: Serialization command list + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_serialization_validate_cmd_list( + struct wlan_serialization_command_list *cmd_list); + +/** + * wlan_serialization_validate_cmdtype() - Validate the command type + * @cmd_type: Command Type + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_serialization_validate_cmdtype( + enum wlan_serialization_cmd_type cmd_type); + +/** + * wlan_serialization_destroy_pdev_list() - Release the pdev cmds and + * destroy list + * @pdev_queue: Pointer to the pdev queue + * + * Return: None + */ +void wlan_serialization_destroy_pdev_list( + struct wlan_serialization_pdev_queue *pdev_queue); + +/** + * wlan_serialization_destroy_vdev_list() - Release the vdev cmds and + * destroy list + * @list: List to be destroyed + * + * Return: None + */ +void wlan_serialization_destroy_vdev_list(qdf_list_t *list); + +/** + * wlan_serialization_get_psoc_obj() - Return the component private obj + * @psoc: Pointer to the PSOC object + * + * Return: Serialization component's PSOC level private data object + */ +struct wlan_ser_psoc_obj *wlan_serialization_get_psoc_obj( + struct wlan_objmgr_psoc *psoc); + +/** + * wlan_serialization_get_pdev_obj() - Return the component private obj + * @psoc: Pointer to the PDEV object + * + * Return: Serialization component's PDEV level private data object + */ +struct wlan_ser_pdev_obj *wlan_serialization_get_pdev_obj( + struct wlan_objmgr_pdev *pdev); + +/** + * wlan_serialization_get_vdev_obj() - Return the component private obj + * @vdev: Pointer to the VDEV object + * + * Return: Serialization component's VDEV level private data object + */ +struct wlan_ser_vdev_obj *wlan_serialization_get_vdev_obj( + struct wlan_objmgr_vdev *vdev); + +/** + * wlan_serialization_is_cmd_in_vdev_list() - Check Node present in VDEV list + * @vdev: Pointer to the VDEV object + * @queue: Pointer to the qdf_list_t + * @node_type: Pdev node or vdev node + * + * Return: Boolean true or false + */ +bool +wlan_serialization_is_cmd_in_vdev_list( + struct wlan_objmgr_vdev *vdev, qdf_list_t *queue, + enum wlan_serialization_node node_type); + +/** + * wlan_serialization_is_cmd_in_pdev_list() - Check Node present in PDEV list + * @pdev: Pointer to the PDEV object + * @queue: Pointer to the qdf_list_t + * + * Return: Boolean true or false + */ +bool +wlan_serialization_is_cmd_in_pdev_list( + struct wlan_objmgr_pdev *pdev, qdf_list_t *queue); + +/** + * wlan_serialization_is_cmd_in_active_pending() - return cmd status + * active/pending queue + * @cmd_in_active: CMD in active list + * @cmd_in_pending: CMD in pending list + * + * Return: enum wlan_serialization_cmd_status + */ +enum wlan_serialization_cmd_status +wlan_serialization_is_cmd_in_active_pending( + bool cmd_in_active, bool cmd_in_pending); + +/** + * wlan_serialization_is_cmd_present_in_given_queue() - Check if the cmd is + * present in the given queue + * @queue: List of commands which has to be searched + * @cmd: Serialization command information + * @node_type: Pdev node or vdev node + * + * Return: Boolean true or false + */ +bool wlan_serialization_is_cmd_present_in_given_queue( + qdf_list_t *queue, + struct wlan_serialization_command *cmd, + enum wlan_serialization_node node_type); + +/** + * wlan_serialization_timer_destroy() - destroys the timer + * @ser_timer: pointer to particular timer + * + * This API destroys the memory allocated by timer and assigns cmd member of + * that timer structure to NULL + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_serialization_timer_destroy( + struct wlan_serialization_timer *ser_timer); + +/** + * wlan_serialization_list_empty() - check if the list is empty + * @queue: Queue/List that needs to be checked for emptiness + * + * Return: true if list is empty and false otherwise + */ +bool wlan_serialization_list_empty(qdf_list_t *queue); + +/** + * wlan_serialization_list_size() - Find the size of the provided queue + * @queue: Queue/List for which the size/length is to be returned + * + * Return: size/length of the queue/list + */ +uint32_t wlan_serialization_list_size(qdf_list_t *queue); + +/** + * wlan_serialization_match_cmd_type() - Check for a match on given nnode + * @nnode: The node on which the matching has to be done + * @cmd_type: Command type that needs to be matched + * @node_type: Pdev node or vdev node + * + * This API will check if the cmd ID and cmd type of the given nnode are + * matching with the one's that are being passed to this function. + * + * Return: True if matched,false otherwise. + */ +bool wlan_serialization_match_cmd_type( + qdf_list_node_t *nnode, + enum wlan_serialization_cmd_type, + enum wlan_serialization_node node_type); + +/** + * wlan_serialization_match_cmd_id_type() - Check for a match on given nnode + * @nnode: The node on which the matching has to be done + * @cmd: Command that needs to be matched + * @node_type: Pdev node or vdev node + * + * This API will check if the cmd ID and cmd type of the given nnode are + * matching with the one's that are being passed to this function. + * + * Return: True if matched,false otherwise. + */ +bool wlan_serialization_match_cmd_id_type( + qdf_list_node_t *nnode, + struct wlan_serialization_command *cmd, + enum wlan_serialization_node node_type); + +/** + * wlan_serialization_match_cmd_vdev() - Check for a match on given nnode + * @nnode: The node on which the matching has to be done + * @vdev: VDEV object that needs to be matched + * @node_type: Pdev node or vdev node + * + * This API will check if the VDEV object of the given nnode are + * matching with the one's that are being passed to this function. + * + * Return: True if matched,false otherwise. + */ +bool wlan_serialization_match_cmd_vdev(qdf_list_node_t *nnode, + struct wlan_objmgr_vdev *vdev, + enum wlan_serialization_node node_type); + +/** + * wlan_serialization_match_cmd_pdev() - Check for a match on given nnode + * @nnode: The node on which the matching has to be done + * @pdev: pdev object that needs to be matched + * @node_type: Node type. Pdev node or vdev node + * + * This API will check if the PDEV object of the given nnode are + * matching with the one's that are being passed to this function. + * + * Return: True if matched,false otherwise. + */ +bool wlan_serialization_match_cmd_pdev(qdf_list_node_t *nnode, + struct wlan_objmgr_pdev *pdev, + enum wlan_serialization_node node_type); + +/** + * wlan_serialization_match_cmd_blocking() - Check for a blocking cmd + * @nnode: The node on which the matching has to be done + * @node_type: Pdev node or vdev node + * + * This API will check if the give command of nnode is a blocking command. + * + * Return: True if blocking command, false otherwise. + */ +bool wlan_serialization_match_cmd_blocking( + qdf_list_node_t *nnode, + enum wlan_serialization_node node_type); + +/** + * wlan_serialization_find_cmd() - Find the cmd matching the given criterias + * @cmd: Serialization command information + * @cmd_type: Command type to be matched + * @pdev: pdev object that needs to be matched + * @vdev: vdev object that needs to be matched + * @node_type: Node type. Pdev node or vdev node + * + * Return: Pointer to the node member in the list + */ +qdf_list_node_t * +wlan_serialization_find_cmd(qdf_list_t *queue, uint32_t match_type, + struct wlan_serialization_command *cmd, + enum wlan_serialization_cmd_type cmd_type, + struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev, + enum wlan_serialization_node node_type); + +/** + * wlan_serialization_remove_front() - Remove the front node of the list + * @list: List from which the node is to be removed + * @node: Pointer to store the node that is removed + * + * Return: QDF_STATUS Success or Failure + */ +QDF_STATUS wlan_serialization_remove_front( + qdf_list_t *list, + qdf_list_node_t **node); + +/** + * wlan_serialization_remove_node() - Remove the given node from the list + * @list: List from which the node is to be removed + * @node: Pointer to the node that is to be removed + * + * Return: QDF_STATUS Success or Failure + */ +QDF_STATUS wlan_serialization_remove_node( + qdf_list_t *list, + qdf_list_node_t *node); + +/** + * wlan_serialization_insert_front() - Insert a node into the front of the list + * @list: List to which the node is to be inserted + * @node: Pointer to the node that is to be inserted + * + * Return: QDF_STATUS Success or Failure + */ +QDF_STATUS wlan_serialization_insert_front( + qdf_list_t *list, + qdf_list_node_t *node); + +/** + * wlan_serialization_insert_back() - Insert a node into the back of the list + * @list: List to which the node is to be inserted + * @node: Pointer to the node that is to be inserted + * + * Return: QDF_STATUS Success or Failure + */ +QDF_STATUS wlan_serialization_insert_back( + qdf_list_t *list, + qdf_list_node_t *node); + +/** + * wlan_serialization_peek_front() - Peek the front node of the list + * @list: List on which the node is to be peeked + * @node: Pointer to the store the node that is being peeked + * + * Return: QDF_STATUS Success or Failure + */ +QDF_STATUS wlan_serialization_peek_front( + qdf_list_t *list, + qdf_list_node_t **node); + +/** + * wlan_serialization_peek_next() - Peek the next node of the list + * @list: List on which the node is to be peeked + * @node1: Pointer to the node1 from where the next node has to be peeked + * @node2: Pointer to the store the node that is being peeked + * + * Return: QDF_STATUS Success or Failure + */ +QDF_STATUS wlan_serialization_peek_next( + qdf_list_t *list, + qdf_list_node_t *node1, + qdf_list_node_t **node2); + +/** + * wlan_serialization_acquire_lock() - Acquire lock to the given queue + * @lock: Pointer to the lock + * + * Return: QDF_STATUS success or failure + */ +QDF_STATUS +wlan_serialization_acquire_lock(qdf_spinlock_t *lock); + +/** + * wlan_serialization_release_lock() - Release lock to the given queue + * @lock: Pointer to the lock + * + * Return: QDF_STATUS success or failure + */ +QDF_STATUS +wlan_serialization_release_lock(qdf_spinlock_t *lock); + +/** + * wlan_serialization_create_lock() - Init the lock to the given queue + * @lock: Pointer to the lock + * + * Return: QDF_STATUS success or failure + */ +QDF_STATUS +wlan_serialization_create_lock(qdf_spinlock_t *lock); + +/** + * wlan_serialization_destroy_lock() - Deinit the lock to the given queue + * @lock: Pointer to the lock + * + * Return: QDF_STATUS success or failure + */ +QDF_STATUS +wlan_serialization_destroy_lock(qdf_spinlock_t *lock); + +/** + * wlan_ser_update_cmd_history() - Update serialization queue history + * @pdev_queue:serialization pdev queue + * @cmd: cmd to be added/remeoved + * @ser_reason: serialization action that resulted in addition/removal + * @add_remove: added or removed from queue + * @active_queue:for active queue + * + * Return: QDF_STATUS success or failure + */ + +void wlan_ser_update_cmd_history( + struct wlan_serialization_pdev_queue *pdev_queue, + struct wlan_serialization_command *cmd, + enum ser_queue_reason ser_reason, + bool add_remove, + bool active_queue); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/sm_engine/inc/wlan_sm_engine.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/sm_engine/inc/wlan_sm_engine.h new file mode 100644 index 0000000000000000000000000000000000000000..501a9a7ee00cb36487a87dcca9692884ef29f43d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/sm_engine/inc/wlan_sm_engine.h @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Define general SM framework, The modules can implement state machine + * using framework, it provides framework for state, event, state transition, + * event handling + * + * The module, whoever implement SM using this framework need to define an array + * of structures (of type struct wlan_sm_state_info) as below, + * for example, sample state array definition + * + * struct wlan_sm_state_info sm_info[] = { + * { + * (uint8_t) WLAN_VDEV_S_INIT, + * (uint8_t) WLAN_SM_ENGINE_STATE_NONE, + * (uint8_t) WLAN_SM_ENGINE_STATE_NONE, + * true, + * "INIT", + * mlme_vdev_state_init_entry, --> gets invoked on entering the state + * mlme_vdev_state_init_exit, --> gets invoked before exiting the state + * mlme_vdev_state_init_event --> gets invoked on event dispatch + * }, + * { + * (uint8_t) WLAN_VDEV_S_START, + * (uint8_t) WLAN_SM_ENGINE_STATE_NONE, + * (uint8_t) WLAN_SM_ENGINE_STATE_NONE, + * true, + * "START", + * mlme_vdev_state_start_entry, + * mlme_vdev_state_start_exit, + * mlme_vdev_state_start_event + * }, + * { + * (uint8_t) WLAN_VDEV_S_DFS_CAC_WAIT, + * (uint8_t) WLAN_SM_ENGINE_STATE_NONE, + * (uint8_t) WLAN_SM_ENGINE_STATE_NONE, + * true, + * "DFS_CAC_WAIT", + * mlme_vdev_state_dfs_cac_wait_entry, + * mlme_vdev_state_dfs_cac_wait_exit, + * mlme_vdev_state_dfs_cac_wait_event + * }, + * ... + * }; + * + * Invoke wlan_sm_create() with this state array as below + * + * sm = wlan_sm_create("VDEV_SM", vdev_obj, WLAN_VDEV_S_INIT, sm_info, 3, + * event_names[], num_events); + * + * on successful creation, invoke below functions to dispatch events and state + * transition + * + * Event dispatch: + * wlan_sm_dispatch(sm, start_event_id, 0, NULL); + * + * State transition: + * wlan_sm_transition_to(sm, WLAN_VDEV_S_INIT); + * + * + */ +#ifndef _WLAN_SM_ENGINE_H_ +#define _WLAN_SM_ENGINE_H_ + +#include +#include +#include + +/* invalid state */ +#define WLAN_SM_ENGINE_STATE_NONE 255 +/* invalid event */ +#define WLAN_SM_ENGINE_EVENT_NONE 255 + +#define WLAN_SM_ENGINE_MAX_STATE_NAME 128 +#define WLAN_SM_ENGINE_MAX_NAME 64 +#define WLAN_SM_ENGINE_MAX_STATES 200 +#define WLAN_SM_ENGINE_MAX_EVENTS 200 + +/** + * struct wlan_sm_state_info - state structure definition + * @state: State id + * @parent_state: Parent state id (optional) + * @initial_substate: Initial sub state of this state + * @has_substates: flag to specify, whether it has sub states + * @name: Name of the state + * @wlan_sm_entry: State entry callback poitner + * @wlan_sm_exit: State exit callback poitner + * @wlan_sm_event: State event callback poitner + */ +struct wlan_sm_state_info { + uint8_t state; + uint8_t parent_state; + uint8_t initial_substate; + uint8_t has_substates; + const char *name; + void (*wlan_sm_entry) (void *ctx); + void (*wlan_sm_exit) (void *ctx); + bool (*wlan_sm_event) (void *ctx, uint16_t event, + uint16_t event_data_len, void *event_data); +}; + +/** + * struct wlan_sm - state machine structure + * @name: Name of the statemachine + * @cur_state: Current state (state/sub-state) + * @num_states: Number of states + * @last_event: Holds the last handled event of SM + * @state_info: Initialized States' table + * @ctx: Holds the caller's context + * @in_state_transition: Flag to check whether state transition is in progress + * @event_names: Array of event names + * @num_event_names: Number of event names + * @history: Holds the SM history pointer + */ +struct wlan_sm { + uint8_t name[WLAN_SM_ENGINE_MAX_NAME]; + uint8_t cur_state; + uint8_t num_states; + uint8_t last_event; + struct wlan_sm_state_info *state_info; + void *ctx; + qdf_atomic_t in_state_transition; + const char **event_names; + uint32_t num_event_names; +#ifdef SM_ENG_HIST_ENABLE + struct wlan_sm_history history; +#endif +}; + +#define WLAN_SM_ENGINE_ENTRY(name, state, parent, initsubstate, has_substates) \ + { state, parent, initsubstate, has_substates, \ + "##name", wlan_sm_##name_entry, wlan_sm_##name_exit,\ + wlan_sm_##name_event } + +/* + * flag definitions + */ +#define WLAN_SM_ENGINE_ASYNCHRONOUS 0x0 /* run SM asynchronously */ +#define WLAN_SM_ENGINE_SYNCHRONOUS 0x1 /* run SM synchronously */ + +/** + * wlan_sm_create() - SM create + * @name: Name of SM owner module + * @ctx: caller pointer, used on invoking callbacks + * @init_state: Default state of the SM + * @state_info: States' definitions + * @num_state: Number of states + * @event_names: Event name table + * @num_event_names: Number of events + * + * Creates SM object, intializes with init_state, stores the name and owner + * module pointer, states definition table, and event name table + * + * Return: Handle to struct wlan_sm on successful creation, + * NULL on Failure + */ +struct wlan_sm *wlan_sm_create(const char *name, void *ctx, + uint8_t init_state, + struct wlan_sm_state_info *state_info, + uint8_t num_states, + const char **event_names, + uint32_t num_event_names); + +/** + * wlan_sm_delete() - SM delete + * @sm: state machine handle + * + * Delete SM object + * + * Return: void + */ +void wlan_sm_delete(struct wlan_sm *sm); + +/** + * wlan_sm_dispatch() - API to notify event to SM + * @sm: state machine handle + * @event: event id + * @event_data_len: Size of event data + * @event_data: Event data + * + * Notifies event to SM, it invokes event callback of the current state of SM + * + * Return: QDF_STATUS_SUCCESS for handling + * QDF_STATUS_E_INVAL for not handling + */ +QDF_STATUS wlan_sm_dispatch(struct wlan_sm *sm, uint16_t event, + uint16_t event_data_len, void *event_data); + +/** + * wlan_sm_transition_to() - API to move the state of SM + * @sm: state machine handle + * @state: State id + * + * Moves the SM's state + * + * Return: void + */ +void wlan_sm_transition_to(struct wlan_sm *sm, uint8_t state); + +/** + * wlan_sm_get_lastevent() - API to get last dispatched event + * @sm: state machine handle + * + * Gets the last dispatched event + * + * Return: event id + */ +uint8_t wlan_sm_get_lastevent(struct wlan_sm *sm); + +/** + * wlan_sm_get_current_state() - API to get current state of SM + * @sm: state machine handle + * + * Gets the current state of SM + * + * Return: state id + */ +uint8_t wlan_sm_get_current_state(struct wlan_sm *sm); + +/** + * wlan_sm_get_current_state_name() - API to get current state's name of SM + * @sm: state machine handle + * + * Gets the current state name of SM + * + * Return: name of the state + */ +const char *wlan_sm_get_current_state_name(struct wlan_sm *sm); + +/** + * wlan_sm_get_state_name() - API to get state's name + * @sm: state machine handle + * @state: state id + * + * Gets the given state name of SM + * + * Return: name of the state + */ +const char *wlan_sm_get_state_name(struct wlan_sm *sm, uint8_t state); + +/** + * wlan_sm_reset() - API to reset SM state + * @sm: state machine handle + * @init_state: state to reset SM + * + * Resets the SM to given state + * + * Return: void + */ +void wlan_sm_reset(struct wlan_sm *sm, uint8_t init_state); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/sm_engine/inc/wlan_sm_engine_dbg.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/sm_engine/inc/wlan_sm_engine_dbg.h new file mode 100644 index 0000000000000000000000000000000000000000..9edb7eb03a9fc033feacc4974daabc8be170e6fa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/sm_engine/inc/wlan_sm_engine_dbg.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Define the debug data structure of UMAC SM + */ +#ifndef _WLAN_SM_ENGINE_DBG_H_ +#define _WLAN_SM_ENGINE_DBG_H_ + +#include +#include + +#define sm_engine_alert(params...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_SM_ENGINE, params) + +#define sm_engine_err(params...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_SM_ENGINE, params) + +#define sm_engine_warn(params...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_SM_ENGINE, params) + +#define sm_engine_info(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_SM_ENGINE, params) + +#define sm_engine_debug(params...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_SM_ENGINE, params) + +#define sm_engine_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_SM_ENGINE, params) +#define sm_engine_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_SM_ENGINE, params) +#define sm_engine_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_SM_ENGINE, params) +#define sm_engine_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_SM_ENGINE, params) +#define sm_engine_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_SM_ENGINE, params) + +#define WLAN_SM_ENGINE_HISTORY_SIZE 50 +struct wlan_sm; +/** + * enum wlan_sm_trace_type - history element type + * @SM_EVENT_STATE_TRANSITION - Represents state transition + * @SM_EVENT_MSG_PROCESSING - Represents event processing + */ +enum wlan_sm_trace_type { + SM_EVENT_STATE_TRANSITION = 1, + SM_EVENT_MSG_PROCESSING, +}; + +#ifdef SM_ENG_HIST_ENABLE + +/** + * struct wlan_sm_history_info - history element structure + * @trace_type: history element type + * @event_type: Type of the event + * @initial_state: Current state (state/sub-state) + * @final_state: New state + */ +struct wlan_sm_history_info { + enum wlan_sm_trace_type trace_type; + uint8_t event_type; + uint8_t initial_state; + uint8_t final_state; +}; + +/** + * struct wlan_sm_history - history structure + * @sm_history_lock: SM history lock + * @index: Last updated entry index + * @data: Histoy elements array + */ +struct wlan_sm_history { + qdf_spinlock_t sm_history_lock; + uint8_t index; + struct wlan_sm_history_info data[WLAN_SM_ENGINE_HISTORY_SIZE]; +}; + +/** + * wlan_sm_save_history() - API to save SM history + * @sm: state machine handle + * @trace_type: type of operation + * @initial_state: current state + * @final_state: Resultant state + * @event_type: Event id + * + * Stores the SM state transition and event processing + * + * Return: void + */ +void wlan_sm_save_history(struct wlan_sm *sm, + enum wlan_sm_trace_type trace_type, + uint8_t initial_state, uint8_t final_state, + uint16_t event_type); + +/** + * wlan_sm_history_init() - API to initialize SM history module + * @sm: state machine handle + * + * Initializes SM history module + * + * Return: void + */ +void wlan_sm_history_init(struct wlan_sm *sm); + +/** + * wlan_sm_history_delete() - API to delete SM history module + * @sm: state machine handle + * + * Deletes SM history module + * + * Return: void + */ +void wlan_sm_history_delete(struct wlan_sm *sm); + +/** + * wlan_sm_print_history() - API to print SM history + * @sm: state machine handle + * + * Prints SM history + * + * Return: void + */ +void wlan_sm_print_history(struct wlan_sm *sm); + +#if SM_HIST_DEBUGFS_SUPPORT +/** + * wlan_sm_print_fs_history() - API to print SM history in proc + * @sm: state machine handle + * @m: debug fs file handle + * + * Prints SM history through proc + * + * Return: void + */ +void wlan_sm_print_fs_history(struct wlan_sm *sm, qdf_debugfs_file_t m); +#endif +#else /* SM_ENG_HIST_ENABLE */ + +/** + * wlan_sm_save_history() - API to save SM history + * @sm: state machine handle + * @trace_type: type of operation + * @initial_state: current state + * @final_state: Resultant state + * @event_type: Event id + * + * Stores the SM state transition and event processing + * + * Return: void + */ +static inline void wlan_sm_save_history(struct wlan_sm *sm, + enum wlan_sm_trace_type trace_type, + uint8_t initial_state, + uint8_t final_state, + uint16_t event_type) +{ +} + +/** + * wlan_sm_history_init() - API to initialize SM history module + * @sm: state machine handle + * + * Initializes SM history module + * + * Return: void + */ +static inline void wlan_sm_history_init(struct wlan_sm *sm) +{ +} + +/** + * wlan_sm_history_delete() - API to delete SM history module + * @sm: state machine handle + * + * Deletes SM history module + * + * Return: void + */ +static inline void wlan_sm_history_delete(struct wlan_sm *sm) +{ +} + +/** + * wlan_sm_print_history() - API to print SM history + * @sm: state machine handle + * + * Prints SM history + * + * Return: void + */ +static inline void wlan_sm_print_history(struct wlan_sm *sm) +{ +} + +#endif /* SM_ENG_HIST_ENABLE */ +#endif /* _WLAN_SM_ENGINE_DBG_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/sm_engine/src/wlan_sm_engine.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/sm_engine/src/wlan_sm_engine.c new file mode 100644 index 0000000000000000000000000000000000000000..6b0d6f23e0db594b78629b62e4cc5a251e0ac055 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/sm_engine/src/wlan_sm_engine.c @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implements general SM framework + */ + +#include "wlan_sm_engine.h" +#include "wlan_sm_engine_dbg.h" +#include +#include +#include + +QDF_STATUS wlan_sm_dispatch(struct wlan_sm *sm, uint16_t event, + uint16_t event_data_len, void *event_data) +{ + bool event_handled = false; + uint8_t state; + const char *event_name = NULL; + + if (!sm) { + sm_engine_err("SM is NULL"); + return QDF_STATUS_E_FAILURE; + } + + state = sm->cur_state; + + if (event == WLAN_SM_ENGINE_EVENT_NONE) { + sm_engine_err("%s: invalid event %d", sm->name, event); + return QDF_STATUS_E_FAILURE; + } + sm->last_event = event; + + wlan_sm_save_history(sm, SM_EVENT_MSG_PROCESSING, sm->cur_state, + sm->cur_state, event); + + if (sm->event_names) { + if (event < sm->num_event_names) + event_name = sm->event_names[event]; + + sm_engine_nofl_debug("%s: %s, %s", sm->name, + sm->state_info[state].name, + event_name ? event_name : "UNKNOWN_EVENT"); + } else { + sm_engine_nofl_debug("%s: %s ev [%d]", sm->name, + sm->state_info[state].name, event); + } + + if (state != WLAN_SM_ENGINE_STATE_NONE) { + event_handled = (*sm->state_info[state].wlan_sm_event) ( + sm->ctx, event, event_data_len, event_data); + if (!event_handled) { + sm_engine_nofl_info("%s: event %d not handled in state %s", + sm->name, event, + sm->state_info[sm->cur_state].name); + return QDF_STATUS_E_INVAL; + } + } + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(wlan_sm_dispatch); + +void wlan_sm_transition_to(struct wlan_sm *sm, uint8_t state) +{ + struct wlan_sm_state_info *state_info; + uint8_t new_state; + uint8_t old_state; + uint8_t new_sub_st; + uint8_t ol_sub_st; + uint8_t cur_state; + + if (!sm) { + sm_engine_err("SM is NULL"); + return; + } + + state_info = sm->state_info; + cur_state = sm->cur_state; + + /* cannot change state from state entry/exit routines */ + if (qdf_atomic_read(&sm->in_state_transition)) { + sm_engine_alert( + "%s: can not call state transition from entry/exit routines", + sm->name); + QDF_BUG(0); + return; + } + + qdf_atomic_set(&sm->in_state_transition, 1); + + wlan_sm_save_history(sm, SM_EVENT_STATE_TRANSITION, sm->cur_state, + state, 0xFF); + + if ((state == WLAN_SM_ENGINE_STATE_NONE) || + (state >= WLAN_SM_ENGINE_MAX_STATES) || + (state >= sm->num_states)) { + sm_engine_err( + "%s: to state %d needs to be a valid state current_state=%d", + sm->name, cur_state, state); + return; + } + + /* + * Here state and sub state are derived for debug printing only + * as SME keeps state and sub state as flat, to differentiate between + * state and substate, checks current state if it has parent state, + * the parent state is printed along with the sub state + */ + if (state_info[cur_state].parent_state != WLAN_SM_ENGINE_STATE_NONE) + old_state = state_info[cur_state].parent_state; + else + old_state = cur_state; + + if (state_info[state].parent_state != WLAN_SM_ENGINE_STATE_NONE) + new_state = state_info[state].parent_state; + else + new_state = state; + + if (state_info[cur_state].parent_state != WLAN_SM_ENGINE_STATE_NONE) + ol_sub_st = cur_state; + else + ol_sub_st = 0; + + if (state_info[state].parent_state != WLAN_SM_ENGINE_STATE_NONE) + new_sub_st = state; + else + new_sub_st = 0; + + sm_engine_nofl_debug("%s: %s > %s, %s > %s", sm->name, + state_info[old_state].name, + state_info[new_state].name, + ol_sub_st ? state_info[ol_sub_st].name : "IDLE", + new_sub_st ? state_info[new_sub_st].name : "IDLE"); + + /* + * call the exit function(s) of the current state hierarchy + * starting from substate. + */ + while (cur_state != WLAN_SM_ENGINE_STATE_NONE) { + if (state_info[cur_state].wlan_sm_exit) + state_info[cur_state].wlan_sm_exit(sm->ctx); + + cur_state = state_info[cur_state].parent_state; + } + + /* + * call the entry function(s) of the current state hierarchy + * starting from superstate. + */ + cur_state = state; + while (cur_state != WLAN_SM_ENGINE_STATE_NONE) { + if (state_info[cur_state].wlan_sm_entry) + state_info[cur_state].wlan_sm_entry(sm->ctx); + + sm->cur_state = cur_state; + cur_state = state_info[cur_state].initial_substate; + + if (cur_state != WLAN_SM_ENGINE_STATE_NONE) + sm_engine_nofl_debug("%s: Initial sub state %s", + sm->name, + state_info[cur_state].name); + } + qdf_atomic_set(&sm->in_state_transition, 0); +} + +qdf_export_symbol(wlan_sm_transition_to); + +void wlan_sm_reset(struct wlan_sm *sm, uint8_t init_state) +{ + sm->cur_state = init_state; +} + +static QDF_STATUS wlan_sm_validate_state_info(const char *name, + const struct wlan_sm_state_info *state_info, + uint8_t i) +{ + bool state_visited[WLAN_SM_ENGINE_MAX_STATES] = {false}; + uint8_t state, next_state; + /* + * make sure that the state definitions are in order + */ + if ((state_info[i].state >= WLAN_SM_ENGINE_MAX_STATES) || + (state_info[i].state != i)) { + sm_engine_err("%s: entry %d has invalid state %d", + name, i, state_info[i].state); + + return QDF_STATUS_E_FAILURE; + } + /* detect if there is any loop in the hierarichy */ + state = state_info[i].state; + while (state != WLAN_SM_ENGINE_STATE_NONE) { + if (state_visited[state]) { + sm_engine_err("%s: detected a loop with entry %d", + name, i); + return QDF_STATUS_E_FAILURE; + } + + state_visited[state] = true; + next_state = state_info[state].parent_state; + if (next_state != WLAN_SM_ENGINE_STATE_NONE) { + if (!state_info[next_state].has_substates) { + sm_engine_err( + "%s: state %d is marked as parent of %d but is not a super state", + name, next_state, state); + return QDF_STATUS_E_FAILURE; + } + } + state = next_state; + } + + return QDF_STATUS_SUCCESS; +} + +struct wlan_sm *wlan_sm_create(const char *name, void *ctx, + uint8_t init_state, + struct wlan_sm_state_info *state_info, + uint8_t num_states, + const char **event_names, + uint32_t num_event_names) +{ + struct wlan_sm *sm; + u_int32_t i; + + if (num_states > WLAN_SM_ENGINE_MAX_STATES) { + sm_engine_err("%s: Num states exceeded", name); + return NULL; + } + + /* + * validate the state_info table. + * the entries need to be valid and also + * need to be in order. + */ + for (i = 0; i < num_states; ++i) { + if (wlan_sm_validate_state_info(name, state_info, i) != + QDF_STATUS_SUCCESS) { + sm_engine_err("%s: states validation failed", name); + return NULL; + } + } + + sm = qdf_mem_malloc(sizeof(*sm)); + if (!sm) + return NULL; + + wlan_sm_history_init(sm); + + sm->cur_state = init_state; + sm->num_states = num_states; + sm->state_info = state_info; + sm->ctx = ctx; + sm->last_event = WLAN_SM_ENGINE_EVENT_NONE; + qdf_atomic_set(&sm->in_state_transition, 0); + sm->event_names = event_names; + sm->num_event_names = num_event_names; + + qdf_str_lcopy(sm->name, name, WLAN_SM_ENGINE_MAX_NAME); + + sm_engine_debug("%s: sm creation successful", name); + + return sm; +} + +qdf_export_symbol(wlan_sm_create); + +void wlan_sm_delete(struct wlan_sm *sm) +{ + wlan_sm_history_delete(sm); + qdf_mem_free(sm); +} + +qdf_export_symbol(wlan_sm_delete); + +uint8_t wlan_sm_get_lastevent(struct wlan_sm *sm) +{ + return sm->last_event; +} + +uint8_t wlan_sm_get_current_state(struct wlan_sm *sm) +{ + return sm->cur_state; +} + +qdf_export_symbol(wlan_sm_get_current_state); + +const char *wlan_sm_get_state_name(struct wlan_sm *sm, uint8_t state) +{ + return sm->state_info[state].name; +} + +const char *wlan_sm_get_current_state_name(struct wlan_sm *sm) +{ + return sm->state_info[sm->cur_state].name; +} + +qdf_export_symbol(wlan_sm_get_current_state_name); diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/sm_engine/src/wlan_sm_engine_dbg.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/sm_engine/src/wlan_sm_engine_dbg.c new file mode 100644 index 0000000000000000000000000000000000000000..b21ee4cc6cbf8de1710e671341e59d7b3ff23fdb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/sm_engine/src/wlan_sm_engine_dbg.c @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implements general SM debug framework + */ +#include +#include + +void wlan_sm_save_history(struct wlan_sm *sm, + enum wlan_sm_trace_type trace_type, + uint8_t initial_state, uint8_t final_state, + uint16_t event_type) +{ + struct wlan_sm_history *p_sm_history = &sm->history; + struct wlan_sm_history_info *p_memento; + + /* + * History saved in circular buffer. + * Save a pointer to next write location and increment pointer. + */ + qdf_spin_lock_bh(&p_sm_history->sm_history_lock); + p_memento = &p_sm_history->data[p_sm_history->index]; + p_sm_history->index++; + + p_sm_history->index %= WLAN_SM_ENGINE_HISTORY_SIZE; + + qdf_spin_unlock_bh(&p_sm_history->sm_history_lock); + + qdf_mem_zero(p_memento, sizeof(*p_memento)); + p_memento->trace_type = trace_type; + p_memento->initial_state = initial_state; + p_memento->final_state = final_state; + p_memento->event_type = event_type; +} + +void wlan_sm_history_init(struct wlan_sm *sm) +{ + qdf_spinlock_create(&sm->history.sm_history_lock); + qdf_mem_zero(&sm->history, sizeof(struct wlan_sm_history)); +} + +void wlan_sm_history_delete(struct wlan_sm *sm) +{ + qdf_spinlock_destroy(&sm->history.sm_history_lock); +} + +static void wlan_sm_print_history_entry(struct wlan_sm *sm, + struct wlan_sm_history_info *ent, + uint16_t i) +{ + const char *event_name = NULL; + + if (sm->event_names) { + if (ent->event_type < sm->num_event_names) + event_name = sm->event_names[ent->event_type]; + + if (!ent->trace_type) + return; + + sm_engine_nofl_err( + "|%6d |%11d |%23s[%3d] |%19s[%2d] |%19s[%2d] |", + i, ent->trace_type, + event_name ? event_name : "UNKNOWN_EVENT", + ent->event_type, + sm->state_info[ent->initial_state].name, + ent->initial_state, + sm->state_info[ent->final_state].name, + ent->final_state); + } else { + sm_engine_nofl_err( + "|%6d |%11d |%28d |%19s[%2d] |%19s[%2d] |", + i, ent->trace_type, + ent->event_type, + sm->state_info[ent->initial_state].name, + ent->initial_state, + sm->state_info[ent->final_state].name, + ent->final_state); + } +} + +void wlan_sm_print_history(struct wlan_sm *sm) +{ + struct wlan_sm_history *p_sm_history = &sm->history; + uint8_t i; + uint8_t idx; + + /* + * History saved in circular buffer. + * Save a pointer to next write location and increment pointer. + */ + qdf_spin_lock_bh(&p_sm_history->sm_history_lock); + + sm_engine_nofl_err("|%6s |%11s |%28s |%23s |%23s |", "Index", + "Trace Type", "Event", + "Initial State", "Final State"); + + for (i = 0; i < WLAN_SM_ENGINE_HISTORY_SIZE; i++) { + idx = (p_sm_history->index + i) % WLAN_SM_ENGINE_HISTORY_SIZE; + wlan_sm_print_history_entry( + sm, &p_sm_history->data[idx], idx); + } + + qdf_spin_unlock_bh(&p_sm_history->sm_history_lock); +} + +#if SM_HIST_DEBUGFS_SUPPORT +static void wlan_sm_print_fs_history_entry(struct wlan_sm *sm, + struct wlan_sm_history_info *ent, + uint16_t i, qdf_debugfs_file_t m) +{ + const char *event_name = NULL; + + if (sm->event_names) { + if (ent->event_type < sm->num_event_names) + event_name = sm->event_names[ent->event_type]; + + if (!ent->trace_type) + return; + + qdf_debugfs_printf(m, + "|%6d |%11d |%23s[%3d] |%19s[%2d] |%19s[%2d] |\n", + i, ent->trace_type, + event_name ? event_name : "UNKNOWN_EVENT", + ent->event_type, + sm->state_info[ent->initial_state].name, + ent->initial_state, + sm->state_info[ent->final_state].name, + ent->final_state); + } else { + qdf_debugfs_printf(m, + "|%6d |%11d |%28d |%19s[%2d] |%19s[%2d] |\n", + i, ent->trace_type, + ent->event_type, + sm->state_info[ent->initial_state].name, + ent->initial_state, + sm->state_info[ent->final_state].name, + ent->final_state); + } +} + +void wlan_sm_print_fs_history(struct wlan_sm *sm, qdf_debugfs_file_t m) +{ + struct wlan_sm_history *p_sm_history = &sm->history; + uint8_t i; + uint8_t idx; + + /* + * History saved in circular buffer. + * Save a pointer to next write location and increment pointer. + */ + qdf_spin_lock_bh(&p_sm_history->sm_history_lock); + qdf_debugfs_printf(m, "|%6s |%11s |%28s |%23s |%23s |\n", "Index", + "Trace Type", "Event", + "Initial State", "Final State"); + + for (i = 0; i < WLAN_SM_ENGINE_HISTORY_SIZE; i++) { + idx = (p_sm_history->index + i) % WLAN_SM_ENGINE_HISTORY_SIZE; + wlan_sm_print_fs_history_entry(sm, &p_sm_history->data[idx], + idx, m); + } + + qdf_spin_unlock_bh(&p_sm_history->sm_history_lock); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/utils/inc/wlan_utility.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/utils/inc/wlan_utility.h new file mode 100644 index 0000000000000000000000000000000000000000..bdc80be57e4225ef0d3931e0fccaab9eaab9aafd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/utils/inc/wlan_utility.h @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Contains mandatory API from legacy + */ + +#ifndef _WLAN_UTILITY_H_ +#define _WLAN_UTILITY_H_ + +#include +#include +#include +#include + +#define TGT_INVALID_SNR (0) +#define TGT_MAX_SNR (TGT_NOISE_FLOOR_DBM * (-1)) +#define TGT_NOISE_FLOOR_DBM (-96) +#define TGT_IS_VALID_SNR(x) ((x) >= 0 && (x) < TGT_MAX_SNR) +#define TGT_IS_VALID_RSSI(x) ((x) != 0xFF) + +/** + * struct wlan_vdev_ch_check_filter - vdev chan check filter object + * @flag: matches or not + * @vdev: vdev to be checked against all the active vdevs + */ +struct wlan_vdev_ch_check_filter { + uint8_t flag; + struct wlan_objmgr_vdev *vdev; +}; + +/** + * struct wlan_peer_count- vdev connected peer count + * @opmode: QDF mode + * @peer_count: peer count + **/ +struct wlan_op_mode_peer_count { + enum QDF_OPMODE opmode; + uint16_t peer_count; +}; + +/** + * wlan_chan_to_freq() - converts channel to frequency + * @chan: channel number + * + * @return frequency of the channel + */ +uint32_t wlan_chan_to_freq(uint8_t chan); + +/** + * wlan_freq_to_chan() - converts frequency to channel + * @freq: frequency + * + * Return: channel of frequency + */ +uint8_t wlan_freq_to_chan(uint32_t freq); + +/** + * wlan_is_ie_valid() - Determine if an IE sequence is valid + * @ie: Pointer to the IE buffer + * @ie_len: Length of the IE buffer @ie + * + * This function validates that the IE sequence is valid by verifying + * that the sum of the lengths of the embedded elements match the + * length of the sequence. + * + * Note well that a 0-length IE sequence is considered valid. + * + * Return: true if the IE sequence is valid, false if it is invalid + */ +bool wlan_is_ie_valid(const uint8_t *ie, size_t ie_len); + +/** + * wlan_get_ie_ptr_from_eid() - Find out ie from eid + * @eid: element id + * @ie: source ie address + * @ie_len: source ie length + * + * Return: vendor ie address - success + * NULL - failure + */ +const uint8_t *wlan_get_ie_ptr_from_eid(uint8_t eid, + const uint8_t *ie, + int ie_len); + +/** + * wlan_get_vendor_ie_ptr_from_oui() - Find out vendor ie + * @oui: oui buffer + * @oui_size: oui size + * @ie: source ie address + * @ie_len: source ie length + * + * This function find out vendor ie by pass source ie and vendor oui. + * + * Return: vendor ie address - success + * NULL - failure + */ +const uint8_t *wlan_get_vendor_ie_ptr_from_oui(const uint8_t *oui, + uint8_t oui_size, + const uint8_t *ie, + uint16_t ie_len); + +/** + * wlan_get_ext_ie_ptr_from_ext_id() - Find out ext ie + * @oui: oui buffer + * @oui_size: oui size + * @ie: source ie address + * @ie_len: source ie length + * + * This function find out ext ie from ext id (passed oui) + * + * Return: vendor ie address - success + * NULL - failure + */ +const uint8_t *wlan_get_ext_ie_ptr_from_ext_id(const uint8_t *oui, + uint8_t oui_size, + const uint8_t *ie, + uint16_t ie_len); + +/** + * wlan_is_emulation_platform() - check if platform is emulation based + * @phy_version - psoc nif phy_version + * + * Return: boolean value based on platform type + */ +bool wlan_is_emulation_platform(uint32_t phy_version); + +/** + * wlan_get_pdev_id_from_vdev_id() - Helper func to derive pdev id from vdev_id + * @psoc: psoc object + * @vdev_id: vdev identifier + * @dbg_id: object manager debug id + * + * This function is used to derive the pdev id from vdev id for a psoc + * + * Return : pdev_id - +ve integer for success and WLAN_INVALID_PDEV_ID + * for failure + */ +uint32_t wlan_get_pdev_id_from_vdev_id(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_util_is_vdev_active() - Check for vdev active + * @pdev: pdev pointer + * @dbg_id: debug id for ref counting + * + * Return: QDF_STATUS_SUCCESS in case of vdev active + * QDF_STATUS_E_INVAL, if dev is not active + */ +QDF_STATUS wlan_util_is_vdev_active(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_vdev_is_up() - Check for vdev is in UP state + * @vdev: vdev pointer + * + * Return: QDF_STATUS_SUCCESS, if vdev is in up, otherwise QDF_STATUS_E_FAILURE + */ +QDF_STATUS wlan_vdev_is_up(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_util_pdev_vdevs_deschan_match() - function to check des channel matches + * with other vdevs in pdev + * @pdev: pdev object + * @vdev: vdev object + * @ref_id: object manager ref id + * + * This function checks the vdev desired channel with other vdev channels + * + * Return: QDF_STATUS_SUCCESS, if it matches, otherwise QDF_STATUS_E_FAILURE + */ +QDF_STATUS wlan_util_pdev_vdevs_deschan_match(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_util_change_map_index() - function to set/reset given index bit + * @map: bitmpap + * @id: bit index + * @set: 1 for set, 0 of reset + * + * This function set/reset given index bit + * + * Return: void + */ +void wlan_util_change_map_index(unsigned long *map, uint8_t id, uint8_t set); + +/** + * wlan_util_map_index_is_set() - function to check whether given index bit is + * set + * @map: bitmpap + * @id: bit index + * + * This function checks the given index bit is set + * + * Return: true, if bit is set, otherwise false + */ +bool wlan_util_map_index_is_set(unsigned long *map, uint8_t id); + +/** + * wlan_pdev_chan_change_pending_vdevs() - function to test/set channel change + * pending flag + * @pdev: pdev object + * @vdev_id_map: bitmap to derive channel change vdevs + * @ref_id: object manager ref id + * + * This function test/set channel change pending flag + * + * Return: QDF_STATUS_SUCCESS, if it iterates through all vdevs, + * otherwise QDF_STATUS_E_FAILURE + */ +QDF_STATUS wlan_pdev_chan_change_pending_vdevs(struct wlan_objmgr_pdev *pdev, + unsigned long *vdev_id_map, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_chan_eq() - function to check whether both channels are same + * @chan1: channel1 object + * @chan2: channel2 object + * + * This function checks the chan1 and chan2 are same + * + * Return: QDF_STATUS_SUCCESS, if it matches, otherwise QDF_STATUS_E_FAILURE + */ +QDF_STATUS wlan_chan_eq(struct wlan_channel *chan1, struct wlan_channel *chan2); + +/** + * wlan_chan_copy() - function to copy channel + * @tgt: target channel object + * @src: src achannel object + * + * This function copies channel data from src to tgt + * + * Return: void + */ +void wlan_chan_copy(struct wlan_channel *tgt, struct wlan_channel *src); + +/** + * wlan_vdev_get_active_channel() - derives the vdev operating channel + * @vdev: VDEV object + * + * This function checks vdev state and return the channel pointer accordingly + * + * Return: active channel, if vdev chan config is valid + * NULL, if VDEV is in INIT or STOP state + */ +struct wlan_channel *wlan_vdev_get_active_channel + (struct wlan_objmgr_vdev *vdev); + +/** + * wlan_util_stats_get_rssi() - API to get rssi in dbm + * @db2dbm_enabled: If db2dbm capability is enabled + * @bcn_snr: beacon snr + * @dat_snr: data snr + * @rssi: rssi + * + * This function gets the rssi based on db2dbm support. If this feature is + * present in hw then it means firmware directly sends rssi and no converstion + * is required. If this capablity is not present then host needs to convert + * snr to rssi + * + * Return: None + */ +void +wlan_util_stats_get_rssi(bool db2dbm_enabled, int32_t bcn_snr, int32_t dat_snr, + int8_t *rssi); + +/** + * wlan_util_is_pdev_restart_progress() - Check if any vdev is in restart state + * @pdev: pdev pointer + * @dbg_id: module id + * + * Iterates through all vdevs, checks if any VDEV is in RESTART_PROGRESS + * substate + * + * Return: QDF_STATUS_SUCCESS,if any vdev is in RESTART_PROGRESS substate + * otherwise QDF_STATUS_E_FAILURE + */ +QDF_STATUS wlan_util_is_pdev_restart_progress(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_util_is_pdev_scan_allowed() - Check for vdev is allowed to scan + * @pdev: pdev pointer + * @dbg_id: module id + * + * Iterates through all vdevs, checks if any VDEV is not either in S_INIT or in + * S_UP state + * + * Return: QDF_STATUS_SUCCESS,if scan is allowed, otherwise QDF_STATUS_E_FAILURE + */ +QDF_STATUS wlan_util_is_pdev_scan_allowed(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_util_get_peer_count_for_mode - This api gives vdev mode specific + * peer count` + * @pdev: PDEV object + * @mode: Operation mode. + * + * Return: int- peer count for operating mode + */ +uint16_t wlan_util_get_peer_count_for_mode(struct wlan_objmgr_pdev *pdev, + enum QDF_OPMODE mode); + +#endif /* _WLAN_UTILITY_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/utils/src/wlan_utility.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/utils/src/wlan_utility.c new file mode 100644 index 0000000000000000000000000000000000000000..c82001afd5944381360e248d33b32627d2b061f4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/utils/src/wlan_utility.c @@ -0,0 +1,520 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains definition for mandatory legacy API + */ + +#include "qdf_str.h" +#include "wlan_utility.h" +#include +#include "wlan_osif_priv.h" +#include +#include +#include + +uint32_t wlan_chan_to_freq(uint8_t chan) +{ + if (chan == 0 ) + return 0; + + if (chan < WLAN_24_GHZ_CHANNEL_14) + return WLAN_24_GHZ_BASE_FREQ + chan * WLAN_CHAN_SPACING_5MHZ; + else if (chan == WLAN_24_GHZ_CHANNEL_14) + return WLAN_CHAN_14_FREQ; + else if (chan < WLAN_24_GHZ_CHANNEL_27) + /* ch 15 - ch 26 */ + return WLAN_CHAN_15_FREQ + + (chan - WLAN_24_GHZ_CHANNEL_15) * WLAN_CHAN_SPACING_20MHZ; + else if (chan == WLAN_5_GHZ_CHANNEL_170) + return WLAN_CHAN_170_FREQ; + else + return WLAN_5_GHZ_BASE_FREQ + chan * WLAN_CHAN_SPACING_5MHZ; +} + +uint8_t wlan_freq_to_chan(uint32_t freq) +{ + uint8_t chan; + + if (freq == 0) + return 0; + + if (freq > WLAN_24_GHZ_BASE_FREQ && freq < WLAN_CHAN_14_FREQ) + chan = ((freq - WLAN_24_GHZ_BASE_FREQ) / + WLAN_CHAN_SPACING_5MHZ); + else if (freq == WLAN_CHAN_14_FREQ) + chan = WLAN_24_GHZ_CHANNEL_14; + else if ((freq > WLAN_24_GHZ_BASE_FREQ) && + (freq < WLAN_5_GHZ_BASE_FREQ)) + chan = (((freq - WLAN_CHAN_15_FREQ) / + WLAN_CHAN_SPACING_20MHZ) + + WLAN_24_GHZ_CHANNEL_15); + else + chan = (freq - WLAN_5_GHZ_BASE_FREQ) / + WLAN_CHAN_SPACING_5MHZ; + + return chan; +} + +bool wlan_is_ie_valid(const uint8_t *ie, size_t ie_len) +{ + uint8_t elen; + + while (ie_len) { + if (ie_len < 2) + return false; + + elen = ie[1]; + ie_len -= 2; + ie += 2; + if (elen > ie_len) + return false; + + ie_len -= elen; + ie += elen; + } + + return true; +} + +static const uint8_t *wlan_get_ie_ptr_from_eid_n_oui(uint8_t eid, + const uint8_t *oui, + uint8_t oui_size, + const uint8_t *ie, + uint16_t ie_len) +{ + int32_t left = ie_len; + const uint8_t *ptr = ie; + uint8_t elem_id, elem_len; + + while (left >= 2) { + elem_id = ptr[0]; + elem_len = ptr[1]; + left -= 2; + + if (elem_len > left) + return NULL; + + if (eid == elem_id) { + /* if oui is not provide eid match is enough */ + if (!oui) + return ptr; + + /* + * if oui is provided and oui_size is more than left + * bytes, then we cannot have match + */ + if (oui_size > left) + return NULL; + + if (qdf_mem_cmp(&ptr[2], oui, oui_size) == 0) + return ptr; + } + + left -= elem_len; + ptr += (elem_len + 2); + } + + return NULL; +} + +const uint8_t *wlan_get_ie_ptr_from_eid(uint8_t eid, + const uint8_t *ie, + int ie_len) +{ + return wlan_get_ie_ptr_from_eid_n_oui(eid, NULL, 0, ie, ie_len); +} + +const uint8_t *wlan_get_vendor_ie_ptr_from_oui(const uint8_t *oui, + uint8_t oui_size, + const uint8_t *ie, + uint16_t ie_len) +{ + return wlan_get_ie_ptr_from_eid_n_oui(WLAN_MAC_EID_VENDOR, + oui, oui_size, ie, ie_len); +} + +const uint8_t *wlan_get_ext_ie_ptr_from_ext_id(const uint8_t *oui, + uint8_t oui_size, + const uint8_t *ie, + uint16_t ie_len) +{ + return wlan_get_ie_ptr_from_eid_n_oui(WLAN_MAC_EID_EXT, + oui, oui_size, ie, ie_len); +} + +bool wlan_is_emulation_platform(uint32_t phy_version) +{ + if ((phy_version == 0xABC0) || (phy_version == 0xABC1) || + (phy_version == 0xABC2) || (phy_version == 0xABC3) || + (phy_version == 0xFFFF) || (phy_version == 0xABCD)) + return true; + + return false; +} + +uint32_t wlan_get_pdev_id_from_vdev_id(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev = NULL; + uint32_t pdev_id = WLAN_INVALID_PDEV_ID; + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + vdev_id, dbg_id); + + if (vdev) { + pdev = wlan_vdev_get_pdev(vdev); + if (pdev) + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + } + + return pdev_id; +} +qdf_export_symbol(wlan_get_pdev_id_from_vdev_id); + +static void wlan_vdev_active(struct wlan_objmgr_pdev *pdev, void *object, + void *arg) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)object; + uint8_t *flag = (uint8_t *)arg; + + wlan_vdev_obj_lock(vdev); + if (wlan_vdev_mlme_is_active(vdev) == QDF_STATUS_SUCCESS) + *flag = 1; + + wlan_vdev_obj_unlock(vdev); +} + +QDF_STATUS wlan_vdev_is_up(struct wlan_objmgr_vdev *vdev) +{ + return wlan_vdev_allow_connect_n_tx(vdev); +} +qdf_export_symbol(wlan_vdev_is_up); + +QDF_STATUS wlan_util_is_vdev_active(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id) +{ + uint8_t flag = 0; + + if (!pdev) + return QDF_STATUS_E_INVAL; + + wlan_objmgr_pdev_iterate_obj_list(pdev, WLAN_VDEV_OP, wlan_vdev_active, + &flag, 0, dbg_id); + + if (flag == 1) + return QDF_STATUS_SUCCESS; + + return QDF_STATUS_E_INVAL; +} + +qdf_export_symbol(wlan_util_is_vdev_active); + +void wlan_util_change_map_index(unsigned long *map, uint8_t id, uint8_t set) +{ + if (set) + qdf_set_bit(id, map); + else + qdf_clear_bit(id, map); +} + +bool wlan_util_map_index_is_set(unsigned long *map, uint8_t id) +{ + return qdf_test_bit(id, map); +} + +static void wlan_vdev_chan_change_pending(struct wlan_objmgr_pdev *pdev, + void *object, void *arg) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)object; + unsigned long *vdev_id_map = (unsigned long *)arg; + uint8_t id = 0; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) + return; + + wlan_vdev_obj_lock(vdev); + if (wlan_vdev_chan_config_valid(vdev) == QDF_STATUS_SUCCESS) { + id = wlan_vdev_get_id(vdev); + /* Invalid vdev id */ + if (id >= wlan_psoc_get_max_vdev_count(psoc)) { + wlan_vdev_obj_unlock(vdev); + return; + } + + wlan_util_change_map_index(vdev_id_map, id, 1); + } + + wlan_vdev_obj_unlock(vdev); +} + +QDF_STATUS wlan_pdev_chan_change_pending_vdevs(struct wlan_objmgr_pdev *pdev, + unsigned long *vdev_id_map, + wlan_objmgr_ref_dbgid dbg_id) +{ + if (!pdev) + return QDF_STATUS_E_INVAL; + + wlan_objmgr_pdev_iterate_obj_list(pdev, WLAN_VDEV_OP, + wlan_vdev_chan_change_pending, + vdev_id_map, 0, dbg_id); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_chan_eq(struct wlan_channel *chan1, struct wlan_channel *chan2) +{ + if ((chan1->ch_ieee == chan2->ch_ieee) && + (chan1->ch_freq_seg2 == chan2->ch_freq_seg2)) + return QDF_STATUS_SUCCESS; + + return QDF_STATUS_E_FAILURE; +} + +void wlan_chan_copy(struct wlan_channel *tgt, struct wlan_channel *src) +{ + qdf_mem_copy(tgt, src, sizeof(struct wlan_channel)); +} + +struct wlan_channel *wlan_vdev_get_active_channel(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_channel *comp_vdev_chan = NULL; + + if (wlan_vdev_chan_config_valid(vdev) == QDF_STATUS_SUCCESS) { + /* compare with BSS channel, when vdev is active, since desired + * channel gets update, if channel is triggered in another path + */ + if (wlan_vdev_mlme_is_active(vdev) == QDF_STATUS_SUCCESS) + comp_vdev_chan = wlan_vdev_mlme_get_bss_chan(vdev); + else + comp_vdev_chan = wlan_vdev_mlme_get_des_chan(vdev); + } + + return comp_vdev_chan; +} + +static void wlan_pdev_chan_match(struct wlan_objmgr_pdev *pdev, void *object, + void *arg) +{ + struct wlan_objmgr_vdev *comp_vdev = (struct wlan_objmgr_vdev *)object; + struct wlan_vdev_ch_check_filter *ch_filter = arg; + struct wlan_channel vdev_chan, *chan; + struct wlan_channel *iter_vdev_chan; + + if (ch_filter->flag) + return; + + if (comp_vdev == ch_filter->vdev) + return; + + wlan_vdev_obj_lock(comp_vdev); + chan = wlan_vdev_get_active_channel(comp_vdev); + if (!chan) { + wlan_vdev_obj_unlock(comp_vdev); + return; + } + wlan_chan_copy(&vdev_chan, chan); + wlan_vdev_obj_unlock(comp_vdev); + + wlan_vdev_obj_lock(ch_filter->vdev); + iter_vdev_chan = wlan_vdev_mlme_get_des_chan(ch_filter->vdev); + if (wlan_chan_eq(&vdev_chan, iter_vdev_chan) + != QDF_STATUS_SUCCESS) { + ch_filter->flag = 1; + qdf_nofl_err("==> iter vdev id: %d: ieee %d, mode %d", + wlan_vdev_get_id(comp_vdev), + vdev_chan.ch_ieee, + vdev_chan.ch_phymode); + qdf_nofl_err("fl %016llx, fl-ext %08x, s1 %d, s2 %d ", + vdev_chan.ch_flags, vdev_chan.ch_flagext, + vdev_chan.ch_freq_seg1, + vdev_chan.ch_freq_seg2); + qdf_nofl_err("==> base vdev id: %d: ieee %d mode %d", + wlan_vdev_get_id(ch_filter->vdev), + iter_vdev_chan->ch_ieee, + iter_vdev_chan->ch_phymode); + qdf_nofl_err("fl %016llx, fl-ext %08x s1 %d, s2 %d", + iter_vdev_chan->ch_flags, + iter_vdev_chan->ch_flagext, + iter_vdev_chan->ch_freq_seg1, + iter_vdev_chan->ch_freq_seg2); + } + wlan_vdev_obj_unlock(ch_filter->vdev); +} + +QDF_STATUS wlan_util_pdev_vdevs_deschan_match(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_vdev_ch_check_filter ch_filter; + + if (!pdev) + return QDF_STATUS_E_INVAL; + + if (wlan_pdev_nif_feat_cap_get(pdev, WLAN_PDEV_F_CHAN_CONCURRENCY)) + return QDF_STATUS_SUCCESS; + + if (wlan_objmgr_vdev_try_get_ref(vdev, dbg_id) == QDF_STATUS_SUCCESS) { + ch_filter.flag = 0; + ch_filter.vdev = vdev; + + wlan_objmgr_pdev_iterate_obj_list(pdev, WLAN_VDEV_OP, + wlan_pdev_chan_match, + &ch_filter, 0, dbg_id); + + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + + if (ch_filter.flag == 0) + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +static void wlan_vdev_restart_progress(struct wlan_objmgr_pdev *pdev, + void *object, void *arg) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)object; + uint8_t *flag = (uint8_t *)arg; + + wlan_vdev_obj_lock(vdev); + if (wlan_vdev_is_restart_progress(vdev) == QDF_STATUS_SUCCESS) + *flag = 1; + + wlan_vdev_obj_unlock(vdev); +} + +QDF_STATUS wlan_util_is_pdev_restart_progress(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id) +{ + uint8_t flag = 0; + + if (!pdev) + return QDF_STATUS_E_INVAL; + + wlan_objmgr_pdev_iterate_obj_list(pdev, WLAN_VDEV_OP, + wlan_vdev_restart_progress, + &flag, 0, dbg_id); + + if (flag == 1) + return QDF_STATUS_SUCCESS; + + return QDF_STATUS_E_INVAL; +} + +static void wlan_vdev_scan_allowed(struct wlan_objmgr_pdev *pdev, void *object, + void *arg) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)object; + uint8_t *flag = (uint8_t *)arg; + + wlan_vdev_obj_lock(vdev); + if (wlan_vdev_mlme_is_scan_allowed(vdev) != QDF_STATUS_SUCCESS) + *flag = 1; + + wlan_vdev_obj_unlock(vdev); +} + +QDF_STATUS wlan_util_is_pdev_scan_allowed(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id) +{ + uint8_t flag = 0; + + if (!pdev) + return QDF_STATUS_E_INVAL; + + wlan_objmgr_pdev_iterate_obj_list(pdev, WLAN_VDEV_OP, + wlan_vdev_scan_allowed, + &flag, 0, dbg_id); + + if (flag == 1) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +void +wlan_util_stats_get_rssi(bool db2dbm_enabled, int32_t bcn_snr, int32_t dat_snr, + int8_t *rssi) +{ + uint32_t snr; + + if (db2dbm_enabled) { + if (TGT_IS_VALID_RSSI(bcn_snr)) + *rssi = bcn_snr; + else if (TGT_IS_VALID_RSSI(dat_snr)) + *rssi = dat_snr; + else + *rssi = TGT_NOISE_FLOOR_DBM; + } else { + if (TGT_IS_VALID_SNR(bcn_snr)) + snr = bcn_snr; + else if (TGT_IS_VALID_SNR(dat_snr)) + snr = dat_snr; + else + snr = TGT_INVALID_SNR; + + /* Get the absolute rssi value from the current rssi value */ + *rssi = snr + TGT_NOISE_FLOOR_DBM; + } +} + +/** + * wlan_util_get_mode_specific_peer_count - This api gives vdev mode specific + * peer count` + * @pdev: PDEV object + * @object: vdev object + * @arg: argument passed by caller + * + * Return: void + */ +static void +wlan_util_get_mode_specific_peer_count(struct wlan_objmgr_pdev *pdev, + void *object, void *arg) +{ + struct wlan_objmgr_vdev *vdev = object; + uint16_t temp_count = 0; + struct wlan_op_mode_peer_count *count = arg; + + wlan_vdev_obj_lock(vdev); + if (wlan_vdev_mlme_get_opmode(vdev) == count->opmode) { + temp_count = wlan_vdev_get_peer_count(vdev); + /* Decrement the self peer count */ + if (temp_count > 1) + count->peer_count += (temp_count - 1); + } + wlan_vdev_obj_unlock(vdev); +} + +uint16_t wlan_util_get_peer_count_for_mode(struct wlan_objmgr_pdev *pdev, + enum QDF_OPMODE mode) +{ + struct wlan_op_mode_peer_count count; + + count.opmode = mode; + count.peer_count = 0; + wlan_objmgr_pdev_iterate_obj_list(pdev, WLAN_VDEV_OP, + wlan_util_get_mode_specific_peer_count, &count, + 0, WLAN_OBJMGR_ID); + + return count.peer_count; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/coex/core/inc/wlan_coex_main.h b/drivers/staging/qca-wifi-host-cmn/umac/coex/core/inc/wlan_coex_main.h new file mode 100644 index 0000000000000000000000000000000000000000..22cde21af03fe1fd2f10c639307a7b3fb3b9324b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/coex/core/inc/wlan_coex_main.h @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains declarations for coex core functions + */ + +#ifndef _WLAN_COEX_MAIN_API_H_ +#define _WLAN_COEX_MAIN_API_H_ + +#ifdef FEATURE_COEX +#include "wlan_coex_ucfg_api.h" +#include "wmi_unified_param.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_vdev_obj.h" + +#define coex_err(params...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_COEX, params) +#define coex_info(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_COEX, params) +#define coex_debug(params...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_COEX, params) + +/** + * struct coex_psoc_obj - coex object definition + * @btc_chain_mode: BT Coex chain mode. + * @coex_config_updated: callback functions for each config type, which will + * be called when config is updated. + */ +struct coex_psoc_obj { + uint8_t btc_chain_mode; + update_coex_cb coex_config_updated[COEX_CONFIG_TYPE_MAX]; +}; + +/** + * wlan_psoc_get_coex_obj() - private API to get coex object from psoc + * @psoc: psoc object + * + * Return: coex object + */ +#define wlan_psoc_get_coex_obj(psoc) \ + wlan_psoc_get_coex_obj_fl(psoc, __func__, __LINE__) + +static inline struct coex_psoc_obj * +wlan_psoc_get_coex_obj_fl(struct wlan_objmgr_psoc *psoc, + const char *func, uint32_t line) +{ + struct coex_psoc_obj *psoc_obj; + + psoc_obj = (struct coex_psoc_obj *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_COEX); + if (!psoc_obj) { + coex_err("%s:%u, Failed to get coex psoc object", func, line); + return NULL; + } + return psoc_obj; +} + +/** + * wlan_coex_psoc_init() - API to initialize coex component + * @psoc: soc context + * + * Return: QDF_STATUS + */ +QDF_STATUS +wlan_coex_psoc_init(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_coex_psoc_deinit() - API to deinitialize coex component + * @psoc: soc context + * + * Return: QDF_STATUS + */ +QDF_STATUS +wlan_coex_psoc_deinit(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_coex_config_send() - private API to send coex config + * @vdev: pointer to vdev object + * @param: parameters of coex config + * + * Return: status of operation + */ +QDF_STATUS wlan_coex_config_send(struct wlan_objmgr_vdev *vdev, + struct coex_config_params *param); + +/** + * wlan_coex_config_updated() - private API to notify that coex config + * is updated. + * @vdev: pointer to vdev object + * @type: type of coex config + * + * Return: status of operation + */ +QDF_STATUS +wlan_coex_config_updated(struct wlan_objmgr_vdev *vdev, uint8_t type); + +/** + * wlan_coex_psoc_created_notification() - PSOC obj create callback + * @psoc: PSOC object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization to + * get notified when the object is created. + * + * Return: Success or Failure + */ +QDF_STATUS wlan_coex_psoc_created_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list); + +/** + * wlan_coex_psoc_destroyed_notification() - PSOC obj delete callback + * @psoc: PSOC object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization to + * get notified when the object is deleted. + * + * Return: Success or Failure + */ +QDF_STATUS wlan_coex_psoc_destroyed_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list); + +/** + * wlan_coex_psoc_set_btc_chain_mode() - private API to set BT coex chain mode + * for psoc + * @psoc: pointer to psoc object + * @val: BT coex chain mode + * + * Return : status of operation + */ +QDF_STATUS +wlan_coex_psoc_set_btc_chain_mode(struct wlan_objmgr_psoc *psoc, uint8_t val); + +/** + * wlan_coex_psoc_get_btc_chain_mode() - private API to get BT coex chain mode + * from psoc + * @psoc: pointer to psoc object + * @val: pointer to BT coex chain mode + * + * Return : status of operation + */ +QDF_STATUS +wlan_coex_psoc_get_btc_chain_mode(struct wlan_objmgr_psoc *psoc, uint8_t *val); +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/coex/core/src/wlan_coex_main.c b/drivers/staging/qca-wifi-host-cmn/umac/coex/core/src/wlan_coex_main.c new file mode 100644 index 0000000000000000000000000000000000000000..d48b6e54eba229245d28acd55d5e4af61288b75e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/coex/core/src/wlan_coex_main.c @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains definitions for coex core functions + */ + +#include +#include +#include + +QDF_STATUS wlan_coex_psoc_created_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list) +{ + struct coex_psoc_obj *psoc_obj; + QDF_STATUS status; + + psoc_obj = qdf_mem_malloc(sizeof(*psoc_obj)); + if (!psoc_obj) + return QDF_STATUS_E_NOMEM; + + /* Attach scan private date to psoc */ + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_COEX, + psoc_obj, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + coex_err("Failed to attach psoc coex component"); + qdf_mem_free(psoc_obj); + } else { + coex_debug("Coex object attach to psoc successful"); + } + + return status; +} + +QDF_STATUS wlan_coex_psoc_destroyed_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list) +{ + void *psoc_obj; + QDF_STATUS status; + + psoc_obj = wlan_psoc_get_coex_obj(psoc); + if (!psoc_obj) + return QDF_STATUS_E_FAILURE; + + status = wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_COEX, + psoc_obj); + if (QDF_IS_STATUS_ERROR(status)) + coex_err("Failed to detach psoc coex component"); + + qdf_mem_free(psoc_obj); + + return status; +} + +QDF_STATUS +wlan_coex_psoc_init(struct wlan_objmgr_psoc *psoc) +{ + struct coex_psoc_obj *coex_obj; + + coex_obj = wlan_psoc_get_coex_obj(psoc); + if (!coex_obj) + return QDF_STATUS_E_INVAL; + + coex_obj->btc_chain_mode = WLAN_COEX_BTC_CHAIN_MODE_UNSETTLED; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_coex_psoc_deinit(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_coex_config_send(struct wlan_objmgr_vdev *vdev, + struct coex_config_params *param) +{ + QDF_STATUS status; + + status = tgt_send_coex_config(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) + coex_err("failed to send coex config"); + + return status; +} + +QDF_STATUS +wlan_coex_config_updated(struct wlan_objmgr_vdev *vdev, uint8_t type) +{ + struct wlan_objmgr_psoc *psoc; + struct coex_psoc_obj *coex_obj; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!vdev) { + coex_err("NULL vdev"); + return QDF_STATUS_E_INVAL; + } + + if (type >= COEX_CONFIG_TYPE_MAX) { + coex_err("config type out of range: %d", type); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + coex_err("NULL psoc"); + return QDF_STATUS_E_INVAL; + } + + coex_obj = wlan_psoc_get_coex_obj(psoc); + if (!coex_obj) + return QDF_STATUS_E_INVAL; + + if (coex_obj->coex_config_updated[type]) + status = coex_obj->coex_config_updated[type](vdev); + + return status; +} + +QDF_STATUS +wlan_coex_psoc_set_btc_chain_mode(struct wlan_objmgr_psoc *psoc, uint8_t val) +{ + struct coex_psoc_obj *coex_obj; + + coex_obj = wlan_psoc_get_coex_obj(psoc); + if (!coex_obj) + return QDF_STATUS_E_INVAL; + + coex_obj->btc_chain_mode = val; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_coex_psoc_get_btc_chain_mode(struct wlan_objmgr_psoc *psoc, uint8_t *val) +{ + struct coex_psoc_obj *coex_obj; + + if (!val) { + coex_err("invalid param for getting btc chain mode"); + return QDF_STATUS_E_INVAL; + } + + coex_obj = wlan_psoc_get_coex_obj(psoc); + if (!coex_obj) + return QDF_STATUS_E_INVAL; + + *val = coex_obj->btc_chain_mode; + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/inc/wlan_coex_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/inc/wlan_coex_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..452ce1bc6c1d31d081a4ad295bb9c69a1042eb7c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/inc/wlan_coex_tgt_api.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains coex south bound interface definitions + */ + +#ifndef _WLAN_COEX_TGT_API_H_ +#define _WLAN_COEX_TGT_API_H_ + +#ifdef FEATURE_COEX +struct coex_config_params; + +/** + * tgt_send_coex_config() - invoke target_if send coex config + * @vdev: vdev object + * @param: coex config parameters + * + * Return: QDF_STATUS + */ +QDF_STATUS +tgt_send_coex_config(struct wlan_objmgr_vdev *vdev, + struct coex_config_params *param); +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/inc/wlan_coex_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/inc/wlan_coex_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..2fd5a24230bfe0dca64907426e0ba087b63cecf4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/inc/wlan_coex_ucfg_api.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains coex north bound interface declarations + */ + +#ifndef _WLAN_COEX_UCFG_API_H_ +#define _WLAN_COEX_UCFG_API_H_ + +#include "qdf_status.h" +#include +#include +#include "qca_vendor.h" + +#define WLAN_COEX_BTC_CHAIN_MODE_SHARED QCA_BTC_CHAIN_SHARED +#define WLAN_COEX_BTC_CHAIN_MODE_SEPARATED QCA_BTC_CHAIN_SEPARATED +#define WLAN_COEX_BTC_CHAIN_MODE_UNSETTLED 0xFF + +/** + * enum coex_config_type - coex config type definitions + * @COEX_CONFIG_BTC_CHAIN_MODE: config BT coex chain mode + * @COEX_CONFIG_TYPE_MAX: max value + */ +enum coex_config_type { + COEX_CONFIG_BTC_CHAIN_MODE, + /* keep last */ + COEX_CONFIG_TYPE_MAX, +}; + +/** + * typedef update_coex_cb() - cb to inform coex config + * @vdev: vdev pointer + * + * Return: void + */ +typedef QDF_STATUS (*update_coex_cb)(struct wlan_objmgr_vdev *vdev); + +#ifdef FEATURE_COEX +/** + * ucfg_coex_register_cfg_updated_handler() - API to register coex config + * updated handler. + * @psoc: pointer to psoc object + * @type: type of coex config + * @handler: handler to be registered + * + * Return: status of operation + */ +QDF_STATUS +ucfg_coex_register_cfg_updated_handler(struct wlan_objmgr_psoc *psoc, + enum coex_config_type type, + update_coex_cb handler); + +/** + * ucfg_coex_psoc_set_btc_chain_mode() - API to set BT coex chain mode for psoc + * @psoc: pointer to psoc object + * @val: BT coex chain mode + * + * Return : status of operation + */ +QDF_STATUS +ucfg_coex_psoc_set_btc_chain_mode(struct wlan_objmgr_psoc *psoc, uint8_t val); + +/** + * ucfg_coex_psoc_get_btc_chain_mode() - API to get BT coex chain mode from psoc + * @psoc: pointer to psoc object + * @val: pointer to BT coex chain mode + * + * Return : status of operation + */ +QDF_STATUS +ucfg_coex_psoc_get_btc_chain_mode(struct wlan_objmgr_psoc *psoc, uint8_t *val); + +/** + * ucfg_coex_send_btc_chain_mode() - API to send BT coex config to target if + * @vdev: pointer to vdev object + * @mode: BT coex chain mode + * + * Return: status of operation + */ +QDF_STATUS +ucfg_coex_send_btc_chain_mode(struct wlan_objmgr_vdev *vdev, uint8_t mode); +#else +static inline QDF_STATUS +ucfg_coex_register_cfg_updated_handler(struct wlan_objmgr_psoc *psoc, + enum coex_config_type type, + update_coex_cb handler) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +ucfg_coex_psoc_get_btc_chain_mode(struct wlan_objmgr_psoc *psoc, uint8_t *val) +{ + if (val) + *val = WLAN_COEX_BTC_CHAIN_MODE_UNSETTLED; + + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +ucfg_coex_send_btc_chain_mode(struct wlan_objmgr_vdev *vdev, uint8_t mode) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/inc/wlan_coex_utils_api.h b/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/inc/wlan_coex_utils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..76bb68d495848aaa973ddd5688d759574006b677 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/inc/wlan_coex_utils_api.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_coex_utils_api.h + * + * This header file provides declaration of public APIs exposed to other UMAC + * components. + */ + +#ifndef _WLAN_COEX_UTILS_API_H_ +#define _WLAN_COEX_UTILS_API_H_ +#include + +/* + * wlan_coex_init() - Coex module initialization API + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_coex_init(void); + +/* + * wlan_coex_deinit() - Coex module deinitialization API + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_coex_deinit(void); + +/** + * wlan_coex_psoc_open() - Open coex component + * @psoc: soc context + * + * This function gets called when dispatcher opening. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS +wlan_coex_psoc_open(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_coex_psoc_close() - Close coex component + * @psoc: soc context + * + * This function gets called when dispatcher closing. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS +wlan_coex_psoc_close(struct wlan_objmgr_psoc *psoc); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/src/wlan_coex_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/src/wlan_coex_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..4022a7c13eb5a9f2be0e75b7d0bbdc47da6dd42e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/src/wlan_coex_tgt_api.c @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains coex south bound interface definitions + */ + +#include +#include +#include +#include "wlan_objmgr_pdev_obj.h" + +static inline struct wlan_lmac_if_coex_tx_ops * +wlan_psoc_get_coex_txops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.tx_ops.coex_ops; +} + +static inline struct wlan_lmac_if_coex_tx_ops * +wlan_vdev_get_coex_txops(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + coex_err("NULL psoc"); + return NULL; + } + + return wlan_psoc_get_coex_txops(psoc); +} + +QDF_STATUS +tgt_send_coex_config(struct wlan_objmgr_vdev *vdev, + struct coex_config_params *param) +{ + struct wlan_lmac_if_coex_tx_ops *coex_ops; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + + if (!vdev) { + coex_err("NULL vdev"); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + coex_err("NULL psoc"); + return QDF_STATUS_E_NULL_VALUE; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + coex_err("NULL pdev"); + return QDF_STATUS_E_NULL_VALUE; + } + + coex_ops = wlan_psoc_get_coex_txops(psoc); + QDF_ASSERT(coex_ops->coex_config_send); + if (coex_ops->coex_config_send) + return coex_ops->coex_config_send(pdev, param); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/src/wlan_coex_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/src/wlan_coex_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..68321ee4c1f557245235a74f28d5e9514c1884f1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/src/wlan_coex_ucfg_api.c @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains coex north bound interface definitions + */ + +#include +#include +#include "wmi_unified.h" + +QDF_STATUS +ucfg_coex_register_cfg_updated_handler(struct wlan_objmgr_psoc *psoc, + enum coex_config_type type, + update_coex_cb handler) +{ + struct coex_psoc_obj *coex_obj; + + if (type >= COEX_CONFIG_TYPE_MAX) { + coex_err("invalid coex type: %d", type); + return QDF_STATUS_E_INVAL; + } + + coex_obj = wlan_psoc_get_coex_obj(psoc); + if (!coex_obj) + return QDF_STATUS_E_INVAL; + + coex_obj->coex_config_updated[type] = handler; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +ucfg_coex_psoc_set_btc_chain_mode(struct wlan_objmgr_psoc *psoc, uint8_t val) +{ + return wlan_coex_psoc_set_btc_chain_mode(psoc, val); +} + +QDF_STATUS +ucfg_coex_psoc_get_btc_chain_mode(struct wlan_objmgr_psoc *psoc, uint8_t *val) +{ + return wlan_coex_psoc_get_btc_chain_mode(psoc, val); +} + +QDF_STATUS +ucfg_coex_send_btc_chain_mode(struct wlan_objmgr_vdev *vdev, uint8_t mode) +{ + struct coex_config_params param = {0}; + + if (mode != WLAN_COEX_BTC_CHAIN_MODE_SHARED && + mode != WLAN_COEX_BTC_CHAIN_MODE_SEPARATED) + return QDF_STATUS_E_INVAL; + + param.vdev_id = wlan_vdev_get_id(vdev); + param.config_type = WMI_COEX_CONFIG_BTCOEX_SEPARATE_CHAIN_MODE; + param.config_arg1 = mode; + + coex_debug("send btc chain mode %d for vdev %d", mode, param.vdev_id); + + return wlan_coex_config_send(vdev, ¶m); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/src/wlan_coex_utils_api.c b/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/src/wlan_coex_utils_api.c new file mode 100644 index 0000000000000000000000000000000000000000..32178781bac0b16a280e74efc99d5bda0706f8cb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/coex/dispatcher/src/wlan_coex_utils_api.c @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_coex_utils_api.c + * + * This file provides definitions of public APIs exposed to other UMAC + * components. + */ + +#include +#include +#include + +QDF_STATUS wlan_coex_init(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_register_psoc_create_handler( + WLAN_UMAC_COMP_COEX, + wlan_coex_psoc_created_notification, NULL); + if (QDF_IS_STATUS_ERROR(status)) { + coex_err("Failed to register psoc create handler"); + goto fail_create_psoc; + } + + status = wlan_objmgr_register_psoc_destroy_handler( + WLAN_UMAC_COMP_COEX, + wlan_coex_psoc_destroyed_notification, NULL); + if (QDF_IS_STATUS_ERROR(status)) { + coex_err("Failed to create psoc delete handler"); + goto fail_psoc_destroy; + } + + coex_debug("coex psoc create and delete handler registered"); + return status; + +fail_psoc_destroy: + wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_COEX, + wlan_coex_psoc_created_notification, NULL); +fail_create_psoc: + return status; +} + +QDF_STATUS wlan_coex_deinit(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_COEX, + wlan_coex_psoc_destroyed_notification, NULL); + if (status != QDF_STATUS_SUCCESS) + coex_err("Failed to unregister psoc delete handler"); + + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_COEX, + wlan_coex_psoc_created_notification, NULL); + if (status != QDF_STATUS_SUCCESS) + coex_err("Failed to unregister psoc create handler"); + + return status; +} + +QDF_STATUS +wlan_coex_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return wlan_coex_psoc_init(psoc); +} + +QDF_STATUS +wlan_coex_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return wlan_coex_psoc_deinit(psoc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_cmn_api_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_cmn_api_i.h new file mode 100644 index 0000000000000000000000000000000000000000..207704ee4e44621ce7e050f3c892d698675b8d97 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_cmn_api_i.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_cmn_api_i.h + * + * This header filed declare APIs which have separate definition for both mc + * and ic + */ +#ifndef __WLAN_CP_STATS_CMN_API_I_H__ +#define __WLAN_CP_STATS_CMN_API_I_H__ +#ifdef QCA_SUPPORT_CP_STATS +#include "wlan_cp_stats_defs.h" + +/** + * wlan_cp_stats_psoc_cs_init() - common psoc obj initialization + * @psoc: pointer to psoc object + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_psoc_cs_init(struct psoc_cp_stats *psoc_cs); + +/** + * wlan_cp_stats_psoc_cs_deinit() - common psoc obj deinitialization + * @psoc: pointer to psoc object + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_psoc_cs_deinit(struct psoc_cp_stats *psoc_cs); + +/** + * wlan_cp_stats_pdev_cs_init() - common pdev obj initialization + * @pdev: pointer to pdev object + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_pdev_cs_init(struct pdev_cp_stats *pdev_cs); + +/** + * wlan_cp_stats_pdev_cs_deinit() - common pdev obj deinitialization + * @pdev: pointer to pdev object + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_pdev_cs_deinit(struct pdev_cp_stats *pdev_cs); + +/** + * wlan_cp_stats_vdev_cs_init() - common vdev obj initialization + * @vdev: pointer to vdev object + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_vdev_cs_init(struct vdev_cp_stats *vdev_cs); + +/** + * wlan_cp_stats_vdev_cs_deinit() - common vdev obj deinitialization + * @vdev: pointer to vdev object + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_vdev_cs_deinit(struct vdev_cp_stats *vdev_cs); + +/** + * wlan_cp_stats_peer_cs_init() - common peer obj initialization + * @peer: pointer to peer object + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_peer_cs_init(struct peer_cp_stats *peer_cs); + +/** + * wlan_cp_stats_peer_cs_deinit() - common peer obj deinitialization + * @peer: pointer to peer object + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_peer_cs_deinit(struct peer_cp_stats *peer_cs); + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_CMN_API_I_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_cmn_defs.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_cmn_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..0e9d2afee4092275acf4cb59f9637eb40ec94145 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_cmn_defs.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_cmn_defs.h + * + * This header file maintain definitions for cp stats structures which are + * common between win and mcl + */ + +#ifndef __WLAN_CP_STATS_CMN_DEFS_H__ +#define __WLAN_CP_STATS_CMN_DEFS_H__ + +#endif /* __WLAN_CP_STATS_CMN_DEFS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_comp_handler.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_comp_handler.c new file mode 100644 index 0000000000000000000000000000000000000000..e3fca004fea91b947b9a25138ce31b6dc7347cf9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_comp_handler.c @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_comp_handler.c + * + * This file maintain definitions to APIs which handle attach/detach of other + * UMAC component specific cp stat object to cp stats + * + * Components calling configure API should alloc data structure while attaching + * dealloc while detaching, where as address for which to be deallocated will + * be passed back to component for data + */ +#include "wlan_cp_stats_comp_handler.h" +#include "wlan_cp_stats_defs.h" +#include +#include + +static QDF_STATUS +wlan_cp_stats_psoc_comp_obj_config +(struct wlan_objmgr_psoc *psoc, enum wlan_cp_stats_comp_id comp_id, + enum wlan_cp_stats_cfg_state cfg_state, void *data) +{ + struct psoc_cp_stats *psoc_cs; + + psoc_cs = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cs) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_INVAL; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cs); + if (cfg_state == WLAN_CP_STATS_OBJ_ATTACH) { + if (psoc_cs->psoc_comp_priv_obj[comp_id]) { + wlan_cp_stats_psoc_obj_unlock(psoc_cs); + return QDF_STATUS_E_EXISTS; + } + psoc_cs->psoc_comp_priv_obj[comp_id] = data; + } else if (cfg_state == WLAN_CP_STATS_OBJ_DETACH) { + if (psoc_cs->psoc_comp_priv_obj[comp_id] != data) { + wlan_cp_stats_psoc_obj_unlock(psoc_cs); + return QDF_STATUS_E_INVAL; + } + data = psoc_cs->psoc_comp_priv_obj[comp_id]; + psoc_cs->psoc_comp_priv_obj[comp_id] = NULL; + } else if (cfg_state == WLAN_CP_STATS_OBJ_INVALID) { + cp_stats_err("Invalid cp stats cfg_state"); + wlan_cp_stats_psoc_obj_unlock(psoc_cs); + return QDF_STATUS_E_INVAL; + } + + wlan_cp_stats_psoc_obj_unlock(psoc_cs); + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +wlan_cp_stats_pdev_comp_obj_config +(struct wlan_objmgr_pdev *pdev, enum wlan_cp_stats_comp_id comp_id, + enum wlan_cp_stats_cfg_state cfg_state, void *data) +{ + struct pdev_cp_stats *pdev_cs; + + pdev_cs = wlan_cp_stats_get_pdev_stats_obj(pdev); + if (!pdev_cs) { + cp_stats_err("pdev cp stats object is null"); + return QDF_STATUS_E_INVAL; + } + + wlan_cp_stats_pdev_obj_lock(pdev_cs); + if (cfg_state == WLAN_CP_STATS_OBJ_ATTACH) { + if (pdev_cs->pdev_comp_priv_obj[comp_id]) { + wlan_cp_stats_pdev_obj_unlock(pdev_cs); + return QDF_STATUS_E_EXISTS; + } + pdev_cs->pdev_comp_priv_obj[comp_id] = data; + } else if (cfg_state == WLAN_CP_STATS_OBJ_DETACH) { + if (pdev_cs->pdev_comp_priv_obj[comp_id] != data) { + wlan_cp_stats_pdev_obj_unlock(pdev_cs); + return QDF_STATUS_E_INVAL; + } + data = pdev_cs->pdev_comp_priv_obj[comp_id]; + pdev_cs->pdev_comp_priv_obj[comp_id] = NULL; + } else if (cfg_state == WLAN_CP_STATS_OBJ_INVALID) { + cp_stats_err("Invalid cp stats cfg_state"); + wlan_cp_stats_pdev_obj_unlock(pdev_cs); + return QDF_STATUS_E_INVAL; + } + + wlan_cp_stats_pdev_obj_unlock(pdev_cs); + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +wlan_cp_stats_vdev_comp_obj_config +(struct wlan_objmgr_vdev *vdev, enum wlan_cp_stats_comp_id comp_id, + enum wlan_cp_stats_cfg_state cfg_state, void *data) +{ + struct vdev_cp_stats *vdev_cs; + + vdev_cs = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cs) { + cp_stats_err("vdev cp stats object is null"); + return QDF_STATUS_E_INVAL; + } + + wlan_cp_stats_vdev_obj_lock(vdev_cs); + if (cfg_state == WLAN_CP_STATS_OBJ_ATTACH) { + if (vdev_cs->vdev_comp_priv_obj[comp_id]) { + wlan_cp_stats_vdev_obj_unlock(vdev_cs); + return QDF_STATUS_E_EXISTS; + } + vdev_cs->vdev_comp_priv_obj[comp_id] = data; + } else if (cfg_state == WLAN_CP_STATS_OBJ_DETACH) { + if (vdev_cs->vdev_comp_priv_obj[comp_id] != data) { + wlan_cp_stats_vdev_obj_unlock(vdev_cs); + return QDF_STATUS_E_INVAL; + } + data = vdev_cs->vdev_comp_priv_obj[comp_id]; + vdev_cs->vdev_comp_priv_obj[comp_id] = NULL; + } else if (cfg_state == WLAN_CP_STATS_OBJ_INVALID) { + cp_stats_err("Invalid cp stats cfg_state"); + wlan_cp_stats_vdev_obj_unlock(vdev_cs); + return QDF_STATUS_E_INVAL; + } + + wlan_cp_stats_vdev_obj_unlock(vdev_cs); + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +wlan_cp_stats_peer_comp_obj_config +(struct wlan_objmgr_peer *peer, enum wlan_cp_stats_comp_id comp_id, + enum wlan_cp_stats_cfg_state cfg_state, void *data) +{ + struct peer_cp_stats *peer_cs; + + peer_cs = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cs) { + cp_stats_err("peer cp stats object is null"); + return QDF_STATUS_E_INVAL; + } + + wlan_cp_stats_peer_obj_lock(peer_cs); + if (cfg_state == WLAN_CP_STATS_OBJ_ATTACH) { + if (peer_cs->peer_comp_priv_obj[comp_id]) { + wlan_cp_stats_peer_obj_unlock(peer_cs); + return QDF_STATUS_E_EXISTS; + } + peer_cs->peer_comp_priv_obj[comp_id] = data; + } else if (cfg_state == WLAN_CP_STATS_OBJ_DETACH) { + if (peer_cs->peer_comp_priv_obj[comp_id] != data) { + wlan_cp_stats_peer_obj_unlock(peer_cs); + return QDF_STATUS_E_INVAL; + } + data = peer_cs->peer_comp_priv_obj[comp_id]; + peer_cs->peer_comp_priv_obj[comp_id] = NULL; + } else if (cfg_state == WLAN_CP_STATS_OBJ_INVALID) { + cp_stats_err("Invalid cp stats cfg_state"); + wlan_cp_stats_peer_obj_unlock(peer_cs); + return QDF_STATUS_E_INVAL; + } + + wlan_cp_stats_peer_obj_unlock(peer_cs); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_cp_stats_comp_obj_config(enum wlan_objmgr_obj_type obj_type, + enum wlan_cp_stats_cfg_state cfg_state, + enum wlan_cp_stats_comp_id comp_id, + void *cmn_obj, void *data) +{ + QDF_STATUS status; + + if (!cmn_obj) { + cp_stats_err("Common object is NULL"); + return QDF_STATUS_E_INVAL; + } + + /* component id is invalid */ + if (comp_id >= WLAN_CP_STATS_MAX_COMPONENTS) { + cp_stats_err("Invalid component Id"); + return QDF_STATUS_MAXCOMP_FAIL; + } + + switch (obj_type) { + case WLAN_PSOC_OP: + status = + wlan_cp_stats_psoc_comp_obj_config( + (struct wlan_objmgr_psoc *)cmn_obj, + comp_id, cfg_state, data); + break; + case WLAN_PDEV_OP: + status = + wlan_cp_stats_pdev_comp_obj_config( + (struct wlan_objmgr_pdev *)cmn_obj, + comp_id, cfg_state, data); + break; + case WLAN_VDEV_OP: + status = + wlan_cp_stats_vdev_comp_obj_config( + (struct wlan_objmgr_vdev *)cmn_obj, + comp_id, cfg_state, data); + break; + case WLAN_PEER_OP: + status = + wlan_cp_stats_peer_comp_obj_config( + (struct wlan_objmgr_peer *)cmn_obj, + comp_id, cfg_state, data); + break; + default: + cp_stats_err("Invalid common object"); + return QDF_STATUS_E_INVAL; + } + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_comp_handler.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_comp_handler.h new file mode 100644 index 0000000000000000000000000000000000000000..02a63f53992499aee70831682a53e9a357653a7e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_comp_handler.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_comp_handler.h + * + * This header file API declarations required to attach/detach and + * enable/disable other UMAC component specific control plane statitics + * to cp stats component object + */ + +#ifndef __WLAN_CP_STATS_COMP_HANDLER_H__ +#define __WLAN_CP_STATS_COMP_HANDLER_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include "wlan_cp_stats_defs.h" + +/** + * wlan_cp_stats_comp_obj_config() - attach/detach component specific stats + * callback function + * @obj_type: common object type + * @cfg_state: config state either to attach of detach + * @comp_id: cpstats component id + * @cmn_obj: pointer to common object + * @comp_priv_obj: pointer to component specific cp stats object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_comp_obj_config( + enum wlan_objmgr_obj_type obj_type, + enum wlan_cp_stats_cfg_state cfg_state, + enum wlan_cp_stats_comp_id comp_id, + void *cmn_obj, + void *comp_priv_obj); + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_COMP_HANDLER_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_defs.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..762cdfe70b34ea6096ac7e0d6399e9db4d2cf72c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_defs.h @@ -0,0 +1,449 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_defs.h + * + * This header file maintains core definitions of control plane statistics + * component + */ + +#ifndef __WLAN_CP_STATS_DEFS_H__ +#define __WLAN_CP_STATS_DEFS_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include +#include +#include +#include +#include +#include +#include "wlan_cp_stats_cmn_defs.h" +#include +#include + +/* noise floor */ +#define CP_STATS_TGT_NOISE_FLOOR_DBM (-96) + +/** + * struct psoc_cp_stats - defines cp stats at psoc object + * @psoc_obj: pointer to psoc + * @psoc_comp_priv_obj[]: component's private object pointers + * @psoc_cp_stats_lock: lock to protect object + * @cmn_stats: stats common for AP and STA devices + * @obj_stats: stats specific to AP or STA devices + * @legacy_stats_cb: callback to update the stats received from FW through + * asynchronous events. + */ +struct psoc_cp_stats { + struct wlan_objmgr_psoc *psoc_obj; + void *psoc_comp_priv_obj[WLAN_CP_STATS_MAX_COMPONENTS]; + qdf_spinlock_t psoc_cp_stats_lock; + struct psoc_cmn_cp_stats *cmn_stats; + psoc_ext_cp_stats_t *obj_stats; + void (*legacy_stats_cb)(void *stats); +}; + +/** + * struct pdev_cp_stats - defines cp stats at pdev object + * @pdev_obj: pointer to pdev + * @pdev_stats: pointer to ic/mc specific stats + * @pdev_comp_priv_obj[]: component's private object pointers + * @pdev_cp_stats_lock: lock to protect object + */ +struct pdev_cp_stats { + struct wlan_objmgr_pdev *pdev_obj; + pdev_ext_cp_stats_t *pdev_stats; + void *pdev_comp_priv_obj[WLAN_CP_STATS_MAX_COMPONENTS]; + qdf_spinlock_t pdev_cp_stats_lock; +}; + +/** + * struct vdev_cp_stats - defines cp stats at vdev object + * @vdev_obj: pointer to vdev + * @vdev_stats: pointer to ic/mc specific stats + * @vdev_comp_priv_obj[]: component's private object pointers + * @vdev_cp_stats_lock: lock to protect object + */ +struct vdev_cp_stats { + struct wlan_objmgr_vdev *vdev_obj; + vdev_ext_cp_stats_t *vdev_stats; + void *vdev_comp_priv_obj[WLAN_CP_STATS_MAX_COMPONENTS]; + qdf_spinlock_t vdev_cp_stats_lock; +}; + +/** + * struct peer_cp_stats - defines cp stats at peer object + * @peer_obj: pointer to peer + * @peer_stats: pointer to ic/mc specific stats + * @peer_comp_priv_obj[]: component's private object pointers + * @peer_cp_stats_lock: lock to protect object + */ +struct peer_cp_stats { + struct wlan_objmgr_peer *peer_obj; + peer_ext_cp_stats_t *peer_stats; + void *peer_comp_priv_obj[WLAN_CP_STATS_MAX_COMPONENTS]; + qdf_spinlock_t peer_cp_stats_lock; +}; + +/** + * struct cp_stats_context - defines cp stats global context object + * @csc_lock: lock to protect object + * @psoc_obj: pointer to psoc + * @psoc_cs: pointer to cp stats at psoc + * @cp_stats_ctx_init: callback pointer to init cp stats global ctx + * @cp_stats_ctx_deinit: callback pointer to deinit cp stats global ctx + * @cp_stats_psoc_obj_init:callback pointer to init cp stats obj on psoc create + * @cp_stats_psoc_obj_deinit:callback pointer to deinit cp stats obj on psoc + * destroy + * @cp_stats_pdev_obj_init:callback pointer to init cp stats obj on pdev create + * @cp_stats_pdev_obj_deinit:callback pointer to deinit cp stats obj on pdev + * destroy + * @cp_stats_vdev_obj_init:callback pointer to init cp stats obj on vdev create + * @cp_stats_vdev_obj_deinit:callback pointer to deinit cp stats obj on vdev + * destroy + * @cp_stats_peer_obj_init:callback pointer to init cp stats obj on peer create + * @cp_stats_peer_obj_deinit:callback pointer to deinit cp stats obj on peer + * destroy + * @cp_stats_comp_obj_config:callback pointer to attach/detach other umac comp + * @cp_stats_open: callback pointer for cp stats on psoc open + * @cp_stats_close: callback pointer for cp stats on psoc close + * @cp_stats_enable: callback pointer for cp stats on psoc enable + * @cp_stats_disable: callback pointer for cp stats on psoc disable + */ +struct cp_stats_context { + qdf_spinlock_t csc_lock; + struct wlan_objmgr_psoc *psoc_obj; + struct psoc_cp_stats *psoc_cs; + QDF_STATUS (*cp_stats_ctx_init)(struct cp_stats_context *ctx); + QDF_STATUS (*cp_stats_ctx_deinit)(struct cp_stats_context *ctx); + QDF_STATUS (*cp_stats_psoc_obj_init)(struct psoc_cp_stats *psoc_cs); + QDF_STATUS (*cp_stats_psoc_obj_deinit)(struct psoc_cp_stats *psoc_cs); + QDF_STATUS (*cp_stats_pdev_obj_init)(struct pdev_cp_stats *pdev_cs); + QDF_STATUS (*cp_stats_pdev_obj_deinit)(struct pdev_cp_stats *pdev_cs); + QDF_STATUS (*cp_stats_vdev_obj_init)(struct vdev_cp_stats *vdev_cs); + QDF_STATUS (*cp_stats_vdev_obj_deinit)(struct vdev_cp_stats *vdev_cs); + QDF_STATUS (*cp_stats_peer_obj_init)(struct peer_cp_stats *peer_cs); + QDF_STATUS (*cp_stats_peer_obj_deinit)(struct peer_cp_stats *peer_cs); + QDF_STATUS (*cp_stats_comp_obj_config)( + enum wlan_objmgr_obj_type obj_type, + enum wlan_cp_stats_cfg_state cfg_state, + enum wlan_cp_stats_comp_id comp_id, + void *cmn_obj, + void *data); + QDF_STATUS (*cp_stats_open)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*cp_stats_close)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*cp_stats_enable)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*cp_stats_disable)(struct wlan_objmgr_psoc *psoc); +}; + +/** + * wlan_cp_stats_psoc_obj_lock() - private API to acquire spinlock at psoc + * @psoc: pointer to psoc cp stats object + * + * Return: void + */ +static inline void wlan_cp_stats_psoc_obj_lock(struct psoc_cp_stats *psoc) +{ + qdf_spin_lock_bh(&psoc->psoc_cp_stats_lock); +} + +/** + * wlan_cp_stats_psoc_obj_unlock() - private API to release spinlock at psoc + * @psoc: pointer to psoc cp stats object + * + * Return: void + */ +static inline void wlan_cp_stats_psoc_obj_unlock(struct psoc_cp_stats *psoc) +{ + qdf_spin_unlock_bh(&psoc->psoc_cp_stats_lock); +} + +/** + * wlan_cp_stats_pdev_obj_lock() - private API to acquire spinlock at pdev + * @pdev: pointer to pdev cp stats object + * + * Return: void + */ +static inline void wlan_cp_stats_pdev_obj_lock(struct pdev_cp_stats *pdev) +{ + qdf_spin_lock_bh(&pdev->pdev_cp_stats_lock); +} + +/** + * wlan_cp_stats_pdev_obj_unlock() - private api to release spinlock at pdev + * @pdev: pointer to pdev cp stats object + * + * Return: void + */ +static inline void wlan_cp_stats_pdev_obj_unlock(struct pdev_cp_stats *pdev) +{ + qdf_spin_unlock_bh(&pdev->pdev_cp_stats_lock); +} + +/** + * wlan_cp_stats_vdev_obj_lock() - private api to acquire spinlock at vdev + * @vdev: pointer to vdev cp stats object + * + * Return: void + */ +static inline void wlan_cp_stats_vdev_obj_lock(struct vdev_cp_stats *vdev) +{ + qdf_spin_lock_bh(&vdev->vdev_cp_stats_lock); +} + +/** + * wlan_cp_stats_vdev_obj_unlock() - private api to release spinlock at vdev + * @vdev: pointer to vdev cp stats object + * + * Return: void + */ +static inline void wlan_cp_stats_vdev_obj_unlock(struct vdev_cp_stats *vdev) +{ + qdf_spin_unlock_bh(&vdev->vdev_cp_stats_lock); +} + +/** + * wlan_cp_stats_peer_obj_lock() - private api to acquire spinlock at peer + * @peer: pointer to peer cp stats object + * + * Return: void + */ +static inline void wlan_cp_stats_peer_obj_lock(struct peer_cp_stats *peer) +{ + qdf_spin_lock_bh(&peer->peer_cp_stats_lock); +} + +/** + * wlan_cp_stats_peer_obj_unlock() - private api to release spinlock at peer + * @peer: pointer to peer cp stats object + * + * Return: void + */ +static inline void wlan_cp_stats_peer_obj_unlock(struct peer_cp_stats *peer) +{ + qdf_spin_unlock_bh(&peer->peer_cp_stats_lock); +} + +/** + * wlan_cp_stats_get_psoc_stats_obj() - API to get psoc_cp_stats from psoc + * @psoc: Reference to psoc global object + * + * This API used to get psoc specific cp_stats object from global psoc + * reference. + * + * Return : Reference to psoc_cp_stats object on success or NULL on failure + */ +static inline +struct psoc_cp_stats *wlan_cp_stats_get_psoc_stats_obj(struct wlan_objmgr_psoc + *psoc) +{ + struct cp_stats_context *csc; + + if (!psoc) + return NULL; + + csc = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_CP_STATS); + + if (!csc) + return NULL; + + return csc->psoc_cs; +} + +/** + * wlan_cp_stats_get_pdev_stats_obj() - API to get pdev_cp_stats from pdev + * @pdev: Reference to pdev global object + * + * This API used to get pdev specific cp_stats object from global pdev + * reference. + * + * Return : Reference to pdev_cp_stats object on success or NULL on failure + */ +static inline +struct pdev_cp_stats *wlan_cp_stats_get_pdev_stats_obj(struct wlan_objmgr_pdev + *pdev) +{ + struct pdev_cp_stats *pdev_cs = NULL; + + if (pdev) { + pdev_cs = wlan_objmgr_pdev_get_comp_private_obj + (pdev, WLAN_UMAC_COMP_CP_STATS); + } + + return pdev_cs; +} + +/** + * wlan_cp_stats_get_vdev_stats_obj() - API to get vdev_cp_stats from vdev + * @vdev : Reference to vdev global object + * + * This API used to get vdev specific cp_stats object from global vdev + * reference. + * + * Return : Reference to vdev_cp_stats object on success or NULL on failure + */ +static inline +struct vdev_cp_stats *wlan_cp_stats_get_vdev_stats_obj(struct wlan_objmgr_vdev + *vdev) +{ + struct vdev_cp_stats *vdev_cs = NULL; + + if (vdev) { + vdev_cs = wlan_objmgr_vdev_get_comp_private_obj + (vdev, WLAN_UMAC_COMP_CP_STATS); + } + + return vdev_cs; +} + +/** + * wlan_cp_stats_get_peer_stats_obj() - API to get peer_cp_stats from peer + * @peer: Reference to peer global object + * + * This API used to get peer specific cp_stats object from global peer + * reference. + * + * Return : Reference to peer_cp_stats object on success or NULL on failure + */ +static inline +struct peer_cp_stats *wlan_cp_stats_get_peer_stats_obj(struct wlan_objmgr_peer + *peer) +{ + struct peer_cp_stats *peer_cs = NULL; + + if (peer) { + peer_cs = wlan_objmgr_peer_get_comp_private_obj + (peer, WLAN_UMAC_COMP_CP_STATS); + } + + return peer_cs; +} + +/** + * wlan_cp_stats_get_pdev_from_vdev() - API to get pdev_cp_stats obj from vdev + * @vdev: Reference to vdev global object + * + * This API used to get pdev specific cp_stats object from global vdev + * reference. + * + * Return: Reference to pdev_cp_stats object on success or NULL on failure + */ +static inline +struct pdev_cp_stats *wlan_cp_stats_get_pdev_from_vdev(struct wlan_objmgr_vdev + *vdev) +{ + struct wlan_objmgr_pdev *pdev; + struct pdev_cp_stats *pdev_cs = NULL; + + pdev = wlan_vdev_get_pdev(vdev); + if (pdev) { + pdev_cs = wlan_objmgr_pdev_get_comp_private_obj + (pdev, WLAN_UMAC_COMP_CP_STATS); + } + + return pdev_cs; +} + +/** + * wlan_cp_stats_ctx_get_from_pdev() - API to get cp_stats ctx obj from pdev + * @pdev: Reference to pdev global object + * + * This API used to get cp_stats context object from global pdev reference. + * + * Return: Reference to cp_stats_context object on success or NULL on failure + */ +static inline +struct cp_stats_context *wlan_cp_stats_ctx_get_from_pdev(struct wlan_objmgr_pdev + *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct cp_stats_context *csc = NULL; + + if (!pdev) + return NULL; + + psoc = wlan_pdev_get_psoc(pdev); + if (psoc) { + csc = wlan_objmgr_psoc_get_comp_private_obj + (psoc, WLAN_UMAC_COMP_CP_STATS); + } + return csc; +} + +/** + * wlan_cp_stats_ctx_get_from_vdev() - API to get cp_stats ctx obj from vdev + * @vdev: Reference to vdev global object + * + * This API used to get cp_stats context object from global vdev reference. + * + * Return: Reference to cp_stats_context object on success or NULL on failure + */ +static inline +struct cp_stats_context *wlan_cp_stats_ctx_get_from_vdev(struct wlan_objmgr_vdev + *vdev) +{ + struct wlan_objmgr_pdev *pdev; + + if (!vdev) + return NULL; + + pdev = wlan_vdev_get_pdev(vdev); + return wlan_cp_stats_ctx_get_from_pdev(pdev); +} + +/** + * wlan_cp_stats_ctx_get_from_peer() - API to get cp_stats ctx object from peer + * @peer: Reference to peer object + * + * This API used to get cp_stats context object from global peer reference. + * + * Return: Reference to cp_stats_context object on success or NULL on failure + */ +static inline +struct cp_stats_context *wlan_cp_stats_ctx_get_from_peer(struct wlan_objmgr_peer + *peer) +{ + struct wlan_objmgr_vdev *vdev; + + vdev = wlan_peer_get_vdev(peer); + return wlan_cp_stats_ctx_get_from_vdev(vdev); +} + +/** + * wlan_cp_stats_get_comp_id() - API to get cp_stats component id from umac + * component id + * @comp_id: umac comp id + * + * Return: wlan_cp_stats_comp_id + */ +static inline enum wlan_cp_stats_comp_id +wlan_cp_stats_get_comp_id(enum wlan_umac_comp_id comp_id) +{ + enum wlan_cp_stats_comp_id cp_stats_comp_id = + WLAN_CP_STATS_MAX_COMPONENTS; + + if (comp_id == WLAN_UMAC_COMP_ATF) + cp_stats_comp_id = WLAN_CP_STATS_ATF; + + return cp_stats_comp_id; +} + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_DEFS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_obj_mgr_handler.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_obj_mgr_handler.c new file mode 100644 index 0000000000000000000000000000000000000000..50e442f26efbbe896298175577246e08a320dea1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_obj_mgr_handler.c @@ -0,0 +1,405 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * Doc: wlan_cp_stats_om_handler.c + * + * This file provide definitions to APIs invoked on receiving common object + * repective create/destroy event notifications, which further + * (de)allocate cp specific objects and (de)attach to specific + * common object + */ +#include "wlan_cp_stats_obj_mgr_handler.h" +#include "wlan_cp_stats_defs.h" +#include "wlan_cp_stats_ol_api.h" +#include +#include "wlan_cp_stats_utils_api.h" + +QDF_STATUS +wlan_cp_stats_psoc_obj_create_handler(struct wlan_objmgr_psoc *psoc, void *arg) +{ + WLAN_DEV_TYPE dev_type; + struct cp_stats_context *csc = NULL; + struct psoc_cp_stats *psoc_cs = NULL; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!psoc) { + cp_stats_err("PSOC is NULL"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_psoc_obj_create_handler_return; + } + + csc = qdf_mem_malloc(sizeof(*csc)); + if (!csc) { + status = QDF_STATUS_E_NOMEM; + goto wlan_cp_stats_psoc_obj_create_handler_return; + } + + csc->psoc_obj = psoc; + dev_type = wlan_objmgr_psoc_get_dev_type(csc->psoc_obj); + if (dev_type == WLAN_DEV_INVALID) { + cp_stats_err("Failed to init cp stats ctx, bad device type"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_psoc_obj_create_handler_return; + } else if (WLAN_DEV_DA == dev_type) { + csc->cp_stats_ctx_init = wlan_cp_stats_ctx_init_da; + csc->cp_stats_ctx_deinit = wlan_cp_stats_ctx_deinit_da; + } else if (WLAN_DEV_OL == dev_type) { + csc->cp_stats_ctx_init = wlan_cp_stats_ctx_init_ol; + csc->cp_stats_ctx_deinit = wlan_cp_stats_ctx_deinit_ol; + } + + if (QDF_STATUS_SUCCESS != csc->cp_stats_ctx_init(csc)) { + cp_stats_err("Failed to init global ctx call back handlers"); + goto wlan_cp_stats_psoc_obj_create_handler_return; + } + + psoc_cs = qdf_mem_malloc(sizeof(*psoc_cs)); + if (!psoc_cs) { + status = QDF_STATUS_E_NOMEM; + goto wlan_cp_stats_psoc_obj_create_handler_return; + } + + psoc_cs->psoc_obj = psoc; + csc->psoc_cs = psoc_cs; + if (csc->cp_stats_psoc_obj_init) { + if (QDF_STATUS_SUCCESS != + csc->cp_stats_psoc_obj_init(psoc_cs)) { + cp_stats_err("Failed to initialize psoc handlers"); + goto wlan_cp_stats_psoc_obj_create_handler_return; + } + } + + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_CP_STATS, + csc, + QDF_STATUS_SUCCESS); + +wlan_cp_stats_psoc_obj_create_handler_return: + if (QDF_IS_STATUS_ERROR(status)) { + if (csc) { + if (csc->cp_stats_psoc_obj_deinit && psoc_cs) + csc->cp_stats_psoc_obj_deinit(psoc_cs); + + if (csc->psoc_cs) { + qdf_mem_free(csc->psoc_cs); + csc->psoc_cs = NULL; + } + + if (csc->cp_stats_ctx_deinit) + csc->cp_stats_ctx_deinit(csc); + + qdf_mem_free(csc); + csc = NULL; + } + return status; + } + + cp_stats_debug("cp stats context attach at psoc"); + return status; +} + +QDF_STATUS +wlan_cp_stats_psoc_obj_destroy_handler(struct wlan_objmgr_psoc *psoc, void *arg) +{ + struct cp_stats_context *csc; + + if (!psoc) { + cp_stats_err("PSOC is NULL"); + return QDF_STATUS_E_NOMEM; + } + csc = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_CP_STATS); + if (!csc) { + cp_stats_err("cp_stats context is NULL!"); + return QDF_STATUS_E_INVAL; + } + + wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_CP_STATS, csc); + if (csc->cp_stats_psoc_obj_deinit) + csc->cp_stats_psoc_obj_deinit(csc->psoc_cs); + qdf_mem_free(csc->psoc_cs); + if (csc->cp_stats_ctx_deinit) + csc->cp_stats_ctx_deinit(csc); + qdf_mem_free(csc); + + cp_stats_debug("cp stats context dettached at psoc"); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_cp_stats_pdev_obj_create_handler(struct wlan_objmgr_pdev *pdev, void *arg) +{ + struct cp_stats_context *csc = NULL; + struct pdev_cp_stats *pdev_cs = NULL; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!pdev) { + cp_stats_err("PDEV is NULL"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_pdev_obj_create_handler_return; + } + + pdev_cs = qdf_mem_malloc(sizeof(*pdev_cs)); + if (!pdev_cs) { + status = QDF_STATUS_E_NOMEM; + goto wlan_cp_stats_pdev_obj_create_handler_return; + } + csc = wlan_cp_stats_ctx_get_from_pdev(pdev); + if (!csc) { + cp_stats_err("cp_stats context is NULL!"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_pdev_obj_create_handler_return; + } + pdev_cs->pdev_obj = pdev; + if (csc->cp_stats_pdev_obj_init) { + if (QDF_STATUS_SUCCESS != + csc->cp_stats_pdev_obj_init(pdev_cs)) { + cp_stats_err("Failed to initialize pdev handlers"); + goto wlan_cp_stats_pdev_obj_create_handler_return; + } + } + + status = wlan_objmgr_pdev_component_obj_attach(pdev, + WLAN_UMAC_COMP_CP_STATS, + pdev_cs, + QDF_STATUS_SUCCESS); + + cp_stats_debug("pdev cp stats object attached"); +wlan_cp_stats_pdev_obj_create_handler_return: + if (QDF_IS_STATUS_ERROR(status)) { + if (csc) { + if (csc->cp_stats_pdev_obj_deinit) + csc->cp_stats_pdev_obj_deinit(pdev_cs); + } + + if (pdev_cs) + qdf_mem_free(pdev_cs); + } + + return status; +} + +QDF_STATUS +wlan_cp_stats_pdev_obj_destroy_handler(struct wlan_objmgr_pdev *pdev, void *arg) +{ + struct pdev_cp_stats *pdev_cs; + struct cp_stats_context *csc; + + if (!pdev) { + cp_stats_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + pdev_cs = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CP_STATS); + if (!pdev_cs) { + cp_stats_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + csc = wlan_cp_stats_ctx_get_from_pdev(pdev); + if (!csc) { + cp_stats_err("cp_stats context is NULL!"); + return QDF_STATUS_E_INVAL; + } + + if (csc->cp_stats_pdev_obj_deinit) + csc->cp_stats_pdev_obj_deinit(pdev_cs); + + wlan_objmgr_pdev_component_obj_detach(pdev, WLAN_UMAC_COMP_CP_STATS, + pdev_cs); + + qdf_mem_free(pdev_cs); + cp_stats_debug("pdev cp stats object dettached"); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_cp_stats_vdev_obj_create_handler(struct wlan_objmgr_vdev *vdev, void *arg) +{ + struct cp_stats_context *csc = NULL; + struct vdev_cp_stats *vdev_cs = NULL; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!vdev) { + cp_stats_err("vdev is NULL"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_vdev_obj_create_handler_return; + } + + vdev_cs = qdf_mem_malloc(sizeof(*vdev_cs)); + if (!vdev_cs) { + status = QDF_STATUS_E_NOMEM; + goto wlan_cp_stats_vdev_obj_create_handler_return; + } + csc = wlan_cp_stats_ctx_get_from_vdev(vdev); + if (!csc) { + cp_stats_err("cp_stats context is NULL!"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_vdev_obj_create_handler_return; + } + vdev_cs->vdev_obj = vdev; + if (csc->cp_stats_vdev_obj_init) { + if (QDF_STATUS_SUCCESS != + csc->cp_stats_vdev_obj_init(vdev_cs)) { + cp_stats_err("Failed to initialize vdev handlers"); + goto wlan_cp_stats_vdev_obj_create_handler_return; + } + } + + status = wlan_objmgr_vdev_component_obj_attach(vdev, + WLAN_UMAC_COMP_CP_STATS, + vdev_cs, + QDF_STATUS_SUCCESS); + +wlan_cp_stats_vdev_obj_create_handler_return: + if (QDF_IS_STATUS_ERROR(status)) { + if (csc) { + if (csc->cp_stats_vdev_obj_deinit) + csc->cp_stats_vdev_obj_deinit(vdev_cs); + } + + if (vdev_cs) + qdf_mem_free(vdev_cs); + } + + cp_stats_debug("vdev cp stats object attach"); + return status; +} + +QDF_STATUS +wlan_cp_stats_vdev_obj_destroy_handler(struct wlan_objmgr_vdev *vdev, void *arg) +{ + struct vdev_cp_stats *vdev_cs; + struct cp_stats_context *csc; + + if (!vdev) { + cp_stats_err("vdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + vdev_cs = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_CP_STATS); + if (!vdev_cs) { + cp_stats_err("vdev is NULL"); + return QDF_STATUS_E_INVAL; + } + csc = wlan_cp_stats_ctx_get_from_vdev(vdev); + if (!csc) { + cp_stats_err("cp_stats context is NULL!"); + return QDF_STATUS_E_INVAL; + } + + if (csc->cp_stats_vdev_obj_deinit) + csc->cp_stats_vdev_obj_deinit(vdev_cs); + + wlan_objmgr_vdev_component_obj_detach(vdev, WLAN_UMAC_COMP_CP_STATS, + vdev_cs); + + qdf_mem_free(vdev_cs); + cp_stats_debug("vdev cp stats object dettach"); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_cp_stats_peer_obj_create_handler(struct wlan_objmgr_peer *peer, void *arg) +{ + struct cp_stats_context *csc = NULL; + struct peer_cp_stats *peer_cs = NULL; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!peer) { + cp_stats_err("peer is NULL"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_peer_obj_create_handler_return; + } + + peer_cs = qdf_mem_malloc(sizeof(*peer_cs)); + if (!peer_cs) { + status = QDF_STATUS_E_NOMEM; + goto wlan_cp_stats_peer_obj_create_handler_return; + } + csc = wlan_cp_stats_ctx_get_from_peer(peer); + if (!csc) { + cp_stats_err("cp_stats context is NULL!"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_peer_obj_create_handler_return; + } + peer_cs->peer_obj = peer; + if (csc->cp_stats_peer_obj_init) { + if (QDF_STATUS_SUCCESS != + csc->cp_stats_peer_obj_init(peer_cs)) { + cp_stats_err("Failed to initialize peer handlers"); + goto wlan_cp_stats_peer_obj_create_handler_return; + } + } + + status = wlan_objmgr_peer_component_obj_attach(peer, + WLAN_UMAC_COMP_CP_STATS, + peer_cs, + QDF_STATUS_SUCCESS); + +wlan_cp_stats_peer_obj_create_handler_return: + if (QDF_IS_STATUS_ERROR(status)) { + if (csc) { + if (csc->cp_stats_peer_obj_deinit) + csc->cp_stats_peer_obj_deinit(peer_cs); + } + + if (peer_cs) + qdf_mem_free(peer_cs); + } + + cp_stats_debug("peer cp stats object attach"); + return status; +} + +QDF_STATUS +wlan_cp_stats_peer_obj_destroy_handler(struct wlan_objmgr_peer *peer, void *arg) +{ + struct peer_cp_stats *peer_cs; + struct cp_stats_context *csc; + + if (!peer) { + cp_stats_err("peer is NULL"); + return QDF_STATUS_E_INVAL; + } + + peer_cs = wlan_objmgr_peer_get_comp_private_obj(peer, + WLAN_UMAC_COMP_CP_STATS); + if (!peer_cs) { + cp_stats_err("peer is NULL"); + return QDF_STATUS_E_INVAL; + } + csc = wlan_cp_stats_ctx_get_from_peer(peer); + if (!csc) { + cp_stats_err("cp_stats context is NULL!"); + return QDF_STATUS_E_INVAL; + } + + if (csc->cp_stats_peer_obj_deinit) + csc->cp_stats_peer_obj_deinit(peer_cs); + + wlan_objmgr_peer_component_obj_detach(peer, WLAN_UMAC_COMP_CP_STATS, + peer_cs); + + qdf_mem_free(peer_cs); + cp_stats_debug("peer cp stats object dettached"); + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_obj_mgr_handler.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_obj_mgr_handler.h new file mode 100644 index 0000000000000000000000000000000000000000..0f60f94b940e250461a07592102be1f56d01b2a3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_obj_mgr_handler.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_obj_mgr_handler.h + * + * This header file provide declarations for APIs to handle events from object + * manager for registered events from wlan_cp_stats_init() + */ + +#ifndef __WLAN_CP_STATS_OBJ_MGR_HANDLER_H__ +#define __WLAN_CP_STATS_OBJ_MGR_HANDLER_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include +#include +#include +#include +#include +#include + +#ifdef QCA_SUPPORT_CP_STATS_DA +#include "wlan_cp_stats_da_api.h" +#else +#include "wlan_cp_stats_defs.h" +#endif + +/** + * wlan_cp_stats_psoc_obj_create_handler() - psoc create notification handler + * callback function + * @psoc: pointer to psoc object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_psoc_obj_create_handler( + struct wlan_objmgr_psoc *psoc, void *data); + +/** + * wlan_cp_stats_psoc_obj_destroy_handler() - psoc destroy notification handler + * callback function + * @psoc: pointer to psoc object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_psoc_obj_destroy_handler( + struct wlan_objmgr_psoc *psoc, void *data); + +/** + * wlan_cp_stats_pdev_obj_create_handler() - Pdev create notification handler + * callback function + * @pdev: pointer to pdev object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_pdev_obj_create_handler( + struct wlan_objmgr_pdev *pdev, void *data); + +/** + * wlan_cp_stats_pdev_obj_destroy_handler() - Pdev destroy notification handler + * callback function + * @pdev: pointer to pdev object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_pdev_obj_destroy_handler( + struct wlan_objmgr_pdev *pdev, void *data); + +/** + * wlan_cp_stats_vdev_obj_create_handler() - vdev create notification handler + * callback function + * @vdev: pointer to vdev object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_vdev_obj_create_handler( + struct wlan_objmgr_vdev *vdev, void *data); + +/** + * wlan_cp_stats_vdev_obj_destroy_handler() - vdev destroy notification handler + * callback function + * @vdev: pointer to vdev object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_vdev_obj_destroy_handler( + struct wlan_objmgr_vdev *vdev, void *data); + +/** + * wlan_cp_stats_peer_obj_create_handler() - peer create notification handler + * callback function + * @peer: pointer to peer object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_peer_obj_create_handler( + struct wlan_objmgr_peer *peer, void *data); + +/** + * wlan_cp_stats_peer_obj_destroy_handler() - peer destroy notification handler + * callback function + * @peer: pointer to peer object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_peer_obj_destroy_handler( + struct wlan_objmgr_peer *peer, void *data); + +#ifndef QCA_SUPPORT_CP_STATS_DA +static inline +QDF_STATUS wlan_cp_stats_ctx_init_da(struct cp_stats_context *csc) +{ + return QDF_STATUS_SUCCESS; +} + +static inline +QDF_STATUS wlan_cp_stats_ctx_deinit_da(struct cp_stats_context *csc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_OBJ_MGR_HANDLER_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_ol_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_ol_api.c new file mode 100644 index 0000000000000000000000000000000000000000..1f038310a5505d5279cadc1793b69766703fc99a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_ol_api.c @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_ol_api.c + * + * This file provide definitions for following + * - (de)init cp stat global ctx obj + * - (de)init common specific ucfg handler + * - (de)register to WMI events for psoc open + */ +#include +#include "wlan_cp_stats_defs.h" +#include "wlan_cp_stats_ol_api.h" +#include "wlan_cp_stats_cmn_api_i.h" +#include +#include +#include + +QDF_STATUS wlan_cp_stats_psoc_obj_init_ol(struct psoc_cp_stats *psoc_cs) +{ + qdf_spinlock_create(&psoc_cs->psoc_cp_stats_lock); + wlan_cp_stats_psoc_cs_init(psoc_cs); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_psoc_obj_deinit_ol(struct psoc_cp_stats *psoc_cs) +{ + wlan_cp_stats_psoc_cs_deinit(psoc_cs); + qdf_spinlock_destroy(&psoc_cs->psoc_cp_stats_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_pdev_obj_init_ol(struct pdev_cp_stats *pdev_cs) +{ + qdf_spinlock_create(&pdev_cs->pdev_cp_stats_lock); + wlan_cp_stats_pdev_cs_init(pdev_cs); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_pdev_obj_deinit_ol(struct pdev_cp_stats *pdev_cs) +{ + wlan_cp_stats_pdev_cs_deinit(pdev_cs); + qdf_spinlock_destroy(&pdev_cs->pdev_cp_stats_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_vdev_obj_init_ol(struct vdev_cp_stats *vdev_cs) +{ + qdf_spinlock_create(&vdev_cs->vdev_cp_stats_lock); + wlan_cp_stats_vdev_cs_init(vdev_cs); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_vdev_obj_deinit_ol(struct vdev_cp_stats *vdev_cs) +{ + wlan_cp_stats_vdev_cs_deinit(vdev_cs); + qdf_spinlock_destroy(&vdev_cs->vdev_cp_stats_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_peer_obj_init_ol(struct peer_cp_stats *peer_cs) +{ + qdf_spinlock_create(&peer_cs->peer_cp_stats_lock); + wlan_cp_stats_peer_cs_init(peer_cs); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_peer_obj_deinit_ol(struct peer_cp_stats *peer_cs) +{ + wlan_cp_stats_peer_cs_deinit(peer_cs); + qdf_spinlock_destroy(&peer_cs->peer_cp_stats_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_open_ol(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + cp_stats_err("PSOC is null!"); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_close_ol(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + cp_stats_err("PSOC is null!"); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_enable_ol(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_cp_stats_tx_ops *tx_ops; + + if (!psoc) { + cp_stats_err("PSOC is null!"); + return QDF_STATUS_E_INVAL; + } + + tx_ops = target_if_cp_stats_get_tx_ops(psoc); + if (!tx_ops) { + cp_stats_err("tx_ops is null!"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!tx_ops->cp_stats_attach) { + cp_stats_err("cp_stats_attach function ptr is null!"); + return QDF_STATUS_E_NULL_VALUE; + } + + tx_ops->cp_stats_attach(psoc); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_disable_ol(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_cp_stats_tx_ops *tx_ops; + + if (!psoc) { + cp_stats_err("PSOC is null!"); + return QDF_STATUS_E_INVAL; + } + + tx_ops = target_if_cp_stats_get_tx_ops(psoc); + if (!tx_ops) { + cp_stats_err("tx_ops is null!"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!tx_ops->cp_stats_detach) { + cp_stats_err("cp_stats_detach function ptr is null!"); + return QDF_STATUS_E_NULL_VALUE; + } + + tx_ops->cp_stats_detach(psoc); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_ctx_init_ol(struct cp_stats_context *csc) +{ + csc->cp_stats_open = wlan_cp_stats_open_ol; + csc->cp_stats_close = wlan_cp_stats_close_ol; + csc->cp_stats_enable = wlan_cp_stats_enable_ol; + csc->cp_stats_disable = wlan_cp_stats_disable_ol; + csc->cp_stats_psoc_obj_init = wlan_cp_stats_psoc_obj_init_ol; + csc->cp_stats_psoc_obj_deinit = wlan_cp_stats_psoc_obj_deinit_ol; + csc->cp_stats_pdev_obj_init = wlan_cp_stats_pdev_obj_init_ol; + csc->cp_stats_pdev_obj_deinit = wlan_cp_stats_pdev_obj_deinit_ol; + csc->cp_stats_vdev_obj_init = wlan_cp_stats_vdev_obj_init_ol; + csc->cp_stats_vdev_obj_deinit = wlan_cp_stats_vdev_obj_deinit_ol; + csc->cp_stats_peer_obj_init = wlan_cp_stats_peer_obj_init_ol; + csc->cp_stats_peer_obj_deinit = wlan_cp_stats_peer_obj_deinit_ol; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_ctx_deinit_ol(struct cp_stats_context *csc) +{ + csc->cp_stats_open = NULL; + csc->cp_stats_close = NULL; + csc->cp_stats_enable = NULL; + csc->cp_stats_disable = NULL; + csc->cp_stats_psoc_obj_init = NULL; + csc->cp_stats_psoc_obj_deinit = NULL; + csc->cp_stats_pdev_obj_init = NULL; + csc->cp_stats_pdev_obj_deinit = NULL; + csc->cp_stats_vdev_obj_init = NULL; + csc->cp_stats_vdev_obj_deinit = NULL; + csc->cp_stats_peer_obj_init = NULL; + csc->cp_stats_peer_obj_deinit = NULL; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_ol_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_ol_api.h new file mode 100644 index 0000000000000000000000000000000000000000..1b97c956a0172534670c81eb70a42174e2431955 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_ol_api.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_ol_api.h + * + * This header file provide API declarations required for cp stats global + * context specific to offload + */ + +#ifndef __WLAN_CP_STATS_OL_API_H__ +#define __WLAN_CP_STATS_OL_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include +#include "wlan_cp_stats_defs.h" +#include "wlan_cp_stats_ol_api.h" + +/** + * wlan_cp_stats_psoc_obj_init_ol() - private API to init psoc cp stats obj + * @psoc_cs: pointer to psoc cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_psoc_obj_init_ol(struct psoc_cp_stats *psoc_cs); + +/** + * wlan_cp_stats_psoc_obj_deinit_ol() - private API to deinit psoc cp stats obj + * @psoc_cs: pointer to psoc cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_psoc_obj_deinit_ol(struct psoc_cp_stats *psoc_cs); + +/** + * wlan_cp_stats_pdev_obj_init_ol() - private API to init pdev cp stats obj + * @pdev_cs: pointer to pdev cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_pdev_obj_init_ol(struct pdev_cp_stats *pdev_cs); + +/** + * wlan_cp_stats_pdev_obj_deinit_ol() - private API to deinit pdev cp stats obj + * @pdev_cs: pointer to pdev cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_pdev_obj_deinit_ol(struct pdev_cp_stats *pdev_cs); + +/** + * wlan_cp_stats_vdev_obj_init_ol() - private API to init vdev cp stats obj + * @vdev_cs: pointer to vdev cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_vdev_obj_init_ol(struct vdev_cp_stats *vdev_cs); + +/** + * wlan_cp_stats_vdev_obj_deinit_ol() - private API to deinit vdev cp stats obj + * @vdev_cs: pointer to vdev cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_vdev_obj_deinit_ol(struct vdev_cp_stats *vdev_cs); + +/** + * wlan_cp_stats_peer_obj_init_ol() - private API to init peer cp stats obj + * @peer_cs: pointer to peer cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_peer_obj_init_ol(struct peer_cp_stats *peer_cs); + +/** + * wlan_cp_stats_peer_obj_deinit_ol() - private API to deinit peer cp stats obj + * @peer_cs: pointer to peer cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_peer_obj_deinit_ol(struct peer_cp_stats *peer_cs); + +/** + * wlan_cp_stats_open_ol() - private API for psoc open + * @psoc: pointer to psoc object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_open_ol(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_close_ol() - private API for psoc close + * @psoc: pointer to psoc object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_close_ol(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_enable_ol() - private API for psoc enable + * @psoc: pointer to psoc enable + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_enable_ol(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_disable_ol() - private API for psoc disable + * @psoc: pointer to psoc enable + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_disable_ol(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_ctx_init_ol() - private API to initialize cp stat global ctx + * @csc: pointer to cp stats global context object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_ctx_init_ol(struct cp_stats_context *csc); + +/** + * wlan_cp_stats_ctx_deinit_ol() - private API to deinit cp stat global ctx + * @csc: pointer to cp stats global context object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_ctx_deinit_ol(struct cp_stats_context *csc); + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_OL_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_chan_info_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_chan_info_api.h new file mode 100644 index 0000000000000000000000000000000000000000..734e898036073a9748a78bf79f2523c4d1f93bcf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_chan_info_api.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_chan_info_api.h + * + * This file provide structure definitions for ACS related control plane stats + */ + +#ifndef __WLAN_CP_STATS_CHAN_INFO_API_H__ +#define __WLAN_CP_STATS_CHAN_INFO_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS + +#endif /* QCA_SUPPORT_CP_STATS*/ +#endif /* __WLAN_CP_STATS_CHAN_INFO_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_chan_info_defs.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_chan_info_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..48ac52ceb852d8d30d6ad476431762528f3f036f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_chan_info_defs.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_chan_info_defs.h + * + * This file provide structure definitions for ACS related control plane stats + */ +#ifndef __WLAN_CP_STATS_CHAN_INFO_DEFS_H__ +#define __WLAN_CP_STATS_CHAN_INFO_DEFS_H__ + +#ifdef QCA_SUPPORT_CP_STATS + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_CHAN_INFO_DEFS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_defs.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..d06ad955d4f2734ea8efca5852acc8445c7276ab --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_defs.h @@ -0,0 +1,537 @@ +/* + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_mc_defs.h + * + * This file provide definition for structure/enums/defines related to control + * path stats componenet + */ + +#ifndef __WLAN_CP_STATS_MC_DEFS_H__ +#define __WLAN_CP_STATS_MC_DEFS_H__ + +#ifdef CONFIG_MCL + +#include "wlan_cmn.h" +#include "qdf_event.h" + +#define MAX_NUM_CHAINS 2 + +#define MAX_MIB_STATS 1 + +#define IS_MSB_SET(__num) ((__num) & BIT(31)) +#define IS_LSB_SET(__num) ((__num) & BIT(0)) + +#define VDEV_ALL 0xFF + +/** + * enum stats_req_type - enum indicating bit position of various stats type in + * request map + * @TYPE_CONNECTION_TX_POWER: tx power was requested + * @TYPE_STATION_STATS: station stats was requested + * @TYPE_PEER_STATS: peer stats was requested + * @TYPE_MIB_STATS: MIB stats was requested + */ +enum stats_req_type { + TYPE_CONNECTION_TX_POWER = 0, + TYPE_STATION_STATS, + TYPE_PEER_STATS, + TYPE_MIB_STATS, + TYPE_MAX, +}; + +/** + * enum tx_rate_info - tx rate flags + * @TX_RATE_LEGACY: Legacy rates + * @TX_RATE_HT20: HT20 rates + * @TX_RATE_HT40: HT40 rates + * @TX_RATE_SGI: Rate with Short guard interval + * @TX_RATE_LGI: Rate with Long guard interval + * @TX_RATE_VHT20: VHT 20 rates + * @TX_RATE_VHT40: VHT 40 rates + * @TX_RATE_VHT80: VHT 80 rates + * @TX_RATE_HE20: HE 20 rates + * @TX_RATE_HE40: HE 40 rates + * @TX_RATE_HE80: HE 80 rates + * @TX_RATE_HE160: HE 160 rates + * @TX_RATE_VHT160: VHT 160 rates + */ +enum tx_rate_info { + TX_RATE_LEGACY = 0x1, + TX_RATE_HT20 = 0x2, + TX_RATE_HT40 = 0x4, + TX_RATE_SGI = 0x8, + TX_RATE_LGI = 0x10, + TX_RATE_VHT20 = 0x20, + TX_RATE_VHT40 = 0x40, + TX_RATE_VHT80 = 0x80, + TX_RATE_HE20 = 0x100, + TX_RATE_HE40 = 0x200, + TX_RATE_HE80 = 0x400, + TX_RATE_HE160 = 0x800, + TX_RATE_VHT160 = 0x1000, +}; + +/** + * enum - txrate_gi + * @txrate_gi_0_8_US: guard interval 0.8 us + * @txrate_gi_0_4_US: guard interval 0.4 us for legacy + * @txrate_gi_1_6_US: guard interval 1.6 us + * @txrate_gi_3_2_US: guard interval 3.2 us + */ +enum txrate_gi { + TXRATE_GI_0_8_US = 0, + TXRATE_GI_0_4_US, + TXRATE_GI_1_6_US, + TXRATE_GI_3_2_US, +}; + +/** + * struct wake_lock_stats - wake lock stats structure + * @ucast_wake_up_count: Unicast wakeup count + * @bcast_wake_up_count: Broadcast wakeup count + * @ipv4_mcast_wake_up_count: ipv4 multicast wakeup count + * @ipv6_mcast_wake_up_count: ipv6 multicast wakeup count + * @ipv6_mcast_ra_stats: ipv6 multicast ra stats + * @ipv6_mcast_ns_stats: ipv6 multicast ns stats + * @ipv6_mcast_na_stats: ipv6 multicast na stats + * @icmpv4_count: ipv4 icmp packet count + * @icmpv6_count: ipv6 icmp packet count + * @rssi_breach_wake_up_count: rssi breach wakeup count + * @low_rssi_wake_up_count: low rssi wakeup count + * @gscan_wake_up_count: gscan wakeup count + * @pno_complete_wake_up_count: pno complete wakeup count + * @pno_match_wake_up_count: pno match wakeup count + * @oem_response_wake_up_count: oem response wakeup count + * @pwr_save_fail_detected: pwr save fail detected wakeup count + * @scan_11d 11d scan wakeup count + * @mgmt_assoc: association request management frame + * @mgmt_disassoc: disassociation management frame + * @mgmt_assoc_resp: association response management frame + * @mgmt_reassoc: reassociate request management frame + * @mgmt_reassoc_resp: reassociate response management frame + * @mgmt_auth: authentication managament frame + * @mgmt_deauth: deauthentication management frame + * @mgmt_action: action managament frame + */ +struct wake_lock_stats { + uint32_t ucast_wake_up_count; + uint32_t bcast_wake_up_count; + uint32_t ipv4_mcast_wake_up_count; + uint32_t ipv6_mcast_wake_up_count; + uint32_t ipv6_mcast_ra_stats; + uint32_t ipv6_mcast_ns_stats; + uint32_t ipv6_mcast_na_stats; + uint32_t icmpv4_count; + uint32_t icmpv6_count; + uint32_t rssi_breach_wake_up_count; + uint32_t low_rssi_wake_up_count; + uint32_t gscan_wake_up_count; + uint32_t pno_complete_wake_up_count; + uint32_t pno_match_wake_up_count; + uint32_t oem_response_wake_up_count; + uint32_t pwr_save_fail_detected; + uint32_t scan_11d; + uint32_t mgmt_assoc; + uint32_t mgmt_disassoc; + uint32_t mgmt_assoc_resp; + uint32_t mgmt_reassoc; + uint32_t mgmt_reassoc_resp; + uint32_t mgmt_auth; + uint32_t mgmt_deauth; + uint32_t mgmt_action; +}; + +struct stats_event; + +/** + * struct request_info: details of each request + * @cookie: identifier for os_if request + * @u: unified data type for callback to process tx power/peer rssi/ + * station stats/mib stats request when response comes. + * @vdev_id: vdev_id of request + * @pdev_id: pdev_id of request + * @peer_mac_addr: peer mac address + */ +struct request_info { + void *cookie; + union { + void (*get_tx_power_cb)(int tx_power, void *cookie); + void (*get_peer_rssi_cb)(struct stats_event *ev, void *cookie); + void (*get_station_stats_cb)(struct stats_event *ev, + void *cookie); + void (*get_mib_stats_cb)(struct stats_event *ev, + void *cookie); + } u; + uint32_t vdev_id; + uint32_t pdev_id; + uint8_t peer_mac_addr[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct pending_stats_requests: details of pending requests + * @type_map: map indicating type of outstanding requests + * @req: array of info for outstanding request of each type + */ +struct pending_stats_requests { + uint32_t type_map; + struct request_info req[TYPE_MAX]; +}; + +/** + * struct cca_stats - cca stats + * @congestion: the congestion percentage = (busy_time/total_time)*100 + * for the interval from when the vdev was started to the current time + * (or the time at which the vdev was stopped). + */ +struct cca_stats { + uint32_t congestion; +}; + +/** + * struct psoc_mc_cp_stats: psoc specific stats + * @is_cp_stats_suspended: is cp stats suspended or not + * @pending: details of pending requests + * @wow_unspecified_wake_up_count: number of non-wow related wake ups + * @wow_stats: wake_lock stats for vdev + */ +struct psoc_mc_cp_stats { + bool is_cp_stats_suspended; + struct pending_stats_requests pending; + uint32_t wow_unspecified_wake_up_count; + struct wake_lock_stats wow_stats; +}; + +/** + * struct pdev_mc_cp_stats: pdev specific stats + * @max_pwr: max tx power for vdev + */ +struct pdev_mc_cp_stats { + int32_t max_pwr; +}; + +/** + * struct summary_stats - summary stats + * @snr: snr of vdev + * @rssi: rssi of vdev + * @retry_cnt: retry count + * @multiple_retry_cnt: multiple_retry_cnt + * @tx_frm_cnt: num of tx frames + * @rx_frm_cnt: num of rx frames + * @frm_dup_cnt: duplicate frame count + * @fail_cnt: fail count + * @rts_fail_cnt: rts fail count + * @ack_fail_cnt: ack fail count + * @rts_succ_cnt: rts success count + * @rx_discard_cnt: rx frames discarded + * @rx_error_cnt: rx frames with error + */ +struct summary_stats { + uint32_t snr; + int8_t rssi; + uint32_t retry_cnt[4]; + uint32_t multiple_retry_cnt[4]; + uint32_t tx_frm_cnt[4]; + uint32_t rx_frm_cnt; + uint32_t frm_dup_cnt; + uint32_t fail_cnt[4]; + uint32_t rts_fail_cnt; + uint32_t ack_fail_cnt; + uint32_t rts_succ_cnt; + uint32_t rx_discard_cnt; + uint32_t rx_error_cnt; +}; + +/** + * struct vdev_mc_cp_stats - vdev specific stats + * @cca: cca stats + * @tx_rate_flags: tx rate flags (enum tx_rate_info) + * @chain_rssi: chain rssi + * @vdev_summary_stats: vdev's summary stats + */ +struct vdev_mc_cp_stats { + struct cca_stats cca; + uint32_t tx_rate_flags; + int8_t chain_rssi[MAX_NUM_CHAINS]; + struct summary_stats vdev_summary_stats; +}; + +/** + * struct peer_extd_stats - Peer extension statistics + * @peer_macaddr: peer MAC address + * @rx_duration: lower 32 bits of rx duration in microseconds + * @peer_tx_bytes: Total TX bytes (including dot11 header) sent to peer + * @peer_rx_bytes: Total RX bytes (including dot11 header) received from peer + * @last_tx_rate_code: last TX ratecode + * @last_tx_power: TX power used by peer - units are 0.5 dBm + * @rx_mc_bc_cnt: Total number of received multicast & broadcast data frames + * corresponding to this peer, 1 in the MSB of rx_mc_bc_cnt represents a + * valid data + */ +struct peer_extd_stats { + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t rx_duration; + uint32_t peer_tx_bytes; + uint32_t peer_rx_bytes; + uint32_t last_tx_rate_code; + int32_t last_tx_power; + uint32_t rx_mc_bc_cnt; +}; + +/** + * struct peer_mc_cp_stats - peer specific stats + * @tx_rate: tx rate + * @rx_rate: rx rate + * @peer_rssi: rssi + * @peer_macaddr: mac address + * @extd_stats: Pointer to peer extended stats + * @adv_stats: Pointer to peer adv (extd2) stats + */ +struct peer_mc_cp_stats { + uint32_t tx_rate; + uint32_t rx_rate; + int8_t peer_rssi; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + struct peer_extd_stats *extd_stats; + struct peer_adv_mc_cp_stats *adv_stats; +}; + +/** + * struct peer_adv_mc_cp_stats - peer specific adv stats + * @peer_macaddr: mac address + * @fcs_count: fcs count + * @rx_bytes: rx bytes + * @rx_count: rx count + */ +struct peer_adv_mc_cp_stats { + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t fcs_count; + uint32_t rx_count; + uint64_t rx_bytes; +}; + +#ifdef WLAN_FEATURE_MIB_STATS +/** + * struct dot11_counters - mib group containing attributes that are MAC counters + * @tx_frags: successfully transmitted fragments + * @group_tx_frames: transmitted group addressed frames + * @failed_cnt: MSDUs not transmitted successfully + * @rx_frags: fragments successfully received + * @group_rx_frames: group addressed frames received + * @fcs_error_cnt: FCS errors detected + * @tx_frames: frames successfully transmitted + */ +struct dot11_counters { + uint32_t tx_frags; + uint32_t group_tx_frames; + uint32_t failed_cnt; + uint32_t rx_frags; + uint32_t group_rx_frames; + uint32_t fcs_error_cnt; + uint32_t tx_frames; +}; + +/** + * struct dot11_mac_statistics - mib stats information on the operation of MAC + * @retry_cnt: retries done by mac for successful transmition + * @multi_retry_cnt: multiple retries done before successful transmition + * @frame_dup_cnt: duplicate no of frames + * @rts_success_cnt: number of CTS received (in response to RTS) + * @rts_fail_cnt: number of CTS not received (in response to RTS) + * @tx_ack_fail_cnt: number of ACK not received + */ +struct dot11_mac_statistics { + uint32_t retry_cnt; + uint32_t multi_retry_cnt; + uint32_t frame_dup_cnt; + uint32_t rts_success_cnt; + uint32_t rts_fail_cnt; + uint32_t tx_ack_fail_cnt; +}; + +/** + * dot11_qos_counters - qos mac counters + * @qos_tx_frag_cnt: transmitted QoS fragments + * @qos_failed_cnt: failed Qos fragments + * @qos_retry_cnt: Qos frames transmitted after retransmissions + * @qos_multi_retry_cnt: Qos frames transmitted after more than + * one retransmissions + * @qos_frame_dup_cnt: duplicate frames + * @qos_rts_success_cnt: number of CTS received (in response to RTS) + * @qos_rts_fail_cnt: number of CTS not received (in response to RTS) + * @tx_qos_ack_fail_cnt_up: number of ACK not received + * (in response to Qos frame) + * @qos_rx_frag_cnt: number of received MPDU of type Data + * @qos_tx_frame_cnt: number of transmitted MPDU of type Data + * @qos_discarded_frame_cnt: total Discarded MSDUs + * @qos_mpdu_rx_cnt: total received MPDU + * @qos_retries_rx_cnt: received MPDU with retry bit equal to 1 + */ +struct dot11_qos_counters { + uint32_t qos_tx_frag_cnt; + uint32_t qos_failed_cnt; + uint32_t qos_retry_cnt; + uint32_t qos_multi_retry_cnt; + uint32_t qos_frame_dup_cnt; + uint32_t qos_rts_success_cnt; + uint32_t qos_rts_fail_cnt; + uint32_t tx_qos_ack_fail_cnt_up; + uint32_t qos_rx_frag_cnt; + uint32_t qos_tx_frame_cnt; + uint32_t qos_discarded_frame_cnt; + uint32_t qos_mpdu_rx_cnt; + uint32_t qos_retries_rx_cnt; +}; + +/** + * dot11_rsna_stats - mib rsn stats + * @rm_ccmp_replays: received robust management CCMP MPDUs discarded + * by the replay mechanism + * @tkip_icv_err: TKIP ICV errors encountered + * @tkip_replays: TKIP replay errors detected + * @ccmp_decrypt_err: MPDUs discarded by the CCMP decryption algorithm + * @ccmp_replays: received CCMP MPDUs discarded by the replay mechanism + * @cmac_icv_err: MPDUs discarded by the CMAC integrity check algorithm + * @cmac_replays: MPDUs discarded by the CMAC replay errors + */ +struct dot11_rsna_stats { + uint32_t rm_ccmp_replays; + uint32_t tkip_icv_err; + uint32_t tkip_replays; + uint32_t ccmp_decrypt_err; + uint32_t ccmp_replays; + uint32_t cmac_icv_err; + uint32_t cmac_replays; +}; + +/** + * dot11_counters_group3 - dot11 group3 stats + * @tx_ampdu_cnt: transmitted AMPDUs + * @tx_mpdus_in_ampdu_cnt: number of MPDUs in the A-MPDU in transmitted AMPDUs + * @tx_octets_in_ampdu_cnt: octets in the transmitted A-MPDUs + * @ampdu_rx_cnt: received A-MPDU + * @mpdu_in_rx_ampdu_cnt: MPDUs received in the A-MPDU + * @rx_octets_in_ampdu_cnt: octets in the received A-MPDU + * @rx_ampdu_deli_crc_err_cnt: number of MPDUs delimiter with CRC error + */ +struct dot11_counters_group3 { + uint32_t tx_ampdu_cnt; + uint32_t tx_mpdus_in_ampdu_cnt; + uint64_t tx_octets_in_ampdu_cnt; + uint32_t ampdu_rx_cnt; + uint32_t mpdu_in_rx_ampdu_cnt; + uint64_t rx_octets_in_ampdu_cnt; + uint32_t rx_ampdu_deli_crc_err_cnt; +}; + +/** + * mib_stats_metrics - mib stats counters + * @mib_counters: dot11Counters group + * @mib_mac_statistics: dot11MACStatistics group + * @mib_qos_counters: dot11QoSCounters group + * @mib_rsna_stats: dot11RSNAStats group + * @mib_counters_group3: dot11CountersGroup3 group + */ +struct mib_stats_metrics { + struct dot11_counters mib_counters; + struct dot11_mac_statistics mib_mac_statistics; + struct dot11_qos_counters mib_qos_counters; + struct dot11_rsna_stats mib_rsna_stats; + struct dot11_counters_group3 mib_counters_group3; +}; +#endif + +/** + * struct congestion_stats_event: congestion stats event param + * @vdev_id: vdev_id of the event + * @congestion: the congestion percentage + */ +struct congestion_stats_event { + uint8_t vdev_id; + uint32_t congestion; +}; + +/** + * struct summary_stats_event - summary_stats event param + * @vdev_id: vdev_id of the event + * @stats: summary stats + */ +struct summary_stats_event { + uint8_t vdev_id; + struct summary_stats stats; +}; + +/** + * struct chain_rssi_event - chain_rssi event param + * @vdev_id: vdev_id of the event + * @chain_rssi: chain_rssi + */ +struct chain_rssi_event { + uint8_t vdev_id; + int8_t chain_rssi[MAX_NUM_CHAINS]; +}; + +/** + * struct stats_event - parameters populated by stats event + * @num_pdev_stats: num pdev stats + * @pdev_stats: if populated array indicating pdev stats (index = pdev_id) + * @num_peer_stats: num peer stats + * @peer_stats: if populated array indicating peer stats + * @peer_adv_stats: if populated, indicates peer adv (extd2) stats + * @num_peer_adv_stats: number of peer adv (extd2) stats + * @num_peer_extd_stats: Num peer extended stats + * @peer_extended_stats: Peer extended stats + * @cca_stats: if populated indicates congestion stats + * @num_summary_stats: number of summary stats + * @vdev_summary_stats: if populated indicates array of summary stats per vdev + * @num_mib_stats: number of mib stats + * @mib_stats: if populated indicates array of mib stats per vdev + * @num_chain_rssi_stats: number of chain rssi stats + * @vdev_chain_rssi: if populated indicates array of chain rssi per vdev + * @tx_rate: tx rate (kbps) + * @tx_rate_flags: tx rate flags, (enum tx_rate_info) + * @last_event: The LSB indicates if the event is the last event or not and the + * MSB indicates if this feature is supported by FW or not. + */ +struct stats_event { + uint32_t num_pdev_stats; + struct pdev_mc_cp_stats *pdev_stats; + uint32_t num_peer_stats; + struct peer_mc_cp_stats *peer_stats; + uint32_t num_peer_adv_stats; + struct peer_adv_mc_cp_stats *peer_adv_stats; + uint32_t num_peer_extd_stats; + struct peer_extd_stats *peer_extended_stats; + struct congestion_stats_event *cca_stats; + uint32_t num_summary_stats; + struct summary_stats_event *vdev_summary_stats; +#ifdef WLAN_FEATURE_MIB_STATS + uint32_t num_mib_stats; + struct mib_stats_metrics *mib_stats; +#endif + uint32_t num_chain_rssi_stats; + struct chain_rssi_event *vdev_chain_rssi; + uint32_t tx_rate; + uint32_t rx_rate; + enum tx_rate_info tx_rate_flags; + uint32_t last_event; +}; + +#endif /* CONFIG_MCL */ +#endif /* __WLAN_CP_STATS_MC_DEFS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..5ee653bfb4bc15cbbd731ce35c48a95e7c3c8fb2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_tgt_api.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_mc_tgt_api.h + * + * This header file provide with API declarations to interface with Southbound + */ +#ifndef __WLAN_CP_STATS_MC_TGT_API_H__ +#define __WLAN_CP_STATS_MC_TGT_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include "wlan_cp_stats_mc_defs.h" + +/** + * tgt_mc_cp_stats_process_stats_event(): API to process stats event + * @psoc: pointer to psoc object + * @event: event parameters + * + * Return: status of operation + */ +QDF_STATUS tgt_mc_cp_stats_process_stats_event(struct wlan_objmgr_psoc *psoc, + struct stats_event *event); + +/** + * tgt_send_mc_cp_stats_req(): API to send stats request to lmac + * @psoc: pointer to psoc object + * + * Return: status of operation + */ +QDF_STATUS tgt_send_mc_cp_stats_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *req); + +/** + * tgt_mc_cp_stats_inc_wake_lock_stats() : API to increment wake lock stats + * given the wake reason code + * @psoc: pointer to psoc object + * @reason: wake reason + * @stats: vdev wow stats to update + * @unspecified_wake_count: unspecified wake count to update + * + * Return : status of operation + */ +QDF_STATUS tgt_mc_cp_stats_inc_wake_lock_stats(struct wlan_objmgr_psoc *psoc, + uint32_t reason, struct wake_lock_stats *stats, + uint32_t *unspecified_wake_count); + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_MC_TGT_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..9420168ce09de80d35dc132ef51129cb3d3f1156 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_ucfg_api.h @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_mc_ucfg_api.h + * + * This header file maintain API declaration required for northbound interaction + */ + +#ifndef __WLAN_CP_STATS_MC_UCFG_API_H__ +#define __WLAN_CP_STATS_MC_UCFG_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS + +#include +#include +#include + +struct psoc_cp_stats; +struct vdev_cp_stats; + +/** + * ucfg_mc_cp_stats_get_psoc_wake_lock_stats() : API to get wake lock stats from + * psoc + * @psoc: pointer to psoc object + * @stats: stats object to populate + * + * Return : status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_get_psoc_wake_lock_stats( + struct wlan_objmgr_psoc *psoc, + struct wake_lock_stats *stats); + +/** + * ucfg_mc_cp_stats_get_vdev_wake_lock_stats() : API to get wake lock stats from + * vdev + * @vdev: pointer to vdev object + * @stats: stats object to populate + * + * Return : status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_get_vdev_wake_lock_stats( + struct wlan_objmgr_vdev *vdev, + struct wake_lock_stats *stats); + +/** + * ucfg_mc_cp_stats_inc_wake_lock_stats_by_protocol() : API to increment wake + * lock stats given the protocol of the packet that was received. + * @psoc: pointer to psoc object + * @vdev_id: vdev_id for which the packet was received + * @protocol: protocol of the packet that was received + * + * Return : status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats_by_protocol( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + enum qdf_proto_subtype protocol); + +/** + * ucfg_mc_cp_stats_inc_wake_lock_stats_by_protocol() : API to increment wake + * lock stats given destnation of packet that was received. + * @psoc: pointer to psoc object + * @dest_mac: destinamtion mac address of packet that was received + * + * Return : status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats_by_dst_addr( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint8_t *dest_mac); + +/** + * ucfg_mc_cp_stats_inc_wake_lock_stats() : API to increment wake lock stats + * given wake reason. + * @psoc: pointer to psoc object + * @vdev_id: vdev_id on with WOW was received + * @reason: reason of WOW + * + * Return : status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + uint32_t reason); + +/** + * ucfg_mc_cp_stats_write_wow_stats() - Writes WOW stats to buffer + * @psoc: pointer to psoc object + * @buffer: The char buffer to write to + * @max_len: The maximum number of chars to write + * @ret: number of bytes written + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_write_wow_stats( + struct wlan_objmgr_psoc *psoc, + char *buffer, uint16_t max_len, int *ret); + +/** + * ucfg_mc_cp_stats_send_tx_power_request() - API to send tx_power request to + * lmac + * @vdev: pointer to vdev object + * @type: request type + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_send_stats_request(struct wlan_objmgr_vdev *vdev, + enum stats_req_type type, + struct request_info *info); + +/** + * ucfg_mc_cp_stats_get_tx_power() - API to fetch tx_power + * @vdev: pointer to vdev object + * @dbm: pointer to tx power in dbm + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_get_tx_power(struct wlan_objmgr_vdev *vdev, + int *dbm); + +/** + * ucfg_mc_cp_stats_is_req_pending() - API to tell if given request is pending + * @psoc: pointer to psoc object + * @type: request type to check + * + * Return: true of request is pending, false otherwise + */ +bool ucfg_mc_cp_stats_is_req_pending(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type); + +/** + * ucfg_mc_cp_stats_set_pending_req() - API to set pending request + * @psoc: pointer to psoc object + * @type: request to update + * @req: value to update + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_set_pending_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *req); + +/** + * ucfg_mc_cp_stats_reset_pending_req() - API to reset pending request + * @psoc: pointer to psoc object + * @type: request to update + * @last_req: last request + * @pending: pending request present + * + * The function is an atomic operation of "reset" and "get" last request. + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_reset_pending_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *last_req, + bool *pending); + +/** + * ucfg_mc_cp_stats_get_pending_req() - API to get pending request + * @psoc: pointer to psoc object + * @type: request to update + * @info: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_get_pending_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *info); + +/** + * ucfg_mc_cp_stats_free_stats_resources() - API to free buffers within stats_event + * structure + * @ev: strcture whose buffer are to freed + * + * Return: none + */ +void ucfg_mc_cp_stats_free_stats_resources(struct stats_event *ev); + +/** + * ucfg_mc_cp_stats_cca_stats_get() - API to fetch cca stats + * @vdev: pointer to vdev object + * @cca_stats: pointer to cca info + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_cca_stats_get(struct wlan_objmgr_vdev *vdev, + struct cca_stats *cca_stats); + +/** + * ucfg_mc_cp_stats_set_rate_flags() - API to set rate flags + * @vdev: pointer to vdev object + * @flags: value to set (enum tx_rate_info) + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_set_rate_flags(struct wlan_objmgr_vdev *vdev, + enum tx_rate_info flags); + +/** + * ucfg_mc_cp_stats_register_lost_link_info_cb() - API to register lost link + * info callback + * @psoc: pointer to psoc object + * @lost_link_cp_stats_info_cb: Lost link info callback to be registered + * + */ +void ucfg_mc_cp_stats_register_lost_link_info_cb( + struct wlan_objmgr_psoc *psoc, + void (*lost_link_cp_stats_info_cb)(void *stats_ev)); + +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +/** + * ucfg_mc_cp_stats_register_pmo_handler() - API to register pmo handler + * + * Return: none + */ +void ucfg_mc_cp_stats_register_pmo_handler(void); +#else +void static inline ucfg_mc_cp_stats_register_pmo_handler(void) { }; +#endif /* WLAN_POWER_MANAGEMENT_OFFLOAD */ +#else +void static inline ucfg_mc_cp_stats_register_pmo_handler(void) { }; +static inline QDF_STATUS ucfg_mc_cp_stats_send_stats_request( + struct wlan_objmgr_vdev *vdev, + enum stats_req_type type, + struct request_info *info) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS ucfg_mc_cp_stats_set_rate_flags( + struct wlan_objmgr_vdev *vdev, + enum tx_rate_info flags) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS ucfg_mc_cp_stats_get_psoc_wake_lock_stats( + struct wlan_objmgr_psoc *psoc, + struct wake_lock_stats *stats) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats_by_protocol( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + enum qdf_proto_subtype protocol) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + uint32_t reason) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats_by_dst_addr( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint8_t *dest_mac) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS ucfg_mc_cp_stats_get_vdev_wake_lock_stats( + struct wlan_objmgr_vdev *vdev, + struct wake_lock_stats *stats) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_MC_UCFG_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..94929afff981e06770fa9bc1e20c1c133ead6bdb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_tgt_api.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_tgt_api.h + * + * This header file provide with API declarations to interface with Southbound + */ +#ifndef __WLAN_CP_STATS_TGT_API_H__ +#define __WLAN_CP_STATS_TGT_API_H__ +#include +#include + +#ifdef QCA_SUPPORT_CP_STATS +/** + * tgt_cp_stats_register_rx_ops(): API to register rx ops with lmac + * @rx_ops: rx ops struct + * + * Return: none + */ +void tgt_cp_stats_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops); +#else +static inline void tgt_cp_stats_register_rx_ops( + struct wlan_lmac_if_rx_ops *rx_ops) {} +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_TGT_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..e09eee385047abac46a3ed3ead440a169b374c95 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ucfg_api.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_ucfg_api.h + * + * This header file maintain API declaration required for northbound interaction + */ + +#ifndef __WLAN_CP_STATS_UCFG_API_H__ +#define __WLAN_CP_STATS_UCFG_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include +#include "../../core/src/wlan_cp_stats_defs.h" +#include "../../core/src/wlan_cp_stats_cmn_api_i.h" + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_UCFG_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_utils_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_utils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..5c87de8e473818017927e7d6f665c2196f71ff62 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_utils_api.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_utils_api.h + * + * This header file provide declaration to public APIs exposed for other UMAC + * components to init/deinit, (de)register to required WMI events on + * soc enable/disable + */ + +#ifndef __WLAN_CP_STATS_UTILS_API_H__ +#define __WLAN_CP_STATS_UTILS_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include + +#define cp_stats_debug(args ...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_CP_STATS, ## args) +#define cp_stats_err(args ...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_CP_STATS, ## args) +#define cp_stats_nofl_debug(args ...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_CP_STATS, ## args) +/** + * enum wlan_cp_stats_cfg_state - State of Object configuration to + * indicate whether object has to be attached/detached in cp stats + * @WLAN_CP_STATS_OBJ_DETACH: Object has to be detached + * @WLAN_CP_STATS_OBJ_ATTACH: Object has to be attached + * @WLAN_CP_STATS_OBJ_INVALID: Object is invalid + */ +enum wlan_cp_stats_cfg_state { + WLAN_CP_STATS_OBJ_DETACH = 0, + WLAN_CP_STATS_OBJ_ATTACH = 1, + WLAN_CP_STATS_OBJ_INVALID +}; + +/** + * enum wlan_cp_stats_comp_id - component id for other umac components + * @WLAN_CP_STATS_ATF: ATF component specific id + * @WLAN_CP_STATS_MAX_COMPONENTS : Max id of cp stats components + */ +enum wlan_cp_stats_comp_id { + WLAN_CP_STATS_ATF = 0, + WLAN_CP_STATS_MAX_COMPONENTS, +}; + +/** + * wlan_cp_stats_init(): API to init stats component + * + * This API is invoked from dispatcher init during all component init. + * This API will register all required handlers for psoc, pdev,vdev + * and peer object create/delete notification. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_init(void); + +/** + * wlan_cp_stats_init(): API to deinit stats component + * + * This API is invoked from dispatcher deinit during all component deinit. + * This API will unregister all required handlers for psoc, pdev,vdev + * and peer object create/delete notification. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_deinit(void); + +/** + * wlan_cp_stats_open(): API to open cp stats component + * @psoc: pointer to psoc + * + * This API is invoked from dispatcher psoc open. + * This API will initialize psoc level cp stats object. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_open(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_close(): API to close cp stats component + * @psoc: pointer to psoc + * + * This API is invoked from dispatcher psoc close. + * This API will de-initialize psoc level cp stats object. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_close(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_enable(): API to enable cp stats component + * @psoc: pointer to psoc + * + * This API is invoked from dispatcher psoc enable. + * This API will register cp_stats WMI event handlers. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_enable(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_disable(): API to disable cp stats component + * @psoc: pointer to psoc + * + * This API is invoked from dispatcher psoc disable. + * This API will unregister cp_stats WMI event handlers. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_disable(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_comp_obj_cfg() - public API to umac for + * attach/detach + * component specific stat obj to cp stats obj + * @obj_type: common object type + * @cfg_state: config state either to attach of detach + * @comp_id: umac component id + * @cmn_obj: pointer to common object + * @comp_priv_obj: pointer to component specific cp stats object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_comp_obj_cfg( + enum wlan_objmgr_obj_type obj_type, + enum wlan_cp_stats_cfg_state cfg_state, + enum wlan_umac_comp_id comp_id, + void *cmn_obj, + void *data); + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_UTILS_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_chan_info_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_chan_info_api.c new file mode 100644 index 0000000000000000000000000000000000000000..dc6a1909ebd3c84feab9390b8821a616c4dd695f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_chan_info_api.c @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_chan_info_api.c + * + * This header file declare APIs and defines structure for channel information + */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_mc_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_mc_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..8c9d4726ea90019ac8d70d4eba60fa569042536f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_mc_tgt_api.c @@ -0,0 +1,956 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC:wlan_cp_stats_mc_tgt_api.c + * + * This file provide API definitions to update control plane statistics received + * from southbound interface + */ + +#include "wlan_cp_stats_mc_defs.h" +#include "target_if_cp_stats.h" +#include "wlan_cp_stats_tgt_api.h" +#include "wlan_cp_stats_mc_tgt_api.h" +#include +#include +#include "../../core/src/wlan_cp_stats_defs.h" + +static bool tgt_mc_cp_stats_is_last_event(struct stats_event *ev, + enum stats_req_type stats_type) +{ + bool is_last_event; + + if (IS_MSB_SET(ev->last_event)) { + is_last_event = IS_LSB_SET(ev->last_event); + } else { + if (stats_type == TYPE_CONNECTION_TX_POWER) + is_last_event = true; + else + is_last_event = !!ev->peer_stats; + } + + if (is_last_event) + cp_stats_debug("Last stats event"); + + return is_last_event; +} + +void tgt_cp_stats_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ + rx_ops->cp_stats_rx_ops.process_stats_event = + tgt_mc_cp_stats_process_stats_event; +} + +static void tgt_mc_cp_stats_extract_tx_power(struct wlan_objmgr_psoc *psoc, + struct stats_event *ev, + bool is_station_stats) +{ + int32_t max_pwr; + uint8_t pdev_id; + QDF_STATUS status; + struct wlan_objmgr_pdev *pdev; + struct request_info last_req = {0}; + struct wlan_objmgr_vdev *vdev = NULL; + struct pdev_mc_cp_stats *pdev_mc_stats; + struct pdev_cp_stats *pdev_cp_stats_priv; + bool pending = false; + + if (!ev->pdev_stats) + return; + + if (is_station_stats) + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_STATION_STATS, &last_req); + else + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_CONNECTION_TX_POWER, &last_req); + + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("ucfg_mc_cp_stats_get_pending_req failed"); + goto end; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, last_req.vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + cp_stats_err("vdev is null"); + goto end; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + cp_stats_err("pdev is null"); + goto end; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + if (pdev_id >= ev->num_pdev_stats) { + cp_stats_err("pdev_id: %d invalid", pdev_id); + goto end; + } + + pdev_cp_stats_priv = wlan_cp_stats_get_pdev_stats_obj(pdev); + if (!pdev_cp_stats_priv) { + cp_stats_err("pdev_cp_stats_priv is null"); + goto end; + } + + wlan_cp_stats_pdev_obj_lock(pdev_cp_stats_priv); + pdev_mc_stats = pdev_cp_stats_priv->pdev_stats; + max_pwr = pdev_mc_stats->max_pwr = ev->pdev_stats[pdev_id].max_pwr; + wlan_cp_stats_pdev_obj_unlock(pdev_cp_stats_priv); + if (is_station_stats) + goto end; + + if (tgt_mc_cp_stats_is_last_event(ev, TYPE_CONNECTION_TX_POWER)) { + ucfg_mc_cp_stats_reset_pending_req(psoc, + TYPE_CONNECTION_TX_POWER, + &last_req, + &pending); + if (last_req.u.get_tx_power_cb && pending) + last_req.u.get_tx_power_cb(max_pwr, last_req.cookie); + } +end: + if (vdev) + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); +} + +static void peer_rssi_iterator(struct wlan_objmgr_pdev *pdev, + void *peer, void *arg) +{ + struct stats_event *ev; + struct peer_mc_cp_stats *peer_mc_stats; + struct peer_cp_stats *peer_cp_stats_priv; + struct peer_extd_stats *peer_extd_mc_stats; + + if (WLAN_PEER_SELF == wlan_peer_get_peer_type(peer)) { + cp_stats_debug("ignore self peer: "QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(wlan_peer_get_macaddr(peer))); + return; + } + + peer_cp_stats_priv = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cp_stats_priv) { + cp_stats_err("peer cp stats object is null"); + return; + } + + wlan_cp_stats_peer_obj_lock(peer_cp_stats_priv); + peer_mc_stats = peer_cp_stats_priv->peer_stats; + ev = arg; + ev->peer_stats[ev->num_peer_stats] = *peer_mc_stats; + ev->num_peer_stats++; + + peer_extd_mc_stats = peer_mc_stats->extd_stats; + ev->peer_extended_stats[ev->num_peer_extd_stats] = *peer_extd_mc_stats; + ev->num_peer_extd_stats++; + wlan_cp_stats_peer_obj_unlock(peer_cp_stats_priv); +} + +static void +tgt_mc_cp_stats_prepare_raw_peer_rssi(struct wlan_objmgr_psoc *psoc, + struct request_info *last_req) +{ + uint8_t *mac_addr; + uint16_t peer_count; + struct stats_event ev = {0}; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_peer *peer = NULL; + struct peer_mc_cp_stats *peer_mc_stats; + struct peer_extd_stats *peer_mc_extd_stats; + struct peer_cp_stats *peer_cp_stats_priv; + void (*get_peer_rssi_cb)(struct stats_event *ev, void *cookie); + + get_peer_rssi_cb = last_req->u.get_peer_rssi_cb; + if (!get_peer_rssi_cb) { + cp_stats_err("get_peer_rssi_cb is null"); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, last_req->vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + cp_stats_err("vdev is null"); + goto end; + } + + mac_addr = last_req->peer_mac_addr; + if (QDF_IS_ADDR_BROADCAST(mac_addr)) { + pdev = wlan_vdev_get_pdev(vdev); + peer_count = wlan_pdev_get_peer_count(pdev); + ev.peer_stats = qdf_mem_malloc(sizeof(*ev.peer_stats) * + peer_count); + if (!ev.peer_stats) + goto end; + + ev.peer_extended_stats = + qdf_mem_malloc(sizeof(*ev.peer_extended_stats) * + peer_count); + if (!ev.peer_extended_stats) + goto end; + + wlan_objmgr_pdev_iterate_obj_list(pdev, WLAN_PEER_OP, + peer_rssi_iterator, &ev, + true, WLAN_CP_STATS_ID); + } else { + peer = wlan_objmgr_get_peer(psoc, last_req->pdev_id, + mac_addr, WLAN_CP_STATS_ID); + if (!peer) { + cp_stats_err("peer["QDF_MAC_ADDR_FMT"] is null", + QDF_MAC_ADDR_REF(mac_addr)); + goto end; + } + + peer_cp_stats_priv = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cp_stats_priv) { + cp_stats_err("peer cp stats object is null"); + goto end; + } + + ev.peer_stats = qdf_mem_malloc(sizeof(*ev.peer_stats)); + if (!ev.peer_stats) + goto end; + + ev.num_peer_stats = 1; + + ev.peer_extended_stats = + qdf_mem_malloc(sizeof(*ev.peer_extended_stats)); + if (!ev.peer_extended_stats) + goto end; + + ev.num_peer_extd_stats = 1; + + wlan_cp_stats_peer_obj_lock(peer_cp_stats_priv); + peer_mc_stats = peer_cp_stats_priv->peer_stats; + *ev.peer_stats = *peer_mc_stats; + + peer_mc_extd_stats = peer_mc_stats->extd_stats; + *ev.peer_extended_stats = *peer_mc_extd_stats; + wlan_cp_stats_peer_obj_unlock(peer_cp_stats_priv); + } + +end: + if (ev.peer_stats) + get_peer_rssi_cb(&ev, last_req->cookie); + + ucfg_mc_cp_stats_free_stats_resources(&ev); + + if (vdev) + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); +} + +static QDF_STATUS +tgt_mc_cp_stats_update_peer_adv_stats(struct wlan_objmgr_psoc *psoc, + struct peer_adv_mc_cp_stats + *peer_adv_stats, uint32_t size) +{ + uint8_t *peer_mac_addr; + struct wlan_objmgr_peer *peer; + struct peer_mc_cp_stats *peer_mc_stats; + struct peer_adv_mc_cp_stats *peer_adv_mc_stats; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct peer_cp_stats *peer_cp_stats_priv; + + if (!peer_adv_stats) + return QDF_STATUS_E_INVAL; + + peer_mac_addr = peer_adv_stats->peer_macaddr; + peer = wlan_objmgr_get_peer_by_mac(psoc, peer_mac_addr, + WLAN_CP_STATS_ID); + if (!peer) { + cp_stats_debug("peer is null"); + return QDF_STATUS_E_EXISTS; + } + peer_cp_stats_priv = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cp_stats_priv) { + cp_stats_err("peer_cp_stats_priv is null"); + status = QDF_STATUS_E_EXISTS; + goto end; + } + wlan_cp_stats_peer_obj_lock(peer_cp_stats_priv); + peer_mc_stats = peer_cp_stats_priv->peer_stats; + peer_adv_mc_stats = peer_mc_stats->adv_stats; + + qdf_mem_copy(peer_adv_mc_stats->peer_macaddr, + peer_adv_stats->peer_macaddr, + QDF_MAC_ADDR_SIZE); + if (peer_adv_stats->fcs_count) + peer_adv_mc_stats->fcs_count = peer_adv_stats->fcs_count; + if (peer_adv_stats->rx_bytes) + peer_adv_mc_stats->rx_bytes = peer_adv_stats->rx_bytes; + if (peer_adv_stats->rx_count) + peer_adv_mc_stats->rx_count = peer_adv_stats->rx_count; + wlan_cp_stats_peer_obj_unlock(peer_cp_stats_priv); + +end: + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); + + return status; +} + +static QDF_STATUS +tgt_mc_cp_stats_update_peer_stats(struct wlan_objmgr_psoc *psoc, + struct peer_mc_cp_stats *peer_stats) +{ + uint8_t *peer_mac_addr; + struct wlan_objmgr_peer *peer; + struct peer_mc_cp_stats *peer_mc_stats; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct peer_cp_stats *peer_cp_stats_priv; + + if (!peer_stats) + return QDF_STATUS_E_INVAL; + + peer_mac_addr = peer_stats->peer_macaddr; + peer = wlan_objmgr_get_peer_by_mac(psoc, peer_mac_addr, + WLAN_CP_STATS_ID); + if (!peer) { + cp_stats_debug("peer is null"); + return QDF_STATUS_E_EXISTS; + } + + peer_cp_stats_priv = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cp_stats_priv) { + cp_stats_err("peer_cp_stats_priv is null"); + status = QDF_STATUS_E_EXISTS; + goto end; + } + + wlan_cp_stats_peer_obj_lock(peer_cp_stats_priv); + peer_mc_stats = peer_cp_stats_priv->peer_stats; + qdf_mem_copy(peer_mc_stats->peer_macaddr, + peer_stats->peer_macaddr, + QDF_MAC_ADDR_SIZE); + if (peer_stats->tx_rate) + peer_mc_stats->tx_rate = peer_stats->tx_rate; + if (peer_stats->rx_rate) + peer_mc_stats->rx_rate = peer_stats->rx_rate; + if (peer_stats->peer_rssi) + peer_mc_stats->peer_rssi = peer_stats->peer_rssi; + cp_stats_nofl_debug("PEER STATS: peer_mac="QDF_MAC_ADDR_FMT", tx_rate=%u, rx_rate=%u, peer_rssi=%d", + QDF_MAC_ADDR_REF(peer_mc_stats->peer_macaddr), + peer_mc_stats->tx_rate, + peer_mc_stats->rx_rate, peer_mc_stats->peer_rssi); + wlan_cp_stats_peer_obj_unlock(peer_cp_stats_priv); + +end: + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); + + return status; +} + +static QDF_STATUS +tgt_mc_cp_stats_update_peer_extd_stats( + struct wlan_objmgr_psoc *psoc, + struct peer_extd_stats *peer_extended_stats) +{ + uint8_t *peer_mac_addr; + struct wlan_objmgr_peer *peer; + struct peer_mc_cp_stats *peer_mc_stats; + struct peer_extd_stats *peer_extd_mc_stats; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct peer_cp_stats *peer_cp_stats_priv; + + if (!peer_extended_stats) + return QDF_STATUS_E_INVAL; + + peer_mac_addr = peer_extended_stats->peer_macaddr; + peer = wlan_objmgr_get_peer_by_mac(psoc, peer_mac_addr, + WLAN_CP_STATS_ID); + if (!peer) { + cp_stats_debug("peer is null"); + return QDF_STATUS_E_EXISTS; + } + + peer_cp_stats_priv = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cp_stats_priv) { + cp_stats_err("peer_cp_stats_priv is null"); + status = QDF_STATUS_E_EXISTS; + goto end; + } + + wlan_cp_stats_peer_obj_lock(peer_cp_stats_priv); + peer_mc_stats = peer_cp_stats_priv->peer_stats; + peer_extd_mc_stats = peer_mc_stats->extd_stats; + if (!peer_extd_mc_stats) { + wlan_cp_stats_peer_obj_unlock(peer_cp_stats_priv); + cp_stats_err("No peer_extd_mc_stats"); + status = QDF_STATUS_E_INVAL; + goto end; + } + qdf_mem_copy(peer_extd_mc_stats->peer_macaddr, + peer_extended_stats->peer_macaddr, + QDF_MAC_ADDR_SIZE); + if (peer_extended_stats->rx_mc_bc_cnt) + peer_extd_mc_stats->rx_mc_bc_cnt = + peer_extended_stats->rx_mc_bc_cnt; + wlan_cp_stats_peer_obj_unlock(peer_cp_stats_priv); + + cp_stats_debug("peer_mac="QDF_MAC_ADDR_FMT", rx_mc_bc_cnt=%u", + QDF_MAC_ADDR_REF(peer_extended_stats->peer_macaddr), + peer_extended_stats->rx_mc_bc_cnt); + +end: + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); + + return status; +} + +static void tgt_mc_cp_stats_extract_peer_extd_stats( + struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ + uint32_t i, selected; + QDF_STATUS status; + struct request_info last_req = {0}; + + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_PEER_STATS, + &last_req); + + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("ucfg_mc_cp_stats_get_pending_req failed"); + return; + } + + selected = ev->num_peer_extd_stats; + for (i = 0; i < ev->num_peer_extd_stats; i++) { + status = tgt_mc_cp_stats_update_peer_extd_stats( + psoc, + &ev->peer_extended_stats[i]); + + if (!QDF_IS_ADDR_BROADCAST(last_req.peer_mac_addr) && + !qdf_mem_cmp(ev->peer_extended_stats[i].peer_macaddr, + last_req.peer_mac_addr, + QDF_MAC_ADDR_SIZE)) { + /* mac is specified, but failed to update the peer */ + if (QDF_IS_STATUS_ERROR(status)) + return; + + selected = i; + } + } + + /* no matched peer */ + if (!QDF_IS_ADDR_BROADCAST(last_req.peer_mac_addr) && + selected == ev->num_peer_extd_stats) { + cp_stats_err("peer not found stats"); + return; + } +} + +static void tgt_mc_cp_stats_extract_peer_stats(struct wlan_objmgr_psoc *psoc, + struct stats_event *ev, + bool is_station_stats) +{ + uint32_t i; + QDF_STATUS status; + struct request_info last_req = {0}; + bool pending = false; + uint32_t selected; + + if (is_station_stats) + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_STATION_STATS, + &last_req); + else + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_PEER_STATS, + &last_req); + + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("ucfg_mc_cp_stats_get_pending_req failed"); + return; + } + + if (!ev->peer_stats) + goto extd2_stats; + + selected = ev->num_peer_stats; + for (i = 0; i < ev->num_peer_stats; i++) { + status = tgt_mc_cp_stats_update_peer_stats(psoc, + &ev->peer_stats[i]); + if (!QDF_IS_ADDR_BROADCAST(last_req.peer_mac_addr) && + !qdf_mem_cmp(ev->peer_stats[i].peer_macaddr, + last_req.peer_mac_addr, + QDF_MAC_ADDR_SIZE)) { + /* mac is specified, but failed to update the peer */ + if (QDF_IS_STATUS_ERROR(status)) + return; + + selected = i; + } + } + + /* no matched peer */ + if (!QDF_IS_ADDR_BROADCAST(last_req.peer_mac_addr) && + selected == ev->num_peer_stats) { + cp_stats_err("peer not found for stats"); + } + +extd2_stats: + + if (!ev->peer_adv_stats) + goto complete; + + selected = ev->num_peer_adv_stats; + for (i = 0; i < ev->num_peer_adv_stats; i++) { + status = tgt_mc_cp_stats_update_peer_adv_stats( + psoc, &ev->peer_adv_stats[i], + ev->num_peer_adv_stats); + if (!QDF_IS_ADDR_BROADCAST(last_req.peer_mac_addr) && + !qdf_mem_cmp(ev->peer_adv_stats[i].peer_macaddr, + last_req.peer_mac_addr, + QDF_MAC_ADDR_SIZE)) { + /* mac is specified, but failed to update the peer */ + if (QDF_IS_STATUS_ERROR(status)) + return; + + selected = i; + } + } + + /* no matched peer */ + if (!QDF_IS_ADDR_BROADCAST(last_req.peer_mac_addr) && + selected == ev->num_peer_adv_stats) { + cp_stats_debug("peer not found for extd stats"); + return; + } + +complete: + if (is_station_stats) + return; + + tgt_mc_cp_stats_extract_peer_extd_stats(psoc, ev); + if (tgt_mc_cp_stats_is_last_event(ev, TYPE_PEER_STATS)) { + ucfg_mc_cp_stats_reset_pending_req(psoc, TYPE_PEER_STATS, + &last_req, &pending); + if (pending && last_req.u.get_peer_rssi_cb) + tgt_mc_cp_stats_prepare_raw_peer_rssi(psoc, &last_req); + } +} + +#ifdef WLAN_FEATURE_MIB_STATS +static void tgt_mc_cp_stats_extract_mib_stats(struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ + QDF_STATUS status; + struct request_info last_req = {0}; + bool pending = false; + + if (!ev->mib_stats) { + cp_stats_debug("no mib stats"); + return; + } + + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_MIB_STATS, &last_req); + + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("ucfg_mc_cp_stats_get_pending_req failed"); + return; + } + + if (tgt_mc_cp_stats_is_last_event(ev, TYPE_MIB_STATS)) { + ucfg_mc_cp_stats_reset_pending_req(psoc, TYPE_MIB_STATS, + &last_req, &pending); + if (last_req.u.get_mib_stats_cb && pending) + last_req.u.get_mib_stats_cb(ev, last_req.cookie); + } +} +#else +static void tgt_mc_cp_stats_extract_mib_stats(struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ +} +#endif + +static void tgt_mc_cp_stats_extract_cca_stats(struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ + struct wlan_objmgr_vdev *vdev; + struct vdev_mc_cp_stats *vdev_mc_stats; + struct vdev_cp_stats *vdev_cp_stats_priv; + + if (!ev->cca_stats) + return; + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + ev->cca_stats->vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + cp_stats_err("vdev is null"); + return; + } + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + goto end; + } + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + vdev_mc_stats->cca.congestion = ev->cca_stats->congestion; + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + +end: + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); +} + +static void tgt_mc_cp_stats_extract_vdev_summary_stats( + struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ + uint8_t i; + QDF_STATUS status; + struct wlan_objmgr_peer *peer = NULL; + struct request_info last_req = {0}; + struct wlan_objmgr_vdev *vdev; + struct peer_mc_cp_stats *peer_mc_stats; + struct vdev_mc_cp_stats *vdev_mc_stats; + struct peer_cp_stats *peer_cp_stats_priv; + struct vdev_cp_stats *vdev_cp_stats_priv; + + if (!ev->vdev_summary_stats) + return; + + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_STATION_STATS, + &last_req); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("ucfg_mc_cp_stats_get_pending_req failed"); + return; + } + + for (i = 0; i < ev->num_summary_stats; i++) { + if (ev->vdev_summary_stats[i].vdev_id == last_req.vdev_id) + break; + } + + if (i == ev->num_summary_stats) { + cp_stats_debug("vdev_id %d not found", last_req.vdev_id); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, last_req.vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + cp_stats_err("vdev is null"); + return; + } + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + goto end; + } + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + qdf_mem_copy(&vdev_mc_stats->vdev_summary_stats, + &ev->vdev_summary_stats[i].stats, + sizeof(vdev_mc_stats->vdev_summary_stats)); + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + + peer = wlan_objmgr_get_peer(psoc, last_req.pdev_id, + last_req.peer_mac_addr, WLAN_CP_STATS_ID); + if (!peer) { + cp_stats_debug("peer is null "QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(last_req.peer_mac_addr)); + goto end; + } + + peer_cp_stats_priv = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cp_stats_priv) { + cp_stats_err("peer cp stats object is null"); + goto end; + } + + wlan_cp_stats_peer_obj_lock(peer_cp_stats_priv); + peer_mc_stats = peer_cp_stats_priv->peer_stats; + peer_mc_stats->peer_rssi = ev->vdev_summary_stats[i].stats.rssi; + wlan_cp_stats_peer_obj_unlock(peer_cp_stats_priv); + +end: + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); +} + +static void tgt_mc_cp_stats_extract_vdev_chain_rssi_stats( + struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ + uint8_t i, j; + QDF_STATUS status; + struct request_info last_req = {0}; + struct wlan_objmgr_vdev *vdev; + struct vdev_mc_cp_stats *vdev_mc_stats; + struct vdev_cp_stats *vdev_cp_stats_priv; + + if (!ev->vdev_chain_rssi) + return; + + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_STATION_STATS, + &last_req); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("ucfg_mc_cp_stats_get_pending_req failed"); + return; + } + + for (i = 0; i < ev->num_chain_rssi_stats; i++) { + if (ev->vdev_chain_rssi[i].vdev_id == last_req.vdev_id) + break; + } + + if (i == ev->num_chain_rssi_stats) { + cp_stats_debug("vdev_id %d not found", last_req.vdev_id); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, last_req.vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + cp_stats_err("vdev is null"); + return; + } + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + goto end; + } + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + for (j = 0; j < MAX_NUM_CHAINS; j++) { + vdev_mc_stats->chain_rssi[j] = + ev->vdev_chain_rssi[i].chain_rssi[j]; + } + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + +end: + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); +} + +static void +tgt_mc_cp_stats_prepare_n_send_raw_station_stats(struct wlan_objmgr_psoc *psoc, + struct request_info *last_req) +{ + /* station_stats to be given to userspace thread */ + struct stats_event info = {0}; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_peer *peer; + struct peer_mc_cp_stats *peer_mc_stats; + struct vdev_mc_cp_stats *vdev_mc_stats; + struct peer_cp_stats *peer_cp_stats_priv; + struct vdev_cp_stats *vdev_cp_stats_priv; + void (*get_station_stats_cb)(struct stats_event *info, void *cookie); + + get_station_stats_cb = last_req->u.get_station_stats_cb; + if (!get_station_stats_cb) { + cp_stats_err("callback is null"); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, last_req->vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + cp_stats_err("vdev object is null"); + return; + } + + peer = wlan_objmgr_get_peer(psoc, last_req->pdev_id, + last_req->peer_mac_addr, WLAN_CP_STATS_ID); + if (!peer) { + cp_stats_err("peer object is null"); + goto end; + } + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + goto end; + } + + peer_cp_stats_priv = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cp_stats_priv) { + cp_stats_err("peer cp stats object is null"); + goto end; + } + + info.num_summary_stats = 1; + info.vdev_summary_stats = qdf_mem_malloc( + sizeof(*info.vdev_summary_stats)); + if (!info.vdev_summary_stats) + goto end; + + info.num_chain_rssi_stats = 1; + info.vdev_chain_rssi = qdf_mem_malloc(sizeof(*info.vdev_chain_rssi));; + if (!info.vdev_chain_rssi) + goto end; + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + info.vdev_summary_stats[0].vdev_id = last_req->vdev_id; + info.vdev_summary_stats[0].stats = vdev_mc_stats->vdev_summary_stats; + info.vdev_chain_rssi[0].vdev_id = last_req->vdev_id; + qdf_mem_copy(info.vdev_chain_rssi[0].chain_rssi, + vdev_mc_stats->chain_rssi, + sizeof(vdev_mc_stats->chain_rssi)); + info.tx_rate_flags = vdev_mc_stats->tx_rate_flags; + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + + info.peer_adv_stats = qdf_mem_malloc(sizeof(*info.peer_adv_stats)); + if (!info.peer_adv_stats) + goto end; + + wlan_cp_stats_peer_obj_lock(peer_cp_stats_priv); + peer_mc_stats = peer_cp_stats_priv->peer_stats; + /* + * The linkspeed returned by fw is in kbps so convert + * it in units of 100kbps which is expected by UMAC + */ + info.tx_rate = peer_mc_stats->tx_rate / 100; + info.rx_rate = peer_mc_stats->rx_rate / 100; + + if (peer_mc_stats->adv_stats) { + info.num_peer_adv_stats = 1; + qdf_mem_copy(info.peer_adv_stats, + peer_mc_stats->adv_stats, + sizeof(peer_mc_stats->adv_stats)); + } + + wlan_cp_stats_peer_obj_unlock(peer_cp_stats_priv); + +end: + if (info.vdev_summary_stats && info.vdev_chain_rssi) + get_station_stats_cb(&info, last_req->cookie); + + ucfg_mc_cp_stats_free_stats_resources(&info); + + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); + + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); +} + +static void tgt_mc_cp_stats_extract_station_stats( + struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ + QDF_STATUS status; + struct request_info last_req = {0}; + bool pending = false; + + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_STATION_STATS, + &last_req); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("ucfg_mc_cp_stats_get_pending_req failed"); + return; + } + + tgt_mc_cp_stats_extract_tx_power(psoc, ev, true); + tgt_mc_cp_stats_extract_peer_stats(psoc, ev, true); + tgt_mc_cp_stats_extract_vdev_summary_stats(psoc, ev); + tgt_mc_cp_stats_extract_vdev_chain_rssi_stats(psoc, ev); + + /* + * PEER stats are the last stats sent for get_station statistics. + * reset type_map bit for station stats . + */ + if (tgt_mc_cp_stats_is_last_event(ev, TYPE_STATION_STATS)) { + ucfg_mc_cp_stats_reset_pending_req(psoc, TYPE_STATION_STATS, + &last_req, + &pending); + if (pending && last_req.u.get_station_stats_cb) + tgt_mc_cp_stats_prepare_n_send_raw_station_stats( + psoc, &last_req); + } +} + +static void tgt_mc_cp_send_lost_link_stats(struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (psoc_cp_stats_priv && psoc_cp_stats_priv->legacy_stats_cb) + psoc_cp_stats_priv->legacy_stats_cb(ev); +} + +QDF_STATUS tgt_mc_cp_stats_process_stats_event(struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ + if (ucfg_mc_cp_stats_is_req_pending(psoc, TYPE_CONNECTION_TX_POWER)) + tgt_mc_cp_stats_extract_tx_power(psoc, ev, false); + + if (ucfg_mc_cp_stats_is_req_pending(psoc, TYPE_PEER_STATS)) + tgt_mc_cp_stats_extract_peer_stats(psoc, ev, false); + + if (ucfg_mc_cp_stats_is_req_pending(psoc, TYPE_STATION_STATS)) + tgt_mc_cp_stats_extract_station_stats(psoc, ev); + + if (ucfg_mc_cp_stats_is_req_pending(psoc, TYPE_MIB_STATS)) + tgt_mc_cp_stats_extract_mib_stats(psoc, ev); + + tgt_mc_cp_stats_extract_cca_stats(psoc, ev); + + tgt_mc_cp_send_lost_link_stats(psoc, ev); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_mc_cp_stats_inc_wake_lock_stats(struct wlan_objmgr_psoc *psoc, + uint32_t reason, + struct wake_lock_stats *stats, + uint32_t *unspecified_wake_count) +{ + struct wlan_lmac_if_cp_stats_tx_ops *tx_ops; + + tx_ops = target_if_cp_stats_get_tx_ops(psoc); + if (!tx_ops) + return QDF_STATUS_E_NULL_VALUE; + + tx_ops->inc_wake_lock_stats(reason, stats, unspecified_wake_count); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_send_mc_cp_stats_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *req) +{ + struct wlan_lmac_if_cp_stats_tx_ops *tx_ops; + + tx_ops = target_if_cp_stats_get_tx_ops(psoc); + if (!tx_ops || !tx_ops->send_req_stats) { + cp_stats_err("could not get tx_ops"); + return QDF_STATUS_E_NULL_VALUE; + } + + return tx_ops->send_req_stats(psoc, type, req); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_mc_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_mc_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..44c517de4c300fc777adea8f1454706b6599011d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_mc_ucfg_api.c @@ -0,0 +1,725 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_mc_ucfg_api.c + * + * This file provide API definitions required for northbound interaction + */ + +#include +#include "wlan_cp_stats_mc_defs.h" +#include +#include +#include +#include "../../core/src/wlan_cp_stats_defs.h" +#include "../../core/src/wlan_cp_stats_defs.h" +#include "../../core/src/wlan_cp_stats_cmn_api_i.h" +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +#include +#endif + +QDF_STATUS wlan_cp_stats_psoc_cs_init(struct psoc_cp_stats *psoc_cs) +{ + psoc_cs->obj_stats = qdf_mem_malloc(sizeof(struct psoc_mc_cp_stats)); + if (!psoc_cs->obj_stats) + return QDF_STATUS_E_NOMEM; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_psoc_cs_deinit(struct psoc_cp_stats *psoc_cs) +{ + qdf_mem_free(psoc_cs->obj_stats); + psoc_cs->obj_stats = NULL; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_vdev_cs_init(struct vdev_cp_stats *vdev_cs) +{ + vdev_cs->vdev_stats = qdf_mem_malloc(sizeof(struct vdev_mc_cp_stats)); + if (!vdev_cs->vdev_stats) + return QDF_STATUS_E_NOMEM; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_vdev_cs_deinit(struct vdev_cp_stats *vdev_cs) +{ + qdf_mem_free(vdev_cs->vdev_stats); + vdev_cs->vdev_stats = NULL; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_pdev_cs_init(struct pdev_cp_stats *pdev_cs) +{ + pdev_cs->pdev_stats = qdf_mem_malloc(sizeof(struct pdev_mc_cp_stats)); + if (!pdev_cs->pdev_stats) + return QDF_STATUS_E_NOMEM; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_pdev_cs_deinit(struct pdev_cp_stats *pdev_cs) +{ + qdf_mem_free(pdev_cs->pdev_stats); + pdev_cs->pdev_stats = NULL; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_peer_cs_init(struct peer_cp_stats *peer_cs) +{ + struct peer_mc_cp_stats *peer_mc_stats; + + peer_mc_stats = qdf_mem_malloc(sizeof(struct peer_mc_cp_stats)); + if (!peer_mc_stats) + return QDF_STATUS_E_NOMEM; + + peer_mc_stats->adv_stats = + qdf_mem_malloc(sizeof(struct peer_adv_mc_cp_stats)); + + if (!peer_mc_stats->adv_stats) { + qdf_mem_free(peer_mc_stats); + peer_mc_stats = NULL; + return QDF_STATUS_E_NOMEM; + } + + peer_mc_stats->extd_stats = + qdf_mem_malloc(sizeof(struct peer_extd_stats)); + + if (!peer_mc_stats->extd_stats) { + qdf_mem_free(peer_mc_stats->adv_stats); + peer_mc_stats->adv_stats = NULL; + qdf_mem_free(peer_mc_stats); + peer_mc_stats = NULL; + return QDF_STATUS_E_NOMEM; + } + peer_cs->peer_stats = peer_mc_stats; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_peer_cs_deinit(struct peer_cp_stats *peer_cs) +{ + struct peer_mc_cp_stats *peer_mc_stats = peer_cs->peer_stats; + + qdf_mem_free(peer_mc_stats->adv_stats); + peer_mc_stats->adv_stats = NULL; + qdf_mem_free(peer_mc_stats->extd_stats); + peer_mc_stats->extd_stats = NULL; + qdf_mem_free(peer_cs->peer_stats); + peer_cs->peer_stats = NULL; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats_by_protocol( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + enum qdf_proto_subtype protocol) +{ + struct wake_lock_stats *stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + struct psoc_mc_cp_stats *psoc_mc_stats; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + + if (!psoc_mc_stats) { + cp_stats_err("psoc mc stats is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + stats = &psoc_mc_stats->wow_stats; + switch (protocol) { + case QDF_PROTO_ICMP_REQ: + case QDF_PROTO_ICMP_RES: + stats->icmpv4_count++; + break; + case QDF_PROTO_ICMPV6_REQ: + case QDF_PROTO_ICMPV6_RES: + case QDF_PROTO_ICMPV6_RS: + stats->icmpv6_count++; + break; + case QDF_PROTO_ICMPV6_RA: + stats->icmpv6_count++; + stats->ipv6_mcast_ra_stats++; + break; + case QDF_PROTO_ICMPV6_NS: + stats->icmpv6_count++; + stats->ipv6_mcast_ns_stats++; + break; + case QDF_PROTO_ICMPV6_NA: + stats->icmpv6_count++; + stats->ipv6_mcast_na_stats++; + break; + default: + break; + } + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats_by_dst_addr( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint8_t *dest_mac) +{ + struct psoc_cp_stats *psoc_cp_stats_priv; + struct psoc_mc_cp_stats *psoc_mc_stats; + struct wake_lock_stats *stats; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + if (!psoc_mc_stats) { + cp_stats_err("psoc mc stats is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + stats = &psoc_mc_stats->wow_stats; + + switch (*dest_mac) { + case QDF_BCAST_MAC_ADDR: + stats->bcast_wake_up_count++; + break; + case QDF_MCAST_IPV4_MAC_ADDR: + stats->ipv4_mcast_wake_up_count++; + break; + case QDF_MCAST_IPV6_MAC_ADDR: + stats->ipv6_mcast_wake_up_count++; + break; + default: + stats->ucast_wake_up_count++; + break; + } + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + uint32_t reason) +{ + struct wake_lock_stats *stats; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct psoc_mc_cp_stats *psoc_mc_stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + + if (!psoc_mc_stats) { + cp_stats_err("psoc mc stats is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + stats = &psoc_mc_stats->wow_stats; + + status = tgt_mc_cp_stats_inc_wake_lock_stats(psoc, reason, stats, + &psoc_mc_stats->wow_unspecified_wake_up_count); + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return status; +} + +/** + * vdev_iterator() - iterator function to collect wake_lock_stats from all vdev + * @psoc: pointer to psoc object + * @vdev: pointer to vdev object + * @arg: stats object pointer passed as arg + * + * Return - none + */ +static void vdev_iterator(struct wlan_objmgr_psoc *psoc, void *vdev, void *arg) +{ + struct wake_lock_stats *vdev_stats; + struct wake_lock_stats *stats = arg; + struct psoc_cp_stats *psoc_cp_stats_priv; + struct psoc_mc_cp_stats *psoc_mc_stats; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return; + } + + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + if (!psoc_mc_stats) { + cp_stats_err("psoc mc stats is null"); + return; + } + + vdev_stats = &psoc_mc_stats->wow_stats; + + stats->ucast_wake_up_count += vdev_stats->ucast_wake_up_count; + stats->bcast_wake_up_count += vdev_stats->bcast_wake_up_count; + stats->ipv4_mcast_wake_up_count += vdev_stats->ipv4_mcast_wake_up_count; + stats->ipv6_mcast_wake_up_count += vdev_stats->ipv6_mcast_wake_up_count; + stats->ipv6_mcast_ra_stats += vdev_stats->ipv6_mcast_ra_stats; + stats->ipv6_mcast_ns_stats += vdev_stats->ipv6_mcast_ns_stats; + stats->ipv6_mcast_na_stats += vdev_stats->ipv6_mcast_na_stats; + stats->icmpv4_count += vdev_stats->icmpv4_count; + stats->icmpv6_count += vdev_stats->icmpv6_count; + stats->rssi_breach_wake_up_count += + vdev_stats->rssi_breach_wake_up_count; + stats->low_rssi_wake_up_count += vdev_stats->low_rssi_wake_up_count; + stats->gscan_wake_up_count += vdev_stats->gscan_wake_up_count; + stats->pno_complete_wake_up_count += + vdev_stats->pno_complete_wake_up_count; + stats->pno_match_wake_up_count += vdev_stats->pno_match_wake_up_count; + stats->oem_response_wake_up_count += + vdev_stats->oem_response_wake_up_count; + stats->pwr_save_fail_detected += vdev_stats->pwr_save_fail_detected; + stats->scan_11d += vdev_stats->scan_11d; +} + +QDF_STATUS ucfg_mc_cp_stats_get_psoc_wake_lock_stats( + struct wlan_objmgr_psoc *psoc, + struct wake_lock_stats *stats) +{ + struct psoc_cp_stats *psoc_cp_stats_priv; + struct psoc_mc_cp_stats *psoc_mc_stats; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + /* iterate through all vdevs, and get wow stats from vdev_cs object */ + wlan_objmgr_iterate_obj_list(psoc, WLAN_VDEV_OP, vdev_iterator, + stats, true, WLAN_CP_STATS_ID); + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_get_vdev_wake_lock_stats( + struct wlan_objmgr_vdev *vdev, + struct wake_lock_stats *stats) +{ + struct wlan_objmgr_psoc *psoc; + struct psoc_cp_stats *psoc_cp_stats_priv; + struct psoc_mc_cp_stats *psoc_mc_stats; + + wlan_vdev_obj_lock(vdev); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + cp_stats_err("psoc NULL"); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + + if (!psoc_mc_stats) { + cp_stats_err("psoc mc stats is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + qdf_mem_copy(stats, &psoc_mc_stats->wow_stats, sizeof(*stats)); + + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_write_wow_stats( + struct wlan_objmgr_psoc *psoc, + char *buffer, uint16_t max_len, int *ret) +{ + QDF_STATUS status; + uint32_t unspecified_wake_count; + struct wake_lock_stats wow_stats = {0}; + struct psoc_mc_cp_stats *psoc_mc_stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + /* get stats from psoc */ + status = ucfg_mc_cp_stats_get_psoc_wake_lock_stats(psoc, &wow_stats); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to get WoW stats"); + return status; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + unspecified_wake_count = psoc_mc_stats->wow_unspecified_wake_up_count; + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + *ret = qdf_scnprintf(buffer, max_len, + "WoW Wake Reasons\n" + "\tunspecified wake count: %u\n" + "\tunicast: %u\n" + "\tbroadcast: %u\n" + "\tIPv4 multicast: %u\n" + "\tIPv6 multicast: %u\n" + "\tIPv6 multicast RA: %u\n" + "\tIPv6 multicast NS: %u\n" + "\tIPv6 multicast NA: %u\n" + "\tICMPv4: %u\n" + "\tICMPv6: %u\n" + "\tRSSI Breach: %u\n" + "\tLow RSSI: %u\n" + "\tG-Scan: %u\n" + "\tPNO Complete: %u\n" + "\tPNO Match: %u\n" + "\tOEM rsp wake_count: %u\n" + "\twake count due to pwr_save_fail_detected: %u\n" + "\twake count due to 11d scan: %u\n", + unspecified_wake_count, + wow_stats.ucast_wake_up_count, + wow_stats.bcast_wake_up_count, + wow_stats.ipv4_mcast_wake_up_count, + wow_stats.ipv6_mcast_wake_up_count, + wow_stats.ipv6_mcast_ra_stats, + wow_stats.ipv6_mcast_ns_stats, + wow_stats.ipv6_mcast_na_stats, + wow_stats.icmpv4_count, + wow_stats.icmpv6_count, + wow_stats.rssi_breach_wake_up_count, + wow_stats.low_rssi_wake_up_count, + wow_stats.gscan_wake_up_count, + wow_stats.pno_complete_wake_up_count, + wow_stats.pno_match_wake_up_count, + wow_stats.oem_response_wake_up_count, + wow_stats.pwr_save_fail_detected, + wow_stats.scan_11d); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_send_stats_request(struct wlan_objmgr_vdev *vdev, + enum stats_req_type type, + struct request_info *info) +{ + QDF_STATUS status; + + status = ucfg_mc_cp_stats_set_pending_req(wlan_vdev_get_psoc(vdev), + type, info); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("ucfg_mc_cp_stats_set_pending_req pdev failed: %d", + status); + return status; + } + + return tgt_send_mc_cp_stats_req(wlan_vdev_get_psoc(vdev), type, info); +} + +QDF_STATUS ucfg_mc_cp_stats_get_tx_power(struct wlan_objmgr_vdev *vdev, + int *dbm) +{ + struct wlan_objmgr_pdev *pdev; + struct pdev_mc_cp_stats *pdev_mc_stats; + struct pdev_cp_stats *pdev_cp_stats_priv; + + pdev = wlan_vdev_get_pdev(vdev); + pdev_cp_stats_priv = wlan_cp_stats_get_pdev_stats_obj(pdev); + if (!pdev_cp_stats_priv) { + cp_stats_err("pdev cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_pdev_obj_lock(pdev_cp_stats_priv); + pdev_mc_stats = pdev_cp_stats_priv->pdev_stats; + *dbm = pdev_mc_stats->max_pwr; + wlan_cp_stats_pdev_obj_unlock(pdev_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +bool ucfg_mc_cp_stats_is_req_pending(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type) +{ + uint32_t pending_req_map; + struct psoc_mc_cp_stats *psoc_mc_stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return false; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + pending_req_map = psoc_mc_stats->pending.type_map; + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return (pending_req_map & (1 << type)); +} + +QDF_STATUS ucfg_mc_cp_stats_set_pending_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *req) +{ + struct psoc_mc_cp_stats *psoc_mc_stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (type >= TYPE_MAX) { + cp_stats_err("Invalid type index: %d", type); + return QDF_STATUS_E_INVAL; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + if (psoc_mc_stats->is_cp_stats_suspended) { + cp_stats_debug("cp stats is suspended try again after resume"); + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + return QDF_STATUS_E_AGAIN; + } + psoc_mc_stats->pending.type_map |= (1 << type); + psoc_mc_stats->pending.req[type] = *req; + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_reset_pending_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *last_req, + bool *pending) +{ + struct psoc_mc_cp_stats *psoc_mc_stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (type >= TYPE_MAX) { + cp_stats_err("Invalid type index: %d", type); + return QDF_STATUS_E_INVAL; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + if (psoc_mc_stats->pending.type_map & (1 << type)) { + *last_req = psoc_mc_stats->pending.req[type]; + *pending = true; + } else { + *pending = false; + } + psoc_mc_stats->pending.type_map &= ~(1 << type); + qdf_mem_zero(&psoc_mc_stats->pending.req[type], + sizeof(psoc_mc_stats->pending.req[type])); + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_get_pending_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *info) +{ + struct psoc_mc_cp_stats *psoc_mc_stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (type >= TYPE_MAX) { + cp_stats_err("Invalid type index: %d", type); + return QDF_STATUS_E_INVAL; + } + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + *info = psoc_mc_stats->pending.req[type]; + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +void ucfg_mc_cp_stats_free_stats_resources(struct stats_event *ev) +{ + if (!ev) + return; + + qdf_mem_free(ev->pdev_stats); + qdf_mem_free(ev->peer_adv_stats); + qdf_mem_free(ev->peer_stats); + qdf_mem_free(ev->cca_stats); + qdf_mem_free(ev->vdev_summary_stats); + qdf_mem_free(ev->vdev_chain_rssi); + qdf_mem_free(ev->peer_extended_stats); + qdf_mem_zero(ev, sizeof(*ev)); +} + +QDF_STATUS ucfg_mc_cp_stats_cca_stats_get(struct wlan_objmgr_vdev *vdev, + struct cca_stats *cca_stats) +{ + struct vdev_cp_stats *vdev_cp_stats_priv; + struct vdev_mc_cp_stats *vdev_mc_stats; + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + cca_stats->congestion = vdev_mc_stats->cca.congestion; + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_set_rate_flags(struct wlan_objmgr_vdev *vdev, + uint32_t flags) +{ + struct vdev_mc_cp_stats *vdev_mc_stats; + struct vdev_cp_stats *vdev_cp_stats_priv; + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + vdev_mc_stats->tx_rate_flags = flags; + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +void ucfg_mc_cp_stats_register_lost_link_info_cb( + struct wlan_objmgr_psoc *psoc, + void (*lost_link_cp_stats_info_cb)(void *stats_ev)) +{ + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return; + } + + psoc_cp_stats_priv->legacy_stats_cb = lost_link_cp_stats_info_cb; +} + +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +static QDF_STATUS +ucfg_mc_cp_stats_suspend_req_handler(struct wlan_objmgr_psoc *psoc) +{ + struct psoc_mc_cp_stats *psoc_mc_stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + psoc_mc_stats->is_cp_stats_suspended = true; + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +ucfg_mc_cp_stats_resume_req_handler(struct wlan_objmgr_psoc *psoc) +{ + struct psoc_mc_cp_stats *psoc_mc_stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + psoc_mc_stats->is_cp_stats_suspended = false; + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +ucfg_mc_cp_stats_resume_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + return ucfg_mc_cp_stats_resume_req_handler(psoc); +} + +static QDF_STATUS +ucfg_mc_cp_stats_suspend_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + return ucfg_mc_cp_stats_suspend_req_handler(psoc); +} + +void ucfg_mc_cp_stats_register_pmo_handler(void) +{ + pmo_register_suspend_handler(WLAN_UMAC_COMP_CP_STATS, + ucfg_mc_cp_stats_suspend_handler, NULL); + pmo_register_resume_handler(WLAN_UMAC_COMP_CP_STATS, + ucfg_mc_cp_stats_resume_handler, NULL); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..6fa1675024acb526ee772e1b42d16095cbf961e3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ucfg_api.c @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_ucfg_api.h + * + * This file provide API definitions required for northbound interaction + */ + +#ifndef __WLAN_CP_STATS_UCFG_API_H__ +#define __WLAN_CP_STATS_UCFG_API_H__ + +#endif /* __WLAN_CP_STATS_UCFG_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_utils_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_utils_api.c new file mode 100644 index 0000000000000000000000000000000000000000..89f4f4a0052d2097453757ff0b2ec1887ecdc5d5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_utils_api.c @@ -0,0 +1,351 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_utils_api.c + * + * This file provide public API definitions for other accessing other UMAC + * components + */ +#include "../../core/src/wlan_cp_stats_defs.h" +#include "../../core/src/wlan_cp_stats_obj_mgr_handler.h" +#include +#include + +QDF_STATUS wlan_cp_stats_init(void) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + status = wlan_objmgr_register_psoc_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_psoc_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register psoc create handler"); + goto wlan_cp_stats_psoc_init_fail1; + } + + status = wlan_objmgr_register_psoc_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_psoc_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register psoc destroy handler"); + goto wlan_cp_stats_psoc_init_fail2; + } + + status = wlan_objmgr_register_pdev_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_pdev_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register pdev create handler"); + goto wlan_cp_stats_pdev_init_fail1; + } + + status = wlan_objmgr_register_pdev_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_pdev_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register pdev destroy handler"); + goto wlan_cp_stats_pdev_init_fail2; + } + + status = wlan_objmgr_register_vdev_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_vdev_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register vdev create handler"); + goto wlan_cp_stats_vdev_init_fail1; + } + + status = wlan_objmgr_register_vdev_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_vdev_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register vdev destroy handler"); + goto wlan_cp_stats_vdev_init_fail2; + } + + status = wlan_objmgr_register_peer_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_peer_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register peer create handler"); + goto wlan_cp_stats_peer_init_fail1; + } + + status = wlan_objmgr_register_peer_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_peer_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register peer destroy handler"); + goto wlan_cp_stats_peer_init_fail2; + } + + return QDF_STATUS_SUCCESS; + +wlan_cp_stats_peer_init_fail2: + wlan_objmgr_unregister_peer_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_peer_obj_create_handler, + NULL); +wlan_cp_stats_peer_init_fail1: + wlan_objmgr_unregister_vdev_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_vdev_obj_destroy_handler, + NULL); +wlan_cp_stats_vdev_init_fail2: + wlan_objmgr_unregister_vdev_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_vdev_obj_create_handler, + NULL); +wlan_cp_stats_vdev_init_fail1: + wlan_objmgr_unregister_pdev_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_pdev_obj_destroy_handler, + NULL); +wlan_cp_stats_pdev_init_fail2: + wlan_objmgr_unregister_pdev_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_pdev_obj_create_handler, + NULL); +wlan_cp_stats_pdev_init_fail1: + wlan_objmgr_unregister_psoc_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_psoc_obj_destroy_handler, + NULL); +wlan_cp_stats_psoc_init_fail2: + wlan_objmgr_unregister_psoc_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_psoc_obj_create_handler, + NULL); +wlan_cp_stats_psoc_init_fail1: + return status; +} + +QDF_STATUS wlan_cp_stats_deinit(void) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + status = wlan_objmgr_unregister_psoc_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_psoc_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister psoc create handler"); + + status = wlan_objmgr_unregister_psoc_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_psoc_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister psoc destroy handler"); + + status = wlan_objmgr_unregister_pdev_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_pdev_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister pdev create handler"); + + status = wlan_objmgr_unregister_pdev_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_pdev_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister pdev destroy handler"); + + status = wlan_objmgr_unregister_vdev_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_vdev_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister vdev create handler"); + + status = wlan_objmgr_unregister_vdev_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_vdev_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister vdev destroy handler"); + + status = wlan_objmgr_unregister_peer_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_peer_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister peer create handler"); + + status = wlan_objmgr_unregister_peer_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_peer_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister peer destroy handler"); + + return status; +} + +/* DA/OL specific call back initialization */ +QDF_STATUS wlan_cp_stats_open(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct cp_stats_context *csc; + + if (!psoc) { + cp_stats_err("PSOC is null!"); + return QDF_STATUS_E_INVAL; + } + csc = + wlan_objmgr_psoc_get_comp_private_obj(psoc, WLAN_UMAC_COMP_CP_STATS); + if (!csc) { + cp_stats_err("cp_stats_context is null!"); + return QDF_STATUS_E_FAILURE; + } + + if (csc->cp_stats_open) + status = csc->cp_stats_open(psoc); + + qdf_spinlock_create(&csc->csc_lock); + return status; +} + +QDF_STATUS wlan_cp_stats_close(struct wlan_objmgr_psoc *psoc) +{ + struct cp_stats_context *csc; + + if (!psoc) { + cp_stats_err("PSOC is null!"); + return QDF_STATUS_E_INVAL; + } + csc = + wlan_objmgr_psoc_get_comp_private_obj(psoc, WLAN_UMAC_COMP_CP_STATS); + if (csc && csc->cp_stats_close) { + csc->cp_stats_close(psoc); + qdf_spinlock_destroy(&csc->csc_lock); + } + + return QDF_STATUS_SUCCESS; +} + +/* WMI registrations stage */ +QDF_STATUS wlan_cp_stats_enable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct cp_stats_context *csc; + + if (!psoc) { + cp_stats_err("PSOC is null!"); + return QDF_STATUS_E_INVAL; + } + csc = + wlan_objmgr_psoc_get_comp_private_obj(psoc, WLAN_UMAC_COMP_CP_STATS); + if (!csc) { + cp_stats_err("cp_stats_context is null!"); + return QDF_STATUS_E_FAILURE; + } + + if (csc->cp_stats_enable) + status = csc->cp_stats_enable(psoc); + + return status; +} + +QDF_STATUS wlan_cp_stats_disable(struct wlan_objmgr_psoc *psoc) +{ + struct cp_stats_context *csc; + + if (!psoc) { + cp_stats_err("PSOC is null!\n"); + return QDF_STATUS_E_INVAL; + } + csc = + wlan_objmgr_psoc_get_comp_private_obj(psoc, WLAN_UMAC_COMP_CP_STATS); + if (csc && csc->cp_stats_disable) + csc->cp_stats_disable(psoc); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_cp_stats_comp_obj_cfg(enum wlan_objmgr_obj_type obj_type, + enum wlan_cp_stats_cfg_state cfg_state, + enum wlan_umac_comp_id comp_id, + void *cmn_obj, void *data) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct cp_stats_context *csc; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_peer *peer; + enum wlan_cp_stats_comp_id cp_stats_comp_id; + + if (!cmn_obj) { + cp_stats_err("common object is null!"); + return QDF_STATUS_E_INVAL; + } + + cp_stats_comp_id = wlan_cp_stats_get_comp_id(comp_id); + if (cp_stats_comp_id >= WLAN_CP_STATS_MAX_COMPONENTS) { + cp_stats_err("Invalid UMAC id provided to cp_stats"); + return QDF_STATUS_E_INVAL; + } + + switch (obj_type) { + case WLAN_PSOC_OP: + psoc = (struct wlan_objmgr_psoc *)cmn_obj; + csc = + wlan_objmgr_psoc_get_comp_private_obj + (psoc, WLAN_UMAC_COMP_CP_STATS); + break; + case WLAN_PDEV_OP: + pdev = (struct wlan_objmgr_pdev *)cmn_obj; + csc = wlan_cp_stats_ctx_get_from_pdev(pdev); + break; + case WLAN_VDEV_OP: + vdev = (struct wlan_objmgr_vdev *)cmn_obj; + csc = wlan_cp_stats_ctx_get_from_vdev(vdev); + break; + case WLAN_PEER_OP: + peer = (struct wlan_objmgr_peer *)cmn_obj; + csc = wlan_cp_stats_ctx_get_from_peer(peer); + break; + default: + cp_stats_err("Invalid common object type"); + return QDF_STATUS_E_INVAL; + } + + if (!csc) { + cp_stats_err("cp_stats_context is null!"); + return QDF_STATUS_E_FAILURE; + } + + if (csc->cp_stats_comp_obj_config) + status = csc->cp_stats_comp_obj_config(obj_type, cfg_state, + cp_stats_comp_id, + cmn_obj, data); + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs.h new file mode 100644 index 0000000000000000000000000000000000000000..24f5a84c959095c4ba0f6b1ea2e2b23d872448fd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs.h @@ -0,0 +1,2831 @@ +/* + * Copyright (c) 2013, 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2005-2006 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has main dfs structures. + */ + +#ifndef _DFS_H_ +#define _DFS_H_ + +#include /* QDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */ +#include /* QDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */ +#include /* qdf_nbuf_t, etc. */ +#include /* qdf_assert */ +#include /* qdf_spinlock */ +#include +#include +#include /* qdf_str_lcopy */ + +#include +#include "dfs_structs.h" +#include "dfs_channel.h" +#include "dfs_ioctl_private.h" +#include /* For qdf_packed*/ +#include "queue.h" /* For STAILQ_ENTRY */ +#include +#include +#include +#include + +/* File Line and Submodule String */ +#define FLSM(x, str) #str " : " FL(x) +/* Cast to dfs type */ +#define DC(x) ((struct wlan_dfs *)(x)) + +/** + * dfs_log: dfs logging using submodule MASKs and + * QDF trace level. + * The logging is controlled by two bitmasks: + * 1) submodule bitmask: sm + * 2) trace level masks: level + * + * @dfs: The dfs object pointer or NULL if dfs is not defined. + * @sm: Submodule BITMASK. + * @level: QDF trace level. + * @args...: Variable argument list. + * + * The submodule(sm) cannot be empty even if argument dfs is NULL. + * Else the macro will create a compilation error. + * One may provide WLAN_DEBUG_DFS_ALWAYS when the argument dfs is NULL. + * Example:- + * dfs_log(NULL, WLAN_DEBUG_DFS_ALWAYS, QDF_TRACE_LEVEL_INFO,"Error pulse"); + * + * Why DC(x) is required? + * Since NULL is defined as ((void *)(0)), if the argument "dfs" + * in a call to the macro "dfs_log" is NULL + * then during compilation (NULL)->dfs_debug_mask will dereference + * a (void *) type, which is illegal. Therefore, we need + * the cast: (DC(dfs))->dfs_debug_mask. + * Example:- + * dfs_log(NULL, WLAN_DEBUG_DFS, QDF_TRACE_LEVEL_INFO,"dfs is NULL"); + */ +#define dfs_log(dfs, sm, level, args...) do { \ + if (((dfs) == NULL) || \ + ((sm) == WLAN_DEBUG_DFS_ALWAYS) || \ + ((sm) & ((DC(dfs))->dfs_debug_mask))) { \ + QDF_TRACE(QDF_MODULE_ID_DFS, level, ## args); \ + } \ +} while (0) + +#define dfs_logfl(dfs, level, sm, format, args...) \ + dfs_log(dfs, sm, level, FLSM(format, sm), ## args) + +#define dfs_alert(dfs, sm, format, args...) \ + dfs_logfl(dfs, QDF_TRACE_LEVEL_FATAL, sm, format, ## args) + +#define dfs_err(dfs, sm, format, args...) \ + dfs_logfl(dfs, QDF_TRACE_LEVEL_ERROR, sm, format, ## args) + +#define dfs_warn(dfs, sm, format, args...) \ + dfs_logfl(dfs, QDF_TRACE_LEVEL_WARN, sm, format, ## args) + +#define dfs_info(dfs, sm, format, args...) \ + dfs_logfl(dfs, QDF_TRACE_LEVEL_INFO, sm, format, ## args) + +#define dfs_debug(dfs, sm, format, args...) \ + dfs_logfl(dfs, QDF_TRACE_LEVEL_DEBUG, sm, format, ## args) + +#define DFS_MIN(a, b) ((a) < (b)?(a):(b)) +#define DFS_MAX(a, b) ((a) > (b)?(a) : (b)) +#define DFS_DIFF(a, b)(DFS_MAX(a, b) - DFS_MIN(a, b)) + +/** + * Maximum number of radar events to be processed in a single iteration. + * Allows soft watchdog to run. + */ +#define MAX_EVENTS 100 + +/** + * Constants to use for chirping detection. + * + * All are unconverted as HW reports them. + * + * XXX Are these constants with or without fast clock 5GHz operation? + * XXX Peregrine reports pulses in microseconds, not hardware clocks! + */ + +#define MAX_DUR_FOR_LOW_RSSI 4 + +/** + * Cascade has issue with reported duration especially when there is a + * crossover of chirp from one segment to another. It may report a value + * of duration that is well below 50us for a valid FCC type 5 chirping + * pulse. For now changing minimum duration as a work around. This will + * affect all chips but since we detect chirp with Merlin+, we may be OK + * for now. We need a more robust solution for this. + */ +#define MIN_BIN5_DUR_CAS 25 /* 50 * 1.25*/ +#define MIN_BIN5_DUR_MICROSEC_CAS 20 +#define MIN_BIN5_DUR 63 /* 50 * 1.25*/ +#define MIN_BIN5_DUR_MICROSEC 50 +#define MAYBE_BIN5_DUR 35 /* 28 * 1.25*/ +#define MAYBE_BIN5_DUR_MICROSEC 28 + +/* Conversion is already done using dfs->dur_multiplier */ +#define MAX_BIN5_DUR 145 /* use 145 for osprey */ +#define MAX_BIN5_DUR_MICROSEC 105 + +#define DFS_MARGIN_EQUAL(a, b, margin) ((DFS_DIFF(a, b)) <= margin) +#define DFS_MAX_STAGGERED_BURSTS 3 + +/** + * All filter thresholds in the radar filter tables are effective at a 50% + * channel loading. + */ +#define DFS_CHAN_LOADING_THRESH 50 +#define DFS_EXT_CHAN_LOADING_THRESH 30 +#define DFS_DEFAULT_PRI_MARGIN 6 +#define DFS_DEFAULT_FIXEDPATTERN_PRI_MARGIN 4 + +#define WLAN_DFSQ_LOCK(_dfs) qdf_spin_lock_bh(&(_dfs)->dfs_radarqlock) +#define WLAN_DFSQ_UNLOCK(_dfs) qdf_spin_unlock_bh(&(_dfs)->dfs_radarqlock) +#define WLAN_DFSQ_LOCK_CREATE(_dfs) qdf_spinlock_create( \ + &(_dfs)->dfs_radarqlock) +#define WLAN_DFSQ_LOCK_DESTROY(_dfs) qdf_spinlock_destroy( \ + &(_dfs)->dfs_radarqlock) + +#define WLAN_ARQ_LOCK(_dfs) qdf_spin_lock_bh(&(_dfs)->dfs_arqlock) +#define WLAN_ARQ_UNLOCK(_dfs) qdf_spin_unlock_bh(&(_dfs)->dfs_arqlock) +#define WLAN_ARQ_LOCK_CREATE(_dfs) qdf_spinlock_create(&(_dfs)->dfs_arqlock) +#define WLAN_ARQ_LOCK_DESTROY(_dfs) qdf_spinlock_destroy(&(_dfs)->dfs_arqlock) + +#define WLAN_DFSEVENTQ_LOCK(_dfs) qdf_spin_lock_bh(&(_dfs)->dfs_eventqlock) +#define WLAN_DFSEVENTQ_UNLOCK(_dfs) qdf_spin_unlock_bh( \ + &(_dfs)->dfs_eventqlock) +#define WLAN_DFSEVENTQ_LOCK_CREATE(_dfs) qdf_spinlock_create( \ + &(_dfs)->dfs_eventqlock) +#define WLAN_DFSEVENTQ_LOCK_DESTROY(_dfs) qdf_spinlock_destroy( \ + &(_dfs)->dfs_eventqlock) + +#define WLAN_DFSNOL_LOCK(_dfs) qdf_spin_lock_bh(&(_dfs)->dfs_nol_lock) +#define WLAN_DFSNOL_UNLOCK(_dfs) qdf_spin_unlock_bh(&(_dfs)->dfs_nol_lock) +#define WLAN_DFSNOL_LOCK_CREATE(_dfs) qdf_spinlock_create( \ + &(_dfs)->dfs_nol_lock) +#define WLAN_DFSNOL_LOCK_DESTROY(_dfs) qdf_spinlock_destroy( \ + &(_dfs)->dfs_nol_lock) + +#define PRECAC_LIST_LOCK(_dfs) qdf_spin_lock_irqsave( \ + &(_dfs)->dfs_precac_lock) +#define PRECAC_LIST_UNLOCK(_dfs) qdf_spin_unlock_irqrestore( \ + &(_dfs)->dfs_precac_lock) +#define PRECAC_LIST_LOCK_CREATE(_dfs) qdf_spinlock_create( \ + &(_dfs)->dfs_precac_lock) +#define PRECAC_LIST_LOCK_DESTROY(_dfs) qdf_spinlock_destroy( \ + &(_dfs)->dfs_precac_lock) + +#define WLAN_DFS_DATA_STRUCT_LOCK(_dfs) \ + qdf_spin_lock_bh(&(_dfs)->dfs_data_struct_lock) +#define WLAN_DFS_DATA_STRUCT_UNLOCK(_dfs) \ + qdf_spin_unlock_bh(&(_dfs)->dfs_data_struct_lock) +#define WLAN_DFS_DATA_STRUCT_LOCK_CREATE(_dfs) \ + qdf_spinlock_create(&(_dfs)->dfs_data_struct_lock) +#define WLAN_DFS_DATA_STRUCT_LOCK_DESTROY(_dfs) \ + qdf_spinlock_destroy(&(_dfs)->dfs_data_struct_lock) + +/* Wrappers to call MLME radar during mode switch lock. */ +#define DFS_RADAR_MODE_SWITCH_LOCK(_dfs) \ + dfs_mlme_acquire_radar_mode_switch_lock((_dfs)->dfs_pdev_obj) +#define DFS_RADAR_MODE_SWITCH_UNLOCK(_dfs) \ + dfs_mlme_release_radar_mode_switch_lock((_dfs)->dfs_pdev_obj) + +/* Mask for time stamp from descriptor */ +#define DFS_TSMASK 0xFFFFFFFF +/* Shift for time stamp from descriptor */ +#define DFS_TSSHIFT 32 +/* 64 bit TSF wrap value */ +#define DFS_TSF_WRAP 0xFFFFFFFFFFFFFFFFULL +/* TS mask for 64 bit value */ +#define DFS_64BIT_TSFMASK 0x0000000000007FFFULL + +#define DFS_AR_RADAR_RSSI_THR 5 /* in dB */ +#define DFS_AR_RADAR_RESET_INT 1 /* in secs */ +#define DFS_AR_RADAR_MAX_HISTORY 500 +#define DFS_AR_REGION_WIDTH 128 +#define DFS_AR_RSSI_THRESH_STRONG_PKTS 17 /* in dB */ +#define DFS_AR_RSSI_DOUBLE_THRESHOLD 15 /* in dB */ +#define DFS_AR_MAX_NUM_ACK_REGIONS 9 +#define DFS_AR_ACK_DETECT_PAR_THRESH 20 +#define DFS_AR_PKT_COUNT_THRESH 20 + +#define DFS_MAX_DL_SIZE 64 +#define DFS_MAX_DL_MASK 0x3F + +#define DFS_NOL_TIME DFS_NOL_TIMEOUT_US +/* 30 minutes in usecs */ + +#define DFS_WAIT_TIME (60*1000000) /* 1 minute in usecs */ + +#define DFS_DISABLE_TIME (3*60*1000000) /* 3 minutes in usecs */ + +#define DFS_MAX_B5_SIZE 128 +#define DFS_MAX_B5_MASK 0x0000007F /* 128 */ + +/* Max number of overlapping filters */ +#define DFS_MAX_RADAR_OVERLAP 16 + +/* Max number of dfs events which can be q'd */ +#define DFS_MAX_EVENTS 1024 + +#define DFS_RADAR_EN 0x80000000 /* Radar detect is capable */ +#define DFS_AR_EN 0x40000000 /* AR detect is capable */ +/* Radar detect in second segment is capable */ +#define DFS_SECOND_SEGMENT_RADAR_EN 0x20000000 +#define DFS_MAX_RSSI_VALUE 0x7fffffff /* Max rssi value */ + +#define DFS_BIN_MAX_PULSES 60 /* max num of pulses in a burst */ +#define DFS_BIN5_PRI_LOWER_LIMIT 990 /* us */ + +/** + * To cover the single pusle burst case, change from 2010 us to + * 2010000 us. + */ + +/** + * This is reverted back to 2010 as larger value causes false + * bin5 detect (EV76432, EV76320) + */ +#define DFS_BIN5_PRI_HIGHER_LIMIT 2010 /* us */ + +#define DFS_BIN5_WIDTH_MARGIN 4 /* us */ +#define DFS_BIN5_RSSI_MARGIN 5 /* dBm */ + +/** + * Following threshold is not specified but should be + * okay statistically. + */ +#define DFS_BIN5_BRI_LOWER_LIMIT 300000 /* us */ +#define DFS_BIN5_BRI_UPPER_LIMIT 12000000 /* us */ + +/* Max number of pulses kept in buffer */ +#define DFS_MAX_PULSE_BUFFER_SIZE 1024 +#define DFS_MAX_PULSE_BUFFER_MASK 0x3ff + +#define DFS_FAST_CLOCK_MULTIPLIER (800/11) +#define DFS_NO_FAST_CLOCK_MULTIPLIER (80) +#define DFS_BIG_SIDX 10000 + +/* Min value of valid psidx diff */ +#define DFS_MIN_PSIDX_DIFF 4 +/* Max value of valid psidx diff */ +#define DFS_MAX_PSIDX_DIFF 16 + +/** + * Software use: channel interference used for as AR as well as RADAR + * interference detection. + */ +#define CHANNEL_INTERFERENCE 0x01 + +#define CHANNEL_2GHZ 0x00080 /* 2 GHz spectrum channel. */ +#define CHANNEL_OFDM 0x00040 /* OFDM channel */ +#define CHANNEL_TURBO 0x00010 /* Turbo Channel */ +#define CHANNEL_108G (CHANNEL_2GHZ|CHANNEL_OFDM|CHANNEL_TURBO) + +/* qdf_packed - denotes structure is packed. */ +#define qdf_packed __qdf_packed + +#define SEG_ID_PRIMARY 0 +#define SEG_ID_SECONDARY 1 + +/* MIN and MAX width for different regions */ +#define REG0_MIN_WIDTH 33 +#define REG0_MAX_WIDTH 38 +#define REG1_MIN_WIDTH 39 +#define REG1_MAX_WIDTH 44 +#define REG2_MIN_WIDTH 53 +#define REG2_MAX_WIDTH 58 +#define REG3_MIN_WIDTH 126 +#define REG3_MAX_WIDTH 140 +#define REG4_MIN_WIDTH 141 +#define REG4_MAX_WIDTH 160 +#define REG5_MIN_WIDTH 189 +#define REG5_MAX_WIDTH 210 +#define REG6_MIN_WIDTH 360 +#define REG6_MAX_WIDTH 380 +#define REG7_MIN_WIDTH 257 +#define REG7_MAX_WIDTH 270 +#define REG8_MIN_WIDTH 295 +#define REG8_MAX_WIDTH 302 + +#define OVER_SAMPLING_FREQ 44000 +#define SAMPLING_FREQ 40000 +#define HUNDRED 100 +#define NUM_BINS 128 +#define THOUSAND 1000 + +/* Array offset to ETSI legacy pulse */ +#define ETSI_LEGACY_PULSE_ARR_OFFSET 4 + +#define ETSI_RADAR_EN302_502_FREQ_LOWER 5725 +#define ETSI_RADAR_EN302_502_FREQ_UPPER 5865 + +#define DFS_NOL_ADD_CHAN_LOCKED(dfs, freq, timeout) \ + do { \ + WLAN_DFSNOL_LOCK(dfs); \ + dfs_nol_addchan(dfs, freq, timeout); \ + WLAN_DFSNOL_UNLOCK(dfs); \ + } while (0) + +/* + * Free the NOL element in a thread. This is to avoid freeing the + * timer object from within timer callback function . The nol element + * contains the timer Object. + */ +#define DFS_NOL_DELETE_CHAN_LOCKED(dfs, freq, chwidth) \ + do { \ + WLAN_DFSNOL_LOCK(dfs); \ + dfs_nol_delete(dfs, freq, chwidth); \ + qdf_sched_work(NULL, &dfs->dfs_nol_elem_free_work); \ + WLAN_DFSNOL_UNLOCK(dfs); \ + } while (0) + +#define DFS_GET_NOL_LOCKED(dfs, dfs_nol, nchan) \ + do { \ + WLAN_DFSNOL_LOCK(dfs); \ + dfs_get_nol(dfs, dfs_nol, nchan); \ + WLAN_DFSNOL_UNLOCK(dfs); \ + } while (0) + +#define DFS_PRINT_NOL_LOCKED(dfs) \ + do { \ + WLAN_DFSNOL_LOCK(dfs); \ + dfs_print_nol(dfs); \ + WLAN_DFSNOL_UNLOCK(dfs); \ + } while (0) + +#define DFS_NOL_FREE_LIST_LOCKED(dfs) \ + do { \ + WLAN_DFSNOL_LOCK(dfs); \ + dfs_nol_free_list(dfs); \ + WLAN_DFSNOL_UNLOCK(dfs); \ + } while (0) + +/* Host sends the average parameters of the radar pulses and starts the status + * wait timer with this timeout. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +#define HOST_DFS_STATUS_WAIT_TIMER_MS 200 +#endif + +/* + * USENOL_DISABLE_NOL_HOST_AND_FW : Do not add radar hit channel to NOL + * in host and FW. Enable CSA on the same channel. + */ +#define USENOL_DISABLE_NOL_HOST_AND_FW 0 +/* + * USENOL_ENABLE_NOL_HOST_AND_FW : Add the radar hit channel to NOL in + * host and FW (in case of FO). NOL timer cannot be configured by the user + * as FW does not allow manipulating NOL timeout. If noltimeout is configured, + * (say 1 min) FW will not be intimated about the configuration and hence NOL + * timer may elapse at different instances in host (after 1 min) and FW (after + * default 30 min) which could lead to DFS Violation if host tries to come up + * on the channel after host NOL timeout (of 1 min) as the FW would still + * have the channel in NOL list. + */ +#define USENOL_ENABLE_NOL_HOST_AND_FW 1 +/* + * USENOL_ENABLE_NOL_HOST_DISABLE_NOL_FW : Add the radar hit channel to NOL + * in host. NOL timer can be configured by user. NOL in FW (for FO) is disabled. + */ +#define USENOL_ENABLE_NOL_HOST_DISABLE_NOL_FW 2 + +/* Non Agile detector IDs */ +#define DETECTOR_ID_0 0 +#define DETECTOR_ID_1 1 +/* Agile detector ID */ +#define AGILE_DETECTOR_ID 2 + +/** + * struct dfs_pulseparams - DFS pulse param structure. + * @p_time: Time for start of pulse in usecs. + * @p_dur: Duration of pulse in usecs. + * @p_rssi: RSSI of pulse. + * @p_seg_id: Segment id. + * @p_sidx: Sidx value. + * @p_delta_peak: Delta peak value. + * @p_psidx_diff: The difference in the FFT peak index between the short FFT + * and the first long FFT. + * @p_seq_num: Sequence number. + */ +struct dfs_pulseparams { + uint64_t p_time; + uint8_t p_dur; + uint8_t p_rssi; + uint8_t p_seg_id; + int16_t p_sidx; + int8_t p_delta_peak; + int16_t p_psidx_diff; + uint32_t p_seq_num; +} qdf_packed; + +/** + * struct dfs_pulseline - Pulseline structure. + * @pl_elems[]: array of pulses in delay line. + * @pl_firstelem: Index of the first element. + * @pl_lastelem: Index of the last element. + * @pl_numelems: Number of elements in the delay line. + */ +struct dfs_pulseline { + struct dfs_pulseparams pl_elems[DFS_MAX_PULSE_BUFFER_SIZE]; + uint32_t pl_firstelem; + uint32_t pl_lastelem; + uint32_t pl_numelems; +} qdf_packed; + +#define DFS_EVENT_CHECKCHIRP 0x01 /* Whether to check the chirp flag */ +#define DFS_EVENT_HW_CHIRP 0x02 /* hardware chirp */ +#define DFS_EVENT_SW_CHIRP 0x04 /* software chirp */ +/* Whether the event contains valid psidx diff value*/ +#define DFS_EVENT_VALID_PSIDX_DIFF 0x08 + +/* Use this only if the event has CHECKCHIRP set. */ +#define DFS_EVENT_ISCHIRP(e) \ + ((e)->re_flags & (DFS_EVENT_HW_CHIRP | DFS_EVENT_SW_CHIRP)) + +/** + * Check if the given event is to be rejected as not possibly + * a chirp. This means: + * (a) it's a hardware or software checked chirp, and + * (b) the HW/SW chirp bits are both 0. + */ +#define DFS_EVENT_NOTCHIRP(e) \ + (((e)->re_flags & (DFS_EVENT_CHECKCHIRP)) && (!DFS_EVENT_ISCHIRP((e)))) + +/** + * struct dfs_event - DFS event structure. + * @re_full_ts: 64-bit full timestamp from interrupt time. + * @re_ts: Original 15 bit recv timestamp. + * @re_rssi: Rssi of radar event. + * @re_dur: Duration of radar pulse. + * @re_chanindex: Channel of event. + * @re_flags: Event flags. + * @re_freq: Centre frequency of event, KHz. + * @re_freq_lo: Lower bounds of frequency, KHz. + * @re_freq_hi: Upper bounds of frequency, KHz. + * @re_seg_id: HT80_80/HT160 use. + * @re_sidx: Seg index. + * @re_freq_offset_khz: Freq offset in KHz + * @re_peak_mag: Peak mag. + * @re_total_gain: Total gain. + * @re_mb_gain: Mb gain. + * @re_relpwr_db: Relpower in db. + * @re_delta_diff: Delta diff. + * @re_delta_peak: Delta peak. + * @re_psidx_diff: Psidx diff. + * @re_list: List of radar events. + */ +struct dfs_event { + uint64_t re_full_ts; + uint32_t re_ts; + uint8_t re_rssi; + uint8_t re_dur; + uint8_t re_chanindex; + uint8_t re_flags; + uint32_t re_freq; + uint32_t re_freq_lo; + uint32_t re_freq_hi; + uint8_t re_seg_id; + int re_sidx; + u_int re_freq_offset_khz; + int re_peak_mag; + int re_total_gain; + int re_mb_gain; + int re_relpwr_db; + uint8_t re_delta_diff; + int8_t re_delta_peak; + int16_t re_psidx_diff; + + STAILQ_ENTRY(dfs_event) re_list; +} qdf_packed; + +#define DFS_AR_MAX_ACK_RADAR_DUR 511 +#define DFS_AR_MAX_NUM_PEAKS 3 +#define DFS_AR_ARQ_SIZE 2048 /* 8K AR events for buffer size */ +#define DFS_AR_ARQ_SEQSIZE 2049 /* Sequence counter wrap for AR */ + +#define DFS_RADARQ_SIZE 512 /* 1K radar events for buffer size */ +#define DFS_RADARQ_SEQSIZE 513 /* Sequence counter wrap for radar */ +/* Number of radar channels we keep state for */ +#define DFS_NUM_RADAR_STATES 64 +/* Max number radar filters for each type */ +#define DFS_MAX_NUM_RADAR_FILTERS 10 +/* Number of different radar types */ +#define DFS_MAX_RADAR_TYPES 32 +/* Number of filter index table rows */ +#define DFS_NUM_FT_IDX_TBL_ROWS 256 + +/* RADAR filter pattern type 1*/ +#define WLAN_DFS_RF_PATTERN_TYPE_1 1 + +/** + * struct dfs_ar_state - DFS AR state structure. + * @ar_prevwidth: Previous width. + * @ar_phyerrcount[]: Phy error count. + * @ar_acksum: Acksum. + * @ar_packetthreshold: Thresh to determine traffic load. + * @ar_parthreshold: Thresh to determine peak. + * @ar_radarrssi: Rssi threshold for AR event. + * @ar_prevtimestamp: Prev time stamp. + * @ar_peaklist[]: Peak list. + */ +struct dfs_ar_state { + uint32_t ar_prevwidth; + uint32_t ar_phyerrcount[DFS_AR_MAX_ACK_RADAR_DUR]; + uint32_t ar_acksum; + uint32_t ar_packetthreshold; + uint32_t ar_parthreshold; + uint32_t ar_radarrssi; + uint16_t ar_prevtimestamp; + uint16_t ar_peaklist[DFS_AR_MAX_NUM_PEAKS]; +}; + +/** + * struct dfs_delayelem - Delay Element. + * @de_time: Current "filter" time for start of pulse in usecs. + * @de_dur: Duration of pulse in usecs. + * @de_rssi: Rssi of pulse in dB. + * @de_ts: Time stamp for this delay element. + * @de_seg_id: Segment id for HT80_80/HT160 use. + * @de_sidx: Sidx value. + * @de_delta_peak: Delta peak. + * @de_psidx_diff: Psidx diff. + * @de_seq_num: Sequence number. + */ +struct dfs_delayelem { + uint32_t de_time; + uint8_t de_dur; + uint8_t de_rssi; + uint64_t de_ts; + uint8_t de_seg_id; + int16_t de_sidx; + int8_t de_delta_peak; + int16_t de_psidx_diff; + uint32_t de_seq_num; +} qdf_packed; + +/** + * struct dfs_delayline - DFS Delay Line. + * @dl_elems[]: Array of pulses in delay line. + * @dl_last_ts: Last timestamp the delay line was used (in usecs). + * @dl_firstelem: Index of the first element. + * @dl_lastelem: Index of the last element. + * @dl_numelems: Number of elements in the delay line. + * The following is to handle fractional PRI pulses that can cause false + * detection. + * @dl_seq_num_start: Sequence number of first pulse that was part of + * threshold match. + * @dl_seq_num_stop: Sequence number of last pulse that was part of threshold + * match. + * The following is required because the first pulse may or may not be in the + * delay line but we will find it iin the pulse line using dl_seq_num_second's + * diff_ts value. + * @dl_seq_num_second: Sequence number of second pulse that was part of + * threshold match. + * @dl_search_pri: We need final search PRI to identify possible fractional + * PRI issue. + * @dl_min_sidx: Minimum sidx value of pulses used to match thershold. + * Used for sidx spread check. + * @dl_max_sidx: Maximum sidx value of pulses used to match thershold. + * Used for sidx spread check. + * @dl_delta_peak_match_count: Number of pulse in the delay line that had valid + * delta peak value. + * @dl_psidx_diff_match_count: Number of pulse in the delay line that had valid + * psidx diff value. + */ +struct dfs_delayline { + struct dfs_delayelem dl_elems[DFS_MAX_DL_SIZE]; + uint64_t dl_last_ts; + uint32_t dl_firstelem; + uint32_t dl_lastelem; + uint32_t dl_numelems; + uint32_t dl_seq_num_start; + uint32_t dl_seq_num_stop; + uint32_t dl_seq_num_second; + uint32_t dl_search_pri; + int16_t dl_min_sidx; + int8_t dl_max_sidx; + uint8_t dl_delta_peak_match_count; + uint8_t dl_psidx_diff_match_count; +} qdf_packed; + +/** + * struct dfs_filter - Dfs filter. + * @rf_dl: Delay line of pulses for this filter. + * @rf_numpulses: Number of pulses in the filter. + * @rf_minpri: Min pri to be considered for this filter. + * @rf_maxpri: Max pri to be considered for this filter. + * @rf_threshold: Match filter output threshold for radar detect. + * @rf_filterlen: Length (in usecs) of the filter. + * @rf_patterntype: Fixed or variable pattern type. + * @rf_fixed_pri_radar_pulse: indicates if it is a fixed pri pulse. + * @rf_mindur: Min duration for this radar filter. + * @rf_maxdur: Max duration for this radar filter. + * @rf_ignore_pri_window: Ignore pri window. + * @rf_pulseid: Unique ID corresponding to the original filter ID. + * To reduce false detection, look at frequency spread. For now we will use + * sidx spread. But for HT160 frequency spread will be a better measure. + * @rf_sidx_spread: Maximum SIDX value spread in a matched sequence + * excluding FCC Bin 5. + * @rf_check_delta_peak: Minimum allowed delta_peak value for a pulse to be + * considetred for this filter's match. + */ +struct dfs_filter { + struct dfs_delayline rf_dl; + uint32_t rf_numpulses; + uint32_t rf_minpri; + uint32_t rf_maxpri; + uint32_t rf_threshold; + uint32_t rf_filterlen; + uint32_t rf_patterntype; + uint32_t rf_fixed_pri_radar_pulse; + uint32_t rf_mindur; + uint32_t rf_maxdur; + uint32_t rf_ignore_pri_window; + uint32_t rf_pulseid; + uint16_t rf_sidx_spread; + int8_t rf_check_delta_peak; +} qdf_packed; + +/** + * struct dfs_filtertype - Structure of DFS Filter type. + * @ft_filters[]: Array of ptrs storing addresses for struct of dfs_filter. + * @ft_filterdur: Duration of pulse which specifies filter type. + * @ft_numfilters: Num filters of this type. + * @ft_last_ts: Last timestamp this filtertype was used (in usecs). + * @ft_mindur: Min pulse duration to be considered for this filter type. + * @ft_maxdur: Max pulse duration to be considered for this filter type. + * @ft_rssithresh: Min rssi to be considered for this filter type. + * @ft_numpulses: Num pulses in each filter of this type. + * @ft_patterntype: Fixed or variable pattern type. + * @ft_minpri: Min pri to be considered for this type. + * @ft_rssimargin: Rssi threshold margin. In Turbo Mode HW reports rssi 3dB + * lower than in non TURBO mode. This will offset that diff. + */ +struct dfs_filtertype { + struct dfs_filter *ft_filters[DFS_MAX_NUM_RADAR_FILTERS]; + uint32_t ft_filterdur; + uint32_t ft_numfilters; + uint64_t ft_last_ts; + uint32_t ft_mindur; + uint32_t ft_maxdur; + uint32_t ft_rssithresh; + uint32_t ft_numpulses; + uint32_t ft_patterntype; + uint32_t ft_minpri; + uint32_t ft_rssimargin; +}; + +/** + * struct dfs_channel - Channel structure for dfs component. + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_flags: Channel flags. + * @dfs_ch_flagext: Extended channel flags. + * @dfs_ch_ieee: IEEE channel number. + * @dfs_ch_vhtop_ch_freq_seg1: IEEE Channel Center of primary segment + * @dfs_ch_vhtop_ch_freq_seg2: IEEE Channel Center applicable for 80+80MHz + * mode of operation. + * @dfs_ch_mhz_freq_seg1: Channel center frequency of primary segment in + * MHZ. + * @dfs_ch_mhz_freq_seg2: Channel center frequency of secondary segment + * in MHZ applicable only for 80+80MHZ mode of + * operation. + */ +struct dfs_channel { + uint16_t dfs_ch_freq; + uint64_t dfs_ch_flags; + uint16_t dfs_ch_flagext; + uint8_t dfs_ch_ieee; + uint8_t dfs_ch_vhtop_ch_freq_seg1; + uint8_t dfs_ch_vhtop_ch_freq_seg2; + uint16_t dfs_ch_mhz_freq_seg1; + uint16_t dfs_ch_mhz_freq_seg2; +}; + +/** + * struct dfs_state - DFS state. + * @rs_chan: Channel info. + * @rs_chanindex: Channel index in radar structure. + * @rs_numradarevents: Number of radar events. + * @rs_param: Phy param. + */ +struct dfs_state { + struct dfs_channel rs_chan; + uint8_t rs_chanindex; + uint32_t rs_numradarevents; + struct wlan_dfs_phyerr_param rs_param; +}; + +#define DFS_NOL_TIMEOUT_S (30*60) /* 30 minutes in seconds */ +#define DFS_NOL_TIMEOUT_MS (DFS_NOL_TIMEOUT_S * 1000) +#define DFS_NOL_TIMEOUT_US (DFS_NOL_TIMEOUT_MS * 1000) + +/** + * struct dfs_nolelem - DFS NOL element. + * @nol_dfs Back pointer to dfs object. + * @nol_freq: Centre frequency. + * @nol_chwidth: Event width (MHz). + * @nol_start_ticks: NOL start time in OS ticks. + * @nol_timeout_ms: NOL timeout value in msec. + * @nol_timer: Per element NOL timer. + * @nol_next: Next element pointer. + */ +struct dfs_nolelem { + TAILQ_ENTRY(dfs_nolelem) nolelem_list; + struct wlan_dfs *nol_dfs; + uint32_t nol_freq; + uint32_t nol_chwidth; + unsigned long nol_start_ticks; + uint32_t nol_timeout_ms; + qdf_timer_t nol_timer; + struct dfs_nolelem *nol_next; +} qdf_packed; + + +/** + * struct dfs_info - DFS Info. + * @rn_ftindex: Number of different types of radars. + * @rn_lastfull_ts: Last 64 bit timstamp from recv interrupt. + * @rn_last_ts: last 15 bit ts from recv descriptor. + * @rn_last_unique_ts: last unique 32 bit ts from recv descriptor. + * @rn_ts_prefix: Prefix to prepend to 15 bit recv ts. + * @rn_numbin5radars: Number of bin5 radar pulses to search for. + * @rn_fastdivGCval: Value of fast diversity gc limit from init file. + * @rn_minrssithresh: Min rssi for all radar types. + * @rn_maxpulsedur: Max pulse width in TSF ticks. + * @dfs_ext_chan_busy: Ext chan busy. + * @ext_chan_busy_ts: Ext chan busy time. + * @dfs_bin5_chirp_ts: Ext bin5 chrip time. + * @dfs_last_bin5_dur: Last bin5 during. + */ +struct dfs_info { + uint32_t rn_ftindex; + uint64_t rn_lastfull_ts; + uint16_t rn_last_ts; + uint32_t rn_last_unique_ts; + uint64_t rn_ts_prefix; + uint32_t rn_numbin5radars; + uint32_t rn_fastdivGCval; + int32_t rn_minrssithresh; + uint32_t rn_maxpulsedur; + uint8_t dfs_ext_chan_busy; + uint64_t ext_chan_busy_ts; + uint64_t dfs_bin5_chirp_ts; + uint8_t dfs_last_bin5_dur; +} qdf_packed; + +/** + * struct dfs_bin5elem - BIN5 elements. + * @be_ts: Timestamp for the bin5 element. + * @be_rssi: Rssi for the bin5 element. + * @be_dur: Duration of bin5 element. + */ +struct dfs_bin5elem { + uint64_t be_ts; + uint32_t be_rssi; + uint32_t be_dur; +}; + +/** + * struct dfs_bin5radars - BIN5 radars. + * @br_elems[]: List of bin5 elems that fall within the time window. + * @br_firstelem: Index of the first element. + * @br_lastelem: Index of the last element. + * @br_numelems: Number of elements in the delay line. + * @br_pulse: Original info about bin5 pulse. + */ +struct dfs_bin5radars { + struct dfs_bin5elem br_elems[DFS_MAX_B5_SIZE]; + uint32_t br_firstelem; + uint32_t br_lastelem; + uint32_t br_numelems; + struct dfs_bin5pulse br_pulse; +}; + +/** + * struct dfs_stats - DFS stats. + * @num_radar_detects: Total num. of radar detects. + * @num_seg_two_radar_detects: Total num. of radar detected in secondary segment + * @total_phy_errors: Total PHY errors. + * @owl_phy_errors: OWL PHY errors. + * @pri_phy_errors: Primary channel phy errors. + * @ext_phy_errors: Extension channel phy errors. + * @dc_phy_errors: DC PHY errors. + * @early_ext_phy_errors: Extension channel early radar found error. + * @bwinfo_errors: Bogus bandwidth info received in descriptor. + * @datalen_discards: data length at least three bytes of payload. + * @rssi_discards: RSSI is not accurate. + * @last_reset_tstamp: Last reset timestamp. + */ +struct dfs_stats { + uint32_t num_radar_detects; + uint32_t num_seg_two_radar_detects; + uint32_t total_phy_errors; + uint32_t owl_phy_errors; + uint32_t pri_phy_errors; + uint32_t ext_phy_errors; + uint32_t dc_phy_errors; + uint32_t early_ext_phy_errors; + uint32_t bwinfo_errors; + uint32_t datalen_discards; + uint32_t rssi_discards; + uint64_t last_reset_tstamp; +}; + +#define DFS_EVENT_LOG_SIZE 256 + +/** + * struct dfs_event_log - DFS event log. + * @ts: 64-bit full timestamp from interrupt time. + * @diff_ts: Diff timestamp. + * @rssi: Rssi of radar event. + * @dur: Duration of radar pulse. + * @is_chirp: Chirp flag. + * @seg_id: HT80_80/HT160 use. + * @sidx: Seg index. + * @freq_offset_khz: Freq offset in KHz + * @peak_mag: Peak mag. + * @total_gain: Total gain. + * @mb_gain: Mb gain. + * @relpwr_db: Relpower in db. + * @delta_diff: Delta diff. + * @delta_peak: Delta peak. + * @psidx_diff: Psidx diff. + */ + +struct dfs_event_log { + uint64_t ts; + uint32_t diff_ts; + uint8_t rssi; + uint8_t dur; + int is_chirp; + uint8_t seg_id; + int sidx; + u_int freq_offset_khz; + int peak_mag; + int total_gain; + int mb_gain; + int relpwr_db; + uint8_t delta_diff; + int8_t delta_peak; + int16_t psidx_diff; +}; + +#define WLAN_DFS_WEATHER_CHANNEL_WAIT_MIN 10 /*10 minutes*/ +#define WLAN_DFS_WEATHER_CHANNEL_WAIT_S (WLAN_DFS_WEATHER_CHANNEL_WAIT_MIN * 60) +#define WLAN_DFS_WEATHER_CHANNEL_WAIT_MS \ + ((WLAN_DFS_WEATHER_CHANNEL_WAIT_S) * 1000) /*in MS*/ + +#define WLAN_DFS_WAIT_POLL_PERIOD 2 /* 2 seconds */ +#define WLAN_DFS_WAIT_POLL_PERIOD_MS \ + ((WLAN_DFS_WAIT_POLL_PERIOD) * 1000) /*in MS*/ + +#define DFS_DEBUG_TIMEOUT_S 30 /* debug timeout is 30 seconds */ +#define DFS_DEBUG_TIMEOUT_MS (DFS_DEBUG_TIMEOUT_S * 1000) + +#define RSSI_POSSIBLY_FALSE 50 +#define SEARCH_FFT_REPORT_PEAK_MAG_THRSH 40 + +#define MIN_DFS_SUBCHAN_BW 20 /* Minimum bandwidth of each subchannel. */ + +#define FREQ_OFFSET_BOUNDARY_FOR_80MHZ 40 + +/** + * struct dfs_mode_switch_defer_params - Parameters storing DFS information + * before defer, as part of HW mode switch. + * + * @radar_params: Deferred radar parameters. + * @is_cac_completed: Boolean representing CAC completion event. + * @is_radar_detected: Boolean representing radar event. + */ +struct dfs_mode_switch_defer_params { + struct radar_found_info *radar_params; + bool is_cac_completed; + bool is_radar_detected; +}; + +/** + * struct wlan_dfs - The main dfs structure. + * @dfs_debug_mask: Current debug bitmask. + * @dfs_curchan_radindex: Current channel radar index. + * @dfs_extchan_radindex: Extension channel radar index. + * @dfsdomain: Current DFS domain. + * @dfs_proc_phyerr: Flags for Phy Errs to process. + * @dfs_eventq: Q of free dfs event objects. + * @dfs_eventqlock: Lock for free dfs event list. + * @dfs_radarq: Q of radar events. + * @dfs_radarqlock: Lock for dfs q. + * @dfs_arq: Q of AR events. + * @dfs_arqlock: Lock for AR q. + * @dfs_ar_state: AR state. + * @dfs_radar[]: Per-Channel Radar detector state. + * @dfs_radarf[]: One filter for each radar pulse type. + * @dfs_rinfo: State vars for radar processing. + * @dfs_b5radars: Array of bin5 radar events. + * @dfs_ftindextable: Map of radar durs to filter types. + * @dfs_nol: Non occupancy list for radar. + * @dfs_nol_count: How many items? + * @dfs_defaultparams: Default phy params per radar state. + * @wlan_dfs_stats: DFS related stats. + * @pulses: Pulse history. + * @events: Events structure. + * @wlan_radar_tasksched: Radar task is scheduled. + * @wlan_dfswait: Waiting on channel for radar detect. + * @wlan_dfstest: Test timer in progress. + * @dfs_caps: Object of wlan_dfs_caps structure. + * @wlan_dfstest_ieeechan: IEEE chan num to return to after a dfs mute + * test. + * @wlan_dfs_cac_time: CAC period. + * @wlan_dfstesttime: Time to stay off chan during dfs test. + * @wlan_dfswaittimer: Dfs wait timer. + * @wlan_dfstesttimer: Dfs mute test timer. + * @wlan_dfs_debug_timer: Dfs debug timer. + * @dfs_bangradar_type: Radar simulation type. + * @is_radar_found_on_secondary_seg: Radar on second segment. + * @dfs_radar_found_for_fo: Radar found event for FO(Full Offload) is + * received. + * @is_radar_during_precac: Radar found during precac. + * @dfs_precac_lock: Lock to protect precac lists. + * @dfs_precac_secondary_freq: Second segment freq for precac. + * Applicable to only legacy chips. + * @dfs_precac_secondary_freq_mhz: Second segment freq in MHZ for precac. + * Applicable to only legacy chips. + * @dfs_precac_primary_freq: PreCAC Primary freq applicable only to + * legacy chips. + * @dfs_precac_primary_freq_mhz: PreCAC Primary freq in MHZ applicable only + * to legacy chips. + * @dfs_defer_precac_channel_change: Defer precac channel change. + * @dfs_precac_inter_chan: Intermediate non-DFS channel used while + * doing precac. + * @dfs_precac_inter_chan_freq: Intermediate non-DFS freq used while + * doing precac. + * @dfs_autoswitch_des_chan: Desired channel which has to be used + * after precac. + * @dfs_autoswitch_des_chan_freq: Desired freq which has to be used + * after precac. + * @dfs_autoswitch_des_mode: Desired PHY mode which has to be used + * after precac. + * @dfs_pre_cac_timeout_channel_change: Channel change due to precac timeout. + * @wlan_dfs_task_timer: Dfs wait timer. + * @dur_multiplier: Duration multiplier. + * @wlan_dfs_isdfsregdomain: True when AP is in DFS domain + * @wlan_dfs_false_rssi_thres: False RSSI Threshold. + * @wlan_dfs_peak_mag: Peak mag. + * @radar_log[]: Radar log. + * @dfs_event_log_count: Event log count. + * @dfs_event_log_on: Event log on. + * @dfs_phyerr_count: Same as number of PHY radar interrupts. + * @dfs_phyerr_reject_count: When TLV is supported, # of radar events + * ignored after TLV is parsed. + * @dfs_phyerr_queued_count: Number of radar events queued for matching + * the filters. + * @dfs_phyerr_freq_min: Phyerr min freq. + * @dfs_phyerr_freq_max: Phyerr max freq. + * @dfs_phyerr_w53_counter: Phyerr w53 counter. + * @dfs_pri_multiplier: Allow pulse if they are within multiple of + * PRI for the radar type. + * @wlan_dfs_nol_timeout: NOL timeout. + * @update_nol: Update NOL. + * @dfs_seq_num: Sequence number. + * @dfs_nol_event[]: NOL event. + * @dfs_nol_timer: NOL list processing. + * @dfs_nol_free_list: NOL free list. + * @dfs_nol_elem_free_work: The work queue to free an NOL element. + * @dfs_cac_timer: CAC timer. + * @dfs_cac_valid_timer: Ignore CAC when this timer is running. + * @dfs_cac_timeout_override: Overridden cac timeout. + * @dfs_enable: DFS Enable. + * @dfs_cac_timer_running: DFS CAC timer running. + * @dfs_ignore_dfs: Ignore DFS. + * @dfs_ignore_cac: Ignore CAC. + * @dfs_cac_valid: DFS CAC valid. + * @dfs_cac_valid_time: Time for which CAC will be valid and will + * not be re-done. + * @dfs_precac_timeout_override: Overridden precac timeout. + * @dfs_num_precac_freqs: Number of PreCAC VHT80 frequencies. + * @dfs_precac_list: PreCAC list (contains individual trees). + * @dfs_precac_chwidth: PreCAC channel width enum. + * @dfs_curchan: DFS current channel. + * @dfs_prevchan: DFS previous channel. + * @dfs_cac_started_chan: CAC started channel. + * @dfs_pdev_obj: DFS pdev object. + * @dfs_is_offload_enabled: Set if DFS offload enabled. + * @dfs_agile_precac_freq_mhz: Freq in MHZ configured on Agile DFS engine. + * @dfs_use_nol: Use the NOL when radar found(default: TRUE) + * @dfs_nol_lock: Lock to protect nol list. + * @tx_leakage_threshold: Tx leakage threshold for dfs. + * @dfs_use_nol_subchannel_marking: Use subchannel marking logic to add only + * radar affected subchannel instead of all + * bonding channels. + * @dfs_host_wait_timer: The timer that is started from host after + * sending the average radar parameters. + * Before this timeout host expects its dfs + * status from fw. + * @dfs_average_pri: Average pri value of the received radar + * pulses. + * @dfs_average_duration: Average duration of the received radar + * pulses. + * @dfs_average_sidx: Average sidx of the received radar pulses. + * @dfs_is_host_wait_running: Indicates if host dfs status wait timer is + * running. + * @dfs_average_params_sent: Indicates if host has sent the average + * radar parameters. + * @dfs_no_res_from_fw: Indicates no response from fw. + * @dfs_spoof_check_failed: Indicates if the spoof check has failed. + * @dfs_spoof_test_done: Indicates if the sppof test is done. + * @dfs_status_timeout_override: Used to change the timeout value of + * dfs_host_wait_timer. + * @dfs_is_stadfs_enabled: Is STADFS enabled. + * @dfs_min_sidx: Minimum sidx of the received radar pulses. + * @dfs_max_sidx: Maximum sidx of the received radar pulses. + * @dfs_seg_id: Segment ID of the radar hit channel. + * @dfs_is_chirp: Radar Chirp in pulse present or not. + * @dfs_bw_reduced: DFS bandwidth reduced channel bit. + * @dfs_freq_offset: Frequency offset where radar was found. + * @dfs_cac_aborted: DFS cac is aborted. + * @dfs_disable_radar_marking: To mark or unmark NOL chan as radar hit. + * @dfs_data_struct_lock: DFS data structure lock. This is to protect + * all the filtering data structures. For + * example: dfs_bin5radars, dfs_filtertype, + * etc. + * @dfs_nol_ie_bandwidth: Minimum Bandwidth of subchannels that + * are added to NOL. + * @dfs_nol_ie_startfreq: The centre frequency of the starting + * subchannel in the current channel list + * to be sent in NOL IE with RCSA. + * @dfs_nol_ie_bitmap: The bitmap of radar affected subchannels + * in the current channel list + * to be sent in NOL IE with RCSA. + * @dfs_is_rcsa_ie_sent: To send or to not send RCSA IE. + * @dfs_is_nol_ie_sent: To send or to not send NOL IE. + * @dfs_legacy_precac_ucfg: User configuration for legacy preCAC in + * partial offload chipsets. + * @dfs_agile_precac_ucfg: User configuration for agile preCAC. + * @dfs_fw_adfs_support_non_160: Target Agile DFS support for non-160 BWs. + * @dfs_fw_adfs_support_160: Target Agile DFS support for 160 BW. + * @dfs_allow_hw_pulses: Allow/Block HW pulses. When synthetic + * pulses are injected, the HW pulses should + * be blocked and this variable should be + * false so that HW pulses and synthetic + * pulses do not get mixed up. + * defer timer running. + * @dfs_defer_params: DFS deferred event parameters (allocated + * only for the duration of defer alone). + */ +struct wlan_dfs { + uint32_t dfs_debug_mask; + int16_t dfs_curchan_radindex; + int16_t dfs_extchan_radindex; + uint32_t dfsdomain; + uint32_t dfs_proc_phyerr; + + STAILQ_HEAD(, dfs_event) dfs_eventq; + qdf_spinlock_t dfs_eventqlock; + + STAILQ_HEAD(, dfs_event) dfs_radarq; + qdf_spinlock_t dfs_radarqlock; + + STAILQ_HEAD(, dfs_event) dfs_arq; + qdf_spinlock_t dfs_arqlock; + + struct dfs_ar_state dfs_ar_state; + struct dfs_state dfs_radar[DFS_NUM_RADAR_STATES]; + struct dfs_filtertype *dfs_radarf[DFS_MAX_RADAR_TYPES]; + struct dfs_info dfs_rinfo; + struct dfs_bin5radars *dfs_b5radars; + int8_t **dfs_ftindextable; + struct dfs_nolelem *dfs_nol; + int dfs_nol_count; + struct wlan_dfs_phyerr_param dfs_defaultparams; + struct dfs_stats wlan_dfs_stats; + struct dfs_pulseline *pulses; + struct dfs_event *events; + + uint32_t wlan_radar_tasksched:1, + wlan_dfswait:1, + wlan_dfstest:1; + struct wlan_dfs_caps dfs_caps; + uint8_t wlan_dfstest_ieeechan; + uint32_t wlan_dfs_cac_time; + uint32_t wlan_dfstesttime; + qdf_timer_t wlan_dfswaittimer; + qdf_timer_t wlan_dfstesttimer; + qdf_timer_t wlan_dfs_debug_timer; + enum dfs_bangradar_types dfs_bangradar_type; + bool is_radar_found_on_secondary_seg; + bool dfs_radar_found_for_fo; + bool is_radar_during_precac; + qdf_spinlock_t dfs_precac_lock; + bool dfs_precac_enable; +#ifdef CONFIG_CHAN_NUM_API + uint8_t dfs_precac_secondary_freq; + uint8_t dfs_precac_primary_freq; +#endif +#ifdef CONFIG_CHAN_FREQ_API + uint16_t dfs_precac_secondary_freq_mhz; + uint16_t dfs_precac_primary_freq_mhz; +#endif + uint8_t dfs_defer_precac_channel_change; +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT +#ifdef CONFIG_CHAN_NUM_API + uint8_t dfs_precac_inter_chan; + uint8_t dfs_autoswitch_des_chan; +#endif + enum wlan_phymode dfs_autoswitch_des_mode; +#endif +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT +#ifdef CONFIG_CHAN_FREQ_API + uint16_t dfs_precac_inter_chan_freq; + uint16_t dfs_autoswitch_des_chan_freq; +#endif +#endif + uint8_t dfs_pre_cac_timeout_channel_change:1; + qdf_timer_t wlan_dfs_task_timer; + int dur_multiplier; + uint16_t wlan_dfs_isdfsregdomain; + int wlan_dfs_false_rssi_thres; + int wlan_dfs_peak_mag; + struct dfs_event_log radar_log[DFS_EVENT_LOG_SIZE]; + int dfs_event_log_count; + int dfs_event_log_on; + int dfs_phyerr_count; + int dfs_phyerr_reject_count; + int dfs_phyerr_queued_count; + int dfs_phyerr_freq_min; + int dfs_phyerr_freq_max; + int dfs_phyerr_w53_counter; + int dfs_pri_multiplier; + int wlan_dfs_nol_timeout; + bool update_nol; + uint32_t dfs_seq_num; + int dfs_nol_event[DFS_CHAN_MAX]; + qdf_timer_t dfs_nol_timer; + + TAILQ_HEAD(, dfs_nolelem) dfs_nol_free_list; + qdf_work_t dfs_nol_elem_free_work; + + qdf_timer_t dfs_cac_timer; + qdf_timer_t dfs_cac_valid_timer; + int dfs_cac_timeout_override; + uint8_t dfs_enable:1, + dfs_cac_timer_running:1, + dfs_ignore_dfs:1, + dfs_ignore_cac:1, + dfs_cac_valid:1; + uint32_t dfs_cac_valid_time; + int dfs_precac_timeout_override; + uint8_t dfs_num_precac_freqs; +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) + uint8_t dfs_disable_radar_marking; +#endif + TAILQ_HEAD(, dfs_precac_entry) dfs_precac_list; + enum phy_ch_width dfs_precac_chwidth; + + struct dfs_channel *dfs_curchan; + struct dfs_channel *dfs_prevchan; + struct dfs_channel dfs_cac_started_chan; + struct wlan_objmgr_pdev *dfs_pdev_obj; + struct dfs_soc_priv_obj *dfs_soc_obj; +#if defined(QCA_SUPPORT_AGILE_DFS) || defined(ATH_SUPPORT_ZERO_CAC_DFS) + uint8_t dfs_psoc_idx; +#endif +#ifdef CONFIG_CHAN_NUM_API + uint8_t dfs_agile_precac_freq; +#endif +#ifdef CONFIG_CHAN_FREQ_API + uint16_t dfs_agile_precac_freq_mhz; +#endif + bool dfs_is_offload_enabled; + int dfs_use_nol; + qdf_spinlock_t dfs_nol_lock; + uint16_t tx_leakage_threshold; + bool dfs_use_nol_subchannel_marking; + uint8_t dfs_spoof_test_done:1; +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) + qdf_timer_t dfs_host_wait_timer; + uint32_t dfs_average_pri; + uint32_t dfs_average_duration; + uint32_t dfs_average_sidx; + uint8_t dfs_is_host_wait_running:1, + dfs_average_params_sent:1, + dfs_no_res_from_fw:1, + dfs_spoof_check_failed:1; + struct dfs_channel dfs_radar_found_chan; + int dfs_status_timeout_override; +#endif + bool dfs_is_stadfs_enabled; + int32_t dfs_min_sidx; + int32_t dfs_max_sidx; + uint8_t dfs_seg_id; + uint8_t dfs_is_chirp; + bool dfs_bw_reduced; + int32_t dfs_freq_offset; + bool dfs_cac_aborted; + qdf_spinlock_t dfs_data_struct_lock; + uint8_t dfs_nol_ie_bandwidth; + uint16_t dfs_nol_ie_startfreq; + uint8_t dfs_nol_ie_bitmap; + bool dfs_is_rcsa_ie_sent; + bool dfs_is_nol_ie_sent; + uint8_t dfs_legacy_precac_ucfg:1, + dfs_agile_precac_ucfg:1, + dfs_fw_adfs_support_non_160:1, + dfs_fw_adfs_support_160:1; +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(WLAN_DFS_SYNTHETIC_RADAR) + bool dfs_allow_hw_pulses; +#endif + struct dfs_mode_switch_defer_params dfs_defer_params; +}; + +#if defined(QCA_SUPPORT_AGILE_DFS) || defined(ATH_SUPPORT_ZERO_CAC_DFS) +/** + * struct wlan_dfs_priv - dfs private struct with agile capability info + * @wlan_dfs: pointer to wlan_dfs object. + * @agile_precac_active: agile precac active information for wlan_dfs_priv obj + */ +struct wlan_dfs_priv { + struct wlan_dfs *dfs; + bool agile_precac_active; +}; +#endif + +/** + * struct dfs_soc_priv_obj - dfs private data + * @psoc: pointer to PSOC object information + * @pdev: pointer to PDEV object information + * @dfs_is_phyerr_filter_offload: For some chip like Rome indicates too many + * phyerr packets in a short time, which causes + * OS hang. If this feild is configured as true, + * FW will do the pre-check, filter out some + * kinds of invalid phyerrors and indicate + * radar detection related information to host. + * @dfs_priv: array of dfs private structs with agile capability info + * @num_dfs_privs: array size of dfs private structs for given psoc. + * @cur_precac_dfs_index: current precac dfs index + * @dfs_precac_timer: agile precac timer + * @dfs_precac_timer_running: precac timer running flag + * @ocac_status: Off channel CAC complete status + * @dfs_nol_ctx: dfs NOL data for all radios. + */ +struct dfs_soc_priv_obj { + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + bool dfs_is_phyerr_filter_offload; +#if defined(QCA_SUPPORT_AGILE_DFS) || defined(ATH_SUPPORT_ZERO_CAC_DFS) + struct wlan_dfs_priv dfs_priv[WLAN_UMAC_MAX_PDEVS]; + uint8_t num_dfs_privs; + uint8_t cur_precac_dfs_index; + qdf_timer_t dfs_precac_timer; + uint8_t dfs_precac_timer_running; + bool precac_state_started; + bool ocac_status; +#endif + struct dfsreq_nolinfo *dfs_psoc_nolinfo; +}; + +/** + * enum DFS debug - This should match the table from if_ath.c. + * @WLAN_DEBUG_DFS: Minimal DFS debug. + * @WLAN_DEBUG_DFS1: Normal DFS debug. + * @WLAN_DEBUG_DFS2: Maximal DFS debug. + * @WLAN_DEBUG_DFS3: Matched filterID display. + * @WLAN_DEBUG_DFS_PHYERR: Phy error parsing. + * @WLAN_DEBUG_DFS_NOL: NOL related entries. + * @WLAN_DEBUG_DFS_PHYERR_SUM: PHY error summary. + * @WLAN_DEBUG_DFS_PHYERR_PKT: PHY error payload. + * @WLAN_DEBUG_DFS_BIN5: BIN5 checks. + * @WLAN_DEBUG_DFS_BIN5_FFT: BIN5 FFT check. + * @WLAN_DEBUG_DFS_BIN5_PULSE: BIN5 pulse check. + * @WLAN_DEBUG_DFS_FALSE_DET: False detection debug related prints. + * @WLAN_DEBUG_DFS_FALSE_DET2: Second level check to confirm poisitive + * detection. + * @WLAN_DEBUG_DFS_RANDOM_CHAN: Random channel selection. + */ +enum { + WLAN_DEBUG_DFS = 0x00000100, + WLAN_DEBUG_DFS1 = 0x00000200, + WLAN_DEBUG_DFS2 = 0x00000400, + WLAN_DEBUG_DFS3 = 0x00000800, + WLAN_DEBUG_DFS_PHYERR = 0x00001000, + WLAN_DEBUG_DFS_NOL = 0x00002000, + WLAN_DEBUG_DFS_PHYERR_SUM = 0x00004000, + WLAN_DEBUG_DFS_PHYERR_PKT = 0x00008000, + WLAN_DEBUG_DFS_BIN5 = 0x00010000, + WLAN_DEBUG_DFS_BIN5_FFT = 0x00020000, + WLAN_DEBUG_DFS_BIN5_PULSE = 0x00040000, + WLAN_DEBUG_DFS_FALSE_DET = 0x00080000, + WLAN_DEBUG_DFS_FALSE_DET2 = 0x00100000, + WLAN_DEBUG_DFS_RANDOM_CHAN = 0x00200000, + WLAN_DEBUG_DFS_MAX = 0x80000000, + WLAN_DEBUG_DFS_ALWAYS = WLAN_DEBUG_DFS_MAX +}; + +/** + * enum host dfs spoof check status. + * @HOST_DFS_CHECK_PASSED: Host indicates RADAR detected and the FW + * confirms it to be spoof radar to host. + * @HOST_DFS_CHECK_FAILED: Host doesn't indicate RADAR detected or spoof + * radar parameters by + * WMI_HOST_DFS_RADAR_FOUND_CMDID doesn't match. + * @HOST_DFS_STATUS_CHECK_HW_RADAR: Host indicates RADAR detected and the + * FW confirms it to be real HW radar to host. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +enum { + HOST_DFS_STATUS_CHECK_PASSED = 0, + HOST_DFS_STATUS_CHECK_FAILED = 1, + HOST_DFS_STATUS_CHECK_HW_RADAR = 2 +}; +#endif + +/** + * struct dfs_phy_err - DFS phy error. + * @fulltsf: 64-bit TSF as read from MAC. + * @is_pri: Detected on primary channel. + * @is_ext: Detected on extension channel. + * @is_dc: Detected at DC. + * @is_early: Early detect. + * @do_check_chirp: Whether to check hw_chirp/sw_chirp. + * @is_hw_chirp: Hardware-detected chirp. + * @is_sw_chirp: Software detected chirp. + * @rs_tstamp: 32 bit TSF from RX descriptor (event). + * @freq: Centre frequency of event - KHz. + * @freq_lo: Lower bounds of frequency - KHz. + * @freq_hi: Upper bounds of frequency - KHz. + * @rssi: Pulse RSSI. + * @dur: Pulse duration, raw (not uS). + * @seg_id: HT80_80/HT160 use. + * @sidx: Seg index. + * @freq_offset_khz: Freq offset in KHz. + * @peak_mag: Peak mag. + * @total_gain: Total gain. + * @mb_gain: Mb gain. + * @relpwr_db: Relpower in DB. + * @pulse_delta_diff: Pulse delta diff. + * @pulse_delta_peak: Pulse delta peak. + * @pulse_psidx_diff: Pulse psidx diff. + * + * Chirp notes! + * + * Pre-Sowl chips don't do FFT reports, so chirp pulses simply show up + * as long duration pulses. + * + * The bin5 checking code would simply look for a chirp pulse of the correct + * duration (within MIN_BIN5_DUR and MAX_BIN5_DUR) and add it to the "chirp" + * pattern. + * + * For Sowl and later, an FFT was done on longer duration frames. If those + * frames looked like a chirp, their duration was adjusted to fall within + * the chirp duration limits. If the pulse failed the chirp test (it had + * no FFT data or the FFT didn't meet the chirping requirements) then the + * pulse duration was adjusted to be greater than MAX_BIN5_DUR, so it + * would always fail chirp detection. + * + * This is pretty horrible. + * + * The eventual goal for chirp handling is thus: + * + * 1)In case someone ever wants to do chirp detection with this code on + * chips that don't support chirp detection, you can still do it based + * on pulse duration. That's your problem to solve. + * + * 2)For chips that do hardware chirp detection or FFT, the "do_check_chirp" + * bit should be set. + * + * 3)Then, either is_hw_chirp or is_sw_chirp is set, indicating that + * the hardware or software post-processing of the chirp event found + * that indeed it was a chirp. + * + * 4)Finally, the bin5 code should just check whether the chirp bits are + * set and behave appropriately, falling back onto the duration checks + * if someone wishes to use this on older hardware (or with disabled + * FFTs, for whatever reason.) + * + * XXX TODO: + * + * 1)add duration in uS and raw duration, so the PHY error parsing + * code is responsible for doing the duration calculation; + * 2)add ts in raw and corrected, so the PHY error parsing + * code is responsible for doing the offsetting, not the radar + * event code. + */ +struct dfs_phy_err { + uint64_t fulltsf; + uint32_t is_pri:1, + is_ext:1, + is_dc:1, + is_early:1, + do_check_chirp:1, + is_hw_chirp:1, + is_sw_chirp:1; + uint32_t rs_tstamp; + uint32_t freq; + uint32_t freq_lo; + uint32_t freq_hi; + uint8_t rssi; + uint8_t dur; + uint8_t seg_id; + int sidx; + u_int freq_offset_khz; + int peak_mag; + int total_gain; + int mb_gain; + int relpwr_db; + uint8_t pulse_delta_diff; + int8_t pulse_delta_peak; + int16_t pulse_psidx_diff; +}; + +/** + * struct rx_radar_status - Parsed radar status + * @raw_tsf: Raw tsf + * @tsf_offset: TSF offset. + * @rssi: RSSI. + * @pulse_duration: Pulse duration. + * @is_chirp: Is chirp. + * @delta_peak: Delta peak. + * @delta_diff: Delta diff. + * @sidx: Starting frequency. + * @freq_offset: Frequency offset. + * @agc_total_gain: AGC total gain. + * @agc_mb_gain: AGC MB gain. + */ +struct rx_radar_status { + uint32_t raw_tsf; + uint32_t tsf_offset; + int rssi; + int pulse_duration; + int is_chirp:1; + int delta_peak; + int delta_diff; + int sidx; + int freq_offset; /* in KHz */ + int agc_total_gain; + int agc_mb_gain; +}; + +/** + * struct rx_search_fft_report - FFT report. + * @total_gain_db: Total gain in Db. + * @base_pwr_db: Base power in Db. + * @fft_chn_idx: FFT channel index. + * @peak_sidx: Peak sidx. + * @relpwr_db: Real power in Db. + * @avgpwr_db: Average power in Db. + * @peak_mag: Peak Mag. + * @num_str_bins_ib: Num dtr BINs IB + * @seg_id: Segment ID + */ +struct rx_search_fft_report { + uint32_t total_gain_db; + uint32_t base_pwr_db; + int fft_chn_idx; + int peak_sidx; + int relpwr_db; + int avgpwr_db; + int peak_mag; + int num_str_bins_ib; + int seg_id; +}; + +/** + * dfs_process_radarevent() - process the radar event generated for a pulse. + * @dfs: Pointer to wlan_dfs structure. + * @chan: Current channel. + * + * There is currently no way to specify that a radar event has occurred on + * a specific channel, so the current methodology is to mark both the pri + * and ext channels as being unavailable. This should be fixed for 802.11ac + * or we'll quickly run out of valid channels to use. + * + * If Radar found, this marks the channel (and the extension channel, if HT40) + * as having seen a radar event. It marks CHAN_INTERFERENCE and will add it to + * the local NOL implementation. This is only done for 'usenol=1', as the other + * two modes don't do radar notification or CAC/CSA/NOL; it just notes there + * was a radar. + */ +void dfs_process_radarevent(struct wlan_dfs *dfs, + struct dfs_channel *chan); + +/** + * dfs_nol_addchan() - Add channel to NOL. + * @dfs: Pointer to wlan_dfs structure. + * @freq: frequency to add to NOL. + * @dfs_nol_timeout: NOL timeout. + */ +void dfs_nol_addchan(struct wlan_dfs *dfs, + uint16_t freq, + uint32_t dfs_nol_timeout); + +/** + * dfs_get_nol() - Get NOL. + * @dfs: Pointer to wlan_dfs structure. + * @dfs_nol: Pointer to dfsreq_nolelem structure to save the channels from NOL. + * @nchan: Number of channels. + */ +void dfs_get_nol(struct wlan_dfs *dfs, + struct dfsreq_nolelem *dfs_nol, + int *nchan); + +/** + * dfs_set_nol() - Set NOL. + * @dfs: Pointer to wlan_dfs structure. + * @dfs_nol: Pointer to dfsreq_nolelem structure. + * @nchan: Number of channels. + */ +void dfs_set_nol(struct wlan_dfs *dfs, + struct dfsreq_nolelem *dfs_nol, + int nchan); + +/** + * dfs_nol_update() - NOL update + * @dfs: Pointer to wlan_dfs structure. + * + * Notify the driver/umac that it should update the channel radar/NOL flags + * based on the current NOL list. + */ +void dfs_nol_update(struct wlan_dfs *dfs); + +/** + * dfs_nol_timer_cleanup() - NOL timer cleanup. + * @dfs: Pointer to wlan_dfs structure. + * + * Cancels the NOL timer and frees the NOL elements. + */ +void dfs_nol_timer_cleanup(struct wlan_dfs *dfs); + +/** + * dfs_nol_timer_detach() - Free NOL timer. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_nol_timer_detach(struct wlan_dfs *dfs); + +/** + * dfs_nol_workqueue_cleanup() - Flushes NOL workqueue. + * @dfs: Pointer to wlan_dfs structure. + * + * Flushes the NOL workqueue. + */ +void dfs_nol_workqueue_cleanup(struct wlan_dfs *dfs); + +/** + * dfs_retain_bin5_burst_pattern() - Retain the BIN5 burst pattern. + * @dfs: Pointer to wlan_dfs structure. + * @diff_ts: Timestamp diff. + * @old_dur: Old duration. + */ +uint8_t dfs_retain_bin5_burst_pattern(struct wlan_dfs *dfs, + uint32_t diff_ts, + uint8_t old_dur); + +/** + * dfs_bin5_check_pulse() - BIN5 check pulse. + * @dfs: Pointer to wlan_dfs structure. + * @re: Pointer to dfs_event structure. + * @br: Pointer to dfs_bin5radars structure. + * + * Reject the pulse if: + * 1) It's outside the RSSI threshold; + * 2) It's outside the pulse duration; + * 3) It's been verified by HW/SW chirp checking + * and neither of those found a chirp. + */ +int dfs_bin5_check_pulse(struct wlan_dfs *dfs, + struct dfs_event *re, + struct dfs_bin5radars *br); + +/** + * dfs_bin5_addpulse() - BIN5 add pulse. + * @dfs: Pointer to wlan_dfs structure. + * @br: Pointer to dfs_bin5radars structure. + * @re: Pointer to dfs_event structure. + * @thists: Timestamp. + */ +int dfs_bin5_addpulse(struct wlan_dfs *dfs, + struct dfs_bin5radars *br, + struct dfs_event *re, + uint64_t thists); + +/** + * dfs_bin5_check() - BIN5 check. + * @dfs: Pointer to wlan_dfs structure. + * + * If the dfs structure is NULL (which should be illegal if everyting is working + * properly, then signify that a bin5 radar was found. + */ +int dfs_bin5_check(struct wlan_dfs *dfs); + +/** + * dfs_check_chirping() - Check chirping. + * @dfs: Pointer to wlan_dfs structure. + * @buf: Phyerr buffer + * @datalen: Phyerr buf length + * @is_ctl: detected on primary channel. + * @is_ext: detected on extension channel. + * @slope: Slope + * @is_dc: DC found + * + * This examines the FFT data contained in the PHY error information to figure + * out whether the pulse is moving across frequencies. + */ +int dfs_check_chirping(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + int is_ctl, + int is_ext, + int *slope, + int *is_dc); + +/** + * dfs_get_random_bin5_dur() - Get random BIN5 duration. + * @dfs: Pointer to wlan_dfs structure. + * @tstamp: Timestamp. + * + * Chirping pulses may get cut off at DC and report lower durations. + * This function will compute a suitable random duration for each pulse. + * Duration must be between 50 and 100 us, but remember that in + * wlan_process_phyerr() which calls this function, we are dealing with the + * HW reported duration (unconverted). dfs_process_radarevent() will + * actually convert the duration into the correct value. + * This function doesn't take into account whether the hardware + * is operating in 5GHz fast clock mode or not. + * And this function doesn't take into account whether the hardware + * is peregrine or not. + */ +int dfs_get_random_bin5_dur(struct wlan_dfs *dfs, + uint64_t tstamp); + +/** + * dfs_print_delayline() - Prints delayline. + * @dfs: Pointer to wlan_dfs structure. + * @dl: Pointer to dfs_delayline structure. + */ +void dfs_print_delayline(struct wlan_dfs *dfs, + struct dfs_delayline *dl); + +/** + * dfs_print_nol() - Print NOL elements. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_print_nol(struct wlan_dfs *dfs); + +/** + * dfs_print_filter() - Prints the filter. + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + */ +void dfs_print_filter(struct wlan_dfs *dfs, + struct dfs_filter *rf); + +/** + * dfs_getchanstate() - Get chan state. + * @dfs: Pointer to wlan_dfs structure. + * @index: To save the index of dfs_radar[] + * @ext_chan_flag: Extension channel flag; + */ +struct dfs_state *dfs_getchanstate(struct wlan_dfs *dfs, + uint8_t *index, + int ext_ch_flag); + +/** + * dfs_round() - DFS found. + * @val: Convert durations to TSF ticks. + * + * Return: TSF ticks. + */ +uint32_t dfs_round(int32_t val); + +/** + * dfs_reset_alldelaylines() - Reset alldelaylines. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_reset_alldelaylines(struct wlan_dfs *dfs); +#else +static inline void dfs_reset_alldelaylines(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_reset_delayline() - Clear only a single delay line. + * @dl: Pointer to dfs_delayline structure. + */ +void dfs_reset_delayline(struct dfs_delayline *dl); + +/** + * dfs_reset_filter_delaylines() - Reset filter delaylines. + * @dft: Pointer to dfs_filtertype structure. + */ +void dfs_reset_filter_delaylines(struct dfs_filtertype *dft); + +/** + * dfs_reset_radarq() - Reset radar queue. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_reset_radarq(struct wlan_dfs *dfs); +#else +static inline void dfs_reset_radarq(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_add_pulse() - Adds pulse to the queue. + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @re: Pointer to dfs_event structure. + * @deltaT: deltaT value. + * @this_ts: Last time stamp. + */ +void dfs_add_pulse(struct wlan_dfs *dfs, + struct dfs_filter *rf, + struct dfs_event *re, + uint32_t deltaT, + uint64_t this_ts); + +/** + * dfs_bin_check() - BIN check + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @deltaT: deltaT value. + * @width: Width + * @ext_chan_flag: Extension channel flag. + */ +int dfs_bin_check(struct wlan_dfs *dfs, + struct dfs_filter *rf, + uint32_t deltaT, + uint32_t dur, + int ext_chan_flag); + +/** + * dfs_bin_pri_check() - BIN PRI check + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @dl: Pointer to dfs_delayline structure. + * @score: Primary score. + * @refpri: Current "filter" time for start of pulse in usecs. + * @refdur: Duration value. + * @ext_chan_flag: Extension channel flag. + * @fundamentalpri: Highest PRI. + */ +int dfs_bin_pri_check(struct wlan_dfs *dfs, + struct dfs_filter *rf, + struct dfs_delayline *dl, + uint32_t score, + uint32_t refpri, + uint32_t refdur, + int ext_chan_flag, + int fundamentalpri); + +/** + * dfs_staggered_check() - Detection implementation for staggered PRIs. + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @deltaT: Delta of the Timestamp. + * @width: Duration of radar pulse. + * + * Return: 1 on success and 0 on failure. + */ +int dfs_staggered_check(struct wlan_dfs *dfs, + struct dfs_filter *rf, + uint32_t deltaT, + uint32_t width); + +/** + * dfs_get_pri_margin() - Get Primary margin. + * @dfs: Pointer to wlan_dfs structure. + * @is_extchan_detect: Extension channel detect. + * @is_fixed_pattern: Fixed pattern. + * + * For the extension channel, if legacy traffic is present, we see a lot of + * false alarms, so make the PRI margin narrower depending on the busy % for + * the extension channel. + * + * Return: Returns pri_margin. + */ +int dfs_get_pri_margin(struct wlan_dfs *dfs, + int is_extchan_detect, + int is_fixed_pattern); + +/** + * dfs_get_filter_threshold() - Get filter threshold. + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @is_extchan_detect: Extension channel detect. + * + * For the extension channel, if legacy traffic is present, we see a lot of + * false alarms, so make the thresholds higher depending on the busy % for the + * extension channel. + * + * Return: Returns threshold. + */ +int dfs_get_filter_threshold(struct wlan_dfs *dfs, + struct dfs_filter *rf, + int is_extchan_detect); + +#if defined(QCA_MCL_DFS_SUPPORT) +/** + * dfs_process_ar_event() - Process the ar event. + * @dfs: Pointer to wlan_dfs structure. + * @chan: Current channel structure. + */ +static inline void dfs_process_ar_event(struct wlan_dfs *dfs, + struct dfs_channel *chan) +{ +} + +/** + * dfs_reset_ar() - resets the ar state. + * @dfs: pointer to wlan_dfs structure. + */ +static inline void dfs_reset_ar(struct wlan_dfs *dfs) +{ +} + +/** + * dfs_reset_arq() - resets the ar queue. + * @dfs: pointer to wlan_dfs structure. + */ +static inline void dfs_reset_arq(struct wlan_dfs *dfs) +{ +} + +#else +void dfs_process_ar_event(struct wlan_dfs *dfs, + struct dfs_channel *chan); + +void dfs_reset_ar(struct wlan_dfs *dfs); +void dfs_reset_arq(struct wlan_dfs *dfs); +#endif + +/** + * dfs_is_radar_enabled() - check if radar detection is enabled. + * @dfs: Pointer to wlan_dfs structure. + * @ignore_dfs: if 1 then radar detection is disabled.. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_is_radar_enabled(struct wlan_dfs *dfs, + int *ignore_dfs); +#else +static inline void dfs_is_radar_enabled(struct wlan_dfs *dfs, + int *ignore_dfs) +{ +} +#endif + +/** + * dfs_process_phyerr_bb_tlv() - Parses the PHY error and populates the + * dfs_phy_err struct. + * @dfs: Pointer to wlan_dfs structure. + * @buf: Phyerr buffer + * @datalen: Phyerr buf len + * @rssi: RSSI + * @ext_rssi: Extension RSSI. + * @rs_tstamp: Time stamp. + * @fulltsf: TSF64. + * @e: Pointer to dfs_phy_err structure. + * + * Return: Returns 1. + */ +int dfs_process_phyerr_bb_tlv(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t rssi, + uint8_t ext_rssi, + uint32_t rs_tstamp, + uint64_t fulltsf, + struct dfs_phy_err *e); + +/** + * dfs_reset() - DFS reset + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_reset(struct wlan_dfs *dfs); + +/** + * dfs_radar_enable() - Enables the radar. + * @dfs: Pointer to wlan_dfs structure. + * @no_cac: If no_cac is 0, it cancels the CAC. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_radar_enable(struct wlan_dfs *dfs, + int no_cac, uint32_t opmode); +#else +static inline void dfs_radar_enable(struct wlan_dfs *dfs, + int no_cac, uint32_t opmode) +{ +} +#endif + +/** + * dfs_process_phyerr() - Process phyerr. + * @dfs: Pointer to wlan_dfs structure. + * @buf: Phyerr buffer. + * @datalen: phyerr buffer length. + * @r_rssi: RSSI. + * @r_ext_rssi: Extension channel RSSI. + * @r_rs_tstamp: Timestamp. + * @r_fulltsf: TSF64. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_process_phyerr(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t r_rssi, + uint8_t r_ext_rssi, + uint32_t r_rs_tstamp, + uint64_t r_fulltsf); +#else +static inline void dfs_process_phyerr(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t r_rssi, + uint8_t r_ext_rssi, + uint32_t r_rs_tstamp, + uint64_t r_fulltsf) +{ +} +#endif + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * dfs_process_phyerr_filter_offload() - Process radar event. + * @dfs: Pointer to wlan_dfs structure. + * @wlan_radar_event: Pointer to radar_event_info structure. + * + * Return: None + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_process_phyerr_filter_offload(struct wlan_dfs *dfs, + struct radar_event_info *wlan_radar_event); +#else +static inline void dfs_process_phyerr_filter_offload( + struct wlan_dfs *dfs, + struct radar_event_info *wlan_radar_event) +{ +} +#endif +#endif + +/** + * dfs_get_radars() - Based on the chipset, calls init radar table functions. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_get_radars(struct wlan_dfs *dfs); +#else +static inline void dfs_get_radars(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_attach() - Wrapper function to allocate memory for wlan_dfs members. + * @dfs: Pointer to wlan_dfs structure. + */ +int dfs_attach(struct wlan_dfs *dfs); + + +/** + * dfs_create_object() - Creates DFS object. + * @dfs: Pointer to wlan_dfs structure. + */ +int dfs_create_object(struct wlan_dfs **dfs); + +/** + * dfs_destroy_object() - Destroys the DFS object. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_destroy_object(struct wlan_dfs *dfs); + +/** + * dfs_detach() - Wrapper function to free dfs variables. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_detach(struct wlan_dfs *dfs); + +/** + * dfs_cac_valid_reset() - Cancels the dfs_cac_valid_timer timer. + * @dfs: Pointer to wlan_dfs structure. + * @prevchan_ieee: Prevchan number. + * @prevchan_flags: Prevchan flags. + */ +#ifdef CONFIG_CHAN_NUM_API +void dfs_cac_valid_reset(struct wlan_dfs *dfs, + uint8_t prevchan_ieee, + uint32_t prevchan_flags); +#endif + +/** + * dfs_cac_valid_reset_for_freq() - Cancels the dfs_cac_valid_timer timer. + * @dfs: Pointer to wlan_dfs structure. + * @prevchan_chan: Prevchan frequency + * @prevchan_flags: Prevchan flags. + */ +#ifdef CONFIG_CHAN_FREQ_API +void dfs_cac_valid_reset_for_freq(struct wlan_dfs *dfs, + uint16_t prevchan_freq, + uint32_t prevchan_flags); +#endif + +/** + * dfs_cac_stop() - Clear the AP CAC timer. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_cac_stop(struct wlan_dfs *dfs); + +/** + * dfs_cancel_cac_timer() - Cancels the CAC timer. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_cancel_cac_timer(struct wlan_dfs *dfs); + +/** + * dfs_start_cac_timer() - Starts the CAC timer. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_start_cac_timer(struct wlan_dfs *dfs); + +/** + * dfs_set_update_nol_flag() - Sets update_nol flag. + * @dfs: Pointer to wlan_dfs structure. + * @val: update_nol flag. + */ +void dfs_set_update_nol_flag(struct wlan_dfs *dfs, + bool val); + +/** + * dfs_get_update_nol_flag() - Returns update_nol flag. + * @dfs: Pointer to wlan_dfs structure. + */ +bool dfs_get_update_nol_flag(struct wlan_dfs *dfs); + +/** + * dfs_get_use_nol() - Get usenol. + * @dfs: Pointer to wlan_dfs structure. + */ +int dfs_get_use_nol(struct wlan_dfs *dfs); + +/** + * dfs_get_nol_timeout() - Get NOL timeout. + * @dfs: Pointer to wlan_dfs structure. + */ +int dfs_get_nol_timeout(struct wlan_dfs *dfs); + +/** + * dfs_is_ap_cac_timer_running() - Returns the dfs cac timer. + * @dfs: Pointer to wlan_dfs structure. + */ +int dfs_is_ap_cac_timer_running(struct wlan_dfs *dfs); + +/** + * dfs_control()- Used to process ioctls related to DFS. + * @dfs: Pointer to wlan_dfs structure. + * @id: Command type. + * @indata: Input buffer. + * @insize: size of the input buffer. + * @outdata: A buffer for the results. + * @outsize: Size of the output buffer. + */ +int dfs_control(struct wlan_dfs *dfs, + u_int id, + void *indata, + uint32_t insize, + void *outdata, + uint32_t *outsize); + +/** + * dfs_getnol() - Wrapper function for dfs_get_nol() + * @dfs: Pointer to wlan_dfs structure. + * @dfs_nolinfo: Pointer to dfsreq_nolinfo structure. + */ +void dfs_getnol(struct wlan_dfs *dfs, + void *dfs_nolinfo); + +/** + * dfs_get_override_cac_timeout() - Get override CAC timeout value. + * @dfs: Pointer to DFS object. + * @cac_timeout: Pointer to save the CAC timeout value. + */ +int dfs_get_override_cac_timeout(struct wlan_dfs *dfs, + int *cac_timeout); + +/** + * dfs_override_cac_timeout() - Override the default CAC timeout. + * @dfs: Pointer to DFS object. + * @cac_timeout: CAC timeout value. + */ +int dfs_override_cac_timeout(struct wlan_dfs *dfs, + int cac_timeout); + +/** + * dfs_clear_nolhistory() - unmarks WLAN_CHAN_CLR_HISTORY_RADAR flag for + * all the channels in dfs_ch_channels. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_clear_nolhistory(struct wlan_dfs *dfs); + +/** + * ol_if_dfs_configure() - Initialize the RADAR table for offload chipsets. + * @dfs: Pointer to wlan_dfs structure. + * + * This is called during a channel change or regulatory domain + * reset; in order to fetch the new configuration information and + * program the DFS pattern matching module. + * + * Eventually this should be split into "fetch config" (which can + * happen at regdomain selection time) and "configure DFS" (which + * can happen at channel config time) so as to minimise overheads + * when doing channel changes. However, this'll do for now. + */ +void ol_if_dfs_configure(struct wlan_dfs *dfs); + +/** + * dfs_init_radar_filters() - Init Radar filters. + * @dfs: Pointer to wlan_dfs structure. + * @radar_info: Pointer to wlan_dfs_radar_tab_info structure. + */ +int dfs_init_radar_filters(struct wlan_dfs *dfs, + struct wlan_dfs_radar_tab_info *radar_info); + +/** + * dfs_get_radars_for_ar5212() - Initialize radar table for AR5212 chipsets. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_get_radars_for_ar5212(struct wlan_dfs *dfs); + +/** + * dfs_get_radars_for_ar5416() - Initialize radar table for AR5416 chipsets. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_get_radars_for_ar5416(struct wlan_dfs *dfs); + +/** + * dfs_get_radars_for_ar9300() - Initialize radar table for AR9300 chipsets. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_get_radars_for_ar9300(struct wlan_dfs *dfs); + +/** + * dfs_print_filters() - Print the filters. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_print_filters(struct wlan_dfs *dfs); + +/** + * dfs_clear_stats() - Clear stats. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_clear_stats(struct wlan_dfs *dfs); + +/** + * dfs_radar_disable() - Disables the radar. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +int dfs_radar_disable(struct wlan_dfs *dfs); +#else +static inline int dfs_radar_disable(struct wlan_dfs *dfs) +{ + return 0; +} +#endif + +/** + * dfs_get_debug_info() - Get debug info. + * @dfs: Pointer to wlan_dfs structure. + * @data: void pointer to the data to save dfs_proc_phyerr. + */ +int dfs_get_debug_info(struct wlan_dfs *dfs, + void *data); + +/** + * dfs_cac_timer_attach() - Initialize cac timers. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_cac_timer_attach(struct wlan_dfs *dfs); + +/** + * dfs_cac_timer_reset() - Cancel dfs cac timers. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_cac_timer_reset(struct wlan_dfs *dfs); + +/** + * dfs_cac_timer_detach() - Free dfs cac timers. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_cac_timer_detach(struct wlan_dfs *dfs); + +/** + * dfs_nol_timer_init() - Initialize NOL timers. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_nol_timer_init(struct wlan_dfs *dfs); + +/** + * dfs_nol_attach() - Initialize NOL variables. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_nol_attach(struct wlan_dfs *dfs); + +/** + * dfs_nol_detach() - Detach NOL variables. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_nol_detach(struct wlan_dfs *dfs); + +/** + * dfs_print_nolhistory() - Print NOL history. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_print_nolhistory(struct wlan_dfs *dfs); + +/** + * dfs_stacac_stop() - Clear the STA CAC timer. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_stacac_stop(struct wlan_dfs *dfs); + +/** + * dfs_find_precac_secondary_vht80_chan() - Get a VHT80 channel with the + * precac primary center frequency. + * @dfs: Pointer to wlan_dfs structure. + * @chan: Pointer to dfs channel structure. + */ +void dfs_find_precac_secondary_vht80_chan(struct wlan_dfs *dfs, + struct dfs_channel *chan); + +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT +/** + * dfs_precac_csa() - Automatically switch the channel to the DFS channel + * on which PreCAC was completed without finding a RADAR. + * Use CSA with TBTT_COUNT to switch the channel. + * @dfs: Pointer to dfs handler. + * + * Return: Void + */ +void dfs_precac_csa(struct wlan_dfs *dfs); +#endif + +/** + * dfs_phyerr_param_copy() - Function to copy src buf to dest buf. + * @dst: dest buf. + * @src: src buf. + */ +void dfs_phyerr_param_copy(struct wlan_dfs_phyerr_param *dst, + struct wlan_dfs_phyerr_param *src); + +/** + * dfs_get_thresholds() - Get the threshold value. + * @dfs: Pointer to wlan_dfs structure. + * @param: Pointer to wlan_dfs_phyerr_param structure. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +int dfs_get_thresholds(struct wlan_dfs *dfs, + struct wlan_dfs_phyerr_param *param); +#else +static inline int dfs_get_thresholds(struct wlan_dfs *dfs, + struct wlan_dfs_phyerr_param *param) +{ + return 0; +} +#endif + +/** + * dfs_set_thresholds() - Sets the threshold value. + * @dfs: Pointer to wlan_dfs structure. + * @threshtype: DFS ioctl param type. + * @value: Threshold value. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +int dfs_set_thresholds(struct wlan_dfs *dfs, + const uint32_t threshtype, + const uint32_t value); +#else +static inline int dfs_set_thresholds(struct wlan_dfs *dfs, + const uint32_t threshtype, + const uint32_t value) +{ + return 0; +} +#endif + +/** + * dfs_check_intersect_excl() - Check whether curfreq falls within lower_freq + * and upper_freq, exclusively. + * @low_freq : lower bound frequency value. + * @high_freq: upper bound frequency value. + * @chan_freq: Current frequency value to be checked. + * + * Return: returns true if overlap found, else returns false. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +bool dfs_check_intersect_excl(int low_freq, int high_freq, int chan_freq); +#else +static inline bool dfs_check_intersect_excl(int low_freq, int high_freq, + int chan_freq) +{ + return false; +} +#endif + +/** + * dfs_check_etsi_overlap() - Check whether given frequency centre/channel + * width entry overlap with frequency spread in any way. + * @center_freq : current channel centre frequency. + * @chan_width : current channel width. + * @en302_502_freq_low : overlap frequency lower bound. + * @en302_502_freq_high : overlap frequency upper bound. + * + * Return: returns 1 if overlap found, else returns 0. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +int dfs_check_etsi_overlap(int center_freq, int chan_width, + int en302_502_freq_low, int en302_502_freq_high); +#else +static inline int dfs_check_etsi_overlap(int center_freq, int chan_width, + int en302_502_freq_low, + int en302_502_freq_high) +{ + return 0; +} +#endif + +/** + * dfs_is_en302_502_applicable() - Check whether current channel frequecy spread + * overlaps with EN 302 502 radar type + * frequency range. + *@dfs: Pointer to wlan_dfs structure. + * + * Return: returns true if overlap found, else returns false. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +bool dfs_is_en302_502_applicable(struct wlan_dfs *dfs); +#else +static inline bool dfs_is_en302_502_applicable(struct wlan_dfs *dfs) +{ + return false; +} +#endif + +/** + * dfs_set_current_channel() - Set DFS current channel. + * @dfs: Pointer to wlan_dfs structure. + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_flags: Channel flags. + * @dfs_ch_flagext: Extended channel flags. + * @dfs_ch_ieee: IEEE channel number. + * @dfs_ch_vhtop_ch_freq_seg1: Channel Center frequency1. + * @dfs_ch_vhtop_ch_freq_seg2: Channel Center frequency2. + */ +#ifdef CONFIG_CHAN_NUM_API +void dfs_set_current_channel(struct wlan_dfs *dfs, + uint16_t dfs_ch_freq, + uint64_t dfs_ch_flags, + uint16_t dfs_ch_flagext, + uint8_t dfs_ch_ieee, + uint8_t dfs_ch_vhtop_ch_freq_seg1, + uint8_t dfs_ch_vhtop_ch_freq_seg2); +#endif + +#ifdef CONFIG_CHAN_FREQ_API +/** + * dfs_set_current_channel_for_freq() - Set DFS current channel. + * @dfs: Pointer to wlan_dfs structure. + * @dfs_chan_freq: Frequency in Mhz. + * @dfs_chan_flags: Channel flags. + * @dfs_chan_flagext: Extended channel flags. + * @dfs_chan_ieee: IEEE channel number. + * @dfs_chan_vhtop_freq_seg1: Channel Center frequency1. + * @dfs_chan_vhtop_freq_seg2: Channel Center frequency2. + * @dfs_chan_mhz_freq_seg1: Channel center frequency of primary segment in MHZ. + * @dfs_chan_mhz_freq_seg2: Channel center frequency of secondary segment in MHZ + * applicable only for 80+80MHZ mode of operation. + */ +void dfs_set_current_channel_for_freq(struct wlan_dfs *dfs, + uint16_t dfs_chan_freq, + uint64_t dfs_chan_flags, + uint16_t dfs_chan_flagext, + uint8_t dfs_chan_ieee, + uint8_t dfs_chan_vhtop_freq_seg1, + uint8_t dfs_chan_vhtop_freq_seg2, + uint16_t dfs_chan_mhz_freq_seg1, + uint16_t dfs_chan_mhz_freq_seg2); + +#endif +/** + * dfs_get_nol_chfreq_and_chwidth() - Get channel freq and width from NOL list. + * @dfs_nol: Pointer to NOL channel entry. + * @nol_chfreq: Pointer to save channel frequency. + * @nol_chwidth: Pointer to save channel width. + * @index: Index to dfs_nol list. + */ +void dfs_get_nol_chfreq_and_chwidth(struct dfsreq_nolelem *dfs_nol, + uint32_t *nol_chfreq, + uint32_t *nol_chwidth, + int index); + +/** + * dfs_process_phyerr_owl() - Process an Owl-style phy error. + * @dfs: Pointer to wlan_dfs structure. + * @buf: Phyerr buffer + * @datalen: Phyerr buf len + * @rssi: RSSI + * @ext_rssi: Extension RSSI. + * @rs_tstamp: Time stamp. + * @fulltsf: TSF64. + * @e: Pointer to dfs_phy_err structure. + * + * Return: Returns 1. + */ +int dfs_process_phyerr_owl(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t rssi, + uint8_t ext_rssi, + uint32_t rs_tstamp, + uint64_t fulltsf, + struct dfs_phy_err *e); + +/** + * dfs_process_phyerr_sowl() -Process a Sowl/Howl style phy error. + * @dfs: Pointer to wlan_dfs structure. + * @buf: Phyerr buffer + * @datalen: Phyerr buf len + * @rssi: RSSI + * @ext_rssi: Extension RSSI. + * @rs_tstamp: Time stamp. + * @fulltsf: TSF64. + * @e: Pointer to dfs_phy_err structure. + * + * Return: Returns 1. + */ +int dfs_process_phyerr_sowl(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t rssi, + uint8_t ext_rssi, + uint32_t rs_tstamp, + uint64_t fulltsf, + struct dfs_phy_err *e); + +/** + * dfs_process_phyerr_merlin() - Process a Merlin/Osprey style phy error. + * dfs_phy_err struct. + * @dfs: Pointer to wlan_dfs structure. + * @buf: Phyerr buffer + * @datalen: Phyerr buf len + * @rssi: RSSI + * @ext_rssi: Extension RSSI. + * @rs_tstamp: Time stamp. + * @fulltsf: TSF64. + * @e: Pointer to dfs_phy_err structure. + * + * Return: Returns 1. + */ +int dfs_process_phyerr_merlin(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t rssi, + uint8_t ext_rssi, + uint32_t rs_tstamp, + uint64_t fulltsf, + struct dfs_phy_err *e); + +/* + * __dfs_process_radarevent() - Continuation of process a radar event function. + * @dfs: Pointer to wlan_dfs structure. + * @ft: Pointer to dfs_filtertype structure. + * @re: Pointer to dfs_event structure. + * @this_ts: Timestamp. + * + * There is currently no way to specify that a radar event has occurred on + * a specific channel, so the current methodology is to mark both the pri + * and ext channels as being unavailable. This should be fixed for 802.11ac + * or we'll quickly run out of valid channels to use. + * + * Return: If a radar event is found, return 1. Otherwise, return 0. + */ +void __dfs_process_radarevent(struct wlan_dfs *dfs, + struct dfs_filtertype *ft, + struct dfs_event *re, + uint64_t this_ts, + int *found, + int *false_radar_found); + +/** + * dfs_radar_found_action() - Radar found action + * @dfs: Pointer to wlan_dfs structure. + * @bangradar: true if radar is due to bangradar command. + * @seg_id: Segment id. + */ +void dfs_radar_found_action(struct wlan_dfs *dfs, + bool bangradar, + uint8_t seg_id); + +/** + * bin5_rules_check_internal() - This is a extension of dfs_bin5_check(). + * @dfs: Pointer to wlan_dfs structure. + * @br: Pointer to dfs_bin5radars structure. + * @bursts: Bursts. + * @numevents: Number of events. + * @prev: prev index. + * @i: Index. + * @this: index to br_elems[] + */ +void bin5_rules_check_internal(struct wlan_dfs *dfs, + struct dfs_bin5radars *br, + uint32_t *bursts, + uint32_t *numevents, + uint32_t prev, + uint32_t i, + uint32_t this, + int *index); + +/** + * dfs_main_task_testtimer_init() - Initialize dfs task testtimer. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_main_task_testtimer_init(struct wlan_dfs *dfs); + +/** + * dfs_stop() - Clear dfs timers. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_stop(struct wlan_dfs *dfs); + +/** + * dfs_update_cur_chan_flags() - Update DFS channel flag and flagext. + * @dfs: Pointer to wlan_dfs structure. + * @flags: New channel flags + * @flagext: New Extended flags + */ +void dfs_update_cur_chan_flags(struct wlan_dfs *dfs, + uint64_t flags, + uint16_t flagext); + +/** + * dfs_radarevent_basic_sanity() - Check basic sanity of the radar event + * @dfs: Pointer to wlan_dfs structure. + * @chan: Current channel. + * + * Return: If a radar event found on NON-DFS channel return 0. Otherwise, + * return 1. + */ +int dfs_radarevent_basic_sanity(struct wlan_dfs *dfs, + struct dfs_channel *chan); + +/** + * wlan_psoc_get_dfs_txops() - Get dfs_tx_ops pointer + * @psoc: Pointer to psoc structure. + * + * Return: Pointer to dfs_tx_ops. + */ +struct wlan_lmac_if_dfs_tx_ops * +wlan_psoc_get_dfs_txops(struct wlan_objmgr_psoc *psoc); + +/** + * dfs_nol_free_list() - Free NOL elements. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_nol_free_list(struct wlan_dfs *dfs); + +/** + * dfs_second_segment_radar_disable() - Disables the second segment radar. + * @dfs: Pointer to wlan_dfs structure. + * + * This is called when AP detects the radar, to (potentially) disable + * the radar code. + * + * Return: returns 0. + */ +int dfs_second_segment_radar_disable(struct wlan_dfs *dfs); + +/** + * dfs_fetch_nol_ie_info() - Fill NOL information to be sent with RCSA. + * @dfs - Pointer to wlan_dfs structure. + * @nol_ie_bandwidth - Minimum subchannel bandwidth. + * @nol_ie_startfreq - Radar affected channel list's first subchannel's + * - centre frequency. + * @nol_ie_bitmap - NOL bitmap denoting affected subchannels. + */ +void dfs_fetch_nol_ie_info(struct wlan_dfs *dfs, uint8_t *nol_ie_bandwidth, + uint16_t *nol_ie_startfreq, uint8_t *nol_ie_bitmap); + +/** + * dfs_set_rcsa_flags() - Set flags that are required for sending RCSA and + * NOL IE. + * @dfs: Pointer to wlan_dfs structure. + * @is_rcsa_ie_sent: Boolean to check if RCSA IE should be sent or not. + * @is_nol_ie_sent: Boolean to check if NOL IE should be sent or not. + */ +void dfs_set_rcsa_flags(struct wlan_dfs *dfs, bool is_rcsa_ie_sent, + bool is_nol_ie_sent); + +/** + * dfs_get_rcsa_flags() - Get flags that are required for sending RCSA and + * NOL IE. + * @dfs: Pointer to wlan_dfs structure. + * @is_rcsa_ie_sent: Boolean to check if RCSA IE should be sent or not. + * @is_nol_ie_sent: Boolean to check if NOL IE should be sent or not. + */ +void dfs_get_rcsa_flags(struct wlan_dfs *dfs, bool *is_rcsa_ie_sent, + bool *is_nol_ie_sent); + +/** + * dfs_process_nol_ie_bitmap() - Update NOL with external radar information. + * @dfs - Pointer to wlan_dfs structure. + * @nol_ie_bandwidth - Minimum subchannel bandwidth. + * @nol_ie_starfreq - Radar affected channel list's first subchannel's + * - centre frequency. + * @nol_ie_bitmap - Bitmap denoting radar affected subchannels. + * + * Return: True if NOL IE should be propagated, else false. + */ +bool dfs_process_nol_ie_bitmap(struct wlan_dfs *dfs, uint8_t nol_ie_bandwidth, + uint16_t nol_ie_startfreq, + uint8_t nol_ie_bitmap); + +/** + * dfs_is_cac_required() - Check if DFS CAC is required for the current channel. + * @dfs: Pointer to wlan_dfs structure. + * @cur_chan: Pointer to current channel of dfs_channel structure. + * @prev_chan: Pointer to previous channel of dfs_channel structure. + * @continue_current_cac: If AP can start CAC then this variable indicates + * whether to continue with the current CAC or restart the CAC. This variable + * is valid only if this function returns true. + * + * Return: true if AP requires CAC or can continue current CAC, else false. + */ +bool dfs_is_cac_required(struct wlan_dfs *dfs, + struct dfs_channel *cur_chan, + struct dfs_channel *prev_chan, + bool *continue_current_cac); + +/** + * dfs_task_testtimer_reset() - stop dfs test timer. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_task_testtimer_reset(struct wlan_dfs *dfs); + +/** + * dfs_is_freq_in_nol() - check if given channel in nol list + * @dfs: Pointer to wlan_dfs structure + * @freq: channel frequency + * + * check if given channel in nol list. + * + * Return: true if channel in nol, false else + */ +bool dfs_is_freq_in_nol(struct wlan_dfs *dfs, uint32_t freq); + +/** + * dfs_task_testtimer_detach() - Free dfs test timer. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_task_testtimer_detach(struct wlan_dfs *dfs); + +/** + * dfs_timer_detach() - Free dfs timers. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_timer_detach(struct wlan_dfs *dfs); + +/** + * dfs_is_disable_radar_marking_set() - Check if radar marking is set on + * NOL chan. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) +int dfs_is_disable_radar_marking_set(struct wlan_dfs *dfs, + bool *disable_radar_marking); +#else +static inline int dfs_is_disable_radar_marking_set(struct wlan_dfs *dfs, + bool *disable_radar_marking) +{ + return QDF_STATUS_SUCCESS; +} +#endif +/** + * dfs_get_disable_radar_marking() - Get the value of disable radar marking. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) +bool dfs_get_disable_radar_marking(struct wlan_dfs *dfs); +#endif + +/** + * dfs_reset_agile_config() - Reset the ADFS config variables. + * @dfs: Pointer to dfs_soc_priv_obj. + */ +#ifdef QCA_SUPPORT_AGILE_DFS +void dfs_reset_agile_config(struct dfs_soc_priv_obj *dfs_soc); +#endif + +/** + * dfs_reinit_timers() - Reinit timers in DFS. + * @dfs: Pointer to wlan_dfs. + */ +int dfs_reinit_timers(struct wlan_dfs *dfs); + +/** + * dfs_reset_dfs_prevchan() - Reset DFS previous channel structure. + * @dfs: Pointer to wlan_dfs object. + * + * Return: None. + */ +void dfs_reset_dfs_prevchan(struct wlan_dfs *dfs); + +/** + * dfs_init_tmp_psoc_nol() - Init temporary psoc NOL structure. + * @dfs: Pointer to wlan_dfs object. + * @num_radios: Num of radios in the PSOC. + * + * Return: void. + */ +void dfs_init_tmp_psoc_nol(struct wlan_dfs *dfs, uint8_t num_radios); + +/** + * dfs_deinit_tmp_psoc_nol() - De-init temporary psoc NOL structure. + * @dfs: Pointer to wlan_dfs object. + * + * Return: void. + */ +void dfs_deinit_tmp_psoc_nol(struct wlan_dfs *dfs); + +/** + * dfs_save_dfs_nol_in_psoc() - Save NOL data of given pdev. + * @dfs: Pointer to wlan_dfs object. + * @pdev_id: The pdev ID which will have the NOL data. + * @low_5ghz_freq: The low 5GHz frequency value of the target pdev id. + * @high_5ghz_freq: The high 5GHz frequency value of the target pdev id. + * + * Based on the frequency of the NOL channel, copy it to the target pdev_id + * structure in psoc. + * + * Return: void. + */ +void dfs_save_dfs_nol_in_psoc(struct wlan_dfs *dfs, + uint8_t pdev_id, + uint16_t low_5ghz_freq, + uint16_t high_5ghz_freq); + +/** + * dfs_reinit_nol_from_psoc_copy() - Reinit saved NOL data to corresponding + * DFS object. + * @dfs: Pointer to wlan_dfs object. + * @pdev_id: pdev_id of the given dfs object. + * + * Return: void. + */ +void dfs_reinit_nol_from_psoc_copy(struct wlan_dfs *dfs, uint8_t pdev_id); + +/** + * dfs_is_hw_mode_switch_in_progress() - Check if HW mode switch in progress. + * @dfs: Pointer to wlan_dfs object. + * + * Return: True if mode switch is in progress, else false. + */ +bool dfs_is_hw_mode_switch_in_progress(struct wlan_dfs *dfs); + +/** + * dfs_start_mode_switch_defer_timer() - start mode switch defer timer. + * @dfs: Pointer to wlan_dfs object. + * + * Return: void. + */ +void dfs_start_mode_switch_defer_timer(struct wlan_dfs *dfs); + +/** + * dfs_complete_deferred_tasks() - Process mode switch completion event and + * handle deffered tasks. + * @dfs: Pointer to wlan_dfs object. + * + * Return: void. + */ +void dfs_complete_deferred_tasks(struct wlan_dfs *dfs); + +/** + * dfs_process_cac_completion() - Process DFS CAC completion event. + * @dfs: Pointer to wlan_dfs object. + * + * Return: void. + */ +void dfs_process_cac_completion(struct wlan_dfs *dfs); +#endif /* _DFS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_channel.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_channel.h new file mode 100644 index 0000000000000000000000000000000000000000..cb5a636c2fa04238bcd2f0f74eb09083b1c3eba0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_channel.h @@ -0,0 +1,294 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has channel related information. + */ + +#ifndef _DFS_CHANNEL_H_ +#define _DFS_CHANNEL_H_ + +/* Channel attributes */ + +/* OFDM channel */ +#define WLAN_CHAN_OFDM 0x0000000000000040 + +/* 2 GHz spectrum channel. */ +#define WLAN_CHAN_2GHZ 0x0000000000000080 + +/* 5 GHz spectrum channel */ +#define WLAN_CHAN_5GHZ 0x0000000000000100 + +/* Radar found on channel */ +#define WLAN_CHAN_DFS_RADAR 0x0000000000001000 + +/* HT 20 channel */ +#define WLAN_CHAN_HT20 0x0000000000010000 + +/* HT 40 with extension channel above */ +#define WLAN_CHAN_HT40PLUS 0x0000000000020000 + +/* HT 40 with extension channel below */ +#define WLAN_CHAN_HT40MINUS 0x0000000000040000 + +/* VHT 20 channel */ +#define WLAN_CHAN_VHT20 0x0000000000100000 + +/* VHT 40 with extension channel above */ +#define WLAN_CHAN_VHT40PLUS 0x0000000000200000 + +/* VHT 40 with extension channel below */ +#define WLAN_CHAN_VHT40MINUS 0x0000000000400000 + +/* VHT 80 channel */ +#define WLAN_CHAN_VHT80 0x0000000000800000 + +/* VHT 160 channel */ +#define WLAN_CHAN_VHT160 0x0000000004000000 + +/* VHT 80_80 channel */ +#define WLAN_CHAN_VHT80_80 0x0000000008000000 + +/* HE 20 channel */ +#define WLAN_CHAN_HE20 0x0000000010000000 + +/* HE 40 with extension channel above */ +#define WLAN_CHAN_HE40PLUS 0x0000000020000000 + +/* HE 40 with extension channel below */ +#define WLAN_CHAN_HE40MINUS 0x0000000040000000 + +/* HE 80 channel */ +#define WLAN_CHAN_HE80 0x0000000200000000 + +/* HE 160 channel */ +#define WLAN_CHAN_HE160 0x0000000400000000 + +/* HE 80_80 channel */ +#define WLAN_CHAN_HE80_80 0x0000000800000000 + +/* flagext */ +#define WLAN_CHAN_DFS_RADAR_FOUND 0x01 + +/* DFS required on channel */ +#define WLAN_CHAN_DFS 0x0002 + +/* DFS required on channel for 2nd band of 80+80*/ +#define WLAN_CHAN_DFS_CFREQ2 0x0004 + +/* if channel has been checked for DFS */ +#define WLAN_CHAN_DFS_CLEAR 0x0008 + +/* DFS radar history for slave device(STA mode) */ +#define WLAN_CHAN_HISTORY_RADAR 0x0100 + +/* DFS CAC valid for slave device(STA mode) */ +#define WLAN_CHAN_CAC_VALID 0x0200 + +#define WLAN_IS_CHAN_2GHZ(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_2GHZ) != 0) + +#define WLAN_IS_CHAN_5GHZ(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_5GHZ) != 0) + +#define WLAN_IS_CHAN_11N_HT40(_c) \ + (((_c)->dfs_ch_flags & (WLAN_CHAN_HT40PLUS | \ + WLAN_CHAN_HT40MINUS)) != 0) + +#define WLAN_IS_CHAN_11N_HT40PLUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_HT40PLUS) != 0) + +#define WLAN_IS_CHAN_11N_HT40MINUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_HT40MINUS) != 0) + +#define WLAN_CHAN_A \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_OFDM) + +#define WLAN_IS_CHAN_A(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_A) == WLAN_CHAN_A) + +#define WLAN_CHAN_11NA_HT20 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HT20) + +#define WLAN_CHAN_11NA_HT40PLUS \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HT40PLUS) + +#define WLAN_CHAN_11NA_HT40MINUS \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HT40MINUS) + +#define WLAN_IS_CHAN_11NA_HT20(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11NA_HT20) == \ + WLAN_CHAN_11NA_HT20) + +#define WLAN_IS_CHAN_11NA_HT40PLUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11NA_HT40PLUS) == \ + WLAN_CHAN_11NA_HT40PLUS) + +#define WLAN_IS_CHAN_11NA_HT40MINUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11NA_HT40MINUS) == \ + WLAN_CHAN_11NA_HT40MINUS) + +#define WLAN_CHAN_11AC_VHT20 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_VHT20) + +#define WLAN_CHAN_11AC_VHT40PLUS \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_VHT40PLUS) + +#define WLAN_CHAN_11AC_VHT40MINUS \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_VHT40MINUS) + +#define WLAN_CHAN_11AC_VHT80 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_VHT80) + +#define WLAN_CHAN_11AC_VHT160 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_VHT160) + +#define WLAN_CHAN_11AC_VHT80_80 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_VHT80_80) + +#define WLAN_IS_CHAN_11AC_VHT20(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AC_VHT20) == \ + WLAN_CHAN_11AC_VHT20) + +#define WLAN_IS_CHAN_11AC_VHT40(_c) \ + (((_c)->dfs_ch_flags & (WLAN_CHAN_VHT40PLUS | \ + WLAN_CHAN_VHT40MINUS)) != 0) + +#define WLAN_IS_CHAN_11AC_VHT40PLUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AC_VHT40PLUS) == \ + WLAN_CHAN_11AC_VHT40PLUS) + +#define WLAN_IS_CHAN_11AC_VHT40MINUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AC_VHT40MINUS) == \ + WLAN_CHAN_11AC_VHT40MINUS) + +#define WLAN_IS_CHAN_11AC_VHT80(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AC_VHT80) == \ + WLAN_CHAN_11AC_VHT80) + +#define WLAN_IS_CHAN_11AC_VHT160(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AC_VHT160) == \ + WLAN_CHAN_11AC_VHT160) + +#define WLAN_IS_CHAN_11AC_VHT80_80(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AC_VHT80_80) == \ + WLAN_CHAN_11AC_VHT80_80) + +#define WLAN_CHAN_11AXA_HE20 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HE20) + +#define WLAN_CHAN_11AXA_HE40PLUS \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HE40PLUS) + +#define WLAN_CHAN_11AXA_HE40MINUS \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HE40MINUS) + +#define WLAN_CHAN_11AXA_HE80 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HE80) + +#define WLAN_CHAN_11AXA_HE160 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HE160) + +#define WLAN_CHAN_11AXA_HE80_80 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HE80_80) + +#define WLAN_IS_CHAN_11AXA_HE20(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AXA_HE20) == \ + WLAN_CHAN_11AXA_HE20) + +#define WLAN_IS_CHAN_11AXA_HE40PLUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AXA_HE40PLUS) == \ + WLAN_CHAN_11AXA_HE40PLUS) + +#define WLAN_IS_CHAN_11AXA_HE40MINUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AXA_HE40MINUS) == \ + WLAN_CHAN_11AXA_HE40MINUS) + +#define WLAN_IS_CHAN_11AXA_HE80(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AXA_HE80) == \ + WLAN_CHAN_11AXA_HE80) + +#define WLAN_IS_CHAN_11AXA_HE160(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AXA_HE160) == \ + WLAN_CHAN_11AXA_HE160) + +#define WLAN_IS_CHAN_11AXA_HE80_80(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AXA_HE80_80) == \ + WLAN_CHAN_11AXA_HE80_80) + +#define WLAN_IS_CHAN_DFS(_c) \ + (((_c)->dfs_ch_flagext & \ + (WLAN_CHAN_DFS | WLAN_CHAN_DFS_CLEAR)) == WLAN_CHAN_DFS) + +#define WLAN_IS_CHAN_DFS_CFREQ2(_c) \ + (((_c)->dfs_ch_flagext & \ + (WLAN_CHAN_DFS_CFREQ2|WLAN_CHAN_DFS_CLEAR)) == \ + WLAN_CHAN_DFS_CFREQ2) + +#define WLAN_IS_PRIMARY_OR_SECONDARY_CHAN_DFS(_c) \ + (WLAN_IS_CHAN_DFS(_c) || \ + ((WLAN_IS_CHAN_11AC_VHT160(_c) || \ + WLAN_IS_CHAN_11AC_VHT80_80(_c) || \ + WLAN_IS_CHAN_11AXA_HE160(_c) || \ + WLAN_IS_CHAN_11AXA_HE80_80(_c)) \ + && WLAN_IS_CHAN_DFS_CFREQ2(_c))) + +#define WLAN_IS_CHAN_RADAR(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_DFS_RADAR) == \ + WLAN_CHAN_DFS_RADAR) + +#define WLAN_IS_CHAN_HISTORY_RADAR(_c) \ + (((_c)->dfs_ch_flagext & WLAN_CHAN_HISTORY_RADAR) == \ + WLAN_CHAN_HISTORY_RADAR) + +#define WLAN_CHAN_CLR_HISTORY_RADAR(_c) \ + ((_c)->dfs_ch_flagext &= ~WLAN_CHAN_HISTORY_RADAR) + +#define WLAN_CHAN_ANY (-1) /* token for ``any channel'' */ + +#define WLAN_CHAN_ANYC \ + ((struct dfs_channel *) WLAN_CHAN_ANY) + +#define WLAN_IS_CHAN_MODE_20(_c) \ + (WLAN_IS_CHAN_A(_c) || \ + WLAN_IS_CHAN_11NA_HT20(_c) || \ + WLAN_IS_CHAN_11AC_VHT20(_c) || \ + WLAN_IS_CHAN_11AXA_HE20(_c)) + +#define WLAN_IS_CHAN_MODE_40(_c) \ + (WLAN_IS_CHAN_11AC_VHT40PLUS(_c) || \ + WLAN_IS_CHAN_11AC_VHT40MINUS(_c) || \ + WLAN_IS_CHAN_11NA_HT40PLUS(_c) || \ + WLAN_IS_CHAN_11NA_HT40MINUS(_c) || \ + WLAN_IS_CHAN_11AXA_HE40PLUS(_c) || \ + WLAN_IS_CHAN_11AXA_HE40MINUS(_c)) + +#define WLAN_IS_CHAN_MODE_80(_c) \ + (WLAN_IS_CHAN_11AC_VHT80(_c) || \ + WLAN_IS_CHAN_11AXA_HE80(_c)) + +#define WLAN_IS_CHAN_MODE_160(_c) \ + (WLAN_IS_CHAN_11AC_VHT160(_c) || \ + WLAN_IS_CHAN_11AXA_HE160(_c)) + +#define WLAN_IS_CHAN_MODE_80_80(_c) \ + (WLAN_IS_CHAN_11AC_VHT80_80(_c) || \ + WLAN_IS_CHAN_11AXA_HE80_80(_c)) + +#endif /* _DFS_CHANNEL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_filter_init.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_filter_init.h new file mode 100644 index 0000000000000000000000000000000000000000..5b86d7a104fb77e5fc2e84fe471abe94a5c2c6b1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_filter_init.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: umac/dfs/core/src/dfs_filter_init.h + * This file contains dfs interfaces + */ + +#ifndef _DFS_FILTER_INIT_H_ +#define _DFS_FILTER_INIT_H_ + +/** + * dfs_main_attach() - Allocates memory for wlan_dfs members. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +int dfs_main_attach(struct wlan_dfs *dfs); +#else +static inline int dfs_main_attach(struct wlan_dfs *dfs) +{ + return 0; +} +#endif + +/** + * dfs_main_detach() - Free dfs variables. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_main_detach(struct wlan_dfs *dfs); +#else +static inline void dfs_main_detach(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_start_host_based_bangradar() - Mark as bangradar and start + * wlan_dfs_task_timer. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +int dfs_start_host_based_bangradar(struct wlan_dfs *dfs); +#else +static inline int dfs_start_host_based_bangradar(struct wlan_dfs *dfs) +{ + return 0; +} +#endif + +/** + * dfs_main_timer_reset() - Stop dfs timers. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_main_timer_reset(struct wlan_dfs *dfs); +#else +static inline void dfs_main_timer_reset(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_main_timer_detach() - Free dfs timers. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_main_timer_detach(struct wlan_dfs *dfs); +#else +static inline void dfs_main_timer_detach(struct wlan_dfs *dfs) +{ +} +#endif + +#if defined(DA_SUPPORT) && defined(WLAN_DFS_DIRECT_ATTACH) +void dfs_get_da_radars(struct wlan_dfs *dfs); +#else +static inline void dfs_get_da_radars(struct wlan_dfs *dfs) +{ +} +#endif + +#endif /* _DFS_FILTER_INIT_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_full_offload.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_full_offload.h new file mode 100644 index 0000000000000000000000000000000000000000..de49e3e3adcd8161ae17036f9cd26a487be936aa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_full_offload.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: dfs_full_offload.h + * This file contains full offload specific dfs interfaces + */ + +#ifndef _DFS_FULL_OFFLOAD_H_ +#define _DFS_FULL_OFFLOAD_H_ + +#if defined(WLAN_DFS_FULL_OFFLOAD) + +/** + * dfs_fill_emulate_bang_radar_test() - Update dfs unit test arguments and + * send bangradar command to firmware. + * @dfs: Pointer to wlan_dfs structure. + * @segid: Segment Identifier(Primary and Secondary) + * @is_chirp: Boolean to determine if Chirp or Non Chirp. + * @freq_offset: Value of frequency offset from centre frequency. + * @dfs_unit_test: Pointer to Unit test command structure + * + * Return: If the event is received return 0. + */ +int dfs_fill_emulate_bang_radar_test(struct wlan_dfs *dfs, + uint8_t segid, bool is_chirp, int32_t freq_offset, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test); + +#else +static inline int dfs_fill_emulate_bang_radar_test(struct wlan_dfs *dfs, + uint8_t segid, bool is_chirp, int32_t freq_offset, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test) +{ + return 0; +} +#endif +#endif /* _DFS_FULL_OFFLOAD_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_internal.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..33bcb7514375d089b92992edda3f2a0a6d404f3f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_internal.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: The structurs and functions in this file are used only within DFS + * component. + */ + +#ifndef _DFS_INTERNAL_H_ +#define _DFS_INTERNAL_H_ + +#include +#include "dfs.h" + +/** + * enum DFS_DOMAIN - DFS domain + * @DFS_UNINIT_DOMAIN: Uninitialized domain + * @DFS_FCC_DOMAIN: FCC domain + * @DFS_ETSI_DOMAIN: ETSI domain + * @DFS_MKK4_DOMAIN: MKK domain + * @DFS_CN_DOMAIN: China domain + * @DFS_KR_DOMAIN: Korea domain + * @DFS_MKKN_DOMAIN: MKKN domain + * @DFS_UNDEF_DOMAIN: Undefined domain + */ +enum DFS_DOMAIN { + DFS_UNINIT_DOMAIN = 0, + DFS_FCC_DOMAIN = 1, + DFS_ETSI_DOMAIN = 2, + DFS_MKK4_DOMAIN = 3, + DFS_CN_DOMAIN = 4, + DFS_KR_DOMAIN = 5, + DFS_MKKN_DOMAIN = 6, + DFS_UNDEF_DOMAIN +}; + +/* CAPABILITY: the device support STA DFS */ +#define WLAN_CEXT_STADFS 0x00000040 + +/** + * dfs_chan2freq() - Convert channel to frequency value. + * @chan: Pointer to dfs_channel structure. + * + * Return: Channel frequency. + */ +uint16_t dfs_chan2freq(struct dfs_channel *chan); + +#endif /* _DFS_INTERNAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_ioctl_private.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_ioctl_private.h new file mode 100644 index 0000000000000000000000000000000000000000..5609633be1070dee9b41deddcb0cb337633ea2b0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_ioctl_private.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2011, 2016-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2010, Atheros Communications Inc. + * All Rights Reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has dfs param copy functions. + */ + +#ifndef _DFS_IOCTL_PRIVATE_H_ +#define _DFS_IOCTL_PRIVATE_H_ + + +static inline void +wlan_dfs_dfsparam_to_ioctlparam(struct wlan_dfs_phyerr_param *src, + struct dfs_ioctl_params *dst) +{ + dst->dfs_firpwr = src->pe_firpwr; + dst->dfs_rrssi = src->pe_rrssi; + dst->dfs_height = src->pe_height; + dst->dfs_prssi = src->pe_prssi; + dst->dfs_inband = src->pe_inband; + dst->dfs_relpwr = src->pe_relpwr; + dst->dfs_relstep = src->pe_relstep; + dst->dfs_maxlen = src->pe_maxlen; +} + +#endif /* _DFS_IOCTL_PRIVATE_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_partial_offload_radar.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_partial_offload_radar.h new file mode 100644 index 0000000000000000000000000000000000000000..4fb8f7ec0dfbb54c6894bead3ccdba70c2a338a5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_partial_offload_radar.h @@ -0,0 +1,252 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: dfs_partial_offload_radar.h + * This file contains partial offload specific dfs interfaces + */ + +#ifndef _DFS_PARTIAL_OFFLOAD_RADAR_H_ +#define _DFS_PARTIAL_OFFLOAD_RADAR_H_ + +/** + * dfs_get_po_radars() - Initialize the RADAR table for PO. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_get_po_radars(struct wlan_dfs *dfs); +#else +static inline void dfs_get_po_radars(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_send_avg_params_to_fw - send avg radar parameters to FW. + * @dfs: Pointer to wlan_dfs structure. + * @params: Pointer to dfs_radar_found_params. + * + * Return: None + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_send_avg_params_to_fw(struct wlan_dfs *dfs, + struct dfs_radar_found_params *params); +#else +static inline +void dfs_send_avg_params_to_fw(struct wlan_dfs *dfs, + struct dfs_radar_found_params *params) +{ +} +#endif + +/** + * dfs_host_wait_timer_init() - Initialize dfs host status wait timer. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_host_wait_timer_init(struct wlan_dfs *dfs); +#else +static inline void dfs_host_wait_timer_init(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_host_wait_timer_detach() - Free dfs host status wait timer. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_host_wait_timer_detach(struct wlan_dfs *dfs); +#else +static inline void dfs_host_wait_timer_detach(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_set_override_status_timeout() - Change the dfs host status timeout. + * @dfs: Pointer to wlan_dfs structure. + * @status_timeout: timeout value. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS dfs_set_override_status_timeout(struct wlan_dfs *dfs, + int status_timeout); +#else +static inline QDF_STATUS dfs_set_override_status_timeout(struct wlan_dfs *dfs, + int status_timeout) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * dfs_get_override_status_timeout() - Get the dfs host status timeout value. + * @dfs: Pointer to wlan_dfs structure. + * @status_timeout: Pointer to timeout value. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS dfs_get_override_status_timeout(struct wlan_dfs *dfs, + int *status_timeout); +#else +static inline +QDF_STATUS dfs_get_override_status_timeout(struct wlan_dfs *dfs, + int *status_timeout) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * dfs_radarfound_action_fcc() - The dfs action on radar detection by host for + * FCC domain. + * @dfs: Pointer to wlan_dfs structure. + * @seg_id: segment id. + * + * Return: None + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_radarfound_action_fcc(struct wlan_dfs *dfs, uint8_t seg_id); +#else +static inline void dfs_radarfound_action_fcc(struct wlan_dfs *dfs, + uint8_t seg_id) +{ +} +#endif + +/** + * dfs_host_wait_timer_reset() - Stop dfs host wait timer. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_host_wait_timer_reset(struct wlan_dfs *dfs); +#else +static inline void dfs_host_wait_timer_reset(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_remove_spoof_channel_from_nol() - Remove the spoofed radar hit channel + * from NOL. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_remove_spoof_channel_from_nol(struct wlan_dfs *dfs); +#else +static inline void dfs_remove_spoof_channel_from_nol(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_reset_spoof_test() - reset the spoof test variables. + * @dfs: Pointer to wlan_dfs structure. + * + * Return: None. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_reset_spoof_test(struct wlan_dfs *dfs); +#else +static inline void dfs_reset_spoof_test(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_action_on_fw_radar_status_check() - The dfs action on host dfs + * confirmation by fw. + * @dfs: Pointer to wlan_dfs structure. + * @status: pointer to host dfs status. + * + * Return: None + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_action_on_fw_radar_status_check(struct wlan_dfs *dfs, + uint32_t *status); +#else +static inline void dfs_action_on_fw_radar_status_check(struct wlan_dfs *dfs, + uint32_t *status) +{ +} +#endif + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_false_radarfound_reset_vars(struct wlan_dfs *dfs); +#else +static inline void dfs_false_radarfound_reset_vars(struct wlan_dfs *dfs) +{ +} +#endif + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(WLAN_DFS_SYNTHETIC_RADAR) +/** + * dfs_allow_hw_pulses() - Set or unset dfs_allow_hw_pulses + * which allow or disallow HW pulses. + * @dfs: Pointer to DFS pdev object. + * @allow_hw_pulses: allow/disallow synthetic pulse detection true/false. + * + * Return: void + */ +void dfs_allow_hw_pulses(struct wlan_dfs *dfs, bool allow_hw_pulses); +#else +static inline void dfs_allow_hw_pulses(struct wlan_dfs *dfs, + bool allow_hw_pulses) +{ +} +#endif + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(WLAN_DFS_SYNTHETIC_RADAR) +/** + * dfs_is_hw_pulses_allowed() - Check if HW pulses are allowed or not. + * @pdev: Pointer to DFS pdev object. + * + * Return: bool + */ +bool dfs_is_hw_pulses_allowed(struct wlan_dfs *dfs); +#else +static inline bool dfs_is_hw_pulses_allowed(struct wlan_dfs *dfs) +{ + return true; +} +#endif + +/** + * dfs_inject_synthetic_pulse_sequence() - Inject the synthetic pulse to the + * phyerror processing algorithm. + * @dfs: Pointer to wlan_dfs structure. + * @buf: Pointer to buffer of pulses. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(WLAN_DFS_SYNTHETIC_RADAR) +QDF_STATUS dfs_inject_synthetic_pulse_sequence(struct wlan_dfs *dfs, + unsigned char *buf); +#else +static inline +QDF_STATUS dfs_inject_synthetic_pulse_sequence(struct wlan_dfs *dfs, + unsigned char *buf) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_DFS_PARTIAL_OFFLOAD && WLAN_DFS_SYNTHETIC_RADAR */ +#endif /* _DFS_PARTIAL_OFFLOAD_RADAR_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_phyerr_tlv.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_phyerr_tlv.h new file mode 100644 index 0000000000000000000000000000000000000000..0fad819bde94ff3131e571b958b5c0b46cae461a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_phyerr_tlv.h @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2012, 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has Radar summary. + */ + +#ifndef _DFS_PHYERR_TLV_H_ +#define _DFS_PHYERR_TLV_H_ + +/* + * Register manipulation macros that expect bit field defines + * to follow the convention that an _S suffix is appended for + * a shift count, while the field mask has no suffix. + */ +#define SM(_v, _f) (((_v) << _f##_S) & _f) +#define MS(_v, _f) (((_v) & _f) >> _f##_S) + +/* The TLV dword is at the beginning of each TLV section. */ +#define TLV_REG 0x00 + +#define TLV_LEN 0x0000FFFF +#define TLV_LEN_S 0 + +#define TLV_SIG 0x00FF0000 +#define TLV_SIG_S 16 + +#define TLV_TAG 0xFF000000 +#define TLV_TAG_S 24 + +#define TAG_ID_SEARCH_FFT_REPORT 0xFB +#define TAG_ID_RADAR_PULSE_SUMMARY 0xF8 + +/* + * Radar pulse summary + * + TYPE=0xF8 (Radar pulse summary reprot) + * + SIG=0xBB (baseband PHY generated TLV components) + */ + +#define RADAR_REPORT_PULSE_REG_1 0x00 + +#define RADAR_REPORT_PULSE_IS_CHIRP 0x80000000 +#define RADAR_REPORT_PULSE_IS_CHIRP_S 31 + +#define RADAR_REPORT_PULSE_IS_MAX_WIDTH 0x40000000 +#define RADAR_REPORT_PULSE_IS_MAX_WIDTH_S 30 + +#define RADAR_REPORT_AGC_TOTAL_GAIN 0x3FF00000 +#define RADAR_REPORT_AGC_TOTAL_GAIN_S 20 + +#define RADAR_REPORT_PULSE_DELTA_DIFF 0x000F0000 +#define RADAR_REPORT_PULSE_DELTA_DIFF_S 16 + +#define RADAR_REPORT_PULSE_DELTA_PEAK 0x0000FC00 +#define RADAR_REPORT_PULSE_DELTA_PEAK_S 10 + +#define RADAR_REPORT_PULSE_SIDX 0x000003FF +#define RADAR_REPORT_PULSE_SIDX_S 0x0 + +#define RADAR_REPORT_PULSE_REG_2 0x01 + +#define RADAR_REPORT_PULSE_SRCH_FFT_A_VALID 0x80000000 +#define RADAR_REPORT_PULSE_SRCH_FFT_A_VALID_S 31 + +#define RADAR_REPORT_PULSE_AGC_MB_GAIN 0x7F000000 +#define RADAR_REPORT_PULSE_AGC_MB_GAIN_S 24 + +#define RADAR_REPORT_PULSE_SUBCHAN_MASK 0x00FF0000 +#define RADAR_REPORT_PULSE_SUBCHAN_MASK_S 16 + +#define RADAR_REPORT_PULSE_TSF_OFFSET 0x0000FF00 +#define RADAR_REPORT_PULSE_TSF_OFFSET_S 8 + +#define RADAR_REPORT_PULSE_DUR 0x000000FF +#define RADAR_REPORT_PULSE_DUR_S 0 + +#define SEARCH_FFT_REPORT_REG_1 0x00 + +#define SEARCH_FFT_REPORT_TOTAL_GAIN_DB 0xFF800000 +#define SEARCH_FFT_REPORT_TOTAL_GAIN_DB_S 23 + +#define SEARCH_FFT_REPORT_BASE_PWR_DB 0x007FC000 +#define SEARCH_FFT_REPORT_BASE_PWR_DB_S 14 + +#define SEARCH_FFT_REPORT_FFT_CHN_IDX 0x00003000 +#define SEARCH_FFT_REPORT_FFT_CHN_IDX_S 12 + +#define SEARCH_FFT_REPORT_PEAK_SIDX 0x00000FFF +#define SEARCH_FFT_REPORT_PEAK_SIDX_S 0 + +#define SEARCH_FFT_REPORT_REG_2 0x01 + +#define SEARCH_FFT_REPORT_RELPWR_DB 0xFC000000 +#define SEARCH_FFT_REPORT_RELPWR_DB_S 26 + +#define SEARCH_FFT_REPORT_AVGPWR_DB 0x03FC0000 +#define SEARCH_FFT_REPORT_AVGPWR_DB_S 18 + +#define SEARCH_FFT_REPORT_PEAK_MAG 0x0003FF00 +#define SEARCH_FFT_REPORT_PEAK_MAG_S 8 + +#define SEARCH_FFT_REPORT_NUM_STR_BINS_IB 0x000000FF +#define SEARCH_FFT_REPORT_NUM_STR_BINS_IB_S 0 + +#define SEARCH_FFT_REPORT_REG_3 0x02 + +#define SEARCH_FFT_REPORT_SEG_ID 0x00000001 +#define SEARCH_FFT_REPORT_SEG_ID_S 0 + +/* + * Although this code is now not parsing the whole frame (descriptor + * and all), the relevant fields are still useful information + * for anyone who is working on the PHY error part of DFS pattern + * matching. + * + * However, to understand _where_ these descriptors start, you + * should do some digging into the peregrine descriptor format. + * The 30 second version: each RX ring has a bitmap listing which + * descriptors are to be included, and then a set of offsets + * into the RX buffer for where each descriptor will be written. + * It's not like the 802.11n generation hardware which has + * a fixed descriptor format. + */ + +/* RX_PPDU_START */ +#define RX_PPDU_START_LEN (10*4) +#define RX_PPDU_START_REG_4 0x0004 +#define RX_PPDU_START_RSSI_COMB 0x000000FF +#define RX_PPDU_START_RSSI_COMB_S 0 + +/* RX_PPDU_END */ +#define RX_PPDU_END_LEN (21*4) +#define RX_PPDU_END_REG_16 16 +#define RX_PPDU_END_TSF_TIMESTAMP 0xFFFFFFFF +#define RX_PPDU_END_TSF_TIMESTAMP_S 0 +#define RX_PPDU_END_REG_18 18 +#define RX_PPDU_END_PHY_ERR_CODE 0x0000FF00 +#define RX_PPDU_END_PHY_ERR_CODE_S 8 +#define RX_PPDU_END_PHY_ERR 0x00010000 +#define RX_PPDU_END_PHY_ERR_S 16 + +/* + * The RSSI values can have "special meanings". + * If rssi=50, it means that the peak detector triggered. + */ +#define RSSI_PEAK_DETECTOR_SAT 50 + +/* + * If rssi=25, it means that the ADC was saturated, but that only is + * valid when there is one ADC gain change. For short pulses this + * is true - you won't have time to do a gain change before the pulse + * goes away. But for longer pulses, ADC gain changes can occur, so + * you'll get a more accurate RSSI figure. + * + * For short pulses (and the definition of "short" still isn't clear + * at the time of writing) there isn't any real time to do a gain change + * (or two, or three..) in order to get an accurate estimation of signal + * sizing. Thus, RSSI will not be very accurate for short duration pulses. + * All you can really say for certain is that yes, there's a pulse that + * met the requirements of the pulse detector. + * + * For more information, see the 802.11ac Microarchitecture guide. + * (TODO: add a twiki reference.) + */ + +#endif /* _DFS_PHYERR_TLV_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_process_radar_found_ind.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_process_radar_found_ind.h new file mode 100644 index 0000000000000000000000000000000000000000..d41cc034a9eb3f48ca37f06978b0305148389582 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_process_radar_found_ind.h @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: dfs_process_radar_found_ind.h + * This file provides prototypes of the routines needed for the + * external components to utilize the services provided by the + * DFS component. + */ + +#ifndef _DFS_PROCESS_RADAR_FOUND_IND_H_ +#define _DFS_PROCESS_RADAR_FOUND_IND_H_ + +/* Number of channel marking offsets */ +#define DFS_NUM_FREQ_OFFSET 3 + +/* Lower channel from 20 Mhz center channel */ +#define DFS_20MHZ_LOWER_CHANNEL(_f) ((_f) - 20) +/* Upper channel from 20 Mhz center channel */ +#define DFS_20MHZ_UPPER_CHANNEL(_f) ((_f) + 20) +/* 1st lower channel from center channel of bandwidth 40/80/160Mhz */ +#define DFS_FIRST_LOWER_CHANNEL(_f) ((_f) - 10) +/* 2nd lower channel from center channel of bandwidth 40/80/160Mhz */ +#define DFS_SECOND_LOWER_CHANNEL(_f) ((_f) - 30) +/* 3rd lower channel from center channel of bandwidth 80/160Mhz */ +#define DFS_THIRD_LOWER_CHANNEL(_f) ((_f) - 50) +/* 1st upper channel from center channel of bandwidth 40/80/160Mhz */ +#define DFS_FIRST_UPPER_CHANNEL(_f) ((_f) + 10) +/* 2nd upper channel from center channel of bandwidth 40/80/160Mhz */ +#define DFS_SECOND_UPPER_CHANNEL(_f) ((_f) + 30) +/* 3rd upper channel from center channel of bandwidth 80/160Mhz */ +#define DFS_THIRD_UPPER_CHANNEL(_f) ((_f) + 50) + +/* 20 Mhz freq_offset lower */ +#define DFS_20MZ_OFFSET_LOWER (-10) +/* 20 Mhz freq_offset upper */ +#define DFS_20MZ_OFFSET_UPPER (10) +/* 40/80 Mhz freq_offset first lower */ +#define DFS_OFFSET_FIRST_LOWER (-20) +/* 40/80 Mhz freq_offset second lower */ +#define DFS_OFFSET_SECOND_LOWER (-40) +/* 40/80 Mhz freq_offset first upper */ +#define DFS_OFFSET_FIRST_UPPER (20) +/* 40/80 Mhz freq_offset second upper */ +#define DFS_OFFSET_SECOND_UPPER (40) + +/* Frequency offset to sidx */ +#define DFS_FREQ_OFFSET_TO_SIDX(_f) ((32 * (_f)) / 10) +/* Sidx to frequency offset */ +#define DFS_SIDX_TO_FREQ_OFFSET(_s) ((10 * (_s)) / 32) +/* sidx offset boundary */ +#define DFS_BOUNDARY_SIDX 32 +/* freq offset for chirp */ +#define DFS_CHIRP_OFFSET 10 +/* second segment freq offset */ +#define DFS_160MHZ_SECOND_SEG_OFFSET 40 +/*Primary segment id is 0 */ +#define PRIMARY_SEG 0 + +/* Frequency offset indices */ +#define CENTER_CH 0 +#define LEFT_CH 1 +#define RIGHT_CH 2 + +#ifdef CONFIG_CHAN_NUM_API +/* Next channel number offset's from center channel number */ +#define DFS_5GHZ_NEXT_CHAN_OFFSET 2 +#define DFS_5GHZ_2ND_CHAN_OFFSET 6 +#define DFS_5GHZ_3RD_CHAN_OFFSET 10 +#define DFS_5GHZ_4TH_CHAN_OFFSET 14 +#endif + +#ifdef CONFIG_CHAN_FREQ_API +/* Next channel frequency offsets from center channel frequency */ +#define DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET 10 +#define DFS_5GHZ_2ND_CHAN_FREQ_OFFSET 30 +#define DFS_5GHZ_3RD_CHAN_FREQ_OFFSET 50 +#define DFS_5GHZ_4TH_CHAN_FREQ_OFFSET 70 +#endif +/* Max number of bonding channels in 160 MHz segment */ +#define NUM_CHANNELS_160MHZ 8 + +/** + * struct freqs_offsets - frequency and offset information + * @freq: channel frequency in mhz. + * @offset: offset from center frequency. + * + * Index 0 - Center channel affected by RADAR. + * Index 1 - Left of Center channel affected by RADAR. + * Index 2 - Right of Center channel affected by RADAR. + * + * This information is needed to find and mark radar infected + * channels in NOL and regulatory database. + */ +struct freqs_offsets { + uint32_t freq[DFS_NUM_FREQ_OFFSET]; + int32_t offset[DFS_NUM_FREQ_OFFSET]; +}; + +/** + * dfs_process_radar_found_indication() - Process radar found indication + * @dfs: Pointer to wlan_dfs structure. + * @radar_found: radar found info. + * + * Process radar found indication and update radar effected channel in NOL + * and regulatory. + * + * Return: None + */ +void dfs_process_radar_found_indication(struct wlan_dfs *dfs, + struct radar_found_info *radar_found); + +/** + * dfs_process_radar_ind() - Process radar indication event + * @dfs: Pointer to wlan_dfs structure. + * @radar_found: Pointer to radar_found_info structure. + * + * Wrapper function of dfs_process_radar_found_indication(). + * + * Return: QDF_STATUS + */ +QDF_STATUS dfs_process_radar_ind(struct wlan_dfs *dfs, + struct radar_found_info *radar_found); + +/** + * dfs_radarfound_action_generic() - The dfs action on radar detection by host + * for domains other than FCC. + * @dfs: Pointer to wlan_dfs structure. + * @seg_id: segment id. + * + * Return: None + */ +void dfs_radarfound_action_generic(struct wlan_dfs *dfs, uint8_t seg_id); + +/** + * dfs_get_bonding_channels() - Get bonding channels. + * @dfs: Pointer to wlan_dfs structure. + * @curchan: Pointer to dfs_channels to know width and primary channel. + * @segment_id: Segment id, useful for 80+80/160 MHz operating band. + * @detector_id: Detector id, used to find if radar is detected on + * Agile detector. + * @channels: Pointer to save radar affected channels. + * + * Return: Number of channels. + */ +#ifdef CONFIG_CHAN_NUM_API +uint8_t dfs_get_bonding_channels(struct wlan_dfs *dfs, + struct dfs_channel *curchan, + uint32_t segment_id, + uint8_t detector_id, + uint8_t *channels); +#endif + +/** + * dfs_get_bonding_channels_for_freq() - Get bonding channels. + * @dfs: Pointer to wlan_dfs structure. + * @curchan: Pointer to dfs_channels to know width and primary channel. + * @segment_id: Segment id, useful for 80+80/160 MHz operating band. + * @detector_id: Detector id, used to find if radar is detected on + * Agile detector. + * @freq_list: Pointer to save radar affected channel's frequency. + * + * Return: Number of channels. + */ +#ifdef CONFIG_CHAN_FREQ_API +uint8_t dfs_get_bonding_channels_for_freq(struct wlan_dfs *dfs, + struct dfs_channel *curchan, + uint32_t segment_id, + uint8_t detector_id, + uint16_t *freq_list); +#endif + +/** + * dfs_get_bonding_channels_without_seg_info() - Get bonding channels in chan + * @chan: Pointer to dfs_channel structure. + * @channels: channel array holding list of bonded channels. + * + * Return: number of sub channels in the input channel. + */ +#ifdef CONFIG_CHAN_NUM_API +uint8_t dfs_get_bonding_channels_without_seg_info(struct dfs_channel *chan, + uint8_t *channels); +#endif + +/** + * dfs_get_bonding_channel_without_seg_info_for_freq() - Get bonding channels + * in chan. + * @chan: Pointer to dfs_channel structure. + * @freq_list: channel array holding list of bonded channel's frequency. + * + * Return: number of sub channels in the input channel. + */ +#ifdef CONFIG_CHAN_FREQ_API +uint8_t +dfs_get_bonding_channel_without_seg_info_for_freq(struct dfs_channel *chan, + uint16_t *freq_list); +#endif + +/** + * dfs_set_nol_subchannel_marking() - Set or unset NOL subchannel marking. + * @dfs: Pointer to wlan_dfs structure. + * @nol_subchannel_marking - Configure NOL subchannel marking. + * + * Return: Status of the configuration. + */ +int +dfs_set_nol_subchannel_marking(struct wlan_dfs *dfs, + bool nol_subchannel_marking); + +/** + * dfs_get_nol_subchannel_marking() - Get the value of NOL subchannel marking. + * @dfs: Pointer to wlan_dfs structure. + * @nol_subchannel_marking - Read and store the value of NOL subchannel marking + * config. + * + * Return: Status of the read. + */ +int +dfs_get_nol_subchannel_marking(struct wlan_dfs *dfs, + bool *nol_subchannel_marking); +#endif /*_DFS_PROCESS_RADAR_FOUND_IND_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_random_chan_sel.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_random_chan_sel.h new file mode 100644 index 0000000000000000000000000000000000000000..bf03f79856fed011045ad761676f03fe36d6408c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_random_chan_sel.h @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* dfs regions definitions */ +/* un-initialized region */ +#define DFS_UNINIT_REGION_VAL 0 + +/* FCC region */ +#define DFS_FCC_REGION_VAL 1 + +/* ETSI region */ +#define DFS_ETSI_REGION_VAL 2 + +/* MKK region */ +#define DFS_MKK_REGION_VAL 3 + +/* China region */ +#define DFS_CN_REGION_VAL 4 + +/* Korea region */ +#define DFS_KR_REGION_VAL 5 + +/* Undefined region */ +#define DFS_UNDEF_REGION_VAL 6 + +/* Channel width definitions */ +/* 20MHz channel width */ +#define DFS_CH_WIDTH_20MHZ 0 + +/* 40MHz channel width */ +#define DFS_CH_WIDTH_40MHZ 1 + +/* 80MHz channel width */ +#define DFS_CH_WIDTH_80MHZ 2 + +/* 160MHz channel width */ +#define DFS_CH_WIDTH_160MHZ 3 + +/* 80+80 non-contiguous */ +#define DFS_CH_WIDTH_80P80MHZ 4 + +/* 5MHz channel width */ +#define DFS_CH_WIDTH_5MHZ 5 + +/* 10MHz channel width */ +#define DFS_CH_WIDTH_10MHZ 6 + +/* Invalid channel width */ +#define DFS_CH_WIDTH_INVALID 7 + +/* Max channel width */ +#define DFS_CH_WIDTH_MAX 8 + +/* Next 5GHz channel number */ +#define DFS_80_NUM_SUB_CHANNEL 4 + +/* Next 5GHz channel freq offset */ +#define DFS_80_NUM_SUB_CHANNEL_FREQ 20 + +/* Next 5GHz channel number */ +#define DFS_NEXT_5GHZ_CHANNEL 4 + +/* Next 5GHz channel number */ +#define DFS_NEXT_5GHZ_CHANNEL_FREQ_OFFSET 20 + +/* Number of 20MHz channels in bitmap */ +#define DFS_MAX_20M_SUB_CH 8 + +/* Frequency difference between 80+80 MHz */ +#define DFS_80P80M_FREQ_DIFF 40 + +/* Number of 80MHz channels in 5GHz band */ +#define DFS_MAX_80MHZ_BANDS 6 + +/* Start channel and center channel diff in 80Mhz */ +#define DFS_80MHZ_START_CENTER_CH_DIFF 6 + +/* Start channel and center channel freq diff in 80Mhz */ +#define DFS_80MHZ_START_CENTER_CH_FREQ_DIFF 30 + +/* Bitmap mask for 80MHz */ +#define DFS_80MHZ_MASK 0x0F + +/* Bitmap mask for 40MHz lower */ +#define DFS_40MHZ_MASK_L 0x03 + +/* Bitmap mask for 40MHz higher */ +#define DFS_40MHZ_MASK_H 0x0C + +/* Adjacent weather radar channel frequency */ +#define DFS_ADJACENT_WEATHER_RADAR_CHANNEL 5580 + +/* Adjacent weather radar channel number */ +#define DFS_ADJACENT_WEATHER_RADAR_CHANNEL_NUM 116 + +/* Max 2.4 GHz channel number */ +#define DFS_MAX_24GHZ_CHANNEL 14 + +/* Max 2.4 GHz channel frequency */ +#define DFS_MAX_24GHZ_CHANNEL_FREQ 2484 + +/* Adjacent weather radar channel frequency */ +#define DFS_ADJACENT_WEATHER_RADAR_CHANNEL_FREQ 5580 +/* Max valid channel number */ +#define MAX_CHANNEL_NUM 184 + +#ifdef WLAN_ENABLE_CHNL_MATRIX_RESTRICTION +#define DFS_TX_LEAKAGE_THRES 310 +#define DFS_TX_LEAKAGE_MAX 1000 +#define DFS_TX_LEAKAGE_MIN 200 + +/* + * This define is used to block additional channels + * based on the new data gathered on auto platforms + * and to differentiate the leakage data among different + * platforms. + */ + +#define DFS_TX_LEAKAGE_AUTO_MIN 210 +#endif + +#define DFS_IS_CHANNEL_WEATHER_RADAR(_f) (((_f) >= 5600) && ((_f) <= 5650)) +#ifdef CONFIG_CHAN_NUM_API +#define DFS_IS_CHAN_JAPAN_INDOOR(_ch) (((_ch) >= 36) && ((_ch) <= 64)) +#define DFS_IS_CHAN_JAPAN_W53(_ch) (((_ch) >= 52) && ((_ch) <= 64)) +#define DFS_IS_CHAN_JAPAN_OUTDOOR(_ch) (((_ch) >= 100) && ((_ch) <= 140)) +#endif + +#ifdef CONFIG_CHAN_FREQ_API +#define DFS_IS_CHAN_JAPAN_INDOOR_FREQ(_ch)(((_ch) >= 5180) && ((_ch) <= 5320)) +#define DFS_IS_CHAN_JAPAN_OUTDOOR_FREQ(_ch)(((_ch) >= 5500) && ((_ch) <= 5700)) +#define DFS_IS_CHAN_JAPAN_W53_FREQ(_ch) (((_ch) >= 5260) && ((_ch) <= 5320)) +#endif + +/** + * struct chan_bonding_info - for holding channel bonding bitmap + * @chan_map: channel map + * @rsvd: reserved + * @start_chan: start channel + * @start_chan_freq: start channel frequency in MHZ. + */ +struct chan_bonding_info { + uint8_t chan_map:4; + uint8_t rsvd:4; + uint8_t start_chan; + uint16_t start_chan_freq; +}; + +/** + * struct chan_bonding_bitmap - bitmap structure which represent + * all 5GHZ channels. + * @chan_bonding_set: channel bonding bitmap + */ +struct chan_bonding_bitmap { + struct chan_bonding_info chan_bonding_set[DFS_MAX_80MHZ_BANDS]; +}; + +#ifdef WLAN_ENABLE_CHNL_MATRIX_RESTRICTION +/** + * struct dfs_tx_leak_info - DFS leakage info + * @leak_chan: leak channel. + * @leak_lvl: tx leakage lvl. + */ +struct dfs_tx_leak_info { + uint8_t leak_chan; + uint16_t leak_chan_freq; + uint32_t leak_lvl; +}; + +/** + * struct dfs_matrix_tx_leak_info - DFS leakage matrix info for dfs channel. + * @channel: channel to switch from + * @chan_matrix DFS leakage matrix info for given dfs channel. + */ +struct dfs_matrix_tx_leak_info { + uint8_t channel; + uint16_t channel_freq; + struct dfs_tx_leak_info chan_matrix[CHAN_ENUM_5720 - + CHAN_ENUM_5180 + 1]; +}; +#endif + +/** + * dfs_mark_leaking_ch() - to mark channel leaking in to nol + * @dfs: dfs handler. + * @ch_width: channel width + * @temp_ch_lst_sz: the target channel list + * @temp_ch_lst: the target channel list + * + * This function removes the channels from temp channel list that + * (if selected as target channel) will cause leakage in one of + * the NOL channels + * + * Return: QDF_STATUS + */ +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS dfs_mark_leaking_ch(struct wlan_dfs *dfs, + enum phy_ch_width ch_width, + uint8_t temp_ch_lst_sz, + uint8_t *temp_ch_lst); +#endif + +/** + * dfs_mark_leaking_chan_for_freq() - to mark channel leaking in to nol + * @dfs: dfs handler. + * @ch_width: channel width + * @temp_chan_lst_sz: the target channel list size. + * @temp_freq_lst: the target frequency channel list + * + * This function removes the channels from temp channel list that + * (if selected as target channel) will cause leakage in one of + * the NOL channels + * + * Return: QDF_STATUS + */ +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS dfs_mark_leaking_chan_for_freq(struct wlan_dfs *dfs, + enum phy_ch_width ch_width, + uint8_t temp_chan_lst_sz, + uint16_t *temp_freq_lst); +#endif + +/** + * dfs_prepare_random_channel() - This function picks a random channel from + * the list of available channels. + * @dfs: dfs handler. + * @ch_list: channel list. + * @ch_count: Number of channels in given list. + * @flags: DFS_RANDOM_CH_FLAG_* + * @ch_wd: input channel width, used same variable to return new ch width. + * @cur_chan: current channel. + * @dfs_region: DFS region. + * @acs_info: acs channel range information. + * + * Function used to find random channel selection from a given list. + * First this function removes channels based on flags and then uses final + * list to find channel based on requested bandwidth, if requested bandwidth + * not available, it chooses next lower bandwidth and try. + * + * Return: channel number, else zero. + */ +#ifdef CONFIG_CHAN_NUM_API +uint8_t dfs_prepare_random_channel(struct wlan_dfs *dfs, + struct dfs_channel *ch_list, + uint32_t ch_count, + uint32_t flags, + uint8_t *ch_wd, + struct dfs_channel *cur_chan, + uint8_t dfs_region, + struct dfs_acs_info *acs_info); +#endif + +/** + * dfs_prepare_random_channel() - This function picks a random channel from + * the list of available channels. + * @dfs: dfs handler. + * @chan_list: channel list. + * @ch_count: Number of channels in given list. + * @flags: DFS_RANDOM_CH_FLAG_* + * @chan_wd: input channel width, used same variable to return new ch width. + * @cur_chan: current channel. + * @dfs_region: DFS region. + * @acs_info: acs channel range information. + * + * Function used to find random channel selection from a given list. + * First this function removes channels based on flags and then uses final + * list to find channel based on requested bandwidth, if requested bandwidth + * not available, it chooses next lower bandwidth and try. + * + * Return: channel frequency, else zero. + */ +#ifdef CONFIG_CHAN_FREQ_API +uint16_t dfs_prepare_random_channel_for_freq(struct wlan_dfs *dfs, + struct dfs_channel *ch_list, + uint32_t chan_count, + uint32_t flags, + uint8_t *chan_wd, + struct dfs_channel *cur_chan, + uint8_t dfs_region, + struct dfs_acs_info *acs_info); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_structs.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_structs.h new file mode 100644 index 0000000000000000000000000000000000000000..0700b896c28bf945daa1a4e3a9207553f3f087ba --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_structs.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2011-2012, 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has dfs capability, dfs pulse structures. + */ + +#ifndef _DFS_STRUCTS_H_ +#define _DFS_STRUCTS_H_ + +/** + * This represents the general case of the radar PHY configuration, + * across all chips. + * + * It's then up to each chip layer to translate to/from this + * (eg to HAL_PHYERR_PARAM for the HAL case.) + */ + +#define WLAN_DFS_PHYERR_PARAM_NOVAL 0xFFFF +#define WLAN_DFS_PHYERR_PARAM_ENABLE 0x8000 + +/** + * For the dfs_nol_clist_update() method - this is the + * update command. + */ +enum { + DFS_NOL_CLIST_CMD_NONE = 0x0, + DFS_NOL_CLIST_CMD_UPDATE = 0x1, +}; + +/** + * struct dfs_pulse - DFS pulses. + * @rp_numpulses: Num of pulses in radar burst. + * @rp_pulsedur: Duration of each pulse in usecs. + * @rp_pulsefreq: Frequency of pulses in burst. + * @rp_max_pulsefreq: Frequency of pulses in burst. + * @rp_patterntype: fixed or variable pattern type. + * @rp_pulsevar: Time variation of pulse duration for matched + * filter (single-sided) in usecs. + * @rp_threshold: Threshold for MF output to indicate radar match. + * @rp_mindur: Min pulse duration to be considered for this pulse + * type. + * @rp_maxdur: Min pulse duration to be considered for this pulse + * type. + * @rp_rssithresh: Minimum rssi to be considered a radar pulse. + * @rp_meanoffset: Offset for timing adjustment. + * @rp_rssimargin: rssi threshold margin. In Turbo Mode HW reports + * rssi 3dBm. lower than in non TURBO mode. This + * will be used to offset that diff. + * @rp_ignore_pri_window: Ignore PRI window. + * @rp_sidx_spread: To reduce false detection use sidx spread. For HT160, + * for consistency, push all pulses at center of the + * channel to 80MHz ext when both segments are DFS. + * Maximum SIDX value spread in a matched sequence + * excluding FCC Bin 5. + * @rp_check_delta_peak: This is mainly used for ETSI Type 4 5MHz chirp pulses + * which HW cnanot identify. + * Reliably as chirping but can correctly characterize + * these with delta_peak non-zero. + * Is delta_peak check required for this filter. + * @rp_pulseid: Unique ID for identifying filter. + */ +struct dfs_pulse { + uint32_t rp_numpulses; + uint32_t rp_pulsedur; + uint32_t rp_pulsefreq; + uint32_t rp_max_pulsefreq; + uint32_t rp_patterntype; + uint32_t rp_pulsevar; + uint32_t rp_threshold; + uint32_t rp_mindur; + uint32_t rp_maxdur; + uint32_t rp_rssithresh; + uint32_t rp_meanoffset; + int32_t rp_rssimargin; + uint32_t rp_ignore_pri_window; + uint16_t rp_sidx_spread; + int8_t rp_check_delta_peak; + uint16_t rp_pulseid; +}; + +/** + * struct dfs_bin5pulse - DFS bin5 pulse. + * @b5_threshold: Number of bin5 pulses to indicate detection. + * @b5_mindur: Min duration for a bin5 pulse. + * @b5_maxdur: Max duration for a bin5 pulse. + * @b5_timewindow: Window over which to count bin5 pulses. + * @b5_rssithresh: Min rssi to be considered a pulse. + * @b5_rssimargin: rssi threshold margin. In Turbo Mode HW reports rssi 3dB + */ +struct dfs_bin5pulse { + uint32_t b5_threshold; + uint32_t b5_mindur; + uint32_t b5_maxdur; + uint32_t b5_timewindow; + uint32_t b5_rssithresh; + uint32_t b5_rssimargin; +}; + +/** + * wlan_dfs_phyerr_init_noval() - Fill wlan_dfs_phyerr_param with 0xFF. + * @pe: Pointer to wlan_dfs_phyerr_param structure. + */ +static inline void wlan_dfs_phyerr_init_noval(struct wlan_dfs_phyerr_param *pe) +{ + pe->pe_firpwr = WLAN_DFS_PHYERR_PARAM_NOVAL; + pe->pe_rrssi = WLAN_DFS_PHYERR_PARAM_NOVAL; + pe->pe_height = WLAN_DFS_PHYERR_PARAM_NOVAL; + pe->pe_prssi = WLAN_DFS_PHYERR_PARAM_NOVAL; + pe->pe_inband = WLAN_DFS_PHYERR_PARAM_NOVAL; + pe->pe_relpwr = WLAN_DFS_PHYERR_PARAM_NOVAL; + pe->pe_relstep = WLAN_DFS_PHYERR_PARAM_NOVAL; + pe->pe_maxlen = WLAN_DFS_PHYERR_PARAM_NOVAL; +} + +/** + * struct wlan_dfs_radar_tab_info - Radar table information. + * @dfsdomain: DFS domain. + * @numradars: Number of radars. + * @dfs_radars: Pointer to dfs_pulse structure. + * @numb5radars: NUM5 radars. + * @b5pulses: BIN5 radars. + * @dfs_defaultparams: phyerr params. + */ +struct wlan_dfs_radar_tab_info { + uint32_t dfsdomain; + int numradars; + struct dfs_pulse *dfs_radars; + int numb5radars; + struct dfs_bin5pulse *b5pulses; + struct wlan_dfs_phyerr_param dfs_defaultparams; +}; + +#endif /* _DFS_STRUCTS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_zero_cac.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_zero_cac.h new file mode 100644 index 0000000000000000000000000000000000000000..dd718e97f4b8be504a7473d863fa88f756dd6970 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_zero_cac.h @@ -0,0 +1,1111 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * DOC: This file has Zero CAC DFS APIs. + */ + +#ifndef _DFS_ZERO_CAC_H_ +#define _DFS_ZERO_CAC_H_ + +#include "dfs.h" +#include + +#ifdef CONFIG_CHAN_NUM_API +#define VHT160_IEEE_FREQ_DIFF 16 +#endif + +#define OCAC_SUCCESS 0 +#define OCAC_RESET 1 +#define OCAC_CANCEL 2 + +#define TREE_DEPTH 3 +#define N_SUBCHANS_FOR_80BW 4 + +#define INITIAL_20_CHAN_OFFSET -6 +#define INITIAL_40_CHAN_OFFSET -4 +#define INITIAL_80_CHAN_OFFSET 0 + +#define NEXT_20_CHAN_OFFSET 4 +#define NEXT_40_CHAN_OFFSET 8 +#define NEXT_80_CHAN_OFFSET 16 + +#define DFS_CHWIDTH_20_VAL 20 +#define DFS_CHWIDTH_40_VAL 40 +#define DFS_CHWIDTH_80_VAL 80 +#define DFS_CHWIDTH_160_VAL 160 + +#define WEATHER_CHAN_START 120 +#define WEATHER_CHAN_END 128 + +/* PreCAC timeout durations in ms. */ +#define MIN_PRECAC_DURATION (6 * 60 * 1000) /* 6 mins */ +#define MIN_WEATHER_PRECAC_DURATION (60 * 60 * 1000) /* 1 hour */ +#define MAX_PRECAC_DURATION (4 * 60 * 60 * 1000) /* 4 hours */ +#define MAX_WEATHER_PRECAC_DURATION (24 * 60 * 60 * 1000) /* 24 hours */ + +#define PCAC_DFS_INDEX_ZERO 0 +#define PCAC_TIMER_NOT_RUNNING 0 +#define PRECAC_NOT_STARTED 0 +/** + * struct precac_tree_node - Individual tree node structure for every node in + * the precac forest maintained. + * @left_child: Pointer to the left child of the node. + * @right_child: Pointer to the right child of the node. + * @ch_ieee: Center channel ieee value. + * @ch_freq: Center channel frequency value (BSTree node key value). + * @n_caced_subchs: Number of CACed subchannels of the ch_ieee. + * @n_nol_subchs: Number of subchannels of the ch_ieee in NOL. + * @n_valid_subchs: Number of subchannels of the ch_ieee available (as per + * the country's channel list). + * @bandwidth: Bandwidth of the ch_ieee (in the current node). + */ +struct precac_tree_node { + struct precac_tree_node *left_child; + struct precac_tree_node *right_child; + uint8_t ch_ieee; + uint16_t ch_freq; + uint8_t n_caced_subchs; + uint8_t n_nol_subchs; + uint8_t n_valid_subchs; + uint8_t bandwidth; +}; + +/** + * enum precac_chan_state - Enum for PreCAC state of a channel. + * @PRECAC_ERR: Invalid preCAC state. + * @PRECAC_REQUIRED: preCAC need to be done on the channel. + * @PRECAC_NOW: preCAC is running on the channel. + * @PRECAC_DONE: preCAC is done and channel is clear. + * @PRECAC_NOL: preCAC is done and radar is detected. + */ +enum precac_chan_state { + PRECAC_ERR = -1, + PRECAC_REQUIRED, + PRECAC_NOW, + PRECAC_DONE, + PRECAC_NOL, +}; + +/** + * struct dfs_precac_entry - PreCAC entry. + * @pe_list: PreCAC entry. + * @vht80_ch_ieee: VHT80 centre channel IEEE value. + * @vht80_ch_freq: VHT80 centre channel frequency value. + * @dfs: Pointer to wlan_dfs structure. + * @tree_root: Tree root node with 80MHz channel key. + */ +struct dfs_precac_entry { + TAILQ_ENTRY(dfs_precac_entry) pe_list; + uint8_t vht80_ch_ieee; + uint16_t vht80_ch_freq; + struct wlan_dfs *dfs; + struct precac_tree_node *tree_root; +}; + +/** + * dfs_zero_cac_timer_init() - Initialize zero-cac timers + * @dfs_soc_obj: Pointer to DFS SOC object structure. + */ +#if !defined(QCA_MCL_DFS_SUPPORT) +void dfs_zero_cac_timer_init(struct dfs_soc_priv_obj *dfs_soc_obj); +#else +static inline void +dfs_zero_cac_timer_init(struct dfs_soc_priv_obj *dfs_soc_obj) +{ +} +#endif +/** + * dfs_print_precaclists() - Print precac list. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +void dfs_print_precaclists(struct wlan_dfs *dfs); +#else +static inline void dfs_print_precaclists(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_reset_precac_lists() - Resets the precac lists. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +void dfs_reset_precac_lists(struct wlan_dfs *dfs); +#else +static inline void dfs_reset_precac_lists(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_reset_precaclists() - Clears and initializes precac_list. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +void dfs_reset_precaclists(struct wlan_dfs *dfs); +#else +static inline void dfs_reset_precaclists(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_deinit_precac_list() - Clears the precac list. + * @dfs: Pointer to wlan_dfs dtructure. + */ +void dfs_deinit_precac_list(struct wlan_dfs *dfs); + +/** + * dfs_zero_cac_detach() - Free zero_cac memory. + * @dfs: Pointer to wlan_dfs dtructure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +void dfs_zero_cac_detach(struct wlan_dfs *dfs); +#else +static inline void dfs_zero_cac_detach(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_init_precac_list() - Init precac list. + * @dfs: Pointer to wlan_dfs dtructure. + */ +void dfs_init_precac_list(struct wlan_dfs *dfs); + +/** + * dfs_start_precac_timer() - Start precac timer. + * @dfs: Pointer to wlan_dfs structure. + * @precac_chan: Start thr precac timer in this channel. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +#ifdef CONFIG_CHAN_NUM_API +void dfs_start_precac_timer(struct wlan_dfs *dfs, + uint8_t precac_chan); +#endif + +/** + * dfs_start_precac_timer() - Start precac timer. + * @dfs: Pointer to wlan_dfs structure. + * @precac_chan_freq: Frequency to start precac timer. + */ +#ifdef CONFIG_CHAN_FREQ_API +void dfs_start_precac_timer_for_freq(struct wlan_dfs *dfs, + uint16_t precac_chan_freq); +#endif +#else +#ifdef CONFIG_CHAN_NUM_API +static inline void dfs_start_precac_timer(struct wlan_dfs *dfs, + uint8_t precac_chan) +{ +} +#endif +#ifdef CONFIG_CHAN_FREQ_API +static inline +void dfs_start_precac_timer_for_freq(struct wlan_dfs *dfs, + uint16_t precac_chan_freq) +{ +} +#endif +#endif + +/** + * dfs_cancel_precac_timer() - Cancel the precac timer. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +void dfs_cancel_precac_timer(struct wlan_dfs *dfs); +#else +static inline void dfs_cancel_precac_timer(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_zero_cac_attach() - Initialize dfs zerocac variables. + * @dfs: Pointer to DFS structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +void dfs_zero_cac_attach(struct wlan_dfs *dfs); +#else +static inline void dfs_zero_cac_attach(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_zero_cac_reset() - Reset Zero cac DFS variables. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +void dfs_zero_cac_reset(struct wlan_dfs *dfs); +#else +static inline void dfs_zero_cac_reset(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_zero_cac_timer_detach() - Free Zero cac DFS variables. + * @dfs_soc_obj: Pointer to dfs_soc_priv_obj structure. + */ +#if !defined(QCA_MCL_DFS_SUPPORT) +void dfs_zero_cac_timer_detach(struct dfs_soc_priv_obj *dfs_soc_obj); +#else +static inline void +dfs_zero_cac_timer_detach(struct dfs_soc_priv_obj *dfs_soc_obj) +{ +} +#endif + +/** + * dfs_is_precac_done() - Is precac done. + * @dfs: Pointer to wlan_dfs structure. + * @chan: Pointer to dfs_channel for which preCAC done is checked. + * + * Return: + * * True: If precac is done on channel. + * * False: If precac is not done on channel. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +bool dfs_is_precac_done(struct wlan_dfs *dfs, struct dfs_channel *chan); +#else +static inline bool dfs_is_precac_done(struct wlan_dfs *dfs, + struct dfs_channel *chan) +{ + return false; +} +#endif + +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT +/** + * dfs_decide_precac_preferred_chan() - Choose operating channel among + * configured DFS channel and + * intermediate channel based on + * precac status of configured + * DFS channel. + * @dfs: Pointer to wlan_dfs structure. + * @pref_chan: Configured DFS channel. + * @mode: Configured PHY mode. + * + * Return: True if intermediate channel needs to configure. False otherwise. + */ +#ifdef CONFIG_CHAN_NUM_API +bool +dfs_decide_precac_preferred_chan(struct wlan_dfs *dfs, + uint8_t *pref_chan, + enum wlan_phymode mode); +#endif + +/** + * dfs_decide_precac_preferred_chan_for_freq() - Choose operating channel among + * configured DFS channel and + * intermediate channel based on + * precac status of configured + * DFS channel. + * @dfs: Pointer to wlan_dfs structure. + * @pref_chan: Configured DFS channel frequency + * @mode: Configured PHY mode. + * + * Return: True if intermediate channel needs to configure. False otherwise. + */ + +#ifdef CONFIG_CHAN_FREQ_API +bool +dfs_decide_precac_preferred_chan_for_freq(struct wlan_dfs *dfs, + uint16_t *pref_chan_freq, + enum wlan_phymode mode); +#endif +#else +#ifdef CONFIG_CHAN_NUM_API +static inline void dfs_decide_precac_preferred_chan(struct wlan_dfs *dfs, + uint8_t *pref_chan, + enum wlan_phymode mode) +{ +} +#endif +#ifdef CONFIG_CHAN_FREQ_API +static inline void +dfs_decide_precac_preferred_chan_for_freq(struct wlan_dfs *dfs, + uint8_t *pref_chan, + enum wlan_phymode mode) +{ +} +#endif +#endif + +/** + * dfs_get_ieeechan_for_precac() - Get chan of required bandwidth from + * precac_list. + * @dfs: Pointer to wlan_dfs structure. + * @exclude_pri_ch_ieee: Primary channel IEEE to be excluded for preCAC. + * @exclude_sec_ch_ieee: Secondary channel IEEE to be excluded for preCAC. + * @bandwidth: Bandwidth of requested channel. + */ +#ifdef CONFIG_CHAN_NUM_API +uint8_t dfs_get_ieeechan_for_precac(struct wlan_dfs *dfs, + uint8_t exclude_pri_ch_ieee, + uint8_t exclude_sec_ch_ieee, + uint8_t bandwidth); +#endif + +/** + * dfs_get_ieeechan_for_precac_for_freq() - Get chan of required bandwidth from + * precac_list. + * @dfs: Pointer to wlan_dfs structure. + * @exclude_pri_chan_freq: Primary channel freq to be excluded for preCAC. + * @exclude_sec_chan_freq: Secondary channel freq to be excluded for preCAC. + * @bandwidth: Bandwidth of requested channel. + */ +#ifdef CONFIG_CHAN_FREQ_API +uint16_t dfs_get_ieeechan_for_precac_for_freq(struct wlan_dfs *dfs, + uint16_t exclude_pri_chan_freq, + uint16_t exclude_sec_chan_freq, + uint8_t bandwidth); +#endif + +/** + * dfs_override_precac_timeout() - Override the default precac timeout. + * @dfs: Pointer to wlan_dfs structure. + * @precac_timeout: Precac timeout value. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +int dfs_override_precac_timeout(struct wlan_dfs *dfs, + int precac_timeout); +#else +static inline int dfs_override_precac_timeout(struct wlan_dfs *dfs, + int precac_timeout) +{ + return 0; +} +#endif + +/** + * dfs_get_override_precac_timeout() - Get precac timeout. + * @dfs: Pointer wlan_dfs structure. + * @precac_timeout: Get precac timeout value in this variable. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +int dfs_get_override_precac_timeout(struct wlan_dfs *dfs, + int *precac_timeout); +#else +static inline int dfs_get_override_precac_timeout(struct wlan_dfs *dfs, + int *precac_timeout) +{ + return 0; +} +#endif + +/** + * dfs_find_vht80_chan_for_precac() - Find VHT80 channel for precac. + * @dfs: Pointer to wlan_dfs structure. + * @chan_mode: Channel mode. + * @ch_freq_seg1: Segment1 channel freq. + * @cfreq1: cfreq1. + * @cfreq2: cfreq2. + * @phy_mode: Precac phymode. + * @dfs_set_cfreq2: Precac cfreq2 + * @set_agile: Agile mode flag. + * + * Zero-CAC-DFS algorithm:- + * Zero-CAC-DFS algorithm works in stealth mode. + * 1) When any channel change happens in VHT80 mode the algorithm + * changes the HW channel mode to VHT80_80/VHT160 mode and adds a + * new channel in the secondary VHT80 to perform precac and a + * precac timer is started. However the upper layer/UMAC is unaware + * of this change. + * 2) When the precac timer expires without being interrupted by + * any channel change the secondary VHT80 channel is moved from + * precac-required-list to precac-done-list. + * 3) If there is a radar detect at any time in any segment + * (segment-1 is preimary VHT80 and segment-2 is VHT80)then the + * channel is searched in both precac-reuired-list and precac-done-list + * and moved to precac-nol-list. + * 4) Whenever channel change happens if the new channel is a DFS + * channel then precac-done-list is searched and if the channel is + * found in the precac-done-list then the CAC is skipped. + * 5) The precac expiry timer makes a vedv_restart(channel change + * with current-upper-layer-channel-mode which is VHT80). In channel + * change the algorithm tries to pick a new channel from the + * precac-required list. If none found then channel mode remains same. + * Which means when all the channels in precac-required-list are + * exhausted the VHT80_80/VHT160 comes back to VHT80 mode. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +#ifdef CONFIG_CHAN_NUM_API +void dfs_find_vht80_chan_for_precac(struct wlan_dfs *dfs, + uint32_t chan_mode, + uint8_t ch_freq_seg1, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t *phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile); +#endif + +/* + * dfs_find_vht80_chan_for_precac() - Find VHT80 channel for precac. + * @dfs: Pointer to wlan_dfs structure. + * @chan_mode: Channel mode. + * @ch_freq_seg1: Segment1 channel freq in mhz. + * @cfreq1: cfreq1. + * @cfreq2: cfreq2. + * @phy_mode: Precac phymode. + * @dfs_set_cfreq2: Precac cfreq2 + * @set_agile: Agile mode flag. + */ +#ifdef CONFIG_CHAN_FREQ_API +void dfs_find_vht80_chan_for_precac_for_freq(struct wlan_dfs *dfs, + uint32_t chan_mode, + uint16_t ch_freq_seg1_mhz, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t *phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile); +#endif + +#else +#ifdef CONFIG_CHAN_NUM_API +static inline void dfs_find_vht80_chan_for_precac(struct wlan_dfs *dfs, + uint32_t chan_mode, + uint8_t ch_freq_seg1, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t *phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile) +{ +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +static inline +void dfs_find_vht80_chan_for_precac_for_freq(struct wlan_dfs *dfs, + uint32_t chan_mode, + uint16_t ch_freq_seg1_mhz, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t *phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile) +{ +} +#endif +#endif + +#if defined(QCA_SUPPORT_AGILE_DFS) +/** + * dfs_find_pdev_for_agile_precac() - Find pdev to select channel for precac. + * @pdev: Pointer to wlan_objmgr_pdev structure. + * @cur_precac_dfs_index: current precac index + */ +void dfs_find_pdev_for_agile_precac(struct wlan_objmgr_pdev *pdev, + uint8_t *cur_precac_dfs_index); + +/** + * dfs_prepare_agile_precac_chan() - Send Agile set request for given pdev. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_prepare_agile_precac_chan(struct wlan_dfs *dfs); + +/** + * dfs_process_ocac_complete() - Process Off-Channel CAC complete indication. + * @pdev :Pointer to wlan_objmgr_pdev structure. + * @ocac_status: Off channel CAC complete status + * @center_freq : Center Frequency of O-CAC done indication. + */ +void dfs_process_ocac_complete(struct wlan_objmgr_pdev *pdev, + uint32_t ocac_status, + uint32_t center_freq); + +/** + * dfs_get_ieeechan_for_agilecac() - Find an IEEE channel for agile CAC. + * @dfs: Pointer to wlan_dfs structure. + * @ch_ieee: Pointer to channel number for agile set request. + * @pri_ch_ieee: Current primary IEEE channel. + * @sec_ch_ieee: Current secondary IEEE channel (in HT80_80 mode). + * + * Find an IEEE channel for agileCAC which is not the current operating + * channels (indicated by pri_ch_ieee, sec_ch_ieee). + */ +#ifdef CONFIG_CHAN_NUM_API +void dfs_get_ieeechan_for_agilecac(struct wlan_dfs *dfs, + uint8_t *ch_ieee, + uint8_t pri_ch_ieee, + uint8_t sec_ch_ieee); +#endif + +/** + * dfs_get_ieeechan_for_agilecac_for_freq() - Find chan freq for agile CAC. + * @dfs: Pointer to wlan_dfs structure. + * @chan_freq: Pointer to channel freq for agile set request. + * @pri_chan_freq: Current primary IEEE channel freq. + * @sec_chan_freq: Current secondary IEEE channel freq (in HT80_80 mode). + * + * Find an IEEE channel freq for agileCAC which is not the current operating + * channels (indicated by pri_chan_freq, sec_chan_freq). + */ +#ifdef CONFIG_CHAN_FREQ_API +void dfs_get_ieeechan_for_agilecac_for_freq(struct wlan_dfs *dfs, + uint16_t *chan_freq, + uint16_t pri_chan_freq, + uint16_t sec_chan_freq); +#endif + +/** + * dfs_agile_precac_start() - Start agile precac. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_agile_precac_start(struct wlan_dfs *dfs); + +/** + * dfs_start_agile_precac_timer() - Start precac timer for the given channel. + * @dfs: Pointer to wlan_dfs structure. + * @ocac_status: Status of the off channel CAC. + * @adfs_param: Agile DFS CAC parameters. + * + * Start the precac timer with proper timeout values based on the channel to + * be preCACed. The preCAC channel number and chwidth information is present + * in the adfs_param argument. Once the timer is started, update the timeout + * fields in adfs_param. + */ +void dfs_start_agile_precac_timer(struct wlan_dfs *dfs, + uint8_t ocac_status, + struct dfs_agile_cac_params *adfs_param); + +/** + * dfs_set_fw_adfs_support() - Set FW aDFS support in dfs object. + * @dfs: Pointer to wlan_dfs structure. + * @fw_adfs_support_160: aDFS enabled when pdev is on 160/80P80MHz. + * @fw_adfs_support_non_160: aDFS enabled when pdev is on 20/40/80MHz. + * + * Return: void. + */ +void dfs_set_fw_adfs_support(struct wlan_dfs *dfs, + bool fw_adfs_support_160, + bool fw_adfs_support_non_160); +#else +static inline void dfs_find_pdev_for_agile_precac(struct wlan_objmgr_pdev *pdev, + uint8_t *cur_precac_dfs_index) +{ +} + +static inline void dfs_prepare_agile_precac_chan(struct wlan_dfs *dfs) +{ +} + +static inline void +dfs_process_ocac_complete(struct wlan_objmgr_pdev *pdev, + uint32_t ocac_status, + uint32_t center_freq) +{ +} + +#ifdef CONFIG_CHAN_NUM_API +static inline void dfs_get_ieeechan_for_agilecac(struct wlan_dfs *dfs, + uint8_t *ch_ieee, + uint8_t pri_ch_ieee, + uint8_t sec_ch_ieee) +{ +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +static inline void +dfs_get_ieeechan_for_agilecac_for_freq(struct wlan_dfs *dfs, + uint16_t *chan_freq, + uint16_t pri_chan_freq, + uint16_t sec_chan_freq) +{ +} +#endif + +static inline void dfs_agile_precac_start(struct wlan_dfs *dfs) +{ +} + +static inline void +dfs_start_agile_precac_timer(struct wlan_dfs *dfs, + uint8_t ocac_status, + struct dfs_agile_cac_params *adfs_param) +{ +} + +static inline void +dfs_set_fw_adfs_support(struct wlan_dfs *dfs, + bool fw_adfs_support_160, + bool fw_adfs_support_non_160) +{ +} +#endif + +#if defined(QCA_SUPPORT_AGILE_DFS) || defined(ATH_SUPPORT_ZERO_CAC_DFS) +/** + * dfs_agile_soc_obj_init() - Initialize soc obj for agile precac. + * @dfs: Pointer to wlan_dfs structure. + * @precac_chan: Start thr precac timer in this channel. + * @ocac_status: Status of the off channel CAC. + */ +void dfs_agile_soc_obj_init(struct wlan_dfs *dfs, + struct wlan_objmgr_psoc *psoc); +#else +static inline void dfs_agile_soc_obj_init(struct wlan_dfs *dfs, + struct wlan_objmgr_psoc *psoc) +{ +} +#endif + +/** + * dfs_set_precac_enable() - Set precac enable flag. + * @dfs: Pointer to wlan_dfs structure. + * @value: input value for dfs_legacy_precac_ucfg flag. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +void dfs_set_precac_enable(struct wlan_dfs *dfs, + uint32_t value); +#else +static inline void dfs_set_precac_enable(struct wlan_dfs *dfs, + uint32_t value) +{ +} +#endif + +/** + * dfs_is_legacy_precac_enabled() - Check if legacy preCAC is enabled for the + * DFS onject. + * @dfs: Pointer to the wlan_dfs object. + * + * Return: True if legacy preCAC is enabled, else false. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +bool dfs_is_legacy_precac_enabled(struct wlan_dfs *dfs); +#else +static inline bool dfs_is_legacy_precac_enabled(struct wlan_dfs *dfs) +{ + return 0; +} +#endif + +/** + * dfs_is_agile_precac_enabled() - Check if agile preCAC is enabled for the DFS. + * @dfs: Pointer to the wlan_dfs object. + * + * Return: True if agile DFS is enabled, else false. + * + * For agile preCAC to be enabled, + * 1. User configuration should be set. + * 2. Target should support aDFS. + */ +#ifdef QCA_SUPPORT_AGILE_DFS +bool dfs_is_agile_precac_enabled(struct wlan_dfs *dfs); +#else +static inline bool dfs_is_agile_precac_enabled(struct wlan_dfs *dfs) +{ + return false; +} +#endif + +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT +/** + * dfs_set_precac_intermediate_chan() - Set intermediate chan to be used while + * doing precac. + * @dfs: Pointer to wlan_dfs structure. + * @value: input value for dfs_legacy_precac_ucfg flag. + * + * Return: + * * 0 - Successfully set intermediate channel. + * * -EINVAL - Invalid channel. + */ +int32_t dfs_set_precac_intermediate_chan(struct wlan_dfs *dfs, + uint32_t value); +#else +static inline int32_t dfs_set_precac_intermediate_chan(struct wlan_dfs *dfs, + uint32_t value) +{ + return 0; +} +#endif + +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT +/** + * dfs_get_precac_intermediate_chan() - Get configured precac + * intermediate channel. + * @dfs: Pointer to wlan_dfs structure. + * + * Return: Configured intermediate channel number. + */ +uint32_t dfs_get_precac_intermediate_chan(struct wlan_dfs *dfs); +#else +static inline uint32_t dfs_get_intermediate_chan(struct wlan_dfs *dfs) +{ + return 0; +} +#endif + +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT +/** + * dfs_get_precac_chan_state() - Get precac status of a given channel. + * @dfs: Pointer to wlan_dfs structure. + * @precac_chan: Channel number for which precac state need to be checked. + * + * Return: + * * PRECAC_REQUIRED: Precac has not done on precac_chan. + * * PRECAC_NOW : Precac is running on precac_chan. + * * PRECAC_DONE : precac_chan is in CAC done state in precac list. + * * PRECAC_NOL : precac_chan is in NOL state in precac list. + * * PRECAC_ERR : Invalid precac state. + */ +enum precac_chan_state +dfs_get_precac_chan_state(struct wlan_dfs *dfs, uint8_t precac_chan); + +/** + * dfs_get_precac_chan_state_for_freq() - Get precac status of a given channel. + * @dfs: Pointer to wlan_dfs structure. + * @precac_chan: Channel freq for which precac state need to be checked. + */ + +#ifdef CONFIG_CHAN_FREQ_API +enum precac_chan_state +dfs_get_precac_chan_state_for_freq(struct wlan_dfs *dfs, + uint16_t precac_chan_freq); +#endif + +#else + +#ifdef CONFIG_CHAN_NUM_API +static inline enum precac_chan_state +dfs_get_precac_chan_state(struct wlan_dfs *dfs, + uint8_t precac_chan) +{ + return PRECAC_REQUIRED; +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +static inline enum precac_chan_state +dfs_get_precac_chan_state_for_freq(struct wlan_dfs *dfs, + uint16_t precac_chan_freq) +{ + return PRECAC_REQUIRED; +} +#endif +#endif + +/** + * dfs_zero_cac_reset() - Reset Zero cac DFS variables. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_zero_cac_reset(struct wlan_dfs *dfs); + +/** + * dfs_reinit_precac_lists() - Reinit DFS preCAC lists. + * @src_dfs: Source DFS from which the preCAC list is copied. + * @dest_dfs: Destination DFS to which the preCAC list is copied. + * @low_5g_freq: Low 5G frequency value of the destination DFS. + * @high_5g_freq: High 5G frequency value of the destination DFS. + * + * Copy all the preCAC list entries from the source DFS to the destination DFS + * which fall within the frequency range of low_5g_freq and high_5g_freq. + * + * Return: None (void). + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +void dfs_reinit_precac_lists(struct wlan_dfs *src_dfs, + struct wlan_dfs *dest_dfs, + uint16_t low_5g_freq, + uint16_t high_5g_freq); +#else +static inline void dfs_reinit_precac_lists(struct wlan_dfs *src_dfs, + struct wlan_dfs *dest_dfs, + uint16_t low_5g_freq, + uint16_t high_5g_freq) +{ +} +#endif + +/** + * dfs_is_precac_done_on_ht20_40_80_chan() - Is precac done on a + * VHT20/40/80 channel. + *@dfs: Pointer to wlan_dfs structure. + *@chan: Channel IEEE value. + * + * Return: + * * True: If CAC is done on channel. + * * False: If CAC is not done on channel. + */ +#ifdef CONFIG_CHAN_NUM_API +bool dfs_is_precac_done_on_ht20_40_80_chan(struct wlan_dfs *dfs, + uint8_t chan); +#endif + +/** + * dfs_is_precac_done_on_ht20_40_80_chan_for_freq() - Is precac done on a + * VHT20/40/80 channel. + *@dfs: Pointer to wlan_dfs structure. + *@chan: Channel frequency + * + * Return: + * * True: If CAC is done on channel. + * * False: If CAC is not done on channel. + */ +#ifdef CONFIG_CHAN_FREQ_API +bool dfs_is_precac_done_on_ht20_40_80_chan_for_freq(struct wlan_dfs *dfs, + uint16_t chan_freq); +#endif + +/** + * dfs_is_precac_done_on_ht8080_ht160_chan() - Is precac done on + * VHT80+80 or VHT160 + * channel. + * @dfs: Pointer to wlan_dfs structure. + * @chan: Pointer to dfs_channel for which preCAC done is checked. + * + * Return: + * * True: If CAC is done on channel. + * * False: If CAC is not done on channel. + */ +bool dfs_is_precac_done_on_ht8080_ht160_chan(struct wlan_dfs *dfs, + struct dfs_channel *chan); + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +/** + * dfs_find_chwidth_and_center_chan() - Find the channel width enum and + * primary and secondary center channel + * value of the current channel. + * @dfs: Pointer to wlan_dfs structure. + * @chwidth: Channel width enum of current channel. + * @primary_chan_ieee: Primary IEEE channel. + * @secondary_chan_ieee: Secondary IEEE channel (in HT80_80 mode). + */ +#ifdef CONFIG_CHAN_NUM_API +void dfs_find_chwidth_and_center_chan(struct wlan_dfs *dfs, + enum phy_ch_width *chwidth, + uint8_t *primary_chan_ieee, + uint8_t *secondary_chan_ieee); + +#endif + +#ifdef CONFIG_CHAN_FREQ_API +/** + * dfs_find_chwidth_and_center_chan_for_freq() - Find the channel width enum and + * primary and secondary center channel + * value of the current channel. + * @dfs: Pointer to wlan_dfs structure. + * @chwidth: Channel width enum of current channel. + * @primary_chan_freq: Primary IEEE channel freq. + * @secondary_chan_freq: Secondary IEEE channel freq (in HT80_80 mode). + */ +void dfs_find_chwidth_and_center_chan_for_freq(struct wlan_dfs *dfs, + enum phy_ch_width *chwidth, + uint16_t *primary_chan_freq, + uint16_t *secondary_chan_freq); +#endif + +/** + * dfs_mark_precac_done() - Mark the channel as preCAC done. + * @dfs: Pointer to wlan_dfs structure. + * @pri_ch_ieee: Primary channel IEEE. + * @sec_ch_ieee: Secondary channel IEEE (only in HT80_80 mode). + * @ch_width: Channel width enum. + */ +#ifdef CONFIG_CHAN_NUM_API +void dfs_mark_precac_done(struct wlan_dfs *dfs, + uint8_t pri_ch_ieee, + uint8_t sec_ch_ieee, + enum phy_ch_width ch_width); +#endif + +#ifdef CONFIG_CHAN_FREQ_API +/** + * dfs_mark_precac_done_for_freq() - Mark the channel as preCAC done. + * @dfs: Pointer to wlan_dfs structure. + * @pri_chan_freq: Primary channel IEEE freq. + * @sec_chan_freq: Secondary channel IEEE freq(only in HT80_80 mode). + * @chan_width: Channel width enum. + */ +void dfs_mark_precac_done_for_freq(struct wlan_dfs *dfs, + uint16_t pri_chan_freq, + uint16_t sec_chan_freq, + enum phy_ch_width chan_width); +#endif + +/** + * dfs_mark_precac_nol() - Mark the precac channel as radar. + * @dfs: Pointer to wlan_dfs structure. + * @is_radar_found_on_secondary_seg: Radar found on secondary seg for Cascade. + * @detector_id: detector id which found RADAR in HW. + * @channels: Array of radar found subchannels. + * @num_channels: Number of radar found subchannels. + */ +#ifdef CONFIG_CHAN_NUM_API +void dfs_mark_precac_nol(struct wlan_dfs *dfs, + uint8_t is_radar_found_on_secondary_seg, + uint8_t detector_id, + uint8_t *channels, + uint8_t num_channels); +#endif + +/** + * dfs_mark_precac_nol_for_freq() - Mark the precac channel as radar. + * @dfs: Pointer to wlan_dfs structure. + * @is_radar_found_on_secondary_seg: Radar found on secondary seg for Cascade. + * @detector_id: detector id which found RADAR in HW. + * @freq_list: Array of radar found frequencies. + * @num_channels: Number of radar found subchannels. + */ +#ifdef CONFIG_CHAN_FREQ_API +void dfs_mark_precac_nol_for_freq(struct wlan_dfs *dfs, + uint8_t is_radar_found_on_secondary_seg, + uint8_t detector_id, + uint16_t *freq_list, + uint8_t num_channels); +#endif + +/** + * dfs_unmark_precac_nol() - Unmark the precac channel as radar. + * @dfs: Pointer to wlan_dfs structure. + * @channel: channel marked as radar. + */ +#ifdef CONFIG_CHAN_NUM_API +void dfs_unmark_precac_nol(struct wlan_dfs *dfs, uint8_t channel); +#endif + +/** + * dfs_unmark_precac_nol_for_freq() - Unmark the precac channel as radar. + * @dfs: Pointer to wlan_dfs structure. + * @channel: channel freq marked as radar. + */ +#ifdef CONFIG_CHAN_FREQ_API +void dfs_unmark_precac_nol_for_freq(struct wlan_dfs *dfs, uint16_t chan_freq); +#endif + +#else +#ifdef CONFIG_CHAN_NUM_API +static inline void +dfs_find_chwidth_and_center_chan(struct wlan_dfs *dfs, + enum phy_ch_width *chwidth, + uint8_t *primary_chan_ieee, + uint8_t *secondary_chan_ieee) +{ +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +static inline void +dfs_find_chwidth_and_center_chan_for_freq(struct wlan_dfs *dfs, + enum phy_ch_width *chwidth, + uint16_t *primary_chan_freq, + uint16_t *secondary_chan_freq) +{ +} +#endif + +#ifdef CONFIG_CHAN_NUM_API +static inline void dfs_mark_precac_done(struct wlan_dfs *dfs, + uint8_t pri_ch_ieee, + uint8_t sec_ch_ieee, + enum phy_ch_width ch_width) +{ +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +static inline void dfs_mark_precac_done_for_freq(struct wlan_dfs *dfs, + uint16_t pri_chan_freq, + uint16_t sec_chan_freq, + enum phy_ch_width chan_width) +{ +} +#endif + +#ifdef CONFIG_CHAN_NUM_API +static inline void dfs_mark_precac_nol(struct wlan_dfs *dfs, + uint8_t is_radar_found_on_secondary_seg, + uint8_t detector_id, + uint8_t *channels, + uint8_t num_channels) +{ +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +static inline void +dfs_mark_precac_nol_for_freq(struct wlan_dfs *dfs, + uint8_t is_radar_found_on_secondary_seg, + uint8_t detector_id, + uint16_t *freq, + uint8_t num_channels) +{ +} +#endif + +#ifdef CONFIG_CHAN_NUM_API +static inline void dfs_unmark_precac_nol(struct wlan_dfs *dfs, uint8_t channel) +{ +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +static inline void dfs_unmark_precac_nol_for_freq(struct wlan_dfs *dfs, + uint16_t chan_freq) +{ +} +#endif +#endif + +/** + * dfs_is_precac_timer_running() - Check whether precac timer is running. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && !defined(QCA_MCL_DFS_SUPPORT) +bool dfs_is_precac_timer_running(struct wlan_dfs *dfs); +#else +static inline bool dfs_is_precac_timer_running(struct wlan_dfs *dfs) +{ + return false; +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +#define VHT160_FREQ_DIFF 80 + +#define INITIAL_20_CHAN_FREQ_OFFSET -30 +#define INITIAL_40_CHAN_FREQ_OFFSET -20 +#define INITIAL_80_CHAN_FREQ_OFFSET 0 + +#define NEXT_20_CHAN_FREQ_OFFSET 20 +#define NEXT_40_CHAN_FREQ_OFFSET 40 +#define NEXT_80_CHAN_FREQ_OFFSET 80 + +#define WEATHER_CHAN_START_FREQ 5600 +#define WEATHER_CHAN_END_FREQ 5640 + +#endif + +#endif /* _DFS_ZERO_CAC_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_bindetects.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_bindetects.c new file mode 100644 index 0000000000000000000000000000000000000000..81db766c57bb30e3dc54e86a655fa5c6469c39eb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_bindetects.c @@ -0,0 +1,1044 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: DFS specs specify various types of radars to be detected. + * Each separate type is called a Bin and has different characteristics. + * This file contains the functionality to look at a group of pulses and + * to detect whether we have detected a valid radar waveform. To do that, + * it must match the group against each different Bin's characteristics. + */ + +#include "../dfs.h" +#include "../dfs_process_radar_found_ind.h" + +/** + * dfs_find_first_index_within_window() - Find first index within window + * @pl: Pointer to dfs_pulseline structure. + * @index: Index to dfs pulse elements. + * @start_ts: Start timestamp. + * + * Return: Returns index. + */ +static inline uint32_t dfs_find_first_index_within_window( + struct dfs_pulseline *pl, + uint32_t index, + uint64_t start_ts) +{ + uint16_t i; + + /* Find the index of first element in our window of interest. */ + for (i = 0; i < pl->pl_numelems; i++) { + index = (index - 1) & DFS_MAX_PULSE_BUFFER_MASK; + if (pl->pl_elems[index].p_time >= start_ts) { + continue; + } else { + index = (index) & DFS_MAX_PULSE_BUFFER_MASK; + break; + } + } + + return index; +} + +/** + * dfs_ts_within_window() - Calculate pulses for timestamp within window + * @dfs: Pointer to wlan_dfs structure. + * @pl: Pointer to dfs_pulseline structure. + * @index: Index to dfs pulse elements. + * @dur: Pulse duration/width + * @numpulses: Number of pulses + * + * Return: Returns 1 if pulse count is incremented else returns 0. + */ +static inline bool dfs_ts_within_window( + struct wlan_dfs *dfs, + struct dfs_pulseline *pl, + uint32_t *index, + uint32_t dur, + int *numpulses) +{ + uint32_t deltadur; + + deltadur = DFS_DIFF(pl->pl_elems[*index].p_dur, dur); + if ((pl->pl_elems[*index].p_dur == 1) || + ((dur != 1) && (deltadur <= 2))) { + (*numpulses)++; + dfs_debug(dfs, WLAN_DEBUG_DFS2, "numpulses %u", *numpulses); + return 1; + } + + return 0; +} + +/** + * dfs_ts_eq_prevts() - Calculate pulses for timestamp equals to prev event + * @dfs: Pointer to wlan_dfs structure. + * @pl: Pointer to dfs_pulseline structure. + * @index: Index to dfs pulse elements. + * @dur: Pulse duration/width + * @numpulses: Number of pulses + * + * Return: Returns 1 if pulse count is incremented else returns 0. + */ +static inline bool dfs_ts_eq_prevts( + struct wlan_dfs *dfs, + struct dfs_pulseline *pl, + uint64_t next_event_ts, + uint64_t event_ts, + uint32_t refpri, + uint32_t *index, + uint32_t dur, + int *numpulses) + +{ + uint32_t deltadur; + + if (((next_event_ts - event_ts) > refpri) || + ((next_event_ts - event_ts) == 0)) { + deltadur = DFS_DIFF(pl->pl_elems[*index].p_dur, dur); + if ((pl->pl_elems[*index].p_dur == 1) || + ((pl->pl_elems[*index].p_dur != 1) && + (deltadur <= 2))) { + (*numpulses)++; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "zero PRI: numpulses %u", *numpulses); + return 1; + } + } + + return 0; +} + +/** + * dfs_pulses_within_window() - Calculate pulses within window + * @dfs: Pointer to wlan_dfs structure. + * @window_start: Start of the window. + * @window_end: End of the window. + * @index: Index to dfs pulse elements. + * @dur: Pulse duration/width. + * @refpri: reference PRI. + * + * Return: Returns 1 if pulse count is incremented else returns 0. + */ +static inline int dfs_pulses_within_window( + struct wlan_dfs *dfs, + uint64_t window_start, + uint64_t window_end, + uint32_t *index, + uint32_t dur, + uint32_t refpri) +{ + int numpulses = 0; + uint32_t i; + struct dfs_pulseline *pl = dfs->pulses; + uint64_t event_ts, prev_event_ts, next_event_ts; + uint32_t next_index; + + for (i = 0; i < pl->pl_numelems; i++) { + prev_event_ts = pl->pl_elems[*index].p_time; + *index = (*index+1) & DFS_MAX_PULSE_BUFFER_MASK; + event_ts = pl->pl_elems[*index].p_time; + next_index = (*index+1) & DFS_MAX_PULSE_BUFFER_MASK; + next_event_ts = pl->pl_elems[next_index].p_time; + dfs_debug(dfs, WLAN_DEBUG_DFS2, "ts %u", + (uint32_t)event_ts); + + if ((event_ts <= window_end) && (event_ts >= window_start)) { + if (dfs_ts_within_window(dfs, pl, index, dur, + &numpulses)) + break; + } else if (event_ts > window_end) { + *index = (*index-1) & DFS_MAX_PULSE_BUFFER_MASK; + break; + } else if (event_ts == prev_event_ts) { + if (dfs_ts_eq_prevts(dfs, pl, next_event_ts, event_ts, + refpri, index, dur, &numpulses)) + break; + } + if (dfs->dfs_min_sidx > pl->pl_elems[*index].p_sidx) + dfs->dfs_min_sidx = pl->pl_elems[*index].p_sidx; + + if (dfs->dfs_max_sidx < pl->pl_elems[*index].p_sidx) + dfs->dfs_max_sidx = pl->pl_elems[*index].p_sidx; + } + + dfs->dfs_freq_offset = + DFS_SIDX_TO_FREQ_OFFSET((dfs->dfs_min_sidx + + dfs->dfs_min_sidx) / 2); + return numpulses; +} + +/** + * dfs_count_pulses() - Count pulses + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @dur: Pulse duration/width. + * @ext_chan_flag : Ext channel flag. + * @primargin: Primary margin. + * @index: Index to dfs pulse elements. + * @refpri: reference PRI. + * @start_ts: Start timestamp. + * + * Return: Returns number of pulses within window. + */ +static inline int dfs_count_pulses( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + uint32_t dur, + int ext_chan_flag, + int primargin, + uint32_t index, + uint32_t refpri, + uint64_t start_ts) +{ + uint32_t n; + int numpulses = 0; + uint64_t window_start, window_end; + + for (n = 0; n <= rf->rf_numpulses; n++) { + window_start = (start_ts + (refpri*n))-(primargin+n); + window_end = window_start + 2*(primargin+n); + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "window_start %u window_end %u", + (uint32_t)window_start, (uint32_t)window_end); + numpulses += dfs_pulses_within_window(dfs, window_start, + window_end, &index, dur, refpri); + } + + return numpulses; +} + +/** + * dfs_bin_fixedpattern_check() - Fixed pattern check + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @dur: Pulse duration/width. + * @ext_chan_flag : Ext channel flag. + */ +static int dfs_bin_fixedpattern_check( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + uint32_t dur, + int ext_chan_flag) +{ + struct dfs_pulseline *pl = dfs->pulses; + int primargin, numpulses, fil_thresh; + uint64_t start_ts, end_ts; + uint32_t last_index, first_index; + uint32_t refpri; + + refpri = (rf->rf_minpri + rf->rf_maxpri)/2; + last_index = pl->pl_lastelem; + end_ts = pl->pl_elems[last_index].p_time; + start_ts = end_ts - (refpri*rf->rf_numpulses); + + dfs_debug(dfs, WLAN_DEBUG_DFS3, + "lastelem ts=%llu start_ts=%llu, end_ts=%llu", + (unsigned long long)pl->pl_elems[last_index].p_time, + (unsigned long long)start_ts, + (unsigned long long) end_ts); + + first_index = dfs_find_first_index_within_window(pl, last_index, + start_ts); + + /* For fixed pattern types, rf->rf_patterntype=1. */ + primargin = dfs_get_pri_margin(dfs, ext_chan_flag, + (rf->rf_patterntype == 1)); + + numpulses = dfs_count_pulses(dfs, rf, dur, ext_chan_flag, primargin, + first_index, refpri, start_ts); + + fil_thresh = dfs_get_filter_threshold(dfs, rf, ext_chan_flag); + + if (numpulses >= fil_thresh) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "FOUND filterID=%u numpulses=%d unadj thresh=%d", + rf->rf_pulseid, numpulses, rf->rf_threshold); + return 1; + } else { + return 0; + } +} + +void dfs_add_pulse( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + struct dfs_event *re, + uint32_t deltaT, + uint64_t this_ts) +{ + uint32_t index, n, window; + struct dfs_delayline *dl; + + dl = &rf->rf_dl; + /* Circular buffer of size 2^n */ + index = (dl->dl_lastelem + 1) & DFS_MAX_DL_MASK; + if ((dl->dl_numelems) == DFS_MAX_DL_SIZE) + dl->dl_firstelem = (dl->dl_firstelem + 1) & DFS_MAX_DL_MASK; + else + dl->dl_numelems++; + dl->dl_lastelem = index; + dl->dl_elems[index].de_time = deltaT; + dl->dl_elems[index].de_ts = this_ts; + window = deltaT; + dl->dl_elems[index].de_dur = re->re_dur; + dl->dl_elems[index].de_rssi = re->re_rssi; + dl->dl_elems[index].de_seg_id = re->re_seg_id; + dl->dl_elems[index].de_sidx = re->re_sidx; + dl->dl_elems[index].de_delta_peak = re->re_delta_peak; + dl->dl_elems[index].de_psidx_diff = re->re_psidx_diff; + dl->dl_elems[index].de_seq_num = dfs->dfs_seq_num; + + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "adding: filter id %d, dur=%d, rssi=%d, ts=%llu", + rf->rf_pulseid, re->re_dur, + re->re_rssi, (unsigned long long int)this_ts); + + for (n = 0; n < dl->dl_numelems-1; n++) { + index = (index-1) & DFS_MAX_DL_MASK; + /* + * Calculate window based on full time stamp instead of deltaT + * deltaT (de_time) may result in incorrect window value + */ + window = (uint32_t) (this_ts - dl->dl_elems[index].de_ts); + + if (window > rf->rf_filterlen) { + dl->dl_firstelem = (index+1) & DFS_MAX_DL_MASK; + dl->dl_numelems = n+1; + } + } + + dfs_debug(dfs, WLAN_DEBUG_DFS2, "dl firstElem = %d lastElem = %d", + dl->dl_firstelem, dl->dl_lastelem); +} + +/** + * dfs_find_lowestpri() - Find lowest PRI + * @dl: Pointer to dfs delayline. + * @lowpriindex: Low PRI index. + * @lowpri: Low PRI + */ +static inline void dfs_find_lowestpri( + struct dfs_delayline *dl, + uint32_t *lowpriindex, + uint32_t *lowpri) +{ + int delayindex; + uint32_t refpri; + uint32_t n; + + /* Find out the lowest pri. */ + for (n = 0; n < dl->dl_numelems; n++) { + delayindex = (dl->dl_firstelem + n) & DFS_MAX_DL_MASK; + refpri = dl->dl_elems[delayindex].de_time; + if (refpri == 0) { + continue; + } else if (refpri < *lowpri) { + *lowpri = dl->dl_elems[delayindex].de_time; + *lowpriindex = n; + } + } +} + +/** + * dfs_calculate_score() - Calculate score for the score index + * if PRI match is found + * @dl: Pointer to dfs delayline. + * @rf: Pointer to dfs_filter structure. + * @score: score array. + * @refpri: reference PRI. + * @primargin: PRI margin. + * @score_index: Score index. + */ +static inline void dfs_calculate_score( + struct dfs_delayline *dl, + struct dfs_filter *rf, + int *score, + uint32_t refpri, + uint32_t primargin, + uint32_t score_index) +{ + int pri_match = 0; + int dindex; + uint32_t searchpri, deltapri, deltapri_2, deltapri_3; + uint32_t i; + + for (i = 0; i < dl->dl_numelems; i++) { + dindex = (dl->dl_firstelem + i) & DFS_MAX_DL_MASK; + searchpri = dl->dl_elems[dindex].de_time; + deltapri = DFS_DIFF(searchpri, refpri); + deltapri_2 = DFS_DIFF(searchpri, 2*refpri); + deltapri_3 = DFS_DIFF(searchpri, 3*refpri); + if (rf->rf_ignore_pri_window == 2) + pri_match = ((deltapri < primargin) || + (deltapri_2 < primargin) || + (deltapri_3 < primargin)); + else + pri_match = (deltapri < primargin); + + if (pri_match) + score[score_index]++; + } +} + +/** + * dfs_find_priscores() - Find PRI score + * @dl: Pointer to dfs delayline. + * @rf: Pointer to dfs_filter structure. + * @score: score array. + * @primargin: PRI margin. + */ +static void dfs_find_priscores( + struct dfs_delayline *dl, + struct dfs_filter *rf, + int *score, + uint32_t primargin) +{ + int delayindex; + uint32_t refpri; + uint32_t n; + + qdf_mem_zero(score, sizeof(int)*DFS_MAX_DL_SIZE); + + for (n = 0; n < dl->dl_numelems; n++) { + delayindex = (dl->dl_firstelem + n) & DFS_MAX_DL_MASK; + refpri = dl->dl_elems[delayindex].de_time; + if (refpri == 0) + continue; + if (refpri < rf->rf_maxpri) { + /* Use only valid PRI range for high score. */ + dfs_calculate_score(dl, rf, score, refpri, primargin, + n); + } else { + score[n] = 0; + } + + if (score[n] > rf->rf_threshold) { + /* + * We got the most possible candidate, + * no need to continue further. + */ + break; + } + } +} + +/** + * dfs_find_highscore() - Find PRI high score + * @dl: Pointer to dfs delayline. + * @score: score array. + * @highscore: High score. + * @highscoreindex: High score index. + */ +static inline void dfs_find_highscore( + struct dfs_delayline *dl, + int *score, + uint32_t *highscore, + uint32_t *highscoreindex) +{ + int delayindex, dindex; + uint32_t n; + + *highscore = 0; + *highscoreindex = 0; + + for (n = 0; n < dl->dl_numelems; n++) { + if (score[n] > *highscore) { + *highscore = score[n]; + *highscoreindex = n; + } else if (score[n] == *highscore) { + /* + * More than one pri has highscore take the least pri. + */ + delayindex = (dl->dl_firstelem + *highscoreindex) & + DFS_MAX_DL_MASK; + dindex = (dl->dl_firstelem + n) & DFS_MAX_DL_MASK; + if (dl->dl_elems[dindex].de_time <= + dl->dl_elems[delayindex].de_time) { + *highscoreindex = n; + } + } + } + + return; +} + +/** + * dfs_get_durmargin() - Find duration margin + * @rf: Pointer to dfs_filter structure. + * @durmargin: Duration margin + */ +static inline void dfs_get_durmargin( + struct dfs_filter *rf, + uint32_t *durmargin) +{ +#define DUR_THRESH 10 +#define LOW_MARGIN 4 +#define HIGH_MARGIN 6 + + if (rf->rf_maxdur < DUR_THRESH) + *durmargin = LOW_MARGIN; + else + *durmargin = HIGH_MARGIN; + +#undef DUR_THRESH +#undef LOW_MARGIN +#undef HIGH_MARGIN +} + +/** + * dfs_handle_fixedpattern() - Handle Fixed pattern radar + * @dfs: Pointer to wlan_dfs structure. + * @dl: Pointer to dfs delayline. + * @rf: Pointer to dfs_filter structure. + * @dur: Pulse duration/width + * @ext_chan_flag : Ext channel flag. + */ +static inline int dfs_handle_fixedpattern( + struct wlan_dfs *dfs, + struct dfs_delayline *dl, + struct dfs_filter *rf, + uint32_t dur, + int ext_chan_flag) +{ + int found = 0; + + found = dfs_bin_fixedpattern_check(dfs, rf, dur, ext_chan_flag); + if (found) + dl->dl_numelems = 0; + + return found; +} + +/** + * dfs_bin_basic_sanity() - Sanity check + * @dl: Pointer to dfs delayline. + * @rf: Pointer to dfs_filter structure. + * @deltaT: Delta time. + */ +static inline int dfs_bin_basic_sanity( + struct dfs_delayline *dl, + struct dfs_filter *rf, + uint32_t *deltaT) +{ + if (dl->dl_numelems < (rf->rf_threshold-1)) + return 0; + + if (*deltaT > rf->rf_filterlen) + return 0; + + return 1; +} + +/** + * dfs_pick_lowpri() - Pick lowpri as refpri + * @dfs: Pointer to wlan_dfs structure. + * @dl: Pointer to dfs delayline. + * @rf: Pointer to dfs_filter structure. + * @lowpriindex: Low PRI index. + * @scoreindex: score index. + * @primargin: PRI margin. + */ +#ifdef DFS_PRI_MULTIPLIER +static inline void dfs_pick_lowpri(struct wlan_dfs *dfs, + struct dfs_delayline *dl, + struct dfs_filter *rf, + uint32_t lowpriindex, + uint32_t *scoreindex, + uint32_t primargin) +{ + uint32_t candidate_refpri, deltapri, lowpri; + uint32_t dindex_candidate, dindex_lowpri; + uint32_t i; + + dindex_candidate = (dl->dl_firstelem + *scoreindex) & DFS_MAX_DL_MASK; + dindex_lowpri = (dl->dl_firstelem + lowpriindex) & DFS_MAX_DL_MASK; + + candidate_refpri = dl->dl_elems[dindex_candidate].de_time; + lowpri = dl->dl_elems[dindex_lowpri].de_time; + + if (rf->rf_ignore_pri_window == 0 && + candidate_refpri != lowpri) { + for (i = 1; i <= dfs->dfs_pri_multiplier; i++) { + deltapri = DFS_DIFF(candidate_refpri, i * lowpri); + if (deltapri < primargin) { + *scoreindex = lowpriindex; + break; + } + } + } +} +#else +static inline void dfs_pick_lowpri(struct wlan_dfs *dfs, + struct dfs_delayline *dl, + struct dfs_filter *rf, + uint32_t lowpriindex, + uint32_t *scoreindex, + uint32_t primargin) +{ +} +#endif + +/** + * dfs_find_scoreindex() - Find score index + * @rf: Pointer to dfs_filter structure. + * @highscore: High score. + * @lowpriindex: Low PRI index. + * @highscoreindex: High score index. + * @scoreindex: score index. + */ +static inline void dfs_find_scoreindex( + struct dfs_filter *rf, + uint32_t highscore, + uint32_t lowpriindex, + uint32_t highscoreindex, + uint32_t *scoreindex) +{ + int lowprichk = 3; + + if (rf->rf_ignore_pri_window > 0) + lowprichk = (rf->rf_threshold >> 1)+1; + else + lowprichk = 3; + + if (highscore < lowprichk) + *scoreindex = lowpriindex; + else + *scoreindex = highscoreindex; +} + +/** + * dfs_find_refs() - Find reference values. + * @dl: Pointer to dfs delayline. + * @rf: Pointer to dfs_filter structure. + * @scoreindex: score index. + * @refdur: Duration value. + * @refpri: Current "filter" time for start of pulse in usecs. + */ +static inline void dfs_find_refs( + struct dfs_delayline *dl, + struct dfs_filter *rf, + uint32_t scoreindex, + uint32_t *refdur, + uint32_t *refpri) +{ + int delayindex; + + delayindex = (dl->dl_firstelem + scoreindex) & DFS_MAX_DL_MASK; + *refdur = dl->dl_elems[delayindex].de_dur; + *refpri = dl->dl_elems[delayindex].de_time; + + if (rf->rf_fixed_pri_radar_pulse) + *refpri = (rf->rf_minpri + rf->rf_maxpri)/2; +} + +/** + * dfs_bin_success_print() - Debug print + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @ext_chan_flag: Extension channel flag. + * @numpulses: Number of pulses. + * @refpri: Current "filter" time for start of pulse in usecs. + * @refdur: Duration value. + * @primargin: PRI margin. + */ +static inline void dfs_bin_success_print( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + int ext_chan_flag, + int numpulses, + uint32_t refpri, + uint32_t refdur, + uint32_t primargin) +{ + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "ext_flag=%d MATCH filter=%u numpulses=%u thresh=%u refdur=%d refpri=%d primargin=%d", + ext_chan_flag, rf->rf_pulseid, numpulses, + rf->rf_threshold, refdur, refpri, primargin); + dfs_print_delayline(dfs, &rf->rf_dl); + dfs_print_filter(dfs, rf); +} + +int dfs_bin_check( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + uint32_t deltaT, + uint32_t width, + int ext_chan_flag) +{ + struct dfs_delayline *dl; + uint32_t refpri, refdur; + uint32_t highscoreindex; + uint32_t primargin, highscore; + int score[DFS_MAX_DL_SIZE], found = 0; + uint32_t scoreindex, lowpriindex = 0, lowpri = 0xffff; + int numpulses = 0; + int fil_thresh; + + dl = &rf->rf_dl; + if (!dfs_bin_basic_sanity(dl, rf, &deltaT)) + return 0; + + primargin = dfs_get_pri_margin(dfs, ext_chan_flag, + (rf->rf_patterntype == 1)); + + + if (rf->rf_patterntype == 1) + return dfs_handle_fixedpattern(dfs, dl, rf, width, + ext_chan_flag); + + dfs_find_lowestpri(dl, &lowpriindex, &lowpri); + + /* Find out the each delay element's pri score. */ + dfs_find_priscores(dl, rf, score, primargin); + + /* Find out the high scorer. */ + dfs_find_highscore(dl, score, &highscore, &highscoreindex); + + /* + * Find the average pri of pulses around the pri of highscore + * or the pulses around the lowest pri. + */ + dfs_find_scoreindex(rf, highscore, lowpriindex, highscoreindex, + &scoreindex); + + /* + * Observed ETSI type2 while channel loading 31% with pulse pri: + * 1489, 2978, 2978, 2978, 1489, 2978, 1489 us. With above logic, + * the highscore will be 4 (2978), scoreindex is 5. In this case, + * index 0, 4, 6 pulses will be not matched later in + * dfs_count_the_other_delay_elements(), which leads to the radar was + * not detected. The fix is: compare the highscore pri with lowpri, + * if they have relationship, within primargin of + * [1, dfs_pri_multiplier] times of lowpri, choose lowpri as refpri. + */ + dfs_pick_lowpri(dfs, dl, rf, lowpriindex, &scoreindex, primargin); + + /* We got the possible pri, save its parameters as reference. */ + dfs_find_refs(dl, rf, scoreindex, &refdur, &refpri); + + numpulses = dfs_bin_pri_check(dfs, rf, dl, score[scoreindex], refpri, + refdur, ext_chan_flag, refpri); + + fil_thresh = dfs_get_filter_threshold(dfs, rf, ext_chan_flag); + + if (numpulses >= fil_thresh) { + found = 1; + dfs_bin_success_print(dfs, rf, ext_chan_flag, numpulses, + refpri, refdur, primargin); + } + + return found; +} + +/** + * dfs_update_min_and_max_sidx() - Calculate min and max sidx. + * @dl: Pointer to dfs_delayline structure. + * @delayindex: Delay index. + * @sidx_min: Sidx min. + * @sidx_max: Sidx max. + * @delta_peak_match_count: Delta peak match count. + * @psidx_diff_match_count: Psidx diff match count. + * @rf: Pointer to dfs_filter structure. + */ +static inline void dfs_update_min_and_max_sidx( + struct dfs_delayline *dl, + int delayindex, + int32_t *sidx_min, + int32_t *sidx_max, + uint8_t *delta_peak_match_count, + uint8_t *psidx_diff_match_count, + struct dfs_filter *rf) +{ + /* update sidx min/max for false detection check later */ + if (*sidx_min > dl->dl_elems[delayindex].de_sidx) + *sidx_min = dl->dl_elems[delayindex].de_sidx; + + if (*sidx_max < dl->dl_elems[delayindex].de_sidx) + *sidx_max = dl->dl_elems[delayindex].de_sidx; + + if (rf->rf_check_delta_peak) { + if (dl->dl_elems[delayindex].de_delta_peak != 0) + (*delta_peak_match_count)++; + else if ((dl->dl_elems[delayindex].de_psidx_diff >= + DFS_MIN_PSIDX_DIFF) && + (dl->dl_elems[delayindex].de_psidx_diff <= + DFS_MAX_PSIDX_DIFF)) + (*psidx_diff_match_count)++; + } +} + +/** + * dfs_check_pulses_for_delta_variance() - Check pulses for delta variance. + * @rf: Pointer to dfs_filter structure. + * @numpulsetochk: Number of pulses to check. + * @delta_time_stamps: Delta time stamp. + * @fundamentalpri: Highest PRI. + * @primargin: Primary margin. + * @numpulses: Number of pulses. + * @delayindex: Delay index. + * @sidx_min: Sidx min. + * @sidx_max: Sidx max. + * @delta_peak_match_count: Delta peak match count. + * @psidx_diff_match_count: Psidx diff match count. + * @dl: Pointer to dfs_delayline structure. + */ +static inline void dfs_check_pulses_for_delta_variance( + struct dfs_filter *rf, + int numpulsetochk, + uint32_t delta_time_stamps, + int fundamentalpri, + uint32_t primargin, + int *numpulses, + int delayindex, + int32_t *sidx_min, + int32_t *sidx_max, + uint8_t *delta_peak_match_count, + uint8_t *psidx_diff_match_count, + struct dfs_delayline *dl) +{ + uint32_t delta_ts_variance, j; + + for (j = 0; j < numpulsetochk; j++) { + delta_ts_variance = DFS_DIFF(delta_time_stamps, + ((j + 1) * fundamentalpri)); + if (delta_ts_variance < (2 * (j + 1) * primargin)) { + dl->dl_seq_num_stop = + dl->dl_elems[delayindex].de_seq_num; + dfs_update_min_and_max_sidx(dl, delayindex, + sidx_min, sidx_max, + delta_peak_match_count, + psidx_diff_match_count, + rf); + (*numpulses)++; + if (rf->rf_ignore_pri_window > 0) + break; + } + } +} + +/** + * dfs_count_the_other_delay_elements() - Counts the ther delay elements. + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @dl: Pointer to dfs_delayline structure. + * @i: Index value. + * @refpri: Current "filter" time for start of pulse in usecs. + * @refdur: Duration value. + * @primargin: Primary margin. + * @durmargin: Duration margin. + * @numpulses: Number of pulses. + * @delta_peak_match_count: Pointer to delta_peak_match_count. + * @psidx_diff_match_count: Pointer to psidx_diff_match_count. + * @prev_good_timestamp: Previous good timestamp. + * @fundamentalpri: Highest PRI. + */ +static void dfs_count_the_other_delay_elements( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + struct dfs_delayline *dl, + uint32_t i, + uint32_t refpri, + uint32_t refdur, + uint32_t primargin, + uint32_t durmargin, + int *numpulses, + uint8_t *delta_peak_match_count, + uint8_t *psidx_diff_match_count, + uint32_t *prev_good_timestamp, + int fundamentalpri) +{ + int delayindex; + uint32_t searchpri, searchdur, deltadur; + uint32_t j = 0, delta_time_stamps, deltapri, k; + int dindex, primatch, numpulsetochk = 2; + int32_t sidx_min = DFS_BIG_SIDX; + int32_t sidx_max = -DFS_BIG_SIDX; + + delayindex = (dl->dl_firstelem + i) & DFS_MAX_DL_MASK; + searchpri = dl->dl_elems[delayindex].de_time; + if (searchpri == 0) { + /* + * This events PRI is zero, take it as a valid pulse + * but decrement next event's PRI by refpri. + */ + dindex = (delayindex + 1) & DFS_MAX_DL_MASK; + dl->dl_elems[dindex].de_time -= refpri; + searchpri = refpri; + } + + searchdur = dl->dl_elems[delayindex].de_dur; + deltadur = DFS_DIFF(searchdur, refdur); + deltapri = DFS_DIFF(searchpri, refpri); + primatch = 0; + + if ((rf->rf_ignore_pri_window > 0) && (rf->rf_patterntype != 2)) { + for (j = 0; j < rf->rf_numpulses; j++) { + deltapri = DFS_DIFF(searchpri, (j + 1) * refpri); + if (deltapri < (2 * primargin)) { + primatch = 1; + break; + } + } + } else if (rf->rf_patterntype == 2) { + primatch = 1; + } else { + for (k = 1; k <= dfs->dfs_pri_multiplier; k++) { + deltapri = DFS_DIFF(searchpri, k * refpri); + if (deltapri < primargin) { + primatch = 1; + break; + } + } + } + + if (primatch && (deltadur < durmargin)) { + if (*numpulses == 1) { + dl->dl_seq_num_second = + dl->dl_elems[delayindex].de_seq_num; + dfs_update_min_and_max_sidx(dl, delayindex, + &sidx_min, &sidx_max, + delta_peak_match_count, + psidx_diff_match_count, + rf); + (*numpulses)++; + } else { + delta_time_stamps = (dl->dl_elems[delayindex].de_ts - + *prev_good_timestamp); + if ((rf->rf_ignore_pri_window > 0)) { + numpulsetochk = rf->rf_numpulses; + if ((rf->rf_patterntype == 2) && + (fundamentalpri < refpri + 100)) { + numpulsetochk = 4; + } + } else { + numpulsetochk = 4; + } + + dfs_check_pulses_for_delta_variance(rf, numpulsetochk, + delta_time_stamps, fundamentalpri, + primargin, numpulses, delayindex, + &sidx_min, &sidx_max, + delta_peak_match_count, + psidx_diff_match_count, + dl); + } + *prev_good_timestamp = dl->dl_elems[delayindex].de_ts; + dl->dl_search_pri = searchpri; + dl->dl_min_sidx = sidx_min; + dl->dl_max_sidx = sidx_max; + dl->dl_delta_peak_match_count = *delta_peak_match_count; + dl->dl_psidx_diff_match_count = *psidx_diff_match_count; + + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "rf->minpri=%d rf->maxpri=%d searchpri = %d index = %d numpulses = %d delta peak match count = %d psidx diff match count = %d deltapri=%d j=%d", + rf->rf_minpri, rf->rf_maxpri, searchpri, i, + *numpulses, *delta_peak_match_count, + *psidx_diff_match_count, deltapri, j); + } +} + +int dfs_bin_pri_check( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + struct dfs_delayline *dl, + uint32_t score, + uint32_t refpri, + uint32_t refdur, + int ext_chan_flag, + int fundamentalpri) +{ + uint32_t searchpri, deltapri = 0; + uint32_t averagerefpri = 0, MatchCount = 0; + uint32_t prev_good_timestamp = 0; + int dindex; + uint32_t i, primargin, durmargin, highscore = score; + uint32_t highscoreindex = 0; + /* + * First pulse in the burst is most likely being filtered out based on + * maxfilterlen. + */ + int numpulses = 1; + uint8_t delta_peak_match_count = 1; + uint8_t psidx_diff_match_count = 1; + int priscorechk = 1; + + /* Use the adjusted PRI margin to reduce false alarms + * For non fixed pattern types, rf->rf_patterntype=0. + */ + primargin = dfs_get_pri_margin(dfs, ext_chan_flag, + (rf->rf_patterntype == 1)); + + if ((refpri > rf->rf_maxpri) || (refpri < rf->rf_minpri)) { + numpulses = 0; + return numpulses; + } + + dfs_get_durmargin(rf, &durmargin); + + if ((!rf->rf_fixed_pri_radar_pulse)) { + if (rf->rf_ignore_pri_window == 1) + priscorechk = (rf->rf_threshold >> 1); + else + priscorechk = 1; + + MatchCount = 0; + if (score > priscorechk) { + for (i = 0; i < dl->dl_numelems; i++) { + dindex = (dl->dl_firstelem + i) & + DFS_MAX_DL_MASK; + searchpri = dl->dl_elems[dindex].de_time; + deltapri = DFS_DIFF(searchpri, refpri); + if (deltapri < primargin) { + averagerefpri += searchpri; + MatchCount++; + } + } + if (rf->rf_patterntype != 2) { + if (MatchCount > 0) + refpri = (averagerefpri / MatchCount); + } else { + refpri = (averagerefpri / score); + } + } + } + + /* Note: Following primultiple calculation should be done + * once per filter during initialization stage (dfs_attach) + * and stored in its array atleast for fixed frequency + * types like FCC Bin1 to save some CPU cycles. + * multiplication, devide operators in the following code + * are left as it is for readability hoping the complier + * will use left/right shifts wherever possible. + */ + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "refpri = %d high score = %d index = %d numpulses = %d", + refpri, highscore, highscoreindex, numpulses); + /* + * Count the other delay elements that have pri and dur with + * in the acceptable range from the reference one. + */ + for (i = 0; i < dl->dl_numelems; i++) + dfs_count_the_other_delay_elements(dfs, rf, dl, i, refpri, + refdur, primargin, durmargin, &numpulses, + &delta_peak_match_count, + &psidx_diff_match_count, + &prev_good_timestamp, fundamentalpri); + + return numpulses; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_debug.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_debug.c new file mode 100644 index 0000000000000000000000000000000000000000..6b42b529eafd9caa38607eed9e73dab6be4fda8f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_debug.c @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2013, 2016-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: It contains useful print functions that can be used for debug. + * Add all debug related functionality into this file. + */ +#include "../dfs.h" +#include "wlan_dfs_lmac_api.h" + +void dfs_print_delayline(struct wlan_dfs *dfs, struct dfs_delayline *dl) +{ + int i = 0, index; + struct dfs_delayelem *de; + + index = dl->dl_firstelem; + for (i = 0; i < dl->dl_numelems; i++) { + de = &dl->dl_elems[index]; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "Elem %u: ts=%llu diff_ts=%u (0x%x) dur=%u, seg_id=%d sidx=%d delta_peak=%d psidx_diff=%d seq_num=%d", + i, de->de_ts, de->de_time, de->de_time, + de->de_dur, de->de_seg_id, de->de_sidx, + de->de_delta_peak, de->de_psidx_diff, + de->de_seq_num); + + index = (index + 1) & DFS_MAX_DL_MASK; + } +} + +void dfs_print_filter(struct wlan_dfs *dfs, struct dfs_filter *rf) +{ + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "filterID[%d] rf_numpulses=%u; rf->rf_minpri=%u; rf->rf_maxpri=%u; rf->rf_threshold=%u; rf->rf_filterlen=%u; rf->rf_mindur=%u; rf->rf_maxdur=%u", + rf->rf_pulseid, rf->rf_numpulses, rf->rf_minpri, rf->rf_maxpri, + rf->rf_threshold, rf->rf_filterlen, rf->rf_mindur, + rf->rf_maxdur); +} + +/** + * dfs_print_filtertype() - Print the filtertype + * @dfs: Pointer to wlan_dfs structure. + * @ft: Pointer to dfs_filtertype structure. + */ +static void dfs_print_filtertype( + struct wlan_dfs *dfs, + struct dfs_filtertype *ft) +{ + uint32_t j; + struct dfs_filter *rf; + + for (j = 0; j < ft->ft_numfilters; j++) { + rf = ft->ft_filters[j]; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "filter[%d] filterID = %d rf_numpulses=%u; rf->rf_minpri=%u; rf->rf_maxpri=%u; rf->rf_threshold=%u; rf->rf_filterlen=%u; rf->rf_mindur=%u; rf->rf_maxdur=%u", + j, rf->rf_pulseid, rf->rf_numpulses, + rf->rf_minpri, rf->rf_maxpri, + rf->rf_threshold, rf->rf_filterlen, + rf->rf_mindur, rf->rf_maxdur); + } +} + +void dfs_print_filters(struct wlan_dfs *dfs) +{ + struct dfs_filtertype *ft = NULL; + uint8_t i; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + for (i = 0; i < DFS_MAX_RADAR_TYPES; i++) { + if (dfs->dfs_radarf[i]) { + ft = dfs->dfs_radarf[i]; + if ((ft->ft_numfilters > DFS_MAX_NUM_RADAR_FILTERS) || + (!ft->ft_numfilters)) { + continue; + } + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "===========ft->ft_numfilters = %u===========", + ft->ft_numfilters); + dfs_print_filtertype(dfs, ft); + } + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_fcc_bin5.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_fcc_bin5.c new file mode 100644 index 0000000000000000000000000000000000000000..14b319a7ad9d55853e1c05918abd4b2935320546 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_fcc_bin5.c @@ -0,0 +1,836 @@ +/* + * Copyright (c) 2013, 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: FCC Bin5 are special type of radars because they "chirp". Basically the + * pulses move across the frequency band and are called chirping pulses. + * dfs_check_chirping() actually examines the FFT data contained in the PHY + * error information to figure out whether the pulse is moving across + * frequencies. + */ + +#include "../dfs.h" +#include "wlan_dfs_mlme_api.h" +#include "../dfs_channel.h" + +int dfs_bin5_check_pulse(struct wlan_dfs *dfs, struct dfs_event *re, + struct dfs_bin5radars *br) +{ + int b5_rssithresh = br->br_pulse.b5_rssithresh; + + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_PULSE, + "re_dur=%d, rssi=%d, check_chirp=%d, hw_chirp=%d, sw_chirp=%d", + (int)re->re_dur, (int)re->re_rssi, + !!(re->re_flags & DFS_EVENT_CHECKCHIRP), + !!(re->re_flags & DFS_EVENT_HW_CHIRP), + !!(re->re_flags & DFS_EVENT_SW_CHIRP)); + + /* If the SW/HW chirp detection says to fail the pulse,do so. */ + if (DFS_EVENT_NOTCHIRP(re)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, + "rejecting chirp: ts=%llu, dur=%d, rssi=%d checkchirp=%d, hwchirp=%d, swchirp=%d", + (unsigned long long)re->re_full_ts, + (int)re->re_dur, (int)re->re_rssi, + !!(re->re_flags & DFS_EVENT_CHECKCHIRP), + !!(re->re_flags & DFS_EVENT_HW_CHIRP), + !!(re->re_flags & DFS_EVENT_SW_CHIRP)); + + return 0; + } + +#define CHANNEL_TURBO 0x00010 + /* Adjust the filter threshold for rssi in non TURBO mode. */ + if (!(dfs->dfs_curchan->dfs_ch_flags & CHANNEL_TURBO)) + b5_rssithresh += br->br_pulse.b5_rssimargin; + + /* Check if the pulse is within duration and rssi thresholds. */ + if ((re->re_dur >= br->br_pulse.b5_mindur) && + (re->re_dur <= br->br_pulse.b5_maxdur) && + (re->re_rssi >= b5_rssithresh)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, + "dur=%d, rssi=%d - adding!", + (int)re->re_dur, (int)re->re_rssi); + return 1; + } + + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, + "too low to be Bin5 pulse tsf=%llu, dur=%d, rssi=%d", + (unsigned long long)re->re_full_ts, + (int)re->re_dur, (int)re->re_rssi); + + return 0; +} + +int dfs_bin5_addpulse(struct wlan_dfs *dfs, + struct dfs_bin5radars *br, + struct dfs_event *re, + uint64_t thists) +{ + uint32_t index, stop; + uint64_t tsDelta; + + /* + * Check if this pulse is a valid pulse in terms of repetition, + * if not, return without adding it to the queue. PRI : Pulse + * Repitetion Interval. + * BRI : Burst Repitetion Interval. + */ + if (br->br_numelems != 0) { + index = br->br_lastelem; + tsDelta = thists - br->br_elems[index].be_ts; + if ((tsDelta < DFS_BIN5_PRI_LOWER_LIMIT) || + ((tsDelta > DFS_BIN5_PRI_HIGHER_LIMIT) && + (tsDelta < DFS_BIN5_BRI_LOWER_LIMIT))) { + return 0; + } + } + + if (dfs->dfs_min_sidx > re->re_sidx) + dfs->dfs_min_sidx = re->re_sidx; + + if (dfs->dfs_max_sidx < re->re_sidx) + dfs->dfs_max_sidx = re->re_sidx; + /* Circular buffer of size 2^n. */ + index = (br->br_lastelem + 1) & DFS_MAX_B5_MASK; + br->br_lastelem = index; + if (br->br_numelems == DFS_MAX_B5_SIZE) + br->br_firstelem = (br->br_firstelem + 1) & DFS_MAX_B5_MASK; + else + br->br_numelems++; + + br->br_elems[index].be_ts = thists; + br->br_elems[index].be_rssi = re->re_rssi; + br->br_elems[index].be_dur = re->re_dur; /* This is in u-sec */ + stop = 0; + index = br->br_firstelem; + while ((!stop) && (br->br_numelems - 1) > 0) { + if ((thists - br->br_elems[index].be_ts) > + ((uint64_t)br->br_pulse.b5_timewindow)) { + br->br_numelems--; + br->br_firstelem = + (br->br_firstelem + 1) & DFS_MAX_B5_MASK; + index = br->br_firstelem; + } else { + stop = 1; + } + } + + return 1; +} + +/** + * dfs_calculate_bursts_for_same_rssi() - Calculate bursts for same rssi. + * @dfs: Pointer to wlan_dfs structure. + * @br: Pointer to dfs_bin5radars structure. + * @bursts: Bursts. + * @numevents: Number of events. + * @prev: prev index. + * @this: index to br_elems[]. + * @index: index array. + */ +static inline void dfs_calculate_bursts_for_same_rssi( + struct wlan_dfs *dfs, + struct dfs_bin5radars *br, + uint32_t *bursts, + uint32_t *numevents, + uint32_t prev, + uint32_t this, + int *index) +{ + uint32_t rssi_diff; + + if (br->br_elems[this].be_rssi >= br->br_elems[prev].be_rssi) + rssi_diff = (br->br_elems[this].be_rssi - + br->br_elems[prev].be_rssi); + else + rssi_diff = (br->br_elems[prev].be_rssi - + br->br_elems[this].be_rssi); + + if (rssi_diff <= DFS_BIN5_RSSI_MARGIN) { + (*bursts)++; + /* + * Save the indexes of this pair for later + * width variance check. + */ + if ((*numevents) >= 2) { + /* + * Make sure the event is not duplicated, possible in + * a 3 pulse burst. + */ + if (index[(*numevents)-1] != prev) + index[(*numevents)++] = prev; + } else { + index[(*numevents)++] = prev; + } + + index[(*numevents)++] = this; + } else { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, + "Bin5 rssi_diff=%d", rssi_diff); + } +} + +void bin5_rules_check_internal(struct wlan_dfs *dfs, + struct dfs_bin5radars *br, + uint32_t *bursts, + uint32_t *numevents, + uint32_t prev, + uint32_t i, + uint32_t this, + int *index) +{ + uint64_t pri = 0; + uint32_t width_diff = 0; + + /* Rule 1: 1000 <= PRI <= 2000 + some margin. */ + if (br->br_elems[this].be_ts >= br->br_elems[prev].be_ts) { + pri = br->br_elems[this].be_ts - br->br_elems[prev].be_ts; + } else { + /* Roll over case */ + pri = br->br_elems[this].be_ts; + } + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, + " pri=%llu this.ts=%llu this.dur=%d this.rssi=%d prev.ts=%llu", + (uint64_t)pri, + (uint64_t) br->br_elems[this].be_ts, + (int) br->br_elems[this].be_dur, + (int) br->br_elems[this].be_rssi, + (uint64_t)br->br_elems[prev].be_ts); + + if (((pri >= DFS_BIN5_PRI_LOWER_LIMIT) && + /*pri: pulse repitition interval in us. */ + (pri <= DFS_BIN5_PRI_HIGHER_LIMIT))) { + /* + * Rule 2: pulse width of the pulses in the + * burst should be same (+/- margin). + */ + if (br->br_elems[this].be_dur >= br->br_elems[prev].be_dur) { + width_diff = (br->br_elems[this].be_dur + - br->br_elems[prev].be_dur); + } else { + width_diff = (br->br_elems[prev].be_dur + - br->br_elems[this].be_dur); + } + + if (width_diff <= DFS_BIN5_WIDTH_MARGIN) + /* + * Rule 3: RSSI of the pulses in the + * burst should be same (+/- margin) + */ + dfs_calculate_bursts_for_same_rssi(dfs, br, bursts, + numevents, prev, this, index); + else + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, + "Bin5 width_diff=%d", width_diff); + } else if ((pri >= DFS_BIN5_BRI_LOWER_LIMIT) && + (pri <= DFS_BIN5_BRI_UPPER_LIMIT)) { + /* Check pulse width to make sure it is in range of bin 5. */ + (*bursts)++; + } else{ + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, + "Bin5 PRI check fail pri=%llu", (uint64_t)pri); + } +} + +int dfs_bin5_check(struct wlan_dfs *dfs) +{ + struct dfs_bin5radars *br; + uint32_t n = 0, i = 0, i1 = 0, this = 0, prev = 0; + uint32_t bursts = 0, total_diff = 0, average_diff = 0; + uint32_t total_width = 0, average_width = 0, numevents = 0; + int index[DFS_MAX_B5_SIZE]; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return 1; + } + + for (n = 0; n < dfs->dfs_rinfo.rn_numbin5radars; n++) { + br = &(dfs->dfs_b5radars[n]); + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, "Num elems = %d", + br->br_numelems); + + /* Find a valid bin 5 pulse and use it as reference. */ + for (i1 = 0; i1 < br->br_numelems; i1++) { + this = ((br->br_firstelem + i1) & DFS_MAX_B5_MASK); + if ((br->br_elems[this].be_dur >= MIN_BIN5_DUR_MICROSEC) + && (br->br_elems[this].be_dur <= + MAX_BIN5_DUR_MICROSEC)) { + break; + } + } + + prev = this; + for (i = i1 + 1; i < br->br_numelems; i++) { + this = ((br->br_firstelem + i) & DFS_MAX_B5_MASK); + /* + * First make sure it is a bin 5 pulse by checking + * the duration. + */ + if ((br->br_elems[this].be_dur < MIN_BIN5_DUR_MICROSEC) + || (br->br_elems[this].be_dur > + MAX_BIN5_DUR_MICROSEC)) { + continue; + } + bin5_rules_check_internal(dfs, br, &bursts, &numevents, + prev, i, this, index); + prev = this; + } + + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, + "bursts=%u numevents=%u", bursts, numevents); + if (bursts >= br->br_pulse.b5_threshold) { + if ((br->br_elems[br->br_lastelem].be_ts - + br->br_elems[br->br_firstelem].be_ts) < + 3000000) + return 0; + + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, + "bursts=%u numevents=%u total_width=%d average_width=%d total_diff=%d average_diff=%d", + bursts, numevents, total_width, + average_width, total_diff, + average_diff); + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "bin 5 radar detected, bursts=%d", + bursts); + return 1; + } + } + + return 0; +} + +/** + * dfs_check_chirping_sowl() - Chirp detection for Sowl/Howl. + * @dfs: Pointer to wlan_dfs structure. + * @buf: Phyerr buffer. + * @datalen: Phyerr buf length + * @is_ctl: detected on primary channel. + * @is_ext: detected on extension channel. + * @slope: Slope + * @is_dc: DC found + * + * Return: Return TRUE if chirping pulse, FALSE if not. Decision is made + * based on processing the FFT data included with the PHY error. + * Calculate the slope using the maximum bin index reported in + * the FFT data. Calculate slope between FFT packet 0 and packet + * n-1. Also calculate slope between packet 1 and packet n. If a + * pulse is chirping, a slope of 5 and greater is seen. + * Non-chirping pulses have slopes of 0, 1, 2 or 3. + */ +static int dfs_check_chirping_sowl(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + int is_ctl, + int is_ext, + int *slope, + int *is_dc) +{ +#define FFT_LEN 70 +#define FFT_LOWER_BIN_MAX_INDEX_BYTE 66 +#define FFT_UPPER_BIN_MAX_INDEX_BYTE 69 +#define MIN_CHIRPING_SLOPE 4 + int is_chirp = 0; + int p, num_fft_packets = 0; + int ctl_slope = 0, ext_slope = 0; + int ctl_high0 = 0, ctl_low0 = 0, ctl_slope0 = 0; + int ext_high0 = 0, ext_low0 = 0, ext_slope0 = 0; + int ctl_high1 = 0, ctl_low1 = 0, ctl_slope1 = 0; + int ext_high1 = 0, ext_low1 = 0, ext_slope1 = 0; + uint8_t *fft_data_ptr; + + *slope = 0; + *is_dc = 0; + num_fft_packets = datalen / FFT_LEN; + fft_data_ptr = (uint8_t *)buf; + + /* DEBUG - Print relevant portions of the FFT data. */ + for (p = 0; p < num_fft_packets; p++) { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT, + "fft_data_ptr=0x%pK\t", fft_data_ptr); + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT, + "[66]=%d [69]=%d", + *(fft_data_ptr + FFT_LOWER_BIN_MAX_INDEX_BYTE) >> 2, + *(fft_data_ptr + FFT_UPPER_BIN_MAX_INDEX_BYTE) >> 2); + fft_data_ptr += FFT_LEN; + } + + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT, + "datalen=%d num_fft_packets=%d", datalen, num_fft_packets); + + /* + * There is not enough FFT data to figure out whether the pulse + * is chirping or not. + */ + if (num_fft_packets < 4) + return 0; + + fft_data_ptr = (uint8_t *)buf; + + if (is_ctl) { + fft_data_ptr = (uint8_t *)buf; + ctl_low0 = *(fft_data_ptr + FFT_LOWER_BIN_MAX_INDEX_BYTE) >> 2; + fft_data_ptr += FFT_LEN; + ctl_low1 = *(fft_data_ptr + FFT_LOWER_BIN_MAX_INDEX_BYTE) >> 2; + + /* Last packet with first packet. */ + fft_data_ptr = + (uint8_t *)buf + (FFT_LEN * (num_fft_packets - 1)); + ctl_high1 = *(fft_data_ptr + FFT_LOWER_BIN_MAX_INDEX_BYTE) >> 2; + + /* Second last packet with 0th packet. */ + fft_data_ptr = + (uint8_t *)buf + (FFT_LEN * (num_fft_packets - 2)); + ctl_high0 = *(fft_data_ptr + FFT_LOWER_BIN_MAX_INDEX_BYTE) >> 2; + + ctl_slope0 = ctl_high0 - ctl_low0; + if (ctl_slope0 < 0) + ctl_slope0 *= (-1); + + ctl_slope1 = ctl_high1 - ctl_low1; + if (ctl_slope1 < 0) + ctl_slope1 *= (-1); + + ctl_slope = + ((ctl_slope0 > ctl_slope1) ? ctl_slope0 : ctl_slope1); + *slope = ctl_slope; + + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT, + "ctl_slope0=%d ctl_slope1=%d ctl_slope=%d", + ctl_slope0, ctl_slope1, ctl_slope); + } else if (is_ext) { + fft_data_ptr = (uint8_t *)buf; + ext_low0 = *(fft_data_ptr + FFT_UPPER_BIN_MAX_INDEX_BYTE) >> 2; + + fft_data_ptr += FFT_LEN; + ext_low1 = *(fft_data_ptr + FFT_UPPER_BIN_MAX_INDEX_BYTE) >> 2; + + fft_data_ptr = + (uint8_t *)buf + (FFT_LEN * (num_fft_packets - 1)); + ext_high1 = *(fft_data_ptr + FFT_UPPER_BIN_MAX_INDEX_BYTE) >> 2; + fft_data_ptr = + (uint8_t *)buf + (FFT_LEN * (num_fft_packets - 2)); + + ext_high0 = *(fft_data_ptr + FFT_UPPER_BIN_MAX_INDEX_BYTE) >> 2; + + ext_slope0 = ext_high0 - ext_low0; + if (ext_slope0 < 0) + ext_slope0 *= (-1); + + ext_slope1 = ext_high1 - ext_low1; + if (ext_slope1 < 0) + ext_slope1 *= (-1); + + ext_slope = ((ext_slope0 > ext_slope1) ? + ext_slope0 : ext_slope1); + *slope = ext_slope; + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT | WLAN_DEBUG_DFS_BIN5, + "ext_slope0=%d ext_slope1=%d ext_slope=%d", + ext_slope0, ext_slope1, ext_slope); + } else + return 0; + + if ((ctl_slope >= MIN_CHIRPING_SLOPE) || + (ext_slope >= MIN_CHIRPING_SLOPE)) { + is_chirp = 1; + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5 | WLAN_DEBUG_DFS_BIN5_FFT | + WLAN_DEBUG_DFS_PHYERR_SUM, "is_chirp=%d is_dc=%d", + is_chirp, *is_dc); + } + + return is_chirp; + +#undef FFT_LEN +#undef FFT_LOWER_BIN_MAX_INDEX_BYTE +#undef FFT_UPPER_BIN_MAX_INDEX_BYTE +#undef MIN_CHIRPING_SLOPE +} + +/** + * dfs_check_chirping_merlin() - Merlin (and Osprey, etc) chirp radar chirp + * detection. + * @dfs: Pointer to wlan_dfs structure. + * @buf: Phyerr buffer + * @datalen: Phyerr buf length + * @is_ctl: detected on primary channel. + * @is_ext: detected on extension channel. + * @slope: Slope + * @is_dc: DC found + */ +static int dfs_check_chirping_merlin(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + int is_ctl, + int is_ext, + int *slope, + int *is_dc) +{ +#define ABS_DIFF(_x, _y) ((int)_x > (int)_y ? (int)_x - (int)_y : \ + (int)_y - (int)_x) +#define ABS(_x) ((int)_x > 0 ? (int)_x : -(int)_x) + /* This should be between 1 and 3. Default is 1. */ +#define DELTA_STEP 1 + /* Number of Diffs to compute. valid range is 2-4. */ +#define NUM_DIFFS 3 + /* Threshold for difference of delta peaks. */ +#define MAX_DIFF 2 + /* Max. number of strong bins for narrow band. */ +#define BIN_COUNT_MAX 6 + + /* Dynamic 20/40 mode FFT packet format related definition. */ +#define NUM_FFT_BYTES_HT40 70 +#define NUM_BIN_BYTES_HT40 64 +#define NUM_SUBCHAN_BINS_HT40 64 +#define LOWER_INDEX_BYTE_HT40 66 +#define UPPER_INDEX_BYTE_HT40 69 +#define LOWER_WEIGHT_BYTE_HT40 64 +#define UPPER_WEIGHT_BYTE_HT40 67 +#define LOWER_MAG_BYTE_HT40 65 +#define UPPER_MAG_BYTE_HT40 68 + + /* Static 20 mode FFT packet format related definition. */ +#define NUM_FFT_BYTES_HT20 31 +#define NUM_BIN_BYTES_HT20 28 +#define NUM_SUBCHAN_BINS_HT20 56 +#define LOWER_INDEX_BYTE_HT20 30 +#define UPPER_INDEX_BYTE_HT20 30 +#define LOWER_WEIGHT_BYTE_HT20 28 +#define UPPER_WEIGHT_BYTE_HT20 28 +#define LOWER_MAG_BYTE_HT20 29 +#define UPPER_MAG_BYTE_HT20 29 + + int num_fft_packets; /* number of FFT packets reported to software */ + int num_fft_bytes; + int num_bin_bytes; + int num_subchan_bins; + int lower_index_byte; + int upper_index_byte; + int lower_weight_byte; + int upper_weight_byte; + int lower_mag_byte; + int upper_mag_byte; + int max_index_lower[DELTA_STEP + NUM_DIFFS]; + int max_index_upper[DELTA_STEP + NUM_DIFFS]; + int max_mag_lower[DELTA_STEP + NUM_DIFFS]; + int max_mag_upper[DELTA_STEP + NUM_DIFFS]; + int bin_wt_lower[DELTA_STEP + NUM_DIFFS]; + int bin_wt_upper[DELTA_STEP + NUM_DIFFS]; + int max_mag_sel[DELTA_STEP + NUM_DIFFS]; + int max_mag[DELTA_STEP + NUM_DIFFS]; + int max_index[DELTA_STEP + NUM_DIFFS]; + int max_d[] = {10, 19, 28}; + int min_d[] = {1, 2, 3}; + uint8_t *ptr; /* pointer to FFT data */ + int i; + int fft_start; + int chirp_found; + int delta_peak[NUM_DIFFS]; + int j; + int bin_count; + int bw_mask; + int delta_diff; + int same_sign; + int temp; + + if (WLAN_IS_CHAN_11N_HT40(dfs->dfs_curchan)) { + num_fft_bytes = NUM_FFT_BYTES_HT40; + num_bin_bytes = NUM_BIN_BYTES_HT40; + num_subchan_bins = NUM_SUBCHAN_BINS_HT40; + lower_index_byte = LOWER_INDEX_BYTE_HT40; + upper_index_byte = UPPER_INDEX_BYTE_HT40; + lower_weight_byte = LOWER_WEIGHT_BYTE_HT40; + upper_weight_byte = UPPER_WEIGHT_BYTE_HT40; + lower_mag_byte = LOWER_MAG_BYTE_HT40; + upper_mag_byte = UPPER_MAG_BYTE_HT40; + + /* If we are in HT40MINUS then swap primary and extension. */ + if (WLAN_IS_CHAN_11N_HT40MINUS(dfs->dfs_curchan)) { + temp = is_ctl; + is_ctl = is_ext; + is_ext = temp; + } + } else { + num_fft_bytes = NUM_FFT_BYTES_HT20; + num_bin_bytes = NUM_BIN_BYTES_HT20; + num_subchan_bins = NUM_SUBCHAN_BINS_HT20; + lower_index_byte = LOWER_INDEX_BYTE_HT20; + upper_index_byte = UPPER_INDEX_BYTE_HT20; + lower_weight_byte = LOWER_WEIGHT_BYTE_HT20; + upper_weight_byte = UPPER_WEIGHT_BYTE_HT20; + lower_mag_byte = LOWER_MAG_BYTE_HT20; + upper_mag_byte = UPPER_MAG_BYTE_HT20; + } + + ptr = (uint8_t *)buf; + /* Sanity check for FFT buffer. */ + if (!ptr || (datalen == 0)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT, + "FFT buffer pointer is null or size is 0"); + return 0; + } + + num_fft_packets = (datalen - 3) / num_fft_bytes; + if (num_fft_packets < (NUM_DIFFS + DELTA_STEP)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT, + "datalen = %d, num_fft_packets = %d, too few packets... (exiting)", + datalen, num_fft_packets); + return 0; + } + + if ((((datalen - 3) % num_fft_bytes) == 2) && + (datalen > num_fft_bytes)) { + ptr += 2; + datalen -= 2; + } + + for (i = 0; i < (NUM_DIFFS + DELTA_STEP); i++) { + fft_start = i * num_fft_bytes; + bin_wt_lower[i] = ptr[fft_start + lower_weight_byte] & 0x3f; + bin_wt_upper[i] = ptr[fft_start + upper_weight_byte] & 0x3f; + max_index_lower[i] = ptr[fft_start + lower_index_byte] >> 2; + max_index_upper[i] = (ptr[fft_start + upper_index_byte] >> 2) + + num_subchan_bins; + + if (!WLAN_IS_CHAN_11N_HT40(dfs->dfs_curchan)) { + /* For HT20 mode indices are 6 bit signed number. */ + max_index_lower[i] ^= 0x20; + max_index_upper[i] = 0; + } + + /* + * Reconstruct the maximum magnitude for each sub-channel. + * Also select and flag the max overall magnitude between + * the two sub-channels. + */ + + max_mag_lower[i] = + ((ptr[fft_start + lower_index_byte] & 0x03) << 8) + + ptr[fft_start + lower_mag_byte]; + max_mag_upper[i] = + ((ptr[fft_start + upper_index_byte] & 0x03) << 8) + + ptr[fft_start + upper_mag_byte]; + bw_mask = ((bin_wt_lower[i] == 0) ? 0 : is_ctl) + + (((bin_wt_upper[i] == 0) ? 0 : is_ext) << 1); + + /* + * Limit the max bin based on channel bandwidth + * If the upper sub-channel max index is stuck at '1', + * the signal is dominated * by residual DC + * (or carrier leak) and should be ignored. + */ + + if (bw_mask == 1) { + max_mag_sel[i] = 0; + max_mag[i] = max_mag_lower[i]; + max_index[i] = max_index_lower[i]; + } else if (bw_mask == 2) { + max_mag_sel[i] = 1; + max_mag[i] = max_mag_upper[i]; + max_index[i] = max_index_upper[i]; + } else if (max_index_upper[i] == num_subchan_bins) { + max_mag_sel[i] = 0; /* Ignore DC bin. */ + max_mag[i] = max_mag_lower[i]; + max_index[i] = max_index_lower[i]; + } else { + if (max_mag_upper[i] > max_mag_lower[i]) { + max_mag_sel[i] = 1; + max_mag[i] = max_mag_upper[i]; + max_index[i] = max_index_upper[i]; + } else { + max_mag_sel[i] = 0; + max_mag[i] = max_mag_lower[i]; + max_index[i] = max_index_lower[i]; + } + } + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT, + "i=%d, max_index[i]=%d, max_index_lower[i]=%d, max_index_upper[i]=%d", + i, max_index[i], max_index_lower[i], + max_index_upper[i]); + } + + chirp_found = 1; + delta_diff = 0; + same_sign = 1; + + /* + * delta_diff computation -- look for movement in peak. + * make sure that the chirp direction (i.e. sign) is + * always the same, i.e. sign of the two peaks should + * be same. + */ + for (i = 0; i < NUM_DIFFS; i++) { + delta_peak[i] = max_index[i + DELTA_STEP] - max_index[i]; + if (i > 0) { + delta_diff = delta_peak[i] - delta_peak[i-1]; + same_sign = !((delta_peak[i] & 0x80) ^ + (delta_peak[i-1] & 0x80)); + } + chirp_found &= + (ABS(delta_peak[i]) >= min_d[DELTA_STEP - 1]) && + (ABS(delta_peak[i]) <= max_d[DELTA_STEP - 1]) && + same_sign && (ABS(delta_diff) <= MAX_DIFF); + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT, + "i=%d, delta_peak[i]=%d, delta_diff=%d", + i, delta_peak[i], delta_diff); + } + + if (chirp_found) { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT, + "CHIRPING_BEFORE_STRONGBIN_YES"); + } else { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT, + "CHIRPING_BEFORE_STRONGBIN_NO"); + } + + /* + * Work around for potential hardware data corruption bug. + * Check for wide band signal by counting strong bins + * indicated by bitmap flags. This check is done if + * chirp_found is true. We do this as a final check to + * weed out corrupt FFTs bytes. This looks expensive but + * in most cases it will exit early. + */ + + for (i = 0; (i < (NUM_DIFFS + DELTA_STEP)) && + (chirp_found == 1); i++) { + bin_count = 0; + /* + * Point to the start of the 1st byte of the selected + * sub-channel. + */ + fft_start = (i * num_fft_bytes) + (max_mag_sel[i] ? + (num_subchan_bins >> 1) : 0); + for (j = 0; j < (num_subchan_bins >> 1); j++) { + /* + * If either bin is flagged "strong", accumulate + * the bin_count. It's not accurate, but good + * enough... + */ + bin_count += (ptr[fft_start + j] & 0x88) ? 1 : 0; + } + chirp_found &= (bin_count > BIN_COUNT_MAX) ? 0 : 1; + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT, + "i=%d, computed bin_count=%d", + i, bin_count); + } + + if (chirp_found) { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT | + WLAN_DEBUG_DFS_PHYERR_SUM, + "CHIRPING_YES"); + } else { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT | + WLAN_DEBUG_DFS_PHYERR_SUM, + "CHIRPING_NO"); + } + + return chirp_found; +#undef ABS_DIFF +#undef ABS +#undef DELTA_STEP +#undef NUM_DIFFS +#undef MAX_DIFF +#undef BIN_COUNT_MAX + +#undef NUM_FFT_BYTES_HT40 +#undef NUM_BIN_BYTES_HT40 +#undef NUM_SUBCHAN_BINS_HT40 +#undef LOWER_INDEX_BYTE_HT40 +#undef UPPER_INDEX_BYTE_HT40 +#undef LOWER_WEIGHT_BYTE_HT40 +#undef UPPER_WEIGHT_BYTE_HT40 +#undef LOWER_MAG_BYTE_HT40 +#undef UPPER_MAG_BYTE_HT40 + +#undef NUM_FFT_BYTES_HT40 +#undef NUM_BIN_BYTES_HT40 +#undef NUM_SUBCHAN_BINS_HT40 +#undef LOWER_INDEX_BYTE_HT40 +#undef UPPER_INDEX_BYTE_HT40 +#undef LOWER_WEIGHT_BYTE_HT40 +#undef UPPER_WEIGHT_BYTE_HT40 +#undef LOWER_MAG_BYTE_HT40 +#undef UPPER_MAG_BYTE_HT40 +} + +int dfs_check_chirping(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + int is_ctl, + int is_ext, + int *slope, + int *is_dc) +{ + if (dfs->dfs_caps.wlan_dfs_use_enhancement) { + return dfs_check_chirping_merlin(dfs, buf, datalen, is_ctl, + is_ext, slope, is_dc); + } else { + return dfs_check_chirping_sowl(dfs, buf, datalen, is_ctl, + is_ext, slope, is_dc); + } +} + +uint8_t dfs_retain_bin5_burst_pattern(struct wlan_dfs *dfs, + uint32_t diff_ts, + uint8_t old_dur) +{ + /* + * Pulses may get split into 2 during chirping, this print + * is only to show that it happened, we do not handle this + * condition if we cannot detect the chirping. + */ + /* SPLIT pulses will have a time stamp difference of < 50 */ + if (diff_ts < 50) { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, + "SPLIT pulse diffTs=%u dur=%d (old_dur=%d)", + diff_ts, + dfs->dfs_rinfo.dfs_last_bin5_dur, old_dur); + } + + /* + * Check if this is the 2nd or 3rd pulse in the same burst, + * PRI will be between 1000 and 2000 us. + */ + if (((diff_ts >= DFS_BIN5_PRI_LOWER_LIMIT) && + (diff_ts <= DFS_BIN5_PRI_HIGHER_LIMIT))) { + /* + * This pulse belongs to the same burst as the pulse before, + * so return the same random duration for it. + */ + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, + "this pulse belongs to the same burst as before, give it same dur=%d (old_dur=%d)", + dfs->dfs_rinfo.dfs_last_bin5_dur, old_dur); + return dfs->dfs_rinfo.dfs_last_bin5_dur; + } + + /* This pulse does not belong to this burst, return unchanged duration*/ + return old_dur; +} + +int dfs_get_random_bin5_dur(struct wlan_dfs *dfs, + uint64_t tstamp) +{ + uint8_t new_dur = MIN_BIN5_DUR; + int range; + + get_random_bytes(&new_dur, sizeof(uint8_t)); + range = (MAX_BIN5_DUR - MIN_BIN5_DUR + 1); + new_dur %= range; + new_dur += MIN_BIN5_DUR; + + return new_dur; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_init.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_init.c new file mode 100644 index 0000000000000000000000000000000000000000..b5eaa3f40c142e96e0f31e6bae87a35d7e33c21d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_init.c @@ -0,0 +1,534 @@ +/* + * Copyright (c) 2013, 2016-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains initialization functions and functions that reset + * internal data structures. + */ + +#include "../dfs.h" +#include "wlan_dfs_lmac_api.h" +#include +#include +#include "wlan_dfs_utils_api.h" + +/** + * dfs_reset_filtertype() - Reset filtertype. + * @ft: Pointer to dfs_filtertype structure. + */ +static inline void dfs_reset_filtertype( + struct dfs_filtertype *ft) +{ + int j; + struct dfs_filter *rf; + struct dfs_delayline *dl; + + for (j = 0; j < ft->ft_numfilters; j++) { + rf = ft->ft_filters[j]; + dl = &(rf->rf_dl); + if (dl) { + qdf_mem_zero(dl, sizeof(*dl)); + dl->dl_lastelem = (0xFFFFFFFF) & DFS_MAX_DL_MASK; + } + } +} + +void dfs_reset_alldelaylines(struct wlan_dfs *dfs) +{ + struct dfs_filtertype *ft = NULL; + struct dfs_pulseline *pl; + int i; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + pl = dfs->pulses; + + if (!pl) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "pl is NULL"); + return; + } + + /* Reset the pulse log. */ + pl->pl_firstelem = pl->pl_numelems = 0; + pl->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK; + + for (i = 0; i < DFS_MAX_RADAR_TYPES; i++) { + if (dfs->dfs_radarf[i]) { + ft = dfs->dfs_radarf[i]; + dfs_reset_filtertype(ft); + } + } + + if (!(dfs->dfs_b5radars)) { + if (dfs->dfs_rinfo.rn_numbin5radars > 0) + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "null dfs_b5radars, numbin5radars=%d domain=%d", + dfs->dfs_rinfo.rn_numbin5radars, + dfs->dfsdomain); + return; + } + + for (i = 0; i < dfs->dfs_rinfo.rn_numbin5radars; i++) { + qdf_mem_zero(&(dfs->dfs_b5radars[i].br_elems[0]), + sizeof(struct dfs_bin5elem) * DFS_MAX_B5_SIZE); + dfs->dfs_b5radars[i].br_firstelem = 0; + dfs->dfs_b5radars[i].br_numelems = 0; + dfs->dfs_b5radars[i].br_lastelem = + (0xFFFFFFFF) & DFS_MAX_B5_MASK; + } +} + +void dfs_reset_delayline(struct dfs_delayline *dl) +{ + qdf_mem_zero(&(dl->dl_elems[0]), sizeof(dl->dl_elems)); + dl->dl_lastelem = (0xFFFFFFFF) & DFS_MAX_DL_MASK; +} + +void dfs_reset_filter_delaylines(struct dfs_filtertype *dft) +{ + struct dfs_filter *df; + int i; + + for (i = 0; i < DFS_MAX_NUM_RADAR_FILTERS; i++) { + df = dft->ft_filters[i]; + dfs_reset_delayline(&(df->rf_dl)); + } +} + +void dfs_reset_radarq(struct wlan_dfs *dfs) +{ + struct dfs_event *event; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + WLAN_DFSQ_LOCK(dfs); + WLAN_DFSEVENTQ_LOCK(dfs); + while (!STAILQ_EMPTY(&(dfs->dfs_radarq))) { + event = STAILQ_FIRST(&(dfs->dfs_radarq)); + STAILQ_REMOVE_HEAD(&(dfs->dfs_radarq), re_list); + qdf_mem_zero(event, sizeof(struct dfs_event)); + STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), event, re_list); + } + WLAN_DFSEVENTQ_UNLOCK(dfs); + WLAN_DFSQ_UNLOCK(dfs); +} + +/** + * dfs_fill_ft_index_table() - DFS fill ft index table. + * @dfs: Pointer to wlan_dfs structure. + * @i: Duration used as an index. + * + * Return: 1 if too many overlapping radar filters else 0. + */ +static inline bool dfs_fill_ft_index_table( + struct wlan_dfs *dfs, + int i) +{ + uint32_t stop = 0, tableindex = 0; + + while ((tableindex < DFS_MAX_RADAR_OVERLAP) && (!stop)) { + if ((dfs->dfs_ftindextable[i])[tableindex] == -1) + stop = 1; + else + tableindex++; + } + + if (stop) { + (dfs->dfs_ftindextable[i])[tableindex] = + (int8_t)(dfs->dfs_rinfo.rn_ftindex); + } else { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "Too many overlapping radar filters"); + return 1; + } + + return 0; +} + +/** + * dfs_fill_filter_type() - DFS fill filter type. + * @dfs: Pointer to wlan_dfs structure. + * @ft: Double pointer to dfs_filtertype structure. + * @dfs_radars: Pointer to dfs_pulse structure. + * @min_rssithresh: Minimum RSSI threshold. + * @max_pulsedur: Maximum RSSI threshold. + * @p: Index to dfs_pulse structure. + * + * Return: 1 if too many overlapping radar filters else 0. + */ +static inline bool dfs_fill_filter_type( + struct wlan_dfs *dfs, + struct dfs_filtertype **ft, + struct dfs_pulse *dfs_radars, + int32_t *min_rssithresh, + uint32_t *max_pulsedur, + int p) +{ + int i; + + /* No filter of the appropriate dur was found. */ + if ((dfs->dfs_rinfo.rn_ftindex + 1) > DFS_MAX_RADAR_TYPES) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "Too many filter types"); + return 1; + } + (*ft) = dfs->dfs_radarf[dfs->dfs_rinfo.rn_ftindex]; + (*ft)->ft_numfilters = 0; + (*ft)->ft_numpulses = dfs_radars[p].rp_numpulses; + (*ft)->ft_patterntype = dfs_radars[p].rp_patterntype; + (*ft)->ft_mindur = dfs_radars[p].rp_mindur; + (*ft)->ft_maxdur = dfs_radars[p].rp_maxdur; + (*ft)->ft_filterdur = dfs_radars[p].rp_pulsedur; + (*ft)->ft_rssithresh = dfs_radars[p].rp_rssithresh; + (*ft)->ft_rssimargin = dfs_radars[p].rp_rssimargin; + (*ft)->ft_minpri = 1000000; + + if ((*ft)->ft_rssithresh < *min_rssithresh) + *min_rssithresh = (*ft)->ft_rssithresh; + + if ((*ft)->ft_maxdur > *max_pulsedur) + *max_pulsedur = (*ft)->ft_maxdur; + + for (i = (*ft)->ft_mindur; i <= (*ft)->ft_maxdur; i++) { + if (dfs_fill_ft_index_table(dfs, i)) + return 1; + } + + dfs->dfs_rinfo.rn_ftindex++; + + return 0; +} + +int dfs_init_radar_filters(struct wlan_dfs *dfs, + struct wlan_dfs_radar_tab_info *radar_info) +{ + struct dfs_filtertype *ft = NULL; + struct dfs_filter *rf = NULL; + struct dfs_pulse *dfs_radars; + struct dfs_bin5pulse *b5pulses = NULL; + uint32_t T, Tmax; + int32_t min_rssithresh = DFS_MAX_RSSI_VALUE; + uint32_t max_pulsedur = 0; + int numpulses, p, n, i; + int numradars = 0, numb5radars = 0; + int retval; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return 1; + } + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "dfsdomain=%d, numradars=%d, numb5radars=%d", + radar_info->dfsdomain, + radar_info->numradars, radar_info->numb5radars); + + /* Clear up the dfs domain flag first. */ + dfs->wlan_dfs_isdfsregdomain = 0; + + /* + * If radar_info is NULL or dfsdomain is NULL, treat the + * rest of the radar configuration as suspect. + */ + if (!radar_info || radar_info->dfsdomain == 0) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "Unknown dfs domain %d", + dfs->dfsdomain); + /* Disable radar detection since we don't have a radar domain.*/ + dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN; + dfs->dfs_proc_phyerr &= ~DFS_SECOND_SEGMENT_RADAR_EN; + return 0; + } + + dfs->dfsdomain = radar_info->dfsdomain; + dfs_radars = radar_info->dfs_radars; + numradars = radar_info->numradars; + b5pulses = radar_info->b5pulses; + numb5radars = radar_info->numb5radars; + + dfs->dfs_defaultparams = radar_info->dfs_defaultparams; + + dfs->wlan_dfs_isdfsregdomain = 1; + dfs->dfs_rinfo.rn_ftindex = 0; + /* Clear filter type table. */ + for (n = 0; n < 256; n++) { + for (i = 0; i < DFS_MAX_RADAR_OVERLAP; i++) + (dfs->dfs_ftindextable[n])[i] = -1; + } + + /* Now, initialize the radar filters. */ + for (p = 0; p < numradars; p++) { + ft = NULL; + for (n = 0; n < dfs->dfs_rinfo.rn_ftindex; n++) { + if ((dfs_radars[p].rp_pulsedur == + dfs->dfs_radarf[n]->ft_filterdur) && + (dfs_radars[p].rp_numpulses == + dfs->dfs_radarf[n]->ft_numpulses) && + (dfs_radars[p].rp_mindur == + dfs->dfs_radarf[n]->ft_mindur) && + (dfs_radars[p].rp_maxdur == + dfs->dfs_radarf[n]->ft_maxdur)) { + ft = dfs->dfs_radarf[n]; + break; + } + } + + if (!ft) { + retval = dfs_fill_filter_type(dfs, &ft, dfs_radars, + &min_rssithresh, &max_pulsedur, p); + if (retval == 1) + goto bad4; + } + + rf = ft->ft_filters[ft->ft_numfilters++]; + dfs_reset_delayline(&rf->rf_dl); + numpulses = dfs_radars[p].rp_numpulses; + + rf->rf_numpulses = numpulses; + rf->rf_patterntype = dfs_radars[p].rp_patterntype; + rf->rf_sidx_spread = dfs_radars[p].rp_sidx_spread; + rf->rf_check_delta_peak = dfs_radars[p].rp_check_delta_peak; + rf->rf_pulseid = dfs_radars[p].rp_pulseid; + rf->rf_mindur = dfs_radars[p].rp_mindur; + rf->rf_maxdur = dfs_radars[p].rp_maxdur; + rf->rf_numpulses = dfs_radars[p].rp_numpulses; + rf->rf_ignore_pri_window = dfs_radars[p].rp_ignore_pri_window; + T = (100000000 / dfs_radars[p].rp_max_pulsefreq) - + 100 * (dfs_radars[p].rp_meanoffset); + rf->rf_minpri = dfs_round((int32_t)T - + (100 * (dfs_radars[p].rp_pulsevar))); + Tmax = (100000000 / dfs_radars[p].rp_pulsefreq) - + 100 * (dfs_radars[p].rp_meanoffset); + rf->rf_maxpri = dfs_round((int32_t)Tmax + + (100 * (dfs_radars[p].rp_pulsevar))); + + if (rf->rf_minpri < ft->ft_minpri) + ft->ft_minpri = rf->rf_minpri; + + rf->rf_fixed_pri_radar_pulse = ( + dfs_radars[p].rp_max_pulsefreq == + dfs_radars[p].rp_pulsefreq) ? 1 : 0; + rf->rf_threshold = dfs_radars[p].rp_threshold; + rf->rf_filterlen = rf->rf_maxpri * rf->rf_numpulses; + + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "minprf = %d maxprf = %d pulsevar = %d thresh=%d", + dfs_radars[p].rp_pulsefreq, + dfs_radars[p].rp_max_pulsefreq, + dfs_radars[p].rp_pulsevar, + rf->rf_threshold); + + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "minpri = %d maxpri = %d filterlen = %d filterID = %d", + rf->rf_minpri, rf->rf_maxpri, + rf->rf_filterlen, rf->rf_pulseid); + } + + dfs_print_filters(dfs); + + dfs->dfs_rinfo.rn_numbin5radars = numb5radars; + if (dfs->dfs_b5radars) { + qdf_mem_free(dfs->dfs_b5radars); + dfs->dfs_b5radars = NULL; + } + + if (numb5radars) { + dfs->dfs_b5radars = (struct dfs_bin5radars *)qdf_mem_malloc( + numb5radars * sizeof(struct dfs_bin5radars)); + /* + * Malloc can return NULL if numb5radars is zero. But we still + * want to reset the delay lines. + */ + if (!(dfs->dfs_b5radars)) + goto bad4; + } + + for (n = 0; n < numb5radars; n++) { + dfs->dfs_b5radars[n].br_pulse = b5pulses[n]; + dfs->dfs_b5radars[n].br_pulse.b5_timewindow *= 1000000; + if (dfs->dfs_b5radars[n].br_pulse.b5_rssithresh < + min_rssithresh) + min_rssithresh = + dfs->dfs_b5radars[n].br_pulse.b5_rssithresh; + + if (dfs->dfs_b5radars[n].br_pulse.b5_maxdur > max_pulsedur) + max_pulsedur = dfs->dfs_b5radars[n].br_pulse.b5_maxdur; + } + dfs_reset_alldelaylines(dfs); + dfs_reset_radarq(dfs); + dfs->dfs_curchan_radindex = -1; + dfs->dfs_extchan_radindex = -1; + dfs->dfs_rinfo.rn_minrssithresh = min_rssithresh; + + /* Convert durations to TSF ticks. */ + dfs->dfs_rinfo.rn_maxpulsedur = + dfs_round((int32_t)((max_pulsedur * 100/80) * 100)); + /* + * Relax the max pulse duration a little bit due to inaccuracy + * caused by chirping. + */ + dfs->dfs_rinfo.rn_maxpulsedur = dfs->dfs_rinfo.rn_maxpulsedur + 20; + + dfs_debug(dfs, WLAN_DEBUG_DFS, "DFS min filter rssiThresh = %d", + min_rssithresh); + + dfs_debug(dfs, WLAN_DEBUG_DFS, "DFS max pulse dur = %d ticks", + dfs->dfs_rinfo.rn_maxpulsedur); + + return 0; + +bad4: + return 1; +} + +void dfs_clear_stats(struct wlan_dfs *dfs) +{ + if (!dfs) + return; + + qdf_mem_zero(&dfs->wlan_dfs_stats, sizeof(struct dfs_stats)); + dfs->wlan_dfs_stats.last_reset_tstamp = + lmac_get_tsf64(dfs->dfs_pdev_obj); +} + +bool dfs_check_intersect_excl(int low_freq, int high_freq, int center_freq) +{ + return ((center_freq > low_freq) && (center_freq < high_freq)); +} + +int dfs_check_etsi_overlap(int center_freq, int chan_width, + int en302_502_freq_low, int en302_502_freq_high) +{ + int chan_freq_low; + int chan_freq_high; + + /* Calculate low/high frequency ranges */ + chan_freq_low = center_freq - (chan_width / 2); + chan_freq_high = center_freq + (chan_width / 2); + + return ((chan_freq_high == en302_502_freq_low) || + dfs_check_intersect_excl(en302_502_freq_low, + en302_502_freq_high, + chan_freq_low) || + dfs_check_intersect_excl(en302_502_freq_low, + en302_502_freq_high, + chan_freq_high)); +} + +#ifdef CONFIG_CHAN_FREQ_API +bool dfs_is_en302_502_applicable(struct wlan_dfs *dfs) +{ + int chan_freq; + int chan_width; + int overlap = 0; + struct wlan_objmgr_vdev *vdev = NULL; + struct wlan_channel *bss_chan = NULL; + + /* Get centre frequency */ + chan_freq = dfs->dfs_curchan->dfs_ch_mhz_freq_seg1; + vdev = wlan_objmgr_pdev_get_first_vdev(dfs->dfs_pdev_obj, WLAN_DFS_ID); + if (!vdev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "vdev is NULL"); + return false; + } + + bss_chan = wlan_vdev_mlme_get_bss_chan(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_DFS_ID); + /* Grab width */ + chan_width = wlan_reg_get_bw_value(bss_chan->ch_width); + + if (WLAN_IS_CHAN_11AC_VHT80_80(dfs->dfs_curchan)) { + /* HT80_80 mode has 2 segments and each segment must + * be checked for control channel first. + */ + overlap = dfs_check_etsi_overlap( + chan_freq, chan_width / 2, + ETSI_RADAR_EN302_502_FREQ_LOWER, + ETSI_RADAR_EN302_502_FREQ_UPPER); + + /* check for extension channel */ + chan_freq = dfs->dfs_curchan->dfs_ch_mhz_freq_seg2; + + overlap += dfs_check_etsi_overlap( + chan_freq, chan_width / 2, + ETSI_RADAR_EN302_502_FREQ_LOWER, + ETSI_RADAR_EN302_502_FREQ_UPPER); + } else { + overlap = dfs_check_etsi_overlap( + chan_freq, chan_width, + ETSI_RADAR_EN302_502_FREQ_LOWER, + ETSI_RADAR_EN302_502_FREQ_UPPER); + } + + return(wlan_reg_is_regdmn_en302502_applicable(dfs->dfs_pdev_obj) && + overlap); +} +#else +#ifdef CONFIG_CHAN_NUM_API +bool dfs_is_en302_502_applicable(struct wlan_dfs *dfs) +{ + int chan_freq; + int chan_width; + int overlap = 0; + struct wlan_objmgr_vdev *vdev = NULL; + struct wlan_channel *bss_chan = NULL; + + /* Get centre frequency */ + chan_freq = utils_dfs_chan_to_freq( + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg1); + vdev = wlan_objmgr_pdev_get_first_vdev(dfs->dfs_pdev_obj, WLAN_DFS_ID); + if (!vdev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "vdev is NULL"); + return false; + } + + bss_chan = wlan_vdev_mlme_get_bss_chan(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_DFS_ID); + /* Grab width */ + chan_width = wlan_reg_get_bw_value(bss_chan->ch_width); + + if (WLAN_IS_CHAN_11AC_VHT80_80(dfs->dfs_curchan)) { + /* HT80_80 mode has 2 segments and each segment must + * be checked for control channel first. + */ + overlap = dfs_check_etsi_overlap( + chan_freq, chan_width / 2, + ETSI_RADAR_EN302_502_FREQ_LOWER, + ETSI_RADAR_EN302_502_FREQ_UPPER); + + /* check for extension channel */ + chan_freq = utils_dfs_chan_to_freq( + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg2); + + overlap += dfs_check_etsi_overlap( + chan_freq, chan_width / 2, + ETSI_RADAR_EN302_502_FREQ_LOWER, + ETSI_RADAR_EN302_502_FREQ_UPPER); + } else { + overlap = dfs_check_etsi_overlap( + chan_freq, chan_width, + ETSI_RADAR_EN302_502_FREQ_LOWER, + ETSI_RADAR_EN302_502_FREQ_UPPER); + } + + return(wlan_reg_is_regdmn_en302502_applicable(dfs->dfs_pdev_obj) && + overlap); +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_misc.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_misc.c new file mode 100644 index 0000000000000000000000000000000000000000..64602df3ad81b4e82eefe8863c8a31897567b7fc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_misc.c @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file really does contain miscellaneous functions that didn't fit + * in anywhere else. + */ + +#include "../dfs.h" +#include "wlan_dfs_lmac_api.h" +#include "wlan_dfs_mlme_api.h" +#include "../dfs_internal.h" + +/** + * dfs_adjust_pri_per_chan_busy() - Calculates adjust_pri. + * @ext_chan_busy: Extension channel PRI. + * @pri_margin: Primary margin. + * + * Calculates the adjust_pri using ext_chan_busy, DFS_EXT_CHAN_LOADING_THRESH + * and pri_margin. + * + * Return: adjust_pri. + */ +static int dfs_adjust_pri_per_chan_busy(int ext_chan_busy, int pri_margin) +{ + int adjust_pri = 0; + + if (ext_chan_busy > DFS_EXT_CHAN_LOADING_THRESH) { + adjust_pri = ((ext_chan_busy - DFS_EXT_CHAN_LOADING_THRESH) * + (pri_margin)); + adjust_pri /= 100; + } + + return adjust_pri; +} + +/** + * dfs_adjust_thresh_per_chan_busy() - Calculates adjust_thresh. + * @ext_chan_busy: Extension channel PRI. + * @thresh: Threshold value. + * + * Calculates the adjust_thresh using ext_chan_busy, DFS_EXT_CHAN_LOADING_THRESH + * and thresh. + * + * Return: adjust_thresh. + */ +static int dfs_adjust_thresh_per_chan_busy(int ext_chan_busy, int thresh) +{ + int adjust_thresh = 0; + + if (ext_chan_busy > DFS_EXT_CHAN_LOADING_THRESH) { + adjust_thresh = ((ext_chan_busy - DFS_EXT_CHAN_LOADING_THRESH) * + thresh); + adjust_thresh /= 100; + } + + return adjust_thresh; +} + +/** + * dfs_get_cached_ext_chan_busy() - Get cached ext chan busy. + * @dfs: Pointer to wlan_dfs structure. + * @ext_chan_busy: Extension channel PRI. + */ +static inline void dfs_get_cached_ext_chan_busy( + struct wlan_dfs *dfs, + int *ext_chan_busy) +{ + *ext_chan_busy = 0; + /* Check to see if the cached value of ext_chan_busy can be used. */ + + if (dfs->dfs_rinfo.dfs_ext_chan_busy && + (dfs->dfs_rinfo.rn_lastfull_ts < + dfs->dfs_rinfo.ext_chan_busy_ts)) { + *ext_chan_busy = dfs->dfs_rinfo.dfs_ext_chan_busy; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "Use cached copy of ext_chan_busy extchanbusy=%d rn_lastfull_ts=%llu ext_chan_busy_ts=%llu", + *ext_chan_busy, + (uint64_t)dfs->dfs_rinfo.rn_lastfull_ts, + (uint64_t)dfs->dfs_rinfo.ext_chan_busy_ts); + } +} + +int dfs_get_pri_margin(struct wlan_dfs *dfs, + int is_extchan_detect, + int is_fixed_pattern) +{ + int adjust_pri = 0, ext_chan_busy = 0; + int pri_margin; + + if (is_fixed_pattern) + pri_margin = DFS_DEFAULT_FIXEDPATTERN_PRI_MARGIN; + else + pri_margin = DFS_DEFAULT_PRI_MARGIN; + + if (WLAN_IS_CHAN_11N_HT40(dfs->dfs_curchan)) { + ext_chan_busy = lmac_get_ext_busy(dfs->dfs_pdev_obj); + if (ext_chan_busy >= 0) { + dfs->dfs_rinfo.ext_chan_busy_ts = + lmac_get_tsf64(dfs->dfs_pdev_obj); + dfs->dfs_rinfo.dfs_ext_chan_busy = ext_chan_busy; + } else { + dfs_get_cached_ext_chan_busy(dfs, &ext_chan_busy); + } + adjust_pri = dfs_adjust_pri_per_chan_busy(ext_chan_busy, + pri_margin); + pri_margin -= adjust_pri; + } + + return pri_margin; +} + +int dfs_get_filter_threshold(struct wlan_dfs *dfs, + struct dfs_filter *rf, + int is_extchan_detect) +{ + int ext_chan_busy = 0; + int thresh, adjust_thresh = 0; + + thresh = rf->rf_threshold; + + if (WLAN_IS_CHAN_11N_HT40(dfs->dfs_curchan)) { + ext_chan_busy = lmac_get_ext_busy(dfs->dfs_pdev_obj); + if (ext_chan_busy >= 0) { + dfs->dfs_rinfo.ext_chan_busy_ts = + lmac_get_tsf64(dfs->dfs_pdev_obj); + dfs->dfs_rinfo.dfs_ext_chan_busy = ext_chan_busy; + } else { + dfs_get_cached_ext_chan_busy(dfs, &ext_chan_busy); + } + + adjust_thresh = + dfs_adjust_thresh_per_chan_busy(ext_chan_busy, thresh); + dfs_debug(dfs, WLAN_DEBUG_DFS2, + " filterID=%d extchanbusy=%d adjust_thresh=%d", + rf->rf_pulseid, ext_chan_busy, adjust_thresh); + + thresh += adjust_thresh; + } + + return thresh; +} + +uint32_t dfs_round(int32_t val) +{ + uint32_t ival, rem; + + if (val < 0) + return 0; + ival = val/100; + rem = val - (ival * 100); + if (rem < 50) + return ival; + else + return ival + 1; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_partial_offload_radar.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_partial_offload_radar.c new file mode 100644 index 0000000000000000000000000000000000000000..80f508aa80c81228151ecdce124a789df3adf9e7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_partial_offload_radar.c @@ -0,0 +1,698 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2011, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has radar table and initialization function for Beeliner + * family of chipsets. + */ + +#include "../dfs.h" +#include "wlan_dfs_mlme_api.h" +#include +#include "wlan_dfs_utils_api.h" +#include "wlan_dfs_lmac_api.h" +#include "../dfs_internal.h" +#include "../dfs_partial_offload_radar.h" +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +#include "../dfs_process_radar_found_ind.h" +#endif + +/** + * struct dfs_pulse dfs_fcc_radars - FCC radar table for Offload chipsets. + */ +static struct dfs_pulse dfs_fcc_radars[] = { + /* FCC TYPE 1 */ + {18, 1, 700, 700, 0, 4, 5, 0, 1, 18, 0, 3, 1, 5, 0, 0}, + {18, 1, 350, 350, 0, 4, 5, 0, 1, 18, 0, 3, 0, 5, 0, 0}, + + /* FCC TYPE 6 */ + {9, 1, 3003, 3003, 1, 7, 5, 0, 1, 18, 0, 0, 1, 1000, 0, 1}, + + /* FCC TYPE 2 */ + {23, 5, 4347, 6666, 0, 4, 11, 0, 7, 22, 0, 3, 0, 5, 0, 2}, + + /* FCC TYPE 3 */ + {18, 10, 2000, 5000, 0, 4, 8, 6, 13, 22, 0, 3, 0, 5, 0, 5}, + + /* FCC TYPE 4 */ + {16, 15, 2000, 5000, 0, 4, 7, 11, 23, 22, 0, 3, 0, 5, 0, 11}, + + /* FCC NEW TYPE 1 */ + /* 518us to 938us pulses (min 56 pulses) */ + {57, 1, 1066, 1930, 0, 4, 20, 0, 1, 22, 0, 3, 0, 5, 0, 21}, + + /* 938us to 2000 pulses (min 26 pulses) */ + {27, 1, 500, 1066, 0, 4, 13, 0, 1, 22, 0, 3, 0, 5, 0, 22}, + + /* 2000 to 3067us pulses (min 17 pulses) */ + {18, 1, 325, 500, 0, 4, 9, 0, 1, 22, 0, 3, 0, 5, 0, 23}, +}; + +/** + * struct dfs_pulse dfs_mkk4_radars - MKK4 radar table for Offload chipsets. + */ +static struct dfs_pulse dfs_mkk4_radars[] = { + + /* following two filters are specific to Japan/MKK4 */ + /* 1389 +/- 6 us */ + {18, 1, 720, 720, 0, 4, 6, 0, 1, 18, 0, 3, 0, 5, 0, 17}, + + /* 4000 +/- 6 us */ + {18, 4, 250, 250, 0, 4, 5, 1, 6, 18, 0, 3, 0, 5, 0, 18}, + + /* 3846 +/- 7 us */ + {18, 5, 260, 260, 0, 4, 6, 1, 6, 18, 0, 3, 1, 5, 0, 19}, + + /* following filters are common to both FCC and JAPAN */ + + /* FCC TYPE 1 */ + {18, 1, 700, 700, 0, 4, 5, 0, 1, 18, 0, 3, 1, 5, 0, 0}, + {18, 1, 350, 350, 0, 4, 5, 0, 1, 18, 0, 3, 0, 5, 0, 0}, + + /* FCC TYPE 6 */ + {9, 1, 3003, 3003, 1, 7, 5, 0, 1, 18, 0, 0, 1, 1000, 0, 1}, + + /* FCC TYPE 2 */ + {23, 5, 4347, 6666, 0, 4, 11, 0, 7, 22, 0, 3, 0, 5, 0, 2}, + + /* FCC TYPE 3 */ + {18, 10, 2000, 5000, 0, 4, 8, 6, 13, 22, 0, 3, 0, 5, 0, 5}, + + /* FCC TYPE 4 */ + {16, 15, 2000, 5000, 0, 4, 7, 11, 23, 22, 0, 3, 0, 5, 0, 11}, +}; + +/** + * struct dfs_pulse dfs_mkkn_radars - MKKN radar table for Offload chipsets. + */ +static struct dfs_pulse dfs_mkkn_radars[] = { + /** Since the table is empty no new radar type shall be detected. + * New filters shall be added to this tables after proper testing + * and verification. + */ +}; + +/** + * struct dfs_bin5pulse dfs_fcc_bin5pulses - FCC BIN5 pulses for Offload + * chipsets. + */ +static struct dfs_bin5pulse dfs_fcc_bin5pulses[] = { + {6, 28, 105, 12, 18, 5}, +}; + +/** + * struct dfs_bin5pulse dfs_jpn_bin5pulses - JAPAN BIN5 pulses for Offload + * chipsets. + */ +static struct dfs_bin5pulse dfs_jpn_bin5pulses[] = { + {5, 28, 105, 12, 22, 5}, +}; + +/** + * dfs_bin5pulse dfs_fcc_bin5pulses_ar900b - FCC BIN5 pulses for AR9300 + * chipsets. + * + * WAR : IR 42631 + * Beeliner 2 is tested at -65dbm as opposed to -62 dbm. + * For FCC/JPN chirping pulses, HW reports RSSI value that is lower by 2dbm + * when we enable noise floor claibration. This is specially true for + * frequencies that are greater than center frequency and in VHT80 mode. + */ + +static struct dfs_bin5pulse dfs_fcc_bin5pulses_ar900b[] = { + {5, 28, 105, 12, 20, 5}, +}; + +/** + * dfs_bin5pulse dfs_jpn_bin5pulses_ar900b - JAPAN BIN5 pulses for AR9300 + * chipsets. + */ +static struct dfs_bin5pulse dfs_jpn_bin5pulses_ar900b[] = { + {5, 28, 105, 12, 20, 5}, +}; + +/** + * dfs_bin5pulse dfs_fcc_bin5pulses_qca9984 - FCC BIN5 pulses for QCA9984 + * chipsets. + * WAR : IR-83400 + * Cascade is tested at -65dbm as opposed to -62 dbm. + * For FCC/JPN chirping pulses, HW reports RSSI value that is significantly + * lower at left edge especially in HT80_80 mode. Also, duration may be + * significantly low. This can result in false detection and we may have to + * raise the threshold. + */ +static struct dfs_bin5pulse dfs_fcc_bin5pulses_qca9984[] = { + {5, 20, 105, 12, 20, 0}, +}; + +/** + * dfs_bin5pulse dfs_jpn_bin5pulses_qca9984 - JAPAN BIN5 pulses for QCA9984 + * chipsets. + */ +static struct dfs_bin5pulse dfs_jpn_bin5pulses_qca9984[] = { + {5, 20, 105, 12, 20, 0}, +}; + +/** + * dfs_pulse dfs_etsi_radars - ETSI radar table. + */ +static struct dfs_pulse dfs_etsi_radars[] = { + + /* EN 302 502 frequency hopping pulse */ + /* PRF 3000, 1us duration, 9 pulses per burst */ + {9, 1, 3000, 3000, 1, 4, 5, 0, 1, 18, 0, 0, 1, 1000, 0, 40}, + /* PRF 4500, 20us duration, 9 pulses per burst */ + {9, 20, 4500, 4500, 1, 4, 5, 19, 21, 18, 0, 0, 1, 1000, 0, 41}, + + /* Type 3 */ + /* 10 15us, 200-1000 PRF, 15 pulses */ + {15, 15, 200, 1000, 0, 4, 5, 8, 18, 22, 0, 0, 0, 5, 0, 42}, + + /* Type 4 */ + /* 1-15us, 1200-1600 PRF, 15 pulses */ + {15, 15, 1200, 1600, 0, 4, 5, 0, 18, 22, 0, 0, 0, 5, 0, 43}, + + /* TYPE staggered pulse */ + /* Type 5*/ + /* 0.8-2us, 2-3 bursts,300-400 PRF, 10 pulses each */ + {30, 2, 300, 400, 2, 30, 3, 0, 5, 15, 0, 0, 1, 5, 0, 31}, + /* Type 6 */ + /* 0.8-2us, 2-3 bursts, 400-1200 PRF, 15 pulses each */ + {30, 2, 400, 1200, 2, 30, 7, 0, 5, 15, 0, 0, 0, 5, 0, 32}, + + /* constant PRF based */ + /* Type 1 */ + /* 0.8-5us, 200 300 PRF, 10 pulses */ + {10, 5, 200, 400, 0, 4, 5, 0, 8, 15, 0, 0, 2, 5, 0, 33}, + {10, 5, 400, 600, 0, 4, 5, 0, 8, 15, 0, 0, 2, 5, 0, 37}, + {10, 5, 600, 800, 0, 4, 5, 0, 8, 15, 0, 0, 2, 5, 0, 38}, + {10, 5, 800, 1000, 0, 4, 5, 0, 8, 15, 0, 0, 2, 5, 0, 39}, + /* {10, 5, 200, 1000, 0, 6, 5, 0, 8, 15, 0, 0, 2, 5, 33}, */ + + /* Type 2 */ + /* 0.8-15us, 200-1600 PRF, 15 pulses */ + {15, 15, 200, 1600, 0, 4, 8, 0, 18, 24, 0, 0, 0, 5, 0, 34}, + + /* Type 3 */ + /* 0.8-15us, 2300-4000 PRF, 25 pulses*/ + {25, 15, 2300, 4000, 0, 4, 10, 0, 18, 24, 0, 0, 0, 5, 0, 35}, + + /* Type 4 */ + /* 20-30us, 2000-4000 PRF, 20 pulses*/ + {20, 30, 2000, 4000, 0, 4, 6, 19, 33, 24, 0, 0, 0, 24, 1, 36}, +}; + +/** + * dfs_pulse dfs_china_radars - CHINA radar table. + */ +static struct dfs_pulse dfs_china_radars[] = { + + /* TYPE staggered pulse */ + /* Type 5*/ + /* 0.8-2us, 2-3 bursts,300-400 PRF, 12 pulses each */ + {36, 2, 300, 400, 2, 30, 3, 0, 5, 15, 0, 0, 1, 0, 0, 51}, + /* Type 6 */ + /* 0.8-2us, 2-3 bursts, 400-1200 PRF, 16 pulses each */ + {48, 2, 400, 1200, 2, 30, 7, 0, 5, 15, 0, 0, 0, 0, 0, 52}, + + /* constant PRF based */ + /* Type 1 */ + /* 0.5-5us, 200 1000 PRF, 12 pulses */ + {12, 5, 200, 400, 0, 24, 5, 0, 8, 15, 0, 0, 2, 0, 0, 53}, + {12, 5, 400, 600, 0, 24, 5, 0, 8, 15, 0, 0, 2, 0, 0, 57}, + {12, 5, 600, 800, 0, 24, 5, 0, 8, 15, 0, 0, 2, 0, 0, 58}, + {12, 5, 800, 1000, 0, 24, 5, 0, 8, 15, 0, 0, 2, 0, 0, 59}, + + /* Type 2 */ + /* 0.5-15us, 200-1600 PRF, 16 pulses */ + {16, 15, 200, 1600, 0, 24, 8, 0, 18, 24, 0, 0, 0, 0, 0, 54}, + + /* Type 3 */ + /* 0.5-30us, 2300-4000 PRF, 24 pulses*/ + {24, 15, 2300, 4000, 0, 24, 10, 0, 33, 24, 0, 0, 0, 0, 0, 55}, + + /* Type 4 */ + /* 20-30us, 2000-4000 PRF, 20 pulses*/ + {20, 30, 2000, 4000, 0, 24, 6, 19, 33, 24, 0, 0, 0, 0, 0, 56}, + + /* 1us, 1000 PRF, 20 pulses */ + /* 1000 us PRI */ + {20, 1, 1000, 1000, 0, 6, 6, 0, 1, 18, 0, 3, 0, 0, 0, 50}, +}; + +/** + * dfs_pulse dfs_korea_radars - KOREA radar table. + */ +static struct dfs_pulse dfs_korea_radars[] = { + /* Korea Type 1 */ + {18, 1, 700, 700, 0, 4, 5, 0, 1, 18, 0, 3, 1, 5, 0, 40}, + + /* Korea Type 2 */ + {10, 1, 1800, 1800, 0, 4, 4, 0, 1, 18, 0, 3, 1, 5, 0, 41}, + + /* Korea Type 3 */ + {70, 1, 330, 330, 0, 4, 20, 0, 3, 18, 0, 3, 1, 5, 0, 42}, + + /* Korea Type 4 */ + {3, 1, 3003, 3003, 1, 7, 2, 0, 1, 18, 0, 0, 1, 1000, 0, 43}, +}; + +#define RSSI_THERSH_AR900B 15 +#define RSSI_THERSH_ADRASTEA 18 + +/** + * dfs_assign_fcc_pulse_table() - Assign FCC pulse table + * @rinfo: Pointer to wlan_dfs_radar_tab_info structure. + * @target_type: Target type. + * @tx_ops: target tx ops. + */ +static inline void dfs_assign_fcc_pulse_table( + struct wlan_dfs_radar_tab_info *rinfo, + uint32_t target_type, + struct wlan_lmac_if_target_tx_ops *tx_ops) +{ + rinfo->dfs_radars = dfs_fcc_radars; + rinfo->numradars = QDF_ARRAY_SIZE(dfs_fcc_radars); + + if (tx_ops->tgt_is_tgt_type_ar900b(target_type) || + tx_ops->tgt_is_tgt_type_ipq4019(target_type)) { + rinfo->b5pulses = dfs_fcc_bin5pulses_ar900b; + rinfo->numb5radars = QDF_ARRAY_SIZE(dfs_fcc_bin5pulses_ar900b); + } else if (tx_ops->tgt_is_tgt_type_qca9984(target_type) || + tx_ops->tgt_is_tgt_type_qca9888(target_type)) { + rinfo->b5pulses = dfs_fcc_bin5pulses_qca9984; + rinfo->numb5radars = + QDF_ARRAY_SIZE(dfs_fcc_bin5pulses_qca9984); + } else { + rinfo->b5pulses = dfs_fcc_bin5pulses; + rinfo->numb5radars = QDF_ARRAY_SIZE(dfs_fcc_bin5pulses); + } +} + +#ifdef DFS_OVERRIDE_RF_THRESHOLD +static void dfs_set_adrastea_rf_thrshold( + struct wlan_objmgr_psoc *psoc, + int dfsdomain, + uint32_t target_type, + struct wlan_dfs_radar_tab_info *rinfo) +{ + int i; + struct wlan_lmac_if_target_tx_ops *tx_ops; + + tx_ops = &psoc->soc_cb.tx_ops.target_tx_ops; + + if (tx_ops->tgt_is_tgt_type_adrastea(target_type) && + dfsdomain == DFS_ETSI_DOMAIN) { + for (i = 0; i < rinfo->numradars; i++) { + rinfo->dfs_radars[i].rp_rssithresh = + DFS_MIN(rinfo->dfs_radars[i].rp_rssithresh, + RSSI_THERSH_ADRASTEA); + } + } +} +#else +static inline void dfs_set_adrastea_rf_thrshold( + struct wlan_objmgr_psoc *psoc, + int dfsdomain, + uint32_t target_type, + struct wlan_dfs_radar_tab_info *rinfo) +{ +} +#endif + +void dfs_get_po_radars(struct wlan_dfs *dfs) +{ + struct wlan_dfs_radar_tab_info rinfo; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_target_tx_ops *tx_ops; + int i; + uint32_t target_type; + int dfsdomain = DFS_FCC_DOMAIN; + + /* Fetch current radar patterns from the lmac */ + qdf_mem_zero(&rinfo, sizeof(rinfo)); + + /* + * Look up the current DFS regulatory domain and decide + * which radar pulses to use. + */ + dfsdomain = utils_get_dfsdomain(dfs->dfs_pdev_obj); + target_type = lmac_get_target_type(dfs->dfs_pdev_obj); + + psoc = wlan_pdev_get_psoc(dfs->dfs_pdev_obj); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "psoc is NULL"); + return; + } + + tx_ops = &(psoc->soc_cb.tx_ops.target_tx_ops); + switch (dfsdomain) { + case DFS_FCC_DOMAIN: + dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS, "FCC domain"); + rinfo.dfsdomain = DFS_FCC_DOMAIN; + dfs_assign_fcc_pulse_table(&rinfo, target_type, tx_ops); + break; + case DFS_CN_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "FCC domain -- Country China(156) override FCC radar pattern" + ); + rinfo.dfsdomain = DFS_FCC_DOMAIN; + /* + * China uses a radar pattern that is similar to ETSI but it + * follows FCC in all other respect like transmit power, CCA + * threshold etc. + */ + rinfo.dfs_radars = dfs_china_radars; + rinfo.numradars = QDF_ARRAY_SIZE(dfs_china_radars); + rinfo.b5pulses = NULL; + rinfo.numb5radars = 0; + break; + case DFS_ETSI_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "ETSI domain"); + rinfo.dfsdomain = DFS_ETSI_DOMAIN; + + if (dfs_is_en302_502_applicable(dfs)) { + rinfo.dfs_radars = dfs_etsi_radars; + rinfo.numradars = QDF_ARRAY_SIZE(dfs_etsi_radars); + } else { + uint8_t offset = ETSI_LEGACY_PULSE_ARR_OFFSET; + + rinfo.dfs_radars = &dfs_etsi_radars[offset]; + rinfo.numradars = + QDF_ARRAY_SIZE(dfs_etsi_radars) - offset; + } + rinfo.b5pulses = NULL; + rinfo.numb5radars = 0; + break; + case DFS_KR_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "ETSI domain -- Korea(412)"); + rinfo.dfsdomain = DFS_ETSI_DOMAIN; + + /* + * So far we have treated Korea as part of ETSI and did not + * support any radar patters specific to Korea other than + * standard ETSI radar patterns. Ideally we would want to + * treat Korea as a different domain. This is something that + * we will address in the future. However, for now override + * ETSI tables for Korea. + */ + rinfo.dfs_radars = dfs_korea_radars; + rinfo.numradars = QDF_ARRAY_SIZE(dfs_korea_radars); + rinfo.b5pulses = NULL; + rinfo.numb5radars = 0; + break; + case DFS_MKKN_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "MKKN domain"); + rinfo.dfsdomain = DFS_MKKN_DOMAIN; + rinfo.dfs_radars = dfs_mkkn_radars; + rinfo.numradars = QDF_ARRAY_SIZE(dfs_mkkn_radars); + rinfo.b5pulses = NULL; + rinfo.numb5radars = 0; + break; + case DFS_MKK4_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "MKK4 domain"); + rinfo.dfsdomain = DFS_MKK4_DOMAIN; + rinfo.dfs_radars = dfs_mkk4_radars; + rinfo.numradars = QDF_ARRAY_SIZE(dfs_mkk4_radars); + + if (tx_ops->tgt_is_tgt_type_ar900b(target_type) || + tx_ops->tgt_is_tgt_type_ipq4019(target_type)) { + rinfo.b5pulses = dfs_jpn_bin5pulses_ar900b; + rinfo.numb5radars = QDF_ARRAY_SIZE( + dfs_jpn_bin5pulses_ar900b); + } else if (tx_ops->tgt_is_tgt_type_qca9984(target_type) || + tx_ops->tgt_is_tgt_type_qca9888(target_type)) { + rinfo.b5pulses = dfs_jpn_bin5pulses_qca9984; + rinfo.numb5radars = QDF_ARRAY_SIZE + (dfs_jpn_bin5pulses_qca9984); + } else { + rinfo.b5pulses = dfs_jpn_bin5pulses; + rinfo.numb5radars = QDF_ARRAY_SIZE( + dfs_jpn_bin5pulses); + } + break; + default: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "UNINIT domain"); + rinfo.dfsdomain = DFS_UNINIT_DOMAIN; + rinfo.dfs_radars = NULL; + rinfo.numradars = 0; + rinfo.b5pulses = NULL; + rinfo.numb5radars = 0; + break; + } + + if (tx_ops->tgt_is_tgt_type_ar900b(target_type) || + tx_ops->tgt_is_tgt_type_ipq4019(target_type) || + tx_ops->tgt_is_tgt_type_qca9984(target_type) || + tx_ops->tgt_is_tgt_type_qca9888(target_type)) { + /* Beeliner WAR: lower RSSI threshold to improve detection of + * certian radar types + */ + /* Cascade WAR: + * Cascade can report lower RSSI near the channel boundary then + * expected. It can also report significantly low RSSI at center + * (as low as 16) at center. So we are lowering threshold for + * all types of radar for * Cascade. + * This may increase the possibility of false radar detection. + * IR -- 083703, 083398, 083387 + */ + + for (i = 0; i < rinfo.numradars; i++) + rinfo.dfs_radars[i].rp_rssithresh = RSSI_THERSH_AR900B; + } + + dfs_set_adrastea_rf_thrshold(psoc, dfsdomain, target_type, &rinfo); + + WLAN_DFS_DATA_STRUCT_LOCK(dfs); + dfs_init_radar_filters(dfs, &rinfo); + WLAN_DFS_DATA_STRUCT_UNLOCK(dfs); +} + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_send_avg_params_to_fw(struct wlan_dfs *dfs, + struct dfs_radar_found_params *params) +{ + tgt_dfs_send_avg_params_to_fw(dfs->dfs_pdev_obj, params); +} + +/** + * dfs_no_res_from_fw_task() - The timer function that is called if there is no + * response from fw after sending the average radar pulse parameters. + */ +static os_timer_func(dfs_no_res_from_fw_task) +{ + struct wlan_dfs *dfs = NULL; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "Host wait timer expired"); + + dfs->dfs_is_host_wait_running = 0; + dfs->dfs_no_res_from_fw = 1; + dfs_radarfound_action_generic(dfs, dfs->dfs_seg_id); + dfs->dfs_seg_id = 0; +} + +void dfs_host_wait_timer_init(struct wlan_dfs *dfs) +{ + qdf_timer_init(NULL, + &(dfs->dfs_host_wait_timer), + dfs_no_res_from_fw_task, + (void *)(dfs), + QDF_TIMER_TYPE_WAKE_APPS); + dfs->dfs_status_timeout_override = -1; +} + +QDF_STATUS dfs_set_override_status_timeout(struct wlan_dfs *dfs, + int status_timeout) +{ + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs->dfs_status_timeout_override = status_timeout; + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Host wait status timeout is now %s : %d", + (status_timeout == -1) ? "default" : "overridden", + status_timeout); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS dfs_get_override_status_timeout(struct wlan_dfs *dfs, + int *status_timeout) +{ + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + *status_timeout = dfs->dfs_status_timeout_override; + + return QDF_STATUS_SUCCESS; +} + +/** + * dfs_extract_radar_found_params() - Copy the contents of average radar + * parameters to dfs_radar_found_params parameter structure. + * + * @dfs: Pointer to wlan_dfs structure which contains the average radar + * parameters. + * @params: Pointer to dfs_radar_found_params structure. + */ +static +void dfs_extract_radar_found_params(struct wlan_dfs *dfs, + struct dfs_radar_found_params *params) +{ + qdf_mem_zero(params, sizeof(*params)); + params->pri_min = dfs->dfs_average_pri; + params->pri_max = dfs->dfs_average_pri; + params->duration_min = dfs->dfs_average_duration; + params->duration_max = dfs->dfs_average_duration; + params->sidx_min = dfs->dfs_average_sidx; + params->sidx_max = dfs->dfs_average_sidx; + + /* Bangradar will not populate any of these average + * parameters as pulse is not received. If these variables + * are not resetted here, these go as radar_found params + * for bangradar if bangradar is issued after real radar. + */ + dfs->dfs_average_sidx = 0; + dfs->dfs_average_duration = 0; + dfs->dfs_average_pri = 0; +} + +void dfs_radarfound_action_fcc(struct wlan_dfs *dfs, uint8_t seg_id) +{ + struct dfs_radar_found_params params; + + qdf_mem_copy(&dfs->dfs_radar_found_chan, dfs->dfs_curchan, + sizeof(dfs->dfs_radar_found_chan)); + dfs_extract_radar_found_params(dfs, ¶ms); + dfs_send_avg_params_to_fw(dfs, ¶ms); + dfs->dfs_is_host_wait_running = 1; + dfs->dfs_seg_id = seg_id; + qdf_timer_mod(&dfs->dfs_host_wait_timer, + (dfs->dfs_status_timeout_override == + -1) ? HOST_DFS_STATUS_WAIT_TIMER_MS : + dfs->dfs_status_timeout_override); +} + +void dfs_host_wait_timer_reset(struct wlan_dfs *dfs) +{ + dfs->dfs_is_host_wait_running = 0; + qdf_timer_sync_cancel(&dfs->dfs_host_wait_timer); +} + +/** + * dfs_action_on_spoof_success() - DFS action on spoof test pass + * @dfs: Pointer to DFS object + */ +static void dfs_action_on_spoof_success(struct wlan_dfs *dfs) +{ + dfs->dfs_spoof_test_done = 1; + if (dfs->dfs_radar_found_chan.dfs_ch_freq == + dfs->dfs_curchan->dfs_ch_freq) { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "cac timer started for channel %d", + dfs->dfs_curchan->dfs_ch_ieee); + dfs_start_cac_timer(dfs); + } else{ + dfs_remove_spoof_channel_from_nol(dfs); + } +} + +void dfs_action_on_fw_radar_status_check(struct wlan_dfs *dfs, + uint32_t *status) +{ + struct wlan_objmgr_pdev *dfs_pdev; + int no_chans_avail = 0; + int error_flag = 0; + + dfs_host_wait_timer_reset(dfs); + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "Host DFS status = %d", + *status); + + dfs_pdev = dfs->dfs_pdev_obj; + if (!dfs_pdev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_pdev_obj is NULL"); + return; + } + + switch (*status) { + case HOST_DFS_STATUS_CHECK_PASSED: + if (dfs->dfs_average_params_sent) + dfs_action_on_spoof_success(dfs); + else + error_flag = 1; + break; + case HOST_DFS_STATUS_CHECK_FAILED: + dfs->dfs_spoof_check_failed = 1; + no_chans_avail = + dfs_mlme_rebuild_chan_list_with_non_dfs_channels(dfs_pdev); + dfs_mlme_restart_vaps_with_non_dfs_chan(dfs_pdev, + no_chans_avail); + break; + case HOST_DFS_STATUS_CHECK_HW_RADAR: + if (dfs->dfs_average_params_sent) { + if (dfs->dfs_radar_found_chan.dfs_ch_freq == + dfs->dfs_curchan->dfs_ch_freq) { + dfs_radarfound_action_generic( + dfs, + dfs->dfs_seg_id); + } else { + /* Else of this case, no action is needed as + * dfs_action would have been done at timer + * expiry itself. + */ + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "DFS Action already taken"); + } + } else { + error_flag = 1; + } + break; + default: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Status event mismatch:%d, Ignoring it", + *status); + } + + dfs->dfs_average_params_sent = 0; + qdf_mem_zero(&dfs->dfs_radar_found_chan, sizeof(struct dfs_channel)); + + if (error_flag == 1) { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Received imroper response %d. Discarding it", + *status); + } +} + +void dfs_reset_spoof_test(struct wlan_dfs *dfs) +{ + dfs->dfs_spoof_test_done = 0; + dfs->dfs_spoof_check_failed = 0; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_phyerr_tlv.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_phyerr_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..d67ea476962c940ea2aab293fecbe965a1a48531 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_phyerr_tlv.c @@ -0,0 +1,812 @@ +/* + * Copyright (c) 2012, 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains TLV frame processing functions. + */ + +#include "../dfs.h" +#include "../dfs_channel.h" +#include "../dfs_phyerr_tlv.h" +#include "wlan_dfs_mlme_api.h" +#include "../dfs_internal.h" + +#define AGC_MB_GAIN_THRESH1 68 +#define AGC_OTHER_GAIN_THRESH1 40 +#define AGC_MB_GAIN_THRESH2 80 +#define AGC_OTHER_GAIN_THRESH2 60 +#define AGC_GAIN_RSSI_THRESH 25 + +/* + * Until "fastclk" is stored in the DFS configuration. + */ +#define PERE_IS_OVERSAMPLING(_dfs) \ + (_dfs->dfs_caps.wlan_chip_is_over_sampled ? 1 : 0) + +/** + * dfs_sign_extend_32() - Calculates extended 32bit value. + * @v: Value. + * @nb: Offset. + * + * Return: Returns Extend vale. + */ +static int32_t dfs_sign_extend_32(uint32_t v, int nb) +{ + uint32_t m = 1U << (nb - 1); + + /* Chop off high bits, just in case. */ + v &= v & ((1U << nb) - 1); + + /* Extend */ + return (v ^ m) - m; +} + +/** + * dfs_calc_freq_offset() - Calculate the frequency offset. + * @sindex: signed bin index. + * @is_oversampling: oversampling mode + * + * Calculate the frequency offset from the given signed bin index from the + * radar summary report. This takes the oversampling mode into account. + * For oversampling, each bin has resolution 44MHz/128. For non-oversampling, + * each bin has resolution 40MHz/128. It returns kHz - ie, 1000th's of MHz. + */ +static int dfs_calc_freq_offset(int sindex, int is_oversampling) +{ + if (is_oversampling) + return sindex * (44000 / 128); + else + return sindex * (40000 / 128); +} + +/** + * dfs_radar_summary_print() - Prints the Radar summary. + * @dfs: Pointer to wlan_dfs structure. + * @rsu: Pointer rx_radar_status structure. + */ +static void dfs_radar_summary_print(struct wlan_dfs *dfs, + struct rx_radar_status *rsu) +{ + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " pulsedur=%d", rsu->pulse_duration); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " rssi=%d", rsu->rssi); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " ischirp=%d", rsu->is_chirp); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " sidx=%d", rsu->sidx); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " raw tsf=%d", rsu->raw_tsf); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " tsf_offset=%d", rsu->tsf_offset); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " cooked tsf=%d", rsu->raw_tsf - rsu->tsf_offset); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " frequency offset=%d.%d MHz (oversampling=%d)", + (int)(rsu->freq_offset / 1000), + (int)abs(rsu->freq_offset % 1000), + PERE_IS_OVERSAMPLING(dfs)); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " agc_total_gain=%d", rsu->agc_total_gain); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " agc_mb_gain=%d", rsu->agc_mb_gain); +} + +/** + * dfs_radar_summary_parse() - Parse the radar summary frame. + * @dfs: pointer to wlan_dfs structure. + * @buf: Phyerr buffer. + * @len: Phyerr buflen. + * @rsu: Pointer to rx_radar_status structure. + * + * The frame contents _minus_ the TLV are passed in. + */ +static void dfs_radar_summary_parse(struct wlan_dfs *dfs, + const char *buf, + size_t len, + struct rx_radar_status *rsu) +{ + uint32_t rs[3]; + + /* Drop out if we have < 2 DWORDs available. */ + if (len < sizeof(rs)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR | + WLAN_DEBUG_DFS_PHYERR_SUM, + "len (%zu) < expected (%zu)!", len, sizeof(rs)); + } + + /* + * Since the TLVs may be unaligned for some reason + * we take a private copy into aligned memory. + * This enables us to use the HAL-like accessor macros + * into the DWORDs to access sub-DWORD fields. + */ + qdf_mem_copy(rs, buf, sizeof(rs)); + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "two 32 bit values are: %08x %08x", rs[0], rs[1]); + + /* Populate the fields from the summary report. */ + rsu->tsf_offset = + MS(rs[RADAR_REPORT_PULSE_REG_2], RADAR_REPORT_PULSE_TSF_OFFSET); + rsu->pulse_duration = + MS(rs[RADAR_REPORT_PULSE_REG_2], RADAR_REPORT_PULSE_DUR); + rsu->is_chirp = + MS(rs[RADAR_REPORT_PULSE_REG_1], RADAR_REPORT_PULSE_IS_CHIRP); + rsu->sidx = dfs_sign_extend_32(MS(rs[RADAR_REPORT_PULSE_REG_1], + RADAR_REPORT_PULSE_SIDX), + 10); + rsu->freq_offset = + dfs_calc_freq_offset(rsu->sidx, PERE_IS_OVERSAMPLING(dfs)); + + /* These are only relevant if the pulse is a chirp. */ + rsu->delta_peak = dfs_sign_extend_32(MS(rs[RADAR_REPORT_PULSE_REG_1], + RADAR_REPORT_PULSE_DELTA_PEAK), 6); + rsu->delta_diff = + MS(rs[RADAR_REPORT_PULSE_REG_1], RADAR_REPORT_PULSE_DELTA_DIFF); + rsu->agc_total_gain = + MS(rs[RADAR_REPORT_PULSE_REG_1], RADAR_REPORT_AGC_TOTAL_GAIN); + rsu->agc_mb_gain = MS(rs[RADAR_REPORT_PULSE_REG_2], + RADAR_REPORT_PULSE_AGC_MB_GAIN); +} + +/** + * dfs_radar_fft_search_report_parse () - Parse FFT report. + * @dfs: pointer to wlan_dfs structure. + * @buf: Phyerr buffer. + * @len: Phyerr buflen. + * @rsu: Pointer to rx_radar_status structure. + */ +static void dfs_radar_fft_search_report_parse(struct wlan_dfs *dfs, + const char *buf, + size_t len, + struct rx_search_fft_report *rsfr) +{ + uint32_t rs[3]; + + /* Drop out if we have < 2 DWORDs available. */ + if (len < sizeof(rs)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR | + WLAN_DEBUG_DFS_PHYERR_SUM, + "len (%zu) < expected (%zu)!", len, sizeof(rs)); + } + + /* + * Since the TLVs may be unaligned for some reason we take a private + * copy into aligned memory. This enables us to use the HAL-like + * accessor macros into the DWORDs to access sub-DWORD fields. + */ + qdf_mem_copy(rs, buf, sizeof(rs)); + + rsfr->total_gain_db = + MS(rs[SEARCH_FFT_REPORT_REG_1], SEARCH_FFT_REPORT_TOTAL_GAIN_DB); + + rsfr->base_pwr_db = + MS(rs[SEARCH_FFT_REPORT_REG_1], SEARCH_FFT_REPORT_BASE_PWR_DB); + + rsfr->fft_chn_idx = + MS(rs[SEARCH_FFT_REPORT_REG_1], SEARCH_FFT_REPORT_FFT_CHN_IDX); + + rsfr->peak_sidx = dfs_sign_extend_32(MS(rs[SEARCH_FFT_REPORT_REG_1], + SEARCH_FFT_REPORT_PEAK_SIDX), 12); + + rsfr->relpwr_db = + MS(rs[SEARCH_FFT_REPORT_REG_2], SEARCH_FFT_REPORT_RELPWR_DB); + + rsfr->avgpwr_db = + MS(rs[SEARCH_FFT_REPORT_REG_2], SEARCH_FFT_REPORT_AVGPWR_DB); + + rsfr->peak_mag = + MS(rs[SEARCH_FFT_REPORT_REG_2], SEARCH_FFT_REPORT_PEAK_MAG); + + rsfr->num_str_bins_ib = + MS(rs[SEARCH_FFT_REPORT_REG_2], SEARCH_FFT_REPORT_NUM_STR_BINS_IB); + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "two 32 bit values are: %08x %08x", rs[0], rs[1]); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->total_gain_db = %d", rsfr->total_gain_db); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->base_pwr_db = %d", rsfr->base_pwr_db); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->fft_chn_idx = %d", rsfr->fft_chn_idx); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->peak_sidx = %d", rsfr->peak_sidx); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->relpwr_db = %d", rsfr->relpwr_db); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->avgpwr_db = %d", rsfr->avgpwr_db); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->peak_mag = %d", rsfr->peak_mag); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->num_str_bins_ib = %d", rsfr->num_str_bins_ib); + + if (dfs->dfs_caps.wlan_chip_is_ht160) { + rsfr->seg_id = + MS(rs[SEARCH_FFT_REPORT_REG_3], SEARCH_FFT_REPORT_SEG_ID); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->seg_id = %d", rsfr->seg_id); + } +} + +/** + * dfs_check_for_false_detection() - Check for possible false detection on + * beeliner this may also work for Cascade but parameters + * (e.g. AGC_MB_GAIN_THRESH1) may be different for Cascade. + * @dfs: pointer to wlan_dfs structure. + * @rs: pointer to rx_radar_status structure. + * @false_detect: Pointer to save false detect value. + * @rssi: RSSI. + */ +static inline void dfs_check_for_false_detection( + struct wlan_dfs *dfs, + struct rx_radar_status *rs, + bool *false_detect, + uint8_t rssi) +{ + bool is_ht160 = false; + bool is_false_detect = false; + + is_ht160 = dfs->dfs_caps.wlan_chip_is_ht160; + is_false_detect = dfs->dfs_caps.wlan_chip_is_false_detect; + + if ((dfs->dfs_caps.wlan_chip_is_over_sampled == 0) && + (is_ht160 == 0 && is_false_detect)) { + if ((rs->agc_mb_gain > AGC_MB_GAIN_THRESH1) && + ((rs->agc_total_gain - rs->agc_mb_gain) < + AGC_OTHER_GAIN_THRESH1)) { + *false_detect = true; + } + + if ((rs->agc_mb_gain > AGC_MB_GAIN_THRESH2) && + ((rs->agc_total_gain - rs->agc_mb_gain) > + AGC_OTHER_GAIN_THRESH2) && + (rssi > AGC_GAIN_RSSI_THRESH)) { + *false_detect = true; + } + } + + if (*false_detect) + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "setting false_detect to TRUE because of mb/total_gain/rssi, agc_mb_gain=%d, agc_total_gain=%d, rssi=%d", + rs->agc_mb_gain, rs->agc_total_gain, rssi); +} + +/** + * dfs_tlv_parse_frame () - Parse a Peregrine BB TLV frame. + * @dfs: pointer to wlan_dfs structure. + * @rs: pointer to rx_radar_status structure. + * @rsfr: Pointer to rx_search_fft_report structure. + * @buf: Phyerr buffer. + * @len: Phyerr buflen. + * @rssi: RSSI. + * @first_short_fft_peak_mag: first short FFT peak_mag. + * @psidx_diff: Pointer to psidx diff. + * + * This routine parses each TLV, prints out what's going on and calls an + * appropriate sub-function. Since the TLV format doesn't _specify_ all TLV + * components are DWORD aligned, we must treat them as not and access the + * fields appropriately. + */ +static int dfs_tlv_parse_frame(struct wlan_dfs *dfs, + struct rx_radar_status *rs, + struct rx_search_fft_report *rsfr, + const char *buf, + size_t len, + uint8_t rssi, + int *first_short_fft_peak_mag, + int16_t *psidx_diff) +{ + int i = 0; + uint32_t tlv_hdr[1]; + bool false_detect = false; + /* total search FFT reports including short and long */ + int8_t sfr_count = 0; + int16_t first_short_fft_psidx = 0; + + *psidx_diff = 0; + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "total length = %zu bytes", len); + while ((i < len) && (false_detect == false)) { + /* Ensure we at least have four bytes. */ + if ((len - i) < sizeof(tlv_hdr)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR | + WLAN_DEBUG_DFS_PHYERR_SUM, + "ran out of bytes, len=%zu, i=%d", len, i); + return 0; + } + + /* + * Copy the offset into the header, so the DWORD style access + * macros can be used. + */ + qdf_mem_copy(&tlv_hdr, buf + i, sizeof(tlv_hdr)); + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "HDR: TLV SIG=0x%x, TAG=0x%x, LEN=%d bytes", + MS(tlv_hdr[TLV_REG], TLV_SIG), + MS(tlv_hdr[TLV_REG], TLV_TAG), + MS(tlv_hdr[TLV_REG], TLV_LEN)); + + /* + * Sanity check the length field is available in the remaining + * frame. Drop out if this isn't the case - we can't trust the + * rest of the TLV entries. + */ + if (MS(tlv_hdr[TLV_REG], TLV_LEN) + i >= len) { + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "TLV oversize: TLV LEN=%d, available=%zu, i=%d", + MS(tlv_hdr[TLV_REG], TLV_LEN), + len, i); + break; + } + + /* Skip the TLV header - one DWORD. */ + i += sizeof(tlv_hdr); + + /* Handle the payload. */ + switch (MS(tlv_hdr[TLV_REG], TLV_SIG)) { + case TAG_ID_RADAR_PULSE_SUMMARY: /* Radar pulse summary */ + dfs_radar_summary_parse(dfs, buf + i, + MS(tlv_hdr[TLV_REG], TLV_LEN), rs); + + dfs_check_for_false_detection(dfs, rs, &false_detect, + rssi); + break; + case TAG_ID_SEARCH_FFT_REPORT: + sfr_count++; + dfs_radar_fft_search_report_parse(dfs, buf + i, + MS(tlv_hdr[TLV_REG], TLV_LEN), rsfr); + + /* we are interested in the first short FFT report's + * peak_mag for this value to be reliable, we must + * ensure that + * BB_srch_fft_ctrl_4.radar_fft_short_rpt_scl is set to + * 0. + */ + if (sfr_count == 1) { + *first_short_fft_peak_mag = rsfr->peak_mag; + first_short_fft_psidx = rsfr->peak_sidx; + } + + /* + * Check for possible false detection on Peregrine. + * we examine search FFT report and make the following + * assumption as per algorithms group's input: + * (1) There may be multiple TLV + * (2) We make false detection decison solely based on + * the first TLV + * (3) If the first TLV is a serch FFT report then we + * check the peak_mag value. + * When RSSI is equal to dfs->wlan_dfs_false_rssI_thres + * (default 50) and peak_mag is less than + * 2 * dfs->wlan_dfs_peak_mag (default 40) we treat it + * as false detect. Please note that 50 is not a true + * RSSI estimate, but value indicated by HW for RF + * saturation event. + */ + if (PERE_IS_OVERSAMPLING(dfs) && + (sfr_count == 1) && + (rssi == dfs->wlan_dfs_false_rssi_thres) && + (rsfr->peak_mag < (2 * dfs->wlan_dfs_peak_mag)) + ) { + false_detect = true; + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "setting false_detect to TRUE because of false_rssi_thres"); + } + + /* + * The first FFT report indicated by (sfr_count == 1) + * should correspond to the first short FFT report from + * HW and the second FFT report indicated by + * (sfr_count == 2) should correspond to the first long + * FFT report from HW for the same pulse. The short and + * log FFT reports have a factor of 4 difference in + * resolution; hence the need to multiply by 4 when + * computing the psidx_diff. + */ + if (sfr_count == 2) + *psidx_diff = rsfr->peak_sidx - + 4 * first_short_fft_psidx; + + break; + default: + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "unknown entry, SIG=0x%02x", + MS(tlv_hdr[TLV_REG], TLV_SIG)); + } + + /* Skip the payload. */ + i += MS(tlv_hdr[TLV_REG], TLV_LEN); + } + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, "done"); + + return false_detect ? 0 : 1; +} + +/** + * dfs_tlv_calc_freq_info() - Calculate the channel centre in MHz. + * @dfs: pointer to wlan_dfs structure. + * @rs: pointer to rx_radar_status structure. + * + * Return: Returns the channel center. + */ +#ifdef CONFIG_CHAN_FREQ_API +static int dfs_tlv_calc_freq_info(struct wlan_dfs *dfs, + struct rx_radar_status *rs) +{ + uint32_t chan_centre; + uint32_t chan_width; + int chan_offset; + + /* For now, just handle up to VHT80 correctly. */ + if (!dfs->dfs_curchan) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_curchan is null"); + return 0; + /* + * For now, the only 11ac channel with freq1/freq2 setup is + * VHT80. Should have a flag macro to check this! + */ + } else if (WLAN_IS_CHAN_11AC_VHT80(dfs->dfs_curchan)) { + /* + * 11AC, so cfreq1/cfreq2 are setup. + * If it's 80+80 this won't work - need to use seg + * appropriately! + */ + chan_centre = dfs->dfs_curchan->dfs_ch_mhz_freq_seg1; + } else { + /* + * HT20/HT40. + * This is hard-coded - it should be 5 or 10 for half/quarter + * appropriately. + */ + chan_width = 20; + + /* Grab default channel centre. */ + chan_centre = dfs->dfs_curchan->dfs_ch_freq; + + /* Calculate offset based on HT40U/HT40D and VHT40U/VHT40D. */ + if (WLAN_IS_CHAN_11N_HT40PLUS(dfs->dfs_curchan) || + (dfs->dfs_curchan->dfs_ch_flags & WLAN_CHAN_VHT40PLUS)) + chan_offset = chan_width; + else if (WLAN_IS_CHAN_11N_HT40MINUS(dfs->dfs_curchan) || + (dfs->dfs_curchan->dfs_ch_flags & + WLAN_CHAN_VHT40MINUS)) + chan_offset = -chan_width; + else + chan_offset = 0; + + /* Calculate new _real_ channel centre. */ + chan_centre += (chan_offset / 2); + } + + /* Return ev_chan_centre in MHz. */ + return chan_centre; +} +#else +#ifdef CONFIG_CHAN_NUM_API +static int dfs_tlv_calc_freq_info(struct wlan_dfs *dfs, + struct rx_radar_status *rs) +{ + uint32_t chan_centre; + uint32_t chan_width; + int chan_offset; + + /* For now, just handle up to VHT80 correctly. */ + if (!dfs->dfs_curchan) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_curchan is null"); + return 0; + /* + * For now, the only 11ac channel with freq1/freq2 setup is + * VHT80. Should have a flag macro to check this! + */ + } else if (WLAN_IS_CHAN_11AC_VHT80(dfs->dfs_curchan)) { + /* + * 11AC, so cfreq1/cfreq2 are setup. + * If it's 80+80 this won't work - need to use seg + * appropriately! + */ + chan_centre = dfs_mlme_ieee2mhz(dfs->dfs_pdev_obj, + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg1, + dfs->dfs_curchan->dfs_ch_flags); + } else { + /* + * HT20/HT40. + * This is hard-coded - it should be 5 or 10 for half/quarter + * appropriately. + */ + chan_width = 20; + + /* Grab default channel centre. */ + chan_centre = dfs_chan2freq(dfs->dfs_curchan); + + /* Calculate offset based on HT40U/HT40D and VHT40U/VHT40D. */ + if (WLAN_IS_CHAN_11N_HT40PLUS(dfs->dfs_curchan) || + dfs->dfs_curchan->dfs_ch_flags & + WLAN_CHAN_VHT40PLUS) + chan_offset = chan_width; + else if (WLAN_IS_CHAN_11N_HT40MINUS(dfs->dfs_curchan) || + dfs->dfs_curchan->dfs_ch_flags & + WLAN_CHAN_VHT40MINUS) + chan_offset = -chan_width; + else + chan_offset = 0; + + /* Calculate new _real_ channel centre. */ + chan_centre += (chan_offset / 2); + } + + /* Return ev_chan_centre in MHz. */ + return chan_centre; +} +#endif +#endif + + +/** + * dfs_tlv_calc_event_freq_pulse() - Calculate the centre frequency and + * low/high range for a radar pulse event. + * @dfs: pointer to wlan_dfs structure. + * @rs: pointer to rx_radar_status structure. + * @freq_centre: center frequency + * @freq_lo: lower bounds of frequency. + * @freq_hi: upper bounds of frequency. + * + * XXX TODO: Handle half/quarter rates correctly! + * XXX TODO: handle VHT160 correctly! + * XXX TODO: handle VHT80+80 correctly! + * + * Return: Returns 1. + */ +static int dfs_tlv_calc_event_freq_pulse(struct wlan_dfs *dfs, + struct rx_radar_status *rs, + uint32_t *freq_centre, + uint32_t *freq_lo, + uint32_t *freq_hi) +{ + int chan_width; + int chan_centre; + + /* Fetch the channel centre frequency in MHz. */ + chan_centre = dfs_tlv_calc_freq_info(dfs, rs); + + /* Convert to KHz. */ + chan_centre *= 1000; + + /* + * XXX hard-code event width to be 2 * bin size for now; + * XXX this needs to take into account the core clock speed + * XXX for half/quarter rate mode. + */ + if (PERE_IS_OVERSAMPLING(dfs)) + chan_width = (44000 * 2 / 128); + else + chan_width = (40000 * 2 / 128); + + /* XXX adjust chan_width for half/quarter rate! */ + + /* Now we can do the math to figure out the correct channel range. */ + (*freq_centre) = (uint32_t) (chan_centre + rs->freq_offset); + (*freq_lo) = (uint32_t) ((chan_centre + rs->freq_offset) - chan_width); + (*freq_hi) = (uint32_t) ((chan_centre + rs->freq_offset) + chan_width); + + return 1; +} + +/** + * dfs_tlv_calc_event_freq_chirp() - Calculate the event freq. + * @dfs: pointer to wlan_dfs structure. + * @rs: pointer to rx_radar_status structure. + * @freq_centre: center frequency + * @freq_lo: lower bounds of frequency. + * @freq_hi: upper bounds of frequency. + * + * The chirp bandwidth in KHz is defined as: + * totalBW(KHz) = delta_peak(mean) + * * [ (bin resolution in KHz) / (radar_fft_long_period in uS) ] + * * pulse_duration (us) + * The bin resolution depends upon oversampling. + * For now, we treat the radar_fft_long_period as a hard-coded 8uS. + * + * Return: Returns 1 + */ +static int dfs_tlv_calc_event_freq_chirp(struct wlan_dfs *dfs, + struct rx_radar_status *rs, + uint32_t *freq_centre, + uint32_t *freq_lo, + uint32_t *freq_hi) +{ + int32_t bin_resolution; /* KHz * 100 */ + int32_t radar_fft_long_period = 8; /* microseconds */ + int32_t delta_peak; + int32_t pulse_duration; + int32_t total_bw; + int32_t chan_centre; + int32_t freq_1, freq_2; + + /* + * KHz isn't enough resolution here! + * So treat it as deci-hertz (10Hz) and convert back to KHz later. + */ + + if (PERE_IS_OVERSAMPLING(dfs)) + bin_resolution = (OVER_SAMPLING_FREQ * HUNDRED) / NUM_BINS; + else + bin_resolution = (SAMPLING_FREQ * HUNDRED) / NUM_BINS; + + delta_peak = rs->delta_peak; + pulse_duration = rs->pulse_duration; + + total_bw = delta_peak * (bin_resolution / radar_fft_long_period) * + pulse_duration; + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR | WLAN_DEBUG_DFS_PHYERR_SUM, + "delta_peak=%d, pulse_duration=%d, bin_resolution=%d.%dKHz, radar_fft_long_period=%d, total_bw=%d.%ldKHz", + delta_peak, pulse_duration, bin_resolution / THOUSAND, + bin_resolution % THOUSAND, radar_fft_long_period, + total_bw / HUNDRED, + (long)abs(total_bw % HUNDRED)); + + total_bw /= HUNDRED; /* back to KHz */ + /* Grab the channel centre frequency in MHz. */ + chan_centre = dfs_tlv_calc_freq_info(dfs, rs); + + /* Early abort! */ + if (chan_centre == 0) { + (*freq_centre) = 0; + return 0; + } + + /* Convert to KHz. */ + chan_centre *= THOUSAND; + + /* + * Sidx is the starting frequency; total_bw is a signed value and for + * negative chirps (ie, moving down in frequency rather than up) the end + * frequency may be less than the start frequency. + */ + if (total_bw > 0) { + freq_1 = chan_centre + rs->freq_offset; + freq_2 = chan_centre + rs->freq_offset + total_bw; + } else { + freq_1 = chan_centre + rs->freq_offset + total_bw; + freq_2 = chan_centre + rs->freq_offset; + } + + (*freq_lo) = (uint32_t)(freq_1); + (*freq_hi) = (uint32_t)(freq_2); + (*freq_centre) = (uint32_t) (freq_1 + (abs(total_bw) / 2)); + + return 1; +} + +/** + * dfs_tlv_calc_event_freq() - Calculate the centre and band edge frequencies + * of the given radar event. + * @dfs: Pointer to wlan_dfs structure. + * @rs: Pointer to rx_radar_status structure. + * @freq_centre: Center frequency + * @freq_lo: Lower bounds of frequency. + * @freq_hi: Upper bounds of frequency. + */ +static int dfs_tlv_calc_event_freq(struct wlan_dfs *dfs, + struct rx_radar_status *rs, + uint32_t *freq_centre, + uint32_t *freq_lo, + uint32_t *freq_hi) +{ + if (rs->is_chirp) + return dfs_tlv_calc_event_freq_chirp(dfs, rs, freq_centre, + freq_lo, freq_hi); + else + return dfs_tlv_calc_event_freq_pulse(dfs, rs, freq_centre, + freq_lo, freq_hi); +} + +int dfs_process_phyerr_bb_tlv(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t rssi, + uint8_t ext_rssi, + uint32_t rs_tstamp, + uint64_t fulltsf, + struct dfs_phy_err *e) +{ + struct rx_radar_status rs; + struct rx_search_fft_report rsfr; + int first_short_fft_peak_mag = 0; + int16_t psidx_diff; + + qdf_mem_zero(&rs, sizeof(rs)); + qdf_mem_zero(&rsfr, sizeof(rsfr)); + + /* + * Add the ppdu_start/ppdu_end fields given to us by the upper layers. + * The firmware gives us a summary set of parameters rather than the + * whole PPDU_START/PPDU_END descriptor contenst. + */ + rs.rssi = rssi; + rs.raw_tsf = rs_tstamp; + + /* Try parsing the TLV set. */ + if (!dfs_tlv_parse_frame(dfs, &rs, &rsfr, buf, datalen, rssi, + &first_short_fft_peak_mag, &psidx_diff)) + return 0; + + /* For debugging, print what we have parsed. */ + dfs_radar_summary_print(dfs, &rs); + + /* Populate dfs_phy_err from rs. */ + qdf_mem_zero(e, sizeof(*e)); + e->rssi = rs.rssi; + e->dur = rs.pulse_duration; + e->is_pri = 1; /* Always PRI for now */ + e->is_ext = 0; + e->is_dc = 0; + e->is_early = 0; + + /* + * XXX TODO: add a "chirp detection enabled" capability or config bit + * somewhere, in case for some reason the hardware chirp detection AND + * FFTs are disabled. + * For now, assume this hardware always does chirp detection. + */ + e->do_check_chirp = 1; + e->is_hw_chirp = !!(rs.is_chirp); + e->is_sw_chirp = 0; /* We don't yet do software chirp checking */ + + e->fulltsf = fulltsf; + e->rs_tstamp = rs.raw_tsf - rs.tsf_offset; + + /* XXX error check */ + (void)dfs_tlv_calc_event_freq(dfs, &rs, &e->freq, &e->freq_lo, + &e->freq_hi); + + e->seg_id = rsfr.seg_id; + e->sidx = rs.sidx; + e->freq_offset_khz = rs.freq_offset; + e->peak_mag = first_short_fft_peak_mag; + e->total_gain = rs.agc_total_gain; + e->mb_gain = rs.agc_mb_gain; + e->relpwr_db = rsfr.relpwr_db; + e->pulse_delta_peak = rs.delta_peak; + e->pulse_psidx_diff = psidx_diff; + e->pulse_delta_diff = rs.delta_diff; + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR_SUM, + "fbin=%d, freq=%d.%d MHz, raw tsf=%u, offset=%d, cooked tsf=%u, rssi=%d, dur=%d, is_chirp=%d, fulltsf=%llu, freq=%d.%d MHz, freq_lo=%d.%dMHz, freq_hi=%d.%d MHz", + rs.sidx, (int) (rs.freq_offset / 1000), + (int) abs(rs.freq_offset % 1000), rs.raw_tsf, rs.tsf_offset, + e->rs_tstamp, rs.rssi, rs.pulse_duration, (int)rs.is_chirp, + (unsigned long long) fulltsf, (int)e->freq / 1000, + (int) abs(e->freq) % 1000, (int)e->freq_lo / 1000, + (int) abs(e->freq_lo) % 1000, (int)e->freq_hi / 1000, + (int) abs(e->freq_hi) % 1000); + + dfs_debug(dfs, WLAN_DEBUG_DFS_FALSE_DET, + "ts=%u, dur=%d, rssi=%d, freq_offset=%d.%dMHz, is_chirp=%d, seg_id=%d, peak_mag=%d, total_gain=%d, mb_gain=%d, relpwr_db=%d, delta_peak=%d, delta_diff=%d, psidx_diff=%d", + e->rs_tstamp, rs.pulse_duration, rs.rssi, + (int)e->freq_offset_khz / 1000, + (int)abs(e->freq_offset_khz) % 1000, (int)rs.is_chirp, + rsfr.seg_id, rsfr.peak_mag, rs.agc_total_gain, rs.agc_mb_gain, + rsfr.relpwr_db, + rs.delta_peak, + rs.delta_diff, + psidx_diff); + + return 1; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_process_phyerr.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_process_phyerr.c new file mode 100644 index 0000000000000000000000000000000000000000..913f81741bd930dbfa1a32859a1b8d266c8a7ef5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_process_phyerr.c @@ -0,0 +1,995 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: For each radar pulse that the HW detects, a single radar PHY error is + * reported to the driver. This PHY error contains information like the RSSI, + * the pulse duration, the pulse location (primary/extension/DC) and possibly + * FFT data. + */ + +#include "../dfs.h" +#include "../dfs_zero_cac.h" +#include "../dfs_channel.h" +#include "wlan_dfs_mlme_api.h" +#include "../dfs_internal.h" + +/** + * dfs_get_event_freqwidth() - Get frequency width. + * @dfs: Pointer to wlan_dfs structure. + * + * Return: Return the frequency width for the current operating channel. + * This isn't the channel width - it's how wide the reported event may be. + * For HT20 this is 20MHz. For HT40 on Howl and later it'll still be 20MHz + * - the hardware returns either pri or ext channel. + */ +static inline int dfs_get_event_freqwidth(struct wlan_dfs *dfs) +{ + /* Handle edge cases during startup/transition, shouldn't happen! */ + if (!dfs) + return 0; + + if (!dfs->dfs_curchan) + return 0; + + /* + * For now, assume 20MHz wide - but this is incorrect when operating in + * half/quarter mode! + */ + return 20; +} + +/** + * dfs_get_event_freqcentre() - Get event frequency centre. + * @dfs: Pointer to wlan_dfs structure. + * @is_pri: detected on primary channel. + * @is_ext: detected on extension channel. + * @is_dc: detected at DC. + * + * Return the centre frequency for the current operating channel and event. + * This is for post-Owl 11n chips which report pri/extension channel events. + */ +static inline uint16_t dfs_get_event_freqcentre(struct wlan_dfs *dfs, + int is_pri, + int is_ext, + int is_dc) +{ + int chan_offset = 0, chan_width; + + /* Handle edge cases during startup/transition, shouldn't happen! */ + if (!dfs) + return 0; + if (!dfs->dfs_curchan) + return 0; + + /* + * For wide channels, DC and ext frequencies need a bit of hand-holding + * based on whether it's an upper or lower channel. + */ + chan_width = dfs_get_event_freqwidth(dfs); + + if (WLAN_IS_CHAN_11N_HT40PLUS(dfs->dfs_curchan)) + chan_offset = chan_width; + else if (WLAN_IS_CHAN_11N_HT40MINUS(dfs->dfs_curchan)) + chan_offset = -chan_width; + else + chan_offset = 0; + + /* + * Check for DC events first - the sowl code may just set all the bits + * together. + */ + if (is_dc) { + /* XXX TODO: Should DC events be considered 40MHz wide here? */ + return dfs_chan2freq( + dfs->dfs_curchan) + (chan_offset / 2); + } + + /* + * For non-wide channels, the centre frequency is just dfs_ch_freq. + * The centre frequency for pri events is still dfs_ch_freq. + */ + if (is_pri) + return dfs_chan2freq(dfs->dfs_curchan); + + if (is_ext) + return dfs_chan2freq(dfs->dfs_curchan) + chan_width; + + return dfs_chan2freq(dfs->dfs_curchan); +} + +int dfs_process_phyerr_owl(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t rssi, + uint8_t ext_rssi, + uint32_t rs_tstamp, + uint64_t fulltsf, + struct dfs_phy_err *e) +{ + const char *cbuf = (const char *) buf; + uint8_t dur; + int event_width; + + dfs->wlan_dfs_stats.owl_phy_errors++; + + /* + * HW cannot detect extension channel radar so it only passes us primary + * channel radar data. + */ + if (datalen == 0) + dur = 0; + else + dur = ((uint8_t *) cbuf)[0]; + + /* This is a spurious event; toss. */ + if (rssi == 0 && dur == 0) { + dfs->wlan_dfs_stats.datalen_discards++; + return 0; + } + + /* Fill out dfs_phy_err with the information we have at hand. */ + qdf_mem_zero(e, sizeof(*e)); + e->rssi = rssi; + e->dur = dur; + e->is_pri = 1; + e->is_ext = 0; + e->is_dc = 0; + e->is_early = 1; + e->fulltsf = fulltsf; + e->rs_tstamp = rs_tstamp; + + /* + * Owl only ever reports events on the primary channel. It doesn't + * even see events on the secondary channel. + */ + event_width = dfs_get_event_freqwidth(dfs); + e->freq = dfs_get_event_freqcentre(dfs, 1, 0, 0) * 1000; + e->freq_lo = e->freq - (event_width / 2) * 1000; + e->freq_hi = e->freq + (event_width / 2) * 1000; + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR_SUM, + "rssi=%u dur=%u, freq=%d MHz, freq_lo=%d MHz, freq_hi=%d MHz", + rssi, dur, e->freq/1000, e->freq_lo/1000, + e->freq_hi / 1000); + + return 1; +} + +int dfs_process_phyerr_sowl(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t rssi, + uint8_t ext_rssi, + uint32_t rs_tstamp, + uint64_t fulltsf, + struct dfs_phy_err *e) +{ +#define EXT_CH_RADAR_FOUND 0x02 +#define PRI_CH_RADAR_FOUND 0x01 +#define EXT_CH_RADAR_EARLY_FOUND 0x04 + const char *cbuf = (const char *)buf; + uint8_t dur = 0; + uint8_t pulse_bw_info, pulse_length_ext, pulse_length_pri; + int pri_found = 0, ext_found = 0; + int early_ext = 0; + int event_width; + + /* + * If radar can be detected on the extension channel, datalen zero + * pulses are bogus, discard them. + */ + if (!datalen) { + dfs->wlan_dfs_stats.datalen_discards++; + return 0; + } + + /* Ensure that we have at least three bytes of payload. */ + if (datalen < 3) { + dfs_debug(dfs, WLAN_DEBUG_DFS, + "short error frame (%d bytes)", datalen); + dfs->wlan_dfs_stats.datalen_discards++; + return 0; + } + + /* + * Fetch the payload directly - the compiler will happily generate + * byte-read instructions with a const char * cbuf pointer. + */ + pulse_length_pri = cbuf[datalen - 3]; + pulse_length_ext = cbuf[datalen - 2]; + pulse_bw_info = cbuf[datalen - 1]; + + /* + * Only the last 3 bits of the BW info are relevant, they indicate + * which channel the radar was detected in. + */ + pulse_bw_info &= 0x07; + + /* If pulse on DC, both primary and extension flags will be set */ + if (((pulse_bw_info & EXT_CH_RADAR_FOUND) && + (pulse_bw_info & PRI_CH_RADAR_FOUND))) { + /* + * Conducted testing, when pulse is on DC, both pri and ext + * durations are reported to be same. Radiated testing, when + * pulse is on DC, differentpri and ext durations are reported, + * so take the larger of the two. + */ + if (pulse_length_ext >= pulse_length_pri) { + dur = pulse_length_ext; + ext_found = 1; + } else { + dur = pulse_length_pri; + pri_found = 1; + } + dfs->wlan_dfs_stats.dc_phy_errors++; + } else { + if (pulse_bw_info & EXT_CH_RADAR_FOUND) { + dur = pulse_length_ext; + pri_found = 0; + ext_found = 1; + dfs->wlan_dfs_stats.ext_phy_errors++; + } + if (pulse_bw_info & PRI_CH_RADAR_FOUND) { + dur = pulse_length_pri; + pri_found = 1; + ext_found = 0; + dfs->wlan_dfs_stats.pri_phy_errors++; + } + if (pulse_bw_info & EXT_CH_RADAR_EARLY_FOUND) { + dur = pulse_length_ext; + pri_found = 0; + ext_found = 1; + early_ext = 1; + dfs->wlan_dfs_stats.early_ext_phy_errors++; + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "EARLY ext channel dur=%u rssi=%u datalen=%d", + dur, rssi, datalen); + } + if (!pulse_bw_info) { + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "ERROR channel dur=%u rssi=%u pulse_bw_info=0x%x datalen MOD 4 = %d", + dur, rssi, pulse_bw_info, (datalen & 0x3)); + /* + * Bogus bandwidth info received in descriptor, so + * ignore this PHY error. + */ + dfs->wlan_dfs_stats.bwinfo_errors++; + return 0; + } + } + + /* + * Always use combined RSSI reported, unless RSSI reported on + * extension is stronger. + */ + if ((ext_rssi > rssi) && (ext_rssi < 128)) + rssi = ext_rssi; + + /* Fill out the rssi/duration fields from above. */ + qdf_mem_zero(e, sizeof(*e)); + e->rssi = rssi; + e->dur = dur; + e->is_pri = pri_found; + e->is_ext = ext_found; + e->is_dc = !!(((pulse_bw_info & EXT_CH_RADAR_FOUND) && + (pulse_bw_info & PRI_CH_RADAR_FOUND))); + e->is_early = early_ext; + e->fulltsf = fulltsf; + e->rs_tstamp = rs_tstamp; + + /* Sowl and later can report pri/ext events. */ + event_width = dfs_get_event_freqwidth(dfs); + e->freq = dfs_get_event_freqcentre(dfs, e->is_pri, e->is_ext, + e->is_dc) * 1000; + e->freq_lo = e->freq - (event_width / 2) * 1000; + e->freq_hi = e->freq + (event_width / 2) * 1000; + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR_SUM, + "pulse_bw_info=0x%x pulse_length_ext=%u pulse_length_pri=%u rssi=%u ext_rssi=%u, freq=%d MHz, freq_lo=%d MHz, freq_hi=%d MHz", + pulse_bw_info, pulse_length_ext, pulse_length_pri, + rssi, ext_rssi, e->freq/1000, e->freq_lo/1000, e->freq_hi/1000); +#undef EXT_CH_RADAR_FOUND +#undef PRI_CH_RADAR_FOUND +#undef EXT_CH_RADAR_EARLY_FOUND + + return 1; +} + +int dfs_process_phyerr_merlin(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t rssi, + uint8_t ext_rssi, + uint32_t rs_tstamp, + uint64_t fulltsf, + struct dfs_phy_err *e) +{ + const char *cbuf = (const char *) buf; + uint8_t pulse_bw_info = 0; + + /* Process using the sowl code. */ + if (!dfs_process_phyerr_sowl(dfs, buf, datalen, rssi, ext_rssi, + rs_tstamp, fulltsf, e)) { + return 0; + } + + /* + * For osprey (and Merlin) bw_info has implication for selecting RSSI + * value. So re-fetch the bw_info field so the RSSI values can be + * appropriately overridden. + */ + pulse_bw_info = cbuf[datalen - 1]; + + switch (pulse_bw_info & 0x03) { + case 0x00: + /* No radar in ctrl or ext channel */ + rssi = 0; + break; + case 0x01: + /* Radar in ctrl channel */ + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "RAW RSSI: rssi=%u ext_rssi=%u", rssi, ext_rssi); + if (ext_rssi >= (rssi + 3)) { + /* + * Cannot use ctrl channel RSSI if extension channel is + * stronger. + */ + rssi = 0; + } + break; + case 0x02: + /* Radar in extension channel */ + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "RAW RSSI: rssi=%u ext_rssi=%u", rssi, ext_rssi); + if (rssi >= (ext_rssi + 12)) { + /* + * Cannot use extension channel RSSI if control channel + * is stronger + */ + rssi = 0; + } else { + rssi = ext_rssi; + } + break; + case 0x03: + /* When both are present use stronger one */ + if (rssi < ext_rssi) + rssi = ext_rssi; + break; + } + + /* + * Override the rssi decision made by the sowl code. The rest of the + * fields (duration, timestamp, etc) are left untouched. + */ + e->rssi = rssi; + + return 1; +} + +/** + * dfs_dump_phyerr_contents() - Dump the phyerr contents. + * @d: Phyerr buffer. + * @len: Phyerr buf length. + */ + +static void dfs_dump_phyerr_contents(const char *d, int len) +{ + int i, n, bufsize = 64; + + /* + * This is statically sized for a 4-digit address + 16 * 2 digit data + * string. It's done so the printk() passed to the kernel is an entire + * line, so the kernel logging code will atomically print it. Otherwise + * we'll end up with interleaved lines with output from other kernel + * threads. + */ + char buf[64]; + + /* Initial conditions */ + buf[0] = '\n'; + n = 0; + + for (i = 0; i < len; i++) { + if (i % 16 == 0) + n += snprintf(buf + n, bufsize - n, "%04x: ", i); + + n += snprintf(buf + n, bufsize - n, "%02x ", d[i] & 0xff); + if (i % 16 == 15) { + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "%s", buf); + n = 0; + buf[0] = '\0'; + } + } + + /* Print the final line if we didn't print it above. */ + if (n != 0) + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "%s", buf); +} + +/** + * dfs_bump_up_bin5_pulse_dur() - Bump up to a random BIN 5 pulse duration. + * @dfs: Pointer to wlan_dfs structure. + * @e: Pointer to dfs_phy_err structure. + * @slope: Slope value. + */ +static inline void dfs_bump_up_bin5_pulse_dur( + struct wlan_dfs *dfs, + struct dfs_phy_err *e, + int slope) +{ + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, "old dur %d slope =%d", + e->dur, slope); + + e->is_sw_chirp = 1; + /* bump up to a random bin5 pulse duration */ + if (e->dur < MIN_BIN5_DUR) + e->dur = dfs_get_random_bin5_dur(dfs, e->fulltsf); + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, "new dur %d", e->dur); +} + +/** + * dfs_filter_short_pulses() - Filter short pulses. + * @dfs: Pointer to wlan_dfs structure. + * @e: Pointer to dfs_phy_err structure. + * @retval: Return value + * + * Rssi is not accurate for short pulses, so donot filter based on that for + * short duration pulses. + */ +static inline void dfs_filter_short_pulses( + struct wlan_dfs *dfs, + struct dfs_phy_err *e, + int *retval) +{ + if (dfs->dfs_caps.wlan_dfs_ext_chan_ok) { + if ((e->rssi < dfs->dfs_rinfo.rn_minrssithresh && + (e->dur > MAX_DUR_FOR_LOW_RSSI)) || + e->dur > (dfs->dfs_rinfo.rn_maxpulsedur)) { + dfs->wlan_dfs_stats.rssi_discards++; + *retval = 1; + } + } else if (e->rssi < dfs->dfs_rinfo.rn_minrssithresh || + e->dur > dfs->dfs_rinfo.rn_maxpulsedur) { + dfs->wlan_dfs_stats.rssi_discards++; + *retval = 1; + } + + if (*retval) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "%s pulse is discarded: dur=%d, maxpulsedur=%d, rssi=%d, minrssi=%d", + (dfs->dfs_caps.wlan_dfs_ext_chan_ok) ? + "Extension channel" : "", + e->dur, dfs->dfs_rinfo.rn_maxpulsedur, + e->rssi, dfs->dfs_rinfo.rn_minrssithresh); + } +} + +/** + * dfs_set_chan_index() - Set channel index. + * @dfs: Pointer to wlan_dfs structure. + * @e: Pointer to dfs_phy_err structure. + * @event: Pointer to dfs_event structure. + */ +static inline void dfs_set_chan_index( + struct wlan_dfs *dfs, + struct dfs_phy_err *e, + struct dfs_event *event) +{ + if (e->is_pri) { + event->re_chanindex = dfs->dfs_curchan_radindex; + } else { + event->re_chanindex = dfs->dfs_extchan_radindex; + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "%s New extension channel event is added to queue", + (event->re_chanindex == -1) ? + "- phyerr on ext channel" : ""); + } +} + +/** + * dfs_is_second_seg_radar_disabled() - Check for second segment radar disabled. + * @dfs: Pointer to wlan_dfs structure. + * @seg_id: Segment id. + * + * Return: true if the second segment RADAR is enabled else false. + */ +static bool dfs_is_second_seg_radar_disabled( + struct wlan_dfs *dfs, int seg_id) +{ + if ((seg_id == SEG_ID_SECONDARY) && + !(dfs->dfs_proc_phyerr & DFS_SECOND_SEGMENT_RADAR_EN)) { + dfs_debug(dfs, WLAN_DEBUG_DFS3, + "Second segment radar detection is disabled"); + return true; + } + + return false; +} + +void dfs_process_phyerr(struct wlan_dfs *dfs, void *buf, uint16_t datalen, + uint8_t r_rssi, uint8_t r_ext_rssi, uint32_t r_rs_tstamp, + uint64_t r_fulltsf) +{ + struct dfs_event *event; + struct dfs_phy_err e; + int empty; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + if (dfs->dfs_ignore_dfs) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, "ignoring dfs"); + return; + } + + /* + * EV 129487: If radar detection is disabled, do not process PHY error + * data. + */ + + if (!(dfs->dfs_proc_phyerr & DFS_RADAR_EN)) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "DFS_RADAR_EN not set in dfs->dfs_proc_phyerr"); + return; + } + + /* + * The combined_rssi_ok support has been removed. This was only clear + * for Owl. + * XXX TODO: re-add this; it requires passing in the ctl/ext + * RSSI set from the RX status descriptor. + * XXX TODO : this may be done for us from the legacy phy error path in + * wlan_dev; please review that code. + */ + + /* + * At this time we have a radar pulse that we need to examine and + * queue. But if dfs_process_radarevent already detected radar and set + * CHANNEL_INTERFERENCE flag then do not queue any more radar data. + * When we are in a new channel this flag will be clear and we will + * start queueing data for new channel. (EV74162) + */ + if (dfs->dfs_debug_mask & WLAN_DEBUG_DFS_PHYERR_PKT) + dfs_dump_phyerr_contents(buf, datalen); + + if (WLAN_IS_CHAN_RADAR(dfs->dfs_curchan)) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "Radar already found in the channel, do not queue radar data"); + return; + } + dfs->dfs_phyerr_count++; + dfs->wlan_dfs_stats.total_phy_errors++; + dfs_debug(dfs, WLAN_DEBUG_DFS2, "phyerr %d len %d", + dfs->wlan_dfs_stats.total_phy_errors, datalen); + + /* + * Hardware stores this as 8 bit signed value. we will cap it at 0 if it + * is a negative number. + */ + if (r_rssi & 0x80) + r_rssi = 0; + + if (r_ext_rssi & 0x80) + r_ext_rssi = 0; + + qdf_mem_zero(&e, sizeof(e)); + + /* + * This is a bit evil - instead of just passing in the chip version, the + * existing code uses a set of HAL capability bits to determine what is + * possible. + * The way I'm decoding it is thus: + * + DFS enhancement? Merlin or later + * + DFS extension channel? Sowl or later. (Howl?) + * + otherwise, Owl (and legacy.) + */ + if (dfs->dfs_caps.wlan_chip_is_bb_tlv) { + if (dfs_process_phyerr_bb_tlv(dfs, buf, datalen, r_rssi, + r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0) { + dfs->dfs_phyerr_reject_count++; + return; + } + + if (dfs->dfs_phyerr_freq_min > e.freq) + dfs->dfs_phyerr_freq_min = e. freq; + + if (dfs->dfs_phyerr_freq_max < e.freq) + dfs->dfs_phyerr_freq_max = e. freq; + } else if (dfs->dfs_caps.wlan_dfs_use_enhancement) { + if (dfs_process_phyerr_merlin(dfs, buf, datalen, r_rssi, + r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0) + return; + } else if (dfs->dfs_caps.wlan_dfs_ext_chan_ok) { + if (dfs_process_phyerr_sowl(dfs, buf, datalen, r_rssi, + r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0) + return; + } else { + if (dfs_process_phyerr_owl(dfs, buf, datalen, r_rssi, + r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0) + return; + } + + /* + * If the hardware supports radar reporting on the extension channel + * it will supply FFT data for longer radar pulses. + * TLV chips don't go through this software check - the hardware + * check should be enough. If we want to do software checking + * later on then someone will have to craft an FFT parser + * suitable for the TLV FFT data format. + */ + if ((!dfs->dfs_caps.wlan_chip_is_bb_tlv) && + dfs->dfs_caps.wlan_dfs_ext_chan_ok) { + /* + * HW has a known issue with chirping pulses injected at or + * around DC in 40MHz mode. Such pulses are reported with much + * lower durations and SW then discards them because they do + * not fit the minimum bin5 pulse duration. To work around this + * issue, if a pulse is within a 10us range of the bin5 min + * duration, check if the pulse is chirping. If the pulse is + * chirping, bump up the duration to the minimum bin5 duration. + * This makes sure that a valid chirping pulse will not be + * discarded because of incorrect low duration. TBD - Is it + * possible to calculate the 'real' duration of the pulse using + * the slope of the FFT data? TBD - Use FFT data to + * differentiate between radar pulses and false PHY errors. + * This will let us reduce the number of false alarms seen. + * BIN 5 chirping pulses are only for FCC or Japan MMK4 domain + */ + if (((dfs->dfsdomain == DFS_FCC_DOMAIN) || + (dfs->dfsdomain == DFS_MKK4_DOMAIN)) && + (e.dur >= MAYBE_BIN5_DUR) && (e.dur < MAX_BIN5_DUR)) { + int add_dur; + int slope = 0, dc_found = 0; + + /* + * Set the event chirping flags; as we're doing an + * actual chirp check. + */ + e.do_check_chirp = 1; + e.is_hw_chirp = 0; + e.is_sw_chirp = 0; + + /* + * dfs_check_chirping() expects is_pri and is_ext to + * be '1' for true and '0' for false for now, as the + * function itself uses these values in constructing + * things rather than testing them + */ + add_dur = dfs_check_chirping(dfs, buf, datalen, + (e.is_pri ? 1 : 0), + (e.is_ext ? 1 : 0), &slope, &dc_found); + if (add_dur) { + dfs_bump_up_bin5_pulse_dur(dfs, &e, slope); + } else { + /* Set the duration so that it is rejected. */ + e.is_sw_chirp = 0; + e.dur = MAX_BIN5_DUR + 100; + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "is_chirping = %d dur=%d", + add_dur, e.dur); + } + } else { + /* + * We have a pulse that is either bigger than + * MAX_BIN5_DUR or less than MAYBE_BIN5_DUR + */ + if ((dfs->dfsdomain == DFS_FCC_DOMAIN) || + (dfs->dfsdomain == DFS_MKK4_DOMAIN)) { + /* + * Would this result in very large pulses + * wrapping around to become short pulses? + */ + if (e.dur >= MAX_BIN5_DUR) { + /* + * Set the duration so that it is + * rejected. + */ + e.dur = MAX_BIN5_DUR + 50; + } + } + } + } + + /* + * Add the parsed, checked and filtered entry to the radar pulse + * event list. This is then checked by dfs_radar_processevent(). + * + * XXX TODO: some filtering is still done below this point - fix this! + */ + WLAN_DFSEVENTQ_LOCK(dfs); + empty = STAILQ_EMPTY(&(dfs->dfs_eventq)); + WLAN_DFSEVENTQ_UNLOCK(dfs); + if (empty) + return; + + /* + * If the channel is a turbo G channel, then the event is for the + * adaptive radio (AR) pattern matching rather than radar detection. + */ + if ((dfs->dfs_curchan->dfs_ch_flags & CHANNEL_108G) == CHANNEL_108G) { + if (!(dfs->dfs_proc_phyerr & DFS_AR_EN)) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "DFS_AR_EN not enabled"); + return; + } + WLAN_DFSEVENTQ_LOCK(dfs); + event = STAILQ_FIRST(&(dfs->dfs_eventq)); + if (!event) { + WLAN_DFSEVENTQ_UNLOCK(dfs); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "no more events space left"); + return; + } + STAILQ_REMOVE_HEAD(&(dfs->dfs_eventq), re_list); + WLAN_DFSEVENTQ_UNLOCK(dfs); + event->re_rssi = e.rssi; + event->re_dur = e.dur; + event->re_full_ts = e.fulltsf; + event->re_ts = (e.rs_tstamp) & DFS_TSMASK; + event->re_chanindex = dfs->dfs_curchan_radindex; + event->re_flags = 0; + + /* Handle chirp flags. */ + if (e.do_check_chirp) { + event->re_flags |= DFS_EVENT_CHECKCHIRP; + if (e.is_hw_chirp) + event->re_flags |= DFS_EVENT_HW_CHIRP; + if (e.is_sw_chirp) + event->re_flags |= DFS_EVENT_SW_CHIRP; + } + + WLAN_ARQ_LOCK(dfs); + STAILQ_INSERT_TAIL(&(dfs->dfs_arq), event, re_list); + WLAN_ARQ_UNLOCK(dfs); + } else { + if ((WLAN_IS_CHAN_DFS(dfs->dfs_curchan) || + ((WLAN_IS_CHAN_11AC_VHT160(dfs->dfs_curchan) || + WLAN_IS_CHAN_11AC_VHT80_80(dfs->dfs_curchan)) && + WLAN_IS_CHAN_DFS_CFREQ2(dfs->dfs_curchan))) || + (dfs_is_precac_timer_running(dfs))) { + + int retval = 0; + + if (!(dfs->dfs_proc_phyerr & DFS_RADAR_EN)) { + dfs_debug(dfs, WLAN_DEBUG_DFS3, + "DFS_RADAR_EN not enabled"); + return; + } + + dfs_filter_short_pulses(dfs, &e, &retval); + if (retval) + return; + + if (dfs_is_second_seg_radar_disabled(dfs, e.seg_id)) + return; + + /* Add the event to the list, if there's space. */ + WLAN_DFSEVENTQ_LOCK(dfs); + event = STAILQ_FIRST(&(dfs->dfs_eventq)); + if (!event) { + WLAN_DFSEVENTQ_UNLOCK(dfs); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "no more events space left"); + return; + } + STAILQ_REMOVE_HEAD(&(dfs->dfs_eventq), re_list); + WLAN_DFSEVENTQ_UNLOCK(dfs); + + dfs->dfs_phyerr_queued_count++; + dfs->dfs_phyerr_w53_counter++; + + event->re_dur = e.dur; + event->re_full_ts = e.fulltsf; + event->re_ts = (e.rs_tstamp) & DFS_TSMASK; + event->re_rssi = e.rssi; + + event->re_seg_id = e.seg_id; + event->re_sidx = e.sidx; + event->re_freq_offset_khz = e.freq_offset_khz; + event->re_peak_mag = e.peak_mag; + event->re_total_gain = e.total_gain; + event->re_mb_gain = e.mb_gain; + event->re_relpwr_db = e.relpwr_db; + event->re_delta_diff = e.pulse_delta_diff; + event->re_delta_peak = e.pulse_delta_peak; + event->re_psidx_diff = e.pulse_psidx_diff; + event->re_flags = 0; + event->re_flags |= DFS_EVENT_VALID_PSIDX_DIFF; + /* Handle chirp flags. */ + if (e.do_check_chirp) { + event->re_flags |= DFS_EVENT_CHECKCHIRP; + if (e.is_hw_chirp) + event->re_flags |= DFS_EVENT_HW_CHIRP; + if (e.is_sw_chirp) + event->re_flags |= DFS_EVENT_SW_CHIRP; + } + + /* Correctly set which channel is being reported on */ + dfs_set_chan_index(dfs, &e, event); + + WLAN_DFSQ_LOCK(dfs); + STAILQ_INSERT_TAIL(&(dfs->dfs_radarq), event, re_list); + WLAN_DFSQ_UNLOCK(dfs); + } + } + + /* + * Schedule the radar/AR task as appropriate. + * XXX isn't a lock needed for wlan_radar_tasksched? + */ + if (!STAILQ_EMPTY(&dfs->dfs_arq)) { + /* XXX shouldn't this be a task/timer too? */ + dfs_process_ar_event(dfs, dfs->dfs_curchan); + } + if (!STAILQ_EMPTY(&dfs->dfs_radarq) && !dfs->wlan_radar_tasksched) { + dfs->wlan_radar_tasksched = 1; + qdf_timer_mod(&dfs->wlan_dfs_task_timer, 0); + } +#undef EXT_CH_RADAR_FOUND +#undef PRI_CH_RADAR_FOUND +#undef EXT_CH_RADAR_EARLY_FOUND +} + +#ifdef QCA_MCL_DFS_SUPPORT +void dfs_process_phyerr_filter_offload(struct wlan_dfs *dfs, + struct radar_event_info *wlan_radar_event) +{ + struct dfs_event *event; + int empty; + int do_check_chirp = 0; + int is_hw_chirp = 0; + int is_sw_chirp = 0; + int is_pri = 0; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + if (dfs->dfs_ignore_dfs) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, "ignoring dfs"); + return; + } + + if (!(dfs->dfs_proc_phyerr & DFS_RADAR_EN)) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "DFS_RADAR_EN not set in dfs->dfs_proc_phyerr"); + return; + } + + if (WLAN_IS_CHAN_RADAR(dfs->dfs_curchan)) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "Radar already found in the channel, do not queue radar data"); + return; + } + + dfs->wlan_dfs_stats.total_phy_errors++; + if (dfs->dfs_caps.wlan_chip_is_bb_tlv) { + do_check_chirp = 1; + is_pri = 1; + is_hw_chirp = wlan_radar_event->pulse_is_chirp; + + if ((uint32_t) dfs->dfs_phyerr_freq_min > + wlan_radar_event->pulse_center_freq) { + dfs->dfs_phyerr_freq_min = + (int)wlan_radar_event->pulse_center_freq; + } + + if (dfs->dfs_phyerr_freq_max < + (int)wlan_radar_event->pulse_center_freq) { + dfs->dfs_phyerr_freq_max = + (int)wlan_radar_event->pulse_center_freq; + } + } + + /* + * Now, add the parsed, checked and filtered + * radar phyerror event radar pulse event list. + * This event will then be processed by + * dfs_radar_processevent() to see if the pattern + * of pulses in radar pulse list match any radar + * singnature in the current regulatory domain. + */ + + WLAN_DFSEVENTQ_LOCK(dfs); + empty = STAILQ_EMPTY(&(dfs->dfs_eventq)); + WLAN_DFSEVENTQ_UNLOCK(dfs); + if (empty) + return; + /* + * Add the event to the list, if there's space. + */ + WLAN_DFSEVENTQ_LOCK(dfs); + event = STAILQ_FIRST(&(dfs->dfs_eventq)); + if (!event) { + WLAN_DFSEVENTQ_UNLOCK(dfs); + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "%s: No more space left for queuing DFS Phyerror events", + __func__); + return; + } + STAILQ_REMOVE_HEAD(&(dfs->dfs_eventq), re_list); + WLAN_DFSEVENTQ_UNLOCK(dfs); + dfs->dfs_phyerr_queued_count++; + dfs->dfs_phyerr_w53_counter++; + event->re_dur = (uint8_t) wlan_radar_event->pulse_duration; + event->re_rssi = wlan_radar_event->rssi; + event->re_ts = wlan_radar_event->pulse_detect_ts & DFS_TSMASK; + event->re_full_ts = (((uint64_t) wlan_radar_event->upload_fullts_high) + << 32) | wlan_radar_event->upload_fullts_low; + + /* + * Index of peak magnitude + */ + event->re_sidx = wlan_radar_event->peak_sidx; + event->re_delta_diff = wlan_radar_event->delta_diff; + event->re_delta_peak = wlan_radar_event->delta_peak; + event->re_flags = 0; + if (wlan_radar_event->is_psidx_diff_valid) { + event->re_flags |= DFS_EVENT_VALID_PSIDX_DIFF; + event->re_psidx_diff = wlan_radar_event->psidx_diff; + } + + /* + * Handle chirp flags. + */ + if (do_check_chirp) { + event->re_flags |= DFS_EVENT_CHECKCHIRP; + if (is_hw_chirp) + event->re_flags |= DFS_EVENT_HW_CHIRP; + if (is_sw_chirp) + event->re_flags |= DFS_EVENT_SW_CHIRP; + } + /* + * Correctly set which channel is being reported on + */ + if (is_pri) { + event->re_chanindex = (uint8_t) dfs->dfs_curchan_radindex; + } else { + if (dfs->dfs_extchan_radindex == -1) + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "%s phyerr on ext channel", __func__); + event->re_chanindex = (uint8_t) dfs->dfs_extchan_radindex; + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "%s:New extension channel event is added to queue", + __func__); + } + + WLAN_DFSQ_LOCK(dfs); + + STAILQ_INSERT_TAIL(&(dfs->dfs_radarq), event, re_list); + + empty = STAILQ_EMPTY(&dfs->dfs_radarq); + + WLAN_DFSQ_UNLOCK(dfs); + + if (!empty && !dfs->wlan_radar_tasksched) { + dfs->wlan_radar_tasksched = 1; + qdf_timer_mod(&dfs->wlan_dfs_task_timer, 0); + } +} +#endif + +void dfs_is_radar_enabled(struct wlan_dfs *dfs, int *ignore_dfs) +{ + *ignore_dfs = dfs->dfs_ignore_dfs; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_process_radarevent.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_process_radarevent.c new file mode 100644 index 0000000000000000000000000000000000000000..8e287bc4d22e1b0705fe293f847c30e2f2d53754 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_process_radarevent.c @@ -0,0 +1,1448 @@ +/* + * Copyright (c) 2013, 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This contains the functionality to process the radar event generated + * for a pulse. This will group together pulses and call various detection + * functions to figure out whether a valid radar has been detected. + */ + +#include "../dfs.h" +#include "../dfs_zero_cac.h" +#include "../dfs_channel.h" +#include "../dfs_internal.h" +#include "../dfs_process_radar_found_ind.h" +#include +#include "wlan_dfs_utils_api.h" +#include "wlan_dfs_lmac_api.h" +#include "../dfs_partial_offload_radar.h" + +#ifdef DFS_FCC_TYPE4_DURATION_CHECK +#define DFS_WAR_30_MHZ_SEPARATION 30 +#define DFS_WAR_PEAK_INDEX_ZERO 0 +#define DFS_TYPE4_WAR_PULSE_DURATION_LOWER_LIMIT 11 +#define DFS_TYPE4_WAR_PULSE_DURATION_UPPER_LIMIT 33 +#define DFS_TYPE4_WAR_PRI_LOWER_LIMIT 200 +#define DFS_TYPE4_WAR_PRI_UPPER_LIMIT 500 +#define DFS_TYPE4_WAR_VALID_PULSE_DURATION 12 +#endif + +#define FREQ_5500_MHZ 5500 +#define FREQ_5500_MHZ 5500 + +#define DFS_MAX_FREQ_SPREAD (1375 * 1) +#define DFS_LARGE_PRI_MULTIPLIER 4 +#define DFS_W53_DEFAULT_PRI_MULTIPLIER 2 +#define DFS_INVALID_PRI_LIMIT 100 /* should we use 135? */ +#define DFS_BIG_SIDX 10000 + +#define FRAC_PRI_SCORE_ARRAY_SIZE 40 + +static char debug_dup[33]; +static int debug_dup_cnt; + +/** + * dfs_process_pulse_dur() - Process pulse duration. + * @dfs: Pointer to wlan_dfs structure. + * @re_dur: Duration. + * + * Convert the hardware provided duration to TSF ticks (usecs) taking the clock + * (fast or normal) into account. Legacy (pre-11n, Owl, Sowl, Howl) operate + * 5GHz using a 40MHz clock. Later 11n chips (Merlin, Osprey, etc) operate + * 5GHz using a 44MHz clock, so the reported pulse durations are different. + * Peregrine reports the pulse duration in microseconds regardless of the + * operating mode. (XXX TODO: verify this, obviously.) + * + * The hardware returns the duration in a variety of formats, + * so it's converted from the hardware format to TSF (usec) + * values here. + * XXX TODO: this should really be done when the PHY error + * is processed, rather than way out here.. + * + * + * Return: Returns the duration. + */ +static inline uint8_t dfs_process_pulse_dur(struct wlan_dfs *dfs, + uint8_t re_dur) +{ + /* + * Short pulses are sometimes returned as having a duration of 0, + * so round those up to 1. + * XXX This holds true for BB TLV chips too, right? + */ + if (re_dur == 0) + return 1; + + /* + * For BB TLV chips, the hardware always returns microsecond pulse + * durations. + */ + if (dfs->dfs_caps.wlan_chip_is_bb_tlv) + return re_dur; + + /* + * This is for 11n and legacy chips, which may or may not use the 5GHz + * fast clock mode. + */ + /* Convert 0.8us durations to TSF ticks (usecs) */ + return (uint8_t)dfs_round((int32_t)((dfs->dur_multiplier)*re_dur)); +} + +#ifdef DFS_FCC_TYPE4_DURATION_CHECK +/* + * dfs_dur_check() - Modify the pulse duration for FCC Type 4 and JAPAN W56 + * Type 8 radar pulses when the conditions mentioned in the + * function body are reported in the radar summary report. + * @dfs: Pointer to wlan_dfs structure. + * @chan: Current channel. + * @re: Pointer to dfs_event. + * @diff_ts: timestamp of current pulse - timestamp of last pulse. + * + * return: Void + */ +static inline void dfs_dur_check( + struct wlan_dfs *dfs, + struct dfs_channel *chan, + struct dfs_event *re, + uint32_t diff_ts) +{ + if ((dfs->dfsdomain == DFS_FCC_DOMAIN || + dfs->dfsdomain == DFS_MKK4_DOMAIN) && + ((chan->dfs_ch_flags & WLAN_CHAN_VHT80) == WLAN_CHAN_VHT80) && + (DFS_DIFF(chan->dfs_ch_freq, chan->dfs_ch_mhz_freq_seg1) == + DFS_WAR_30_MHZ_SEPARATION) && + re->re_sidx == DFS_WAR_PEAK_INDEX_ZERO && + (re->re_dur > DFS_TYPE4_WAR_PULSE_DURATION_LOWER_LIMIT && + re->re_dur < DFS_TYPE4_WAR_PULSE_DURATION_UPPER_LIMIT) && + (diff_ts > DFS_TYPE4_WAR_PRI_LOWER_LIMIT && + diff_ts < DFS_TYPE4_WAR_PRI_UPPER_LIMIT)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS, + "chan flags=%llu, Pri Chan %d MHz center %d MHZ", + chan->dfs_ch_flags, + chan->dfs_ch_freq, chan->dfs_ch_mhz_freq_seg1); + + dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Report Peak Index = %d,re.re_dur = %d,diff_ts = %d", + re->re_sidx, re->re_dur, diff_ts); + + re->re_dur = DFS_TYPE4_WAR_VALID_PULSE_DURATION; + dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Modifying the pulse duration to %d", re->re_dur); + } +} +#else +static inline void dfs_dur_check( + struct wlan_dfs *dfs, + struct dfs_channel *chan, + struct dfs_event *re, + uint32_t diff_ts) +{ +} +#endif + +/* + * dfs_print_radar_events() - Prints the Radar events. + * @dfs: Pointer to wlan_dfs structure. + */ +static void dfs_print_radar_events(struct wlan_dfs *dfs) +{ + int i; + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "#Phyerr=%d, #false detect=%d, #queued=%d", + dfs->dfs_phyerr_count, dfs->dfs_phyerr_reject_count, + dfs->dfs_phyerr_queued_count); + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_phyerr_freq_min=%d, dfs_phyerr_freq_max=%d", + dfs->dfs_phyerr_freq_min, dfs->dfs_phyerr_freq_max); + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Total radar events detected=%d, entries in the radar queue follows:", + dfs->dfs_event_log_count); + + for (i = 0; (i < DFS_EVENT_LOG_SIZE) && (i < dfs->dfs_event_log_count); + i++) { + dfs_debug(dfs, WLAN_DEBUG_DFS, + "ts=%llu diff_ts=%u rssi=%u dur=%u, is_chirp=%d, seg_id=%d, sidx=%d, freq_offset=%d.%dMHz, peak_mag=%d, total_gain=%d, mb_gain=%d, relpwr_db=%d, delta_diff=%d, delta_peak=%d, psidx_diff=%d", + dfs->radar_log[i].ts, dfs->radar_log[i].diff_ts, + dfs->radar_log[i].rssi, dfs->radar_log[i].dur, + dfs->radar_log[i].is_chirp, dfs->radar_log[i].seg_id, + dfs->radar_log[i].sidx, + (int)dfs->radar_log[i].freq_offset_khz/1000, + (int)abs(dfs->radar_log[i].freq_offset_khz)%1000, + dfs->radar_log[i].peak_mag, + dfs->radar_log[i].total_gain, + dfs->radar_log[i].mb_gain, + dfs->radar_log[i].relpwr_db, + dfs->radar_log[i].delta_diff, + dfs->radar_log[i].delta_peak, + dfs->radar_log[i].psidx_diff); + } + dfs->dfs_event_log_count = 0; + dfs->dfs_phyerr_count = 0; + dfs->dfs_phyerr_reject_count = 0; + dfs->dfs_phyerr_queued_count = 0; + dfs->dfs_phyerr_freq_min = 0x7fffffff; + dfs->dfs_phyerr_freq_max = 0; +} + +/** + * dfs_confirm_radar() - This function checks for fractional PRI and jitter in + * sidx index to determine if the radar is real or not. + * @dfs: Pointer to dfs structure. + * @rf: Pointer to dfs_filter structure. + * @ext_chan_flag: ext chan flags. + */ +static int dfs_confirm_radar(struct wlan_dfs *dfs, + struct dfs_filter *rf, + int ext_chan_flag) +{ + int i = 0; + int index; + struct dfs_delayline *dl = &rf->rf_dl; + struct dfs_delayelem *de; + uint64_t target_ts = 0; + struct dfs_pulseline *pl; + int start_index = 0, current_index, next_index; + unsigned char scores[FRAC_PRI_SCORE_ARRAY_SIZE]; + uint32_t pri_margin; + uint64_t this_diff_ts; + uint32_t search_bin; + + unsigned char max_score = 0; + int max_score_index = 0; + + pl = dfs->pulses; + + OS_MEMZERO(scores, sizeof(scores)); + scores[0] = rf->rf_threshold; + + pri_margin = dfs_get_pri_margin(dfs, ext_chan_flag, + (rf->rf_patterntype == 1)); + + /* + * Look for the entry that matches dl_seq_num_second. + * we need the time stamp and diff_ts from there. + */ + + for (i = 0; i < dl->dl_numelems; i++) { + index = (dl->dl_firstelem + i) & DFS_MAX_DL_MASK; + de = &dl->dl_elems[index]; + if (dl->dl_seq_num_second == de->de_seq_num) + target_ts = de->de_ts - de->de_time; + } + + if (dfs->dfs_debug_mask & WLAN_DEBUG_DFS2) { + dfs_print_delayline(dfs, &rf->rf_dl); + + /* print pulse line */ + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "%s: Pulse Line\n", __func__); + for (i = 0; i < pl->pl_numelems; i++) { + index = (pl->pl_firstelem + i) & + DFS_MAX_PULSE_BUFFER_MASK; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "Elem %u: ts=%llu dur=%u, seq_num=%d, delta_peak=%d, psidx_diff=%d\n", + i, pl->pl_elems[index].p_time, + pl->pl_elems[index].p_dur, + pl->pl_elems[index].p_seq_num, + pl->pl_elems[index].p_delta_peak, + pl->pl_elems[index].p_psidx_diff); + } + } + + /* + * Walk through the pulse line and find pulse with target_ts. + * Then continue until we find entry with seq_number dl_seq_num_stop. + */ + + for (i = 0; i < pl->pl_numelems; i++) { + index = (pl->pl_firstelem + i) & DFS_MAX_PULSE_BUFFER_MASK; + if (pl->pl_elems[index].p_time == target_ts) { + dl->dl_seq_num_start = pl->pl_elems[index].p_seq_num; + start_index = index; /* save for future use */ + } + } + + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "%s: target_ts=%llu, dl_seq_num_start=%d, dl_seq_num_second=%d, dl_seq_num_stop=%d\n", + __func__, target_ts, dl->dl_seq_num_start, + dl->dl_seq_num_second, dl->dl_seq_num_stop); + + current_index = start_index; + while (pl->pl_elems[current_index].p_seq_num < dl->dl_seq_num_stop) { + next_index = (current_index + 1) & DFS_MAX_PULSE_BUFFER_MASK; + this_diff_ts = pl->pl_elems[next_index].p_time - + pl->pl_elems[current_index].p_time; + + /* Now update the score for this diff_ts */ + for (i = 1; i < FRAC_PRI_SCORE_ARRAY_SIZE; i++) { + search_bin = dl->dl_search_pri / (i + 1); + + /* + * We do not give score to PRI that is lower then the + * limit. + */ + if (search_bin < DFS_INVALID_PRI_LIMIT) + break; + + /* + * Increment the score if this_diff_ts belongs to this + * search_bin +/- margin. + */ + if ((this_diff_ts >= (search_bin - pri_margin)) && + (this_diff_ts <= + (search_bin + pri_margin))) { + /*increment score */ + scores[i]++; + } + } + current_index = next_index; + } + + for (i = 0; i < FRAC_PRI_SCORE_ARRAY_SIZE; i++) + if (scores[i] > max_score) { + max_score = scores[i]; + max_score_index = i; + } + + if (max_score_index != 0) { + dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Rejecting Radar since Fractional PRI detected: searchpri=%d, threshold=%d, fractional PRI=%d, Fractional PRI score=%d", + dl->dl_search_pri, scores[0], + dl->dl_search_pri/(max_score_index + 1), + max_score); + return 0; + } + + + /* Check for frequency spread */ + if (dl->dl_min_sidx > pl->pl_elems[start_index].p_sidx) + dl->dl_min_sidx = pl->pl_elems[start_index].p_sidx; + + if (dl->dl_max_sidx < pl->pl_elems[start_index].p_sidx) + dl->dl_max_sidx = pl->pl_elems[start_index].p_sidx; + + if ((dl->dl_max_sidx - dl->dl_min_sidx) > rf->rf_sidx_spread) { + dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Rejecting Radar since frequency spread is too large : min_sidx=%d, max_sidx=%d, rf_sidx_spread=%d", + dl->dl_min_sidx, dl->dl_max_sidx, + rf->rf_sidx_spread); + return 0; + } + + if ((rf->rf_check_delta_peak) && + ((dl->dl_delta_peak_match_count + + dl->dl_psidx_diff_match_count - 1) < + rf->rf_threshold)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Rejecting Radar since delta peak values are invalid : dl_delta_peak_match_count=%d, dl_psidx_diff_match_count=%d, rf_threshold=%d", + dl->dl_delta_peak_match_count, + dl->dl_psidx_diff_match_count, + rf->rf_threshold); + return 0; + } + dfs_debug(dfs, WLAN_DEBUG_DFS_FALSE_DET, "%s : dl->dl_min_sidx: %d , dl->dl_max_sidx :%d", + __func__, dl->dl_min_sidx, dl->dl_max_sidx); + + dfs->dfs_freq_offset = DFS_SIDX_TO_FREQ_OFFSET((dl->dl_min_sidx + + dl->dl_max_sidx) / 2); + return 1; +} + +/* + * dfs_reject_on_pri() - Rejecting on individual filter based on min PRI . + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @deltaT: deltaT value. + * @this_ts: Timestamp. + */ +static inline bool dfs_reject_on_pri( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + uint64_t deltaT, + uint64_t this_ts) +{ + if ((deltaT < rf->rf_minpri) && (deltaT != 0)) { + /* Second line of PRI filtering. */ + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "filterID %d : Rejecting on individual filter min PRI deltaT=%lld rf->rf_minpri=%u", + rf->rf_pulseid, (uint64_t)deltaT, + rf->rf_minpri); + return 1; + } + + if (rf->rf_ignore_pri_window > 0) { + if (deltaT < rf->rf_minpri) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "filterID %d : Rejecting on individual filter max PRI deltaT=%lld rf->rf_minpri=%u", + rf->rf_pulseid, (uint64_t)deltaT, + rf->rf_minpri); + /* But update the last time stamp. */ + rf->rf_dl.dl_last_ts = this_ts; + return 1; + } + } else { + /* + * The HW may miss some pulses especially with + * high channel loading. This is true for Japan + * W53 where channel loaoding is 50%. Also for + * ETSI where channel loading is 30% this can + * be an issue too. To take care of missing + * pulses, we introduce pri_margin multiplie. + * This is normally 2 but can be higher for W53. + */ + + if ((deltaT > (dfs->dfs_pri_multiplier * rf->rf_maxpri)) || + (deltaT < rf->rf_minpri)) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "filterID %d : Rejecting on individual filter max PRI deltaT=%lld rf->rf_minpri=%u", + rf->rf_pulseid, (uint64_t) deltaT, + rf->rf_minpri); + /* But update the last time stamp. */ + rf->rf_dl.dl_last_ts = this_ts; + return 1; + } + } + + return 0; +} + +/** + * dfs_confirm_radar_check() - Do additioal check to conirm radar except for + * the staggered, chirp FCC Bin 5, frequency hopping indicated by + * rf_patterntype == 1. + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @ext_chan_event_flag: Extension channel event flag + * @found: Pointer to radar found flag (return value). + * @false_radar_found: Pointer to false radar found (return value). + */ + +static inline void dfs_confirm_radar_check( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + int ext_chan_event_flag, + int *found, + int *false_radar_found) +{ + if (rf->rf_patterntype != 1) { + *found = dfs_confirm_radar(dfs, rf, ext_chan_event_flag); + *false_radar_found = (*found == 1) ? 0 : 1; + } +} + +void __dfs_process_radarevent(struct wlan_dfs *dfs, + struct dfs_filtertype *ft, + struct dfs_event *re, + uint64_t this_ts, + int *found, + int *false_radar_found) +{ + int p; + uint64_t deltaT = 0; + int ext_chan_event_flag = 0; + struct dfs_filter *rf = NULL; + int8_t ori_rf_check_delta_peak = 0; + + for (p = 0, *found = 0; (p < ft->ft_numfilters) && + (!(*found)) && !(*false_radar_found); p++) { + rf = ft->ft_filters[p]; + if ((re->re_dur >= rf->rf_mindur) && + (re->re_dur <= rf->rf_maxdur)) { + /* The above check is probably not necessary. */ + deltaT = (this_ts < rf->rf_dl.dl_last_ts) ? + (int64_t)((DFS_TSF_WRAP - rf->rf_dl.dl_last_ts) + + this_ts + 1) : + this_ts - rf->rf_dl.dl_last_ts; + + if (dfs_reject_on_pri(dfs, rf, deltaT, this_ts)) + continue; + + dfs_add_pulse(dfs, rf, re, deltaT, this_ts); + + /* + * If this is an extension channel event, flag it for + * false alarm reduction. + */ + if (re->re_chanindex == dfs->dfs_extchan_radindex) + ext_chan_event_flag = 1; + + if (rf->rf_patterntype == 2) { + *found = dfs_staggered_check(dfs, rf, + (uint32_t) deltaT, re->re_dur); + } else { + *found = dfs_bin_check(dfs, rf, + (uint32_t) deltaT, re->re_dur, + ext_chan_event_flag); + + if (*found && + (utils_get_dfsdomain(dfs->dfs_pdev_obj) != + DFS_CN_DOMAIN)) { + ori_rf_check_delta_peak = + rf->rf_check_delta_peak; + /* + * If FW does not send valid psidx_diff + * Do not do chirp check. + */ + if (rf->rf_check_delta_peak && + (!(re->re_flags & + DFS_EVENT_VALID_PSIDX_DIFF))) + rf->rf_check_delta_peak = false; + dfs_confirm_radar_check(dfs, + rf, ext_chan_event_flag, + found, + false_radar_found); + rf->rf_check_delta_peak = + ori_rf_check_delta_peak; + } + } + + if (dfs->dfs_debug_mask & WLAN_DEBUG_DFS2) + if (rf->rf_patterntype != + WLAN_DFS_RF_PATTERN_TYPE_1) + dfs_print_delayline(dfs, &rf->rf_dl); + + rf->rf_dl.dl_last_ts = this_ts; + } + } + + if (*found) { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Found on channel minDur = %d, filterId = %d", + ft->ft_mindur, + rf ? rf->rf_pulseid : -1); + } + + return; +} + +/** + * dfs_cal_average_radar_parameters() - Calculate the average radar parameters. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +static void dfs_cal_average_radar_parameters(struct wlan_dfs *dfs) +{ + int i, count = 0; + u_int32_t total_pri = 0; + u_int32_t total_duration = 0; + u_int32_t total_sidx = 0; + + /* Calculating average PRI, Duration, SIDX from + * the 2nd pulse, ignoring the 1st pulse (radar_log[0]). + * This is because for the first pulse, the diff_ts will be + * (0 - current_ts) which will be a huge value. + * Average PRI computation will be wrong. FW returns a + * failure test result as PRI does not match their expected + * value. + */ + + for (i = 1; (i < DFS_EVENT_LOG_SIZE) && (i < dfs->dfs_event_log_count); + i++) { + total_pri += dfs->radar_log[i].diff_ts; + total_duration += dfs->radar_log[i].dur; + total_sidx += dfs->radar_log[i].sidx; + count++; + } + + if (count > 0) { + dfs->dfs_average_pri = total_pri / count; + dfs->dfs_average_duration = total_duration / count; + dfs->dfs_average_sidx = total_sidx / count; + + dfs_info(dfs, WLAN_DEBUG_DFS2, + "Avg.PRI =%u, Avg.duration =%u Avg.sidx =%u", + dfs->dfs_average_pri, + dfs->dfs_average_duration, + dfs->dfs_average_sidx); + } +} +#else +static void dfs_cal_average_radar_parameters(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_radarfound_reset_vars() - Reset dfs variables after radar found + * @dfs: Pointer to wlan_dfs structure. + * @rs: Pointer to dfs_state. + * @chan: Current channel. + * @seg_id: Segment id. + */ +static inline void dfs_radarfound_reset_vars( + struct wlan_dfs *dfs, + struct dfs_state *rs, + struct dfs_channel *chan, + uint8_t seg_id) +{ + struct dfs_channel *thischan; + + /* + * TODO: Instead of discarding the radar, create a workqueue + * if the channel change is happenning through userspace and + * process the radar event once the channel change is completed. + */ + + /* Collect stats */ + dfs->wlan_dfs_stats.num_radar_detects++; + thischan = &rs->rs_chan; + if ((seg_id == SEG_ID_SECONDARY) && + (dfs_is_precac_timer_running(dfs))) + dfs->is_radar_during_precac = 1; + + /* + * If event log is on then dump the radar event queue on + * filter match. This can be used to collect information + * on false radar detection. + */ + if (dfs->dfs_event_log_on) { + dfs_cal_average_radar_parameters(dfs); + dfs_print_radar_events(dfs); + } + + dfs_reset_radarq(dfs); + dfs_reset_alldelaylines(dfs); + + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "Primary channel freq = %u flags=0x%x", + chan->dfs_ch_freq, chan->dfs_ch_flagext); + + if (chan->dfs_ch_freq != thischan->dfs_ch_freq) + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "Ext channel freq = %u flags=0x%x", + thischan->dfs_ch_freq, + thischan->dfs_ch_flagext); + + dfs->dfs_phyerr_freq_min = 0x7fffffff; + dfs->dfs_phyerr_freq_max = 0; + dfs->dfs_phyerr_w53_counter = 0; + + if (seg_id == SEG_ID_SECONDARY) { + dfs->wlan_dfs_stats.num_seg_two_radar_detects++; + dfs->is_radar_found_on_secondary_seg = 1; + } +} + +/* + * dfs_print_radar_found_freq() - Print radar found frequency. + * @dfs: Pointer to wlan_dfs. + */ +#ifdef CONFIG_CHAN_FREQ_API +static void dfs_print_radar_found_freq(struct wlan_dfs *dfs) +{ + dfs_debug(dfs, WLAN_DEBUG_DFS, + "bangradar on 2nd segment cfreq = %u", + dfs->dfs_precac_secondary_freq_mhz); +} +#else +#ifdef CONFIG_CHAN_NUM_API +static void dfs_print_radar_found_freq(struct wlan_dfs *dfs) +{ + dfs_debug(dfs, WLAN_DEBUG_DFS, + "bangradar on 2nd segment cfreq = %u", + dfs->dfs_precac_secondary_freq); +} +#endif +#endif + +/** + * dfs_handle_bangradar - Handle the case of bangradar + * @dfs: Pointer to wlan_dfs structure. + * @chan: Current channel. + * @rs: Pointer to dfs_state. + * Return: if bangradar then return 1. Otherwise, return 0. + */ +static inline int dfs_handle_bangradar( + struct wlan_dfs *dfs, + struct dfs_channel *chan, + struct dfs_state **rs, + uint8_t *seg_id, + int *retval) +{ + + if (dfs->dfs_bangradar_type) { + if (dfs->dfs_bangradar_type >= DFS_INVALID_BANGRADAR_TYPE) { + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Invalid bangradar type"); + return 1; + } + /* All bangradars are processed similarly. + * arguments for the bangradar are already stored in + * respective dfs structures. + */ + + *rs = &dfs->dfs_radar[dfs->dfs_curchan_radindex]; + if (dfs->dfs_seg_id == SEG_ID_SECONDARY) { + if (dfs_is_precac_timer_running(dfs) || + WLAN_IS_CHAN_11AC_VHT160(chan) || + WLAN_IS_CHAN_11AC_VHT80_80(chan)) { + dfs->is_radar_found_on_secondary_seg = 1; + dfs_print_radar_found_freq(dfs); + } else { + dfs_debug(dfs, WLAN_DEBUG_DFS, + "No second segment"); + return 1; + } + } + *seg_id = dfs->dfs_seg_id; + dfs_debug(dfs, WLAN_DEBUG_DFS, "bangradar %d", + dfs->dfs_bangradar_type); + *retval = 1; + return 1; + } + return 0; +} + +/** + * dfs_process_w53_pulses() - Prrocess w53 pulses + * @dfs: Pointer to wlan_dfs structure. + * + * For chips that support frequency information, we can relax PRI + * restriction if the frequency spread is narrow. + */ +static inline void dfs_process_w53_pulses( + struct wlan_dfs *dfs) +{ + if ((dfs->dfs_phyerr_freq_max - dfs->dfs_phyerr_freq_min) < + DFS_MAX_FREQ_SPREAD) + dfs->dfs_pri_multiplier = DFS_LARGE_PRI_MULTIPLIER; + + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "w53_counter=%d, freq_max=%d, freq_min=%d, pri_multiplier=%d", + dfs->dfs_phyerr_w53_counter, + dfs->dfs_phyerr_freq_max, dfs->dfs_phyerr_freq_min, + dfs->dfs_pri_multiplier); + + dfs->dfs_phyerr_freq_min = 0x7fffffff; + dfs->dfs_phyerr_freq_max = 0; +} + +/** + * dfs_handle_missing_pulses - Handle the case of missing pulses + * @dfs: Pointer to wlan_dfs structure. + * @chan: Current channel. + * + * The HW may miss some pulses especially with high channel loading. + * This is true for Japan W53 where channel loaoding is 50%. Also + * for ETSI where channel loading is 30% this can be an issue too. + * To take care of missing pulses, we introduce pri_margin multiplie. + * This is normally 2 but can be higher for W53. + * Return: If not enough pulses return 0. Otherwise, return 1. + */ +static inline int dfs_handle_missing_pulses( + struct wlan_dfs *dfs, + struct dfs_channel *chan) +{ + if ((dfs->dfsdomain == DFS_MKK4_DOMAIN) && + (dfs->dfs_caps.wlan_chip_is_bb_tlv) && + (chan->dfs_ch_freq < FREQ_5500_MHZ)) { + dfs->dfs_pri_multiplier = DFS_W53_DEFAULT_PRI_MULTIPLIER; + /* + * Do not process W53 pulses unless we have a minimum number + * of them. + */ + if (dfs->dfs_phyerr_w53_counter >= 5) + dfs_process_w53_pulses(dfs); + else + return 0; + } + + dfs_debug(dfs, WLAN_DEBUG_DFS1, "pri_multiplier=%d", + dfs->dfs_pri_multiplier); + + return 1; +} + +/** + * dfs_is_radarq_empty - check if radarq is empty + * @dfs: Pointer to wlan_dfs structure. + * @empty: Pointer to empty + */ +static inline void dfs_is_radarq_empty( + struct wlan_dfs *dfs, + int *empty) +{ + WLAN_DFSQ_LOCK(dfs); + *empty = STAILQ_EMPTY(&(dfs->dfs_radarq)); + WLAN_DFSQ_UNLOCK(dfs); +} + +/** + * dfs_remove_event_from_radarq - remove event from radarq + * @dfs: Pointer to wlan_dfs structure. + * @event: Double pointer to the event structure + */ +static inline void dfs_remove_event_from_radarq( + struct wlan_dfs *dfs, + struct dfs_event **event) +{ + WLAN_DFSQ_LOCK(dfs); + *event = STAILQ_FIRST(&(dfs->dfs_radarq)); + if (*event) + STAILQ_REMOVE_HEAD(&(dfs->dfs_radarq), re_list); + WLAN_DFSQ_UNLOCK(dfs); +} + +/** + * dfs_return_event_to_eventq - return event to eventq + * @dfs: Pointer to wlan_dfs structure. + * @event: Pointer to the event structure + */ +static inline void dfs_return_event_to_eventq( + struct wlan_dfs *dfs, + struct dfs_event *event) +{ + qdf_mem_zero(event, sizeof(struct dfs_event)); + WLAN_DFSEVENTQ_LOCK(dfs); + STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), event, re_list); + WLAN_DFSEVENTQ_UNLOCK(dfs); +} + +/** + * dfs_log_event - log dfs event + * @dfs: Pointer to wlan_dfs structure. + * @re: Pointer to dfs_event re + * @this_ts: Current time stamp 64bit + * @diff_ts: Difference between 2 timestamps 32bit + * @index: Index value. + */ +static inline void dfs_log_event( + struct wlan_dfs *dfs, + struct dfs_event *re, + uint64_t this_ts, + uint32_t diff_ts, + uint32_t index) +{ + uint8_t i; + struct dfs_pulseline *pl = dfs->pulses; + + if (dfs->dfs_event_log_on) { + i = dfs->dfs_event_log_count % DFS_EVENT_LOG_SIZE; + dfs->radar_log[i].ts = this_ts; + dfs->radar_log[i].diff_ts = diff_ts; + dfs->radar_log[i].rssi = (*re).re_rssi; + dfs->radar_log[i].dur = (*re).re_dur; + dfs->radar_log[i].seg_id = (*re).re_seg_id; + dfs->radar_log[i].sidx = (*re).re_sidx; + dfs->radar_log[i].freq_offset_khz = + (*re).re_freq_offset_khz; + dfs->radar_log[i].peak_mag = (*re).re_peak_mag; + dfs->radar_log[i].total_gain = (*re).re_total_gain; + dfs->radar_log[i].mb_gain = (*re).re_mb_gain; + dfs->radar_log[i].relpwr_db = (*re).re_relpwr_db; + dfs->radar_log[i].delta_diff = (*re).re_delta_diff; + dfs->radar_log[i].delta_peak = (*re).re_delta_peak; + dfs->radar_log[i].psidx_diff = (*re).re_psidx_diff; + dfs->radar_log[i].is_chirp = DFS_EVENT_NOTCHIRP(re) ? + 0 : 1; + dfs->dfs_event_log_count++; + } + + dfs->dfs_seq_num++; + pl->pl_elems[index].p_seq_num = dfs->dfs_seq_num; +} + +/** + * dfs_check_if_nonbin5 - Check if radar, other than bin5, is found + * @dfs: Pointer to wlan_dfs structure. + * @re: Pointer to re (radar event) + * @rs: Double Pointer to rs (radar state) + * @this_ts: Current time stamp 64bit + * @diff_ts: Difference between 2 timestamps 32bit + * @found: Pointer to found. If radar found or not. + * @retval: Pointer to retval(return value). + * @false_radar_found: Pointer to false_radar_found(return value). + */ +static inline void dfs_check_if_nonbin5( + struct wlan_dfs *dfs, + struct dfs_event *re, + struct dfs_state **rs, + uint64_t this_ts, + uint32_t diff_ts, + int *found, + int *retval, + int *false_radar_found) +{ + + uint32_t tabledepth = 0; + struct dfs_filtertype *ft; + uint64_t deltaT; + + dfs_debug(dfs, WLAN_DEBUG_DFS1, + " *** chan freq (%d): ts %llu dur %u rssi %u", + (*rs)->rs_chan.dfs_ch_freq, (uint64_t)this_ts, + (*re).re_dur, (*re).re_rssi); + + while ((tabledepth < DFS_MAX_RADAR_OVERLAP) && + ((dfs->dfs_ftindextable[(*re).re_dur])[tabledepth] != + -1) && (!*retval) && !(*false_radar_found)) { + ft = dfs->dfs_radarf[((dfs->dfs_ftindextable[(*re).re_dur]) + [tabledepth])]; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + " ** RD (%d): ts %x dur %u rssi %u", + (*rs)->rs_chan.dfs_ch_freq, (*re).re_ts, + (*re).re_dur, (*re).re_rssi); + + if ((*re).re_rssi < ft->ft_rssithresh && + (*re).re_dur > MAX_DUR_FOR_LOW_RSSI) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "Rejecting on rssi rssi=%u thresh=%u", + (*re).re_rssi, + ft->ft_rssithresh); + tabledepth++; + continue; + } + deltaT = this_ts - ft->ft_last_ts; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "deltaT = %lld (ts: 0x%llx) (last ts: 0x%llx)", + (uint64_t)deltaT, (uint64_t)this_ts, + (uint64_t)ft->ft_last_ts); + + if ((deltaT < ft->ft_minpri) && (deltaT != 0)) { + /* + * This check is for the whole filter type. + * Individual filters will check this again. + * This is first line of filtering. + */ + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "Rejecting on pri pri=%lld minpri=%u", + (uint64_t)deltaT, ft->ft_minpri); + tabledepth++; + continue; + } + + __dfs_process_radarevent(dfs, ft, re, this_ts, found, + false_radar_found); + + ft->ft_last_ts = this_ts; + *retval |= *found; + tabledepth++; + } +} + +/** + * dfs_check_each_b5radar() - Check each bin5 radar + * @dfs: Pointer to wlan_dfs structure. + * @re: Pointer to re(radar event). + * @br: Pointer to dfs_bin5radars structure. + * @this_ts: Current time stamp 64bit. + * @diff_ts: Difference between 2 timestamps 32bit. + * @found: Pointer to found. If radar found or not. + */ +static inline void dfs_check_each_b5radar( + struct wlan_dfs *dfs, + struct dfs_event *re, + struct dfs_bin5radars *br, + uint64_t this_ts, + uint32_t diff_ts, + int *found) +{ + if (dfs_bin5_check_pulse(dfs, re, br)) { + /* + * This is a valid Bin5 pulse, check if it belongs to a + * burst. + */ + (*re).re_dur = dfs_retain_bin5_burst_pattern(dfs, diff_ts, + (*re).re_dur); + /* + * Remember our computed duration for the next pulse in the + * burst (if needed). + */ + dfs->dfs_rinfo.dfs_bin5_chirp_ts = this_ts; + dfs->dfs_rinfo.dfs_last_bin5_dur = (*re).re_dur; + + if (dfs_bin5_addpulse(dfs, br, re, this_ts)) + *found |= dfs_bin5_check(dfs); + } else { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_PULSE, + "not a BIN5 pulse (dur=%d)", (*re).re_dur); + } +} + +/** + * dfs_check_if_bin5() - Check if bin5 radar is found + * @dfs: Pointer to wlan_dfs structure. + * @re: Pointer to re(radar event). + * @this_ts: Current time stamp 64bit. + * @diff_ts: Difference between 2 timestamps 32bit. + * @found: Pointer to found. If radar found or not. + */ +static inline void dfs_check_if_bin5( + struct wlan_dfs *dfs, + struct dfs_event *re, + uint64_t this_ts, + uint32_t diff_ts, + int *found) +{ + int p; + + /* BIN5 pulses are FCC and Japan specific. */ + if ((dfs->dfsdomain == DFS_FCC_DOMAIN) || + (dfs->dfsdomain == DFS_MKK4_DOMAIN)) { + for (p = 0; (p < dfs->dfs_rinfo.rn_numbin5radars) && (!*found); + p++) { + struct dfs_bin5radars *br; + + br = &(dfs->dfs_b5radars[p]); + dfs_check_each_b5radar(dfs, re, br, this_ts, diff_ts, + found); + } + } + + if (*found) + dfs_debug(dfs, WLAN_DEBUG_DFS, "Found bin5 radar"); +} + +/** + * dfs_skip_the_event() - Skip the Radar event + * @dfs: Pointer to wlan_dfs structure. + * @re: Pointer to re(radar event). + * @rs: Pointer to dfs_state. + */ +static inline bool dfs_skip_the_event( + struct wlan_dfs *dfs, + struct dfs_event *re, + struct dfs_state **rs) +{ + if ((*re).re_chanindex < DFS_NUM_RADAR_STATES) + (*rs) = &dfs->dfs_radar[(*re).re_chanindex]; + else + return 1; + + if ((*rs)->rs_chan.dfs_ch_flagext & CHANNEL_INTERFERENCE) + return 1; + + return 0; +} + +/** + * dfs_check_ts_wrap() - dfs check for timestamp wrap. + * @dfs: Pointer to wlan_dfs structure. + * @re: Pointer to re(radar event). + * @deltafull_ts: Deltafull ts. + * + * Return: Deltafull ts. + */ +static inline uint64_t dfs_check_ts_wrap( + struct wlan_dfs *dfs, + struct dfs_event *re, + uint64_t deltafull_ts) +{ + if (deltafull_ts > + ((uint64_t)((DFS_TSMASK - + dfs->dfs_rinfo.rn_last_ts) + + 1 + (*re).re_ts))) + deltafull_ts -= + (DFS_TSMASK - dfs->dfs_rinfo.rn_last_ts) + + 1 + (*re).re_ts; + + return deltafull_ts; +} + +/** + * dfs_calculate_ts_prefix() - Calculate deltafull ts value. + * @dfs: Pointer to wlan_dfs structure. + * @re: Pointer to re(radar event). + */ +static inline void dfs_calculate_ts_prefix( + struct wlan_dfs *dfs, + struct dfs_event *re) +{ + uint64_t deltafull_ts; + + if ((*re).re_ts <= dfs->dfs_rinfo.rn_last_ts) { + dfs->dfs_rinfo.rn_ts_prefix += (((uint64_t) 1) << DFS_TSSHIFT); + /* Now, see if it's been more than 1 wrap */ + deltafull_ts = (*re).re_full_ts - dfs->dfs_rinfo.rn_lastfull_ts; + deltafull_ts = dfs_check_ts_wrap(dfs, re, deltafull_ts); + deltafull_ts >>= DFS_TSSHIFT; + + if (deltafull_ts > 1) + dfs->dfs_rinfo.rn_ts_prefix += + ((deltafull_ts - 1) << DFS_TSSHIFT); + } else { + deltafull_ts = (*re).re_full_ts - + dfs->dfs_rinfo.rn_lastfull_ts; + if (deltafull_ts > (uint64_t) DFS_TSMASK) { + deltafull_ts >>= DFS_TSSHIFT; + dfs->dfs_rinfo.rn_ts_prefix += + ((deltafull_ts - 1) << DFS_TSSHIFT); + } + } +} + +/** + * dfs_calculate_timestamps() - Calculate various timestamps + * @dfs: Pointer to wlan_dfs structure. + * @re: Pointer to re(radar event) + * @this_ts : Pointer to this_ts (this timestamp) + */ + +static inline void dfs_calculate_timestamps( + struct wlan_dfs *dfs, + struct dfs_event *re, + uint64_t *this_ts) +{ + if (dfs->dfs_rinfo.rn_lastfull_ts == 0) { + /* + * Either not started, or 64-bit rollover exactly to + * zero Just prepend zeros to the 15-bit ts. + */ + dfs->dfs_rinfo.rn_ts_prefix = 0; + } else { + /* WAR 23031- patch duplicate ts on very short pulses. + * This pacth has two problems in linux environment. + * 1)The time stamp created and hence PRI depends + * entirely on the latency. If the latency is high, it + * possibly can split two consecutive pulses in the + * same burst so far away (the same amount of latency) + * that make them look like they are from differenct + * bursts. It is observed to happen too often. It sure + * makes the detection fail. + * 2)Even if the latency is not that bad, it simply + * shifts the duplicate timestamps to a new duplicate + * timestamp based on how they are processed. + * This is not worse but not good either. + * Take this pulse as a good one and create a probable + * PRI later. + */ + if ((*re).re_dur == 0 && (*re).re_ts == + dfs->dfs_rinfo.rn_last_unique_ts) { + debug_dup[debug_dup_cnt++] = '1'; + dfs_debug(dfs, WLAN_DEBUG_DFS1, "deltaT is 0"); + } else { + dfs->dfs_rinfo.rn_last_unique_ts = (*re).re_ts; + debug_dup[debug_dup_cnt++] = '0'; + } + + if (debug_dup_cnt >= 32) + debug_dup_cnt = 0; + + dfs_calculate_ts_prefix(dfs, re); + } + + /* + * At this stage rn_ts_prefix has either been blanked or + * calculated, so it's safe to use. + */ + *this_ts = dfs->dfs_rinfo.rn_ts_prefix | ((uint64_t) (*re).re_ts); + dfs->dfs_rinfo.rn_lastfull_ts = (*re).re_full_ts; + dfs->dfs_rinfo.rn_last_ts = (*re).re_ts; +} + +/** + * dfs_add_to_pulseline - Extract necessary items from dfs_event and + * add it as pulse in the pulseline + * @dfs: Pointer to wlan_dfs structure. + * @re: Pointer to re(radar event) + * @this_ts: Pointer to this_ts (this timestamp) + * @diff_ts: Diff ts. + * @index: Pointer to get index value. + */ +static inline void dfs_add_to_pulseline( + struct wlan_dfs *dfs, + struct dfs_event *re, + uint64_t *this_ts, + uint32_t *test_ts, + uint32_t *diff_ts, + uint32_t *index) +{ + struct dfs_pulseline *pl; + + /* + * Calculate the start of the radar pulse. + * + * The TSF is stamped by the MAC upon reception of the event, + * which is (typically?) at the end of the event. But the + * pattern matching code expects the event timestamps to be at + * the start of the event. So to fake it, we subtract the pulse + * duration from the given TSF. This is done after the 64-bit + * timestamp has been calculated so long pulses correctly + * under-wrap the counter. Ie, if this was done on the 32 + * (or 15!) bit TSF when the TSF value is closed to 0, it will + * underflow to 0xfffffXX, which would mess up the logical "OR" + * operation done above. + * This isn't valid for Peregrine as the hardware gives us the + * actual TSF offset of the radar event, not just the MAC TSF + * of the completed receive. + * + * XXX TODO: ensure that the TLV PHY error processing code will + * correctly calculate the TSF to be the start of the radar + * pulse. + * + * XXX TODO TODO: modify the TLV parsing code to subtract the + * duration from the TSF, based on the current fast clock value. + */ + if ((!dfs->dfs_caps.wlan_chip_is_bb_tlv) && (*re).re_dur != 1) + *this_ts -= (*re).re_dur; + + pl = dfs->pulses; + /* Save the pulse parameters in the pulse buffer(pulse line). */ + *index = (pl->pl_lastelem + 1) & DFS_MAX_PULSE_BUFFER_MASK; + + if (pl->pl_numelems == DFS_MAX_PULSE_BUFFER_SIZE) + pl->pl_firstelem = (pl->pl_firstelem+1) & + DFS_MAX_PULSE_BUFFER_MASK; + else + pl->pl_numelems++; + + pl->pl_lastelem = *index; + pl->pl_elems[*index].p_time = *this_ts; + pl->pl_elems[*index].p_dur = (*re).re_dur; + pl->pl_elems[*index].p_rssi = (*re).re_rssi; + pl->pl_elems[*index].p_sidx = (*re).re_sidx; + pl->pl_elems[*index].p_delta_peak = (*re).re_delta_peak; + pl->pl_elems[*index].p_psidx_diff = (*re).re_psidx_diff; + *diff_ts = (uint32_t)*this_ts - *test_ts; + *test_ts = (uint32_t)*this_ts; + + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "ts%u %u %u diff %u pl->pl_lastelem.p_time=%llu", + (uint32_t)*this_ts, (*re).re_dur, + (*re).re_rssi, *diff_ts, + (uint64_t)pl->pl_elems[*index].p_time); +} + +/** + * dfs_conditional_clear_delaylines - Clear delay lines to remove the + * false pulses. + * @dfs: Pointer to wlan_dfs structure. + * @diff_ts: diff between timerstamps. + * @this_ts: this timestamp value. + * @re: Pointer to dfs_event structure. + */ +static inline void dfs_conditional_clear_delaylines( + struct wlan_dfs *dfs, + uint32_t diff_ts, + uint64_t this_ts, + struct dfs_event re) +{ + struct dfs_pulseline *pl = dfs->pulses; + uint32_t index; + + /* If diff_ts is very small, we might be getting false pulse + * detects due to heavy interference. We might be getting + * spectral splatter from adjacent channel. In order to prevent + * false alarms we clear the delay-lines. This might impact + * positive detections under harsh environments, but helps with + * false detects. + */ + + if (diff_ts < DFS_INVALID_PRI_LIMIT) { + dfs->dfs_seq_num = 0; + dfs_reset_alldelaylines(dfs); + dfs_reset_radarq(dfs); + + index = (pl->pl_lastelem + 1) & DFS_MAX_PULSE_BUFFER_MASK; + if (pl->pl_numelems == DFS_MAX_PULSE_BUFFER_SIZE) + pl->pl_firstelem = (pl->pl_firstelem+1) & + DFS_MAX_PULSE_BUFFER_MASK; + else + pl->pl_numelems++; + + pl->pl_lastelem = index; + pl->pl_elems[index].p_time = this_ts; + pl->pl_elems[index].p_dur = re.re_dur; + pl->pl_elems[index].p_rssi = re.re_rssi; + pl->pl_elems[index].p_sidx = re.re_sidx; + pl->pl_elems[index].p_delta_peak = re.re_delta_peak; + pl->pl_elems[index].p_psidx_diff = re.re_psidx_diff; + dfs->dfs_seq_num++; + pl->pl_elems[index].p_seq_num = dfs->dfs_seq_num; + } +} + +/** + * dfs_process_each_radarevent - remove each event from the dfs radar queue + * and process it. + * @dfs: Pointer to wlan_dfs structure. + * @chan: Pointer to DFS current channel. + * @rs: Pointer to dfs_state structure. + * @seg_id: segment id. + * @retval: pointer to retval. + * @false_radar_found: pointer to false radar found. + * + * Return: If radar found then return 1 else return 0. + */ +static inline int dfs_process_each_radarevent( + struct wlan_dfs *dfs, + struct dfs_channel *chan, + struct dfs_state **rs, + uint8_t *seg_id, + int *retval, + int *false_radar_found) +{ + struct dfs_event re, *event; + int found, empty; + int events_processed = 0; + uint64_t this_ts; + static uint32_t test_ts; + static uint32_t diff_ts; + uint32_t index; + + dfs_is_radarq_empty(dfs, &empty); + + while ((!empty) && (!*retval) && !(*false_radar_found) && + (events_processed < MAX_EVENTS)) { + dfs_remove_event_from_radarq(dfs, &event); + if (!event) { + empty = 1; + break; + } + events_processed++; + re = *event; + + dfs_return_event_to_eventq(dfs, event); + + *seg_id = re.re_seg_id; + found = 0; + if (dfs_skip_the_event(dfs, &re, rs)) { + dfs_is_radarq_empty(dfs, &empty); + continue; + } + + dfs_calculate_timestamps(dfs, &re, &this_ts); + + re.re_dur = dfs_process_pulse_dur(dfs, re.re_dur); + + dfs_add_to_pulseline(dfs, &re, &this_ts, &test_ts, &diff_ts, + &index); + + dfs_dur_check(dfs, chan, &re, diff_ts); + + dfs_log_event(dfs, &re, this_ts, diff_ts, index); + + dfs_conditional_clear_delaylines(dfs, diff_ts, this_ts, re); + + found = 0; + if (events_processed == 1) { + dfs->dfs_min_sidx = (re).re_sidx; + dfs->dfs_max_sidx = (re).re_sidx; + } + + dfs_check_if_bin5(dfs, &re, this_ts, diff_ts, &found); + if (found) { + *retval |= found; + dfs->dfs_freq_offset = DFS_SIDX_TO_FREQ_OFFSET( + (dfs->dfs_min_sidx + dfs->dfs_max_sidx) / 2); + return 1; + } + + dfs_check_if_nonbin5(dfs, &re, rs, this_ts, diff_ts, &found, + retval, false_radar_found); + + dfs_is_radarq_empty(dfs, &empty); + } + + return 0; +} + +/** + * dfs_false_radarfound_reset_vars () - Reset dfs variables after false radar + * found. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_false_radarfound_reset_vars( + struct wlan_dfs *dfs) +{ + dfs->dfs_seq_num = 0; + dfs_reset_radarq(dfs); + dfs_reset_alldelaylines(dfs); + dfs->dfs_phyerr_freq_min = 0x7fffffff; + dfs->dfs_phyerr_freq_max = 0; + dfs->dfs_phyerr_w53_counter = 0; + dfs->dfs_event_log_count = 0; + dfs->dfs_phyerr_count = 0; + dfs->dfs_phyerr_reject_count = 0; + dfs->dfs_phyerr_queued_count = 0; +} + +void dfs_radarfound_action_generic(struct wlan_dfs *dfs, uint8_t seg_id) +{ + struct radar_found_info *radar_found; + + radar_found = qdf_mem_malloc(sizeof(*radar_found)); + if (!radar_found) + return; + + qdf_mem_zero(radar_found, sizeof(*radar_found)); + radar_found->segment_id = seg_id; + dfs->dfs_seg_id = seg_id; + radar_found->pdev_id = + wlan_objmgr_pdev_get_pdev_id(dfs->dfs_pdev_obj); + + dfs_process_radar_ind(dfs, radar_found); + qdf_mem_free(radar_found); +} + +void dfs_radar_found_action(struct wlan_dfs *dfs, + bool bangradar, + uint8_t seg_id) +{ + /* If Host DFS confirmation is supported, save the curchan as + * radar found chan, send radar found indication along with + * average radar parameters to FW and start the host status + * wait timer. + */ + if (!bangradar && + (utils_get_dfsdomain(dfs->dfs_pdev_obj) == DFS_FCC_DOMAIN) && + lmac_is_host_dfs_check_support_enabled(dfs->dfs_pdev_obj) && + (dfs->dfs_spoof_test_done ? dfs->dfs_use_nol : 1)) { + dfs_radarfound_action_fcc(dfs, seg_id); + } else { + dfs_radarfound_action_generic(dfs, seg_id); + } +} + +void dfs_process_radarevent( + struct wlan_dfs *dfs, + struct dfs_channel *chan) +{ + struct dfs_state *rs = NULL; + uint8_t seg_id = 0; + int retval = 0; + int false_radar_found = 0; + bool bangradar = false; + + if (!dfs_radarevent_basic_sanity(dfs, chan)) + return; + /* + * TEST : Simulate radar bang, make sure we add the channel to NOL + * (bug 29968) + */ + if (dfs_handle_bangradar(dfs, chan, &rs, &seg_id, &retval)) { + if (retval) + bangradar = true; + goto dfsfound; + } + + if (!dfs_handle_missing_pulses(dfs, chan)) + return; + + dfs_process_each_radarevent(dfs, chan, &rs, &seg_id, &retval, + &false_radar_found); + +dfsfound: + if (retval) { + dfs_radarfound_reset_vars(dfs, rs, chan, seg_id); + dfs_radar_found_action(dfs, bangradar, seg_id); + } + + if (false_radar_found) + dfs_false_radarfound_reset_vars(dfs); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_radar.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_radar.c new file mode 100644 index 0000000000000000000000000000000000000000..fae38b0cd719f39e51b6a50f74ee34e6462494c7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_radar.c @@ -0,0 +1,491 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2011, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "../dfs.h" +#include "../dfs_zero_cac.h" +#include "../dfs_filter_init.h" +#include "wlan_dfs_mlme_api.h" +#include "wlan_dfs_lmac_api.h" +#include "../dfs_partial_offload_radar.h" +#include "../dfs_internal.h" + +void dfs_get_radars(struct wlan_dfs *dfs) +{ + struct wlan_objmgr_psoc *psoc; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + psoc = wlan_pdev_get_psoc(dfs->dfs_pdev_obj); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "psoc is NULL"); + return; + } + + if (wlan_objmgr_psoc_get_dev_type(psoc) == WLAN_DEV_OL) { + /* For Partial offload */ + dfs_get_po_radars(dfs); + } else { + /* For Direct Attach (DA) */ + dfs_get_da_radars(dfs); + } +} + +int dfs_radar_disable(struct wlan_dfs *dfs) +{ + dfs->dfs_proc_phyerr &= ~DFS_AR_EN; + dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN; + + return 0; +} + +void dfs_phyerr_param_copy(struct wlan_dfs_phyerr_param *dst, + struct wlan_dfs_phyerr_param *src) +{ + qdf_mem_copy(dst, src, sizeof(*dst)); +} + +#ifdef CONFIG_CHAN_FREQ_API +struct dfs_state *dfs_getchanstate(struct wlan_dfs *dfs, uint8_t *index, + int ext_chan_flag) +{ + struct dfs_state *rs = NULL; + struct dfs_channel *ch, cmp_ch1; + int i; + QDF_STATUS err; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return NULL; + } + ch = &cmp_ch1; + if (ext_chan_flag) { + err = dfs_mlme_get_extchan_for_freq( + dfs->dfs_pdev_obj, + &ch->dfs_ch_freq, + &ch->dfs_ch_flags, + &ch->dfs_ch_flagext, + &ch->dfs_ch_ieee, + &ch->dfs_ch_vhtop_ch_freq_seg1, + &ch->dfs_ch_vhtop_ch_freq_seg2, + &ch->dfs_ch_mhz_freq_seg1, + &ch->dfs_ch_mhz_freq_seg2); + + if (err == QDF_STATUS_SUCCESS) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "Extension channel freq = %u flags=0x%x", + ch->dfs_ch_freq, + ch->dfs_ch_flagext); + } else { + return NULL; + } + } else { + ch = dfs->dfs_curchan; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "Primary channel freq = %u flags=0x%x", + ch->dfs_ch_freq, ch->dfs_ch_flagext); + } + + for (i = 0; i < DFS_NUM_RADAR_STATES; i++) { + if ((dfs->dfs_radar[i].rs_chan.dfs_ch_freq == + ch->dfs_ch_freq) && + (dfs->dfs_radar[i].rs_chan.dfs_ch_flags == + ch->dfs_ch_flags)) { + if (index) + *index = (uint8_t)i; + return &dfs->dfs_radar[i]; + } + } + /* No existing channel found, look for first free channel state entry.*/ + for (i = 0; i < DFS_NUM_RADAR_STATES; i++) { + if (dfs->dfs_radar[i].rs_chan.dfs_ch_freq == 0) { + rs = &dfs->dfs_radar[i]; + /* Found one, set channel info and default thresholds.*/ + rs->rs_chan = *ch; + + /* Copy the parameters from the default set. */ + dfs_phyerr_param_copy(&rs->rs_param, + &dfs->dfs_defaultparams); + + if (index) + *index = (uint8_t)i; + + return rs; + } + } + dfs_debug(dfs, WLAN_DEBUG_DFS2, "No more radar states left."); + + return NULL; +} +#else +#ifdef CONFIG_CHAN_NUM_API +struct dfs_state *dfs_getchanstate(struct wlan_dfs *dfs, uint8_t *index, + int ext_chan_flag) +{ + struct dfs_state *rs = NULL; + struct dfs_channel *cmp_ch, cmp_ch1; + int i; + QDF_STATUS err; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return NULL; + } + cmp_ch = &cmp_ch1; + if (ext_chan_flag) { + err = dfs_mlme_get_extchan(dfs->dfs_pdev_obj, + &(cmp_ch->dfs_ch_freq), + &(cmp_ch->dfs_ch_flags), + &(cmp_ch->dfs_ch_flagext), + &(cmp_ch->dfs_ch_ieee), + &(cmp_ch->dfs_ch_vhtop_ch_freq_seg1), + &(cmp_ch->dfs_ch_vhtop_ch_freq_seg2)); + + if (err == QDF_STATUS_SUCCESS) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "Extension channel freq = %u flags=0x%x", + cmp_ch->dfs_ch_freq, + cmp_ch->dfs_ch_flagext); + } else + return NULL; + } else { + cmp_ch = dfs->dfs_curchan; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "Primary channel freq = %u flags=0x%x", + cmp_ch->dfs_ch_freq, cmp_ch->dfs_ch_flagext); + } + + for (i = 0; i < DFS_NUM_RADAR_STATES; i++) { + if ((dfs->dfs_radar[i].rs_chan.dfs_ch_freq == + cmp_ch->dfs_ch_freq) && + (dfs->dfs_radar[i].rs_chan.dfs_ch_flags == + cmp_ch->dfs_ch_flags) + ) { + if (index) + *index = (uint8_t)i; + return &(dfs->dfs_radar[i]); + } + } + /* No existing channel found, look for first free channel state entry.*/ + for (i = 0; i < DFS_NUM_RADAR_STATES; i++) { + if (dfs->dfs_radar[i].rs_chan.dfs_ch_freq == 0) { + rs = &(dfs->dfs_radar[i]); + /* Found one, set channel info and default thresholds.*/ + rs->rs_chan = *cmp_ch; + + /* Copy the parameters from the default set. */ + dfs_phyerr_param_copy(&rs->rs_param, + &dfs->dfs_defaultparams); + + if (index) + *index = (uint8_t)i; + + return rs; + } + } + dfs_debug(dfs, WLAN_DEBUG_DFS2, "No more radar states left."); + + return NULL; +} +#endif +#endif + +#ifdef CONFIG_CHAN_FREQ_API +void dfs_radar_enable(struct wlan_dfs *dfs, int no_cac, uint32_t opmode) +{ + int is_ext_ch; + int is_fastclk = 0; + struct dfs_channel *exch, extchan; + QDF_STATUS err = QDF_STATUS_E_FAILURE; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + is_ext_ch = WLAN_IS_CHAN_11N_HT40(dfs->dfs_curchan); + lmac_dfs_disable(dfs->dfs_pdev_obj, no_cac); + /* + * In all modes, if the primary is DFS then we have to + * enable radar detection. In HT80_80, we can have + * primary non-DFS 80MHz with extension 80MHz DFS. + */ + if ((WLAN_IS_CHAN_DFS(dfs->dfs_curchan) || + ((WLAN_IS_CHAN_11AC_VHT160(dfs->dfs_curchan) || + WLAN_IS_CHAN_11AC_VHT80_80(dfs->dfs_curchan)) && + WLAN_IS_CHAN_DFS_CFREQ2(dfs->dfs_curchan))) || + (dfs_is_precac_timer_running(dfs))) { + struct dfs_state *rs_pri = NULL, *rs_ext = NULL; + uint8_t index_pri, index_ext; + + dfs->dfs_proc_phyerr |= DFS_AR_EN; + dfs->dfs_proc_phyerr |= DFS_RADAR_EN; + dfs->dfs_proc_phyerr |= DFS_SECOND_SEGMENT_RADAR_EN; + + exch = &extchan; + if (is_ext_ch) { + err = dfs_mlme_get_extchan_for_freq + ( + dfs->dfs_pdev_obj, + &exch->dfs_ch_freq, + &exch->dfs_ch_flags, + &exch->dfs_ch_flagext, + &exch->dfs_ch_ieee, + &exch->dfs_ch_vhtop_ch_freq_seg1, + &exch->dfs_ch_vhtop_ch_freq_seg2, + &exch->dfs_ch_mhz_freq_seg1, + &exch->dfs_ch_mhz_freq_seg2); + } + dfs_reset_alldelaylines(dfs); + + rs_pri = dfs_getchanstate(dfs, &index_pri, 0); + if (err == QDF_STATUS_SUCCESS) + rs_ext = dfs_getchanstate(dfs, &index_ext, 1); + + if (rs_pri && ((err == QDF_STATUS_E_FAILURE) || (rs_ext))) { + struct wlan_dfs_phyerr_param pe; + + qdf_mem_set(&pe, sizeof(pe), '\0'); + + if (index_pri != dfs->dfs_curchan_radindex) + dfs_reset_alldelaylines(dfs); + + dfs->dfs_curchan_radindex = (int16_t)index_pri; + + if (rs_ext) + dfs->dfs_extchan_radindex = (int16_t)index_ext; + + dfs_phyerr_param_copy(&pe, &rs_pri->rs_param); + dfs_debug(dfs, WLAN_DEBUG_DFS3, + "firpwr=%d, rssi=%d, height=%d, prssi=%d, inband=%d, relpwr=%d, relstep=%d, maxlen=%d", + pe.pe_firpwr, + pe.pe_rrssi, pe.pe_height, + pe.pe_prssi, pe.pe_inband, + pe.pe_relpwr, pe.pe_relstep, + pe.pe_maxlen); + + lmac_dfs_enable(dfs->dfs_pdev_obj, &is_fastclk, + &pe, dfs->dfsdomain); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Enabled radar detection on channel %d", + dfs->dfs_curchan->dfs_ch_freq); + + dfs->dur_multiplier = is_fastclk ? + DFS_FAST_CLOCK_MULTIPLIER : + DFS_NO_FAST_CLOCK_MULTIPLIER; + + dfs_debug(dfs, WLAN_DEBUG_DFS3, + "duration multiplier is %d", + dfs->dur_multiplier); + } else + dfs_debug(dfs, WLAN_DEBUG_DFS, + "No more radar states left"); + } +} +#else +#ifdef CONFIG_CHAN_NUM_API +void dfs_radar_enable(struct wlan_dfs *dfs, int no_cac, uint32_t opmode) +{ + int is_ext_ch; + int is_fastclk = 0; + struct dfs_channel *ext_ch, extchan; + QDF_STATUS err = QDF_STATUS_E_FAILURE; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + is_ext_ch = WLAN_IS_CHAN_11N_HT40(dfs->dfs_curchan); + lmac_dfs_disable(dfs->dfs_pdev_obj, no_cac); + /* + * In all modes, if the primary is DFS then we have to + * enable radar detection. In HT80_80, we can have + * primary non-DFS 80MHz with extension 80MHz DFS. + */ + if ((WLAN_IS_CHAN_DFS(dfs->dfs_curchan) || + ((WLAN_IS_CHAN_11AC_VHT160(dfs->dfs_curchan) || + WLAN_IS_CHAN_11AC_VHT80_80(dfs->dfs_curchan)) + && + WLAN_IS_CHAN_DFS_CFREQ2(dfs->dfs_curchan))) || + (dfs_is_precac_timer_running(dfs))) { + struct dfs_state *rs_pri = NULL, *rs_ext = NULL; + uint8_t index_pri, index_ext; + + dfs->dfs_proc_phyerr |= DFS_AR_EN; + dfs->dfs_proc_phyerr |= DFS_RADAR_EN; + dfs->dfs_proc_phyerr |= DFS_SECOND_SEGMENT_RADAR_EN; + + ext_ch = &extchan; + if (is_ext_ch) + err = dfs_mlme_get_extchan(dfs->dfs_pdev_obj, + &(ext_ch->dfs_ch_freq), + &(ext_ch->dfs_ch_flags), + &(ext_ch->dfs_ch_flagext), + &(ext_ch->dfs_ch_ieee), + &(ext_ch->dfs_ch_vhtop_ch_freq_seg1), + &(ext_ch->dfs_ch_vhtop_ch_freq_seg2)); + + + dfs_reset_alldelaylines(dfs); + + rs_pri = dfs_getchanstate(dfs, &index_pri, 0); + if (err == QDF_STATUS_SUCCESS) + rs_ext = dfs_getchanstate(dfs, &index_ext, 1); + + if (rs_pri && ((err == QDF_STATUS_E_FAILURE) || + (rs_ext))) { + struct wlan_dfs_phyerr_param pe; + + qdf_mem_set(&pe, sizeof(pe), '\0'); + + if (index_pri != dfs->dfs_curchan_radindex) + dfs_reset_alldelaylines(dfs); + + dfs->dfs_curchan_radindex = (int16_t)index_pri; + + if (rs_ext) + dfs->dfs_extchan_radindex = (int16_t)index_ext; + + dfs_phyerr_param_copy(&pe, &rs_pri->rs_param); + dfs_debug(dfs, WLAN_DEBUG_DFS3, + "firpwr=%d, rssi=%d, height=%d, prssi=%d, inband=%d, relpwr=%d, relstep=%d, maxlen=%d", + pe.pe_firpwr, + pe.pe_rrssi, pe.pe_height, + pe.pe_prssi, pe.pe_inband, + pe.pe_relpwr, pe.pe_relstep, + pe.pe_maxlen); + + lmac_dfs_enable(dfs->dfs_pdev_obj, &is_fastclk, + &pe, dfs->dfsdomain); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Enabled radar detection on channel %d", + dfs->dfs_curchan->dfs_ch_freq); + + dfs->dur_multiplier = is_fastclk ? + DFS_FAST_CLOCK_MULTIPLIER : + DFS_NO_FAST_CLOCK_MULTIPLIER; + + dfs_debug(dfs, WLAN_DEBUG_DFS3, + "duration multiplier is %d", + dfs->dur_multiplier); + } else + dfs_debug(dfs, WLAN_DEBUG_DFS, + "No more radar states left"); + } +} +#endif +#endif + +int dfs_set_thresholds(struct wlan_dfs *dfs, const uint32_t threshtype, + const uint32_t value) +{ + int16_t chanindex; + struct dfs_state *rs; + struct wlan_dfs_phyerr_param pe; + int is_fastclk = 0; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return 0; + } + + chanindex = dfs->dfs_curchan_radindex; + if ((chanindex < 0) || (chanindex >= DFS_NUM_RADAR_STATES)) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "%s: chanindex = %d, DFS_NUM_RADAR_STATES=%d\n", + __func__, + chanindex, + DFS_NUM_RADAR_STATES); + return 0; + } + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "threshtype=%d, value=%d", threshtype, value); + + wlan_dfs_phyerr_init_noval(&pe); + + rs = &(dfs->dfs_radar[chanindex]); + switch (threshtype) { + case DFS_PARAM_FIRPWR: + rs->rs_param.pe_firpwr = (int32_t) value; + pe.pe_firpwr = value; + break; + case DFS_PARAM_RRSSI: + rs->rs_param.pe_rrssi = value; + pe.pe_rrssi = value; + break; + case DFS_PARAM_HEIGHT: + rs->rs_param.pe_height = value; + pe.pe_height = value; + break; + case DFS_PARAM_PRSSI: + rs->rs_param.pe_prssi = value; + pe.pe_prssi = value; + break; + case DFS_PARAM_INBAND: + rs->rs_param.pe_inband = value; + pe.pe_inband = value; + break; + /* 5413 specific */ + case DFS_PARAM_RELPWR: + rs->rs_param.pe_relpwr = value; + pe.pe_relpwr = value; + break; + case DFS_PARAM_RELSTEP: + rs->rs_param.pe_relstep = value; + pe.pe_relstep = value; + break; + case DFS_PARAM_MAXLEN: + rs->rs_param.pe_maxlen = value; + pe.pe_maxlen = value; + break; + default: + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "unknown threshtype (%d)", threshtype); + break; + } + + + /* + * The driver layer dfs_enable routine is tasked with translating + * values from the global format to the per-device (HAL, offload) + * format. + */ + lmac_dfs_enable(dfs->dfs_pdev_obj, &is_fastclk, + &pe, dfs->dfsdomain); + + return 1; +} + +int dfs_get_thresholds(struct wlan_dfs *dfs, + struct wlan_dfs_phyerr_param *param) +{ + lmac_dfs_get_thresholds(dfs->dfs_pdev_obj, param); + + return 1; +} + +uint16_t dfs_chan2freq(struct dfs_channel *chan) +{ + if (!chan) + return 0; + + return chan == WLAN_CHAN_ANYC ? WLAN_CHAN_ANY : chan->dfs_ch_freq; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_staggered.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_staggered.c new file mode 100644 index 0000000000000000000000000000000000000000..b0d08a69a5d6e3a8636489d139c5d834906fa877 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_staggered.c @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2013, 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: ETSI 1.5.1 introduced new waveforms which use staggered PRIs within + * the same waveform. This file contains the detection implementation for + * these specific types of radars. This logic is different from the other + * detection because it must detect waveforms that may have 2 or more + * different PRIs (pulse repetition intervals). + */ + +#include "../dfs.h" +#include "../dfs_process_radar_found_ind.h" + +/** + * dfs_is_pri_multiple() - Is PRI is multiple. + * @sample_pri: Sample PRI. + * @refpri: Reference PRI. + */ +static int dfs_is_pri_multiple(uint32_t sample_pri, uint32_t refpri) +{ +#define MAX_ALLOWED_MISSED 3 + int i; + + if (sample_pri < refpri || (!refpri)) + return 0; + + for (i = 1; i <= MAX_ALLOWED_MISSED; i++) { + if ((sample_pri%(i*refpri) <= 5)) + return 1; + } + + return 0; +#undef MAX_ALLOWED_MISSED +} + +/** + * dfs_is_unique_pri() - Check for the unique PRI. + * @highestpri: Highest PRI. + * @midpri: MID PRI. + * @lowestpri: Lowest PRI. + * @refpri: Reference PRI. + */ +static int dfs_is_unique_pri(uint32_t highestpri, uint32_t midpri, + uint32_t lowestpri, uint32_t refpri) +{ +#define DFS_STAGGERED_PRI_MARGIN_MIN 20 +#define DFS_STAGGERED_PRI_MARGIN_MAX 400 + if ((DFS_DIFF(lowestpri, refpri) >= DFS_STAGGERED_PRI_MARGIN_MIN) && + (DFS_DIFF(midpri, refpri) >= DFS_STAGGERED_PRI_MARGIN_MIN) && + (DFS_DIFF(highestpri, refpri) >= DFS_STAGGERED_PRI_MARGIN_MIN) + ) + return 1; + + if ((dfs_is_pri_multiple(refpri, highestpri)) || + (dfs_is_pri_multiple(refpri, lowestpri)) || + (dfs_is_pri_multiple(refpri, midpri))) + return 0; +#undef DFS_STAGGERED_PRI_MARGIN_MIN +#undef DFS_STAGGERED_PRI_MARGIN_MAX + + return 0; +} + +int dfs_staggered_check(struct wlan_dfs *dfs, struct dfs_filter *rf, + uint32_t deltaT, uint32_t width) +{ + uint32_t refpri, refdur, searchpri = 0, deltapri; + uint32_t n, i, primargin, durmargin; + int score[DFS_MAX_DL_SIZE], delayindex, dindex, found = 0; + struct dfs_delayline *dl; + uint32_t scoreindex, lowpriindex = 0, lowpri = 0xffff; + int higherthan, lowerthan, numscores; + int numpulseshigh = 0, numpulsesmid = 0, numpulsestemp = 0; + uint32_t lowestscore = 0, lowestscoreindex = 0, lowestpri = 0; + uint32_t midscore = 0, midscoreindex = 0, midpri = 0; + uint32_t highestscore = 0, highestscoreindex = 0, highestpri = 0; + + dl = &rf->rf_dl; + if (dl->dl_numelems < (rf->rf_threshold-1)) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "numelems %d < threshold for filter %d", + dl->dl_numelems, + rf->rf_pulseid); + return 0; + } + if (deltaT > rf->rf_filterlen) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "numelems %d < threshold for filter %d", + dl->dl_numelems, + rf->rf_pulseid); + return 0; + } + primargin = 6; + if (rf->rf_maxdur < 10) + durmargin = 4; + else + durmargin = 6; + + qdf_mem_zero(score, sizeof(int)*DFS_MAX_DL_SIZE); + /* Find out the lowest pri */ + for (n = 0; n < dl->dl_numelems; n++) { + delayindex = (dl->dl_firstelem + n) & DFS_MAX_DL_MASK; + refpri = dl->dl_elems[delayindex].de_time; + if (refpri == 0) { + continue; + } else if (refpri < lowpri) { + lowpri = dl->dl_elems[delayindex].de_time; + lowpriindex = n; + } + } + + /* Find out the each delay element's pri score */ + for (n = 0; n < dl->dl_numelems; n++) { + delayindex = (dl->dl_firstelem + n) & DFS_MAX_DL_MASK; + refpri = dl->dl_elems[delayindex].de_time; + if (refpri == 0) + continue; + + if ((refpri > rf->rf_maxpri) || (refpri < rf->rf_minpri)) { + score[n] = 0; + continue; + } + + for (i = 0; i < dl->dl_numelems; i++) { + dindex = (dl->dl_firstelem + i) & DFS_MAX_DL_MASK; + searchpri = dl->dl_elems[dindex].de_time; + deltapri = DFS_DIFF(searchpri, refpri); + if (deltapri < primargin) + score[n]++; + } + } + + dfs->dfs_freq_offset = DFS_SIDX_TO_FREQ_OFFSET( + (dl->dl_min_sidx + dl->dl_max_sidx) / 2); + + for (n = 0; n < dl->dl_numelems; n++) { + delayindex = (dl->dl_firstelem + n) & DFS_MAX_DL_MASK; + refdur = dl->dl_elems[delayindex].de_time; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "score[%d]=%d pri=%d", + n, score[n], refdur); + } + + /* Find out the 2 or 3 highest scorers */ + scoreindex = 0; + highestscore = 0; + highestscoreindex = 0; + highestpri = 0; numscores = 0; lowestscore = 0; + + for (n = 0; n < dl->dl_numelems; n++) { + higherthan = 0; + lowerthan = 0; + delayindex = (dl->dl_firstelem + n) & DFS_MAX_DL_MASK; + refpri = dl->dl_elems[delayindex].de_time; + + if (!dfs_is_unique_pri(highestpri, + midpri, + lowestpri, + refpri)) + continue; + + if (score[n] >= highestscore) { + lowestscore = midscore; + lowestpri = midpri; + lowestscoreindex = midscoreindex; + midscore = highestscore; + midpri = highestpri; + midscoreindex = highestscoreindex; + highestscore = score[n]; + highestpri = refpri; + highestscoreindex = n; + } else if (score[n] >= midscore) { + lowestscore = midscore; + lowestpri = midpri; + lowestscoreindex = midscoreindex; + midscore = score[n]; + midpri = refpri; + midscoreindex = n; + } else if (score[n] >= lowestscore) { + lowestscore = score[n]; + lowestpri = refpri; + lowestscoreindex = n; + } + } + + if (midscore == 0) + return 0; + + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "FINAL highestscore=%d highestscoreindex = %d highestpri = %d", + highestscore, highestscoreindex, highestpri); + + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "FINAL lowestscore=%d lowestscoreindex=%d lowpri=%d", + lowestscore, lowestscoreindex, lowestpri); + + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "FINAL midscore=%d midscoreindex=%d midpri=%d", + midscore, midscoreindex, midpri); + + delayindex = (dl->dl_firstelem + highestscoreindex) & DFS_MAX_DL_MASK; + refdur = dl->dl_elems[delayindex].de_dur; + refpri = dl->dl_elems[delayindex].de_time; + + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "highscoreindex=%d refdur=%d refpri=%d", + highestscoreindex, refdur, refpri); + + numpulsestemp = dfs_bin_pri_check(dfs, rf, dl, highestscore, refpri, + refdur, 0, highestpri); + numpulseshigh = numpulsestemp; + numpulsestemp = dfs_bin_pri_check(dfs, rf, dl, highestscore, refpri, + refdur, 0, highestpri + midpri); + if (numpulsestemp > numpulseshigh) + numpulseshigh = numpulsestemp; + + numpulsestemp = dfs_bin_pri_check(dfs, rf, dl, highestscore, refpri, + refdur, 0, highestpri + midpri + lowestpri); + if (numpulsestemp > numpulseshigh) + numpulseshigh = numpulsestemp; + + delayindex = (dl->dl_firstelem + midscoreindex) & DFS_MAX_DL_MASK; + refdur = dl->dl_elems[delayindex].de_dur; + refpri = dl->dl_elems[delayindex].de_time; + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "midscoreindex=%d refdur=%d refpri=%d", + midscoreindex, refdur, refpri); + + numpulsestemp = dfs_bin_pri_check(dfs, rf, dl, midscore, refpri, refdur, + 0, midpri); + numpulsesmid = numpulsestemp; + numpulsestemp = dfs_bin_pri_check(dfs, rf, dl, midscore, refpri, refdur, + 0, highestpri + midpri); + if (numpulsestemp > numpulsesmid) + numpulsesmid = numpulsestemp; + numpulsestemp = dfs_bin_pri_check(dfs, rf, dl, midscore, refpri, refdur, + 0, highestpri + midpri + lowestpri); + if (numpulsestemp > numpulsesmid) + numpulsesmid = numpulsestemp; + + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "numpulseshigh=%d, numpulsesmid=%d", + numpulseshigh, numpulsesmid); + + if ((numpulseshigh >= rf->rf_threshold) && + (numpulsesmid >= rf->rf_threshold)) { + found = 1; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "MATCH filter=%u numpulseshigh=%u numpulsesmid= %u thresh=%u", + rf->rf_pulseid, numpulseshigh, + numpulsesmid, rf->rf_threshold); + } + + return found; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs.c new file mode 100644 index 0000000000000000000000000000000000000000..e05d6be2c5fbe4713e0db6c5ea38afd474acf012 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs.c @@ -0,0 +1,943 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2006, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains the dfs_attach() and dfs_detach() functions as well + * as the dfs_control() function which is used to process ioctls related to DFS. + * For Linux/Mac, "radartool" is the command line tool that can be used to call + * various ioctls to set and get radar detection thresholds. + */ + +#include "../dfs_zero_cac.h" +#include "wlan_dfs_lmac_api.h" +#include "wlan_dfs_mlme_api.h" +#include "wlan_dfs_tgt_api.h" +#include "../dfs_internal.h" +#include "../dfs_filter_init.h" +#include "../dfs_full_offload.h" +#include +#include "wlan_dfs_utils_api.h" +#include "../dfs_process_radar_found_ind.h" +#include "../dfs_partial_offload_radar.h" + +/* Disable NOL in FW. */ +#define DISABLE_NOL_FW 0 + +#ifndef WLAN_DFS_STATIC_MEM_ALLOC +/* + * dfs_alloc_wlan_dfs() - allocate wlan_dfs buffer + * + * Return: buffer, null on failure. + */ +static inline struct wlan_dfs *dfs_alloc_wlan_dfs(void) +{ + return qdf_mem_malloc(sizeof(struct wlan_dfs)); +} + +/* + * dfs_free_wlan_dfs() - Free wlan_dfs buffer + * @dfs: wlan_dfs buffer pointer + * + * Return: None + */ +static inline void dfs_free_wlan_dfs(struct wlan_dfs *dfs) +{ + qdf_mem_free(dfs); +} + +/* + * dfs_alloc_dfs_curchan() - allocate dfs_channel buffer + * + * Return: buffer, null on failure. + */ +static inline struct dfs_channel *dfs_alloc_dfs_curchan(void) +{ + return qdf_mem_malloc(sizeof(struct dfs_channel)); +} + +static inline struct dfs_channel *dfs_alloc_dfs_prevchan(void) +{ + return qdf_mem_malloc(sizeof(struct dfs_channel)); +} + +/* + * dfs_free_dfs_chan() - Free dfs_channel buffer + * @dfs_chan: dfs_channel buffer pointer + * + * Return: None + */ +static inline void dfs_free_dfs_chan(struct dfs_channel *dfs_chan) +{ + qdf_mem_free(dfs_chan); +} + +#else + +/* Static buffers for DFS objects */ +static struct wlan_dfs global_dfs; +static struct dfs_channel global_dfs_curchan; +static struct dfs_channel global_dfs_prevchan; + +static inline struct wlan_dfs *dfs_alloc_wlan_dfs(void) +{ + return &global_dfs; +} + +static inline void dfs_free_wlan_dfs(struct wlan_dfs *dfs) +{ +} + +static inline struct dfs_channel *dfs_alloc_dfs_curchan(void) +{ + return &global_dfs_curchan; +} + +static inline struct dfs_channel *dfs_alloc_dfs_prevchan(void) +{ + return &global_dfs_prevchan; +} + +static inline void dfs_free_dfs_chan(struct dfs_channel *dfs_chan) +{ +} +#endif + +/** + * dfs_testtimer_task() - Sends CSA in the current channel. + * + * When the user sets usenol to 0 and inject the RADAR, AP does not mark the + * channel as RADAR and does not add the channel to NOL. It sends the CSA in + * the current channel. + */ +#ifdef CONFIG_CHAN_FREQ_API +static os_timer_func(dfs_testtimer_task) +{ + struct wlan_dfs *dfs = NULL; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + dfs->wlan_dfstest = 0; + + /* + * Flip the channel back to the original channel. + * Make sure this is done properly with a CSA. + */ + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "go back to channel %d", + dfs->wlan_dfstest_ieeechan); + dfs_mlme_start_csa_for_freq(dfs->dfs_pdev_obj, + dfs->wlan_dfstest_ieeechan, + dfs->dfs_curchan->dfs_ch_freq, + dfs->dfs_curchan->dfs_ch_mhz_freq_seg2, + dfs->dfs_curchan->dfs_ch_flags); +} +#else +#ifdef CONFIG_CHAN_NUM_API +static os_timer_func(dfs_testtimer_task) +{ + struct wlan_dfs *dfs = NULL; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + dfs->wlan_dfstest = 0; + + /* + * Flip the channel back to the original channel. + * Make sure this is done properly with a CSA. + */ + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "go back to channel %d", + dfs->wlan_dfstest_ieeechan); + dfs_mlme_start_csa(dfs->dfs_pdev_obj, + dfs->wlan_dfstest_ieeechan, + dfs->dfs_curchan->dfs_ch_freq, + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg2, + dfs->dfs_curchan->dfs_ch_flags); +} +#endif +#endif + +int dfs_get_debug_info(struct wlan_dfs *dfs, void *data) +{ + if (data) + *(uint32_t *)data = dfs->dfs_proc_phyerr; + + return (int)dfs->dfs_proc_phyerr; +} + +void dfs_main_task_testtimer_init(struct wlan_dfs *dfs) +{ + qdf_timer_init(NULL, + &(dfs->wlan_dfstesttimer), + dfs_testtimer_task, (void *)dfs, + QDF_TIMER_TYPE_WAKE_APPS); +} + +int dfs_create_object(struct wlan_dfs **dfs) +{ + *dfs = dfs_alloc_wlan_dfs(); + if (!(*dfs)) + return 1; + + qdf_mem_zero(*dfs, sizeof(**dfs)); + + (*dfs)->dfs_curchan = dfs_alloc_dfs_curchan(); + if (!((*dfs)->dfs_curchan)) { + dfs_free_wlan_dfs(*dfs); + return 1; + } + + (*dfs)->dfs_prevchan = dfs_alloc_dfs_prevchan(); + if (!((*dfs)->dfs_prevchan)) { + dfs_free_wlan_dfs(*dfs); + return 1; + } + qdf_mem_zero((*dfs)->dfs_prevchan, sizeof(struct dfs_channel)); + return 0; +} + +int dfs_attach(struct wlan_dfs *dfs) +{ + int ret; + + if (!dfs->dfs_is_offload_enabled) { + ret = dfs_main_attach(dfs); + + /* + * For full offload we have a wmi handler registered to process + * a radar event from firmware in the event of a radar detect. + * So, init of timer, dfs_task is not required for + * full-offload. dfs_task timer is called in + * dfs_main_timer_init within dfs_main_attach for + * partial-offload in the event of radar detect. + */ + if (ret) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_main_attach failed"); + return ret; + } + } + dfs_cac_timer_attach(dfs); + dfs_zero_cac_attach(dfs); + dfs_nol_attach(dfs); + + /* + * Init of timer ,dfs_testtimer_task is required by both partial + * and full offload, indicating test mode timer initialization for both. + */ + dfs_main_task_testtimer_init(dfs); + return 0; +} + +void dfs_stop(struct wlan_dfs *dfs) +{ + dfs_nol_timer_cleanup(dfs); + dfs_nol_workqueue_cleanup(dfs); + dfs_clear_nolhistory(dfs); +} + +void dfs_task_testtimer_reset(struct wlan_dfs *dfs) +{ + if (dfs->wlan_dfstest) { + qdf_timer_sync_cancel(&dfs->wlan_dfstesttimer); + dfs->wlan_dfstest = 0; + } +} + +void dfs_task_testtimer_detach(struct wlan_dfs *dfs) +{ + qdf_timer_free(&dfs->wlan_dfstesttimer); + dfs->wlan_dfstest = 0; +} + +void dfs_reset(struct wlan_dfs *dfs) +{ + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_cac_timer_reset(dfs); + dfs_zero_cac_reset(dfs); + if (!dfs->dfs_is_offload_enabled) { + dfs_main_timer_reset(dfs); + dfs_host_wait_timer_reset(dfs); + dfs_false_radarfound_reset_vars(dfs); + } + dfs_task_testtimer_reset(dfs); +} + +void dfs_timer_detach(struct wlan_dfs *dfs) +{ + dfs_cac_timer_detach(dfs); + dfs_zero_cac_timer_detach(dfs->dfs_soc_obj); + + if (!dfs->dfs_is_offload_enabled) { + dfs_main_timer_detach(dfs); + dfs_host_wait_timer_detach(dfs); + } + + dfs_task_testtimer_detach(dfs); + dfs_nol_timer_detach(dfs); +} + +void dfs_detach(struct wlan_dfs *dfs) +{ + dfs_timer_detach(dfs); + if (!dfs->dfs_is_offload_enabled) + dfs_main_detach(dfs); + dfs_zero_cac_detach(dfs); + dfs_nol_detach(dfs); +} + +#ifndef WLAN_DFS_STATIC_MEM_ALLOC +void dfs_destroy_object(struct wlan_dfs *dfs) +{ + dfs_free_dfs_chan(dfs->dfs_prevchan); + dfs_free_dfs_chan(dfs->dfs_curchan); + dfs_free_wlan_dfs(dfs); +} +#else +void dfs_destroy_object(struct wlan_dfs *dfs) +{ +} +#endif + +/* dfs_set_disable_radar_marking()- Set the flag to mark/unmark a radar flag + * on NOL channel. + * @dfs: Pointer to wlan_dfs structure. + * @disable_radar_marking: Flag to enable/disable marking channel as radar. + */ +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) +static void dfs_set_disable_radar_marking(struct wlan_dfs *dfs, + bool disable_radar_marking) +{ + dfs->dfs_disable_radar_marking = disable_radar_marking; +} +#else +static inline void dfs_set_disable_radar_marking(struct wlan_dfs *dfs, + bool disable_radar_marking) +{ +} +#endif + +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) +bool dfs_get_disable_radar_marking(struct wlan_dfs *dfs) +{ + return dfs->dfs_disable_radar_marking; +} +#else +static inline bool dfs_get_disable_radar_marking(struct wlan_dfs *dfs) +{ + return QDF_STATUS_SUCCESS; +} +#endif +int dfs_control(struct wlan_dfs *dfs, + u_int id, + void *indata, + uint32_t insize, + void *outdata, + uint32_t *outsize) +{ + struct wlan_dfs_phyerr_param peout; + struct dfs_ioctl_params *dfsparams; + struct dfs_bangradar_params *bangradar_params; + int error = 0; + uint32_t val = 0; + struct dfsreq_nolinfo *nol; + uint32_t *data = NULL; + int i; + struct dfs_emulate_bang_radar_test_cmd dfs_unit_test; + int usenol_pdev_param; + + qdf_mem_zero(&dfs_unit_test, sizeof(dfs_unit_test)); + + if (!dfs) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + goto bad; + } + + switch (id) { + case DFS_SET_THRESH: + if (insize < sizeof(struct dfs_ioctl_params) || !indata) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "insize = %d, expected = %zu bytes, indata = %pK", + insize, + sizeof(struct dfs_ioctl_params), + indata); + error = -EINVAL; + break; + } + dfsparams = (struct dfs_ioctl_params *)indata; + if (!dfs_set_thresholds(dfs, DFS_PARAM_FIRPWR, + dfsparams->dfs_firpwr)) + error = -EINVAL; + if (!dfs_set_thresholds(dfs, DFS_PARAM_RRSSI, + dfsparams->dfs_rrssi)) + error = -EINVAL; + if (!dfs_set_thresholds(dfs, DFS_PARAM_HEIGHT, + dfsparams->dfs_height)) + error = -EINVAL; + if (!dfs_set_thresholds(dfs, DFS_PARAM_PRSSI, + dfsparams->dfs_prssi)) + error = -EINVAL; + if (!dfs_set_thresholds(dfs, DFS_PARAM_INBAND, + dfsparams->dfs_inband)) + error = -EINVAL; + + /* 5413 speicfic. */ + if (!dfs_set_thresholds(dfs, DFS_PARAM_RELPWR, + dfsparams->dfs_relpwr)) + error = -EINVAL; + if (!dfs_set_thresholds(dfs, DFS_PARAM_RELSTEP, + dfsparams->dfs_relstep)) + error = -EINVAL; + if (!dfs_set_thresholds(dfs, DFS_PARAM_MAXLEN, + dfsparams->dfs_maxlen)) + error = -EINVAL; + break; + case DFS_BANGRADAR: + /* + * Handle all types of Bangradar here. + * Bangradar arguments: + * seg_id : Segment ID where radar should be injected. + * is_chirp : Is chirp radar or non chirp radar. + * freq_offset : Frequency offset from center frequency. + * + * Type 1 (DFS_BANGRADAR_FOR_ALL_SUBCHANS): To add all subchans. + * Type 2 (DFS_BANGRADAR_FOR_ALL_SUBCHANS_OF_SEGID): To add all + * subchans of given segment_id. + * Type 3 (DFS_BANGRADAR_FOR_SPECIFIC_SUBCHANS): To add specific + * subchans based on the arguments. + * + * The arguments will already be filled in the indata structure + * based on the type. + * If an argument is not specified by user, it will be set to + * default (0) in the indata already and correspondingly, + * the type will change. + */ + if (insize < sizeof(struct dfs_bangradar_params) || + !indata) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "insize = %d, expected = %zu bytes, indata = %pK", + insize, + sizeof(struct dfs_bangradar_params), + indata); + error = -EINVAL; + break; + } + bangradar_params = (struct dfs_bangradar_params *)indata; + if (bangradar_params) { + if (abs(bangradar_params->freq_offset) > + FREQ_OFFSET_BOUNDARY_FOR_80MHZ) { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Frequency Offset out of bound"); + error = -EINVAL; + break; + } else if (bangradar_params->seg_id > + SEG_ID_SECONDARY) { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Illegal segment ID"); + error = -EINVAL; + break; + } + dfs->dfs_bangradar_type = + bangradar_params->bangradar_type; + dfs->dfs_seg_id = bangradar_params->seg_id; + dfs->dfs_is_chirp = bangradar_params->is_chirp; + dfs->dfs_freq_offset = bangradar_params->freq_offset; + + if (dfs->dfs_is_offload_enabled) { + error = dfs_fill_emulate_bang_radar_test + (dfs, dfs->dfs_seg_id, + dfs->dfs_is_chirp, + dfs->dfs_freq_offset, + &dfs_unit_test); + } else { + error = dfs_start_host_based_bangradar(dfs); + } + } else { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "bangradar_params is NULL"); + } + + break; + case DFS_GET_THRESH: + if (!outdata || !outsize || + *outsize < sizeof(struct dfs_ioctl_params)) { + error = -EINVAL; + break; + } + *outsize = sizeof(struct dfs_ioctl_params); + dfsparams = (struct dfs_ioctl_params *) outdata; + + qdf_mem_zero(&peout, sizeof(struct wlan_dfs_phyerr_param)); + + /* Fetch the DFS thresholds using the internal representation */ + (void) dfs_get_thresholds(dfs, &peout); + + /* Convert them to the dfs IOCTL representation. */ + wlan_dfs_dfsparam_to_ioctlparam(&peout, dfsparams); + break; + case DFS_RADARDETECTS: + if (!outdata || !outsize || *outsize < sizeof(uint32_t)) { + error = -EINVAL; + break; + } + *outsize = sizeof(uint32_t); + *((uint32_t *)outdata) = dfs->wlan_dfs_stats.num_radar_detects; + break; + case DFS_DISABLE_DETECT: + dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN; + dfs->dfs_proc_phyerr &= ~DFS_SECOND_SEGMENT_RADAR_EN; + dfs->dfs_ignore_dfs = 1; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "enable detects, ignore_dfs %d", + dfs->dfs_ignore_dfs ? 1:0); + break; + case DFS_ENABLE_DETECT: + dfs->dfs_proc_phyerr |= DFS_RADAR_EN; + dfs->dfs_proc_phyerr |= DFS_SECOND_SEGMENT_RADAR_EN; + dfs->dfs_ignore_dfs = 0; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS + , "enable detects, ignore_dfs %d", + dfs->dfs_ignore_dfs ? 1:0); + break; + case DFS_DISABLE_FFT: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "TODO disable FFT val=0x%x", val); + break; + case DFS_ENABLE_FFT: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "TODO enable FFT val=0x%x", val); + break; + case DFS_SET_DEBUG_LEVEL: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + dfs->dfs_debug_mask = *(uint32_t *)indata; + + /* Do not allow user to set the ALWAYS/MAX bit. + * It will be used internally by dfs print macro(s) + * to print messages when dfs is NULL. + */ + dfs->dfs_debug_mask &= ~(WLAN_DEBUG_DFS_ALWAYS); + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "debug level now = 0x%x", dfs->dfs_debug_mask); + if (dfs->dfs_debug_mask & WLAN_DEBUG_DFS3) { + /* Enable debug Radar Event */ + dfs->dfs_event_log_on = 1; + } else if ((utils_get_dfsdomain(dfs->dfs_pdev_obj) == + DFS_FCC_DOMAIN) && + lmac_is_host_dfs_check_support_enabled(dfs->dfs_pdev_obj)) { + dfs->dfs_event_log_on = 1; + } else { + dfs->dfs_event_log_on = 0; + } + break; + case DFS_SET_FALSE_RSSI_THRES: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + dfs->wlan_dfs_false_rssi_thres = *(uint32_t *)indata; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "false RSSI threshold now = 0x%x", + dfs->wlan_dfs_false_rssi_thres); + break; + case DFS_SET_PEAK_MAG: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + dfs->wlan_dfs_peak_mag = *(uint32_t *)indata; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "peak_mag now = 0x%x", + dfs->wlan_dfs_peak_mag); + break; + case DFS_GET_CAC_VALID_TIME: + if (!outdata || !outsize || *outsize < sizeof(uint32_t)) { + error = -EINVAL; + break; + } + *outsize = sizeof(uint32_t); + *((uint32_t *)outdata) = dfs->dfs_cac_valid_time; + break; + case DFS_SET_CAC_VALID_TIME: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + dfs->dfs_cac_valid_time = *(uint32_t *)indata; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "dfs timeout = %d", dfs->dfs_cac_valid_time); + break; + case DFS_IGNORE_CAC: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + + if (*(uint32_t *)indata) + dfs->dfs_ignore_cac = 1; + else + dfs->dfs_ignore_cac = 0; + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "ignore cac = 0x%x", dfs->dfs_ignore_cac); + break; + case DFS_SET_NOL_TIMEOUT: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + if (*(int *)indata) + dfs->wlan_dfs_nol_timeout = *(int *)indata; + else + dfs->wlan_dfs_nol_timeout = DFS_NOL_TIMEOUT_S; + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "nol timeout = %d sec", + dfs->wlan_dfs_nol_timeout); + break; + case DFS_MUTE_TIME: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + data = (uint32_t *) indata; + dfs->wlan_dfstesttime = *data; + dfs->wlan_dfstesttime *= (1000); /* convert sec into ms */ + break; + case DFS_GET_USENOL: + if (!outdata || !outsize || *outsize < sizeof(uint32_t)) { + error = -EINVAL; + break; + } + *outsize = sizeof(uint32_t); + *((uint32_t *)outdata) = dfs->dfs_use_nol; + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "#Phyerr=%d, #false detect=%d, #queued=%d", + dfs->dfs_phyerr_count, + dfs->dfs_phyerr_reject_count, + dfs->dfs_phyerr_queued_count); + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "dfs_phyerr_freq_min=%d, dfs_phyerr_freq_max=%d", + dfs->dfs_phyerr_freq_min, + dfs->dfs_phyerr_freq_max); + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Total radar events detected=%d, entries in the radar queue follows:", + dfs->dfs_event_log_count); + + for (i = 0; (i < DFS_EVENT_LOG_SIZE) && + (i < dfs->dfs_event_log_count); i++) { +#define FREQ_OFFSET1 ((int)dfs->radar_log[i].freq_offset_khz / 1000) +#define FREQ_OFFSET2 ((int)abs(dfs->radar_log[i].freq_offset_khz) % 1000) + dfs_debug(dfs, WLAN_DEBUG_DFS, + "ts=%llu diff_ts=%u rssi=%u dur=%u, is_chirp=%d, seg_id=%d, sidx=%d, freq_offset=%d.%dMHz, peak_mag=%d, total_gain=%d, mb_gain=%d, relpwr_db=%d, delta_diff=%d, delta_peak=%d, psidx_diff=%d\n", + dfs->radar_log[i].ts, + dfs->radar_log[i].diff_ts, + dfs->radar_log[i].rssi, + dfs->radar_log[i].dur, + dfs->radar_log[i].is_chirp, + dfs->radar_log[i].seg_id, + dfs->radar_log[i].sidx, + FREQ_OFFSET1, + FREQ_OFFSET2, + dfs->radar_log[i].peak_mag, + dfs->radar_log[i].total_gain, + dfs->radar_log[i].mb_gain, + dfs->radar_log[i].relpwr_db, + dfs->radar_log[i].delta_diff, + dfs->radar_log[i].delta_peak, + dfs->radar_log[i].psidx_diff); + } + dfs->dfs_event_log_count = 0; + dfs->dfs_phyerr_count = 0; + dfs->dfs_phyerr_reject_count = 0; + dfs->dfs_phyerr_queued_count = 0; + dfs->dfs_phyerr_freq_min = 0x7fffffff; + dfs->dfs_phyerr_freq_max = 0; + break; + case DFS_SET_USENOL: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + dfs->dfs_use_nol = *(uint32_t *)indata; + usenol_pdev_param = dfs->dfs_use_nol; + if (dfs->dfs_is_offload_enabled) { + if (dfs->dfs_use_nol == + USENOL_ENABLE_NOL_HOST_DISABLE_NOL_FW) + usenol_pdev_param = DISABLE_NOL_FW; + tgt_dfs_send_usenol_pdev_param(dfs->dfs_pdev_obj, + usenol_pdev_param); + } + break; + case DFS_SET_DISABLE_RADAR_MARKING: + if (dfs->dfs_is_offload_enabled && + (utils_get_dfsdomain(dfs->dfs_pdev_obj) == + DFS_FCC_DOMAIN)) { + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + dfs_set_disable_radar_marking(dfs, *(uint8_t *)indata); + } + break; + case DFS_GET_DISABLE_RADAR_MARKING: + if (!outdata || !outsize || *outsize < sizeof(uint8_t)) { + error = -EINVAL; + break; + } + if (dfs->dfs_is_offload_enabled) { + *outsize = sizeof(uint8_t); + *((uint8_t *)outdata) = + dfs_get_disable_radar_marking(dfs); + } + break; + case DFS_GET_NOL: + if (!outdata || !outsize || + *outsize < sizeof(struct dfsreq_nolinfo)) { + error = -EINVAL; + break; + } + *outsize = sizeof(struct dfsreq_nolinfo); + nol = (struct dfsreq_nolinfo *)outdata; + DFS_GET_NOL_LOCKED(dfs, + (struct dfsreq_nolelem *)nol->dfs_nol, + &nol->dfs_ch_nchans); + DFS_PRINT_NOL_LOCKED(dfs); + break; + case DFS_SET_NOL: + if (insize < sizeof(struct dfsreq_nolinfo) || !indata) { + error = -EINVAL; + break; + } + nol = (struct dfsreq_nolinfo *) indata; + dfs_set_nol(dfs, + (struct dfsreq_nolelem *)nol->dfs_nol, + nol->dfs_ch_nchans); + break; + case DFS_SHOW_NOL: + DFS_PRINT_NOL_LOCKED(dfs); + break; + case DFS_SHOW_NOLHISTORY: + dfs_print_nolhistory(dfs); + break; + case DFS_SHOW_PRECAC_LISTS: + dfs_print_precaclists(dfs); + break; + case DFS_RESET_PRECAC_LISTS: + dfs_reset_precac_lists(dfs); + break; + case DFS_INJECT_SEQUENCE: + error = dfs_inject_synthetic_pulse_sequence(dfs, indata); + if (error) + dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Not injected Synthetic pulse"); + break; + + case DFS_ALLOW_HW_PULSES: + if (insize < sizeof(u_int8_t) || !indata) { + error = -EINVAL; + break; + } + dfs_allow_hw_pulses(dfs, !!(*(u_int8_t *)indata)); + break; + case DFS_SET_PRI_MULTIPILER: + dfs->dfs_pri_multiplier = *(int *)indata; + dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Set dfs pri multiplier to %d, dfsdomain %d", + dfs->dfs_pri_multiplier, dfs->dfsdomain); + break; + default: + error = -EINVAL; + } + +bad: + return error; +} + +/** + * dfs_is_curchan_same_as_given_chan() - Find if dfs_curchan has the same + * channel parameters provided. + * @dfs_curchan: Pointer to DFS current channel structure. + * @dfs_ch_freq: New curchan's primary frequency. + * @dfs_ch_flags: New curchan's channel flags. + * @dfs_ch_flagext: New curchan's channel flags extension. + * @dfs_ch_vhtop_ch_freq_seg1: New curchan's primary centre IEEE. + * @dfs_ch_vhtop_ch_freq_seg2: New curchan's secondary centre IEEE. + * + * Return: True if curchan has the same channel parameters of the given channel, + * else false. + */ +static bool +dfs_is_curchan_same_as_given_chan(struct dfs_channel *dfs_curchan, + uint16_t dfs_ch_freq, + uint64_t dfs_ch_flags, + uint16_t dfs_ch_flagext, + uint8_t dfs_ch_vhtop_ch_freq_seg1, + uint8_t dfs_ch_vhtop_ch_freq_seg2) +{ + if ((dfs_curchan->dfs_ch_freq == dfs_ch_freq) && + (dfs_curchan->dfs_ch_flags == dfs_ch_flags) && + (dfs_curchan->dfs_ch_flagext == dfs_ch_flagext) && + (dfs_curchan->dfs_ch_vhtop_ch_freq_seg1 == + dfs_ch_vhtop_ch_freq_seg1) && + (dfs_curchan->dfs_ch_vhtop_ch_freq_seg2 == + dfs_ch_vhtop_ch_freq_seg2)) + return true; + + return false; +} + +#ifdef CONFIG_CHAN_NUM_API +void dfs_set_current_channel(struct wlan_dfs *dfs, + uint16_t dfs_ch_freq, + uint64_t dfs_ch_flags, + uint16_t dfs_ch_flagext, + uint8_t dfs_ch_ieee, + uint8_t dfs_ch_vhtop_ch_freq_seg1, + uint8_t dfs_ch_vhtop_ch_freq_seg2) +{ + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + if (!dfs->dfs_curchan) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_curchan is NULL"); + return; + } + + /* Check if the input parameters are the same as that of dfs_curchan */ + if (dfs_is_curchan_same_as_given_chan(dfs->dfs_curchan, + dfs_ch_freq, + dfs_ch_flags, + dfs_ch_flagext, + dfs_ch_vhtop_ch_freq_seg1, + dfs_ch_vhtop_ch_freq_seg2)) { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "dfs_curchan already updated"); + return; + } + + /* Update dfs previous channel with the old dfs_curchan, if it exists */ + if (dfs->dfs_curchan->dfs_ch_freq) + qdf_mem_copy(dfs->dfs_prevchan, + dfs->dfs_curchan, + sizeof(struct dfs_channel)); + + dfs->dfs_curchan->dfs_ch_freq = dfs_ch_freq; + dfs->dfs_curchan->dfs_ch_flags = dfs_ch_flags; + dfs->dfs_curchan->dfs_ch_flagext = dfs_ch_flagext; + dfs->dfs_curchan->dfs_ch_ieee = dfs_ch_ieee; + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg1 = dfs_ch_vhtop_ch_freq_seg1; + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg2 = dfs_ch_vhtop_ch_freq_seg2; +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +void dfs_set_current_channel_for_freq(struct wlan_dfs *dfs, + uint16_t dfs_chan_freq, + uint64_t dfs_chan_flags, + uint16_t dfs_chan_flagext, + uint8_t dfs_chan_ieee, + uint8_t dfs_chan_vhtop_freq_seg1, + uint8_t dfs_chan_vhtop_freq_seg2, + uint16_t dfs_chan_mhz_freq_seg1, + uint16_t dfs_chan_mhz_freq_seg2) + +{ + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + /* Check if the input parameters are the same as that of dfs_curchan */ + if (dfs_is_curchan_same_as_given_chan(dfs->dfs_curchan, + dfs_chan_freq, + dfs_chan_flags, + dfs_chan_flagext, + dfs_chan_vhtop_freq_seg1, + dfs_chan_vhtop_freq_seg2)) { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "dfs_curchan already updated"); + return; + } + + /* Update dfs previous channel with the old dfs_curchan, if it exists */ + if (dfs->dfs_curchan->dfs_ch_freq) + qdf_mem_copy(dfs->dfs_prevchan, + dfs->dfs_curchan, + sizeof(struct dfs_channel)); + + dfs->dfs_curchan->dfs_ch_freq = dfs_chan_freq; + dfs->dfs_curchan->dfs_ch_flags = dfs_chan_flags; + dfs->dfs_curchan->dfs_ch_flagext = dfs_chan_flagext; + dfs->dfs_curchan->dfs_ch_ieee = dfs_chan_ieee; + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg1 = dfs_chan_vhtop_freq_seg1; + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg2 = dfs_chan_vhtop_freq_seg2; + dfs->dfs_curchan->dfs_ch_mhz_freq_seg1 = dfs_chan_mhz_freq_seg1; + dfs->dfs_curchan->dfs_ch_mhz_freq_seg2 = dfs_chan_mhz_freq_seg2; +} +#endif + +void dfs_update_cur_chan_flags(struct wlan_dfs *dfs, + uint64_t flags, + uint16_t flagext) +{ + dfs->dfs_curchan->dfs_ch_flags = flags; + dfs->dfs_curchan->dfs_ch_flagext = flagext; +} + +int dfs_reinit_timers(struct wlan_dfs *dfs) +{ + dfs_cac_timer_attach(dfs); + dfs_zero_cac_timer_init(dfs->dfs_soc_obj); + dfs_nol_timer_init(dfs); + dfs_main_task_testtimer_init(dfs); + return 0; +} + +void dfs_reset_dfs_prevchan(struct wlan_dfs *dfs) +{ + qdf_mem_zero(dfs->dfs_prevchan, sizeof(struct dfs_channel)); +} + +bool dfs_is_hw_mode_switch_in_progress(struct wlan_dfs *dfs) +{ + return lmac_dfs_is_hw_mode_switch_in_progress(dfs->dfs_pdev_obj); +} + +void dfs_complete_deferred_tasks(struct wlan_dfs *dfs) +{ + if (dfs->dfs_defer_params.is_radar_detected) { + /* Handle radar event that was deferred and free the temporary + * storage of the radar event parameters. + */ + dfs_process_radar_ind(dfs, dfs->dfs_defer_params.radar_params); + qdf_mem_free(dfs->dfs_defer_params.radar_params); + dfs->dfs_defer_params.is_radar_detected = false; + } else if (dfs->dfs_defer_params.is_cac_completed) { + /* Handle CAC completion event that was deferred for HW mode + * switch. + */ + dfs_process_cac_completion(dfs); + dfs->dfs_defer_params.is_cac_completed = false; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_cac.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_cac.c new file mode 100644 index 0000000000000000000000000000000000000000..0bbeb00858ce23851bcbb15b03edee2c889696bd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_cac.c @@ -0,0 +1,583 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * DOC: This file has the functions related to DFS CAC. + */ + +#include "../dfs_channel.h" +#include "../dfs_zero_cac.h" +#include +#include "wlan_dfs_utils_api.h" +#include "wlan_dfs_mlme_api.h" +#include "../dfs_internal.h" +#include "../dfs_process_radar_found_ind.h" + +#define IS_CHANNEL_WEATHER_RADAR(freq) ((freq >= 5600) && (freq <= 5650)) +#define ADJACENT_WEATHER_RADAR_CHANNEL 5580 +#define CH100_START_FREQ 5490 +#define CH100 100 + +int dfs_override_cac_timeout(struct wlan_dfs *dfs, int cac_timeout) +{ + if (!dfs) + return -EIO; + + dfs->dfs_cac_timeout_override = cac_timeout; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "CAC timeout is now %s %d", + (cac_timeout == -1) ? "default" : "overridden", + cac_timeout); + + return 0; +} + +int dfs_get_override_cac_timeout(struct wlan_dfs *dfs, int *cac_timeout) +{ + if (!dfs) + return -EIO; + + (*cac_timeout) = dfs->dfs_cac_timeout_override; + + return 0; +} + +#ifdef CONFIG_CHAN_NUM_API +void dfs_cac_valid_reset(struct wlan_dfs *dfs, + uint8_t prevchan_ieee, + uint32_t prevchan_flags) +{ + if (dfs->dfs_cac_valid_time) { + if ((prevchan_ieee != dfs->dfs_curchan->dfs_ch_ieee) || + (prevchan_flags != dfs->dfs_curchan->dfs_ch_flags)) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Cancelling timer & clearing cac_valid" + ); + qdf_timer_stop(&dfs->dfs_cac_valid_timer); + dfs->dfs_cac_valid = 0; + } + } +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +void dfs_cac_valid_reset_for_freq(struct wlan_dfs *dfs, + uint16_t prevchan_freq, + uint32_t prevchan_flags) +{ + if (dfs->dfs_cac_valid_time) { + if ((prevchan_freq != dfs->dfs_curchan->dfs_ch_freq) || + (prevchan_flags != dfs->dfs_curchan->dfs_ch_flags)) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Cancelling timer & clearing cac_valid"); + qdf_timer_stop(&dfs->dfs_cac_valid_timer); + dfs->dfs_cac_valid = 0; + } + } +} +#endif + +/** + * dfs_cac_valid_timeout() - Timeout function for dfs_cac_valid_timer + * cac_valid bit will be reset in this function. + */ +static os_timer_func(dfs_cac_valid_timeout) +{ + struct wlan_dfs *dfs = NULL; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + dfs->dfs_cac_valid = 0; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, ": Timed out!!"); +} + +/** + * dfs_clear_cac_started_chan() - Clear dfs cac started channel. + * @dfs: Pointer to wlan_dfs structure. + */ +static void dfs_clear_cac_started_chan(struct wlan_dfs *dfs) +{ + qdf_mem_zero(&dfs->dfs_cac_started_chan, + sizeof(dfs->dfs_cac_started_chan)); +} + +void dfs_process_cac_completion(struct wlan_dfs *dfs) +{ + enum phy_ch_width ch_width = CH_WIDTH_INVALID; + uint16_t primary_chan_freq = 0, secondary_chan_freq = 0; + struct dfs_channel *dfs_curchan; + + dfs->dfs_cac_timer_running = 0; + dfs_curchan = dfs->dfs_curchan; + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "cac expired, chan %d cur time %d", + dfs->dfs_curchan->dfs_ch_freq, + (qdf_system_ticks_to_msecs(qdf_system_ticks()) / 1000)); + + /* + * When radar is detected during a CAC we are woken up prematurely to + * switch to a new channel. Check the channel to decide how to act. + */ + if (WLAN_IS_CHAN_RADAR(dfs->dfs_curchan)) { + dfs_mlme_mark_dfs_for_freq(dfs->dfs_pdev_obj, + dfs_curchan->dfs_ch_ieee, + dfs_curchan->dfs_ch_freq, + dfs_curchan->dfs_ch_mhz_freq_seg2, + dfs_curchan->dfs_ch_flags); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "CAC timer on chan %u (%u MHz) stopped due to radar", + dfs_curchan->dfs_ch_ieee, + dfs_curchan->dfs_ch_freq); + } else { + dfs_debug(dfs, WLAN_DEBUG_DFS, + "CAC timer on channel %u (%u MHz) expired;" + "no radar detected", + dfs_curchan->dfs_ch_ieee, + dfs_curchan->dfs_ch_freq); + + /* On CAC completion, set the bit 'cac_valid'. + * CAC will not be re-done if this bit is reset. + * The flag will be reset when dfs_cac_valid_timer + * timesout. + */ + if (dfs->dfs_cac_valid_time) { + dfs->dfs_cac_valid = 1; + qdf_timer_mod(&dfs->dfs_cac_valid_timer, + dfs->dfs_cac_valid_time * 1000); + } + + dfs_find_chwidth_and_center_chan_for_freq(dfs, + &ch_width, + &primary_chan_freq, + &secondary_chan_freq); + /* Mark the current channel as preCAC done */ + dfs_mark_precac_done_for_freq(dfs, primary_chan_freq, + secondary_chan_freq, ch_width); + } + + dfs_clear_cac_started_chan(dfs); + /* Iterate over the nodes, processing the CAC completion event. */ + dfs_mlme_proc_cac(dfs->dfs_pdev_obj, 0); + + /* Send a CAC timeout, VAP up event to user space */ + dfs_mlme_deliver_event_up_after_cac(dfs->dfs_pdev_obj); + + if (dfs->dfs_defer_precac_channel_change == 1) { + dfs_mlme_channel_change_by_precac(dfs->dfs_pdev_obj); + dfs->dfs_defer_precac_channel_change = 0; + } +} + +/** + * dfs_cac_timeout() - DFS cactimeout function. + * + * Sets dfs_cac_timer_running to 0 and dfs_cac_valid_timer. + */ +#ifdef CONFIG_CHAN_FREQ_API +static os_timer_func(dfs_cac_timeout) +{ + struct wlan_dfs *dfs = NULL; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + + if (dfs_is_hw_mode_switch_in_progress(dfs)) + dfs->dfs_defer_params.is_cac_completed = true; + else + dfs_process_cac_completion(dfs); +} +#else +#ifdef CONFIG_CHAN_NUM_API +static os_timer_func(dfs_cac_timeout) +{ + struct wlan_dfs *dfs = NULL; + enum phy_ch_width ch_width = CH_WIDTH_INVALID; + uint8_t primary_chan_ieee = 0, secondary_chan_ieee = 0; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + dfs->dfs_cac_timer_running = 0; + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "cac expired, chan %d curr time %d", + dfs->dfs_curchan->dfs_ch_freq, + (qdf_system_ticks_to_msecs(qdf_system_ticks()) / 1000)); + + /* + * When radar is detected during a CAC we are woken up prematurely to + * switch to a new channel. Check the channel to decide how to act. + */ + if (WLAN_IS_CHAN_RADAR(dfs->dfs_curchan)) { + dfs_mlme_mark_dfs(dfs->dfs_pdev_obj, + dfs->dfs_curchan->dfs_ch_ieee, + dfs->dfs_curchan->dfs_ch_freq, + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg2, + dfs->dfs_curchan->dfs_ch_flags); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "CAC timer on channel %u (%u MHz) stopped due to radar", + dfs->dfs_curchan->dfs_ch_ieee, + dfs->dfs_curchan->dfs_ch_freq); + } else { + dfs_debug(dfs, WLAN_DEBUG_DFS, + "CAC timer on channel %u (%u MHz) expired; no radar detected", + dfs->dfs_curchan->dfs_ch_ieee, + dfs->dfs_curchan->dfs_ch_freq); + + /* On CAC completion, set the bit 'cac_valid'. + * CAC will not be re-done if this bit is reset. + * The flag will be reset when dfs_cac_valid_timer + * timesout. + */ + if (dfs->dfs_cac_valid_time) { + dfs->dfs_cac_valid = 1; + qdf_timer_mod(&dfs->dfs_cac_valid_timer, + dfs->dfs_cac_valid_time * 1000); + } + + dfs_find_chwidth_and_center_chan(dfs, + &ch_width, + &primary_chan_ieee, + &secondary_chan_ieee); + /* Mark the current channel as preCAC done */ + dfs_mark_precac_done(dfs, primary_chan_ieee, + secondary_chan_ieee, ch_width); + } + + dfs_clear_cac_started_chan(dfs); + /* Iterate over the nodes, processing the CAC completion event. */ + dfs_mlme_proc_cac(dfs->dfs_pdev_obj, 0); + + /* Send a CAC timeout, VAP up event to user space */ + dfs_mlme_deliver_event_up_after_cac(dfs->dfs_pdev_obj); + + if (dfs->dfs_defer_precac_channel_change == 1) { + dfs_mlme_channel_change_by_precac(dfs->dfs_pdev_obj); + dfs->dfs_defer_precac_channel_change = 0; + } +} +#endif +#endif + +void dfs_cac_timer_attach(struct wlan_dfs *dfs) +{ + dfs->dfs_cac_timeout_override = -1; + dfs->wlan_dfs_cac_time = WLAN_DFS_WAIT_MS; + qdf_timer_init(NULL, + &(dfs->dfs_cac_timer), + dfs_cac_timeout, + (void *)(dfs), + QDF_TIMER_TYPE_WAKE_APPS); + + qdf_timer_init(NULL, + &(dfs->dfs_cac_valid_timer), + dfs_cac_valid_timeout, + (void *)(dfs), + QDF_TIMER_TYPE_WAKE_APPS); +} + +void dfs_cac_timer_reset(struct wlan_dfs *dfs) +{ + qdf_timer_stop(&dfs->dfs_cac_timer); + dfs_get_override_cac_timeout(dfs, + &(dfs->dfs_cac_timeout_override)); + dfs_clear_cac_started_chan(dfs); +} + +void dfs_cac_timer_detach(struct wlan_dfs *dfs) +{ + qdf_timer_free(&dfs->dfs_cac_timer); + + qdf_timer_free(&dfs->dfs_cac_valid_timer); + dfs->dfs_cac_valid = 0; +} + +int dfs_is_ap_cac_timer_running(struct wlan_dfs *dfs) +{ + return dfs->dfs_cac_timer_running; +} + +#ifdef CONFIG_CHAN_FREQ_API +void dfs_start_cac_timer(struct wlan_dfs *dfs) +{ + int cac_timeout = 0; + struct dfs_channel *chan = dfs->dfs_curchan; + + cac_timeout = + dfs_mlme_get_cac_timeout_for_freq(dfs->dfs_pdev_obj, + chan->dfs_ch_freq, + chan->dfs_ch_mhz_freq_seg2, + chan->dfs_ch_flags); + + dfs->dfs_cac_started_chan = *chan; + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "chan = %d cfreq2 = %d timeout = %d sec, curr_time = %d sec", + chan->dfs_ch_ieee, chan->dfs_ch_vhtop_ch_freq_seg2, + cac_timeout, + qdf_system_ticks_to_msecs(qdf_system_ticks()) / 1000); + + qdf_timer_mod(&dfs->dfs_cac_timer, cac_timeout * 1000); + dfs->dfs_cac_aborted = 0; +} +#else +#ifdef CONFIG_CHAN_NUM_API +void dfs_start_cac_timer(struct wlan_dfs *dfs) +{ + int cac_timeout = 0; + struct dfs_channel *chan = dfs->dfs_curchan; + + cac_timeout = dfs_mlme_get_cac_timeout(dfs->dfs_pdev_obj, + chan->dfs_ch_freq, + chan->dfs_ch_vhtop_ch_freq_seg2, + chan->dfs_ch_flags); + + dfs->dfs_cac_started_chan = *chan; + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "chan = %d cfreq2 = %d timeout = %d sec, curr_time = %d sec", + chan->dfs_ch_ieee, chan->dfs_ch_vhtop_ch_freq_seg2, + cac_timeout, + qdf_system_ticks_to_msecs(qdf_system_ticks()) / 1000); + + qdf_timer_mod(&dfs->dfs_cac_timer, cac_timeout * 1000); + dfs->dfs_cac_aborted = 0; +} +#endif +#endif + +void dfs_cancel_cac_timer(struct wlan_dfs *dfs) +{ + qdf_timer_stop(&dfs->dfs_cac_timer); + dfs_clear_cac_started_chan(dfs); +} + +void dfs_cac_stop(struct wlan_dfs *dfs) +{ + uint32_t phyerr; + + dfs_get_debug_info(dfs, (void *)&phyerr); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Stopping CAC Timer %d procphyerr 0x%08x", + dfs->dfs_curchan->dfs_ch_freq, phyerr); + qdf_timer_stop(&dfs->dfs_cac_timer); + if (dfs->dfs_cac_timer_running) + dfs->dfs_cac_aborted = 1; + dfs_clear_cac_started_chan(dfs); + dfs->dfs_cac_timer_running = 0; +} + +void dfs_stacac_stop(struct wlan_dfs *dfs) +{ + uint32_t phyerr; + + dfs_get_debug_info(dfs, (void *)&phyerr); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Stopping STA CAC Timer %d procphyerr 0x%08x", + dfs->dfs_curchan->dfs_ch_freq, phyerr); + dfs_clear_cac_started_chan(dfs); +} + +/* + * dfs_is_subset_channel_for_freq() - Find out if prev channel and current + * channel are subsets of each other. + * @old_subchans_freq: Pointer to previous sub-channels freq. + * @old_n_chans: Number of previous sub-channels. + * @new_subchans_freq: Pointer to new sub-channels freq. + * @new_n_chans: Number of new sub-channels + */ +#ifdef CONFIG_CHAN_FREQ_API +static bool +dfs_is_subset_channel_for_freq(uint16_t *old_subchans_freq, + uint8_t old_n_chans, + uint16_t *new_subchans_freq, + uint8_t new_n_chans) +{ + bool is_found; + int i, j; + + if (!new_n_chans) + return true; + + if (new_n_chans > old_n_chans) + return false; + + for (i = 0; i < new_n_chans; i++) { + is_found = false; + for (j = 0; j < old_n_chans; j++) { + if (new_subchans_freq[i] == old_subchans_freq[j]) { + is_found = true; + break; + } + } + + /* If new_subchans[i] is not found in old_subchans, then, + * new_chan is not subset of old_chan. + */ + if (!is_found) + break; + } + + return is_found; +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +static uint8_t +dfs_find_dfs_sub_channels_for_freq(struct wlan_dfs *dfs, + struct dfs_channel *chan, + uint16_t *subchan_arr) +{ + if (WLAN_IS_CHAN_MODE_160(chan) || WLAN_IS_CHAN_MODE_80_80(chan)) { + if (WLAN_IS_CHAN_DFS(chan) && WLAN_IS_CHAN_DFS_CFREQ2(chan)) + return dfs_get_bonding_channel_without_seg_info_for_freq + (chan, subchan_arr); + if (WLAN_IS_CHAN_DFS(chan)) + return dfs_get_bonding_channels_for_freq(dfs, + chan, + SEG_ID_PRIMARY, + DETECTOR_ID_0, + subchan_arr); + if (WLAN_IS_CHAN_DFS_CFREQ2(chan)) + return dfs_get_bonding_channels_for_freq + (dfs, chan, SEG_ID_SECONDARY, + DETECTOR_ID_0, subchan_arr); + /* All channels in 160/80_80 BW are non DFS, return 0 + * as number of subchannels + */ + return 0; + } else if (WLAN_IS_CHAN_DFS(chan)) { + return dfs_get_bonding_channel_without_seg_info_for_freq + (chan, subchan_arr); + } + /* All channels are non DFS, return 0 as number of subchannels*/ + return 0; +} +#endif + +/* dfs_is_new_chan_subset_of_old_chan() - Find if new channel is subset of + * old channel. + * @dfs: Pointer to wlan_dfs structure. + * @new_chan: Pointer to new channel of dfs_channel structure. + * @old_chan: Pointer to old channel of dfs_channel structure. + * + * Return: True if new channel is subset of old channel, else false. + */ +#ifdef CONFIG_CHAN_FREQ_API +static bool +dfs_is_new_chan_subset_of_old_chan(struct wlan_dfs *dfs, + struct dfs_channel *new_chan, + struct dfs_channel *old_chan) +{ + uint16_t new_subchans[NUM_CHANNELS_160MHZ]; + uint16_t old_subchans[NUM_CHANNELS_160MHZ]; + uint8_t n_new_subchans = 0; + uint8_t n_old_subchans = 0; + + /* Given channel is the old channel. i.e. The channel which + * should have the new channel as subset. + */ + n_old_subchans = dfs_find_dfs_sub_channels_for_freq(dfs, old_chan, + old_subchans); + /* cur_chan is the new channel to be check if subset of old channel */ + n_new_subchans = dfs_find_dfs_sub_channels_for_freq(dfs, new_chan, + new_subchans); + + return dfs_is_subset_channel_for_freq(old_subchans, + n_old_subchans, + new_subchans, + n_new_subchans); +} +#endif + +bool dfs_is_cac_required(struct wlan_dfs *dfs, + struct dfs_channel *cur_chan, + struct dfs_channel *prev_chan, + bool *continue_current_cac) +{ + struct dfs_channel *cac_started_chan = &dfs->dfs_cac_started_chan; + + if (dfs->dfs_ignore_dfs || dfs->dfs_cac_valid || dfs->dfs_ignore_cac) { + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Skip CAC, ignore_dfs = %d cac_valid = %d ignore_cac = %d", + dfs->dfs_ignore_dfs, dfs->dfs_cac_valid, + dfs->dfs_ignore_cac); + return false; + } + + /* If the channel has completed PRE-CAC then CAC can be skipped here. */ + if (dfs_is_precac_done(dfs, cur_chan)) { + dfs_debug(dfs, WLAN_DEBUG_DFS, + "PRE-CAC alreay done on this channel %d", + cur_chan->dfs_ch_ieee); + return false; + } + + if (dfs_is_ap_cac_timer_running(dfs)) { + /* Check if we should continue the existing CAC or + * cancel the existing CAC. + * For example: - if an existing VAP(0) is already in + * DFS wait state (which means the radio(wifi) is + * running the CAC) and it is in channel A and another + * VAP(1) comes up in the same channel then instead of + * cancelling the CAC we can let the CAC continue. + */ + if (dfs_is_new_chan_subset_of_old_chan(dfs, + cur_chan, + cac_started_chan)) { + *continue_current_cac = true; + } else { + /* New CAC is needed, cancel the running CAC + * timer. + * 1) When AP is in DFS_WAIT state and it is in + * channel A and user restarts the AP vap in + * channel B, then cancel the running CAC in + * channel A and start new CAC in channel B. + * + * 2) When AP detects the RADAR during CAC in + * channel A, it cancels the running CAC and + * tries to find channel B with the reduced + * bandwidth with of channel A. + * In this case, since the CAC is aborted by + * the RADAR, AP should start the CAC again. + */ + dfs_cancel_cac_timer(dfs); + } + } else { /* CAC timer is not running. */ + if (dfs_is_new_chan_subset_of_old_chan(dfs, + cur_chan, + prev_chan)) { + /* AP bandwidth reduce case: + * When AP detects the RADAR in in-service monitoring + * mode in channel A, it cancels the running CAC and + * tries to find the channel B with the reduced + * bandwidth of channel A. + * If the new channel B is subset of the channel A + * then AP skips the CAC. + */ + if (!dfs->dfs_cac_aborted) { + dfs_debug(dfs, WLAN_DEBUG_DFS, "Skip CAC"); + return false; + } + } + } + + return true; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_filter_init.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_filter_init.c new file mode 100644 index 0000000000000000000000000000000000000000..01f3b821d370a5806aa0e53dd5b0b8095e1b7f8b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_filter_init.c @@ -0,0 +1,423 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2006, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains the dfs_attach() and dfs_detach() functions as well + * as the dfs_control() function which is used to process ioctls related to DFS. + * For Linux/Mac, "radartool" is the command line tool that can be used to call + * various ioctls to set and get radar detection thresholds. + */ + +#include "../dfs_zero_cac.h" +#include "wlan_dfs_lmac_api.h" +#include "wlan_dfs_mlme_api.h" +#include "wlan_dfs_tgt_api.h" +#include "../dfs_internal.h" +#include "../dfs_filter_init.h" +#include "../dfs_partial_offload_radar.h" + +#ifndef WLAN_DFS_STATIC_MEM_ALLOC +/* + * dfs_alloc_dfs_events() - allocate dfs events buffer + * + * Return: events buffer, null on failure. + */ +static inline struct dfs_event *dfs_alloc_dfs_events(void) +{ + return qdf_mem_malloc(sizeof(struct dfs_event) * DFS_MAX_EVENTS); +} + +/* + * dfs_free_dfs_events() - Free events buffer + * @events: Events buffer pointer + * + * Return: None + */ +static inline void dfs_free_dfs_events(struct dfs_event *events) +{ + qdf_mem_free(events); +} + +/* + * dfs_alloc_dfs_pulseline() - allocate buffer for dfs pulses + * + * Return: events buffer, null on failure. + */ +static inline struct dfs_pulseline *dfs_alloc_dfs_pulseline(void) +{ + return qdf_mem_malloc(sizeof(struct dfs_pulseline)); +} + +/* + * dfs_free_dfs_pulseline() - Free pulse buffer + * @pulses: Pulses buffer pointer + * + * Return: None + */ +static inline void dfs_free_dfs_pulseline(struct dfs_pulseline *pulses) +{ + qdf_mem_free(pulses); +} +#else +/* Static buffers for DFS objects */ +static struct dfs_event global_dfs_event[DFS_MAX_EVENTS]; +static struct dfs_pulseline global_dfs_pulseline; + +static inline struct dfs_event *dfs_alloc_dfs_events(void) +{ + return global_dfs_event; +} + +static inline void dfs_free_dfs_events(struct dfs_event *events) +{ +} + +static inline struct dfs_pulseline *dfs_alloc_dfs_pulseline(void) +{ + return &global_dfs_pulseline; +} + +static inline void dfs_free_dfs_pulseline(struct dfs_pulseline *pulses) +{ +} +#endif + +/* + * Channel switch announcement (CSA) + * usenol=1 (default) make CSA and switch to a new channel on radar detect + * usenol=0, make CSA with next channel same as current on radar detect + * usenol=2, no CSA and stay on the same channel on radar detect + */ + +/** + * dfs_task() - The timer function to process the radar pulses. + */ +static os_timer_func(dfs_task) +{ + struct wlan_dfs *dfs = NULL; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + /* Need to take a lock here since dfs filtering data structures are + * freed and re-allocated in dfs_init_radar_filters() during channel + * change which may happen in the middle of dfs pulse processing. + */ + WLAN_DFS_DATA_STRUCT_LOCK(dfs); + dfs_process_radarevent(dfs, dfs->dfs_curchan); + WLAN_DFS_DATA_STRUCT_UNLOCK(dfs); + + dfs->wlan_radar_tasksched = 0; +} + +/** + * dfs_main_task_timer_init() - Initialize dfs task timer. + * @dfs: Pointer to wlan_dfs structure. + */ +static void dfs_main_task_timer_init(struct wlan_dfs *dfs) +{ + qdf_timer_init(NULL, + &(dfs->wlan_dfs_task_timer), + dfs_task, + (void *)(dfs), + QDF_TIMER_TYPE_WAKE_APPS); +} + +/** + * dfs_free_filter() - free memory allocated for dfs ft_filters + * @radarf: pointer holding ft_filters. + * + * Return: None + */ +static void dfs_free_filter(struct dfs_filtertype *radarf) +{ + uint8_t i; + + for (i = 0; i < DFS_MAX_NUM_RADAR_FILTERS; i++) { + if (radarf->ft_filters[i]) { + qdf_mem_free(radarf->ft_filters[i]); + radarf->ft_filters[i] = NULL; + } + } +} + +/** + * dfs_alloc_mem_filter() - allocate memory for dfs ft_filters + * @radarf: pointer holding ft_filters. + * + * Return: QDF_STATUS + */ +static QDF_STATUS dfs_alloc_mem_filter(struct dfs_filtertype *radarf) +{ + uint8_t i; + + for (i = 0; i < DFS_MAX_NUM_RADAR_FILTERS; i++) { + radarf->ft_filters[i] = qdf_mem_malloc(sizeof(struct + dfs_filter)); + if (!radarf->ft_filters[i]) { + /* Free all the filter if malloc failed */ + dfs_free_filter(radarf); + return QDF_STATUS_E_FAILURE; + } + } + + return QDF_STATUS_SUCCESS; +} + +int dfs_main_attach(struct wlan_dfs *dfs) +{ + int i, n; + QDF_STATUS status; + struct wlan_dfs_radar_tab_info radar_info; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return 0; + } + + /* If ignore_dfs is set to 1 then Radar detection is disabled. */ + if (dfs->dfs_ignore_dfs) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, "ignoring dfs"); + return 0; + } + + /* + * Zero out radar_info. It's possible that the attach function + * won't fetch an initial regulatory configuration; you really + * do want to ensure that the contents indicates there aren't + * any filters. + */ + qdf_mem_zero(&radar_info, sizeof(radar_info)); + + lmac_get_caps(dfs->dfs_pdev_obj, &(dfs->dfs_caps)); + + dfs_clear_stats(dfs); + dfs->dfs_event_log_on = 1; + dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS, "event log enabled by default"); + + dfs->dfs_enable = 1; + + /*Verify : Passing NULL to qdf_timer_init().*/ + dfs_main_task_timer_init(dfs); + + dfs_allow_hw_pulses(dfs, true); + dfs_host_wait_timer_init(dfs); + + WLAN_DFSQ_LOCK_CREATE(dfs); + STAILQ_INIT(&dfs->dfs_radarq); + WLAN_ARQ_LOCK_CREATE(dfs); + STAILQ_INIT(&dfs->dfs_arq); + STAILQ_INIT(&(dfs->dfs_eventq)); + WLAN_DFSEVENTQ_LOCK_CREATE(dfs); + WLAN_DFS_DATA_STRUCT_LOCK_CREATE(dfs); + + dfs->events = dfs_alloc_dfs_events(); + if (!(dfs->events)) + return 1; + + for (i = 0; i < DFS_MAX_EVENTS; i++) + STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), &dfs->events[i], + re_list); + + dfs->pulses = dfs_alloc_dfs_pulseline(); + if (!(dfs->pulses)) { + dfs_free_dfs_events(dfs->events); + dfs->events = NULL; + return 1; + } + + dfs->pulses->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK; + + /* Allocate memory for radar filters. */ + for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) { + dfs->dfs_radarf[n] = (struct dfs_filtertype *) + qdf_mem_malloc(sizeof(struct dfs_filtertype)); + if (!(dfs->dfs_radarf[n])) + goto bad1; + + qdf_mem_zero(dfs->dfs_radarf[n], + sizeof(struct dfs_filtertype)); + status = dfs_alloc_mem_filter(dfs->dfs_radarf[n]); + if (!QDF_IS_STATUS_SUCCESS(status)) { + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, + "mem alloc for dfs_filter failed"); + goto bad1; + } + } + + /* Allocate memory for radar table. */ + dfs->dfs_ftindextable = (int8_t **)qdf_mem_malloc( + DFS_NUM_FT_IDX_TBL_ROWS*sizeof(int8_t *)); + if (!(dfs->dfs_ftindextable)) + goto bad1; + + for (n = 0; n < DFS_NUM_FT_IDX_TBL_ROWS; n++) { + dfs->dfs_ftindextable[n] = qdf_mem_malloc( + DFS_MAX_RADAR_OVERLAP*sizeof(int8_t)); + if (!(dfs->dfs_ftindextable[n])) + goto bad2; + } + + dfs->dfs_use_nol = 1; + + /* Init the cached extension channel busy for false alarm reduction */ + dfs->dfs_rinfo.ext_chan_busy_ts = lmac_get_tsf64(dfs->dfs_pdev_obj); + dfs->dfs_rinfo.dfs_ext_chan_busy = 0; + /* Init the Bin5 chirping related data */ + dfs->dfs_rinfo.dfs_bin5_chirp_ts = dfs->dfs_rinfo.ext_chan_busy_ts; + dfs->dfs_rinfo.dfs_last_bin5_dur = MAX_BIN5_DUR; + dfs->dfs_b5radars = NULL; + + /* + * If dfs_init_radar_filters() fails, we can abort here and + * reconfigure when the first valid channel + radar config + * is available. + */ + if (dfs_init_radar_filters(dfs, &radar_info)) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "Radar Filter Intialization Failed"); + return 1; + } + + dfs->wlan_dfs_false_rssi_thres = RSSI_POSSIBLY_FALSE; + dfs->wlan_dfs_peak_mag = SEARCH_FFT_REPORT_PEAK_MAG_THRSH; + dfs->dfs_phyerr_freq_min = 0x7fffffff; + dfs->dfs_phyerr_freq_max = 0; + dfs->dfs_phyerr_queued_count = 0; + dfs->dfs_phyerr_w53_counter = 0; + dfs->dfs_pri_multiplier = 2; + dfs_get_radars(dfs); + + return 0; + +bad2: + qdf_mem_free(dfs->dfs_ftindextable); + dfs->dfs_ftindextable = NULL; +bad1: + for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) { + if (dfs->dfs_radarf[n]) { + dfs_free_filter(dfs->dfs_radarf[n]); + qdf_mem_free(dfs->dfs_radarf[n]); + dfs->dfs_radarf[n] = NULL; + } + } + if (dfs->pulses) { + dfs_free_dfs_pulseline(dfs->pulses); + dfs->pulses = NULL; + } + if (dfs->events) { + dfs_free_dfs_events(dfs->events); + dfs->events = NULL; + } + + return 1; +} + +void dfs_main_timer_reset(struct wlan_dfs *dfs) +{ + if (dfs->wlan_radar_tasksched) { + qdf_timer_sync_cancel(&dfs->wlan_dfs_task_timer); + dfs->wlan_radar_tasksched = 0; + } +} + +void dfs_main_timer_detach(struct wlan_dfs *dfs) +{ + qdf_timer_free(&dfs->wlan_dfs_task_timer); + dfs->wlan_radar_tasksched = 0; +} + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_host_wait_timer_detach(struct wlan_dfs *dfs) +{ + qdf_timer_free(&dfs->dfs_host_wait_timer); +} +#endif + +void dfs_main_detach(struct wlan_dfs *dfs) +{ + int n, empty; + + if (!dfs->dfs_enable) { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "Already detached"); + return; + } + + dfs->dfs_enable = 0; + + dfs_reset_radarq(dfs); + dfs_reset_alldelaylines(dfs); + + if (dfs->pulses) { + dfs_free_dfs_pulseline(dfs->pulses); + dfs->pulses = NULL; + } + + for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) { + if (dfs->dfs_radarf[n]) { + dfs_free_filter(dfs->dfs_radarf[n]); + qdf_mem_free(dfs->dfs_radarf[n]); + dfs->dfs_radarf[n] = NULL; + } + } + + if (dfs->dfs_ftindextable) { + for (n = 0; n < DFS_NUM_FT_IDX_TBL_ROWS; n++) { + if (dfs->dfs_ftindextable[n]) { + qdf_mem_free(dfs->dfs_ftindextable[n]); + dfs->dfs_ftindextable[n] = NULL; + } + } + qdf_mem_free(dfs->dfs_ftindextable); + dfs->dfs_ftindextable = NULL; + dfs->wlan_dfs_isdfsregdomain = 0; + } + + if (dfs->dfs_b5radars) { + qdf_mem_free(dfs->dfs_b5radars); + dfs->dfs_b5radars = NULL; + } + + dfs_reset_ar(dfs); + + WLAN_ARQ_LOCK(dfs); + empty = STAILQ_EMPTY(&(dfs->dfs_arq)); + WLAN_ARQ_UNLOCK(dfs); + if (!empty) + dfs_reset_arq(dfs); + + if (dfs->events) { + dfs_free_dfs_events(dfs->events); + dfs->events = NULL; + } + + WLAN_DFS_DATA_STRUCT_LOCK_DESTROY(dfs); + WLAN_DFSQ_LOCK_DESTROY(dfs); + WLAN_ARQ_LOCK_DESTROY(dfs); + WLAN_DFSEVENTQ_LOCK_DESTROY(dfs); +} + +int dfs_start_host_based_bangradar(struct wlan_dfs *dfs) +{ + dfs->wlan_radar_tasksched = 1; + qdf_timer_mod(&dfs->wlan_dfs_task_timer, 0); + + return 0; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_full_offload.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_full_offload.c new file mode 100644 index 0000000000000000000000000000000000000000..026cfcc0d62be70d5d3eb5e4bf3c7dc1ad56c78f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_full_offload.c @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2006, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains the dfs_fill_emulate_bang_radar_test() + * which is used to send command to firmware to emulate RADAR found event. + */ + +#include "../dfs_zero_cac.h" +#include "wlan_dfs_lmac_api.h" +#include "wlan_dfs_mlme_api.h" +#include "wlan_dfs_tgt_api.h" +#include "../dfs_internal.h" +#include "../dfs_full_offload.h" + +#if defined(WLAN_DFS_FULL_OFFLOAD) +int dfs_fill_emulate_bang_radar_test(struct wlan_dfs *dfs, + uint8_t segid, bool is_chirp, int32_t freq_offset, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test) +{ + uint32_t packed_args = 0; + + if (!(WLAN_IS_PRIMARY_OR_SECONDARY_CHAN_DFS(dfs->dfs_curchan))) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Ignore bangradar on a NON-DFS channel"); + return -EINVAL; + } + + if (segid > SEG_ID_SECONDARY) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Segment id should be 0 or 1"); + return -EINVAL; + } + packed_args = (segid) | (is_chirp << SEG_ID_SIZE) | + ((freq_offset & MASK) << (SEG_ID_SIZE + IS_CHIRP_SIZE)); + dfs_unit_test->num_args = DFS_UNIT_TEST_NUM_ARGS; + dfs_unit_test->args[IDX_CMD_ID] = + DFS_PHYERR_OFFLOAD_TEST_SET_RADAR; + dfs_unit_test->args[IDX_PDEV_ID] = + wlan_objmgr_pdev_get_pdev_id(dfs->dfs_pdev_obj); + dfs_unit_test->args[IDX_RADAR_PARAM1_ID] = packed_args; + + if (tgt_dfs_process_emulate_bang_radar_cmd(dfs->dfs_pdev_obj, + dfs_unit_test) == QDF_STATUS_E_FAILURE) { + return -EINVAL; + } + + return 0; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_nol.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_nol.c new file mode 100644 index 0000000000000000000000000000000000000000..eda749ad4c1b5c9a68d82e0393ce6d5eac1fbfac --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_nol.c @@ -0,0 +1,999 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains NOL related functionality, NOL being the non + * occupancy list. After radar has been detected in a particular channel, + * the channel cannot be used for a period of 30 minutes which is called + * the non occupancy. The NOL is basically a list of all the channels that + * radar has been detected on. Each channel has a 30 minute timer associated + * with it. This file contains the functionality to add a channel to the NOL, + * the NOL timer function and the functionality to remove a channel from the + * NOL when its time is up. + */ + +#include "../dfs.h" +#include "../dfs_channel.h" +#include "../dfs_ioctl_private.h" +#include "../dfs_internal.h" +#include +#include +#include +#include +#include +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +#include "../dfs_process_radar_found_ind.h" +#include "../dfs_partial_offload_radar.h" +#endif + +void dfs_set_update_nol_flag(struct wlan_dfs *dfs, bool val) +{ + dfs->update_nol = val; +} + +bool dfs_get_update_nol_flag(struct wlan_dfs *dfs) +{ + return dfs->update_nol; +} + +/** + * dfs_nol_timeout() - NOL timeout function. + * + * Clears the WLAN_CHAN_DFS_RADAR_FOUND flag for the NOL timeout channel. + */ +/* Unused function */ +#ifdef CONFIG_CHAN_FREQ_API +static os_timer_func(dfs_nol_timeout) +{ + struct dfs_channel *c = NULL, lc; + unsigned long oldest, now; + struct wlan_dfs *dfs = NULL; + int i; + int nchans = 0; + + c = &lc; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + dfs_mlme_get_dfs_ch_nchans(dfs->dfs_pdev_obj, &nchans); + + now = oldest = qdf_system_ticks(); + for (i = 0; i < nchans; i++) { + dfs_mlme_get_dfs_channels_for_freq + (dfs->dfs_pdev_obj, + &c->dfs_ch_freq, + &c->dfs_ch_flags, + &c->dfs_ch_flagext, + &c->dfs_ch_ieee, + &c->dfs_ch_vhtop_ch_freq_seg1, + &c->dfs_ch_vhtop_ch_freq_seg2, + &c->dfs_ch_mhz_freq_seg1, + &c->dfs_ch_mhz_freq_seg2, + i); + if (WLAN_IS_CHAN_RADAR(c)) { + if (qdf_system_time_after_eq(now, + dfs->dfs_nol_event[i] + + dfs_get_nol_timeout(dfs))) { + c->dfs_ch_flagext &= ~WLAN_CHAN_DFS_RADAR_FOUND; + if (c->dfs_ch_flags & WLAN_CHAN_DFS_RADAR) { + /* + * NB: do this here so we get only one + * msg instead of one for every channel + * table entry. + */ + dfs_debug(dfs, WLAN_DEBUG_DFS, + "radar on channel %u (%u MHz) cleared after timeout", + c->dfs_ch_ieee, + c->dfs_ch_freq); + } + } else if (dfs->dfs_nol_event[i] < oldest) { + oldest = dfs->dfs_nol_event[i]; + } + } + } + if (oldest != now) { + /* Arrange to process next channel up for a status change. */ + qdf_timer_mod(&dfs->dfs_nol_timer, + dfs_get_nol_timeout(dfs) - + qdf_system_ticks_to_msecs(qdf_system_ticks())); + } +} +#else +#ifdef CONFIG_CHAN_NUM_API +static os_timer_func(dfs_nol_timeout) +{ + struct dfs_channel *c = NULL, lc; + unsigned long oldest, now; + struct wlan_dfs *dfs = NULL; + int i; + int nchans = 0; + + c = &lc; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + dfs_mlme_get_dfs_ch_nchans(dfs->dfs_pdev_obj, &nchans); + + now = oldest = qdf_system_ticks(); + for (i = 0; i < nchans; i++) { + dfs_mlme_get_dfs_ch_channels(dfs->dfs_pdev_obj, + &(c->dfs_ch_freq), + &(c->dfs_ch_flags), + &(c->dfs_ch_flagext), + &(c->dfs_ch_ieee), + &(c->dfs_ch_vhtop_ch_freq_seg1), + &(c->dfs_ch_vhtop_ch_freq_seg2), + i); + if (WLAN_IS_CHAN_RADAR(c)) { + if (qdf_system_time_after_eq(now, + dfs->dfs_nol_event[i] + + dfs_get_nol_timeout(dfs))) { + c->dfs_ch_flagext &= + ~WLAN_CHAN_DFS_RADAR_FOUND; + if (c->dfs_ch_flags & + WLAN_CHAN_DFS_RADAR) { + /* + * NB: do this here so we get only one + * msg instead of one for every channel + * table entry. + */ + dfs_debug(dfs, WLAN_DEBUG_DFS, + "radar on channel %u (%u MHz) cleared after timeout", + + c->dfs_ch_ieee, + c->dfs_ch_freq); + } + } else if (dfs->dfs_nol_event[i] < oldest) + oldest = dfs->dfs_nol_event[i]; + } + } + if (oldest != now) { + /* Arrange to process next channel up for a status change. */ + qdf_timer_mod(&dfs->dfs_nol_timer, + dfs_get_nol_timeout(dfs) - + qdf_system_ticks_to_msecs(qdf_system_ticks())); + } +} +#endif +#endif + +/** + * dfs_nol_elem_free_work_cb - Free NOL element + * + * Free the NOL element memory + */ +static void dfs_nol_elem_free_work_cb(void *context) +{ + struct wlan_dfs *dfs = (struct wlan_dfs *)context; + struct dfs_nolelem *nol_head; + + while (true) { + WLAN_DFSNOL_LOCK(dfs); + + nol_head = TAILQ_FIRST(&dfs->dfs_nol_free_list); + if (nol_head) { + TAILQ_REMOVE(&dfs->dfs_nol_free_list, nol_head, + nolelem_list); + WLAN_DFSNOL_UNLOCK(dfs); + + qdf_timer_free(&nol_head->nol_timer); + qdf_mem_free(nol_head); + } else { + WLAN_DFSNOL_UNLOCK(dfs); + break; + } + } +} + +void dfs_nol_timer_init(struct wlan_dfs *dfs) +{ + qdf_timer_init(NULL, + &(dfs->dfs_nol_timer), + dfs_nol_timeout, + (void *)(dfs), + QDF_TIMER_TYPE_WAKE_APPS); +} + +void dfs_nol_attach(struct wlan_dfs *dfs) +{ + dfs->wlan_dfs_nol_timeout = DFS_NOL_TIMEOUT_S; + dfs_nol_timer_init(dfs); + qdf_create_work(NULL, &dfs->dfs_nol_elem_free_work, + dfs_nol_elem_free_work_cb, dfs); + TAILQ_INIT(&dfs->dfs_nol_free_list); + dfs->dfs_use_nol = 1; + WLAN_DFSNOL_LOCK_CREATE(dfs); +} + +void dfs_nol_detach(struct wlan_dfs *dfs) +{ + dfs_nol_timer_cleanup(dfs); + qdf_flush_work(&dfs->dfs_nol_elem_free_work); + qdf_destroy_work(NULL, &dfs->dfs_nol_elem_free_work); + WLAN_DFSNOL_LOCK_DESTROY(dfs); +} + +void dfs_nol_timer_detach(struct wlan_dfs *dfs) +{ + qdf_timer_free(&dfs->dfs_nol_timer); +} + +/** + * dfs_nol_delete() - Delete the given frequency/chwidth from the NOL. + * @dfs: Pointer to wlan_dfs structure. + * @delfreq: Freq to delete. + * @delchwidth: Channel width to delete. + */ +static void dfs_nol_delete(struct wlan_dfs *dfs, + uint16_t delfreq, + uint16_t delchwidth) +{ + struct dfs_nolelem *nol, **prev_next; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL, + "remove channel=%d/%d MHz from NOL", + delfreq, delchwidth); + prev_next = &(dfs->dfs_nol); + nol = dfs->dfs_nol; + while (nol) { + if (nol->nol_freq == delfreq && + nol->nol_chwidth == delchwidth) { + *prev_next = nol->nol_next; + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL, + "removing channel %d/%dMHz from NOL tstamp=%d", + nol->nol_freq, + nol->nol_chwidth, + (qdf_system_ticks_to_msecs + (qdf_system_ticks()) / 1000)); + TAILQ_INSERT_TAIL(&dfs->dfs_nol_free_list, + nol, nolelem_list); + nol = *prev_next; + + /* Update the NOL counter. */ + dfs->dfs_nol_count--; + + /* Be paranoid! */ + if (dfs->dfs_nol_count < 0) { + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "dfs_nol_count < 0; eek!"); + dfs->dfs_nol_count = 0; + } + + } else { + prev_next = &(nol->nol_next); + nol = nol->nol_next; + } + } +} + +/** + * dfs_remove_from_nol() - Remove the freq from NOL list. + * + * When NOL times out, this function removes the channel from NOL list. + */ +#ifdef CONFIG_CHAN_FREQ_API +static os_timer_func(dfs_remove_from_nol) +{ + struct dfs_nolelem *nol_arg; + struct wlan_dfs *dfs; + uint16_t delfreq; + uint16_t delchwidth; + uint8_t chan; + + OS_GET_TIMER_ARG(nol_arg, struct dfs_nolelem *); + + dfs = nol_arg->nol_dfs; + delfreq = nol_arg->nol_freq; + delchwidth = nol_arg->nol_chwidth; + + /* Delete the given NOL entry. */ + DFS_NOL_DELETE_CHAN_LOCKED(dfs, delfreq, delchwidth); + + /* Update the wireless stack with the new NOL. */ + dfs_nol_update(dfs); + + dfs_mlme_nol_timeout_notification(dfs->dfs_pdev_obj); + chan = utils_dfs_freq_to_chan(delfreq); + utils_dfs_deliver_event(dfs->dfs_pdev_obj, delfreq, + WLAN_EV_NOL_FINISHED); + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL, + "remove channel %d from nol", chan); + utils_dfs_unmark_precac_nol_for_freq(dfs->dfs_pdev_obj, delfreq); + utils_dfs_reg_update_nol_chan_for_freq(dfs->dfs_pdev_obj, + &delfreq, 1, DFS_NOL_RESET); + utils_dfs_save_nol(dfs->dfs_pdev_obj); +} +#else +#ifdef CONFIG_CHAN_NUM_API +static os_timer_func(dfs_remove_from_nol) +{ + struct dfs_nolelem *nol_arg; + struct wlan_dfs *dfs; + uint16_t delfreq; + uint16_t delchwidth; + uint8_t chan; + + OS_GET_TIMER_ARG(nol_arg, struct dfs_nolelem *); + + dfs = nol_arg->nol_dfs; + delfreq = nol_arg->nol_freq; + delchwidth = nol_arg->nol_chwidth; + + /* Delete the given NOL entry. */ + DFS_NOL_DELETE_CHAN_LOCKED(dfs, delfreq, delchwidth); + + /* Update the wireless stack with the new NOL. */ + dfs_nol_update(dfs); + + dfs_mlme_nol_timeout_notification(dfs->dfs_pdev_obj); + chan = utils_dfs_freq_to_chan(delfreq); + utils_dfs_deliver_event(dfs->dfs_pdev_obj, delfreq, + WLAN_EV_NOL_FINISHED); + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL, + "remove channel %d from nol", chan); + utils_dfs_unmark_precac_nol(dfs->dfs_pdev_obj, chan); + utils_dfs_reg_update_nol_ch(dfs->dfs_pdev_obj, + &chan, 1, DFS_NOL_RESET); + utils_dfs_save_nol(dfs->dfs_pdev_obj); +} +#endif +#endif + +void dfs_print_nol(struct wlan_dfs *dfs) +{ + struct dfs_nolelem *nol; + int i = 0; + uint32_t diff_ms, remaining_sec; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + nol = dfs->dfs_nol; + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL, "NOL"); + while (nol) { + diff_ms = qdf_system_ticks_to_msecs(qdf_system_ticks() - + nol->nol_start_ticks); + diff_ms = (nol->nol_timeout_ms - diff_ms); + remaining_sec = diff_ms / 1000; /* Convert to seconds */ + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, + "nol:%d channel=%d MHz width=%d MHz time left=%u seconds nol starttick=%llu", + i++, nol->nol_freq, + nol->nol_chwidth, + remaining_sec, + (uint64_t)nol->nol_start_ticks); + nol = nol->nol_next; + } +} + +void dfs_print_nolhistory(struct wlan_dfs *dfs) +{ + struct dfs_channel *chan_list; + int i, j; + int nchans; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + nchans = dfs_get_num_chans(); + + chan_list = qdf_mem_malloc(nchans * sizeof(*chan_list)); + if (!chan_list) + return; + + utils_dfs_get_nol_history_chan_list(dfs->dfs_pdev_obj, + (void *)chan_list, &nchans); + if (!nchans) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "zero chans"); + qdf_mem_free(chan_list); + return; + } + + for (i = 0, j = 0; i < nchans; i++, j++) + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, + "nolhistory = %d channel = %d MHz", + j, chan_list[i].dfs_ch_freq); + + qdf_mem_free(chan_list); +} + +void dfs_get_nol(struct wlan_dfs *dfs, + struct dfsreq_nolelem *dfs_nol, + int *nchan) +{ + struct dfs_nolelem *nol; + + *nchan = 0; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + nol = dfs->dfs_nol; + while (nol) { + dfs_nol[*nchan].nol_freq = nol->nol_freq; + dfs_nol[*nchan].nol_chwidth = nol->nol_chwidth; + dfs_nol[*nchan].nol_start_ticks = nol->nol_start_ticks; + dfs_nol[*nchan].nol_timeout_ms = nol->nol_timeout_ms; + ++(*nchan); + nol = nol->nol_next; + } +} + +#ifdef CONFIG_CHAN_FREQ_API +void dfs_set_nol(struct wlan_dfs *dfs, + struct dfsreq_nolelem *dfs_nol, + int nchan) +{ +#define TIME_IN_MS 1000 + uint32_t nol_time_lft_ms; + struct dfs_channel chan; + int i; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + for (i = 0; i < nchan; i++) { + nol_time_lft_ms = + qdf_system_ticks_to_msecs(qdf_system_ticks() - + dfs_nol[i].nol_start_ticks); + + if (nol_time_lft_ms < dfs_nol[i].nol_timeout_ms) { + chan.dfs_ch_freq = dfs_nol[i].nol_freq; + chan.dfs_ch_flags = 0; + chan.dfs_ch_flagext = 0; + nol_time_lft_ms = + (dfs_nol[i].nol_timeout_ms - nol_time_lft_ms); + + DFS_NOL_ADD_CHAN_LOCKED(dfs, chan.dfs_ch_freq, + (nol_time_lft_ms / TIME_IN_MS)); + utils_dfs_reg_update_nol_chan_for_freq(dfs->dfs_pdev_obj, + &chan.dfs_ch_freq, + 1, DFS_NOL_SET); + } + } +#undef TIME_IN_MS + dfs_nol_update(dfs); +} +#else +#ifdef CONFIG_CHAN_NUM_API +void dfs_set_nol(struct wlan_dfs *dfs, + struct dfsreq_nolelem *dfs_nol, + int nchan) +{ +#define TIME_IN_MS 1000 + uint32_t nol_time_left_ms; + struct dfs_channel chan; + int i; + uint8_t chan_num; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + for (i = 0; i < nchan; i++) { + nol_time_left_ms = + qdf_system_ticks_to_msecs(qdf_system_ticks() - + dfs_nol[i].nol_start_ticks); + + if (nol_time_left_ms < dfs_nol[i].nol_timeout_ms) { + chan.dfs_ch_freq = dfs_nol[i].nol_freq; + chan.dfs_ch_flags = 0; + chan.dfs_ch_flagext = 0; + nol_time_left_ms = + (dfs_nol[i].nol_timeout_ms - nol_time_left_ms); + + DFS_NOL_ADD_CHAN_LOCKED(dfs, chan.dfs_ch_freq, + (nol_time_left_ms / TIME_IN_MS)); + chan_num = utils_dfs_freq_to_chan(chan.dfs_ch_freq); + utils_dfs_reg_update_nol_ch(dfs->dfs_pdev_obj, + &chan_num, 1, DFS_NOL_SET); + } + } +#undef TIME_IN_MS + dfs_nol_update(dfs); +} +#endif +#endif + +void dfs_nol_addchan(struct wlan_dfs *dfs, + uint16_t freq, + uint32_t dfs_nol_timeout) +{ +#define TIME_IN_MS 1000 +#define TIME_IN_US (TIME_IN_MS * 1000) + struct dfs_nolelem *nol, *elem, *prev; + /* For now, assume all events are 20MHz wide. */ + int ch_width = 20; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + nol = dfs->dfs_nol; + prev = dfs->dfs_nol; + elem = NULL; + while (nol) { + if ((nol->nol_freq == freq) && + (nol->nol_chwidth == ch_width)) { + nol->nol_start_ticks = qdf_system_ticks(); + nol->nol_timeout_ms = dfs_nol_timeout * TIME_IN_MS; + + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL, + "Update OS Ticks for NOL %d MHz / %d MHz", + nol->nol_freq, nol->nol_chwidth); + + qdf_timer_stop(&nol->nol_timer); + qdf_timer_mod(&nol->nol_timer, + dfs_nol_timeout * TIME_IN_MS); + return; + } + prev = nol; + nol = nol->nol_next; + } + + /* Add a new element to the NOL. */ + elem = (struct dfs_nolelem *)qdf_mem_malloc(sizeof(struct dfs_nolelem)); + if (!elem) + goto bad; + + qdf_mem_zero(elem, sizeof(*elem)); + elem->nol_dfs = dfs; + elem->nol_freq = freq; + elem->nol_chwidth = ch_width; + elem->nol_start_ticks = qdf_system_ticks(); + elem->nol_timeout_ms = dfs_nol_timeout*TIME_IN_MS; + elem->nol_next = NULL; + if (prev) { + prev->nol_next = elem; + } else { + /* This is the first element in the NOL. */ + dfs->dfs_nol = elem; + } + + qdf_timer_init(NULL, + &elem->nol_timer, dfs_remove_from_nol, + elem, QDF_TIMER_TYPE_WAKE_APPS); + + qdf_timer_mod(&elem->nol_timer, dfs_nol_timeout * TIME_IN_MS); + + /* Update the NOL counter. */ + dfs->dfs_nol_count++; + + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL, + "new NOL channel %d MHz / %d MHz", + elem->nol_freq, elem->nol_chwidth); + return; + +bad: + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL | WLAN_DEBUG_DFS, + "failed to allocate memory for nol entry"); + +#undef TIME_IN_MS +#undef TIME_IN_US +} + +void dfs_get_nol_chfreq_and_chwidth(struct dfsreq_nolelem *dfs_nol, + uint32_t *nol_chfreq, + uint32_t *nol_chwidth, + int index) +{ + if (!dfs_nol) + return; + + *nol_chfreq = dfs_nol[index].nol_freq; + *nol_chwidth = dfs_nol[index].nol_chwidth; +} + +void dfs_nol_update(struct wlan_dfs *dfs) +{ + struct dfsreq_nolelem *dfs_nol; + int nlen; + + if (!dfs->dfs_nol_count) { + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL, "dfs_nol_count is zero"); + dfs_mlme_clist_update(dfs->dfs_pdev_obj, NULL, 0); + return; + } + + /* + * Allocate enough entries to store the NOL. At least on Linux + * (don't ask why), if you allocate a 0 entry array, the + * returned pointer is 0x10. Make sure you're aware of this + * when you start debugging. + */ + dfs_nol = (struct dfsreq_nolelem *)qdf_mem_malloc( + sizeof(struct dfsreq_nolelem) * dfs->dfs_nol_count); + + /* + * XXX TODO: if this fails, just schedule a task to retry + * updating the NOL at a later stage. That way the NOL + * update _DOES_ happen - hopefully the failure was just + * temporary. + */ + if (!dfs_nol) + return; + + DFS_GET_NOL_LOCKED(dfs, dfs_nol, &nlen); + + /* Be suitably paranoid for now. */ + if (nlen != dfs->dfs_nol_count) + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "nlen (%d) != dfs->dfs_nol_count (%d)!", + nlen, dfs->dfs_nol_count); + + /* + * Call the driver layer to have it recalculate the NOL flags + * for each driver/umac channel. If the list is empty, pass + * NULL instead of dfs_nol. The operating system may have some + * special representation for "malloc a 0 byte memory region" + * - for example, Linux 2.6.38-13 (ubuntu) returns 0x10 rather + * than a valid allocation (and is likely not NULL so the + * pointer doesn't match NULL checks in any later code. + */ + dfs_mlme_clist_update(dfs->dfs_pdev_obj, + (nlen > 0) ? dfs_nol : NULL, + nlen); + + qdf_mem_free(dfs_nol); +} + +void dfs_nol_free_list(struct wlan_dfs *dfs) +{ + struct dfs_nolelem *nol = dfs->dfs_nol, *prev; + + while (nol) { + prev = nol; + nol = nol->nol_next; + qdf_mem_free(prev); + /* Update the NOL counter. */ + dfs->dfs_nol_count--; + + if (dfs->dfs_nol_count < 0) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_nol_count < 0"); + ASSERT(0); + } + } + + dfs->dfs_nol = NULL; +} + +#ifdef CONFIG_CHAN_FREQ_API +void dfs_nol_timer_cleanup(struct wlan_dfs *dfs) +{ + struct dfs_nolelem *nol; + uint16_t nol_freq; + + while (true) { + WLAN_DFSNOL_LOCK(dfs); + + nol = dfs->dfs_nol; + if (nol) { + dfs->dfs_nol = nol->nol_next; + dfs->dfs_nol_count--; + nol_freq = nol->nol_freq; + WLAN_DFSNOL_UNLOCK(dfs); + utils_dfs_reg_update_nol_chan_for_freq( + dfs->dfs_pdev_obj, + &nol_freq, + 1, + DFS_NOL_RESET); + + qdf_timer_free(&nol->nol_timer); + qdf_mem_free(nol); + } else { + WLAN_DFSNOL_UNLOCK(dfs); + break; + } + } +} +#else +#ifdef CONFIG_CHAN_NUM_API +void dfs_nol_timer_cleanup(struct wlan_dfs *dfs) +{ + struct dfs_nolelem *nol; + uint8_t nol_chan; + + while (true) { + WLAN_DFSNOL_LOCK(dfs); + + nol = dfs->dfs_nol; + if (nol) { + dfs->dfs_nol = nol->nol_next; + dfs->dfs_nol_count--; + nol_chan = utils_dfs_freq_to_chan(nol->nol_freq); + WLAN_DFSNOL_UNLOCK(dfs); + utils_dfs_reg_update_nol_ch(dfs->dfs_pdev_obj, + &nol_chan, + 1, + DFS_NOL_RESET); + + qdf_timer_free(&nol->nol_timer); + qdf_mem_free(nol); + } else { + WLAN_DFSNOL_UNLOCK(dfs); + break; + } + } +} +#endif +#endif + +void dfs_nol_workqueue_cleanup(struct wlan_dfs *dfs) +{ + qdf_flush_work(&dfs->dfs_nol_elem_free_work); +} + +int dfs_get_use_nol(struct wlan_dfs *dfs) +{ + return dfs->dfs_use_nol; +} + +int dfs_get_nol_timeout(struct wlan_dfs *dfs) +{ + return dfs->wlan_dfs_nol_timeout; +} + +void dfs_getnol(struct wlan_dfs *dfs, void *dfs_nolinfo) +{ + struct dfsreq_nolinfo *nolinfo = (struct dfsreq_nolinfo *)dfs_nolinfo; + + DFS_GET_NOL_LOCKED(dfs, nolinfo->dfs_nol, &(nolinfo->dfs_ch_nchans)); +} + +#ifdef CONFIG_CHAN_FREQ_API +void dfs_clear_nolhistory(struct wlan_dfs *dfs) +{ + struct dfs_channel *chan_list; + int nchans = 0; + bool sta_opmode; + + if (!dfs->dfs_is_stadfs_enabled) + return; + + sta_opmode = dfs_mlme_is_opmode_sta(dfs->dfs_pdev_obj); + if (!sta_opmode) + return; + + nchans = dfs_get_num_chans(); + + chan_list = qdf_mem_malloc(nchans * sizeof(*chan_list)); + if (!chan_list) + return; + + utils_dfs_get_nol_history_chan_list(dfs->dfs_pdev_obj, + (void *)chan_list, &nchans); + if (!nchans) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "zero chans"); + qdf_mem_free(chan_list); + return; + } + + utils_dfs_reg_update_nol_history_chan_for_freq(dfs->dfs_pdev_obj, + (void *)chan_list, nchans, + DFS_NOL_HISTORY_RESET); + + qdf_mem_free(chan_list); +} +#else +#ifdef CONFIG_CHAN_NUM_API +void dfs_clear_nolhistory(struct wlan_dfs *dfs) +{ + struct dfs_channel *chan_list; + int nchans = 0; + bool sta_opmode; + + if (!dfs->dfs_is_stadfs_enabled) + return; + + sta_opmode = dfs_mlme_is_opmode_sta(dfs->dfs_pdev_obj); + if (!sta_opmode) + return; + + nchans = dfs_get_num_chans(); + + chan_list = qdf_mem_malloc(nchans * sizeof(*chan_list)); + if (!chan_list) + return; + + utils_dfs_get_nol_history_chan_list(dfs->dfs_pdev_obj, + (void *)chan_list, &nchans); + if (!nchans) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "zero chans"); + qdf_mem_free(chan_list); + return; + } + + utils_dfs_reg_update_nol_history_ch(dfs->dfs_pdev_obj, + (void *)chan_list, nchans, + DFS_NOL_HISTORY_RESET); + + qdf_mem_free(chan_list); +} +#endif +#endif + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) && \ + defined(CONFIG_CHAN_FREQ_API) +void dfs_remove_spoof_channel_from_nol(struct wlan_dfs *dfs) +{ + struct dfs_nolelem *nol; + uint16_t freq_list[NUM_CHANNELS_160MHZ]; + int i, nchans = 0; + + nchans = dfs_get_bonding_channels_for_freq(dfs, + &dfs->dfs_radar_found_chan, + SEG_ID_PRIMARY, + DETECTOR_ID_0, + freq_list); + + WLAN_DFSNOL_LOCK(dfs); + for (i = 0; i < nchans && i < NUM_CHANNELS_160MHZ; i++) { + nol = dfs->dfs_nol; + while (nol) { + if (nol->nol_freq == freq_list[i]) { + OS_SET_TIMER(&nol->nol_timer, 0); + break; + } + nol = nol->nol_next; + } + } + WLAN_DFSNOL_UNLOCK(dfs); + + utils_dfs_reg_update_nol_chan_for_freq(dfs->dfs_pdev_obj, + freq_list, nchans, DFS_NOL_RESET); +} +#else +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) && \ + defined(CONFIG_CHAN_NUM_API) +void dfs_remove_spoof_channel_from_nol(struct wlan_dfs *dfs) +{ + struct dfs_nolelem *nol; + uint8_t channels[NUM_CHANNELS_160MHZ]; + int i, nchans = 0; + + nchans = dfs_get_bonding_channels(dfs, + &dfs->dfs_radar_found_chan, + SEG_ID_PRIMARY, + DETECTOR_ID_0, + channels); + + WLAN_DFSNOL_LOCK(dfs); + for (i = 0; i < nchans && i < NUM_CHANNELS_160MHZ; i++) { + nol = dfs->dfs_nol; + while (nol) { + if (nol->nol_freq == (uint16_t)utils_dfs_chan_to_freq( + channels[i])) { + OS_SET_TIMER(&nol->nol_timer, 0); + break; + } + nol = nol->nol_next; + } + } + WLAN_DFSNOL_UNLOCK(dfs); + + utils_dfs_reg_update_nol_ch(dfs->dfs_pdev_obj, + channels, nchans, DFS_NOL_RESET); +} +#endif +#endif + +void dfs_init_tmp_psoc_nol(struct wlan_dfs *dfs, uint8_t num_radios) +{ + struct dfs_soc_priv_obj *dfs_soc_obj = dfs->dfs_soc_obj; + + if (WLAN_UMAC_MAX_PDEVS < num_radios) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "num_radios (%u) exceeds limit", num_radios); + return; + } + + /* Allocate the temporary psoc NOL copy structure for the number + * of radios provided. + */ + dfs_soc_obj->dfs_psoc_nolinfo = + qdf_mem_malloc(sizeof(struct dfsreq_nolinfo) * num_radios); +} + +void dfs_deinit_tmp_psoc_nol(struct wlan_dfs *dfs) +{ + struct dfs_soc_priv_obj *dfs_soc_obj = dfs->dfs_soc_obj; + + if (!dfs_soc_obj->dfs_psoc_nolinfo) + return; + + qdf_mem_free(dfs_soc_obj->dfs_psoc_nolinfo); + dfs_soc_obj->dfs_psoc_nolinfo = NULL; +} + +void dfs_save_dfs_nol_in_psoc(struct wlan_dfs *dfs, + uint8_t pdev_id, + uint16_t low_5ghz_freq, + uint16_t high_5ghz_freq) +{ + struct dfs_soc_priv_obj *dfs_soc_obj = dfs->dfs_soc_obj; + struct dfsreq_nolinfo tmp_nolinfo, *nolinfo; + uint32_t i, num_chans = 0; + uint16_t tmp_freq; + + if (!dfs->dfs_nol_count) + return; + + if (!dfs_soc_obj->dfs_psoc_nolinfo) + return; + + nolinfo = &dfs_soc_obj->dfs_psoc_nolinfo[pdev_id]; + /* Fetch the NOL entries for the DFS object. */ + dfs_getnol(dfs, &tmp_nolinfo); + + /* nolinfo might already have some data. Do not overwrite it */ + num_chans = nolinfo->dfs_ch_nchans; + for (i = 0; i < tmp_nolinfo.dfs_ch_nchans; i++) { + tmp_freq = tmp_nolinfo.dfs_nol[i].nol_freq; + + /* Add to nolinfo only if within the pdev's frequency range. */ + if ((low_5ghz_freq < tmp_freq) && (high_5ghz_freq > tmp_freq)) { + /* Figure out the completed duration of each NOL. */ + uint32_t nol_completed_ms = + qdf_system_ticks_to_msecs(qdf_system_ticks() - + tmp_nolinfo.dfs_nol[i].nol_start_ticks); + + nolinfo->dfs_nol[num_chans] = tmp_nolinfo.dfs_nol[i]; + /* Remember the remaining NOL time in the timeout + * variable. + */ + nolinfo->dfs_nol[num_chans++].nol_timeout_ms -= + nol_completed_ms; + } + } + + nolinfo->dfs_ch_nchans = num_chans; +} + +void dfs_reinit_nol_from_psoc_copy(struct wlan_dfs *dfs, uint8_t pdev_id) +{ + struct dfs_soc_priv_obj *dfs_soc_obj = dfs->dfs_soc_obj; + struct dfsreq_nolinfo *nol; + uint8_t i; + + if (!dfs_soc_obj->dfs_psoc_nolinfo) + return; + + if (!dfs_soc_obj->dfs_psoc_nolinfo[pdev_id].dfs_ch_nchans) + return; + + nol = &dfs_soc_obj->dfs_psoc_nolinfo[pdev_id]; + + /* The NOL timeout value in each entry points to the remaining time + * of the NOL. This is to indicate that the NOL entries are paused + * and are not left to continue. + * While adding these NOL, update the start ticks to current time + * to avoid losing entries which might have timed out during + * the pause and resume mechanism. + */ + for (i = 0; i < nol->dfs_ch_nchans; i++) + nol->dfs_nol[i].nol_start_ticks = qdf_system_ticks(); + dfs_set_nol(dfs, nol->dfs_nol, nol->dfs_ch_nchans); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_process_radar_found_ind.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_process_radar_found_ind.c new file mode 100644 index 0000000000000000000000000000000000000000..6c02b7036bc7818d17e76c582bc9ad214966ad92 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_process_radar_found_ind.c @@ -0,0 +1,1186 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: API for processing radar found indication. + * + */ + +#include "../dfs.h" +#include "../dfs_zero_cac.h" +#include "../dfs_process_radar_found_ind.h" +#include +#include +#include +#include +#include "wlan_dfs_mlme_api.h" +#include "../dfs_internal.h" +/** + * TODO: The code is not according to the following description needs + * modification and correction. Code always adds left and right channels to + * NOL even if it is not a chirp radar. + * + * A) If chirp radar starts at boundary and ends at boundary then three channels + * will be affected. + * freq_offset.freq[0] = fn (Center frequency) + * freq_offset.freq[1] = fn-1 (Left of center) + * freq_offset.freq[2] = fn+1 (Right of center) + * + * Three channels, ch(n-1), ch(n)and ch(n+1) will be added to NOL. + * + * Chirp start freq Chirp end freq + * | | + * | | + * V V + * _______________________________________________________________________ + * | center freq | center freq | center freq | + * | ch(n-1) | ch(n) | ch(n+1) | + * | | | | | | | + * | | | | | | | + * | | | | | | | + * fn-1 fn boundary fn+1 + * <-------- 20 Mhz ------> + * + * B) If chirp radar starts at one channel and continues up to another channel + * then two channels will be affected. + * freq_offset.freq[0] = fn + * freq_offset.freq[1] = 0 + * freq_offset.freq[2] = fn+1 + * + * Three channels, ch(n-1), ch(n)and ch(n+1) will be added to NOL. + * + * Chirp start freq Chirp end freq + * | | + * | | + * V V + * _______________________________________________________________________ + * | center freq | center freq | center freq | + * | ch(n-1) | ch(n) | ch(n+1) | + * | | | | | | | + * | | | | | | | + * | | | | | | | + * fn-1 fn boundary fn+1 + * <-------- 20 Mhz ------> + * + * C) Radar found at boundary, two channels will be affected. + * freq_offset.freq[0] = fn + * freq_offset.freq[1] = 0 + * freq_offset.freq[2] = fn+1 + * + * Two channels, ch(n) and ch(n+1) will be added to NOL. + * + * dfs_freq_offset (radar found freq) + * | + * | + * V + * _______________________________________________________________________ + * | center freq | center freq | center freq | + * | ch(n-1) | ch(n) | ch(n+1) | + * | | | | | | | + * | | | | | | | + * | | | | | | | + * fn-1 fn boundary fn+1 + * <-------- 20 Mhz ------> + * + * + * D) Else only one channel will be affected. + * freq_offset.freq[0] = fn + * freq_offset.freq[1] = 0 + * freq_offset.freq[2] = 0 + * + * One channel ch(n) will be added to NOL. + * + * + * dfs_freq_offset (radar found freq) + * | + * | + * V + * _______________________________________________________________________ + * | center freq | center freq | center freq | + * | ch(n-1) | ch(n) | ch(n+1) | + * | | | | | | | + * | | | | | | | + * | | | | | | | + * fn-1 fn boundary fn+1 + * <-------- 20 Mhz ------> + */ + +int dfs_set_nol_subchannel_marking(struct wlan_dfs *dfs, + bool nol_subchannel_marking) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!dfs) + return -EIO; + + dfs->dfs_use_nol_subchannel_marking = nol_subchannel_marking; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "NOL subchannel marking is %s ", + (nol_subchannel_marking) ? "set" : "disabled"); + if (dfs->dfs_is_offload_enabled) + status = tgt_dfs_send_subchan_marking(dfs->dfs_pdev_obj, + nol_subchannel_marking); + + return qdf_status_to_os_return(status); +} + +int dfs_get_nol_subchannel_marking(struct wlan_dfs *dfs, + bool *nol_subchannel_marking) +{ + if (!dfs) + return -EIO; + + (*nol_subchannel_marking) = dfs->dfs_use_nol_subchannel_marking; + + return 0; +} + +/** + * dfs_radar_add_channel_list_to_nol_for_freq()- Add given channels to nol + * @dfs: Pointer to wlan_dfs structure. + * @freq_list: Pointer to list of frequency. + * @num_channels: Number of channels in the list. + * + * Add list of channels to nol, only if the channel is dfs. + * + * Return: QDF_STATUS + */ +#ifdef CONFIG_CHAN_FREQ_API +static QDF_STATUS +dfs_radar_add_channel_list_to_nol_for_freq(struct wlan_dfs *dfs, + uint16_t *freq_list, + uint8_t num_channels) +{ + int i; + uint16_t last_chan_freq = 0; + uint16_t nol_freq_list[NUM_CHANNELS_160MHZ]; + uint8_t num_ch = 0; + + if (num_channels > NUM_CHANNELS_160MHZ) { + dfs_err(dfs, WLAN_DEBUG_DFS, + "Invalid num channels: %d", num_channels); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < num_channels; i++) { + if (freq_list[i] == 0 || + freq_list[i] == last_chan_freq) + continue; + if (!utils_is_dfs_chan_for_freq(dfs->dfs_pdev_obj, + freq_list[i])) { + dfs_info(dfs, WLAN_DEBUG_DFS, "ch=%d is not dfs, skip", + freq_list[i]); + continue; + } + last_chan_freq = freq_list[i]; + DFS_NOL_ADD_CHAN_LOCKED(dfs, + freq_list[i], + dfs->wlan_dfs_nol_timeout); + nol_freq_list[num_ch++] = last_chan_freq; + utils_dfs_deliver_event(dfs->dfs_pdev_obj, + freq_list[i], + WLAN_EV_NOL_STARTED); + dfs_info(dfs, WLAN_DEBUG_DFS_NOL, "ch=%d Added to NOL", + last_chan_freq); + } + + if (!num_ch) { + dfs_err(dfs, WLAN_DEBUG_DFS, + "dfs channels not found in channel list"); + return QDF_STATUS_E_FAILURE; + } + + utils_dfs_reg_update_nol_chan_for_freq(dfs->dfs_pdev_obj, + nol_freq_list, num_ch, + DFS_NOL_SET); + + if (dfs->dfs_is_stadfs_enabled) + if (dfs_mlme_is_opmode_sta(dfs->dfs_pdev_obj)) + utils_dfs_reg_update_nol_history_chan_for_freq( + dfs->dfs_pdev_obj, nol_freq_list, + num_ch, DFS_NOL_HISTORY_SET); + + dfs_nol_update(dfs); + utils_dfs_save_nol(dfs->dfs_pdev_obj); + + return QDF_STATUS_SUCCESS; +} +#endif +/** + * dfs_radar_chan_for_80()- Find frequency offsets for 80MHz + * @freq_offset: freq offset + * @center_freq: center frequency + * + * Find frequency offsets for 80MHz + * + * Return: None + */ +static void dfs_radar_chan_for_80(struct freqs_offsets *freq_offset, + uint32_t center_freq) +{ + int i; + + for (i = 0; i < DFS_NUM_FREQ_OFFSET; i++) { + if (freq_offset->offset[i] < DFS_OFFSET_SECOND_LOWER) + freq_offset->freq[i] = + DFS_THIRD_LOWER_CHANNEL(center_freq); + else if ((freq_offset->offset[i] > DFS_OFFSET_SECOND_LOWER) && + (freq_offset->offset[i] < DFS_OFFSET_FIRST_LOWER)) + freq_offset->freq[i] = + DFS_SECOND_LOWER_CHANNEL(center_freq); + else if ((freq_offset->offset[i] > DFS_OFFSET_FIRST_LOWER) && + (freq_offset->offset[i] < 0)) + freq_offset->freq[i] = + DFS_FIRST_LOWER_CHANNEL(center_freq); + else if ((freq_offset->offset[i] > 0) && + (freq_offset->offset[i] < DFS_OFFSET_FIRST_UPPER)) + freq_offset->freq[i] = + DFS_FIRST_UPPER_CHANNEL(center_freq); + else if ((freq_offset->offset[i] > DFS_OFFSET_FIRST_UPPER) && + (freq_offset->offset[i] < DFS_OFFSET_SECOND_UPPER)) + freq_offset->freq[i] = + DFS_SECOND_UPPER_CHANNEL(center_freq); + else if (freq_offset->offset[i] > DFS_OFFSET_SECOND_UPPER) + freq_offset->freq[i] = + DFS_THIRD_UPPER_CHANNEL(center_freq); + } +} + +/** + * dfs_radar_chan_for_40()- Find frequency offsets for 40MHz + * @freq_offset: freq offset + * @center_freq: center frequency + * + * Find frequency offsets for 40MHz + * + * Return: None + */ +static void dfs_radar_chan_for_40(struct freqs_offsets *freq_offset, + uint32_t center_freq) +{ + int i; + + for (i = 0; i < DFS_NUM_FREQ_OFFSET; i++) { + if (freq_offset->offset[i] < DFS_OFFSET_FIRST_LOWER) + freq_offset->freq[i] = + DFS_SECOND_LOWER_CHANNEL(center_freq); + else if ((freq_offset->offset[i] > DFS_OFFSET_FIRST_LOWER) && + (freq_offset->offset[i] < 0)) + freq_offset->freq[i] = + DFS_FIRST_LOWER_CHANNEL(center_freq); + else if ((freq_offset->offset[i] > 0) && + (freq_offset->offset[i] < DFS_OFFSET_FIRST_UPPER)) + freq_offset->freq[i] = + DFS_FIRST_UPPER_CHANNEL(center_freq); + else if (freq_offset->offset[i] > DFS_OFFSET_FIRST_UPPER) + freq_offset->freq[i] = + DFS_SECOND_UPPER_CHANNEL(center_freq); + } +} + +/** + * dfs_radar_chan_for_20()- Find frequency offsets for 20MHz + * @freq_offset: freq offset + * @center_freq: center frequency + * + * Find frequency offsets for 20MHz + * + * Return: None + */ +static void dfs_radar_chan_for_20(struct freqs_offsets *freq_offset, + uint32_t center_freq) +{ + int i; + + for (i = 0; i < DFS_NUM_FREQ_OFFSET; i++) { + if (freq_offset->offset[i] <= DFS_20MZ_OFFSET_LOWER) + freq_offset->freq[i] = + DFS_20MHZ_LOWER_CHANNEL(center_freq); + else if ((freq_offset->offset[i] > DFS_20MZ_OFFSET_LOWER) && + (freq_offset->offset[i] < DFS_20MZ_OFFSET_UPPER)) + freq_offset->freq[i] = center_freq; + else if (freq_offset->offset[i] >= DFS_20MZ_OFFSET_UPPER) + freq_offset->freq[i] = + DFS_20MHZ_UPPER_CHANNEL(center_freq); + } +} + +/* dfs_compute_radar_found_cfreq(): Computes the centre frequency of the + * radar hit channel. + * @dfs: Pointer to wlan_dfs structure. + * @radar_found: Pointer to radar_found_info. + * @freq_center: Pointer to retrieve the value of radar found cfreq. + */ +#ifdef CONFIG_CHAN_FREQ_API +static void +dfs_compute_radar_found_cfreq(struct wlan_dfs *dfs, + struct radar_found_info *radar_found, + uint32_t *freq_center) +{ + struct dfs_channel *curchan = dfs->dfs_curchan; + + /* Radar found on agile detector ID. + * Applicable to chips that have a separate agile radar detector + * engine. + */ + if (radar_found->detector_id == AGILE_DETECTOR_ID) { + *freq_center = dfs->dfs_agile_precac_freq_mhz; + } else if (!radar_found->segment_id) { + *freq_center = curchan->dfs_ch_mhz_freq_seg1; + } else { + /* Radar found on secondary segment by the HW when + * preCAC was running. It (dfs_precac_enable) is specific to + * legacy chips. + */ + if (dfs_is_precac_timer_running(dfs) && + dfs_is_legacy_precac_enabled(dfs)) { + *freq_center = dfs->dfs_precac_secondary_freq_mhz; + } else { + /* Radar found on secondary segment by the HW, when preCAC + * was not running in legacy chips or preCAC was running + * in Lithium chips. + */ + *freq_center = curchan->dfs_ch_mhz_freq_seg2; + if (WLAN_IS_CHAN_MODE_160(curchan)) { + /* If center frequency of entire 160 band + * is less than center frequency of primary + * segment, then the center frequency of + * secondary segment is -40 of center + * frequency of entire 160 segment. + */ + if (curchan->dfs_ch_mhz_freq_seg2 < + curchan->dfs_ch_mhz_freq_seg1) + *freq_center -= + DFS_160MHZ_SECOND_SEG_OFFSET; + else + *freq_center += + DFS_160MHZ_SECOND_SEG_OFFSET; + } + } + } +} +#else +#ifdef CONFIG_CHAN_NUM_API +static void +dfs_compute_radar_found_cfreq(struct wlan_dfs *dfs, + struct radar_found_info + *radar_found, + uint32_t *freq_center) +{ + struct dfs_channel *curchan = dfs->dfs_curchan; + /* Radar found on agile detector ID. + * Applicable to chips that have a separate agile radar detector + * engine. + */ + if (radar_found->detector_id == AGILE_DETECTOR_ID) { + *freq_center = utils_dfs_chan_to_freq( + dfs->dfs_agile_precac_freq); + /* Radar found on primary segment by the HW. */ + } else if (radar_found->segment_id == PRIMARY_SEG) { + *freq_center = utils_dfs_chan_to_freq( + curchan->dfs_ch_vhtop_ch_freq_seg1); + } else { + /* Radar found on secondary segment by the HW when + * preCAC was running. It (dfs_precac_enable) is specific to + * legacy chips. + */ + if (dfs_is_precac_timer_running(dfs) && + dfs_is_legacy_precac_enabled(dfs)) { + *freq_center = utils_dfs_chan_to_freq( + dfs->dfs_precac_secondary_freq); + } else { + /* Radar found on secondary segment by the HW, when preCAC + * was not running in legacy chips or preCAC was running + * in Lithium chips. + */ + *freq_center = utils_dfs_chan_to_freq( + curchan->dfs_ch_vhtop_ch_freq_seg2); + if (WLAN_IS_CHAN_MODE_160(curchan)) { + /* If center frequency of entire 160 band + * is less than center frequency of primary + * segment, then the center frequency of + * secondary segment is -40 of center + * frequency of entire 160 segment. + */ + if (curchan->dfs_ch_vhtop_ch_freq_seg2 < + curchan->dfs_ch_vhtop_ch_freq_seg1) + *freq_center -= + DFS_160MHZ_SECOND_SEG_OFFSET; + else + *freq_center += + DFS_160MHZ_SECOND_SEG_OFFSET; + } + } + } +} +#endif +#endif + +/** + * dfs_find_radar_affected_subchans_for_freq() - Find radar affected sub chans. + * @dfs: Pointer to wlan_dfs structure. + * @radar_found: Pointer to radar_found structure. + * @freq_list: Pointer to save radar affected channels. + * @freq_center: Freq_center of the radar affected chan. + * + * Return: Number of channels. + */ +#ifdef CONFIG_CHAN_FREQ_API +static uint8_t +dfs_find_radar_affected_subchans_for_freq(struct wlan_dfs *dfs, + struct radar_found_info *radar_found, + uint16_t *freq_list, + uint32_t freq_center) +{ + int i, j; + uint8_t num_radar_subchans; + uint32_t flag; + int32_t sidx; + uint16_t candidate_subchan_freq; + uint16_t cur_subchans[NUM_CHANNELS_160MHZ]; + uint8_t n_cur_subchans; + struct dfs_channel *curchan = dfs->dfs_curchan; + struct freqs_offsets freq_offset; + + qdf_mem_zero(&freq_offset, sizeof(freq_offset)); + flag = curchan->dfs_ch_flags; + + for (i = 0; i < DFS_NUM_FREQ_OFFSET; i++) + freq_offset.offset[i] = radar_found->freq_offset; + + sidx = DFS_FREQ_OFFSET_TO_SIDX(radar_found->freq_offset); + + dfs_info(dfs, WLAN_DEBUG_DFS, + "seg=%d, det=%d, sidx=%d, offset=%d, chirp=%d, flag=%d, f=%d", + radar_found->segment_id, radar_found->detector_id, sidx, + radar_found->freq_offset, radar_found->is_chirp, + flag, freq_center); + + if ((WLAN_IS_CHAN_A(curchan)) || + WLAN_IS_CHAN_MODE_20(curchan)) { + if (radar_found->is_chirp || + (sidx && !(abs(sidx) % DFS_BOUNDARY_SIDX))) { + freq_offset.offset[LEFT_CH] -= DFS_CHIRP_OFFSET; + freq_offset.offset[RIGHT_CH] += DFS_CHIRP_OFFSET; + } + dfs_radar_chan_for_20(&freq_offset, freq_center); + } else if (WLAN_IS_CHAN_MODE_40(curchan)) { + if (radar_found->is_chirp || !(abs(sidx) % DFS_BOUNDARY_SIDX)) { + freq_offset.offset[LEFT_CH] -= DFS_CHIRP_OFFSET; + freq_offset.offset[RIGHT_CH] += DFS_CHIRP_OFFSET; + } + dfs_radar_chan_for_40(&freq_offset, freq_center); + } else if (WLAN_IS_CHAN_MODE_80(curchan) || + WLAN_IS_CHAN_MODE_160(curchan) || + WLAN_IS_CHAN_MODE_80_80(curchan)) { + if (radar_found->is_chirp || !(abs(sidx) % DFS_BOUNDARY_SIDX)) { + freq_offset.offset[LEFT_CH] -= DFS_CHIRP_OFFSET; + freq_offset.offset[RIGHT_CH] += DFS_CHIRP_OFFSET; + } + dfs_radar_chan_for_80(&freq_offset, freq_center); + } else { + dfs_err(dfs, WLAN_DEBUG_DFS, + "channel flag=%d is invalid", flag); + return 0; + } + + n_cur_subchans = + dfs_get_bonding_channels_for_freq(dfs, curchan, + radar_found->segment_id, + radar_found->detector_id, + cur_subchans); + + for (i = 0, num_radar_subchans = 0; i < DFS_NUM_FREQ_OFFSET; i++) { + candidate_subchan_freq = freq_offset.freq[i]; + for (j = 0; j < n_cur_subchans; j++) { + if (cur_subchans[j] == candidate_subchan_freq) { + freq_list[num_radar_subchans++] = + candidate_subchan_freq; + dfs_info(dfs, WLAN_DEBUG_DFS, + "offset=%d, channel=%d", + num_radar_subchans, + freq_list[num_radar_subchans - 1]); + break; + } + } + } + return num_radar_subchans; +} +#endif + +#ifdef CONFIG_CHAN_NUM_API +uint8_t dfs_get_bonding_channels_without_seg_info(struct dfs_channel *chan, + uint8_t *channels) +{ + uint8_t center_chan; + uint8_t nchannels = 0; + + center_chan = chan->dfs_ch_vhtop_ch_freq_seg1; + + if (WLAN_IS_CHAN_MODE_20(chan)) { + nchannels = 1; + channels[0] = center_chan; + } else if (WLAN_IS_CHAN_MODE_40(chan)) { + nchannels = 2; + channels[0] = center_chan - DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[1] = center_chan + DFS_5GHZ_NEXT_CHAN_OFFSET; + } else if (WLAN_IS_CHAN_MODE_80(chan)) { + nchannels = 4; + channels[0] = center_chan - DFS_5GHZ_2ND_CHAN_OFFSET; + channels[1] = center_chan - DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[2] = center_chan + DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[3] = center_chan + DFS_5GHZ_2ND_CHAN_OFFSET; + } else if (WLAN_IS_CHAN_MODE_80_80(chan)) { + nchannels = 8; + channels[0] = center_chan - DFS_5GHZ_2ND_CHAN_OFFSET; + channels[1] = center_chan - DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[2] = center_chan + DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[3] = center_chan + DFS_5GHZ_2ND_CHAN_OFFSET; + center_chan = chan->dfs_ch_vhtop_ch_freq_seg2; + channels[4] = center_chan - DFS_5GHZ_2ND_CHAN_OFFSET; + channels[5] = center_chan - DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[6] = center_chan + DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[7] = center_chan + DFS_5GHZ_2ND_CHAN_OFFSET; + } else if (WLAN_IS_CHAN_MODE_160(chan)) { + nchannels = 8; + center_chan = chan->dfs_ch_vhtop_ch_freq_seg2; + channels[0] = center_chan - DFS_5GHZ_4TH_CHAN_OFFSET; + channels[1] = center_chan - DFS_5GHZ_3RD_CHAN_OFFSET; + channels[2] = center_chan - DFS_5GHZ_2ND_CHAN_OFFSET; + channels[3] = center_chan - DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[4] = center_chan + DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[5] = center_chan + DFS_5GHZ_2ND_CHAN_OFFSET; + channels[6] = center_chan + DFS_5GHZ_3RD_CHAN_OFFSET; + channels[7] = center_chan + DFS_5GHZ_4TH_CHAN_OFFSET; + } + + return nchannels; +} +#endif + +/* + * dfs_get_bonding_channel_without_seg_info_for_freq() - Get bonding frequency + * list. + * @chan: Pointer to dfs_channel. + * @freq_list: Pointer to frequency list. + */ +#ifdef CONFIG_CHAN_FREQ_API +uint8_t +dfs_get_bonding_channel_without_seg_info_for_freq(struct dfs_channel *chan, + uint16_t *freq_list) +{ + uint16_t center_freq; + uint8_t nchannels = 0; + + center_freq = chan->dfs_ch_mhz_freq_seg1; + + if (WLAN_IS_CHAN_MODE_20(chan)) { + nchannels = 1; + freq_list[0] = center_freq; + } else if (WLAN_IS_CHAN_MODE_40(chan)) { + nchannels = 2; + freq_list[0] = center_freq - DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + freq_list[1] = center_freq + DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + } else if (WLAN_IS_CHAN_MODE_80(chan)) { + nchannels = 4; + freq_list[0] = center_freq - DFS_5GHZ_2ND_CHAN_FREQ_OFFSET; + freq_list[1] = center_freq - DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + freq_list[2] = center_freq + DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + freq_list[3] = center_freq + DFS_5GHZ_2ND_CHAN_FREQ_OFFSET; + } else if (WLAN_IS_CHAN_MODE_80_80(chan)) { + nchannels = 8; + freq_list[0] = center_freq - DFS_5GHZ_2ND_CHAN_FREQ_OFFSET; + freq_list[1] = center_freq - DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + freq_list[2] = center_freq + DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + freq_list[3] = center_freq + DFS_5GHZ_2ND_CHAN_FREQ_OFFSET; + center_freq = chan->dfs_ch_mhz_freq_seg2; + freq_list[4] = center_freq - DFS_5GHZ_2ND_CHAN_FREQ_OFFSET; + freq_list[5] = center_freq - DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + freq_list[6] = center_freq + DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + freq_list[7] = center_freq + DFS_5GHZ_2ND_CHAN_FREQ_OFFSET; + } else if (WLAN_IS_CHAN_MODE_160(chan)) { + nchannels = 8; + center_freq = chan->dfs_ch_mhz_freq_seg2; + freq_list[0] = center_freq - DFS_5GHZ_4TH_CHAN_FREQ_OFFSET; + freq_list[1] = center_freq - DFS_5GHZ_3RD_CHAN_FREQ_OFFSET; + freq_list[2] = center_freq - DFS_5GHZ_2ND_CHAN_FREQ_OFFSET; + freq_list[3] = center_freq - DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + freq_list[4] = center_freq + DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + freq_list[5] = center_freq + DFS_5GHZ_2ND_CHAN_FREQ_OFFSET; + freq_list[6] = center_freq + DFS_5GHZ_3RD_CHAN_FREQ_OFFSET; + freq_list[7] = center_freq + DFS_5GHZ_4TH_CHAN_FREQ_OFFSET; + } + + return nchannels; +} +#endif + +/* + * dfs_get_bonding_channels_for_freq() - Get bonding channel frequency. + * @dfs: Pointer to wlan_dfs. + * @curchan: Pointer to dfs_channel. + * @segment_id: Segment ID. + * @detector_id: Detector ID. + * @freq_list: Pointer to frequency list. + */ +#ifdef CONFIG_CHAN_FREQ_API +uint8_t dfs_get_bonding_channels_for_freq(struct wlan_dfs *dfs, + struct dfs_channel *curchan, + uint32_t segment_id, + uint8_t detector_id, + uint16_t *freq_list) +{ + uint16_t center_freq; + uint8_t nchannels = 0; + + if (detector_id == AGILE_DETECTOR_ID) + center_freq = dfs->dfs_agile_precac_freq_mhz; + else if (!segment_id) + center_freq = curchan->dfs_ch_mhz_freq_seg1; + else { + /* When precac is running "dfs_ch_vhtop_ch_freq_seg2" is + * zero and "dfs_precac_secondary_freq" holds the secondary + * frequency. + */ + if (dfs_is_precac_timer_running(dfs)) + center_freq = dfs->dfs_precac_secondary_freq_mhz; + else + center_freq = curchan->dfs_ch_mhz_freq_seg2; + } + + if (WLAN_IS_CHAN_MODE_20(curchan)) { + nchannels = 1; + freq_list[0] = center_freq; + } else if (WLAN_IS_CHAN_MODE_40(curchan)) { + nchannels = 2; + freq_list[0] = center_freq - DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + freq_list[1] = center_freq + DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + } else if (WLAN_IS_CHAN_MODE_80(curchan) || + WLAN_IS_CHAN_MODE_80_80(curchan) || + detector_id == AGILE_DETECTOR_ID) { + /* If the current channel's bandwidth is 80/80+80/160Mhz, + * the corresponding agile Detector's bandwidth will be 80Mhz. + * Therefore, if radar is found on the agile detector find + * subchannels for 80Mhz bandwidth. + */ + nchannels = 4; + freq_list[0] = center_freq - DFS_5GHZ_2ND_CHAN_FREQ_OFFSET; + freq_list[1] = center_freq - DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + freq_list[2] = center_freq + DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + freq_list[3] = center_freq + DFS_5GHZ_2ND_CHAN_FREQ_OFFSET; + } else if (WLAN_IS_CHAN_MODE_160(curchan)) { + nchannels = 8; + center_freq = curchan->dfs_ch_mhz_freq_seg2; + freq_list[0] = center_freq - DFS_5GHZ_4TH_CHAN_FREQ_OFFSET; + freq_list[1] = center_freq - DFS_5GHZ_3RD_CHAN_FREQ_OFFSET; + freq_list[2] = center_freq - DFS_5GHZ_2ND_CHAN_FREQ_OFFSET; + freq_list[3] = center_freq - DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + freq_list[4] = center_freq + DFS_5GHZ_NEXT_CHAN_FREQ_OFFSET; + freq_list[5] = center_freq + DFS_5GHZ_2ND_CHAN_FREQ_OFFSET; + freq_list[6] = center_freq + DFS_5GHZ_3RD_CHAN_FREQ_OFFSET; + freq_list[7] = center_freq + DFS_5GHZ_4TH_CHAN_FREQ_OFFSET; + } + + return nchannels; +} +#endif + +#ifdef CONFIG_CHAN_NUM_API +uint8_t dfs_get_bonding_channels(struct wlan_dfs *dfs, + struct dfs_channel *curchan, + uint32_t segment_id, + uint8_t detector_id, + uint8_t *channels) +{ + uint8_t center_chan; + uint8_t nchannels = 0; + + if (detector_id == AGILE_DETECTOR_ID) + center_chan = dfs->dfs_agile_precac_freq; + else if (!segment_id) + center_chan = curchan->dfs_ch_vhtop_ch_freq_seg1; + else { + /* When precac is running "dfs_ch_vhtop_ch_freq_seg2" is + * zero and "dfs_precac_secondary_freq" holds the secondary + * frequency in case of legacy chips. + * For chips that support a separate agile detector engine, + * "dfs_agile_precac_freq" holds the frequency that agile + * engine operates on. + * + * In case of radar detected by the HW in the secondary 80 + * channel,"dfs_ch_vhtop_ch_freq_seg2" holds the secondary + * segment center frequency in the below cases: + * 1. preCAC timer is running in chips that support separate + * agile engines. + * 2. preCAC timer is not running. + */ + if (dfs_is_precac_timer_running(dfs) && + dfs_is_legacy_precac_enabled(dfs)) + center_chan = dfs->dfs_precac_secondary_freq; + else + center_chan = curchan->dfs_ch_vhtop_ch_freq_seg2; + } + + if (WLAN_IS_CHAN_MODE_20(curchan)) { + nchannels = 1; + channels[0] = center_chan; + } else if (WLAN_IS_CHAN_MODE_40(curchan)) { + nchannels = 2; + channels[0] = center_chan - DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[1] = center_chan + DFS_5GHZ_NEXT_CHAN_OFFSET; + } else if (WLAN_IS_CHAN_MODE_80(curchan) || + WLAN_IS_CHAN_MODE_80_80(curchan) || + detector_id == AGILE_DETECTOR_ID) { + /* If the current channel's bandwidth is 80/80+80/160Mhz, + * the corresponding agile Detector's bandwidth will be 80Mhz. + * Therefore, if radar is found on the agile detector find + * subchannels for 80Mhz bandwidth. + */ + nchannels = 4; + channels[0] = center_chan - DFS_5GHZ_2ND_CHAN_OFFSET; + channels[1] = center_chan - DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[2] = center_chan + DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[3] = center_chan + DFS_5GHZ_2ND_CHAN_OFFSET; + } else if (WLAN_IS_CHAN_MODE_160(curchan)) { + nchannels = 8; + center_chan = curchan->dfs_ch_vhtop_ch_freq_seg2; + channels[0] = center_chan - DFS_5GHZ_4TH_CHAN_OFFSET; + channels[1] = center_chan - DFS_5GHZ_3RD_CHAN_OFFSET; + channels[2] = center_chan - DFS_5GHZ_2ND_CHAN_OFFSET; + channels[3] = center_chan - DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[4] = center_chan + DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[5] = center_chan + DFS_5GHZ_2ND_CHAN_OFFSET; + channels[6] = center_chan + DFS_5GHZ_3RD_CHAN_OFFSET; + channels[7] = center_chan + DFS_5GHZ_4TH_CHAN_OFFSET; + } + + return nchannels; +} +#endif + +static inline void dfs_reset_bangradar(struct wlan_dfs *dfs) +{ + dfs->dfs_bangradar_type = DFS_NO_BANGRADAR; +} + +int dfs_radarevent_basic_sanity(struct wlan_dfs *dfs, + struct dfs_channel *chan) +{ + if (!(dfs->dfs_seg_id == SEG_ID_SECONDARY && + dfs_is_precac_timer_running(dfs))) + if (!(WLAN_IS_PRIMARY_OR_SECONDARY_CHAN_DFS(chan))) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "radar event on non-DFS chan"); + if (!(dfs->dfs_is_offload_enabled)) { + dfs_reset_radarq(dfs); + dfs_reset_alldelaylines(dfs); + dfs_reset_bangradar(dfs); + } + return 0; + } + + return 1; +} + +/** + * dfs_send_csa_to_current_chan() - Send CSA to current channel + * @dfs: Pointer to wlan_dfs structure. + * + * For the test mode(usenol = 0), don't do a CSA; but setup the test timer so + * we get a CSA _back_ to the current operating channel. + */ +static inline void dfs_send_csa_to_current_chan(struct wlan_dfs *dfs) +{ + qdf_timer_stop(&dfs->wlan_dfstesttimer); + dfs->wlan_dfstest = 1; + dfs->wlan_dfstest_ieeechan = dfs->dfs_curchan->dfs_ch_ieee; + dfs->wlan_dfstesttime = 1; /* 1ms */ + qdf_timer_mod(&dfs->wlan_dfstesttimer, dfs->wlan_dfstesttime); +} + +int dfs_second_segment_radar_disable(struct wlan_dfs *dfs) +{ + dfs->dfs_proc_phyerr &= ~DFS_SECOND_SEGMENT_RADAR_EN; + + return 0; +} + +/* dfs_prepare_nol_ie_bitmap: Create a Bitmap from the radar found subchannels + * to be sent along with RCSA. + * @dfs: Pointer to wlan_dfs. + * @radar_found: Pointer to radar_found_info. + * @in_sub_channels: Pointer to Sub-channels. + * @n_in_sub_channels: Number of sub-channels. + */ +#ifdef CONFIG_CHAN_FREQ_API +static void +dfs_prepare_nol_ie_bitmap_for_freq(struct wlan_dfs *dfs, + struct radar_found_info *radar_found, + uint16_t *in_sub_channels, + uint8_t n_in_sub_channels) +{ + uint16_t cur_subchans[NUM_CHANNELS_160MHZ]; + uint8_t n_cur_subchans; + uint8_t i; + uint8_t j; + uint8_t bits = 0x01; + + n_cur_subchans = + dfs_get_bonding_channels_for_freq(dfs, dfs->dfs_curchan, + radar_found->segment_id, + radar_found->detector_id, + cur_subchans); + dfs->dfs_nol_ie_bandwidth = MIN_DFS_SUBCHAN_BW; + dfs->dfs_nol_ie_startfreq = cur_subchans[0]; + + /* Search through the array list of radar affected subchannels + * to find if the subchannel in our current channel has radar hit. + * Break if found to reduce loop count. + */ + for (i = 0; i < n_cur_subchans; i++) { + for (j = 0; j < n_in_sub_channels; j++) { + if (cur_subchans[i] == in_sub_channels[j]) { + dfs->dfs_nol_ie_bitmap |= bits; + break; + } + } + bits <<= 1; + } +} +#endif + +void dfs_fetch_nol_ie_info(struct wlan_dfs *dfs, + uint8_t *nol_ie_bandwidth, + uint16_t *nol_ie_startfreq, + uint8_t *nol_ie_bitmap) +{ + if (nol_ie_bandwidth) + *nol_ie_bandwidth = dfs->dfs_nol_ie_bandwidth; + if (nol_ie_startfreq) + *nol_ie_startfreq = dfs->dfs_nol_ie_startfreq; + if (nol_ie_bitmap) + *nol_ie_bitmap = dfs->dfs_nol_ie_bitmap; +} + +void dfs_get_rcsa_flags(struct wlan_dfs *dfs, bool *is_rcsa_ie_sent, + bool *is_nol_ie_sent) +{ + if (is_rcsa_ie_sent) + *is_rcsa_ie_sent = dfs->dfs_is_rcsa_ie_sent; + if (is_nol_ie_sent) + *is_nol_ie_sent = dfs->dfs_is_nol_ie_sent; +} + +void dfs_set_rcsa_flags(struct wlan_dfs *dfs, bool is_rcsa_ie_sent, + bool is_nol_ie_sent) +{ + dfs->dfs_is_rcsa_ie_sent = is_rcsa_ie_sent; + dfs->dfs_is_nol_ie_sent = is_nol_ie_sent; +} + +static void dfs_reset_nol_ie_bitmap(struct wlan_dfs *dfs) +{ + dfs->dfs_nol_ie_bitmap = 0; +} + +#ifdef CONFIG_CHAN_FREQ_API +bool dfs_process_nol_ie_bitmap(struct wlan_dfs *dfs, uint8_t nol_ie_bandwidth, + uint16_t nol_ie_startfreq, uint8_t nol_ie_bitmap) +{ + uint8_t num_subchans; + uint8_t bits = 0x01; + uint16_t radar_subchans[NUM_CHANNELS_160MHZ]; + bool should_nol_ie_be_sent = true; + + qdf_mem_zero(radar_subchans, sizeof(radar_subchans)); + if (!dfs->dfs_use_nol_subchannel_marking) { + /* Since subchannel marking is disabled, disregard + * NOL IE and set NOL IE flag as false, so it + * can't be sent to uplink. + */ + num_subchans = + dfs_get_bonding_channels_for_freq(dfs, + dfs->dfs_curchan, + SEG_ID_PRIMARY, + DETECTOR_ID_0, + radar_subchans); + should_nol_ie_be_sent = false; + } else { + /* Add the NOL IE information in DFS structure so that RCSA + * and NOL IE can be sent to uplink if uplink exists. + */ + uint32_t frequency = (uint32_t)nol_ie_startfreq; + + dfs->dfs_nol_ie_bandwidth = nol_ie_bandwidth; + dfs->dfs_nol_ie_startfreq = nol_ie_startfreq; + dfs->dfs_nol_ie_bitmap = nol_ie_bitmap; + for (num_subchans = 0; num_subchans < NUM_CHANNELS_160MHZ; + num_subchans++) { + if (nol_ie_bitmap & bits) { + radar_subchans[num_subchans] = frequency; + } + bits <<= 1; + frequency += nol_ie_bandwidth; + } + } + + dfs_radar_add_channel_list_to_nol_for_freq(dfs, radar_subchans, + num_subchans); + return should_nol_ie_be_sent; +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS dfs_process_radar_ind(struct wlan_dfs *dfs, + struct radar_found_info *radar_found) +{ + bool wait_for_csa = false; + uint16_t freq_list[NUM_CHANNELS_160MHZ]; + uint8_t num_channels; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + uint32_t freq_center; + uint32_t radarfound_freq; + struct dfs_channel *dfs_curchan; + + /* Acquire a lock to avoid initiating mode switch till radar + * processing is completed. + */ + DFS_RADAR_MODE_SWITCH_LOCK(dfs); + + /* Before processing radar, check if HW mode switch is in progress. + * If in progress, defer the processing of radar event received till + * the mode switch is completed. + */ + if (dfs_is_hw_mode_switch_in_progress(dfs)) { + struct radar_found_info *radar_params = NULL; + + radar_params = qdf_mem_malloc(sizeof(*radar_params)); + if (!radar_params) + goto exit; + + /* If CAC timer is running, cancel it here rather than + * after processing to avoid handling unnecessary CAC timeouts. + */ + if (dfs->dfs_cac_timer_running) + dfs_cac_stop(dfs); + + /* If CAC timer is to be handled after mode switch and then + * we receive radar, no point in handling CAC completion. + */ + if (dfs->dfs_defer_params.is_cac_completed) + dfs->dfs_defer_params.is_cac_completed = false; + qdf_mem_copy(radar_params, radar_found, sizeof(*radar_params)); + dfs->dfs_defer_params.radar_params = radar_params; + dfs->dfs_defer_params.is_radar_detected = true; + status = QDF_STATUS_SUCCESS; + goto exit; + } + + dfs_curchan = dfs->dfs_curchan; + + if (!dfs_curchan) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs->dfs_curchan is NULL"); + goto exit; + } + + /* Check if the current channel is a non DFS channel + * If the current channel is non-DFS and the radar is from Agile + * Detector we need to process it since Agile Detector has a + * different channel. + */ + if (!dfs_radarevent_basic_sanity(dfs, dfs_curchan) && + !(radar_found->detector_id == AGILE_DETECTOR_ID)) { + dfs_err(dfs, WLAN_DEBUG_DFS, + "radar event on a non-DFS channel"); + goto exit; + } + + /* Sanity checks for radar on Agile detector */ + if (radar_found->detector_id == AGILE_DETECTOR_ID && + (!dfs_is_agile_precac_enabled(dfs) || !dfs->dfs_agile_precac_freq_mhz)) + { + dfs_err(dfs, WLAN_DEBUG_DFS, + "radar on Agile detector when ADFS is not running"); + goto exit; + } + + /* For Full Offload, FW sends segment id,freq_offset and chirp + * information and gets assigned when there is radar detect. In + * case of radartool bangradar enhanced command and real radar + * for DA and PO, we assign these information here. + */ + if (!(dfs->dfs_is_offload_enabled && dfs->dfs_radar_found_for_fo)) { + radar_found->segment_id = dfs->dfs_seg_id; + radar_found->freq_offset = dfs->dfs_freq_offset; + radar_found->is_chirp = dfs->dfs_is_chirp; + } + + dfs_compute_radar_found_cfreq(dfs, radar_found, &freq_center); + radarfound_freq = freq_center + radar_found->freq_offset; + + if (radar_found->detector_id == AGILE_DETECTOR_ID) + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Radar found on Agile detector freq=%d radar freq=%d", + freq_center, radarfound_freq); + else if (radar_found->segment_id == SEG_ID_SECONDARY) + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Radar found on second segment.Radarfound Freq=%d MHz.Secondary Chan cfreq=%d MHz.", + radarfound_freq, freq_center); + else + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Radar found on channel=%d, freq=%d MHz. Primary beaconning chan:%d, freq=%d MHz.", + utils_dfs_freq_to_chan(radarfound_freq), + radarfound_freq, dfs_curchan->dfs_ch_ieee, + dfs_curchan->dfs_ch_freq); + + utils_dfs_deliver_event(dfs->dfs_pdev_obj, radarfound_freq, + WLAN_EV_RADAR_DETECTED); + + if (!dfs->dfs_use_nol) { + dfs_reset_bangradar(dfs); + dfs_send_csa_to_current_chan(dfs); + status = QDF_STATUS_SUCCESS; + goto exit; + } + + if (dfs->dfs_bangradar_type == DFS_BANGRADAR_FOR_ALL_SUBCHANS) + num_channels = + dfs_get_bonding_channel_without_seg_info_for_freq + (dfs_curchan, freq_list); + /* BW reduction is dependent on subchannel marking */ + else if ((dfs->dfs_use_nol_subchannel_marking) && + (!(dfs->dfs_bangradar_type) || + (dfs->dfs_bangradar_type == + DFS_BANGRADAR_FOR_SPECIFIC_SUBCHANS))) + num_channels = + dfs_find_radar_affected_subchans_for_freq(dfs, + radar_found, + freq_list, + freq_center); + else + num_channels = dfs_get_bonding_channels_for_freq + (dfs, dfs_curchan, radar_found->segment_id, + radar_found->detector_id, freq_list); + + dfs_reset_bangradar(dfs); + + status = dfs_radar_add_channel_list_to_nol_for_freq(dfs, + freq_list, + num_channels); + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(dfs, WLAN_DEBUG_DFS, + "radar event received on invalid channel"); + goto exit; + } + + dfs->dfs_is_nol_ie_sent = false; + (dfs->is_radar_during_precac || + radar_found->detector_id == AGILE_DETECTOR_ID) ? + (dfs->dfs_is_rcsa_ie_sent = false) : + (dfs->dfs_is_rcsa_ie_sent = true); + if (dfs->dfs_use_nol_subchannel_marking) { + dfs_reset_nol_ie_bitmap(dfs); + dfs_prepare_nol_ie_bitmap_for_freq(dfs, radar_found, freq_list, + num_channels); + dfs->dfs_is_nol_ie_sent = true; + } + + /* + * If precac is running and the radar found in secondary + * VHT80 mark the channel as radar and add to NOL list. + * Otherwise random channel selection can choose this + * channel. + */ + dfs_debug(dfs, WLAN_DEBUG_DFS, + "found_on_second=%d is_pre=%d", + dfs->is_radar_found_on_secondary_seg, + dfs_is_precac_timer_running(dfs)); + /* + * Even if radar found on primary, we need to mark the channel as NOL + * in preCAC list. The preCAC list also maintains the current CAC + * channels as part of pre-cleared DFS. Hence call the API + * to mark channels as NOL irrespective of preCAC being enabled or not. + */ + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "%s: %d Radar found on dfs detector:%d", + __func__, __LINE__, radar_found->detector_id); + dfs_mark_precac_nol_for_freq(dfs, + dfs->is_radar_found_on_secondary_seg, + radar_found->detector_id, + freq_list, + num_channels); + /* + * This calls into the umac DFS code, which sets the umac + * related radar flags and begins the channel change + * machinery. + + * Even during precac, this API is called, but with a flag + * saying not to send RCSA, but only the radar affected subchannel + * information. + */ + + dfs_mlme_start_rcsa(dfs->dfs_pdev_obj, &wait_for_csa); + + /* If radar is found on preCAC or Agile CAC, return here since + * channel change is not required. + */ + if (radar_found->detector_id == AGILE_DETECTOR_ID) + goto exit; + if (!dfs->dfs_is_offload_enabled && + dfs->is_radar_found_on_secondary_seg) { + dfs_second_segment_radar_disable(dfs); + dfs->is_radar_found_on_secondary_seg = 0; + + if (dfs->is_radar_during_precac) { + dfs->is_radar_during_precac = 0; + goto exit; + } + } + + /* + * XXX TODO: the umac NOL code isn't used, but + * WLAN_CHAN_DFS_RADAR still gets set. Since the umac + * NOL code isn't used, that flag is never cleared. This + * needs to be fixed. See EV 105776. + */ + if (wait_for_csa) + goto exit; + + /* + * EV 129487 : We have detected radar in the channel, + * stop processing PHY error data as this can cause + * false detect in the new channel while channel + * change is in progress. + */ + + if (!dfs->dfs_is_offload_enabled) { + dfs_radar_disable(dfs); + dfs_second_segment_radar_disable(dfs); + } + + dfs_mlme_mark_dfs_for_freq(dfs->dfs_pdev_obj, + dfs->dfs_curchan->dfs_ch_ieee, + dfs->dfs_curchan->dfs_ch_freq, + dfs->dfs_curchan->dfs_ch_mhz_freq_seg2, + dfs->dfs_curchan->dfs_ch_flags); + +exit: + DFS_RADAR_MODE_SWITCH_UNLOCK(dfs); + return status; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_random_chan_sel.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_random_chan_sel.c new file mode 100644 index 0000000000000000000000000000000000000000..774006c2002e7a4e05934f2906d412fb0da876bd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_random_chan_sel.c @@ -0,0 +1,2280 @@ +/* + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "../dfs.h" +#include "../dfs_random_chan_sel.h" +#include +#include +#include "../dfs_process_radar_found_ind.h" + +#ifdef WLAN_ENABLE_CHNL_MATRIX_RESTRICTION +/* + * TODO: At present SAP Channel leakage matrix for ch 144 + * is not available from system's team. So to play it safe + * and avoid crash if channel 144 is request, in following + * matix channel 144 is added such that it will cause code + * to avoid selecting channel 144. + * + * THESE ENTRIES SHOULD BE REPLACED WITH CORRECT VALUES AS + * PROVIDED BY SYSTEM'S TEAM. + */ + +/* channel tx leakage table - ht80 */ +struct dfs_matrix_tx_leak_info ht80_chan[] = { + {52, 5260, + {{36, 5180, 148}, {40, 5200, 199}, + {44, 5520, 193}, {48, 5240, 197}, + {52, 5260, DFS_TX_LEAKAGE_MIN}, {56, 5280, 153}, + {60, 5300, 137}, {64, 5320, 134}, + {100, 5500, 358}, {104, 5520, 350}, + {108, 5540, 404}, {112, 5560, 344}, + {116, 5580, 424}, {120, 5600, 429}, + {124, 5620, 437}, {128, 5640, 435}, + {132, 5660, DFS_TX_LEAKAGE_MAX}, {136, 5680, DFS_TX_LEAKAGE_MAX}, + {140, 5700, DFS_TX_LEAKAGE_MAX}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + + {56, 5280, + {{36, 5180, 171}, {40, 5200, 178}, + {44, 5220, 171}, {48, 5240, 178}, + {52, 5260, DFS_TX_LEAKAGE_MIN}, {56, 5280, DFS_TX_LEAKAGE_MIN}, + {60, 5300, DFS_TX_LEAKAGE_MIN}, {64, 5320, 280}, + {100, 5500, 351}, {104, 5520, 376}, + {108, 5540, 362}, {112, 5560, 362}, + {116, 5580, 403}, {120, 5600, 397}, + {124, 5620, DFS_TX_LEAKAGE_MAX}, {128, 5640, DFS_TX_LEAKAGE_MAX}, + {132, 5660, DFS_TX_LEAKAGE_MAX}, {136, 5680, DFS_TX_LEAKAGE_MAX}, + {140, 5700, DFS_TX_LEAKAGE_MAX}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {60,5300, + {{36, 5180, 156}, {40, 5200, 146}, + {44, 5220, DFS_TX_LEAKAGE_MIN}, {48, 5240, DFS_TX_LEAKAGE_MIN}, + {52, 5260, 180}, {56, 5280, DFS_TX_LEAKAGE_MIN}, + {60, 5300, DFS_TX_LEAKAGE_MIN}, {64, 5320, DFS_TX_LEAKAGE_MIN}, + {100, 5500, 376}, {104, 5520, 360}, + {108, 5540, DFS_TX_LEAKAGE_MAX}, {112, 5560, DFS_TX_LEAKAGE_MAX}, + {116, 5580, 395}, {120, 5600, 399}, + {124, 5620, DFS_TX_LEAKAGE_MAX}, {128, 5640, DFS_TX_LEAKAGE_MAX}, + {132, 5660, DFS_TX_LEAKAGE_MAX}, {136, 5680, DFS_TX_LEAKAGE_MAX}, + {140, 5700, DFS_TX_LEAKAGE_MAX}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {64, 5320, + {{36, 5180, 217}, {40, 5200, 221}, + {44, 5220, DFS_TX_LEAKAGE_MIN}, {48, 5240, DFS_TX_LEAKAGE_MIN}, + {52, 5260, 176}, {56, 5280, 176}, + {60, 5300, DFS_TX_LEAKAGE_MIN}, {64, 5320, DFS_TX_LEAKAGE_MIN}, + {100, 5500, 384}, {104, 5520, 390}, + {108, 5540, DFS_TX_LEAKAGE_MAX}, {112, 5560, DFS_TX_LEAKAGE_MAX}, + {116, 5580, 375}, {120, 5600, 374}, + {124, 5620, DFS_TX_LEAKAGE_MAX}, {128, 5640, DFS_TX_LEAKAGE_MAX}, + {132, 5660, DFS_TX_LEAKAGE_MAX}, {136, 5680, DFS_TX_LEAKAGE_MAX}, + {140, 5700, DFS_TX_LEAKAGE_MAX}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {100, 5500, + {{36, 5180, 357}, {40, 5200, 326}, + {44, 5220, 321}, {48, 5240, 326}, + {52, 5260, 378}, {56, 5280, 396}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, DFS_TX_LEAKAGE_MIN}, {104, 5520, DFS_TX_LEAKAGE_MIN}, + {108, 5540, 196}, {112, 5560, 116}, + {116, 5580, 166}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {104, 5520, + {{36, 5180, 325}, {40, 5200, 325}, + {44, 5220, 305}, {48, 5240, 352}, + {52, 5260, 411}, {56, 5280, 411}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, DFS_TX_LEAKAGE_MIN}, {104, 5520, DFS_TX_LEAKAGE_MIN}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, 460}, + {116, 5580, 198}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {108, 5540, + {{36,5180, 304}, {40, 5200, 332}, + {44, 5220, 310}, {48, 5240, 335}, + {52, 5260, 431}, {56, 5280, 391}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, 280}, {104, 5520, DFS_TX_LEAKAGE_MIN}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, 185}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {112,5560, + {{36, 5180, 327}, {40, 5200, 335}, + {44, 5220, 331}, {48, 5240, 345}, + {52, 5260, 367}, {56, 5280, 401}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, 131}, {104, 5520, 132}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, 189}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {116, 5580, + {{36, 5180, 384}, {40, 5200, 372}, + {44, 5220, 389}, {48, 5240, 396}, + {52, 5260, 348}, {56, 5280, 336}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, 172}, {104, 5520, 169}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, DFS_TX_LEAKAGE_MIN}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {120, 5600, + {{36, 5180, 395}, {40, 5200, 419}, + {44, 5220, 439}, {48, 5240, 407}, + {52, 5260, 321}, {56, 5280, 334}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, 134}, {104, 5520, 186}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, DFS_TX_LEAKAGE_MIN}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, 159}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {124, 5620, + {{36, 5180, 469}, {40, 5200, 433}, + {44, 5220, 434}, {48, 5240, 435}, + {52, 5260, 332}, {56, 5280, 345}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, 146}, {104, 5520, 177}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, 350}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, 138}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {128, 5640, + {{36, 5180, 408}, {40, 5200, 434}, + {44, 5220, 449}, {48, 5240, 444}, + {52, 5260, 341}, {56, 5280, 374}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, 205}, {104, 5520, 208}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, 142}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {132, 5660, + {{36, 5180, DFS_TX_LEAKAGE_MAX}, {40, 5200, DFS_TX_LEAKAGE_MAX}, + {44, 5220, DFS_TX_LEAKAGE_MAX}, {48, 5240, DFS_TX_LEAKAGE_MAX}, + {52, 5260, DFS_TX_LEAKAGE_MAX}, {56, 5280, DFS_TX_LEAKAGE_MAX}, + {60, 5300, DFS_TX_LEAKAGE_MIN}, {64, 5320, DFS_TX_LEAKAGE_MIN}, + {100, 5500, DFS_TX_LEAKAGE_MIN}, {104, 5520, DFS_TX_LEAKAGE_MIN}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, DFS_TX_LEAKAGE_MIN}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {136, 5680, + {{36, 5180, DFS_TX_LEAKAGE_MAX}, {40, 5200, DFS_TX_LEAKAGE_MAX}, + {44, 5220, DFS_TX_LEAKAGE_MAX}, {48, 5240, DFS_TX_LEAKAGE_MAX}, + {52, 5260, DFS_TX_LEAKAGE_MAX}, {56, 5280, DFS_TX_LEAKAGE_MAX}, + {60, 5300, DFS_TX_LEAKAGE_MIN}, {64, 5320, DFS_TX_LEAKAGE_MIN}, + {100, 5500, DFS_TX_LEAKAGE_MIN}, {104, 5520, DFS_TX_LEAKAGE_MIN}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, DFS_TX_LEAKAGE_MIN}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {140, 5700, + {{36, 5180, DFS_TX_LEAKAGE_MAX}, {40, 5200, DFS_TX_LEAKAGE_MAX}, + {44, 5220, DFS_TX_LEAKAGE_MAX}, {48, 5240, DFS_TX_LEAKAGE_MAX}, + {52, 5260, DFS_TX_LEAKAGE_MAX}, {56, 5280, DFS_TX_LEAKAGE_MAX}, + {60, 5300, DFS_TX_LEAKAGE_MIN}, {64, 5320, DFS_TX_LEAKAGE_MIN}, + {100, 5500, DFS_TX_LEAKAGE_MIN}, {104, 5520, DFS_TX_LEAKAGE_MIN}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, DFS_TX_LEAKAGE_MIN}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {144, 5720, + {{36, 5180, DFS_TX_LEAKAGE_MAX}, {40, 5200, DFS_TX_LEAKAGE_MAX}, + {44, 5220, DFS_TX_LEAKAGE_MAX}, {48, 5240, DFS_TX_LEAKAGE_MAX}, + {52, 5260, DFS_TX_LEAKAGE_MAX}, {56, 5280, DFS_TX_LEAKAGE_MAX}, + {60, 5300, DFS_TX_LEAKAGE_MIN}, {64, 5320, DFS_TX_LEAKAGE_MIN}, + {100, 5500, DFS_TX_LEAKAGE_MIN}, {104, 5520, DFS_TX_LEAKAGE_MIN}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, DFS_TX_LEAKAGE_MIN}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, +}; + +/* channel tx leakage table - ht40 */ +struct dfs_matrix_tx_leak_info ht40_chan[] = { + {52, 5260, + {{36, 5180, DFS_TX_LEAKAGE_AUTO_MIN}, {40, 5200, DFS_TX_LEAKAGE_AUTO_MIN}, + {44, 5220, 230}, {48, 5240, 230}, + {52, 5260, DFS_TX_LEAKAGE_MIN}, {56, 5280, DFS_TX_LEAKAGE_MIN}, + {60, 5300, DFS_TX_LEAKAGE_AUTO_MIN}, {64, 5320, DFS_TX_LEAKAGE_AUTO_MIN}, + {100, 5500, 625}, {104, 5520, 323}, + {108, 5540, 646}, {112, 5560, 646}, + {116, 5580, DFS_TX_LEAKAGE_MAX}, {120, 5600, DFS_TX_LEAKAGE_MAX}, + {124, 5620, DFS_TX_LEAKAGE_MAX}, {128, 5640, DFS_TX_LEAKAGE_MAX}, + {132, 5660, DFS_TX_LEAKAGE_MAX}, {136, 5680, DFS_TX_LEAKAGE_MAX}, + {140, 5700, DFS_TX_LEAKAGE_MAX}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {56, 5280, + {{36, 5180, DFS_TX_LEAKAGE_AUTO_MIN}, {40, 5200, DFS_TX_LEAKAGE_AUTO_MIN}, + {44, 5220, DFS_TX_LEAKAGE_AUTO_MIN}, {48, 5240, DFS_TX_LEAKAGE_AUTO_MIN}, + {52, 5260, DFS_TX_LEAKAGE_MIN}, {56, 5280, DFS_TX_LEAKAGE_MIN}, + {60, 5300, DFS_TX_LEAKAGE_MIN}, {64, 5320, DFS_TX_LEAKAGE_MIN}, + {100, 5500, 611}, {104, 5520, 611}, + {108, 5540, 617}, {112, 5560, 617}, + {116, 5580, DFS_TX_LEAKAGE_MAX}, {120, 5600, DFS_TX_LEAKAGE_MAX}, + {124, 5620, DFS_TX_LEAKAGE_MAX}, {128, 5640, DFS_TX_LEAKAGE_MAX}, + {132, 5660, DFS_TX_LEAKAGE_MAX}, {136, 5680, DFS_TX_LEAKAGE_MAX}, + {140, 5700, DFS_TX_LEAKAGE_MAX}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {60, 5300, + {{36, 5180, DFS_TX_LEAKAGE_AUTO_MIN}, {40, 5200, DFS_TX_LEAKAGE_AUTO_MIN}, + {44, 5220, DFS_TX_LEAKAGE_AUTO_MIN}, {48, 5240, DFS_TX_LEAKAGE_AUTO_MIN}, + {52, 5260, 190}, {56, 5280, 190}, + {60, 5300, DFS_TX_LEAKAGE_MIN}, {64, 5320, DFS_TX_LEAKAGE_MIN}, + {100, 5500, 608}, {104, 5520, 608}, + {108, 5540, 623}, {112, 5560, 623}, + {116, 5580, DFS_TX_LEAKAGE_MAX}, {120, 5600, DFS_TX_LEAKAGE_MAX}, + {124, 5620, DFS_TX_LEAKAGE_MAX}, {128, 5640, DFS_TX_LEAKAGE_MAX}, + {132, 5660, DFS_TX_LEAKAGE_MAX}, {136, 5680, DFS_TX_LEAKAGE_MAX}, + {140, 5700, DFS_TX_LEAKAGE_MAX}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {64, 5320, + {{36, 5180, DFS_TX_LEAKAGE_AUTO_MIN}, {40, 5200, DFS_TX_LEAKAGE_AUTO_MIN}, + {44, 5220, DFS_TX_LEAKAGE_AUTO_MIN}, {48, 5240, DFS_TX_LEAKAGE_AUTO_MIN}, + {52, 5260, 295}, {56, 5280, 295}, + {60, 5300, DFS_TX_LEAKAGE_MIN}, {64, 5320, DFS_TX_LEAKAGE_MIN}, + {100, 5500, 594}, {104, 5520, 594}, + {108, 5540, 625}, {112, 5560, 625}, + {116, 5580, DFS_TX_LEAKAGE_MAX}, {120, 5600, DFS_TX_LEAKAGE_MAX}, + {124, 5620, DFS_TX_LEAKAGE_MAX}, {128, 5640, DFS_TX_LEAKAGE_MAX}, + {132, 5660, DFS_TX_LEAKAGE_MAX}, {136, 5680, DFS_TX_LEAKAGE_MAX}, + {140, 5700, DFS_TX_LEAKAGE_MAX}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {100, 5500, + {{36, 5180, 618}, {40, 5200, 618}, + {44, 5220, 604}, {48, 5240, 604}, + {52, 5260, 596}, {56, 5280, 596}, + {60, 5300, 584}, {64, 5320, 584}, + {100, 5500, DFS_TX_LEAKAGE_MIN}, {104, 5520, DFS_TX_LEAKAGE_MIN}, + {108, 5540, 299}, {112, 5560, 299}, + {116, 5580, DFS_TX_LEAKAGE_AUTO_MIN}, {120, 5600, DFS_TX_LEAKAGE_AUTO_MIN}, + {124, 5620, DFS_TX_LEAKAGE_AUTO_MIN}, {128, 5640, DFS_TX_LEAKAGE_AUTO_MIN}, + {132, 5660, 538}, {136,5680, 538}, + {140, 5700, 598}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {104, 5520, + {{36, 5180, 636}, {40, 5200, 636}, + {44, 5220, 601}, {48, 5240, 601}, + {52, 5260, 616}, {56, 5280, 616}, + {60, 5300, 584}, {64, 5320, 584}, + {100, 5500, DFS_TX_LEAKAGE_MIN}, {104, 5520, DFS_TX_LEAKAGE_MIN}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, DFS_TX_LEAKAGE_AUTO_MIN}, {120, 5600, DFS_TX_LEAKAGE_AUTO_MIN}, + {124, 5620, DFS_TX_LEAKAGE_AUTO_MIN}, {128, 5640, DFS_TX_LEAKAGE_AUTO_MIN}, + {132, 5660, 553}, {136, 5680, 553}, + {140, 5700, 568}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {108, 5540, + {{36, 5180, 600}, {40, 5200, 600}, + {44, 5220, 627}, {48, 5240, 627}, + {52, 5260, 611}, {56, 5280, 611}, + {60, 5300, 611}, {64, 5320, 611}, + {100, 5500, 214}, {104, 5520, 214}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, DFS_TX_LEAKAGE_AUTO_MIN}, {120, 5600, DFS_TX_LEAKAGE_AUTO_MIN}, + {124, 5620, DFS_TX_LEAKAGE_AUTO_MIN}, {128, 5640, DFS_TX_LEAKAGE_AUTO_MIN}, + {132, 5660, DFS_TX_LEAKAGE_AUTO_MIN}, {136, 5680, DFS_TX_LEAKAGE_AUTO_MIN}, + {140, 5700, 534}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {112, 5560, + {{36, 5180, 645}, {40, 5200, 645}, + {44, 5220, 641}, {48, 5240, 641}, + {52, 5260, 618}, {56, 5280, 618}, + {60, 5300, 612}, {64, 5320, 612}, + {100, 5500, 293}, {104, 5520, 293}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, DFS_TX_LEAKAGE_MIN}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_AUTO_MIN}, {128, 5640, DFS_TX_LEAKAGE_AUTO_MIN}, + {132, 5660, DFS_TX_LEAKAGE_AUTO_MIN}, {136, 5680, DFS_TX_LEAKAGE_AUTO_MIN}, + {140, 5700, 521}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {116, 5580, + {{36, 5180, 661}, {40, 5200, 661}, + {44, 5220, 624}, {48, 5240, 624}, + {52, 5260, 634}, {56, 5280, 634}, + {60, 5300, 611}, {64, 5320, 611}, + {100, 5500, DFS_TX_LEAKAGE_AUTO_MIN}, {104, 5520, DFS_TX_LEAKAGE_AUTO_MIN}, + {108, 5540, 217}, {112, 5560, 217}, + {116, 5580, DFS_TX_LEAKAGE_MIN}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_AUTO_MIN}, {128, 5640, DFS_TX_LEAKAGE_AUTO_MIN}, + {132, 5660, DFS_TX_LEAKAGE_AUTO_MIN}, {136, 5680, DFS_TX_LEAKAGE_AUTO_MIN}, + {140, 5700, DFS_TX_LEAKAGE_AUTO_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {120, 5600, + {{36, 5180, 667}, {40, 5200, 667}, + {44, 5220, 645}, {48, 5240, 645}, + {52, 5260, 633}, {56, 5280, 633}, + {60, 5300, 619}, {64, 5320, 619}, + {100, 5500, DFS_TX_LEAKAGE_AUTO_MIN}, {104, 5520, DFS_TX_LEAKAGE_AUTO_MIN}, + {108, 5540, 291}, {112, 5560, 291}, + {116, 5580, DFS_TX_LEAKAGE_MIN}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_AUTO_MIN}, {136, 5680, DFS_TX_LEAKAGE_AUTO_MIN}, + {140, 5700, DFS_TX_LEAKAGE_AUTO_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {124, 5620, + {{36, 5180, 676}, {40, 5200, 676}, + {44, 5220, 668}, {48, 5240, 668}, + {52, 5260, 595}, {56, 5280, 595}, + {60, 5300, 622}, {64, 5320, 622}, + {100, 5500, DFS_TX_LEAKAGE_AUTO_MIN}, {104, 5520, DFS_TX_LEAKAGE_AUTO_MIN}, + {108, 5540, DFS_TX_LEAKAGE_AUTO_MIN}, {112, 5560, DFS_TX_LEAKAGE_AUTO_MIN}, + {116, 5580, 225}, {120, 5600, 225}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_AUTO_MIN}, {136, 5680, DFS_TX_LEAKAGE_AUTO_MIN}, + {140, 5700, DFS_TX_LEAKAGE_AUTO_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {128, 5640, + {{36, 5180, 678}, {40, 5200, 678}, + {44, 5220, 664}, {48, 5240, 664}, + {52, 5260, 651}, {56, 5280, 651}, + {60, 5300, 643}, {64, 5320, 643}, + {100, 5500, DFS_TX_LEAKAGE_AUTO_MIN}, {104, 5520, DFS_TX_LEAKAGE_AUTO_MIN}, + {108, 5540, DFS_TX_LEAKAGE_AUTO_MIN}, {112, 5560, DFS_TX_LEAKAGE_AUTO_MIN}, + {116, 5580, 293}, {120, 5600, 293}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_AUTO_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {132, 5660, + {{36, 5180, 689}, {40, 5200, 689}, + {44, 5220, 669}, {48, 5240, 669}, + {52, 5260, 662}, {56, 5280, 662}, + {60, 5300, 609}, {64, 5320, 609}, + {100, 5500, 538}, {104, 5520, 538}, + {108, 5540, DFS_TX_LEAKAGE_AUTO_MIN}, {112, 5560, DFS_TX_LEAKAGE_AUTO_MIN}, + {116, 5580, DFS_TX_LEAKAGE_AUTO_MIN}, {120, 5600, DFS_TX_LEAKAGE_AUTO_MIN}, + {124, 5620, 247}, {128, 5640, 247}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {136, 5680, + {{36, 5180, 703}, {40, 5200, 703}, + {44, 5220, 688}, {48, 5240, DFS_TX_LEAKAGE_MIN}, + {52, 5260, 671}, {56, 5280, 671}, + {60, 5300, 658}, {64, 5320, 658}, + {100, 5500, 504}, {104, 5520, 504}, + {108, 5540, DFS_TX_LEAKAGE_AUTO_MIN}, {112, 5560, DFS_TX_LEAKAGE_AUTO_MIN}, + {116, 5580, DFS_TX_LEAKAGE_AUTO_MIN}, {120, 5600, DFS_TX_LEAKAGE_AUTO_MIN}, + {124, 5620, 289}, {128, 5640, 289}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {140, 5700, + {{36, 5180, 695}, {40, 5200, 695}, + {44, 5220, 684}, {48, 5240, 684}, + {52, 5260, 664}, {56, 5280, 664}, + {60, 5300, 658}, {64, 5320, 658}, + {100, 5500, 601}, {104, 5520, 601}, + {108, 5540, 545}, {112, 5560, 545}, + {116, 5580, DFS_TX_LEAKAGE_AUTO_MIN}, {120, 5600, DFS_TX_LEAKAGE_AUTO_MIN}, + {124, 5620, DFS_TX_LEAKAGE_AUTO_MIN}, {128, 5640, DFS_TX_LEAKAGE_AUTO_MIN}, + {132, 5660, 262}, {136, 5680, 262}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {144, 5720, + {{36, 5180, 695}, {40, 5200, 695}, + {44, 5220, 684}, {48, 5240, 684}, + {52, 5260, 664}, {56, 5280, 664}, + {60, 5300, 658}, {64, 5320, 658}, + {100, 5500, 601}, {104, 5520, 601}, + {108, 5540, 545}, {112, 5560, 545}, + {116, 5580, DFS_TX_LEAKAGE_AUTO_MIN}, {120, 5600, DFS_TX_LEAKAGE_AUTO_MIN}, + {124, 5620, DFS_TX_LEAKAGE_AUTO_MIN}, {128, 5640, DFS_TX_LEAKAGE_AUTO_MIN}, + {132, 5660, 262}, {136, 5680, 262}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, +}; + +/* channel tx leakage table - ht20 */ +struct dfs_matrix_tx_leak_info ht20_chan[] = { + {52, 5260, + {{36, 5180,DFS_TX_LEAKAGE_AUTO_MIN}, {40, 5200, 286}, + {44, 5220, 225}, {48,5240, 121}, + {52, 5260, DFS_TX_LEAKAGE_MIN}, {56, 5280, DFS_TX_LEAKAGE_MIN}, + {60, 5300, 300}, {64, 5320, DFS_TX_LEAKAGE_AUTO_MIN}, + {100, 5500, 637}, {104, 5520, DFS_TX_LEAKAGE_MAX}, + {108, 5540, DFS_TX_LEAKAGE_MAX}, {112, 5560, DFS_TX_LEAKAGE_MAX}, + {116, 5580, DFS_TX_LEAKAGE_MAX}, {120, 5600, DFS_TX_LEAKAGE_MAX}, + {124, 5620, DFS_TX_LEAKAGE_MAX}, {128, 5640, DFS_TX_LEAKAGE_MAX}, + {132, 5660, DFS_TX_LEAKAGE_MAX}, {136, 5680, DFS_TX_LEAKAGE_MAX}, + {140, 5700, DFS_TX_LEAKAGE_MAX}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {56, 5280, + {{36, 5180, 468}, {40, 5200, DFS_TX_LEAKAGE_AUTO_MIN}, + {44, 5220, DFS_TX_LEAKAGE_AUTO_MIN}, {48, 5240, 206}, + {52, 5260, DFS_TX_LEAKAGE_MIN}, {56, 5280, DFS_TX_LEAKAGE_MIN}, + {60, 5300, DFS_TX_LEAKAGE_MIN}, {64, 5320, DFS_TX_LEAKAGE_MIN}, + {100, 5500, DFS_TX_LEAKAGE_MAX}, {104, 5520, DFS_TX_LEAKAGE_MAX}, + {108, 5540, DFS_TX_LEAKAGE_MAX}, {112, 5560, DFS_TX_LEAKAGE_MAX}, + {116, 5580, DFS_TX_LEAKAGE_MAX}, {120, 5600, DFS_TX_LEAKAGE_MAX}, + {124, 5620, DFS_TX_LEAKAGE_MAX}, {128, 5640, DFS_TX_LEAKAGE_MAX}, + {132, 5660, DFS_TX_LEAKAGE_MAX}, {136, 5680, DFS_TX_LEAKAGE_MAX}, + {140, 5700, DFS_TX_LEAKAGE_MAX}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {60, 5300, + {{36, 5180, 507}, {40, 5200, 440}, + {44, 5220, DFS_TX_LEAKAGE_AUTO_MIN}, {48,5240, 313}, + {52, 5260, DFS_TX_LEAKAGE_MIN}, {56, 5280, DFS_TX_LEAKAGE_MIN}, + {60, 5300, DFS_TX_LEAKAGE_MIN}, {64, 5320, DFS_TX_LEAKAGE_MIN}, + {100, 5500, DFS_TX_LEAKAGE_MAX}, {104, 5520, DFS_TX_LEAKAGE_MAX}, + {108, 5540, DFS_TX_LEAKAGE_MAX}, {112, 5560, DFS_TX_LEAKAGE_MAX}, + {116, 5580, DFS_TX_LEAKAGE_MAX}, {120, 5600, DFS_TX_LEAKAGE_MAX}, + {124, 5620, DFS_TX_LEAKAGE_MAX}, {128, 5640, DFS_TX_LEAKAGE_MAX}, + {132, 5660, DFS_TX_LEAKAGE_MAX}, {136, 5680, DFS_TX_LEAKAGE_MAX}, + {140, 5700, DFS_TX_LEAKAGE_MAX}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {64, 5320 , + {{36, 5180, 516}, {40, 5200, 520}, + {44, 5220, 506}, {48, 5240,DFS_TX_LEAKAGE_AUTO_MIN}, + {52, 5260, 301}, {56, 5280, 258}, + {60, 5300, DFS_TX_LEAKAGE_MIN}, {64, 5320, DFS_TX_LEAKAGE_MIN}, + {100, 5500, 620}, {104, 5520, 617}, + {108, 5540, DFS_TX_LEAKAGE_MAX}, {112, 5560, DFS_TX_LEAKAGE_MAX}, + {116, 5580, DFS_TX_LEAKAGE_MAX}, {120, 5600, DFS_TX_LEAKAGE_MAX}, + {124, 5620, DFS_TX_LEAKAGE_MAX}, {128, 5640, DFS_TX_LEAKAGE_MAX}, + {132, 5660, DFS_TX_LEAKAGE_MAX}, {136, 5680, DFS_TX_LEAKAGE_MAX}, + {140, 5700, DFS_TX_LEAKAGE_MAX}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {100, 5500, + {{36, 5180, 616}, {40, 5200, 601}, + {44, 5220, 604}, {48, 5240, 589}, + {52, 5260, 612}, {56, 5280, 592}, + {60, 5300, 590}, {64, 5320, 582}, + {100, 5500, DFS_TX_LEAKAGE_MIN}, {104, 5520, 131}, + {108, 5540, DFS_TX_LEAKAGE_AUTO_MIN}, {112, 5560, DFS_TX_LEAKAGE_AUTO_MIN}, + {116, 5580, DFS_TX_LEAKAGE_AUTO_MIN}, {120, 5600, 522}, + {124, 5620, 571}, {128, 5640, 589}, + {132, 5660, 593}, {136, 5680, 598}, + {140, 5700, 594}, + {144, 5720, DFS_TX_LEAKAGE_MIN}, + } }, + + {104, 5520, + {{36, 5180, 622}, {40, 5200, 624}, + {44, 5220, 618}, {48, 5240, 610}, + {52, 5260, DFS_TX_LEAKAGE_MAX}, {56, 5280, DFS_TX_LEAKAGE_MAX}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, DFS_TX_LEAKAGE_MIN}, {104, 5520, DFS_TX_LEAKAGE_MIN}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, 463}, + {116, 5580, 483}, {120, 5600, 503}, + {124, 5620, 523}, {128, 5640, 565}, + {132, 5660, 570}, {136, 5680, 588}, + {140, 5700, 585}, + {144, 5720, DFS_TX_LEAKAGE_MIN}, + } }, + + {108, 5540, + {{36, 5180, 620}, {40, 5200, 638}, + {44, 5220, 611}, {48, 5240, 614}, + {52, 5260, DFS_TX_LEAKAGE_MAX}, {56, 5280, DFS_TX_LEAKAGE_MAX}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, 477}, {104, 5520, DFS_TX_LEAKAGE_MIN}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, 477}, {120, 5600, 497}, + {124, 5620, 517}, {128, 5640, 537}, + {132, 5660, 557}, {136, 5680, 577}, + {140, 5700, 603}, + {144, 5720, DFS_TX_LEAKAGE_MIN}, + } }, + + {112, 5560, + {{36, 5180, 636}, {40, 5200, 623}, + {44, 5220, 638}, {48, 5240, 628}, + {52, 5260, DFS_TX_LEAKAGE_MAX}, {56, 5280, DFS_TX_LEAKAGE_MAX}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, 606}, + {100, 5500, 501}, {104, 5520, 481}, + {108, 5540, DFS_TX_LEAKAGE_MIN}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, DFS_TX_LEAKAGE_MIN}, {120, 5600, 481}, + {124, 5620, 501}, {128, 5640, 421}, + {132, 5660, 541}, {136, 5680, 561}, + {140, 5700, 583}, + {144, 5720, DFS_TX_LEAKAGE_MIN}, + } }, + + {116, 5580, + {{36, 5180, 646}, {40, 5200, 648}, + {44, 5220, 633}, {48, 5240, 634}, + {52, 5260, DFS_TX_LEAKAGE_MAX}, {56, 5280, DFS_TX_LEAKAGE_MAX}, + {60, 5300, 615}, {64, 5320, 594}, + {100, 5500, 575}, {104, 5520, 554}, + {108, 5540, 534}, {112, 5560, DFS_TX_LEAKAGE_MIN}, + {116, 5580, DFS_TX_LEAKAGE_MIN}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, 534}, {136, 5680, 554}, + {140, 5700, 574}, + {144, 5720, DFS_TX_LEAKAGE_MIN}, + } }, + + {120, 5600, + {{36, 5180, 643}, {40, 5200, 649}, + {44, 5220, 654}, {48, 5240, 629}, + {52, 5260, DFS_TX_LEAKAGE_MAX}, {56, 5280, 621}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, 565}, {104, 5520, 545}, + {108, 5540, 525}, {112, 5560, 505}, + {116, 5580, DFS_TX_LEAKAGE_MIN}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, 505}, + {132, 5660, 525}, {136, 5680, 545}, + {140, 5700, 565}, + {144, 5720, DFS_TX_LEAKAGE_MIN}, + } }, + + {124, 5620, + {{36, 5180, 638}, {40, 5200, 657}, + {44, 5220, 663}, {48, 5240, 649}, + {52, 5260, DFS_TX_LEAKAGE_MAX}, {56, 5280, DFS_TX_LEAKAGE_MAX}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, 581}, {104, 5520, 561}, + {108, 5540, 541}, {112, 5560, 521}, + {116, 5580, 499}, {120, 5600, DFS_TX_LEAKAGE_MIN}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, 499}, {136, 5680, 519}, + {140, 5700, 539}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {128, 5640, + {{36, 5180, 651}, {40, 5200, 651}, + {44, 5220, 674}, {48, 5240, 640}, + {52, 5260, DFS_TX_LEAKAGE_MAX}, {56, 5280, DFS_TX_LEAKAGE_MAX}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, 603}, {104, 5520, 560}, + {108, 5540, 540}, {112, 5560, 520}, + {116, 5580, 499}, {120, 5600, 479}, + {124, 5620, DFS_TX_LEAKAGE_MIN}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, 479}, + {140, 5700, 499}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {132, 5660, + {{36, 5180, 643}, {40, 5200, 668}, + {44, 5220, 651}, {48, 5240, 657}, + {52, 5260, DFS_TX_LEAKAGE_MAX}, {56, 5280, DFS_TX_LEAKAGE_MAX}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, DFS_TX_LEAKAGE_MAX}, {104, 5520, 602}, + {108, 5540, 578}, {112,5560, 570}, + {116, 5580, 550}, {120, 5600, 530}, + {124, 5620, 510}, {128, 5640, DFS_TX_LEAKAGE_MIN}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, 490}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {136,5680, + {{36, 5180, 654}, {40, 5200, 667}, + {44, 5220, 666}, {48, 5240, 642}, + {52, 5260, DFS_TX_LEAKAGE_MAX}, {56, 5280, DFS_TX_LEAKAGE_MAX}, + {60, 5300, DFS_TX_LEAKAGE_MAX}, {64, 5320, DFS_TX_LEAKAGE_MAX}, + {100, 5500, DFS_TX_LEAKAGE_MAX}, {104, 5520, DFS_TX_LEAKAGE_MAX}, + {108, 5540, DFS_TX_LEAKAGE_MAX}, {112, 5560, 596}, + {116, 5580, 555}, {120, 5600, 535}, + {124, 5620, 515}, {128, 5640, 495}, + {132, 5660, DFS_TX_LEAKAGE_MIN}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {140,5700, + {{36, 5180, 679}, {40, 5200, 673}, + {44, 5220, 667}, {48, 5240, 656}, + {52, 5260, 634}, {56, 5280, 663}, + {60, 5300, 662}, {64, 5320, 660}, + {100, 5500, DFS_TX_LEAKAGE_MAX}, {104, 5520, DFS_TX_LEAKAGE_MAX}, + {108, 5540, DFS_TX_LEAKAGE_MAX}, {112, 5560, 590}, + {116, 5580, 573}, {120, 5600, 553}, + {124, 5620, 533}, {128, 5640, 513}, + {132, 5660, 490}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, + + {144,5720, + {{36, 5180, 679}, {40, 5200, 673}, + {44, 5220, 667}, {48, 5240, 656}, + {52, 5260, 634}, {56, 5280, 663}, + {60, 5300, 662}, {64, 5320, 660}, + {100, 5500, DFS_TX_LEAKAGE_MAX}, {104, 5520, DFS_TX_LEAKAGE_MAX}, + {108, 5540, DFS_TX_LEAKAGE_MAX}, {112, 5560, 590}, + {116, 5580, 573}, {120, 5600, 553}, + {124, 5620, 533}, {128, 5640, 513}, + {132, 5660, 490}, {136, 5680, DFS_TX_LEAKAGE_MIN}, + {140, 5700, DFS_TX_LEAKAGE_MIN}, + {144, 5720, DFS_TX_LEAKAGE_MIN} + } }, +}; + +/* + * dfs_find_target_channel_in_channel_matrix() - finds the leakage matrix + * @ch_width: target channel width + * @NOL_channel: the NOL channel whose leakage matrix is required + * @pTarget_chnl_mtrx: pointer to target channel matrix returned. + * + * This function gives the leakage matrix for given NOL channel and ch_width + * + * Return: TRUE or FALSE + */ +#ifdef CONFIG_CHAN_NUM_API +static bool +dfs_find_target_channel_in_channel_matrix(enum phy_ch_width ch_width, + uint8_t NOL_channel, + struct dfs_tx_leak_info **pTarget_chnl_mtrx) +{ + struct dfs_tx_leak_info *target_chan_matrix = NULL; + struct dfs_matrix_tx_leak_info *pchan_matrix = NULL; + uint32_t nchan_matrix; + int i = 0; + + switch (ch_width) { + case CH_WIDTH_20MHZ: + /* HT20 */ + pchan_matrix = ht20_chan; + nchan_matrix = QDF_ARRAY_SIZE(ht20_chan); + break; + case CH_WIDTH_40MHZ: + /* HT40 */ + pchan_matrix = ht40_chan; + nchan_matrix = QDF_ARRAY_SIZE(ht40_chan); + break; + case CH_WIDTH_80MHZ: + /* HT80 */ + pchan_matrix = ht80_chan; + nchan_matrix = QDF_ARRAY_SIZE(ht80_chan); + break; + default: + /* handle exception and fall back to HT20 table */ + pchan_matrix = ht20_chan; + nchan_matrix = QDF_ARRAY_SIZE(ht20_chan); + break; + } + + for (i = 0; i < nchan_matrix; i++) { + /* find the SAP channel to map the leakage matrix */ + if (NOL_channel == pchan_matrix[i].channel) { + target_chan_matrix = pchan_matrix[i].chan_matrix; + break; + } + } + + if (!target_chan_matrix) { + return false; + } else { + *pTarget_chnl_mtrx = target_chan_matrix; + return true; + } +} +#endif + +/* + * dfs_find_target_channel_in_channel_matrix_for_freq() - finds the leakage + * matrix. + * @chan_width: target channel width + * @nol_channel: the NOL channel frequency whose leakage matrix is required + * @pTarget_chnl_mtrx: pointer to target channel matrix returned. + * + * This function gives the leakage matrix for given NOL channel and ch_width + * + * Return: TRUE or FALSE + */ +#ifdef CONFIG_CHAN_FREQ_API +static bool +dfs_find_target_channel_in_channel_matrix_for_freq(enum phy_ch_width chan_width, + uint16_t nol_freq, + struct dfs_tx_leak_info + **pTarget_chnl_mtrx) +{ + struct dfs_tx_leak_info *target_chan_matrix = NULL; + struct dfs_matrix_tx_leak_info *pchan_matrix = NULL; + uint32_t nchan_matrix; + int i = 0; + + switch (chan_width) { + case CH_WIDTH_20MHZ: + /* HT20 */ + pchan_matrix = ht20_chan; + nchan_matrix = QDF_ARRAY_SIZE(ht20_chan); + break; + case CH_WIDTH_40MHZ: + /* HT40 */ + pchan_matrix = ht40_chan; + nchan_matrix = QDF_ARRAY_SIZE(ht40_chan); + break; + case CH_WIDTH_80MHZ: + /* HT80 */ + pchan_matrix = ht80_chan; + nchan_matrix = QDF_ARRAY_SIZE(ht80_chan); + break; + default: + /* handle exception and fall back to HT20 table */ + pchan_matrix = ht20_chan; + nchan_matrix = QDF_ARRAY_SIZE(ht20_chan); + break; + } + + for (i = 0; i < nchan_matrix; i++) { + /* find the SAP channel to map the leakage matrix */ + if (nol_freq == pchan_matrix[i].channel_freq) { + target_chan_matrix = pchan_matrix[i].chan_matrix; + break; + } + } + + if (!target_chan_matrix) { + return false; + } else { + *pTarget_chnl_mtrx = target_chan_matrix; + return true; + } +} +#endif + +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS +dfs_mark_leaking_ch(struct wlan_dfs *dfs, + enum phy_ch_width ch_width, + uint8_t temp_ch_lst_sz, + uint8_t *temp_ch_lst) +{ + struct dfs_tx_leak_info *target_chan_matrix = NULL; + uint32_t num_channel = (CHAN_ENUM_5720 - CHAN_ENUM_5180) + 1; + uint32_t j = 0; + uint32_t k = 0; + uint8_t dfs_nol_channel; + struct dfs_nolelem *nol; + + nol = dfs->dfs_nol; + while (nol) { + dfs_nol_channel = wlan_freq_to_chan(nol->nol_freq); + if (false == dfs_find_target_channel_in_channel_matrix( + ch_width, dfs_nol_channel, + &target_chan_matrix)) { + /* + * should never happen, we should always find a table + * here, if we don't, need a fix here! + */ + dfs_err(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "Couldn't find target channel matrix!"); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* + * following is based on assumption that both temp_ch_lst + * and target channel matrix are in increasing order of + * ch_id + */ + for (j = 0, k = 0; j < temp_ch_lst_sz && k < num_channel;) { + if (temp_ch_lst[j] == 0) { + j++; + continue; + } + if (target_chan_matrix[k].leak_chan != temp_ch_lst[j]) { + k++; + continue; + } + /* + * check leakage from candidate channel + * to NOL channel + */ + if (target_chan_matrix[k].leak_lvl <= + dfs->tx_leakage_threshold) { + /* + * candidate channel will have + * bad leakage in NOL channel, + * remove from temp list + */ + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "dfs: channel: %d will have bad leakage due to channel: %d\n", + dfs_nol_channel, temp_ch_lst[j]); + temp_ch_lst[j] = 0; + } + j++; + k++; + } + nol = nol->nol_next; + } /* end of loop that selects each NOL */ + + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +#define END_CHAN_INDEX CHAN_ENUM_5720 +#define START_CHAN_INDEX CHAN_ENUM_5180 +QDF_STATUS +dfs_mark_leaking_chan_for_freq(struct wlan_dfs *dfs, + enum phy_ch_width ch_width, + uint8_t temp_chan_lst_sz, + uint16_t *temp_freq_lst) +{ + struct dfs_tx_leak_info *target_chan_matrix = NULL; + uint32_t num_channel = (END_CHAN_INDEX - START_CHAN_INDEX) + 1; + uint32_t j = 0; + uint32_t k = 0; + struct dfs_nolelem *nol; + + nol = dfs->dfs_nol; + while (nol) { + if (false == dfs_find_target_channel_in_channel_matrix_for_freq( + ch_width, nol->nol_freq, + &target_chan_matrix)) { + /* + * should never happen, we should always find a table + * here, if we don't, need a fix here! + */ + dfs_err(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "Couldn't find target channel matrix!"); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* + * following is based on assumption that both temp_freq_lst + * and target channel matrix are in increasing order of + * ch_id + */ + for (j = 0, k = 0; j < temp_chan_lst_sz && k < num_channel;) { + if (temp_freq_lst[j] == 0) { + j++; + continue; + } + if (target_chan_matrix[k].leak_chan_freq != + temp_freq_lst[j]) { + k++; + continue; + } + /* + * check leakage from candidate channel + * to NOL channel + */ + if (target_chan_matrix[k].leak_lvl <= + dfs->tx_leakage_threshold) { + /* + * candidate channel will have + * bad leakage in NOL channel, + * remove from temp list + */ + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "dfs: channel: %d will have bad leakage due to channel: %d\n", + nol->nol_freq, temp_freq_lst[j]); + temp_freq_lst[j] = 0; + } + j++; + k++; + } + nol = nol->nol_next; + } /* end of loop that selects each NOL */ + + return QDF_STATUS_SUCCESS; +} +#endif +#else +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS +dfs_mark_leaking_ch(struct wlan_dfs *dfs, + enum phy_ch_width ch_width, + uint8_t temp_ch_lst_sz, + uint8_t *temp_ch_lst) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS +dfs_mark_leaking_chan_for_freq(struct wlan_dfs *dfs, + enum phy_ch_width ch_width, + uint8_t temp_chan_lst_sz, + uint16_t *temp_freq_lst) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif + +/** + * dfs_populate_80mhz_available_channels()- Populate channels for 80MHz using + * bitmap + * @dfs: Pointer to DFS structure. + * @bitmap: bitmap + * @avail_freq_list: prepared channel list + * + * Prepare 80MHz channels from the bitmap. + * + * Return: channel count + */ +#ifdef CONFIG_CHAN_NUM_API +static uint8_t dfs_populate_80mhz_available_channels( + struct wlan_dfs *dfs, + struct chan_bonding_bitmap *bitmap, + uint8_t *avail_chnl) +{ + uint8_t i = 0; + uint8_t chnl_count = 0; + uint8_t start_chan = 0; + + for (i = 0; i < DFS_MAX_80MHZ_BANDS; i++) { + start_chan = bitmap->chan_bonding_set[i].start_chan; + if (bitmap->chan_bonding_set[i].chan_map == + DFS_80MHZ_MASK) { + avail_chnl[chnl_count++] = start_chan + + (DFS_NEXT_5GHZ_CHANNEL * 0); + avail_chnl[chnl_count++] = start_chan + + (DFS_NEXT_5GHZ_CHANNEL * 1); + avail_chnl[chnl_count++] = start_chan + + (DFS_NEXT_5GHZ_CHANNEL * 2); + avail_chnl[chnl_count++] = start_chan + + (DFS_NEXT_5GHZ_CHANNEL * 3); + } + } + + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "channel count %d", chnl_count); + + return chnl_count; +} +#endif + +/* + * dfs_populate_80mhz_available_channel_for_freq() - Populate 80MHZ channels + * available for selection. + * @dfs: Pointer to wlan_dfs. + * @bitmap: Pointer to bonding channel bitmap. + * @avail_freq_list: Pointer to frequency list of available channels. + */ +#ifdef CONFIG_CHAN_FREQ_API +static uint8_t dfs_populate_80mhz_available_channel_for_freq( + struct wlan_dfs *dfs, + struct chan_bonding_bitmap *bitmap, + uint16_t *avail_freq_list) +{ + uint8_t i = 0; + uint8_t chnl_count = 0; + uint16_t start_chan_freq = 0; + + for (i = 0; i < DFS_MAX_80MHZ_BANDS; i++) { + start_chan_freq = bitmap->chan_bonding_set[i].start_chan_freq; + if (bitmap->chan_bonding_set[i].chan_map == + DFS_80MHZ_MASK) { + avail_freq_list[chnl_count++] = start_chan_freq + + (DFS_NEXT_5GHZ_CHANNEL_FREQ_OFFSET * 0); + avail_freq_list[chnl_count++] = start_chan_freq + + (DFS_NEXT_5GHZ_CHANNEL_FREQ_OFFSET * 1); + avail_freq_list[chnl_count++] = start_chan_freq + + (DFS_NEXT_5GHZ_CHANNEL_FREQ_OFFSET * 2); + avail_freq_list[chnl_count++] = start_chan_freq + + (DFS_NEXT_5GHZ_CHANNEL_FREQ_OFFSET * 3); + } + } + + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "channel count %d", chnl_count); + + return chnl_count; +} +#endif + +/** + * dfs_populate_40mhz_available_channels()- Populate channels for 40MHz using + * bitmap + * @dfs: Pointer to DFS structure. + * @bitmap: bitmap + * @avail_chnl: prepared channel list + * + * Prepare 40MHz channels from the bitmap. + * + * Return: channel count + */ +#ifdef CONFIG_CHAN_NUM_API +static uint8_t dfs_populate_40mhz_available_channels( + struct wlan_dfs *dfs, + struct chan_bonding_bitmap *bitmap, + uint8_t *avail_chnl) +{ + uint8_t i = 0; + uint8_t chnl_count = 0; + uint8_t start_chan = 0; + + for (i = 0; i < DFS_MAX_80MHZ_BANDS; i++) { + start_chan = bitmap->chan_bonding_set[i].start_chan; + if ((bitmap->chan_bonding_set[i].chan_map & + DFS_40MHZ_MASK_L) == DFS_40MHZ_MASK_L) { + avail_chnl[chnl_count++] = start_chan + + (DFS_NEXT_5GHZ_CHANNEL * 0); + avail_chnl[chnl_count++] = start_chan + + (DFS_NEXT_5GHZ_CHANNEL * 1); + } + if ((bitmap->chan_bonding_set[i].chan_map & + DFS_40MHZ_MASK_H) == DFS_40MHZ_MASK_H) { + avail_chnl[chnl_count++] = start_chan + + (DFS_NEXT_5GHZ_CHANNEL * 2); + avail_chnl[chnl_count++] = start_chan + + (DFS_NEXT_5GHZ_CHANNEL * 3); + } + } + + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "channel count %d", chnl_count); + + return chnl_count; +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +static uint8_t +dfs_populate_40mhz_available_channel_for_freq(struct wlan_dfs *dfs, + struct chan_bonding_bitmap *bmap, + uint16_t *avail_freq_list) +{ + uint8_t i = 0; + uint8_t chnl_count = 0; + uint16_t start_chan_freq = 0; + + for (i = 0; i < DFS_MAX_80MHZ_BANDS; i++) { + start_chan_freq = bmap->chan_bonding_set[i].start_chan_freq; + if ((bmap->chan_bonding_set[i].chan_map & + DFS_40MHZ_MASK_L) == DFS_40MHZ_MASK_L) { + avail_freq_list[chnl_count++] = start_chan_freq + + (DFS_NEXT_5GHZ_CHANNEL_FREQ_OFFSET * 0); + avail_freq_list[chnl_count++] = start_chan_freq + + (DFS_NEXT_5GHZ_CHANNEL_FREQ_OFFSET * 1); + } + if ((bmap->chan_bonding_set[i].chan_map & + DFS_40MHZ_MASK_H) == DFS_40MHZ_MASK_H) { + avail_freq_list[chnl_count++] = start_chan_freq + + (DFS_NEXT_5GHZ_CHANNEL_FREQ_OFFSET * 2); + avail_freq_list[chnl_count++] = start_chan_freq + + (DFS_NEXT_5GHZ_CHANNEL_FREQ_OFFSET * 3); + } + } + + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "channel count %d", chnl_count); + + return chnl_count; +} +#endif + +/** + * dfs_populate_available_channels()- Populate channels based on width and + * bitmap + * @dfs: Pointer to DFS structure. + * @bitmap: bitmap + * @ch_width: channel width + * @avail_chnl: prepared channel list + * + * Prepare channel list based on width and channel bitmap. + * + * Return: channel count + */ +#ifdef CONFIG_CHAN_NUM_API +static uint8_t dfs_populate_available_channels( + struct wlan_dfs *dfs, + struct chan_bonding_bitmap *bitmap, + uint8_t ch_width, + uint8_t *avail_chnl) +{ + switch (ch_width) { + case DFS_CH_WIDTH_160MHZ: + case DFS_CH_WIDTH_80P80MHZ: + case DFS_CH_WIDTH_80MHZ: + return dfs_populate_80mhz_available_channels( + dfs, bitmap, avail_chnl); + case DFS_CH_WIDTH_40MHZ: + return dfs_populate_40mhz_available_channels( + dfs, bitmap, avail_chnl); + default: + dfs_err(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "Invalid ch_width %d", ch_width); + break; + } + + return 0; +} +#endif + +/** + * dfs_populate_available_channel_for_freq()- Populate channels based on width + * and bitmap. + * @dfs: Pointer to DFS structure. + * @bitmap: bitmap + * @chan_width: channel width + * @avail_freq_list: prepared channel list + * + * Prepare channel list based on width and channel bitmap. + * + * Return: channel count + */ +#ifdef CONFIG_CHAN_FREQ_API +static uint8_t +dfs_populate_available_channel_for_freq(struct wlan_dfs *dfs, + struct chan_bonding_bitmap *bitmap, + uint8_t chan_width, + uint16_t *freq_list) +{ + switch (chan_width) { + case DFS_CH_WIDTH_160MHZ: + case DFS_CH_WIDTH_80P80MHZ: + case DFS_CH_WIDTH_80MHZ: + return dfs_populate_80mhz_available_channel_for_freq(dfs, + bitmap, + freq_list); + case DFS_CH_WIDTH_40MHZ: + return dfs_populate_40mhz_available_channel_for_freq(dfs, + bitmap, + freq_list); + default: + dfs_err(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "Invalid chan_width %d", chan_width); + break; + } + + return 0; +} +#endif + +/** + * dfs_get_rand_from_lst()- Get random channel from a given channel list + * @dfs: Pointer to DFS structure. + * @ch_lst: channel list + * @num_ch: number of channels + * + * Get random channel from given channel list. + * + * Return: channel number + */ +#ifdef CONFIG_CHAN_NUM_API +static uint8_t dfs_get_rand_from_lst( + struct wlan_dfs *dfs, + uint8_t *ch_lst, + uint8_t num_ch) +{ + uint8_t i; + uint32_t rand_byte = 0; + + if (!num_ch || !ch_lst) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "invalid param ch_lst %pK, num_ch = %d", + ch_lst, num_ch); + return 0; + } + + get_random_bytes((uint8_t *)&rand_byte, 1); + i = (rand_byte + qdf_mc_timer_get_system_ticks()) % num_ch; + + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "random channel %d", ch_lst[i]); + + return ch_lst[i]; +} +#endif + +/** + * dfs_get_rand_from_lst_for_freq()- Get random channel from a given channel + * list. + * @dfs: Pointer to DFS structure. + * @freq_lst: Frequency list + * @num_chan: number of channels + * + * Get random channel from given channel list. + * + * Return: channel frequency. + */ + +#ifdef CONFIG_CHAN_FREQ_API +static uint16_t dfs_get_rand_from_lst_for_freq(struct wlan_dfs *dfs, + uint16_t *freq_lst, + uint8_t num_chan) +{ + uint8_t i; + uint32_t rand_byte = 0; + + if (!num_chan || !freq_lst) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "invalid param freq_lst %pK, num_chan = %d", + freq_lst, num_chan); + return 0; + } + + get_random_bytes((uint8_t *)&rand_byte, 1); + i = (rand_byte + qdf_mc_timer_get_system_ticks()) % num_chan; + + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "random channel %d", freq_lst[i]); + + return freq_lst[i]; +} +#endif + +/** + * dfs_random_channel_sel_set_bitmap()- Set channel bit in bitmap based + * on given channel number + * @dfs: Pointer to DFS structure. + * @bitmap: bitmap + * @channel: channel number + * + * Set channel bit in bitmap based on given channel number. + * + * Return: None + */ +#ifdef CONFIG_CHAN_NUM_API +static void dfs_random_channel_sel_set_bitmap( + struct wlan_dfs *dfs, + struct chan_bonding_bitmap *bitmap, + uint8_t channel) +{ + int i = 0; + int start_chan = 0; + + for (i = 0; i < DFS_MAX_80MHZ_BANDS; i++) { + start_chan = bitmap->chan_bonding_set[i].start_chan; + if (channel >= start_chan && channel <= start_chan + 12) { + bitmap->chan_bonding_set[i].chan_map |= + (1 << ((channel - start_chan) / + DFS_80_NUM_SUB_CHANNEL)); + return; + } + } + + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "Channel=%d is not in the bitmap", channel); +} +#endif + +/** + * dfs_random_channel_sel_set_bitmap()- Set channel bit in bitmap based + * on given channel number + * @dfs: Pointer to DFS structure. + * @bitmap: bitmap + * @chan_freq: channel frequency + * + * Set channel bit in bitmap based on given channel frequency. + * + * Return: None + */ +#ifdef CONFIG_CHAN_FREQ_API +#define FREQUENCY_BAND_LIMIT 60 +static void +dfs_random_channel_sel_set_bitmap_for_freq(struct wlan_dfs *dfs, + struct chan_bonding_bitmap *bitmap, + uint16_t chan_freq) +{ + int i = 0; + int start_chan_freq = 0; + + for (i = 0; i < DFS_MAX_80MHZ_BANDS; i++) { + start_chan_freq = bitmap->chan_bonding_set[i].start_chan_freq; + if (chan_freq >= start_chan_freq && + chan_freq <= start_chan_freq + + FREQUENCY_BAND_LIMIT) { + bitmap->chan_bonding_set[i].chan_map |= + (1 << ((chan_freq - start_chan_freq) / + DFS_80_NUM_SUB_CHANNEL_FREQ)); + return; + } + } + + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "Frequency=%d is not in the bitmap", chan_freq); +} +#endif + +/** + * dfs_find_ch_with_fallback()- find random channel + * @dfs: Pointer to DFS structure. + * @ch_wd: channel width + * @center_freq_seg1: center frequency of secondary segment. + * @ch_lst: list of available channels. + * @num_ch: number of channels in the list. + * + * Find random channel based on given channel width and channel list, + * fallback to lower width if requested channel width not available. + * + * Return: channel number + */ +#ifdef CONFIG_CHAN_NUM_API +static uint8_t dfs_find_ch_with_fallback( + struct wlan_dfs *dfs, + uint8_t *ch_wd, + uint8_t *center_freq_seg1, + uint8_t *ch_lst, + uint32_t num_ch) +{ + bool flag = false; + uint32_t rand_byte = 0; + struct chan_bonding_bitmap ch_map = { { {0} } }; + uint8_t count = 0, i, index = 0, final_cnt = 0, target_channel = 0; + uint8_t primary_seg_start_ch = 0, sec_seg_ch = 0, new_160_start_ch = 0; + uint8_t final_lst[NUM_CHANNELS] = {0}; + + /* initialize ch_map for all 80 MHz bands: we have 6 80MHz bands */ + ch_map.chan_bonding_set[0].start_chan = 36; + ch_map.chan_bonding_set[1].start_chan = 52; + ch_map.chan_bonding_set[2].start_chan = 100; + ch_map.chan_bonding_set[3].start_chan = 116; + ch_map.chan_bonding_set[4].start_chan = 132; + ch_map.chan_bonding_set[5].start_chan = 149; + + for (i = 0; i < num_ch; i++) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "channel = %d added to bitmap", ch_lst[i]); + dfs_random_channel_sel_set_bitmap(dfs, &ch_map, ch_lst[i]); + } + + /* populate available channel list from bitmap */ + final_cnt = dfs_populate_available_channels(dfs, &ch_map, + *ch_wd, final_lst); + + /* If no valid ch bonding found, fallback */ + if (final_cnt == 0) { + if ((*ch_wd == DFS_CH_WIDTH_160MHZ) || + (*ch_wd == DFS_CH_WIDTH_80P80MHZ) || + (*ch_wd == DFS_CH_WIDTH_80MHZ)) { + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "from [%d] to 40Mhz", *ch_wd); + *ch_wd = DFS_CH_WIDTH_40MHZ; + } else if (*ch_wd == DFS_CH_WIDTH_40MHZ) { + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "from 40Mhz to 20MHz"); + *ch_wd = DFS_CH_WIDTH_20MHZ; + } + return 0; + } + + /* ch count should be > 8 to switch new channel in 160Mhz band */ + if (((*ch_wd == DFS_CH_WIDTH_160MHZ) || + (*ch_wd == DFS_CH_WIDTH_80P80MHZ)) && + (final_cnt < DFS_MAX_20M_SUB_CH)) { + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "from [%d] to 80Mhz", *ch_wd); + *ch_wd = DFS_CH_WIDTH_80MHZ; + return 0; + } + + if (*ch_wd == DFS_CH_WIDTH_160MHZ) { + /* + * Only 2 blocks for 160Mhz bandwidth i.e 36-64 & 100-128 + * and all the channels in these blocks are continuous + * and separated by 4Mhz. + */ + for (i = 1; ((i < final_cnt)); i++) { + if ((final_lst[i] - final_lst[i-1]) == + DFS_NEXT_5GHZ_CHANNEL) + count++; + else + count = 0; + if (count == DFS_MAX_20M_SUB_CH - 1) { + flag = true; + new_160_start_ch = final_lst[i - count]; + break; + } + } + } else if (*ch_wd == DFS_CH_WIDTH_80P80MHZ) { + flag = true; + } + + if ((flag == false) && (*ch_wd > DFS_CH_WIDTH_80MHZ)) { + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "from [%d] to 80Mhz", *ch_wd); + *ch_wd = DFS_CH_WIDTH_80MHZ; + return 0; + } + + if (*ch_wd == DFS_CH_WIDTH_160MHZ) { + get_random_bytes((uint8_t *)&rand_byte, 1); + rand_byte = (rand_byte + qdf_mc_timer_get_system_ticks()) + % DFS_MAX_20M_SUB_CH; + target_channel = new_160_start_ch + (rand_byte * + DFS_80_NUM_SUB_CHANNEL); + } else if (*ch_wd == DFS_CH_WIDTH_80P80MHZ) { + get_random_bytes((uint8_t *)&rand_byte, 1); + index = (rand_byte + qdf_mc_timer_get_system_ticks()) % + final_cnt; + target_channel = final_lst[index]; + index -= (index % DFS_80_NUM_SUB_CHANNEL); + primary_seg_start_ch = final_lst[index]; + + /* reset channels associate with primary 80Mhz */ + for (i = 0; i < DFS_80_NUM_SUB_CHANNEL; i++) + final_lst[i + index] = 0; + /* select and calculate center freq for secondary segment */ + for (i = 0; i < final_cnt / DFS_80_NUM_SUB_CHANNEL; i++) { + if (final_lst[i * DFS_80_NUM_SUB_CHANNEL] && + (abs(primary_seg_start_ch - + final_lst[i * DFS_80_NUM_SUB_CHANNEL]) > + (DFS_MAX_20M_SUB_CH * 2))) { + sec_seg_ch = + final_lst[i * DFS_80_NUM_SUB_CHANNEL] + + DFS_80MHZ_START_CENTER_CH_DIFF; + break; + } + } + + if (!sec_seg_ch && (final_cnt == DFS_MAX_20M_SUB_CH)) + *ch_wd = DFS_CH_WIDTH_160MHZ; + else if (!sec_seg_ch) + *ch_wd = DFS_CH_WIDTH_80MHZ; + + *center_freq_seg1 = sec_seg_ch; + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "Center frequency seg1 = %d", sec_seg_ch); + } else { + target_channel = dfs_get_rand_from_lst(dfs, + final_lst, final_cnt); + } + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "target channel = %d", target_channel); + + return target_channel; +} +#endif + +/** + * dfs_find_ch_with_fallback_for_freq()- find random channel + * @dfs: Pointer to DFS structure. + * @chan_wd: channel width + * @center_freq_seg1: center frequency of secondary segment. + * @freq_lst: list of available frequency. + * @num_chan: number of channels in the list. + * + * Find random channel based on given channel width and channel list, + * fallback to lower width if requested channel width not available. + * + * Return: channel frequency. + */ +#ifdef CONFIG_CHAN_FREQ_API +static uint16_t dfs_find_ch_with_fallback_for_freq(struct wlan_dfs *dfs, + uint8_t *chan_wd, + uint16_t *center_freq_seg1, + uint16_t *freq_lst, + uint32_t num_chan) +{ + bool flag = false; + uint32_t rand_byte = 0; + struct chan_bonding_bitmap ch_map = { { {0} } }; + uint8_t count = 0, i, index = 0, final_cnt = 0; + uint16_t target_channel = 0; + uint16_t primary_seg_start_ch = 0, sec_seg_ch = 0, new_160_start_ch = 0; + uint16_t final_lst[NUM_CHANNELS] = {0}; + + /* initialize ch_map for all 80 MHz bands: we have 6 80MHz bands */ + ch_map.chan_bonding_set[0].start_chan_freq = 5180; + ch_map.chan_bonding_set[1].start_chan_freq = 5260; + ch_map.chan_bonding_set[2].start_chan_freq = 5500; + ch_map.chan_bonding_set[3].start_chan_freq = 5580; + ch_map.chan_bonding_set[4].start_chan_freq = 5660; + ch_map.chan_bonding_set[5].start_chan_freq = 5745; + + for (i = 0; i < num_chan; i++) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "channel = %d added to bitmap", freq_lst[i]); + dfs_random_channel_sel_set_bitmap_for_freq(dfs, &ch_map, + freq_lst[i]); + } + + /* populate available channel list from bitmap */ + final_cnt = dfs_populate_available_channel_for_freq(dfs, &ch_map, + *chan_wd, final_lst); + + /* If no valid ch bonding found, fallback */ + if (final_cnt == 0) { + if ((*chan_wd == DFS_CH_WIDTH_160MHZ) || + (*chan_wd == DFS_CH_WIDTH_80P80MHZ) || + (*chan_wd == DFS_CH_WIDTH_80MHZ)) { + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "from [%d] to 40Mhz", *chan_wd); + *chan_wd = DFS_CH_WIDTH_40MHZ; + } else if (*chan_wd == DFS_CH_WIDTH_40MHZ) { + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "from 40Mhz to 20MHz"); + *chan_wd = DFS_CH_WIDTH_20MHZ; + } + return 0; + } + + /* ch count should be > 8 to switch new channel in 160Mhz band */ + if (((*chan_wd == DFS_CH_WIDTH_160MHZ) || + (*chan_wd == DFS_CH_WIDTH_80P80MHZ)) && + (final_cnt < DFS_MAX_20M_SUB_CH)) { + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "from [%d] to 80Mhz", *chan_wd); + *chan_wd = DFS_CH_WIDTH_80MHZ; + return 0; + } + + if (*chan_wd == DFS_CH_WIDTH_160MHZ) { + /* + * Only 2 blocks for 160Mhz bandwidth i.e 36-64 & 100-128 + * and all the channels in these blocks are continuous + * and separated by 4Mhz. + */ + for (i = 1; ((i < final_cnt)); i++) { + if ((final_lst[i] - final_lst[i - 1]) == + DFS_NEXT_5GHZ_CHANNEL_FREQ_OFFSET) + count++; + else + count = 0; + if (count == DFS_MAX_20M_SUB_CH - 1) { + flag = true; + new_160_start_ch = final_lst[i - count]; + break; + } + } + } else if (*chan_wd == DFS_CH_WIDTH_80P80MHZ) { + flag = true; + } + + if ((flag == false) && (*chan_wd > DFS_CH_WIDTH_80MHZ)) { + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "from [%d] to 80Mhz", *chan_wd); + *chan_wd = DFS_CH_WIDTH_80MHZ; + return 0; + } + + if (*chan_wd == DFS_CH_WIDTH_160MHZ) { + get_random_bytes((uint8_t *)&rand_byte, 1); + rand_byte = (rand_byte + qdf_mc_timer_get_system_ticks()) + % DFS_MAX_20M_SUB_CH; + target_channel = new_160_start_ch + (rand_byte * + DFS_80_NUM_SUB_CHANNEL_FREQ); + } else if (*chan_wd == DFS_CH_WIDTH_80P80MHZ) { + get_random_bytes((uint8_t *)&rand_byte, 1); + index = (rand_byte + qdf_mc_timer_get_system_ticks()) % + final_cnt; + target_channel = final_lst[index]; + index -= (index % DFS_80_NUM_SUB_CHANNEL); + primary_seg_start_ch = final_lst[index]; + + /* reset channels associate with primary 80Mhz */ + for (i = 0; i < DFS_80_NUM_SUB_CHANNEL; i++) + final_lst[i + index] = 0; + /* select and calculate center freq for secondary segment */ + for (i = 0; i < final_cnt / DFS_80_NUM_SUB_CHANNEL; i++) { + if (final_lst[i * DFS_80_NUM_SUB_CHANNEL] && + (abs(primary_seg_start_ch - + final_lst[i * DFS_80_NUM_SUB_CHANNEL]) > + (DFS_80P80M_FREQ_DIFF * 2))) { + sec_seg_ch = final_lst[i * + DFS_80_NUM_SUB_CHANNEL] + + DFS_80MHZ_START_CENTER_CH_FREQ_DIFF; + break; + } + } + + if (!sec_seg_ch && (final_cnt == DFS_MAX_20M_SUB_CH)) + *chan_wd = DFS_CH_WIDTH_160MHZ; + else if (!sec_seg_ch) + *chan_wd = DFS_CH_WIDTH_80MHZ; + + *center_freq_seg1 = sec_seg_ch; + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "Center frequency seg1 = %d", sec_seg_ch); + } else { + target_channel = dfs_get_rand_from_lst_for_freq(dfs, + final_lst, + final_cnt); + } + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "target channel = %d", target_channel); + + return target_channel; +} +#endif + +bool dfs_is_freq_in_nol(struct wlan_dfs *dfs, uint32_t freq) +{ + struct dfs_nolelem *nol; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, "null dfs"); + return false; + } + + nol = dfs->dfs_nol; + while (nol) { + if (freq == nol->nol_freq) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "%d is in nol", freq); + return true; + } + nol = nol->nol_next; + } + + return false; +} + +/** + * dfs_apply_rules()- prepare channel list based on flags + * @dfs: dfs handler + * @flags: channel flags + * @random_chan_list: output channel list + * @random_chan_cnt: output channel count + * @ch_list: input channel list + * @ch_cnt: input channel count + * @dfs_region: dfs region + * @acs_info: acs channel range information + * + * prepare channel list based on flags + * + * return: none + */ +#ifdef CONFIG_CHAN_NUM_API +static void dfs_apply_rules(struct wlan_dfs *dfs, + uint32_t flags, + uint8_t *random_chan_list, + uint32_t *random_chan_cnt, + struct dfs_channel *ch_list, + uint32_t ch_cnt, + uint8_t dfs_region, + struct dfs_acs_info *acs_info) +{ + struct dfs_channel *chan; + bool flag_no_weather = 0; + bool flag_no_lower_5g = 0; + bool flag_no_upper_5g = 0; + bool flag_no_dfs_chan = 0; + bool flag_no_2g_chan = 0; + bool flag_no_5g_chan = 0; + bool flag_no_japan_w53 = 0; + int i; + bool found = false; + uint16_t j; + + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, "flags %d", flags); + flag_no_weather = (dfs_region == DFS_ETSI_REGION_VAL) ? + flags & DFS_RANDOM_CH_FLAG_NO_WEATHER_CH : 0; + + if (dfs_region == DFS_MKK_REGION_VAL) { + flag_no_lower_5g = flags & DFS_RANDOM_CH_FLAG_NO_LOWER_5G_CH; + flag_no_upper_5g = flags & DFS_RANDOM_CH_FLAG_NO_UPEER_5G_CH; + flag_no_japan_w53 = flags & DFS_RANDOM_CH_FLAG_NO_JAPAN_W53_CH; + } + + flag_no_dfs_chan = flags & DFS_RANDOM_CH_FLAG_NO_DFS_CH; + flag_no_2g_chan = flags & DFS_RANDOM_CH_FLAG_NO_2GHZ_CH; + flag_no_5g_chan = flags & DFS_RANDOM_CH_FLAG_NO_5GHZ_CH; + + for (i = 0; i < ch_cnt; i++) { + chan = &ch_list[i]; + + if ((chan->dfs_ch_ieee == 0) || + (chan->dfs_ch_ieee > MAX_CHANNEL_NUM)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "invalid channel %d", chan->dfs_ch_ieee); + continue; + } + + if (flags & DFS_RANDOM_CH_FLAG_NO_CURR_OPE_CH) { + /* TODO : Skip all HT20 channels in the given mode */ + if (chan->dfs_ch_ieee == + dfs->dfs_curchan->dfs_ch_ieee) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip %d current operating channel", + chan->dfs_ch_ieee); + continue; + } + } + + if (acs_info && acs_info->acs_mode) { + for (j = 0; j < acs_info->num_of_channel; j++) { + if (acs_info->chan_freq_list[j] == + wlan_chan_to_freq(chan->dfs_ch_ieee)) { + found = true; + break; + } + } + + if (!found) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip ch %d not in acs range", + chan->dfs_ch_ieee); + continue; + } + found = false; + } + + if (flag_no_2g_chan && + chan->dfs_ch_ieee <= DFS_MAX_24GHZ_CHANNEL) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip 2.4 GHz channel=%d", chan->dfs_ch_ieee); + continue; + } + + if (flag_no_5g_chan && + chan->dfs_ch_ieee > DFS_MAX_24GHZ_CHANNEL) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip 5 GHz channel=%d", chan->dfs_ch_ieee); + continue; + } + + if (flag_no_weather) { + if (DFS_IS_CHANNEL_WEATHER_RADAR(chan->dfs_ch_freq)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip weather channel=%d", + chan->dfs_ch_ieee); + continue; + } + } + + if (flag_no_lower_5g && + DFS_IS_CHAN_JAPAN_INDOOR(chan->dfs_ch_ieee)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip indoor channel=%d", chan->dfs_ch_ieee); + continue; + } + + if (flag_no_upper_5g && + DFS_IS_CHAN_JAPAN_OUTDOOR(chan->dfs_ch_ieee)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip outdoor channel=%d", chan->dfs_ch_ieee); + continue; + } + + if (flag_no_dfs_chan && + (chan->dfs_ch_flagext & WLAN_CHAN_DFS)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip dfs channel=%d", chan->dfs_ch_ieee); + continue; + } + + if (flag_no_japan_w53 && + DFS_IS_CHAN_JAPAN_W53(chan->dfs_ch_ieee)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip japan W53 channel=%d", + chan->dfs_ch_ieee); + continue; + } + + if (dfs_is_freq_in_nol(dfs, chan->dfs_ch_freq)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip nol channel=%d", chan->dfs_ch_ieee); + continue; + } + + random_chan_list[*random_chan_cnt] = chan->dfs_ch_ieee; + *random_chan_cnt += 1; + } +} +#endif + +/** + * dfs_apply_rules_for_freq()- prepare channel list based on flags + * @dfs: dfs handler + * @flags: channel flags + * @random_chan_freq_list: output channel list + * @random_chan_cnt: output channel count + * @chan_list: input channel list + * @chan_cnt: input channel count + * @dfs_region: dfs region + * @acs_info: acs channel range information + * + * prepare channel list based on flags + * + * return: none + */ +#ifdef CONFIG_CHAN_FREQ_API +static void dfs_apply_rules_for_freq(struct wlan_dfs *dfs, + uint32_t flags, + uint16_t *random_chan_freq_list, + uint32_t *random_chan_cnt, + struct dfs_channel *chan_list, + uint32_t chan_cnt, + uint8_t dfs_region, + struct dfs_acs_info *acs_info) +{ + struct dfs_channel *chan; + bool flag_no_weather = 0; + bool flag_no_lower_5g = 0; + bool flag_no_upper_5g = 0; + bool flag_no_dfs_chan = 0; + bool flag_no_2g_chan = 0; + bool flag_no_5g_chan = 0; + bool flag_no_japan_w53 = 0; + int i; + bool found = false; + uint16_t j; + uint16_t freq_list[NUM_CHANNELS_160MHZ]; + uint8_t num_channels = 0; + + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, "flags %d", flags); + flag_no_weather = (dfs_region == DFS_ETSI_REGION_VAL) ? + flags & DFS_RANDOM_CH_FLAG_NO_WEATHER_CH : 0; + + if (dfs_region == DFS_MKK_REGION_VAL) { + flag_no_lower_5g = flags & DFS_RANDOM_CH_FLAG_NO_LOWER_5G_CH; + flag_no_upper_5g = flags & DFS_RANDOM_CH_FLAG_NO_UPEER_5G_CH; + flag_no_japan_w53 = flags & DFS_RANDOM_CH_FLAG_NO_JAPAN_W53_CH; + } + + flag_no_dfs_chan = flags & DFS_RANDOM_CH_FLAG_NO_DFS_CH; + flag_no_2g_chan = flags & DFS_RANDOM_CH_FLAG_NO_2GHZ_CH; + flag_no_5g_chan = flags & DFS_RANDOM_CH_FLAG_NO_5GHZ_CH; + + if (flags & DFS_RANDOM_CH_FLAG_NO_CURR_OPE_CH) { + num_channels = + dfs_get_bonding_channel_without_seg_info_for_freq + (dfs->dfs_curchan, freq_list); + } + + for (i = 0; i < chan_cnt; i++) { + chan = &chan_list[i]; + found = false; + + if ((chan->dfs_ch_ieee == 0) || + (chan->dfs_ch_ieee > MAX_CHANNEL_NUM)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "invalid channel %d", chan->dfs_ch_ieee); + continue; + } + + if (flags & DFS_RANDOM_CH_FLAG_NO_CURR_OPE_CH) { + for (j = 0; j < num_channels; j++) { + if (chan->dfs_ch_freq == freq_list[j]) { + dfs_debug(dfs, + WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip %d current operating channel", + chan->dfs_ch_freq); + found = true; + break; + } + } + + if (found) + continue; + } + + if (acs_info && acs_info->acs_mode) { + for (j = 0; j < acs_info->num_of_channel; j++) { + if (acs_info->chan_freq_list[j] == + chan->dfs_ch_freq) { + found = true; + break; + } + } + + if (!found) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip ch freq %d not in acs range", + chan->dfs_ch_freq); + continue; + } + found = false; + } + + if (flag_no_2g_chan && + chan->dfs_ch_freq <= DFS_MAX_24GHZ_CHANNEL_FREQ) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip 2.4 GHz channel=%d", chan->dfs_ch_ieee); + continue; + } + + if (flag_no_5g_chan && chan->dfs_ch_freq > + DFS_MAX_24GHZ_CHANNEL_FREQ) + { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip 5 GHz channel=%d", chan->dfs_ch_ieee); + continue; + } + + if (flag_no_weather) { + if (DFS_IS_CHANNEL_WEATHER_RADAR(chan->dfs_ch_freq)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip weather channel=%d", + chan->dfs_ch_ieee); + continue; + } + } + + if (flag_no_lower_5g && + DFS_IS_CHAN_JAPAN_INDOOR_FREQ(chan->dfs_ch_freq)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip indoor channel=%d", chan->dfs_ch_ieee); + continue; + } + + if (flag_no_upper_5g && + DFS_IS_CHAN_JAPAN_OUTDOOR_FREQ(chan->dfs_ch_freq)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip outdoor channel=%d", chan->dfs_ch_ieee); + continue; + } + + if (flag_no_dfs_chan && + (chan->dfs_ch_flagext & WLAN_CHAN_DFS)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip dfs channel=%d", chan->dfs_ch_ieee); + continue; + } + + if (flag_no_japan_w53 && + DFS_IS_CHAN_JAPAN_W53_FREQ(chan->dfs_ch_freq)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip japan W53 channel=%d", + chan->dfs_ch_ieee); + continue; + } + + if (dfs_is_freq_in_nol(dfs, chan->dfs_ch_freq)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip nol channel=%d", chan->dfs_ch_ieee); + continue; + } + + random_chan_freq_list[*random_chan_cnt] = chan->dfs_ch_freq; + *random_chan_cnt += 1; + } +} +#endif + +#ifdef CONFIG_CHAN_NUM_API +uint8_t dfs_prepare_random_channel(struct wlan_dfs *dfs, + struct dfs_channel *ch_list, + uint32_t ch_cnt, + uint32_t flags, + uint8_t *ch_wd, + struct dfs_channel *cur_chan, + uint8_t dfs_region, + struct dfs_acs_info *acs_info) +{ + int i = 0; + uint8_t final_cnt = 0; + uint8_t target_ch = 0; + uint8_t *random_chan_list = NULL; + uint32_t random_chan_cnt = 0; + uint16_t flag_no_weather = 0; + uint8_t *leakage_adjusted_lst; + uint8_t final_lst[NUM_CHANNELS] = {0}; + + if (!ch_list || !ch_cnt) { + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "Invalid params %pK, ch_cnt=%d", + ch_list, ch_cnt); + return 0; + } + + if (*ch_wd < DFS_CH_WIDTH_20MHZ || *ch_wd > DFS_CH_WIDTH_80P80MHZ) { + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "Invalid ch_wd %d", *ch_wd); + return 0; + } + + random_chan_list = qdf_mem_malloc(ch_cnt * sizeof(*random_chan_list)); + if (!random_chan_list) + return 0; + + dfs_apply_rules(dfs, flags, random_chan_list, &random_chan_cnt, + ch_list, ch_cnt, dfs_region, acs_info); + + flag_no_weather = (dfs_region == DFS_ETSI_REGION_VAL) ? + flags & DFS_RANDOM_CH_FLAG_NO_WEATHER_CH : 0; + + /* list adjusted after leakage has been marked */ + leakage_adjusted_lst = qdf_mem_malloc(random_chan_cnt); + if (!leakage_adjusted_lst) { + qdf_mem_free(random_chan_list); + return 0; + } + + do { + qdf_mem_copy(leakage_adjusted_lst, random_chan_list, + random_chan_cnt); + if (QDF_IS_STATUS_ERROR(dfs_mark_leaking_ch(dfs, *ch_wd, + random_chan_cnt, + leakage_adjusted_lst))) { + qdf_mem_free(random_chan_list); + qdf_mem_free(leakage_adjusted_lst); + return 0; + } + + if (*ch_wd == DFS_CH_WIDTH_20MHZ) { + /* + * PASS: 3 - from leakage_adjusted_lst, prepare valid + * ch list and use random number from that + */ + for (i = 0; i < random_chan_cnt; i++) { + if (leakage_adjusted_lst[i] == 0) + continue; + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "dfs: Channel=%d added to available list", + leakage_adjusted_lst[i]); + final_lst[final_cnt] = leakage_adjusted_lst[i]; + final_cnt++; + } + target_ch = dfs_get_rand_from_lst( + dfs, final_lst, final_cnt); + break; + } + + target_ch = dfs_find_ch_with_fallback(dfs, ch_wd, + &cur_chan->dfs_ch_vhtop_ch_freq_seg2, + leakage_adjusted_lst, + random_chan_cnt); + + /* + * When flag_no_weather is set, avoid usage of Adjacent + * weather radar channel in HT40 mode as extension channel + * will be on 5600. + */ + if (flag_no_weather && + (target_ch == + DFS_ADJACENT_WEATHER_RADAR_CHANNEL_NUM) && + (*ch_wd == DFS_CH_WIDTH_40MHZ)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip weather adjacent ch=%d\n", + target_ch); + continue; + } + + if (target_ch) + break; + } while (true); + + qdf_mem_free(random_chan_list); + qdf_mem_free(leakage_adjusted_lst); + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, "target_ch = %d", target_ch); + + return target_ch; +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +uint16_t dfs_prepare_random_channel_for_freq(struct wlan_dfs *dfs, + struct dfs_channel *chan_list, + uint32_t chan_cnt, + uint32_t flags, + uint8_t *chan_wd, + struct dfs_channel *cur_chan, + uint8_t dfs_region, + struct dfs_acs_info *acs_info) +{ + int i = 0; + uint8_t final_cnt = 0; + uint16_t target_freq = 0; + uint16_t *random_chan_freq_list = NULL; + uint32_t random_chan_cnt = 0; + uint16_t flag_no_weather = 0; + uint16_t *leakage_adjusted_lst; + uint16_t final_lst[NUM_CHANNELS] = {0}; + uint16_t *dfs_cfreq_seg2 = NULL; + + if (!chan_list || !chan_cnt) { + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "Invalid params %pK, chan_cnt=%d", + chan_list, chan_cnt); + return 0; + } + + if (*chan_wd < DFS_CH_WIDTH_20MHZ || *chan_wd > DFS_CH_WIDTH_80P80MHZ) { + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "Invalid chan_wd %d", *chan_wd); + return 0; + } + + random_chan_freq_list = + qdf_mem_malloc(chan_cnt * sizeof(*random_chan_freq_list)); + if (!random_chan_freq_list) + return 0; + + dfs_apply_rules_for_freq(dfs, flags, random_chan_freq_list, + &random_chan_cnt, chan_list, chan_cnt, + dfs_region, acs_info); + flag_no_weather = (dfs_region == DFS_ETSI_REGION_VAL) ? + flags & DFS_RANDOM_CH_FLAG_NO_WEATHER_CH : 0; + + /* list adjusted after leakage has been marked */ + leakage_adjusted_lst = qdf_mem_malloc(random_chan_cnt * + sizeof(*leakage_adjusted_lst)); + if (!leakage_adjusted_lst) { + qdf_mem_free(random_chan_freq_list); + return 0; + } + + do { + int ret; + + qdf_mem_copy(leakage_adjusted_lst, random_chan_freq_list, + random_chan_cnt * sizeof(*leakage_adjusted_lst)); + ret = dfs_mark_leaking_chan_for_freq(dfs, *chan_wd, + random_chan_cnt, + leakage_adjusted_lst); + if (QDF_IS_STATUS_ERROR(ret)) { + qdf_mem_free(random_chan_freq_list); + qdf_mem_free(leakage_adjusted_lst); + return 0; + } + + if (*chan_wd == DFS_CH_WIDTH_20MHZ) { + /* + * PASS: 3 - from leakage_adjusted_lst, prepare valid + * ch list and use random number from that + */ + for (i = 0; i < random_chan_cnt; i++) { + if (leakage_adjusted_lst[i] == 0) + continue; + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "Channel=%d added to available list", + leakage_adjusted_lst[i]); + final_lst[final_cnt] = leakage_adjusted_lst[i]; + final_cnt++; + } + target_freq = dfs_get_rand_from_lst_for_freq(dfs, + final_lst, + final_cnt); + break; + } + dfs_cfreq_seg2 = &cur_chan->dfs_ch_mhz_freq_seg2; + target_freq = + dfs_find_ch_with_fallback_for_freq(dfs, chan_wd, + dfs_cfreq_seg2, + leakage_adjusted_lst, + random_chan_cnt); + + /* Since notion of 80+80 is not present in the regulatory + * channel the function may return invalid 80+80 channels for + * some devices (e.g. Pine). Therefore, check if we need to + * correct it by checking the following condition. + */ + if ((*chan_wd == DFS_CH_WIDTH_80P80MHZ) && + (flags & DFS_RANDOM_CH_FLAG_RESTRICTED_80P80_ENABLED) && + !(CHAN_WITHIN_RESTRICTED_80P80(target_freq, + *dfs_cfreq_seg2))) { + *chan_wd = DFS_CH_WIDTH_160MHZ; + target_freq = dfs_find_ch_with_fallback_for_freq( + dfs, chan_wd, dfs_cfreq_seg2, + leakage_adjusted_lst, random_chan_cnt); + } + + /* + * When flag_no_weather is set, avoid usage of Adjacent + * weather radar channel in HT40 mode as extension channel + * will be on 5600. + */ + if (flag_no_weather && + (target_freq == + DFS_ADJACENT_WEATHER_RADAR_CHANNEL_FREQ) && + (*chan_wd == DFS_CH_WIDTH_40MHZ)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip weather adjacent ch=%d\n", + target_freq); + continue; + } + + if (target_freq) + break; + } while (true); + + qdf_mem_free(random_chan_freq_list); + qdf_mem_free(leakage_adjusted_lst); + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, "target_freq = %d", + target_freq); + + return target_freq; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_init_deinit_api.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_init_deinit_api.h new file mode 100644 index 0000000000000000000000000000000000000000..4da0c94b85c74eb7910bee158352c810986b0352 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_init_deinit_api.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file init/deint functions for DFS module. + */ + +#ifndef _WLAN_DFS_INIT_DEINIT_API_H_ +#define _WLAN_DFS_INIT_DEINIT_API_H_ + +#include "wlan_dfs_ucfg_api.h" + +/** + * wlan_pdev_get_dfs_obj() - Get DFS object from PDEV. + * @pdev: Pointer to PDEV structure. + * @id: DFS component ID. + * @obj: Pointer to DFS object. + */ +struct wlan_dfs *wlan_pdev_get_dfs_obj(struct wlan_objmgr_pdev *pdev); + +/** + * register_dfs_callbacks() - Fill mlme pointers. + */ +void register_dfs_callbacks(void); + +/** + * dfs_init() - Init DFS module + */ +QDF_STATUS dfs_init(void); + +/** + * dfs_deinit() - Deinit DFS module. + */ +QDF_STATUS dfs_deinit(void); + +/** + * wlan_dfs_pdev_obj_create_notification() - DFS pdev object create handler. + * @pdev: Pointer to DFS pdev object. + */ +QDF_STATUS wlan_dfs_pdev_obj_create_notification(struct wlan_objmgr_pdev *pdev, + void *arg); + +/** + * wlan_dfs_pdev_obj_destroy_notification() - DFS pdev object delete handler. + * @pdev: Pointer to DFS pdev object. + */ +QDF_STATUS wlan_dfs_pdev_obj_destroy_notification(struct wlan_objmgr_pdev *pdev, + void *arg); + +/** + * wifi_dfs_psoc_enable() - handles registering dfs event handlers. + * @psoc: psoc object. + */ +QDF_STATUS wifi_dfs_psoc_enable(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_dfs_psoc_disable() - handles deregistering dfs event handlers. + * @psoc: psoc object. + */ +QDF_STATUS wifi_dfs_psoc_disable(struct wlan_objmgr_psoc *psoc); + +#endif /* _WLAN_DFS_INIT_DEINIT_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_ioctl.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_ioctl.h new file mode 100644 index 0000000000000000000000000000000000000000..a17d29b32d64fe77c0f075b8286324da0dec56ce --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_ioctl.h @@ -0,0 +1,359 @@ +/* + * Copyright (c) 2011, 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2010, Atheros Communications Inc. + * All Rights Reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has dfs IOCTL Defines. + */ + +#ifndef _DFS_IOCTL_H_ +#define _DFS_IOCTL_H_ + +#define DFS_MUTE_TIME 1 +#define DFS_SET_THRESH 2 +#define DFS_GET_THRESH 3 +#define DFS_GET_USENOL 4 +#define DFS_SET_USENOL 5 +#define DFS_RADARDETECTS 6 +#define DFS_BANGRADAR 7 +#define DFS_SHOW_NOL 8 +#define DFS_DISABLE_DETECT 9 +#define DFS_ENABLE_DETECT 10 +#define DFS_DISABLE_FFT 11 +#define DFS_ENABLE_FFT 12 +#define DFS_SET_DEBUG_LEVEL 13 +#define DFS_GET_NOL 14 +#define DFS_SET_NOL 15 + +#define DFS_SET_FALSE_RSSI_THRES 16 +#define DFS_SET_PEAK_MAG 17 +#define DFS_IGNORE_CAC 18 +#define DFS_SET_NOL_TIMEOUT 19 +#define DFS_GET_CAC_VALID_TIME 20 +#define DFS_SET_CAC_VALID_TIME 21 +#define DFS_SHOW_NOLHISTORY 22 +#define DFS_SHOW_PRECAC_LISTS 23 +#define DFS_RESET_PRECAC_LISTS 24 +#define DFS_SET_DISABLE_RADAR_MARKING 25 +#define DFS_GET_DISABLE_RADAR_MARKING 26 + +#define DFS_INJECT_SEQUENCE 27 +#define DFS_ALLOW_HW_PULSES 28 +#define DFS_SET_PRI_MULTIPILER 29 + +#define RESTRICTED_80P80_START_CHAN 132 +#define RESTRICTED_80P80_END_CHAN 161 + +/* Check if the given channels are within restricted 80P80 start chan(132) and + * end chan (161). + */ +#define CHAN_WITHIN_RESTRICTED_80P80(chan, cfreq_seg2) \ + ((((chan) >= RESTRICTED_80P80_START_CHAN) && \ + ((chan) <= RESTRICTED_80P80_END_CHAN) && \ + ((cfreq_seg2) >= RESTRICTED_80P80_START_CHAN) && \ + ((cfreq_seg2) <= RESTRICTED_80P80_END_CHAN)) ? true : false) + +/* + * Spectral IOCTLs use DFS_LAST_IOCTL as the base. + * This must always be the last IOCTL in DFS and have + * the highest value. + */ +#define DFS_LAST_IOCTL 29 + +#ifndef DFS_CHAN_MAX +#define DFS_CHAN_MAX 25 +#endif + +/** + * struct dfsreq_nolelem - NOL elements. + * @nol_freq: NOL channel frequency. + * @nol_chwidth: NOL channel width. + * @nol_start_ticks: OS ticks when the NOL timer started. + * @nol_timeout_ms: Nol timeout value in msec. + */ + +struct dfsreq_nolelem { + uint16_t nol_freq; + uint16_t nol_chwidth; + unsigned long nol_start_ticks; + uint32_t nol_timeout_ms; +}; + +struct dfsreq_nolinfo { + uint32_t dfs_ch_nchans; + struct dfsreq_nolelem dfs_nol[DFS_CHAN_MAX]; +}; + +/* + * IOCTL parameter types + */ + +#define DFS_PARAM_FIRPWR 1 +#define DFS_PARAM_RRSSI 2 +#define DFS_PARAM_HEIGHT 3 +#define DFS_PARAM_PRSSI 4 +#define DFS_PARAM_INBAND 5 +/* 5413 specific parameters */ +#define DFS_PARAM_RELPWR 7 +#define DFS_PARAM_RELSTEP 8 +#define DFS_PARAM_MAXLEN 9 + +/** + * struct dfs_ioctl_params - DFS ioctl params. + * @dfs_firpwr: FIR pwr out threshold. + * @dfs_rrssi: Radar rssi thresh. + * @dfs_height: Pulse height thresh. + * @dfs_prssi: Pulse rssi thresh. + * @dfs_inband: Inband thresh. + * @dfs_relpwr: Pulse relative pwr thresh. + * @dfs_relstep: Pulse relative step thresh. + * @dfs_maxlen: Pulse max duration. + */ +struct dfs_ioctl_params { + int32_t dfs_firpwr; + int32_t dfs_rrssi; + int32_t dfs_height; + int32_t dfs_prssi; + int32_t dfs_inband; + int32_t dfs_relpwr; + int32_t dfs_relstep; + int32_t dfs_maxlen; +}; + +/* Types of Bangradar commands: + * @DFS_BANGRADAR_FOR_ALL_SUBCHANS : Bangradar with no arguments. + * All the subchannels in the current + * channel shall be added. + * @DFS_BANGRADAR_FOR_ALL_SUBCHANS_OF_SEGID : Bangradar with 1 (seg_id) argument + * All subchannels of the specific + * seg_id shall be added. + * @DFS_BANGRADAR_FOR_SPECIFIC_SUBCHANS : Bangradar with all (segment ID, + * is_chirp and frequency offset) + * arguments. + * Only radar infected subchannels + * of the specific seg_id shall be + * added. + * + * (Unless all arguments are given, we cannot determine which specific + * subchannels to simulate the radar on, hence simulate in all subchans). + */ +enum dfs_bangradar_types { + DFS_NO_BANGRADAR = 0, + DFS_BANGRADAR_FOR_ALL_SUBCHANS, + DFS_BANGRADAR_FOR_ALL_SUBCHANS_OF_SEGID, + DFS_BANGRADAR_FOR_SPECIFIC_SUBCHANS, + DFS_INVALID_BANGRADAR_TYPE +}; + +/** + * struct dfs_bangradar_params - DFS bangradar params. + * @bangradar_type: Type of Bangradar. + * @seg_id: Segment ID information. + * @is_chirp: Chirp radar or not. + * @freq_offset: Frequency offset at which radar was found. + */ +struct dfs_bangradar_params { + enum dfs_bangradar_types bangradar_type; + uint8_t seg_id; + uint8_t is_chirp; + int32_t freq_offset; +}; +#define DFS_IOCTL_PARAM_NOVAL 65535 +#define DFS_IOCTL_PARAM_ENABLE 0x8000 + +/* Random channel flags */ +/* Flag to exclude current operating channels */ +#define DFS_RANDOM_CH_FLAG_NO_CURR_OPE_CH 0x0001 /* 0000 0000 0000 0001 */ + +/* Flag to exclude weather channels */ +#define DFS_RANDOM_CH_FLAG_NO_WEATHER_CH 0x0002 /* 0000 0000 0000 0010 */ + +/* Flag to exclude indoor channels */ +#define DFS_RANDOM_CH_FLAG_NO_LOWER_5G_CH 0x0004 /* 0000 0000 0000 0100 */ + +/* Flag to exclude outdoor channels */ +#define DFS_RANDOM_CH_FLAG_NO_UPEER_5G_CH 0x0008 /* 0000 0000 0000 1000 */ + +/* Flag to exclude dfs channels */ +#define DFS_RANDOM_CH_FLAG_NO_DFS_CH 0x0010 /* 0000 0000 0001 0000 */ + +/* Flag to exclude all 5GHz channels */ +#define DFS_RANDOM_CH_FLAG_NO_5GHZ_CH 0x0020 /* 0000 0000 0010 0000 */ + +/* Flag to exclude all 2.4GHz channels */ +#define DFS_RANDOM_CH_FLAG_NO_2GHZ_CH 0x0040 /* 0000 0000 0100 0000 */ + +/* Flag to enable Reduced BW Agile DFS */ +#define DFS_RANDOM_CH_FLAG_ENABLE_REDUCED_BW 0x0080 /* 0000 0000 1000 0000 */ + +/* Flag to exclude Japan W53 channnels */ +#define DFS_RANDOM_CH_FLAG_NO_JAPAN_W53_CH 0x0100 /* 0000 0001 0000 0000 */ + +/* Restricted 80P80 MHz is enabled */ +#define DFS_RANDOM_CH_FLAG_RESTRICTED_80P80_ENABLED 0x0200 + /* 0000 0010 0000 0000 */ + +/** + * struct wlan_dfs_caps - DFS capability structure. + * @wlan_dfs_ext_chan_ok: Can radar be detected on the extension chan? + * @wlan_dfs_combined_rssi_ok: Can use combined radar RSSI? + * @wlan_dfs_use_enhancement: This flag is used to indicate if radar + * detection scheme should use enhanced chirping + * detection algorithm. This flag also determines + * if certain radar data should be discarded to + * minimize false detection of radar. + * @wlan_strong_signal_diversiry: Strong Signal fast diversity count. + * @wlan_chip_is_bb_tlv: Chip is BB TLV? + * @wlan_chip_is_over_sampled: Is Over sampled. + * @wlan_chip_is_ht160: IS VHT160? + * @wlan_chip_is_false_detect: Is False detected? + * @wlan_fastdiv_val: Goes with wlan_strong_signal_diversiry: If we + * have fast diversity capability, read off + * Strong Signal fast diversity count set in the + * ini file, and store so we can restore the + * value when radar is disabled. + */ +struct wlan_dfs_caps { + uint32_t wlan_dfs_ext_chan_ok:1, + wlan_dfs_combined_rssi_ok:1, + wlan_dfs_use_enhancement:1, + wlan_strong_signal_diversiry:1, + wlan_chip_is_bb_tlv:1, + wlan_chip_is_over_sampled:1, + wlan_chip_is_ht160:1, + wlan_chip_is_false_detect:1; + uint32_t wlan_fastdiv_val; +}; + +/** + * struct wlan_dfs_phyerr_param - DFS Phyerr structure. + * @pe_firpwr: FIR pwr out threshold. + * @pe_rrssi: Radar rssi thresh. + * @pe_height: Pulse height thresh. + * @pe_prssi: Pulse rssi thresh. + * @pe_inband: Inband thresh. + * @pe_relpwr: Relative power threshold in 0.5dB steps. + * @pe_relstep: Pulse Relative step threshold in 0.5dB steps. + * @pe_maxlen: Max length of radar sign in 0.8us units. + * @pe_usefir128: Use the average in-band power measured over 128 cycles. + * @pe_blockradar: Enable to block radar check if pkt detect is done via OFDM + * weak signal detect or pkt is detected immediately after tx + * to rx transition. + * @pe_enmaxrssi: Enable to use the max rssi instead of the last rssi during + * fine gain changes for radar detection. + */ +struct wlan_dfs_phyerr_param { + int32_t pe_firpwr; + int32_t pe_rrssi; + int32_t pe_height; + int32_t pe_prssi; + int32_t pe_inband; + uint32_t pe_relpwr; + uint32_t pe_relstep; + uint32_t pe_maxlen; + bool pe_usefir128; + bool pe_blockradar; + bool pe_enmaxrssi; +}; + +/** + * enum WLAN_DFS_EVENTS - DFS Events that will be sent to userspace + * @WLAN_EV_RADAR_DETECTED: Radar is detected + * @WLAN_EV_CAC_STARTED: CAC timer has started + * @WLAN_EV_CAC_COMPLETED: CAC timer completed + * @WLAN_EV_NOL_STARTED: NOL started + * @WLAN_EV_NOL_FINISHED: NOL Completed + * + * DFS events such as radar detected, CAC started, + * CAC completed, NOL started, NOL finished + */ +enum WLAN_DFS_EVENTS { + WLAN_EV_RADAR_DETECTED, + WLAN_EV_CAC_STARTED, + WLAN_EV_CAC_COMPLETED, + WLAN_EV_NOL_STARTED, + WLAN_EV_NOL_FINISHED, +}; + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(WLAN_DFS_SYNTHETIC_RADAR) +/** + * Structure of Pulse to be injected into the DFS Module + * ****************************************************** + * Header + * ====== + * ----------|--------------| + * num_pulses| total_len_seq| + * ----------|--------------| + * Buffer Contents per pulse: + * ========================== + * ------|----------|-----------|----------|-----------|---------------|-------- + * r_rssi|r_ext_rssi|r_rs_tstamp|r_fulltsf |fft_datalen|total_len_pulse|FFT + * | | | | | |Buffer.. + * ------|----------|-----------|----------|-----------|---------------|-------- + */ + +/** + * struct synthetic_pulse - Radar Pulse Structure to be filled on reading the + * user file. + * @r_rssi: RSSI of the pulse. + * @r_ext_rssi: Extension Channel RSSI. + * @r_rs_tstamp: Timestamp. + * @r_fulltsf: TSF64. + * @fft_datalen: Total len of FFT. + * @total_len_pulse: Total len of the pulse. + * @fft_buf: Pointer to fft data. + */ + +struct synthetic_pulse { + uint8_t r_rssi; + uint8_t r_ext_rssi; + uint32_t r_rs_tstamp; + uint64_t r_fulltsf; + uint16_t fft_datalen; + uint16_t total_len_pulse; + unsigned char *fft_buf; +} qdf_packed; + +/** + * struct synthetic_seq - Structure to hold an array of pointers to the + * pulse structure. + * @num_pulses: Total num of pulses in the sequence. + * @total_len_seq: Total len of the sequence. + * @pulse: Array of pointers to synthetic_pulse structure. + */ + +struct synthetic_seq { + uint8_t num_pulses; + uint32_t total_len_seq; + struct synthetic_pulse *pulse[0]; +}; + +/** + * struct seq_store - Structure to hold an array of pointers to the synthetic + * sequence structure. + * @num_sequence: Total number of "sequence of pulses" in the file. + * @seq_arr: Array of pointers to synthetic_seq structure. + */ + +struct seq_store { + uint8_t num_sequence; + struct synthetic_seq *seq_arr[0]; +}; +#endif /* WLAN_DFS_PARTIAL_OFFLOAD && WLAN_DFS_SYNTHETIC_RADAR */ +#endif /* _DFS_IOCTL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_lmac_api.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_lmac_api.h new file mode 100644 index 0000000000000000000000000000000000000000..5ed030da216a6b55681281a4e62cd0183d263c43 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_lmac_api.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: These APIs are used by DFS core functions to call lmac/offload + * functions. + */ + +#ifndef _WLAN_DFS_LMAC_API_H_ +#define _WLAN_DFS_LMAC_API_H_ + +#include +#include + +/** + * lmac_get_caps() - Get DFS capabilities. + * @pdev: Pointer to PDEV structure. + * @dfs_caps: Pointer to dfs_caps structure + */ +void lmac_get_caps(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_caps *dfs_caps); + +/** + * lmac_get_tsf64() - Get tsf64 value. + * @pdev: Pointer to PDEV structure. + * + * Return: tsf64 timer value. + */ +uint64_t lmac_get_tsf64(struct wlan_objmgr_pdev *pdev); + +/** + * lmac_dfs_disable() - Disable DFS. + * @pdev: Pointer to PDEV structure. + * @no_cac: no_cac flag. + */ +void lmac_dfs_disable(struct wlan_objmgr_pdev *pdev, int no_cac); + +/** + * lmac_dfs_enable() - Enable DFS. + * @pdev: Pointer to PDEV structure. + * @is_fastclk: fastclk value. + * @param: Pointer to wlan_dfs_phyerr_param structure. + * @dfsdomain: DFS domain. + */ +void lmac_dfs_enable(struct wlan_objmgr_pdev *pdev, + int *is_fastclk, + struct wlan_dfs_phyerr_param *param, + int dfsdomain); + +/** + * lmac_dfs_get_thresholds() - Get thresholds. + * @pdev: Pointer to PDEV structure. + * @param: Pointer to wlan_dfs_phyerr_param structure. + */ +void lmac_dfs_get_thresholds(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_phyerr_param *param); + +/** + * lmac_get_ah_devid() - Get ah devid. + * @pdev: Pointer to PDEV structure. + */ +uint16_t lmac_get_ah_devid(struct wlan_objmgr_pdev *pdev); + +/** + * lmac_get_ext_busy() - Get ext_busy. + * @pdev: Pointer to PDEV structure. + */ +uint32_t lmac_get_ext_busy(struct wlan_objmgr_pdev *pdev); + +/** + * lmac_set_use_cac_prssi() - Set use_cac_prssi value. + * @pdev: Pointer to PDEV structure. + */ +void lmac_set_use_cac_prssi(struct wlan_objmgr_pdev *pdev); + +/** + * lmac_get_target_type() - Get target type. + * @pdev: Pointer to PDEV structure. + */ +uint32_t lmac_get_target_type(struct wlan_objmgr_pdev *pdev); + +/** + * lmac_get_phymode_info() - Get phymode info. + * @pdev: Pointer to PDEV structure. + */ +uint32_t lmac_get_phymode_info(struct wlan_objmgr_pdev *pdev, + uint32_t chan_mode); + +/** + * lmac_is_host_dfs_check_support_enabled() - Check if Host DFS confirmation + * feature is supported. + * @pdev: Pointer to PDEV structure. + * + * Return: true, host dfs check supported, else false. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +bool lmac_is_host_dfs_check_support_enabled(struct wlan_objmgr_pdev *pdev); +#else +static inline bool lmac_is_host_dfs_check_support_enabled( + struct wlan_objmgr_pdev *pdev) +{ + return false; +} +#endif + +/** + * lmac_dfs_is_hw_mode_switch_in_progress() - Check if HW mode switch is in + * progress. + * @pdev: Pointer to PDEV structure. + * + * Return: true if HW mode switch is in progress, else false. + */ +bool lmac_dfs_is_hw_mode_switch_in_progress(struct wlan_objmgr_pdev *pdev); +#endif /* _WLAN_DFS_LMAC_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_mlme_api.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_mlme_api.h new file mode 100644 index 0000000000000000000000000000000000000000..3593f06d0f0a7620562363225b96ee3100fc1268 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_mlme_api.h @@ -0,0 +1,461 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: These APIs are used by DFS core functions to call mlme functions. + */ + +#ifndef _WLAN_DFS_MLME_API_H_ +#define _WLAN_DFS_MLME_API_H_ + +#include "wlan_dfs_ucfg_api.h" + +extern struct dfs_to_mlme global_dfs_to_mlme; + +/** + * dfs_mlme_start_rcsa() - Send RCSA to RootAP. + * @pdev: Pointer to DFS pdev object. + * @wait_for_csa: Wait for CSA from RootAP. + */ +void dfs_mlme_start_rcsa(struct wlan_objmgr_pdev *pdev, + bool *wait_for_csa); + +/** + * dfs_mlme_mark_dfs() - Mark the channel in the channel list. + * @pdev: Pointer to DFS pdev object. + * @ieee: Channel number. + * @freq: Channel frequency. + * @vhtop_ch_freq_seg2: VHT80 Cfreq2. + * @flags: channel flags. + */ +#ifdef CONFIG_CHAN_NUM_API +void dfs_mlme_mark_dfs(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint16_t freq, + uint8_t vhtop_ch_freq_seg2, + uint64_t flags); +#endif + +/** + * dfs_mlme_mark_dfs_for_freq() - Mark the channel in the channel list. + * @pdev: Pointer to DFS pdev object. + * @ieee: Channel number. + * @freq: Channel frequency. + * @vhtop_ch_freq_seg2_mhz: VHT80 Cfreq2 in Mhz. + * @flags: channel flags. + */ +#ifdef CONFIG_CHAN_FREQ_API +void dfs_mlme_mark_dfs_for_freq(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint16_t freq, + uint16_t vhtop_ch_freq_mhz_seg2, + uint64_t flags); +#endif +/** + * dfs_mlme_start_csa() - Sends CSA in ieeeChan + * @pdev: Pointer to DFS pdev object. + * @ieee_chan: Channel number. + * @freq: Channel frequency. + * @cfreq2: HT80 cfreq2. + * @flags: channel flags. + */ +#ifdef CONFIG_CHAN_NUM_API +void dfs_mlme_start_csa(struct wlan_objmgr_pdev *pdev, + uint8_t ieee_chan, + uint16_t freq, + uint8_t cfreq2, + uint64_t flags); +#endif + +/** + * dfs_mlme_start_csa_for_freq() - Sends CSA in ieeeChan + * @pdev: Pointer to DFS pdev object. + * @ieee_chan: Channel number. + * @freq: Channel frequency. + * @cfreq2: HT80 cfreq2 in Mhz. + * @flags: channel flags. + */ +#ifdef CONFIG_CHAN_FREQ_API +void dfs_mlme_start_csa_for_freq(struct wlan_objmgr_pdev *pdev, + uint8_t ieee_chan, + uint16_t freq, + uint16_t cfreq2_mhz, + uint64_t flags); +#endif +/** + * dfs_mlme_proc_cac() - Process the CAC completion event. + * @pdev: Pointer to DFS pdev object. + * @vdev_id: vdev id. + */ +void dfs_mlme_proc_cac(struct wlan_objmgr_pdev *pdev, uint32_t vdev_id); + +/** + * dfs_mlme_deliver_event_up_after_cac() - Send a CAC timeout, VAP up event to + * userspace. + * @pdev: Pointer to DFS pdev object. + */ +void dfs_mlme_deliver_event_up_after_cac(struct wlan_objmgr_pdev *pdev); + +/** + * dfs_mlme_get_dfs_ch_nchans() - Get number of channels in the channel list + * @pdev: Pointer to DFS pdev object. + * @nchans: Pointer to save the channel number. + */ +void dfs_mlme_get_dfs_ch_nchans(struct wlan_objmgr_pdev *pdev, int *nchans); + +/** + * dfs_mlme_get_extchan() - Get extension channel. + * @pdev: Pointer to DFS pdev object. + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_flags: Channel flags. + * @dfs_ch_flagext: Extended channel flags. + * @dfs_ch_ieee: IEEE channel number. + * @dfs_ch_vhtop_ch_freq_seg1: Channel Center frequency. + * @dfs_ch_vhtop_ch_freq_seg2: Channel Center frequency applicable for 80+80MHz + * mode of operation. + */ +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS dfs_mlme_get_extchan(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2); +#endif + +/** + * dfs_mlme_get_extchan() - Get extension channel. + * @pdev: Pointer to DFS pdev object. + * @dfs_chan_freq: Frequency in Mhz. + * @dfs_chan_flags: Channel flags. + * @dfs_chan_flagext: Extended channel flags. + * @dfs_chan_ieee: IEEE channel number. + * @dfs_chan_vhtop_ch_freq_seg1: Channel Center IEEE. + * @dfs_chan_vhtop_ch_freq_seg2: Channel Center IEEE applicable for 80+80MHz + * mode of operation. + * @dfs_chan_mhz_freq_seg1: Primary channel center freq. + * @dfs_chan_mhz_freq_seg2: Secondary channel center freq applicable for + * 80+80 MHZ. + */ + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS dfs_mlme_get_extchan_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_chan_freq, + uint64_t *dfs_chan_flags, + uint16_t *dfs_chan_flagext, + uint8_t *dfs_chan_ieee, + uint8_t *dfs_chan_vhtop_ch_freq_seg1, + uint8_t *dfs_chan_vhtop_ch_freq_seg2, + uint16_t *dfs_chan_mhz_freq_seg1, + uint16_t *dfs_chan_mhz_freq_seg2); +#endif + +/** + * dfs_mlme_set_no_chans_available() - Set no_chans_available flag. + * @pdev: Pointer to DFS pdev object. + * @val: Set this value to no_chans_available flag. + */ +void dfs_mlme_set_no_chans_available(struct wlan_objmgr_pdev *pdev, + int val); + +/** + * dfs_mlme_ieee2mhz() - Get the frequency from channel number. + * @pdev: Pointer to DFS pdev object. + * @ieee: Channel number. + * @flag: Channel flag. + */ +int dfs_mlme_ieee2mhz(struct wlan_objmgr_pdev *pdev, + int ieee, + uint64_t flag); + +/** + * dfs_mlme_find_dot11_channel() - Get dot11 channel from ieee, cfreq2 and mode. + * @pdev: Pointer to DFS pdev object. + * @ieee: Channel number. + * @des_cfreq2: cfreq2 + * @mode: Phymode + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_flags: Channel flags. + * @dfs_ch_flagext: Extended channel flags. + * @dfs_ch_ieee: IEEE channel number. + * @dfs_ch_vhtop_ch_freq_seg1: Channel Center frequency. + * @dfs_ch_vhtop_ch_freq_seg2: Channel Center frequency applicable for 80+80MHz + * mode of operation. + * + * Return: + * * QDF_STATUS_SUCCESS : Channel found. + * * QDF_STATUS_E_FAILURE: Channel not found. + */ +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS +dfs_mlme_find_dot11_channel(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint8_t des_cfreq2, + int mode, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2); +#endif + +/** + * dfs_mlme_find_dot11_chan_for_freq() - Find a channel pointer given the mode, + * frequency and channel flags. + * @pdev: Pointer to DFS pdev object. + * @ch_freq: Channel frequency. + * @des_cfreq2_mhz: cfreq2 in MHZ. + * @mode: Phymode + * @dfs_chan_freq: Frequency in Mhz. + * @dfs_chan_flags: Channel flags. + * @dfs_chan_flagext: Extended channel flags. + * @dfs_chan_ieee: IEEE channel number. + * @dfs_chan_vhtop_ch_freq_seg1: Channel Center IEEE for primary 80 segment. + * @dfs_chan_vhtop_ch_freq_seg2: Channel Center frequency applicable for + * 80+80MHz mode of operation. + * @dfs_chan_mhz_freq_seg1: Channel center frequency of primary 80 segment. + * @dfs_chan_mhz_freq_seg2: Channel center frequency for secondary 80 + * segment applicable only for 80+80MHZ mode of + * operation. + * + * Return: + * * QDF_STATUS_SUCCESS : Channel found. + * * QDF_STATUS_E_FAILURE: Channel not found. + */ +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS +dfs_mlme_find_dot11_chan_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t chan_freq, + uint16_t des_cfreq2_mhz, + int mode, + uint16_t *dfs_chan_freq, + uint64_t *dfs_chan_flags, + uint16_t *dfs_chan_flagext, + uint8_t *dfs_chan_ieee, + uint8_t *dfs_chan_vhtop_ch_freq_seg1, + uint8_t *dfs_chan_vhtop_ch_freq_seg2, + uint16_t *dfs_chan_mhz_freq_seg1, + uint16_t *dfs_chan_mhz_freq_seg2); +#endif + +/** + * dfs_mlme_get_dfs_ch_channels() - Get channel from channel list. + * @pdev: Pointer to DFS pdev object. + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_flags: Channel flags. + * @dfs_ch_flagext: Extended channel flags. + * @dfs_ch_ieee: IEEE channel number. + * @dfs_ch_vhtop_ch_freq_seg1: Channel Center frequency. + * @dfs_ch_vhtop_ch_freq_seg2: Channel Center frequency applicable for 80+80MHz + * mode of operation. + * @index: Index into channel list. + */ +#ifdef CONFIG_CHAN_NUM_API +void dfs_mlme_get_dfs_ch_channels(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2, + int index); +#endif + +/** + * dfs_mlme_get_dfs_channels_for_freq() - Get DFS channel from channel list. + * @pdev: Pointer to DFS pdev object. + * @dfs_chan_freq: Frequency in Mhz. + * @dfs_chan_flags: Channel flags. + * @dfs_chan_flagext: Extended channel flags. + * @dfs_chan_ieee: IEEE channel number. + * @dfs_chan_vhtop_ch_freq_seg1: Channel Center IEEE number. + * @dfs_chan_vhtop_ch_freq_seg2: Channel Center IEEE applicable for 80+80MHz + * mode of operation. + * @dfs_chan_mhz_freq_seg1 : Primary 80 Channel Center frequency. + * @dfs_chan_mhz_freq_seg2 : Channel center frequency applicable only for + * 80+80 mode of operation. + * @index: Index into channel list. + */ +#ifdef CONFIG_CHAN_FREQ_API +void +dfs_mlme_get_dfs_channels_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_chan_freq, + uint64_t *dfs_chan_flags, + uint16_t *dfs_chan_flagext, + uint8_t *dfs_chan_ieee, + uint8_t *dfs_chan_vhtop_ch_freq_seg1, + uint8_t *dfs_chan_vhtop_ch_freq_seg2, + uint16_t *dfs_chan_mhz_freq_seg1, + uint16_t *dfs_chan_mhz_freq_seg2, + int index); +#endif + +/** + * dfs_mlme_dfs_ch_flags_ext() - Get extension channel flags. + * @pdev: Pointer to DFS pdev object. + */ +uint32_t dfs_mlme_dfs_ch_flags_ext(struct wlan_objmgr_pdev *pdev); + +/** + * dfs_mlme_channel_change_by_precac() - Channel change by PreCAC. + * @pdev: Pointer to DFS pdev object. + */ +void dfs_mlme_channel_change_by_precac(struct wlan_objmgr_pdev *pdev); + +/** + * dfs_mlme_nol_timeout_notification() - NOL timeout notification to userspace. + * @pdev: Pointer to DFS pdev object. + */ +void dfs_mlme_nol_timeout_notification(struct wlan_objmgr_pdev *pdev); + +/** + * dfs_mlme_clist_update() - Mark the channel as RADAR. + * @pdev: Pointer to DFS pdev object. + * @nollist: Pointer to NOL list. + * @nentries: Number of channels in the NOL list. + */ +void dfs_mlme_clist_update(struct wlan_objmgr_pdev *pdev, + void *nollist, + int nentries); + +/** + * dfs_mlme_get_cac_timeout() - Get cac_timeout. + * @pdev: Pointer to DFS pdev object. + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_vhtop_ch_freq_seg2: Channel Center frequency applicable for 80+80MHz + * mode of operation. + * @dfs_ch_flags: Channel flags. + */ +#ifdef CONFIG_CHAN_NUM_API +int dfs_mlme_get_cac_timeout(struct wlan_objmgr_pdev *pdev, + uint16_t dfs_ch_freq, + uint8_t dfs_ch_vhtop_ch_freq_seg2, + uint64_t dfs_ch_flags); +#endif + +/** + * dfs_mlme_get_cac_timeout_for_freq() - Get cac_timeout. + * @pdev: Pointer to DFS pdev object. + * @dfs_chan_freq: Frequency in Mhz. + * @dfs_chan_vhtop_freq_seg2: Channel Center frequency applicable for 80+80MHz + * mode of operation. + * @dfs_chan_flags: Channel flags. + */ +#ifdef CONFIG_CHAN_FREQ_API +int dfs_mlme_get_cac_timeout_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t dfs_chan_freq, + uint16_t dfs_chan_vhtop_freq_seg2_mhz, + uint64_t dfs_chan_flags); +#endif +/** + * dfs_mlme_rebuild_chan_list_with_non_dfs_channels() - Rebuild the channel list + * with only non DFS channels. + * @pdev: Pointer to DFS pdev object. + * + * return: On success return 1 or 0, else failure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +int dfs_mlme_rebuild_chan_list_with_non_dfs_channels( + struct wlan_objmgr_pdev *pdev); +#else +static inline int dfs_mlme_rebuild_chan_list_with_non_dfs_channels( + struct wlan_objmgr_pdev *pdev) +{ + return 0; +} +#endif + +/** + * dfs_mlme_restart_vaps_with_non_dfs_chan() - Restart vaps with non DFS + * channels + * @pdev: Pointer to DFS pdev object. + * @no_chans_avail: Indicates if no channel is available. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_mlme_restart_vaps_with_non_dfs_chan(struct wlan_objmgr_pdev *pdev, + int no_chans_avail); +#else +static inline +void dfs_mlme_restart_vaps_with_non_dfs_chan(struct wlan_objmgr_pdev *pdev, + int no_chans_avail) +{ +} +#endif + +/** + * dfs_mlme_check_allowed_prim_chanlist() - Check whether the given channel is + * present in the primary allowed channel list or not + * @pdev: Pointer to DFS pdev object. + * @chan_num: Channel number + */ +#if defined(WLAN_SUPPORT_PRIMARY_ALLOWED_CHAN) +bool dfs_mlme_check_allowed_prim_chanlist(struct wlan_objmgr_pdev *pdev, + uint32_t chan_freq); + +#else +static inline +bool dfs_mlme_check_allowed_prim_chanlist(struct wlan_objmgr_pdev *pdev, + uint32_t chan_freq) +{ + return true; +} +#endif + +/** + * dfs_mlme_handle_dfs_scan_violation() - Handle scan start failure + * due to DFS violation (presence of NOL channel in scan channel list). + * @pdev: Pointer to pdev object. + */ +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) +void dfs_mlme_handle_dfs_scan_violation(struct wlan_objmgr_pdev *pdev); +#else +static inline +void dfs_mlme_handle_dfs_scan_violation(struct wlan_objmgr_pdev *pdev) +{ +} +#endif + +/** + * dfs_mlme_is_opmode_sta() - Check if pdev opmode is STA. + * @pdev: Pointer to DFS pdev object. + * + * Return: true if pdev opmode is STA, else false. + */ +bool dfs_mlme_is_opmode_sta(struct wlan_objmgr_pdev *pdev); + +/** + * dfs_mlme_acquire_radar_mode_switch_lock() - Acquire lock for radar processing + * over mode switch handling. + * @pdev: Pointer to DFS pdev object. + * + * Return: void. + */ +void dfs_mlme_acquire_radar_mode_switch_lock(struct wlan_objmgr_pdev *pdev); + +/** + * dfs_mlme_release_radar_mode_switch_lock() - Release lock taken for radar + * processing over mode switch handling. + * @pdev: Pointer to DFS pdev object. + * + * Return: void. + */ +void dfs_mlme_release_radar_mode_switch_lock(struct wlan_objmgr_pdev *pdev); +#endif /* _WLAN_DFS_MLME_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_public_struct.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_public_struct.h new file mode 100644 index 0000000000000000000000000000000000000000..dfa407daeff1a7e2235c48b2d4479a8bd549b05e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_public_struct.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_dfs_public_struct.h + * This file contains DFS data structures + */ + +#ifndef __WLAN_DFS_PUBLIC_STRUCT_H_ +#define __WLAN_DFS_PUBLIC_STRUCT_H_ + +/* TODO: This structure has many redundant variables, needs cleanup */ +/** + * struct radar_found_info - radar found info + * @pdev_id: pdev id. + * @detection_mode: 0 indicates RADAR detected, non-zero indicates debug mode. + * @freq_offset: frequency offset. + * @chan_width: channel width. + * @detector_id: detector id for full-offload. + * @segment_id: segment id (same as detector_id) for partial-offload. + * @timestamp: timestamp (Time when filter match is found in Firmware). + * @is_chirp: is chirp or not. + * @chan_freq: channel frequency (Primary channel frequency). + * @radar_freq: radar frequency (Is it same as '@chan_freq'?). + * @sidx: sidx value (same as freq_offset). + */ +struct radar_found_info { + uint32_t pdev_id; + uint32_t detection_mode; + int32_t freq_offset; + uint32_t chan_width; + uint32_t detector_id; + uint32_t segment_id; + uint32_t timestamp; + uint32_t is_chirp; + uint32_t chan_freq; + uint32_t radar_freq; + int32_t sidx; +}; + +/** + * struct dfs_acs_info - acs info, ch range + * @acs_mode: to enable/disable acs 1/0. + * @chan_freq_list: channel frequency list + * @num_of_channel: number of channel in ACS channel list + */ +struct dfs_acs_info { + uint8_t acs_mode; + uint32_t *chan_freq_list; + uint8_t num_of_channel; +}; + +/** + * struct radar_event_info - radar event info. + * @pulse_is_chirp: flag to indicate if this pulse is chirp. + * @pulse_center_freq: the center frequency of the radar pulse detected, KHz. + * @pulse_duration: the duaration of the pulse in us. + * @rssi: RSSI recorded in the ppdu. + * @pulse_detect_ts: timestamp indicates the time when DFS pulse is detected. + * @upload_fullts_low: low 32 tsf timestamp get from MAC tsf timer indicates + * the time that the radar event uploading to host. + * @upload_fullts_high: high 32 tsf timestamp get from MAC tsf timer indicates + * the time that the radar event uploading to host. + * @peak_sidx: index of peak magnitude bin (signed) + * @pdev_id: pdev_id for identifying the MAC. + * @delta_diff: Delta diff value. + * @delta_peak: Delta peak value. + * @psidx_diff: Psidx diff value. + * @is_psidx_diff_valid: Does fw send valid psidx diff. + */ +struct radar_event_info { + uint8_t pulse_is_chirp; + uint32_t pulse_center_freq; + uint32_t pulse_duration; + uint8_t rssi; + uint32_t pulse_detect_ts; + uint32_t upload_fullts_low; + uint32_t upload_fullts_high; + int32_t peak_sidx; + uint8_t pdev_id; + uint8_t delta_diff; + int8_t delta_peak; + int8_t psidx_diff; + int8_t is_psidx_diff_valid; +}; + +/** + * struct dfs_user_config - user configuration required for for DFS. + * @dfs_is_phyerr_filter_offload: flag to indicate DFS phyerr filtering offload. + */ +struct dfs_user_config { + bool dfs_is_phyerr_filter_offload; +}; + +/** + * struct dfs_radar_found_params - radar found parameters. + * @pri_min: Minimum PRI of detected radar pulse. + * @pri_max: Max PRI of detected radar pulse. + * @duration_min: Min duration of detected pulse in us. + * @duration_max: Max duration of detected pulse in us. + * @sidx_min: Min softare index of detected radar pulse. + * @sidx_max: Max software index of detected radar pulse. + */ +struct dfs_radar_found_params { + u_int32_t pri_min; + u_int32_t pri_max; + u_int32_t duration_min; + u_int32_t duration_max; + u_int32_t sidx_min; + u_int32_t sidx_max; +}; + +/** + * struct dfs_agile_cac_params - Agile DFS-CAC parameters. + * @precac_chan: Agile preCAC channel. + * @precac_chan_freq: Agile preCAC channel frequency in MHZ. + * @precac_chwidth: Agile preCAC channel width. + * @min_precac_timeout: Minimum agile preCAC timeout. + * @max_precac_timeout: Maximum agile preCAC timeout. + */ +struct dfs_agile_cac_params { + uint8_t precac_chan; + uint16_t precac_chan_freq; + enum phy_ch_width precac_chwidth; + uint32_t min_precac_timeout; + uint32_t max_precac_timeout; +}; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..c49a497ae22ddbdb2aab46ea90cf47e8684ea6e3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_tgt_api.h @@ -0,0 +1,680 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has the DFS dispatcher API which is exposed to outside of DFS + * component. + */ + +#ifndef _WLAN_DFS_TGT_API_H_ +#define _WLAN_DFS_TGT_API_H_ + +#include + +#define WLAN_DFS_RESET_TIME_S 7 +#define WLAN_DFS_WAIT (60 + WLAN_DFS_RESET_TIME_S) /* 60 seconds */ +#define WLAN_DFS_WAIT_MS ((WLAN_DFS_WAIT) * 1000) /*in MS*/ + +/* Command id to send test radar to firmware */ +#define DFS_PHYERR_OFFLOAD_TEST_SET_RADAR 0 + +/* Segment ID corresponding to primary segment */ +#define SEG_ID_PRIMARY 0 + +/* Segment ID corresponding to secondary segment */ +#define SEG_ID_SECONDARY 1 + +/* dfs_radar_args_for_unit_test: Radar parameters to be sent in unit test cmd. + * @IDX_CMD_ID: Index id pointing to command id value + * @IDX_PDEV_ID: Index id pointing to pdev id value + * @IDX_RADAR_PARAM1_ID: Index pointing to packed arguments value that includes + * 1). Segment ID, + * 2). Chirp information (is chirp or non chirp), + * 3). Frequency offset. + * + * The packed argument structure is: + * + * ------------------------------32 bits arg------------------------- + * + * ------------21 bits-------------|-------8 bits------|1 bit|2 bits| + * __________________________________________________________________ + *| | | | | | | | | | | | | | | + *|---------21 Unused bits---------|x|x|x| |x|x|x|x| |x| x | x | x | + *|________________________________|_|_|_|_|_|_|_|_|_|_|_____|___|___| + * + * |___________________|_____|_______| + * freq.offset Chirp segID + * + * @DFS_UNIT_TEST_NUM_ARGS: Number of arguments for bangradar unit test + * command. + * @DFS_MAX_NUM_UNIT_TEST_ARGS: Maximum number of arguments for unit test + * command in radar simulation. + */ +enum { + IDX_CMD_ID = 0, + IDX_PDEV_ID, + IDX_RADAR_PARAM1_ID, + DFS_UNIT_TEST_NUM_ARGS, + DFS_MAX_NUM_UNIT_TEST_ARGS = DFS_UNIT_TEST_NUM_ARGS +}; + +#define SEG_ID_SIZE 2 +#define IS_CHIRP_SIZE 1 +#define MASK 0xFF + +/** + * struct dfs_emulate_bang_radar_test_cmd - Unit test command structure to send + * WMI command to firmware from host + * and simulate bangradar event. + * @vdev_id: vdev id + * @num_args: number of arguments + * @args: arguments + */ +struct dfs_emulate_bang_radar_test_cmd { + uint32_t vdev_id; + uint32_t num_args; + uint32_t args[DFS_MAX_NUM_UNIT_TEST_ARGS]; +}; + +/** + * struct vdev_adfs_complete_status - OCAC complete status event param + * @vdev_id: Physical device identifier + * @chan_freq: Channel number + * @chan_width: Channel Width + * @center_freq: Center channel number + * @ocac_status: off channel cac status + */ +struct vdev_adfs_complete_status { + uint32_t vdev_id; + uint32_t chan_freq; + uint32_t chan_width; + uint32_t center_freq; + uint32_t ocac_status; +}; + +extern struct dfs_to_mlme global_dfs_to_mlme; + +/** + * tgt_dfs_set_current_channel() - Fill dfs channel structure from + * dfs_channel structure. + * @pdev: Pointer to DFS pdev object. + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_flags: Channel flags. + * @dfs_ch_flagext: Extended channel flags. + * @dfs_ch_ieee: IEEE channel number. + * @dfs_ch_vhtop_ch_freq_seg1: Channel Center frequency1. + * @dfs_ch_vhtop_ch_freq_seg2: Channel Center frequency2. + */ +#ifdef DFS_COMPONENT_ENABLE +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS tgt_dfs_set_current_channel(struct wlan_objmgr_pdev *pdev, + uint16_t dfs_ch_freq, + uint64_t dfs_ch_flags, + uint16_t dfs_ch_flagext, + uint8_t dfs_ch_ieee, + uint8_t dfs_ch_vhtop_ch_freq_seg1, + uint8_t dfs_ch_vhtop_ch_freq_seg2); +#endif + +/** + * tgt_dfs_set_current_channel_for_freq() - Fill dfs channel structure from + * dfs_channel structure. + * @pdev: Pointer to DFS pdev object. + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_flags: Channel flags. + * @dfs_ch_flagext: Extended channel flags. + * @dfs_ch_ieee: IEEE channel number. + * @dfs_ch_vhtop_ch_freq_seg1: Channel Center frequency1. + * @dfs_ch_vhtop_ch_freq_seg2: Channel Center frequency2. + * @dfs_ch_mhz_freq_seg1: Channel center frequency of primary segment in MHZ. + * @dfs_ch_mhz_freq_seg2: Channel center frequency of secondary segment in MHZ + * applicable only for 80+80MHZ mode of operation. + */ +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS +tgt_dfs_set_current_channel_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t dfs_ch_freq, + uint64_t dfs_ch_flags, + uint16_t dfs_ch_flagext, + uint8_t dfs_ch_ieee, + uint8_t dfs_ch_vhtop_ch_freq_seg1, + uint8_t dfs_ch_vhtop_ch_freq_seg2, + uint16_t dfs_ch_mhz_freq_seg1, + uint16_t dfs_ch_mhz_freq_seg2); +#endif + +/** + * tgt_dfs_radar_enable() - Enables the radar. + * @pdev: Pointer to DFS pdev object. + * @no_cac: If no_cac is 0, it cancels the CAC. + * @enable: disable/enable radar + * + * This is called each time a channel change occurs, to (potentially) enable + * the radar code. + */ +QDF_STATUS tgt_dfs_radar_enable( + struct wlan_objmgr_pdev *pdev, + int no_cac, uint32_t opmode, bool enable); + +/** + * tgt_dfs_control()- Used to process ioctls related to DFS. + * @pdev: Pointer to DFS pdev object. + * @id: Command type. + * @indata: Input buffer. + * @insize: size of the input buffer. + * @outdata: A buffer for the results. + * @outsize: Size of the output buffer. + */ +QDF_STATUS tgt_dfs_control(struct wlan_objmgr_pdev *pdev, + u_int id, + void *indata, + uint32_t insize, + void *outdata, + uint32_t *outsize, + int *error); + +/** + * tgt_dfs_get_radars() - Based on the chipset, calls init radar table functions + * @pdev: Pointer to DFS pdev object. + * + * Wrapper function for dfs_get_radars(). This function called from + * outside of DFS component. + */ +QDF_STATUS tgt_dfs_get_radars(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_dfs_process_radar_ind() - Process radar found indication. + * @pdev: Pointer to DFS pdev object. + * @radar_found: radar found info. + * + * Process radar found indication. + * + * Return QDF_STATUS. + */ +QDF_STATUS tgt_dfs_process_radar_ind(struct wlan_objmgr_pdev *pdev, + struct radar_found_info *radar_found); +#else +static inline QDF_STATUS tgt_dfs_set_current_channel( + struct wlan_objmgr_pdev *pdev, + uint16_t dfs_ch_freq, + uint64_t dfs_ch_flags, + uint16_t dfs_ch_flagext, + uint8_t dfs_ch_ieee, + uint8_t dfs_ch_vhtop_ch_freq_seg1, + uint8_t dfs_ch_vhtop_ch_freq_seg2) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS tgt_dfs_radar_enable( + struct wlan_objmgr_pdev *pdev, + int no_cac, uint32_t opmode, bool enable) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS tgt_dfs_control(struct wlan_objmgr_pdev *pdev, + u_int id, + void *indata, + uint32_t insize, + void *outdata, + uint32_t *outsize, + int *error) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS tgt_dfs_get_radars(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS tgt_dfs_process_radar_ind( + struct wlan_objmgr_pdev *pdev, + struct radar_found_info *radar_found) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * tgt_dfs_process_phyerr() - Process phyerr. + * @pdev: Pointer to DFS pdev object. + * @buf: Phyerr buffer. + * @datalen: phyerr buffer length. + * @r_rssi: RSSI. + * @r_ext_rssi: Extension channel RSSI. + * @r_rs_tstamp: Timestamp. + * @r_fulltsf: TSF64. + * + * Wrapper function for dfs_process_phyerr(). This function called from + * outside of DFS component. + */ +QDF_STATUS tgt_dfs_process_phyerr(struct wlan_objmgr_pdev *pdev, + void *buf, + uint16_t datalen, + uint8_t r_rssi, + uint8_t r_ext_rssi, + uint32_t r_rs_tstamp, + uint64_t r_fulltsf); + +/** + * tgt_dfs_process_phyerr_filter_offload() - Process radar event. + * Wrapper function for dfs_process_phyerr_filter_offload(). This function + * called from outside of DFS component. + * @pdev: Pointer to DFS pdev object. + * @wlan_radar_event: pointer to radar_event_info. + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_dfs_process_phyerr_filter_offload(struct wlan_objmgr_pdev *pdev, + struct radar_event_info *wlan_radar_event); + +/** + * tgt_dfs_is_phyerr_filter_offload() - Is phyerr filter offload. + * @psoc: Pointer to psoc object. + * @is_phyerr_filter_offload: Pointer to is_phyerr_filter_offload. + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_dfs_is_phyerr_filter_offload(struct wlan_objmgr_psoc *psoc, + bool *is_phyerr_filter_offload); + +/** + * tgt_dfs_destroy_object() - Destroys the DFS object. + * @pdev: Pointer to DFS pdev object. + * + * Wrapper function for dfs_destroy_object(). This function called from + * outside of DFS component. + */ +QDF_STATUS tgt_dfs_destroy_object(struct wlan_objmgr_pdev *pdev); + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * tgt_dfs_set_tx_leakage_threshold() - set tx_leakage_threshold. + * @pdev: Pointer to DFS pdev object. + * + * Return QDF_STATUS. + */ +QDF_STATUS tgt_dfs_set_tx_leakage_threshold(struct wlan_objmgr_pdev *pdev); +#else +static inline QDF_STATUS tgt_dfs_set_tx_leakage_threshold + (struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * tgt_dfs_is_precac_timer_running() - Check whether precac timer is running. + * @pdev: Pointer to DFS pdev object. + * @is_precac_timer_running: Pointer to save precac timer value. + * + * Wrapper function for dfs_is_precac_timer_running(). This function called from + * outside of DFS component. + */ +QDF_STATUS tgt_dfs_is_precac_timer_running(struct wlan_objmgr_pdev *pdev, + bool *is_precac_timer_running); + +/** + * tgt_dfs_set_agile_precac_state() - set state for Agile Precac. + * + * @pdev: Pointer to DFS pdev object. + * @agile_precac_state: Agile Precac state + * + * wrapper function for dfs_set_agile_precac_state. + * This function called from outside of dfs component. + */ +QDF_STATUS tgt_dfs_set_agile_precac_state(struct wlan_objmgr_pdev *pdev, + int agile_precac_state); + +/** + * tgt_dfs_agile_precac_start() - Start agile precac + * + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_set_agile_precac_state. + * This function called from outside of dfs component. + */ +QDF_STATUS tgt_dfs_agile_precac_start(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_dfs_ocac_complete() - Process off channel cac complete indication. + * @pdev: Pointer to DFS pdev object. + * @vdev_adfs_complete_status: Off channel CAC complete status. + * + * wrapper function for dfs_set_agile_precac_state. + * This function called from outside of dfs component. + */ +QDF_STATUS tgt_dfs_ocac_complete(struct wlan_objmgr_pdev *pdev, + struct vdev_adfs_complete_status *ocac_status); + +/** + * utils_dfs_find_vht80_chan_for_precac() - Find VHT80 channel for precac. + * @pdev: Pointer to DFS pdev object. + * @chan_mode: Channel mode. + * @ch_freq_seg1: Segment1 channel freq. + * @cfreq1: cfreq1. + * @cfreq2: cfreq2. + * @phy_mode: Precac phymode. + * @dfs_set_cfreq2: Precac cfreq2 + * @set_agile: Agile mode flag. + * + * wrapper function for dfs_find_vht80_chan_for_precacdfs_cancel_cac_timer(). + * This function called from outside of dfs component. + */ +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS tgt_dfs_find_vht80_chan_for_precac(struct wlan_objmgr_pdev *pdev, + uint32_t chan_mode, + uint8_t ch_freq_seg1, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t *phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile); +#endif + +/** + * tgt_dfs_find_vht80_precac_chan_freq() - Find VHT80 channel for precac + * @pdev: Pointer to DFS pdev object. + * @chan_mode: Channel mode. + * @ch_freq_seg1_mhz: Segment1 channel freq in MHZ. + * @cfreq1: cfreq1. + * @cfreq2: cfreq2. + * @phy_mode: Precac phymode. + * @dfs_set_cfreq2: Precac cfreq2 + * @set_agile: Agile mode flag. + * + * wrapper function for dfs_find_vht80_chan_for_precac_for_freq(). + */ +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS +tgt_dfs_find_vht80_precac_chan_freq(struct wlan_objmgr_pdev *pdev, + uint32_t chan_mode, + uint16_t ch_freq_mhz_seg1, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t *phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile); +#endif + +/** + * tgt_dfs_cac_complete() - Process cac complete indication. + * @pdev: Pointer to DFS pdev object. + * @vdev_id: vdev id. + * + * Process cac complete indication from firmware. + * + * Return QDF_STATUS. + */ +QDF_STATUS tgt_dfs_cac_complete(struct wlan_objmgr_pdev *pdev, + uint32_t vdev_id); + +/** + * tgt_dfs_reg_ev_handler() - Register dfs events. + * @psoc: Pointer to psoc. + * + * Register dfs events. + * + * Return: QDF_STATUS. + */ +QDF_STATUS tgt_dfs_reg_ev_handler(struct wlan_objmgr_psoc *psoc); + +/** + * tgt_dfs_stop() - Clear dfs timers. + * @dfs: Pointer to wlan_dfs structure. + */ +QDF_STATUS tgt_dfs_stop(struct wlan_objmgr_pdev *pdev); + +/** +* tgt_dfs_process_emulate_bang_radar_cmd() - Process to emulate dfs bangradar +* command. +* @pdev: Pointer to DFS pdev object. +* +* Process to emulate dfs bangradar command. +* +* Return: QDF_STATUS. +*/ +QDF_STATUS tgt_dfs_process_emulate_bang_radar_cmd(struct wlan_objmgr_pdev *pdev, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test); + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * tgt_dfs_set_phyerr_filter_offload() - config phyerr filter offload + * @pdev: Pointer to DFS pdev object. + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_dfs_set_phyerr_filter_offload(struct wlan_objmgr_pdev *pdev); +#else +static inline QDF_STATUS tgt_dfs_set_phyerr_filter_offload + (struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +/** + * tgt_dfs_send_avg_params_to_fw() - send average radar parameters to fw. + * @pdev: Pointer to DFS pdev object. + * @params: Pointer to dfs radar average parameters. + * + * Return: QDF_STATUS + */ +QDF_STATUS +tgt_dfs_send_avg_params_to_fw(struct wlan_objmgr_pdev *pdev, + struct dfs_radar_found_params *params); +#endif + +/** + * tgt_dfs_action_on_status_from_fw() - trigger the action to be taken based on + * host dfs status received from fw. + * @pdev: Pointer to pdev object. + * @status: Pointer to the host dfs status received from fw. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS tgt_dfs_action_on_status_from_fw(struct wlan_objmgr_pdev *pdev, + uint32_t *status); +#else +static inline +QDF_STATUS tgt_dfs_action_on_status_from_fw(struct wlan_objmgr_pdev *pdev, + uint32_t *status) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * tgt_dfs_is_radar_enabled() - checks if radar detection is enabled. + * @pdev: Pointer to pdev object. + * @ignore_dfs: Pointer to check the value. If 1, radar detection is disabled. + */ +void tgt_dfs_is_radar_enabled(struct wlan_objmgr_pdev *pdev, int *ignore_dfs); + +/** + * tgt_dfs_reset_spoof_test() - reset the dfs spoof check variables + * @pdev: Pointer to pdev object. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS tgt_dfs_reset_spoof_test(struct wlan_objmgr_pdev *pdev); +#else +static inline +QDF_STATUS tgt_dfs_reset_spoof_test(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * tgt_dfs_enable_stadfs() - Enable/Disable STADFS capability. + * @pdev: Pointer to DFS pdev object. + * @val: input value. + */ +void tgt_dfs_enable_stadfs(struct wlan_objmgr_pdev *pdev, bool val); + +/** + * tgt_dfs_is_stadfs_enabled() - Get STADFS capability + * @pdev: Pointer to DFS pdev object. + * + * Return: true if STADFS is enabled, else false. + */ +bool tgt_dfs_is_stadfs_enabled(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_dfs_is_pdev_5ghz() - Check if the input pdev is 5GHZ. + * @pdev: Pointer to DFS pdev object. + * + * Return: true if the pdev supports 5GHz, else false. + */ +bool tgt_dfs_is_pdev_5ghz(struct wlan_objmgr_pdev *pdev); + +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) +/** + * tgt_dfs_send_usenol_pdev_param() - Send usenol pdev param to FW. + * @pdev: Pointer to pdev object. + * @usenol: Value of usenol + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_dfs_send_usenol_pdev_param(struct wlan_objmgr_pdev *pdev, + bool usenol); + +/** + * tgt_dfs_send_subchan_marking() - Send subchannel marking pdev param to FW. + * @pdev: Pointer to pdev object. + * @subchanmark: Value of subchannel_marking. + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_dfs_send_subchan_marking(struct wlan_objmgr_pdev *pdev, + bool subchanmark); +#else +static inline +QDF_STATUS tgt_dfs_send_usenol_pdev_param(struct wlan_objmgr_pdev *pdev, + bool usenol) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +tgt_dfs_send_subchan_marking(struct wlan_objmgr_pdev *pdev, bool subchanmark) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#ifdef QCA_SUPPORT_AGILE_DFS +/** + * tgt_dfs_set_fw_adfs_support() - Set FW aDFS support in dfs object. + * @pdev: Pointer to pdev object. + * @fw_adfs_support_160: aDFS enabled when pdev is on 160/80P80MHz. + * @fw_adfs_support_non_160: aDFS enabled when pdev is on 20/40/80MHz. + * + * Return: void. + */ +void tgt_dfs_set_fw_adfs_support(struct wlan_objmgr_pdev *pdev, + bool fw_adfs_support_160, + bool fw_adfs_support_non_160); +#else +static inline +void tgt_dfs_set_fw_adfs_support(struct wlan_objmgr_pdev *pdev, + bool fw_adfs_support_160, + bool fw_adfs_support_non_160) +{ +} +#endif + +/** + * tgt_dfs_init_tmp_psoc_nol() - Init temporary psoc NOL structure. + * @pdev: Pointer to pdev object. + * @num_radios: Number of radios in the psoc. + * + * Return: void. + */ +void tgt_dfs_init_tmp_psoc_nol(struct wlan_objmgr_pdev *pdev, + uint8_t num_radios); + +/** + * tgt_dfs_deinit_tmp_psoc_nol() - De-init temporary psoc NOL structure. + * @pdev: Pointer to pdev object. + * + * Return: void. + */ +void tgt_dfs_deinit_tmp_psoc_nol(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_dfs_save_dfs_nol_in_psoc() - Save NOL data of given pdev. + * @pdev: Pointer to pdev object. + * @pdev_id: The pdev ID which will have the NOL data. + * @low_5ghz_freq: The low 5GHz frequency value of the target pdev id. + * @high_5ghz_freq: The high 5GHz frequency value of the target pdev id. + * + * Based on the frequency of the NOL channel, copy it to the target pdev_id + * structure in psoc. + * + * Return: void. + */ +void tgt_dfs_save_dfs_nol_in_psoc(struct wlan_objmgr_pdev *pdev, + uint8_t pdev_id, + uint16_t low_5ghz_freq, + uint16_t high_5ghz_freq); + +/** + * tgt_dfs_reinit_nol_from_psoc_copy() - Reinit saved NOL data to corresponding + * pdevs. + * @pdev: Pointer to pdev object. + * @pdev_id: pdev_id of the given pdev. + * + * Return: void. + */ +void tgt_dfs_reinit_nol_from_psoc_copy(struct wlan_objmgr_pdev *pdev, + uint8_t pdev_id); + +/** + * tgt_dfs_reinit_precac_lists() - Reinit preCAC lists. + * @src_pdev: Source pdev object from which the preCAC list is copied. + * @dest_pdev: Destination pdev object to which the preCAC list is copied. + * @low_5g_freq: Low 5G frequency value of the destination DFS. + * @high_5g_freq: High 5G frequency value of the destination DFS. + * + * Copy all the preCAC list entries from the source pdev object to the + * destination pdev object which fall within the frequency range of + * low_5g_freq and high_5g_freq. + * + * Return: None (void). + */ +void tgt_dfs_reinit_precac_lists(struct wlan_objmgr_pdev *src_pdev, + struct wlan_objmgr_pdev *dest_pdev, + uint16_t low_5g_freq, + uint16_t high_5g_freq); + +/** + * tgt_dfs_complete_deferred_tasks() - Process HW mode switch completion and + * handle deferred tasks. + * @pdev: Pointer to primary pdev object. + * + * Return: void. + */ +void tgt_dfs_complete_deferred_tasks(struct wlan_objmgr_pdev *pdev); +#endif /* _WLAN_DFS_TGT_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..2d7d7fb8252beee3ae5b8cb376288c418c7fd3e2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_ucfg_api.h @@ -0,0 +1,599 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has the DFS dispatcher API which is exposed to outside of DFS + * component. + */ + +#ifndef _WLAN_DFS_UCFG_API_H_ +#define _WLAN_DFS_UCFG_API_H_ + +#include +#include +#include + +/** + * struct dfs_to_mlme - These are MLME function pointer used by DFS component. + * @pdev_component_obj_attach: Attach DFS object to PDEV. + * @pdev_component_obj_detach: Detach DFS object from PDEV. + * @pdev_get_comp_private_obj: Get DFS object from PDEV. + * @dfs_start_rcsa: Send RCSA to RootAP. + * @mlme_mark_dfs: Calls dfs_action function. + * @mlme_start_csa: Sends CSA. + * @mlme_proc_cac: Process the CAC completion event. + * @mlme_deliver_event_up_after_cac: Send a CAC timeout, VAP up event to user + * space + * @mlme_get_dfs_ch_nchans: Get number of channels in the channel + * list. + * @mlme_get_extchan: Gets the extension channel. + * @mlme_set_no_chans_available: Sets no_chans_available flag. + * @mlme_ieee2mhz: Gets Channel freq from ieee number. + * @mlme_find_dot11_channel: Find dot11 channel. + * @mlme_get_dfs_ch_channels: Get the channel list. + * @mlme_dfs_ch_flags_ext: Gets channel extension flag. + * @mlme_channel_change_by_precac: Channel change triggered by PreCAC. + * @mlme_precac_chan_change_csa: Channel change triggered by PrCAC using + * Channel Switch Announcement. + * @mlme_nol_timeout_notification: NOL timeout notification. + * @mlme_clist_update: Updates the channel list. + * @mlme_is_opmode_sta: Check if pdev opmode is STA. + * @mlme_get_cac_timeout: Gets the CAC timeout. + * @mlme_rebuild_chan_list_with_non_dfs_channel: Rebuild channels with non-dfs + * channels. + * @mlme_restart_vaps_with_non_dfs_chan: Restart vaps with non-dfs channel. + * @mlme_check_allowed_prim_chanlist: Check whether the given channel is + * present in the primary allowed channel + * list or not. + * @mlme_update_scan_channel_list: Update the scan channel list sent to FW. + * @mlme_bringdown_vaps: Bringdown vaps if no chans is present. + * @mlme_dfs_deliver_event: Deliver DFS events to user space + * @mlme_precac_chan_change_csa_for_freq:Channel change triggered by PrCAC using + * Channel Switch Announcement. + * @mlme_mark_dfs_for_freq: Mark DFS channel frequency as radar. + * @mlme_get_extchan_for_freq: Get the extension channel. + * @mlme_find_dot11_chan_for_freq: Find a channel pointer. + * @mlme_get_dfs_channels_for_freq: Get DFS channels from current channel + * list. + * @mlme_get_cac_timeout_for_freq: Get CAC timeout for a given channel + * frequency. + * @mlme_acquire_radar_mode_switch_lock: Acquire lock for radar processing over + * mode switch. + * @mlme_release_radar_mode_switch_lock: Release lock taken for radar processing + * over mode switch. + */ +struct dfs_to_mlme { + QDF_STATUS (*pdev_component_obj_attach)(struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status); + QDF_STATUS (*pdev_component_obj_detach)(struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj); + QDF_STATUS (*dfs_start_rcsa)(struct wlan_objmgr_pdev *pdev, + bool *wait_for_csa); +#ifdef CONFIG_CHAN_NUM_API + QDF_STATUS (*mlme_mark_dfs)(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint16_t freq, + uint8_t vhtop_ch_freq_seg2, + uint64_t flags); +#endif +#ifdef CONFIG_CHAN_FREQ_API + QDF_STATUS (*mlme_mark_dfs_for_freq)(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint16_t freq, + uint16_t ic_mhz_freq_seg2, + uint64_t flags); +#endif +#ifdef CONFIG_CHAN_NUM_API + QDF_STATUS (*mlme_start_csa)(struct wlan_objmgr_pdev *pdev, + uint8_t ieee_chan, uint16_t freq, + uint8_t cfreq2, uint64_t flags); +#endif +#ifdef CONFIG_CHAN_FREQ_API + QDF_STATUS (*mlme_start_csa_for_freq)(struct wlan_objmgr_pdev *pdev, + uint8_t ieee_chan, uint16_t freq, + uint16_t cfreq2, uint64_t flags); +#endif + + QDF_STATUS (*mlme_proc_cac)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*mlme_deliver_event_up_after_cac)( + struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*mlme_get_dfs_ch_nchans)(struct wlan_objmgr_pdev *pdev, + int *nchans); +#ifdef CONFIG_CHAN_NUM_API + QDF_STATUS (*mlme_get_extchan)(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2); +#endif +#ifdef CONFIG_CHAN_FREQ_API + QDF_STATUS (*mlme_get_extchan_for_freq)(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_vhtop_ch_freq_seg1, + uint8_t *dfs_vhtop_ch_freq_seg2, + uint16_t *dfs_ch_mhz_freq_seg1, + uint16_t *dfs_ch_mhz_freq_seg2); +#endif + QDF_STATUS (*mlme_set_no_chans_available)(struct wlan_objmgr_pdev *pdev, + int val); + QDF_STATUS (*mlme_ieee2mhz)(struct wlan_objmgr_pdev *pdev, + int ieee, + uint64_t flag, + int *freq); +#ifdef CONFIG_CHAN_NUM_API + QDF_STATUS (*mlme_find_dot11_channel)(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint8_t des_cfreq2, + int mode, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2); +#endif +#ifdef CONFIG_CHAN_FREQ_API + QDF_STATUS (*mlme_find_dot11_chan_for_freq)(struct wlan_objmgr_pdev *, + uint16_t freq, + uint16_t des_cfreq2_mhz, + int mode, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_freq_seg1, + uint8_t *dfs_ch_freq_seg2, + uint16_t *dfs_cfreq1_mhz, + uint16_t *dfs_cfreq2_mhz); +#endif +#ifdef CONFIG_CHAN_NUM_API + QDF_STATUS (*mlme_get_dfs_ch_channels)(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2, + int index); +#endif +#ifdef CONFIG_CHAN_FREQ_API + QDF_STATUS (*mlme_get_dfs_channels_for_freq)( + struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_chan_freq, + uint64_t *dfs_chan_flags, + uint16_t *dfs_chan_flagext, + uint8_t *dfs_chan_ieee, + uint8_t *dfs_chan_vhtop_ch_freq_seg1, + uint8_t *dfs_chan_vhtop_ch_freq_seg2, + uint16_t *dfs_chan_mhz_freq_seg1, + uint16_t *dfs_chan_mhz_freq_seg2, + int index); +#endif + QDF_STATUS (*mlme_dfs_ch_flags_ext)(struct wlan_objmgr_pdev *pdev, + uint16_t *flag_ext); + QDF_STATUS (*mlme_channel_change_by_precac)( + struct wlan_objmgr_pdev *pdev); +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT +#ifdef CONFIG_CHAN_FREQ_API + QDF_STATUS + (*mlme_precac_chan_change_csa_for_freq)(struct wlan_objmgr_pdev *, + uint16_t des_chan_freq, + enum wlan_phymode des_mode); +#endif +#ifdef CONFIG_CHAN_NUM_API + QDF_STATUS + (*mlme_precac_chan_change_csa)(struct wlan_objmgr_pdev *, + uint8_t des_chan, + enum wlan_phymode des_mode); +#endif +#endif + QDF_STATUS (*mlme_nol_timeout_notification)( + struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*mlme_clist_update)(struct wlan_objmgr_pdev *pdev, + void *nollist, + int nentries); + bool (*mlme_is_opmode_sta)(struct wlan_objmgr_pdev *pdev); +#ifdef CONFIG_CHAN_NUM_API + QDF_STATUS (*mlme_get_cac_timeout)(struct wlan_objmgr_pdev *pdev, + uint16_t dfs_ch_freq, + uint8_t c_vhtop_ch_freq_seg2, + uint64_t dfs_ch_flags, + int *cac_timeout); +#endif +#ifdef CONFIG_CHAN_FREQ_API + QDF_STATUS + (*mlme_get_cac_timeout_for_freq)(struct wlan_objmgr_pdev *pdev, + uint16_t dfs_ch_freq, + uint16_t c_vhtop_ch_freq_seg2, + uint64_t dfs_ch_flags, + int *cac_timeout); +#endif + QDF_STATUS (*mlme_rebuild_chan_list_with_non_dfs_channels) + (struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*mlme_restart_vaps_with_non_dfs_chan) + (struct wlan_objmgr_pdev *pdev, int no_chans_avail); + bool (*mlme_check_allowed_prim_chanlist) + (struct wlan_objmgr_pdev *pdev, uint32_t chan); + QDF_STATUS (*mlme_update_scan_channel_list) + (struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*mlme_bringdown_vaps) + (struct wlan_objmgr_pdev *pdev); + void (*mlme_dfs_deliver_event) + (struct wlan_objmgr_pdev *pdev, + uint16_t freq, + enum WLAN_DFS_EVENTS event); + void (*mlme_acquire_radar_mode_switch_lock) + (struct wlan_objmgr_pdev *pdev); + void (*mlme_release_radar_mode_switch_lock) + (struct wlan_objmgr_pdev *pdev); +}; + +extern struct dfs_to_mlme global_dfs_to_mlme; + +/** + * wlan_dfs_pdev_obj_create_notification() - DFS pdev object create handler. + * @pdev: Pointer to DFS pdev object. + */ +QDF_STATUS wlan_dfs_pdev_obj_create_notification(struct wlan_objmgr_pdev *pdev, + void *arg); + +/** + * wlan_dfs_pdev_obj_destroy_notification() - DFS pdev object delete handler. + * @pdev: Pointer to DFS pdev object. + */ +QDF_STATUS wlan_dfs_pdev_obj_destroy_notification(struct wlan_objmgr_pdev *pdev, + void *arg); + +/** + * ucfg_dfs_is_ap_cac_timer_running() - Returns the dfs cac timer. + * @pdev: Pointer to DFS pdev object. + * @is_ap_cac_timer_running: Pointer to save dfs_cac_timer_running value. + * + * Wrapper function for dfs_is_ap_cac_timer_running(). + * This function called from outside of dfs component. + */ +QDF_STATUS ucfg_dfs_is_ap_cac_timer_running(struct wlan_objmgr_pdev *pdev, + int *is_ap_cac_timer_running); + +/** + * ucfg_dfs_getnol() - Wrapper function for dfs_get_nol() + * @pdev: Pointer to DFS pdev object. + * @dfs_nolinfo: Pointer to dfsreq_nolinfo structure. + * + * Wrapper function for dfs_getnol(). + * This function called from outside of dfs component. + */ +QDF_STATUS ucfg_dfs_getnol(struct wlan_objmgr_pdev *pdev, void *dfs_nolinfo); + +/** + * ucfg_dfs_override_cac_timeout() - Override the default CAC timeout. + * @pdev: Pointer to DFS pdev object. + * @cac_timeout: CAC timeout value. + * + * Wrapper function for dfs_override_cac_timeout(). + * This function called from outside of dfs component. + */ +QDF_STATUS ucfg_dfs_override_cac_timeout(struct wlan_objmgr_pdev *pdev, + int cac_timeout, int *status); + +/** + * ucfg_dfs_get_override_cac_timeout() - Get override CAC timeout value. + * @pdev: Pointer to DFS pdev object. + * @cac_timeout: Pointer to save the CAC timeout value. + * + * Wrapper function for dfs_get_override_cac_timeout(). + * This function called from outside of dfs component. + */ +QDF_STATUS ucfg_dfs_get_override_cac_timeout(struct wlan_objmgr_pdev *pdev, + int *cac_timeout, int *status); + +/** + * ucfg_dfs_get_override_precac_timeout() - Get precac timeout. + * @pdev: Pointer to DFS pdev object. + * @precac_timeout: Get precac timeout value in this variable. + * + * Wrapper function for dfs_get_override_precac_timeout(). + * This function called from outside of dfs component. + */ +QDF_STATUS ucfg_dfs_get_override_precac_timeout(struct wlan_objmgr_pdev *pdev, + int *precac_timeout); + +/** + * ucfg_dfs_override_precac_timeout() - Override the default precac timeout. + * @pdev: Pointer to DFS pdev object. + * @precac_timeout: Precac timeout value. + * + * Wrapper function for dfs_override_precac_timeout(). + * This function called from outside of dfs component. + */ +QDF_STATUS ucfg_dfs_override_precac_timeout(struct wlan_objmgr_pdev *pdev, + int precac_timeout); + +/** + * ucfg_dfs_set_precac_enable() - Set precac enable flag. + * @pdev: Pointer to DFS pdev object. + * @value: input value for dfs_legacy_precac_ucfg flag. + * + * Wrapper function for dfs_set_precac_enable(). + * This function called from outside of dfs component. + */ +QDF_STATUS ucfg_dfs_set_precac_enable(struct wlan_objmgr_pdev *pdev, + uint32_t value); + +/** + * ucfg_dfs_get_legacy_precac_enable() - Get the legacy precac enable flag. + * @pdev: Pointer to DFS pdev object. + * @buff: Pointer to save precac_enable value. + * + * Wrapper function for dfs_is_legacy_precac_enabled() and returns the + * legacy precac enable flag for partial offload chipsets. + * This function called from outside of dfs component. + */ +QDF_STATUS ucfg_dfs_get_legacy_precac_enable(struct wlan_objmgr_pdev *pdev, + bool *buff); + +/** + * ucfg_dfs_get_agile_precac_enable() - Get agile precac enable flag. + * @pdev: Pointer to DFS pdev object. + * @buff: Pointer to save dfs_agile_precac_ucfg value. + * + * Wrapper function for dfs_is_legacy_precac_enabled(). + * This function called from outside of dfs component. + * + * Return: + * * QDF_STATUS_SUCCESS: Successfully able to get agile precac flag. + * * QDF_STATUS_E_FAILURE: Failed to get agile precac flag. + */ +QDF_STATUS ucfg_dfs_get_agile_precac_enable(struct wlan_objmgr_pdev *pdev, + bool *buff); + +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT +/** + * ucfg_dfs_set_precac_intermediate_chan() - Set intermediate channel + * for preCAC. + * @pdev: Pointer to DFS pdev object. + * @value: Channel number of intermediate channel + * + * Wrapper function for dfs_set_precac_intermediate_chan(). + * This function is called from outside of dfs component. + * + * Return: + * * QDF_STATUS_SUCCESS : Successfully set intermediate channel. + * * QDF_STATUS_E_FAILURE: Failed to set intermediate channel. + */ +QDF_STATUS ucfg_dfs_set_precac_intermediate_chan(struct wlan_objmgr_pdev *pdev, + uint32_t value); + +/** + * ucfg_dfs_get_precac_intermediate_chan() - Get intermediate channel + * for preCAC. + * @pdev: Pointer to DFS pdev object. + * @buff: Pointer to Channel number of intermediate channel. + * + * Wrapper function for dfs_get_precac_intermediate_chan(). + * This function is called from outside of dfs component. + * + * Return: Configured intermediate precac channel. + */ +QDF_STATUS ucfg_dfs_get_precac_intermediate_chan(struct wlan_objmgr_pdev *pdev, + int *buff); + +/** + * ucfg_dfs_get_precac_chan_state() - Get precac status for the given channel. + * @pdev: Pointer to DFS pdev object. + * @precac_chan: Channel number for which precac state needs to be determined. + * + * Wrapper function for dfs_get_precac_chan_state(). + * This function called from outside of dfs component. + * + * Return: Precac state of the given channel. + */ +#ifdef CONFIG_CHAN_NUM_API +enum precac_chan_state +ucfg_dfs_get_precac_chan_state(struct wlan_objmgr_pdev *pdev, + uint8_t precac_chan); +#endif + +/** + * ucfg_dfs_get_precac_chan_state_for_freq() - Get precac status for the + * given channel. + * @pdev: Pointer to DFS pdev object. + * @precac_chan: Channel frequency for which precac state needs to be + * determined. + * + * Wrapper function for dfs_get_precac_chan_state(). + * This function called from outside of dfs component. + * + * Return: Precac state of the given channel. + */ +#ifdef CONFIG_CHAN_FREQ_API +enum precac_chan_state +ucfg_dfs_get_precac_chan_state_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t precac_freq); +#endif + +#endif + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * ucfg_dfs_update_config() - Update DFS user config. + * @psoc: Pointer to psoc. + * @req: DFS user config. + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_dfs_update_config(struct wlan_objmgr_psoc *psoc, + struct dfs_user_config *req); +#else +static inline QDF_STATUS ucfg_dfs_update_config(struct wlan_objmgr_psoc *psoc, + struct dfs_user_config *req) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * ucfg_dfs_set_override_status_timeout() - override the value of host dfs + * status wait timeout. + * @pdev: Pointer to DFS pdev object. + * @status_timeout: timeout value. + * + * Wrapper function for dfs_set_override_status_timeout(). + * This function called from outside of dfs component. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS ucfg_dfs_set_override_status_timeout(struct wlan_objmgr_pdev *pdev, + int status_timeout); +#else +static inline +QDF_STATUS ucfg_dfs_set_override_status_timeout(struct wlan_objmgr_pdev *pdev, + int status_timeout) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(WLAN_DFS_SYNTHETIC_RADAR) +/** + * ucfg_dfs_allow_hw_pulses() - Set or unset dfs-allow_hw_pulses + * which isolates synthetic radar pulse detection from actual radar detection. + * @pdev: Pointer to DFS pdev object. + * @allow_hw_pulses: Allow synthetic pulse detection true/false. + * + * Wrapper function for dfs_set_allow_hw_pulses(). + * This function called from outside of dfs component. + * + * Return: void + */ +void ucfg_dfs_allow_hw_pulses(struct wlan_objmgr_pdev *pdev, + bool allow_hw_pulses); + +/** + * ucfg_dfs_is_hw_pulses_allowed() - Check if actual radar detection is allowed + * or synthetic pulse detection is enabled. + * @pdev: Pointer to DFS pdev object. + * + * Wrapper function for dfs_is_hw_pulses_allowed(). + * This function called from outside of dfs component. + * + * Return: bool + */ +bool ucfg_dfs_is_hw_pulses_allowed(struct wlan_objmgr_pdev *pdev); +#else +static inline +void ucfg_dfs_allow_hw_pulses(struct wlan_objmgr_pdev *pdev, + bool allow_hw_pulses) +{ +} + +static inline +bool ucfg_dfs_is_hw_pulses_allowed(struct wlan_objmgr_pdev *pdev) +{ + return true; +} +#endif + +/** + * ucfg_dfs_get_override_status_timeout() - Get the value of host dfs status + * wait timeout. + * @pdev: Pointer to DFS pdev object. + * @status_timeout: Pointer to save the timeout value. + * + * Wrapper function for dfs_get_override_status_timeout(). + * This function called from outside of dfs component. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS ucfg_dfs_get_override_status_timeout(struct wlan_objmgr_pdev *pdev, + int *status_timeout); +#else +static inline +QDF_STATUS ucfg_dfs_get_override_status_timeout(struct wlan_objmgr_pdev *pdev, + int *status_timeout) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * ucfg_dfs_set_nol_subchannel_marking() - Set or unset NOL subchannel marking. + * @pdev: Pointer to DFS pdev object. + * @nol_subchannel_marking: Set NOL subchannel marking based on this value. + * + * Wrapper function for dfs_set_nol_subchannel_marking(). + * This function is called from outside of dfs component. + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_dfs_set_nol_subchannel_marking(struct wlan_objmgr_pdev *pdev, + bool nol_subchannel_marking); + +/** + * ucfg_dfs_get_nol_subchannel_marking() - Get the value of NOL subchannel + * marking. + * @pdev: Pointer to DFS pdev object. + * @nol_subchannel_marking: Store the value of NOL subchannel marking. + * + * Wrapper function for dfs_get_nol_subchannel_marking(). + * This function is called from outside of dfs component. + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_dfs_get_nol_subchannel_marking(struct wlan_objmgr_pdev *pdev, + bool *nol_subchannel_marking); +/** + * ucfg_dfs_reinit_timers() - Init DFS timers. + * @pdev: Pointer to wlan_objmgr_pdev structure. + * + * Wrapper function to reset CAC, NOL, DFS Test Timer and ZeroCAC Timer. + * This is invoked per pdev to reinitialize timers after HW Mode Switch is + * triggered. + */ +QDF_STATUS ucfg_dfs_reinit_timers(struct wlan_objmgr_pdev *pdev); + +/** + * ucfg_dfs_reset_agile_config() - Reset ADFS config. + * @pdev: Pointer to wlan_objmgr_pdev structure. + * + * Wrapper function to reset Agile DFS config such as the variables which hold + * information about the state of the preCAC timer, active precac + * dfs index and OCAC status. It is invoked before HW Mode switch is triggered + * to ensure ADFS config is in a well known consistent state. + */ +#ifdef QCA_SUPPORT_AGILE_DFS +QDF_STATUS ucfg_dfs_reset_agile_config(struct wlan_objmgr_psoc *psoc); +#else +static inline QDF_STATUS ucfg_dfs_reset_agile_config(struct wlan_objmgr_psoc + *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif /* _WLAN_DFS_UCFG_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_utils_api.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_utils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..d074bb68831643515833053428db234841fe4033 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_utils_api.h @@ -0,0 +1,963 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has the DFS dispatcher API which is exposed to outside of DFS + * component. + */ + +#ifndef _WLAN_DFS_UTILS_API_H_ +#define _WLAN_DFS_UTILS_API_H_ + +#include "wlan_dfs_ucfg_api.h" +#include "wlan_reg_services_api.h" +#include + +/* Add channel to nol */ +#define DFS_NOL_SET 1 + +/* Remove channel from nol */ +#define DFS_NOL_RESET 0 + +/* Mark nol-history flag for the channel */ +#define DFS_NOL_HISTORY_SET 1 + +/* Clear nol-history flag from the channel */ +#define DFS_NOL_HISTORY_RESET 0 + +/* Max nol channels */ +#define DFS_MAX_NOL_CHANNEL 80 + +/* WLAN 2.4GHz start freq */ +#define DFS_24_GHZ_BASE_FREQ (2407) + +/* WLAN 5GHz start freq */ +#define DFS_5_GHZ_BASE_FREQ (5000) + +/* WLAN 2.4 GHz channel number 6 */ +#define DFS_24_GHZ_CHANNEL_6 (6) + +/* WLAN 2.4 GHz channel number 14 */ +#define DFS_24_GHZ_CHANNEL_14 (14) + +/* WLAN 2.4 GHz channel number 15 */ +#define DFS_24_GHZ_CHANNEL_15 (15) + +/* WLAN 2.4 GHz channel number 27 */ +#define DFS_24_GHZ_CHANNEL_27 (27) + +/* WLAN 5GHz channel number 170 */ +#define DFS_5_GHZ_CHANNEL_170 (170) + +/* WLAN 5MHz channel spacing */ +#define DFS_CHAN_SPACING_5MHZ (5) + +/* WLAN 20Hz channel spacing */ +#define DFS_CHAN_SPACING_20MHZ (20) + +/* WLAN 2.4GHz channel number 14 freq */ +#define DFS_CHAN_14_FREQ (2484) + +/* WLAN 2.4GHz channel number 15 freq */ +#define DFS_CHAN_15_FREQ (2512) + +/* WLAN 5GHz channel number 170 freq */ +#define DFS_CHAN_170_FREQ (5852) + + + +extern struct dfs_to_mlme global_dfs_to_mlme; + +/** + * utils_dfs_cac_valid_reset() - Cancels the dfs_cac_valid_timer timer. + * @pdev: Pointer to DFS pdev object. + * @prevchan_ieee: Prevchan number. + * @prevchan_flags: Prevchan flags. + * + * Wrapper function for dfs_cac_valid_reset(). This function called from + * outside of DFS component. + */ +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS utils_dfs_cac_valid_reset(struct wlan_objmgr_pdev *pdev, + uint8_t prevchan_ieee, + uint32_t prevchan_flags); +#endif + +/** + * utils_dfs_cac_valid_reset_for_freq() - Cancels the dfs_cac_valid_timer timer. + * @pdev: Pointer to DFS pdev object. + * @prevchan_freq: Prevchan frequency. + * @prevchan_flags: Prevchan flags. + * + * Wrapper function for dfs_cac_valid_reset_for_freq(). This function called + * from outside of DFS component. + */ +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS utils_dfs_cac_valid_reset_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t prevchan_freq, + uint32_t prevchan_flags); +#endif + +/** + * utils_dfs_reset() - Reset DFS members. + * @pdev: Pointer to DFS pdev object. + */ +QDF_STATUS utils_dfs_reset(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_is_freq_in_nol() - check if given channel in nol list + * @pdev: Pointer to DFS pdev object + * @freq: channel frequency + * + * check if given channel in nol list. + * + * Return: true if channel in nol, false else + */ +bool utils_dfs_is_freq_in_nol(struct wlan_objmgr_pdev *pdev, uint32_t freq); + +/** + * utils_dfs_reset_precaclists() - Clears and initializes precac_list. + * @pdev: Pointer to DFS pdev object. + * + * Wrapper function for dfs_reset_precaclists(). This function called from + * outside of DFS component. + */ +QDF_STATUS utils_dfs_reset_precaclists(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_unmark_precac_nol() - Clears precac channel marked as NOL. + * @pdev: Pointer to DFS pdev object. + * @chan: channel to be unmarked as NOL. + * + * Return void. + */ +#ifdef CONFIG_CHAN_NUM_API +void utils_dfs_unmark_precac_nol(struct wlan_objmgr_pdev *pdev, uint8_t chan); +#endif + +/** + * utils_dfs_unmark_precac_nol_for_freq() - Clears precac channel marked as NOL. + * @pdev: Pointer to DFS pdev object. + * @chan_freq: channel freq to be unmarked as NOL. + * + * Return void. + */ +#ifdef CONFIG_CHAN_FREQ_API +void utils_dfs_unmark_precac_nol_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t chan_freq); +#endif + +/** + * utils_dfs_cancel_precac_timer() - Cancel the precac timer. + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_cancel_precac_timer(). this function called from + * outside of dfs component. + */ +QDF_STATUS utils_dfs_cancel_precac_timer(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_start_precac_timer() - Start the precac timer. + * @pdev: Pointer to DFS pdev object. + * + * Wrapper function for dfs_start_precac_timer(). This function called from + * outside of dfs component. + * + * Return: + * * QDF_STATUS_E_FAILURE: Failed to start timer. + * * QDF_STATUS_SUCCESS: Timer started successfully. + */ +QDF_STATUS utils_dfs_start_precac_timer(struct wlan_objmgr_pdev *pdev); + +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT +/** + * utils_dfs_precac_decide_pref_chan() - Choose preferred channel + * @pdev: Pointer to DFS pdev object. + * @ch_ieee: Pointer to channel number. + * @mode: Configured PHY mode. + * + * Wrapper function for dfs_decide_precac_preferred_chan(). This + * function called from outside of dfs component. + * + * Return: True if intermediate channel needs to configure. False otherwise. + */ +#ifdef CONFIG_CHAN_NUM_API +bool +utils_dfs_precac_decide_pref_chan(struct wlan_objmgr_pdev *pdev, + uint8_t *ch_ieee, + enum wlan_phymode mode); +#endif + +/** + * utils_dfs_precac_decide_pref_chan() - Choose preferred channel + * @pdev: Pointer to DFS pdev object. + * @ch_freq: Pointer to channel frequency. + * @mode: Configured PHY mode. + * + * Wrapper function for dfs_decide_precac_preferred_chan(). This + * function called from outside of dfs component. + * + * Return: True if intermediate channel needs to configure. False otherwise. + */ +#ifdef CONFIG_CHAN_FREQ_API +bool +utils_dfs_precac_decide_pref_chan_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *ch_freq, + enum wlan_phymode mode); +#endif +#endif + +/** + * utils_dfs_cancel_cac_timer() - Cancels the CAC timer. + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_cancel_cac_timer(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_cancel_cac_timer(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_start_cac_timer() - Starts the CAC timer. + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_start_cac_timer(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_start_cac_timer(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_cac_stop() - Clear the AP CAC timer. + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_cac_stop(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_cac_stop(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_stacac_stop() - Clear the STA CAC timer. + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_stacac_stop(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_stacac_stop(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_get_usenol() - Returns use_nol flag. + * @pdev: Pointer to DFS pdev object. + * @usenol: Pointer to usenol value. + * + * wrapper function for dfs_get_usenol(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_get_usenol(struct wlan_objmgr_pdev *pdev, + uint16_t *usenol); + +/** + * utils_dfs_radar_disable() - Disables the radar. + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_radar_disable(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_radar_disable(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_set_update_nol_flag() - Sets update_nol flag. + * @pdev: Pointer to DFS pdev object. + * @val: update_nol flag. + * + * wrapper function for dfs_set_update_nol_flag(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_set_update_nol_flag(struct wlan_objmgr_pdev *pdev, + bool val); + +/** + * utils_dfs_get_update_nol_flag() - Returns update_nol flag. + * @pdev: Pointer to DFS pdev object. + * @nol_flag: Fill nol_flag in this variable. + * + * wrapper function for dfs_get_update_nol_flag(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_get_update_nol_flag(struct wlan_objmgr_pdev *pdev, + bool *nol_flag); + +/** + * utils_dfs_get_dfs_use_nol() - Get usenol. + * @pdev: Pointer to DFS pdev object. + * @dfs_use_nol: Pointer to dfs_use_nol. + * + * wrapper function for dfs_get_dfs_use_nol(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_get_dfs_use_nol(struct wlan_objmgr_pdev *pdev, + int *dfs_use_nol); + +/** + * utils_dfs_get_nol_timeout() - Get NOL timeout. + * @pdev: Pointer to DFS pdev object. + * @dfs_nol_timeout: Pointer to dfs_nol_timeout. + * + * wrapper function for dfs_get_nol_timeout(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_get_nol_timeout(struct wlan_objmgr_pdev *pdev, + int *dfs_nol_timeout); + +/** + * utils_dfs_nol_addchan() - Add channel to NOL. + * @pdev: Pointer to DFS pdev object. + * @chan: channel t o add NOL. + * @dfs_nol_timeout: NOL timeout. + * + * wrapper function for dfs_nol_addchan(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_nol_addchan(struct wlan_objmgr_pdev *pdev, + uint16_t freq, + uint32_t dfs_nol_timeout); + +/** + * utils_dfs_nol_update() - NOL update + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_nol_update(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_nol_update(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_second_segment_radar_disable() - Disables the second segment radar. + * @pdev: Pointer to DFS pdev object. + * + * This is called when AP detects the radar, to (potentially) disable + * the radar code. + */ +QDF_STATUS utils_dfs_second_segment_radar_disable( + struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_fetch_nol_ie_info() - Fills the arguments with NOL information + * needed for sending RCSA. + * pdev: Pointer to DFS pdev object. + * nol_ie_bandwidth: Minimum DFS subchannel Bandwidth. + * nol_ie_startfreq: Radar affected channel list start subchannel's centre + * frequency. + * nol_ie_bitmap: Bitmap of radar affected subchannels. + */ +QDF_STATUS utils_dfs_fetch_nol_ie_info(struct wlan_objmgr_pdev *pdev, + uint8_t *nol_ie_bandwidth, + uint16_t *nol_ie_startfreq, + uint8_t *nol_ie_bitmap); + +/** + * utils_dfs_set_rcsa_flags() - Set flags that are required for sending + * RCSA and NOL IE. + * pdev: Pointer to DFS pdev object. + * is_rcsa_ie_sent: Boolean to check if RCSA IE should be sent or not. + * is_nol_ie_sent: Boolean to check if NOL IE should be sent or not. + */ + +QDF_STATUS utils_dfs_set_rcsa_flags(struct wlan_objmgr_pdev *pdev, + bool is_rcsa_ie_sent, + bool is_nol_ie_sent); + +/** + * utils_dfs_get_rcsa_flags() - Get flags that are required for sending + * RCSA and NOL IE. + * pdev: Pointer to DFS pdev object. + * is_rcsa_ie_sent: Boolean to check if RCSA IE should be sent or not. + * is_nol_ie_sent: Boolean to check if NOL IE should be sent or not. + */ + +QDF_STATUS utils_dfs_get_rcsa_flags(struct wlan_objmgr_pdev *pdev, + bool *is_rcsa_ie_sent, + bool *is_nol_ie_sent); + +/** + * utils_dfs_process_nol_ie_bitmap() - Update NOL with external radar + * information. + * pdev: Pointer to DFS pdev object. + * nol_ie_bandwidth: Minimum DFS subchannel Bandwidth. + * nol_ie_startfreq: Radar affected channel list start channel's + * centre frequency. + * nol_ie_bitmap: Bitmap of radar affected subchannels. + * + * Return: True if NOL IE should be propagated, else false. + */ +bool utils_dfs_process_nol_ie_bitmap(struct wlan_objmgr_pdev *pdev, + uint8_t nol_ie_bandwidth, + uint16_t nol_ie_startfreq, + uint8_t nol_ie_bitmap); + +/** + * utils_dfs_bw_reduce() - Set bw reduce. + * @pdev: Pointer to DFS pdev object. + * @bw_reduce: Fill bw_reduce value in this variable. + * + * Return: QDF_STATUS + */ +QDF_STATUS utils_dfs_bw_reduce(struct wlan_objmgr_pdev *pdev, + bool bw_reduce); + +/** + * utils_dfs_is_bw_reduce() - Get bw reduce. + * @pdev: Pointer to DFS pdev object. + * @bw_reduce: Pointer to get bw_reduce value. + * + * Return: QDF_STATUS + */ +QDF_STATUS utils_dfs_is_bw_reduce(struct wlan_objmgr_pdev *pdev, + bool *bw_reduce); +/** + * utils_dfs_set_cac_timer_running() - Sets the cac timer running. + * @pdev: Pointer to DFS pdev object. + * @val: Set this value to dfs_cac_timer_running variable. + */ +QDF_STATUS utils_dfs_set_cac_timer_running(struct wlan_objmgr_pdev *pdev, + int val); + +/** + * utils_dfs_get_nol_chfreq_and_chwidth() - Sets the cac timer running. + * @pdev: Pointer to DFS pdev object. + * @nollist: Pointer to NOL channel entry. + * @nol_chfreq: Pointer to save channel frequency. + * @nol_chwidth: Pointer to save channel width. + * @index: Index into nol list. + */ +QDF_STATUS utils_dfs_get_nol_chfreq_and_chwidth(struct wlan_objmgr_pdev *pdev, + void *nollist, + uint32_t *nol_chfreq, + uint32_t *nol_chwidth, + int index); + +/** + * utils_dfs_get_random_channel() - Get random channel. + * @pdev: Pointer to DFS pdev object. + * @flags: random channel selection flags. + * @ch_params: current channel params. + * @hw_mode: current operating mode. + * @target_chan: Pointer to target_chan. + * @acs_info: acs range info. + * + * wrapper function for get_random_chan(). this + * function called from outside of dfs component. + * + * Return: QDF_STATUS + */ +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS utils_dfs_get_random_channel(struct wlan_objmgr_pdev *pdev, + uint16_t flags, struct ch_params *ch_params, + uint32_t *hw_mode, uint8_t *target_chan, + struct dfs_acs_info *acs_info); +#endif + +/** + * utils_dfs_get_random_channel_for_freq() - Get random channel. + * @pdev: Pointer to DFS pdev object. + * @flags: random channel selection flags. + * @ch_params: current channel params. + * @hw_mode: current operating mode. + * @target_chan: Pointer to target_chan freq. + * @acs_info: acs range info. + * + * wrapper function for get_random_chan(). this + * function called from outside of dfs component. + * + * Return: QDF_STATUS + */ +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS +utils_dfs_get_random_channel_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t flags, + struct ch_params *ch_params, + uint32_t *hw_mode, uint16_t *target_chan, + struct dfs_acs_info *acs_info); +#endif + +/** + * utils_dfs_get_vdev_random_channel() - Get random channel for vdev + * @pdev: Pointer to DFS pdev object. + * @vdev: vdev of the request + * @flags: random channel selection flags. + * @ch_params: current channel params. + * @hw_mode: current operating mode. + * @target_chan: Pointer to target_chan. + * @acs_info: acs range info. + * + * Get random channel based on vdev interface type. If the vdev is null, + * the function will get random channel by SAP interface type. + * + * Return: QDF_STATUS + */ +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS utils_dfs_get_vdev_random_channel( + struct wlan_objmgr_pdev *pdev, struct wlan_objmgr_vdev *vdev, + uint16_t flags, struct ch_params *ch_params, uint32_t *hw_mode, + uint8_t *target_chan, struct dfs_acs_info *acs_info); +#endif + +/** + * utils_dfs_get_vdev_random_channel() - Get random channel for vdev + * @pdev: Pointer to DFS pdev object. + * @vdev: vdev of the request + * @flags: random channel selection flags. + * @ch_params: current channel params. + * @hw_mode: current operating mode. + * @target_chan: Pointer to target_chan_freq. + * @acs_info: acs range info. + * + * Get random channel based on vdev interface type. If the vdev is null, + * the function will get random channel by SAP interface type. + * + * Return: QDF_STATUS + */ + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS utils_dfs_get_vdev_random_channel_for_freq( + struct wlan_objmgr_pdev *pdev, struct wlan_objmgr_vdev *vdev, + uint16_t flags, struct ch_params *ch_params, uint32_t *hw_mode, + uint16_t *target_chan_freq, struct dfs_acs_info *acs_info); +#endif + +/** + * utils_dfs_bw_reduced_channel() - Get BW reduced channel. + * @pdev: Pointer to DFS pdev object. + * @ch_params: current channel params. + * @hw_mode: current operating mode. + * @target_chan: Pointer to target_chan. + * + * wrapper function for get bw_reduced_channel. this + * function called from outside of dfs component. + * + * Return: QDF_STATUS + */ +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS utils_dfs_bw_reduced_channel(struct wlan_objmgr_pdev *pdev, + struct ch_params *ch_params, + uint32_t *hw_mode, + uint8_t *target_chan); +#endif + +/** + * utils_dfs_bw_reduced_channel_for_freq() - Get BW reduced channel. + * @pdev: Pointer to DFS pdev object. + * @ch_params: current channel params. + * @hw_mode: current operating mode. + * @target_chan: Pointer to target_chan freq. + * + * wrapper function for get bw_reduced_channel. this + * function called from outside of dfs component. + * + * Return: QDF_STATUS + */ +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS utils_dfs_bw_reduced_channel_for_freq(struct wlan_objmgr_pdev *pdev, + struct ch_params *ch_params, + uint32_t *hw_mode, + uint16_t *target_chan_freq); +#endif + +/** + * utils_dfs_init_nol() - Initialize nol from platform driver. + * @pdev: pdev handler. + * + * Initialize nol from platform driver. + * + * Return: None + */ +#ifdef QCA_DFS_NOL_PLATFORM_DRV_SUPPORT +void utils_dfs_init_nol(struct wlan_objmgr_pdev *pdev); +#else +static inline void utils_dfs_init_nol(struct wlan_objmgr_pdev *pdev) +{ +} +#endif +/** + * utils_dfs_save_nol() - save nol list to platform driver. + * @pdev: pdev handler. + * + * Save nol list to platform driver. + * + * Return: None + */ +void utils_dfs_save_nol(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_print_nol_channels() - log nol channels. + * @pdev: pdev handler. + * + * log nol channels. + * + * Return: None + */ +#ifdef DFS_COMPONENT_ENABLE +void utils_dfs_print_nol_channels(struct wlan_objmgr_pdev *pdev); +#else +static inline void utils_dfs_print_nol_channels(struct wlan_objmgr_pdev *pdev) +{ +} +#endif + +/** + * utils_dfs_clear_nol_channels() - clear nol list. + * @pdev: pdev handler. + * + * log nol channels. + * + * Return: None + */ +void utils_dfs_clear_nol_channels(struct wlan_objmgr_pdev *pdev); + +/** + * utils_is_dfs_chan_for_freq() - is channel dfs. + * @pdev: pdev handler. + * @chan_freq: Channel frequency in MHZ. + * + * is channel dfs. + * + * Return: True if channel dfs, else false. + */ +#ifdef CONFIG_CHAN_FREQ_API +static inline bool utils_is_dfs_chan_for_freq(struct wlan_objmgr_pdev *pdev, + uint32_t chan_freq) +{ + return wlan_reg_is_dfs_for_freq(pdev, chan_freq); +} +#endif + +/** + * utils_is_dfs_cfreq2_ch() - is channel dfs cfreq2. + * @pdev: pdev handler. + * + * is channel dfs. + * + * Return: True if channel dfs cfreq2, else false. + */ +bool utils_is_dfs_cfreq2_ch(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_reg_update_nol_ch() - set nol channel + * + * @pdev: pdev ptr + * @ch_list: channel list to be returned + * @num_ch: number of channels + * @nol_ch: nol flag + * + * Return: void + */ +#ifdef CONFIG_CHAN_NUM_API +void utils_dfs_reg_update_nol_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *ch_list, + uint8_t num_ch, + bool nol_ch); +#endif + +/** + * utils_dfs_reg_update_nol_chan_for_freq() - set nol channel + * + * @pdev: pdev ptr + * @ch_list: freq channel list to be returned + * @num_ch: number of channels + * @nol_ch: nol flag + * + * Return: void + */ +#ifdef CONFIG_CHAN_FREQ_API +void utils_dfs_reg_update_nol_chan_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *ch_list, + uint8_t num_ch, + bool nol_ch); +#endif +/** + * utils_dfs_freq_to_chan () - convert channel freq to channel number + * @freq: frequency + * + * Return: channel number + */ +uint8_t utils_dfs_freq_to_chan(uint32_t freq); + +/** + * utils_dfs_chan_to_freq () - convert channel number to frequency + * @chan: channel number + * + * Return: frequency + */ +#ifdef DFS_COMPONENT_ENABLE +uint32_t utils_dfs_chan_to_freq(uint8_t chan); +#else +static inline uint32_t utils_dfs_chan_to_freq(uint8_t chan) +{ + return 0; +} +#endif +/** + * utils_dfs_update_cur_chan_flags() - Update DFS channel flag and flagext. + * @pdev: Pointer to DFS pdev object. + * @flags: New channel flags + * @flagext: New Extended flags + * + * Return: QDF_STATUS + */ +QDF_STATUS utils_dfs_update_cur_chan_flags(struct wlan_objmgr_pdev *pdev, + uint64_t flags, + uint16_t flagext); + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * utils_dfs_mark_leaking_ch() - to mark channel leaking in to nol + * @pdev: Pointer to pdev structure. + * @ch_width: channel width + * @temp_ch_lst_sz: the target channel list + * @temp_ch_lst: the target channel list + * + * This function removes the channels from temp channel list that + * (if selected as target channel) will cause leakage in one of + * the NOL channels + * + * Return: QDF_STATUS + */ +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS utils_dfs_mark_leaking_ch(struct wlan_objmgr_pdev *pdev, + enum phy_ch_width ch_width, + uint8_t temp_ch_lst_sz, + uint8_t *temp_ch_lst); +#endif +/** + * utils_dfs_mark_leaking_chan_for_freq() - to mark channel leaking in to nol + * @pdev: Pointer to pdev structure. + * @ch_width: channel width + * @temp_ch_lst_sz: the target channel list + * @temp_ch_lst: the target frequency list + * + * This function removes the channels from temp channel list that + * (if selected as target channel) will cause leakage in one of + * the NOL channels + * + * Return: QDF_STATUS + */ +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS utils_dfs_mark_leaking_chan_for_freq(struct wlan_objmgr_pdev *pdev, + enum phy_ch_width ch_width, + uint8_t temp_ch_lst_sz, + uint16_t *temp_ch_lst); +#endif +#else +#ifdef CONFIG_CHAN_NUM_API +static inline QDF_STATUS utils_dfs_mark_leaking_ch + (struct wlan_objmgr_pdev *pdev, + enum phy_ch_width ch_width, + uint8_t temp_ch_lst_sz, + uint8_t *temp_ch_lst) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#ifdef CONFIG_CHAN_FREQ_API +static inline QDF_STATUS utils_dfs_mark_leaking_chan_for_freq + (struct wlan_objmgr_pdev *pdev, + enum phy_ch_width ch_width, + uint8_t temp_ch_lst_sz, + uint16_t *temp_ch_lst) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif +/** + * utils_get_dfsdomain() - Get DFS domain. + * @pdev: Pointer to PDEV structure. + * + * Return: DFS domain. + */ +int utils_get_dfsdomain(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_get_cur_rd() - Get current regdomain. + * @pdev: pdev ptr + * + * Return: Regdomain pair id. + */ +uint16_t utils_dfs_get_cur_rd(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_is_spoof_check_failed() - get spoof check status. + * @pdev: pdev ptr + * @is_spoof_check_failed: pointer containing the status. + * + * Return: QDF_STATUS. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS utils_dfs_is_spoof_check_failed(struct wlan_objmgr_pdev *pdev, + bool *is_spoof_check_failed); +#else +static inline +QDF_STATUS utils_dfs_is_spoof_check_failed(struct wlan_objmgr_pdev *pdev, + bool *is_spoof_check_failed) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * dfs_get_num_chans() - Get the number of channels supported by the regulatory. + * + * Return: Number of supported channels. + */ +int dfs_get_num_chans(void); + +/** + * utils_dfs_get_chan_list() - Get channel list from regdb. + * @pdev: Pointer to DFS pdev object. + * @clist: Pointer to current channel list + * @num_chan: number of channels in the current channel list. + */ +void utils_dfs_get_chan_list(struct wlan_objmgr_pdev *pdev, + void *clist, + uint32_t *num_chan); + +/** + * utils_dfs_get_nol_history_chan_list() - Get nol_history channels from regdb + * component. + * @pdev: Pointer to pdev structure. + * @clist: Pointer to channel list. + * @num_chan: number of channels. + */ +void utils_dfs_get_nol_history_chan_list(struct wlan_objmgr_pdev *pdev, + void *clist, uint32_t *num_chan); + +/** + * utils_dfs_reg_update_nol_history_ch() - set nol history channel + * + * @pdev: pdev ptr + * @ch_list: channel list to be returned + * @num_ch: number of channels + * @nol_history_ch: nol history flag + * + * Return: void + */ +#ifdef CONFIG_CHAN_NUM_API +void utils_dfs_reg_update_nol_history_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *ch_list, + uint8_t num_ch, + bool nol_history_ch); +#endif + +/** + * utils_dfs_reg_update_nol_history_chan_for_freq() - set nol history channel + * + * @pdev: pdev ptr + * @ch_list: freq channel list to be returned + * @num_ch: number of channels + * @nol_history_ch: nol history flag + * + * Return: void + */ +#ifdef CONFIG_CHAN_FREQ_API +void utils_dfs_reg_update_nol_history_chan_for_freq(struct wlan_objmgr_pdev *, + uint16_t *freq_list, + uint8_t num_ch, + bool nol_history_ch); +#endif + +/** + * utils_dfs_is_cac_required() - Check if CAC is required on the cur_chan. + * @pdev: pdev ptr + * @cur_chan: Pointer to current channel of wlan_channel structure. + * @prev_chan: Pointer to previous channel of wlan_channel structure. + * @continue_current_cac: If AP can start CAC then this variable indicates + * whether to continue with the current CAC or restart the CAC. This variable + * is valid only if this function returns true. + * + * Return: true if AP requires CAC or can continue current CAC, else false. + */ +bool utils_dfs_is_cac_required(struct wlan_objmgr_pdev *pdev, + struct wlan_channel *cur_chan, + struct wlan_channel *prev_chan, + bool *continue_current_cac); + +/** + * utils_dfs_is_cac_required_on_dfs_curchan() - Check if CAC is required on the + * dfs_curchan. + * @pdev: pdev ptr + * @continue_current_cac: If AP can start CAC then this variable indicates + * whether to continue with the current CAC or restart the CAC. This variable + * is valid only if this function returns true. + * + * This API checks if the dfs_curchan is a subset of the dfs_prevchan. + * dfs_curchan and dfs_prevchan are updated after start response by + * dfs_set_current_channel(). + * + * Return: true if AP requires CAC or can continue current CAC, else false. + */ +bool +utils_dfs_is_cac_required_on_dfs_curchan(struct wlan_objmgr_pdev *pdev, + bool *continue_current_cac); + +/** utils_dfs_is_precac_done() - Check if precac has been done in chosen channel + * @pdev: Pointer to DFS pdev object. + * @wlan_chan: Pointer to wlan channel object that can be accessed by other + * components. + * Wrapper function for dfs_is_precac_done(). This API is called from outside + * the dfs component. + * + * Return: + * * True :If precac is done on channel. + * * False:If precac is not done on channel. + */ +bool utils_dfs_is_precac_done(struct wlan_objmgr_pdev *pdev, + struct wlan_channel *wlan_chan); +/** + * utils_dfs_get_disable_radar_marking() - Retrieve the value of disable radar. + * marking. + * @pdev: Pointer to DFS pdev object. + * @dis_radar_marking: pointer to retrieve the value of disable_radar_marking. + */ +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) +QDF_STATUS utils_dfs_get_disable_radar_marking(struct wlan_objmgr_pdev *pdev, + bool *disable_radar_marking); +#else +static inline +QDF_STATUS utils_dfs_get_disable_radar_marking(struct wlan_objmgr_pdev *pdev, + bool *disable_radar_marking) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * utils_dfs_deliver_event() - Deliver DFS event to userspace. + * @pdev: Pointer to DFS pdev object + * @chan: channel radar hit on + * @event: event being sent + */ +void utils_dfs_deliver_event(struct wlan_objmgr_pdev *pdev, uint16_t freq, + enum WLAN_DFS_EVENTS event); + +/** + * utils_dfs_reset_dfs_prevchan() - Reset DFS previous channel structure. + * @pdev: Pointer to DFS pdev object. + * + * Return: None. + */ +void utils_dfs_reset_dfs_prevchan(struct wlan_objmgr_pdev *pdev); +#endif /* _WLAN_DFS_UTILS_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_init_deinit_api.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_init_deinit_api.c new file mode 100644 index 0000000000000000000000000000000000000000..db9cc0c7ab3045ab442b150e442b9caa81accb74 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_init_deinit_api.c @@ -0,0 +1,532 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file init/deint functions for DFS module. + */ + +#include "wlan_dfs_ucfg_api.h" +#include "wlan_dfs_tgt_api.h" +#include +#include "wlan_dfs_utils_api.h" +#ifndef QCA_MCL_DFS_SUPPORT +#include "ieee80211_mlme_dfs_interface.h" +#endif +#include "wlan_objmgr_global_obj.h" +#include "wlan_dfs_init_deinit_api.h" +#include "wlan_dfs_lmac_api.h" +#include "../../core/src/dfs.h" +#include "a_types.h" +#include "wlan_serialization_api.h" +#include +#include "wlan_scan_ucfg_api.h" +#include "wlan_dfs_mlme_api.h" +#include "../../core/src/dfs_zero_cac.h" + +struct dfs_to_mlme global_dfs_to_mlme; + +struct wlan_dfs *wlan_pdev_get_dfs_obj(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + dfs = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_DFS); + + return dfs; +} + +/* + * register_dfs_precac_auto_chan_callbacks_freq() - Register auto chan switch + * frequency based APIs callback. + * @mlme_callback: Pointer to dfs_to_mlme. + */ +#ifndef QCA_MCL_DFS_SUPPORT +#if defined(WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT) && defined(CONFIG_CHAN_FREQ_API) +static inline void +register_dfs_precac_auto_chan_callbacks_freq(struct dfs_to_mlme *mlme_callback) +{ + if (!mlme_callback) + return; + + mlme_callback->mlme_precac_chan_change_csa_for_freq = + mlme_dfs_precac_chan_change_csa_for_freq; +} +#else +static inline void +register_dfs_precac_auto_chan_callbacks_freq(struct dfs_to_mlme *mlme_callback) +{ +} +#endif +#endif + +/* + * register_dfs_callbacks_for_freq() - Register dfs callbacks. + * @mlme_callback: Pointer to dfs_to_mlme. + */ +#ifndef QCA_MCL_DFS_SUPPORT +#ifdef CONFIG_CHAN_FREQ_API +static inline void +register_dfs_callbacks_for_freq(struct dfs_to_mlme *mlme_callback) +{ + if (!mlme_callback) + return; + + mlme_callback->mlme_mark_dfs_for_freq = mlme_dfs_mark_dfs_for_freq; + mlme_callback->mlme_find_dot11_chan_for_freq = + mlme_dfs_find_dot11_chan_for_freq; + mlme_callback->mlme_get_dfs_channels_for_freq = + mlme_dfs_get_dfs_channels_for_freq; + mlme_callback->mlme_get_cac_timeout_for_freq = + mlme_dfs_get_cac_timeout_for_freq; + mlme_callback->mlme_get_extchan_for_freq = + mlme_dfs_get_extchan_for_freq; + mlme_callback->mlme_start_csa_for_freq = mlme_dfs_start_csa_for_freq; +} +#endif +#endif + +#ifndef QCA_MCL_DFS_SUPPORT +void register_dfs_callbacks(void) +{ + struct dfs_to_mlme *tmp_dfs_to_mlme = &global_dfs_to_mlme; + + tmp_dfs_to_mlme->pdev_component_obj_attach = + wlan_objmgr_pdev_component_obj_attach; + tmp_dfs_to_mlme->pdev_component_obj_detach = + wlan_objmgr_pdev_component_obj_detach; + + tmp_dfs_to_mlme->dfs_start_rcsa = mlme_dfs_start_rcsa; + tmp_dfs_to_mlme->mlme_proc_cac = mlme_dfs_proc_cac; + tmp_dfs_to_mlme->mlme_deliver_event_up_after_cac = + mlme_dfs_deliver_event_up_after_cac; + tmp_dfs_to_mlme->mlme_get_dfs_ch_nchans = mlme_dfs_get_dfs_ch_nchans; + tmp_dfs_to_mlme->mlme_set_no_chans_available = + mlme_dfs_set_no_chans_available; + tmp_dfs_to_mlme->mlme_ieee2mhz = mlme_dfs_ieee2mhz; + tmp_dfs_to_mlme->mlme_dfs_ch_flags_ext = mlme_dfs_dfs_ch_flags_ext; + tmp_dfs_to_mlme->mlme_channel_change_by_precac = + mlme_dfs_channel_change_by_precac; + tmp_dfs_to_mlme->mlme_nol_timeout_notification = + mlme_dfs_nol_timeout_notification; + tmp_dfs_to_mlme->mlme_clist_update = mlme_dfs_clist_update; + tmp_dfs_to_mlme->mlme_rebuild_chan_list_with_non_dfs_channels = + mlme_dfs_rebuild_chan_list_with_non_dfs_channels; + tmp_dfs_to_mlme->mlme_restart_vaps_with_non_dfs_chan = + mlme_dfs_restart_vaps_with_non_dfs_chan; + tmp_dfs_to_mlme->mlme_is_opmode_sta = + mlme_dfs_is_opmode_sta; + tmp_dfs_to_mlme->mlme_check_allowed_prim_chanlist = + mlme_dfs_check_allowed_prim_chanlist; + tmp_dfs_to_mlme->mlme_update_scan_channel_list = + mlme_dfs_update_scan_channel_list; + tmp_dfs_to_mlme->mlme_bringdown_vaps = + mlme_dfs_bringdown_vaps; + tmp_dfs_to_mlme->mlme_dfs_deliver_event = + mlme_dfs_deliver_event; + + tmp_dfs_to_mlme->mlme_acquire_radar_mode_switch_lock = + mlme_acquire_radar_mode_switch_lock; + tmp_dfs_to_mlme->mlme_release_radar_mode_switch_lock = + mlme_release_radar_mode_switch_lock; + /* + * Register precac auto channel switch feature related callbacks + */ + register_dfs_precac_auto_chan_callbacks_freq(tmp_dfs_to_mlme); + /* Register freq based callbacks */ + register_dfs_callbacks_for_freq(tmp_dfs_to_mlme); +} +#else +void register_dfs_callbacks(void) +{ + struct dfs_to_mlme *tmp_dfs_to_mlme = &global_dfs_to_mlme; + + tmp_dfs_to_mlme->pdev_component_obj_attach = + wlan_objmgr_pdev_component_obj_attach; + tmp_dfs_to_mlme->pdev_component_obj_detach = + wlan_objmgr_pdev_component_obj_detach; +} +#endif + +/** + * dfs_psoc_obj_create_notification() - dfs psoc create notification handler + * @psoc: psoc object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +static QDF_STATUS dfs_psoc_obj_create_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list) +{ + QDF_STATUS status; + struct dfs_soc_priv_obj *dfs_soc_obj; + + dfs_soc_obj = qdf_mem_malloc(sizeof(*dfs_soc_obj)); + if (!dfs_soc_obj) + return QDF_STATUS_E_NOMEM; + + dfs_soc_obj->psoc = psoc; + + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_DFS, + (void *)dfs_soc_obj, + QDF_STATUS_SUCCESS); + + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to attach psoc dfs component"); + qdf_mem_free(dfs_soc_obj); + return status; + } + /* Initialize precac timer here*/ + dfs_zero_cac_timer_init(dfs_soc_obj); + + dfs_debug(NULL, WLAN_DEBUG_DFS1, + "DFS obj attach to psoc successfully"); + + return status; +} + +/** + * dfs_psoc_obj_destroy_notification() - dfs psoc destroy notification handler + * @psoc: psoc object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +static QDF_STATUS dfs_psoc_obj_destroy_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list) +{ + QDF_STATUS status; + struct dfs_soc_priv_obj *dfs_soc_obj; + + dfs_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_DFS); + if (!dfs_soc_obj) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to get dfs obj in psoc"); + return QDF_STATUS_E_FAILURE; + } + + dfs_zero_cac_timer_detach(dfs_soc_obj); + + status = wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_DFS, + dfs_soc_obj); + + if (QDF_IS_STATUS_ERROR(status)) + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to detach psoc dfs component"); + + qdf_mem_free(dfs_soc_obj); + + return status; +} + +QDF_STATUS dfs_init(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_register_psoc_create_handler(WLAN_UMAC_COMP_DFS, + dfs_psoc_obj_create_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to register psoc create handler for dfs"); + goto err_psoc_create; + } + + status = wlan_objmgr_register_psoc_destroy_handler(WLAN_UMAC_COMP_DFS, + dfs_psoc_obj_destroy_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to register psoc delete handler for dfs"); + goto err_psoc_delete; + } + + register_dfs_callbacks(); + + status = wlan_objmgr_register_pdev_create_handler(WLAN_UMAC_COMP_DFS, + wlan_dfs_pdev_obj_create_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to register pdev create handler for dfs"); + goto err_pdev_create; + } + + status = wlan_objmgr_register_pdev_destroy_handler(WLAN_UMAC_COMP_DFS, + wlan_dfs_pdev_obj_destroy_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to register pdev delete handler for dfs"); + goto err_pdev_delete; + } + + status = qdf_print_set_category_verbose(qdf_get_pidx(), + QDF_MODULE_ID_DFS, QDF_TRACE_LEVEL_INFO, true); + + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to set verbose for category"); + goto err_category_verbose; + } + + return QDF_STATUS_SUCCESS; + +err_category_verbose: + wlan_objmgr_unregister_pdev_destroy_handler(WLAN_UMAC_COMP_DFS, + wlan_dfs_pdev_obj_destroy_notification, + NULL); +err_pdev_delete: + wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_DFS, + wlan_dfs_pdev_obj_create_notification, + NULL); +err_pdev_create: + wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_DFS, + dfs_psoc_obj_destroy_notification, + NULL); +err_psoc_delete: + wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_DFS, + dfs_psoc_obj_create_notification, + NULL); +err_psoc_create: + return status; +} + +QDF_STATUS dfs_deinit(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_DFS, + dfs_psoc_obj_create_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to deregister dfs psoc obj create"); + + status = wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_DFS, + dfs_psoc_obj_destroy_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to deregister dfs psoc obj destroy"); + + status = wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_DFS, + wlan_dfs_pdev_obj_create_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to deregister dfs pdev obj create"); + + status = wlan_objmgr_unregister_pdev_destroy_handler(WLAN_UMAC_COMP_DFS, + wlan_dfs_pdev_obj_destroy_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to deregister dfs pdev obj destroy"); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_dfs_pdev_obj_create_notification(struct wlan_objmgr_pdev *pdev, + void *arg) +{ + struct wlan_dfs *dfs = NULL; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + uint8_t pdev_id; + QDF_STATUS status; + bool is_5ghz = false; + + if (!pdev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null pdev"); + return QDF_STATUS_E_FAILURE; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + return QDF_STATUS_E_FAILURE; + } + + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (!(dfs_tx_ops && dfs_tx_ops->dfs_is_pdev_5ghz)) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_tx_ops is null"); + return QDF_STATUS_E_FAILURE; + } + + status = dfs_tx_ops->dfs_is_pdev_5ghz(pdev, &is_5ghz); + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "Failed to get is_5ghz value"); + return QDF_STATUS_E_FAILURE; + } + + if (!is_5ghz) { + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Do not allocate DFS object for 2G, pdev_id = %d", + pdev_id); + return QDF_STATUS_SUCCESS; + } + + if (dfs_create_object(&dfs) == 1) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "failed to create object"); + return QDF_STATUS_E_FAILURE; + } + + status = global_dfs_to_mlme.pdev_component_obj_attach(pdev, + WLAN_UMAC_COMP_DFS, (void *)dfs, QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "obj attach failed"); + dfs_destroy_object(dfs); + return QDF_STATUS_E_FAILURE; + } + + dfs->dfs_pdev_obj = pdev; + + if (!dfs_tx_ops->dfs_is_tgt_offload) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "dfs_is_tgt_offload is null"); + dfs_destroy_object(dfs); + return QDF_STATUS_E_FAILURE; + } + + dfs->dfs_is_offload_enabled = dfs_tx_ops->dfs_is_tgt_offload(psoc); + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_offload %d", + dfs->dfs_is_offload_enabled); + dfs_agile_soc_obj_init(dfs, psoc); + + if (dfs_attach(dfs) == 1) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_attch failed"); + dfs_destroy_object(dfs); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_dfs_pdev_obj_destroy_notification(struct wlan_objmgr_pdev *pdev, + void *arg) +{ + struct wlan_dfs *dfs = NULL; + + if (!pdev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "PDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs = wlan_pdev_get_dfs_obj(pdev); + + /* DFS is NULL during unload. should we call this function before */ + if (dfs) { + global_dfs_to_mlme.pdev_component_obj_detach(pdev, + WLAN_UMAC_COMP_DFS, + (void *)dfs); + + dfs_detach(dfs); + dfs->dfs_pdev_obj = NULL; + dfs_destroy_object(dfs); + } + + return QDF_STATUS_SUCCESS; +} + +static void dfs_scan_serialization_comp_info_cb( + struct wlan_objmgr_vdev *vdev, + union wlan_serialization_rules_info *comp_info) +{ + struct wlan_dfs *dfs = NULL; + struct wlan_objmgr_pdev *pdev; + + if (!comp_info) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "comp_info is NULL"); + return; + } + + if (!vdev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "vdev is NULL"); + return; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "pdev is NULL"); + return; + } + + comp_info->scan_info.is_cac_in_progress = false; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + if (dfs_is_ap_cac_timer_running(dfs)) + comp_info->scan_info.is_cac_in_progress = true; +} + +QDF_STATUS wifi_dfs_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + + status = tgt_dfs_reg_ev_handler(psoc); + if (status != QDF_STATUS_SUCCESS) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "tgt_dfs_reg_ev_handler failed"); + return QDF_STATUS_E_FAILURE; + } + + status = wlan_serialization_register_comp_info_cb(psoc, + WLAN_UMAC_COMP_DFS, + WLAN_SER_CMD_SCAN, + dfs_scan_serialization_comp_info_cb); + if (status != QDF_STATUS_SUCCESS) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "Serialize scan cmd register failed"); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wifi_dfs_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + + status = wlan_serialization_deregister_comp_info_cb(psoc, + WLAN_UMAC_COMP_DFS, + WLAN_SER_CMD_SCAN); + if (status != QDF_STATUS_SUCCESS) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "Serialize scan cmd deregister failed"); + return status; + } + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_lmac_api.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_lmac_api.c new file mode 100644 index 0000000000000000000000000000000000000000..48c0bb454d06edf3d65df63f170d765ab9a44edb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_lmac_api.c @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Functions to call lmac/offload functions from DFS component. + */ + +#include "wlan_dfs_lmac_api.h" +#include "../../core/src/dfs_internal.h" +#include + +void lmac_get_caps(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_caps *dfs_caps) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_get_caps) + dfs_tx_ops->dfs_get_caps(pdev, dfs_caps); +} + +uint64_t lmac_get_tsf64(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + uint64_t tsf64 = 0; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_gettsf64) + dfs_tx_ops->dfs_gettsf64(pdev, &tsf64); + + return tsf64; +} + +void lmac_dfs_disable(struct wlan_objmgr_pdev *pdev, int no_cac) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_disable) + dfs_tx_ops->dfs_disable(pdev, no_cac); +} + +void lmac_dfs_enable(struct wlan_objmgr_pdev *pdev, + int *is_fastclk, + struct wlan_dfs_phyerr_param *param, + int dfsdomain) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_enable) + dfs_tx_ops->dfs_enable(pdev, + is_fastclk, + param, + dfsdomain); +} + +void lmac_dfs_get_thresholds(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_phyerr_param *param) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_get_thresholds) + dfs_tx_ops->dfs_get_thresholds(pdev, param); +} + +uint16_t lmac_get_ah_devid(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + uint16_t devid = 0; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_get_ah_devid) + dfs_tx_ops->dfs_get_ah_devid(pdev, &devid); + + return devid; +} + +uint32_t lmac_get_ext_busy(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + uint32_t ext_chan_busy = 0; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_get_ext_busy) + dfs_tx_ops->dfs_get_ext_busy(pdev, &ext_chan_busy); + + return ext_chan_busy; +} + +void lmac_set_use_cac_prssi(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_set_use_cac_prssi) + dfs_tx_ops->dfs_set_use_cac_prssi(pdev); +} + +uint32_t lmac_get_target_type(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + uint32_t target_type = 0; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_get_target_type) + dfs_tx_ops->dfs_get_target_type(pdev, &target_type); + + return target_type; +} + +uint32_t lmac_get_phymode_info(struct wlan_objmgr_pdev *pdev, + uint32_t chan_mode) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + uint32_t mode_info = 0; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + /* since dfs never comes into 2G, hardcode is_2gvht_en flag to false */ + if (dfs_tx_ops->dfs_get_phymode_info) + dfs_tx_ops->dfs_get_phymode_info(pdev, chan_mode, &mode_info, + false); + + return mode_info; +} + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +bool lmac_is_host_dfs_check_support_enabled(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + bool enabled = false; + + psoc = wlan_pdev_get_psoc(pdev); + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_host_dfs_check_support) + dfs_tx_ops->dfs_host_dfs_check_support(pdev, &enabled); + + return enabled; +} +#endif + +bool lmac_dfs_is_hw_mode_switch_in_progress(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + bool is_hw_mode_switch_in_progress = false; + + psoc = wlan_pdev_get_psoc(pdev); + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_check_mode_switch_state) + dfs_tx_ops->dfs_check_mode_switch_state( + pdev, + &is_hw_mode_switch_in_progress); + + return is_hw_mode_switch_in_progress; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_mlme_api.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_mlme_api.c new file mode 100644 index 0000000000000000000000000000000000000000..54e68c6af7f254026dc8e1848b5f72a47583d8bf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_mlme_api.c @@ -0,0 +1,585 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Functions to call mlme functions from DFS component. + */ + +#include "wlan_dfs_mlme_api.h" +#include "wlan_objmgr_vdev_obj.h" +#include "wlan_objmgr_pdev_obj.h" +#include "../../core/src/dfs.h" +#include "scheduler_api.h" +#include +#ifdef QCA_MCL_DFS_SUPPORT +#include "wni_api.h" +#endif + +void dfs_mlme_start_rcsa(struct wlan_objmgr_pdev *pdev, + bool *wait_for_csa) +{ + if (global_dfs_to_mlme.dfs_start_rcsa) + global_dfs_to_mlme.dfs_start_rcsa(pdev, wait_for_csa); +} + +#ifndef QCA_MCL_DFS_SUPPORT +#ifdef CONFIG_CHAN_NUM_API +void dfs_mlme_mark_dfs(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint16_t freq, + uint8_t vhtop_ch_freq_seg2, + uint64_t flags) +{ + if (global_dfs_to_mlme.mlme_mark_dfs) + global_dfs_to_mlme.mlme_mark_dfs(pdev, + ieee, + freq, + vhtop_ch_freq_seg2, + flags); +} +#endif +#ifdef CONFIG_CHAN_FREQ_API +void dfs_mlme_mark_dfs_for_freq(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint16_t freq, + uint16_t vhtop_ch_freq_seg2, + uint64_t flags) +{ + if (global_dfs_to_mlme.mlme_mark_dfs_for_freq) + global_dfs_to_mlme.mlme_mark_dfs_for_freq(pdev, + ieee, + freq, + vhtop_ch_freq_seg2, + flags); +} +#endif +#else /* Else of ndef MCL_DFS_SUPPORT */ +#ifdef CONFIG_CHAN_NUM_API +static void dfs_send_radar_ind(struct wlan_objmgr_pdev *pdev, + void *object, + void *arg) +{ + struct scheduler_msg sme_msg = {0}; + uint8_t vdev_id = wlan_vdev_get_id((struct wlan_objmgr_vdev *)object); + + sme_msg.type = eWNI_SME_DFS_RADAR_FOUND; + sme_msg.bodyptr = NULL; + sme_msg.bodyval = vdev_id; + scheduler_post_message(QDF_MODULE_ID_DFS, + QDF_MODULE_ID_SME, + QDF_MODULE_ID_SME, &sme_msg); + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "eWNI_SME_DFS_RADAR_FOUND pdev%d posted", + vdev_id); +} +#endif + +/* dfs_send_radar_ind_for_freq() - Send radar found indication. + * @pdev: Pointer to wlan_objmgr_pdev. + * @object: Pointer to wlan_objmgr_vdev. + * @arg : void pointer to args. + */ +#ifdef CONFIG_CHAN_FREQ_API +static void dfs_send_radar_ind_for_freq(struct wlan_objmgr_pdev *pdev, + void *object, + void *arg) +{ + struct scheduler_msg sme_msg = {0}; + uint8_t vdev_id = wlan_vdev_get_id((struct wlan_objmgr_vdev *)object); + + sme_msg.type = eWNI_SME_DFS_RADAR_FOUND; + sme_msg.bodyptr = NULL; + sme_msg.bodyval = vdev_id; + scheduler_post_message(QDF_MODULE_ID_DFS, + QDF_MODULE_ID_SME, + QDF_MODULE_ID_SME, &sme_msg); + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "eWNI_SME_DFS_RADAR_FOUND pdev%d posted", + vdev_id); +} +#endif + +#ifdef CONFIG_CHAN_NUM_API +void dfs_mlme_mark_dfs(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint16_t freq, + uint8_t vhtop_ch_freq_seg2, + uint64_t flags) +{ + struct wlan_objmgr_vdev *vdev; + + if (!pdev) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null pdev"); + return; + } + + vdev = wlan_pdev_peek_active_first_vdev(pdev, WLAN_DFS_ID); + + if (vdev) { + dfs_send_radar_ind(pdev, vdev, NULL); + wlan_objmgr_vdev_release_ref(vdev, WLAN_DFS_ID); + } +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +void dfs_mlme_mark_dfs_for_freq(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint16_t freq, + uint16_t vhtop_ch_freq_seg2, + uint64_t flags) +{ + struct wlan_objmgr_vdev *vdev; + + if (!pdev) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null pdev"); + return; + } + + vdev = wlan_pdev_peek_active_first_vdev(pdev, WLAN_DFS_ID); + + if (vdev) { + dfs_send_radar_ind_for_freq(pdev, vdev, NULL); + wlan_objmgr_vdev_release_ref(vdev, WLAN_DFS_ID); + } +} +#endif +#endif + +#ifndef QCA_MCL_DFS_SUPPORT +#ifdef CONFIG_CHAN_NUM_API +void dfs_mlme_start_csa(struct wlan_objmgr_pdev *pdev, + uint8_t ieee_chan, uint16_t freq, + uint8_t cfreq2, uint64_t flags) +{ + if (global_dfs_to_mlme.mlme_start_csa) + global_dfs_to_mlme.mlme_start_csa(pdev, ieee_chan, freq, cfreq2, + flags); +} +#endif +#ifdef CONFIG_CHAN_FREQ_API +void dfs_mlme_start_csa_for_freq(struct wlan_objmgr_pdev *pdev, + uint8_t ieee_chan, uint16_t freq, + uint16_t cfreq2, uint64_t flags) +{ + if (global_dfs_to_mlme.mlme_start_csa_for_freq) + global_dfs_to_mlme.mlme_start_csa_for_freq(pdev, ieee_chan, + freq, cfreq2, flags); +} +#endif +#else +#ifdef CONFIG_CHAN_NUM_API +void dfs_mlme_start_csa(struct wlan_objmgr_pdev *pdev, + uint8_t ieee_chan, uint16_t freq, + uint8_t cfreq2, uint64_t flags) +{ + struct wlan_objmgr_vdev *vdev; + + if (!pdev) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null pdev"); + return; + } + + vdev = wlan_pdev_peek_active_first_vdev(pdev, WLAN_DFS_ID); + + if (vdev) { + dfs_send_radar_ind(pdev, vdev, NULL); + wlan_objmgr_vdev_release_ref(vdev, WLAN_DFS_ID); + } +} +#endif +#ifdef CONFIG_CHAN_FREQ_API +void dfs_mlme_start_csa_for_freq(struct wlan_objmgr_pdev *pdev, + uint8_t ieee_chan, uint16_t freq, + uint16_t cfreq2, uint64_t flags) +{ + struct wlan_objmgr_vdev *vdev; + + if (!pdev) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null pdev"); + return; + } + + vdev = wlan_pdev_peek_active_first_vdev(pdev, WLAN_DFS_ID); + + if (vdev) { + dfs_send_radar_ind(pdev, vdev, NULL); + wlan_objmgr_vdev_release_ref(vdev, WLAN_DFS_ID); + } +} +#endif +#endif + +#ifndef QCA_MCL_DFS_SUPPORT +void dfs_mlme_proc_cac(struct wlan_objmgr_pdev *pdev, uint32_t vdev_id) +{ + if (global_dfs_to_mlme.mlme_proc_cac) + global_dfs_to_mlme.mlme_proc_cac(pdev); +} +#else +void dfs_mlme_proc_cac(struct wlan_objmgr_pdev *pdev, uint32_t vdev_id) +{ + struct scheduler_msg sme_msg = {0}; + + sme_msg.type = eWNI_SME_DFS_CAC_COMPLETE; + sme_msg.bodyptr = NULL; + sme_msg.bodyval = vdev_id; + scheduler_post_message(QDF_MODULE_ID_DFS, + QDF_MODULE_ID_SME, + QDF_MODULE_ID_SME, &sme_msg); + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "eWNI_SME_DFS_CAC_COMPLETE vdev%d posted", + vdev_id); +} +#endif + +void dfs_mlme_deliver_event_up_after_cac(struct wlan_objmgr_pdev *pdev) +{ + if (global_dfs_to_mlme.mlme_deliver_event_up_after_cac) + global_dfs_to_mlme.mlme_deliver_event_up_after_cac( + pdev); +} + +void dfs_mlme_get_dfs_ch_nchans(struct wlan_objmgr_pdev *pdev, + int *nchans) +{ + if (global_dfs_to_mlme.mlme_get_dfs_ch_nchans) + global_dfs_to_mlme.mlme_get_dfs_ch_nchans(pdev, + nchans); +} + +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS dfs_mlme_get_extchan(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2) +{ + if (global_dfs_to_mlme.mlme_get_extchan) + return global_dfs_to_mlme.mlme_get_extchan(pdev, + dfs_ch_freq, + dfs_ch_flags, + dfs_ch_flagext, + dfs_ch_ieee, + dfs_ch_vhtop_ch_freq_seg1, + dfs_ch_vhtop_ch_freq_seg2); + + return QDF_STATUS_E_FAILURE; +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS dfs_mlme_get_extchan_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_chan_freq, + uint64_t *dfs_chan_flags, + uint16_t *dfs_chan_flagext, + uint8_t *dfs_chan_ieee, + uint8_t *dfs_chan_vhtop_ch_freq_seg1, + uint8_t *dfs_chan_vhtop_ch_freq_seg2, + uint16_t *dfs_chan_mhz_freq_seg1, + uint16_t *dfs_chan_mhz_freq_seg2) +{ + if (global_dfs_to_mlme.mlme_get_extchan_for_freq) + return global_dfs_to_mlme.mlme_get_extchan_for_freq(pdev, + dfs_chan_freq, + dfs_chan_flags, + dfs_chan_flagext, + dfs_chan_ieee, + dfs_chan_vhtop_ch_freq_seg1, + dfs_chan_vhtop_ch_freq_seg2, + dfs_chan_mhz_freq_seg1, + dfs_chan_mhz_freq_seg2); + + return QDF_STATUS_E_FAILURE; +} +#endif + +void dfs_mlme_set_no_chans_available(struct wlan_objmgr_pdev *pdev, + int val) +{ + if (global_dfs_to_mlme.mlme_set_no_chans_available) + global_dfs_to_mlme.mlme_set_no_chans_available( + pdev, + val); +} + +int dfs_mlme_ieee2mhz(struct wlan_objmgr_pdev *pdev, int ieee, uint64_t flag) +{ + int freq = 0; + + if (global_dfs_to_mlme.mlme_ieee2mhz) + global_dfs_to_mlme.mlme_ieee2mhz(pdev, + ieee, + flag, + &freq); + + return freq; +} + +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS +dfs_mlme_find_dot11_channel(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint8_t des_cfreq2, + int mode, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2) +{ + if (global_dfs_to_mlme.mlme_find_dot11_channel) + return global_dfs_to_mlme.mlme_find_dot11_channel(pdev, + ieee, + des_cfreq2, + mode, + dfs_ch_freq, + dfs_ch_flags, + dfs_ch_flagext, + dfs_ch_ieee, + dfs_ch_vhtop_ch_freq_seg1, + dfs_ch_vhtop_ch_freq_seg2); + return QDF_STATUS_E_FAILURE; +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS +dfs_mlme_find_dot11_chan_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t freq, + uint16_t des_cfreq2, + int mode, + uint16_t *dfs_chan_freq, + uint64_t *dfs_chan_flag, + uint16_t *dfs_flagext, + uint8_t *dfs_chan_ieee, + uint8_t *dfs_cfreq1, + uint8_t *dfs_cfreq2, + uint16_t *cfreq1_mhz, + uint16_t *cfreq2_mhz) +{ + if (global_dfs_to_mlme.mlme_find_dot11_chan_for_freq) + return global_dfs_to_mlme.mlme_find_dot11_chan_for_freq(pdev, + freq, + des_cfreq2, + mode, + dfs_chan_freq, + dfs_chan_flag, + dfs_flagext, + dfs_chan_ieee, + dfs_cfreq1, + dfs_cfreq2, + cfreq1_mhz, + cfreq2_mhz); + return QDF_STATUS_E_FAILURE; +} +#endif + +#ifdef CONFIG_CHAN_NUM_API +void dfs_mlme_get_dfs_ch_channels(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2, + int index) +{ + if (global_dfs_to_mlme.mlme_get_dfs_ch_channels) + global_dfs_to_mlme.mlme_get_dfs_ch_channels(pdev, + dfs_ch_freq, + dfs_ch_flags, + dfs_ch_flagext, + dfs_ch_ieee, + dfs_ch_vhtop_ch_freq_seg1, + dfs_ch_vhtop_ch_freq_seg2, + index); +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +void dfs_mlme_get_dfs_channels_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_chan_freq, + uint64_t *dfs_chan_flags, + uint16_t *dfs_chan_flagext, + uint8_t *dfs_chan_ieee, + uint8_t *dfs_chan_vhtop_freq_seg1, + uint8_t *dfs_chan_vhtop_freq_seg2, + uint16_t *dfs_ch_mhz_freq_seg1, + uint16_t *dfs_ch_mhz_freq_seg2, + int index) +{ + if (global_dfs_to_mlme.mlme_get_dfs_channels_for_freq) + global_dfs_to_mlme.mlme_get_dfs_channels_for_freq(pdev, + dfs_chan_freq, + dfs_chan_flags, + dfs_chan_flagext, + dfs_chan_ieee, + dfs_chan_vhtop_freq_seg1, + dfs_chan_vhtop_freq_seg2, + dfs_ch_mhz_freq_seg1, + dfs_ch_mhz_freq_seg2, + index); +} +#endif + +uint32_t dfs_mlme_dfs_ch_flags_ext(struct wlan_objmgr_pdev *pdev) +{ + uint16_t flag_ext = 0; + + if (global_dfs_to_mlme.mlme_dfs_ch_flags_ext) + global_dfs_to_mlme.mlme_dfs_ch_flags_ext(pdev, + &flag_ext); + + return flag_ext; +} + +void dfs_mlme_channel_change_by_precac(struct wlan_objmgr_pdev *pdev) +{ + if (global_dfs_to_mlme.mlme_channel_change_by_precac) + global_dfs_to_mlme.mlme_channel_change_by_precac( + pdev); +} + +void dfs_mlme_nol_timeout_notification(struct wlan_objmgr_pdev *pdev) +{ + if (global_dfs_to_mlme.mlme_nol_timeout_notification) + global_dfs_to_mlme.mlme_nol_timeout_notification( + pdev); +} + +void dfs_mlme_clist_update(struct wlan_objmgr_pdev *pdev, + void *nollist, + int nentries) +{ + if (global_dfs_to_mlme.mlme_clist_update) + global_dfs_to_mlme.mlme_clist_update(pdev, + nollist, + nentries); +} + +#ifdef CONFIG_CHAN_NUM_API +int dfs_mlme_get_cac_timeout(struct wlan_objmgr_pdev *pdev, + uint16_t dfs_ch_freq, + uint8_t dfs_ch_vhtop_ch_freq_seg2, + uint64_t dfs_ch_flags) +{ + int cac_timeout = 0; + + if (global_dfs_to_mlme.mlme_get_cac_timeout) + global_dfs_to_mlme.mlme_get_cac_timeout(pdev, + dfs_ch_freq, + dfs_ch_vhtop_ch_freq_seg2, + dfs_ch_flags, + &cac_timeout); + + return cac_timeout; +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +int dfs_mlme_get_cac_timeout_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t dfs_chan_freq, + uint16_t dfs_cfreq2, + uint64_t dfs_ch_flags) +{ + int cac_timeout = 0; + + if (global_dfs_to_mlme.mlme_get_cac_timeout_for_freq) + global_dfs_to_mlme.mlme_get_cac_timeout_for_freq(pdev, + dfs_chan_freq, + dfs_cfreq2, + dfs_ch_flags, + &cac_timeout); + + return cac_timeout; +} +#endif + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +int dfs_mlme_rebuild_chan_list_with_non_dfs_channels( + struct wlan_objmgr_pdev *pdev) +{ + if (!global_dfs_to_mlme.mlme_rebuild_chan_list_with_non_dfs_channels) + return 1; + + return global_dfs_to_mlme.mlme_rebuild_chan_list_with_non_dfs_channels( + pdev); +} + +void dfs_mlme_restart_vaps_with_non_dfs_chan(struct wlan_objmgr_pdev *pdev, + int no_chans_avail) +{ + if (!global_dfs_to_mlme.mlme_restart_vaps_with_non_dfs_chan) + return; + + global_dfs_to_mlme.mlme_restart_vaps_with_non_dfs_chan(pdev, + no_chans_avail); +} +#endif + +#if defined(WLAN_SUPPORT_PRIMARY_ALLOWED_CHAN) +bool dfs_mlme_check_allowed_prim_chanlist(struct wlan_objmgr_pdev *pdev, + uint32_t chan_freq) +{ + if (!global_dfs_to_mlme.mlme_check_allowed_prim_chanlist) + return true; + + return global_dfs_to_mlme.mlme_check_allowed_prim_chanlist(pdev, + chan_freq); +} + +#endif + +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) +void dfs_mlme_handle_dfs_scan_violation(struct wlan_objmgr_pdev *pdev) +{ + bool dfs_enable = 0; + + /*Disable all DFS channels in master channel list and ic channel list */ + ucfg_reg_enable_dfs_channels(pdev, dfs_enable); + + /* send the updated channel list to FW */ + global_dfs_to_mlme.mlme_update_scan_channel_list(pdev); +} +#endif + +bool dfs_mlme_is_opmode_sta(struct wlan_objmgr_pdev *pdev) +{ + if (!global_dfs_to_mlme.mlme_is_opmode_sta) + return false; + + return global_dfs_to_mlme.mlme_is_opmode_sta(pdev); +} + +void dfs_mlme_acquire_radar_mode_switch_lock(struct wlan_objmgr_pdev *pdev) +{ + if (!global_dfs_to_mlme.mlme_acquire_radar_mode_switch_lock) + return; + + global_dfs_to_mlme.mlme_acquire_radar_mode_switch_lock(pdev); +} + +void dfs_mlme_release_radar_mode_switch_lock(struct wlan_objmgr_pdev *pdev) +{ + if (!global_dfs_to_mlme.mlme_release_radar_mode_switch_lock) + return; + + global_dfs_to_mlme.mlme_release_radar_mode_switch_lock(pdev); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..577976dad0eaa7359e30a4b91281fbb23cf539e9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_tgt_api.c @@ -0,0 +1,1045 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has the DFS dispatcher API implementation which is exposed + * to outside of DFS component. + */ +#include +#include "wlan_dfs_tgt_api.h" +#include "wlan_dfs_utils_api.h" +#include "wlan_dfs_init_deinit_api.h" +#include "wlan_lmac_if_def.h" +#include "wlan_lmac_if_api.h" +#include "wlan_dfs_mlme_api.h" +#include "../../core/src/dfs.h" +#include "../../core/src/dfs_zero_cac.h" +#include "../../core/src/dfs_process_radar_found_ind.h" +#include +#include "../../core/src/dfs_partial_offload_radar.h" +#ifdef QCA_MCL_DFS_SUPPORT +#include "wlan_mlme_ucfg_api.h" +#endif + +struct wlan_lmac_if_dfs_tx_ops * +wlan_psoc_get_dfs_txops(struct wlan_objmgr_psoc *psoc) +{ + return &((psoc->soc_cb.tx_ops.dfs_tx_ops)); +} + +bool tgt_dfs_is_pdev_5ghz(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + struct wlan_objmgr_psoc *psoc; + bool is_5ghz = false; + QDF_STATUS status; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + return false; + } + + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (!(dfs_tx_ops && dfs_tx_ops->dfs_is_pdev_5ghz)) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "dfs_tx_ops is null"); + return false; + } + + status = dfs_tx_ops->dfs_is_pdev_5ghz(pdev, &is_5ghz); + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "Failed to get is_5ghz value"); + return false; + } + + return is_5ghz; +} + +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS tgt_dfs_set_current_channel(struct wlan_objmgr_pdev *pdev, + uint16_t dfs_ch_freq, + uint64_t dfs_ch_flags, + uint16_t dfs_ch_flagext, + uint8_t dfs_ch_ieee, + uint8_t dfs_ch_vhtop_ch_freq_seg1, + uint8_t dfs_ch_vhtop_ch_freq_seg2) +{ + struct wlan_dfs *dfs; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return QDF_STATUS_SUCCESS; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_set_current_channel(dfs, + dfs_ch_freq, + dfs_ch_flags, + dfs_ch_flagext, + dfs_ch_ieee, + dfs_ch_vhtop_ch_freq_seg1, + dfs_ch_vhtop_ch_freq_seg2); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_set_current_channel); +#endif + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS +tgt_dfs_set_current_channel_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t dfs_chan_freq, + uint64_t dfs_chan_flags, + uint16_t dfs_chan_flagext, + uint8_t dfs_chan_ieee, + uint8_t dfs_chan_vhtop_freq_seg1, + uint8_t dfs_chan_vhtop_freq_seg2, + uint16_t dfs_chan_mhz_freq_seg1, + uint16_t dfs_chan_mhz_freq_seg2) +{ + struct wlan_dfs *dfs; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return QDF_STATUS_SUCCESS; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_set_current_channel_for_freq(dfs, + dfs_chan_freq, + dfs_chan_flags, + dfs_chan_flagext, + dfs_chan_ieee, + dfs_chan_vhtop_freq_seg1, + dfs_chan_vhtop_freq_seg2, + dfs_chan_mhz_freq_seg1, + dfs_chan_mhz_freq_seg2); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(tgt_dfs_set_current_channel_for_freq); +#endif + +QDF_STATUS tgt_dfs_radar_enable(struct wlan_objmgr_pdev *pdev, + int no_cac, uint32_t opmode, bool enable) +{ + struct wlan_dfs *dfs; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + struct wlan_objmgr_psoc *psoc; + QDF_STATUS status; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!dfs->dfs_is_offload_enabled) { + if (enable) { + dfs_radar_enable(dfs, no_cac, opmode); + return QDF_STATUS_SUCCESS; + } else { + dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Disabling dfs not allowed for non-offload chips"); + return QDF_STATUS_E_FAILURE; + } + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (!dfs_tx_ops) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_tx_ops is null"); + return QDF_STATUS_E_FAILURE; + } + + status = dfs_tx_ops->dfs_send_offload_enable_cmd(pdev, enable); + if (QDF_IS_STATUS_ERROR(status)) + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Failed to enable dfs offload, pdev_id: %d", + wlan_objmgr_pdev_get_pdev_id(pdev)); + + return status; +} +qdf_export_symbol(tgt_dfs_radar_enable); + +void tgt_dfs_is_radar_enabled(struct wlan_objmgr_pdev *pdev, int *ignore_dfs) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_is_radar_enabled(dfs, ignore_dfs); +} + +qdf_export_symbol(tgt_dfs_is_radar_enabled); + +QDF_STATUS tgt_dfs_process_phyerr(struct wlan_objmgr_pdev *pdev, + void *buf, + uint16_t datalen, + uint8_t r_rssi, + uint8_t r_ext_rssi, + uint32_t r_rs_tstamp, + uint64_t r_fulltsf) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!dfs->dfs_is_offload_enabled) + dfs_process_phyerr(dfs, buf, datalen, r_rssi, + r_ext_rssi, r_rs_tstamp, r_fulltsf); + else + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Unexpect phyerror as DFS is offloaded, pdev_id: %d", + wlan_objmgr_pdev_get_pdev_id(pdev)); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_process_phyerr); + +#ifdef QCA_MCL_DFS_SUPPORT +QDF_STATUS tgt_dfs_process_phyerr_filter_offload(struct wlan_objmgr_pdev *pdev, + struct radar_event_info + *wlan_radar_event) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + if (!dfs->dfs_is_offload_enabled) + dfs_process_phyerr_filter_offload(dfs, wlan_radar_event); + else + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Unexpect phyerror as DFS is offloaded, pdev_id: %d", + wlan_objmgr_pdev_get_pdev_id(pdev)); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_process_phyerr_filter_offload); + +QDF_STATUS tgt_dfs_is_phyerr_filter_offload(struct wlan_objmgr_psoc *psoc, + bool *is_phyerr_filter_offload) +{ + struct dfs_soc_priv_obj *soc_obj; + + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_DFS); + if (!soc_obj) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to get dfs psoc component"); + return QDF_STATUS_E_FAILURE; + } + + *is_phyerr_filter_offload = soc_obj->dfs_is_phyerr_filter_offload; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_is_phyerr_filter_offload); +#else +QDF_STATUS tgt_dfs_process_phyerr_filter_offload(struct wlan_objmgr_pdev *pdev, + struct radar_event_info + *wlan_radar_event) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_dfs_is_phyerr_filter_offload(struct wlan_objmgr_psoc *psoc, + bool *is_phyerr_filter_offload) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +QDF_STATUS tgt_dfs_is_precac_timer_running(struct wlan_objmgr_pdev *pdev, + bool *is_precac_timer_running) +{ + struct wlan_dfs *dfs; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return QDF_STATUS_SUCCESS; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + *is_precac_timer_running = dfs_is_precac_timer_running(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_is_precac_timer_running); + +QDF_STATUS tgt_dfs_get_radars(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return QDF_STATUS_SUCCESS; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!dfs->dfs_is_offload_enabled) + dfs_get_radars(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_get_radars); + +QDF_STATUS tgt_dfs_destroy_object(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_destroy_object(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_destroy_object); + +#ifdef QCA_MCL_DFS_SUPPORT +QDF_STATUS tgt_dfs_set_tx_leakage_threshold(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + uint32_t tx_leakage_threshold = 0; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + ucfg_mlme_get_sap_tx_leakage_threshold(psoc, + &tx_leakage_threshold); + + dfs->tx_leakage_threshold = tx_leakage_threshold; + dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS, + "dfs tx_leakage_threshold = %d", dfs->tx_leakage_threshold); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_set_tx_leakage_threshold); +#endif + +QDF_STATUS tgt_dfs_control(struct wlan_objmgr_pdev *pdev, + u_int id, + void *indata, + uint32_t insize, + void *outdata, + uint32_t *outsize, + int *error) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + *error = dfs_control(dfs, id, indata, insize, outdata, outsize); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_control); + +#ifdef QCA_SUPPORT_AGILE_DFS +QDF_STATUS tgt_dfs_agile_precac_start(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_agile_precac_start(dfs); + + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS tgt_dfs_agile_precac_start(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif +qdf_export_symbol(tgt_dfs_agile_precac_start); + +#ifdef QCA_SUPPORT_AGILE_DFS +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS tgt_dfs_set_agile_precac_state(struct wlan_objmgr_pdev *pdev, + int agile_precac_state) +{ + struct wlan_dfs *dfs; + struct dfs_soc_priv_obj *dfs_soc; + bool is_precac_running_on_given_pdev = false; + int i; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return QDF_STATUS_SUCCESS; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_soc = dfs->dfs_soc_obj; + for (i = 0; i < dfs_soc->num_dfs_privs; i++) { + if (dfs_soc->dfs_priv[i].dfs == dfs) { + /* Set the pdev state to given value. */ + dfs_soc->dfs_priv[i].agile_precac_active = + agile_precac_state; + /* If the pdev state is changed to inactive, + * reset the agile channel. + */ + if (!agile_precac_state) + dfs->dfs_agile_precac_freq_mhz = 0; + if (dfs_soc->cur_precac_dfs_index == i) + is_precac_running_on_given_pdev = true; + } + } + + /* If preCAC is running on this pdev and the agile_precac_state + * is set to false, set the global state in dfs_soc_obj to false. + * If this global state is not set to false, then preCAC will not be + * started the next time this pdev becomes active. + */ + if (is_precac_running_on_given_pdev && !agile_precac_state) + dfs_soc->precac_state_started = PRECAC_NOT_STARTED; + + return QDF_STATUS_SUCCESS; +} +#else +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS tgt_dfs_set_agile_precac_state(struct wlan_objmgr_pdev *pdev, + int agile_precac_state) +{ + struct wlan_dfs *dfs; + struct dfs_soc_priv_obj *dfs_soc; + bool is_precac_running_on_given_pdev = false; + int i; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return QDF_STATUS_SUCCESS; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_soc = dfs->dfs_soc_obj; + for (i = 0; i < dfs_soc->num_dfs_privs; i++) { + if (dfs_soc->dfs_priv[i].dfs == dfs) { + /* Set the pdev state to given value. */ + dfs_soc->dfs_priv[i].agile_precac_active = + agile_precac_state; + /* If the pdev state is changed to inactive, + * reset the agile channel. + */ + if (!agile_precac_state) + dfs->dfs_agile_precac_freq = 0; + if (dfs_soc->cur_precac_dfs_index == i) + is_precac_running_on_given_pdev = true; + } + } + + /* If preCAC is running on this pdev and the agile_precac_state + * is set to false, set the global state in dfs_soc_obj to false. + * If this global state is not set to false, then preCAC will not be + * started the next time this pdev becomes active. + */ + if (is_precac_running_on_given_pdev && !agile_precac_state) + dfs_soc->precac_state_started = PRECAC_NOT_STARTED; + + return QDF_STATUS_SUCCESS; +} +#endif +#endif + +#else +QDF_STATUS tgt_dfs_set_agile_precac_state(struct wlan_objmgr_pdev *pdev, + int agile_precac_state) +{ + return QDF_STATUS_SUCCESS; +} +#endif +qdf_export_symbol(tgt_dfs_set_agile_precac_state); + +#ifdef QCA_SUPPORT_AGILE_DFS +QDF_STATUS tgt_dfs_ocac_complete(struct wlan_objmgr_pdev *pdev, + struct vdev_adfs_complete_status *adfs_status) +{ + struct wlan_dfs *dfs; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!pdev) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null pdev"); + return status; + } + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "dfs is null"); + return status; + } + + dfs_process_ocac_complete(pdev, adfs_status->ocac_status, + adfs_status->center_freq); + + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS tgt_dfs_ocac_complete(struct wlan_objmgr_pdev *pdev, + struct vdev_adfs_complete_status *adfs_status) +{ + return QDF_STATUS_SUCCESS; +} +#endif +qdf_export_symbol(tgt_dfs_ocac_complete); + +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS tgt_dfs_find_vht80_chan_for_precac(struct wlan_objmgr_pdev *pdev, + uint32_t chan_mode, + uint8_t ch_freq_seg1, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t *phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_find_vht80_chan_for_precac(dfs, + chan_mode, + ch_freq_seg1, + cfreq1, + cfreq2, + phy_mode, + dfs_set_cfreq2, + set_agile); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_find_vht80_chan_for_precac); +#endif + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS +tgt_dfs_find_vht80_precac_chan_freq(struct wlan_objmgr_pdev *pdev, + uint32_t chan_mode, + uint16_t chan_freq_seg1_mhz, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t *phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_find_vht80_chan_for_precac_for_freq(dfs, + chan_mode, + chan_freq_seg1_mhz, + cfreq1, + cfreq2, + phy_mode, + dfs_set_cfreq2, + set_agile); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(tgt_dfs_find_vht80_precac_chan_freq); +#endif + +QDF_STATUS tgt_dfs_process_radar_ind(struct wlan_objmgr_pdev *pdev, + struct radar_found_info *radar_found) +{ + struct wlan_dfs *dfs; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!pdev) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null pdev"); + return status; + } + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is null"); + return status; + } + + dfs->dfs_radar_found_for_fo = 1; + status = dfs_process_radar_ind(dfs, radar_found); + dfs->dfs_radar_found_for_fo = 0; + + return status; +} +qdf_export_symbol(tgt_dfs_process_radar_ind); + +#ifndef QCA_MCL_DFS_SUPPORT +QDF_STATUS tgt_dfs_cac_complete(struct wlan_objmgr_pdev *pdev, uint32_t vdev_id) +{ + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS tgt_dfs_cac_complete(struct wlan_objmgr_pdev *pdev, uint32_t vdev_id) +{ + dfs_mlme_proc_cac(pdev, vdev_id); + + return QDF_STATUS_SUCCESS; +} +#endif +qdf_export_symbol(tgt_dfs_cac_complete); + +QDF_STATUS tgt_dfs_reg_ev_handler(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + return QDF_STATUS_E_FAILURE; + } + + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (!dfs_tx_ops) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null dfs_tx_ops"); + return QDF_STATUS_E_FAILURE; + } + + if (dfs_tx_ops->dfs_reg_ev_handler) + return dfs_tx_ops->dfs_reg_ev_handler(psoc); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(tgt_dfs_reg_ev_handler); + +QDF_STATUS tgt_dfs_stop(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return QDF_STATUS_SUCCESS; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_stop(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_stop); + +QDF_STATUS tgt_dfs_process_emulate_bang_radar_cmd(struct wlan_objmgr_pdev *pdev, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (dfs_tx_ops && dfs_tx_ops->dfs_process_emulate_bang_radar_cmd) + return dfs_tx_ops->dfs_process_emulate_bang_radar_cmd(pdev, + dfs_unit_test); + else + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "dfs_tx_ops=%pK", dfs_tx_ops); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(tgt_dfs_process_emulate_bang_radar_cmd); + +#ifdef QCA_MCL_DFS_SUPPORT +QDF_STATUS tgt_dfs_set_phyerr_filter_offload(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + struct dfs_soc_priv_obj *soc_obj; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_DFS); + if (!soc_obj) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to get dfs psoc component"); + return QDF_STATUS_E_FAILURE; + } + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (dfs_tx_ops && dfs_tx_ops->dfs_set_phyerr_filter_offload) + return dfs_tx_ops->dfs_set_phyerr_filter_offload(pdev, + soc_obj->dfs_is_phyerr_filter_offload); + else + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "dfs_tx_ops=%pK", dfs_tx_ops); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(tgt_dfs_set_phyerr_filter_offload); +#endif + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS +tgt_dfs_send_avg_params_to_fw(struct wlan_objmgr_pdev *pdev, + struct dfs_radar_found_params *params) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + struct wlan_dfs *dfs; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return status; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "psoc is null"); + return status; + } + + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (dfs_tx_ops && dfs_tx_ops->dfs_send_avg_radar_params_to_fw) + status = dfs_tx_ops->dfs_send_avg_radar_params_to_fw(pdev, + params); + + if (QDF_IS_STATUS_SUCCESS(status)) { + dfs->dfs_average_params_sent = 1; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Average radar parameters sent %d", + dfs->dfs_average_params_sent); + } + + return status; +} + +qdf_export_symbol(tgt_dfs_send_avg_params_to_fw); + +QDF_STATUS tgt_dfs_action_on_status_from_fw(struct wlan_objmgr_pdev *pdev, + uint32_t *status) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_action_on_fw_radar_status_check(dfs, status); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(tgt_dfs_action_on_status_from_fw); + +QDF_STATUS tgt_dfs_reset_spoof_test(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return QDF_STATUS_SUCCESS; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_reset_spoof_test(dfs); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(tgt_dfs_reset_spoof_test); +#endif + +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) +QDF_STATUS tgt_dfs_send_usenol_pdev_param(struct wlan_objmgr_pdev *pdev, + bool usenol) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (dfs_tx_ops && dfs_tx_ops->dfs_send_usenol_pdev_param) + return dfs_tx_ops->dfs_send_usenol_pdev_param(pdev, usenol); + + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "dfs_tx_ops=%pK", dfs_tx_ops); + + return QDF_STATUS_E_FAILURE; +} + +qdf_export_symbol(tgt_dfs_send_usenol_pdev_param); + +QDF_STATUS tgt_dfs_send_subchan_marking(struct wlan_objmgr_pdev *pdev, + bool subchanmark) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (!dfs_tx_ops) { + dfs_debug(NULL, WLAN_DEBUG_DFS_ALWAYS, + "dfs_tx_ops=%pK", dfs_tx_ops); + return QDF_STATUS_E_FAILURE; + } + + if (dfs_tx_ops->dfs_send_subchan_marking_pdev_param) + return dfs_tx_ops->dfs_send_subchan_marking_pdev_param( + pdev, subchanmark); + + dfs_debug(NULL, WLAN_DEBUG_DFS_ALWAYS, + "dfs_send_subchan_marking_pdev_param is null"); + + return QDF_STATUS_E_FAILURE; +} + +qdf_export_symbol(tgt_dfs_send_subchan_marking); +#endif + +void tgt_dfs_enable_stadfs(struct wlan_objmgr_pdev *pdev, bool val) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs->dfs_is_stadfs_enabled = val; +} + +bool tgt_dfs_is_stadfs_enabled(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return false; + } + + return dfs->dfs_is_stadfs_enabled; +} + +#ifdef QCA_SUPPORT_AGILE_DFS +void tgt_dfs_set_fw_adfs_support(struct wlan_objmgr_pdev *pdev, + bool fw_adfs_support_160, + bool fw_adfs_support_non_160) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_set_fw_adfs_support(dfs, + fw_adfs_support_160, + fw_adfs_support_non_160); +} + +qdf_export_symbol(tgt_dfs_set_fw_adfs_support); +#endif + +void tgt_dfs_init_tmp_psoc_nol(struct wlan_objmgr_pdev *pdev, + uint8_t num_radios) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_init_tmp_psoc_nol(dfs, num_radios); +} + +qdf_export_symbol(tgt_dfs_init_tmp_psoc_nol); + +void tgt_dfs_deinit_tmp_psoc_nol(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_deinit_tmp_psoc_nol(dfs); +} + +qdf_export_symbol(tgt_dfs_deinit_tmp_psoc_nol); + +void tgt_dfs_save_dfs_nol_in_psoc(struct wlan_objmgr_pdev *pdev, + uint8_t pdev_id, + uint16_t low_5ghz_freq, + uint16_t high_5ghz_freq) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_save_dfs_nol_in_psoc(dfs, pdev_id, low_5ghz_freq, high_5ghz_freq); +} + +qdf_export_symbol(tgt_dfs_save_dfs_nol_in_psoc); + +void tgt_dfs_reinit_nol_from_psoc_copy(struct wlan_objmgr_pdev *pdev, + uint8_t pdev_id) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_reinit_nol_from_psoc_copy(dfs, pdev_id); +} + +qdf_export_symbol(tgt_dfs_reinit_nol_from_psoc_copy); + +void tgt_dfs_reinit_precac_lists(struct wlan_objmgr_pdev *src_pdev, + struct wlan_objmgr_pdev *dest_pdev, + uint16_t low_5g_freq, + uint16_t high_5g_freq) +{ + struct wlan_dfs *src_dfs, *dest_dfs; + + src_dfs = wlan_pdev_get_dfs_obj(src_pdev); + if (!src_dfs) { + dfs_err(src_dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + dest_dfs = wlan_pdev_get_dfs_obj(dest_pdev); + if (!dest_dfs) { + dfs_err(dest_dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_reinit_precac_lists(src_dfs, dest_dfs, low_5g_freq, high_5g_freq); +} + +void tgt_dfs_complete_deferred_tasks(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_complete_deferred_tasks(dfs); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..556e304dda4138423f62e224cffdc23b3642f8ad --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_ucfg_api.c @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has the DFS dispatcher API implementation which is exposed + * to outside of DFS component. + */ + +#include "wlan_dfs_ucfg_api.h" +#include "wlan_dfs_init_deinit_api.h" +#include "../../core/src/dfs.h" +#include "../../core/src/dfs_zero_cac.h" +#include "../../core/src/dfs_partial_offload_radar.h" +#include "../../core/src/dfs_process_radar_found_ind.h" +#include + +QDF_STATUS ucfg_dfs_is_ap_cac_timer_running(struct wlan_objmgr_pdev *pdev, + int *is_ap_cac_timer_running) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *is_ap_cac_timer_running = dfs_is_ap_cac_timer_running(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_is_ap_cac_timer_running); + +QDF_STATUS ucfg_dfs_getnol(struct wlan_objmgr_pdev *pdev, + void *dfs_nolinfo) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_getnol(dfs, dfs_nolinfo); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_getnol); + +QDF_STATUS ucfg_dfs_override_cac_timeout(struct wlan_objmgr_pdev *pdev, + int cac_timeout, + int *status) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *status = dfs_override_cac_timeout(dfs, cac_timeout); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_override_cac_timeout); + +QDF_STATUS ucfg_dfs_get_override_cac_timeout(struct wlan_objmgr_pdev *pdev, + int *cac_timeout, + int *status) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *status = dfs_get_override_cac_timeout(dfs, cac_timeout); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_get_override_cac_timeout); + +QDF_STATUS ucfg_dfs_get_override_precac_timeout(struct wlan_objmgr_pdev *pdev, + int *precac_timeout) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_get_override_precac_timeout(dfs, precac_timeout); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_get_override_precac_timeout); + +QDF_STATUS ucfg_dfs_override_precac_timeout(struct wlan_objmgr_pdev *pdev, + int precac_timeout) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_override_precac_timeout(dfs, precac_timeout); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_override_precac_timeout); + +QDF_STATUS ucfg_dfs_set_precac_enable(struct wlan_objmgr_pdev *pdev, + uint32_t value) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + dfs_set_precac_enable(dfs, value); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_set_precac_enable); + +QDF_STATUS ucfg_dfs_get_legacy_precac_enable(struct wlan_objmgr_pdev *pdev, + bool *buff) +{ + struct wlan_dfs *dfs; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return QDF_STATUS_SUCCESS; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + *buff = dfs_is_legacy_precac_enabled(dfs); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(ucfg_dfs_get_legacy_precac_enable); + +QDF_STATUS ucfg_dfs_get_agile_precac_enable(struct wlan_objmgr_pdev *pdev, + bool *buff) +{ + struct wlan_dfs *dfs; + + if (!pdev || !buff) + return QDF_STATUS_E_FAILURE; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) { + *buff = false; + return QDF_STATUS_SUCCESS; + } + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + *buff = dfs_is_agile_precac_enabled(dfs); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(ucfg_dfs_get_agile_precac_enable); + +QDF_STATUS +ucfg_dfs_set_nol_subchannel_marking(struct wlan_objmgr_pdev *pdev, + bool nol_subchannel_marking) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_set_nol_subchannel_marking(dfs, nol_subchannel_marking); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_set_nol_subchannel_marking); + +QDF_STATUS ucfg_dfs_get_nol_subchannel_marking(struct wlan_objmgr_pdev *pdev, + bool *nol_subchannel_marking) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_get_nol_subchannel_marking(dfs, nol_subchannel_marking); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_get_nol_subchannel_marking); +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT +QDF_STATUS ucfg_dfs_set_precac_intermediate_chan(struct wlan_objmgr_pdev *pdev, + uint32_t value) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + dfs_set_precac_intermediate_chan(dfs, value); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_dfs_get_precac_intermediate_chan(struct wlan_objmgr_pdev *pdev, + int *buff) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + *buff = dfs_get_precac_intermediate_chan(dfs); + + return QDF_STATUS_SUCCESS; +} + +#ifdef CONFIG_CHAN_NUM_API +enum precac_chan_state +ucfg_dfs_get_precac_chan_state(struct wlan_objmgr_pdev *pdev, + uint8_t precac_chan) +{ + struct wlan_dfs *dfs; + enum precac_chan_state retval = PRECAC_ERR; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return PRECAC_ERR; + } + + retval = dfs_get_precac_chan_state(dfs, precac_chan); + if (retval == PRECAC_ERR) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Could not find precac channel state"); + } + + return retval; +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +enum precac_chan_state +ucfg_dfs_get_precac_chan_state_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t precac_chan_freq) +{ + struct wlan_dfs *dfs; + enum precac_chan_state retval = PRECAC_ERR; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return PRECAC_ERR; + } + + retval = dfs_get_precac_chan_state_for_freq(dfs, precac_chan_freq); + if (retval == PRECAC_ERR) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Could not find precac channel state"); + } + + return retval; +} +#endif +#endif + +#ifdef QCA_MCL_DFS_SUPPORT +QDF_STATUS ucfg_dfs_update_config(struct wlan_objmgr_psoc *psoc, + struct dfs_user_config *req) +{ + struct dfs_soc_priv_obj *soc_obj; + + if (!psoc || !req) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "psoc: 0x%pK, req: 0x%pK", psoc, req); + return QDF_STATUS_E_FAILURE; + } + + soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_DFS); + if (!soc_obj) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to get dfs psoc component"); + return QDF_STATUS_E_FAILURE; + } + + soc_obj->dfs_is_phyerr_filter_offload = + req->dfs_is_phyerr_filter_offload; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_update_config); +#endif + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS ucfg_dfs_set_override_status_timeout(struct wlan_objmgr_pdev *pdev, + int status_timeout) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + dfs_set_override_status_timeout(dfs, status_timeout); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(ucfg_dfs_set_override_status_timeout); + +QDF_STATUS ucfg_dfs_get_override_status_timeout(struct wlan_objmgr_pdev *pdev, + int *status_timeout) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + dfs_get_override_status_timeout(dfs, status_timeout); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(ucfg_dfs_get_override_status_timeout); +#endif + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(WLAN_DFS_SYNTHETIC_RADAR) +void ucfg_dfs_allow_hw_pulses(struct wlan_objmgr_pdev *pdev, + bool allow_hw_pulses) +{ + struct wlan_dfs *dfs; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_allow_hw_pulses(dfs, allow_hw_pulses); +} + +qdf_export_symbol(ucfg_dfs_allow_hw_pulses); + +bool ucfg_dfs_is_hw_pulses_allowed(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return false; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + return dfs_is_hw_pulses_allowed(dfs); +} + +qdf_export_symbol(ucfg_dfs_is_hw_pulses_allowed); +#endif + +#ifdef QCA_SUPPORT_AGILE_DFS +QDF_STATUS ucfg_dfs_reset_agile_config(struct wlan_objmgr_psoc *psoc) +{ + struct dfs_soc_priv_obj *soc_obj; + + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_DFS); + if (!soc_obj) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to get dfs psoc component"); + return QDF_STATUS_E_FAILURE; + } + + dfs_reset_agile_config(soc_obj); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(ucfg_dfs_reset_agile_config); +#endif + +QDF_STATUS ucfg_dfs_reinit_timers(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return QDF_STATUS_SUCCESS; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_reinit_timers(dfs); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(ucfg_dfs_reinit_timers); diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_utils_api.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_utils_api.c new file mode 100644 index 0000000000000000000000000000000000000000..c47703619671fb08488b9820739641623d7cdad6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_utils_api.c @@ -0,0 +1,1625 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has the DFS dispatcher API implementation which is exposed + * to outside of DFS component. + */ +#include +#include "wlan_dfs_utils_api.h" +#include "wlan_dfs_init_deinit_api.h" +#include "wlan_dfs_mlme_api.h" +#include "../../core/src/dfs.h" +#include "../../core/src/dfs_zero_cac.h" +#include +#include "../../core/src/dfs_random_chan_sel.h" +#ifdef QCA_DFS_USE_POLICY_MANAGER +#include "wlan_policy_mgr_api.h" +#endif +#ifdef QCA_DFS_NOL_PLATFORM_DRV_SUPPORT +#include +#endif +#include + +struct dfs_nol_info { + uint16_t num_chans; + struct dfsreq_nolelem dfs_nol[DFS_MAX_NOL_CHANNEL]; +}; + +QDF_STATUS utils_dfs_reset(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_reset(dfs); + dfs_nol_update(dfs); + dfs_reset_precaclists(dfs); + + return QDF_STATUS_SUCCESS; +} + +bool utils_dfs_is_freq_in_nol(struct wlan_objmgr_pdev *pdev, uint32_t freq) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return false; + + return dfs_is_freq_in_nol(dfs, freq); +} + +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS utils_dfs_cac_valid_reset(struct wlan_objmgr_pdev *pdev, + uint8_t prevchan_ieee, + uint32_t prevchan_flags) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_cac_valid_reset(dfs, prevchan_ieee, prevchan_flags); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_cac_valid_reset); +#endif + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS utils_dfs_cac_valid_reset_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t prevchan_freq, + uint32_t prevchan_flags) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_cac_valid_reset_for_freq(dfs, prevchan_freq, prevchan_flags); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(utils_dfs_cac_valid_reset_for_freq); +#endif + +QDF_STATUS utils_dfs_reset_precaclists(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_reset_precaclists(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_reset_precaclists); + +#ifdef CONFIG_CHAN_NUM_API +void utils_dfs_unmark_precac_nol(struct wlan_objmgr_pdev *pdev, uint8_t chan) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return; + + dfs_unmark_precac_nol(dfs, chan); +} + +qdf_export_symbol(utils_dfs_unmark_precac_nol); +#endif + +#ifdef CONFIG_CHAN_FREQ_API +void utils_dfs_unmark_precac_nol_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t chan_freq) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return; + + dfs_unmark_precac_nol_for_freq(dfs, chan_freq); +} + +qdf_export_symbol(utils_dfs_unmark_precac_nol_for_freq); +#endif + +QDF_STATUS utils_dfs_cancel_precac_timer(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_cancel_precac_timer(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_cancel_precac_timer); + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS utils_dfs_start_precac_timer(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "NULL dfs"); + return QDF_STATUS_E_FAILURE; + } + + if (!dfs->dfs_precac_secondary_freq_mhz) + return QDF_STATUS_E_FAILURE; + + dfs_start_precac_timer_for_freq(dfs, + dfs->dfs_precac_secondary_freq_mhz); + return QDF_STATUS_SUCCESS; +} +#else +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS utils_dfs_start_precac_timer(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "NULL dfs"); + return QDF_STATUS_E_FAILURE; + } + + if (!dfs->dfs_precac_secondary_freq) + return QDF_STATUS_E_FAILURE; + dfs_start_precac_timer(dfs, + dfs->dfs_precac_secondary_freq); + return QDF_STATUS_SUCCESS; +} +#endif +#endif + +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT +#ifdef CONFIG_CHAN_NUM_API +bool +utils_dfs_precac_decide_pref_chan(struct wlan_objmgr_pdev *pdev, + uint8_t *ch_ieee, + enum wlan_phymode mode) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "NULL dfs"); + return false; + } + return dfs_decide_precac_preferred_chan(dfs, ch_ieee, mode); +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +bool +utils_dfs_precac_decide_pref_chan_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *chan_freq, + enum wlan_phymode mode) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "NULL dfs"); + return false; + } + return dfs_decide_precac_preferred_chan_for_freq(dfs, chan_freq, mode); +} +#endif +#endif +QDF_STATUS utils_dfs_cancel_cac_timer(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_cancel_cac_timer(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_cancel_cac_timer); + +QDF_STATUS utils_dfs_start_cac_timer(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_start_cac_timer(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_start_cac_timer); + +QDF_STATUS utils_dfs_cac_stop(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_cac_stop(dfs); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_cac_stop); + +/** dfs_fill_chan_info() - Fill the dfs channel structure with wlan + * channel. + * @chan: Pointer to DFS channel structure. + * @wlan_chan: Pointer to WLAN Channel structure. + * + * Return: void + */ +#ifdef CONFIG_CHAN_FREQ_API +static void dfs_fill_chan_info(struct dfs_channel *chan, + struct wlan_channel *wlan_chan) +{ + chan->dfs_ch_freq = wlan_chan->ch_freq; + chan->dfs_ch_flags = wlan_chan->ch_flags; + chan->dfs_ch_flagext = wlan_chan->ch_flagext; + chan->dfs_ch_ieee = wlan_chan->ch_ieee; + chan->dfs_ch_vhtop_ch_freq_seg1 = wlan_chan->ch_freq_seg1; + chan->dfs_ch_vhtop_ch_freq_seg2 = wlan_chan->ch_freq_seg2; + chan->dfs_ch_mhz_freq_seg1 = wlan_chan->ch_cfreq1; + chan->dfs_ch_mhz_freq_seg2 = wlan_chan->ch_cfreq2; +} +#else +#ifdef CONFIG_CHAN_NUM_API +static void dfs_fill_chan_info(struct dfs_channel *chan, + struct wlan_channel *wlan_chan) +{ + chan->dfs_ch_freq = wlan_chan->ch_freq; + chan->dfs_ch_flags = wlan_chan->ch_flags; + chan->dfs_ch_flagext = wlan_chan->ch_flagext; + chan->dfs_ch_ieee = wlan_chan->ch_ieee; + chan->dfs_ch_vhtop_ch_freq_seg1 = wlan_chan->ch_freq_seg1; + chan->dfs_ch_vhtop_ch_freq_seg2 = wlan_chan->ch_freq_seg2; +} +#endif +#endif + +bool utils_dfs_is_precac_done(struct wlan_objmgr_pdev *pdev, + struct wlan_channel *wlan_chan) +{ + struct wlan_dfs *dfs; + struct dfs_channel chan; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return false; + + dfs_fill_chan_info(&chan, wlan_chan); + + return dfs_is_precac_done(dfs, &chan); +} + +bool utils_dfs_is_cac_required(struct wlan_objmgr_pdev *pdev, + struct wlan_channel *cur_chan, + struct wlan_channel *prev_chan, + bool *continue_current_cac) +{ + struct wlan_dfs *dfs; + struct dfs_channel cur_channel; + struct dfs_channel prev_channel; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return false; + + dfs_fill_chan_info(&cur_channel, cur_chan); + dfs_fill_chan_info(&prev_channel, prev_chan); + + return dfs_is_cac_required(dfs, + &cur_channel, + &prev_channel, + continue_current_cac); +} + +bool +utils_dfs_is_cac_required_on_dfs_curchan(struct wlan_objmgr_pdev *pdev, + bool *continue_current_cac) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return false; + + return dfs_is_cac_required(dfs, + dfs->dfs_curchan, + dfs->dfs_prevchan, + continue_current_cac); +} + +QDF_STATUS utils_dfs_stacac_stop(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_stacac_stop(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_stacac_stop); + +QDF_STATUS utils_dfs_get_usenol(struct wlan_objmgr_pdev *pdev, uint16_t *usenol) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *usenol = dfs_get_use_nol(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_get_usenol); + +QDF_STATUS utils_dfs_radar_disable(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_radar_disable(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_radar_disable); + +QDF_STATUS utils_dfs_set_update_nol_flag(struct wlan_objmgr_pdev *pdev, + bool val) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_set_update_nol_flag(dfs, val); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_set_update_nol_flag); + +QDF_STATUS utils_dfs_get_update_nol_flag(struct wlan_objmgr_pdev *pdev, + bool *nol_flag) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *nol_flag = dfs_get_update_nol_flag(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_get_update_nol_flag); + +QDF_STATUS utils_dfs_get_dfs_use_nol(struct wlan_objmgr_pdev *pdev, + int *dfs_use_nol) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *dfs_use_nol = dfs_get_use_nol(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_get_dfs_use_nol); + +QDF_STATUS utils_dfs_get_nol_timeout(struct wlan_objmgr_pdev *pdev, + int *dfs_nol_timeout) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *dfs_nol_timeout = dfs_get_nol_timeout(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_get_nol_timeout); + +QDF_STATUS utils_dfs_nol_addchan(struct wlan_objmgr_pdev *pdev, + uint16_t freq, + uint32_t dfs_nol_timeout) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + DFS_NOL_ADD_CHAN_LOCKED(dfs, freq, dfs_nol_timeout); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_nol_addchan); + +QDF_STATUS utils_dfs_nol_update(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_nol_update(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_nol_update); + +QDF_STATUS utils_dfs_second_segment_radar_disable(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_second_segment_radar_disable(dfs); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS utils_dfs_bw_reduce(struct wlan_objmgr_pdev *pdev, bool bw_reduce) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs->dfs_bw_reduced = bw_reduce; + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(utils_dfs_bw_reduce); + +QDF_STATUS utils_dfs_is_bw_reduce(struct wlan_objmgr_pdev *pdev, + bool *bw_reduce) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *bw_reduce = dfs->dfs_bw_reduced; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS utils_dfs_fetch_nol_ie_info(struct wlan_objmgr_pdev *pdev, + uint8_t *nol_ie_bandwidth, + uint16_t *nol_ie_startfreq, + uint8_t *nol_ie_bitmap) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_fetch_nol_ie_info(dfs, nol_ie_bandwidth, nol_ie_startfreq, + nol_ie_bitmap); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS utils_dfs_set_rcsa_flags(struct wlan_objmgr_pdev *pdev, + bool is_rcsa_ie_sent, + bool is_nol_ie_sent) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_set_rcsa_flags(dfs, is_rcsa_ie_sent, is_nol_ie_sent); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS utils_dfs_get_rcsa_flags(struct wlan_objmgr_pdev *pdev, + bool *is_rcsa_ie_sent, + bool *is_nol_ie_sent) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + dfs_get_rcsa_flags(dfs, is_rcsa_ie_sent, is_nol_ie_sent); + + return QDF_STATUS_SUCCESS; +} + +bool utils_dfs_process_nol_ie_bitmap(struct wlan_objmgr_pdev *pdev, + uint8_t nol_ie_bandwidth, + uint16_t nol_ie_startfreq, + uint8_t nol_ie_bitmap) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return false; + return dfs_process_nol_ie_bitmap(dfs, nol_ie_bandwidth, + nol_ie_startfreq, + nol_ie_bitmap); +} + +QDF_STATUS utils_dfs_set_cac_timer_running(struct wlan_objmgr_pdev *pdev, + int val) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs->dfs_cac_timer_running = val; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_set_cac_timer_running); + +QDF_STATUS utils_dfs_get_nol_chfreq_and_chwidth(struct wlan_objmgr_pdev *pdev, + void *nollist, + uint32_t *nol_chfreq, + uint32_t *nol_chwidth, + int index) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_get_nol_chfreq_and_chwidth(nollist, nol_chfreq, nol_chwidth, index); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_get_nol_chfreq_and_chwidth); + +QDF_STATUS utils_dfs_update_cur_chan_flags(struct wlan_objmgr_pdev *pdev, + uint64_t flags, + uint16_t flagext) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_update_cur_chan_flags(dfs, flags, flagext); + + return QDF_STATUS_SUCCESS; +} + +static void utils_dfs_get_max_phy_mode(struct wlan_objmgr_pdev *pdev, + uint32_t *phy_mode) +{ + return; +} + +static void utils_dfs_get_max_sup_width(struct wlan_objmgr_pdev *pdev, + uint8_t *ch_width) +{ + return; +} + +#ifndef QCA_DFS_USE_POLICY_MANAGER +void utils_dfs_get_nol_history_chan_list(struct wlan_objmgr_pdev *pdev, + void *clist, uint32_t *num_chan) +{ + int i, j = 0; + struct regulatory_channel *cur_chan_list; + struct wlan_dfs *dfs; + struct dfs_channel *chan_list = (struct dfs_channel *)clist; + + *num_chan = 0; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return; + + cur_chan_list = qdf_mem_malloc(NUM_CHANNELS * sizeof(*cur_chan_list)); + if (!cur_chan_list) + return; + + if (wlan_reg_get_current_chan_list( + pdev, cur_chan_list) != QDF_STATUS_SUCCESS) { + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, + "failed to get cur_chan list"); + qdf_mem_free(cur_chan_list); + return; + } + + for (i = 0; i < NUM_CHANNELS; i++) { + if (cur_chan_list[i].nol_history) { + chan_list[j].dfs_ch_freq = cur_chan_list[i].center_freq; + j++; + } + } + + *num_chan = j; + qdf_mem_free(cur_chan_list); +} + +void utils_dfs_get_chan_list(struct wlan_objmgr_pdev *pdev, + void *clist, uint32_t *num_chan) +{ + int i = 0, j = 0; + enum channel_state state; + struct regulatory_channel *cur_chan_list; + struct wlan_dfs *dfs; + struct dfs_channel *chan_list = (struct dfs_channel *)clist; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return; + + cur_chan_list = qdf_mem_malloc(NUM_CHANNELS * + sizeof(struct regulatory_channel)); + if (!cur_chan_list) { + *num_chan = 0; + return; + } + + if (wlan_reg_get_current_chan_list( + pdev, cur_chan_list) != QDF_STATUS_SUCCESS) { + *num_chan = 0; + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, + "failed to get curr channel list"); + return; + } + + for (i = 0; i < NUM_CHANNELS; i++) { + state = cur_chan_list[i].state; + if (state == CHANNEL_STATE_DFS || + state == CHANNEL_STATE_ENABLE) { + chan_list[j].dfs_ch_ieee = cur_chan_list[i].chan_num; + chan_list[j].dfs_ch_freq = cur_chan_list[i].center_freq; + if (state == CHANNEL_STATE_DFS) + chan_list[j].dfs_ch_flagext = + WLAN_CHAN_DFS; + + if (cur_chan_list[i].nol_history) + chan_list[j].dfs_ch_flagext |= + WLAN_CHAN_HISTORY_RADAR; + j++; + } + } + *num_chan = j; + qdf_mem_free(cur_chan_list); + + return; +} + +/** + * utils_dfs_get_channel_list() - Get channel list from regdb component, based + * on current channel list. + * @pdev: Pointer to pdev structure. + * @vdev: vdev of request + * @chan: Pointer to channel list. + * @num_chan: number of channels. + * + * Get regdb channel list based on dfs current channel. + * Ex: When AP is operating in 5GHz channel, filter 2.4GHz and 4.9GHZ channels + * so that the random channel function does not select either 2.4GHz or 4.9GHz + * channel. + */ +#ifdef CONFIG_CHAN_FREQ_API +static void utils_dfs_get_channel_list(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev, + struct dfs_channel *chan_list, + uint32_t *num_chan) +{ + struct dfs_channel *tmp_chan_list = NULL; + struct wlan_dfs *dfs; + bool is_curchan_5g; + bool is_curchan_24g; + bool is_curchan_49g; + uint32_t chan_num; + uint32_t center_freq; + uint16_t flagext; + int i, j = 0; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return; + } + + tmp_chan_list = qdf_mem_malloc(*num_chan * sizeof(*tmp_chan_list)); + if (!tmp_chan_list) + return; + + utils_dfs_get_chan_list(pdev, (void *)tmp_chan_list, num_chan); + + chan_num = dfs->dfs_curchan->dfs_ch_ieee; + center_freq = dfs->dfs_curchan->dfs_ch_freq; + is_curchan_5g = WLAN_REG_IS_5GHZ_CH_FREQ(center_freq); + is_curchan_24g = WLAN_REG_IS_24GHZ_CH_FREQ(center_freq); + is_curchan_49g = WLAN_REG_IS_49GHZ_FREQ(center_freq); + + for (i = 0; i < *num_chan; i++) { + chan_num = tmp_chan_list[i].dfs_ch_ieee; + center_freq = tmp_chan_list[i].dfs_ch_freq; + flagext = tmp_chan_list[i].dfs_ch_flagext; + /* No change in prototype needed. Hence retaining same func */ + if (!dfs_mlme_check_allowed_prim_chanlist(pdev, center_freq)) + continue; + + if ((is_curchan_5g) && WLAN_REG_IS_5GHZ_CH_FREQ(center_freq)) { + chan_list[j].dfs_ch_ieee = chan_num; + chan_list[j].dfs_ch_freq = center_freq; + chan_list[j].dfs_ch_flagext = flagext; + j++; + } else if ((is_curchan_24g) && + WLAN_REG_IS_24GHZ_CH_FREQ(center_freq)) { + chan_list[j].dfs_ch_ieee = chan_num; + chan_list[j].dfs_ch_freq = center_freq; + j++; + } else if ((is_curchan_49g) && + WLAN_REG_IS_49GHZ_FREQ(center_freq)) { + chan_list[j].dfs_ch_ieee = chan_num; + chan_list[j].dfs_ch_freq = center_freq; + j++; + } + } + + *num_chan = j; + + qdf_mem_free(tmp_chan_list); +} +#else /* NUM_API */ +#ifdef CONFIG_CHAN_NUM_API +static void utils_dfs_get_channel_list(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev, + struct dfs_channel *chan_list, + uint32_t *num_chan) +{ + struct dfs_channel *tmp_chan_list = NULL; + struct wlan_dfs *dfs; + bool is_curchan_5g; + bool is_curchan_24g; + bool is_curchan_49g; + uint32_t chan_num; + uint32_t center_freq; + uint16_t flagext; + int i, j = 0; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return; + } + + tmp_chan_list = qdf_mem_malloc(*num_chan * sizeof(*tmp_chan_list)); + if (!tmp_chan_list) + return; + + utils_dfs_get_chan_list(pdev, (void *)tmp_chan_list, num_chan); + + chan_num = dfs->dfs_curchan->dfs_ch_ieee; + center_freq = dfs->dfs_curchan->dfs_ch_freq; + is_curchan_5g = WLAN_REG_IS_5GHZ_CH(chan_num); + is_curchan_24g = WLAN_REG_IS_24GHZ_CH(chan_num); + is_curchan_49g = WLAN_REG_IS_49GHZ_FREQ(center_freq); + + for (i = 0; i < *num_chan; i++) { + chan_num = tmp_chan_list[i].dfs_ch_ieee; + center_freq = tmp_chan_list[i].dfs_ch_freq; + flagext = tmp_chan_list[i].dfs_ch_flagext; + + if (!dfs_mlme_check_allowed_prim_chanlist(pdev, chan_num)) + continue; + + if ((is_curchan_5g) && WLAN_REG_IS_5GHZ_CH(chan_num)) { + chan_list[j].dfs_ch_ieee = chan_num; + chan_list[j].dfs_ch_freq = center_freq; + chan_list[j].dfs_ch_flagext = flagext; + j++; + } else if ((is_curchan_24g) && + WLAN_REG_IS_24GHZ_CH(chan_num)) { + chan_list[j].dfs_ch_ieee = chan_num; + chan_list[j].dfs_ch_freq = center_freq; + j++; + } else if ((is_curchan_49g) && + WLAN_REG_IS_49GHZ_FREQ(center_freq)) { + chan_list[j].dfs_ch_ieee = chan_num; + chan_list[j].dfs_ch_freq = center_freq; + j++; + } + } + + *num_chan = j; + + qdf_mem_free(tmp_chan_list); +} +#endif +#endif + +#else + +void utils_dfs_get_nol_history_chan_list(struct wlan_objmgr_pdev *pdev, + void *clist, uint32_t *num_chan) +{ + utils_dfs_get_chan_list(pdev, clist, num_chan); +} + +static void utils_dfs_get_channel_list(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev, + struct dfs_channel *chan_list, + uint32_t *num_chan) +{ + uint32_t pcl_ch[NUM_CHANNELS] = {0}; + uint8_t weight_list[NUM_CHANNELS] = {0}; + uint32_t len; + uint32_t weight_len; + int i; + struct wlan_objmgr_psoc *psoc; + uint32_t conn_count = 0; + enum policy_mgr_con_mode mode; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + *num_chan = 0; + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + return; + } + + len = QDF_ARRAY_SIZE(pcl_ch); + weight_len = QDF_ARRAY_SIZE(weight_list); + + if (vdev) + mode = policy_mgr_convert_device_mode_to_qdf_type( + wlan_vdev_mlme_get_opmode(vdev)); + else + mode = PM_SAP_MODE; + conn_count = policy_mgr_mode_specific_connection_count( + psoc, mode, NULL); + if (0 == conn_count) + policy_mgr_get_pcl(psoc, mode, pcl_ch, + &len, weight_list, weight_len); + else + policy_mgr_get_pcl_for_existing_conn( + psoc, mode, pcl_ch, &len, weight_list, + weight_len, true); + + if (*num_chan < len) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Invalid len src=%d, dst=%d", + *num_chan, len); + *num_chan = 0; + return; + } + + for (i = 0; i < len; i++) { + chan_list[i].dfs_ch_ieee = + wlan_reg_freq_to_chan(pdev, pcl_ch[i]); + chan_list[i].dfs_ch_freq = pcl_ch[i]; + } + *num_chan = i; + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "num channels %d", i); +} + +void utils_dfs_get_chan_list(struct wlan_objmgr_pdev *pdev, + void *clist, uint32_t *num_chan) +{ + utils_dfs_get_channel_list(pdev, NULL, (struct dfs_channel *)clist, + num_chan); +} +#endif + +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS utils_dfs_get_vdev_random_channel( + struct wlan_objmgr_pdev *pdev, struct wlan_objmgr_vdev *vdev, + uint16_t flags, struct ch_params *ch_params, uint32_t *hw_mode, + uint8_t *target_chan, struct dfs_acs_info *acs_info) +{ + uint32_t dfs_reg; + uint32_t num_chan = NUM_CHANNELS; + struct wlan_dfs *dfs = NULL; + struct wlan_objmgr_psoc *psoc; + struct dfs_channel *chan_list = NULL; + struct dfs_channel cur_chan; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + *target_chan = 0; + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + goto random_chan_error; + } + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + goto random_chan_error; + } + + wlan_reg_get_dfs_region(pdev, &dfs_reg); + chan_list = qdf_mem_malloc(num_chan * sizeof(*chan_list)); + if (!chan_list) + goto random_chan_error; + + utils_dfs_get_channel_list(pdev, vdev, chan_list, &num_chan); + if (!num_chan) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "zero channels"); + goto random_chan_error; + } + + cur_chan.dfs_ch_vhtop_ch_freq_seg1 = ch_params->center_freq_seg0; + cur_chan.dfs_ch_vhtop_ch_freq_seg2 = ch_params->center_freq_seg1; + + if (!ch_params->ch_width) + utils_dfs_get_max_sup_width(pdev, + (uint8_t *)&ch_params->ch_width); + + *target_chan = dfs_prepare_random_channel(dfs, chan_list, + num_chan, flags, (uint8_t *)&ch_params->ch_width, + &cur_chan, (uint8_t)dfs_reg, acs_info); + + ch_params->center_freq_seg0 = cur_chan.dfs_ch_vhtop_ch_freq_seg1; + ch_params->center_freq_seg1 = cur_chan.dfs_ch_vhtop_ch_freq_seg2; + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "input width=%d", ch_params->ch_width); + + if (*target_chan) { + wlan_reg_set_channel_params(pdev, + *target_chan, 0, ch_params); + utils_dfs_get_max_phy_mode(pdev, hw_mode); + status = QDF_STATUS_SUCCESS; + } + + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "ch=%d, seg0=%d, seg1=%d, width=%d", + *target_chan, ch_params->center_freq_seg0, + ch_params->center_freq_seg1, ch_params->ch_width); + +random_chan_error: + qdf_mem_free(chan_list); + + return status; +} + +qdf_export_symbol(utils_dfs_get_vdev_random_channel); +#endif + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS utils_dfs_get_vdev_random_channel_for_freq( + struct wlan_objmgr_pdev *pdev, struct wlan_objmgr_vdev *vdev, + uint16_t flags, struct ch_params *chan_params, uint32_t *hw_mode, + uint16_t *target_chan_freq, struct dfs_acs_info *acs_info) +{ + uint32_t dfs_reg; + uint32_t num_chan = NUM_CHANNELS; + struct wlan_dfs *dfs = NULL; + struct wlan_objmgr_psoc *psoc; + struct dfs_channel *chan_list = NULL; + struct dfs_channel cur_chan; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + *target_chan_freq = 0; + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + goto random_chan_error; + } + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + goto random_chan_error; + } + + wlan_reg_get_dfs_region(pdev, &dfs_reg); + chan_list = qdf_mem_malloc(num_chan * sizeof(*chan_list)); + if (!chan_list) + goto random_chan_error; + + utils_dfs_get_channel_list(pdev, vdev, chan_list, &num_chan); + if (!num_chan) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "zero channels"); + goto random_chan_error; + } + + cur_chan.dfs_ch_vhtop_ch_freq_seg1 = chan_params->center_freq_seg0; + cur_chan.dfs_ch_vhtop_ch_freq_seg2 = chan_params->center_freq_seg1; + cur_chan.dfs_ch_mhz_freq_seg1 = chan_params->mhz_freq_seg0; + cur_chan.dfs_ch_mhz_freq_seg2 = chan_params->mhz_freq_seg1; + + if (!chan_params->ch_width) + utils_dfs_get_max_sup_width(pdev, + (uint8_t *)&chan_params->ch_width); + + *target_chan_freq = dfs_prepare_random_channel_for_freq(dfs, chan_list, + num_chan, flags, (uint8_t *)&chan_params->ch_width, + &cur_chan, (uint8_t)dfs_reg, acs_info); + + chan_params->center_freq_seg0 = cur_chan.dfs_ch_vhtop_ch_freq_seg1; + chan_params->center_freq_seg1 = cur_chan.dfs_ch_vhtop_ch_freq_seg2; + chan_params->mhz_freq_seg0 = cur_chan.dfs_ch_mhz_freq_seg1; + chan_params->mhz_freq_seg1 = cur_chan.dfs_ch_mhz_freq_seg2; + + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "input width=%d", chan_params->ch_width); + + if (*target_chan_freq) { + wlan_reg_set_channel_params_for_freq(pdev, *target_chan_freq, 0, + chan_params); + utils_dfs_get_max_phy_mode(pdev, hw_mode); + status = QDF_STATUS_SUCCESS; + } + + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "ch=%d, seg0=%d, seg1=%d, width=%d", + *target_chan_freq, chan_params->center_freq_seg0, + chan_params->center_freq_seg1, chan_params->ch_width); + +random_chan_error: + qdf_mem_free(chan_list); + + return status; +} + +qdf_export_symbol(utils_dfs_get_vdev_random_channel_for_freq); +#endif + +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS utils_dfs_get_random_channel( + struct wlan_objmgr_pdev *pdev, + uint16_t flags, + struct ch_params *ch_params, + uint32_t *hw_mode, + uint8_t *target_chan, + struct dfs_acs_info *acs_info) +{ + return utils_dfs_get_vdev_random_channel( + pdev, NULL, flags, ch_params, hw_mode, target_chan, + acs_info); +} +qdf_export_symbol(utils_dfs_get_random_channel); +#endif + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS utils_dfs_get_random_channel_for_freq( + struct wlan_objmgr_pdev *pdev, + uint16_t flags, + struct ch_params *ch_params, + uint32_t *hw_mode, + uint16_t *target_chan_freq, + struct dfs_acs_info *acs_info) +{ + return utils_dfs_get_vdev_random_channel_for_freq(pdev, NULL, flags, + ch_params, hw_mode, + target_chan_freq, + acs_info); +} + +qdf_export_symbol(utils_dfs_get_random_channel_for_freq); +#endif + +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS utils_dfs_bw_reduced_channel( + struct wlan_objmgr_pdev *pdev, + struct ch_params *ch_params, + uint32_t *hw_mode, + uint8_t *target_chan) +{ + struct wlan_dfs *dfs = NULL; + struct wlan_objmgr_psoc *psoc; + enum channel_state ch_state; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + *target_chan = 0; + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + return status; + } + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return status; + } + + ch_state = wlan_reg_get_channel_state(pdev, + dfs->dfs_curchan->dfs_ch_ieee); + + if (ch_state == CHANNEL_STATE_DFS || + ch_state == CHANNEL_STATE_ENABLE) { + ch_params->center_freq_seg0 = + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg1; + ch_params->center_freq_seg1 = + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg2; + wlan_reg_set_channel_params(pdev, + dfs->dfs_curchan->dfs_ch_ieee, + 0, ch_params); + + *target_chan = dfs->dfs_curchan->dfs_ch_ieee; + utils_dfs_get_max_phy_mode(pdev, hw_mode); + + return QDF_STATUS_SUCCESS; + } + + return status; +} + +qdf_export_symbol(utils_dfs_bw_reduced_channel); +#endif + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS utils_dfs_bw_reduced_channel_for_freq( + struct wlan_objmgr_pdev *pdev, + struct ch_params *chan_params, + uint32_t *hw_mode, + uint16_t *target_chan_freq) +{ + struct wlan_dfs *dfs = NULL; + struct wlan_objmgr_psoc *psoc; + enum channel_state ch_state; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct dfs_channel *dfs_curchan; + + *target_chan_freq = 0; + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + return status; + } + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return status; + } + dfs_curchan = dfs->dfs_curchan; + ch_state = + wlan_reg_get_channel_state_for_freq(pdev, + dfs_curchan->dfs_ch_freq); + + if (ch_state == CHANNEL_STATE_DFS || + ch_state == CHANNEL_STATE_ENABLE) { + chan_params->mhz_freq_seg0 = + dfs_curchan->dfs_ch_mhz_freq_seg1; + chan_params->mhz_freq_seg1 = + dfs_curchan->dfs_ch_mhz_freq_seg2; + wlan_reg_set_channel_params_for_freq(pdev, + dfs_curchan->dfs_ch_freq, + 0, chan_params); + + *target_chan_freq = dfs_curchan->dfs_ch_freq; + utils_dfs_get_max_phy_mode(pdev, hw_mode); + + return QDF_STATUS_SUCCESS; + } + + return status; +} + +qdf_export_symbol(utils_dfs_bw_reduced_channel_for_freq); +#endif + + +#ifdef QCA_DFS_NOL_PLATFORM_DRV_SUPPORT +void utils_dfs_init_nol(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + struct wlan_objmgr_psoc *psoc; + qdf_device_t qdf_dev; + struct dfs_nol_info *dfs_nolinfo; + int len; + + dfs = wlan_pdev_get_dfs_obj(pdev); + psoc = wlan_pdev_get_psoc(pdev); + if (!dfs || !psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "dfs %pK, psoc %pK", dfs, psoc); + return; + } + + qdf_dev = psoc->soc_objmgr.qdf_dev; + if (!qdf_dev->dev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null device"); + return; + } + + dfs_nolinfo = qdf_mem_malloc(sizeof(*dfs_nolinfo)); + if (!dfs_nolinfo) + return; + + qdf_mem_zero(dfs_nolinfo, sizeof(*dfs_nolinfo)); + len = pld_wlan_get_dfs_nol(qdf_dev->dev, (void *)dfs_nolinfo, + (uint16_t)sizeof(*dfs_nolinfo)); + if (len > 0) { + dfs_set_nol(dfs, dfs_nolinfo->dfs_nol, dfs_nolinfo->num_chans); + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "nol channels in pld"); + DFS_PRINT_NOL_LOCKED(dfs); + } else { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "no nol in pld"); + } + qdf_mem_free(dfs_nolinfo); +} +#endif +qdf_export_symbol(utils_dfs_init_nol); + +#ifndef QCA_DFS_NOL_PLATFORM_DRV_SUPPORT +void utils_dfs_save_nol(struct wlan_objmgr_pdev *pdev) +{ +} +#else +void utils_dfs_save_nol(struct wlan_objmgr_pdev *pdev) +{ + struct dfs_nol_info *dfs_nolinfo; + struct wlan_dfs *dfs = NULL; + struct wlan_objmgr_psoc *psoc; + qdf_device_t qdf_dev; + int num_chans = 0; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + return; + } + + qdf_dev = psoc->soc_objmgr.qdf_dev; + if (!qdf_dev->dev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null device"); + return; + } + + dfs_nolinfo = qdf_mem_malloc(sizeof(*dfs_nolinfo)); + if (!dfs_nolinfo) + return; + + qdf_mem_zero(dfs_nolinfo, sizeof(*dfs_nolinfo)); + DFS_GET_NOL_LOCKED(dfs, dfs_nolinfo->dfs_nol, &num_chans); + if (num_chans > 0) { + + if (num_chans > DFS_MAX_NOL_CHANNEL) + dfs_nolinfo->num_chans = DFS_MAX_NOL_CHANNEL; + else + dfs_nolinfo->num_chans = num_chans; + + pld_wlan_set_dfs_nol(qdf_dev->dev, (void *)dfs_nolinfo, + (uint16_t)sizeof(*dfs_nolinfo)); + } + qdf_mem_free(dfs_nolinfo); +} +#endif +qdf_export_symbol(utils_dfs_save_nol); + +void utils_dfs_print_nol_channels(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs = NULL; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return; + } + + DFS_PRINT_NOL_LOCKED(dfs); +} +qdf_export_symbol(utils_dfs_print_nol_channels); + +void utils_dfs_clear_nol_channels(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs = NULL; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return; + } + + /* First print list */ + DFS_PRINT_NOL_LOCKED(dfs); + + /* clear local cache first */ + dfs_nol_timer_cleanup(dfs); + dfs_nol_update(dfs); + + /* + * update platform driver nol list with local cache which is zero, + * cleared in above step, so this will clear list in platform driver. + */ + utils_dfs_save_nol(pdev); +} +qdf_export_symbol(utils_dfs_clear_nol_channels); + +#ifdef CONFIG_CHAN_NUM_API +void utils_dfs_reg_update_nol_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *ch_list, + uint8_t num_ch, + bool nol_ch) +{ + /* TODO : Need locking?*/ + wlan_reg_update_nol_ch(pdev, ch_list, num_ch, nol_ch); +} +qdf_export_symbol(utils_dfs_reg_update_nol_ch); +#endif + +#ifdef CONFIG_CHAN_FREQ_API +void utils_dfs_reg_update_nol_chan_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *freq_list, + uint8_t num_chan, + bool nol_chan) +{ + wlan_reg_update_nol_ch_for_freq(pdev, freq_list, num_chan, nol_chan); +} + +qdf_export_symbol(utils_dfs_reg_update_nol_chan_for_freq); +#endif + +#ifdef CONFIG_CHAN_NUM_API +void utils_dfs_reg_update_nol_history_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *ch_list, + uint8_t num_ch, + bool nol_history_ch) +{ + wlan_reg_update_nol_history_ch(pdev, ch_list, num_ch, nol_history_ch); +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +void +utils_dfs_reg_update_nol_history_chan_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *freq_list, + uint8_t num_chan, + bool nol_history_chan) +{ + wlan_reg_update_nol_history_ch_for_freq(pdev, freq_list, num_chan, + nol_history_chan); +} +#endif + +uint8_t utils_dfs_freq_to_chan(uint32_t freq) +{ + uint8_t chan; + + if (freq == 0) + return 0; + + if (freq > DFS_24_GHZ_BASE_FREQ && freq < DFS_CHAN_14_FREQ) + chan = ((freq - DFS_24_GHZ_BASE_FREQ) / DFS_CHAN_SPACING_5MHZ); + else if (freq == DFS_CHAN_14_FREQ) + chan = DFS_24_GHZ_CHANNEL_14; + else if ((freq > DFS_24_GHZ_BASE_FREQ) && (freq < DFS_5_GHZ_BASE_FREQ)) + chan = (((freq - DFS_CHAN_15_FREQ) / DFS_CHAN_SPACING_20MHZ) + + DFS_24_GHZ_CHANNEL_15); + else + chan = (freq - DFS_5_GHZ_BASE_FREQ) / DFS_CHAN_SPACING_5MHZ; + + return chan; +} +qdf_export_symbol(utils_dfs_freq_to_chan); + +uint32_t utils_dfs_chan_to_freq(uint8_t chan) +{ + if (chan == 0) + return 0; + + if (chan < DFS_24_GHZ_CHANNEL_14) + return DFS_24_GHZ_BASE_FREQ + (chan * DFS_CHAN_SPACING_5MHZ); + else if (chan == DFS_24_GHZ_CHANNEL_14) + return DFS_CHAN_14_FREQ; + else if (chan < DFS_24_GHZ_CHANNEL_27) + return DFS_CHAN_15_FREQ + ((chan - DFS_24_GHZ_CHANNEL_15) * + DFS_CHAN_SPACING_20MHZ); + else if (chan == DFS_5_GHZ_CHANNEL_170) + return DFS_CHAN_170_FREQ; + else + return DFS_5_GHZ_BASE_FREQ + (chan * DFS_CHAN_SPACING_5MHZ); +} +qdf_export_symbol(utils_dfs_chan_to_freq); + +#ifdef QCA_MCL_DFS_SUPPORT +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS utils_dfs_mark_leaking_ch(struct wlan_objmgr_pdev *pdev, + enum phy_ch_width ch_width, + uint8_t temp_ch_lst_sz, + uint8_t *temp_ch_lst) +{ + struct wlan_dfs *dfs = NULL; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + return dfs_mark_leaking_ch(dfs, ch_width, temp_ch_lst_sz, temp_ch_lst); +} +qdf_export_symbol(utils_dfs_mark_leaking_ch); +#endif + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS utils_dfs_mark_leaking_chan_for_freq(struct wlan_objmgr_pdev *pdev, + enum phy_ch_width ch_width, + uint8_t temp_chan_lst_sz, + uint16_t *temp_freq_lst) +{ + struct wlan_dfs *dfs = NULL; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + return dfs_mark_leaking_chan_for_freq(dfs, ch_width, temp_chan_lst_sz, + temp_freq_lst); +} +qdf_export_symbol(utils_dfs_mark_leaking_chan_for_freq); +#endif +#endif + +int utils_get_dfsdomain(struct wlan_objmgr_pdev *pdev) +{ + enum dfs_reg dfsdomain; + + wlan_reg_get_dfs_region(pdev, &dfsdomain); + + return dfsdomain; +} + +uint16_t utils_dfs_get_cur_rd(struct wlan_objmgr_pdev *pdev) +{ + struct cur_regdmn_info cur_regdmn; + + wlan_reg_get_curr_regdomain(pdev, &cur_regdmn); + + return cur_regdmn.regdmn_pair_id; +} + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS utils_dfs_is_spoof_check_failed(struct wlan_objmgr_pdev *pdev, + bool *is_spoof_check_failed) +{ + struct wlan_dfs *dfs; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return QDF_STATUS_SUCCESS; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is null"); + return QDF_STATUS_E_FAILURE; + } + + *is_spoof_check_failed = dfs->dfs_spoof_check_failed; + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(utils_dfs_is_spoof_check_failed); +#endif + +int dfs_get_num_chans(void) +{ + return NUM_CHANNELS; +} + +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) +QDF_STATUS utils_dfs_get_disable_radar_marking(struct wlan_objmgr_pdev *pdev, + bool *disable_radar_marking) +{ + struct wlan_dfs *dfs; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return QDF_STATUS_SUCCESS; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is null"); + return QDF_STATUS_E_FAILURE; + } + + *disable_radar_marking = dfs_get_disable_radar_marking(dfs); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(utils_dfs_get_disable_radar_marking); +#endif + +bool utils_is_dfs_cfreq2_ch(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) + return false; + + return WLAN_IS_CHAN_DFS_CFREQ2(dfs->dfs_curchan); +} + +qdf_export_symbol(utils_is_dfs_cfreq2_ch); + +void utils_dfs_deliver_event(struct wlan_objmgr_pdev *pdev, uint16_t freq, + enum WLAN_DFS_EVENTS event) +{ + if (global_dfs_to_mlme.mlme_dfs_deliver_event) + global_dfs_to_mlme.mlme_dfs_deliver_event(pdev, freq, event); +} + +void utils_dfs_reset_dfs_prevchan(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + if (!tgt_dfs_is_pdev_5ghz(pdev)) + return; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is null"); + return; + } + + dfs_reset_dfs_prevchan(dfs); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/inc/wlan_lmac_if_api.h b/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/inc/wlan_lmac_if_api.h new file mode 100644 index 0000000000000000000000000000000000000000..578a6fcaaf4ee0fd47e654ca0e96f4af6faa2738 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/inc/wlan_lmac_if_api.h @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_LMAC_IF_API_H_ +#define _WLAN_LMAC_IF_API_H_ + +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_pdev_obj.h" + +/** + * wlan_lmac_if_umac_rx_ops_register() - UMAC rx handler register + * @rx_ops: Pointer to rx_ops structure to be populated + * + * Register umac RX callabacks which will be called by DA/OL/WMA/WMI + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_lmac_if_umac_rx_ops_register + (struct wlan_lmac_if_rx_ops *rx_ops); + +/** + * wlan_lmac_if_set_umac_txops_registration_cb() - tx registration + * callback assignment + * @dev_type: Dev type can be either Direct attach or Offload + * @handler: handler to be called for LMAC tx ops registration + * + * API to assign appropriate tx registration callback handler based on the + * device type(Offload or Direct attach) + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_lmac_if_set_umac_txops_registration_cb + (QDF_STATUS (*handler)(struct wlan_lmac_if_tx_ops *)); + + +/** + * wlan_lmac_if_get_mgmt_txrx_rx_ops() - retrieve the mgmt rx_ops + * @psoc: psoc context + * + * API to retrieve the mgmt rx_ops from the psoc context + * + * Return: mgmt_rx_ops pointer + */ +static inline struct wlan_lmac_if_mgmt_txrx_rx_ops * +wlan_lmac_if_get_mgmt_txrx_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return NULL; + + return &psoc->soc_cb.rx_ops.mgmt_txrx_rx_ops; +} + +/** + * wlan_lmac_if_get_dfs_rx_ops() - retrieve the dfs rx_ops + * @psoc: psoc context + * + * API to retrieve the dfs rx_ops from the psoc context + * + * Return: dfs_rx_ops pointer + */ +static inline struct wlan_lmac_if_dfs_rx_ops * +wlan_lmac_if_get_dfs_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return NULL; + + return &psoc->soc_cb.rx_ops.dfs_rx_ops; +} + +/** + * wlan_lmac_if_get_reg_rx_ops() - retrieve the reg rx_ops + * @psoc: psoc context + * + * API to retrieve the reg rx_ops from the psoc context + * + * Return: reg_rx_ops pointer + */ +static inline struct wlan_lmac_if_reg_rx_ops * +wlan_lmac_if_get_reg_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return NULL; + + return &psoc->soc_cb.rx_ops.reg_rx_ops; +} + +#ifdef WLAN_SUPPORT_GREEN_AP +/** + * wlan_lmac_if_get_green_ap_rx_ops() - retrieve the green ap rx_ops + * @psoc: psoc context + * + * API to retrieve the dfs rx_ops from the psoc context + * + * Return: green_ap_rx_ops pointer + */ +static inline struct wlan_lmac_if_green_ap_rx_ops * +wlan_lmac_if_get_green_ap_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return NULL; + + return &psoc->soc_cb.rx_ops.green_ap_rx_ops; +} +#endif + +/** + * mgmt_txrx_get_nbuf() - retrieve nbuf from mgmt desc_id + * @pdev: pdev context + * @desc_id: mgmt desc_id + * + * API to retrieve the nbuf from mgmt desc_id + * + * Return: nbuf + */ +static inline qdf_nbuf_t +mgmt_txrx_get_nbuf(struct wlan_objmgr_pdev *pdev, uint32_t desc_id) +{ + struct wlan_lmac_if_mgmt_txrx_rx_ops *mgmt_rx_ops; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + mgmt_rx_ops = wlan_lmac_if_get_mgmt_txrx_rx_ops(psoc); + + if (mgmt_rx_ops && mgmt_rx_ops->mgmt_txrx_get_nbuf_from_desc_id) + return mgmt_rx_ops->mgmt_txrx_get_nbuf_from_desc_id(pdev, + desc_id); + + return NULL; +} + +/** + * mgmt_txrx_tx_completion_handler() - mgmt tx completion handler + * @pdev: pdev context + * @desc_id: mgmt desc_id + * @status: tx status + * @params: tx params + * + * API to handle the tx completion for mgmt frames + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static inline QDF_STATUS +mgmt_txrx_tx_completion_handler(struct wlan_objmgr_pdev *pdev, + uint32_t desc_id, uint32_t status, + void *params) +{ + struct wlan_lmac_if_mgmt_txrx_rx_ops *mgmt_rx_ops; + struct wlan_objmgr_psoc *psoc; + qdf_nbuf_t nbuf; + + psoc = wlan_pdev_get_psoc(pdev); + mgmt_rx_ops = wlan_lmac_if_get_mgmt_txrx_rx_ops(psoc); + + if (mgmt_rx_ops && mgmt_rx_ops->mgmt_tx_completion_handler) + return mgmt_rx_ops->mgmt_tx_completion_handler(pdev, desc_id, + status, params); + + nbuf = mgmt_txrx_get_nbuf(pdev, desc_id); + if (nbuf) + qdf_nbuf_free(nbuf); + + return QDF_STATUS_E_NULL_VALUE; +} + +/** + * mgmt_txrx_rx_handler() - mgmt rx frame handler + * @psoc: psoc context + * @nbuf: nbuf + * @params: rx params + * + * API to receive mgmt frames + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static inline QDF_STATUS +mgmt_txrx_rx_handler(struct wlan_objmgr_psoc *psoc, qdf_nbuf_t nbuf, + void *params) +{ + struct wlan_lmac_if_mgmt_txrx_rx_ops *mgmt_rx_ops; + + mgmt_rx_ops = wlan_lmac_if_get_mgmt_txrx_rx_ops(psoc); + + if (mgmt_rx_ops && mgmt_rx_ops->mgmt_rx_frame_handler) + return mgmt_rx_ops->mgmt_rx_frame_handler(psoc, nbuf, params); + + if (nbuf) + qdf_nbuf_free(nbuf); + + return QDF_STATUS_E_NULL_VALUE; +} + +/** + * mgmt_txrx_get_peer() - retrieve peer from mgmt desc_id + * @pdev: pdev context + * @desc_id: mgmt desc_id + * + * API to retrieve the peer from mgmt desc_id + * + * Return: objmgr peer pointer + */ +static inline struct wlan_objmgr_peer * +mgmt_txrx_get_peer(struct wlan_objmgr_pdev *pdev, uint32_t desc_id) +{ + struct wlan_lmac_if_mgmt_txrx_rx_ops *mgmt_rx_ops; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + mgmt_rx_ops = wlan_lmac_if_get_mgmt_txrx_rx_ops(psoc); + + if (mgmt_rx_ops && mgmt_rx_ops->mgmt_txrx_get_peer_from_desc_id) + return mgmt_rx_ops->mgmt_txrx_get_peer_from_desc_id(pdev, + desc_id); + + return NULL; +} + +/** + * mgmt_txrx_get_vdev_id() - retrieve vdev_id from mgmt desc_id + * @pdev: pdev context + * @desc_id: mgmt desc_id + * + * API to retrieve the vdev_id from mgmt desc_id + * + * Return: vdev_id + */ +static inline uint8_t +mgmt_txrx_get_vdev_id(struct wlan_objmgr_pdev *pdev, uint32_t desc_id) +{ + struct wlan_lmac_if_mgmt_txrx_rx_ops *mgmt_rx_ops; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + mgmt_rx_ops = wlan_lmac_if_get_mgmt_txrx_rx_ops(psoc); + + if (mgmt_rx_ops && mgmt_rx_ops->mgmt_txrx_get_vdev_id_from_desc_id) + return mgmt_rx_ops->mgmt_txrx_get_vdev_id_from_desc_id(pdev, + desc_id); + + return WLAN_UMAC_VDEV_ID_MAX; +} +/** + * mgmt_txrx_get_free_desc_count() - retrieve vdev_id from mgmt desc_id + * @pdev: pdev context + * + * API to get the free desc count mgmt desc pool + * + * Return: free_desc_count + */ +static inline uint32_t +mgmt_txrx_get_free_desc_count(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_lmac_if_mgmt_txrx_rx_ops *mgmt_rx_ops; + struct wlan_objmgr_psoc *psoc; + uint32_t free_desc_count = WLAN_INVALID_MGMT_DESC_COUNT; + + psoc = wlan_pdev_get_psoc(pdev); + mgmt_rx_ops = wlan_lmac_if_get_mgmt_txrx_rx_ops(psoc); + + if (mgmt_rx_ops && mgmt_rx_ops->mgmt_txrx_get_free_desc_pool_count) + free_desc_count = mgmt_rx_ops->mgmt_txrx_get_free_desc_pool_count( + pdev); + + return free_desc_count; +} +#endif /* _WLAN_LMAC_IF_API_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/inc/wlan_lmac_if_def.h b/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/inc/wlan_lmac_if_def.h new file mode 100644 index 0000000000000000000000000000000000000000..2f2b0150505d022e57086255191fa19efe64ff58 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/inc/wlan_lmac_if_def.h @@ -0,0 +1,1726 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_LMAC_IF_DEF_H_ +#define _WLAN_LMAC_IF_DEF_H_ + +#include "qdf_status.h" +#include "wlan_objmgr_cmn.h" +#ifdef DFS_COMPONENT_ENABLE +#include +#endif +#include "wlan_mgmt_txrx_utils_api.h" +#include "wlan_scan_public_structs.h" + +#ifdef WLAN_ATF_ENABLE +#include "wlan_atf_utils_defs.h" +#endif +#ifdef QCA_SUPPORT_SON +#include +#endif +#ifdef WLAN_SA_API_ENABLE +#include "wlan_sa_api_utils_defs.h" +#endif +#ifdef WLAN_CONV_SPECTRAL_ENABLE +#include "wlan_spectral_public_structs.h" +#endif +#include + +#ifdef WLAN_CONV_CRYPTO_SUPPORTED +#include "wlan_crypto_global_def.h" +#endif + +#ifdef WLAN_CFR_ENABLE +#include "wlan_cfr_utils_api.h" +#endif + +#include +#include + +/* Number of dev type: Direct attach and Offload */ +#define MAX_DEV_TYPE 2 + +#ifdef WIFI_POS_CONVERGED +/* forward declarations */ +struct oem_data_req; +struct oem_data_rsp; +#endif /* WIFI_POS_CONVERGED */ + +#ifdef DIRECT_BUF_RX_ENABLE +/* forward declarations for direct buf rx */ +struct direct_buf_rx_data; +/* Forward declaration for module_ring_params */ +struct module_ring_params; +/*Forward declaration for dbr_module_config */ +struct dbr_module_config; +#endif + +#ifdef FEATURE_WLAN_TDLS +#include "wlan_tdls_public_structs.h" +#endif + +#ifdef QCA_SUPPORT_CP_STATS +#include +#endif /* QCA_SUPPORT_CP_STATS */ + +#include +#include + +#ifdef QCA_SUPPORT_CP_STATS +/** + * struct wlan_lmac_if_cp_stats_tx_ops - defines southbound tx callbacks for + * control plane statistics component + * @cp_stats_attach: function pointer to register events from FW + * @cp_stats_detach: function pointer to unregister events from FW + */ +struct wlan_lmac_if_cp_stats_tx_ops { + QDF_STATUS (*cp_stats_attach)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*cp_stats_detach)(struct wlan_objmgr_psoc *posc); +#ifdef CONFIG_MCL + void (*inc_wake_lock_stats)(uint32_t reason, + struct wake_lock_stats *stats, + uint32_t *unspecified_wake_count); + QDF_STATUS (*send_req_stats)(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *req); +#endif +}; + +/** + * struct wlan_lmac_if_cp_stats_rx_ops - defines southbound rx callbacks for + * control plane statistics component + * @cp_stats_rx_event_handler: function pointer to rx FW events + */ +struct wlan_lmac_if_cp_stats_rx_ops { + QDF_STATUS (*cp_stats_rx_event_handler)(struct wlan_objmgr_vdev *vdev); +#ifdef CONFIG_MCL + QDF_STATUS (*process_stats_event)(struct wlan_objmgr_psoc *psoc, + struct stats_event *ev); +#endif +}; +#endif + +/** + * struct wlan_lmac_if_mgmt_txrx_tx_ops - structure of tx function + * pointers for mgmt txrx component + * @mgmt_tx_send: function pointer to transmit mgmt tx frame + * @beacon_send: function pointer to transmit beacon frame + * @fd_action_frame_send: function pointer to transmit FD action frame + * @tx_drain_nbuf_op: function pointer for any umac nbuf realted ops for + * pending mgmt frames cleanup + */ +struct wlan_lmac_if_mgmt_txrx_tx_ops { + QDF_STATUS (*mgmt_tx_send)(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t nbuf, u_int32_t desc_id, + void *mgmt_tx_params); + QDF_STATUS (*beacon_send)(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t nbuf); + QDF_STATUS (*fd_action_frame_send)(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t nbuf); + void (*tx_drain_nbuf_op)(struct wlan_objmgr_pdev *pdev, + qdf_nbuf_t nbuf); +}; + +/** + * struct wlan_lmac_if_scan_tx_ops - south bound tx function pointers for scan + * @scan_start: function to start scan + * @scan_cancel: function to cancel scan + * @pno_start: start pno scan + * @pno_stop: stop pno scan + * @scan_reg_ev_handler: function to register for scan events + * @scan_unreg_ev_handler: function to unregister for scan events + * + * scan module uses these functions to avail ol/da lmac services + */ +struct wlan_lmac_if_scan_tx_ops { + QDF_STATUS (*scan_start)(struct wlan_objmgr_pdev *pdev, + struct scan_start_request *req); + QDF_STATUS (*scan_cancel)(struct wlan_objmgr_pdev *pdev, + struct scan_cancel_param *req); + QDF_STATUS (*pno_start)(struct wlan_objmgr_psoc *psoc, + struct pno_scan_req_params *req); + QDF_STATUS (*pno_stop)(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id); + QDF_STATUS (*scan_reg_ev_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*scan_unreg_ev_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*set_chan_list)(struct wlan_objmgr_pdev *pdev, void *arg); +}; + +/** + * struct wlan_lmac_if_ftm_tx_ops - south bound tx function pointers for ftm + * @ftm_attach: function to register event handlers with FW + * @ftm_detach: function to de-register event handlers with FW + * @ftm_cmd_send: function to send FTM commands to FW + * + * ftm module uses these functions to avail ol/da lmac services + */ +struct wlan_lmac_if_ftm_tx_ops { + QDF_STATUS (*ftm_attach)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*ftm_detach)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*ftm_cmd_send)(struct wlan_objmgr_pdev *pdev, + uint8_t *buf, uint32_t len, uint8_t mac_id); +}; + +enum wlan_mlme_cfg_id; +/** + * struct wlan_lmac_if_mlme_tx_ops - south bound tx function pointers for mlme + * @get_wifi_iface_id: function to get wifi interface id + * @vdev_mlme_attach: function to register events + * @vdev_mlme_detach: function to unregister events + * @vdev_create_send: function to send vdev create + * @vdev_start_send: function to send vdev start + * @vdev_up_send: function to send vdev up + * @vdev_delete_send: function to send vdev delete + * @vdev_stop_send: function to send vdev stop + * @vdev_down_send: function to send vdev down + * @vdev_set_param_send: function to send vdev parameter + * @vdev_set_tx_rx_decap_type: function to send vdev tx rx cap/decap type + * @vdev_set_nac_rssi_send: function to send nac rssi + * @vdev_set_neighbour_rx_cmd_send: function to send vdev neighbour rx cmd + * @vdev_sifs_trigger_send: function to send vdev sifs trigger + * @vdev_set_custom_aggr_size_cmd_send: function to send custom aggr size + * @vdev_config_ratemask_cmd_send: function to send ratemask + * @peer_flush_tids_send: function to flush peer tids + * @multiple_vdev_restart_req_cmd: function to send multiple vdev restart + * @beacon_send_cmd: function to send beacon + * @beacon_tmpl_send: function to send beacon template + * @vdev_bcn_miss_offload_send: function to send beacon miss offload + * @vdev_sta_ps_param_send: function to sent STA power save config + * @peer_delete_all_send: function to send vdev delete all peer request + * @psoc_vdev_rsp_timer_init: function to initialize psoc vdev response timer + * @psoc_vdev_rsp_timer_deinit: function to deinitialize psoc vdev rsp timer + * @psoc_vdev_rsp_timer_inuse: function to determine whether the vdev rsp + * timer is inuse or not + * @psoc_vdev_rsp_timer_mod: function to modify the time of vdev rsp timer + * @psoc_wake_lock_init: Initialize psoc wake lock for vdev response timer + * @psoc_wake_lock_deinit: De-Initialize psoc wake lock for vdev response timer + */ +struct wlan_lmac_if_mlme_tx_ops { + uint32_t (*get_wifi_iface_id) (struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*vdev_mlme_attach)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*vdev_mlme_detach)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*vdev_create_send)(struct wlan_objmgr_vdev *vdev, + struct vdev_create_params *param); + QDF_STATUS (*vdev_start_send)(struct wlan_objmgr_vdev *vdev, + struct vdev_start_params *param); + QDF_STATUS (*vdev_up_send)(struct wlan_objmgr_vdev *vdev, + struct vdev_up_params *param); + QDF_STATUS (*vdev_delete_send)(struct wlan_objmgr_vdev *vdev, + struct vdev_delete_params *param); + QDF_STATUS (*vdev_stop_send)(struct wlan_objmgr_vdev *vdev, + struct vdev_stop_params *param); + QDF_STATUS (*vdev_down_send)(struct wlan_objmgr_vdev *vdev, + struct vdev_down_params *param); + QDF_STATUS (*vdev_set_param_send)(struct wlan_objmgr_vdev *vdev, + struct vdev_set_params *param); + QDF_STATUS (*vdev_set_tx_rx_decap_type)(struct wlan_objmgr_vdev *vdev, + enum wlan_mlme_cfg_id param_id, + uint32_t value); + QDF_STATUS (*vdev_set_nac_rssi_send)( + struct wlan_objmgr_vdev *vdev, + struct vdev_scan_nac_rssi_params *param); + QDF_STATUS (*vdev_set_neighbour_rx_cmd_send)( + struct wlan_objmgr_vdev *vdev, + struct set_neighbour_rx_params *param, + uint8_t *mac); + QDF_STATUS (*vdev_sifs_trigger_send)( + struct wlan_objmgr_vdev *vdev, + struct sifs_trigger_param *param); + QDF_STATUS (*vdev_set_custom_aggr_size_cmd_send)( + struct wlan_objmgr_vdev *vdev, + struct set_custom_aggr_size_params *param); + QDF_STATUS (*vdev_config_ratemask_cmd_send)( + struct wlan_objmgr_vdev *vdev, + struct config_ratemask_params *param); + QDF_STATUS (*peer_flush_tids_send)( + struct wlan_objmgr_vdev *vdev, + struct peer_flush_params *param); + QDF_STATUS (*multiple_vdev_restart_req_cmd)( + struct wlan_objmgr_pdev *pdev, + struct multiple_vdev_restart_params *param); + QDF_STATUS (*beacon_cmd_send)(struct wlan_objmgr_vdev *vdev, + struct beacon_params *param); + QDF_STATUS (*beacon_tmpl_send)(struct wlan_objmgr_vdev *vdev, + struct beacon_tmpl_params *param); +#if defined(WLAN_SUPPORT_FILS) || defined(CONFIG_BAND_6GHZ) + QDF_STATUS (*vdev_fils_enable_send)(struct wlan_objmgr_vdev *vdev, + struct config_fils_params *param); +#endif + QDF_STATUS (*vdev_bcn_miss_offload_send)(struct wlan_objmgr_vdev *vdev); + QDF_STATUS (*vdev_sta_ps_param_send)(struct wlan_objmgr_vdev *vdev, + struct sta_ps_params *param); + QDF_STATUS (*peer_delete_all_send)( + struct wlan_objmgr_vdev *vdev, + struct peer_delete_all_params *param); + QDF_STATUS (*psoc_vdev_rsp_timer_init)( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id); + void (*psoc_vdev_rsp_timer_deinit)( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id); + QDF_STATUS (*psoc_vdev_rsp_timer_inuse)( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id); + QDF_STATUS (*psoc_vdev_rsp_timer_mod)( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + int mseconds); + void (*psoc_wake_lock_init)( + struct wlan_objmgr_psoc *psoc); + void (*psoc_wake_lock_deinit)( + struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*vdev_mgr_rsp_timer_stop)( + struct wlan_objmgr_psoc *psoc, + struct vdev_response_timer *vdev_rsp, + enum wlan_vdev_mgr_tgt_if_rsp_bit clear_bit); +}; + +/** + * struct wlan_lmac_if_scan_rx_ops - south bound rx function pointers for scan + * @scan_ev_handler: scan event handler + * @scan_set_max_active_scans: set max active scans allowed + * + * lmac modules uses this API to post scan events to scan module + */ +struct wlan_lmac_if_scan_rx_ops { + QDF_STATUS (*scan_ev_handler)(struct wlan_objmgr_psoc *psoc, + struct scan_event_info *event_info); + QDF_STATUS (*scan_set_max_active_scans)(struct wlan_objmgr_psoc *psoc, + uint32_t max_active_scans); +}; + +#ifdef CONVERGED_P2P_ENABLE + +/* forward declarations for p2p tx ops */ +struct p2p_ps_config; +struct p2p_lo_start; +struct p2p_set_mac_filter; + +/** + * struct wlan_lmac_if_p2p_tx_ops - structure of tx function pointers + * for P2P component + * @set_ps: function pointer to set power save + * @lo_start: function pointer to start listen offload + * @lo_stop: function pointer to stop listen offload + * @set_noa: function pointer to disable/enable NOA + * @reg_lo_ev_handler: function pointer to register lo event handler + * @reg_noa_ev_handler: function pointer to register noa event handler + * @unreg_lo_ev_handler: function pointer to unregister lo event handler + * @unreg_noa_ev_handler:function pointer to unregister noa event handler + * @reg_mac_addr_rx_filter_handler: function pointer to register/unregister + * set mac addr status event callback. + * @set_mac_addr_rx_filter_cmd: function pointer to set mac addr rx filter + */ +struct wlan_lmac_if_p2p_tx_ops { + QDF_STATUS (*set_ps)(struct wlan_objmgr_psoc *psoc, + struct p2p_ps_config *ps_config); +#ifdef FEATURE_P2P_LISTEN_OFFLOAD + QDF_STATUS (*lo_start)(struct wlan_objmgr_psoc *psoc, + struct p2p_lo_start *lo_start); + QDF_STATUS (*lo_stop)(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id); + QDF_STATUS (*reg_lo_ev_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*unreg_lo_ev_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); +#endif + QDF_STATUS (*set_noa)(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id, bool disable_noa); + QDF_STATUS (*reg_noa_ev_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*unreg_noa_ev_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*reg_mac_addr_rx_filter_handler)( + struct wlan_objmgr_psoc *psoc, bool reg); + QDF_STATUS (*set_mac_addr_rx_filter_cmd)( + struct wlan_objmgr_psoc *psoc, + struct p2p_set_mac_filter *param); +}; +#endif + +#ifdef WLAN_ATF_ENABLE + +/** + * struct wlan_lmac_if_atf_tx_ops - ATF specific tx function pointers + * @atf_node_unblock: Resume node + * @atf_set_enable_disable: Set atf enable/disable + * @atf_tokens_used: Get used atf tokens + * @atf_get_unused_txtoken: Get unused atf tokens + * @atf_peer_resume: Resume peer + * @atf_tokens_unassigned: Set unassigned atf tockens + * @atf_capable_peer: Set atf state change + * @atf_airtime_estimate: Get estimated airtime + * @atf_debug_peerstate: Get peer state + * @atf_enable_disable: Set atf peer stats enable/disable + * @atf_ssid_sched_policy: Set ssid schedule policy + * @atf_set: Set atf + * @atf_set_grouping: Set atf grouping + * @atf_set_group_ac: Set atf Group AC + * @atf_send_peer_request: Send peer requests + * @atf_set_bwf: Set bandwidth fairness + * @atf_peer_buf_held: Get buffer held + * @atf_get_peer_airtime: Get peer airtime + * @atf_get_chbusyper: Get channel busy + * @atf_open: ATF open + * @atf_register_event_handler ATF register wmi event handlers + * @atf_unregister_event_handler ATF unregister wmi event handlers + */ +struct wlan_lmac_if_atf_tx_ops { + void (*atf_node_unblock)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer); + void (*atf_set_enable_disable)(struct wlan_objmgr_pdev *pdev, + uint8_t value); + uint8_t (*atf_tokens_used)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer); + void (*atf_get_unused_txtoken)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + int *unused_token); + void (*atf_peer_resume)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer); + void (*atf_tokens_unassigned)(struct wlan_objmgr_pdev *pdev, + uint32_t tokens_unassigned); + void (*atf_capable_peer)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + uint8_t val, uint8_t atfstate_change); + uint32_t (*atf_airtime_estimate)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + uint32_t tput, + uint32_t *possible_tput); + uint32_t (*atf_debug_peerstate)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct atf_peerstate *peerstate); + int32_t (*atf_enable_disable)(struct wlan_objmgr_vdev *vdev, + uint8_t value); + int32_t (*atf_ssid_sched_policy)(struct wlan_objmgr_vdev *vdev, + uint8_t value); + int32_t (*atf_set)(struct wlan_objmgr_pdev *pdev, + struct pdev_atf_req *atf_req, + uint8_t atf_tput_based); + int32_t (*atf_set_grouping)(struct wlan_objmgr_pdev *pdev, + struct pdev_atf_ssid_group_req *atf_grp_req, + uint8_t atf_tput_based); + int32_t (*atf_set_group_ac)(struct wlan_objmgr_pdev *pdev, + struct pdev_atf_group_wmm_ac_req *atf_acreq, + uint8_t atf_tput_based); + int32_t (*atf_send_peer_request)(struct wlan_objmgr_pdev *pdev, + struct pdev_atf_peer_ext_request *atfr, + uint8_t atf_tput_based); + int32_t (*atf_set_bwf)(struct wlan_objmgr_pdev *pdev, + struct pdev_bwf_req *bwf_req); + uint32_t (*atf_peer_buf_held)(struct wlan_objmgr_peer *peer); + uint32_t (*atf_get_peer_airtime)(struct wlan_objmgr_peer *peer); + uint32_t (*atf_get_chbusyper)(struct wlan_objmgr_pdev *pdev); + void (*atf_open)(struct wlan_objmgr_psoc *psoc); + void (*atf_register_event_handler)(struct wlan_objmgr_psoc *psoc); + void (*atf_unregister_event_handler)(struct wlan_objmgr_psoc *psoc); +}; +#endif + +#ifdef WLAN_SUPPORT_FILS +/** + * struct wlan_lmac_if_fd_tx_ops - FILS Discovery specific Tx function pointers + * @fd_vdev_config_fils: Enable and configure FILS Discovery + * @fd_register_event_handler: Register swfda WMI event handler + * @fd_unregister_event_handler: Un-register swfda WMI event handler + * @fd_offload_tmpl_send: Send FD template to FW + */ +struct wlan_lmac_if_fd_tx_ops { + QDF_STATUS (*fd_vdev_config_fils)(struct wlan_objmgr_vdev *vdev, + uint32_t fd_period); + void (*fd_register_event_handler)(struct wlan_objmgr_psoc *psoc); + void (*fd_unregister_event_handler)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*fd_offload_tmpl_send)(struct wlan_objmgr_pdev *pdev, + struct fils_discovery_tmpl_params *fd_tmpl_param); +}; +#endif + +#ifdef WLAN_SA_API_ENABLE + +/** + * struct wlan_lmac_if_sa_api_tx_ops - SA API specific tx function pointers + */ + +struct wlan_lmac_if_sa_api_tx_ops { + void (*sa_api_register_event_handler)(struct wlan_objmgr_psoc *psoc); + void (*sa_api_unregister_event_handler)(struct wlan_objmgr_psoc *posc); + void (*sa_api_enable_sa) (struct wlan_objmgr_pdev *pdev, + uint32_t enable, uint32_t mode, uint32_t rx_antenna); + void (*sa_api_set_rx_antenna) (struct wlan_objmgr_pdev *pdev, + uint32_t antenna); + void (*sa_api_set_tx_antenna) (struct wlan_objmgr_peer *peer, + uint32_t *antenna_array); + void (*sa_api_set_tx_default_antenna) (struct wlan_objmgr_pdev *pdev, + u_int32_t antenna); + void (*sa_api_set_training_info) (struct wlan_objmgr_peer *peer, + uint32_t *rate_array, + uint32_t *antenna_array, + uint32_t numpkts); + void (*sa_api_prepare_rateset)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct sa_rate_info *rate_info); + void (*sa_api_set_node_config_ops) (struct wlan_objmgr_peer *peer, + uint32_t cmd_id, uint16_t args_count, + u_int32_t args_arr[]); +}; + +#endif + +#ifdef WLAN_CFR_ENABLE +/** + * struct wlan_lmac_if_cfr_tx_ops - CFR specific tx function pointers + * @cfr_init_pdev: Initialize CFR + * @cfr_deinit_pdev: De-initialize CFR + * @cfr_enable_cfr_timer: Function to enable CFR timer + * @cfr_start_capture: Function to start CFR capture + * @cfr_stop_capture: Function to stop CFR capture + * @cfr_config_rcc: Function to set the Repetitive channel capture params + * @cfr_start_lut_timer: Function to start timer to flush aged-out LUT entries + * @cfr_stop_lut_timer: Function to stop timer to flush aged-out LUT entries + * @cfr_default_ta_ra_cfg: Function to configure default values for TA_RA mode + * @cfr_dump_lut_enh: Function to dump LUT entries + * @cfr_rx_tlv_process: Function to process PPDU status TLVs + * @cfr_update_global_cfg: Function to update the global config for + * a successful commit session. + */ +struct wlan_lmac_if_cfr_tx_ops { + int (*cfr_init_pdev)(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); + int (*cfr_deinit_pdev)(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); + int (*cfr_enable_cfr_timer)(struct wlan_objmgr_pdev *pdev, + uint32_t cfr_timer); + int (*cfr_start_capture)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct cfr_capture_params *params); + int (*cfr_stop_capture)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer); +#ifdef WLAN_ENH_CFR_ENABLE + QDF_STATUS (*cfr_config_rcc)(struct wlan_objmgr_pdev *pdev, + struct cfr_rcc_param *params); + void (*cfr_start_lut_timer)(struct wlan_objmgr_pdev *pdev); + void (*cfr_stop_lut_timer)(struct wlan_objmgr_pdev *pdev); + void (*cfr_default_ta_ra_cfg)(struct cfr_rcc_param *params, + bool allvalid, uint16_t reset_cfg); + void (*cfr_dump_lut_enh)(struct wlan_objmgr_pdev *pdev); + void (*cfr_rx_tlv_process)(struct wlan_objmgr_pdev *pdev, void *nbuf); + void (*cfr_update_global_cfg)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*cfr_subscribe_ppdu_desc)(struct wlan_objmgr_pdev *pdev, + bool is_subscribe); +#endif +}; +#endif /* WLAN_CFR_ENABLE */ + +#ifdef WLAN_CONV_SPECTRAL_ENABLE +struct wmi_spectral_cmd_ops; +/** + * struct wlan_lmac_if_sptrl_tx_ops - Spectral south bound Tx operations + * @sptrlto_spectral_init: Initialize LMAC/target_if Spectral + * @sptrlto_spectral_deinit: De-initialize LMAC/target_if Spectral + * @sptrlto_set_spectral_config: Set Spectral configuration + * @sptrlto_get_spectral_config: Get Spectral configuration + * @sptrlto_start_spectral_scan: Start Spectral Scan + * @sptrlto_stop_spectral_scan: Stop Spectral Scan + * @sptrlto_is_spectral_active: Get whether Spectral is active + * @sptrlto_is_spectral_enabled: Get whether Spectral is enabled + * @sptrlto_set_icm_active: Set whether ICM is active or inactive + * @sptrlto_get_icm_active: Get whether ICM is active or inactive + * @sptrlto_get_nominal_nf: Get Nominal Noise Floor for the current + * frequency band + * @sptrlto_set_debug_level: Set Spectral debug level + * @sptrlto_get_debug_level: Get Spectral debug level + * @sptrlto_get_chaninfo: Get channel information + * @sptrlto_clear_chaninfo: Clear channel information + * @sptrlto_get_spectral_capinfo: Get Spectral capability information + * @sptrlto_get_spectral_diagstats: Get Spectral diagnostic statistics + * @sptrlto_register_netlink_cb: Register Spectral Netlink callbacks + * @sptrlto_use_nl_bcast: Get whether to use Netlink broadcast/unicast + * @sptrlto_deregister_netlink_cb: De-register Spectral Netlink callbacks + * @sptrlto_process_spectral_report: Process spectral report + * @sptrlto_set_dma_debug: Set DMA debug for Spectral + * @sptrlto_direct_dma_support: Whether Direct-DMA is supported on this radio + * @sptrlto_check_and_do_dbr_ring_debug: Start/Stop Spectral ring debug based + * on the previous state + * @sptrlto_check_and_do_dbr_buff_debug: Start/Stop Spectral buffer debug based + * on the previous state + **/ +struct wlan_lmac_if_sptrl_tx_ops { + void *(*sptrlto_pdev_spectral_init)(struct wlan_objmgr_pdev *pdev); + void (*sptrlto_pdev_spectral_deinit)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*sptrlto_set_spectral_config) + (struct wlan_objmgr_pdev *pdev, + const u_int32_t threshtype, + const u_int32_t value, + const enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + QDF_STATUS (*sptrlto_get_spectral_config) + (struct wlan_objmgr_pdev *pdev, + struct spectral_config *sptrl_config, + enum spectral_scan_mode smode); + QDF_STATUS (*sptrlto_start_spectral_scan) + (struct wlan_objmgr_pdev *pdev, + const enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + QDF_STATUS (*sptrlto_stop_spectral_scan) + (struct wlan_objmgr_pdev *pdev, + const enum spectral_scan_mode smode, + enum spectral_cp_error_code *err); + bool (*sptrlto_is_spectral_active)(struct wlan_objmgr_pdev *pdev, + const enum spectral_scan_mode smode); + bool (*sptrlto_is_spectral_enabled)(struct wlan_objmgr_pdev *pdev, + enum spectral_scan_mode smode); + QDF_STATUS (*sptrlto_set_debug_level)(struct wlan_objmgr_pdev *pdev, + u_int32_t debug_level); + u_int32_t (*sptrlto_get_debug_level)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*sptrlto_get_spectral_capinfo) + (struct wlan_objmgr_pdev *pdev, + struct spectral_caps *scaps); + QDF_STATUS (*sptrlto_get_spectral_diagstats) + (struct wlan_objmgr_pdev *pdev, + struct spectral_diag_stats *stats); + void (*sptrlto_register_wmi_spectral_cmd_ops)( + struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops); + void (*sptrlto_register_netlink_cb)( + struct wlan_objmgr_pdev *pdev, + struct spectral_nl_cb *nl_cb); + bool (*sptrlto_use_nl_bcast)(struct wlan_objmgr_pdev *pdev); + void (*sptrlto_deregister_netlink_cb)(struct wlan_objmgr_pdev *pdev); + int (*sptrlto_process_spectral_report)( + struct wlan_objmgr_pdev *pdev, + void *payload); + QDF_STATUS (*sptrlto_set_dma_debug)( + struct wlan_objmgr_pdev *pdev, + enum spectral_dma_debug dma_debug_type, + bool dma_debug_enable); + bool (*sptrlto_direct_dma_support)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*sptrlto_check_and_do_dbr_ring_debug)( + struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*sptrlto_check_and_do_dbr_buff_debug)( + struct wlan_objmgr_pdev *pdev); + +}; +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ + +#ifdef WIFI_POS_CONVERGED +/* + * struct wlan_lmac_if_wifi_pos_tx_ops - structure of firmware tx function + * pointers for wifi_pos component + * @data_req_tx: function pointer to send wifi_pos req to firmware + * @wifi_pos_register_events: function pointer to register wifi_pos events + * @wifi_pos_deregister_events: function pointer to deregister wifi_pos events + * @wifi_pos_get_vht_ch_width: Function pointer to get max supported bw by FW + */ +struct wlan_lmac_if_wifi_pos_tx_ops { + QDF_STATUS (*data_req_tx)(struct wlan_objmgr_pdev *pdev, + struct oem_data_req *req); + QDF_STATUS (*wifi_pos_register_events)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*wifi_pos_deregister_events)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*wifi_pos_get_vht_ch_width)(struct wlan_objmgr_psoc *psoc, + enum phy_ch_width *ch_width); +}; +#endif + +#ifdef DIRECT_BUF_RX_ENABLE +/** + * struct wlan_lmac_if_direct_buf_rx_tx_ops - structire of direct buf rx txops + * @direct_buf_rx_module_register: Registration API callback for modules + * to register with direct buf rx framework + * @direct_buf_rx_module_unregister: Unregistration API to clean up module + * specific resources in DBR + * @direct_buf_rx_register_events: Registration of WMI events for direct + * buffer rx framework + * @direct_buf_rx_unregister_events: Unregistraton of WMI events for direct + * buffer rx framework + * @direct_buf_rx_print_ring_stat: Print ring status per module per pdev + * + * @direct_buf_rx_get_ring_params: Get ring parameters for module_id + * @direct_buf_rx_start_ring_debug: Start DBR ring debug + * @direct_buf_rx_stop_ring_debug: Stop DBR ring debug + * @direct_buf_rx_start_buffer_poisoning: Start DBR buffer poisoning + * @direct_buf_rx_stop_buffer_poisoning: Stop DBR buffer poisoning + */ +struct wlan_lmac_if_direct_buf_rx_tx_ops { + QDF_STATUS (*direct_buf_rx_module_register)( + struct wlan_objmgr_pdev *pdev, uint8_t mod_id, + struct dbr_module_config *dbr_config, + bool (*dbr_rsp_handler) + (struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_data *dbr_data)); + QDF_STATUS (*direct_buf_rx_module_unregister)( + struct wlan_objmgr_pdev *pdev, uint8_t mod_id); + QDF_STATUS (*direct_buf_rx_register_events)( + struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*direct_buf_rx_unregister_events)( + struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*direct_buf_rx_print_ring_stat)( + struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*direct_buf_rx_get_ring_params) + (struct wlan_objmgr_pdev *pdev, + struct module_ring_params *param, + uint8_t module_id, uint8_t srng_id); + QDF_STATUS (*direct_buf_rx_start_ring_debug)( + struct wlan_objmgr_pdev *pdev, uint8_t mod_id, + uint32_t num_ring_debug_entries); + QDF_STATUS (*direct_buf_rx_stop_ring_debug)( + struct wlan_objmgr_pdev *pdev, uint8_t mod_id); + QDF_STATUS (*direct_buf_rx_start_buffer_poisoning)( + struct wlan_objmgr_pdev *pdev, uint8_t mod_id, uint32_t value); + QDF_STATUS (*direct_buf_rx_stop_buffer_poisoning)( + struct wlan_objmgr_pdev *pdev, uint8_t mod_id); +}; +#endif /* DIRECT_BUF_RX_ENABLE */ + +#ifdef FEATURE_WLAN_TDLS +/* fwd declarations for tdls tx ops */ +struct tdls_info; +struct tdls_peer_update_state; +struct tdls_channel_switch_params; +struct sta_uapsd_trig_params; +/** + * struct wlan_lmac_if_tdls_tx_ops - south bound tx function pointers for tdls + * @update_fw_state: function to update tdls firmware state + * @update_peer_state: function to update tdls peer state + * @set_offchan_mode: function to set tdls offchannel mode + * @tdls_reg_ev_handler: function to register for tdls events + * @tdls_unreg_ev_handler: function to unregister for tdls events + * + * tdls module uses these functions to avail ol/da lmac services + */ +struct wlan_lmac_if_tdls_tx_ops { + QDF_STATUS (*update_fw_state)(struct wlan_objmgr_psoc *psoc, + struct tdls_info *req); + QDF_STATUS (*update_peer_state)(struct wlan_objmgr_psoc *psoc, + struct tdls_peer_update_state *param); + QDF_STATUS (*set_offchan_mode)(struct wlan_objmgr_psoc *psoc, + struct tdls_channel_switch_params *param); + QDF_STATUS (*tdls_reg_ev_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*tdls_unreg_ev_handler) (struct wlan_objmgr_psoc *psoc, + void *arg); +}; + +/* fwd declarations for tdls rx ops */ +struct tdls_event_info; +/** + * struct wlan_lmac_if_tdls_rx_ops - south bound rx function pointers for tdls + * @tdls_ev_handler: function to handler tdls event + * + * lmac modules uses this API to post scan events to tdls module + */ +struct wlan_lmac_if_tdls_rx_ops { + QDF_STATUS (*tdls_ev_handler)(struct wlan_objmgr_psoc *psoc, + struct tdls_event_info *info); +}; +#endif + +/** + * struct wlan_lmac_if_ftm_rx_ops - south bound rx function pointers for FTM + * @ftm_ev_handler: function to handle FTM event + * + * lmac modules uses this API to post FTM events to FTM module + */ +struct wlan_lmac_if_ftm_rx_ops { + QDF_STATUS (*ftm_ev_handler)(struct wlan_objmgr_pdev *pdev, + uint8_t *event_buf, uint32_t len); +}; + +/** + * struct wlan_lmac_reg_if_tx_ops - structure of tx function + * pointers for regulatory component + * @register_master_handler: pointer to register event handler + * @unregister_master_handler: pointer to unregister event handler + * @register_11d_new_cc_handler: pointer to register 11d cc event handler + * @unregister_11d_new_cc_handler: pointer to unregister 11d cc event handler + * @send_ctl_info: call-back function to send CTL info to firmware + */ +struct wlan_lmac_if_reg_tx_ops { + QDF_STATUS (*register_master_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*unregister_master_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + + QDF_STATUS (*set_country_code)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*fill_umac_legacy_chanlist)(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *cur_chan_list); + QDF_STATUS (*register_11d_new_cc_handler)( + struct wlan_objmgr_psoc *psoc, void *arg); + QDF_STATUS (*unregister_11d_new_cc_handler)( + struct wlan_objmgr_psoc *psoc, void *arg); + QDF_STATUS (*start_11d_scan)(struct wlan_objmgr_psoc *psoc, + struct reg_start_11d_scan_req *reg_start_11d_scan_req); + QDF_STATUS (*stop_11d_scan)(struct wlan_objmgr_psoc *psoc, + struct reg_stop_11d_scan_req *reg_stop_11d_scan_req); + bool (*is_there_serv_ready_extn)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*set_user_country_code)(struct wlan_objmgr_psoc *psoc, + uint8_t pdev_id, + struct cc_regdmn_s *rd); + QDF_STATUS (*set_country_failed)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*register_ch_avoid_event_handler)( + struct wlan_objmgr_psoc *psoc, void *arg); + QDF_STATUS (*unregister_ch_avoid_event_handler)( + struct wlan_objmgr_psoc *psoc, void *arg); + QDF_STATUS (*send_ctl_info)(struct wlan_objmgr_psoc *psoc, + struct reg_ctl_params *params); +}; + +/** + * struct wlan_lmac_if_dfs_tx_ops - Function pointer to call offload/lmac + * functions from DFS module. + * @dfs_enable: Enable DFS. + * @dfs_get_caps: Get DFS capabilities. + * @dfs_disable: Disable DFS + * @dfs_gettsf64: Get tsf64 value. + * @dfs_set_use_cac_prssi: Set use_cac_prssi value. + * @dfs_get_dfsdomain: Get DFS domain. + * @dfs_is_countryCode_CHINA: Check is country code CHINA. + * @dfs_get_thresholds: Get thresholds. + * @dfs_get_ext_busy: Get ext_busy. + * @dfs_get_target_type: Get target type. + * @dfs_is_countryCode_KOREA_ROC3: Check is county code Korea. + * @dfs_get_ah_devid: Get ah devid. + * @dfs_get_phymode_info: Get phymode info. + * @dfs_reg_ev_handler: Register dfs event handler. + * @dfs_process_emulate_bang_radar_cmd: Process emulate bang radar test command. + * @dfs_agile_ch_cfg_cmd: Send Agile Channel Configuration command + * @dfs_ocac_abort_cmd: Send Off-Channel CAC abort command. + * @dfs_is_pdev_5ghz: Check if the given pdev is 5GHz. + * @dfs_set_phyerr_filter_offload: Config phyerr filter offload. + * @dfs_send_offload_enable_cmd: Send dfs offload enable command to fw. + * @dfs_host_dfs_check_support: To check Host DFS confirmation feature + * support. + * @dfs_send_avg_radar_params_to_fw: Send average radar parameters to FW. + * @dfs_send_usenol_pdev_param: Send usenol pdev param to FW. + * @dfs_send_subchan_marking_pdev_param: Send subchan marking pdev param to FW. + * @dfs_check_mode_switch_state: Find if HW mode switch is in progress. + */ + +struct wlan_lmac_if_dfs_tx_ops { + QDF_STATUS (*dfs_enable)(struct wlan_objmgr_pdev *pdev, + int *is_fastclk, + struct wlan_dfs_phyerr_param *param, + uint32_t dfsdomain); + QDF_STATUS (*dfs_get_caps)(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_caps *dfs_caps); + QDF_STATUS (*dfs_disable)(struct wlan_objmgr_pdev *pdev, + int no_cac); + QDF_STATUS (*dfs_gettsf64)(struct wlan_objmgr_pdev *pdev, + uint64_t *tsf64); + QDF_STATUS (*dfs_set_use_cac_prssi)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_get_thresholds)(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_phyerr_param *param); + QDF_STATUS (*dfs_get_ext_busy)(struct wlan_objmgr_pdev *pdev, + int *dfs_ext_chan_busy); + QDF_STATUS (*dfs_get_target_type)(struct wlan_objmgr_pdev *pdev, + uint32_t *target_type); + QDF_STATUS (*dfs_get_ah_devid)(struct wlan_objmgr_pdev *pdev, + uint16_t *devid); + QDF_STATUS (*dfs_get_phymode_info)(struct wlan_objmgr_pdev *pdev, + uint32_t chan_mode, + uint32_t *mode_info, + bool is_2gvht_en); + QDF_STATUS (*dfs_reg_ev_handler)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*dfs_process_emulate_bang_radar_cmd)( + struct wlan_objmgr_pdev *pdev, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test); + QDF_STATUS (*dfs_agile_ch_cfg_cmd)( + struct wlan_objmgr_pdev *pdev, + struct dfs_agile_cac_params *adfs_params); + QDF_STATUS (*dfs_ocac_abort_cmd)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_is_pdev_5ghz)(struct wlan_objmgr_pdev *pdev, + bool *is_5ghz); + QDF_STATUS (*dfs_set_phyerr_filter_offload)( + struct wlan_objmgr_pdev *pdev, + bool dfs_phyerr_filter_offload); + bool (*dfs_is_tgt_offload)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*dfs_send_offload_enable_cmd)( + struct wlan_objmgr_pdev *pdev, + bool enable); + QDF_STATUS (*dfs_host_dfs_check_support)(struct wlan_objmgr_pdev *pdev, + bool *enabled); + QDF_STATUS (*dfs_send_avg_radar_params_to_fw)( + struct wlan_objmgr_pdev *pdev, + struct dfs_radar_found_params *params); + QDF_STATUS (*dfs_send_usenol_pdev_param)(struct wlan_objmgr_pdev *pdev, + bool usenol); + QDF_STATUS (*dfs_send_subchan_marking_pdev_param)( + struct wlan_objmgr_pdev *pdev, + bool subchanmark); + QDF_STATUS (*dfs_check_mode_switch_state)( + struct wlan_objmgr_pdev *pdev, + bool *is_hw_mode_switch_in_progress); +}; + +/** + * struct wlan_lmac_if_target_tx_ops - Function pointers to call target + * functions from other modules. + * @tgt_is_tgt_type_ar900b: To check AR900B target type. + * @tgt_is_tgt_type_ipq4019: To check IPQ4019 target type. + * @tgt_is_tgt_type_qca9984: To check QCA9984 target type. + * @tgt_is_tgt_type_qca9888: To check QCA9888 target type. + * @tgt_is_tgt_type_adrastea: To check QCS40X target type. + * @tgt_get_tgt_type: Get target type + * @tgt_get_tgt_version: Get target version + * @tgt_get_tgt_revision: Get target revision + */ +struct wlan_lmac_if_target_tx_ops { + bool (*tgt_is_tgt_type_ar900b)(uint32_t); + bool (*tgt_is_tgt_type_ipq4019)(uint32_t); + bool (*tgt_is_tgt_type_qca9984)(uint32_t); + bool (*tgt_is_tgt_type_qca9888)(uint32_t); + bool (*tgt_is_tgt_type_adrastea)(uint32_t); + uint32_t (*tgt_get_tgt_type)(struct wlan_objmgr_psoc *psoc); + uint32_t (*tgt_get_tgt_version)(struct wlan_objmgr_psoc *psoc); + uint32_t (*tgt_get_tgt_revision)(struct wlan_objmgr_psoc *psoc); +}; + +#ifdef WLAN_OFFCHAN_TXRX_ENABLE +/** + * struct wlan_lmac_if_offchan_txrx_ops - Function pointers to check target + * capabilities related to offchan txrx. + * @offchan_data_tid_support: To check if target supports separate tid for + * offchan data tx. + */ +struct wlan_lmac_if_offchan_txrx_ops { + bool (*offchan_data_tid_support)(struct wlan_objmgr_pdev *pdev); +}; +#endif + +#ifdef WLAN_SUPPORT_GREEN_AP +struct wlan_green_ap_egap_params; +/** + * struct wlan_lmac_if_green_ap_tx_ops - structure of tx function + * pointers for green ap component + * @enable_egap: function pointer to send enable egap indication to fw + * @ps_on_off_send: function pointer to send enable/disable green ap ps to fw + */ +struct wlan_lmac_if_green_ap_tx_ops { + QDF_STATUS (*enable_egap)(struct wlan_objmgr_pdev *pdev, + struct wlan_green_ap_egap_params *egap_params); + QDF_STATUS (*ps_on_off_send)(struct wlan_objmgr_pdev *pdev, + bool value, uint8_t pdev_id); + QDF_STATUS (*reset_dev)(struct wlan_objmgr_pdev *pdev); + uint16_t (*get_current_channel)(struct wlan_objmgr_pdev *pdev); + uint64_t (*get_current_channel_flags)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*get_capab)(struct wlan_objmgr_pdev *pdev); +}; +#endif + +#ifdef FEATURE_COEX +struct coex_config_params; + +/** + * struct wlan_lmac_if_coex_tx_ops - south bound tx function pointers for coex + * @coex_config_send: function pointer to send coex config to fw + */ +struct wlan_lmac_if_coex_tx_ops { + QDF_STATUS (*coex_config_send)(struct wlan_objmgr_pdev *pdev, + struct coex_config_params *param); +}; +#endif + +#ifdef WLAN_FEATURE_GPIO_CFG +struct gpio_config_params; +struct gpio_output_params; + +/** + * struct wlan_lmac_if_gpio_tx_ops - south bound tx function pointers for gpio + * @set_gpio_config: function pointert to send gpio config to fw + * @set_gpio_output: function pointert to send gpio output to fw + */ +struct wlan_lmac_if_gpio_tx_ops { + QDF_STATUS (*set_gpio_config)(struct wlan_objmgr_psoc *psoc, + struct gpio_config_params *param); + QDF_STATUS (*set_gpio_output)(struct wlan_objmgr_psoc *psoc, + struct gpio_output_params *param); +}; +#endif + +/** + * struct wlan_lmac_if_tx_ops - south bound tx function pointers + * @mgmt_txrx_tx_ops: mgmt txrx tx ops + * @scan: scan tx ops + * @dfs_tx_ops: dfs tx ops. + * @green_ap_tx_ops: green_ap tx_ops + * @cp_stats_tx_ops: cp stats tx_ops + * @coex_ops: coex tx_ops + * @gpio_ops: gpio tx_ops + * + * Callback function tabled to be registered with umac. + * umac will use the functional table to send events/frames to wmi + */ + +struct wlan_lmac_if_tx_ops { + /* Components to declare function pointers required by the module + * in component specific structure. + * The component specific ops structure can be declared in this file + * only + */ + struct wlan_lmac_if_mgmt_txrx_tx_ops mgmt_txrx_tx_ops; + struct wlan_lmac_if_scan_tx_ops scan; +#ifdef CONVERGED_P2P_ENABLE + struct wlan_lmac_if_p2p_tx_ops p2p; +#endif +#ifdef QCA_SUPPORT_SON + struct wlan_lmac_if_son_tx_ops son_tx_ops; +#endif + +#ifdef WLAN_ATF_ENABLE + struct wlan_lmac_if_atf_tx_ops atf_tx_ops; +#endif +#ifdef QCA_SUPPORT_CP_STATS + struct wlan_lmac_if_cp_stats_tx_ops cp_stats_tx_ops; +#endif +#ifdef WLAN_SA_API_ENABLE + struct wlan_lmac_if_sa_api_tx_ops sa_api_tx_ops; +#endif + +#ifdef WLAN_CFR_ENABLE + struct wlan_lmac_if_cfr_tx_ops cfr_tx_ops; +#endif + +#ifdef WLAN_CONV_SPECTRAL_ENABLE + struct wlan_lmac_if_sptrl_tx_ops sptrl_tx_ops; +#endif + +#ifdef WLAN_CONV_CRYPTO_SUPPORTED + struct wlan_lmac_if_crypto_tx_ops crypto_tx_ops; +#endif + +#ifdef WIFI_POS_CONVERGED + struct wlan_lmac_if_wifi_pos_tx_ops wifi_pos_tx_ops; +#endif + struct wlan_lmac_if_reg_tx_ops reg_ops; + struct wlan_lmac_if_dfs_tx_ops dfs_tx_ops; + +#ifdef FEATURE_WLAN_TDLS + struct wlan_lmac_if_tdls_tx_ops tdls_tx_ops; +#endif + +#ifdef WLAN_SUPPORT_FILS + struct wlan_lmac_if_fd_tx_ops fd_tx_ops; +#endif + struct wlan_lmac_if_mlme_tx_ops mops; + struct wlan_lmac_if_target_tx_ops target_tx_ops; + +#ifdef WLAN_OFFCHAN_TXRX_ENABLE + struct wlan_lmac_if_offchan_txrx_ops offchan_txrx_ops; +#endif + +#ifdef DIRECT_BUF_RX_ENABLE + struct wlan_lmac_if_direct_buf_rx_tx_ops dbr_tx_ops; +#endif + +#ifdef WLAN_SUPPORT_GREEN_AP + struct wlan_lmac_if_green_ap_tx_ops green_ap_tx_ops; +#endif + + struct wlan_lmac_if_ftm_tx_ops ftm_tx_ops; + +#ifdef FEATURE_COEX + struct wlan_lmac_if_coex_tx_ops coex_ops; +#endif + +#ifdef WLAN_FEATURE_GPIO_CFG + struct wlan_lmac_if_gpio_tx_ops gpio_ops; +#endif +}; + +/** + * struct wlan_lmac_if_mgmt_txrx_rx_ops - structure of rx function + * pointers for mgmt txrx component + * @mgmt_tx_completion_handler: function pointer to give tx completions + * to mgmt txrx comp. + * @mgmt_rx_frame_handler: function pointer to give rx frame to mgmt txrx comp. + * @mgmt_txrx_get_nbuf_from_desc_id: function pointer to get nbuf from desc id + * @mgmt_txrx_get_peer_from_desc_id: function pointer to get peer from desc id + * @mgmt_txrx_get_vdev_id_from_desc_id: function pointer to get vdev id from + * desc id + */ +struct wlan_lmac_if_mgmt_txrx_rx_ops { + QDF_STATUS (*mgmt_tx_completion_handler)( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id, uint32_t status, + void *tx_compl_params); + QDF_STATUS (*mgmt_rx_frame_handler)( + struct wlan_objmgr_psoc *psoc, + qdf_nbuf_t buf, + struct mgmt_rx_event_params *mgmt_rx_params); + qdf_nbuf_t (*mgmt_txrx_get_nbuf_from_desc_id)( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id); + struct wlan_objmgr_peer * (*mgmt_txrx_get_peer_from_desc_id)( + struct wlan_objmgr_pdev *pdev, uint32_t desc_id); + uint8_t (*mgmt_txrx_get_vdev_id_from_desc_id)( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id); + uint32_t (*mgmt_txrx_get_free_desc_pool_count)( + struct wlan_objmgr_pdev *pdev); +}; + +struct wlan_lmac_if_reg_rx_ops { + QDF_STATUS (*master_list_handler)(struct cur_regulatory_info + *reg_info); + QDF_STATUS (*reg_11d_new_cc_handler)(struct wlan_objmgr_psoc *psoc, + struct reg_11d_new_country *reg_11d_new_cc); + QDF_STATUS (*reg_set_regdb_offloaded)(struct wlan_objmgr_psoc *psoc, + bool val); + QDF_STATUS (*reg_set_11d_offloaded)(struct wlan_objmgr_psoc *psoc, + bool val); + QDF_STATUS (*reg_set_6ghz_supported)(struct wlan_objmgr_psoc *psoc, + bool val); + QDF_STATUS (*get_dfs_region)(struct wlan_objmgr_pdev *pdev, + enum dfs_reg *dfs_reg); + QDF_STATUS (*reg_ch_avoid_event_handler)(struct wlan_objmgr_psoc *psoc, + struct ch_avoid_ind_type *ch_avoid_ind); + uint8_t (*reg_freq_to_chan)(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq); + QDF_STATUS (*reg_set_chan_144)(struct wlan_objmgr_pdev *pdev, + bool enable_ch_144); + bool (*reg_get_chan_144)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*reg_program_default_cc)(struct wlan_objmgr_pdev *pdev, + uint16_t regdmn); + QDF_STATUS (*reg_get_current_regdomain)(struct wlan_objmgr_pdev *pdev, + struct cur_regdmn_info *cur_regdmn); + QDF_STATUS (*reg_enable_dfs_channels)(struct wlan_objmgr_pdev *pdev, + bool dfs_enable); + QDF_STATUS (*reg_modify_pdev_chan_range)(struct + wlan_objmgr_pdev *pdev); + QDF_STATUS (*reg_disable_chan_coex)(struct wlan_objmgr_pdev *pdev, + uint8_t unii_5g_bitmap); + bool (*reg_ignore_fw_reg_offload_ind)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*reg_get_unii_5g_bitmap)(struct wlan_objmgr_pdev *pdev, + uint8_t *bitmap); +}; + +#ifdef CONVERGED_P2P_ENABLE + +/* forward declarations for p2p rx ops */ +struct p2p_noa_info; +struct p2p_lo_event; +struct p2p_set_mac_filter_evt; + +/** + * struct wlan_lmac_if_p2p_rx_ops - structure of rx function pointers + * for P2P component + * @lo_ev_handler: function pointer to give listen offload event + * @noa_ev_handler: function pointer to give noa event + * @add_mac_addr_filter_evt_handler: function pointer to process add mac addr + * rx filter event + */ +struct wlan_lmac_if_p2p_rx_ops { +#ifdef FEATURE_P2P_LISTEN_OFFLOAD + QDF_STATUS (*lo_ev_handler)(struct wlan_objmgr_psoc *psoc, + struct p2p_lo_event *event_info); +#endif + QDF_STATUS (*noa_ev_handler)(struct wlan_objmgr_psoc *psoc, + struct p2p_noa_info *event_info); + QDF_STATUS (*add_mac_addr_filter_evt_handler)( + struct wlan_objmgr_psoc *psoc, + struct p2p_set_mac_filter_evt *event_info); + +}; +#endif + +#ifdef WLAN_ATF_ENABLE + +/** + * struct wlan_lmac_if_atf_rx_ops - ATF south bound rx function pointers + * @atf_get_atf_commit: Get ATF commit state + * @atf_get_fmcap: Get firmware capability for ATF + * @atf_get_obss_scale: Get OBSS scale + * @atf_get_mode: Get mode of ATF + * @atf_get_msdu_desc: Get msdu desc for ATF + * @atf_get_max_vdevs: Get maximum vdevs for a Radio + * @atf_get_peers: Get number of peers for a radio + * @atf_get_tput_based: Get throughput based enabled/disabled + * @atf_get_logging: Get logging enabled/disabled + * @atf_update_buf_held: Set Num buf held by subgroup + * @atf_get_ssidgroup: Get ssid group state + * @atf_get_vdev_ac_blk_cnt: Get AC block count for vdev + * @atf_get_peer_blk_txbitmap: Get peer tx traffic AC bitmap + * @atf_get_vdev_blk_txtraffic: Get vdev tx traffic block state + * @atf_get_sched: Get ATF scheduled policy + * @atf_get_tx_tokens: Get Tx tokens + * @atf_buf_distribute: Distribute Buffers + * @atf_get_tx_tokens_common: Get common tx tokens + * @atf_get_shadow_alloted_tx_tokens: Get shadow alloted tx tokens + * @atf_get_peer_stats: Get atf peer stats + * @atf_adjust_subgroup_txtokens: Adjust tokens based on actual duration + * @atf_account_subgroup_txtokens: Estimate tx time & update subgroup tokens + * @atf_subgroup_free_buf: On tx completion, update num buf held + * @atf_update_subgroup_tidstate: TID state (Paused/unpaused) of node + * @atf_get_subgroup_airtime: Get subgroup airtime + * @atf_get_token_allocated: Get atf token allocated + * @atf_get_token_utilized: Get atf token utilized + * @atf_set_sched: Set ATF schedule policy + * @atf_set_fmcap: Set firmware capability for ATF + * @atf_set_obss_scale: Set ATF obss scale + * @atf_set_msdu_desc: Set msdu desc + * @atf_set_max_vdevs: Set maximum vdevs number + * @atf_set_peers: Set peers number + * @atf_set_peer_stats: Set peer stats + * @atf_set_vdev_blk_txtraffic: Set Block/unblock vdev tx traffic + * @atf_peer_blk_txtraffic: Block peer tx traffic + * @atf_peer_unblk_txtraffic: Unblock peer tx traffic + * @atf_set_token_allocated: Set atf token allocated + * @atf_set_token_utilized: Set atf token utilized + */ +struct wlan_lmac_if_atf_rx_ops { + uint8_t (*atf_get_atf_commit)(struct wlan_objmgr_pdev *pdev); + uint32_t (*atf_get_fmcap)(struct wlan_objmgr_psoc *psoc); + uint32_t (*atf_get_obss_scale)(struct wlan_objmgr_pdev *pdev); + uint32_t (*atf_get_mode)(struct wlan_objmgr_psoc *psoc); + uint32_t (*atf_get_msdu_desc)(struct wlan_objmgr_psoc *psoc); + uint32_t (*atf_get_max_vdevs)(struct wlan_objmgr_psoc *psoc); + uint32_t (*atf_get_peers)(struct wlan_objmgr_psoc *psoc); + uint32_t (*atf_get_tput_based)(struct wlan_objmgr_pdev *pdev); + uint32_t (*atf_get_logging)(struct wlan_objmgr_pdev *pdev); + void* (*atf_update_buf_held)(struct wlan_objmgr_peer *peer, + int8_t ac); + uint32_t (*atf_get_ssidgroup)(struct wlan_objmgr_pdev *pdev); + uint32_t (*atf_get_vdev_ac_blk_cnt)(struct wlan_objmgr_vdev *vdev); + uint8_t (*atf_get_peer_blk_txbitmap)(struct wlan_objmgr_peer *peer); + uint8_t (*atf_get_vdev_blk_txtraffic)(struct wlan_objmgr_vdev *vdev); + uint32_t (*atf_get_sched)(struct wlan_objmgr_pdev *pdev); + uint32_t (*atf_get_tx_tokens)(struct wlan_objmgr_peer *peer); + uint32_t (*atf_buf_distribute)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + int8_t ac); + uint32_t (*atf_get_txtokens_common)(struct wlan_objmgr_pdev *pdev); + uint32_t (*atf_get_shadow_alloted_tx_tokens)( + struct wlan_objmgr_pdev *pdev); + void (*atf_get_peer_stats)(struct wlan_objmgr_peer *peer, + struct atf_stats *stats); + QDF_STATUS + (*atf_adjust_subgroup_txtokens)(struct wlan_objmgr_peer *pr, + uint8_t ac, uint32_t actual_duration, + uint32_t est_duration); + QDF_STATUS + (*atf_account_subgroup_txtokens)(struct wlan_objmgr_peer *pr, + uint8_t ac, + uint32_t duration); + QDF_STATUS + (*atf_subgroup_free_buf)(uint16_t buf_acc_size, void *bf_atf_sg); + QDF_STATUS + (*atf_update_subgroup_tidstate)(struct wlan_objmgr_peer *peer, + uint8_t atf_nodepaused); + uint8_t (*atf_get_subgroup_airtime)(struct wlan_objmgr_peer *peer, + uint8_t ac); + uint16_t (*atf_get_token_allocated)(struct wlan_objmgr_peer *peer); + uint16_t (*atf_get_token_utilized)(struct wlan_objmgr_peer *peer); + void (*atf_set_sched)(struct wlan_objmgr_pdev *pdev, uint32_t value); + void (*atf_set_fmcap)(struct wlan_objmgr_psoc *psoc, uint32_t value); + void (*atf_set_obss_scale)(struct wlan_objmgr_pdev *pdev, + uint32_t value); + void (*atf_set_msdu_desc)(struct wlan_objmgr_psoc *psoc, + uint32_t value); + void (*atf_set_max_vdevs)(struct wlan_objmgr_psoc *psoc, + uint32_t value); + void (*atf_set_peers)(struct wlan_objmgr_psoc *psoc, uint32_t value); + void (*atf_set_peer_stats)(struct wlan_objmgr_peer *peer, + struct atf_stats *stats); + void (*atf_set_vdev_blk_txtraffic)(struct wlan_objmgr_vdev *vdev, + uint8_t value); + void (*atf_peer_blk_txtraffic)(struct wlan_objmgr_peer *peer, + int8_t ac_id); + void (*atf_peer_unblk_txtraffic)(struct wlan_objmgr_peer *peer, + int8_t ac_id); + void (*atf_set_token_allocated)(struct wlan_objmgr_peer *peer, + uint16_t value); + void (*atf_set_token_utilized)(struct wlan_objmgr_peer *peer, + uint16_t value); +}; +#endif + +#ifdef WLAN_SUPPORT_FILS +/** + * struct wlan_lmac_if_fd_rx_ops - FILS Discovery specific Rx function pointers + * @fd_is_fils_enable: FILS enabled or not + * @fd_alloc: Allocate FD buffer + * @fd_stop: Stop and free deferred FD buffer + * @fd_free: Free FD frame buffer + * @fd_get_valid_fd_period: Get valid FD period + * @fd_swfda_handler: SWFDA event handler + * @fd_offload: Offload FD frame + */ +struct wlan_lmac_if_fd_rx_ops { + uint8_t (*fd_is_fils_enable)(struct wlan_objmgr_vdev *vdev); + void (*fd_alloc)(struct wlan_objmgr_vdev *vdev); + void (*fd_stop)(struct wlan_objmgr_vdev *vdev); + void (*fd_free)(struct wlan_objmgr_vdev *vdev); + uint32_t (*fd_get_valid_fd_period)(struct wlan_objmgr_vdev *vdev, + uint8_t *is_modified); + QDF_STATUS (*fd_swfda_handler)(struct wlan_objmgr_vdev *vdev); + QDF_STATUS (*fd_offload)(struct wlan_objmgr_vdev *vdev, + uint32_t vdev_id); +}; +#endif + +#ifdef WLAN_SA_API_ENABLE + +/** + * struct wlan_lmac_if_sa_api_rx_ops - SA API south bound rx function pointers + */ +struct wlan_lmac_if_sa_api_rx_ops { + uint32_t (*sa_api_get_sa_supported)(struct wlan_objmgr_psoc *psoc); + uint32_t (*sa_api_get_validate_sw)(struct wlan_objmgr_psoc *psoc); + void (*sa_api_enable_sa)(struct wlan_objmgr_psoc *psoc, uint32_t value); + uint32_t (*sa_api_get_sa_enable)(struct wlan_objmgr_psoc *psoc); + void (*sa_api_peer_assoc_hanldler)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, struct sa_rate_cap *); + uint32_t (*sa_api_update_tx_feedback)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct sa_tx_feedback *feedback); + uint32_t (*sa_api_update_rx_feedback)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct sa_rx_feedback *feedback); + uint32_t (*sa_api_ucfg_set_param)(struct wlan_objmgr_pdev *pdev, + char *val); + uint32_t (*sa_api_ucfg_get_param)(struct wlan_objmgr_pdev *pdev, + char *val); + uint32_t (*sa_api_is_tx_feedback_enabled) + (struct wlan_objmgr_pdev *pdev); + uint32_t (*sa_api_is_rx_feedback_enabled) + (struct wlan_objmgr_pdev *pdev); + uint32_t (*sa_api_convert_rate_2g)(uint32_t rate); + uint32_t (*sa_api_convert_rate_5g)(uint32_t rate); + uint32_t (*sa_api_get_sa_mode)(struct wlan_objmgr_pdev *pdev); + uint32_t (*sa_api_get_beacon_txantenna)(struct wlan_objmgr_pdev *pdev); + uint32_t (*sa_api_cwm_action)(struct wlan_objmgr_pdev *pdev); +}; +#endif + +#ifdef WLAN_CFR_ENABLE + +/** + * struct wlan_lmac_if_cfr_rx_ops - CFR south bound rx function pointers + * @cfr_support_set: Set the CFR support based on FW advert + * @cfr_info_send: Send cfr info to upper layers + */ +struct wlan_lmac_if_cfr_rx_ops { + void (*cfr_support_set)(struct wlan_objmgr_psoc *psoc, uint32_t value); + uint32_t (*cfr_info_send)(struct wlan_objmgr_pdev *pdev, void *head, + size_t hlen, void *data, size_t dlen, + void *tail, size_t tlen); +}; +#endif + +#ifdef WLAN_CONV_SPECTRAL_ENABLE +/** + * struct wlan_lmac_if_sptrl_rx_ops - Spectral south bound Rx operations + * + * @sptrlro_get_target_handle: Get Spectral handle for target/LMAC private data + * @sptrlro_vdev_get_chan_freq_seg2: Get secondary 80 center frequency + * @sptrlro_spectral_is_feature_disabled: Check if spectral feature is disabled + */ +struct wlan_lmac_if_sptrl_rx_ops { + void * (*sptrlro_get_target_handle)(struct wlan_objmgr_pdev *pdev); + int16_t (*sptrlro_vdev_get_chan_freq)(struct wlan_objmgr_vdev *vdev); + int16_t (*sptrlro_vdev_get_chan_freq_seg2) + (struct wlan_objmgr_vdev *vdev); + enum phy_ch_width (*sptrlro_vdev_get_ch_width)( + struct wlan_objmgr_vdev *vdev); + int (*sptrlro_vdev_get_sec20chan_freq_mhz)( + struct wlan_objmgr_vdev *vdev, + uint16_t *sec20chan_freq); + bool (*sptrlro_spectral_is_feature_disabled)( + struct wlan_objmgr_psoc *psoc); +}; +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ + +#ifdef WIFI_POS_CONVERGED +/** + * struct wlan_lmac_if_wifi_pos_rx_ops - structure of rx function + * pointers for wifi_pos component + * @oem_rsp_event_rx: callback for WMI_OEM_RESPONSE_EVENTID + */ +struct wlan_lmac_if_wifi_pos_rx_ops { + int (*oem_rsp_event_rx)(struct wlan_objmgr_psoc *psoc, + struct oem_data_rsp *oem_rsp); +}; +#endif + +/** + * struct wlan_lmac_if_dfs_rx_ops - Function pointers to call dfs functions + * from lmac/offload. + * @dfs_get_radars: Calls init radar table functions. + * @dfs_process_phyerr: Process phyerr. + * @dfs_destroy_object: Destroys the DFS object. + * @dfs_radar_enable: Enables the radar. + * @dfs_is_radar_enabled: Check if the radar is enabled. + * @dfs_control: Used to process ioctls related to DFS. + * @dfs_is_precac_timer_running: Check whether precac timer is running. + * @dfs_find_vht80_chan_for_precac: Find VHT80 channel for precac. + * @dfs_cancel_precac_timer: Cancel the precac timer. + * @dfs_override_precac_timeout: Override the default precac timeout. + * @dfs_set_precac_enable: Set precac enable flag. + * @dfs_get_legacy_precac_enable: Get the precac enable flag for + * partial offload (legacy) chipsets. + * @dfs_set_precac_intermediate_chan: Set intermediate channel for precac. + * @dfs_get_precac_intermediate_chan: Get intermediate channel for precac. + * @dfs_precac_preferred_chan: Configure preferred channel during + * precac. + * dfs_get_precac_chan_state: Get precac status for given channel. + * dfs_start_precac_timer: Start precac timer. + * @dfs_get_override_precac_timeout: Get precac timeout. + * @dfs_set_current_channel: Set DFS current channel. + * @dfs_process_radar_ind: Process radar found indication. + * @dfs_dfs_cac_complete_ind: Process cac complete indication. + * @dfs_agile_precac_start: Initiate Agile PreCAC run. + * @dfs_set_agile_precac_state: Set agile precac state. + * @dfs_reset_adfs_config: Reset agile dfs variables. + * @dfs_dfs_ocac_complete_ind: Process offchan cac complete indication. + * @dfs_stop: Clear dfs timers. + * @dfs_reinit_timers: Reinitialize DFS timers. + * @dfs_enable_stadfs: Enable/Disable STADFS capability. + * @dfs_is_stadfs_enabled: Get STADFS capability value. + * @dfs_process_phyerr_filter_offload:Process radar event. + * @dfs_is_phyerr_filter_offload: Check whether phyerr filter is offload. + * @dfs_action_on_status: Trigger the action to be taken based on + * on host dfs status received from fw. + * @dfs_override_status_timeout: Override the value of host dfs status + * wait timeout. + * @dfs_get_override_status_timeout: Get the value of host dfs status wait + * timeout. + * @dfs_reset_spoof_test: Checks if radar detection is enabled. + * @dfs_is_disable_radar_marking_set: Check if dis_radar_marking param is set. + * @dfs_allow_hw_pulses: Set or unset dfs_allow_hw_pulses which + * allow or disallow HW pulses. + * @dfs_is_hw_pulses_allowed: Check if HW pulses are allowed or not. + * @dfs_set_fw_adfs_support: Set the agile DFS FW support in DFS. + * @dfs_reset_dfs_prevchan: Reset DFS previous channel structure. + * @dfs_init_tmp_psoc_nol: Init temporary PSOC NOL structure. + * @dfs_deinit_tmp_psoc_nol: Deinit temporary PSOC NOL structure. + * @dfs_save_dfs_nol_in_psoc: Copy DFS NOL data to the PSOC copy. + * @dfs_reinit_nol_from_psoc_copy: Reinit DFS NOL from the PSOC NOL copy. + * @dfs_reinit_precac_lists: Reinit precac lists from other pdev. + * @dfs_complete_deferred_tasks: Process mode switch completion in DFS. + */ +struct wlan_lmac_if_dfs_rx_ops { + QDF_STATUS (*dfs_get_radars)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_process_phyerr)(struct wlan_objmgr_pdev *pdev, + void *buf, + uint16_t datalen, + uint8_t r_rssi, + uint8_t r_ext_rssi, + uint32_t r_rs_tstamp, + uint64_t r_fulltsf); + QDF_STATUS (*dfs_destroy_object)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_radar_enable)( + struct wlan_objmgr_pdev *pdev, + int no_cac, + uint32_t opmode, + bool enable); + void (*dfs_is_radar_enabled)(struct wlan_objmgr_pdev *pdev, + int *ignore_dfs); + QDF_STATUS (*dfs_control)(struct wlan_objmgr_pdev *pdev, + u_int id, + void *indata, + uint32_t insize, + void *outdata, + uint32_t *outsize, + int *error); + QDF_STATUS (*dfs_is_precac_timer_running)(struct wlan_objmgr_pdev *pdev, + bool *is_precac_timer_running + ); +#ifdef CONFIG_CHAN_NUM_API + QDF_STATUS + (*dfs_find_vht80_chan_for_precac)(struct wlan_objmgr_pdev *pdev, + uint32_t chan_mode, + uint8_t ch_freq_seg1, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t *phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile); +#endif +#ifdef CONFIG_CHAN_FREQ_API + QDF_STATUS + (*dfs_find_vht80_chan_for_precac_for_freq)(struct wlan_objmgr_pdev + *pdev, + uint32_t chan_mode, + uint16_t ch_freq_seg1, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t *phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile); +#endif + QDF_STATUS (*dfs_agile_precac_start)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_set_agile_precac_state)(struct wlan_objmgr_pdev *pdev, + int agile_precac_state); + QDF_STATUS (*dfs_reset_adfs_config)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS + (*dfs_dfs_ocac_complete_ind)(struct wlan_objmgr_pdev *pdev, + struct vdev_adfs_complete_status *ocac_st); + QDF_STATUS (*dfs_start_precac_timer)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_cancel_precac_timer)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_override_precac_timeout)( + struct wlan_objmgr_pdev *pdev, + int precac_timeout); + QDF_STATUS (*dfs_set_precac_enable)(struct wlan_objmgr_pdev *pdev, + uint32_t value); + QDF_STATUS + (*dfs_get_legacy_precac_enable)(struct wlan_objmgr_pdev *pdev, + bool *buff); + QDF_STATUS (*dfs_get_agile_precac_enable)(struct wlan_objmgr_pdev *pdev, + bool *buff); +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT + QDF_STATUS (*dfs_set_precac_intermediate_chan)(struct wlan_objmgr_pdev *pdev, + uint32_t value); + QDF_STATUS (*dfs_get_precac_intermediate_chan)(struct wlan_objmgr_pdev *pdev, + int *buff); +#ifdef CONFIG_CHAN_NUM_API + bool (*dfs_decide_precac_preferred_chan)(struct wlan_objmgr_pdev *pdev, + uint8_t *pref_chan, + enum wlan_phymode mode); +#endif +#ifdef CONFIG_CHAN_FREQ_API + bool (*dfs_decide_precac_preferred_chan_for_freq)(struct + wlan_objmgr_pdev *pdev, + uint16_t *pref_chan_freq, + enum wlan_phymode mode); +#endif + +#ifdef CONFIG_CHAN_NUM_API + enum precac_chan_state (*dfs_get_precac_chan_state)(struct wlan_objmgr_pdev *pdev, + uint8_t precac_chan); +#endif + +#ifdef CONFIG_CHAN_FREQ_API + enum precac_chan_state (*dfs_get_precac_chan_state_for_freq)(struct + wlan_objmgr_pdev *pdev, + uint16_t pcac_freq); +#endif +#endif + QDF_STATUS (*dfs_get_override_precac_timeout)( + struct wlan_objmgr_pdev *pdev, + int *precac_timeout); +#ifdef CONFIG_CHAN_NUM_API + QDF_STATUS (*dfs_set_current_channel)(struct wlan_objmgr_pdev *pdev, + uint16_t ic_freq, + uint64_t ic_flags, + uint16_t ic_flagext, + uint8_t ic_ieee, + uint8_t ic_vhtop_ch_freq_seg1, + uint8_t ic_vhtop_ch_freq_seg2); +#endif +#ifdef CONFIG_CHAN_FREQ_API + QDF_STATUS + (*dfs_set_current_channel_for_freq)(struct wlan_objmgr_pdev *pdev, + uint16_t ic_freq, + uint64_t ic_flags, + uint16_t ic_flagext, + uint8_t ic_ieee, + uint8_t ic_vhtop_ch_freq_seg1, + uint8_t ic_vhtop_ch_freq_seg2, + uint16_t dfs_ch_mhz_freq_seg1, + uint16_t dfs_ch_mhz_freq_seg2); +#endif +#ifdef DFS_COMPONENT_ENABLE + QDF_STATUS (*dfs_process_radar_ind)(struct wlan_objmgr_pdev *pdev, + struct radar_found_info *radar_found); + QDF_STATUS (*dfs_dfs_cac_complete_ind)(struct wlan_objmgr_pdev *pdev, + uint32_t vdev_id); +#endif + QDF_STATUS (*dfs_stop)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_reinit_timers)(struct wlan_objmgr_pdev *pdev); + void (*dfs_enable_stadfs)(struct wlan_objmgr_pdev *pdev, bool val); + bool (*dfs_is_stadfs_enabled)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_process_phyerr_filter_offload)( + struct wlan_objmgr_pdev *pdev, + struct radar_event_info *wlan_radar_info); + QDF_STATUS (*dfs_is_phyerr_filter_offload)( + struct wlan_objmgr_psoc *psoc, + bool *is_phyerr_filter_offload); + QDF_STATUS (*dfs_action_on_status)(struct wlan_objmgr_pdev *pdev, + u_int32_t *dfs_status_check); + QDF_STATUS (*dfs_override_status_timeout)( + struct wlan_objmgr_pdev *pdev, + int status_timeout); + QDF_STATUS (*dfs_get_override_status_timeout)( + struct wlan_objmgr_pdev *pdev, + int *status_timeout); + QDF_STATUS (*dfs_reset_spoof_test)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_is_disable_radar_marking_set)(struct wlan_objmgr_pdev + *pdev, + bool *disable_radar_marking); + QDF_STATUS (*dfs_set_nol_subchannel_marking)( + struct wlan_objmgr_pdev *pdev, + bool value); + QDF_STATUS (*dfs_get_nol_subchannel_marking)( + struct wlan_objmgr_pdev *pdev, + bool *value); + QDF_STATUS (*dfs_set_bw_reduction)(struct wlan_objmgr_pdev *pdev, + bool value); + QDF_STATUS (*dfs_is_bw_reduction_needed)(struct wlan_objmgr_pdev *pdev, + bool *bw_reduce); + void (*dfs_allow_hw_pulses)(struct wlan_objmgr_pdev *pdev, + bool allow_hw_pulses); + bool (*dfs_is_hw_pulses_allowed)(struct wlan_objmgr_pdev *pdev); + void (*dfs_set_fw_adfs_support)(struct wlan_objmgr_pdev *pdev, + bool fw_adfs_support_160, + bool fw_adfs_support_non_160); + void (*dfs_reset_dfs_prevchan)(struct wlan_objmgr_pdev *pdev); + void (*dfs_init_tmp_psoc_nol)(struct wlan_objmgr_pdev *pdev, + uint8_t num_radios); + void (*dfs_deinit_tmp_psoc_nol)(struct wlan_objmgr_pdev *pdev); + void (*dfs_save_dfs_nol_in_psoc)(struct wlan_objmgr_pdev *pdev, + uint8_t pdev_id, + uint16_t low_5ghz_freq, + uint16_t high_5ghz_freq); + void (*dfs_reinit_nol_from_psoc_copy)(struct wlan_objmgr_pdev *pdev, + uint8_t pdev_id); + void (*dfs_reinit_precac_lists)(struct wlan_objmgr_pdev *src_pdev, + struct wlan_objmgr_pdev *dest_pdev, + uint16_t low_5g_freq, + uint16_t high_5g_freq); + void (*dfs_complete_deferred_tasks)(struct wlan_objmgr_pdev *pdev); +}; + +/** + * struct wlan_lmac_if_mlme_rx_ops: Function pointer to call MLME functions + * @vdev_mgr_start_response: function to handle start response + * @vdev_mgr_stop_response: function to handle stop response + * @vdev_mgr_delete_response: function to handle delete response + * @vdev_mgr_offload_bcn_tx_status_event_handle: function to handle offload + * beacon tx + * @vdev_mgr_tbttoffset_update_handle: function to handle tbtt offset event + * @vdev_mgr_peer_delete_all_response: function to handle vdev delete all peer + * event + * @psoc_get_wakelock_info: function to get wakelock info + * @psoc_get_vdev_response_timer_info: function to get vdev response timer + * structure for a specific vdev id + * @vdev_mgr_multi_vdev_restart_resp: function to handle mvr response + */ +struct wlan_lmac_if_mlme_rx_ops { + QDF_STATUS (*vdev_mgr_start_response)( + struct wlan_objmgr_psoc *psoc, + struct vdev_start_response *rsp); + QDF_STATUS (*vdev_mgr_stop_response)( + struct wlan_objmgr_psoc *psoc, + struct vdev_stop_response *rsp); + QDF_STATUS (*vdev_mgr_delete_response)( + struct wlan_objmgr_psoc *psoc, + struct vdev_delete_response *rsp); + QDF_STATUS (*vdev_mgr_offload_bcn_tx_status_event_handle)( + uint32_t vdev_id, + uint32_t tx_status); + QDF_STATUS (*vdev_mgr_tbttoffset_update_handle)( + uint32_t num_vdevs, + bool is_ext); + QDF_STATUS (*vdev_mgr_peer_delete_all_response)( + struct wlan_objmgr_psoc *psoc, + struct peer_delete_all_response *rsp); + QDF_STATUS (*vdev_mgr_multi_vdev_restart_resp)( + struct wlan_objmgr_psoc *psoc, + struct multi_vdev_restart_resp *rsp); +#ifdef FEATURE_VDEV_RSP_WAKELOCK + struct psoc_mlme_wakelock *(*psoc_get_wakelock_info)( + struct wlan_objmgr_psoc *psoc); +#endif + struct vdev_response_timer *(*psoc_get_vdev_response_timer_info)( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id); +}; + +#ifdef WLAN_SUPPORT_GREEN_AP +struct wlan_lmac_if_green_ap_rx_ops { + bool (*is_ps_enabled)(struct wlan_objmgr_pdev *pdev); + bool (*is_dbg_print_enabled)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*ps_get)(struct wlan_objmgr_pdev *pdev, uint8_t *value); + QDF_STATUS (*ps_set)(struct wlan_objmgr_pdev *pdev, uint8_t value); + void (*suspend_handle)(struct wlan_objmgr_pdev *pdev); +}; +#endif + +/** + * struct wlan_lmac_if_rx_ops - south bound rx function pointers + * @mgmt_txrx_tx_ops: mgmt txrx rx ops + * @scan: scan rx ops + * @dfs_rx_ops: dfs rx ops. + * @cp_stats_rx_ops: cp stats rx ops + * @cfr_rx_ops: cfr rx ops + * + * Callback function tabled to be registered with lmac/wmi. + * lmac will use the functional table to send events/frames to umac + */ +struct wlan_lmac_if_rx_ops { + /* Components to declare function pointers required by the module + * in component specific structure. + * The component specific ops structure can be declared in this file + * only + */ + struct wlan_lmac_if_mgmt_txrx_rx_ops mgmt_txrx_rx_ops; + struct wlan_lmac_if_scan_rx_ops scan; +#ifdef CONVERGED_P2P_ENABLE + struct wlan_lmac_if_p2p_rx_ops p2p; +#endif + +#ifdef WLAN_ATF_ENABLE + struct wlan_lmac_if_atf_rx_ops atf_rx_ops; +#endif +#ifdef QCA_SUPPORT_CP_STATS + struct wlan_lmac_if_cp_stats_rx_ops cp_stats_rx_ops; +#endif +#ifdef WLAN_SA_API_ENABLE + struct wlan_lmac_if_sa_api_rx_ops sa_api_rx_ops; +#endif + +#ifdef WLAN_CFR_ENABLE + struct wlan_lmac_if_cfr_rx_ops cfr_rx_ops; +#endif + +#ifdef WLAN_CONV_SPECTRAL_ENABLE + struct wlan_lmac_if_sptrl_rx_ops sptrl_rx_ops; +#endif + +#ifdef WLAN_CONV_CRYPTO_SUPPORTED + struct wlan_lmac_if_crypto_rx_ops crypto_rx_ops; +#endif +#ifdef WIFI_POS_CONVERGED + struct wlan_lmac_if_wifi_pos_rx_ops wifi_pos_rx_ops; +#endif + struct wlan_lmac_if_reg_rx_ops reg_rx_ops; + struct wlan_lmac_if_dfs_rx_ops dfs_rx_ops; +#ifdef FEATURE_WLAN_TDLS + struct wlan_lmac_if_tdls_rx_ops tdls_rx_ops; +#endif + +#ifdef WLAN_SUPPORT_FILS + struct wlan_lmac_if_fd_rx_ops fd_rx_ops; +#endif + + struct wlan_lmac_if_mlme_rx_ops mops; + +#ifdef WLAN_SUPPORT_GREEN_AP + struct wlan_lmac_if_green_ap_rx_ops green_ap_rx_ops; +#endif + + struct wlan_lmac_if_ftm_rx_ops ftm_rx_ops; +}; + +/* Function pointer to call legacy tx_ops registration in OL/WMA. + */ +extern QDF_STATUS (*wlan_lmac_if_umac_tx_ops_register) + (struct wlan_lmac_if_tx_ops *tx_ops); +#endif /* _WLAN_LMAC_IF_DEF_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/src/wlan_lmac_if.c b/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/src/wlan_lmac_if.c new file mode 100644 index 0000000000000000000000000000000000000000..cd8fdf12a2c70aadf4bd790cea35e480a44f1c34 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/src/wlan_lmac_if.c @@ -0,0 +1,707 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_mem.h" +#include +#include "wlan_lmac_if_def.h" +#include "wlan_lmac_if_api.h" +#include "wlan_mgmt_txrx_tgt_api.h" +#include "wlan_scan_tgt_api.h" +#include +#include +#ifdef WLAN_ATF_ENABLE +#include "wlan_atf_tgt_api.h" +#endif +#ifdef WLAN_SA_API_ENABLE +#include "wlan_sa_api_tgt_api.h" +#endif +#ifdef WIFI_POS_CONVERGED +#include "target_if_wifi_pos.h" +#endif /* WIFI_POS_CONVERGED */ +#include "wlan_reg_tgt_api.h" +#ifdef CONVERGED_P2P_ENABLE +#include "wlan_p2p_tgt_api.h" +#endif +#ifdef FEATURE_WLAN_TDLS +#include "wlan_tdls_tgt_api.h" +#endif + +#ifdef WLAN_CONV_CRYPTO_SUPPORTED +#include "wlan_crypto_global_api.h" +#endif +#ifdef DFS_COMPONENT_ENABLE +#include +#include +#include +#endif + +#ifdef WLAN_SUPPORT_GREEN_AP +#include +#include +#endif +#include + +#ifdef WLAN_SUPPORT_FILS +#include +#endif + +#ifdef QCA_SUPPORT_CP_STATS +#include +#endif /* QCA_SUPPORT_CP_STATS */ +#include + +#ifdef WLAN_CFR_ENABLE +#include "wlan_cfr_tgt_api.h" +#endif + +#ifdef WIFI_POS_CONVERGED +#include "wifi_pos_api.h" +#endif + +/* Function pointer for OL/WMA specific UMAC tx_ops + * registration. + */ +QDF_STATUS (*wlan_lmac_if_umac_tx_ops_register) + (struct wlan_lmac_if_tx_ops *tx_ops); +qdf_export_symbol(wlan_lmac_if_umac_tx_ops_register); + +static void +tgt_vdev_mgr_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + tgt_vdev_mgr_register_rx_ops(rx_ops); +} + +#ifdef QCA_SUPPORT_CP_STATS +/** + * wlan_lmac_if_cp_stats_rx_ops_register() - API to register cp stats Rx Ops + * @rx_ops: pointer to lmac rx ops + * + * This API will be used to register function pointers for FW events + * + * Return: void + */ +static void +wlan_lmac_if_cp_stats_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + tgt_cp_stats_register_rx_ops(rx_ops); +} +#else +static void +wlan_lmac_if_cp_stats_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif /* QCA_SUPPORT_CP_STATS */ + +#ifdef WLAN_ATF_ENABLE +/** + * wlan_lmac_if_atf_rx_ops_register() - Function to register ATF RX ops. + */ +static void +wlan_lmac_if_atf_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_atf_rx_ops *atf_rx_ops = &rx_ops->atf_rx_ops; + + /* ATF rx ops */ + atf_rx_ops->atf_get_atf_commit = tgt_atf_get_atf_commit; + atf_rx_ops->atf_get_fmcap = tgt_atf_get_fmcap; + atf_rx_ops->atf_get_obss_scale = tgt_atf_get_obss_scale; + atf_rx_ops->atf_get_mode = tgt_atf_get_mode; + atf_rx_ops->atf_get_msdu_desc = tgt_atf_get_msdu_desc; + atf_rx_ops->atf_get_max_vdevs = tgt_atf_get_max_vdevs; + atf_rx_ops->atf_get_peers = tgt_atf_get_peers; + atf_rx_ops->atf_get_tput_based = tgt_atf_get_tput_based; + atf_rx_ops->atf_get_logging = tgt_atf_get_logging; + atf_rx_ops->atf_update_buf_held = tgt_atf_update_buf_held; + atf_rx_ops->atf_get_ssidgroup = tgt_atf_get_ssidgroup; + atf_rx_ops->atf_get_vdev_ac_blk_cnt = tgt_atf_get_vdev_ac_blk_cnt; + atf_rx_ops->atf_get_peer_blk_txbitmap = tgt_atf_get_peer_blk_txbitmap; + atf_rx_ops->atf_get_vdev_blk_txtraffic = tgt_atf_get_vdev_blk_txtraffic; + atf_rx_ops->atf_get_sched = tgt_atf_get_sched; + atf_rx_ops->atf_get_tx_tokens = tgt_atf_get_tx_tokens; + atf_rx_ops->atf_account_subgroup_txtokens = + tgt_atf_account_subgroup_txtokens; + atf_rx_ops->atf_adjust_subgroup_txtokens = + tgt_atf_adjust_subgroup_txtokens; + atf_rx_ops->atf_get_subgroup_airtime = tgt_atf_get_subgroup_airtime; + atf_rx_ops->atf_subgroup_free_buf = tgt_atf_subgroup_free_buf; + atf_rx_ops->atf_update_subgroup_tidstate = + tgt_atf_update_subgroup_tidstate; + atf_rx_ops->atf_buf_distribute = tgt_atf_buf_distribute; + atf_rx_ops->atf_get_shadow_alloted_tx_tokens = + tgt_atf_get_shadow_alloted_tx_tokens; + atf_rx_ops->atf_get_txtokens_common = tgt_atf_get_txtokens_common; + atf_rx_ops->atf_get_peer_stats = tgt_atf_get_peer_stats; + atf_rx_ops->atf_get_token_allocated = tgt_atf_get_token_allocated; + atf_rx_ops->atf_get_token_utilized = tgt_atf_get_token_utilized; + + atf_rx_ops->atf_set_sched = tgt_atf_set_sched; + atf_rx_ops->atf_set_fmcap = tgt_atf_set_fmcap; + atf_rx_ops->atf_set_obss_scale = tgt_atf_set_obss_scale; + atf_rx_ops->atf_set_msdu_desc = tgt_atf_set_msdu_desc; + atf_rx_ops->atf_set_max_vdevs = tgt_atf_set_max_vdevs; + atf_rx_ops->atf_set_peers = tgt_atf_set_peers; + atf_rx_ops->atf_set_peer_stats = tgt_atf_set_peer_stats; + atf_rx_ops->atf_set_vdev_blk_txtraffic = tgt_atf_set_vdev_blk_txtraffic; + atf_rx_ops->atf_peer_blk_txtraffic = tgt_atf_peer_blk_txtraffic; + atf_rx_ops->atf_peer_unblk_txtraffic = tgt_atf_peer_unblk_txtraffic; + atf_rx_ops->atf_set_token_allocated = tgt_atf_set_token_allocated; + atf_rx_ops->atf_set_token_utilized = tgt_atf_set_token_utilized; +} +#else +static void +wlan_lmac_if_atf_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif + +#ifdef WLAN_SUPPORT_FILS +static void +wlan_lmac_if_fd_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_fd_rx_ops *fd_rx_ops = &rx_ops->fd_rx_ops; + + fd_rx_ops->fd_is_fils_enable = tgt_fd_is_fils_enable; + fd_rx_ops->fd_alloc = tgt_fd_alloc; + fd_rx_ops->fd_stop = tgt_fd_stop; + fd_rx_ops->fd_free = tgt_fd_free; + fd_rx_ops->fd_get_valid_fd_period = tgt_fd_get_valid_fd_period; + fd_rx_ops->fd_swfda_handler = tgt_fd_swfda_handler; + fd_rx_ops->fd_offload = tgt_fd_offload; +} +#else +static void +wlan_lmac_if_fd_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif + +#ifdef WLAN_SA_API_ENABLE +/** + * wlan_lmac_if_sa_api_rx_ops_register() - Function to register SA_API RX ops. + */ +static void +wlan_lmac_if_sa_api_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_sa_api_rx_ops *sa_api_rx_ops = &rx_ops->sa_api_rx_ops; + + /* SA API rx ops */ + sa_api_rx_ops->sa_api_get_sa_supported = tgt_sa_api_get_sa_supported; + sa_api_rx_ops->sa_api_get_validate_sw = tgt_sa_api_get_validate_sw; + sa_api_rx_ops->sa_api_enable_sa = tgt_sa_api_enable_sa; + sa_api_rx_ops->sa_api_get_sa_enable = tgt_sa_api_get_sa_enable; + + sa_api_rx_ops->sa_api_peer_assoc_hanldler = tgt_sa_api_peer_assoc_hanldler; + sa_api_rx_ops->sa_api_update_tx_feedback = tgt_sa_api_update_tx_feedback; + sa_api_rx_ops->sa_api_update_rx_feedback = tgt_sa_api_update_rx_feedback; + + sa_api_rx_ops->sa_api_ucfg_set_param = tgt_sa_api_ucfg_set_param; + sa_api_rx_ops->sa_api_ucfg_get_param = tgt_sa_api_ucfg_get_param; + + sa_api_rx_ops->sa_api_is_tx_feedback_enabled = tgt_sa_api_is_tx_feedback_enabled; + sa_api_rx_ops->sa_api_is_rx_feedback_enabled = tgt_sa_api_is_rx_feedback_enabled; + + sa_api_rx_ops->sa_api_convert_rate_2g = tgt_sa_api_convert_rate_2g; + sa_api_rx_ops->sa_api_convert_rate_5g = tgt_sa_api_convert_rate_5g; + sa_api_rx_ops->sa_api_get_sa_mode = tgt_sa_api_get_sa_mode; + + sa_api_rx_ops->sa_api_get_beacon_txantenna = tgt_sa_api_get_beacon_txantenna; + sa_api_rx_ops->sa_api_cwm_action = tgt_sa_api_cwm_action; +} +#else +static void +wlan_lmac_if_sa_api_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif + +#ifdef WLAN_CFR_ENABLE +/** + * wlan_lmac_if_cfr_rx_ops_register() - Function to register CFR RX ops + */ +static void +wlan_lmac_if_cfr_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_cfr_rx_ops *cfr_rx_ops = &rx_ops->cfr_rx_ops; + + /* CFR rx ops */ + cfr_rx_ops->cfr_support_set = tgt_cfr_support_set; + cfr_rx_ops->cfr_info_send = tgt_cfr_info_send; +} +#else +static void +wlan_lmac_if_cfr_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif + +#ifdef WLAN_CONV_CRYPTO_SUPPORTED +static void +wlan_lmac_if_crypto_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + wlan_crypto_register_crypto_rx_ops(&rx_ops->crypto_rx_ops); +} +#else +static void +wlan_lmac_if_crypto_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif + +#ifdef WIFI_POS_CONVERGED +static void wlan_lmac_if_umac_rx_ops_register_wifi_pos( + struct wlan_lmac_if_rx_ops *rx_ops) +{ + wifi_pos_register_rx_ops(rx_ops); +} +#else +static void wlan_lmac_if_umac_rx_ops_register_wifi_pos( + struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif /* WIFI_POS_CONVERGED */ + +static void wlan_lmac_if_umac_reg_rx_ops_register( + struct wlan_lmac_if_rx_ops *rx_ops) +{ + rx_ops->reg_rx_ops.master_list_handler = + tgt_reg_process_master_chan_list; + + rx_ops->reg_rx_ops.reg_11d_new_cc_handler = + tgt_reg_process_11d_new_country; + + rx_ops->reg_rx_ops.reg_set_regdb_offloaded = + tgt_reg_set_regdb_offloaded; + + rx_ops->reg_rx_ops.reg_set_11d_offloaded = + tgt_reg_set_11d_offloaded; + + rx_ops->reg_rx_ops.reg_set_6ghz_supported = + tgt_reg_set_6ghz_supported; + + rx_ops->reg_rx_ops.get_dfs_region = + wlan_reg_get_dfs_region; + + rx_ops->reg_rx_ops.reg_ch_avoid_event_handler = + tgt_reg_process_ch_avoid_event; + + rx_ops->reg_rx_ops.reg_freq_to_chan = + wlan_reg_freq_to_chan; + + rx_ops->reg_rx_ops.reg_set_chan_144 = + ucfg_reg_modify_chan_144; + + rx_ops->reg_rx_ops.reg_get_chan_144 = + ucfg_reg_get_en_chan_144; + + rx_ops->reg_rx_ops.reg_program_default_cc = + ucfg_reg_program_default_cc; + + rx_ops->reg_rx_ops.reg_get_current_regdomain = + wlan_reg_get_curr_regdomain; + + rx_ops->reg_rx_ops.reg_enable_dfs_channels = + ucfg_reg_enable_dfs_channels; + + rx_ops->reg_rx_ops.reg_modify_pdev_chan_range = + wlan_reg_modify_pdev_chan_range; + + rx_ops->reg_rx_ops.reg_ignore_fw_reg_offload_ind = + tgt_reg_ignore_fw_reg_offload_ind; + + rx_ops->reg_rx_ops.reg_disable_chan_coex = + wlan_reg_disable_chan_coex; + + rx_ops->reg_rx_ops.reg_get_unii_5g_bitmap = + ucfg_reg_get_unii_5g_bitmap; +} + +#ifdef CONVERGED_P2P_ENABLE +#ifdef FEATURE_P2P_LISTEN_OFFLOAD +static void wlan_lmac_if_umac_rx_ops_register_p2p( + struct wlan_lmac_if_rx_ops *rx_ops) +{ + rx_ops->p2p.lo_ev_handler = tgt_p2p_lo_event_cb; + rx_ops->p2p.noa_ev_handler = tgt_p2p_noa_event_cb; + rx_ops->p2p.add_mac_addr_filter_evt_handler = + tgt_p2p_add_mac_addr_status_event_cb; +} +#else +static void wlan_lmac_if_umac_rx_ops_register_p2p( + struct wlan_lmac_if_rx_ops *rx_ops) +{ + rx_ops->p2p.noa_ev_handler = tgt_p2p_noa_event_cb; + rx_ops->p2p.add_mac_addr_filter_evt_handler = + tgt_p2p_add_mac_addr_status_event_cb; +} +#endif +#else +static void wlan_lmac_if_umac_rx_ops_register_p2p( + struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif + +/* + * register_precac_auto_chan_rx_ops_ieee() - Register auto chan switch rx ops + * for IEEE channel based APIs. + * rx_ops: Pointer to wlan_lmac_if_dfs_rx_ops + */ +#ifdef DFS_COMPONENT_ENABLE +#if defined(WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT) && defined(CONFIG_CHAN_NUM_API) +static inline void +register_precac_auto_chan_rx_ops_ieee(struct wlan_lmac_if_dfs_rx_ops *rx_ops) +{ + if (!rx_ops) + return; + rx_ops->dfs_get_precac_chan_state = ucfg_dfs_get_precac_chan_state; +} +#else +static inline void +register_precac_auto_chan_rx_ops_ieee(struct wlan_lmac_if_dfs_rx_ops *rx_ops) +{ +} +#endif +#endif + +/* + * register_precac_auto_chan_rx_ops_freq() - Register auto chan switch rx ops + * for frequency based channel APIs. + * rx_ops: Pointer to wlan_lmac_if_dfs_rx_ops + */ +#ifdef DFS_COMPONENT_ENABLE +#if defined(WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT) && defined(CONFIG_CHAN_FREQ_API) +static inline void +register_precac_auto_chan_rx_ops_freq(struct wlan_lmac_if_dfs_rx_ops *rx_ops) +{ + if (!rx_ops) + return; + rx_ops->dfs_get_precac_chan_state_for_freq = + ucfg_dfs_get_precac_chan_state_for_freq; +} +#else +static inline void +register_precac_auto_chan_rx_ops_freq(struct wlan_lmac_if_dfs_rx_ops *rx_ops) +{ +} +#endif +#endif + +#ifdef DFS_COMPONENT_ENABLE +#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT +static inline void +register_precac_auto_chan_rx_ops(struct wlan_lmac_if_dfs_rx_ops *rx_ops) +{ + if (!rx_ops) + return; + rx_ops->dfs_set_precac_intermediate_chan = + ucfg_dfs_set_precac_intermediate_chan; + rx_ops->dfs_get_precac_intermediate_chan = + ucfg_dfs_get_precac_intermediate_chan; +} +#else +static inline void +register_precac_auto_chan_rx_ops(struct wlan_lmac_if_dfs_rx_ops *rx_ops) +{ +} +#endif + +/* + * register_dfs_rx_ops_for_freq() - Register DFS rx ops for frequency based + * channel APIs. + * rx_ops: Pointer to wlan_lmac_if_dfs_rx_ops. + */ +#ifdef CONFIG_CHAN_FREQ_API +static void register_dfs_rx_ops_for_freq(struct wlan_lmac_if_dfs_rx_ops *rx_ops) +{ + if (!rx_ops) + return; + rx_ops->dfs_find_vht80_chan_for_precac_for_freq = + tgt_dfs_find_vht80_precac_chan_freq; + rx_ops->dfs_set_current_channel_for_freq = + tgt_dfs_set_current_channel_for_freq; +} +#endif + +/* + * register_dfs_rx_ops_for_ieee() - Register DFS rx ops for IEEE channel based + * APIs + * rx_ops: Pointer to wlan_lmac_if_dfs_rx_ops. + */ + +#ifdef CONFIG_CHAN_NUM_API +static void register_dfs_rx_ops_for_ieee(struct wlan_lmac_if_dfs_rx_ops *rx_ops) +{ + if (!rx_ops) + return; + rx_ops->dfs_find_vht80_chan_for_precac = + tgt_dfs_find_vht80_chan_for_precac; + rx_ops->dfs_set_current_channel = + tgt_dfs_set_current_channel; +} +#endif + +static QDF_STATUS +wlan_lmac_if_umac_dfs_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_dfs_rx_ops *dfs_rx_ops; + + dfs_rx_ops = &rx_ops->dfs_rx_ops; + + dfs_rx_ops->dfs_get_radars = tgt_dfs_get_radars; + dfs_rx_ops->dfs_process_phyerr = tgt_dfs_process_phyerr; + dfs_rx_ops->dfs_destroy_object = tgt_dfs_destroy_object; + dfs_rx_ops->dfs_radar_enable = tgt_dfs_radar_enable; + dfs_rx_ops->dfs_is_radar_enabled = tgt_dfs_is_radar_enabled; + dfs_rx_ops->dfs_control = tgt_dfs_control; + dfs_rx_ops->dfs_is_precac_timer_running = + tgt_dfs_is_precac_timer_running; + dfs_rx_ops->dfs_agile_precac_start = + tgt_dfs_agile_precac_start; + dfs_rx_ops->dfs_set_agile_precac_state = + tgt_dfs_set_agile_precac_state; + dfs_rx_ops->dfs_start_precac_timer = utils_dfs_start_precac_timer; + dfs_rx_ops->dfs_cancel_precac_timer = utils_dfs_cancel_precac_timer; + dfs_rx_ops->dfs_reset_adfs_config = ucfg_dfs_reset_agile_config; + dfs_rx_ops->dfs_override_precac_timeout = + ucfg_dfs_override_precac_timeout; + dfs_rx_ops->dfs_set_precac_enable = ucfg_dfs_set_precac_enable; + dfs_rx_ops->dfs_get_legacy_precac_enable = + ucfg_dfs_get_legacy_precac_enable; + dfs_rx_ops->dfs_get_agile_precac_enable = + ucfg_dfs_get_agile_precac_enable; + dfs_rx_ops->dfs_get_override_precac_timeout = + ucfg_dfs_get_override_precac_timeout; + dfs_rx_ops->dfs_process_radar_ind = tgt_dfs_process_radar_ind; + dfs_rx_ops->dfs_dfs_cac_complete_ind = tgt_dfs_cac_complete; + dfs_rx_ops->dfs_dfs_ocac_complete_ind = tgt_dfs_ocac_complete; + dfs_rx_ops->dfs_stop = tgt_dfs_stop; + dfs_rx_ops->dfs_reinit_timers = ucfg_dfs_reinit_timers; + dfs_rx_ops->dfs_enable_stadfs = tgt_dfs_enable_stadfs; + dfs_rx_ops->dfs_is_stadfs_enabled = tgt_dfs_is_stadfs_enabled; + dfs_rx_ops->dfs_process_phyerr_filter_offload = + tgt_dfs_process_phyerr_filter_offload; + dfs_rx_ops->dfs_is_phyerr_filter_offload = + tgt_dfs_is_phyerr_filter_offload; + dfs_rx_ops->dfs_action_on_status = tgt_dfs_action_on_status_from_fw; + dfs_rx_ops->dfs_override_status_timeout = + ucfg_dfs_set_override_status_timeout; + dfs_rx_ops->dfs_get_override_status_timeout = + ucfg_dfs_get_override_status_timeout; + dfs_rx_ops->dfs_reset_spoof_test = + tgt_dfs_reset_spoof_test; + dfs_rx_ops->dfs_is_disable_radar_marking_set = + utils_dfs_get_disable_radar_marking; + dfs_rx_ops->dfs_set_nol_subchannel_marking = + ucfg_dfs_set_nol_subchannel_marking; + dfs_rx_ops->dfs_get_nol_subchannel_marking = + ucfg_dfs_get_nol_subchannel_marking; + dfs_rx_ops->dfs_set_bw_reduction = + utils_dfs_bw_reduce; + dfs_rx_ops->dfs_is_bw_reduction_needed = + utils_dfs_is_bw_reduce; + dfs_rx_ops->dfs_allow_hw_pulses = + ucfg_dfs_allow_hw_pulses; + dfs_rx_ops->dfs_is_hw_pulses_allowed = + ucfg_dfs_is_hw_pulses_allowed; + dfs_rx_ops->dfs_set_fw_adfs_support = + tgt_dfs_set_fw_adfs_support; + dfs_rx_ops->dfs_reset_dfs_prevchan = + utils_dfs_reset_dfs_prevchan; + dfs_rx_ops->dfs_init_tmp_psoc_nol = + tgt_dfs_init_tmp_psoc_nol; + dfs_rx_ops->dfs_deinit_tmp_psoc_nol = + tgt_dfs_deinit_tmp_psoc_nol; + dfs_rx_ops->dfs_save_dfs_nol_in_psoc = + tgt_dfs_save_dfs_nol_in_psoc; + dfs_rx_ops->dfs_reinit_nol_from_psoc_copy = + tgt_dfs_reinit_nol_from_psoc_copy; + dfs_rx_ops->dfs_reinit_precac_lists = + tgt_dfs_reinit_precac_lists; + dfs_rx_ops->dfs_complete_deferred_tasks = + tgt_dfs_complete_deferred_tasks; + register_precac_auto_chan_rx_ops(dfs_rx_ops); + register_precac_auto_chan_rx_ops_ieee(dfs_rx_ops); + register_precac_auto_chan_rx_ops_freq(dfs_rx_ops); + register_dfs_rx_ops_for_freq(dfs_rx_ops); + register_dfs_rx_ops_for_ieee(dfs_rx_ops); + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS +wlan_lmac_if_umac_dfs_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef FEATURE_WLAN_TDLS +static QDF_STATUS +wlan_lmac_if_umac_tdls_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + rx_ops->tdls_rx_ops.tdls_ev_handler = tgt_tdls_event_handler; + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS +wlan_lmac_if_umac_tdls_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef WLAN_SUPPORT_GREEN_AP +static QDF_STATUS +wlan_lmac_if_umac_green_ap_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + rx_ops->green_ap_rx_ops.is_ps_enabled = wlan_green_ap_is_ps_enabled; + rx_ops->green_ap_rx_ops.is_dbg_print_enabled = + ucfg_green_ap_get_debug_prints; + rx_ops->green_ap_rx_ops.ps_set = ucfg_green_ap_set_ps_config; + rx_ops->green_ap_rx_ops.ps_get = ucfg_green_ap_get_ps_config; + rx_ops->green_ap_rx_ops.suspend_handle = wlan_green_ap_suspend_handle; + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS +wlan_lmac_if_umac_green_ap_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef QCA_WIFI_FTM +static QDF_STATUS +wlan_lmac_if_umac_ftm_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_ftm_rx_ops *ftm_rx_ops; + + ftm_rx_ops = &rx_ops->ftm_rx_ops; + + ftm_rx_ops->ftm_ev_handler = wlan_ftm_process_utf_event; + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS +wlan_lmac_if_umac_ftm_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + return QDF_STATUS_SUCCESS; +} +#endif +/** + * wlan_lmac_if_umac_rx_ops_register() - UMAC rx handler register + * @rx_ops: Pointer to rx_ops structure to be populated + * + * Register umac RX callabacks which will be called by DA/OL/WMA/WMI + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS +wlan_lmac_if_umac_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + /* Component specific public api's to be called to register + * respective callbacks + * Ex: rx_ops->fp = function; + */ + struct wlan_lmac_if_mgmt_txrx_rx_ops *mgmt_txrx_rx_ops; + + if (!rx_ops) { + qdf_print("%s: lmac if rx ops pointer is NULL", __func__); + return QDF_STATUS_E_INVAL; + } + + /* mgmt txrx rx ops */ + mgmt_txrx_rx_ops = &rx_ops->mgmt_txrx_rx_ops; + + mgmt_txrx_rx_ops->mgmt_tx_completion_handler = + tgt_mgmt_txrx_tx_completion_handler; + mgmt_txrx_rx_ops->mgmt_rx_frame_handler = + tgt_mgmt_txrx_rx_frame_handler; + mgmt_txrx_rx_ops->mgmt_txrx_get_nbuf_from_desc_id = + tgt_mgmt_txrx_get_nbuf_from_desc_id; + mgmt_txrx_rx_ops->mgmt_txrx_get_peer_from_desc_id = + tgt_mgmt_txrx_get_peer_from_desc_id; + mgmt_txrx_rx_ops->mgmt_txrx_get_vdev_id_from_desc_id = + tgt_mgmt_txrx_get_vdev_id_from_desc_id; + mgmt_txrx_rx_ops->mgmt_txrx_get_free_desc_pool_count = + tgt_mgmt_txrx_get_free_desc_pool_count; + + /* scan rx ops */ + rx_ops->scan.scan_ev_handler = tgt_scan_event_handler; + rx_ops->scan.scan_set_max_active_scans = tgt_scan_set_max_active_scans; + + wlan_lmac_if_atf_rx_ops_register(rx_ops); + + wlan_lmac_if_cp_stats_rx_ops_register(rx_ops); + + wlan_lmac_if_sa_api_rx_ops_register(rx_ops); + + wlan_lmac_if_cfr_rx_ops_register(rx_ops); + + wlan_lmac_if_crypto_rx_ops_register(rx_ops); + /* wifi_pos rx ops */ + wlan_lmac_if_umac_rx_ops_register_wifi_pos(rx_ops); + + /* tdls rx ops */ + wlan_lmac_if_umac_tdls_rx_ops_register(rx_ops); + + wlan_lmac_if_umac_reg_rx_ops_register(rx_ops); + + /* p2p rx ops */ + wlan_lmac_if_umac_rx_ops_register_p2p(rx_ops); + + /* DFS rx_ops */ + wlan_lmac_if_umac_dfs_rx_ops_register(rx_ops); + + wlan_lmac_if_umac_green_ap_rx_ops_register(rx_ops); + + /* FTM rx_ops */ + wlan_lmac_if_umac_ftm_rx_ops_register(rx_ops); + + /* FILS Discovery */ + wlan_lmac_if_fd_rx_ops_register(rx_ops); + + /* MLME rx_ops */ + tgt_vdev_mgr_rx_ops_register(rx_ops); + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_lmac_if_set_umac_txops_registration_cb() - tx registration + * callback assignment + * @dev_type: Dev type can be either Direct attach or Offload + * @handler: handler to be called for LMAC tx ops registration + * + * API to assign appropriate tx registration callback handler based on the + * device type(Offload or Direct attach) + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_lmac_if_set_umac_txops_registration_cb(QDF_STATUS (*handler) + (struct wlan_lmac_if_tx_ops *)) +{ + wlan_lmac_if_umac_tx_ops_register = handler; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_lmac_if_set_umac_txops_registration_cb); + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/green_ap/core/src/wlan_green_ap_main.c b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/core/src/wlan_green_ap_main.c new file mode 100644 index 0000000000000000000000000000000000000000..f0fc6b768d4c9c1498b45c8a9c263486e74dfceb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/core/src/wlan_green_ap_main.c @@ -0,0 +1,389 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains main green ap function definitions + */ + +#include "wlan_green_ap_main_i.h" + +/* + * wlan_green_ap_ant_ps_reset() - Reset function + * @green_ap - green ap context + * + * Reset fiunction, so that Antenna Mask can come into effect. + * This applies for only few of the hardware chips + * + * Return: QDF_STATUS + */ +static QDF_STATUS wlan_green_ap_ant_ps_reset + (struct wlan_pdev_green_ap_ctx *green_ap_ctx) +{ + struct wlan_lmac_if_green_ap_tx_ops *green_ap_tx_ops; + struct wlan_objmgr_pdev *pdev; + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + pdev = green_ap_ctx->pdev; + + green_ap_tx_ops = wlan_psoc_get_green_ap_tx_ops(green_ap_ctx); + if (!green_ap_tx_ops) { + green_ap_err("green ap tx ops obtained are NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!green_ap_tx_ops->reset_dev) + return QDF_STATUS_SUCCESS; + + /* + * Add protection against green AP enabling interrupts + * when not valid or no VAPs exist + */ + if (wlan_util_is_vdev_active(pdev, WLAN_GREEN_AP_ID) == + QDF_STATUS_SUCCESS) + green_ap_tx_ops->reset_dev(pdev); + else + green_ap_err("Green AP tried to enable IRQs when invalid"); + + return QDF_STATUS_SUCCESS; +} + +struct wlan_lmac_if_green_ap_tx_ops * +wlan_psoc_get_green_ap_tx_ops(struct wlan_pdev_green_ap_ctx *green_ap_ctx) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev = green_ap_ctx->pdev; + + if (!pdev) { + green_ap_err("pdev context obtained is NULL"); + return NULL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + green_ap_err("pdev context obtained is NULL"); + return NULL; + } + + return &((psoc->soc_cb.tx_ops.green_ap_tx_ops)); +} + +bool wlan_is_egap_enabled(struct wlan_pdev_green_ap_ctx *green_ap_ctx) +{ + struct wlan_green_ap_egap_params *egap_params; + + if (!green_ap_ctx) { + green_ap_err("green ap context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + egap_params = &green_ap_ctx->egap_params; + + if (egap_params->fw_egap_support && + egap_params->host_enable_egap && + egap_params->egap_feature_flags) + return true; + return false; +} +qdf_export_symbol(wlan_is_egap_enabled); + +/** + * wlan_green_ap_ps_event_state_update() - Update PS state and event + * @pdev: pdev pointer + * @state: ps state + * @event: ps event + * + * @Return: Success or Failure + */ +static QDF_STATUS wlan_green_ap_ps_event_state_update( + struct wlan_pdev_green_ap_ctx *green_ap_ctx, + enum wlan_green_ap_ps_state state, + enum wlan_green_ap_ps_event event) +{ + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_ctx->ps_state = state; + green_ap_ctx->ps_event = event; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_green_ap_state_mc(struct wlan_pdev_green_ap_ctx *green_ap_ctx, + enum wlan_green_ap_ps_event event) +{ + struct wlan_lmac_if_green_ap_tx_ops *green_ap_tx_ops; + uint8_t pdev_id; + + /* + * Remove the assignments once channel info is available for + * converged component. + */ + uint16_t channel = 1; + uint32_t channel_flags = 1; + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!green_ap_ctx->pdev) { + green_ap_err("pdev obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(green_ap_ctx->pdev); + + green_ap_tx_ops = wlan_psoc_get_green_ap_tx_ops(green_ap_ctx); + if (!green_ap_tx_ops) { + green_ap_err("green ap tx ops obtained are NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!green_ap_tx_ops->ps_on_off_send) { + green_ap_err("tx op for sending enbale/disable green ap is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&green_ap_ctx->lock); + + if (green_ap_tx_ops->get_current_channel) + channel = green_ap_tx_ops->get_current_channel( + green_ap_ctx->pdev); + + if (green_ap_tx_ops->get_current_channel_flags) + channel_flags = green_ap_tx_ops->get_current_channel_flags( + green_ap_ctx->pdev); + + /* handle the green ap ps event */ + switch (event) { + case WLAN_GREEN_AP_ADD_STA_EVENT: + green_ap_ctx->num_nodes++; + break; + + case WLAN_GREEN_AP_DEL_STA_EVENT: + if (green_ap_ctx->num_nodes) + green_ap_ctx->num_nodes--; + break; + + case WLAN_GREEN_AP_ADD_MULTISTREAM_STA_EVENT: + green_ap_ctx->num_nodes_multistream++; + break; + + case WLAN_GREEN_AP_DEL_MULTISTREAM_STA_EVENT: + if (green_ap_ctx->num_nodes_multistream) + green_ap_ctx->num_nodes_multistream--; + break; + + case WLAN_GREEN_AP_PS_START_EVENT: + case WLAN_GREEN_AP_PS_STOP_EVENT: + case WLAN_GREEN_AP_PS_ON_EVENT: + case WLAN_GREEN_AP_PS_WAIT_EVENT: + break; + + default: + green_ap_err("Invalid event: %d", event); + break; + } + + green_ap_debug("Green-AP event: %d, state: %d, num_nodes: %d", + event, green_ap_ctx->ps_state, green_ap_ctx->num_nodes); + + /* Confirm that power save is enabled before doing state transitions */ + if (!green_ap_ctx->ps_enable) { + green_ap_debug("Green-AP is disabled"); + if (green_ap_ctx->ps_state == WLAN_GREEN_AP_PS_ON_STATE) { + if (green_ap_tx_ops->ps_on_off_send(green_ap_ctx->pdev, + false, pdev_id)) + green_ap_err("failed to set green ap mode"); + wlan_green_ap_ant_ps_reset(green_ap_ctx); + } + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_IDLE_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + goto done; + } + + /* handle the green ap ps state */ + switch (green_ap_ctx->ps_state) { + case WLAN_GREEN_AP_PS_IDLE_STATE: + if ((green_ap_ctx->num_nodes && + green_ap_ctx->ps_mode == WLAN_GREEN_AP_MODE_NO_STA) || + (green_ap_ctx->num_nodes_multistream && + green_ap_ctx->ps_mode == WLAN_GREEN_AP_MODE_NUM_STREAM)) { + /* + * Multistream nodes present, switchoff the power save + */ + green_ap_info("Transition to OFF from IDLE"); + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_OFF_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + } else { + /* No Active nodes, get into power save */ + green_ap_info("Transition to WAIT from IDLE"); + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_WAIT_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + qdf_timer_start(&green_ap_ctx->ps_timer, + green_ap_ctx->ps_trans_time * 1000); + } + break; + + case WLAN_GREEN_AP_PS_OFF_STATE: + if ((!green_ap_ctx->num_nodes && + green_ap_ctx->ps_mode == WLAN_GREEN_AP_MODE_NO_STA) || + (!green_ap_ctx->num_nodes_multistream && + green_ap_ctx->ps_mode == WLAN_GREEN_AP_MODE_NUM_STREAM)) { + green_ap_info("Transition to WAIT from OFF"); + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_WAIT_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + qdf_timer_start(&green_ap_ctx->ps_timer, + green_ap_ctx->ps_trans_time * 1000); + } + break; + + case WLAN_GREEN_AP_PS_WAIT_STATE: + if ((!green_ap_ctx->num_nodes && + green_ap_ctx->ps_mode == WLAN_GREEN_AP_MODE_NO_STA) || + (!green_ap_ctx->num_nodes_multistream && + green_ap_ctx->ps_mode == WLAN_GREEN_AP_MODE_NUM_STREAM)) { + if ((channel == 0) || (channel_flags == 0)) { + /* + * Stay in the current state and restart the + * timer to check later. + */ + qdf_timer_start(&green_ap_ctx->ps_timer, + green_ap_ctx->ps_on_time * 1000); + } else { + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_ON_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + + green_ap_info("Transition to ON from WAIT"); + green_ap_tx_ops->ps_on_off_send( + green_ap_ctx->pdev, true, pdev_id); + wlan_green_ap_ant_ps_reset(green_ap_ctx); + + if (green_ap_ctx->ps_on_time) + qdf_timer_start(&green_ap_ctx->ps_timer, + green_ap_ctx->ps_on_time * 1000); + } + } else { + green_ap_info("Transition to OFF from WAIT"); + qdf_timer_stop(&green_ap_ctx->ps_timer); + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_OFF_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + } + break; + + case WLAN_GREEN_AP_PS_ON_STATE: + if ((green_ap_ctx->num_nodes && + green_ap_ctx->ps_mode == WLAN_GREEN_AP_MODE_NO_STA) || + (green_ap_ctx->num_nodes_multistream && + green_ap_ctx->ps_mode == WLAN_GREEN_AP_MODE_NUM_STREAM)) { + qdf_timer_stop(&green_ap_ctx->ps_timer); + if (green_ap_tx_ops->ps_on_off_send( + green_ap_ctx->pdev, false, pdev_id)) { + green_ap_err("Failed to set Green AP mode"); + goto done; + } + wlan_green_ap_ant_ps_reset(green_ap_ctx); + green_ap_info("Transition to OFF from ON\n"); + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_OFF_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + } else if ((green_ap_ctx->ps_event == + WLAN_GREEN_AP_PS_WAIT_EVENT) && + (green_ap_ctx->ps_on_time)) { + /* ps_on_time timeout, switch to ps wait */ + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_WAIT_STATE, + WLAN_GREEN_AP_PS_ON_EVENT); + + if (green_ap_tx_ops->ps_on_off_send( + green_ap_ctx->pdev, false, pdev_id)) { + green_ap_err("Failed to set Green AP mode"); + goto done; + } + + wlan_green_ap_ant_ps_reset(green_ap_ctx); + green_ap_info("Transition to WAIT from ON\n"); + qdf_timer_start(&green_ap_ctx->ps_timer, + green_ap_ctx->ps_trans_time * 1000); + } + break; + + default: + green_ap_err("invalid state %d", green_ap_ctx->ps_state); + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_OFF_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + break; + } + +done: + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return QDF_STATUS_SUCCESS; +} + +void wlan_green_ap_timer_fn(void *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + struct wlan_objmgr_pdev *pdev_ctx = (struct wlan_objmgr_pdev *)pdev; + + if (!pdev_ctx) { + green_ap_err("pdev context passed is NULL"); + return; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev_ctx, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return; + } + wlan_green_ap_state_mc(green_ap_ctx, green_ap_ctx->ps_event); +} + +void wlan_green_ap_check_mode(struct wlan_objmgr_pdev *pdev, + void *object, + void *arg) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)object; + uint8_t *flag = (uint8_t *)arg; + + wlan_vdev_obj_lock(vdev); + if (wlan_vdev_mlme_get_opmode(vdev) != QDF_SAP_MODE) + *flag = 1; + + wlan_vdev_obj_unlock(vdev); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/green_ap/core/src/wlan_green_ap_main_i.h b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/core/src/wlan_green_ap_main_i.h new file mode 100644 index 0000000000000000000000000000000000000000..664531d2a5f9b329edd8171c7540df39d2984a57 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/core/src/wlan_green_ap_main_i.h @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + +/** + * DOC: This file has main green ap structures. + */ + +#ifndef _WLAN_GREEN_AP_MAIN_I_H_ +#define _WLAN_GREEN_AP_MAIN_I_H_ + +#include +#include +#include +#include +#include +#include +#include "wlan_utility.h" +#include + +#define WLAN_GREEN_AP_PS_ON_TIME (0) +#define WLAN_GREEN_AP_PS_TRANS_TIME (20) + +#define green_ap_alert(params...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_GREEN_AP, params) +#define green_ap_err(params...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_GREEN_AP, params) +#define green_ap_warn(params...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_GREEN_AP, params) +#define green_ap_notice(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_GREEN_AP, params) +#define green_ap_info(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_GREEN_AP, params) +#define green_ap_debug(params...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_GREEN_AP, params) + +#define greenap_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_GREEN_AP, params) +#define greenap_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_GREEN_AP, params) +#define greenap_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_GREEN_AP, params) +#define greenap_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_GREEN_AP, params) +#define greenap_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_GREEN_AP, params) + +#define WLAN_GREEN_AP_PS_DISABLE 0 +#define WLAN_GREEN_AP_PS_ENABLE 1 +#define WLAN_GREEN_AP_PS_SUSPEND 2 +/** + * enum wlan_green_ap_ps_state - PS states + * @WLAN_GREEN_AP_PS_IDLE_STATE - Idle + * @WLAN_GREEN_AP_PS_OFF_STATE - Off + * @WLAN_GREEN_AP_PS_WAIT_STATE - Wait + * @WLAN_GREEN_AP_PS_ON_STATE - On + */ +enum wlan_green_ap_ps_state { + WLAN_GREEN_AP_PS_IDLE_STATE = 1, + WLAN_GREEN_AP_PS_OFF_STATE, + WLAN_GREEN_AP_PS_WAIT_STATE, + WLAN_GREEN_AP_PS_ON_STATE, +}; + +/** + * enum wlan_green_ap_ps_event - PS event + * @WLAN_GREEN_AP_PS_START_EVENT - Start + * @WLAN_GREEN_AP_PS_STOP_EVENT - Stop + * @WLAN_GREEN_AP_ADD_STA_EVENT - Sta assoc + * @WLAN_GREEN_AP_DEL_STA_EVENT - Sta disassoc + * @WLAN_GREEN_AP_ADD_MULTISTREAM_STA_EVENT - Multistream sta assoc + * @WLAN_GREEN_AP_DEL_MULTISTREAM_STA_EVENT - Multistream sta disassoc + * @WLAN_GREEN_AP_PS_ON_EVENT - PS on + * @WLAN_GREEN_AP_PS_OFF_EVENT - PS off + */ +enum wlan_green_ap_ps_event { + WLAN_GREEN_AP_PS_START_EVENT = 1, + WLAN_GREEN_AP_PS_STOP_EVENT, + WLAN_GREEN_AP_ADD_STA_EVENT, + WLAN_GREEN_AP_DEL_STA_EVENT, + WLAN_GREEN_AP_ADD_MULTISTREAM_STA_EVENT, + WLAN_GREEN_AP_DEL_MULTISTREAM_STA_EVENT, + WLAN_GREEN_AP_PS_ON_EVENT, + WLAN_GREEN_AP_PS_WAIT_EVENT, +}; + +/** + * struct wlan_pdev_green_ap_ctx - green ap context + * @pdev - Pdev pointer + * @ps_enable - Enable PS + * @ps_mode - No sta or Multistream sta mode + * @ps_on_time - PS on time, once enabled + * @ps_trans_time - PS transition time + * @num_nodes - Number of nodes associated to radio + * @num_nodes_multistream - Multistream nodes associated to radio + * @ps_state - PS state + * @ps_event - PS event + * @ps_timer - Timer + * @lock: green ap spinlock + * @egap_params - Enhanced green ap params + */ +struct wlan_pdev_green_ap_ctx { + struct wlan_objmgr_pdev *pdev; + uint8_t ps_enable; + uint8_t ps_mode; + uint8_t ps_on_time; + uint32_t ps_trans_time; + uint32_t num_nodes; + uint32_t num_nodes_multistream; + enum wlan_green_ap_ps_state ps_state; + enum wlan_green_ap_ps_event ps_event; + qdf_timer_t ps_timer; + qdf_spinlock_t lock; + struct wlan_green_ap_egap_params egap_params; + bool dbg_enable; +}; + +/** + * wlan_psoc_get_green_ap_tx_ops() - Obtain green ap tx ops from green ap ctx + * @green_ap_ctx: green ap context + * + * @Return: green ap tx ops pointer + */ +struct wlan_lmac_if_green_ap_tx_ops * +wlan_psoc_get_green_ap_tx_ops(struct wlan_pdev_green_ap_ctx *green_ap_ctx); + +/** + * wlan_is_egap_enabled() - Get Enhance Green AP feature status + * @green_ap_ctx: green ap context + * + * Return: true if firmware, feature_flag and ini are all egap enabled + */ +bool wlan_is_egap_enabled(struct wlan_pdev_green_ap_ctx *green_ap_ctx); + +/** + * wlan_green_ap_state_mc() - Green ap state machine + * @green_ap_ctx: green ap context + * @event: ps event + * + * @Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_state_mc(struct wlan_pdev_green_ap_ctx *green_ap_ctx, + enum wlan_green_ap_ps_event event); + +/** + * wlan_green_ap_timer_fn() - Green ap timer callback + * @pdev: pdev pointer + * + * @Return: None + */ +void wlan_green_ap_timer_fn(void *pdev); + +/** + * wlan_green_ap_check_mode() - Check for mode + * @pdev: pdev pointer + * @object: vdev object + * @arg: flag to be set + * + * Callback to check if all modes on radio are configured as AP + * + * @Return: None + */ +void wlan_green_ap_check_mode(struct wlan_objmgr_pdev *pdev, + void *object, + void *arg); +#endif /* _WLAN_GREEN_AP_MAIN_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/cfg_green_ap_params.h b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/cfg_green_ap_params.h new file mode 100644 index 0000000000000000000000000000000000000000..410dfb5d4786b4da5347d6225f6116f2c431b874 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/cfg_green_ap_params.h @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains centralized definitions of converged configuration. + */ + +#ifndef __CFG_GREEN_AP_PARAMS_H +#define __CFG_GREEN_AP_PARAMS_H + +/* + * + * gEnableGreenAp - Enable green ap feature + * @Min: 0 + * @Max: 1 + * @Default: 0 + * + * This ini is used to enable green ap feature + * + * Related: None + * + * Supported Feature: SAP + * + * Usage: Internal/External + * + * + */ + +#define CFG_ENABLE_GREEN_AP_FEATURE CFG_INI_BOOL( \ + "gEnableGreenAp", \ + PLATFORM_VALUE(1, 0), \ + "enable green ap") + +/* + * + * gEnableEGAP - Enable the enhanced green ap feature + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * This ini is used to enable the enhanced green ap feature + * + * Related: None + * + * Supported Feature: SAP + * + * Usage: Internal/External + * + * + */ + +#define CFG_ENABLE_EGAP_FEATURE CFG_INI_BOOL( \ + "gEnableEGAP", \ + PLATFORM_VALUE(1, 0), \ + "enable e-gap") +/* + * + * gEGAPInactTime - configure the inactive time for EGAP + * @Min: 0 + * @Max: 300000 + * @Default: 2000 + * + * This ini is used to configure the inactive time for EGAP + * + * Related: None + * + * Supported Feature: SAP + * + * Usage: Internal/External + * + * + */ + +#define CFG_EGAP_INACT_TIME_FEATURE CFG_INI_UINT( \ + "gEGAPInactTime", \ + 0, \ + 300000, \ + 2000, \ + CFG_VALUE_OR_DEFAULT, \ + "egap inactivity time") +/* + * + * gEGAPWaitTime - configure the wait time for EGAP + * @Min: 0 + * @Max: 300000 + * @Default: 150 + * + * This ini is used to configure the wait time for EGAP + * + * Related: None + * + * Supported Feature: SAP + * + * Usage: Internal/External + * + * + */ + +#define CFG_EGAP_WAIT_TIME_FEATURE CFG_INI_UINT( \ + "gEGAPWaitTime", \ + 0, \ + 300000, \ + 150, \ + CFG_VALUE_OR_DEFAULT, \ + "egap wait time") +/* + * + * gEGAPFeatures - Configure the EGAP flags + * @Min: 0 + * @Max: 15 + * @Default: 3 + * + * This ini is used to configure the EGAP flags + * + * Related: None + * + * Supported Feature: SAP + * + * Usage: Internal/External + * + * + */ + +#define CFG_EGAP_FLAGS_FEATURE CFG_INI_UINT( \ + "gEGAPFeatures", \ + 0, \ + 15, \ + 3, \ + CFG_VALUE_OR_DEFAULT, \ + "egap flag") + +#ifdef WLAN_SUPPORT_GREEN_AP +#define CFG_GREEN_AP_ALL \ + CFG(CFG_ENABLE_GREEN_AP_FEATURE) \ + CFG(CFG_ENABLE_EGAP_FEATURE) \ + CFG(CFG_EGAP_INACT_TIME_FEATURE) \ + CFG(CFG_EGAP_WAIT_TIME_FEATURE) \ + CFG(CFG_EGAP_FLAGS_FEATURE) +#else +#define CFG_GREEN_AP_ALL +#endif + +#endif /* __CFG_GREEN_AP_PARAMS_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/wlan_green_ap_api.h b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/wlan_green_ap_api.h new file mode 100644 index 0000000000000000000000000000000000000000..70eb59520871f3172e1d78e60b7e4bc89ba31ab8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/wlan_green_ap_api.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Contains green ap north bound interface definitions + */ + +#ifndef _WLAN_GREEN_AP_API_H_ +#define _WLAN_GREEN_AP_API_H_ + +#include +#include +#include + +/* Green ap mode of operation */ +#define WLAN_GREEN_AP_MODE_NO_STA 1 /* PS if no sta connected */ +#define WLAN_GREEN_AP_MODE_NUM_STREAM 2 /* PS if 1x1 clients only connected */ + +/** + * struct wlan_green_ap_egap_params - enhance green ap params + * @fw_egap_support: fw enhance green ap support + * @host_enable_egap: HOST enhance green ap support + * @egap_inactivity_time: inactivity time + * @egap_wait_time: wait time + * @egap_feature_flags: feature flags + */ +struct wlan_green_ap_egap_params { + bool fw_egap_support; + bool host_enable_egap; + uint32_t egap_inactivity_time; + uint32_t egap_wait_time; + uint32_t egap_feature_flags; +}; + +/** + * struct wlan_green_ap_egap_status_info - enhance green ap params + * @status: egap status + * @mac_id: mac id + * @tx_chainmask: tx chainmask + * @rx_chainmask: rx chainmask + */ +struct wlan_green_ap_egap_status_info { + uint32_t status; + uint32_t mac_id; + uint32_t tx_chainmask; + uint32_t rx_chainmask; +}; + +/** + * wlan_green_ap_init() - initialize green ap component + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_init(void); + +/** + * wlan_green_ap_deinit() - De-initialize green ap component + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_deinit(void); + +/** + * wlan_green_ap_start() - Start green ap + * @pdev: pdev pointer + * + * Call this function when the first SAP comes up + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_start(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_stop() - Stop green ap + * @pdev: pdev pointer + * + * Call this function when the last SAP goes down + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_stop(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_pdev_open() - open component and update config params + * @pdev: pdev pointer + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_pdev_open(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_add_sta() - On association + * @pdev: pdev pointer + * + * Call this function when new node is associated + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_add_sta(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_add_multistream_sta() - On association + * @pdev: pdev pointer + * + * Call this function when new multistream node is associated + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_add_multistream_sta(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_del_sta() - On disassociation + * @pdev: pdev pointer + * + * Call this function when new node is disassociated + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_del_sta(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_del_multistream_sta() - On disassociation + * @pdev: pdev pointer + * + * Call this function when new multistream node is disassociated + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_del_multistream_sta(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_is_ps_enabled() - is power save enabled + * @pdev: pdev pointer + * + * Check if power save is enabled in FW + * + * Return: Success or Failure + */ +bool wlan_green_ap_is_ps_enabled(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_suspend_handle() - handle driver suspend + * @pdev: pdev pointer + * + * Return: None + */ +void wlan_green_ap_suspend_handle(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_get_capab() - get lmac capability + * @pdev: pdev pointer + * + * Return: Appropriate status + */ +QDF_STATUS wlan_green_ap_get_capab(struct wlan_objmgr_pdev *pdev); +#endif /* _WLAN_GREEN_AP_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/wlan_green_ap_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/wlan_green_ap_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..cb1992294998291d9223d5b01221fc17f0a55fe1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/wlan_green_ap_ucfg_api.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Contains green ap north bound interface definitions + */ + +#ifndef _WLAN_GREEN_AP_UCFG_API_H_ +#define _WLAN_GREEN_AP_UCFG_API_H_ + +#include +#include +#include +#include "wlan_utility.h" + +/** + * ucfg_green_ap_enable_egap() - Enable enhanced green ap + * @pdev: pdev pointer + * + * Return: Success or Failure + */ +QDF_STATUS ucfg_green_ap_enable_egap(struct wlan_objmgr_pdev *pdev); + +/** + * ucfg_green_ap_set_ps_config() - Set ps value + * @pdev: pdev pointer + * @value - value to be set + * + * Return: Success or Failure + */ +QDF_STATUS ucfg_green_ap_set_ps_config(struct wlan_objmgr_pdev *pdev, + uint8_t value); +/** + * ucfg_green_ap_get_ps_config() - Check if ps is enabled or not + * @pdev: pdev pointer + * @ps_enable: pointer to ps enable config value + * + * Return: Success or Failure + */ +QDF_STATUS ucfg_green_ap_get_ps_config(struct wlan_objmgr_pdev *pdev, + uint8_t *ps_enable); + +/** + * ucfg_green_ap_set_transition_time() - Set transition time + * @pdev: pdev pointer + * @val: transition time + * + * This API sets custom transition time + * + * Return: Success or Failure + */ +QDF_STATUS ucfg_green_ap_set_transition_time(struct wlan_objmgr_pdev *pdev, + uint32_t val); + +/** + * ucfg_green_ap_get_transition_time() - Get transition time + * @pdev: pdev pointer + * @ps_trans_time: pointer to transition time + * + * This API gets transition time + * + * Return: Success or Failure + */ +QDF_STATUS ucfg_green_ap_get_transition_time(struct wlan_objmgr_pdev *pdev, + uint32_t *ps_trans_time); + +/** + * ucfg_green_ap_config() - Config green AP + * @pdev: pdev pointer + * + * Return: Success or Failure + */ +QDF_STATUS ucfg_green_ap_config(struct wlan_objmgr_pdev *pdev, uint8_t val); + +/** + * ucfg_green_ap_enable_debug_prints() - Enable debugs + * @pdev: pdev pointer + * + * Return: None + */ +void ucfg_green_ap_enable_debug_prints(struct wlan_objmgr_pdev *pdev, + uint32_t val); + +/** + * ucfg_green_ap_get_debug_prints() - Check if debug enabled + * @pdev: pdev pointer + * + * Return: Debug value + */ +bool ucfg_green_ap_get_debug_prints(struct wlan_objmgr_pdev *pdev); +#endif /* _WLAN_GREEN_AP_UCFG_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/src/wlan_green_ap_api.c b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/src/wlan_green_ap_api.c new file mode 100644 index 0000000000000000000000000000000000000000..7fa69dceb56d280913a824c993d1feda89216155 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/src/wlan_green_ap_api.c @@ -0,0 +1,500 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains green ap north bound interface definitions + */ +#include +#include <../../core/src/wlan_green_ap_main_i.h> +#include +#include "cfg_green_ap_params.h" +#include "cfg_ucfg_api.h" + +QDF_STATUS wlan_green_ap_get_capab( + struct wlan_objmgr_pdev *pdev) +{ + struct wlan_lmac_if_green_ap_tx_ops *green_ap_tx_ops; + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + + green_ap_tx_ops = wlan_psoc_get_green_ap_tx_ops(green_ap_ctx); + if (!green_ap_tx_ops) { + green_ap_err("green ap tx ops obtained are NULL"); + return QDF_STATUS_E_EXISTS; + } + + if (green_ap_tx_ops->get_capab) + return green_ap_tx_ops->get_capab(pdev); + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_green_ap_pdev_obj_create_notification() - called from objmgr when pdev + * is created + * @pdev: pdev context + * @arg: argument + * + * This function gets called from object manager when pdev is being created and + * creates green ap context and attach it to objmgr. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_green_ap_pdev_obj_create_notification( + struct wlan_objmgr_pdev *pdev, void *arg) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = qdf_mem_malloc(sizeof(*green_ap_ctx)); + if (!green_ap_ctx) + return QDF_STATUS_E_NOMEM; + + green_ap_ctx->ps_state = WLAN_GREEN_AP_PS_IDLE_STATE; + green_ap_ctx->ps_event = WLAN_GREEN_AP_PS_WAIT_EVENT; + green_ap_ctx->ps_mode = WLAN_GREEN_AP_MODE_NO_STA; + green_ap_ctx->num_nodes = 0; + green_ap_ctx->num_nodes_multistream = 0; + green_ap_ctx->ps_on_time = WLAN_GREEN_AP_PS_ON_TIME; + green_ap_ctx->ps_trans_time = WLAN_GREEN_AP_PS_TRANS_TIME; + + green_ap_ctx->pdev = pdev; + + qdf_timer_init(NULL, &green_ap_ctx->ps_timer, + wlan_green_ap_timer_fn, + pdev, QDF_TIMER_TYPE_WAKE_APPS); + + qdf_spinlock_create(&green_ap_ctx->lock); + if (wlan_objmgr_pdev_component_obj_attach(pdev, + WLAN_UMAC_COMP_GREEN_AP, + green_ap_ctx, QDF_STATUS_SUCCESS) + != QDF_STATUS_SUCCESS) { + green_ap_err("Failed to attach green ap ctx in pdev ctx"); + status = QDF_STATUS_E_FAILURE; + goto err_pdev_attach; + } + + green_ap_info("Green AP creation successful, green ap ctx: %pK, pdev: %pK", + green_ap_ctx, pdev); + + return QDF_STATUS_SUCCESS; + +err_pdev_attach: + qdf_spinlock_destroy(&green_ap_ctx->lock); + qdf_timer_free(&green_ap_ctx->ps_timer); + qdf_mem_free(green_ap_ctx); + return status; +} + +/** + * wlan_green_ap_pdev_obj_destroy_notification() - called from objmgr when + * pdev is destroyed + * @pdev: pdev context + * @arg: argument + * + * This function gets called from object manager when pdev is being destroyed + * and deletes green ap context and detach it from objmgr. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_green_ap_pdev_obj_destroy_notification( + struct wlan_objmgr_pdev *pdev, void *arg) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context is already NULL"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_info("Deleting green ap pdev obj, green ap ctx: %pK, pdev: %pK", + green_ap_ctx, pdev); + + if (wlan_objmgr_pdev_component_obj_detach(pdev, + WLAN_UMAC_COMP_GREEN_AP, green_ap_ctx) != + QDF_STATUS_SUCCESS) { + green_ap_err("Failed to detach green ap ctx in psoc ctx"); + return QDF_STATUS_E_FAILURE; + } + + qdf_timer_free(&green_ap_ctx->ps_timer); + qdf_spinlock_destroy(&green_ap_ctx->lock); + + qdf_mem_free(green_ap_ctx); + green_ap_info("green ap deletion successful"); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_green_ap_init(void) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_register_pdev_create_handler( + WLAN_UMAC_COMP_GREEN_AP, + wlan_green_ap_pdev_obj_create_notification, + NULL); + if (status != QDF_STATUS_SUCCESS) { + green_ap_err("Failed to register green ap obj create handler"); + goto err_pdev_create; + } + + status = wlan_objmgr_register_pdev_destroy_handler( + WLAN_UMAC_COMP_GREEN_AP, + wlan_green_ap_pdev_obj_destroy_notification, + NULL); + if (status != QDF_STATUS_SUCCESS) { + green_ap_err("Failed to register green ap obj destroy handler"); + goto err_pdev_delete; + } + + green_ap_info("Successfully registered create and destroy handlers with objmgr"); + return QDF_STATUS_SUCCESS; + +err_pdev_delete: + wlan_objmgr_unregister_pdev_create_handler( + WLAN_UMAC_COMP_GREEN_AP, + wlan_green_ap_pdev_obj_create_notification, + NULL); +err_pdev_create: + return status; +} + +QDF_STATUS wlan_green_ap_deinit(void) +{ + if (wlan_objmgr_unregister_pdev_create_handler( + WLAN_UMAC_COMP_GREEN_AP, + wlan_green_ap_pdev_obj_create_notification, + NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + if (wlan_objmgr_unregister_pdev_destroy_handler( + WLAN_UMAC_COMP_GREEN_AP, + wlan_green_ap_pdev_obj_destroy_notification, + NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + green_ap_info("Successfully unregistered create and destroy handlers with objmgr"); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_green_ap_pdev_open(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + struct wlan_objmgr_psoc *psoc; + + if (!pdev) { + green_ap_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + green_ap_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&green_ap_ctx->lock); + green_ap_ctx->ps_enable = cfg_get(psoc, + CFG_ENABLE_GREEN_AP_FEATURE); + green_ap_ctx->egap_params.host_enable_egap = cfg_get(psoc, + CFG_ENABLE_EGAP_FEATURE); + green_ap_ctx->egap_params.egap_inactivity_time = cfg_get(psoc, + CFG_EGAP_INACT_TIME_FEATURE); + green_ap_ctx->egap_params.egap_wait_time = cfg_get(psoc, + CFG_EGAP_WAIT_TIME_FEATURE); + green_ap_ctx->egap_params.egap_feature_flags = cfg_get(psoc, + CFG_EGAP_FLAGS_FEATURE); + + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_green_ap_start(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_debug("Green AP start received"); + + /* Make sure the start function does not get called 2 times */ + qdf_spin_lock_bh(&green_ap_ctx->lock); + + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + green_ap_debug("enhanced green ap support is enabled"); + return QDF_STATUS_SUCCESS; + } + + if (green_ap_ctx->ps_state == WLAN_GREEN_AP_PS_IDLE_STATE) { + if (green_ap_ctx->ps_enable) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return wlan_green_ap_state_mc(green_ap_ctx, + WLAN_GREEN_AP_PS_START_EVENT); + } + } + + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return QDF_STATUS_E_ALREADY; +} + +QDF_STATUS wlan_green_ap_stop(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_debug("Green AP stop received"); + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + green_ap_debug("enhanced green ap support is enabled"); + return QDF_STATUS_SUCCESS; + } + + /* Delete the timer just to be sure */ + qdf_timer_stop(&green_ap_ctx->ps_timer); + + /* Disable the power save */ + green_ap_ctx->ps_enable = WLAN_GREEN_AP_PS_DISABLE; + + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return wlan_green_ap_state_mc(green_ap_ctx, + WLAN_GREEN_AP_PS_STOP_EVENT); +} + +QDF_STATUS wlan_green_ap_add_sta(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_debug("Green AP add sta received"); + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + green_ap_debug("enhanced green ap support is enabled"); + return QDF_STATUS_SUCCESS; + } + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return wlan_green_ap_state_mc(green_ap_ctx, + WLAN_GREEN_AP_ADD_STA_EVENT); +} + +QDF_STATUS wlan_green_ap_add_multistream_sta(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_debug("Green AP add multistream sta received"); + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + green_ap_debug("enhanced green ap support is enabled"); + return QDF_STATUS_SUCCESS; + } + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return wlan_green_ap_state_mc(green_ap_ctx, + WLAN_GREEN_AP_ADD_MULTISTREAM_STA_EVENT); +} + +QDF_STATUS wlan_green_ap_del_sta(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_debug("Green AP del sta received"); + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + green_ap_info("enhanced green ap support is enabled"); + return QDF_STATUS_SUCCESS; + } + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return wlan_green_ap_state_mc(green_ap_ctx, + WLAN_GREEN_AP_DEL_STA_EVENT); +} + +QDF_STATUS wlan_green_ap_del_multistream_sta(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_debug("Green AP del multistream sta received"); + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + green_ap_info("enhanced green ap support is enabled"); + return QDF_STATUS_SUCCESS; + } + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return wlan_green_ap_state_mc(green_ap_ctx, + WLAN_GREEN_AP_DEL_MULTISTREAM_STA_EVENT); +} + +bool wlan_green_ap_is_ps_enabled(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if ((green_ap_ctx->ps_state == WLAN_GREEN_AP_PS_ON_STATE) && + (green_ap_ctx->ps_enable)) + return true; + + return false; + +} + +void wlan_green_ap_suspend_handle(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return; + } + + wlan_green_ap_stop(pdev); + + green_ap_ctx->ps_enable = WLAN_GREEN_AP_PS_SUSPEND; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/src/wlan_green_ap_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/src/wlan_green_ap_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..5d84587eb11f64aa4824721bff91cad29da2b02a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/src/wlan_green_ap_ucfg_api.c @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains green ap north bound interface definitions + */ + +#include +#include +#include <../../core/src/wlan_green_ap_main_i.h> + +QDF_STATUS ucfg_green_ap_enable_egap(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + struct wlan_lmac_if_green_ap_tx_ops *green_ap_tx_ops; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_tx_ops = wlan_psoc_get_green_ap_tx_ops(green_ap_ctx); + if (!green_ap_tx_ops) { + green_ap_err("green ap tx ops obtained are NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!green_ap_tx_ops->enable_egap) { + green_ap_err("tx op for sending enbale/disable green ap is NULL"); + return QDF_STATUS_E_FAILURE; + } + + return green_ap_tx_ops->enable_egap(pdev, &green_ap_ctx->egap_params); +} + +QDF_STATUS ucfg_green_ap_set_ps_config(struct wlan_objmgr_pdev *pdev, + uint8_t value) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return QDF_STATUS_SUCCESS; + } + + green_ap_ctx->ps_enable = value; + if (value == WLAN_GREEN_AP_MODE_NUM_STREAM) + green_ap_ctx->ps_mode = WLAN_GREEN_AP_MODE_NUM_STREAM; + else + green_ap_ctx->ps_mode = WLAN_GREEN_AP_MODE_NO_STA; + + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_green_ap_get_ps_config(struct wlan_objmgr_pdev *pdev, + uint8_t *ps_enable) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return QDF_STATUS_SUCCESS; + } + + *ps_enable = green_ap_ctx->ps_enable; + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_green_ap_set_transition_time(struct wlan_objmgr_pdev *pdev, + uint32_t val) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return QDF_STATUS_SUCCESS; + } + + green_ap_ctx->ps_trans_time = val; + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_green_ap_get_transition_time(struct wlan_objmgr_pdev *pdev, + uint32_t *ps_trans_time) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return QDF_STATUS_SUCCESS; + } + + *ps_trans_time = green_ap_ctx->ps_trans_time; + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_green_ap_config(struct wlan_objmgr_pdev *pdev, uint8_t val) +{ + + uint8_t flag; + + if (wlan_green_ap_get_capab(pdev) == QDF_STATUS_E_NOSUPPORT) { + green_ap_err("GreenAP not supported on radio\n"); + return QDF_STATUS_E_NOSUPPORT; + } + + if (val) { + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + wlan_objmgr_pdev_iterate_obj_list(pdev, + WLAN_VDEV_OP, + wlan_green_ap_check_mode, + &flag, 0, WLAN_GREEN_AP_ID); + if (flag == 1) { + green_ap_err("Radio not in AP mode." + "Feature not supported"); + return QDF_STATUS_E_NOSUPPORT; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_NOSUPPORT; + } + + ucfg_green_ap_set_ps_config(pdev, val); + + if (wlan_util_is_vdev_active(pdev, WLAN_GREEN_AP_ID) == + QDF_STATUS_SUCCESS) + wlan_green_ap_start(pdev); + } else { + wlan_green_ap_stop(pdev); + } + + return QDF_STATUS_SUCCESS; +} + +void ucfg_green_ap_enable_debug_prints(struct wlan_objmgr_pdev *pdev, + uint32_t val) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return; + } + + green_ap_ctx->dbg_enable = val; +} + +bool ucfg_green_ap_get_debug_prints(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return false; + } + + return green_ap_ctx->dbg_enable; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/include/wlan_mlme_cmn.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/include/wlan_mlme_cmn.h new file mode 100644 index 0000000000000000000000000000000000000000..3991f6aee8275130b7ae7234e0fd50f7362e47b9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/include/wlan_mlme_cmn.h @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Define Common MLME structure and APIs + */ +#ifndef _WLAN_MLME_CMN_H_ +#define _WLAN_MLME_CMN_H_ + +#include +#include +#include + +/** + * struct vdev_mlme_ext_ops - VDEV MLME legacy callbacks structure + * @mlme_psoc_ext_hdl_create: callback to invoke creation of legacy + * psoc object + * @mlme_psoc_ext_hdl_destroy: callback to invoke destroy of legacy + * psoc object + * @mlme_pdev_ext_hdl_create: callback to invoke creation of legacy + * pdev object + * @mlme_pdev_ext_hdl_destroy: callback to invoke destroy of legacy + * pdev object + * @mlme_vdev_ext_hdl_create: callback to invoke creation of legacy + * vdev object + * @mlme_vdev_ext_hdl_post_create: callback to invoke post creation actions + * of legacy vdev object + * @mlme_vdev_ext_hdl_destroy: callback to invoke destroy of legacy + * vdev object + * @mlme_vdev_start_fw_send: callback to invoke vdev start command + * @mlme_vdev_stop_fw_send: callback to invoke vdev stop command + * @mlme_vdev_down_fw_send: callback to invoke vdev down command + * @mlme_multivdev_restart_fw_send: callback to invoke multivdev restart + * command + * @mlme_vdev_enqueue_exp_cmd: callback to enqueue exception command + * required by serialization + * @mlme_multi_vdev_restart_resp: callback to process multivdev restart + * response + */ +struct mlme_ext_ops { + QDF_STATUS (*mlme_psoc_ext_hdl_create)( + struct psoc_mlme_obj *psoc_mlme); + QDF_STATUS (*mlme_psoc_ext_hdl_destroy)( + struct psoc_mlme_obj *pdev_mlme); + QDF_STATUS (*mlme_pdev_ext_hdl_create)( + struct pdev_mlme_obj *pdev_mlme); + QDF_STATUS (*mlme_pdev_ext_hdl_destroy)( + struct pdev_mlme_obj *pdev_mlme); + QDF_STATUS (*mlme_vdev_ext_hdl_create)( + struct vdev_mlme_obj *vdev_mlme); + QDF_STATUS (*mlme_vdev_ext_hdl_post_create)( + struct vdev_mlme_obj *vdev_mlme); + QDF_STATUS (*mlme_vdev_ext_hdl_destroy)( + struct vdev_mlme_obj *vdev_mlme); + QDF_STATUS (*mlme_vdev_start_fw_send)( + struct wlan_objmgr_vdev *vdev, uint8_t restart); + QDF_STATUS (*mlme_vdev_stop_fw_send)(struct wlan_objmgr_vdev *vdev); + QDF_STATUS (*mlme_vdev_down_fw_send)(struct wlan_objmgr_vdev *vdev); + QDF_STATUS (*mlme_multivdev_restart_fw_send)( + struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*mlme_vdev_enqueue_exp_cmd)( + struct vdev_mlme_obj *vdev_mlme, + uint8_t cmd_type); + QDF_STATUS (*mlme_vdev_ext_delete_rsp)( + struct wlan_objmgr_psoc *psoc, + struct vdev_delete_response *rsp); + QDF_STATUS (*mlme_multi_vdev_restart_resp)( + struct wlan_objmgr_psoc *psoc, + struct multi_vdev_restart_resp *resp); +}; + +/** + * mlme_psoc_ops_ext_hdl_create() - Alloc PSOC mlme ext handle + * @psoc_mlme: PSOC MLME comp object + * + * API to allocate PSOC MLME ext handle + * + * Return: SUCCESS on successful allocation + * Else FAILURE + */ +QDF_STATUS mlme_psoc_ops_ext_hdl_create(struct psoc_mlme_obj *psoc_mlme); + +/** + * mlme_psoc_ops_ext_hdl_destroy() - Destroy PSOC mlme ext handle + * @psoc_mlme: PSOC MLME comp object + * + * API to free psoc MLME ext handle + * + * Return: SUCCESS on successful free + * Else FAILURE + */ +QDF_STATUS mlme_psoc_ops_ext_hdl_destroy(struct psoc_mlme_obj *psoc_mlme); + +/** + * mlme_pdev_ops_ext_hdl_create - Alloc PDEV mlme ext handle + * @pdev_mlme_obj: PDEV MLME comp object + * + * API to allocate PDEV MLME ext handle + * + * Return: SUCCESS on successful allocation + * Else FAILURE + */ +QDF_STATUS mlme_pdev_ops_ext_hdl_create(struct pdev_mlme_obj *pdev_mlme); + +/** + * mlme_pdev_ops_ext_hdl_destroy - Destroy PDEV mlme ext handle + * @pdev_mlme_obj: PDEV MLME comp object + * + * API to free pdev MLME ext handle + * + * Return: SUCCESS on successful free + * Else FAILURE + */ +QDF_STATUS mlme_pdev_ops_ext_hdl_destroy(struct pdev_mlme_obj *pdev_mlme); + +/** + * mlme_vdev_ops_ext_hdl_create - Alloc VDEV mlme ext handle + * @vdev_mlme_obj: VDEV MLME comp object + * + * API to allocate VDEV MLME ext handle + * + * Return: SUCCESS on successful allocation + * Else FAILURE + */ +QDF_STATUS mlme_vdev_ops_ext_hdl_create(struct vdev_mlme_obj *vdev_mlme); + +/** + * mlme_vdev_ops_ext_hdl_post_create - Perform post VDEV mlme ext handle alloc + * operations + * @vdev_mlme_obj: VDEV MLME comp object + * + * API to perform post vdev MLME ext handle allocation operations + * + * Return: SUCCESS on initialization successful + * Else FAILURE + */ +QDF_STATUS mlme_vdev_ops_ext_hdl_post_create(struct vdev_mlme_obj *vdev_mlme); + +/** + * mlme_vdev_ops_ext_hdl_destroy - Destroy VDEV mlme ext handle + * @vdev_mlme_obj: VDEV MLME comp object + * + * API to free vdev MLME ext handle + * + * Return: SUCCESS on successful free + * Else FAILURE + */ +QDF_STATUS mlme_vdev_ops_ext_hdl_destroy(struct vdev_mlme_obj *vdev_mlme); + +/** + * mlme_vdev_enqueue_exp_ser_cmd - Enqueue exception serialization cmd + * @vdev_mlme_obj: VDEV MLME comp object + * @cmd_type: Serialization command type + * + * API to enqueue the exception serialization command, used by + * mlme-serialization wrapper layer + * + * Return: SUCCESS on successful enqueuing the command + * Else FAILURE + */ +QDF_STATUS mlme_vdev_enqueue_exp_ser_cmd(struct vdev_mlme_obj *vdev_mlme, + uint8_t cmd_type); + +/** + * mlme_vdev_ops_start_fw_send - Send WMI START/RESTART commmand to FW + * @vdev: VDEV object + * + * API to send WMI start/restart command to FW + * + * Return: SUCCESS on successful sending the command + * Else FAILURE + */ +QDF_STATUS mlme_vdev_ops_start_fw_send(struct wlan_objmgr_vdev *vdev, + uint8_t restart); + +/** + * mlme_vdev_ops_multivdev_restart_fw_cmd_send - Send WMI Multivdev restart + * commmand to FW + * @pdev: PDEV object + * + * API to send WMI multivdev restart command to FW + * + * Return: SUCCESS on successful sending the command + * Else FAILURE + */ +QDF_STATUS mlme_vdev_ops_multivdev_restart_fw_cmd_send( + struct wlan_objmgr_pdev *pdev); + +/** + * mlme_vdev_ops_stop_fw_send - Send WMI STOP commmand to FW + * @vdev: VDEV object + * + * API to send WMI stop command to FW + * + * Return: SUCCESS on successful sending the command + * Else FAILURE + */ +QDF_STATUS mlme_vdev_ops_stop_fw_send(struct wlan_objmgr_vdev *vdev); + +/** + * mlme_vdev_ops_down_fw_send - Send WMI Down commmand to FW + * @vdev: VDEV object + * + * API to send WMI down command to FW + * + * Return: SUCCESS on successful sending the command + * Else FAILURE + */ +QDF_STATUS mlme_vdev_ops_down_fw_send(struct wlan_objmgr_vdev *vdev); + +/* + * mlme_vdev_ops_ext_hdl_multivdev_restart_resp() - Handler multivdev restart + * response event + * @psoc: PSOC object manager handle + * @resp: Restart response event + * + * Return: Success on successful handling of the response event, + * Else failure + */ +QDF_STATUS mlme_vdev_ops_ext_hdl_multivdev_restart_resp( + struct wlan_objmgr_psoc *psoc, + struct multi_vdev_restart_resp *resp); + +/** + * mlme_set_ops_register_cb - Sets ops registration callback + * @ops_cb: Function pointer + * + * API to set ops registration call back + * + * Return: void + */ +typedef struct mlme_ext_ops *(*mlme_get_global_ops_cb)(void); +void mlme_set_ops_register_cb(mlme_get_global_ops_cb ops_cb); + +/** + * wlan_cmn_mlme_init - Initializes MLME component + * + * Registers callbacks with object manager for create/destroy + * + * Return: SUCCESS on successful registration + * FAILURE, if registration fails + */ +QDF_STATUS wlan_cmn_mlme_init(void); + +/** + * wlan_cmn_mlme_deinit - Uninitializes MLME component + * + * Unregisters callbacks with object manager for create/destroy + * + * Return: SUCCESS on successful registration + * FAILURE, if registration fails + */ +QDF_STATUS wlan_cmn_mlme_deinit(void); + +/** + * mlme_vdev_ops_ext_hdl_delete_rsp - Vdev Delete response ext handler + * @psoc: PSOC object + * @rsp: Vdev delete response received from the firmware + * + * API to invoke the legacy delete response handler for legacy cleanup + * + * Return: SUCCESS on successful deletion + * FAILURE, if deletion fails + */ +QDF_STATUS mlme_vdev_ops_ext_hdl_delete_rsp(struct wlan_objmgr_psoc *psoc, + struct vdev_delete_response *rsp); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/include/wlan_pdev_mlme.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/include/wlan_pdev_mlme.h new file mode 100644 index 0000000000000000000000000000000000000000..6f3c68ffeb1fc027834e50cb0a8b56f86e312ef0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/include/wlan_pdev_mlme.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Define PDEV MLME structure and APIs + */ +#ifndef _WLAN_PDEV_MLME_H_ +#define _WLAN_PDEV_MLME_H_ + +#include +#include +#include + +/* + * struct pdev_restart_attr - Pdev restart attributes + * @vdev: vdev on which the pdev restart cmd was enqueued + * @restart_bmap: Bitmap for vdev requesting multivdev restart + */ +struct pdev_restart_attr { + struct wlan_objmgr_vdev *vdev; + unsigned long restart_bmap[2]; +}; + +/** + * struct pdev_mlme_obj - PDEV MLME component object + * @pdev: PDEV object + * @ext_pdev_ptr: PDEV MLME legacy pointer + * @mlme_register_ops: Call back to register MLME legacy APIs + * @vdev_restart_lock: Lock for VDEVs restart + * @restart_req_timer: Timer handle for VDEVs restart + * @restart_pend_vdev_bmap: Bitmap for VDEV RESTART command pending + * @restart_send_vdev_bmap: Bitmap for VDEV RESTART command sending + * @start_send_vdev_arr: Bitmap for VDEV START command sending + */ +struct pdev_mlme_obj { + struct wlan_objmgr_pdev *pdev; + mlme_pdev_ext_t *ext_pdev_ptr; + QDF_STATUS (*mlme_register_ops)(struct vdev_mlme_obj *vdev_mlme); + qdf_spinlock_t vdev_restart_lock; + qdf_timer_t restart_req_timer; + unsigned long restart_pend_vdev_bmap[2]; + unsigned long restart_send_vdev_bmap[2]; + unsigned long start_send_vdev_arr[2]; + struct pdev_restart_attr pdev_restart; +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/include/wlan_psoc_mlme.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/include/wlan_psoc_mlme.h new file mode 100644 index 0000000000000000000000000000000000000000..c7ca931ca5d5a1ee125c92904b5728e59ba170c1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/include/wlan_psoc_mlme.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Define PSOC MLME structure + */ +#ifndef _WLAN_PSOC_MLME_H_ +#define _WLAN_PSOC_MLME_H_ + +#include +#include +#include +#ifdef FEATURE_VDEV_RSP_WAKELOCK +#include +#endif + +/* Max RNR size given max vaps are 16 */ +#define MAX_RNR_SIZE 256 + +/** + * struct wlan_rnr_global_cache - RNR cache buffer per soc + * @rnr_buf: RNR cache buffer + * @rnr_cnt: Count of APs in cache + * @rnr_size: Size of RNR cache (RNR IE) + */ +struct wlan_6ghz_rnr_global_cache { + char rnr_buf[MAX_RNR_SIZE]; + int rnr_cnt; + uint16_t rnr_size; +}; + +/** + * struct psoc_mlme_obj - PSoC MLME component object + * @psoc: PSoC object + * @ext_psoc_ptr: PSoC legacy pointer + * @psoc_vdev_rt: PSoC Vdev response timer + * @psoc_mlme_wakelock: Wakelock to prevent system going to suspend + * @rnr_6ghz_cache: Cache of 6Ghz vap in RNR ie format + */ +struct psoc_mlme_obj { + struct wlan_objmgr_psoc *psoc; + mlme_psoc_ext_t *ext_psoc_ptr; + struct vdev_response_timer psoc_vdev_rt[WLAN_UMAC_PSOC_MAX_VDEVS]; +#ifdef FEATURE_VDEV_RSP_WAKELOCK + struct psoc_mlme_wakelock psoc_mlme_wakelock; +#endif + struct wlan_6ghz_rnr_global_cache rnr_6ghz_cache; +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/include/wlan_vdev_mlme.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/include/wlan_vdev_mlme.h new file mode 100644 index 0000000000000000000000000000000000000000..5bf922d9c99e89effa2467245e40be1db5d8e08c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/include/wlan_vdev_mlme.h @@ -0,0 +1,885 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Define VDEV MLME structure and APIs + */ +#ifndef _WLAN_VDEV_MLME_H_ +#define _WLAN_VDEV_MLME_H_ + +#include +#include +#include +#include + +struct vdev_mlme_obj; + +/* Requestor ID for multiple vdev restart */ +#define MULTIPLE_VDEV_RESTART_REQ_ID 0x1234 + +/* values for vdev_type */ +#define WLAN_VDEV_MLME_TYPE_UNKNOWN 0x0 +#define WLAN_VDEV_MLME_TYPE_AP 0x1 +#define WLAN_VDEV_MLME_TYPE_STA 0x2 +#define WLAN_VDEV_MLME_TYPE_IBSS 0x3 +#define WLAN_VDEV_MLME_TYPE_MONITOR 0x4 +#define WLAN_VDEV_MLME_TYPE_NAN 0x5 +#define WLAN_VDEV_MLME_TYPE_OCB 0x6 +#define WLAN_VDEV_MLME_TYPE_NDI 0x7 + +/* values for vdev_subtype */ +#define WLAN_VDEV_MLME_SUBTYPE_UNKNOWN 0x0 +#define WLAN_VDEV_MLME_SUBTYPE_P2P_DEVICE 0x1 +#define WLAN_VDEV_MLME_SUBTYPE_P2P_CLIENT 0x2 +#define WLAN_VDEV_MLME_SUBTYPE_P2P_GO 0x3 +#define WLAN_VDEV_MLME_SUBTYPE_PROXY_STA 0x4 +#define WLAN_VDEV_MLME_SUBTYPE_MESH 0x5 +#define WLAN_VDEV_MLME_SUBTYPE_MESH_11S 0x6 +#define WLAN_VDEV_MLME_SUBTYPE_SMART_MONITOR 0x7 + +/* vdev control flags (per bits) */ +#define WLAN_VDEV_MLME_FLAGS_NON_MBSSID_AP 0x00000001 +#define WLAN_VDEV_MLME_FLAGS_TRANSMIT_AP 0x00000002 +#define WLAN_VDEV_MLME_FLAGS_NON_TRANSMIT_AP 0x00000004 + +/** + * struct vdev_mlme_proto_generic - generic mlme proto structure + * sent in frames + * @dtim_period: frequency of data transmissions per beacon 1-255 + * @slot_time: slot time + * @protection_mode: rts cts protection mode + * @beacon_interval: beacon interval + * @ldpc: low density parity check value + * @nss: number of spatial stream + * @nss_2g: 2.4GHz number of spatial stream + * @nss_5g: 5GHz number of spatial stream + * @tsfadjust: adjusted timer sync value + */ +struct vdev_mlme_proto_generic { + uint8_t dtim_period; + uint32_t slot_time; + uint32_t protection_mode; + uint16_t beacon_interval; + uint8_t ldpc; + uint8_t nss; + uint8_t nss_2g; + uint8_t nss_5g; + uint64_t tsfadjust; +}; + +/** + * struct vdev_mlme_proto_ap - ap specific mlme protocol + * @. + */ +struct vdev_mlme_proto_ap { +}; + +/** + * struct vdev_mlme_proto_sta - sta specific mlme protocol + * @assoc_id: association id of station + * @uapsd_cfg: uapsd configuration + */ +struct vdev_mlme_proto_sta { + uint16_t assoc_id; + uint16_t uapsd_cfg; +}; + +/** + * struct vdev_mlme_proto_bss_color - bss color cfg + * @flags: proposed for future use cases, currently not used. + * @evt_type: bss color collision event. + * @current_bss_color: current bss color. + * @detection_period_ms: scan interval for both AP and STA mode. + * @scan_period_ms: scan period for passive scan to detect collision. + * @free_slot_expiry_time_ms: FW to notify host at timer expiry after + * which Host will disable the bss color. + */ +struct vdev_mlme_proto_bss_color { + uint32_t flags; + uint8_t evt_type; + uint32_t current_bss_color; + uint32_t detection_period_ms; + uint32_t scan_period_ms; + uint32_t free_slot_expiry_time_ms; +}; + +/** + * struct vdev_mlme_vht_info - vdev VHT information + * @caps: vht capabilities + * @subfer: su beam former capability + * @subfee: su beam formee capability + * @mubfer: mu beam former capability + * @mubfee: mu beam formee capability + * @implicit_bf: Implicit BF support + * @sounding_dimension: Beamformer number of sounding dimension + * @bfee_sts_cap: beam formee STA capability + * @allow_vht: vht capability status + */ +struct vdev_mlme_vht_info { + uint32_t caps; + uint8_t subfer; + uint8_t mubfer; + uint8_t subfee; + uint8_t mubfee; + uint8_t implicit_bf; + uint8_t sounding_dimension; + uint8_t bfee_sts_cap; + bool allow_vht; +}; + +/** + * struct vdev_mlme_ht_info - vdev HT information + * @ht_caps: HT capabilities + * @allow_ht: HT capability status + */ +struct vdev_mlme_ht_info { + uint32_t ht_caps; + bool allow_ht; +}; + +/** + * struct vdev_mlme_he_ops_info - vdev mlme HEOPS information + * @he_ops: he ops + */ +struct vdev_mlme_he_ops_info { + uint32_t he_ops; +}; + +/** + * struct vdev_mlme_he_ops_info - vdev protocol structure holding information + * that is used in frames + * @generic: generic protocol information + * @ap: ap specific protocol information + * @sta: sta specific protocol information + * @vht_info: vht information + * @ht_info: ht capabilities information + * @he_ops_info: he ops information + * @bss_color: 11ax HE BSS Color information + */ +struct vdev_mlme_proto { + struct vdev_mlme_proto_generic generic; + struct vdev_mlme_proto_ap ap; + struct vdev_mlme_proto_sta sta; + struct vdev_mlme_vht_info vht_info; + struct vdev_mlme_ht_info ht_info; + struct vdev_mlme_he_ops_info he_ops_info; + struct vdev_mlme_proto_bss_color bss_color; +}; + +/** + * struct vdev_mlme_mgmt_generic - generic vdev mlme mgmt cfg + * @rts_threshold: RTS threshold + * @frag_threshold: Fragmentation threshold + * @probe_delay: time in msec for delaying to send first probe request + * @repeat_probe_time: probe request transmission time + * @drop_unencry: drop unencrypted status + * @ tx_pwrlimit: Tx power limit + * @tx_power: Tx power + * @minpower: Min power + * @maxpower: Max power + * @maxregpower: max regulatory power + * @antennamax: max antenna + * @reg_class_id: reg domain class id + * @ampdu: ampdu limit + * @amsdu: amsdu limit + * @ssid: service set identifier + * @ssid_len: ssid length + * @type: vdev type + * @sub_type: vdev subtype + * @rx_decap_type: rx decap type + * @tx_encap_type: tx encap type + * @disable_hw_ack: disable ha ack flag + * @bssid: bssid + * @phy_mode: phy mode + */ +struct vdev_mlme_mgmt_generic { + uint32_t rts_threshold; + uint32_t frag_threshold; + uint32_t probe_delay; + uint32_t repeat_probe_time; + uint32_t drop_unencry; + uint32_t tx_pwrlimit; + uint8_t tx_power; + uint8_t minpower; + uint8_t maxpower; + uint8_t maxregpower; + uint8_t antennamax; + uint8_t reg_class_id; + uint8_t ampdu; + uint8_t amsdu; + char ssid[WLAN_SSID_MAX_LEN + 1]; + uint8_t ssid_len; + uint8_t type; + uint8_t subtype; + uint8_t rx_decap_type; + uint8_t tx_encap_type; + bool disable_hw_ack; + uint8_t bssid[QDF_MAC_ADDR_SIZE]; + uint32_t phy_mode; +}; + +/** + * struct vdev_mlme_mgmt_ap - ap specific vdev mlme mgmt cfg + * @hidden_ssid: flag to indicate whether it is hidden ssid + * @cac_duration_ms: cac duration in millseconds + */ +struct vdev_mlme_mgmt_ap { + bool hidden_ssid; + uint32_t cac_duration_ms; +}; + +/** + * struct vdev_mlme_mgmt_sta - sta specific vdev mlme mgmt cfg + * @. + */ +struct vdev_mlme_mgmt_sta { +}; + +/** + * struct vdev_mlme_inactivity_params - vdev mlme inactivity parameters + * @bmiss_first_bcnt: bmiss first time + * @bmiss_final_bcnt: bmiss final time + * @keepalive_min_idle_inactive_time_secs: min time AP consider STA to be + * inactive + * @keepalive_max_idle_inactive_time_secs: max inactive idle time for AP to send + * data-null + * @keepalive_max_unresponsive_time_secs: max time to send WMI_STA_KICKOUT + */ +struct vdev_mlme_inactivity_params { + uint32_t bmiss_first_bcnt; + uint32_t bmiss_final_bcnt; + uint32_t keepalive_min_idle_inactive_time_secs; + uint32_t keepalive_max_idle_inactive_time_secs; + uint32_t keepalive_max_unresponsive_time_secs; +}; + +/** + * struct vdev_mlme_rate_info - vdev mlme rate information + * @rate_flags: dynamic bandwidth info + * @per_band_tx_mgmt_rate: per band Tx mgmt rate + * @max_rate: max bandwidth rate + * @tx_mgmt_rate: Tx Mgmt rate + * @bcn_tx_rate: beacon Tx rate + * @bcn_tx_rate_code: beacon Tx rate code + * @type: Type of ratemask configuration + * @lower32: Lower 32 bits in the 1st 64-bit value + * @higher32: Higher 32 bits in the 1st 64-bit value + * @lower32_2: Lower 32 bits in the 2nd 64-bit value + * @half_rate: Half rate + * @quarter_rate: quarter rate + */ +struct vdev_mlme_rate_info { + uint32_t rate_flags; + uint32_t per_band_tx_mgmt_rate; + uint32_t max_rate; + uint32_t tx_mgmt_rate; + uint32_t bcn_tx_rate; +#ifdef WLAN_BCN_RATECODE_ENABLE + uint32_t bcn_tx_rate_code; +#endif + uint8_t type; + uint32_t lower32; + uint32_t higher32; + uint32_t lower32_2; + bool half_rate; + bool quarter_rate; +}; + +/** + * struct vdev_mlme_chainmask_info - vdev mlme chainmask information + * @tx_chainmask: Tx chainmask + * @rx_chainmask: Rx Chainmask + * @num_rx_chain: Num of bits set in Rx chain + * @num_tx_chain: Num of bits set in Tx chain + */ +struct vdev_mlme_chainmask_info { + uint8_t tx_chainmask; + uint8_t rx_chainmask; + uint8_t num_rx_chain; + uint8_t num_tx_chain; +}; + +/** + * struct vdev_mlme_powersave_info - vdev mlme powersave information + * @packet_powersave: packet powersave + * @max_li_of_moddtim: max mod dtim + * @dyndtim_cnt: dynamic dtim count + * @listen_interval: listen interval + * @moddtim_cnt: mod dtim count + */ +struct vdev_mlme_powersave_info { + uint32_t packet_powersave; + uint32_t max_li_of_moddtim; + uint32_t dyndtim_cnt; + uint32_t listen_interval; + uint32_t moddtim_cnt; +}; + +/** + * struct vdev_mlme_beacon_info - vdev mlme beacon information + * @beacon_buffer: buffer allocated for beacon frame + * @beacon_offsets: beacon IE's offsets + */ +struct vdev_mlme_beacon_info { + qdf_nbuf_t beacon_buffer; + void *beacon_offsets; +}; + +/** + * struct vdev_mlme_mbss_11ax - mbss 11ax fields required for up cmd + * @profile_idx: profile index of the connected non-trans ap (mbssid case). + * 0 means invalid. + * @profile_num: the total profile numbers of non-trans aps (mbssid + * case). + * 0 means non-MBSS AP. + * @mbssid-flags: MBSS IE flags indicating vdev type + * @vdevid_trans: id of transmitting vdev for MBSS IE + * @trans_bssid: bssid of transmitted AP (MBSS IE case) + */ +struct vdev_mlme_mbss_11ax { + uint32_t profile_idx; + uint32_t profile_num; + uint32_t mbssid_flags; + uint8_t vdevid_trans; + uint8_t trans_bssid[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct vdev_mlme_mgmt - vdev mlme mgmt related cfg + * @generic: generic mgmt information + * @ap: ap specific mgmt information + * @sta: sta specific mgmt information + * @inactivity_params: inactivity parameters + * @rate_info: bandwidth rate information + * @chainmask_info: Chainmask information + * @powersave_info: Power save parameters + * @beacon_info: beacon buffer information + * @mbss_11ax: MBSS 11ax information + */ +struct vdev_mlme_mgmt { + struct vdev_mlme_mgmt_generic generic; + struct vdev_mlme_mgmt_ap ap; + struct vdev_mlme_mgmt_sta sta; + struct vdev_mlme_inactivity_params inactivity_params; + struct vdev_mlme_rate_info rate_info; + struct vdev_mlme_chainmask_info chainmask_info; + struct vdev_mlme_powersave_info powersave_info; + struct vdev_mlme_beacon_info beacon_info; + struct vdev_mlme_mbss_11ax mbss_11ax; +}; + +/** + * enum beacon_update_op - Beacon update op type + * @BEACON_INIT: Initialize beacon + * @BEACON_REINIT: Re-initialize beacon + * @BEACON_UPDATE: Update dynamic fields of beacon + * @BEACON_CSA: Enable CSA IE + * @BEACON_FREE: Beacon buffer free + */ +enum beacon_update_op { + BEACON_INIT, + BEACON_REINIT, + BEACON_UPDATE, + BEACON_CSA, + BEACON_FREE, +}; + +/** + * enum vdev_cmd_type - Command type + * @START_REQ: Start request + * @RESTART_REQ: Restart request + * @STOP_REQ: STOP request + * @DELETE_REQ: DELETE request + */ +enum vdev_cmd_type { + START_REQ, + RESTART_REQ, + STOP_REQ, + DELETE_REQ, +}; + +/** + * enum vdev_start_resp_type - start respone type + * @START_RESPONSE: Start response + * @RESTART_RESPONSE: Restart response + */ +enum vdev_start_resp_type { + START_RESPONSE = 0, + RESTART_RESPONSE, +}; + +/** + * struct vdev_mlme_ops - VDEV MLME operation callbacks structure + * @mlme_vdev_validate_basic_params: callback to validate VDEV basic params + * @mlme_vdev_reset_proto_params: callback to Reset protocol params + * @mlme_vdev_start_send: callback to initiate actions of VDEV + * MLME start operation + * @mlme_vdev_restart_send: callback to initiate actions of VDEV + * MLME restart operation + * @mlme_vdev_stop_start_send: callback to block start/restart VDEV + * request command + * @mlme_vdev_start_continue: callback to initiate operations on + * LMAC/FW start response + * @mlme_vdev_up_send: callback to initiate actions of VDEV + * MLME up operation + * @mlme_vdev_notify_up_complete: callback to notify VDEV MLME on moving + * to UP state + * @mlme_vdev_notify_roam_start: callback to initiate roaming + * @mlme_vdev_update_beacon: callback to initiate beacon update + * @mlme_vdev_disconnect_peers: callback to initiate disconnection of + * peers + * @mlme_vdev_dfs_cac_timer_stop: callback to stop the DFS CAC timer + * @mlme_vdev_stop_send: callback to initiate actions of VDEV + * MLME stop operation + * @mlme_vdev_stop_continue: callback to initiate operations on + * LMAC/FW stop response + * @mlme_vdev_bss_peer_delete_continue: callback to initiate operations on BSS + * peer delete completion + * @mlme_vdev_down_send: callback to initiate actions of VDEV + * MLME down operation + * @mlme_vdev_notify_start_state_exit: callback to notify on vdev start + * start state exit + * @mlme_vdev_is_newchan_no_cac: callback to check CAC is required + * @mlme_vdev_ext_peer_delete_all_rsp: callback to initiate actions for + * vdev mlme peer delete all response + */ +struct vdev_mlme_ops { + QDF_STATUS (*mlme_vdev_validate_basic_params)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_reset_proto_params)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_start_send)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_restart_send)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_stop_start_send)( + struct vdev_mlme_obj *vdev_mlme, + enum vdev_cmd_type type, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_start_continue)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_sta_conn_start)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_start_req_failed)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_up_send)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_notify_up_complete)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_notify_roam_start)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_update_beacon)( + struct vdev_mlme_obj *vdev_mlme, + enum beacon_update_op op, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_disconnect_peers)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_dfs_cac_timer_stop)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_stop_send)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_stop_continue)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_down_send)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_notify_down_complete)( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data); + QDF_STATUS (*mlme_vdev_ext_stop_rsp)( + struct vdev_mlme_obj *vdev_mlme, + struct vdev_stop_response *rsp); + QDF_STATUS (*mlme_vdev_ext_start_rsp)( + struct vdev_mlme_obj *vdev_mlme, + struct vdev_start_response *rsp); + QDF_STATUS (*mlme_vdev_notify_start_state_exit)( + struct vdev_mlme_obj *vdev_mlme); + QDF_STATUS (*mlme_vdev_is_newchan_no_cac)( + struct vdev_mlme_obj *vdev_mlme); + QDF_STATUS (*mlme_vdev_ext_peer_delete_all_rsp)( + struct vdev_mlme_obj *vdev_mlme, + struct peer_delete_all_response *rsp); +}; + +/** + * struct vdev_mlme_obj - VDEV MLME component object + * @proto: VDEV MLME proto substructure + * @mgmt: VDEV MLME mgmt substructure + * @sm_lock: VDEV SM lock + * @vdev_cmd_lock: VDEV MLME command atomicity + * @sm_hdl: VDEV SM handle + * @vdev: Pointer to vdev objmgr + * @ops: VDEV MLME callback table + * @ext_vdev_ptr: VDEV MLME legacy pointer + * @vdev_rt: VDEV response timer + * @vdev_wakelock: vdev wakelock sub structure + */ +struct vdev_mlme_obj { + struct vdev_mlme_proto proto; + struct vdev_mlme_mgmt mgmt; +#ifdef VDEV_SM_LOCK_SUPPORT + qdf_spinlock_t sm_lock; + qdf_mutex_t vdev_cmd_lock; +#endif + struct wlan_sm *sm_hdl; + struct wlan_objmgr_vdev *vdev; + struct vdev_mlme_ops *ops; + mlme_vdev_ext_t *ext_vdev_ptr; +}; + +/** + * wlan_vdev_mlme_set_ssid() - set ssid + * @vdev: VDEV object + * @ssid: SSID (input) + * @ssid_len: Length of SSID + * + * API to set the SSID of VDEV + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: SUCCESS, if update is done + * FAILURE, if ssid length is > max ssid len + */ +static inline QDF_STATUS wlan_vdev_mlme_set_ssid( + struct wlan_objmgr_vdev *vdev, + const uint8_t *ssid, uint8_t ssid_len) +{ + struct vdev_mlme_obj *vdev_mlme; + + /* This API is invoked with lock acquired, do not add log prints */ + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) + return QDF_STATUS_E_FAILURE; + + if (ssid_len <= WLAN_SSID_MAX_LEN) { + qdf_mem_copy(vdev_mlme->mgmt.generic.ssid, ssid, ssid_len); + vdev_mlme->mgmt.generic.ssid_len = ssid_len; + } else { + vdev_mlme->mgmt.generic.ssid_len = 0; + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_vdev_mlme_get_ssid() - get ssid + * @vdev: VDEV object + * @ssid: SSID + * @ssid_len: Length of SSID + * + * API to get the SSID of VDEV, it updates the SSID and its length + * in @ssid, @ssid_len respectively + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: SUCCESS, if update is done + * FAILURE, if ssid length is > max ssid len + */ +static inline QDF_STATUS wlan_vdev_mlme_get_ssid( + struct wlan_objmgr_vdev *vdev, + uint8_t *ssid, uint8_t *ssid_len) +{ + struct vdev_mlme_obj *vdev_mlme; + + /* This API is invoked with lock acquired, do not add log prints */ + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) + return QDF_STATUS_E_FAILURE; + + if (vdev_mlme->mgmt.generic.ssid_len > 0) { + *ssid_len = vdev_mlme->mgmt.generic.ssid_len; + qdf_mem_copy(ssid, vdev_mlme->mgmt.generic.ssid, *ssid_len); + } else { + *ssid_len = 0; + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_vdev_mlme_set_nss() - set NSS + * @vdev: VDEV object + * @nss: nss configured by user + * + * API to set the Number of Spatial streams + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_nss( + struct wlan_objmgr_vdev *vdev, + uint8_t nss) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) + return; + + vdev_mlme->proto.generic.nss = nss; +} + +/** + * wlan_vdev_mlme_get_nss() - get NSS + * @vdev: VDEV object + * + * API to get the Number of Spatial Streams + * + * Return: + * @nss: nss value + */ +static inline uint8_t wlan_vdev_mlme_get_nss( + struct wlan_objmgr_vdev *vdev) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) + return 0; + + return vdev_mlme->proto.generic.nss; +} + +/** + * wlan_vdev_mlme_set_txchainmask() - set Tx chainmask + * @vdev: VDEV object + * @chainmask : chainmask either configured by user or max supported + * + * API to set the Tx chainmask + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_txchainmask( + struct wlan_objmgr_vdev *vdev, + uint8_t chainmask) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + + if (!vdev_mlme) + return; + + vdev_mlme->mgmt.chainmask_info.tx_chainmask = chainmask; +} + +/** + * wlan_vdev_mlme_get_txchainmask() - get Tx chainmask + * @vdev: VDEV object + * + * API to get the Tx chainmask + * + * Return: + * @chainmask : Tx chainmask either configured by user or max supported + */ +static inline uint8_t wlan_vdev_mlme_get_txchainmask( + struct wlan_objmgr_vdev *vdev) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) + return 0; + + return vdev_mlme->mgmt.chainmask_info.tx_chainmask; +} + +/** + * wlan_vdev_mlme_set_rxchainmask() - set Rx chainmask + * @vdev: VDEV object + * @chainmask : Rx chainmask either configured by user or max supported + * + * API to set the Rx chainmask + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_rxchainmask( + struct wlan_objmgr_vdev *vdev, + uint8_t chainmask) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) + return; + + vdev_mlme->mgmt.chainmask_info.rx_chainmask = chainmask; +} + +/** + * wlan_vdev_mlme_get_rxchainmask() - get Rx chainmask + * @vdev: VDEV object + * + * API to get the Rx chainmask + * + * Return: + * @chainmask : Rx chainmask either configured by user or max supported + */ +static inline uint8_t wlan_vdev_mlme_get_rxchainmask( + struct wlan_objmgr_vdev *vdev) +{ + struct vdev_mlme_obj *vdev_mlme; + + /* This API is invoked with lock acquired, do not add log prints */ + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) + return 0; + + return vdev_mlme->mgmt.chainmask_info.rx_chainmask; +} + +/** + * wlan_vdev_mlme_set_txpower() - set tx power + * @vdev: VDEV object + * @txpow: tx power either configured by used or max allowed + * + * API to set the tx power + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_txpower( + struct wlan_objmgr_vdev *vdev, + uint8_t txpow) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) + return; + + vdev_mlme->mgmt.generic.tx_power = txpow; +} + +/** + * wlan_vdev_mlme_get_txpower() - get tx power + * @vdev: VDEV object + * + * API to get the tx power + * + * Return: + * @txpow: tx power either configured by used or max allowed + */ +static inline uint8_t wlan_vdev_mlme_get_txpower( + struct wlan_objmgr_vdev *vdev) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) + return 0; + + return vdev_mlme->mgmt.generic.tx_power; +} + +/** + * wlan_vdev_mlme_set_maxrate() - set max rate + * @vdev: VDEV object + * @maxrate: configured by used or based on configured mode + * + * API to set the max rate the vdev supports + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_maxrate( + struct wlan_objmgr_vdev *vdev, + uint32_t maxrate) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) + return; + + vdev_mlme->mgmt.rate_info.max_rate = maxrate; +} + +/** + * wlan_vdev_mlme_get_maxrate() - get max rate + * @vdev: VDEV object + * + * API to get the max rate the vdev supports + * + * Return: + * @maxrate: configured by used or based on configured mode + */ +static inline uint32_t wlan_vdev_mlme_get_maxrate( + struct wlan_objmgr_vdev *vdev) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) + return 0; + + return vdev_mlme->mgmt.rate_info.max_rate; +} + +/** + * wlan_vdev_mlme_set_txmgmtrate() - set txmgmtrate + * @vdev: VDEV object + * @txmgmtrate: Tx Mgmt rate + * + * API to set Mgmt Tx rate + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_txmgmtrate( + struct wlan_objmgr_vdev *vdev, + uint32_t txmgmtrate) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) + return; + + vdev_mlme->mgmt.rate_info.tx_mgmt_rate = txmgmtrate; +} + +/** + * wlan_vdev_mlme_get_txmgmtrate() - get txmgmtrate + * @vdev: VDEV object + * + * API to get Mgmt Tx rate + * + * Return: + * @txmgmtrate: Tx Mgmt rate + */ +static inline uint32_t wlan_vdev_mlme_get_txmgmtrate( + struct wlan_objmgr_vdev *vdev) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) + return 0; + + return vdev_mlme->mgmt.rate_info.tx_mgmt_rate; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/inc/wlan_mlme_dbg.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/inc/wlan_mlme_dbg.h new file mode 100644 index 0000000000000000000000000000000000000000..0408235f01ab0c6eac2add0fa3227d146139637f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/inc/wlan_mlme_dbg.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Define the debug utils for MLME + */ +#ifndef _WLAN_MLME_DBG_H_ +#define _WLAN_MLME_DBG_H_ + +#include +#include + +#define mlme_alert(format, args...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_CMN_MLME, format, ## args) + +#define mlme_err(format, args...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_CMN_MLME, format, ## args) + +#define mlme_warn(format, args...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_CMN_MLME, format, ## args) + +#define mlme_info(format, args...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_CMN_MLME, format, ## args) + +#define mlme_debug(format, args...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_CMN_MLME, format, ## args) + +#define mlme_nofl_alert(format, args...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_CMN_MLME, format, ## args) + +#define mlme_nofl_err(format, args...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_CMN_MLME, format, ## args) + +#define mlme_nofl_warn(format, args...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_CMN_MLME, format, ## args) + +#define mlme_nofl_info(format, args...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_CMN_MLME, format, ## args) + +#define mlme_nofl_debug(format, args...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_CMN_MLME, format, ## args) + +#endif /* _WLAN_MLME_DBG_H_ */ + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/inc/wlan_pdev_mlme_main.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/inc/wlan_pdev_mlme_main.h new file mode 100644 index 0000000000000000000000000000000000000000..d7c13abf51ec0f937e9ffdfde909be25e6ffeb25 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/inc/wlan_pdev_mlme_main.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Define PDEV MLME structure and APIs + */ +#ifndef _WLAN_PDEV_MLME_MAIN_H_ +#define _WLAN_PDEV_MLME_MAIN_H_ + +/** + * wlan_pdev_mlme_init - Initializes PDEV MLME component + * + * Registers callbacks with object manager for create/destroy + * + * Return: SUCCESS on successful registration + * FAILURE, if registration fails + */ +QDF_STATUS wlan_pdev_mlme_init(void); + +/** + * wlan_pdev_mlme_deinit - Uninitializes PDEV MLME component + * + * Unregisters callbacks with object manager for create/destroy + * + * Return: SUCCESS on successful registration + * FAILURE, if registration fails + */ +QDF_STATUS wlan_pdev_mlme_deinit(void); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/inc/wlan_psoc_mlme_main.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/inc/wlan_psoc_mlme_main.h new file mode 100644 index 0000000000000000000000000000000000000000..36026b7ae1a8d35e1366813d0cd2ad331893ef1b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/inc/wlan_psoc_mlme_main.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Define PSOC MLME structure + */ +#ifndef _WLAN_PSOC_MLME_MAIN_H_ +#define _WLAN_PSOC_MLME_MAIN_H_ + +/** + * wlan_psoc_mlme_init() - Initializes PSOC MLME component + * + * Registers callbacks with object manager for create/destroy + * + * Return: SUCCESS on successful initialization + * FAILURE, if initialization fails + */ +QDF_STATUS wlan_psoc_mlme_init(void); + +/** + * wlan_psoc_mlme_deinit() - Uninitializes PSOC MLME component + * + * Unregisters callbacks with object manager for create/destroy + * + * Return: SUCCESS on successful de-initialization + * FAILURE, if de-initialization fails + */ +QDF_STATUS wlan_psoc_mlme_deinit(void); + +/** + * mlme_psoc_get_priv: get MLME priv object from psoc object + * @psoc: pointer to psoc object + * + * Return: pointer to MLME psoc private object + */ +struct psoc_mlme_obj *mlme_psoc_get_priv(struct wlan_objmgr_psoc *psoc); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/inc/wlan_vdev_mlme_main.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/inc/wlan_vdev_mlme_main.h new file mode 100644 index 0000000000000000000000000000000000000000..e13bcddf009b8e42a22eec02d2cea4558e5ccd3f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/inc/wlan_vdev_mlme_main.h @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Define VDEV MLME init/deinit APIs + */ + +#ifndef _WLAN_VDEV_MLME_MAIN_H_ +#define _WLAN_VDEV_MLME_MAIN_H_ + +#include +#include + +/** + * wlan_mlme_get_lmac_tx_ops() - get tx ops + * @psoc: pointer to psoc obj + * + * Return: pointer to tx ops + */ +static inline struct wlan_lmac_if_mlme_tx_ops * +wlan_mlme_get_lmac_tx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.tx_ops.mops; +} + +/** + * enum wlan_vdev_state - VDEV state + * @WLAN_VDEV_S_INIT: Default state, IDLE state + * @WLAN_VDEV_S_START: START state + * @WLAN_VDEV_S_DFS_CAC_WAIT: CAC period + * @WLAN_VDEV_S_UP: UP state + * @WLAN_VDEV_S_SUSPEND: Suspend state + * @WLAN_VDEV_S_STOP: STOP state + * @WLAN_VDEV_S_MAX: MAX state + * @WLAN_VDEV_SS_START_START_PROGRESS: Start progress sub state + * @WLAN_VDEV_SS_START_RESTART_PROGRESS: Restart progress sub state + * @WLAN_VDEV_SS_START_CONN_PROGRESS: Start connection progress sub state + * @WLAN_VDEV_SS_START_DISCONN_PROGRESS: Start Disconnection progress sub state + * @WLAN_VDEV_SS_SUSPEND_SUSPEND_DOWN: Suspend down sub state + * @WLAN_VDEV_SS_SUSPEND_SUSPEND_RESTART: Suspend restart sub state + * @WLAN_VDEV_SS_SUSPEND_HOST_RESTART: Suspend host restart sub state + * @WLAN_VDEV_SS_SUSPEND_CSA_RESTART: Suspend CSA restart sub state + * @WLAN_VDEV_SS_STOP_STOP_PROGRESS: Stop progress sub state + * @WLAN_VDEV_SS_STOP_DOWN_PROGRESS: Stop down progress sub state + * @WLAN_VDEV_SS_IDLE: Idle sub state (used, only if a state + * does not have substate) + * @WLAN_VDEV_SS_MAX: Max substate + */ +enum wlan_vdev_state { + WLAN_VDEV_S_INIT = 0, + WLAN_VDEV_S_START = 1, + WLAN_VDEV_S_DFS_CAC_WAIT = 2, + WLAN_VDEV_S_UP = 3, + WLAN_VDEV_S_SUSPEND = 4, + WLAN_VDEV_S_STOP = 5, + WLAN_VDEV_S_MAX = 6, + WLAN_VDEV_SS_START_START_PROGRESS = 7, + WLAN_VDEV_SS_START_RESTART_PROGRESS = 8, + WLAN_VDEV_SS_START_CONN_PROGRESS = 9, + WLAN_VDEV_SS_START_DISCONN_PROGRESS = 10, + WLAN_VDEV_SS_SUSPEND_SUSPEND_DOWN = 11, + WLAN_VDEV_SS_SUSPEND_SUSPEND_RESTART = 12, + WLAN_VDEV_SS_SUSPEND_HOST_RESTART = 13, + WLAN_VDEV_SS_SUSPEND_CSA_RESTART = 14, + WLAN_VDEV_SS_STOP_STOP_PROGRESS = 15, + WLAN_VDEV_SS_STOP_DOWN_PROGRESS = 16, + WLAN_VDEV_SS_IDLE = 17, + WLAN_VDEV_SS_MAX = 18, +}; + +/** + * enum wlan_vdev_sm_evt - VDEV SM event + * @WLAN_VDEV_SM_EV_START: Start VDEV UP operation + * @WLAN_VDEV_SM_EV_START_REQ: Invokes VDEV START handshake + * @WLAN_VDEV_SM_EV_RESTART_REQ: Invokes VDEV RESTART handshake + * @WLAN_VDEV_SM_EV_START_RESP: Notification on START resp + * @WLAN_VDEV_SM_EV_RESTART_RESP: Notification on RESTART resp + * @WLAN_VDEV_SM_EV_START_REQ_FAIL: Notification on START req failure + * @WLAN_VDEV_SM_EV_RESTART_REQ_FAIL: Notification on RESTART req failure + * @WLAN_VDEV_SM_EV_START_SUCCESS: Notification of Join Success + * @WLAN_VDEV_SM_EV_CONN_PROGRESS: Invoke Connection/up process + * @WLAN_VDEV_SM_EV_STA_CONN_START: Invoke Station Connection process + * @WLAN_VDEV_SM_EV_DFS_CAC_WAIT: Invoke DFS CAC WAIT timer + * @WLAN_VDEV_SM_EV_DFS_CAC_COMPLETED: Notifies on CAC completion + * @WLAN_VDEV_SM_EV_DOWN: Invokes VDEV DOWN operation + * @WLAN_VDEV_SM_EV_CONNECTION_FAIL: Notifications for UP/connection failure + * @WLAN_VDEV_SM_EV_STOP_RESP: Notifcation of stop response + * @WLAN_VDEV_SM_EV_STOP_FAIL: Notification of stop req failure + * @WLAN_VDEV_SM_EV_DOWN_FAIL: Notification of down failure + * @WLAN_VDEV_SM_EV_DISCONNECT_COMPLETE: Notification of Peer cleanup complete + * @WLAN_VDEV_SM_EV_SUSPEND_RESTART: Invokes suspend restart operation + * @WLAN_VDEV_SM_EV_HOST_RESTART: Invokes host only restart operation + * @WLAN_VDEV_SM_EV_UP_HOST_RESTART: Moves to UP state without sending UP + * command to lower layers + * @WLAN_VDEV_SM_EV_FW_VDEV_RESTART: Invokes FW only restart + * @WLAN_VDEV_SM_EV_UP_FAIL: Notification of up command failure + * @WLAN_VDEV_SM_EV_RADAR_DETECTED: Notification of RADAR detected, Random + * channel should be selected before + * triggering this event + * @WLAN_VDEV_SM_EV_CSA_RESTART: Invokes CSA IE operation + * @WLAN_VDEV_SM_EV_CSA_COMPLETE: Notifiction of CSA process complete + * @WLAN_VDEV_SM_EV_MLME_DOWN_REQ: Invoke DOWN command operation + * @WLAN_VDEV_SM_EV_DOWN_COMPLETE: Notification of DOWN complete + * @WLAN_VDEV_SM_EV_ROAM: Notifiction on ROAMING + * @WLAN_VDEV_SM_EV_STOP_REQ: Invoke API to initiate STOP handshake + * @WLAN_VDEV_SM_EV_CHAN_SWITCH_DISABLED:Test only, CSA completes without + * change in channel + */ +enum wlan_vdev_sm_evt { + WLAN_VDEV_SM_EV_START = 0, + WLAN_VDEV_SM_EV_START_REQ = 1, + WLAN_VDEV_SM_EV_RESTART_REQ = 2, + WLAN_VDEV_SM_EV_START_RESP = 3, + WLAN_VDEV_SM_EV_RESTART_RESP = 4, + WLAN_VDEV_SM_EV_START_REQ_FAIL = 5, + WLAN_VDEV_SM_EV_RESTART_REQ_FAIL = 6, + WLAN_VDEV_SM_EV_START_SUCCESS = 7, + WLAN_VDEV_SM_EV_CONN_PROGRESS = 8, + WLAN_VDEV_SM_EV_STA_CONN_START = 9, + WLAN_VDEV_SM_EV_DFS_CAC_WAIT = 10, + WLAN_VDEV_SM_EV_DFS_CAC_COMPLETED = 11, + WLAN_VDEV_SM_EV_DOWN = 12, + WLAN_VDEV_SM_EV_CONNECTION_FAIL = 13, + WLAN_VDEV_SM_EV_STOP_RESP = 14, + WLAN_VDEV_SM_EV_STOP_FAIL = 15, + WLAN_VDEV_SM_EV_DOWN_FAIL = 16, + WLAN_VDEV_SM_EV_DISCONNECT_COMPLETE = 17, + WLAN_VDEV_SM_EV_SUSPEND_RESTART = 18, + WLAN_VDEV_SM_EV_HOST_RESTART = 19, + WLAN_VDEV_SM_EV_UP_HOST_RESTART = 20, + WLAN_VDEV_SM_EV_FW_VDEV_RESTART = 21, + WLAN_VDEV_SM_EV_UP_FAIL = 22, + WLAN_VDEV_SM_EV_RADAR_DETECTED = 23, + WLAN_VDEV_SM_EV_CSA_RESTART = 24, + WLAN_VDEV_SM_EV_CSA_COMPLETE = 25, + WLAN_VDEV_SM_EV_MLME_DOWN_REQ = 26, + WLAN_VDEV_SM_EV_DOWN_COMPLETE = 27, + WLAN_VDEV_SM_EV_ROAM = 28, + WLAN_VDEV_SM_EV_STOP_REQ = 29, + WLAN_VDEV_SM_EV_CHAN_SWITCH_DISABLED = 30, +}; + +/** + * wlan_vdev_mlme_init - Initializes VDEV MLME component + * + * Registers callbacks with object manager for create/destroy + * + * Return: SUCCESS on successful registration + * FAILURE, if registration fails + */ +QDF_STATUS wlan_vdev_mlme_init(void); + +/** + * wlan_vdev_mlme_deinit - Uninitializes VDEV MLME component + * + * Unregisters callbacks with object manager for create/destroy + * + * Return: SUCCESS on successful registration + * FAILURE, if registration fails + */ +QDF_STATUS wlan_vdev_mlme_deinit(void); + +/** + * wlan_mlme_psoc_enable - MLME initializations on PSOC enable + * + * Initializes MLME params on PSOC eable + * + * Return: SUCCESS on successful initialization + * FAILURE, if initialization fails + */ +QDF_STATUS wlan_mlme_psoc_enable(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_mlme_psoc_disable - MLME clean up on PSOC disable + * + * cleanup MLME params on PSOC eable + * + * Return: SUCCESS on successful cleanup + * FAILURE, if cleanup fails + */ +QDF_STATUS wlan_mlme_psoc_disable(struct wlan_objmgr_psoc *psoc); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/src/wlan_cmn_mlme_main.c b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/src/wlan_cmn_mlme_main.c new file mode 100644 index 0000000000000000000000000000000000000000..0f03758b28f0de019a655b2cc3c11566ad8faba2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/src/wlan_cmn_mlme_main.c @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implements MLME global APIs + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct mlme_ext_ops *glbl_ops; +mlme_get_global_ops_cb glbl_ops_cb; + +QDF_STATUS wlan_cmn_mlme_init(void) +{ + QDF_STATUS status; + + status = wlan_psoc_mlme_init(); + if (status != QDF_STATUS_SUCCESS) + return status; + + status = wlan_pdev_mlme_init(); + if (status != QDF_STATUS_SUCCESS) + return status; + + status = wlan_vdev_mlme_init(); + if (status != QDF_STATUS_SUCCESS) + return status; + + if (glbl_ops_cb) + glbl_ops = glbl_ops_cb(); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cmn_mlme_deinit(void) +{ + QDF_STATUS status; + + status = wlan_vdev_mlme_deinit(); + if (status != QDF_STATUS_SUCCESS) + return status; + + status = wlan_pdev_mlme_deinit(); + if (status != QDF_STATUS_SUCCESS) + return status; + + status = wlan_psoc_mlme_deinit(); + if (status != QDF_STATUS_SUCCESS) + return status; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS mlme_psoc_ops_ext_hdl_create(struct psoc_mlme_obj *psoc_mlme) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (glbl_ops && glbl_ops->mlme_psoc_ext_hdl_create) + ret = glbl_ops->mlme_psoc_ext_hdl_create(psoc_mlme); + + return ret; +} + +QDF_STATUS mlme_psoc_ops_ext_hdl_destroy(struct psoc_mlme_obj *psoc_mlme) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (glbl_ops && glbl_ops->mlme_psoc_ext_hdl_destroy) + ret = glbl_ops->mlme_psoc_ext_hdl_destroy(psoc_mlme); + + return ret; +} + +QDF_STATUS mlme_pdev_ops_ext_hdl_create(struct pdev_mlme_obj *pdev_mlme) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (glbl_ops && glbl_ops->mlme_pdev_ext_hdl_create) + ret = glbl_ops->mlme_pdev_ext_hdl_create(pdev_mlme); + + return ret; +} + +QDF_STATUS mlme_pdev_ops_ext_hdl_destroy(struct pdev_mlme_obj *pdev_mlme) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (glbl_ops && glbl_ops->mlme_pdev_ext_hdl_destroy) + ret = glbl_ops->mlme_pdev_ext_hdl_destroy(pdev_mlme); + + return ret; +} + +QDF_STATUS mlme_vdev_ops_ext_hdl_create(struct vdev_mlme_obj *vdev_mlme) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (glbl_ops && glbl_ops->mlme_vdev_ext_hdl_create) + ret = glbl_ops->mlme_vdev_ext_hdl_create(vdev_mlme); + + return ret; +} + +QDF_STATUS mlme_vdev_ops_ext_hdl_post_create(struct vdev_mlme_obj *vdev_mlme) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (glbl_ops && glbl_ops->mlme_vdev_ext_hdl_post_create) + ret = glbl_ops->mlme_vdev_ext_hdl_post_create(vdev_mlme); + + return ret; +} + +QDF_STATUS mlme_vdev_ops_ext_hdl_destroy(struct vdev_mlme_obj *vdev_mlme) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (glbl_ops && glbl_ops->mlme_vdev_ext_hdl_destroy) + ret = glbl_ops->mlme_vdev_ext_hdl_destroy(vdev_mlme); + + return ret; +} + +QDF_STATUS mlme_vdev_ops_start_fw_send(struct wlan_objmgr_vdev *vdev, + uint8_t restart) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (glbl_ops && glbl_ops->mlme_vdev_start_fw_send) + ret = glbl_ops->mlme_vdev_start_fw_send(vdev, restart); + + return ret; +} + +QDF_STATUS mlme_vdev_ops_multivdev_restart_fw_cmd_send( + struct wlan_objmgr_pdev *pdev) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (glbl_ops && glbl_ops->mlme_multivdev_restart_fw_send) + glbl_ops->mlme_multivdev_restart_fw_send(pdev); + + return ret; +} + +QDF_STATUS mlme_vdev_ops_stop_fw_send(struct wlan_objmgr_vdev *vdev) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (glbl_ops && glbl_ops->mlme_vdev_stop_fw_send) + ret = glbl_ops->mlme_vdev_stop_fw_send(vdev); + + return ret; +} + +QDF_STATUS mlme_vdev_ops_down_fw_send(struct wlan_objmgr_vdev *vdev) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (glbl_ops && glbl_ops->mlme_vdev_down_fw_send) + ret = glbl_ops->mlme_vdev_down_fw_send(vdev); + + return ret; +} + +QDF_STATUS mlme_vdev_enqueue_exp_ser_cmd(struct vdev_mlme_obj *vdev_mlme, + uint8_t cmd_type) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (glbl_ops && glbl_ops->mlme_vdev_enqueue_exp_cmd) + ret = glbl_ops->mlme_vdev_enqueue_exp_cmd(vdev_mlme, cmd_type); + + return ret; +} + +QDF_STATUS mlme_vdev_ops_ext_hdl_delete_rsp(struct wlan_objmgr_psoc *psoc, + struct vdev_delete_response *rsp) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((glbl_ops) && glbl_ops->mlme_vdev_ext_delete_rsp) + ret = glbl_ops->mlme_vdev_ext_delete_rsp(psoc, rsp); + + return ret; +} + +QDF_STATUS mlme_vdev_ops_ext_hdl_multivdev_restart_resp( + struct wlan_objmgr_psoc *psoc, + struct multi_vdev_restart_resp *resp) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((glbl_ops) && glbl_ops->mlme_multi_vdev_restart_resp) + ret = glbl_ops->mlme_multi_vdev_restart_resp(psoc, resp); + + return ret; +} + +void mlme_set_ops_register_cb(mlme_get_global_ops_cb ops_cb) +{ + glbl_ops_cb = ops_cb; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/src/wlan_pdev_mlme_main.c b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/src/wlan_pdev_mlme_main.c new file mode 100644 index 0000000000000000000000000000000000000000..26c6f03818969c7b4c5602ea480950882a6504c7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/src/wlan_pdev_mlme_main.c @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implements PDEV MLME APIs + */ + +#include +#include +#include +#include +#include "include/wlan_mlme_cmn.h" +#include "include/wlan_pdev_mlme.h" +#include "wlan_pdev_mlme_main.h" +#include "wlan_pdev_mlme_api.h" + +static QDF_STATUS mlme_pdev_obj_create_handler(struct wlan_objmgr_pdev *pdev, + void *arg) +{ + struct pdev_mlme_obj *pdev_mlme; + + if (!pdev) { + mlme_err(" PDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pdev_mlme = qdf_mem_malloc(sizeof(*pdev_mlme)); + if (!pdev_mlme) { + mlme_err(" MLME component object alloc failed"); + return QDF_STATUS_E_NOMEM; + } + + pdev_mlme->pdev = pdev; + + if (mlme_pdev_ops_ext_hdl_create(pdev_mlme) != QDF_STATUS_SUCCESS) + goto init_failed; + + wlan_objmgr_pdev_component_obj_attach(pdev, WLAN_UMAC_COMP_MLME, + (void *)pdev_mlme, + QDF_STATUS_SUCCESS); + + return QDF_STATUS_SUCCESS; + +init_failed: + qdf_mem_free(pdev_mlme); + + return QDF_STATUS_E_FAILURE; +} + +static QDF_STATUS mlme_pdev_obj_destroy_handler(struct wlan_objmgr_pdev *pdev, + void *arg) +{ + struct pdev_mlme_obj *pdev_mlme; + + pdev_mlme = wlan_pdev_mlme_get_cmpt_obj(pdev); + if (!pdev_mlme) { + mlme_info(" PDEV MLME component object is NULL"); + return QDF_STATUS_SUCCESS; + } + + mlme_pdev_ops_ext_hdl_destroy(pdev_mlme); + + wlan_objmgr_pdev_component_obj_detach(pdev, WLAN_UMAC_COMP_MLME, + (void *)pdev_mlme); + qdf_mem_free(pdev_mlme); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_pdev_mlme_init(void) +{ + if (wlan_objmgr_register_pdev_create_handler + (WLAN_UMAC_COMP_MLME, + mlme_pdev_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + if (wlan_objmgr_register_pdev_destroy_handler + (WLAN_UMAC_COMP_MLME, + mlme_pdev_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) { + if (wlan_objmgr_unregister_pdev_create_handler + (WLAN_UMAC_COMP_MLME, + mlme_pdev_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_pdev_mlme_deinit(void) +{ + if (wlan_objmgr_unregister_pdev_create_handler + (WLAN_UMAC_COMP_MLME, + mlme_pdev_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + if (wlan_objmgr_unregister_pdev_destroy_handler + (WLAN_UMAC_COMP_MLME, + mlme_pdev_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/src/wlan_psoc_mlme_main.c b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/src/wlan_psoc_mlme_main.c new file mode 100644 index 0000000000000000000000000000000000000000..cf409c99e3474c4bc7873110fdc61813c274880d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/src/wlan_psoc_mlme_main.c @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implements PSOC MLME APIs + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct psoc_mlme_obj *mlme_psoc_get_priv(struct wlan_objmgr_psoc *psoc) +{ + struct psoc_mlme_obj *psoc_mlme; + + psoc_mlme = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_MLME); + if (!psoc_mlme) { + mlme_err("PSOC MLME component object is NULL"); + return NULL; + } + + return psoc_mlme; +} + +qdf_export_symbol(mlme_psoc_get_priv); + +static QDF_STATUS mlme_psoc_obj_create_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + struct psoc_mlme_obj *psoc_mlme; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + psoc_mlme = qdf_mem_malloc(sizeof(struct psoc_mlme_obj)); + if (!psoc_mlme) { + mlme_err("Failed to allocate PSOS mlme Object"); + return QDF_STATUS_E_NOMEM; + } + + psoc_mlme->psoc = psoc; + + status = mlme_psoc_ops_ext_hdl_create(psoc_mlme); + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("Failed to allocate psoc ext handle"); + goto init_failed; + } + + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_MLME, + psoc_mlme, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("Failed to attach psoc_ctx with psoc"); + goto init_failed; + } + + return QDF_STATUS_SUCCESS; +init_failed: + qdf_mem_free(psoc_mlme); + + return status; +} + +static QDF_STATUS mlme_psoc_obj_destroy_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + struct psoc_mlme_obj *psoc_mlme; + + psoc_mlme = mlme_psoc_get_priv(psoc); + if (!psoc_mlme) { + mlme_err("PSOC MLME component object is NULL"); + return QDF_STATUS_E_FAILURE; + } + + wlan_objmgr_psoc_component_obj_detach(psoc, WLAN_UMAC_COMP_MLME, + psoc_mlme); + + mlme_psoc_ops_ext_hdl_destroy(psoc_mlme); + + qdf_mem_free(psoc_mlme); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_psoc_mlme_init(void) +{ + if (wlan_objmgr_register_psoc_create_handler + (WLAN_UMAC_COMP_MLME, + mlme_psoc_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + if (wlan_objmgr_register_psoc_destroy_handler + (WLAN_UMAC_COMP_MLME, + mlme_psoc_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) { + if (wlan_objmgr_unregister_psoc_create_handler + (WLAN_UMAC_COMP_MLME, + mlme_psoc_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_psoc_mlme_deinit(void) +{ + if (wlan_objmgr_unregister_psoc_create_handler + (WLAN_UMAC_COMP_MLME, + mlme_psoc_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + if (wlan_objmgr_unregister_psoc_destroy_handler + (WLAN_UMAC_COMP_MLME, + mlme_psoc_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/src/wlan_vdev_mlme_main.c b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/src/wlan_vdev_mlme_main.c new file mode 100644 index 0000000000000000000000000000000000000000..7ef6dfb1285f8671d526208c26c428d45552a55a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_objmgr/dispatcher/src/wlan_vdev_mlme_main.c @@ -0,0 +1,271 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implements MLME component object creation/initialization/destroy + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static QDF_STATUS mlme_vdev_obj_create_handler(struct wlan_objmgr_vdev *vdev, + void *arg) +{ + struct vdev_mlme_obj *vdev_mlme; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_psoc *psoc; + struct pdev_mlme_obj *pdev_mlme; + struct wlan_lmac_if_mlme_tx_ops *txops; + QDF_STATUS status; + + if (!vdev) { + mlme_err(" VDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + mlme_err(" PDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + + /** + * 1st check whether for this vdev any vdev commands are pending for + * response. + */ + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + mlme_err("PSOC is NULL"); + return QDF_STATUS_E_FAILURE; + } + + txops = wlan_mlme_get_lmac_tx_ops(psoc); + if (!txops || !txops->psoc_vdev_rsp_timer_inuse) { + mlme_err("Failed to get mlme txrx_ops PSOC_%d", + wlan_psoc_get_id(psoc)); + return QDF_STATUS_E_FAILURE; + } + + status = txops->psoc_vdev_rsp_timer_inuse(psoc, wlan_vdev_get_id(vdev)); + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("The vdev response is pending for VDEV_%d status:%d", + wlan_vdev_get_id(vdev), status); + return QDF_STATUS_E_FAILURE; + } + + pdev_mlme = wlan_pdev_mlme_get_cmpt_obj(pdev); + if (!pdev_mlme) { + mlme_err("PDEV MLME is NULL"); + return QDF_STATUS_E_FAILURE; + } + + vdev_mlme = qdf_mem_malloc(sizeof(*vdev_mlme)); + if (!vdev_mlme) + return QDF_STATUS_E_NOMEM; + + vdev_mlme->vdev = vdev; + + if (pdev_mlme->mlme_register_ops(vdev_mlme) != QDF_STATUS_SUCCESS) { + mlme_err("Callbacks registration is failed"); + goto init_failed; + } + + if (mlme_vdev_sm_create(vdev_mlme) != QDF_STATUS_SUCCESS) { + mlme_err("SME creation failed"); + goto init_failed; + } + + if (mlme_vdev_ops_ext_hdl_create(vdev_mlme) != + QDF_STATUS_SUCCESS) { + mlme_err("Legacy vdev object creation failed"); + goto ext_hdl_create_failed; + } + + wlan_objmgr_vdev_component_obj_attach((struct wlan_objmgr_vdev *)vdev, + WLAN_UMAC_COMP_MLME, + (void *)vdev_mlme, + QDF_STATUS_SUCCESS); + + if (mlme_vdev_ops_ext_hdl_post_create(vdev_mlme) != + QDF_STATUS_SUCCESS) { + mlme_err("Legacy vdev object post creation failed"); + goto ext_hdl_post_create_failed; + } + + return QDF_STATUS_SUCCESS; + +ext_hdl_post_create_failed: + mlme_vdev_ops_ext_hdl_destroy(vdev_mlme); + wlan_objmgr_vdev_component_obj_detach(vdev, WLAN_UMAC_COMP_MLME, + vdev_mlme); +ext_hdl_create_failed: + mlme_vdev_sm_destroy(vdev_mlme); +init_failed: + qdf_mem_free(vdev_mlme); + + return QDF_STATUS_E_FAILURE; +} + +static QDF_STATUS mlme_vdev_obj_destroy_handler(struct wlan_objmgr_vdev *vdev, + void *arg) +{ + struct vdev_mlme_obj *vdev_mlme; + + if (!vdev) { + mlme_err(" VDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) { + mlme_info(" VDEV MLME component object is NULL"); + return QDF_STATUS_SUCCESS; + } + + mlme_vdev_sm_destroy(vdev_mlme); + + mlme_vdev_ops_ext_hdl_destroy(vdev_mlme); + + wlan_objmgr_vdev_component_obj_detach(vdev, WLAN_UMAC_COMP_MLME, + vdev_mlme); + qdf_mem_free(vdev_mlme); + + return QDF_STATUS_SUCCESS; +} + +static void mlme_scan_serialization_comp_info_cb( + struct wlan_objmgr_vdev *vdev, + union wlan_serialization_rules_info *comp_info) +{ + struct wlan_objmgr_pdev *pdev; + QDF_STATUS status; + + if (!comp_info || !vdev) { + mlme_err("comp_info or vdev is NULL"); + return; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + mlme_err("pdev is NULL"); + return; + } + + comp_info->scan_info.is_mlme_op_in_progress = false; + + status = wlan_util_is_pdev_scan_allowed(pdev, WLAN_MLME_SER_IF_ID); + if (status != QDF_STATUS_SUCCESS) + comp_info->scan_info.is_mlme_op_in_progress = true; +} + +QDF_STATUS wlan_mlme_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *tx_ops; + + status = wlan_serialization_register_comp_info_cb + (psoc, + WLAN_UMAC_COMP_MLME, + WLAN_SER_CMD_SCAN, + mlme_scan_serialization_comp_info_cb); + if (status != QDF_STATUS_SUCCESS) { + mlme_err("Serialize scan cmd register failed"); + return status; + } + + /* Register for WMI events into target_if rx */ + tx_ops = wlan_mlme_get_lmac_tx_ops(psoc); + if (tx_ops && tx_ops->vdev_mlme_attach) + tx_ops->vdev_mlme_attach(psoc); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mlme_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *tx_ops; + + status = wlan_serialization_deregister_comp_info_cb + (psoc, + WLAN_UMAC_COMP_MLME, + WLAN_SER_CMD_SCAN); + if (status != QDF_STATUS_SUCCESS) { + mlme_err("Serialize scan cmd deregister failed"); + return status; + } + + /* Unregister WMI events */ + tx_ops = wlan_mlme_get_lmac_tx_ops(psoc); + if (tx_ops && tx_ops->vdev_mlme_detach) + tx_ops->vdev_mlme_detach(psoc); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_vdev_mlme_init(void) +{ + if (wlan_objmgr_register_vdev_create_handler + (WLAN_UMAC_COMP_MLME, + mlme_vdev_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + if (wlan_objmgr_register_vdev_destroy_handler + (WLAN_UMAC_COMP_MLME, + mlme_vdev_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) { + if (wlan_objmgr_unregister_vdev_create_handler + (WLAN_UMAC_COMP_MLME, + mlme_vdev_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_vdev_mlme_deinit(void) +{ + if (wlan_objmgr_unregister_vdev_create_handler + (WLAN_UMAC_COMP_MLME, + mlme_vdev_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + if (wlan_objmgr_unregister_vdev_destroy_handler + (WLAN_UMAC_COMP_MLME, + mlme_vdev_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_utils/wlan_vdev_mlme_ser_if.c b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_utils/wlan_vdev_mlme_ser_if.c new file mode 100644 index 0000000000000000000000000000000000000000..80ea23bc43155dffce5b964fcc6cb3f47b27aa1a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_utils/wlan_vdev_mlme_ser_if.c @@ -0,0 +1,402 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file wlan_vdev_mlme_ser.c + * This file contains the APIs to support interface between vdev_mlme and + * serialization module + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum wlan_serialization_status +wlan_vdev_mlme_ser_start_bss(struct wlan_serialization_command *cmd) +{ + struct vdev_mlme_obj *vdev_mlme; + + if (!cmd || !cmd->vdev) { + mlme_err("Null input"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + + if (!wlan_ser_is_vdev_queue_enabled(cmd->vdev)) + return WLAN_SER_CMD_QUEUE_DISABLED; + /* + * Serialization command filtering logic + * a. Cancel any existing start bss cmd in the pending queue + * b. If there is an start bss cmd in active queue and + * there is no stop bss cmd in pending queue, + * then explicitly enqueue a stop bss cmd to avoid back to + * back execution of UP cmd. + * c. Enqueue the new start bss cmd with serialization + */ + wlan_vdev_mlme_ser_cancel_request( + cmd->vdev, + WLAN_SER_CMD_VDEV_START_BSS, + WLAN_SER_CANCEL_VDEV_NON_SCAN_CMD_TYPE); + + if (wlan_serialization_is_cmd_present_in_active_queue(NULL, cmd)) { + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(cmd->vdev); + if (mlme_vdev_enqueue_exp_ser_cmd(vdev_mlme, + WLAN_SER_CMD_VDEV_STOP_BSS)) { + mlme_err("Unable to add the exception cmd request"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + } + + return wlan_serialization_request(cmd); +} + +enum wlan_serialization_status +wlan_vdev_mlme_ser_stop_bss(struct wlan_serialization_command *cmd) +{ + uint8_t stop_cmd_pending; + uint8_t ret; + + if (!cmd || !cmd->vdev) { + mlme_err("Null input"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + + if (!wlan_ser_is_vdev_queue_enabled(cmd->vdev)) + return WLAN_SER_CMD_QUEUE_DISABLED; + /* + * Serialization command filtering logic + * a. Cancel any existing start/stop/restart command in the pending + * queue. + * b. If there is a stop cmd in active queue then return + * c. Else enqueue the cmd + * d. If stop cmd already existed in pending queue then return with + * already exists else return the enqueued return value. + */ + stop_cmd_pending = + wlan_serialization_is_cmd_present_in_pending_queue(NULL, cmd); + wlan_vdev_mlme_ser_cancel_request(cmd->vdev, + WLAN_SER_CMD_NONSCAN, + WLAN_SER_CANCEL_VDEV_NON_SCAN_NB_CMD); + + if (wlan_serialization_is_cmd_present_in_active_queue(NULL, cmd)) { + mlme_debug("Cmd already exist in the active queue"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + + ret = wlan_serialization_request(cmd); + + if (stop_cmd_pending && ret == WLAN_SER_CMD_PENDING) + return WLAN_SER_CMD_ALREADY_EXISTS; + else + return ret; +} + +enum wlan_serialization_status +wlan_vdev_mlme_ser_vdev_restart(struct wlan_serialization_command *cmd) +{ + if (!cmd || !cmd->vdev) { + mlme_err("Null input"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + + if (!wlan_ser_is_vdev_queue_enabled(cmd->vdev)) + return WLAN_SER_CMD_QUEUE_DISABLED; + /* + * Serialization command filtering logic + * a. If there exists START or PDEV/VDEV restart command in the pending + * queue then ignore this new vdev restart request. + * b. Else enqueue the new VDEV RESTART cmd + */ + cmd->cmd_type = WLAN_SER_CMD_VDEV_START_BSS; + if (wlan_serialization_is_cmd_present_in_pending_queue(NULL, cmd)) { + mlme_debug("Start cmd already in the pending queue"); + return WLAN_SER_CMD_ALREADY_EXISTS; + } + + cmd->cmd_type = WLAN_SER_CMD_PDEV_RESTART; + if (wlan_serialization_is_cmd_present_in_pending_queue(NULL, cmd)) { + mlme_debug("Pdev restart already in the pending queue"); + return WLAN_SER_CMD_ALREADY_EXISTS; + } + + cmd->cmd_type = WLAN_SER_CMD_VDEV_RESTART; + if (wlan_serialization_is_cmd_present_in_pending_queue(NULL, cmd)) { + mlme_debug("Vdev restart already in the pending queue"); + return WLAN_SER_CMD_ALREADY_EXISTS; + } + + return wlan_serialization_request(cmd); +} + +void wlan_mlme_restart_pdev_iter_cb(struct wlan_objmgr_pdev *pdev, + void *object, void *arg) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)object; + uint8_t *pdev_restart_pending = (uint8_t *)arg; + struct wlan_serialization_command cmd = {0}; + uint8_t vdev_id = wlan_vdev_get_id(vdev); + + cmd.vdev = vdev; + cmd.cmd_id = vdev_id; + cmd.cmd_type = WLAN_SER_CMD_PDEV_RESTART; + /* + * Serialization command filtering logic + * a. Cancel any existing VDEV restart cmd in the pending queue + * b. If Pdev restart already exist in pending queue then return else + * enqueue the new PDEV RESTART cmd + */ + wlan_vdev_mlme_ser_cancel_request( + vdev, + WLAN_SER_CMD_VDEV_RESTART, + WLAN_SER_CANCEL_VDEV_NON_SCAN_CMD_TYPE); + + if (wlan_serialization_is_cmd_present_in_pending_queue(NULL, &cmd)) { + mlme_debug("Cmd already exist in the pending queue vdev:%u", + vdev_id); + *pdev_restart_pending = 1; + } +} + +enum wlan_serialization_status +wlan_vdev_mlme_ser_pdev_restart(struct wlan_serialization_command *cmd) +{ + struct wlan_objmgr_pdev *pdev; + uint8_t pdev_restart_in_pending = 0; + + if (!cmd || !cmd->vdev) { + mlme_err("Null input"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + + if (!wlan_ser_is_vdev_queue_enabled(cmd->vdev)) + return WLAN_SER_CMD_QUEUE_DISABLED; + + pdev = wlan_vdev_get_pdev(cmd->vdev); + wlan_objmgr_pdev_iterate_obj_list(pdev, WLAN_VDEV_OP, + wlan_mlme_restart_pdev_iter_cb, + &pdev_restart_in_pending, 0, + WLAN_MLME_SER_IF_ID); + + if (pdev_restart_in_pending) + return WLAN_SER_CMD_ALREADY_EXISTS; + + return wlan_serialization_request(cmd); +} + +enum wlan_serialization_status +wlan_vdev_mlme_ser_connect(struct wlan_serialization_command *cmd) +{ + struct vdev_mlme_obj *vdev_mlme; + + if (!cmd || !cmd->vdev) { + mlme_err("Null input"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + + if (!wlan_ser_is_vdev_queue_enabled(cmd->vdev)) + return WLAN_SER_CMD_QUEUE_DISABLED; + /* + * Serialization command filtering logic + * a. Cancel any existing CONNECT cmd in the pending queue + * b. If there is an CONNECT cmd in active queue and there is no + * DISCONNECT cmd in pending queue, then explicitly enqueue a + * DISCONNECT cmd to avoid back to back execution of CONNECT cmd. + * c. Enqueue the new CONNECT cmd to the pending queue + */ + wlan_vdev_mlme_ser_cancel_request( + cmd->vdev, + WLAN_SER_CMD_VDEV_CONNECT, + WLAN_SER_CANCEL_VDEV_NON_SCAN_CMD_TYPE); + + if (wlan_serialization_is_cmd_present_in_active_queue(NULL, cmd)) { + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(cmd->vdev); + if (mlme_vdev_enqueue_exp_ser_cmd(vdev_mlme, + WLAN_SER_CMD_VDEV_DISCONNECT)) { + mlme_err("Unable to add the exception cmd request"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + } + + return wlan_serialization_request(cmd); +} + +enum wlan_serialization_status +wlan_vdev_mlme_ser_disconnect(struct wlan_serialization_command *cmd) +{ + if (!cmd || !cmd->vdev) { + mlme_err("Null input"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + + if (!wlan_ser_is_vdev_queue_enabled(cmd->vdev)) + return WLAN_SER_CMD_QUEUE_DISABLED; + /* + * Serialization command filtering logic + * a.Cancel any existing non-blocking non-scan command in the + * pending queue + * b.If there is a DISCONNECT cmd in active queue then return + * c.Else enqueue the DISCONNECT cmd + */ + wlan_vdev_mlme_ser_cancel_request(cmd->vdev, + WLAN_SER_CMD_NONSCAN, + WLAN_SER_CANCEL_VDEV_NON_SCAN_NB_CMD); + + if (wlan_serialization_is_cmd_present_in_active_queue(NULL, cmd)) { + mlme_debug("Cmd already exist in the active queue"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + + return wlan_serialization_request(cmd); +} + +static void +wlan_mlme_cancel_pending_csa_restart(struct wlan_objmgr_pdev *pdev, + void *object, void *arg) +{ + struct wlan_objmgr_vdev *vdev = object; + bool *csa_restart_pending = arg; + struct wlan_serialization_command cmd = {0}; + uint8_t vdev_id = wlan_vdev_get_id(vdev); + + cmd.vdev = vdev; + cmd.cmd_id = vdev_id; + cmd.cmd_type = WLAN_SER_CMD_PDEV_CSA_RESTART; + if (wlan_serialization_is_cmd_present_in_pending_queue(NULL, &cmd)) { + mlme_debug("Cmd already exist in the pending queue vdev:%u", + vdev_id); + *csa_restart_pending = true; + } + + wlan_vdev_mlme_ser_cancel_request( + vdev, + WLAN_SER_CMD_PDEV_CSA_RESTART, + WLAN_SER_CANCEL_VDEV_NON_SCAN_CMD_TYPE); +} + +static void +wlan_mlme_check_pdev_restart(struct wlan_objmgr_pdev *pdev, + void *object, void *arg) +{ + struct wlan_objmgr_vdev *vdev = object; + bool *pdev_restart_pending = arg; + struct wlan_serialization_command cmd = {0}; + uint8_t vdev_id = wlan_vdev_get_id(vdev); + + cmd.vdev = vdev; + cmd.cmd_id = vdev_id; + cmd.cmd_type = WLAN_SER_CMD_PDEV_RESTART; + if (wlan_serialization_is_cmd_present_in_active_queue(NULL, &cmd)) { + mlme_debug("Pdev restart already in the active queue vdev:%u", + vdev_id); + *pdev_restart_pending = true; + } +} + +enum wlan_serialization_status +wlan_vdev_mlme_ser_pdev_csa_restart(struct wlan_serialization_command *cmd) +{ + struct wlan_objmgr_pdev *pdev; + bool csa_restart_pending = false; + bool pdev_restart_pending = false; + enum wlan_serialization_status ret; + + if (!cmd || !cmd->vdev) { + mlme_err("Null input"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + + if (!wlan_ser_is_vdev_queue_enabled(cmd->vdev)) + return WLAN_SER_CMD_QUEUE_DISABLED; + + /* + * Serialization command filtering logic + * a. Cancel any existing PDEV CSA restart cmd in the pending queue + * b. If there exists PDEV RESTART command in the active queue + * then deny this request + * c. If PDEV CSA RESTART cmd already existed in pending queue + * then enqueue and return already exists + * d. Else enqueue this PDEV CSA RESTART cmd + */ + pdev = wlan_vdev_get_pdev(cmd->vdev); + wlan_objmgr_pdev_iterate_obj_list(pdev, WLAN_VDEV_OP, + wlan_mlme_cancel_pending_csa_restart, + &csa_restart_pending, 0, + WLAN_MLME_SER_IF_ID); + + wlan_objmgr_pdev_iterate_obj_list(pdev, WLAN_VDEV_OP, + wlan_mlme_check_pdev_restart, + &pdev_restart_pending, 0, + WLAN_MLME_SER_IF_ID); + + if (pdev_restart_pending) + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + + ret = wlan_serialization_request(cmd); + + if (csa_restart_pending && ret == WLAN_SER_CMD_PENDING) + return WLAN_SER_CMD_ALREADY_EXISTS; + + return ret; +} + +void +wlan_vdev_mlme_ser_remove_request(struct wlan_objmgr_vdev *vdev, + uint32_t cmd_id, + enum wlan_serialization_cmd_type cmd_type) +{ + struct wlan_serialization_queued_cmd_info cmd = {0}; + + mlme_debug("Vdev:%d remove cmd:%d", wlan_vdev_get_id(vdev), cmd_type); + + cmd.vdev = vdev; + cmd.cmd_id = cmd_id; + cmd.cmd_type = cmd_type; + cmd.requestor = WLAN_UMAC_COMP_MLME; + cmd.req_type = WLAN_SER_CANCEL_NON_SCAN_CMD; + cmd.queue_type = WLAN_SERIALIZATION_ACTIVE_QUEUE; + + /* Inform serialization for command completion */ + wlan_serialization_remove_cmd(&cmd); +} + +void +wlan_vdev_mlme_ser_cancel_request(struct wlan_objmgr_vdev *vdev, + enum wlan_serialization_cmd_type cmd_type, + enum wlan_serialization_cancel_type req_type) +{ + struct wlan_serialization_queued_cmd_info cmd = {0}; + + cmd.vdev = vdev; + cmd.cmd_type = cmd_type; + cmd.req_type = req_type; + cmd.requestor = WLAN_UMAC_COMP_MLME; + cmd.queue_type = WLAN_SERIALIZATION_PENDING_QUEUE; + + wlan_serialization_cancel_request(&cmd); +} + +void +mlme_ser_inc_act_cmd_timeout(struct wlan_serialization_command *cmd) +{ + mlme_debug("Increase timeout of cmd type:%d", cmd->cmd_type); + wlan_serialization_update_timer(cmd); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_utils/wlan_vdev_mlme_ser_if.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_utils/wlan_vdev_mlme_ser_if.h new file mode 100644 index 0000000000000000000000000000000000000000..4f90e3080690a10a20f942a6b2d79b4b62be7795 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/mlme_utils/wlan_vdev_mlme_ser_if.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file wlan_vdev_mlme_ser.h + * This file implements the APIs to support interface between vdev_mlme and + * serialization module + */ + +#ifndef _WLAN_VDEV_MLME_SER_IF_H_ +#define _WLAN_VDEV_MLME_SER_IF_H_ + +#include +#include +#include +#include + +/** + * wlan_vdev_mlme_ser_start_bss() - Add start_bss cmd to serialization + * @cmd: Serialization command + * + * Return: Status of enqueue in the serialization module + */ +enum wlan_serialization_status +wlan_vdev_mlme_ser_start_bss(struct wlan_serialization_command *cmd); + +/** + * wlan_vdev_mlme_ser_stop_bss() - Add stop_bss cmd to serialization + * @cmd: Serialization command + * + * Return: Status of enqueue in the serialization module + */ +enum wlan_serialization_status +wlan_vdev_mlme_ser_stop_bss(struct wlan_serialization_command *cmd); + +/** + * wlan_vdev_mlme_ser_vdev_restart() - Add vdev restart cmd to serialization + * @cmd: Serialization command + * + * Return: Status of enqueue in the serialization module + */ +enum wlan_serialization_status +wlan_vdev_mlme_ser_vdev_restart(struct wlan_serialization_command *cmd); + +/** + * wlan_vdev_mlme_ser_pdev_restart() - Add pdev restart cmd to serialization + * @cmd: Serialization command + * + * Return: Status of enqueue in the serialization module + */ +enum wlan_serialization_status +wlan_vdev_mlme_ser_pdev_restart(struct wlan_serialization_command *cmd); + +/** + * wlan_vdev_mlme_ser_connect() - Add connect cmd to serialization + * @cmd: Serialization command + * + * Return: Status of enqueue in the serialization module + */ +enum wlan_serialization_status +wlan_vdev_mlme_ser_connect(struct wlan_serialization_command *cmd); + +/** + * wlan_vdev_mlme_ser_disconnect() - Add disconnect cmd to serialization + * @cmd: Serialization command + * + * Return: Status of enqueue in the serialization module + */ +enum wlan_serialization_status +wlan_vdev_mlme_ser_disconnect(struct wlan_serialization_command *cmd); + +/** + * wlan_vdev_mlme_ser_remove_request() - Remove a request from to + * serialization + * @vdev: Object manager vdev object + * @cmd_id: Serialization command id + * @cmd_type: Serialization command type + * + * Return: void + */ +void +wlan_vdev_mlme_ser_remove_request(struct wlan_objmgr_vdev *vdev, + uint32_t cmd_id, + enum wlan_serialization_cmd_type cmd_type); + +/** + * wlan_vdev_mlme_ser_cancel_request() - Cancel a request from to + * serialization + * @vdev: Object manager vdev object + * @cmd_type: Serialization command type + * @req_type: Type of command cancellation. i.e single/vdev/pdev + * + * Return: void + */ +void +wlan_vdev_mlme_ser_cancel_request(struct wlan_objmgr_vdev *vdev, + enum wlan_serialization_cmd_type cmd_type, + enum wlan_serialization_cancel_type req_type); +/** + * mlme_ser_inc_act_cmd_timeout() - Increase timeout of active cmd + * @cmd: Serialization command + * + * Return: void + */ +void mlme_ser_inc_act_cmd_timeout(struct wlan_serialization_command *cmd); + +/** + * wlan_vdev_mlme_ser_pdev_csa_restart - Add pdev CSA restart cmd to + * serialization + * @cmd: Serialization command + * + * Return: Status of enqueue in the serialization module + */ +enum wlan_serialization_status +wlan_vdev_mlme_ser_pdev_csa_restart(struct wlan_serialization_command *cmd); + +#endif /* _WLAN_VDEV_MLME_SER_IF_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/pdev_mgr/dispatcher/inc/wlan_pdev_mlme_api.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/pdev_mgr/dispatcher/inc/wlan_pdev_mlme_api.h new file mode 100644 index 0000000000000000000000000000000000000000..4b99fd081274276c5716917bb57e1e7a25a7041a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/pdev_mgr/dispatcher/inc/wlan_pdev_mlme_api.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Define PDEV MLME public APIs + */ + +#ifndef _WLAN_PDEV_MLME_API_H_ +#define _WLAN_PDEV_MLME_API_H_ + +/** + * wlan_pdev_mlme_get_cmpt_obj - Returns MLME component object + * @pdev: PDEV object + * + * Retrieves MLME component object from PDEV object + * + * Return: comp handle on SUCCESS + * NULL, if it fails to retrieve + */ +struct pdev_mlme_obj *wlan_pdev_mlme_get_cmpt_obj( + struct wlan_objmgr_pdev *pdev); +/** + * wlan_pdev_mlme_get_ext_hdl - Returns legacy handle + * @pdev: PDEV object + * + * Retrieves legacy handle from pdev mlme component object + * + * Return: legacy handle on SUCCESS + * NULL, if it fails to retrieve + */ +mlme_pdev_ext_t *wlan_pdev_mlme_get_ext_hdl(struct wlan_objmgr_pdev *pdev); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/pdev_mgr/dispatcher/src/wlan_pdev_mlme_api.c b/drivers/staging/qca-wifi-host-cmn/umac/mlme/pdev_mgr/dispatcher/src/wlan_pdev_mlme_api.c new file mode 100644 index 0000000000000000000000000000000000000000..87973069bef9aa7329526b64947c269bc7b90458 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/pdev_mgr/dispatcher/src/wlan_pdev_mlme_api.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implements PDEV MLME public APIs + */ + +#include +#include +#include "include/wlan_pdev_mlme.h" +#include +#include + +struct pdev_mlme_obj *wlan_pdev_mlme_get_cmpt_obj(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_mlme_obj *pdev_mlme; + + if (!pdev) { + mlme_err("pdev is NULL"); + return NULL; + } + + pdev_mlme = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MLME); + if (!pdev_mlme) { + mlme_err(" MLME component object is NULL"); + return NULL; + } + + return pdev_mlme; +} + +mlme_pdev_ext_t *wlan_pdev_mlme_get_ext_hdl(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_mlme_obj *pdev_mlme; + + pdev_mlme = wlan_pdev_mlme_get_cmpt_obj(pdev); + if (pdev_mlme) + return pdev_mlme->ext_pdev_ptr; + + return NULL; +} + +qdf_export_symbol(wlan_pdev_mlme_get_ext_hdl); + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/psoc_mgr/dispatcher/inc/wlan_psoc_mlme_api.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/psoc_mgr/dispatcher/inc/wlan_psoc_mlme_api.h new file mode 100644 index 0000000000000000000000000000000000000000..f9911ab14c77d3704a5bb090e52ebe0a723fb0cc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/psoc_mgr/dispatcher/inc/wlan_psoc_mlme_api.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Define PSOC MLME public APIs + */ + +#ifndef _WLAN_PSOC_MLME_API_H_ +#define _WLAN_PSOC_MLME_API_H_ + +/** + * wlan_psoc_mlme_get_cmpt_obj() - Returns PSOC MLME component object + * @psoc: PSOC object + * + * Retrieves MLME component object from PSOC object + * + * Return: comp handle on SUCCESS + * NULL, if it fails to retrieve + */ +struct psoc_mlme_obj *wlan_psoc_mlme_get_cmpt_obj( + struct wlan_objmgr_psoc *psoc); + +/** + * wlan_psoc_mlme_get_ext_hdl() - Returns legacy handle + * @psoc: PSOC object + * + * Retrieves legacy handle from psoc mlme component object + * + * Return: legacy handle on SUCCESS + * NULL, if it fails to retrieve + */ +mlme_psoc_ext_t *wlan_psoc_mlme_get_ext_hdl(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_psoc_mlme_set_ext_hdl() - Set legacy handle + * @psoc_mlme: psoc_mlme object + * psoc_ext_hdl: PSOC level legacy handle + * + * Sets legacy handle in psoc mlme component object + * + * Return: Void + */ +void wlan_psoc_mlme_set_ext_hdl(struct psoc_mlme_obj *psoc_mlme, + mlme_psoc_ext_t *psoc_ext_hdl); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/psoc_mgr/dispatcher/src/wlan_psoc_mlme_api.c b/drivers/staging/qca-wifi-host-cmn/umac/mlme/psoc_mgr/dispatcher/src/wlan_psoc_mlme_api.c new file mode 100644 index 0000000000000000000000000000000000000000..508d017531b519711c16e1b95ad4555efe6c3942 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/psoc_mgr/dispatcher/src/wlan_psoc_mlme_api.c @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implements PSOC MLME public APIs + */ + +#include +#include +#include +#include +#include + +struct psoc_mlme_obj *wlan_psoc_mlme_get_cmpt_obj(struct wlan_objmgr_psoc *psoc) +{ + struct psoc_mlme_obj *psoc_mlme; + + psoc_mlme = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_MLME); + if (!psoc_mlme) { + mlme_err("PSOC MLME component object is NULL"); + return NULL; + } + + return psoc_mlme; +} + +qdf_export_symbol(wlan_psoc_mlme_get_cmpt_obj); + +mlme_psoc_ext_t *wlan_psoc_mlme_get_ext_hdl(struct wlan_objmgr_psoc *psoc) +{ + struct psoc_mlme_obj *psoc_mlme; + + psoc_mlme = wlan_psoc_mlme_get_cmpt_obj(psoc); + if (psoc_mlme) + return psoc_mlme->ext_psoc_ptr; + + return NULL; +} + +qdf_export_symbol(wlan_psoc_mlme_get_ext_hdl); + +void wlan_psoc_mlme_set_ext_hdl(struct psoc_mlme_obj *psoc_mlme, + mlme_psoc_ext_t *psoc_ext_hdl) +{ + psoc_mlme->ext_psoc_ptr = psoc_ext_hdl; +} + +qdf_export_symbol(wlan_psoc_mlme_set_ext_hdl); diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/core/src/vdev_mgr_ops.c b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/core/src/vdev_mgr_ops.c new file mode 100644 index 0000000000000000000000000000000000000000..7d3bc3c9350ec46f4156e62353bf82ddf0f92c3c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/core/src/vdev_mgr_ops.c @@ -0,0 +1,610 @@ +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: vdev_mgr_ops.c + * + * This file provide API definitions for filling data structures + * and sending vdev mgmt commands to target_if/mlme + */ +#include "vdev_mgr_ops.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static QDF_STATUS vdev_mgr_create_param_update( + struct vdev_mlme_obj *mlme_obj, + struct vdev_create_params *param) +{ + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + struct vdev_mlme_mbss_11ax *mbss; + + vdev = mlme_obj->vdev; + if (!vdev) { + mlme_err("VDEV is NULL"); + return QDF_STATUS_E_INVAL; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + mlme_err("PDEV is NULL"); + return QDF_STATUS_E_INVAL; + } + + mbss = &mlme_obj->mgmt.mbss_11ax; + param->pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + param->vdev_id = wlan_vdev_get_id(vdev); + param->nss_2g = mlme_obj->proto.generic.nss_2g; + param->nss_5g = mlme_obj->proto.generic.nss_5g; + param->type = mlme_obj->mgmt.generic.type; + param->subtype = mlme_obj->mgmt.generic.subtype; + param->mbssid_flags = mbss->mbssid_flags; + param->vdevid_trans = mbss->vdevid_trans; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS vdev_mgr_create_send(struct vdev_mlme_obj *mlme_obj) +{ + QDF_STATUS status; + struct vdev_create_params param = {0}; + + if (!mlme_obj) { + mlme_err("VDEV_MLME is NULL"); + return QDF_STATUS_E_INVAL; + } + + status = vdev_mgr_create_param_update(mlme_obj, ¶m); + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("Param Update Error: %d", status); + return status; + } + + status = tgt_vdev_mgr_create_send(mlme_obj, ¶m); + + return status; +} + +static QDF_STATUS vdev_mgr_start_param_update( + struct vdev_mlme_obj *mlme_obj, + struct vdev_start_params *param) +{ + struct wlan_channel *des_chan; + uint32_t dfs_reg; + bool set_agile = false, dfs_set_cfreq2 = false; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + enum QDF_OPMODE op_mode; + + vdev = mlme_obj->vdev; + if (!vdev) { + mlme_err("VDEV is NULL"); + return QDF_STATUS_E_INVAL; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + mlme_err("PDEV is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (wlan_objmgr_pdev_try_get_ref(pdev, WLAN_MLME_SB_ID) != + QDF_STATUS_SUCCESS) { + mlme_err("Failed to get pdev reference"); + return QDF_STATUS_E_FAILURE; + } + + des_chan = wlan_vdev_mlme_get_des_chan(vdev); + param->vdev_id = wlan_vdev_get_id(vdev); + + op_mode = wlan_vdev_mlme_get_opmode(vdev); + if ((op_mode == QDF_SAP_MODE || op_mode == QDF_P2P_GO_MODE) && + (WLAN_REG_IS_5GHZ_CH_FREQ(des_chan->ch_freq) || + WLAN_REG_IS_49GHZ_FREQ(des_chan->ch_freq) || + WLAN_REG_IS_6GHZ_CHAN_FREQ(des_chan->ch_freq))) { + tgt_dfs_set_current_channel_for_freq(pdev, des_chan->ch_freq, + des_chan->ch_flags, + des_chan->ch_flagext, + des_chan->ch_ieee, + des_chan->ch_freq_seg1, + des_chan->ch_freq_seg2, + des_chan->ch_cfreq1, + des_chan->ch_cfreq2); + if (des_chan->ch_cfreq2) + param->channel.dfs_set_cfreq2 = + utils_is_dfs_cfreq2_ch(pdev); + } + param->beacon_interval = mlme_obj->proto.generic.beacon_interval; + param->dtim_period = mlme_obj->proto.generic.dtim_period; + param->disable_hw_ack = mlme_obj->mgmt.generic.disable_hw_ack; + param->preferred_rx_streams = + mlme_obj->mgmt.chainmask_info.num_rx_chain; + param->preferred_tx_streams = + mlme_obj->mgmt.chainmask_info.num_tx_chain; + + wlan_reg_get_dfs_region(pdev, &dfs_reg); + param->regdomain = dfs_reg; + param->he_ops = mlme_obj->proto.he_ops_info.he_ops; + + param->channel.chan_id = des_chan->ch_ieee; + param->channel.pwr = mlme_obj->mgmt.generic.tx_power; + param->channel.mhz = des_chan->ch_freq; + param->channel.half_rate = mlme_obj->mgmt.rate_info.half_rate; + param->channel.quarter_rate = mlme_obj->mgmt.rate_info.quarter_rate; + + if (op_mode == QDF_SAP_MODE || op_mode == QDF_P2P_GO_MODE) + param->channel.dfs_set = wlan_reg_is_dfs_for_freq( + pdev, + des_chan->ch_freq); + + param->channel.is_chan_passive = + utils_is_dfs_chan_for_freq(pdev, param->channel.mhz); + param->channel.allow_ht = mlme_obj->proto.ht_info.allow_ht; + param->channel.allow_vht = mlme_obj->proto.vht_info.allow_vht; + param->channel.phy_mode = mlme_obj->mgmt.generic.phy_mode; + param->channel.cfreq1 = des_chan->ch_cfreq1; + param->channel.cfreq2 = des_chan->ch_cfreq2; + param->channel.maxpower = mlme_obj->mgmt.generic.maxpower; + param->channel.minpower = mlme_obj->mgmt.generic.minpower; + param->channel.maxregpower = mlme_obj->mgmt.generic.maxregpower; + param->channel.antennamax = mlme_obj->mgmt.generic.antennamax; + param->channel.reg_class_id = mlme_obj->mgmt.generic.reg_class_id; + param->bcn_tx_rate_code = vdev_mgr_fetch_ratecode(mlme_obj); + param->ldpc_rx_enabled = mlme_obj->proto.generic.ldpc; + if (mlme_obj->mgmt.generic.type == WLAN_VDEV_MLME_TYPE_AP) { + param->hidden_ssid = mlme_obj->mgmt.ap.hidden_ssid; + param->cac_duration_ms = mlme_obj->mgmt.ap.cac_duration_ms; + } + wlan_vdev_mlme_get_ssid(vdev, param->ssid.mac_ssid, + ¶m->ssid.length); + + if (des_chan->ch_phymode == WLAN_PHYMODE_11AC_VHT80 || + des_chan->ch_phymode == WLAN_PHYMODE_11AXA_HE80) { + tgt_dfs_find_vht80_precac_chan_freq(pdev, + des_chan->ch_phymode, + des_chan->ch_freq_seg1, + ¶m->channel.cfreq1, + ¶m->channel.cfreq2, + ¶m->channel.phy_mode, + &dfs_set_cfreq2, + &set_agile); + param->channel.dfs_set_cfreq2 = dfs_set_cfreq2; + param->channel.set_agile = set_agile; + } + + wlan_objmgr_pdev_release_ref(pdev, WLAN_MLME_SB_ID); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS vdev_mgr_start_send( + struct vdev_mlme_obj *mlme_obj, + bool restart) +{ + QDF_STATUS status; + struct vdev_start_params param = {0}; + + if (!mlme_obj) { + mlme_err("VDEV_MLME is NULL"); + return QDF_STATUS_E_INVAL; + } + + status = vdev_mgr_start_param_update(mlme_obj, ¶m); + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("Param Update Error: %d", status); + return status; + } + + param.is_restart = restart; + status = tgt_vdev_mgr_start_send(mlme_obj, ¶m); + + return status; +} + +static QDF_STATUS vdev_mgr_delete_param_update( + struct vdev_mlme_obj *mlme_obj, + struct vdev_delete_params *param) +{ + struct wlan_objmgr_vdev *vdev; + + vdev = mlme_obj->vdev; + if (!vdev) { + mlme_err("VDEV is NULL"); + return QDF_STATUS_E_INVAL; + } + + param->vdev_id = wlan_vdev_get_id(vdev); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS vdev_mgr_delete_send(struct vdev_mlme_obj *mlme_obj) +{ + QDF_STATUS status; + struct vdev_delete_params param; + + if (!mlme_obj) { + mlme_err("VDEV_MLME is NULL"); + return QDF_STATUS_E_INVAL; + } + + status = vdev_mgr_delete_param_update(mlme_obj, ¶m); + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("Param Update Error: %d", status); + return status; + } + + status = tgt_vdev_mgr_delete_send(mlme_obj, ¶m); + + return status; +} + +static QDF_STATUS vdev_mgr_stop_param_update( + struct vdev_mlme_obj *mlme_obj, + struct vdev_stop_params *param) +{ + struct wlan_objmgr_vdev *vdev; + + vdev = mlme_obj->vdev; + if (!vdev) { + mlme_err("VDEV is NULL"); + return QDF_STATUS_E_INVAL; + } + + param->vdev_id = wlan_vdev_get_id(vdev); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS vdev_mgr_stop_send(struct vdev_mlme_obj *mlme_obj) +{ + QDF_STATUS status; + struct vdev_stop_params param = {0}; + + if (!mlme_obj) { + mlme_err("VDEV_MLME is NULL"); + return QDF_STATUS_E_INVAL; + } + + status = vdev_mgr_stop_param_update(mlme_obj, ¶m); + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("Param Update Error: %d", status); + return status; + } + + status = tgt_vdev_mgr_stop_send(mlme_obj, ¶m); + + return status; +} + +static QDF_STATUS vdev_mgr_bcn_tmpl_param_update( + struct vdev_mlme_obj *mlme_obj, + struct beacon_tmpl_params *param) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS vdev_mgr_sta_ps_param_update( + struct vdev_mlme_obj *mlme_obj, + struct sta_ps_params *param) +{ + struct wlan_objmgr_vdev *vdev; + + vdev = mlme_obj->vdev; + param->vdev_id = wlan_vdev_get_id(vdev); + param->param_id = WLAN_MLME_CFG_UAPSD; + param->value = mlme_obj->proto.sta.uapsd_cfg; + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS vdev_mgr_up_param_update( + struct vdev_mlme_obj *mlme_obj, + struct vdev_up_params *param) +{ + struct vdev_mlme_mbss_11ax *mbss; + struct wlan_objmgr_vdev *vdev; + + vdev = mlme_obj->vdev; + param->vdev_id = wlan_vdev_get_id(vdev); + param->assoc_id = mlme_obj->proto.sta.assoc_id; + mbss = &mlme_obj->mgmt.mbss_11ax; + if (mbss->profile_idx) { + param->profile_idx = mbss->profile_idx; + param->profile_num = mbss->profile_num; + qdf_mem_copy(param->trans_bssid, mbss->trans_bssid, + QDF_MAC_ADDR_SIZE); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS vdev_mgr_up_send(struct vdev_mlme_obj *mlme_obj) +{ + QDF_STATUS status; + struct vdev_up_params param = {0}; + struct sta_ps_params ps_param = {0}; + struct beacon_tmpl_params bcn_tmpl_param = {0}; + enum QDF_OPMODE opmode; + struct wlan_objmgr_vdev *vdev; + struct config_fils_params fils_param = {0}; + uint8_t is_6g_sap_fd_enabled; + + if (!mlme_obj) { + mlme_err("VDEV_MLME is NULL"); + return QDF_STATUS_E_INVAL; + } + + vdev = mlme_obj->vdev; + if (!vdev) { + mlme_err("VDEV is NULL"); + return QDF_STATUS_E_INVAL; + } + + vdev_mgr_up_param_update(mlme_obj, ¶m); + vdev_mgr_bcn_tmpl_param_update(mlme_obj, &bcn_tmpl_param); + + opmode = wlan_vdev_mlme_get_opmode(vdev); + if (opmode == QDF_STA_MODE) { + vdev_mgr_sta_ps_param_update(mlme_obj, &ps_param); + status = tgt_vdev_mgr_sta_ps_param_send(mlme_obj, &ps_param); + + } + + status = tgt_vdev_mgr_beacon_tmpl_send(mlme_obj, &bcn_tmpl_param); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = tgt_vdev_mgr_up_send(mlme_obj, ¶m); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + is_6g_sap_fd_enabled = wlan_vdev_mlme_feat_ext_cap_get(vdev, + WLAN_VDEV_FEXT_FILS_DISC_6G_SAP); + mlme_debug("SAP FD enabled %d", is_6g_sap_fd_enabled); + if (opmode == QDF_SAP_MODE && mlme_obj->vdev->vdev_mlme.des_chan && + WLAN_REG_IS_6GHZ_CHAN_FREQ( + mlme_obj->vdev->vdev_mlme.des_chan->ch_freq)) { + fils_param.vdev_id = wlan_vdev_get_id(mlme_obj->vdev); + if (is_6g_sap_fd_enabled) { + fils_param.fd_period = DEFAULT_FILS_DISCOVERY_PERIOD; + } else { + fils_param.send_prb_rsp_frame = true; + fils_param.fd_period = DEFAULT_PROBE_RESP_PERIOD; + } + status = tgt_vdev_mgr_fils_enable_send(mlme_obj, + &fils_param); + } + + return status; +} + +static QDF_STATUS vdev_mgr_down_param_update( + struct vdev_mlme_obj *mlme_obj, + struct vdev_down_params *param) +{ + struct wlan_objmgr_vdev *vdev; + + vdev = mlme_obj->vdev; + if (!vdev) { + mlme_err("VDEV is NULL"); + return QDF_STATUS_E_INVAL; + } + + param->vdev_id = wlan_vdev_get_id(vdev); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS vdev_mgr_down_send(struct vdev_mlme_obj *mlme_obj) +{ + QDF_STATUS status; + struct vdev_down_params param = {0}; + + if (!mlme_obj) { + mlme_err("VDEV_MLME is NULL"); + return QDF_STATUS_E_INVAL; + } + + status = vdev_mgr_down_param_update(mlme_obj, ¶m); + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("Param Update Error: %d", status); + return status; + } + + status = tgt_vdev_mgr_down_send(mlme_obj, ¶m); + + return status; +} + +static QDF_STATUS vdev_mgr_peer_flush_tids_param_update( + struct vdev_mlme_obj *mlme_obj, + struct peer_flush_params *param, + uint8_t *mac, + uint32_t peer_tid_bitmap) +{ + struct wlan_objmgr_vdev *vdev; + + vdev = mlme_obj->vdev; + if (!vdev) { + mlme_err("VDEV is NULL"); + return QDF_STATUS_E_INVAL; + } + + param->vdev_id = wlan_vdev_get_id(vdev); + param->peer_tid_bitmap = peer_tid_bitmap; + qdf_mem_copy(param->peer_mac, mac, QDF_MAC_ADDR_SIZE); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS vdev_mgr_peer_flush_tids_send(struct vdev_mlme_obj *mlme_obj, + uint8_t *mac, + uint32_t peer_tid_bitmap) +{ + QDF_STATUS status; + struct peer_flush_params param = {0}; + + if (!mlme_obj || !mac) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + status = vdev_mgr_peer_flush_tids_param_update(mlme_obj, ¶m, + mac, peer_tid_bitmap); + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("Param Update Error: %d", status); + return status; + } + + status = tgt_vdev_mgr_peer_flush_tids_send(mlme_obj, ¶m); + + return status; +} + +static QDF_STATUS vdev_mgr_multiple_restart_param_update( + struct wlan_objmgr_pdev *pdev, + struct mlme_channel_param *chan, + uint32_t disable_hw_ack, + uint32_t *vdev_ids, + uint32_t num_vdevs, + struct vdev_mlme_mvr_param *mvr_param, + struct multiple_vdev_restart_params *param) +{ + param->pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + param->requestor_id = MULTIPLE_VDEV_RESTART_REQ_ID; + param->disable_hw_ack = disable_hw_ack; + param->cac_duration_ms = WLAN_DFS_WAIT_MS; + param->num_vdevs = num_vdevs; + + qdf_mem_copy(param->vdev_ids, vdev_ids, + sizeof(uint32_t) * (param->num_vdevs)); + qdf_mem_copy(¶m->ch_param, chan, + sizeof(struct mlme_channel_param)); + qdf_mem_copy(param->mvr_param, mvr_param, + sizeof(*mvr_param) * (param->num_vdevs)); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS vdev_mgr_multiple_restart_send(struct wlan_objmgr_pdev *pdev, + struct mlme_channel_param *chan, + uint32_t disable_hw_ack, + uint32_t *vdev_ids, + uint32_t num_vdevs, + struct vdev_mlme_mvr_param *mvr_param) +{ + struct multiple_vdev_restart_params param = {0}; + + vdev_mgr_multiple_restart_param_update(pdev, chan, + disable_hw_ack, + vdev_ids, num_vdevs, + mvr_param, ¶m); + + return tgt_vdev_mgr_multiple_vdev_restart_send(pdev, ¶m); +} + +qdf_export_symbol(vdev_mgr_multiple_restart_send); + +static QDF_STATUS vdev_mgr_set_custom_aggr_size_param_update( + struct vdev_mlme_obj *mlme_obj, + struct set_custom_aggr_size_params *param, + bool is_amsdu) +{ + struct wlan_objmgr_vdev *vdev; + + vdev = mlme_obj->vdev; + if (!vdev) { + mlme_err("VDEV is NULL"); + return QDF_STATUS_E_INVAL; + } + + param->aggr_type = is_amsdu ? WLAN_MLME_CUSTOM_AGGR_TYPE_AMSDU + : WLAN_MLME_CUSTOM_AGGR_TYPE_AMPDU; + /* + * We are only setting TX params, therefore + * we are disabling rx_aggr_size + */ + param->rx_aggr_size_disable = true; + param->tx_aggr_size = is_amsdu ? mlme_obj->mgmt.generic.amsdu + : mlme_obj->mgmt.generic.ampdu; + param->vdev_id = wlan_vdev_get_id(vdev); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS vdev_mgr_set_custom_aggr_size_send( + struct vdev_mlme_obj *vdev_mlme, + bool is_amsdu) +{ + QDF_STATUS status; + struct set_custom_aggr_size_params param = {0}; + + status = vdev_mgr_set_custom_aggr_size_param_update(vdev_mlme, + ¶m, is_amsdu); + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("Param Update Error: %d", status); + return status; + } + + return tgt_vdev_mgr_set_custom_aggr_size_send(vdev_mlme, ¶m); +} + +static QDF_STATUS vdev_mgr_peer_delete_all_param_update( + struct vdev_mlme_obj *mlme_obj, + struct peer_delete_all_params *param) +{ + struct wlan_objmgr_vdev *vdev; + + vdev = mlme_obj->vdev; + if (!vdev) { + mlme_err("VDEV is NULL"); + return QDF_STATUS_E_INVAL; + } + + param->vdev_id = wlan_vdev_get_id(vdev); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS vdev_mgr_peer_delete_all_send(struct vdev_mlme_obj *mlme_obj) +{ + QDF_STATUS status; + struct peer_delete_all_params param = {0}; + + if (!mlme_obj) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + status = vdev_mgr_peer_delete_all_param_update(mlme_obj, ¶m); + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("Param Update Error: %d", status); + return status; + } + + status = tgt_vdev_mgr_peer_delete_all_send(mlme_obj, ¶m); + + return status; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/core/src/vdev_mgr_ops.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/core/src/vdev_mgr_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f6461213aba55da257b950650f2142d79700f24e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/core/src/vdev_mgr_ops.h @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: vdev_mgr_ops.h + * + * This header file provides API declarations for filling data structures + * and sending vdev mgmt commands to target_if/mlme/vdev_mgr + */ + +#ifndef __VDEV_MGR_OPS_H__ +#define __VDEV_MGR_OPS_H__ + +#include +#include +#include +#include "include/wlan_vdev_mlme.h" + +/** + * vdev_mgr_create_send() – MLME API to create command to + * target_if + * @mlme_obj: pointer to vdev_mlme_obj + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_create_send(struct vdev_mlme_obj *mlme_obj); + +/** + * vdev_mgr_start_send() – MLME API to send start request to + * target_if + * @mlme_obj: pointer to vdev_mlme_obj + * @restart: flag to indicate type of request START/RESTART + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_start_send(struct vdev_mlme_obj *mlme_obj, bool restart); + +/** + * vdev_mgr_delete_send() – MLME API to send delete request to + * target_if + * @mlme_obj: pointer to vdev_mlme_obj + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_delete_send(struct vdev_mlme_obj *mlme_obj); + +/** + * vdev_mgr_peer_flush_tids_send () – MLME API to setup peer flush tids + * @mlme_obj: pointer to vdev_mlme_obj + * @mac: pointer to peer mac address + * @peer_tid_bitmap: peer tid bitmap + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_peer_flush_tids_send(struct vdev_mlme_obj *mlme_obj, + uint8_t *mac, + uint32_t peer_tid_bitmap); +/** + * vdev_mgr_stop_send () – MLME API to send STOP request + * @mlme_obj: pointer to vdev_mlme_obj + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_stop_send(struct vdev_mlme_obj *mlme_obj); + +/** + * vdev_mgr_up_send () – MLME API to send UP command + * @mlme_obj: pointer to vdev_mlme_obj + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_up_send(struct vdev_mlme_obj *mlme_obj); + +/** + * vdev_mgr_down_send () – MLME API to send down command + * @mlme_obj: pointer to vdev_mlme_obj + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_down_send(struct vdev_mlme_obj *mlme_obj); + +/** + * vdev_mgr_set_param_send() – MLME API to send vdev param + * @mlme_obj: pointer to vdev_mlme_obj + * @param_id: parameter id + * @param_value: value corresponding to parameter id + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_set_param_send(struct vdev_mlme_obj *mlme_obj, + uint32_t param_id, + uint32_t param_value); + +/** + * vdev_mgr_set_neighbour_rx_cmd_send() – MLME API to send neighbour Rx + * cmd + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to set neighbour rx params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_set_neighbour_rx_cmd_send( + struct vdev_mlme_obj *mlme_obj, + struct set_neighbour_rx_params *param); + +/** + * vdev_mgr_set_nac_rssi_send() – MLME API to send nac rssi + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to vdev_scan_nac_rssi_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_nac_rssi_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_scan_nac_rssi_params *param); + +/** + * vdev_mgr_sifs_trigger_send() – MLME API to send SIFS trigger + * @mlme_obj: pointer to vdev_mlme_obj + * @param_value: parameter value + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_sifs_trigger_send( + struct vdev_mlme_obj *mlme_obj, + uint32_t param_value); + +/** + * vdev_mgr_set_custom_aggr_size_cmd_send() – MLME API to send custom aggr + * size + * @mlme_obj: pointer to vdev_mlme_obj + * @is_amsdu: boolean to indicate value corresponds to amsdu/ampdu + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_set_custom_aggr_size_send( + struct vdev_mlme_obj *mlme_obj, bool is_amsdu); + +/** + * vdev_mgr_onfig_ratemask_cmd_send() – MLME API to send ratemask + * @mlme_obj: pointer to vdev_mlme_obj + * @type: type of ratemask configuration + * @lower32: Lower 32 bits in the 1st 64-bit value + * @higher32: Higher 32 bits in the 1st 64-bit value + * @lower32_2: Lower 32 bits in the 2nd 64-bit value + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_config_ratemask_cmd_send( + struct vdev_mlme_obj *mlme_obj, + uint8_t type, + uint32_t lower32, + uint32_t higher32, + uint32_t lower32_2); + +/** + * vdev_mgr_beacon_stop() – MLME API to stop beacon + * @mlme_obj: pointer to vdev_mlme_obj + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_beacon_stop(struct vdev_mlme_obj *mlme_obj); + +/** + * vdev_mgr_beacon_free() – MLME API to free beacon + * @mlme_obj: pointer to vdev_mlme_obj + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_beacon_free(struct vdev_mlme_obj *mlme_obj); + +/** + * vdev_mgr_beacon_send() – MLME API to send beacon + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to beacon_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_beacon_send(struct vdev_mlme_obj *mlme_obj, + struct beacon_params *param); + +/** + * vdev_mgr_beacon_tmpl_send() – MLME API to send beacon template + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to beacon_tmpl_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_beacon_tmpl_send(struct vdev_mlme_obj *mlme_obj, + struct beacon_tmpl_params *param); + +/** + * vdev_mgr_bcn_miss_offload_send() – MLME API to send bcn miss offload + * @mlme_obj: pointer to vdev_mlme_obj + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_bcn_miss_offload_send(struct vdev_mlme_obj *mlme_obj); + +/** + * vdev_mgr_multiple_restart_send() – MLME API to send multiple vdev restart + * @pdev: pointer to pdev object + * @chan: pointer to channel param structure + * @disable_hw_ack: ddisable hw ack value + * @vdev_ids: pointer to list of vdev ids which require restart + * @num_vdevs: number of vdevs in list + * @mvr_param: multiple vdev restart param + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_multiple_restart_send( + struct wlan_objmgr_pdev *pdev, + struct mlme_channel_param *chan, + uint32_t disable_hw_ack, + uint32_t *vdev_ids, + uint32_t num_vdevs, + struct vdev_mlme_mvr_param *mvr_param); + +/** + * vdev_mgr_peer_delete_all_send() – MLME API to send peer delete all request + * @mlme_obj: pointer to vdev_mlme_obj + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS vdev_mgr_peer_delete_all_send(struct vdev_mlme_obj *mlme_obj); + +#ifdef WLAN_BCN_RATECODE_ENABLE +static inline uint32_t vdev_mgr_fetch_ratecode(struct vdev_mlme_obj *mlme_obj) +{ + return mlme_obj->mgmt.rate_info.bcn_tx_rate_code; +} +#else +static inline uint32_t vdev_mgr_fetch_ratecode(struct vdev_mlme_obj *mlme_obj) +{ + return mlme_obj->mgmt.rate_info.bcn_tx_rate; +} +#endif +#endif /* __VDEV_MGR_OPS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/core/src/vdev_mlme_sm.c b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/core/src/vdev_mlme_sm.c new file mode 100644 index 0000000000000000000000000000000000000000..47d178eb3af52f870a9fe4b72e650ba887a15e3e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/core/src/vdev_mlme_sm.c @@ -0,0 +1,1934 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implements VDEV MLME SM + */ + +#include +#include +#include +#include "include/wlan_vdev_mlme.h" +#include "vdev_mlme_sm.h" + +/** + * mlme_vdev_set_state() - set mlme state + * @vdev: VDEV object + * @state: MLME state + * + * API to set MLME state + * + * Return: void + */ +static void mlme_vdev_set_state(struct wlan_objmgr_vdev *vdev, + enum wlan_vdev_state state) +{ + if (state < WLAN_VDEV_S_MAX) { + vdev->vdev_mlme.mlme_state = state; + } else { + mlme_err("mlme state (%d) is invalid", state); + QDF_BUG(0); + } +} + +/** + * mlme_vdev_set_substate() - set mlme sub state + * @vdev: VDEV object + * @substate: MLME sub state + * + * API to set MLME sub state + * + * Return: void + */ +static void mlme_vdev_set_substate(struct wlan_objmgr_vdev *vdev, + enum wlan_vdev_state substate) +{ + if ((substate > WLAN_VDEV_S_MAX) && (substate < WLAN_VDEV_SS_MAX)) { + vdev->vdev_mlme.mlme_substate = substate; + } else { + mlme_err(" mlme sub state (%d) is invalid", substate); + QDF_BUG(0); + } +} + +/** + * mlme_vdev_sm_state_update() - set mlme state and sub state + * @vdev_mlme: MLME VDEV comp object + * @state: MLME state + * @substate: MLME sub state + * + * API to invoke util APIs to set state and MLME sub state + * + * Return: void + */ +static void mlme_vdev_sm_state_update(struct vdev_mlme_obj *vdev_mlme, + enum wlan_vdev_state state, + enum wlan_vdev_state substate) +{ + struct wlan_objmgr_vdev *vdev; + + vdev = vdev_mlme->vdev; + if (!vdev) { + mlme_err(" VDEV is NULL"); + QDF_BUG(0); + } + + mlme_vdev_set_state(vdev, state); + mlme_vdev_set_substate(vdev, substate); +} + +/** + * mlme_vdev_sm_transition_to() - invokes state transition + * @vdev_mlme: MLME VDEV comp object + * @state: new MLME state + * + * API to invoke SM API to move to new state + * + * Return: void + */ +static void mlme_vdev_sm_transition_to(struct vdev_mlme_obj *vdev_mlme, + enum wlan_vdev_state state) +{ + wlan_sm_transition_to(vdev_mlme->sm_hdl, state); +} + +/** + * mlme_vdev_state_init_entry() - Entry API for Init state + * @ctx: VDEV MLME object + * + * API to perform operations on moving to INIT state + * + * Return: void + */ +static void mlme_vdev_state_init_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + + mlme_vdev_sm_state_update(vdev_mlme, WLAN_VDEV_S_INIT, + WLAN_VDEV_SS_IDLE); +} + +/** + * mlme_vdev_state_init_exit() - Exit API for Init state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of INIT state + * + * Return: void + */ +static void mlme_vdev_state_init_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_state_init_event() - Init State event handler + * @ctx: VDEV MLME object + * + * API to handle events in INIT state + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_state_init_event(void *ctx, uint16_t event, + uint16_t event_data_len, + void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + bool status; + + switch (event) { + case WLAN_VDEV_SM_EV_START: + /* call mlme callback API for sanity checks */ + if (mlme_vdev_validate_basic_params(vdev_mlme, event_data_len, + event_data) == QDF_STATUS_SUCCESS) { + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_S_START); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_START_REQ, + event_data_len, event_data); + status = true; + } else { + mlme_err( + "failed to validate vdev init params to move to START state"); + status = true; + mlme_vdev_notify_down_complete(vdev_mlme, + event_data_len, + event_data); + } + break; + + case WLAN_VDEV_SM_EV_DOWN_COMPLETE: + case WLAN_VDEV_SM_EV_DOWN: + case WLAN_VDEV_SM_EV_START_REQ_FAIL: + /* already in down state, notify DOWN command is completed */ + /* NOTE: Keep this function call always at the end, to allow + * connection restart from this event + */ + mlme_vdev_notify_down_complete(vdev_mlme, event_data_len, + event_data); + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + +/** + * mlme_vdev_state_start_entry() - Entry API for Start state + * @ctx: VDEV MLME object + * + * API to perform operations on moving to START state + * + * Return: void + */ +static void mlme_vdev_state_start_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + + mlme_vdev_sm_state_update(vdev_mlme, WLAN_VDEV_S_START, + WLAN_VDEV_SS_IDLE); +} + +/** + * mlme_vdev_state_start_exit() - Exit API for Start state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of START state + * + * Return: void + */ +static void mlme_vdev_state_start_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_state_start_event() - Start State event handler + * @ctx: VDEV MLME object + * + * API to handle events in START state + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_state_start_event(void *ctx, uint16_t event, + uint16_t event_data_len, + void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + bool status; + + switch (event) { + case WLAN_VDEV_SM_EV_START_REQ: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_START_START_PROGRESS); + mlme_vdev_sm_deliver_event(vdev_mlme, event, event_data_len, + event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_RESTART_REQ: + case WLAN_VDEV_SM_EV_RADAR_DETECTED: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_START_RESTART_PROGRESS); + mlme_vdev_sm_deliver_event(vdev_mlme, event, event_data_len, + event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_STA_CONN_START: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_START_CONN_PROGRESS); + mlme_vdev_sm_deliver_event(vdev_mlme, event, + event_data_len, event_data); + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + +/** + * mlme_vdev_state_dfs_cac_wait_entry() - Entry API for DFS CAC WAIT state + * @ctx: VDEV MLME object + * + * API to perform operations on moving to DFS CAC WAIT state + * + * Return: void + */ +static void mlme_vdev_state_dfs_cac_wait_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + + mlme_vdev_sm_state_update(vdev_mlme, WLAN_VDEV_S_DFS_CAC_WAIT, + WLAN_VDEV_SS_IDLE); +} + +/** + * mlme_vdev_state_dfs_cac_wait_exit() - Exit API for DFS CAC WAIT state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of DFS CAC WAIT state + * + * Return: void + */ +static void mlme_vdev_state_dfs_cac_wait_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_state_dfs_cac_wait_event() - DFS CAC WAIT State event handler + * @ctx: VDEV MLME object + * + * API to handle events in DFS CAC WAIT state + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_state_dfs_cac_wait_event(void *ctx, uint16_t event, + uint16_t event_data_len, + void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + enum QDF_OPMODE mode; + struct wlan_objmgr_vdev *vdev; + bool status; + + vdev = vdev_mlme->vdev; + + mode = wlan_vdev_mlme_get_opmode(vdev); + + switch (event) { + case WLAN_VDEV_SM_EV_DFS_CAC_WAIT: + /* DFS timer should have started already, then only this event + * could have been triggered + */ + status = true; + break; + + case WLAN_VDEV_SM_EV_DOWN: + /* stop the CAC timer, then notify state machine */ + mlme_vdev_dfs_cac_timer_stop(vdev_mlme, event_data_len, + event_data); + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_STOP); + mlme_vdev_sm_deliver_event(vdev_mlme, WLAN_VDEV_SM_EV_STOP_REQ, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_RADAR_DETECTED: + /* the random channel should have been selected, before issuing + * this event + */ + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_START); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_RESTART_REQ, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_DFS_CAC_COMPLETED: + if (mode == QDF_STA_MODE) { + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_S_START); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_STA_CONN_START, + event_data_len, event_data); + } else { + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_UP); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_START_SUCCESS, + event_data_len, event_data); + } + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + +/** + * mlme_vdev_state_up_entry() - Entry API for UP state + * @ctx: VDEV MLME object + * + * API to perform operations on moving to UP state + * + * Return: void + */ +static void mlme_vdev_state_up_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + + mlme_vdev_sm_state_update(vdev_mlme, WLAN_VDEV_S_UP, + WLAN_VDEV_SS_IDLE); +} + +/** + * mlme_vdev_state_up_exit() - Exit API for UP state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of UP state + * + * Return: void + */ +static void mlme_vdev_state_up_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_state_up_event() - UP State event handler + * @ctx: VDEV MLME object + * + * API to handle events in UP state + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_state_up_event(void *ctx, uint16_t event, + uint16_t event_data_len, void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + enum QDF_OPMODE mode; + struct wlan_objmgr_vdev *vdev; + bool status; + + vdev = vdev_mlme->vdev; + mode = wlan_vdev_mlme_get_opmode(vdev); + + switch (event) { + case WLAN_VDEV_SM_EV_START_SUCCESS: + mlme_vdev_update_beacon(vdev_mlme, BEACON_INIT, + event_data_len, event_data); + if (mlme_vdev_up_send(vdev_mlme, event_data_len, + event_data) != QDF_STATUS_SUCCESS) + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_UP_FAIL, + event_data_len, event_data); + else + mlme_vdev_notify_up_complete(vdev_mlme, event_data_len, + event_data); + + status = true; + break; + + case WLAN_VDEV_SM_EV_SUSPEND_RESTART: + case WLAN_VDEV_SM_EV_HOST_RESTART: + case WLAN_VDEV_SM_EV_CSA_RESTART: + /* These events are not supported in STA mode */ + if (mode == QDF_STA_MODE) + QDF_BUG(0); + + case WLAN_VDEV_SM_EV_DOWN: + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_SUSPEND); + mlme_vdev_sm_deliver_event(vdev_mlme, event, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_RADAR_DETECTED: + /* These events are not supported in STA mode */ + if (mode == QDF_STA_MODE) + QDF_BUG(0); + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_SUSPEND); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_CSA_RESTART, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_UP_HOST_RESTART: + /* Reinit beacon, send template to FW(use ping-pong buffer) */ + mlme_vdev_update_beacon(vdev_mlme, BEACON_UPDATE, + event_data_len, event_data); + case WLAN_VDEV_SM_EV_START: + /* notify that UP command is completed */ + mlme_vdev_notify_up_complete(vdev_mlme, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_FW_VDEV_RESTART: + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_START); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_RESTART_REQ, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_UP_FAIL: + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_SUSPEND); + mlme_vdev_sm_deliver_event(vdev_mlme, event, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_ROAM: + mlme_vdev_notify_roam_start(vdev_mlme, event_data_len, + event_data); + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + +/** + * mlme_vdev_state_suspend_entry() - Entry API for Suspend state + * @ctx: VDEV MLME object + * + * API to perform operations on moving to SUSPEND state + * + * Return: void + */ +static void mlme_vdev_state_suspend_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + + mlme_vdev_sm_state_update(vdev_mlme, WLAN_VDEV_S_SUSPEND, + WLAN_VDEV_SS_IDLE); +} + +/** + * mlme_vdev_state_suspend_exit() - Exit API for Suspend state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of SUSPEND state + * + * Return: void + */ +static void mlme_vdev_state_suspend_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_state_suspend_event() - Suspend State event handler + * @ctx: VDEV MLME object + * + * API to handle events in SUSPEND state + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_state_suspend_event(void *ctx, uint16_t event, + uint16_t event_data_len, + void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + bool status; + + switch (event) { + case WLAN_VDEV_SM_EV_DOWN: + case WLAN_VDEV_SM_EV_RESTART_REQ_FAIL: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_SUSPEND_SUSPEND_DOWN); + mlme_vdev_sm_deliver_event(vdev_mlme, event, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_SUSPEND_RESTART: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_SUSPEND_SUSPEND_RESTART); + mlme_vdev_sm_deliver_event(vdev_mlme, event, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_HOST_RESTART: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_SUSPEND_HOST_RESTART); + mlme_vdev_sm_deliver_event(vdev_mlme, event, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_CSA_RESTART: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_SUSPEND_CSA_RESTART); + mlme_vdev_sm_deliver_event(vdev_mlme, event, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_UP_FAIL: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_SUSPEND_SUSPEND_DOWN); + mlme_vdev_sm_deliver_event(vdev_mlme, WLAN_VDEV_SM_EV_DOWN, + event_data_len, event_data); + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + +/** + * mlme_vdev_state_stop_entry() - Entry API for Stop state + * @ctx: VDEV MLME object + * + * API to perform operations on moving to STOP state + * + * Return: void + */ +static void mlme_vdev_state_stop_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *) ctx; + + mlme_vdev_sm_state_update(vdev_mlme, WLAN_VDEV_S_STOP, + WLAN_VDEV_SS_IDLE); +} + +/** + * mlme_vdev_state_stop_exit() - Exit API for Stop state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of STOP state + * + * Return: void + */ +static void mlme_vdev_state_stop_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_state_stop_event() - Stop State event handler + * @ctx: VDEV MLME object + * + * API to handle events in STOP state + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_state_stop_event(void *ctx, uint16_t event, + uint16_t event_data_len, + void *event_data) +{ + QDF_BUG(0); + return false; +} + +/** + * mlme_vdev_subst_start_start_progress_entry() - Entry API for Start Progress + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving to START-PROGRESS substate + * + * Return: void + */ +static void mlme_vdev_subst_start_start_progress_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + struct wlan_objmgr_vdev *vdev; + + vdev = vdev_mlme->vdev; + + if (wlan_vdev_mlme_get_state(vdev) != WLAN_VDEV_S_START) + QDF_BUG(0); + + mlme_vdev_set_substate(vdev, WLAN_VDEV_SS_START_START_PROGRESS); +} + +/** + * mlme_vdev_subst_start_start_progress_exit() - Exit API for Start Progress + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of START-PROGRESS substate + * + * Return: void + */ +static void mlme_vdev_subst_start_start_progress_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_subst_start_start_progress_event() - Event handler API for Start + * Progress substate + * @ctx: VDEV MLME object + * + * API to handle events in START-PROGRESS substate + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_subst_start_start_progress_event(void *ctx, + uint16_t event, uint16_t event_data_len, void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + struct wlan_objmgr_vdev *vdev; + bool status; + + vdev = vdev_mlme->vdev; + + switch (event) { + case WLAN_VDEV_SM_EV_START_REQ: + /* send vdev start req command to FW */ + mlme_vdev_start_send(vdev_mlme, event_data_len, event_data); + status = true; + break; + /* While waiting for START response, move to RESTART_PROGRESS, + * wait for START response to send RESTART req */ + case WLAN_VDEV_SM_EV_RADAR_DETECTED: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_START_RESTART_PROGRESS); + status = true; + break; + case WLAN_VDEV_SM_EV_START_RESP: + case WLAN_VDEV_SM_EV_RESTART_RESP: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_START_CONN_PROGRESS); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_CONN_PROGRESS, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_START_REQ_FAIL: + mlme_vdev_start_req_failed(vdev_mlme, + event_data_len, event_data); + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_INIT); + mlme_vdev_sm_deliver_event(vdev_mlme, event, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_DOWN: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_START_DISCONN_PROGRESS); + /* block start request, if it is pending */ + mlme_vdev_stop_start_send(vdev_mlme, START_REQ, + event_data_len, event_data); + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + +/** + * mlme_vdev_subst_start_restart_progress_entry() - Entry API for Restart + * progress sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving to RESTART-PROGRESS substate + * + * Return: void + */ +static void mlme_vdev_subst_start_restart_progress_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + struct wlan_objmgr_vdev *vdev; + + vdev = vdev_mlme->vdev; + + if (wlan_vdev_mlme_get_state(vdev) != WLAN_VDEV_S_START) + QDF_BUG(0); + + mlme_vdev_set_substate(vdev, WLAN_VDEV_SS_START_RESTART_PROGRESS); +} + +/** + * mlme_vdev_subst_start_restart_progress_exit() - Exit API for Restart Progress + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of RESTART-PROGRESS substate + * + * Return: void + */ +static void mlme_vdev_subst_start_restart_progress_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_subst_start_restart_progress_event() - Event handler API for + * Restart Progress substate + * @ctx: VDEV MLME object + * + * API to handle events in RESTART-PROGRESS substate + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_subst_start_restart_progress_event(void *ctx, + uint16_t event, uint16_t event_data_len, void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + struct wlan_objmgr_vdev *vdev; + bool status; + + vdev = vdev_mlme->vdev; + + switch (event) { + case WLAN_VDEV_SM_EV_RESTART_REQ: + /* If Start resp is pending, send restart after start response */ + case WLAN_VDEV_SM_EV_START_RESP: + /* send vdev restart req command to FW */ + mlme_vdev_restart_send(vdev_mlme, event_data_len, event_data); + status = true; + break; + case WLAN_VDEV_SM_EV_RESTART_RESP: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_START_CONN_PROGRESS); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_CONN_PROGRESS, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_RESTART_REQ_FAIL: + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_SUSPEND); + mlme_vdev_sm_deliver_event(vdev_mlme, event, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_DOWN: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_START_DISCONN_PROGRESS); + /* block restart request, if it is pending */ + mlme_vdev_stop_start_send(vdev_mlme, RESTART_REQ, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_RADAR_DETECTED: + /* It is complicated to handle RADAR detected in this substate, + * as vdev updates des channels as bss channel on response, + * it would be easily handled, if it is deferred by DFS module + */ + QDF_BUG(0); + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + +/** + * mlme_vdev_subst_start_conn_progress_entry() - Entry API for Conn. Progress + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving to CONN-PROGRESS substate + * + * Return: void + */ +static void mlme_vdev_subst_start_conn_progress_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + struct wlan_objmgr_vdev *vdev; + + vdev = vdev_mlme->vdev; + + if (wlan_vdev_mlme_get_state(vdev) != WLAN_VDEV_S_START) + QDF_BUG(0); + + mlme_vdev_set_substate(vdev, WLAN_VDEV_SS_START_CONN_PROGRESS); +} + +/** + * mlme_vdev_subst_start_conn_progress_exit() - Exit API for Conn. Progress + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of CONN-PROGRESS substate + * + * Return: void + */ +static void mlme_vdev_subst_start_conn_progress_exit(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + + mlme_vdev_notify_start_state_exit(vdev_mlme); +} + +/** + * mlme_vdev_subst_start_conn_progress_event() - Event handler API for Conn. + * Progress substate + * @ctx: VDEV MLME object + * + * API to handle events in CONN-PROGRESS substate + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_subst_start_conn_progress_event(void *ctx, + uint16_t event, + uint16_t event_data_len, + void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + enum QDF_OPMODE mode; + struct wlan_objmgr_vdev *vdev; + bool status; + + vdev = vdev_mlme->vdev; + + mode = wlan_vdev_mlme_get_opmode(vdev); + + switch (event) { + case WLAN_VDEV_SM_EV_CONN_PROGRESS: + /* This API decides to move to DFS CAC WAIT or UP state, + * for station notify connection state machine */ + if (mlme_vdev_start_continue(vdev_mlme, event_data_len, + event_data) != QDF_STATUS_SUCCESS) + mlme_vdev_sm_deliver_event( + vdev_mlme, + WLAN_VDEV_SM_EV_CONNECTION_FAIL, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_DFS_CAC_WAIT: + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_DFS_CAC_WAIT); + mlme_vdev_sm_deliver_event(vdev_mlme, event, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_START_SUCCESS: + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_UP); + mlme_vdev_sm_deliver_event(vdev_mlme, event, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_STA_CONN_START: + /* This event triggers station connection, if it is blocked for + * CAC WAIT + */ + if (mode != QDF_STA_MODE) + QDF_BUG(0); + + mlme_vdev_sta_conn_start(vdev_mlme, event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_RADAR_DETECTED: + if (mode != QDF_STA_MODE) + QDF_BUG(0); + + status = true; + break; + + case WLAN_VDEV_SM_EV_DOWN: + case WLAN_VDEV_SM_EV_CONNECTION_FAIL: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_START_DISCONN_PROGRESS); + mlme_vdev_sm_deliver_event(vdev_mlme, event, + event_data_len, event_data); + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + +/** + * mlme_vdev_subst_start_disconn_progress_entry() - Entry API for Disconn + * progress sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving to DISCONN-PROGRESS substate + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static void mlme_vdev_subst_start_disconn_progress_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + struct wlan_objmgr_vdev *vdev; + + vdev = vdev_mlme->vdev; + + if (wlan_vdev_mlme_get_state(vdev) != WLAN_VDEV_S_START) + QDF_BUG(0); + + mlme_vdev_set_substate(vdev, WLAN_VDEV_SS_START_DISCONN_PROGRESS); +} + +/** + * mlme_vdev_subst_start_disconn_progress_exit() - Exit API for Disconn Progress + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of DISCONN-PROGRESS substate + * + * Return: void + */ +static void mlme_vdev_subst_start_disconn_progress_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_subst_start_disconn_progress_event() - Event handler API for Discon + * Progress substate + * @ctx: VDEV MLME object + * + * API to handle events in DISCONN-PROGRESS substate + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_subst_start_disconn_progress_event(void *ctx, + uint16_t event, uint16_t event_data_len, void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + bool status; + + switch (event) { + case WLAN_VDEV_SM_EV_START_RESP: + /* clean up, if any needs to be cleaned up */ + case WLAN_VDEV_SM_EV_CONNECTION_FAIL: + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_STOP); + mlme_vdev_sm_deliver_event(vdev_mlme, WLAN_VDEV_SM_EV_STOP_REQ, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_RESTART_RESP: + case WLAN_VDEV_SM_EV_RESTART_REQ_FAIL: + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_SUSPEND); + mlme_vdev_sm_deliver_event(vdev_mlme, WLAN_VDEV_SM_EV_DOWN, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_START_REQ_FAIL: + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_INIT); + mlme_vdev_sm_deliver_event(vdev_mlme, event, + event_data_len, event_data); + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + +/** + * mlme_vdev_subst_suspend_suspend_down_entry() - Entry API for Suspend down + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving to SUSPEND-DOWN substate + * + * Return: void + */ +static void mlme_vdev_subst_suspend_suspend_down_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + struct wlan_objmgr_vdev *vdev; + + vdev = vdev_mlme->vdev; + + if (wlan_vdev_mlme_get_state(vdev) != WLAN_VDEV_S_SUSPEND) + QDF_BUG(0); + + mlme_vdev_set_substate(vdev, WLAN_VDEV_SS_SUSPEND_SUSPEND_DOWN); +} + +/** + * mlme_vdev_subst_suspend_suspend_down_exit() - Exit API for Suspend down + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of SUSPEND-DOWN substate + * + * Return: void + */ +static void mlme_vdev_subst_suspend_suspend_down_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_subst_suspend_suspend_down_event() - Event handler API for Suspend + * down substate + * @ctx: VDEV MLME object + * + * API to handle events in SUSPEND-DOWN substate + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_subst_suspend_suspend_down_event(void *ctx, + uint16_t event, uint16_t event_data_len, void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + bool status; + + switch (event) { + case WLAN_VDEV_SM_EV_DOWN: + case WLAN_VDEV_SM_EV_RESTART_REQ_FAIL: + mlme_vdev_disconnect_peers(vdev_mlme, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_DISCONNECT_COMPLETE: + /* clean up, if any needs to be cleaned up */ + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_STOP); + mlme_vdev_sm_deliver_event(vdev_mlme, WLAN_VDEV_SM_EV_STOP_REQ, + event_data_len, event_data); + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + +/** + * mlme_vdev_subst_suspend_suspend_restart_entry() - Entry API for Suspend + * restart substate + * @ctx: VDEV MLME object + * + * API to perform operations on moving to SUSPEND-RESTART substate + * + * Return: void + */ +static void mlme_vdev_subst_suspend_suspend_restart_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + struct wlan_objmgr_vdev *vdev; + + vdev = vdev_mlme->vdev; + + if (wlan_vdev_mlme_get_state(vdev) != WLAN_VDEV_S_SUSPEND) + QDF_BUG(0); + + mlme_vdev_set_substate(vdev, WLAN_VDEV_SS_SUSPEND_SUSPEND_RESTART); +} + +/** + * mlme_vdev_subst_suspend_suspend_restart_exit() - Exit API for Suspend restart + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of SUSPEND-RESTART substate + * + * Return: void + */ +static void mlme_vdev_subst_suspend_suspend_restart_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_subst_suspend_suspend_restart_event() - Event handler API for + * Suspend restart substate + * @ctx: VDEV MLME object + * + * API to handle events in SUSPEND-RESTART substate + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_subst_suspend_suspend_restart_event(void *ctx, + uint16_t event, uint16_t event_data_len, void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + bool status; + + switch (event) { + case WLAN_VDEV_SM_EV_SUSPEND_RESTART: + mlme_vdev_disconnect_peers(vdev_mlme, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_DISCONNECT_COMPLETE: + /* clean up, if any needs to be cleaned up */ + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_START); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_RESTART_REQ, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_DOWN: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_SUSPEND_SUSPEND_DOWN); + status = true; + break; + + case WLAN_VDEV_SM_EV_RADAR_DETECTED: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_SUSPEND_CSA_RESTART); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_CSA_RESTART, + event_data_len, event_data); + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + +/** + * mlme_vdev_subst_suspend_host_restart_entry() - Entry API for Host restart + * substate + * @ctx: VDEV MLME object + * + * API to perform operations on moving to HOST-RESTART substate + * + * Return: void + */ +static void mlme_vdev_subst_suspend_host_restart_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + struct wlan_objmgr_vdev *vdev; + + vdev = vdev_mlme->vdev; + + if (wlan_vdev_mlme_get_state(vdev) != WLAN_VDEV_S_SUSPEND) + QDF_BUG(0); + + mlme_vdev_set_substate(vdev, WLAN_VDEV_SS_SUSPEND_HOST_RESTART); +} + +/** + * mlme_vdev_subst_suspend_host_restart_exit() - Exit API for host restart + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of HOST-RESTART substate + * + * Return: void + */ +static void mlme_vdev_subst_suspend_host_restart_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_subst_suspend_host_restart_entry() - Event handler API for Host + * restart substate + * @ctx: VDEV MLME object + * + * API to handle events in HOST-RESTART substate + * + * Return: void + */ +static bool mlme_vdev_subst_suspend_host_restart_event(void *ctx, + uint16_t event, uint16_t event_data_len, void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + bool status; + + switch (event) { + case WLAN_VDEV_SM_EV_HOST_RESTART: + mlme_vdev_disconnect_peers(vdev_mlme, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_DISCONNECT_COMPLETE: + /* VDEV up command need not be sent */ + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_UP); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_UP_HOST_RESTART, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_DOWN: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_SUSPEND_SUSPEND_DOWN); + status = true; + break; + + case WLAN_VDEV_SM_EV_RADAR_DETECTED: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_SUSPEND_CSA_RESTART); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_CSA_RESTART, + event_data_len, event_data); + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + +/** + * mlme_vdev_subst_suspend_csa_restart_entry() - Entry API for CSA restart + * substate + * @ctx: VDEV MLME object + * + * API to perform operations on moving to CSA-RESTART substate + * + * Return: void + */ +static void mlme_vdev_subst_suspend_csa_restart_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + struct wlan_objmgr_vdev *vdev; + + vdev = vdev_mlme->vdev; + + if (wlan_vdev_mlme_get_state(vdev) != WLAN_VDEV_S_SUSPEND) + QDF_BUG(0); + + mlme_vdev_set_substate(vdev, WLAN_VDEV_SS_SUSPEND_CSA_RESTART); +} + +/** + * mlme_vdev_subst_suspend_csa_restart_exit() - Exit API for CSA restart + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of CSA-RESTART substate + * + * Return: void + */ +static void mlme_vdev_subst_suspend_csa_restart_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_subst_suspend_csa_restart_event() - Event handler API for CSA + * restart substate + * @ctx: VDEV MLME object + * + * API to handle events in CSA-RESTART substate + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_subst_suspend_csa_restart_event(void *ctx, + uint16_t event, uint16_t event_data_len, void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + bool status; + + switch (event) { + case WLAN_VDEV_SM_EV_CHAN_SWITCH_DISABLED: + /** + * This event is sent when CSA count becomes 0 without + * change in channel i.e. only Beacon Probe response template + * is updated (CSA / ECSA IE is removed). + */ + + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_UP); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_UP_HOST_RESTART, + event_data_len, event_data); + status = true; + break; + case WLAN_VDEV_SM_EV_CSA_RESTART: + mlme_vdev_update_beacon(vdev_mlme, BEACON_CSA, + event_data_len, event_data); + status = true; + break; + case WLAN_VDEV_SM_EV_CSA_COMPLETE: + if (mlme_vdev_is_newchan_no_cac(vdev_mlme) == + QDF_STATUS_SUCCESS) { + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_S_START); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_RESTART_REQ, + event_data_len, event_data); + } else { + mlme_vdev_sm_transition_to + (vdev_mlme, + WLAN_VDEV_SS_SUSPEND_SUSPEND_RESTART); + mlme_vdev_sm_deliver_event + (vdev_mlme, WLAN_VDEV_SM_EV_SUSPEND_RESTART, + event_data_len, event_data); + } + status = true; + break; + + case WLAN_VDEV_SM_EV_DOWN: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_SUSPEND_SUSPEND_DOWN); + mlme_vdev_sm_deliver_event(vdev_mlme, event, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_RADAR_DETECTED: + /* since channel change is already in progress, + * dfs ignore radar detected event + */ + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + +/** + * mlme_vdev_subst_stop_stop_progress_entry() - Entry API for Stop Progress + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving to STOP-PROGRESS substate + * + * Return: void + */ +static void mlme_vdev_subst_stop_stop_progress_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *) ctx; + struct wlan_objmgr_vdev *vdev; + + vdev = vdev_mlme->vdev; + + if (wlan_vdev_mlme_get_state(vdev) != WLAN_VDEV_S_STOP) + QDF_BUG(0); + + mlme_vdev_set_substate(vdev, WLAN_VDEV_SS_STOP_STOP_PROGRESS); +} + +/** + * mlme_vdev_subst_stop_stop_progress_exit() - Exit API for Stop Progress + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of STOP-PROGRESS substate + * + * Return: void + */ +static void mlme_vdev_subst_stop_stop_progress_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_subst_stop_stop_progress_event() - Event handler API for Stop + * Progress substate + * @ctx: VDEV MLME object + * + * API to handle events in STOP-PROGRESS substate + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_subst_stop_stop_progress_event(void *ctx, + uint16_t event, uint16_t event_data_len, void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + bool status; + + /* Debug framework is required to hold the events */ + + switch (event) { + case WLAN_VDEV_SM_EV_STOP_REQ: + /* send vdev stop command to FW and delete BSS peer*/ + mlme_vdev_stop_send(vdev_mlme, event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_STOP_RESP: + /* Processes stop response, and checks BSS peer delete wait + * is needed + */ + mlme_vdev_stop_continue(vdev_mlme, event_data_len, event_data); + status = true; + break; + + /* This event should be given by MLME on stop complete and BSS + * peer delete complete to move forward + */ + case WLAN_VDEV_SM_EV_MLME_DOWN_REQ: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_STOP_DOWN_PROGRESS); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_MLME_DOWN_REQ, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_STOP_FAIL: + mlme_vdev_sm_transition_to(vdev_mlme, + WLAN_VDEV_SS_STOP_DOWN_PROGRESS); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_MLME_DOWN_REQ, + event_data_len, event_data); + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + +/** + * mlme_vdev_subst_stop_down_progress_entry() - Entry API for Down Progress + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving to DOWN-PROGRESS substate + * + * Return: void + */ +static void mlme_vdev_subst_stop_down_progress_entry(void *ctx) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + struct wlan_objmgr_vdev *vdev; + + vdev = vdev_mlme->vdev; + + if (wlan_vdev_mlme_get_state(vdev) != WLAN_VDEV_S_STOP) + QDF_BUG(0); + + mlme_vdev_set_substate(vdev, WLAN_VDEV_SS_STOP_DOWN_PROGRESS); +} + +/** + * mlme_vdev_subst_stop_down_progress_exit() - Exit API for Down Progress + * sub state + * @ctx: VDEV MLME object + * + * API to perform operations on moving out of DOWN-PROGRESS substate + * + * Return: void + */ +static void mlme_vdev_subst_stop_down_progress_exit(void *ctx) +{ + /* NONE */ +} + +/** + * mlme_vdev_subst_stop_down_progress_event() - Event handler API for Down + * Progress substate + * @ctx: VDEV MLME object + * + * API to handle events in DOWN-PROGRESS substate + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +static bool mlme_vdev_subst_stop_down_progress_event(void *ctx, + uint16_t event, uint16_t event_data_len, void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme = (struct vdev_mlme_obj *)ctx; + bool status; + + switch (event) { + case WLAN_VDEV_SM_EV_DOWN: + status = true; + break; + + case WLAN_VDEV_SM_EV_MLME_DOWN_REQ: + /* send vdev down command to FW, if send is successful, sends + * DOWN_COMPLETE event + */ + mlme_vdev_down_send(vdev_mlme, event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_DOWN_COMPLETE: + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_INIT); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_DOWN_COMPLETE, + event_data_len, event_data); + status = true; + break; + + case WLAN_VDEV_SM_EV_DOWN_FAIL: + mlme_vdev_sm_transition_to(vdev_mlme, WLAN_VDEV_S_INIT); + mlme_vdev_sm_deliver_event(vdev_mlme, + WLAN_VDEV_SM_EV_DOWN_COMPLETE, + event_data_len, event_data); + status = true; + break; + + default: + status = false; + break; + } + + return status; +} + + +static const char *vdev_sm_event_names[] = { + "EV_START", + "EV_START_REQ", + "EV_RESTART_REQ", + "EV_START_RESP", + "EV_RESTART_RESP", + "EV_START_REQ_FAIL", + "EV_RESTART_REQ_FAIL", + "EV_START_SUCCESS", + "EV_CONN_PROGRESS", + "EV_STA_CONN_START", + "EV_DFS_CAC_WAIT", + "EV_DFS_CAC_COMPLETED", + "EV_DOWN", + "EV_CONNECTION_FAIL", + "EV_STOP_RESP", + "EV_STOP_FAIL", + "EV_DOWN_FAIL", + "EV_DISCONNECT_COMPLETE", + "EV_SUSPEND_RESTART", + "EV_HOST_RESTART", + "EV_UP_HOST_RESTART", + "EV_FW_VDEV_RESTART", + "EV_UP_FAIL", + "EV_RADAR_DETECTED", + "EV_CSA_RESTART", + "EV_CSA_COMPLETE", + "EV_MLME_DOWN_REQ", + "EV_DOWN_COMPLETE", + "EV_ROAM", + "EV_STOP_REQ", + "EV_CHAN_SWITCH_DISABLED", +}; + +struct wlan_sm_state_info sm_info[] = { + { + (uint8_t)WLAN_VDEV_S_INIT, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + true, + "INIT", + mlme_vdev_state_init_entry, + mlme_vdev_state_init_exit, + mlme_vdev_state_init_event + }, + { + (uint8_t)WLAN_VDEV_S_START, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + true, + "START", + mlme_vdev_state_start_entry, + mlme_vdev_state_start_exit, + mlme_vdev_state_start_event + }, + { + (uint8_t)WLAN_VDEV_S_DFS_CAC_WAIT, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + true, + "DFS_CAC_WAIT", + mlme_vdev_state_dfs_cac_wait_entry, + mlme_vdev_state_dfs_cac_wait_exit, + mlme_vdev_state_dfs_cac_wait_event + }, + { + (uint8_t)WLAN_VDEV_S_UP, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + true, + "UP", + mlme_vdev_state_up_entry, + mlme_vdev_state_up_exit, + mlme_vdev_state_up_event + }, + { + (uint8_t)WLAN_VDEV_S_SUSPEND, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + true, + "SUSPEND", + mlme_vdev_state_suspend_entry, + mlme_vdev_state_suspend_exit, + mlme_vdev_state_suspend_event + }, + { + (uint8_t)WLAN_VDEV_S_STOP, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + (uint8_t)WLAN_VDEV_SS_STOP_STOP_PROGRESS, + true, + "STOP", + mlme_vdev_state_stop_entry, + mlme_vdev_state_stop_exit, + mlme_vdev_state_stop_event + }, + { + (uint8_t)WLAN_VDEV_S_MAX, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + false, + "INVALID", + NULL, + NULL, + NULL + }, + { + (uint8_t)WLAN_VDEV_SS_START_START_PROGRESS, + (uint8_t)WLAN_VDEV_S_START, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + false, + "ST-START_PROG", + mlme_vdev_subst_start_start_progress_entry, + mlme_vdev_subst_start_start_progress_exit, + mlme_vdev_subst_start_start_progress_event + }, + { + (uint8_t)WLAN_VDEV_SS_START_RESTART_PROGRESS, + (uint8_t)WLAN_VDEV_S_START, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + false, + "ST-RESTART_PROG", + mlme_vdev_subst_start_restart_progress_entry, + mlme_vdev_subst_start_restart_progress_exit, + mlme_vdev_subst_start_restart_progress_event + }, + { + (uint8_t)WLAN_VDEV_SS_START_CONN_PROGRESS, + (uint8_t)WLAN_VDEV_S_START, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + false, + "ST-CONN_PROG", + mlme_vdev_subst_start_conn_progress_entry, + mlme_vdev_subst_start_conn_progress_exit, + mlme_vdev_subst_start_conn_progress_event + }, + { + (uint8_t)WLAN_VDEV_SS_START_DISCONN_PROGRESS, + (uint8_t)WLAN_VDEV_S_START, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + false, + "ST-DISCONN_PROG", + mlme_vdev_subst_start_disconn_progress_entry, + mlme_vdev_subst_start_disconn_progress_exit, + mlme_vdev_subst_start_disconn_progress_event + }, + { + (uint8_t)WLAN_VDEV_SS_SUSPEND_SUSPEND_DOWN, + (uint8_t)WLAN_VDEV_S_SUSPEND, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + false, + "SP-SUSPEND_DOWN", + mlme_vdev_subst_suspend_suspend_down_entry, + mlme_vdev_subst_suspend_suspend_down_exit, + mlme_vdev_subst_suspend_suspend_down_event + }, + { + (uint8_t)WLAN_VDEV_SS_SUSPEND_SUSPEND_RESTART, + (uint8_t)WLAN_VDEV_S_SUSPEND, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + false, + "SP-SUSPEND_RESTART", + mlme_vdev_subst_suspend_suspend_restart_entry, + mlme_vdev_subst_suspend_suspend_restart_exit, + mlme_vdev_subst_suspend_suspend_restart_event + }, + { + (uint8_t)WLAN_VDEV_SS_SUSPEND_HOST_RESTART, + (uint8_t)WLAN_VDEV_S_SUSPEND, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + false, + "SP-HOST_RESTART", + mlme_vdev_subst_suspend_host_restart_entry, + mlme_vdev_subst_suspend_host_restart_exit, + mlme_vdev_subst_suspend_host_restart_event + }, + { + (uint8_t)WLAN_VDEV_SS_SUSPEND_CSA_RESTART, + (uint8_t)WLAN_VDEV_S_SUSPEND, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + false, + "SP-CSA_RESTART", + mlme_vdev_subst_suspend_csa_restart_entry, + mlme_vdev_subst_suspend_csa_restart_exit, + mlme_vdev_subst_suspend_csa_restart_event + }, + { + (uint8_t)WLAN_VDEV_SS_STOP_STOP_PROGRESS, + (uint8_t)WLAN_VDEV_S_STOP, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + false, + "STOP-STOP_PROG", + mlme_vdev_subst_stop_stop_progress_entry, + mlme_vdev_subst_stop_stop_progress_exit, + mlme_vdev_subst_stop_stop_progress_event + }, + { + (uint8_t)WLAN_VDEV_SS_STOP_DOWN_PROGRESS, + (uint8_t)WLAN_VDEV_S_STOP, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + false, + "STOP-DOWN_PROG", + mlme_vdev_subst_stop_down_progress_entry, + mlme_vdev_subst_stop_down_progress_exit, + mlme_vdev_subst_stop_down_progress_event + }, + { + (uint8_t)WLAN_VDEV_SS_IDLE, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + false, + "IDLE", + NULL, + NULL, + NULL, + }, + { + (uint8_t)WLAN_VDEV_SS_MAX, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + (uint8_t)WLAN_SM_ENGINE_STATE_NONE, + false, + "INVALID", + NULL, + NULL, + NULL, + }, +}; + +QDF_STATUS mlme_vdev_sm_deliver_event(struct vdev_mlme_obj *vdev_mlme, + enum wlan_vdev_sm_evt event, + uint16_t event_data_len, void *event_data) +{ + return wlan_sm_dispatch(vdev_mlme->sm_hdl, event, + event_data_len, event_data); +} + +void mlme_vdev_sm_print_state_event(struct vdev_mlme_obj *vdev_mlme, + enum wlan_vdev_sm_evt event) +{ + enum wlan_vdev_state state; + enum wlan_vdev_state substate; + struct wlan_objmgr_vdev *vdev; + + vdev = vdev_mlme->vdev; + + state = wlan_vdev_mlme_get_state(vdev); + substate = wlan_vdev_mlme_get_substate(vdev); + + mlme_nofl_debug("[%s]%s - %s, %s", vdev_mlme->sm_hdl->name, + sm_info[state].name, sm_info[substate].name, + vdev_sm_event_names[event]); +} + +void mlme_vdev_sm_print_state(struct vdev_mlme_obj *vdev_mlme) +{ + enum wlan_vdev_state state; + enum wlan_vdev_state substate; + struct wlan_objmgr_vdev *vdev; + + vdev = vdev_mlme->vdev; + + state = wlan_vdev_mlme_get_state(vdev); + substate = wlan_vdev_mlme_get_substate(vdev); + + mlme_nofl_debug("[%s]%s - %s", vdev_mlme->sm_hdl->name, + sm_info[state].name, sm_info[substate].name); +} + +#ifdef SM_ENG_HIST_ENABLE +void mlme_vdev_sm_history_print(struct vdev_mlme_obj *vdev_mlme) +{ + return wlan_sm_print_history(vdev_mlme->sm_hdl); +} +#endif + +QDF_STATUS mlme_vdev_sm_create(struct vdev_mlme_obj *vdev_mlme) +{ + struct wlan_sm *sm; + uint8_t name[WLAN_SM_ENGINE_MAX_NAME]; + + qdf_snprintf(name, sizeof(name), "VDEV%d-MLME", + wlan_vdev_get_id(vdev_mlme->vdev)); + sm = wlan_sm_create(name, vdev_mlme, + WLAN_VDEV_S_INIT, + sm_info, + QDF_ARRAY_SIZE(sm_info), + vdev_sm_event_names, + QDF_ARRAY_SIZE(vdev_sm_event_names)); + if (!sm) { + mlme_err("VDEV MLME SM allocation failed"); + return QDF_STATUS_E_FAILURE; + } + vdev_mlme->sm_hdl = sm; + + mlme_vdev_sm_spinlock_create(vdev_mlme); + + mlme_vdev_cmd_mutex_create(vdev_mlme); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS mlme_vdev_sm_destroy(struct vdev_mlme_obj *vdev_mlme) +{ + mlme_vdev_cmd_mutex_destroy(vdev_mlme); + + mlme_vdev_sm_spinlock_destroy(vdev_mlme); + + wlan_sm_delete(vdev_mlme->sm_hdl); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/core/src/vdev_mlme_sm.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/core/src/vdev_mlme_sm.h new file mode 100644 index 0000000000000000000000000000000000000000..deb784b49a122b3d5a1808cf5b89eed77049fb68 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/core/src/vdev_mlme_sm.h @@ -0,0 +1,730 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Declares VDEV MLME SM APIs and structures + */ + +#ifndef _VDEV_MLME_SM_H_ +#define _VDEV_MLME_SM_H_ + +/** + * mlme_vdev_sm_deliver_event() - Delivers event to VDEV MLME SM + * @vdev_mlme: MLME VDEV comp object + * @event: MLME event + * @event_data_len: data size + * @event_data: event data + * + * API to dispatch event to VDEV MLME SM + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +QDF_STATUS mlme_vdev_sm_deliver_event(struct vdev_mlme_obj *vdev_mlme, + enum wlan_vdev_sm_evt event, + uint16_t event_data_len, + void *event_data); + +/** + * mlme_vdev_sm_print_state_event() - Prints the state/substate, event + * @vdev_mlme: MLME VDEV comp object + * @event: MLME event + * + * API to print current state/substate, events in readable format + * + * Return: void + */ +void mlme_vdev_sm_print_state_event(struct vdev_mlme_obj *vdev_mlme, + enum wlan_vdev_sm_evt event); + +/** + * mlme_vdev_sm_print_state() - Prints the state/substate + * @vdev_mlme: MLME VDEV comp object + * + * API to print current state/substate + * + * Return: void + */ +void mlme_vdev_sm_print_state(struct vdev_mlme_obj *vdev_mlme); +#ifdef SM_ENG_HIST_ENABLE +/** + * mlme_vdev_sm_history_print() - Prints SM history + * @vdev_mlme: MLME VDEV comp object + * + * API to print SM history + * + * Return: void + */ +void mlme_vdev_sm_history_print(struct vdev_mlme_obj *vdev_mlme); +#endif + +#endif + +/** + * mlme_vdev_sm_create - Invoke SME creation for VDEV + * @vdev_mlme_obj: VDEV MLME comp object + * + * API allocates VDEV MLME SM and initializes SM lock + * + * Return: SUCCESS on successful allocation + * FAILURE, if registration fails + */ +QDF_STATUS mlme_vdev_sm_create(struct vdev_mlme_obj *vdev_mlme); + +/** + * mlme_vdev_sm_destroy - Invoke SME destroy for VDEV + * @vdev_mlme_obj: VDEV MLME comp object + * + * API frees VDEV MLME SM and destroys the SM lock + * + * Return: SUCCESS on successful destroy + * FAILURE, if registration fails + */ +QDF_STATUS mlme_vdev_sm_destroy(struct vdev_mlme_obj *vdev_mlme); + +/** + * mlme_vdev_validate_basic_params - Validate basic params + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API validate MLME VDEV basic parameters + * + * Return: SUCCESS on successful validation + * FAILURE, if any parameter is not initialized + */ +static inline QDF_STATUS mlme_vdev_validate_basic_params( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_validate_basic_params) + ret = vdev_mlme->ops->mlme_vdev_validate_basic_params( + vdev_mlme, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_reset_proto_params - Reset VDEV protocol params + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API resets the protocol params fo vdev + * + * Return: SUCCESS on successful reset + * FAILURE, if it fails due to any + */ +static inline QDF_STATUS mlme_vdev_reset_proto_params( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_reset_proto_params) + ret = vdev_mlme->ops->mlme_vdev_reset_proto_params( + vdev_mlme, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_start_send - Invokes VDEV start operation + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API invokes VDEV start operation + * + * Return: SUCCESS on successful completion of start operation + * FAILURE, if it fails due to any + */ +static inline QDF_STATUS mlme_vdev_start_send( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_start_send) + ret = vdev_mlme->ops->mlme_vdev_start_send( + vdev_mlme, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_restart_send - Invokes VDEV restart operation + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API invokes VDEV restart operation + * + * Return: SUCCESS on successful completion of restart operation + * FAILURE, if it fails due to any + */ +static inline QDF_STATUS mlme_vdev_restart_send( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_restart_send) + ret = vdev_mlme->ops->mlme_vdev_restart_send( + vdev_mlme, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_stop_start_send - Invoke block VDEV restart operation + * @vdev_mlme_obj: VDEV MLME comp object + * @restart: restart req/start req + * @event_data_len: data size + * @event_data: event data + * + * API invokes stops pending VDEV restart operation + * + * Return: SUCCESS alsways + */ +static inline QDF_STATUS mlme_vdev_stop_start_send( + struct vdev_mlme_obj *vdev_mlme, + uint8_t restart, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_stop_start_send) + ret = vdev_mlme->ops->mlme_vdev_stop_start_send( + vdev_mlme, restart, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_start_continue - VDEV start response handling + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API invokes VDEV start response actions + * + * Return: SUCCESS on successful completion of start response operation + * FAILURE, if it fails due to any + */ +static inline QDF_STATUS mlme_vdev_start_continue( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_start_continue) + ret = vdev_mlme->ops->mlme_vdev_start_continue( + vdev_mlme, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_start_req_failed - Invoke Station VDEV connection, if it pause + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API invokes on START fail response + * + * Return: SUCCESS on successful invocation of callback + * FAILURE, if it fails due to any + */ +static inline QDF_STATUS mlme_vdev_start_req_failed( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_start_req_failed) + ret = vdev_mlme->ops->mlme_vdev_start_req_failed( + vdev_mlme, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_sta_conn_start - Invoke Station VDEV connection, if it pause + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API invokes connection SM to start station connection + * + * Return: SUCCESS on successful invocation of connection sm + * FAILURE, if it fails due to any + */ +static inline QDF_STATUS mlme_vdev_sta_conn_start( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_sta_conn_start) + ret = vdev_mlme->ops->mlme_vdev_sta_conn_start( + vdev_mlme, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_up_send - VDEV up operation + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API invokes VDEV up operations + * + * Return: SUCCESS on successful completion of up operation + * FAILURE, if it fails due to any + */ +static inline QDF_STATUS mlme_vdev_up_send( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_up_send) + ret = vdev_mlme->ops->mlme_vdev_up_send( + vdev_mlme, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_notify_up_complete - VDEV up state transition notification + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API notifies MLME on moving to UP state + * + * Return: SUCCESS on successful completion of up notification + * FAILURE, if it fails due to any + */ +static inline +QDF_STATUS mlme_vdev_notify_up_complete(struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, + void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (vdev_mlme->ops && vdev_mlme->ops->mlme_vdev_notify_up_complete) + ret = vdev_mlme->ops->mlme_vdev_notify_up_complete( + vdev_mlme, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_notify_roam_start - VDEV Roaming notification + * @vdev_mlme_obj: VDEV MLME comp object + * @event_len: data size + * @event_data: event data + * + * API notifies MLME on roaming + * + * Return: SUCCESS on successful completion of up notification + * FAILURE, if it fails due to any + */ +static inline +QDF_STATUS mlme_vdev_notify_roam_start(struct vdev_mlme_obj *vdev_mlme, + uint16_t event_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (vdev_mlme->ops && vdev_mlme->ops->mlme_vdev_notify_roam_start) + ret = vdev_mlme->ops->mlme_vdev_notify_roam_start(vdev_mlme, + event_len, + event_data); + + return ret; +} + +/** + * mlme_vdev_update_beacon - Updates beacon + * @vdev_mlme_obj: VDEV MLME comp object + * @op: beacon update type + * @event_data_len: data size + * @event_data: event data + * + * API updates/allocates/frees the beacon + * + * Return: SUCCESS on successful update of beacon + * FAILURE, if it fails due to any + */ +static inline +QDF_STATUS mlme_vdev_update_beacon(struct vdev_mlme_obj *vdev_mlme, + enum beacon_update_op op, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if (vdev_mlme->ops && vdev_mlme->ops->mlme_vdev_update_beacon) + ret = vdev_mlme->ops->mlme_vdev_update_beacon(vdev_mlme, op, + event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_disconnect_peers - Disconnect peers + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API trigger stations disconnection with AP VDEV or AP disconnection with STA + * VDEV + * + * Return: SUCCESS on successful invocation of station disconnection + * FAILURE, if it fails due to any + */ +static inline QDF_STATUS mlme_vdev_disconnect_peers( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_disconnect_peers) + ret = vdev_mlme->ops->mlme_vdev_disconnect_peers( + vdev_mlme, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_dfs_cac_timer_stop - Stop CAC timer + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API stops the CAC timer through DFS API + * + * Return: SUCCESS on successful CAC timer stop + * FAILURE, if it fails due to any + */ +static inline QDF_STATUS mlme_vdev_dfs_cac_timer_stop( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_dfs_cac_timer_stop) + ret = vdev_mlme->ops->mlme_vdev_dfs_cac_timer_stop( + vdev_mlme, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_stop_send - Invokes VDEV stop operation + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API invokes VDEV stop operation + * + * Return: SUCCESS on successful completion of stop operation + * FAILURE, if it fails due to any + */ +static inline QDF_STATUS mlme_vdev_stop_send( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_stop_send) + ret = vdev_mlme->ops->mlme_vdev_stop_send( + vdev_mlme, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_stop_continue - VDEV stop response handling + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API invokes VDEV stop response actions + * + * Return: SUCCESS on successful completion of stop response operation + * FAILURE, if it fails due to any + */ +static inline QDF_STATUS mlme_vdev_stop_continue( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_stop_continue) + ret = vdev_mlme->ops->mlme_vdev_stop_continue(vdev_mlme, + event_data_len, + event_data); + + return ret; +} + +/** + * mlme_vdev_down_send - VDEV down operation + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API invokes VDEV down operation + * + * Return: SUCCESS on successful completion of VDEV down operation + * FAILURE, if it fails due to any + */ +static inline QDF_STATUS mlme_vdev_down_send( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_down_send) + ret = vdev_mlme->ops->mlme_vdev_down_send( + vdev_mlme, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_notify_down_complete - VDEV init state transition notification + * @vdev_mlme_obj: VDEV MLME comp object + * @event_data_len: data size + * @event_data: event data + * + * API notifies MLME on moving to INIT state + * + * Return: SUCCESS on successful completion of down notification + * FAILURE, if it fails due to any + */ +static inline QDF_STATUS mlme_vdev_notify_down_complete( + struct vdev_mlme_obj *vdev_mlme, + uint16_t event_data_len, void *event_data) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_notify_down_complete) + ret = vdev_mlme->ops->mlme_vdev_notify_down_complete( + vdev_mlme, event_data_len, event_data); + + return ret; +} + +/** + * mlme_vdev_notify_start_state_exit - VDEV SM start state exit notification + * @vdev_mlme_obj: VDEV MLME comp object + * + * API notifies on start state exit + * + * Return: SUCCESS on successful completion of notification + * FAILURE, if it fails due to any + */ +static inline QDF_STATUS mlme_vdev_notify_start_state_exit( + struct vdev_mlme_obj *vdev_mlme) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && + vdev_mlme->ops->mlme_vdev_notify_start_state_exit) + ret = vdev_mlme->ops->mlme_vdev_notify_start_state_exit( + vdev_mlme); + + return ret; +} + +/** + * mlme_vdev_is_newchan_no_cac - Checks new channel requires CAC + * @vdev_mlme_obj: VDEV MLME comp object + * + * API checks whether Channel needs CAC period, + * if yes, it moves to SUSPEND_RESTART to disconnect stations before + * sending RESTART to FW, otherwise, it moves to RESTART_PROGRESS substate + * + * Return: SUCCESS to move to RESTART_PROGRESS substate + * FAILURE, move to SUSPEND_RESTART state + */ +static inline QDF_STATUS mlme_vdev_is_newchan_no_cac( + struct vdev_mlme_obj *vdev_mlme) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_is_newchan_no_cac) + ret = vdev_mlme->ops->mlme_vdev_is_newchan_no_cac(vdev_mlme); + + return ret; +} + +#ifdef VDEV_SM_LOCK_SUPPORT +/** + * mlme_vdev_sm_spinlock_create - Create VDEV MLME spinlock + * @vdev_mlme_obj: VDEV MLME comp object + * + * Creates VDEV MLME spinlock + * + * Return: void + */ +static inline void mlme_vdev_sm_spinlock_create(struct vdev_mlme_obj *vdev_mlme) +{ + qdf_spinlock_create(&vdev_mlme->sm_lock); +} + +/** + * mlme_vdev_sm_spinlock_destroy - Destroy VDEV MLME spinlock + * @vdev_mlme_obj: VDEV MLME comp object + * + * Destroy VDEV MLME spinlock + * + * Return: void + */ +static inline void mlme_vdev_sm_spinlock_destroy( + struct vdev_mlme_obj *vdev_mlme) +{ + qdf_spinlock_destroy(&vdev_mlme->sm_lock); +} + +/** + * mlme_vdev_sm_spin_lock - acquire spinlock + * @vdev_mlme_obj: vdev mlme comp object + * + * acquire vdev mlme spinlock + * + * return: void + */ +static inline void mlme_vdev_sm_spin_lock(struct vdev_mlme_obj *vdev_mlme) +{ + qdf_spin_lock_bh(&vdev_mlme->sm_lock); +} + +/** + * mlme_vdev_sm_spin_unlock - release spinlock + * @vdev_mlme_obj: vdev mlme comp object + * + * release vdev mlme spinlock + * + * return: void + */ +static inline void mlme_vdev_sm_spin_unlock(struct vdev_mlme_obj *vdev_mlme) +{ + qdf_spin_unlock_bh(&vdev_mlme->sm_lock); +} + +/** + * mlme_vdev_cmd_mutex_create - Create VDEV MLME cmd mutex + * @vdev_mlme_obj: VDEV MLME comp object + * + * Creates VDEV MLME cmd mutex + * + * Return: void + */ +static inline void +mlme_vdev_cmd_mutex_create(struct vdev_mlme_obj *vdev_mlme) +{ + qdf_mutex_create(&vdev_mlme->vdev_cmd_lock); +} + +/** + * mlme_vdev_cmd_mutex_destroy - Destroy VDEV MLME cmd mutex + * @vdev_mlme_obj: VDEV MLME comp object + * + * Destroy VDEV MLME cmd mutex + * + * Return: void + */ +static inline void +mlme_vdev_cmd_mutex_destroy(struct vdev_mlme_obj *vdev_mlme) +{ + qdf_mutex_destroy(&vdev_mlme->vdev_cmd_lock); +} + +/** + * mlme_vdev_cmd_mutex_acquire - acquire mutex + * @vdev_mlme_obj: vdev mlme comp object + * + * acquire vdev mlme cmd mutex + * + * return: void + */ +static inline void mlme_vdev_cmd_mutex_acquire(struct vdev_mlme_obj *vdev_mlme) +{ + qdf_mutex_acquire(&vdev_mlme->vdev_cmd_lock); +} + +/** + * mlme_vdev_cmd_mutex_release - release mutex + * @vdev_mlme_obj: vdev mlme comp object + * + * release vdev mlme cmd mutex + * + * return: void + */ +static inline void mlme_vdev_cmd_mutex_release(struct vdev_mlme_obj *vdev_mlme) +{ + qdf_mutex_release(&vdev_mlme->vdev_cmd_lock); +} + +#else +static inline void mlme_vdev_sm_spinlock_create(struct vdev_mlme_obj *vdev_mlme) +{ + mlme_debug("VDEV SM lock is disabled!!!"); +} + +static inline void mlme_vdev_sm_spinlock_destroy( + struct vdev_mlme_obj *vdev_mlme) +{ + mlme_debug("VDEV SM lock is disabled!!!"); +} + +static inline void mlme_vdev_sm_spin_lock(struct vdev_mlme_obj *vdev_mlme) +{ +} + +static inline void mlme_vdev_sm_spin_unlock(struct vdev_mlme_obj *vdev_mlme) +{ +} + +static inline void +mlme_vdev_cmd_mutex_create(struct vdev_mlme_obj *vdev_mlme) +{ + mlme_debug("VDEV CMD lock is disabled!!!"); +} + +static inline void +mlme_vdev_cmd_mutex_destroy(struct vdev_mlme_obj *vdev_mlme) +{ + mlme_debug("VDEV CMD lock is disabled!!!"); +} + +static inline void mlme_vdev_cmd_mutex_acquire(struct vdev_mlme_obj *vdev_mlme) +{ +} + +static inline void mlme_vdev_cmd_mutex_release(struct vdev_mlme_obj *vdev_mlme) +{ +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_tgt_if_rx_api.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_tgt_if_rx_api.h new file mode 100644 index 0000000000000000000000000000000000000000..893b76185aed147cb4714d75ac126ddbbfebed37 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_tgt_if_rx_api.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_vdev_mgr_rx_ops.h + * + * API declarations to handle WMI response event corresponding to vdev mgmt + */ + +#ifndef __WLAN_VDEV_MGR_RX_OPS_H__ +#define __WLAN_VDEV_MGR_RX_OPS_H__ + +#include +#include + +/** + * tgt_vdev_mgr_register_rx_ops() - API to register rx ops with lmac + * @rx_ops: rx ops struct + * + * Return: none + */ +void tgt_vdev_mgr_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops); + +/** + * tgt_vdev_mgr_ext_tbttoffset_update_handle() - API to handle ext tbtt offset + * update event + * @num_vdevs: number of vdevs + * @is_ext: ext is set/reset + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +tgt_vdev_mgr_ext_tbttoffset_update_handle(uint32_t num_vdevs, bool is_ext); + +/** + * tgt_vdev_mgr_get_response_timer_info() - API to get vdev_mgr timer info + * @psoc: objmgr psoc object + * @vdev_id: vdev id + * + * Return: struct vdev_response_timer on success else NULL + */ +struct vdev_response_timer * +tgt_vdev_mgr_get_response_timer_info(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id); + +#endif /* __WLAN_VDEV_MGR_RX_OPS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_tgt_if_rx_defs.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_tgt_if_rx_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..4495ae8c3bad852c47ab27678615f2646e289166 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_tgt_if_rx_defs.h @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_vdev_mgr_tgt_if_rx_defs.h + * + * This header file provides definitions to data structures for + * corresponding vdev mgmt operation + */ + +#ifndef __WLAN_VDEV_MGR_TGT_IF_RX_DEFS_H__ +#define __WLAN_VDEV_MGR_TGT_IF_RX_DEFS_H__ + +#include +#include +#ifdef FEATURE_RUNTIME_PM +#include +#endif + +/** + * enum wlan_vdev_mgr_tgt_if_rsp_bit - response status bit + * START_RESPONSE_BIT: vdev start response bit + * RESTART_RESPONSE_BIT: vdev restart response bit + * STOP_RESPONSE_BIT: vdev stop response bit + * DELETE_RESPONSE_BIT: vdev delete response bit + * PEER_DELETE_ALL_RESPONSE_BIT: vdev peer delete all response bit + */ +enum wlan_vdev_mgr_tgt_if_rsp_bit { + START_RESPONSE_BIT = 0, + RESTART_RESPONSE_BIT = 1, + STOP_RESPONSE_BIT = 2, + DELETE_RESPONSE_BIT = 3, + PEER_DELETE_ALL_RESPONSE_BIT = 4, + RESPONSE_BIT_MAX, +}; + +/** + * string_from_rsp_bit() - Convert response bit to string + * @bit - response bit as in wlan_vdev_mgr_tgt_if_rsp_bit + * + * Please note to add new string in the array at index equal to + * its enum value in wlan_vdev_mgr_tgt_if_rsp_bit. + */ +static inline char *string_from_rsp_bit(enum wlan_vdev_mgr_tgt_if_rsp_bit bit) +{ + static const char *strings[] = { "START", + "RESTART", + "STOP", + "DELETE", + "PEER DELETE ALL", + "RESPONE MAX"}; + return (char *)strings[bit]; +} + +#ifdef FEATURE_RUNTIME_PM +/* Add extra PMO_RESUME_TIMEOUT for runtime PM resume timeout */ +#define START_RESPONSE_TIMER (6000 + PMO_RESUME_TIMEOUT) +#define STOP_RESPONSE_TIMER (4000 + PMO_RESUME_TIMEOUT) +#define DELETE_RESPONSE_TIMER (4000 + PMO_RESUME_TIMEOUT) +#define PEER_DELETE_ALL_RESPONSE_TIMER (6000 + PMO_RESUME_TIMEOUT) +#else +#define START_RESPONSE_TIMER 6000 +#define STOP_RESPONSE_TIMER 4000 +#define DELETE_RESPONSE_TIMER 4000 +#define PEER_DELETE_ALL_RESPONSE_TIMER 6000 +#endif + +/** + * struct vdev_response_timer - vdev mgmt response ops timer + * @psoc: Object manager psoc + * @rsp_timer: VDEV MLME mgmt response timer + * @rsp_status: variable to check response status + * @expire_time: time to expire timer + * @timer_status: status of timer + * @rsp_timer_inuse: Status bit to inform whether the rsp timer is inuse + * @vdev_id: vdev object id + */ +struct vdev_response_timer { + struct wlan_objmgr_psoc *psoc; + qdf_timer_t rsp_timer; + unsigned long rsp_status; + uint32_t expire_time; + QDF_STATUS timer_status; + qdf_atomic_t rsp_timer_inuse; + uint8_t vdev_id; +}; + +/** + * struct vdev_start_response - start response structure + * @vdev_id: vdev id + * @requestor_id: requester id + * @status: status of start request + * @resp_type: response of event type START/RESTART + * @chain_mask: chain mask + * @smps_mode: smps mode + * @mac_id: mac id + * @cfgd_tx_streams: configured tx streams + * @cfgd_rx_streams: configured rx streams + * @max_allowed_tx_power: max tx power allowed + */ +struct vdev_start_response { + uint8_t vdev_id; + uint32_t requestor_id; + uint32_t status; + uint32_t resp_type; + uint32_t chain_mask; + uint32_t smps_mode; + uint32_t mac_id; + uint32_t cfgd_tx_streams; + uint32_t cfgd_rx_streams; + uint32_t max_allowed_tx_power; +}; + +/** + * struct vdev_stop_response - stop response structure + * @vdev_id: vdev id + */ +struct vdev_stop_response { + uint8_t vdev_id; +}; + +/** + * struct vdev_delete_response - delete response structure + * @vdev_id: vdev id + */ +struct vdev_delete_response { + uint8_t vdev_id; +}; + +/** + * struct peer_delete_all_response - peer delete all response structure + * @vdev_id: vdev id + * @status: FW status for vdev delete all peer request + */ +struct peer_delete_all_response { + uint8_t vdev_id; + uint8_t status; +}; + +#define VDEV_ID_BMAP_SIZE 2 +/** + * struct multi_vdev_restart_resp - multi-vdev restart response structure + * @pdev_id: pdev id + * @status: FW status for multi vdev restart request + * @vdev_id_bmap: Bitmap of vdev_ids + */ +struct multi_vdev_restart_resp { + uint8_t pdev_id; + uint8_t status; + unsigned long vdev_id_bmap[VDEV_ID_BMAP_SIZE]; +}; + +#endif /* __WLAN_VDEV_MGR_TGT_IF_RX_DEFS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_tgt_if_tx_api.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_tgt_if_tx_api.h new file mode 100644 index 0000000000000000000000000000000000000000..ddca045633c64707a9b01e9b4fca1eb948c793c2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_tgt_if_tx_api.h @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: vdev_mgr_iface_api.h + * + * API declarations required for following + * - get/set common mlme data structure fields + * - send WMI command using Tx Ops + */ + +#ifndef __WLAN_VDEV_MGR_TX_OPS_API_H__ +#define __WLAN_VDEV_MGR_TX_OPS_API_H__ + +#include +#include +#include + +/** + * tgt_vdev_mgr_create_send() – API to send create command + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to vdev_create_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_create_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_create_params *param); + +/** + * tgt_vdev_mgr_create_complete() – API to send wmi cfg corresponding + * to create command + * @mlme_obj: pointer to vdev_mlme_obj + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_create_complete(struct vdev_mlme_obj *mlme_obj); + +/** + * tgt_vdev_mgr_start_send() – API to send start command + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to vdev_start_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_start_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_start_params *param); + +/** + * tgt_vdev_mgr_delete_send() – API to send delete + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to vdev_delete_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_delete_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_delete_params *param); + +/** + * tgt_vdev_mgr_peer_flush_tids_send() – API to send peer flush tids in FW + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to peer_flush_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_peer_flush_tids_send( + struct vdev_mlme_obj *mlme_obj, + struct peer_flush_params *param); + +/** + * tgt_vdev_mgr_stop_send() – API to send stop command + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to vdev_stop_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_stop_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_stop_params *param); + +/** + * tgt_vdev_mgr_beacon_stop() – API to handle beacon buffer + * @mlme_obj: pointer to vdev_mlme_obj + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_beacon_stop(struct vdev_mlme_obj *mlme_obj); + +/** + * tgt_vdev_mgr_beacon_free() – API to free beacon buffer + * @mlme_obj: pointer to vdev_mlme_obj + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_beacon_free(struct vdev_mlme_obj *mlme_obj); + +/** + * tgt_vdev_mgr_up_send() – API to send up + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to vdev_up_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_up_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_up_params *param); + +/** + * tgt_vdev_mgr_down_send() – API to send down + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to vdev_down_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_down_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_down_params *param); + +/** + * tgt_vdev_mgr_set_neighbour_rx_cmd_send() – API to send neighbour rx + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to set_neighbour_rx_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_set_neighbour_rx_cmd_send( + struct vdev_mlme_obj *mlme_obj, + struct set_neighbour_rx_params *param); + +/** + * tgt_vdev_mgr_nac_rssi_send() – API to send NAC RSSI + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to vdev_scan_nac_rssi_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_nac_rssi_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_scan_nac_rssi_params *param); + +/** + * tgt_vdev_mgr_sifs_trigger_send() – API to send SIFS trigger + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to sifs_trigger_param + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_sifs_trigger_send( + struct vdev_mlme_obj *mlme_obj, + struct sifs_trigger_param *param); + +/** + * tgt_vdev_mgr_set_custom_aggr_size_send() – API to send custom aggr size + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to set_custom_aggr_size_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_set_custom_aggr_size_send( + struct vdev_mlme_obj *mlme_obj, + struct set_custom_aggr_size_params *param); + +/** + * tgt_vdev_mgr_config_ratemask_cmd_send() – API to configure ratemask + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to config_ratemask_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_config_ratemask_cmd_send( + struct vdev_mlme_obj *mlme_obj, + struct config_ratemask_params *param); + +/** + * tgt_vdev_mgr_sta_ps_param_send() – API to send sta power save configuration + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to sta_ps_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_sta_ps_param_send( + struct vdev_mlme_obj *mlme_obj, + struct sta_ps_params *param); + +/** + * tgt_vdev_mgr_beacon_cmd_send() – API to send beacon + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to beacon_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_beacon_cmd_send( + struct vdev_mlme_obj *mlme_obj, + struct beacon_params *param); + +/** + * tgt_vdev_mgr_beacon_tmpl_send() – API to send beacon template + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to beacon_tmpl_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_beacon_tmpl_send( + struct vdev_mlme_obj *mlme_obj, + struct beacon_tmpl_params *param); + +#if defined(WLAN_SUPPORT_FILS) || defined(CONFIG_BAND_6GHZ) +/** + * tgt_vdev_mgr_fils_enable_send()- API to send fils enable command + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to config_fils_params struct + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_fils_enable_send( + struct vdev_mlme_obj *mlme_obj, + struct config_fils_params *param); +#else +/** + * tgt_vdev_mgr_fils_enable_send()- API to send fils enable command + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to config_fils_params struct + * + * Return: QDF_STATUS - Success or Failure + */ +static inline QDF_STATUS tgt_vdev_mgr_fils_enable_send( + struct vdev_mlme_obj *mlme_obj, + struct config_fils_params *param) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * tgt_vdev_mgr_multiple_vdev_restart_send() – API to send multiple vdev + * restart + * @pdev: pointer to pdev + * @param: pointer to multiple_vdev_restart_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_multiple_vdev_restart_send( + struct wlan_objmgr_pdev *pdev, + struct multiple_vdev_restart_params *param); + +/** + * tgt_vdev_mgr_set_tx_rx_decap_type() – API to send tx rx decap type + * @mlme_obj: pointer to vdev mlme obj + * @param_id: param id + * value: value to set for param id + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_set_tx_rx_decap_type(struct vdev_mlme_obj *mlme_obj, + enum wlan_mlme_cfg_id param_id, + uint32_t value); + +/** + * tgt_vdev_mgr_set_param_send() – API to send parameter cfg + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to vdev_set_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_set_param_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_set_params *param); + +/** + * tgt_vdev_mgr_bcn_miss_offload_send() – API to send beacon miss offload + * @mlme_obj: pointer to vdev_mlme_obj + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_bcn_miss_offload_send(struct vdev_mlme_obj *mlme_obj); + +/** + * tgt_vdev_mgr_peer_delete_all_send() – API to send peer delete all request + * @mlme_obj: pointer to vdev_mlme_obj + * @param: pointer to peer_delete_all_params + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS tgt_vdev_mgr_peer_delete_all_send( + struct vdev_mlme_obj *mlme_obj, + struct peer_delete_all_params *param); + +#endif /* __WLAN_VDEV_MGR_TX_OPS_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_tgt_if_tx_defs.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_tgt_if_tx_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..51283ab146fd62956b0335efa2f0ab93b4855ff1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_tgt_if_tx_defs.h @@ -0,0 +1,528 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_vdev_mgr_tgt_if_tx_defs.h + * + * This file provides definitions to data structures required for vdev Tx ops + */ + +#ifndef __WLAN_VDEV_MGR_TX_OPS_DEFS_H__ +#define __WLAN_VDEV_MGR_TX_OPS_DEFS_H__ + +#include + +/** + * struct mac_ssid - mac ssid structure + * @length: ssid length + * @mac_ssid: ssid + */ +struct mlme_mac_ssid { + uint8_t length; + uint8_t mac_ssid[WLAN_SSID_MAX_LEN]; +} qdf_packed; + +/** slot time long */ +#define WLAN_MLME_VDEV_SLOT_TIME_LONG 0x1 +/** slot time short */ +#define WLAN_MLME_VDEV_SLOT_TIME_SHORT 0x2 + +/** + * enum MLME_bcn_tx_rate_code - beacon tx rate code + */ +enum mlme_bcn_tx_rate_code { + MLME_BCN_TX_RATE_CODE_1_M = 0x43, + MLME_BCN_TX_RATE_CODE_2_M = 0x42, + MLME_BCN_TX_RATE_CODE_5_5_M = 0x41, + MLME_BCN_TX_RATE_CODE_6_M = 0x03, + MLME_BCN_TX_RATE_CODE_9_M = 0x07, + MLME_BCN_TX_RATE_CODE_11M = 0x40, + MLME_BCN_TX_RATE_CODE_12_M = 0x02, + MLME_BCN_TX_RATE_CODE_18_M = 0x06, + MLME_BCN_TX_RATE_CODE_24_M = 0x01, + MLME_BCN_TX_RATE_CODE_36_M = 0x05, + MLME_BCN_TX_RATE_CODE_48_M = 0x00, + MLME_BCN_TX_RATE_CODE_54_M = 0x04, +}; + +/** + * enum wlan_mlme_host_sta_ps_param_uapsd - STA UPASD params + */ +enum wlan_mlme_host_sta_ps_param_uapsd { + WLAN_MLME_HOST_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0), + WLAN_MLME_HOST_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1), + WLAN_MLME_HOST_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2), + WLAN_MLME_HOST_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3), + WLAN_MLME_HOST_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4), + WLAN_MLME_HOST_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5), + WLAN_MLME_HOST_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6), + WLAN_MLME_HOST_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7), +}; + +/** + * enum wlan_mlme_host_vdev_start_status - vdev start status code + */ +enum wlan_mlme_host_vdev_start_status { + WLAN_MLME_HOST_VDEV_START_OK = 0, + WLAN_MLME_HOST_VDEV_START_CHAN_INVALID, + WLAN_MLME_HOST_VDEV_START_CHAN_BLOCKED, + WLAN_MLME_HOST_VDEV_START_CHAN_DFS_VIOLATION, + WLAN_MLME_HOST_VDEV_START_CHAN_INVALID_REGDOMAIN, + WLAN_MLME_HOST_VDEV_START_CHAN_INVALID_BAND, + WLAN_MLME_HOST_VDEV_START_TIMEOUT, + /* Add new response status code from here */ + WLAN_MLME_HOST_VDEV_START_MAX_REASON, +}; + +/** + * string_from_start_rsp_status() - Convert start response status to string + * @start_rsp - start response status + * + * Please note to add new string in the array at index equal to + * its enum value in wlan_mlme_host_vdev_start_status. + */ +static inline char *string_from_start_rsp_status( + enum wlan_mlme_host_vdev_start_status start_rsp) +{ + static const char *strings[] = { "START_OK", + "CHAN_INVALID", + "CHAN_BLOCKED", + "CHAN_DFS_VIOLATION", + "CHAN_INVALID_REGDOMAIN", + "CHAN_INVALID_BAND", + "START_RESPONSE_TIMEOUT", + "START_RESPONSE_UNKNOWN"}; + + if (start_rsp >= WLAN_MLME_HOST_VDEV_START_MAX_REASON) + start_rsp = WLAN_MLME_HOST_VDEV_START_MAX_REASON; + + return (char *)strings[start_rsp]; +} + +/** + * enum wlan_mlme_host_start_event_param - start/restart resp event + */ +enum wlan_mlme_host_start_event_param { + WLAN_MLME_HOST_VDEV_START_RESP_EVENT = 0, + WLAN_MLME_HOST_VDEV_RESTART_RESP_EVENT, +}; + +/** + * enum wlan_mlme_custom_aggr_type: custon aggregate type + * @WLAN_MLME_CUSTOM_AGGR_TYPE_AMPDU: A-MPDU aggregation + * @WLAN_MLME_CUSTOM_AGGR_TYPE_AMSDU: A-MSDU aggregation + * @WLAN_MLME_CUSTOM_AGGR_TYPE_MAX: Max type + */ +enum wlan_mlme_custom_aggr_type { + WLAN_MLME_CUSTOM_AGGR_TYPE_AMPDU = 0, + WLAN_MLME_CUSTOM_AGGR_TYPE_AMSDU = 1, + WLAN_MLME_CUSTOM_AGGR_TYPE_MAX, +}; + +/** + * struct sta_ps_params - sta ps cmd parameter + * @vdev_id: vdev id + * @param_id: sta ps parameter + * @value: sta ps parameter value + */ +struct sta_ps_params { + uint32_t vdev_id; + uint32_t param_id; + uint32_t value; +}; + +/** + * struct tbttoffset_params - Tbttoffset event params + * @vdev_id: Virtual AP device identifier + * @tbttoffset : Tbttoffset for the virtual AP device + * @vdev_tbtt_qtime_lo: Tbtt qtime low value + * @vdev_tbtt_qtime_hi: Tbtt qtime high value + */ +struct tbttoffset_params { + uint32_t vdev_id; + uint32_t tbttoffset; + uint32_t vdev_tbtt_qtime_lo; + uint32_t vdev_tbtt_qtime_hi; +}; + +/** + * struct beacon_tmpl_params - beacon template cmd parameter + * @vdev_id: vdev id + * @tim_ie_offset: tim ie offset + * @mbssid_ie_offset: mbssid ie offset + * @tmpl_len: beacon template length + * @tmpl_len_aligned: beacon template alignment + * @csa_switch_count_offset: CSA swith count offset in beacon frame + * @ext_csa_switch_count_offset: ECSA switch count offset in beacon frame + * @esp_ie_offset: ESP IE offset in beacon frame + * @mu_edca_ie_offset: Mu EDCA IE offset in beacon frame + * @frm: beacon template parameter + */ +struct beacon_tmpl_params { + uint8_t vdev_id; + uint32_t tim_ie_offset; + uint32_t mbssid_ie_offset; + uint32_t tmpl_len; + uint32_t tmpl_len_aligned; + uint32_t csa_switch_count_offset; + uint32_t ext_csa_switch_count_offset; + uint32_t esp_ie_offset; + uint32_t mu_edca_ie_offset; + uint8_t *frm; +}; + +/** + * struct beacon_params - beacon cmd parameter + * @vdev_id: vdev id + * @beacon_interval: Beacon interval + * @wbuf: beacon buffer + * @frame_ctrl: frame control field + * @bcn_txant: beacon antenna + * @is_dtim_count_zero: is it dtim beacon + * @is_bitctl_reqd: is Bit control required + * @is_high_latency: Is this high latency target + */ +struct beacon_params { + uint8_t vdev_id; + uint16_t beacon_interval; + qdf_nbuf_t wbuf; + uint16_t frame_ctrl; + uint32_t bcn_txant; + bool is_dtim_count_zero; + bool is_bitctl_reqd; + bool is_high_latency; +}; + +/* struct fils_discovery_tmpl_params - FILS Discovery template cmd parameter + * @vdev_id: vdev ID + * @tmpl_len: FILS Discovery template length + * @tmpl_aligned: FILS Discovery template alignment + * @frm: FILS Discovery template parameter + */ +struct fils_discovery_tmpl_params { + uint8_t vdev_id; + uint32_t tmpl_len; + uint32_t tmpl_len_aligned; + uint8_t *frm; +}; + +/** + * struct mlme_channel_param - Channel parameters with all + * info required by target. + * @chan_id: channel id + * @pwr: channel power + * @mhz: channel frequency + * @half_rate: is half rate + * @quarter_rate: is quarter rate + * @dfs_set: is dfs channel + * @dfs_set_cfreq2: is secondary freq dfs channel + * @is_chan_passive: is this passive channel + * @allow_ht: HT allowed in chan + * @allow_vht: VHT allowed on chan + * @set_agile: is agile mode + * @phy_mode: phymode (vht80 or ht40 or ...) + * @cfreq1: centre frequency on primary + * @cfreq2: centre frequency on secondary + * @maxpower: max power for channel + * @minpower: min power for channel + * @maxreqpower: Max regulatory power + * @antennamac: Max antenna + * @reg_class_id: Regulatory class id. + */ +struct mlme_channel_param { + uint8_t chan_id; + uint8_t pwr; + uint32_t mhz; + uint32_t half_rate:1, + quarter_rate:1, + dfs_set:1, + dfs_set_cfreq2:1, + is_chan_passive:1, + allow_ht:1, + allow_vht:1, + set_agile:1; + enum wlan_phymode phy_mode; + uint32_t cfreq1; + uint32_t cfreq2; + int8_t maxpower; + int8_t minpower; + int8_t maxregpower; + uint8_t antennamax; + uint8_t reg_class_id; +}; + +/** + * struct vdev_mlme_mvr_param - Multiple vdev restart params + * @phymode: phymode information + */ +struct vdev_mlme_mvr_param { + uint32_t phymode; +}; + +/** + * struct multiple_vdev_restart_params - Multiple vdev restart cmd parameter + * @pdev_id: Pdev identifier + * @requestor_id: Unique id identifying the module + * @disable_hw_ack: Flag to indicate disabling HW ACK during CAC + * @cac_duration_ms: CAC duration on the given channel + * @num_vdevs: No. of vdevs that need to be restarted + * @ch_param: Pointer to channel_param + * @vdev_ids: Pointer to array of vdev_ids + * @mvr_param: array holding multi vdev restart param + */ +struct multiple_vdev_restart_params { + uint32_t pdev_id; + uint32_t requestor_id; + uint32_t disable_hw_ack; + uint32_t cac_duration_ms; + uint32_t num_vdevs; + struct mlme_channel_param ch_param; + uint32_t vdev_ids[WLAN_UMAC_PDEV_MAX_VDEVS]; + struct vdev_mlme_mvr_param mvr_param[WLAN_UMAC_PDEV_MAX_VDEVS]; +}; + +/** + * struct peer_flush_params - peer flush cmd parameter + * @peer_tid_bitmap: peer tid bitmap + * @vdev_id: vdev id + * @peer_mac: peer mac address + */ +struct peer_flush_params { + uint32_t peer_tid_bitmap; + uint8_t vdev_id; + uint8_t peer_mac[QDF_MAC_ADDR_SIZE]; +}; + +/* Default FILS DISCOVERY/probe response sent in period of 20TU */ +#define DEFAULT_FILS_DISCOVERY_PERIOD 20 +#define DEFAULT_PROBE_RESP_PERIOD 20 + +/** + * struct config_fils_params - FILS config params + * @vdev_id: vdev id + * @fd_period: 0 - Disabled, non-zero - Period in ms (mili seconds) + * @send_prb_rsp_frame: send broadcast prb resp frame + */ +struct config_fils_params { + uint8_t vdev_id; + uint32_t fd_period; + uint32_t send_prb_rsp_frame: 1; +}; + +/** + * struct config_ratemask_params - ratemask config parameters + * @vdev_id: vdev id + * @type: Type + * @lower32: Lower 32 bits in the 1st 64-bit value + * @higher32: Higher 32 bits in the 1st 64-bit value + * @lower32_2: Lower 32 bits in the 2nd 64-bit value + * @higher32_2: Higher 32 bits in the 2nd 64-bit value + */ +struct config_ratemask_params { + uint8_t vdev_id; + uint8_t type; + uint32_t lower32; + uint32_t higher32; + uint32_t lower32_2; + uint32_t higher32_2; +}; + +/** + * struct set_custom_aggr_size_params - custom aggr size params + * @vdev_id : vdev id + * @tx_aggr_size : TX aggr size + * @rx_aggr_size : RX aggr size + * @enable_bitmap: Bitmap for aggr size check + */ +struct set_custom_aggr_size_params { + uint32_t vdev_id; + uint32_t tx_aggr_size; + uint32_t rx_aggr_size; + uint32_t ac:2, + aggr_type:1, + tx_aggr_size_disable:1, + rx_aggr_size_disable:1, + tx_ac_enable:1, + reserved:26; +}; + +/** + * struct sifs_trigger_param - sifs_trigger cmd parameter + * @vdev_id: vdev id + * @param_value: parameter value + */ +struct sifs_trigger_param { + uint32_t vdev_id; + uint32_t param_value; +}; + +/** + * struct set_neighbour_rx_params - Neighbour RX params + * @vdev_id: vdev id + * @idx: index of param + * @action: action + * @type: Type of param + */ +struct set_neighbour_rx_params { + uint8_t vdev_id; + uint32_t idx; + uint32_t action; + uint32_t type; +}; + +/** + * struct vdev_scan_nac_rssi_params - NAC_RSSI cmd parameter + * @vdev_id: vdev id + * @bssid_addr: BSSID address + * @client_addr: client address + * @chan_num: channel number + * @action:NAC_RSSI action, + */ +struct vdev_scan_nac_rssi_params { + uint32_t vdev_id; + uint8_t bssid_addr[QDF_MAC_ADDR_SIZE]; + uint8_t client_addr[QDF_MAC_ADDR_SIZE]; + uint32_t chan_num; + uint32_t action; /* WMI_FILTER_NAC_RSSI_ACTION */ +}; + +/** + * struct vdev_start_params - vdev start cmd parameter + * @vdev_id: vdev id + * @beacon_interval: beacon interval + * @dtim_period: dtim period + * @is_restart: flag to check if it is vdev + * @disable_hw_ack: to update disable hw ack flag + * @hidden_ssid: hidden ssid + * @pmf_enabled: pmf enabled + * @ssid: ssid MAC + * @num_noa_descriptors: number of noa descriptors + * @preferred_tx_streams: preferred tx streams + * @preferred_rx_streams: preferred rx streams + * @cac_duration_ms: cac duration in milliseconds + * @regdomain: Regulatory domain + * @he_ops: HE ops + * @channel_param: Channel params required by target. + * @bcn_tx_rate_code: Beacon tx rate code. + * @ldpc_rx_enabled: Enable/Disable LDPC RX for this vdev + */ +struct vdev_start_params { + uint8_t vdev_id; + uint32_t beacon_interval; + uint32_t dtim_period; + bool is_restart; + uint32_t disable_hw_ack; + bool hidden_ssid; + bool pmf_enabled; + struct mlme_mac_ssid ssid; + uint32_t num_noa_descriptors; + uint32_t preferred_rx_streams; + uint32_t preferred_tx_streams; + uint32_t cac_duration_ms; + uint32_t regdomain; + uint32_t he_ops; + struct mlme_channel_param channel; + enum mlme_bcn_tx_rate_code bcn_tx_rate_code; + bool ldpc_rx_enabled; +}; + +/** + * struct vdev_set_params - vdev set cmd parameter + * @vdev_id: vdev id + * @param_id: parameter id + * @param_value: parameter value + */ +struct vdev_set_params { + uint32_t vdev_id; + uint32_t param_id; + uint32_t param_value; +}; + +/** + * struct vdev_create_params - vdev create cmd parameter + * @vdev_id: interface id + * @type: interface type + * @subtype: interface subtype + * @nss_2g: NSS for 2G + * @nss_5g: NSS for 5G + * @pdev_id: pdev id on pdev for this vdev + * @mbssid_flags: MBSS IE flags indicating vdev type + * @vdevid_trans: id of transmitting vdev for MBSS IE + */ +struct vdev_create_params { + uint8_t vdev_id; + uint32_t type; + uint32_t subtype; + uint8_t nss_2g; + uint8_t nss_5g; + uint32_t pdev_id; + uint32_t mbssid_flags; + uint8_t vdevid_trans; +}; + +/** + * struct vdev_delete_params - vdev delete cmd parameter + * @vdev_id: vdev id + */ +struct vdev_delete_params { + uint8_t vdev_id; +}; + +/** + * struct vdev_stop_params - vdev stop cmd parameter + * @vdev_id: vdev id + */ +struct vdev_stop_params { + uint8_t vdev_id; +}; + +/** + * struct vdev_up_params - vdev up cmd parameter + * @vdev_id: vdev id + * @assoc_id: association id + * @profile_idx: profile index of the connected non-trans ap (mbssid case). + * 0 means invalid. + * @profile_num: the total profile numbers of non-trans aps (mbssid case). + * 0 means non-MBSS AP. + * @trans_bssid: bssid of transmitted AP (MBSS IE case) + */ +struct vdev_up_params { + uint8_t vdev_id; + uint16_t assoc_id; + uint32_t profile_idx; + uint32_t profile_num; + uint8_t trans_bssid[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct vdev_down_params - vdev down cmd parameter + * @vdev_id: vdev id + */ +struct vdev_down_params { + uint8_t vdev_id; +}; + +/** + * struct peer_delete_all_params - peer delete all request parameter + * @vdev_id: vdev id + */ +struct peer_delete_all_params { + uint8_t vdev_id; +}; + +#endif /* __WLAN_VDEV_MGR_TX_OPS_DEFS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..484175b71b8ad0d1fe6279e2d73f946829ec7a20 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_ucfg_api.h @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_vdev_mgr_ucfg_api.h + * + * This header file provides definitions to data structures required + * for mlme ucfg and declarations for ucfg public APIs + */ + +#ifndef __WLAN_VDEV_MGR_UCFG_API_H__ +#define __WLAN_VDEV_MGR_UCFG_API_H__ + +#include +#include +#include +#include + +enum wlan_mlme_cfg_id { + WLAN_MLME_CFG_DTIM_PERIOD, + WLAN_MLME_CFG_SLOT_TIME, + WLAN_MLME_CFG_PROTECTION_MODE, + WLAN_MLME_CFG_BEACON_INTERVAL, + WLAN_MLME_CFG_LDPC, + WLAN_MLME_CFG_NSS, + WLAN_MLME_CFG_TSF_ADJUST, + WLAN_MLME_CFG_ASSOC_ID, + WLAN_MLME_CFG_VHT_CAPS, + WLAN_MLME_CFG_SUBFER, + WLAN_MLME_CFG_MUBFER, + WLAN_MLME_CFG_SUBFEE, + WLAN_MLME_CFG_MUBFEE, + WLAN_MLME_CFG_IMLICIT_BF, + WLAN_MLME_CFG_SOUNDING_DIM, + WLAN_MLME_CFG_BFEE_STS_CAP, + WLAN_MLME_CFG_TXBF_CAPS, + WLAN_MLME_CFG_HT_CAPS, + WLAN_MLME_CFG_HE_OPS, + WLAN_MLME_CFG_RTS_THRESHOLD, + WLAN_MLME_CFG_FRAG_THRESHOLD, + WLAN_MLME_CFG_PROBE_DELAY, + WLAN_MLME_CFG_REPEAT_PROBE_TIME, + WLAN_MLME_CFG_DROP_UNENCRY, + WLAN_MLME_CFG_TX_PWR_LIMIT, + WLAN_MLME_CFG_TX_POWER, + WLAN_MLME_CFG_AMPDU, + WLAN_MLME_CFG_AMPDU_SIZE, + WLAN_MLME_CFG_AMSDU, + WLAN_MLME_CFG_AMSDU_SIZE, + WLAN_MLME_CFG_SSID, + WLAN_MLME_CFG_SSID_LEN, + WLAN_MLME_CFG_OP_MODE, + WLAN_MLME_CFG_BMISS_FIRST_BCNT, + WLAN_MLME_CFG_BMISS_FINAL_BCNT, + WLAN_MLME_CFG_MIN_IDLE_INACTIVE_TIME, + WLAN_MLME_CFG_MAX_IDLE_INACTIVE_TIME, + WLAN_MLME_CFG_MAX_UNRESPONSIVE_INACTIVE_TIME, + WLAN_MLME_CFG_RATE_FLAGS, + WLAN_MLME_CFG_PER_BAND_TX_MGMT_RATE, + WLAN_MLME_CFG_MAX_RATE, + WLAN_MLME_CFG_TX_MGMT_RATE, + WLAN_MLME_CFG_TX_MGMT_RATE_CODE, + WLAN_MLME_CFG_TX_CHAINMASK, + WLAN_MLME_CFG_RX_CHAINMASK, + WLAN_MLME_CFG_PKT_POWERSAVE, + WLAN_MLME_CFG_MAX_LI_OF_MODDTIM, + WLAN_MLME_CFG_DYNDTIM_CNT, + WLAN_MLME_CFG_LISTEN_INTERVAL, + WLAN_MLME_CFG_MODDTIM_CNT, + WLAN_MLME_CFG_BEACON_BUFFER, + WLAN_MLME_CFG_BEACON_OFFSETS, + WLAN_MLME_CFG_PROFILE_IDX, + WLAN_MLME_CFG_PROFILE_NUM, + WLAN_MLME_CFG_MBSSID_FLAGS, + WLAN_MLME_CFG_VDEVID_TRANS, + WLAN_MLME_CFG_TRANS_BSSID, + WLAN_MLME_CFG_TYPE, + WLAN_MLME_CFG_SUBTYPE, + WLAN_MLME_CFG_UAPSD, + WLAN_MLME_CFG_TX_ENCAP_TYPE, + WLAN_MLME_CFG_RX_DECAP_TYPE, + WLAN_MLME_CFG_RATEMASK_TYPE, + WLAN_MLME_CFG_RATEMASK_LOWER32, + WLAN_MLME_CFG_RATEMASK_HIGHER32, + WLAN_MLME_CFG_RATEMASK_LOWER32_2, + WLAN_MLME_CFG_BCN_TX_RATE, + WLAN_MLME_CFG_BCN_TX_RATE_CODE, + WLAN_MLME_CFG_RATEMASK_CAPS, + WLAN_MLME_CFG_ENABLE_MULTI_GROUP_KEY, + WLAN_MLME_CFG_MAX_GROUP_KEYS, + WLAN_MLME_CFG_MAX +}; + +/** + * struct wlan_vdev_mgr_cfg - vdev mgr configuration + * @value: configuration value + * @tsf: tsf adjust value + * @trans_bssid: transmission bssid address + * @ssid_cfg: ssid configuration + */ +struct wlan_vdev_mgr_cfg { + union { + uint32_t value; + uint64_t tsf; + uint8_t trans_bssid[QDF_MAC_ADDR_SIZE]; + struct mlme_mac_ssid ssid_cfg; + }; +}; + +/** + * ucfg_wlan_vdev_mgr_set_param() – ucfg MLME API to + * set value into mlme vdev mgr component + * @vdev: pointer to vdev object + * @param_id: param of type wlan_mlme_cfg_id + * @mlme_cfg: value to set into mlme vdev mgr + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_wlan_vdev_mgr_set_param(struct wlan_objmgr_vdev *vdev, + enum wlan_mlme_cfg_id param_id, + struct wlan_vdev_mgr_cfg mlme_cfg); + +/** + * ucfg_wlan_vdev_mgr_get_param() – ucfg MLME API to + * get value from mlme vdev mgr component + * @vdev: pointer to vdev object + * @param_id: param of type wlan_mlme_cfg_id + * @param_value: pointer to store the value of mlme vdev mgr + * + * Return: void + */ +void ucfg_wlan_vdev_mgr_get_param(struct wlan_objmgr_vdev *vdev, + enum wlan_mlme_cfg_id param_id, + uint32_t *param_value); + +/** + * ucfg_wlan_vdev_mgr_get_param_ssid() – ucfg MLME API to + * get ssid from mlme vdev mgr component + * @vdev: pointer to vdev object + * @ssid: pointer to store the ssid + * @ssid_len: pointer to store the ssid length value + * + * Return: void + */ +void ucfg_wlan_vdev_mgr_get_param_ssid(struct wlan_objmgr_vdev *vdev, + uint8_t *ssid, + uint8_t *ssid_len); + +/** + * ucfg_wlan_vdev_mgr_get_param_bssid() – ucfg MLME API to + * get bssid from mlme vdev mgr component + * @vdev: pointer to vdev object + * @bssid: pointer to store the bssid + * + */ +void ucfg_wlan_vdev_mgr_get_param_bssid( + struct wlan_objmgr_vdev *vdev, + uint8_t *bssid); + +/** + * ucfg_wlan_vdev_mgr_get_beacon_buffer() – ucfg MLME API to + * get beacon buffer from mlme vdev mgr component + * @vdev: pointer to vdev object + * @buf: pointer to store the beacon buffer + * + * Return: void + */ +void ucfg_wlan_vdev_mgr_get_beacon_buffer(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t buf); + +/** + * ucfg_wlan_vdev_mgr_get_trans_bssid() – ucfg MLME API to + * get transmission bssid from mlme vdev mgr component + * @vdev: pointer to vdev object + * @addr: pointer to store the transmission bssid + * + * Return: void + */ +void ucfg_wlan_vdev_mgr_get_trans_bssid(struct wlan_objmgr_vdev *vdev, + uint8_t *addr); + +/** + * ucfg_wlan_vdev_mgr_get_tsf_adjust() – ucfg MLME API to + * get tsf_adjust from mlme vdev mgr component + * @vdev: pointer to vdev object + * @tsf_adjust: pointer to store the tsf adjust value + * + * Return: void + */ +void ucfg_wlan_vdev_mgr_get_tsf_adjust(struct wlan_objmgr_vdev *vdev, + uint64_t *tsf_adjust); + +#endif /* __WLAN_VDEV_MLME_UCFG_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_utils_api.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_utils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..296fba7b87f79141ca5c2c1b960ffbd725d0a910 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mgr_utils_api.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_vdev_mgr_utils_api.h + * + * This file provides declaration for APIs used for psoc enable/disable + */ + +#ifndef __WLAN_VDEV_MGR_UTILS_API_H__ +#define __WLAN_VDEV_MGR_UTILS_API_H__ + +#include +#include +#include +#include + +/** + * wlan_util_vdev_get_cdp_txrx_opmode - get cdp txrx opmode from qdf mode + * @vdev: pointer to vdev object + * + * Return: wlan_opmode + */ +enum wlan_op_mode +wlan_util_vdev_get_cdp_txrx_opmode(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_util_vdev_get_cdp_txrx_subtype - get cdp txrx subtype from qdf mode + * @vdev: pointer to vdev object + * + * Return: wlan_opmode + */ +enum wlan_op_subtype +wlan_util_vdev_get_cdp_txrx_subtype(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_util_vdev_mlme_set_ratemask_config) – common MLME API to set + * ratemask configuration and send it to FW + * @vdev_mlme: pointer to vdev_mlme object + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS +wlan_util_vdev_mlme_set_ratemask_config(struct vdev_mlme_obj *vdev_mlme); + +/** + * wlan_util_vdev_mlme_set_param() – common MLME API to fill common + * parameters of vdev_mlme object + * @vdev_mlme: pointer to vdev_mlme object + * @param_id: param id for which the value should be set + * @param_value: value that should bem set to the parameter + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_util_vdev_mlme_set_param(struct vdev_mlme_obj *vdev_mlme, + enum wlan_mlme_cfg_id param_id, + struct wlan_vdev_mgr_cfg mlme_cfg); + +/** + * wlan_util_vdev_mlme_get_param() – common MLME API to get common + * parameters of vdev_mlme object + * @vdev_mlme: pointer to vdev_mlme object + * @param_id: param id for which the value should be set + * @param_value: value that should bem set to the parameter + * + * Return: QDF_STATUS - Success or Failure + */ +void wlan_util_vdev_mlme_get_param(struct vdev_mlme_obj *vdev_mlme, + enum wlan_mlme_cfg_id param_id, + uint32_t *param_value); + +/** + * wlan_util_vdev_get_param() – common MLME API to get common + * parameters of vdev_mlme object + * @vdev: pointer to vdev object + * @param_id: param id for which the value should be set + * @param_value: value that should bem set to the parameter + * + * Return: QDF_STATUS - Success or Failure + */ +void wlan_util_vdev_get_param(struct wlan_objmgr_vdev *vdev, + enum wlan_mlme_cfg_id param_id, + uint32_t *param_value); + +#endif /* __WLAN_VDEV_MGR_UTILS_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mlme_api.h b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mlme_api.h new file mode 100644 index 0000000000000000000000000000000000000000..11945cc8c4caee6086229db56af35c946fb784da --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/inc/wlan_vdev_mlme_api.h @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Define VDEV MLME public APIs + */ + +#ifndef _WLAN_VDEV_MLME_API_H_ +#define _WLAN_VDEV_MLME_API_H_ + +#include +/** + * wlan_vdev_mlme_get_cmpt_obj - Retrieves MLME component object + * from VDEV object + * @vdev: pointer to vdev object + * + * Retrieves MLME component object from VDEV object + * + * Return: comp handle on SUCCESS + * NULL, if it fails to retrieve + */ +struct vdev_mlme_obj *wlan_vdev_mlme_get_cmpt_obj( + struct wlan_objmgr_vdev *vdev); +/** + * wlan_vdev_mlme_set_ext_hdl - Sets legacy handle + * @vdev: pointer to vdev object + * @ext_hdl: pointer to legacy handle + * + * Sets Legacy handle to MLME component object + * + * Return: + */ +void wlan_vdev_mlme_set_ext_hdl(struct wlan_objmgr_vdev *vdev, + mlme_vdev_ext_t *ext_hdl); + +/** + * wlan_vdev_mlme_get_ext_hdl - Returns legacy handle + * @vdev: pointer to vdev object + * + * Retrieves legacy handle from vdev mlme component object + * + * Return: legacy handle on SUCCESS + * NULL, if it fails to retrieve + */ +mlme_vdev_ext_t *wlan_vdev_mlme_get_ext_hdl(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_vdev_mlme_sm_deliver_evt() - Delivers event to VDEV MLME SM + * @vdev: Object manager VDEV object + * @event: MLME event + * @event_data_len: data size + * @event_data: event data + * + * API to dispatch event to VDEV MLME SM with lock acquired + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +QDF_STATUS wlan_vdev_mlme_sm_deliver_evt(struct wlan_objmgr_vdev *vdev, + enum wlan_vdev_sm_evt event, + uint16_t event_data_len, + void *event_data); + +/** + * wlan_vdev_mlme_sm_deliver_evt_sync() - Delivers event to VDEV MLME SM sync + * @vdev: Object manager VDEV object + * @event: MLME event + * @event_data_len: data size + * @event_data: event data + * + * API to dispatch event to VDEV MLME SM with lock acquired + * + * Return: SUCCESS: on handling event + * FAILURE: on ignoring the event + */ +QDF_STATUS wlan_vdev_mlme_sm_deliver_evt_sync(struct wlan_objmgr_vdev *vdev, + enum wlan_vdev_sm_evt event, + uint16_t event_data_len, + void *event_data); + +#ifdef SM_ENG_HIST_ENABLE +/** + * wlan_vdev_mlme_sm_history_print() - Prints SM history + * @vdev: Object manager VDEV object + * + * API to print SM history + * + * Return: void + */ +void wlan_vdev_mlme_sm_history_print(struct wlan_objmgr_vdev *vdev); + +#endif + +/** + * wlan_vdev_allow_connect_n_tx() - Checks whether VDEV is in operational state + * @vdev: Object manager VDEV object + * + * API to checks the VDEV MLME SM state to allow tx or connections + * + * Return: SUCCESS: to allow tx or connection + * FAILURE: otherwise failure + */ +QDF_STATUS wlan_vdev_allow_connect_n_tx(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_vdev_mlme_is_active() - Checks whether VDEV is in active state + * @vdev: Object manager VDEV object + * + * API to checks the VDEV MLME SM state to check channel is configured in FW + * + * Return: SUCCESS: valid channel is configured + * FAILURE: otherwise failure + */ +QDF_STATUS wlan_vdev_mlme_is_active(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_vdev_chan_config_valid() - Checks whether VDEV chan config valid + * @vdev: Object manager VDEV object + * + * API to checks the VDEV MLME SM state to check channel is configured in Host + * + * Return: SUCCESS: valid channel is configured + * FAILURE: otherwise failure + */ +QDF_STATUS wlan_vdev_chan_config_valid(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_vdev_mlme_is_csa_restart() - Checks whether VDEV MLME SM is in CSA + * @vdev: Object manager VDEV object + * + * API to checks the VDEV MLME SM state for CSA_RESTART substate + * + * Return: SUCCESS: if it is in CSA_RESTART sub state + * FAILURE: otherwise failure + */ +QDF_STATUS wlan_vdev_mlme_is_csa_restart(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_vdev_is_going_down() - Checks whether VDEV is being brought down + * @vdev: Object manager VDEV object + * + * API to checks the VDEV MLME SM state to check VDEV is being brought down + * + * Return: SUCCESS: valid channel is configured + * FAILURE: otherwise failure + */ +QDF_STATUS wlan_vdev_is_going_down(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_vdev_is_restart_progress() - Checks VDEV restart is in progress + * @vdev: Object manager VDEV object + * + * API to check whether restarts is in progress + * + * Return: SUCCESS: if restart is in progress + * FAILURE: otherwise failure + */ +QDF_STATUS wlan_vdev_is_restart_progress(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_vdev_is_dfs_cac_wait() - Checks VDEV is in cac wait state + * @vdev: Object manager VDEV object + * + * API to check whether state is cac wait state + * + * Return: SUCCESS: if state is cac wait state + * FAILURE: otherwise failure + */ +QDF_STATUS wlan_vdev_is_dfs_cac_wait(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_vdev_mlme_cmd_lock - Acquire lock for command queuing atomicity + * + * API to take VDEV MLME command lock + * + * Return: void + */ +void wlan_vdev_mlme_cmd_lock(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_vdev_mlme_cmd_unlock - Release lock for command queuing atomicity + * + * API to release VDEV MLME command lock + * + * Return: void + */ +void wlan_vdev_mlme_cmd_unlock(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_vdev_mlme_is_scan_allowed() - Checks whether scan is allowed + * @vdev: Object manager VDEV object + * + * API to checks the VDEV MLME SM state to check scan is allowed + * + * Return: SUCCESS: if scan is allowed + * FAILURE: otherwise failure + */ +QDF_STATUS wlan_vdev_mlme_is_scan_allowed(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_vdev_mlme_is_init_state() - Checks whether vdev is in init state + * @vdev: Object manager VDEV object + * + * API to checks the VDEV MLME SM state is in init state or not + * + * Return: SUCCESS: if vdev is in init state + * FAILURE: otherwise failure + */ +QDF_STATUS wlan_vdev_mlme_is_init_state(struct wlan_objmgr_vdev *vdev); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mgr_tgt_if_rx_api.c b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mgr_tgt_if_rx_api.c new file mode 100644 index 0000000000000000000000000000000000000000..2979d716a30b7d983b9f3142962b139f1e63b3ed --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mgr_tgt_if_rx_api.c @@ -0,0 +1,264 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_vdev_mgr_tgt_if_rx_api.c + * + * This file provide definition for APIs registered for LMAC MLME Rx Ops + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct vdev_response_timer * +tgt_vdev_mgr_get_response_timer_info(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id) +{ + struct psoc_mlme_obj *psoc_mlme; + + if (vdev_id >= WLAN_UMAC_PSOC_MAX_VDEVS) { + mlme_err("Incorrect vdev_id: %d", vdev_id); + return NULL; + } + + psoc_mlme = mlme_psoc_get_priv(psoc); + if (!psoc_mlme) { + mlme_err("VDEV_%d PSOC_%d PSOC_MLME is NULL", vdev_id, + wlan_psoc_get_id(psoc)); + return NULL; + } + + return &psoc_mlme->psoc_vdev_rt[vdev_id]; +} + +qdf_export_symbol(tgt_vdev_mgr_get_response_timer_info); + +static QDF_STATUS tgt_vdev_mgr_start_response_handler( + struct wlan_objmgr_psoc *psoc, + struct vdev_start_response *rsp) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct vdev_mlme_obj *vdev_mlme; + struct wlan_objmgr_vdev *vdev; + + if (!rsp || !psoc) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, rsp->vdev_id, + WLAN_VDEV_TARGET_IF_ID); + if (!vdev) { + mlme_err("VDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) { + mlme_err("VDEV_%d PSOC_%d VDEV_MLME is NULL", rsp->vdev_id, + wlan_psoc_get_id(psoc)); + goto tgt_vdev_mgr_start_response_handler_end; + } + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_ext_start_rsp) + status = vdev_mlme->ops->mlme_vdev_ext_start_rsp( + vdev_mlme, + rsp); + +tgt_vdev_mgr_start_response_handler_end: + wlan_objmgr_vdev_release_ref(vdev, WLAN_VDEV_TARGET_IF_ID); + return status; +} + +static QDF_STATUS tgt_vdev_mgr_stop_response_handler( + struct wlan_objmgr_psoc *psoc, + struct vdev_stop_response *rsp) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct vdev_mlme_obj *vdev_mlme; + struct wlan_objmgr_vdev *vdev; + + if (!rsp || !psoc) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, rsp->vdev_id, + WLAN_VDEV_TARGET_IF_ID); + if (!vdev) { + mlme_err("VDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) { + mlme_err("VDEV_%d: PSOC_%d VDEV_MLME is NULL", rsp->vdev_id, + wlan_psoc_get_id(psoc)); + goto tgt_vdev_mgr_stop_response_handler_end; + } + + if ((vdev_mlme->ops) && vdev_mlme->ops->mlme_vdev_ext_stop_rsp) + status = vdev_mlme->ops->mlme_vdev_ext_stop_rsp( + vdev_mlme, + rsp); + +tgt_vdev_mgr_stop_response_handler_end: + wlan_objmgr_vdev_release_ref(vdev, WLAN_VDEV_TARGET_IF_ID); + return status; +} + +static QDF_STATUS tgt_vdev_mgr_delete_response_handler( + struct wlan_objmgr_psoc *psoc, + struct vdev_delete_response *rsp) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + status = mlme_vdev_ops_ext_hdl_delete_rsp(psoc, rsp); + return status; +} + +static QDF_STATUS tgt_vdev_mgr_peer_delete_all_response_handler( + struct wlan_objmgr_psoc *psoc, + struct peer_delete_all_response *rsp) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct vdev_mlme_obj *vdev_mlme; + struct wlan_objmgr_vdev *vdev; + + if (!rsp || !psoc) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + rsp->vdev_id, + WLAN_VDEV_TARGET_IF_ID); + if (!vdev) { + mlme_err("VDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) { + mlme_err("VDEV_%d: PSOC_%d VDEV_MLME is NULL", rsp->vdev_id, + wlan_psoc_get_id(psoc)); + goto tgt_vdev_mgr_peer_delete_all_response_handler_end; + } + + if ((vdev_mlme->ops) && + vdev_mlme->ops->mlme_vdev_ext_peer_delete_all_rsp) + status = vdev_mlme->ops->mlme_vdev_ext_peer_delete_all_rsp( + vdev_mlme, + rsp); + +tgt_vdev_mgr_peer_delete_all_response_handler_end: + wlan_objmgr_vdev_release_ref(vdev, WLAN_VDEV_TARGET_IF_ID); + return status; +} + +static QDF_STATUS +tgt_vdev_mgr_offload_bcn_tx_status_event_handler(uint32_t vdev_id, + uint32_t tx_status) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + return status; +} + +static QDF_STATUS +tgt_vdev_mgr_tbttoffset_update_handler(uint32_t num_vdevs, bool is_ext) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + return status; +} + +QDF_STATUS +tgt_vdev_mgr_ext_tbttoffset_update_handle(uint32_t num_vdevs, bool is_ext) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + return status; +} + +static QDF_STATUS tgt_vdev_mgr_multi_vdev_restart_resp_handler( + struct wlan_objmgr_psoc *psoc, + struct multi_vdev_restart_resp *resp) +{ + return mlme_vdev_ops_ext_hdl_multivdev_restart_resp(psoc, resp); +} + +#ifdef FEATURE_VDEV_RSP_WAKELOCK +static struct psoc_mlme_wakelock * +tgt_psoc_get_wakelock_info(struct wlan_objmgr_psoc *psoc) +{ + struct psoc_mlme_obj *psoc_mlme; + + psoc_mlme = mlme_psoc_get_priv(psoc); + if (!psoc_mlme) { + mlme_err("PSOC_MLME is NULL"); + return NULL; + } + + return &psoc_mlme->psoc_mlme_wakelock; +} + +static inline void +tgt_psoc_reg_wakelock_info_rx_op(struct wlan_lmac_if_mlme_rx_ops + *mlme_rx_ops) +{ + mlme_rx_ops->psoc_get_wakelock_info = tgt_psoc_get_wakelock_info; +} +#else +static inline void +tgt_psoc_reg_wakelock_info_rx_op(struct wlan_lmac_if_mlme_rx_ops + *mlme_rx_ops) +{ +} +#endif + +void tgt_vdev_mgr_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_mlme_rx_ops *mlme_rx_ops = &rx_ops->mops; + + mlme_rx_ops->vdev_mgr_offload_bcn_tx_status_event_handle = + tgt_vdev_mgr_offload_bcn_tx_status_event_handler; + mlme_rx_ops->vdev_mgr_tbttoffset_update_handle = + tgt_vdev_mgr_tbttoffset_update_handler; + mlme_rx_ops->vdev_mgr_start_response = + tgt_vdev_mgr_start_response_handler; + mlme_rx_ops->vdev_mgr_stop_response = + tgt_vdev_mgr_stop_response_handler; + mlme_rx_ops->vdev_mgr_delete_response = + tgt_vdev_mgr_delete_response_handler; + mlme_rx_ops->vdev_mgr_peer_delete_all_response = + tgt_vdev_mgr_peer_delete_all_response_handler; + mlme_rx_ops->psoc_get_vdev_response_timer_info = + tgt_vdev_mgr_get_response_timer_info; + mlme_rx_ops->vdev_mgr_multi_vdev_restart_resp = + tgt_vdev_mgr_multi_vdev_restart_resp_handler; + tgt_psoc_reg_wakelock_info_rx_op(&rx_ops->mops); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mgr_tgt_if_tx_api.c b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mgr_tgt_if_tx_api.c new file mode 100644 index 0000000000000000000000000000000000000000..d7cdc2aab189740162967a06f49d50b6a565a3fa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mgr_tgt_if_tx_api.c @@ -0,0 +1,656 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_vdev_mgr_tgt_if_tx_api.c + * + * This file provides definitions for mlme tgt_if APIs, which will + * further call target_if/mlme component using LMAC MLME txops + */ +#include +#include +#include "include/wlan_vdev_mlme.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static inline struct wlan_lmac_if_mlme_tx_ops +*wlan_vdev_mlme_get_lmac_txops(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_vdev_get_psoc(vdev); + + return target_if_vdev_mgr_get_tx_ops(psoc); +} + +QDF_STATUS tgt_vdev_mgr_create_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_create_params *param) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + ol_txrx_soc_handle soc_txrx_handle; + enum wlan_op_mode cdp_txrx_opmode; + enum wlan_op_subtype cdp_txrx_subtype; + uint32_t vdev_id; + uint8_t *vdev_addr; + + if (!param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->vdev_create_send) { + mlme_err("VDEV_%d No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + mlme_err("psoc object is NULL"); + return QDF_STATUS_E_INVAL; + } + + status = txops->vdev_create_send(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) { + mlme_err("VDEV_%d PSOC_%d Tx Ops Error : %d", vdev_id, + wlan_psoc_get_id(psoc), status); + return status; + } + + cdp_txrx_opmode = wlan_util_vdev_get_cdp_txrx_opmode(vdev); + cdp_txrx_subtype = wlan_util_vdev_get_cdp_txrx_subtype(vdev); + vdev_addr = wlan_vdev_mlme_get_macaddr(vdev); + pdev = wlan_vdev_get_pdev(vdev); + soc_txrx_handle = wlan_psoc_get_dp_handle(psoc); + if (!soc_txrx_handle) + return QDF_STATUS_E_FAILURE; + + return cdp_vdev_attach(soc_txrx_handle, + wlan_objmgr_pdev_get_pdev_id(pdev), + vdev_addr, vdev_id, + cdp_txrx_opmode, + cdp_txrx_subtype); +} + +QDF_STATUS tgt_vdev_mgr_create_complete(struct vdev_mlme_obj *vdev_mlme) +{ + struct wlan_objmgr_vdev *vdev; + struct vdev_set_params param = {0}; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct vdev_mlme_inactivity_params *inactivity; + uint8_t vdev_id; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + vdev = vdev_mlme->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->vdev_set_param_send) { + mlme_err("VDEV_%d: No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + inactivity = &vdev_mlme->mgmt.inactivity_params; + + param.vdev_id = vdev_id; + + param.param_value = + inactivity->keepalive_min_idle_inactive_time_secs; + param.param_id = WLAN_MLME_CFG_MIN_IDLE_INACTIVE_TIME; + status = txops->vdev_set_param_send(vdev, ¶m); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Failed to set min idle inactive time!", + vdev_id); + + param.param_value = + inactivity->keepalive_max_idle_inactive_time_secs; + param.param_id = WLAN_MLME_CFG_MAX_IDLE_INACTIVE_TIME; + status = txops->vdev_set_param_send(vdev, ¶m); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Failed to set max idle inactive time!", + vdev_id); + + param.param_value = + inactivity->keepalive_max_unresponsive_time_secs; + param.param_id = WLAN_MLME_CFG_MAX_UNRESPONSIVE_INACTIVE_TIME; + status = txops->vdev_set_param_send(vdev, ¶m); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Failed to set max unresponse inactive time!", + vdev_id); + + return status; +} + +QDF_STATUS tgt_vdev_mgr_start_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_start_params *param) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_id; + + if (!param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->vdev_start_send) { + mlme_err("VDEV_%d: No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + status = txops->vdev_start_send(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Tx Ops Error : %d", vdev_id, status); + + return status; +} + +QDF_STATUS tgt_vdev_mgr_delete_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_delete_params *param) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_psoc *psoc; + ol_txrx_soc_handle soc_txrx_handle; + uint8_t vdev_id; + + if (!param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->vdev_delete_send) { + mlme_err("VDEV_%d: No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_vdev_get_psoc(vdev); + soc_txrx_handle = wlan_psoc_get_dp_handle(psoc); + if (soc_txrx_handle) + cdp_vdev_detach(soc_txrx_handle, wlan_vdev_get_id(vdev), + NULL, NULL); + + status = txops->vdev_delete_send(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Tx Ops Error : %d", vdev_id, status); + + return status; +} + +QDF_STATUS tgt_vdev_mgr_peer_flush_tids_send( + struct vdev_mlme_obj *mlme_obj, + struct peer_flush_params *param) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_id; + + if (!param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->peer_flush_tids_send) { + mlme_err("VDEV_%d: No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + status = txops->peer_flush_tids_send(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Tx Ops Error : %d", vdev_id, status); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_vdev_mgr_stop_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_stop_params *param) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_id; + + if (!param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->vdev_stop_send) { + mlme_err("VDEV_%d: No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + status = txops->vdev_stop_send(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Tx Ops Error : %d", vdev_id, status); + + return status; +} + +QDF_STATUS tgt_vdev_mgr_beacon_stop(struct vdev_mlme_obj *mlme_obj) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_vdev_mgr_beacon_free(struct vdev_mlme_obj *mlme_obj) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_vdev_mgr_up_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_up_params *param) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *txops; + ol_txrx_soc_handle soc_txrx_handle; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_id; + + if (!param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->vdev_up_send) { + mlme_err("VDEV_%d: No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + /* cdp set rx and tx decap type */ + psoc = wlan_vdev_get_psoc(vdev); + soc_txrx_handle = wlan_psoc_get_dp_handle(psoc); + if (!soc_txrx_handle || vdev_id == WLAN_INVALID_VDEV_ID) + return QDF_STATUS_E_INVAL; + + status = txops->vdev_up_send(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Tx Ops Error : %d", vdev_id, status); + + return status; +} + +QDF_STATUS tgt_vdev_mgr_down_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_down_params *param) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + enum QDF_OPMODE opmode; + uint8_t vdev_id; + + if (!param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->vdev_down_send) { + mlme_err("VDEV_%d: No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + mlme_err("PDEV is NULL"); + return QDF_STATUS_E_INVAL; + } + + opmode = wlan_vdev_mlme_get_opmode(vdev); + if (wlan_util_is_vdev_active(pdev, WLAN_VDEV_TARGET_IF_ID) == + QDF_STATUS_SUCCESS) { + + if (opmode == QDF_SAP_MODE) + utils_dfs_cancel_precac_timer(pdev); + } + + status = txops->vdev_down_send(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Tx Ops Error : %d", vdev_id, status); + + return status; +} + +QDF_STATUS tgt_vdev_mgr_set_neighbour_rx_cmd_send( + struct vdev_mlme_obj *mlme_obj, + struct set_neighbour_rx_params *param) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_vdev_mgr_nac_rssi_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_scan_nac_rssi_params *param) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_vdev_mgr_sifs_trigger_send( + struct vdev_mlme_obj *mlme_obj, + struct sifs_trigger_param *param) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_id; + + if (!param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->vdev_sifs_trigger_send) { + mlme_err("VDEV_%d: No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + status = txops->vdev_sifs_trigger_send(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Tx Ops Error : %d", vdev_id, status); + + return status; +} + +QDF_STATUS tgt_vdev_mgr_set_custom_aggr_size_send( + struct vdev_mlme_obj *mlme_obj, + struct set_custom_aggr_size_params *param) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_id; + + if (!param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->vdev_set_custom_aggr_size_cmd_send) { + mlme_err("VDEV_%d: No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + status = txops->vdev_set_custom_aggr_size_cmd_send(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Tx Ops Error : %d", vdev_id, status); + + return status; +} + +QDF_STATUS tgt_vdev_mgr_config_ratemask_cmd_send( + struct vdev_mlme_obj *mlme_obj, + struct config_ratemask_params *param) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_id; + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->vdev_config_ratemask_cmd_send) { + mlme_err("VDEV_%d: No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + status = txops->vdev_config_ratemask_cmd_send(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Tx Ops Error : %d", vdev_id, status); + + return status; +} + +QDF_STATUS tgt_vdev_mgr_beacon_cmd_send( + struct vdev_mlme_obj *mlme_obj, + struct beacon_params *param) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_vdev_mgr_beacon_tmpl_send( + struct vdev_mlme_obj *mlme_obj, + struct beacon_tmpl_params *param) +{ + return QDF_STATUS_SUCCESS; +} + +#if defined(WLAN_SUPPORT_FILS) || defined(CONFIG_BAND_6GHZ) +QDF_STATUS tgt_vdev_mgr_fils_enable_send( + struct vdev_mlme_obj *mlme_obj, + struct config_fils_params *param) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_id; + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->vdev_fils_enable_send) { + mlme_err("VDEV_%d: No Tx Ops fils Enable", vdev_id); + return QDF_STATUS_E_INVAL; + } + + status = txops->vdev_fils_enable_send(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Tx Ops fils Enable Error : %d", + vdev_id, status); + + return status; +} +#endif + +QDF_STATUS tgt_vdev_mgr_multiple_vdev_restart_send( + struct wlan_objmgr_pdev *pdev, + struct multiple_vdev_restart_params *param) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_vdev *vdev; + + if (!param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_pdev(pdev, + param->vdev_ids[0], + WLAN_VDEV_TARGET_IF_ID); + if (vdev) { + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->multiple_vdev_restart_req_cmd) { + mlme_err("VDEV_%d: No Tx Ops", wlan_vdev_get_id(vdev)); + wlan_objmgr_vdev_release_ref(vdev, + WLAN_VDEV_TARGET_IF_ID); + return QDF_STATUS_E_INVAL; + } + + status = txops->multiple_vdev_restart_req_cmd(pdev, param); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("Tx Ops Error: %d", status); + + wlan_objmgr_vdev_release_ref(vdev, WLAN_VDEV_TARGET_IF_ID); + } + + return status; +} + +QDF_STATUS tgt_vdev_mgr_set_tx_rx_decap_type(struct vdev_mlme_obj *mlme_obj, + enum wlan_mlme_cfg_id param_id, + uint32_t value) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_id; + + if (!mlme_obj) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->vdev_set_tx_rx_decap_type) { + mlme_err("VDEV_%d: No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + status = txops->vdev_set_tx_rx_decap_type(vdev, param_id, value); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Tx Ops Error : %d", vdev_id, status); + + return status; +} + +QDF_STATUS tgt_vdev_mgr_set_param_send( + struct vdev_mlme_obj *mlme_obj, + struct vdev_set_params *param) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_id; + + if (!param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->vdev_set_param_send) { + mlme_err("VDEV_%d: No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + status = txops->vdev_set_param_send(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Tx Ops Error : %d", vdev_id, status); + + return status; +} + +QDF_STATUS tgt_vdev_mgr_sta_ps_param_send( + struct vdev_mlme_obj *mlme_obj, + struct sta_ps_params *param) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_id; + + if (!param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->vdev_sta_ps_param_send) { + mlme_err("VDEV_%d: No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + status = txops->vdev_sta_ps_param_send(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Tx Ops Error : %d", vdev_id, status); + + return status; +} + +QDF_STATUS tgt_vdev_mgr_peer_delete_all_send( + struct vdev_mlme_obj *mlme_obj, + struct peer_delete_all_params *param) +{ + QDF_STATUS status; + struct wlan_lmac_if_mlme_tx_ops *txops; + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_id; + + if (!param) { + mlme_err("Invalid input"); + return QDF_STATUS_E_INVAL; + } + + vdev = mlme_obj->vdev; + vdev_id = wlan_vdev_get_id(vdev); + txops = wlan_vdev_mlme_get_lmac_txops(vdev); + if (!txops || !txops->peer_delete_all_send) { + mlme_err("VDEV_%d: No Tx Ops", vdev_id); + return QDF_STATUS_E_INVAL; + } + + status = txops->peer_delete_all_send(vdev, param); + if (QDF_IS_STATUS_ERROR(status)) + mlme_err("VDEV_%d: Tx Ops Error : %d", vdev_id, status); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mgr_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mgr_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..6582f3cc7e702cf77903c9d0575f8128f6ef7509 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mgr_ucfg_api.c @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_vdev_mgr_ucfg_api.c + * + * This file provides definitions to APIs to get/set mlme fields in + * vdev mlme core data structures + */ +#include +#include "wlan_vdev_mgr_ucfg_api.h" +#include "include/wlan_vdev_mlme.h" +#include +#include +#include +#include + +void ucfg_wlan_vdev_mgr_get_param_bssid( + struct wlan_objmgr_vdev *vdev, + uint8_t *bssid) +{ + struct vdev_mlme_mgmt *mlme_mgmt; + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_objmgr_vdev_get_comp_private_obj( + vdev, WLAN_UMAC_COMP_MLME); + + if (!vdev_mlme) { + mlme_err("VDEV_MLME is NULL"); + return; + } + + mlme_mgmt = &vdev_mlme->mgmt; + + qdf_mem_copy(bssid, mlme_mgmt->generic.bssid, + QDF_MAC_ADDR_SIZE); +} + +qdf_export_symbol(ucfg_wlan_vdev_mgr_get_param_bssid); + +void ucfg_wlan_vdev_mgr_get_param_ssid( + struct wlan_objmgr_vdev *vdev, + uint8_t *ssid, uint8_t *ssid_len) +{ + struct vdev_mlme_mgmt *mlme_mgmt; + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_objmgr_vdev_get_comp_private_obj( + vdev, WLAN_UMAC_COMP_MLME); + + if (!vdev_mlme) { + QDF_ASSERT(0); + return; + } + + mlme_mgmt = &vdev_mlme->mgmt; + + *ssid_len = mlme_mgmt->generic.ssid_len; + qdf_mem_copy(ssid, mlme_mgmt->generic.ssid, + mlme_mgmt->generic.ssid_len); +} + +qdf_export_symbol(ucfg_wlan_vdev_mgr_get_param_ssid); + +void ucfg_wlan_vdev_mgr_get_beacon_buffer( + struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t buf) +{ + struct vdev_mlme_obj *vdev_mlme; + struct vdev_mlme_mgmt *mlme_mgmt; + + vdev_mlme = wlan_objmgr_vdev_get_comp_private_obj( + vdev, WLAN_UMAC_COMP_MLME); + + if (!vdev_mlme) { + QDF_ASSERT(0); + return; + } + + mlme_mgmt = &vdev_mlme->mgmt; + + buf = mlme_mgmt->beacon_info.beacon_buffer; +} + +qdf_export_symbol(ucfg_wlan_vdev_mgr_get_beacon_buffer); + +void ucfg_wlan_vdev_mgr_get_trans_bssid( + struct wlan_objmgr_vdev *vdev, + uint8_t *addr) +{ + struct vdev_mlme_obj *vdev_mlme; + struct vdev_mlme_mgmt *mlme_mgmt; + + vdev_mlme = wlan_objmgr_vdev_get_comp_private_obj( + vdev, WLAN_UMAC_COMP_MLME); + + if (!vdev_mlme) { + QDF_ASSERT(0); + return; + } + + mlme_mgmt = &vdev_mlme->mgmt; + + qdf_mem_copy(addr, mlme_mgmt->mbss_11ax.trans_bssid, QDF_MAC_ADDR_SIZE); +} + +qdf_export_symbol(ucfg_wlan_vdev_mgr_get_trans_bssid); + +void ucfg_wlan_vdev_mgr_get_tsf_adjust( + struct wlan_objmgr_vdev *vdev, + uint64_t *tsf_adjust) +{ + struct vdev_mlme_obj *vdev_mlme; + struct vdev_mlme_proto *mlme_proto; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + + if (!vdev_mlme) { + mlme_err("VDEV MLME is NULL"); + return; + } + + mlme_proto = &vdev_mlme->proto; + + *tsf_adjust = mlme_proto->generic.tsfadjust; +} + +qdf_export_symbol(ucfg_wlan_vdev_mgr_get_tsf_adjust); + +QDF_STATUS ucfg_wlan_vdev_mgr_set_param( + struct wlan_objmgr_vdev *vdev, + enum wlan_mlme_cfg_id param_id, + struct wlan_vdev_mgr_cfg mlme_cfg) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_objmgr_vdev_get_comp_private_obj( + vdev, + WLAN_UMAC_COMP_MLME); + + if (!vdev_mlme) { + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + return wlan_util_vdev_mlme_set_param(vdev_mlme, param_id, mlme_cfg); +} + +qdf_export_symbol(ucfg_wlan_vdev_mgr_set_param); + +void ucfg_wlan_vdev_mgr_get_param( + struct wlan_objmgr_vdev *vdev, + enum wlan_mlme_cfg_id param_id, + uint32_t *value) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_objmgr_vdev_get_comp_private_obj( + vdev, + WLAN_UMAC_COMP_MLME); + + if (!vdev_mlme) { + QDF_ASSERT(0); + return; + } + + wlan_util_vdev_mlme_get_param(vdev_mlme, param_id, value); +} + +qdf_export_symbol(ucfg_wlan_vdev_mgr_get_param); diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mgr_utils_api.c b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mgr_utils_api.c new file mode 100644 index 0000000000000000000000000000000000000000..b631396ce720a101d07662f71732c45c94bf0ea3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mgr_utils_api.c @@ -0,0 +1,586 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_vdev_mgr_utils_api.c + * + * This file provide definition for APIs to enable Tx Ops and Rx Ops registered + * through LMAC + */ +#include +#include +#include +#include +#include +#include + +static QDF_STATUS vdev_mgr_config_ratemask_update( + struct vdev_mlme_obj *mlme_obj, + struct config_ratemask_params *param) +{ + struct wlan_objmgr_vdev *vdev; + + vdev = mlme_obj->vdev; + param->vdev_id = wlan_vdev_get_id(vdev); + param->type = mlme_obj->mgmt.rate_info.type; + param->lower32 = mlme_obj->mgmt.rate_info.lower32; + param->higher32 = mlme_obj->mgmt.rate_info.higher32; + param->lower32_2 = mlme_obj->mgmt.rate_info.lower32_2; + + return QDF_STATUS_SUCCESS; +} + +enum wlan_op_subtype +wlan_util_vdev_get_cdp_txrx_subtype(struct wlan_objmgr_vdev *vdev) +{ + enum QDF_OPMODE qdf_opmode; + enum wlan_op_subtype cdp_txrx_subtype; + + qdf_opmode = wlan_vdev_mlme_get_opmode(vdev); + switch (qdf_opmode) { + case QDF_P2P_DEVICE_MODE: + cdp_txrx_subtype = wlan_op_subtype_p2p_device; + break; + case QDF_P2P_CLIENT_MODE: + cdp_txrx_subtype = wlan_op_subtype_p2p_cli; + break; + case QDF_P2P_GO_MODE: + cdp_txrx_subtype = wlan_op_subtype_p2p_go; + break; + default: + cdp_txrx_subtype = wlan_op_subtype_none; + }; + + return cdp_txrx_subtype; +} + +enum wlan_op_mode +wlan_util_vdev_get_cdp_txrx_opmode(struct wlan_objmgr_vdev *vdev) +{ + enum QDF_OPMODE qdf_opmode; + enum wlan_op_mode cdp_txrx_opmode; + + qdf_opmode = wlan_vdev_mlme_get_opmode(vdev); + switch (qdf_opmode) { + case QDF_STA_MODE: + cdp_txrx_opmode = wlan_op_mode_sta; + break; + case QDF_SAP_MODE: + cdp_txrx_opmode = wlan_op_mode_ap; + break; + case QDF_MONITOR_MODE: + cdp_txrx_opmode = wlan_op_mode_monitor; + break; + case QDF_P2P_DEVICE_MODE: + cdp_txrx_opmode = wlan_op_mode_ap; + break; + case QDF_P2P_CLIENT_MODE: + cdp_txrx_opmode = wlan_op_mode_sta; + break; + case QDF_P2P_GO_MODE: + cdp_txrx_opmode = wlan_op_mode_ap; + break; + case QDF_OCB_MODE: + cdp_txrx_opmode = wlan_op_mode_ocb; + break; + case QDF_IBSS_MODE: + cdp_txrx_opmode = wlan_op_mode_ibss; + break; + case QDF_NDI_MODE: + cdp_txrx_opmode = wlan_op_mode_ndi; + break; + default: + cdp_txrx_opmode = wlan_op_mode_unknown; + }; + + return cdp_txrx_opmode; +} + +QDF_STATUS +wlan_util_vdev_mlme_set_ratemask_config(struct vdev_mlme_obj *vdev_mlme) +{ + struct config_ratemask_params rm_param = {0}; + + if (!vdev_mlme) { + mlme_err("VDEV MLME is NULL"); + return QDF_STATUS_E_FAILURE; + } + + vdev_mgr_config_ratemask_update(vdev_mlme, &rm_param); + + return tgt_vdev_mgr_config_ratemask_cmd_send(vdev_mlme, &rm_param); +} + +qdf_export_symbol(wlan_util_vdev_mlme_set_ratemask_config); + +QDF_STATUS +wlan_util_vdev_mlme_set_param(struct vdev_mlme_obj *vdev_mlme, + enum wlan_mlme_cfg_id param_id, + struct wlan_vdev_mgr_cfg mlme_cfg) +{ + struct wlan_objmgr_vdev *vdev; + struct vdev_mlme_proto *mlme_proto; + struct vdev_mlme_mgmt *mlme_mgmt; + struct vdev_mlme_inactivity_params *inactivity_params; + bool is_wmi_cmd = false; + int ret = QDF_STATUS_SUCCESS; + struct vdev_set_params param = {0}; + + if (!vdev_mlme) { + mlme_err("VDEV MLME is NULL"); + return QDF_STATUS_E_FAILURE; + } + vdev = vdev_mlme->vdev; + mlme_proto = &vdev_mlme->proto; + mlme_mgmt = &vdev_mlme->mgmt; + inactivity_params = &mlme_mgmt->inactivity_params; + + switch (param_id) { + case WLAN_MLME_CFG_DTIM_PERIOD: + mlme_proto->generic.dtim_period = mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_SLOT_TIME: + mlme_proto->generic.slot_time = mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_PROTECTION_MODE: + mlme_proto->generic.protection_mode = mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_BEACON_INTERVAL: + mlme_proto->generic.beacon_interval = mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_LDPC: + mlme_proto->generic.ldpc = mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_NSS: + mlme_proto->generic.nss = mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_TSF_ADJUST: + mlme_proto->generic.tsfadjust = mlme_cfg.tsf; + break; + case WLAN_MLME_CFG_ASSOC_ID: + mlme_proto->sta.assoc_id = mlme_cfg.value; + break; + case WLAN_MLME_CFG_VHT_CAPS: + mlme_proto->vht_info.caps = mlme_cfg.value; + break; + case WLAN_MLME_CFG_SUBFER: + mlme_proto->vht_info.subfer = mlme_cfg.value; + break; + case WLAN_MLME_CFG_MUBFER: + mlme_proto->vht_info.mubfer = mlme_cfg.value; + break; + case WLAN_MLME_CFG_SUBFEE: + mlme_proto->vht_info.subfee = mlme_cfg.value; + break; + case WLAN_MLME_CFG_MUBFEE: + mlme_proto->vht_info.mubfee = mlme_cfg.value; + break; + case WLAN_MLME_CFG_IMLICIT_BF: + mlme_proto->vht_info.implicit_bf = mlme_cfg.value; + break; + case WLAN_MLME_CFG_SOUNDING_DIM: + mlme_proto->vht_info.sounding_dimension = mlme_cfg.value; + break; + case WLAN_MLME_CFG_BFEE_STS_CAP: + mlme_proto->vht_info.bfee_sts_cap = mlme_cfg.value; + break; + case WLAN_MLME_CFG_TXBF_CAPS: + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_HT_CAPS: + mlme_proto->ht_info.ht_caps = mlme_cfg.value; + break; + case WLAN_MLME_CFG_HE_OPS: + mlme_proto->he_ops_info.he_ops = mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_RTS_THRESHOLD: + mlme_mgmt->generic.rts_threshold = mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_FRAG_THRESHOLD: + mlme_mgmt->generic.frag_threshold = mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_PROBE_DELAY: + mlme_mgmt->generic.probe_delay = mlme_cfg.value; + break; + case WLAN_MLME_CFG_REPEAT_PROBE_TIME: + mlme_mgmt->generic.repeat_probe_time = mlme_cfg.value; + break; + case WLAN_MLME_CFG_DROP_UNENCRY: + mlme_mgmt->generic.drop_unencry = mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_TX_PWR_LIMIT: + mlme_mgmt->generic.tx_pwrlimit = mlme_cfg.value; + break; + case WLAN_MLME_CFG_TX_POWER: + mlme_mgmt->generic.tx_power = mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_AMPDU: + mlme_mgmt->generic.ampdu = mlme_cfg.value; + mlme_cfg.value = (mlme_cfg.value << 8) + 0xFF; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_AMPDU_SIZE: + mlme_mgmt->generic.ampdu = mlme_cfg.value; + break; + case WLAN_MLME_CFG_AMSDU: + mlme_mgmt->generic.amsdu = mlme_cfg.value; + mlme_cfg.value = (mlme_cfg.value << 8) + 0xFF; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_AMSDU_SIZE: + mlme_mgmt->generic.amsdu = mlme_cfg.value; + break; + case WLAN_MLME_CFG_BMISS_FIRST_BCNT: + inactivity_params->bmiss_first_bcnt = mlme_cfg.value; + break; + case WLAN_MLME_CFG_BMISS_FINAL_BCNT: + inactivity_params->bmiss_final_bcnt = mlme_cfg.value; + break; + case WLAN_MLME_CFG_MIN_IDLE_INACTIVE_TIME: + inactivity_params->keepalive_min_idle_inactive_time_secs = + mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_MAX_IDLE_INACTIVE_TIME: + inactivity_params->keepalive_max_idle_inactive_time_secs = + mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_MAX_UNRESPONSIVE_INACTIVE_TIME: + inactivity_params->keepalive_max_unresponsive_time_secs = + mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_RATE_FLAGS: + mlme_mgmt->rate_info.rate_flags = mlme_cfg.value; + break; + case WLAN_MLME_CFG_PER_BAND_TX_MGMT_RATE: + mlme_mgmt->rate_info.per_band_tx_mgmt_rate = mlme_cfg.value; + break; + case WLAN_MLME_CFG_MAX_RATE: + mlme_mgmt->rate_info.max_rate = mlme_cfg.value; + break; + case WLAN_MLME_CFG_TX_MGMT_RATE: + mlme_mgmt->rate_info.tx_mgmt_rate = mlme_cfg.value; + break; + case WLAN_MLME_CFG_TX_CHAINMASK: + mlme_mgmt->chainmask_info.tx_chainmask = mlme_cfg.value; + break; + case WLAN_MLME_CFG_RX_CHAINMASK: + mlme_mgmt->chainmask_info.rx_chainmask = mlme_cfg.value; + break; + case WLAN_MLME_CFG_PKT_POWERSAVE: + mlme_mgmt->powersave_info.packet_powersave = mlme_cfg.value; + break; + case WLAN_MLME_CFG_MAX_LI_OF_MODDTIM: + mlme_mgmt->powersave_info.max_li_of_moddtim = mlme_cfg.value; + break; + case WLAN_MLME_CFG_DYNDTIM_CNT: + mlme_mgmt->powersave_info.dyndtim_cnt = mlme_cfg.value; + break; + case WLAN_MLME_CFG_LISTEN_INTERVAL: + mlme_mgmt->powersave_info.listen_interval = mlme_cfg.value; + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_MODDTIM_CNT: + mlme_mgmt->powersave_info.moddtim_cnt = mlme_cfg.value; + break; + case WLAN_MLME_CFG_PROFILE_IDX: + mlme_mgmt->mbss_11ax.profile_idx = mlme_cfg.value; + break; + case WLAN_MLME_CFG_PROFILE_NUM: + mlme_mgmt->mbss_11ax.profile_num = mlme_cfg.value; + break; + case WLAN_MLME_CFG_MBSSID_FLAGS: + mlme_mgmt->mbss_11ax.mbssid_flags = mlme_cfg.value; + break; + case WLAN_MLME_CFG_VDEVID_TRANS: + mlme_mgmt->mbss_11ax.vdevid_trans = mlme_cfg.value; + break; + case WLAN_MLME_CFG_SSID: + if (mlme_cfg.ssid_cfg.length <= WLAN_SSID_MAX_LEN) { + qdf_mem_copy(mlme_mgmt->generic.ssid, + mlme_cfg.ssid_cfg.mac_ssid, + mlme_cfg.ssid_cfg.length); + mlme_mgmt->generic.ssid_len = + mlme_cfg.ssid_cfg.length; + } else { + mlme_mgmt->generic.ssid_len = 0; + } + + break; + case WLAN_MLME_CFG_TRANS_BSSID: + qdf_mem_copy(mlme_mgmt->mbss_11ax.trans_bssid, + mlme_cfg.trans_bssid, QDF_MAC_ADDR_SIZE); + break; + case WLAN_MLME_CFG_TYPE: + mlme_mgmt->generic.type = mlme_cfg.value; + break; + case WLAN_MLME_CFG_SUBTYPE: + mlme_mgmt->generic.subtype = mlme_cfg.value; + break; + case WLAN_MLME_CFG_UAPSD: + mlme_proto->sta.uapsd_cfg = mlme_cfg.value; + break; + case WLAN_MLME_CFG_TX_ENCAP_TYPE: + is_wmi_cmd = true; + mlme_mgmt->generic.tx_encap_type = mlme_cfg.value; + tgt_vdev_mgr_set_tx_rx_decap_type(vdev_mlme, + WLAN_MLME_CFG_TX_ENCAP_TYPE, + mlme_cfg.value); + break; + case WLAN_MLME_CFG_RX_DECAP_TYPE: + is_wmi_cmd = true; + mlme_mgmt->generic.rx_decap_type = mlme_cfg.value; + tgt_vdev_mgr_set_tx_rx_decap_type(vdev_mlme, + WLAN_MLME_CFG_RX_DECAP_TYPE, + mlme_cfg.value); + break; + case WLAN_MLME_CFG_RATEMASK_TYPE: + mlme_mgmt->rate_info.type = mlme_cfg.value; + break; + case WLAN_MLME_CFG_RATEMASK_LOWER32: + mlme_mgmt->rate_info.lower32 = mlme_cfg.value; + break; + case WLAN_MLME_CFG_RATEMASK_HIGHER32: + mlme_mgmt->rate_info.higher32 = mlme_cfg.value; + break; + case WLAN_MLME_CFG_RATEMASK_LOWER32_2: + mlme_mgmt->rate_info.lower32_2 = mlme_cfg.value; + break; + case WLAN_MLME_CFG_BCN_TX_RATE: + mlme_mgmt->rate_info.bcn_tx_rate = mlme_cfg.value; + break; + case WLAN_MLME_CFG_BCN_TX_RATE_CODE: + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_TX_MGMT_RATE_CODE: + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_ENABLE_MULTI_GROUP_KEY: + is_wmi_cmd = true; + break; + case WLAN_MLME_CFG_MAX_GROUP_KEYS: + is_wmi_cmd = true; + break; + default: + break; + } + + if (is_wmi_cmd) { + param.param_id = param_id; + param.vdev_id = wlan_vdev_get_id(vdev); + param.param_value = mlme_cfg.value; + ret = tgt_vdev_mgr_set_param_send(vdev_mlme, ¶m); + } + + return ret; +} + +qdf_export_symbol(wlan_util_vdev_mlme_set_param); + +void wlan_util_vdev_mlme_get_param(struct vdev_mlme_obj *vdev_mlme, + enum wlan_mlme_cfg_id param_id, + uint32_t *value) +{ + struct vdev_mlme_proto *mlme_proto; + struct vdev_mlme_mgmt *mlme_mgmt; + struct vdev_mlme_inactivity_params *inactivity_params; + + if (!vdev_mlme) { + mlme_err("VDEV MLME is NULL"); + return; + } + mlme_proto = &vdev_mlme->proto; + mlme_mgmt = &vdev_mlme->mgmt; + inactivity_params = &mlme_mgmt->inactivity_params; + + switch (param_id) { + case WLAN_MLME_CFG_DTIM_PERIOD: + *value = mlme_proto->generic.dtim_period; + break; + case WLAN_MLME_CFG_SLOT_TIME: + *value = mlme_proto->generic.slot_time; + break; + case WLAN_MLME_CFG_PROTECTION_MODE: + *value = mlme_proto->generic.protection_mode; + break; + case WLAN_MLME_CFG_BEACON_INTERVAL: + *value = mlme_proto->generic.beacon_interval; + break; + case WLAN_MLME_CFG_LDPC: + *value = mlme_proto->generic.ldpc; + break; + case WLAN_MLME_CFG_NSS: + *value = mlme_proto->generic.nss; + break; + case WLAN_MLME_CFG_ASSOC_ID: + *value = mlme_proto->sta.assoc_id; + break; + case WLAN_MLME_CFG_VHT_CAPS: + *value = mlme_proto->vht_info.caps; + break; + case WLAN_MLME_CFG_SUBFER: + *value = mlme_proto->vht_info.subfer; + break; + case WLAN_MLME_CFG_MUBFER: + *value = mlme_proto->vht_info.mubfer; + break; + case WLAN_MLME_CFG_SUBFEE: + *value = mlme_proto->vht_info.subfee; + break; + case WLAN_MLME_CFG_MUBFEE: + *value = mlme_proto->vht_info.mubfee; + break; + case WLAN_MLME_CFG_IMLICIT_BF: + *value = mlme_proto->vht_info.implicit_bf; + break; + case WLAN_MLME_CFG_SOUNDING_DIM: + *value = mlme_proto->vht_info.sounding_dimension; + break; + case WLAN_MLME_CFG_BFEE_STS_CAP: + *value = mlme_proto->vht_info.bfee_sts_cap; + break; + case WLAN_MLME_CFG_HT_CAPS: + *value = mlme_proto->ht_info.ht_caps; + break; + case WLAN_MLME_CFG_HE_OPS: + *value = mlme_proto->he_ops_info.he_ops; + break; + case WLAN_MLME_CFG_RTS_THRESHOLD: + *value = mlme_mgmt->generic.rts_threshold; + break; + case WLAN_MLME_CFG_FRAG_THRESHOLD: + *value = mlme_mgmt->generic.frag_threshold; + break; + case WLAN_MLME_CFG_PROBE_DELAY: + *value = mlme_mgmt->generic.probe_delay; + break; + case WLAN_MLME_CFG_REPEAT_PROBE_TIME: + *value = mlme_mgmt->generic.repeat_probe_time; + break; + case WLAN_MLME_CFG_DROP_UNENCRY: + *value = mlme_mgmt->generic.drop_unencry; + break; + case WLAN_MLME_CFG_TX_PWR_LIMIT: + *value = mlme_mgmt->generic.tx_pwrlimit; + break; + case WLAN_MLME_CFG_TX_POWER: + *value = mlme_mgmt->generic.tx_power; + break; + case WLAN_MLME_CFG_AMPDU: + *value = mlme_mgmt->generic.ampdu; + break; + case WLAN_MLME_CFG_AMSDU: + *value = mlme_mgmt->generic.amsdu; + break; + case WLAN_MLME_CFG_SSID_LEN: + *value = mlme_mgmt->generic.ssid_len; + break; + case WLAN_MLME_CFG_BMISS_FIRST_BCNT: + *value = inactivity_params->bmiss_first_bcnt; + break; + case WLAN_MLME_CFG_BMISS_FINAL_BCNT: + *value = inactivity_params->bmiss_final_bcnt; + break; + case WLAN_MLME_CFG_MIN_IDLE_INACTIVE_TIME: + *value = + inactivity_params->keepalive_min_idle_inactive_time_secs; + break; + case WLAN_MLME_CFG_MAX_IDLE_INACTIVE_TIME: + *value = + inactivity_params->keepalive_max_idle_inactive_time_secs; + break; + case WLAN_MLME_CFG_MAX_UNRESPONSIVE_INACTIVE_TIME: + *value = + inactivity_params->keepalive_max_unresponsive_time_secs; + break; + case WLAN_MLME_CFG_RATE_FLAGS: + *value = mlme_mgmt->rate_info.rate_flags; + break; + case WLAN_MLME_CFG_PER_BAND_TX_MGMT_RATE: + *value = mlme_mgmt->rate_info.per_band_tx_mgmt_rate; + break; + case WLAN_MLME_CFG_MAX_RATE: + *value = mlme_mgmt->rate_info.max_rate; + break; + case WLAN_MLME_CFG_TX_MGMT_RATE: + *value = mlme_mgmt->rate_info.tx_mgmt_rate; + break; + case WLAN_MLME_CFG_TX_CHAINMASK: + *value = mlme_mgmt->chainmask_info.tx_chainmask; + break; + case WLAN_MLME_CFG_RX_CHAINMASK: + *value = mlme_mgmt->chainmask_info.rx_chainmask; + break; + case WLAN_MLME_CFG_PKT_POWERSAVE: + *value = mlme_mgmt->powersave_info.packet_powersave; + break; + case WLAN_MLME_CFG_MAX_LI_OF_MODDTIM: + *value = mlme_mgmt->powersave_info.max_li_of_moddtim; + break; + case WLAN_MLME_CFG_DYNDTIM_CNT: + *value = mlme_mgmt->powersave_info.dyndtim_cnt; + break; + case WLAN_MLME_CFG_LISTEN_INTERVAL: + *value = mlme_mgmt->powersave_info.listen_interval; + break; + case WLAN_MLME_CFG_MODDTIM_CNT: + *value = mlme_mgmt->powersave_info.moddtim_cnt; + break; + case WLAN_MLME_CFG_PROFILE_IDX: + *value = mlme_mgmt->mbss_11ax.profile_idx; + break; + case WLAN_MLME_CFG_PROFILE_NUM: + *value = mlme_mgmt->mbss_11ax.profile_num; + break; + case WLAN_MLME_CFG_MBSSID_FLAGS: + *value = mlme_mgmt->mbss_11ax.mbssid_flags; + break; + case WLAN_MLME_CFG_VDEVID_TRANS: + *value = mlme_mgmt->mbss_11ax.vdevid_trans; + break; + case WLAN_MLME_CFG_BCN_TX_RATE: + *value = mlme_mgmt->rate_info.bcn_tx_rate; + break; + default: + break; + } +} + +qdf_export_symbol(wlan_util_vdev_mlme_get_param); + +void wlan_util_vdev_get_param(struct wlan_objmgr_vdev *vdev, + enum wlan_mlme_cfg_id param_id, + uint32_t *value) +{ + ucfg_wlan_vdev_mgr_get_param(vdev, param_id, value); +} + +qdf_export_symbol(wlan_util_vdev_get_param); diff --git a/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mlme_api.c b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mlme_api.c new file mode 100644 index 0000000000000000000000000000000000000000..fafc0bed56c3e9bd0c38d41e8f6d7f26abbf45df --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/mlme/vdev_mgr/dispatcher/src/wlan_vdev_mlme_api.c @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implements VDEV MLME public APIs + */ + +#include +#include +#include "include/wlan_vdev_mlme.h" +#include "../../core/src/vdev_mlme_sm.h" +#include +#include + +struct vdev_mlme_obj *wlan_vdev_mlme_get_cmpt_obj(struct wlan_objmgr_vdev *vdev) +{ + struct vdev_mlme_obj *vdev_mlme; + + if (!vdev) { + mlme_err("vdev is NULL"); + return NULL; + } + + vdev_mlme = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_MLME); + if (!vdev_mlme) { + mlme_err(" MLME component object is NULL"); + return NULL; + } + + return vdev_mlme; +} + +qdf_export_symbol(wlan_vdev_mlme_get_cmpt_obj); + +void wlan_vdev_mlme_set_ext_hdl(struct wlan_objmgr_vdev *vdev, + mlme_vdev_ext_t *ext_hdl) +{ + struct vdev_mlme_obj *vdev_mlme; + + if (!ext_hdl) { + mlme_err("Invalid input"); + return; + } + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (vdev_mlme) + vdev_mlme->ext_vdev_ptr = ext_hdl; +} + +qdf_export_symbol(wlan_vdev_mlme_set_ext_hdl); + +mlme_vdev_ext_t *wlan_vdev_mlme_get_ext_hdl(struct wlan_objmgr_vdev *vdev) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (vdev_mlme) + return vdev_mlme->ext_vdev_ptr; + + return NULL; +} + +qdf_export_symbol(wlan_vdev_mlme_get_ext_hdl); + +QDF_STATUS wlan_vdev_mlme_sm_deliver_evt(struct wlan_objmgr_vdev *vdev, + enum wlan_vdev_sm_evt event, + uint16_t event_data_len, + void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme; + QDF_STATUS status; + enum wlan_vdev_state state_entry, state_exit; + enum wlan_vdev_state substate_entry, substate_exit; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) { + mlme_err("vdev component object is NULL"); + return QDF_STATUS_E_FAILURE; + } + + mlme_vdev_sm_spin_lock(vdev_mlme); + + /* store entry state and sub state for prints */ + state_entry = wlan_vdev_mlme_get_state(vdev); + substate_entry = wlan_vdev_mlme_get_substate(vdev); + mlme_vdev_sm_print_state_event(vdev_mlme, event); + + status = mlme_vdev_sm_deliver_event(vdev_mlme, event, event_data_len, + event_data); + /* Take exit state, exit substate for prints */ + state_exit = wlan_vdev_mlme_get_state(vdev); + substate_exit = wlan_vdev_mlme_get_substate(vdev); + /* If no state and substate change, don't print */ + if (!((state_entry == state_exit) && (substate_entry == substate_exit))) + mlme_vdev_sm_print_state(vdev_mlme); + mlme_vdev_sm_spin_unlock(vdev_mlme); + + return status; +} + +qdf_export_symbol(wlan_vdev_mlme_sm_deliver_evt); + +QDF_STATUS wlan_vdev_mlme_sm_deliver_evt_sync(struct wlan_objmgr_vdev *vdev, + enum wlan_vdev_sm_evt event, + uint16_t event_data_len, + void *event_data) +{ + struct vdev_mlme_obj *vdev_mlme; + QDF_STATUS status; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) { + mlme_err("vdev component object is NULL"); + return QDF_STATUS_E_FAILURE; + } + + status = mlme_vdev_sm_deliver_event(vdev_mlme, event, event_data_len, + event_data); + + return status; +} + +qdf_export_symbol(wlan_vdev_mlme_sm_deliver_evt_sync); + +#ifdef SM_ENG_HIST_ENABLE +void wlan_vdev_mlme_sm_history_print(struct wlan_objmgr_vdev *vdev) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) { + mlme_err("vdev component object is NULL"); + return; + } + + mlme_vdev_sm_history_print(vdev_mlme); +} +#endif + +QDF_STATUS wlan_vdev_allow_connect_n_tx(struct wlan_objmgr_vdev *vdev) +{ + enum wlan_vdev_state state; + enum wlan_vdev_state substate; + + state = wlan_vdev_mlme_get_state(vdev); + substate = wlan_vdev_mlme_get_substate(vdev); + if ((state == WLAN_VDEV_S_UP) || + ((state == WLAN_VDEV_S_SUSPEND) && + (substate == WLAN_VDEV_SS_SUSPEND_CSA_RESTART))) + return QDF_STATUS_SUCCESS; + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wlan_vdev_mlme_is_active(struct wlan_objmgr_vdev *vdev) +{ + enum wlan_vdev_state state; + + state = wlan_vdev_mlme_get_state(vdev); + if ((state == WLAN_VDEV_S_UP) || (state == WLAN_VDEV_S_DFS_CAC_WAIT) || + (state == WLAN_VDEV_S_SUSPEND)) + return QDF_STATUS_SUCCESS; + + return QDF_STATUS_E_FAILURE; +} + +qdf_export_symbol(wlan_vdev_mlme_is_active); + +QDF_STATUS wlan_vdev_chan_config_valid(struct wlan_objmgr_vdev *vdev) +{ + enum wlan_vdev_state state; + enum wlan_vdev_state substate; + + state = wlan_vdev_mlme_get_state(vdev); + substate = wlan_vdev_mlme_get_substate(vdev); + if (!((state == WLAN_VDEV_S_INIT) || (state == WLAN_VDEV_S_STOP))) + return QDF_STATUS_SUCCESS; + + return QDF_STATUS_E_FAILURE; +} + +qdf_export_symbol(wlan_vdev_chan_config_valid); + +QDF_STATUS wlan_vdev_mlme_is_csa_restart(struct wlan_objmgr_vdev *vdev) +{ + enum wlan_vdev_state state; + enum wlan_vdev_state substate; + + state = wlan_vdev_mlme_get_state(vdev); + substate = wlan_vdev_mlme_get_substate(vdev); + if ((state == WLAN_VDEV_S_SUSPEND) && + (substate == WLAN_VDEV_SS_SUSPEND_CSA_RESTART)) + return QDF_STATUS_SUCCESS; + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wlan_vdev_is_going_down(struct wlan_objmgr_vdev *vdev) +{ + enum wlan_vdev_state state; + enum wlan_vdev_state substate; + + state = wlan_vdev_mlme_get_state(vdev); + substate = wlan_vdev_mlme_get_substate(vdev); + if ((state == WLAN_VDEV_S_STOP) || + ((state == WLAN_VDEV_S_SUSPEND) && + (substate == WLAN_VDEV_SS_SUSPEND_SUSPEND_DOWN))) + return QDF_STATUS_SUCCESS; + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wlan_vdev_is_restart_progress(struct wlan_objmgr_vdev *vdev) +{ + enum wlan_vdev_state state; + enum wlan_vdev_state substate; + + state = wlan_vdev_mlme_get_state(vdev); + substate = wlan_vdev_mlme_get_substate(vdev); + if ((state == WLAN_VDEV_S_START) && + (substate == WLAN_VDEV_SS_START_RESTART_PROGRESS)) + return QDF_STATUS_SUCCESS; + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wlan_vdev_is_dfs_cac_wait(struct wlan_objmgr_vdev *vdev) +{ + if (wlan_vdev_mlme_get_state(vdev) == WLAN_VDEV_S_DFS_CAC_WAIT) + return QDF_STATUS_SUCCESS; + + return QDF_STATUS_E_FAILURE; +} + +void wlan_vdev_mlme_cmd_lock(struct wlan_objmgr_vdev *vdev) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) { + mlme_err("vdev component object is NULL"); + return; + } + + mlme_vdev_cmd_mutex_acquire(vdev_mlme); +} + +void wlan_vdev_mlme_cmd_unlock(struct wlan_objmgr_vdev *vdev) +{ + struct vdev_mlme_obj *vdev_mlme; + + vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(vdev); + if (!vdev_mlme) { + mlme_err("vdev component object is NULL"); + return; + } + + mlme_vdev_cmd_mutex_release(vdev_mlme); +} + +QDF_STATUS wlan_vdev_mlme_is_scan_allowed(struct wlan_objmgr_vdev *vdev) +{ + enum wlan_vdev_state state; + + state = wlan_vdev_mlme_get_state(vdev); + if ((state == WLAN_VDEV_S_INIT) || (state == WLAN_VDEV_S_UP) || + (state == WLAN_VDEV_S_STOP)) + return QDF_STATUS_SUCCESS; + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wlan_vdev_mlme_is_init_state(struct wlan_objmgr_vdev *vdev) +{ + enum wlan_vdev_state state; + + state = wlan_vdev_mlme_get_state(vdev); + if (state == WLAN_VDEV_S_INIT) + return QDF_STATUS_SUCCESS; + + return QDF_STATUS_E_FAILURE; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_build_chan_list.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_build_chan_list.c new file mode 100644 index 0000000000000000000000000000000000000000..d783f8158316141ea1f35927ff25ab2995931dab --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_build_chan_list.c @@ -0,0 +1,1245 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_build_chan_list.c + * This file defines the API to build master and current channel list. + */ + +#include +#include +#include +#include +#include "reg_priv_objs.h" +#include "reg_utils.h" +#include "reg_callbacks.h" +#include "reg_services_common.h" +#include "reg_db.h" +#include "reg_db_parser.h" +#include "reg_offload_11d_scan.h" +#include +#include "reg_build_chan_list.h" +#include + +#define MAX_PWR_FCC_CHAN_12 8 +#define MAX_PWR_FCC_CHAN_13 2 +#define CHAN_144_CENT_FREQ 5720 + +/** + * reg_fill_channel_info() - Populate TX power, antenna gain, channel state, + * channel flags, min and max bandwidth to master channel list. + * @chan_enum: Channel enum. + * @reg_rule: Pointer to regulatory rule which has tx power and antenna gain. + * @master_list: Pointer to master channel list. + * @min_bw: minimum bandwidth to be used for given channel. + */ +static void reg_fill_channel_info(enum channel_enum chan_enum, + struct cur_reg_rule *reg_rule, + struct regulatory_channel *master_list, + uint16_t min_bw) +{ + master_list[chan_enum].chan_flags &= ~REGULATORY_CHAN_DISABLED; + + master_list[chan_enum].tx_power = reg_rule->reg_power; + master_list[chan_enum].ant_gain = reg_rule->ant_gain; + master_list[chan_enum].state = CHANNEL_STATE_ENABLE; + + if (reg_rule->flags & REGULATORY_CHAN_NO_IR) { + master_list[chan_enum].chan_flags |= REGULATORY_CHAN_NO_IR; + master_list[chan_enum].state = CHANNEL_STATE_DFS; + } + + if (reg_rule->flags & REGULATORY_CHAN_RADAR) { + master_list[chan_enum].chan_flags |= REGULATORY_CHAN_RADAR; + master_list[chan_enum].state = CHANNEL_STATE_DFS; + } + + if (reg_rule->flags & REGULATORY_CHAN_INDOOR_ONLY) + master_list[chan_enum].chan_flags |= + REGULATORY_CHAN_INDOOR_ONLY; + + if (reg_rule->flags & REGULATORY_CHAN_NO_OFDM) + master_list[chan_enum].chan_flags |= REGULATORY_CHAN_NO_OFDM; + + master_list[chan_enum].min_bw = min_bw; + if (master_list[chan_enum].max_bw == 20) + master_list[chan_enum].max_bw = reg_rule->max_bw; +} + +/** + * reg_populate_band_channels() - For all the valid regdb channels in the master + * channel list, find the regulatory rules and call reg_fill_channel_info() to + * populate master channel list with txpower, antennagain, BW info, etc. + * @start_chan: Start channel enum. + * @end_chan: End channel enum. + * @rule_start_ptr: Pointer to regulatory rules. + * @num_reg_rules: Number of regulatory rules. + * @min_reg_bw: Minimum regulatory bandwidth. + * @mas_chan_list: Pointer to master channel list. + */ +static void reg_populate_band_channels(enum channel_enum start_chan, + enum channel_enum end_chan, + struct cur_reg_rule *rule_start_ptr, + uint32_t num_reg_rules, + uint16_t min_reg_bw, + struct regulatory_channel *mas_chan_list) +{ + struct cur_reg_rule *found_rule_ptr; + struct cur_reg_rule *cur_rule_ptr; + struct regulatory_channel; + enum channel_enum chan_enum; + uint32_t rule_num, bw; + uint16_t max_bw; + uint16_t min_bw; + + for (chan_enum = start_chan; chan_enum <= end_chan; chan_enum++) { + found_rule_ptr = NULL; + + max_bw = QDF_MIN((uint16_t)20, channel_map[chan_enum].max_bw); + min_bw = QDF_MAX(min_reg_bw, channel_map[chan_enum].min_bw); + + if (channel_map[chan_enum].chan_num == INVALID_CHANNEL_NUM) + continue; + + for (bw = max_bw; bw >= min_bw; bw = bw / 2) { + for (rule_num = 0, cur_rule_ptr = rule_start_ptr; + rule_num < num_reg_rules; + cur_rule_ptr++, rule_num++) { + if ((cur_rule_ptr->start_freq <= + mas_chan_list[chan_enum].center_freq - + bw / 2) && + (cur_rule_ptr->end_freq >= + mas_chan_list[chan_enum].center_freq + + bw / 2) && (min_bw <= bw)) { + found_rule_ptr = cur_rule_ptr; + break; + } + } + + if (found_rule_ptr) + break; + } + + if (found_rule_ptr) { + mas_chan_list[chan_enum].max_bw = bw; + reg_fill_channel_info(chan_enum, found_rule_ptr, + mas_chan_list, min_bw); + /* Disable 2.4 Ghz channels that dont have 20 mhz bw */ + if (start_chan == MIN_24GHZ_CHANNEL && + mas_chan_list[chan_enum].max_bw < 20) { + mas_chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + mas_chan_list[chan_enum].state = + CHANNEL_STATE_DISABLE; + } + } + } +} + +/** + * reg_update_max_bw_per_rule() - Update max bandwidth value for given regrules. + * @num_reg_rules: Number of regulatory rules. + * @reg_rule_start: Pointer to regulatory rules. + * @max_bw: Maximum bandwidth + */ +static void reg_update_max_bw_per_rule(uint32_t num_reg_rules, + struct cur_reg_rule *reg_rule_start, + uint16_t max_bw) +{ + uint32_t count; + + for (count = 0; count < num_reg_rules; count++) + reg_rule_start[count].max_bw = + min(reg_rule_start[count].max_bw, max_bw); +} + +/** + * reg_do_auto_bw_correction() - Calculate and update the maximum bandwidth + * value. + * @num_reg_rules: Number of regulatory rules. + * @reg_rule_ptr: Pointer to regulatory rules. + * @max_bw: Maximum bandwidth + */ +static void reg_do_auto_bw_correction(uint32_t num_reg_rules, + struct cur_reg_rule *reg_rule_ptr, + uint16_t max_bw) +{ + uint32_t count; + uint16_t new_bw; + + for (count = 0; count < num_reg_rules - 1; count++) { + if (reg_rule_ptr[count].end_freq == + reg_rule_ptr[count + 1].start_freq) { + new_bw = QDF_MIN(max_bw, reg_rule_ptr[count].max_bw + + reg_rule_ptr[count + 1].max_bw); + reg_rule_ptr[count].max_bw = new_bw; + reg_rule_ptr[count + 1].max_bw = new_bw; + } + } +} + +/** + * reg_modify_chan_list_for_dfs_channels() - disable the DFS channels if + * dfs_enable set to false. + * @chan_list: Pointer to regulatory channel list. + * @dfs_enabled: if false, then disable the DFS channels. + */ +static void reg_modify_chan_list_for_dfs_channels( + struct regulatory_channel *chan_list, bool dfs_enabled) +{ + enum channel_enum chan_enum; + + if (dfs_enabled) + return; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + if (chan_list[chan_enum].state == CHANNEL_STATE_DFS) { + chan_list[chan_enum].state = CHANNEL_STATE_DISABLE; + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + } + } +} + +/** + * reg_modify_chan_list_for_indoor_channels() - Disable the indoor channels if + * indoor_chan_enabled flag is set to false. + * @pdev_priv_obj: Pointer to regulatory private pdev structure. + */ +static void reg_modify_chan_list_for_indoor_channels( + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj) +{ + enum channel_enum chan_enum; + struct regulatory_channel *chan_list = pdev_priv_obj->cur_chan_list; + + if (!pdev_priv_obj->indoor_chan_enabled) { + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + if (REGULATORY_CHAN_INDOOR_ONLY & + chan_list[chan_enum].chan_flags) { + chan_list[chan_enum].state = + CHANNEL_STATE_DFS; + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_NO_IR; + } + } + } + + if (pdev_priv_obj->force_ssc_disable_indoor_channel && + pdev_priv_obj->sap_state) { + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + if (REGULATORY_CHAN_INDOOR_ONLY & + chan_list[chan_enum].chan_flags) { + chan_list[chan_enum].state = + CHANNEL_STATE_DISABLE; + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + } + } + } +} + +#ifdef CONFIG_BAND_6GHZ +static void reg_modify_chan_list_for_band_6G( + struct regulatory_channel *chan_list) +{ + enum channel_enum chan_enum; + + reg_debug("disabling 6G"); + for (chan_enum = MIN_6GHZ_CHANNEL; + chan_enum <= MAX_6GHZ_CHANNEL; chan_enum++) { + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + chan_list[chan_enum].state = CHANNEL_STATE_DISABLE; + } +} +#else +static inline void reg_modify_chan_list_for_band_6G( + struct regulatory_channel *chan_list) +{ +} +#endif + +/** + * reg_modify_chan_list_for_band() - Based on the input band bitmap, either + * disable 2GHz, 5GHz, or 6GHz channels. + * @chan_list: Pointer to regulatory channel list. + * @band_bitmap: Input bitmap of reg_wifi_band values. + */ +static void reg_modify_chan_list_for_band(struct regulatory_channel *chan_list, + uint32_t band_bitmap) +{ + enum channel_enum chan_enum; + + if (!band_bitmap) + return; + + if (!(band_bitmap & BIT(REG_BAND_5G))) { + reg_debug("disabling 5G"); + for (chan_enum = MIN_5GHZ_CHANNEL; + chan_enum <= MAX_5GHZ_CHANNEL; chan_enum++) { + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + chan_list[chan_enum].state = CHANNEL_STATE_DISABLE; + } + } + + if (!(band_bitmap & BIT(REG_BAND_2G))) { + reg_debug("disabling 2G"); + for (chan_enum = MIN_24GHZ_CHANNEL; + chan_enum <= MAX_24GHZ_CHANNEL; chan_enum++) { + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + chan_list[chan_enum].state = CHANNEL_STATE_DISABLE; + } + } + + if (!(band_bitmap & BIT(REG_BAND_6G))) + reg_modify_chan_list_for_band_6G(chan_list); + +} + +/** + * reg_modify_chan_list_for_fcc_channel() - Set maximum FCC txpower for channel + * 12 and 13 if set_fcc_channel flag is set to true. + * @chan_list: Pointer to regulatory channel list. + * @set_fcc_channel: If this flag is set to true, then set the max FCC txpower + * for channel 12 and 13. + */ +static void reg_modify_chan_list_for_fcc_channel( + struct regulatory_channel *chan_list, bool set_fcc_channel) +{ + enum channel_enum chan_enum; + + if (!set_fcc_channel) + return; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + if (chan_list[chan_enum].center_freq == CHAN_12_CENT_FREQ) + chan_list[chan_enum].tx_power = MAX_PWR_FCC_CHAN_12; + + if (chan_list[chan_enum].center_freq == CHAN_13_CENT_FREQ) + chan_list[chan_enum].tx_power = MAX_PWR_FCC_CHAN_13; + } +} + +/** + * reg_modify_chan_list_for_chan_144() - Disable channel 144 if en_chan_144 flag + * is set to false. + * @chan_list: Pointer to regulatory channel list. + * @en_chan_144: if false, then disable channel 144. + */ +static void reg_modify_chan_list_for_chan_144( + struct regulatory_channel *chan_list, bool en_chan_144) +{ + enum channel_enum chan_enum; + + if (en_chan_144) + return; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + if (chan_list[chan_enum].center_freq == CHAN_144_CENT_FREQ) { + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + chan_list[chan_enum].state = CHANNEL_STATE_DISABLE; + } + } +} + +/** + * reg_modify_chan_list_for_nol_list() - Disable the channel if nol_chan flag is + * set. + * @chan_list: Pointer to regulatory channel list. + */ +static void reg_modify_chan_list_for_nol_list( + struct regulatory_channel *chan_list) +{ + enum channel_enum chan_enum; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + if (chan_list[chan_enum].nol_chan) { + chan_list[chan_enum].state = CHANNEL_STATE_DISABLE; + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + } + } +} + +/** + * reg_find_low_limit_chan_enum() - Find low limit 2G and 5G channel enums. + * @chan_list: Pointer to regulatory channel list. + * @low_freq: low limit frequency. + * @low_limit: pointer to output low limit enum. + * + * Return: None + */ +static void reg_find_low_limit_chan_enum( + struct regulatory_channel *chan_list, qdf_freq_t low_freq, + uint32_t *low_limit) +{ + enum channel_enum chan_enum; + uint16_t min_bw; + uint16_t max_bw; + qdf_freq_t center_freq; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + min_bw = chan_list[chan_enum].min_bw; + max_bw = chan_list[chan_enum].max_bw; + center_freq = chan_list[chan_enum].center_freq; + + if ((center_freq - min_bw / 2) >= low_freq) { + if ((center_freq - max_bw / 2) < low_freq) { + if (max_bw <= 20) + max_bw = ((center_freq - low_freq) * 2); + if (max_bw < min_bw) + max_bw = min_bw; + chan_list[chan_enum].max_bw = max_bw; + } + *low_limit = chan_enum; + break; + } + } +} + +/** + * reg_find_high_limit_chan_enum() - Find high limit 2G and 5G channel enums. + * @chan_list: Pointer to regulatory channel list. + * @high_freq: high limit frequency. + * @high_limit: pointer to output high limit enum. + * + * Return: None + */ +static void reg_find_high_limit_chan_enum( + struct regulatory_channel *chan_list, qdf_freq_t high_freq, + uint32_t *high_limit) +{ + enum channel_enum chan_enum; + uint16_t min_bw; + uint16_t max_bw; + qdf_freq_t center_freq; + + for (chan_enum = NUM_CHANNELS - 1; chan_enum >= 0; chan_enum--) { + min_bw = chan_list[chan_enum].min_bw; + max_bw = chan_list[chan_enum].max_bw; + center_freq = chan_list[chan_enum].center_freq; + + if (center_freq + min_bw / 2 <= high_freq) { + if ((center_freq + max_bw / 2) > high_freq) { + if (max_bw <= 20) + max_bw = ((high_freq - + center_freq) * 2); + if (max_bw < min_bw) + max_bw = min_bw; + chan_list[chan_enum].max_bw = max_bw; + } + *high_limit = chan_enum; + break; + } + + if (chan_enum == 0) + break; + } +} + +#ifdef REG_DISABLE_JP_CH144 +/** + * reg_modify_chan_list_for_japan() - Disable channel 144 for MKK17_MKKC + * regdomain by default. + * @pdev: Pointer to pdev + * + * Return: None + */ +static void +reg_modify_chan_list_for_japan(struct wlan_objmgr_pdev *pdev) +{ +#define MKK17_MKKC 0xE1 + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return; + } + + if (pdev_priv_obj->reg_dmn_pair == MKK17_MKKC) + pdev_priv_obj->en_chan_144 = false; + +#undef MKK17_MKKC +} +#else +static inline void +reg_modify_chan_list_for_japan(struct wlan_objmgr_pdev *pdev) +{ +} +#endif +/** + * reg_modify_chan_list_for_freq_range() - Modify channel list for the given low + * and high frequency range. + * @chan_list: Pointer to regulatory channel list. + * @low_freq_2g: Low frequency 2G. + * @high_freq_2g: High frequency 2G. + * @low_freq_5g: Low frequency 5G. + * @high_freq_5g: High frequency 5G. + * + * Return: None + */ +static void +reg_modify_chan_list_for_freq_range(struct regulatory_channel *chan_list, + qdf_freq_t low_freq_2g, + qdf_freq_t high_freq_2g, + qdf_freq_t low_freq_5g, + qdf_freq_t high_freq_5g) +{ + uint32_t low_limit_2g = NUM_CHANNELS; + uint32_t high_limit_2g = NUM_CHANNELS; + uint32_t low_limit_5g = NUM_CHANNELS; + uint32_t high_limit_5g = NUM_CHANNELS; + enum channel_enum chan_enum; + bool chan_in_range; + + reg_find_low_limit_chan_enum(chan_list, low_freq_2g, &low_limit_2g); + reg_find_low_limit_chan_enum(chan_list, low_freq_5g, &low_limit_5g); + reg_find_high_limit_chan_enum(chan_list, high_freq_2g, &high_limit_2g); + reg_find_high_limit_chan_enum(chan_list, high_freq_5g, &high_limit_5g); + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + chan_in_range = false; + if ((low_limit_2g <= chan_enum) && + (high_limit_2g >= chan_enum) && + (low_limit_2g != NUM_CHANNELS) && + (high_limit_2g != NUM_CHANNELS)) + chan_in_range = true; + + if ((low_limit_5g <= chan_enum) && + (high_limit_5g >= chan_enum) && + (low_limit_5g != NUM_CHANNELS) && + (high_limit_5g != NUM_CHANNELS)) + chan_in_range = true; + + if (!chan_in_range) { + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + chan_list[chan_enum].state = CHANNEL_STATE_DISABLE; + } + } +} + +void reg_init_pdev_mas_chan_list( + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj, + struct mas_chan_params *mas_chan_params) +{ + qdf_mem_copy(pdev_priv_obj->mas_chan_list, + mas_chan_params->mas_chan_list, + NUM_CHANNELS * sizeof(struct regulatory_channel)); + + pdev_priv_obj->dfs_region = mas_chan_params->dfs_region; + + pdev_priv_obj->phybitmap = mas_chan_params->phybitmap; + + pdev_priv_obj->reg_dmn_pair = mas_chan_params->reg_dmn_pair; + pdev_priv_obj->ctry_code = mas_chan_params->ctry_code; + + pdev_priv_obj->def_region_domain = mas_chan_params->reg_dmn_pair; + pdev_priv_obj->def_country_code = mas_chan_params->ctry_code; + + qdf_mem_copy(pdev_priv_obj->default_country, + mas_chan_params->default_country, REG_ALPHA2_LEN + 1); + + qdf_mem_copy(pdev_priv_obj->current_country, + mas_chan_params->current_country, REG_ALPHA2_LEN + 1); +} + +/** + * reg_modify_chan_list_for_cached_channels() - If num_cache_channels are + * non-zero, then disable the pdev channels which is given in + * cache_disable_chan_list. + * @pdev_priv_obj: Pointer to regulatory pdev private object. + */ +#ifdef DISABLE_CHANNEL_LIST +static void reg_modify_chan_list_for_cached_channels( + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj) +{ + uint32_t i, j; + uint32_t num_cache_channels = pdev_priv_obj->num_cache_channels; + struct regulatory_channel *chan_list = pdev_priv_obj->cur_chan_list; + struct regulatory_channel *cache_chan_list = + pdev_priv_obj->cache_disable_chan_list; + + if (!num_cache_channels) + return; + + if (pdev_priv_obj->disable_cached_channels) { + for (i = 0; i < num_cache_channels; i++) + for (j = 0; j < NUM_CHANNELS; j++) + if (cache_chan_list[i].chan_num == + chan_list[j].chan_num) { + chan_list[j].state = + CHANNEL_STATE_DISABLE; + chan_list[j].chan_flags |= + REGULATORY_CHAN_DISABLED; + } + } +} +#else +static void reg_modify_chan_list_for_cached_channels( + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj) +{ +} +#endif + +#ifdef CONFIG_REG_CLIENT +/** + * reg_modify_chan_list_for_srd_channels() - Modify SRD channels in ETSI13 + * @pdev: Pointer to pdev object + * @chan_list: Current channel list + * + * This function converts SRD channels to passive in ETSI13 regulatory domain + * when enable_srd_chan_in_master_mode is not set. + */ +static void +reg_modify_chan_list_for_srd_channels(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list) +{ + enum channel_enum chan_enum; + + if (!reg_is_etsi13_regdmn(pdev)) + return; + + if (reg_is_etsi13_srd_chan_allowed_master_mode(pdev)) + return; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + if (chan_list[chan_enum].chan_flags & REGULATORY_CHAN_DISABLED) + continue; + + if (reg_is_etsi13_srd_chan(pdev, + chan_list[chan_enum].chan_num)) { + chan_list[chan_enum].state = + CHANNEL_STATE_DFS; + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_NO_IR; + } + } +} +#else +static inline void +reg_modify_chan_list_for_srd_channels(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list) +{ +} +#endif + +#ifdef DISABLE_UNII_SHARED_BANDS +/** + * reg_is_reg_unii_band_1_set() - Check UNII bitmap + * @unii_bitmap: 5G UNII band bitmap + * + * This function checks the input bitmap to disable UNII-1 band channels. + * + * Return: Return true if UNII-1 channels need to be disabled, + * else return false. + */ +static bool reg_is_reg_unii_band_1_set(uint8_t unii_bitmap) +{ + return !!(unii_bitmap & BIT(REG_UNII_BAND_1)); +} + +/** + * reg_is_reg_unii_band_2a_set() - Check UNII bitmap + * @unii_bitmap: 5G UNII band bitmap + * + * This function checks the input bitmap to disable UNII-2A band channels. + * + * Return: Return true if UNII-2A channels need to be disabled, + * else return false. + */ +static bool reg_is_reg_unii_band_2a_set(uint8_t unii_bitmap) +{ + return !!(unii_bitmap & BIT(REG_UNII_BAND_2A)); +} + +/** + * reg_is_5g_enum() - Check if channel enum is a 5G channel enum + * @chan_enum: channel enum + * + * Return: Return true if the input channel enum is 5G, else return false. + */ +static bool reg_is_5g_enum(enum channel_enum chan_enum) +{ + return (chan_enum >= MIN_5GHZ_CHANNEL && chan_enum <= MAX_5GHZ_CHANNEL); +} + +/** + * reg_remove_unii_chan_from_chan_list() - Remove UNII band channels + * @chan_list: Pointer to current channel list + * @start_enum: starting enum value + * @end_enum: ending enum value + * + * Remove channels in a unii band based in on the input start_enum and end_enum. + * Disable the state and flags. Set disable_coex flag to true. + * + * return: void. + */ +static void +reg_remove_unii_chan_from_chan_list(struct regulatory_channel *chan_list, + enum channel_enum start_enum, + enum channel_enum end_enum) +{ + enum channel_enum chan_enum; + + if (!(reg_is_5g_enum(start_enum) && reg_is_5g_enum(end_enum))) { + reg_err_rl("start_enum or end_enum is invalid"); + return; + } + + for (chan_enum = start_enum; chan_enum <= end_enum; chan_enum++) { + chan_list[chan_enum].state = CHANNEL_STATE_DISABLE; + chan_list[chan_enum].chan_flags |= REGULATORY_CHAN_DISABLED; + } +} + +/** + * reg_modify_disable_chan_list_for_unii1_and_unii2a() - Disable UNII-1 and + * UNII2A band + * @pdev_priv_obj: Pointer to pdev private object + * + * This function disables the UNII-1 and UNII-2A band channels + * based on input unii_5g_bitmap. + * + * Return: void. + */ +static void +reg_modify_disable_chan_list_for_unii1_and_unii2a( + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj) +{ + uint8_t unii_bitmap = pdev_priv_obj->unii_5g_bitmap; + struct regulatory_channel *chan_list = pdev_priv_obj->cur_chan_list; + + if (reg_is_reg_unii_band_1_set(unii_bitmap)) { + reg_remove_unii_chan_from_chan_list(chan_list, + MIN_UNII_1_BAND_CHANNEL, + MAX_UNII_1_BAND_CHANNEL); + } + + if (reg_is_reg_unii_band_2a_set(unii_bitmap)) { + reg_remove_unii_chan_from_chan_list(chan_list, + MIN_UNII_2A_BAND_CHANNEL, + MAX_UNII_2A_BAND_CHANNEL); + } +} +#else +static inline bool reg_is_reg_unii_band_1_set(uint8_t unii_bitmap) +{ + return false; +} + +static inline bool reg_is_reg_unii_band_2a_set(uint8_t unii_bitmap) +{ + return false; +} + +static inline bool reg_is_5g_enum(enum channel_enum chan_enum) +{ + return false; +} + +static inline void +reg_remove_unii_chan_from_chan_list(struct regulatory_channel *chan_list, + enum channel_enum start_enum, + enum channel_enum end_enum) +{ +} + +static inline void +reg_modify_disable_chan_list_for_unii1_and_unii2a( + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj) +{ +} +#endif + +void reg_compute_pdev_current_chan_list(struct wlan_regulatory_pdev_priv_obj + *pdev_priv_obj) +{ + qdf_mem_copy(pdev_priv_obj->cur_chan_list, pdev_priv_obj->mas_chan_list, + NUM_CHANNELS * sizeof(struct regulatory_channel)); + + reg_modify_chan_list_for_freq_range(pdev_priv_obj->cur_chan_list, + pdev_priv_obj->range_2g_low, + pdev_priv_obj->range_2g_high, + pdev_priv_obj->range_5g_low, + pdev_priv_obj->range_5g_high); + + reg_modify_chan_list_for_band(pdev_priv_obj->cur_chan_list, + pdev_priv_obj->band_capability); + + reg_modify_disable_chan_list_for_unii1_and_unii2a(pdev_priv_obj); + + reg_modify_chan_list_for_dfs_channels(pdev_priv_obj->cur_chan_list, + pdev_priv_obj->dfs_enabled); + + reg_modify_chan_list_for_nol_list(pdev_priv_obj->cur_chan_list); + + reg_modify_chan_list_for_indoor_channels(pdev_priv_obj); + + reg_modify_chan_list_for_fcc_channel(pdev_priv_obj->cur_chan_list, + pdev_priv_obj->set_fcc_channel); + + reg_modify_chan_list_for_chan_144(pdev_priv_obj->cur_chan_list, + pdev_priv_obj->en_chan_144); + + reg_modify_chan_list_for_cached_channels(pdev_priv_obj); + + reg_modify_chan_list_for_srd_channels(pdev_priv_obj->pdev_ptr, + pdev_priv_obj->cur_chan_list); +} + +void reg_reset_reg_rules(struct reg_rule_info *reg_rules) +{ + qdf_mem_zero(reg_rules, sizeof(*reg_rules)); +} + +void reg_save_reg_rules_to_pdev( + struct reg_rule_info *psoc_reg_rules, + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj) +{ + uint32_t reg_rule_len; + struct reg_rule_info *pdev_reg_rules; + + qdf_spin_lock_bh(&pdev_priv_obj->reg_rules_lock); + + pdev_reg_rules = &pdev_priv_obj->reg_rules; + reg_reset_reg_rules(pdev_reg_rules); + + pdev_reg_rules->num_of_reg_rules = psoc_reg_rules->num_of_reg_rules; + if (!pdev_reg_rules->num_of_reg_rules) { + qdf_spin_unlock_bh(&pdev_priv_obj->reg_rules_lock); + reg_err("no reg rules in psoc"); + return; + } + + reg_rule_len = pdev_reg_rules->num_of_reg_rules * + sizeof(struct cur_reg_rule); + qdf_mem_copy(pdev_reg_rules->reg_rules, psoc_reg_rules->reg_rules, + reg_rule_len); + + qdf_mem_copy(pdev_reg_rules->alpha2, pdev_priv_obj->current_country, + REG_ALPHA2_LEN + 1); + pdev_reg_rules->dfs_region = pdev_priv_obj->dfs_region; + + qdf_spin_unlock_bh(&pdev_priv_obj->reg_rules_lock); +} + +void reg_propagate_mas_chan_list_to_pdev(struct wlan_objmgr_psoc *psoc, + void *object, void *arg) +{ + struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)object; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + enum direction *dir = arg; + uint32_t pdev_id; + struct wlan_lmac_if_reg_tx_ops *reg_tx_ops; + struct reg_rule_info *psoc_reg_rules; + + psoc_priv_obj = (struct wlan_regulatory_psoc_priv_obj *) + wlan_objmgr_psoc_get_comp_private_obj( + psoc, WLAN_UMAC_COMP_REGULATORY); + + if (!psoc_priv_obj) { + reg_err("psoc priv obj is NULL"); + return; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + reg_init_pdev_mas_chan_list( + pdev_priv_obj, + &psoc_priv_obj->mas_chan_params[pdev_id]); + psoc_reg_rules = &psoc_priv_obj->mas_chan_params[pdev_id].reg_rules; + reg_save_reg_rules_to_pdev(psoc_reg_rules, pdev_priv_obj); + reg_modify_chan_list_for_japan(pdev); + pdev_priv_obj->chan_list_recvd = + psoc_priv_obj->chan_list_recvd[pdev_id]; + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + reg_tx_ops = reg_get_psoc_tx_ops(psoc); + if (reg_tx_ops->fill_umac_legacy_chanlist) { + reg_tx_ops->fill_umac_legacy_chanlist( + pdev, pdev_priv_obj->cur_chan_list); + } else { + if (*dir == NORTHBOUND) + reg_send_scheduler_msg_nb(psoc, pdev); + else + reg_send_scheduler_msg_sb(psoc, pdev); + } +} + +/** + * reg_populate_6g_band_channels() - For all the valid 6GHz regdb channels + * in the master channel list, find the regulatory rules and call + * reg_fill_channel_info() to populate master channel list with txpower, + * antennagain, BW info, etc. + * @reg_rule_5g: Pointer to regulatory rule. + * @num_5g_reg_rules: Number of regulatory rules. + * @min_bw_5g: Minimum regulatory bandwidth. + * @mas_chan_list: Pointer to the master channel list. + */ +#ifdef CONFIG_BAND_6GHZ +static void +reg_populate_6g_band_channels(struct cur_reg_rule *reg_rule_5g, + uint32_t num_5g_reg_rules, + uint16_t min_bw_5g, + struct regulatory_channel *mas_chan_list) +{ + reg_populate_band_channels(MIN_6GHZ_CHANNEL, + MAX_6GHZ_CHANNEL, + reg_rule_5g, + num_5g_reg_rules, + min_bw_5g, + mas_chan_list); +} +#else +static void +reg_populate_6g_band_channels(struct cur_reg_rule *reg_rule_5g, + uint32_t num_5g_reg_rules, + uint16_t min_bw_5g, + struct regulatory_channel *mas_chan_list) +{ +} +#endif /* CONFIG_BAND_6GHZ */ + +#ifdef CONFIG_REG_CLIENT +/** + * reg_send_ctl_info() - Send CTL info to firmware when regdb is not offloaded + * @soc_reg: soc private object for regulatory + * @regulatory_info: regulatory info + * @tx_ops: send operations for regulatory component + * + * Return: QDF_STATUS + */ +static QDF_STATUS +reg_send_ctl_info(struct wlan_regulatory_psoc_priv_obj *soc_reg, + struct cur_regulatory_info *regulatory_info, + struct wlan_lmac_if_reg_tx_ops *tx_ops) +{ + struct wlan_objmgr_psoc *psoc = regulatory_info->psoc; + struct reg_ctl_params params = {0}; + QDF_STATUS status; + uint16_t regd_index; + uint32_t index_2g, index_5g; + + if (soc_reg->offload_enabled) + return QDF_STATUS_SUCCESS; + + if (!tx_ops || !tx_ops->send_ctl_info) { + reg_err("No regulatory tx_ops"); + return QDF_STATUS_E_FAULT; + } + + status = reg_get_rdpair_from_regdmn_id(regulatory_info->reg_dmn_pair, + ®d_index); + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("Failed to get regdomain index for regdomain pair: %x", + regulatory_info->reg_dmn_pair); + return status; + } + + index_2g = g_reg_dmn_pairs[regd_index].dmn_id_2g; + index_5g = g_reg_dmn_pairs[regd_index].dmn_id_5g; + params.ctl_2g = regdomains_2g[index_2g].ctl_val; + params.ctl_5g = regdomains_5g[index_5g].ctl_val; + params.regd_2g = reg_2g_sub_dmn_code[index_2g]; + params.regd_5g = reg_5g_sub_dmn_code[index_5g]; + + if (reg_is_world_ctry_code(regulatory_info->reg_dmn_pair)) + params.regd = regulatory_info->reg_dmn_pair; + else + params.regd = regulatory_info->ctry_code | COUNTRY_ERD_FLAG; + + reg_debug("regdomain pair = %u, regdomain index = %u", + regulatory_info->reg_dmn_pair, regd_index); + reg_debug("index_2g = %u, index_5g = %u, ctl_2g = %x, ctl_5g = %x", + index_2g, index_5g, params.ctl_2g, params.ctl_5g); + reg_debug("regd_2g = %x, regd_5g = %x, regd = %x", + params.regd_2g, params.regd_5g, params.regd); + + status = tx_ops->send_ctl_info(psoc, ¶ms); + if (QDF_IS_STATUS_ERROR(status)) + reg_err("Failed to send CTL info to firmware"); + + return status; +} +#else +static QDF_STATUS +reg_send_ctl_info(struct wlan_regulatory_psoc_priv_obj *soc_reg, + struct cur_regulatory_info *regulatory_info, + struct wlan_lmac_if_reg_tx_ops *tx_ops) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +QDF_STATUS reg_process_master_chan_list( + struct cur_regulatory_info *regulat_info) +{ + struct wlan_regulatory_psoc_priv_obj *soc_reg; + uint32_t num_2g_reg_rules, num_5g_reg_rules; + struct cur_reg_rule *reg_rule_2g, *reg_rule_5g; + uint16_t min_bw_2g, max_bw_2g, min_bw_5g, max_bw_5g; + struct regulatory_channel *mas_chan_list; + struct wlan_objmgr_psoc *psoc; + enum channel_enum chan_enum; + wlan_objmgr_ref_dbgid dbg_id; + enum direction dir; + uint8_t phy_id; + struct wlan_objmgr_pdev *pdev; + struct wlan_lmac_if_reg_tx_ops *tx_ops; + struct reg_rule_info *reg_rules; + QDF_STATUS status; + + psoc = regulat_info->psoc; + soc_reg = reg_get_psoc_obj(psoc); + + if (!IS_VALID_PSOC_REG_OBJ(soc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_FAILURE; + } + + tx_ops = reg_get_psoc_tx_ops(psoc); + phy_id = regulat_info->phy_id; + + if (reg_ignore_default_country(soc_reg, regulat_info)) { + status = reg_set_curr_country(soc_reg, regulat_info, tx_ops); + if (QDF_IS_STATUS_SUCCESS(status)) { + reg_debug("WLAN restart - Ignore default CC for phy_id: %u", + phy_id); + return QDF_STATUS_SUCCESS; + } + } + + reg_debug("process reg master chan list"); + + if (soc_reg->offload_enabled) { + dbg_id = WLAN_REGULATORY_NB_ID; + dir = NORTHBOUND; + } else { + dbg_id = WLAN_REGULATORY_SB_ID; + dir = SOUTHBOUND; + } + + if (regulat_info->status_code != REG_SET_CC_STATUS_PASS) { + reg_err("Set country code failed, status code %d", + regulat_info->status_code); + + pdev = wlan_objmgr_get_pdev_by_id(psoc, phy_id, dbg_id); + if (!pdev) { + reg_err("pdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (tx_ops->set_country_failed) + tx_ops->set_country_failed(pdev); + + wlan_objmgr_pdev_release_ref(pdev, dbg_id); + + if (regulat_info->status_code != REG_CURRENT_ALPHA2_NOT_FOUND) + return QDF_STATUS_E_FAILURE; + + soc_reg->new_user_ctry_pending[phy_id] = false; + soc_reg->new_11d_ctry_pending[phy_id] = false; + soc_reg->world_country_pending[phy_id] = true; + } + + mas_chan_list = soc_reg->mas_chan_params[phy_id].mas_chan_list; + + reg_init_channel_map(regulat_info->dfs_region); + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; + chan_enum++) { + mas_chan_list[chan_enum].chan_num = + channel_map[chan_enum].chan_num; + mas_chan_list[chan_enum].center_freq = + channel_map[chan_enum].center_freq; + mas_chan_list[chan_enum].chan_flags = + REGULATORY_CHAN_DISABLED; + mas_chan_list[chan_enum].state = + CHANNEL_STATE_DISABLE; + if (!soc_reg->retain_nol_across_regdmn_update) + mas_chan_list[chan_enum].nol_chan = false; + } + + soc_reg->num_phy = regulat_info->num_phy; + soc_reg->mas_chan_params[phy_id].phybitmap = + regulat_info->phybitmap; + soc_reg->mas_chan_params[phy_id].dfs_region = + regulat_info->dfs_region; + soc_reg->mas_chan_params[phy_id].ctry_code = + regulat_info->ctry_code; + soc_reg->mas_chan_params[phy_id].reg_dmn_pair = + regulat_info->reg_dmn_pair; + qdf_mem_copy(soc_reg->mas_chan_params[phy_id].current_country, + regulat_info->alpha2, + REG_ALPHA2_LEN + 1); + qdf_mem_copy(soc_reg->cur_country, + regulat_info->alpha2, + REG_ALPHA2_LEN + 1); + reg_debug("set cur_country %.2s", soc_reg->cur_country); + + min_bw_2g = regulat_info->min_bw_2g; + max_bw_2g = regulat_info->max_bw_2g; + reg_rule_2g = regulat_info->reg_rules_2g_ptr; + num_2g_reg_rules = regulat_info->num_2g_reg_rules; + reg_update_max_bw_per_rule(num_2g_reg_rules, + reg_rule_2g, max_bw_2g); + + min_bw_5g = regulat_info->min_bw_5g; + max_bw_5g = regulat_info->max_bw_5g; + reg_rule_5g = regulat_info->reg_rules_5g_ptr; + num_5g_reg_rules = regulat_info->num_5g_reg_rules; + reg_update_max_bw_per_rule(num_5g_reg_rules, + reg_rule_5g, max_bw_5g); + + reg_rules = &soc_reg->mas_chan_params[phy_id].reg_rules; + reg_reset_reg_rules(reg_rules); + + reg_rules->num_of_reg_rules = num_5g_reg_rules + num_2g_reg_rules; + if (reg_rules->num_of_reg_rules > MAX_REG_RULES) { + reg_err("number of reg rules exceeds limit"); + return QDF_STATUS_E_FAILURE; + } + + if (reg_rules->num_of_reg_rules) { + if (num_2g_reg_rules) + qdf_mem_copy(reg_rules->reg_rules, + reg_rule_2g, num_2g_reg_rules * + sizeof(struct cur_reg_rule)); + if (num_5g_reg_rules) + qdf_mem_copy(reg_rules->reg_rules + + num_2g_reg_rules, reg_rule_5g, + num_5g_reg_rules * + sizeof(struct cur_reg_rule)); + } + + if (num_5g_reg_rules != 0) + reg_do_auto_bw_correction(num_5g_reg_rules, + reg_rule_5g, max_bw_5g); + + if (num_2g_reg_rules != 0) + reg_populate_band_channels(MIN_24GHZ_CHANNEL, MAX_24GHZ_CHANNEL, + reg_rule_2g, num_2g_reg_rules, + min_bw_2g, mas_chan_list); + + if (num_5g_reg_rules != 0) { + reg_populate_band_channels(MIN_5GHZ_CHANNEL, MAX_5GHZ_CHANNEL, + reg_rule_5g, num_5g_reg_rules, + min_bw_5g, mas_chan_list); + reg_populate_band_channels(MIN_49GHZ_CHANNEL, + MAX_49GHZ_CHANNEL, + reg_rule_5g, num_5g_reg_rules, + min_bw_5g, mas_chan_list); + reg_populate_6g_band_channels(reg_rule_5g, + num_5g_reg_rules, + min_bw_5g, + mas_chan_list); + } + + soc_reg->chan_list_recvd[phy_id] = true; + status = reg_send_ctl_info(soc_reg, regulat_info, tx_ops); + if (!QDF_IS_STATUS_SUCCESS(status)) + return status; + + if (soc_reg->new_user_ctry_pending[phy_id]) { + soc_reg->new_user_ctry_pending[phy_id] = false; + soc_reg->cc_src = SOURCE_USERSPACE; + soc_reg->user_ctry_set = true; + reg_debug("new user country is set"); + reg_run_11d_state_machine(psoc); + } else if (soc_reg->new_init_ctry_pending[phy_id]) { + soc_reg->new_init_ctry_pending[phy_id] = false; + soc_reg->cc_src = SOURCE_USERSPACE; + reg_debug("new init country is set"); + } else if (soc_reg->new_11d_ctry_pending[phy_id]) { + soc_reg->new_11d_ctry_pending[phy_id] = false; + soc_reg->cc_src = SOURCE_11D; + soc_reg->user_ctry_set = false; + reg_run_11d_state_machine(psoc); + } else if (soc_reg->world_country_pending[phy_id]) { + soc_reg->world_country_pending[phy_id] = false; + soc_reg->cc_src = SOURCE_CORE; + soc_reg->user_ctry_set = false; + reg_run_11d_state_machine(psoc); + } else { + if (soc_reg->cc_src == SOURCE_UNKNOWN && + soc_reg->num_phy == phy_id + 1) + soc_reg->cc_src = SOURCE_DRIVER; + + qdf_mem_copy(soc_reg->mas_chan_params[phy_id].default_country, + regulat_info->alpha2, + REG_ALPHA2_LEN + 1); + + soc_reg->mas_chan_params[phy_id].def_country_code = + regulat_info->ctry_code; + soc_reg->mas_chan_params[phy_id].def_region_domain = + regulat_info->reg_dmn_pair; + + if (soc_reg->cc_src == SOURCE_DRIVER) { + qdf_mem_copy(soc_reg->def_country, + regulat_info->alpha2, + REG_ALPHA2_LEN + 1); + + soc_reg->def_country_code = regulat_info->ctry_code; + soc_reg->def_region_domain = + regulat_info->reg_dmn_pair; + + if (reg_is_world_alpha2(regulat_info->alpha2)) { + soc_reg->cc_src = SOURCE_CORE; + reg_run_11d_state_machine(psoc); + } + } + } + + pdev = wlan_objmgr_get_pdev_by_id(psoc, phy_id, dbg_id); + if (pdev) { + reg_propagate_mas_chan_list_to_pdev(psoc, pdev, &dir); + wlan_objmgr_pdev_release_ref(pdev, dbg_id); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_current_chan_list(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev private obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_mem_copy(chan_list, pdev_priv_obj->cur_chan_list, + NUM_CHANNELS * sizeof(struct regulatory_channel)); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_build_chan_list.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_build_chan_list.h new file mode 100644 index 0000000000000000000000000000000000000000..36fcf13f6e1d183b886a711c76f6034149a458fc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_build_chan_list.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_build_chan_list.h + * This file provides prototypes of the regulatory component to build master + * and current channel list. + */ + +#ifndef __REG_BUILD_CHAN_LIST_H__ +#define __REG_BUILD_CHAN_LIST_H__ + +#define CHAN_12_CENT_FREQ 2467 +#define CHAN_13_CENT_FREQ 2472 + +/** + * reg_reset_reg_rules() - provides the reg domain rules info + * @reg_rules: reg rules pointer + * + * Return: None + */ +void reg_reset_reg_rules(struct reg_rule_info *reg_rules); + +/** + * reg_init_pdev_mas_chan_list() - Initialize pdev master channel list + * @pdev_priv_obj: Pointer to regdb pdev private object. + * @mas_chan_params: Master channel params. + */ +void reg_init_pdev_mas_chan_list( + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj, + struct mas_chan_params *mas_chan_params); + +/** + * reg_save_reg_rules_to_pdev() - Save psoc reg-rules to pdev. + * @pdev_priv_obj: Pointer to regdb pdev private object. + */ +void reg_save_reg_rules_to_pdev( + struct reg_rule_info *psoc_reg_rules, + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj); + +/** + * reg_compute_pdev_current_chan_list() - Compute pdev current channel list. + * @pdev_priv_obj: Pointer to regdb pdev private object. + */ +void reg_compute_pdev_current_chan_list( + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj); + +/** + * reg_propagate_mas_chan_list_to_pdev() - Propagate master channel list to pdev + * @psoc: Pointer to psoc object. + * @object: Void pointer to pdev object. + * @arg: Pointer to direction. + */ +void reg_propagate_mas_chan_list_to_pdev(struct wlan_objmgr_psoc *psoc, + void *object, void *arg); + +/** + * reg_process_master_chan_list() - Compute master channel list based on the + * regulatory rules. + * @reg_info: Pointer to regulatory info + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_process_master_chan_list(struct cur_regulatory_info *reg_info); + +QDF_STATUS reg_get_current_chan_list(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list); + +/** + * reg_update_nol_history_ch() - Set nol-history flag for the channels in the + * list. + * + * @pdev: Pdev ptr. + * @ch_list: Input channel list. + * @num_ch: Number of channels. + * @nol_history_ch: NOL-History flag. + * + * Return: void + */ +void reg_update_nol_history_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *chan_list, + uint8_t num_chan, + bool nol_history_chan); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_callbacks.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_callbacks.c new file mode 100644 index 0000000000000000000000000000000000000000..ed253021d0778bbfaacc6f346c8dac017c89ca3f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_callbacks.c @@ -0,0 +1,406 @@ +/* + * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_callbacks.c + * This file defines regulatory callback functions + */ + +#include +#include +#include +#include +#include "reg_priv_objs.h" +#include "reg_utils.h" +#include +#include "reg_callbacks.h" +#include "reg_services_common.h" +#include "reg_build_chan_list.h" + +/** + * reg_call_chan_change_cbks() - Call registered callback functions on channel + * change. + * @psoc: Pointer to global psoc structure. + * @pdev: Pointer to global pdev structure. + */ +static void reg_call_chan_change_cbks(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + struct chan_change_cbk_entry *cbk_list; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct regulatory_channel *cur_chan_list; + uint32_t ctr; + struct avoid_freq_ind_data *avoid_freq_ind = NULL; + reg_chan_change_callback callback; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_alert("psoc reg component is NULL"); + return; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_alert("pdev reg component is NULL"); + return; + } + + cur_chan_list = qdf_mem_malloc(NUM_CHANNELS * sizeof(*cur_chan_list)); + if (!cur_chan_list) + return; + + qdf_mem_copy(cur_chan_list, + pdev_priv_obj->cur_chan_list, + NUM_CHANNELS * + sizeof(struct regulatory_channel)); + + if (psoc_priv_obj->ch_avoid_ind) { + avoid_freq_ind = qdf_mem_malloc(sizeof(*avoid_freq_ind)); + if (!avoid_freq_ind) + goto skip_ch_avoid_ind; + + qdf_mem_copy(&avoid_freq_ind->freq_list, + &psoc_priv_obj->avoid_freq_list, + sizeof(struct ch_avoid_ind_type)); + qdf_mem_copy(&avoid_freq_ind->chan_list, + &psoc_priv_obj->unsafe_chan_list, + sizeof(struct unsafe_ch_list)); + psoc_priv_obj->ch_avoid_ind = false; + } + +skip_ch_avoid_ind: + cbk_list = psoc_priv_obj->cbk_list; + + for (ctr = 0; ctr < REG_MAX_CHAN_CHANGE_CBKS; ctr++) { + callback = NULL; + qdf_spin_lock_bh(&psoc_priv_obj->cbk_list_lock); + if (cbk_list[ctr].cbk) + callback = cbk_list[ctr].cbk; + qdf_spin_unlock_bh(&psoc_priv_obj->cbk_list_lock); + if (callback) + callback(psoc, pdev, cur_chan_list, avoid_freq_ind, + cbk_list[ctr].arg); + } + qdf_mem_free(cur_chan_list); + if (avoid_freq_ind) + qdf_mem_free(avoid_freq_ind); +} + +/** + * reg_alloc_and_fill_payload() - Alloc and fill payload structure. + * @psoc: Pointer to global psoc structure. + * @pdev: Pointer to global pdev structure. + */ +static void reg_alloc_and_fill_payload(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev, + struct reg_sched_payload **payload) +{ + *payload = qdf_mem_malloc(sizeof(**payload)); + if (*payload) { + (*payload)->psoc = psoc; + (*payload)->pdev = pdev; + } +} + +/** + * reg_chan_change_flush_cbk_sb() - Flush south bound channel change callbacks. + * @msg: Pointer to scheduler msg structure. + */ +static QDF_STATUS reg_chan_change_flush_cbk_sb(struct scheduler_msg *msg) +{ + struct reg_sched_payload *load = msg->bodyptr; + struct wlan_objmgr_psoc *psoc = load->psoc; + struct wlan_objmgr_pdev *pdev = load->pdev; + + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_SB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + qdf_mem_free(load); + + return QDF_STATUS_SUCCESS; +} + +/** + * reg_sched_chan_change_cbks_sb() - Schedule south bound channel change + * callbacks. + * @msg: Pointer to scheduler msg structure. + */ +static QDF_STATUS reg_sched_chan_change_cbks_sb(struct scheduler_msg *msg) +{ + struct reg_sched_payload *load = msg->bodyptr; + struct wlan_objmgr_psoc *psoc = load->psoc; + struct wlan_objmgr_pdev *pdev = load->pdev; + + reg_call_chan_change_cbks(psoc, pdev); + + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_SB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + qdf_mem_free(load); + + return QDF_STATUS_SUCCESS; +} + +/** + * reg_chan_change_flush_cbk_nb() - Flush north bound channel change callbacks. + * @msg: Pointer to scheduler msg structure. + */ +static QDF_STATUS reg_chan_change_flush_cbk_nb(struct scheduler_msg *msg) +{ + struct reg_sched_payload *load = msg->bodyptr; + struct wlan_objmgr_psoc *psoc = load->psoc; + struct wlan_objmgr_pdev *pdev = load->pdev; + + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_NB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_NB_ID); + qdf_mem_free(load); + + return QDF_STATUS_SUCCESS; +} + +/** + * reg_sched_chan_change_cbks_nb() - Schedule north bound channel change + * callbacks. + * @msg: Pointer to scheduler msg structure. + */ +static QDF_STATUS reg_sched_chan_change_cbks_nb(struct scheduler_msg *msg) +{ + struct reg_sched_payload *load = msg->bodyptr; + struct wlan_objmgr_psoc *psoc = load->psoc; + struct wlan_objmgr_pdev *pdev = load->pdev; + + reg_call_chan_change_cbks(psoc, pdev); + + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_NB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_NB_ID); + qdf_mem_free(load); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_send_scheduler_msg_sb(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + struct scheduler_msg msg = {0}; + struct reg_sched_payload *payload; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + QDF_STATUS status; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_alert("pdev reg component is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!pdev_priv_obj->pdev_opened) { + reg_err("hlos not initialized"); + return QDF_STATUS_E_FAILURE; + } + + if (!pdev_priv_obj->chan_list_recvd) { + reg_err("Empty channel list"); + return QDF_STATUS_E_FAILURE; + } + + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_REGULATORY_SB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("error taking psoc ref cnt"); + return status; + } + + status = wlan_objmgr_pdev_try_get_ref(pdev, WLAN_REGULATORY_SB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + reg_err("error taking pdev ref cnt"); + return status; + } + + reg_alloc_and_fill_payload(psoc, pdev, &payload); + if (!payload) { + reg_err("malloc failed"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_SB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + return QDF_STATUS_E_NOMEM; + } + + msg.bodyptr = payload; + msg.callback = reg_sched_chan_change_cbks_sb; + msg.flush_callback = reg_chan_change_flush_cbk_sb; + + status = scheduler_post_message(QDF_MODULE_ID_REGULATORY, + QDF_MODULE_ID_REGULATORY, + QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_SB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + qdf_mem_free(payload); + } + + return status; +} + +QDF_STATUS reg_send_scheduler_msg_nb(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + struct scheduler_msg msg = {0}; + struct reg_sched_payload *payload; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + QDF_STATUS status; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_alert("pdev reg component is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!pdev_priv_obj->pdev_opened) { + reg_err("hlos not initialized"); + return QDF_STATUS_E_FAILURE; + } + + if (!pdev_priv_obj->chan_list_recvd) { + reg_err("Empty channel list"); + return QDF_STATUS_E_FAILURE; + } + + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_REGULATORY_NB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("error taking psoc ref cnt"); + return status; + } + + status = wlan_objmgr_pdev_try_get_ref(pdev, WLAN_REGULATORY_NB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_NB_ID); + reg_err("error taking pdev ref cnt"); + return status; + } + + reg_alloc_and_fill_payload(psoc, pdev, &payload); + if (!payload) { + reg_err("malloc failed"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_NB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_NB_ID); + return QDF_STATUS_E_NOMEM; + } + msg.bodyptr = payload; + msg.callback = reg_sched_chan_change_cbks_nb; + msg.flush_callback = reg_chan_change_flush_cbk_nb; + + status = scheduler_post_message(QDF_MODULE_ID_REGULATORY, + QDF_MODULE_ID_REGULATORY, + QDF_MODULE_ID_OS_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_NB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_NB_ID); + qdf_mem_free(payload); + } + + return status; +} + +QDF_STATUS reg_notify_sap_event(struct wlan_objmgr_pdev *pdev, + bool sap_state) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_objmgr_psoc *psoc; + QDF_STATUS status; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_info("sap_state: %d", sap_state); + + if (pdev_priv_obj->sap_state == sap_state) + return QDF_STATUS_SUCCESS; + + pdev_priv_obj->sap_state = sap_state; + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + status = reg_send_scheduler_msg_sb(psoc, pdev); + + return status; +} + +void reg_register_chan_change_callback(struct wlan_objmgr_psoc *psoc, + reg_chan_change_callback cbk, void *arg) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + uint32_t count; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return; + } + + qdf_spin_lock_bh(&psoc_priv_obj->cbk_list_lock); + for (count = 0; count < REG_MAX_CHAN_CHANGE_CBKS; count++) + if (!psoc_priv_obj->cbk_list[count].cbk) { + psoc_priv_obj->cbk_list[count].cbk = cbk; + psoc_priv_obj->cbk_list[count].arg = arg; + psoc_priv_obj->num_chan_change_cbks++; + break; + } + qdf_spin_unlock_bh(&psoc_priv_obj->cbk_list_lock); + + if (count == REG_MAX_CHAN_CHANGE_CBKS) + reg_err("callback list is full"); +} + +void reg_unregister_chan_change_callback(struct wlan_objmgr_psoc *psoc, + reg_chan_change_callback cbk) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + uint32_t count; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return; + } + + qdf_spin_lock_bh(&psoc_priv_obj->cbk_list_lock); + for (count = 0; count < REG_MAX_CHAN_CHANGE_CBKS; count++) + if (psoc_priv_obj->cbk_list[count].cbk == cbk) { + psoc_priv_obj->cbk_list[count].cbk = NULL; + psoc_priv_obj->num_chan_change_cbks--; + break; + } + qdf_spin_unlock_bh(&psoc_priv_obj->cbk_list_lock); + + if (count == REG_MAX_CHAN_CHANGE_CBKS) + reg_err("callback not found in the list"); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_callbacks.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_callbacks.h new file mode 100644 index 0000000000000000000000000000000000000000..bf4b62789c230811daedab673beeb49bad349089 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_callbacks.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_callbks.h + * This file provides prototypes of reg callbacks. + */ + +#ifndef __REG_CALLBKS_H_ +#define __REG_CALLBKS_H_ + +#ifdef CONFIG_REG_CLIENT +/** + * reg_register_chan_change_callback() - Register channel change callbacks + * @psoc: Pointer to psoc + * @cbk: Pointer to callback function + * @arg: List of arguments + */ +void reg_register_chan_change_callback(struct wlan_objmgr_psoc *psoc, + reg_chan_change_callback cbk, void *arg); + +/** + * reg_unregister_chan_change_callback() - Unregister channel change callbacks + * @psoc: Pointer to psoc + * @cbk: Pointer to callback function + */ +void reg_unregister_chan_change_callback(struct wlan_objmgr_psoc *psoc, + reg_chan_change_callback cbk); + +/** + * reg_notify_sap_event() - Notify regulatory domain for sap event + * @pdev: The physical dev to set the band for + * @sap_state: true for sap start else false + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_notify_sap_event(struct wlan_objmgr_pdev *pdev, + bool sap_state); + +/** + * reg_send_scheduler_msg_sb() - Start scheduler to call list of south bound + * callbacks registered whenever current chan list changes. + * @psoc: Pointer to PSOC structure. + * @pdev: Pointer to PDEV structure. + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_send_scheduler_msg_sb(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); + +/** + * reg_send_scheduler_msg_nb() - Start scheduler to call list of north bound + * @psoc: Pointer to global psoc structure. + * @pdev: Pointer to global pdev structure. + */ +QDF_STATUS reg_send_scheduler_msg_nb(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); +#else +static inline void reg_register_chan_change_callback( + struct wlan_objmgr_psoc *psoc, reg_chan_change_callback cbk, + void *arg) +{ +} + +static inline void reg_unregister_chan_change_callback( + struct wlan_objmgr_psoc *psoc, reg_chan_change_callback cbk) +{ +} + +static inline QDF_STATUS reg_send_scheduler_msg_sb( + struct wlan_objmgr_psoc *psoc, struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS reg_send_scheduler_msg_nb( + struct wlan_objmgr_psoc *psoc, struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db.c new file mode 100644 index 0000000000000000000000000000000000000000..8b47d2f8fcf16d8a80c24260e8fb27547626ebed --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db.c @@ -0,0 +1,1668 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_db.c + * This file implements QCA regulatory database. + * Current implementation conforms to database version 31. + */ + +#include +#include +#include +#include +#include "reg_db.h" + +enum country_code { + CTRY_AFGHANISTAN = 4, + CTRY_ALAND_ISLANDS = 248, + CTRY_ALBANIA = 8, + CTRY_ALGERIA = 12, + CTRY_AMERICAN_SAMOA = 16, + CTRY_ANDORRA = 20, + CTRY_ANGUILLA = 660, + CTRY_ANTIGUA_AND_BARBUDA = 28, + CTRY_ARGENTINA = 32, + CTRY_ARMENIA = 51, + CTRY_MYANMAR = 104, + CTRY_ARUBA = 533, + CTRY_AUSTRALIA = 36, + CTRY_AUSTRIA = 40, + CTRY_AZERBAIJAN = 31, + CTRY_BAHAMAS = 44, + CTRY_BAHRAIN = 48, + CTRY_BANGLADESH = 50, + CTRY_BARBADOS = 52, + CTRY_BELARUS = 112, + CTRY_BELGIUM = 56, + CTRY_BELIZE = 84, + CTRY_BERMUDA = 60, + CTRY_BHUTAN = 64, + CTRY_BOLIVIA = 68, + CTRY_BOSNIA_HERZ = 70, + CTRY_BRAZIL = 76, + CTRY_BRUNEI_DARUSSALAM = 96, + CTRY_BULGARIA = 100, + CTRY_BURKINA_FASO = 854, + CTRY_CAMBODIA = 116, + CTRY_CAMEROON = 120, + CTRY_CANADA = 124, + CTRY_CAYMAN_ISLANDS = 136, + CTRY_CENTRAL_AFRICA_REPUBLIC = 140, + CTRY_CHAD = 148, + CTRY_CHILE = 152, + CTRY_CHINA = 156, + CTRY_CHRISTMAS_ISLAND = 162, + CTRY_COLOMBIA = 170, + CTRY_COOK_ISLANDS = 184, + CTRY_COSTA_RICA = 188, + CTRY_COTE_DIVOIRE = 384, + CTRY_CROATIA = 191, + CTRY_CYPRUS = 196, + CTRY_CZECH = 203, + CTRY_DENMARK = 208, + CTRY_DOMINICA = 212, + CTRY_DOMINICAN_REPUBLIC = 214, + CTRY_ECUADOR = 218, + CTRY_EGYPT = 818, + CTRY_EL_SALVADOR = 222, + CTRY_ESTONIA = 233, + CTRY_ETHIOPIA = 231, + CTRY_FALKLAND_ISLANDS = 238, + CTRY_FAROE_ISLANDS = 234, + CTRY_FINLAND = 246, + CTRY_FRANCE = 250, + CTRY_FRENCH_GUIANA = 254, + CTRY_FRENCH_POLYNESIA = 258, + CTRY_FRENCH_SOUTHERN_TERRITORIES = 260, + CTRY_GEORGIA = 268, + CTRY_GERMANY = 276, + CTRY_GHANA = 288, + CTRY_GIBRALTAR = 292, + CTRY_GREECE = 300, + CTRY_GREENLAND = 304, + CTRY_GRENADA = 308, + CTRY_GUADELOUPE = 312, + CTRY_GUAM = 316, + CTRY_GUATEMALA = 320, + CTRY_GUERNSEY = 831, + CTRY_GUYANA = 328, + CTRY_HAITI = 332, + CTRY_HEARD_ISLAND_AND_MCDONALD_ISLANDS = 334, + CTRY_HOLY_SEE = 336, + CTRY_HONDURAS = 340, + CTRY_HONG_KONG = 344, + CTRY_HUNGARY = 348, + CTRY_ICELAND = 352, + CTRY_INDIA = 356, + CTRY_INDONESIA = 360, + CTRY_IRAQ = 368, + CTRY_IRELAND = 372, + CTRY_ISLE_OF_MAN = 833, + CTRY_ISRAEL = 376, + CTRY_ITALY = 380, + CTRY_JAMAICA = 388, + CTRY_JAPAN = 392, + CTRY_JAPAN15 = 4015, + CTRY_JERSEY = 832, + CTRY_JORDAN = 400, + CTRY_KAZAKHSTAN = 398, + CTRY_KENYA = 404, + CTRY_KOREA_ROC = 410, + CTRY_KUWAIT = 414, + CTRY_LATVIA = 428, + CTRY_LEBANON = 422, + CTRY_LESOTHO = 426, + CTRY_LIECHTENSTEIN = 438, + CTRY_LITHUANIA = 440, + CTRY_LUXEMBOURG = 442, + CTRY_MACAU = 446, + CTRY_MACEDONIA = 807, + CTRY_MALAWI = 454, + CTRY_MALAYSIA = 458, + CTRY_MALDIVES = 462, + CTRY_MALTA = 470, + CTRY_MARSHALL_ISLANDS = 584, + CTRY_MARTINIQUE = 474, + CTRY_MAURITANIA = 478, + CTRY_MAURITIUS = 480, + CTRY_MAYOTTE = 175, + CTRY_MEXICO = 484, + CTRY_MICRONESIA = 583, + CTRY_MOLDOVA = 498, + CTRY_MONACO = 492, + CTRY_MONGOLIA = 496, + CTRY_MONTENEGRO = 499, + CTRY_MONTSERRAT = 500, + CTRY_MOROCCO = 504, + CTRY_NAMIBIA = 516, + CTRY_NEPAL = 524, + CTRY_NETHERLANDS = 528, + CTRY_NETHERLANDS_ANTILLES = 530, + CTRY_NEW_CALEDONIA = 540, + CTRY_NEW_ZEALAND = 554, + CTRY_NIGERIA = 566, + CTRY_NORTHERN_MARIANA_ISLANDS = 580, + CTRY_NICARAGUA = 558, + CTRY_NIUE = 570, + CTRY_NORFOLK_ISLAND = 574, + CTRY_NORWAY = 578, + CTRY_OMAN = 512, + CTRY_PAKISTAN = 586, + CTRY_PALAU = 585, + CTRY_PANAMA = 591, + CTRY_PAPUA_NEW_GUINEA = 598, + CTRY_PARAGUAY = 600, + CTRY_PERU = 604, + CTRY_PHILIPPINES = 608, + CTRY_POLAND = 616, + CTRY_PORTUGAL = 620, + CTRY_PUERTO_RICO = 630, + CTRY_QATAR = 634, + CTRY_REUNION = 638, + CTRY_ROMANIA = 642, + CTRY_RUSSIA = 643, + CTRY_RWANDA = 646, + CTRY_SAINT_BARTHELEMY = 652, + CTRY_SAINT_HELENA_ASCENSION_AND_TRISTAN_DA_CUNHA = 654, + CTRY_SAINT_KITTS_AND_NEVIS = 659, + CTRY_SAINT_LUCIA = 662, + CTRY_SAINT_MARTIN = 663, + CTRY_SAINT_PIERRE_AND_MIQUELON = 666, + CTRY_SAINT_VINCENT_AND_THE_GRENADIENS = 670, + CTRY_SAMOA = 882, + CTRY_SAN_MARINO = 674, + CTRY_SAO_TOME_AND_PRINCIPE = 678, + CTRY_SAUDI_ARABIA = 682, + CTRY_SENEGAL = 686, + CTRY_SERBIA = 688, + CTRY_SINGAPORE = 702, + CTRY_SINT_MAARTEN = 534, + CTRY_SLOVAKIA = 703, + CTRY_SLOVENIA = 705, + CTRY_SOUTH_AFRICA = 710, + CTRY_SPAIN = 724, + CTRY_SURINAME = 740, + CTRY_SRI_LANKA = 144, + CTRY_SVALBARD_AND_JAN_MAYEN = 744, + CTRY_SWEDEN = 752, + CTRY_SWITZERLAND = 756, + CTRY_TAIWAN = 158, + CTRY_TANZANIA = 834, + CTRY_THAILAND = 764, + CTRY_TOGO = 768, + CTRY_TRINIDAD_Y_TOBAGO = 780, + CTRY_TUNISIA = 788, + CTRY_TURKEY = 792, + CTRY_TURKS_AND_CAICOS = 796, + CTRY_UGANDA = 800, + CTRY_UKRAINE = 804, + CTRY_UAE = 784, + CTRY_UNITED_KINGDOM = 826, + CTRY_UNITED_STATES = 840, + CTRY_UNITED_STATES_MINOR_OUTLYING_ISLANDS = 581, + CTRY_URUGUAY = 858, + CTRY_UZBEKISTAN = 860, + CTRY_VANUATU = 548, + CTRY_VENEZUELA = 862, + CTRY_VIET_NAM = 704, + CTRY_VIRGIN_ISLANDS = 850, + CTRY_VIRGIN_ISLANDS_BRITISH = 92, + CTRY_WALLIS_AND_FUTUNA = 876, + CTRY_XA = 4100, /* Used by Linux Client for legacy MKK domain */ + CTRY_YEMEN = 887, + CTRY_ZIMBABWE = 716, +}; + +enum reg_domain { + NULL1_WORLD = 0x03, + + FCC1_FCCA = 0x10, + FCC1_WORLD = 0x11, + FCC2_FCCA = 0x20, + FCC2_WORLD = 0x21, + FCC2_ETSIC = 0x22, + FCC3_FCCA = 0x3A, + FCC3_WORLD = 0x3B, + FCC3_ETSIC = 0x3F, + FCC4_FCCA = 0x12, + FCC5_FCCA = 0x13, + FCC6_WORLD = 0x23, + FCC6_FCCA = 0x14, + FCC8_FCCA = 0x16, + FCC8_WORLD = 0x09, + FCC9_FCCA = 0x17, + FCC10_FCCA = 0x18, + FCC11_WORLD = 0x19, + FCC13_WORLD = 0xE4, + FCC14_FCCB = 0xE6, +#ifdef CONFIG_BAND_6GHZ + FCC15_FCCA = 0xEA, + FCC16_FCCA = 0xE8, + FCC17_FCCA = 0xE9, +#endif + ETSI1_WORLD = 0x37, + ETSI3_WORLD = 0x36, + ETSI4_WORLD = 0x30, + ETSI8_WORLD = 0x3D, + ETSI9_WORLD = 0x3E, + ETSI10_WORLD = 0x24, + ETSI10_FCCA = 0x25, + ETSI11_WORLD = 0x26, + ETSI12_WORLD = 0x28, + ETSI13_WORLD = 0x27, + ETSI14_WORLD = 0x29, + ETSI15_WORLD = 0x31, + + APL1_WORLD = 0x52, + APL1_ETSIC = 0x55, + APL2_WORLD = 0x45, + APL2_ETSIC = 0x56, + APL2_ETSID = 0x41, + APL4_WORLD = 0x42, + APL6_WORLD = 0x5B, + APL8_WORLD = 0x5D, + APL9_WORLD = 0x5E, + APL9_MKKC = 0x48, + APL9_KRRA = 0x43, + APL10_WORLD = 0x5F, + APL11_FCCA = 0x4F, + APL12_WORLD = 0x51, + APL13_WORLD = 0x5A, + APL14_WORLD = 0x57, + APL15_WORLD = 0x59, + APL16_WORLD = 0x70, + APL16_ETSIC = 0x6D, + APL17_ETSIC = 0xE7, + APL17_ETSID = 0xE0, + APL19_ETSIC = 0x71, + APL20_WORLD = 0xE5, + APL23_WORLD = 0xE3, + APL24_ETSIC = 0xE2, + + MKK3_MKKC = 0x82, + MKK5_MKKA = 0x99, + MKK5_MKKC = 0x88, + MKK11_MKKC = 0xD7, + MKK16_MKKC = 0xDF, + MKK17_MKKC = 0xE1, + + WORLD_60 = 0x60, + WORLD_61 = 0x61, + WORLD_62 = 0x62, + WORLD_63 = 0x63, + WORLD_65 = 0x65, + WORLD_64 = 0x64, + WORLD_66 = 0x66, + WORLD_69 = 0x69, + WORLD_67 = 0x67, + WORLD_68 = 0x68, + WORLD_6A = 0x6A, + WORLD_6C = 0x6C, +}; + +#ifndef CONFIG_REG_CLIENT +const struct country_code_to_reg_domain g_all_countries[] = { + {CTRY_AFGHANISTAN, ETSI1_WORLD, "AF", 40, 160, 0}, + {CTRY_ALAND_ISLANDS, FCC3_WORLD, "AX", 40, 160, 0}, + {CTRY_ALBANIA, ETSI1_WORLD, "AL", 40, 160, 0}, + {CTRY_ALGERIA, APL13_WORLD, "DZ", 40, 160, 0}, + {CTRY_AMERICAN_SAMOA, FCC3_FCCA, "AS", 40, 160, 0}, + {CTRY_ANDORRA, ETSI1_WORLD, "AD", 40, 160, 0}, + {CTRY_ANGUILLA, ETSI1_WORLD, "AI", 40, 160, 0}, + {CTRY_ANTIGUA_AND_BARBUDA, ETSI1_WORLD, "AG", 40, 160, 0}, + {CTRY_ARGENTINA, APL16_ETSIC, "AR", 40, 160, 0}, + {CTRY_ARMENIA, APL4_WORLD, "AM", 40, 160, 0}, + {CTRY_ARUBA, ETSI1_WORLD, "AW", 40, 160, 0}, + {CTRY_AUSTRALIA, FCC6_WORLD, "AU", 40, 160, 0}, + {CTRY_AUSTRIA, ETSI1_WORLD, "AT", 40, 160, 0}, + {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", 40, 160, 0}, + {CTRY_BAHAMAS, FCC3_WORLD, "BS", 40, 160, 0}, + {CTRY_BAHRAIN, APL15_WORLD, "BH", 40, 160, 0}, + {CTRY_BANGLADESH, APL1_WORLD, "BD", 40, 160, 0}, + {CTRY_BARBADOS, FCC2_WORLD, "BB", 40, 160, 0}, + {CTRY_BELARUS, ETSI1_WORLD, "BY", 40, 160, 0}, + {CTRY_BELGIUM, ETSI1_WORLD, "BE", 40, 160, 0}, + {CTRY_BELIZE, ETSI8_WORLD, "BZ", 40, 160, 0}, + {CTRY_BERMUDA, FCC3_FCCA, "BM", 40, 160, 0}, + {CTRY_BHUTAN, ETSI1_WORLD, "BT", 40, 160, 0}, + {CTRY_BOLIVIA, FCC8_WORLD, "BO", 40, 160, 0}, + {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA", 40, 160, 0}, + {CTRY_BRAZIL, FCC3_ETSIC, "BR", 40, 160, 0}, + {CTRY_BRUNEI_DARUSSALAM, FCC8_WORLD, "BN", 40, 160, 0}, + {CTRY_BULGARIA, ETSI1_WORLD, "BG", 40, 160, 0}, + {CTRY_BURKINA_FASO, FCC3_WORLD, "BF", 40, 160, 0}, + {CTRY_CAMBODIA, ETSI1_WORLD, "KH", 40, 160, 0}, + {CTRY_CAMEROON, ETSI1_WORLD, "CM", 40, 160, 0}, + {CTRY_CANADA, FCC6_FCCA, "CA", 40, 160, 0}, + {CTRY_CAYMAN_ISLANDS, FCC3_WORLD, "KY", 40, 160, 0}, + {CTRY_CENTRAL_AFRICA_REPUBLIC, FCC3_WORLD, "CF", 40, 40, 0}, + {CTRY_CHAD, ETSI1_WORLD, "TD", 40, 160, 0}, + {CTRY_CHILE, FCC13_WORLD, "CL", 40, 160, 0}, + {CTRY_CHINA, APL14_WORLD, "CN", 40, 160, 0}, + {CTRY_CHRISTMAS_ISLAND, FCC3_WORLD, "CX", 40, 160, 0}, + {CTRY_COLOMBIA, FCC3_WORLD, "CO", 40, 160, 0}, + {CTRY_COOK_ISLANDS, FCC3_WORLD, "CK", 40, 160, 0}, + {CTRY_COSTA_RICA, FCC3_WORLD, "CR", 40, 160, 0}, + {CTRY_COTE_DIVOIRE, FCC3_WORLD, "CI", 40, 160, 0}, + {CTRY_CROATIA, ETSI1_WORLD, "HR", 40, 160, 0}, + {CTRY_CYPRUS, ETSI1_WORLD, "CY", 40, 160, 0}, + {CTRY_CZECH, ETSI1_WORLD, "CZ", 40, 160, 0}, + {CTRY_DENMARK, ETSI1_WORLD, "DK", 40, 160, 0}, + {CTRY_DOMINICA, FCC1_FCCA, "DM", 40, 160, 0}, + {CTRY_DOMINICAN_REPUBLIC, FCC3_FCCA, "DO", 40, 160, 0}, + {CTRY_ECUADOR, FCC3_FCCA, "EC", 40, 160, 0}, + {CTRY_EGYPT, ETSI3_WORLD, "EG", 40, 160, 0}, + {CTRY_EL_SALVADOR, FCC3_WORLD, "SV", 40, 160, 0}, + {CTRY_ESTONIA, ETSI1_WORLD, "EE", 40, 160, 0}, + {CTRY_ETHIOPIA, ETSI1_WORLD, "ET", 40, 160, 0}, + {CTRY_FALKLAND_ISLANDS, ETSI1_WORLD, "FK", 40, 160, 0}, + {CTRY_FAROE_ISLANDS, ETSI1_WORLD, "FO", 40, 160, 0}, + {CTRY_FINLAND, ETSI1_WORLD, "FI", 40, 160, 0}, + {CTRY_FRANCE, ETSI1_WORLD, "FR", 40, 160, 0}, + {CTRY_FRENCH_GUIANA, ETSI1_WORLD, "GF", 40, 160, 0}, + {CTRY_FRENCH_POLYNESIA, ETSI1_WORLD, "PF", 40, 160, 0}, + {CTRY_FRENCH_SOUTHERN_TERRITORIES, ETSI1_WORLD, "TF", 40, 160, 0}, + {CTRY_GEORGIA, ETSI1_WORLD, "GE", 40, 160, 0}, + {CTRY_GERMANY, ETSI1_WORLD, "DE", 40, 160, 0}, + {CTRY_GHANA, FCC3_WORLD, "GH", 40, 160, 0}, + {CTRY_GIBRALTAR, ETSI1_WORLD, "GI", 40, 160, 0}, + {CTRY_GREECE, ETSI1_WORLD, "GR", 40, 160, 0}, + {CTRY_GREENLAND, ETSI1_WORLD, "GL", 40, 160, 0}, + {CTRY_GRENADA, FCC3_FCCA, "GD", 40, 160, 0}, + {CTRY_GUADELOUPE, ETSI1_WORLD, "GP", 40, 160, 0}, + {CTRY_GUAM, FCC3_FCCA, "GU", 40, 160, 0}, + {CTRY_GUATEMALA, ETSI1_WORLD, "GT", 40, 160, 0}, + {CTRY_GUERNSEY, ETSI1_WORLD, "GG", 40, 160, 0}, + {CTRY_GUYANA, APL1_ETSIC, "GY", 40, 160, 0}, + {CTRY_HAITI, FCC3_FCCA, "HT", 40, 160, 0}, + {CTRY_HEARD_ISLAND_AND_MCDONALD_ISLANDS, FCC6_WORLD, "HM", 40, 160, 0}, + {CTRY_HOLY_SEE, ETSI1_WORLD, "VA", 40, 160, 0}, + {CTRY_HONDURAS, FCC3_WORLD, "HN", 40, 160, 0}, + {CTRY_HONG_KONG, FCC3_WORLD, "HK", 40, 160, 0}, + {CTRY_HUNGARY, ETSI1_WORLD, "HU", 40, 160, 0}, + {CTRY_ICELAND, ETSI1_WORLD, "IS", 40, 160, 0}, + {CTRY_INDIA, APL19_ETSIC, "IN", 40, 160, 0}, + {CTRY_INDONESIA, APL2_ETSID, "ID", 40, 80, 0}, + {CTRY_IRAQ, ETSI1_WORLD, "IQ", 40, 160, 0}, + {CTRY_IRELAND, ETSI1_WORLD, "IE", 40, 160, 0}, + {CTRY_ISLE_OF_MAN, ETSI1_WORLD, "IM", 40, 160, 0}, + {CTRY_ISRAEL, ETSI3_WORLD, "IL", 40, 160, 0}, + {CTRY_ITALY, ETSI1_WORLD, "IT", 40, 160, 0}, + {CTRY_JAMAICA, FCC13_WORLD, "JM", 40, 160, 0}, + {CTRY_JAPAN, MKK17_MKKC, "JP", 40, 160, 0}, + {CTRY_JAPAN15, MKK17_MKKC, "JP", 40, 160, 0}, + {CTRY_JERSEY, ETSI1_WORLD, "JE", 40, 160, 0}, + {CTRY_JORDAN, APL4_WORLD, "JO", 40, 160, 0}, + {CTRY_KAZAKHSTAN, MKK5_MKKC, "KZ", 40, 160, 0}, + {CTRY_KENYA, ETSI13_WORLD, "KE", 40, 160, 0}, + {CTRY_KOREA_ROC, APL9_MKKC, "KR", 40, 160, 0}, + {CTRY_KUWAIT, ETSI1_WORLD, "KW", 40, 160, 0}, + {CTRY_LATVIA, ETSI1_WORLD, "LV", 40, 160, 0}, + {CTRY_LEBANON, FCC3_WORLD, "LB", 40, 160, 0}, + {CTRY_LESOTHO, ETSI1_WORLD, "LS", 40, 160, 0}, + {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI", 40, 160, 0}, + {CTRY_LITHUANIA, ETSI1_WORLD, "LT", 40, 160, 0}, + {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU", 40, 160, 0}, + {CTRY_MACAU, FCC3_WORLD, "MO", 40, 160, 0}, + {CTRY_MACEDONIA, ETSI1_WORLD, "MK", 40, 160, 0}, + {CTRY_MALAWI, ETSI1_WORLD, "MW", 40, 160, 0}, + {CTRY_MALAYSIA, FCC11_WORLD, "MY", 40, 160, 0}, + {CTRY_MALDIVES, APL6_WORLD, "MV", 40, 160, 0}, + {CTRY_MALTA, ETSI1_WORLD, "MT", 40, 160, 0}, + {CTRY_MARSHALL_ISLANDS, FCC3_FCCA, "MH", 40, 160, 0}, + {CTRY_MARTINIQUE, ETSI1_WORLD, "MQ", 40, 160, 0}, + {CTRY_MAURITANIA, ETSI1_WORLD, "MR", 40, 160, 0}, + {CTRY_MAURITIUS, ETSI1_WORLD, "MU", 40, 160, 0}, + {CTRY_MAYOTTE, ETSI1_WORLD, "YT", 40, 160, 0}, + {CTRY_MEXICO, FCC3_ETSIC, "MX", 40, 160, 0}, + {CTRY_MICRONESIA, FCC3_FCCA, "FM", 40, 160, 0}, + {CTRY_MOLDOVA, ETSI1_WORLD, "MD", 40, 160, 0}, + {CTRY_MONACO, ETSI1_WORLD, "MC", 40, 160, 0}, + {CTRY_MONGOLIA, FCC3_WORLD, "MN", 40, 160, 0}, + {CTRY_MONTENEGRO, ETSI1_WORLD, "ME", 40, 160, 0}, + {CTRY_MONTSERRAT, ETSI1_WORLD, "MS", 40, 160, 0}, + {CTRY_MOROCCO, ETSI3_WORLD, "MA", 40, 160, 0}, + {CTRY_MYANMAR, APL1_WORLD, "MM", 40, 160, 0}, + {CTRY_NAMIBIA, APL20_WORLD, "NA", 40, 160, 0}, + {CTRY_NEPAL, APL23_WORLD, "NP", 40, 160, 0}, + {CTRY_NETHERLANDS, ETSI1_WORLD, "NL", 40, 160, 0}, + {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN", 40, 160, 0}, + {CTRY_NEW_CALEDONIA, ETSI1_WORLD, "NC", 40, 160, 0}, + {CTRY_NEW_ZEALAND, FCC3_ETSIC, "NZ", 40, 160, 0}, + {CTRY_NIGERIA, APL6_WORLD, "NG", 40, 160, 0}, + {CTRY_NORTHERN_MARIANA_ISLANDS, FCC3_FCCA, "MP", 40, 160, 0}, + {CTRY_NICARAGUA, FCC3_FCCA, "NI", 40, 160, 0}, + {CTRY_NIUE, ETSI1_WORLD, "NU", 40, 160, 0}, + {CTRY_NORFOLK_ISLAND, FCC6_WORLD, "NF", 40, 160, 0}, + {CTRY_NORWAY, ETSI1_WORLD, "NO", 40, 160, 0}, + {CTRY_OMAN, ETSI1_WORLD, "OM", 40, 160, 0}, + {CTRY_PAKISTAN, APL1_ETSIC, "PK", 40, 160, 0}, + {CTRY_PALAU, FCC3_FCCA, "PW", 40, 160, 0}, + {CTRY_PANAMA, FCC14_FCCB, "PA", 40, 160, 0}, + {CTRY_PAPUA_NEW_GUINEA, FCC3_WORLD, "PG", 40, 160, 0}, + {CTRY_PARAGUAY, FCC3_WORLD, "PY", 40, 160, 0}, + {CTRY_PERU, FCC3_WORLD, "PE", 40, 160, 0}, + {CTRY_PHILIPPINES, FCC3_WORLD, "PH", 40, 160, 0}, + {CTRY_POLAND, ETSI1_WORLD, "PL", 40, 160, 0}, + {CTRY_PORTUGAL, ETSI1_WORLD, "PT", 40, 160, 0}, + {CTRY_PUERTO_RICO, FCC3_FCCA, "PR", 40, 160, 0}, + {CTRY_QATAR, ETSI14_WORLD, "QA", 40, 160, 0}, + {CTRY_REUNION, ETSI1_WORLD, "RE", 40, 160, 0}, + {CTRY_ROMANIA, ETSI1_WORLD, "RO", 40, 160, 0}, + {CTRY_RUSSIA, ETSI8_WORLD, "RU", 40, 160, 0}, + {CTRY_RWANDA, FCC3_WORLD, "RW", 40, 160, 0}, + {CTRY_SAINT_BARTHELEMY, ETSI1_WORLD, "BL", 40, 160, 0}, + {CTRY_SAINT_HELENA_ASCENSION_AND_TRISTAN_DA_CUNHA, ETSI1_WORLD, "SH", + 40, 160, 0}, + {CTRY_SAINT_KITTS_AND_NEVIS, APL10_WORLD, "KN", 40, 160, 0}, + {CTRY_SAINT_LUCIA, APL10_WORLD, "LC", 40, 160, 0}, + {CTRY_SAINT_MARTIN, ETSI1_WORLD, "MF", 40, 160, 0}, + {CTRY_SAINT_PIERRE_AND_MIQUELON, ETSI1_WORLD, "PM", 40, 160, 0}, + {CTRY_SAINT_VINCENT_AND_THE_GRENADIENS, ETSI1_WORLD, + "VC" , 40, 160, 0}, + {CTRY_SAMOA, ETSI1_WORLD, "WS", 40, 40, 0}, + {CTRY_SAN_MARINO, FCC3_FCCA, "SM", 40, 160, 0}, + {CTRY_SAO_TOME_AND_PRINCIPE, FCC3_WORLD, "ST", 40, 160, 0}, + {CTRY_SAUDI_ARABIA, ETSI15_WORLD, "SA", 40, 160, 0}, + {CTRY_SENEGAL, FCC13_WORLD, "SN", 40, 160, 0}, + {CTRY_SERBIA, ETSI1_WORLD, "RS", 40, 160, 0}, + {CTRY_SINGAPORE, FCC3_WORLD, "SG", 40, 160, 0}, + {CTRY_SINT_MAARTEN, ETSI1_WORLD, "SX", 40, 160, 0}, + {CTRY_SLOVAKIA, ETSI1_WORLD, "SK", 40, 160, 0}, + {CTRY_SLOVENIA, ETSI1_WORLD, "SI", 40, 160, 0}, + {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA", 40, 160, 0}, + {CTRY_SPAIN, ETSI1_WORLD, "ES", 40, 160, 0}, + {CTRY_SURINAME, ETSI1_WORLD, "SR", 40, 160, 0}, + {CTRY_SRI_LANKA, FCC3_ETSIC, "LK", 40, 160, 0}, + {CTRY_SVALBARD_AND_JAN_MAYEN, FCC6_WORLD, "SJ", 40, 160, 0}, + {CTRY_SWEDEN, ETSI1_WORLD, "SE", 40, 160, 0}, + {CTRY_SWITZERLAND, ETSI1_WORLD, "CH", 40, 160, 0}, + {CTRY_TAIWAN, FCC3_FCCA, "TW", 40, 160, 0}, + {CTRY_TANZANIA, APL1_WORLD, "TZ", 40, 160, 0}, + {CTRY_THAILAND, FCC3_WORLD, "TH", 40, 160, 0}, + {CTRY_TOGO, ETSI1_WORLD, "TG", 40, 40, 0}, + {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT", 40, 160, 0}, + {CTRY_TUNISIA, ETSI3_WORLD, "TN", 40, 160, 0}, + {CTRY_TURKEY, ETSI1_WORLD, "TR", 40, 160, 0}, + {CTRY_TURKS_AND_CAICOS, FCC3_WORLD, "TC", 40, 160, 0}, + {CTRY_UGANDA, FCC3_WORLD, "UG", 40, 160, 0}, + {CTRY_UKRAINE, ETSI9_WORLD, "UA", 40, 160, 0}, + {CTRY_UAE, FCC3_WORLD, "AE", 40, 160, 0}, + {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB", 40, 160, 0}, + {CTRY_UNITED_STATES, FCC8_FCCA, "US", 40, 160, 0}, + {CTRY_UNITED_STATES_MINOR_OUTLYING_ISLANDS, FCC8_FCCA, "UM", 40, 160, + 0}, + {CTRY_URUGUAY, FCC2_WORLD, "UY", 40, 160, 0}, + {CTRY_UZBEKISTAN, ETSI3_WORLD, "UZ", 40, 160, 0}, + {CTRY_VANUATU, FCC3_WORLD, "VU", 40, 160, 0}, + {CTRY_VENEZUELA, FCC2_ETSIC, "VE", 40, 160, 0}, + {CTRY_VIET_NAM, FCC3_WORLD, "VN", 40, 80, 0}, + {CTRY_VIRGIN_ISLANDS, FCC3_FCCA, "VI", 40, 160, 0}, + {CTRY_VIRGIN_ISLANDS_BRITISH, ETSI1_WORLD, "VG", 40, 160, 0}, + {CTRY_WALLIS_AND_FUTUNA, ETSI1_WORLD, "WF", 40, 160, 0}, + {CTRY_YEMEN, ETSI1_WORLD, "YE", 40, 160, 0}, + {CTRY_ZIMBABWE, ETSI1_WORLD, "ZW", 40, 160, 0}, +}; +#else +#ifdef WLAN_FEATURE_DSRC +const struct country_code_to_reg_domain g_all_countries[] = { + {CTRY_AFGHANISTAN, ETSI1_WORLD, "AF", 40, 160, 0}, + {CTRY_ALAND_ISLANDS, FCC3_WORLD, "AX", 40, 160, 0}, + {CTRY_ALBANIA, ETSI13_WORLD, "AL", 40, 160, 0}, + {CTRY_ALGERIA, APL13_WORLD, "DZ", 40, 160, 0}, + {CTRY_AMERICAN_SAMOA, FCC3_FCCA, "AS", 40, 160, 0}, + {CTRY_ANDORRA, ETSI1_WORLD, "AD", 40, 160, 0}, + {CTRY_ANGUILLA, ETSI1_WORLD, "AI", 40, 160, 0}, + {CTRY_ANTIGUA_AND_BARBUDA, ETSI10_WORLD, "AG", 40, 160, 0}, + {CTRY_ARGENTINA, APL17_ETSIC, "AR", 40, 160, 0}, + {CTRY_ARMENIA, ETSI4_WORLD, "AM", 40, 160, 0}, + {CTRY_ARUBA, ETSI1_WORLD, "AW", 40, 160, 0}, + {CTRY_AUSTRALIA, FCC6_WORLD, "AU", 40, 160, 0}, + {CTRY_AUSTRIA, ETSI10_WORLD, "AT", 40, 160, 0}, + {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", 40, 160, 0}, + {CTRY_BAHAMAS, FCC3_WORLD, "BS", 40, 160, 0}, + {CTRY_BAHRAIN, APL15_WORLD, "BH", 40, 160, 0}, + {CTRY_BANGLADESH, APL1_WORLD, "BD", 40, 160, 0}, + {CTRY_BARBADOS, FCC2_WORLD, "BB", 40, 160, 0}, + {CTRY_BELARUS, ETSI1_WORLD, "BY", 40, 160, 0}, + {CTRY_BELGIUM, ETSI10_WORLD, "BE", 40, 160, 0}, + {CTRY_BELIZE, ETSI8_WORLD, "BZ", 40, 160, 0}, + {CTRY_BERMUDA, FCC3_FCCA, "BM", 40, 160, 0}, + {CTRY_BHUTAN, ETSI1_WORLD, "BT", 40, 160, 0}, + {CTRY_BOLIVIA, FCC3_WORLD, "BO", 40, 160, 0}, + {CTRY_BOSNIA_HERZ, ETSI13_WORLD, "BA", 40, 160, 0}, + {CTRY_BRAZIL, FCC3_ETSIC, "BR", 40, 160, 0}, + {CTRY_BRUNEI_DARUSSALAM, FCC3_WORLD, "BN", 40, 160, 0}, + {CTRY_BULGARIA, ETSI10_WORLD, "BG", 40, 160, 0}, + {CTRY_BURKINA_FASO, FCC3_WORLD, "BF", 40, 160, 0}, + {CTRY_CAMBODIA, ETSI1_WORLD, "KH", 40, 160, 0}, + {CTRY_CAMEROON, ETSI1_WORLD, "CM", 40, 160, 0}, + {CTRY_CANADA, FCC3_FCCA, "CA", 40, 160, 0}, + {CTRY_CAYMAN_ISLANDS, FCC3_WORLD, "KY", 40, 160, 0}, + {CTRY_CENTRAL_AFRICA_REPUBLIC, FCC3_WORLD, "CF", 40, 40, 0}, + {CTRY_CHAD, ETSI1_WORLD, "TD", 40, 160, 0}, + {CTRY_CHILE, FCC13_WORLD, "CL", 40, 160, 0}, + {CTRY_CHINA, APL14_WORLD, "CN", 40, 160, 0}, + {CTRY_CHRISTMAS_ISLAND, FCC3_WORLD, "CX", 40, 160, 0}, + {CTRY_COLOMBIA, FCC3_WORLD, "CO", 40, 160, 0}, + {CTRY_COOK_ISLANDS, FCC3_WORLD, "CK", 40, 160, 0}, + {CTRY_COSTA_RICA, FCC3_WORLD, "CR", 40, 160, 0}, + {CTRY_COTE_DIVOIRE, FCC3_WORLD, "CI", 40, 160, 0}, + {CTRY_CROATIA, ETSI10_WORLD, "HR", 40, 160, 0}, + {CTRY_CYPRUS, ETSI10_WORLD, "CY", 40, 160, 0}, + {CTRY_CZECH, ETSI10_WORLD, "CZ", 40, 160, 0}, + {CTRY_DENMARK, ETSI10_WORLD, "DK", 40, 160, 0}, + {CTRY_DOMINICA, FCC2_FCCA, "DM", 40, 160, 0}, + {CTRY_DOMINICAN_REPUBLIC, FCC3_FCCA, "DO", 40, 160, 0}, + {CTRY_ECUADOR, FCC3_FCCA, "EC", 40, 160, 0}, + {CTRY_EGYPT, ETSI3_WORLD, "EG", 40, 160, 0}, + {CTRY_EL_SALVADOR, FCC3_WORLD, "SV", 40, 160, 0}, + {CTRY_ESTONIA, ETSI10_WORLD, "EE", 40, 160, 0}, + {CTRY_ETHIOPIA, ETSI1_WORLD, "ET", 40, 160, 0}, + {CTRY_FALKLAND_ISLANDS, ETSI10_WORLD, "FK", 40, 160, 0}, + {CTRY_FAROE_ISLANDS, ETSI10_WORLD, "FO", 40, 160, 0}, + {CTRY_FINLAND, ETSI10_WORLD, "FI", 40, 160, 0}, + {CTRY_FRANCE, ETSI10_WORLD, "FR", 40, 160, 0}, + {CTRY_FRENCH_GUIANA, ETSI10_WORLD, "GF", 40, 160, 0}, + {CTRY_FRENCH_POLYNESIA, ETSI10_WORLD, "PF", 40, 160, 0}, + {CTRY_FRENCH_SOUTHERN_TERRITORIES, ETSI10_WORLD, "TF", 40, 160, 0}, + {CTRY_GEORGIA, ETSI1_WORLD, "GE", 40, 160, 0}, + {CTRY_GERMANY, ETSI10_WORLD, "DE", 40, 160, 0}, + {CTRY_GHANA, FCC3_WORLD, "GH", 40, 160, 0}, + {CTRY_GIBRALTAR, ETSI10_WORLD, "GI", 40, 160, 0}, + {CTRY_GREECE, ETSI10_WORLD, "GR", 40, 160, 0}, + {CTRY_GREENLAND, ETSI10_WORLD, "GL", 40, 160, 0}, + {CTRY_GRENADA, FCC3_FCCA, "GD", 40, 160, 0}, + {CTRY_GUADELOUPE, ETSI1_WORLD, "GP", 40, 160, 0}, + {CTRY_GUAM, FCC10_FCCA, "GU", 40, 160, 0}, + {CTRY_GUATEMALA, ETSI1_WORLD, "GT", 40, 160, 0}, + {CTRY_GUERNSEY, ETSI10_WORLD, "GG", 40, 160, 0}, + {CTRY_GUYANA, APL1_ETSIC, "GY", 40, 160, 0}, + {CTRY_HAITI, FCC3_FCCA, "HT", 40, 160, 0}, + {CTRY_HEARD_ISLAND_AND_MCDONALD_ISLANDS, FCC6_WORLD, "HM", 40, 160, 0}, + {CTRY_HOLY_SEE, ETSI10_WORLD, "VA", 40, 160, 0}, + {CTRY_HONDURAS, FCC13_WORLD, "HN", 40, 160, 0}, + {CTRY_HONG_KONG, FCC3_WORLD, "HK", 40, 160, 0}, + {CTRY_HUNGARY, ETSI10_WORLD, "HU", 40, 160, 0}, + {CTRY_ICELAND, ETSI10_WORLD, "IS", 40, 160, 0}, + {CTRY_INDIA, APL15_WORLD, "IN", 40, 160, 0}, + {CTRY_INDONESIA, APL2_ETSID, "ID", 40, 80, 0}, + {CTRY_IRAQ, ETSI1_WORLD, "IQ", 40, 160, 0}, + {CTRY_IRELAND, ETSI10_WORLD, "IE", 40, 160, 0}, + {CTRY_ISLE_OF_MAN, ETSI10_WORLD, "IM", 40, 160, 0}, + {CTRY_ISRAEL, ETSI3_WORLD, "IL", 40, 160, 0}, + {CTRY_ITALY, ETSI10_WORLD, "IT", 40, 160, 0}, + {CTRY_JAMAICA, FCC13_WORLD, "JM", 40, 160, 0}, + {CTRY_JAPAN, MKK17_MKKC, "JP", 40, 160, 0}, + {CTRY_JAPAN15, MKK5_MKKC, "JP", 40, 160, 0}, + {CTRY_XA, MKK5_MKKA, "XA", 40, 160, 0}, + {CTRY_JERSEY, ETSI10_WORLD, "JE", 40, 160, 0}, + {CTRY_JORDAN, APL4_WORLD, "JO", 40, 160, 0}, + {CTRY_KAZAKHSTAN, MKK5_MKKC, "KZ", 40, 160, 0}, + {CTRY_KENYA, ETSI13_WORLD, "KE", 40, 160, 0}, + {CTRY_KOREA_ROC, APL9_MKKC, "KR", 40, 160, 0}, + {CTRY_KUWAIT, ETSI13_WORLD, "KW", 40, 160, 0}, + {CTRY_LATVIA, ETSI10_WORLD, "LV", 40, 160, 0}, + {CTRY_LEBANON, FCC3_WORLD, "LB", 40, 160, 0}, + {CTRY_LESOTHO, ETSI1_WORLD, "LS", 40, 160, 0}, + {CTRY_LIECHTENSTEIN, ETSI10_WORLD, "LI", 40, 160, 0}, + {CTRY_LITHUANIA, ETSI10_WORLD, "LT", 40, 160, 0}, + {CTRY_LUXEMBOURG, ETSI10_WORLD, "LU", 40, 160, 0}, + {CTRY_MACAU, FCC3_WORLD, "MO", 40, 160, 0}, + {CTRY_MACEDONIA, ETSI13_WORLD, "MK", 40, 160, 0}, + {CTRY_MALAWI, ETSI1_WORLD, "MW", 40, 160, 0}, + {CTRY_MALAYSIA, FCC11_WORLD, "MY", 40, 160, 0}, + {CTRY_MALDIVES, APL6_WORLD, "MV", 40, 160, 0}, + {CTRY_MALTA, ETSI10_WORLD, "MT", 40, 160, 0}, + {CTRY_MARSHALL_ISLANDS, FCC3_FCCA, "MH", 40, 160, 0}, + {CTRY_MARTINIQUE, ETSI10_WORLD, "MQ", 40, 160, 0}, + {CTRY_MAURITANIA, ETSI1_WORLD, "MR", 40, 160, 0}, + {CTRY_MAURITIUS, ETSI13_WORLD, "MU", 40, 160, 0}, + {CTRY_MAYOTTE, ETSI1_WORLD, "YT", 40, 160, 0}, + {CTRY_MEXICO, FCC3_ETSIC, "MX", 40, 160, 0}, + {CTRY_MICRONESIA, FCC3_FCCA, "FM", 40, 160, 0}, + {CTRY_MOLDOVA, ETSI13_WORLD, "MD", 40, 160, 0}, + {CTRY_MONACO, ETSI10_WORLD, "MC", 40, 160, 0}, + {CTRY_MONGOLIA, FCC3_WORLD, "MN", 40, 160, 0}, + {CTRY_MONTENEGRO, ETSI10_WORLD, "ME", 40, 160, 0}, + {CTRY_MONTSERRAT, ETSI10_WORLD, "MS", 40, 160, 0}, + {CTRY_MOROCCO, ETSI3_WORLD, "MA", 40, 160, 0}, + {CTRY_MYANMAR, APL1_WORLD, "MM", 40, 160, 0}, + {CTRY_NAMIBIA, APL20_WORLD, "NA", 40, 160, 0}, + {CTRY_NEPAL, APL23_WORLD, "NP", 40, 160, 0}, + {CTRY_NETHERLANDS, ETSI10_WORLD, "NL", 40, 160, 0}, + {CTRY_NETHERLANDS_ANTILLES, ETSI10_WORLD, "AN", 40, 160, 0}, + {CTRY_NEW_CALEDONIA, ETSI10_WORLD, "NC", 40, 160, 0}, + {CTRY_NEW_ZEALAND, FCC3_ETSIC, "NZ", 40, 160, 0}, + {CTRY_NIGERIA, APL6_WORLD, "NG", 40, 160, 0}, + {CTRY_NORTHERN_MARIANA_ISLANDS, FCC10_FCCA, "MP", 40, 160, 0}, + {CTRY_NICARAGUA, FCC3_FCCA, "NI", 40, 160, 0}, + {CTRY_NIUE, ETSI10_WORLD, "NU", 40, 160, 0}, + {CTRY_NORFOLK_ISLAND, FCC6_WORLD, "NF", 40, 160, 0}, + {CTRY_NORWAY, ETSI10_WORLD, "NO", 40, 160, 0}, + {CTRY_OMAN, ETSI1_WORLD, "OM", 40, 160, 0}, + {CTRY_PAKISTAN, APL1_ETSIC, "PK", 40, 160, 0}, + {CTRY_PALAU, FCC3_FCCA, "PW", 40, 160, 0}, + {CTRY_PANAMA, FCC14_FCCB, "PA", 40, 160, 0}, + {CTRY_PAPUA_NEW_GUINEA, FCC3_WORLD, "PG", 40, 160, 0}, + {CTRY_PARAGUAY, FCC3_WORLD, "PY", 40, 160, 0}, + {CTRY_PERU, FCC3_WORLD, "PE", 40, 160, 0}, + {CTRY_PHILIPPINES, FCC3_WORLD, "PH", 40, 160, 0}, + {CTRY_POLAND, ETSI10_WORLD, "PL", 40, 160, 0}, + {CTRY_PORTUGAL, ETSI10_WORLD, "PT", 40, 160, 0}, + {CTRY_PUERTO_RICO, FCC10_FCCA, "PR", 40, 160, 0}, + {CTRY_QATAR, ETSI14_WORLD, "QA", 40, 160, 0}, + {CTRY_REUNION, ETSI1_WORLD, "RE", 40, 160, 0}, + {CTRY_ROMANIA, ETSI10_WORLD, "RO", 40, 160, 0}, + {CTRY_RUSSIA, ETSI8_WORLD, "RU", 40, 160, 0}, + {CTRY_RWANDA, FCC3_WORLD, "RW", 40, 160, 0}, + {CTRY_SAINT_BARTHELEMY, ETSI1_WORLD, "BL", 40, 160, 0}, + {CTRY_SAINT_HELENA_ASCENSION_AND_TRISTAN_DA_CUNHA, ETSI10_WORLD, "SH", + 40, 160, 0}, + {CTRY_SAINT_KITTS_AND_NEVIS, APL10_WORLD, "KN", 40, 160, 0}, + {CTRY_SAINT_LUCIA, APL10_WORLD, "LC", 40, 160, 0}, + {CTRY_SAINT_MARTIN, ETSI1_WORLD, "MF", 40, 160, 0}, + {CTRY_SAINT_PIERRE_AND_MIQUELON, ETSI13_WORLD, "PM", 40, 160, 0}, + {CTRY_SAINT_VINCENT_AND_THE_GRENADIENS, ETSI13_WORLD, "VC", + 40, 160, 0}, + {CTRY_SAMOA, ETSI1_WORLD, "WS", 40, 40, 0}, + {CTRY_SAN_MARINO, FCC3_FCCA, "SM", 40, 160, 0}, + {CTRY_SAO_TOME_AND_PRINCIPE, FCC3_WORLD, "ST", 40, 160, 0}, + {CTRY_SAUDI_ARABIA, ETSI15_WORLD, "SA", 40, 160, 0}, + {CTRY_SENEGAL, FCC13_WORLD, "SN", 40, 160, 0}, + {CTRY_SERBIA, ETSI13_WORLD, "RS", 40, 160, 0}, + {CTRY_SINGAPORE, FCC3_WORLD, "SG", 40, 160, 0}, + {CTRY_SINT_MAARTEN, ETSI10_WORLD, "SX", 40, 160, 0}, + {CTRY_SLOVAKIA, ETSI10_WORLD, "SK", 40, 160, 0}, + {CTRY_SLOVENIA, ETSI10_WORLD, "SI", 40, 160, 0}, + {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA", 40, 160, 0}, + {CTRY_SPAIN, ETSI10_WORLD, "ES", 40, 160, 0}, + {CTRY_SURINAME, ETSI1_WORLD, "SR", 40, 160, 0}, + {CTRY_SRI_LANKA, FCC3_ETSIC, "LK", 40, 160, 0}, + {CTRY_SVALBARD_AND_JAN_MAYEN, FCC6_WORLD, "SJ", 40, 160, 0}, + {CTRY_SWEDEN, ETSI10_WORLD, "SE", 40, 160, 0}, + {CTRY_SWITZERLAND, ETSI10_WORLD, "CH", 40, 160, 0}, + {CTRY_TAIWAN, FCC3_FCCA, "TW", 40, 160, 0}, + {CTRY_TANZANIA, APL1_WORLD, "TZ", 40, 160, 0}, + {CTRY_THAILAND, FCC3_WORLD, "TH", 40, 160, 0}, + {CTRY_TOGO, ETSI1_WORLD, "TG", 40, 40, 0}, + {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT", 40, 160, 0}, + {CTRY_TUNISIA, ETSI3_WORLD, "TN", 40, 160, 0}, + {CTRY_TURKEY, ETSI13_WORLD, "TR", 40, 160, 0}, + {CTRY_TURKS_AND_CAICOS, FCC3_WORLD, "TC", 40, 160, 0}, + {CTRY_UGANDA, FCC3_WORLD, "UG", 40, 160, 0}, + {CTRY_UKRAINE, ETSI9_WORLD, "UA", 40, 160, 0}, + {CTRY_UAE, FCC3_WORLD, "AE", 40, 160, 0}, + {CTRY_UNITED_KINGDOM, ETSI10_WORLD, "GB", 40, 160, 0}, + {CTRY_UNITED_STATES, FCC10_FCCA, "US", 40, 160, 0}, + {CTRY_UNITED_STATES_MINOR_OUTLYING_ISLANDS, FCC10_FCCA, "UM", 40, 160, + 0}, + {CTRY_URUGUAY, FCC2_WORLD, "UY", 40, 160, 0}, + {CTRY_UZBEKISTAN, ETSI3_WORLD, "UZ", 40, 160, 0}, + {CTRY_VANUATU, FCC3_WORLD, "VU", 40, 160, 0}, + {CTRY_VENEZUELA, FCC2_ETSIC, "VE", 40, 160, 0}, + {CTRY_VIET_NAM, FCC3_WORLD, "VN", 40, 80, 0}, + {CTRY_VIRGIN_ISLANDS, FCC10_FCCA, "VI", 40, 160, 0}, + {CTRY_VIRGIN_ISLANDS_BRITISH, ETSI10_WORLD, "VG", 40, 160, 0}, + {CTRY_WALLIS_AND_FUTUNA, ETSI1_WORLD, "WF", 40, 160, 0}, + {CTRY_YEMEN, ETSI1_WORLD, "YE", 40, 160, 0}, + {CTRY_ZIMBABWE, ETSI1_WORLD, "ZW", 40, 160, 0}, +}; +#else +const struct country_code_to_reg_domain g_all_countries[] = { + {CTRY_AFGHANISTAN, ETSI1_WORLD, "AF", 40, 160, 0}, + {CTRY_ALAND_ISLANDS, FCC3_WORLD, "AX", 40, 160, 0}, + {CTRY_ALBANIA, ETSI13_WORLD, "AL", 40, 160, 0}, + {CTRY_ALGERIA, APL13_WORLD, "DZ", 40, 160, 0}, + {CTRY_AMERICAN_SAMOA, FCC3_FCCA, "AS", 40, 160, 0}, + {CTRY_ANDORRA, ETSI13_WORLD, "AD", 40, 160, 0}, + {CTRY_ANGUILLA, ETSI1_WORLD, "AI", 40, 160, 0}, + {CTRY_ANTIGUA_AND_BARBUDA, ETSI13_WORLD, "AG", 40, 160, 0}, + {CTRY_ARGENTINA, APL17_ETSIC, "AR", 40, 160, 0}, + {CTRY_ARMENIA, ETSI4_WORLD, "AM", 40, 160, 0}, + {CTRY_ARUBA, ETSI1_WORLD, "AW", 40, 160, 0}, + {CTRY_AUSTRALIA, FCC6_WORLD, "AU", 40, 160, 0}, + {CTRY_AUSTRIA, ETSI13_WORLD, "AT", 40, 160, 0}, + {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", 40, 160, 0}, + {CTRY_BAHAMAS, FCC3_WORLD, "BS", 40, 160, 0}, + {CTRY_BAHRAIN, APL15_WORLD, "BH", 40, 160, 0}, + {CTRY_BANGLADESH, APL1_WORLD, "BD", 40, 160, 0}, + {CTRY_BARBADOS, FCC2_WORLD, "BB", 40, 160, 0}, + {CTRY_BELARUS, ETSI1_WORLD, "BY", 40, 160, 0}, + {CTRY_BELGIUM, ETSI13_WORLD, "BE", 40, 160, 0}, + {CTRY_BELIZE, ETSI8_WORLD, "BZ", 40, 160, 0}, + {CTRY_BERMUDA, FCC3_FCCA, "BM", 40, 160, 0}, + {CTRY_BHUTAN, ETSI1_WORLD, "BT", 40, 160, 0}, + {CTRY_BOLIVIA, FCC3_WORLD, "BO", 40, 160, 0}, + {CTRY_BOSNIA_HERZ, ETSI13_WORLD, "BA", 40, 160, 0}, + {CTRY_BRAZIL, FCC3_ETSIC, "BR", 40, 160, 0}, + {CTRY_BRUNEI_DARUSSALAM, FCC3_WORLD, "BN", 40, 160, 0}, + {CTRY_BULGARIA, ETSI13_WORLD, "BG", 40, 160, 0}, + {CTRY_BURKINA_FASO, FCC3_WORLD, "BF", 40, 160, 0}, + {CTRY_CAMBODIA, ETSI1_WORLD, "KH", 40, 160, 0}, + {CTRY_CAMEROON, ETSI1_WORLD, "CM", 40, 160, 0}, + {CTRY_CANADA, FCC6_FCCA, "CA", 40, 160, 0}, + {CTRY_CAYMAN_ISLANDS, FCC3_WORLD, "KY", 40, 160, 0}, + {CTRY_CENTRAL_AFRICA_REPUBLIC, FCC3_WORLD, "CF", 40, 40, 0}, + {CTRY_CHAD, ETSI1_WORLD, "TD", 40, 160, 0}, + {CTRY_CHILE, FCC13_WORLD, "CL", 40, 160, 0}, + {CTRY_CHINA, APL14_WORLD, "CN", 40, 160, 0}, + {CTRY_CHRISTMAS_ISLAND, FCC3_WORLD, "CX", 40, 160, 0}, + {CTRY_COLOMBIA, FCC3_WORLD, "CO", 40, 160, 0}, + {CTRY_COOK_ISLANDS, FCC3_WORLD, "CK", 40, 160, 0}, + {CTRY_COSTA_RICA, FCC3_WORLD, "CR", 40, 160, 0}, + {CTRY_COTE_DIVOIRE, FCC3_WORLD, "CI", 40, 160, 0}, + {CTRY_CROATIA, ETSI13_WORLD, "HR", 40, 160, 0}, + {CTRY_CYPRUS, ETSI13_WORLD, "CY", 40, 160, 0}, + {CTRY_CZECH, ETSI13_WORLD, "CZ", 40, 160, 0}, + {CTRY_DENMARK, ETSI13_WORLD, "DK", 40, 160, 0}, + {CTRY_DOMINICA, FCC2_FCCA, "DM", 40, 160, 0}, + {CTRY_DOMINICAN_REPUBLIC, FCC3_FCCA, "DO", 40, 160, 0}, + {CTRY_ECUADOR, FCC3_FCCA, "EC", 40, 160, 0}, + {CTRY_EGYPT, ETSI3_WORLD, "EG", 40, 160, 0}, + {CTRY_EL_SALVADOR, FCC3_WORLD, "SV", 40, 160, 0}, + {CTRY_ESTONIA, ETSI13_WORLD, "EE", 40, 160, 0}, + {CTRY_ETHIOPIA, ETSI1_WORLD, "ET", 40, 160, 0}, + {CTRY_FALKLAND_ISLANDS, ETSI13_WORLD, "FK", 40, 160, 0}, + {CTRY_FAROE_ISLANDS, ETSI13_WORLD, "FO", 40, 160, 0}, + {CTRY_FINLAND, ETSI13_WORLD, "FI", 40, 160, 0}, + {CTRY_FRANCE, ETSI13_WORLD, "FR", 40, 160, 0}, + {CTRY_FRENCH_GUIANA, ETSI13_WORLD, "GF", 40, 160, 0}, + {CTRY_FRENCH_POLYNESIA, ETSI13_WORLD, "PF", 40, 160, 0}, + {CTRY_FRENCH_SOUTHERN_TERRITORIES, ETSI13_WORLD, "TF", 40, 160, 0}, + {CTRY_GEORGIA, ETSI1_WORLD, "GE", 40, 160, 0}, + {CTRY_GERMANY, ETSI13_WORLD, "DE", 40, 160, 0}, + {CTRY_GHANA, FCC3_WORLD, "GH", 40, 160, 0}, + {CTRY_GIBRALTAR, ETSI1_WORLD, "GI", 40, 160, 0}, + {CTRY_GREECE, ETSI13_WORLD, "GR", 40, 160, 0}, + {CTRY_GREENLAND, ETSI1_WORLD, "GL", 40, 160, 0}, + {CTRY_GRENADA, FCC3_FCCA, "GD", 40, 160, 0}, + {CTRY_GUADELOUPE, ETSI1_WORLD, "GP", 40, 160, 0}, + {CTRY_GUAM, FCC3_FCCA, "GU", 40, 160, 0}, + {CTRY_GUATEMALA, ETSI1_WORLD, "GT", 40, 160, 0}, + {CTRY_GUERNSEY, ETSI13_WORLD, "GG", 40, 160, 0}, + {CTRY_GUYANA, APL1_ETSIC, "GY", 40, 160, 0}, + {CTRY_HAITI, FCC3_FCCA, "HT", 40, 160, 0}, + {CTRY_HEARD_ISLAND_AND_MCDONALD_ISLANDS, FCC6_WORLD, "HM", 40, 160, 0}, + {CTRY_HOLY_SEE, ETSI13_WORLD, "VA", 40, 160, 0}, + {CTRY_HONDURAS, FCC13_WORLD, "HN", 40, 160, 0}, + {CTRY_HONG_KONG, FCC3_WORLD, "HK", 40, 160, 0}, + {CTRY_HUNGARY, ETSI13_WORLD, "HU", 40, 160, 0}, + {CTRY_ICELAND, ETSI13_WORLD, "IS", 40, 160, 0}, + {CTRY_INDIA, APL19_ETSIC, "IN", 40, 160, 0}, + {CTRY_INDONESIA, APL2_ETSID, "ID", 40, 80, 0}, + {CTRY_IRAQ, ETSI1_WORLD, "IQ", 40, 160, 0}, + {CTRY_IRELAND, ETSI13_WORLD, "IE", 40, 160, 0}, + {CTRY_ISLE_OF_MAN, ETSI13_WORLD, "IM", 40, 160, 0}, + {CTRY_ISRAEL, ETSI3_WORLD, "IL", 40, 160, 0}, + {CTRY_ITALY, ETSI13_WORLD, "IT", 40, 160, 0}, + {CTRY_JAMAICA, FCC13_WORLD, "JM", 40, 160, 0}, + {CTRY_JAPAN, MKK17_MKKC, "JP", 40, 160, 0}, + {CTRY_JAPAN15, MKK5_MKKC, "JP", 40, 160, 0}, + {CTRY_XA, MKK5_MKKA, "XA", 40, 160, 0}, + {CTRY_JERSEY, ETSI13_WORLD, "JE", 40, 160, 0}, + {CTRY_JORDAN, APL4_WORLD, "JO", 40, 160, 0}, + {CTRY_KAZAKHSTAN, MKK5_MKKC, "KZ", 40, 160, 0}, + {CTRY_KENYA, ETSI13_WORLD, "KE", 40, 160, 0}, + {CTRY_KOREA_ROC, APL9_MKKC, "KR", 40, 160, 0}, + {CTRY_KUWAIT, ETSI13_WORLD, "KW", 40, 160, 0}, + {CTRY_LATVIA, ETSI13_WORLD, "LV", 40, 160, 0}, + {CTRY_LEBANON, FCC3_WORLD, "LB", 40, 160, 0}, + {CTRY_LESOTHO, ETSI1_WORLD, "LS", 40, 160, 0}, + {CTRY_LIECHTENSTEIN, ETSI13_WORLD, "LI", 40, 160, 0}, + {CTRY_LITHUANIA, ETSI13_WORLD, "LT", 40, 160, 0}, + {CTRY_LUXEMBOURG, ETSI13_WORLD, "LU", 40, 160, 0}, + {CTRY_MACAU, FCC3_WORLD, "MO", 40, 160, 0}, + {CTRY_MACEDONIA, ETSI13_WORLD, "MK", 40, 160, 0}, + {CTRY_MALAWI, ETSI1_WORLD, "MW", 40, 160, 0}, + {CTRY_MALAYSIA, FCC11_WORLD, "MY", 40, 160, 0}, + {CTRY_MALDIVES, APL6_WORLD, "MV", 40, 160, 0}, + {CTRY_MALTA, ETSI13_WORLD, "MT", 40, 160, 0}, + {CTRY_MARSHALL_ISLANDS, FCC3_FCCA, "MH", 40, 160, 0}, + {CTRY_MARTINIQUE, ETSI13_WORLD, "MQ", 40, 160, 0}, + {CTRY_MAURITANIA, ETSI1_WORLD, "MR", 40, 160, 0}, + {CTRY_MAURITIUS, ETSI13_WORLD, "MU", 40, 160, 0}, + {CTRY_MAYOTTE, ETSI1_WORLD, "YT", 40, 160, 0}, + {CTRY_MEXICO, FCC3_ETSIC, "MX", 40, 160, 0}, + {CTRY_MICRONESIA, FCC3_FCCA, "FM", 40, 160, 0}, + {CTRY_MOLDOVA, ETSI13_WORLD, "MD", 40, 160, 0}, + {CTRY_MONACO, ETSI13_WORLD, "MC", 40, 160, 0}, + {CTRY_MONGOLIA, FCC3_WORLD, "MN", 40, 160, 0}, + {CTRY_MONTENEGRO, ETSI13_WORLD, "ME", 40, 160, 0}, + {CTRY_MONTSERRAT, ETSI13_WORLD, "MS", 40, 160, 0}, + {CTRY_MOROCCO, ETSI3_WORLD, "MA", 40, 160, 0}, + {CTRY_MYANMAR, APL1_WORLD, "MM", 40, 160, 0}, + {CTRY_NAMIBIA, APL20_WORLD, "NA", 40, 160, 0}, + {CTRY_NEPAL, APL23_WORLD, "NP", 40, 160, 0}, + {CTRY_NETHERLANDS, ETSI13_WORLD, "NL", 40, 160, 0}, + {CTRY_NETHERLANDS_ANTILLES, ETSI13_WORLD, "AN", 40, 160, 0}, + {CTRY_NEW_CALEDONIA, ETSI13_WORLD, "NC", 40, 160, 0}, + {CTRY_NEW_ZEALAND, FCC3_ETSIC, "NZ", 40, 160, 0}, + {CTRY_NIGERIA, APL6_WORLD, "NG", 40, 160, 0}, + {CTRY_NORTHERN_MARIANA_ISLANDS, FCC3_FCCA, "MP", 40, 160, 0}, + {CTRY_NICARAGUA, FCC3_FCCA, "NI", 40, 160, 0}, + {CTRY_NIUE, ETSI13_WORLD, "NU", 40, 160, 0}, + {CTRY_NORFOLK_ISLAND, FCC6_WORLD, "NF", 40, 160, 0}, + {CTRY_NORWAY, ETSI13_WORLD, "NO", 40, 160, 0}, + {CTRY_OMAN, ETSI1_WORLD, "OM", 40, 160, 0}, + {CTRY_PAKISTAN, APL1_ETSIC, "PK", 40, 160, 0}, + {CTRY_PALAU, FCC3_FCCA, "PW", 40, 160, 0}, + {CTRY_PANAMA, FCC14_FCCB, "PA", 40, 160, 0}, + {CTRY_PAPUA_NEW_GUINEA, FCC3_WORLD, "PG", 40, 160, 0}, + {CTRY_PARAGUAY, FCC3_WORLD, "PY", 40, 160, 0}, + {CTRY_PERU, FCC3_WORLD, "PE", 40, 160, 0}, + {CTRY_PHILIPPINES, FCC3_WORLD, "PH", 40, 160, 0}, + {CTRY_POLAND, ETSI13_WORLD, "PL", 40, 160, 0}, + {CTRY_PORTUGAL, ETSI13_WORLD, "PT", 40, 160, 0}, + {CTRY_PUERTO_RICO, FCC3_FCCA, "PR", 40, 160, 0}, + {CTRY_QATAR, ETSI14_WORLD, "QA", 40, 160, 0}, + {CTRY_REUNION, ETSI1_WORLD, "RE", 40, 160, 0}, + {CTRY_ROMANIA, ETSI13_WORLD, "RO", 40, 160, 0}, + {CTRY_RUSSIA, ETSI8_WORLD, "RU", 40, 160, 0}, + {CTRY_RWANDA, FCC3_WORLD, "RW", 40, 160, 0}, + {CTRY_SAINT_BARTHELEMY, ETSI1_WORLD, "BL", 40, 160, 0}, + {CTRY_SAINT_HELENA_ASCENSION_AND_TRISTAN_DA_CUNHA, ETSI13_WORLD, "SH", + 40, 160, 0}, + {CTRY_SAINT_KITTS_AND_NEVIS, APL10_WORLD, "KN", 40, 160, 0}, + {CTRY_SAINT_LUCIA, APL10_WORLD, "LC", 40, 160, 0}, + {CTRY_SAINT_MARTIN, ETSI1_WORLD, "MF", 40, 160, 0}, + {CTRY_SAINT_PIERRE_AND_MIQUELON, ETSI13_WORLD, "PM", 40, 160, 0}, + {CTRY_SAINT_VINCENT_AND_THE_GRENADIENS, ETSI13_WORLD, "VC", + 40, 160, 0}, + {CTRY_SAMOA, ETSI1_WORLD, "WS", 40, 40, 0}, + {CTRY_SAN_MARINO, FCC3_FCCA, "SM", 40, 160, 0}, + {CTRY_SAO_TOME_AND_PRINCIPE, FCC3_WORLD, "ST", 40, 160, 0}, + {CTRY_SAUDI_ARABIA, ETSI15_WORLD, "SA", 40, 160, 0}, + {CTRY_SENEGAL, FCC13_WORLD, "SN", 40, 160, 0}, + {CTRY_SERBIA, ETSI13_WORLD, "RS", 40, 160, 0}, + {CTRY_SINGAPORE, FCC3_WORLD, "SG", 40, 160, 0}, + {CTRY_SINT_MAARTEN, ETSI13_WORLD, "SX", 40, 160, 0}, + {CTRY_SLOVAKIA, ETSI13_WORLD, "SK", 40, 160, 0}, + {CTRY_SLOVENIA, ETSI13_WORLD, "SI", 40, 160, 0}, + {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA", 40, 160, 0}, + {CTRY_SPAIN, ETSI13_WORLD, "ES", 40, 160, 0}, + {CTRY_SURINAME, ETSI1_WORLD, "SR", 40, 160, 0}, + {CTRY_SRI_LANKA, FCC3_ETSIC, "LK", 40, 160, 0}, + {CTRY_SVALBARD_AND_JAN_MAYEN, FCC6_WORLD, "SJ", 40, 160, 0}, + {CTRY_SWEDEN, ETSI13_WORLD, "SE", 40, 160, 0}, + {CTRY_SWITZERLAND, ETSI13_WORLD, "CH", 40, 160, 0}, + {CTRY_TAIWAN, FCC3_FCCA, "TW", 40, 160, 0}, + {CTRY_TANZANIA, APL1_WORLD, "TZ", 40, 160, 0}, + {CTRY_THAILAND, FCC3_WORLD, "TH", 40, 160, 0}, + {CTRY_TOGO, ETSI1_WORLD, "TG", 40, 40, 0}, + {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT", 40, 160, 0}, + {CTRY_TUNISIA, ETSI3_WORLD, "TN", 40, 160, 0}, + {CTRY_TURKEY, ETSI13_WORLD, "TR", 40, 160, 0}, + {CTRY_TURKS_AND_CAICOS, FCC3_WORLD, "TC", 40, 160, 0}, + {CTRY_UGANDA, FCC3_WORLD, "UG", 40, 160, 0}, + {CTRY_UKRAINE, ETSI9_WORLD, "UA", 40, 160, 0}, + {CTRY_UAE, FCC3_WORLD, "AE", 40, 160, 0}, + {CTRY_UNITED_KINGDOM, ETSI13_WORLD, "GB", 40, 160, 0}, + {CTRY_UNITED_STATES, FCC3_FCCA, "US", 40, 160, 0}, + {CTRY_UNITED_STATES_MINOR_OUTLYING_ISLANDS, FCC3_FCCA, "UM", 40, 160, + 0}, + {CTRY_URUGUAY, FCC2_WORLD, "UY", 40, 160, 0}, + {CTRY_UZBEKISTAN, ETSI3_WORLD, "UZ", 40, 160, 0}, + {CTRY_VANUATU, FCC3_WORLD, "VU", 40, 160, 0}, + {CTRY_VENEZUELA, FCC2_ETSIC, "VE", 40, 160, 0}, + {CTRY_VIET_NAM, FCC3_WORLD, "VN", 40, 80, 0}, + {CTRY_VIRGIN_ISLANDS, FCC3_FCCA, "VI", 40, 160, 0}, + {CTRY_VIRGIN_ISLANDS_BRITISH, ETSI13_WORLD, "VG", 40, 160, 0}, + {CTRY_WALLIS_AND_FUTUNA, ETSI1_WORLD, "WF", 40, 160, 0}, + {CTRY_YEMEN, ETSI1_WORLD, "YE", 40, 160, 0}, + {CTRY_ZIMBABWE, ETSI1_WORLD, "ZW", 40, 160, 0}, +}; +#endif +#endif + +enum reg_domains_2g { + FCCA, + FCCB, + WORLD, + MKKA, + MKKC, + ETSIC, + ETSID, + KRRA, + WORLD_2G_1, + WORLD_2G_2, + WORLD_2G_3, + + REG_DOMAINS_2G_MAX, +}; + +enum reg_domains_5g { + NULL1, + FCC1, + FCC2, + FCC3, + FCC4, + FCC5, + FCC6, + FCC8, + FCC10, + FCC11, + FCC13, + FCC14, +#ifdef CONFIG_BAND_6GHZ + FCC15, + FCC16, + FCC17, +#endif + ETSI1, + ETSI3, + ETSI4, + ETSI8, + ETSI9, + ETSI10, + ETSI11, + ETSI12, + ETSI13, + ETSI14, + ETSI15, + APL1, + APL2, + APL4, + APL6, + APL8, + APL9, + APL10, + APL11, + APL12, + APL13, + APL14, + APL15, + APL16, + APL17, + APL19, + APL20, + APL23, + APL24, + MKK3, + MKK4, + MKK5, + MKK9, + MKK10, + MKK11, + MKK16, + MKK17, + WORLD_5G_1, + WORLD_5G_2, + + REG_DOMAINS_5G_MAX, +}; + +const struct reg_domain_pair g_reg_dmn_pairs[] = { + {NULL1_WORLD, NULL1, WORLD}, + + {FCC1_FCCA, FCC1, FCCA}, + {FCC1_WORLD, FCC1, WORLD}, + {FCC2_FCCA, FCC2, FCCA}, + {FCC2_WORLD, FCC2, WORLD}, + {FCC2_ETSIC, FCC2, ETSIC}, + {FCC3_FCCA, FCC3, FCCA}, + {FCC3_WORLD, FCC3, WORLD}, + {FCC3_ETSIC, FCC3, ETSIC}, + {FCC4_FCCA, FCC4, FCCA}, + {FCC5_FCCA, FCC5, FCCA}, + {FCC6_WORLD, FCC6, WORLD}, + {FCC6_FCCA, FCC6, FCCA}, + {FCC8_FCCA, FCC8, FCCA}, + {FCC8_WORLD, FCC8, WORLD}, + {FCC10_FCCA, FCC10, FCCA}, + {FCC11_WORLD, FCC11, WORLD}, + {FCC13_WORLD, FCC13, WORLD}, + {FCC14_FCCB, FCC14, FCCB}, +#ifdef CONFIG_BAND_6GHZ + {FCC15_FCCA, FCC15, FCCA}, + {FCC16_FCCA, FCC16, FCCA}, + {FCC17_FCCA, FCC17, FCCA}, +#endif + {ETSI1_WORLD, ETSI1, WORLD}, + {ETSI3_WORLD, ETSI3, WORLD}, + {ETSI4_WORLD, ETSI4, WORLD}, + {ETSI8_WORLD, ETSI8, WORLD}, + {ETSI9_WORLD, ETSI9, WORLD}, + {ETSI10_WORLD, ETSI10, WORLD}, + {ETSI10_FCCA, ETSI10, FCCA}, + {ETSI11_WORLD, ETSI11, WORLD}, + {ETSI12_WORLD, ETSI12, WORLD}, + {ETSI13_WORLD, ETSI13, WORLD}, + {ETSI14_WORLD, ETSI14, WORLD}, + {ETSI15_WORLD, ETSI15, WORLD}, + + {APL1_WORLD, APL1, WORLD}, + {APL1_ETSIC, APL1, ETSIC}, + {APL2_WORLD, APL2, WORLD}, + {APL2_ETSIC, APL2, ETSIC}, + {APL2_ETSID, APL2, ETSID}, + {APL4_WORLD, APL4, WORLD}, + {APL6_WORLD, APL6, WORLD}, + {APL8_WORLD, APL8, WORLD}, + {APL9_WORLD, APL9, WORLD}, + {APL9_MKKC, APL9, MKKC}, + {APL9_KRRA, APL9, KRRA}, + {APL10_WORLD, APL10, WORLD}, + {APL11_FCCA, APL11, FCCA}, + {APL12_WORLD, APL12, WORLD}, + {APL13_WORLD, APL13, WORLD}, + {APL14_WORLD, APL14, WORLD}, + {APL15_WORLD, APL15, WORLD}, + {APL16_WORLD, APL16, WORLD}, + {APL16_ETSIC, APL16, ETSIC}, + {APL17_ETSIC, APL17, ETSIC}, + {APL17_ETSID, APL17, ETSID}, + {APL19_ETSIC, APL19, ETSIC}, + {APL20_WORLD, APL20, WORLD}, + {APL23_WORLD, APL23, WORLD}, + {APL24_ETSIC, APL24, ETSIC}, + + {MKK3_MKKC, MKK3, MKKC}, + {MKK5_MKKA, MKK5, MKKA}, + {MKK5_MKKC, MKK5, MKKC}, + {MKK11_MKKC, MKK11, MKKC}, + {MKK16_MKKC, MKK16, MKKC}, + {MKK17_MKKC, MKK17, MKKC}, + + {WORLD_60, WORLD_5G_2, WORLD_2G_3}, + {WORLD_61, WORLD_5G_2, WORLD_2G_3}, + {WORLD_62, WORLD_5G_2, WORLD_2G_3}, + {WORLD_63, WORLD_5G_1, WORLD_2G_2}, + {WORLD_65, WORLD_5G_1, WORLD_2G_2}, + {WORLD_64, WORLD_5G_1, WORLD_2G_1}, + {WORLD_66, WORLD_5G_2, WORLD_2G_1}, + {WORLD_69, WORLD_5G_2, WORLD_2G_1}, + {WORLD_67, WORLD_5G_2, WORLD_2G_2}, + {WORLD_68, WORLD_5G_2, WORLD_2G_2}, + {WORLD_6A, WORLD_5G_2, WORLD_2G_2}, + {WORLD_6C, WORLD_5G_2, WORLD_2G_2}, +}; + +enum reg_rules_2g { + + CHAN_1_11_1, + CHAN_1_11_2, + CHAN_1_11_3, + CHAN_1_13_1, + CHAN_1_13_2, + CHAN_1_13_3, + CHAN_1_13_4, + CHAN_1_13_5, + CHAN_12_12_1, + CHAN_12_13_1, + CHAN_14_1, + CHAN_14_2, +}; + +const struct regulatory_rule reg_rules_2g[] = { + + [CHAN_1_11_1] = {2402, 2472, 40, 30, 0}, + [CHAN_1_11_2] = {2402, 2472, 40, 20, 0}, + [CHAN_1_11_3] = {2402, 2472, 40, 36, 0}, + [CHAN_1_13_1] = {2402, 2482, 40, 20, 0}, + [CHAN_1_13_2] = {2402, 2482, 40, 30, 0}, + [CHAN_1_13_3] = {2402, 2482, 40, 36, 0}, + [CHAN_1_13_4] = {2402, 2482, 40, 23, 0}, + [CHAN_1_13_5] = {2402, 2482, 20, 20, 0}, + [CHAN_12_12_1] = {2457, 2477, 20, 20, REGULATORY_CHAN_NO_IR}, + [CHAN_12_13_1] = {2457, 2482, 20, 20, REGULATORY_CHAN_NO_IR}, + [CHAN_14_1] = {2474, 2494, 20, 23, REGULATORY_CHAN_NO_OFDM}, + [CHAN_14_2] = {2474, 2494, 20, 20, + REGULATORY_CHAN_NO_OFDM | REGULATORY_CHAN_NO_IR}, +}; + + +const struct regdomain regdomains_2g[] = { + + [FCCA] = {CTL_FCC, DFS_UNINIT_REGION, 0, 6, 1, {CHAN_1_11_1} }, + [FCCB] = {CTL_FCC, DFS_UNINIT_REGION, 0, 6, 1, {CHAN_1_11_3} }, + [WORLD] = {CTL_ETSI, DFS_UNINIT_REGION, 0, 0, 1, {CHAN_1_13_1} }, + [MKKA] = {CTL_MKK, DFS_UNINIT_REGION, 0, 0, 2, {CHAN_1_13_4, + CHAN_14_1} }, + [MKKC] = {CTL_MKK, DFS_UNINIT_REGION, 0, 0, 1, {CHAN_1_13_4} }, + [ETSIC] = {CTL_ETSI, DFS_UNINIT_REGION, 0, 0, 1, {CHAN_1_13_2} }, + [ETSID] = {CTL_ETSI, DFS_UNINIT_REGION, 0, 0, 1, {CHAN_1_13_5} }, + [KRRA] = {CTL_KOR, DFS_UNINIT_REGION, 0, 0, 1, {CHAN_1_13_4} }, + [WORLD_2G_1] = {CTL_NONE, DFS_UNINIT_REGION, 0, 0, 1, {CHAN_1_11_2} }, + [WORLD_2G_2] = {CTL_NONE, DFS_UNINIT_REGION, 0, 0, 2, + {CHAN_1_11_2, CHAN_12_13_1} }, + [WORLD_2G_3] = {CTL_NONE, DFS_UNINIT_REGION, 0, 0, 2, + {CHAN_1_11_2, CHAN_12_12_1} }, +}; + + +enum reg_rules_5g { + + CHAN_4910_4990_1, + CHAN_4940_4990_1, + CHAN_5030_5090_1, + CHAN_5170_5250_1, + CHAN_5170_5250_2, + CHAN_5170_5250_3, + CHAN_5170_5250_4, + CHAN_5170_5250_5, + CHAN_5170_5250_6, + CHAN_5170_5250_7, + CHAN_5170_5250_8, + CHAN_5170_5250_9, + CHAN_5170_5250_10, + CHAN_5170_5330_1, + CHAN_5170_5330_2, + CHAN_5250_5330_1, + CHAN_5250_5330_2, + CHAN_5250_5330_3, + CHAN_5250_5330_4, + CHAN_5250_5330_5, + CHAN_5250_5330_6, + CHAN_5250_5330_7, + CHAN_5250_5330_8, + CHAN_5250_5330_9, + CHAN_5250_5330_10, + CHAN_5250_5330_11, + CHAN_5250_5330_12, + CHAN_5250_5330_13, + CHAN_5250_5330_14, + CHAN_5250_5330_15, + CHAN_5250_5330_16, + CHAN_5490_5730_1, + CHAN_5490_5730_2, + CHAN_5490_5730_3, + CHAN_5490_5730_4, + CHAN_5490_5730_5, + CHAN_5490_5730_6, + CHAN_5490_5730_7, + CHAN_5490_5710_1, + CHAN_5490_5710_2, + CHAN_5490_5710_3, + CHAN_5490_5710_4, + CHAN_5490_5710_5, + CHAN_5490_5710_6, + CHAN_5490_5710_7, + CHAN_5490_5710_8, + CHAN_5490_5590_1, + CHAN_5490_5590_2, + CHAN_5490_5590_3, + CHAN_5490_5570_1, + CHAN_5490_5650_2, + CHAN_5490_5670_1, + CHAN_5490_5670_2, + CHAN_5490_5630_1, + CHAN_5650_5730_1, + CHAN_5650_5730_2, + CHAN_5650_5730_3, + CHAN_5735_5835_1, + CHAN_5735_5835_2, + CHAN_5735_5835_3, + CHAN_5735_5835_4, + CHAN_5735_5835_5, + CHAN_5735_5835_6, + CHAN_5735_5835_7, + CHAN_5735_5835_8, + CHAN_5735_5835_9, + CHAN_5735_5875_1, + CHAN_5735_5875_2, + CHAN_5735_5875_3, + CHAN_5735_5875_4, + CHAN_5735_5875_5, + CHAN_5735_5815_1, + CHAN_5735_5815_2, + CHAN_5735_5815_3, + CHAN_5735_5815_4, + CHAN_5735_5775_1, + CHAN_5835_5855_1, + CHAN_5855_5875_1, + CHAN_5850_5925_1, + CHAN_5850_5925_2, +#ifdef CONFIG_BAND_6GHZ + CHAN_5935_6435_1, + CHAN_5935_6435_2, + CHAN_6435_6535_1, + CHAN_6435_6535_2, + CHAN_6535_6875_1, + CHAN_6535_6875_2, + CHAN_6875_7115_1, + CHAN_6875_7115_2, +#endif +}; + +const struct regulatory_rule reg_rules_5g[] = { + + [CHAN_4910_4990_1] = {4910, 4990, 20, 20, 0}, + [CHAN_4940_4990_1] = {4940, 4990, 20, 33, 0}, + [CHAN_5030_5090_1] = {5030, 5090, 20, 20, 0}, + [CHAN_5170_5250_1] = {5170, 5250, 80, 17, 0}, + [CHAN_5170_5250_2] = {5170, 5250, 80, 23, 0}, + [CHAN_5170_5250_3] = {5170, 5250, 80, 20, 0}, + [CHAN_5170_5250_4] = {5170, 5250, 80, 30, 0}, + [CHAN_5170_5250_5] = {5170, 5250, 80, 24, 0}, + [CHAN_5170_5250_6] = {5170, 5250, 80, 18, 0}, + [CHAN_5170_5250_7] = {5170, 5250, 80, 20, REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_5170_5250_8] = {5170, 5250, 80, 23, REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_5170_5250_9] = {5170, 5250, 40, 30, 0}, + [CHAN_5170_5250_10] = {5170, 5250, 20, 20, REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_5170_5330_1] = {5170, 5330, 160, 20, REGULATORY_CHAN_NO_IR}, + [CHAN_5170_5330_2] = {5170, 5330, 160, 24, 0}, + [CHAN_5250_5330_1] = {5250, 5330, 80, 23, REGULATORY_CHAN_RADAR}, + [CHAN_5250_5330_2] = {5250, 5330, 80, 20, REGULATORY_CHAN_RADAR}, + [CHAN_5250_5330_3] = {5250, 5330, 80, 18, REGULATORY_CHAN_RADAR}, + [CHAN_5250_5330_4] = {5250, 5330, 80, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5250_5330_5] = {5250, 5330, 80, 23, 0}, + [CHAN_5250_5330_6] = {5250, 5330, 80, 30, 0}, + [CHAN_5250_5330_7] = {5250, 5330, 80, 24, REGULATORY_CHAN_RADAR}, + [CHAN_5250_5330_8] = {5250, 5330, 80, 36, 0}, + [CHAN_5250_5330_9] = {5250, 5330, 80, 20, 0}, + [CHAN_5250_5330_10] = {5250, 5330, 80, 24, 0}, + [CHAN_5250_5330_11] = {5250, 5330, 80, 20, REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_5250_5330_12] = {5250, 5330, 80, 23, REGULATORY_CHAN_RADAR | + REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_5250_5330_13] = {5250, 5330, 40, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5250_5330_14] = {5250, 5330, 80, 20, REGULATORY_CHAN_RADAR | + REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_5250_5330_15] = {5250, 5330, 20, 20, REGULATORY_CHAN_RADAR | + REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_5250_5330_16] = {5250, 5330, 80, 23, REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_5490_5730_1] = {5490, 5730, 160, 24, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5730_2] = {5490, 5730, 160, 20, REGULATORY_CHAN_NO_IR}, + [CHAN_5490_5730_3] = {5490, 5730, 160, 30, 0}, + [CHAN_5490_5730_4] = {5490, 5730, 160, 24, 0}, + [CHAN_5490_5730_5] = {5490, 5730, 160, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5730_6] = {5490, 5730, 160, 23, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5730_7] = {5490, 5730, 160, 20, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_1] = {5490, 5710, 160, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_2] = {5490, 5710, 160, 20, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_3] = {5490, 5710, 160, 27, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_4] = {5490, 5710, 40, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_5] = {5490, 5710, 160, 24, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_6] = {5490, 5710, 160, 26, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_7] = {5490, 5710, 160, 23, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_8] = {5490, 5710, 20, 27, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5590_1] = {5490, 5590, 80, 24, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5590_2] = {5490, 5590, 80, 30, 0}, + [CHAN_5490_5590_3] = {5490, 5590, 80, 36, 0}, + [CHAN_5490_5570_1] = {5490, 5570, 80, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5650_2] = {5490, 5650, 160, 24, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5670_1] = {5490, 5670, 160, 20, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5670_2] = {5490, 5670, 160, 23, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5630_1] = {5490, 5630, 80, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5650_5730_1] = {5650, 5730, 80, 24, REGULATORY_CHAN_RADAR}, + [CHAN_5650_5730_2] = {5650, 5730, 80, 30, 0}, + [CHAN_5650_5730_3] = {5650, 5730, 80, 36, 0}, + [CHAN_5735_5835_1] = {5735, 5835, 80, 23, 0}, + [CHAN_5735_5835_2] = {5735, 5835, 80, 30, 0}, + [CHAN_5735_5835_3] = {5735, 5835, 80, 20, 0}, + [CHAN_5735_5835_4] = {5735, 5835, 80, 33, 0}, + [CHAN_5735_5835_5] = {5735, 5835, 80, 20, REGULATORY_CHAN_NO_IR}, + [CHAN_5735_5835_6] = {5735, 5835, 80, 24, 0}, + [CHAN_5735_5835_7] = {5735, 5835, 80, 36, 0}, + [CHAN_5735_5835_8] = {5735, 5835, 80, 23, REGULATORY_CHAN_RADAR}, + [CHAN_5735_5835_9] = {5735, 5835, 80, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5735_5875_1] = {5735, 5875, 20, 27, REGULATORY_CHAN_RADAR}, + [CHAN_5735_5875_2] = {5735, 5875, 20, 30, 0}, + [CHAN_5735_5875_3] = {5735, 5875, 80, 30, 0}, + [CHAN_5735_5875_4] = {5735, 5875, 80, 14, 0}, + [CHAN_5735_5875_5] = {5735, 5875, 80, 20, REGULATORY_CHAN_RADAR}, + [CHAN_5735_5815_1] = {5735, 5815, 80, 30, 0}, + [CHAN_5735_5815_2] = {5735, 5815, 80, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5735_5815_3] = {5735, 5815, 80, 23, REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_5735_5815_4] = {5735, 5815, 20, 20, 0}, + [CHAN_5735_5775_1] = {5735, 5775, 40, 23, 0}, + [CHAN_5835_5855_1] = {5835, 5855, 20, 30, 0}, + [CHAN_5855_5875_1] = {5855, 5875, 20, 30, 0}, + [CHAN_5850_5925_1] = {5850, 5925, 20, 24, 0}, + [CHAN_5850_5925_2] = {5850, 5925, 20, 30, 0}, +#ifdef CONFIG_BAND_6GHZ + [CHAN_5935_6435_1] = {5935, 6435, 160, 18, REGULATORY_CHAN_AFC}, + [CHAN_5935_6435_2] = {5935, 6435, 160, 30, REGULATORY_CHAN_AFC}, + [CHAN_6435_6535_1] = {6435, 6535, 100, 18, REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_6435_6535_2] = {6435, 6535, 100, 24, REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_6535_6875_1] = {6535, 6875, 160, 18, REGULATORY_CHAN_AFC}, + [CHAN_6535_6875_2] = {6535, 6875, 160, 30, REGULATORY_CHAN_AFC}, + [CHAN_6875_7115_1] = {6875, 7115, 160, 18, REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_6875_7115_2] = {6875, 7115, 160, 24, REGULATORY_CHAN_INDOOR_ONLY}, +#endif +}; + + +const struct regdomain regdomains_5g[] = { + + [FCC1] = {CTL_FCC, DFS_FCC_REGION, 2, 6, 3, {CHAN_5170_5250_1, + CHAN_5250_5330_1, + CHAN_5735_5835_2} }, + + [FCC2] = {CTL_FCC, DFS_FCC_REGION, 2, 6, 3, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5735_5835_2} }, + + [FCC3] = {CTL_FCC, DFS_FCC_REGION, 2, 6, 4, {CHAN_5170_5250_5, + CHAN_5250_5330_7, + CHAN_5490_5730_1, + CHAN_5735_5835_2} }, + + [FCC4] = {CTL_FCC, DFS_FCC_REGION, 2, 6, 1, {CHAN_4940_4990_1} }, + + [FCC5] = {CTL_FCC, DFS_UNINIT_REGION, 2, 6, 2, {CHAN_5170_5250_4, + CHAN_5735_5835_2} }, + + [FCC6] = {CTL_FCC, DFS_FCC_REGION, 2, 6, 5, {CHAN_5170_5250_5, + CHAN_5250_5330_7, + CHAN_5490_5590_1, + CHAN_5650_5730_1, + CHAN_5735_5835_2} }, + + [FCC8] = {CTL_FCC, DFS_FCC_REGION, 2, 6, 4, {CHAN_5170_5250_4, + CHAN_5250_5330_7, + CHAN_5490_5730_1, + CHAN_5735_5835_2} }, + + [FCC10] = {CTL_FCC, DFS_FCC_REGION, 2, 0, 5, {CHAN_5170_5250_4, + CHAN_5250_5330_7, + CHAN_5490_5730_1, + CHAN_5735_5835_2, + CHAN_5850_5925_1} }, + + [FCC11] = {CTL_FCC, DFS_FCC_REGION, 2, 6, 4, {CHAN_5170_5250_5, + CHAN_5250_5330_7, + CHAN_5490_5650_2, + CHAN_5735_5835_6} }, + + [FCC13] = {CTL_FCC, DFS_UNINIT_REGION, 2, 0, 4, {CHAN_5170_5330_2, + CHAN_5250_5330_10, + CHAN_5490_5730_4, + CHAN_5735_5835_2} }, + + [FCC14] = {CTL_FCC, DFS_UNINIT_REGION, 2, 0, 4, {CHAN_5170_5250_4, + CHAN_5250_5330_10, + CHAN_5490_5730_4, + CHAN_5735_5835_2} }, + +#ifdef CONFIG_BAND_6GHZ + [FCC15] = {CTL_FCC, DFS_FCC_REGION, 2, 0, 8, {CHAN_5170_5250_5, + CHAN_5250_5330_7, + CHAN_5490_5730_1, + CHAN_5735_5835_2, + CHAN_5935_6435_1, + CHAN_6435_6535_1, + CHAN_6535_6875_1, + CHAN_6875_7115_1} }, + + [FCC16] = {CTL_FCC, DFS_FCC_REGION, 2, 0, 8, {CHAN_5170_5250_4, + CHAN_5250_5330_7, + CHAN_5490_5730_1, + CHAN_5735_5835_2, + CHAN_5935_6435_2, + CHAN_6435_6535_2, + CHAN_6535_6875_2, + CHAN_6875_7115_2} }, + + [FCC17] = {CTL_FCC, DFS_FCC_REGION, 2, 0, 6, {CHAN_5170_5250_4, + CHAN_5250_5330_7, + CHAN_5490_5730_1, + CHAN_5735_5835_2, + CHAN_5935_6435_2, + CHAN_6535_6875_2} }, +#endif + + [ETSI1] = {CTL_ETSI, DFS_ETSI_REGION, 2, 0, 3, {CHAN_5170_5250_8, + CHAN_5250_5330_12, + CHAN_5490_5710_1} }, + + [ETSI3] = {CTL_ETSI, DFS_ETSI_REGION, 5, 0, 2, {CHAN_5170_5250_2, + CHAN_5250_5330_1} }, + + [ETSI4] = {CTL_ETSI, DFS_ETSI_REGION, 2, 0, 2, {CHAN_5170_5250_6, + CHAN_5250_5330_3} }, + + [ETSI8] = {CTL_ETSI, DFS_UNINIT_REGION, 20, 0, 4, {CHAN_5170_5250_2, + CHAN_5250_5330_5, + CHAN_5490_5730_3, + CHAN_5735_5835_2} }, + + [ETSI9] = {CTL_ETSI, DFS_ETSI_REGION, 20, 0, 4, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5490_5710_5, + CHAN_5735_5835_6} }, + + [ETSI10] = {CTL_ETSI, DFS_ETSI_REGION, 10, 0, 4, {CHAN_5170_5250_7, + CHAN_5250_5330_14, + CHAN_5490_5710_3, + CHAN_5850_5925_2} }, + + [ETSI11] = {CTL_ETSI, DFS_ETSI_REGION, 10, 0, 4, {CHAN_5170_5250_10, + CHAN_5250_5330_15, + CHAN_5490_5710_8, + CHAN_5735_5875_1} }, + + [ETSI12] = {CTL_ETSI, DFS_ETSI_REGION, 2, 0, 4, {CHAN_5170_5250_7, + CHAN_5250_5330_14, + CHAN_5490_5730_6, + CHAN_5735_5835_8} }, + + [ETSI13] = {CTL_ETSI, DFS_ETSI_REGION, 2, 0, 4, {CHAN_5170_5250_8, + CHAN_5250_5330_12, + CHAN_5490_5730_5, + CHAN_5735_5875_4} }, + + [ETSI14] = {CTL_ETSI, DFS_ETSI_REGION, 2, 0, 4, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5490_5730_7, + CHAN_5735_5875_5} }, + + [ETSI15] = {CTL_ETSI, DFS_ETSI_REGION, 2, 0, 4, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5490_5730_5, + CHAN_5735_5815_2} }, + + [APL1] = {CTL_ETSI, DFS_UNINIT_REGION, 2, 0, 1, {CHAN_5735_5835_2} }, + + [APL2] = {CTL_ETSI, DFS_UNINIT_REGION, 2, 0, 1, {CHAN_5735_5815_4} }, + + [APL4] = {CTL_ETSI, DFS_UNINIT_REGION, 2, 0, 2, {CHAN_5170_5250_2, + CHAN_5735_5835_1} }, + + [APL6] = {CTL_ETSI, DFS_ETSI_REGION, 2, 0, 3, {CHAN_5170_5250_3, + CHAN_5250_5330_2, + CHAN_5735_5835_3} }, + + [APL8] = {CTL_FCC, DFS_ETSI_REGION, 2, 0, 2, {CHAN_5250_5330_4, + CHAN_5735_5835_2} }, + + [APL9] = {CTL_MKK, DFS_KR_REGION, 2, 6, 4, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5490_5730_6, + CHAN_5735_5835_1} }, + + [APL10] = {CTL_ETSI, DFS_FCC_REGION, 2, 6, 4, {CHAN_5170_5250_2, + CHAN_5250_5330_4, + CHAN_5490_5710_1, + CHAN_5735_5815_1} }, + + [APL11] = { CTL_ETSI, DFS_FCC_REGION, 2, 0, 4, {CHAN_5170_5250_9, + CHAN_5250_5330_13, + CHAN_5490_5710_4, + CHAN_5735_5875_2} }, + + [APL12] = {CTL_ETSI, DFS_ETSI_REGION, 2, 0, 3, {CHAN_5170_5250_2, + CHAN_5490_5570_1, + CHAN_5735_5775_1} }, + + [APL13] = {CTL_ETSI, DFS_ETSI_REGION, 2, 0, 3, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5490_5670_2} }, + + [APL14] = {CTL_MKK, DFS_CN_REGION, 2, 0, 3, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5735_5835_4} }, + + [APL15] = {CTL_FCC, DFS_UNINIT_REGION, 2, 0, 3, {CHAN_5170_5250_8, + CHAN_5250_5330_16, + CHAN_5735_5835_4} }, + + [APL16] = {CTL_FCC, DFS_UNINIT_REGION, 2, 0, 5, {CHAN_5170_5250_1, + CHAN_5250_5330_6, + CHAN_5490_5590_2, + CHAN_5650_5730_2, + CHAN_5735_5835_2} }, + + [APL17] = {CTL_FCC, DFS_UNINIT_REGION, 2, 0, 5, {CHAN_5170_5250_2, + CHAN_5250_5330_8, + CHAN_5490_5590_3, + CHAN_5650_5730_3, + CHAN_5735_5835_7} }, + + [APL19] = {CTL_FCC, DFS_FCC_REGION, 2, 0, 4, {CHAN_5170_5250_4, + CHAN_5250_5330_7, + CHAN_5490_5730_1, + CHAN_5735_5875_3} }, + + [APL20] = {CTL_ETSI, DFS_ETSI_REGION, 2, 0, 4, {CHAN_5170_5250_8, + CHAN_5250_5330_12, + CHAN_5490_5730_5, + CHAN_5735_5835_4} }, + + [APL23] = {CTL_ETSI, DFS_UNINIT_REGION, 2, 0, 3, {CHAN_5170_5250_7, + CHAN_5250_5330_11, + CHAN_5735_5835_3} }, + + [APL24] = {CTL_ETSI, DFS_ETSI_REGION, 2, 0, 3, {CHAN_5170_5250_8, + CHAN_5250_5330_12, + CHAN_5735_5815_3} }, + + [MKK3] = {CTL_MKK, DFS_UNINIT_REGION, 2, 0, 1, {CHAN_5170_5250_3} }, + + [MKK5] = {CTL_MKK, DFS_MKK_REGION, 2, 0, 3, {CHAN_5170_5250_8, + CHAN_5250_5330_12, + CHAN_5490_5710_7} }, + + [MKK11] = {CTL_MKK, DFS_MKK_REGION, 2, 0, 5, {CHAN_4910_4990_1, + CHAN_5170_5250_2, + CHAN_5030_5090_1, + CHAN_5250_5330_1, + CHAN_5490_5710_7} }, + + [MKK16] = {CTL_MKK, DFS_MKK_REGION, 2, 0, 1, {CHAN_5490_5710_6} }, + + [MKK17] = {CTL_MKK, DFS_MKKN_REGION, 2, 0, 3, {CHAN_5170_5250_8, + CHAN_5250_5330_12, + CHAN_5490_5730_6} }, + + [WORLD_5G_1] = {CTL_NONE, DFS_UNINIT_REGION, 2, 0, 2, + {CHAN_5170_5330_1, + CHAN_5735_5835_5} }, + + [WORLD_5G_2] = {CTL_NONE, DFS_UNINIT_REGION, 2, 0, 3, + {CHAN_5170_5330_1, + CHAN_5490_5730_2, + CHAN_5735_5835_5} }, +}; + +#ifdef CONFIG_REG_CLIENT +const uint32_t reg_2g_sub_dmn_code[REG_DOMAINS_2G_MAX] = { + [FCCA] = 0x0A10, + [FCCB] = 0x0B90, + [WORLD] = 0x0199, + [MKKA] = 0x0A40, + [MKKC] = 0x0A50, + [ETSIC] = 0x0C30, + [ETSID] = 0x0F30, + [KRRA] = 0x0A60, +}; + +const uint32_t reg_5g_sub_dmn_code[REG_DOMAINS_5G_MAX] = { + [NULL1] = 0x0198, + [FCC1] = 0x0110, + [FCC2] = 0x0120, + [FCC3] = 0x0160, + [FCC4] = 0x0165, + [FCC5] = 0x0510, + [FCC6] = 0x0610, + [FCC8] = 0x0810, + [FCC10] = 0x0B10, + [FCC11] = 0x0B20, + [FCC13] = 0x0B60, + [FCC14] = 0x0B70, + [ETSI1] = 0x0130, + [ETSI3] = 0x0330, + [ETSI4] = 0x0430, + [ETSI8] = 0x0830, + [ETSI9] = 0x0930, + [ETSI10] = 0x0D30, + [ETSI11] = 0x0E30, + [ETSI12] = 0x0E38, + [ETSI13] = 0x0E39, + [ETSI14] = 0x0E40, + [ETSI15] = 0x0E41, + [APL1] = 0x0150, + [APL2] = 0x0250, + [APL4] = 0x0450, + [APL6] = 0x0650, + [APL8] = 0x0850, + [APL9] = 0x0950, + [APL10] = 0x1050, + [APL11] = 0x1150, + [APL12] = 0x1160, + [APL13] = 0x1170, + [APL14] = 0x1180, + [APL15] = 0x1190, + [APL16] = 0x1200, + [APL17] = 0x1210, + [APL23] = 0x1280, + [APL20] = 0x1250, + [APL23] = 0x1280, + [MKK3] = 0x0340, + [MKK5] = 0x0540, + [MKK11] = 0x1140, + [MKK16] = 0x1640, + [MKK17] = 0x1650, +}; +#endif + +QDF_STATUS reg_get_num_countries(int *num_countries) +{ + *num_countries = QDF_ARRAY_SIZE(g_all_countries); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_num_reg_dmn_pairs(int *num_reg_dmn) +{ + *num_reg_dmn = QDF_ARRAY_SIZE(g_reg_dmn_pairs); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_default_country(uint16_t *default_country) +{ + *default_country = CTRY_UNITED_STATES; + + return QDF_STATUS_SUCCESS; +} + +bool reg_etsi13_regdmn(uint8_t reg_dmn) +{ + return reg_dmn == ETSI13; +} + +bool reg_en302_502_regdmn(uint16_t regdmn) +{ + return ((regdmn == ETSI11_WORLD) || + (regdmn == ETSI12_WORLD) || + (regdmn == ETSI14_WORLD) || + (regdmn == ETSI15_WORLD)); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db.h new file mode 100644 index 0000000000000000000000000000000000000000..b07c14339a647fc1a577e1e3162ad752c3d992a4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_db.h + * This file contains regulatory component data structures + */ + +#ifndef __REG_DB_H +#define __REG_DB_H + +/* Alpha2 code for world reg domain */ +#define REG_WORLD_ALPHA2 "00" + +/** + * struct regulatory_rule + * @start_freq: start frequency + * @end_freq: end frequency + * @max_bw: maximum bandwidth + * @reg_power: regulatory power + * @flags: regulatory flags + */ +struct regulatory_rule { + uint16_t start_freq; + uint16_t end_freq; + uint16_t max_bw; + uint8_t reg_power; + uint16_t flags; +}; + +/** + * struct regdomain + * @ctl_val: CTL value + * @dfs_region: dfs region + * @min_bw: minimum bandwidth + * @num_reg_rules: number of regulatory rules + * @reg_rules_id: regulatory rule index + */ +struct regdomain { + uint8_t ctl_val; + enum dfs_reg dfs_region; + uint16_t min_bw; + uint8_t ant_gain; + uint8_t num_reg_rules; + uint8_t reg_rule_id[MAX_REG_RULES]; +}; + +/** + * struct country_code_to_reg_domain + * @country_code: country code + * @reg_dmn_pair_id: reg domainpair id + * @alpha2: iso-3166 alpha2 + * @max_bw_2g: maximum 2g bandwidth + * @max_bw_5g: maximum 5g bandwidth + * @phymode_bitmap: phymodes not supported + */ +struct country_code_to_reg_domain { + uint16_t country_code; + uint16_t reg_dmn_pair_id; + uint8_t alpha2[REG_ALPHA2_LEN + 1]; + uint16_t max_bw_2g; + uint16_t max_bw_5g; + uint16_t phymode_bitmap; +}; + +/** + * struct reg_domain_pair + * @reg_dmn_pair_id: reg domainpiar value + * @dmn_id_5g: 5g reg domain value + * @dmn_id_2g: 2g regdomain value + */ +struct reg_domain_pair { + uint16_t reg_dmn_pair_id; + uint8_t dmn_id_5g; + uint8_t dmn_id_2g; +}; + +QDF_STATUS reg_get_num_countries(int *num_countries); + +QDF_STATUS reg_get_num_reg_dmn_pairs(int *num_reg_dmn); + +QDF_STATUS reg_get_default_country(uint16_t *default_country); + +/** + * reg_etsi13_regdmn () - Checks if the reg domain is ETSI13 or not + * @reg_dmn: reg domain + * + * Return: true or false + */ +bool reg_etsi13_regdmn(uint8_t reg_dmn); + +/** + * reg_en302_502_regdmn() - Check if the reg domain is en302_502 applicable. + * @reg_dmn: Regulatory domain pair ID. + * + * Return: True if EN302_502 applicable, else false. + */ +bool reg_en302_502_regdmn(uint16_t reg_dmn); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db_parser.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db_parser.c new file mode 100644 index 0000000000000000000000000000000000000000..3d993a1357edd6168ab0db64aa059bbf71154fc7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db_parser.c @@ -0,0 +1,388 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_db_parser.c + * This file provides regulatory data base parser functions. + */ + +#include +#include +#include +#include "reg_db.h" +#include "reg_db_parser.h" +#include +#include +#include "reg_priv_objs.h" +#include "reg_utils.h" + +QDF_STATUS reg_is_country_code_valid(uint8_t *alpha2) +{ + uint16_t i; + int num_countries; + + reg_get_num_countries(&num_countries); + + for (i = 0; i < num_countries; i++) { + if ((g_all_countries[i].alpha2[0] == alpha2[0]) && + (g_all_countries[i].alpha2[1] == alpha2[1])) + return QDF_STATUS_SUCCESS; + else + continue; + } + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS reg_regrules_assign(uint8_t dmn_id_2g, uint8_t dmn_id_5g, + uint8_t ant_gain_2g, uint8_t ant_gain_5g, + struct cur_regulatory_info *reg_info) + +{ + uint8_t k; + uint8_t rule_index; + struct cur_reg_rule *r_r_2g = reg_info->reg_rules_2g_ptr; + struct cur_reg_rule *r_r_5g = reg_info->reg_rules_5g_ptr; + + for (k = 0; k < reg_info->num_2g_reg_rules; k++) { + rule_index = regdomains_2g[dmn_id_2g].reg_rule_id[k]; + r_r_2g->start_freq = reg_rules_2g[rule_index].start_freq; + r_r_2g->end_freq = reg_rules_2g[rule_index].end_freq; + r_r_2g->max_bw = reg_rules_2g[rule_index].max_bw; + r_r_2g->reg_power = reg_rules_2g[rule_index].reg_power; + r_r_2g->flags = reg_rules_2g[rule_index].flags; + r_r_2g->ant_gain = ant_gain_2g; + r_r_2g++; + } + + for (k = 0; k < reg_info->num_5g_reg_rules; k++) { + rule_index = regdomains_5g[dmn_id_5g].reg_rule_id[k]; + r_r_5g->start_freq = reg_rules_5g[rule_index].start_freq; + r_r_5g->end_freq = reg_rules_5g[rule_index].end_freq; + r_r_5g->max_bw = reg_rules_5g[rule_index].max_bw; + r_r_5g->reg_power = reg_rules_5g[rule_index].reg_power; + r_r_5g->flags = reg_rules_5g[rule_index].flags; + r_r_5g->ant_gain = ant_gain_5g; + r_r_5g++; + } + + if ((r_r_2g == reg_info->reg_rules_2g_ptr) && + (r_r_5g == reg_info->reg_rules_5g_ptr)) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_rdpair_from_country_iso(uint8_t *alpha2, + uint16_t *country_index, + uint16_t *regdmn_pair) +{ + uint16_t i, j; + int num_countries; + int num_reg_dmn; + + reg_get_num_countries(&num_countries); + reg_get_num_reg_dmn_pairs(&num_reg_dmn); + + for (i = 0; i < num_countries; i++) { + if ((g_all_countries[i].alpha2[0] == alpha2[0]) && + (g_all_countries[i].alpha2[1] == alpha2[1])) + break; + } + + if (i == num_countries) { + *country_index = -1; + return QDF_STATUS_E_FAILURE; + } + + for (j = 0; j < num_reg_dmn; j++) { + if (g_reg_dmn_pairs[j].reg_dmn_pair_id == + g_all_countries[i].reg_dmn_pair_id) + break; + } + + if (j == num_reg_dmn) { + *regdmn_pair = -1; + return QDF_STATUS_E_FAILURE; + } + + *country_index = i; + *regdmn_pair = j; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_rdpair_from_regdmn_id(uint16_t regdmn_id, + uint16_t *regdmn_pair) +{ + uint16_t j; + int num_reg_dmn; + + reg_get_num_reg_dmn_pairs(&num_reg_dmn); + + for (j = 0; j < num_reg_dmn; j++) { + if (g_reg_dmn_pairs[j].reg_dmn_pair_id == regdmn_id) + break; + } + + if (j == num_reg_dmn) { + *regdmn_pair = -1; + return QDF_STATUS_E_FAILURE; + } + + *regdmn_pair = j; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_rdpair_from_country_code(uint16_t cc, + uint16_t *country_index, + uint16_t *regdmn_pair) +{ + uint16_t i, j; + int num_countries; + int num_reg_dmn; + + reg_get_num_countries(&num_countries); + reg_get_num_reg_dmn_pairs(&num_reg_dmn); + + for (i = 0; i < num_countries; i++) { + if (g_all_countries[i].country_code == cc) + break; + } + + if (i == num_countries) { + *country_index = -1; + return QDF_STATUS_E_FAILURE; + } + + for (j = 0; j < num_reg_dmn; j++) { + if (g_reg_dmn_pairs[j].reg_dmn_pair_id == + g_all_countries[i].reg_dmn_pair_id) + break; + } + + if (j == num_reg_dmn) { + *regdmn_pair = -1; + return QDF_STATUS_E_FAILURE; + } + + *country_index = i; + *regdmn_pair = j; + + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS reg_get_reginfo_form_country_code_and_regdmn_pair( + struct cur_regulatory_info *reg_info, + uint16_t country_index, + uint16_t regdmn_pair) +{ + uint8_t rule_size_2g, rule_size_5g; + uint8_t dmn_id_5g, dmn_id_2g; + uint8_t ant_gain_2g, ant_gain_5g; + QDF_STATUS err; + + dmn_id_5g = g_reg_dmn_pairs[regdmn_pair].dmn_id_5g; + dmn_id_2g = g_reg_dmn_pairs[regdmn_pair].dmn_id_2g; + + rule_size_2g = QDF_ARRAY_SIZE(regdomains_2g[dmn_id_2g].reg_rule_id); + rule_size_5g = QDF_ARRAY_SIZE(regdomains_5g[dmn_id_5g].reg_rule_id); + + if (((rule_size_2g + rule_size_5g) >= + regdomains_2g[dmn_id_2g].num_reg_rules + + regdomains_5g[dmn_id_5g].num_reg_rules)) { + + qdf_mem_copy(reg_info->alpha2, + g_all_countries[country_index].alpha2, + sizeof(g_all_countries[country_index].alpha2)); + + reg_info->ctry_code = + g_all_countries[country_index].country_code; + reg_info->reg_dmn_pair = + g_reg_dmn_pairs[regdmn_pair].reg_dmn_pair_id; + reg_info->dfs_region = regdomains_5g[dmn_id_5g].dfs_region; + reg_info->phybitmap = + g_all_countries[country_index].phymode_bitmap; + + reg_info->max_bw_2g = g_all_countries[country_index].max_bw_2g; + reg_info->max_bw_5g = g_all_countries[country_index].max_bw_5g; + + reg_info->min_bw_2g = regdomains_2g[dmn_id_2g].min_bw; + reg_info->min_bw_5g = regdomains_5g[dmn_id_5g].min_bw; + + ant_gain_2g = regdomains_2g[dmn_id_2g].ant_gain; + ant_gain_5g = regdomains_5g[dmn_id_5g].ant_gain; + + reg_info->num_2g_reg_rules = + regdomains_2g[dmn_id_2g].num_reg_rules; + reg_info->num_5g_reg_rules = + regdomains_5g[dmn_id_5g].num_reg_rules; + + reg_info->reg_rules_2g_ptr = (struct cur_reg_rule *) + qdf_mem_malloc((reg_info->num_2g_reg_rules) * + sizeof(struct cur_reg_rule)); + reg_info->reg_rules_5g_ptr = (struct cur_reg_rule *) + qdf_mem_malloc((reg_info->num_5g_reg_rules) * + sizeof(struct cur_reg_rule)); + + err = reg_regrules_assign(dmn_id_2g, dmn_id_5g, + ant_gain_2g, ant_gain_5g, reg_info); + + if (err == QDF_STATUS_E_FAILURE) { + reg_err("No rule for country index = %d regdmn_pair = %d", + country_index, regdmn_pair); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; + } else if (!(((rule_size_2g + rule_size_5g) >= + regdomains_2g[dmn_id_2g].num_reg_rules + + regdomains_5g[dmn_id_5g].num_reg_rules))) + return QDF_STATUS_E_NOMEM; + + return QDF_STATUS_SUCCESS; +} + +#ifdef CONFIG_REG_CLIENT +/** + * reg_update_alpha2_from_domain() - Get country alpha2 code from reg domain + * @reg_info: pointer to hold alpha2 code + * + * This function is used to populate alpha2 of @reg_info with: + * (a) "00" (REG_WORLD_ALPHA2) for WORLD domain and + * (b) alpha2 of first country matching with non WORLD domain. + * + * Return: None + */ +static void +reg_update_alpha2_from_domain(struct cur_regulatory_info *reg_info) +{ + uint16_t i; + int num_countries; + + if (reg_is_world_ctry_code(reg_info->reg_dmn_pair)) { + qdf_mem_copy(reg_info->alpha2, REG_WORLD_ALPHA2, + sizeof(reg_info->alpha2)); + return; + } + + reg_get_num_countries(&num_countries); + + for (i = 0; i < (uint16_t)num_countries; i++) + if (g_all_countries[i].reg_dmn_pair_id == + reg_info->reg_dmn_pair) + break; + + if (i == (uint16_t)num_countries) + return; + + qdf_mem_copy(reg_info->alpha2, g_all_countries[i].alpha2, + sizeof(g_all_countries[i].alpha2)); + reg_info->ctry_code = g_all_countries[i].country_code; +} +#else +static inline void +reg_update_alpha2_from_domain(struct cur_regulatory_info *reg_info) +{ +} +#endif + +static inline QDF_STATUS reg_get_reginfo_form_regdmn_pair( + struct cur_regulatory_info *reg_info, + uint16_t regdmn_pair) +{ + uint8_t rule_size_2g, rule_size_5g; + uint8_t dmn_id_5g, dmn_id_2g; + uint8_t ant_gain_2g, ant_gain_5g; + QDF_STATUS err; + + dmn_id_5g = g_reg_dmn_pairs[regdmn_pair].dmn_id_5g; + dmn_id_2g = g_reg_dmn_pairs[regdmn_pair].dmn_id_2g; + + rule_size_2g = QDF_ARRAY_SIZE(regdomains_2g[dmn_id_2g].reg_rule_id); + rule_size_5g = QDF_ARRAY_SIZE(regdomains_5g[dmn_id_5g].reg_rule_id); + + if (((rule_size_2g + rule_size_5g) >= + regdomains_2g[dmn_id_2g].num_reg_rules + + regdomains_5g[dmn_id_5g].num_reg_rules)) { + + qdf_mem_zero(reg_info->alpha2, sizeof(reg_info->alpha2)); + + reg_info->reg_dmn_pair = + g_reg_dmn_pairs[regdmn_pair].reg_dmn_pair_id; + reg_info->ctry_code = 0; + + reg_update_alpha2_from_domain(reg_info); + + reg_info->dfs_region = regdomains_5g[dmn_id_5g].dfs_region; + reg_info->phybitmap = 0; + + reg_info->max_bw_2g = 40; + reg_info->max_bw_5g = 160; + + reg_info->min_bw_2g = regdomains_2g[dmn_id_2g].min_bw; + reg_info->min_bw_5g = regdomains_5g[dmn_id_5g].min_bw; + + ant_gain_2g = regdomains_2g[dmn_id_2g].ant_gain; + ant_gain_5g = regdomains_5g[dmn_id_5g].ant_gain; + + reg_info->num_2g_reg_rules = + regdomains_2g[dmn_id_2g].num_reg_rules; + reg_info->num_5g_reg_rules = + regdomains_5g[dmn_id_5g].num_reg_rules; + + reg_info->reg_rules_2g_ptr = (struct cur_reg_rule *) + qdf_mem_malloc((reg_info->num_2g_reg_rules) * + sizeof(struct cur_reg_rule)); + reg_info->reg_rules_5g_ptr = (struct cur_reg_rule *) + qdf_mem_malloc((reg_info->num_5g_reg_rules) * + sizeof(struct cur_reg_rule)); + + err = reg_regrules_assign(dmn_id_2g, dmn_id_5g, + ant_gain_2g, ant_gain_5g, reg_info); + if (err == QDF_STATUS_E_FAILURE) { + reg_err("No rule for regdmn_pair = %d\n", regdmn_pair); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; + } else if (!(((rule_size_2g + rule_size_5g) >= + regdomains_2g[dmn_id_2g].num_reg_rules + + regdomains_5g[dmn_id_5g].num_reg_rules))) + return QDF_STATUS_E_NOMEM; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_cur_reginfo(struct cur_regulatory_info *reg_info, + uint16_t country_index, + uint16_t regdmn_pair) +{ + if ((country_index != (uint16_t)(-1)) && + (regdmn_pair != (uint16_t)(-1))) + return reg_get_reginfo_form_country_code_and_regdmn_pair( + reg_info, country_index, regdmn_pair); + else if (regdmn_pair != (uint16_t)(-1)) + return reg_get_reginfo_form_regdmn_pair(reg_info, regdmn_pair); + else + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db_parser.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..cc9cd021adccf3448c1e6c0a9318f5d40fdb3ae7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db_parser.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_db.h + * This file contains regulatory data base parser function declarations + */ + +#ifndef __REG_DB_PARSER_H +#define __REG_DB_PARSER_H + +extern const struct country_code_to_reg_domain g_all_countries[]; +extern const struct reg_domain_pair g_reg_dmn_pairs[]; +extern const struct regulatory_rule reg_rules_2g[]; +extern const struct regdomain regdomains_2g[]; +extern const struct regulatory_rule reg_rules_5g[]; +extern const struct regdomain regdomains_5g[]; + +#ifdef CONFIG_REG_CLIENT +extern const uint32_t reg_2g_sub_dmn_code[]; +extern const uint32_t reg_5g_sub_dmn_code[]; +#endif + +/** + * reg_is_country_code_valid() - Check if the given country code is valid + * @alpha2: Country string + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_is_country_code_valid(uint8_t *alpha2); + +/** + * reg_regrules_assign() - Get 2GHz and 5GHz regulatory rules from regdomain + * structure. + * @dmn_id_2g: 2GHz regdomain ID + * @dmn_id_5g: 5GHz regdomain ID + * @ant_gain_2g: 2GHz antenna gain + * @ant_gain_5g: 5GHz antenna gain + * @reg_info: Pointer to current regulatory info structure + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_regrules_assign(uint8_t dmn_id_2g, uint8_t dmn_id_5g, + uint8_t ant_gain_2g, uint8_t ant_gain_5g, + struct cur_regulatory_info *reg_info); + +/** + * reg_get_cur_reginfo() - Get current regulatory info for a given country code + * @reg_info: Pointer to current regulatory info structure + * @country_index: Country code index in the country code table + * @regdmn_pair: Regdomain pair ID + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_get_cur_reginfo(struct cur_regulatory_info *reg_info, + uint16_t country_index, uint16_t regdmn_pair); + +/** + * reg_get_rdpair_from_country_iso() - Get regdomain pair ID from country string + * @alpha: Pointer to country code string + * @country_index: Pointer to save country code index + * @regdmn_pair: Pointer to save regdomain pair ID index + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_get_rdpair_from_country_iso(uint8_t *alpha, + uint16_t *country_index, + uint16_t *regdmn_pair); + +/** + * reg_get_rdpair_from_country_code() - Get regdomain pair ID from country code + * @cc: Country code + * @country_index: Pointer to save country code index + * @regdmn_pair: Pointer to save regdomain pair ID index + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_get_rdpair_from_country_code(uint16_t cc, + uint16_t *country_index, + uint16_t *regdmn_pair); + +/** + * reg_get_rdpair_from_regdmn_id() - Get regdomain pair ID from regdomain ID + * @regdmn_id: Regdomain ID + * @regdmn_pair: Pointer to save regdomain pair ID index + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_get_rdpair_from_regdmn_id(uint16_t regdmn_id, + uint16_t *regdmn_pair); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_host_11d.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_host_11d.c new file mode 100644 index 0000000000000000000000000000000000000000..cde9ebb794eaa4b82ba03e218caedfa3ce80eedd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_host_11d.c @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Add host 11d scan utility functions + */ + +#include +#include + +#include "reg_priv_objs.h" + +#include "reg_host_11d.h" +#include "reg_services_common.h" + +static QDF_STATUS reg_11d_scan_trigger_handler( + struct wlan_regulatory_psoc_priv_obj *soc_reg) +{ + struct scan_start_request *req; + struct wlan_objmgr_vdev *vdev; + QDF_STATUS status; + + req = qdf_mem_malloc(sizeof(*req)); + if (!req) + return QDF_STATUS_E_NOMEM; + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc( + soc_reg->psoc_ptr, + soc_reg->vdev_id_for_11d_scan, + WLAN_REGULATORY_SB_ID); + if (!vdev) { + reg_err("vdev object is NULL id %d", + soc_reg->vdev_id_for_11d_scan); + qdf_mem_free(req); + return QDF_STATUS_E_FAILURE; + } + + ucfg_scan_init_default_params(vdev, req); + + req->scan_req.scan_id = ucfg_scan_get_scan_id(soc_reg->psoc_ptr); + if (!req->scan_req.scan_id) { + wlan_objmgr_vdev_release_ref(vdev, WLAN_REGULATORY_SB_ID); + qdf_mem_free(req); + reg_err("Invalid scan ID"); + return QDF_STATUS_E_FAILURE; + } + soc_reg->scan_id = req->scan_req.scan_id; + req->scan_req.vdev_id = soc_reg->vdev_id_for_11d_scan; + req->scan_req.scan_req_id = soc_reg->scan_req_id; + req->scan_req.scan_priority = SCAN_PRIORITY_LOW; + req->scan_req.scan_f_passive = false; + + status = ucfg_scan_start(req); + reg_nofl_debug("11d scan trigger vdev %d scan_id %d req_id %d status %d", + soc_reg->vdev_id_for_11d_scan, soc_reg->scan_id, + soc_reg->scan_req_id, status); + + if (status != QDF_STATUS_SUCCESS) + /* Don't free req here, ucfg_scan_start will do free */ + reg_err("11d scan req failed vdev %d", + soc_reg->vdev_id_for_11d_scan); + + wlan_objmgr_vdev_release_ref(vdev, WLAN_REGULATORY_SB_ID); + + return status; +} + +static void reg_11d_scan_event_cb( + struct wlan_objmgr_vdev *vdev, + struct scan_event *event, void *arg) +{ +}; + +QDF_STATUS reg_11d_host_scan( + struct wlan_regulatory_psoc_priv_obj *soc_reg) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + reg_debug("host 11d enabled %d, inited: %d", soc_reg->enable_11d_supp, + soc_reg->is_host_11d_inited); + if (!soc_reg->is_host_11d_inited) + return QDF_STATUS_E_FAILURE; + + if (soc_reg->enable_11d_supp) { + qdf_mc_timer_stop(&soc_reg->timer); + status = reg_11d_scan_trigger_handler(soc_reg); + if (status != QDF_STATUS_SUCCESS) + return status; + + qdf_mc_timer_start(&soc_reg->timer, soc_reg->scan_11d_interval); + } else { + qdf_mc_timer_stop(&soc_reg->timer); + } + return status; +} + +static void reg_11d_scan_timer(void *context) +{ + struct wlan_regulatory_psoc_priv_obj *soc_reg = context; + + reg_debug("11d scan timeout"); + + if (!soc_reg) + return; + + reg_11d_host_scan(soc_reg); +} + +QDF_STATUS reg_11d_host_scan_init(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *soc_reg; + + soc_reg = reg_get_psoc_obj(psoc); + if (!soc_reg) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + if (soc_reg->is_host_11d_inited) { + reg_debug("host 11d scan are already inited"); + return QDF_STATUS_SUCCESS; + } + soc_reg->scan_req_id = + ucfg_scan_register_requester(psoc, "11d", + reg_11d_scan_event_cb, + soc_reg); + qdf_mc_timer_init(&soc_reg->timer, QDF_TIMER_TYPE_SW, + reg_11d_scan_timer, soc_reg); + + soc_reg->is_host_11d_inited = true; + reg_debug("reg 11d scan inited"); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_11d_host_scan_deinit(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *soc_reg; + + soc_reg = reg_get_psoc_obj(psoc); + if (!soc_reg) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + if (!soc_reg->is_host_11d_inited) { + reg_debug("host 11d scan are not inited"); + return QDF_STATUS_SUCCESS; + } + qdf_mc_timer_stop(&soc_reg->timer); + qdf_mc_timer_destroy(&soc_reg->timer); + ucfg_scan_unregister_requester(psoc, soc_reg->scan_req_id); + soc_reg->is_host_11d_inited = false; + reg_debug("reg 11d scan deinit"); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_host_11d.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_host_11d.h new file mode 100644 index 0000000000000000000000000000000000000000..6d6a3b1e88d6e9005bf876ac72a3703d76aabd66 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_host_11d.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Defines host 11d scan utility functions + */ +#ifndef _REG_HOST_11D_H_ +#define _REG_HOST_11D_H_ + +#ifdef HOST_11D_SCAN +/** + * reg_11d_host_scan() - Start/stop 11d scan + * @soc_reg: soc regulatory context + * + * This function gets called upon 11d scan enable/disable changed. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS reg_11d_host_scan(struct wlan_regulatory_psoc_priv_obj *soc_reg); + +/** + * reg_11d_host_scan_init() - Init 11d host scan resource + * @psoc: soc context + * + * This function gets called during pdev create notification callback to + * init the 11d scan related resource. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS reg_11d_host_scan_init(struct wlan_objmgr_psoc *psoc); + +/** + * reg_11d_host_scan_deinit() - Deinit 11d host scan resource + * @psoc: soc context + * + * This function gets called during pdev destroy notification callback to + * deinit the 11d scan related resource. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS reg_11d_host_scan_deinit(struct wlan_objmgr_psoc *psoc); +#else + +static inline QDF_STATUS reg_11d_host_scan( + struct wlan_regulatory_psoc_priv_obj *soc_reg) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS reg_11d_host_scan_init(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS reg_11d_host_scan_deinit(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_lte.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_lte.c new file mode 100644 index 0000000000000000000000000000000000000000..2687c314ffe6104212c6c6fca912a46fc79220cd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_lte.c @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_lte.c + * This file contains the LTE feature APIs. + */ + +#include +#include +#include +#include "reg_services_public_struct.h" +#include +#include +#include "reg_services_common.h" +#include "reg_priv_objs.h" +#include "reg_build_chan_list.h" +#include "reg_callbacks.h" +#include "reg_lte.h" + +#ifdef LTE_COEX +/** + * reg_process_ch_avoid_freq() - Update unsafe frequencies in psoc_priv_obj + * @psoc: pointer to psoc object + * @pdev: pointer to pdev object + * + * Return: QDF_STATUS + */ +static QDF_STATUS reg_process_ch_avoid_freq(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + enum channel_enum ch_loop; + enum channel_enum start_ch_idx; + enum channel_enum end_ch_idx; + uint8_t start_channel; + uint8_t end_channel; + uint32_t i; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct ch_avoid_freq_type *range; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < psoc_priv_obj->avoid_freq_list.ch_avoid_range_cnt; + i++) { + if (psoc_priv_obj->unsafe_chan_list.chan_cnt >= NUM_CHANNELS) { + reg_warn("LTE Coex unsafe channel list full"); + break; + } + + start_ch_idx = INVALID_CHANNEL; + end_ch_idx = INVALID_CHANNEL; + range = &psoc_priv_obj->avoid_freq_list.avoid_freq_range[i]; + + start_channel = reg_freq_to_chan(pdev, range->start_freq); + end_channel = reg_freq_to_chan(pdev, range->end_freq); + reg_debug("start: freq %d, ch %d, end: freq %d, ch %d", + range->start_freq, start_channel, range->end_freq, + end_channel); + + /* do not process frequency bands that are not mapped to + * predefined channels + */ + if (start_channel == 0 || end_channel == 0) + continue; + + for (ch_loop = 0; ch_loop < NUM_CHANNELS; + ch_loop++) { + if (REG_CH_TO_FREQ(ch_loop) >= range->start_freq) { + start_ch_idx = ch_loop; + break; + } + } + for (ch_loop = 0; ch_loop < NUM_CHANNELS; + ch_loop++) { + if (REG_CH_TO_FREQ(ch_loop) >= range->end_freq) { + end_ch_idx = ch_loop; + if (REG_CH_TO_FREQ(ch_loop) > range->end_freq) + end_ch_idx--; + break; + } + } + + if (start_ch_idx == INVALID_CHANNEL || + end_ch_idx == INVALID_CHANNEL) + continue; + + for (ch_loop = start_ch_idx; ch_loop <= end_ch_idx; + ch_loop++) { + psoc_priv_obj->unsafe_chan_list.chan_freq_list[ + psoc_priv_obj->unsafe_chan_list.chan_cnt++] = + REG_CH_TO_FREQ(ch_loop); + if (psoc_priv_obj->unsafe_chan_list.chan_cnt >= + NUM_CHANNELS) { + reg_warn("LTECoex unsafe ch list full"); + break; + } + } + } + + if (!psoc_priv_obj->unsafe_chan_list.chan_cnt) + return QDF_STATUS_SUCCESS; + + for (ch_loop = 0; ch_loop < psoc_priv_obj->unsafe_chan_list.chan_cnt; + ch_loop++) { + if (ch_loop >= NUM_CHANNELS) + break; + reg_debug("Unsafe freq %d", + psoc_priv_obj->unsafe_chan_list.chan_freq_list[ch_loop]); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * reg_update_unsafe_ch() - Updates unsafe channels in current channel list + * @psoc: Pointer to psoc structure + * @object: Pointer to pdev structure + * @arg: List of arguments + * + * Return: None + */ +static void reg_update_unsafe_ch(struct wlan_objmgr_psoc *psoc, + void *object, void *arg) +{ + struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)object; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + QDF_STATUS status; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return; + } + + if (psoc_priv_obj->ch_avoid_ind) { + status = reg_process_ch_avoid_freq(psoc, pdev); + if (QDF_IS_STATUS_ERROR(status)) + psoc_priv_obj->ch_avoid_ind = false; + } + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + status = reg_send_scheduler_msg_nb(psoc, pdev); + + if (QDF_IS_STATUS_ERROR(status)) + reg_err("channel change msg schedule failed"); +} + +QDF_STATUS reg_process_ch_avoid_event(struct wlan_objmgr_psoc *psoc, + struct ch_avoid_ind_type *ch_avoid_event) +{ + uint32_t i; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + QDF_STATUS status; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + if (CH_AVOID_RULE_DO_NOT_RESTART == + psoc_priv_obj->restart_beaconing) { + reg_debug("skipping all LTE Coex unsafe channel range"); + return QDF_STATUS_SUCCESS; + } + /* Make unsafe channel list */ + reg_debug("band count %d", ch_avoid_event->ch_avoid_range_cnt); + + /* generate vendor specific event */ + qdf_mem_zero(&psoc_priv_obj->avoid_freq_list, + sizeof(struct ch_avoid_ind_type)); + qdf_mem_zero(&psoc_priv_obj->unsafe_chan_list, + sizeof(struct unsafe_ch_list)); + + for (i = 0; i < ch_avoid_event->ch_avoid_range_cnt; i++) { + if ((CH_AVOID_RULE_RESTART_24G_ONLY == + psoc_priv_obj->restart_beaconing) && + REG_IS_5GHZ_FREQ(ch_avoid_event-> + avoid_freq_range[i].start_freq)) { + reg_debug( + "skipping 5Ghz LTE Coex unsafe channel range"); + continue; + } + psoc_priv_obj->avoid_freq_list.avoid_freq_range[i].start_freq = + ch_avoid_event->avoid_freq_range[i].start_freq; + psoc_priv_obj->avoid_freq_list.avoid_freq_range[i].end_freq = + ch_avoid_event->avoid_freq_range[i].end_freq; + } + psoc_priv_obj->avoid_freq_list.ch_avoid_range_cnt = + ch_avoid_event->ch_avoid_range_cnt; + + psoc_priv_obj->ch_avoid_ind = true; + + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_REGULATORY_NB_ID); + + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("error taking psoc ref cnt"); + return status; + } + + status = wlan_objmgr_iterate_obj_list( + psoc, WLAN_PDEV_OP, reg_update_unsafe_ch, NULL, 1, + WLAN_REGULATORY_NB_ID); + + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_NB_ID); + + return status; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_lte.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_lte.h new file mode 100644 index 0000000000000000000000000000000000000000..1f3288192ceeae6fbf14e590753fcdd2069b3a0a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_lte.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_db_lte.h + * This file provides prototypes of the regulatory component + * LTE functions + */ + +#ifdef LTE_COEX +/** + * reg_process_ch_avoid_event() - Process channel avoid event + * @psoc: psoc for country information + * @ch_avoid_event: channel avoid event buffer + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_process_ch_avoid_event( + struct wlan_objmgr_psoc *psoc, + struct ch_avoid_ind_type *ch_avoid_event); +#else +static inline QDF_STATUS reg_process_ch_avoid_event( + struct wlan_objmgr_psoc *psoc, + struct ch_avoid_ind_type *ch_avoid_event) +{ + return QDF_STATUS_SUCCESS; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_offload_11d_scan.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_offload_11d_scan.c new file mode 100644 index 0000000000000000000000000000000000000000..2099d3847ad8d4a3d55749bcc2001a35ce95bb29 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_offload_11d_scan.c @@ -0,0 +1,461 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Add 11d utility functions + */ + +#include +#include +#include +#include +#include +#include "reg_priv_objs.h" +#include "reg_utils.h" +#include "reg_services_common.h" +#include "reg_offload_11d_scan.h" +#include "reg_host_11d.h" + +#ifdef TARGET_11D_SCAN + +QDF_STATUS reg_set_11d_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct set_country country_code; + struct wlan_objmgr_psoc *psoc; + struct cc_regdmn_s rd; + QDF_STATUS status; + struct wlan_lmac_if_reg_tx_ops *tx_ops; + uint8_t pdev_id; + + if (!country) { + reg_err("Null country code"); + return QDF_STATUS_E_INVAL; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + psoc = wlan_pdev_get_psoc(pdev); + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("Null psoc reg component"); + return QDF_STATUS_E_INVAL; + } + + if (!qdf_mem_cmp(psoc_priv_obj->cur_country, country, REG_ALPHA2_LEN)) { + if (psoc_priv_obj->cc_src == SOURCE_11D) { + reg_debug("same country"); + return QDF_STATUS_SUCCESS; + } + } + + reg_info("set new 11d country:%c%c to fW", + country[0], country[1]); + + qdf_mem_copy(country_code.country, country, REG_ALPHA2_LEN + 1); + country_code.pdev_id = pdev_id; + + psoc_priv_obj->new_11d_ctry_pending[pdev_id] = true; + + if (psoc_priv_obj->offload_enabled) { + tx_ops = reg_get_psoc_tx_ops(psoc); + if (tx_ops->set_country_code) { + tx_ops->set_country_code(psoc, &country_code); + } else { + reg_err("country set fw handler not present"); + psoc_priv_obj->new_11d_ctry_pending[pdev_id] = false; + return QDF_STATUS_E_FAULT; + } + status = QDF_STATUS_SUCCESS; + } else { + qdf_mem_copy(rd.cc.alpha, country, REG_ALPHA2_LEN + 1); + rd.flags = ALPHA_IS_SET; + reg_program_chan_list(pdev, &rd); + status = QDF_STATUS_SUCCESS; + } + + return status; +} + +/** + * reg_send_11d_flush_cbk() - release 11d psoc reference + * @msg: Pointer to scheduler message. + * + * Return: QDF_STATUS + */ +static QDF_STATUS reg_send_11d_flush_cbk(struct scheduler_msg *msg) +{ + struct reg_11d_scan_msg *scan_msg_11d = msg->bodyptr; + struct wlan_objmgr_psoc *psoc = scan_msg_11d->psoc; + + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + qdf_mem_free(scan_msg_11d); + + return QDF_STATUS_SUCCESS; +} + +/** + * reg_send_11d_msg_cbk() - Send start/stop 11d scan message. + * @msg: Pointer to scheduler message. + * + * Return: QDF_STATUS + */ +static QDF_STATUS reg_send_11d_msg_cbk(struct scheduler_msg *msg) +{ + struct reg_11d_scan_msg *scan_msg_11d = msg->bodyptr; + struct wlan_objmgr_psoc *psoc = scan_msg_11d->psoc; + struct wlan_lmac_if_reg_tx_ops *tx_ops; + struct reg_start_11d_scan_req start_req; + struct reg_stop_11d_scan_req stop_req; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + tx_ops = reg_get_psoc_tx_ops(psoc); + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("Null psoc priv obj"); + goto end; + } + + if (psoc_priv_obj->vdev_id_for_11d_scan == INVALID_VDEV_ID) { + psoc_priv_obj->enable_11d_supp = false; + reg_err("Invalid vdev"); + goto end; + } + + if (scan_msg_11d->enable_11d_supp) { + start_req.vdev_id = psoc_priv_obj->vdev_id_for_11d_scan; + start_req.scan_period_msec = psoc_priv_obj->scan_11d_interval; + start_req.start_interval_msec = 0; + reg_debug("sending start msg"); + tx_ops->start_11d_scan(psoc, &start_req); + } else { + stop_req.vdev_id = psoc_priv_obj->vdev_id_for_11d_scan; + reg_debug("sending stop msg"); + tx_ops->stop_11d_scan(psoc, &stop_req); + } + +end: + qdf_mem_free(scan_msg_11d); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + return QDF_STATUS_SUCCESS; +} + +/** + * reg_sched_11d_msg() - Schedules 11d scan message. + * @scan_msg_11d: 11d scan message + */ +static QDF_STATUS reg_sched_11d_msg(struct reg_11d_scan_msg *scan_msg_11d) +{ + struct scheduler_msg msg = {0}; + QDF_STATUS status; + + status = wlan_objmgr_psoc_try_get_ref(scan_msg_11d->psoc, + WLAN_REGULATORY_SB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("error taking psoc ref cnt"); + return status; + } + + msg.bodyptr = scan_msg_11d; + msg.callback = reg_send_11d_msg_cbk; + msg.flush_callback = reg_send_11d_flush_cbk; + + status = scheduler_post_message(QDF_MODULE_ID_REGULATORY, + QDF_MODULE_ID_REGULATORY, + QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) + wlan_objmgr_psoc_release_ref(scan_msg_11d->psoc, + WLAN_REGULATORY_SB_ID); + + return status; +} + +void reg_run_11d_state_machine(struct wlan_objmgr_psoc *psoc) +{ + bool temp_11d_support; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + bool world_mode; + struct reg_11d_scan_msg *scan_msg_11d; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("Null reg psoc private obj"); + return; + } + + if (psoc_priv_obj->vdev_id_for_11d_scan == INVALID_VDEV_ID) { + psoc_priv_obj->enable_11d_supp = false; + reg_err("Invalid vdev"); + return; + } + + world_mode = reg_is_world_alpha2(psoc_priv_obj->cur_country); + + temp_11d_support = psoc_priv_obj->enable_11d_supp; + if ((psoc_priv_obj->enable_11d_in_world_mode) && (world_mode)) + psoc_priv_obj->enable_11d_supp = true; + else if (((psoc_priv_obj->user_ctry_set) && + (psoc_priv_obj->user_ctry_priority)) || + (psoc_priv_obj->master_vdev_cnt)) + psoc_priv_obj->enable_11d_supp = false; + else + psoc_priv_obj->enable_11d_supp = + psoc_priv_obj->enable_11d_supp_original; + + reg_debug("inside 11d state machine:tmp %d 11d_supp %d org %d set %d pri %d cnt %d vdev %d", + temp_11d_support, + psoc_priv_obj->enable_11d_supp, + psoc_priv_obj->enable_11d_supp_original, + psoc_priv_obj->user_ctry_set, + psoc_priv_obj->user_ctry_priority, + psoc_priv_obj->master_vdev_cnt, + psoc_priv_obj->vdev_id_for_11d_scan); + + if (temp_11d_support != psoc_priv_obj->enable_11d_supp) { + if (psoc_priv_obj->is_11d_offloaded) { + scan_msg_11d = qdf_mem_malloc(sizeof(*scan_msg_11d)); + if (!scan_msg_11d) + return; + scan_msg_11d->psoc = psoc; + scan_msg_11d->enable_11d_supp = + psoc_priv_obj->enable_11d_supp; + reg_sched_11d_msg(scan_msg_11d); + } else { + reg_11d_host_scan(psoc_priv_obj); + } + } +} + +QDF_STATUS reg_11d_vdev_created_update(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_objmgr_pdev *parent_pdev; + struct wlan_objmgr_psoc *parent_psoc; + uint32_t vdev_id; + enum QDF_OPMODE op_mode; + uint8_t i; + + op_mode = wlan_vdev_mlme_get_opmode(vdev); + + parent_pdev = wlan_vdev_get_pdev(vdev); + parent_psoc = wlan_pdev_get_psoc(parent_pdev); + + psoc_priv_obj = reg_get_psoc_obj(parent_psoc); + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + if ((op_mode == QDF_STA_MODE) || + (op_mode == QDF_P2P_DEVICE_MODE) || + (op_mode == QDF_P2P_CLIENT_MODE)) { + vdev_id = wlan_vdev_get_id(vdev); + if (!psoc_priv_obj->vdev_cnt_11d) { + psoc_priv_obj->vdev_id_for_11d_scan = vdev_id; + reg_debug("running 11d state machine, opmode %d", + op_mode); + reg_run_11d_state_machine(parent_psoc); + } + + for (i = 0; i < MAX_STA_VDEV_CNT; i++) { + if (psoc_priv_obj->vdev_ids_11d[i] == INVALID_VDEV_ID) { + psoc_priv_obj->vdev_ids_11d[i] = vdev_id; + break; + } + } + psoc_priv_obj->vdev_cnt_11d++; + } + + if ((op_mode == QDF_P2P_GO_MODE) || (op_mode == QDF_SAP_MODE)) { + reg_debug("running 11d state machine, opmode %d", op_mode); + psoc_priv_obj->master_vdev_cnt++; + reg_run_11d_state_machine(parent_psoc); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_11d_vdev_delete_update(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_objmgr_pdev *parent_pdev; + struct wlan_objmgr_psoc *parent_psoc; + enum QDF_OPMODE op_mode; + uint32_t vdev_id; + uint8_t i; + + if (!vdev) { + reg_err("NULL vdev"); + return QDF_STATUS_E_INVAL; + } + op_mode = wlan_vdev_mlme_get_opmode(vdev); + + parent_pdev = wlan_vdev_get_pdev(vdev); + parent_psoc = wlan_pdev_get_psoc(parent_pdev); + + psoc_priv_obj = reg_get_psoc_obj(parent_psoc); + if (!psoc_priv_obj) { + reg_err("NULL reg psoc private obj"); + return QDF_STATUS_E_FAULT; + } + + if ((op_mode == QDF_P2P_GO_MODE) || (op_mode == QDF_SAP_MODE)) { + psoc_priv_obj->master_vdev_cnt--; + reg_debug("run 11d state machine, deleted opmode %d", + op_mode); + reg_run_11d_state_machine(parent_psoc); + return QDF_STATUS_SUCCESS; + } + + if ((op_mode == QDF_STA_MODE) || (op_mode == QDF_P2P_DEVICE_MODE) || + (op_mode == QDF_P2P_CLIENT_MODE)) { + vdev_id = wlan_vdev_get_id(vdev); + for (i = 0; i < MAX_STA_VDEV_CNT; i++) { + if (psoc_priv_obj->vdev_ids_11d[i] == vdev_id) { + psoc_priv_obj->vdev_ids_11d[i] = + INVALID_VDEV_ID; + psoc_priv_obj->vdev_cnt_11d--; + break; + } + } + + if (psoc_priv_obj->vdev_id_for_11d_scan != vdev_id) + return QDF_STATUS_SUCCESS; + + if (!psoc_priv_obj->vdev_cnt_11d) { + psoc_priv_obj->vdev_id_for_11d_scan = INVALID_VDEV_ID; + psoc_priv_obj->enable_11d_supp = false; + return QDF_STATUS_SUCCESS; + } + + for (i = 0; i < MAX_STA_VDEV_CNT; i++) { + if (psoc_priv_obj->vdev_ids_11d[i] == INVALID_VDEV_ID) + continue; + psoc_priv_obj->vdev_id_for_11d_scan = + psoc_priv_obj->vdev_ids_11d[i]; + psoc_priv_obj->enable_11d_supp = false; + reg_debug("running 11d state machine, vdev %d", + psoc_priv_obj->vdev_id_for_11d_scan); + reg_run_11d_state_machine(parent_psoc); + break; + } + } + + return QDF_STATUS_SUCCESS; +} + +bool reg_is_11d_scan_inprogress(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("NULL reg psoc private obj"); + return false; + } + + return psoc_priv_obj->enable_11d_supp; +} + +QDF_STATUS reg_save_new_11d_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_lmac_if_reg_tx_ops *tx_ops; + struct set_country country_code; + uint8_t pdev_id; + uint8_t ctr; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("NULL reg psoc private obj"); + + return QDF_STATUS_E_FAILURE; + } + + /* + * Need firmware to send channel list event + * for all phys. Therefore set pdev_id to 0xFF + */ + pdev_id = 0xFF; + for (ctr = 0; ctr < psoc_priv_obj->num_phy; ctr++) + psoc_priv_obj->new_11d_ctry_pending[ctr] = true; + + qdf_mem_copy(country_code.country, country, REG_ALPHA2_LEN + 1); + country_code.pdev_id = pdev_id; + + if (psoc_priv_obj->offload_enabled) { + tx_ops = reg_get_psoc_tx_ops(psoc); + if (tx_ops->set_country_code) { + tx_ops->set_country_code(psoc, &country_code); + } else { + reg_err("NULL country set handler"); + for (ctr = 0; ctr < psoc_priv_obj->num_phy; ctr++) + psoc_priv_obj->new_11d_ctry_pending[ctr] = + false; + return QDF_STATUS_E_FAULT; + } + } + + return QDF_STATUS_SUCCESS; +} + +bool reg_11d_enabled_on_host(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("NULL reg psoc private obj"); + return QDF_STATUS_E_FAILURE; + } + + return (psoc_priv_obj->enable_11d_supp && + !psoc_priv_obj->is_11d_offloaded); +} + +QDF_STATUS reg_set_11d_offloaded(struct wlan_objmgr_psoc *psoc, bool val) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("NULL psoc reg component"); + return QDF_STATUS_E_FAILURE; + } + + psoc_priv_obj->is_11d_offloaded = val; + reg_debug("set is_11d_offloaded %d", val); + return QDF_STATUS_SUCCESS; +} + +bool reg_is_11d_offloaded(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("NULL reg psoc private obj"); + return false; + } + + return psoc_priv_obj->is_11d_offloaded; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_offload_11d_scan.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_offload_11d_scan.h new file mode 100644 index 0000000000000000000000000000000000000000..ad1e64c91f61c360399b7f55f5e3db4f894ea2bf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_offload_11d_scan.h @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Add 11d utility functions + */ + +#ifdef TARGET_11D_SCAN +/** + * reg_run_11d_state_machine() - 11d state machine function. + * @psoc: soc context + */ +void reg_run_11d_state_machine(struct wlan_objmgr_psoc *psoc); + +/** + * reg_set_11d_country() - Set the 11d regulatory country + * @pdev: pdev device for country information + * @country: country value + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_set_11d_country(struct wlan_objmgr_pdev *pdev, uint8_t *country); + +/** + * reg_is_11d_scan_inprogress() - Check 11d scan is supported + * @psoc: psoc ptr + * + * Return: true if 11d scan supported, else false. + */ +bool reg_is_11d_scan_inprogress(struct wlan_objmgr_psoc *psoc); + +/** + * reg_save_new_11d_country() - Save the 11d new country + * @psoc: psoc for country information + * @country: country value + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_save_new_11d_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country); + +/** + * reg_is_11d_offloaded() - whether 11d offloaded supported or not + * @psoc: psoc ptr + * + * Return: bool + */ +bool reg_is_11d_offloaded(struct wlan_objmgr_psoc *psoc); + +/** + * reg_11d_enabled_on_host() - know whether 11d enabled on host + * @psoc: psoc ptr + * + * Return: bool + */ +bool reg_11d_enabled_on_host(struct wlan_objmgr_psoc *psoc); + +/** + * reg_11d_vdev_created_update() - vdev obj create callback + * @vdev: vdev pointer + * + * updates 11d state when a vdev is created. + * + * Return: Success or Failure + */ +QDF_STATUS reg_11d_vdev_created_update(struct wlan_objmgr_vdev *vdev); + +/** + * reg_11d_vdev_delete_update() - update 11d state upon vdev delete + * @vdev: vdev pointer + * + * Return: Success or Failure + */ +QDF_STATUS reg_11d_vdev_delete_update(struct wlan_objmgr_vdev *vdev); + +/** + * reg_set_11d_offloaded() - Set 11d offloaded flag + * @psoc: psoc ptr + * @val: 11d offloaded value + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_set_11d_offloaded(struct wlan_objmgr_psoc *psoc, bool val); + +#else + +static inline void reg_run_11d_state_machine(struct wlan_objmgr_psoc *psoc) +{ +} + +static inline QDF_STATUS reg_set_11d_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country) +{ + return QDF_STATUS_SUCCESS; +} + +static inline bool reg_is_11d_scan_inprogress(struct wlan_objmgr_psoc *psoc) +{ + return false; +} + +static inline QDF_STATUS reg_save_new_11d_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country) +{ + return QDF_STATUS_SUCCESS; +} + +static inline bool reg_is_11d_offloaded(struct wlan_objmgr_psoc *psoc) +{ + return false; +} + +static inline bool reg_11d_enabled_on_host(struct wlan_objmgr_psoc *psoc) +{ + return false; +} + +static inline QDF_STATUS reg_11d_vdev_created_update( + struct wlan_objmgr_vdev *vdev) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS reg_11d_vdev_delete_update( + struct wlan_objmgr_vdev *vdev) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS reg_set_11d_offloaded(struct wlan_objmgr_psoc *psoc, + bool val) +{ + return QDF_STATUS_SUCCESS; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_opclass.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_opclass.c new file mode 100644 index 0000000000000000000000000000000000000000..d661630d032f68d82843ee4da50787b9c5f16031 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_opclass.c @@ -0,0 +1,882 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_opclass.c + * This file defines regulatory opclass functions. + */ + +#include +#include +#include +#include +#include "reg_priv_objs.h" +#include "reg_utils.h" +#include "reg_db.h" +#include "reg_db_parser.h" +#include "reg_host_11d.h" +#include +#include "reg_build_chan_list.h" +#include "reg_opclass.h" +#include "reg_services_common.h" + +#ifdef HOST_OPCLASS +static struct reg_dmn_supp_op_classes reg_dmn_curr_supp_opp_classes = { 0 }; +#endif + +static const struct reg_dmn_op_class_map_t global_op_class[] = { + {81, 25, BW20, BIT(BEHAV_NONE), 2407, + {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13} }, + {82, 25, BW20, BIT(BEHAV_NONE), 2414, + {14} }, + {83, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 2407, + {1, 2, 3, 4, 5, 6, 7, 8, 9} }, + {84, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 2407, + {5, 6, 7, 8, 9, 10, 11, 12, 13} }, + {115, 20, BW20, BIT(BEHAV_NONE), 5000, + {36, 40, 44, 48} }, + {116, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 5000, + {36, 44} }, + {117, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {40, 48} }, + {118, 20, BW20, BIT(BEHAV_NONE), 5000, + {52, 56, 60, 64} }, + {119, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 5000, + {52, 60} }, + {120, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {56, 64} }, + {121, 20, BW20, BIT(BEHAV_NONE), 5000, + {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144} }, + {122, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 5000, + {100, 108, 116, 124, 132, 140} }, + {123, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {104, 112, 120, 128, 136, 144} }, + {125, 20, BW20, BIT(BEHAV_NONE), 5000, + {149, 153, 157, 161, 165, 169} }, + {126, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 5000, + {149, 157} }, + {127, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {153, 161} }, + {128, 80, BW80, BIT(BEHAV_NONE), 5000, + {36, 40, 44, 48, 52, 56, 60, 64, + 100, 104, 108, 112, 116, 120, 124, + 128, 132, 136, 140, 144, + 149, 153, 157, 161} }, + {129, 160, BW80, BIT(BEHAV_NONE), 5000, + {36, 40, 44, 48, 52, 56, 60, 64, + 100, 104, 108, 112, 116, 120, 124, 128} }, + {130, 80, BW80, BIT(BEHAV_BW80_PLUS), 5000, + {36, 40, 44, 48, 52, 56, 60, 64, + 100, 104, 108, 112, 116, 120, 124, 128, + 132, 136, 140, 144, 149, 153, 157, 161} }, + +#ifdef CONFIG_BAND_6GHZ + {131, 20, BW20, BIT(BEHAV_NONE), 5940, + {1, 5, 9, 13, 17, 21, 25, 29, 33, + 37, 41, 45, 49, 53, 57, 61, 65, 69, + 73, 77, 81, 85, 89, 93, 97, + 101, 105, 109, 113, 117, 121, 125, + 129, 133, 137, 141, 145, 149, 153, + 157, 161, 165, 169, 173, 177, 181, + 185, 189, 193, 197, 201, 205, 209, + 213, 217, 221, 225, 229, 233} }, + + {132, 40, BW40_LOW_PRIMARY, BIT(BEHAV_NONE), 5940, + {1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, + 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, 93, 97, + 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, + 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, + 181, 185, 189, 193, 197, 201, 205, 209, 213, 217, + 221, 225, 229, 233} }, + + {133, 80, BW80, BIT(BEHAV_NONE), 5940, + {1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, + 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, 93, 97, + 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, + 141, 145, 149, 153, 157, 161, 165, 169, 173, + 177, 181, 185, 189, 193, 197, 201, 205, 209, 213, + 217, 221, 225, 229, 233} }, + + {134, 160, BW80, BIT(BEHAV_NONE), 5940, + {1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, + 49, 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, + 93, 97, 101, 105, 109, 113, 117, 121, 125, + 129, 133, 137, 141, 145, 149, 153, 157, 161, + 165, 169, 173, 177, 181, 185, 189, 193, 197, + 201, 205, 209, 213, 217, 221, 225, 229, 233} }, + + {135, 80, BW80, BIT(BEHAV_BW80_PLUS), 5940, + {1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, + 45, 49, 53, 57, 61, 65, 69, 73, 77, 81, + 85, 89, 93, 97, 101, 105, 109, 113, 117, + 121, 125, 129, 133, 137, 141, 145, 149, + 153, 157, 161, 165, 169, 173, 177, 181, + 185, 189, 193, 197, 201, 205, 209, 213, + 217, 221, 225, 229, 233} }, +#endif + {0, 0, 0, 0, 0, {0} }, +}; + +static const struct reg_dmn_op_class_map_t us_op_class[] = { + {1, 20, BW20, BIT(BEHAV_NONE), 5000, + {36, 40, 44, 48} }, + {2, 20, BW20, BIT(BEHAV_NONE), 5000, + {52, 56, 60, 64} }, + {4, 20, BW20, BIT(BEHAV_NONE), 5000, + {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144} }, + {5, 20, BW20, BIT(BEHAV_NONE), 5000, + {149, 153, 157, 161, 165} }, + {12, 25, BW20, BIT(BEHAV_NONE), 2407, + {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11} }, + {22, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 5000, + {36, 44} }, + {23, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 5000, + {52, 60} }, + {24, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 5000, + {100, 108, 116, 124, 132} }, + {26, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 5000, + {149, 157} }, + {27, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {40, 48} }, + {28, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {56, 64} }, + {29, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {104, 112, 120, 128, 136} }, + {30, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {153, 161} }, + {31, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {153, 161} }, + {32, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 2407, + {1, 2, 3, 4, 5, 6, 7} }, + {33, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 2407, + {5, 6, 7, 8, 9, 10, 11} }, + {128, 80, BW80, BIT(BEHAV_NONE), 5000, + {36, 40, 44, 48, 52, 56, 60, 64, 100, + 104, 108, 112, 116, 120, 124, 128, 132, + 136, 140, 144, 149, 153, 157, 161} }, + {129, 160, BW80, BIT(BEHAV_NONE), 5000, + {36, 40, 44, 48, 52, 56, 60, 64, 100, + 104, 108, 112, 116, 120, 124, 128} }, + {130, 80, BW80, BIT(BEHAV_BW80_PLUS), 5000, + {36, 40, 44, 48, 52, 56, 60, 64, 100, + 104, 108, 112, 116, 120, 124, 128, 132, + 136, 140, 144, 149, 153, 157, 161} }, + {0, 0, 0, 0, 0, {0} }, +}; + +static const struct reg_dmn_op_class_map_t euro_op_class[] = { + {1, 20, BW20, BIT(BEHAV_NONE), 5000, + {36, 40, 44, 48} }, + {2, 20, BW20, BIT(BEHAV_NONE), 5000, + {52, 56, 60, 64} }, + {3, 20, BW20, BIT(BEHAV_NONE), 5000, + {100, 104, 108, 112, 116, 120, + 124, 128, 132, 136, 140} }, + {4, 25, BW20, BIT(BEHAV_NONE), 2407, + {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13} }, + {5, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 5000, + {36, 44} }, + {6, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 5000, + {52, 60} }, + {7, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 5000, + {100, 108, 116, 124, 132} }, + {8, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {40, 48} }, + {9, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {56, 64} }, + {10, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {104, 112, 120, 128, 136} }, + {11, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 2407, + {1, 2, 3, 4, 5, 6, 7, 8, 9} }, + {12, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 2407, + {5, 6, 7, 8, 9, 10, 11, 12, 13} }, + {17, 20, BW20, BIT(BEHAV_NONE), 5000, + {149, 153, 157, 161, 165, 169} }, + {128, 80, BW80, BIT(BEHAV_NONE), 5000, + {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, + 124, 128} }, + {129, 160, BW80, BIT(BEHAV_NONE), 5000, + {36, 40, 44, 48, 52, 56, 60, 64, 100, + 104, 108, 112, 116, 120, 124, 128} }, + {130, 80, BW80, BIT(BEHAV_BW80_PLUS), 5000, + {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, + 124, 128} }, + {0, 0, 0, 0, 0, {0} }, +}; + +static const struct reg_dmn_op_class_map_t japan_op_class[] = { + {1, 20, BW20, BIT(BEHAV_NONE), 5000, + {36, 40, 44, 48} }, + {30, 25, BW20, BIT(BEHAV_NONE), 2407, + {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13} }, + {31, 25, BW20, BIT(BEHAV_NONE), 2407, + {14} }, + {32, 20, BW20, BIT(BEHAV_NONE), 5000, + {52, 56, 60, 64} }, + {34, 20, BW20, BIT(BEHAV_NONE), 5000, + {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140} }, + {36, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 5000, + {36, 44} }, + {37, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 5000, + {52, 60} }, + {39, 40, BW40_LOW_PRIMARY, BIT(BEHAV_BW40_LOW_PRIMARY), 5000, + {100, 108, 116, 124, 132} }, + {41, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {40, 48} }, + {42, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {56, 64} }, + {44, 40, BW40_HIGH_PRIMARY, BIT(BEHAV_BW40_HIGH_PRIMARY), 5000, + {104, 112, 120, 128, 136} }, + {128, 80, BW80, BIT(BEHAV_NONE), 5000, + {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, + 124, 128} }, + {129, 160, BW80, BIT(BEHAV_NONE), 5000, + {36, 40, 44, 48, 52, 56, 60, 64, 100, + 104, 108, 112, 116, 120, 124, 128} }, + {130, 80, BW80, BIT(BEHAV_BW80_PLUS), 5000, + {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, + 124, 128} }, + {0, 0, 0, 0, 0, {0} }, +}; + +#ifdef HOST_OPCLASS +/** + * reg_get_class_from_country()- Get Class from country + * @country- Country + * + * Return: class. + */ +static const +struct reg_dmn_op_class_map_t *reg_get_class_from_country(uint8_t *country) +{ + const struct reg_dmn_op_class_map_t *class = NULL; + + reg_debug_rl("Country %c%c 0x%x", country[0], country[1], country[2]); + + switch (country[2]) { + case OP_CLASS_US: + class = us_op_class; + break; + + case OP_CLASS_EU: + class = euro_op_class; + break; + + case OP_CLASS_JAPAN: + class = japan_op_class; + break; + + case OP_CLASS_GLOBAL: + class = global_op_class; + break; + + default: + if (!qdf_mem_cmp(country, "US", 2)) + class = us_op_class; + else if (!qdf_mem_cmp(country, "EU", 2)) + class = euro_op_class; + else if (!qdf_mem_cmp(country, "JP", 2)) + class = japan_op_class; + else + class = global_op_class; + } + return class; +} + +uint16_t reg_dmn_get_chanwidth_from_opclass(uint8_t *country, uint8_t channel, + uint8_t opclass) +{ + const struct reg_dmn_op_class_map_t *class; + uint16_t i; + + class = reg_get_class_from_country(country); + + while (class->op_class) { + if (opclass == class->op_class) { + for (i = 0; (i < REG_MAX_CHANNELS_PER_OPERATING_CLASS && + class->channels[i]); i++) { + if (channel == class->channels[i]) + return class->chan_spacing; + } + } + class++; + } + + return 0; +} + +uint16_t reg_dmn_get_opclass_from_channel(uint8_t *country, uint8_t channel, + uint8_t offset) +{ + const struct reg_dmn_op_class_map_t *class = NULL; + uint16_t i = 0; + + class = reg_get_class_from_country(country); + while (class && class->op_class) { + if ((offset == class->offset) || (offset == BWALL)) { + for (i = 0; (i < REG_MAX_CHANNELS_PER_OPERATING_CLASS && + class->channels[i]); i++) { + if (channel == class->channels[i]) + return class->op_class; + } + } + class++; + } + + return 0; +} + +uint8_t reg_dmn_get_opclass_from_freq_width(uint8_t *country, + qdf_freq_t freq, + uint8_t ch_width, + uint16_t behav_limit) +{ + const struct reg_dmn_op_class_map_t *op_class_tbl = NULL; + uint16_t i = 0; + + op_class_tbl = reg_get_class_from_country(country); + + while (op_class_tbl && op_class_tbl->op_class) { + if (op_class_tbl->chan_spacing == ch_width) { + for (i = 0; (i < REG_MAX_CHANNELS_PER_OPERATING_CLASS && + op_class_tbl->channels[i]); i++) { + if ((op_class_tbl->start_freq + + (FREQ_TO_CHAN_SCALE * + op_class_tbl->channels[i]) == freq) && + (behav_limit & op_class_tbl->behav_limit)) { + return op_class_tbl->op_class; + } + } + } + op_class_tbl++; + } + + return 0; +} + +void reg_dmn_print_channels_in_opclass(uint8_t *country, uint8_t op_class) +{ + const struct reg_dmn_op_class_map_t *class = NULL; + uint16_t i = 0; + + class = reg_get_class_from_country(country); + + if (!class) { + reg_err("class is NULL"); + return; + } + + while (class->op_class) { + if (class->op_class == op_class) { + for (i = 0; + (i < REG_MAX_CHANNELS_PER_OPERATING_CLASS && + class->channels[i]); i++) { + reg_debug("Valid channel(%d) in requested RC(%d)", + class->channels[i], op_class); + } + break; + } + class++; + } + if (!class->op_class) + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "Invalid requested RC (%d)", op_class); +} + +uint16_t reg_dmn_set_curr_opclasses(uint8_t num_classes, uint8_t *class) +{ + uint8_t i; + + if (num_classes > REG_MAX_SUPP_OPER_CLASSES) { + reg_err("invalid num classes %d", num_classes); + return 0; + } + + for (i = 0; i < num_classes; i++) + reg_dmn_curr_supp_opp_classes.classes[i] = class[i]; + + reg_dmn_curr_supp_opp_classes.num_classes = num_classes; + + return 0; +} + +uint16_t reg_dmn_get_curr_opclasses(uint8_t *num_classes, uint8_t *class) +{ + uint8_t i; + + if (!num_classes || !class) { + reg_err("either num_classes or class is null"); + return 0; + } + + for (i = 0; i < reg_dmn_curr_supp_opp_classes.num_classes; i++) + class[i] = reg_dmn_curr_supp_opp_classes.classes[i]; + + *num_classes = reg_dmn_curr_supp_opp_classes.num_classes; + + return 0; +} + +#ifdef CONFIG_CHAN_FREQ_API +void reg_freq_width_to_chan_op_class_auto(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + uint16_t chan_width, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num) +{ + if (reg_freq_to_band(freq) == REG_BAND_6G) { + global_tbl_lookup = true; + if (chan_width == BW_40_MHZ) + behav_limit = BIT(BEHAV_NONE); + } else { + global_tbl_lookup = false; + } + + reg_freq_width_to_chan_op_class(pdev, freq, + chan_width, + global_tbl_lookup, + behav_limit, + op_class, + chan_num); +} + +void reg_freq_width_to_chan_op_class(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + uint16_t chan_width, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num) +{ + const struct reg_dmn_op_class_map_t *op_class_tbl; + enum channel_enum chan_enum; + uint16_t i; + + chan_enum = reg_get_chan_enum_for_freq(freq); + + if (chan_enum == INVALID_CHANNEL) { + reg_err_rl("Invalid chan enum %d", chan_enum); + return; + } + + if (global_tbl_lookup) { + op_class_tbl = global_op_class; + } else { + if (channel_map == channel_map_us) + op_class_tbl = us_op_class; + else if (channel_map == channel_map_eu) + op_class_tbl = euro_op_class; + else if (channel_map == channel_map_china) + op_class_tbl = us_op_class; + else if (channel_map == channel_map_jp) + op_class_tbl = japan_op_class; + else + op_class_tbl = global_op_class; + } + + while (op_class_tbl->op_class) { + if (op_class_tbl->chan_spacing >= chan_width) { + for (i = 0; (i < REG_MAX_CHANNELS_PER_OPERATING_CLASS && + op_class_tbl->channels[i]); i++) { + if ((op_class_tbl->start_freq + + FREQ_TO_CHAN_SCALE * + op_class_tbl->channels[i] == freq) && + (behav_limit & op_class_tbl->behav_limit || + behav_limit == BIT(BEHAV_NONE))) { + *chan_num = op_class_tbl->channels[i]; + *op_class = op_class_tbl->op_class; + return; + } + } + } + op_class_tbl++; + } + + reg_err_rl("no op class for frequency %d", freq); +} + +void reg_freq_to_chan_op_class(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num) +{ + enum channel_enum chan_enum; + struct regulatory_channel *cur_chan_list; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct ch_params chan_params; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err_rl("NULL pdev reg obj"); + return; + } + + cur_chan_list = pdev_priv_obj->cur_chan_list; + + chan_enum = reg_get_chan_enum_for_freq(freq); + + if (chan_enum == INVALID_CHANNEL) { + reg_err_rl("Invalid chan enum %d", chan_enum); + return; + } + + chan_params.ch_width = CH_WIDTH_MAX; + reg_set_channel_params_for_freq(pdev, freq, 0, &chan_params); + + reg_freq_width_to_chan_op_class(pdev, freq, + reg_get_bw_value(chan_params.ch_width), + global_tbl_lookup, + behav_limit, + op_class, + chan_num); +} + +bool reg_country_opclass_freq_check(struct wlan_objmgr_pdev *pdev, + const uint8_t country[3], + uint8_t op_class, + qdf_freq_t chan_freq) +{ + const struct reg_dmn_op_class_map_t *op_class_tbl; + uint8_t i; + + op_class_tbl = reg_get_class_from_country((uint8_t *)country); + + while (op_class_tbl && op_class_tbl->op_class) { + if (op_class_tbl->op_class == op_class) { + for (i = 0; (i < REG_MAX_CHANNELS_PER_OPERATING_CLASS && + op_class_tbl->channels[i]); i++) { + if (op_class_tbl->channels[i] * + FREQ_TO_CHAN_SCALE + + op_class_tbl->start_freq == chan_freq) + return true; + } + } + op_class_tbl++; + } + return false; +} + +#endif + +uint16_t reg_get_op_class_width(struct wlan_objmgr_pdev *pdev, + uint8_t op_class, + bool global_tbl_lookup) +{ + const struct reg_dmn_op_class_map_t *op_class_tbl; + + if (global_tbl_lookup) { + op_class_tbl = global_op_class; + } else { + if (channel_map == channel_map_us) + op_class_tbl = us_op_class; + else if (channel_map == channel_map_eu) + op_class_tbl = euro_op_class; + else if (channel_map == channel_map_china) + op_class_tbl = us_op_class; + else if (channel_map == channel_map_jp) + op_class_tbl = japan_op_class; + else + op_class_tbl = global_op_class; + } + + while (op_class_tbl->op_class) { + if (op_class_tbl->op_class == op_class) + return op_class_tbl->chan_spacing; + op_class_tbl++; + } + + return 0; +} + +uint16_t reg_chan_opclass_to_freq(uint8_t chan, + uint8_t op_class, + bool global_tbl_lookup) +{ + const struct reg_dmn_op_class_map_t *op_class_tbl = NULL; + uint8_t i = 0; + + if (global_tbl_lookup) { + op_class_tbl = global_op_class; + } else { + if (channel_map == channel_map_global) { + op_class_tbl = global_op_class; + } else if (channel_map == channel_map_us) { + op_class_tbl = us_op_class; + } else if (channel_map == channel_map_eu) { + op_class_tbl = euro_op_class; + } else if (channel_map == channel_map_china) { + op_class_tbl = us_op_class; + } else if (channel_map == channel_map_jp) { + op_class_tbl = japan_op_class; + } else { + reg_err_rl("Invalid channel map"); + return 0; + } + } + + while (op_class_tbl->op_class) { + if (op_class_tbl->op_class == op_class) { + for (i = 0; (i < REG_MAX_CHANNELS_PER_OPERATING_CLASS && + op_class_tbl->channels[i]); i++) { + if (op_class_tbl->channels[i] == chan) { + chan = op_class_tbl->channels[i]; + return op_class_tbl->start_freq + + (chan * FREQ_TO_CHAN_SCALE); + } + } + reg_err_rl("Channel not found"); + return 0; + } + op_class_tbl++; + } + reg_err_rl("Invalid opclass"); + return 0; +} + +#ifdef HOST_OPCLASS_EXT +qdf_freq_t reg_country_chan_opclass_to_freq(struct wlan_objmgr_pdev *pdev, + const uint8_t country[3], + uint8_t chan, uint8_t op_class, + bool strict) +{ + const struct reg_dmn_op_class_map_t *op_class_tbl, *op_class_tbl_org; + uint16_t i; + + if (reg_is_6ghz_op_class(pdev, op_class)) + op_class_tbl_org = global_op_class; + else + op_class_tbl_org = + reg_get_class_from_country((uint8_t *)country); + op_class_tbl = op_class_tbl_org; + while (op_class_tbl && op_class_tbl->op_class) { + if (op_class_tbl->op_class == op_class) { + for (i = 0; (i < REG_MAX_CHANNELS_PER_OPERATING_CLASS && + op_class_tbl->channels[i]); i++) { + if (op_class_tbl->channels[i] == chan) + return op_class_tbl->start_freq + + (chan * FREQ_TO_CHAN_SCALE); + } + } + op_class_tbl++; + } + reg_debug_rl("Not found ch %d in op class %d ch list, strict %d", + chan, op_class, strict); + if (strict) + return 0; + + op_class_tbl = op_class_tbl_org; + while (op_class_tbl && op_class_tbl->op_class) { + for (i = 0; (i < REG_MAX_CHANNELS_PER_OPERATING_CLASS && + op_class_tbl->channels[i]); i++) { + if (op_class_tbl->channels[i] == chan) + return op_class_tbl->start_freq + + (chan * FREQ_TO_CHAN_SCALE); + } + op_class_tbl++; + } + reg_debug_rl("Got invalid freq 0 for ch %d", chan); + + return 0; +} +#endif + +static void +reg_get_op_class_tbl_by_chan_map(const struct + reg_dmn_op_class_map_t **op_class_tbl) +{ + if (channel_map == channel_map_us) + *op_class_tbl = us_op_class; + else if (channel_map == channel_map_eu) + *op_class_tbl = euro_op_class; + else if (channel_map == channel_map_china) + *op_class_tbl = us_op_class; + else if (channel_map == channel_map_jp) + *op_class_tbl = japan_op_class; + else + *op_class_tbl = global_op_class; +} + +/** + * reg_get_channel_cen - Calculate central channel in the channel set. + * + * @op_class_tbl - Pointer to op_class_tbl. + * @idx - Pointer to channel index. + * @num_channels - Number of channels. + * @center_chan - Pointer to center channel number + * + * Return : void + */ +static void reg_get_channel_cen(const struct + reg_dmn_op_class_map_t *op_class_tbl, + uint8_t *idx, + uint8_t num_channels, + uint8_t *center_chan) +{ + uint8_t i; + uint16_t new_chan = 0; + + for (i = *idx; i < (*idx + num_channels); i++) + new_chan += op_class_tbl->channels[i]; + + new_chan = new_chan / num_channels; + *center_chan = new_chan; + *idx = *idx + num_channels; +} + +/** + * reg_get_chan_or_chan_center - Calculate central channel in the channel set. + * + * @op_class_tbl - Pointer to op_class_tbl. + * @idx - Pointer to channel index. + * + * Return : Center channel number + */ +static uint8_t reg_get_chan_or_chan_center(const struct + reg_dmn_op_class_map_t *op_class_tbl, + uint8_t *idx) +{ + uint8_t center_chan; + + if (((op_class_tbl->chan_spacing == BW_80_MHZ) && + (op_class_tbl->behav_limit == BIT(BEHAV_NONE))) || + ((op_class_tbl->chan_spacing == BW_80_MHZ) && + (op_class_tbl->behav_limit == BIT(BEHAV_BW80_PLUS)))) { + reg_get_channel_cen(op_class_tbl, + idx, + NUM_20_MHZ_CHAN_IN_80_MHZ_CHAN, + ¢er_chan); + } else if (op_class_tbl->chan_spacing == BW_160_MHZ) { + reg_get_channel_cen(op_class_tbl, + idx, + NUM_20_MHZ_CHAN_IN_160_MHZ_CHAN, + ¢er_chan); + } else { + center_chan = op_class_tbl->channels[*idx]; + *idx = *idx + 1; + } + + return center_chan; +} + +/** + * reg_get_channels_from_opclassmap()- Get channels from the opclass map + * @pdev: Pointer to pdev + * @reg_ap_cap: Pointer to reg_ap_cap + * @index: Pointer to index of reg_ap_cap + * @op_class_tbl: Pointer to op_class_tbl + * @is_opclass_operable: Set true if opclass is operable, else set false + * + * Populate channels from opclass map to reg_ap_cap as supported and + * non-supported channels. + * + * Return: void. + */ +static void +reg_get_channels_from_opclassmap( + struct wlan_objmgr_pdev *pdev, + struct regdmn_ap_cap_opclass_t *reg_ap_cap, + uint8_t index, + const struct reg_dmn_op_class_map_t *op_class_tbl, + bool *is_opclass_operable) +{ + uint8_t op_cls_chan; + qdf_freq_t search_freq; + bool is_freq_present; + uint8_t chan_idx = 0, n_sup_chans = 0, n_unsup_chans = 0; + + while (op_class_tbl->channels[chan_idx]) { + op_cls_chan = op_class_tbl->channels[chan_idx]; + search_freq = op_class_tbl->start_freq + + (FREQ_TO_CHAN_SCALE * op_cls_chan); + is_freq_present = + reg_is_freq_present_in_cur_chan_list(pdev, search_freq); + + if (!is_freq_present) { + reg_ap_cap[index].non_sup_chan_list[n_unsup_chans++] = + reg_get_chan_or_chan_center(op_class_tbl, + &chan_idx); + reg_ap_cap[index].num_non_supported_chan++; + } else { + reg_ap_cap[index].sup_chan_list[n_sup_chans++] = + reg_get_chan_or_chan_center(op_class_tbl, + &chan_idx); + reg_ap_cap[index].num_supported_chan++; + } + } + + if (reg_ap_cap[index].num_supported_chan >= 1) + *is_opclass_operable = true; +} + +QDF_STATUS reg_get_opclass_details(struct wlan_objmgr_pdev *pdev, + struct regdmn_ap_cap_opclass_t *reg_ap_cap, + uint8_t *n_opclasses, + uint8_t max_supp_op_class, + bool global_tbl_lookup) +{ + uint8_t max_reg_power = 0; + const struct reg_dmn_op_class_map_t *op_class_tbl; + uint8_t index = 0; + + if (global_tbl_lookup) + op_class_tbl = global_op_class; + else + reg_get_op_class_tbl_by_chan_map(&op_class_tbl); + + max_reg_power = reg_get_max_tx_power(pdev); + + while (op_class_tbl->op_class && (index < max_supp_op_class)) { + bool is_opclass_operable = false; + + qdf_mem_zero(reg_ap_cap[index].sup_chan_list, + REG_MAX_CHANNELS_PER_OPERATING_CLASS); + reg_ap_cap[index].num_supported_chan = 0; + qdf_mem_zero(reg_ap_cap[index].non_sup_chan_list, + REG_MAX_CHANNELS_PER_OPERATING_CLASS); + reg_ap_cap[index].num_non_supported_chan = 0; + reg_get_channels_from_opclassmap(pdev, + reg_ap_cap, + index, + op_class_tbl, + &is_opclass_operable); + if (is_opclass_operable) { + reg_ap_cap[index].op_class = op_class_tbl->op_class; + reg_ap_cap[index].ch_width = + op_class_tbl->chan_spacing; + reg_ap_cap[index].start_freq = + op_class_tbl->start_freq; + reg_ap_cap[index].max_tx_pwr_dbm = max_reg_power; + reg_ap_cap[index].behav_limit = + op_class_tbl->behav_limit; + index++; + } + + op_class_tbl++; + } + + *n_opclasses = index; + + return QDF_STATUS_SUCCESS; +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_opclass.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_opclass.h new file mode 100644 index 0000000000000000000000000000000000000000..0cd260c0cdf8859da78d158c23e6fd83995fbb16 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_opclass.h @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_opclass.h + * This file provides prototypes of the regulatory opclass functions + */ + +#ifndef __REG_OPCLASS_H__ +#define __REG_OPCLASS_H__ + +#ifdef HOST_OPCLASS +/** + * reg_dmn_get_chanwidth_from_opclass() - Get channel width from opclass. + * @country: Country code + * @channel: Channel number + * @opclass: Operating class + * + * Return: Channel width + */ +uint16_t reg_dmn_get_chanwidth_from_opclass(uint8_t *country, uint8_t channel, + uint8_t opclass); + +/** + * reg_dmn_get_opclass_from_channel() - Get operating class from channel. + * @country: Country code. + * @channel: Channel number. + * @offset: Operating class offset. + * + * Return: Error code. + */ +uint16_t reg_dmn_get_opclass_from_channel(uint8_t *country, uint8_t channel, + uint8_t offset); + +/** + * reg_dmn_get_opclass_from_freq_width() - Get operating class from frequency + * @country: Country code. + * @freq: Channel center frequency. + * @ch_width: Channel width. + * @behav_limit: Behaviour limit. + * + * Return: Error code. + */ +uint8_t reg_dmn_get_opclass_from_freq_width(uint8_t *country, + qdf_freq_t freq, + uint8_t ch_width, + uint16_t behav_limit); + +/** + * reg_dmn_get_opclass_from_channe() - Print channels in op class. + * @country: Country code. + * @opclass: opclass. + * + * Return: Void. + */ +void reg_dmn_print_channels_in_opclass(uint8_t *country, uint8_t op_class); + +/** + * reg_dmn_set_curr_opclasses() - Set current operating class + * @num_classes: Number of classes + * @class: Pointer to operating class. + * + * Return: Error code. + */ +uint16_t reg_dmn_set_curr_opclasses(uint8_t num_classes, uint8_t *class); + +/** + * reg_dmn_get_curr_opclasses() - Get current supported operating classes. + * @num_classes: Number of classes. + * @class: Pointer to operating class. + * + * Return: Error code. + */ +uint16_t reg_dmn_get_curr_opclasses(uint8_t *num_classes, uint8_t *class); + +/** + * reg_get_opclass_details() - Get details about the current opclass table. + * @pdev: Pointer to pdev. + * @reg_ap_cap: Pointer to reg_ap_cap. + * @n_opclasses: Pointer to number of opclasses. + * @max_supp_op_class: Maximum number of operating classes supported. + * @global_tbl_lookup: Whether to lookup global op class table. + * + * Return: QDF_STATUS_SUCCESS if success, else return QDF_STATUS_FAILURE. + */ +QDF_STATUS reg_get_opclass_details(struct wlan_objmgr_pdev *pdev, + struct regdmn_ap_cap_opclass_t *reg_ap_cap, + uint8_t *n_opclasses, + uint8_t max_supp_op_class, + bool global_tbl_lookup); +#ifdef CONFIG_CHAN_FREQ_API + +/** + * reg_freq_width_to_chan_op_class() - convert frequency to oper class, + * channel + * @pdev: pdev pointer + * @freq: channel frequency in mhz + * @chan_width: channel width + * @global_tbl_lookup: whether to lookup global op class tbl + * @behav_limit: behavior limit + * @op_class: operating class + * @chan_num: channel number + * + * Return: Void. + */ +void reg_freq_width_to_chan_op_class(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + uint16_t chan_width, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num); + +/** + * reg_freq_width_to_chan_op_class_auto() - convert frequency to operating + * class,channel after fixing up the global_tbl_lookup and behav_limit + * for 6G frequencies. + * @pdev: pdev pointer + * @freq: channel frequency in mhz + * @chan_width: channel width + * @global_tbl_lookup: whether to lookup global op class tbl + * @behav_limit: behavior limit + * @op_class: operating class + * @chan_num: channel number + * + * Return: Void. + */ +void reg_freq_width_to_chan_op_class_auto(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + uint16_t chan_width, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num); + +/** + * reg_freq_to_chan_op_class() - convert frequency to oper class, + * channel + * @pdev: pdev pointer + * @freq: channel frequency in mhz + * @global_tbl_lookup: whether to lookup global op class tbl + * @behav_limit: behavior limit + * @op_class: operating class + * @chan_num: channel number + * + * Return: Void. + */ +void reg_freq_to_chan_op_class(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num); + +/** + * reg_country_opclass_freq_check() - check for frequency in (tbl, oper class) + * + * @pdev: pdev pointer + * @country: country from country IE + * @op_class: operating class + * @chan_freq: channel frequency in mhz + * + * Return: bool + */ +bool reg_country_opclass_freq_check(struct wlan_objmgr_pdev *pdev, + const uint8_t country[3], + uint8_t op_class, + qdf_freq_t chan_freq); +#endif + +/** + * reg_get_op_class_width() - get oper class width + * + * @pdev: pdev pointer + * @global_tbl_lookup: whether to lookup global op class tbl + * @op_class: operating class + * Return: uint16 + */ +uint16_t reg_get_op_class_width(struct wlan_objmgr_pdev *pdev, + uint8_t op_class, + bool global_tbl_lookup); + +#ifdef HOST_OPCLASS_EXT +/** + * reg_country_chan_opclass_to_freq() - Convert channel number to frequency + * based on country code and op class + * @pdev: pdev object. + * @country: country code. + * @chan: IEEE Channel Number. + * @op_class: Opclass. + * @strict: flag to find channel from matched operating class code. + * + * Look up (channel, operating class) pair in country operating class tables + * and return the channel frequency. + * If not found and "strict" flag is false, try to get frequency (Mhz) by + * channel number only. + * + * Return: Channel center frequency else return 0. + */ +qdf_freq_t reg_country_chan_opclass_to_freq(struct wlan_objmgr_pdev *pdev, + const uint8_t country[3], + uint8_t chan, uint8_t op_class, + bool strict); +#endif + +/** + * reg_chan_opclass_to_freq() - Convert channel number and opclass to frequency + * @chan: IEEE Channel Number. + * @op_class: Opclass. + * @global_tbl_lookup: Global table lookup. + * + * Return: Channel center frequency else return 0. + */ +uint16_t reg_chan_opclass_to_freq(uint8_t chan, + uint8_t op_class, + bool global_tbl_lookup); +#else + +static inline uint16_t reg_dmn_get_chanwidth_from_opclass( + uint8_t *country, uint8_t channel, uint8_t opclass) +{ + return 0; +} + +static inline uint16_t reg_dmn_set_curr_opclasses( + uint8_t num_classes, uint8_t *class) +{ + return 0; +} + +static inline uint16_t reg_dmn_get_curr_opclasses( + uint8_t *num_classes, uint8_t *class) +{ + return 0; +} + +static inline uint16_t reg_dmn_get_opclass_from_channel( + uint8_t *country, uint8_t channel, uint8_t offset) +{ + return 0; +} + +static inline +uint8_t reg_dmn_get_opclass_from_freq_width(uint8_t *country, + qdf_freq_t freq, + uint8_t ch_width, + uint16_t behav_limit) +{ + return 0; +} + +static inline void reg_dmn_print_channels_in_opclass(uint8_t *country, + uint8_t op_class) +{ +} + +static inline +QDF_STATUS reg_get_opclass_details(struct wlan_objmgr_pdev *pdev, + struct regdmn_ap_cap_opclass_t *reg_ap_cap, + uint8_t *n_opclasses, + uint8_t max_supp_op_class, + bool global_tbl_lookup) +{ + return QDF_STATUS_E_FAILURE; +} + +#ifdef CONFIG_CHAN_FREQ_API + +static inline void +reg_freq_width_to_chan_op_class(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + uint16_t chan_width, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num) +{ +} + +static inline void +reg_freq_width_to_chan_op_class_auto(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + uint16_t chan_width, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num) +{ +} + +static inline void +reg_freq_to_chan_op_class(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num) +{ +} + +static inline bool +reg_country_opclass_freq_check(struct wlan_objmgr_pdev *pdev, + const uint8_t country[3], + uint8_t op_class, + uint16_t chan_freq) +{ +} + +#endif + +static inline uint16_t reg_get_op_class_width(struct wlan_objmgr_pdev *pdev, + uint8_t op_class, + bool global_tbl_lookup) +{ + return 0; +} + +#ifdef HOST_OPCLASS_EXT +static inline +qdf_freq_t reg_country_chan_opclass_to_freq(struct wlan_objmgr_pdev *pdev, + const uint8_t country[3], + uint8_t chan, uint8_t op_class, + bool strict) +{ + return 0; +} +#endif + +static inline uint16_t +reg_chan_opclass_to_freq(uint8_t chan, + uint8_t op_class, + bool global_tbl_lookup) +{ + return 0; +} + +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_priv_objs.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_priv_objs.c new file mode 100644 index 0000000000000000000000000000000000000000..8f31c6a3a3f9c6aa8b99000a31b199984b386bb6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_priv_objs.c @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_priv_objs.c + * This file defines the APIs to create regulatory private PSOC and PDEV + * objects. + */ + +#include +#include +#include +#include +#include +#include "reg_priv_objs.h" +#include "reg_utils.h" +#include "reg_services_common.h" +#include "reg_build_chan_list.h" +#include "reg_host_11d.h" +#include "reg_callbacks.h" + +struct wlan_regulatory_psoc_priv_obj *reg_get_psoc_obj( + struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + if (!psoc) { + reg_alert("psoc is NULL"); + return NULL; + } + psoc_priv_obj = wlan_objmgr_psoc_get_comp_private_obj( + psoc, WLAN_UMAC_COMP_REGULATORY); + + return psoc_priv_obj; +} + +struct wlan_regulatory_pdev_priv_obj *reg_get_pdev_obj( + struct wlan_objmgr_pdev *pdev) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_reg; + + if (!pdev) { + reg_alert("pdev is NULL"); + return NULL; + } + + pdev_reg = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_REGULATORY); + + return pdev_reg; +} + +QDF_STATUS wlan_regulatory_psoc_obj_created_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list) +{ + struct wlan_regulatory_psoc_priv_obj *soc_reg_obj; + struct regulatory_channel *mas_chan_list; + enum channel_enum chan_enum; + QDF_STATUS status; + uint8_t i; + uint8_t pdev_cnt; + + soc_reg_obj = qdf_mem_malloc(sizeof(*soc_reg_obj)); + if (!soc_reg_obj) + return QDF_STATUS_E_NOMEM; + + soc_reg_obj->offload_enabled = false; + soc_reg_obj->psoc_ptr = psoc; + soc_reg_obj->dfs_enabled = true; + soc_reg_obj->band_capability = (BIT(REG_BAND_2G) | BIT(REG_BAND_5G) | + BIT(REG_BAND_6G)); + soc_reg_obj->enable_11d_supp = false; + soc_reg_obj->indoor_chan_enabled = true; + soc_reg_obj->force_ssc_disable_indoor_channel = false; + soc_reg_obj->master_vdev_cnt = 0; + soc_reg_obj->vdev_cnt_11d = 0; + soc_reg_obj->vdev_id_for_11d_scan = INVALID_VDEV_ID; + soc_reg_obj->restart_beaconing = CH_AVOID_RULE_RESTART; + soc_reg_obj->enable_srd_chan_in_master_mode = 0xFF; + soc_reg_obj->enable_11d_in_world_mode = false; + soc_reg_obj->retain_nol_across_regdmn_update = false; + + for (i = 0; i < MAX_STA_VDEV_CNT; i++) + soc_reg_obj->vdev_ids_11d[i] = INVALID_VDEV_ID; + + qdf_spinlock_create(&soc_reg_obj->cbk_list_lock); + + for (pdev_cnt = 0; pdev_cnt < PSOC_MAX_PHY_REG_CAP; pdev_cnt++) { + mas_chan_list = + soc_reg_obj->mas_chan_params[pdev_cnt].mas_chan_list; + soc_reg_obj->chan_list_recvd[pdev_cnt] = false; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + mas_chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + mas_chan_list[chan_enum].state = CHANNEL_STATE_DISABLE; + mas_chan_list[chan_enum].nol_chan = false; + } + } + + status = wlan_objmgr_psoc_component_obj_attach( + psoc, WLAN_UMAC_COMP_REGULATORY, soc_reg_obj, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_spinlock_destroy(&soc_reg_obj->cbk_list_lock); + qdf_mem_free(soc_reg_obj); + reg_err("Obj attach failed"); + return status; + } + + reg_debug("reg psoc obj created with status %d", status); + + return status; +} + +QDF_STATUS wlan_regulatory_psoc_obj_destroyed_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list) +{ + QDF_STATUS status; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err_rl("NULL reg psoc priv obj"); + return QDF_STATUS_E_FAULT; + } + + psoc_priv_obj->psoc_ptr = NULL; + qdf_spinlock_destroy(&psoc_priv_obj->cbk_list_lock); + + status = wlan_objmgr_psoc_component_obj_detach( + psoc, WLAN_UMAC_COMP_REGULATORY, psoc_priv_obj); + + if (status != QDF_STATUS_SUCCESS) + reg_err_rl("psoc_priv_obj private obj detach failed"); + + reg_debug("reg psoc obj detached"); + + qdf_mem_free(psoc_priv_obj); + + return status; +} + +#ifdef DISABLE_UNII_SHARED_BANDS +/** + * reg_reset_unii_5g_bitmap() - Reset the value of unii_5g_bitmap. + * @pdev_priv_obj: pointer to wlan_regulatory_pdev_priv_obj. + * + * Return : void + */ +static void +reg_reset_unii_5g_bitmap(struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj) +{ + pdev_priv_obj->unii_5g_bitmap = 0x0; +} +#else +static void inline +reg_reset_unii_5g_bitmap(struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj) +{ +} +#endif + +QDF_STATUS wlan_regulatory_pdev_obj_created_notification( + struct wlan_objmgr_pdev *pdev, void *arg_list) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap_ptr; + struct wlan_objmgr_psoc *parent_psoc; + uint32_t pdev_id; + uint32_t cnt; + uint32_t range_2g_low, range_2g_high; + uint32_t range_5g_low, range_5g_high; + QDF_STATUS status; + struct reg_rule_info *psoc_reg_rules; + + pdev_priv_obj = qdf_mem_malloc(sizeof(*pdev_priv_obj)); + if (!pdev_priv_obj) + return QDF_STATUS_E_NOMEM; + + parent_psoc = wlan_pdev_get_psoc(pdev); + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + psoc_priv_obj = reg_get_psoc_obj(parent_psoc); + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + qdf_mem_free(pdev_priv_obj); + return QDF_STATUS_E_FAULT; + } + + pdev_priv_obj->pdev_ptr = pdev; + pdev_priv_obj->dfs_enabled = psoc_priv_obj->dfs_enabled; + pdev_priv_obj->set_fcc_channel = false; + pdev_priv_obj->band_capability = psoc_priv_obj->band_capability; + pdev_priv_obj->indoor_chan_enabled = + psoc_priv_obj->indoor_chan_enabled; + pdev_priv_obj->en_chan_144 = true; + reg_reset_unii_5g_bitmap(pdev_priv_obj); + + qdf_spinlock_create(&pdev_priv_obj->reg_rules_lock); + + reg_cap_ptr = psoc_priv_obj->reg_cap; + pdev_priv_obj->force_ssc_disable_indoor_channel = + psoc_priv_obj->force_ssc_disable_indoor_channel; + + for (cnt = 0; cnt < PSOC_MAX_PHY_REG_CAP; cnt++) { + if (!reg_cap_ptr) { + qdf_mem_free(pdev_priv_obj); + reg_err("reg cap ptr is NULL"); + return QDF_STATUS_E_FAULT; + } + + if (reg_cap_ptr->phy_id == pdev_id) + break; + reg_cap_ptr++; + } + + if (cnt == PSOC_MAX_PHY_REG_CAP) { + qdf_mem_free(pdev_priv_obj); + reg_err("extended capabilities not found for pdev"); + return QDF_STATUS_E_FAULT; + } + + range_2g_low = reg_cap_ptr->low_2ghz_chan; + range_2g_high = reg_cap_ptr->high_2ghz_chan; + range_5g_low = reg_cap_ptr->low_5ghz_chan; + range_5g_high = reg_cap_ptr->high_5ghz_chan; + + pdev_priv_obj->range_2g_low = range_2g_low; + pdev_priv_obj->range_2g_high = range_2g_high; + pdev_priv_obj->range_5g_low = range_5g_low; + pdev_priv_obj->range_5g_high = range_5g_high; + pdev_priv_obj->wireless_modes = reg_cap_ptr->wireless_modes; + + reg_init_pdev_mas_chan_list(pdev_priv_obj, + &psoc_priv_obj->mas_chan_params[pdev_id]); + + psoc_reg_rules = &psoc_priv_obj->mas_chan_params[pdev_id].reg_rules; + reg_save_reg_rules_to_pdev(psoc_reg_rules, pdev_priv_obj); + pdev_priv_obj->chan_list_recvd = + psoc_priv_obj->chan_list_recvd[pdev_id]; + + status = wlan_objmgr_pdev_component_obj_attach( + pdev, WLAN_UMAC_COMP_REGULATORY, pdev_priv_obj, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("Obj attach failed"); + qdf_mem_free(pdev_priv_obj); + return status; + } + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + if (!psoc_priv_obj->is_11d_offloaded) + reg_11d_host_scan_init(parent_psoc); + + reg_debug("reg pdev obj created with status %d", status); + + return status; +} + +QDF_STATUS wlan_regulatory_pdev_obj_destroyed_notification( + struct wlan_objmgr_pdev *pdev, void *arg_list) +{ + QDF_STATUS status; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + uint32_t pdev_id; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev private obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + psoc_priv_obj = reg_get_psoc_obj(wlan_pdev_get_psoc(pdev)); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!psoc_priv_obj->is_11d_offloaded) + reg_11d_host_scan_deinit(wlan_pdev_get_psoc(pdev)); + + pdev_priv_obj->pdev_ptr = NULL; + + status = wlan_objmgr_pdev_component_obj_detach( + pdev, WLAN_UMAC_COMP_REGULATORY, pdev_priv_obj); + + if (status != QDF_STATUS_SUCCESS) + reg_err("reg pdev private obj detach failed"); + + reg_debug("reg pdev obj deleted"); + + qdf_spin_lock_bh(&pdev_priv_obj->reg_rules_lock); + reg_reset_reg_rules(&pdev_priv_obj->reg_rules); + qdf_spin_unlock_bh(&pdev_priv_obj->reg_rules_lock); + + qdf_spinlock_destroy(&pdev_priv_obj->reg_rules_lock); + + qdf_mem_free(pdev_priv_obj); + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_priv_objs.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_priv_objs.h new file mode 100644 index 0000000000000000000000000000000000000000..dc0c8bce08cde8209c591b53b91f2922c0aa8582 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_priv_objs.h @@ -0,0 +1,264 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_priv_objs.h + * This file contains regulatory component private data structures. + */ + +#ifndef __REG_PRIV_OBJS_H +#define __REG_PRIV_OBJS_H + +#define reg_alert(params...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_REGULATORY, params) +#define reg_err(params...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_REGULATORY, params) +#define reg_err_rl(params...) \ + QDF_TRACE_ERROR_RL(QDF_MODULE_ID_REGULATORY, params) +#define reg_warn(params...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_REGULATORY, params) +#define reg_notice(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_REGULATORY, params) +#define reg_info(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_REGULATORY, params) +#define reg_debug(params...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_REGULATORY, params) +#define reg_debug_rl(params...) \ + QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_REGULATORY, params) + +#define reg_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_REGULATORY, params) +#define reg_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_REGULATORY, params) +#define reg_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_REGULATORY, params) +#define reg_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_REGULATORY, params) +#define reg_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_REGULATORY, params) + +/** + * typedef reg_chan_change_callback() - Regulatory channel change callback + * @psoc: Pointer to psoc + * @pdev: Pointer to pdev + * @chan_list: Pointer to regulatory channel list + * @avoid_freq_ind: Pointer to avoid frequencies + * @arg: list of arguments + */ +typedef void (*reg_chan_change_callback)( + struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list, + struct avoid_freq_ind_data *avoid_freq_ind, + void *arg); + +/** + * struct chan_change_cbk_entry - Channel change callback entry + * @cbk: Callback + * @arg: Arguments + */ +struct chan_change_cbk_entry { + reg_chan_change_callback cbk; + void *arg; +}; + +/** + * struct wlan_regulatory_psoc_priv_obj - wlan regulatory psoc private object + * @chan_list_recvd: whether channel list has been received + * @new_user_ctry_pending: In this array, element[phy_id] is true if any user + * country update is pending for pdev (phy_id), used in case of MCL. + * @new_init_ctry_pending: In this array, element[phy_id] is true if any user + * country update is pending for pdev (phy_id), used in case of WIN. + * @new_11d_ctry_pending: In this array, element[phy_id] is true if any 11d + * country update is pending for pdev (phy_id). + * @world_country_pending: In this array, element[phy_id] is true if any world + * country update is pending for pdev (phy_id). + * @band_capability: bitmap of bands enabled, using enum reg_wifi_band as the + * bit position value + * @ignore_fw_reg_offload_ind: Ignore FW reg offload indication + * @six_ghz_supported: whether 6ghz is supported + * @retain_nol_across_regdmn_update: Retain the NOL list across the regdomain + * changes. + */ +struct wlan_regulatory_psoc_priv_obj { + struct mas_chan_params mas_chan_params[PSOC_MAX_PHY_REG_CAP]; + bool chan_list_recvd[PSOC_MAX_PHY_REG_CAP]; + bool offload_enabled; + bool six_ghz_supported; + uint8_t num_phy; + char cur_country[REG_ALPHA2_LEN + 1]; + char def_country[REG_ALPHA2_LEN + 1]; + uint16_t def_country_code; + uint16_t def_region_domain; + enum country_src cc_src; + struct wlan_objmgr_psoc *psoc_ptr; + bool new_user_ctry_pending[PSOC_MAX_PHY_REG_CAP]; + bool new_init_ctry_pending[PSOC_MAX_PHY_REG_CAP]; + bool new_11d_ctry_pending[PSOC_MAX_PHY_REG_CAP]; + bool world_country_pending[PSOC_MAX_PHY_REG_CAP]; + bool dfs_enabled; + uint32_t band_capability; + bool indoor_chan_enabled; + bool ignore_fw_reg_offload_ind; + bool enable_11d_supp_original; + bool enable_11d_supp; + bool is_11d_offloaded; + uint8_t vdev_id_for_11d_scan; + uint8_t master_vdev_cnt; + uint8_t vdev_cnt_11d; + uint32_t scan_11d_interval; +#ifdef HOST_11D_SCAN + bool is_host_11d_inited; + wlan_scan_requester scan_req_id; + uint32_t scan_id; + qdf_mc_timer_t timer; +#endif + uint8_t vdev_ids_11d[MAX_STA_VDEV_CNT]; + bool user_ctry_priority; + bool user_ctry_set; + struct chan_change_cbk_entry cbk_list[REG_MAX_CHAN_CHANGE_CBKS]; + uint8_t num_chan_change_cbks; + uint8_t ch_avoid_ind; + struct unsafe_ch_list unsafe_chan_list; + struct ch_avoid_ind_type avoid_freq_list; + enum restart_beaconing_on_ch_avoid_rule restart_beaconing; + struct wlan_psoc_host_hal_reg_capabilities_ext + reg_cap[PSOC_MAX_PHY_REG_CAP]; + bool force_ssc_disable_indoor_channel; + uint8_t enable_srd_chan_in_master_mode; + bool enable_11d_in_world_mode; + qdf_spinlock_t cbk_list_lock; + bool retain_nol_across_regdmn_update; +}; + +/** + * struct wlan_regulatory_pdev_priv_obj - wlan regulatory pdev private object + * @pdev_opened: whether pdev has been opened by application + * @band_capability: bitmap of bands enabled, using enum reg_wifi_band as the + * bit position value + */ +struct wlan_regulatory_pdev_priv_obj { + struct regulatory_channel cur_chan_list[NUM_CHANNELS]; + struct regulatory_channel mas_chan_list[NUM_CHANNELS]; +#ifdef DISABLE_CHANNEL_LIST + struct regulatory_channel cache_disable_chan_list[NUM_CHANNELS]; + uint32_t num_cache_channels; + bool disable_cached_channels; +#endif + char default_country[REG_ALPHA2_LEN + 1]; + uint16_t def_region_domain; + uint16_t def_country_code; + char current_country[REG_ALPHA2_LEN + 1]; + uint16_t reg_dmn_pair; + uint16_t ctry_code; +#ifdef DISABLE_UNII_SHARED_BANDS + uint8_t unii_5g_bitmap; +#endif + enum dfs_reg dfs_region; + uint32_t phybitmap; + struct wlan_objmgr_pdev *pdev_ptr; + qdf_freq_t range_2g_low; + qdf_freq_t range_2g_high; + qdf_freq_t range_5g_low; + qdf_freq_t range_5g_high; + bool dfs_enabled; + bool set_fcc_channel; + uint32_t band_capability; + bool indoor_chan_enabled; + bool en_chan_144; + uint32_t wireless_modes; + struct ch_avoid_ind_type freq_avoid_list; + bool force_ssc_disable_indoor_channel; + bool sap_state; + struct reg_rule_info reg_rules; + qdf_spinlock_t reg_rules_lock; + bool chan_list_recvd; + bool pdev_opened; +}; + +/** + * reg_get_psoc_obj() - Provides the reg component object pointer + * @psoc: pointer to psoc object. + * + * Return: reg component object pointer + */ +struct wlan_regulatory_psoc_priv_obj *reg_get_psoc_obj( + struct wlan_objmgr_psoc *psoc); + +/** + * reg_get_pdev_obj() - Provides the reg component object pointer + * @psoc: pointer to psoc object. + * + * Return: reg component object pointer + */ +struct wlan_regulatory_pdev_priv_obj *reg_get_pdev_obj( + struct wlan_objmgr_pdev *pdev); + +/** + * wlan_regulatory_psoc_obj_created_notification() - PSOC obj create callback + * @psoc: PSOC object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization to + * get notified when the object is created. + * + * Return: Success or Failure + */ +QDF_STATUS wlan_regulatory_psoc_obj_created_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list); + +/** + * wlan_regulatory_psoc_obj_destroyed_notification() - PSOC obj delete callback + * @psoc: PSOC object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization to + * get notified when the object is deleted. + * + * Return: Success or Failure + */ +QDF_STATUS wlan_regulatory_psoc_obj_destroyed_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list); + +/** + * wlan_regulatory_pdev_obj_created_notification() - PDEV obj create callback + * @pdev: pdev object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization to + * get notified when the pdev object is created. + * + * Return: Success or Failure + */ +QDF_STATUS wlan_regulatory_pdev_obj_created_notification( + struct wlan_objmgr_pdev *pdev, void *arg_list); + +/** + * wlan_regulatory_pdev_obj_destroyed_notification() - PDEV obj destroy callback + * @pdev: pdev object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization to + * get notified when the pdev object is destroyed. + * + * Return: Success or Failure + */ +QDF_STATUS wlan_regulatory_pdev_obj_destroyed_notification( + struct wlan_objmgr_pdev *pdev, void *arg_list); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_services_common.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_services_common.c new file mode 100644 index 0000000000000000000000000000000000000000..be27444d70a67faea1b843b658dca11899a8aa20 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_services_common.c @@ -0,0 +1,3722 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_services_common.c + * This file defines regulatory component service functions + */ + +#include +#include +#include +#include +#include "reg_priv_objs.h" +#include "reg_utils.h" +#include "reg_callbacks.h" +#include "reg_services_common.h" +#include +#include "reg_db.h" +#include "reg_db_parser.h" +#include "reg_build_chan_list.h" +#include +#include + +const struct chan_map *channel_map; +#ifdef CONFIG_CHAN_NUM_API +static const struct bonded_channel bonded_chan_40mhz_list[] = { + {36, 40}, + {44, 48}, + {52, 56}, + {60, 64}, + {100, 104}, + {108, 112}, + {116, 120}, + {124, 128}, + {132, 136}, + {140, 144}, + {149, 153}, + {157, 161}, + {165, 169} +}; + +static const struct bonded_channel bonded_chan_80mhz_list[] = { + {36, 48}, + {52, 64}, + {100, 112}, + {116, 128}, + {132, 144}, + {149, 161} +}; + +static const struct bonded_channel bonded_chan_160mhz_list[] = { + {36, 64}, + {100, 128} +}; +#endif /* CONFIG_CHAN_NUM_API */ + +#ifdef CONFIG_CHAN_FREQ_API +/* bonded_chan_40mhz_list_freq - List of 40MHz bonnded channel frequencies */ +static const struct bonded_channel_freq bonded_chan_40mhz_list_freq[] = { + {5180, 5200}, + {5220, 5240}, + {5260, 5280}, + {5300, 5320}, + {5500, 5520}, + {5540, 5560}, + {5580, 5600}, + {5620, 5640}, + {5660, 5680}, + {5700, 5720}, + {5745, 5765}, + {5785, 5805}, + {5825, 5845}, +#ifdef CONFIG_BAND_6GHZ + {5945, 5965}, + {5985, 6005}, + {6025, 6045}, + {6065, 6085}, + {6105, 6125}, + {6145, 6165}, + {6185, 6205}, + {6225, 6245}, + {6265, 6285}, + {6305, 6325}, + {6345, 6365}, + {6385, 6405}, + {6425, 6445}, + {6465, 6485}, + {6505, 6525}, + {6545, 6565}, + {6585, 6605}, + {6625, 6645}, + {6665, 6685}, + {6705, 6725}, + {6745, 6765}, + {6785, 6805}, + {6825, 6845}, + {6865, 6885}, + {6905, 6925}, + {6945, 6965}, + {6985, 7005}, + {7025, 7045}, + {7065, 7085} +#endif /*CONFIG_BAND_6GHZ*/ +}; + +/* bonded_chan_80mhz_list_freq - List of 80MHz bonnded channel frequencies */ +static const struct bonded_channel_freq bonded_chan_80mhz_list_freq[] = { + {5180, 5240}, + {5260, 5320}, + {5500, 5560}, + {5580, 5640}, + {5660, 5720}, + {5745, 5805}, +#ifdef CONFIG_BAND_6GHZ + {5945, 6005}, + {6025, 6085}, + {6105, 6165}, + {6185, 6245}, + {6265, 6325}, + {6345, 6405}, + {6425, 6485}, + {6505, 6565}, + {6585, 6645}, + {6665, 6725}, + {6745, 6805}, + {6825, 6885}, + {6905, 6965}, + {6985, 7045} +#endif /*CONFIG_BAND_6GHZ*/ +}; + +/* bonded_chan_160mhz_list_freq - List of 160MHz bonnded channel frequencies */ +static const struct bonded_channel_freq bonded_chan_160mhz_list_freq[] = { + {5180, 5320}, + {5500, 5640}, +#ifdef CONFIG_BAND_6GHZ + {5945, 6085}, + {6105, 6245}, + {6265, 6405}, + {6425, 6565}, + {6585, 6725}, + {6745, 6885}, + {6905, 7045} +#endif /*CONFIG_BAND_6GHZ*/ +}; +#endif /*CONFIG_CHAN_FREQ_API*/ + +static const enum phy_ch_width get_next_lower_bw[] = { + [CH_WIDTH_80P80MHZ] = CH_WIDTH_160MHZ, + [CH_WIDTH_160MHZ] = CH_WIDTH_80MHZ, + [CH_WIDTH_80MHZ] = CH_WIDTH_40MHZ, + [CH_WIDTH_40MHZ] = CH_WIDTH_20MHZ, + [CH_WIDTH_20MHZ] = CH_WIDTH_10MHZ, + [CH_WIDTH_10MHZ] = CH_WIDTH_5MHZ, + [CH_WIDTH_5MHZ] = CH_WIDTH_INVALID +}; + +const struct chan_map channel_map_us[NUM_CHANNELS] = { + [CHAN_ENUM_2412] = {2412, 1, 20, 40}, + [CHAN_ENUM_2417] = {2417, 2, 20, 40}, + [CHAN_ENUM_2422] = {2422, 3, 20, 40}, + [CHAN_ENUM_2427] = {2427, 4, 20, 40}, + [CHAN_ENUM_2432] = {2432, 5, 20, 40}, + [CHAN_ENUM_2437] = {2437, 6, 20, 40}, + [CHAN_ENUM_2442] = {2442, 7, 20, 40}, + [CHAN_ENUM_2447] = {2447, 8, 20, 40}, + [CHAN_ENUM_2452] = {2452, 9, 20, 40}, + [CHAN_ENUM_2457] = {2457, 10, 20, 40}, + [CHAN_ENUM_2462] = {2462, 11, 20, 40}, + [CHAN_ENUM_2467] = {2467, 12, 20, 40}, + [CHAN_ENUM_2472] = {2472, 13, 20, 40}, + [CHAN_ENUM_2484] = {2484, 14, 20, 20}, + + [CHAN_ENUM_4912] = {4912, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4915] = {4915, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4917] = {4917, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4920] = {4920, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4922] = {4922, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4925] = {4925, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4927] = {4927, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4932] = {4932, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4935] = {4935, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4937] = {4937, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4940] = {4940, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4942] = {4942, 1, 5, 5}, + [CHAN_ENUM_4945] = {4945, 11, 10, 10}, + [CHAN_ENUM_4947] = {4947, 2, 5, 5}, + [CHAN_ENUM_4950] = {4950, 20, 10, 20}, + [CHAN_ENUM_4952] = {4952, 3, 5, 5}, + [CHAN_ENUM_4955] = {4955, 21, 10, 20}, + [CHAN_ENUM_4957] = {4957, 4, 5, 5}, + [CHAN_ENUM_4960] = {4960, 22, 10, 20}, + [CHAN_ENUM_4962] = {4962, 5, 5, 5}, + [CHAN_ENUM_4965] = {4965, 23, 10, 20}, + [CHAN_ENUM_4967] = {4967, 6, 5, 5}, + [CHAN_ENUM_4970] = {4970, 24, 10, 20}, + [CHAN_ENUM_4972] = {4972, 7, 5, 5}, + [CHAN_ENUM_4975] = {4975, 25, 10, 20}, + [CHAN_ENUM_4977] = {4977, 8, 5, 5}, + [CHAN_ENUM_4980] = {4980, 26, 10, 20}, + [CHAN_ENUM_4982] = {4982, 9, 5, 5}, + [CHAN_ENUM_4985] = {4985, 19, 10, 10}, + [CHAN_ENUM_4987] = {4987, 10, 5, 5}, + [CHAN_ENUM_5032] = {5032, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5035] = {5035, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5037] = {5037, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5040] = {5040, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5042] = {5042, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5045] = {5045, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5047] = {5047, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5052] = {5052, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5055] = {5055, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5057] = {5057, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5060] = {5060, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5080] = {5080, INVALID_CHANNEL_NUM, 2, 20}, + + [CHAN_ENUM_5180] = {5180, 36, 2, 160}, + [CHAN_ENUM_5200] = {5200, 40, 2, 160}, + [CHAN_ENUM_5220] = {5220, 44, 2, 160}, + [CHAN_ENUM_5240] = {5240, 48, 2, 160}, + [CHAN_ENUM_5260] = {5260, 52, 2, 160}, + [CHAN_ENUM_5280] = {5280, 56, 2, 160}, + [CHAN_ENUM_5300] = {5300, 60, 2, 160}, + [CHAN_ENUM_5320] = {5320, 64, 2, 160}, + [CHAN_ENUM_5500] = {5500, 100, 2, 160}, + [CHAN_ENUM_5520] = {5520, 104, 2, 160}, + [CHAN_ENUM_5540] = {5540, 108, 2, 160}, + [CHAN_ENUM_5560] = {5560, 112, 2, 160}, + [CHAN_ENUM_5580] = {5580, 116, 2, 160}, + [CHAN_ENUM_5600] = {5600, 120, 2, 160}, + [CHAN_ENUM_5620] = {5620, 124, 2, 160}, + [CHAN_ENUM_5640] = {5640, 128, 2, 160}, + [CHAN_ENUM_5660] = {5660, 132, 2, 160}, + [CHAN_ENUM_5680] = {5680, 136, 2, 160}, + [CHAN_ENUM_5700] = {5700, 140, 2, 160}, + [CHAN_ENUM_5720] = {5720, 144, 2, 160}, + [CHAN_ENUM_5745] = {5745, 149, 2, 160}, + [CHAN_ENUM_5765] = {5765, 153, 2, 160}, + [CHAN_ENUM_5785] = {5785, 157, 2, 160}, + [CHAN_ENUM_5805] = {5805, 161, 2, 160}, + [CHAN_ENUM_5825] = {5825, 165, 2, 160}, + [CHAN_ENUM_5845] = {5845, 169, 2, 160}, +#ifdef WLAN_FEATURE_DSRC + [CHAN_ENUM_5850] = {5850, 170, 2, 160}, + [CHAN_ENUM_5855] = {5855, 171, 2, 160}, + [CHAN_ENUM_5860] = {5860, 172, 2, 160}, +#else + [CHAN_ENUM_5850] = {5850, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5855] = {5855, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5860] = {5860, INVALID_CHANNEL_NUM, 2, 160}, +#endif + [CHAN_ENUM_5865] = {5865, 173, 2, 160}, +#ifdef WLAN_FEATURE_DSRC + [CHAN_ENUM_5870] = {5870, 174, 2, 160}, +#else + [CHAN_ENUM_5870] = {5870, INVALID_CHANNEL_NUM, 2, 160}, +#endif + [CHAN_ENUM_5875] = {5875, 175, 2, 160}, + [CHAN_ENUM_5880] = {5880, 176, 2, 160}, + [CHAN_ENUM_5885] = {5885, 177, 2, 160}, + [CHAN_ENUM_5890] = {5890, 178, 2, 160}, + [CHAN_ENUM_5895] = {5895, 179, 2, 160}, + [CHAN_ENUM_5900] = {5900, 180, 2, 160}, + [CHAN_ENUM_5905] = {5905, 181, 2, 160}, + [CHAN_ENUM_5910] = {5910, 182, 2, 160}, + [CHAN_ENUM_5915] = {5915, 183, 2, 160}, + [CHAN_ENUM_5920] = {5920, 184, 2, 160}, +#ifdef CONFIG_BAND_6GHZ + [CHAN_ENUM_5945] = {5945, 1, 2, 160}, + [CHAN_ENUM_5965] = {5965, 5, 2, 160}, + [CHAN_ENUM_5985] = {5985, 9, 2, 160}, + [CHAN_ENUM_6005] = {6005, 13, 2, 160}, + [CHAN_ENUM_6025] = {6025, 17, 2, 160}, + [CHAN_ENUM_6045] = {6045, 21, 2, 160}, + [CHAN_ENUM_6065] = {6065, 25, 2, 160}, + [CHAN_ENUM_6085] = {6085, 29, 2, 160}, + [CHAN_ENUM_6105] = {6105, 33, 2, 160}, + [CHAN_ENUM_6125] = {6125, 37, 2, 160}, + [CHAN_ENUM_6145] = {6145, 41, 2, 160}, + [CHAN_ENUM_6165] = {6165, 45, 2, 160}, + [CHAN_ENUM_6185] = {6185, 49, 2, 160}, + [CHAN_ENUM_6205] = {6205, 53, 2, 160}, + [CHAN_ENUM_6225] = {6225, 57, 2, 160}, + [CHAN_ENUM_6245] = {6245, 61, 2, 160}, + [CHAN_ENUM_6265] = {6265, 65, 2, 160}, + [CHAN_ENUM_6285] = {6285, 69, 2, 160}, + [CHAN_ENUM_6305] = {6305, 73, 2, 160}, + [CHAN_ENUM_6325] = {6325, 77, 2, 160}, + [CHAN_ENUM_6345] = {6345, 81, 2, 160}, + [CHAN_ENUM_6365] = {6365, 85, 2, 160}, + [CHAN_ENUM_6385] = {6385, 89, 2, 160}, + [CHAN_ENUM_6405] = {6405, 93, 2, 160}, + [CHAN_ENUM_6425] = {6425, 97, 2, 160}, + [CHAN_ENUM_6445] = {6445, 101, 2, 160}, + [CHAN_ENUM_6465] = {6465, 105, 2, 160}, + [CHAN_ENUM_6485] = {6485, 109, 2, 160}, + [CHAN_ENUM_6505] = {6505, 113, 2, 160}, + [CHAN_ENUM_6525] = {6525, 117, 2, 160}, + [CHAN_ENUM_6545] = {6545, 121, 2, 160}, + [CHAN_ENUM_6565] = {6565, 125, 2, 160}, + [CHAN_ENUM_6585] = {6585, 129, 2, 160}, + [CHAN_ENUM_6605] = {6605, 133, 2, 160}, + [CHAN_ENUM_6625] = {6625, 137, 2, 160}, + [CHAN_ENUM_6645] = {6645, 141, 2, 160}, + [CHAN_ENUM_6665] = {6665, 145, 2, 160}, + [CHAN_ENUM_6685] = {6685, 149, 2, 160}, + [CHAN_ENUM_6705] = {6705, 153, 2, 160}, + [CHAN_ENUM_6725] = {6725, 157, 2, 160}, + [CHAN_ENUM_6745] = {6745, 161, 2, 160}, + [CHAN_ENUM_6765] = {6765, 165, 2, 160}, + [CHAN_ENUM_6785] = {6785, 169, 2, 160}, + [CHAN_ENUM_6805] = {6805, 173, 2, 160}, + [CHAN_ENUM_6825] = {6825, 177, 2, 160}, + [CHAN_ENUM_6845] = {6845, 181, 2, 160}, + [CHAN_ENUM_6865] = {6865, 185, 2, 160}, + [CHAN_ENUM_6885] = {6885, 189, 2, 160}, + [CHAN_ENUM_6905] = {6905, 193, 2, 160}, + [CHAN_ENUM_6925] = {6925, 197, 2, 160}, + [CHAN_ENUM_6945] = {6945, 201, 2, 160}, + [CHAN_ENUM_6965] = {6965, 205, 2, 160}, + [CHAN_ENUM_6985] = {6985, 209, 2, 160}, + [CHAN_ENUM_7005] = {7005, 213, 2, 160}, + [CHAN_ENUM_7025] = {7025, 217, 2, 160}, + [CHAN_ENUM_7045] = {7045, 221, 2, 160}, + [CHAN_ENUM_7065] = {7065, 225, 2, 160}, + [CHAN_ENUM_7085] = {7085, 229, 2, 160}, + [CHAN_ENUM_7105] = {7105, 233, 2, 160} +#endif /* CONFIG_BAND_6GHZ */ +}; + +const struct chan_map channel_map_eu[NUM_CHANNELS] = { + [CHAN_ENUM_2412] = {2412, 1, 20, 40}, + [CHAN_ENUM_2417] = {2417, 2, 20, 40}, + [CHAN_ENUM_2422] = {2422, 3, 20, 40}, + [CHAN_ENUM_2427] = {2427, 4, 20, 40}, + [CHAN_ENUM_2432] = {2432, 5, 20, 40}, + [CHAN_ENUM_2437] = {2437, 6, 20, 40}, + [CHAN_ENUM_2442] = {2442, 7, 20, 40}, + [CHAN_ENUM_2447] = {2447, 8, 20, 40}, + [CHAN_ENUM_2452] = {2452, 9, 20, 40}, + [CHAN_ENUM_2457] = {2457, 10, 20, 40}, + [CHAN_ENUM_2462] = {2462, 11, 20, 40}, + [CHAN_ENUM_2467] = {2467, 12, 20, 40}, + [CHAN_ENUM_2472] = {2472, 13, 20, 40}, + [CHAN_ENUM_2484] = {2484, 14, 20, 20}, + + [CHAN_ENUM_4912] = {4912, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4915] = {4915, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4917] = {4917, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4920] = {4920, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4922] = {4922, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4925] = {4925, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4927] = {4927, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4932] = {4932, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4935] = {4935, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4937] = {4937, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4940] = {4940, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4942] = {4942, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4945] = {4945, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4947] = {4947, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4950] = {4950, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4952] = {4952, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4955] = {4955, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4957] = {4957, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4960] = {4960, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4962] = {4962, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4965] = {4965, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4967] = {4967, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4970] = {4970, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4972] = {4972, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4975] = {4975, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4977] = {4977, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4980] = {4980, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4982] = {4982, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4985] = {4985, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4987] = {4987, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5032] = {5032, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5035] = {5035, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5037] = {5037, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5040] = {5040, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5042] = {5042, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5045] = {5045, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5047] = {5047, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5052] = {5052, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5055] = {5055, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5057] = {5057, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5060] = {5060, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5080] = {5080, INVALID_CHANNEL_NUM, 2, 20}, + + [CHAN_ENUM_5180] = {5180, 36, 2, 160}, + [CHAN_ENUM_5200] = {5200, 40, 2, 160}, + [CHAN_ENUM_5220] = {5220, 44, 2, 160}, + [CHAN_ENUM_5240] = {5240, 48, 2, 160}, + [CHAN_ENUM_5260] = {5260, 52, 2, 160}, + [CHAN_ENUM_5280] = {5280, 56, 2, 160}, + [CHAN_ENUM_5300] = {5300, 60, 2, 160}, + [CHAN_ENUM_5320] = {5320, 64, 2, 160}, + [CHAN_ENUM_5500] = {5500, 100, 2, 160}, + [CHAN_ENUM_5520] = {5520, 104, 2, 160}, + [CHAN_ENUM_5540] = {5540, 108, 2, 160}, + [CHAN_ENUM_5560] = {5560, 112, 2, 160}, + [CHAN_ENUM_5580] = {5580, 116, 2, 160}, + [CHAN_ENUM_5600] = {5600, 120, 2, 160}, + [CHAN_ENUM_5620] = {5620, 124, 2, 160}, + [CHAN_ENUM_5640] = {5640, 128, 2, 160}, + [CHAN_ENUM_5660] = {5660, 132, 2, 160}, + [CHAN_ENUM_5680] = {5680, 136, 2, 160}, + [CHAN_ENUM_5700] = {5700, 140, 2, 160}, + [CHAN_ENUM_5720] = {5720, 144, 2, 160}, + [CHAN_ENUM_5745] = {5745, 149, 2, 160}, + [CHAN_ENUM_5765] = {5765, 153, 2, 160}, + [CHAN_ENUM_5785] = {5785, 157, 2, 160}, + [CHAN_ENUM_5805] = {5805, 161, 2, 160}, + [CHAN_ENUM_5825] = {5825, 165, 2, 160}, + [CHAN_ENUM_5845] = {5845, 169, 2, 160}, + [CHAN_ENUM_5850] = {5850, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5855] = {5855, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5860] = {5860, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5865] = {5865, 173, 2, 160}, + [CHAN_ENUM_5870] = {5870, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5875] = {5875, 175, 2, 160}, + [CHAN_ENUM_5880] = {5880, 176, 2, 160}, + [CHAN_ENUM_5885] = {5885, 177, 2, 160}, + [CHAN_ENUM_5890] = {5890, 178, 2, 160}, + [CHAN_ENUM_5895] = {5895, 179, 2, 160}, + [CHAN_ENUM_5900] = {5900, 180, 2, 160}, + [CHAN_ENUM_5905] = {5905, 181, 2, 160}, + [CHAN_ENUM_5910] = {5910, 182, 2, 160}, + [CHAN_ENUM_5915] = {5915, 183, 2, 160}, + [CHAN_ENUM_5920] = {5920, 184, 2, 160}, +#ifdef CONFIG_BAND_6GHZ + [CHAN_ENUM_5945] = {5945, 1, 2, 160}, + [CHAN_ENUM_5965] = {5965, 5, 2, 160}, + [CHAN_ENUM_5985] = {5985, 9, 2, 160}, + [CHAN_ENUM_6005] = {6005, 13, 2, 160}, + [CHAN_ENUM_6025] = {6025, 17, 2, 160}, + [CHAN_ENUM_6045] = {6045, 21, 2, 160}, + [CHAN_ENUM_6065] = {6065, 25, 2, 160}, + [CHAN_ENUM_6085] = {6085, 29, 2, 160}, + [CHAN_ENUM_6105] = {6105, 33, 2, 160}, + [CHAN_ENUM_6125] = {6125, 37, 2, 160}, + [CHAN_ENUM_6145] = {6145, 41, 2, 160}, + [CHAN_ENUM_6165] = {6165, 45, 2, 160}, + [CHAN_ENUM_6185] = {6185, 49, 2, 160}, + [CHAN_ENUM_6205] = {6205, 53, 2, 160}, + [CHAN_ENUM_6225] = {6225, 57, 2, 160}, + [CHAN_ENUM_6245] = {6245, 61, 2, 160}, + [CHAN_ENUM_6265] = {6265, 65, 2, 160}, + [CHAN_ENUM_6285] = {6285, 69, 2, 160}, + [CHAN_ENUM_6305] = {6305, 73, 2, 160}, + [CHAN_ENUM_6325] = {6325, 77, 2, 160}, + [CHAN_ENUM_6345] = {6345, 81, 2, 160}, + [CHAN_ENUM_6365] = {6365, 85, 2, 160}, + [CHAN_ENUM_6385] = {6385, 89, 2, 160}, + [CHAN_ENUM_6405] = {6405, 93, 2, 160}, + [CHAN_ENUM_6425] = {6425, 97, 2, 160}, + [CHAN_ENUM_6445] = {6445, 101, 2, 160}, + [CHAN_ENUM_6465] = {6465, 105, 2, 160}, + [CHAN_ENUM_6485] = {6485, 109, 2, 160}, + [CHAN_ENUM_6505] = {6505, 113, 2, 160}, + [CHAN_ENUM_6525] = {6525, 117, 2, 160}, + [CHAN_ENUM_6545] = {6545, 121, 2, 160}, + [CHAN_ENUM_6565] = {6565, 125, 2, 160}, + [CHAN_ENUM_6585] = {6585, 129, 2, 160}, + [CHAN_ENUM_6605] = {6605, 133, 2, 160}, + [CHAN_ENUM_6625] = {6625, 137, 2, 160}, + [CHAN_ENUM_6645] = {6645, 141, 2, 160}, + [CHAN_ENUM_6665] = {6665, 145, 2, 160}, + [CHAN_ENUM_6685] = {6685, 149, 2, 160}, + [CHAN_ENUM_6705] = {6705, 153, 2, 160}, + [CHAN_ENUM_6725] = {6725, 157, 2, 160}, + [CHAN_ENUM_6745] = {6745, 161, 2, 160}, + [CHAN_ENUM_6765] = {6765, 165, 2, 160}, + [CHAN_ENUM_6785] = {6785, 169, 2, 160}, + [CHAN_ENUM_6805] = {6805, 173, 2, 160}, + [CHAN_ENUM_6825] = {6825, 177, 2, 160}, + [CHAN_ENUM_6845] = {6845, 181, 2, 160}, + [CHAN_ENUM_6865] = {6865, 185, 2, 160}, + [CHAN_ENUM_6885] = {6885, 189, 2, 160}, + [CHAN_ENUM_6905] = {6905, 193, 2, 160}, + [CHAN_ENUM_6925] = {6925, 197, 2, 160}, + [CHAN_ENUM_6945] = {6945, 201, 2, 160}, + [CHAN_ENUM_6965] = {6965, 205, 2, 160}, + [CHAN_ENUM_6985] = {6985, 209, 2, 160}, + [CHAN_ENUM_7005] = {7005, 213, 2, 160}, + [CHAN_ENUM_7025] = {7025, 217, 2, 160}, + [CHAN_ENUM_7045] = {7045, 221, 2, 160}, + [CHAN_ENUM_7065] = {7065, 225, 2, 160}, + [CHAN_ENUM_7085] = {7085, 229, 2, 160}, + [CHAN_ENUM_7105] = {7105, 233, 2, 160} +#endif /* CONFIG_BAND_6GHZ */ +}; + +const struct chan_map channel_map_jp[NUM_CHANNELS] = { + [CHAN_ENUM_2412] = {2412, 1, 20, 40}, + [CHAN_ENUM_2417] = {2417, 2, 20, 40}, + [CHAN_ENUM_2422] = {2422, 3, 20, 40}, + [CHAN_ENUM_2427] = {2427, 4, 20, 40}, + [CHAN_ENUM_2432] = {2432, 5, 20, 40}, + [CHAN_ENUM_2437] = {2437, 6, 20, 40}, + [CHAN_ENUM_2442] = {2442, 7, 20, 40}, + [CHAN_ENUM_2447] = {2447, 8, 20, 40}, + [CHAN_ENUM_2452] = {2452, 9, 20, 40}, + [CHAN_ENUM_2457] = {2457, 10, 20, 40}, + [CHAN_ENUM_2462] = {2462, 11, 20, 40}, + [CHAN_ENUM_2467] = {2467, 12, 20, 40}, + [CHAN_ENUM_2472] = {2472, 13, 20, 40}, + [CHAN_ENUM_2484] = {2484, 14, 20, 20}, + + [CHAN_ENUM_4912] = {4912, 182, 5, 5}, + [CHAN_ENUM_4915] = {4915, 183, 10, 10}, + [CHAN_ENUM_4917] = {4917, 183, 5, 5}, + [CHAN_ENUM_4920] = {4920, 184, 10, 20}, + [CHAN_ENUM_4922] = {4922, 184, 5, 5}, + [CHAN_ENUM_4925] = {4925, 185, 10, 10}, + [CHAN_ENUM_4927] = {4927, 185, 5, 5}, + [CHAN_ENUM_4932] = {4932, 186, 5, 5}, + [CHAN_ENUM_4935] = {4935, 187, 10, 10}, + [CHAN_ENUM_4937] = {4937, 187, 5, 5}, + [CHAN_ENUM_4940] = {4940, 188, 10, 20}, + [CHAN_ENUM_4942] = {4942, 188, 5, 5}, + [CHAN_ENUM_4945] = {4945, 189, 10, 10}, + [CHAN_ENUM_4947] = {4947, 189, 5, 5}, + [CHAN_ENUM_4950] = {4950, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4952] = {4952, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4955] = {4955, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4957] = {4957, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4960] = {4960, 192, 20, 20}, + [CHAN_ENUM_4962] = {4962, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4965] = {4965, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4967] = {4967, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4970] = {4970, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4972] = {4972, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4975] = {4975, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4977] = {4977, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4980] = {4980, 196, 20, 20}, + [CHAN_ENUM_4982] = {4982, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4985] = {4985, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4987] = {4987, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5032] = {5032, 6, 5, 5}, + [CHAN_ENUM_5035] = {5035, 7, 10, 10}, + [CHAN_ENUM_5037] = {5037, 7, 5, 5}, + [CHAN_ENUM_5040] = {5040, 8, 10, 20}, + [CHAN_ENUM_5042] = {5042, 8, 5, 5}, + [CHAN_ENUM_5045] = {5045, 9, 10, 10}, + [CHAN_ENUM_5047] = {5047, 9, 5, 5}, + [CHAN_ENUM_5052] = {5052, 10, 5, 5}, + [CHAN_ENUM_5055] = {5055, 11, 10, 10}, + [CHAN_ENUM_5057] = {5057, 11, 5, 5}, + [CHAN_ENUM_5060] = {5060, 12, 20, 20}, + [CHAN_ENUM_5080] = {5080, 16, 20, 20}, + + [CHAN_ENUM_5180] = {5180, 36, 2, 160}, + [CHAN_ENUM_5200] = {5200, 40, 2, 160}, + [CHAN_ENUM_5220] = {5220, 44, 2, 160}, + [CHAN_ENUM_5240] = {5240, 48, 2, 160}, + [CHAN_ENUM_5260] = {5260, 52, 2, 160}, + [CHAN_ENUM_5280] = {5280, 56, 2, 160}, + [CHAN_ENUM_5300] = {5300, 60, 2, 160}, + [CHAN_ENUM_5320] = {5320, 64, 2, 160}, + [CHAN_ENUM_5500] = {5500, 100, 2, 160}, + [CHAN_ENUM_5520] = {5520, 104, 2, 160}, + [CHAN_ENUM_5540] = {5540, 108, 2, 160}, + [CHAN_ENUM_5560] = {5560, 112, 2, 160}, + [CHAN_ENUM_5580] = {5580, 116, 2, 160}, + [CHAN_ENUM_5600] = {5600, 120, 2, 160}, + [CHAN_ENUM_5620] = {5620, 124, 2, 160}, + [CHAN_ENUM_5640] = {5640, 128, 2, 160}, + [CHAN_ENUM_5660] = {5660, 132, 2, 160}, + [CHAN_ENUM_5680] = {5680, 136, 2, 160}, + [CHAN_ENUM_5700] = {5700, 140, 2, 160}, + [CHAN_ENUM_5720] = {5720, 144, 2, 160}, + [CHAN_ENUM_5745] = {5745, 149, 2, 160}, + [CHAN_ENUM_5765] = {5765, 153, 2, 160}, + [CHAN_ENUM_5785] = {5785, 157, 2, 160}, + [CHAN_ENUM_5805] = {5805, 161, 2, 160}, + [CHAN_ENUM_5825] = {5825, 165, 2, 160}, + [CHAN_ENUM_5845] = {5845, 169, 2, 160}, + [CHAN_ENUM_5850] = {5850, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5855] = {5855, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5860] = {5860, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5865] = {5865, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5870] = {5870, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5875] = {5875, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5880] = {5880, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5885] = {5885, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5890] = {5890, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5895] = {5895, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5900] = {5900, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5905] = {5905, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5910] = {5910, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5915] = {5915, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5920] = {5920, INVALID_CHANNEL_NUM, 2, 160}, +#ifdef CONFIG_BAND_6GHZ + [CHAN_ENUM_5945] = {5945, 1, 2, 160}, + [CHAN_ENUM_5965] = {5965, 5, 2, 160}, + [CHAN_ENUM_5985] = {5985, 9, 2, 160}, + [CHAN_ENUM_6005] = {6005, 13, 2, 160}, + [CHAN_ENUM_6025] = {6025, 17, 2, 160}, + [CHAN_ENUM_6045] = {6045, 21, 2, 160}, + [CHAN_ENUM_6065] = {6065, 25, 2, 160}, + [CHAN_ENUM_6085] = {6085, 29, 2, 160}, + [CHAN_ENUM_6105] = {6105, 33, 2, 160}, + [CHAN_ENUM_6125] = {6125, 37, 2, 160}, + [CHAN_ENUM_6145] = {6145, 41, 2, 160}, + [CHAN_ENUM_6165] = {6165, 45, 2, 160}, + [CHAN_ENUM_6185] = {6185, 49, 2, 160}, + [CHAN_ENUM_6205] = {6205, 53, 2, 160}, + [CHAN_ENUM_6225] = {6225, 57, 2, 160}, + [CHAN_ENUM_6245] = {6245, 61, 2, 160}, + [CHAN_ENUM_6265] = {6265, 65, 2, 160}, + [CHAN_ENUM_6285] = {6285, 69, 2, 160}, + [CHAN_ENUM_6305] = {6305, 73, 2, 160}, + [CHAN_ENUM_6325] = {6325, 77, 2, 160}, + [CHAN_ENUM_6345] = {6345, 81, 2, 160}, + [CHAN_ENUM_6365] = {6365, 85, 2, 160}, + [CHAN_ENUM_6385] = {6385, 89, 2, 160}, + [CHAN_ENUM_6405] = {6405, 93, 2, 160}, + [CHAN_ENUM_6425] = {6425, 97, 2, 160}, + [CHAN_ENUM_6445] = {6445, 101, 2, 160}, + [CHAN_ENUM_6465] = {6465, 105, 2, 160}, + [CHAN_ENUM_6485] = {6485, 109, 2, 160}, + [CHAN_ENUM_6505] = {6505, 113, 2, 160}, + [CHAN_ENUM_6525] = {6525, 117, 2, 160}, + [CHAN_ENUM_6545] = {6545, 121, 2, 160}, + [CHAN_ENUM_6565] = {6565, 125, 2, 160}, + [CHAN_ENUM_6585] = {6585, 129, 2, 160}, + [CHAN_ENUM_6605] = {6605, 133, 2, 160}, + [CHAN_ENUM_6625] = {6625, 137, 2, 160}, + [CHAN_ENUM_6645] = {6645, 141, 2, 160}, + [CHAN_ENUM_6665] = {6665, 145, 2, 160}, + [CHAN_ENUM_6685] = {6685, 149, 2, 160}, + [CHAN_ENUM_6705] = {6705, 153, 2, 160}, + [CHAN_ENUM_6725] = {6725, 157, 2, 160}, + [CHAN_ENUM_6745] = {6745, 161, 2, 160}, + [CHAN_ENUM_6765] = {6765, 165, 2, 160}, + [CHAN_ENUM_6785] = {6785, 169, 2, 160}, + [CHAN_ENUM_6805] = {6805, 173, 2, 160}, + [CHAN_ENUM_6825] = {6825, 177, 2, 160}, + [CHAN_ENUM_6845] = {6845, 181, 2, 160}, + [CHAN_ENUM_6865] = {6865, 185, 2, 160}, + [CHAN_ENUM_6885] = {6885, 189, 2, 160}, + [CHAN_ENUM_6905] = {6905, 193, 2, 160}, + [CHAN_ENUM_6925] = {6925, 197, 2, 160}, + [CHAN_ENUM_6945] = {6945, 201, 2, 160}, + [CHAN_ENUM_6965] = {6965, 205, 2, 160}, + [CHAN_ENUM_6985] = {6985, 209, 2, 160}, + [CHAN_ENUM_7005] = {7005, 213, 2, 160}, + [CHAN_ENUM_7025] = {7025, 217, 2, 160}, + [CHAN_ENUM_7045] = {7045, 221, 2, 160}, + [CHAN_ENUM_7065] = {7065, 225, 2, 160}, + [CHAN_ENUM_7085] = {7085, 229, 2, 160}, + [CHAN_ENUM_7105] = {7105, 233, 2, 160} +#endif /* CONFIG_BAND_6GHZ */ +}; + +const struct chan_map channel_map_global[NUM_CHANNELS] = { + [CHAN_ENUM_2412] = {2412, 1, 20, 40}, + [CHAN_ENUM_2417] = {2417, 2, 20, 40}, + [CHAN_ENUM_2422] = {2422, 3, 20, 40}, + [CHAN_ENUM_2427] = {2427, 4, 20, 40}, + [CHAN_ENUM_2432] = {2432, 5, 20, 40}, + [CHAN_ENUM_2437] = {2437, 6, 20, 40}, + [CHAN_ENUM_2442] = {2442, 7, 20, 40}, + [CHAN_ENUM_2447] = {2447, 8, 20, 40}, + [CHAN_ENUM_2452] = {2452, 9, 20, 40}, + [CHAN_ENUM_2457] = {2457, 10, 20, 40}, + [CHAN_ENUM_2462] = {2462, 11, 20, 40}, + [CHAN_ENUM_2467] = {2467, 12, 20, 40}, + [CHAN_ENUM_2472] = {2472, 13, 20, 40}, + [CHAN_ENUM_2484] = {2484, 14, 20, 20}, + + [CHAN_ENUM_4912] = {4912, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4915] = {4915, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4917] = {4917, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4920] = {4920, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4922] = {4922, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4925] = {4925, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4927] = {4927, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4932] = {4932, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4935] = {4935, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4937] = {4937, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4940] = {4940, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4942] = {4942, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4945] = {4945, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4947] = {4947, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4950] = {4950, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4952] = {4952, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4955] = {4955, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4957] = {4957, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4960] = {4960, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4962] = {4962, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4965] = {4965, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4967] = {4967, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4970] = {4970, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4972] = {4972, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4975] = {4975, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4977] = {4977, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4980] = {4980, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4982] = {4982, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4985] = {4985, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4987] = {4987, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5032] = {5032, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5035] = {5035, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5037] = {5037, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5040] = {5040, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5042] = {5042, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5045] = {5045, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5047] = {5047, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5052] = {5052, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5055] = {5055, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5057] = {5057, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5060] = {5060, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5080] = {5080, INVALID_CHANNEL_NUM, 2, 20}, + + [CHAN_ENUM_5180] = {5180, 36, 2, 160}, + [CHAN_ENUM_5200] = {5200, 40, 2, 160}, + [CHAN_ENUM_5220] = {5220, 44, 2, 160}, + [CHAN_ENUM_5240] = {5240, 48, 2, 160}, + [CHAN_ENUM_5260] = {5260, 52, 2, 160}, + [CHAN_ENUM_5280] = {5280, 56, 2, 160}, + [CHAN_ENUM_5300] = {5300, 60, 2, 160}, + [CHAN_ENUM_5320] = {5320, 64, 2, 160}, + [CHAN_ENUM_5500] = {5500, 100, 2, 160}, + [CHAN_ENUM_5520] = {5520, 104, 2, 160}, + [CHAN_ENUM_5540] = {5540, 108, 2, 160}, + [CHAN_ENUM_5560] = {5560, 112, 2, 160}, + [CHAN_ENUM_5580] = {5580, 116, 2, 160}, + [CHAN_ENUM_5600] = {5600, 120, 2, 160}, + [CHAN_ENUM_5620] = {5620, 124, 2, 160}, + [CHAN_ENUM_5640] = {5640, 128, 2, 160}, + [CHAN_ENUM_5660] = {5660, 132, 2, 160}, + [CHAN_ENUM_5680] = {5680, 136, 2, 160}, + [CHAN_ENUM_5700] = {5700, 140, 2, 160}, + [CHAN_ENUM_5720] = {5720, 144, 2, 160}, + [CHAN_ENUM_5745] = {5745, 149, 2, 160}, + [CHAN_ENUM_5765] = {5765, 153, 2, 160}, + [CHAN_ENUM_5785] = {5785, 157, 2, 160}, + [CHAN_ENUM_5805] = {5805, 161, 2, 160}, + [CHAN_ENUM_5825] = {5825, 165, 2, 160}, + [CHAN_ENUM_5845] = {5845, 169, 2, 160}, + [CHAN_ENUM_5850] = {5850, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5855] = {5855, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5860] = {5860, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5865] = {5865, 173, 2, 160}, + [CHAN_ENUM_5870] = {5870, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5875] = {5875, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5880] = {5880, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5885] = {5885, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5890] = {5890, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5895] = {5895, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5900] = {5900, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5905] = {5905, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5910] = {5910, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5915] = {5915, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5920] = {5920, INVALID_CHANNEL_NUM, 2, 160}, +#ifdef CONFIG_BAND_6GHZ + [CHAN_ENUM_5945] = {5945, 1, 2, 160}, + [CHAN_ENUM_5965] = {5965, 5, 2, 160}, + [CHAN_ENUM_5985] = {5985, 9, 2, 160}, + [CHAN_ENUM_6005] = {6005, 13, 2, 160}, + [CHAN_ENUM_6025] = {6025, 17, 2, 160}, + [CHAN_ENUM_6045] = {6045, 21, 2, 160}, + [CHAN_ENUM_6065] = {6065, 25, 2, 160}, + [CHAN_ENUM_6085] = {6085, 29, 2, 160}, + [CHAN_ENUM_6105] = {6105, 33, 2, 160}, + [CHAN_ENUM_6125] = {6125, 37, 2, 160}, + [CHAN_ENUM_6145] = {6145, 41, 2, 160}, + [CHAN_ENUM_6165] = {6165, 45, 2, 160}, + [CHAN_ENUM_6185] = {6185, 49, 2, 160}, + [CHAN_ENUM_6205] = {6205, 53, 2, 160}, + [CHAN_ENUM_6225] = {6225, 57, 2, 160}, + [CHAN_ENUM_6245] = {6245, 61, 2, 160}, + [CHAN_ENUM_6265] = {6265, 65, 2, 160}, + [CHAN_ENUM_6285] = {6285, 69, 2, 160}, + [CHAN_ENUM_6305] = {6305, 73, 2, 160}, + [CHAN_ENUM_6325] = {6325, 77, 2, 160}, + [CHAN_ENUM_6345] = {6345, 81, 2, 160}, + [CHAN_ENUM_6365] = {6365, 85, 2, 160}, + [CHAN_ENUM_6385] = {6385, 89, 2, 160}, + [CHAN_ENUM_6405] = {6405, 93, 2, 160}, + [CHAN_ENUM_6425] = {6425, 97, 2, 160}, + [CHAN_ENUM_6445] = {6445, 101, 2, 160}, + [CHAN_ENUM_6465] = {6465, 105, 2, 160}, + [CHAN_ENUM_6485] = {6485, 109, 2, 160}, + [CHAN_ENUM_6505] = {6505, 113, 2, 160}, + [CHAN_ENUM_6525] = {6525, 117, 2, 160}, + [CHAN_ENUM_6545] = {6545, 121, 2, 160}, + [CHAN_ENUM_6565] = {6565, 125, 2, 160}, + [CHAN_ENUM_6585] = {6585, 129, 2, 160}, + [CHAN_ENUM_6605] = {6605, 133, 2, 160}, + [CHAN_ENUM_6625] = {6625, 137, 2, 160}, + [CHAN_ENUM_6645] = {6645, 141, 2, 160}, + [CHAN_ENUM_6665] = {6665, 145, 2, 160}, + [CHAN_ENUM_6685] = {6685, 149, 2, 160}, + [CHAN_ENUM_6705] = {6705, 153, 2, 160}, + [CHAN_ENUM_6725] = {6725, 157, 2, 160}, + [CHAN_ENUM_6745] = {6745, 161, 2, 160}, + [CHAN_ENUM_6765] = {6765, 165, 2, 160}, + [CHAN_ENUM_6785] = {6785, 169, 2, 160}, + [CHAN_ENUM_6805] = {6805, 173, 2, 160}, + [CHAN_ENUM_6825] = {6825, 177, 2, 160}, + [CHAN_ENUM_6845] = {6845, 181, 2, 160}, + [CHAN_ENUM_6865] = {6865, 185, 2, 160}, + [CHAN_ENUM_6885] = {6885, 189, 2, 160}, + [CHAN_ENUM_6905] = {6905, 193, 2, 160}, + [CHAN_ENUM_6925] = {6925, 197, 2, 160}, + [CHAN_ENUM_6945] = {6945, 201, 2, 160}, + [CHAN_ENUM_6965] = {6965, 205, 2, 160}, + [CHAN_ENUM_6985] = {6985, 209, 2, 160}, + [CHAN_ENUM_7005] = {7005, 213, 2, 160}, + [CHAN_ENUM_7025] = {7025, 217, 2, 160}, + [CHAN_ENUM_7045] = {7045, 221, 2, 160}, + [CHAN_ENUM_7065] = {7065, 225, 2, 160}, + [CHAN_ENUM_7085] = {7085, 229, 2, 160}, + [CHAN_ENUM_7105] = {7105, 233, 2, 160} +#endif /* CONFIG_BAND_6GHZ */ +}; + +const struct chan_map channel_map_china[NUM_CHANNELS] = { + [CHAN_ENUM_2412] = {2412, 1, 20, 40}, + [CHAN_ENUM_2417] = {2417, 2, 20, 40}, + [CHAN_ENUM_2422] = {2422, 3, 20, 40}, + [CHAN_ENUM_2427] = {2427, 4, 20, 40}, + [CHAN_ENUM_2432] = {2432, 5, 20, 40}, + [CHAN_ENUM_2437] = {2437, 6, 20, 40}, + [CHAN_ENUM_2442] = {2442, 7, 20, 40}, + [CHAN_ENUM_2447] = {2447, 8, 20, 40}, + [CHAN_ENUM_2452] = {2452, 9, 20, 40}, + [CHAN_ENUM_2457] = {2457, 10, 20, 40}, + [CHAN_ENUM_2462] = {2462, 11, 20, 40}, + [CHAN_ENUM_2467] = {2467, 12, 20, 40}, + [CHAN_ENUM_2472] = {2472, 13, 20, 40}, + [CHAN_ENUM_2484] = {2484, 14, 20, 20}, + + [CHAN_ENUM_4912] = {4912, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4915] = {4915, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4917] = {4917, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4920] = {4920, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4922] = {4922, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4925] = {4925, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4927] = {4927, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4932] = {4932, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4935] = {4935, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4937] = {4937, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4940] = {4940, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4942] = {4942, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4945] = {4945, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4947] = {4947, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4950] = {4950, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4952] = {4952, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4955] = {4955, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4957] = {4957, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4960] = {4960, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4962] = {4962, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4965] = {4965, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4967] = {4967, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4970] = {4970, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4972] = {4972, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4975] = {4975, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4977] = {4977, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4980] = {4980, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4982] = {4982, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4985] = {4985, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4987] = {4987, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5032] = {5032, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5035] = {5035, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5037] = {5037, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5040] = {5040, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5042] = {5042, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5045] = {5045, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5047] = {5047, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5052] = {5052, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5055] = {5055, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5057] = {5057, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5060] = {5060, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5080] = {5080, INVALID_CHANNEL_NUM, 2, 20}, + + [CHAN_ENUM_5180] = {5180, 36, 2, 160}, + [CHAN_ENUM_5200] = {5200, 40, 2, 160}, + [CHAN_ENUM_5220] = {5220, 44, 2, 160}, + [CHAN_ENUM_5240] = {5240, 48, 2, 160}, + [CHAN_ENUM_5260] = {5260, 52, 2, 160}, + [CHAN_ENUM_5280] = {5280, 56, 2, 160}, + [CHAN_ENUM_5300] = {5300, 60, 2, 160}, + [CHAN_ENUM_5320] = {5320, 64, 2, 160}, + [CHAN_ENUM_5500] = {5500, 100, 2, 160}, + [CHAN_ENUM_5520] = {5520, 104, 2, 160}, + [CHAN_ENUM_5540] = {5540, 108, 2, 160}, + [CHAN_ENUM_5560] = {5560, 112, 2, 160}, + [CHAN_ENUM_5580] = {5580, 116, 2, 160}, + [CHAN_ENUM_5600] = {5600, 120, 2, 160}, + [CHAN_ENUM_5620] = {5620, 124, 2, 160}, + [CHAN_ENUM_5640] = {5640, 128, 2, 160}, + [CHAN_ENUM_5660] = {5660, 132, 2, 160}, + [CHAN_ENUM_5680] = {5680, 136, 2, 160}, + [CHAN_ENUM_5700] = {5700, 140, 2, 160}, + [CHAN_ENUM_5720] = {5720, 144, 2, 160}, + [CHAN_ENUM_5745] = {5745, 149, 2, 160}, + [CHAN_ENUM_5765] = {5765, 153, 2, 160}, + [CHAN_ENUM_5785] = {5785, 157, 2, 160}, + [CHAN_ENUM_5805] = {5805, 161, 2, 160}, + [CHAN_ENUM_5825] = {5825, 165, 2, 160}, + [CHAN_ENUM_5845] = {5845, 169, 2, 160}, + [CHAN_ENUM_5850] = {5850, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5855] = {5855, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5860] = {5860, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5865] = {5865, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5870] = {5870, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5875] = {5875, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5880] = {5880, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5885] = {5885, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5890] = {5890, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5895] = {5895, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5900] = {5900, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5905] = {5905, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5910] = {5910, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5915] = {5915, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5920] = {5920, INVALID_CHANNEL_NUM, 2, 160}, +#ifdef CONFIG_BAND_6GHZ + [CHAN_ENUM_5945] = {5945, 1, 2, 160}, + [CHAN_ENUM_5965] = {5965, 5, 2, 160}, + [CHAN_ENUM_5985] = {5985, 9, 2, 160}, + [CHAN_ENUM_6005] = {6005, 13, 2, 160}, + [CHAN_ENUM_6025] = {6025, 17, 2, 160}, + [CHAN_ENUM_6045] = {6045, 21, 2, 160}, + [CHAN_ENUM_6065] = {6065, 25, 2, 160}, + [CHAN_ENUM_6085] = {6085, 29, 2, 160}, + [CHAN_ENUM_6105] = {6105, 33, 2, 160}, + [CHAN_ENUM_6125] = {6125, 37, 2, 160}, + [CHAN_ENUM_6145] = {6145, 41, 2, 160}, + [CHAN_ENUM_6165] = {6165, 45, 2, 160}, + [CHAN_ENUM_6185] = {6185, 49, 2, 160}, + [CHAN_ENUM_6205] = {6205, 53, 2, 160}, + [CHAN_ENUM_6225] = {6225, 57, 2, 160}, + [CHAN_ENUM_6245] = {6245, 61, 2, 160}, + [CHAN_ENUM_6265] = {6265, 65, 2, 160}, + [CHAN_ENUM_6285] = {6285, 69, 2, 160}, + [CHAN_ENUM_6305] = {6305, 73, 2, 160}, + [CHAN_ENUM_6325] = {6325, 77, 2, 160}, + [CHAN_ENUM_6345] = {6345, 81, 2, 160}, + [CHAN_ENUM_6365] = {6365, 85, 2, 160}, + [CHAN_ENUM_6385] = {6385, 89, 2, 160}, + [CHAN_ENUM_6405] = {6405, 93, 2, 160}, + [CHAN_ENUM_6425] = {6425, 97, 2, 160}, + [CHAN_ENUM_6445] = {6445, 101, 2, 160}, + [CHAN_ENUM_6465] = {6465, 105, 2, 160}, + [CHAN_ENUM_6485] = {6485, 109, 2, 160}, + [CHAN_ENUM_6505] = {6505, 113, 2, 160}, + [CHAN_ENUM_6525] = {6525, 117, 2, 160}, + [CHAN_ENUM_6545] = {6545, 121, 2, 160}, + [CHAN_ENUM_6565] = {6565, 125, 2, 160}, + [CHAN_ENUM_6585] = {6585, 129, 2, 160}, + [CHAN_ENUM_6605] = {6605, 133, 2, 160}, + [CHAN_ENUM_6625] = {6625, 137, 2, 160}, + [CHAN_ENUM_6645] = {6645, 141, 2, 160}, + [CHAN_ENUM_6665] = {6665, 145, 2, 160}, + [CHAN_ENUM_6685] = {6685, 149, 2, 160}, + [CHAN_ENUM_6705] = {6705, 153, 2, 160}, + [CHAN_ENUM_6725] = {6725, 157, 2, 160}, + [CHAN_ENUM_6745] = {6745, 161, 2, 160}, + [CHAN_ENUM_6765] = {6765, 165, 2, 160}, + [CHAN_ENUM_6785] = {6785, 169, 2, 160}, + [CHAN_ENUM_6805] = {6805, 173, 2, 160}, + [CHAN_ENUM_6825] = {6825, 177, 2, 160}, + [CHAN_ENUM_6845] = {6845, 181, 2, 160}, + [CHAN_ENUM_6865] = {6865, 185, 2, 160}, + [CHAN_ENUM_6885] = {6885, 189, 2, 160}, + [CHAN_ENUM_6905] = {6905, 193, 2, 160}, + [CHAN_ENUM_6925] = {6925, 197, 2, 160}, + [CHAN_ENUM_6945] = {6945, 201, 2, 160}, + [CHAN_ENUM_6965] = {6965, 205, 2, 160}, + [CHAN_ENUM_6985] = {6985, 209, 2, 160}, + [CHAN_ENUM_7005] = {7005, 213, 2, 160}, + [CHAN_ENUM_7025] = {7025, 217, 2, 160}, + [CHAN_ENUM_7045] = {7045, 221, 2, 160}, + [CHAN_ENUM_7065] = {7065, 225, 2, 160}, + [CHAN_ENUM_7085] = {7085, 229, 2, 160}, + [CHAN_ENUM_7105] = {7105, 233, 2, 160} +#endif /* CONFIG_BAND_6GHZ */ +}; + +void reg_init_channel_map(enum dfs_reg dfs_region) +{ + switch (dfs_region) { + case DFS_UNINIT_REGION: + case DFS_UNDEF_REGION: + channel_map = channel_map_global; + break; + case DFS_FCC_REGION: + channel_map = channel_map_us; + break; + case DFS_ETSI_REGION: + channel_map = channel_map_eu; + break; + case DFS_MKK_REGION: + case DFS_MKKN_REGION: + channel_map = channel_map_jp; + break; + case DFS_CN_REGION: + channel_map = channel_map_china; + break; + case DFS_KR_REGION: + channel_map = channel_map_eu; + break; + } +} + +uint16_t reg_get_bw_value(enum phy_ch_width bw) +{ + switch (bw) { + case CH_WIDTH_20MHZ: + return 20; + case CH_WIDTH_40MHZ: + return 40; + case CH_WIDTH_80MHZ: + return 80; + case CH_WIDTH_160MHZ: + return 160; + case CH_WIDTH_80P80MHZ: + return 160; + case CH_WIDTH_INVALID: + return 0; + case CH_WIDTH_5MHZ: + return 5; + case CH_WIDTH_10MHZ: + return 10; + case CH_WIDTH_MAX: + return 160; + default: + return 0; + } +} + +struct wlan_lmac_if_reg_tx_ops *reg_get_psoc_tx_ops( + struct wlan_objmgr_psoc *psoc) +{ + return &((psoc->soc_cb.tx_ops.reg_ops)); +} + +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS reg_get_channel_list_with_power(struct wlan_objmgr_pdev *pdev, + struct channel_power *ch_list, + uint8_t *num_chan) +{ + int i, count; + struct regulatory_channel *reg_channels; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + if (!num_chan || !ch_list) { + reg_err("chan_list or num_ch is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + /* set the current channel list */ + reg_channels = pdev_priv_obj->cur_chan_list; + + for (i = 0, count = 0; i < NUM_CHANNELS; i++) { + if (reg_channels[i].state && + reg_channels[i].chan_flags != REGULATORY_CHAN_DISABLED) { + ch_list[count].chan_num = + reg_channels[i].chan_num; + ch_list[count].center_freq = + reg_channels[i].center_freq; + ch_list[count++].tx_power = + reg_channels[i].tx_power; + } + } + + *num_chan = count; + + return QDF_STATUS_SUCCESS; +} + +enum channel_enum reg_get_chan_enum(uint8_t chan_num) +{ + uint32_t count; + + for (count = 0; count < NUM_CHANNELS; count++) + if (channel_map[count].chan_num == chan_num) + return count; + + reg_err_rl("invalid channel number %d", chan_num); + + return INVALID_CHANNEL; +} + +enum channel_state reg_get_channel_state(struct wlan_objmgr_pdev *pdev, + uint8_t ch) +{ + enum channel_enum ch_idx; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + ch_idx = reg_get_chan_enum(ch); + + if (ch_idx == INVALID_CHANNEL) + return CHANNEL_STATE_INVALID; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg obj is NULL"); + return CHANNEL_STATE_INVALID; + } + + return pdev_priv_obj->cur_chan_list[ch_idx].state; +} + +/** + * reg_get_5g_bonded_chan_array() - get ptr to bonded channel + * @pdev: Pointer to pdev structure + * @oper_ch: operating channel number + * @bonded_chan_ar: bonded channel array + * @array_size; Array size + * @bonded_chan_ptr_ptr: bonded channel ptr ptr + * + * Return: bonded channel state + */ +static enum channel_state reg_get_5g_bonded_chan_array( + struct wlan_objmgr_pdev *pdev, + uint8_t oper_chan, + const struct bonded_channel bonded_chan_ar[], + uint16_t array_size, + const struct bonded_channel **bonded_chan_ptr_ptr) +{ + int i; + uint8_t chan_num; + const struct bonded_channel *bonded_chan_ptr = NULL; + enum channel_state chan_state = CHANNEL_STATE_INVALID; + enum channel_state temp_chan_state; + + for (i = 0; i < array_size; i++) { + if ((oper_chan >= bonded_chan_ar[i].start_ch) && + (oper_chan <= bonded_chan_ar[i].end_ch)) { + bonded_chan_ptr = &bonded_chan_ar[i]; + break; + } + } + + if (!bonded_chan_ptr) + return chan_state; + + *bonded_chan_ptr_ptr = bonded_chan_ptr; + chan_num = bonded_chan_ptr->start_ch; + while (chan_num <= bonded_chan_ptr->end_ch) { + temp_chan_state = reg_get_channel_state(pdev, chan_num); + if (temp_chan_state < chan_state) + chan_state = temp_chan_state; + chan_num = chan_num + 4; + } + + return chan_state; +} + +enum channel_state reg_get_5g_bonded_channel( + struct wlan_objmgr_pdev *pdev, uint8_t chan_num, + enum phy_ch_width ch_width, + const struct bonded_channel **bonded_chan_ptr_ptr) +{ + if (ch_width == CH_WIDTH_80P80MHZ) + return reg_get_5g_bonded_chan_array(pdev, chan_num, + bonded_chan_80mhz_list, + QDF_ARRAY_SIZE(bonded_chan_80mhz_list), + bonded_chan_ptr_ptr); + else if (ch_width == CH_WIDTH_160MHZ) + return reg_get_5g_bonded_chan_array(pdev, chan_num, + bonded_chan_160mhz_list, + QDF_ARRAY_SIZE(bonded_chan_160mhz_list), + bonded_chan_ptr_ptr); + else if (ch_width == CH_WIDTH_80MHZ) + return reg_get_5g_bonded_chan_array(pdev, chan_num, + bonded_chan_80mhz_list, + QDF_ARRAY_SIZE(bonded_chan_80mhz_list), + bonded_chan_ptr_ptr); + else if (ch_width == CH_WIDTH_40MHZ) + return reg_get_5g_bonded_chan_array(pdev, chan_num, + bonded_chan_40mhz_list, + QDF_ARRAY_SIZE(bonded_chan_40mhz_list), + bonded_chan_ptr_ptr); + else + return reg_get_channel_state(pdev, chan_num); +} + +enum channel_state reg_get_5g_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, + uint8_t ch, enum phy_ch_width bw) +{ + enum channel_enum ch_indx; + enum channel_state chan_state; + struct regulatory_channel *reg_channels; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + bool bw_enabled = false; + const struct bonded_channel *bonded_chan_ptr = NULL; + + if (bw > CH_WIDTH_80P80MHZ) { + reg_err("bw passed is not good"); + return CHANNEL_STATE_INVALID; + } + + chan_state = reg_get_5g_bonded_channel(pdev, ch, bw, &bonded_chan_ptr); + + if ((chan_state == CHANNEL_STATE_INVALID) || + (chan_state == CHANNEL_STATE_DISABLE)) + return chan_state; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg obj is NULL"); + return CHANNEL_STATE_INVALID; + } + reg_channels = pdev_priv_obj->cur_chan_list; + + ch_indx = reg_get_chan_enum(ch); + if (ch_indx == INVALID_CHANNEL) + return CHANNEL_STATE_INVALID; + if (bw == CH_WIDTH_5MHZ) + bw_enabled = true; + else if (bw == CH_WIDTH_10MHZ) + bw_enabled = (reg_channels[ch_indx].min_bw <= 10) && + (reg_channels[ch_indx].max_bw >= 10); + else if (bw == CH_WIDTH_20MHZ) + bw_enabled = (reg_channels[ch_indx].min_bw <= 20) && + (reg_channels[ch_indx].max_bw >= 20); + else if (bw == CH_WIDTH_40MHZ) + bw_enabled = (reg_channels[ch_indx].min_bw <= 40) && + (reg_channels[ch_indx].max_bw >= 40); + else if (bw == CH_WIDTH_80MHZ) + bw_enabled = (reg_channels[ch_indx].min_bw <= 80) && + (reg_channels[ch_indx].max_bw >= 80); + else if (bw == CH_WIDTH_160MHZ) + bw_enabled = (reg_channels[ch_indx].min_bw <= 160) && + (reg_channels[ch_indx].max_bw >= 160); + else if (bw == CH_WIDTH_80P80MHZ) + bw_enabled = (reg_channels[ch_indx].min_bw <= 80) && + (reg_channels[ch_indx].max_bw >= 80); + + if (bw_enabled) + return chan_state; + else + return CHANNEL_STATE_DISABLE; +} + +enum channel_state reg_get_2g_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, + uint8_t oper_ch, uint8_t sec_ch, + enum phy_ch_width bw) +{ + enum channel_enum chan_idx; + enum channel_state chan_state; + struct regulatory_channel *reg_channels; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + bool bw_enabled = false; + enum channel_state chan_state2 = CHANNEL_STATE_INVALID; + + if (bw > CH_WIDTH_40MHZ) + return CHANNEL_STATE_INVALID; + + if (bw == CH_WIDTH_40MHZ) { + if ((sec_ch + 4 != oper_ch) && + (oper_ch + 4 != sec_ch)) + return CHANNEL_STATE_INVALID; + chan_state2 = reg_get_channel_state(pdev, sec_ch); + if (chan_state2 == CHANNEL_STATE_INVALID) + return chan_state2; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return CHANNEL_STATE_INVALID; + } + + reg_channels = pdev_priv_obj->cur_chan_list; + + chan_state = reg_get_channel_state(pdev, oper_ch); + if (chan_state2 < chan_state) + chan_state = chan_state2; + + if ((chan_state == CHANNEL_STATE_INVALID) || + (chan_state == CHANNEL_STATE_DISABLE)) + return chan_state; + + chan_idx = reg_get_chan_enum(oper_ch); + if (chan_idx == INVALID_CHANNEL) + return CHANNEL_STATE_INVALID; + if (bw == CH_WIDTH_5MHZ) + bw_enabled = true; + else if (bw == CH_WIDTH_10MHZ) + bw_enabled = (reg_channels[chan_idx].min_bw <= 10) && + (reg_channels[chan_idx].max_bw >= 10); + else if (bw == CH_WIDTH_20MHZ) + bw_enabled = (reg_channels[chan_idx].min_bw <= 20) && + (reg_channels[chan_idx].max_bw >= 20); + else if (bw == CH_WIDTH_40MHZ) + bw_enabled = (reg_channels[chan_idx].min_bw <= 40) && + (reg_channels[chan_idx].max_bw >= 40); + + if (bw_enabled) + return chan_state; + else + return CHANNEL_STATE_DISABLE; + + return CHANNEL_STATE_ENABLE; +} +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * reg_combine_channel_states() - Get minimum of channel state1 and state2 + * @chan_state1: Channel state1 + * @chan_state2: Channel state2 + * + * Return: Channel state + */ +static enum channel_state reg_combine_channel_states( + enum channel_state chan_state1, + enum channel_state chan_state2) +{ + if ((chan_state1 == CHANNEL_STATE_INVALID) || + (chan_state2 == CHANNEL_STATE_INVALID)) + return CHANNEL_STATE_INVALID; + else + return min(chan_state1, chan_state2); +} + +#ifdef CONFIG_CHAN_NUM_API +/** + * reg_set_5g_channel_params () - Sets channel parameteres for given bandwidth + * @ch: channel number. + * @ch_params: pointer to the channel parameters. + * + * Return: None + */ +static void reg_set_5g_channel_params(struct wlan_objmgr_pdev *pdev, + uint8_t ch, + struct ch_params *ch_params) +{ + /* + * Set channel parameters like center frequency for a bonded channel + * state. Also return the maximum bandwidth supported by the channel. + */ + + enum channel_state chan_state = CHANNEL_STATE_ENABLE; + enum channel_state chan_state2 = CHANNEL_STATE_ENABLE; + const struct bonded_channel *bonded_chan_ptr = NULL; + const struct bonded_channel *bonded_chan_ptr2 = NULL; + + if (!ch_params) { + reg_err("ch_params is NULL"); + return; + } + + if (ch_params->ch_width >= CH_WIDTH_MAX) { + if (ch_params->center_freq_seg1 != 0) + ch_params->ch_width = CH_WIDTH_80P80MHZ; + else + ch_params->ch_width = CH_WIDTH_160MHZ; + } + + while (ch_params->ch_width != CH_WIDTH_INVALID) { + bonded_chan_ptr = NULL; + bonded_chan_ptr2 = NULL; + chan_state = reg_get_5g_bonded_channel( + pdev, ch, ch_params->ch_width, + &bonded_chan_ptr); + + chan_state = reg_get_5g_bonded_channel_state( + pdev, ch, ch_params->ch_width); + + if (ch_params->ch_width == CH_WIDTH_80P80MHZ) { + chan_state2 = reg_get_5g_bonded_channel_state( + pdev, ch_params->center_freq_seg1 - 2, + CH_WIDTH_80MHZ); + + chan_state = reg_combine_channel_states( + chan_state, chan_state2); + } + + if ((chan_state != CHANNEL_STATE_ENABLE) && + (chan_state != CHANNEL_STATE_DFS)) + goto update_bw; + + if (ch_params->ch_width <= CH_WIDTH_20MHZ) { + ch_params->sec_ch_offset = NO_SEC_CH; + ch_params->center_freq_seg0 = ch; + break; + } else if (ch_params->ch_width >= CH_WIDTH_40MHZ) { + reg_get_5g_bonded_chan_array( + pdev, ch, bonded_chan_40mhz_list, + QDF_ARRAY_SIZE(bonded_chan_40mhz_list), + &bonded_chan_ptr2); + if (!bonded_chan_ptr || !bonded_chan_ptr2) + goto update_bw; + if (ch == bonded_chan_ptr2->start_ch) + ch_params->sec_ch_offset = LOW_PRIMARY_CH; + else + ch_params->sec_ch_offset = HIGH_PRIMARY_CH; + + ch_params->center_freq_seg0 = + (bonded_chan_ptr->start_ch + + bonded_chan_ptr->end_ch) / 2; + break; + } +update_bw: + ch_params->ch_width = get_next_lower_bw[ch_params->ch_width]; + } + + if (ch_params->ch_width == CH_WIDTH_160MHZ) { + ch_params->center_freq_seg1 = ch_params->center_freq_seg0; + chan_state = reg_get_5g_bonded_channel( + pdev, ch, CH_WIDTH_80MHZ, &bonded_chan_ptr); + if (bonded_chan_ptr) + ch_params->center_freq_seg0 = + (bonded_chan_ptr->start_ch + + bonded_chan_ptr->end_ch) / 2; + } + + /* Overwrite center_freq_seg1 to 0 for non 160 and 80+80 width */ + if (!(ch_params->ch_width == CH_WIDTH_160MHZ || + ch_params->ch_width == CH_WIDTH_80P80MHZ)) + ch_params->center_freq_seg1 = 0; + + reg_nofl_debug("ch %d ch_wd %d freq0 %d freq1 %d", ch, + ch_params->ch_width, ch_params->center_freq_seg0, + ch_params->center_freq_seg1); +} + +/** + * reg_set_2g_channel_params() - set the 2.4G bonded channel parameters + * @oper_ch: operating channel + * @ch_params: channel parameters + * @sec_ch_2g: 2.4G secondary channel + * + * Return: void + */ +static void reg_set_2g_channel_params(struct wlan_objmgr_pdev *pdev, + uint16_t oper_ch, + struct ch_params *ch_params, + uint16_t sec_ch_2g) +{ + enum channel_state chan_state = CHANNEL_STATE_ENABLE; + + if (ch_params->ch_width >= CH_WIDTH_MAX) + ch_params->ch_width = CH_WIDTH_40MHZ; + if ((reg_get_bw_value(ch_params->ch_width) > 20) && !sec_ch_2g) { + if (oper_ch >= 1 && oper_ch <= 5) + sec_ch_2g = oper_ch + 4; + else if (oper_ch >= 6 && oper_ch <= 13) + sec_ch_2g = oper_ch - 4; + } + + while (ch_params->ch_width != CH_WIDTH_INVALID) { + chan_state = reg_get_2g_bonded_channel_state( + pdev, oper_ch, sec_ch_2g, ch_params->ch_width); + if (chan_state == CHANNEL_STATE_ENABLE) { + if (ch_params->ch_width == CH_WIDTH_40MHZ) { + if (oper_ch < sec_ch_2g) + ch_params->sec_ch_offset = + LOW_PRIMARY_CH; + else + ch_params->sec_ch_offset = + HIGH_PRIMARY_CH; + ch_params->center_freq_seg0 = + (oper_ch + sec_ch_2g) / 2; + } else { + ch_params->sec_ch_offset = NO_SEC_CH; + ch_params->center_freq_seg0 = oper_ch; + } + break; + } + + ch_params->ch_width = get_next_lower_bw[ch_params->ch_width]; + } + /* Overwrite center_freq_seg1 to 0 for 2.4 Ghz */ + ch_params->center_freq_seg1 = 0; +} + +void reg_set_channel_params(struct wlan_objmgr_pdev *pdev, + uint8_t ch, uint8_t sec_ch_2g, + struct ch_params *ch_params) +{ + if (REG_IS_5GHZ_CH(ch)) + reg_set_5g_channel_params(pdev, ch, ch_params); + else if (REG_IS_24GHZ_CH(ch)) + reg_set_2g_channel_params(pdev, ch, ch_params, sec_ch_2g); +} +#endif /* CONFIG_CHAN_NUM_API */ + +QDF_STATUS reg_read_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + if (!country_code) { + reg_err("country_code is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_copy(country_code, psoc_priv_obj->def_country, + REG_ALPHA2_LEN + 1); + + return QDF_STATUS_SUCCESS; +} + +void reg_get_current_dfs_region(struct wlan_objmgr_pdev *pdev, + enum dfs_reg *dfs_reg) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg component pdev priv is NULL"); + return; + } + + *dfs_reg = pdev_priv_obj->dfs_region; +} + +void reg_set_dfs_region(struct wlan_objmgr_pdev *pdev, + enum dfs_reg dfs_reg) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("psoc reg component is NULL"); + return; + } + + pdev_priv_obj->dfs_region = dfs_reg; + + reg_init_channel_map(dfs_reg); +} + +#ifdef CONFIG_CHAN_NUM_API +uint32_t reg_get_channel_reg_power(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num) +{ + enum channel_enum chan_enum; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct regulatory_channel *reg_channels; + + chan_enum = reg_get_chan_enum(chan_num); + + if (chan_enum == INVALID_CHANNEL) { + reg_err("channel is invalid"); + return QDF_STATUS_E_FAILURE; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + reg_channels = pdev_priv_obj->cur_chan_list; + + return reg_channels[chan_enum].tx_power; +} + +qdf_freq_t reg_get_channel_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num) +{ + enum channel_enum chan_enum; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct regulatory_channel *reg_channels; + + chan_enum = reg_get_chan_enum(chan_num); + + if (chan_enum == INVALID_CHANNEL) + return CHANNEL_STATE_INVALID; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + reg_channels = pdev_priv_obj->cur_chan_list; + + return reg_channels[chan_enum].center_freq; +} + +bool reg_is_dfs_ch(struct wlan_objmgr_pdev *pdev, + uint8_t chan) +{ + enum channel_state ch_state; + + ch_state = reg_get_channel_state(pdev, chan); + + return ch_state == CHANNEL_STATE_DFS; +} +#endif /* CONFIG_CHAN_NUM_API */ + +uint8_t reg_freq_to_chan(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + uint32_t count; + struct regulatory_channel *chan_list; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + if (freq == 0) { + reg_err_rl("Invalid freq %d", freq); + return 0; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return 0; + } + + chan_list = pdev_priv_obj->mas_chan_list; + for (count = 0; count < NUM_CHANNELS; count++) { + if (chan_list[count].center_freq >= freq) + break; + } + + if (count == NUM_CHANNELS) + goto end; + + if (chan_list[count].center_freq == freq) + return chan_list[count].chan_num; + + if (count == 0) + goto end; + + if ((chan_list[count - 1].chan_num == INVALID_CHANNEL_NUM) || + (chan_list[count].chan_num == INVALID_CHANNEL_NUM)) { + reg_err("Frequency %d invalid in current reg domain", freq); + return 0; + } + + return (chan_list[count - 1].chan_num + + (freq - chan_list[count - 1].center_freq) / 5); + +end: + reg_err_rl("invalid frequency %d", freq); + return 0; +} + +static uint16_t reg_compute_chan_to_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num, + enum channel_enum min_chan_range, + enum channel_enum max_chan_range) +{ + uint16_t count; + struct regulatory_channel *chan_list; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return 0; + } + + chan_list = pdev_priv_obj->mas_chan_list; + + for (count = min_chan_range; count <= max_chan_range; count++) { + if (REG_IS_49GHZ_FREQ(chan_list[count].center_freq)) { + if (chan_list[count].chan_num == chan_num) + break; + continue; + } else if ((chan_list[count].chan_num >= chan_num) && + (chan_list[count].chan_num != INVALID_CHANNEL_NUM)) + break; + } + + if (count == max_chan_range + 1) + goto end; + + if (chan_list[count].chan_num == chan_num) { + if (chan_list[count].chan_flags & REGULATORY_CHAN_DISABLED) + reg_err("Channel %d disabled in current reg domain", + chan_num); + return chan_list[count].center_freq; + } + + if (count == min_chan_range) + goto end; + + if ((chan_list[count - 1].chan_num == INVALID_CHANNEL_NUM) || + REG_IS_49GHZ_FREQ(chan_list[count - 1].center_freq) || + (chan_list[count].chan_num == INVALID_CHANNEL_NUM)) { + reg_err("Channel %d invalid in current reg domain", + chan_num); + return 0; + } + + return (chan_list[count - 1].center_freq + + (chan_num - chan_list[count - 1].chan_num) * 5); + +end: + + reg_debug_rl("Invalid channel %d", chan_num); + return 0; +} + +uint16_t reg_legacy_chan_to_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num) +{ + uint16_t min_chan_range = MIN_24GHZ_CHANNEL; + uint16_t max_chan_range = MAX_5GHZ_CHANNEL; + + if (chan_num == 0) { + reg_err_rl("Invalid channel %d", chan_num); + return 0; + } + + return reg_compute_chan_to_freq(pdev, chan_num, + min_chan_range, + max_chan_range); +} + +#ifdef CONFIG_CHAN_NUM_API +qdf_freq_t reg_chan_to_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num) +{ + uint32_t count; + struct regulatory_channel *chan_list; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + if (chan_num == 0) { + reg_err_rl("Invalid channel %d", chan_num); + return 0; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return 0; + } + + chan_list = pdev_priv_obj->cur_chan_list; + for (count = 0; count < NUM_CHANNELS; count++) + if (chan_list[count].chan_num == chan_num) { + if (reg_chan_in_range(chan_list, + pdev_priv_obj->range_2g_low, + pdev_priv_obj->range_2g_high, + pdev_priv_obj->range_5g_low, + pdev_priv_obj->range_5g_high, + count)) { + return chan_list[count].center_freq; + } + } + + reg_debug_rl("invalid channel %d", chan_num); + return 0; +} + +bool reg_chan_is_49ghz(struct wlan_objmgr_pdev *pdev, uint8_t chan_num) +{ + qdf_freq_t freq = 0; + + freq = reg_chan_to_freq(pdev, chan_num); + + return REG_IS_49GHZ_FREQ(freq) ? true : false; +} + +enum band_info reg_chan_to_band(uint8_t chan_num) +{ + if (chan_num <= 14) + return BAND_2G; + + return BAND_5G; +} + +void reg_update_nol_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *chan_list, + uint8_t num_chan, + bool nol_chan) +{ + enum channel_enum chan_enum; + struct regulatory_channel *mas_chan_list; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + uint16_t i; + + if (!num_chan || !chan_list) { + reg_err("chan_list or num_ch is NULL"); + return; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!pdev_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return; + } + + mas_chan_list = pdev_priv_obj->mas_chan_list; + for (i = 0; i < num_chan; i++) { + chan_enum = reg_get_chan_enum(chan_list[i]); + if (chan_enum == INVALID_CHANNEL) { + reg_err("Invalid ch in nol list, chan %d", + chan_list[i]); + continue; + } + mas_chan_list[chan_enum].nol_chan = nol_chan; + } + + reg_compute_pdev_current_chan_list(pdev_priv_obj); +} +#endif /* CONFIG_CHAN_NUM_API */ + +QDF_STATUS reg_program_default_cc(struct wlan_objmgr_pdev *pdev, + uint16_t regdmn) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct cur_regulatory_info *reg_info; + uint16_t cc = -1; + uint16_t country_index = -1, regdmn_pair = -1; + struct wlan_objmgr_psoc *psoc; + QDF_STATUS err; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!pdev_priv_obj) { + reg_err("reg soc is NULL"); + return QDF_STATUS_E_FAILURE; + } + + reg_info = (struct cur_regulatory_info *)qdf_mem_malloc + (sizeof(struct cur_regulatory_info)); + if (!reg_info) + return QDF_STATUS_E_NOMEM; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_info->psoc = psoc; + reg_info->phy_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + if (regdmn == 0) { + reg_get_default_country(®dmn); + regdmn |= COUNTRY_ERD_FLAG; + } + + if (regdmn & COUNTRY_ERD_FLAG) { + cc = regdmn & ~COUNTRY_ERD_FLAG; + + reg_get_rdpair_from_country_code(cc, + &country_index, + ®dmn_pair); + + err = reg_get_cur_reginfo(reg_info, country_index, regdmn_pair); + if (err == QDF_STATUS_E_FAILURE) { + reg_err("Unable to set country code\n"); + qdf_mem_free(reg_info->reg_rules_2g_ptr); + qdf_mem_free(reg_info->reg_rules_5g_ptr); + qdf_mem_free(reg_info); + return QDF_STATUS_E_FAILURE; + } + + pdev_priv_obj->ctry_code = cc; + + } else { + reg_get_rdpair_from_regdmn_id(regdmn, ®dmn_pair); + + err = reg_get_cur_reginfo(reg_info, country_index, regdmn_pair); + if (err == QDF_STATUS_E_FAILURE) { + reg_err("Unable to set country code\n"); + qdf_mem_free(reg_info->reg_rules_2g_ptr); + qdf_mem_free(reg_info->reg_rules_5g_ptr); + qdf_mem_free(reg_info); + return QDF_STATUS_E_FAILURE; + } + + pdev_priv_obj->reg_dmn_pair = regdmn; + } + + reg_info->offload_enabled = false; + reg_process_master_chan_list(reg_info); + + qdf_mem_free(reg_info->reg_rules_2g_ptr); + qdf_mem_free(reg_info->reg_rules_5g_ptr); + qdf_mem_free(reg_info); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_program_chan_list(struct wlan_objmgr_pdev *pdev, + struct cc_regdmn_s *rd) +{ + struct cur_regulatory_info *reg_info; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + uint16_t country_index = -1, regdmn_pair = -1; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_reg_tx_ops *tx_ops; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + uint8_t pdev_id; + QDF_STATUS err; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!pdev_priv_obj) { + reg_err(" pdev priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (psoc_priv_obj->offload_enabled) { + if ((rd->flags == ALPHA_IS_SET) && (rd->cc.alpha[2] == 'O')) + pdev_priv_obj->indoor_chan_enabled = false; + else + pdev_priv_obj->indoor_chan_enabled = true; + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + tx_ops = reg_get_psoc_tx_ops(psoc); + if (tx_ops->set_user_country_code) { + psoc_priv_obj->new_init_ctry_pending[pdev_id] = true; + return tx_ops->set_user_country_code(psoc, pdev_id, rd); + } + + return QDF_STATUS_E_FAILURE; + } + + reg_info = (struct cur_regulatory_info *)qdf_mem_malloc + (sizeof(struct cur_regulatory_info)); + if (!reg_info) + return QDF_STATUS_E_NOMEM; + + reg_info->psoc = psoc; + reg_info->phy_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + if (rd->flags == CC_IS_SET) { + reg_get_rdpair_from_country_code(rd->cc.country_code, + &country_index, + ®dmn_pair); + } else if (rd->flags == ALPHA_IS_SET) { + reg_get_rdpair_from_country_iso(rd->cc.alpha, + &country_index, + ®dmn_pair); + } else if (rd->flags == REGDMN_IS_SET) { + reg_get_rdpair_from_regdmn_id(rd->cc.regdmn_id, + ®dmn_pair); + } + + err = reg_get_cur_reginfo(reg_info, country_index, regdmn_pair); + if (err == QDF_STATUS_E_FAILURE) { + reg_err("Unable to set country code\n"); + qdf_mem_free(reg_info->reg_rules_2g_ptr); + qdf_mem_free(reg_info->reg_rules_5g_ptr); + qdf_mem_free(reg_info); + return QDF_STATUS_E_FAILURE; + } + + reg_info->offload_enabled = false; + reg_process_master_chan_list(reg_info); + + qdf_mem_free(reg_info->reg_rules_2g_ptr); + qdf_mem_free(reg_info->reg_rules_5g_ptr); + qdf_mem_free(reg_info); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_current_cc(struct wlan_objmgr_pdev *pdev, + struct cc_regdmn_s *rd) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!pdev_priv_obj) { + reg_err("reg pdev priv is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (rd->flags == CC_IS_SET) { + rd->cc.country_code = pdev_priv_obj->ctry_code; + } else if (rd->flags == ALPHA_IS_SET) { + qdf_mem_copy(rd->cc.alpha, pdev_priv_obj->current_country, + sizeof(rd->cc.alpha)); + } else if (rd->flags == REGDMN_IS_SET) { + rd->cc.regdmn_id = pdev_priv_obj->reg_dmn_pair; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_set_regdb_offloaded(struct wlan_objmgr_psoc *psoc, bool val) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_FAILURE; + } + + psoc_priv_obj->offload_enabled = val; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_curr_regdomain(struct wlan_objmgr_pdev *pdev, + struct cur_regdmn_info *cur_regdmn) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + uint16_t index; + int num_reg_dmn; + uint8_t phy_id; + + psoc = wlan_pdev_get_psoc(pdev); + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("soc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + phy_id = wlan_objmgr_pdev_get_pdev_id(pdev); + cur_regdmn->regdmn_pair_id = + psoc_priv_obj->mas_chan_params[phy_id].reg_dmn_pair; + + reg_get_num_reg_dmn_pairs(&num_reg_dmn); + for (index = 0; index < num_reg_dmn; index++) { + if (g_reg_dmn_pairs[index].reg_dmn_pair_id == + cur_regdmn->regdmn_pair_id) + break; + } + + if (index == num_reg_dmn) { + reg_debug_rl("invalid regdomain"); + return QDF_STATUS_E_FAILURE; + } + + cur_regdmn->dmn_id_2g = g_reg_dmn_pairs[index].dmn_id_2g; + cur_regdmn->dmn_id_5g = g_reg_dmn_pairs[index].dmn_id_5g; + cur_regdmn->ctl_2g = regdomains_2g[cur_regdmn->dmn_id_2g].ctl_val; + cur_regdmn->ctl_5g = regdomains_5g[cur_regdmn->dmn_id_5g].ctl_val; + cur_regdmn->dfs_region = + regdomains_5g[cur_regdmn->dmn_id_5g].dfs_region; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_modify_chan_144(struct wlan_objmgr_pdev *pdev, + bool enable_ch_144) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_reg_tx_ops *reg_tx_ops; + QDF_STATUS status; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (pdev_priv_obj->en_chan_144 == enable_ch_144) { + reg_info("chan 144 is already %d", enable_ch_144); + return QDF_STATUS_SUCCESS; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_debug("setting chan 144: %d", enable_ch_144); + pdev_priv_obj->en_chan_144 = enable_ch_144; + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + reg_tx_ops = reg_get_psoc_tx_ops(psoc); + if (reg_tx_ops->fill_umac_legacy_chanlist) + reg_tx_ops->fill_umac_legacy_chanlist(pdev, + pdev_priv_obj->cur_chan_list); + + status = reg_send_scheduler_msg_sb(psoc, pdev); + + return status; +} + +bool reg_get_en_chan_144(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return false; + } + + return pdev_priv_obj->en_chan_144; +} + +struct wlan_psoc_host_hal_reg_capabilities_ext *reg_get_hal_reg_cap( + struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return NULL; + } + + return psoc_priv_obj->reg_cap; +} + +QDF_STATUS reg_set_hal_reg_cap( + struct wlan_objmgr_psoc *psoc, + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap, + uint16_t phy_cnt) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (phy_cnt > PSOC_MAX_PHY_REG_CAP) { + reg_err("phy cnt:%d is more than %d", phy_cnt, + PSOC_MAX_PHY_REG_CAP); + return QDF_STATUS_E_FAILURE; + } + + qdf_mem_copy(psoc_priv_obj->reg_cap, reg_cap, + phy_cnt * + sizeof(struct wlan_psoc_host_hal_reg_capabilities_ext)); + + return QDF_STATUS_SUCCESS; +} + +bool reg_chan_in_range(struct regulatory_channel *chan_list, + qdf_freq_t low_freq_2g, qdf_freq_t high_freq_2g, + qdf_freq_t low_freq_5g, qdf_freq_t high_freq_5g, + enum channel_enum ch_enum) +{ + uint32_t low_limit_2g = NUM_CHANNELS; + uint32_t high_limit_2g = NUM_CHANNELS; + uint32_t low_limit_5g = NUM_CHANNELS; + uint32_t high_limit_5g = NUM_CHANNELS; + bool chan_in_range; + enum channel_enum chan_enum; + uint16_t min_bw; + qdf_freq_t center_freq; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + min_bw = chan_list[chan_enum].min_bw; + center_freq = chan_list[chan_enum].center_freq; + + if ((center_freq - min_bw / 2) >= low_freq_2g) { + low_limit_2g = chan_enum; + break; + } + } + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + min_bw = chan_list[chan_enum].min_bw; + center_freq = chan_list[chan_enum].center_freq; + + if ((center_freq - min_bw / 2) >= low_freq_5g) { + low_limit_5g = chan_enum; + break; + } + } + + for (chan_enum = NUM_CHANNELS - 1; chan_enum >= 0; chan_enum--) { + min_bw = chan_list[chan_enum].min_bw; + center_freq = chan_list[chan_enum].center_freq; + + if (center_freq + min_bw / 2 <= high_freq_2g) { + high_limit_2g = chan_enum; + break; + } + if (chan_enum == 0) + break; + } + + for (chan_enum = NUM_CHANNELS - 1; chan_enum >= 0; chan_enum--) { + min_bw = chan_list[chan_enum].min_bw; + center_freq = chan_list[chan_enum].center_freq; + + if (center_freq + min_bw / 2 <= high_freq_5g) { + high_limit_5g = chan_enum; + break; + } + if (chan_enum == 0) + break; + } + + chan_in_range = false; + if ((low_limit_2g <= ch_enum) && + (high_limit_2g >= ch_enum) && + (low_limit_2g != NUM_CHANNELS) && + (high_limit_2g != NUM_CHANNELS)) + chan_in_range = true; + if ((low_limit_5g <= ch_enum) && + (high_limit_5g >= ch_enum) && + (low_limit_5g != NUM_CHANNELS) && + (high_limit_5g != NUM_CHANNELS)) + chan_in_range = true; + + if (chan_in_range) + return true; + else + return false; +} + +#ifdef CONFIG_CHAN_NUM_API +void reg_update_nol_history_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *chan_list, uint8_t num_chan, + bool nol_history_chan) +{ + enum channel_enum chan_enum; + struct regulatory_channel *mas_chan_list; + struct regulatory_channel *cur_chan_list; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + uint16_t i; + + if (!num_chan || !chan_list) { + reg_err("chan_list or num_ch is NULL"); + return; + } + + pdev_priv_obj = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_REGULATORY); + + if (!pdev_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return; + } + + mas_chan_list = pdev_priv_obj->mas_chan_list; + cur_chan_list = pdev_priv_obj->cur_chan_list; + + for (i = 0; i < num_chan; i++) { + chan_enum = reg_get_chan_enum(chan_list[i]); + if (chan_enum == INVALID_CHANNEL) { + reg_err("Invalid ch in nol list, chan %d", + chan_list[i]); + continue; + } + mas_chan_list[chan_enum].nol_history = nol_history_chan; + cur_chan_list[chan_enum].nol_history = nol_history_chan; + } +} + +bool reg_is_24ghz_ch(uint32_t chan) +{ + return REG_IS_24GHZ_CH(chan); +} + +bool reg_is_5ghz_ch(uint32_t chan) +{ + return REG_IS_5GHZ_CH(chan); +} +#endif /* CONFIG_CHAN_NUM_API */ + +bool reg_is_24ghz_ch_freq(uint32_t freq) +{ + return REG_IS_24GHZ_CH_FREQ(freq); +} + +bool reg_is_5ghz_ch_freq(uint32_t freq) +{ + return REG_IS_5GHZ_FREQ(freq); +} + +/** + * BAND_2G_PRESENT() - Check if REG_BAND_2G is set in the band_mask + * @band_mask: Bitmask for bands + * + * Return: True if REG_BAND_2G is set in the band_mask, else false + */ +static inline bool BAND_2G_PRESENT(uint8_t band_mask) +{ + return !!(band_mask & (BIT(REG_BAND_2G))); +} + +/** + * BAND_5G_PRESENT() - Check if REG_BAND_5G is set in the band_mask + * @band_mask: Bitmask for bands + * + * Return: True if REG_BAND_5G is set in the band_mask, else false + */ +static inline bool BAND_5G_PRESENT(uint8_t band_mask) +{ + return !!(band_mask & (BIT(REG_BAND_5G))); +} + +bool reg_is_freq_indoor(struct wlan_objmgr_pdev *pdev, qdf_freq_t freq) +{ + struct regulatory_channel *cur_chan_list; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + enum channel_enum chan_enum; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return false; + } + + chan_enum = reg_get_chan_enum_for_freq(freq); + + if (chan_enum == INVALID_CHANNEL) { + reg_err_rl("Invalid chan enum %d", chan_enum); + return false; + } + + cur_chan_list = pdev_priv_obj->cur_chan_list; + + return (cur_chan_list[chan_enum].chan_flags & + REGULATORY_CHAN_INDOOR_ONLY); +} + +#ifdef CONFIG_BAND_6GHZ +bool reg_is_6ghz_chan_freq(uint16_t freq) +{ + return REG_IS_6GHZ_FREQ(freq); +} + +uint16_t reg_min_6ghz_chan_freq(void) +{ + return REG_MIN_6GHZ_CHAN_FREQ; +} + +uint16_t reg_max_6ghz_chan_freq(void) +{ + return REG_MAX_6GHZ_CHAN_FREQ; +} + +bool reg_is_6ghz_psc_chan_freq(uint16_t freq) +{ + if (!REG_IS_6GHZ_FREQ(freq)) { + reg_debug(" Channel frequency is not a 6GHz frequency"); + return false; + } + + if (!(((freq - SIXG_STARTING_FREQ) + (FREQ_LEFT_SHIFT)) % + (FREQ_TO_CHAN_SCALE * NUM_80MHZ_BAND_IN_6G))) { + return true; + } + + reg_debug_rl("Channel freq %d MHz is not a 6GHz PSC frequency", freq); + + return false; +} + +/** + * BAND_6G_PRESENT() - Check if REG_BAND_6G is set in the band_mask + * @band_mask: Bitmask for bands + * + * Return: True if REG_BAND_6G is set in the band_mask, else false + */ +static inline bool BAND_6G_PRESENT(uint8_t band_mask) +{ + return !!(band_mask & (BIT(REG_BAND_6G))); +} +#else +static inline bool BAND_6G_PRESENT(uint8_t band_mask) +{ + return false; +} +#endif /* CONFIG_BAND_6GHZ */ + +uint16_t +reg_get_band_channel_list(struct wlan_objmgr_pdev *pdev, + uint8_t band_mask, + struct regulatory_channel *channel_list) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct regulatory_channel *cur_chan_list; + uint16_t i, num_channels = 0; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return 0; + } + + cur_chan_list = pdev_priv_obj->cur_chan_list; + + if (BAND_2G_PRESENT(band_mask)) { + for (i = MIN_24GHZ_CHANNEL; i <= MAX_24GHZ_CHANNEL; i++) { + if ((cur_chan_list[i].state != CHANNEL_STATE_DISABLE) && + !(cur_chan_list[i].chan_flags & + REGULATORY_CHAN_DISABLED)) { + channel_list[num_channels] = cur_chan_list[i]; + num_channels++; + } + } + } + if (BAND_5G_PRESENT(band_mask)) { + for (i = MIN_49GHZ_CHANNEL; i <= MAX_5GHZ_CHANNEL; i++) { + if ((cur_chan_list[i].state != CHANNEL_STATE_DISABLE) && + !(cur_chan_list[i].chan_flags & + REGULATORY_CHAN_DISABLED)) { + channel_list[num_channels] = cur_chan_list[i]; + num_channels++; + } + } + } + if (BAND_6G_PRESENT(band_mask)) { + for (i = MIN_6GHZ_CHANNEL; i <= MAX_6GHZ_CHANNEL; i++) { + if ((cur_chan_list[i].state != CHANNEL_STATE_DISABLE) && + !(cur_chan_list[i].chan_flags & + REGULATORY_CHAN_DISABLED)) { + channel_list[num_channels] = cur_chan_list[i]; + num_channels++; + } + } + } + + if (!num_channels) { + reg_err("Failed to retrieve the channel list"); + return 0; + } + + return num_channels; +} + +qdf_freq_t reg_chan_band_to_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num, + uint8_t band_mask) +{ + enum channel_enum min_chan, max_chan; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + uint16_t freq; + + if (chan_num == 0) { + reg_err_rl("Invalid channel %d", chan_num); + return 0; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return 0; + } + + if (BAND_6G_PRESENT(band_mask)) { + if (BAND_2G_PRESENT(band_mask) || + BAND_5G_PRESENT(band_mask)) { + reg_err_rl("Incorrect band_mask %x", band_mask); + return 0; + } + + min_chan = MIN_6GHZ_CHANNEL; + max_chan = MAX_6GHZ_CHANNEL; + return reg_compute_chan_to_freq(pdev, chan_num, + min_chan, + max_chan); + } else { + if (BAND_2G_PRESENT(band_mask)) { + min_chan = MIN_24GHZ_CHANNEL; + max_chan = MAX_24GHZ_CHANNEL; + freq = reg_compute_chan_to_freq(pdev, chan_num, + min_chan, + max_chan); + if (freq != 0) + return freq; + } + + if (BAND_5G_PRESENT(band_mask)) { + min_chan = MIN_49GHZ_CHANNEL; + max_chan = MAX_5GHZ_CHANNEL; + + return reg_compute_chan_to_freq(pdev, chan_num, + min_chan, + max_chan); + } + + reg_err_rl("Incorrect band_mask %x", band_mask); + return 0; + } +} + +bool reg_is_49ghz_freq(qdf_freq_t freq) +{ + return REG_IS_49GHZ_FREQ(freq); +} + +qdf_freq_t reg_ch_num(uint32_t ch_enum) +{ + return REG_CH_NUM(ch_enum); +} + +qdf_freq_t reg_ch_to_freq(uint32_t ch_enum) +{ + return REG_CH_TO_FREQ(ch_enum); +} + +#ifdef CONFIG_CHAN_NUM_API +bool reg_is_same_band_channels(uint8_t chan_num1, uint8_t chan_num2) +{ + return (chan_num1 && chan_num2 && + (REG_IS_5GHZ_CH(chan_num1) == REG_IS_5GHZ_CH(chan_num2))); +} + +bool reg_is_channel_valid_5g_sbs(uint8_t curchan, uint8_t newchan) +{ + return REG_IS_CHANNEL_VALID_5G_SBS(curchan, newchan); +} + +uint8_t reg_min_24ghz_ch_num(void) +{ + return REG_MIN_24GHZ_CH_NUM; +} + +uint8_t reg_max_24ghz_ch_num(void) +{ + return REG_MAX_24GHZ_CH_NUM; +} + +uint8_t reg_min_5ghz_ch_num(void) +{ + return REG_MIN_5GHZ_CH_NUM; +} + +uint8_t reg_max_5ghz_ch_num(void) +{ + return REG_MAX_5GHZ_CH_NUM; +} +#endif /* CONFIG_CHAN_NUM_API */ + +#ifdef CONFIG_CHAN_FREQ_API +qdf_freq_t reg_min_24ghz_chan_freq(void) +{ + return REG_MIN_24GHZ_CH_FREQ; +} + +qdf_freq_t reg_max_24ghz_chan_freq(void) +{ + return REG_MAX_24GHZ_CH_FREQ; +} + +qdf_freq_t reg_min_5ghz_chan_freq(void) +{ + return REG_MIN_5GHZ_CH_FREQ; +} + +qdf_freq_t reg_max_5ghz_chan_freq(void) +{ + return REG_MAX_5GHZ_CH_FREQ; +} +#endif /* CONFIG_CHAN_FREQ_API */ + +QDF_STATUS reg_enable_dfs_channels(struct wlan_objmgr_pdev *pdev, + bool enable) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_objmgr_psoc *psoc; + QDF_STATUS status; + struct wlan_lmac_if_reg_tx_ops *reg_tx_ops; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (pdev_priv_obj->dfs_enabled == enable) { + reg_info("dfs_enabled is already set to %d", enable); + return QDF_STATUS_SUCCESS; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_info("set dfs_enabled: %d", enable); + + pdev_priv_obj->dfs_enabled = enable; + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + reg_tx_ops = reg_get_psoc_tx_ops(psoc); + + /* Fill the ic channel list with the updated current channel + * chan list. + */ + if (reg_tx_ops->fill_umac_legacy_chanlist) + reg_tx_ops->fill_umac_legacy_chanlist(pdev, + pdev_priv_obj->cur_chan_list); + + status = reg_send_scheduler_msg_sb(psoc, pdev); + + return status; +} + +bool reg_is_regdmn_en302502_applicable(struct wlan_objmgr_pdev *pdev) +{ + struct cur_regdmn_info cur_reg_dmn; + QDF_STATUS status; + + status = reg_get_curr_regdomain(pdev, &cur_reg_dmn); + if (status != QDF_STATUS_SUCCESS) { + reg_err("Failed to get reg domain"); + return false; + } + + return reg_en302_502_regdmn(cur_reg_dmn.regdmn_pair_id); +} + +QDF_STATUS reg_modify_pdev_chan_range(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_reg_tx_ops *reg_tx_ops; + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap_ptr; + uint32_t cnt; + uint32_t phy_id; + enum direction dir; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct target_pdev_info *tgt_pdev; + + tgt_pdev = wlan_pdev_get_tgt_if_handle(pdev); + phy_id = (uint32_t)target_pdev_get_phy_idx(tgt_pdev); + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_cap_ptr = psoc_priv_obj->reg_cap; + + for (cnt = 0; cnt < PSOC_MAX_PHY_REG_CAP; cnt++) { + if (!reg_cap_ptr) { + qdf_mem_free(pdev_priv_obj); + reg_err("reg cap ptr is NULL"); + return QDF_STATUS_E_FAULT; + } + + if (reg_cap_ptr->phy_id == phy_id) + break; + reg_cap_ptr++; + } + + if (cnt == PSOC_MAX_PHY_REG_CAP) { + qdf_mem_free(pdev_priv_obj); + reg_err("extended capabilities not found for pdev"); + return QDF_STATUS_E_FAULT; + } + + if (psoc_priv_obj->offload_enabled) { + dir = NORTHBOUND; + } else { + dir = SOUTHBOUND; + } + + pdev_priv_obj->range_2g_low = reg_cap_ptr->low_2ghz_chan; + pdev_priv_obj->range_2g_high = reg_cap_ptr->high_2ghz_chan; + pdev_priv_obj->range_5g_low = reg_cap_ptr->low_5ghz_chan; + pdev_priv_obj->range_5g_high = reg_cap_ptr->high_5ghz_chan; + pdev_priv_obj->wireless_modes = reg_cap_ptr->wireless_modes; + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + reg_tx_ops = reg_get_psoc_tx_ops(psoc); + + /* Fill the ic channel list with the updated current channel + * chan list. + */ + if (reg_tx_ops->fill_umac_legacy_chanlist) { + reg_tx_ops->fill_umac_legacy_chanlist(pdev, + pdev_priv_obj->cur_chan_list); + + } else { + if (dir == NORTHBOUND) + status = reg_send_scheduler_msg_nb(psoc, pdev); + else + status = reg_send_scheduler_msg_sb(psoc, pdev); + } + + return status; +} + +#ifdef DISABLE_UNII_SHARED_BANDS +/** + * reg_is_reg_unii_band_1_or_reg_unii_band_2a() - Check the input bitmap + * @unii_5g_bitmap: 5G UNII band bitmap + * + * This function checks if either REG_UNII_BAND_1 or REG_UNII_BAND_2A, + * are present in the 5G UNII band bitmap. + * + * Return: Return true if REG_UNII_BAND_1 or REG_UNII_BAND_2A, are present in + * the UNII 5g bitmap else return false. + */ +static bool +reg_is_reg_unii_band_1_or_reg_unii_band_2a(uint8_t unii_5g_bitmap) +{ + if (!unii_5g_bitmap) + return false; + + return ((unii_5g_bitmap & (BIT(REG_UNII_BAND_1) | + BIT(REG_UNII_BAND_2A))) == unii_5g_bitmap); +} + +QDF_STATUS reg_disable_chan_coex(struct wlan_objmgr_pdev *pdev, + uint8_t unii_5g_bitmap) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_lmac_if_reg_tx_ops *reg_tx_ops; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err_rl("reg pdev priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (unii_5g_bitmap && + !reg_is_reg_unii_band_1_or_reg_unii_band_2a(unii_5g_bitmap)) { + reg_err_rl("Invalid unii_5g_bitmap = %d", unii_5g_bitmap); + return QDF_STATUS_E_FAILURE; + } + + if (pdev_priv_obj->unii_5g_bitmap == unii_5g_bitmap) { + reg_debug_rl("UNII bitmask for 5G channels is already set %d", + unii_5g_bitmap); + return QDF_STATUS_SUCCESS; + } + + reg_debug_rl("Setting UNII bitmask for 5G: %d", unii_5g_bitmap); + pdev_priv_obj->unii_5g_bitmap = unii_5g_bitmap; + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + reg_tx_ops = reg_get_psoc_tx_ops(psoc); + + if (reg_tx_ops->fill_umac_legacy_chanlist) { + reg_tx_ops->fill_umac_legacy_chanlist(pdev, + pdev_priv_obj->cur_chan_list); + } + + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS reg_get_channel_list_with_power_for_freq(struct wlan_objmgr_pdev + *pdev, + struct channel_power + *ch_list, + uint8_t *num_chan) +{ + int i, count; + struct regulatory_channel *reg_channels; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + if (!num_chan || !ch_list) { + reg_err("chan_list or num_ch is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + /* set the current channel list */ + reg_channels = pdev_priv_obj->cur_chan_list; + + for (i = 0, count = 0; i < NUM_CHANNELS; i++) { + if (reg_channels[i].state && + !(reg_channels[i].chan_flags & REGULATORY_CHAN_DISABLED)) { + ch_list[count].center_freq = + reg_channels[i].center_freq; + ch_list[count++].tx_power = + reg_channels[i].tx_power; + } + } + + *num_chan = count; + + return QDF_STATUS_SUCCESS; +} + +enum channel_enum reg_get_chan_enum_for_freq(qdf_freq_t freq) +{ + uint32_t count; + + for (count = 0; count < NUM_CHANNELS; count++) + if (channel_map[count].center_freq == freq) + return count; + + reg_err("invalid channel center frequency %d", freq); + + return INVALID_CHANNEL; +} + +bool +reg_is_freq_present_in_cur_chan_list(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + enum channel_enum chan_enum; + struct regulatory_channel *cur_chan_list; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err_rl("pdev reg obj is NULL"); + return false; + } + + cur_chan_list = pdev_priv_obj->cur_chan_list; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) + if (cur_chan_list[chan_enum].center_freq == freq) + if ((cur_chan_list[chan_enum].state != + CHANNEL_STATE_DISABLE) && + !(cur_chan_list[chan_enum].chan_flags & + REGULATORY_CHAN_DISABLED)) + return true; + + reg_debug_rl("Channel center frequency %d not found", freq); + + return false; +} + +enum channel_state reg_get_channel_state_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + enum channel_enum ch_idx; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + ch_idx = reg_get_chan_enum_for_freq(freq); + + if (ch_idx == INVALID_CHANNEL) + return CHANNEL_STATE_INVALID; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg obj is NULL"); + return CHANNEL_STATE_INVALID; + } + + return pdev_priv_obj->cur_chan_list[ch_idx].state; +} + +static uint32_t reg_get_channel_flags_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + enum channel_enum chan_enum; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + chan_enum = reg_get_chan_enum_for_freq(freq); + + if (chan_enum == INVALID_CHANNEL) { + reg_err("chan freq is not valid"); + return REGULATORY_CHAN_INVALID; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg obj is NULL"); + return REGULATORY_CHAN_INVALID; + } + + return pdev_priv_obj->cur_chan_list[chan_enum].chan_flags; +} + +/** + * reg_get_5g_bonded_chan_array_for_freq()- Return the channel state for a + * 5G or 6G channel frequency based on the bonded channel. + * @pdev: Pointer to pdev. + * @freq: Channel center frequency. + * @bonded_chan_ar: Array of bonded channel frequencies. + * @array_size: Array size. + * @bonded_chan_ptr_ptr: Pointer to bonded_channel_freq. + * + * Return: Channel State + */ +static enum channel_state +reg_get_5g_bonded_chan_array_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t freq, + const struct bonded_channel_freq + bonded_chan_ar[], + uint16_t array_size, + const struct bonded_channel_freq + **bonded_chan_ptr_ptr) +{ + int i; + uint16_t chan_cfreq; + const struct bonded_channel_freq *bonded_chan_ptr = NULL; + enum channel_state chan_state = CHANNEL_STATE_INVALID; + enum channel_state temp_chan_state; + + for (i = 0; i < array_size; i++) { + if ((freq >= bonded_chan_ar[i].start_freq) && + (freq <= bonded_chan_ar[i].end_freq)) { + bonded_chan_ptr = &bonded_chan_ar[i]; + break; + } + } + + if (!bonded_chan_ptr) + return chan_state; + + *bonded_chan_ptr_ptr = bonded_chan_ptr; + chan_cfreq = bonded_chan_ptr->start_freq; + while (chan_cfreq <= bonded_chan_ptr->end_freq) { + temp_chan_state = reg_get_channel_state_for_freq(pdev, + chan_cfreq); + if (temp_chan_state < chan_state) + chan_state = temp_chan_state; + chan_cfreq = chan_cfreq + 20; + } + + return chan_state; +} + +/** + * reg_get_5g_bonded_channel_for_freq()- Return the channel state for a + * 5G or 6G channel frequency based on the channel width and bonded channel + * @pdev: Pointer to pdev. + * @freq: Channel center frequency. + * @ch_width: Channel Width. + * @bonded_chan_ptr_ptr: Pointer to bonded_channel_freq. + * + * Return: Channel State + */ +enum channel_state +reg_get_5g_bonded_channel_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t freq, + enum phy_ch_width ch_width, + const struct bonded_channel_freq + **bonded_chan_ptr_ptr) + +{ + if (ch_width == CH_WIDTH_80P80MHZ) + return reg_get_5g_bonded_chan_array_for_freq(pdev, freq, + bonded_chan_80mhz_list_freq, + QDF_ARRAY_SIZE(bonded_chan_80mhz_list_freq), + bonded_chan_ptr_ptr); + else if (ch_width == CH_WIDTH_160MHZ) + return reg_get_5g_bonded_chan_array_for_freq(pdev, freq, + bonded_chan_160mhz_list_freq, + QDF_ARRAY_SIZE(bonded_chan_160mhz_list_freq), + bonded_chan_ptr_ptr); + else if (ch_width == CH_WIDTH_80MHZ) + return reg_get_5g_bonded_chan_array_for_freq(pdev, freq, + bonded_chan_80mhz_list_freq, + QDF_ARRAY_SIZE(bonded_chan_80mhz_list_freq), + bonded_chan_ptr_ptr); + else if (ch_width == CH_WIDTH_40MHZ) + return reg_get_5g_bonded_chan_array_for_freq(pdev, freq, + bonded_chan_40mhz_list_freq, + QDF_ARRAY_SIZE(bonded_chan_40mhz_list_freq), + bonded_chan_ptr_ptr); + else + return reg_get_channel_state_for_freq(pdev, freq); +} + +enum channel_state +reg_get_5g_bonded_channel_state_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + enum phy_ch_width bw) +{ + enum channel_enum ch_indx; + enum channel_state chan_state; + struct regulatory_channel *reg_channels; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + bool bw_enabled = false; + const struct bonded_channel_freq *bonded_chan_ptr = NULL; + + if (bw > CH_WIDTH_80P80MHZ) { + reg_err("bw passed is not good"); + return CHANNEL_STATE_INVALID; + } + + chan_state = reg_get_5g_bonded_channel_for_freq(pdev, freq, bw, + &bonded_chan_ptr); + + if ((chan_state == CHANNEL_STATE_INVALID) || + (chan_state == CHANNEL_STATE_DISABLE)) + return chan_state; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg obj is NULL"); + return CHANNEL_STATE_INVALID; + } + reg_channels = pdev_priv_obj->cur_chan_list; + + ch_indx = reg_get_chan_enum_for_freq(freq); + if (ch_indx == INVALID_CHANNEL) + return CHANNEL_STATE_INVALID; + if (bw == CH_WIDTH_5MHZ) + bw_enabled = true; + else if (bw == CH_WIDTH_10MHZ) + bw_enabled = (reg_channels[ch_indx].min_bw <= 10) && + (reg_channels[ch_indx].max_bw >= 10); + else if (bw == CH_WIDTH_20MHZ) + bw_enabled = (reg_channels[ch_indx].min_bw <= 20) && + (reg_channels[ch_indx].max_bw >= 20); + else if (bw == CH_WIDTH_40MHZ) + bw_enabled = (reg_channels[ch_indx].min_bw <= 40) && + (reg_channels[ch_indx].max_bw >= 40); + else if (bw == CH_WIDTH_80MHZ) + bw_enabled = (reg_channels[ch_indx].min_bw <= 80) && + (reg_channels[ch_indx].max_bw >= 80); + else if (bw == CH_WIDTH_160MHZ) + bw_enabled = (reg_channels[ch_indx].min_bw <= 160) && + (reg_channels[ch_indx].max_bw >= 160); + else if (bw == CH_WIDTH_80P80MHZ) + bw_enabled = (reg_channels[ch_indx].min_bw <= 80) && + (reg_channels[ch_indx].max_bw >= 80); + + if (bw_enabled) + return chan_state; + else + return CHANNEL_STATE_DISABLE; +} + +enum channel_state +reg_get_2g_bonded_channel_state_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t oper_ch_freq, + qdf_freq_t sec_ch_freq, + enum phy_ch_width bw) +{ + enum channel_enum chan_idx; + enum channel_state chan_state; + struct regulatory_channel *reg_channels; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + bool bw_enabled = false; + enum channel_state chan_state2 = CHANNEL_STATE_INVALID; + + if (bw > CH_WIDTH_40MHZ) + return CHANNEL_STATE_INVALID; + + if (bw == CH_WIDTH_40MHZ) { + if ((sec_ch_freq + 20 != oper_ch_freq) && + (oper_ch_freq + 20 != sec_ch_freq)) + return CHANNEL_STATE_INVALID; + chan_state2 = reg_get_channel_state_for_freq(pdev, sec_ch_freq); + if (chan_state2 == CHANNEL_STATE_INVALID) + return chan_state2; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return CHANNEL_STATE_INVALID; + } + + reg_channels = pdev_priv_obj->cur_chan_list; + + chan_state = reg_get_channel_state_for_freq(pdev, oper_ch_freq); + if (chan_state2 < chan_state) + chan_state = chan_state2; + + if ((chan_state == CHANNEL_STATE_INVALID) || + (chan_state == CHANNEL_STATE_DISABLE)) + return chan_state; + + chan_idx = reg_get_chan_enum_for_freq(oper_ch_freq); + if (chan_idx == INVALID_CHANNEL) + return CHANNEL_STATE_INVALID; + if (bw == CH_WIDTH_5MHZ) + bw_enabled = true; + else if (bw == CH_WIDTH_10MHZ) + bw_enabled = (reg_channels[chan_idx].min_bw <= 10) && + (reg_channels[chan_idx].max_bw >= 10); + else if (bw == CH_WIDTH_20MHZ) + bw_enabled = (reg_channels[chan_idx].min_bw <= 20) && + (reg_channels[chan_idx].max_bw >= 20); + else if (bw == CH_WIDTH_40MHZ) + bw_enabled = (reg_channels[chan_idx].min_bw <= 40) && + (reg_channels[chan_idx].max_bw >= 40); + + if (bw_enabled) + return chan_state; + else + return CHANNEL_STATE_DISABLE; + + return CHANNEL_STATE_ENABLE; +} + +/** + * reg_set_5g_channel_params_for_freq()- Set channel parameters like center + * frequency for a bonded channel state. Also return the maximum bandwidth + * supported by the channel. + * @pdev: Pointer to pdev. + * @freq: Channel center frequency. + * ch_params: Pointer to ch_params. + * + * Return: void + */ +static void reg_set_5g_channel_params_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t freq, + struct ch_params *ch_params) +{ + /* + * Set channel parameters like center frequency for a bonded channel + * state. Also return the maximum bandwidth supported by the channel. + */ + + enum channel_state chan_state = CHANNEL_STATE_ENABLE; + enum channel_state chan_state2 = CHANNEL_STATE_ENABLE; + const struct bonded_channel_freq *bonded_chan_ptr = NULL; + const struct bonded_channel_freq *bonded_chan_ptr2 = NULL; + + if (!ch_params) { + reg_err("ch_params is NULL"); + return; + } + + if (ch_params->ch_width >= CH_WIDTH_MAX) { + if (ch_params->mhz_freq_seg1 != 0) + ch_params->ch_width = CH_WIDTH_80P80MHZ; + else + ch_params->ch_width = CH_WIDTH_160MHZ; + } + + while (ch_params->ch_width != CH_WIDTH_INVALID) { + bonded_chan_ptr = NULL; + bonded_chan_ptr2 = NULL; + chan_state = reg_get_5g_bonded_channel_for_freq( + pdev, freq, ch_params->ch_width, + &bonded_chan_ptr); + + chan_state = reg_get_5g_bonded_channel_state_for_freq( + pdev, freq, ch_params->ch_width); + + if (ch_params->ch_width == CH_WIDTH_80P80MHZ) { + chan_state2 = reg_get_5g_bonded_channel_state_for_freq( + pdev, ch_params->mhz_freq_seg1 - + NEAREST_20MHZ_CHAN_FREQ_OFFSET, + CH_WIDTH_80MHZ); + + chan_state = reg_combine_channel_states( + chan_state, chan_state2); + } + + if ((chan_state != CHANNEL_STATE_ENABLE) && + (chan_state != CHANNEL_STATE_DFS)) + goto update_bw; + if (ch_params->ch_width <= CH_WIDTH_20MHZ) { + ch_params->sec_ch_offset = NO_SEC_CH; + ch_params->mhz_freq_seg0 = freq; + if (reg_is_6ghz_chan_freq(ch_params->mhz_freq_seg0)) + ch_params->center_freq_seg0 = + (ch_params->mhz_freq_seg0 - + SIXG_STARTING_FREQ) / FREQ_TO_CHAN_SCALE; + else + ch_params->center_freq_seg0 = + (ch_params->mhz_freq_seg0 - + FIVEG_STARTING_FREQ) / FREQ_TO_CHAN_SCALE; + break; + } else if (ch_params->ch_width >= CH_WIDTH_40MHZ) { + reg_get_5g_bonded_chan_array_for_freq( + pdev, freq, bonded_chan_40mhz_list_freq, + QDF_ARRAY_SIZE(bonded_chan_40mhz_list_freq), + &bonded_chan_ptr2); + if (!bonded_chan_ptr || !bonded_chan_ptr2) + goto update_bw; + if (freq == bonded_chan_ptr2->start_freq) + ch_params->sec_ch_offset = LOW_PRIMARY_CH; + else + ch_params->sec_ch_offset = HIGH_PRIMARY_CH; + + ch_params->mhz_freq_seg0 = + (bonded_chan_ptr->start_freq + + bonded_chan_ptr->end_freq) / 2; + if (reg_is_6ghz_chan_freq(ch_params->mhz_freq_seg0)) + ch_params->center_freq_seg0 = + (ch_params->mhz_freq_seg0 - + SIXG_STARTING_FREQ) / FREQ_TO_CHAN_SCALE; + else + ch_params->center_freq_seg0 = + (ch_params->mhz_freq_seg0 - + FIVEG_STARTING_FREQ) / FREQ_TO_CHAN_SCALE; + break; + } +update_bw: + ch_params->ch_width = get_next_lower_bw[ch_params->ch_width]; + } + + if (ch_params->ch_width == CH_WIDTH_160MHZ) { + ch_params->mhz_freq_seg1 = ch_params->mhz_freq_seg0; + if (reg_is_6ghz_chan_freq(ch_params->mhz_freq_seg1)) + ch_params->center_freq_seg1 = + (ch_params->mhz_freq_seg1 - SIXG_STARTING_FREQ) / + FREQ_TO_CHAN_SCALE; + else + ch_params->center_freq_seg1 = + (ch_params->mhz_freq_seg1 - FIVEG_STARTING_FREQ) / + FREQ_TO_CHAN_SCALE; + chan_state = reg_get_5g_bonded_channel_for_freq( + pdev, freq, CH_WIDTH_80MHZ, &bonded_chan_ptr); + if (bonded_chan_ptr) { + ch_params->mhz_freq_seg0 = + (bonded_chan_ptr->start_freq + + bonded_chan_ptr->end_freq) / 2; + if (reg_is_6ghz_chan_freq(ch_params->mhz_freq_seg0)) + ch_params->center_freq_seg0 = + (ch_params->mhz_freq_seg0 - + SIXG_STARTING_FREQ) / FREQ_TO_CHAN_SCALE; + else + ch_params->center_freq_seg0 = + (ch_params->mhz_freq_seg0 - + FIVEG_STARTING_FREQ) / FREQ_TO_CHAN_SCALE; + } + } + + /* Overwrite mhz_freq_seg1 to 0 for non 160 and 80+80 width */ + if (!(ch_params->ch_width == CH_WIDTH_160MHZ || + ch_params->ch_width == CH_WIDTH_80P80MHZ)) { + ch_params->mhz_freq_seg1 = 0; + ch_params->center_freq_seg1 = 0; + } +} + +/** + * reg_set_2g_channel_params_for_freq() - set the 2.4G bonded channel parameters + * @oper_freq: operating channel + * @ch_params: channel parameters + * @sec_ch_2g_freq: 2.4G secondary channel + * + * Return: void + */ +static void reg_set_2g_channel_params_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t oper_freq, + struct ch_params *ch_params, + uint16_t sec_ch_2g_freq) +{ + enum channel_state chan_state = CHANNEL_STATE_ENABLE; + + if (ch_params->ch_width >= CH_WIDTH_MAX) + ch_params->ch_width = CH_WIDTH_40MHZ; + if ((reg_get_bw_value(ch_params->ch_width) > 20) && !sec_ch_2g_freq) { + if (oper_freq >= TWOG_CHAN_1_IN_MHZ && oper_freq <= + TWOG_CHAN_5_IN_MHZ) + sec_ch_2g_freq = oper_freq + 20; + else if (oper_freq >= TWOG_CHAN_6_IN_MHZ && oper_freq <= + TWOG_CHAN_13_IN_MHZ) + sec_ch_2g_freq = oper_freq - 20; + } + + while (ch_params->ch_width != CH_WIDTH_INVALID) { + chan_state = + reg_get_2g_bonded_channel_state_for_freq(pdev, oper_freq, + sec_ch_2g_freq, + ch_params->ch_width); + if ((chan_state == CHANNEL_STATE_ENABLE) || + (chan_state == CHANNEL_STATE_DFS)) { + if (ch_params->ch_width == CH_WIDTH_40MHZ) { + if (oper_freq < sec_ch_2g_freq) + ch_params->sec_ch_offset = + LOW_PRIMARY_CH; + else + ch_params->sec_ch_offset = + HIGH_PRIMARY_CH; + ch_params->mhz_freq_seg0 = + (oper_freq + sec_ch_2g_freq) / 2; + if (ch_params->mhz_freq_seg0 == + TWOG_CHAN_14_IN_MHZ) + ch_params->center_freq_seg0 = 14; + else + ch_params->center_freq_seg0 = + (ch_params->mhz_freq_seg0 - + TWOG_STARTING_FREQ) / + FREQ_TO_CHAN_SCALE; + } else { + ch_params->sec_ch_offset = NO_SEC_CH; + ch_params->mhz_freq_seg0 = oper_freq; + if (ch_params->mhz_freq_seg0 == + TWOG_CHAN_14_IN_MHZ) + ch_params->center_freq_seg0 = 14; + else + ch_params->center_freq_seg0 = + (ch_params->mhz_freq_seg0 - + TWOG_STARTING_FREQ) / + FREQ_TO_CHAN_SCALE; + } + break; + } + + ch_params->ch_width = get_next_lower_bw[ch_params->ch_width]; + } + /* Overwrite mhz_freq_seg1 and center_freq_seg1 to 0 for 2.4 Ghz */ + ch_params->mhz_freq_seg1 = 0; + ch_params->center_freq_seg1 = 0; +} + +void reg_set_channel_params_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + qdf_freq_t sec_ch_2g_freq, + struct ch_params *ch_params) +{ + if (reg_is_5ghz_ch_freq(freq) || reg_is_6ghz_chan_freq(freq)) + reg_set_5g_channel_params_for_freq(pdev, freq, ch_params); + else if (reg_is_24ghz_ch_freq(freq)) + reg_set_2g_channel_params_for_freq(pdev, freq, ch_params, + sec_ch_2g_freq); +} + +uint8_t reg_get_channel_reg_power_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + enum channel_enum chan_enum; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct regulatory_channel *reg_channels; + + chan_enum = reg_get_chan_enum_for_freq(freq); + + if (chan_enum == INVALID_CHANNEL) { + reg_err("channel is invalid"); + return REG_INVALID_TXPOWER; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return REG_INVALID_TXPOWER; + } + + reg_channels = pdev_priv_obj->cur_chan_list; + + return reg_channels[chan_enum].tx_power; +} + +bool reg_is_dfs_for_freq(struct wlan_objmgr_pdev *pdev, qdf_freq_t freq) +{ + uint32_t chan_flags; + + chan_flags = reg_get_channel_flags_for_freq(pdev, freq); + + return chan_flags & REGULATORY_CHAN_RADAR; +} + +#ifdef CONFIG_REG_CLIENT +/** + * reg_get_psoc_mas_chan_list () - Get psoc master channel list + * @pdev: pointer to pdev object + * @psoc: pointer to psoc object + * + * Return: psoc master chanel list + */ +static struct regulatory_channel *reg_get_psoc_mas_chan_list( + struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *soc_reg; + uint8_t pdev_id; + + soc_reg = reg_get_psoc_obj(psoc); + if (!soc_reg) { + reg_err("reg psoc private obj is NULL"); + return NULL; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + return soc_reg->mas_chan_params[pdev_id].mas_chan_list; +} +#else +static inline struct regulatory_channel *reg_get_psoc_mas_chan_list( + struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_psoc *psoc) +{ + return NULL; +} +#endif + +void reg_update_nol_ch_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *chan_freq_list, + uint8_t num_chan, + bool nol_chan) +{ + enum channel_enum chan_enum; + struct regulatory_channel *mas_chan_list, *psoc_mas_chan_list; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_objmgr_psoc *psoc; + uint16_t i; + + if (!num_chan || !chan_freq_list) { + reg_err("chan_freq_list or num_ch is NULL"); + return; + } + + psoc = wlan_pdev_get_psoc(pdev); + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!pdev_priv_obj) { + reg_err("reg pdev private obj is NULL"); + return; + } + + psoc_mas_chan_list = reg_get_psoc_mas_chan_list(pdev, psoc); + + mas_chan_list = pdev_priv_obj->mas_chan_list; + for (i = 0; i < num_chan; i++) { + chan_enum = reg_get_chan_enum_for_freq(chan_freq_list[i]); + if (chan_enum == INVALID_CHANNEL) { + reg_err("Invalid freq in nol list, freq %d", + chan_freq_list[i]); + continue; + } + mas_chan_list[chan_enum].nol_chan = nol_chan; + if (psoc_mas_chan_list) + psoc_mas_chan_list[chan_enum].nol_chan = nol_chan; + } + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + reg_send_scheduler_msg_sb(psoc, pdev); +} + +void reg_update_nol_history_ch_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *chan_list, + uint8_t num_chan, + bool nol_history_chan) +{ + enum channel_enum chan_enum; + struct regulatory_channel *mas_chan_list; + struct regulatory_channel *cur_chan_list; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + uint16_t i; + + if (!num_chan || !chan_list) { + reg_err("chan_list or num_ch is NULL"); + return; + } + + pdev_priv_obj = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_REGULATORY); + + if (!pdev_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return; + } + + mas_chan_list = pdev_priv_obj->mas_chan_list; + cur_chan_list = pdev_priv_obj->cur_chan_list; + + for (i = 0; i < num_chan; i++) { + chan_enum = reg_get_chan_enum_for_freq(chan_list[i]); + if (chan_enum == INVALID_CHANNEL) { + reg_err("Invalid ch in nol list, chan %d", + chan_list[i]); + continue; + } + mas_chan_list[chan_enum].nol_history = nol_history_chan; + cur_chan_list[chan_enum].nol_history = nol_history_chan; + } +} + +static inline bool REG_IS_FREQUENCY_VALID_5G_SBS(qdf_freq_t curfreq, + qdf_freq_t newfreq) +{ + return ((curfreq) > (newfreq) ? + REG_CH_TO_FREQ(reg_get_chan_enum_for_freq(curfreq)) + - REG_CH_TO_FREQ(reg_get_chan_enum_for_freq(newfreq)) + > REG_SBS_SEPARATION_THRESHOLD : + REG_CH_TO_FREQ(reg_get_chan_enum_for_freq(newfreq)) + - REG_CH_TO_FREQ(reg_get_chan_enum_for_freq(curfreq)) + > REG_SBS_SEPARATION_THRESHOLD); +} + +bool reg_is_frequency_valid_5g_sbs(qdf_freq_t curfreq, qdf_freq_t newfreq) +{ + return REG_IS_FREQUENCY_VALID_5G_SBS(curfreq, newfreq); +} + +qdf_freq_t reg_min_chan_freq(void) +{ + return channel_map[MIN_24GHZ_CHANNEL].center_freq; +} + +qdf_freq_t reg_max_chan_freq(void) +{ + return channel_map[NUM_CHANNELS - 1].center_freq; +} + +bool reg_is_same_band_freqs(qdf_freq_t freq1, qdf_freq_t freq2) +{ + return (freq1 && freq2 && ((REG_IS_6GHZ_FREQ(freq1) && + REG_IS_6GHZ_FREQ(freq2)) || + (REG_IS_5GHZ_FREQ(freq1) && + REG_IS_5GHZ_FREQ(freq2)) || + (REG_IS_24GHZ_CH_FREQ(freq1) && + REG_IS_24GHZ_CH_FREQ(freq2)))); +} + +enum reg_wifi_band reg_freq_to_band(qdf_freq_t freq) +{ + if (REG_IS_24GHZ_CH_FREQ(freq)) + return REG_BAND_2G; + else if (REG_IS_5GHZ_FREQ(freq) || REG_IS_49GHZ_FREQ(freq)) + return REG_BAND_5G; + else if (REG_IS_6GHZ_FREQ(freq)) + return REG_BAND_6G; + return REG_BAND_UNKNOWN; +} + +#endif /* CONFIG_CHAN_FREQ_API */ + +uint8_t reg_get_max_tx_power(struct wlan_objmgr_pdev *pdev) +{ + struct regulatory_channel *cur_chan_list; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + uint8_t i, max_tx_power = 0; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev private obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + cur_chan_list = pdev_priv_obj->cur_chan_list; + + for (i = 0; i < NUM_CHANNELS; i++) { + if (cur_chan_list[i].state != CHANNEL_STATE_DISABLE && + cur_chan_list[i].chan_flags != REGULATORY_CHAN_DISABLED) { + if (cur_chan_list[i].tx_power > max_tx_power) + max_tx_power = cur_chan_list[i].tx_power; + } + } + + if (!max_tx_power) + reg_err_rl("max_tx_power is zero"); + + return max_tx_power; +} + +QDF_STATUS reg_set_ignore_fw_reg_offload_ind(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_reg; + + psoc_reg = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_reg->ignore_fw_reg_offload_ind = true; + return QDF_STATUS_SUCCESS; +} + +bool reg_get_ignore_fw_reg_offload_ind(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_reg; + + psoc_reg = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_reg)) + return false; + + return psoc_reg->ignore_fw_reg_offload_ind; +} + +QDF_STATUS reg_set_6ghz_supported(struct wlan_objmgr_psoc *psoc, bool val) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_FAILURE; + } + + psoc_priv_obj->six_ghz_supported = val; + + return QDF_STATUS_SUCCESS; +} + +bool reg_is_6ghz_op_class(struct wlan_objmgr_pdev *pdev, uint8_t op_class) +{ + return ((op_class >= MIN_6GHZ_OPER_CLASS) && + (op_class <= MAX_6GHZ_OPER_CLASS)); +} + +bool reg_is_6ghz_supported(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return false; + } + + return psoc_priv_obj->six_ghz_supported; +} + +#ifdef DISABLE_UNII_SHARED_BANDS +QDF_STATUS +reg_get_unii_5g_bitmap(struct wlan_objmgr_pdev *pdev, uint8_t *bitmap) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err_rl("pdev reg component is NULL"); + return QDF_STATUS_E_FAILURE; + } + *bitmap = pdev_priv_obj->unii_5g_bitmap; + + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef CONFIG_REG_CLIENT +enum band_info reg_band_bitmap_to_band_info(uint32_t band_bitmap) +{ + if ((band_bitmap & BIT(REG_BAND_2G)) && + (band_bitmap & BIT(REG_BAND_5G)) && + (band_bitmap & BIT(REG_BAND_6G))) + return BAND_ALL; + else if ((band_bitmap & BIT(REG_BAND_5G)) && + (band_bitmap & BIT(REG_BAND_6G))) + return BAND_5G; + else if ((band_bitmap & BIT(REG_BAND_2G)) && + (band_bitmap & BIT(REG_BAND_6G))) + return BAND_2G; + else if ((band_bitmap & BIT(REG_BAND_2G)) && + (band_bitmap & BIT(REG_BAND_5G))) + return BAND_ALL; + else if (band_bitmap & BIT(REG_BAND_2G)) + return BAND_2G; + else if (band_bitmap & BIT(REG_BAND_5G)) + return BAND_5G; + else if (band_bitmap & BIT(REG_BAND_6G)) + return BAND_2G; + else + return BAND_UNKNOWN; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_services_common.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_services_common.h new file mode 100644 index 0000000000000000000000000000000000000000..d18e01827905df5265e992b8d42bf93fa0e53f50 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_services_common.h @@ -0,0 +1,1030 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_services.h + * This file provides prototypes of the regulatory component + * service functions + */ + +#ifndef __REG_SERVICES_COMMON_H_ +#define __REG_SERVICES_COMMON_H_ + +#define IS_VALID_PSOC_REG_OBJ(psoc_priv_obj) (psoc_priv_obj) +#define IS_VALID_PDEV_REG_OBJ(pdev_priv_obj) (pdev_priv_obj) +#define FREQ_TO_CHAN_SCALE 5 +/* The distance between the 80Mhz center and the nearest 20Mhz channel */ +#define NEAREST_20MHZ_CHAN_FREQ_OFFSET 10 +#define NUM_20_MHZ_CHAN_IN_80_MHZ_CHAN 4 +#define NUM_20_MHZ_CHAN_IN_160_MHZ_CHAN 8 + +#ifdef CONFIG_CHAN_NUM_API +#define REG_MIN_24GHZ_CH_NUM channel_map[MIN_24GHZ_CHANNEL].chan_num +#define REG_MAX_24GHZ_CH_NUM channel_map[MAX_24GHZ_CHANNEL].chan_num +#define REG_MIN_5GHZ_CH_NUM channel_map[MIN_5GHZ_CHANNEL].chan_num +#define REG_MAX_5GHZ_CH_NUM channel_map[MAX_5GHZ_CHANNEL].chan_num + +#define REG_IS_24GHZ_CH(chan_num) \ + (((chan_num) >= REG_MIN_24GHZ_CH_NUM) && \ + ((chan_num) <= REG_MAX_24GHZ_CH_NUM)) +#endif /* CONFIG_CHAN_NUM_API */ + +#define REG_MIN_24GHZ_CH_FREQ channel_map[MIN_24GHZ_CHANNEL].center_freq +#define REG_MAX_24GHZ_CH_FREQ channel_map[MAX_24GHZ_CHANNEL].center_freq + +#define REG_IS_24GHZ_CH_FREQ(freq) \ + (((freq) >= REG_MIN_24GHZ_CH_FREQ) && \ + ((freq) <= REG_MAX_24GHZ_CH_FREQ)) + +#ifdef CONFIG_CHAN_FREQ_API +#define REG_MIN_5GHZ_CH_FREQ channel_map[MIN_5GHZ_CHANNEL].center_freq +#define REG_MAX_5GHZ_CH_FREQ channel_map[MAX_5GHZ_CHANNEL].center_freq +#endif /* CONFIG_CHAN_FREQ_API */ + +#define REG_MIN_49GHZ_CH_FREQ channel_map[MIN_49GHZ_CHANNEL].center_freq +#define REG_MAX_49GHZ_CH_FREQ channel_map[MAX_49GHZ_CHANNEL].center_freq + +#define REG_IS_49GHZ_FREQ(freq) \ + (((freq) >= REG_MIN_49GHZ_CH_FREQ) && \ + ((freq) <= REG_MAX_49GHZ_CH_FREQ)) + +#ifdef CONFIG_CHAN_NUM_API +#define REG_IS_5GHZ_CH(chan_num) \ + (((chan_num) >= REG_MIN_5GHZ_CH_NUM) && \ + ((chan_num) <= REG_MAX_5GHZ_CH_NUM)) +#endif /* CONFIG_CHAN_NUM_API */ + +#define REG_IS_5GHZ_FREQ(freq) \ + (((freq) >= channel_map[MIN_5GHZ_CHANNEL].center_freq) && \ + ((freq) <= channel_map[MAX_5GHZ_CHANNEL].center_freq)) + +#ifdef CONFIG_BAND_6GHZ +#define FREQ_LEFT_SHIFT 55 +#define SIXG_STARTING_FREQ 5940 +#define NUM_80MHZ_BAND_IN_6G 16 +#define NUM_PSC_FREQ 15 +#define PSC_BAND_MHZ (FREQ_TO_CHAN_SCALE * NUM_80MHZ_BAND_IN_6G) +#define REG_MIN_6GHZ_CHAN_FREQ channel_map[MIN_6GHZ_CHANNEL].center_freq +#define REG_MAX_6GHZ_CHAN_FREQ channel_map[MAX_6GHZ_CHANNEL].center_freq +#else +#define FREQ_LEFT_SHIFT 0 +#define SIXG_STARTING_FREQ 0 +#define NUM_80MHZ_BAND_IN_6G 0 +#define NUM_PSC_FREQ 0 +#define PSC_BAND_MHZ (FREQ_TO_CHAN_SCALE * NUM_80MHZ_BAND_IN_6G) +#define REG_MIN_6GHZ_CHAN_FREQ 0 +#define REG_MAX_6GHZ_CHAN_FREQ 0 +#endif /*CONFIG_BAND_6GHZ*/ + +#define REG_CH_NUM(ch_enum) channel_map[ch_enum].chan_num +#define REG_CH_TO_FREQ(ch_enum) channel_map[ch_enum].center_freq + +/* EEPROM setting is a country code */ +#define COUNTRY_ERD_FLAG 0x8000 +#define MIN_6GHZ_OPER_CLASS 131 +#define MAX_6GHZ_OPER_CLASS 135 + +extern const struct chan_map *channel_map; +extern const struct chan_map channel_map_us[]; +extern const struct chan_map channel_map_eu[]; +extern const struct chan_map channel_map_jp[]; +extern const struct chan_map channel_map_china[]; +extern const struct chan_map channel_map_global[]; + +#ifdef CONFIG_CHAN_NUM_API +/** + * reg_get_chan_enum() - Get channel enum for given channel number + * @chan_num: Channel number + * + * Return: Channel enum + */ +enum channel_enum reg_get_chan_enum(uint8_t chan_num); + +/** + * reg_get_channel_list_with_power() - Provides the channel list with power + * @pdev: Pointer to pdev + * @ch_list: Pointer to the channel list. + * @num_chan: Pointer to save number of channels + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_get_channel_list_with_power(struct wlan_objmgr_pdev *pdev, + struct channel_power *ch_list, + uint8_t *num_chan); + +/** + * reg_get_channel_state() - Get channel state from regulatory + * @pdev: Pointer to pdev + * @ch: channel number. + * + * Return: channel state + */ +enum channel_state reg_get_channel_state(struct wlan_objmgr_pdev *pdev, + uint8_t ch); + +/** + * reg_get_5g_bonded_channel() - get the 5G bonded channel state + * @pdev: Pointer to pdev structure + * @chan_num: channel number + * @ch_width: channel width + * @bonded_chan_ptr_ptr: bonded channel ptr ptr + * + * Return: channel state + */ +enum channel_state reg_get_5g_bonded_channel( + struct wlan_objmgr_pdev *pdev, uint8_t chan_num, + enum phy_ch_width ch_width, + const struct bonded_channel **bonded_chan_ptr_ptr); + +/** + * reg_get_5g_bonded_channel_state() - Get channel state for 5G bonded channel + * @pdev: Pointer to pdev + * @ch: channel number. + * @bw: channel band width + * + * Return: channel state + */ +enum channel_state reg_get_5g_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, uint8_t ch, + enum phy_ch_width bw); + +/** + * reg_get_2g_bonded_channel_state() - Get channel state for 2G bonded channel + * @ch: channel number. + * @pdev: Pointer to pdev + * @oper_ch: Primary channel number + * @sec_ch: Secondary channel number + * @bw: channel band width + * + * Return: channel state + */ +enum channel_state reg_get_2g_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, uint8_t oper_ch, uint8_t sec_ch, + enum phy_ch_width bw); + +/** + * reg_set_channel_params () - Sets channel parameteres for given bandwidth + * @pdev: Pointer to pdev + * @ch: channel number. + * @sec_ch_2g: Secondary 2G channel + * @ch_params: pointer to the channel parameters. + * + * Return: None + */ +void reg_set_channel_params(struct wlan_objmgr_pdev *pdev, + uint8_t ch, uint8_t sec_ch_2g, + struct ch_params *ch_params); +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * reg_read_default_country() - Get the default regulatory country + * @psoc: The physical SoC to get default country from + * @country_code: the buffer to populate the country code into + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_read_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code); + +/** + * reg_get_current_dfs_region () - Get the current dfs region + * @pdev: Pointer to pdev + * @dfs_reg: pointer to dfs region + * + * Return: None + */ +void reg_get_current_dfs_region(struct wlan_objmgr_pdev *pdev, + enum dfs_reg *dfs_reg); + +#ifdef CONFIG_CHAN_NUM_API +/** + * reg_get_channel_reg_power() - Get the txpower for the given channel + * @pdev: Pointer to pdev + * @chan_num: Channel number + * + * Return: txpower + */ +uint32_t reg_get_channel_reg_power(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num); + +/** + * reg_get_channel_freq() - Get the channel frequency + * @pdev: Pointer to pdev + * @chan_num: Channel number + * + * Return: frequency + */ +qdf_freq_t reg_get_channel_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num); +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * reg_get_bw_value() - give bandwidth value + * bw: bandwidth enum + * + * Return: uint16_t + */ +uint16_t reg_get_bw_value(enum phy_ch_width bw); + +/** + * reg_set_dfs_region () - Set the current dfs region + * @pdev: Pointer to pdev + * @dfs_reg: pointer to dfs region + * + * Return: None + */ +void reg_set_dfs_region(struct wlan_objmgr_pdev *pdev, + enum dfs_reg dfs_reg); + +#ifdef CONFIG_CHAN_NUM_API +/** + * reg_chan_to_band() - Get band from channel number + * @chan_num: channel number + * + * Return: band info + */ +enum band_info reg_chan_to_band(uint8_t chan_num); +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * reg_program_chan_list() - Set user country code and populate the channel list + * @pdev: Pointer to pdev + * @rd: Pointer to cc_regdmn_s structure + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_program_chan_list(struct wlan_objmgr_pdev *pdev, + struct cc_regdmn_s *rd); + +#ifdef CONFIG_CHAN_NUM_API +/** + * reg_update_nol_ch () - Updates NOL channels in current channel list + * @pdev: pointer to pdev object + * @ch_list: pointer to NOL channel list + * @num_ch: No.of channels in list + * @update_nol: set/reset the NOL status + * + * Return: None + */ +void reg_update_nol_ch(struct wlan_objmgr_pdev *pdev, uint8_t *chan_list, + uint8_t num_chan, bool nol_chan); + +/** + * reg_is_dfs_ch () - Checks the channel state for DFS + * @pdev: pdev ptr + * @chan: channel + * + * Return: true or false + */ +bool reg_is_dfs_ch(struct wlan_objmgr_pdev *pdev, uint8_t chan); +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * reg_freq_to_chan() - Get channel number from frequency. + * @pdev: Pointer to pdev + * @freq: Channel frequency + * + * Return: Channel number if success, otherwise 0 + */ +uint8_t reg_freq_to_chan(struct wlan_objmgr_pdev *pdev, qdf_freq_t freq); + +#ifdef CONFIG_CHAN_NUM_API +/** + * reg_chan_to_freq() - Get frequency from channel number + * @pdev: Pointer to pdev + * @chan_num: Channel number + * + * Return: Channel frequency if success, otherwise 0 + */ +qdf_freq_t reg_chan_to_freq(struct wlan_objmgr_pdev *pdev, uint8_t chan_num); + +/** + * reg_legacy_chan_to_freq() - Get freq from chan noumber, for 2G and 5G + * @pdev: Pointer to pdev + * @chan_num: Channel number + * + * Return: Channel frequency if success, otherwise 0 + */ +uint16_t reg_legacy_chan_to_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num); + +/** + * reg_chan_is_49ghz() - Check if the input channel number is 4.9GHz + * @pdev: Pdev pointer + * @chan_num: Input channel number + * + * Return: true if the channel is 4.9GHz else false. + */ +bool reg_chan_is_49ghz(struct wlan_objmgr_pdev *pdev, uint8_t chan_num); +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * reg_program_default_cc() - Program default country code + * @pdev: Pdev pointer + * @regdmn: Regdomain value + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_program_default_cc(struct wlan_objmgr_pdev *pdev, + uint16_t regdmn); + +/** + * reg_get_current_cc() - Get current country code + * @pdev: Pdev pointer + * @regdmn: Pointer to get current country values + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_get_current_cc(struct wlan_objmgr_pdev *pdev, + struct cc_regdmn_s *rd); + +/** + * reg_set_regdb_offloaded() - set/clear regulatory offloaded flag + * + * @psoc: psoc pointer + * Return: Success or Failure + */ +QDF_STATUS reg_set_regdb_offloaded(struct wlan_objmgr_psoc *psoc, bool val); + +/** + * reg_get_curr_regdomain() - Get current regdomain in use + * @pdev: pdev pointer + * @cur_regdmn: Current regdomain info + * + * Return: QDF status + */ +QDF_STATUS reg_get_curr_regdomain(struct wlan_objmgr_pdev *pdev, + struct cur_regdmn_info *cur_regdmn); + +/** + * reg_modify_chan_144() - Enable/Disable channel 144 + * @pdev: pdev pointer + * @en_chan_144: flag to disable/enable channel 144 + * + * Return: Success or Failure + */ +QDF_STATUS reg_modify_chan_144(struct wlan_objmgr_pdev *pdev, bool en_chan_144); + +/** + * reg_get_en_chan_144() - get en_chan_144 flag value + * @pdev: pdev pointer + * + * Return: en_chan_144 flag value + */ +bool reg_get_en_chan_144(struct wlan_objmgr_pdev *pdev); + +/** + * reg_get_hal_reg_cap() - Get HAL REG capabilities + * @psoc: psoc for country information + * + * Return: hal reg cap pointer + */ +struct wlan_psoc_host_hal_reg_capabilities_ext *reg_get_hal_reg_cap( + struct wlan_objmgr_psoc *psoc); + +/** + * reg_set_hal_reg_cap() - Set HAL REG capabilities + * @psoc: psoc for country information + * @reg_cap: Regulatory caps pointer + * @phy_cnt: number of phy + * + * Return: hal reg cap pointer + */ +QDF_STATUS reg_set_hal_reg_cap( + struct wlan_objmgr_psoc *psoc, + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap, + uint16_t phy_cnt); + +/** + * reg_chan_in_range() - Check if the given channel is in pdev's channel range + * @chan_list: Pointer to regulatory channel list. + * @low_freq_2g: Low frequency 2G. + * @high_freq_2g: High frequency 2G. + * @low_freq_5g: Low frequency 5G. + * @high_freq_5g: High frequency 5G. + * @ch_enum: Channel enum. + * + * Return: true if ch_enum is with in pdev's channel range, else false. + */ +bool reg_chan_in_range(struct regulatory_channel *chan_list, + qdf_freq_t low_freq_2g, qdf_freq_t high_freq_2g, + qdf_freq_t low_freq_5g, qdf_freq_t high_freq_5g, + enum channel_enum ch_enum); + +/** + * reg_init_channel_map() - Initialize the channel list based on the dfs region. + * @dfs_region: Dfs region + */ +void reg_init_channel_map(enum dfs_reg dfs_region); + +/** + * reg_get_psoc_tx_ops() - Get regdb tx ops + * @psoc: Pointer to psoc structure + */ +struct wlan_lmac_if_reg_tx_ops *reg_get_psoc_tx_ops( + struct wlan_objmgr_psoc *psoc); + +#ifdef CONFIG_CHAN_NUM_API +/** + * reg_update_nol_history_ch() - Set nol-history flag for the channels in the + * list. + * @pdev: Pdev ptr. + * @ch_list: Input channel list. + * @num_ch: Number of channels. + * @nol_history_ch: NOL-History flag. + * + * Return: void + */ +void reg_update_nol_history_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *chan_list, + uint8_t num_chan, + bool nol_history_chan); + +/** + * reg_is_24ghz_ch() - Check if the given channel number is 2.4GHz + * @chan: Channel number + * + * Return: true if channel number is 2.4GHz, else false + */ +bool reg_is_24ghz_ch(uint32_t chan); + +/** + * reg_is_5ghz_ch() - Check if the given channel number is 5GHz + * @chan: Channel number + * + * Return: true if channel number is 5GHz, else false + */ +bool reg_is_5ghz_ch(uint32_t chan); +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * reg_is_24ghz_ch_freq() - Check if the given channel frequency is 2.4GHz + * @freq: Channel frequency + * + * Return: true if channel frequency is 2.4GHz, else false + */ +bool reg_is_24ghz_ch_freq(uint32_t freq); + +/** + * reg_is_5ghz_ch_freq() - Check if the given channel frequency is 5GHz + * @freq: Channel frequency + * + * Return: true if channel frequency is 5GHz, else false + */ +bool reg_is_5ghz_ch_freq(uint32_t freq); + +/** + * reg_is_freq_indoor() - Check if the input frequency is an indoor frequency. + * @pdev: Pointer to pdev. + * @freq: Channel frequency. + * + * Return: Return true if the input frequency is indoor, else false. + */ +bool reg_is_freq_indoor(struct wlan_objmgr_pdev *pdev, qdf_freq_t freq); + +#ifdef CONFIG_BAND_6GHZ +/** + * reg_is_6ghz_chan_freq() - Check if the given channel frequency is 6GHz + * @freq: Channel frequency + * + * Return: true if channel frequency is 6GHz, else false + */ +bool reg_is_6ghz_chan_freq(uint16_t freq); + +/** + * REG_IS_6GHZ_FREQ() - Check if the given channel frequency is 6GHz + * @freq: Channel frequency + * + * Return: true if channel frequency is 6GHz, else false + */ +static inline bool REG_IS_6GHZ_FREQ(uint16_t freq) +{ + return ((freq >= REG_MIN_6GHZ_CHAN_FREQ) && + (freq <= REG_MAX_6GHZ_CHAN_FREQ)); +} + +/** + * reg_is_6ghz_psc_chan_freq() - Check if the given 6GHz channel frequency is + * preferred scanning channel frequency. + * @freq: Channel frequency + * + * Return: true if given 6GHz channel frequency is preferred scanning channel + * frequency, else false + */ +bool reg_is_6ghz_psc_chan_freq(uint16_t freq); + +/** + * reg_min_6ghz_chan_freq() - Get minimum 6GHz channel center frequency + * + * Return: Minimum 6GHz channel center frequency + */ +uint16_t reg_min_6ghz_chan_freq(void); + +/** + * reg_max_6ghz_chan_freq() - Get maximum 6GHz channel center frequency + * + * Return: Maximum 6GHz channel center frequency + */ +uint16_t reg_max_6ghz_chan_freq(void); +#else +static inline bool reg_is_6ghz_chan_freq(uint16_t freq) +{ + return false; +} + +static inline bool REG_IS_6GHZ_FREQ(uint16_t freq) +{ + return false; +} + +static inline bool reg_is_6ghz_psc_chan_freq(uint16_t freq) +{ + return false; +} + +static inline uint16_t reg_min_6ghz_chan_freq(void) +{ + return 0; +} + +static inline uint16_t reg_max_6ghz_chan_freq(void) +{ + return 0; +} +#endif /* CONFIG_BAND_6GHZ */ + +/** + * reg_get_band_channel_list() - Get the channel list and number of channels + * @pdev: pdev ptr + * @band_mask: Input bitmap with band set + * @channel_list: Pointer to Channel List + * + * Get the given channel list and number of channels from the current channel + * list based on input band bitmap. + * + * Return: Number of channels, else 0 to indicate error + */ +uint16_t reg_get_band_channel_list(struct wlan_objmgr_pdev *pdev, + uint8_t band_mask, + struct regulatory_channel *channel_list); + +/** + * reg_chan_band_to_freq - Return channel frequency based on the channel number + * and band. + * @pdev: pdev ptr + * @chan: Channel Number + * @band_mask: Bitmap for bands + * + * Return: Return channel frequency or return 0, if the channel is disabled or + * if the input channel number or band_mask is invalid. Composite bands are + * supported only for 2.4Ghz and 5Ghz bands. For other bands the following + * priority is given: 1) 6Ghz 2) 5Ghz 3) 2.4Ghz. + */ +qdf_freq_t reg_chan_band_to_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan, + uint8_t band_mask); + +/** + * reg_is_49ghz_freq() - Check if the given channel frequency is 4.9GHz + * @freq: Channel frequency + * + * Return: true if channel frequency is 4.9GHz, else false + */ +bool reg_is_49ghz_freq(qdf_freq_t freq); + +/** + * reg_ch_num() - Get channel number from channel enum + * @ch_enum: Channel enum + * + * Return: channel number + */ +qdf_freq_t reg_ch_num(uint32_t ch_enum); + +/** + * reg_ch_to_freq() - Get channel frequency from channel enum + * @ch_enum: Channel enum + * + * Return: channel frequency + */ +qdf_freq_t reg_ch_to_freq(uint32_t ch_enum); + +#ifdef CONFIG_CHAN_NUM_API +/** + * reg_is_same_band_channels() - Check if given channel numbers have same band + * @chan_num1: Channel number1 + * @chan_num2: Channel number2 + * + * Return: true if both the channels has the same band. + */ +bool reg_is_same_band_channels(uint8_t chan_num1, uint8_t chan_num2); + +/** + * reg_is_channel_valid_5g_sbs() Check if the given channel is 5G SBS. + * @curchan: current channel + * @newchan:new channel + * + * Return: true if the given channel is a valid 5G SBS + */ +bool reg_is_channel_valid_5g_sbs(uint8_t curchan, uint8_t newchan); + +/** + * reg_min_24ghz_ch_num() - Get minimum 2.4GHz channel number + * + * Return: Minimum 2.4GHz channel number + */ +uint8_t reg_min_24ghz_ch_num(void); + +/** + * reg_max_24ghz_ch_num() - Get maximum 2.4GHz channel number + * + * Return: Maximum 2.4GHz channel number + */ +uint8_t reg_max_24ghz_ch_num(void); + +/** + * reg_min_5ghz_ch_num() - Get minimum 5GHz channel number + * + * Return: Minimum 5GHz channel number + */ +uint8_t reg_min_5ghz_ch_num(void); + +/** + * reg_max_5ghz_ch_num() - Get maximum 5GHz channel number + * + * Return: Maximum 5GHz channel number + */ +uint8_t reg_max_5ghz_ch_num(void); +#endif /* CONFIG_CHAN_NUM_API */ + +#ifdef CONFIG_CHAN_FREQ_API +/** + * reg_min_24ghz_chan_freq() - Get minimum 2.4GHz channel frequency + * + * Return: Minimum 2.4GHz channel frequency + */ +qdf_freq_t reg_min_24ghz_chan_freq(void); + +/** + * reg_max_24ghz_chan_freq() - Get maximum 2.4GHz channel frequency + * + * Return: Maximum 2.4GHz channel frequency + */ +qdf_freq_t reg_max_24ghz_chan_freq(void); + +/** + * reg_min_5ghz_chan_freq() - Get minimum 5GHz channel frequency + * + * Return: Minimum 5GHz channel frequency + */ +qdf_freq_t reg_min_5ghz_chan_freq(void); + +/** + * reg_max_5ghz_chan_freq() - Get maximum 5GHz channel frequency + * + * Return: Maximum 5GHz channel frequency + */ +qdf_freq_t reg_max_5ghz_chan_freq(void); +#endif /* CONFIG_CHAN_FREQ_API */ + +/** + * reg_enable_dfs_channels() - Enable the use of DFS channels + * @pdev: The physical dev to enable/disable DFS channels for + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_enable_dfs_channels(struct wlan_objmgr_pdev *pdev, bool enable); + +/** + * reg_is_regdmn_en302502_applicable() - Find if ETSI EN302_502 radar pattern + * is applicable in current regulatory domain. + * @pdev: Pdev object pointer. + * + * Return: True if en302_502 is applicable, else false. + */ +bool reg_is_regdmn_en302502_applicable(struct wlan_objmgr_pdev *pdev); + +/** + * reg_modify_pdev_chan_range() - Compute current channel list + * in accordance with the modified reg caps. + * @pdev: The physical dev for which channel list must be built. + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_modify_pdev_chan_range(struct wlan_objmgr_pdev *pdev); + +#ifdef DISABLE_UNII_SHARED_BANDS +/** + * reg_disable_chan_coex() - Disable Coexisting channels based on the input + * bitmask. + * @pdev: pointer to wlan_objmgr_pdev. + * unii_5g_bitmap: UNII 5G bitmap. + * + * Return : QDF_STATUS + */ +QDF_STATUS reg_disable_chan_coex(struct wlan_objmgr_pdev *pdev, + uint8_t unii_5g_bitmap); +#endif + +#ifdef CONFIG_CHAN_FREQ_API +/** + * reg_is_freq_present_in_cur_chan_list() - Check the input frequency + * @pdev: Pointer to pdev + * @freq: Channel center frequency in MHz + * + * Check if the input channel center frequency is present in the current + * channel list + * + * Return: Return true if channel center frequency is present in the current + * channel list, else return false. + */ +bool +reg_is_freq_present_in_cur_chan_list(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq); + +/** + * reg_get_chan_enum_for_freq() - Get channel enum for given channel frequency + * @freq: Channel Frequency + * + * Return: Channel enum + */ +enum channel_enum reg_get_chan_enum_for_freq(qdf_freq_t freq); + +/** + * reg_get_channel_list_with_power_for_freq() - Provides the channel list with + * power + * @pdev: Pointer to pdev + * @ch_list: Pointer to the channel list. + * @num_chan: Pointer to save number of channels + * + * Return: QDF_STATUS + */ +QDF_STATUS +reg_get_channel_list_with_power_for_freq(struct wlan_objmgr_pdev *pdev, + struct channel_power *ch_list, + uint8_t *num_chan); + +/** + * reg_get_channel_state_for_freq() - Get channel state from regulatory + * @pdev: Pointer to pdev + * @freq: channel center frequency. + * + * Return: channel state + */ +enum channel_state reg_get_channel_state_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq); + +/** + * reg_get_5g_bonded_channel_state_for_freq() - Get channel state for + * 5G bonded channel using the channel frequency + * @pdev: Pointer to pdev + * @freq: channel center frequency. + * @bw: channel band width + * + * Return: channel state + */ +enum channel_state +reg_get_5g_bonded_channel_state_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + enum phy_ch_width bw); + +/** + * reg_get_2g_bonded_channel_state_for_freq() - Get channel state for 2G + * bonded channel + * @freq: channel center frequency. + * @pdev: Pointer to pdev + * @oper_ch_freq: Primary channel center frequency + * @sec_ch_freq: Secondary channel center frequency + * @bw: channel band width + * + * Return: channel state + */ +enum channel_state +reg_get_2g_bonded_channel_state_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t oper_ch_freq, + qdf_freq_t sec_ch_freq, + enum phy_ch_width bw); + +/** + * reg_set_channel_params_for_freq () - Sets channel parameteres for given + * bandwidth + * @pdev: Pointer to pdev + * @freq: Channel center frequency. + * @sec_ch_2g_freq: Secondary 2G channel frequency + * @ch_params: pointer to the channel parameters. + * + * Return: None + */ +void reg_set_channel_params_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + qdf_freq_t sec_ch_2g_freq, + struct ch_params *ch_params); + +/** + * reg_get_channel_reg_power_for_freq() - Get the txpower for the given channel + * @pdev: Pointer to pdev + * @freq: Channel frequency + * + * Return: txpower + */ +uint8_t reg_get_channel_reg_power_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq); + +/** + * reg_update_nol_ch_for_freq () - Updates NOL channels in current channel list + * @pdev: pointer to pdev object + * @chan_freq_list: pointer to NOL channel list + * @num_ch: No.of channels in list + * @update_nol: set/reset the NOL status + * + * Return: None + */ +void reg_update_nol_ch_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *chan_freq_list, + uint8_t num_chan, + bool nol_chan); +/** + * reg_is_dfs_for_freq () - Checks the channel state for DFS + * @pdev: pdev ptr + * @freq: Channel center frequency + * + * Return: true or false + */ +bool reg_is_dfs_for_freq(struct wlan_objmgr_pdev *pdev, qdf_freq_t freq); + +/** + * reg_chan_freq_is_49ghz() - Check if the input channel center frequency is + * 4.9GHz + * @pdev: Pdev pointer + * @chan_num: Input channel center frequency + * + * Return: true if the frequency is 4.9GHz else false. + */ +bool reg_chan_freq_is_49ghz(qdf_freq_t freq); + +/** + * reg_update_nol_history_ch_for_freq() - Set nol-history flag for the channels + * in the list. + * @pdev: Pdev ptr. + * @chan_list: Input channel freqeuncy list. + * @num_ch: Number of channels. + * @nol_history_ch: NOL-History flag. + * + * Return: void + */ +void reg_update_nol_history_ch_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *chan_list, + uint8_t num_chan, + bool nol_history_chan); + +/** + * reg_is_same_5g_band_freqs() - Check if given channel center + * frequencies have same band + * @freq1: Channel Center Frequency 1 + * @freq2: Channel Center Frequency 2 + * + * Return: true if both the frequencies has the same band. + */ +bool reg_is_same_band_freqs(qdf_freq_t freq1, qdf_freq_t freq2); + +/** + * reg_is_frequency_valid_5g_sbs() Check if the given frequency is 5G SBS. + * @curfreq: current channel frequency + * @newfreq: new channel center frequency + * + * Return: true if the given center frequency is a valid 5G SBS + */ +bool reg_is_frequency_valid_5g_sbs(qdf_freq_t curfreq, qdf_freq_t newfreq); + +/** + * reg_freq_to_band() - Get band from channel frequency + * @chan_num: channel frequency + * + * Return: wifi band + */ +enum reg_wifi_band reg_freq_to_band(qdf_freq_t freq); + +/** + * reg_min_chan_freq() - minimum channel frequency supported + * + * Return: channel frequency + */ +qdf_freq_t reg_min_chan_freq(void); + +/** + * reg_max_chan_freq() - maximum channel frequency supported + * + * Return: channel frequency + */ +qdf_freq_t reg_max_chan_freq(void); + +/** + * reg_get_5g_bonded_channel_for_freq()- Return the channel state for a + * 5G or 6G channel frequency based on the channel width and bonded channel + * @pdev: Pointer to pdev. + * @freq: Channel center frequency. + * @ch_width: Channel Width. + * @bonded_chan_ptr_ptr: Pointer to bonded_channel_freq. + * + * Return: Channel State + */ +enum channel_state +reg_get_5g_bonded_channel_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t freq, + enum phy_ch_width ch_width, + const struct bonded_channel_freq + **bonded_chan_ptr_ptr); +#endif /* CONFIG_CHAN_FREQ_API */ + +/** + * reg_get_max_tx_power() - Get maximum tx power from the current channel list + * @pdev: Pointer to pdev + * + * Return: return the value of the maximum tx power in the current channel list + * + */ +uint8_t reg_get_max_tx_power(struct wlan_objmgr_pdev *pdev); + +/** + * reg_set_ignore_fw_reg_offload_ind() - Set if regdb offload indication + * needs to be ignored + * @psoc: Pointer to psoc + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_set_ignore_fw_reg_offload_ind(struct wlan_objmgr_psoc *psoc); + +/** + * reg_get_ignore_fw_reg_offload_ind() - Check whether regdb offload indication + * needs to be ignored + * + * @psoc: Pointer to psoc + */ +bool reg_get_ignore_fw_reg_offload_ind(struct wlan_objmgr_psoc *psoc); + +/** + * reg_set_6ghz_supported() - Set if 6ghz is supported + * + * @psoc: Pointer to psoc + * @val: value + */ +QDF_STATUS reg_set_6ghz_supported(struct wlan_objmgr_psoc *psoc, + bool val); + +/** + * reg_is_6ghz_op_class() - Check whether 6ghz oper class + * + * @pdev: Pointer to pdev + * @op_class: oper class + */ +bool reg_is_6ghz_op_class(struct wlan_objmgr_pdev *pdev, + uint8_t op_class); + +/** + * reg_is_6ghz_supported() - Whether 6ghz is supported + * + * @psoc: pointer to psoc + */ +bool reg_is_6ghz_supported(struct wlan_objmgr_psoc *psoc); + +/** + * reg_get_unii_5g_bitmap() - get unii_5g_bitmap value + * @pdev: pdev pointer + * @bitmap: Pointer to retrieve the unii_5g_bitmap of enum reg_unii_band + * + * Return: QDF_STATUS + */ +#ifdef DISABLE_UNII_SHARED_BANDS +QDF_STATUS +reg_get_unii_5g_bitmap(struct wlan_objmgr_pdev *pdev, uint8_t *bitmap); +#endif + +#ifdef CONFIG_REG_CLIENT +/** + * reg_band_bitmap_to_band_info() - Convert the band_bitmap to a band_info enum. + * Since band_info enum only has combinations for 2G and 5G, 6G is not + * considered in this function. + * @band_bitmap: bitmap on top of reg_wifi_band of bands enabled + * + * Return: BAND_ALL if both 2G and 5G band is enabled + * BAND_2G if 2G is enabled but 5G isn't + * BAND_5G if 5G is enabled but 2G isn't + */ +enum band_info reg_band_bitmap_to_band_info(uint32_t band_bitmap); +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_utils.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_utils.c new file mode 100644 index 0000000000000000000000000000000000000000..44800675e8a318755516ecadaa3e64505ae05dfc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_utils.c @@ -0,0 +1,1039 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_utils.c + * This file defines the APIs to set and get the regulatory variables. + */ + +#include +#include +#include +#include +#include "reg_priv_objs.h" +#include "reg_utils.h" +#include "reg_callbacks.h" +#include "reg_db.h" +#include "reg_db_parser.h" +#include "reg_host_11d.h" +#include +#include +#include +#include "reg_services_common.h" +#include "reg_build_chan_list.h" + +#define DEFAULT_WORLD_REGDMN 0x60 + +#define IS_VALID_PSOC_REG_OBJ(psoc_priv_obj) (psoc_priv_obj) +#define IS_VALID_PDEV_REG_OBJ(pdev_priv_obj) (pdev_priv_obj) + +#ifdef CONFIG_CHAN_NUM_API +bool reg_chan_has_dfs_attribute(struct wlan_objmgr_pdev *pdev, uint8_t ch) +{ + enum channel_enum ch_idx; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + ch_idx = reg_get_chan_enum(ch); + + if (ch_idx == INVALID_CHANNEL) + return false; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg obj is NULL"); + return false; + } + + if (pdev_priv_obj->cur_chan_list[ch_idx].chan_flags & + REGULATORY_CHAN_RADAR) + return true; + + return false; +} +#endif /* CONFIG_CHAN_NUM_API */ + +#ifdef CONFIG_CHAN_FREQ_API +bool reg_chan_has_dfs_attribute_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + enum channel_enum ch_idx; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + ch_idx = reg_get_chan_enum_for_freq(freq); + + if (ch_idx == INVALID_CHANNEL) + return false; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg obj is NULL"); + return false; + } + + if (pdev_priv_obj->cur_chan_list[ch_idx].chan_flags & + REGULATORY_CHAN_RADAR) + return true; + + return false; +} +#endif /* CONFIG_CHAN_FREQ_API */ + +bool reg_is_world_ctry_code(uint16_t ctry_code) +{ + if ((ctry_code & 0xFFF0) == DEFAULT_WORLD_REGDMN) + return true; + + return false; +} + +QDF_STATUS reg_read_current_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_reg; + + if (!country_code) { + reg_err("country_code is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_reg = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_copy(country_code, psoc_reg->cur_country, REG_ALPHA2_LEN + 1); + + return QDF_STATUS_SUCCESS; +} + +/** + * reg_set_default_country() - Read the default country for the regdomain + * @country: country code. + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_set_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_reg; + + if (!country) { + reg_err("country is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_reg = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_info("set default_country: %s", country); + + qdf_mem_copy(psoc_reg->def_country, country, REG_ALPHA2_LEN + 1); + + return QDF_STATUS_SUCCESS; +} + +bool reg_is_world_alpha2(uint8_t *alpha2) +{ + if ((alpha2[0] == '0') && (alpha2[1] == '0')) + return true; + + return false; +} + +bool reg_is_us_alpha2(uint8_t *alpha2) +{ + if ((alpha2[0] == 'U') && (alpha2[1] == 'S')) + return true; + + return false; +} + +QDF_STATUS reg_set_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_reg; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_lmac_if_reg_tx_ops *tx_ops; + struct set_country cc; + struct wlan_objmgr_psoc *psoc; + struct cc_regdmn_s rd; + uint8_t pdev_id; + + if (!pdev) { + reg_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (!country) { + reg_err("country code is NULL"); + return QDF_STATUS_E_INVAL; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + psoc = wlan_pdev_get_psoc(pdev); + + psoc_reg = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (!qdf_mem_cmp(psoc_reg->cur_country, country, REG_ALPHA2_LEN)) { + if (psoc_reg->cc_src == SOURCE_USERSPACE || + psoc_reg->cc_src == SOURCE_CORE) { + reg_debug("country is not different"); + return QDF_STATUS_SUCCESS; + } + } + + reg_debug("programming new country: %s to firmware", country); + + qdf_mem_copy(cc.country, country, REG_ALPHA2_LEN + 1); + cc.pdev_id = pdev_id; + + if (!psoc_reg->offload_enabled && !reg_is_world_alpha2(country)) { + QDF_STATUS status; + + status = reg_is_country_code_valid(country); + if (!QDF_IS_STATUS_SUCCESS(status)) { + reg_err("Unable to set country code: %s\n", country); + reg_err("Restoring to world domain"); + qdf_mem_copy(cc.country, REG_WORLD_ALPHA2, + REG_ALPHA2_LEN + 1); + } + } + + + if (reg_is_world_alpha2(cc.country)) + psoc_reg->world_country_pending[pdev_id] = true; + else + psoc_reg->new_user_ctry_pending[pdev_id] = true; + + if (psoc_reg->offload_enabled) { + tx_ops = reg_get_psoc_tx_ops(psoc); + if (tx_ops->set_country_code) { + tx_ops->set_country_code(psoc, &cc); + } else { + reg_err("country set fw handler not present"); + psoc_reg->new_user_ctry_pending[pdev_id] = false; + return QDF_STATUS_E_FAULT; + } + } else { + if (reg_is_world_alpha2(cc.country)) { + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg component pdev priv is NULL"); + psoc_reg->world_country_pending[pdev_id] = + false; + return QDF_STATUS_E_INVAL; + } + if (reg_is_world_ctry_code( + pdev_priv_obj->def_region_domain)) + rd.cc.regdmn_id = + pdev_priv_obj->def_region_domain; + else + rd.cc.regdmn_id = DEFAULT_WORLD_REGDMN; + rd.flags = REGDMN_IS_SET; + } else { + qdf_mem_copy(rd.cc.alpha, cc.country, + REG_ALPHA2_LEN + 1); + rd.flags = ALPHA_IS_SET; + } + + reg_program_chan_list(pdev, &rd); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_reset_country(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_reg; + + psoc_reg = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_copy(psoc_reg->cur_country, + psoc_reg->def_country, + REG_ALPHA2_LEN + 1); + reg_debug("set cur_country %.2s", psoc_reg->cur_country); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_domain_from_country_code(v_REGDOMAIN_t *reg_domain_ptr, + const uint8_t *country_alpha2, + enum country_src source) +{ + if (!reg_domain_ptr) { + reg_err("Invalid reg domain pointer"); + return QDF_STATUS_E_FAULT; + } + + *reg_domain_ptr = 0; + + if (!country_alpha2) { + reg_err("Country code is NULL"); + return QDF_STATUS_E_FAULT; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef CONFIG_CHAN_NUM_API +bool reg_is_passive_or_disable_ch(struct wlan_objmgr_pdev *pdev, + uint8_t chan) +{ + enum channel_state ch_state; + + ch_state = reg_get_channel_state(pdev, chan); + + return (ch_state == CHANNEL_STATE_DFS) || + (ch_state == CHANNEL_STATE_DISABLE); +} +#endif /* CONFIG_CHAN_NUM_API */ + +#ifdef CONFIG_CHAN_FREQ_API +bool reg_is_passive_or_disable_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + enum channel_state chan_state; + + chan_state = reg_get_channel_state_for_freq(pdev, freq); + + return (chan_state == CHANNEL_STATE_DFS) || + (chan_state == CHANNEL_STATE_DISABLE); +} +#endif /* CONFIG_CHAN_FREQ_API */ + +#ifdef WLAN_FEATURE_DSRC +#ifdef CONFIG_CHAN_FREQ_API +bool reg_is_dsrc_freq(qdf_freq_t freq) +{ + if (!REG_IS_5GHZ_FREQ(freq)) + return false; + + if (!(freq >= REG_DSRC_START_FREQ && freq <= REG_DSRC_END_FREQ)) + return false; + + return true; +} +#endif /*CONFIG_CHAN_FREQ_API*/ + +#ifdef CONFIG_CHAN_NUM_API +bool reg_is_dsrc_chan(struct wlan_objmgr_pdev *pdev, uint8_t chan) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + qdf_freq_t freq = 0; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return false; + } + + if (!REG_IS_5GHZ_CH(chan)) + return false; + + freq = reg_chan_to_freq(pdev, chan); + + if (!(freq >= REG_DSRC_START_FREQ && freq <= REG_DSRC_END_FREQ)) + return false; + + return true; +} +#endif /* CONFIG_CHAN_NUM_API */ + +#else + +bool reg_is_etsi13_regdmn(struct wlan_objmgr_pdev *pdev) +{ + struct cur_regdmn_info cur_reg_dmn; + QDF_STATUS status; + + status = reg_get_curr_regdomain(pdev, &cur_reg_dmn); + if (status != QDF_STATUS_SUCCESS) { + reg_debug_rl("Failed to get reg domain"); + return false; + } + + return reg_etsi13_regdmn(cur_reg_dmn.dmn_id_5g); +} + +#ifdef CONFIG_CHAN_FREQ_API +bool reg_is_etsi13_srd_chan_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t freq) +{ + if (!REG_IS_5GHZ_FREQ(freq)) + return false; + + if (!(freq >= REG_ETSI13_SRD_START_FREQ && + freq <= REG_ETSI13_SRD_END_FREQ)) + return false; + + return reg_is_etsi13_regdmn(pdev); +} +#endif /* CONFIG_CHAN_FREQ_API */ + +#ifdef CONFIG_CHAN_NUM_API +bool reg_is_etsi13_srd_chan(struct wlan_objmgr_pdev *pdev, uint8_t chan) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + qdf_freq_t freq = 0; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return false; + } + + if (!REG_IS_5GHZ_CH(chan)) + return false; + + freq = reg_chan_to_freq(pdev, chan); + + if (!(freq >= REG_ETSI13_SRD_START_FREQ && + freq <= REG_ETSI13_SRD_END_FREQ)) + return false; + + return reg_is_etsi13_regdmn(pdev); +} +#endif /* CONFIG_CHAN_NUM_API */ + +bool reg_is_etsi13_srd_chan_allowed_master_mode(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + if (!pdev) { + reg_alert("pdev is NULL"); + return true; + } + psoc = wlan_pdev_get_psoc(pdev); + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_alert("psoc reg component is NULL"); + return true; + } + + return psoc_priv_obj->enable_srd_chan_in_master_mode && + reg_is_etsi13_regdmn(pdev); +} +#endif + +QDF_STATUS reg_set_band(struct wlan_objmgr_pdev *pdev, uint32_t band_bitmap) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_objmgr_psoc *psoc; + QDF_STATUS status; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (pdev_priv_obj->band_capability == band_bitmap) { + reg_info("same band %d", band_bitmap); + return QDF_STATUS_SUCCESS; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_info("set band bitmap: %d", band_bitmap); + pdev_priv_obj->band_capability = band_bitmap; + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + status = reg_send_scheduler_msg_sb(psoc, pdev); + + return status; +} + +QDF_STATUS reg_get_band(struct wlan_objmgr_pdev *pdev, + uint32_t *band_bitmap) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_debug("get band bitmap: %d", pdev_priv_obj->band_capability); + *band_bitmap = pdev_priv_obj->band_capability; + + return QDF_STATUS_SUCCESS; +} + +#ifdef DISABLE_CHANNEL_LIST +QDF_STATUS reg_restore_cached_channels(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_objmgr_psoc *psoc; + QDF_STATUS status; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + pdev_priv_obj->disable_cached_channels = false; + reg_compute_pdev_current_chan_list(pdev_priv_obj); + status = reg_send_scheduler_msg_sb(psoc, pdev); + return status; +} + +QDF_STATUS reg_disable_cached_channels(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_objmgr_psoc *psoc; + QDF_STATUS status; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + pdev_priv_obj->disable_cached_channels = true; + reg_compute_pdev_current_chan_list(pdev_priv_obj); + status = reg_send_scheduler_msg_sb(psoc, pdev); + return status; +} + +#ifdef CONFIG_CHAN_FREQ_API +QDF_STATUS reg_cache_channel_freq_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, + uint32_t num_channels) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_objmgr_psoc *psoc; + uint16_t i, j; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + if (pdev_priv_obj->num_cache_channels > 0) { + pdev_priv_obj->num_cache_channels = 0; + qdf_mem_zero(&pdev_priv_obj->cache_disable_chan_list, + sizeof(pdev_priv_obj->cache_disable_chan_list)); + } + + for (i = 0; i < num_channels; i++) { + for (j = 0; j < NUM_CHANNELS; j++) { + if (channel_list[i] == pdev_priv_obj-> + cur_chan_list[j].center_freq) { + pdev_priv_obj-> + cache_disable_chan_list[i].center_freq = + channel_list[i]; + pdev_priv_obj-> + cache_disable_chan_list[i].state = + pdev_priv_obj->cur_chan_list[j].state; + pdev_priv_obj-> + cache_disable_chan_list[i].chan_flags = + pdev_priv_obj-> + cur_chan_list[j].chan_flags; + } + } + } + pdev_priv_obj->num_cache_channels = num_channels; + + return QDF_STATUS_SUCCESS; +} +#endif /* CONFIG_CHAN_FREQ_API */ + +#ifdef CONFIG_CHAN_NUM_API +QDF_STATUS reg_cache_channel_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, + uint32_t num_channels) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_objmgr_psoc *psoc; + uint8_t i, j; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + if (pdev_priv_obj->num_cache_channels > 0) { + pdev_priv_obj->num_cache_channels = 0; + qdf_mem_zero(&pdev_priv_obj->cache_disable_chan_list, + sizeof(pdev_priv_obj->cache_disable_chan_list)); + } + + for (i = 0; i < num_channels; i++) { + for (j = 0; j < NUM_CHANNELS; j++) { + if (channel_list[i] == pdev_priv_obj-> + cur_chan_list[j].chan_num) { + pdev_priv_obj-> + cache_disable_chan_list[i].chan_num = + channel_list[i]; + pdev_priv_obj-> + cache_disable_chan_list[i].state = + pdev_priv_obj->cur_chan_list[j].state; + pdev_priv_obj-> + cache_disable_chan_list[i].chan_flags = + pdev_priv_obj-> + cur_chan_list[j].chan_flags; + } + } + } + pdev_priv_obj->num_cache_channels = num_channels; + + return QDF_STATUS_SUCCESS; +} +#endif /* CONFIG_CHAN_NUM_API */ +#endif + +#ifdef CONFIG_REG_CLIENT + +QDF_STATUS reg_set_fcc_constraint(struct wlan_objmgr_pdev *pdev, + bool fcc_constraint) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_objmgr_psoc *psoc; + QDF_STATUS status; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (pdev_priv_obj->set_fcc_channel == fcc_constraint) { + reg_info("same fcc_constraint %d", fcc_constraint); + return QDF_STATUS_SUCCESS; + } + + reg_info("set fcc_constraint: %d", fcc_constraint); + pdev_priv_obj->set_fcc_channel = fcc_constraint; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + status = reg_send_scheduler_msg_sb(psoc, pdev); + + return status; +} + +bool reg_get_fcc_constraint(struct wlan_objmgr_pdev *pdev, uint32_t freq) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return false; + } + + if (freq != CHAN_12_CENT_FREQ && freq != CHAN_13_CENT_FREQ) + return false; + + if (!pdev_priv_obj->set_fcc_channel) + return false; + + return true; +} + +#endif /* CONFIG_REG_CLIENT */ + +/** + * reg_change_pdev_for_config() - Update user configuration in pdev private obj. + * @psoc: Pointer to global psoc structure. + * @object: Pointer to global pdev structure. + * @arg: Pointer to argument list. + */ +static void reg_change_pdev_for_config(struct wlan_objmgr_psoc *psoc, + void *object, void *arg) +{ + struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)object; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("psoc priv obj is NULL"); + return; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev private obj is NULL"); + return; + } + + pdev_priv_obj->dfs_enabled = psoc_priv_obj->dfs_enabled; + pdev_priv_obj->indoor_chan_enabled = psoc_priv_obj->indoor_chan_enabled; + pdev_priv_obj->force_ssc_disable_indoor_channel = + psoc_priv_obj->force_ssc_disable_indoor_channel; + pdev_priv_obj->band_capability = psoc_priv_obj->band_capability; + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + reg_send_scheduler_msg_sb(psoc, pdev); +} + +QDF_STATUS reg_set_config_vars(struct wlan_objmgr_psoc *psoc, + struct reg_config_vars config_vars) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + QDF_STATUS status; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("psoc priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + psoc_priv_obj->enable_11d_supp_original = + config_vars.enable_11d_support; + psoc_priv_obj->scan_11d_interval = config_vars.scan_11d_interval; + psoc_priv_obj->user_ctry_priority = config_vars.userspace_ctry_priority; + psoc_priv_obj->dfs_enabled = config_vars.dfs_enabled; + psoc_priv_obj->indoor_chan_enabled = config_vars.indoor_chan_enabled; + psoc_priv_obj->force_ssc_disable_indoor_channel = + config_vars.force_ssc_disable_indoor_channel; + psoc_priv_obj->band_capability = config_vars.band_capability; + psoc_priv_obj->restart_beaconing = config_vars.restart_beaconing; + psoc_priv_obj->enable_srd_chan_in_master_mode = + config_vars.enable_srd_chan_in_master_mode; + psoc_priv_obj->enable_11d_in_world_mode = + config_vars.enable_11d_in_world_mode; + psoc_priv_obj->retain_nol_across_regdmn_update = + config_vars.retain_nol_across_regdmn_update; + + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_REGULATORY_SB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("error taking psoc ref cnt"); + return status; + } + status = wlan_objmgr_iterate_obj_list(psoc, WLAN_PDEV_OP, + reg_change_pdev_for_config, + NULL, 1, WLAN_REGULATORY_SB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + + return status; +} + +#ifdef CONFIG_CHAN_FREQ_API +bool reg_is_disable_for_freq(struct wlan_objmgr_pdev *pdev, qdf_freq_t freq) +{ + enum channel_state ch_state; + + ch_state = reg_get_channel_state_for_freq(pdev, freq); + + return ch_state == CHANNEL_STATE_DISABLE; +} +#endif /* CONFIG_CHAN_FREQ_API */ + +#ifdef CONFIG_CHAN_NUM_API +bool reg_is_disable_ch(struct wlan_objmgr_pdev *pdev, uint8_t chan) +{ + enum channel_state ch_state; + + ch_state = reg_get_channel_state(pdev, chan); + + return ch_state == CHANNEL_STATE_DISABLE; +} +#endif /* CONFIG_CHAN_NUM_API */ + +bool reg_is_regdb_offloaded(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return false; + } + + return psoc_priv_obj->offload_enabled; +} + +void reg_program_mas_chan_list(struct wlan_objmgr_psoc *psoc, + struct regulatory_channel *reg_channels, + uint8_t *alpha2, + enum dfs_reg dfs_region) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + QDF_STATUS status; + uint32_t count; + enum direction dir; + uint32_t pdev_cnt; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return; + } + + qdf_mem_copy(psoc_priv_obj->cur_country, alpha2, + REG_ALPHA2_LEN); + reg_debug("set cur_country %.2s", psoc_priv_obj->cur_country); + for (count = 0; count < NUM_CHANNELS; count++) { + reg_channels[count].chan_num = channel_map[count].chan_num; + reg_channels[count].center_freq = + channel_map[count].center_freq; + reg_channels[count].nol_chan = false; + } + + for (pdev_cnt = 0; pdev_cnt < PSOC_MAX_PHY_REG_CAP; pdev_cnt++) { + qdf_mem_copy(psoc_priv_obj->mas_chan_params[pdev_cnt]. + mas_chan_list, reg_channels, + NUM_CHANNELS * sizeof(struct regulatory_channel)); + + psoc_priv_obj->mas_chan_params[pdev_cnt].dfs_region = + dfs_region; + } + + dir = SOUTHBOUND; + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_REGULATORY_SB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("error taking psoc ref cnt"); + return; + } + status = wlan_objmgr_iterate_obj_list( + psoc, WLAN_PDEV_OP, reg_propagate_mas_chan_list_to_pdev, + &dir, 1, WLAN_REGULATORY_SB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); +} + +enum country_src reg_get_cc_and_src(struct wlan_objmgr_psoc *psoc, + uint8_t *alpha2) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return SOURCE_UNKNOWN; + } + + qdf_mem_copy(alpha2, psoc_priv_obj->cur_country, REG_ALPHA2_LEN + 1); + + return psoc_priv_obj->cc_src; +} + +QDF_STATUS reg_get_regd_rules(struct wlan_objmgr_pdev *pdev, + struct reg_rule_info *reg_rules) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + if (!pdev) { + reg_err("pdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!pdev_priv_obj) { + reg_err("pdev priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&pdev_priv_obj->reg_rules_lock); + qdf_mem_copy(reg_rules, &pdev_priv_obj->reg_rules, + sizeof(struct reg_rule_info)); + qdf_spin_unlock_bh(&pdev_priv_obj->reg_rules_lock); + + return QDF_STATUS_SUCCESS; +} + +void reg_reset_ctry_pending_hints(struct wlan_regulatory_psoc_priv_obj + *soc_reg) +{ + uint8_t ctr; + + if (!soc_reg->offload_enabled) + return; + + for (ctr = 0; ctr < PSOC_MAX_PHY_REG_CAP; ctr++) { + soc_reg->new_user_ctry_pending[ctr] = false; + soc_reg->new_init_ctry_pending[ctr] = false; + soc_reg->new_11d_ctry_pending[ctr] = false; + soc_reg->world_country_pending[ctr] = false; + } +} + +QDF_STATUS reg_set_curr_country(struct wlan_regulatory_psoc_priv_obj *soc_reg, + struct cur_regulatory_info *regulat_info, + struct wlan_lmac_if_reg_tx_ops *tx_ops) +{ + struct wlan_objmgr_psoc *psoc = regulat_info->psoc; + uint8_t pdev_id; + uint8_t phy_num; + struct set_country country_code; + QDF_STATUS status; + + /* + * During SSR/WLAN restart ignore master channel list + * for all events and in the last event handling if + * current country and default country is different, send the last + * configured (soc_reg->cur_country) country. + */ + if ((regulat_info->num_phy != regulat_info->phy_id + 1) || + (!qdf_mem_cmp(soc_reg->cur_country, regulat_info->alpha2, + REG_ALPHA2_LEN))) + return QDF_STATUS_SUCCESS; + + /* + * Need firmware to send channel list event + * for all phys. Therefore set pdev_id to 0xFF + */ + pdev_id = 0xFF; + for (phy_num = 0; phy_num < regulat_info->num_phy; phy_num++) { + if (soc_reg->cc_src == SOURCE_USERSPACE) + soc_reg->new_user_ctry_pending[phy_num] = true; + else if (soc_reg->cc_src == SOURCE_11D) + soc_reg->new_11d_ctry_pending[phy_num] = true; + else + soc_reg->world_country_pending[phy_num] = true; + } + + qdf_mem_zero(&country_code, sizeof(country_code)); + qdf_mem_copy(country_code.country, soc_reg->cur_country, + sizeof(soc_reg->cur_country)); + country_code.pdev_id = pdev_id; + + if (!tx_ops || !tx_ops->set_country_code) { + reg_err("No regulatory tx_ops"); + status = QDF_STATUS_E_FAULT; + goto error; + } + + status = tx_ops->set_country_code(psoc, &country_code); + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("Failed to send country code to fw"); + goto error; + } + + reg_debug("Target CC: %.2s, Restore to Previous CC: %.2s", + regulat_info->alpha2, soc_reg->cur_country); + + return status; + +error: + reg_reset_ctry_pending_hints(soc_reg); + + return status; +} + +bool reg_ignore_default_country(struct wlan_regulatory_psoc_priv_obj *soc_reg, + struct cur_regulatory_info *regulat_info) +{ + uint8_t phy_num; + + if (!soc_reg->offload_enabled) + return false; + + if (soc_reg->cc_src == SOURCE_UNKNOWN) + return false; + + phy_num = regulat_info->phy_id; + if (soc_reg->new_user_ctry_pending[phy_num] || + soc_reg->new_init_ctry_pending[phy_num] || + soc_reg->new_11d_ctry_pending[phy_num] || + soc_reg->world_country_pending[phy_num]) + return false; + + return true; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_utils.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..91bd0cbaaab440cb419917a61e68bd25fdabc965 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_utils.h @@ -0,0 +1,655 @@ +/* + * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_utils.h + * This file provides prototypes for setting and getting regulatory variables. + */ + +#ifndef __REG_UTILS_H_ +#define __REG_UTILS_H_ + +#ifdef WLAN_FEATURE_DSRC +#define REG_DSRC_START_FREQ channel_map[MIN_DSRC_CHANNEL].center_freq +#define REG_DSRC_END_FREQ channel_map[MAX_DSRC_CHANNEL].center_freq +#endif + +#define REG_ETSI13_SRD_START_FREQ 5745 +#define REG_ETSI13_SRD_END_FREQ 5865 + +#ifdef CONFIG_CHAN_NUM_API +#define REG_IS_CHANNEL_VALID_5G_SBS(curchan, newchan) \ + ((curchan) > (newchan) ? \ + REG_CH_TO_FREQ(reg_get_chan_enum(curchan)) \ + - REG_CH_TO_FREQ(reg_get_chan_enum(newchan)) \ + > REG_SBS_SEPARATION_THRESHOLD : \ + REG_CH_TO_FREQ(reg_get_chan_enum(newchan)) \ + - REG_CH_TO_FREQ(reg_get_chan_enum(curchan)) \ + > REG_SBS_SEPARATION_THRESHOLD) +#endif /* CONFIG_LEGACY_REG_API */ + +/** + * reg_is_world_ctry_code() - Check if the given country code is WORLD regdomain + * @ctry_code: Country code value. + * + * Return: If country code is WORLD regdomain return true else false + */ +bool reg_is_world_ctry_code(uint16_t ctry_code); + +#if defined(CONFIG_REG_CLIENT) && defined(CONFIG_CHAN_NUM_API) +/** + * reg_chan_has_dfs_attribute() - check channel has dfs attribue or not + * @ch: channel number. + * + * This API get chan initial dfs attribue flag from regdomain + * + * Return: true if chan is dfs, otherwise false + */ +bool reg_chan_has_dfs_attribute(struct wlan_objmgr_pdev *pdev, uint8_t ch); + +/** + * reg_is_passive_or_disable_ch() - Check if the given channel is passive or + * disabled. + * @pdev: Pointer to physical dev + * @chan: Channel number + * + * Return: true if channel is passive or disabled, else false. + */ +bool reg_is_passive_or_disable_ch(struct wlan_objmgr_pdev *pdev, uint8_t chan); + +/** + * reg_is_disable_ch() - Check if the given channel in disable state + * @pdev: Pointer to pdev + * @chan: channel number + * + * Return: True if channel state is disabled, else false + */ +bool reg_is_disable_ch(struct wlan_objmgr_pdev *pdev, uint8_t chan); +#else +static inline bool +reg_chan_has_dfs_attribute(struct wlan_objmgr_pdev *pdev, uint8_t ch) +{ + return false; +} + +static inline bool +reg_is_passive_or_disable_ch(struct wlan_objmgr_pdev *pdev, uint8_t chan) +{ + return false; +} + +static inline bool +reg_is_disable_ch(struct wlan_objmgr_pdev *pdev, uint8_t chan) +{ + return false; +} +#endif /* defined(CONFIG_REG_CLIENT) && defined(CONFIG_CHAN_NUM_API) */ + +#if defined(CONFIG_REG_CLIENT) && defined(CONFIG_CHAN_FREQ_API) +/** + * reg_chan_has_dfs_attribute_for_freq() - check channel frequency has dfs + * attribue or not + * @freq: channel frequency. + * + * This API gets initial dfs attribute flag of the channel frequency from + * regdomain + * + * Return: true if channel frequency is dfs, otherwise false + */ +bool reg_chan_has_dfs_attribute_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq); +/** + * reg_is_passive_or_disable_for_freq() - Check if the given channel is + * passive or disabled. + * @pdev: Pointer to physical dev + * @chan: Channel frequency + * + * Return: true if channel frequency is passive or disabled, else false. + */ +bool reg_is_passive_or_disable_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq); +/** + * reg_is_disable_for_freq() - Check if the given channel frequency in + * disable state + * @pdev: Pointer to pdev + * @freq: Channel frequency + * + * Return: True if channel state is disabled, else false + */ +bool reg_is_disable_for_freq(struct wlan_objmgr_pdev *pdev, qdf_freq_t freq); +#else +static inline bool +reg_chan_has_dfs_attribute_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + return false; +} + +static inline bool +reg_is_passive_or_disable_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + return false; +} + +static inline bool +reg_is_disable_for_freq(struct wlan_objmgr_pdev *pdev, qdf_freq_t freq) +{ + return false; +} +#endif /* defined(CONFIG_REG_CLIENT) && defined(CONFIG_CHAN_FREQ_API) */ + +#ifdef DISABLE_CHANNEL_LIST +/** + * reg_disable_cached_channels() - Disable cached channels + * @pdev: The physical dev to cache the channels for + */ +QDF_STATUS reg_disable_cached_channels(struct wlan_objmgr_pdev *pdev); +/** + * reg_restore_cached_channels() - Restore disabled cached channels + * @pdev: The physical dev to cache the channels for + */ +QDF_STATUS reg_restore_cached_channels(struct wlan_objmgr_pdev *pdev); +#else +static inline +QDF_STATUS reg_restore_cached_channels(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +static inline +QDF_STATUS reg_disable_cached_channels(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* DISABLE_CHANNEL_LIST */ + +#if defined(DISABLE_CHANNEL_LIST) && defined(CONFIG_CHAN_FREQ_API) +/** + * reg_cache_channel_freq_state() - Cache the current state of the channels + * based on the channel center frequency + * @pdev: The physical dev to cache the channels for + * @channel_list: List of the channels for which states needs to be cached + * @num_channels: Number of channels in the list + * + */ +QDF_STATUS reg_cache_channel_freq_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, + uint32_t num_channels); +#else +static inline +QDF_STATUS reg_cache_channel_freq_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, + uint32_t num_channels) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* defined(DISABLE_CHANNEL_LIST) && defined(CONFIG_CHAN_FREQ_API) */ + +#if defined(DISABLE_CHANNEL_LIST) && defined(CONFIG_CHAN_NUM_API) +/** + * reg_cache_channel_state() - Cache the current state of the channels + * @pdev: The physical dev to cache the channels for + * @channel_list: List of the channels for which states needs to be cached + * @num_channels: Number of channels in the list + * + */ +QDF_STATUS reg_cache_channel_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, + uint32_t num_channels); +#else +static inline +QDF_STATUS reg_cache_channel_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, + uint32_t num_channels) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* defined (DISABLE_CHANNEL_LIST) && defined(CONFIG_CHAN_NUM_API) */ + +#ifdef CONFIG_REG_CLIENT +/** + * reg_set_band() - Sets the band information for the PDEV + * @pdev: The physical dev to set the band for + * @band_bitmap: The set band parameters to configure for the physical device + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_set_band(struct wlan_objmgr_pdev *pdev, uint32_t band_bitmap); + +/** + * reg_get_band() - Get the band information for the PDEV + * @pdev: The physical dev to get the band for + * @band_bitmap: The band parameters of the physical device + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_get_band(struct wlan_objmgr_pdev *pdev, uint32_t *band_bitmap); + +/** + * reg_set_fcc_constraint() - Apply fcc constraints on channels 12/13 + * @pdev: The physical dev to set the band for + * + * This function reduces the transmit power on channels 12 and 13, to comply + * with FCC regulations in the USA. + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_set_fcc_constraint(struct wlan_objmgr_pdev *pdev, + bool fcc_constraint); + +/** + * reg_get_fcc_constraint() - Check FCC constraint on given frequency + * @pdev: physical dev to get + * @freq: frequency to be checked + * + * Return: If FCC constraint is applied on given frequency return true + * else return false. + */ +bool reg_get_fcc_constraint(struct wlan_objmgr_pdev *pdev, uint32_t freq); + +/** + * reg_read_current_country() - Get the current regulatory country + * @psoc: The physical SoC to get current country from + * @country_code: the buffer to populate the country code into + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_read_current_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code); + +/** + * reg_set_default_country() - Set the default regulatory country + * @psoc: The physical SoC to set default country for + * @req: The country information to configure + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_set_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country); + +/** + * reg_is_world_alpha2 - is reg world mode + * @alpha2: country code pointer + * + * Return: true or false + */ +bool reg_is_world_alpha2(uint8_t *alpha2); + +/** + * reg_is_us_alpha2 - is US country code + * @alpha2: country code pointer + * + * Return: true or false + */ +bool reg_is_us_alpha2(uint8_t *alpha2); + +/** + * reg_set_country() - Set the current regulatory country + * @pdev: pdev device for country information + * @country: country value + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_set_country(struct wlan_objmgr_pdev *pdev, uint8_t *country); + +/** + * reg_reset_country() - Reset the regulatory country to default + * @psoc: The physical SoC to reset country for + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_reset_country(struct wlan_objmgr_psoc *psoc); + +/** + * reg_get_domain_from_country_code() - Get regdomain from country code + * @reg_domain_ptr: Pointer to save regdomain + * @country_alpha2: country string + * @source: Country code source + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_get_domain_from_country_code(v_REGDOMAIN_t *reg_domain_ptr, + const uint8_t *country_alpha2, + enum country_src source); + +/** + * reg_set_config_vars () - set configration variables + * @psoc: psoc ptr + * @config_vars: configuration struct + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_set_config_vars(struct wlan_objmgr_psoc *psoc, + struct reg_config_vars config_vars); + +/** + * reg_is_regdb_offloaded() - is regdb offloaded + * @psoc: Pointer to psoc object + * + * Return: true if regdb is offloaded, else false + */ +bool reg_is_regdb_offloaded(struct wlan_objmgr_psoc *psoc); + +/** + * reg_program_mas_chan_list() - Program the master channel list + * @psoc: Pointer to psoc structure + * @reg_channels: Pointer to reg channels + * @alpha2: country string + * @dfs_region: DFS region + */ +void reg_program_mas_chan_list(struct wlan_objmgr_psoc *psoc, + struct regulatory_channel *reg_channels, + uint8_t *alpha2, enum dfs_reg dfs_region); + +/** + * reg_get_regd_rules() - provides the reg domain rules info + * @pdev: pdev pointer + * @reg_rules: regulatory rules + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_get_regd_rules(struct wlan_objmgr_pdev *pdev, + struct reg_rule_info *reg_rules); + +/** + * reg_get_cc_and_src() - Get country string and country source + * @psoc: Pointer to psoc + * @alpha2: Pointer to save country string + * + * Return: country_src + */ +enum country_src reg_get_cc_and_src(struct wlan_objmgr_psoc *psoc, + uint8_t *alpha2); + +/** + * reg_reset_ctry_pending_hints() - Reset all country pending hints + * @soc_reg: regulatory private object + * + * Return: None + */ +void +reg_reset_ctry_pending_hints(struct wlan_regulatory_psoc_priv_obj *soc_reg); + +/** + * reg_set_curr_country() - Set current country update + * @soc_reg: regulatory private object + * @regulat_info: regulatory info from firmware + * @tx_ops: send operations for regulatory component + * + * During SSR or restart of wlan modules after interface change timer phase, + * this function is used to send the recent user/11d country code to firmware. + * + * Return: QDF_STATUS_SUCCESS if correct country is configured + * else return failure + * error code. + */ +QDF_STATUS reg_set_curr_country( + struct wlan_regulatory_psoc_priv_obj *soc_reg, + struct cur_regulatory_info *regulat_info, + struct wlan_lmac_if_reg_tx_ops *tx_ops); + +/** + * reg_ignore_default_country() - Ignore default country update + * @soc_reg: regulatory private object + * @regulat_info: regulatory info from firmware + * + * During SSR or restart of wlan modules after interface change timer phase, + * this function is used to ignore default country code from firmware. + * + * Return: If default country needs to be ignored return true else false. + */ +bool reg_ignore_default_country(struct wlan_regulatory_psoc_priv_obj *soc_reg, + struct cur_regulatory_info *regulat_info); + +#else +static inline QDF_STATUS reg_read_current_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code) +{ + return QDF_STATUS_SUCCESS; +} + +static inline bool reg_is_world_alpha2(uint8_t *alpha2) +{ + return false; +} + +static inline bool reg_is_us_alpha2(uint8_t *alpha2) +{ + return false; +} + +static inline QDF_STATUS reg_set_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS reg_reset_country(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS reg_get_domain_from_country_code( + v_REGDOMAIN_t *reg_domain_ptr, const uint8_t *country_alpha2, + enum country_src source) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS reg_set_config_vars(struct wlan_objmgr_psoc *psoc, + struct reg_config_vars config_vars) +{ + return QDF_STATUS_SUCCESS; +} + +static inline bool reg_is_regdb_offloaded(struct wlan_objmgr_psoc *psoc) +{ + return false; +} + +static inline void reg_program_mas_chan_list( + struct wlan_objmgr_psoc *psoc, + struct regulatory_channel *reg_channels, + uint8_t *alpha2, enum dfs_reg dfs_region) +{ +} + +static inline QDF_STATUS reg_get_regd_rules(struct wlan_objmgr_pdev *pdev, + struct reg_rule_info *reg_rules) +{ + return QDF_STATUS_SUCCESS; +} + +static inline enum country_src reg_get_cc_and_src(struct wlan_objmgr_psoc *psoc, + uint8_t *alpha2) +{ + return SOURCE_UNKNOWN; +} + +static inline void +reg_reset_ctry_pending_hints(struct wlan_regulatory_psoc_priv_obj *soc_reg) +{ +} + +static inline QDF_STATUS reg_set_curr_country( + struct wlan_regulatory_psoc_priv_obj *soc_reg, + struct cur_regulatory_info *regulat_info, + struct wlan_lmac_if_reg_tx_ops *tx_ops) +{ + return QDF_STATUS_SUCCESS; +} + +static inline +bool reg_ignore_default_country(struct wlan_regulatory_psoc_priv_obj *soc_reg, + struct cur_regulatory_info *regulat_info) +{ + return false; +} + +static inline +QDF_STATUS reg_set_fcc_constraint(struct wlan_objmgr_pdev *pdev, + bool fcc_constraint) +{ + return QDF_STATUS_SUCCESS; +} + +static inline +bool reg_get_fcc_constraint(struct wlan_objmgr_pdev *pdev, uint32_t freq) +{ + return false; +} + +#endif + +#if defined(WLAN_FEATURE_DSRC) && defined(CONFIG_REG_CLIENT) +/** + * reg_is_dsrc_freq () - Checks the channel frequency is DSRC or not + * @freq: Channel center frequency + * @pdev: pdev ptr + * + * Return: true or false + */ +#ifdef CONFIG_CHAN_FREQ_API +bool reg_is_dsrc_freq(qdf_freq_t freq); +#endif /* CONFIG_CHAN_FREQ_API*/ + +#ifdef CONFIG_CHAN_NUM_API +/** + * reg_is_dsrc_chan () - Checks the channel for DSRC or not + * @chan: channel + * @pdev: pdev ptr + * + * Return: true or false + */ +bool reg_is_dsrc_chan(struct wlan_objmgr_pdev *pdev, uint8_t chan); +#endif /* CONFIG_CHAN_NUM_API */ + +static inline bool reg_is_etsi13_srd_chan(struct wlan_objmgr_pdev *pdev, + uint8_t chan) +{ + return false; +} + +static inline bool reg_is_etsi13_regdmn(struct wlan_objmgr_pdev *pdev) +{ + return false; +} + +/** + * reg_is_etsi13_srd_chan_for_freq() - Checks the channel for ETSI13 srd ch + * frequency or not + * @freq: Channel center frequency + * @pdev: pdev ptr + * + * Return: true or false + */ +static inline bool +reg_is_etsi13_srd_chan_for_freq(struct wlan_objmgr_pdev *pdev, uint16_t freq) +{ + return false; +} + +static inline bool +reg_is_etsi13_srd_chan_allowed_master_mode(struct wlan_objmgr_pdev *pdev) +{ + return true; +} +#elif defined(CONFIG_REG_CLIENT) +static inline bool reg_is_dsrc_chan(struct wlan_objmgr_pdev *pdev, + uint8_t chan) +{ + return false; +} + +static inline bool reg_is_dsrc_freq(qdf_freq_t freq) +{ + return false; +} + +#ifdef CONFIG_CHAN_FREQ_API +bool reg_is_etsi13_srd_chan_for_freq(struct wlan_objmgr_pdev + *pdev, uint16_t freq); +#endif /*CONFIG_CHAN_FREQ_API */ + +/** + * reg_is_etsi13_regdmn () - Checks if the current reg domain is ETSI13 or not + * @pdev: pdev ptr + * + * Return: true or false + */ +bool reg_is_etsi13_regdmn(struct wlan_objmgr_pdev *pdev); + +#ifdef CONFIG_CHAN_NUM_API +/** + * reg_is_etsi13_srd_chan () - Checks the channel for ETSI13 srd ch or not + * @chan: channel + * @pdev: pdev ptr + * + * Return: true or false + */ +bool reg_is_etsi13_srd_chan(struct wlan_objmgr_pdev *pdev, uint8_t chan); +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * reg_is_etsi13_srd_chan_allowed_master_mode() - Checks if regdmn is ETSI13 + * and SRD channels are allowed in master mode or not. + * + * @pdev: pdev ptr + * + * Return: true or false + */ +bool reg_is_etsi13_srd_chan_allowed_master_mode(struct wlan_objmgr_pdev *pdev); +#else +static inline bool reg_is_dsrc_chan(struct wlan_objmgr_pdev *pdev, + uint8_t chan) +{ + return false; +} + +static inline bool reg_is_dsrc_freq(qdf_freq_t freq) +{ + return false; +} + +static inline +bool reg_is_etsi13_srd_chan_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t freq) +{ + return false; +} + +static inline bool reg_is_etsi13_regdmn(struct wlan_objmgr_pdev *pdev) +{ + return false; +} + +static inline bool +reg_is_etsi13_srd_chan_allowed_master_mode(struct wlan_objmgr_pdev *pdev) +{ + return false; +} + +static inline bool reg_is_etsi13_srd_chan(struct wlan_objmgr_pdev *pdev, + uint8_t chan) +{ + return false; +} +#endif + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/reg_services_public_struct.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/reg_services_public_struct.h new file mode 100644 index 0000000000000000000000000000000000000000..2e3f55ee1b592bd5e644a93933b76f45f28f95f2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/reg_services_public_struct.h @@ -0,0 +1,1136 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: reg_services_public_struct.h + * This file contains regulatory data structures + */ + +#ifndef __REG_SERVICES_PUBLIC_STRUCT_H_ +#define __REG_SERVICES_PUBLIC_STRUCT_H_ + +#define REG_SBS_SEPARATION_THRESHOLD 100 + +#ifdef CONFIG_BAND_6GHZ +#define REG_MAX_CHANNELS_PER_OPERATING_CLASS 70 +#else +#define REG_MAX_CHANNELS_PER_OPERATING_CLASS 25 +#endif + +#define REG_MAX_SUPP_OPER_CLASSES 32 +#define REG_MAX_CHAN_CHANGE_CBKS 30 +#define REG_INVALID_TXPOWER 255 +#define MAX_STA_VDEV_CNT 4 +#define INVALID_VDEV_ID 0xFF +#define INVALID_CHANNEL_NUM 0x0 +#define CH_AVOID_MAX_RANGE 4 +#define REG_ALPHA2_LEN 2 +#define MAX_REG_RULES 10 + +#define REGULATORY_CHAN_DISABLED BIT(0) +#define REGULATORY_CHAN_NO_IR BIT(1) +#define REGULATORY_CHAN_RADAR BIT(3) +#define REGULATORY_CHAN_NO_OFDM BIT(6) +#define REGULATORY_CHAN_INDOOR_ONLY BIT(9) +#define REGULATORY_CHAN_AFC BIT(13) + +#define REGULATORY_CHAN_NO_HT40 BIT(4) +#define REGULATORY_CHAN_NO_80MHZ BIT(7) +#define REGULATORY_CHAN_NO_160MHZ BIT(8) +#define REGULATORY_CHAN_NO_20MHZ BIT(11) +#define REGULATORY_CHAN_NO_10MHZ BIT(12) +#define REGULATORY_CHAN_INVALID BIT(14) + +#define REGULATORY_PHYMODE_NO11A BIT(0) +#define REGULATORY_PHYMODE_NO11B BIT(1) +#define REGULATORY_PHYMODE_NO11G BIT(2) +#define REGULATORY_CHAN_NO11N BIT(3) +#define REGULATORY_PHYMODE_NO11AC BIT(4) +#define REGULATORY_PHYMODE_NO11AX BIT(5) + +#define BW_80_MHZ 80 +#define BW_160_MHZ 160 +#define BW_40_MHZ 40 + +/** + * enum dfs_reg - DFS region + * @DFS_UNINIT_REGION: un-initialized region + * @DFS_FCC_REGION: FCC region + * @DFS_ETSI_REGION: ETSI region + * @DFS_MKK_REGION: MKK region + * @DFS_CN_REGION: China region + * @DFS_KR_REGION: Korea region + * @DFS_MKK_REGION: MKKN region + * that supports updated W53 RADAR pattern + * detection. + * @DFS_UNDEF_REGION: Undefined region + */ + +enum dfs_reg { + DFS_UNINIT_REGION = 0, + DFS_FCC_REGION = 1, + DFS_ETSI_REGION = 2, + DFS_MKK_REGION = 3, + DFS_CN_REGION = 4, + DFS_KR_REGION = 5, + DFS_MKKN_REGION = 6, + DFS_UNDEF_REGION = 0xFFFF, +}; + +/** enum op_class_table_num + * OP_CLASS_US- Class corresponds to US + * OP_CLASS_EU- Class corresponds to EU + * OP_CLASS_JAPAN- Class corresponds to JAPAN + * OP_CLASS_GLOBAL- Class corresponds to GLOBAL + */ +enum op_class_table_num { + OP_CLASS_US = 1, + OP_CLASS_EU, + OP_CLASS_JAPAN, + OP_CLASS_GLOBAL +}; + +/** + * enum channel_enum - channel enumeration + * @CHAN_ENUM_2412: channel with freq 2412 + * @CHAN_ENUM_2417: channel with freq 2417 + * @CHAN_ENUM_2422: channel with freq 2422 + * @CHAN_ENUM_2427: channel with freq 2427 + * @CHAN_ENUM_2432: channel with freq 2432 + * @CHAN_ENUM_2437: channel with freq 2437 + * @CHAN_ENUM_2442: channel with freq 2442 + * @CHAN_ENUM_2447: channel with freq 2447 + * @CHAN_ENUM_2452: channel with freq 2452 + * @CHAN_ENUM_2457: channel with freq 2457 + * @CHAN_ENUM_2462: channel with freq 2462 + * @CHAN_ENUM_2467: channel with freq 2467 + * @CHAN_ENUM_2472: channel with freq 2472 + * @CHAN_ENUM_2484: channel with freq 2484 + * @CHAN_ENUM_4912: channel with freq 4912 + * @CHAN_ENUM_4915: channel with freq 4915 + * @CHAN_ENUM_4917: channel with freq 4917 + * @CHAN_ENUM_4920: channel with freq 4920 + * @CHAN_ENUM_4922: channel with freq 4922 + * @CHAN_ENUM_4925: channel with freq 4925 + * @CHAN_ENUM_4927: channel with freq 4927 + * @CHAN_ENUM_4932: channel with freq 4932 + * @CHAN_ENUM_4935: channel with freq 4935 + * @CHAN_ENUM_4937: channel with freq 4937 + * @CHAN_ENUM_4940: channel with freq 4940 + * @CHAN_ENUM_4942: channel with freq 4942 + * @CHAN_ENUM_4945: channel with freq 4945 + * @CHAN_ENUM_4947: channel with freq 4947 + * @CHAN_ENUM_4950: channel with freq 4950 + * @CHAN_ENUM_4952: channel with freq 4952 + * @CHAN_ENUM_4955: channel with freq 4955 + * @CHAN_ENUM_4957: channel with freq 4957 + * @CHAN_ENUM_4960: channel with freq 4960 + * @CHAN_ENUM_4962: channel with freq 4962 + * @CHAN_ENUM_4965: channel with freq 4965 + * @CHAN_ENUM_4967: channel with freq 4967 + * @CHAN_ENUM_4970: channel with freq 4970 + * @CHAN_ENUM_4972: channel with freq 4972 + * @CHAN_ENUM_4975: channel with freq 4975 + * @CHAN_ENUM_4977: channel with freq 4977 + * @CHAN_ENUM_4980: channel with freq 4980 + * @CHAN_ENUM_4982: channel with freq 4982 + * @CHAN_ENUM_4985: channel with freq 4985 + * @CHAN_ENUM_4987: channel with freq 4987 + * @CHAN_ENUM_5032: channel with freq 5032 + * @CHAN_ENUM_5035: channel with freq 5035 + * @CHAN_ENUM_5037: channel with freq 5037 + * @CHAN_ENUM_5040: channel with freq 5040 + * @CHAN_ENUM_5042: channel with freq 5042 + * @CHAN_ENUM_5045: channel with freq 5045 + * @CHAN_ENUM_5047: channel with freq 5047 + * @CHAN_ENUM_5052: channel with freq 5052 + * @CHAN_ENUM_5055: channel with freq 5055 + * @CHAN_ENUM_5057: channel with freq 5057 + * @CHAN_ENUM_5060: channel with freq 5060 + * @CHAN_ENUM_5080: channel with freq 5080 + * @CHAN_ENUM_5180: channel with freq 5180 + * @CHAN_ENUM_5200: channel with freq 5200 + * @CHAN_ENUM_5220: channel with freq 5220 + * @CHAN_ENUM_5240: channel with freq 5240 + * @CHAN_ENUM_5260: channel with freq 5260 + * @CHAN_ENUM_5280: channel with freq 5280 + * @CHAN_ENUM_5300: channel with freq 5300 + * @CHAN_ENUM_5320: channel with freq 5320 + * @CHAN_ENUM_5500: channel with freq 5500 + * @CHAN_ENUM_5520: channel with freq 5520 + * @CHAN_ENUM_5540: channel with freq 5540 + * @CHAN_ENUM_5560: channel with freq 5560 + * @CHAN_ENUM_5580: channel with freq 5580 + * @CHAN_ENUM_5600: channel with freq 5600 + * @CHAN_ENUM_5620: channel with freq 5620 + * @CHAN_ENUM_5640: channel with freq 5640 + * @CHAN_ENUM_5660: channel with freq 5660 + * @CHAN_ENUM_5680: channel with freq 5680 + * @CHAN_ENUM_5700: channel with freq 5700 + * @CHAN_ENUM_5720: channel with freq 5720 + * @CHAN_ENUM_5745: channel with freq 5745 + * @CHAN_ENUM_5765: channel with freq 5765 + * @CHAN_ENUM_5785: channel with freq 5785 + * @CHAN_ENUM_5805: channel with freq 5805 + * @CHAN_ENUM_5825: channel with freq 5825 + * @CHAN_ENUM_5845: channel with freq 5845 + * @CHAN_ENUM_5850: channel with freq 5850 + * @CHAN_ENUM_5855: channel with freq 5855 + * @CHAN_ENUM_5860: channel with freq 5860 + * @CHAN_ENUM_5865: channel with freq 5865 + * @CHAN_ENUM_5870: channel with freq 5870 + * @CHAN_ENUM_5875: channel with freq 5875 + * @CHAN_ENUM_5880: channel with freq 5880 + * @CHAN_ENUM_5885: channel with freq 5885 + * @CHAN_ENUM_5890: channel with freq 5890 + * @CHAN_ENUM_5895: channel with freq 5895 + * @CHAN_ENUM_5900: channel with freq 5900 + * @CHAN_ENUM_5905: channel with freq 5905 + * @CHAN_ENUM_5910: channel with freq 5910 + * @CHAN_ENUM_5915: channel with freq 5915 + * @CHAN_ENUM_5920: channel with freq 5920 + * @CHAN_ENUM_5945: channel with freq 5945 + * @CHAN_ENUM_5965: channel with freq 5965 + * @CHAN_ENUM_5985: channel with freq 5985 + * @CHAN_ENUM_6005: channel with freq 6005 + * @CHAN_ENUM_6025: channel with freq 6025 + * @CHAN_ENUM_6045: channel with freq 6045 + * @CHAN_ENUM_6065: channel with freq 6065 + * @CHAN_ENUM_6085: channel with freq 6085 + * @CHAN_ENUM_6105: channel with freq 6105 + * @CHAN_ENUM_6125: channel with freq 6125 + * @CHAN_ENUM_6145: channel with freq 6145 + * @CHAN_ENUM_6165: channel with freq 6165 + * @CHAN_ENUM_6185: channel with freq 6185 + * @CHAN_ENUM_6205: channel with freq 6205 + * @CHAN_ENUM_6225: channel with freq 6225 + * @CHAN_ENUM_6245: channel with freq 6245 + * @CHAN_ENUM_6265: channel with freq 6265 + * @CHAN_ENUM_6285: channel with freq 6285 + * @CHAN_ENUM_6305: channel with freq 6305 + * @CHAN_ENUM_6325: channel with freq 6325 + * @CHAN_ENUM_6345: channel with freq 6345 + * @CHAN_ENUM_6365: channel with freq 6365 + * @CHAN_ENUM_6385: channel with freq 6385 + * @CHAN_ENUM_6405: channel with freq 6405 + * @CHAN_ENUM_6425: channel with freq 6425 + * @CHAN_ENUM_6445: channel with freq 6445 + * @CHAN_ENUM_6465: channel with freq 6465 + * @CHAN_ENUM_6485: channel with freq 6485 + * @CHAN_ENUM_6505: channel with freq 6505 + * @CHAN_ENUM_6525: channel with freq 6525 + * @CHAN_ENUM_6545: channel with freq 6545 + * @CHAN_ENUM_6565: channel with freq 6565 + * @CHAN_ENUM_6585: channel with freq 6585 + * @CHAN_ENUM_6605: channel with freq 6605 + * @CHAN_ENUM_6625: channel with freq 6625 + * @CHAN_ENUM_6645: channel with freq 6645 + * @CHAN_ENUM_6665: channel with freq 6665 + * @CHAN_ENUM_6685: channel with freq 6685 + * @CHAN_ENUM_6705: channel with freq 6705 + * @CHAN_ENUM_6725: channel with freq 6725 + * @CHAN_ENUM_6745: channel with freq 6745 + * @CHAN_ENUM_6765: channel with freq 6765 + * @CHAN_ENUM_6785: channel with freq 6785 + * @CHAN_ENUM_6805: channel with freq 6805 + * @CHAN_ENUM_6825: channel with freq 6825 + * @CHAN_ENUM_6845: channel with freq 6845 + * @CHAN_ENUM_6865: channel with freq 6865 + * @CHAN_ENUM_6885: channel with freq 6885 + * @CHAN_ENUM_6905: channel with freq 6905 + * @CHAN_ENUM_6925: channel with freq 6925 + * @CHAN_ENUM_6945: channel with freq 6945 + * @CHAN_ENUM_6965: channel with freq 6965 + * @CHAN_ENUM_6985: channel with freq 6985 + * @CHAN_ENUM_7005: channel with freq 7005 + * @CHAN_ENUM_7025: channel with freq 7025 + * @CHAN_ENUM_7045: channel with freq 7045 + * @CHAN_ENUM_7065: channel with freq 7065 + * @CHAN_ENUM_7085: channel with freq 7085 + * @CHAN_ENUM_7105: channel with freq 7105 + */ +enum channel_enum { + CHAN_ENUM_2412, + CHAN_ENUM_2417, + CHAN_ENUM_2422, + CHAN_ENUM_2427, + CHAN_ENUM_2432, + CHAN_ENUM_2437, + CHAN_ENUM_2442, + CHAN_ENUM_2447, + CHAN_ENUM_2452, + CHAN_ENUM_2457, + CHAN_ENUM_2462, + CHAN_ENUM_2467, + CHAN_ENUM_2472, + CHAN_ENUM_2484, + + CHAN_ENUM_4912, + CHAN_ENUM_4915, + CHAN_ENUM_4917, + CHAN_ENUM_4920, + CHAN_ENUM_4922, + CHAN_ENUM_4925, + CHAN_ENUM_4927, + CHAN_ENUM_4932, + CHAN_ENUM_4935, + CHAN_ENUM_4937, + CHAN_ENUM_4940, + CHAN_ENUM_4942, + CHAN_ENUM_4945, + CHAN_ENUM_4947, + CHAN_ENUM_4950, + CHAN_ENUM_4952, + CHAN_ENUM_4955, + CHAN_ENUM_4957, + CHAN_ENUM_4960, + CHAN_ENUM_4962, + CHAN_ENUM_4965, + CHAN_ENUM_4967, + CHAN_ENUM_4970, + CHAN_ENUM_4972, + CHAN_ENUM_4975, + CHAN_ENUM_4977, + CHAN_ENUM_4980, + CHAN_ENUM_4982, + CHAN_ENUM_4985, + CHAN_ENUM_4987, + CHAN_ENUM_5032, + CHAN_ENUM_5035, + CHAN_ENUM_5037, + CHAN_ENUM_5040, + CHAN_ENUM_5042, + CHAN_ENUM_5045, + CHAN_ENUM_5047, + CHAN_ENUM_5052, + CHAN_ENUM_5055, + CHAN_ENUM_5057, + CHAN_ENUM_5060, + CHAN_ENUM_5080, + + CHAN_ENUM_5180, + CHAN_ENUM_5200, + CHAN_ENUM_5220, + CHAN_ENUM_5240, + CHAN_ENUM_5260, + CHAN_ENUM_5280, + CHAN_ENUM_5300, + CHAN_ENUM_5320, + CHAN_ENUM_5500, + CHAN_ENUM_5520, + CHAN_ENUM_5540, + CHAN_ENUM_5560, + CHAN_ENUM_5580, + CHAN_ENUM_5600, + CHAN_ENUM_5620, + CHAN_ENUM_5640, + CHAN_ENUM_5660, + CHAN_ENUM_5680, + CHAN_ENUM_5700, + CHAN_ENUM_5720, + CHAN_ENUM_5745, + CHAN_ENUM_5765, + CHAN_ENUM_5785, + CHAN_ENUM_5805, + CHAN_ENUM_5825, + CHAN_ENUM_5845, + + CHAN_ENUM_5850, + CHAN_ENUM_5855, + CHAN_ENUM_5860, + CHAN_ENUM_5865, + CHAN_ENUM_5870, + CHAN_ENUM_5875, + CHAN_ENUM_5880, + CHAN_ENUM_5885, + CHAN_ENUM_5890, + CHAN_ENUM_5895, + CHAN_ENUM_5900, + CHAN_ENUM_5905, + CHAN_ENUM_5910, + CHAN_ENUM_5915, + CHAN_ENUM_5920, +#ifdef CONFIG_BAND_6GHZ + CHAN_ENUM_5945, + CHAN_ENUM_5965, + CHAN_ENUM_5985, + CHAN_ENUM_6005, + CHAN_ENUM_6025, + CHAN_ENUM_6045, + CHAN_ENUM_6065, + CHAN_ENUM_6085, + CHAN_ENUM_6105, + CHAN_ENUM_6125, + CHAN_ENUM_6145, + CHAN_ENUM_6165, + CHAN_ENUM_6185, + CHAN_ENUM_6205, + CHAN_ENUM_6225, + CHAN_ENUM_6245, + CHAN_ENUM_6265, + CHAN_ENUM_6285, + CHAN_ENUM_6305, + CHAN_ENUM_6325, + CHAN_ENUM_6345, + CHAN_ENUM_6365, + CHAN_ENUM_6385, + CHAN_ENUM_6405, + CHAN_ENUM_6425, + CHAN_ENUM_6445, + CHAN_ENUM_6465, + CHAN_ENUM_6485, + CHAN_ENUM_6505, + CHAN_ENUM_6525, + CHAN_ENUM_6545, + CHAN_ENUM_6565, + CHAN_ENUM_6585, + CHAN_ENUM_6605, + CHAN_ENUM_6625, + CHAN_ENUM_6645, + CHAN_ENUM_6665, + CHAN_ENUM_6685, + CHAN_ENUM_6705, + CHAN_ENUM_6725, + CHAN_ENUM_6745, + CHAN_ENUM_6765, + CHAN_ENUM_6785, + CHAN_ENUM_6805, + CHAN_ENUM_6825, + CHAN_ENUM_6845, + CHAN_ENUM_6865, + CHAN_ENUM_6885, + CHAN_ENUM_6905, + CHAN_ENUM_6925, + CHAN_ENUM_6945, + CHAN_ENUM_6965, + CHAN_ENUM_6985, + CHAN_ENUM_7005, + CHAN_ENUM_7025, + CHAN_ENUM_7045, + CHAN_ENUM_7065, + CHAN_ENUM_7085, + CHAN_ENUM_7105, +#endif /* CONFIG_BAND_6GHZ */ + + NUM_CHANNELS, + + MIN_24GHZ_CHANNEL = CHAN_ENUM_2412, + MAX_24GHZ_CHANNEL = CHAN_ENUM_2484, + NUM_24GHZ_CHANNELS = (MAX_24GHZ_CHANNEL - MIN_24GHZ_CHANNEL + 1), + + MIN_49GHZ_CHANNEL = CHAN_ENUM_4912, + MAX_49GHZ_CHANNEL = CHAN_ENUM_5080, + NUM_49GHZ_CHANNELS = (MAX_49GHZ_CHANNEL - MIN_49GHZ_CHANNEL + 1), + + MIN_5GHZ_CHANNEL = CHAN_ENUM_5180, + MAX_5GHZ_CHANNEL = CHAN_ENUM_5920, + NUM_5GHZ_CHANNELS = (MAX_5GHZ_CHANNEL - MIN_5GHZ_CHANNEL + 1), + + MIN_DSRC_CHANNEL = CHAN_ENUM_5850, + MAX_DSRC_CHANNEL = CHAN_ENUM_5920, + NUM_DSRC_CHANNELS = (MAX_DSRC_CHANNEL - MIN_DSRC_CHANNEL + 1), + + INVALID_CHANNEL = 0xBAD, + +#ifdef DISABLE_UNII_SHARED_BANDS + MIN_UNII_1_BAND_CHANNEL = CHAN_ENUM_5180, + MAX_UNII_1_BAND_CHANNEL = CHAN_ENUM_5240, + NUM_UNII_1_BAND_CHANNELS = (MAX_UNII_1_BAND_CHANNEL - + MIN_UNII_1_BAND_CHANNEL + 1), + + MIN_UNII_2A_BAND_CHANNEL = CHAN_ENUM_5260, + MAX_UNII_2A_BAND_CHANNEL = CHAN_ENUM_5320, + NUM_UNII_2A_BAND_CHANNELS = (MAX_UNII_2A_BAND_CHANNEL - + MIN_UNII_2A_BAND_CHANNEL + 1), +#endif + +#ifdef CONFIG_BAND_6GHZ + MIN_6GHZ_CHANNEL = CHAN_ENUM_5945, + MAX_6GHZ_CHANNEL = CHAN_ENUM_7105, + NUM_6GHZ_CHANNELS = (MAX_6GHZ_CHANNEL - MIN_6GHZ_CHANNEL + 1), +#else + MIN_6GHZ_CHANNEL = INVALID_CHANNEL, + MAX_6GHZ_CHANNEL = INVALID_CHANNEL, + NUM_6GHZ_CHANNELS = 0, +#endif /* CONFIG_BAND_6GHZ */ +}; + +/** + * enum channel_state - channel state + * @CHANNEL_STATE_DISABLE: disabled state + * @CHANNEL_STATE_PASSIVE: passive state + * @CHANNEL_STATE_DFS: dfs state + * @CHANNEL_STATE_ENABLE: enabled state + * @CHANNEL_STATE_INVALID: invalid state + */ +enum channel_state { + CHANNEL_STATE_DISABLE, + CHANNEL_STATE_PASSIVE, + CHANNEL_STATE_DFS, + CHANNEL_STATE_ENABLE, + CHANNEL_STATE_INVALID, +}; + +/** + * enum reg_domain: reg domain + * @REGDOMAIN_FCC: FCC domain + * @REGDOMAIN_ETSI: ETSI domain + * @REGDOMAIN_JAPAN: JAPAN domain + * @REGDOMAIN_WORLD: WORLD domain + * @REGDOMAIN_COUNT: Max domain + */ +typedef enum { + REGDOMAIN_FCC, + REGDOMAIN_ETSI, + REGDOMAIN_JAPAN, + REGDOMAIN_WORLD, + REGDOMAIN_COUNT +} v_REGDOMAIN_t; + +/** + * enum ctl_value - CTL value + * @CTL_FCC: CTL FCC + * @CTL_MKK: CTL MKK + * @CTL_ETSI: CTL ETSI + * @CTL_KOR: CTL KOR + * @CTL_CHN: CTL CHINA + * @CTL_USER_DEF: CTL USER_DEF + * @CTL_NONE: CTL NONE + */ +enum ctl_value { + CTL_FCC = 0x10, + CTL_ETSI = 0x30, + CTL_MKK = 0x40, + CTL_KOR = 0x50, + CTL_CHN = 0x60, + CTL_USER_DEF = 0x70, + CTL_NONE = 0xff +}; + +/** + * struct ch_params + * @ch_width: channel width + * @sec_ch_offset: secondary channel offset + * @center_freq_seg0: channel number for segment 0 + * @center_freq_seg1: channel number segment 1 + * @mhz_freq_seg0: Center frequency for segment 0 + * @mhz_freq_seg1: Center frequency for segment 1 + */ +struct ch_params { + enum phy_ch_width ch_width; + uint8_t sec_ch_offset; + uint8_t center_freq_seg0; + uint8_t center_freq_seg1; + qdf_freq_t mhz_freq_seg0; + qdf_freq_t mhz_freq_seg1; +}; + +/** + * struct channel_power + * @center_freq: Channel Center Frequency + * @chan_num: channel number + * @tx_power: TX power + */ +struct channel_power { + qdf_freq_t center_freq; + uint8_t chan_num; + uint32_t tx_power; +}; + +/** + * enum offset_t: channel offset + * @BW20: 20 mhz channel + * @BW40_LOW_PRIMARY: lower channel in 40 mhz + * @BW40_HIGH_PRIMARY: higher channel in 40 mhz + * @BW80: 80 mhz channel + * @BWALL: unknown bandwidth + */ +enum offset_t { + BW20 = 0, + BW40_LOW_PRIMARY = 1, + BW40_HIGH_PRIMARY = 3, + BW80, + BWALL, + BW_INVALID = 0xFF +}; + +/** + * enum behav_limit - behavior limit + * @BEHAV_NONE: none + * @BEHAV_BW40_LOW_PRIMARY: BW40 low primary + * @BEHAV_BW40_HIGH_PRIMARY: BW40 high primary + * @BEHAV_BW80_PLUS: BW 80 plus + * @BEHAV_INVALID: invalid behavior + */ +enum behav_limit { + BEHAV_NONE, + BEHAV_BW40_LOW_PRIMARY, + BEHAV_BW40_HIGH_PRIMARY, + BEHAV_BW80_PLUS, + BEHAV_INVALID = 0xFF +}; + +/** + * struct reg_dmn_op_class_map_t: operating class + * @op_class: operating class number + * @chan_spacing: channel spacing + * @offset: offset + * @behav_limit: OR of bitmaps of enum behav_limit + * @start_freq: starting frequency + * @channels: channel set + */ +struct reg_dmn_op_class_map_t { + uint8_t op_class; + uint8_t chan_spacing; + enum offset_t offset; + uint16_t behav_limit; + qdf_freq_t start_freq; + uint8_t channels[REG_MAX_CHANNELS_PER_OPERATING_CLASS]; +}; + +/** + * struct regdmn_ap_cap_opclass_t: AP Cap operation class table + * @op_class: operating class number + * @ch_width: channel width in MHz + * @start_freq: Starting Frequency in MHz + * @behav_limit: OR of bitmaps of enum behav_limit + * @max_tx_pwr_dbm: Maximum tx power in dbm + * @num_supported_chan: Number of supported channels + * @num_non_supported_chan: Number of non-supported channels + * @sup_chan_list: Array of supported channel numbers + * @non_sup_chan_list: Array of non supported channel numbers + */ +struct regdmn_ap_cap_opclass_t { + uint8_t op_class; + uint8_t ch_width; + qdf_freq_t start_freq; + uint16_t behav_limit; + uint8_t max_tx_pwr_dbm; + uint8_t num_supported_chan; + uint8_t num_non_supported_chan; + uint8_t sup_chan_list[REG_MAX_CHANNELS_PER_OPERATING_CLASS]; + uint8_t non_sup_chan_list[REG_MAX_CHANNELS_PER_OPERATING_CLASS]; +}; + +/** + * struct reg_dmn_supp_op_classes: operating classes + * @num_classes: number of classes + * @classes: classes + */ +struct reg_dmn_supp_op_classes { + uint8_t num_classes; + uint8_t classes[REG_MAX_SUPP_OPER_CLASSES]; +}; + +/** + * struct reg_start_11d_scan_req: start 11d scan request + * @vdev_id: vdev id + * @scan_period_msec: scan duration in milli-seconds + * @start_interval_msec: offset duration to start the scan in milli-seconds + */ +struct reg_start_11d_scan_req { + uint8_t vdev_id; + uint32_t scan_period_msec; + uint32_t start_interval_msec; +}; + +/** + * struct reg_11d_scan_msg: 11d scan message structure + * @psoc: pointer to psoc object + * @enable_11d_supp: enable 11d scan or disable 11d scan + */ +struct reg_11d_scan_msg { + struct wlan_objmgr_psoc *psoc; + bool enable_11d_supp; +}; +/** + * struct reg_stop_11d_scan_req: stop 11d scan request + * @vdev_id: vdev id + */ +struct reg_stop_11d_scan_req { + uint8_t vdev_id; +}; + +/** + * struct reg_11d_new_country: regulatory 11d new coutry code + * @alpha2: new 11d alpha2 + */ +struct reg_11d_new_country { + uint8_t alpha2[REG_ALPHA2_LEN + 1]; +}; + +/** + * enum country_src: country source + * @SOURCE_QUERY: source query + * @SOURCE_CORE: source regulatory core + * @SOURCE_DRIVER: source driver + * @SOURCE_USERSPACE: source userspace + * @SOURCE_11D: source 11D + */ +enum country_src { + SOURCE_UNKNOWN, + SOURCE_QUERY, + SOURCE_CORE, + SOURCE_DRIVER, + SOURCE_USERSPACE, + SOURCE_11D +}; + +/** + * struct regulatory_channel + * @center_freq: center frequency + * @chan_num: channel number + * @state: channel state + * @chan_flags: channel flags + * @tx_power: TX powers + * @min_bw: min bandwidth + * @max_bw: max bandwidth + * @nol_chan: whether channel is nol + * @nol_history: Set NOL-History when STA vap detects RADAR. + */ +struct regulatory_channel { + qdf_freq_t center_freq; + uint8_t chan_num; + enum channel_state state; + uint32_t chan_flags; + uint32_t tx_power; + uint16_t min_bw; + uint16_t max_bw; + uint8_t ant_gain; + bool nol_chan; + bool nol_history; +}; + +/** + * struct regulatory: regulatory information + * @reg_domain: regulatory domain pair + * @eeprom_rd_ext: eeprom value + * @country_code: current country in integer + * @alpha2: current alpha2 + * @def_country: default country alpha2 + * @def_region: DFS region + * @ctl_2g: 2G CTL value + * @ctl_5g: 5G CTL value + * @reg_pair: pointer to regulatory pair + * @cc_src: country code src + * @reg_flags: kernel regulatory flags + */ +struct regulatory { + uint32_t reg_domain; + uint32_t eeprom_rd_ext; + uint16_t country_code; + uint8_t alpha2[REG_ALPHA2_LEN + 1]; + uint8_t ctl_2g; + uint8_t ctl_5g; + const void *regpair; + enum country_src cc_src; + uint32_t reg_flags; +}; + +/** + * struct chan_map + * @center_freq: center freq in mhz + * @chan_num: channel number + * @min_bw: min bw + * @max_bw: max bw + */ +struct chan_map { + qdf_freq_t center_freq; + uint8_t chan_num; + uint16_t min_bw; + uint16_t max_bw; +}; + +/** + * struct bonded_channel + * @start_ch: start channel + * @end_ch: end channel + */ +struct bonded_channel { + uint8_t start_ch; + uint8_t end_ch; +}; + +/** + * struct bonded_channel_freq + * @start_freq: start channel frequency + * @end_freq: end channel frequency + */ +struct bonded_channel_freq { + uint16_t start_freq; + uint16_t end_freq; +}; + +struct set_country { + uint8_t country[REG_ALPHA2_LEN + 1]; + uint8_t pdev_id; +}; +/** + * enum ht_sec_ch_offset + * @NO_SEC_CH: no secondary + * @LOW_PRIMARY_CH: low primary + * @HIGH_PRIMARY_CH: high primary + */ +enum ht_sec_ch_offset { + NO_SEC_CH = 0, + LOW_PRIMARY_CH = 1, + HIGH_PRIMARY_CH = 3, +}; + +enum cc_setting_code { + REG_SET_CC_STATUS_PASS = 0, + REG_CURRENT_ALPHA2_NOT_FOUND = 1, + REG_INIT_ALPHA2_NOT_FOUND = 2, + REG_SET_CC_CHANGE_NOT_ALLOWED = 3, + REG_SET_CC_STATUS_NO_MEMORY = 4, + REG_SET_CC_STATUS_FAIL = 5, +}; + +/** + * struct cur_reg_rule + * @start_freq: start frequency + * @end_freq: end frequency + * @max_bw: maximum bandwidth + * @reg_power: regulatory power + * @ant_gain: antenna gain + * @flags: regulatory flags + */ +struct cur_reg_rule { + uint16_t start_freq; + uint16_t end_freq; + uint16_t max_bw; + uint8_t reg_power; + uint8_t ant_gain; + uint16_t flags; +}; + +/** + * struct cur_regulatory_info + * @psoc: psoc ptr + * @status_code: status value + * @num_phy: number of phy + * @phy_id: phy id + * @reg_dmn_pair: reg domain pair + * @ctry_code: country code + * @alpha2: country alpha2 + * @offload_enabled: offload enabled + * @dfs_reg: dfs region + * @phybitmap: phy bit map + * @min_bw_2g: minimum 2G bw + * @max_bw_2g: maximum 2G bw + * @min_bw_5g: minimum 5G bw + * @max_bw_5g: maximum 5G bw + * @num_2g_reg_rules: number 2G reg rules + * @num_5g_reg_rules: number 5G and 6G reg rules + * @reg_rules_2g_ptr: ptr to 2G reg rules + * @reg_rules_5g_ptr: ptr to 5G reg rules + */ +struct cur_regulatory_info { + struct wlan_objmgr_psoc *psoc; + enum cc_setting_code status_code; + uint8_t num_phy; + uint8_t phy_id; + uint16_t reg_dmn_pair; + uint16_t ctry_code; + uint8_t alpha2[REG_ALPHA2_LEN + 1]; + bool offload_enabled; + enum dfs_reg dfs_region; + uint32_t phybitmap; + uint32_t min_bw_2g; + uint32_t max_bw_2g; + uint32_t min_bw_5g; + uint32_t max_bw_5g; + uint32_t num_2g_reg_rules; + uint32_t num_5g_reg_rules; + struct cur_reg_rule *reg_rules_2g_ptr; + struct cur_reg_rule *reg_rules_5g_ptr; +}; + +/** + * struct reg_rule_info + * @alpha2: alpha2 of reg rules + * @dfs_region: dfs region + * @num_of_reg_rules: number of reg rules + * @reg_rules: regulatory rules array + */ +struct reg_rule_info { + uint8_t alpha2[REG_ALPHA2_LEN + 1]; + enum dfs_reg dfs_region; + uint8_t num_of_reg_rules; + struct cur_reg_rule reg_rules[MAX_REG_RULES]; +}; + +/** + * enum reg_reg_wifi_band + * @REG_BAND_2G: 2G band + * @REG_BAND_5G: 5G band + * @REG_BAND_6G: 6G band + * @REG_BAND_UNKNOWN: Unsupported band + */ +enum reg_wifi_band { + REG_BAND_2G, + REG_BAND_5G, + REG_BAND_6G, + REG_BAND_UNKNOWN +}; + +#ifdef DISABLE_UNII_SHARED_BANDS +/** + * enum reg_unii_band + * @REG_UNII_BAND_1: Disable UNII-1 band channels + * @REG_UNII_BAND_2A: Disable UNII-2A band channels + */ +enum reg_unii_band { + REG_UNII_BAND_1 = 0x0, + REG_UNII_BAND_2A = 0x1, +}; +#endif + +#define REG_BAND_MASK_ALL (BIT(REG_BAND_2G) | BIT(REG_BAND_5G) \ + | BIT(REG_BAND_6G)) + +/* Avoid the use of band_info as it does not support 6GHz band. Use + * reg_wifi_band, as it supports the 6GHz band + */ +/** + * enum band_info + * @BAND_ALL:all bands + * @BAND_2G: 2G band + * @BAND_5G: 5G band + * @BAND_UNKNOWN: Unsupported band + */ +enum band_info { + BAND_ALL, + BAND_2G, + BAND_5G, + BAND_UNKNOWN +}; + +/** + * enum restart_beaconing_on_ch_avoid_rule: control the beaconing entity to + * move away from active LTE channels + * @CH_AVOID_RULE_DO_NOT_RESTART: Do not move from active LTE + * channels + * @CH_AVOID_RULE_RESTART: Move from active LTE channels + * @CH_AVOID_RULE_RESTART_24G_ONLY: move from 2.4G active LTE + * channels only + */ +enum restart_beaconing_on_ch_avoid_rule { + CH_AVOID_RULE_DO_NOT_RESTART, + CH_AVOID_RULE_RESTART, + CH_AVOID_RULE_RESTART_24G_ONLY, +}; + +/** + * struct reg_config_vars + * @enable_11d_support: enable 11d support + * @scan_11d_interval: 11d scan interval in ms + * @userspace_ctry_priority: user priority + * @band_capability: band capability + * @dfs_disable: dfs disabled + * @indoor_channel_support: indoor channel support + * @force_ssc_disable_indoor_channel: Disable indoor channel on sap start + * @restart_beaconing: control the beaconing entity to move + * away from active LTE channels + * @enable_srd_chan_in_master_mode: SRD channel support in master mode + * @enable_11d_in_world_mode: enable 11d in world mode + * @retain_nol_across_regdmn_update: Retain the NOL list across the regdomain. + */ +struct reg_config_vars { + uint32_t enable_11d_support; + uint32_t scan_11d_interval; + uint32_t userspace_ctry_priority; + uint32_t band_capability; + uint32_t dfs_enabled; + uint32_t indoor_chan_enabled; + uint32_t force_ssc_disable_indoor_channel; + enum restart_beaconing_on_ch_avoid_rule restart_beaconing; + uint8_t enable_srd_chan_in_master_mode; + bool enable_11d_in_world_mode; + bool retain_nol_across_regdmn_update; +}; + +/** + * struct reg_freq_range + * @low_freq: low frequency + * @high_freq: high frequency + */ +struct reg_freq_range { + uint32_t low_freq; + uint32_t high_freq; +}; + +/** + * struct reg_sched_payload + * @psoc: psoc ptr + * @pdev: pdev ptr + */ +struct reg_sched_payload { + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; +}; + +/** + * enum direction + * @NORTHBOUND: northbound + * @SOUTHBOUND: southbound + */ +enum direction { + NORTHBOUND, + SOUTHBOUND, +}; + +/** + * struct mas_chan_params + * @dfs_region: dfs region + * @phybitmap: phybitmap + * @mas_chan_list: master chan list + * @default_country: default country + * @current_country: current country + * @def_region_domain: default reg domain + * @def_country_code: default country code + * @reg_dmn_pair: reg domain pair + * @ctry_code: country code + * @reg_rules: regulatory rules + */ +struct mas_chan_params { + enum dfs_reg dfs_region; + uint32_t phybitmap; + struct regulatory_channel mas_chan_list[NUM_CHANNELS]; + char default_country[REG_ALPHA2_LEN + 1]; + char current_country[REG_ALPHA2_LEN + 1]; + uint16_t def_region_domain; + uint16_t def_country_code; + uint16_t reg_dmn_pair; + uint16_t ctry_code; + struct reg_rule_info reg_rules; +}; + +/** + * enum cc_regdmn_flag: Regdomain flags + * @INVALID: Invalid flag + * @CC_IS_SET: Country code is set + * @REGDMN_IS_SET: Regdomain ID is set + * @ALPHA_IS_SET: Country ISO is set + */ +enum cc_regdmn_flag { + INVALID_CC, + CC_IS_SET, + REGDMN_IS_SET, + ALPHA_IS_SET, +}; + +/** + * struct cc_regdmn_s: User country code or regdomain + * @country_code: Country code + * @regdmn_id: Regdomain pair ID + * @alpha: Country ISO + * @flags: Regdomain flags + */ +struct cc_regdmn_s { + union { + uint16_t country_code; + uint16_t regdmn_id; + uint8_t alpha[REG_ALPHA2_LEN + 1]; + } cc; + uint8_t flags; +}; + +/** + * struct cur_regdmn_info: Current regulatory info + * @regdmn_pair_id: Current regdomain pair ID + * @dmn_id_2g: 2GHz regdomain ID + * @dmn_id_5g: 5GHz regdomain ID + * @ctl_2g: 2GHz CTL value + * @ctl_5g: 5GHzCTL value + * @dfs_region: dfs region + */ +struct cur_regdmn_info { + uint16_t regdmn_pair_id; + uint16_t dmn_id_2g; + uint16_t dmn_id_5g; + uint8_t ctl_2g; + uint8_t ctl_5g; + uint8_t dfs_region; +}; + +/** + * struct ch_avoid_freq_type + * @start_freq: start freq + * @end_freq: end freq + */ +struct ch_avoid_freq_type { + qdf_freq_t start_freq; + qdf_freq_t end_freq; +}; + +/** + * struct ch_avoid_ind_type + * @ch_avoid_range_cnt: count + * @avoid_freq_range: avoid freq range array + */ +struct ch_avoid_ind_type { + uint32_t ch_avoid_range_cnt; + struct ch_avoid_freq_type avoid_freq_range[CH_AVOID_MAX_RANGE]; +}; + +/** + * struct unsafe_ch_list + * @chan_cnt: no.of channels + * @chan_freq_list: channel frequency list + */ +struct unsafe_ch_list { + uint16_t chan_cnt; + uint16_t chan_freq_list[NUM_CHANNELS]; +}; + +/** + * struct avoid_freq_ind_data + * @freq_list: frequency list + * @chan_list: channel list + */ +struct avoid_freq_ind_data { + struct ch_avoid_ind_type freq_list; + struct unsafe_ch_list chan_list; +}; + +#define FIVEG_STARTING_FREQ 5000 +#define TWOG_STARTING_FREQ 2407 +#define TWOG_CHAN_14_IN_MHZ 2484 +#define TWOG_CHAN_1_IN_MHZ 2412 +#define TWOG_CHAN_5_IN_MHZ 2432 +#define TWOG_CHAN_6_IN_MHZ 2437 +#define TWOG_CHAN_13_IN_MHZ 2472 + +/** + * struct reg_ctl_params - reg ctl and regd info + * @regd: regdomain pair + * @regd_2g: 2g sub domain code + * @regd_5g: 5g sub domain code + * @ctl_2g: 2g ctl info + * @ctl_5g: 5g ctl info + */ +struct reg_ctl_params { + uint32_t regd; + uint16_t regd_2g; + uint16_t regd_5g; + uint8_t ctl_2g; + uint8_t ctl_5g; +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_services_api.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_services_api.h new file mode 100644 index 0000000000000000000000000000000000000000..0c8f420ee0cebe21a7dab14df08307b1276b0bbc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_services_api.h @@ -0,0 +1,1318 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_reg_services_api.h + * This file provides prototypes of the routines needed for the + * external components to utilize the services provided by the + * regulatory component. + */ + +#ifndef __WLAN_REG_SERVICES_API_H +#define __WLAN_REG_SERVICES_API_H + +#ifdef CONFIG_CHAN_NUM_API +/** + * wlan_reg_min_24ghz_ch_num() - Get minimum 2.4GHz channel number + * + * Return: Minimum 2.4GHz channel number + */ +#define WLAN_REG_MIN_24GHZ_CH_NUM wlan_reg_min_24ghz_ch_num() +uint8_t wlan_reg_min_24ghz_ch_num(void); + +/** + * wlan_reg_max_24ghz_ch_num() - Get maximum 2.4GHz channel number + * + * Return: Maximum 2.4GHz channel number + */ +#define WLAN_REG_MAX_24GHZ_CH_NUM wlan_reg_max_24ghz_ch_num() +uint8_t wlan_reg_max_24ghz_ch_num(void); + +/** + * wlan_reg_min_5ghz_ch_num() - Get minimum 5GHz channel number + * + * Return: Minimum 5GHz channel number + */ +#define WLAN_REG_MIN_5GHZ_CH_NUM wlan_reg_min_5ghz_ch_num() +uint8_t wlan_reg_min_5ghz_ch_num(void); + +/** + * wlan_reg_max_5ghz_ch_num() - Get maximum 5GHz channel number + * + * Return: Maximum 5GHz channel number + */ +#define WLAN_REG_MAX_5GHZ_CH_NUM wlan_reg_max_5ghz_ch_num() +uint8_t wlan_reg_max_5ghz_ch_num(void); +#endif /* CONFIG_CHAN_NUM_API */ + +#ifdef CONFIG_CHAN_FREQ_API +/** + * wlan_reg_min_24ghz_chan_freq() - Get minimum 2.4GHz channel frequency + * + * Return: Minimum 2.4GHz channel frequency + */ +#define WLAN_REG_MIN_24GHZ_CHAN_FREQ wlan_reg_min_24ghz_chan_freq() +qdf_freq_t wlan_reg_min_24ghz_chan_freq(void); + +/** + * wlan_reg_max_24ghz_chan_freq() - Get maximum 2.4GHz channel frequency + * + * Return: Maximum 2.4GHz channel frequency + */ +#define WLAN_REG_MAX_24GHZ_CHAN_FREQ wlan_reg_max_24ghz_chan_freq() +qdf_freq_t wlan_reg_max_24ghz_chan_freq(void); + +/** + * wlan_reg_min_5ghz_chan_freq() - Get minimum 5GHz channel frequency + * + * Return: Minimum 5GHz channel frequency + */ +#define WLAN_REG_MIN_5GHZ_CHAN_FREQ wlan_reg_min_5ghz_chan_freq() +qdf_freq_t wlan_reg_min_5ghz_chan_freq(void); + +/** + * wlan_reg_max_5ghz_chan_freq() - Get maximum 5GHz channel frequency + * + * Return: Maximum 5GHz channel frequency + */ +#define WLAN_REG_MAX_5GHZ_CHAN_FREQ wlan_reg_max_5ghz_chan_freq() +qdf_freq_t wlan_reg_max_5ghz_chan_freq(void); +#endif /* CONFIG_CHAN_FREQ_API */ + +#ifdef CONFIG_CHAN_NUM_API +/** + * wlan_reg_is_24ghz_ch() - Check if the given channel number is 2.4GHz + * @chan: Channel number + * + * Return: true if channel number is 2.4GHz, else false + */ +#define WLAN_REG_IS_24GHZ_CH(chan) wlan_reg_is_24ghz_ch(chan) +bool wlan_reg_is_24ghz_ch(uint8_t chan); + +/** + * wlan_reg_is_5ghz_ch() - Check if the given channel number is 5GHz + * @chan: Channel number + * + * Return: true if channel number is 5GHz, else false + */ +#define WLAN_REG_IS_5GHZ_CH(chan) wlan_reg_is_5ghz_ch(chan) +bool wlan_reg_is_5ghz_ch(uint8_t chan); +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * wlan_reg_is_24ghz_ch_freq() - Check if the given channel frequency is 2.4GHz + * @freq: Channel frequency + * + * Return: true if channel frequency is 2.4GHz, else false + */ +#define WLAN_REG_IS_24GHZ_CH_FREQ(freq) wlan_reg_is_24ghz_ch_freq(freq) +bool wlan_reg_is_24ghz_ch_freq(qdf_freq_t freq); + +/** + * wlan_reg_is_5ghz_ch_freq() - Check if the given channel frequency is 5GHz + * @freq: Channel frequency + * + * Return: true if channel frequency is 5GHz, else false + */ +#define WLAN_REG_IS_5GHZ_CH_FREQ(freq) wlan_reg_is_5ghz_ch_freq(freq) +bool wlan_reg_is_5ghz_ch_freq(qdf_freq_t freq); + +/** + * wlan_reg_is_freq_indoor() - Check if a frequency is indoor. + * @pdev: Pointer to pdev. + * @freq: Channel frequency. + * + * Return: Return true if a frequency is indoor, else false. + */ +bool wlan_reg_is_freq_indoor(struct wlan_objmgr_pdev *pdev, qdf_freq_t freq); + +#ifdef CONFIG_BAND_6GHZ +/** + * wlan_reg_is_6ghz_chan_freq() - Check if the given channel frequency is 6GHz + * @freq: Channel frequency + * + * Return: true if channel frequency is 6GHz, else false + */ +#define WLAN_REG_IS_6GHZ_CHAN_FREQ(freq) wlan_reg_is_6ghz_chan_freq(freq) +bool wlan_reg_is_6ghz_chan_freq(uint16_t freq); + +/** + * wlan_reg_is_6ghz_psc_chan_freq() - Check if the given 6GHz channel frequency + * is preferred scanning channel frequency. + * @freq: Channel frequency + * + * Return: true if given 6GHz channel frequency is preferred scanning channel + * frequency, else false + */ +#define WLAN_REG_IS_6GHZ_PSC_CHAN_FREQ(freq) \ + wlan_reg_is_6ghz_psc_chan_freq(freq) +bool wlan_reg_is_6ghz_psc_chan_freq(uint16_t freq); + +/** + * wlan_reg_min_6ghz_chan_freq() - Get minimum 6GHz channel center frequency + * + * Return: Minimum 6GHz channel center frequency + */ +#define WLAN_REG_MIN_6GHZ_CHAN_FREQ wlan_reg_min_6ghz_chan_freq() +uint16_t wlan_reg_min_6ghz_chan_freq(void); + +/** + * wlan_reg_max_6ghz_chan_freq() - Get maximum 6GHz channel center frequency + * + * Return: Maximum 6GHz channel center frequency + */ +#define WLAN_REG_MAX_6GHZ_CHAN_FREQ wlan_reg_max_6ghz_chan_freq() +uint16_t wlan_reg_max_6ghz_chan_freq(void); + +#else + +#define WLAN_REG_IS_6GHZ_CHAN_FREQ(freq) (false) +static inline bool wlan_reg_is_6ghz_chan_freq(uint16_t freq) +{ + return false; +} + +#define WLAN_REG_IS_6GHZ_PSC_CHAN_FREQ(freq) (false) +static inline bool wlan_reg_is_6ghz_psc_chan_freq(uint16_t freq) +{ + return false; +} + +#define WLAN_REG_MIN_6GHZ_CHAN_FREQ (false) +static inline uint16_t wlan_reg_min_6ghz_chan_freq(void) +{ + return 0; +} + +#define WLAN_REG_MAX_6GHZ_CHAN_FREQ (false) +static inline uint16_t wlan_reg_max_6ghz_chan_freq(void) +{ + return 0; +} +#endif /* CONFIG_BAND_6GHZ */ + +/** + * wlan_reg_get_band_channel_list() - Get channel list based on the band_mask + * @pdev: pdev ptr + * @band_mask: Input bitmap with band set + * @channel_list: Pointer to Channel List + * + * Get the given channel list and number of channels from the current channel + * list based on input band bitmap. + * + * Return: Number of channels, else 0 to indicate error + */ +uint16_t +wlan_reg_get_band_channel_list(struct wlan_objmgr_pdev *pdev, + uint8_t band_mask, + struct regulatory_channel *channel_list); + +/** + * wlan_reg_chan_band_to_freq - Return channel frequency based on the channel + * number and band. + * @pdev: pdev ptr + * @chan: Channel Number + * @band_mask: Bitmap for bands + * + * Return: Return channel frequency or return 0, if the channel is disabled or + * if the input channel number or band_mask is invalid. Composite bands are + * supported only for 2.4Ghz and 5Ghz bands. For other bands the following + * priority is given: 1) 6Ghz 2) 5Ghz 3) 2.4Ghz. + */ +qdf_freq_t wlan_reg_chan_band_to_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan, + uint8_t band_mask); + +/** + * wlan_reg_is_49ghz_freq() - Check if the given channel frequency is 4.9GHz + * @freq: Channel frequency + * + * Return: true if channel frequency is 4.9GHz, else false + */ +#define WLAN_REG_IS_49GHZ_FREQ(freq) wlan_reg_is_49ghz_freq(freq) +bool wlan_reg_is_49ghz_freq(qdf_freq_t freq); + +/** + * wlan_reg_ch_num() - Get channel number from channel enum + * @ch_enum: Channel enum + * + * Return: channel number + */ +#define WLAN_REG_CH_NUM(ch_enum) wlan_reg_ch_num(ch_enum) +uint8_t wlan_reg_ch_num(uint32_t ch_enum); + +/** + * wlan_reg_ch_to_freq() - Get channel frequency from channel enum + * @ch_enum: Channel enum + * + * Return: channel frequency + */ +#define WLAN_REG_CH_TO_FREQ(ch_enum) wlan_reg_ch_to_freq(ch_enum) +qdf_freq_t wlan_reg_ch_to_freq(uint32_t ch_enum); + +#ifdef CONFIG_CHAN_NUM_API +/** + * wlan_reg_is_same_band_channels() - Check if given channel numbers have same + * band + * @chan_num1: Channel number1 + * @chan_num2: Channel number2 + * + * Return: true if both the channels has the same band. + */ +#define WLAN_REG_IS_SAME_BAND_CHANNELS(chan_num1, chan_num2) \ + wlan_reg_is_same_band_channels(chan_num1, chan_num2) +bool wlan_reg_is_same_band_channels(uint8_t chan_num1, uint8_t chan_num2); + +/** + * wlan_reg_is_channel_valid_5g_sbs() Check if the given channel is 5G SBS. + * @curchan: current channel + * @newchan:new channel + * + * Return: true if the given channel is a valid 5G SBS + */ +#define WLAN_REG_IS_CHANNEL_VALID_5G_SBS(curchan, newchan) \ + wlan_reg_is_channel_valid_5g_sbs(curchan, newchan) +bool wlan_reg_is_channel_valid_5g_sbs(uint8_t curchan, uint8_t newchan); +#endif /* CONFIG_CHAN_NUM_API */ + + +#ifdef CONFIG_CHAN_NUM_API +/** + * wlan_reg_chan_to_band() - Get band from channel number + * @chan_num: channel number + * + * Return: band info + */ +#define WLAN_REG_CHAN_TO_BAND(chan_num) wlan_reg_chan_to_band(chan_num) +enum band_info wlan_reg_chan_to_band(uint8_t chan_num); + +/** + * wlan_reg_get_channel_list_with_power() - Provide the channel list with power + * @ch_list: pointer to the channel list. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_reg_get_channel_list_with_power(struct wlan_objmgr_pdev *pdev, + struct channel_power *ch_list, + uint8_t *num_chan); +#endif /* CONFIG_CHAN_NUM_API */ +/** + * wlan_reg_read_default_country() - Read the default country for the regdomain + * @country: pointer to the country code. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_reg_read_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country); + +/** + * wlan_reg_get_fcc_constraint() - Check FCC constraint on given frequency + * @pdev: physical dev to get + * @freq: frequency to be checked + * + * Return: If FCC constraint is on applied given frequency return true + * else return false. + */ +bool wlan_reg_get_fcc_constraint(struct wlan_objmgr_pdev *pdev, uint32_t freq); + +#ifdef CONFIG_REG_CLIENT +/** + * wlan_reg_read_current_country() - Read the current country for the regdomain + * @country: pointer to the country code. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_reg_read_current_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country); + +#ifdef CONFIG_CHAN_NUM_API +/** + * wlan_reg_chan_has_dfs_attribute() - check channel has dfs attribute flag + * @ch: channel number. + * + * This API get chan initial dfs attribute from regdomain + * + * Return: true if chan is dfs, otherwise false + */ +bool +wlan_reg_chan_has_dfs_attribute(struct wlan_objmgr_pdev *pdev, uint8_t ch); + +/** + * wlan_reg_is_etsi13_srd_chan () - Checks if the ch is ETSI13 srd ch or not + * @pdev: pdev ptr + * @chan_num: channel + * + * Return: true or false + */ +bool wlan_reg_is_etsi13_srd_chan(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num); +#endif /* CONFIG_CHAN_NUM_API */ + +#ifdef CONFIG_CHAN_FREQ_API +/** + * wlan_reg_is_etsi13_srd_chan_for_freq () - Checks if the ch is ETSI13 srd ch + * or not + * @pdev: pdev ptr + * @freq: channel center frequency + * + * Return: true or false + */ +bool wlan_reg_is_etsi13_srd_chan_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq); +#endif /*CONFIG_CHAN_FREQ_API*/ + +/** + * wlan_reg_is_etsi13_regdmn() - Checks if current reg domain is ETSI13 or not + * @pdev: pdev ptr + * + * Return: true or false + */ +bool wlan_reg_is_etsi13_regdmn(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_reg_is_etsi13_srd_chan_allowed_master_mode() - Checks if regdmn is + * ETSI13 and SRD channels are allowed in master mode or not. + * + * @pdev: pdev ptr + * + * Return: true or false + */ +bool wlan_reg_is_etsi13_srd_chan_allowed_master_mode(struct wlan_objmgr_pdev + *pdev); +#endif + +/** + * wlan_reg_is_world() - reg is world mode + * @country: The country information + * + * Return: true or false + */ +bool wlan_reg_is_world(uint8_t *country); + +#ifdef CONFIG_CHAN_NUM_API +/** + * wlan_reg_get_chan_enum() - Get channel enum for given channel number + * @chan_num: Channel number + * + * Return: Channel enum + */ +enum channel_enum wlan_reg_get_chan_enum(uint8_t chan_num); + +/** + * wlan_reg_get_channel_state() - Get channel state from regulatory + * @ch: channel number. + * + * Return: channel state + */ +enum channel_state wlan_reg_get_channel_state(struct wlan_objmgr_pdev *pdev, + uint8_t ch); + +/** + * wlan_reg_get_5g_bonded_channel_state() - Get 5G bonded channel state + * @pdev: The physical dev to program country code or regdomain + * @ch: channel number. + * @bw: channel band width + * + * Return: channel state + */ +enum channel_state wlan_reg_get_5g_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, uint8_t ch, + enum phy_ch_width bw); + +/** + * wlan_reg_get_2g_bonded_channel_state() - Get 2G bonded channel state + * @pdev: The physical dev to program country code or regdomain + * @ch: channel number. + * @sec_ch: Secondary channel. + * @bw: channel band width + * + * Return: channel state + */ +enum channel_state wlan_reg_get_2g_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, uint8_t ch, + uint8_t sec_ch, enum phy_ch_width bw); + +/** + * wlan_reg_set_channel_params () - Sets channel parameteres for given bandwidth + * @pdev: The physical dev to program country code or regdomain + * @ch: channel number. + * @sec_ch_2g: Secondary channel. + * @ch_params: pointer to the channel parameters. + * + * Return: None + */ +void wlan_reg_set_channel_params(struct wlan_objmgr_pdev *pdev, uint8_t ch, + uint8_t sec_ch_2g, + struct ch_params *ch_params); +#endif /* CONFIG_CHAN_NUM_API */ +/** + * wlan_reg_get_dfs_region () - Get the current dfs region + * @dfs_reg: pointer to dfs region + * + * Return: Status + */ +QDF_STATUS wlan_reg_get_dfs_region(struct wlan_objmgr_pdev *pdev, + enum dfs_reg *dfs_reg); + +#ifdef CONFIG_CHAN_NUM_API +/** + * wlan_reg_get_channel_reg_power() - Provide the channel regulatory power + * @chan_num: channel number + * + * Return: int + */ +uint32_t wlan_reg_get_channel_reg_power(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num); + +/** + * wlan_reg_get_channel_freq() - provide the channel center freq + * @chan_num: channel number + * + * Return: int + */ +qdf_freq_t wlan_reg_get_channel_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num); +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * wlan_reg_get_current_chan_list() - provide the pdev current channel list + * @pdev: pdev pointer + * @chan_list: channel list pointer + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_reg_get_current_chan_list(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list); + +#ifdef CONFIG_CHAN_NUM_API +/** + * wlan_reg_get_bonded_channel_state() - get bonded channel state + * @pdev: pdev ptr + * @ch: chennal number + * @bw: chennal number + * @sec_ch: secondary channel + * + * Return: enum channel_state + */ +enum channel_state wlan_reg_get_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, uint8_t ch, + enum phy_ch_width bw, uint8_t sec_ch); +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * wlan_reg_set_dfs_region() - set the dfs region + * @pdev: pdev ptr + * @dfs_reg: dfs region + * + * Return: void + */ +void wlan_reg_set_dfs_region(struct wlan_objmgr_pdev *pdev, + enum dfs_reg dfs_reg); + +/** + * wlan_reg_get_bw_value() - provide the channel center freq + * @chan_num: chennal number + * + * Return: int + */ +uint16_t wlan_reg_get_bw_value(enum phy_ch_width bw); + +/** + * wlan_reg_get_domain_from_country_code() - provide the channel center freq + * @reg_domain_ptr: regulatory domain ptr + * @country_alpha2: country alpha2 + * @source: alpha2 source + * + * Return: int + */ +QDF_STATUS wlan_reg_get_domain_from_country_code(v_REGDOMAIN_t *reg_domain_ptr, + const uint8_t *country_alpha2, + enum country_src source); + +/** + * wlan_reg_dmn_get_opclass_from_channel() - provide the channel center freq + * @country: country alpha2 + * @channel: channel number + * @offset: offset + * + * Return: int + */ +uint16_t wlan_reg_dmn_get_opclass_from_channel(uint8_t *country, + uint8_t channel, + uint8_t offset); + +/** + * wlan_reg_get_opclass_from_freq_width() - Get operating class from frequency + * @country: Country code. + * @freq: Channel center frequency. + * @ch_width: Channel width. + * @behav_limit: Behaviour limit. + * + * Return: Error code. + */ +uint8_t wlan_reg_get_opclass_from_freq_width(uint8_t *country, + qdf_freq_t freq, + uint8_t ch_width, + uint16_t behav_limit); + +/** + * wlan_reg_dmn_print_channels_in_opclass() - Print channels in op-class + * @country: country alpha2 + * @opclass: oplcass + * + * Return: void + */ +void wlan_reg_dmn_print_channels_in_opclass(uint8_t *country, + uint8_t opclass); + + +/** + * wlan_reg_dmn_get_chanwidth_from_opclass() - get channel width from + * operating class + * @country: country alpha2 + * @channel: channel number + * @opclass: operating class + * + * Return: int + */ +uint16_t wlan_reg_dmn_get_chanwidth_from_opclass(uint8_t *country, + uint8_t channel, + uint8_t opclass); +/** + * wlan_reg_dmn_set_curr_opclasses() - set operating class + * @num_classes: number of classes + * @class: operating class + * + * Return: int + */ +uint16_t wlan_reg_dmn_set_curr_opclasses(uint8_t num_classes, + uint8_t *class); + +/** + * wlan_reg_dmn_get_curr_opclasses() - get current oper classes + * @num_classes: number of classes + * @class: operating class + * + * Return: int + */ +uint16_t wlan_reg_dmn_get_curr_opclasses(uint8_t *num_classes, + uint8_t *class); + + +/** + * wlan_reg_get_opclass_details() - Get details about the current opclass table. + * @pdev: Pointer to pdev. + * @reg_ap_cap: Pointer to reg_ap_cap. + * @n_opclasses: Pointer to number of opclasses. + * @max_supp_op_class: Maximum number of operating classes supported. + * @global_tbl_lookup: Whether to lookup global op class tbl. + * + * Return: QDF_STATUS_SUCCESS if success, else return QDF_STATUS_FAILURE. + */ +QDF_STATUS +wlan_reg_get_opclass_details(struct wlan_objmgr_pdev *pdev, + struct regdmn_ap_cap_opclass_t *reg_ap_cap, + uint8_t *n_opclasses, + uint8_t max_supp_op_class, + bool global_tbl_lookup); + +/** + * wlan_regulatory_init() - init regulatory component + * + * Return: Success or Failure + */ +QDF_STATUS wlan_regulatory_init(void); + +/** + * wlan_regulatory_deinit() - deinit regulatory component + * + * Return: Success or Failure + */ +QDF_STATUS wlan_regulatory_deinit(void); + +/** + * regulatory_psoc_open() - open regulatory component + * + * Return: Success or Failure + */ +QDF_STATUS regulatory_psoc_open(struct wlan_objmgr_psoc *psoc); + + +/** + * regulatory_psoc_close() - close regulatory component + * + * Return: Success or Failure + */ +QDF_STATUS regulatory_psoc_close(struct wlan_objmgr_psoc *psoc); + +/** + * regulatory_pdev_open() - Open regulatory component + * @pdev: Pointer to pdev structure + * + * Return: Success or Failure + */ +QDF_STATUS regulatory_pdev_open(struct wlan_objmgr_pdev *pdev); + +/** + * regulatory_pdev_close() - Close regulatory component + * @pdev: Pointer to pdev structure. + * + * Return: Success or Failure + */ +QDF_STATUS regulatory_pdev_close(struct wlan_objmgr_pdev *pdev); + +#ifdef CONFIG_CHAN_NUM_API +/** + * wlan_reg_update_nol_ch () - set nol channel + * @pdev: pdev ptr + * @ch_list: channel list to be returned + * @num_ch: number of channels + * @nol_ch: nol flag + * + * Return: void + */ +void wlan_reg_update_nol_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *ch_list, + uint8_t num_ch, + bool nol_ch); + +/** + * wlan_reg_is_dsrc_chan () - Checks if the channel is dsrc channel or not + * @pdev: pdev ptr + * @chan_num: channel + * + * Return: true or false + */ +bool wlan_reg_is_dsrc_chan(struct wlan_objmgr_pdev *pdev, uint8_t chan_num); + +/** + * wlan_reg_is_passive_or_disable_ch () - Checks chan state for passive + * and disabled + * @pdev: pdev ptr + * @chan: channel + * + * Return: true or false + */ +bool wlan_reg_is_passive_or_disable_ch(struct wlan_objmgr_pdev *pdev, + uint8_t chan); + +/** + * wlan_reg_is_disable_ch () - Checks chan state for disabled + * @pdev: pdev ptr + * @chan: channel + * + * Return: true or false + */ +bool wlan_reg_is_disable_ch(struct wlan_objmgr_pdev *pdev, uint8_t chan); +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * wlan_reg_freq_to_chan () - convert channel freq to channel number + * @pdev: The physical dev to set current country for + * @freq: frequency + * + * Return: true or false + */ +uint8_t wlan_reg_freq_to_chan(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq); + +/** + * wlan_reg_chan_to_freq () - convert channel number to frequency + * @chan: channel number + * + * Return: true or false + */ +qdf_freq_t wlan_reg_chan_to_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan); + +/** + * wlan_reg_legacy_chan_to_freq () - convert chan to freq, for 2G and 5G + * @chan: channel number + * + * Return: frequency + */ +qdf_freq_t wlan_reg_legacy_chan_to_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan); + +/** + * wlan_reg_is_us() - reg is us country + * @country: The country information + * + * Return: true or false + */ +bool wlan_reg_is_us(uint8_t *country); + +#ifdef CONFIG_CHAN_NUM_API +/** + * wlan_reg_chan_is_49ghz() - Check if the input channel number is 4.9GHz + * @pdev: Pdev pointer + * @chan_num: Input channel number + * + * Return: true if the channel is 4.9GHz else false. + */ +bool wlan_reg_chan_is_49ghz(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num); +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * wlan_reg_set_country() - Set the current regulatory country + * @pdev: The physical dev to set current country for + * @country: The country information to configure + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_reg_set_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country); + +/** + * wlan_reg_set_11d_country() - Set the 11d regulatory country + * @pdev: The physical dev to set current country for + * @country: The country information to configure + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_reg_set_11d_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country); + +/** + * wlan_reg_register_chan_change_callback () - add chan change cbk + * @psoc: psoc ptr + * @cbk: callback + * @arg: argument + * + * Return: true or false + */ +void wlan_reg_register_chan_change_callback(struct wlan_objmgr_psoc *psoc, + void *cbk, void *arg); + +/** + * wlan_reg_unregister_chan_change_callback () - remove chan change cbk + * @psoc: psoc ptr + * @cbk:callback + * + * Return: true or false + */ +void wlan_reg_unregister_chan_change_callback(struct wlan_objmgr_psoc *psoc, + void *cbk); + +/** + * wlan_reg_is_11d_offloaded() - 11d offloaded supported + * @psoc: psoc ptr + * + * Return: bool + */ +bool wlan_reg_is_11d_offloaded(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_reg_11d_enabled_on_host() - 11d enabled don host + * @psoc: psoc ptr + * + * Return: bool + */ +bool wlan_reg_11d_enabled_on_host(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_reg_get_chip_mode() - get supported chip mode + * @pdev: pdev pointer + * @chip_mode: chip mode + * + * Return: QDF STATUS + */ +QDF_STATUS wlan_reg_get_chip_mode(struct wlan_objmgr_pdev *pdev, + uint32_t *chip_mode); + +/** + * wlan_reg_is_11d_scan_inprogress() - checks 11d scan status + * @psoc: psoc ptr + * + * Return: bool + */ +bool wlan_reg_is_11d_scan_inprogress(struct wlan_objmgr_psoc *psoc); +/** + * wlan_reg_get_freq_range() - Get 2GHz and 5GHz frequency range + * @pdev: pdev pointer + * @low_2g: low 2GHz frequency range + * @high_2g: high 2GHz frequency range + * @low_5g: low 5GHz frequency range + * @high_5g: high 5GHz frequency range + * + * Return: QDF status + */ +QDF_STATUS wlan_reg_get_freq_range(struct wlan_objmgr_pdev *pdev, + qdf_freq_t *low_2g, + qdf_freq_t *high_2g, + qdf_freq_t *low_5g, + qdf_freq_t *high_5g); +/** + * wlan_reg_get_tx_ops () - get regulatory tx ops + * @psoc: psoc ptr + * + */ +struct wlan_lmac_if_reg_tx_ops * +wlan_reg_get_tx_ops(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_reg_get_curr_regdomain() - Get current regdomain in use + * @pdev: pdev pointer + * @cur_regdmn: Current regdomain info + * + * Return: QDF status + */ +QDF_STATUS wlan_reg_get_curr_regdomain(struct wlan_objmgr_pdev *pdev, + struct cur_regdmn_info *cur_regdmn); + +#ifdef CONFIG_CHAN_NUM_API +/** + * wlan_reg_update_nol_history_ch() - Set nol-history flag for the channels in + * the list. + * + * @pdev: Pdev ptr + * @ch_list: Input channel list. + * @num_ch: Number of channels. + * @nol_history_ch: Nol history value. + * + * Return: void + */ +void wlan_reg_update_nol_history_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *ch_list, + uint8_t num_ch, + bool nol_history_ch); +#endif /* CONFIG_CHAN_NUM_API */ +/** + * wlan_reg_is_regdmn_en302502_applicable() - Find if ETSI EN302_502 radar + * pattern is applicable in the current regulatory domain. + * @pdev: Pdev ptr. + * + * Return: Boolean. + * True: If EN302_502 is applicable. + * False: otherwise. + */ +bool wlan_reg_is_regdmn_en302502_applicable(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_reg_modify_pdev_chan_range() - Compute current channel list for the + * modified channel range in the regcap. + * @pdev: pointer to wlan_objmgr_pdev. + * + * Return : QDF_STATUS + */ +QDF_STATUS wlan_reg_modify_pdev_chan_range(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_reg_disable_chan_coex() - Disable Coexisting channels based on the input + * bitmask + * @pdev: pointer to wlan_objmgr_pdev. + * unii_5g_bitmap: UNII 5G bitmap. + * + * Return : QDF_STATUS + */ +#ifdef DISABLE_UNII_SHARED_BANDS +QDF_STATUS wlan_reg_disable_chan_coex(struct wlan_objmgr_pdev *pdev, + uint8_t unii_5g_bitmap); +#else +static inline QDF_STATUS +wlan_reg_disable_chan_coex(struct wlan_objmgr_pdev *pdev, + uint8_t unii_5g_bitmap) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +/** + * wlan_reg_is_same_band_freqs() - Check if two channel frequencies + * have same band + * @freq1: Frequency 1 + * @freq2: Frequency 2 + * + * Return: true if both the channel frequency has the same band. + */ +#define WLAN_REG_IS_SAME_BAND_FREQS(freq1, freq2) \ + wlan_reg_is_same_band_freqs(freq1, freq2) +bool wlan_reg_is_same_band_freqs(qdf_freq_t freq1, qdf_freq_t freq2); + +/** + * wlan_reg_get_chan_enum_for_freq() - Get channel enum for given channel center + * frequency + * @freq: Channel center frequency + * + * Return: Channel enum + */ +enum channel_enum wlan_reg_get_chan_enum_for_freq(qdf_freq_t freq); + +/** + * wlan_reg_update_nol_history_ch_for_freq() - Set nol-history flag for the + * channels in the list. + * + * @pdev: Pdev ptr + * @ch_list: Input channel list. + * @num_ch: Number of channels. + * @nol_history_ch: Nol history value. + * + * Return: void + */ +void wlan_reg_update_nol_history_ch_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *ch_list, + uint8_t num_ch, + bool nol_history_ch); +/** + * wlan_reg_is_frequency_valid_5g_sbs() Check if the given frequency is 5G SBS. + * @curfreq: current channel center frequency + * @newfreq:new channel center frequency + * + * Return: true if the given frequency is a valid 5G SBS + */ +#define WLAN_REG_IS_FREQUENCY_VALID_5G_SBS(curfreq, newfreq) \ + wlan_reg_is_frequency_valid_5g_sbs(curfreq, newfreq) +bool wlan_reg_is_frequency_valid_5g_sbs(qdf_freq_t curfreq, qdf_freq_t newfreq); + +/** + * wlan_reg_chan_has_dfs_attribute_for_freq() - check channel has dfs + * attribute flag + * @freq: channel center frequency. + * + * This API get chan initial dfs attribute from regdomain + * + * Return: true if chan is dfs, otherwise false + */ +bool +wlan_reg_chan_has_dfs_attribute_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq); + +/** + * wlan_reg_get_channel_list_with_power_for_freq() - Provide the channel list + * with power + * @ch_list: pointer to the channel list. + * + * Return: QDF_STATUS + */ +QDF_STATUS +wlan_reg_get_channel_list_with_power_for_freq(struct wlan_objmgr_pdev *pdev, + struct channel_power *ch_list, + uint8_t *num_chan); + +/** + * wlan_reg_get_5g_bonded_channel_state_for_freq() - Get 5G bonded channel state + * @pdev: The physical dev to program country code or regdomain + * @freq: channel frequency. + * @bw: channel band width + * + * Return: channel state + */ +enum channel_state +wlan_reg_get_5g_bonded_channel_state_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + enum phy_ch_width bw); +/** + * wlan_reg_get_2g_bonded_channel_state_for_freq() - Get 2G bonded channel state + * @pdev: The physical dev to program country code or regdomain + * @freq: channel center frequency. + * @sec_ch_freq: Secondary channel center frequency. + * @bw: channel band width + * + * Return: channel state + */ +enum channel_state +wlan_reg_get_2g_bonded_channel_state_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + qdf_freq_t sec_ch_freq, + enum phy_ch_width bw); + +/** + * wlan_reg_get_channel_state_for_freq() - Get channel state from regulatory + * @pdev: Pointer to pdev + * @freq: channel center frequency. + * + * Return: channel state + */ +enum channel_state +wlan_reg_get_channel_state_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq); + +/** + * wlan_reg_set_channel_params_for_freq() - Sets channel parameteres for + * given bandwidth + * @pdev: The physical dev to program country code or regdomain + * @freq: channel center frequency. + * @sec_ch_2g_freq: Secondary channel center frequency. + * @ch_params: pointer to the channel parameters. + * + * Return: None + */ +void wlan_reg_set_channel_params_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + qdf_freq_t sec_ch_2g_freq, + struct ch_params *ch_params); + +/** + * wlan_reg_get_channel_cfreq_reg_power_for_freq() - Provide the channel + * regulatory power + * @freq: channel center frequency + * + * Return: int + */ +uint8_t wlan_reg_get_channel_reg_power_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq); + +/** + * wlan_reg_update_nol_ch_for_freq () - set nol channel + * @pdev: pdev ptr + * @chan_freq_list: channel list to be returned + * @num_ch: number of channels + * @nol_ch: nol flag + * + * Return: void + */ +void wlan_reg_update_nol_ch_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *chan_freq_list, + uint8_t num_ch, + bool nol_ch); + +/** + * wlan_reg_is_dfs_freq() - Checks the channel state for DFS + * @freq: Channel center frequency + * + * Return: true or false + */ +bool wlan_reg_is_dfs_for_freq(struct wlan_objmgr_pdev *pdev, qdf_freq_t freq); + +/** + * wlan_reg_is_dsrc_freq() - Checks if the channel is dsrc channel or not + * @freq: Channel center frequency + * + * Return: true or false + */ +bool wlan_reg_is_dsrc_freq(qdf_freq_t freq); + +/** + * wlan_reg_is_passive_or_disable_for_freq() - Checks chan state for passive + * and disabled + * @pdev: pdev ptr + * @freq: Channel center frequency + * + * Return: true or false + */ +bool wlan_reg_is_passive_or_disable_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq); + +/** + * wlan_reg_is_disable_for_freq() - Checks chan state for disabled + * @pdev: pdev ptr + * @freq: Channel center frequency + * + * Return: true or false + */ +bool wlan_reg_is_disable_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq); + +/** + * wlan_reg_chan_to_band() - Get band from channel number + * @chan_num: channel number + * + * Return: wifi band + */ +enum reg_wifi_band wlan_reg_freq_to_band(qdf_freq_t freq); + +/** + * wlan_reg_min_chan_freq() - Minimum channel frequency supported + * + * Return: frequency + */ +qdf_freq_t wlan_reg_min_chan_freq(void); + +/** + * wlan_reg_max_chan_freq() - Return max. frequency + * + * Return: frequency + */ +qdf_freq_t wlan_reg_max_chan_freq(void); + +/** + * wlan_reg_freq_width_to_chan_op_class() -Get op class from freq + * @pdev: pdev ptr + * @freq: channel frequency + * @chan_width: channel width + * @global_tbl_lookup: whether to look up global table + * @behav_limit: behavior limit + * @op_class: operating class + * @chan_num: channel number + * + * Return: void + */ +void wlan_reg_freq_width_to_chan_op_class(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + uint16_t chan_width, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num); + +/** + * wlan_reg_freq_width_to_chan_op_class_auto() - convert frequency to + * operating class,channel + * @pdev: pdev pointer + * @freq: channel frequency in mhz + * @chan_width: channel width + * @global_tbl_lookup: whether to lookup global op class tbl + * @behav_limit: behavior limit + * @op_class: operating class + * @chan_num: channel number + * + * Return: Void. + */ +void wlan_reg_freq_width_to_chan_op_class_auto(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + uint16_t chan_width, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num); + +/** + * wlan_reg_freq_to_chan_and_op_class() - Converts freq to oper class + * @pdev: pdev ptr + * @freq: channel frequency + * @global_tbl_lookup: whether to look up global table + * @behav_limit: behavior limit + * @op_class: operating class + * @chan_num: channel number + * + * Return: void + */ +void wlan_reg_freq_to_chan_op_class(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num); + +/** + * wlan_reg_country_opclass_freq_check() - checks frequency in (ctry, op class) + * pair + * @pdev: pdev ptr + * @country: country information + * @op_class: operating class + * @chan_freq: channel frequency + * + * Return: bool + */ +bool wlan_reg_country_opclass_freq_check(struct wlan_objmgr_pdev *pdev, + const uint8_t country[3], + uint8_t op_class, + qdf_freq_t chan_freq); + +/** + * wlan_reg_get_5g_bonded_channel_and_state_for_freq()- Return the channel + * state for a 5G or 6G channel frequency based on the channel width and + * bonded channel. + * @pdev: Pointer to pdev. + * @freq: Channel center frequency. + * @bw Channel Width. + * @bonded_chan_ptr_ptr: Pointer to bonded_channel_freq. + * + * Return: Channel State + */ +enum channel_state +wlan_reg_get_5g_bonded_channel_and_state_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t freq, + enum phy_ch_width bw, + const + struct bonded_channel_freq + **bonded_chan_ptr_ptr); +#endif /*CONFIG_CHAN_FREQ_API */ + +/** + * wlan_reg_get_op_class_width() - Get operating class chan width + * @pdev: pdev ptr + * @freq: channel frequency + * @global_tbl_lookup: whether to look up global table + * @op_class: operating class + * @chan_num: channel number + * + * Return: channel width of op class + */ +uint16_t wlan_reg_get_op_class_width(struct wlan_objmgr_pdev *pdev, + uint8_t op_class, + bool global_tbl_lookup); + +/** + * wlan_reg_is_6ghz_op_class() - Whether 6ghz oper class + * @pdev: pdev ptr + * @op_class: operating class + * + * Return: bool + */ +bool wlan_reg_is_6ghz_op_class(struct wlan_objmgr_pdev *pdev, + uint8_t op_class); + +/** + * wlan_reg_is_6ghz_supported() - Whether 6ghz is supported + * @psoc: psoc ptr + * + * Return: bool + */ +bool wlan_reg_is_6ghz_supported(struct wlan_objmgr_psoc *psoc); + +#ifdef HOST_OPCLASS_EXT +/** + * wlan_reg_country_chan_opclass_to_freq() - Convert channel number to + * frequency based on country code and op class + * @pdev: pdev object. + * @country: country code. + * @chan: IEEE Channel Number. + * @op_class: Opclass. + * @strict: flag to find channel from matched operating class code. + * + * Look up (channel, operating class) pair in country operating class tables + * and return the channel frequency. + * If not found and "strict" flag is false, try to get frequency (Mhz) by + * channel number only. + * + * Return: Channel center frequency else return 0. + */ +qdf_freq_t +wlan_reg_country_chan_opclass_to_freq(struct wlan_objmgr_pdev *pdev, + const uint8_t country[3], + uint8_t chan, uint8_t op_class, + bool strict); +#endif + +/** + * reg_chan_opclass_to_freq() - Convert channel number and opclass to frequency + * @chan: IEEE Channel Number. + * @op_class: Opclass. + * @global_tbl_lookup: Global table lookup. + * + * Return: Channel center frequency else return 0. + */ +uint16_t wlan_reg_chan_opclass_to_freq(uint8_t chan, + uint8_t op_class, + bool global_tbl_lookup); +#ifdef CONFIG_REG_CLIENT +/** + * wlan_reg_band_bitmap_to_band_info() - Convert the band_bitmap to a + * band_info enum + * @band_bitmap: bitmap on top of reg_wifi_band of bands enabled + * + * Return: BAND_ALL if both 2G and 5G band is enabled + * BAND_2G if 2G is enabled but 5G isn't + * BAND_5G if 5G is enabled but 2G isn't + */ +enum band_info wlan_reg_band_bitmap_to_band_info(uint32_t band_bitmap); +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..1014c325e75bf146ab804312b0dfff22e39b88ae --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_tgt_api.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_reg_tgt_api.h + * This file provides prototypes of the regulatory component target + * interface routines + */ + +#ifndef __WLAN_REG_TGT_API_H +#define __WLAN_REG_TGT_API_H + +QDF_STATUS tgt_reg_process_master_chan_list(struct cur_regulatory_info + *reg_info); + +/** + * tgt_reg_process_11d_new_country() - process new 11d country event + * @psoc: pointer to psoc + * @reg_11d_new_cc: new 11d country pointer + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_reg_process_11d_new_country(struct wlan_objmgr_psoc *psoc, + struct reg_11d_new_country *reg_11d_new_cc); + +/** + * tgt_reg_set_regdb_offloaded() - set/clear regulatory offloaded flag + * + * @psoc: psoc pointer + * Return: Success or Failure + */ +QDF_STATUS tgt_reg_set_regdb_offloaded(struct wlan_objmgr_psoc *psoc, + bool val); + +/** + * tgt_reg_set_11d_offloaded() - set/clear 11d offloaded flag + * + * @psoc: psoc pointer + * Return: Success or Failure + */ +QDF_STATUS tgt_reg_set_11d_offloaded(struct wlan_objmgr_psoc *psoc, + bool val); +/** + * tgt_reg_process_ch_avoid_event() - process new ch avoid event + * @psoc: pointer to psoc + * @ch_avoid_evnt: channel avoid event + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_reg_process_ch_avoid_event(struct wlan_objmgr_psoc *psoc, + struct ch_avoid_ind_type *ch_avoid_evnt); + +/** + * tgt_reg_ignore_fw_reg_offload_ind() - Check whether regdb offload indication + * from FW needs to be ignored. + * @psoc: Pointer to psoc + */ +bool tgt_reg_ignore_fw_reg_offload_ind(struct wlan_objmgr_psoc *psoc); + +/** + * tgt_reg_set_6ghz_supported() - Whether 6ghz is supported by the chip + * @psoc: Pointer to psoc + * @val: value + */ +QDF_STATUS tgt_reg_set_6ghz_supported(struct wlan_objmgr_psoc *psoc, + bool val); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..657bce376bd664df5e319783693abf558501a634 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_ucfg_api.h @@ -0,0 +1,407 @@ +/* + * Copyright (c) 2017-2019, 2021 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_reg_ucfg_api.h + * This file provides prototypes of the regulatory component user + * config interface routines + */ + +#ifndef __WLAN_REG_UCFG_API_H +#define __WLAN_REG_UCFG_API_H + +typedef QDF_STATUS (*reg_event_cb)(void *status_struct); + +/** + * ucfg_reg_set_band() - Sets the band information for the PDEV + * @pdev: The physical pdev to set the band for + * @band_bitmap: The band bitmap parameter (over reg_wifi_band) to configure + * for the physical device + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_band(struct wlan_objmgr_pdev *pdev, + uint32_t band_bitmap); + +/** + * ucfg_reg_get_band() - Gets the band information for the PDEV + * @pdev: The physical pdev to get the band for + * @band_bitmap: The band parameter of the physical device + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_get_band(struct wlan_objmgr_pdev *pdev, + uint32_t *band_bitmap); + +/** + * ucfg_reg_notify_sap_event() - Notify regulatory domain for sap event + * @pdev: The physical dev to set the band for + * @sap_state: true for sap start else false + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_notify_sap_event(struct wlan_objmgr_pdev *pdev, + bool sap_state); + +/** + * ucfg_reg_cache_channel_state() - Cache the current state of the channles + * @pdev: The physical dev to cache the channels for + * @channel_list: List of the channels for which states needs to be cached + * @num_channels: Number of channels in the list + * + * Return: QDF_STATUS + */ +#if defined(DISABLE_CHANNEL_LIST) && defined(CONFIG_CHAN_NUM_API) +void ucfg_reg_cache_channel_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, + uint32_t num_channels); +#else +static inline +void ucfg_reg_cache_channel_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, + uint32_t num_channels) +{ +} +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * ucfg_reg_cache_channel_freq_state() - Cache the current state of the + * channels based on the channel center frequency. + * @pdev: Pointer to pdev. + * @channel_list: List of the channels for which states need to be cached. + * @num_channels: Number of channels in the list. + * + * Return: QDF_STATUS + */ +#if defined(DISABLE_CHANNEL_LIST) && defined(CONFIG_CHAN_FREQ_API) +void ucfg_reg_cache_channel_freq_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, + uint32_t num_channels); +#else +static inline +void ucfg_reg_cache_channel_freq_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, + uint32_t num_channels) +{ +} +#endif /* CONFIG_CHAN_FREQ_API */ + + +#ifdef DISABLE_CHANNEL_LIST +/** + * ucfg_reg_disable_cached_channels() - Disable cached channels + * @pdev: The physical dev to cache the channels for + * + * Return: None + */ +void ucfg_reg_disable_cached_channels(struct wlan_objmgr_pdev *pdev); + +/** + * ucfg_reg_restore_cached_channels() - Restore disabled cached channels + * @pdev: The physical dev to cache the channels for + * + * Return: None + */ +void ucfg_reg_restore_cached_channels(struct wlan_objmgr_pdev *pdev); +#else +static inline +void ucfg_reg_disable_cached_channels(struct wlan_objmgr_pdev *pdev) +{ +} +static inline +void ucfg_reg_restore_cached_channels(struct wlan_objmgr_pdev *pdev) +{ +} +#endif + +/** + * ucfg_reg_set_fcc_constraint() - apply fcc constraints on channels 12/13 + * @pdev: The physical pdev to reduce tx power for + * + * This function adjusts the transmit power on channels 12 and 13, to comply + * with FCC regulations in the USA. + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_fcc_constraint(struct wlan_objmgr_pdev *pdev, + bool fcc_constraint); + +/** + * ucfg_reg_get_default_country() - Get the default regulatory country + * @psoc: The physical SoC to get default country from + * @country_code: the buffer to populate the country code into + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_get_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code); + +/** + * ucfg_reg_get_current_country() - Get the current regulatory country + * @psoc: The physical SoC to get current country from + * @country_code: the buffer to populate the country code into + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_get_current_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code); +/** + * ucfg_reg_set_default_country() - Set the default regulatory country + * @psoc: The physical SoC to set default country for + * @country_code: The country information to configure + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code); + +/** + * ucfg_reg_set_country() - Set the current regulatory country + * @pdev: The physical dev to set current country for + * @country_code: The country information to configure + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_country(struct wlan_objmgr_pdev *dev, + uint8_t *country_code); + +/** + * ucfg_reg_reset_country() - Reset the regulatory country to default + * @psoc: The physical SoC to reset country for + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_reset_country(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_reg_enable_dfs_channels() - Enable the use of DFS channels + * @pdev: The physical dev to enable DFS channels for + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_enable_dfs_channels(struct wlan_objmgr_pdev *pdev, + bool dfs_enable); + +QDF_STATUS ucfg_reg_register_event_handler(uint8_t vdev_id, reg_event_cb cb, + void *arg); +QDF_STATUS ucfg_reg_unregister_event_handler(uint8_t vdev_id, reg_event_cb cb, + void *arg); +QDF_STATUS ucfg_reg_init_handler(uint8_t pdev_id); + +QDF_STATUS ucfg_reg_program_default_cc(struct wlan_objmgr_pdev *pdev, + uint16_t regdmn); + +/** + * ucfg_reg_program_cc() - Program user country code or regdomain + * @pdev: The physical dev to program country code or regdomain + * @rd: User country code or regdomain + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_program_cc(struct wlan_objmgr_pdev *pdev, + struct cc_regdmn_s *rd); + +/** + * ucfg_reg_get_current_cc() - get current country code or regdomain + * @pdev: The physical dev to program country code or regdomain + * @rd: Pointer to country code or regdomain + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_get_current_cc(struct wlan_objmgr_pdev *pdev, + struct cc_regdmn_s *rd); + +/** + * ucfg_reg_set_config_vars () - Set the config vars in reg component + * @psoc: psoc ptr + * @config_vars: config variables structure + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_config_vars(struct wlan_objmgr_psoc *psoc, + struct reg_config_vars config_vars); + +/** + * ucfg_reg_get_current_chan_list () - get current channel list + * @pdev: pdev ptr + * @chan_list: channel list + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_get_current_chan_list(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list); + +/** + * ucfg_reg_modify_chan_144() - Enable/Disable channel 144 + * @pdev: pdev pointer + * @enable_chan_144: flag to disable/enable channel 144 + * + * Return: Success or Failure + */ +QDF_STATUS ucfg_reg_modify_chan_144(struct wlan_objmgr_pdev *pdev, + bool enable_ch_144); + +/** + * ucfg_reg_get_en_chan_144() - get en_chan_144 flag value + * @pdev: pdev pointer + * + * Return: en_chan_144 flag value + */ +bool ucfg_reg_get_en_chan_144(struct wlan_objmgr_pdev *pdev); + +/** + * ucfg_reg_is_regdb_offloaded () - is regulatory database offloaded + * @psoc: psoc ptr + * + * Return: bool + */ +bool ucfg_reg_is_regdb_offloaded(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_reg_program_mas_chan_list () - program master channel list + * @psoc: psoc ptr + * @reg_channels: regulatory channels + * @alpha2: country code + * @dfs_region: dfs region + * + * Return: void + */ +void ucfg_reg_program_mas_chan_list(struct wlan_objmgr_psoc *psoc, + struct regulatory_channel *reg_channels, + uint8_t *alpha2, + enum dfs_reg dfs_region); + +/** + * ucfg_reg_get_regd_rules() - provides the reg domain rules info pointer + * @pdev: pdev ptr + * @reg_rules: regulatory rules + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_get_regd_rules(struct wlan_objmgr_pdev *pdev, + struct reg_rule_info *reg_rules); + +/** + * ucfg_reg_register_chan_change_callback () - add chan change cbk + * @psoc: psoc ptr + * @cbk: callback + * @arg: argument + * + * Return: void + */ +void ucfg_reg_register_chan_change_callback(struct wlan_objmgr_psoc *psoc, + void *cbk, void *arg); + +/** + * ucfg_reg_unregister_chan_change_callback () - remove chan change cbk + * @psoc: psoc ptr + * @cbk: callback + * + * Return: void + */ +void ucfg_reg_unregister_chan_change_callback(struct wlan_objmgr_psoc *psoc, + void *cbk); + +/** + * ucfg_reg_get_cc_and_src () - get country code and src + * @psoc: psoc ptr + * @alpha2: country code alpha2 + * + * Return: void + */ +enum country_src ucfg_reg_get_cc_and_src(struct wlan_objmgr_psoc *psoc, + uint8_t *alpha2); + +/** + * ucfg_reg_unit_simulate_ch_avoid () - fake a ch avoid event + * @psoc: psoc ptr + * @ch_avoid: ch_avoid_ind_type ranges + * + * This function inject a ch_avoid event for unit test sap chan switch. + * + * Return: void + */ +void ucfg_reg_unit_simulate_ch_avoid(struct wlan_objmgr_psoc *psoc, + struct ch_avoid_ind_type *ch_avoid); + +/** + * ucfg_reg_11d_vdev_delete_update() - update vdev delete to regulatory + * @vdev: vdev ptr + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_11d_vdev_delete_update(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_reg_11d_vdev_created_update() - update vdev create to regulatory + * @vdev: vdev ptr + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_11d_vdev_created_update(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_reg_get_hal_reg_cap() - return hal reg cap + * @psoc: psoc ptr + * + * Return: ptr to wlan_psoc_host_hal_reg_capabilities_ext + */ +struct wlan_psoc_host_hal_reg_capabilities_ext *ucfg_reg_get_hal_reg_cap( + struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_reg_set_hal_reg_cap() - update hal reg cap + * @psoc: psoc ptr + * @reg_cap: Regulatory cap array + * @phy_cnt: Number of phy + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_hal_reg_cap(struct wlan_objmgr_psoc *psoc, + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap, + uint16_t phy_cnt); + +/** + * ucfg_set_ignore_fw_reg_offload_ind() - API to set ignore regdb offload ind + * @psoc: psoc ptr + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_set_ignore_fw_reg_offload_ind(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_reg_get_unii_5g_bitmap() - get unii_5g_bitmap value + * @pdev: pdev pointer + * @bitmap: Pointer to retrieve unii_5g_bitmap of enum reg_unii_band. + * + * Return: QDF_STATUS + */ +#ifdef DISABLE_UNII_SHARED_BANDS +QDF_STATUS +ucfg_reg_get_unii_5g_bitmap(struct wlan_objmgr_pdev *pdev, uint8_t *bitmap); +#else +static inline QDF_STATUS +ucfg_reg_get_unii_5g_bitmap(struct wlan_objmgr_pdev *pdev, uint8_t *bitmap) +{ + *bitmap = 0; + return QDF_STATUS_SUCCESS; +} +#endif + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_services_api.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_services_api.c new file mode 100644 index 0000000000000000000000000000000000000000..baa186a724ae204d8bca7b9f461ea5c978546af4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_services_api.c @@ -0,0 +1,1125 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * @file wlan_reg_services_api.c + * @brief contains regulatory service functions + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include "../../core/src/reg_priv_objs.h" +#include "../../core/src/reg_utils.h" +#include "../../core/src/reg_services_common.h" +#include "../../core/src/reg_db.h" +#include "../../core/src/reg_db_parser.h" +#include <../../core/src/reg_build_chan_list.h> +#include <../../core/src/reg_opclass.h> +#include <../../core/src/reg_callbacks.h> +#include <../../core/src/reg_offload_11d_scan.h> +#include + +#ifdef CONFIG_CHAN_NUM_API +/** + * wlan_reg_get_channel_list_with_power() - Provide the channel list with power + * @ch_list: pointer to the channel list. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_reg_get_channel_list_with_power(struct wlan_objmgr_pdev *pdev, + struct channel_power *ch_list, + uint8_t *num_chan) +{ + /* + * Update the channel list with channel information with power. + */ + return reg_get_channel_list_with_power(pdev, ch_list, num_chan); +} +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * wlan_reg_read_default_country() - Read the default country for the regdomain + * @country: pointer to the country code. + * + * Return: None + */ +QDF_STATUS wlan_reg_read_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country) +{ + /* + * Get the default country information + */ + return reg_read_default_country(psoc, country); +} + +QDF_STATUS wlan_reg_read_current_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country) +{ + /* + * Get the current country information + */ + return reg_read_current_country(psoc, country); +} + +#ifdef CONFIG_CHAN_NUM_API +/** + * wlan_reg_get_channel_state() - Get channel state from regulatory + * @ch: channel number. + * + * Return: channel state + */ +enum channel_state wlan_reg_get_channel_state(struct wlan_objmgr_pdev *pdev, + uint8_t ch) +{ + /* + * Get channel state from regulatory + */ + return reg_get_channel_state(pdev, ch); +} + +bool +wlan_reg_chan_has_dfs_attribute(struct wlan_objmgr_pdev *pdev, uint8_t ch) +{ + return reg_chan_has_dfs_attribute(pdev, ch); +} + +/** + * wlan_reg_get_5g_bonded_channel_state() - Get 5G bonded channel state + * @ch: channel number. + * @bw: channel band width + * + * Return: channel state + */ +enum channel_state wlan_reg_get_5g_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, uint8_t ch, + enum phy_ch_width bw) +{ + /* + * Get channel state from regulatory + */ + return reg_get_5g_bonded_channel_state(pdev, ch, bw); +} + +/** + * wlan_reg_get_2g_bonded_channel_state() - Get 2G bonded channel state + * @ch: channel number. + * @bw: channel band width + * + * Return: channel state + */ +enum channel_state wlan_reg_get_2g_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, uint8_t ch, + uint8_t sec_ch, enum phy_ch_width bw) +{ + /* + * Get channel state from regulatory + */ + return reg_get_2g_bonded_channel_state(pdev, ch, sec_ch, bw); +} + +/** + * wlan_reg_set_channel_params() - Sets channel parameteres for given bandwidth + * @ch: channel number. + * @ch_params: pointer to the channel parameters. + * + * Return: None + */ +void wlan_reg_set_channel_params(struct wlan_objmgr_pdev *pdev, uint8_t ch, + uint8_t sec_ch_2g, + struct ch_params *ch_params) +{ + /* + * Set channel parameters like center frequency for a bonded channel + * state. Also return the maximum bandwidth supported by the channel. + */ + reg_set_channel_params(pdev, ch, sec_ch_2g, ch_params); +} +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * wlan_reg_get_dfs_region () - Get the current dfs region + * @dfs_reg: pointer to dfs region + * + * Return: Status + */ +QDF_STATUS wlan_reg_get_dfs_region(struct wlan_objmgr_pdev *pdev, + enum dfs_reg *dfs_reg) +{ + /* + * Get the current dfs region + */ + reg_get_current_dfs_region(pdev, dfs_reg); + + return QDF_STATUS_SUCCESS; +} + +#ifdef CONFIG_CHAN_NUM_API +uint32_t wlan_reg_get_channel_reg_power(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num) +{ + return reg_get_channel_reg_power(pdev, chan_num); +} + +/** + * wlan_reg_get_channel_freq() - get regulatory power for channel + * @chan_num: channel number + * + * Return: int + */ +qdf_freq_t wlan_reg_get_channel_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num) +{ + return reg_get_channel_freq(pdev, chan_num); +} +#endif /* CONFIG_CHAN_NUM_API */ + +QDF_STATUS wlan_reg_get_current_chan_list(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list) +{ + return reg_get_current_chan_list(pdev, chan_list); +} + +qdf_export_symbol(wlan_reg_get_current_chan_list); + +/** + * wlan_reg_get_bw_value() - give bandwidth value + * bw: bandwidth enum + * + * Return: uint16_t + */ +uint16_t wlan_reg_get_bw_value(enum phy_ch_width bw) +{ + return reg_get_bw_value(bw); +} + +qdf_export_symbol(wlan_reg_get_bw_value); + +#ifdef CONFIG_CHAN_NUM_API +/** + * wlan_reg_get_bonded_channel_state() - Get 2G bonded channel state + * @ch: channel number. + * @bw: channel band width + * + * Return: channel state + */ +enum channel_state wlan_reg_get_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, uint8_t ch, + enum phy_ch_width bw, uint8_t sec_ch) +{ + if (WLAN_REG_IS_24GHZ_CH(ch)) + return reg_get_2g_bonded_channel_state(pdev, ch, + sec_ch, bw); + else + return reg_get_5g_bonded_channel_state(pdev, ch, + bw); +} +#endif /* CONFIG_CHAN_NUM_API */ + +/** + * wlan_reg_set_dfs_region () - Get the current dfs region + * @dfs_reg: pointer to dfs region + * + * Return: None + */ +void wlan_reg_set_dfs_region(struct wlan_objmgr_pdev *pdev, + enum dfs_reg dfs_reg) +{ + reg_set_dfs_region(pdev, dfs_reg); +} + +QDF_STATUS wlan_reg_get_domain_from_country_code(v_REGDOMAIN_t *reg_domain_ptr, + const uint8_t *country_alpha2, enum country_src source) +{ + + return reg_get_domain_from_country_code(reg_domain_ptr, + country_alpha2, source); +} + + +uint16_t wlan_reg_dmn_get_opclass_from_channel(uint8_t *country, + uint8_t channel, + uint8_t offset) +{ + return reg_dmn_get_opclass_from_channel(country, channel, + offset); +} + +uint8_t wlan_reg_get_opclass_from_freq_width(uint8_t *country, + qdf_freq_t freq, + uint8_t ch_width, + uint16_t behav_limit) +{ + return reg_dmn_get_opclass_from_freq_width(country, freq, ch_width, + behav_limit); +} + +void wlan_reg_dmn_print_channels_in_opclass(uint8_t *country, + uint8_t opclass) +{ + reg_dmn_print_channels_in_opclass(country, opclass); +} + +uint16_t wlan_reg_dmn_get_chanwidth_from_opclass(uint8_t *country, + uint8_t channel, + uint8_t opclass) +{ + return reg_dmn_get_chanwidth_from_opclass(country, channel, + opclass); +} + +uint16_t wlan_reg_dmn_set_curr_opclasses(uint8_t num_classes, + uint8_t *class) +{ + return reg_dmn_set_curr_opclasses(num_classes, class); +} + +uint16_t wlan_reg_dmn_get_curr_opclasses(uint8_t *num_classes, + uint8_t *class) +{ + return reg_dmn_get_curr_opclasses(num_classes, class); +} + +QDF_STATUS +wlan_reg_get_opclass_details(struct wlan_objmgr_pdev *pdev, + struct regdmn_ap_cap_opclass_t *reg_ap_cap, + uint8_t *n_opclasses, + uint8_t max_supp_op_class, + bool global_tbl_lookup) +{ + return reg_get_opclass_details(pdev, reg_ap_cap, n_opclasses, + max_supp_op_class, + global_tbl_lookup); +} + +QDF_STATUS wlan_regulatory_init(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_register_psoc_create_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_psoc_obj_created_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to register reg psoc obj create handler"); + return status; + } + + status = wlan_objmgr_register_psoc_destroy_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_psoc_obj_destroyed_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to register reg psoc obj create handler"); + goto unreg_psoc_create; + } + + status = wlan_objmgr_register_pdev_create_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_pdev_obj_created_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to register reg psoc obj create handler"); + goto unreg_psoc_destroy; + } + + status = wlan_objmgr_register_pdev_destroy_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_pdev_obj_destroyed_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to register reg psoc obj create handler"); + goto unreg_pdev_create; + } + channel_map = channel_map_global; + reg_debug("regulatory handlers registered with obj mgr"); + + return status; + +unreg_pdev_create: + status = wlan_objmgr_unregister_pdev_create_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_pdev_obj_created_notification, + NULL); + +unreg_psoc_destroy: + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_psoc_obj_destroyed_notification, + NULL); + +unreg_psoc_create: + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_psoc_obj_created_notification, + NULL); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wlan_regulatory_deinit(void) +{ + QDF_STATUS status, ret_status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_unregister_pdev_destroy_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_pdev_obj_destroyed_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to unregister reg pdev obj destroy handler"); + ret_status = status; + } + + status = wlan_objmgr_unregister_pdev_create_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_pdev_obj_created_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to unregister reg pdev obj create handler"); + ret_status = status; + } + + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_psoc_obj_destroyed_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to unregister reg psoc obj destroy handler"); + ret_status = status; + } + + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_psoc_obj_created_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to unregister reg psoc obj create handler"); + ret_status = status; + } + + reg_debug("deregistered callbacks with obj mgr"); + + return ret_status; +} + +QDF_STATUS regulatory_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_reg_tx_ops *tx_ops; + + tx_ops = reg_get_psoc_tx_ops(psoc); + if (tx_ops->register_master_handler) + tx_ops->register_master_handler(psoc, NULL); + if (tx_ops->register_11d_new_cc_handler) + tx_ops->register_11d_new_cc_handler(psoc, NULL); + if (tx_ops->register_ch_avoid_event_handler) + tx_ops->register_ch_avoid_event_handler(psoc, NULL); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS regulatory_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_reg_tx_ops *tx_ops; + + tx_ops = reg_get_psoc_tx_ops(psoc); + if (tx_ops->unregister_11d_new_cc_handler) + tx_ops->unregister_11d_new_cc_handler(psoc, NULL); + if (tx_ops->unregister_master_handler) + tx_ops->unregister_master_handler(psoc, NULL); + if (tx_ops->unregister_ch_avoid_event_handler) + tx_ops->unregister_ch_avoid_event_handler(psoc, NULL); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS regulatory_pdev_open(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *parent_psoc; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev private obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pdev_priv_obj->pdev_opened = true; + + parent_psoc = wlan_pdev_get_psoc(pdev); + + reg_send_scheduler_msg_nb(parent_psoc, pdev); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS regulatory_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_regulatory_psoc_priv_obj *soc_reg; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev private obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pdev_priv_obj->pdev_opened = false; + + psoc = wlan_pdev_get_psoc(pdev); + soc_reg = reg_get_psoc_obj(psoc); + if (!soc_reg) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + reg_reset_ctry_pending_hints(soc_reg); + + return QDF_STATUS_SUCCESS; +} + +#ifdef CONFIG_CHAN_NUM_API +void wlan_reg_update_nol_ch(struct wlan_objmgr_pdev *pdev, uint8_t *ch_list, + uint8_t num_ch, bool nol_ch) +{ + reg_update_nol_ch(pdev, ch_list, num_ch, nol_ch); +} + +void wlan_reg_update_nol_history_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *ch_list, uint8_t num_ch, + bool nol_history_ch) +{ + reg_update_nol_history_ch(pdev, ch_list, num_ch, nol_history_ch); +} + +bool wlan_reg_is_passive_or_disable_ch(struct wlan_objmgr_pdev *pdev, + uint8_t chan) +{ + return reg_is_passive_or_disable_ch(pdev, chan); +} + +bool wlan_reg_is_disable_ch(struct wlan_objmgr_pdev *pdev, + uint8_t chan) +{ + return reg_is_disable_ch(pdev, chan); +} +#endif /* CONFIG_CHAN_NUM_API */ + +uint8_t wlan_reg_freq_to_chan(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + return reg_freq_to_chan(pdev, freq); +} + +qdf_export_symbol(wlan_reg_freq_to_chan); + +#ifdef CONFIG_CHAN_NUM_API +qdf_freq_t wlan_reg_chan_to_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num) +{ + return reg_chan_to_freq(pdev, chan_num); +} + +qdf_export_symbol(wlan_reg_chan_to_freq); + +qdf_freq_t wlan_reg_legacy_chan_to_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num) +{ + return reg_legacy_chan_to_freq(pdev, chan_num); +} + +bool wlan_reg_chan_is_49ghz(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num) +{ + return reg_chan_is_49ghz(pdev, chan_num); +} +#endif /* CONFIG_CHAN_NUM_API */ + +QDF_STATUS wlan_reg_set_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country) +{ + return reg_set_country(pdev, country); +} + +QDF_STATUS wlan_reg_set_11d_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country) +{ + return reg_set_11d_country(pdev, country); +} + +bool wlan_reg_is_world(uint8_t *country) +{ + return reg_is_world_alpha2(country); +} + +bool wlan_reg_is_us(uint8_t *country) +{ + return reg_is_us_alpha2(country); +} + +void wlan_reg_register_chan_change_callback(struct wlan_objmgr_psoc *psoc, + void *cbk, void *arg) +{ + reg_register_chan_change_callback(psoc, (reg_chan_change_callback)cbk, + arg); + +} + +void wlan_reg_unregister_chan_change_callback(struct wlan_objmgr_psoc *psoc, + void *cbk) +{ + reg_unregister_chan_change_callback(psoc, + (reg_chan_change_callback)cbk); +} + +bool wlan_reg_is_11d_offloaded(struct wlan_objmgr_psoc *psoc) +{ + return reg_is_11d_offloaded(psoc); +} + +bool wlan_reg_11d_enabled_on_host(struct wlan_objmgr_psoc *psoc) +{ + return reg_11d_enabled_on_host(psoc); +} + +#ifdef CONFIG_CHAN_NUM_API +bool wlan_reg_is_dsrc_chan(struct wlan_objmgr_pdev *pdev, uint8_t chan_num) +{ + return reg_is_dsrc_chan(pdev, chan_num); +} + +bool wlan_reg_is_etsi13_srd_chan(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num) +{ + return reg_is_etsi13_srd_chan(pdev, chan_num); +} +#endif /* CONFIG_CHAN_NUM_API */ + +bool wlan_reg_is_etsi13_regdmn(struct wlan_objmgr_pdev *pdev) +{ + return reg_is_etsi13_regdmn(pdev); +} + +bool wlan_reg_is_etsi13_srd_chan_allowed_master_mode(struct wlan_objmgr_pdev + *pdev) +{ + return reg_is_etsi13_srd_chan_allowed_master_mode(pdev); +} + +bool wlan_reg_get_fcc_constraint(struct wlan_objmgr_pdev *pdev, uint32_t freq) +{ + return reg_get_fcc_constraint(pdev, freq); +} + +QDF_STATUS wlan_reg_get_chip_mode(struct wlan_objmgr_pdev *pdev, + uint32_t *chip_mode) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_REGULATORY); + + if (!pdev_priv_obj) { + reg_err("reg pdev private obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + *chip_mode = pdev_priv_obj->wireless_modes; + + return QDF_STATUS_SUCCESS; +} + +bool wlan_reg_is_11d_scan_inprogress(struct wlan_objmgr_psoc *psoc) +{ + return reg_is_11d_scan_inprogress(psoc); +} + +QDF_STATUS wlan_reg_get_freq_range(struct wlan_objmgr_pdev *pdev, + qdf_freq_t *low_2g, + qdf_freq_t *high_2g, + qdf_freq_t *low_5g, + qdf_freq_t *high_5g) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_REGULATORY); + + if (!pdev_priv_obj) { + reg_err("reg pdev private obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + *low_2g = pdev_priv_obj->range_2g_low; + *high_2g = pdev_priv_obj->range_2g_high; + *low_5g = pdev_priv_obj->range_5g_low; + *high_5g = pdev_priv_obj->range_5g_high; + + return QDF_STATUS_SUCCESS; +} + +struct wlan_lmac_if_reg_tx_ops * +wlan_reg_get_tx_ops(struct wlan_objmgr_psoc *psoc) +{ + return reg_get_psoc_tx_ops(psoc); +} + +QDF_STATUS wlan_reg_get_curr_regdomain(struct wlan_objmgr_pdev *pdev, + struct cur_regdmn_info *cur_regdmn) +{ + return reg_get_curr_regdomain(pdev, cur_regdmn); +} + +#ifdef CONFIG_CHAN_NUM_API +uint8_t wlan_reg_min_24ghz_ch_num(void) +{ + return reg_min_24ghz_ch_num(); +} + +uint8_t wlan_reg_max_24ghz_ch_num(void) +{ + return reg_max_24ghz_ch_num(); +} + +uint8_t wlan_reg_min_5ghz_ch_num(void) +{ + return reg_min_5ghz_ch_num(); +} + +uint8_t wlan_reg_max_5ghz_ch_num(void) +{ + return reg_max_5ghz_ch_num(); +} +#endif /* CONFIG_CHAN_NUM_API */ + +#ifdef CONFIG_CHAN_FREQ_API +qdf_freq_t wlan_reg_min_24ghz_chan_freq(void) +{ + return reg_min_24ghz_chan_freq(); +} + +qdf_freq_t wlan_reg_max_24ghz_chan_freq(void) +{ + return reg_max_24ghz_chan_freq(); +} + +qdf_freq_t wlan_reg_min_5ghz_chan_freq(void) +{ + return reg_min_5ghz_chan_freq(); +} + +qdf_freq_t wlan_reg_max_5ghz_chan_freq(void) +{ + return reg_max_5ghz_chan_freq(); +} +#endif /* CONFIG_CHAN_FREQ_API */ + +#ifdef CONFIG_CHAN_NUM_API +bool wlan_reg_is_24ghz_ch(uint8_t chan) +{ + return reg_is_24ghz_ch(chan); +} + +bool wlan_reg_is_5ghz_ch(uint8_t chan) +{ + return reg_is_5ghz_ch(chan); +} +#endif /* CONFIG_CHAN_NUM_API */ + +bool wlan_reg_is_24ghz_ch_freq(qdf_freq_t freq) +{ + return reg_is_24ghz_ch_freq(freq); +} + +bool wlan_reg_is_5ghz_ch_freq(qdf_freq_t freq) +{ + return reg_is_5ghz_ch_freq(freq); +} + +bool wlan_reg_is_freq_indoor(struct wlan_objmgr_pdev *pdev, qdf_freq_t freq) +{ + return reg_is_freq_indoor(pdev, freq); +} + +#ifdef CONFIG_BAND_6GHZ +bool wlan_reg_is_6ghz_chan_freq(uint16_t freq) +{ + return reg_is_6ghz_chan_freq(freq); +} + +uint16_t wlan_reg_min_6ghz_chan_freq(void) +{ + return reg_min_6ghz_chan_freq(); +} + +uint16_t wlan_reg_max_6ghz_chan_freq(void) +{ + return reg_max_6ghz_chan_freq(); +} + +bool wlan_reg_is_6ghz_psc_chan_freq(uint16_t freq) +{ + return reg_is_6ghz_psc_chan_freq(freq); +} + +#endif /* CONFIG_BAND_6GHZ */ + +uint16_t +wlan_reg_get_band_channel_list(struct wlan_objmgr_pdev *pdev, + uint8_t band_mask, + struct regulatory_channel *channel_list) +{ + if (!pdev) { + reg_err("pdev object is NULL"); + return 0; + } + + return reg_get_band_channel_list(pdev, band_mask, channel_list); +} + +qdf_freq_t wlan_reg_chan_band_to_freq(struct wlan_objmgr_pdev *pdev, + uint8_t chan, uint8_t band_mask) +{ + return reg_chan_band_to_freq(pdev, chan, band_mask); +} + +bool wlan_reg_is_49ghz_freq(qdf_freq_t freq) +{ + return reg_is_49ghz_freq(freq); +} + +uint8_t wlan_reg_ch_num(uint32_t ch_enum) +{ + return reg_ch_num(ch_enum); +} + +qdf_freq_t wlan_reg_ch_to_freq(uint32_t ch_enum) +{ + return reg_ch_to_freq(ch_enum); +} + +#ifdef CONFIG_CHAN_NUM_API +bool wlan_reg_is_same_band_channels(uint8_t chan_num1, uint8_t chan_num2) +{ + return reg_is_same_band_channels(chan_num1, chan_num2); +} + +bool wlan_reg_is_channel_valid_5g_sbs(uint8_t curchan, uint8_t newchan) +{ + return reg_is_channel_valid_5g_sbs(curchan, newchan); +} + +enum band_info wlan_reg_chan_to_band(uint8_t chan_num) +{ + return reg_chan_to_band(chan_num); +} + +qdf_export_symbol(wlan_reg_chan_to_band); + +/** + * wlan_reg_get_chan_enum() - Get channel enum for given channel number + * @chan_num: Channel number + * + * Return: Channel enum + */ +enum channel_enum wlan_reg_get_chan_enum(uint8_t chan_num) +{ + return reg_get_chan_enum(chan_num); +} +#endif /* CONFIG_CHAN_NUM_API */ + +bool wlan_reg_is_regdmn_en302502_applicable(struct wlan_objmgr_pdev *pdev) +{ + return reg_is_regdmn_en302502_applicable(pdev); +} + +/** + * wlan_reg_modify_pdev_chan_range() - Compute current channel list for the + * modified regcap. + * @pdev: pointer to struct wlan_objmgr_pdev + * + */ +QDF_STATUS wlan_reg_modify_pdev_chan_range(struct wlan_objmgr_pdev *pdev) +{ + return reg_modify_pdev_chan_range(pdev); +} + +#ifdef DISABLE_UNII_SHARED_BANDS +QDF_STATUS wlan_reg_disable_chan_coex(struct wlan_objmgr_pdev *pdev, + uint8_t unii_5g_bitmap) +{ + return reg_disable_chan_coex(pdev, unii_5g_bitmap); +} +#endif + +#ifdef CONFIG_CHAN_FREQ_API +bool wlan_reg_is_same_band_freqs(qdf_freq_t freq1, qdf_freq_t freq2) +{ + return reg_is_same_band_freqs(freq1, freq2); +} + +bool wlan_reg_is_frequency_valid_5g_sbs(qdf_freq_t curfreq, qdf_freq_t newfreq) +{ + return reg_is_frequency_valid_5g_sbs(curfreq, newfreq); +} + +enum channel_enum wlan_reg_get_chan_enum_for_freq(qdf_freq_t freq) +{ + return reg_get_chan_enum_for_freq(freq); +} + +bool wlan_reg_is_etsi13_srd_chan_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + return reg_is_etsi13_srd_chan_for_freq(pdev, freq); +} + +bool wlan_reg_is_dsrc_freq(qdf_freq_t freq) +{ + return reg_is_dsrc_freq(freq); +} + +void wlan_reg_update_nol_ch_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *chan_freq_list, + uint8_t num_ch, + bool nol_ch) +{ + reg_update_nol_ch_for_freq(pdev, chan_freq_list, num_ch, nol_ch); +} + +void wlan_reg_update_nol_history_ch_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t *ch_list, + uint8_t num_ch, + bool nol_history_ch) +{ + reg_update_nol_history_ch_for_freq(pdev, + ch_list, + num_ch, + nol_history_ch); +} + +bool wlan_reg_is_dfs_for_freq(struct wlan_objmgr_pdev *pdev, qdf_freq_t freq) +{ + return reg_is_dfs_for_freq(pdev, freq); +} + +bool wlan_reg_is_passive_or_disable_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + return reg_is_passive_or_disable_for_freq(pdev, freq); +} + +bool wlan_reg_is_disable_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + return reg_is_disable_for_freq(pdev, freq); +} + +QDF_STATUS +wlan_reg_get_channel_list_with_power_for_freq(struct wlan_objmgr_pdev *pdev, + struct channel_power *ch_list, + uint8_t *num_chan) +{ + return reg_get_channel_list_with_power_for_freq(pdev, + ch_list, + num_chan); +} + +bool +wlan_reg_chan_has_dfs_attribute_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + return reg_chan_has_dfs_attribute_for_freq(pdev, freq); +} + +enum channel_state +wlan_reg_get_5g_bonded_channel_state_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + enum phy_ch_width bw) +{ + return reg_get_5g_bonded_channel_state_for_freq(pdev, freq, bw); +} + +enum channel_state +wlan_reg_get_2g_bonded_channel_state_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + qdf_freq_t sec_ch_freq, + enum phy_ch_width bw) +{ + return reg_get_2g_bonded_channel_state_for_freq(pdev, + freq, + sec_ch_freq, + bw); +} + +void wlan_reg_set_channel_params_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + qdf_freq_t sec_ch_2g_freq, + struct ch_params *ch_params) +{ + reg_set_channel_params_for_freq(pdev, freq, sec_ch_2g_freq, ch_params); +} + +enum channel_state +wlan_reg_get_channel_state_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + return reg_get_channel_state_for_freq(pdev, freq); +} + +uint8_t wlan_reg_get_channel_reg_power_for_freq(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq) +{ + return reg_get_channel_reg_power_for_freq(pdev, freq); +} + +enum reg_wifi_band wlan_reg_freq_to_band(qdf_freq_t freq) +{ + return reg_freq_to_band(freq); +} +qdf_export_symbol(wlan_reg_freq_to_band); + +qdf_freq_t wlan_reg_min_chan_freq(void) +{ + return reg_min_chan_freq(); +} + +qdf_freq_t wlan_reg_max_chan_freq(void) +{ + return reg_max_chan_freq(); +} + +void wlan_reg_freq_width_to_chan_op_class(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + uint16_t chan_width, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num) +{ + return reg_freq_width_to_chan_op_class(pdev, freq, chan_width, + global_tbl_lookup, + behav_limit, + op_class, + chan_num); +} + +void wlan_reg_freq_width_to_chan_op_class_auto(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + uint16_t chan_width, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num) +{ + reg_freq_width_to_chan_op_class_auto(pdev, freq, chan_width, + global_tbl_lookup, + behav_limit, + op_class, + chan_num); +} + +void wlan_reg_freq_to_chan_op_class(struct wlan_objmgr_pdev *pdev, + qdf_freq_t freq, + bool global_tbl_lookup, + uint16_t behav_limit, + uint8_t *op_class, + uint8_t *chan_num) +{ + return reg_freq_to_chan_op_class(pdev, freq, + global_tbl_lookup, + behav_limit, + op_class, + chan_num); +} + +bool wlan_reg_country_opclass_freq_check(struct wlan_objmgr_pdev *pdev, + const uint8_t country[3], + uint8_t op_class, + qdf_freq_t chan_freq) +{ + return reg_country_opclass_freq_check(pdev, country, + op_class, chan_freq); +} + +enum channel_state +wlan_reg_get_5g_bonded_channel_and_state_for_freq(struct wlan_objmgr_pdev *pdev, + uint16_t freq, + enum phy_ch_width bw, + const + struct bonded_channel_freq + **bonded_chan_ptr_ptr) +{ + /* + * Get channel frequencies and state from regulatory + */ + return reg_get_5g_bonded_channel_for_freq(pdev, freq, bw, + bonded_chan_ptr_ptr); +} + +qdf_export_symbol(wlan_reg_get_5g_bonded_channel_and_state_for_freq); + +#endif /* CONFIG CHAN FREQ API */ + +uint16_t wlan_reg_get_op_class_width(struct wlan_objmgr_pdev *pdev, + uint8_t op_class, + bool global_tbl_lookup) +{ + return reg_get_op_class_width(pdev, op_class, + global_tbl_lookup); +} + +bool wlan_reg_is_6ghz_op_class(struct wlan_objmgr_pdev *pdev, + uint8_t op_class) +{ + return reg_is_6ghz_op_class(pdev, op_class); +} + +bool wlan_reg_is_6ghz_supported(struct wlan_objmgr_psoc *psoc) +{ + return reg_is_6ghz_supported(psoc); +} + +#ifdef HOST_OPCLASS_EXT +qdf_freq_t +wlan_reg_country_chan_opclass_to_freq(struct wlan_objmgr_pdev *pdev, + const uint8_t country[3], + uint8_t chan, uint8_t op_class, + bool strict) +{ + return reg_country_chan_opclass_to_freq(pdev, country, chan, op_class, + strict); +} +#endif + +uint16_t wlan_reg_chan_opclass_to_freq(uint8_t chan, + uint8_t op_class, + bool global_tbl_lookup) +{ + if (!chan || !op_class) + return 0; + + return reg_chan_opclass_to_freq(chan, op_class, global_tbl_lookup); +} + +#ifdef CONFIG_REG_CLIENT +enum band_info wlan_reg_band_bitmap_to_band_info(uint32_t band_bitmap) +{ + return reg_band_bitmap_to_band_info(band_bitmap); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..ed1ec38a6ac07fd19a5a8c3ebeb8ac42a2b19ba2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_tgt_api.c @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * @file wlan_req_tgt_api.c + * @brief contains regulatory target interface definations + */ + +#include +#include +#include +#include +#include +#include +#include <../../core/src/reg_priv_objs.h> +#include <../../core/src/reg_utils.h> +#include <../../core/src/reg_services_common.h> +#include <../../core/src/reg_lte.h> +#include <../../core/src/reg_build_chan_list.h> +#include <../../core/src/reg_offload_11d_scan.h> + +/** + * tgt_process_master_chan_list() - process master channel list + * @reg_info: regulatory info + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_reg_process_master_chan_list(struct cur_regulatory_info + *reg_info) +{ + return reg_process_master_chan_list(reg_info); +} + +QDF_STATUS tgt_reg_process_11d_new_country(struct wlan_objmgr_psoc *psoc, + struct reg_11d_new_country *reg_11d_new_cc) +{ + return reg_save_new_11d_country(psoc, reg_11d_new_cc->alpha2); +} + +QDF_STATUS tgt_reg_set_regdb_offloaded(struct wlan_objmgr_psoc *psoc, + bool val) +{ + return reg_set_regdb_offloaded(psoc, val); +} + +QDF_STATUS tgt_reg_set_11d_offloaded(struct wlan_objmgr_psoc *psoc, + bool val) +{ + return reg_set_11d_offloaded(psoc, val); +} + +QDF_STATUS tgt_reg_process_ch_avoid_event(struct wlan_objmgr_psoc *psoc, + struct ch_avoid_ind_type *ch_avoid_evnt) +{ + return reg_process_ch_avoid_event(psoc, ch_avoid_evnt); +} + +bool tgt_reg_ignore_fw_reg_offload_ind(struct wlan_objmgr_psoc *psoc) +{ + return reg_get_ignore_fw_reg_offload_ind(psoc); +} + +QDF_STATUS tgt_reg_set_6ghz_supported(struct wlan_objmgr_psoc *psoc, + bool val) +{ + return reg_set_6ghz_supported(psoc, val); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..7cc643217795d819c5e56eb882ea0bc860ee4b51 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_ucfg_api.c @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2017-2019, 2021 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * @file wlan_req_ucfg_api.c + * @brief contains regulatory user config interface definations + */ + +#include +#include +#include +#include <../../core/src/reg_priv_objs.h> +#include <../../core/src/reg_utils.h> +#include <../../core/src/reg_services_common.h> +#include <../../core/src/reg_lte.h> +#include <../../core/src/reg_offload_11d_scan.h> +#include <../../core/src/reg_build_chan_list.h> +#include <../../core/src/reg_callbacks.h> +#include + +QDF_STATUS ucfg_reg_register_event_handler(uint8_t vdev_id, reg_event_cb cb, + void *arg) +{ + /* Register a event cb handler */ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_reg_unregister_event_handler(uint8_t vdev_id, reg_event_cb cb, + void *arg) +{ + /* unregister a event cb handler */ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_reg_init_handler(uint8_t pdev_id) +{ + /* regulatory initialization handler */ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_reg_get_current_chan_list(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list) +{ + return reg_get_current_chan_list(pdev, chan_list); +} + +qdf_export_symbol(ucfg_reg_get_current_chan_list); + +QDF_STATUS ucfg_reg_modify_chan_144(struct wlan_objmgr_pdev *pdev, + bool enable_ch_144) +{ + return reg_modify_chan_144(pdev, enable_ch_144); +} + +bool ucfg_reg_get_en_chan_144(struct wlan_objmgr_pdev *pdev) +{ + return reg_get_en_chan_144(pdev); +} + +QDF_STATUS ucfg_reg_set_config_vars(struct wlan_objmgr_psoc *psoc, + struct reg_config_vars config_vars) +{ + return reg_set_config_vars(psoc, config_vars); +} + +bool ucfg_reg_is_regdb_offloaded(struct wlan_objmgr_psoc *psoc) +{ + return reg_is_regdb_offloaded(psoc); +} + +void ucfg_reg_program_mas_chan_list(struct wlan_objmgr_psoc *psoc, + struct regulatory_channel *reg_channels, + uint8_t *alpha2, + enum dfs_reg dfs_region) +{ + reg_program_mas_chan_list(psoc, reg_channels, alpha2, dfs_region); +} + +QDF_STATUS ucfg_reg_get_regd_rules(struct wlan_objmgr_pdev *pdev, + struct reg_rule_info *reg_rules) +{ + return reg_get_regd_rules(pdev, reg_rules); +} + +QDF_STATUS ucfg_reg_program_default_cc(struct wlan_objmgr_pdev *pdev, + uint16_t regdmn) +{ + return reg_program_default_cc(pdev, regdmn); +} + +QDF_STATUS ucfg_reg_program_cc(struct wlan_objmgr_pdev *pdev, + struct cc_regdmn_s *rd) +{ + return reg_program_chan_list(pdev, rd); +} + +QDF_STATUS ucfg_reg_get_current_cc(struct wlan_objmgr_pdev *pdev, + struct cc_regdmn_s *rd) +{ + return reg_get_current_cc(pdev, rd); +} + +#ifdef CONFIG_REG_CLIENT + +QDF_STATUS ucfg_reg_set_band(struct wlan_objmgr_pdev *pdev, + uint32_t band_bitmap) +{ + return reg_set_band(pdev, band_bitmap); +} + +QDF_STATUS ucfg_reg_get_band(struct wlan_objmgr_pdev *pdev, + uint32_t *band_bitmap) +{ + return reg_get_band(pdev, band_bitmap); +} + +/** + * ucfg_reg_notify_sap_event() - Notify regulatory domain for sap event + * @pdev: The physical dev to set the band for + * @sap_state: true for sap start else false + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_notify_sap_event(struct wlan_objmgr_pdev *pdev, + bool sap_state) +{ + return reg_notify_sap_event(pdev, sap_state); +} + +/** + * ucfg_reg_set_fcc_constraint() - apply fcc constraints on channels 12/13 + * @pdev: The physical pdev to reduce tx power for + * + * This function adjusts the transmit power on channels 12 and 13, to comply + * with FCC regulations in the USA. + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_fcc_constraint(struct wlan_objmgr_pdev *pdev, + bool fcc_constraint) +{ + return reg_set_fcc_constraint(pdev, fcc_constraint); +} + +QDF_STATUS ucfg_reg_get_current_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code) +{ + return reg_read_current_country(psoc, country_code); +} + +/** + * ucfg_reg_set_default_country() - Set the default regulatory country + * @psoc: The physical SoC to set default country for + * @country: The country information to configure + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country) +{ + return reg_set_default_country(psoc, country); +} +#endif + +/** + * ucfg_reg_get_default_country() - Get the default regulatory country + * @psoc: The physical SoC to get default country from + * @country_code: the buffer to populate the country code into + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_get_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code) +{ + return reg_read_default_country(psoc, country_code); +} + +/** + * ucfg_reg_set_country() - Set the current regulatory country + * @pdev: The physical dev to set current country for + * @country: The country information to configure + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country) +{ + return reg_set_country(pdev, country); +} + +/** + * ucfg_reg_reset_country() - Reset the regulatory country to default + * @psoc: The physical SoC to reset country for + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_reset_country(struct wlan_objmgr_psoc *psoc) +{ + return reg_reset_country(psoc); +} + +/** + * ucfg_reg_enable_dfs_channels() - Enable the use of DFS channels + * @pdev: The physical dev to enable DFS channels for + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_enable_dfs_channels(struct wlan_objmgr_pdev *pdev, + bool dfs_enable) +{ + return reg_enable_dfs_channels(pdev, dfs_enable); +} + +void ucfg_reg_register_chan_change_callback(struct wlan_objmgr_psoc *psoc, + void *cbk, void *arg) +{ + reg_register_chan_change_callback(psoc, (reg_chan_change_callback)cbk, + arg); +} + +void ucfg_reg_unregister_chan_change_callback(struct wlan_objmgr_psoc *psoc, + void *cbk) +{ + reg_unregister_chan_change_callback(psoc, + (reg_chan_change_callback)cbk); +} + +enum country_src ucfg_reg_get_cc_and_src(struct wlan_objmgr_psoc *psoc, + uint8_t *alpha2) +{ + return reg_get_cc_and_src(psoc, alpha2); +} + +void ucfg_reg_unit_simulate_ch_avoid(struct wlan_objmgr_psoc *psoc, + struct ch_avoid_ind_type *ch_avoid) +{ + reg_process_ch_avoid_event(psoc, ch_avoid); +} + +QDF_STATUS ucfg_reg_11d_vdev_delete_update(struct wlan_objmgr_vdev *vdev) +{ + return reg_11d_vdev_delete_update(vdev); +} + +QDF_STATUS ucfg_reg_11d_vdev_created_update(struct wlan_objmgr_vdev *vdev) +{ + return reg_11d_vdev_created_update(vdev); +} + +struct wlan_psoc_host_hal_reg_capabilities_ext *ucfg_reg_get_hal_reg_cap( + struct wlan_objmgr_psoc *psoc) +{ + return reg_get_hal_reg_cap(psoc); +} +qdf_export_symbol(ucfg_reg_get_hal_reg_cap); + +QDF_STATUS ucfg_reg_set_hal_reg_cap(struct wlan_objmgr_psoc *psoc, + struct wlan_psoc_host_hal_reg_capabilities_ext *hal_reg_cap, + uint16_t phy_cnt) + +{ + return reg_set_hal_reg_cap(psoc, hal_reg_cap, phy_cnt); +} +qdf_export_symbol(ucfg_reg_set_hal_reg_cap); + +#ifdef DISABLE_CHANNEL_LIST +#ifdef CONFIG_CHAN_FREQ_API +/** + * ucfg_reg_cache_channel_freq_state() - Cache the current state of the channels + * based of the channel center frequency. + * @pdev: The physical dev to cache the channels for + * @channel_list: List of the channels for which states needs to be cached + * @num_channels: Number of channels in the list + * + */ +void ucfg_reg_cache_channel_freq_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, + uint32_t num_channels) +{ + reg_cache_channel_freq_state(pdev, channel_list, num_channels); +} +#endif /* CONFIG_CHAN_FREQ_API */ + +#ifdef CONFIG_CHAN_NUM_API +/** + * ucfg_reg_cache_channel_state() - Cache the current state of the channles + * @pdev: The physical dev to cache the channels for + * @channel_list: List of the channels for which states needs to be cached + * @num_channels: Number of channels in the list + * + */ +void ucfg_reg_cache_channel_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, uint32_t num_channels) +{ + reg_cache_channel_state(pdev, channel_list, num_channels); +} +#endif /* CONFIG_CHAN_NUM_API */ + +void ucfg_reg_restore_cached_channels(struct wlan_objmgr_pdev *pdev) +{ + reg_restore_cached_channels(pdev); +} + +void ucfg_reg_disable_cached_channels(struct wlan_objmgr_pdev *pdev) +{ + reg_disable_cached_channels(pdev); +} + +#endif + +QDF_STATUS ucfg_set_ignore_fw_reg_offload_ind(struct wlan_objmgr_psoc *psoc) +{ + return reg_set_ignore_fw_reg_offload_ind(psoc); +} + +#ifdef DISABLE_UNII_SHARED_BANDS +QDF_STATUS +ucfg_reg_get_unii_5g_bitmap(struct wlan_objmgr_pdev *pdev, uint8_t *bitmap) +{ + return reg_get_unii_5g_bitmap(pdev, bitmap); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_11d.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_11d.c new file mode 100644 index 0000000000000000000000000000000000000000..dd1f48cb1cc68c54fa9b3b30b1261fdf3f41dae5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_11d.c @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan 11d api and functionality + */ +#include +#include +#include +#include +#include +#include +#include "wlan_scan_main.h" +#include "wlan_scan_11d.h" +#include "wlan_reg_services_api.h" +#include "wlan_reg_ucfg_api.h" + +/** + * wlan_pdevid_get_cc_db() - private API to get cc db from pdev id + * @psoc: psoc object + * @pdev_id: pdev id + * + * Return: cc db for the pdev id + */ +static struct scan_country_code_db * +wlan_pdevid_get_cc_db(struct wlan_objmgr_psoc *psoc, uint8_t pdev_id) +{ + struct wlan_scan_obj *scan_obj; + + if (pdev_id > WLAN_UMAC_MAX_PDEVS) { + scm_err("invalid pdev_id %d", pdev_id); + return NULL; + } + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return NULL; + + return &scan_obj->cc_db[pdev_id]; +} + +/** + * wlan_pdev_get_cc_db() - private API to get cc db from pdev + * @psoc: psoc object + * @pdev: Pdev object + * + * Return: cc db for the pdev + */ +static struct scan_country_code_db * +wlan_pdev_get_cc_db(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + uint8_t pdev_id; + + if (!pdev) { + scm_err("pdev is NULL"); + return NULL; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + return wlan_pdevid_get_cc_db(psoc, pdev_id); +} + +/** + * scm_11d_elected_country_algo_fcc - private api to get cc per fcc algo + * @cc_db: scan country code db + * + * Return: true or false + */ +static bool +scm_11d_elected_country_algo_fcc(struct scan_country_code_db *cc_db) +{ + uint8_t i; + uint8_t country_idx; + uint16_t max_votes; + bool found = false; + + if (!cc_db->num_country_codes) { + scm_err("No AP with 11d Country code is present in scan list"); + return false; + } + + max_votes = cc_db->votes[0].votes; + if (wlan_reg_is_us(cc_db->votes[0].cc)) { + found = true; + country_idx = 0; + goto algo_done; + } else if (max_votes >= MIN_11D_AP_COUNT) { + found = true; + country_idx = 0; + } + + for (i = 1; i < cc_db->num_country_codes; i++) { + if (wlan_reg_is_us(cc_db->votes[i].cc)) { + found = true; + country_idx = i; + goto algo_done; + } + + if ((max_votes < cc_db->votes[i].votes) && + (cc_db->votes[i].votes >= MIN_11D_AP_COUNT)) { + scm_debug("Votes for Country %c%c : %d", + cc_db->votes[i].cc[0], + cc_db->votes[i].cc[1], + cc_db->votes[i].votes); + max_votes = cc_db->votes[i].votes; + country_idx = i; + found = true; + } + } + +algo_done: + if (found) { + qdf_mem_copy(cc_db->elected_cc, + cc_db->votes[country_idx].cc, + REG_ALPHA2_LEN + 1); + + scm_debug("Selected Country is %c%c With count %d", + cc_db->votes[country_idx].cc[0], + cc_db->votes[country_idx].cc[1], + cc_db->votes[country_idx].votes); + } + + return found; +} + +/** + * scm_11d_elected_country_info - private api to get cc + * @cc_db: scan country code db + * + * Return: true or false + */ +static bool +scm_11d_elected_country_info(struct scan_country_code_db *cc_db) +{ + uint8_t i, j = 0; + uint8_t max_votes; + + if (!cc_db->num_country_codes) { + scm_err("No AP with 11d Country code is present in scan list"); + return false; + } + + max_votes = cc_db->votes[0].votes; + + for (i = 1; i < cc_db->num_country_codes; i++) { + /* + * If we have a tie for max votes for 2 different country codes, + * pick random. + */ + if (max_votes < cc_db->votes[i].votes) { + scm_debug("Votes for Country %c%c : %d", + cc_db->votes[i].cc[0], + cc_db->votes[i].cc[1], + cc_db->votes[i].votes); + + max_votes = cc_db->votes[i].votes; + j = i; + } + } + + qdf_mem_copy(cc_db->elected_cc, cc_db->votes[j].cc, + REG_ALPHA2_LEN + 1); + + scm_debug("Selected Country is %c%c With count %d", + cc_db->votes[j].cc[0], + cc_db->votes[j].cc[1], + cc_db->votes[j].votes); + + return true; +} + +/** + * scm_11d_set_country_code - private api to set cc per 11d learning + * @pdev: pdev object + * @elected_cc: elected country code + * @current_cc: current country code + * + * Return: true or false + */ +static bool +scm_11d_set_country_code(struct wlan_objmgr_pdev *pdev, + uint8_t *elected_cc, uint8_t *current_cc) +{ + scm_debug("elected country %c%c, current country %c%c", + elected_cc[0], elected_cc[1], current_cc[0], current_cc[1]); + + if (!qdf_mem_cmp(elected_cc, current_cc, REG_ALPHA2_LEN + 1)) + return true; + + wlan_reg_set_11d_country(pdev, elected_cc); + return true; +} + +/** + * scm_11d_reset_cc_db - reset the country code db + * @cc_db: the pointer of country code db + * + * Return: void + */ +static void scm_11d_reset_cc_db(struct scan_country_code_db *cc_db) +{ + qdf_mem_zero(cc_db->votes, sizeof(cc_db->votes)); + qdf_mem_zero(cc_db->elected_cc, sizeof(cc_db->elected_cc)); + cc_db->num_country_codes = 0; +} + +QDF_STATUS scm_11d_cc_db_init(struct wlan_objmgr_psoc *psoc) +{ + struct scan_country_code_db *cc_db; + struct wlan_scan_obj *scan_obj; + + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("scan_obj is NULL"); + return QDF_STATUS_E_INVAL; + } + + cc_db = (struct scan_country_code_db *)qdf_mem_malloc_atomic( + sizeof(struct scan_country_code_db) * WLAN_UMAC_MAX_PDEVS); + if (!cc_db) { + scm_err("alloc country code db error"); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_zero(cc_db, + sizeof(struct scan_country_code_db) * + WLAN_UMAC_MAX_PDEVS); + + scan_obj->cc_db = cc_db; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scm_11d_cc_db_deinit(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("scan_obj is NULL"); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_free(scan_obj->cc_db); + return QDF_STATUS_SUCCESS; +} + +void scm_11d_handle_country_info(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry) +{ + uint8_t i; + bool match = false; + uint8_t num_country_codes; + struct scan_country_code_db *cc_db; + struct wlan_country_ie *cc_ie; + + cc_ie = util_scan_entry_country(scan_entry); + if (!cc_ie) + return; + + cc_db = wlan_pdev_get_cc_db(psoc, pdev); + if (!cc_db) + return; + + /* just to be sure, convert to UPPER case here */ + for (i = 0; i < 3; i++) + cc_ie->cc[i] = qdf_toupper(cc_ie->cc[i]); + + num_country_codes = cc_db->num_country_codes; + for (i = 0; i < num_country_codes; i++) { + match = !qdf_mem_cmp(cc_db->votes[i].cc, cc_ie->cc, + REG_ALPHA2_LEN); + if (match) + break; + } + + if (match) { + cc_db->votes[i].votes++; + return; + } + + if (num_country_codes >= SCAN_MAX_NUM_COUNTRY_CODE) { + scm_debug("country code db already full: %d", + num_country_codes); + return; + } + + /* add country code to end of the list */ + qdf_mem_copy(cc_db->votes[num_country_codes].cc, cc_ie->cc, + REG_ALPHA2_LEN + 1); + cc_db->votes[num_country_codes].votes = 1; + cc_db->num_country_codes++; +} + +void scm_11d_decide_country_code(struct wlan_objmgr_vdev *vdev) +{ + uint8_t current_cc[REG_ALPHA2_LEN + 1]; + bool found; + struct scan_country_code_db *cc_db; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + if (!wlan_reg_11d_enabled_on_host(psoc)) + return; + + if (SOURCE_UNKNOWN == ucfg_reg_get_cc_and_src(psoc, current_cc)) { + scm_err("fail to get current country code"); + return; + } + + cc_db = wlan_pdev_get_cc_db(psoc, pdev); + if (!cc_db) { + scm_err("scan_db is NULL"); + return; + } + + if (wlan_reg_is_us(current_cc) || wlan_reg_is_world(current_cc)) + found = scm_11d_elected_country_algo_fcc(cc_db); + else + found = scm_11d_elected_country_info(cc_db); + + if (found) + scm_11d_set_country_code(pdev, cc_db->elected_cc, + current_cc); + scm_11d_reset_cc_db(cc_db); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_11d.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_11d.h new file mode 100644 index 0000000000000000000000000000000000000000..9ec31b7fc068bd7c73c2608b7b19aba9f82e142b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_11d.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan 11d entry api + */ + +#ifndef _WLAN_SCAN_11D_H_ +#define _WLAN_SCAN_11D_H_ + +#define SCAN_MAX_NUM_COUNTRY_CODE 100 +#define MIN_11D_AP_COUNT 3 + +/** + * struct scan_country_code_votes - votes to country code mapping structure + * @votes: votes + * @cc: country code + */ +struct scan_country_code_votes { + uint16_t votes; + uint8_t cc[REG_ALPHA2_LEN + 1]; +}; + +/** + * struct scan_country_code_db - country code data base definition + * @elected_cc: elected country code + * @num_country_codes: number of country codes encountered + * @votes: votes to country code mapping array + */ +struct scan_country_code_db { + uint8_t elected_cc[REG_ALPHA2_LEN + 1]; + uint8_t num_country_codes; + struct scan_country_code_votes votes[SCAN_MAX_NUM_COUNTRY_CODE]; +}; + +/** + * scm_11d_cc_db_init() - API to init 11d country code db + * @psoc: psoc object + * + * Initialize the country code database. + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_11d_cc_db_init(struct wlan_objmgr_psoc *psoc); + +/** + * scm_11d_cc_db_deinit() - API to deinit 11d country code db + * @psoc: psoc object + * + * free the country code database. + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_11d_cc_db_deinit(struct wlan_objmgr_psoc *psoc); + +/** + * scm_11d_handle_country_info() - API to handle 11d country info + * @psoc: psoc object + * @pdev: pdev object + * @scan_entry: the pointer to scan entry + * + * Update the country code database per the country code from country IE. + * + * Return: void + */ +void scm_11d_handle_country_info(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry); + +/** + * scm_11d_decide_country_code() - API to decide the country code per 11d + * @vdev: vdev object + * + * Decide which country will be elected from the country database. If one + * cadidate country is found, then it set the country code. + * + * Return: void + */ +void scm_11d_decide_country_code(struct wlan_objmgr_vdev *vdev); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_bss_score.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_bss_score.c new file mode 100644 index 0000000000000000000000000000000000000000..22802dca3b45e1dd899b88f700ef2f359b99aabe --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_bss_score.c @@ -0,0 +1,914 @@ +/* + * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/* + * DOC: contains scan bss scoring logic + */ + +#include +#include "wlan_scan_main.h" +#include "wlan_scan_cache_db_i.h" +#ifdef WLAN_POLICY_MGR_ENABLE +#include "wlan_policy_mgr_api.h" +#endif +#include "wlan_reg_services_api.h" +#include "wlan_crypto_global_api.h" + +#define SCM_20MHZ_BW_INDEX 0 +#define SCM_40MHZ_BW_INDEX 1 +#define SCM_80MHZ_BW_INDEX 2 +#define SCM_160MHZ_BW_INDEX 3 +#define SCM_MAX_BW_INDEX 4 + +#define SCM_NSS_1x1_INDEX 0 +#define SCM_NSS_2x2_INDEX 1 +#define SCM_NSS_3x3_INDEX 2 +#define SCM_NSS_4x4_INDEX 3 +#define SCM_MAX_NSS_INDEX 4 + +#define SCM_BAND_2G_INDEX 0 +#define SCM_BAND_5G_INDEX 1 +/* 2 and 3 are reserved */ +#define SCM_MAX_BAND_INDEX 4 + +#define SCM_SCORE_INDEX_0 0 +#define SCM_SCORE_INDEX_3 3 +#define SCM_SCORE_INDEX_7 7 +#define SCM_SCORE_OFFSET_INDEX_7_4 4 +#define SCM_SCORE_INDEX_11 11 +#define SCM_SCORE_OFFSET_INDEX_11_8 8 +#define SCM_SCORE_MAX_INDEX 15 +#define SCM_SCORE_OFFSET_INDEX_15_12 12 + +#define SCM_MAX_OCE_WAN_DL_CAP 16 + +#define SCM_MAX_CHANNEL_WEIGHT 100 +#define SCM_MAX_CHANNEL_UTILIZATION 100 +#define SCM_MAX_ESTIMATED_AIR_TIME_FRACTION 255 +#define MAX_AP_LOAD 255 + +#define SCM_MAX_WEIGHT_OF_PCL_CHANNELS 255 +#define SCM_PCL_GROUPS_WEIGHT_DIFFERENCE 20 + +/* Congestion threshold (channel load %) to consider band and OCE WAN score */ +#define CONGESTION_THRSHOLD_FOR_BAND_OCE_SCORE 75 + +bool scm_is_better_bss(struct scan_default_params *params, + struct scan_cache_entry *bss1, + struct scan_cache_entry *bss2) +{ + if (bss1->bss_score > bss2->bss_score) + return true; + else if (bss1->bss_score == bss2->bss_score) + if (bss1->rssi_raw > bss2->rssi_raw) + return true; + + return false; +} + +/** + * scm_limit_max_per_index_score() -check if per index score does not exceed + * 100% (0x64). If it exceed make it 100% + * + * @per_index_score: per_index_score as input + * + * Return: per_index_score within the max limit + */ +static uint32_t scm_limit_max_per_index_score(uint32_t per_index_score) +{ + uint8_t i, score; + + for (i = 0; i < MAX_INDEX_PER_INI; i++) { + score = WLAN_GET_SCORE_PERCENTAGE(per_index_score, i); + if (score > MAX_INDEX_SCORE) + WLAN_SET_SCORE_PERCENTAGE(per_index_score, + MAX_INDEX_SCORE, i); + } + + return per_index_score; +} + +void scm_validate_scoring_config(struct scoring_config *score_cfg) +{ + int total_weight; + + total_weight = score_cfg->weight_cfg.rssi_weightage + + score_cfg->weight_cfg.ht_caps_weightage + + score_cfg->weight_cfg.vht_caps_weightage + + score_cfg->weight_cfg.chan_width_weightage + + score_cfg->weight_cfg.chan_band_weightage + + score_cfg->weight_cfg.nss_weightage + + score_cfg->weight_cfg.beamforming_cap_weightage + + score_cfg->weight_cfg.pcl_weightage + + score_cfg->weight_cfg.channel_congestion_weightage + + score_cfg->weight_cfg.oce_wan_weightage; + + if (total_weight > MAX_BSS_SCORE) { + + scm_err("total weight is greater than %d fallback to default values", + MAX_BSS_SCORE); + + score_cfg->weight_cfg.rssi_weightage = RSSI_WEIGHTAGE; + score_cfg->weight_cfg.ht_caps_weightage = + HT_CAPABILITY_WEIGHTAGE; + score_cfg->weight_cfg.vht_caps_weightage = VHT_CAP_WEIGHTAGE; + score_cfg->weight_cfg.chan_width_weightage = + CHAN_WIDTH_WEIGHTAGE; + score_cfg->weight_cfg.chan_band_weightage = + CHAN_BAND_WEIGHTAGE; + score_cfg->weight_cfg.nss_weightage = NSS_WEIGHTAGE; + score_cfg->weight_cfg.beamforming_cap_weightage = + BEAMFORMING_CAP_WEIGHTAGE; + score_cfg->weight_cfg.pcl_weightage = PCL_WEIGHT; + score_cfg->weight_cfg.channel_congestion_weightage = + CHANNEL_CONGESTION_WEIGHTAGE; + score_cfg->weight_cfg.oce_wan_weightage = OCE_WAN_WEIGHTAGE; + } + + score_cfg->bandwidth_weight_per_index = + scm_limit_max_per_index_score( + score_cfg->bandwidth_weight_per_index); + score_cfg->nss_weight_per_index = + scm_limit_max_per_index_score(score_cfg->nss_weight_per_index); + score_cfg->band_weight_per_index = + scm_limit_max_per_index_score(score_cfg->band_weight_per_index); + + + score_cfg->esp_qbss_scoring.score_pcnt3_to_0 = + scm_limit_max_per_index_score( + score_cfg->esp_qbss_scoring.score_pcnt3_to_0); + score_cfg->esp_qbss_scoring.score_pcnt7_to_4 = + scm_limit_max_per_index_score( + score_cfg->esp_qbss_scoring.score_pcnt7_to_4); + score_cfg->esp_qbss_scoring.score_pcnt11_to_8 = + scm_limit_max_per_index_score( + score_cfg->esp_qbss_scoring.score_pcnt11_to_8); + score_cfg->esp_qbss_scoring.score_pcnt15_to_12 = + scm_limit_max_per_index_score( + score_cfg->esp_qbss_scoring.score_pcnt15_to_12); + + score_cfg->oce_wan_scoring.score_pcnt3_to_0 = + scm_limit_max_per_index_score( + score_cfg->oce_wan_scoring.score_pcnt3_to_0); + score_cfg->oce_wan_scoring.score_pcnt7_to_4 = + scm_limit_max_per_index_score( + score_cfg->oce_wan_scoring.score_pcnt7_to_4); + score_cfg->oce_wan_scoring.score_pcnt11_to_8 = + scm_limit_max_per_index_score( + score_cfg->oce_wan_scoring.score_pcnt11_to_8); + score_cfg->oce_wan_scoring.score_pcnt15_to_12 = + scm_limit_max_per_index_score( + score_cfg->oce_wan_scoring.score_pcnt15_to_12); + +} + +/** + * scm_get_rssi_pcnt_for_slot () - calculate rssi % score based on the slot + * index between the high rssi and low rssi threshold + * @high_rssi_threshold: High rssi of the window + * @low_rssi_threshold: low rssi of the window + * @high_rssi_pcnt: % score for the high rssi + * @low_rssi_pcnt: %score for the low rssi + * @bucket_size: bucket size of the window + * @bss_rssi: Input rssi for which value need to be calculated + * + * Return : rssi pct to use for the given rssi + */ +static inline +int8_t scm_get_rssi_pcnt_for_slot(int32_t high_rssi_threshold, + int32_t low_rssi_threshold, uint32_t high_rssi_pcnt, + uint32_t low_rssi_pcnt, uint32_t bucket_size, int8_t bss_rssi) +{ + int8_t slot_index, slot_size, rssi_diff, num_slot, rssi_pcnt; + + num_slot = ((high_rssi_threshold - + low_rssi_threshold) / bucket_size) + 1; + slot_size = ((high_rssi_pcnt - low_rssi_pcnt) + + (num_slot / 2)) / (num_slot); + rssi_diff = high_rssi_threshold - bss_rssi; + slot_index = (rssi_diff / bucket_size) + 1; + rssi_pcnt = high_rssi_pcnt - (slot_size * slot_index); + if (rssi_pcnt < low_rssi_pcnt) + rssi_pcnt = low_rssi_pcnt; + + scm_debug("Window %d -> %d pcnt range %d -> %d bucket_size %d bss_rssi %d num_slot %d slot_size %d rssi_diff %d slot_index %d rssi_pcnt %d", + high_rssi_threshold, low_rssi_threshold, high_rssi_pcnt, + low_rssi_pcnt, bucket_size, bss_rssi, num_slot, slot_size, + rssi_diff, slot_index, rssi_pcnt); + + return rssi_pcnt; +} + +/** + * scm_calculate_rssi_score () - Calculate RSSI score based on AP RSSI + * @score_param: rssi score params + * @rssi: rssi of the AP + * @rssi_weightage: rssi_weightage out of total weightage + * + * Return : rssi score + */ +static int32_t scm_calculate_rssi_score( + struct rssi_cfg_score *score_param, + int32_t rssi, uint8_t rssi_weightage) +{ + int8_t rssi_pcnt; + int32_t total_rssi_score; + int32_t best_rssi_threshold; + int32_t good_rssi_threshold; + int32_t bad_rssi_threshold; + uint32_t good_rssi_pcnt; + uint32_t bad_rssi_pcnt; + uint32_t good_bucket_size; + uint32_t bad_bucket_size; + + best_rssi_threshold = score_param->best_rssi_threshold*(-1); + good_rssi_threshold = score_param->good_rssi_threshold*(-1); + bad_rssi_threshold = score_param->bad_rssi_threshold*(-1); + good_rssi_pcnt = score_param->good_rssi_pcnt; + bad_rssi_pcnt = score_param->bad_rssi_pcnt; + good_bucket_size = score_param->good_rssi_bucket_size; + bad_bucket_size = score_param->bad_rssi_bucket_size; + + total_rssi_score = (BEST_CANDIDATE_MAX_WEIGHT * rssi_weightage); + + /* + * If RSSI is better than the best rssi threshold then it return full + * score. + */ + if (rssi > best_rssi_threshold) + return total_rssi_score; + /* + * If RSSI is less or equal to bad rssi threshold then it return + * least score. + */ + if (rssi <= bad_rssi_threshold) + return (total_rssi_score * bad_rssi_pcnt) / 100; + + /* RSSI lies between best to good rssi threshold */ + if (rssi > good_rssi_threshold) + rssi_pcnt = scm_get_rssi_pcnt_for_slot(best_rssi_threshold, + good_rssi_threshold, 100, good_rssi_pcnt, + good_bucket_size, rssi); + else + rssi_pcnt = scm_get_rssi_pcnt_for_slot(good_rssi_threshold, + bad_rssi_threshold, good_rssi_pcnt, + bad_rssi_pcnt, bad_bucket_size, + rssi); + + return (total_rssi_score * rssi_pcnt) / 100; + +} + +/** + * scm_calculate_pcl_score () - Calculate PCL score based on PCL weightage + * @pcl_chan_weight: pcl weight of BSS channel + * @pcl_weightage: PCL _weightage out of total weightage + * + * Return : pcl score + */ +static int32_t scm_calculate_pcl_score(int pcl_chan_weight, + uint8_t pcl_weightage) +{ + int32_t pcl_score = 0; + int32_t temp_pcl_chan_weight = 0; + + if (pcl_chan_weight) { + temp_pcl_chan_weight = + (SCM_MAX_WEIGHT_OF_PCL_CHANNELS - pcl_chan_weight); + temp_pcl_chan_weight = qdf_do_div(temp_pcl_chan_weight, + SCM_PCL_GROUPS_WEIGHT_DIFFERENCE); + pcl_score = pcl_weightage - temp_pcl_chan_weight; + if (pcl_score < 0) + pcl_score = 0; + } + return pcl_score * BEST_CANDIDATE_MAX_WEIGHT; + +} + +/** + * scm_rssi_is_same_bucket () - check if both rssi fall in same bucket + * @rssi_top_thresh: high rssi threshold of the the window + * @low_rssi_threshold: low rssi of the window + * @rssi_ref1: rssi ref one + * @rssi_ref2: rssi ref two + * @bucket_size: bucket size of the window + * + * Return : true if both fall in same window + */ +static inline bool scm_rssi_is_same_bucket(int8_t rssi_top_thresh, + int8_t rssi_ref1, int8_t rssi_ref2, int8_t bucket_size) +{ + int8_t rssi_diff1 = 0; + int8_t rssi_diff2 = 0; + + rssi_diff1 = rssi_top_thresh - rssi_ref1; + rssi_diff2 = rssi_top_thresh - rssi_ref2; + + return (rssi_diff1 / bucket_size) == (rssi_diff2 / bucket_size); +} + +/** + * scm_roam_calculate_prorated_pcnt_by_rssi () - Calculate prorated RSSI score + * based on AP RSSI. This will be used to determine HT VHT score + * @score_param: rssi score params + * @rssi: bss rssi + * @rssi_weightage: rssi_weightage out of total weightage + * + * If rssi is greater than good threshold return 100, if less than bad return 0, + * if between good and bad, return prorated rssi score for the index. + * + * Return : rssi prorated score + */ +static int8_t scm_roam_calculate_prorated_pcnt_by_rssi( + struct rssi_cfg_score *score_param, + int32_t rssi, uint8_t rssi_weightage) +{ + int32_t good_rssi_threshold; + int32_t bad_rssi_threshold; + int8_t rssi_pref_5g_rssi_thresh; + bool same_bucket; + + good_rssi_threshold = score_param->good_rssi_threshold * (-1); + bad_rssi_threshold = score_param->bad_rssi_threshold * (-1); + rssi_pref_5g_rssi_thresh = score_param->rssi_pref_5g_rssi_thresh * (-1); + + /* If RSSI is greater than good rssi return full weight */ + if (rssi > good_rssi_threshold) + return BEST_CANDIDATE_MAX_WEIGHT; + + same_bucket = scm_rssi_is_same_bucket(good_rssi_threshold, + rssi, rssi_pref_5g_rssi_thresh, + score_param->bad_rssi_bucket_size); + if (same_bucket || (rssi < rssi_pref_5g_rssi_thresh)) + return 0; + /* If RSSI is less or equal to bad rssi threshold then it return 0 */ + if (rssi <= bad_rssi_threshold) + return 0; + + /* If RSSI is between good and bad threshold */ + return scm_get_rssi_pcnt_for_slot(good_rssi_threshold, + bad_rssi_threshold, + score_param->good_rssi_pcnt, + score_param->bad_rssi_pcnt, + score_param->bad_rssi_bucket_size, + rssi); +} + +/** + * scm_calculate_bandwidth_score () - Calculate BW score + * @entry: scan entry + * @score_config: scoring config + * @prorated_pct: prorated % to return dependent on RSSI + * + * Return : bw score + */ +static int32_t scm_calculate_bandwidth_score( + struct scan_cache_entry *entry, + struct scoring_config *score_config, uint8_t prorated_pct) +{ + uint32_t score; + int32_t bw_weight_per_idx; + uint8_t cbmode = 0; + uint8_t ch_width_index; + bool is_vht = false; + + bw_weight_per_idx = score_config->bandwidth_weight_per_index; + + if (WLAN_REG_IS_24GHZ_CH_FREQ(entry->channel.chan_freq)) { + cbmode = score_config->cb_mode_24G; + if (score_config->vht_24G_cap) + is_vht = true; + } else if (score_config->vht_cap) { + is_vht = true; + cbmode = score_config->cb_mode_5G; + } + + if (IS_WLAN_PHYMODE_160MHZ(entry->phy_mode)) + ch_width_index = SCM_160MHZ_BW_INDEX; + else if (IS_WLAN_PHYMODE_80MHZ(entry->phy_mode)) + ch_width_index = SCM_80MHZ_BW_INDEX; + else if (IS_WLAN_PHYMODE_40MHZ(entry->phy_mode)) + ch_width_index = SCM_40MHZ_BW_INDEX; + else + ch_width_index = SCM_20MHZ_BW_INDEX; + + + if (!score_config->ht_cap && ch_width_index > SCM_20MHZ_BW_INDEX) + ch_width_index = SCM_20MHZ_BW_INDEX; + + if (!is_vht && ch_width_index > SCM_40MHZ_BW_INDEX) + ch_width_index = SCM_40MHZ_BW_INDEX; + + if (cbmode && ch_width_index > SCM_20MHZ_BW_INDEX) + score = WLAN_GET_SCORE_PERCENTAGE(bw_weight_per_idx, + ch_width_index); + else + score = WLAN_GET_SCORE_PERCENTAGE(bw_weight_per_idx, + SCM_20MHZ_BW_INDEX); + + return (prorated_pct * score * + score_config->weight_cfg.chan_width_weightage) / + BEST_CANDIDATE_MAX_WEIGHT; +} + +/** + * scm_get_score_for_index () - get score for the given index + * @index: index for which we need the score + * @weightage: weigtage for the param + * @score: per slot score + * + * Return : score for the index + */ +static int32_t scm_get_score_for_index(uint8_t index, + uint8_t weightage, struct per_slot_scoring *score) +{ + if (index <= SCM_SCORE_INDEX_3) + return weightage * WLAN_GET_SCORE_PERCENTAGE( + score->score_pcnt3_to_0, + index); + else if (index <= SCM_SCORE_INDEX_7) + return weightage * WLAN_GET_SCORE_PERCENTAGE( + score->score_pcnt7_to_4, + index - SCM_SCORE_OFFSET_INDEX_7_4); + else if (index <= SCM_SCORE_INDEX_11) + return weightage * WLAN_GET_SCORE_PERCENTAGE( + score->score_pcnt11_to_8, + index - SCM_SCORE_OFFSET_INDEX_11_8); + else + return weightage * WLAN_GET_SCORE_PERCENTAGE( + score->score_pcnt15_to_12, + index - SCM_SCORE_OFFSET_INDEX_15_12); +} + +/** + * scm_get_congestion_pct () - Calculate congestion pct from esp/qbss load + * @entry: bss information + * + * Return : congestion pct + */ +static int32_t scm_get_congestion_pct(struct scan_cache_entry *entry) +{ + uint32_t ap_load = 0; + uint32_t est_air_time_percentage = 0; + uint32_t congestion = 0; + + if (entry->air_time_fraction) { + /* Convert 0-255 range to percentage */ + est_air_time_percentage = entry->air_time_fraction * + SCM_MAX_CHANNEL_WEIGHT; + est_air_time_percentage = qdf_do_div(est_air_time_percentage, + SCM_MAX_ESTIMATED_AIR_TIME_FRACTION); + /* + * Calculate channel congestion from estimated air time + * fraction. + */ + congestion = SCM_MAX_CHANNEL_UTILIZATION - + est_air_time_percentage; + } else if (entry->qbss_chan_load) { + ap_load = (entry->qbss_chan_load * BEST_CANDIDATE_MAX_WEIGHT); + /* + * Calculate ap_load in % from qbss channel load from + * 0-255 range + */ + congestion = qdf_do_div(ap_load, MAX_AP_LOAD); + } + + return congestion; +} + +/** + * scm_calculate_congestion_score () - Calculate congestion score + * @entry: bss information + * @score_params: bss score params + * @congestion_pct: congestion pct + * + * Return : congestion score + */ +static int32_t scm_calculate_congestion_score( + struct scan_cache_entry *entry, + struct scoring_config *score_params, + uint32_t *congestion_pct) +{ + uint32_t window_size; + uint8_t index; + int32_t good_rssi_threshold; + + *congestion_pct = scm_get_congestion_pct(entry); + + if (!score_params->esp_qbss_scoring.num_slot) + return 0; + + if (score_params->esp_qbss_scoring.num_slot > + SCM_SCORE_MAX_INDEX) + score_params->esp_qbss_scoring.num_slot = + SCM_SCORE_MAX_INDEX; + + good_rssi_threshold = + score_params->rssi_score.good_rssi_threshold * (-1); + + /* For bad zone rssi get score from last index */ + if (entry->rssi_raw <= good_rssi_threshold) + return scm_get_score_for_index( + score_params->esp_qbss_scoring.num_slot, + score_params->weight_cfg. + channel_congestion_weightage, + &score_params->esp_qbss_scoring); + + if (!*congestion_pct) + return score_params->weight_cfg.channel_congestion_weightage * + WLAN_GET_SCORE_PERCENTAGE( + score_params->esp_qbss_scoring.score_pcnt3_to_0, + SCM_SCORE_INDEX_0); + + window_size = BEST_CANDIDATE_MAX_WEIGHT / + score_params->esp_qbss_scoring.num_slot; + + /* Desired values are from 1 to 15, as 0 is for not present. so do +1 */ + index = qdf_do_div(*congestion_pct, window_size) + 1; + + if (index > score_params->esp_qbss_scoring.num_slot) + index = score_params->esp_qbss_scoring.num_slot; + + return scm_get_score_for_index(index, score_params->weight_cfg. + channel_congestion_weightage, + &score_params->esp_qbss_scoring); +} + +/** + * scm_calculate_nss_score () - Calculate congestion score + * @psoc: psoc ptr + * @score_config: scoring config + * @ap_nss: ap nss + * @prorated_pct: prorated % to return dependent on RSSI + * + * Return : nss score + */ +static int32_t scm_calculate_nss_score(struct wlan_objmgr_psoc *psoc, + struct scoring_config *score_config, uint8_t ap_nss, + uint8_t prorated_pct, uint32_t sta_nss) +{ + uint8_t nss; + uint8_t score_pct; + + nss = ap_nss; + if (sta_nss < nss) + nss = sta_nss; + + if (nss == 4) + score_pct = WLAN_GET_SCORE_PERCENTAGE( + score_config->nss_weight_per_index, + SCM_NSS_4x4_INDEX); + else if (nss == 3) + score_pct = WLAN_GET_SCORE_PERCENTAGE( + score_config->nss_weight_per_index, + SCM_NSS_3x3_INDEX); + else if (nss == 2) + score_pct = WLAN_GET_SCORE_PERCENTAGE( + score_config->nss_weight_per_index, + SCM_NSS_2x2_INDEX); + else + score_pct = WLAN_GET_SCORE_PERCENTAGE( + score_config->nss_weight_per_index, + SCM_NSS_1x1_INDEX); + + return (score_config->weight_cfg.nss_weightage * score_pct * + prorated_pct) / BEST_CANDIDATE_MAX_WEIGHT; +} + +/** + * scm_calculate_oce_wan_score () - Calculate oce wan score + * @entry: bss information + * @score_params: bss score params + * + * Return : oce wan score + */ +static int32_t scm_calculate_oce_wan_score( + struct scan_cache_entry *entry, + struct scoring_config *score_params) +{ + uint32_t window_size; + uint8_t index; + struct oce_reduced_wan_metrics wan_metrics; + uint8_t *mbo_oce_ie; + + if (!score_params->oce_wan_scoring.num_slot) + return 0; + + if (score_params->oce_wan_scoring.num_slot > + SCM_SCORE_MAX_INDEX) + score_params->oce_wan_scoring.num_slot = + SCM_SCORE_MAX_INDEX; + + window_size = SCM_SCORE_MAX_INDEX/ + score_params->oce_wan_scoring.num_slot; + mbo_oce_ie = util_scan_entry_mbo_oce(entry); + if (wlan_parse_oce_reduced_wan_metrics_ie(mbo_oce_ie, + &wan_metrics)) { + scm_err("downlink_av_cap %d", wan_metrics.downlink_av_cap); + /* if capacity is 0 return 0 score */ + if (!wan_metrics.downlink_av_cap) + return 0; + /* Desired values are from 1 to WLAN_SCORE_MAX_INDEX */ + index = qdf_do_div(wan_metrics.downlink_av_cap, + window_size); + } else { + index = SCM_SCORE_INDEX_0; + } + + if (index > score_params->oce_wan_scoring.num_slot) + index = score_params->oce_wan_scoring.num_slot; + + return scm_get_score_for_index(index, + score_params->weight_cfg.oce_wan_weightage, + &score_params->oce_wan_scoring); +} + +#ifdef WLAN_POLICY_MGR_ENABLE + +static uint32_t scm_get_sta_nss(struct wlan_objmgr_psoc *psoc, + uint8_t bss_channel, + uint8_t vdev_nss_2g, + uint8_t vdev_nss_5g) +{ + /* + * If station support nss as 2*2 but AP support NSS as 1*1, + * this AP will be given half weight compare to AP which are having + * NSS as 2*2. + */ + + if (policy_mgr_is_chnl_in_diff_band( + psoc, wlan_chan_to_freq(bss_channel)) && + policy_mgr_is_hw_dbs_capable(psoc) && + !(policy_mgr_is_hw_dbs_2x2_capable(psoc))) + return 1; + + return (WLAN_REG_IS_24GHZ_CH(bss_channel) ? + vdev_nss_2g : + vdev_nss_5g); +} +#else +static uint32_t scm_get_sta_nss(struct wlan_objmgr_psoc *psoc, + uint8_t bss_channel, + uint8_t vdev_nss_2g, + uint8_t vdev_nss_5g) +{ + return (WLAN_REG_IS_24GHZ_CH(bss_channel) ? + vdev_nss_2g : + vdev_nss_5g); +} +#endif + +/** + * scm_calculate_sae_pk_ap_weightage() - Calculate SAE-PK AP weightage + * @entry: bss entry + * @score_params: bss score params + * @sae_pk_cap_present: sae_pk cap presetn in RSNXE capability field + * + * Return: SAE-PK AP weightage score + */ +static uint32_t +scm_calculate_sae_pk_ap_weightage(struct scan_cache_entry *entry, + struct scoring_config *score_params, + bool *sae_pk_cap_present) +{ + uint8_t *rsnxe_ie, *rsnxe_cap, cap_len; + + rsnxe_ie = util_scan_entry_rsnxe(entry); + + rsnxe_cap = wlan_crypto_parse_rsnxe_ie(rsnxe_ie, &cap_len); + + if (!rsnxe_cap) + return 0; + + *sae_pk_cap_present = *rsnxe_cap & WLAN_CRYPTO_RSNX_CAP_SAE_PK; + if (*sae_pk_cap_present) + return score_params->weight_cfg.sae_pk_ap_weightage * + MAX_INDEX_SCORE; + + return 0; +} + +int scm_calculate_bss_score(struct wlan_objmgr_psoc *psoc, + struct scan_default_params *params, + struct scan_cache_entry *entry, + int pcl_chan_weight) +{ + int32_t score = 0; + int32_t rssi_score = 0; + int32_t pcl_score = 0; + int32_t ht_score = 0; + int32_t vht_score = 0; + int32_t he_score = 0; + int32_t bandwidth_score = 0; + int32_t beamformee_score = 0; + int32_t band_score = 0; + int32_t nss_score = 0; + int32_t congestion_score = 0; + int32_t congestion_pct = 0; + int32_t oce_wan_score = 0; + uint8_t prorated_pcnt; + bool is_vht = false; + int8_t good_rssi_threshold; + int8_t rssi_pref_5g_rssi_thresh; + bool same_bucket = false; + bool ap_su_beam_former = false; + uint32_t sae_pk_score = 0; + bool sae_pk_cap_present = 0; + struct wlan_ie_vhtcaps *vht_cap; + struct scoring_config *score_config; + struct weight_config *weight_config; + struct wlan_scan_obj *scan_obj; + uint32_t sta_nss; + struct wlan_objmgr_pdev *pdev = NULL; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("scan_obj is NULL"); + return 0; + } + + score_config = &scan_obj->scan_def.score_config; + weight_config = &score_config->weight_cfg; + + rssi_score = scm_calculate_rssi_score(&score_config->rssi_score, + entry->rssi_raw, weight_config->rssi_weightage); + score += rssi_score; + + pcl_score = scm_calculate_pcl_score(pcl_chan_weight, + weight_config->pcl_weightage); + score += pcl_score; + + prorated_pcnt = scm_roam_calculate_prorated_pcnt_by_rssi( + &score_config->rssi_score, entry->rssi_raw, + weight_config->rssi_weightage); + /* If device and AP supports HT caps, extra 10% score will be added */ + if (score_config->ht_cap && entry->ie_list.htcap) + ht_score = prorated_pcnt * + weight_config->ht_caps_weightage; + score += ht_score; + + if (WLAN_REG_IS_24GHZ_CH_FREQ(entry->channel.chan_freq)) { + if (score_config->vht_24G_cap) + is_vht = true; + } else if (score_config->vht_cap) { + is_vht = true; + } + /* + * If device and AP supports VHT caps, Extra 6% score will + * be added to score + */ + if (is_vht && entry->ie_list.vhtcap) + vht_score = prorated_pcnt * + weight_config->vht_caps_weightage; + score += vht_score; + + if (score_config->he_cap && entry->ie_list.hecap) + he_score = prorated_pcnt * + weight_config->he_caps_weightage; + score += he_score; + + bandwidth_score = scm_calculate_bandwidth_score(entry, score_config, + prorated_pcnt); + score += bandwidth_score; + + good_rssi_threshold = + score_config->rssi_score.good_rssi_threshold * (-1); + rssi_pref_5g_rssi_thresh = + score_config->rssi_score.rssi_pref_5g_rssi_thresh * (-1); + if (entry->rssi_raw < good_rssi_threshold) + same_bucket = scm_rssi_is_same_bucket(good_rssi_threshold, + entry->rssi_raw, rssi_pref_5g_rssi_thresh, + score_config->rssi_score.bad_rssi_bucket_size); + + vht_cap = (struct wlan_ie_vhtcaps *) util_scan_entry_vhtcap(entry); + if (vht_cap && vht_cap->su_beam_former) + ap_su_beam_former = true; + if (is_vht && ap_su_beam_former && + (entry->rssi_raw > rssi_pref_5g_rssi_thresh) && !same_bucket) + beamformee_score = BEST_CANDIDATE_MAX_WEIGHT * + weight_config->beamforming_cap_weightage; + score += beamformee_score; + + congestion_score = scm_calculate_congestion_score(entry, score_config, + &congestion_pct); + score += congestion_score; + /* + * Consider OCE WAN score and band preference score only if + * congestion_pct is greater than CONGESTION_THRSHOLD_FOR_BAND_OCE_SCORE + */ + if (congestion_pct < CONGESTION_THRSHOLD_FOR_BAND_OCE_SCORE) { + /* + * If AP is on 5Ghz channel , extra weigtage is added to BSS + * score. if RSSI is greater tha 5g rssi threshold or fall in + * same bucket else give weigtage to 2.4 GH. + */ + if ((entry->rssi_raw > rssi_pref_5g_rssi_thresh) && + !same_bucket) { + if (WLAN_REG_IS_5GHZ_CH_FREQ(entry->channel.chan_freq)) + band_score = + weight_config->chan_band_weightage * + WLAN_GET_SCORE_PERCENTAGE( + score_config->band_weight_per_index, + SCM_BAND_5G_INDEX); + } else if (WLAN_REG_IS_24GHZ_CH_FREQ( + entry->channel.chan_freq)) { + band_score = weight_config->chan_band_weightage * + WLAN_GET_SCORE_PERCENTAGE( + score_config->band_weight_per_index, + SCM_BAND_2G_INDEX); + } + score += band_score; + + oce_wan_score = scm_calculate_oce_wan_score(entry, + score_config); + score += oce_wan_score; + } + + sae_pk_score = scm_calculate_sae_pk_ap_weightage(entry, score_config, + &sae_pk_cap_present); + score += sae_pk_score; + + pdev = wlan_objmgr_get_pdev_by_id(psoc, entry->pdev_id, WLAN_SCAN_ID); + if (!pdev) { + scm_err("pdev is NULL"); + return 0; + } + + sta_nss = scm_get_sta_nss(psoc, + wlan_reg_freq_to_chan( + pdev, + entry->channel.chan_freq), + score_config->vdev_nss_24g, + score_config->vdev_nss_5g); + + wlan_objmgr_pdev_release_ref(pdev, WLAN_SCAN_ID); + /* + * If station support nss as 2*2 but AP support NSS as 1*1, + * this AP will be given half weight compare to AP which are having + * NSS as 2*2. + */ + nss_score = scm_calculate_nss_score(psoc, score_config, entry->nss, + prorated_pcnt, sta_nss); + score += nss_score; + + scm_nofl_debug("Self: HT %d VHT %d HE %d VHT_24Ghz %d BF cap %d cb_mode_24g %d cb_mode_5G %d NSS %d", + score_config->ht_cap, score_config->vht_cap, + score_config->he_cap, score_config->vht_24G_cap, + score_config->beamformee_cap, score_config->cb_mode_24G, + score_config->cb_mode_5G, sta_nss); + + scm_nofl_debug("Candidate("QDF_MAC_ADDR_FMT" freq %d): rssi %d HT %d VHT %d HE %d su bfer %d phy %d air time frac %d qbss %d cong_pct %d NSS %d sae_pk_cap_present %d", + QDF_MAC_ADDR_REF(entry->bssid.bytes), + entry->channel.chan_freq, + entry->rssi_raw, util_scan_entry_htcap(entry) ? 1 : 0, + util_scan_entry_vhtcap(entry) ? 1 : 0, + util_scan_entry_hecap(entry) ? 1 : 0, ap_su_beam_former, + entry->phy_mode, entry->air_time_fraction, + entry->qbss_chan_load, congestion_pct, entry->nss, + sae_pk_cap_present); + + scm_nofl_debug("Scores: prorated_pcnt %d rssi %d pcl %d ht %d vht %d he %d bfee %d bw %d band %d congestion %d nss %d oce wan %d sae_pk %d TOTAL %d", + prorated_pcnt, rssi_score, pcl_score, ht_score, + vht_score, he_score, beamformee_score, bandwidth_score, + band_score, congestion_score, nss_score, oce_wan_score, + sae_pk_score, score); + + entry->bss_score = score; + return score; +} + +bool scm_get_pcl_weight_of_channel(uint32_t chan_freq, + struct scan_filter *filter, + int *pcl_chan_weight, + uint8_t *weight_list) +{ + int i; + bool found = false; + + if (!filter) + return found; + + for (i = 0; i < filter->num_of_pcl_channels; i++) { + if (filter->pcl_freq_list[i] == chan_freq) { + *pcl_chan_weight = filter->pcl_weight_list[i]; + found = true; + break; + } + } + return found; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db.c new file mode 100644 index 0000000000000000000000000000000000000000..2c6b2a4ffc59366f886ef26a8e0e29307129138e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db.c @@ -0,0 +1,1801 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan cache api and functionality + * The Scan entries are protected by scan_db_lock. Holding the lock + * for whole scan operation during get/flush scan results may take + * more than 5 ms and thus ref count is used along with scan_db_lock. + * Below are the operation on scan cache entry: + * - While adding new node to the entry scan_db_lock is taken and ref_cnt + * is initialized and incremented. Also the cookie will be set to valid value. + * - The ref count incremented during adding new node should be decremented only + * by a delete operation on the node. But there can be multiple concurrent + * delete operations on a node from different threads which may lead to ref + * count being decremented multiple time and freeing the node even if node + * is in use. So to maintain atomicity between multiple delete operations + * on a same node from different threads, a cookie is used to check if node is + * logically deleted or not. A delete operation will set the cookie to 0 + * making it invalid. So if the 2nd thread find the cookie as invalid it will + * not try to delete and decrement the ref count of the node again. + * - This Cookie is also used to check if node is valid while iterating through + * the scan cache to avoid duplicate entries. + * - Once ref_cnt become 0, i.e. it is logically deleted and no thread is using + * it the node is physically deleted from the scan cache. + * - While reading the node the ref_cnt should be incremented. Once reading + * operation is done ref_cnt is decremented. + */ +#include +#include +#include +#include +#include +#include +#include "wlan_scan_main.h" +#include "wlan_scan_cache_db_i.h" +#include "wlan_reg_services_api.h" +#include "wlan_reg_ucfg_api.h" +#include +#include + +#ifdef FEATURE_6G_SCAN_CHAN_SORT_ALGO + +struct channel_list_db *scm_get_rnr_channel_db(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj = NULL; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + + if (!scan_obj) + return NULL; + + return &scan_obj->rnr_channel_db; +} + +struct meta_rnr_channel *scm_get_chan_meta(struct wlan_objmgr_psoc *psoc, + uint32_t chan_freq) +{ + int i; + struct channel_list_db *rnr_channel_db; + + if (!psoc || !chan_freq || !wlan_reg_is_6ghz_chan_freq(chan_freq)) + return NULL; + + rnr_channel_db = scm_get_rnr_channel_db(psoc); + if (!rnr_channel_db) + return NULL; + + for (i = 0; i < QDF_ARRAY_SIZE(rnr_channel_db->channel); i++) + if (rnr_channel_db->channel[i].chan_freq == chan_freq) + return &rnr_channel_db->channel[i]; + + return NULL; +} + +static void scm_add_rnr_channel_db(struct wlan_objmgr_psoc *psoc, + struct scan_cache_entry *entry) +{ + uint32_t chan_freq; + uint8_t is_6g_bss, i; + struct meta_rnr_channel *channel; + struct rnr_bss_info *rnr_bss; + struct scan_rnr_node *rnr_node; + + chan_freq = entry->channel.chan_freq; + is_6g_bss = wlan_reg_is_6ghz_chan_freq(chan_freq); + + /* Return if the BSS is not 6G and RNR IE is not present */ + if (!(is_6g_bss || entry->ie_list.rnrie)) + return; + + scm_debug("BSS freq %d BSSID: "QDF_MAC_ADDR_FMT, chan_freq, + QDF_MAC_ADDR_REF(entry->bssid.bytes)); + if (is_6g_bss) { + channel = scm_get_chan_meta(psoc, chan_freq); + if (!channel) { + scm_debug("Failed to get chan Meta freq %d", chan_freq); + return; + } + channel->bss_beacon_probe_count++; + channel->beacon_probe_last_time_found = entry->scan_entry_time; + } + + /* + * If scan entry got RNR IE then loop through all + * entries and increase the BSS count in respective channels + */ + if (!entry->ie_list.rnrie) + return; + + for (i = 0; i < MAX_RNR_BSS; i++) { + rnr_bss = &entry->rnr.bss_info[i]; + /* Skip if entry is not valid */ + if (!rnr_bss->channel_number) + continue; + chan_freq = wlan_reg_chan_opclass_to_freq(rnr_bss->channel_number, + rnr_bss->operating_class, + false); + channel = scm_get_chan_meta(psoc, chan_freq); + if (!channel) { + scm_debug("Failed to get chan Meta freq %d", chan_freq); + continue; + } + channel->bss_beacon_probe_count++; + /* Don't add RNR entry if list is full */ + if (qdf_list_size(&channel->rnr_list) >= WLAN_MAX_RNR_COUNT) { + scm_debug("List is full"); + return; + } + + rnr_node = qdf_mem_malloc(sizeof(struct scan_rnr_node)); + if (!rnr_node) + return; + rnr_node->entry.timestamp = entry->scan_entry_time; + if (!qdf_is_macaddr_zero(&rnr_bss->bssid)) + qdf_mem_copy(&rnr_node->entry.bssid, + &rnr_bss->bssid, + QDF_MAC_ADDR_SIZE); + if (rnr_bss->short_ssid) + rnr_node->entry.short_ssid = rnr_bss->short_ssid; + scm_debug("Add freq %d: "QDF_MAC_ADDR_FMT" short ssid %x", chan_freq, + QDF_MAC_ADDR_REF(rnr_bss->bssid.bytes), + rnr_bss->short_ssid); + qdf_list_insert_back(&channel->rnr_list, + &rnr_node->node); + } +} +#endif + +/** + * scm_del_scan_node() - API to remove scan node from the list + * @list: hash list + * @scan_node: node to be removed + * + * This should be called while holding scan_db_lock. + * + * Return: void + */ +static void scm_del_scan_node(qdf_list_t *list, + struct scan_cache_node *scan_node) +{ + QDF_STATUS status; + + status = qdf_list_remove_node(list, &scan_node->node); + if (QDF_IS_STATUS_SUCCESS(status)) { + util_scan_free_cache_entry(scan_node->entry); + qdf_mem_free(scan_node); + } +} + +/** + * scm_del_scan_node_from_db() - API to del the scan entry + * @scan_db: scan database + * @scan_entry:entry scan_node + * + * API to flush the scan entry. This should be called while + * holding scan_db_lock. + * + * Return: QDF status. + */ +static QDF_STATUS scm_del_scan_node_from_db(struct scan_dbs *scan_db, + struct scan_cache_node *scan_node) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint8_t hash_idx; + + if (!scan_node) + return QDF_STATUS_E_INVAL; + + hash_idx = SCAN_GET_HASH(scan_node->entry->bssid.bytes); + scm_del_scan_node(&scan_db->scan_hash_tbl[hash_idx], scan_node); + scan_db->num_entries--; + + return status; +} + +/** + * scm_scan_entry_get_ref() - api to increase ref count of scan entry + * @scan_node: scan node + * + * Return: void + */ +static void scm_scan_entry_get_ref(struct scan_cache_node *scan_node) +{ + if (!scan_node) { + scm_err("scan_node is NULL"); + QDF_ASSERT(0); + return; + } + qdf_atomic_inc(&scan_node->ref_cnt); +} + +/** + * scm_scan_entry_put_ref() - Api to decrease ref count of scan entry + * and free if it become 0 + * @scan_db: scan database + * @scan_node: scan node + * @lock_needed: if scan_db_lock is needed + * + * Return: void + */ +static void scm_scan_entry_put_ref(struct scan_dbs *scan_db, + struct scan_cache_node *scan_node, bool lock_needed) +{ + + if (!scan_node) { + scm_err("scan_node is NULL"); + QDF_ASSERT(0); + return; + } + + if (lock_needed) + qdf_spin_lock_bh(&scan_db->scan_db_lock); + + if (!qdf_atomic_read(&scan_node->ref_cnt)) { + if (lock_needed) + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + scm_err("scan_node ref cnt is 0"); + QDF_ASSERT(0); + return; + } + + /* Decrement ref count, free scan_node, if ref count == 0 */ + if (qdf_atomic_dec_and_test(&scan_node->ref_cnt)) + scm_del_scan_node_from_db(scan_db, scan_node); + + if (lock_needed) + qdf_spin_unlock_bh(&scan_db->scan_db_lock); +} + +/** + * scm_scan_entry_del() - API to delete scan node + * @scan_db: data base + * @scan_node: node to be deleted + * + * Call must be protected by scan_db->scan_db_lock + * + * Return: void + */ + +static void scm_scan_entry_del(struct scan_dbs *scan_db, + struct scan_cache_node *scan_node) +{ + if (!scan_node) { + scm_err("scan node is NULL"); + QDF_ASSERT(0); + return; + } + + if (scan_node->cookie != SCAN_NODE_ACTIVE_COOKIE) { + scm_debug("node is already deleted"); + return; + } + /* Seems node is already deleted */ + if (!qdf_atomic_read(&scan_node->ref_cnt)) { + scm_debug("node is already deleted ref 0"); + return; + } + scan_node->cookie = 0; + scm_scan_entry_put_ref(scan_db, scan_node, false); +} + +/** + * scm_add_scan_node() - API to add scan node + * @scan_db: data base + * @scan_node: node to be added + * @dup_node: node before which new node to be added + * if it's not NULL, otherwise add node to tail + * + * Call must be protected by scan_db->scan_db_lock + * + * Return: void + */ +static void scm_add_scan_node(struct scan_dbs *scan_db, + struct scan_cache_node *scan_node, + struct scan_cache_node *dup_node) +{ + uint8_t hash_idx; + + hash_idx = + SCAN_GET_HASH(scan_node->entry->bssid.bytes); + + qdf_atomic_init(&scan_node->ref_cnt); + scan_node->cookie = SCAN_NODE_ACTIVE_COOKIE; + scm_scan_entry_get_ref(scan_node); + if (!dup_node) + qdf_list_insert_back(&scan_db->scan_hash_tbl[hash_idx], + &scan_node->node); + else + qdf_list_insert_before(&scan_db->scan_hash_tbl[hash_idx], + &scan_node->node, &dup_node->node); + + scan_db->num_entries++; +} + + +/** + * scm_get_next_valid_node() - API get the next valid scan node from + * the list + * @list: hash list + * @cur_node: current node pointer + * + * API to get next active node from the list. If cur_node is NULL + * it will return first node of the list. + * Call must be protected by scan_db->scan_db_lock + * + * Return: next scan node + */ +static qdf_list_node_t * +scm_get_next_valid_node(qdf_list_t *list, + qdf_list_node_t *cur_node) +{ + qdf_list_node_t *next_node = NULL; + qdf_list_node_t *temp_node = NULL; + struct scan_cache_node *scan_node; + + if (cur_node) + qdf_list_peek_next(list, cur_node, &next_node); + else + qdf_list_peek_front(list, &next_node); + + while (next_node) { + scan_node = qdf_container_of(next_node, + struct scan_cache_node, node); + if (scan_node->cookie == SCAN_NODE_ACTIVE_COOKIE) + return next_node; + /* + * If node is not valid check for next entry + * to get next valid node. + */ + qdf_list_peek_next(list, next_node, &temp_node); + next_node = temp_node; + temp_node = NULL; + } + + return next_node; +} + +/** + * scm_get_next_node() - API get the next scan node from + * the list + * @scan_db: scan data base + * @list: hash list + * @cur_node: current node pointer + * + * API get the next node from the list. If cur_node is NULL + * it will return first node of the list + * + * Return: next scan cache node + */ +static struct scan_cache_node * +scm_get_next_node(struct scan_dbs *scan_db, + qdf_list_t *list, struct scan_cache_node *cur_node) +{ + struct scan_cache_node *next_node = NULL; + qdf_list_node_t *next_list = NULL; + + qdf_spin_lock_bh(&scan_db->scan_db_lock); + if (cur_node) { + next_list = scm_get_next_valid_node(list, &cur_node->node); + /* Decrement the ref count of the previous node */ + scm_scan_entry_put_ref(scan_db, + cur_node, false); + } else { + next_list = scm_get_next_valid_node(list, NULL); + } + /* Increase the ref count of the obtained node */ + if (next_list) { + next_node = qdf_container_of(next_list, + struct scan_cache_node, node); + scm_scan_entry_get_ref(next_node); + } + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + + return next_node; +} + +/** + * scm_check_and_age_out() - check and age out the old entries + * @scan_db: scan db + * @scan_node: node to check for age out + * @scan_aging_time: scan cache aging time + * + * Return: void + */ +static void scm_check_and_age_out(struct scan_dbs *scan_db, + struct scan_cache_node *node, + qdf_time_t scan_aging_time) +{ + if (util_scan_entry_age(node->entry) >= + scan_aging_time) { + scm_debug("Aging out BSSID: "QDF_MAC_ADDR_FMT" with age %lu ms", + QDF_MAC_ADDR_REF(node->entry->bssid.bytes), + util_scan_entry_age(node->entry)); + qdf_spin_lock_bh(&scan_db->scan_db_lock); + scm_scan_entry_del(scan_db, node); + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + } +} + +static bool scm_bss_is_connected(struct scan_cache_entry *entry) +{ + if (entry->mlme_info.assoc_state == SCAN_ENTRY_CON_STATE_ASSOC) + return true; + return false; +} +void scm_age_out_entries(struct wlan_objmgr_psoc *psoc, + struct scan_dbs *scan_db) +{ + int i; + struct scan_cache_node *cur_node = NULL; + struct scan_cache_node *next_node = NULL; + struct scan_default_params *def_param; + + def_param = wlan_scan_psoc_get_def_params(psoc); + if (!def_param) { + scm_err("wlan_scan_psoc_get_def_params failed"); + return; + } + + for (i = 0 ; i < SCAN_HASH_SIZE; i++) { + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], NULL); + while (cur_node) { + if (!scm_bss_is_connected(cur_node->entry)) + scm_check_and_age_out(scan_db, cur_node, + def_param->scan_cache_aging_time); + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], cur_node); + cur_node = next_node; + next_node = NULL; + } + } +} + +/** + * scm_flush_oldest_entry() - Iterate over scan db and flust out the + * oldest entry + * @scan_db: scan db from which oldest entry needs to be flushed + * + * Return: QDF_STATUS + */ +static QDF_STATUS scm_flush_oldest_entry(struct scan_dbs *scan_db) +{ + int i; + struct scan_cache_node *oldest_node = NULL; + struct scan_cache_node *cur_node; + + for (i = 0 ; i < SCAN_HASH_SIZE; i++) { + /* Get the first valid node for the hash */ + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], + NULL); + /* Iterate scan db and flush out oldest node + * take ref_cnt for oldest_node + */ + + while (cur_node) { + if (!oldest_node || + (util_scan_entry_age(oldest_node->entry) < + util_scan_entry_age(cur_node->entry))) { + if (oldest_node) + scm_scan_entry_put_ref(scan_db, + oldest_node, + true); + oldest_node = cur_node; + scm_scan_entry_get_ref(oldest_node); + } + + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], + cur_node); + }; + } + + if (oldest_node) { + scm_debug("Flush oldest BSSID: "QDF_MAC_ADDR_FMT" with age %lu ms", + QDF_MAC_ADDR_REF(oldest_node->entry->bssid.bytes), + util_scan_entry_age(oldest_node->entry)); + /* Release ref_cnt taken for oldest_node and delete it */ + qdf_spin_lock_bh(&scan_db->scan_db_lock); + scm_scan_entry_del(scan_db, oldest_node); + scm_scan_entry_put_ref(scan_db, oldest_node, false); + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * scm_update_alt_wcn_ie() - update the alternate WCN IE + * @from: copy from + * @dst: copy to + * + * Return: void + */ +static void scm_update_alt_wcn_ie(struct scan_cache_entry *from, + struct scan_cache_entry *dst) +{ + uint32_t alt_wcn_ie_len; + + if (from->frm_subtype == dst->frm_subtype) + return; + + if (!from->ie_list.wcn && !dst->ie_list.wcn) + return; + + /* Existing WCN IE is empty. */ + if (!from->ie_list.wcn) + return; + + alt_wcn_ie_len = 2 + from->ie_list.wcn[1]; + if (alt_wcn_ie_len > WLAN_MAX_IE_LEN + 2) { + scm_err("invalid IE len"); + return; + } + + if (!dst->alt_wcn_ie.ptr) { + /* allocate this additional buffer for alternate WCN IE */ + dst->alt_wcn_ie.ptr = + qdf_mem_malloc_atomic(WLAN_MAX_IE_LEN + 2); + if (!dst->alt_wcn_ie.ptr) { + scm_err("failed to allocate memory"); + return; + } + } + qdf_mem_copy(dst->alt_wcn_ie.ptr, + from->ie_list.wcn, alt_wcn_ie_len); + dst->alt_wcn_ie.len = alt_wcn_ie_len; +} + +/** + * scm_update_mlme_info() - update mlme info + * @src: source scan entry + * @dest: destination scan entry + * + * Return: void + */ +static inline void +scm_update_mlme_info(struct scan_cache_entry *src, + struct scan_cache_entry *dest) +{ + qdf_mem_copy(&dest->mlme_info, &src->mlme_info, + sizeof(struct mlme_info)); +} + +/** + * scm_copy_info_from_dup_entry() - copy duplicate node info + * to new scan entry + * @pdev: pdev ptr + * @scan_obj: scan obj ptr + * @scan_db: scan database + * @scan_params: new entry to be added + * @scan_node: duplicate entry + * + * Copy duplicate node info to new entry. + * + * Return: void + */ +static void +scm_copy_info_from_dup_entry(struct wlan_objmgr_pdev *pdev, + struct wlan_scan_obj *scan_obj, + struct scan_dbs *scan_db, + struct scan_cache_entry *scan_params, + struct scan_cache_node *scan_node) +{ + struct scan_cache_entry *scan_entry; + uint64_t time_gap; + + scan_entry = scan_node->entry; + + /* Update probe resp entry as well if AP is in hidden mode */ + if (scan_params->frm_subtype == MGMT_SUBTYPE_PROBE_RESP && + scan_entry->is_hidden_ssid) + scan_params->is_hidden_ssid = true; + + /* + * If AP changed its beacon from not having an SSID to showing it the + * kernel will drop the entry asumming that something is wrong with AP. + * This can result in connection failure while updating the bss during + * connection. So flush the hidden entry from kernel before indicating + * the new entry. + */ + if (scan_entry->is_hidden_ssid && + scan_params->frm_subtype == MGMT_SUBTYPE_BEACON && + !util_scan_is_null_ssid(&scan_params->ssid)) { + if (scan_obj->cb.unlink_bss) { + scm_debug("Hidden AP "QDF_MAC_ADDR_FMT" switch to non-hidden SSID, So unlink the entry", + QDF_MAC_ADDR_REF(scan_entry->bssid.bytes)); + scan_obj->cb.unlink_bss(pdev, scan_entry); + } + } + + /* If old entry have the ssid but new entry does not */ + if (util_scan_is_null_ssid(&scan_params->ssid) && + scan_entry->ssid.length) { + /* + * New entry has a hidden SSID and old one has the SSID. + * Add the entry by using the ssid of the old entry + * only if diff of saved SSID time and current time is + * less than HIDDEN_SSID_TIME time. + * This will avoid issues in case AP changes its SSID + * while remain hidden. + */ + time_gap = + qdf_mc_timer_get_system_time() - + scan_entry->hidden_ssid_timestamp; + if (time_gap <= HIDDEN_SSID_TIME) { + scan_params->hidden_ssid_timestamp = + scan_entry->hidden_ssid_timestamp; + scan_params->ssid.length = + scan_entry->ssid.length; + qdf_mem_copy(scan_params->ssid.ssid, + scan_entry->ssid.ssid, + scan_entry->ssid.length); + } + } + + /* + * Due to Rx sensitivity issue, sometime beacons are seen on adjacent + * channel so workaround in software is needed. If DS params or HT info + * are present driver can get proper channel info from these IEs and set + * channel_mismatch so that the older RSSI values are used in new entry. + * + * For the cases where DS params and HT info is not present, driver + * needs to check below conditions to get proper channel and set + * channel_mismatch so that the older RSSI values are used in new entry: + * -- The old entry channel and new entry channel are not same + * -- RSSI is less than -80, this indicate that the signal has leaked + * in adjacent channel. + */ + if ((scan_params->frm_subtype == MGMT_SUBTYPE_BEACON) && + !util_scan_entry_htinfo(scan_params) && + !util_scan_entry_ds_param(scan_params) && + (scan_params->channel.chan_freq != scan_entry->channel.chan_freq) && + (scan_params->rssi_raw < ADJACENT_CHANNEL_RSSI_THRESHOLD)) { + scan_params->channel.chan_freq = scan_entry->channel.chan_freq; + scan_params->channel_mismatch = true; + } + + /* Use old value for rssi if beacon was heard on adjacent channel. */ + if (scan_params->channel_mismatch) { + scan_params->snr = scan_entry->snr; + scan_params->avg_snr = scan_entry->avg_snr; + scan_params->rssi_raw = scan_entry->rssi_raw; + scan_params->avg_rssi = scan_entry->avg_rssi; + scan_params->rssi_timestamp = + scan_entry->rssi_timestamp; + } else { + /* If elapsed time since last rssi and snr update for this + * entry is smaller than a thresold, calculate a + * running average of the RSSI and SNR values. + * Otherwise new frames RSSI and SNR are more representive + * of the signal strength. + */ + time_gap = + scan_params->scan_entry_time - + scan_entry->rssi_timestamp; + if (time_gap > WLAN_RSSI_AVERAGING_TIME) { + scan_params->avg_rssi = + WLAN_RSSI_IN(scan_params->rssi_raw); + scan_params->avg_snr = + WLAN_SNR_IN(scan_params->snr); + } + else { + /* Copy previous average rssi and snr to new entry */ + scan_params->avg_snr = scan_entry->avg_snr; + scan_params->avg_rssi = scan_entry->avg_rssi; + /* Average with previous samples */ + WLAN_RSSI_LPF(scan_params->avg_rssi, + scan_params->rssi_raw); + WLAN_SNR_LPF(scan_params->avg_snr, + scan_params->snr); + } + + scan_params->rssi_timestamp = scan_params->scan_entry_time; + } + + /* copy wsn ie from scan_entry to scan_params*/ + scm_update_alt_wcn_ie(scan_entry, scan_params); + + /* copy mlme info from scan_entry to scan_params*/ + scm_update_mlme_info(scan_entry, scan_params); +} + +/** + * scm_find_duplicate() - find duplicate entry, + * if present, add input scan entry before it and delete + * duplicate entry. otherwise add entry to tail + * @pdev: pdev ptr + * @scan_obj: scan obj ptr + * @scan_db: scan db + * @entry: input scan cache entry + * @dup_node: node before which new entry to be added + * + * ref_cnt is taken for dup_node, caller should release ref taken + * if returns true. + * + * Return: bool + */ +static bool +scm_find_duplicate(struct wlan_objmgr_pdev *pdev, + struct wlan_scan_obj *scan_obj, + struct scan_dbs *scan_db, + struct scan_cache_entry *entry, + struct scan_cache_node **dup_node) +{ + uint8_t hash_idx; + struct scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + + hash_idx = SCAN_GET_HASH(entry->bssid.bytes); + + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[hash_idx], + NULL); + + while (cur_node) { + if (util_is_scan_entry_match(entry, + cur_node->entry)) { + scm_copy_info_from_dup_entry(pdev, scan_obj, scan_db, + entry, cur_node); + *dup_node = cur_node; + return true; + } + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[hash_idx], cur_node); + cur_node = next_node; + next_node = NULL; + } + + return false; +} + +/** + * scm_add_update_entry() - add or update scan entry + * @psoc: psoc ptr + * @pdev: pdev pointer + * @scan_params: new received entry + * + * Return: QDF_STATUS + */ +static QDF_STATUS scm_add_update_entry(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev, struct scan_cache_entry *scan_params) +{ + struct scan_cache_node *dup_node = NULL; + struct scan_cache_node *scan_node = NULL; + bool is_dup_found = false; + QDF_STATUS status; + struct scan_dbs *scan_db; + struct wlan_scan_obj *scan_obj; + uint8_t security_type; + + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return QDF_STATUS_E_INVAL; + } + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("scan_obj is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (scan_params->frm_subtype == + MGMT_SUBTYPE_PROBE_RESP && + !scan_params->ie_list.ssid) + scm_debug("Probe resp doesn't contain SSID"); + + + if (scan_params->ie_list.csa || + scan_params->ie_list.xcsa || + scan_params->ie_list.cswrp) + scm_debug("CSA IE present for BSSID: "QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(scan_params->bssid.bytes)); + + is_dup_found = scm_find_duplicate(pdev, scan_obj, scan_db, scan_params, + &dup_node); + + security_type = scan_params->security_type; + scm_nofl_debug("Received %s: "QDF_MAC_ADDR_FMT" \"%.*s\" freq %d rssi %d tsf_delta %u seq %d snr %d phy %d hidden %d mismatch %d %s%s%s%s pdev %d boot_time %llu ns", + (scan_params->frm_subtype == MGMT_SUBTYPE_PROBE_RESP) ? + "prb rsp" : "bcn", + QDF_MAC_ADDR_REF(scan_params->bssid.bytes), + scan_params->ssid.length, scan_params->ssid.ssid, + scan_params->channel.chan_freq, scan_params->rssi_raw, + scan_params->tsf_delta, scan_params->seq_num, + scan_params->snr, scan_params->phy_mode, + scan_params->is_hidden_ssid, + scan_params->channel_mismatch, + security_type & SCAN_SECURITY_TYPE_WPA ? "[WPA]" : "", + security_type & SCAN_SECURITY_TYPE_RSN ? "[RSN]" : "", + security_type & SCAN_SECURITY_TYPE_WAPI ? "[WAPI]" : "", + security_type & SCAN_SECURITY_TYPE_WEP ? "[WEP]" : "", + wlan_objmgr_pdev_get_pdev_id(pdev), + scan_params->boottime_ns); + + if (scan_obj->cb.inform_beacon) + scan_obj->cb.inform_beacon(pdev, scan_params); + + if (scan_db->num_entries >= MAX_SCAN_CACHE_SIZE) { + status = scm_flush_oldest_entry(scan_db); + if (QDF_IS_STATUS_ERROR(status)) { + /* release ref taken for dup node */ + if (is_dup_found) + scm_scan_entry_put_ref(scan_db, dup_node, true); + return status; + } + } + + scan_node = qdf_mem_malloc(sizeof(*scan_node)); + if (!scan_node) { + /* release ref taken for dup node */ + if (is_dup_found) + scm_scan_entry_put_ref(scan_db, dup_node, true); + return QDF_STATUS_E_NOMEM; + } + + scan_node->entry = scan_params; + qdf_spin_lock_bh(&scan_db->scan_db_lock); + scm_add_scan_node(scan_db, scan_node, dup_node); + + if (is_dup_found) { + /* release ref taken for dup node and delete it */ + scm_scan_entry_del(scan_db, dup_node); + scm_scan_entry_put_ref(scan_db, dup_node, false); + } + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS __scm_handle_bcn_probe(struct scan_bcn_probe_event *bcn) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev = NULL; + struct scan_cache_entry *scan_entry; + struct wlan_scan_obj *scan_obj; + qdf_list_t *scan_list = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint32_t list_count, i; + qdf_list_node_t *next_node = NULL; + struct scan_cache_node *scan_node; + struct wlan_frame_hdr *hdr = NULL; + + if (!bcn) { + scm_err("bcn is NULL"); + return QDF_STATUS_E_INVAL; + } + if (!bcn->rx_data) { + scm_err("rx_data iS NULL"); + status = QDF_STATUS_E_INVAL; + goto free_nbuf; + } + if (!bcn->buf) { + scm_err("buf is NULL"); + status = QDF_STATUS_E_INVAL; + goto free_nbuf; + } + + hdr = (struct wlan_frame_hdr *)qdf_nbuf_data(bcn->buf); + psoc = bcn->psoc; + pdev = wlan_objmgr_get_pdev_by_id(psoc, + bcn->rx_data->pdev_id, WLAN_SCAN_ID); + if (!pdev) { + scm_err("pdev is NULL"); + status = QDF_STATUS_E_INVAL; + goto free_nbuf; + } + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("scan_obj is NULL"); + status = QDF_STATUS_E_INVAL; + goto free_nbuf; + } + + if (qdf_nbuf_len(bcn->buf) <= + (sizeof(struct wlan_frame_hdr) + + offsetof(struct wlan_bcn_frame, ie))) { + scm_debug("invalid beacon/probe length"); + status = QDF_STATUS_E_INVAL; + goto free_nbuf; + } + + if (bcn->frm_type == MGMT_SUBTYPE_BEACON && + wlan_reg_is_dfs_for_freq(pdev, bcn->rx_data->chan_freq)) { + util_scan_add_hidden_ssid(pdev, bcn->buf); + } + + scan_list = + util_scan_unpack_beacon_frame(pdev, qdf_nbuf_data(bcn->buf), + qdf_nbuf_len(bcn->buf), bcn->frm_type, + bcn->rx_data); + if (!scan_list || qdf_list_empty(scan_list)) { + scm_debug("failed to unpack %d frame BSSID: "QDF_MAC_ADDR_FMT, + bcn->frm_type, QDF_MAC_ADDR_REF(hdr->i_addr3)); + status = QDF_STATUS_E_INVAL; + goto free_nbuf; + } + + list_count = qdf_list_size(scan_list); + for (i = 0; i < list_count; i++) { + status = qdf_list_remove_front(scan_list, &next_node); + if (QDF_IS_STATUS_ERROR(status) || !next_node) { + scm_debug("list remove failure i:%d, lsize:%d, BSSID: "QDF_MAC_ADDR_FMT, + i, list_count, QDF_MAC_ADDR_REF(hdr->i_addr3)); + status = QDF_STATUS_E_INVAL; + goto free_nbuf; + } + + scan_node = qdf_container_of(next_node, + struct scan_cache_node, node); + + scan_entry = scan_node->entry; + + if (scan_obj->drop_bcn_on_chan_mismatch && + scan_entry->channel_mismatch) { + scm_nofl_debug("Drop frame for chan mismatch "QDF_MAC_ADDR_FMT" Seq Num: %d freq %d RSSI %d", + QDF_MAC_ADDR_REF(scan_entry->bssid.bytes), + scan_entry->seq_num, + scan_entry->channel.chan_freq, + scan_entry->rssi_raw); + util_scan_free_cache_entry(scan_entry); + qdf_mem_free(scan_node); + continue; + } + /* Do not add invalid channel entry as kernel will reject it */ + if (scan_obj->drop_bcn_on_invalid_freq && + wlan_reg_is_disable_for_freq(pdev, + scan_entry->channel.chan_freq)) { + scm_nofl_debug("Drop frame for invalid freq %d: "QDF_MAC_ADDR_FMT" Seq Num: %d RSSI %d", + scan_entry->channel.chan_freq, + QDF_MAC_ADDR_REF(scan_entry->bssid.bytes), + scan_entry->seq_num, + scan_entry->rssi_raw); + util_scan_free_cache_entry(scan_entry); + qdf_mem_free(scan_node); + continue; + } + if (scan_obj->cb.update_beacon) + scan_obj->cb.update_beacon(pdev, scan_entry); + + if (wlan_reg_11d_enabled_on_host(psoc)) + scm_11d_handle_country_info(psoc, pdev, scan_entry); + + status = scm_add_update_entry(psoc, pdev, scan_entry); + if (QDF_IS_STATUS_ERROR(status)) { + scm_debug("failed to add entry for BSSID: "QDF_MAC_ADDR_FMT" Seq Num: %d", + QDF_MAC_ADDR_REF(scan_entry->bssid.bytes), + scan_entry->seq_num); + util_scan_free_cache_entry(scan_entry); + qdf_mem_free(scan_node); + continue; + } + + qdf_mem_free(scan_node); + } + +free_nbuf: + if (scan_list) + qdf_mem_free(scan_list); + if (bcn->psoc) + wlan_objmgr_psoc_release_ref(bcn->psoc, WLAN_SCAN_ID); + if (pdev) + wlan_objmgr_pdev_release_ref(pdev, WLAN_SCAN_ID); + if (bcn->rx_data) + qdf_mem_free(bcn->rx_data); + if (bcn->buf) + qdf_nbuf_free(bcn->buf); + qdf_mem_free(bcn); + + return status; +} + +QDF_STATUS scm_handle_bcn_probe(struct scheduler_msg *msg) +{ + if (!msg) { + scm_err("msg is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + return __scm_handle_bcn_probe(msg->bodyptr); +} + +/** + * scm_list_insert_sorted() - add the entries in scan_list in sorted way + * @psoc: psoc ptr + * @filter: scan filter + * @scan_node: node entry to be inserted + * @scan_list: Temp scan list + * + * Add the entries in scan_list in sorted way considering + * cap_val and prefer val. The node is copy of original scan entry and + * thus no lock is required. + * + * Return: void + */ +static void scm_list_insert_sorted(struct wlan_objmgr_psoc *psoc, + struct scan_filter *filter, + struct scan_cache_node *scan_node, + qdf_list_t *scan_list) +{ + struct scan_cache_node *cur_node; + qdf_list_node_t *cur_lst = NULL, *next_lst = NULL; + struct scan_default_params *params; + int pcl_chan_weight = 0; + + params = wlan_scan_psoc_get_def_params(psoc); + if (!params) { + scm_err("wlan_scan_psoc_get_def_params failed"); + return; + } + if (filter->num_of_pcl_channels > 0 && + (scan_node->entry->rssi_raw > SCM_PCL_RSSI_THRESHOLD)) { + if (scm_get_pcl_weight_of_channel( + scan_node->entry->channel.chan_freq, + filter, &pcl_chan_weight, + filter->pcl_weight_list)) { + scm_debug("pcl freq %d pcl_chan_weight %d", + scan_node->entry->channel.chan_freq, + pcl_chan_weight); + } + } + if (params->is_bssid_hint_priority && + !qdf_mem_cmp(filter->bssid_hint.bytes, + scan_node->entry->bssid.bytes, + QDF_MAC_ADDR_SIZE)) + scan_node->entry->bss_score = BEST_CANDIDATE_MAX_BSS_SCORE; + else + scm_calculate_bss_score(psoc, params, + scan_node->entry, pcl_chan_weight); + + if (qdf_list_empty(scan_list)) { + qdf_list_insert_front(scan_list, &scan_node->node); + return; + } + + qdf_list_peek_front(scan_list, &cur_lst); + + while (cur_lst) { + cur_node = qdf_container_of(cur_lst, + struct scan_cache_node, node); + if (scm_is_better_bss(params, + scan_node->entry, cur_node->entry)) { + qdf_list_insert_before(scan_list, + &scan_node->node, + &cur_node->node); + break; + } + qdf_list_peek_next(scan_list, + cur_lst, &next_lst); + cur_lst = next_lst; + next_lst = NULL; + } + + if (!cur_lst) + qdf_list_insert_back(scan_list, + &scan_node->node); +} + +/** + * scm_scan_apply_filter_get_entry() - apply filter and get the + * scan entry + * @psoc: psoc pointer + * @db_entry: scan entry + * @filter: filter to be applied + * @scan_list: scan list to which entry is added + * + * Return: QDF_STATUS + */ +static QDF_STATUS +scm_scan_apply_filter_get_entry(struct wlan_objmgr_psoc *psoc, + struct scan_cache_entry *db_entry, + struct scan_filter *filter, + qdf_list_t *scan_list) +{ + struct scan_cache_node *scan_node = NULL; + struct security_info security = {0}; + bool match; + + if (!filter) + match = true; + else + match = scm_filter_match(psoc, db_entry, + filter, &security); + + if (!match) + return QDF_STATUS_SUCCESS; + + scan_node = qdf_mem_malloc_atomic(sizeof(*scan_node)); + if (!scan_node) + return QDF_STATUS_E_NOMEM; + + scan_node->entry = + util_scan_copy_cache_entry(db_entry); + + if (!scan_node->entry) { + qdf_mem_free(scan_node); + return QDF_STATUS_E_NOMEM; + } + + qdf_mem_copy(&scan_node->entry->neg_sec_info, + &security, sizeof(scan_node->entry->neg_sec_info)); + + if (!filter || !filter->bss_scoring_required) + qdf_list_insert_front(scan_list, + &scan_node->node); + else + scm_list_insert_sorted(psoc, filter, scan_node, scan_list); + + return QDF_STATUS_SUCCESS; +} + +/** + * scm_get_results() - Iterate and get scan results + * @psoc: psoc ptr + * @scan_db: scan db + * @filter: filter to be applied + * @scan_list: scan list to which entry is added + * + * Return: void + */ +static void scm_get_results(struct wlan_objmgr_psoc *psoc, + struct scan_dbs *scan_db, struct scan_filter *filter, + qdf_list_t *scan_list) +{ + int i, count; + struct scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + + for (i = 0 ; i < SCAN_HASH_SIZE; i++) { + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], NULL); + count = qdf_list_size(&scan_db->scan_hash_tbl[i]); + if (!count) + continue; + while (cur_node) { + scm_scan_apply_filter_get_entry(psoc, + cur_node->entry, filter, scan_list); + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], cur_node); + cur_node = next_node; + } + } +} + +QDF_STATUS scm_purge_scan_results(qdf_list_t *scan_list) +{ + QDF_STATUS status; + struct scan_cache_node *cur_node; + qdf_list_node_t *cur_lst = NULL, *next_lst = NULL; + + if (!scan_list) { + scm_err("scan_result is NULL"); + return QDF_STATUS_E_INVAL; + } + + status = qdf_list_peek_front(scan_list, &cur_lst); + + while (cur_lst) { + qdf_list_peek_next( + scan_list, cur_lst, &next_lst); + cur_node = qdf_container_of(cur_lst, + struct scan_cache_node, node); + status = qdf_list_remove_node(scan_list, + cur_lst); + if (QDF_IS_STATUS_SUCCESS(status)) { + util_scan_free_cache_entry(cur_node->entry); + qdf_mem_free(cur_node); + } + cur_lst = next_lst; + next_lst = NULL; + } + + qdf_list_destroy(scan_list); + qdf_mem_free(scan_list); + + return status; +} + +qdf_list_t *scm_get_scan_result(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter) +{ + struct wlan_objmgr_psoc *psoc; + struct scan_dbs *scan_db; + qdf_list_t *tmp_list; + + if (!pdev) { + scm_err("pdev is NULL"); + return NULL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return NULL; + } + + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return NULL; + } + + tmp_list = qdf_mem_malloc_atomic(sizeof(*tmp_list)); + if (!tmp_list) { + scm_err("failed tp allocate scan_result"); + return NULL; + } + qdf_list_create(tmp_list, + MAX_SCAN_CACHE_SIZE); + scm_age_out_entries(psoc, scan_db); + scm_get_results(psoc, scan_db, filter, tmp_list); + + return tmp_list; +} + +/** + * scm_iterate_db_and_call_func() - iterate and call the func + * @scan_db: scan db + * @func: func to be called + * @arg: func arg + * + * Return: QDF_STATUS + */ +static QDF_STATUS +scm_iterate_db_and_call_func(struct scan_dbs *scan_db, + scan_iterator_func func, void *arg) +{ + int i; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + + if (!func) + return QDF_STATUS_E_INVAL; + + for (i = 0 ; i < SCAN_HASH_SIZE; i++) { + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], NULL); + while (cur_node) { + status = func(arg, cur_node->entry); + if (QDF_IS_STATUS_ERROR(status)) { + scm_scan_entry_put_ref(scan_db, + cur_node, true); + return status; + } + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], cur_node); + cur_node = next_node; + } + } + + return status; +} + +QDF_STATUS +scm_iterate_scan_db(struct wlan_objmgr_pdev *pdev, + scan_iterator_func func, void *arg) +{ + struct wlan_objmgr_psoc *psoc; + struct scan_dbs *scan_db; + QDF_STATUS status; + + if (!func) { + scm_err("func is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (!pdev) { + scm_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return QDF_STATUS_E_INVAL; + } + + scm_age_out_entries(psoc, scan_db); + status = scm_iterate_db_and_call_func(scan_db, func, arg); + + return status; +} + +/** + * scm_scan_apply_filter_flush_entry() -flush scan entries depending + * on filter + * @psoc: psoc ptr + * @scan_db: scan db + * @db_node: node on which filters are applied + * @filter: filter to be applied + * + * Return: QDF_STATUS + */ +static QDF_STATUS +scm_scan_apply_filter_flush_entry(struct wlan_objmgr_psoc *psoc, + struct scan_dbs *scan_db, + struct scan_cache_node *db_node, + struct scan_filter *filter) +{ + struct security_info security = {0}; + bool match; + + if (!filter) + match = true; + else + match = scm_filter_match(psoc, db_node->entry, + filter, &security); + + if (!match) + return QDF_STATUS_SUCCESS; + + qdf_spin_lock_bh(&scan_db->scan_db_lock); + scm_scan_entry_del(scan_db, db_node); + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + + return QDF_STATUS_SUCCESS; +} + +/** + * scm_flush_scan_entries() - API to flush scan entries depending on filters + * @psoc: psoc ptr + * @scan_db: scan db + * @filter: filter + * + * Return: void + */ +static void scm_flush_scan_entries(struct wlan_objmgr_psoc *psoc, + struct scan_dbs *scan_db, + struct scan_filter *filter) +{ + int i; + struct scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + + for (i = 0 ; i < SCAN_HASH_SIZE; i++) { + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], NULL); + while (cur_node) { + scm_scan_apply_filter_flush_entry(psoc, scan_db, + cur_node, filter); + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], cur_node); + cur_node = next_node; + } + } +} + +QDF_STATUS scm_flush_results(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter) +{ + struct wlan_objmgr_psoc *psoc; + struct scan_dbs *scan_db; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!pdev) { + scm_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return QDF_STATUS_E_INVAL; + } + + scm_flush_scan_entries(psoc, scan_db, filter); + + return status; +} + +/** + * scm_filter_channels() - Remove entries not belonging to channel list + * @scan_db: scan db + * @db_node: node on which filters are applied + * @chan_freq_list: valid channel frequency (in MHz) list + * @num_chan: number of channels + * + * Return: QDF_STATUS + */ +static void scm_filter_channels(struct wlan_objmgr_pdev *pdev, + struct scan_dbs *scan_db, + struct scan_cache_node *db_node, + uint32_t *chan_freq_list, uint32_t num_chan) +{ + int i; + bool match = false; + + for (i = 0; i < num_chan; i++) { + if (chan_freq_list[i] == util_scan_entry_channel_frequency( + db_node->entry)) { + match = true; + break; + } + } + + if (!match) { + qdf_spin_lock_bh(&scan_db->scan_db_lock); + scm_scan_entry_del(scan_db, db_node); + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + } +} + +void scm_filter_valid_channel(struct wlan_objmgr_pdev *pdev, + uint32_t *chan_freq_list, uint32_t num_chan) +{ + int i; + struct wlan_objmgr_psoc *psoc; + struct scan_dbs *scan_db; + struct scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + + scm_debug("num_chan = %d", num_chan); + + if (!pdev) { + scm_err("pdev is NULL"); + return; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return; + } + + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return; + } + + for (i = 0 ; i < SCAN_HASH_SIZE; i++) { + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], NULL); + while (cur_node) { + scm_filter_channels(pdev, scan_db, + cur_node, chan_freq_list, num_chan); + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], cur_node); + cur_node = next_node; + } + } +} + +QDF_STATUS scm_scan_register_bcn_cb(struct wlan_objmgr_psoc *psoc, + update_beacon_cb cb, enum scan_cb_type type) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("scan obj is NULL"); + return QDF_STATUS_E_INVAL; + } + switch (type) { + case SCAN_CB_TYPE_INFORM_BCN: + scan_obj->cb.inform_beacon = cb; + break; + case SCAN_CB_TYPE_UPDATE_BCN: + scan_obj->cb.update_beacon = cb; + break; + case SCAN_CB_TYPE_UNLINK_BSS: + scan_obj->cb.unlink_bss = cb; + break; + default: + scm_err("invalid cb type %d", type); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scm_db_init(struct wlan_objmgr_psoc *psoc) +{ + int i, j; + struct scan_dbs *scan_db; + + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + /* Initialize the scan database per pdev */ + for (i = 0; i < WLAN_UMAC_MAX_PDEVS; i++) { + scan_db = wlan_pdevid_get_scan_db(psoc, i); + if (!scan_db) { + scm_err("scan_db is NULL %d", i); + continue; + } + scan_db->num_entries = 0; + qdf_spinlock_create(&scan_db->scan_db_lock); + for (j = 0; j < SCAN_HASH_SIZE; j++) + qdf_list_create(&scan_db->scan_hash_tbl[j], + MAX_SCAN_CACHE_SIZE); + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scm_db_deinit(struct wlan_objmgr_psoc *psoc) +{ + int i, j; + struct scan_dbs *scan_db; + + if (!psoc) { + scm_err("scan obj is NULL"); + return QDF_STATUS_E_INVAL; + } + + /* Initialize the scan database per pdev */ + for (i = 0; i < WLAN_UMAC_MAX_PDEVS; i++) { + scan_db = wlan_pdevid_get_scan_db(psoc, i); + if (!scan_db) { + scm_err("scan_db is NULL %d", i); + continue; + } + + scm_flush_scan_entries(psoc, scan_db, NULL); + for (j = 0; j < SCAN_HASH_SIZE; j++) + qdf_list_destroy(&scan_db->scan_hash_tbl[j]); + qdf_spinlock_destroy(&scan_db->scan_db_lock); + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef FEATURE_6G_SCAN_CHAN_SORT_ALGO +QDF_STATUS scm_channel_list_db_init(struct wlan_objmgr_psoc *psoc) +{ + uint32_t i, j; + uint32_t min_freq, max_freq; + struct channel_list_db *rnr_channel_db; + + min_freq = wlan_reg_min_6ghz_chan_freq(); + max_freq = wlan_reg_max_6ghz_chan_freq(); + + scm_info("min_freq %d max_freq %d", min_freq, max_freq); + i = min_freq; + rnr_channel_db = scm_get_rnr_channel_db(psoc); + if (!rnr_channel_db) + return QDF_STATUS_E_INVAL; + + for (j = 0; j < QDF_ARRAY_SIZE(rnr_channel_db->channel); j++) { + if (i >= min_freq && i <= max_freq) + rnr_channel_db->channel[j].chan_freq = i; + i += 20; + /* init list for all to avoid uninitialized list */ + qdf_list_create(&rnr_channel_db->channel[j].rnr_list, + WLAN_MAX_RNR_COUNT); + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scm_channel_list_db_deinit(struct wlan_objmgr_psoc *psoc) +{ + int i; + qdf_list_node_t *cur_node, *next_node; + struct meta_rnr_channel *channel; + struct scan_rnr_node *rnr_node; + struct channel_list_db *rnr_channel_db; + + rnr_channel_db = scm_get_rnr_channel_db(psoc); + if (!rnr_channel_db) + return QDF_STATUS_E_INVAL; + + for (i = 0; i < QDF_ARRAY_SIZE(rnr_channel_db->channel); i++) { + channel = &rnr_channel_db->channel[i]; + channel->chan_freq = 0; + channel->beacon_probe_last_time_found = 0; + channel->bss_beacon_probe_count = 0; + channel->saved_profile_count = 0; + cur_node = NULL; + qdf_list_peek_front(&channel->rnr_list, &cur_node); + while (cur_node) { + next_node = NULL; + qdf_list_peek_next(&channel->rnr_list, cur_node, + &next_node); + rnr_node = qdf_container_of(cur_node, + struct scan_rnr_node, + node); + qdf_list_remove_node(&channel->rnr_list, + &rnr_node->node); + qdf_mem_free(rnr_node); + cur_node = next_node; + next_node = NULL; + } + qdf_list_destroy(&channel->rnr_list); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scm_rnr_db_flush(struct wlan_objmgr_psoc *psoc) +{ + int i; + qdf_list_node_t *cur_node, *next_node; + struct meta_rnr_channel *channel; + struct scan_rnr_node *rnr_node; + struct channel_list_db *rnr_channel_db; + + rnr_channel_db = scm_get_rnr_channel_db(psoc); + if (!rnr_channel_db) + return QDF_STATUS_E_INVAL; + + for (i = 0; i < QDF_ARRAY_SIZE(rnr_channel_db->channel); i++) { + channel = &rnr_channel_db->channel[i]; + cur_node = NULL; + qdf_list_peek_front(&channel->rnr_list, &cur_node); + while (cur_node) { + next_node = NULL; + qdf_list_peek_next(&channel->rnr_list, cur_node, + &next_node); + rnr_node = qdf_container_of(cur_node, + struct scan_rnr_node, + node); + qdf_list_remove_node(&channel->rnr_list, + &rnr_node->node); + qdf_mem_free(rnr_node); + cur_node = next_node; + next_node = NULL; + } + /* Reset beacon info */ + channel->beacon_probe_last_time_found = 0; + channel->bss_beacon_probe_count = 0; + } + + return QDF_STATUS_SUCCESS; +} + +void scm_update_rnr_from_scan_cache(struct wlan_objmgr_pdev *pdev) +{ + uint8_t i; + struct scan_dbs *scan_db; + struct scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + struct wlan_objmgr_psoc *psoc; + struct scan_cache_entry *entry; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return; + } + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return; + } + + for (i = 0 ; i < SCAN_HASH_SIZE; i++) { + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], NULL); + while (cur_node) { + entry = cur_node->entry; + scm_add_rnr_channel_db(psoc, entry); + next_node = + scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], + cur_node); + cur_node = next_node; + next_node = NULL; + } + } +} +#endif + +QDF_STATUS scm_update_scan_mlme_info(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *entry) +{ + uint8_t hash_idx; + struct scan_dbs *scan_db; + struct scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return QDF_STATUS_E_INVAL; + } + + hash_idx = SCAN_GET_HASH(entry->bssid.bytes); + + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[hash_idx], NULL); + + while (cur_node) { + if (util_is_scan_entry_match(entry, + cur_node->entry)) { + /* Acquire db lock to prevent simultaneous update */ + qdf_spin_lock_bh(&scan_db->scan_db_lock); + scm_update_mlme_info(entry, cur_node->entry); + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + scm_scan_entry_put_ref(scan_db, + cur_node, true); + return QDF_STATUS_SUCCESS; + } + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[hash_idx], cur_node); + cur_node = next_node; + } + + return QDF_STATUS_E_INVAL; +} + +QDF_STATUS scm_scan_update_mlme_by_bssinfo(struct wlan_objmgr_pdev *pdev, + struct bss_info *bss_info, struct mlme_info *mlme) +{ + uint8_t hash_idx; + struct scan_dbs *scan_db; + struct scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + struct wlan_objmgr_psoc *psoc; + struct scan_cache_entry *entry; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return QDF_STATUS_E_INVAL; + } + + hash_idx = SCAN_GET_HASH(bss_info->bssid.bytes); + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[hash_idx], NULL); + while (cur_node) { + entry = cur_node->entry; + if (qdf_is_macaddr_equal(&bss_info->bssid, &entry->bssid) && + (util_is_ssid_match(&bss_info->ssid, &entry->ssid)) && + (bss_info->freq == entry->channel.chan_freq)) { + /* Acquire db lock to prevent simultaneous update */ + qdf_spin_lock_bh(&scan_db->scan_db_lock); + qdf_mem_copy(&entry->mlme_info, mlme, + sizeof(struct mlme_info)); + scm_scan_entry_put_ref(scan_db, + cur_node, false); + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + return QDF_STATUS_SUCCESS; + } + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[hash_idx], cur_node); + cur_node = next_node; + } + + return QDF_STATUS_E_INVAL; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db.h new file mode 100644 index 0000000000000000000000000000000000000000..54140085ee014699ed22f940735a63e76504dce0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db.h @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan cache entry api + */ + +#ifndef _WLAN_SCAN_CACHE_DB_H_ +#define _WLAN_SCAN_CACHE_DB_H_ + +#include +#include +#include +#include +#include + +#define SCAN_HASH_SIZE 64 +#define SCAN_GET_HASH(addr) \ + (((const uint8_t *)(addr))[QDF_MAC_ADDR_SIZE - 1] % SCAN_HASH_SIZE) + +#define SCM_PCL_RSSI_THRESHOLD -75 +#define ADJACENT_CHANNEL_RSSI_THRESHOLD -80 + +/** + * struct scan_dbs - scan cache data base definition + * @num_entries: number of scan entries + * @scan_hash_tbl: link list of bssid hashed scan cache entries for a pdev + */ +struct scan_dbs { + uint32_t num_entries; + qdf_spinlock_t scan_db_lock; + qdf_list_t scan_hash_tbl[SCAN_HASH_SIZE]; +}; + +/** + * struct scan_bcn_probe_event - beacon/probe info + * @frm_type: frame type + * @rx_data: mgmt rx data + * @psoc: psoc pointer + * @buf: rx frame + */ +struct scan_bcn_probe_event { + uint32_t frm_type; + struct mgmt_rx_event_params *rx_data; + struct wlan_objmgr_psoc *psoc; + qdf_nbuf_t buf; +}; + +/** + * scm_handle_bcn_probe() - Process beacon and probe rsp + * @msg: schedular msg with bcn info; + * + * API to handle the beacon/probe resp. msg->bodyptr will be consumed and freed + * by this func + * + * Return: QDF status. + */ +QDF_STATUS scm_handle_bcn_probe(struct scheduler_msg *msg); + +/** + * __scm_handle_bcn_probe() - Process beacon and probe rsp + * @bcn: beacon info; + * + * API to handle the beacon/probe resp. bcn will be consumed and freed by this + * func + * + * Return: QDF status. + */ +QDF_STATUS __scm_handle_bcn_probe(struct scan_bcn_probe_event *bcn); + +/** + * scm_age_out_entries() - Age out entries older than aging time + * @psoc: psoc pointer + * @scan_db: scan database + * + * Return: void. + */ +void scm_age_out_entries(struct wlan_objmgr_psoc *psoc, + struct scan_dbs *scan_db); + +/** + * scm_get_scan_result() - fetches scan result + * @pdev: pdev info + * @filter: Filters + * + * This function fetches scan result + * + * Return: scan list + */ +qdf_list_t *scm_get_scan_result(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter); + +/** + * scm_purge_scan_results() - purge the scan list + * @scan_result: scan list to be purged + * + * This function purge the temp scan list + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_purge_scan_results(qdf_list_t *scan_result); + +/** + * scm_update_scan_mlme_info() - updates scan entry with mlme data + * @pdev: pdev object + * @scan_entry: source scan entry to read mlme info + * + * This function updates scan db with scan_entry->mlme_info + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_update_scan_mlme_info(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry); + +/** + * scm_flush_results() - flush scan entries matching the filter + * @pdev: vdev object + * @filter: filter to flush the scan entries + * + * Flush scan entries matching the filter. + * + * Return: QDF status. + */ +QDF_STATUS scm_flush_results(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter); + +/** + * scm_filter_valid_channel() - The Public API to filter scan result + * based on valid channel list + * @pdev: pdev object + * @chan_freq_list: valid channel frequency (in MHz) list + * @num_chan: number of valid channels + * + * The Public API to to filter scan result + * based on valid channel list. + * + * Return: void. + */ +void scm_filter_valid_channel(struct wlan_objmgr_pdev *pdev, + uint32_t *chan_freq_list, uint32_t num_chan); + +/** + * scm_iterate_scan_db() - function to iterate scan table + * @pdev: pdev object + * @func: iterator function pointer + * @arg: argument to be passed to func() + * + * API, this API iterates scan table and invokes func + * on each scan enetry by passing scan entry and arg. + * + * Return: QDF_STATUS + */ +QDF_STATUS +scm_iterate_scan_db(struct wlan_objmgr_pdev *pdev, + scan_iterator_func func, void *arg); + +/** + * scm_scan_register_bcn_cb() - API to register api to indicate bcn/probe + * as soon as they are received + * @pdev: psoc + * @cb: callback to be registered + * @type: Type of callback to be registered + * + * Return: enum scm_scan_status + */ +QDF_STATUS scm_scan_register_bcn_cb(struct wlan_objmgr_psoc *psoc, + update_beacon_cb cb, enum scan_cb_type type); + +/** + * scm_db_init() - API to init scan db + * @psoc: psoc + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_db_init(struct wlan_objmgr_psoc *psoc); + +/** + * scm_db_deinit() - API to deinit scan db + * @psoc: psoc + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_db_deinit(struct wlan_objmgr_psoc *psoc); + +#ifdef FEATURE_6G_SCAN_CHAN_SORT_ALGO + +/** + * scm_get_rnr_channel_db() - API to get rnr db + * @psoc: psoc + * + * Return: rnr db + */ +struct channel_list_db *scm_get_rnr_channel_db(struct wlan_objmgr_psoc *psoc); + +/** + * scm_get_chan_meta() - API to return channel meta + * @psoc: psoc + * @freq: channel frequency + * + * Return: channel meta information + */ +struct meta_rnr_channel *scm_get_chan_meta(struct wlan_objmgr_psoc *psoc, + uint32_t chan_freq); + +/** + * scm_channel_list_db_init() - API to init scan list priority list db + * @psoc: psoc + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_channel_list_db_init(struct wlan_objmgr_psoc *psoc); + +/** + * scm_channel_list_db_deinit() - API to deinit scan list priority list db + * @psoc: psoc + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_channel_list_db_deinit(struct wlan_objmgr_psoc *psoc); + +/** + * scm_rnr_db_flush() - API to flush rnr entries + * @psoc: psoc + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_rnr_db_flush(struct wlan_objmgr_psoc *psoc); + +/** + * scm_update_rnr_from_scan_cache() - API to update rnr info from scan cache + * @pdev: pdev + * + * Return: void + */ +void scm_update_rnr_from_scan_cache(struct wlan_objmgr_pdev *pdev); + +#else +static inline QDF_STATUS scm_channel_list_db_init(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static inline +QDF_STATUS scm_channel_list_db_deinit(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * scm_validate_scoring_config() - validate score config + * @score_cfg: config to be validated + * + * Return: void + */ +void scm_validate_scoring_config( + struct scoring_config *score_cfg); + +/** + * scm_scan_update_mlme_by_bssinfo() - updates scan entry with mlme data + * @pdev: pdev object + * @bss_info: BSS information + * + * This function updates scan db with scan_entry->mlme_info + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_scan_update_mlme_by_bssinfo(struct wlan_objmgr_pdev *pdev, + struct bss_info *bss_info, struct mlme_info *mlme); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db_i.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db_i.h new file mode 100644 index 0000000000000000000000000000000000000000..690dad03d531167fcb95c3de61742fbd66d2820b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db_i.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan internal api + */ + +#ifndef _WLAN_SCAN_CACHE_DB_I_H_ +#define _WLAN_SCAN_CACHE_DB_I_H_ + +/** + * scm_filter_match() - private API to check if entry is match to filter + * psoc: psoc ptr; + * @db_entry: db entry + * @filter: filter + * @security: negotiated security if match is found + * + * Return: true if entry match filter + */ +bool scm_filter_match(struct wlan_objmgr_psoc *psoc, + struct scan_cache_entry *db_entry, + struct scan_filter *filter, + struct security_info *security); + +/** + * scm_is_better_bss() - Is bss1 better than bss2 + * @params: scan params + * @bss1: Pointer to the first BSS. + * @bss2: Pointer to the second BSS. + * + * This routine helps in determining the preference value + * of a particular BSS in the scan result which is further + * used in the sorting logic of the final candidate AP's. + * + * Return: true, if bss1 is better than bss2 + * false, if bss2 is better than bss1. + */ +bool scm_is_better_bss(struct scan_default_params *params, + struct scan_cache_entry *bss1, + struct scan_cache_entry *bss2); + +/** + * scm_calculate_bss_score() - calculate BSS score used to get + * the preference + * @psoc: psoc ptr; + * @params: scan params + * @entry: scan entry for which score needs to be calculated + * @pcl_chan_weight: weight for pcl channel + * + * Return: scan db for the pdev id + */ +int scm_calculate_bss_score( + struct wlan_objmgr_psoc *psoc, + struct scan_default_params *params, + struct scan_cache_entry *entry, + int pcl_chan_weight); + +/** + * wlan_pdevid_get_scan_db() - private API to get scan db from pdev id + * @psoc: psoc object + * @pdev_id: Pdev_id + * Return: scan db for the pdev id + */ +static inline struct scan_dbs * +wlan_pdevid_get_scan_db(struct wlan_objmgr_psoc *psoc, uint8_t pdev_id) +{ + struct wlan_scan_obj *scan_obj = NULL; + + if (pdev_id > WLAN_UMAC_MAX_PDEVS) { + scm_err("invalid pdev_id %d", pdev_id); + return NULL; + } + scan_obj = wlan_psoc_get_scan_obj(psoc); + + if (!scan_obj) + return NULL; + + return &(scan_obj->scan_db[pdev_id]); +} + +/** + * wlan_pdev_get_scan_db() - private API to get scan db from pdev + * @psoc: psoc object + * @pdev: Pdev + * + * Return: scan db for the pdev + */ +static inline struct scan_dbs * +wlan_pdev_get_scan_db(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + uint8_t pdev_id; + + if (!pdev) { + scm_err("pdev is NULL"); + return NULL; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + return wlan_pdevid_get_scan_db(psoc, pdev_id); +} + +/** + * scm_get_pcl_weight_of_channel() - Get PCL weight if channel is present in pcl + * @chan_freq: channel frequency of bss, unit: MHz + * @filter: filter + * @pcl_chan_weight: Get PCL weight for corresponding channel + * @weight_list: Weight list for all the pcl channels. + * + * Get pcl_chan_weight if provided channel is present in pcl list + * + * Return: true or false + */ +bool scm_get_pcl_weight_of_channel(uint32_t chan_freq, + struct scan_filter *filter, + int *pcl_chan_weight, + uint8_t *weight_list); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_filter.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_filter.c new file mode 100644 index 0000000000000000000000000000000000000000..1c20975a94c06f4fa3bb7ca5bcf46f5aab32814b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_filter.c @@ -0,0 +1,1269 @@ +/* + * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/* + * DOC: contains scan cache filter logic + */ + +#include +#include "wlan_scan_main.h" +#include "wlan_scan_cache_db_i.h" + +/** + * scm_is_open_security() - Check if scan entry support open security + * @filter: scan filter + * @db_entry: db entry + * @security: matched security. + * + * Return: true if open security else false + */ +static bool scm_is_open_security(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct security_info *security) +{ + bool match = false; + int i; + + if (db_entry->cap_info.wlan_caps.privacy) + return false; + + /* Check MC cipher and Auth type requested. */ + for (i = 0; i < filter->num_of_mc_enc_type; i++) { + if (WLAN_ENCRYPT_TYPE_NONE == + filter->mc_enc_type[i]) { + security->mc_enc = + filter->mc_enc_type[i]; + match = true; + break; + } + } + if (!match && filter->num_of_mc_enc_type) + return match; + + match = false; + /* Check Auth list. It should contain AuthOpen. */ + for (i = 0; i < filter->num_of_auth; i++) { + if ((WLAN_AUTH_TYPE_OPEN_SYSTEM == + filter->auth_type[i]) || + (WLAN_AUTH_TYPE_AUTOSWITCH == + filter->auth_type[i])) { + security->auth_type = + WLAN_AUTH_TYPE_OPEN_SYSTEM; + match = true; + break; + } + } + + return match; +} + +/** + * scm_is_cipher_match() - Check if cipher match the cipher list + * @cipher_list: cipher list to match + * @num_cipher: number of cipher in cipher list + * @cipher_to_match: cipher to found in cipher list + * + * Return: true if open security else false + */ +static bool scm_is_cipher_match( + uint32_t *cipher_list, + uint16_t num_cipher, uint32_t cipher_to_match) +{ + int i; + bool match = false; + + for (i = 0; i < num_cipher ; i++) { + match = (cipher_list[i] == cipher_to_match); + if (match) + break; + } + + return match; +} + +/** + * scm_get_cipher_suite_type() - get cypher suite type from enc type + * @enc: enc type + * + * Return: cypher suite type + */ +static uint8_t scm_get_cipher_suite_type(enum wlan_enc_type enc) +{ + uint8_t cipher_type; + + switch (enc) { + case WLAN_ENCRYPT_TYPE_WEP40: + case WLAN_ENCRYPT_TYPE_WEP40_STATICKEY: + cipher_type = WLAN_CSE_WEP40; + break; + case WLAN_ENCRYPT_TYPE_WEP104: + case WLAN_ENCRYPT_TYPE_WEP104_STATICKEY: + cipher_type = WLAN_CSE_WEP104; + break; + case WLAN_ENCRYPT_TYPE_TKIP: + cipher_type = WLAN_CSE_TKIP; + break; + case WLAN_ENCRYPT_TYPE_AES: + cipher_type = WLAN_CSE_CCMP; + break; + case WLAN_ENCRYPT_TYPE_AES_GCMP: + cipher_type = WLAN_CSE_GCMP_128; + break; + case WLAN_ENCRYPT_TYPE_AES_GCMP_256: + cipher_type = WLAN_CSE_GCMP_256; + break; + case WLAN_ENCRYPT_TYPE_NONE: + cipher_type = WLAN_CSE_NONE; + break; + case WLAN_ENCRYPT_TYPE_WPI: + cipher_type = WLAN_WAI_CERT_OR_SMS4; + break; + default: + cipher_type = WLAN_CSE_RESERVED; + break; + } + + return cipher_type; +} + +/** + * scm_is_wep_security() - Check if scan entry support WEP security + * @filter: scan filter + * @db_entry: db entry + * @security: matched security. + * + * Return: true if WEP security else false + */ +static bool scm_is_wep_security(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct security_info *security) +{ + int i; + QDF_STATUS status; + bool match = false; + enum wlan_auth_type neg_auth = WLAN_AUTH_TYPE_OPEN_SYSTEM; + enum wlan_enc_type neg_mccipher = WLAN_ENCRYPT_TYPE_NONE; + + if (!security) + return false; + + /* If privacy bit is not set, consider no match */ + if (!db_entry->cap_info.wlan_caps.privacy) + return false; + + for (i = 0; i < filter->num_of_mc_enc_type; i++) { + switch (filter->mc_enc_type[i]) { + case WLAN_ENCRYPT_TYPE_WEP40_STATICKEY: + case WLAN_ENCRYPT_TYPE_WEP104_STATICKEY: + case WLAN_ENCRYPT_TYPE_WEP40: + case WLAN_ENCRYPT_TYPE_WEP104: + /* + * Multicast list may contain WEP40/WEP104. + * Check whether it matches UC. + */ + if (security->uc_enc == + filter->mc_enc_type[i]) { + match = true; + neg_mccipher = + filter->mc_enc_type[i]; + } + break; + default: + match = false; + break; + } + if (match) + break; + } + + if (!match) + return match; + + for (i = 0; i < filter->num_of_auth; i++) { + switch (filter->auth_type[i]) { + case WLAN_AUTH_TYPE_OPEN_SYSTEM: + case WLAN_AUTH_TYPE_SHARED_KEY: + case WLAN_AUTH_TYPE_AUTOSWITCH: + match = true; + neg_auth = filter->auth_type[i]; + break; + default: + match = false; + } + if (match) + break; + } + + if (!match) + return match; + + /* + * In case of WPA / WPA2, check whether it supports WEP as well. + * Prepare the encryption type for WPA/WPA2 functions + */ + if (security->uc_enc == WLAN_ENCRYPT_TYPE_WEP40_STATICKEY) + security->uc_enc = WLAN_ENCRYPT_TYPE_WEP40; + else if (security->uc_enc == WLAN_ENCRYPT_TYPE_WEP104) + security->uc_enc = WLAN_ENCRYPT_TYPE_WEP104; + + /* else we can use the encryption type directly */ + if (util_scan_entry_wpa(db_entry)) { + struct wlan_wpa_ie wpa = {0}; + uint8_t cipher_type; + + cipher_type = + scm_get_cipher_suite_type(security->uc_enc); + status = wlan_parse_wpa_ie(util_scan_entry_wpa(db_entry), &wpa); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("failed to parse WPA IE, status %d", status); + scm_hex_dump(QDF_TRACE_LEVEL_DEBUG, + util_scan_entry_wpa(db_entry), + util_scan_get_wpa_len(db_entry)); + return false; + } + + match = scm_is_cipher_match(&wpa.mc_cipher, + 1, WLAN_WPA_SEL(cipher_type)); + } + if (!match && util_scan_entry_rsn(db_entry)) { + struct wlan_rsn_ie rsn = {0}; + uint8_t cipher_type; + + cipher_type = + scm_get_cipher_suite_type(security->uc_enc); + status = wlan_parse_rsn_ie(util_scan_entry_rsn(db_entry), &rsn); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("failed to parse RSN IE, status %d", status); + scm_hex_dump(QDF_TRACE_LEVEL_DEBUG, + util_scan_entry_rsn(db_entry), + util_scan_get_rsn_len(db_entry)); + return false; + } + match = scm_is_cipher_match(&rsn.gp_cipher_suite, + 1, WLAN_RSN_SEL(cipher_type)); + } + + + if (match) { + security->auth_type = neg_auth; + security->mc_enc = neg_mccipher; + } + + return match; +} + +/** + * scm_check_pmf_match() - Check PMF security of entry match filter + * @filter: scan filter + * @db_entry: ap entry + * @rsn: rsn IE of the scan entry + * + * Return: true if PMF security match else false + */ +static bool +scm_check_pmf_match(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct wlan_rsn_ie *rsn) +{ + enum wlan_pmf_cap ap_pmf_cap = WLAN_PMF_DISABLED; + bool match = true; + + if (rsn->cap & RSN_CAP_MFP_CAPABLE) + ap_pmf_cap = WLAN_PMF_CAPABLE; + if (rsn->cap & RSN_CAP_MFP_REQUIRED) + ap_pmf_cap = WLAN_PMF_REQUIRED; + + if ((filter->pmf_cap == WLAN_PMF_REQUIRED) && + (ap_pmf_cap == WLAN_PMF_DISABLED)) + match = false; + else if ((filter->pmf_cap == WLAN_PMF_DISABLED) && + (ap_pmf_cap == WLAN_PMF_REQUIRED)) + match = false; + + if (!match) + scm_debug(QDF_MAC_ADDR_FMT" : PMF cap didn't match (filter %d AP %d)", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes), + filter->pmf_cap, + ap_pmf_cap); + + return match; +} + +/** + * scm_is_rsn_mcast_cipher_match() - match the rsn mcast cipher type with AP's + * mcast cipher + * @rsn: AP's RSNE + * @filter: scan filter + * @neg_mccipher: negotiated mc cipher if matched. + * + * Return: true if mc cipher is negotiated + */ +static bool +scm_is_rsn_mcast_cipher_match(struct wlan_rsn_ie *rsn, + struct scan_filter *filter, enum wlan_enc_type *neg_mccipher) +{ + int i; + bool match; + uint8_t cipher_type; + + if (!rsn || !neg_mccipher || !filter) + return false; + + for (i = 0; i < filter->num_of_mc_enc_type; i++) { + + if (filter->mc_enc_type[i] == WLAN_ENCRYPT_TYPE_ANY) { + /* Try the more secured ones first. */ + /* Check GCMP_256 first */ + cipher_type = WLAN_CSE_GCMP_256; + match = scm_is_cipher_match(&rsn->gp_cipher_suite, 1, + WLAN_RSN_SEL(cipher_type)); + if (match) { + *neg_mccipher = WLAN_ENCRYPT_TYPE_AES_GCMP_256; + return true; + } + /* Check GCMP */ + cipher_type = WLAN_CSE_GCMP_128; + match = scm_is_cipher_match(&rsn->gp_cipher_suite, 1, + WLAN_RSN_SEL(cipher_type)); + if (match) { + *neg_mccipher = WLAN_ENCRYPT_TYPE_AES_GCMP; + return true; + } + /* Check AES */ + cipher_type = WLAN_CSE_CCMP; + match = scm_is_cipher_match(&rsn->gp_cipher_suite, 1, + WLAN_RSN_SEL(cipher_type)); + if (match) { + *neg_mccipher = WLAN_ENCRYPT_TYPE_AES; + return true; + } + /* Check TKIP */ + cipher_type = WLAN_CSE_TKIP; + match = scm_is_cipher_match(&rsn->gp_cipher_suite, 1, + WLAN_RSN_SEL(cipher_type)); + if (match) { + *neg_mccipher = WLAN_ENCRYPT_TYPE_TKIP; + return true; + } + } else { + cipher_type = + scm_get_cipher_suite_type(filter->mc_enc_type[i]); + match = scm_is_cipher_match(&rsn->gp_cipher_suite, 1, + WLAN_RSN_SEL(cipher_type)); + if (match) { + *neg_mccipher = filter->mc_enc_type[i]; + return true; + } + } + } + + return false; +} + +/** + * scm_is_rsn_security() - Check if scan entry support RSN security + * @filter: scan filter + * @db_entry: db entry + * @security: matched security. + * + * Return: true if RSN security else false + */ +static bool scm_is_rsn_security(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct security_info *security) +{ + int i; + uint8_t cipher_type; + bool match_any_akm, match = false; + enum wlan_auth_type neg_auth = WLAN_NUM_OF_SUPPORT_AUTH_TYPE; + enum wlan_auth_type filter_akm; + enum wlan_enc_type neg_mccipher = WLAN_ENCRYPT_TYPE_NONE; + struct wlan_rsn_ie rsn = {0}; + QDF_STATUS status; + bool is_adaptive_11r; + + if (!security) + return false; + if (!util_scan_entry_rsn(db_entry)) { + scm_debug(QDF_MAC_ADDR_FMT" : doesn't have RSN IE", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + status = wlan_parse_rsn_ie(util_scan_entry_rsn(db_entry), &rsn); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("failed to parse RSN IE, status %d", status); + scm_hex_dump(QDF_TRACE_LEVEL_DEBUG, + util_scan_entry_rsn(db_entry), + util_scan_get_rsn_len(db_entry)); + return false; + } + + cipher_type = + scm_get_cipher_suite_type(security->uc_enc); + match = scm_is_cipher_match(rsn.pwise_cipher_suites, + rsn.pwise_cipher_count, WLAN_RSN_SEL(cipher_type)); + if (!match) { + scm_debug(QDF_MAC_ADDR_FMT" : pairwise cipher didn't match", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + + match = scm_is_rsn_mcast_cipher_match(&rsn, filter, &neg_mccipher); + if (!match) { + scm_debug(QDF_MAC_ADDR_FMT" : mcast cipher didn't match", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + + is_adaptive_11r = (db_entry->adaptive_11r_ap && + filter->enable_adaptive_11r); + + /* Initializing with false as it has true value already */ + match = false; + for (i = 0; i < filter->num_of_auth; i++) { + + filter_akm = filter->auth_type[i]; + if (filter_akm == WLAN_AUTH_TYPE_ANY) + match_any_akm = true; + else + match_any_akm = false; + /* + * Ciphers are supported, Match authentication algorithm and + * pick first matching authtype. + */ + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_FILS_FT_SHA384))) { + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_FT_FILS_SHA384)) { + neg_auth = WLAN_AUTH_TYPE_FT_FILS_SHA384; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_FILS_FT_SHA256))) { + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_FT_FILS_SHA256)) { + neg_auth = WLAN_AUTH_TYPE_FT_FILS_SHA256; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_FILS_SHA384))) { + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_FILS_SHA384)) { + neg_auth = WLAN_AUTH_TYPE_FILS_SHA384; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_FILS_SHA256))) { + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_FILS_SHA256)) { + neg_auth = WLAN_AUTH_TYPE_FILS_SHA256; + match = true; + break; + } + } + + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_SAE))) { + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_SAE)) { + neg_auth = WLAN_AUTH_TYPE_SAE; + match = true; + break; + } + } + + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, WLAN_RSN_DPP_AKM)) { + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_DPP_RSN)) { + neg_auth = WLAN_AUTH_TYPE_DPP_RSN; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_OSEN_AKM)) { + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_OSEN)) { + neg_auth = WLAN_AUTH_TYPE_OSEN; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_OWE))) { + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_OWE)) { + neg_auth = WLAN_AUTH_TYPE_OWE; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_FT_IEEE8021X))) { + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_FT_RSN)) { + neg_auth = WLAN_AUTH_TYPE_FT_RSN; + match = true; + break; + } + } + + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_FT_PSK))) { + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_FT_RSN_PSK)) { + neg_auth = WLAN_AUTH_TYPE_FT_RSN_PSK; + match = true; + break; + } + } + /* ESE only supports 802.1X. No PSK. */ + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_CCKM_AKM)) { + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_CCKM_RSN)) { + neg_auth = WLAN_AUTH_TYPE_CCKM_RSN; + match = true; + break; + } + } + /* RSN */ + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_IEEE8021X))) { + if (is_adaptive_11r && + (filter_akm == WLAN_AUTH_TYPE_FT_RSN)) { + neg_auth = WLAN_AUTH_TYPE_FT_RSN; + match = true; + break; + } + + if (match_any_akm || + (WLAN_AUTH_TYPE_RSN == filter_akm)) { + neg_auth = WLAN_AUTH_TYPE_RSN; + match = true; + break; + } + } + /* TKIP */ + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_PSK))) { + if (is_adaptive_11r && + (filter_akm == WLAN_AUTH_TYPE_FT_RSN_PSK)) { + neg_auth = WLAN_AUTH_TYPE_FT_RSN_PSK; + match = true; + break; + } + + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_RSN_PSK)) { + neg_auth = WLAN_AUTH_TYPE_RSN_PSK; + match = true; + break; + } + } + /* SHA256 */ + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_SHA256_PSK))) { + if (is_adaptive_11r && + (filter_akm == WLAN_AUTH_TYPE_FT_RSN_PSK)) { + neg_auth = WLAN_AUTH_TYPE_FT_RSN_PSK; + match = true; + break; + } + + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_RSN_PSK_SHA256)) { + neg_auth = WLAN_AUTH_TYPE_RSN_PSK_SHA256; + match = true; + break; + } + } + /* 8021X SHA256 */ + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_SHA256_IEEE8021X))) { + if (is_adaptive_11r && + (filter_akm == WLAN_AUTH_TYPE_FT_RSN)) { + neg_auth = WLAN_AUTH_TYPE_FT_RSN; + match = true; + break; + } + + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_RSN_8021X_SHA256)) { + neg_auth = WLAN_AUTH_TYPE_RSN_8021X_SHA256; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_SUITEB_EAP_SHA256))) { + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_SUITEB_EAP_SHA256)) { + neg_auth = WLAN_AUTH_TYPE_SUITEB_EAP_SHA256; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_SUITEB_EAP_SHA384))) { + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_SUITEB_EAP_SHA384)) { + neg_auth = WLAN_AUTH_TYPE_SUITEB_EAP_SHA384; + match = true; + break; + } + } + + if (scm_is_cipher_match(rsn.akm_suites, rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_FT_SAE))) { + if (match_any_akm || + (filter_akm == WLAN_AUTH_TYPE_FT_SAE)) { + neg_auth = WLAN_AUTH_TYPE_FT_SAE; + match = true; + break; + } + } + + if (scm_is_cipher_match(rsn.akm_suites, rsn.akm_suite_count, + WLAN_RSN_SEL( + WLAN_AKM_FT_SUITEB_EAP_SHA384))) { + if (match_any_akm || + (filter_akm == + WLAN_AUTH_TYPE_FT_SUITEB_EAP_SHA384)) { + neg_auth = WLAN_AUTH_TYPE_FT_SUITEB_EAP_SHA384; + match = true; + break; + } + } + } + + if (!match) { + scm_debug(QDF_MAC_ADDR_FMT" : akm suites didn't match", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + + if (!filter->ignore_pmf_cap) + match = scm_check_pmf_match(filter, db_entry, &rsn); + + if (match) { + security->auth_type = neg_auth; + security->mc_enc = neg_mccipher; + } + + return match; +} + +/** + * scm_is_wpa_mcast_cipher_match() - match the wpa mcast cipher type with AP's + * mcast cipher + * @wpa: AP's WPA IE + * @filter: scan filter + * @neg_mccipher: negotiated mc cipher if matched. + * + * Return: true if mc cipher is negotiated + */ +static bool +scm_is_wpa_mcast_cipher_match(struct wlan_wpa_ie *wpa, + struct scan_filter *filter, enum wlan_enc_type *neg_mccipher) +{ + int i; + bool match; + uint8_t cipher_type; + + if (!wpa || !neg_mccipher || !filter) + return false; + + for (i = 0; i < filter->num_of_mc_enc_type; i++) { + + if (filter->mc_enc_type[i] == WLAN_ENCRYPT_TYPE_ANY) { + /* Try the more secured ones first. */ + + /* Check AES */ + cipher_type = WLAN_CSE_CCMP; + match = scm_is_cipher_match(&wpa->mc_cipher, 1, + WLAN_WPA_SEL(cipher_type)); + if (match) { + *neg_mccipher = WLAN_ENCRYPT_TYPE_AES; + return true; + } + /* Check TKIP */ + cipher_type = WLAN_CSE_TKIP; + match = scm_is_cipher_match(&wpa->mc_cipher, 1, + WLAN_WPA_SEL(cipher_type)); + if (match) { + *neg_mccipher = WLAN_ENCRYPT_TYPE_TKIP; + return true; + } + } else { + cipher_type = + scm_get_cipher_suite_type(filter->mc_enc_type[i]); + match = scm_is_cipher_match(&wpa->mc_cipher, 1, + WLAN_WPA_SEL(cipher_type)); + if (match) { + *neg_mccipher = filter->mc_enc_type[i]; + return true; + } + } + } + + return false; +} + +/** + * scm_is_wpa_security() - Check if scan entry support WPA security + * @filter: scan filter + * @db_entry: db entry + * @security: matched security. + * + * Return: true if WPA security else false + */ +static bool scm_is_wpa_security(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct security_info *security) +{ + int i; + QDF_STATUS status; + uint8_t cipher_type; + bool match_any_akm, match = false; + enum wlan_auth_type neg_auth = WLAN_NUM_OF_SUPPORT_AUTH_TYPE; + enum wlan_enc_type neg_mccipher = WLAN_ENCRYPT_TYPE_NONE; + struct wlan_wpa_ie wpa = {0}; + + if (!security) + return false; + if (!util_scan_entry_wpa(db_entry)) { + scm_debug(QDF_MAC_ADDR_FMT" : AP doesn't have WPA IE", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + + status = wlan_parse_wpa_ie(util_scan_entry_wpa(db_entry), &wpa); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("failed to parse WPA IE, status %d", status); + scm_hex_dump(QDF_TRACE_LEVEL_DEBUG, + util_scan_entry_wpa(db_entry), + util_scan_get_wpa_len(db_entry)); + return false; + } + + cipher_type = + scm_get_cipher_suite_type(security->uc_enc); + match = scm_is_cipher_match(wpa.uc_ciphers, + wpa.uc_cipher_count, WLAN_WPA_SEL(cipher_type)); + if (!match) { + scm_debug(QDF_MAC_ADDR_FMT" : unicase cipher didn't match", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + + match = scm_is_wpa_mcast_cipher_match(&wpa, filter, &neg_mccipher); + if (!match) { + scm_debug(QDF_MAC_ADDR_FMT" : mcast cipher didn't match", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + + /* Initializing with false as it has true value already */ + match = false; + for (i = 0; i < filter->num_of_auth; i++) { + + if (filter->auth_type[i] == WLAN_AUTH_TYPE_ANY) + match_any_akm = true; + else + match_any_akm = false; + /* + * Ciphers are supported, Match authentication algorithm and + * pick first matching authtype. + */ + /**/ + if (scm_is_cipher_match(wpa.auth_suites, + wpa.auth_suite_count, + WLAN_WPA_SEL(WLAN_AKM_IEEE8021X))) { + if (match_any_akm || (WLAN_AUTH_TYPE_WPA == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_WPA; + match = true; + break; + } + } + if (scm_is_cipher_match(wpa.auth_suites, + wpa.auth_suite_count, + WLAN_WPA_SEL(WLAN_AKM_PSK))) { + if (match_any_akm || (WLAN_AUTH_TYPE_WPA_PSK == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_WPA_PSK; + match = true; + break; + } + } + if (scm_is_cipher_match(wpa.auth_suites, + wpa.auth_suite_count, + WLAN_WPA_CCKM_AKM)) { + if (match_any_akm || (WLAN_AUTH_TYPE_CCKM_WPA == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_CCKM_WPA; + match = true; + break; + } + } + } + + if (!match) + scm_debug(QDF_MAC_ADDR_FMT" : akm didn't match", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + + if (match) { + security->auth_type = neg_auth; + security->mc_enc = neg_mccipher; + } + + return match; +} + +/** + * scm_is_wapi_security() - Check if scan entry support WAPI security + * @filter: scan filter + * @db_entry: db entry + * @security: matched security. + * + * Return: true if WAPI security else false + */ +static bool scm_is_wapi_security(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct security_info *security) +{ + int i; + uint8_t cipher_type; + bool match = false; + enum wlan_auth_type neg_auth = WLAN_NUM_OF_SUPPORT_AUTH_TYPE; + enum wlan_enc_type neg_mccipher = WLAN_ENCRYPT_TYPE_NONE; + struct wlan_wapi_ie wapi = {0}; + + if (!security) + return false; + if (!util_scan_entry_wapi(db_entry)) { + scm_debug(QDF_MAC_ADDR_FMT" : mcast cipher didn't match", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + + wlan_parse_wapi_ie( + util_scan_entry_wapi(db_entry), &wapi); + + cipher_type = + scm_get_cipher_suite_type(security->uc_enc); + match = scm_is_cipher_match(wapi.uc_cipher_suites, + wapi.uc_cipher_count, WLAN_WAPI_SEL(cipher_type)); + if (!match) { + scm_debug(QDF_MAC_ADDR_FMT" : unicast cipher didn't match", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + + for (i = 0; i < filter->num_of_mc_enc_type; i++) { + cipher_type = + scm_get_cipher_suite_type( + filter->mc_enc_type[i]); + match = scm_is_cipher_match(&wapi.mc_cipher_suite, + 1, WLAN_WAPI_SEL(cipher_type)); + if (match) + break; + } + if (!match) { + scm_debug(QDF_MAC_ADDR_FMT" : mcast cipher didn't match", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + neg_mccipher = filter->mc_enc_type[i]; + + if (scm_is_cipher_match(wapi.akm_suites, + wapi.akm_suite_count, + WLAN_WAPI_SEL(WLAN_WAI_CERT_OR_SMS4))) { + neg_auth = + WLAN_AUTH_TYPE_WAPI_WAI_CERTIFICATE; + } else if (scm_is_cipher_match(wapi.akm_suites, + wapi.akm_suite_count, WLAN_WAPI_SEL(WLAN_WAI_PSK))) { + neg_auth = WLAN_AUTH_TYPE_WAPI_WAI_PSK; + } else { + scm_debug(QDF_MAC_ADDR_FMT" : akm is not supported", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + + match = false; + for (i = 0; i < filter->num_of_auth; i++) { + if (filter->auth_type[i] == neg_auth) { + match = true; + break; + } + } + + if (!match) + scm_debug(QDF_MAC_ADDR_FMT" : akm suite didn't match", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + if (match) { + security->auth_type = neg_auth; + security->mc_enc = neg_mccipher; + } + + return match; +} + +/** + * scm_is_def_security() - Check if any security in filter match + * @filter: scan filter + * @db_entry: db entry + * @security: matched security. + * + * Return: true if any security else false + */ +static bool scm_is_def_security(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct security_info *security) +{ + + /* It is allowed to match anything. Try the more secured ones first. */ + /* Check GCMP_256 first */ + security->uc_enc = WLAN_ENCRYPT_TYPE_AES_GCMP_256; + if (scm_is_rsn_security(filter, db_entry, security)) + return true; + + /* Check GCMP */ + security->uc_enc = WLAN_ENCRYPT_TYPE_AES_GCMP; + if (scm_is_rsn_security(filter, db_entry, security)) + return true; + + /* Check AES */ + security->uc_enc = WLAN_ENCRYPT_TYPE_AES; + if (scm_is_rsn_security(filter, db_entry, security)) + return true; + if (scm_is_wpa_security(filter, db_entry, security)) + return true; + + /* Check TKIP */ + security->uc_enc = WLAN_ENCRYPT_TYPE_TKIP; + if (scm_is_rsn_security(filter, db_entry, security)) + return true; + if (scm_is_wpa_security(filter, db_entry, security)) + return true; + + /* Check AES */ + security->uc_enc = WLAN_ENCRYPT_TYPE_AES; + if (scm_is_wpa_security(filter, db_entry, security)) + return true; + + /* Check TKIP */ + security->uc_enc = WLAN_ENCRYPT_TYPE_TKIP; + if (scm_is_wpa_security(filter, db_entry, security)) + return true; + + /* Check WAPI */ + security->uc_enc = WLAN_ENCRYPT_TYPE_WPI; + if (scm_is_wapi_security(filter, db_entry, security)) + return true; + + security->uc_enc = WLAN_ENCRYPT_TYPE_WEP104; + if (scm_is_wep_security(filter, db_entry, security)) + return true; + security->uc_enc = WLAN_ENCRYPT_TYPE_WEP40; + if (scm_is_wep_security(filter, db_entry, security)) + return true; + security->uc_enc = WLAN_ENCRYPT_TYPE_WEP104_STATICKEY; + if (scm_is_wep_security(filter, db_entry, security)) + return true; + security->uc_enc = WLAN_ENCRYPT_TYPE_WEP40_STATICKEY; + if (scm_is_wep_security(filter, db_entry, security)) + return true; + + /* It must be open and no enc */ + if (db_entry->cap_info.wlan_caps.privacy) + return false; + + security->auth_type = WLAN_AUTH_TYPE_OPEN_SYSTEM; + security->mc_enc = WLAN_ENCRYPT_TYPE_NONE; + security->uc_enc = WLAN_ENCRYPT_TYPE_NONE; + + return true; +} + +/** + * scm_is_fils_config_match() - Check if FILS config matches + * @filter: scan filter + * @db_entry: db entry + * + * Return: true if FILS config matches else false + */ +static bool scm_is_fils_config_match(struct scan_filter *filter, + struct scan_cache_entry *db_entry) +{ + int i; + struct fils_indication_ie *indication_ie; + uint8_t *data; + uint8_t *end_ptr; + + if (!filter->fils_scan_filter.realm_check) + return true; + + if (!db_entry->ie_list.fils_indication) + return false; + + + indication_ie = + (struct fils_indication_ie *) db_entry->ie_list.fils_indication; + + end_ptr = (uint8_t *)indication_ie + indication_ie->len + 2; + + data = indication_ie->variable_data; + if (indication_ie->is_cache_id_present && + (data + CACHE_IDENTIFIER_LEN) <= end_ptr) + data += CACHE_IDENTIFIER_LEN; + + if (indication_ie->is_hessid_present && + (data + HESSID_LEN) <= end_ptr) + data += HESSID_LEN; + + for (i = 1; i <= indication_ie->realm_identifiers_cnt && + (data + REAM_HASH_LEN) <= end_ptr; i++) { + if (!qdf_mem_cmp(filter->fils_scan_filter.fils_realm, + data, REAM_HASH_LEN)) + return true; + /* Max realm count reached */ + if (indication_ie->realm_identifiers_cnt == i) + break; + else + data = data + REAM_HASH_LEN; + } + + return false; +} + +/** + * scm_is_security_match() - Check if security in filter match + * @filter: scan filter + * @db_entry: db entry + * @security: matched security. + * + * Return: true if security match else false + */ +static bool scm_is_security_match(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct security_info *security) +{ + int i; + bool match = false; + struct security_info local_security = {0}; + + if (!filter->num_of_enc_type) + return true; + + for (i = 0; (i < filter->num_of_enc_type) && + !match; i++) { + + local_security.uc_enc = + filter->enc_type[i]; + + switch (filter->enc_type[i]) { + case WLAN_ENCRYPT_TYPE_NONE: + match = scm_is_open_security(filter, + db_entry, &local_security); + break; + case WLAN_ENCRYPT_TYPE_WEP40_STATICKEY: + case WLAN_ENCRYPT_TYPE_WEP104_STATICKEY: + case WLAN_ENCRYPT_TYPE_WEP40: + case WLAN_ENCRYPT_TYPE_WEP104: + match = scm_is_wep_security(filter, + db_entry, &local_security); + break; + case WLAN_ENCRYPT_TYPE_TKIP: + case WLAN_ENCRYPT_TYPE_AES: + case WLAN_ENCRYPT_TYPE_AES_GCMP: + case WLAN_ENCRYPT_TYPE_AES_GCMP_256: + /* First check if there is a RSN match */ + match = scm_is_rsn_security(filter, db_entry, + &local_security); + /* If not RSN, then check WPA match */ + if (!match) + match = scm_is_wpa_security(filter, + db_entry, &local_security); + break; + case WLAN_ENCRYPT_TYPE_WPI:/* WAPI */ + match = scm_is_wapi_security(filter, + db_entry, &local_security); + break; + case WLAN_ENCRYPT_TYPE_ANY: + default: + match = scm_is_def_security(filter, db_entry, + &local_security); + break; + } + } + + if (match && security) + qdf_mem_copy(security, &local_security, sizeof(*security)); + + return match; +} + +bool scm_filter_match(struct wlan_objmgr_psoc *psoc, + struct scan_cache_entry *db_entry, + struct scan_filter *filter, + struct security_info *security) +{ + int i; + bool match = false; + struct scan_default_params *def_param; + struct wlan_country_ie *cc_ie; + + def_param = wlan_scan_psoc_get_def_params(psoc); + if (!def_param) + return false; + + if (filter->age_threshold && filter->age_threshold < + util_scan_entry_age(db_entry)) + return false; + + if (filter->p2p_results && !db_entry->is_p2p) + return false; + + if (db_entry->ssid.length) { + for (i = 0; i < filter->num_of_ssid; i++) { + if (util_is_ssid_match(&filter->ssid_list[i], + &db_entry->ssid)) { + match = true; + break; + } + } + } + /* + * In OWE transition mode, ssid is hidden. And supplicant does not issue + * scan with specific ssid prior to connect as in other hidden ssid + * cases. Add explicit check to allow OWE when ssid is hidden. + */ + if (!match && util_scan_entry_is_hidden_ap(db_entry)) { + for (i = 0; i < filter->num_of_auth; i++) { + if (filter->auth_type[i] == WLAN_AUTH_TYPE_OWE) { + match = true; + break; + } + } + } + if (!match && filter->num_of_ssid) + return false; + + match = false; + /* TO do Fill p2p MAC*/ + for (i = 0; i < filter->num_of_bssid; i++) { + if (util_is_bssid_match(&filter->bssid_list[i], + &db_entry->bssid)) { + match = true; + break; + } + /* TODO match p2p mac */ + } + if (!match && filter->num_of_bssid) + return false; + + match = false; + for (i = 0; i < filter->num_of_channels; i++) { + if (!filter->chan_freq_list[i] || + filter->chan_freq_list[i] == + db_entry->channel.chan_freq) { + match = true; + break; + } + } + + if (!match && filter->num_of_channels) + return false; + + if (filter->rrm_measurement_filter) + return true; + + /* TODO match phyMode */ + + if (!filter->ignore_auth_enc_type && + !scm_is_security_match(filter, db_entry, security)) { + scm_debug(QDF_MAC_ADDR_FMT" : Ignore as security profile didn't match", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + + if (!util_is_bss_type_match(filter->bss_type, db_entry->cap_info)) { + scm_debug(QDF_MAC_ADDR_FMT" : Ignore as bss type didn't match cap_info %x bss_type %d", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes), + db_entry->cap_info.value, + filter->bss_type); + return false; + } + + /* TODO match rate set */ + + if (filter->only_wmm_ap && + !db_entry->ie_list.wmeinfo && + !db_entry->ie_list.wmeparam) { + scm_debug(QDF_MAC_ADDR_FMT" : Ignore as required wmeinfo and wme params not present", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + + /* Match realm */ + if (!scm_is_fils_config_match(filter, db_entry)) { + scm_debug(QDF_MAC_ADDR_FMT" :Ignore as fils config didn't match", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + + cc_ie = util_scan_entry_country(db_entry); + if (!util_country_code_match(filter->country, cc_ie)) { + scm_debug(QDF_MAC_ADDR_FMT" : Ignore as country %.*s didn't match", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes), + 2, filter->country); + return false; + } + + if (!util_mdie_match(filter->mobility_domain, + (struct rsn_mdie *)db_entry->ie_list.mdie)) { + scm_debug(QDF_MAC_ADDR_FMT" : Ignore as mdie didn't match", + QDF_MAC_ADDR_REF(db_entry->bssid.bytes)); + return false; + } + return true; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_main.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_main.c new file mode 100644 index 0000000000000000000000000000000000000000..44a2bb68af5479beedeca223472e7d6a75fd47a9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_main.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains core scan function definitions + */ +#include +#include +#include "wlan_scan_main.h" + +QDF_STATUS wlan_scan_psoc_created_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list) +{ + struct wlan_scan_obj *scan_obj; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + scan_obj = qdf_mem_malloc_atomic(sizeof(struct wlan_scan_obj)); + if (!scan_obj) { + scm_err("Failed to allocate memory"); + return QDF_STATUS_E_NOMEM; + } + + /* Attach scan private date to psoc */ + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_SCAN, (void *)scan_obj, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) + scm_err("Failed to attach psoc scan component"); + else + scm_debug("Scan object attach to psoc successful"); + + return status; +} + +QDF_STATUS wlan_scan_psoc_destroyed_notification( + struct wlan_objmgr_psoc *psoc, + void *arg_list) +{ + void *scan_obj = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + scan_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_SCAN); + + if (!scan_obj) { + scm_err("Failed to detach scan in psoc ctx"); + return QDF_STATUS_E_FAILURE; + } + + status = wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_SCAN, scan_obj); + if (QDF_IS_STATUS_ERROR(status)) + scm_err("Failed to detach psoc scan component"); + + qdf_mem_free(scan_obj); + + return status; +} + +QDF_STATUS wlan_scan_vdev_created_notification(struct wlan_objmgr_vdev *vdev, + void *arg_list) +{ + struct scan_vdev_obj *scan_vdev_obj; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + scan_vdev_obj = qdf_mem_malloc_atomic(sizeof(struct scan_vdev_obj)); + if (!scan_vdev_obj) { + scm_err("Failed to allocate memory"); + return QDF_STATUS_E_NOMEM; + } + + /* Attach scan private date to vdev */ + status = wlan_objmgr_vdev_component_obj_attach(vdev, + WLAN_UMAC_COMP_SCAN, (void *)scan_vdev_obj, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("Failed to attach vdev scan component"); + qdf_mem_free(scan_vdev_obj); + } else { + scm_debug("vdev scan object attach successful"); + } + + return status; +} + +QDF_STATUS wlan_scan_vdev_destroyed_notification( + struct wlan_objmgr_vdev *vdev, + void *arg_list) +{ + void *scan_vdev_obj = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + scan_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_SCAN); + + if (!scan_vdev_obj) { + scm_err("Failed to detach scan in vdev ctx"); + return QDF_STATUS_E_FAILURE; + } + + status = wlan_objmgr_vdev_component_obj_detach(vdev, + WLAN_UMAC_COMP_SCAN, scan_vdev_obj); + if (QDF_IS_STATUS_ERROR(status)) + scm_err("Failed to detach vdev scan component"); + + qdf_mem_free(scan_vdev_obj); + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_main.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_main.h new file mode 100644 index 0000000000000000000000000000000000000000..26ea25e967bd94f55cac45bb0a3852819e054bad --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_main.h @@ -0,0 +1,759 @@ +/* + * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan init/deinit public api + */ + +#ifndef _WLAN_SCAN_MAIN_API_H_ +#define _WLAN_SCAN_MAIN_API_H_ + +#include +#include +#include +#include +#include +#include "wlan_scan_cache_db.h" +#include "wlan_scan_11d.h" +#include "wlan_scan_cfg.h" + +#define scm_alert(params...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_SCAN, params) +#define scm_err(params...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_SCAN, params) +#define scm_warn(params...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_SCAN, params) +#define scm_notice(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_SCAN, params) +#define scm_info(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_SCAN, params) +#define scm_debug(params...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_SCAN, params) + +/* Rate Limited Logs */ +#define scm_alert_rl(params...) \ + QDF_TRACE_FATAL_RL(QDF_MODULE_ID_SCAN, params) +#define scm_err_rl(params...) \ + QDF_TRACE_ERROR_RL(QDF_MODULE_ID_SCAN, params) +#define scm_warn_rl(params...) \ + QDF_TRACE_WARN_RL(QDF_MODULE_ID_SCAN, params) +#define scm_info_rl(params...) \ + QDF_TRACE_INFO_RL(QDF_MODULE_ID_SCAN, params) +#define scm_debug_rl(params...) \ + QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_SCAN, params) + +#define scm_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_SCAN, params) +#define scm_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_SCAN, params) +#define scm_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_SCAN, params) +#define scm_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_SCAN, params) +#define scm_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_SCAN, params) + +#define scm_hex_dump(level, data, buf_len) \ + qdf_trace_hex_dump(QDF_MODULE_ID_SCAN, level, data, buf_len) + +#define MAX_SCAN_EVENT_HANDLERS_PER_PDEV 100 +#define WLAN_MAX_MODULE_NAME 40 +#define WLAN_MAX_REQUESTORS 200 +#define WLAN_SCAN_ID_MASK 0x00000FFF +#define WLAN_HOST_SCAN_REQ_ID_PREFIX 0x0000A000 +#define SCAN_NPROBES_DEFAULT 2 +#define WLAN_P2P_SOCIAL_CHANNELS 3 + +#define SCAN_BURST_SCAN_MAX_NUM_OFFCHANNELS (3) +#define SCAN_SCAN_IDLE_TIME_DEFAULT (25) +#define SCAN_3PORT_CONC_SCAN_MAX_BURST_DURATION (25) +#define SCAN_CTS_DURATION_MS_MAX (32) +#define SCAN_ROAM_SCAN_CHANNEL_SWITCH_TIME (4) +#define SCAN_DWELL_TIME_PROBE_TIME_MAP_SIZE (11) +#define SCAN_GO_MIN_ACTIVE_SCAN_BURST_DURATION (40) +#define SCAN_GO_MAX_ACTIVE_SCAN_BURST_DURATION (240) +#define SCAN_P2P_SCAN_MAX_BURST_DURATION (240) +#define SCAN_GO_BURST_SCAN_MAX_NUM_OFFCHANNELS (6) + +/* MAX RNR entries per channel*/ +#define WLAN_MAX_RNR_COUNT 15 + +/** + * struct probe_time_dwell_time - probe time, dwell time map + * @dwell_time: dwell time + * @probe_time: repeat probe time + */ +struct probe_time_dwell_time { + uint8_t dwell_time; + uint8_t probe_time; +}; + +/* + * For the requestor id: + * bit 0~12 is used for real requestor id. + * bit 13~15 is used for requestor prefix. + * bit 16~19 is used by specific user to aware it is issued by himself. + * bit 20~31 is reserved. + */ +#define WLAN_SCAN_REQUESTER_ID_PREFIX 0x0000A000 +#define WLAN_SCAN_REQUESTER_ID_MASK 0x00001FFF + +#define SCM_NUM_RSSI_CAT 15 +#define SCAN_STA_MIRACAST_MCC_REST_TIME 400 + +#define SCAN_TIMEOUT_GRACE_PERIOD 10 +#define SCAN_MAX_BSS_PDEV 100 +#define SCAN_PRIORITY SCAN_PRIORITY_LOW + +/* DBS Scan policy selection ext flags */ +#define SCAN_FLAG_EXT_DBS_SCAN_POLICY_MASK 0x00000003 +#define SCAN_FLAG_EXT_DBS_SCAN_POLICY_BIT 0 +#define SCAN_DBS_POLICY_DEFAULT 0x0 +#define SCAN_DBS_POLICY_FORCE_NONDBS 0x1 +#define SCAN_DBS_POLICY_IGNORE_DUTY 0x2 +#define SCAN_DBS_POLICY_MAX 0x3 +/* Minimum number of channels for enabling DBS Scan */ +#define SCAN_MIN_CHAN_DBS_SCAN_THRESHOLD 8 +/* + * Enable Reception of Public Action frame with this flag + */ +#define SCAN_FLAG_EXT_FILTER_PUBLIC_ACTION_FRAME 0x4 + +/* Indicate to scan all PSC channel */ +#define SCAN_FLAG_EXT_6GHZ_SCAN_ALL_PSC_CH 0x8 + +/* Indicate to scan all NON-PSC channel */ +#define SCAN_FLAG_EXT_6GHZ_SCAN_ALL_NON_PSC_CH 0x10 + +/* Indicate to save scan result matching hint from scan client */ +#define SCAN_FLAG_EXT_6GHZ_MATCH_HINT 0x20 + +/* Skip any channel on which RNR information is not received */ +#define SCAN_FLAG_EXT_6GHZ_SKIP_NON_RNR_CH 0x40 + +/* Indicate client hint req is high priority than FW rnr or FILS discovery */ +#define SCAN_FLAG_EXT_6GHZ_CLIENT_HIGH_PRIORITY 0x80 + +/* Passive dwell time if bt_a2dp is enabled. Time in msecs*/ +#define PASSIVE_DWELL_TIME_BT_A2DP_ENABLED 28 + +/** + * struct cb_handler - defines scan event handler + * call back function and arguments + * @func: handler function pointer + * @arg: argument to handler function + */ +struct cb_handler { + scan_event_handler func; + void *arg; +}; + +/** + * struct pdev_scan_ev_handler - pdev scan event handlers + * @cb_handler: array of registered scan handlers + */ +struct pdev_scan_ev_handler { + uint32_t handler_cnt; + struct cb_handler cb_handlers[MAX_SCAN_EVENT_HANDLERS_PER_PDEV]; +}; + +/** + * struct global_scan_ev_handlers - per pdev registered scan event handlers + * @pdev_scan_ev_handler: per pdev registered scan event handlers + */ +struct global_scan_ev_handlers { + struct pdev_scan_ev_handler pdev_ev_handlers[WLAN_UMAC_MAX_PDEVS]; +}; + +/** + * struct scan_requester_info - defines scan requester id + * and event handler mapping + * @requester: requester ID allocated + * @module: module name of requester + * @ev_handler: event handlerto be invoked + */ +struct scan_requester_info { + wlan_scan_requester requester; + uint8_t module[WLAN_MAX_MODULE_NAME]; + struct cb_handler ev_handler; +}; + +/** + * struct pdev_scan_info - defines per pdev scan info + * @wide_band_scan: wide band scan capability + * @last_scan_time: time of last scan start on this pdev + * @custom_chan_list: scan only these channels + * @conf_bssid: configured bssid of the hidden AP + * @conf_ssid: configured desired ssid + */ +struct pdev_scan_info { + bool wide_band_scan; + qdf_time_t last_scan_time; + struct chan_list custom_chan_list; + uint8_t conf_bssid[QDF_MAC_ADDR_SIZE]; + struct wlan_ssid conf_ssid; +}; + +/** + * struct scan_vdev_obj - scan vdev obj + * @pno_match_evt_received: pno match received + * @pno_in_progress: pno in progress + * @scan_disabled: if scan is disabled for this vdev + * @first_scan_done: Whether its the first scan or not for this particular vdev. + */ +struct scan_vdev_obj { + bool pno_match_evt_received; + bool pno_in_progress; + uint32_t scan_disabled; + bool first_scan_done; +}; + +#ifdef FEATURE_WLAN_SCAN_PNO +/** + * struct pno_def_config - def configuration for PNO + * @pno_offload_enabled: flag to check if PNO offload is enabled/disabled + * @channel_prediction: config PNO channel prediction feature status + * @top_k_num_of_channels: def top K number of channels are used for tanimoto + * distance calculation. + * @stationary_thresh: def threshold val to determine that STA is stationary. + * @pnoscan_adaptive_dwell_mode: def adaptive dwelltime mode for pno scan + * @channel_prediction_full_scan: def periodic timer upon which full scan needs + * to be triggered. + * @dfs_chnl_scan_enable: Enable dfs channel PNO scan + * @scan_support_enabled: PNO scan support enabled/disabled + * @scan_timer_repeat_value: PNO scan timer repeat value + * @slow_scan_multiplier: PNO slow scan timer multiplier + * @max_sched_scan_plan_interval: PNO scan interval + * @max_sched_scan_plan_iterations: PNO scan number of iterations + * @scan_backoff_multiplier: Scan banckoff multiplier + * @pno_wake_lock: pno wake lock + * @pno_cb: callback to call on PNO completion + * @mawc_params: Configuration parameters for NLO MAWC. + * @user_config_sched_scan_plan: if enabled set user confing sched scan plan + */ +struct pno_def_config { + bool pno_offload_enabled; + bool channel_prediction; + uint8_t top_k_num_of_channels; + uint8_t stationary_thresh; + enum scan_dwelltime_adaptive_mode adaptive_dwell_mode; + uint32_t channel_prediction_full_scan; + bool dfs_chnl_scan_enabled; + bool scan_support_enabled; + uint32_t scan_timer_repeat_value; + uint32_t slow_scan_multiplier; + uint32_t max_sched_scan_plan_interval; + uint32_t max_sched_scan_plan_iterations; + uint8_t scan_backoff_multiplier; + qdf_wake_lock_t pno_wake_lock; + struct cb_handler pno_cb; + struct nlo_mawc_params mawc_params; + bool user_config_sched_scan_plan; +}; +#endif + +#ifdef FEATURE_WLAN_EXTSCAN +/** + * struct extscan_def_config - def configuration for EXTSCAN + * @extscan_enabled: enable extscan + * @extscan_passive_max_chn_time: max passive channel time + * @extscan_passive_min_chn_time: min passive channel time + * @extscan_active_max_chn_time: max active channel time + * @extscan_active_min_chn_time: min active channel time + */ +struct extscan_def_config { + bool extscan_enabled; + uint32_t extscan_passive_max_chn_time; + uint32_t extscan_passive_min_chn_time; + uint32_t extscan_active_max_chn_time; + uint32_t extscan_active_min_chn_time; +}; +#endif + +/** + * struct scan_default_params - default scan parameters to be used + * @active_dwell: default active dwell time + * @allow_dfs_chan_in_first_scan: first scan should contain dfs channels or not. + * @allow_dfs_chan_in_scan: Scan DFS channels or not. + * @skip_dfs_chan_in_p2p_search: Skip DFS channels in p2p search. + * @use_wake_lock_in_user_scan: if wake lock will be acquired during user scan + * @active_dwell_2g: default active dwell time for 2G channels, if it's not zero + * @active_dwell_6g: default active dwell time for 6G channels + * @passive_dwell_6g: default passive dwell time for 6G channels + * @passive_dwell:default passive dwell time + * @max_rest_time: default max rest time + * @sta_miracast_mcc_rest_time: max rest time for miracast and mcc + * @min_rest_time: default min rest time + * @idle_time: default idle time + * @conc_active_dwell: default concurrent active dwell time + * @conc_passive_dwell: default concurrent passive dwell time + * @conc_max_rest_time: default concurrent max rest time + * @conc_min_rest_time: default concurrent min rest time + * @conc_idle_time: default concurrent idle time + * @repeat_probe_time: default repeat probe time + * @probe_spacing_time: default probe spacing time + * @probe_delay: default probe delay + * @burst_duration: default burst duration + * @max_scan_time: default max scan time + * @num_probes: default maximum number of probes to sent + * @cache_aging_time: default scan cache aging time + * @select_5gh_margin: Prefer connecting to 5G AP even if + * its RSSI is lower by select_5gh_margin dbm than 2.4G AP. + * applicable if prefer_5ghz is set. + * @is_bssid_hint_priority: True if bssid_hint is given priority + * @enable_mac_spoofing: enable mac address spoof in scan + * @max_bss_per_pdev: maximum number of bss entries to be maintained per pdev + * @max_active_scans_allowed: maximum number of active parallel scan allowed + * per psoc + * @scan_mode_6g: scan mode in 6Ghz + * @enable_connected_scan: enable scans after connection + * @scan_priority: default scan priority + * @adaptive_dwell_time_mode: adaptive dwell mode with connection + * @adaptive_dwell_time_mode_nc: adaptive dwell mode without connection + * @honour_nl_scan_policy_flags: honour nl80211 scan policy flags + * @extscan_adaptive_dwell_mode: Adaptive dwell mode during ext scan + * @scan_f_passive: passively scan all channels including active channels + * @scan_f_bcast_probe: add wild card ssid prbreq even if ssid_list is specified + * @scan_f_cck_rates: add cck rates to rates/xrates ie in prb req + * @scan_f_ofdm_rates: add ofdm rates to rates/xrates ie in prb req + * @scan_f_chan_stat_evnt: enable indication of chan load and noise floor + * @scan_f_filter_prb_req: filter Probe request frames + * @scan_f_bypass_dfs_chn: when set, do not scan DFS channels + * @scan_f_continue_on_err:continue scan even if few certain erros have occurred + * @scan_f_offchan_mgmt_tx: allow mgmt transmission during off channel scan + * @scan_f_offchan_data_tx: allow data transmission during off channel scan + * @scan_f_promisc_mode: scan with promiscuous mode + * @scan_f_capture_phy_err: enable capture ppdu with phy errrors + * @scan_f_strict_passive_pch: do passive scan on passive channels + * @scan_f_half_rate: enable HALF (10MHz) rate support + * @scan_f_quarter_rate: set Quarter (5MHz) rate support + * @scan_f_force_active_dfs_chn: allow to send probe req on DFS channel + * @scan_f_add_tpc_ie_in_probe: add TPC ie in probe req frame + * @scan_f_add_ds_ie_in_probe: add DS ie in probe req frame + * @scan_f_add_spoofed_mac_in_probe: use random mac address for TA in probe + * @scan_f_add_rand_seq_in_probe: use random sequence number in probe + * @scan_f_en_ie_whitelist_in_probe: enable ie whitelist in probe + * @scan_f_forced: force scan even in presence of data traffic + * @scan_f_2ghz: scan 2.4 GHz channels + * @scan_f_5ghz: scan 5 GHz channels + * @scan_f_wide_band: scan in 40 MHz or higher bandwidth + * @scan_flags: variable to read and set scan_f_* flags in one shot + * can be used to dump all scan_f_* flags for debug + * @scan_ev_started: notify scan started event + * @scan_ev_completed: notify scan completed event + * @scan_ev_bss_chan: notify bss chan event + * @scan_ev_foreign_chan: notify foreign chan event + * @scan_ev_dequeued: notify scan request dequed event + * @scan_ev_preempted: notify scan preempted event + * @scan_ev_start_failed: notify scan start failed event + * @scan_ev_restarted: notify scan restarted event + * @scan_ev_foreign_chn_exit: notify foreign chan exit event + * @scan_ev_invalid: notify invalid scan request event + * @scan_ev_gpio_timeout: notify gpio timeout event + * @scan_ev_suspended: notify scan suspend event + * @scan_ev_resumed: notify scan resumed event + * @scan_events: variable to read and set scan_ev_* flags in one shot + * can be used to dump all scan_ev_* flags for debug + * @roam_params: roam related params + */ +struct scan_default_params { + uint32_t active_dwell; + bool allow_dfs_chan_in_first_scan; + bool allow_dfs_chan_in_scan; + bool skip_dfs_chan_in_p2p_search; + bool use_wake_lock_in_user_scan; + uint32_t active_dwell_2g; + uint32_t active_dwell_6g; + uint32_t passive_dwell_6g; + uint32_t passive_dwell; + uint32_t max_rest_time; + uint32_t sta_miracast_mcc_rest_time; + uint32_t min_rest_time; + uint32_t idle_time; + uint32_t conc_active_dwell; + uint32_t conc_passive_dwell; + uint32_t conc_max_rest_time; + uint32_t conc_min_rest_time; + uint32_t conc_idle_time; + uint32_t repeat_probe_time; + uint32_t probe_spacing_time; + uint32_t probe_delay; + uint32_t burst_duration; + uint32_t max_scan_time; + uint32_t num_probes; + qdf_time_t scan_cache_aging_time; + uint32_t select_5ghz_margin; + bool enable_mac_spoofing; + bool is_bssid_hint_priority; + uint32_t usr_cfg_probe_rpt_time; + uint32_t usr_cfg_num_probes; + uint16_t max_bss_per_pdev; + uint32_t max_active_scans_allowed; + uint8_t sta_scan_burst_duration; + uint8_t p2p_scan_burst_duration; + uint8_t go_scan_burst_duration; + uint8_t ap_scan_burst_duration; + enum scan_mode_6ghz scan_mode_6g; + bool enable_connected_scan; + enum scan_priority scan_priority; + enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode; + enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode_nc; + bool honour_nl_scan_policy_flags; + enum scan_dwelltime_adaptive_mode extscan_adaptive_dwell_mode; + union { + struct { + uint32_t scan_f_passive:1, + scan_f_bcast_probe:1, + scan_f_cck_rates:1, + scan_f_ofdm_rates:1, + scan_f_chan_stat_evnt:1, + scan_f_filter_prb_req:1, + scan_f_bypass_dfs_chn:1, + scan_f_continue_on_err:1, + scan_f_offchan_mgmt_tx:1, + scan_f_offchan_data_tx:1, + scan_f_promisc_mode:1, + scan_f_capture_phy_err:1, + scan_f_strict_passive_pch:1, + scan_f_half_rate:1, + scan_f_quarter_rate:1, + scan_f_force_active_dfs_chn:1, + scan_f_add_tpc_ie_in_probe:1, + scan_f_add_ds_ie_in_probe:1, + scan_f_add_spoofed_mac_in_probe:1, + scan_f_add_rand_seq_in_probe:1, + scan_f_en_ie_whitelist_in_probe:1, + scan_f_forced:1, + scan_f_2ghz:1, + scan_f_5ghz:1, + scan_f_wide_band:1; + }; + uint32_t scan_flags; + }; + union { + struct { + uint32_t scan_ev_started:1, + scan_ev_completed:1, + scan_ev_bss_chan:1, + scan_ev_foreign_chan:1, + scan_ev_dequeued:1, + scan_ev_preempted:1, + scan_ev_start_failed:1, + scan_ev_restarted:1, + scan_ev_foreign_chn_exit:1, + scan_ev_invalid:1, + scan_ev_gpio_timeout:1, + scan_ev_suspended:1, + scan_ev_resumed:1; + }; + uint32_t scan_events; + }; + struct scoring_config score_config; +}; + +/** + * struct scan_cb - nif/sif function callbacks + * @inform_beacon: cb to indicate frame to OS + * @update_beacon: cb to indicate frame to MLME + * @unlink_bss: cb to unlink bss from kernel cache + */ +struct scan_cb { + update_beacon_cb inform_beacon; + update_beacon_cb update_beacon; + update_beacon_cb unlink_bss; + /* Define nif/sif function callbacks here */ +}; + +/** + * struct wlan_scan_obj - scan object definition + * @scan_disabled: if scan is disabled + * @scan_db: scan cache data base + * @cc_db: pointer of country code data base + * @lock: spin lock + * @scan_def: default scan parameters + * @cb: nif/sif function callbacks + * @requesters: requester allocation pool + * @scan_ids: last allocated scan id + * @global_evhandlers: registered scan event handlers + * @pdev_info: pointer to pdev info + * @pno_cfg: default pno configuration + * @extscan_cfg: default extscan configuration + * @ie_whitelist: default ie whitelist attrs + * @bt_a2dp_enabled: if bt a2dp is enabled + * @miracast_enabled: miracast enabled + * @disable_timeout: command timeout disabled + * @drop_bcn_on_chan_mismatch: drop bcn if channel mismatch + * @drop_bcn_on_invalid_freq: drop bcn if freq is invalid in IEs (DS/HT/HE) + * @scan_start_request_buff: buffer used to pass + * scan config to event handlers + * @rnr_channel_db: RNR channel list database + * @allow_bss_with_incomplete_ie: Continue scan entry even if any corrupted IES + * are present. + */ +struct wlan_scan_obj { + uint32_t scan_disabled; + qdf_spinlock_t lock; + qdf_atomic_t scan_ids; + struct scan_dbs scan_db[WLAN_UMAC_MAX_PDEVS]; + struct scan_country_code_db *cc_db; + struct scan_default_params scan_def; + struct scan_cb cb; + struct scan_requester_info requesters[WLAN_MAX_REQUESTORS]; + struct global_scan_ev_handlers global_evhandlers; + struct pdev_scan_info pdev_info[WLAN_UMAC_MAX_PDEVS]; +#ifdef FEATURE_WLAN_SCAN_PNO + struct pno_def_config pno_cfg; +#endif +#ifdef FEATURE_WLAN_EXTSCAN + struct extscan_def_config extscan_cfg; +#endif + struct probe_req_whitelist_attr ie_whitelist; + bool bt_a2dp_enabled; + bool miracast_enabled; + bool disable_timeout; + bool drop_bcn_on_chan_mismatch; + bool drop_bcn_on_invalid_freq; + struct scan_start_request scan_start_request_buff; +#ifdef FEATURE_6G_SCAN_CHAN_SORT_ALGO + struct channel_list_db rnr_channel_db; +#endif + bool allow_bss_with_incomplete_ie; +}; + +/** + * wlan_psoc_get_scan_obj() - private API to get scan object from psoc + * @psoc: psoc object + * + * Return: scan object + */ +#define wlan_psoc_get_scan_obj(psoc) \ + wlan_psoc_get_scan_obj_fl(psoc, \ + __func__, __LINE__) + +static inline struct wlan_scan_obj * +wlan_psoc_get_scan_obj_fl(struct wlan_objmgr_psoc *psoc, + const char *func, uint32_t line) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = (struct wlan_scan_obj *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_SCAN); + if (!scan_obj) { + scm_err("%s:%u, Failed to get scan object", func, line); + return NULL; + } + return scan_obj; +} + +/** + * wlan_pdev_get_scan_obj() - private API to get scan object from pdev + * @psoc: pdev object + * + * Return: scan object + */ +static inline struct wlan_scan_obj * +wlan_pdev_get_scan_obj(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + + return wlan_psoc_get_scan_obj(psoc); +} + +/** + * wlan_vdev_get_scan_obj() - private API to get scan object from vdev + * @psoc: vdev object + * + * Return: scan object + */ +static inline struct wlan_scan_obj * +wlan_vdev_get_scan_obj(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev; + + pdev = wlan_vdev_get_pdev(vdev); + + return wlan_pdev_get_scan_obj(pdev); +} + +/** + * wlan_get_vdev_scan_obj() - private API to get scan object vdev + * @vdev: vdev object + * + * Return: scan object + */ +static inline struct scan_vdev_obj * +wlan_get_vdev_scan_obj(struct wlan_objmgr_vdev *vdev) +{ + struct scan_vdev_obj *scan_vdev_obj; + + scan_vdev_obj = (struct scan_vdev_obj *) + wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_SCAN); + + return scan_vdev_obj; +} + +/** + * wlan_scan_vdev_get_pdev_id() - private API to get pdev id from vdev object + * @vdev: vdev object + * + * Return: parent pdev id + */ +static inline uint8_t +wlan_scan_vdev_get_pdev_id(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev; + + pdev = wlan_vdev_get_pdev(vdev); + + return wlan_objmgr_pdev_get_pdev_id(pdev); +} + +/** + * wlan_pdev_get_pdev_scan_ev_handlers() - private API to get + * pdev scan event handlers + * @vdev: pdev object + * + * Return: pdev_scan_ev_handler object + */ +static inline struct pdev_scan_ev_handler* +wlan_pdev_get_pdev_scan_ev_handlers(struct wlan_objmgr_pdev *pdev) +{ + uint8_t pdevid; + struct wlan_scan_obj *scan = NULL; + + if (!pdev) + goto err; + + pdevid = wlan_objmgr_pdev_get_pdev_id(pdev); + scan = wlan_pdev_get_scan_obj(pdev); + if (!scan) + goto err; + + return &scan->global_evhandlers.pdev_ev_handlers[pdevid]; + +err: + scm_err("NULL pointer, pdev: 0x%pK, scan_obj: 0x%pK", + pdev, scan); + return NULL; +} + +/** + * wlan_vdev_get_pdev_scan_ev_handlers() - private API to get + * pdev scan event handlers + * @vdev: vdev object + * + * Return: pdev_scan_ev_handler object + */ +static inline struct pdev_scan_ev_handler* +wlan_vdev_get_pdev_scan_ev_handlers(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev; + + pdev = wlan_vdev_get_pdev(vdev); + + return wlan_pdev_get_pdev_scan_ev_handlers(pdev); +} + +/** + * wlan_scan_psoc_get_def_params() - private API to get scan defaults + * @psoc: psoc object + * + * Return: scan defaults + */ +static inline struct scan_default_params* +wlan_scan_psoc_get_def_params(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan = NULL; + + if (!psoc) { + scm_err("null psoc"); + return NULL; + } + scan = wlan_psoc_get_scan_obj(psoc); + + if (!scan) + return NULL; + + return &scan->scan_def; +} + +/** + * wlan_vdev_get_def_scan_params() - private API to get scan defaults + * @vdev: vdev object + * + * Return: scan defaults + */ +static inline struct scan_default_params* +wlan_vdev_get_def_scan_params(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + if (!vdev) { + scm_err("null vdev"); + return NULL; + } + psoc = wlan_vdev_get_psoc(vdev); + + return wlan_scan_psoc_get_def_params(psoc); +} + +/** + * wlan_scan_psoc_created_notification() - scan psoc create handler + * @psoc: psoc object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_scan_psoc_created_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list); + +/** + * wlan_scan_psoc_deleted_notification() - scan psoc delete handler + * @psoc: psoc object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_scan_psoc_destroyed_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list); + +/** + * wlan_scan_vdev_created_notification() - scan psoc create handler + * @vdev: vdev object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_scan_vdev_created_notification(struct wlan_objmgr_vdev *vdev, + void *arg_list); + +/** + * wlan_scan_vdev_destroyed_notification() - scan psoc delete handler + * @vdev: vdev object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_scan_vdev_destroyed_notification(struct wlan_objmgr_vdev *vdev, + void *arg_list); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_manager.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_manager.c new file mode 100644 index 0000000000000000000000000000000000000000..f39ee60c17e7d7e0ae3e06f695beb7cbcedacf04 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_manager.c @@ -0,0 +1,1885 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan manager functionality + */ + +#include +#include +#include +#include "wlan_scan_main.h" +#include "wlan_scan_manager.h" +#include "wlan_utility.h" +#include +#ifdef FEATURE_WLAN_SCAN_PNO +#include +#endif +#ifdef WLAN_POLICY_MGR_ENABLE +#include +#endif +#include +#include + +/* Beacon/probe weightage multiplier */ +#define BCN_PROBE_WEIGHTAGE 5 + +/* Saved profile weightage multiplier */ +#define SAVED_PROFILE_WEIGHTAGE 10 + +/* maximum number of 6ghz hints can be sent per scan request */ +#define MAX_HINTS_PER_SCAN_REQ 15 + +/* maximum number of hints can be sent per 6ghz channel */ +#define MAX_HINTS_PER_CHANNEL 4 + +QDF_STATUS +scm_scan_free_scan_request_mem(struct scan_start_request *req) +{ + void *ie; + + if (!req) { + scm_err("null request"); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Free vendor(extra) ie */ + ie = req->scan_req.extraie.ptr; + if (ie) { + req->scan_req.extraie.ptr = NULL; + req->scan_req.extraie.len = 0; + qdf_mem_free(ie); + } + + /* Free htcap ie */ + ie = req->scan_req.htcap.ptr; + if (ie) { + req->scan_req.htcap.len = 0; + req->scan_req.htcap.ptr = NULL; + qdf_mem_free(ie); + } + + /* Free vhtcap ie */ + ie = req->scan_req.vhtcap.ptr; + if (ie) { + req->scan_req.vhtcap.len = 0; + req->scan_req.vhtcap.ptr = NULL; + qdf_mem_free(ie); + } + /* free scan_start_request memory */ + qdf_mem_free(req); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +scm_scan_get_pdev_global_event_handlers(struct scan_event_listeners *listeners, + struct pdev_scan_ev_handler *pdev_ev_handler) +{ + uint32_t i; + struct cb_handler *cb_handlers = &(pdev_ev_handler->cb_handlers[0]); + + for (i = 0; i < MAX_SCAN_EVENT_HANDLERS_PER_PDEV; i++, cb_handlers++) { + if ((cb_handlers->func) && + (listeners->count < MAX_SCAN_EVENT_LISTENERS)) { + listeners->cb[listeners->count].func = + cb_handlers->func; + listeners->cb[listeners->count].arg = + cb_handlers->arg; + listeners->count++; + } + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +scm_scan_get_requester_event_handler(struct scan_event_listeners *listeners, + struct scan_requester_info *requesters, + wlan_scan_requester requester_id) +{ + uint32_t idx; + struct cb_handler *ev_handler; + + idx = requester_id & WLAN_SCAN_REQUESTER_ID_PREFIX; + if (idx != WLAN_SCAN_REQUESTER_ID_PREFIX) + return QDF_STATUS_SUCCESS; + + idx = requester_id & WLAN_SCAN_REQUESTER_ID_MASK; + if (idx < WLAN_MAX_REQUESTORS) { + ev_handler = &(requesters[idx].ev_handler); + if (ev_handler->func) { + if (listeners->count < MAX_SCAN_EVENT_LISTENERS) { + listeners->cb[listeners->count].func = + ev_handler->func; + listeners->cb[listeners->count].arg = + ev_handler->arg; + listeners->count++; + } + } + return QDF_STATUS_SUCCESS; + } else { + scm_err("invalid requester id"); + return QDF_STATUS_E_INVAL; + } + +} + +static void scm_scan_post_event(struct wlan_objmgr_vdev *vdev, + struct scan_event *event) +{ + uint32_t i = 0; + struct wlan_scan_obj *scan; + struct pdev_scan_ev_handler *pdev_ev_handler; + struct cb_handler *cb_handlers; + struct scan_requester_info *requesters; + struct scan_event_listeners *listeners; + + if (!vdev || !event) { + scm_err("vdev: 0x%pK, event: 0x%pK", vdev, event); + return; + } + if (!event->requester) { + scm_err("invalid requester id"); + QDF_ASSERT(0); + } + scan = wlan_vdev_get_scan_obj(vdev); + pdev_ev_handler = wlan_vdev_get_pdev_scan_ev_handlers(vdev); + if (!pdev_ev_handler) + return; + cb_handlers = &(pdev_ev_handler->cb_handlers[0]); + requesters = scan->requesters; + + listeners = qdf_mem_malloc_atomic(sizeof(*listeners)); + if (!listeners) { + scm_warn("couldn't allocate listeners list"); + return; + } + + /* initialize number of listeners */ + listeners->count = 0; + + /* + * Initiator of scan request decides which all scan events + * he is interested in and FW will send only those scan events + * to host driver. + * All the events received by scan module will be notified + * to all registered handlers. + */ + + qdf_spin_lock_bh(&scan->lock); + /* find all global scan event handlers on this pdev */ + scm_scan_get_pdev_global_event_handlers(listeners, pdev_ev_handler); + /* find owner who triggered this scan request */ + scm_scan_get_requester_event_handler(listeners, requesters, + event->requester); + qdf_spin_unlock_bh(&scan->lock); + + /* notify all interested handlers */ + for (i = 0; i < listeners->count; i++) + listeners->cb[i].func(vdev, event, listeners->cb[i].arg); + qdf_mem_free(listeners); +} + +static QDF_STATUS +scm_release_serialization_command(struct wlan_objmgr_vdev *vdev, + uint32_t scan_id) +{ + struct wlan_serialization_queued_cmd_info cmd = {0}; + + cmd.requestor = WLAN_UMAC_COMP_SCAN; + cmd.cmd_type = WLAN_SER_CMD_SCAN; + cmd.cmd_id = scan_id; + cmd.req_type = WLAN_SER_CANCEL_SINGLE_SCAN; + cmd.vdev = vdev; + cmd.queue_type = WLAN_SERIALIZATION_ACTIVE_QUEUE; + + /* Inform serialization for command completion */ + wlan_serialization_remove_cmd(&cmd); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +scm_post_internal_scan_complete_event(struct scan_start_request *req, + enum scan_completion_reason reason) +{ + struct scan_event event = {0, }; + + /* prepare internal scan complete event */ + event.type = SCAN_EVENT_TYPE_COMPLETED; + event.reason = reason; + event.chan_freq = 0; /* Invalid frequency */ + event.vdev_id = req->scan_req.vdev_id; + event.requester = req->scan_req.scan_req_id; + event.scan_id = req->scan_req.scan_id; + /* Fill scan_start_request used to trigger this scan */ + event.scan_start_req = req; + /* post scan event to registered handlers */ + scm_scan_post_event(req->vdev, &event); + + return QDF_STATUS_SUCCESS; +} + +static inline struct pdev_scan_info * +scm_scan_get_pdev_priv_info(uint8_t pdev_id, struct wlan_scan_obj *scan_obj) +{ + return &scan_obj->pdev_info[pdev_id]; +} + +static QDF_STATUS +scm_update_last_scan_time(struct scan_start_request *req) +{ + uint8_t pdev_id; + struct wlan_scan_obj *scan_obj; + struct pdev_scan_info *pdev_scan_info; + + scan_obj = wlan_vdev_get_scan_obj(req->vdev); + pdev_id = wlan_scan_vdev_get_pdev_id(req->vdev); + pdev_scan_info = scm_scan_get_pdev_priv_info(pdev_id, scan_obj); + /* update last scan start time */ + pdev_scan_info->last_scan_time = qdf_system_ticks(); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +scm_activate_scan_request(struct scan_start_request *req) +{ + QDF_STATUS status; + + status = tgt_scan_start(req); + if (status != QDF_STATUS_SUCCESS) { + scm_err("tgt_scan_start failed, status: %d", status); + /* scan could not be started and hence + * we will not receive any completions. + * post scan cancelled + */ + scm_post_internal_scan_complete_event(req, + SCAN_REASON_CANCELLED); + return status; + } + /* save last scan start time */ + status = scm_update_last_scan_time(req); + + return status; +} + +static QDF_STATUS +scm_cancel_scan_request(struct scan_start_request *req) +{ + struct scan_cancel_request cancel_req = {0, }; + QDF_STATUS status; + + cancel_req.vdev = req->vdev; + cancel_req.cancel_req.scan_id = req->scan_req.scan_id; + cancel_req.cancel_req.requester = req->scan_req.scan_req_id; + cancel_req.cancel_req.req_type = WLAN_SCAN_CANCEL_SINGLE; + cancel_req.cancel_req.vdev_id = req->scan_req.vdev_id; + /* send scan cancel to fw */ + status = tgt_scan_cancel(&cancel_req); + if (status != QDF_STATUS_SUCCESS) + scm_err("tgt_scan_cancel failed: status: %d, scanid: %d", + status, req->scan_req.scan_id); + /* notify event handler about scan cancellation */ + scm_post_internal_scan_complete_event(req, SCAN_REASON_CANCELLED); + + return status; +} + +static QDF_STATUS +scm_scan_serialize_callback(struct wlan_serialization_command *cmd, + enum wlan_serialization_cb_reason reason) +{ + struct scan_start_request *req; + QDF_STATUS status; + + if (!cmd) { + scm_err("cmd is NULL, reason: %d", reason); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!cmd->umac_cmd) { + scm_err("cmd->umac_cmd is NULL , reason: %d", reason); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + req = cmd->umac_cmd; + if (!req->vdev) { + scm_err("NULL vdev. req:0x%pK, reason:%d\n", req, reason); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + qdf_mtrace(QDF_MODULE_ID_SERIALIZATION, QDF_MODULE_ID_SCAN, reason, + req->scan_req.vdev_id, req->scan_req.scan_id); + + switch (reason) { + case WLAN_SER_CB_ACTIVATE_CMD: + /* command moved to active list + * modify the params if required for concurency case. + */ + status = scm_activate_scan_request(req); + break; + + case WLAN_SER_CB_CANCEL_CMD: + /* command removed from pending list. + * notify registered scan event handlers with + * status completed and reason cancelled. + */ + status = scm_post_internal_scan_complete_event(req, + SCAN_REASON_CANCELLED); + break; + + case WLAN_SER_CB_ACTIVE_CMD_TIMEOUT: + /* active command timed out. + * prepare internal scan cancel request + */ + status = scm_cancel_scan_request(req); + break; + + case WLAN_SER_CB_RELEASE_MEM_CMD: + /* command successfully completed. + * Release vdev reference and free scan_start_request memory + */ + cmd->umac_cmd = NULL; + wlan_objmgr_vdev_release_ref(req->vdev, WLAN_SCAN_ID); + status = scm_scan_free_scan_request_mem(req); + break; + + default: + /* Do nothing but logging */ + QDF_ASSERT(0); + status = QDF_STATUS_E_INVAL; + break; + } + + return status; +} + +bool scm_is_scan_allowed(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_scan_obj *scan_psoc_obj; + struct scan_vdev_obj *scan_vdev_obj; + + if (!vdev) { + scm_err("vdev is NULL"); + return false; + } + + scan_psoc_obj = wlan_vdev_get_scan_obj(vdev); + if (!scan_psoc_obj) { + scm_err("Couldn't find scan psoc object"); + return false; + } + + if (scan_psoc_obj->scan_disabled) { + scm_err_rl("scan disabled %x, for psoc", + scan_psoc_obj->scan_disabled); + return false; + } + + scan_vdev_obj = wlan_get_vdev_scan_obj(vdev); + if (!scan_vdev_obj) { + scm_err("Couldn't find scan vdev object"); + return false; + } + + if (scan_vdev_obj->scan_disabled) { + scm_err_rl("scan disabled %x on vdev_id:%d", + scan_vdev_obj->scan_disabled, + wlan_vdev_get_id(vdev)); + return false; + } + + return true; +} + +#ifdef WLAN_POLICY_MGR_ENABLE +/** + * scm_update_dbs_scan_ctrl_ext_flag() - update dbs scan ctrl flags + * @req: pointer to scan request + * + * This function sets scan_ctrl_flags_ext value depending on the type of + * scan and the channel lists. + * + * Non-DBS scan is requested if any of the below case is met: + * 1. HW is DBS incapable + * 2. A high accuracy scan request is sent by kernel. + * + * DBS scan is enabled for these conditions: + * 1. A low power or low span scan request is sent by kernel. + * For remaining cases DBS is enabled by default. + * Return: void + */ +static void +scm_update_dbs_scan_ctrl_ext_flag(struct scan_start_request *req) +{ + struct wlan_objmgr_psoc *psoc; + uint32_t scan_dbs_policy = SCAN_DBS_POLICY_DEFAULT; + bool ndi_present; + + psoc = wlan_vdev_get_psoc(req->vdev); + + if (!policy_mgr_is_dbs_scan_allowed(psoc)) { + scan_dbs_policy = SCAN_DBS_POLICY_FORCE_NONDBS; + goto end; + } + + if (!wlan_scan_cfg_honour_nl_scan_policy_flags(psoc)) { + scm_debug_rl("nl scan policy flags not honoured, goto end"); + goto end; + } + + ndi_present = policy_mgr_mode_specific_connection_count(psoc, + PM_NDI_MODE, + NULL); + + if (ndi_present && !policy_mgr_is_hw_dbs_2x2_capable(psoc)) { + scm_debug("NDP present go for DBS scan"); + goto end; + } + + if (req->scan_req.scan_policy_high_accuracy) { + scm_debug("high accuracy scan received, going for non-dbs scan"); + scan_dbs_policy = SCAN_DBS_POLICY_FORCE_NONDBS; + goto end; + } + if ((req->scan_req.scan_policy_low_power) || + (req->scan_req.scan_policy_low_span)) { + scm_debug("low power/span scan received, going for dbs scan"); + scan_dbs_policy = SCAN_DBS_POLICY_IGNORE_DUTY; + goto end; + } + +end: + req->scan_req.scan_ctrl_flags_ext |= + ((scan_dbs_policy << SCAN_FLAG_EXT_DBS_SCAN_POLICY_BIT) + & SCAN_FLAG_EXT_DBS_SCAN_POLICY_MASK); +} + +/** + * scm_update_passive_dwell_time() - update dwell passive time + * @vdev: vdev object + * @req: scan request + * + * Return: None + */ +static void +scm_update_passive_dwell_time(struct wlan_objmgr_vdev *vdev, + struct scan_start_request *req) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) + return; + + if (policy_mgr_is_sta_connected_2g(psoc) && + !policy_mgr_is_hw_dbs_capable(psoc) && + ucfg_scan_get_bt_activity(psoc)) + req->scan_req.dwell_time_passive = + PASSIVE_DWELL_TIME_BT_A2DP_ENABLED; +} + +static const struct probe_time_dwell_time + scan_probe_time_dwell_time_map[SCAN_DWELL_TIME_PROBE_TIME_MAP_SIZE] = { + {28, 11}, /* 0 SSID */ + {28, 20}, /* 1 SSID */ + {28, 20}, /* 2 SSID */ + {28, 20}, /* 3 SSID */ + {28, 20}, /* 4 SSID */ + {28, 20}, /* 5 SSID */ + {28, 20}, /* 6 SSID */ + {28, 11}, /* 7 SSID */ + {28, 11}, /* 8 SSID */ + {28, 11}, /* 9 SSID */ + {28, 8} /* 10 SSID */ +}; + +/** + * scm_scan_get_burst_duration() - get burst duration depending on max chan + * and miracast. + * @max_ch_time: max channel time + * @miracast_enabled: if miracast is enabled + * + * Return: burst_duration + */ +static inline +int scm_scan_get_burst_duration(int max_ch_time, bool miracast_enabled) +{ + int burst_duration = 0; + + if (miracast_enabled) { + /* + * When miracast is running, burst + * duration needs to be minimum to avoid + * any stutter or glitch in miracast + * during station scan + */ + if (max_ch_time <= SCAN_GO_MIN_ACTIVE_SCAN_BURST_DURATION) + burst_duration = max_ch_time; + else + burst_duration = SCAN_GO_MIN_ACTIVE_SCAN_BURST_DURATION; + } else { + /* + * If miracast is not running, accommodate max + * stations to make the scans faster + */ + burst_duration = SCAN_GO_BURST_SCAN_MAX_NUM_OFFCHANNELS * + max_ch_time; + + if (burst_duration > SCAN_GO_MAX_ACTIVE_SCAN_BURST_DURATION) { + uint8_t channels = SCAN_P2P_SCAN_MAX_BURST_DURATION / + max_ch_time; + + if (channels) + burst_duration = channels * max_ch_time; + else + burst_duration = + SCAN_GO_MAX_ACTIVE_SCAN_BURST_DURATION; + } + } + return burst_duration; +} + +#define SCM_ACTIVE_DWELL_TIME_NAN 60 +#define SCM_ACTIVE_DWELL_TIME_SAP 40 + +/** + * scm_req_update_concurrency_params() - update scan req params depending on + * concurrent mode present. + * @vdev: vdev object pointer + * @req: scan request + * @scan_obj: scan object + * + * Return: void + */ +static void scm_req_update_concurrency_params(struct wlan_objmgr_vdev *vdev, + struct scan_start_request *req, + struct wlan_scan_obj *scan_obj) +{ + bool ap_present, go_present, sta_active, p2p_cli_present, ndi_present; + struct wlan_objmgr_psoc *psoc; + uint16_t sap_peer_count = 0; + uint16_t go_peer_count = 0; + struct wlan_objmgr_pdev *pdev; + + psoc = wlan_vdev_get_psoc(vdev); + pdev = wlan_vdev_get_pdev(vdev); + + if (!psoc || !pdev) + return; + + ap_present = policy_mgr_mode_specific_connection_count( + psoc, PM_SAP_MODE, NULL); + go_present = policy_mgr_mode_specific_connection_count( + psoc, PM_P2P_GO_MODE, NULL); + p2p_cli_present = policy_mgr_mode_specific_connection_count( + psoc, PM_P2P_CLIENT_MODE, NULL); + sta_active = policy_mgr_mode_specific_connection_count( + psoc, PM_STA_MODE, NULL); + ndi_present = policy_mgr_mode_specific_connection_count( + psoc, PM_NDI_MODE, NULL); + if (ap_present) + sap_peer_count = + wlan_util_get_peer_count_for_mode(pdev, QDF_SAP_MODE); + if (go_present) + go_peer_count = + wlan_util_get_peer_count_for_mode(pdev, QDF_P2P_GO_MODE); + + if (!req->scan_req.scan_f_passive) + scm_update_passive_dwell_time(vdev, req); + + if (policy_mgr_get_connection_count(psoc)) { + if (req->scan_req.scan_f_passive) + req->scan_req.dwell_time_passive = + scan_obj->scan_def.conc_passive_dwell; + else + req->scan_req.dwell_time_active = + scan_obj->scan_def.conc_active_dwell; + req->scan_req.max_rest_time = + scan_obj->scan_def.conc_max_rest_time; + req->scan_req.min_rest_time = + scan_obj->scan_def.conc_min_rest_time; + req->scan_req.idle_time = scan_obj->scan_def.conc_idle_time; + } + + if (wlan_vdev_is_up(req->vdev) != QDF_STATUS_SUCCESS) + req->scan_req.adaptive_dwell_time_mode = + scan_obj->scan_def.adaptive_dwell_time_mode_nc; + /* + * If AP/GO is active and has connected clients : + * 1.set min rest time same as max rest time, so that + * firmware spends more time on home channel which will + * increase the probability of sending beacon at TBTT + * 2.if DBS is supported and SAP is not on 2g, + * do not reset active dwell time for 2g. + */ + + /* + * For SAP, the dwell time cannot exceed 32 ms as it can't go + * offchannel more than 32 ms. For Go, since we + * advertise NOA, GO can have regular dwell time which is 40 ms. + */ + if ((ap_present && sap_peer_count) || + (go_present && go_peer_count)) { + if ((policy_mgr_is_hw_dbs_capable(psoc) && + policy_mgr_is_sap_go_on_2g(psoc)) || + !policy_mgr_is_hw_dbs_capable(psoc)) { + if (ap_present) + req->scan_req.dwell_time_active_2g = + QDF_MIN(req->scan_req.dwell_time_active, + (SCAN_CTS_DURATION_MS_MAX - + SCAN_ROAM_SCAN_CHANNEL_SWITCH_TIME)); + else + req->scan_req.dwell_time_active_2g = 0; + } + req->scan_req.min_rest_time = req->scan_req.max_rest_time; + } + + if (policy_mgr_current_concurrency_is_mcc(psoc)) + req->scan_req.min_rest_time = + scan_obj->scan_def.conc_max_rest_time; + + /* + * If scan req for SAP (ACS Sacn) use dwell_time_active_def as dwell + * time for 2g channels instead of dwell_time_active_2g + */ + if (vdev->vdev_mlme.vdev_opmode == QDF_SAP_MODE) + req->scan_req.dwell_time_active_2g = SCM_ACTIVE_DWELL_TIME_SAP; + + if (req->scan_req.scan_type == SCAN_TYPE_DEFAULT) { + /* + * Decide burst_duration and dwell_time_active based on + * what type of devices are active. + */ + do { + if (ap_present && go_present && sta_active) { + if (req->scan_req.dwell_time_active <= + SCAN_3PORT_CONC_SCAN_MAX_BURST_DURATION) + req->scan_req.burst_duration = + req->scan_req.dwell_time_active; + else + req->scan_req.burst_duration = + SCAN_3PORT_CONC_SCAN_MAX_BURST_DURATION; + + break; + } + + if (scan_obj->miracast_enabled && + policy_mgr_is_mcc_in_24G(psoc)) + req->scan_req.max_rest_time = + scan_obj->scan_def.sta_miracast_mcc_rest_time; + + if (go_present) { + /* + * Background scan while GO is sending beacons. + * Every off-channel transition has overhead of + * 2 beacon intervals for NOA. Maximize number + * of channels in every transition by using + * burst scan. + */ + if (scan_obj->scan_def.go_scan_burst_duration) + req->scan_req.burst_duration = + scan_obj-> + scan_def.go_scan_burst_duration; + else + req->scan_req.burst_duration = + scm_scan_get_burst_duration( + req->scan_req. + dwell_time_active, + scan_obj-> + miracast_enabled); + break; + } + if ((sta_active || p2p_cli_present)) { + if (scan_obj->scan_def.sta_scan_burst_duration) + req->scan_req.burst_duration = + scan_obj->scan_def. + sta_scan_burst_duration; + break; + } + + if (go_present && sta_active) { + req->scan_req.burst_duration = + req->scan_req.dwell_time_active; + break; + } + + if (ndi_present || (p2p_cli_present && sta_active)) { + req->scan_req.burst_duration = 0; + break; + } + } while (0); + + if (ap_present) { + uint8_t ssid_num; + + ssid_num = req->scan_req.num_ssids * + req->scan_req.num_bssid; + req->scan_req.repeat_probe_time = + scan_probe_time_dwell_time_map[ + QDF_MIN(ssid_num, + SCAN_DWELL_TIME_PROBE_TIME_MAP_SIZE + - 1)].probe_time; + req->scan_req.n_probes = + (req->scan_req.repeat_probe_time > 0) ? + req->scan_req.dwell_time_active / + req->scan_req.repeat_probe_time : 0; + } + } + + if (ap_present) { + uint16_t ap_chan_freq; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + + ap_chan_freq = policy_mgr_get_channel(psoc, PM_SAP_MODE, NULL); + /* + * P2P/STA scan while SoftAP is sending beacons. + * Max duration of CTS2self is 32 ms, which limits the + * dwell time. + * If DBS is supported and: + * 1.if SAP is on 2G channel then keep passive + * dwell time default. + * 2.if SAP is on 5G/6G channel then update dwell time active. + */ + if (sap_peer_count) { + if (policy_mgr_is_hw_dbs_capable(psoc) && + (WLAN_REG_IS_5GHZ_CH_FREQ(ap_chan_freq) || + WLAN_REG_IS_6GHZ_CHAN_FREQ(ap_chan_freq))) { + req->scan_req.dwell_time_active = + QDF_MIN(req->scan_req.dwell_time_active, + (SCAN_CTS_DURATION_MS_MAX - + SCAN_ROAM_SCAN_CHANNEL_SWITCH_TIME)); + } + if (!policy_mgr_is_hw_dbs_capable(psoc) || + (policy_mgr_is_hw_dbs_capable(psoc) && + WLAN_REG_IS_5GHZ_CH_FREQ(ap_chan_freq))) { + req->scan_req.dwell_time_passive = + req->scan_req.dwell_time_active; + } + } + + if (scan_obj->scan_def.ap_scan_burst_duration) { + req->scan_req.burst_duration = + scan_obj->scan_def.ap_scan_burst_duration; + } else { + req->scan_req.burst_duration = 0; + if (wlan_reg_is_dfs_for_freq(pdev, ap_chan_freq)) + req->scan_req.burst_duration = + SCAN_BURST_SCAN_MAX_NUM_OFFCHANNELS * + req->scan_req.dwell_time_active; + } + } + + if (ndi_present) { + req->scan_req.dwell_time_active = + SCM_ACTIVE_DWELL_TIME_NAN; + req->scan_req.dwell_time_active_2g = + QDF_MIN(req->scan_req.dwell_time_active_2g, + SCM_ACTIVE_DWELL_TIME_NAN); + scm_debug("NDP active modify dwell time 2ghz %d", + req->scan_req.dwell_time_active_2g); + } +} + +/** + * scm_scan_chlist_concurrency_modify() - modify chan list to skip 5G if + * required + * @vdev: vdev object + * @req: scan request + * + * Check and skip 5G chan list based on DFS AP present and current hw mode. + * + * Return: void + */ +static inline void scm_scan_chlist_concurrency_modify( + struct wlan_objmgr_vdev *vdev, struct scan_start_request *req) +{ + struct wlan_objmgr_psoc *psoc; + uint32_t i; + uint32_t num_scan_channels; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) + return; + /* do this only for STA and P2P-CLI mode */ + if (!(wlan_vdev_mlme_get_opmode(req->vdev) == QDF_STA_MODE) && + !(wlan_vdev_mlme_get_opmode(req->vdev) == QDF_P2P_CLIENT_MODE)) + return; + if (!policy_mgr_scan_trim_5g_chnls_for_dfs_ap(psoc)) + return; + num_scan_channels = 0; + for (i = 0; i < req->scan_req.chan_list.num_chan; i++) { + if (WLAN_REG_IS_5GHZ_CH_FREQ( + req->scan_req.chan_list.chan[i].freq)) { + continue; + } + req->scan_req.chan_list.chan[num_scan_channels++] = + req->scan_req.chan_list.chan[i]; + } + if (num_scan_channels < req->scan_req.chan_list.num_chan) + scm_debug("5g chan skipped (%d, %d)", + req->scan_req.chan_list.num_chan, num_scan_channels); + req->scan_req.chan_list.num_chan = num_scan_channels; +} +#else +static inline +void scm_req_update_concurrency_params(struct wlan_objmgr_vdev *vdev, + struct scan_start_request *req, + struct wlan_scan_obj *scan_obj) +{ +} + +static inline void +scm_update_dbs_scan_ctrl_ext_flag(struct scan_start_request *req) +{ +} + +static inline void scm_scan_chlist_concurrency_modify( + struct wlan_objmgr_vdev *vdev, struct scan_start_request *req) +{ +} +#endif + +#ifdef CONFIG_BAND_6GHZ +static void +scm_update_6ghz_channel_list(struct wlan_objmgr_vdev *vdev, + struct chan_list *chan_list, + struct wlan_scan_obj *scan_obj) +{ + uint8_t i; + struct regulatory_channel *chan_list_6g; + bool psc_channel_found = false; + bool channel_6g_found = false; + uint8_t num_scan_channels = 0, channel_count; + struct wlan_objmgr_pdev *pdev; + uint32_t freq; + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) + return; + + scm_debug("6g scan mode %d", scan_obj->scan_def.scan_mode_6g); + for (i = 0; i < chan_list->num_chan; i++) { + freq = chan_list->chan[i].freq; + if ((scan_obj->scan_def.scan_mode_6g == + SCAN_MODE_6G_NO_CHANNEL) && + (wlan_reg_is_6ghz_chan_freq(freq))) { + /* Drop the 6Ghz channels */ + continue; + } else if ((scan_obj->scan_def.scan_mode_6g == + SCAN_MODE_6G_PSC_CHANNEL) && + (wlan_reg_is_6ghz_chan_freq(freq))) { + /* Allow only PSC channels */ + if (wlan_reg_is_6ghz_psc_chan_freq(freq)) + psc_channel_found = true; + else + continue; + } else if ((scan_obj->scan_def.scan_mode_6g == + SCAN_MODE_6G_ALL_CHANNEL) && + (wlan_reg_is_6ghz_chan_freq(freq))) { + /* Allow any 6ghz channel */ + channel_6g_found = true; + } + chan_list->chan[num_scan_channels++] = + chan_list->chan[i]; + } + + scm_debug("psc_channel_found %d channel_6g_found%d", + psc_channel_found, channel_6g_found); + if ((scan_obj->scan_def.scan_mode_6g == SCAN_MODE_6G_PSC_CHANNEL && + !psc_channel_found) || + (scan_obj->scan_def.scan_mode_6g == SCAN_MODE_6G_ALL_CHANNEL && + !channel_6g_found)) { + chan_list_6g = qdf_mem_malloc(NUM_6GHZ_CHANNELS * + sizeof(struct regulatory_channel)); + if (!chan_list_6g) + goto end; + + /* Add the 6Ghz channels based on config*/ + channel_count = wlan_reg_get_band_channel_list(pdev, + BIT(REG_BAND_6G), + chan_list_6g); + scm_debug("Number of 6G channels %d", channel_count); + for (i = 0; i < channel_count; i++) { + if ((scan_obj->scan_def.scan_mode_6g == + SCAN_MODE_6G_PSC_CHANNEL) && + (!psc_channel_found) && + wlan_reg_is_6ghz_psc_chan_freq(chan_list_6g[i]. + center_freq)) { + chan_list->chan[num_scan_channels++].freq = + chan_list_6g[i].center_freq; + } else if ((scan_obj->scan_def.scan_mode_6g == + SCAN_MODE_6G_ALL_CHANNEL) && + (!channel_6g_found)) { + chan_list->chan[num_scan_channels++].freq = + chan_list_6g[i].center_freq; + } + } + qdf_mem_free(chan_list_6g); + } +end: + chan_list->num_chan = num_scan_channels; +} +#else +static void +scm_update_6ghz_channel_list(struct wlan_objmgr_vdev *vdev, + struct chan_list *chan_list, + struct wlan_scan_obj *scan_obj) +{ +} +#endif + +#ifdef FEATURE_6G_SCAN_CHAN_SORT_ALGO +static void scm_sort_6ghz_channel_list(struct wlan_objmgr_vdev *vdev, + struct chan_list *chan_list) +{ + uint8_t i, j = 0, max, tmp_list_count; + struct meta_rnr_channel *channel; + struct chan_info temp_list[MAX_6GHZ_CHANNEL]; + struct rnr_chan_weight *rnr_chan_info, temp; + uint32_t weight; + struct wlan_objmgr_psoc *psoc; + + rnr_chan_info = qdf_mem_malloc(sizeof(rnr_chan_info) * MAX_6GHZ_CHANNEL); + if (!rnr_chan_info) + return; + + for (i = 0; i < chan_list->num_chan; i++) { + if (WLAN_REG_IS_6GHZ_CHAN_FREQ(chan_list->chan[i].freq)) + temp_list[j++].freq = chan_list->chan[i].freq; + } + tmp_list_count = j; + scm_debug("Total 6ghz channels %d", tmp_list_count); + + /* No Need to sort if the 6ghz channels are less than one */ + if (tmp_list_count < 1) { + qdf_mem_free(rnr_chan_info); + return; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + scm_err("Psoc is NULL"); + return; + } + + /* compute the weightage */ + for (i = 0, j = 0; i < tmp_list_count; i++) { + channel = scm_get_chan_meta(psoc, temp_list[i].freq); + if (!channel) + continue; + weight = channel->bss_beacon_probe_count * BCN_PROBE_WEIGHTAGE + + channel->saved_profile_count * SAVED_PROFILE_WEIGHTAGE; + rnr_chan_info[j].weight = weight; + rnr_chan_info[j].chan_freq = temp_list[i].freq; + j++; + scm_debug("Freq %d weight %d bcn_cnt %d", temp_list[i].freq, + weight, channel->bss_beacon_probe_count); + } + + /* Sort the channel using selection sort - descending order */ + for (i = 0; i < tmp_list_count - 1; i++) { + max = i; + for (j = i + 1; j < tmp_list_count; j++) { + if (rnr_chan_info[j].weight > + rnr_chan_info[max].weight) + max = j; + } + if (max != i) { + qdf_mem_copy(&temp, &rnr_chan_info[max], + sizeof(*rnr_chan_info)); + qdf_mem_copy(&rnr_chan_info[max], &rnr_chan_info[i], + sizeof(*rnr_chan_info)); + qdf_mem_copy(&rnr_chan_info[i], &temp, + sizeof(*rnr_chan_info)); + } + } + + /* update the 6g list based on the weightage */ + for (i = 0, j = 0; (i < NUM_CHANNELS && j < tmp_list_count); i++) { + if (wlan_reg_is_6ghz_chan_freq(chan_list->chan[i].freq)) + chan_list->chan[i].freq = rnr_chan_info[j++].chan_freq; + } + qdf_mem_free(rnr_chan_info); +} + +static void scm_update_rnr_info(struct wlan_objmgr_psoc *psoc, + struct scan_start_request *req) +{ + uint8_t i, num_bssid = 0, num_ssid = 0; + uint8_t total_count = MAX_HINTS_PER_SCAN_REQ; + uint32_t freq; + struct meta_rnr_channel *chan; + qdf_list_node_t *cur_node, *next_node = NULL; + struct scan_rnr_node *rnr_node; + struct chan_list *chan_list; + QDF_STATUS status; + + if (!req) + return; + + chan_list = &req->scan_req.chan_list; + for (i = 0; i < chan_list->num_chan; i++) { + freq = chan_list->chan[i].freq; + + chan = scm_get_chan_meta(psoc, freq); + if (!chan) { + scm_debug("Failed to get meta, freq %d", freq); + continue; + } + if (qdf_list_empty(&chan->rnr_list)) + continue; + + qdf_list_peek_front(&chan->rnr_list, &cur_node); + while (cur_node && total_count) { + rnr_node = qdf_container_of(cur_node, + struct scan_rnr_node, + node); + if (!qdf_is_macaddr_zero(&rnr_node->entry.bssid) && + req->scan_req.num_hint_bssid < + WLAN_SCAN_MAX_HINT_BSSID) { + qdf_mem_copy(&req->scan_req.hint_bssid[num_bssid++].bssid, + &rnr_node->entry.bssid, + QDF_MAC_ADDR_SIZE); + req->scan_req.num_hint_bssid++; + total_count--; + } else if (rnr_node->entry.short_ssid && + req->scan_req.num_hint_s_ssid < + WLAN_SCAN_MAX_HINT_S_SSID) { + req->scan_req.hint_s_ssid[num_ssid++].short_ssid = + rnr_node->entry.short_ssid; + req->scan_req.num_hint_s_ssid++; + total_count--; + } + status = qdf_list_peek_next(&chan->rnr_list, cur_node, + &next_node); + if (QDF_IS_STATUS_ERROR(status)) + break; + cur_node = next_node; + next_node = NULL; + } + } +} + +static void scm_add_rnr_info(struct wlan_objmgr_pdev *pdev, + struct scan_start_request *req) +{ + struct wlan_objmgr_psoc *psoc; + struct channel_list_db *rnr_db; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) + return; + rnr_db = scm_get_rnr_channel_db(psoc); + if (!rnr_db) + return; + + rnr_db->scan_count++; + if (rnr_db->scan_count >= RNR_UPDATE_SCAN_CNT_THRESHOLD) { + rnr_db->scan_count = 0; + scm_rnr_db_flush(psoc); + scm_update_rnr_from_scan_cache(pdev); + } + + scm_update_rnr_info(psoc, req); +} + +#else +static void scm_sort_6ghz_channel_list(struct wlan_objmgr_vdev *vdev, + struct chan_list *chan_list) +{ +} + +static void scm_add_rnr_info(struct wlan_objmgr_pdev *pdev, + struct scan_start_request *req) +{ +} +#endif + +/** + * scm_update_channel_list() - update scan req params depending on dfs inis + * and initial scan request. + * @req: scan request + * @scan_obj: scan object + * + * Return: void + */ +static void +scm_update_channel_list(struct scan_start_request *req, + struct wlan_scan_obj *scan_obj) +{ + uint8_t i; + uint8_t num_scan_channels = 0; + struct scan_vdev_obj *scan_vdev_obj; + struct wlan_objmgr_pdev *pdev; + bool first_scan_done = true; + bool p2p_search = false; + bool skip_dfs_ch = true; + uint32_t first_freq; + + pdev = wlan_vdev_get_pdev(req->vdev); + + scan_vdev_obj = wlan_get_vdev_scan_obj(req->vdev); + if (!scan_vdev_obj) { + scm_err("null scan_vdev_obj"); + return; + } + + if (!scan_vdev_obj->first_scan_done) { + first_scan_done = false; + scan_vdev_obj->first_scan_done = true; + } + + if (req->scan_req.scan_type == SCAN_TYPE_P2P_SEARCH) + p2p_search = true; + /* + * No need to update channels if req is single channel* ie ROC, + * Preauth or a single channel scan etc. + * If the single chan in the scan channel list is an NOL channel,it is + * removed and it would reduce the number of scan channels to 0. + */ + first_freq = req->scan_req.chan_list.chan[0].freq; + if ((req->scan_req.chan_list.num_chan == 1) && + (!utils_dfs_is_freq_in_nol(pdev, first_freq))) + return; + + /* do this only for STA and P2P-CLI mode */ + if ((!(wlan_vdev_mlme_get_opmode(req->vdev) == QDF_STA_MODE) && + !(wlan_vdev_mlme_get_opmode(req->vdev) == QDF_P2P_CLIENT_MODE)) && + !p2p_search) + skip_dfs_ch = false; + + if ((scan_obj->scan_def.allow_dfs_chan_in_scan && + (scan_obj->scan_def.allow_dfs_chan_in_first_scan || + first_scan_done)) && + !(scan_obj->scan_def.skip_dfs_chan_in_p2p_search && p2p_search) && + !scan_obj->miracast_enabled) + skip_dfs_ch = false; + + for (i = 0; i < req->scan_req.chan_list.num_chan; i++) { + uint32_t freq; + + freq = req->scan_req.chan_list.chan[i].freq; + if (skip_dfs_ch && + wlan_reg_chan_has_dfs_attribute_for_freq(pdev, freq)) { + scm_nofl_debug("Skip DFS freq %d", freq); + continue; + } + if (utils_dfs_is_freq_in_nol(pdev, freq)) { + scm_nofl_debug("Skip NOL freq %d", freq); + continue; + } + + req->scan_req.chan_list.chan[num_scan_channels++] = + req->scan_req.chan_list.chan[i]; + } + + req->scan_req.chan_list.num_chan = num_scan_channels; + /* Dont upadte the channel list for SAP mode */ + if (wlan_vdev_mlme_get_opmode(req->vdev) != QDF_SAP_MODE) { + scm_update_6ghz_channel_list(req->vdev, + &req->scan_req.chan_list, + scan_obj); + scm_sort_6ghz_channel_list(req->vdev, &req->scan_req.chan_list); + } + scm_scan_chlist_concurrency_modify(req->vdev, req); +} + +/** + * scm_scan_req_update_params() - update scan req params depending on modes + * and scan type. + * @vdev: vdev object pointer + * @req: scan request + * @scan_obj: scan object + * + * Return: void + */ +static void +scm_scan_req_update_params(struct wlan_objmgr_vdev *vdev, + struct scan_start_request *req, + struct wlan_scan_obj *scan_obj) +{ + struct chan_list *custom_chan_list; + struct wlan_objmgr_pdev *pdev; + uint8_t pdev_id; + + /* Ensure correct number of probes are sent on active channel */ + if (!req->scan_req.repeat_probe_time) + req->scan_req.repeat_probe_time = + req->scan_req.dwell_time_active / SCAN_NPROBES_DEFAULT; + + if (req->scan_req.scan_f_passive) + req->scan_req.scan_ctrl_flags_ext |= + SCAN_FLAG_EXT_FILTER_PUBLIC_ACTION_FRAME; + + if (!req->scan_req.n_probes) + req->scan_req.n_probes = (req->scan_req.repeat_probe_time > 0) ? + req->scan_req.dwell_time_active / + req->scan_req.repeat_probe_time : 0; + + if (req->scan_req.scan_type == SCAN_TYPE_P2P_SEARCH || + req->scan_req.scan_type == SCAN_TYPE_P2P_LISTEN) { + req->scan_req.adaptive_dwell_time_mode = SCAN_DWELL_MODE_STATIC; + req->scan_req.dwell_time_active_2g = 0; + if (req->scan_req.scan_type == SCAN_TYPE_P2P_LISTEN) { + req->scan_req.repeat_probe_time = 0; + } else { + req->scan_req.scan_f_filter_prb_req = true; + if (!req->scan_req.num_ssids) + req->scan_req.scan_f_bcast_probe = true; + + req->scan_req.dwell_time_active += + P2P_SEARCH_DWELL_TIME_INC; + /* + * 3 channels with default max dwell time 40 ms. + * Cap limit will be set by + * P2P_SCAN_MAX_BURST_DURATION. Burst duration + * should be such that no channel is scanned less + * than the dwell time in normal scenarios. + */ + if (req->scan_req.chan_list.num_chan == + WLAN_P2P_SOCIAL_CHANNELS && + !scan_obj->miracast_enabled) + req->scan_req.repeat_probe_time = + req->scan_req.dwell_time_active / 5; + else + req->scan_req.repeat_probe_time = + req->scan_req.dwell_time_active / 3; + if (scan_obj->scan_def.p2p_scan_burst_duration) { + req->scan_req.burst_duration = + scan_obj->scan_def. + p2p_scan_burst_duration; + } else { + req->scan_req.burst_duration = + BURST_SCAN_MAX_NUM_OFFCHANNELS * + req->scan_req.dwell_time_active; + if (req->scan_req.burst_duration > + P2P_SCAN_MAX_BURST_DURATION) { + uint8_t channels = + P2P_SCAN_MAX_BURST_DURATION / + req->scan_req.dwell_time_active; + if (channels) + req->scan_req.burst_duration = + channels * + req->scan_req.dwell_time_active; + else + req->scan_req.burst_duration = + P2P_SCAN_MAX_BURST_DURATION; + } + } + req->scan_req.scan_ev_bss_chan = false; + } + } else { + req->scan_req.scan_f_cck_rates = true; + if (!req->scan_req.num_ssids) + req->scan_req.scan_f_bcast_probe = true; + req->scan_req.scan_f_add_ds_ie_in_probe = true; + req->scan_req.scan_f_filter_prb_req = true; + req->scan_req.scan_f_add_tpc_ie_in_probe = true; + } + + scm_update_dbs_scan_ctrl_ext_flag(req); + + /* + * No need to update conncurrency parmas if req is passive scan on + * single channel ie ROC, Preauth etc + */ + if (!(req->scan_req.scan_f_passive && + req->scan_req.chan_list.num_chan == 1) && + req->scan_req.scan_type != SCAN_TYPE_RRM) + scm_req_update_concurrency_params(vdev, req, scan_obj); + + /* + * Set wide band flag if enabled. This will cause + * phymode TLV being sent to FW. + */ + pdev = wlan_vdev_get_pdev(vdev); + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + if (ucfg_scan_get_wide_band_scan(pdev)) + req->scan_req.scan_f_wide_band = true; + else + req->scan_req.scan_f_wide_band = false; + + /* + * Overwrite scan channles with custom scan channel + * list if configured. + */ + custom_chan_list = &scan_obj->pdev_info[pdev_id].custom_chan_list; + if (custom_chan_list->num_chan) + qdf_mem_copy(&req->scan_req.chan_list, custom_chan_list, + sizeof(struct chan_list)); + else if (!req->scan_req.chan_list.num_chan) + ucfg_scan_init_chanlist_params(req, 0, NULL, NULL); + + if (scan_obj->scan_def.scan_mode_6g != SCAN_MODE_6G_NO_CHANNEL) + scm_add_rnr_info(pdev, req); + scm_update_channel_list(req, scan_obj); +} + +static inline void scm_print_scan_req_info(struct scan_req_params *req) +{ + uint32_t buff_len; + char *chan_buff; + uint32_t len = 0; + uint8_t idx, count = 0; + struct chan_list *chan_lst; +#define MAX_SCAN_FREQ_TO_PRINT 60 + + scm_nofl_debug("Scan start: scan id %d vdev %d Dwell time: act %d pass %d act_2G %d act_6G %d pass_6G %d, probe time %d n_probes %d flags %x ext_flag %x events %x policy %d wide_bw %d pri %d", + req->scan_id, req->vdev_id, req->dwell_time_active, + req->dwell_time_passive, req->dwell_time_active_2g, + req->dwell_time_active_6g, req->dwell_time_passive_6g, + req->repeat_probe_time, req->n_probes, req->scan_flags, + req->scan_ctrl_flags_ext, req->scan_events, + req->scan_policy_type, req->scan_f_wide_band, + req->scan_priority); + + for (idx = 0; idx < req->num_ssids; idx++) + scm_nofl_debug("SSID[%d]: %.*s", idx, req->ssid[idx].length, + req->ssid[idx].ssid); + + chan_lst = &req->chan_list; + + if (!chan_lst->num_chan) + return; + /* + * Buffer of (num channl * 5) + 1 to consider the 4 char freq and + * 1 space after it for each channel and 1 to end the string with NULL. + */ + buff_len = + (QDF_MIN(MAX_SCAN_FREQ_TO_PRINT, chan_lst->num_chan) * 5) + 1; + chan_buff = qdf_mem_malloc(buff_len); + if (!chan_buff) + return; + scm_nofl_debug("Total freq %d", chan_lst->num_chan); + for (idx = 0; idx < chan_lst->num_chan; idx++) { + len += qdf_scnprintf(chan_buff + len, buff_len - len, "%d ", + chan_lst->chan[idx].freq); + count++; + if (count >= MAX_SCAN_FREQ_TO_PRINT) { + /* Print the MAX_SCAN_FREQ_TO_PRINT channels */ + scm_nofl_debug("Freq list: %s", chan_buff); + len = 0; + count = 0; + } + } + if (len) + scm_nofl_debug("Freq list: %s", chan_buff); + + qdf_mem_free(chan_buff); +} +QDF_STATUS +scm_scan_start_req(struct scheduler_msg *msg) +{ + struct wlan_serialization_command cmd = {0, }; + enum wlan_serialization_status ser_cmd_status; + struct scan_start_request *req = NULL; + struct wlan_scan_obj *scan_obj; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + + if (!msg) { + scm_err("msg received is NULL"); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + if (!msg->bodyptr) { + scm_err("bodyptr is NULL"); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + req = msg->bodyptr; + + if (!scm_is_scan_allowed(req->vdev)) { + scm_err("scan disabled, rejecting the scan req"); + status = QDF_STATUS_E_NULL_VALUE; + goto err; + } + + scan_obj = wlan_vdev_get_scan_obj(req->vdev); + if (!scan_obj) { + scm_debug("Couldn't find scan object"); + status = QDF_STATUS_E_NULL_VALUE; + goto err; + } + + scm_scan_req_update_params(req->vdev, req, scan_obj); + scm_print_scan_req_info(&req->scan_req); + + if (!req->scan_req.chan_list.num_chan) { + scm_info("Reject 0 channel Scan"); + status = QDF_STATUS_E_NULL_VALUE; + goto err; + } + + cmd.cmd_type = WLAN_SER_CMD_SCAN; + cmd.cmd_id = req->scan_req.scan_id; + cmd.cmd_cb = scm_scan_serialize_callback; + cmd.umac_cmd = req; + cmd.source = WLAN_UMAC_COMP_SCAN; + cmd.is_high_priority = false; + cmd.cmd_timeout_duration = req->scan_req.max_scan_time + + SCAN_TIMEOUT_GRACE_PERIOD; + cmd.vdev = req->vdev; + + if (scan_obj->disable_timeout) + cmd.cmd_timeout_duration = 0; + + qdf_mtrace(QDF_MODULE_ID_SCAN, QDF_MODULE_ID_SERIALIZATION, + WLAN_SER_CMD_SCAN, req->vdev->vdev_objmgr.vdev_id, + req->scan_req.scan_id); + + ser_cmd_status = wlan_serialization_request(&cmd); + switch (ser_cmd_status) { + case WLAN_SER_CMD_PENDING: + /* command moved to pending list.Do nothing */ + break; + case WLAN_SER_CMD_ACTIVE: + /* command moved to active list. Do nothing */ + break; + default: + scm_debug("ser cmd status %d", ser_cmd_status); + goto err; + } + + return status; +err: + /* + * notify registered scan event handlers + * about internal error + */ + scm_post_internal_scan_complete_event(req, + SCAN_REASON_INTERNAL_FAILURE); + /* + * cmd can't be serviced. + * release vdev reference and free scan_start_request memory + */ + if (req) { + wlan_objmgr_vdev_release_ref(req->vdev, WLAN_SCAN_ID); + scm_scan_free_scan_request_mem(req); + } + + return status; +} + +static inline enum wlan_serialization_cancel_type +get_serialization_cancel_type(enum scan_cancel_req_type type) +{ + enum wlan_serialization_cancel_type serialization_type; + + switch (type) { + case WLAN_SCAN_CANCEL_SINGLE: + serialization_type = WLAN_SER_CANCEL_SINGLE_SCAN; + break; + case WLAN_SCAN_CANCEL_VDEV_ALL: + serialization_type = WLAN_SER_CANCEL_VDEV_SCANS; + break; + case WLAN_SCAN_CANCEL_PDEV_ALL: + serialization_type = WLAN_SER_CANCEL_PDEV_SCANS; + break; + case WLAN_SCAN_CANCEL_HOST_VDEV_ALL: + serialization_type = WLAN_SER_CANCEL_VDEV_HOST_SCANS; + break; + default: + QDF_ASSERT(0); + scm_warn("invalid scan_cancel_req_type: %d", type); + serialization_type = WLAN_SER_CANCEL_PDEV_SCANS; + break; + } + + return serialization_type; +} + +QDF_STATUS +scm_scan_cancel_req(struct scheduler_msg *msg) +{ + struct wlan_serialization_queued_cmd_info cmd = {0,}; + struct wlan_serialization_command ser_cmd = {0,}; + enum wlan_serialization_cmd_status ser_cmd_status; + struct scan_cancel_request *req; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!msg) { + scm_err("msg received is NULL"); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + if (!msg->bodyptr) { + scm_err("Bodyptr is NULL"); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + req = msg->bodyptr; + /* + * If requester wants to wait for target scan cancel event + * instead of internally generated cancel event, just check + * which queue this scan request belongs to and send scan + * cancel request to FW accordingly. + * Else generate internal scan cancel event and notify + * handlers and free scan request resources. + */ + if (req->wait_tgt_cancel && + (req->cancel_req.req_type == WLAN_SCAN_CANCEL_SINGLE)) { + ser_cmd.cmd_type = WLAN_SER_CMD_SCAN; + ser_cmd.cmd_id = req->cancel_req.scan_id; + ser_cmd.cmd_cb = NULL; + ser_cmd.umac_cmd = NULL; + ser_cmd.source = WLAN_UMAC_COMP_SCAN; + ser_cmd.is_high_priority = false; + ser_cmd.vdev = req->vdev; + if (wlan_serialization_is_cmd_present_in_active_queue(NULL, &ser_cmd)) + ser_cmd_status = WLAN_SER_CMD_IN_ACTIVE_LIST; + else if (wlan_serialization_is_cmd_present_in_pending_queue(NULL, &ser_cmd)) + ser_cmd_status = WLAN_SER_CMD_IN_PENDING_LIST; + else + ser_cmd_status = WLAN_SER_CMD_NOT_FOUND; + } else { + cmd.requestor = 0; + cmd.cmd_type = WLAN_SER_CMD_SCAN; + cmd.cmd_id = req->cancel_req.scan_id; + cmd.vdev = req->vdev; + cmd.queue_type = WLAN_SERIALIZATION_ACTIVE_QUEUE | + WLAN_SERIALIZATION_PENDING_QUEUE; + cmd.req_type = get_serialization_cancel_type(req->cancel_req.req_type); + + ser_cmd_status = wlan_serialization_cancel_request(&cmd); + } + + scm_debug("status: %d, reqid: %d, scanid: %d, vdevid: %d, type: %d", + ser_cmd_status, req->cancel_req.requester, + req->cancel_req.scan_id, req->cancel_req.vdev_id, + req->cancel_req.req_type); + + switch (ser_cmd_status) { + case WLAN_SER_CMD_IN_PENDING_LIST: + /* do nothing */ + break; + case WLAN_SER_CMD_IN_ACTIVE_LIST: + case WLAN_SER_CMDS_IN_ALL_LISTS: + /* send wmi scan cancel to fw */ + status = tgt_scan_cancel(req); + break; + case WLAN_SER_CMD_NOT_FOUND: + /* do nothing */ + break; + default: + QDF_ASSERT(0); + status = QDF_STATUS_E_INVAL; + break; + } + + /* Release vdev reference and scan cancel request + * processing is complete + */ + wlan_objmgr_vdev_release_ref(req->vdev, WLAN_SCAN_ID); + /* Free cancel request memory */ + qdf_mem_free(req); + + return status; +} + +#ifdef FEATURE_WLAN_SCAN_PNO +static QDF_STATUS +scm_pno_event_handler(struct wlan_objmgr_vdev *vdev, + struct scan_event *event) +{ + struct scan_vdev_obj *scan_vdev_obj; + struct wlan_scan_obj *scan_psoc_obj; + scan_event_handler pno_cb; + void *cb_arg; + + scan_vdev_obj = wlan_get_vdev_scan_obj(vdev); + scan_psoc_obj = wlan_vdev_get_scan_obj(vdev); + if (!scan_vdev_obj || !scan_psoc_obj) { + scm_err("null scan_vdev_obj %pK scan_obj %pK", + scan_vdev_obj, scan_psoc_obj); + return QDF_STATUS_E_INVAL; + } + + switch (event->type) { + case SCAN_EVENT_TYPE_NLO_COMPLETE: + if (!scan_vdev_obj->pno_match_evt_received) + return QDF_STATUS_SUCCESS; + qdf_wake_lock_release(&scan_psoc_obj->pno_cfg.pno_wake_lock, + WIFI_POWER_EVENT_WAKELOCK_PNO); + qdf_wake_lock_timeout_acquire( + &scan_psoc_obj->pno_cfg.pno_wake_lock, + SCAN_PNO_SCAN_COMPLETE_WAKE_LOCK_TIMEOUT); + scan_vdev_obj->pno_match_evt_received = false; + break; + case SCAN_EVENT_TYPE_NLO_MATCH: + scan_vdev_obj->pno_match_evt_received = true; + qdf_wake_lock_timeout_acquire( + &scan_psoc_obj->pno_cfg.pno_wake_lock, + SCAN_PNO_MATCH_WAKE_LOCK_TIMEOUT); + return QDF_STATUS_SUCCESS; + default: + return QDF_STATUS_E_INVAL; + } + qdf_spin_lock_bh(&scan_psoc_obj->lock); + pno_cb = scan_psoc_obj->pno_cfg.pno_cb.func; + cb_arg = scan_psoc_obj->pno_cfg.pno_cb.arg; + qdf_spin_unlock_bh(&scan_psoc_obj->lock); + + if (pno_cb) + pno_cb(vdev, event, cb_arg); + + return QDF_STATUS_SUCCESS; +} +#else + +static QDF_STATUS +scm_pno_event_handler(struct wlan_objmgr_vdev *vdev, + struct scan_event *event) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * scm_scan_update_scan_event() - update scan event + * @scan: scan object + * @event: scan event + * @scan_start_req: scan_start_req used for triggering scan + * + * update scan params in scan event + * + * Return: QDF_STATUS + */ +static QDF_STATUS +scm_scan_update_scan_event(struct wlan_scan_obj *scan, + struct scan_event *event, + struct scan_start_request *scan_start_req) +{ + if (!event) + return QDF_STATUS_E_NULL_VALUE; + + if (!scan || !scan_start_req) { + event->scan_start_req = NULL; + return QDF_STATUS_E_NULL_VALUE; + } + /* copy scan start request to pass back buffer */ + qdf_mem_copy(&scan->scan_start_request_buff, scan_start_req, + sizeof(struct scan_start_request)); + /* reset all pointers */ + scan->scan_start_request_buff.scan_req.extraie.ptr = NULL; + scan->scan_start_request_buff.scan_req.extraie.len = 0; + scan->scan_start_request_buff.scan_req.htcap.ptr = NULL; + scan->scan_start_request_buff.scan_req.htcap.len = 0; + scan->scan_start_request_buff.scan_req.vhtcap.ptr = NULL; + scan->scan_start_request_buff.scan_req.vhtcap.len = 0; + + event->scan_start_req = &scan->scan_start_request_buff; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +scm_scan_event_handler(struct scheduler_msg *msg) +{ + struct wlan_objmgr_vdev *vdev; + struct scan_event *event; + struct scan_event_info *event_info; + struct wlan_serialization_command cmd = {0,}; + struct wlan_serialization_command *queued_cmd; + struct scan_start_request *scan_start_req; + struct wlan_scan_obj *scan; + + if (!msg) { + scm_err("NULL msg received "); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + if (!msg->bodyptr) { + scm_err("NULL scan event received"); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + event_info = msg->bodyptr; + vdev = event_info->vdev; + event = &(event_info->event); + + scm_debug("vdevid:%d, type:%d, reason:%d, freq:%d, reqstr:%d, scanid:%d", + event->vdev_id, event->type, event->reason, event->chan_freq, + event->requester, event->scan_id); + /* + * NLO requests are never queued, so post NLO events + * without checking for their presence in active queue. + */ + switch (event->type) { + case SCAN_EVENT_TYPE_NLO_COMPLETE: + case SCAN_EVENT_TYPE_NLO_MATCH: + scm_pno_event_handler(vdev, event); + goto exit; + default: + break; + } + + cmd.cmd_type = WLAN_SER_CMD_SCAN; + cmd.cmd_id = event->scan_id; + cmd.cmd_cb = NULL; + cmd.umac_cmd = NULL; + cmd.source = WLAN_UMAC_COMP_SCAN; + cmd.is_high_priority = false; + cmd.vdev = vdev; + if (!wlan_serialization_is_cmd_present_in_active_queue(NULL, &cmd)) { + /* + * We received scan event for an already completed/cancelled + * scan request. Drop this event. + */ + scm_debug("Received scan event while request not in active queue"); + goto exit; + } + + /* Fill scan_start_request used to trigger this scan */ + queued_cmd = wlan_serialization_get_scan_cmd_using_scan_id( + wlan_vdev_get_psoc(vdev), wlan_vdev_get_id(vdev), + event->scan_id, true); + + if (!queued_cmd) { + scm_err("NULL queued_cmd"); + goto exit; + } + if (!queued_cmd->umac_cmd) { + scm_err("NULL umac_cmd"); + goto exit; + } + scan_start_req = queued_cmd->umac_cmd; + + if (scan_start_req->scan_req.scan_req_id != event->requester) { + scm_err("req ID mismatch, scan_req_id:%d, event_req_id:%d", + scan_start_req->scan_req.scan_req_id, event->requester); + goto exit; + } + + scan = wlan_vdev_get_scan_obj(vdev); + if (scan) + scm_scan_update_scan_event(scan, event, scan_start_req); + + switch (event->type) { + case SCAN_EVENT_TYPE_COMPLETED: + if (event->reason == SCAN_REASON_COMPLETED) + scm_11d_decide_country_code(vdev); + /* fall through to release the command */ + case SCAN_EVENT_TYPE_START_FAILED: + case SCAN_EVENT_TYPE_DEQUEUED: + scm_release_serialization_command(vdev, event->scan_id); + break; + default: + break; + } + + /* Notify all interested parties */ + scm_scan_post_event(vdev, event); + +exit: + /* free event info memory */ + qdf_mem_free(event_info); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SCAN_ID); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scm_scan_event_flush_callback(struct scheduler_msg *msg) +{ + struct wlan_objmgr_vdev *vdev; + struct scan_event_info *event_info; + struct scan_event *event; + + if (!msg || !msg->bodyptr) { + scm_err("msg or msg->bodyptr is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + event_info = msg->bodyptr; + vdev = event_info->vdev; + event = &event_info->event; + + scm_debug("Flush scan event vdev %d type %d reason %d freq: %d req %d scanid %d", + event->vdev_id, event->type, event->reason, event->chan_freq, + event->requester, event->scan_id); + + /* free event info memory */ + qdf_mem_free(event_info); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SCAN_ID); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scm_bcn_probe_flush_callback(struct scheduler_msg *msg) +{ + struct scan_bcn_probe_event *bcn; + + bcn = msg->bodyptr; + + if (!bcn) { + scm_err("bcn is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + if (bcn->psoc) + wlan_objmgr_psoc_release_ref(bcn->psoc, WLAN_SCAN_ID); + if (bcn->rx_data) + qdf_mem_free(bcn->rx_data); + if (bcn->buf) + qdf_nbuf_free(bcn->buf); + qdf_mem_free(bcn); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scm_scan_start_flush_callback(struct scheduler_msg *msg) +{ + struct scan_start_request *req; + + if (!msg || !msg->bodyptr) { + scm_err("msg or msg->bodyptr is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + req = msg->bodyptr; + scm_post_internal_scan_complete_event(req, SCAN_REASON_CANCELLED); + wlan_objmgr_vdev_release_ref(req->vdev, WLAN_SCAN_ID); + scm_scan_free_scan_request_mem(req); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scm_scan_cancel_flush_callback(struct scheduler_msg *msg) +{ + struct scan_cancel_request *req; + + if (!msg || !msg->bodyptr) { + scm_err("msg or msg->bodyptr is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + req = msg->bodyptr; + wlan_objmgr_vdev_release_ref(req->vdev, WLAN_SCAN_ID); + /* Free cancel request memory */ + qdf_mem_free(req); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_manager.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..a7600d1b9be8a91c73c36a6718569c4f34242ca3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_manager.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: Defines internal scan manager api + * Core routines which deal with starting a scan, + * serializing scan requests, scan cancellation, scan completion, + * scan event processing. + */ + +#ifndef _WLAN_SCAN_MANAGER_API_H_ +#define _WLAN_SCAN_MANAGER_API_H_ + +#include "wlan_scan_main.h" + +/* + * Maximum numbers of callback functions that may be invoked + * for a particular scan event. + */ +#define MAX_SCAN_EVENT_LISTENERS (MAX_SCAN_EVENT_HANDLERS_PER_PDEV + 1) + +/** + * struct scan_event_listners - listeners interested in a particular scan event + * @count: number of listners + * @cb: callback handler + */ +struct scan_event_listeners { + uint32_t count; + struct cb_handler cb[MAX_SCAN_EVENT_LISTENERS]; +}; + +/** + * scm_is_scan_allowed() - check if scan is allowed + * @vdev: vdev for which scan allowed is check + * + * Return: true if scan is allowed else false + */ +bool scm_is_scan_allowed(struct wlan_objmgr_vdev *vdev); + +/** + * scm_scan_start_req() - scan start req core api + * @msg: scheduler message object containing start scan req params + * @req: start scan req params + * + * The API to start a scan + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_scan_start_req(struct scheduler_msg *msg); + +/** + * scm_scan_cancel_req() - scan cancel req core api + * @msg: scheduler message object containing stop scan params + * @req: stop scan params + * + * The API to cancel a scan + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_scan_cancel_req(struct scheduler_msg *msg); + + +/** + * scm_scan_event_handler() - core scan event handler from tgt interface + * @msg: scheduler message object containing scan event + * + * This function calls registered event handlers of various modules + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_scan_event_handler(struct scheduler_msg *msg); + +/** + * scm_scan_free_scan_request_mem() - Free scan request memory + * @req: scan_start_request object + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_scan_free_scan_request_mem(struct scan_start_request *req); + +/** + * scm_scan_event_flush_callback() - flush scan event + * @msg: scheduler message object containing scan event + * + * This function call is invoked when scheduler thread is going down + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_scan_event_flush_callback(struct scheduler_msg *msg); + +/** + * scm_bcn_probe_flush_callback() - flush beacon/probe response + * @msg: scheduler message object containing scan event + * + * This function call is invoked when scheduler thread is going down + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_bcn_probe_flush_callback(struct scheduler_msg *msg); + +/** + * scm_scan_start_flush_callback() - flush scan start request + * @msg: scheduler message object containing scan event + * + * This function call is invoked when scheduler thread is going down + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_scan_start_flush_callback(struct scheduler_msg *msg); + +/** + * scm_scan_cancel_flush_callback() - flush scan cancel request + * @msg: scheduler message object containing scan event + * + * This function call is invoked when scheduler thread is going down + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_scan_cancel_flush_callback(struct scheduler_msg *msg); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_extscan_api.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_extscan_api.h new file mode 100644 index 0000000000000000000000000000000000000000..5d69e1acff2e90e6c22b03d974454de8ba4c6e2d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_extscan_api.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains APIs of EXTSCAN component + */ + +#ifndef _WLAN_EXTSCAN_API_H_ +#define _WLAN_EXTSCAN_API_H_ + +#include +#include +#include +#include "../../core/src/wlan_scan_main.h" + +#ifdef FEATURE_WLAN_EXTSCAN + +/** + * extscan_get_enable() - API to get extscan enable value + * @psoc: pointer to psoc object + * + * Return: true if enabled else false. + */ +bool extscan_get_enable(struct wlan_objmgr_psoc *psoc); + +/** + * extscan_get_passive_max_time() - API to get passive + * max channel time + * @psoc: pointer to psoc object + * @passive_max_chn_time: extscan passive max channel time + * + * Return: none + */ +void extscan_get_passive_max_time(struct wlan_objmgr_psoc *psoc, + uint32_t *passive_max_chn_time); + +/** + * extscan_get_active_max_time() - API to get active + * max channel time + * @psoc: pointer to psoc object + * @active_max_chn_time: extscan active max channel time + * + * Return: none + */ +void extscan_get_active_max_time(struct wlan_objmgr_psoc *psoc, + uint32_t *active_max_chn_time); + +/** + * extscan_get_active_min_time() - API to set active + * min channel time + * @psoc: pointer to psoc object + * @active_min_chn_time: extscan active min channel time + * + * Return: none + */ +void extscan_get_active_min_time(struct wlan_objmgr_psoc *psoc, + uint32_t *active_min_chn_time); + +/** + * wlan_extscan_global_init() - Initialize extscan + * @psoc: pointer to psoc object + * @scan_obj: pointer to scan object + * + * Return: QDF_STATUS + */ +QDF_STATUS +wlan_extscan_global_init(struct wlan_objmgr_psoc *psoc, + struct wlan_scan_obj *scan_obj); + +/** + * wlan_extscan_global_deinit() - Deinitialize extscan + * @psoc: pointer to psoc object + * @scan_obj: pointer to scan object + * + * Return: QDF_STATUS + */ +QDF_STATUS +wlan_extscan_global_deinit(void); + +#else + +static inline +bool cfg_extscan_get_enable(struct wlan_objmgr_psoc *psoc) +{ + return false; +} + +static inline +void cfg_extscan_get_passive_max_time(struct wlan_objmgr_psoc *psoc, + uint32_t *passive_max_chn_time) +{ +} + +static inline +void cfg_extscan_get_active_max_time(struct wlan_objmgr_psoc *psoc, + uint32_t *active_max_chn_time) +{ +} + +static inline +void cfg_extscan_get_active_min_time(struct wlan_objmgr_psoc *psoc, + uint32_t *active_min_chn_time) +{ +} + +static inline QDF_STATUS +wlan_extscan_global_init(struct wlan_objmgr_psoc *psoc, + struct wlan_scan_obj *scan_obj) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS wlan_extscan_global_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} + +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_extscan_cfg.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_extscan_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..c10dd959548e45b969b6db71c2be308d9300cd66 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_extscan_cfg.h @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains centralized definitions of EXTSCAN component + */ +#ifndef _WLAN_EXTSCAN_CONFIG_H_ +#define _WLAN_EXTSCAN_CONFIG_H_ + +#include "cfg_define.h" + +#ifdef FEATURE_WLAN_EXTSCAN + +/* + * + * gExtScanPassiveMaxChannelTime - Set max channel time for external + * passive scan + * @Min: 0 + * @Max: 500 + * @Default: 110 + * + * This ini is used to set maximum channel time in secs spent in + * external passive scan + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#define CFG_EXTSCAN_PASSIVE_MAX_CHANNEL_TIME CFG_INI_UINT(\ + "gExtScanPassiveMaxChannelTime",\ + 0, 500, 110, CFG_VALUE_OR_DEFAULT,\ + "ext scan passive max channel time") + +/* + * + * gExtScanPassiveMinChannelTime - Set min channel time for external + * passive scan + * @Min: 0 + * @Max: 500 + * @Default: 60 + * + * This ini is used to set minimum channel time in secs spent in + * external passive scan + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#define CFG_EXTSCAN_PASSIVE_MIN_CHANNEL_TIME CFG_INI_UINT(\ + "gExtScanPassiveMinChannelTime",\ + 0, 500, 60, CFG_VALUE_OR_DEFAULT,\ + "ext scan passive min channel time") + +/* + * + * gExtScanActiveMaxChannelTime - Set min channel time for external + * active scan + * @Min: 0 + * @Max: 110 + * @Default: 40 + * + * This ini is used to set maximum channel time in secs spent in + * external active scan + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ + +#define CFG_EXTSCAN_ACTIVE_MAX_CHANNEL_TIME CFG_INI_UINT(\ + "gExtScanActiveMaxChannelTime",\ + 0, 110, 40, CFG_VALUE_OR_DEFAULT,\ + "ext scan active max channel time") + +/* + * + * gExtScanActiveMinChannelTime - Set min channel time for external + * active scan + * @Min: 0 + * @Max: 110 + * @Default: 20 + * + * This ini is used to set minimum channel time in secs spent in + * external active scan + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#define CFG_EXTSCAN_ACTIVE_MIN_CHANNEL_TIME CFG_INI_UINT(\ + "gExtScanActiveMinChannelTime",\ + 0, 110, 20, CFG_VALUE_OR_DEFAULT,\ + "ext scan active min channel time") + +/* + * + * gExtScanEnable - Enable external scan + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * This ini is used to control enabling of external scan + * feature. + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#define CFG_EXTSCAN_ALLOWED CFG_INI_BOOL(\ + "gExtScanEnable",\ + 1,\ + "ext scan enable") + +#define CFG_EXTSCAN_ALL \ + CFG(CFG_EXTSCAN_PASSIVE_MAX_CHANNEL_TIME) \ + CFG(CFG_EXTSCAN_PASSIVE_MIN_CHANNEL_TIME) \ + CFG(CFG_EXTSCAN_ACTIVE_MAX_CHANNEL_TIME) \ + CFG(CFG_EXTSCAN_ACTIVE_MIN_CHANNEL_TIME) \ + CFG(CFG_EXTSCAN_ALLOWED) + +#else +#define CFG_EXTSCAN_ALL +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_extscan_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_extscan_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..dae25f32d657140a35ba4a34b878f64dc704b0ce --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_extscan_ucfg_api.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains UCFG APIs of EXTSCAN component + */ + +#ifndef _WLAN_EXTSCAN_UCFG_API_H_ +#define _WLAN_EXTSCAN_UCFG_API_H_ + +#include "wlan_extscan_api.h" +#include +#include +#include + +#ifdef FEATURE_WLAN_EXTSCAN + +/** + * ucfg_extscan_get_enable() - API to get extscan enable value + * @psoc: pointer to psoc object + * + * Return: true if enabled else false. + */ +static inline +bool ucfg_extscan_get_enable(struct wlan_objmgr_psoc *psoc) +{ + return extscan_get_enable(psoc); +} + +/** + * ucfg_extscan_get_passive_max_time() - API to get passive + * max channel time + * @psoc: pointer to psoc object + * @passive_max_chn_time: extscan passive max channel time + * + * Return: none + */ +static inline +void ucfg_extscan_get_passive_max_time(struct wlan_objmgr_psoc *psoc, + uint32_t *passive_max_chn_time) +{ + extscan_get_passive_max_time(psoc, + passive_max_chn_time); +} + +/** + * ucfg_extscan_get_active_max_time() - API to get active + * max channel time + * @psoc: pointer to psoc object + * @active_max_chn_time: extscan active max channel time + * + * Return: none + */ +static inline +void ucfg_extscan_get_active_max_time(struct wlan_objmgr_psoc *psoc, + uint32_t *active_max_chn_time) +{ + extscan_get_active_max_time(psoc, + active_max_chn_time); +} + +/** + * ucfg_extscan_get_active_min_time() - API to set active + * min channel time + * @psoc: pointer to psoc object + * @active_min_chn_time: extscan active min channel time + * + * Return: none + */ +static inline +void ucfg_extscan_get_active_min_time(struct wlan_objmgr_psoc *psoc, + uint32_t *active_min_chn_time) +{ + extscan_get_active_min_time(psoc, + active_min_chn_time); +} + +#else + +static inline +bool ucfg_extscan_get_enable(struct wlan_objmgr_psoc *psoc) +{ + return false; +} + +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_api.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_api.h new file mode 100644 index 0000000000000000000000000000000000000000..2f364debe68006caa6d7554d3afca5f13f6d2f9c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_api.h @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan api + */ + +#ifndef _WLAN_SCAN_API_H_ +#define _WLAN_SCAN_API_H_ + +#include +#include +#include +#include "../../core/src/wlan_scan_main.h" + +/** + * wlan_scan_cfg_set_active_2g_dwelltime() - API to set scan active 2g dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan active dwell time + * + * Return: none + */ +void wlan_scan_cfg_set_active_2g_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time); + +/** + * wlan_scan_cfg_get_active_2g_dwelltime() - API to get active 2g dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan active dwelltime + * + * Return: scan active dwell time + */ +void wlan_scan_cfg_get_active_2g_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time); + +/** + * wlan_scan_cfg_set_active_dwelltime() - API to set scan active dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan active dwell time + * + * Return: none + */ +void wlan_scan_cfg_set_active_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time); +/** + * wlan_scan_cfg_get_active_dwelltime() - API to get active dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan active dwelltime + * + * Return: scan active dwell time + */ +void wlan_scan_cfg_get_active_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time); + +/** + * wlan_scan_cfg_set_passive_dwelltime() - API to set scan passive dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan passive dwell time + * + * Return: none + */ +void wlan_scan_cfg_set_passive_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time); +/** + * wlan_scan_cfg_get_passive_dwelltime() - API to get passive dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan passive dwelltime + * + * Return: scan passive dwell time + */ +void wlan_scan_cfg_get_passive_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time); + +/** + * wlan_scan_cfg_get_conc_active_dwelltime() - Get concurrent active dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan active dwelltime + * + * Return: scan concurrent active dwell time + */ +void wlan_scan_cfg_get_conc_active_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time); + +/** + * wlan_scan_cfg_set_conc_active_dwelltime() - Set concurrent active dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan active dwelltime + * + * Return: scan concurrent active dwell time + */ +void wlan_scan_cfg_set_conc_active_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time); + +/** + * wlan_scan_cfg_get_conc_passive_dwelltime() - Get passive concurrent dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan passive dwelltime + * + * Return: scan concurrent passive dwell time + */ +void wlan_scan_cfg_get_conc_passive_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time); + +/** + * wlan_scan_cfg_set_conc_passive_dwelltime() - Set passive concurrent dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan passive dwelltime + * + * Return: scan concurrent passive dwell time + */ +void wlan_scan_cfg_set_conc_passive_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time); + +/** + * wlan_scan_cfg_honour_nl_scan_policy_flags() - API to get nl scan policy + * flags honoured + * @psoc: pointer to psoc object + * + * Return: nl scan policy flags honoured or not + */ +bool wlan_scan_cfg_honour_nl_scan_policy_flags(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_scan_cfg_get_conc_max_resttime() - API to get max rest time + * @psoc: pointer to psoc object + * @rest_time: scan concurrent max resttime + * + * Return: scan concurrent max rest time + */ +void wlan_scan_cfg_get_conc_max_resttime(struct wlan_objmgr_psoc *psoc, + uint32_t *rest_time); + +/** + * wlan_scan_cfg_get_dfs_chan_scan_allowed() - API to get dfs scan enabled + * @psoc: pointer to psoc object + * @enable_dfs_scan: DFS scan enabled or not. + * + * Return: None + */ +void wlan_scan_cfg_get_dfs_chan_scan_allowed(struct wlan_objmgr_psoc *psoc, + bool *enable_dfs_scan); + +/** + * wlan_scan_cfg_set_dfs_chan_scan_allowed() - API to set dfs scan enabled. + * @psoc: pointer to psoc object + * @enable_dfs_scan: Set dfs scan enabled or not. + * + * Return: None + */ +void wlan_scan_cfg_set_dfs_chan_scan_allowed(struct wlan_objmgr_psoc *psoc, + bool enable_dfs_scan); + +/** + * wlan_scan_cfg_get_conc_min_resttime() - API to get concurrent min rest time + * @psoc: pointer to psoc object + * @rest_time: scan concurrent min rest time + * + * Return: scan concurrent min rest time + */ +void wlan_scan_cfg_get_conc_min_resttime(struct wlan_objmgr_psoc *psoc, + uint32_t *rest_time); + +/** + * wlan_scan_is_snr_monitor_enabled() - API to get SNR monitoring enabled or not + * @psoc: pointer to psoc object + * + * Return: enable/disable snr monitor mode. + */ +bool wlan_scan_is_snr_monitor_enabled(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_scan_process_bcn_probe_rx_sync() - handle bcn without posting to + * scheduler thread + * @psoc: psoc context + * @buf: frame buf + * @params: rx event params + * @frm_type: frame type + * + * handle bcn without posting to scheduler thread, this should be called + * while caller is already in scheduler thread context + * + * Return: success or error code. + */ +QDF_STATUS +wlan_scan_process_bcn_probe_rx_sync(struct wlan_objmgr_psoc *psoc, + qdf_nbuf_t buf, + struct mgmt_rx_event_params *rx_param, + enum mgmt_frame_type frm_type); + +/** + * wlan_scan_get_aging_time - Get the scan aging time config + * @psoc: psoc context + * + * Return: Scan aging time config + */ +qdf_time_t wlan_scan_get_aging_time(struct wlan_objmgr_psoc *psoc); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_cfg.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..175a3beccbb1121496cc9c6abfe9501641fc7eca --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_cfg.h @@ -0,0 +1,1338 @@ +/* + * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains centralized definitions of SCAN component + */ +#ifndef __CONFIG_SCAN_H +#define __CONFIG_SCAN_H + +#include "cfg_define.h" + +/** + * enum scan_mode_6ghz - scan mode for 6GHz + * @SCAN_MODE_6G_NO_CHANNEL: Remove 6GHz channels in the scan request + * @SCAN_MODE_6G_PSC_CHANNEL: Allow/Add 6Ghz PSC channels to scan request + * @SCAN_MODE_6G_ALL_CHANNEL: Allow all the 6Ghz channels + */ +enum scan_mode_6ghz { + SCAN_MODE_6G_NO_CHANNEL, + SCAN_MODE_6G_PSC_CHANNEL, + SCAN_MODE_6G_ALL_CHANNEL, + SCAN_MODE_6G_MAX = SCAN_MODE_6G_ALL_CHANNEL, +}; + +/* + * + * drop_bcn_on_chan_mismatch - drop the beacon for chan mismatch + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * This ini is used to decide drop the beacon or not if channel received + * in metadata doesn't match the one in beacon. + * + * Related: None + * + * Usage: External + * + * + */ +#define CFG_DROP_BCN_ON_CHANNEL_MISMATCH CFG_INI_BOOL(\ + "drop_bcn_on_chan_mismatch",\ + true,\ + "drop bcn on channel mismatch") + +/* + * + * drop_bcn_on_invalid_freq - drop the beacon or probe resp with invalid freq + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * This ini is used to decide whether to drop the beacon/probe resp or not + * if channel received in DS param, HT info and HE IE is invalid. + * + * Related: None + * + * Usage: External + * + * + */ +#define CFG_DROP_BCN_ON_INVALID_FREQ CFG_INI_BOOL(\ + "drop_bcn_on_invalid_freq",\ + true,\ + "drop bcn on invalid freq in HT, DS, HE IE") + +/* + * + * gActiveMaxChannelTime - Set max channel time for active scan + * @Min: 0 + * @Max: 10000 + * @Default: 40 + * + * This ini is used to set maximum channel time in msecs spent in + * active scan + * + * Related: None + * + * Usage: External + * + * + */ +#define CFG_ACTIVE_MAX_CHANNEL_TIME CFG_INI_UINT(\ + "gActiveMaxChannelTime",\ + 0, 10000, PLATFORM_VALUE(40, 105),\ + CFG_VALUE_OR_DEFAULT, "active dwell time") + +/* + * + * gEnableDFSChnlScan - Enable/Disable scan on DFS channels + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * This ini is used to enable/disable scan on DFS channels. + * + * Related: Scan + * + * Usage: External + * + * + */ +#define CFG_ENABLE_DFS_SCAN CFG_INI_BOOL( \ + "gEnableDFSChnlScan", \ + true, \ + "enable dfs scan") + +/* + * + * gInitialScanNoDFSChnl - Exclude DFS channels in first scan + * @Min: 0 + * @Max: 1 + * @Default: 0 + * + * This ini is used to enable/disable scan on DFS channels, in first scan only + * + * Related: Scan + * + * Usage: External + * + * + */ +#define CFG_INITIAL_NO_DFS_SCAN CFG_INI_BOOL( \ + "gInitialScanNoDFSChnl", \ + false, \ + "disable initial dfs scan") + +/* + * + * active_max_channel_time_2g - Set max time for active 2G channel scan + * @Min: 0 + * @Max: 10000 + * @Default: 80 + * + * This ini is used to set maximum time in msecs spent in active 2G channel scan + * if it's not zero, in case of zero, CFG_ACTIVE_MAX_CHANNEL_TIME is used for 2G + * channels also. + * + * Related: None + * + * Usage: External + * + * + */ +#define CFG_ACTIVE_MAX_2G_CHANNEL_TIME CFG_INI_UINT(\ + "active_max_channel_time_2g",\ + 0, 10000, PLATFORM_VALUE(80, 0),\ + CFG_VALUE_OR_DEFAULT, "active dwell time for 2G channels") + +/* + * + * active_max_channel_time_6g - Set max time for active 6G channel scan + * @Min: 0 + * @Max: 10000 + * @Default: 40 + * + * This ini is used to set maximum time in msecs spent in active 6G channel scan + * + * + * Related: None + * + * Usage: External + * + * + */ +#define CFG_ACTIVE_MAX_6G_CHANNEL_TIME CFG_INI_UINT(\ + "active_max_channel_time_6g",\ + 0, 10000, 40,\ + CFG_VALUE_OR_DEFAULT, "active dwell time for 6G channels") + +/* + * + * passive_max_channel_time_6g - Set max time for passive 6G channel scan + * @Min: 0 + * @Max: 10000 + * @Default: 30 + * + * This ini is used to set maximum time in msecs spent in passive 6G chan scan + * + * + * Related: None + * + * Usage: External + * + * + */ +#define CFG_PASSIVE_MAX_6G_CHANNEL_TIME CFG_INI_UINT(\ + "passive_max_channel_time_6g",\ + 0, 10000, 30,\ + CFG_VALUE_OR_DEFAULT, "passive dwell time for 6G channels") + +/* + * + * gPassiveMaxChannelTime - Set max channel time for passive scan + * @Min: 0 + * @Max: 10000 + * @Default: 110 + * + * This ini is used to set maximum channel time in msecs spent in + * passive scan + * + * Related: None + * + * Usage: External + * + * + */ +#define CFG_PASSIVE_MAX_CHANNEL_TIME CFG_INI_UINT(\ + "gPassiveMaxChannelTime",\ + 0, 10000, PLATFORM_VALUE(110, 300),\ + CFG_VALUE_OR_DEFAULT, "passive dwell time") + +/* + * + * gScanNumProbes - Set the number of probes on each channel for active scan + * @Min: 0 + * @Max: 20 + * @Default: 0 + * + * This ini is used to set number of probes on each channel for + * active scan + */ +#define CFG_SCAN_NUM_PROBES CFG_INI_UINT(\ + "gScanNumProbes",\ + 0, 20, PLATFORM_VALUE(0, 2),\ + CFG_VALUE_OR_DEFAULT,\ + "number of probes on each channel") + +/* + * + * gScanProbeRepeatTime - Set the probe repeat time on each channel + * @Min: 0 + * @Max: 30 + * @Default: 0 + * + * This ini is used to set probe repeat time on each channel for + * active scan + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#define CFG_SCAN_PROBE_REPEAT_TIME CFG_INI_UINT(\ + "gScanProbeRepeatTime",\ + 0, 50, PLATFORM_VALUE(20, 50),\ + CFG_VALUE_OR_DEFAULT,\ + "probe repeat time on each channel") + +/* + * + * hostscan_adaptive_dwell_mode - Enable adaptive dwell mode + * during host scan with conneciton + * @Min: 0 + * @Max: 4 + * @Default: 2 + * + * This ini will set the algo used in dwell time optimization + * during host scan with connection. + * See enum wmi_dwelltime_adaptive_mode. + * Acceptable values for this: + * 0: Default (Use firmware default mode) + * 1: Conservative optimization + * 2: Moderate optimization + * 3: Aggressive optimization + * 4: Static + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#define CFG_ADAPTIVE_SCAN_DWELL_MODE CFG_INI_UINT(\ + "hostscan_adaptive_dwell_mode",\ + 0, 4, PLATFORM_VALUE(2, 0),\ + CFG_VALUE_OR_DEFAULT,\ + "Enable adaptive dwell mode") + +/* + * + * hostscan_adaptive_dwell_mode_no_conn - Enable adaptive dwell mode + * during host scan without conneciton + * @Min: 0 + * @Max: 4 + * @Default: 4 + * + * This ini will set the algo used in dwell time optimization + * during host scan with connection. + * See enum wmi_dwelltime_adaptive_mode. + * Acceptable values for this: + * 0: Default (Use firmware default mode) + * 1: Conservative optimization + * 2: Moderate optimization + * 3: Aggressive optimization + * 4: Static + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#define CFG_ADAPTIVE_SCAN_DWELL_MODE_NC CFG_INI_UINT(\ + "hostscan_adaptive_dwell_mode_no_conn",\ + 0, 4, PLATFORM_VALUE(4, 0),\ + CFG_VALUE_OR_DEFAULT,\ + "Enable adaptive dwell mode without connection") + +/* + * + * honour_nl_scan_policy_flags - This ini will decide whether to honour + * NL80211 scan policy flags + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * This parameter will decide whether to honour scan flags such as + * NL80211_SCAN_FLAG_HIGH_ACCURACY , NL80211_SCAN_FLAG_LOW_SPAN, + * NL80211_SCAN_FLAG_LOW_POWER. + * Acceptable values for this: + * 0: Config is disabled + * 1: Config is enabled + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: Internal + * + * + */ +#define CFG_HONOUR_NL_SCAN_POLICY_FLAGS CFG_INI_BOOL(\ + "honour_nl_scan_policy_flags",\ + true, \ + "honour NL80211 scan policy flags") + +/* + * + * is_bssid_hint_priority - Set priority for connection with bssid_hint + * BSSID. + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * This ini is used to give priority to BSS for connection which comes + * as part of bssid_hint + * + * Related: None + * + * Supported Feature: STA + * + * Usage: External + * + * + */ +#define CFG_IS_BSSID_HINT_PRIORITY CFG_INI_UINT(\ + "is_bssid_hint_priority",\ + 0, 1, 0,\ + CFG_VALUE_OR_DEFAULT, \ + "Set priority for connection with bssid_hint") + +#ifdef FEATURE_WLAN_SCAN_PNO +/* + * + * g_user_config_sched_scan_plan - set user config sched scan plans. + * @Min: 0 + * @Max:1 + * @Default: 1 + * + * This ini is used to decide if user config number of sched scan plan needs to + * be configured or only one sched scan plan needs to be configured. + * If this ini is enabled then user config number of sched scan plans will be + * configured else only one sched scan plan will be configured. + * + * Supported Feature: PNO scan + * + * Usage: External + * + * + */ + +#define CFG_USER_CONFIG_SCHED_SCAN_PLAN CFG_INI_BOOL(\ + "g_user_config_sched_scan_plan",\ + true, \ + "set user config sched scan plans") + +/* + * + * g_max_sched_scan_plan_iterations - pno sched max scan plan iterations. + * @Min: 1 + * @Max: 100 + * @Default: 10 + * + * This ini is used to set max sched scan plan iterations for pno scan + * (value in seconds). + * + * Related: gPNOScanSupport + * + * Supported Feature: PNO scan + * + * Usage: External + * + * + */ +#define CFG_MAX_SCHED_SCAN_PLAN_ITERATIONS CFG_INI_UINT( \ + "g_max_sched_scan_plan_iterations", \ + 1, 100, 10, \ + CFG_VALUE_OR_DEFAULT, \ + "Max sched scan plan iterations") + +/* + * + * g_max_sched_scan_plan_int - pno sched max scan plan interval. + * @Min: 1 + * @Max: 7200 + * @Default: 3600 + * + * This ini is used to set max sched scan plan interval for pno scan + * (value in seconds). + * + * Related: gPNOScanSupport + * + * Supported Feature: PNO scan + * + * Usage: External + * + * + */ +#define CFG_MAX_SCHED_SCAN_PLAN_INTERVAL CFG_INI_UINT( \ + "g_max_sched_scan_plan_int", \ + 1, 7200, 3600, \ + CFG_VALUE_OR_DEFAULT, \ + "Max sched scan plan interval") + +/* + * + * gEnableDFSPnoChnlScan - enable dfs channels in PNO scan + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * This ini is used to enable/disable dfs channels in PNO scan request, + * enabling this ini enables driver to include dfs channels in its + * PNO scan request + * Related: NA + * + * Supported Feature: DFS, PNO + * + * Usage: Internal/External + * + * + */ +#define CFG_ENABLE_DFS_PNO_CHNL_SCAN CFG_INI_BOOL( \ + "gEnableDFSPnoChnlScan", \ + true, \ + "Enable dfs channels in PNO Scan") + +/* + * + * gPNOScanSupport - Enable or Disable PNO scan + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * This ini is used to Enable or Disable PNO scan + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#define CFG_PNO_SCAN_SUPPORT CFG_INI_BOOL( \ + "gPNOScanSupport", \ + true, \ + "Enable/Disable PNO scan") + +/* + * + * gPNOScanTimerRepeatValue - Set PNO scan timer repeat value + * @Min: 0 + * @Max: 0xffffffff + * @Default: 30 + * + * This ini is used by firmware to set fast scan max cycles + * equal to gPNOScanTimerRepeatValue. Taking power consumption + * into account firmware after gPNOScanTimerRepeatValue times + * fast_scan_period switches to slow_scan_period. + * + * Usage: External + * + * + */ +#define CFG_PNO_SCAN_TIMER_REPEAT_VALUE CFG_INI_UINT( \ + "gPNOScanTimerRepeatValue", \ + 0, \ + 0xffffffff, \ + 30, \ + CFG_VALUE_OR_DEFAULT, \ + "PNO scan timer repeat value") + +/* + * + * gPNOSlowScanMultiplier - Set PNO slow scan multiplier + * @Min: 0 + * @Max: 30 + * @Default: 6 + * + * This ini is used by firmware to set slow scan period + * as gPNOSlowScanMultiplier times fast_scan_period. + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#define CFG_PNO_SLOW_SCAN_MULTIPLIER CFG_INI_UINT( \ + "gPNOSlowScanMultiplier", \ + 0, \ + 30, \ + 6, \ + CFG_VALUE_OR_DEFAULT, \ + "PNO slow scan multiplier") + +/* + * + * gPNOChannelPrediction - Enable/disable the PNO channel + * prediction feature. + * @Min: 0 + * @Max: 1 + * @Default: 0 + * + * In current PNO implementation, scan is always done until all configured + * channels are scanned. If we can determine DUT is stationary based on + * scanning a subset of channels, we may cancel the remaining channels. + * Hence, we can save additional power consumption. + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#define CFG_PNO_CHANNEL_PREDICTION CFG_INI_BOOL( \ + "gPNOChannelPrediction", \ + false, \ + "enable/disable PNO channel prediction feature") + +/* + * + * gTopKNumOfChannels - top K number of channels are used for tanimoto distance + * @Min: 1 + * @Max: 5 + * @Default: 3 + * + * These are the top channels on which the probability of finding the AP's is + * extremely high. This number is intended for tweaking the internal algorithm + * for experiments. This should not be changed externally. + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#define CFG_TOP_K_NUM_OF_CHANNELS CFG_INI_UINT( \ + "gTopKNumOfChannels", \ + 1, \ + 5, \ + 3, \ + CFG_VALUE_OR_DEFAULT, \ + "Top K number of channels") +/* + * + * gStationaryThreshold - STA threshold value to determine if it is stationary + * @Min: 0 + * @Max: 100 + * @Default: 10 + * + * This is the threshold value to determine that the STA is + * stationary. If the tanimoto distance is less than this + * value, then the device is considered to be stationary. + * This parameter is intended to tweak the internal algorithm + * for experiments. This should not be changed externally. + * + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#define CFG_STATIONARY_THRESHOLD CFG_INI_UINT( \ + "gStationaryThreshold", \ + 0, \ + 100, \ + 10, \ + CFG_VALUE_OR_DEFAULT, \ + "Threshold to determine if sta is stationary") + +/* + * + * gChPredictionFullScanMs - Set periodic timer for channel prediction + * @Min: 3000 + * @Max: 0x7fffffff + * @Default: 60000 + * + * This ini is used to set the periodic timer upon which + * a full scan needs to be triggered when PNO channel + * prediction feature is enabled. This parameter is intended + * to tweak the internal algortihm for experiments. + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: Internal + * + * + */ +#define CFG_CHANNEL_PREDICTION_SCAN_TIMER CFG_INI_UINT( \ + "gChPredictionFullScanMs", \ + 3000, \ + 0x7fffffff, \ + 60000, \ + CFG_VALUE_OR_DEFAULT, \ + "Timer value for channel prediction") + +/* + * + * pnoscan_adaptive_dwell_mode - Enable adaptive dwell mode + * during pno scan + * @Min: 0 + * @Max: 4 + * @Default: 1 + * + * This ini will set the algo used in dwell time optimization + * during pno scan. see enum scan_dwelltime_adaptive_mode. + * Acceptable values for this: + * 0: Default (Use firmware default mode) + * 1: Conservative optimization + * 2: Moderate optimization + * 3: Aggressive optimization + * 4: Static + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#define CFG_ADAPTIVE_PNOSCAN_DWELL_MODE CFG_INI_UINT( \ + "pnoscan_adaptive_dwell_mode", \ + 0, \ + 4, \ + 1, \ + CFG_VALUE_OR_DEFAULT, \ + "Algorithm used in dwell time optimization") + +/* + * + * gScanBackoffMultiplier - For NLO/PNO, multiply fast scan period by this every + * max cycles + * @Min: 0 + * @Max: 255 + * @Default: 0 + * + * For Network Listen Offload and Perfered Network Offload, multiply the fast + * scan period by this value after max cycles have occurred. Setting this to 0 + * disables the feature. + * + * @E.g. + * # Disable scan backoff multiplier + * gScanBackoffMultiplier=0 + * # Effectively the same + * gScanBackoffMultiplier=1 + * # Double the scan period after each max cycles have occurred + * gScanBackoffMultiplier=2 + * + * Related: NLO, PNO + * + * Usage: Internal/External + * + * + */ +#define CFG_SCAN_BACKOFF_MULTIPLIER CFG_INI_UINT( \ + "gScanBackoffMultiplier", \ + 0, \ + 255, \ + 0, \ + CFG_VALUE_OR_DEFAULT, \ + "Scan backoff multiplier") + +/* + * + * mawc_nlo_enabled - For NLO/PNO, enable MAWC based scan + * @Min: 0 + * @Max: 1 + * @Default: 0 + * + * Enable/Disable the Motion Aided Wireless Connectivity + * based NLO using this parameter + * + * Related: NLO, PNO + * + * Usage: Internal/External + * + * + */ +#define CFG_MAWC_NLO_ENABLED CFG_INI_BOOL( \ + "mawc_nlo_enabled", \ + 0, \ + "Enable MAWC based scan") + +/* + * + * mawc_nlo_exp_backoff_ratio - Exponential back off ratio + * @Min: 0 + * @Max: 300 + * @Default: 3 + * + * Configure the exponential back off ratio using this + * parameter for MAWC based NLO + * ratio of exponential backoff, next = current + current*ratio/100 + * + * Related: NLO, PNO + * + * Usage: Internal/External + * + * + */ +#define CFG_MAWC_NLO_EXP_BACKOFF_RATIO CFG_INI_UINT( \ + "mawc_nlo_exp_backoff_ratio", \ + 0, \ + 300, \ + 3, \ + CFG_VALUE_OR_DEFAULT, \ + "MWAC based NLO exponential ratio") + +/* + * + * mawc_nlo_init_scan_interval - Initial Scan Interval + * @Min: 1000 + * @Max: 0xFFFFFFFF + * @Default: 10000 + * + * Configure the initial scan interval using this + * parameter for MAWC based NLO (Units in Milliseconds) + * + * Related: NLO, PNO + * + * Usage: Internal/External + * + * + */ +#define CFG_MAWC_NLO_INIT_SCAN_INTERVAL CFG_INI_UINT( \ + "mawc_nlo_init_scan_interval", \ + 1000, \ + 0xFFFFFFFF, \ + 10000, \ + CFG_VALUE_OR_DEFAULT, \ + "Initial Scan Interval") + +/* + * + * mawc_nlo_max_scan_interval - Maximum Scan Interval + * @Min: 1000 + * @Max: 0xFFFFFFFF + * @Default: 60000 + * + * Configure the maximum scan interval using this + * parameter for MAWC based NLO (Units in Milliseconds) + * + * Related: NLO, PNO + * + * Usage: Internal/External + * + * + */ +#define CFG_MAWC_NLO_MAX_SCAN_INTERVAL CFG_INI_UINT( \ + "mawc_nlo_max_scan_interval", \ + 1000, \ + 0xFFFFFFFF, \ + 60000, \ + CFG_VALUE_OR_DEFAULT, \ + "Maximum Scan Interval") + +#define CFG_SCAN_PNO \ + CFG(CFG_MAX_SCHED_SCAN_PLAN_ITERATIONS) \ + CFG(CFG_MAX_SCHED_SCAN_PLAN_INTERVAL) \ + CFG(CFG_PNO_SCAN_SUPPORT) \ + CFG(CFG_ENABLE_DFS_PNO_CHNL_SCAN) \ + CFG(CFG_PNO_SCAN_TIMER_REPEAT_VALUE) \ + CFG(CFG_PNO_SLOW_SCAN_MULTIPLIER) \ + CFG(CFG_PNO_CHANNEL_PREDICTION) \ + CFG(CFG_TOP_K_NUM_OF_CHANNELS) \ + CFG(CFG_STATIONARY_THRESHOLD) \ + CFG(CFG_CHANNEL_PREDICTION_SCAN_TIMER) \ + CFG(CFG_ADAPTIVE_PNOSCAN_DWELL_MODE) \ + CFG(CFG_SCAN_BACKOFF_MULTIPLIER) \ + CFG(CFG_MAWC_NLO_ENABLED) \ + CFG(CFG_MAWC_NLO_EXP_BACKOFF_RATIO) \ + CFG(CFG_MAWC_NLO_INIT_SCAN_INTERVAL) \ + CFG(CFG_MAWC_NLO_MAX_SCAN_INTERVAL) \ + CFG(CFG_USER_CONFIG_SCHED_SCAN_PLAN) + +#else +#define CFG_SCAN_PNO +#endif /* FEATURE_WLAN_SCAN_PNO */ + +/* + * + * gActiveMaxChannelTimeConc - Maximum active scan time in milliseconds. + * @Min: 0 + * @Max: 10000 + * @Default: 40 + * + * This ini is used to set maximum active scan time in STA+SAP concurrent + * mode. + * + * Related: None. + * + * Supported Feature: Concurrency + * + * Usage: Internal/External + * + * + */ +#define CFG_ACTIVE_MAX_CHANNEL_TIME_CONC CFG_INI_UINT(\ + "gActiveMaxChannelTimeConc",\ + 0, 10000, PLATFORM_VALUE(40, 0),\ + CFG_VALUE_OR_DEFAULT, \ + "active scan time in STA+SAP concurrent") + +/* + * + * gPassiveMaxChannelTimeConc - Maximum passive scan time in milliseconds. + * @Min: 0 + * @Max: 10000 + * @Default: 110 + * + * This ini is used to set maximum passive scan time in STA+SAP concurrent + * mode. + * + * Related: None. + * + * Supported Feature: Concurrency + * + * Usage: Internal/External + * + * + */ +#define CFG_PASSIVE_MAX_CHANNEL_TIME_CONC CFG_INI_UINT(\ + "gPassiveMaxChannelTimeConc",\ + 0, 10000, PLATFORM_VALUE(110, 0),\ + CFG_VALUE_OR_DEFAULT, \ + "Set priority for connection with bssid_hint") + +/* + * + * gRestTimeConc - Rest time before moving to a new channel to scan. + * @Min: 0 + * @Max: 10000 + * @Default: 100 + * + * This ini is used to configure rest time. + * + * Related: None. + * + * Supported Feature: Concurrency + * + * Usage: Internal/External + * + * + */ +#define CFG_MAX_REST_TIME_CONC CFG_INI_UINT(\ + "nRestTimeConc",\ + 0, 10000, PLATFORM_VALUE(100, 0),\ + CFG_VALUE_OR_DEFAULT, \ + "Rest time before moving to a new channel") + +/* + * + * min_rest_time_conc - Mininum time spent on home channel before moving to a + * new channel to scan. + * @Min: 0 + * @Max: 50 + * @Default: 50 + * + * This ini is used to configure minimum time spent on home channel before + * moving to a new channel to scan. + * + * Related: None. + * + * Supported Feature: Concurrency + * + * Usage: Internal/External + * + * + */ +#define CFG_MIN_REST_TIME_CONC CFG_INI_UINT(\ + "min_rest_time_conc",\ + 0, 50, PLATFORM_VALUE(50, 0),\ + CFG_VALUE_OR_DEFAULT, \ + "minimum time spent on home channel") + +/* + * + * wake_lock_in_user_scan - use to acquire wake lock during user scan + * @Min: 0 + * @Max: 1 + * @Default: 0 + * + * This INI is added for a specific OEM on their request, who don’t want to + * use PNO offload scan (sched scans). This is useful only if PNO scan offload + * is disabled. If PNO scan is enabled this INI should be disabled and its + * by default disabled intentionally. + * This is used to acquire wake lock to handle the case where PNO scan offload + * is disabled so that wlan is not suspended during scan before connect and + * thus scan is not aborted in between. In case PNO scan is offloaded, the FW + * will take care of connect scans and will wake up host when candidate is found + * + * Related: Scan + * + * Usage: Internal/External + * + * + */ +#define CFG_ENABLE_WAKE_LOCK_IN_SCAN CFG_INI_BOOL( \ + "wake_lock_in_user_scan", \ + false, \ + "use wake lock during scan") + +/* + * + * gIdleTimeConc - Data inactivity time in msec. + * @Min: 0 + * @Max: 25 + * @Default: 25 + * + * This ini is used to configure data inactivity time in msec on bss channel + * that will be used by scan engine in firmware. + * For example if this value is 25ms then firmware will check for data + * inactivity every 25ms till gRestTimeConc is reached. + * If inactive then scan engine will move from home channel to scan the next + * frequency. + * + * Related: None. + * + * Supported Feature: Concurrency + * + * Usage: Internal/External + * + * + */ +#define CFG_IDLE_TIME_CONC CFG_INI_UINT(\ + "gIdleTimeConc",\ + 0, 25, PLATFORM_VALUE(25, 0),\ + CFG_VALUE_OR_DEFAULT, \ + "data inactivity time on bss channel") + +/* + * + * gEnableMacAddrSpoof - Enable mac address randomization feature. + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * This ini is used to enable/disable mac address randomization for scan. + * + * Supported Feature: SCAN + * + * + * Usage: Internal/External + * + * + */ +#define CFG_ENABLE_MAC_ADDR_SPOOFING CFG_INI_BOOL( \ + "gEnableMacAddrSpoof", \ + true, \ + "Enable mac spoofing") + +/* + * + * gScanAgingTime - Set scan aging time + * @Min: 0 + * @Max: 200 + * @Default: 30 + * + * This ini is used to set scan aging timeout value + * in secs. For example after 30 secs the bss results + * greater than 30secs age will be flushed. + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#ifdef QCA_WIFI_NAPIER_EMULATION +#define CFG_SCAN_AGING_TIME_DEFAULT (90) +#else +#define CFG_SCAN_AGING_TIME_DEFAULT (30) +#endif + +#define CFG_SCAN_AGING_TIME CFG_INI_UINT( \ + "gScanAgingTime", \ + 0, \ + 200, \ + CFG_SCAN_AGING_TIME_DEFAULT, \ + CFG_VALUE_OR_DEFAULT, \ + "scan aging time") +/* + * + * extscan_adaptive_dwell_mode Enable adaptive dwell mode + * during ext scan + * @Min: 0 + * @Max: 4 + * @Default: 1 + * + * This ini will set the algo used in dwell time optimization + * during ext scan. see enum scan_dwelltime_adaptive_mode. + * Acceptable values for this: + * 0: Default (Use firmware default mode) + * 1: Conservative optimization + * 2: Moderate optimization + * 3: Aggressive optimization + * 4: Static + * + * Related: None + * + * Supported Feature: Scan + * + * Usage: External + * + * + */ +#define CFG_ADAPTIVE_EXTSCAN_DWELL_MODE CFG_INI_UINT( \ + "extscan_adaptive_dwell_mode", \ + 0, \ + 4, \ + 1, \ + CFG_VALUE_OR_DEFAULT, \ + "ext scan adaptive dwell mode") + +/* + * + * sta_scan_burst_duration - Burst duration in case of split scan. + * @Min: 0 + * @Max: 180 + * @Default: 0 + * + * This ini is used to set burst duration of sta scan requests. + * + * Related: None. + * + * Supported Feature: Concurrency + * + * Usage: External + * + * + */ +#define CFG_STA_SCAN_BURST_DURATION CFG_INI_UINT( \ + "sta_scan_burst_duration", \ + 0, \ + 180, \ + 0, \ + CFG_VALUE_OR_DEFAULT, \ + "sta scan burst duration") + +/* + * + * p2p_scan_burst_duration - Burst duration in case of split scan for p2p scan. + * @Min: 0 + * @Max: 180 + * @Default: 0 + * + * This ini is used to set burst duration of scan for p2p scan requests. + * + * Related: None. + * + * Supported Feature: Concurrency + * + * Usage: External + * + * + */ + +#define CFG_P2P_SCAN_BURST_DURATION CFG_INI_UINT( \ + "p2p_scan_burst_duration", \ + 0, \ + 180, \ + 0, \ + CFG_VALUE_OR_DEFAULT, \ + "p2p scan burst duration") +/* + * + * go_scan_burst_duration - Burst duration in case of split scan when GO is + * active. + * @Min: 0 + * @Max: 180 + * @Default: 0 + * + * This ini is used to set burst duration of scan when GO is active. + * + * Related: None. + * + * Supported Feature: Concurrency + * + * Usage: External + * + * + */ +#define CFG_GO_SCAN_BURST_DURATION CFG_INI_UINT( \ + "go_scan_burst_duration", \ + 0, \ + 180, \ + 0, \ + CFG_VALUE_OR_DEFAULT, \ + "go scan burst duration") + +/* + * + * ap_scan_burst_duration - Burst duration in case of split scan when ap + * is active. + * @Min: 0 + * @Max: 32 + * @Default: 0 + * + * This ini is used to set burst duration of scan when SAP is active. + * + * Related: None. + * + * Supported Feature: Concurrency + * + * Usage: External + * + * + */ +#define CFG_AP_SCAN_BURST_DURATION CFG_INI_UINT( \ + "ap_scan_burst_duration", \ + 0, \ + 32, \ + 0, \ + CFG_VALUE_OR_DEFAULT, \ + "ap scan burst duration") + +/* + * + * gSkipDfsChannelInP2pSearch - Skip DFS Channel in case of P2P Search + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * This ini is used to disable(skip) dfs channel in p2p search. + * Related: None. + * + * Supported Feature: DFS P2P + * + * Usage: External + * + * + */ +#define CFG_ENABLE_SKIP_DFS_IN_P2P_SEARCH CFG_INI_BOOL( \ + "gSkipDfsChannelInP2pSearch", \ + 1, \ + "skip dfs channel in p2p search") + +/* + * + * gEnableConnectedScan - Will enable or disable scan in connected state + * This ini is used to enable or disable the scanning in + * Connected state + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * Related: None + * + * Supported Feature: STA + * + * Usage: External + * + * + */ +#define CFG_ENABLE_CONNECTED_SCAN CFG_INI_BOOL( \ + "gEnableConnectedScan", \ + true, \ + "Enable/disable scan in connected state") + +/* + * + * gEnableSNRMonitoring - Enables SNR Monitoring + * @Min: 0 + * @Max: 1 + * @Default: 0 + * + * This ini is used to set default snr monitor + * + * Related: None + * + * Supported Feature: STA + * + * Usage: Internal/External + * + * + */ +#define CFG_ENABLE_SNR_MONITORING CFG_INI_BOOL(\ + "gEnableSNRMonitoring",\ + false,\ + "Enable/Disable SNR Monitoring") + +/* + * + * scan_mode_6ghz - 6ghz Scan mode + * @Min: 0 + * @Max: 2 + * @Default: 2 + * + * Configure the 6Ghz scan mode + * 0 - Remove 6GHz channels in the scan request + * 1 - Allow/Add 6Ghz PSC channels to scan request + * 2 - Allow all the 6Ghz channels + * + * Related: SCAN + * + * Usage: Internal/External + * + * + */ +#define CFG_6GHZ_SCAN_MODE CFG_INI_UINT( \ + "scan_mode_6ghz", \ + SCAN_MODE_6G_NO_CHANNEL, \ + SCAN_MODE_6G_MAX, \ + PLATFORM_VALUE(SCAN_MODE_6G_PSC_CHANNEL, \ + SCAN_MODE_6G_ALL_CHANNEL), \ + CFG_VALUE_OR_DEFAULT, \ + "6ghz scan mode") + +/* + * + * scan_allow_bss_with_corrupted_ie - Continue scan even if corrupted IEs are + * present. + * @Min: 0 + * @Max: 1 + * @Default: 0 + * + * This ini is used to continue scan even if corrupted IEs are present. If this + * ini is enable, the scan module skips the IEs following corrupted IEs(IE's + * with invalid len) and adds the scan entry without completely dropping the + * frame. + * + * Related: scan + * + * Usage: External + * + * + */ +#define CFG_SCAN_ALLOW_BSS_WITH_CORRUPTED_IE CFG_INI_BOOL( \ + "scan_allow_bss_with_corrupted_ie", \ + false, \ + "scan allow bss with corrupted ie") + +#define CFG_SCAN_ALL \ + CFG(CFG_DROP_BCN_ON_CHANNEL_MISMATCH) \ + CFG(CFG_DROP_BCN_ON_INVALID_FREQ) \ + CFG(CFG_ENABLE_WAKE_LOCK_IN_SCAN) \ + CFG(CFG_ACTIVE_MAX_CHANNEL_TIME) \ + CFG(CFG_ENABLE_DFS_SCAN) \ + CFG(CFG_ENABLE_CONNECTED_SCAN) \ + CFG(CFG_INITIAL_NO_DFS_SCAN) \ + CFG(CFG_ACTIVE_MAX_2G_CHANNEL_TIME) \ + CFG(CFG_PASSIVE_MAX_CHANNEL_TIME) \ + CFG(CFG_ACTIVE_MAX_6G_CHANNEL_TIME) \ + CFG(CFG_PASSIVE_MAX_6G_CHANNEL_TIME) \ + CFG(CFG_SCAN_NUM_PROBES) \ + CFG(CFG_SCAN_PROBE_REPEAT_TIME) \ + CFG(CFG_ADAPTIVE_SCAN_DWELL_MODE) \ + CFG(CFG_ADAPTIVE_SCAN_DWELL_MODE_NC) \ + CFG(CFG_HONOUR_NL_SCAN_POLICY_FLAGS) \ + CFG(CFG_IS_BSSID_HINT_PRIORITY) \ + CFG(CFG_PASSIVE_MAX_CHANNEL_TIME_CONC) \ + CFG(CFG_ACTIVE_MAX_CHANNEL_TIME_CONC) \ + CFG(CFG_MAX_REST_TIME_CONC) \ + CFG(CFG_MIN_REST_TIME_CONC) \ + CFG(CFG_IDLE_TIME_CONC) \ + CFG(CFG_ENABLE_MAC_ADDR_SPOOFING) \ + CFG(CFG_SCAN_AGING_TIME) \ + CFG(CFG_ADAPTIVE_EXTSCAN_DWELL_MODE) \ + CFG(CFG_STA_SCAN_BURST_DURATION) \ + CFG(CFG_P2P_SCAN_BURST_DURATION) \ + CFG(CFG_GO_SCAN_BURST_DURATION) \ + CFG(CFG_ENABLE_SNR_MONITORING) \ + CFG(CFG_AP_SCAN_BURST_DURATION) \ + CFG(CFG_ENABLE_SKIP_DFS_IN_P2P_SEARCH) \ + CFG(CFG_6GHZ_SCAN_MODE) \ + CFG(CFG_SCAN_ALLOW_BSS_WITH_CORRUPTED_IE) \ + CFG_SCAN_PNO + +#endif /* __CONFIG_SCAN_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_public_structs.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_public_structs.h new file mode 100644 index 0000000000000000000000000000000000000000..bba10550cf9205b1c6e2bcfe26c15bf0e5ed3afb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_public_structs.h @@ -0,0 +1,1516 @@ +/* + * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan structure definations + */ + +#ifndef _WLAN_SCAN_STRUCTS_H_ +#define _WLAN_SCAN_STRUCTS_H_ +#include +#include +#include +#include +#include +#include +#include + +typedef uint16_t wlan_scan_requester; +typedef uint32_t wlan_scan_id; + +#define WLAN_SCAN_MAX_HINT_S_SSID 10 +#define WLAN_SCAN_MAX_HINT_BSSID 10 +#define MAX_RNR_BSS 5 +#define WLAN_SCAN_MAX_NUM_SSID 16 +#define WLAN_SCAN_MAX_NUM_BSSID 4 + +#define SCM_CANCEL_SCAN_WAIT_TIME 50 +#define SCM_CANCEL_SCAN_WAIT_ITERATION 600 + +#define INVAL_SCAN_ID 0xFFFFFFFF +#define CANCEL_HOST_SCAN_ID 0xFFFFFFFE +#define INVAL_VDEV_ID 0xFFFFFFFF +#define INVAL_PDEV_ID 0xFFFFFFFF + +#define USER_SCAN_REQUESTOR_ID 0xA0000 +#define PREAUTH_REQUESTOR_ID 0xC0000 + +#define BURST_SCAN_MAX_NUM_OFFCHANNELS 3 +#define P2P_SCAN_MAX_BURST_DURATION 180 +/* Increase dwell time for P2P search in ms */ +#define P2P_SEARCH_DWELL_TIME_INC 20 + +#define PROBE_REQ_BITMAP_LEN 8 +#define MAX_PROBE_REQ_OUIS 16 + +#define RSSI_WEIGHTAGE 20 +#define HT_CAPABILITY_WEIGHTAGE 2 +#define VHT_CAP_WEIGHTAGE 1 +#define HE_CAP_WEIGHTAGE 2 +#define CHAN_WIDTH_WEIGHTAGE 17 +#define CHAN_BAND_WEIGHTAGE 2 +#define NSS_WEIGHTAGE 16 +#define BEAMFORMING_CAP_WEIGHTAGE 2 +#define PCL_WEIGHT 10 +#define CHANNEL_CONGESTION_WEIGHTAGE 5 +#define OCE_WAN_WEIGHTAGE 0 +#define BEST_CANDIDATE_MAX_WEIGHT 100 +#define MAX_BSS_SCORE 200 +#define MAX_INDEX_SCORE 100 +#define MAX_INDEX_PER_INI 4 +#define SAE_PK_AP_WEIGHTAGE 3 + +#define BEST_CANDIDATE_MAX_BSS_SCORE (MAX_BSS_SCORE * 100) + +#define WLAN_GET_BITS(_val, _index, _num_bits) \ + (((_val) >> (_index)) & ((1 << (_num_bits)) - 1)) + +#define WLAN_SET_BITS(_var, _index, _num_bits, _val) do { \ + (_var) &= ~(((1 << (_num_bits)) - 1) << (_index)); \ + (_var) |= (((_val) & ((1 << (_num_bits)) - 1)) << (_index)); \ + } while (0) + +#define WLAN_GET_SCORE_PERCENTAGE(value32, bw_index) \ + WLAN_GET_BITS(value32, (8 * (bw_index)), 8) +#define WLAN_SET_SCORE_PERCENTAGE(value32, score_pcnt, bw_index) \ + WLAN_SET_BITS(value32, (8 * (bw_index)), 8, score_pcnt) +#define TBTT_INFO_COUNT 16 + +/* forward declaration */ +struct wlan_objmgr_vdev; +struct wlan_objmgr_pdev; +struct wlan_objmgr_psoc; + +/** + * struct channel_info - BSS channel information + * @chan_freq: channel frequency + * @cfreq0: channel frequency index0 + * @cfreq1: channel frequency index1 + * @priv: channel private information + */ +struct channel_info { + uint32_t chan_freq; + uint32_t cfreq0; + uint32_t cfreq1; + void *priv; +}; + +/** + * struct element_info - defines length of a memory block and memory block + * @len: length of memory block + * @ptr: memory block pointer + */ +struct element_info { + uint32_t len; + uint8_t *ptr; +}; + +/** + * struct ie_list - pointers to various IEs + * @tim: pointer to tim ie + * @country: pointer to country ie + * @ssid: pointer to ssid ie + * @rates: pointer to supported rates ie + * @xrates: pointer to extended supported rate ie + * @ds_param: pointer to ds params + * @csa: pointer to csa ie + * @xcsa: pointer to extended csa ie + * @mcst: pointer to maximum channel switch time ie + * @wpa: pointer to wpa ie + * @wcn: pointer to wcn ie + * @rsn: pointer to rsn ie + * @wps: pointer to wps ie + * @wmeinfo: pointer to wmeinfo ie + * @wmeparam: pointer to wmeparam ie + * @quiet: pointer to quiet ie + * @htcap: pointer to htcap ie + * @htinfo: pointer to htinfo ie + * @athcaps: pointer to athcaps ie + * @athextcaps: pointer to extended athcaps ie + * @sfa: pointer to sfa ie + * @vendor: pointer to vendor ie + * @qbssload: pointer to qbssload ie + * @wapi: pointer to wapi ie + * @p2p: pointer to p2p ie + * @alt_wcn: pointer to alternate wcn ie + * @extcaps: pointer to extended caps ie + * @ibssdfs: pointer to ibssdfs ie + * @sonadv: pointer to wifi son ie + * @vhtcap: pointer to vhtcap ie + * @vhtop: pointer to vhtop ie + * @opmode: pointer to opmode ie + * @cswrp: pointer to channel switch announcement wrapper ie + * @widebw: pointer to wide band channel switch sub ie + * @txpwrenvlp: pointer to tx power envelop sub ie + * @hecap: pointer to hecap ie + * @hecap_6g: pointer to he 6ghz cap ie + * @srp: pointer to spatial reuse parameter sub extended ie + * @fils_indication: pointer to FILS indication ie + * @esp: pointer to ESP indication ie + * @mbo_oce: pointer to mbo/oce indication ie + * @rnrie: reduced neighbor report IE + * @adaptive_11r: pointer to adaptive 11r IE + * @single_pmk: Pointer to sae single pmk IE + * @rsnxe: Pointer to rsnxe IE + */ +struct ie_list { + uint8_t *tim; + uint8_t *country; + uint8_t *ssid; + uint8_t *rates; + uint8_t *xrates; + uint8_t *ds_param; + uint8_t *csa; + uint8_t *xcsa; + uint8_t *mcst; + uint8_t *wpa; + uint8_t *wcn; + uint8_t *rsn; + uint8_t *wps; + uint8_t *wmeinfo; + uint8_t *wmeparam; + uint8_t *quiet; + uint8_t *htcap; + uint8_t *htinfo; + uint8_t *athcaps; + uint8_t *athextcaps; + uint8_t *sfa; + uint8_t *vendor; + uint8_t *qbssload; + uint8_t *wapi; + uint8_t *p2p; + uint8_t *alt_wcn; + uint8_t *extcaps; + uint8_t *ibssdfs; + uint8_t *sonadv; + uint8_t *vhtcap; + uint8_t *vhtop; + uint8_t *opmode; + uint8_t *cswrp; + uint8_t *widebw; + uint8_t *txpwrenvlp; + uint8_t *bwnss_map; + uint8_t *secchanoff; + uint8_t *mdie; + uint8_t *hecap; + uint8_t *hecap_6g; + uint8_t *heop; + uint8_t *srp; + uint8_t *fils_indication; + uint8_t *esp; + uint8_t *mbo_oce; + uint8_t *muedca; + uint8_t *rnrie; + uint8_t *extender; + uint8_t *adaptive_11r; + uint8_t *single_pmk; + uint8_t *rsnxe; +}; + +enum scan_entry_connection_state { + SCAN_ENTRY_CON_STATE_NONE, + SCAN_ENTRY_CON_STATE_AUTH, + SCAN_ENTRY_CON_STATE_ASSOC +}; + +/** + * struct mlme_info - mlme specific info + * temporarily maintained in scan cache for backward compatibility. + * must be removed as part of umac convergence. + * @bad_ap_time: time when this ap was marked bad + * @status: status + * @rank: rank + * @utility: utility + * @assoc_state: association state + * @chanload: channel load + */ +struct mlme_info { + qdf_time_t bad_ap_time; + uint32_t status; + uint32_t rank; + uint32_t utility; + uint32_t assoc_state; + uint32_t chanload; +}; + +/** + * struct bss_info - information required to uniquely define a bss + * @freq: freq of operating primary channel + * @ssid: ssid of bss + * @bssid: bssid of bss + */ +struct bss_info { + uint32_t freq; + struct wlan_ssid ssid; + struct qdf_mac_addr bssid; +}; + +#define SCAN_NODE_ACTIVE_COOKIE 0x1248F842 +/** + * struct scan_cache_node - Scan cache entry node + * @node: node pointers + * @ref_cnt: ref count if in use + * @cookie: cookie to check if entry is logically active + * @entry: scan entry pointer + */ +struct scan_cache_node { + qdf_list_node_t node; + qdf_atomic_t ref_cnt; + uint32_t cookie; + struct scan_cache_entry *entry; +}; + +struct security_info { + enum wlan_enc_type uc_enc; + enum wlan_enc_type mc_enc; + enum wlan_auth_type auth_type; +}; + +/** + * struct scan_mbssid_info - Scan mbssid information + * @profile_num: profile number + * @profile_count: total profile count + * @trans_bssid: TX BSSID address + */ +struct scan_mbssid_info { + uint8_t profile_num; + uint8_t profile_count; + uint8_t trans_bssid[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct rnr_bss_info - Reduced Neighbor Report BSS information + * @neighbor_ap_tbtt_offset: Neighbor AP TBTT offset + * @channel_number: channel number + * @operating_class: operting class + * @bssid: BSS MAC address + * @short_ssid: short ssid + * @bss_params: BSS parameters + */ +struct rnr_bss_info { + uint8_t neighbor_ap_tbtt_offset; + uint32_t channel_number; + uint32_t operating_class; + struct qdf_mac_addr bssid; + uint32_t short_ssid; + uint8_t bss_params; +}; + +/** + * struct tbtt_information_header - TBTT information header + * @tbbt_info_fieldtype: TBTT information field type + * @filter_neighbor_ap: filtered neighbor ap + * @tbbt_info_count: TBTT information count + * @tbtt_info_length: TBTT informaiton length + */ +struct tbtt_information_header { + uint16_t tbbt_info_fieldtype:2; + uint16_t filtered_neighbor_ap:1; + uint16_t reserved:1; + uint16_t tbtt_info_count:4; + uint16_t tbtt_info_length:8; +}; + +/** + * struct neighbor_ap_info_field - Neighbor information field + * @tbtt_info_header: TBTT information header + * @operting_class: operating class + * @channel_number: channel number + */ +struct neighbor_ap_info_field { + struct tbtt_information_header tbtt_header; + uint8_t operting_class; + uint8_t channel_number; +}; + +/** + * enum tbtt_information_field - TBTT information field + * @TBTT_NEIGHBOR_AP_OFFSET_ONLY: TBTT information field type + * @TBTT_NEIGHBOR_AP_BSS_PARAM: neighbor AP and bss param + * @TBTT_NEIGHBOR_AP_SHORTSSID: neighbor AP and Short ssid + * @TBTT_NEIGHBOR_AP_S_SSID_BSS_PARAM: neighbor AP, short ssid and bss param + * @TBTT_NEIGHBOR_AP_BSSID: neighbor AP and bssid + * @TBTT_NEIGHBOR_AP_BSSID_BSS_PARAM: neighbor AP, bssid and bss param + * @TBTT_NEIGHBOR_AP_BSSSID_S_SSID: neighbor AP, bssid and short ssid + * @TBTT_NEIGHBOR_AP_BSSID_S_SSID_BSS_PARAM: neighbor AP, bssid, short ssid + * and bss params + */ +enum tbtt_information_field { + TBTT_NEIGHBOR_AP_OFFSET_ONLY = 1, + TBTT_NEIGHBOR_AP_BSS_PARAM = 2, + TBTT_NEIGHBOR_AP_SHORTSSID = 5, + TBTT_NEIGHBOR_AP_S_SSID_BSS_PARAM = 6, + TBTT_NEIGHBOR_AP_BSSID = 7, + TBTT_NEIGHBOR_AP_BSSID_BSS_PARAM = 8, + TBTT_NEIGHBOR_AP_BSSSID_S_SSID = 11, + TBTT_NEIGHBOR_AP_BSSID_S_SSID_BSS_PARAM = 12 +}; + +/** + * struct reduced_neighbor_report - Reduced Neighbor Report + * @bss_info: RNR BSS Information + */ +struct reduced_neighbor_report { + struct rnr_bss_info bss_info[MAX_RNR_BSS]; +}; + +#define SCAN_SECURITY_TYPE_WEP 0x01 +#define SCAN_SECURITY_TYPE_WPA 0x02 +#define SCAN_SECURITY_TYPE_WAPI 0x04 +#define SCAN_SECURITY_TYPE_RSN 0x08 + +/** + * struct scan_cache_entry: structure containing scan entry + * @frm_subtype: updated from beacon/probe + * @bssid: bssid + * @mac_addr: mac address + * @ssid: ssid + * @is_hidden_ssid: is AP having hidden ssid. + * @security_type: security supported + * @seq_num: sequence number + * @phy_mode: Phy mode of the AP + * @avg_rssi: Average RSSI of the AP + * @rssi_raw: The rssi of the last beacon/probe received + * @snr: The snr of the last beacon/probe received + * @avg_snr: Average SNR of the AP + * @bcn_int: Beacon interval of the AP + * @cap_info: Capability of the AP + * @tsf_info: TSF info + * @erp: erp info + * @dtim_period: dtime period + * @air_time_fraction: Air time fraction from ESP param + * @qbss_chan_load: Qbss channel load + * @nss: supported NSS information + * @is_p2p_ssid: is P2P entry + * @adaptive_11r_ap: flag to check if AP supports adaptive 11r + * @scan_entry_time: boottime in microsec when last beacon/probe is received + * @rssi_timestamp: boottime in microsec when RSSI was updated + * @hidden_ssid_timestamp: boottime in microsec when hidden + * ssid was received + * @mbssid_info: Multi bssid information + * @rnr: Reduced neighbor report information + * @channel: channel info on which AP is present + * @channel_mismatch: if channel received in metadata + * doesnot match the one in beacon + * @tsf_delta: TSF delta + * @bss_score: bss score calculated on basis of RSSI/caps etc. + * @neg_sec_info: negotiated security info + * @per_chain_rssi: per chain RSSI value received. + * boottime_ns: boottime in ns. + * @rrm_parent_tsf: RRM parent tsf + * @mlme_info: Mlme info, this will be updated by MLME for the scan entry + * @alt_wcn_ie: alternate WCN IE + * @ie_list: IE list pointers + * @raw_frame: contain raw frame and the length of the raw frame + * @pdev_id: pdev id + */ +struct scan_cache_entry { + uint8_t frm_subtype; + struct qdf_mac_addr bssid; + struct qdf_mac_addr mac_addr; + struct wlan_ssid ssid; + bool is_hidden_ssid; + uint8_t security_type; + uint16_t seq_num; + enum wlan_phymode phy_mode; + int32_t avg_rssi; + int8_t rssi_raw; + uint8_t snr; + uint32_t avg_snr; + uint16_t bcn_int; + union wlan_capability cap_info; + union { + uint8_t data[8]; + uint64_t tsf; + } tsf_info; + uint8_t erp; + uint8_t dtim_period; + uint8_t air_time_fraction; + uint8_t qbss_chan_load; + uint8_t nss; + bool is_p2p; + bool adaptive_11r_ap; + qdf_time_t scan_entry_time; + qdf_time_t rssi_timestamp; + qdf_time_t hidden_ssid_timestamp; + struct scan_mbssid_info mbssid_info; + struct reduced_neighbor_report rnr; + struct channel_info channel; + bool channel_mismatch; + struct mlme_info mlme_info; + uint32_t tsf_delta; + uint32_t bss_score; + struct security_info neg_sec_info; + uint8_t per_chain_rssi[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA]; + uint64_t boottime_ns; + uint32_t rrm_parent_tsf; + struct element_info alt_wcn_ie; + struct ie_list ie_list; + struct element_info raw_frame; + /* + * This is added temporarily for 6GHz channel to freq conversion + * to get pdev wherever it requores to convert frequency to + * channel as regulatory apis requires pdev as argument + */ + uint8_t pdev_id; +}; + +#define MAX_FAVORED_BSSID 16 +#define MAX_ALLOWED_SSID_LIST 4 + +/** + * struct weight_config - weight params to calculate best candidate + * @rssi_weightage: RSSI weightage + * @ht_caps_weightage: HT caps weightage + * @vht_caps_weightage: VHT caps weightage + * @he_caps_weightage: HE caps weightage + * @chan_width_weightage: Channel width weightage + * @chan_band_weightage: Channel band weightage + * @nss_weightage: NSS weightage + * @beamforming_cap_weightage: Beamforming caps weightage + * @pcl_weightage: PCL weightage + * @channel_congestion_weightage: channel congestion weightage + * @oce_wan_weightage: OCE WAN metrics weightage + * @sae_pk_ap_weightage: SAE-PK AP weigtage + */ +struct weight_config { + uint8_t rssi_weightage; + uint8_t ht_caps_weightage; + uint8_t vht_caps_weightage; + uint8_t he_caps_weightage; + uint8_t chan_width_weightage; + uint8_t chan_band_weightage; + uint8_t nss_weightage; + uint8_t beamforming_cap_weightage; + uint8_t pcl_weightage; + uint8_t channel_congestion_weightage; + uint8_t oce_wan_weightage; + uint8_t sae_pk_ap_weightage; +}; + +/** + * struct rssi_cfg_score - rssi related params for scoring logic + * @best_rssi_threshold: RSSI weightage + * @good_rssi_threshold: HT caps weightage + * @bad_rssi_threshold: VHT caps weightage + * @good_rssi_pcnt: HE caps weightage + * @bad_rssi_pcnt: Channel width weightage + * @good_rssi_bucket_size: Channel band weightage + * @bad_rssi_bucket_size: NSS weightage + * @rssi_pref_5g_rssi_thresh: Beamforming caps weightage + */ +struct rssi_cfg_score { + uint32_t best_rssi_threshold; + uint32_t good_rssi_threshold; + uint32_t bad_rssi_threshold; + uint32_t good_rssi_pcnt; + uint32_t bad_rssi_pcnt; + uint32_t good_rssi_bucket_size; + uint32_t bad_rssi_bucket_size; + uint32_t rssi_pref_5g_rssi_thresh; +}; + +/** + * struct per_slot_scoring - define % score for differents slots for a + * scoring param. + * num_slot: number of slots in which the param will be divided. + * Max 15. index 0 is used for 'not_present. Num_slot will + * equally divide 100. e.g, if num_slot = 4 slot 0 = 0-25%, slot + * 1 = 26-50% slot 2 = 51-75%, slot 3 = 76-100% + * score_pcnt3_to_0: Conatins score percentage for slot 0-3 + * BITS 0-7 :- the scoring pcnt when not present + * BITS 8-15 :- SLOT_1 + * BITS 16-23 :- SLOT_2 + * BITS 24-31 :- SLOT_3 + * score_pcnt7_to_4: Conatins score percentage for slot 4-7 + * BITS 0-7 :- SLOT_4 + * BITS 8-15 :- SLOT_5 + * BITS 16-23 :- SLOT_6 + * BITS 24-31 :- SLOT_7 + * score_pcnt11_to_8: Conatins score percentage for slot 8-11 + * BITS 0-7 :- SLOT_8 + * BITS 8-15 :- SLOT_9 + * BITS 16-23 :- SLOT_10 + * BITS 24-31 :- SLOT_11 + * score_pcnt15_to_12: Conatins score percentage for slot 12-15 + * BITS 0-7 :- SLOT_12 + * BITS 8-15 :- SLOT_13 + * BITS 16-23 :- SLOT_14 + * BITS 24-31 :- SLOT_15 + */ +struct per_slot_scoring { + uint32_t num_slot; + uint32_t score_pcnt3_to_0; + uint32_t score_pcnt7_to_4; + uint32_t score_pcnt11_to_8; + uint32_t score_pcnt15_to_12; +}; + +/** + * struct scoring_config - Scoring related configuration + * @weight_cfg: weigtage config for config + * @rssi_score: Rssi related config for scoring config + * @esp_qbss_scoring: esp and qbss related scoring config + * @oce_wan_scoring: oce related scoring config + * @bandwidth_weight_per_index: BW wight per index + * @nss_weight_per_index: nss weight per index + * @band_weight_per_index: band weight per index + * @cb_mode_24G: cb mode supprted for 2.4Ghz + * @cb_mode_5G: cb mode supprted for 5Ghz + * @nss: Number of NSS the device support + * @ht_cap: If dev is configured as HT capable + * @vht_cap:If dev is configured as VHT capable + * @he_cap: If dev is configured as HE capable + * @vht_24G_cap:If dev is configured as VHT capable for 2.4Ghz + * @beamformee_cap:If dev is configured as BF capable + */ +struct scoring_config { + struct weight_config weight_cfg; + struct rssi_cfg_score rssi_score; + struct per_slot_scoring esp_qbss_scoring; + struct per_slot_scoring oce_wan_scoring; + uint32_t bandwidth_weight_per_index; + uint32_t nss_weight_per_index; + uint32_t band_weight_per_index; + uint8_t cb_mode_24G; + uint8_t cb_mode_5G; + uint8_t vdev_nss_24g; + uint8_t vdev_nss_5g; + uint8_t ht_cap:1, + vht_cap:1, + he_cap:1, + vht_24G_cap:1, + beamformee_cap:1; +}; + +#define WLAN_SCAN_FILTER_NUM_SSID 5 +#define WLAN_SCAN_FILTER_NUM_BSSID 5 + +#define REAM_HASH_LEN 2 +#define CACHE_IDENTIFIER_LEN 2 +#define HESSID_LEN 6 + +/** + * struct fils_filter_info: FILS info present in scan filter + * @realm_check: whether realm check is required + * @fils_realm: realm hash value + * @security_type: type of security supported + */ +struct fils_filter_info { + bool realm_check; + uint8_t fils_realm[REAM_HASH_LEN]; + uint8_t security_type; +}; + +/** + * @bss_scoring_required :- flag to bypass scoring filtered results + * @enable_adaptive_11r: flag to check if adaptive 11r ini is enabled + * @age_threshold: If set return entry which are newer than the age_threshold + * @p2p_results: If only p2p entries is required + * @rrm_measurement_filter: For measurement reports.if set, only SSID, BSSID + * and channel is considered for filtering. + * @num_of_bssid: number of bssid passed + * @num_of_ssid: number of ssid + * @num_of_channels: number of channels + * @num_of_auth: number of auth types + * @num_of_enc_type: number of unicast enc type + * @num_of_mc_enc_type: number of multicast enc type + * @pmf_cap: Pmf capability + * @ignore_pmf_cap: Ignore pmf capability match + * @num_of_pcl_channels: number of pcl channels + * @bss_type: bss type BSS/IBSS etc + * @dot11_mode: operating modes 0 mean any + * 11a , 11g, 11n , 11ac , 11b etc + * @band: to get specific band 2.4G, 5G or 4.9 G + * @rssi_threshold: AP having RSSI greater than + * rssi threasholed (ignored if set 0) + * @only_wmm_ap: If only Qos AP is needed + * @ignore_auth_enc_type: Ignore enc type if + * this is set (For WPS/OSEN connection) + * @mobility_domain: Mobility domain for 11r + * @country[3]: Ap with specific country code + * @bssid_list: bssid list + * @ssid_list: ssid list + * @chan_freq_list: channel frequency list, frequency unit: MHz + * @auth_type: auth type list + * @enc_type: unicast enc type list + * @mc_enc_type: multicast cast enc type list + * @pcl_freq_list: PCL channel frequency list, frequency unit: MHz + * @fils_scan_filter: FILS info + * @pcl_weight_list: PCL Weight list + * @bssid_hint: Mac address of bssid_hint + */ +struct scan_filter { + bool bss_scoring_required; + bool enable_adaptive_11r; + qdf_time_t age_threshold; + uint32_t p2p_results; + uint32_t rrm_measurement_filter; + uint32_t num_of_bssid; + uint32_t num_of_ssid; + uint32_t num_of_channels; + uint32_t num_of_auth; + uint32_t num_of_enc_type; + uint32_t num_of_mc_enc_type; + enum wlan_pmf_cap pmf_cap; + bool ignore_pmf_cap; + uint32_t num_of_pcl_channels; + enum wlan_bss_type bss_type; + enum wlan_phymode dot11_mode; + enum wlan_band band; + uint32_t rssi_threshold; + uint32_t only_wmm_ap; + uint32_t ignore_auth_enc_type; + uint32_t mobility_domain; + /* Variable params list */ + uint8_t country[3]; + struct qdf_mac_addr bssid_list[WLAN_SCAN_FILTER_NUM_BSSID]; + struct wlan_ssid ssid_list[WLAN_SCAN_FILTER_NUM_SSID]; + uint32_t chan_freq_list[NUM_CHANNELS]; + enum wlan_auth_type auth_type[WLAN_NUM_OF_SUPPORT_AUTH_TYPE]; + enum wlan_enc_type enc_type[WLAN_NUM_OF_ENCRYPT_TYPE]; + enum wlan_enc_type mc_enc_type[WLAN_NUM_OF_ENCRYPT_TYPE]; + uint32_t pcl_freq_list[NUM_CHANNELS]; + struct fils_filter_info fils_scan_filter; + uint8_t pcl_weight_list[NUM_CHANNELS]; + struct qdf_mac_addr bssid_hint; +}; + +/** + * enum scan_disable_reason - scan enable/disable reason + * @REASON_SUSPEND: reason is suspend + * @REASON_SYSTEM_DOWN: reason is system going down + * @REASON_USER_SPACE: reason is user space initiated + * @REASON_VDEV_DOWN: reason is vdev going down + */ +enum scan_disable_reason { + REASON_SUSPEND = 0x1, + REASON_SYSTEM_DOWN = 0x2, + REASON_USER_SPACE = 0x4, + REASON_VDEV_DOWN = 0x8, +}; + +/** + * enum scan_priority - scan priority definitions + * @SCAN_PRIORITY_VERY_LOW: very low priority + * @SCAN_PRIORITY_LOW: low scan priority + * @SCAN_PRIORITY_MEDIUM: medium priority + * @SCAN_PRIORITY_HIGH: high priority + * @SCAN_PRIORITY_VERY_HIGH: very high priority + * @SCAN_PRIORITY_COUNT: number of priorities supported + */ +enum scan_priority { + SCAN_PRIORITY_VERY_LOW, + SCAN_PRIORITY_LOW, + SCAN_PRIORITY_MEDIUM, + SCAN_PRIORITY_HIGH, + SCAN_PRIORITY_VERY_HIGH, + SCAN_PRIORITY_COUNT, +}; + +/** + * enum scan_phy_mode - phymode used for scan + * @SCAN_PHY_MODE_11A: 11a mode + * @SCAN_PHY_MODE_11G: 11g mode + * @SCAN_PHY_MODE_11B: 11b mode + * @SCAN_PHY_MODE_11GONLY: 11g only mode + * @SCAN_PHY_MODE_11NA_HT20: 11na ht20 mode + * @SCAN_PHY_MODE_11NG_HT20: 11ng ht20 mode + * @SCAN_PHY_MODE_11NA_HT40: 11na ht40 mode + * @SCAN_PHY_MODE_11NG_HT40: 11ng ht40 mode + * @SCAN_PHY_MODE_11AC_VHT20: 11ac vht20 mode + * @SCAN_PHY_MODE_11AC_VHT40: 11ac vht40 mode + * @SCAN_PHY_MODE_11AC_VHT80: 11ac vht80 mode + * @SCAN_PHY_MODE_11AC_VHT20_2G: 2GHz 11ac vht20 mode + * @SCAN_PHY_MODE_11AC_VHT40_2G: 2GHz 11ac vht40 mode + * @SCAN_PHY_MODE_11AC_VHT80_2G: 2GHz 11ac vht80 mode + * @SCAN_PHY_MODE_11AC_VHT80_80: 11ac vht 80+80 mode + * @SCAN_PHY_MODE_11AC_VHT160: 11ac vht160 mode + * @SCAN_PHY_MODE_11AX_HE20: 11ax he20 mode + * @SCAN_PHY_MODE_11AX_HE40: 11ax he40 mode + * @SCAN_PHY_MODE_11AX_HE80: 11ax he80 mode + * @SCAN_PHY_MODE_11AX_HE80_80: 11ax he80+80 mode + * @SCAN_PHY_MODE_11AX_HE160: 11ax he160 mode + * @SCAN_PHY_MODE_11AX_HE20_2G: 2GHz 11ax he20 mode + * @SCAN_PHY_MODE_11AX_HE40_2G: 2GHz 11ax he40 mode + * @SCAN_PHY_MODE_11AX_HE80_2G: 2GHz 11ax he80 mode + * @SCAN_PHY_MODE_UNKNOWN: unknown phy mode + * @SCAN_PHY_MODE_MAX: max valid phymode + */ +enum scan_phy_mode { + SCAN_PHY_MODE_11A = 0, + SCAN_PHY_MODE_11G = 1, + SCAN_PHY_MODE_11B = 2, + SCAN_PHY_MODE_11GONLY = 3, + SCAN_PHY_MODE_11NA_HT20 = 4, + SCAN_PHY_MODE_11NG_HT20 = 5, + SCAN_PHY_MODE_11NA_HT40 = 6, + SCAN_PHY_MODE_11NG_HT40 = 7, + SCAN_PHY_MODE_11AC_VHT20 = 8, + SCAN_PHY_MODE_11AC_VHT40 = 9, + SCAN_PHY_MODE_11AC_VHT80 = 10, + SCAN_PHY_MODE_11AC_VHT20_2G = 11, + SCAN_PHY_MODE_11AC_VHT40_2G = 12, + SCAN_PHY_MODE_11AC_VHT80_2G = 13, + SCAN_PHY_MODE_11AC_VHT80_80 = 14, + SCAN_PHY_MODE_11AC_VHT160 = 15, + SCAN_PHY_MODE_11AX_HE20 = 16, + SCAN_PHY_MODE_11AX_HE40 = 17, + SCAN_PHY_MODE_11AX_HE80 = 18, + SCAN_PHY_MODE_11AX_HE80_80 = 19, + SCAN_PHY_MODE_11AX_HE160 = 20, + SCAN_PHY_MODE_11AX_HE20_2G = 21, + SCAN_PHY_MODE_11AX_HE40_2G = 22, + SCAN_PHY_MODE_11AX_HE80_2G = 23, + SCAN_PHY_MODE_UNKNOWN = 24, + SCAN_PHY_MODE_MAX = 24 +}; + +/** + * enum scan_dwelltime_adaptive_mode: dwelltime_mode + * @SCAN_DWELL_MODE_DEFAULT: Use firmware default mode + * @SCAN_DWELL_MODE_CONSERVATIVE: Conservative adaptive mode + * @SCAN_DWELL_MODE_MODERATE: Moderate adaptive mode + * @SCAN_DWELL_MODE_AGGRESSIVE: Aggressive adaptive mode + * @SCAN_DWELL_MODE_STATIC: static adaptive mode + */ +enum scan_dwelltime_adaptive_mode { + SCAN_DWELL_MODE_DEFAULT = 0, + SCAN_DWELL_MODE_CONSERVATIVE = 1, + SCAN_DWELL_MODE_MODERATE = 2, + SCAN_DWELL_MODE_AGGRESSIVE = 3, + SCAN_DWELL_MODE_STATIC = 4 +}; + +/** + * struct scan_random_attr - holds scan randomization attrs + * @randomize: set to true for scan randomization + * @mac_addr: mac addr to be randomized + * @mac_mask: used to represent bits in mac_addr for randomization + */ +struct scan_random_attr { + bool randomize; + uint8_t mac_addr[QDF_MAC_ADDR_SIZE]; + uint8_t mac_mask[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct probe_req_whitelist_attr - holds probe req ie whitelist attrs + * @white_list: enable/disable whitelist + * @ie_bitmap: bitmap of IEs to be enabled + * @num_vendor_oui: number of vendor OUIs + * @voui: vendor oui buffer + */ +struct probe_req_whitelist_attr { + bool white_list; + uint32_t ie_bitmap[PROBE_REQ_BITMAP_LEN]; + uint32_t num_vendor_oui; + uint32_t voui[MAX_PROBE_REQ_OUIS]; +}; + +/** + * struct chan_info - channel information + * @freq: frequency to scan + * @phymode: phymode in which @frequency should be scanned + */ +struct chan_info { + qdf_freq_t freq; + uint32_t phymode; +}; + +/** + * struct chan_list - list of frequencies to be scanned + * and their phymode + * @num_chan: number of channels to scan + * @chan: channel parameters used for this scan + */ +struct chan_list { + uint8_t num_chan; + struct chan_info chan[NUM_CHANNELS]; +}; + +/** + * struct hint_short_ssid - short SSID hint + * and their phymode + * @freq_flags: freq unit: MHz (upper 16bits) + * flags (lower 16bits) + * @short_ssid: short SSID + */ +struct hint_short_ssid { + uint32_t freq_flags; + uint32_t short_ssid; +}; + +/** + * struct hint_bssid - BSSID hint + * and their phymode + * @freq_flags: freq unit: MHz (upper 16bits) + * flags (lower 16bits) + * @bssid: BSSID + */ +struct hint_bssid { + uint32_t freq_flags; + struct qdf_mac_addr bssid; +}; + +/** + * enum scan_request_type: scan type + * @SCAN_TYPE_DEFAULT: Def scan + * @SCAN_TYPE_P2P_SEARCH: P2P Search + * @SCAN_TYPE_P2P_LISTEN: P2P listed + * @SCAN_TYPE_RRM: RRM scan request + */ +enum scan_request_type { + SCAN_TYPE_DEFAULT = 0, + SCAN_TYPE_P2P_SEARCH = 1, + SCAN_TYPE_P2P_LISTEN = 2, + SCAN_TYPE_RRM = 3 +}; + +/** + * struct scan_req_params - start scan request parameter + * @scan_id: scan id + * @scan_req_id: scan requester id + * @vdev_id: vdev id where scan was originated + * @pdev_id: pdev id of parent pdev + * @scan_priority: scan priority + * @scan_ev_started: notify scan started event + * @scan_ev_completed: notify scan completed event + * @scan_ev_bss_chan: notify bss chan event + * @scan_ev_foreign_chan: notify foreign chan event + * @scan_ev_dequeued: notify scan request dequed event + * @scan_ev_preempted: notify scan preempted event + * @scan_ev_start_failed: notify scan start failed event + * @scan_ev_restarted: notify scan restarted event + * @scan_ev_foreign_chn_exit: notify foreign chan exit event + * @scan_ev_invalid: notify invalid scan request event + * @scan_ev_gpio_timeout: notify gpio timeout event + * @scan_ev_suspended: notify scan suspend event + * @scan_ev_resumed: notify scan resumed event + * @scan_events: variable to read and set scan_ev_* flags in one shot + * can be used to dump all scan_ev_* flags for debug + * @dwell_time_active: active dwell time + * @dwell_time_active_2g: active dwell time for 2G channels, if it's not zero + * @dwell_time_passive: passive dwell time + * @dwell_time_active_6g: 6Ghz active dwell time + * @dwell_time_passive_6g: 6Ghz passive dwell time + * @min_rest_time: min rest time + * @max_rest_time: max rest time + * @repeat_probe_time: repeat probe time + * @probe_spacing_time: probe spacing time + * @idle_time: idle time + * @max_scan_time: max scan time + * @probe_delay: probe delay + * @scan_offset_time: Support split scanning on the + * same channel for CBS feature. + * @scan_f_passive: passively scan all channels including active channels + * @scan_f_bcast_probe: add wild card ssid prbreq even if ssid_list is specified + * @scan_f_cck_rates: add cck rates to rates/xrates ie in prb req + * @scan_f_ofdm_rates: add ofdm rates to rates/xrates ie in prb req + * @scan_f_chan_stat_evnt: enable indication of chan load and noise floor + * @scan_f_filter_prb_req: filter Probe request frames + * @scan_f_bypass_dfs_chn: when set, do not scan DFS channels + * @scan_f_continue_on_err:continue scan even if few certain erros have occurred + * @scan_f_offchan_mgmt_tx: allow mgmt transmission during off channel scan + * @scan_f_offchan_data_tx: allow data transmission during off channel scan + * @scan_f_promisc_mode: scan with promiscuous mode + * @scan_f_capture_phy_err: enable capture ppdu with phy errrors + * @scan_f_strict_passive_pch: do passive scan on passive channels + * @scan_f_half_rate: enable HALF (10MHz) rate support + * @scan_f_quarter_rate: set Quarter (5MHz) rate support + * @scan_f_force_active_dfs_chn: allow to send probe req on DFS channel + * @scan_f_add_tpc_ie_in_probe: add TPC ie in probe req frame + * @scan_f_add_ds_ie_in_probe: add DS ie in probe req frame + * @scan_f_add_spoofed_mac_in_probe: use random mac address for TA in probe + * @scan_f_add_rand_seq_in_probe: use random sequence number in probe + * @scan_f_en_ie_whitelist_in_probe: enable ie whitelist in probe + * @scan_f_forced: force scan even in presence of data traffic + * @scan_f_2ghz: scan 2.4 GHz channels + * @scan_f_5ghz: scan 5 GHz channels + * @scan_f_wide_band: scan in 40 MHz or higher bandwidth + * @scan_flags: variable to read and set scan_f_* flags in one shot + * can be used to dump all scan_f_* flags for debug + * @burst_duration: burst duration + * @num_bssid: no of bssid + * @num_ssids: no of ssid + * @n_probes: no of probe + * @chan_list: channel list + * @ssid: ssid list + * @bssid_list: Lisst of bssid to scan + * @scan_random: scan randomization params + * @ie_whitelist: probe req IE whitelist attrs + * @extraie: list of optional/vendor specific ie's to be added in probe requests + * @htcap: htcap ie + * @vhtcap: vhtcap ie + * @scan_ctrl_flags_ext: scan control flag extended + * @num_hint_s_ssid: number of short SSID hints + * @num_hint_bssid: number of BSSID hints + * @hint_s_ssid: short SSID hints + * @hint_bssid: BSSID hints + */ + +struct scan_req_params { + uint32_t scan_id; + uint32_t scan_req_id; + uint32_t vdev_id; + uint32_t pdev_id; + enum scan_priority scan_priority; + enum scan_request_type scan_type; + union { + struct { + uint32_t scan_ev_started:1, + scan_ev_completed:1, + scan_ev_bss_chan:1, + scan_ev_foreign_chan:1, + scan_ev_dequeued:1, + scan_ev_preempted:1, + scan_ev_start_failed:1, + scan_ev_restarted:1, + scan_ev_foreign_chn_exit:1, + scan_ev_invalid:1, + scan_ev_gpio_timeout:1, + scan_ev_suspended:1, + scan_ev_resumed:1; + }; + uint32_t scan_events; + }; + uint32_t dwell_time_active; + uint32_t dwell_time_active_2g; + uint32_t dwell_time_passive; + uint32_t dwell_time_active_6g; + uint32_t dwell_time_passive_6g; + uint32_t min_rest_time; + uint32_t max_rest_time; + uint32_t repeat_probe_time; + uint32_t probe_spacing_time; + uint32_t idle_time; + uint32_t max_scan_time; + uint32_t probe_delay; + uint32_t scan_offset_time; + union { + struct { + uint32_t scan_f_passive:1, + scan_f_bcast_probe:1, + scan_f_cck_rates:1, + scan_f_ofdm_rates:1, + scan_f_chan_stat_evnt:1, + scan_f_filter_prb_req:1, + scan_f_bypass_dfs_chn:1, + scan_f_continue_on_err:1, + scan_f_offchan_mgmt_tx:1, + scan_f_offchan_data_tx:1, + scan_f_promisc_mode:1, + scan_f_capture_phy_err:1, + scan_f_strict_passive_pch:1, + scan_f_half_rate:1, + scan_f_quarter_rate:1, + scan_f_force_active_dfs_chn:1, + scan_f_add_tpc_ie_in_probe:1, + scan_f_add_ds_ie_in_probe:1, + scan_f_add_spoofed_mac_in_probe:1, + scan_f_add_rand_seq_in_probe:1, + scan_f_en_ie_whitelist_in_probe:1, + scan_f_forced:1, + scan_f_2ghz:1, + scan_f_5ghz:1, + scan_f_wide_band:1; + }; + uint32_t scan_flags; + }; + union { + struct { + uint32_t scan_policy_high_accuracy:1, + scan_policy_low_span:1, + scan_policy_low_power:1; + }; + uint32_t scan_policy_type; + }; + + enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode; + uint32_t burst_duration; + uint32_t num_bssid; + uint32_t num_ssids; + uint32_t n_probes; + struct chan_list chan_list; + struct wlan_ssid ssid[WLAN_SCAN_MAX_NUM_SSID]; + struct qdf_mac_addr bssid_list[WLAN_SCAN_MAX_NUM_BSSID]; + struct scan_random_attr scan_random; + struct probe_req_whitelist_attr ie_whitelist; + struct element_info extraie; + struct element_info htcap; + struct element_info vhtcap; + uint32_t scan_ctrl_flags_ext; + uint32_t num_hint_s_ssid; + uint32_t num_hint_bssid; + struct hint_short_ssid hint_s_ssid[WLAN_SCAN_MAX_HINT_S_SSID]; + struct hint_bssid hint_bssid[WLAN_SCAN_MAX_HINT_BSSID]; +}; + +/** + * struct scan_start_request - scan request config + * @vdev: vdev + * @scan_req: common scan start request parameters + */ +struct scan_start_request { + struct wlan_objmgr_vdev *vdev; + struct scan_req_params scan_req; +}; + +/** + * enum scan_cancel_type - type specifiers for cancel scan request + * @WLAN_SCAN_CANCEL_SINGLE: cancel particular scan specified by scan_id + * @WLAN_SCAN_CANCEL_VAP_ALL: cancel all scans running on a particular vdevid + * @WLAN_SCAN_CANCEL_PDEV_ALL: cancel all scans running on parent pdev of vdevid + * @WLAN_SCAN_CANCEL_HOST_VDEV_ALL: Cancel all host triggered scans alone on + * vdev + */ +enum scan_cancel_req_type { + WLAN_SCAN_CANCEL_SINGLE = 1, + WLAN_SCAN_CANCEL_VDEV_ALL, + WLAN_SCAN_CANCEL_PDEV_ALL, + WLAN_SCAN_CANCEL_HOST_VDEV_ALL, +}; + +/** + * struct scan_cancel_param - stop scan cmd parameter + * @requester: scan requester + * @scan_id: scan id + * @req_type: scan request type + * @vdev_id: vdev id + * @pdev_id: pdev id of parent pdev + */ +struct scan_cancel_param { + uint32_t requester; + uint32_t scan_id; + enum scan_cancel_req_type req_type; + uint32_t vdev_id; + uint32_t pdev_id; +}; + +/** + * struct scan_cancel_request - stop scan cmd + * @vdev: vdev object + * @wait_tgt_cancel: wait for target to cancel scan + * @cancel_req: stop scan cmd parameter + */ +struct scan_cancel_request { + /* Extra parameters consumed by scan module or serialization */ + struct wlan_objmgr_vdev *vdev; + bool wait_tgt_cancel; + /* Actual scan cancel request parameters */ + struct scan_cancel_param cancel_req; +}; + +/** + * enum scan_event_type - scan event types + * @SCAN_EVENT_TYPE_STARTED: scan started + * @SCAN_EVENT_TYPE_COMPLETED: scan completed + * @SCAN_EVENT_TYPE_BSS_CHANNEL: HW came back to home channel + * @SCAN_EVENT_TYPE_FOREIGN_CHANNEL: HW moved to foreign channel + * @SCAN_EVENT_TYPE_DEQUEUED: scan request dequeued + * @SCAN_EVENT_TYPE_PREEMPTED: scan got preempted + * @SCAN_EVENT_TYPE_START_FAILED: couldn't start scan + * @SCAN_EVENT_TYPE_RESTARTED: scan restarted + * @SCAN_EVENT_TYPE_FOREIGN_CHANNEL_EXIT: HW exited foreign channel + * @SCAN_EVENT_TYPE_SUSPENDED: scan got suspended + * @SCAN_EVENT_TYPE_RESUMED: scan resumed + * @SCAN_EVENT_TYPE_NLO_COMPLETE: NLO completed + * @SCAN_EVENT_TYPE_NLO_MATCH: NLO match event + * @SCAN_EVENT_TYPE_INVALID: invalid request + * @SCAN_EVENT_TYPE_GPIO_TIMEOUT: gpio timeout + * @SCAN_EVENT_TYPE_RADIO_MEASUREMENT_START: radio measurement start + * @SCAN_EVENT_TYPE_RADIO_MEASUREMENT_END: radio measurement end + * @SCAN_EVENT_TYPE_BSSID_MATCH: bssid match found + * @SCAN_EVENT_TYPE_FOREIGN_CHANNEL_GET_NF: foreign channel noise floor + * @SCAN_EVENT_TYPE_MAX: marker for invalid event + */ +enum scan_event_type { + SCAN_EVENT_TYPE_STARTED, + SCAN_EVENT_TYPE_COMPLETED, + SCAN_EVENT_TYPE_BSS_CHANNEL, + SCAN_EVENT_TYPE_FOREIGN_CHANNEL, + SCAN_EVENT_TYPE_DEQUEUED, + SCAN_EVENT_TYPE_PREEMPTED, + SCAN_EVENT_TYPE_START_FAILED, + SCAN_EVENT_TYPE_RESTARTED, + SCAN_EVENT_TYPE_FOREIGN_CHANNEL_EXIT, + SCAN_EVENT_TYPE_SUSPENDED, + SCAN_EVENT_TYPE_RESUMED, + SCAN_EVENT_TYPE_NLO_COMPLETE, + SCAN_EVENT_TYPE_NLO_MATCH, + SCAN_EVENT_TYPE_INVALID, + SCAN_EVENT_TYPE_GPIO_TIMEOUT, + SCAN_EVENT_TYPE_RADIO_MEASUREMENT_START, + SCAN_EVENT_TYPE_RADIO_MEASUREMENT_END, + SCAN_EVENT_TYPE_BSSID_MATCH, + SCAN_EVENT_TYPE_FOREIGN_CHANNEL_GET_NF, + SCAN_EVENT_TYPE_MAX, +}; + +/** + * enum scan_completion_reason - scan completion reason + * @SCAN_REASON_NONE: un specified reason + * @SCAN_REASON_COMPLETED: scan successfully completed + * @SCAN_REASON_CANCELLED: scan got cancelled + * @SCAN_REASON_PREEMPTED: scan got preempted + * @SCAN_REASON_TIMEDOUT: couldnt complete within specified time + * @SCAN_REASON_INTERNAL_FAILURE: cancelled because of some failure + * @SCAN_REASON_SUSPENDED: scan suspended + * @SCAN_REASON_RUN_FAILED: run failed + * @SCAN_REASON_TERMINATION_FUNCTION: termination function + * @SCAN_REASON_MAX_OFFCHAN_RETRIES: max retries exceeded thresold + * @SCAN_REASON_DFS_VIOLATION: Scan start failure due to DFS violation. + * @SCAN_REASON_MAX: invalid completion reason marker + */ +enum scan_completion_reason { + SCAN_REASON_NONE, + SCAN_REASON_COMPLETED, + SCAN_REASON_CANCELLED, + SCAN_REASON_PREEMPTED, + SCAN_REASON_TIMEDOUT, + SCAN_REASON_INTERNAL_FAILURE, + SCAN_REASON_SUSPENDED, + SCAN_REASON_RUN_FAILED, + SCAN_REASON_TERMINATION_FUNCTION, + SCAN_REASON_MAX_OFFCHAN_RETRIES, + SCAN_REASON_DFS_VIOLATION, + SCAN_REASON_MAX, +}; + +/** + * struct scan_event - scan event definition + * @vdev_id: vdev where scan was run + * @type: type of scan event + * @reason: completion reason + * @chan_freq: channel centre frequency + * @requester: requester id + * @scan_id: scan id + * @timestamp: timestamp in microsec recorded by target for the scan event + * @scan_start_req: scan request object used to start this scan + */ +struct scan_event { + uint32_t vdev_id; + enum scan_event_type type; + enum scan_completion_reason reason; + uint32_t chan_freq; + uint32_t requester; + uint32_t scan_id; + uint32_t timestamp; + struct scan_start_request *scan_start_req; +}; + +/** + * struct scan_event_info - scan event information + * @vdev: vdev object + * @event: scan event + */ +struct scan_event_info { + struct wlan_objmgr_vdev *vdev; + struct scan_event event; +}; + +/** + * enum scm_scan_status - scan status + * @SCAN_NOT_IN_PROGRESS: Neither active nor pending scan in progress + * @SCAN_IS_ACTIVE: scan request is present only in active list + * @SCAN_IS_PENDING: scan request is present only in pending list + * @SCAN_IS_ACTIVE_AND_PENDING: scan request is present in active + * and pending both lists + */ +enum scm_scan_status { + SCAN_NOT_IN_PROGRESS = 0, /* Must be 0 */ + SCAN_IS_ACTIVE, + SCAN_IS_PENDING, + SCAN_IS_ACTIVE_AND_PENDING, +}; + +/** + * scan_event_handler() - function prototype of scan event handlers + * @vdev: vdev object + * @event: scan event + * @arg: argument + * + * PROTO TYPE, scan event handler call back function prototype + * + * @Return: void + */ +typedef void (*scan_event_handler) (struct wlan_objmgr_vdev *vdev, + struct scan_event *event, void *arg); + +/** + * enum scan_cb_type - update beacon cb type + * @SCAN_CB_TYPE_INFORM_BCN: Calback to indicate beacon to OS + * @SCAN_CB_TYPE_UPDATE_BCN: Calback to indicate beacon + * @SCAN_CB_TYPE_UNLINK_BSS: cb to unlink bss entry + * to MLME and update MLME info + * + */ +enum scan_cb_type { + SCAN_CB_TYPE_INFORM_BCN, + SCAN_CB_TYPE_UPDATE_BCN, + SCAN_CB_TYPE_UNLINK_BSS, +}; + +/* Set PNO */ +#define SCAN_PNO_MAX_PLAN_REQUEST 2 +#define SCAN_PNO_MAX_NETW_CHANNELS_EX (NUM_CHANNELS) +#define SCAN_PNO_MAX_SUPP_NETWORKS 16 +#define SCAN_PNO_DEF_SLOW_SCAN_MULTIPLIER 6 +#define SCAN_PNO_DEF_SCAN_TIMER_REPEAT 20 +#define SCAN_PNO_MATCH_WAKE_LOCK_TIMEOUT (5 * 1000) /* in msec */ +#define SCAN_MAX_IE_LENGTH 255 +#ifdef CONFIG_SLUB_DEBUG_ON +#define SCAN_PNO_SCAN_COMPLETE_WAKE_LOCK_TIMEOUT (2 * 1000) /* in msec */ +#else +#define SCAN_PNO_SCAN_COMPLETE_WAKE_LOCK_TIMEOUT (1 * 1000) /* in msec */ +#endif /* CONFIG_SLUB_DEBUG_ON */ + +/** + * enum ssid_bc_type - SSID broadcast type + * @SSID_BC_TYPE_UNKNOWN: Broadcast unknown + * @SSID_BC_TYPE_NORMAL: Broadcast normal + * @SSID_BC_TYPE_HIDDEN: Broadcast hidden + */ +enum ssid_bc_type { + SSID_BC_TYPE_UNKNOWN = 0, + SSID_BC_TYPE_NORMAL = 1, + SSID_BC_TYPE_HIDDEN = 2, +}; + +/** + * struct pno_nw_type - pno nw type + * @ssid: ssid + * @authentication: authentication type + * @encryption: encryption type + * @bcastNetwType: broadcast nw type + * @ucChannelCount: uc channel count + * @aChannels: pno channel + * @rssiThreshold: rssi threshold + */ +struct pno_nw_type { + struct wlan_ssid ssid; + uint32_t authentication; + uint32_t encryption; + uint32_t bc_new_type; + uint8_t channel_cnt; + uint32_t channels[SCAN_PNO_MAX_NETW_CHANNELS_EX]; + int32_t rssi_thresh; +}; + +/** + * struct connected_pno_band_rssi_pref - BSS preference based on band + * and RSSI + * @band: band preference + * @rssi_pref: RSSI preference + */ +struct cpno_band_rssi_pref { + int8_t band; + int8_t rssi; +}; + +/** + * struct nlo_mawc_params - Motion Aided Wireless Connectivity based + * Network List Offload configuration + * @vdev_id: VDEV ID on which the configuration needs to be applied + * @enable: flag to enable or disable + * @exp_backoff_ratio: ratio of exponential backoff + * @init_scan_interval: initial scan interval(msec) + * @max_scan_interval: max scan interval(msec) + */ +struct nlo_mawc_params { + uint8_t vdev_id; + bool enable; + uint32_t exp_backoff_ratio; + uint32_t init_scan_interval; + uint32_t max_scan_interval; +}; + +/** + * struct pno_scan_req_params - PNO Scan request structure + * @networks_cnt: Number of networks + * @do_passive_scan: Flag to request passive scan to fw + * @vdev_id: vdev id + * @fast_scan_period: Fast Scan period + * @slow_scan_period: Slow scan period + * @delay_start_time: delay in seconds to use before starting the first scan + * @fast_scan_max_cycles: Fast scan max cycles + * @scan_backoff_multiplier: multiply fast scan period by this after max cycles + * @pno_channel_prediction: PNO channel prediction feature status + * @uint32_t active_dwell_time: active dwell time + * @uint32_t passive_dwell_time: passive dwell time + * @top_k_num_of_channels: top K number of channels are used for tanimoto + * distance calculation. + * @stationary_thresh: threshold value to determine that the STA is stationary. + * @adaptive_dwell_mode: adaptive dwelltime mode for pno scan + * @channel_prediction_full_scan: periodic timer upon which a full scan needs + * to be triggered. + * @networks_list: Preferred network list + * @scan_random: scan randomization params + * @ie_whitelist: probe req IE whitelist attrs + * @relative_rssi_set: Flag to check whether realtive_rssi is set or not + * @relative_rssi: Relative rssi threshold, used for connected pno + * @band_rssi_pref: Band and RSSI preference that can be given to one BSS + * over the other BSS + * + * E.g. + * { fast_scan_period=120, fast_scan_max_cycles=2, + * slow_scan_period=1800, scan_backoff_multiplier=2 } + * Result: 120s x2, 240s x2, 480s x2, 960s x2, 1800s xN + * @mawc_params: Configuration parameters for NLO MAWC. + */ +struct pno_scan_req_params { + uint32_t networks_cnt; + bool do_passive_scan; + uint32_t vdev_id; + uint32_t fast_scan_period; + uint32_t slow_scan_period; + uint32_t delay_start_time; + uint32_t fast_scan_max_cycles; + uint8_t scan_backoff_multiplier; + uint32_t active_dwell_time; + uint32_t passive_dwell_time; + uint32_t pno_channel_prediction; + uint32_t top_k_num_of_channels; + uint32_t stationary_thresh; + enum scan_dwelltime_adaptive_mode adaptive_dwell_mode; + uint32_t channel_prediction_full_scan; + struct pno_nw_type networks_list[SCAN_PNO_MAX_SUPP_NETWORKS]; + struct scan_random_attr scan_random; + struct probe_req_whitelist_attr ie_whitelist; + bool relative_rssi_set; + int8_t relative_rssi; + struct cpno_band_rssi_pref band_rssi_pref; + struct nlo_mawc_params mawc_params; +}; + +/** + * struct scan_user_cfg - user configuration required for for scan + * @ie_whitelist: probe req IE whitelist attrs + * @sta_miracast_mcc_rest_time: sta miracast mcc rest time + * @score_config: scoring logic configuration + */ +struct scan_user_cfg { + struct probe_req_whitelist_attr ie_whitelist; + uint32_t sta_miracast_mcc_rest_time; + struct scoring_config score_config; +}; + +/** + * update_beacon_cb() - cb to inform/update beacon + * @psoc: psoc pointer + * @scan_params: scan entry to inform/update + * + * @Return: void + */ +typedef void (*update_beacon_cb) (struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry); + +/** + * scan_iterator_func() - function prototype of scan iterator function + * @scan_entry: scan entry object + * @arg: extra argument + * + * PROTO TYPE, scan iterator function prototype + * + * @Return: QDF_STATUS + */ +typedef QDF_STATUS (*scan_iterator_func) (void *arg, + struct scan_cache_entry *scan_entry); + +/** + * enum scan_priority - scan priority definitions + * @SCAN_CFG_DISABLE_SCAN_COMMAND_TIMEOUT: disable scan command timeout + * @SCAN_CFG_DROP_BCN_ON_CHANNEL_MISMATCH: config to drop beacon/probe + * response frames if received channel and IE channels do not match + */ +enum scan_config { + SCAN_CFG_DISABLE_SCAN_COMMAND_TIMEOUT, + SCAN_CFG_DROP_BCN_ON_CHANNEL_MISMATCH, +}; + +/** + * enum ext_cap_bit_field - Extended capabilities bit field + * @BSS_2040_COEX_MGMT_SUPPORT: 20/40 BSS Coexistence Management Support field + * @OBSS_NARROW_BW_RU_IN_ULOFDMA_TOLERENT_SUPPORT: OBSS Narrow Bandwidth RU + * in UL OFDMA Tolerance Support + */ +enum ext_cap_bit_field { + BSS_2040_COEX_MGMT_SUPPORT = 0, + OBSS_NARROW_BW_RU_IN_ULOFDMA_TOLERENT_SUPPORT = 79, +}; + +/** + * scan_rnr_info - RNR information + * @timestamp: time stamp of beacon/probe + * @short_ssid: Short SSID + * @bssid: BSSID + */ +struct scan_rnr_info { + qdf_time_t timestamp; + uint32_t short_ssid; + struct qdf_mac_addr bssid; +}; + +/** + * struct scan_rnr_node - Scan RNR entry node + * @node: node pointers + * @entry: scan RNR entry pointer + */ +struct scan_rnr_node { + qdf_list_node_t node; + struct scan_rnr_info entry; +}; + +/** + * meta_rnr_channel - Channel information for scan priority algorithm + * @chan_freq: channel frequency + * @bss_beacon_probe_count: Beacon and probe request count + * @saved_profile_count: Saved profile count + * @beacon_probe_last_time_found: Timestamp of beacon/probe observed + * @rnr_list: RNR list to store RNR IE information + */ +struct meta_rnr_channel { + uint32_t chan_freq; + uint32_t bss_beacon_probe_count; + uint32_t saved_profile_count; + qdf_time_t beacon_probe_last_time_found; + qdf_list_t rnr_list; +}; + +#define RNR_UPDATE_SCAN_CNT_THRESHOLD 2 +/** + * channel_list_db - Database for channel information + * @channel: channel meta information + * @scan_count: scan count since the db was updated + */ +struct channel_list_db { + struct meta_rnr_channel channel[NUM_6GHZ_CHANNELS]; + uint8_t scan_count; +}; + +/** + * rnr_chan_weight - RNR channel weightage + * @chan_freq: channel frequency + * @weight: weightage of the channel + */ +struct rnr_chan_weight { + uint32_t chan_freq; + uint32_t weight; +}; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..23127e13ecaae4f29cb8a07f407b23a11e1e33c0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_tgt_api.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan south bound interface definitions + */ + +#ifndef _WLAN_SCAN_TGT_API_H_ +#define _WLAN_SCAN_TGT_API_H_ + +#include +#include +#include +#include +#include +#include + +/** + * tgt_scan_bcn_probe_rx_callback() - The callbeack registered to tx/rx module + * @psoc: psoc context + * @peer: peer + * @buf: frame buf + * @params: rx event params + * @frm_type: frame type + * + * The callbeack registered to tx/rx module and is called when beacon + * or probe resp is recived. This will post a msg to target_if queue. + * + * Return: success or error code. + */ +QDF_STATUS tgt_scan_bcn_probe_rx_callback(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer, qdf_nbuf_t buf, + struct mgmt_rx_event_params *rx_param, + enum mgmt_frame_type frm_type); + +/** + * tgt_scan_event_handler() - The callbeack registered to WMI for scan events + * @psoc: psoc handle + * @event_info: event info + * + * The callbeack registered to WMI for scan events and is called + * event for scan is received. This will post a msg to target_if queue. + * + * Return: 0 for success or error code. + */ +QDF_STATUS +tgt_scan_event_handler(struct wlan_objmgr_psoc *psoc, + struct scan_event_info *event_info); + +#ifdef FEATURE_WLAN_SCAN_PNO + +/** + * tgt_scan_pno_start() - invoke lmac send PNO start req + * @vdev: vdev pointer + * @req: pno req params + * + * Return: 0 for success or error code. + */ +QDF_STATUS tgt_scan_pno_start(struct wlan_objmgr_vdev *vdev, + struct pno_scan_req_params *req); + +/** + * tgt_scan_pno_stop() - invoke lmac send PNO stop req + * @vdev: vdev pointer + * @vdev_id: pno req params + * + * Return: 0 for success or error code. + */ +QDF_STATUS tgt_scan_pno_stop(struct wlan_objmgr_vdev *vdev, + uint8_t vdev_id); + +#endif + +/** + * tgt_scan_start() - invoke lmac scan start + * @req: scan request object + * + * This API invokes lmac API function to start scan + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +QDF_STATUS +tgt_scan_start(struct scan_start_request *req); + + +/** + * tgt_scan_cancel() - invoke lmac scan cancel + * @req: scan request object + * + * This API invokes lmac API function to cancel scan + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +QDF_STATUS +tgt_scan_cancel(struct scan_cancel_request *req); + +/** + * tgt_scan_register_ev_handler() - invoke lmac register scan event handler + * @psoc: psoc object + * + * This API invokes lmac API function to register for scan events + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +QDF_STATUS +tgt_scan_register_ev_handler(struct wlan_objmgr_psoc *psoc); + +/** + * tgt_scan_unregister_ev_handler() - invoke lmac unregister scan event handler + * @psoc: psoc object + * + * This API invokes lmac API function to unregister for scan events + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +QDF_STATUS +tgt_scan_unregister_ev_handler(struct wlan_objmgr_psoc *psoc); + +/** + * tgt_scan_set_max_active_scans() - lmac handler to set max active scans + * @psoc: psoc object + * @max_active_scans: maximum active scans allowed on underlying psoc + * + * Return: QDF_STATUS + */ +QDF_STATUS +tgt_scan_set_max_active_scans(struct wlan_objmgr_psoc *psoc, + uint32_t max_active_scans); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..4f2f5a66b267163edee4fe2aeb629a6160bf50c4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_ucfg_api.h @@ -0,0 +1,1048 @@ +/* + * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan north bound interface api + */ + +#ifndef _WLAN_SCAN_UCFG_API_H_ +#define _WLAN_SCAN_UCFG_API_H_ + +#include +#include +#include +#include +#include +#include "wlan_scan_api.h" + +/** + * ucfg_scan_register_requester() - assigns requester ID to caller and + * registers scan event call back handler + * @psoc: psoc object + * @module_name:name of requester module + * @event_cb: event callback function pointer + * @arg: argument to @event_cb + * + * API, allows other components to allocate requester id + * Normally used by modules at init time to register their callback + * and get one requester id. @event_cb will be invoked for + * all scan events whose requester id matches with @requester. + * + * Return: assigned non zero requester id for success + * zero (0) for failure + */ +wlan_scan_requester +ucfg_scan_register_requester(struct wlan_objmgr_psoc *psoc, + uint8_t *module_name, scan_event_handler event_cb, void *arg); + +/** + * ucfg_scan_unregister_requester() -reclaims previously allocated requester ID + * @psoc: psoc object + * @requester: requester ID to reclaim. + * + * API, reclaims previously allocated requester id by + * ucfg_scan_get_req_id_reg_cb() + * + * Return: void + */ +void ucfg_scan_unregister_requester(struct wlan_objmgr_psoc *psoc, + wlan_scan_requester requester); + + +/** + * ucfg_get_scan_requester_name()- returns module name of requester ID owner + * @psoc: psoc object + * @requester: requester ID + * + * API, returns module name of requester id owner + * + * Return: pointer to module name or "unknown" if requester id not found. + */ +uint8_t *ucfg_get_scan_requester_name(struct wlan_objmgr_psoc *psoc, + wlan_scan_requester requester); + + + +/** + * ucfg_scan_get_scan_id() - allocates scan ID + * @psoc: psoc object + * + * API, allocates a new scan id for caller + * + * Return: newly allocated scan ID + */ +wlan_scan_id +ucfg_scan_get_scan_id(struct wlan_objmgr_psoc *psoc); + +#ifdef FEATURE_WLAN_SCAN_PNO +/** + * ucfg_scan_pno_start() - Public API to start PNO + * @vdev: vdev pointer + * @req: pno req params + * + * Return: 0 for success or error code. + */ +QDF_STATUS ucfg_scan_pno_start(struct wlan_objmgr_vdev *vdev, +struct pno_scan_req_params *req); + +/** + * ucfg_scan_pno_stop() - Public API to stop PNO + * @vdev: vdev pointer + * @req: pno req params + * + * Return: 0 for success or error code. + */ +QDF_STATUS ucfg_scan_pno_stop(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_scan_get_pno_in_progress() - Public API to check if pno is in progress + * @vdev: vdev pointer + * + * Return: true if pno in progress else false. + */ +bool ucfg_scan_get_pno_in_progress(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_scan_get_pno_match() - Public API to check if pno matched + * @vdev: vdev pointer + * + * Return: true if pno matched else false. + */ +bool ucfg_scan_get_pno_match(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_scan_register_pno_cb() - register pno cb + * @psoc: psoc object + * @event_cb: callback function pointer + * @arg: argument to @event_cb + * + * Return: QDF_STATUS + */ +QDF_STATUS +ucfg_scan_register_pno_cb(struct wlan_objmgr_psoc *psoc, + scan_event_handler event_cb, void *arg); + +/** + * ucfg_scan_get_pno_def_params() - get the defaults pno params + * @vdev: vdev object + * @req: pno request object + * + * Return: QDF_STATUS_SUCCESS or error code + */ +QDF_STATUS +ucfg_scan_get_pno_def_params(struct wlan_objmgr_vdev *vdev, + struct pno_scan_req_params *req); + +#else + +static inline bool +ucfg_scan_get_pno_in_progress(struct wlan_objmgr_vdev *vdev) +{ + return false; +} + +static inline bool +ucfg_scan_get_pno_match(struct wlan_objmgr_vdev *vdev) +{ + return false; +} +#endif /* FEATURE_WLAN_SCAN_PNO */ +/** + * ucfg_scm_scan_free_scan_request_mem() - Free scan request memory + * @req: scan_start_request object + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scm_scan_free_scan_request_mem(struct scan_start_request *req); + +/** + * ucfg_scan_start() - Public API to start a scan + * @req: start scan req params + * + * The Public API to start a scan. Post a msg to target_if queue + * + * Return: 0 for success or error code. + */ +QDF_STATUS +ucfg_scan_start(struct scan_start_request *req); + +/** + * ucfg_scan_set_psoc_enable() - Public API to enable scans for psoc + * @psoc: psoc on which scans need to be disabled + * @reason: reason for enable/disabled + * + * Return: QDF_STATUS. + */ +QDF_STATUS ucfg_scan_psoc_set_enable(struct wlan_objmgr_psoc *psoc, + enum scan_disable_reason reason); + +/** + * ucfg_scan_psoc_set_disable() - Public API to disable scans for psoc + * @psoc: psoc on which scans need to be disabled + * @reason: reason for enable/disabled + * + * Return: QDF_STATUS. + */ +QDF_STATUS ucfg_scan_psoc_set_disable(struct wlan_objmgr_psoc *psoc, + enum scan_disable_reason reason); + +/** + * ucfg_scan_vdev_set_enable() - Public API to enable scans for vdev + * @psoc: psoc on which scans need to be disabled + * @reason: reason for enable/disabled + * + * Return: QDF_STATUS. + */ +QDF_STATUS ucfg_scan_vdev_set_enable(struct wlan_objmgr_vdev *vdev, + enum scan_disable_reason reason); + +/** + * ucfg_scan_vdev_set_disable() - Public API to disable scans for vdev + * @psoc: psoc on which scans need to be disabled + * @reason: reason for enable/disabled + * + * Return: QDF_STATUS. + */ +QDF_STATUS ucfg_scan_vdev_set_disable(struct wlan_objmgr_vdev *vdev, + enum scan_disable_reason reason); + + + +/** + * ucfg_scan_set_miracast() - Public API to disable/enable miracast flag + * @psoc: psoc pointer + * @enable: enable miracast if true disable is false + * + * Return: QDF_STATUS. + */ +QDF_STATUS ucfg_scan_set_miracast( + struct wlan_objmgr_psoc *psoc, bool enable); + +/** + * ucfg_scan_set_global_config() - Public API to set global scan config + * @psoc: psoc context + * @config: config to set + * @val: new config value + * + * Return: QDF_STATUS. + */ +QDF_STATUS +ucfg_scan_set_global_config(struct wlan_objmgr_psoc *psoc, + enum scan_config config, uint32_t val); + +/** + * ucfg_scan_get_global_config() - Public API to get global scan config + * @psoc: psoc context + * @config: config to set + * @val: uint32* to hold returned config value + * + * Return: QDF_STATUS. + */ +QDF_STATUS +ucfg_scan_get_global_config(struct wlan_objmgr_psoc *psoc, + enum scan_config config, uint32_t *val); + +/** + * ucfg_scan_set_wide_band_scan() - Public API to disable/enable wide band scan + * @pdev: psoc on which scans need to be disabled + * @enable: enable wide band scan if @enable is true, disable otherwise + * + * Return: QDF_STATUS. + */ +QDF_STATUS ucfg_scan_set_wide_band_scan( + struct wlan_objmgr_pdev *pdev, bool enable); + +/** + * ucfg_scan_get_wide_band_scan() - Public API to check if + * wide band scan is enabled or disabled + * @pdev: psoc on which scans status need to be checked + * + * Return: true if enabled else false. + */ +bool ucfg_scan_get_wide_band_scan(struct wlan_objmgr_pdev *pdev); + +/** + * ucfg_scan_set_custom_scan_chan_list() - Public API to restrict scan + * to few pre configured channels + * @pdev: psoc on which scans need to be disabled + * @chan_list: list of channels to scan if set + * + * Return: QDF_STATUS. + */ +QDF_STATUS ucfg_scan_set_custom_scan_chan_list( + struct wlan_objmgr_pdev *pdev, struct chan_list *chan_list); +/** + * ucfg_scan_set_ssid_bssid_hidden_ssid_beacon() - API to configure + * ssid, bssid of hidden beacon + * @pdev: psoc on which ssid bssid need to configure + * @bssid: bssid of the hidden AP + * @ssid: desired ssid + * + * Return: QDF_STATUS. + */ +#ifdef WLAN_DFS_CHAN_HIDDEN_SSID +QDF_STATUS +ucfg_scan_config_hidden_ssid_for_bssid(struct wlan_objmgr_pdev *pdev, + uint8_t *bssid, + struct wlan_ssid *ssid); +#else +static inline QDF_STATUS +ucfg_scan_config_hidden_ssid_for_bssid(struct wlan_objmgr_pdev *pdev, + uint8_t *bssid, + struct wlan_ssid *ssid) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_DFS_CHAN_HIDDEN_SSID */ +/** + * ucfg_scan_cancel() - Public API to stop a scan + * @req: stop scan request params + * + * The Public API to stop a scan. Post a msg to target_if queue + * + * Return: 0 for success or error code. + */ +QDF_STATUS +ucfg_scan_cancel(struct scan_cancel_request *req); + +/** + * ucfg_scan_cancel_sync() - Public API to stop a scan and wait + * till all scan are completed + * @req: stop scan request params + * + * The Public API to stop a scan and wait + * till all scan are completed + * + * Return: 0 for success or error code. + */ +QDF_STATUS +ucfg_scan_cancel_sync(struct scan_cancel_request *req); + +/** + * ucfg_scan_get_result() - The Public API to get scan results + * @pdev: pdev info + * @filter: Filters + * + * This function fetches scan result + * + * Return: scan list pointer + */ +qdf_list_t *ucfg_scan_get_result(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter); + +/** + * ucfg_scan_purge_results() - purge the scan list + * @scan_list: scan list to be purged + * + * This function purge the temp scan list + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_purge_results(qdf_list_t *scan_list); + +/** + * ucfg_scan_flush_results() - The Public API to flush scan result + * @pdev: pdev object + * @filter: filter to flush the scan entries + * + * The Public API to flush scan result. + * + * Return: 0 for success or error code. + */ +QDF_STATUS ucfg_scan_flush_results(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter); + +/** + * ucfg_scan_filter_valid_channel() - The Public API to filter scan result + * based on valid channel list + * @pdev: pdev object + * @chan_freq_list: valid channel frequency (in MHz) list + * @num_chan: number of valid channels + * + * The Public API to to filter scan result + * based on valid channel list. + * + * Return: void. + */ +void ucfg_scan_filter_valid_channel(struct wlan_objmgr_pdev *pdev, + uint32_t *chan_freq_list, uint32_t num_chan); + +/** + * ucfg_scan_db_iterate() - function to iterate scan table + * @pdev: pdev object + * @func: iterator function pointer + * @arg: argument to be passed to func() + * + * API, this API iterates scan table and invokes func + * on each scan enetry by passing scan entry and arg. + * + * Return: QDF_STATUS + */ +QDF_STATUS +ucfg_scan_db_iterate(struct wlan_objmgr_pdev *pdev, + scan_iterator_func func, void *arg); + +/** + * ucfg_scan_update_mlme_by_bssinfo() - The Public API to update mlme + * info in the scan entry + * @pdev: pdev object + * @bssid: bssid info to find the matching scan entry + * @mlme_info: mlme info to be updated. + * + * The Public API to update mlme info in the scan entry. + * Post a msg to target_if queue + * + * Return: 0 for success or error code. + */ +QDF_STATUS +ucfg_scan_update_mlme_by_bssinfo(struct wlan_objmgr_pdev *pdev, + struct bss_info *bss_info, + struct mlme_info *mlme_info); + +/** + * ucfg_scan_register_event_handler() - The Public API to register + * an event cb handler + * @pdev: pdev object + * @event_cb: callback function to register + * @arg: component specific priv argument to @event_cb callback function + * + * The Public API to register a event cb handler. This cb is called whenever + * any scan event is received on @pdev. + * + * Return: 0 for success or error code. + */ + +QDF_STATUS +ucfg_scan_register_event_handler(struct wlan_objmgr_pdev *pdev, + scan_event_handler event_cb, void *arg); + +/** + * ucfg_scan_unregister_event_handler() - Public API to unregister + * event cb handler + * @pdev: pdev object + * @event_cb: callback function to unregister + * @arg: component specific priv argument to @event_cb callback function + * + * Unregister a event cb handler. cb and arg will be used to + * find the calback. + * + * Return: void + */ + +void +ucfg_scan_unregister_event_handler(struct wlan_objmgr_pdev *pdev, + scan_event_handler event_cb, void *arg); + +/** + * ucfg_scan_init_default_params() - get the defaults scan params + * @vdev: vdev object + * @req: scan request object + * + * get the defaults scan params + * + * Return: QDF_STATUS_SUCCESS or error code + */ +QDF_STATUS +ucfg_scan_init_default_params(struct wlan_objmgr_vdev *vdev, + struct scan_start_request *req); + +/** + * ucfg_scan_init_ssid_params() - initialize scan request ssid list + * + * @scan_req: scan request object + * @num_ssid: number of ssid's in ssid list + * @ssid_list: ssid list + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +ucfg_scan_init_ssid_params(struct scan_start_request *scan_req, + uint32_t num_ssid, struct wlan_ssid *ssid_list); + +/** + * ucfg_scan_init_bssid_params() - initialize scan request bssid list + * @scan_req: scan request object + * @num_ssid: number of bssid's in bssid list + * @bssid_list: bssid list + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +ucfg_scan_init_bssid_params(struct scan_start_request *scan_req, + uint32_t num_ssid, struct qdf_mac_addr *bssid_list); + +/** + * ucfg_scan_init_chanlist_params() - initialize scan request channel list + * @scan_req: scan request object + * @num_chans: number of channels in channel list + * @chan_list: channel list + * @phymode: phymode in which scan shall be done + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +ucfg_scan_init_chanlist_params(struct scan_start_request *scan_req, + uint32_t num_chans, uint32_t *chan_list, uint32_t *phymode); + +/** + * ucfg_scan_get_vdev_status() - API to check vdev scan status + * @vdev: vdev object + * + * Return: enum scm_scan_status + */ +enum scm_scan_status +ucfg_scan_get_vdev_status(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_scan_get_pdev_status() - API to check pdev scan status + * @pdev: vdev object + * + * Return: enum scm_scan_status + */ +enum scm_scan_status +ucfg_scan_get_pdev_status(struct wlan_objmgr_pdev *pdev); + +/** + * ucfg_scan_register_bcn_cb() - API to register api + * to inform/update bcn/probe as soon as they are received + * @pdev: psoc + * @cb: callback to be registered + * @type: Type of callback to be registered + * + * Return: enum scm_scan_status + */ +QDF_STATUS ucfg_scan_register_bcn_cb(struct wlan_objmgr_psoc *psoc, + update_beacon_cb cb, enum scan_cb_type type); + +/* + * ucfg_scan_update_user_config() - Update scan cache user config + * @psoc: psoc + * @scan_cfg: scan user config + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_update_user_config(struct wlan_objmgr_psoc *psoc, + struct scan_user_cfg *scan_cfg); + +/* + * ucfg_scan_init() - Scan module initialization API + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_init(void); + +/** + * ucfg_scan_deinit() - Scan module deinitialization API + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_deinit(void); + +/** + * ucfg_scan_psoc_enable() - Scan module enable API + * @psoc: psoc object + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_psoc_enable(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_psoc_enable() - Scan module disable API + * @psoc: psoc object + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_psoc_disable(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_psoc_open() - Scan module psoc open API + * @psoc: psoc object + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_psoc_open(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_psoc_close() - Scan module psoc close API + * @psoc: psoc object + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_psoc_close(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_get_max_active_scans() - API to get max active scans + * supported on this psoc + * @psoc: psoc object + * + * Return: uint32_t + */ +uint32_t ucfg_scan_get_max_active_scans(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_ie_whitelist_enabled() - Checks for IE whitelisting enable + * @psoc: pointer to psoc object + * @vdev: pointer to vdev + * + * This function is used to check whether IE whitelisting is enabled or not + * + * Return: If enabled returns true else returns false + */ +bool ucfg_ie_whitelist_enabled(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_copy_ie_whitelist_attrs() - Populate probe req IE whitelist attrs + * @psoc: pointer to psoc object + * @ie_whitelist: output parameter to hold ie whitelist attrs + * + * If IE whitelisting is enabled then invoke this function to copy + * IE whitelisting attrs from wlan scan object + * + * Return: true - successful copy + * false - copy failed + */ +bool ucfg_copy_ie_whitelist_attrs(struct wlan_objmgr_psoc *psoc, + struct probe_req_whitelist_attr *ie_whitelist); + +/** + * ucfg_scan_set_bt_activity() - API to set bt activity + * @psoc: pointer to psoc object + * @bt_a2dp_active: bt activiy value + * + * Return: None + */ +void ucfg_scan_set_bt_activity(struct wlan_objmgr_psoc *psoc, + bool bt_a2dp_active); +/** + * ucfg_scan_get_bt_activity() - API to get bt activity + * @psoc: pointer to psoc object + * + * Return: true if enabled else false. + */ +bool ucfg_scan_get_bt_activity(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_is_mac_spoofing_enabled() - API to check if mac spoofing is enabled + * @psoc: pointer to psoc object + * + * Return: true if enabled else false. + */ +bool ucfg_scan_is_mac_spoofing_enabled(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_get_extscan_adaptive_dwell_mode() - API to get the adaptive dwell + * mode during ext scan + * @psoc: pointer to psoc object + * + * Return: value of type enum scan_dwelltime_adaptive_mode + */ +enum scan_dwelltime_adaptive_mode +ucfg_scan_get_extscan_adaptive_dwell_mode(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_cfg_set_active_dwelltime() - API to set scan active dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan active dwell time + * + * Return: none + */ +static inline +void ucfg_scan_cfg_set_active_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time) +{ + return wlan_scan_cfg_set_active_dwelltime(psoc, dwell_time); +} + +/** + * ucfg_scan_cfg_set_active_2g_dwelltime() - API to set scan active 2g dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan active dwell time + * + * Return: none + */ +static inline +void ucfg_scan_cfg_set_active_2g_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time) +{ + return wlan_scan_cfg_set_active_2g_dwelltime(psoc, dwell_time); +} + +/** + * ucfg_scan_cfg_get_active_dwelltime() - API to get active dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan active dwelltime + * + * Return: scan active dwell time + */ +static inline +void ucfg_scan_cfg_get_active_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time) +{ + return wlan_scan_cfg_get_active_dwelltime(psoc, dwell_time); +} + +/** + * ucfg_scan_cfg_set_passive_dwelltime() - API to set scan passive dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan passive dwell time + * + * Return: none + */ +static inline +void ucfg_scan_cfg_set_passive_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time) +{ + return wlan_scan_cfg_set_passive_dwelltime(psoc, dwell_time); +} +/** + * ucfg_scan_cfg_get_passive_dwelltime() - API to get passive dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan passive dwelltime + * + * Return: scan passive dwell time + */ +static inline +void ucfg_scan_cfg_get_passive_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time) +{ + return wlan_scan_cfg_get_passive_dwelltime(psoc, dwell_time); +} + +/** + * ucfg_scan_cfg_get_active_2g_dwelltime() - API to get active 2g dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan active 2g dwelltime + * + * Return: scan active 2g dwelltime + */ +static inline +void ucfg_scan_cfg_get_active_2g_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time) +{ + return wlan_scan_cfg_get_active_2g_dwelltime(psoc, dwell_time); +} + +/** + * ucfg_scan_cfg_get_conc_active_dwelltime() - Get concurrent active dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan active dwelltime + * + * Return: scan concurrent active dwell time + */ +static inline +void ucfg_scan_cfg_get_conc_active_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time) +{ + return wlan_scan_cfg_get_conc_active_dwelltime(psoc, dwell_time); +} + +/** + * ucfg_scan_cfg_set_conc_active_dwelltime() - Set concurrent active dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan active dwelltime + * + * Return: scan concurrent active dwell time + */ +static inline +void ucfg_scan_cfg_set_conc_active_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time) +{ + return wlan_scan_cfg_set_conc_active_dwelltime(psoc, dwell_time); +} + +/** + * ucfg_scan_cfg_get_conc_passive_dwelltime() - Get passive concurrent dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan passive dwelltime + * + * Return: scan concurrent passive dwell time + */ +static inline +void ucfg_scan_cfg_get_conc_passive_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time) +{ + return wlan_scan_cfg_get_conc_passive_dwelltime(psoc, dwell_time); +} + +/** + * ucfg_scan_cfg_set_conc_passive_dwelltime() - Set passive concurrent dwelltime + * @psoc: pointer to psoc object + * @dwell_time: scan passive dwelltime + * + * Return: scan concurrent passive dwell time + */ +static inline +void ucfg_scan_cfg_set_conc_passive_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time) +{ + return wlan_scan_cfg_set_conc_passive_dwelltime(psoc, dwell_time); +} + +/** + * ucfg_scan_cfg_get_dfs_chan_scan_allowed() - API to get dfs scan enabled + * @psoc: pointer to psoc object + * @enable_dfs_scan: DFS scan enabled or not. + * + * Return: None + */ +static inline +void ucfg_scan_cfg_get_dfs_chan_scan_allowed(struct wlan_objmgr_psoc *psoc, + bool *dfs_scan_enable) +{ + return wlan_scan_cfg_get_dfs_chan_scan_allowed(psoc, dfs_scan_enable); +} + +/** + * ucfg_scan_cfg_set_dfs_channel_scan() - API to set dfs scan enabled + * @psoc: pointer to psoc object + * @enable_dfs_scan: Set DFS scan enabled or not. + * + * Return: None + */ +static inline +void ucfg_scan_cfg_set_dfs_chan_scan_allowed(struct wlan_objmgr_psoc *psoc, + bool dfs_scan_enable) +{ + return wlan_scan_cfg_set_dfs_chan_scan_allowed(psoc, dfs_scan_enable); +} + +/** + * ucfg_scan_wake_lock_in_user_scan() - API to determine if wake lock in user + * scan is used. + * @psoc: pointer to psoc object + * + * Return: true if wake lock in user scan is required + */ +bool ucfg_scan_wake_lock_in_user_scan(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_cfg_honour_nl_scan_policy_flags() - API to get nl scan policy + * flags honoured. + * @psoc: pointer to psoc object + * + * Return: nl scan flags is honoured or not + */ +static inline +bool ucfg_scan_cfg_honour_nl_scan_policy_flags(struct wlan_objmgr_psoc *psoc) +{ + return wlan_scan_cfg_honour_nl_scan_policy_flags(psoc); +} + +/** + * ucfg_scan_cfg_get_conc_max_resttime() - API to get max rest time + * @psoc: pointer to psoc object + * @rest_time: scan concurrent max resttime + * + * Return: scan concurrent max rest time + */ +static inline +void ucfg_scan_cfg_get_conc_max_resttime(struct wlan_objmgr_psoc *psoc, + uint32_t *rest_time) +{ + return wlan_scan_cfg_get_conc_max_resttime(psoc, rest_time); +} + +/** + * ucfg_scan_cfg_get_conc_min_resttime() - API to get concurrent min rest time + * @psoc: pointer to psoc object + * @rest_time: scan concurrent min rest time + * + * Return: scan concurrent min rest time + */ +static inline +void ucfg_scan_cfg_get_conc_min_resttime(struct wlan_objmgr_psoc *psoc, + uint32_t *rest_time) +{ + return wlan_scan_cfg_get_conc_min_resttime(psoc, rest_time); +} + +#ifdef FEATURE_WLAN_SCAN_PNO +/** + * ucfg_scan_is_pno_offload_enabled() - Check if pno offload is enabled + * @psoc: pointer to psoc object + * + * Return: pno_offload_enabled flag + */ +bool ucfg_scan_is_pno_offload_enabled(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_set_pno_offload() - API to set pno offload flag based on the + * capability received from the firmware. + * @psoc: pointer to psoc object + * @rest_time: scan concurrent min rest time + * + * Return: scan concurrent min rest time + */ +void ucfg_scan_set_pno_offload(struct wlan_objmgr_psoc *psoc, bool value); + +/** + * ucfg_scan_get_pno_scan_support() - Check if pno scan support is enabled + * @psoc: pointer to psoc object + * + * Return: scan_support_enabled flag + */ +bool ucfg_scan_get_pno_scan_support(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_get_scan_backoff_multiplier() - get scan backoff multiplier value + * @psoc: pointer to psoc object + * + * Return: scan_support_enabled flag + */ +uint8_t ucfg_get_scan_backoff_multiplier(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_is_dfs_chnl_scan_enabled() - Check if PNO dfs channel scan support + * is enabled + * @psoc: pointer to psoc object + * + * Return: dfs_chnl_scan_enabled flag + */ +bool ucfg_scan_is_dfs_chnl_scan_enabled(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_get_scan_timer_repeat_value() - API to get PNO scan timer repeat + * value + * @psoc: pointer to psoc object + * + * Return: scan_timer_repeat_value + */ +uint32_t ucfg_scan_get_scan_timer_repeat_value(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_get_slow_scan_multiplier() - API to get PNO slow scan multiplier + * value + * @psoc: pointer to psoc object + * + * Return: slow_scan_multiplier value + */ +uint32_t ucfg_scan_get_slow_scan_multiplier(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_get_max_sched_scan_plan_interval() - API to get maximum scheduled + * scan plan interval + * @psoc: pointer to psoc object + * + * Return: max_sched_scan_plan_interval value. + */ +uint32_t +ucfg_scan_get_max_sched_scan_plan_interval(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_get_max_sched_scan_plan_iterations() - API to get maximum scheduled + * scan plan iterations + * @psoc: pointer to psoc object + * + * Return: value. + */ +uint32_t +ucfg_scan_get_max_sched_scan_plan_iterations(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_get_user_config_sched_scan_plan() - API to get user config sched + * scan plan configuration value + * @psoc: pointer to psoc object + * + * Return: value. + */ +bool +ucfg_scan_get_user_config_sched_scan_plan(struct wlan_objmgr_psoc *psoc); + +#else +static inline +bool ucfg_scan_is_pno_offload_enabled(struct wlan_objmgr_psoc *psoc) +{ + return 0; +} + +static inline +void ucfg_scan_set_pno_offload(struct wlan_objmgr_psoc *psoc, bool value) +{ +} + +static inline +bool ucfg_scan_get_pno_scan_support(struct wlan_objmgr_psoc *psoc) +{ + return 0; +} + +static inline +uint8_t ucfg_get_scan_backoff_multiplier(struct wlan_objmgr_psoc *psoc) +{ + return 0; +} + +static inline +bool ucfg_scan_is_dfs_chnl_scan_enabled(struct wlan_objmgr_psoc *psoc) +{ + return 0; +} + +static inline +uint32_t ucfg_scan_get_scan_timer_repeat_value(struct wlan_objmgr_psoc *psoc) +{ + return 0; +} + +static inline +uint32_t ucfg_scan_get_slow_scan_multiplier(struct wlan_objmgr_psoc *psoc) +{ + return 0; +} + +static inline uint32_t +ucfg_scan_get_max_sched_scan_plan_interval(struct wlan_objmgr_psoc *psoc) +{ + return 0; +} + +static inline uint32_t +ucfg_scan_get_max_sched_scan_plan_iterations(struct wlan_objmgr_psoc *psoc) +{ + return 0; +} + +static inline bool +ucfg_scan_get_user_config_sched_scan_plan(struct wlan_objmgr_psoc *psoc) +{ + return true; +} + +#endif /* FEATURE_WLAN_SCAN_PNO */ + +/** + * ucfg_scan_is_connected_scan_enabled() - API to get scan enabled after connect + * @psoc: pointer to psoc object + * + * Return: value. + */ +bool ucfg_scan_is_connected_scan_enabled(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_is_snr_monitor_enabled() - API to get SNR monitoring enabled or not + * @psoc: pointer to psoc object + * + * Return: value. + */ +static inline +bool ucfg_scan_is_snr_monitor_enabled(struct wlan_objmgr_psoc *psoc) +{ + return wlan_scan_is_snr_monitor_enabled(psoc); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_utils_api.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_utils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..f40242324a1d32bf0976f18bc2d0d29360ff39ad --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_utils_api.h @@ -0,0 +1,1708 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan public utility functions + */ + +#ifndef _WLAN_SCAN_UTILS_H_ +#define _WLAN_SCAN_UTILS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define ASCII_SPACE_CHARACTER 32 + +/** + * util_is_scan_entry_match() - func to check if both scan entry + * are from same AP + * @entry1: scan entry 1 + * @entry2: scan entry 2 + * + * match the two scan entries + * + * Return: true if entry match else false. + */ +bool util_is_scan_entry_match( + struct scan_cache_entry *entry1, + struct scan_cache_entry *entry2); + +/** + * util_scan_unpack_beacon_frame() - func to unpack beacon frame to scan entry + * @pdev: pdev pointer + * @frame: beacon/probe frame + * @frame_len: beacon frame len + * @frm_subtype: beacon or probe + * @rx_param: rx meta data + * + * get the defaults scan params + * + * Return: unpacked list of scan entries. + */ +qdf_list_t *util_scan_unpack_beacon_frame( + struct wlan_objmgr_pdev *pdev, + uint8_t *frame, qdf_size_t frame_len, uint32_t frm_subtype, + struct mgmt_rx_event_params *rx_param); + +/** + * util_scan_add_hidden_ssid() - func to add hidden ssid + * @pdev: pdev pointer + * @frame: beacon buf + * + * Return: + */ +#ifdef WLAN_DFS_CHAN_HIDDEN_SSID +QDF_STATUS +util_scan_add_hidden_ssid(struct wlan_objmgr_pdev *pdev, qdf_nbuf_t bcnbuf); +#else +static inline QDF_STATUS +util_scan_add_hidden_ssid(struct wlan_objmgr_pdev *pdev, qdf_nbuf_t bcnbuf) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_DFS_CHAN_HIDDEN_SSID */ + +/** + * util_scan_get_ev_type_name() - converts enum event to printable string + * @event: event of type scan_event_type + * + * API, converts enum event to printable character string + * + * Return: pointer to printable string + */ +const char *util_scan_get_ev_type_name(enum scan_event_type event); + +/** + * util_scan_get_ev_reason_name() - converts enum reason to printable string + * @reason enum of scan completion reason + * + * API, converts enum event to printable character string + * + * Return: pointer to printable string + */ +const char *util_scan_get_ev_reason_name(enum scan_completion_reason reason); + +/** + * util_scan_entry_macaddr() - function to read transmitter address + * @scan_entry: scan entry + * + * API, function to read transmitter address of scan entry + * + * Return: pointer to mac address + */ +static inline uint8_t* +util_scan_entry_macaddr(struct scan_cache_entry *scan_entry) +{ + return &(scan_entry->mac_addr.bytes[0]); +} + +/** + * util_scan_entry_bssid() - function to read bssid + * @scan_entry: scan entry + * + * API, function to read bssid of scan entry + * + * Return: pointer to mac address + */ +static inline uint8_t* +util_scan_entry_bssid(struct scan_cache_entry *scan_entry) +{ + return &(scan_entry->bssid.bytes[0]); +} + +/** + * util_scan_entry_capinfo() - function to read capibility info + * @scan_entry: scan entry + * + * API, function to read capibility info of scan entry + * + * Return: capability info + */ +static inline union wlan_capability +util_scan_entry_capinfo(struct scan_cache_entry *scan_entry) +{ + return scan_entry->cap_info; +} + +/** + * util_scan_entry_beacon_interval() - function to read beacon interval + * @scan_entry: scan entry + * + * API, function to read beacon interval of scan entry + * + * Return: beacon interval + */ +static inline uint16_t +util_scan_entry_beacon_interval(struct scan_cache_entry *scan_entry) +{ + return scan_entry->bcn_int; +} + +/** + * util_scan_entry_sequence_number() - function to read sequence number + * @scan_entry: scan entry + * + * API, function to read sequence number of scan entry + * + * Return: sequence number + */ +static inline uint16_t +util_scan_entry_sequence_number(struct scan_cache_entry *scan_entry) +{ + return scan_entry->seq_num; +} + +/** + * util_scan_entry_tsf() - function to read tsf + * @scan_entry: scan entry + * + * API, function to read tsf of scan entry + * + * Return: tsf + */ +static inline uint8_t* +util_scan_entry_tsf(struct scan_cache_entry *scan_entry) +{ + return scan_entry->tsf_info.data; +} + +/** + * util_scan_entry_reset_timestamp() - function to reset bcn receive timestamp + * @scan_entry: scan entry + * + * API, function to reset bcn receive timestamp of scan entry + * + * Return: void + */ +static inline void +util_scan_entry_reset_timestamp(struct scan_cache_entry *scan_entry) +{ + scan_entry->scan_entry_time = 0; +} + +/* + * Macros used for RSSI calculation. + */ +#define WLAN_RSSI_AVERAGING_TIME (5 * 1000) /* 5 seconds */ + +#define WLAN_RSSI_EP_MULTIPLIER (1<<7) /* pow2 to optimize out * and / */ + +#define WLAN_RSSI_LPF_LEN 0 +#define WLAN_RSSI_DUMMY_MARKER 0x127 + +#define WLAN_EP_MUL(x, mul) ((x) * (mul)) + +#define WLAN_EP_RND(x, mul) ((((x)%(mul)) >= ((mul)/2)) ?\ + ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) + +#define WLAN_RSSI_GET(x) WLAN_EP_RND(x, WLAN_RSSI_EP_MULTIPLIER) + +#define RSSI_LPF_THRESHOLD -20 + + +#define WLAN_RSSI_OUT(x) (((x) != WLAN_RSSI_DUMMY_MARKER) ? \ + (WLAN_EP_RND((x), WLAN_RSSI_EP_MULTIPLIER)) : WLAN_RSSI_DUMMY_MARKER) + + +#define WLAN_RSSI_IN(x) (WLAN_EP_MUL((x), WLAN_RSSI_EP_MULTIPLIER)) + +#define WLAN_LPF_RSSI(x, y, len) \ + ((x != WLAN_RSSI_DUMMY_MARKER) ? ((((x) << 3) + (y) - (x)) >> 3) : (y)) + +#define WLAN_RSSI_LPF(x, y) do { \ + if ((y) < RSSI_LPF_THRESHOLD) \ + x = WLAN_LPF_RSSI((x), WLAN_RSSI_IN((y)), WLAN_RSSI_LPF_LEN); \ + } while (0) + +#define WLAN_ABS_RSSI_LPF(x, y) do { \ + if ((y) >= (RSSI_LPF_THRESHOLD + WLAN_DEFAULT_NOISE_FLOOR)) \ + x = WLAN_LPF_RSSI((x), WLAN_RSSI_IN((y)), WLAN_RSSI_LPF_LEN); \ + } while (0) + +#define WLAN_SNR_EP_MULTIPLIER BIT(7) /* pow2 to optimize out * and / */ +#define WLAN_SNR_DUMMY_MARKER 0x127 +#define SNR_LPF_THRESHOLD 0 +#define WLAN_SNR_LPF_LEN 10 + +#define WLAN_SNR_OUT(x) (((x) != WLAN_SNR_DUMMY_MARKER) ? \ + (WLAN_EP_RND((x), WLAN_SNR_EP_MULTIPLIER)) : WLAN_SNR_DUMMY_MARKER) + +#define WLAN_SNR_IN(x) (WLAN_EP_MUL((x), WLAN_SNR_EP_MULTIPLIER)) + +#define WLAN_LPF_SNR(x, y, len) \ + ((x != WLAN_SNR_DUMMY_MARKER) ? ((((x) << 3) + (y) - (x)) >> 3) : (y)) + +#define WLAN_SNR_LPF(x, y) do { \ + if ((y) > SNR_LPF_THRESHOLD) \ + x = WLAN_LPF_SNR((x), WLAN_SNR_IN((y)), WLAN_SNR_LPF_LEN); \ + } while (0) + +/** + * util_scan_entry_rssi() - function to read rssi of scan entry + * @scan_entry: scan entry + * + * API, function to read rssi value of scan entry + * + * Return: rssi + */ +static inline int32_t +util_scan_entry_rssi(struct scan_cache_entry *scan_entry) +{ + return WLAN_RSSI_OUT(scan_entry->avg_rssi); +} + +/** + * util_scan_entry_snr() - function to read snr of scan entry + * @scan_entry: scan entry + * + * API, function to read snr value of scan entry + * + * Return: snr + */ +static inline uint8_t +util_scan_entry_snr(struct scan_cache_entry *scan_entry) +{ + uint32_t snr = WLAN_SNR_OUT(scan_entry->avg_snr); + /* + * An entry is in the BSS list means we've received at least one beacon + * from the corresponding AP, so the snr must be initialized. + * + * If the SNR is not initialized, return 0 (i.e. SNR == Noise Floor). + * Once se_avgsnr field has been initialized, ATH_SNR_OUT always + * returns values that fit in an 8-bit variable. + */ + return (snr >= WLAN_SNR_DUMMY_MARKER) ? 0 : (uint8_t)snr; +} + +/** + * util_scan_entry_phymode() - function to read phymode of scan entry + * @scan_entry: scan entry + * + * API, function to read phymode of scan entry + * + * Return: phymode + */ +static inline enum wlan_phymode +util_scan_entry_phymode(struct scan_cache_entry *scan_entry) +{ + return scan_entry->phy_mode; +} + +/** + * util_is_ssid_match() - to check if ssid match + * @ssid1: ssid 1 + * @ssid2: ssid 2 + * + * Return: true if ssid match + */ +static inline bool +util_is_ssid_match(struct wlan_ssid *ssid1, + struct wlan_ssid *ssid2) +{ + if (ssid1->length != ssid2->length) + return false; + + if (!qdf_mem_cmp(ssid1->ssid, + ssid2->ssid, ssid1->length)) + return true; + + return false; +} + +/** + * util_is_bssid_match() - to check if bssid match + * @bssid1: bssid 1 + * @bssid2: bssid 2 + * + * Return: true if bssid match + */ +static inline bool util_is_bssid_match(struct qdf_mac_addr *bssid1, + struct qdf_mac_addr *bssid2) +{ + + if (qdf_is_macaddr_zero(bssid1) || + qdf_is_macaddr_broadcast(bssid1)) + return true; + + if (qdf_is_macaddr_equal(bssid1, bssid2)) + return true; + + return false; +} + +/** + * util_is_bss_type_match() - to check if bss type + * @bss_type: bss type + * @cap: capability + * + * Return: true if bss type match + */ +static inline bool util_is_bss_type_match(enum wlan_bss_type bss_type, + union wlan_capability cap) +{ + bool match = true; + + switch (bss_type) { + case WLAN_TYPE_ANY: + break; + case WLAN_TYPE_IBSS: + if (!cap.wlan_caps.ibss) + match = false; + break; + case WLAN_TYPE_BSS: + if (!cap.wlan_caps.ess) + match = false; + break; + default: + match = false; + } + + return match; +} + +/** + * util_country_code_match() - to check if country match + * @country: country code pointer + * @country_ie: country IE in beacon + * + * Return: true if country match + */ +static inline bool util_country_code_match(uint8_t *country, + struct wlan_country_ie *cc) +{ + if (!country || !country[0]) + return true; + + if (!cc) + return false; + + if (cc->cc[0] == country[0] && + cc->cc[1] == country[1]) + return true; + + return false; +} + +/** + * util_mdie_match() - to check if mdie match + * @mobility_domain: mobility domain + * @mdie: mobility domain ie + * + * Return: true if country match + */ +static inline bool util_mdie_match(uint16_t mobility_domain, + struct rsn_mdie *mdie) +{ + uint16_t md; + + if (!mobility_domain) + return true; + + if (!mdie) + return false; + + md = + (mdie->mobility_domain[1] << 8) | + mdie->mobility_domain[0]; + + if (md == mobility_domain) + return true; + + return false; +} + +/** + * util_scan_entry_ssid() - function to read ssid of scan entry + * @scan_entry: scan entry + * + * API, function to read ssid of scan entry + * + * Return: ssid + */ +static inline struct wlan_ssid* +util_scan_entry_ssid(struct scan_cache_entry *scan_entry) +{ + return &(scan_entry->ssid); +} + +/** + * util_scan_entry_dtimperiod() - function to read dtim period of scan entry + * @scan_entry: scan entry + * + * API, function to read dtim period of scan entry + * + * Return: dtim period + */ +static inline uint8_t +util_scan_entry_dtimperiod(struct scan_cache_entry *scan_entry) +{ + return scan_entry->dtim_period; +} + +/** + * util_scan_entry_tim() - function to read tim ie of scan entry + * @scan_entry: scan entry + * + * API, function to read tim ie of scan entry + * + * Return: timie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_tim(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.tim; +} + +/** + * util_scan_entry_beacon_frame() - function to read full beacon or + * probe resp frame + * @scan_entry: scan entry + * + * API, function to read full beacon or probe resp frame including frame header + * + * Return: beacon/probe resp frame + */ +static inline struct element_info +util_scan_entry_beacon_frame(struct scan_cache_entry *scan_entry) +{ + /* util_scan_entry_beacon_data */ + return scan_entry->raw_frame; +} + +/** + * util_scan_entry_ie_data() - function to read tagged IEs + * @scan_entry: scan entry + * + * API, function to read beacon/probe response frames starting from tagged IEs + * (excluding frame header and fixed parameters) + * + * Return: tagged IES of beacon/probe resp frame + */ +static inline uint8_t* +util_scan_entry_ie_data(struct scan_cache_entry *scan_entry) +{ + struct element_info bcn_frm; + uint8_t *ie_data = NULL; + + bcn_frm = util_scan_entry_beacon_frame(scan_entry); + ie_data = (uint8_t *) (bcn_frm.ptr + + sizeof(struct wlan_frame_hdr) + + offsetof(struct wlan_bcn_frame, ie)); + return ie_data; +} + +/** + * util_scan_entry_ie_len() - function to read length of all tagged IEs + * @scan_entry: scan entry + * + * API, function to read length of all tagged IEs + * + * Return: length of all tagged IEs + */ +static inline uint16_t +util_scan_entry_ie_len(struct scan_cache_entry *scan_entry) +{ + struct element_info bcn_frm; + uint16_t ie_len = 0; + + bcn_frm = util_scan_entry_beacon_frame(scan_entry); + ie_len = (uint16_t) (bcn_frm.len - + sizeof(struct wlan_frame_hdr) - + offsetof(struct wlan_bcn_frame, ie)); + return ie_len; +} + +/** + * util_scan_entry_frame_len() - function to frame length + * @scan_entry: scan entry + * + * API, function to read frame length + * + * Return: frame length + */ +static inline uint32_t +util_scan_entry_frame_len(struct scan_cache_entry *scan_entry) +{ + return scan_entry->raw_frame.len; +} + +/** + * util_scan_entry_frame_ptr() - function to get frame ptr + * @scan_entry: scan entry + * + * API, function to read frame ptr + * + * Return: frame ptr + */ +static inline uint8_t* +util_scan_entry_frame_ptr(struct scan_cache_entry *scan_entry) +{ + return scan_entry->raw_frame.ptr; +} + +/** + * util_scan_entry_copy_ie_data() - function to get a copy of all tagged IEs + * @scan_entry: scan entry + * + * API, function to get a copy of all tagged IEs in passed memory + * + * Return: QDF_STATUS_SUCCESS if tagged IEs copied successfully + * QDF_STATUS_E_NOMEM if passed memory/length can't hold all tagged IEs + */ +static inline QDF_STATUS +util_scan_entry_copy_ie_data(struct scan_cache_entry *scan_entry, + uint8_t *iebuf, uint16_t *ie_len) +{ + u_int8_t *buff; + u_int16_t buff_len; + + /* iebuf can be NULL, ie_len must be a valid pointer. */ + QDF_ASSERT(ie_len); + if (!ie_len) + return QDF_STATUS_E_NULL_VALUE; + + buff = util_scan_entry_ie_data(scan_entry); + buff_len = util_scan_entry_ie_len(scan_entry); + /* + * If caller passed a buffer, check the length to make sure + * it's large enough. + * If no buffer is passed, just return the length of the IE blob. + */ + if (iebuf) { + if (*ie_len >= buff_len) { + qdf_mem_copy(iebuf, buff, buff_len); + *ie_len = buff_len; + return QDF_STATUS_SUCCESS; + } + } + + *ie_len = buff_len; + return QDF_STATUS_E_NOMEM; +} + +/** + * util_scan_free_cache_entry() - function to free scan + * cache entry + * @scan_entry: scan entry + * + * API, function to free scan cache entry + * + * Return: void + */ +static inline void +util_scan_free_cache_entry(struct scan_cache_entry *scan_entry) +{ + if (!scan_entry) + return; + if (scan_entry->alt_wcn_ie.ptr) + qdf_mem_free(scan_entry->alt_wcn_ie.ptr); + if (scan_entry->raw_frame.ptr) + qdf_mem_free(scan_entry->raw_frame.ptr); + qdf_mem_free(scan_entry); +} + +#define conv_ptr(_address, _base1, _base2) \ + ((_address) ? (((u_int8_t *) (_address) - \ + (u_int8_t *) (_base1)) + (u_int8_t *) (_base2)) : NULL) + +/** + * util_scan_copy_beacon_data() - copy beacon and update ie ptrs + * cache entry + * @new_entry: new scan entry + * @scan_entry: entry from where data is copied + * + * API, function to copy beacon and update ie ptrs + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +util_scan_copy_beacon_data(struct scan_cache_entry *new_entry, + struct scan_cache_entry *scan_entry) +{ + u_int8_t *new_ptr, *old_ptr; + struct ie_list *ie_lst; + + new_entry->raw_frame.ptr = + qdf_mem_malloc_atomic(scan_entry->raw_frame.len); + if (!new_entry->raw_frame.ptr) + return QDF_STATUS_E_NOMEM; + + qdf_mem_copy(new_entry->raw_frame.ptr, + scan_entry->raw_frame.ptr, + scan_entry->raw_frame.len); + new_entry->raw_frame.len = scan_entry->raw_frame.len; + new_ptr = new_entry->raw_frame.ptr; + old_ptr = scan_entry->raw_frame.ptr; + + new_entry->ie_list = scan_entry->ie_list; + + ie_lst = &new_entry->ie_list; + + /* New info_element needs also be added in ieee80211_parse_beacon */ + ie_lst->tim = conv_ptr(ie_lst->tim, old_ptr, new_ptr); + ie_lst->country = conv_ptr(ie_lst->country, old_ptr, new_ptr); + ie_lst->ssid = conv_ptr(ie_lst->ssid, old_ptr, new_ptr); + ie_lst->rates = conv_ptr(ie_lst->rates, old_ptr, new_ptr); + ie_lst->xrates = conv_ptr(ie_lst->xrates, old_ptr, new_ptr); + ie_lst->ds_param = conv_ptr(ie_lst->ds_param, old_ptr, new_ptr); + ie_lst->csa = conv_ptr(ie_lst->csa, old_ptr, new_ptr); + ie_lst->xcsa = conv_ptr(ie_lst->xcsa, old_ptr, new_ptr); + ie_lst->mcst = conv_ptr(ie_lst->mcst, old_ptr, new_ptr); + ie_lst->secchanoff = conv_ptr(ie_lst->secchanoff, old_ptr, new_ptr); + ie_lst->wpa = conv_ptr(ie_lst->wpa, old_ptr, new_ptr); + ie_lst->wcn = conv_ptr(ie_lst->wcn, old_ptr, new_ptr); + ie_lst->rsn = conv_ptr(ie_lst->rsn, old_ptr, new_ptr); + ie_lst->wps = conv_ptr(ie_lst->wps, old_ptr, new_ptr); + ie_lst->wmeinfo = conv_ptr(ie_lst->wmeinfo, old_ptr, new_ptr); + ie_lst->wmeparam = conv_ptr(ie_lst->wmeparam, old_ptr, new_ptr); + ie_lst->quiet = conv_ptr(ie_lst->quiet, old_ptr, new_ptr); + ie_lst->htcap = conv_ptr(ie_lst->htcap, old_ptr, new_ptr); + ie_lst->htinfo = conv_ptr(ie_lst->htinfo, old_ptr, new_ptr); + ie_lst->athcaps = conv_ptr(ie_lst->athcaps, old_ptr, new_ptr); + ie_lst->athextcaps = conv_ptr(ie_lst->athextcaps, old_ptr, new_ptr); + ie_lst->sfa = conv_ptr(ie_lst->sfa, old_ptr, new_ptr); + ie_lst->vendor = conv_ptr(ie_lst->vendor, old_ptr, new_ptr); + ie_lst->qbssload = conv_ptr(ie_lst->qbssload, old_ptr, new_ptr); + ie_lst->wapi = conv_ptr(ie_lst->wapi, old_ptr, new_ptr); + ie_lst->p2p = conv_ptr(ie_lst->p2p, old_ptr, new_ptr); + ie_lst->alt_wcn = conv_ptr(ie_lst->alt_wcn, old_ptr, new_ptr); + ie_lst->extcaps = conv_ptr(ie_lst->extcaps, old_ptr, new_ptr); + ie_lst->ibssdfs = conv_ptr(ie_lst->ibssdfs, old_ptr, new_ptr); + ie_lst->sonadv = conv_ptr(ie_lst->sonadv, old_ptr, new_ptr); + ie_lst->vhtcap = conv_ptr(ie_lst->vhtcap, old_ptr, new_ptr); + ie_lst->vhtop = conv_ptr(ie_lst->vhtop, old_ptr, new_ptr); + ie_lst->opmode = conv_ptr(ie_lst->opmode, old_ptr, new_ptr); + ie_lst->cswrp = conv_ptr(ie_lst->cswrp, old_ptr, new_ptr); + ie_lst->widebw = conv_ptr(ie_lst->widebw, old_ptr, new_ptr); + ie_lst->txpwrenvlp = conv_ptr(ie_lst->txpwrenvlp, old_ptr, new_ptr); + ie_lst->bwnss_map = conv_ptr(ie_lst->bwnss_map, old_ptr, new_ptr); + ie_lst->mdie = conv_ptr(ie_lst->mdie, old_ptr, new_ptr); + ie_lst->hecap = conv_ptr(ie_lst->hecap, old_ptr, new_ptr); + ie_lst->heop = conv_ptr(ie_lst->heop, old_ptr, new_ptr); + ie_lst->fils_indication = conv_ptr(ie_lst->fils_indication, + old_ptr, new_ptr); + ie_lst->esp = conv_ptr(ie_lst->esp, old_ptr, new_ptr); + ie_lst->mbo_oce = conv_ptr(ie_lst->mbo_oce, old_ptr, new_ptr); + ie_lst->muedca = conv_ptr(ie_lst->muedca, old_ptr, new_ptr); + ie_lst->rnrie = conv_ptr(ie_lst->rnrie, old_ptr, new_ptr); + ie_lst->extender = conv_ptr(ie_lst->extender, old_ptr, new_ptr); + ie_lst->adaptive_11r = conv_ptr(ie_lst->adaptive_11r, old_ptr, new_ptr); + ie_lst->single_pmk = conv_ptr(ie_lst->single_pmk, old_ptr, new_ptr); + ie_lst->rsnxe = conv_ptr(ie_lst->rsnxe, old_ptr, new_ptr); + + return QDF_STATUS_SUCCESS; +} +/** + * util_scan_copy_cache_entry() - function to create a copy + * of scan cache entry + * @scan_entry: scan entry + * + * API, function to create a copy of scan cache entry + * + * Return: copy of scan_entry + */ +static inline struct scan_cache_entry * +util_scan_copy_cache_entry(struct scan_cache_entry *scan_entry) +{ + struct scan_cache_entry *new_entry; + QDF_STATUS status; + + if (!scan_entry) + return NULL; + + new_entry = + qdf_mem_malloc_atomic(sizeof(*scan_entry)); + if (!new_entry) + return NULL; + + qdf_mem_copy(new_entry, + scan_entry, sizeof(*scan_entry)); + + if (scan_entry->alt_wcn_ie.ptr) { + new_entry->alt_wcn_ie.ptr = + qdf_mem_malloc_atomic(scan_entry->alt_wcn_ie.len); + if (!new_entry->alt_wcn_ie.ptr) { + qdf_mem_free(new_entry); + return NULL; + } + qdf_mem_copy(new_entry->alt_wcn_ie.ptr, + scan_entry->alt_wcn_ie.ptr, + scan_entry->alt_wcn_ie.len); + new_entry->alt_wcn_ie.len = + scan_entry->alt_wcn_ie.len; + } + + status = util_scan_copy_beacon_data(new_entry, scan_entry); + if (QDF_IS_STATUS_ERROR(status)) { + util_scan_free_cache_entry(new_entry); + return NULL; + } + + return new_entry; +} + +/** + * util_scan_entry_channel() - function to read channel info + * @scan_entry: scan entry + * + * API, function to read channel info + * + * Return: channel info + */ +static inline struct channel_info* +util_scan_entry_channel(struct scan_cache_entry *scan_entry) +{ + return &(scan_entry->channel); +} + +/** + * util_scan_entry_channel_frequency() - function to read channel number + * @scan_entry: scan entry + * + * API, function to read channel number + * + * Return: channel number + */ +static inline uint32_t +util_scan_entry_channel_frequency(struct scan_cache_entry *scan_entry) +{ + return scan_entry->channel.chan_freq; +} + +/** + * util_scan_entry_erpinfo() - function to read erp info + * @scan_entry: scan entry + * + * API, function to read erp info + * + * Return: erp info + */ +static inline uint8_t +util_scan_entry_erpinfo(struct scan_cache_entry *scan_entry) +{ + return scan_entry->erp; +} + +/** + * util_scan_entry_rates() - function to read supported rates IE + * @scan_entry: scan entry + * + * API, function to read supported rates IE + * + * Return: basic ratesie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_rates(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.rates; +} + +/** + * util_scan_entry_xrates()- function to read extended supported rates IE + * @scan_entry: scan entry + * + * API, function to read extended supported rates IE + * + * Return: extended supported ratesie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_xrates(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.xrates; +} + +/** + * util_scan_entry_rsn()- function to read rsn IE + * @scan_entry: scan entry + * + * API, function to read rsn IE + * + * Return: rsnie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_rsn(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.rsn; +} + +/** + * util_scan_entry_adaptive_11r()- function to read adaptive 11r Vendor IE + * @scan_entry: scan entry + * + * API, function to read adaptive 11r IE + * + * Return: apaptive 11r ie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_adaptive_11r(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.adaptive_11r; +} + +/** + * util_scan_entry_single_pmk()- function to read single pmk Vendor IE + * @scan_entry: scan entry + * + * API, function to read sae single pmk IE + * + * Return: true if single_pmk ie is present or false if ie is not present + */ +static inline bool +util_scan_entry_single_pmk(struct scan_cache_entry *scan_entry) +{ + if (scan_entry->ie_list.single_pmk) + return true; + + return false; +} + +/** + * util_scan_get_rsn_len()- function to read rsn IE length if present + * @scan_entry: scan entry + * + * API, function to read rsn length if present + * + * Return: rsnie length + */ +static inline uint8_t +util_scan_get_rsn_len(struct scan_cache_entry *scan_entry) +{ + if (scan_entry && scan_entry->ie_list.rsn) + return scan_entry->ie_list.rsn[1] + 2; + else + return 0; +} + + +/** + * util_scan_entry_wpa() - function to read wpa IE + * @scan_entry: scan entry + * + * API, function to read wpa IE + * + * Return: wpaie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_wpa(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.wpa; +} + +/** + * util_scan_get_wpa_len()- function to read wpa IE length if present + * @scan_entry: scan entry + * + * API, function to read wpa ie length if present + * + * Return: wpa ie length + */ +static inline uint8_t +util_scan_get_wpa_len(struct scan_cache_entry *scan_entry) +{ + if (scan_entry && scan_entry->ie_list.wpa) + return scan_entry->ie_list.wpa[1] + 2; + else + return 0; +} + + +/** + * util_scan_entry_wapi() - function to read wapi IE + * @scan_entry: scan entry + * + * API, function to read wapi IE + * + * Return: wapiie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_wapi(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.wapi; +} + +/** + * util_scan_entry_wps() - function to read wps IE + * @scan_entry: scan entry + * + * API, function to read wps IE + * + * Return: wpsie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_wps(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.wps; +} + +/** + * util_scan_entry_sfa() - function to read sfa IE + * @scan_entry: scan entry + * + * API, function to read sfa IE + * + * Return: sfaie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_sfa(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.sfa; +} + +/** + * util_scan_entry_ds_param() - function to read ds params + * @scan_entry: scan entry + * + * API, function to read ds params + * + * Return: ds params or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_ds_param(struct scan_cache_entry *scan_entry) +{ + if (scan_entry) + return scan_entry->ie_list.ds_param; + else + return NULL; +} + +/** + * util_scan_entry_csa() - function to read csa IE + * @scan_entry: scan entry + * + * API, function to read csa IE + * + * Return: csaie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_csa(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.csa; +} + +/** + * util_scan_entry_xcsa() - function to read extended csa IE + * @scan_entry: scan entry + * + * API, function to read extended csa IE + * + * Return: extended csaie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_xcsa(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.xcsa; +} + +/** + * util_scan_entry_htinfo() - function to read htinfo IE + * @scan_entry: scan entry + * + * API, function to read htinfo IE + * + * Return: htinfoie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_htinfo(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.htinfo; +} + + +/** + * util_scan_entry_htcap() - function to read htcap IE + * @scan_entry: scan entry + * + * API, function to read htcap IE + * + * Return: htcapie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_htcap(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.htcap; +} + +/** + * util_scan_entry_vhtcap() - function to read vhtcap IE + * @scan_entry: scan entry + * + * API, function to read vhtcap IE + * + * Return: vhtcapie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_vhtcap(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.vhtcap; +} + +/** + * util_scan_entry_vhtop() - function to read vhtop IE + * @scan_entry: scan entry + * + * API, function to read vhtop IE + * + * Return: vhtopie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_vhtop(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.vhtop; +} + +/** + * util_scan_entry_quiet() - function to read quiet IE + * @scan_entry: scan entry + * + * API, function to read quiet IE + * + * Return: quietie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_quiet(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.quiet; +} + +/** + * util_scan_entry_qbssload() - function to read qbss load IE + * @scan_entry: scan entry + * + * API, function to read qbss load IE + * + * Return: qbss loadie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_qbssload(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.qbssload; +} + +/** + * util_scan_entry_vendor() - function to read vendor IE + * @scan_entry: scan entry + * + * API, function to read vendor IE + * + * Return: vendorie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_vendor(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.vendor; +} + +/** + * util_scan_entry_country() - function to read country IE + * @scan_entry: scan entry + * + * API, function to read country IE + * + * Return: countryie or NULL if ie is not present + */ +static inline struct wlan_country_ie* +util_scan_entry_country(struct scan_cache_entry *scan_entry) +{ + return (struct wlan_country_ie *)scan_entry->ie_list.country; +} + +/** + * util_scan_entry_copy_country() - function to copy country name + * @scan_entry: scan entry + * @cntry: out buffer + * + * API, function to copy country name code string in given memory @centry + * + * Return: QDF_STATUS_SUCCESS if successfully copied country name + * QDF_STATUS_E_INVAL if passed buffer is null + * QDF_STATUS_E_NOMEM if scan entry dont have country IE + */ +static inline QDF_STATUS +util_scan_entry_copy_country(struct scan_cache_entry *scan_entry, + uint8_t *cntry) +{ + struct wlan_country_ie *country_ie; + + if (!cntry) + return QDF_STATUS_E_INVAL; + + country_ie = util_scan_entry_country(scan_entry); + + if (!country_ie) + return QDF_STATUS_E_NOMEM; + + qdf_mem_copy(cntry, country_ie->cc, 3); + + return QDF_STATUS_SUCCESS; +} + +/** + * util_scan_entry_wmeinfo() - function to read wme info ie + * @scan_entry: scan entry + * + * API, function to read wme info ie + * + * Return: wme infoie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_wmeinfo(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.wmeinfo; +} + +/** + * util_scan_entry_wmeparam() - function to read wme param ie + * @scan_entry: scan entry + * + * API, function to read wme param ie + * + * Return: wme paramie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_wmeparam(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.wmeparam; +} + +/** + * util_scan_entry_age() - function to read age of scan entry + * @scan_entry: scan entry + * + * API, function to read age of scan entry + * + * Return: age in ms + */ +static inline qdf_time_t +util_scan_entry_age(struct scan_cache_entry *scan_entry) +{ + qdf_time_t ts = scan_entry->scan_entry_time; + + return qdf_mc_timer_get_system_time() - ts; +} + +/** + * util_scan_mlme_info() - function to read mlme info struct + * @scan_entry: scan entry + * + * API, function to read mlme info struct + * + * Return: mlme info + */ +static inline struct mlme_info* +util_scan_mlme_info(struct scan_cache_entry *scan_entry) +{ + return &scan_entry->mlme_info; +} + +/** + * util_scan_entry_bss_type() - function to read bss type + * @scan_entry: scan entry + * + * API, function to read bss type + * + * Return: bss type + */ +static inline enum wlan_bss_type +util_scan_entry_bss_type(struct scan_cache_entry *scan_entry) +{ + if (scan_entry->cap_info.value & WLAN_CAPINFO_ESS) + return WLAN_TYPE_BSS; + else if (scan_entry->cap_info.value & WLAN_CAPINFO_IBSS) + return WLAN_TYPE_IBSS; + else + return WLAN_TYPE_ANY; +} + +/** + * util_scan_entry_privacy() - function to check if privacy is enebled + * @scan_entry: scan entry + * + * API, function to check if privacy is enebled + * + * Return: true if privacy is enabled, false other wise + */ +static inline bool +util_scan_entry_privacy(struct scan_cache_entry *scan_entry) +{ + return (scan_entry->cap_info.value & + WLAN_CAPINFO_PRIVACY) ? true : false; +} + +/** + * util_scan_entry_athcaps() - function to read ath caps vendor ie + * @scan_entry: scan entry + * + * API, function to read ath caps vendor ie + * + * Return: ath caps vendorie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_athcaps(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.athcaps; +} + +/** + * util_scan_entry_athextcaps() - function to read ath extcaps vendor ie + * @scan_entry: scan entry + * + * API, function to read ath extcaps vendor ie + * + * Return: ath extcaps vendorie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_athextcaps(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.athextcaps; +} + +/** + * util_scan_entry_bwnss_map() - function to read bwnss_map ie + * @scan_entry: scan entry + * + * API, function to read bwnss_map ie + * + * Return: bwnss_map ie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_bwnss_map(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.bwnss_map; +} + +/** + * util_scan_entry_sonie() - function to read son ie + * @scan_entry: scan entry + * + * API, function to read son ie + * + * Return: son ie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_sonie(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.sonadv; +} + +/** + * util_scan_entry_widebw() - function to read wide band chan switch sub elem ie + * @scan_entry: scan entry + * + * API, function to read wide band chan switch sub elem ie + * + * Return: wide band chan switch sub elem or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_widebw(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.widebw; +} + +/** + * util_scan_entry_secchanoff() - function to read secondary channel offset ie + * @scan_entry: scan entry + * + * API, function to read secondary channel offset ie + * + * Return: secondary channel offset element or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_secchanoff(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.secchanoff; +} + +/** + * util_scan_entry_cswrp() - function to read channel switch wrapper ie + * @scan_entry: scan entry + * + * API, function to read channel switch wrapper ie + * + * Return: channel switch wrapper element or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_cswrp(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.cswrp; +} + +/** + * util_scan_entry_omn() - function to read operating mode notification ie + * @scan_entry: scan entry + * + * API, function to read operating mode notification + * + * Return: operating mode notification element or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_omn(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.opmode; +} + +/** + * util_scan_entry_extcaps() - function to read extcap ie + * @scan_entry: scan entry + * + * API, function to read extcap ie + * + * Return: extcap element or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_extcaps(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.extcaps; +} + +/** + * util_scan_entry_get_extcap() - function to read extended capability field ie + * @scan_entry: scan entry + * @extcap_bit_field: extended capability bit field + * @extcap_value: pointer to fill extended capability field value + * + * API, function to read extended capability field + * + * Return: QDF_STATUS_SUCCESS if extended capability field is found + * QDF_STATUS_E_NOMEM if extended capability field is not found + */ +static inline QDF_STATUS +util_scan_entry_get_extcap(struct scan_cache_entry *scan_entry, + enum ext_cap_bit_field extcap_bit_field, + uint8_t *extcap_value) +{ + struct wlan_ext_cap_ie *ext_cap = + (struct wlan_ext_cap_ie *)util_scan_entry_extcaps(scan_entry); + + uint8_t ext_caps_byte = (extcap_bit_field >> 3); + uint8_t ext_caps_bit_pos = extcap_bit_field & 0x7; + + *extcap_value = 0; + + if (!ext_cap) + return QDF_STATUS_E_NULL_VALUE; + + if (ext_cap->ext_cap_len <= ext_caps_byte) + return QDF_STATUS_E_NULL_VALUE; + + *extcap_value = + ((ext_cap->ext_caps[ext_caps_byte] >> ext_caps_bit_pos) & 0x1); + + return QDF_STATUS_SUCCESS; +} + +/** + * util_scan_entry_athcaps() - function to read ath caps vendor ie + * @scan_entry: scan entry + * + * API, function to read ath caps vendor ie + * + * Return: ath caps vendorie or NULL if ie is not present + */ +static inline struct mlme_info* +util_scan_entry_mlme_info(struct scan_cache_entry *scan_entry) +{ + return &(scan_entry->mlme_info); +} + +/** +* util_scan_entry_mcst() - function to read mcst IE +* @scan_entry:scan entry +* +* API, function to read mcst IE +* +* Return: mcst or NULL if ie is not present +*/ +static inline uint8_t* +util_scan_entry_mcst(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.mcst; +} + +/** + * util_scan_entry_hecap() - function to read he caps vendor ie + * @scan_entry: scan entry + * + * API, function to read he caps vendor ie + * + * Return: he caps vendorie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_hecap(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.hecap; +} + +/** + * util_scan_entry_he_6g_cap() - function to read he 6GHz caps vendor ie + * @scan_entry: scan entry + * + * API, function to read he 6GHz caps vendor ie + * + * Return: he caps vendorie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_he_6g_cap(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.hecap_6g; +} + +/** + * util_scan_entry_heop() - function to read heop vendor ie + * @scan_entry: scan entry + * + * API, function to read heop vendor ie + * + * Return, heop vendorie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_heop(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.heop; +} + +/** + * util_scan_entry_muedca() - function to read MU-EDCA IE + * @scan_entry: scan entry + * + * API, function to read MU-EDCA IE + * + * Return, MUEDCA IE or NULL if IE is not present + */ +static inline uint8_t* +util_scan_entry_muedca(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.muedca; +} + +/** + * util_scan_entry_spatial_reuse_parameter() - function to read spatial reuse + * parameter ie + * @scan_entry: scan entry + * + * API, function to read scan_entry reuse parameter ie + * + * Return, spatial reuse parameter ie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_spatial_reuse_parameter(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.srp; +} + +/** + * util_scan_entry_fils_indication() - function to read FILS indication ie + * @scan_entry: scan entry + * + * API, function to read FILS indication ie + * + * Return, FILS indication ie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_fils_indication(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.fils_indication; +} + +/** + * util_get_last_scan_time() - function to get last scan time on this pdev + * @vdev: vdev object + * + * API, function to read last scan time on this pdev + * + * Return: qdf_time_t + */ +qdf_time_t +util_get_last_scan_time(struct wlan_objmgr_vdev *vdev); + +/** + * util_scan_entry_update_mlme_info() - function to update mlme info + * @scan_entry: scan entry object + * + * API, function to update mlme info in scan DB + * + * Return: QDF_STATUS + */ +QDF_STATUS +util_scan_entry_update_mlme_info(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry); + +/** + * util_scan_is_hidden_ssid() - function to check if ssid is hidden + * @ssid: struct ie_ssid object + * + * API, function to check if ssid is hidden + * + * Return: true if ap is hidden, false otherwise + */ +bool +util_scan_is_hidden_ssid(struct ie_ssid *ssid); + +/** + * util_scan_entry_is_hidden_ap() - function to check if ap is hidden + * @scan_entry: scan entry + * + * API, function to check if ap is hidden + * + * Return: true if ap is hidden, false otherwise + */ +static inline bool +util_scan_entry_is_hidden_ap(struct scan_cache_entry *scan_entry) +{ + return util_scan_is_hidden_ssid( + (struct ie_ssid *)scan_entry->ie_list.ssid); +} + +/** + * util_scan_entry_espinfo() - function to read ESP info + * @scan_entry: scan entry + * + * API, function to read ESP info + * + * Return: erp info + */ +static inline uint8_t * +util_scan_entry_esp_info(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.esp; +} + +/** + * util_scan_entry_mbo_oce() - function to read MBO/OCE ie + * @scan_entry: scan entry + * + * API, function to read MBO/OCE ie + * + * Return: MBO/OCE ie + */ +static inline uint8_t * +util_scan_entry_mbo_oce(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.mbo_oce; +} + +/** + * util_scan_entry_rsnxe() - function to read RSNXE ie + * @scan_entry: scan entry + * + * API, function to read RSNXE ie + * + * Return: RSNXE ie + */ +static inline uint8_t * +util_scan_entry_rsnxe(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.rsnxe; +} + +/** + * util_scan_scm_chan_to_band() - function to tell band for channel number + * @chan: Channel number + * + * Return: Band information as per channel + */ +enum wlan_band util_scan_scm_chan_to_band(uint32_t chan); + +/** + * util_scan_scm_freq_to_band() - API to get band from frequency + * @freq: Channel frequency + * + * Return: Band information as per frequency + */ +enum wlan_band util_scan_scm_freq_to_band(uint16_t freq); + +/** + * util_is_scan_completed() - function to get scan complete status + * @event: scan event + * @success: true if scan complete success, false otherwise + * + * API, function to get the scan result + * + * Return: true if scan complete, false otherwise + */ +bool util_is_scan_completed(struct scan_event *event, bool *success); + +/** + * util_scan_entry_extenderie() - function to read extender IE + * @scan_entry: scan entry + * + * API, function to read extender IE + * + * Return: extenderie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_extenderie(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.extender; +} + +/** + * util_scan_entry_mdie() - function to read Mobility Domain IE + * @scan_entry: scan entry + * + * API, function to read Mobility Domain IE + * + * Return: MDIE or NULL if IE is not present + */ +static inline uint8_t* +util_scan_entry_mdie(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.mdie; +} + +/** + * util_scan_is_null_ssid() - to check for NULL ssid + * @ssid: ssid + * + * Return: true if NULL ssid else false + */ +static inline bool util_scan_is_null_ssid(struct wlan_ssid *ssid) +{ + uint32_t ssid_length; + uint8_t *ssid_str; + + if (ssid->length == 0) + return true; + + /* Consider 0 or space for hidden SSID */ + if (0 == ssid->ssid[0]) + return true; + + ssid_length = ssid->length; + ssid_str = ssid->ssid; + + while (ssid_length) { + if (*ssid_str != ASCII_SPACE_CHARACTER && + *ssid_str) + break; + ssid_str++; + ssid_length--; + } + + if (ssid_length == 0) + return true; + + return false; +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_extscan_api.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_extscan_api.c new file mode 100644 index 0000000000000000000000000000000000000000..9bfacca33021ec044e788f197a97a5f1fcdf3060 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_extscan_api.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: This file contains all EXTSSCAN component's APIs + */ + +#include "wlan_extscan_api.h" +#include "cfg_ucfg_api.h" + +bool extscan_get_enable(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return false; + } + + return scan_obj->extscan_cfg.extscan_enabled; +} + +void extscan_get_passive_max_time(struct wlan_objmgr_psoc *psoc, + uint32_t *passive_max_chn_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return; + } + + *passive_max_chn_time = + scan_obj->extscan_cfg.extscan_passive_max_chn_time; +} + +void extscan_get_active_max_time(struct wlan_objmgr_psoc *psoc, + uint32_t *active_max_chn_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return; + } + + *active_max_chn_time = + scan_obj->extscan_cfg.extscan_active_max_chn_time; +} + +void extscan_get_active_min_time(struct wlan_objmgr_psoc *psoc, + uint32_t *active_min_chn_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return; + } + + *active_min_chn_time = + scan_obj->extscan_cfg.extscan_active_min_chn_time; +} + +QDF_STATUS +wlan_extscan_global_init(struct wlan_objmgr_psoc *psoc, + struct wlan_scan_obj *scan_obj) +{ + struct extscan_def_config *extscan_def = &scan_obj->extscan_cfg; + + extscan_def->extscan_enabled = true; + extscan_def->extscan_passive_max_chn_time = + cfg_get(psoc, CFG_EXTSCAN_PASSIVE_MAX_CHANNEL_TIME); + extscan_def->extscan_passive_min_chn_time = + cfg_get(psoc, CFG_EXTSCAN_PASSIVE_MIN_CHANNEL_TIME); + extscan_def->extscan_active_max_chn_time = + cfg_get(psoc, CFG_EXTSCAN_ACTIVE_MAX_CHANNEL_TIME); + extscan_def->extscan_active_min_chn_time = + cfg_get(psoc, CFG_EXTSCAN_ACTIVE_MIN_CHANNEL_TIME); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_extscan_global_deinit() +{ + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_api.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_api.c new file mode 100644 index 0000000000000000000000000000000000000000..01ddfaa3f9708c413ae29d46a9c346301e9dc67c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_api.c @@ -0,0 +1,277 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: This file contains all SCAN component's APIs + */ + +#include "cfg_ucfg_api.h" +#include "wlan_scan_api.h" + +void wlan_scan_cfg_get_passive_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return; + *dwell_time = scan_obj->scan_def.passive_dwell; +} + +void wlan_scan_cfg_set_passive_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return; + scan_obj->scan_def.passive_dwell = dwell_time; +} + +void wlan_scan_cfg_get_active_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return; + *dwell_time = scan_obj->scan_def.active_dwell; +} + +void wlan_scan_cfg_set_active_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return; + scan_obj->scan_def.active_dwell = dwell_time; +} + +void wlan_scan_cfg_get_active_2g_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return; + + *dwell_time = scan_obj->scan_def.active_dwell_2g; +} + +void wlan_scan_cfg_set_active_2g_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return; + scan_obj->scan_def.active_dwell_2g = dwell_time; +} + +void wlan_scan_cfg_get_conc_active_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return; + + *dwell_time = scan_obj->scan_def.conc_active_dwell; +} + +void wlan_scan_cfg_set_conc_active_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return; + + scan_obj->scan_def.conc_active_dwell = dwell_time; +} + +void wlan_scan_cfg_get_conc_passive_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t *dwell_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return; + + *dwell_time = scan_obj->scan_def.conc_passive_dwell; +} + +void wlan_scan_cfg_set_conc_passive_dwelltime(struct wlan_objmgr_psoc *psoc, + uint32_t dwell_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return; + + scan_obj->scan_def.conc_passive_dwell = dwell_time; +} + +void +wlan_scan_cfg_get_dfs_chan_scan_allowed(struct wlan_objmgr_psoc *psoc, + bool *enable_dfs_scan) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return; + + *enable_dfs_scan = scan_obj->scan_def.allow_dfs_chan_in_scan; +} + +void +wlan_scan_cfg_set_dfs_chan_scan_allowed(struct wlan_objmgr_psoc *psoc, + bool enable_dfs_scan) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return; + + scan_obj->scan_def.allow_dfs_chan_in_scan = enable_dfs_scan; +} + +bool wlan_scan_cfg_honour_nl_scan_policy_flags(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return false; + + return scan_obj->scan_def.honour_nl_scan_policy_flags; +} + +void wlan_scan_cfg_get_conc_max_resttime(struct wlan_objmgr_psoc *psoc, + uint32_t *rest_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return; + + *rest_time = scan_obj->scan_def.conc_max_rest_time; +} + +void wlan_scan_cfg_get_conc_min_resttime(struct wlan_objmgr_psoc *psoc, + uint32_t *rest_time) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return; + + *rest_time = scan_obj->scan_def.conc_min_rest_time; +} + +bool wlan_scan_is_snr_monitor_enabled(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return cfg_default(CFG_ENABLE_SNR_MONITORING); + + return scan_obj->scan_def.scan_f_chan_stat_evnt; +} + +QDF_STATUS +wlan_scan_process_bcn_probe_rx_sync(struct wlan_objmgr_psoc *psoc, + qdf_nbuf_t buf, + struct mgmt_rx_event_params *rx_param, + enum mgmt_frame_type frm_type) +{ + struct scan_bcn_probe_event *bcn = NULL; + QDF_STATUS status; + + if ((frm_type != MGMT_PROBE_RESP) && + (frm_type != MGMT_BEACON)) { + scm_err("frame is not beacon or probe resp"); + status = QDF_STATUS_E_INVAL; + goto free; + } + + bcn = qdf_mem_malloc_atomic(sizeof(*bcn)); + if (!bcn) { + status = QDF_STATUS_E_NOMEM; + goto free; + } + bcn->rx_data = + qdf_mem_malloc_atomic(sizeof(*rx_param)); + if (!bcn->rx_data) { + status = QDF_STATUS_E_NOMEM; + goto free; + } + + if (frm_type == MGMT_PROBE_RESP) + bcn->frm_type = MGMT_SUBTYPE_PROBE_RESP; + else + bcn->frm_type = MGMT_SUBTYPE_BEACON; + + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_SCAN_ID); + if (QDF_IS_STATUS_ERROR(status)) { + scm_info("unable to get reference"); + goto free; + } + + bcn->psoc = psoc; + bcn->buf = buf; + qdf_mem_copy(bcn->rx_data, rx_param, sizeof(*rx_param)); + + return __scm_handle_bcn_probe(bcn); +free: + if (bcn && bcn->rx_data) + qdf_mem_free(bcn->rx_data); + if (bcn) + qdf_mem_free(bcn); + if (buf) + qdf_nbuf_free(buf); + + return status; +} + +qdf_time_t wlan_scan_get_aging_time(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return cfg_default(CFG_SCAN_AGING_TIME) * 1000; + + return scan_obj->scan_def.scan_cache_aging_time; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..8f098f6975828241e384750004d7428024abcc70 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_tgt_api.c @@ -0,0 +1,351 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan south bound interface definitions + */ + +#include +#include +#include "../../core/src/wlan_scan_main.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include <../../core/src/wlan_scan_manager.h> + +static inline struct wlan_lmac_if_scan_tx_ops * +wlan_psoc_get_scan_txops(struct wlan_objmgr_psoc *psoc) +{ + return &((psoc->soc_cb.tx_ops.scan)); +} + +static inline struct wlan_lmac_if_scan_tx_ops * +wlan_vdev_get_scan_txops(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + scm_err("NULL psoc"); + return NULL; + } + + return wlan_psoc_get_scan_txops(psoc); +} + +static inline struct wlan_lmac_if_scan_rx_ops * +wlan_vdev_get_scan_rxops(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + scm_err("NULL psoc"); + return NULL; + } + + return &((psoc->soc_cb.rx_ops.scan)); +} + +#ifdef FEATURE_WLAN_SCAN_PNO + +QDF_STATUS tgt_scan_pno_start(struct wlan_objmgr_vdev *vdev, + struct pno_scan_req_params *req) +{ + struct wlan_lmac_if_scan_tx_ops *scan_ops; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_vdev_get_psoc(vdev); + + if (!psoc) { + scm_err("NULL PSOC"); + return QDF_STATUS_E_FAILURE; + } + scan_ops = wlan_psoc_get_scan_txops(psoc); + if (!scan_ops) { + scm_err("NULL scan_ops"); + return QDF_STATUS_E_FAILURE; + } + /* invoke wmi_unified_pno_start_cmd() */ + QDF_ASSERT(scan_ops->pno_start); + if (scan_ops->pno_start) + return scan_ops->pno_start(psoc, req); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_scan_pno_stop(struct wlan_objmgr_vdev *vdev, + uint8_t vdev_id) +{ + struct wlan_lmac_if_scan_tx_ops *scan_ops; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_vdev_get_psoc(vdev); + + if (!psoc) { + scm_err("NULL PSOC"); + return QDF_STATUS_E_FAILURE; + } + scan_ops = wlan_psoc_get_scan_txops(psoc); + if (!scan_ops) { + scm_err("NULL scan_ops"); + return QDF_STATUS_E_FAILURE; + } + /* invoke wmi_unified_pno_stop_cmd() */ + QDF_ASSERT(scan_ops->pno_stop); + if (scan_ops->pno_stop) + return scan_ops->pno_stop(psoc, vdev_id); + + return QDF_STATUS_SUCCESS; +} +#endif + +QDF_STATUS +tgt_scan_start(struct scan_start_request *req) +{ + struct wlan_lmac_if_scan_tx_ops *scan_ops; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev = req->vdev; + + if (!vdev) { + scm_err("vdev is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc = wlan_vdev_get_psoc(vdev); + pdev = wlan_vdev_get_pdev(vdev); + if (!psoc || !pdev) { + scm_err("psoc: 0x%pK or pdev: 0x%pK is NULL", psoc, pdev); + return QDF_STATUS_E_NULL_VALUE; + } + + scan_ops = wlan_psoc_get_scan_txops(psoc); + /* invoke wmi_unified_scan_start_cmd_send() */ + QDF_ASSERT(scan_ops->scan_start); + if (scan_ops->scan_start) + return scan_ops->scan_start(pdev, req); + else + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS +tgt_scan_cancel(struct scan_cancel_request *req) +{ + struct wlan_lmac_if_scan_tx_ops *scan_ops; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev = req->vdev; + + if (!vdev) { + scm_err("vdev is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + psoc = wlan_vdev_get_psoc(vdev); + pdev = wlan_vdev_get_pdev(vdev); + if (!psoc || !pdev) { + scm_err("psoc: 0x%pK or pdev: 0x%pK is NULL", psoc, pdev); + return QDF_STATUS_E_NULL_VALUE; + } + scan_ops = wlan_psoc_get_scan_txops(psoc); + /* invoke wmi_unified_scan_stop_cmd_send() */ + QDF_ASSERT(scan_ops->scan_cancel); + if (scan_ops->scan_cancel) + return scan_ops->scan_cancel(pdev, &req->cancel_req); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +tgt_scan_register_ev_handler(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_scan_tx_ops *scan_ops = NULL; + + scan_ops = wlan_psoc_get_scan_txops(psoc); + /* invoke wmi_unified_register_event_handler() + * since event id, handler function and context is + * already known to offload lmac, passing NULL as argument. + * DA can pass necessary arguments by clubing then into + * some structure. + */ + QDF_ASSERT(scan_ops->scan_reg_ev_handler); + if (scan_ops->scan_reg_ev_handler) + return scan_ops->scan_reg_ev_handler(psoc, NULL); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +tgt_scan_unregister_ev_handler(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_scan_tx_ops *scan_ops = NULL; + + scan_ops = wlan_psoc_get_scan_txops(psoc); + /* invoke wmi_unified_register_event_handler() + * since event id, handler function and context is + * already known to offload lmac, passing NULL as argument. + * DA can pass necessary arguments by clubing then into + * some structure. + */ + QDF_ASSERT(scan_ops->scan_unreg_ev_handler); + if (scan_ops->scan_unreg_ev_handler) + return scan_ops->scan_unreg_ev_handler(psoc, NULL); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +tgt_scan_event_handler(struct wlan_objmgr_psoc *psoc, + struct scan_event_info *event_info) +{ + struct scheduler_msg msg = {0}; + struct scan_event *event = &event_info->event; + uint8_t vdev_id = event->vdev_id; + QDF_STATUS status; + + if (!psoc || !event_info) { + scm_err("psoc: 0x%pK, event_info: 0x%pK", psoc, event_info); + return QDF_STATUS_E_NULL_VALUE; + } + + event_info->vdev = + wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + vdev_id, WLAN_SCAN_ID); + if (!event_info->vdev) { + scm_err("null vdev, vdev_id: %d, psoc: 0x%pK", vdev_id, psoc); + return QDF_STATUS_E_INVAL; + } + msg.bodyptr = event_info; + msg.callback = scm_scan_event_handler; + msg.flush_callback = scm_scan_event_flush_callback; + + status = scheduler_post_message(QDF_MODULE_ID_SCAN, + QDF_MODULE_ID_SCAN, + QDF_MODULE_ID_SCAN, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + wlan_objmgr_vdev_release_ref(event_info->vdev, WLAN_SCAN_ID); + } + + return status; +} + +QDF_STATUS tgt_scan_bcn_probe_rx_callback(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer, qdf_nbuf_t buf, + struct mgmt_rx_event_params *rx_param, + enum mgmt_frame_type frm_type) +{ + struct scheduler_msg msg = {0}; + struct scan_bcn_probe_event *bcn = NULL; + QDF_STATUS status; + uint32_t scan_queue_size = 0; + + if ((frm_type != MGMT_PROBE_RESP) && + (frm_type != MGMT_BEACON)) { + scm_err("frame is not beacon or probe resp"); + status = QDF_STATUS_E_INVAL; + goto free; + } + + bcn = qdf_mem_malloc_atomic(sizeof(*bcn)); + if (!bcn) { + status = QDF_STATUS_E_NOMEM; + goto free; + } + bcn->rx_data = + qdf_mem_malloc_atomic(sizeof(*rx_param)); + if (!bcn->rx_data) { + status = QDF_STATUS_E_NOMEM; + goto free; + } + + if (frm_type == MGMT_PROBE_RESP) + bcn->frm_type = MGMT_SUBTYPE_PROBE_RESP; + else + bcn->frm_type = MGMT_SUBTYPE_BEACON; + + /* Check if the beacon/probe frame can be posted in the scan queue */ + status = scheduler_get_queue_size(QDF_MODULE_ID_SCAN, &scan_queue_size); + if (!QDF_IS_STATUS_SUCCESS(status) || + scan_queue_size > MAX_BCN_PROBE_IN_SCAN_QUEUE) { + scm_debug_rl("Dropping beacon/probe frame, queue size %d", + scan_queue_size); + status = QDF_STATUS_E_FAILURE; + goto free; + } + + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_SCAN_ID); + if (QDF_IS_STATUS_ERROR(status)) { + scm_info("unable to get reference"); + goto free; + } + + bcn->psoc = psoc; + bcn->buf = buf; + qdf_mem_copy(bcn->rx_data, rx_param, sizeof(*rx_param)); + + msg.bodyptr = bcn; + msg.callback = scm_handle_bcn_probe; + msg.flush_callback = scm_bcn_probe_flush_callback; + + status = scheduler_post_message(QDF_MODULE_ID_SCAN, + QDF_MODULE_ID_SCAN, + QDF_MODULE_ID_SCAN, &msg); + + if (QDF_IS_STATUS_SUCCESS(status)) + return status; + + wlan_objmgr_psoc_release_ref(psoc, WLAN_SCAN_ID); + +free: + if (bcn && bcn->rx_data) + qdf_mem_free(bcn->rx_data); + if (bcn) + qdf_mem_free(bcn); + if (buf) + qdf_nbuf_free(buf); + + return status; +} + +QDF_STATUS +tgt_scan_set_max_active_scans(struct wlan_objmgr_psoc *psoc, + uint32_t max_active_scans) +{ + struct scan_default_params *scan_params = NULL; + + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_NULL_VALUE; + } + + scan_params = wlan_scan_psoc_get_def_params(psoc); + if (!scan_params) { + scm_err("wlan_scan_psoc_get_def_params returned NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + scan_params->max_active_scans_allowed = max_active_scans; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..b77e7fc3939e59f80e01fcc020babe1ad20f3e2a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_ucfg_api.c @@ -0,0 +1,2070 @@ +/* + * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan north bound interface definitions + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../../core/src/wlan_scan_main.h" +#include "../../core/src/wlan_scan_manager.h" +#include "../../core/src/wlan_scan_cache_db.h" +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +#include +#endif +#ifdef WLAN_POLICY_MGR_ENABLE +#include +#include +#endif +#include "cfg_ucfg_api.h" +#include "wlan_extscan_api.h" + +QDF_STATUS ucfg_scan_register_bcn_cb(struct wlan_objmgr_psoc *psoc, + update_beacon_cb cb, enum scan_cb_type type) +{ + return scm_scan_register_bcn_cb(psoc, cb, type); +} + +qdf_list_t *ucfg_scan_get_result(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter) +{ + return scm_get_scan_result(pdev, filter); +} + +QDF_STATUS ucfg_scan_db_iterate(struct wlan_objmgr_pdev *pdev, + scan_iterator_func func, void *arg) +{ + return scm_iterate_scan_db(pdev, func, arg); +} + +QDF_STATUS ucfg_scan_purge_results(qdf_list_t *scan_list) +{ + return scm_purge_scan_results(scan_list); +} + +QDF_STATUS ucfg_scan_flush_results(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter) +{ + return scm_flush_results(pdev, filter); +} + +void ucfg_scan_filter_valid_channel(struct wlan_objmgr_pdev *pdev, + uint32_t *chan_freq_list, uint32_t num_chan) +{ + scm_filter_valid_channel(pdev, chan_freq_list, num_chan); +} + +QDF_STATUS ucfg_scan_init(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_register_psoc_create_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_psoc_created_notification, NULL); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("Failed to register psoc create handler"); + goto fail_create_psoc; + } + + status = wlan_objmgr_register_psoc_destroy_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_psoc_destroyed_notification, NULL); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("Failed to create psoc delete handler"); + goto fail_psoc_destroy; + } + scm_debug("scan psoc create and delete handler registered with objmgr"); + + status = wlan_objmgr_register_vdev_create_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_vdev_created_notification, NULL); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("Failed to register vdev create handler"); + goto fail_pdev_create; + } + + status = wlan_objmgr_register_vdev_destroy_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_vdev_destroyed_notification, NULL); + if (QDF_IS_STATUS_SUCCESS(status)) { + scm_debug("scan vdev create and delete handler registered with objmgr"); + return QDF_STATUS_SUCCESS; + } + + scm_err("Failed to destroy vdev delete handler"); + wlan_objmgr_unregister_vdev_create_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_vdev_created_notification, NULL); +fail_pdev_create: + wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_psoc_destroyed_notification, NULL); +fail_psoc_destroy: + wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_psoc_created_notification, NULL); +fail_create_psoc: + return status; +} + +QDF_STATUS ucfg_scan_deinit(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_psoc_created_notification, NULL); + if (status != QDF_STATUS_SUCCESS) + scm_err("Failed to unregister psoc create handler"); + + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_SCAN, + wlan_scan_psoc_destroyed_notification, NULL); + if (status != QDF_STATUS_SUCCESS) + scm_err("Failed to unregister psoc delete handler"); + + status = wlan_objmgr_unregister_vdev_create_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_vdev_created_notification, NULL); + if (status != QDF_STATUS_SUCCESS) + scm_err("Failed to unregister vdev create handler"); + + status = wlan_objmgr_unregister_vdev_destroy_handler( + WLAN_UMAC_COMP_SCAN, + wlan_scan_vdev_destroyed_notification, NULL); + if (status != QDF_STATUS_SUCCESS) + scm_err("Failed to unregister vdev delete handler"); + + return status; +} + +#ifdef FEATURE_WLAN_SCAN_PNO + +QDF_STATUS ucfg_scan_pno_start(struct wlan_objmgr_vdev *vdev, + struct pno_scan_req_params *req) +{ + struct scan_vdev_obj *scan_vdev_obj; + QDF_STATUS status; + + scan_vdev_obj = wlan_get_vdev_scan_obj(vdev); + if (!scan_vdev_obj) { + scm_err("null scan_vdev_obj"); + return QDF_STATUS_E_INVAL; + } + if (scan_vdev_obj->pno_in_progress) { + scm_err("pno already in progress"); + return QDF_STATUS_E_ALREADY; + } + + status = tgt_scan_pno_start(vdev, req); + if (QDF_IS_STATUS_ERROR(status)) + scm_err("pno start failed"); + else + scan_vdev_obj->pno_in_progress = true; + + return status; +} + +QDF_STATUS ucfg_scan_pno_stop(struct wlan_objmgr_vdev *vdev) +{ + struct scan_vdev_obj *scan_vdev_obj; + QDF_STATUS status; + + scan_vdev_obj = wlan_get_vdev_scan_obj(vdev); + if (!scan_vdev_obj) { + scm_err("null scan_vdev_obj"); + return QDF_STATUS_E_INVAL; + } + if (!scan_vdev_obj->pno_in_progress) { + scm_debug("pno already stopped"); + return QDF_STATUS_SUCCESS; + } + + status = tgt_scan_pno_stop(vdev, wlan_vdev_get_id(vdev)); + if (QDF_IS_STATUS_ERROR(status)) + scm_err("pno stop failed"); + else + scan_vdev_obj->pno_in_progress = false; + + return status; +} + +bool ucfg_scan_get_pno_in_progress(struct wlan_objmgr_vdev *vdev) +{ + struct scan_vdev_obj *scan_vdev_obj; + + scan_vdev_obj = wlan_get_vdev_scan_obj(vdev); + if (!scan_vdev_obj) { + scm_err("null scan_vdev_obj"); + return false; + } + + return scan_vdev_obj->pno_in_progress; +} + +bool ucfg_scan_get_pno_match(struct wlan_objmgr_vdev *vdev) +{ + struct scan_vdev_obj *scan_vdev_obj; + + scan_vdev_obj = wlan_get_vdev_scan_obj(vdev); + if (!scan_vdev_obj) { + scm_err("null scan_vdev_obj"); + return false; + } + + return scan_vdev_obj->pno_match_evt_received; +} + +static QDF_STATUS +wlan_pno_global_init(struct wlan_objmgr_psoc *psoc, + struct wlan_scan_obj *scan_obj) +{ + struct nlo_mawc_params *mawc_cfg; + struct pno_def_config *pno_def; + + pno_def = &scan_obj->pno_cfg; + qdf_wake_lock_create(&pno_def->pno_wake_lock, "wlan_pno_wl"); + mawc_cfg = &pno_def->mawc_params; + pno_def->channel_prediction = cfg_get(psoc, CFG_PNO_CHANNEL_PREDICTION); + pno_def->top_k_num_of_channels = + cfg_get(psoc, CFG_TOP_K_NUM_OF_CHANNELS); + pno_def->stationary_thresh = cfg_get(psoc, CFG_STATIONARY_THRESHOLD); + pno_def->channel_prediction_full_scan = + cfg_get(psoc, CFG_CHANNEL_PREDICTION_SCAN_TIMER); + pno_def->adaptive_dwell_mode = + cfg_get(psoc, CFG_ADAPTIVE_PNOSCAN_DWELL_MODE); + pno_def->dfs_chnl_scan_enabled = + cfg_get(psoc, CFG_ENABLE_DFS_PNO_CHNL_SCAN); + pno_def->scan_support_enabled = + cfg_get(psoc, CFG_PNO_SCAN_SUPPORT); + pno_def->scan_timer_repeat_value = + cfg_get(psoc, CFG_PNO_SCAN_TIMER_REPEAT_VALUE); + pno_def->slow_scan_multiplier = + cfg_get(psoc, CFG_PNO_SLOW_SCAN_MULTIPLIER); + pno_def->scan_backoff_multiplier = + cfg_get(psoc, CFG_SCAN_BACKOFF_MULTIPLIER); + pno_def->max_sched_scan_plan_interval = + cfg_get(psoc, CFG_MAX_SCHED_SCAN_PLAN_INTERVAL); + pno_def->max_sched_scan_plan_iterations = + cfg_get(psoc, CFG_MAX_SCHED_SCAN_PLAN_ITERATIONS); + pno_def->user_config_sched_scan_plan = + cfg_get(psoc, CFG_USER_CONFIG_SCHED_SCAN_PLAN); + + mawc_cfg->enable = cfg_get(psoc, CFG_MAWC_NLO_ENABLED); + mawc_cfg->exp_backoff_ratio = + cfg_get(psoc, CFG_MAWC_NLO_EXP_BACKOFF_RATIO); + mawc_cfg->init_scan_interval = + cfg_get(psoc, CFG_MAWC_NLO_INIT_SCAN_INTERVAL); + mawc_cfg->max_scan_interval = + cfg_get(psoc, CFG_MAWC_NLO_MAX_SCAN_INTERVAL); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +wlan_pno_global_deinit(struct wlan_scan_obj *scan_obj) +{ + qdf_wake_lock_destroy(&scan_obj->pno_cfg.pno_wake_lock); + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_POLICY_MGR_ENABLE +/* + * ucfg_scan_update_pno_dwell_time() - update active and passive dwell time + * depending on active concurrency modes + * @vdev: vdev object pointer + * @req: scan request + * + * Return: void + */ +static void ucfg_scan_update_pno_dwell_time(struct wlan_objmgr_vdev *vdev, + struct pno_scan_req_params *req, struct scan_default_params *scan_def) +{ + bool sap_or_p2p_present; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_vdev_get_psoc(vdev); + + if (!psoc) + return; + + sap_or_p2p_present = policy_mgr_mode_specific_connection_count( + psoc, PM_SAP_MODE, NULL) || + policy_mgr_mode_specific_connection_count( + psoc, PM_P2P_GO_MODE, NULL) || + policy_mgr_mode_specific_connection_count( + psoc, PM_P2P_CLIENT_MODE, NULL); + + if (sap_or_p2p_present) { + req->active_dwell_time = scan_def->conc_active_dwell; + req->passive_dwell_time = scan_def->conc_passive_dwell; + } + +} +#else +static inline void ucfg_scan_update_pno_dwell_time(struct wlan_objmgr_vdev *vdev, + struct pno_scan_req_params *req, struct scan_default_params *scan_def){} +#endif + +QDF_STATUS +ucfg_scan_get_pno_def_params(struct wlan_objmgr_vdev *vdev, + struct pno_scan_req_params *req) +{ + struct scan_default_params *scan_def; + struct wlan_scan_obj *scan; + struct pno_def_config *pno_def; + + if (!vdev || !req) { + scm_err("vdev: 0x%pK, req: 0x%pK", + vdev, req); + return QDF_STATUS_E_INVAL; + } + + scan = wlan_vdev_get_scan_obj(vdev); + if (!scan) { + scm_err("scan is NULL"); + return QDF_STATUS_E_INVAL; + } + scan_def = wlan_vdev_get_def_scan_params(vdev); + if (!scan_def) { + scm_err("wlan_vdev_get_def_scan_params returned NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + pno_def = &scan->pno_cfg; + req->active_dwell_time = scan_def->active_dwell; + req->passive_dwell_time = scan_def->passive_dwell; + req->scan_random.randomize = scan_def->enable_mac_spoofing; + + /* + * Update active and passive dwell time depending + * upon the present active concurrency mode + */ + ucfg_scan_update_pno_dwell_time(vdev, req, scan_def); + req->adaptive_dwell_mode = pno_def->adaptive_dwell_mode; + req->pno_channel_prediction = pno_def->channel_prediction; + req->top_k_num_of_channels = pno_def->top_k_num_of_channels; + req->stationary_thresh = pno_def->stationary_thresh; + req->channel_prediction_full_scan = + pno_def->channel_prediction_full_scan; + req->mawc_params.vdev_id = wlan_vdev_get_id(vdev); + qdf_mem_copy(&req->mawc_params, &pno_def->mawc_params, + sizeof(req->mawc_params)); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +ucfg_scan_register_pno_cb(struct wlan_objmgr_psoc *psoc, + scan_event_handler event_cb, void *arg) +{ + struct wlan_scan_obj *scan; + + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_INVAL; + } + + scan = wlan_psoc_get_scan_obj(psoc); + if (!scan) { + scm_err("scan object null"); + return QDF_STATUS_E_INVAL; + } + + qdf_spin_lock_bh(&scan->lock); + scan->pno_cfg.pno_cb.func = event_cb; + scan->pno_cfg.pno_cb.arg = arg; + qdf_spin_unlock_bh(&scan->lock); + scm_debug("event_cb: 0x%pK, arg: 0x%pK", event_cb, arg); + + return QDF_STATUS_SUCCESS; +} + +#else + +static inline QDF_STATUS +wlan_pno_global_init(struct wlan_objmgr_psoc *psoc, + struct wlan_scan_obj *scan_obj) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +wlan_pno_global_deinit(struct wlan_scan_obj *scan_obj) +{ + return QDF_STATUS_SUCCESS; +} + +#endif + +QDF_STATUS +ucfg_scan_set_custom_scan_chan_list(struct wlan_objmgr_pdev *pdev, + struct chan_list *chan_list) +{ + uint8_t pdev_id; + struct wlan_scan_obj *scan_obj; + + if (!pdev || !chan_list) { + scm_warn("pdev: 0x%pK, chan_list: 0x%pK", pdev, chan_list); + return QDF_STATUS_E_NULL_VALUE; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + scan_obj = wlan_pdev_get_scan_obj(pdev); + + qdf_mem_copy(&scan_obj->pdev_info[pdev_id].custom_chan_list, + chan_list, sizeof(*chan_list)); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +ucfg_scm_scan_free_scan_request_mem(struct scan_start_request *req) +{ + return scm_scan_free_scan_request_mem(req); +} + +QDF_STATUS +ucfg_scan_start(struct scan_start_request *req) +{ + struct scheduler_msg msg = {0}; + QDF_STATUS status; + + if (!req || !req->vdev) { + scm_err("req or vdev within req is NULL"); + if (req) + scm_scan_free_scan_request_mem(req); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!scm_is_scan_allowed(req->vdev)) { + scm_err_rl("scan disabled, rejecting the scan req"); + scm_scan_free_scan_request_mem(req); + return QDF_STATUS_E_AGAIN; + } + + /* Try to get vdev reference. Return if reference could + * not be taken. Reference will be released once scan + * request handling completes along with free of @req. + */ + status = wlan_objmgr_vdev_try_get_ref(req->vdev, WLAN_SCAN_ID); + if (QDF_IS_STATUS_ERROR(status)) { + scm_info("unable to get reference"); + scm_scan_free_scan_request_mem(req); + return status; + } + + msg.bodyptr = req; + msg.callback = scm_scan_start_req; + msg.flush_callback = scm_scan_start_flush_callback; + + status = scheduler_post_message(QDF_MODULE_ID_OS_IF, + QDF_MODULE_ID_SCAN, + QDF_MODULE_ID_OS_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + wlan_objmgr_vdev_release_ref(req->vdev, WLAN_SCAN_ID); + scm_scan_free_scan_request_mem(req); + } + + return status; +} + +QDF_STATUS ucfg_scan_psoc_set_enable(struct wlan_objmgr_psoc *psoc, + enum scan_disable_reason reason) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return QDF_STATUS_E_NULL_VALUE; + } + + scan_obj->scan_disabled &= ~reason; + scm_debug("Psoc scan_disabled %x", scan_obj->scan_disabled); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_scan_psoc_set_disable(struct wlan_objmgr_psoc *psoc, + enum scan_disable_reason reason) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return QDF_STATUS_E_NULL_VALUE; + } + + scan_obj->scan_disabled |= reason; + + scm_debug("Psoc scan_disabled %x", scan_obj->scan_disabled); + + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS ucfg_scan_vdev_set_enable(struct wlan_objmgr_vdev *vdev, + enum scan_disable_reason reason) +{ + struct scan_vdev_obj *scan_vdev_obj; + + scan_vdev_obj = wlan_get_vdev_scan_obj(vdev); + if (!scan_vdev_obj) { + scm_err("null scan_vdev_obj"); + return QDF_STATUS_E_NULL_VALUE; + } + + scan_vdev_obj->scan_disabled &= ~reason; + + scm_debug("Vdev scan_disabled %x", scan_vdev_obj->scan_disabled); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_scan_vdev_set_disable(struct wlan_objmgr_vdev *vdev, + enum scan_disable_reason reason) +{ + struct scan_vdev_obj *scan_vdev_obj; + + scan_vdev_obj = wlan_get_vdev_scan_obj(vdev); + if (!scan_vdev_obj) { + scm_err("null scan_vdev_obj"); + return QDF_STATUS_E_NULL_VALUE; + } + + scan_vdev_obj->scan_disabled |= reason; + + scm_debug("Vdev scan_disabled %x", scan_vdev_obj->scan_disabled); + + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS ucfg_scan_set_miracast( + struct wlan_objmgr_psoc *psoc, bool enable) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return QDF_STATUS_E_NULL_VALUE; + } + scan_obj->miracast_enabled = enable; + scm_debug("set miracast_enable to %d", scan_obj->miracast_enabled); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +ucfg_scan_set_wide_band_scan(struct wlan_objmgr_pdev *pdev, bool enable) +{ + uint8_t pdev_id; + struct wlan_scan_obj *scan_obj; + + if (!pdev) { + scm_warn("null vdev"); + return QDF_STATUS_E_NULL_VALUE; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + scan_obj = wlan_pdev_get_scan_obj(pdev); + if (!scan_obj) + return QDF_STATUS_E_FAILURE; + + scm_debug("set wide_band_scan to %d", enable); + scan_obj->pdev_info[pdev_id].wide_band_scan = enable; + + return QDF_STATUS_SUCCESS; +} + +bool ucfg_scan_get_wide_band_scan(struct wlan_objmgr_pdev *pdev) +{ + uint8_t pdev_id; + struct wlan_scan_obj *scan_obj; + + if (!pdev) { + scm_warn("null vdev"); + return QDF_STATUS_E_NULL_VALUE; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + scan_obj = wlan_pdev_get_scan_obj(pdev); + if (!scan_obj) + return QDF_STATUS_E_FAILURE; + + return scan_obj->pdev_info[pdev_id].wide_band_scan; +} + +#ifdef WLAN_DFS_CHAN_HIDDEN_SSID +QDF_STATUS +ucfg_scan_config_hidden_ssid_for_bssid(struct wlan_objmgr_pdev *pdev, + uint8_t *bssid, struct wlan_ssid *ssid) +{ + uint8_t pdev_id; + struct wlan_scan_obj *scan_obj; + + if (!pdev) { + scm_warn("null vdev"); + return QDF_STATUS_E_NULL_VALUE; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + scan_obj = wlan_pdev_get_scan_obj(pdev); + if (!scan_obj) + return QDF_STATUS_E_FAILURE; + + scm_debug("Configure bsssid:"QDF_MAC_ADDR_FMT" ssid:%.*s", + QDF_MAC_ADDR_REF(bssid), ssid->length, ssid->ssid); + qdf_mem_copy(scan_obj->pdev_info[pdev_id].conf_bssid, + bssid, QDF_MAC_ADDR_SIZE); + scan_obj->pdev_info[pdev_id].conf_ssid.length = ssid->length; + qdf_mem_copy(scan_obj->pdev_info[pdev_id].conf_ssid.ssid, + ssid->ssid, + scan_obj->pdev_info[pdev_id].conf_ssid.length); + + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_DFS_CHAN_HIDDEN_SSID */ + +QDF_STATUS +ucfg_scan_cancel(struct scan_cancel_request *req) +{ + struct scheduler_msg msg = {0}; + QDF_STATUS status; + + if (!req || !req->vdev) { + scm_err("req or vdev within req is NULL"); + if (req) + qdf_mem_free(req); + return QDF_STATUS_E_NULL_VALUE; + } + + status = wlan_objmgr_vdev_try_get_ref(req->vdev, WLAN_SCAN_ID); + if (QDF_IS_STATUS_ERROR(status)) { + scm_info("Failed to get vdev ref; status:%d", status); + goto req_free; + } + + msg.bodyptr = req; + msg.callback = scm_scan_cancel_req; + msg.flush_callback = scm_scan_cancel_flush_callback; + + status = scheduler_post_message(QDF_MODULE_ID_OS_IF, + QDF_MODULE_ID_SCAN, + QDF_MODULE_ID_OS_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) + goto vdev_put; + + return QDF_STATUS_SUCCESS; + +vdev_put: + wlan_objmgr_vdev_release_ref(req->vdev, WLAN_SCAN_ID); + +req_free: + qdf_mem_free(req); + + return status; +} + +QDF_STATUS +ucfg_scan_cancel_sync(struct scan_cancel_request *req) +{ + QDF_STATUS status; + bool cancel_vdev = false, cancel_pdev = false; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + uint32_t max_wait_iterations = SCM_CANCEL_SCAN_WAIT_ITERATION; + qdf_event_t cancel_scan_event; + + if (!req || !req->vdev) { + scm_err("req or vdev within req is NULL"); + if (req) + qdf_mem_free(req); + return QDF_STATUS_E_NULL_VALUE; + } + + if (req->cancel_req.req_type == WLAN_SCAN_CANCEL_PDEV_ALL) + cancel_pdev = true; + else if (req->cancel_req.req_type == WLAN_SCAN_CANCEL_VDEV_ALL || + req->cancel_req.req_type == WLAN_SCAN_CANCEL_HOST_VDEV_ALL) + cancel_vdev = true; + + vdev = req->vdev; + status = ucfg_scan_cancel(req); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + memset(&cancel_scan_event, 0, sizeof(cancel_scan_event)); + /* + * If cancel req is to cancel all scan of pdev or vdev + * wait until all scan of pdev or vdev get cancelled + */ + qdf_event_create(&cancel_scan_event); + qdf_event_reset(&cancel_scan_event); + + if (cancel_pdev) { + pdev = wlan_vdev_get_pdev(vdev); + while ((ucfg_scan_get_pdev_status(pdev) != + SCAN_NOT_IN_PROGRESS) && max_wait_iterations) { + scm_debug("wait for all pdev scan to get complete"); + qdf_wait_single_event(&cancel_scan_event, + SCM_CANCEL_SCAN_WAIT_TIME); + max_wait_iterations--; + } + } else if (cancel_vdev) { + while ((ucfg_scan_get_vdev_status(vdev) != + SCAN_NOT_IN_PROGRESS) && max_wait_iterations) { + scm_debug("wait for all vdev scan to get complete"); + qdf_wait_single_event(&cancel_scan_event, + SCM_CANCEL_SCAN_WAIT_TIME); + max_wait_iterations--; + } + } + + qdf_event_destroy(&cancel_scan_event); + + if (!max_wait_iterations) { + scm_err("Failed to wait for scans to get complete"); + return QDF_STATUS_E_TIMEOUT; + } + + return status; +} + +wlan_scan_requester +ucfg_scan_register_requester(struct wlan_objmgr_psoc *psoc, + uint8_t *name, scan_event_handler event_cb, void *arg) +{ + int i, j; + struct wlan_scan_obj *scan; + struct scan_requester_info *requesters; + wlan_scan_requester requester = {0}; + + if (!psoc) { + scm_err("null psoc"); + return 0; + } + scan = wlan_psoc_get_scan_obj(psoc); + if (!scan) + return 0; + + requesters = scan->requesters; + qdf_spin_lock_bh(&scan->lock); + for (i = 0; i < WLAN_MAX_REQUESTORS; ++i) { + if (requesters[i].requester == 0) { + requesters[i].requester = + WLAN_SCAN_REQUESTER_ID_PREFIX | i; + j = 0; + while (name[j] && (j < (WLAN_MAX_MODULE_NAME - 1))) { + requesters[i].module[j] = name[j]; + ++j; + } + requesters[i].module[j] = 0; + requesters[i].ev_handler.func = event_cb; + requesters[i].ev_handler.arg = arg; + requester = requesters[i].requester; + break; + } + } + qdf_spin_unlock_bh(&scan->lock); + scm_debug("module: %s, event_cb: 0x%pK, arg: 0x%pK, reqid: %d", + name, event_cb, arg, requester); + + return requester; +} + +void +ucfg_scan_unregister_requester(struct wlan_objmgr_psoc *psoc, + wlan_scan_requester requester) +{ + int idx; + struct wlan_scan_obj *scan; + struct scan_requester_info *requesters; + + idx = requester & WLAN_SCAN_REQUESTER_ID_PREFIX; + if (idx != WLAN_SCAN_REQUESTER_ID_PREFIX) { + scm_err("prefix didn't match for requester id %d", requester); + return; + } + + idx = requester & WLAN_SCAN_REQUESTER_ID_MASK; + if (idx >= WLAN_MAX_REQUESTORS) { + scm_err("requester id %d greater than max value", requester); + return; + } + + if (!psoc) { + scm_err("null psoc"); + return; + } + scan = wlan_psoc_get_scan_obj(psoc); + if (!scan) + return; + requesters = scan->requesters; + scm_debug("reqid: %d", requester); + + qdf_spin_lock_bh(&scan->lock); + requesters[idx].requester = 0; + requesters[idx].module[0] = 0; + requesters[idx].ev_handler.func = NULL; + requesters[idx].ev_handler.arg = NULL; + qdf_spin_unlock_bh(&scan->lock); +} + +uint8_t* +ucfg_get_scan_requester_name(struct wlan_objmgr_psoc *psoc, + wlan_scan_requester requester) +{ + int idx = requester & WLAN_SCAN_REQUESTER_ID_MASK; + struct wlan_scan_obj *scan; + struct scan_requester_info *requesters; + + if (!psoc) { + scm_err("null psoc"); + return "null"; + } + scan = wlan_psoc_get_scan_obj(psoc); + if (!scan) + return "null"; + + requesters = scan->requesters; + + if ((idx < WLAN_MAX_REQUESTORS) && + (requesters[idx].requester == requester)) { + return requesters[idx].module; + } + + return (uint8_t *)"unknown"; +} + +wlan_scan_id +ucfg_scan_get_scan_id(struct wlan_objmgr_psoc *psoc) +{ + wlan_scan_id id; + struct wlan_scan_obj *scan; + + if (!psoc) { + QDF_ASSERT(0); + scm_err("null psoc"); + return 0; + } + + scan = wlan_psoc_get_scan_obj(psoc); + if (!scan) { + scm_err("scan object null"); + return 0; + } + + id = qdf_atomic_inc_return(&scan->scan_ids); + id = id & WLAN_SCAN_ID_MASK; + /* Mark this scan request as triggered by host + * by setting WLAN_HOST_SCAN_REQ_ID_PREFIX flag. + */ + id = id | WLAN_HOST_SCAN_REQ_ID_PREFIX; + scm_debug("scan_id: 0x%x", id); + + return id; +} + +static QDF_STATUS +scm_add_scan_event_handler(struct pdev_scan_ev_handler *pdev_ev_handler, + scan_event_handler event_cb, void *arg) +{ + struct cb_handler *cb_handler; + uint32_t handler_cnt = pdev_ev_handler->handler_cnt; + + /* Assign next available slot to this registration request */ + cb_handler = &(pdev_ev_handler->cb_handlers[handler_cnt]); + cb_handler->func = event_cb; + cb_handler->arg = arg; + pdev_ev_handler->handler_cnt++; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +ucfg_scan_register_event_handler(struct wlan_objmgr_pdev *pdev, + scan_event_handler event_cb, void *arg) +{ + uint32_t idx; + struct wlan_scan_obj *scan; + struct pdev_scan_ev_handler *pdev_ev_handler; + struct cb_handler *cb_handler; + + /* scan event handler call back can't be NULL */ + if (!pdev || !event_cb) { + scm_err("pdev: %pK, event_cb: %pK", pdev, event_cb); + return QDF_STATUS_E_NULL_VALUE; + } + + scm_debug("pdev: %pK, event_cb: %pK, arg: %pK\n", pdev, event_cb, arg); + + scan = wlan_pdev_get_scan_obj(pdev); + pdev_ev_handler = wlan_pdev_get_pdev_scan_ev_handlers(pdev); + if (!pdev_ev_handler) { + scm_err("null pdev_ev_handler"); + return QDF_STATUS_E_NULL_VALUE; + } + cb_handler = &(pdev_ev_handler->cb_handlers[0]); + + qdf_spin_lock_bh(&scan->lock); + /* Ensure its not a duplicate registration request */ + for (idx = 0; idx < MAX_SCAN_EVENT_HANDLERS_PER_PDEV; + idx++, cb_handler++) { + if ((cb_handler->func == event_cb) && + (cb_handler->arg == arg)) { + qdf_spin_unlock_bh(&scan->lock); + scm_debug("func: %pK, arg: %pK already exists", + event_cb, arg); + return QDF_STATUS_SUCCESS; + } + } + + QDF_ASSERT(pdev_ev_handler->handler_cnt < + MAX_SCAN_EVENT_HANDLERS_PER_PDEV); + + if (pdev_ev_handler->handler_cnt >= MAX_SCAN_EVENT_HANDLERS_PER_PDEV) { + qdf_spin_unlock_bh(&scan->lock); + scm_warn("No more registrations possible"); + return QDF_STATUS_E_NOMEM; + } + + scm_add_scan_event_handler(pdev_ev_handler, event_cb, arg); + qdf_spin_unlock_bh(&scan->lock); + + scm_debug("event_cb: 0x%pK, arg: 0x%pK", event_cb, arg); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +wlan_scan_global_init(struct wlan_objmgr_psoc *psoc, + struct wlan_scan_obj *scan_obj) +{ + scan_obj->scan_disabled = 0; + scan_obj->drop_bcn_on_chan_mismatch = + cfg_get(psoc, CFG_DROP_BCN_ON_CHANNEL_MISMATCH); + scan_obj->drop_bcn_on_invalid_freq = + cfg_get(psoc, CFG_DROP_BCN_ON_INVALID_FREQ); + scan_obj->disable_timeout = false; + scan_obj->scan_def.active_dwell = + cfg_get(psoc, CFG_ACTIVE_MAX_CHANNEL_TIME); + /* the ini is disallow DFS channel scan if ini is 1, so negate that */ + scan_obj->scan_def.allow_dfs_chan_in_first_scan = + !cfg_get(psoc, CFG_INITIAL_NO_DFS_SCAN); + scan_obj->scan_def.allow_dfs_chan_in_scan = + cfg_get(psoc, CFG_ENABLE_DFS_SCAN); + scan_obj->scan_def.skip_dfs_chan_in_p2p_search = + cfg_get(psoc, CFG_ENABLE_SKIP_DFS_IN_P2P_SEARCH); + scan_obj->scan_def.use_wake_lock_in_user_scan = + cfg_get(psoc, CFG_ENABLE_WAKE_LOCK_IN_SCAN); + scan_obj->scan_def.active_dwell_2g = + cfg_get(psoc, CFG_ACTIVE_MAX_2G_CHANNEL_TIME); + scan_obj->scan_def.active_dwell_6g = + cfg_get(psoc, CFG_ACTIVE_MAX_6G_CHANNEL_TIME); + scan_obj->scan_def.passive_dwell_6g = + cfg_get(psoc, CFG_PASSIVE_MAX_6G_CHANNEL_TIME); + scan_obj->scan_def.passive_dwell = + cfg_get(psoc, CFG_PASSIVE_MAX_CHANNEL_TIME); + scan_obj->scan_def.max_rest_time = SCAN_MAX_REST_TIME; + scan_obj->scan_def.sta_miracast_mcc_rest_time = + SCAN_STA_MIRACAST_MCC_REST_TIME; + scan_obj->scan_def.min_rest_time = SCAN_MIN_REST_TIME; + scan_obj->scan_def.conc_active_dwell = + cfg_get(psoc, CFG_ACTIVE_MAX_CHANNEL_TIME_CONC); + scan_obj->scan_def.conc_passive_dwell = + cfg_get(psoc, CFG_PASSIVE_MAX_CHANNEL_TIME_CONC); + scan_obj->scan_def.conc_max_rest_time = + cfg_get(psoc, CFG_MAX_REST_TIME_CONC); + scan_obj->scan_def.conc_min_rest_time = + cfg_get(psoc, CFG_MIN_REST_TIME_CONC); + scan_obj->scan_def.conc_idle_time = + cfg_get(psoc, CFG_IDLE_TIME_CONC); + scan_obj->scan_def.repeat_probe_time = + cfg_get(psoc, CFG_SCAN_PROBE_REPEAT_TIME); + scan_obj->scan_def.probe_spacing_time = SCAN_PROBE_SPACING_TIME; + scan_obj->scan_def.probe_delay = SCAN_PROBE_DELAY; + scan_obj->scan_def.burst_duration = SCAN_BURST_DURATION; + scan_obj->scan_def.max_scan_time = SCAN_MAX_SCAN_TIME; + scan_obj->scan_def.num_probes = cfg_get(psoc, CFG_SCAN_NUM_PROBES); + scan_obj->scan_def.scan_cache_aging_time = + (cfg_get(psoc, CFG_SCAN_AGING_TIME) * 1000); + scan_obj->scan_def.max_bss_per_pdev = SCAN_MAX_BSS_PDEV; + scan_obj->scan_def.scan_priority = SCAN_PRIORITY; + scan_obj->scan_def.idle_time = SCAN_NETWORK_IDLE_TIMEOUT; + scan_obj->scan_def.adaptive_dwell_time_mode = + cfg_get(psoc, CFG_ADAPTIVE_SCAN_DWELL_MODE); + scan_obj->scan_def.adaptive_dwell_time_mode_nc = + cfg_get(psoc, CFG_ADAPTIVE_SCAN_DWELL_MODE_NC); + scan_obj->scan_def.honour_nl_scan_policy_flags = + cfg_get(psoc, CFG_HONOUR_NL_SCAN_POLICY_FLAGS); + scan_obj->scan_def.enable_mac_spoofing = + cfg_get(psoc, CFG_ENABLE_MAC_ADDR_SPOOFING); + scan_obj->scan_def.is_bssid_hint_priority = + cfg_get(psoc, CFG_IS_BSSID_HINT_PRIORITY); + scan_obj->scan_def.extscan_adaptive_dwell_mode = + cfg_get(psoc, CFG_ADAPTIVE_EXTSCAN_DWELL_MODE); + + /* init burst durations */ + scan_obj->scan_def.sta_scan_burst_duration = + cfg_get(psoc, CFG_STA_SCAN_BURST_DURATION); + scan_obj->scan_def.p2p_scan_burst_duration = + cfg_get(psoc, CFG_P2P_SCAN_BURST_DURATION); + scan_obj->scan_def.go_scan_burst_duration = + cfg_get(psoc, CFG_GO_SCAN_BURST_DURATION); + scan_obj->scan_def.ap_scan_burst_duration = + cfg_get(psoc, CFG_AP_SCAN_BURST_DURATION); + /* scan contrl flags */ + scan_obj->scan_def.scan_f_passive = true; + scan_obj->scan_def.scan_f_ofdm_rates = true; + scan_obj->scan_def.scan_f_2ghz = true; + scan_obj->scan_def.scan_f_5ghz = true; + scan_obj->scan_def.scan_f_chan_stat_evnt = + cfg_get(psoc, CFG_ENABLE_SNR_MONITORING); + /* scan event flags */ + scan_obj->scan_def.scan_ev_started = true; + scan_obj->scan_def.scan_ev_completed = true; + scan_obj->scan_def.scan_ev_bss_chan = true; + scan_obj->scan_def.scan_ev_foreign_chan = true; + scan_obj->scan_def.scan_ev_foreign_chn_exit = true; + scan_obj->scan_def.scan_ev_dequeued = true; + scan_obj->scan_def.scan_ev_preempted = true; + scan_obj->scan_def.scan_ev_start_failed = true; + scan_obj->scan_def.scan_ev_restarted = true; + scan_obj->scan_def.enable_connected_scan = + cfg_get(psoc, CFG_ENABLE_CONNECTED_SCAN); + scan_obj->scan_def.scan_mode_6g = cfg_get(psoc, CFG_6GHZ_SCAN_MODE); + scan_obj->allow_bss_with_incomplete_ie = + cfg_get(psoc, CFG_SCAN_ALLOW_BSS_WITH_CORRUPTED_IE); + /* init scan id seed */ + qdf_atomic_init(&scan_obj->scan_ids); + + /* init extscan */ + wlan_extscan_global_init(psoc, scan_obj); + + return wlan_pno_global_init(psoc, scan_obj); +} + +static void +wlan_scan_global_deinit(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + wlan_pno_global_deinit(scan_obj); + wlan_extscan_global_deinit(); +} + +static QDF_STATUS +scm_remove_scan_event_handler(struct pdev_scan_ev_handler *pdev_ev_handler, + struct cb_handler *entry) +{ + struct cb_handler *last_entry; + uint32_t handler_cnt = pdev_ev_handler->handler_cnt; + + /* Replace event handler being deleted + * with the last one in the list. + */ + last_entry = &(pdev_ev_handler->cb_handlers[handler_cnt - 1]); + entry->func = last_entry->func; + entry->arg = last_entry->arg; + + /* Clear our last entry */ + last_entry->func = NULL; + last_entry->arg = NULL; + pdev_ev_handler->handler_cnt--; + + return QDF_STATUS_SUCCESS; +} + +void +ucfg_scan_unregister_event_handler(struct wlan_objmgr_pdev *pdev, + scan_event_handler event_cb, void *arg) +{ + uint8_t found = false; + uint32_t idx; + uint32_t handler_cnt; + struct wlan_scan_obj *scan; + struct cb_handler *cb_handler; + struct pdev_scan_ev_handler *pdev_ev_handler; + + scm_debug("pdev: %pK, event_cb: 0x%pK, arg: 0x%pK", pdev, event_cb, + arg); + if (!pdev) { + scm_err("null pdev"); + return; + } + scan = wlan_pdev_get_scan_obj(pdev); + if (!scan) + return; + + pdev_ev_handler = wlan_pdev_get_pdev_scan_ev_handlers(pdev); + if (!pdev_ev_handler) + return; + + cb_handler = &(pdev_ev_handler->cb_handlers[0]); + + qdf_spin_lock_bh(&scan->lock); + handler_cnt = pdev_ev_handler->handler_cnt; + if (!handler_cnt) { + qdf_spin_unlock_bh(&scan->lock); + scm_info("No event handlers registered"); + return; + } + + for (idx = 0; idx < MAX_SCAN_EVENT_HANDLERS_PER_PDEV; + idx++, cb_handler++) { + if ((cb_handler->func == event_cb) && + (cb_handler->arg == arg)) { + /* Event handler found, remove it + * from event handler list. + */ + found = true; + scm_remove_scan_event_handler(pdev_ev_handler, + cb_handler); + handler_cnt--; + break; + } + } + qdf_spin_unlock_bh(&scan->lock); + + scm_debug("event handler %s, remaining handlers: %d", + (found ? "removed" : "not found"), handler_cnt); +} + +QDF_STATUS +ucfg_scan_init_default_params(struct wlan_objmgr_vdev *vdev, + struct scan_start_request *req) +{ + struct scan_default_params *def; + + if (!vdev | !req) { + scm_err("vdev: 0x%pK, req: 0x%pK", vdev, req); + return QDF_STATUS_E_INVAL; + } + def = wlan_vdev_get_def_scan_params(vdev); + if (!def) { + scm_err("wlan_vdev_get_def_scan_params returned NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + /* Zero out everything and explicitly set fields as required */ + qdf_mem_zero(req, sizeof(*req)); + + req->vdev = vdev; + req->scan_req.vdev_id = wlan_vdev_get_id(vdev); + req->scan_req.scan_type = SCAN_TYPE_DEFAULT; + req->scan_req.scan_priority = def->scan_priority; + req->scan_req.dwell_time_active = def->active_dwell; + req->scan_req.dwell_time_active_2g = def->active_dwell_2g; + req->scan_req.dwell_time_active_6g = def->active_dwell_6g; + req->scan_req.dwell_time_passive_6g = def->passive_dwell_6g; + req->scan_req.dwell_time_passive = def->passive_dwell; + req->scan_req.min_rest_time = def->min_rest_time; + req->scan_req.max_rest_time = def->max_rest_time; + req->scan_req.repeat_probe_time = def->repeat_probe_time; + req->scan_req.probe_spacing_time = def->probe_spacing_time; + req->scan_req.idle_time = def->idle_time; + req->scan_req.max_scan_time = def->max_scan_time; + req->scan_req.probe_delay = def->probe_delay; + req->scan_req.burst_duration = def->burst_duration; + req->scan_req.n_probes = def->num_probes; + req->scan_req.adaptive_dwell_time_mode = + def->adaptive_dwell_time_mode; + req->scan_req.scan_flags = def->scan_flags; + req->scan_req.scan_events = def->scan_events; + req->scan_req.scan_random.randomize = def->enable_mac_spoofing; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +ucfg_scan_init_ssid_params(struct scan_start_request *req, + uint32_t num_ssid, struct wlan_ssid *ssid_list) +{ + uint32_t max_ssid = sizeof(req->scan_req.ssid) / + sizeof(req->scan_req.ssid[0]); + + if (!req) { + scm_err("null request"); + return QDF_STATUS_E_NULL_VALUE; + } + if (!num_ssid) { + /* empty channel list provided */ + req->scan_req.num_ssids = 0; + qdf_mem_zero(&req->scan_req.ssid[0], + sizeof(req->scan_req.ssid)); + return QDF_STATUS_SUCCESS; + } + if (!ssid_list) { + scm_err("null ssid_list while num_ssid: %d", num_ssid); + return QDF_STATUS_E_NULL_VALUE; + } + if (num_ssid > max_ssid) { + /* got a big list. alert and continue */ + scm_warn("overflow: received %d, max supported : %d", + num_ssid, max_ssid); + return QDF_STATUS_E_E2BIG; + } + + if (max_ssid > num_ssid) + max_ssid = num_ssid; + + req->scan_req.num_ssids = max_ssid; + qdf_mem_copy(&req->scan_req.ssid[0], ssid_list, + (req->scan_req.num_ssids * sizeof(req->scan_req.ssid[0]))); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +ucfg_scan_init_bssid_params(struct scan_start_request *req, + uint32_t num_bssid, struct qdf_mac_addr *bssid_list) +{ + uint32_t max_bssid = sizeof(req->scan_req.bssid_list) / + sizeof(req->scan_req.bssid_list[0]); + + if (!req) { + scm_err("null request"); + return QDF_STATUS_E_NULL_VALUE; + } + if (!num_bssid) { + /* empty channel list provided */ + req->scan_req.num_bssid = 0; + qdf_mem_zero(&req->scan_req.bssid_list[0], + sizeof(req->scan_req.bssid_list)); + return QDF_STATUS_SUCCESS; + } + if (!bssid_list) { + scm_err("null bssid_list while num_bssid: %d", num_bssid); + return QDF_STATUS_E_NULL_VALUE; + } + if (num_bssid > max_bssid) { + /* got a big list. alert and continue */ + scm_warn("overflow: received %d, max supported : %d", + num_bssid, max_bssid); + return QDF_STATUS_E_E2BIG; + } + + if (max_bssid > num_bssid) + max_bssid = num_bssid; + + req->scan_req.num_bssid = max_bssid; + qdf_mem_copy(&req->scan_req.bssid_list[0], bssid_list, + req->scan_req.num_bssid * sizeof(req->scan_req.bssid_list[0])); + + return QDF_STATUS_SUCCESS; +} + +/** + * is_chan_enabled_for_scan() - helper API to check if a frequency + * is allowed to scan. + * @reg_chan: regulatory_channel object + * @low_2g: lower 2.4 GHz frequency thresold + * @high_2g: upper 2.4 GHz frequency thresold + * @low_5g: lower 5 GHz frequency thresold + * @high_5g: upper 5 GHz frequency thresold + * + * Return: true if scan is allowed. false otherwise. + */ +static bool +is_chan_enabled_for_scan(struct regulatory_channel *reg_chan, + qdf_freq_t low_2g, qdf_freq_t high_2g, qdf_freq_t low_5g, + qdf_freq_t high_5g) +{ + if (reg_chan->state == CHANNEL_STATE_DISABLE) + return false; + if (reg_chan->nol_chan) + return false; + /* 2 GHz channel */ + if ((util_scan_scm_chan_to_band(reg_chan->chan_num) == + WLAN_BAND_2_4_GHZ) && + ((reg_chan->center_freq < low_2g) || + (reg_chan->center_freq > high_2g))) + return false; + else if ((util_scan_scm_chan_to_band(reg_chan->chan_num) == + WLAN_BAND_5_GHZ) && + ((reg_chan->center_freq < low_5g) || + (reg_chan->center_freq > high_5g))) + return false; + + return true; +} + +QDF_STATUS +ucfg_scan_init_chanlist_params(struct scan_start_request *req, + uint32_t num_chans, uint32_t *chan_list, uint32_t *phymode) +{ + uint32_t idx; + QDF_STATUS status; + struct regulatory_channel *reg_chan_list = NULL; + qdf_freq_t low_2g, high_2g, low_5g, high_5g; + struct wlan_objmgr_pdev *pdev = NULL; + uint32_t *scan_freqs = NULL; + uint32_t max_chans = sizeof(req->scan_req.chan_list.chan) / + sizeof(req->scan_req.chan_list.chan[0]); + if (!req) { + scm_err("null request"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (req->vdev) + pdev = wlan_vdev_get_pdev(req->vdev); + /* + * If 0 channels are provided for scan and + * wide band scan is enabled, scan all 20 mhz + * available channels. This is required as FW + * scans all channel/phy mode combinations + * provided in scan channel list if 0 chans are + * provided in scan request causing scan to take + * too much time to complete. + */ + if (pdev && !num_chans) { + reg_chan_list = qdf_mem_malloc_atomic(NUM_CHANNELS * + sizeof(struct regulatory_channel)); + if (!reg_chan_list) { + status = QDF_STATUS_E_NOMEM; + goto end; + } + scan_freqs = + qdf_mem_malloc_atomic(sizeof(uint32_t) * max_chans); + if (!scan_freqs) { + status = QDF_STATUS_E_NOMEM; + goto end; + } + status = ucfg_reg_get_current_chan_list(pdev, reg_chan_list); + if (QDF_IS_STATUS_ERROR(status)) + goto end; + + status = wlan_reg_get_freq_range(pdev, &low_2g, + &high_2g, &low_5g, &high_5g); + if (QDF_IS_STATUS_ERROR(status)) + goto end; + + for (idx = 0, num_chans = 0; + (idx < NUM_CHANNELS && num_chans < max_chans); idx++) + if (is_chan_enabled_for_scan(®_chan_list[idx], + low_2g, high_2g, low_5g, high_5g)) + scan_freqs[num_chans++] = + reg_chan_list[idx].center_freq; + + chan_list = scan_freqs; + } + + if (!num_chans) { + /* empty channel list provided */ + qdf_mem_zero(&req->scan_req.chan_list, + sizeof(req->scan_req.chan_list)); + req->scan_req.chan_list.num_chan = 0; + status = QDF_STATUS_SUCCESS; + goto end; + } + if (!chan_list) { + scm_info("null chan_list while num_chans: %d", num_chans); + status = QDF_STATUS_E_NULL_VALUE; + goto end; + } + + if (num_chans > max_chans) { + /* got a big list. alert and fail */ + scm_warn("overflow: received %d, max supported : %d", + num_chans, max_chans); + status = QDF_STATUS_E_E2BIG; + goto end; + } + + req->scan_req.chan_list.num_chan = num_chans; + for (idx = 0; idx < num_chans; idx++) { + req->scan_req.chan_list.chan[idx].freq = + (chan_list[idx] > WLAN_24_GHZ_BASE_FREQ) ? + chan_list[idx] : + wlan_reg_chan_to_freq(pdev, chan_list[idx]); + if (phymode) + req->scan_req.chan_list.chan[idx].phymode = + phymode[idx]; + else if (req->scan_req.chan_list.chan[idx].freq <= + WLAN_CHAN_15_FREQ) + req->scan_req.chan_list.chan[idx].phymode = + SCAN_PHY_MODE_11G; + else + req->scan_req.chan_list.chan[idx].phymode = + SCAN_PHY_MODE_11A; + } + +end: + if (scan_freqs) + qdf_mem_free(scan_freqs); + + if (reg_chan_list) + qdf_mem_free(reg_chan_list); + + return QDF_STATUS_SUCCESS; +} + +static inline enum scm_scan_status +get_scan_status_from_serialization_status( + enum wlan_serialization_cmd_status status) +{ + enum scm_scan_status scan_status; + + switch (status) { + case WLAN_SER_CMD_IN_PENDING_LIST: + scan_status = SCAN_IS_PENDING; + break; + case WLAN_SER_CMD_IN_ACTIVE_LIST: + scan_status = SCAN_IS_ACTIVE; + break; + case WLAN_SER_CMDS_IN_ALL_LISTS: + scan_status = SCAN_IS_ACTIVE_AND_PENDING; + break; + case WLAN_SER_CMD_NOT_FOUND: + scan_status = SCAN_NOT_IN_PROGRESS; + break; + default: + scm_warn("invalid serialization status %d", status); + QDF_ASSERT(0); + scan_status = SCAN_NOT_IN_PROGRESS; + break; + } + + return scan_status; +} + +enum scm_scan_status +ucfg_scan_get_vdev_status(struct wlan_objmgr_vdev *vdev) +{ + enum wlan_serialization_cmd_status status; + + if (!vdev) { + scm_err("null vdev"); + return SCAN_NOT_IN_PROGRESS; + } + status = wlan_serialization_vdev_scan_status(vdev); + + return get_scan_status_from_serialization_status(status); +} + +enum scm_scan_status +ucfg_scan_get_pdev_status(struct wlan_objmgr_pdev *pdev) +{ + enum wlan_serialization_cmd_status status; + + if (!pdev) { + scm_err("null pdev"); + return SCAN_NOT_IN_PROGRESS; + } + status = wlan_serialization_pdev_scan_status(pdev); + + return get_scan_status_from_serialization_status(status); +} + +static void +ucfg_scan_register_unregister_bcn_cb(struct wlan_objmgr_psoc *psoc, + bool enable) +{ + QDF_STATUS status; + struct mgmt_txrx_mgmt_frame_cb_info cb_info[2]; + + cb_info[0].frm_type = MGMT_PROBE_RESP; + cb_info[0].mgmt_rx_cb = tgt_scan_bcn_probe_rx_callback; + cb_info[1].frm_type = MGMT_BEACON; + cb_info[1].mgmt_rx_cb = tgt_scan_bcn_probe_rx_callback; + + if (enable) + status = wlan_mgmt_txrx_register_rx_cb(psoc, + WLAN_UMAC_COMP_SCAN, cb_info, 2); + else + status = wlan_mgmt_txrx_deregister_rx_cb(psoc, + WLAN_UMAC_COMP_SCAN, cb_info, 2); + if (status != QDF_STATUS_SUCCESS) + scm_err("%s the Handle with MGMT TXRX layer has failed", + enable ? "Registering" : "Deregistering"); +} + +QDF_STATUS ucfg_scan_update_user_config(struct wlan_objmgr_psoc *psoc, + struct scan_user_cfg *scan_cfg) +{ + struct wlan_scan_obj *scan_obj; + struct scan_default_params *scan_def; + + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return QDF_STATUS_E_FAILURE; + } + + scan_def = &scan_obj->scan_def; + scan_obj->ie_whitelist = scan_cfg->ie_whitelist; + scan_def->sta_miracast_mcc_rest_time = + scan_cfg->sta_miracast_mcc_rest_time; + + qdf_mem_copy(&scan_def->score_config, &scan_cfg->score_config, + sizeof(struct scoring_config)); + scm_validate_scoring_config(&scan_def->score_config); + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +static QDF_STATUS +ucfg_scan_cancel_pdev_scan(struct wlan_objmgr_pdev *pdev) +{ + struct scan_cancel_request *req; + QDF_STATUS status; + struct wlan_objmgr_vdev *vdev; + + req = qdf_mem_malloc_atomic(sizeof(*req)); + if (!req) { + scm_err("Failed to allocate memory"); + return QDF_STATUS_E_NOMEM; + } + + vdev = wlan_objmgr_pdev_get_first_vdev(pdev, WLAN_SCAN_ID); + if (!vdev) { + scm_err("Failed to get vdev"); + qdf_mem_free(req); + return QDF_STATUS_E_INVAL; + } + req->vdev = vdev; + req->cancel_req.scan_id = INVAL_SCAN_ID; + req->cancel_req.pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + req->cancel_req.vdev_id = INVAL_VDEV_ID; + req->cancel_req.req_type = WLAN_SCAN_CANCEL_PDEV_ALL; + status = ucfg_scan_cancel_sync(req); + if (QDF_IS_STATUS_ERROR(status)) + scm_err("Cancel scan request failed"); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SCAN_ID); + + return status; +} + +static QDF_STATUS +ucfg_scan_suspend_handler(struct wlan_objmgr_psoc *psoc, void *arg) +{ + struct wlan_objmgr_pdev *pdev = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + int i; + + ucfg_scan_psoc_set_disable(psoc, REASON_SUSPEND); + /* Check all pdev */ + for (i = 0; i < WLAN_UMAC_MAX_PDEVS; i++) { + pdev = wlan_objmgr_get_pdev_by_id(psoc, i, WLAN_SCAN_ID); + if (!pdev) + continue; + if (ucfg_scan_get_pdev_status(pdev) != + SCAN_NOT_IN_PROGRESS) + status = ucfg_scan_cancel_pdev_scan(pdev); + wlan_objmgr_pdev_release_ref(pdev, WLAN_SCAN_ID); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("failed to cancel scan for pdev_id %d", i); + return status; + } + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +ucfg_scan_resume_handler(struct wlan_objmgr_psoc *psoc, void *arg) +{ + ucfg_scan_psoc_set_enable(psoc, REASON_SUSPEND); + return QDF_STATUS_SUCCESS; +} + +static inline void +ucfg_scan_register_pmo_handler(void) +{ + pmo_register_suspend_handler(WLAN_UMAC_COMP_SCAN, + ucfg_scan_suspend_handler, NULL); + pmo_register_resume_handler(WLAN_UMAC_COMP_SCAN, + ucfg_scan_resume_handler, NULL); +} + +static inline void +ucfg_scan_unregister_pmo_handler(void) +{ + pmo_unregister_suspend_handler(WLAN_UMAC_COMP_SCAN, + ucfg_scan_suspend_handler); + pmo_unregister_resume_handler(WLAN_UMAC_COMP_SCAN, + ucfg_scan_resume_handler); +} + +#else +static inline void +ucfg_scan_register_pmo_handler(void) +{ +} + +static inline void +ucfg_scan_unregister_pmo_handler(void) +{ +} +#endif + +QDF_STATUS +ucfg_scan_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scm_debug("psoc open: 0x%pK", psoc); + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return QDF_STATUS_E_FAILURE; + } + /* Initialize the scan Globals */ + wlan_scan_global_init(psoc, scan_obj); + qdf_spinlock_create(&scan_obj->lock); + ucfg_scan_register_pmo_handler(); + scm_db_init(psoc); + scm_channel_list_db_init(psoc); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +ucfg_scan_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scm_debug("psoc close: 0x%pK", psoc); + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + scm_db_deinit(psoc); + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return QDF_STATUS_E_FAILURE; + } + ucfg_scan_unregister_pmo_handler(); + qdf_spinlock_destroy(&scan_obj->lock); + wlan_scan_global_deinit(psoc); + scm_channel_list_db_deinit(psoc); + + return QDF_STATUS_SUCCESS; +} + +static bool scm_serialization_scan_rules_cb( + union wlan_serialization_rules_info *comp_info, + uint8_t comp_id) +{ + switch (comp_id) { + case WLAN_UMAC_COMP_TDLS: + if (comp_info->scan_info.is_tdls_in_progress) { + scm_debug("Cancel scan. Tdls in progress"); + return false; + } + break; + case WLAN_UMAC_COMP_DFS: + if (comp_info->scan_info.is_cac_in_progress) { + scm_debug("Cancel scan. CAC in progress"); + return false; + } + break; + case WLAN_UMAC_COMP_MLME: + if (comp_info->scan_info.is_mlme_op_in_progress) { + scm_debug("Cancel scan. MLME operation in progress"); + return false; + } + break; + default: + scm_debug("not handled comp_id %d", comp_id); + break; + } + + return true; +} + +QDF_STATUS +ucfg_scan_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + + scm_debug("psoc enable: 0x%pK", psoc); + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + /* Subscribe for scan events from lmac layesr */ + status = tgt_scan_register_ev_handler(psoc); + QDF_ASSERT(status == QDF_STATUS_SUCCESS); + if (!wlan_reg_is_11d_offloaded(psoc)) + scm_11d_cc_db_init(psoc); + ucfg_scan_register_unregister_bcn_cb(psoc, true); + status = wlan_serialization_register_apply_rules_cb(psoc, + WLAN_SER_CMD_SCAN, + scm_serialization_scan_rules_cb); + QDF_ASSERT(status == QDF_STATUS_SUCCESS); + return status; +} + +QDF_STATUS +ucfg_scan_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + + scm_debug("psoc disable: 0x%pK", psoc); + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + /* Unsubscribe for scan events from lmac layesr */ + status = tgt_scan_unregister_ev_handler(psoc); + QDF_ASSERT(status == QDF_STATUS_SUCCESS); + ucfg_scan_register_unregister_bcn_cb(psoc, false); + if (!wlan_reg_is_11d_offloaded(psoc)) + scm_11d_cc_db_deinit(psoc); + + return status; +} + +uint32_t +ucfg_scan_get_max_active_scans(struct wlan_objmgr_psoc *psoc) +{ + struct scan_default_params *scan_params = NULL; + + if (!psoc) { + scm_err("null psoc"); + return 0; + } + scan_params = wlan_scan_psoc_get_def_params(psoc); + if (!scan_params) { + scm_err("Failed to get scan object"); + return 0; + } + + return scan_params->max_active_scans_allowed; +} + +bool ucfg_copy_ie_whitelist_attrs(struct wlan_objmgr_psoc *psoc, + struct probe_req_whitelist_attr *ie_whitelist) +{ + struct wlan_scan_obj *scan_obj = NULL; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return false; + + qdf_mem_copy(ie_whitelist, &scan_obj->ie_whitelist, + sizeof(*ie_whitelist)); + + return true; +} + +bool ucfg_ie_whitelist_enabled(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_scan_obj *scan_obj = NULL; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return false; + + if ((wlan_vdev_mlme_get_opmode(vdev) != QDF_STA_MODE) || + wlan_vdev_is_up(vdev) == QDF_STATUS_SUCCESS) + return false; + + if (!scan_obj->ie_whitelist.white_list) + return false; + + return true; +} + +void ucfg_scan_set_bt_activity(struct wlan_objmgr_psoc *psoc, + bool bt_a2dp_active) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return; + } + scan_obj->bt_a2dp_enabled = bt_a2dp_active; +} + +bool ucfg_scan_get_bt_activity(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return false; + } + + return scan_obj->bt_a2dp_enabled; +} + +bool ucfg_scan_wake_lock_in_user_scan(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return false; + + return scan_obj->scan_def.use_wake_lock_in_user_scan; +} + +bool ucfg_scan_is_connected_scan_enabled(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return cfg_default(CFG_ENABLE_CONNECTED_SCAN); + } + + return scan_obj->scan_def.enable_connected_scan; +} + +bool ucfg_scan_is_mac_spoofing_enabled(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return cfg_default(CFG_ENABLE_MAC_ADDR_SPOOFING); + } + + return scan_obj->scan_def.enable_mac_spoofing; +} + +enum scan_dwelltime_adaptive_mode +ucfg_scan_get_extscan_adaptive_dwell_mode(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return cfg_default(CFG_ADAPTIVE_EXTSCAN_DWELL_MODE); + } + + return scan_obj->scan_def.extscan_adaptive_dwell_mode; +} + +QDF_STATUS +ucfg_scan_set_global_config(struct wlan_objmgr_psoc *psoc, + enum scan_config config, uint32_t val) +{ + struct wlan_scan_obj *scan_obj; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object config:%d, val:%d", + config, val); + return QDF_STATUS_E_INVAL; + } + switch (config) { + case SCAN_CFG_DISABLE_SCAN_COMMAND_TIMEOUT: + scan_obj->disable_timeout = !!val; + break; + case SCAN_CFG_DROP_BCN_ON_CHANNEL_MISMATCH: + scan_obj->drop_bcn_on_chan_mismatch = !!val; + break; + + default: + status = QDF_STATUS_E_INVAL; + break; + } + + return status; +} + +QDF_STATUS ucfg_scan_update_mlme_by_bssinfo(struct wlan_objmgr_pdev *pdev, + struct bss_info *bss_info, struct mlme_info *mlme) +{ + QDF_STATUS status; + + status = scm_scan_update_mlme_by_bssinfo(pdev, bss_info, mlme); + + return status; +} + +QDF_STATUS +ucfg_scan_get_global_config(struct wlan_objmgr_psoc *psoc, + enum scan_config config, uint32_t *val) +{ + struct wlan_scan_obj *scan_obj; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj || !val) { + scm_err("scan object:%pK config:%d, val:0x%pK", + scan_obj, config, val); + return QDF_STATUS_E_INVAL; + } + switch (config) { + case SCAN_CFG_DISABLE_SCAN_COMMAND_TIMEOUT: + *val = scan_obj->disable_timeout; + break; + case SCAN_CFG_DROP_BCN_ON_CHANNEL_MISMATCH: + *val = scan_obj->drop_bcn_on_chan_mismatch; + break; + + default: + status = QDF_STATUS_E_INVAL; + break; + } + + return status; +} + +#ifdef FEATURE_WLAN_SCAN_PNO +bool ucfg_scan_is_pno_offload_enabled(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("NULL scan obj"); + return false; + } + + return scan_obj->pno_cfg.pno_offload_enabled; +} + +void ucfg_scan_set_pno_offload(struct wlan_objmgr_psoc *psoc, bool value) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("NULL scan obj"); + return; + } + + scan_obj->pno_cfg.pno_offload_enabled = value; +} + +bool ucfg_scan_get_pno_scan_support(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("NULL scan obj"); + return cfg_default(CFG_PNO_SCAN_SUPPORT); + } + + return scan_obj->pno_cfg.scan_support_enabled; +} + +uint8_t ucfg_get_scan_backoff_multiplier(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("NULL scan obj"); + return cfg_default(CFG_SCAN_BACKOFF_MULTIPLIER); + } + return scan_obj->pno_cfg.scan_backoff_multiplier; +} + +bool ucfg_scan_is_dfs_chnl_scan_enabled(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("NULL scan obj"); + return cfg_default(CFG_ENABLE_DFS_PNO_CHNL_SCAN); + } + return scan_obj->pno_cfg.dfs_chnl_scan_enabled; +} + +uint32_t ucfg_scan_get_scan_timer_repeat_value(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("NULL scan obj"); + return cfg_default(CFG_PNO_SCAN_TIMER_REPEAT_VALUE); + } + return scan_obj->pno_cfg.scan_timer_repeat_value; +} + +uint32_t ucfg_scan_get_slow_scan_multiplier(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("NULL scan obj"); + return cfg_default(CFG_PNO_SLOW_SCAN_MULTIPLIER); + } + return scan_obj->pno_cfg.slow_scan_multiplier; +} + +uint32_t +ucfg_scan_get_max_sched_scan_plan_interval(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return cfg_default(CFG_MAX_SCHED_SCAN_PLAN_INTERVAL); + } + + return scan_obj->pno_cfg.max_sched_scan_plan_interval; +} + +uint32_t +ucfg_scan_get_max_sched_scan_plan_iterations(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return cfg_default(CFG_MAX_SCHED_SCAN_PLAN_ITERATIONS); + } + + return scan_obj->pno_cfg.max_sched_scan_plan_iterations; +} + +bool +ucfg_scan_get_user_config_sched_scan_plan(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return cfg_default(CFG_MAX_SCHED_SCAN_PLAN_ITERATIONS); + } + + return scan_obj->pno_cfg.user_config_sched_scan_plan; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_utils_api.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_utils_api.c new file mode 100644 index 0000000000000000000000000000000000000000..6c11c2205115881736ecb2ee39e5bf857b412cec --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_utils_api.c @@ -0,0 +1,2099 @@ +/* + * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: Defines scan utility functions + */ + +#include +#include +#include +#include <../../core/src/wlan_scan_cache_db.h> +#include <../../core/src/wlan_scan_main.h> +#include + +#define MAX_IE_LEN 1024 +#define SHORT_SSID_LEN 4 +#define NEIGHBOR_AP_LEN 1 +#define BSS_PARAMS_LEN 1 + +const char* +util_scan_get_ev_type_name(enum scan_event_type type) +{ + static const char * const event_name[] = { + [SCAN_EVENT_TYPE_STARTED] = "STARTED", + [SCAN_EVENT_TYPE_COMPLETED] = "COMPLETED", + [SCAN_EVENT_TYPE_BSS_CHANNEL] = "HOME_CHANNEL", + [SCAN_EVENT_TYPE_FOREIGN_CHANNEL] = "FOREIGN_CHANNEL", + [SCAN_EVENT_TYPE_DEQUEUED] = "DEQUEUED", + [SCAN_EVENT_TYPE_PREEMPTED] = "PREEMPTED", + [SCAN_EVENT_TYPE_START_FAILED] = "START_FAILED", + [SCAN_EVENT_TYPE_RESTARTED] = "RESTARTED", + [SCAN_EVENT_TYPE_FOREIGN_CHANNEL_EXIT] = "FOREIGN_CHANNEL_EXIT", + [SCAN_EVENT_TYPE_SUSPENDED] = "SUSPENDED", + [SCAN_EVENT_TYPE_RESUMED] = "RESUMED", + [SCAN_EVENT_TYPE_NLO_COMPLETE] = "NLO_COMPLETE", + [SCAN_EVENT_TYPE_NLO_MATCH] = "NLO_MATCH", + [SCAN_EVENT_TYPE_INVALID] = "INVALID", + [SCAN_EVENT_TYPE_GPIO_TIMEOUT] = "GPIO_TIMEOUT", + [SCAN_EVENT_TYPE_RADIO_MEASUREMENT_START] = + "RADIO_MEASUREMENT_START", + [SCAN_EVENT_TYPE_RADIO_MEASUREMENT_END] = + "RADIO_MEASUREMENT_END", + [SCAN_EVENT_TYPE_BSSID_MATCH] = "BSSID_MATCH", + [SCAN_EVENT_TYPE_FOREIGN_CHANNEL_GET_NF] = + "FOREIGN_CHANNEL_GET_NF", + }; + + if (type >= SCAN_EVENT_TYPE_MAX) + return "UNKNOWN"; + + return event_name[type]; +} + + +const char* +util_scan_get_ev_reason_name(enum scan_completion_reason reason) +{ + static const char * const reason_name[] = { + [SCAN_REASON_NONE] = "NONE", + [SCAN_REASON_COMPLETED] = "COMPLETED", + [SCAN_REASON_CANCELLED] = "CANCELLED", + [SCAN_REASON_PREEMPTED] = "PREEMPTED", + [SCAN_REASON_TIMEDOUT] = "TIMEDOUT", + [SCAN_REASON_INTERNAL_FAILURE] = "INTERNAL_FAILURE", + [SCAN_REASON_SUSPENDED] = "SUSPENDED", + [SCAN_REASON_RUN_FAILED] = "RUN_FAILED", + [SCAN_REASON_TERMINATION_FUNCTION] = "TERMINATION_FUNCTION", + [SCAN_REASON_MAX_OFFCHAN_RETRIES] = "MAX_OFFCHAN_RETRIES", + [SCAN_REASON_DFS_VIOLATION] = "DFS_NOL_VIOLATION", + }; + + if (reason >= SCAN_REASON_MAX) + return "UNKNOWN"; + + return reason_name[reason]; +} + +qdf_time_t +util_get_last_scan_time(struct wlan_objmgr_vdev *vdev) +{ + uint8_t pdev_id; + struct wlan_scan_obj *scan_obj; + + if (!vdev) { + scm_warn("null vdev"); + QDF_ASSERT(0); + return 0; + } + pdev_id = wlan_scan_vdev_get_pdev_id(vdev); + scan_obj = wlan_vdev_get_scan_obj(vdev); + + if (scan_obj) + return scan_obj->pdev_info[pdev_id].last_scan_time; + else + return 0; +} + +enum wlan_band util_scan_scm_chan_to_band(uint32_t chan) +{ + if (WLAN_CHAN_IS_2GHZ(chan)) + return WLAN_BAND_2_4_GHZ; + + return WLAN_BAND_5_GHZ; +} + +enum wlan_band util_scan_scm_freq_to_band(uint16_t freq) +{ + if (WLAN_REG_IS_24GHZ_CH_FREQ(freq)) + return WLAN_BAND_2_4_GHZ; + + return WLAN_BAND_5_GHZ; +} + +bool util_is_scan_entry_match( + struct scan_cache_entry *entry1, + struct scan_cache_entry *entry2) +{ + + if (entry1->cap_info.wlan_caps.ess != + entry2->cap_info.wlan_caps.ess) + return false; + + if (entry1->cap_info.wlan_caps.ess && + !qdf_mem_cmp(entry1->bssid.bytes, + entry2->bssid.bytes, QDF_MAC_ADDR_SIZE)) { + /* Check for BSS */ + if (util_is_ssid_match(&entry1->ssid, &entry2->ssid) || + util_scan_is_null_ssid(&entry1->ssid) || + util_scan_is_null_ssid(&entry2->ssid)) + return true; + } else if (entry1->cap_info.wlan_caps.ibss && + (entry1->channel.chan_freq == + entry2->channel.chan_freq)) { + /* + * Same channel cannot have same SSID for + * different IBSS, so no need to check BSSID + */ + if (util_is_ssid_match( + &entry1->ssid, &entry2->ssid)) + return true; + } else if (!entry1->cap_info.wlan_caps.ibss && + !entry1->cap_info.wlan_caps.ess && + !qdf_mem_cmp(entry1->bssid.bytes, + entry2->bssid.bytes, QDF_MAC_ADDR_SIZE)) { + /* In case of P2P devices, ess and ibss will be set to zero */ + return true; + } + + return false; +} + +static bool util_is_pureg_rate(uint8_t *rates, uint8_t nrates) +{ + static const uint8_t g_rates[] = {12, 18, 24, 36, 48, 72, 96, 108}; + bool pureg = false; + uint8_t i, j; + + for (i = 0; i < nrates; i++) { + for (j = 0; j < QDF_ARRAY_SIZE(g_rates); j++) { + if (WLAN_RV(rates[i]) == g_rates[j]) { + pureg = true; + break; + } + } + if (pureg) + break; + } + + return pureg; +} + +#ifdef CONFIG_BAND_6GHZ +static struct he_oper_6g_param *util_scan_get_he_6g_params(uint8_t *he_ops) +{ + uint8_t len; + uint32_t he_oper_params; + + if (!he_ops) + return NULL; + + len = he_ops[1]; + he_ops += sizeof(struct ie_header); + + if (len < WLAN_HEOP_FIXED_PARAM_LENGTH) + return NULL; + + /* element id extension */ + he_ops++; + len--; + + he_oper_params = LE_READ_4(he_ops); + if (!(he_oper_params & WLAN_HEOP_6GHZ_INFO_PRESENT_MASK)) + return NULL; + + /* fixed params - element id extension */ + he_ops += WLAN_HEOP_FIXED_PARAM_LENGTH - 1; + len -= WLAN_HEOP_FIXED_PARAM_LENGTH - 1; + + if (!len) + return NULL; + + /* vht oper params */ + if (he_oper_params & WLAN_HEOP_VHTOP_PRESENT_MASK) { + if (len < WLAN_HEOP_VHTOP_LENGTH) + return NULL; + he_ops += WLAN_HEOP_VHTOP_LENGTH; + len -= WLAN_HEOP_VHTOP_LENGTH; + } + + if (!len) + return NULL; + + if (he_oper_params & WLAN_HEOP_CO_LOCATED_BSS_MASK) { + he_ops += WLAN_HEOP_CO_LOCATED_BSS_LENGTH; + len -= WLAN_HEOP_CO_LOCATED_BSS_LENGTH; + } + + if (len < sizeof(struct he_oper_6g_param)) + return NULL; + + return (struct he_oper_6g_param *)he_ops; +} + +static QDF_STATUS +util_scan_get_chan_from_he_6g_params(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_params, + qdf_freq_t *chan_freq, uint8_t band_mask) +{ + struct he_oper_6g_param *he_6g_params; + uint8_t *he_ops; + struct wlan_scan_obj *scan_obj; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("scan_obj is NULL"); + return QDF_STATUS_E_INVAL; + } + + he_ops = util_scan_entry_heop(scan_params); + if (!util_scan_entry_hecap(scan_params) || !he_ops) + return QDF_STATUS_SUCCESS; + + he_6g_params = util_scan_get_he_6g_params(he_ops); + if (!he_6g_params) + return QDF_STATUS_SUCCESS; + + *chan_freq = wlan_reg_chan_band_to_freq(pdev, + he_6g_params->primary_channel, + band_mask); + if (scan_obj->drop_bcn_on_invalid_freq && + wlan_reg_is_disable_for_freq(pdev, *chan_freq)) { + scm_debug_rl(QDF_MAC_ADDR_FMT": Drop as invalid channel %d freq %d in HE 6Ghz params", + scan_params->bssid.bytes, + he_6g_params->primary_channel, *chan_freq); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +static enum wlan_phymode +util_scan_get_phymode_6g(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_params) +{ + struct he_oper_6g_param *he_6g_params; + enum wlan_phymode phymode = WLAN_PHYMODE_11AXA_HE20; + uint8_t *he_ops; + uint8_t band_mask = BIT(REG_BAND_6G); + + he_ops = util_scan_entry_heop(scan_params); + if (!util_scan_entry_hecap(scan_params) || !he_ops) + return phymode; + + he_6g_params = util_scan_get_he_6g_params(he_ops); + if (!he_6g_params) + return phymode; + + switch (he_6g_params->width) { + case WLAN_HE_6GHZ_CHWIDTH_20: + phymode = WLAN_PHYMODE_11AXA_HE20; + break; + case WLAN_HE_6GHZ_CHWIDTH_40: + phymode = WLAN_PHYMODE_11AXA_HE40; + break; + case WLAN_HE_6GHZ_CHWIDTH_80: + phymode = WLAN_PHYMODE_11AXA_HE80; + break; + case WLAN_HE_6GHZ_CHWIDTH_160_80_80: + if (WLAN_IS_HE80_80(he_6g_params)) + phymode = WLAN_PHYMODE_11AXA_HE80_80; + else if (WLAN_IS_HE160(he_6g_params)) + phymode = WLAN_PHYMODE_11AXA_HE160; + else + phymode = WLAN_PHYMODE_11AXA_HE80; + break; + default: + scm_err("Invalid he_6g_params width: %d", he_6g_params->width); + phymode = WLAN_PHYMODE_11AXA_HE20; + break; + } + + if (he_6g_params->chan_freq_seg0) + scan_params->channel.cfreq0 = + wlan_reg_chan_band_to_freq(pdev, + he_6g_params->chan_freq_seg0, + band_mask); + if (he_6g_params->chan_freq_seg1) + scan_params->channel.cfreq1 = + wlan_reg_chan_band_to_freq(pdev, + he_6g_params->chan_freq_seg1, + band_mask); + + return phymode; +} +#else +static QDF_STATUS +util_scan_get_chan_from_he_6g_params(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_params, + qdf_freq_t *chan_freq, uint8_t band_mask) +{ + return QDF_STATUS_SUCCESS; +} +static inline enum wlan_phymode +util_scan_get_phymode_6g(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_params) +{ + return WLAN_PHYMODE_AUTO; +} +#endif + +static inline +uint32_t util_scan_sec_chan_freq_from_htinfo(struct wlan_ie_htinfo_cmn *htinfo, + uint32_t primary_chan_freq) +{ + if (htinfo->hi_extchoff == WLAN_HTINFO_EXTOFFSET_ABOVE) + return primary_chan_freq + WLAN_CHAN_SPACING_20MHZ; + else if (htinfo->hi_extchoff == WLAN_HTINFO_EXTOFFSET_BELOW) + return primary_chan_freq - WLAN_CHAN_SPACING_20MHZ; + + return 0; +} + +static enum wlan_phymode +util_scan_get_phymode_5g(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_params) +{ + enum wlan_phymode phymode = WLAN_PHYMODE_AUTO; + uint16_t ht_cap = 0; + struct htcap_cmn_ie *htcap; + struct wlan_ie_htinfo_cmn *htinfo; + struct wlan_ie_vhtop *vhtop; + uint8_t band_mask = BIT(REG_BAND_5G); + + htcap = (struct htcap_cmn_ie *) + util_scan_entry_htcap(scan_params); + htinfo = (struct wlan_ie_htinfo_cmn *) + util_scan_entry_htinfo(scan_params); + vhtop = (struct wlan_ie_vhtop *) + util_scan_entry_vhtop(scan_params); + + if (!(htcap && htinfo)) + return WLAN_PHYMODE_11A; + + if (htcap) + ht_cap = le16toh(htcap->hc_cap); + + if (ht_cap & WLAN_HTCAP_C_CHWIDTH40) + phymode = WLAN_PHYMODE_11NA_HT40; + else + phymode = WLAN_PHYMODE_11NA_HT20; + + scan_params->channel.cfreq0 = + util_scan_sec_chan_freq_from_htinfo(htinfo, + scan_params->channel.chan_freq); + + if (util_scan_entry_vhtcap(scan_params) && vhtop) { + switch (vhtop->vht_op_chwidth) { + case WLAN_VHTOP_CHWIDTH_2040: + if (ht_cap & WLAN_HTCAP_C_CHWIDTH40) + phymode = WLAN_PHYMODE_11AC_VHT40; + else + phymode = WLAN_PHYMODE_11AC_VHT20; + break; + case WLAN_VHTOP_CHWIDTH_80: + if (WLAN_IS_REVSIG_VHT80_80(vhtop)) + phymode = WLAN_PHYMODE_11AC_VHT80_80; + else if (WLAN_IS_REVSIG_VHT160(vhtop)) + phymode = WLAN_PHYMODE_11AC_VHT160; + else + phymode = WLAN_PHYMODE_11AC_VHT80; + break; + case WLAN_VHTOP_CHWIDTH_160: + phymode = WLAN_PHYMODE_11AC_VHT160; + break; + case WLAN_VHTOP_CHWIDTH_80_80: + phymode = WLAN_PHYMODE_11AC_VHT80_80; + break; + default: + scm_err("bad channel: %d", + vhtop->vht_op_chwidth); + phymode = WLAN_PHYMODE_11AC_VHT20; + break; + } + if (vhtop->vht_op_ch_freq_seg1) + scan_params->channel.cfreq0 = + wlan_reg_chan_band_to_freq(pdev, + vhtop->vht_op_ch_freq_seg1, + band_mask); + if (vhtop->vht_op_ch_freq_seg2) + scan_params->channel.cfreq1 = + wlan_reg_chan_band_to_freq(pdev, + vhtop->vht_op_ch_freq_seg2, + band_mask); + } + + if (!util_scan_entry_hecap(scan_params)) + return phymode; + + /* for 5Ghz Check for HE, only if VHT cap and HE cap are present */ + if (!IS_WLAN_PHYMODE_VHT(phymode)) + return phymode; + + switch (phymode) { + case WLAN_PHYMODE_11AC_VHT20: + phymode = WLAN_PHYMODE_11AXA_HE20; + break; + case WLAN_PHYMODE_11AC_VHT40: + phymode = WLAN_PHYMODE_11AXA_HE40; + break; + case WLAN_PHYMODE_11AC_VHT80: + phymode = WLAN_PHYMODE_11AXA_HE80; + break; + case WLAN_PHYMODE_11AC_VHT160: + phymode = WLAN_PHYMODE_11AXA_HE160; + break; + case WLAN_PHYMODE_11AC_VHT80_80: + phymode = WLAN_PHYMODE_11AXA_HE80_80; + break; + default: + phymode = WLAN_PHYMODE_11AXA_HE20; + break; + } + + return phymode; +} + +static enum wlan_phymode +util_scan_get_phymode_2g(struct scan_cache_entry *scan_params) +{ + enum wlan_phymode phymode = WLAN_PHYMODE_AUTO; + uint16_t ht_cap = 0; + struct htcap_cmn_ie *htcap; + struct wlan_ie_htinfo_cmn *htinfo; + struct wlan_ie_vhtop *vhtop; + + htcap = (struct htcap_cmn_ie *) + util_scan_entry_htcap(scan_params); + htinfo = (struct wlan_ie_htinfo_cmn *) + util_scan_entry_htinfo(scan_params); + vhtop = (struct wlan_ie_vhtop *) + util_scan_entry_vhtop(scan_params); + + if (htcap) + ht_cap = le16toh(htcap->hc_cap); + + if (htcap && htinfo) { + if ((ht_cap & WLAN_HTCAP_C_CHWIDTH40) && + (htinfo->hi_extchoff == WLAN_HTINFO_EXTOFFSET_ABOVE)) + phymode = WLAN_PHYMODE_11NG_HT40PLUS; + else if ((ht_cap & WLAN_HTCAP_C_CHWIDTH40) && + (htinfo->hi_extchoff == WLAN_HTINFO_EXTOFFSET_BELOW)) + phymode = WLAN_PHYMODE_11NG_HT40MINUS; + else + phymode = WLAN_PHYMODE_11NG_HT20; + } else if (util_scan_entry_xrates(scan_params)) { + /* only 11G stations will have more than 8 rates */ + phymode = WLAN_PHYMODE_11G; + } else { + /* Some mischievous g-only APs do not set extended rates */ + if (util_scan_entry_rates(scan_params)) { + if (util_is_pureg_rate(&scan_params->ie_list.rates[2], + scan_params->ie_list.rates[1])) + phymode = WLAN_PHYMODE_11G; + else + phymode = WLAN_PHYMODE_11B; + } else { + phymode = WLAN_PHYMODE_11B; + } + } + + /* Check for VHT only if HT cap is present */ + if (!IS_WLAN_PHYMODE_HT(phymode)) + return phymode; + + scan_params->channel.cfreq0 = + util_scan_sec_chan_freq_from_htinfo(htinfo, + scan_params->channel.chan_freq); + + if (util_scan_entry_vhtcap(scan_params) && vhtop) { + switch (vhtop->vht_op_chwidth) { + case WLAN_VHTOP_CHWIDTH_2040: + if (phymode == WLAN_PHYMODE_11NG_HT40PLUS) + phymode = WLAN_PHYMODE_11AC_VHT40PLUS_2G; + else if (phymode == WLAN_PHYMODE_11NG_HT40MINUS) + phymode = WLAN_PHYMODE_11AC_VHT40MINUS_2G; + else + phymode = WLAN_PHYMODE_11AC_VHT20_2G; + + break; + default: + scm_info("bad vht_op_chwidth: %d", + vhtop->vht_op_chwidth); + phymode = WLAN_PHYMODE_11AC_VHT20_2G; + break; + } + } + + if (!util_scan_entry_hecap(scan_params)) + return phymode; + + if (phymode == WLAN_PHYMODE_11AC_VHT40PLUS_2G || + phymode == WLAN_PHYMODE_11NG_HT40PLUS) + phymode = WLAN_PHYMODE_11AXG_HE40PLUS; + else if (phymode == WLAN_PHYMODE_11AC_VHT40MINUS_2G || + phymode == WLAN_PHYMODE_11NG_HT40MINUS) + phymode = WLAN_PHYMODE_11AXG_HE40MINUS; + else + phymode = WLAN_PHYMODE_11AXG_HE20; + + return phymode; +} + +static enum wlan_phymode +util_scan_get_phymode(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_params) +{ + if (WLAN_REG_IS_24GHZ_CH_FREQ(scan_params->channel.chan_freq)) + return util_scan_get_phymode_2g(scan_params); + else if (WLAN_REG_IS_6GHZ_CHAN_FREQ(scan_params->channel.chan_freq)) + return util_scan_get_phymode_6g(pdev, scan_params); + else + return util_scan_get_phymode_5g(pdev, scan_params); +} + +static QDF_STATUS +util_scan_parse_chan_switch_wrapper_ie(struct scan_cache_entry *scan_params, + struct ie_header *sub_ie, qdf_size_t sub_ie_len) +{ + /* Walk through to check nothing is malformed */ + while (sub_ie_len >= sizeof(struct ie_header)) { + /* At least one more header is present */ + sub_ie_len -= sizeof(struct ie_header); + + if (sub_ie->ie_len == 0) { + sub_ie += 1; + continue; + } + if (sub_ie_len < sub_ie->ie_len) { + scm_err("Incomplete corrupted IE:%x", + WLAN_ELEMID_CHAN_SWITCH_WRAP); + return QDF_STATUS_E_INVAL; + } + switch (sub_ie->ie_id) { + case WLAN_ELEMID_COUNTRY: + if (sub_ie->ie_len < WLAN_COUNTRY_IE_MIN_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.country = (uint8_t *)sub_ie; + break; + case WLAN_ELEMID_WIDE_BAND_CHAN_SWITCH: + if (sub_ie->ie_len != WLAN_WIDE_BW_CHAN_SWITCH_IE_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.widebw = (uint8_t *)sub_ie; + break; + case WLAN_ELEMID_VHT_TX_PWR_ENVLP: + if (sub_ie->ie_len > WLAN_TPE_IE_MAX_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.txpwrenvlp = (uint8_t *)sub_ie; + break; + } + /* Consume sub info element */ + sub_ie_len -= sub_ie->ie_len; + /* go to next Sub IE */ + sub_ie = (struct ie_header *) + (((uint8_t *) sub_ie) + + sizeof(struct ie_header) + sub_ie->ie_len); + } + + return QDF_STATUS_SUCCESS; +} + +bool +util_scan_is_hidden_ssid(struct ie_ssid *ssid) +{ + uint8_t i; + + /* + * We flag this as Hidden SSID if the Length is 0 + * of the SSID only contains 0's + */ + if (!ssid || !ssid->ssid_len) + return true; + + for (i = 0; i < ssid->ssid_len; i++) + if (ssid->ssid[i] != 0) + return false; + + /* All 0's */ + return true; +} + +static QDF_STATUS +util_scan_update_rnr(struct rnr_bss_info *rnr, + struct neighbor_ap_info_field *ap_info, + uint8_t *data) +{ + uint8_t tbtt_info_length; + + tbtt_info_length = ap_info->tbtt_header.tbtt_info_length; + + switch (tbtt_info_length) { + case TBTT_NEIGHBOR_AP_OFFSET_ONLY: + /* Dont store it skip*/ + break; + + case TBTT_NEIGHBOR_AP_BSS_PARAM: + /* Dont store it skip*/ + break; + + case TBTT_NEIGHBOR_AP_SHORTSSID: + rnr->channel_number = ap_info->channel_number; + rnr->operating_class = ap_info->operting_class; + qdf_mem_copy(&rnr->short_ssid, &data[1], SHORT_SSID_LEN); + break; + + case TBTT_NEIGHBOR_AP_S_SSID_BSS_PARAM: + rnr->channel_number = ap_info->channel_number; + rnr->operating_class = ap_info->operting_class; + qdf_mem_copy(&rnr->short_ssid, &data[1], SHORT_SSID_LEN); + rnr->bss_params = data[5]; + break; + + case TBTT_NEIGHBOR_AP_BSSID: + rnr->channel_number = ap_info->channel_number; + rnr->operating_class = ap_info->operting_class; + qdf_mem_copy(&rnr->bssid, &data[1], QDF_MAC_ADDR_SIZE); + break; + + case TBTT_NEIGHBOR_AP_BSSID_BSS_PARAM: + rnr->channel_number = ap_info->channel_number; + rnr->operating_class = ap_info->operting_class; + qdf_mem_copy(&rnr->bssid, &data[1], QDF_MAC_ADDR_SIZE); + rnr->bss_params = data[7]; + break; + + case TBTT_NEIGHBOR_AP_BSSSID_S_SSID: + rnr->channel_number = ap_info->channel_number; + rnr->operating_class = ap_info->operting_class; + qdf_mem_copy(&rnr->bssid, &data[1], QDF_MAC_ADDR_SIZE); + qdf_mem_copy(&rnr->short_ssid, &data[7], SHORT_SSID_LEN); + break; + + case TBTT_NEIGHBOR_AP_BSSID_S_SSID_BSS_PARAM: + rnr->channel_number = ap_info->channel_number; + rnr->operating_class = ap_info->operting_class; + qdf_mem_copy(&rnr->bssid, &data[1], QDF_MAC_ADDR_SIZE); + qdf_mem_copy(&rnr->short_ssid, &data[7], SHORT_SSID_LEN); + rnr->bss_params = data[11]; + break; + + default: + scm_debug("Wrong fieldtype"); + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +util_scan_parse_rnr_ie(struct scan_cache_entry *scan_entry, + struct ie_header *ie) +{ + uint32_t rnr_ie_len; + uint16_t tbtt_count, tbtt_length, i, fieldtype; + uint8_t *data; + struct neighbor_ap_info_field *neighbor_ap_info; + + rnr_ie_len = ie->ie_len; + data = (uint8_t *)ie + sizeof(struct ie_header); + + while (data < ((uint8_t *)ie + rnr_ie_len + 2)) { + neighbor_ap_info = (struct neighbor_ap_info_field *)data; + tbtt_count = neighbor_ap_info->tbtt_header.tbtt_info_count; + tbtt_length = neighbor_ap_info->tbtt_header.tbtt_info_length; + fieldtype = neighbor_ap_info->tbtt_header.tbbt_info_fieldtype; + scm_debug("channel number %d, op class %d", + neighbor_ap_info->channel_number, + neighbor_ap_info->operting_class); + scm_debug("tbtt_count %d, tbtt_length %d, fieldtype %d", + tbtt_count, tbtt_length, fieldtype); + data += sizeof(struct neighbor_ap_info_field); + + if (tbtt_count > TBTT_INFO_COUNT) + break; + + for (i = 0; i < (tbtt_count + 1) && + data < ((uint8_t *)ie + rnr_ie_len + 2); i++) { + if (i < MAX_RNR_BSS) + util_scan_update_rnr( + &scan_entry->rnr.bss_info[i], + neighbor_ap_info, + data); + data += tbtt_length; + } + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +util_scan_parse_extn_ie(struct scan_cache_entry *scan_params, + struct ie_header *ie) +{ + struct extn_ie_header *extn_ie = (struct extn_ie_header *) ie; + + switch (extn_ie->ie_extn_id) { + case WLAN_EXTN_ELEMID_MAX_CHAN_SWITCH_TIME: + if (extn_ie->ie_len != WLAN_MAX_CHAN_SWITCH_TIME_IE_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.mcst = (uint8_t *)ie; + break; + case WLAN_EXTN_ELEMID_SRP: + scan_params->ie_list.srp = (uint8_t *)ie; + break; + case WLAN_EXTN_ELEMID_HECAP: + scan_params->ie_list.hecap = (uint8_t *)ie; + break; + case WLAN_EXTN_ELEMID_HEOP: + scan_params->ie_list.heop = (uint8_t *)ie; + break; + case WLAN_EXTN_ELEMID_ESP: + scan_params->ie_list.esp = (uint8_t *)ie; + break; + case WLAN_EXTN_ELEMID_MUEDCA: + scan_params->ie_list.muedca = (uint8_t *)ie; + break; + case WLAN_EXTN_ELEMID_HE_6G_CAP: + scan_params->ie_list.hecap_6g = (uint8_t *)ie; + break; + default: + break; + } + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +util_scan_parse_vendor_ie(struct scan_cache_entry *scan_params, + struct ie_header *ie) +{ + if (!scan_params->ie_list.vendor) + scan_params->ie_list.vendor = (uint8_t *)ie; + + if (is_wpa_oui((uint8_t *)ie)) { + scan_params->ie_list.wpa = (uint8_t *)ie; + } else if (is_wps_oui((uint8_t *)ie)) { + scan_params->ie_list.wps = (uint8_t *)ie; + /* WCN IE should be a subset of WPS IE */ + if (is_wcn_oui((uint8_t *)ie)) + scan_params->ie_list.wcn = (uint8_t *)ie; + } else if (is_wme_param((uint8_t *)ie)) { + scan_params->ie_list.wmeparam = (uint8_t *)ie; + } else if (is_wme_info((uint8_t *)ie)) { + scan_params->ie_list.wmeinfo = (uint8_t *)ie; + } else if (is_atheros_oui((uint8_t *)ie)) { + scan_params->ie_list.athcaps = (uint8_t *)ie; + } else if (is_atheros_extcap_oui((uint8_t *)ie)) { + scan_params->ie_list.athextcaps = (uint8_t *)ie; + } else if (is_sfa_oui((uint8_t *)ie)) { + scan_params->ie_list.sfa = (uint8_t *)ie; + } else if (is_p2p_oui((uint8_t *)ie)) { + scan_params->ie_list.p2p = (uint8_t *)ie; + } else if (is_qca_son_oui((uint8_t *)ie, + QCA_OUI_WHC_AP_INFO_SUBTYPE)) { + scan_params->ie_list.sonadv = (uint8_t *)ie; + } else if (is_ht_cap((uint8_t *)ie)) { + /* we only care if there isn't already an HT IE (ANA) */ + if (!scan_params->ie_list.htcap) { + if (ie->ie_len != (WLAN_VENDOR_HT_IE_OFFSET_LEN + + sizeof(struct htcap_cmn_ie))) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.htcap = + (uint8_t *)&(((struct wlan_vendor_ie_htcap *)ie)->ie); + } + } else if (is_ht_info((uint8_t *)ie)) { + /* we only care if there isn't already an HT IE (ANA) */ + if (!scan_params->ie_list.htinfo) { + if (ie->ie_len != WLAN_VENDOR_HT_IE_OFFSET_LEN + + sizeof(struct wlan_ie_htinfo_cmn)) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.htinfo = + (uint8_t *)&(((struct wlan_vendor_ie_htinfo *) + ie)->hi_ie); + } + } else if (is_interop_vht((uint8_t *)ie) && + !(scan_params->ie_list.vhtcap)) { + uint8_t *vendor_ie = (uint8_t *)(ie); + + if (ie->ie_len < ((WLAN_VENDOR_VHTCAP_IE_OFFSET + + sizeof(struct wlan_ie_vhtcaps)) - + sizeof(struct ie_header))) + return QDF_STATUS_E_INVAL; + vendor_ie = ((uint8_t *)(ie)) + WLAN_VENDOR_VHTCAP_IE_OFFSET; + if (vendor_ie[1] != (sizeof(struct wlan_ie_vhtcaps)) - + sizeof(struct ie_header)) + return QDF_STATUS_E_INVAL; + /* location where Interop Vht Cap IE and VHT OP IE Present */ + scan_params->ie_list.vhtcap = (((uint8_t *)(ie)) + + WLAN_VENDOR_VHTCAP_IE_OFFSET); + if (ie->ie_len > ((WLAN_VENDOR_VHTCAP_IE_OFFSET + + sizeof(struct wlan_ie_vhtcaps)) - + sizeof(struct ie_header))) { + if (ie->ie_len < ((WLAN_VENDOR_VHTOP_IE_OFFSET + + sizeof(struct wlan_ie_vhtop)) - + sizeof(struct ie_header))) + return QDF_STATUS_E_INVAL; + vendor_ie = ((uint8_t *)(ie)) + + WLAN_VENDOR_VHTOP_IE_OFFSET; + if (vendor_ie[1] != (sizeof(struct wlan_ie_vhtop) - + sizeof(struct ie_header))) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.vhtop = (((uint8_t *)(ie)) + + WLAN_VENDOR_VHTOP_IE_OFFSET); + } + } else if (is_bwnss_oui((uint8_t *)ie)) { + /* + * Bandwidth-NSS map has sub-type & version. + * hence copy data just after version byte + */ + if (ie->ie_len > WLAN_BWNSS_MAP_OFFSET) + scan_params->ie_list.bwnss_map = (((uint8_t *)ie) + 8); + } else if (is_mbo_oce_oui((uint8_t *)ie)) { + scan_params->ie_list.mbo_oce = (uint8_t *)ie; + } else if (is_extender_oui((uint8_t *)ie)) { + scan_params->ie_list.extender = (uint8_t *)ie; + } else if (is_adaptive_11r_oui((uint8_t *)ie)) { + if ((ie->ie_len < OUI_LENGTH) || + (ie->ie_len > MAX_ADAPTIVE_11R_IE_LEN)) + return QDF_STATUS_E_INVAL; + + scan_params->ie_list.adaptive_11r = (uint8_t *)ie + + sizeof(struct ie_header); + } else if (is_sae_single_pmk_oui((uint8_t *)ie)) { + if ((ie->ie_len < OUI_LENGTH) || + (ie->ie_len > MAX_SAE_SINGLE_PMK_IE_LEN)) { + scm_debug("Invalid sae single pmk OUI"); + return QDF_STATUS_E_INVAL; + } + scan_params->ie_list.single_pmk = (uint8_t *)ie + + sizeof(struct ie_header); + } + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +util_scan_populate_bcn_ie_list(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_params, + qdf_freq_t *chan_freq, uint8_t band_mask) +{ + struct ie_header *ie, *sub_ie; + uint32_t ie_len, sub_ie_len; + QDF_STATUS status; + uint8_t chan_idx; + struct wlan_scan_obj *scan_obj; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("scan_obj is NULL"); + return QDF_STATUS_E_INVAL; + } + + ie_len = util_scan_entry_ie_len(scan_params); + ie = (struct ie_header *) + util_scan_entry_ie_data(scan_params); + + while (ie_len >= sizeof(struct ie_header)) { + ie_len -= sizeof(struct ie_header); + + if (!ie->ie_len) { + ie += 1; + continue; + } + + if (ie_len < ie->ie_len) { + if (scan_obj->allow_bss_with_incomplete_ie) { + scm_debug(QDF_MAC_ADDR_FMT": Scan allowed with incomplete corrupted IE:%x, ie_len: %d, ie->ie_len: %d, stop processing further", + QDF_MAC_ADDR_REF(scan_params->bssid.bytes), + ie->ie_id, ie_len, ie->ie_len); + break; + } + scm_debug(QDF_MAC_ADDR_FMT": Scan not allowed with incomplete corrupted IE:%x, ie_len: %d, ie->ie_len: %d, stop processing further", + QDF_MAC_ADDR_REF(scan_params->bssid.bytes), + ie->ie_id, ie_len, ie->ie_len); + return QDF_STATUS_E_INVAL; + } + + switch (ie->ie_id) { + case WLAN_ELEMID_SSID: + if (ie->ie_len > (sizeof(struct ie_ssid) - + sizeof(struct ie_header))) + goto err; + scan_params->ie_list.ssid = (uint8_t *)ie; + break; + case WLAN_ELEMID_RATES: + if (ie->ie_len > WLAN_SUPPORTED_RATES_IE_MAX_LEN) + goto err; + scan_params->ie_list.rates = (uint8_t *)ie; + break; + case WLAN_ELEMID_DSPARMS: + if (ie->ie_len != WLAN_DS_PARAM_IE_MAX_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.ds_param = (uint8_t *)ie; + chan_idx = + ((struct ds_ie *)ie)->cur_chan; + *chan_freq = wlan_reg_chan_band_to_freq(pdev, chan_idx, + band_mask); + /* Drop if invalid freq */ + if (scan_obj->drop_bcn_on_invalid_freq && + wlan_reg_is_disable_for_freq(pdev, *chan_freq)) { + scm_debug_rl(QDF_MAC_ADDR_FMT": Drop as invalid channel %d freq %d in DS IE", + scan_params->bssid.bytes, + chan_idx, *chan_freq); + return QDF_STATUS_E_INVAL; + } + break; + case WLAN_ELEMID_TIM: + if (ie->ie_len < WLAN_TIM_IE_MIN_LENGTH) + goto err; + scan_params->ie_list.tim = (uint8_t *)ie; + scan_params->dtim_period = + ((struct wlan_tim_ie *)ie)->tim_period; + break; + case WLAN_ELEMID_COUNTRY: + if (ie->ie_len < WLAN_COUNTRY_IE_MIN_LEN) + goto err; + scan_params->ie_list.country = (uint8_t *)ie; + break; + case WLAN_ELEMID_QBSS_LOAD: + if (ie->ie_len != sizeof(struct qbss_load_ie) - + sizeof(struct ie_header)) { + /* + * Expected QBSS IE length is 5Bytes; For some + * old cisco AP, QBSS IE length is 4Bytes, which + * doesn't match with latest spec, So ignore + * QBSS IE in such case. + */ + break; + } + scan_params->ie_list.qbssload = (uint8_t *)ie; + break; + case WLAN_ELEMID_CHANSWITCHANN: + if (ie->ie_len != WLAN_CSA_IE_MAX_LEN) + goto err; + scan_params->ie_list.csa = (uint8_t *)ie; + break; + case WLAN_ELEMID_IBSSDFS: + if (ie->ie_len < WLAN_IBSSDFS_IE_MIN_LEN) + goto err; + scan_params->ie_list.ibssdfs = (uint8_t *)ie; + break; + case WLAN_ELEMID_QUIET: + if (ie->ie_len != WLAN_QUIET_IE_MAX_LEN) + goto err; + scan_params->ie_list.quiet = (uint8_t *)ie; + break; + case WLAN_ELEMID_ERP: + if (ie->ie_len != (sizeof(struct erp_ie) - + sizeof(struct ie_header))) + goto err; + scan_params->erp = ((struct erp_ie *)ie)->value; + break; + case WLAN_ELEMID_HTCAP_ANA: + if (ie->ie_len != sizeof(struct htcap_cmn_ie)) + goto err; + scan_params->ie_list.htcap = + (uint8_t *)&(((struct htcap_ie *)ie)->ie); + break; + case WLAN_ELEMID_RSN: + /* + * For security cert TC, RSNIE length can be 1 but if + * beacon is dropped, old entry will remain in scan + * cache and cause cert TC failure as connection with + * old entry with valid RSN IE will pass. + * So instead of dropping the frame, do not store the + * RSN pointer so that old entry is overwritten. + */ + if (ie->ie_len >= WLAN_RSN_IE_MIN_LEN) + scan_params->ie_list.rsn = (uint8_t *)ie; + break; + case WLAN_ELEMID_XRATES: + scan_params->ie_list.xrates = (uint8_t *)ie; + break; + case WLAN_ELEMID_EXTCHANSWITCHANN: + if (ie->ie_len != WLAN_XCSA_IE_MAX_LEN) + goto err; + scan_params->ie_list.xcsa = (uint8_t *)ie; + break; + case WLAN_ELEMID_SECCHANOFFSET: + if (ie->ie_len != WLAN_SECCHANOFF_IE_MAX_LEN) + goto err; + scan_params->ie_list.secchanoff = (uint8_t *)ie; + break; + case WLAN_ELEMID_HTINFO_ANA: + if (ie->ie_len != sizeof(struct wlan_ie_htinfo_cmn)) + goto err; + scan_params->ie_list.htinfo = + (uint8_t *)&(((struct wlan_ie_htinfo *) ie)->hi_ie); + chan_idx = ((struct wlan_ie_htinfo_cmn *) + (scan_params->ie_list.htinfo))->hi_ctrlchannel; + *chan_freq = wlan_reg_chan_band_to_freq(pdev, chan_idx, + band_mask); + /* Drop if invalid freq */ + if (scan_obj->drop_bcn_on_invalid_freq && + wlan_reg_is_disable_for_freq(pdev, *chan_freq)) { + scm_debug_rl(QDF_MAC_ADDR_FMT": Drop as invalid channel %d freq %d in HT_INFO IE", + scan_params->bssid.bytes, + chan_idx, *chan_freq); + return QDF_STATUS_E_INVAL; + } + break; + case WLAN_ELEMID_WAPI: + if (ie->ie_len < WLAN_WAPI_IE_MIN_LEN) + goto err; + scan_params->ie_list.wapi = (uint8_t *)ie; + break; + case WLAN_ELEMID_XCAPS: + if (ie->ie_len > WLAN_EXTCAP_IE_MAX_LEN) + goto err; + scan_params->ie_list.extcaps = (uint8_t *)ie; + break; + case WLAN_ELEMID_VHTCAP: + if (ie->ie_len != (sizeof(struct wlan_ie_vhtcaps) - + sizeof(struct ie_header))) + goto err; + scan_params->ie_list.vhtcap = (uint8_t *)ie; + break; + case WLAN_ELEMID_VHTOP: + if (ie->ie_len != (sizeof(struct wlan_ie_vhtop) - + sizeof(struct ie_header))) + goto err; + scan_params->ie_list.vhtop = (uint8_t *)ie; + break; + case WLAN_ELEMID_OP_MODE_NOTIFY: + if (ie->ie_len != WLAN_OPMODE_IE_MAX_LEN) + goto err; + scan_params->ie_list.opmode = (uint8_t *)ie; + break; + case WLAN_ELEMID_MOBILITY_DOMAIN: + if (ie->ie_len != WLAN_MOBILITY_DOMAIN_IE_MAX_LEN) + goto err; + scan_params->ie_list.mdie = (uint8_t *)ie; + break; + case WLAN_ELEMID_VENDOR: + status = util_scan_parse_vendor_ie(scan_params, + ie); + if (QDF_IS_STATUS_ERROR(status)) + goto err_status; + break; + case WLAN_ELEMID_CHAN_SWITCH_WRAP: + scan_params->ie_list.cswrp = (uint8_t *)ie; + /* Go to next sub IE */ + sub_ie = (struct ie_header *) + (((uint8_t *)ie) + sizeof(struct ie_header)); + sub_ie_len = ie->ie_len; + status = + util_scan_parse_chan_switch_wrapper_ie( + scan_params, sub_ie, sub_ie_len); + if (QDF_IS_STATUS_ERROR(status)) { + goto err_status; + } + break; + case WLAN_ELEMID_FILS_INDICATION: + if (ie->ie_len < WLAN_FILS_INDICATION_IE_MIN_LEN) + goto err; + scan_params->ie_list.fils_indication = (uint8_t *)ie; + break; + case WLAN_ELEMID_RSNXE: + if (!ie->ie_len) + goto err; + scan_params->ie_list.rsnxe = (uint8_t *)ie; + break; + case WLAN_ELEMID_EXTN_ELEM: + status = util_scan_parse_extn_ie(scan_params, ie); + if (QDF_IS_STATUS_ERROR(status)) + goto err_status; + break; + case WLAN_ELEMID_REDUCED_NEIGHBOR_REPORT: + if (ie->ie_len < WLAN_RNR_IE_MIN_LEN) + goto err; + scan_params->ie_list.rnrie = (uint8_t *)ie; + status = util_scan_parse_rnr_ie(scan_params, ie); + if (QDF_IS_STATUS_ERROR(status)) + goto err_status; + break; + default: + break; + } + + /* Consume info element */ + ie_len -= ie->ie_len; + /* Go to next IE */ + ie = (struct ie_header *) + (((uint8_t *) ie) + + sizeof(struct ie_header) + + ie->ie_len); + } + + return QDF_STATUS_SUCCESS; + +err: + status = QDF_STATUS_E_INVAL; +err_status: + scm_debug("failed to parse IE - id: %d, len: %d", + ie->ie_id, ie->ie_len); + + return status; +} + +/** + * util_scan_update_esp_data: update ESP params from beacon/probe response + * @esp_information: pointer to wlan_esp_information + * @scan_entry: new received entry + * + * The Estimated Service Parameters element is + * used by a AP to provide information to another STA which + * can then use the information as input to an algorithm to + * generate an estimate of throughput between the two STAs. + * The ESP Information List field contains from 1 to 4 ESP + * Information fields(each field 24 bits), each corresponding + * to an access category for which estimated service parameters + * information is provided. + * + * Return: None + */ +static void util_scan_update_esp_data(struct wlan_esp_ie *esp_information, + struct scan_cache_entry *scan_entry) +{ + + uint8_t *data; + int i = 0; + uint64_t total_elements; + struct wlan_esp_info *esp_info; + struct wlan_esp_ie *esp_ie; + + esp_ie = (struct wlan_esp_ie *) + util_scan_entry_esp_info(scan_entry); + + total_elements = esp_ie->esp_len; + data = (uint8_t *)esp_ie + 3; + do_div(total_elements, ESP_INFORMATION_LIST_LENGTH); + + if (total_elements > MAX_ESP_INFORMATION_FIELD) { + scm_err("No of Air time fractions are greater than supported"); + return; + } + + for (i = 0; i < total_elements && + data < ((uint8_t *)esp_ie + esp_ie->esp_len + 3); i++) { + esp_info = (struct wlan_esp_info *)data; + if (esp_info->access_category == ESP_AC_BK) { + qdf_mem_copy(&esp_information->esp_info_AC_BK, + data, 3); + data = data + ESP_INFORMATION_LIST_LENGTH; + continue; + } + if (esp_info->access_category == ESP_AC_BE) { + qdf_mem_copy(&esp_information->esp_info_AC_BE, + data, 3); + data = data + ESP_INFORMATION_LIST_LENGTH; + continue; + } + if (esp_info->access_category == ESP_AC_VI) { + qdf_mem_copy(&esp_information->esp_info_AC_VI, + data, 3); + data = data + ESP_INFORMATION_LIST_LENGTH; + continue; + } + if (esp_info->access_category == ESP_AC_VO) { + qdf_mem_copy(&esp_information->esp_info_AC_VO, + data, 3); + data = data + ESP_INFORMATION_LIST_LENGTH; + break; + } + } +} + +/** + * util_scan_scm_update_bss_with_esp_dataa: calculate estimated air time + * fraction + * @scan_entry: new received entry + * + * This function process all Access category ESP params and provide + * best effort air time fraction. + * If best effort is not available, it will choose VI, VO and BK in sequence + * + */ +static void util_scan_scm_update_bss_with_esp_data( + struct scan_cache_entry *scan_entry) +{ + uint8_t air_time_fraction = 0; + struct wlan_esp_ie esp_information; + + if (!scan_entry->ie_list.esp) + return; + + util_scan_update_esp_data(&esp_information, scan_entry); + + /* + * If the ESP metric is transmitting multiple airtime fractions, then + * follow the sequence AC_BE, AC_VI, AC_VO, AC_BK and pick whichever is + * the first one available + */ + if (esp_information.esp_info_AC_BE.access_category + == ESP_AC_BE) + air_time_fraction = + esp_information.esp_info_AC_BE. + estimated_air_fraction; + else if (esp_information.esp_info_AC_VI.access_category + == ESP_AC_VI) + air_time_fraction = + esp_information.esp_info_AC_VI. + estimated_air_fraction; + else if (esp_information.esp_info_AC_VO.access_category + == ESP_AC_VO) + air_time_fraction = + esp_information.esp_info_AC_VO. + estimated_air_fraction; + else if (esp_information.esp_info_AC_BK.access_category + == ESP_AC_BK) + air_time_fraction = + esp_information.esp_info_AC_BK. + estimated_air_fraction; + scan_entry->air_time_fraction = air_time_fraction; +} + +/** + * util_scan_scm_calc_nss_supported_by_ap() - finds out nss from AP + * @scan_entry: new received entry + * + * Return: number of nss advertised by AP + */ +static int util_scan_scm_calc_nss_supported_by_ap( + struct scan_cache_entry *scan_params) +{ + struct htcap_cmn_ie *htcap; + struct wlan_ie_vhtcaps *vhtcaps; + uint8_t rx_mcs_map; + + htcap = (struct htcap_cmn_ie *) + util_scan_entry_htcap(scan_params); + vhtcaps = (struct wlan_ie_vhtcaps *) + util_scan_entry_vhtcap(scan_params); + if (vhtcaps) { + rx_mcs_map = vhtcaps->rx_mcs_map; + if ((rx_mcs_map & 0xC0) != 0xC0) + return 4; + + if ((rx_mcs_map & 0x30) != 0x30) + return 3; + + if ((rx_mcs_map & 0x0C) != 0x0C) + return 2; + } else if (htcap) { + if (htcap->mcsset[3]) + return 4; + + if (htcap->mcsset[2]) + return 3; + + if (htcap->mcsset[1]) + return 2; + + } + return 1; +} + +#ifdef WLAN_DFS_CHAN_HIDDEN_SSID +QDF_STATUS +util_scan_add_hidden_ssid(struct wlan_objmgr_pdev *pdev, qdf_nbuf_t bcnbuf) +{ + struct wlan_frame_hdr *hdr; + struct wlan_bcn_frame *bcn; + struct wlan_scan_obj *scan_obj; + struct wlan_ssid *conf_ssid; + struct ie_header *ie; + uint32_t frame_len = qdf_nbuf_len(bcnbuf); + uint16_t bcn_ie_offset, ssid_ie_start_offset, ssid_ie_end_offset; + uint16_t tmplen, ie_length; + uint8_t *pbeacon, *tmp; + bool set_ssid_flag = false; + struct ie_ssid ssid = {0}; + uint8_t pdev_id; + + if (!pdev) { + scm_warn("pdev: 0x%pK is NULL", pdev); + return QDF_STATUS_E_NULL_VALUE; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + scan_obj = wlan_pdev_get_scan_obj(pdev); + if (!scan_obj) { + scm_warn("null scan_obj"); + return QDF_STATUS_E_NULL_VALUE; + } + + conf_ssid = &scan_obj->pdev_info[pdev_id].conf_ssid; + + hdr = (struct wlan_frame_hdr *)qdf_nbuf_data(bcnbuf); + + /* received bssid does not match configured bssid */ + if (qdf_mem_cmp(hdr->i_addr3, scan_obj->pdev_info[pdev_id].conf_bssid, + QDF_MAC_ADDR_SIZE) || + conf_ssid->length == 0) { + return QDF_STATUS_SUCCESS; + } + + bcn = (struct wlan_bcn_frame *)(qdf_nbuf_data(bcnbuf) + sizeof(*hdr)); + pbeacon = (uint8_t *)bcn; + + ie = (struct ie_header *)(pbeacon + + offsetof(struct wlan_bcn_frame, ie)); + + bcn_ie_offset = offsetof(struct wlan_bcn_frame, ie); + ie_length = (uint16_t)(frame_len - sizeof(*hdr) - + bcn_ie_offset); + + while (ie_length >= sizeof(struct ie_header)) { + ie_length -= sizeof(struct ie_header); + + bcn_ie_offset += sizeof(struct ie_header); + + if (ie_length < ie->ie_len) { + scm_debug("Incomplete corrupted IE:%x", ie->ie_id); + return QDF_STATUS_E_INVAL; + } + if (ie->ie_id == WLAN_ELEMID_SSID) { + if (ie->ie_len > (sizeof(struct ie_ssid) - + sizeof(struct ie_header))) { + return QDF_STATUS_E_INVAL; + } + ssid.ssid_id = ie->ie_id; + ssid.ssid_len = ie->ie_len; + + if (ssid.ssid_len) + qdf_mem_copy(ssid.ssid, + ie + sizeof(struct ie_header), + ssid.ssid_len); + + if (util_scan_is_hidden_ssid(&ssid)) { + set_ssid_flag = true; + ssid_ie_start_offset = bcn_ie_offset - + sizeof(struct ie_header); + ssid_ie_end_offset = bcn_ie_offset + + ie->ie_len; + } + } + if (ie->ie_len == 0) { + ie += 1; /* next IE */ + continue; + } + if (ie->ie_id == WLAN_ELEMID_VENDOR && + is_wps_oui((uint8_t *)ie)) { + set_ssid_flag = false; + break; + } + /* Consume info element */ + ie_length -= ie->ie_len; + /* Go to next IE */ + ie = (struct ie_header *)(((uint8_t *)ie) + + sizeof(struct ie_header) + + ie->ie_len); + } + + if (set_ssid_flag) { + /* Hidden SSID if the Length is 0 */ + if (!ssid.ssid_len) { + /* increase the taillength by length of ssid */ + if (qdf_nbuf_put_tail(bcnbuf, + conf_ssid->length) == NULL) { + scm_debug("No enough tailroom"); + return QDF_STATUS_E_NOMEM; + } + /* length of the buffer to be copied */ + tmplen = frame_len - + sizeof(*hdr) - ssid_ie_end_offset; + /* + * tmp memory to copy the beacon info + * after ssid ie. + */ + tmp = qdf_mem_malloc(tmplen * sizeof(u_int8_t)); + if (!tmp) + return QDF_STATUS_E_NOMEM; + + /* Copy beacon data after ssid ie to tmp */ + qdf_nbuf_copy_bits(bcnbuf, (sizeof(*hdr) + + ssid_ie_end_offset), tmplen, tmp); + /* Add ssid length */ + *(pbeacon + (ssid_ie_start_offset + 1)) + = conf_ssid->length; + /* Insert the SSID string */ + qdf_mem_copy((pbeacon + ssid_ie_end_offset), + conf_ssid->ssid, conf_ssid->length); + /* Copy rest of the beacon data */ + qdf_mem_copy((pbeacon + ssid_ie_end_offset + + conf_ssid->length), tmp, tmplen); + qdf_mem_free(tmp); + + /* Hidden ssid with all 0's */ + } else if (ssid.ssid_len == conf_ssid->length) { + /* Insert the SSID string */ + qdf_mem_copy((pbeacon + ssid_ie_start_offset + + sizeof(struct ie_header)), + conf_ssid->ssid, conf_ssid->length); + } else { + scm_debug("mismatch in hidden ssid length"); + return QDF_STATUS_E_INVAL; + } + } + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_DFS_CHAN_HIDDEN_SSID */ + +#ifdef WLAN_ADAPTIVE_11R +/** + * scm_fill_adaptive_11r_cap() - Check if the AP supports adaptive 11r + * @scan_entry: Pointer to the scan entry + * + * Return: true if adaptive 11r is advertised else false + */ +static void scm_fill_adaptive_11r_cap(struct scan_cache_entry *scan_entry) +{ + uint8_t *ie; + uint8_t data; + bool adaptive_11r; + + ie = util_scan_entry_adaptive_11r(scan_entry); + if (!ie) + return; + + data = *(ie + OUI_LENGTH); + adaptive_11r = (data & 0x1) ? true : false; + + scan_entry->adaptive_11r_ap = adaptive_11r; +} +#else +static void scm_fill_adaptive_11r_cap(struct scan_cache_entry *scan_entry) +{ + scan_entry->adaptive_11r_ap = false; +} +#endif + +static void util_scan_set_security(struct scan_cache_entry *scan_params) +{ + if (util_scan_entry_wpa(scan_params)) + scan_params->security_type |= SCAN_SECURITY_TYPE_WPA; + + if (util_scan_entry_rsn(scan_params)) + scan_params->security_type |= SCAN_SECURITY_TYPE_RSN; + if (util_scan_entry_wapi(scan_params)) + scan_params->security_type |= SCAN_SECURITY_TYPE_WAPI; + + if (!scan_params->security_type && + scan_params->cap_info.wlan_caps.privacy) + scan_params->security_type |= SCAN_SECURITY_TYPE_WEP; +} + +static QDF_STATUS +util_scan_gen_scan_entry(struct wlan_objmgr_pdev *pdev, + uint8_t *frame, qdf_size_t frame_len, + uint32_t frm_subtype, + struct mgmt_rx_event_params *rx_param, + struct scan_mbssid_info *mbssid_info, + qdf_list_t *scan_list) +{ + struct wlan_frame_hdr *hdr; + struct wlan_bcn_frame *bcn; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct ie_ssid *ssid; + struct scan_cache_entry *scan_entry; + struct qbss_load_ie *qbss_load; + struct scan_cache_node *scan_node; + uint8_t i; + qdf_freq_t chan_freq = 0; + uint8_t band_mask; + + scan_entry = qdf_mem_malloc_atomic(sizeof(*scan_entry)); + if (!scan_entry) { + scm_err("failed to allocate memory for scan_entry"); + return QDF_STATUS_E_NOMEM; + } + scan_entry->raw_frame.ptr = + qdf_mem_malloc_atomic(frame_len); + if (!scan_entry->raw_frame.ptr) { + scm_err("failed to allocate memory for frame"); + qdf_mem_free(scan_entry); + return QDF_STATUS_E_NOMEM; + } + + bcn = (struct wlan_bcn_frame *) + (frame + sizeof(*hdr)); + hdr = (struct wlan_frame_hdr *)frame; + + /* update timestamp in nanoseconds needed by kernel layers */ + scan_entry->boottime_ns = qdf_get_bootbased_boottime_ns(); + + scan_entry->frm_subtype = frm_subtype; + qdf_mem_copy(scan_entry->bssid.bytes, + hdr->i_addr3, QDF_MAC_ADDR_SIZE); + /* Scr addr */ + qdf_mem_copy(scan_entry->mac_addr.bytes, + hdr->i_addr2, QDF_MAC_ADDR_SIZE); + scan_entry->seq_num = + (le16toh(*(uint16_t *)hdr->i_seq) >> WLAN_SEQ_SEQ_SHIFT); + + scan_entry->snr = rx_param->snr; + scan_entry->avg_snr = WLAN_SNR_IN(scan_entry->snr); + scan_entry->rssi_raw = rx_param->rssi; + scan_entry->avg_rssi = WLAN_RSSI_IN(scan_entry->rssi_raw); + scan_entry->tsf_delta = rx_param->tsf_delta; + scan_entry->pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + /* Copy per chain rssi to scan entry */ + qdf_mem_copy(scan_entry->per_chain_rssi, rx_param->rssi_ctl, + WLAN_MGMT_TXRX_HOST_MAX_ANTENNA); + band_mask = BIT(wlan_reg_freq_to_band(rx_param->chan_freq)); + + if (!wlan_psoc_nif_fw_ext_cap_get(wlan_pdev_get_psoc(pdev), + WLAN_SOC_CEXT_HW_DB2DBM)) { + for (i = 0; i < WLAN_MGMT_TXRX_HOST_MAX_ANTENNA; i++) { + if (scan_entry->per_chain_rssi[i] != + WLAN_INVALID_PER_CHAIN_SNR) + scan_entry->per_chain_rssi[i] += + WLAN_NOISE_FLOOR_DBM_DEFAULT; + else + scan_entry->per_chain_rssi[i] = + WLAN_INVALID_PER_CHAIN_RSSI; + } + } + + /* store jiffies */ + scan_entry->rrm_parent_tsf = (uint32_t)qdf_system_ticks(); + + scan_entry->bcn_int = le16toh(bcn->beacon_interval); + + /* + * In case if the beacon dosnt have + * valid beacon interval falback to def + */ + if (!scan_entry->bcn_int) + scan_entry->bcn_int = 100; + scan_entry->cap_info.value = le16toh(bcn->capability.value); + qdf_mem_copy(scan_entry->tsf_info.data, + bcn->timestamp, 8); + scan_entry->erp = ERP_NON_ERP_PRESENT; + + scan_entry->scan_entry_time = + qdf_mc_timer_get_system_time(); + + scan_entry->raw_frame.len = frame_len; + qdf_mem_copy(scan_entry->raw_frame.ptr, + frame, frame_len); + status = util_scan_populate_bcn_ie_list(pdev, scan_entry, &chan_freq, + band_mask); + if (QDF_IS_STATUS_ERROR(status)) { + scm_debug(QDF_MAC_ADDR_FMT": failed to parse beacon IE", + scan_entry->bssid.bytes); + qdf_mem_free(scan_entry->raw_frame.ptr); + qdf_mem_free(scan_entry); + return QDF_STATUS_E_FAILURE; + } + + ssid = (struct ie_ssid *) + scan_entry->ie_list.ssid; + + if (ssid && (ssid->ssid_len > WLAN_SSID_MAX_LEN)) { + qdf_mem_free(scan_entry->raw_frame.ptr); + qdf_mem_free(scan_entry); + return QDF_STATUS_E_FAILURE; + } + + if (scan_entry->ie_list.p2p) + scan_entry->is_p2p = true; + + if (!chan_freq && util_scan_entry_hecap(scan_entry)) { + status = util_scan_get_chan_from_he_6g_params(pdev, scan_entry, + &chan_freq, + band_mask); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(scan_entry->raw_frame.ptr); + qdf_mem_free(scan_entry); + return QDF_STATUS_E_FAILURE; + } + } + + if (chan_freq) + scan_entry->channel.chan_freq = chan_freq; + + /* If no channel info is present in beacon use meta channel */ + if (!scan_entry->channel.chan_freq) { + scan_entry->channel.chan_freq = rx_param->chan_freq; + } else if (rx_param->chan_freq != + scan_entry->channel.chan_freq) { + if (!wlan_reg_is_49ghz_freq(scan_entry->channel.chan_freq)) + scan_entry->channel_mismatch = true; + } + + if (util_scan_is_hidden_ssid(ssid)) { + scan_entry->ie_list.ssid = NULL; + scan_entry->is_hidden_ssid = true; + } else { + qdf_mem_copy(scan_entry->ssid.ssid, + ssid->ssid, ssid->ssid_len); + scan_entry->ssid.length = ssid->ssid_len; + scan_entry->hidden_ssid_timestamp = + scan_entry->scan_entry_time; + } + qdf_mem_copy(&scan_entry->mbssid_info, mbssid_info, + sizeof(scan_entry->mbssid_info)); + + scan_entry->phy_mode = util_scan_get_phymode(pdev, scan_entry); + + scan_entry->nss = util_scan_scm_calc_nss_supported_by_ap(scan_entry); + scm_fill_adaptive_11r_cap(scan_entry); + util_scan_set_security(scan_entry); + + util_scan_scm_update_bss_with_esp_data(scan_entry); + qbss_load = (struct qbss_load_ie *) + util_scan_entry_qbssload(scan_entry); + if (qbss_load) + scan_entry->qbss_chan_load = qbss_load->qbss_chan_load; + + scan_node = qdf_mem_malloc_atomic(sizeof(*scan_node)); + if (!scan_node) { + qdf_mem_free(scan_entry->raw_frame.ptr); + qdf_mem_free(scan_entry); + return QDF_STATUS_E_FAILURE; + } + + scan_node->entry = scan_entry; + qdf_list_insert_front(scan_list, &scan_node->node); + + return status; +} + +/** + * util_scan_find_ie() - find information element + * @eid: element id + * @ies: pointer consisting of IEs + * @len: IE length + * + * Return: NULL if the element ID is not found or + * a pointer to the first byte of the requested + * element + */ +static uint8_t *util_scan_find_ie(uint8_t eid, uint8_t *ies, + int32_t len) +{ + while (len >= 2 && len >= ies[1] + 2) { + if (ies[0] == eid) + return ies; + len -= ies[1] + 2; + ies += ies[1] + 2; + } + + return NULL; +} + +#ifdef WLAN_FEATURE_MBSSID +static void util_gen_new_bssid(uint8_t *bssid, uint8_t max_bssid, + uint8_t mbssid_index, + uint8_t *new_bssid_addr) +{ + uint8_t lsb_n; + int i; + + for (i = 0; i < QDF_MAC_ADDR_SIZE; i++) + new_bssid_addr[i] = bssid[i]; + + lsb_n = new_bssid_addr[5] & ((1 << max_bssid) - 1); + + new_bssid_addr[5] &= ~((1 << max_bssid) - 1); + new_bssid_addr[5] |= (lsb_n + mbssid_index) % (1 << max_bssid); +} + +static uint32_t util_gen_new_ie(uint8_t *ie, uint32_t ielen, + uint8_t *subelement, + size_t subie_len, uint8_t *new_ie) +{ + uint8_t *pos, *tmp; + const uint8_t *tmp_old, *tmp_new; + uint8_t *sub_copy; + size_t tmp_rem_len; + + /* copy subelement as we need to change its content to + * mark an ie after it is processed. + */ + sub_copy = qdf_mem_malloc(subie_len); + if (!sub_copy) + return 0; + qdf_mem_copy(sub_copy, subelement, subie_len); + + pos = &new_ie[0]; + + /* new ssid */ + tmp_new = util_scan_find_ie(WLAN_ELEMID_SSID, sub_copy, subie_len); + if (tmp_new) { + if ((pos + tmp_new[1] + 2) <= (new_ie + ielen)) { + qdf_mem_copy(pos, tmp_new, tmp_new[1] + 2); + pos += (tmp_new[1] + 2); + } + } + + /* go through IEs in ie (skip SSID) and subelement, + * merge them into new_ie + */ + tmp_old = util_scan_find_ie(WLAN_ELEMID_SSID, ie, ielen); + tmp_old = (tmp_old) ? tmp_old + tmp_old[1] + 2 : ie; + + while (tmp_old + tmp_old[1] + 2 - ie <= ielen) { + if (tmp_old[0] == 0) { + tmp_old++; + continue; + } + + tmp = (uint8_t *)util_scan_find_ie(tmp_old[0], sub_copy, + subie_len); + if (!tmp) { + /* ie in old ie but not in subelement */ + if (tmp_old[0] != WLAN_ELEMID_MULTIPLE_BSSID) { + if ((pos + tmp_old[1] + 2) <= + (new_ie + ielen)) { + qdf_mem_copy(pos, tmp_old, + tmp_old[1] + 2); + pos += tmp_old[1] + 2; + } + } + } else { + /* ie in transmitting ie also in subelement, + * copy from subelement and flag the ie in subelement + * as copied (by setting eid field to 0xff). For + * vendor ie, compare OUI + type + subType to + * determine if they are the same ie. + */ + tmp_rem_len = subie_len - (tmp - sub_copy); + if (tmp_old[0] == WLAN_ELEMID_VENDOR && + tmp_rem_len >= 7) { + if (!qdf_mem_cmp(tmp_old + 2, tmp + 2, 5)) { + /* same vendor ie, copy from + * subelement + */ + if ((pos + tmp[1] + 2) <= + (new_ie + ielen)) { + qdf_mem_copy(pos, tmp, + tmp[1] + 2); + pos += tmp[1] + 2; + tmp[0] = 0xff; + } + } else { + if ((pos + tmp_old[1] + 2) <= + (new_ie + ielen)) { + qdf_mem_copy(pos, tmp_old, + tmp_old[1] + 2); + pos += tmp_old[1] + 2; + } + } + } else { + /* copy ie from subelement into new ie */ + if ((pos + tmp[1] + 2) <= (new_ie + ielen)) { + qdf_mem_copy(pos, tmp, tmp[1] + 2); + pos += tmp[1] + 2; + tmp[0] = 0xff; + } + } + } + + if (tmp_old + tmp_old[1] + 2 - ie == ielen) + break; + + tmp_old += tmp_old[1] + 2; + } + + /* go through subelement again to check if there is any ie not + * copied to new ie, skip ssid, capability, bssid-index ie + */ + tmp_new = sub_copy; + while (tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) { + if (!(tmp_new[0] == WLAN_ELEMID_NONTX_BSSID_CAP || + tmp_new[0] == WLAN_ELEMID_SSID || + tmp_new[0] == WLAN_ELEMID_MULTI_BSSID_IDX || + tmp_new[0] == 0xff)) { + if ((pos + tmp_new[1] + 2) <= (new_ie + ielen)) { + qdf_mem_copy(pos, tmp_new, tmp_new[1] + 2); + pos += tmp_new[1] + 2; + } + } + if (tmp_new + tmp_new[1] + 2 - sub_copy == subie_len) + break; + tmp_new += tmp_new[1] + 2; + } + + qdf_mem_free(sub_copy); + return pos - new_ie; +} + +static QDF_STATUS util_scan_parse_mbssid(struct wlan_objmgr_pdev *pdev, + uint8_t *frame, qdf_size_t frame_len, + uint32_t frm_subtype, + struct mgmt_rx_event_params *rx_param, + qdf_list_t *scan_list) +{ + struct wlan_bcn_frame *bcn; + struct wlan_frame_hdr *hdr; + struct scan_mbssid_info mbssid_info; + QDF_STATUS status; + uint8_t *pos, *subelement, *mbssid_end_pos; + uint8_t *tmp, *mbssid_index_ie; + uint32_t subie_len, new_ie_len; + uint8_t new_bssid[QDF_MAC_ADDR_SIZE], bssid[QDF_MAC_ADDR_SIZE]; + uint8_t *new_ie; + uint8_t *ie, *new_frame = NULL; + uint64_t ielen, new_frame_len; + + hdr = (struct wlan_frame_hdr *)frame; + bcn = (struct wlan_bcn_frame *)(frame + sizeof(struct wlan_frame_hdr)); + ie = (uint8_t *)&bcn->ie; + ielen = (uint16_t)(frame_len - + sizeof(struct wlan_frame_hdr) - + offsetof(struct wlan_bcn_frame, ie)); + qdf_mem_copy(bssid, hdr->i_addr3, QDF_MAC_ADDR_SIZE); + + if (!util_scan_find_ie(WLAN_ELEMID_MULTIPLE_BSSID, ie, ielen)) + return QDF_STATUS_E_FAILURE; + + pos = ie; + + new_ie = qdf_mem_malloc(ielen); + if (!new_ie) + return QDF_STATUS_E_NOMEM; + + while (pos < ie + ielen + 2) { + tmp = util_scan_find_ie(WLAN_ELEMID_MULTIPLE_BSSID, pos, + ielen - (pos - ie)); + if (!tmp) + break; + + mbssid_info.profile_count = 1 << tmp[2]; + mbssid_end_pos = tmp + tmp[1] + 2; + /* Skip Element ID, Len, MaxBSSID Indicator */ + if (tmp[1] < 4) + break; + for (subelement = tmp + 3; subelement < mbssid_end_pos - 1; + subelement += 2 + subelement[1]) { + subie_len = subelement[1]; + if (mbssid_end_pos - subelement < 2 + subie_len) + break; + if (subelement[0] != 0 || subelement[1] < 4) { + /* not a valid BSS profile */ + continue; + } + + if (subelement[2] != WLAN_ELEMID_NONTX_BSSID_CAP || + subelement[3] != 2) { + /* The first element within the Nontransmitted + * BSSID Profile is not the Nontransmitted + * BSSID Capability element. + */ + continue; + } + + /* found a Nontransmitted BSSID Profile */ + mbssid_index_ie = + util_scan_find_ie(WLAN_ELEMID_MULTI_BSSID_IDX, + subelement + 2, subie_len); + if (!mbssid_index_ie || mbssid_index_ie[1] < 1 || + mbssid_index_ie[2] == 0) { + /* No valid Multiple BSSID-Index element */ + continue; + } + qdf_mem_copy(&mbssid_info.trans_bssid, bssid, + QDF_MAC_ADDR_SIZE); + mbssid_info.profile_num = mbssid_index_ie[2]; + util_gen_new_bssid(bssid, tmp[2], mbssid_index_ie[2], + new_bssid); + new_ie_len = util_gen_new_ie(ie, ielen, subelement + 2, + subie_len, new_ie); + if (!new_ie_len) + continue; + + new_frame_len = frame_len - ielen + new_ie_len; + new_frame = qdf_mem_malloc(new_frame_len); + if (!new_frame) { + qdf_mem_free(new_ie); + return QDF_STATUS_E_NOMEM; + } + + /* + * Copy the header(24byte), timestamp(8 byte), + * beaconinterval(2byte) and capability(2byte) + */ + qdf_mem_copy(new_frame, frame, 36); + /* Copy the new ie generated from MBSSID profile*/ + hdr = (struct wlan_frame_hdr *)new_frame; + qdf_mem_copy(hdr->i_addr2, new_bssid, + QDF_MAC_ADDR_SIZE); + qdf_mem_copy(hdr->i_addr3, new_bssid, + QDF_MAC_ADDR_SIZE); + /* Copy the new ie generated from MBSSID profile*/ + qdf_mem_copy(new_frame + + offsetof(struct wlan_bcn_frame, ie) + + sizeof(struct wlan_frame_hdr), + new_ie, new_ie_len); + status = util_scan_gen_scan_entry(pdev, new_frame, + new_frame_len, + frm_subtype, + rx_param, + &mbssid_info, + scan_list); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(new_frame); + scm_err("failed to generate a scan entry"); + break; + } + /* scan entry makes its own copy so free the frame*/ + qdf_mem_free(new_frame); + } + + pos = mbssid_end_pos; + } + qdf_mem_free(new_ie); + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS util_scan_parse_mbssid(struct wlan_objmgr_pdev *pdev, + uint8_t *frame, qdf_size_t frame_len, + uint32_t frm_subtype, + struct mgmt_rx_event_params *rx_param, + qdf_list_t *scan_list) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +static QDF_STATUS +util_scan_parse_beacon_frame(struct wlan_objmgr_pdev *pdev, + uint8_t *frame, + qdf_size_t frame_len, + uint32_t frm_subtype, + struct mgmt_rx_event_params *rx_param, + qdf_list_t *scan_list) +{ + struct wlan_bcn_frame *bcn; + struct wlan_frame_hdr *hdr; + uint8_t *mbssid_ie = NULL, *extcap_ie; + uint32_t ie_len = 0; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct scan_mbssid_info mbssid_info = { 0 }; + + hdr = (struct wlan_frame_hdr *)frame; + bcn = (struct wlan_bcn_frame *) + (frame + sizeof(struct wlan_frame_hdr)); + ie_len = (uint16_t)(frame_len - + sizeof(struct wlan_frame_hdr) - + offsetof(struct wlan_bcn_frame, ie)); + + extcap_ie = util_scan_find_ie(WLAN_ELEMID_XCAPS, + (uint8_t *)&bcn->ie, ie_len); + /* Process MBSSID when Multiple BSSID (Bit 22) is set in Ext Caps */ + if (extcap_ie && + extcap_ie[1] >= 3 && extcap_ie[1] <= WLAN_EXTCAP_IE_MAX_LEN && + (extcap_ie[4] & 0x40)) { + mbssid_ie = util_scan_find_ie(WLAN_ELEMID_MULTIPLE_BSSID, + (uint8_t *)&bcn->ie, ie_len); + if (mbssid_ie) { + if (mbssid_ie[1] <= 0) { + scm_debug("MBSSID IE length is wrong %d", + mbssid_ie[1]); + return status; + } + qdf_mem_copy(&mbssid_info.trans_bssid, + hdr->i_addr3, QDF_MAC_ADDR_SIZE); + mbssid_info.profile_count = 1 << mbssid_ie[2]; + } + } + + status = util_scan_gen_scan_entry(pdev, frame, frame_len, + frm_subtype, rx_param, + &mbssid_info, + scan_list); + + /* + * IF MBSSID IE is present in the beacon then + * scan component will create a new entry for + * each BSSID found in the MBSSID + */ + if (mbssid_ie) + status = util_scan_parse_mbssid(pdev, frame, frame_len, + frm_subtype, rx_param, + scan_list); + + if (QDF_IS_STATUS_ERROR(status)) + scm_debug_rl("Failed to create a scan entry"); + + return status; +} + +qdf_list_t * +util_scan_unpack_beacon_frame(struct wlan_objmgr_pdev *pdev, uint8_t *frame, + qdf_size_t frame_len, uint32_t frm_subtype, + struct mgmt_rx_event_params *rx_param) +{ + qdf_list_t *scan_list; + QDF_STATUS status; + + scan_list = qdf_mem_malloc_atomic(sizeof(*scan_list)); + if (!scan_list) { + scm_err("failed to allocate scan_list"); + return NULL; + } + qdf_list_create(scan_list, MAX_SCAN_CACHE_SIZE); + + status = util_scan_parse_beacon_frame(pdev, frame, frame_len, + frm_subtype, rx_param, + scan_list); + if (QDF_IS_STATUS_ERROR(status)) { + ucfg_scan_purge_results(scan_list); + return NULL; + } + + return scan_list; +} + +QDF_STATUS +util_scan_entry_update_mlme_info(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry) +{ + + if (!pdev || !scan_entry) { + scm_err("pdev 0x%pK, scan_entry: 0x%pK", pdev, scan_entry); + return QDF_STATUS_E_INVAL; + } + + return scm_update_scan_mlme_info(pdev, scan_entry); +} + +bool util_is_scan_completed(struct scan_event *event, bool *success) +{ + if ((event->type == SCAN_EVENT_TYPE_COMPLETED) || + (event->type == SCAN_EVENT_TYPE_DEQUEUED) || + (event->type == SCAN_EVENT_TYPE_START_FAILED)) { + if ((event->type == SCAN_EVENT_TYPE_COMPLETED) && + (event->reason == SCAN_REASON_COMPLETED)) + *success = true; + else + *success = false; + + return true; + } + + *success = false; + return false; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/inc/wifi_pos_api.h b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/inc/wifi_pos_api.h new file mode 100644 index 0000000000000000000000000000000000000000..7b835438b9c0551ff6786efc69c1ed13a87c497b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/inc/wifi_pos_api.h @@ -0,0 +1,476 @@ +/* + * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wifi_pos_api.h + * This file declares public APIs of wifi positioning component + */ +#ifndef _WIFI_POS_API_H_ +#define _WIFI_POS_API_H_ + +/* Include files */ +#include "wifi_pos_utils_pub.h" +#include "../src/wifi_pos_utils_i.h" + +/* forward reference */ +struct wlan_objmgr_psoc; +struct wifi_pos_driver_caps; + +/** + * struct wifi_pos_field - wifi positioning field element + * @id: RTT field id + * @offset: data offset in field info buffer + * @length: length of related data in field info buffer + */ +struct wifi_pos_field { + uint32_t id; + uint32_t offset; + uint32_t length; +}; + +/** + * struct wifi_pos_field_info - wifi positioning field info buffer + * @count: number of @wifi_pos_field elements + * @fields: buffer to hold @wifi_pos_field elements + */ +struct wifi_pos_field_info { + uint32_t count; + struct wifi_pos_field fields[1]; +}; + +#ifdef WIFI_POS_CONVERGED +/** + * enum oem_err_msg - err msg returned to user space + * @OEM_ERR_NULL_CONTEXT: NULL context + * @OEM_ERR_APP_NOT_REGISTERED: OEM App is not registered + * @OEM_ERR_INVALID_SIGNATURE: Invalid signature + * @OEM_ERR_NULL_MESSAGE_HEADER: Invalid message header + * @OEM_ERR_INVALID_MESSAGE_TYPE: Invalid message type + * @OEM_ERR_INVALID_MESSAGE_LENGTH: Invalid length in message body + */ +enum oem_err_msg { + OEM_ERR_NULL_CONTEXT = 1, + OEM_ERR_APP_NOT_REGISTERED, + OEM_ERR_INVALID_SIGNATURE, + OEM_ERR_NULL_MESSAGE_HEADER, + OEM_ERR_INVALID_MESSAGE_TYPE, + OEM_ERR_INVALID_MESSAGE_LENGTH +}; + +/* this struct is needed since MLME is not converged yet */ +struct wifi_pos_ch_info { + uint8_t chan_id; + uint32_t mhz; + uint32_t band_center_freq1; + uint32_t band_center_freq2; + uint32_t info; + uint32_t reg_info_1; + uint32_t reg_info_2; + uint8_t nss; + uint32_t rate_flags; + uint8_t sec_ch_offset; + uint32_t ch_width; +}; + +/** + * typedef wifi_pos_ch_info_rsp - Channel information + * @chan_id: channel id + * @reserved0: reserved for padding and future use + * @mhz: primary 20 MHz channel frequency in mhz + * @band_center_freq1: Center frequency 1 in MHz + * @band_center_freq2: Center frequency 2 in MHz, valid only for 11ac + * VHT 80+80 mode + * @info: channel info + * @reg_info_1: regulatory information field 1 which contains min power, + * max power, reg power and reg class id + * @reg_info_2: regulatory information field 2 which contains antennamax + */ +struct qdf_packed wifi_pos_ch_info_rsp { + uint32_t chan_id; + uint32_t reserved0; + uint32_t mhz; + uint32_t band_center_freq1; + uint32_t band_center_freq2; + uint32_t info; + uint32_t reg_info_1; + uint32_t reg_info_2; +}; + +/** + * struct wifi_pos_peer_status_info - Status information for a given peer + * @peer_mac_addr: peer mac address + * @peer_status: peer status: 1: CONNECTED, 2: DISCONNECTED + * @vdev_id: vdev_id for the peer mac + * @peer_capability: peer capability: 0: RTT/RTT2, 1: RTT3. Default is 0 + * @reserved0: reserved0 + * @peer_chan_info: channel info on which peer is connected + */ +struct qdf_packed wifi_pos_peer_status_info { + uint8_t peer_mac_addr[ETH_ALEN]; + uint8_t peer_status; + uint8_t vdev_id; + uint32_t peer_capability; + uint32_t reserved0; + struct wifi_pos_ch_info_rsp peer_chan_info; +}; + +/** + * struct wifi_pos_req_msg - wifi pos request struct + * @msg_type: message type + * @pid: process id + * @buf: request buffer + * @buf_len: request buffer length + * @field_info_buf: buffer containing field info + * @field_info_buf_len: length of field info buffer + * @rsp_version: nl type or ani type + * + */ +struct wifi_pos_req_msg { + enum wifi_pos_cmd_ids msg_type; + uint32_t pid; + uint8_t *buf; + uint32_t buf_len; + struct wifi_pos_field_info *field_info_buf; + uint32_t field_info_buf_len; + uint32_t rsp_version; +}; + +/** + * ucfg_wifi_pos_process_req: ucfg API to be called from HDD/OS_IF to process a + * wifi_pos request from userspace + * @psoc: pointer to psoc object + * @req: wifi_pos request msg + * @send_rsp_cb: callback pointer required to send msg to userspace + * + * Return: status of operation + */ +QDF_STATUS ucfg_wifi_pos_process_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req, + wifi_pos_send_rsp_handler send_rsp_cb); + +/** + * wifi_pos_init: initializes WIFI POS component, called by dispatcher init + * + * Return: status of operation + */ +QDF_STATUS wifi_pos_init(void); + +/** + * wifi_pos_deinit: de-initializes WIFI POS component, called by dispatcher init + * + * Return: status of operation + */ +QDF_STATUS wifi_pos_deinit(void); + +/** + * wifi_pos_psoc_enable: psoc enable API for wifi positioning component + * @psoc: pointer to PSOC + * + * Return: status of operation + */ +QDF_STATUS wifi_pos_psoc_enable(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_psoc_disable: psoc disable API for wifi positioning component + * @psoc: pointer to PSOC + * + * Return: status of operation + */ +QDF_STATUS wifi_pos_psoc_disable(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_set_oem_target_type: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_oem_target_type(struct wlan_objmgr_psoc *psoc, uint32_t val); + +/** + * wifi_pos_set_oem_fw_version: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_oem_fw_version(struct wlan_objmgr_psoc *psoc, uint32_t val); + +/** + * wifi_pos_set_drv_ver_major: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_drv_ver_major(struct wlan_objmgr_psoc *psoc, uint8_t val); + +/** + * wifi_pos_set_drv_ver_minor: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_drv_ver_minor(struct wlan_objmgr_psoc *psoc, uint8_t val); + +/** + * wifi_pos_set_drv_ver_patch: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_drv_ver_patch(struct wlan_objmgr_psoc *psoc, uint8_t val); + +/** + * wifi_pos_set_drv_ver_build: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_drv_ver_build(struct wlan_objmgr_psoc *psoc, uint8_t val); + +/** + * wifi_pos_set_dwell_time_min: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_dwell_time_min(struct wlan_objmgr_psoc *psoc, uint16_t val); + +/** + * wifi_pos_set_dwell_time_max: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_dwell_time_max(struct wlan_objmgr_psoc *psoc, uint16_t val); + +/** + * wifi_pos_set_current_dwell_time_min: public API to set param in wifi_pos + * private object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_current_dwell_time_min(struct wlan_objmgr_psoc *psoc, + uint16_t val); + +/** + * wifi_pos_set_current_dwell_time_max: public API to set param in wifi_pos + * private object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_current_dwell_time_max(struct wlan_objmgr_psoc *psoc, + uint16_t val); + +/** + * wifi_pos_populate_caps() - populate oem capabilities + * @psoc: psoc object + * @caps: pointer to populate the capabilities + * + * Return: error code + */ +QDF_STATUS wifi_pos_populate_caps(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_driver_caps *caps); + +struct wlan_lmac_if_rx_ops; +/** + * wifi_pos_register_rx_ops: function to register with lmac rx ops + * @rx_ops: lmac rx ops struct object + * + * Return: None + */ +void wifi_pos_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops); + +/** + * ucfg_wifi_pos_get_ftm_cap: API to get fine timing measurement caps + * @psoc: psoc object + * + * Return: FTM value + */ +uint32_t ucfg_wifi_pos_get_ftm_cap(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_wifi_pos_set_ftm_cap: API to set fine timing measurement caps + * @psoc: psoc object + * @val: value to set + * + * Return: None + */ +void ucfg_wifi_pos_set_ftm_cap(struct wlan_objmgr_psoc *psoc, uint32_t val); + +/** + * ucfg_wifi_pos_set_oem_6g_supported: API to set oem target 6g enabled/disabled + * @psoc: psoc object + * @val: value to set + * + * Return: None + */ +void ucfg_wifi_pos_set_oem_6g_supported(struct wlan_objmgr_psoc *psoc, + bool val); + +/** + * ucfg_wifi_pos_is_nl_rsp: API to check if response is nl or ani type + * @psoc: psoc object + * + * Return: true if response is nl type + */ +bool ucfg_wifi_pos_is_nl_rsp(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_get_app_pid: returns oem app pid. + * @psoc: pointer to psoc object + * + * Return: oem app pid + */ +uint32_t wifi_pos_get_app_pid(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_is_app_registered: indicates if oem app is registered. + * @psoc: pointer to psoc object + * + * Return: true if app is registered, false otherwise + */ +bool wifi_pos_is_app_registered(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_get_psoc: API to get global PSOC object + * + * Since request from userspace is not associated with any vdev/pdev/psoc, this + * API is used to get global psoc object. + * Return: global psoc object. + */ +struct wlan_objmgr_psoc *wifi_pos_get_psoc(void); + +#else +static inline QDF_STATUS wifi_pos_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS wifi_pos_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS wifi_pos_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS wifi_pos_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined(WLAN_FEATURE_CIF_CFR) && defined(WIFI_POS_CONVERGED) +/** + * wifi_pos_init_cir_cfr_rings: API to set DMA ring cap in wifi pos psoc private + * object + * @psoc: pointer to psoc object + * @hal_soc: hal soc pointer + * @num_mac: number of macs + * @buf: buffer containing dma ring cap + * + * Return: status of operation. + */ +QDF_STATUS wifi_pos_init_cir_cfr_rings(struct wlan_objmgr_psoc *psoc, + void *hal_soc, uint8_t num_mac, void *buf); +#else +static inline QDF_STATUS wifi_pos_init_cir_cfr_rings( + struct wlan_objmgr_psoc *psoc, + void *hal_soc, uint8_t num_mac, void *buf) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * wifi_pos_register_get_fw_phy_mode_for_freq_cb: API to register callback + * to get current PHY mode + * @psoc: pointer to psoc object + * @handler: callback to be registered + * + * Return: QDF_STATUS_SUCCESS in case of success, error codes in + * case of failure + */ +QDF_STATUS wifi_pos_register_get_fw_phy_mode_for_freq_cb( + struct wlan_objmgr_psoc *psoc, + void (*handler)(uint32_t, uint32_t, uint32_t *)); + +/** + * wifi_pos_register_get_phy_mode_cb: API to register callback to get + * current PHY mode + * @psoc: pointer to psoc object + * @handler: callback to be registered + * + * Return: QDF_STATUS_SUCCESS in case of success, error codes in + * case of failure + */ +QDF_STATUS wifi_pos_register_get_phy_mode_cb( + struct wlan_objmgr_psoc *psoc, + void (*handler)(uint8_t, uint32_t, uint32_t *)); + +/** + * wifi_pos_register_send_action: API to register callback to send + * action frames + * @psoc: pointer to psoc object + * @handler: callback to be registered + * + * Return: QDF_STATUS_SUCCESS in case of success, error codes in + * case of failure + */ +QDF_STATUS wifi_pos_register_send_action( + struct wlan_objmgr_psoc *psoc, + void (*handler)(struct wlan_objmgr_psoc *psoc, + uint32_t sub_type, + uint8_t *buf, + uint32_t buf_len)); + +/** + * wifi_pos_send_report_resp: Send report to osif + * @psoc: pointer to psoc object + * @req_id: Request id + * @dest_mac: destination mac address + * @err_code: Error code to be sent + * + * Return: QDF_STATUS_SUCCESS in case of success, error codes in + * case of failure + */ +QDF_STATUS wifi_pos_send_report_resp(struct wlan_objmgr_psoc *psoc, + int req_id, uint8_t *dest_mac, + int err_code); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/inc/wifi_pos_utils_pub.h b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/inc/wifi_pos_utils_pub.h new file mode 100644 index 0000000000000000000000000000000000000000..9b1c66b1d7558e5358b9b9e4cbc6591fc5612917 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/inc/wifi_pos_utils_pub.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wifi_pos_utils_pub.h + * This file declares public utils of wifi positioning component + */ +#ifndef _WIFI_POS_UTILS_PUB_H_ +#define _WIFI_POS_UTILS_PUB_H_ + +/* Include files */ +#include "qdf_types.h" +#include "qdf_status.h" +#include "qdf_trace.h" + +#define WIFIPOS_RESERVE_BYTES 100 +#define OEM_TARGET_SIGNATURE_LEN 8 +#define OEM_TARGET_SIGNATURE "QUALCOMM" + +#define MAX_CHANNELS 255 +#define OEM_CAP_MAX_NUM_CHANNELS 128 + +#define WIFI_POS_RSP_V1_FLAT_MEMORY 0x00000001 +#define WIFI_POS_RSP_V2_NL 0x00000002 + +/** + * enum wifi_pos_cmd_ids + * @WIFI_POS_CMD_REGISTRATION: app registration + * @WIFI_POS_CMD_GET_CAPS: get driver capabilities + * @WIFI_POS_CMD_GET_CH_INFO: get channel info + * @WIFI_POS_CMD_OEM_DATA: oem data req/rsp + * @WIFI_POS_CMD_MAX: Max cld80211 vendor sub cmds + */ + +enum wifi_pos_cmd_ids { + WIFI_POS_CMD_INVALID = 0, + WIFI_POS_CMD_REGISTRATION = 1, + WIFI_POS_CMD_SET_CAPS = 2, + WIFI_POS_CMD_GET_CAPS = 3, + WIFI_POS_CMD_GET_CH_INFO = 4, + WIFI_POS_CMD_OEM_DATA = 5, + WIFI_POS_CMD_ERROR = 6, + WIFI_POS_PEER_STATUS_IND = 7, + /* keep last */ + WIFI_POS_CMD__AFTER_LAST, + WIFI_POS_CMD_MAX = + WIFI_POS_CMD__AFTER_LAST - 1 +}; + + +/** + * struct wifi_pos_driver_version - Driver version identifier (w.x.y.z) + * @major: Version ID major number + * @minor: Version ID minor number + * @patch: Version ID patch number + * @build: Version ID build number + */ +struct qdf_packed wifi_pos_driver_version { + uint8_t major; + uint8_t minor; + uint8_t patch; + uint8_t build; +}; + +/** + * struct wifi_pos_channel_power + * @center_freq: Channel Center Frequency + * @chan_num: channel number + * @tx_power: TX power + */ +struct wifi_pos_channel_power { + uint32_t center_freq; + uint32_t chan_num; + uint32_t tx_power; +}; + +/** + * struct wifi_pos_channel_list + * @valid_channels: no of valid channels + * @chan_info: channel info + */ +struct qdf_packed wifi_pos_channel_list { + uint16_t num_channels; + struct wifi_pos_channel_power chan_info[MAX_CHANNELS]; +}; + +/** + * struct wifi_pos_driver_caps - OEM Data Capabilities + * @oem_target_signature: Signature of chipset vendor + * @oem_target_type: Chip type + * @oem_fw_version: Firmware version + * @driver_version: Host software version + * @allowed_dwell_time_min: Channel dwell time - allowed minimum + * @allowed_dwell_time_max: Channel dwell time - allowed maximum + * @curr_dwell_time_min: Channel dwell time - current minimim + * @curr_dwell_time_max: Channel dwell time - current maximum + * @supported_bands: Supported bands, 2.4G or 5G Hz + * @num_channels: Num of channels IDs to follow + * @channel_list: List of channel IDs + */ +struct qdf_packed wifi_pos_driver_caps { + uint8_t oem_target_signature[OEM_TARGET_SIGNATURE_LEN]; + uint32_t oem_target_type; + uint32_t oem_fw_version; + struct wifi_pos_driver_version driver_version; + uint16_t allowed_dwell_time_min; + uint16_t allowed_dwell_time_max; + uint16_t curr_dwell_time_min; + uint16_t curr_dwell_time_max; + uint16_t supported_bands; + uint16_t num_channels; + uint8_t channel_list[OEM_CAP_MAX_NUM_CHANNELS]; +}; + +/** + * struct wifi_pos_user_defined_caps - OEM capability to be exchanged between + * host and userspace + * @ftm_rr: FTM range report capability bit + * @lci_capability: LCI capability bit + * @reserved1: reserved + * @reserved2: reserved + */ +struct wifi_pos_user_defined_caps { + uint32_t ftm_rr:1; + uint32_t lci_capability:1; + uint32_t reserved1:30; + uint32_t reserved2; +}; + +/** + * struct wifi_pos_oem_get_cap_rsp - capabilities set by userspace and target. + * @driver_cap: target capabilities + * @user_defined_cap: capabilities set by userspace via set request + */ +struct qdf_packed wifi_pos_oem_get_cap_rsp { + struct wifi_pos_driver_caps driver_cap; + struct wifi_pos_user_defined_caps user_defined_cap; +}; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_api.c b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_api.c new file mode 100644 index 0000000000000000000000000000000000000000..60e714cfa6a4927236d759ddc6333889d6d29e41 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_api.c @@ -0,0 +1,406 @@ +/* + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wifi_pos_api.c + * This file defines the APIs wifi_pos component. + */ + +#include "wifi_pos_api.h" +#include "wifi_pos_utils_i.h" +#include "wifi_pos_main_i.h" +#include "os_if_wifi_pos.h" +#include "target_if_wifi_pos.h" +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_global_obj.h" +#include "wlan_objmgr_psoc_obj.h" + +QDF_STATUS wifi_pos_init(void) +{ + QDF_STATUS status; + + wifi_pos_lock_init(); + + /* register psoc create handler functions. */ + status = wlan_objmgr_register_psoc_create_handler( + WLAN_UMAC_COMP_WIFI_POS, + wifi_pos_psoc_obj_created_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + wifi_pos_err("register_psoc_create_handler failed, status: %d", + status); + return status; + } + + /* register psoc delete handler functions. */ + status = wlan_objmgr_register_psoc_destroy_handler( + WLAN_UMAC_COMP_WIFI_POS, + wifi_pos_psoc_obj_destroyed_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + wifi_pos_err("register_psoc_destroy_handler failed, status: %d", + status); + } + + return status; +} + +QDF_STATUS wifi_pos_deinit(void) +{ + QDF_STATUS status; + + /* deregister psoc create handler functions. */ + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_WIFI_POS, + wifi_pos_psoc_obj_created_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + wifi_pos_err("unregister_psoc_create_handler failed, status: %d", + status); + return status; + } + + /* deregister psoc delete handler functions. */ + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_WIFI_POS, + wifi_pos_psoc_obj_destroyed_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + wifi_pos_err("unregister_psoc_destroy_handler failed, status: %d", + status); + } + + wifi_pos_lock_deinit(); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wifi_pos_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + struct wlan_lmac_if_wifi_pos_tx_ops *tx_ops; + + tx_ops = wifi_pos_get_tx_ops(psoc); + if (!tx_ops) { + wifi_pos_err("tx_ops is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + status = tx_ops->wifi_pos_register_events(psoc); + + if (QDF_IS_STATUS_ERROR(status)) + wifi_pos_err("target_if_wifi_pos_register_events failed"); + + return status; +} + +QDF_STATUS wifi_pos_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + struct wlan_lmac_if_wifi_pos_tx_ops *tx_ops; + + tx_ops = wifi_pos_get_tx_ops(psoc); + if (!tx_ops) { + wifi_pos_err("tx_ops is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + status = tx_ops->wifi_pos_deregister_events(psoc); + + if (QDF_IS_STATUS_ERROR(status)) + wifi_pos_err("target_if_wifi_pos_deregister_events failed"); + + return QDF_STATUS_SUCCESS; +} + +void wifi_pos_set_oem_target_type(struct wlan_objmgr_psoc *psoc, uint32_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->oem_target_type = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void wifi_pos_set_oem_fw_version(struct wlan_objmgr_psoc *psoc, uint32_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->oem_fw_version = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void wifi_pos_set_drv_ver_major(struct wlan_objmgr_psoc *psoc, uint8_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->driver_version.major = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void wifi_pos_set_drv_ver_minor(struct wlan_objmgr_psoc *psoc, uint8_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->driver_version.minor = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void wifi_pos_set_drv_ver_patch(struct wlan_objmgr_psoc *psoc, uint8_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->driver_version.patch = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void wifi_pos_set_drv_ver_build(struct wlan_objmgr_psoc *psoc, uint8_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->driver_version.build = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void wifi_pos_set_dwell_time_min(struct wlan_objmgr_psoc *psoc, uint16_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->allowed_dwell_time_min = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} +void wifi_pos_set_dwell_time_max(struct wlan_objmgr_psoc *psoc, uint16_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->allowed_dwell_time_max = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void wifi_pos_set_current_dwell_time_max(struct wlan_objmgr_psoc *psoc, + uint16_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->current_dwell_time_max = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void wifi_pos_set_current_dwell_time_min(struct wlan_objmgr_psoc *psoc, + uint16_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->current_dwell_time_max = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +uint32_t wifi_pos_get_app_pid(struct wlan_objmgr_psoc *psoc) +{ + uint32_t app_pid; + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return 0; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + app_pid = wifi_pos_psoc->app_pid; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); + + return app_pid; + +} + +bool wifi_pos_is_app_registered(struct wlan_objmgr_psoc *psoc) +{ + bool is_app_registered; + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return false; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + is_app_registered = wifi_pos_psoc->is_app_registered; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); + + return is_app_registered; +} + +#ifdef WLAN_FEATURE_CIF_CFR +QDF_STATUS wifi_pos_init_cir_cfr_rings(struct wlan_objmgr_psoc *psoc, + void *hal_soc, uint8_t num_mac, void *buf) +{ + return target_if_wifi_pos_init_cir_cfr_rings(psoc, hal_soc, + num_mac, buf); +} +#endif + +QDF_STATUS wifi_pos_register_get_phy_mode_cb( + struct wlan_objmgr_psoc *psoc, + void (*handler)(uint8_t, uint32_t, uint32_t *)) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc; + + if (!psoc) { + wifi_pos_err("psoc is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!handler) { + wifi_pos_err("Null callback"); + return QDF_STATUS_E_NULL_VALUE; + } + wifi_pos_psoc = wifi_pos_get_psoc_priv_obj(psoc); + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wifi_pos_psoc->wifi_pos_get_phy_mode = handler; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wifi_pos_register_get_fw_phy_mode_for_freq_cb( + struct wlan_objmgr_psoc *psoc, + void (*handler)(uint32_t, uint32_t, uint32_t *)) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc; + + if (!psoc) { + wifi_pos_err("psoc is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!handler) { + wifi_pos_err("Null callback"); + return QDF_STATUS_E_NULL_VALUE; + } + wifi_pos_psoc = wifi_pos_get_psoc_priv_obj(psoc); + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wifi_pos_psoc->wifi_pos_get_fw_phy_mode_for_freq = handler; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wifi_pos_register_send_action( + struct wlan_objmgr_psoc *psoc, + void (*handler)(struct wlan_objmgr_psoc *psoc, + uint32_t sub_type, + uint8_t *buf, + uint32_t buf_len)) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc; + + if (!psoc) { + wifi_pos_err("psoc is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!handler) { + wifi_pos_err("Null callback"); + return QDF_STATUS_E_NULL_VALUE; + } + wifi_pos_psoc = wifi_pos_get_psoc_priv_obj(psoc); + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wifi_pos_psoc->wifi_pos_send_action = handler; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_main.c b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_main.c new file mode 100644 index 0000000000000000000000000000000000000000..871b38cdd04e43cb470be71cf503e23e16097975 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_main.c @@ -0,0 +1,898 @@ +/* + * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wifi_pos_main.c + * This file defines the important functions pertinent to + * wifi positioning to initialize and de-initialize the component. + */ +#include "target_if_wifi_pos.h" +#include "wifi_pos_oem_interface_i.h" +#include "wifi_pos_utils_i.h" +#include "wifi_pos_api.h" +#include "wifi_pos_main_i.h" +#include "wifi_pos_ucfg_i.h" +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_global_obj.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_pdev_obj.h" +#include "wlan_objmgr_vdev_obj.h" +#include "wlan_ptt_sock_svc.h" + +#include "wlan_reg_services_api.h" +/* forward declartion */ +struct regulatory_channel; + +#define REG_SET_CHANNEL_REG_POWER(reg_info_1, val) do { \ + reg_info_1 &= 0xff00ffff; \ + reg_info_1 |= ((val & 0xff) << 16); \ +} while (0) + +/* max tx power is in 1 dBm units */ +#define REG_SET_CHANNEL_MAX_TX_POWER(reg_info_2, val) do { \ + reg_info_2 &= 0xffff00ff; \ + reg_info_2 |= ((val & 0xff) << 8); \ +} while (0) + +/* channel info consists of 6 bits of channel mode */ + +#define REG_SET_CHANNEL_MODE(reg_channel, val) do { \ + (reg_channel)->info &= 0xffffffc0; \ + (reg_channel)->info |= (val); \ +} while (0) + +/* + * obj mgr api to iterate over vdevs does not provide a direct array or vdevs, + * rather takes a callback that is called for every vdev. wifi pos needs to + * store device mode and vdev id of all active vdevs and provide this info to + * user space as part of APP registration response. due to this, vdev_idx is + * used to identify how many vdevs have been populated by obj manager API. + */ +static uint32_t vdev_idx; + +/** + * wifi_pos_get_tlv_support: indicates if firmware supports TLV wifi pos msg + * @psoc: psoc object + * + * Return: status of operation + */ +static bool wifi_pos_get_tlv_support(struct wlan_objmgr_psoc *psoc) +{ + /* this is TBD */ + return true; +} + +struct wlan_lmac_if_wifi_pos_tx_ops * + wifi_pos_get_tx_ops(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + wifi_pos_err("psoc is null"); + return NULL; + } + + return &psoc->soc_cb.tx_ops.wifi_pos_tx_ops; +} + +#ifdef CNSS_GENL +static uint8_t * +wifi_pos_prepare_reg_resp(uint32_t *rsp_len, + struct app_reg_rsp_vdev_info *vdevs_info) +{ + uint32_t *nl_sign; + uint8_t *resp_buf; + struct wifi_app_reg_rsp *app_reg_rsp; + + /* + * allocate ENHNC_FLAGS_LEN i.e. 4bytes extra memory in app_reg_resp + * to indicate NLA type response is supported for OEM request + * commands. + */ + *rsp_len = (sizeof(struct app_reg_rsp_vdev_info) * vdev_idx) + + sizeof(uint8_t) + ENHNC_FLAGS_LEN; + resp_buf = qdf_mem_malloc(*rsp_len); + if (!resp_buf) + return NULL; + + app_reg_rsp = (struct wifi_app_reg_rsp *)resp_buf; + app_reg_rsp->num_inf = vdev_idx; + qdf_mem_copy(&app_reg_rsp->vdevs, vdevs_info, + sizeof(struct app_reg_rsp_vdev_info) * vdev_idx); + + nl_sign = (uint32_t *)&app_reg_rsp->vdevs[vdev_idx]; + *nl_sign |= NL_ENABLE_OEM_REQ_RSP; + + return resp_buf; +} +#else +static uint8_t * +wifi_pos_prepare_reg_resp(uint32_t *rsp_len, + struct app_reg_rsp_vdev_info *vdevs_info) +{ + uint8_t *resp_buf; + struct wifi_app_reg_rsp *app_reg_rsp; + + *rsp_len = (sizeof(struct app_reg_rsp_vdev_info) * vdev_idx) + + sizeof(uint8_t); + resp_buf = qdf_mem_malloc(*rsp_len); + if (!resp_buf) + return NULL; + + app_reg_rsp = (struct wifi_app_reg_rsp *)resp_buf; + app_reg_rsp->num_inf = vdev_idx; + qdf_mem_copy(&app_reg_rsp->vdevs, vdevs_info, + sizeof(struct app_reg_rsp_vdev_info) * vdev_idx); + + return resp_buf; +} +#endif + +static QDF_STATUS wifi_pos_process_data_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req) +{ + uint8_t idx; + uint32_t sub_type = 0; + uint32_t channel_mhz = 0; + uint32_t pdev_id = 0; + uint32_t offset; + struct oem_data_req data_req; + struct wlan_lmac_if_wifi_pos_tx_ops *tx_ops; + struct wlan_objmgr_pdev *pdev; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_obj) { + wifi_pos_err("wifi_pos priv obj is null"); + return QDF_STATUS_E_INVAL; + } + + wifi_pos_debug("Received data req pid(%d), len(%d)", + req->pid, req->buf_len); + + /* look for fields */ + if (req->field_info_buf) + for (idx = 0; idx < req->field_info_buf->count; idx++) { + offset = req->field_info_buf->fields[idx].offset; + /* + * replace following reads with read_api based on + * length + */ + if (req->field_info_buf->fields[idx].id == + WMIRTT_FIELD_ID_oem_data_sub_type) { + sub_type = *((uint32_t *)&req->buf[offset]); + continue; + } + + if (req->field_info_buf->fields[idx].id == + WMIRTT_FIELD_ID_channel_mhz) { + channel_mhz = *((uint32_t *)&req->buf[offset]); + continue; + } + + if (req->field_info_buf->fields[idx].id == + WMIRTT_FIELD_ID_pdev) { + pdev_id = *((uint32_t *)&req->buf[offset]); + /* pdev_id in FW starts from 1. So convert it to + * host id by decrementing it. + * zero has special meaning due to backward + * compatibility. Dont change it. + */ + if (pdev_id) + pdev_id -= 1; + continue; + } + } + + switch (sub_type) { + case TARGET_OEM_CAPABILITY_REQ: + /* TBD */ + break; + case TARGET_OEM_CONFIGURE_LCR: + /* TBD */ + break; + case TARGET_OEM_CONFIGURE_LCI: + /* TBD */ + break; + case TARGET_OEM_MEASUREMENT_REQ: + /* TBD */ + break; + case TARGET_OEM_CONFIGURE_FTMRR: + wifi_pos_debug("FTMRR request"); + if (wifi_pos_obj->wifi_pos_send_action) + wifi_pos_obj->wifi_pos_send_action(psoc, sub_type, + req->buf, + req->buf_len); + break; + case TARGET_OEM_CONFIGURE_WRU: + wifi_pos_debug("WRU request"); + if (wifi_pos_obj->wifi_pos_send_action) + wifi_pos_obj->wifi_pos_send_action(psoc, sub_type, + req->buf, + req->buf_len); + break; + default: + wifi_pos_debug("invalid sub type or not passed"); + + tx_ops = wifi_pos_get_tx_ops(psoc); + if (!tx_ops) { + wifi_pos_err("tx ops null"); + return QDF_STATUS_E_INVAL; + } + + pdev = wlan_objmgr_get_pdev_by_id(psoc, pdev_id, + WLAN_WIFI_POS_CORE_ID); + if (!pdev) { + wifi_pos_err("pdev null"); + return QDF_STATUS_E_INVAL; + } + data_req.data_len = req->buf_len; + data_req.data = req->buf; + tx_ops->data_req_tx(pdev, &data_req); + wlan_objmgr_pdev_release_ref(pdev, + WLAN_WIFI_POS_CORE_ID); + break; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wifi_pos_process_set_cap_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req) +{ + int error_code; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj = + wifi_pos_get_psoc_priv_obj(psoc); + struct wifi_pos_user_defined_caps *caps = + (struct wifi_pos_user_defined_caps *)req->buf; + + if (!wifi_pos_obj) { + wifi_pos_err("wifi_pos priv obj is null"); + return QDF_STATUS_E_INVAL; + } + + wifi_pos_debug("Received set cap req pid(%d), len(%d)", + req->pid, req->buf_len); + + wifi_pos_obj->ftm_rr = caps->ftm_rr; + wifi_pos_obj->lci_capability = caps->lci_capability; + error_code = qdf_status_to_os_return(QDF_STATUS_SUCCESS); + wifi_pos_obj->wifi_pos_send_rsp(wifi_pos_obj->app_pid, + WIFI_POS_CMD_SET_CAPS, + sizeof(error_code), + (uint8_t *)&error_code); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wifi_pos_process_get_cap_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req) +{ + struct wifi_pos_oem_get_cap_rsp cap_rsp = { { {0} } }; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_obj) { + wifi_pos_err("wifi_pos priv obj is null"); + return QDF_STATUS_E_INVAL; + } + + wifi_pos_debug("Received get cap req pid(%d), len(%d)", + req->pid, req->buf_len); + + wifi_pos_populate_caps(psoc, &cap_rsp.driver_cap); + cap_rsp.user_defined_cap.ftm_rr = wifi_pos_obj->ftm_rr; + cap_rsp.user_defined_cap.lci_capability = wifi_pos_obj->lci_capability; + + wifi_pos_obj->wifi_pos_send_rsp(wifi_pos_obj->app_pid, + WIFI_POS_CMD_GET_CAPS, + sizeof(cap_rsp), + (uint8_t *)&cap_rsp); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wifi_pos_send_report_resp(struct wlan_objmgr_psoc *psoc, + int req_id, uint8_t *dest_mac, + int err_code) +{ + struct wifi_pos_err_msg_report err_report = {0}; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_obj) { + wifi_pos_err("wifi_pos priv obj is null"); + return QDF_STATUS_E_INVAL; + } + + err_report.msg_tag_len = OEM_MSG_RSP_HEAD_TAG_ID << 16; + err_report.msg_tag_len |= (sizeof(err_report) - + sizeof(err_report.err_rpt)) & 0x0000FFFF; + err_report.msg_subtype = TARGET_OEM_ERROR_REPORT_RSP; + err_report.req_id = req_id & 0xFFFF; + err_report.req_id |= ((err_code & 0xFF) << 16); + err_report.req_id |= (0x1 << 24); + err_report.time_left = 0xFFFFFFFF; + err_report.err_rpt.tag_len = OEM_MEAS_RSP_HEAD_TAG_ID << 16; + err_report.err_rpt.tag_len |= + (sizeof(struct wifi_pos_err_rpt)) & 0x0000FFFF; + memcpy(&err_report.err_rpt.dest_mac, dest_mac, QDF_MAC_ADDR_SIZE); + + wifi_pos_obj->wifi_pos_send_rsp(wifi_pos_obj->app_pid, + WIFI_POS_CMD_OEM_DATA, + sizeof(err_report), + (uint8_t *)&err_report); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wifi_pos_get_vht_ch_width(struct wlan_objmgr_psoc *psoc, + enum phy_ch_width *ch_width) +{ + struct wlan_lmac_if_wifi_pos_tx_ops *tx_ops; + + tx_ops = wifi_pos_get_tx_ops(psoc); + if (!tx_ops) { + qdf_print("tx ops null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!tx_ops->wifi_pos_get_vht_ch_width) { + wifi_pos_err("wifi pos get vht ch width is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + return tx_ops->wifi_pos_get_vht_ch_width( + psoc, ch_width); +} + +static void wifi_update_channel_bw_info(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev, + uint16_t freq, + struct wifi_pos_ch_info_rsp *chan_info) +{ + struct ch_params ch_params = {0}; + uint16_t sec_ch_2g = 0; + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + uint32_t phy_mode; + QDF_STATUS status; + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + status = wifi_pos_get_vht_ch_width(psoc, &ch_params.ch_width); + + if (QDF_IS_STATUS_ERROR(status)) { + wifi_pos_err("can not get vht ch width"); + return; + } + + wlan_reg_set_channel_params_for_freq(pdev, freq, + sec_ch_2g, &ch_params); + chan_info->band_center_freq1 = ch_params.mhz_freq_seg0; + wifi_pos_psoc->wifi_pos_get_fw_phy_mode_for_freq(freq, + ch_params.ch_width, + &phy_mode); + REG_SET_CHANNEL_MODE(chan_info, phy_mode); +} + +static void wifi_pos_get_reg_info(struct wlan_objmgr_pdev *pdev, + uint16_t freq, uint32_t *reg_info_1, + uint32_t *reg_info_2) +{ + uint32_t reg_power = wlan_reg_get_channel_reg_power_for_freq(pdev, + freq); + + *reg_info_1 = 0; + *reg_info_2 = 0; + + REG_SET_CHANNEL_REG_POWER(*reg_info_1, reg_power); + REG_SET_CHANNEL_MAX_TX_POWER(*reg_info_2, reg_power); +} + +/** + * wifi_pos_get_valid_channels: Get the list of valid channels from the + * given channel list + * @channels: Channel list to be validated + * @num_ch: NUmber of channels in the channel list to be validated + * @valid_channel_list: Pointer to valid channel list + * + * Return: Number of valid channels in the given list + */ + +static uint32_t wifi_pos_get_valid_channels(uint8_t *channels, uint32_t num_ch, + uint8_t *valid_channel_list) { + uint32_t i, num_valid_channels = 0; + + for (i = 0; i < num_ch; i++) { + if (wlan_reg_get_chan_enum(channels[i]) == INVALID_CHANNEL) + continue; + valid_channel_list[num_valid_channels++] = channels[i]; + } + return num_valid_channels; +} + +static void wifi_pos_pdev_iterator(struct wlan_objmgr_psoc *psoc, + void *obj, void *arg) +{ + QDF_STATUS status; + uint8_t num_channels; + struct wlan_objmgr_pdev *pdev = obj; + struct wifi_pos_channel_list *chan_list = arg; + struct channel_power *ch_info = NULL; + + if (!chan_list) { + wifi_pos_err("wifi_pos priv arg is null"); + return; + } + ch_info = (struct channel_power *)chan_list->chan_info; + status = wlan_reg_get_channel_list_with_power(pdev, ch_info, + &num_channels); + + if (QDF_IS_STATUS_ERROR(status)) { + wifi_pos_err("Failed to get valid channel list"); + return; + } + chan_list->num_channels = num_channels; +} + +static void wifi_pos_get_ch_info(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_channel_list *chan_list) +{ + wlan_objmgr_iterate_obj_list(psoc, WLAN_PDEV_OP, + wifi_pos_pdev_iterator, + chan_list, true, WLAN_WIFI_POS_CORE_ID); + wifi_pos_notice("num channels: %d", chan_list->num_channels); +} + +static QDF_STATUS wifi_pos_process_ch_info_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req) +{ + uint8_t idx, band_mask; + uint8_t *buf; + uint32_t len, i, freq; + uint32_t reg_info_1; + uint32_t reg_info_2; + bool oem_6g_support_disable; + uint8_t *channels = req->buf; + struct wlan_objmgr_pdev *pdev; + uint32_t num_ch = req->buf_len; + uint8_t valid_channel_list[NUM_CHANNELS]; + uint32_t num_valid_channels = 0; + struct wifi_pos_ch_info_rsp *ch_info; + struct wifi_pos_channel_list *ch_list; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_obj) { + wifi_pos_err("wifi_pos priv obj is null"); + return QDF_STATUS_E_INVAL; + } + + wifi_pos_debug("Received ch info req pid(%d), len(%d)", + req->pid, req->buf_len); + + /* get first pdev since we need that only for freq and dfs state */ + pdev = wlan_objmgr_get_pdev_by_id(psoc, 0, WLAN_WIFI_POS_CORE_ID); + if (!pdev) { + wifi_pos_err("pdev get API failed"); + return QDF_STATUS_E_INVAL; + } + if (num_ch > NUM_CHANNELS) { + wifi_pos_err("Invalid number of channels"); + return QDF_STATUS_E_INVAL; + } + + ch_list = qdf_mem_malloc(sizeof(*ch_list)); + if (!ch_list) + return QDF_STATUS_E_NOMEM; + + if (num_ch == 0 && req->rsp_version == WIFI_POS_RSP_V2_NL) { + wifi_pos_get_ch_info(psoc, ch_list); + qdf_spin_lock_bh(&wifi_pos_obj->wifi_pos_lock); + oem_6g_support_disable = wifi_pos_obj->oem_6g_support_disable; + qdf_spin_unlock_bh(&wifi_pos_obj->wifi_pos_lock); + + /* ch_list has the frequencies in order of 2.4g, 5g & 6g */ + for (i = 0; i < ch_list->num_channels; i++) { + freq = ch_list->chan_info[i].center_freq; + if (oem_6g_support_disable && + WLAN_REG_IS_6GHZ_CHAN_FREQ(freq)) + continue; + num_valid_channels++; + } + } else { + /* v1 has ch_list with frequencies in order of 2.4g, 5g only */ + num_valid_channels = wifi_pos_get_valid_channels( + channels, num_ch, + valid_channel_list); + band_mask = BIT(REG_BAND_5G) | BIT(REG_BAND_2G); + for (i = 0; i < num_valid_channels; i++) { + ch_list->chan_info[i].chan_num = valid_channel_list[i]; + ch_list->chan_info[i].center_freq = + wlan_reg_chan_band_to_freq( + pdev, + ch_list->chan_info[i].chan_num, + band_mask); + } + } + + len = sizeof(uint8_t) + sizeof(struct wifi_pos_ch_info_rsp) * + num_valid_channels; + buf = qdf_mem_malloc(len); + if (!buf) { + wlan_objmgr_pdev_release_ref(pdev, WLAN_WIFI_POS_CORE_ID); + qdf_mem_free(ch_list); + return QDF_STATUS_E_NOMEM; + } + + /* First byte of message body will have num of channels */ + buf[0] = num_valid_channels; + ch_info = (struct wifi_pos_ch_info_rsp *)&buf[1]; + for (idx = 0; idx < num_valid_channels; idx++) { + ch_info[idx].reserved0 = 0; + ch_info[idx].chan_id = ch_list->chan_info[idx].chan_num; + ch_info[idx].mhz = ch_list->chan_info[idx].center_freq; + ch_info[idx].band_center_freq1 = ch_info[idx].mhz; + ch_info[idx].band_center_freq2 = 0; + ch_info[idx].info = 0; + wifi_pos_get_reg_info(pdev, ch_info[idx].mhz, + ®_info_1, ®_info_2); + + if (wlan_reg_is_dfs_for_freq(pdev, ch_info[idx].mhz)) + WIFI_POS_SET_DFS(ch_info[idx].info); + + wifi_update_channel_bw_info(psoc, pdev, + ch_info[idx].mhz, + &ch_info[idx]); + + ch_info[idx].reg_info_1 = reg_info_1; + ch_info[idx].reg_info_2 = reg_info_2; + } + + wifi_pos_obj->wifi_pos_send_rsp(wifi_pos_obj->app_pid, + WIFI_POS_CMD_GET_CH_INFO, + len, buf); + + qdf_mem_free(buf); + qdf_mem_free(ch_list); + wlan_objmgr_pdev_release_ref(pdev, WLAN_WIFI_POS_CORE_ID); + + return QDF_STATUS_SUCCESS; +} + +static void wifi_pos_vdev_iterator(struct wlan_objmgr_psoc *psoc, + void *vdev, void *arg) +{ + struct app_reg_rsp_vdev_info *vdev_info = arg; + + vdev_info[vdev_idx].dev_mode = wlan_vdev_mlme_get_opmode(vdev); + vdev_info[vdev_idx].vdev_id = wlan_vdev_get_id(vdev); + vdev_idx++; +} + +static QDF_STATUS wifi_pos_process_app_reg_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + uint8_t err = 0, *app_reg_rsp; + uint32_t rsp_len; + char *sign_str = NULL; + struct app_reg_rsp_vdev_info vdevs_info[WLAN_UMAC_PSOC_MAX_VDEVS] + = { { 0 } }; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_obj) { + wifi_pos_err("wifi_pos priv obj is null"); + return QDF_STATUS_E_INVAL; + } + + wifi_pos_err("Received App Req Req pid(%d), len(%d)", + req->pid, req->buf_len); + + sign_str = (char *)req->buf; + /* Registration request is only allowed for QTI Application */ + if ((OEM_APP_SIGNATURE_LEN != req->buf_len) || + (strncmp(sign_str, OEM_APP_SIGNATURE_STR, + OEM_APP_SIGNATURE_LEN))) { + wifi_pos_err("Invalid signature pid(%d)", req->pid); + ret = QDF_STATUS_E_PERM; + err = OEM_ERR_INVALID_SIGNATURE; + goto app_reg_failed; + } + + wifi_pos_debug("Valid App Req Req from pid(%d)", req->pid); + qdf_spin_lock_bh(&wifi_pos_obj->wifi_pos_lock); + wifi_pos_obj->is_app_registered = true; + wifi_pos_obj->app_pid = req->pid; + qdf_spin_unlock_bh(&wifi_pos_obj->wifi_pos_lock); + + vdev_idx = 0; + wlan_objmgr_iterate_obj_list(psoc, WLAN_VDEV_OP, + wifi_pos_vdev_iterator, + vdevs_info, true, WLAN_WIFI_POS_CORE_ID); + + app_reg_rsp = wifi_pos_prepare_reg_resp(&rsp_len, vdevs_info); + if (!app_reg_rsp) { + ret = QDF_STATUS_E_NOMEM; + err = OEM_ERR_NULL_CONTEXT; + goto app_reg_failed; + } + + if (!vdev_idx) + wifi_pos_debug("no active vdev"); + + vdev_idx = 0; + wifi_pos_obj->wifi_pos_send_rsp(req->pid, WIFI_POS_CMD_REGISTRATION, + rsp_len, (uint8_t *)app_reg_rsp); + + qdf_mem_free(app_reg_rsp); + return ret; + +app_reg_failed: + + wifi_pos_obj->wifi_pos_send_rsp(req->pid, WIFI_POS_CMD_ERROR, + sizeof(err), &err); + return ret; +} + +/** + * wifi_pos_tlv_callback: wifi pos msg handler registered for TLV type req + * @wmi_msg: wmi type request msg + * + * Return: status of operation + */ +static QDF_STATUS wifi_pos_tlv_callback(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req) +{ + wifi_pos_debug("enter: msg_type: %d", req->msg_type); + switch (req->msg_type) { + case WIFI_POS_CMD_REGISTRATION: + return wifi_pos_process_app_reg_req(psoc, req); + case WIFI_POS_CMD_OEM_DATA: + return wifi_pos_process_data_req(psoc, req); + case WIFI_POS_CMD_GET_CH_INFO: + return wifi_pos_process_ch_info_req(psoc, req); + case WIFI_POS_CMD_SET_CAPS: + return wifi_pos_process_set_cap_req(psoc, req); + case WIFI_POS_CMD_GET_CAPS: + return wifi_pos_process_get_cap_req(psoc, req); + default: + wifi_pos_err("invalid request type"); + break; + } + return 0; +} + +/** + * wifi_pos_non_tlv_callback: wifi pos msg handler registered for non-TLV + * type req + * @wmi_msg: wmi type request msg + * + * Return: status of operation + */ +static QDF_STATUS wifi_pos_non_tlv_callback(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wifi_pos_psoc_obj_created_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list) +{ + QDF_STATUS status; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj; + + /* + * this is for WIN, if they have multiple psoc, we dont want to create + * multiple priv object. Since there is just one LOWI app registered to + * one driver, avoid 2nd private object with another psoc. + */ + if (wifi_pos_get_psoc()) { + wifi_pos_debug("global psoc obj already set. do not allocate another psoc private object"); + return QDF_STATUS_SUCCESS; + } else { + wifi_pos_debug("setting global pos object"); + wifi_pos_set_psoc(psoc); + } + + /* initialize wifi-pos psoc priv object */ + wifi_pos_obj = qdf_mem_malloc(sizeof(*wifi_pos_obj)); + if (!wifi_pos_obj) { + wifi_pos_clear_psoc(); + return QDF_STATUS_E_NOMEM; + } + + qdf_spinlock_create(&wifi_pos_obj->wifi_pos_lock); + /* Register TLV or non-TLV callbacks depending on target fw version */ + if (wifi_pos_get_tlv_support(psoc)) + wifi_pos_obj->wifi_pos_req_handler = wifi_pos_tlv_callback; + else + wifi_pos_obj->wifi_pos_req_handler = wifi_pos_non_tlv_callback; + + /* + * MGMT Rx is not handled in this phase since wifi pos only uses few + * measurement subtypes under RRM_RADIO_MEASURE_REQ. Rest of them are + * used for 80211k. That part is not yet converged and still follows + * legacy MGMT Rx to work. Action frame in new TXRX can be registered + * at per ACTION Frame type granularity only. + */ + + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_WIFI_POS, + wifi_pos_obj, + QDF_STATUS_SUCCESS); + + if (QDF_IS_STATUS_ERROR(status)) { + wifi_pos_err("obj attach with psoc failed with status: %d", + status); + qdf_spinlock_destroy(&wifi_pos_obj->wifi_pos_lock); + qdf_mem_free(wifi_pos_obj); + wifi_pos_clear_psoc(); + } + + return status; +} + +QDF_STATUS wifi_pos_psoc_obj_destroyed_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list) +{ + QDF_STATUS status; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj = NULL; + + if (wifi_pos_get_psoc() == psoc) { + wifi_pos_debug("deregistering wifi_pos_psoc object"); + wifi_pos_clear_psoc(); + } else { + wifi_pos_warn("un-related PSOC closed. do nothing"); + return QDF_STATUS_SUCCESS; + } + + wifi_pos_obj = wifi_pos_get_psoc_priv_obj(psoc); + if (!wifi_pos_obj) { + wifi_pos_err("wifi_pos_obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + target_if_wifi_pos_deinit_dma_rings(psoc); + + status = wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_WIFI_POS, + wifi_pos_obj); + if (status != QDF_STATUS_SUCCESS) + wifi_pos_err("wifi_pos_obj detach failed"); + + wifi_pos_debug("wifi_pos_obj deleted with status %d", status); + qdf_spinlock_destroy(&wifi_pos_obj->wifi_pos_lock); + qdf_mem_free(wifi_pos_obj); + + return status; +} + +int wifi_pos_oem_rsp_handler(struct wlan_objmgr_psoc *psoc, + struct oem_data_rsp *oem_rsp) +{ + uint32_t len; + uint8_t *data; + uint32_t app_pid; + struct wifi_pos_psoc_priv_obj *priv = + wifi_pos_get_psoc_priv_obj(psoc); + wifi_pos_send_rsp_handler wifi_pos_send_rsp; + + if (!priv) { + wifi_pos_err("private object is NULL"); + return -EINVAL; + } + + qdf_spin_lock_bh(&priv->wifi_pos_lock); + app_pid = priv->app_pid; + wifi_pos_send_rsp = priv->wifi_pos_send_rsp; + qdf_spin_unlock_bh(&priv->wifi_pos_lock); + + len = oem_rsp->rsp_len_1 + oem_rsp->rsp_len_2 + oem_rsp->dma_len; + if (oem_rsp->rsp_len_1 > OEM_DATA_RSP_SIZE || + oem_rsp->rsp_len_2 > OEM_DATA_RSP_SIZE) { + wifi_pos_err("invalid length of Oem Data response"); + return -EINVAL; + } + + if (!wifi_pos_send_rsp) { + wifi_pos_err("invalid response handler"); + return -EINVAL; + } + + wifi_pos_debug("oem data rsp, len: %d to pid: %d", len, app_pid); + + if (oem_rsp->rsp_len_2 + oem_rsp->dma_len) { + /* stitch togther the msg data_1 + CIR/CFR + data_2 */ + data = qdf_mem_malloc(len); + if (!data) + return -ENOMEM; + + qdf_mem_copy(data, oem_rsp->data_1, oem_rsp->rsp_len_1); + qdf_mem_copy(&data[oem_rsp->rsp_len_1], + oem_rsp->vaddr, oem_rsp->dma_len); + qdf_mem_copy(&data[oem_rsp->rsp_len_1 + oem_rsp->dma_len], + oem_rsp->data_2, oem_rsp->rsp_len_2); + + wifi_pos_send_rsp(app_pid, WIFI_POS_CMD_OEM_DATA, len, data); + qdf_mem_free(data); + } else { + wifi_pos_send_rsp(app_pid, WIFI_POS_CMD_OEM_DATA, + oem_rsp->rsp_len_1, oem_rsp->data_1); + } + + return 0; +} + +void wifi_pos_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_wifi_pos_rx_ops *wifi_pos_rx_ops; + + wifi_pos_rx_ops = &rx_ops->wifi_pos_rx_ops; + wifi_pos_rx_ops->oem_rsp_event_rx = wifi_pos_oem_rsp_handler; +} + +QDF_STATUS wifi_pos_populate_caps(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_driver_caps *caps) +{ + uint16_t i, count = 0; + uint32_t freq; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj = + wifi_pos_get_psoc_priv_obj(psoc); + struct wifi_pos_channel_list *ch_list = NULL; + + wifi_pos_debug("Enter"); + if (!wifi_pos_obj) { + wifi_pos_err("wifi_pos_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + ch_list = qdf_mem_malloc(sizeof(*ch_list)); + if (!ch_list) + return QDF_STATUS_E_NOMEM; + + strlcpy(caps->oem_target_signature, + OEM_TARGET_SIGNATURE, + OEM_TARGET_SIGNATURE_LEN); + caps->oem_target_type = wifi_pos_obj->oem_target_type; + caps->oem_fw_version = wifi_pos_obj->oem_fw_version; + caps->driver_version.major = wifi_pos_obj->driver_version.major; + caps->driver_version.minor = wifi_pos_obj->driver_version.minor; + caps->driver_version.patch = wifi_pos_obj->driver_version.patch; + caps->driver_version.build = wifi_pos_obj->driver_version.build; + caps->allowed_dwell_time_min = wifi_pos_obj->allowed_dwell_time_min; + caps->allowed_dwell_time_max = wifi_pos_obj->allowed_dwell_time_max; + caps->curr_dwell_time_min = wifi_pos_obj->current_dwell_time_min; + caps->curr_dwell_time_max = wifi_pos_obj->current_dwell_time_max; + caps->supported_bands = wlan_objmgr_psoc_get_band_capability(psoc); + wifi_pos_get_ch_info(psoc, ch_list); + + /* copy valid channels list to caps */ + for (i = 0; i < ch_list->num_channels; i++) { + freq = ch_list->chan_info[i].center_freq; + if (WLAN_REG_IS_6GHZ_CHAN_FREQ(freq)) + continue; + caps->channel_list[count++] = ch_list->chan_info[i].chan_num; + } + caps->num_channels = count; + qdf_mem_free(ch_list); + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_main_i.h b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_main_i.h new file mode 100644 index 0000000000000000000000000000000000000000..227c2ceb1d4829c945fb31afc5c431f05b8b4c10 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_main_i.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2017, 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wifi_pos_main_i.h + * This file prototyps the important functions pertinent to wifi positioning + * component. + */ + +#ifndef _WIFI_POS_MAIN_H_ +#define _WIFI_POS_MAIN_H_ + +#ifdef CNSS_GENL +#define ENHNC_FLAGS_LEN 4 +#define NL_ENABLE_OEM_REQ_RSP 0x00000001 +#endif + +/* forward reference */ +struct wlan_objmgr_psoc; + +/** + * wifi_pos_psoc_obj_created_notification: callback registered to be called when + * psoc object is created. + * @psoc: pointer to psoc object just created + * @arg_list: argument list + * + * This function will: + * create WIFI POS psoc object and attach to psoc + * register TLV vs nonTLV callbacks + * Return: status of operation + */ +QDF_STATUS wifi_pos_psoc_obj_created_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list); + +/** + * wifi_pos_psoc_obj_destroyed_notification: callback registered to be called + * when psoc object is destroyed. + * @psoc: pointer to psoc object just about to be destroyed + * @arg_list: argument list + * + * This function will: + * detach WIFI POS from psoc object and free + * Return: status of operation + */ +QDF_STATUS wifi_pos_psoc_obj_destroyed_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list); + +/** + * wifi_pos_oem_rsp_handler: lmac rx ops registered + * @psoc: pointer to psoc object + * @oem_rsp: response from firmware + * + * Return: status of operation + */ +int wifi_pos_oem_rsp_handler(struct wlan_objmgr_psoc *psoc, + struct oem_data_rsp *oem_rsp); + +/** + * wifi_pos_get_tx_ops: api to get tx ops + * @psoc: pointer to psoc object + * + * Return: tx ops + */ +struct wlan_lmac_if_wifi_pos_tx_ops * + wifi_pos_get_tx_ops(struct wlan_objmgr_psoc *psoc); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_oem_interface_i.h b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_oem_interface_i.h new file mode 100644 index 0000000000000000000000000000000000000000..53ace2d270d650376667f4afa928f02632946c4e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_oem_interface_i.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wifi_pos_oem_interface.h + * This file defines the interface between host driver and userspace layer. + */ +#ifndef _WIFI_POS_OEM_INTERFACE_H_ +#define _WIFI_POS_OEM_INTERFACE_H_ + +/* Include files */ +#include "qdf_types.h" +#include "qdf_status.h" +#include "wlan_objmgr_cmn.h" + +#define TARGET_OEM_CAPABILITY_REQ 0x01 +#define TARGET_OEM_CAPABILITY_RSP 0x02 +#define TARGET_OEM_MEASUREMENT_REQ 0x03 +#define TARGET_OEM_MEASUREMENT_RSP 0x04 +#define TARGET_OEM_ERROR_REPORT_RSP 0x05 +#define TARGET_OEM_NAN_MEAS_REQ 0x06 +#define TARGET_OEM_NAN_MEAS_RSP 0x07 +#define TARGET_OEM_NAN_PEER_INFO 0x08 +#define TARGET_OEM_CONFIGURE_LCR 0x09 +#define TARGET_OEM_CONFIGURE_LCI 0x0A +#define TARGET_OEM_CONFIGURE_WRU 0x80 +#define TARGET_OEM_CONFIGURE_FTMRR 0x81 + +#define WIFI_POS_FLAG_DFS 10 +#define WIFI_POS_SET_DFS(info) (info |= (1 << WIFI_POS_FLAG_DFS)) + +/** + * enum WMIRTT_FIELD_ID - identifies which field is being specified + * @WMIRTT_FIELD_ID_oem_data_sub_type: oem data req sub type + * @WMIRTT_FIELD_ID_channel_mhz: channel mhz info + * @WMIRTT_FIELD_ID_pdev: pdev info + */ +enum WMIRTT_FIELD_ID { + WMIRTT_FIELD_ID_oem_data_sub_type, + WMIRTT_FIELD_ID_channel_mhz, + WMIRTT_FIELD_ID_pdev, +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_ucfg.c b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_ucfg.c new file mode 100644 index 0000000000000000000000000000000000000000..00f82358380acf4027ab3a41dec122a38807a89e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_ucfg.c @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2017-2018, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * This file defines the important dispatcher APIs pertinent to + * wifi positioning. + */ +#include "wifi_pos_utils_i.h" +#include "wifi_pos_api.h" +#include "wifi_pos_ucfg_i.h" +#include "wlan_ptt_sock_svc.h" + +QDF_STATUS ucfg_wifi_pos_process_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req, + wifi_pos_send_rsp_handler send_rsp_cb) +{ + uint8_t err; + uint32_t app_pid; + bool is_app_registered; + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc_obj = + wifi_pos_get_psoc_priv_obj(psoc); + + wifi_pos_debug("enter"); + + if (!wifi_pos_psoc_obj) { + wifi_pos_err("wifi_pos_psoc_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + qdf_spin_lock_bh(&wifi_pos_psoc_obj->wifi_pos_lock); + wifi_pos_psoc_obj->wifi_pos_send_rsp = send_rsp_cb; + is_app_registered = wifi_pos_psoc_obj->is_app_registered; + app_pid = wifi_pos_psoc_obj->app_pid; + wifi_pos_psoc_obj->rsp_version = req->rsp_version; + qdf_spin_unlock_bh(&wifi_pos_psoc_obj->wifi_pos_lock); + + if (!wifi_pos_psoc_obj->wifi_pos_req_handler) { + wifi_pos_err("wifi_pos_psoc_obj->wifi_pos_req_handler is null"); + err = OEM_ERR_NULL_CONTEXT; + send_rsp_cb(app_pid, WIFI_POS_CMD_ERROR, sizeof(err), &err); + return QDF_STATUS_E_NULL_VALUE; + } + + if (req->msg_type != WIFI_POS_CMD_REGISTRATION && + (!is_app_registered || app_pid != req->pid)) { + wifi_pos_err("requesting app is not registered, app_registered: %d, requesting pid: %d, stored pid: %d", + is_app_registered, req->pid, app_pid); + err = OEM_ERR_APP_NOT_REGISTERED; + send_rsp_cb(app_pid, WIFI_POS_CMD_ERROR, sizeof(err), &err); + return QDF_STATUS_E_INVAL; + } + + return wifi_pos_psoc_obj->wifi_pos_req_handler(psoc, req); +} + + +uint32_t ucfg_wifi_pos_get_ftm_cap(struct wlan_objmgr_psoc *psoc) +{ + uint32_t val = 0; + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_alert("unable to get wifi_pos psoc obj"); + return val; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + val = wifi_pos_psoc->fine_time_meas_cap; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); + + return val; +} + +void ucfg_wifi_pos_set_ftm_cap(struct wlan_objmgr_psoc *psoc, uint32_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_alert("unable to get wifi_pos psoc obj"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->fine_time_meas_cap = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void ucfg_wifi_pos_set_oem_6g_supported(struct wlan_objmgr_psoc *psoc, + bool val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + if (!wifi_pos_psoc) { + wifi_pos_alert("unable to get wifi_pos psoc obj"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->oem_6g_support_disable = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +bool ucfg_wifi_pos_is_nl_rsp(struct wlan_objmgr_psoc *psoc) +{ + uint32_t val = 0; + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_alert("unable to get wifi_pos psoc obj"); + return false; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + val = wifi_pos_psoc->rsp_version; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); + + if (val == WIFI_POS_RSP_V2_NL) + return true; + else + return false; + +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_ucfg_i.h b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_ucfg_i.h new file mode 100644 index 0000000000000000000000000000000000000000..730be472f4104de06386f5f2912042099dc6d668 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_ucfg_i.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2017, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wifi_pos_ucfg_i.h + * This file prototyps the important functions pertinent to wifi positioning + * component. + */ + +#ifndef _WIFI_POS_UCFG_H_ +#define _WIFI_POS_UCFG_H_ + +#include "qdf_types.h" +#include "qdf_status.h" + +struct wlan_objmgr_psoc; +struct wifi_pos_req_msg; + +/** + * ucfg_wifi_pos_process_req: ucfg API to be called from HDD/OS_IF to process a + * wifi_pos request from userspace + * @psoc: pointer to psoc object + * @req: wifi_pos request msg + * @send_rsp_cb: callback pointer required to send msg to userspace + * + * Return: status of operation + */ +QDF_STATUS ucfg_wifi_pos_process_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req, + wifi_pos_send_rsp_handler send_rsp_cb); + +#endif /* _WIFI_POS_UCFG_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_utils.c b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_utils.c new file mode 100644 index 0000000000000000000000000000000000000000..d4144d01048e1eeb43fbd61f9882ae1ed6be762d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_utils.c @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wifi_pos_utils.c + * This file defines the utility helper functions for wifi_pos component. + */ + +#include "qdf_types.h" +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_global_obj.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wifi_pos_utils_i.h" + +/* lock to protect use of psoc global pointer variable */ +static qdf_spinlock_t psoc_ptr_lock; + +/* + * WIFI pos command are not associated with any pdev/psoc/vdev, so the callback + * registered with GENL socket does not receive any pdev/pdev/vdev object. + * Since PSOC is top most object, it was decided to keep WIFI POS private obj + * within PSOC and hence, this module need to hang on to the first PSOC that + * was created for all its internal usage. + */ +static struct wlan_objmgr_psoc *wifi_pos_psoc_obj; + +void wifi_pos_lock_init(void) +{ + qdf_spinlock_create(&psoc_ptr_lock); +} + +void wifi_pos_lock_deinit(void) +{ + qdf_spinlock_destroy(&psoc_ptr_lock); +} + +struct wlan_objmgr_psoc *wifi_pos_get_psoc(void) +{ + struct wlan_objmgr_psoc *tmp; + + qdf_spin_lock_bh(&psoc_ptr_lock); + tmp = wifi_pos_psoc_obj; + qdf_spin_unlock_bh(&psoc_ptr_lock); + + return tmp; +} + +qdf_export_symbol(wifi_pos_get_psoc); + +void wifi_pos_set_psoc(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_psoc *tmp; + + qdf_spin_lock_bh(&psoc_ptr_lock); + tmp = wifi_pos_psoc_obj; + if (!wifi_pos_psoc_obj) + wifi_pos_psoc_obj = psoc; + qdf_spin_unlock_bh(&psoc_ptr_lock); + + if (tmp) + wifi_pos_warn("global psoc obj already set"); +} + +void wifi_pos_clear_psoc(void) +{ + struct wlan_objmgr_psoc *tmp; + + qdf_spin_lock_bh(&psoc_ptr_lock); + tmp = wifi_pos_psoc_obj; + if (wifi_pos_psoc_obj) + wifi_pos_psoc_obj = NULL; + qdf_spin_unlock_bh(&psoc_ptr_lock); + + if (!tmp) + wifi_pos_warn("global psoc obj already cleared"); +} + +/** + * wifi_pos_get_psoc_priv_obj: returns wifi_pos priv object within psoc + * @psoc: pointer to psoc object + * + * Return: wifi_pos_psoc_priv_obj + */ +struct wifi_pos_psoc_priv_obj *wifi_pos_get_psoc_priv_obj( + struct wlan_objmgr_psoc *psoc) +{ + struct wifi_pos_psoc_priv_obj *obj; + + obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_WIFI_POS); + + return obj; +} + +qdf_export_symbol(wifi_pos_get_psoc_priv_obj); diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_utils_i.h b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_utils_i.h new file mode 100644 index 0000000000000000000000000000000000000000..465de82a8ef766aaa8de07e352b8f66f03e34df0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_utils_i.h @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wifi_pos_utils_i.h + * This file defines the prototypes for the utility helper functions + * for the wifi_pos component. + */ + +#ifdef WIFI_POS_CONVERGED +#ifndef _WIFI_POS_UTILS_H_ +#define _WIFI_POS_UTILS_H_ +/* Include files */ +#include "qdf_types.h" +#include "qdf_status.h" +#include "ol_defines.h" +#include "qdf_trace.h" +#include "qdf_module.h" +#include "wifi_pos_utils_pub.h" + +struct wlan_objmgr_psoc; +struct wifi_pos_req_msg; + +#define wifi_pos_alert(params...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_WIFIPOS, params) +#define wifi_pos_err(params...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_WIFIPOS, params) +#define wifi_pos_warn(params...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_WIFIPOS, params) +#define wifi_pos_notice(params...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_WIFIPOS, params) +#define wifi_pos_debug(params...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_WIFIPOS, params) + +#define wifipos_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_WIFIPOS, params) +#define wifipos_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_WIFIPOS, params) +#define wifipos_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_WIFIPOS, params) +#define wifipos_nofl_notice(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_WIFIPOS, params) +#define wifipos_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_WIFIPOS, params) + +#define OEM_APP_SIGNATURE_LEN 16 +#define OEM_APP_SIGNATURE_STR "QUALCOMM-OEM-APP" + +#ifndef OEM_DATA_RSP_SIZE +#define OEM_DATA_RSP_SIZE 1724 +/* Header + VHT80 CIR * 2 chains */ +#define OEM_DATA_DMA_BUFF_SIZE (64 + 512 * 4 * 2) +#endif + +/** + * struct app_reg_rsp_vdev_info - vdev info struct + * @dev_mode: device mode + * @vdev_id: vdev id + * + */ +struct qdf_packed app_reg_rsp_vdev_info { + uint8_t dev_mode; + uint8_t vdev_id; +}; + +/** + * struct wifi_app_reg_rsp - app registration response struct + * @num_inf: number of interfaces active + * @vdevs: array indicating all active vdev's information + * + */ +struct qdf_packed wifi_app_reg_rsp { + uint8_t num_inf; + struct app_reg_rsp_vdev_info vdevs[1]; +}; + +/** + * struct oem_data_req - data request to be sent to firmware + * @data_len: len of data + * @data: buffer containing data + * + */ +struct oem_data_req { + uint32_t data_len; + uint8_t *data; +}; + +/** + * struct oem_data_rsp - response from firmware to data request sent earlier + * @rsp_len_1: len of data_1 + * @data_1: first part of payload + * @rsp_len_2: len of data_2 + * @data_2: second part of payload + * @dma_len: len of DMAed data + * @vaddr: virtual address of DMA data start + * + */ +struct oem_data_rsp { + uint32_t rsp_len_1; + uint8_t *data_1; + uint32_t rsp_len_2; + uint8_t *data_2; + uint32_t dma_len; + void *vaddr; +}; + +/** + * struct wifi_pos_err_rpt - Error report response for userspace. + * @tag_len: tlv header of the message. + * @info: Report info. Reserved for error report. + * @dest_mac: Mac address of the sta in the request. + * @reserved: Reserved in error report. + */ +struct qdf_packed wifi_pos_err_rpt { + uint32_t tag_len; + uint32_t info; + uint8_t dest_mac[QDF_MAC_ADDR_SIZE + 2]; + uint32_t reserved; +}; + +#define OEM_MSG_RSP_HEAD_TAG_ID 33 +#define OEM_MEAS_RSP_HEAD_TAG_ID 41 +/** + * struct wifi_pos_err_msg_report - Error report message + * @msg_tag_len: Message tlv header + * @msg_subtype: Message subtype + * @req_id: id corresponding to the request. + * @fragment_info: Valid only for fragments. + * @pdev_id: pdev_id of radion. + * @time_left: time left in the measurment req. + * @err_rpt: Error report data. + */ +struct qdf_packed wifi_pos_err_msg_report { + uint32_t msg_tag_len; + uint32_t msg_subtype; + uint32_t req_id; + uint32_t fragment_info; + uint32_t pdev_id; + uint32_t time_left; + struct wifi_pos_err_rpt err_rpt; +}; + +/** + * struct wifi_pos_dma_rings_cap - capabilities requested by firmware. + * @pdev_id: pdev_id or mac_id of ring + * @min_num_ptr: minimum depth of ring required + * @min_buf_size: minimum size of each buffer + * @min_buf_align: minimum allignment of buffer memory + */ +struct wifi_pos_dma_rings_cap { + uint32_t pdev_id; + uint32_t min_num_ptr; + uint32_t min_buf_size; + uint32_t min_buf_align; +}; + +/** + * struct wifi_pos_dma_buf_info - buffer info struct containing phy to virtual + * mapping. + * @cookie: this identifies location of DMA buffer in pool array + * @paddr: aligned physical address as exchanged with firmware + * @vaddr: virtual address - unaligned. this helps in freeing later + * @offset: offset of aligned address from unaligned + */ +struct wifi_pos_dma_buf_info { + uint32_t cookie; + void *paddr; + void *vaddr; + uint8_t offset; +}; + +/** + * struct wifi_pos_dma_rings_cfg - DMA ring parameters to be programmed to FW. + * @pdev_id: pdev_id of ring + * @num_ptr: depth of ring + * @base_paddr_unaligned: base physical addr unaligned + * @base_vaddr_unaligned: base virtual addr unaligned + * @base_paddr_aligned: base physical addr aligned + * @base_vaddr_aligned: base virtual addr unaligned + * @head_idx_addr: head index addr + * @tail_idx_addr: tail index addr + * @srng: hal srng + */ +struct wifi_pos_dma_rings_cfg { + uint32_t pdev_id; + uint32_t num_ptr; + uint32_t ring_alloc_size; + void *base_paddr_unaligned; + void *base_vaddr_unaligned; + void *base_paddr_aligned; + void *base_vaddr_aligned; + void *head_idx_addr; + void *tail_idx_addr; + void *srng; +}; + +typedef void (*wifi_pos_send_rsp_handler)(uint32_t, enum wifi_pos_cmd_ids, + uint32_t, uint8_t *); + +/** + * struct wifi_pos_psoc_priv_obj - psoc obj data for wifi_pos + * @app_pid: pid of app registered to host driver + * @is_app_registered: indicates if app is registered + * @fine_time_meas_cap: FTM cap for different roles, reflection of ini + * @ftm_rr: configured value of FTM Ranging Request capability + * @lci_capability: configured value of LCI capability + * @rsvd: reserved + * @oem_target_type + * @oem_target_type: oem target type, populated from HDD + * @oem_fw_version: firmware version, populated from HDD + * @driver_version: driver version, populated from HDD + * @allowed_dwell_time_min: allowed dwell time min, populated from HDD + * @allowed_dwell_time_max: allowed dwell time max, populated from HDD + * @current_dwell_time_min: current dwell time min, populated from HDD + * @current_dwell_time_max: current dwell time max, populated from HDD + * @hal_soc: hal_soc + * @num_rings: DMA ring cap requested by firmware + * @dma_cap: dma cap as read from service ready ext event + * @dma_cfg: DMA ring cfg to be programmed to firmware + * @dma_buf_pool: DMA buffer pools maintained at host: this will be 2-D array + * where with num_rows = number of rings num_elements in each row = ring depth + * @wifi_pos_lock: lock to access wifi pos priv object + * @oem_6g_support_disable: oem target 6ghz support is disabled if set + * @wifi_pos_req_handler: function pointer to handle TLV or non-TLV + * @wifi_pos_send_rsp: function pointer to send msg to userspace APP + * @wifi_pos_get_phy_mode: function pointer to get wlan phymode for given + * channel, channel width + * @wifi_pos_get_fw_phy_mode_for_freq: function pointer to get fw phymode + * for given freq and channel width + * @wifi_pos_send_action: function pointer to send registered action frames + * to userspace APP + * @rsp_version: rsp version + * + * wifi pos request messages + * <----- fine_time_meas_cap (in bits) -----> + *+----------+-----+-----+------+------+-------+-------+-----+-----+ + *| 8-31 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + *+----------+-----+-----+------+------+-------+-------+-----+-----+ + *| reserved | SAP | SAP |P2P-GO|P2P-GO|P2P-CLI|P2P-CLI| STA | STA | + *| |resp |init |resp |init |resp |init |resp |init | + *+----------+-----+-----+------+------+-------+-------+-----+-----+ + * resp - responder role; init- initiator role + * + */ +struct wifi_pos_psoc_priv_obj { + uint32_t app_pid; + bool is_app_registered; + uint32_t fine_time_meas_cap; + uint32_t ftm_rr:1; + uint32_t lci_capability:1; + uint32_t rsvd:30; + + uint32_t oem_target_type; + uint32_t oem_fw_version; + struct wifi_pos_driver_version driver_version; + uint16_t allowed_dwell_time_min; + uint16_t allowed_dwell_time_max; + uint16_t current_dwell_time_min; + uint16_t current_dwell_time_max; + + void *hal_soc; + uint8_t num_rings; + struct wifi_pos_dma_rings_cap *dma_cap; + struct wifi_pos_dma_rings_cfg *dma_cfg; + struct wifi_pos_dma_buf_info **dma_buf_pool; + + qdf_spinlock_t wifi_pos_lock; + bool oem_6g_support_disable; + QDF_STATUS (*wifi_pos_req_handler)(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req); + wifi_pos_send_rsp_handler wifi_pos_send_rsp; + void (*wifi_pos_get_phy_mode)(uint8_t, uint32_t, uint32_t *); + void (*wifi_pos_get_fw_phy_mode_for_freq)(uint32_t, uint32_t, + uint32_t *); + void (*wifi_pos_send_action)(struct wlan_objmgr_psoc *psoc, + uint32_t oem_subtype, uint8_t *buf, + uint32_t len); + uint32_t rsp_version; +}; + +/** + * wifi_pos_get_psoc_priv_obj: API to get wifi_psoc private object + * @psoc: pointer to psoc object + * + * Return: psoc private object on success, NULL otherwise + */ +struct wifi_pos_psoc_priv_obj *wifi_pos_get_psoc_priv_obj( + struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_lock_init: API to init lock used protect use of psoc global pointer + * variable + * + * Return: none. + */ +void wifi_pos_lock_init(void); + +/** + * wifi_pos_lock_deinit: API to deinit lock used protect use of psoc global + * pointer variable + * + * Return: none. + */ +void wifi_pos_lock_deinit(void); + +/** + * wifi_pos_set_psoc: API to set global PSOC object + * @psoc: pointer to psoc object + * + * Since request from userspace is not associated with any vdev/pdev/psoc, this + * API is used to set global psoc object. + * + * Return: none. + */ +void wifi_pos_set_psoc(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_get_psoc: API to get global PSOC object + * + * Since request from userspace is not associated with any vdev/pdev/psoc, this + * API is used to get global psoc object. + * Return: global psoc object. + */ +struct wlan_objmgr_psoc *wifi_pos_get_psoc(void); + +/** + * wifi_pos_get_psoc: API to clear global PSOC object + * + * Return: none. + */ +void wifi_pos_clear_psoc(void); + +/** + * wifi_pos_populate_caps: API to get OEM caps + * @psoc: psoc object + * @caps: capabilities buffer to populate + * + * Return: status of operation. + */ +QDF_STATUS wifi_pos_populate_caps(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_driver_caps *caps); + +/** + * wifi_pos_get_app_pid: returns oem app pid. + * @psoc: pointer to psoc object + * + * Return: oem app pid + */ +uint32_t wifi_pos_get_app_pid(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_is_app_registered: indicates if oem app is registered. + * @psoc: pointer to psoc object + * + * Return: true if app is registered, false otherwise + */ +bool wifi_pos_is_app_registered(struct wlan_objmgr_psoc *psoc); + +#endif /* _WIFI_POS_UTILS_H_ */ +#endif /* WIFI_POS_CONVERGED */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/epping/inc/epping_internal.h b/drivers/staging/qca-wifi-host-cmn/utils/epping/inc/epping_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..ff1960e9423f28a6099f4528fa83d72953e9c16c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/epping/inc/epping_internal.h @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef EPPING_INTERNAL_H +#define EPPING_INTERNAL_H +/**=========================================================================== + + \file epping_internal.h + + \brief Linux epping internal head file + + ==========================================================================*/ + +/*--------------------------------------------------------------------------- + Include files + -------------------------------------------------------------------------*/ + +#include +#include +#include +#include +#include +#if defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK) +#include +#endif +#include "htc_api.h" +#include "htc_packet.h" +#include "epping_test.h" +#include +#include +#include + +#define EPPING_LOG_MASK (1< +#include + +/* epping_main signatures */ +#ifdef WLAN_FEATURE_EPPING +int epping_open(void); +void epping_close(void); +void epping_disable(void); +int epping_enable(struct device *parent_dev, bool rtnl_held); +void epping_enable_adapter(void); +#else +static inline int epping_open(void) +{ + return QDF_STATUS_E_INVAL; +} + +static inline int epping_enable(struct device *parent_dev, bool rtnl_held) +{ + return QDF_STATUS_E_INVAL; +} + +static inline void epping_close(void) {} +static inline void epping_disable(void) {} +static inline void epping_enable_adapter(void) {} +#endif +#endif /* end #ifndef EPPING_MAIN_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_helper.c b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_helper.c new file mode 100644 index 0000000000000000000000000000000000000000..a75878af451fc13554419669ed6e1b46dcdd9686 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_helper.c @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/*======================================================================== + + \file epping_main.c + + \brief WLAN End Point Ping test tool implementation + + ========================================================================*/ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "epping_main.h" +#include "epping_internal.h" + +int epping_cookie_init(epping_context_t *pEpping_ctx) +{ + uint32_t i, j; + + pEpping_ctx->cookie_list = NULL; + pEpping_ctx->cookie_count = 0; + for (i = 0; i < MAX_COOKIE_SLOTS_NUM; i++) { + pEpping_ctx->s_cookie_mem[i] = + qdf_mem_malloc(sizeof(struct epping_cookie) * + MAX_COOKIE_SLOT_SIZE); + if (!pEpping_ctx->s_cookie_mem[i]) + goto error; + } + qdf_spinlock_create(&pEpping_ctx->cookie_lock); + + for (i = 0; i < MAX_COOKIE_SLOTS_NUM; i++) { + struct epping_cookie *cookie_mem = pEpping_ctx->s_cookie_mem[i]; + for (j = 0; j < MAX_COOKIE_SLOT_SIZE; j++) { + epping_free_cookie(pEpping_ctx, &cookie_mem[j]); + } + } + return 0; +error: + for (i = 0; i < MAX_COOKIE_SLOTS_NUM; i++) { + if (pEpping_ctx->s_cookie_mem[i]) { + qdf_mem_free(pEpping_ctx->s_cookie_mem[i]); + pEpping_ctx->s_cookie_mem[i] = NULL; + } + } + return -ENOMEM; +} + +/* cleanup cookie queue */ +void epping_cookie_cleanup(epping_context_t *pEpping_ctx) +{ + int i; + qdf_spin_lock_bh(&pEpping_ctx->cookie_lock); + pEpping_ctx->cookie_list = NULL; + pEpping_ctx->cookie_count = 0; + qdf_spin_unlock_bh(&pEpping_ctx->cookie_lock); + for (i = 0; i < MAX_COOKIE_SLOTS_NUM; i++) { + if (pEpping_ctx->s_cookie_mem[i]) { + qdf_mem_free(pEpping_ctx->s_cookie_mem[i]); + pEpping_ctx->s_cookie_mem[i] = NULL; + } + } +} + +void epping_free_cookie(epping_context_t *pEpping_ctx, + struct epping_cookie *cookie) +{ + qdf_spin_lock_bh(&pEpping_ctx->cookie_lock); + cookie->next = pEpping_ctx->cookie_list; + pEpping_ctx->cookie_list = cookie; + pEpping_ctx->cookie_count++; + qdf_spin_unlock_bh(&pEpping_ctx->cookie_lock); +} + +struct epping_cookie *epping_alloc_cookie(epping_context_t *pEpping_ctx) +{ + struct epping_cookie *cookie; + + qdf_spin_lock_bh(&pEpping_ctx->cookie_lock); + cookie = pEpping_ctx->cookie_list; + if (cookie) { + pEpping_ctx->cookie_list = cookie->next; + pEpping_ctx->cookie_count--; + } + qdf_spin_unlock_bh(&pEpping_ctx->cookie_lock); + return cookie; +} + +void epping_get_dummy_mac_addr(tSirMacAddr macAddr) +{ + macAddr[0] = 69; /* E */ + macAddr[1] = 80; /* P */ + macAddr[2] = 80; /* P */ + macAddr[3] = 73; /* I */ + macAddr[4] = 78; /* N */ + macAddr[5] = 71; /* G */ +} + +void epping_hex_dump(void *data, int buf_len, const char *str) +{ + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, "%s: E, %s", __func__, str); + + EPPING_HEX_DUMP(QDF_TRACE_LEVEL_INFO, data, buf_len); + + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, "%s: X %s", __func__, str); +} + +void *epping_get_qdf_ctx(void) +{ + qdf_device_t *qdf_ctx; + + qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE); + return qdf_ctx; +} + +void epping_log_packet(epping_adapter_t *adapter, + EPPING_HEADER *eppingHdr, int ret, const char *str) +{ + if (eppingHdr->Cmd_h & EPPING_LOG_MASK) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: cmd = %d, seqNo = %u, flag = 0x%x, ret = %d, " + "txCount = %lu, txDrop = %lu, txBytes = %lu," + "rxCount = %lu, rxDrop = %lu, rxBytes = %lu\n", + str, eppingHdr->Cmd_h, eppingHdr->SeqNo, + eppingHdr->CmdFlags_h, ret, + adapter->stats.tx_packets, + adapter->stats.tx_dropped, + adapter->stats.tx_bytes, + adapter->stats.rx_packets, + adapter->stats.rx_dropped, + adapter->stats.rx_bytes); + } +} + +void epping_log_stats(epping_adapter_t *adapter, const char *str) +{ + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: txCount = %lu, txDrop = %lu, tx_bytes = %lu, " + "rxCount = %lu, rxDrop = %lu, rx_bytes = %lu, tx_acks = %u\n", + str, + adapter->stats.tx_packets, + adapter->stats.tx_dropped, + adapter->stats.tx_bytes, + adapter->stats.rx_packets, + adapter->stats.rx_dropped, + adapter->stats.rx_bytes, + adapter->pEpping_ctx->total_tx_acks); +} + +void epping_set_kperf_flag(epping_adapter_t *adapter, + HTC_ENDPOINT_ID eid, uint8_t kperf_flag) +{ + adapter->pEpping_ctx->kperf_num_rx_recv[eid] = 0; + adapter->pEpping_ctx->kperf_num_tx_acks[eid] = 0; +} diff --git a/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_main.c b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_main.c new file mode 100644 index 0000000000000000000000000000000000000000..7f8ceb83df6522ab0332c228a9947436970ced1e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_main.c @@ -0,0 +1,373 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/*======================================================================== + + \file epping_main.c + + \brief WLAN End Point Ping test tool implementation + + ========================================================================*/ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bmi.h" +#include "ol_fw.h" +#include "ol_if_athvar.h" +#include "hif.h" +#include "epping_main.h" +#include "epping_internal.h" +#include "wlan_policy_mgr_api.h" + +#ifdef TIMER_MANAGER +#define TIMER_MANAGER_STR " +TIMER_MANAGER" +#else +#define TIMER_MANAGER_STR "" +#endif + +#ifdef MEMORY_DEBUG +#define MEMORY_DEBUG_STR " +MEMORY_DEBUG" +#else +#define MEMORY_DEBUG_STR "" +#endif + +#ifdef HIF_SDIO +#define WLAN_WAIT_TIME_WLANSTART 10000 +#else +#define WLAN_WAIT_TIME_WLANSTART 2000 +#endif + +#ifdef WLAN_FEATURE_EPPING +static struct epping_context *g_epping_ctx; + +/** + * epping_open(): End point ping driver open Function + * + * This function is called by HDD to open epping module + * + * + * return - 0 for success, negative for failure + */ +int epping_open(void) +{ + EPPING_LOG(QDF_TRACE_LEVEL_INFO_HIGH, "%s: Enter", __func__); + + g_epping_ctx = qdf_mem_malloc(sizeof(*g_epping_ctx)); + + if (!g_epping_ctx) + return -ENOMEM; + + g_epping_ctx->con_mode = cds_get_conparam(); + return 0; +} + +/** + * epping_disable(): End point ping driver disable Function + * + * This is the driver disable function - called by HDD to + * disable epping module + * + * return: none + */ +void epping_disable(void) +{ + epping_context_t *epping_ctx; + struct hif_opaque_softc *hif_ctx; + HTC_HANDLE htc_handle; + + epping_ctx = g_epping_ctx; + if (!epping_ctx) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: error: epping_ctx = NULL", __func__); + return; + } + + hif_ctx = cds_get_context(QDF_MODULE_ID_HIF); + if (!hif_ctx) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: error: hif_ctx = NULL", __func__); + return; + } + hif_disable_isr(hif_ctx); + hif_reset_soc(hif_ctx); + + htc_handle = cds_get_context(QDF_MODULE_ID_HTC); + if (!htc_handle) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: error: htc_handle = NULL", __func__); + return; + } + htc_stop(htc_handle); + epping_cookie_cleanup(epping_ctx); + htc_destroy(htc_handle); + + if (epping_ctx->epping_adapter) { + epping_destroy_adapter(epping_ctx->epping_adapter); + epping_ctx->epping_adapter = NULL; + } +} + +/** + * epping_close(): End point ping driver close Function + * + * This is the driver close function - called by HDD to close epping module + * + * return: none + */ +void epping_close(void) +{ + epping_context_t *to_free; + + if (!g_epping_ctx) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: error: g_epping_ctx = NULL", __func__); + return; + } + + to_free = g_epping_ctx; + g_epping_ctx = NULL; + qdf_mem_free(to_free); +} + +/** + * epping_target_suspend_acknowledge() - process wow ack/nack from fw + * @context: htc_init_info->context + * @wow_nack: true when wow is rejected + */ +static void epping_target_suspend_acknowledge(void *context, bool wow_nack) +{ + if (!g_epping_ctx) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: epping_ctx is NULL", __func__); + return; + } + /* EPPING_TODO: do we need wow_nack? */ + g_epping_ctx->wow_nack = wow_nack; +} + +#ifdef WLAN_FEATURE_BMI +/** + * epping_update_ol_config - API to update ol configuration parameters + * + * Return: void + */ +static void epping_update_ol_config(void) +{ + struct ol_config_info cfg; + struct ol_context *ol_ctx = cds_get_context(QDF_MODULE_ID_BMI); + + if (!ol_ctx) + return; + + cfg.enable_self_recovery = 0; + cfg.enable_uart_print = 0; + cfg.enable_fw_log = 0; + cfg.enable_ramdump_collection = 0; + cfg.enable_lpass_support = 0; + + ol_init_ini_config(ol_ctx, &cfg); +} + +static +QDF_STATUS epping_bmi_download_fw(struct ol_context *ol_ctx) +{ + epping_update_ol_config(); + + /* Initialize BMI and Download firmware */ + if (bmi_download_firmware(ol_ctx)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL, + "%s: BMI failed to download target", __func__); + bmi_cleanup(ol_ctx); + return QDF_STATUS_E_INVAL; + } + + EPPING_LOG(QDF_TRACE_LEVEL_INFO_HIGH, + "%s: bmi_download_firmware done", __func__); + return QDF_STATUS_SUCCESS; +} +#else +static +QDF_STATUS epping_bmi_download_fw(struct ol_context *ol_ctx) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * epping_enable(): End point ping driver enable Function + * + * This is the driver enable function - called by HDD to enable + * epping module + * + * return - 0 : success, negative: error + */ +int epping_enable(struct device *parent_dev, bool rtnl_held) +{ + int ret = 0; + epping_context_t *epping_ctx = NULL; + struct cds_context *p_cds_context = NULL; + qdf_device_t qdf_ctx; + struct htc_init_info htc_info; + struct hif_opaque_softc *scn; + tSirMacAddr adapter_macAddr; + struct ol_context *ol_ctx = NULL; + struct hif_target_info *tgt_info; + + EPPING_LOG(QDF_TRACE_LEVEL_INFO_HIGH, "%s: Enter", __func__); + + p_cds_context = cds_get_global_context(); + + if (!p_cds_context) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: Failed cds_get_global_context", __func__); + ret = -1; + return ret; + } + + epping_ctx = g_epping_ctx; + if (!epping_ctx) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: Failed to get epping_ctx", __func__); + ret = -1; + return ret; + } + epping_ctx->parent_dev = (void *)parent_dev; + epping_get_dummy_mac_addr(adapter_macAddr); + + /* Initialize the timer module */ + qdf_timer_module_init(); + + scn = cds_get_context(QDF_MODULE_ID_HIF); + if (!scn) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL, + "%s: scn is null!", __func__); + return A_ERROR; + } + + tgt_info = hif_get_target_info_handle(scn); + + ol_ctx = cds_get_context(QDF_MODULE_ID_BMI); + if (!ol_ctx) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL, + "%s: ol_ctx is NULL", __func__); + return A_ERROR; + } + + if (epping_bmi_download_fw(ol_ctx) != QDF_STATUS_SUCCESS) + return A_ERROR; + + /* store target type and target version info in hdd ctx */ + epping_ctx->target_type = tgt_info->target_type; + + htc_info.pContext = NULL; + htc_info.TargetFailure = ol_target_failure; + htc_info.TargetSendSuspendComplete = epping_target_suspend_acknowledge; + qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE); + + /* Create HTC */ + p_cds_context->htc_ctx = htc_create(scn, &htc_info, qdf_ctx, + cds_get_conparam()); + if (!p_cds_context->htc_ctx) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL, + "%s: Failed to Create HTC", __func__); + bmi_cleanup(ol_ctx); + return A_ERROR; + } + epping_ctx->HTCHandle = + cds_get_context(QDF_MODULE_ID_HTC); + if (!epping_ctx->HTCHandle) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: HTCHandle is NULL", __func__); + return A_ERROR; + } + + if (bmi_done(ol_ctx)) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: Failed to complete BMI phase", __func__); + goto error_end; + } + + /* start HIF */ + if (htc_wait_target(epping_ctx->HTCHandle) != QDF_STATUS_SUCCESS) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: htc_wait_target error", __func__); + goto error_end; + } + EPPING_LOG(QDF_TRACE_LEVEL_INFO_HIGH, "%s: HTC ready", __func__); + + ret = epping_connect_service(epping_ctx); + if (ret != 0) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: htc_wait_targetdone", __func__); + goto error_end; + } + if (htc_start(epping_ctx->HTCHandle) != QDF_STATUS_SUCCESS) + goto error_end; + + EPPING_LOG(QDF_TRACE_LEVEL_INFO_HIGH, "%s: HTC started", __func__); + + /* init the tx cookie resource */ + ret = epping_cookie_init(epping_ctx); + if (ret < 0) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: cookie init failed", __func__); + htc_stop(epping_ctx->HTCHandle); + epping_cookie_cleanup(epping_ctx); + goto error_end; + } + + EPPING_LOG(QDF_TRACE_LEVEL_INFO_HIGH, "%s: Exit", __func__); + return ret; + +error_end: + htc_destroy(p_cds_context->htc_ctx); + p_cds_context->htc_ctx = NULL; + bmi_cleanup(ol_ctx); + return A_ERROR; +} + +void epping_enable_adapter(void) +{ + epping_context_t *epping_ctx = g_epping_ctx; + tSirMacAddr adapter_macaddr; + + if (!epping_ctx) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, "epping context is NULL"); + return; + } + + epping_get_dummy_mac_addr(adapter_macaddr); + epping_ctx->epping_adapter = epping_add_adapter(epping_ctx, + adapter_macaddr, + QDF_STA_MODE, true); + if (!epping_ctx->epping_adapter) + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, "epping add adapter failed"); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_rx.c b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_rx.c new file mode 100644 index 0000000000000000000000000000000000000000..a273146fbdaf28a14eeeac3cbe84002294e85493 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_rx.c @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2014-2017, 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/*======================================================================== + + \file epping_rx.c + + \brief WLAN End Point Ping test tool implementation + + ========================================================================*/ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "epping_main.h" +#include "epping_internal.h" +#include "epping_test.h" +#include + +#define AR6000_MAX_RX_BUFFERS 16 +#define AR6000_BUFFER_SIZE 1664 +#define AR6000_MIN_HEAD_ROOM 64 + +static bool enb_rx_dump; + +#ifdef HIF_SDIO +void epping_refill(void *ctx, HTC_ENDPOINT_ID Endpoint) +{ + epping_context_t *pEpping_ctx = (epping_context_t *) ctx; + void *osBuf; + int RxBuffers; + int buffersToRefill; + HTC_PACKET *pPacket; + HTC_PACKET_QUEUE queue; + + buffersToRefill = (int)AR6000_MAX_RX_BUFFERS - + htc_get_num_recv_buffers(pEpping_ctx->HTCHandle, Endpoint); + + if (buffersToRefill <= 0) { + /* fast return, nothing to fill */ + return; + } + + INIT_HTC_PACKET_QUEUE(&queue); + + EPPING_LOG(QDF_TRACE_LEVEL_INFO, + "%s: providing htc with %d buffers at eid=%d\n", + __func__, buffersToRefill, Endpoint); + + for (RxBuffers = 0; RxBuffers < buffersToRefill; RxBuffers++) { + osBuf = qdf_nbuf_alloc(NULL, AR6000_BUFFER_SIZE, + AR6000_MIN_HEAD_ROOM, 4, false); + if (!osBuf) { + break; + } + /* the HTC packet wrapper is at the head of the reserved area + * in the skb */ + pPacket = (HTC_PACKET *) (A_NETBUF_HEAD(osBuf)); + /* set re-fill info */ + SET_HTC_PACKET_INFO_RX_REFILL(pPacket, osBuf, + qdf_nbuf_data(osBuf), + AR6000_BUFFER_SIZE, Endpoint); + SET_HTC_PACKET_NET_BUF_CONTEXT(pPacket, osBuf); + /* add to queue */ + HTC_PACKET_ENQUEUE(&queue, pPacket); + } + + if (!HTC_QUEUE_EMPTY(&queue)) { + /* add packets */ + htc_add_receive_pkt_multiple(pEpping_ctx->HTCHandle, &queue); + } +} +#endif /* HIF_SDIO */ + +void epping_rx(void *ctx, HTC_PACKET *pPacket) +{ + epping_context_t *pEpping_ctx = (epping_context_t *) ctx; + epping_adapter_t *adapter = pEpping_ctx->epping_adapter; + struct net_device *dev = adapter->dev; + QDF_STATUS status = pPacket->Status; + HTC_ENDPOINT_ID eid = pPacket->Endpoint; + struct sk_buff *pktSkb = (struct sk_buff *)pPacket->pPktContext; + + EPPING_LOG(QDF_TRACE_LEVEL_INFO, + "%s: adapter = 0x%pK eid=%d, skb=0x%pK, data=0x%pK, len=0x%x status:%d", + __func__, adapter, eid, pktSkb, pPacket->pBuffer, + pPacket->ActualLength, status); + + if (status != QDF_STATUS_SUCCESS) { + if (status != QDF_STATUS_E_CANCELED) { + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, "%s: RX ERR (%d)", + __func__, status); + } + qdf_nbuf_free(pktSkb); + return; + } + + /* deliver to up layer */ + if (pktSkb) { + if (EPPING_ALIGNMENT_PAD > 0) { + A_NETBUF_PULL(pktSkb, EPPING_ALIGNMENT_PAD); + } + if (enb_rx_dump) + epping_hex_dump((void *)qdf_nbuf_data(pktSkb), + pktSkb->len, __func__); + pktSkb->dev = dev; + if ((pktSkb->dev->flags & IFF_UP) == IFF_UP) { + pktSkb->protocol = eth_type_trans(pktSkb, pktSkb->dev); + ++adapter->stats.rx_packets; + adapter->stats.rx_bytes += pktSkb->len; + qdf_net_buf_debug_release_skb(pktSkb); + if (hdd_napi_enabled(HDD_NAPI_ANY)) + netif_receive_skb(pktSkb); + else + netif_rx_ni(pktSkb); + if ((adapter->stats.rx_packets % + EPPING_STATS_LOG_COUNT) == 0) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: total_rx_pkts = %lu", + __func__, + adapter->stats.rx_packets); + } + } else { + ++adapter->stats.rx_dropped; + qdf_nbuf_free(pktSkb); + } + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_tx.c b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_tx.c new file mode 100644 index 0000000000000000000000000000000000000000..202e02e56758ff062ddf02a0acd82983f83cdb58 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_tx.c @@ -0,0 +1,394 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/*======================================================================== + + \file epping_tx.c + + \brief WLAN End Point Ping test tool implementation + + ========================================================================*/ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "epping_main.h" +#include "epping_internal.h" +#include "epping_test.h" + +#define TX_RETRY_TIMEOUT_IN_MS 1 + +static bool enb_tx_dump; + +void epping_tx_dup_pkt(epping_adapter_t *adapter, + HTC_ENDPOINT_ID eid, qdf_nbuf_t skb) +{ + struct epping_cookie *cookie = NULL; + int skb_len, ret; + qdf_nbuf_t new_skb; + + cookie = epping_alloc_cookie(adapter->pEpping_ctx); + if (!cookie) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: epping_alloc_cookie returns no resource\n", + __func__); + return; + } + new_skb = qdf_nbuf_copy(skb); + if (!new_skb) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: qdf_nbuf_copy returns no resource\n", __func__); + epping_free_cookie(adapter->pEpping_ctx, cookie); + return; + } + SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt, + cookie, qdf_nbuf_data(skb), + qdf_nbuf_len(new_skb), eid, 0); + SET_HTC_PACKET_NET_BUF_CONTEXT(&cookie->HtcPkt, new_skb); + skb_len = (int)qdf_nbuf_len(new_skb); + /* send the packet */ + ret = htc_send_pkt(adapter->pEpping_ctx->HTCHandle, &cookie->HtcPkt); + if (ret != QDF_STATUS_SUCCESS) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: htc_send_pkt failed, ret = %d\n", __func__, ret); + epping_free_cookie(adapter->pEpping_ctx, cookie); + qdf_nbuf_free(new_skb); + return; + } + adapter->stats.tx_bytes += skb_len; + ++adapter->stats.tx_packets; + if (((adapter->stats.tx_packets + + adapter->stats.tx_dropped) % EPPING_STATS_LOG_COUNT) == 0 && + (adapter->stats.tx_packets || adapter->stats.tx_dropped)) { + epping_log_stats(adapter, __func__); + } +} + +static int epping_tx_send_int(qdf_nbuf_t skb, epping_adapter_t *adapter) +{ + EPPING_HEADER *eppingHdr = (EPPING_HEADER *) qdf_nbuf_data(skb); + HTC_ENDPOINT_ID eid = ENDPOINT_UNUSED; + struct epping_cookie *cookie = NULL; + uint8_t ac = 0; + QDF_STATUS ret = QDF_STATUS_SUCCESS; + int skb_len; + EPPING_HEADER tmpHdr = *eppingHdr; + + /* allocate resource for this packet */ + cookie = epping_alloc_cookie(adapter->pEpping_ctx); + /* no resource */ + if (!cookie) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: epping_alloc_cookie returns no resource\n", + __func__); + return A_ERROR; + } + + if (enb_tx_dump) + epping_hex_dump((void *)eppingHdr, skb->len, __func__); + /* + * a quirk of linux, the payload of the frame is 32-bit aligned and thus + * the addition of the HTC header will mis-align the start of the HTC + * frame, so we add some padding which will be stripped off in the target + */ + if (EPPING_ALIGNMENT_PAD > 0) { + A_NETBUF_PUSH(skb, EPPING_ALIGNMENT_PAD); + } + /* prepare ep/HTC information */ + ac = eppingHdr->StreamNo_h; + eid = adapter->pEpping_ctx->EppingEndpoint[ac]; + if (eid < 0 || eid >= EPPING_MAX_NUM_EPIDS) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: invalid eid = %d, ac = %d\n", __func__, eid, + ac); + return A_ERROR; + } + if (tmpHdr.Cmd_h == EPPING_CMD_RESET_RECV_CNT || + tmpHdr.Cmd_h == EPPING_CMD_CONT_RX_START) { + epping_set_kperf_flag(adapter, eid, tmpHdr.CmdBuffer_t[0]); + } + SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt, + cookie, qdf_nbuf_data(skb), qdf_nbuf_len(skb), + eid, 0); + SET_HTC_PACKET_NET_BUF_CONTEXT(&cookie->HtcPkt, skb); + skb_len = skb->len; + /* send the packet */ + ret = htc_send_pkt(adapter->pEpping_ctx->HTCHandle, &cookie->HtcPkt); + epping_log_packet(adapter, &tmpHdr, ret, __func__); + if (ret != QDF_STATUS_SUCCESS) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: htc_send_pkt failed, status = %d\n", __func__, + ret); + epping_free_cookie(adapter->pEpping_ctx, cookie); + return A_ERROR; + } + adapter->stats.tx_bytes += skb_len; + ++adapter->stats.tx_packets; + if (((adapter->stats.tx_packets + + adapter->stats.tx_dropped) % EPPING_STATS_LOG_COUNT) == 0 && + (adapter->stats.tx_packets || adapter->stats.tx_dropped)) { + epping_log_stats(adapter, __func__); + } + + return 0; +} + +void epping_tx_timer_expire(epping_adapter_t *adapter) +{ + qdf_nbuf_t nodrop_skb; + + EPPING_LOG(QDF_TRACE_LEVEL_INFO, "%s: queue len: %d\n", __func__, + qdf_nbuf_queue_len(&adapter->nodrop_queue)); + + if (!qdf_nbuf_queue_len(&adapter->nodrop_queue)) { + /* nodrop queue is empty so no need to arm timer */ + adapter->epping_timer_state = EPPING_TX_TIMER_STOPPED; + return; + } + + /* try to flush nodrop queue */ + while ((nodrop_skb = qdf_nbuf_queue_remove(&adapter->nodrop_queue))) { + htc_set_nodrop_pkt(adapter->pEpping_ctx->HTCHandle, true); + if (epping_tx_send_int(nodrop_skb, adapter)) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: nodrop: %pK xmit fail in timer\n", + __func__, nodrop_skb); + /* fail to xmit so put the nodrop packet to the nodrop queue */ + qdf_nbuf_queue_insert_head(&adapter->nodrop_queue, + nodrop_skb); + break; + } else { + htc_set_nodrop_pkt(adapter->pEpping_ctx->HTCHandle, false); + EPPING_LOG(QDF_TRACE_LEVEL_INFO, + "%s: nodrop: %pK xmit ok in timer\n", + __func__, nodrop_skb); + } + } + + /* if nodrop queue is not empty, continue to arm timer */ + if (nodrop_skb) { + qdf_spin_lock_bh(&adapter->data_lock); + /* if nodrop queue is not empty, continue to arm timer */ + if (adapter->epping_timer_state != EPPING_TX_TIMER_RUNNING) { + adapter->epping_timer_state = EPPING_TX_TIMER_RUNNING; + qdf_timer_mod(&adapter->epping_timer, + TX_RETRY_TIMEOUT_IN_MS); + } + qdf_spin_unlock_bh(&adapter->data_lock); + } else { + adapter->epping_timer_state = EPPING_TX_TIMER_STOPPED; + } +} + +int epping_tx_send(qdf_nbuf_t skb, epping_adapter_t *adapter) +{ + qdf_nbuf_t nodrop_skb; + EPPING_HEADER *eppingHdr; + uint8_t ac = 0; + + eppingHdr = (EPPING_HEADER *) qdf_nbuf_data(skb); + + if (!IS_EPPING_PACKET(eppingHdr)) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: Recived non endpoint ping packets\n", __func__); + /* no packet to send, cleanup */ + qdf_nbuf_free(skb); + return -ENOMEM; + } + + /* the stream ID is mapped to an access class */ + ac = eppingHdr->StreamNo_h; + /* hard coded two ep ids */ + if (ac != 0 && ac != 1) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: ac %d is not mapped to mboxping service\n", + __func__, ac); + qdf_nbuf_free(skb); + return -ENOMEM; + } + + /* + * some EPPING packets cannot be dropped no matter what access class + * it was sent on. A special care has been taken: + * 1. when there is no TX resource, queue the control packets to + * a special queue + * 2. when there is TX resource, send the queued control packets first + * and then other packets + * 3. a timer launches to check if there is queued control packets and + * flush them + */ + + /* check the nodrop queue first */ + while ((nodrop_skb = qdf_nbuf_queue_remove(&adapter->nodrop_queue))) { + htc_set_nodrop_pkt(adapter->pEpping_ctx->HTCHandle, true); + if (epping_tx_send_int(nodrop_skb, adapter)) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: nodrop: %pK xmit fail\n", __func__, + nodrop_skb); + /* fail to xmit so put the nodrop packet to the nodrop queue */ + qdf_nbuf_queue_insert_head(&adapter->nodrop_queue, + nodrop_skb); + /* no cookie so free the current skb */ + goto tx_fail; + } else { + htc_set_nodrop_pkt(adapter->pEpping_ctx->HTCHandle, false); + EPPING_LOG(QDF_TRACE_LEVEL_INFO, + "%s: nodrop: %pK xmit ok\n", __func__, + nodrop_skb); + } + } + + /* send the original packet */ + if (epping_tx_send_int(skb, adapter)) + goto tx_fail; + + return 0; + +tx_fail: + if (!IS_EPING_PACKET_NO_DROP(eppingHdr)) { + /* allow to drop the skb so drop it */ + qdf_nbuf_free(skb); + ++adapter->stats.tx_dropped; + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: Tx skb %pK dropped, stats.tx_dropped = %ld\n", + __func__, skb, adapter->stats.tx_dropped); + return -ENOMEM; + } else { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: nodrop: %pK queued\n", __func__, skb); + qdf_nbuf_queue_add(&adapter->nodrop_queue, skb); + qdf_spin_lock_bh(&adapter->data_lock); + if (adapter->epping_timer_state != EPPING_TX_TIMER_RUNNING) { + adapter->epping_timer_state = EPPING_TX_TIMER_RUNNING; + qdf_timer_mod(&adapter->epping_timer, + TX_RETRY_TIMEOUT_IN_MS); + } + qdf_spin_unlock_bh(&adapter->data_lock); + } + + return 0; +} + +#ifdef HIF_SDIO +enum htc_send_full_action epping_tx_queue_full(void *Context, + HTC_PACKET *pPacket) +{ + /* + * Call netif_stop_queue frequently will impact the mboxping tx t-put. + * Return HTC_SEND_FULL_KEEP directly in epping_tx_queue_full to avoid. + */ + return HTC_SEND_FULL_KEEP; +} +#endif /* HIF_SDIO */ +void epping_tx_complete(void *ctx, HTC_PACKET *htc_pkt) +{ + epping_context_t *pEpping_ctx = (epping_context_t *) ctx; + epping_adapter_t *adapter = pEpping_ctx->epping_adapter; + struct net_device *dev = adapter->dev; + QDF_STATUS status; + HTC_ENDPOINT_ID eid; + qdf_nbuf_t pktSkb; + struct epping_cookie *cookie; + A_BOOL flushing = false; + qdf_nbuf_queue_t skb_queue; + + if (!htc_pkt) + return; + + qdf_nbuf_queue_init(&skb_queue); + + qdf_spin_lock_bh(&adapter->data_lock); + + status = htc_pkt->Status; + eid = htc_pkt->Endpoint; + pktSkb = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt); + cookie = htc_pkt->pPktContext; + + if (!pktSkb) { + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, + "%s: NULL skb from hc packet", __func__); + QDF_BUG(0); + } else { + if (htc_pkt->pBuffer != qdf_nbuf_data(pktSkb)) { + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, + "%s: htc_pkt buffer not equal to skb->data", + __func__); + QDF_BUG(0); + } + /* add this to the list, use faster non-lock API */ + qdf_nbuf_queue_add(&skb_queue, pktSkb); + + if (QDF_IS_STATUS_SUCCESS(status)) { + if (htc_pkt->ActualLength != + qdf_nbuf_len(pktSkb)) { + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, + "%s: htc_pkt length not equal to skb->len", + __func__); + QDF_BUG(0); + } + } + } + + EPPING_LOG(QDF_TRACE_LEVEL_INFO, + "%s skb=%pK data=%pK len=0x%x eid=%d ", + __func__, pktSkb, htc_pkt->pBuffer, + htc_pkt->ActualLength, eid); + + if (QDF_IS_STATUS_ERROR(status)) { + if (status == QDF_STATUS_E_CANCELED) { + /* a packet was flushed */ + flushing = true; + } + if (status != QDF_STATUS_E_RESOURCES) { + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, + "%s() -TX ERROR, status: 0x%x", + __func__, status); + } + } else { + EPPING_LOG(QDF_TRACE_LEVEL_INFO, "%s: OK\n", __func__); + flushing = false; + } + + epping_free_cookie(adapter->pEpping_ctx, cookie); + qdf_spin_unlock_bh(&adapter->data_lock); + + /* free all skbs in our local list */ + while (qdf_nbuf_queue_len(&skb_queue)) { + /* use non-lock version */ + pktSkb = qdf_nbuf_queue_remove(&skb_queue); + if (!pktSkb) + break; + qdf_nbuf_tx_free(pktSkb, QDF_NBUF_PKT_ERROR); + pEpping_ctx->total_tx_acks++; + } + + if (!flushing) { + netif_wake_queue(dev); + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_txrx.c b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_txrx.c new file mode 100644 index 0000000000000000000000000000000000000000..0200fd6f43105b1a7e547f1534bdef1c42a675c5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_txrx.c @@ -0,0 +1,471 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/*======================================================================== + + \file epping_txrx.c + + \brief WLAN End Point Ping test tool implementation + + ========================================================================*/ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "epping_main.h" +#include "epping_internal.h" + +static int epping_start_adapter(epping_adapter_t *adapter); +static void epping_stop_adapter(epping_adapter_t *adapter); + +static void epping_timer_expire(void *data) +{ + struct net_device *dev = (struct net_device *)data; + epping_adapter_t *adapter; + + if (!dev) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: netdev = NULL", __func__); + return; + } + + adapter = netdev_priv(dev); + if (!adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: adapter = NULL", __func__); + return; + } + adapter->epping_timer_state = EPPING_TX_TIMER_STOPPED; + epping_tx_timer_expire(adapter); +} + +static int epping_ndev_open(struct net_device *dev) +{ + epping_adapter_t *adapter; + int ret = 0; + + adapter = netdev_priv(dev); + epping_start_adapter(adapter); + return ret; +} + +static int epping_ndev_stop(struct net_device *dev) +{ + epping_adapter_t *adapter; + int ret = 0; + + adapter = netdev_priv(dev); + if (!adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: EPPING adapter context is Null", __func__); + ret = -ENODEV; + goto end; + } + epping_stop_adapter(adapter); +end: + return ret; +} + +static void epping_ndev_uninit(struct net_device *dev) +{ + epping_adapter_t *adapter; + + adapter = netdev_priv(dev); + if (!adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: EPPING adapter context is Null", __func__); + goto end; + } + epping_stop_adapter(adapter); +end: + return; +} + +static void epping_tx_queue_timeout(struct net_device *dev) +{ + epping_adapter_t *adapter; + + adapter = netdev_priv(dev); + if (!adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: EPPING adapter context is Null", __func__); + goto end; + } + + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, + "%s: Transmission timeout occurred, adapter->started= %d", + __func__, adapter->started); + + /* Getting here implies we disabled the TX queues + * for too long. Since this is epping + * (not because of disassociation or low resource scenarios), + * try to restart the queue + */ + if (adapter->started) + netif_wake_queue(dev); +end: + return; + +} + +static netdev_tx_t epping_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + epping_adapter_t *adapter; + int ret = 0; + + adapter = netdev_priv(dev); + if (!adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: EPPING adapter context is Null", __func__); + kfree_skb(skb); + ret = -ENODEV; + goto end; + } + qdf_net_buf_debug_acquire_skb(skb, __FILE__, __LINE__); + ret = epping_tx_send(skb, adapter); +end: + return NETDEV_TX_OK; +} + +static struct net_device_stats *epping_get_stats(struct net_device *dev) +{ + epping_adapter_t *adapter = netdev_priv(dev); + + if (!adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, "%s: adapter = NULL", + __func__); + return NULL; + } + + return &adapter->stats; +} + +static int epping_ndev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + epping_adapter_t *adapter; + int ret = 0; + + adapter = netdev_priv(dev); + if (!adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: EPPING adapter context is Null", __func__); + ret = -ENODEV; + goto end; + } + if (dev != adapter->dev) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: HDD adapter/dev inconsistency", __func__); + ret = -ENODEV; + goto end; + } + + if ((!ifr) || (!ifr->ifr_data)) { + ret = -EINVAL; + goto end; + } + + switch (cmd) { + case (SIOCDEVPRIVATE + 1): + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, + "%s: do not support ioctl %d (SIOCDEVPRIVATE + 1)", + __func__, cmd); + break; + default: + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, "%s: unknown ioctl %d", + __func__, cmd); + ret = -EINVAL; + break; + } + +end: + return ret; +} + +static int epping_set_mac_address(struct net_device *dev, void *addr) +{ + epping_adapter_t *adapter = netdev_priv(dev); + struct sockaddr *psta_mac_addr = addr; + qdf_mem_copy(&adapter->macAddressCurrent, + psta_mac_addr->sa_data, ETH_ALEN); + qdf_mem_copy(dev->dev_addr, psta_mac_addr->sa_data, ETH_ALEN); + return 0; +} + +static void epping_stop_adapter(epping_adapter_t *adapter) +{ + qdf_device_t qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE); + + if (!qdf_ctx) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: qdf_ctx is NULL\n", __func__); + return; + } + + if (adapter && adapter->started) { + EPPING_LOG(LOG1, FL("Disabling queues")); + netif_tx_disable(adapter->dev); + netif_carrier_off(adapter->dev); + adapter->started = false; + pld_request_bus_bandwidth(qdf_ctx->dev, + PLD_BUS_WIDTH_LOW); + } +} + +static int epping_start_adapter(epping_adapter_t *adapter) +{ + qdf_device_t qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE); + + if (!qdf_ctx) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: qdf_ctx is NULL", __func__); + return -EINVAL; + } + + if (!adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: adapter= NULL\n", __func__); + return -EINVAL; + } + if (!adapter->started) { + pld_request_bus_bandwidth(qdf_ctx->dev, + PLD_BUS_WIDTH_HIGH); + netif_carrier_on(adapter->dev); + EPPING_LOG(LOG1, FL("Enabling queues")); + netif_tx_start_all_queues(adapter->dev); + adapter->started = true; + } else { + EPPING_LOG(QDF_TRACE_LEVEL_WARN, + "%s: adapter %pK already started\n", __func__, + adapter); + } + return 0; +} + +static int epping_register_adapter(epping_adapter_t *adapter, bool rtnl_held) +{ + int ret = 0; + + if (!rtnl_held) + ret = register_netdev(adapter->dev); + else + ret = register_netdevice(adapter->dev); + if (ret != 0) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: unable to register device\n", + adapter->dev->name); + } else { + adapter->registered = true; + } + return ret; +} + +static void epping_unregister_adapter(epping_adapter_t *adapter) +{ + if (adapter) { + epping_stop_adapter(adapter); + if (adapter->registered) { + unregister_netdev(adapter->dev); + adapter->registered = false; + } + } else { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: adapter = NULL, unable to unregister device\n", + __func__); + } +} + +void epping_destroy_adapter(epping_adapter_t *adapter) +{ + struct net_device *dev = NULL; + epping_context_t *pEpping_ctx; + + if (!adapter || !adapter->pEpping_ctx) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: adapter = NULL\n", __func__); + return; + } + + dev = adapter->dev; + pEpping_ctx = adapter->pEpping_ctx; + epping_unregister_adapter(adapter); + + qdf_spinlock_destroy(&adapter->data_lock); + qdf_timer_free(&adapter->epping_timer); + adapter->epping_timer_state = EPPING_TX_TIMER_STOPPED; + + while (qdf_nbuf_queue_len(&adapter->nodrop_queue)) { + qdf_nbuf_t tmp_nbuf = NULL; + tmp_nbuf = qdf_nbuf_queue_remove(&adapter->nodrop_queue); + if (tmp_nbuf) + qdf_nbuf_free(tmp_nbuf); + } + + free_netdev(dev); + if (!pEpping_ctx) + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: pEpping_ctx = NULL\n", __func__); + else + pEpping_ctx->epping_adapter = NULL; +} + +static struct net_device_ops epping_drv_ops = { + .ndo_open = epping_ndev_open, + .ndo_stop = epping_ndev_stop, + .ndo_uninit = epping_ndev_uninit, + .ndo_start_xmit = epping_hard_start_xmit, + .ndo_tx_timeout = epping_tx_queue_timeout, + .ndo_get_stats = epping_get_stats, + .ndo_do_ioctl = epping_ndev_ioctl, + .ndo_set_mac_address = epping_set_mac_address, + .ndo_select_queue = NULL, +}; + +#define EPPING_TX_QUEUE_MAX_LEN 128 /* need to be power of 2 */ + +epping_adapter_t *epping_add_adapter(epping_context_t *pEpping_ctx, + tSirMacAddr macAddr, + enum QDF_OPMODE device_mode, + bool rtnl_held) +{ + struct net_device *dev; + epping_adapter_t *adapter; + + dev = alloc_netdev(sizeof(epping_adapter_t), "wifi%d", +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) + NET_NAME_UNKNOWN, +#endif + ether_setup); + if (!dev) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: Cannot allocate epping_adapter_t\n", __func__); + return NULL; + } + + adapter = netdev_priv(dev); + qdf_mem_zero(adapter, sizeof(*adapter)); + adapter->dev = dev; + adapter->pEpping_ctx = pEpping_ctx; + adapter->device_mode = device_mode; /* station, SAP, etc */ + qdf_mem_copy(dev->dev_addr, (void *)macAddr, sizeof(tSirMacAddr)); + qdf_mem_copy(adapter->macAddressCurrent.bytes, + macAddr, sizeof(tSirMacAddr)); + qdf_spinlock_create(&adapter->data_lock); + qdf_nbuf_queue_init(&adapter->nodrop_queue); + adapter->epping_timer_state = EPPING_TX_TIMER_STOPPED; + qdf_timer_init(epping_get_qdf_ctx(), &adapter->epping_timer, + epping_timer_expire, dev, QDF_TIMER_TYPE_SW); + dev->type = ARPHRD_IEEE80211; + dev->needed_headroom += 24; + dev->netdev_ops = &epping_drv_ops; + dev->watchdog_timeo = 5 * HZ; /* XXX */ + dev->tx_queue_len = EPPING_TXBUF - 1; /* 1 for mgmt frame */ + if (epping_register_adapter(adapter, rtnl_held) == 0) { + EPPING_LOG(LOG1, FL("Disabling queues")); + netif_tx_disable(dev); + netif_carrier_off(dev); + return adapter; + } else { + epping_destroy_adapter(adapter); + return NULL; + } +} + +int epping_connect_service(epping_context_t *pEpping_ctx) +{ + int status, i; + struct htc_service_connect_req connect; + struct htc_service_connect_resp response; + + qdf_mem_zero(&connect, sizeof(connect)); + qdf_mem_zero(&response, sizeof(response)); + + /* these fields are the same for all service endpoints */ + connect.EpCallbacks.pContext = pEpping_ctx; + connect.EpCallbacks.EpTxCompleteMultiple = NULL; + connect.EpCallbacks.EpRecv = epping_rx; + /* epping_tx_complete use Multiple version */ + connect.EpCallbacks.EpTxComplete = epping_tx_complete; + connect.MaxSendQueueDepth = 64; + +#ifdef HIF_SDIO + connect.EpCallbacks.EpRecvRefill = epping_refill; + connect.EpCallbacks.EpSendFull = + epping_tx_queue_full /* ar6000_tx_queue_full */; +#elif defined(HIF_USB) || defined(HIF_PCI) || defined(HIF_SNOC) || \ + defined(HIF_IPCI) + connect.EpCallbacks.EpRecvRefill = NULL /* provided by HIF */; + connect.EpCallbacks.EpSendFull = NULL /* provided by HIF */; + /* disable flow control for hw flow control */ + connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL; +#endif + + /* connect to service */ + connect.service_id = WMI_DATA_BE_SVC; + status = htc_connect_service(pEpping_ctx->HTCHandle, &connect, &response); + if (QDF_IS_STATUS_ERROR(status)) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "Failed to connect to Endpoint Ping BE service status:%d\n", + status); + return status; + } else { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "eppingtest BE endpoint:%d\n", response.Endpoint); + } + pEpping_ctx->EppingEndpoint[0] = response.Endpoint; + +#if defined(HIF_PCI) || defined(HIF_USB) || defined(HIF_SNOC) || \ + defined(HIF_IPCI) + connect.service_id = WMI_DATA_BK_SVC; + status = htc_connect_service(pEpping_ctx->HTCHandle, &connect, &response); + if (QDF_IS_STATUS_ERROR(status)) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "Failed to connect to Endpoint Ping BK service status:%d\n", + status); + return status; + } else { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "eppingtest BK endpoint:%d\n", response.Endpoint); + } + pEpping_ctx->EppingEndpoint[1] = response.Endpoint; + /* Since we do not create other two SVC use BK endpoint + * for rest ACs (2, 3) */ + for (i = 2; i < EPPING_MAX_NUM_EPIDS; i++) { + pEpping_ctx->EppingEndpoint[i] = response.Endpoint; + } +#else + /* we only use one endpoint for high latenance bus. + * Map all AC's EPIDs to the same endpoint ID returned by HTC */ + for (i = 0; i < EPPING_MAX_NUM_EPIDS; i++) { + pEpping_ctx->EppingEndpoint[i] = response.Endpoint; + } +#endif + return 0; +} diff --git a/drivers/staging/qca-wifi-host-cmn/utils/fwlog/dbglog_host.c b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/dbglog_host.c new file mode 100644 index 0000000000000000000000000000000000000000..c0889fd83db5af83c4bd40123c7ef8ec160f45f8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/dbglog_host.c @@ -0,0 +1,4563 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* Host Debug log implementation */ + +#include "athdefs.h" +#include "a_types.h" +#include "dbglog_host.h" +#include "wmi.h" +#include "wmi_unified_api.h" +#include "wma.h" +#include "ol_defines.h" +#include +#include "host_diag_core_event.h" +#include "qwlan_version.h" +#include +#include +#include + +#ifdef WLAN_OPEN_SOURCE +#include +#endif /* WLAN_OPEN_SOURCE */ +#include "wmi_unified_priv.h" + +#ifdef CNSS_GENL +#include +#include "wlan_cfg80211.h" +#endif + +#ifdef MULTI_IF_NAME +#define CLD_DEBUGFS_DIR "cld" MULTI_IF_NAME +#else + +#define CLD_DEBUGFS_DIR "cld" +#endif +#define DEBUGFS_BLOCK_NAME "dbglog_block" + +#define ATH_MODULE_NAME fwlog +#include +#define FWLOG_DEBUG ATH_DEBUG_MAKE_MODULE_MASK(0) + +static int get_version; +static int gprint_limiter; +static bool tgt_assert_enable; +#ifdef WLAN_DEBUG +static ATH_DEBUG_MASK_DESCRIPTION g_fwlog_debug_description[] = { + {FWLOG_DEBUG, "fwlog"}, +}; + +ATH_DEBUG_INSTANTIATE_MODULE_VAR(fwlog, + "fwlog", + "Firmware Debug Log", + ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_INFO | + ATH_DEBUG_ERR, + ATH_DEBUG_DESCRIPTION_COUNT + (g_fwlog_debug_description), + g_fwlog_debug_description); +#endif + +module_dbg_print mod_print[WLAN_MODULE_ID_MAX]; + +uint32_t dbglog_process_type = DBGLOG_PROCESS_NET_RAW; + +static const char *dbglog_get_module_str(uint32_t module_id) +{ + switch (module_id) { + case WLAN_MODULE_INF: + return "INF"; + case WLAN_MODULE_WMI: + return "WMI"; + case WLAN_MODULE_STA_PWRSAVE: + return "STA PS"; + case WLAN_MODULE_WHAL: + return "WHAL"; + case WLAN_MODULE_COEX: + return "COEX"; + case WLAN_MODULE_ROAM: + return "ROAM"; + case WLAN_MODULE_RESMGR_CHAN_MANAGER: + return "CHANMGR"; + case WLAN_MODULE_RESMGR: + return "RESMGR"; + case WLAN_MODULE_VDEV_MGR: + return "VDEV"; + case WLAN_MODULE_SCAN: + return "SCAN"; + case WLAN_MODULE_RATECTRL: + return "RC"; + case WLAN_MODULE_AP_PWRSAVE: + return "AP PS"; + case WLAN_MODULE_BLOCKACK: + return "BA"; + case WLAN_MODULE_MGMT_TXRX: + return "MGMT"; + case WLAN_MODULE_DATA_TXRX: + return "DATA"; + case WLAN_MODULE_HTT: + return "HTT"; + case WLAN_MODULE_HOST: + return "HOST"; + case WLAN_MODULE_BEACON: + return "BEACON"; + case WLAN_MODULE_OFFLOAD: + return "OFFLOAD"; + case WLAN_MODULE_WAL: + return "WAL"; + case WAL_MODULE_DE: + return "DE"; + case WLAN_MODULE_PCIELP: + return "PCIELP"; + case WLAN_MODULE_RTT: + return "RTT"; + case WLAN_MODULE_DCS: + return "DCS"; + case WLAN_MODULE_CACHEMGR: + return "CACHEMGR"; + case WLAN_MODULE_ANI: + return "ANI"; + case WLAN_MODULE_TEST: + return "TESTPOINT"; + case WLAN_MODULE_STA_SMPS: + return "STA_SMPS"; + case WLAN_MODULE_TDLS: + return "TDLS"; + case WLAN_MODULE_P2P: + return "P2P"; + case WLAN_MODULE_WOW: + return "WoW"; + case WLAN_MODULE_IBSS_PWRSAVE: + return "IBSS PS"; + case WLAN_MODULE_EXTSCAN: + return "ExtScan"; + case WLAN_MODULE_UNIT_TEST: + return "UNIT_TEST"; + case WLAN_MODULE_MLME: + return "MLME"; + case WLAN_MODULE_SUPPL: + return "SUPPLICANT"; + default: + return "UNKNOWN"; + } +} + +char *DBG_MSG_ARR[WLAN_MODULE_ID_MAX][MAX_DBG_MSGS] = { + { + "INF_MSG_START", + "INF_ASSERTION_FAILED", + "INF_TARGET_ID", + "INF_MSG_END" + }, + { + "WMI_DBGID_DEFINITION_START", + "WMI_CMD_RX_XTND_PKT_TOO_SHORT", + "WMI_EXTENDED_CMD_NOT_HANDLED", + "WMI_CMD_RX_PKT_TOO_SHORT", + "WMI_CALLING_WMI_EXTENSION_FN", + "WMI_CMD_NOT_HANDLED", + "WMI_IN_SYNC", + "WMI_TARGET_WMI_SYNC_CMD", + "WMI_SET_SNR_THRESHOLD_PARAMS", + "WMI_SET_RSSI_THRESHOLD_PARAMS", + "WMI_SET_LQ_THRESHOLD_PARAMS", + "WMI_TARGET_CREATE_PSTREAM_CMD", + "WMI_WI_DTM_INUSE", + "WMI_TARGET_DELETE_PSTREAM_CMD", + "WMI_TARGET_IMPLICIT_DELETE_PSTREAM_CMD", + "WMI_TARGET_GET_BIT_RATE_CMD", + "WMI_GET_RATE_MASK_CMD_FIX_RATE_MASK_IS", + "WMI_TARGET_GET_AVAILABLE_CHANNELS_CMD", + "WMI_TARGET_GET_TX_PWR_CMD", + "WMI_FREE_EVBUF_WMIBUF", + "WMI_FREE_EVBUF_DATABUF", + "WMI_FREE_EVBUF_BADFLAG", + "WMI_HTC_RX_ERROR_DATA_PACKET", + "WMI_HTC_RX_SYNC_PAUSING_FOR_MBOX", + "WMI_INCORRECT_WMI_DATA_HDR_DROPPING_PKT", + "WMI_SENDING_READY_EVENT", + "WMI_SETPOWER_MDOE_TO_MAXPERF", + "WMI_SETPOWER_MDOE_TO_REC", + "WMI_BSSINFO_EVENT_FROM", + "WMI_TARGET_GET_STATS_CMD", + "WMI_SENDING_SCAN_COMPLETE_EVENT", + "WMI_SENDING_RSSI_INDB_THRESHOLD_EVENT ", + "WMI_SENDING_RSSI_INDBM_THRESHOLD_EVENT", + "WMI_SENDING_LINK_QUALITY_THRESHOLD_EVENT", + "WMI_SENDING_ERROR_REPORT_EVENT", + "WMI_SENDING_CAC_EVENT", + "WMI_TARGET_GET_ROAM_TABLE_CMD", + "WMI_TARGET_GET_ROAM_DATA_CMD", + "WMI_SENDING_GPIO_INTR_EVENT", + "WMI_SENDING_GPIO_ACK_EVENT", + "WMI_SENDING_GPIO_DATA_EVENT", + "WMI_CMD_RX", + "WMI_CMD_RX_XTND", + "WMI_EVENT_SEND", + "WMI_EVENT_SEND_XTND", + "WMI_CMD_PARAMS_DUMP_START", + "WMI_CMD_PARAMS_DUMP_END", + "WMI_CMD_PARAMS", + "WMI_EVENT_ALLOC_FAILURE", + "WMI_DBGID_DCS_PARAM_CMD", + "WMI_SEND_EVENT_WRONG_TLV", + "WMI_SEND_EVENT_NO_TLV_DEF", + "WMI_DBGID_DEFNITION_END", + }, + { + "PS_STA_DEFINITION_START", + "PS_STA_PM_ARB_REQUEST", + "PS_STA_DELIVER_EVENT", + "PS_STA_PSPOLL_SEQ_DONE", + "PS_STA_COEX_MODE", + "PS_STA_PSPOLL_ALLOW", + "PS_STA_SET_PARAM", + "PS_STA_SPECPOLL_TIMER_STARTED", + "PS_STA_SPECPOLL_TIMER_STOPPED", + }, + { + "WHAL_DBGID_DEFINITION_START", + "WHAL_ERROR_ANI_CONTROL", + "WHAL_ERROR_CHIP_TEST1", + "WHAL_ERROR_CHIP_TEST2", + "WHAL_ERROR_EEPROM_CHECKSUM", + "WHAL_ERROR_EEPROM_MACADDR", + "WHAL_ERROR_INTERRUPT_HIU", + "WHAL_ERROR_KEYCACHE_RESET", + "WHAL_ERROR_KEYCACHE_SET", + "WHAL_ERROR_KEYCACHE_TYPE", + "WHAL_ERROR_KEYCACHE_TKIPENTRY", + "WHAL_ERROR_KEYCACHE_WEPLENGTH", + "WHAL_ERROR_PHY_INVALID_CHANNEL", + "WHAL_ERROR_POWER_AWAKE", + "WHAL_ERROR_POWER_SET", + "WHAL_ERROR_RECV_STOPDMA", + "WHAL_ERROR_RECV_STOPPCU", + "WHAL_ERROR_RESET_CHANNF1", + "WHAL_ERROR_RESET_CHANNF2", + "WHAL_ERROR_RESET_PM", + "WHAL_ERROR_RESET_OFFSETCAL", + "WHAL_ERROR_RESET_RFGRANT", + "WHAL_ERROR_RESET_RXFRAME", + "WHAL_ERROR_RESET_STOPDMA", + "WHAL_ERROR_RESET_ERRID", + "WHAL_ERROR_RESET_ADCDCCAL1", + "WHAL_ERROR_RESET_ADCDCCAL2", + "WHAL_ERROR_RESET_TXIQCAL", + "WHAL_ERROR_RESET_RXIQCAL", + "WHAL_ERROR_RESET_CARRIERLEAK", + "WHAL_ERROR_XMIT_COMPUTE", + "WHAL_ERROR_XMIT_NOQUEUE", + "WHAL_ERROR_XMIT_ACTIVEQUEUE", + "WHAL_ERROR_XMIT_BADTYPE", + "WHAL_ERROR_XMIT_STOPDMA", + "WHAL_ERROR_INTERRUPT_BB_PANIC", + "WHAL_ERROR_PAPRD_MAXGAIN_ABOVE_WINDOW", + "WHAL_ERROR_QCU_HW_PAUSE_MISMATCH", + "WHAL_DBGID_DEFINITION_END", + }, + { + "COEX_DEBUGID_START", + "BTCOEX_DBG_MCI_1", + "BTCOEX_DBG_MCI_2", + "BTCOEX_DBG_MCI_3", + "BTCOEX_DBG_MCI_4", + "BTCOEX_DBG_MCI_5", + "BTCOEX_DBG_MCI_6", + "BTCOEX_DBG_MCI_7", + "BTCOEX_DBG_MCI_8", + "BTCOEX_DBG_MCI_9", + "BTCOEX_DBG_MCI_10", + "COEX_WAL_BTCOEX_INIT", + "COEX_WAL_PAUSE", + "COEX_WAL_RESUME", + "COEX_UPDATE_AFH", + "COEX_HWQ_EMPTY_CB", + "COEX_MCI_TIMER_HANDLER", + "COEX_MCI_RECOVER", + "ERROR_COEX_MCI_ISR", + "ERROR_COEX_MCI_GPM", + "COEX_ProfileType", + "COEX_LinkID", + "COEX_LinkState", + "COEX_LinkRole", + "COEX_LinkRate", + "COEX_VoiceType", + "COEX_TInterval", + "COEX_WRetrx", + "COEX_Attempts", + "COEX_PerformanceState", + "COEX_LinkType", + "COEX_RX_MCI_GPM_VERSION_QUERY", + "COEX_RX_MCI_GPM_VERSION_RESPONSE", + "COEX_RX_MCI_GPM_STATUS_QUERY", + "COEX_STATE_WLAN_VDEV_DOWN", + "COEX_STATE_WLAN_VDEV_START", + "COEX_STATE_WLAN_VDEV_CONNECTED", + "COEX_STATE_WLAN_VDEV_SCAN_STARTED", + "COEX_STATE_WLAN_VDEV_SCAN_END", + "COEX_STATE_WLAN_DEFAULT", + "COEX_CHANNEL_CHANGE", + "COEX_POWER_CHANGE", + "COEX_CONFIG_MGR", + "COEX_TX_MCI_GPM_BT_CAL_REQ", + "COEX_TX_MCI_GPM_BT_CAL_GRANT", + "COEX_TX_MCI_GPM_BT_CAL_DONE", + "COEX_TX_MCI_GPM_WLAN_CAL_REQ", + "COEX_TX_MCI_GPM_WLAN_CAL_GRANT", + "COEX_TX_MCI_GPM_WLAN_CAL_DONE", + "COEX_TX_MCI_GPM_BT_DEBUG", + "COEX_TX_MCI_GPM_VERSION_QUERY", + "COEX_TX_MCI_GPM_VERSION_RESPONSE", + "COEX_TX_MCI_GPM_STATUS_QUERY", + "COEX_TX_MCI_GPM_HALT_BT_GPM", + "COEX_TX_MCI_GPM_WLAN_CHANNELS", + "COEX_TX_MCI_GPM_BT_PROFILE_INFO", + "COEX_TX_MCI_GPM_BT_STATUS_UPDATE", + "COEX_TX_MCI_GPM_BT_UPDATE_FLAGS", + "COEX_TX_MCI_GPM_UNKNOWN", + "COEX_TX_MCI_SYS_WAKING", + "COEX_TX_MCI_LNA_TAKE", + "COEX_TX_MCI_LNA_TRANS", + "COEX_TX_MCI_SYS_SLEEPING", + "COEX_TX_MCI_REQ_WAKE", + "COEX_TX_MCI_REMOTE_RESET", + "COEX_TX_MCI_TYPE_UNKNOWN", + "COEX_WHAL_MCI_RESET", + "COEX_POLL_BT_CAL_DONE_TIMEOUT", + "COEX_WHAL_PAUSE", + "COEX_RX_MCI_GPM_BT_CAL_REQ", + "COEX_RX_MCI_GPM_BT_CAL_DONE", + "COEX_RX_MCI_GPM_BT_CAL_GRANT", + "COEX_WLAN_CAL_START", + "COEX_WLAN_CAL_RESULT", + "COEX_BtMciState", + "COEX_BtCalState", + "COEX_WlanCalState", + "COEX_RxReqWakeCount", + "COEX_RxRemoteResetCount", + "COEX_RESTART_CAL", + "COEX_SENDMSG_QUEUE", + "COEX_RESETSEQ_LNAINFO_TIMEOUT", + "COEX_MCI_ISR_IntRaw", + "COEX_MCI_ISR_Int1Raw", + "COEX_MCI_ISR_RxMsgRaw", + "COEX_WHAL_COEX_RESET", + "COEX_WAL_COEX_INIT", + "COEX_TXRX_CNT_LIMIT_ISR", + "COEX_CH_BUSY", + "COEX_REASSESS_WLAN_STATE", + "COEX_BTCOEX_WLAN_STATE_UPDATE", + "COEX_BT_NUM_OF_PROFILES", + "COEX_BT_NUM_OF_HID_PROFILES", + "COEX_BT_NUM_OF_ACL_PROFILES", + "COEX_BT_NUM_OF_HI_ACL_PROFILES", + "COEX_BT_NUM_OF_VOICE_PROFILES", + "COEX_WLAN_AGGR_LIMIT", + "COEX_BT_LOW_PRIO_BUDGET", + "COEX_BT_HI_PRIO_BUDGET", + "COEX_BT_IDLE_TIME", + "COEX_SET_COEX_WEIGHT", + "COEX_WLAN_WEIGHT_GROUP", + "COEX_BT_WEIGHT_GROUP", + "COEX_BT_INTERVAL_ALLOC", + "COEX_BT_SCHEME", + "COEX_BT_MGR", + "COEX_BT_SM_ERROR", + "COEX_SYSTEM_UPDATE", + "COEX_LOW_PRIO_LIMIT", + "COEX_HI_PRIO_LIMIT", + "COEX_BT_INTERVAL_START", + "COEX_WLAN_INTERVAL_START", + "COEX_NON_LINK_BUDGET", + "COEX_CONTENTION_MSG", + "COEX_SET_NSS", + "COEX_SELF_GEN_MASK", + "COEX_PROFILE_ERROR", + "COEX_WLAN_INIT", + "COEX_BEACON_MISS", + "COEX_BEACON_OK", + "COEX_BTCOEX_SCAN_ACTIVITY", + "COEX_SCAN_ACTIVITY", + "COEX_FORCE_QUIETTIME", + "COEX_BT_MGR_QUIETTIME", + "COEX_BT_INACTIVITY_TRIGGER", + "COEX_BT_INACTIVITY_REPORTED", + "COEX_TX_MCI_GPM_WLAN_PRIO", + "COEX_TX_MCI_GPM_BT_PAUSE_PROFILE", + "COEX_TX_MCI_GPM_WLAN_SET_ACL_INACTIVITY", + "COEX_RX_MCI_GPM_BT_ACL_INACTIVITY_REPORT", + "COEX_GENERIC_ERROR", + "COEX_RX_RATE_THRESHOLD", + "COEX_RSSI", + "COEX_WLAN_VDEV_NOTIF_START", /* 133 */ + "COEX_WLAN_VDEV_NOTIF_UP", /* 134 */ + "COEX_WLAN_VDEV_NOTIF_DOWN", /* 135 */ + "COEX_WLAN_VDEV_NOTIF_STOP", /* 136 */ + "COEX_WLAN_VDEV_NOTIF_ADD_PEER", /* 137 */ + "COEX_WLAN_VDEV_NOTIF_DELETE_PEER", /* 138 */ + "COEX_WLAN_VDEV_NOTIF_CONNECTED_PEER", /* 139 */ + "COEX_WLAN_VDEV_NOTIF_PAUSE", /* 140 */ + "COEX_WLAN_VDEV_NOTIF_UNPAUSED", /* 141 */ + "COEX_STATE_WLAN_VDEV_PEER_ADD", /* 142 */ + "COEX_STATE_WLAN_VDEV_CONNECTED_PEER", /* 143 */ + "COEX_STATE_WLAN_VDEV_DELETE_PEER", /* 144 */ + "COEX_STATE_WLAN_VDEV_PAUSE", /* 145 */ + "COEX_STATE_WLAN_VDEV_UNPAUSED", /* 146 */ + "COEX_SCAN_CALLBACK", /* 147 */ + "COEX_RC_SET_CHAINMASK", /* 148 */ + "COEX_TX_MCI_GPM_WLAN_SET_BT_RXSS_THRES", /* 149 */ + "COEX_TX_MCI_GPM_BT_RXSS_THRES_QUERY", /* 150 */ + "COEX_BT_RXSS_THRES", /* 151 */ + "COEX_BT_PROFILE_ADD_RMV", /* 152 */ + "COEX_BT_SCHED_INFO", /* 153 */ + "COEX_TRF_MGMT", /* 154 */ + "COEX_SCHED_START", /* 155 */ + "COEX_SCHED_RESULT", /* 156 */ + "COEX_SCHED_ERROR", /* 157 */ + "COEX_SCHED_PRE_OP", /* 158 */ + "COEX_SCHED_POST_OP", /* 159 */ + "COEX_RX_RATE", /* 160 */ + "COEX_ACK_PRIORITY", /* 161 */ + "COEX_STATE_WLAN_VDEV_UP", /* 162 */ + "COEX_STATE_WLAN_VDEV_PEER_UPDATE", /* 163 */ + "COEX_STATE_WLAN_VDEV_STOP", /* 164 */ + "COEX_WLAN_PAUSE_PEER", /* 165 */ + "COEX_WLAN_UNPAUSE_PEER", /* 166 */ + "COEX_WLAN_PAUSE_INTERVAL_START", /* 167 */ + "COEX_WLAN_POSTPAUSE_INTERVAL_START", /* 168 */ + "COEX_TRF_FREERUN", /* 169 */ + "COEX_TRF_SHAPE_PM", /* 170 */ + "COEX_TRF_SHAPE_PSP", /* 171 */ + "COEX_TRF_SHAPE_S_CTS", /* 172 */ + "COEX_CHAIN_CONFIG", /* 173 */ + "COEX_SYSTEM_MONITOR", /* 174 */ + "COEX_SINGLECHAIN_INIT", /* 175 */ + "COEX_MULTICHAIN_INIT", /* 176 */ + "COEX_SINGLECHAIN_DBG_1", /* 177 */ + "COEX_SINGLECHAIN_DBG_2", /* 178 */ + "COEX_SINGLECHAIN_DBG_3", /* 179 */ + "COEX_MULTICHAIN_DBG_1", /* 180 */ + "COEX_MULTICHAIN_DBG_2", /* 181 */ + "COEX_MULTICHAIN_DBG_3", /* 182 */ + "COEX_PSP_TX_CB", /* 183 */ + "COEX_PSP_RX_CB", /* 184 */ + "COEX_PSP_STAT_1", /* 185 */ + "COEX_PSP_SPEC_POLL", /* 186 */ + "COEX_PSP_READY_STATE", /* 187 */ + "COEX_PSP_TX_STATUS_STATE", /* 188 */ + "COEX_PSP_RX_STATUS_STATE_1", /* 189 */ + "COEX_PSP_NOT_READY_STATE", /* 190 */ + "COEX_PSP_DISABLED_STATE", /* 191 */ + "COEX_PSP_ENABLED_STATE", /* 192 */ + "COEX_PSP_SEND_PSPOLL", /* 193 */ + "COEX_PSP_MGR_ENTER", /* 194 */ + "COEX_PSP_MGR_RESULT", /* 195 */ + "COEX_PSP_NONWLAN_INTERVAL", /* 196 */ + "COEX_PSP_STAT_2", /* 197 */ + "COEX_PSP_RX_STATUS_STATE_2", /* 198 */ + "COEX_PSP_ERROR", /* 199 */ + "COEX_T2BT", /* 200 */ + "COEX_BT_DURATION", /* 201 */ + "COEX_TX_MCI_GPM_WLAN_SCHED_INFO_TRIG", /* 202 */ + "COEX_TX_MCI_GPM_WLAN_SCHED_INFO_TRIG_RSP", /* 203 */ + "COEX_TX_MCI_GPM_SCAN_OP", /* 204 */ + "COEX_TX_MCI_GPM_BT_PAUSE_GPM_TX", /* 205 */ + "COEX_CTS2S_SEND", /* 206 */ + "COEX_CTS2S_RESULT", /* 207 */ + "COEX_ENTER_OCS", /* 208 */ + "COEX_EXIT_OCS", /* 209 */ + "COEX_UPDATE_OCS", /* 210 */ + "COEX_STATUS_OCS", /* 211 */ + "COEX_STATS_BT", /* 212 */ + "COEX_MWS_WLAN_INIT", + "COEX_MWS_WBTMR_SYNC", + "COEX_MWS_TYPE2_RX", + "COEX_MWS_TYPE2_TX", + "COEX_MWS_WLAN_CHAVD", + "COEX_MWS_WLAN_CHAVD_INSERT", + "COEX_MWS_WLAN_CHAVD_MERGE", + "COEX_MWS_WLAN_CHAVD_RPT", + "COEX_MWS_CP_MSG_SEND", + "COEX_MWS_CP_ESCAPE", + "COEX_MWS_CP_UNFRAME", + "COEX_MWS_CP_SYNC_UPDATE", + "COEX_MWS_CP_SYNC", + "COEX_MWS_CP_WLAN_STATE_IND", + "COEX_MWS_CP_SYNCRESP_TIMEOUT", + "COEX_MWS_SCHEME_UPDATE", + "COEX_MWS_WLAN_EVENT", + "COEX_MWS_UART_UNESCAPE", + "COEX_MWS_UART_ENCODE_SEND", + "COEX_MWS_UART_RECV_DECODE", + "COEX_MWS_UL_HDL", + "COEX_MWS_REMOTE_EVENT", + "COEX_MWS_OTHER", + "COEX_MWS_ERROR", + "COEX_MWS_ANT_DIVERSITY", /* 237 */ + "COEX_P2P_GO", + "COEX_P2P_CLIENT", + "COEX_SCC_1", + "COEX_SCC_2", + "COEX_MCC_1", + "COEX_MCC_2", + "COEX_TRF_SHAPE_NOA", + "COEX_NOA_ONESHOT", + "COEX_NOA_PERIODIC", + "COEX_LE_1", + "COEX_LE_2", + "COEX_ANT_1", + "COEX_ANT_2", + "COEX_ENTER_NOA", + "COEX_EXIT_NOA", + "COEX_BT_SCAN_PROTECT", /* 253 */ + "COEX_DEBUG_ID_END" /* 254 */ + }, + { + "ROAM_DBGID_DEFINITION_START", + "ROAM_MODULE_INIT", + "ROAM_DEV_START", + "ROAM_CONFIG_RSSI_THRESH", + "ROAM_CONFIG_SCAN_PERIOD", + "ROAM_CONFIG_AP_PROFILE", + "ROAM_CONFIG_CHAN_LIST", + "ROAM_CONFIG_SCAN_PARAMS", + "ROAM_CONFIG_RSSI_CHANGE", + "ROAM_SCAN_TIMER_START", + "ROAM_SCAN_TIMER_EXPIRE", + "ROAM_SCAN_TIMER_STOP", + "ROAM_SCAN_STARTED", + "ROAM_SCAN_COMPLETE", + "ROAM_SCAN_CANCELLED", + "ROAM_CANDIDATE_FOUND", + "ROAM_RSSI_ACTIVE_SCAN", + "ROAM_RSSI_ACTIVE_ROAM", + "ROAM_RSSI_GOOD", + "ROAM_BMISS_FIRST_RECV", + "ROAM_DEV_STOP", + "ROAM_FW_OFFLOAD_ENABLE", + "ROAM_CANDIDATE_SSID_MATCH", + "ROAM_CANDIDATE_SECURITY_MATCH", + "ROAM_LOW_RSSI_INTERRUPT", + "ROAM_HIGH_RSSI_INTERRUPT", + "ROAM_SCAN_REQUESTED", + "ROAM_BETTER_CANDIDATE_FOUND", + "ROAM_BETTER_AP_EVENT", + "ROAM_CANCEL_LOW_PRIO_SCAN", + "ROAM_FINAL_BMISS_RECVD", + "ROAM_CONFIG_SCAN_MODE", + "ROAM_BMISS_FINAL_SCAN_ENABLE", + "ROAM_SUITABLE_AP_EVENT", + "ROAM_RSN_IE_PARSE_ERROR", + "ROAM_WPA_IE_PARSE_ERROR", + "ROAM_SCAN_CMD_FROM_HOST", + "ROAM_HO_SORT_CANDIDATE", + "ROAM_HO_SAVE_CANDIDATE", + "ROAM_HO_GET_CANDIDATE", + "ROAM_HO_OFFLOAD_SET_PARAM", + "ROAM_HO_SM", + "ROAM_HO_HTT_SAVED", + "ROAM_HO_SYNC_START", + "ROAM_HO_START", + "ROAM_HO_COMPLETE", + "ROAM_HO_STOP", + "ROAM_HO_HTT_FORWARD", + "ROAM_DBGID_DEFINITION_END" + }, + { + "RESMGR_CHMGR_DEFINITION_START", + "RESMGR_CHMGR_PAUSE_COMPLETE", + "RESMGR_CHMGR_CHANNEL_CHANGE", + "RESMGR_CHMGR_RESUME_COMPLETE", + "RESMGR_CHMGR_VDEV_PAUSE", + "RESMGR_CHMGR_VDEV_UNPAUSE", + "RESMGR_CHMGR_CTS2S_TX_COMP", + "RESMGR_CHMGR_CFEND_TX_COMP", + "RESMGR_CHMGR_DEFINITION_END" + }, + { + "RESMGR_DEFINITION_START", + "RESMGR_OCS_ALLOCRAM_SIZE", + "RESMGR_OCS_RESOURCES", + "RESMGR_LINK_CREATE", + "RESMGR_LINK_DELETE", + "RESMGR_OCS_CHREQ_CREATE", + "RESMGR_OCS_CHREQ_DELETE", + "RESMGR_OCS_CHREQ_START", + "RESMGR_OCS_CHREQ_STOP", + "RESMGR_OCS_SCHEDULER_INVOKED", + "RESMGR_OCS_CHREQ_GRANT", + "RESMGR_OCS_CHREQ_COMPLETE", + "RESMGR_OCS_NEXT_TSFTIME", + "RESMGR_OCS_TSF_TIMEOUT_US", + "RESMGR_OCS_CURR_CAT_WINDOW", + "RESMGR_OCS_CURR_CAT_WINDOW_REQ", + "RESMGR_OCS_CURR_CAT_WINDOW_TIMESLOT", + "RESMGR_OCS_CHREQ_RESTART", + "RESMGR_OCS_CLEANUP_CH_ALLOCATORS", + "RESMGR_OCS_PURGE_CHREQ", + "RESMGR_OCS_CH_ALLOCATOR_FREE", + "RESMGR_OCS_RECOMPUTE_SCHEDULE", + "RESMGR_OCS_NEW_CAT_WINDOW_REQ", + "RESMGR_OCS_NEW_CAT_WINDOW_TIMESLOT", + "RESMGR_OCS_CUR_CH_ALLOC", + "RESMGR_OCS_WIN_CH_ALLOC", + "RESMGR_OCS_SCHED_CH_CHANGE", + "RESMGR_OCS_CONSTRUCT_CAT_WIN", + "RESMGR_OCS_CHREQ_PREEMPTED", + "RESMGR_OCS_CH_SWITCH_REQ", + "RESMGR_OCS_CHANNEL_SWITCHED", + "RESMGR_OCS_CLEANUP_STALE_REQS", + "RESMGR_OCS_CHREQ_UPDATE", + "RESMGR_OCS_REG_NOA_NOTIF", + "RESMGR_OCS_DEREG_NOA_NOTIF", + "RESMGR_OCS_GEN_PERIODIC_NOA", + "RESMGR_OCS_RECAL_QUOTAS", + "RESMGR_OCS_GRANTED_QUOTA_STATS", + "RESMGR_OCS_ALLOCATED_QUOTA_STATS", + "RESMGR_OCS_REQ_QUOTA_STATS", + "RESMGR_OCS_TRACKING_TIME_FIRED", + "RESMGR_VC_ARBITRATE_ATTRIBUTES", + "RESMGR_OCS_LATENCY_STRICT_TIME_SLOT", + "RESMGR_OCS_CURR_TSF", + "RESMGR_OCS_QUOTA_REM", + "RESMGR_OCS_LATENCY_CASE_NO", + "RESMGR_OCS_WIN_CAT_DUR", + "RESMGR_VC_UPDATE_CUR_VC", + "RESMGR_VC_REG_UNREG_LINK", + "RESMGR_VC_PRINT_LINK", + "RESMGR_OCS_MISS_TOLERANCE", + "RESMGR_DYN_SCH_ALLOCRAM_SIZE", + "RESMGR_DYN_SCH_ENABLE", + "RESMGR_DYN_SCH_ACTIVE", + "RESMGR_DYN_SCH_CH_STATS_START", + "RESMGR_DYN_SCH_CH_SX_STATS", + "RESMGR_DYN_SCH_TOT_UTIL_PER", + "RESMGR_DYN_SCH_HOME_CH_QUOTA", + "RESMGR_OCS_REG_RECAL_QUOTA_NOTIF", + "RESMGR_OCS_DEREG_RECAL_QUOTA_NOTIF", + "RESMGR_DEFINITION_END" + }, + { + "VDEV_MGR_DEBID_DEFINITION_START", /* vdev Mgr */ + "VDEV_MGR_FIRST_BEACON_MISS_DETECTED", + "VDEV_MGR_FINAL_BEACON_MISS_DETECTED", + "VDEV_MGR_BEACON_IN_SYNC", + "VDEV_MGR_AP_KEEPALIVE_IDLE", + "VDEV_MGR_AP_KEEPALIVE_INACTIVE", + "VDEV_MGR_AP_KEEPALIVE_UNRESPONSIVE", + "VDEV_MGR_AP_TBTT_CONFIG", + "VDEV_MGR_FIRST_BCN_RECEIVED", + "VDEV_MGR_VDEV_START", + "VDEV_MGR_VDEV_UP", + "VDEV_MGR_PEER_AUTHORIZED", + "VDEV_MGR_OCS_HP_LP_REQ_POSTED", + "VDEV_MGR_VDEV_START_OCS_HP_REQ_COMPLETE", + "VDEV_MGR_VDEV_START_OCS_HP_REQ_STOP", + "VDEV_MGR_HP_START_TIME", + "VDEV_MGR_VDEV_PAUSE_DELAY_UPDATE", + "VDEV_MGR_VDEV_PAUSE_FAIL", + "VDEV_MGR_GEN_PERIODIC_NOA", + "VDEV_MGR_OFF_CHAN_GO_CH_REQ_SETUP", + "VDEV_MGR_DEFINITION_END", + }, + { + "SCAN_START_COMMAND_FAILED", /* scan */ + "SCAN_STOP_COMMAND_FAILED", + "SCAN_EVENT_SEND_FAILED", + "SCAN_ENGINE_START", + "SCAN_ENGINE_CANCEL_COMMAND", + "SCAN_ENGINE_STOP_DUE_TO_TIMEOUT", + "SCAN_EVENT_SEND_TO_HOST", + "SCAN_FWLOG_EVENT_ADD", + "SCAN_FWLOG_EVENT_REM", + "SCAN_FWLOG_EVENT_PREEMPTED", + "SCAN_FWLOG_EVENT_RESTARTED", + "SCAN_FWLOG_EVENT_COMPLETED", + }, + { + "RATECTRL_DBGID_DEFINITION_START", /* Rate ctrl */ + "RATECTRL_DBGID_ASSOC", + "RATECTRL_DBGID_NSS_CHANGE", + "RATECTRL_DBGID_CHAINMASK_ERR", + "RATECTRL_DBGID_UNEXPECTED_FRAME", + "RATECTRL_DBGID_WAL_RCQUERY", + "RATECTRL_DBGID_WAL_RCUPDATE", + "RATECTRL_DBGID_GTX_UPDATE", + "RATECTRL_DBGID_DEFINITION_END" + }, + { + "AP_PS_DBGID_DEFINITION_START", + "AP_PS_DBGID_UPDATE_TIM", + "AP_PS_DBGID_PEER_STATE_CHANGE", + "AP_PS_DBGID_PSPOLL", + "AP_PS_DBGID_PEER_CREATE", + "AP_PS_DBGID_PEER_DELETE", + "AP_PS_DBGID_VDEV_CREATE", + "AP_PS_DBGID_VDEV_DELETE", + "AP_PS_DBGID_SYNC_TIM", + "AP_PS_DBGID_NEXT_RESPONSE", + "AP_PS_DBGID_START_SP", + "AP_PS_DBGID_COMPLETED_EOSP", + "AP_PS_DBGID_TRIGGER", + "AP_PS_DBGID_DUPLICATE_TRIGGER", + "AP_PS_DBGID_UAPSD_RESPONSE", + "AP_PS_DBGID_SEND_COMPLETE", + "AP_PS_DBGID_SEND_N_COMPLETE", + "AP_PS_DBGID_DETECT_OUT_OF_SYNC_STA", + "AP_PS_DBGID_DELIVER_CAB", + }, + { + "" /* Block Ack */ + }, + /* Mgmt TxRx */ + { + "MGMT_TXRX_DBGID_DEFINITION_START", + "MGMT_TXRX_FORWARD_TO_HOST", + "MGMT_TXRX_DBGID_DEFINITION_END", + }, + { /* Data TxRx */ + "DATA_TXRX_DBGID_DEFINITION_START", + "DATA_TXRX_DBGID_RX_DATA_SEQ_LEN_INFO", + "DATA_TXRX_DBGID_DEFINITION_END", + }, + {"" /* HTT */ + }, + {"" /* HOST */ + }, + {"" /* BEACON */ + "BEACON_EVENT_SWBA_SEND_FAILED", + "BEACON_EVENT_EARLY_RX_BMISS_STATUS", + "BEACON_EVENT_EARLY_RX_SLEEP_SLOP", + "BEACON_EVENT_EARLY_RX_CONT_BMISS_TIMEOUT", + "BEACON_EVENT_EARLY_RX_PAUSE_SKIP_BCN_NUM", + "BEACON_EVENT_EARLY_RX_CLK_DRIFT", + "BEACON_EVENT_EARLY_RX_AP_DRIFT", + "BEACON_EVENT_EARLY_RX_BCN_TYPE",}, + { /* Offload Mgr */ + "OFFLOAD_MGR_DBGID_DEFINITION_START", + "OFFLOADMGR_REGISTER_OFFLOAD", + "OFFLOADMGR_DEREGISTER_OFFLOAD", + "OFFLOADMGR_NO_REG_DATA_HANDLERS", + "OFFLOADMGR_NO_REG_EVENT_HANDLERS", + "OFFLOADMGR_REG_OFFLOAD_FAILED", + "OFFLOADMGR_DBGID_DEFINITION_END", + }, + { + "WAL_DBGID_DEFINITION_START", + "WAL_DBGID_FAST_WAKE_REQUEST", + "WAL_DBGID_FAST_WAKE_RELEASE", + "WAL_DBGID_SET_POWER_STATE", + "WAL_DBGID_MISSING", + "WAL_DBGID_CHANNEL_CHANGE_FORCE_RESET", + "WAL_DBGID_CHANNEL_CHANGE", + "WAL_DBGID_VDEV_START", + "WAL_DBGID_VDEV_STOP", + "WAL_DBGID_VDEV_UP", + "WAL_DBGID_VDEV_DOWN", + "WAL_DBGID_SW_WDOG_RESET", + "WAL_DBGID_TX_SCH_REGISTER_TIDQ", + "WAL_DBGID_TX_SCH_UNREGISTER_TIDQ", + "WAL_DBGID_TX_SCH_TICKLE_TIDQ", + "WAL_DBGID_XCESS_FAILURES", + "WAL_DBGID_AST_ADD_WDS_ENTRY", + "WAL_DBGID_AST_DEL_WDS_ENTRY", + "WAL_DBGID_AST_WDS_ENTRY_PEER_CHG", + "WAL_DBGID_AST_WDS_SRC_LEARN_FAIL", + "WAL_DBGID_STA_KICKOUT", + "WAL_DBGID_BAR_TX_FAIL", + "WAL_DBGID_BAR_ALLOC_FAIL", + "WAL_DBGID_LOCAL_DATA_TX_FAIL", + "WAL_DBGID_SECURITY_PM4_QUEUED", + "WAL_DBGID_SECURITY_GM1_QUEUED", + "WAL_DBGID_SECURITY_PM4_SENT", + "WAL_DBGID_SECURITY_ALLOW_DATA", + "WAL_DBGID_SECURITY_UCAST_KEY_SET", + "WAL_DBGID_SECURITY_MCAST_KEY_SET", + "WAL_DBGID_SECURITY_ENCR_EN", + "WAL_DBGID_BB_WDOG_TRIGGERED", + "WAL_DBGID_RX_LOCAL_BUFS_LWM", + "WAL_DBGID_RX_LOCAL_DROP_LARGE_MGMT", + "WAL_DBGID_VHT_ILLEGAL_RATE_PHY_ERR_DETECTED", + "WAL_DBGID_DEV_RESET", + "WAL_DBGID_TX_BA_SETUP", + "WAL_DBGID_RX_BA_SETUP", + "WAL_DBGID_DEV_TX_TIMEOUT", + "WAL_DBGID_DEV_RX_TIMEOUT", + "WAL_DBGID_STA_VDEV_XRETRY", + "WAL_DBGID_DCS", + "WAL_DBGID_MGMT_TX_FAIL", + "WAL_DBGID_SET_M4_SENT_MANUALLY", + "WAL_DBGID_PROCESS_4_WAY_HANDSHAKE", + "WAL_DBGID_WAL_CHANNEL_CHANGE_START", + "WAL_DBGID_WAL_CHANNEL_CHANGE_COMPLETE", + "WAL_DBGID_WHAL_CHANNEL_CHANGE_START", + "WAL_DBGID_WHAL_CHANNEL_CHANGE_COMPLETE", + "WAL_DBGID_TX_MGMT_DESCID_SEQ_TYPE_LEN", + "WAL_DBGID_TX_DATA_MSDUID_SEQ_TYPE_LEN", + "WAL_DBGID_TX_DISCARD", + "WAL_DBGID_TX_MGMT_COMP_DESCID_STATUS", + "WAL_DBGID_TX_DATA_COMP_MSDUID_STATUS", + "WAL_DBGID_RESET_PCU_CYCLE_CNT", + "WAL_DBGID_SETUP_RSSI_INTERRUPTS", + "WAL_DBGID_BRSSI_CONFIG", + "WAL_DBGID_CURRENT_BRSSI_AVE", + "WAL_DBGID_BCN_TX_COMP", + "WAL_DBGID_SET_HW_CHAINMASK", + "WAL_DBGID_SET_HW_CHAINMASK_TXRX_STOP_FAIL", + "WAL_DBGID_GET_HW_CHAINMASK", + "WAL_DBGID_SMPS_DISABLE", + "WAL_DBGID_SMPS_ENABLE_HW_CNTRL", + "WAL_DBGID_SMPS_SWSEL_CHAINMASK", + "WAL_DBGID_DEFINITION_END", + }, + { + "" /* DE */ + }, + { + "" /* pcie lp */ + }, + { + /* RTT */ + "RTT_CALL_FLOW", + "RTT_REQ_SUB_TYPE", + "RTT_MEAS_REQ_HEAD", + "RTT_MEAS_REQ_BODY", + "", + "", + "RTT_INIT_GLOBAL_STATE", + "", + "RTT_REPORT", + "", + "RTT_ERROR_REPORT", + "RTT_TIMER_STOP", + "RTT_SEND_TM_FRAME", + "RTT_V3_RESP_CNT", + "RTT_V3_RESP_FINISH", + "RTT_CHANNEL_SWITCH_REQ", + "RTT_CHANNEL_SWITCH_GRANT", + "RTT_CHANNEL_SWITCH_COMPLETE", + "RTT_CHANNEL_SWITCH_PREEMPT", + "RTT_CHANNEL_SWITCH_STOP", + "RTT_TIMER_START", + }, + { /* RESOURCE */ + "RESOURCE_DBGID_DEFINITION_START", + "RESOURCE_PEER_ALLOC", + "RESOURCE_PEER_FREE", + "RESOURCE_PEER_ALLOC_WAL_PEER", + "RESOURCE_PEER_NBRHOOD_MGMT_ALLOC", + "RESOURCE_PEER_NBRHOOD_MGMT_INFO,RESOURCE_DBGID_DEFINITION_END", + }, + { /* DCS */ + "WLAN_DCS_DBGID_INIT", + "WLAN_DCS_DBGID_WMI_CWINT", + "WLAN_DCS_DBGID_TIMER", + "WLAN_DCS_DBGID_CMDG", + "WLAN_DCS_DBGID_CMDS", + "WLAN_DCS_DBGID_DINIT" + }, + { /* CACHEMGR */ + "" + }, + { /* ANI */ + "ANI_DBGID_POLL", + "ANI_DBGID_CONTROL", + "ANI_DBGID_OFDM_PARAMS", + "ANI_DBGID_CCK_PARAMS", + "ANI_DBGID_RESET", + "ANI_DBGID_RESTART", + "ANI_DBGID_OFDM_LEVEL", + "ANI_DBGID_CCK_LEVEL", + "ANI_DBGID_FIRSTEP", + "ANI_DBGID_CYCPWR", + "ANI_DBGID_MRC_CCK", + "ANI_DBGID_SELF_CORR_LOW", + "ANI_DBGID_ENABLE", + "ANI_DBGID_CURRENT_LEVEL", + "ANI_DBGID_POLL_PERIOD", + "ANI_DBGID_LISTEN_PERIOD", + "ANI_DBGID_OFDM_LEVEL_CFG", + "ANI_DBGID_CCK_LEVEL_CFG" + }, + { + "P2P_DBGID_DEFINITION_START", + "P2P_DEV_REGISTER", + "P2P_HANDLE_NOA", + "P2P_UPDATE_SCHEDULE_OPPS", + "P2P_UPDATE_SCHEDULE", + "P2P_UPDATE_START_TIME", + "P2P_UPDATE_START_TIME_DIFF_TSF32", + "P2P_UPDATE_START_TIME_FINAL", + "P2P_SETUP_SCHEDULE_TIMER", + "P2P_PROCESS_SCHEDULE_AFTER_CALC", + "P2P_PROCESS_SCHEDULE_STARTED_TIMER", + "P2P_CALC_SCHEDULES_FIRST_CALL_ALL_NEXT_EVENT", + "P2P_CALC_SCHEDULES_FIRST_VALUE", + "P2P_CALC_SCHEDULES_EARLIEST_NEXT_EVENT", + "P2P_CALC_SCHEDULES_SANITY_COUNT", + "P2P_CALC_SCHEDULES_CALL_ALL_NEXT_EVENT_FROM_WHILE_LOOP", + "P2P_CALC_SCHEDULES_TIMEOUT_1", + "P2P_CALC_SCHEDULES_TIMEOUT_2", + "P2P_FIND_ALL_NEXT_EVENTS_REQ_EXPIRED", + "P2P_FIND_ALL_NEXT_EVENTS_REQ_ACTIVE", + "P2P_FIND_NEXT_EVENT_REQ_NOT_STARTED", + "P2P_FIND_NEXT_EVENT_REQ_COMPLETE_NON_PERIODIC", + "P2P_FIND_NEXT_EVENT_IN_MID_OF_NOA", + "P2P_FIND_NEXT_EVENT_REQ_COMPLETE", + "P2P_SCHEDULE_TIMEOUT", + "P2P_CALC_SCHEDULES_ENTER", + "P2P_PROCESS_SCHEDULE_ENTER", + "P2P_FIND_ALL_NEXT_EVENTS_INDIVIDUAL_REQ_AFTER_CHANGE", + "P2P_FIND_ALL_NEXT_EVENTS_INDIVIDUAL_REQ_BEFORE_CHANGE", + "P2P_FIND_ALL_NEXT_EVENTS_ENTER", + "P2P_FIND_NEXT_EVENT_ENTER", + "P2P_NOA_GO_PRESENT", + "P2P_NOA_GO_ABSENT", + "P2P_GO_NOA_NOTIF", + "P2P_GO_TBTT_OFFSET", + "P2P_GO_GET_NOA_INFO", + "P2P_GO_ADD_ONE_SHOT_NOA", + "P2P_GO_GET_NOA_IE", + "P2P_GO_BCN_TX_COMP", + "P2P_DBGID_DEFINITION_END", + }, + { + "CSA_DBGID_DEFINITION_START", + "CSA_OFFLOAD_POOL_INIT", + "CSA_OFFLOAD_REGISTER_VDEV", + "CSA_OFFLOAD_DEREGISTER_VDEV", + "CSA_DEREGISTER_VDEV_ERROR", + "CSA_OFFLOAD_BEACON_RECEIVED", + "CSA_OFFLOAD_BEACON_CSA_RECV", + "CSA_OFFLOAD_CSA_RECV_ERROR_IE", + "CSA_OFFLOAD_CSA_TIMER_ERROR", + "CSA_OFFLOAD_CSA_TIMER_EXP", + "CSA_OFFLOAD_WMI_EVENT_ERROR", + "CSA_OFFLOAD_WMI_EVENT_SENT", + "CSA_OFFLOAD_WMI_CHANSWITCH_RECV", + "CSA_DBGID_DEFINITION_END", + }, + { /* NLO offload */ + "" + }, + { + "WLAN_CHATTER_DBGID_DEFINITION_START", + "WLAN_CHATTER_ENTER", + "WLAN_CHATTER_EXIT", + "WLAN_CHATTER_FILTER_HIT", + "WLAN_CHATTER_FILTER_MISS", + "WLAN_CHATTER_FILTER_FULL", + "WLAN_CHATTER_FILTER_TM_ADJ", + "WLAN_CHATTER_BUFFER_FULL", + "WLAN_CHATTER_TIMEOUT", + "WLAN_CHATTER_DBGID_DEFINITION_END", + }, + { + "WOW_DBGID_DEFINITION_START", + "WOW_ENABLE_CMDID", + "WOW_RECV_DATA_PKT", + "WOW_WAKE_HOST_DATA", + "WOW_RECV_MGMT", + "WOW_WAKE_HOST_MGMT", + "WOW_RECV_EVENT", + "WOW_WAKE_HOST_EVENT", + "WOW_INIT", + "WOW_RECV_MAGIC_PKT", + "WOW_RECV_BITMAP_PATTERN", + "WOW_AP_VDEV_DISALLOW", + "WOW_STA_VDEV_DISALLOW", + "WOW_P2PGO_VDEV_DISALLOW", + "WOW_NS_OFLD_ENABLE", + "WOW_ARP_OFLD_ENABLE", + "WOW_NS_ARP_OFLD_DISABLE", + "WOW_NS_RECEIVED", + "WOW_NS_REPLIED", + "WOW_ARP_RECEIVED", + "WOW_ARP_REPLIED", + "WOW_DBGID_DEFINITION_END", + }, + { /* WAL VDEV */ + "" + }, + { /* WAL PDEV */ + "" + }, + { /* TEST */ + "TP_CHANGE_CHANNEL", + "TP_LOCAL_SEND", + }, + { /* STA SMPS */ + "STA_SMPS_DBGID_DEFINITION_START", + "STA_SMPS_DBGID_CREATE_PDEV_INSTANCE", + "STA_SMPS_DBGID_CREATE_VIRTUAL_CHAN_INSTANCE", + "STA_SMPS_DBGID_DELETE_VIRTUAL_CHAN_INSTANCE", + "STA_SMPS_DBGID_CREATE_STA_INSTANCE", + "STA_SMPS_DBGID_DELETE_STA_INSTANCE", + "STA_SMPS_DBGID_VIRTUAL_CHAN_SMPS_START", + "STA_SMPS_DBGID_VIRTUAL_CHAN_SMPS_STOP", + "STA_SMPS_DBGID_SEND_SMPS_ACTION_FRAME", + "STA_SMPS_DBGID_HOST_FORCED_MODE", + "STA_SMPS_DBGID_FW_FORCED_MODE", + "STA_SMPS_DBGID_RSSI_THRESHOLD_CROSSED", + "STA_SMPS_DBGID_SMPS_ACTION_FRAME_COMPLETION", + "STA_SMPS_DBGID_DTIM_EBT_EVENT_CHMASK_UPDATE", + "STA_SMPS_DBGID_DTIM_CHMASK_UPDATE", + "STA_SMPS_DBGID_DTIM_BEACON_EVENT_CHMASK_UPDATE", + "STA_SMPS_DBGID_DTIM_POWER_STATE_CHANGE", + "STA_SMPS_DBGID_DTIM_CHMASK_UPDATE_SLEEP", + "STA_SMPS_DBGID_DTIM_CHMASK_UPDATE_AWAKE", + "SMPS_DBGID_DEFINITION_END", + }, + { /* SWBMISS */ + "SWBMISS_DBGID_DEFINITION_START", + "SWBMISS_ENABLED", + "SWBMISS_DISABLED", + "SWBMISS_DBGID_DEFINITION_END", + }, + { /* WMMAC */ + "" + }, + { /* TDLS */ + "TDLS_DBGID_DEFINITION_START", + "TDLS_DBGID_VDEV_CREATE", + "TDLS_DBGID_VDEV_DELETE", + "TDLS_DBGID_ENABLED_PASSIVE", + "TDLS_DBGID_ENABLED_ACTIVE", + "TDLS_DBGID_DISABLED", + "TDLS_DBGID_CONNTRACK_TIMER", + "TDLS_DBGID_WAL_SET", + "TDLS_DBGID_WAL_GET", + "TDLS_DBGID_WAL_PEER_UPDATE_SET", + "TDLS_DBGID_WAL_PEER_UPDATE_EVT", + "TDLS_DBGID_WAL_VDEV_CREATE", + "TDLS_DBGID_WAL_VDEV_DELETE", + "TDLS_DBGID_WLAN_EVENT", + "TDLS_DBGID_WLAN_PEER_UPDATE_SET", + "TDLS_DBGID_PEER_EVT_DRP_THRESH", + "TDLS_DBGID_PEER_EVT_DRP_RATE", + "TDLS_DBGID_PEER_EVT_DRP_RSSI", + "TDLS_DBGID_PEER_EVT_DISCOVER", + "TDLS_DBGID_PEER_EVT_DELETE", + "TDLS_DBGID_PEER_CAP_UPDATE", + "TDLS_DBGID_UAPSD_SEND_PTI_FRAME", + "TDLS_DBGID_UAPSD_SEND_PTI_FRAME2PEER", + "TDLS_DBGID_UAPSD_START_PTR_TIMER", + "TDLS_DBGID_UAPSD_CANCEL_PTR_TIMER", + "TDLS_DBGID_UAPSD_PTR_TIMER_TIMEOUT", + "TDLS_DBGID_UAPSD_STA_PS_EVENT_HANDLER", + "TDLS_DBGID_UAPSD_PEER_EVENT_HANDLER", + "TDLS_DBGID_UAPSD_PS_DEFAULT_SETTINGS", + "TDLS_DBGID_UAPSD_GENERIC", + }, + { /* HB */ + "WLAN_HB_DBGID_DEFINITION_START", + "WLAN_HB_DBGID_INIT", + "WLAN_HB_DBGID_TCP_GET_TXBUF_FAIL", + "WLAN_HB_DBGID_TCP_SEND_FAIL", + "WLAN_HB_DBGID_BSS_PEER_NULL", + "WLAN_HB_DBGID_UDP_GET_TXBUF_FAIL", + "WLAN_HB_DBGID_UDP_SEND_FAIL", + "WLAN_HB_DBGID_WMI_CMD_INVALID_PARAM", + "WLAN_HB_DBGID_WMI_CMD_INVALID_OP", + "WLAN_HB_DBGID_WOW_NOT_ENTERED", + "WLAN_HB_DBGID_ALLOC_SESS_FAIL", + "WLAN_HB_DBGID_CTX_NULL", + "WLAN_HB_DBGID_CHKSUM_ERR", + "WLAN_HB_DBGID_UDP_TX", + "WLAN_HB_DBGID_TCP_TX", + "WLAN_HB_DBGID_DEFINITION_END", + }, + { /* TXBF */ + "TXBFEE_DBGID_START", + "TXBFEE_DBGID_NDPA_RECEIVED", + "TXBFEE_DBGID_HOST_CONFIG_TXBFEE_TYPE", + "TXBFER_DBGID_SEND_NDPA", + "TXBFER_DBGID_GET_NDPA_BUF_FAIL", + "TXBFER_DBGID_SEND_NDPA_FAIL", + "TXBFER_DBGID_GET_NDP_BUF_FAIL", + "TXBFER_DBGID_SEND_NDP_FAIL", + "TXBFER_DBGID_GET_BRPOLL_BUF_FAIL", + "TXBFER_DBGID_SEND_BRPOLL_FAIL", + "TXBFER_DBGID_HOST_CONFIG_CMDID", + "TXBFEE_DBGID_HOST_CONFIG_CMDID", + "TXBFEE_DBGID_ENABLED_ENABLED_UPLOAD_H", + "TXBFEE_DBGID_UPLOADH_CV_TAG", + "TXBFEE_DBGID_UPLOADH_H_TAG", + "TXBFEE_DBGID_CAPTUREH_RECEIVED", + "TXBFEE_DBGID_PACKET_IS_STEERED", + "TXBFEE_UPLOADH_EVENT_ALLOC_MEM_FAIL", + "TXBFEE_DBGID_END", + }, + { /*BATCH SCAN */ + }, + { /*THERMAL MGR */ + "THERMAL_MGR_DBGID_DEFINITION_START", + "THERMAL_MGR_NEW_THRESH", + "THERMAL_MGR_THRESH_CROSSED", + "THERMAL_MGR_DBGID_DEFINITION END", + }, + { /* WLAN_MODULE_PHYERR_DFS */ + "" + }, + { + /* WLAN_MODULE_RMC */ + "RMC_DBGID_DEFINITION_START", + "RMC_CREATE_INSTANCE", + "RMC_DELETE_INSTANCE", + "RMC_LDR_SEL", + "RMC_NO_LDR", + "RMC_LDR_NOT_SEL", + "RMC_LDR_INF_SENT", + "RMC_PEER_ADD", + "RMC_PEER_DELETE", + "RMC_PEER_UNKNOWN", + "RMC_SET_MODE", + "RMC_SET_ACTION_PERIOD", + "RMC_ACRION_FRAME_RX", + "RMC_DBGID_DEFINITION_END", + }, + { + /* WLAN_MODULE_STATS */ + "WLAN_STATS_DBGID_DEFINITION_START", + "WLAN_STATS_DBGID_EST_LINKSPEED_VDEV_EN_DIS", + "WLAN_STATS_DBGID_EST_LINKSPEED_CHAN_TIME_START", + "WLAN_STATS_DBGID_EST_LINKSPEED_CHAN_TIME_END", + "WLAN_STATS_DBGID_EST_LINKSPEED_CALC", + "WLAN_STATS_DBGID_EST_LINKSPEED_UPDATE_HOME_CHAN", + "WLAN_STATS_DBGID_DEFINITION_END", + }, + { + /* WLAN_MODULE_NAN */ + }, + { + /* WLAN_MODULE_IBSS_PWRSAVE */ + "IBSS_PS_DBGID_DEFINITION_START", + "IBSS_PS_DBGID_PEER_CREATE", + "IBSS_PS_DBGID_PEER_DELETE", + "IBSS_PS_DBGID_VDEV_CREATE", + "IBSS_PS_DBGID_VDEV_DELETE", + "IBSS_PS_DBGID_VDEV_EVENT", + "IBSS_PS_DBGID_PEER_EVENT", + "IBSS_PS_DBGID_DELIVER_CAB", + "IBSS_PS_DBGID_DELIVER_UC_DATA", + "IBSS_PS_DBGID_DELIVER_UC_DATA_ERROR", + "IBSS_PS_DBGID_UC_INACTIVITY_TMR_RESTART", + "IBSS_PS_DBGID_MC_INACTIVITY_TMR_RESTART", + "IBSS_PS_DBGID_NULL_TX_COMPLETION", + "IBSS_PS_DBGID_ATIM_TIMER_START", + "IBSS_PS_DBGID_UC_ATIM_SEND", + "IBSS_PS_DBGID_BC_ATIM_SEND", + "IBSS_PS_DBGID_UC_TIMEOUT", + "IBSS_PS_DBGID_PWR_COLLAPSE_ALLOWED", + "IBSS_PS_DBGID_PWR_COLLAPSE_NOT_ALLOWED", + "IBSS_PS_DBGID_SET_PARAM", + "IBSS_PS_DBGID_HOST_TX_PAUSE", + "IBSS_PS_DBGID_HOST_TX_UNPAUSE", + "IBSS_PS_DBGID_PS_DESC_BIN_HWM", + "IBSS_PS_DBGID_PS_DESC_BIN_LWM", + "IBSS_PS_DBGID_PS_KICKOUT_PEER", + "IBSS_PS_DBGID_SET_PEER_PARAM", + "IBSS_PS_DBGID_BCN_ATIM_WIN_MISMATCH", + "IBSS_PS_DBGID_RX_CHAINMASK_CHANGE", + }, + { + /* HIF UART Interface DBGIDs */ + "HIF_UART_DBGID_START", + "HIF_UART_DBGID_POWER_STATE", + "HIF_UART_DBGID_TXRX_FLOW", + "HIF_UART_DBGID_TXRX_CTRL_CHAR", + "HIF_UART_DBGID_TXRX_BUF_DUMP", + }, + { + /* LPI */ + "" + }, + { + /* EXTSCAN DBGIDs */ + "EXTSCAN_START", + "EXTSCAN_STOP", + "EXTSCAN_CLEAR_ENTRY_CONTENT", + "EXTSCAN_GET_FREE_ENTRY_SUCCESS", + "EXTSCAN_GET_FREE_ENTRY_INCONSISTENT", + "EXTSCAN_GET_FREE_ENTRY_NO_MORE_ENTRIES", + "EXTSCAN_CREATE_ENTRY_SUCCESS", + "EXTSCAN_CREATE_ENTRY_ERROR", + "EXTSCAN_SEARCH_SCAN_ENTRY_QUEUE", + "EXTSCAN_SEARCH_SCAN_ENTRY_KEY_FOUND", + "EXTSCAN_SEARCH_SCAN_ENTRY_KEY_NOT_FOUND", + "EXTSCAN_ADD_ENTRY", + "EXTSCAN_BUCKET_SEND_OPERATION_EVENT", + "EXTSCAN_BUCKET_SEND_OPERATION_EVENT_FAILED", + "EXTSCAN_BUCKET_START_SCAN_CYCLE", + "EXTSCAN_BUCKET_PERIODIC_TIMER", + "EXTSCAN_SEND_START_STOP_EVENT", + "EXTSCAN_NOTIFY_WLAN_CHANGE", + "EXTSCAN_NOTIFY_WLAN_HOTLIST_MATCH", + "EXTSCAN_MAIN_RECEIVED_FRAME", + "EXTSCAN_MAIN_NO_SSID_IE", + "EXTSCAN_MAIN_MALFORMED_FRAME", + "EXTSCAN_FIND_BSSID_BY_REFERENCE", + "EXTSCAN_FIND_BSSID_BY_REFERENCE_ERROR", + "EXTSCAN_NOTIFY_TABLE_USAGE", + "EXTSCAN_FOUND_RSSI_ENTRY", + "EXTSCAN_BSSID_FOUND_RSSI_SAMPLE", + "EXTSCAN_BSSID_ADDED_RSSI_SAMPLE", + "EXTSCAN_BSSID_REPLACED_RSSI_SAMPLE", + "EXTSCAN_BSSID_TRANSFER_CURRENT_SAMPLES", + "EXTSCAN_BUCKET_PROCESS_SCAN_EVENT", + "EXTSCAN_BUCKET_CANNOT_FIND_BUCKET", + "EXTSCAN_START_SCAN_REQUEST_FAILED", + "EXTSCAN_BUCKET_STOP_CURRENT_SCANS", + "EXTSCAN_BUCKET_SCAN_STOP_REQUEST", + "EXTSCAN_BUCKET_PERIODIC_TIMER_ERROR", + "EXTSCAN_BUCKET_START_OPERATION", + "EXTSCAN_START_INTERNAL_ERROR", + "EXTSCAN_NOTIFY_HOTLIST_MATCH", + "EXTSCAN_CONFIG_HOTLIST_TABLE", + "EXTSCAN_CONFIG_WLAN_CHANGE_TABLE", + }, + { /* UNIT_TEST */ + "UNIT_TEST_GEN", + }, + { /* MLME */ + "MLME_DEBUG_CMN", + "MLME_IF", + "MLME_AUTH", + "MLME_REASSOC", + "MLME_DEAUTH", + "MLME_DISASSOC", + "MLME_ROAM", + "MLME_RETRY", + "MLME_TIMER", + "MLME_FRMPARSE", + }, + { /*SUPPLICANT */ + "SUPPL_INIT", + "SUPPL_RECV_EAPOL", + "SUPPL_RECV_EAPOL_TIMEOUT", + "SUPPL_SEND_EAPOL", + "SUPPL_MIC_MISMATCH", + "SUPPL_FINISH", + }, +}; + +int dbglog_module_log_enable(wmi_unified_t wmi_handle, uint32_t mod_id, + bool isenable) +{ + uint32_t val = 0; + + if (mod_id > WLAN_MODULE_ID_MAX) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("dbglog_module_log_enable: Invalid module id %d\n", + mod_id)); + return -EINVAL; + } + + WMI_DBGLOG_SET_MODULE_ID(val, mod_id); + if (isenable) { + /* set it to global module level */ + WMI_DBGLOG_SET_LOG_LEVEL(val, DBGLOG_INFO); + } else { + /* set it to ERROR level */ + WMI_DBGLOG_SET_LOG_LEVEL(val, DBGLOG_ERR); + } + wma_config_debug_module_cmd(wmi_handle, WMI_DEBUG_LOG_PARAM_LOG_LEVEL, + val, NULL, 0); + + return 0; +} + +int dbglog_vap_log_enable(wmi_unified_t wmi_handle, uint16_t vap_id, + bool isenable) +{ + if (vap_id > DBGLOG_MAX_VDEVID) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("dbglog_vap_log_enable:Invalid vap_id %d\n", + vap_id)); + return -EINVAL; + } + + wma_config_debug_module_cmd(wmi_handle, + isenable ? WMI_DEBUG_LOG_PARAM_VDEV_ENABLE : + WMI_DEBUG_LOG_PARAM_VDEV_DISABLE, vap_id, + NULL, 0); + + return 0; +} + +int dbglog_set_log_lvl(wmi_unified_t wmi_handle, DBGLOG_LOG_LVL log_lvl) +{ + uint32_t val = 0; + + if (log_lvl > DBGLOG_LVL_MAX) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("dbglog_set_log_lvl:Invalid log level %d\n", + log_lvl)); + return -EINVAL; + } + + WMI_DBGLOG_SET_MODULE_ID(val, WMI_DEBUG_LOG_MODULE_ALL); + WMI_DBGLOG_SET_LOG_LEVEL(val, log_lvl); + wma_config_debug_module_cmd(wmi_handle, WMI_DEBUG_LOG_PARAM_LOG_LEVEL, + val, NULL, 0); + + return 0; +} + +int dbglog_set_mod_log_lvl(wmi_unified_t wmi_handle, uint32_t mod_log_lvl) +{ + /* set the global module level to log_lvl */ + wma_config_debug_module_cmd(wmi_handle, WMI_DEBUG_LOG_PARAM_LOG_LEVEL, + mod_log_lvl, NULL, 0); + + return 0; +} + +int dbglog_set_mod_wow_log_lvl(wmi_unified_t wmi_handle, uint32_t mod_log_lvl) +{ + /* set the global module level to log_lvl */ + wma_config_debug_module_cmd(wmi_handle, + WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP, + mod_log_lvl, NULL, 0); + + return 0; +} + +void +dbglog_set_vap_enable_bitmap(wmi_unified_t wmi_handle, + uint32_t vap_enable_bitmap) +{ + wma_config_debug_module_cmd(wmi_handle, + WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP, + vap_enable_bitmap, NULL, 0); +} + +void +dbglog_set_mod_enable_bitmap(wmi_unified_t wmi_handle, uint32_t log_level, + uint32_t *mod_enable_bitmap, uint32_t bitmap_len) +{ + wma_config_debug_module_cmd(wmi_handle, + WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP, + log_level, mod_enable_bitmap, bitmap_len); +} + +int dbglog_report_enable(wmi_unified_t wmi_handle, bool isenable) +{ + int bitmap[2] = { 0 }; + + if (isenable) { + /* set the vap enable bitmap */ + dbglog_set_vap_enable_bitmap(wmi_handle, 0xFFFF); + bitmap[0] = 0xFFFFFFFF; + bitmap[1] = 0x1F; + /* set the module level bitmap */ + dbglog_set_mod_enable_bitmap(wmi_handle, 0x0, bitmap, 2); + } else { + dbglog_set_vap_enable_bitmap(wmi_handle, bitmap[0]); + dbglog_set_mod_enable_bitmap(wmi_handle, DBGLOG_LVL_MAX, bitmap, + 2); + } + return 0; +} + +static char *dbglog_get_msg(uint32_t moduleid, uint32_t debugid) +{ + static char unknown_str[64]; + + if (moduleid < WLAN_MODULE_ID_MAX && debugid < MAX_DBG_MSGS) { + char *str = DBG_MSG_ARR[moduleid][debugid]; + if (str && str[0] != '\0') + return str; + } + + snprintf(unknown_str, sizeof(unknown_str), + "UNKNOWN %u:%u", moduleid, debugid); + + return unknown_str; +} + +static +void dbglog_printf(uint32_t timestamp, uint16_t vap_id, const char *fmt, ...) +{ + char buf[128]; + va_list ap; + + if (vap_id < DBGLOG_MAX_VDEVID) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + (DBGLOG_PRINT_PREFIX "[%u] vap-%u ", timestamp, + vap_id)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + (DBGLOG_PRINT_PREFIX "[%u] ", timestamp)); + } + + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s\n", buf)); +} + +static void +dbglog_printf_no_line_break(uint32_t timestamp, + uint16_t vap_id, const char *fmt, ...) +{ + char buf[128]; + va_list ap; + + if (vap_id < DBGLOG_MAX_VDEVID) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + (DBGLOG_PRINT_PREFIX "[%u] vap-%u ", timestamp, + vap_id)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + (DBGLOG_PRINT_PREFIX "[%u] ", timestamp)); + } + + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s", buf)); +} + +#define USE_NUMERIC 0 + +static A_BOOL +dbglog_default_print_handler(uint32_t mod_id, uint16_t vap_id, uint32_t dbg_id, + uint32_t timestamp, uint16_t numargs, + uint32_t *args) +{ + int i; + + if (vap_id < DBGLOG_MAX_VDEVID) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + (DBGLOG_PRINT_PREFIX "[%u] vap-%u %s ( ", + timestamp, vap_id, dbglog_get_msg(mod_id, + dbg_id))); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + (DBGLOG_PRINT_PREFIX "[%u] %s ( ", timestamp, + dbglog_get_msg(mod_id, dbg_id))); + } + + for (i = 0; i < numargs; i++) { +#if USE_NUMERIC + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%u", args[i])); +#else + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%#x", args[i])); +#endif + if ((i + 1) < numargs) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (", ")); + } + } + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (" )\n")); + + return true; +} + +#define DBGLOG_PARSE_ARGS_STRING_LENGTH (DBGLOG_NUM_ARGS_MAX * 11 + 10) +static int dbglog_print_raw_data(uint32_t *buffer, uint32_t length) +{ + uint32_t timestamp; + uint32_t debugid; + uint32_t moduleid; + uint16_t numargs, curArgs; + uint32_t count = 0, totalWriteLen, writeLen; + char parseArgsString[DBGLOG_PARSE_ARGS_STRING_LENGTH]; + char *dbgidString; + + while ((count + 1) < length) { + + debugid = DBGLOG_GET_DBGID(buffer[count + 1]); + moduleid = DBGLOG_GET_MODULEID(buffer[count + 1]); + numargs = DBGLOG_GET_NUMARGS(buffer[count + 1]); + timestamp = DBGLOG_GET_TIME_STAMP(buffer[count]); + + if (moduleid < WLAN_MODULE_ID_MAX && debugid < MAX_DBG_MSGS + && numargs <= DBGLOG_NUM_ARGS_MAX) { + + OS_MEMZERO(parseArgsString, sizeof(parseArgsString)); + totalWriteLen = 0; + + if (!numargs || (count + numargs + 2 > length)) + goto skip_args_processing; + + for (curArgs = 0; curArgs < numargs; curArgs++) { + /* + * Using sprintf_s instead of sprintf, + * to avoid length overflow + */ + writeLen = + snprintf(parseArgsString + totalWriteLen, + DBGLOG_PARSE_ARGS_STRING_LENGTH - + totalWriteLen, "%x ", + buffer[count + 2 + curArgs]); + totalWriteLen += writeLen; + } +skip_args_processing: + if (debugid < MAX_DBG_MSGS) { + dbgidString = DBG_MSG_ARR[moduleid][debugid]; + if (dbgidString) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("fw:%s(%x %x):%s\n", + dbgidString, timestamp, + buffer[count + 1], + parseArgsString)); + } else { + /* host need sync with FW id */ + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("fw:%s:m:%x,id:%x(%x %x):%s\n", + "UNKNOWN", moduleid, + debugid, timestamp, + buffer[count + 1], + parseArgsString)); + } + } else if (debugid == + DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG) { + /* specific debugid */ + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("fw:%s:m:%x,id:%x(%x %x):%s\n", + "DBGLOG_SM_MSG", moduleid, + debugid, timestamp, + buffer[count + 1], + parseArgsString)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("fw:%s:m:%x,id:%x(%x %x):%s\n", + "UNKNOWN", moduleid, debugid, + timestamp, buffer[count + 1], + parseArgsString)); + } + } + + /* 32 bit Time stamp + 32 bit Dbg header */ + count += numargs + 2; + } + + return 0; + +} + +#ifdef WLAN_OPEN_SOURCE +static int +dbglog_debugfs_raw_data(wmi_unified_t wmi_handle, const uint8_t *buf, + uint32_t length, uint32_t dropped) +{ + struct fwdebug *fwlog = (struct fwdebug *)&wmi_handle->dbglog; + struct dbglog_slot *slot; + struct sk_buff *skb; + size_t slot_len; + + if (WARN_ON(length > ATH6KL_FWLOG_PAYLOAD_SIZE)) + return -ENODEV; + + slot_len = sizeof(*slot) + ATH6KL_FWLOG_PAYLOAD_SIZE; + + skb = alloc_skb(slot_len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + slot = (struct dbglog_slot *)skb_put(skb, slot_len); + slot->diag_type = (uint32_t) DIAG_TYPE_FW_DEBUG_MSG; + slot->timestamp = cpu_to_le32(jiffies); + slot->length = cpu_to_le32(length); + slot->dropped = cpu_to_le32(dropped); + memcpy(slot->payload, buf, length); + + /* Need to pad each record to fixed length ATH6KL_FWLOG_PAYLOAD_SIZE */ + memset(slot->payload + length, 0, ATH6KL_FWLOG_PAYLOAD_SIZE - length); + + spin_lock(&fwlog->fwlog_queue.lock); + + __skb_queue_tail(&fwlog->fwlog_queue, skb); + + complete(&fwlog->fwlog_completion); + + /* drop oldest entries */ + while (skb_queue_len(&fwlog->fwlog_queue) > ATH6KL_FWLOG_MAX_ENTRIES) { + skb = __skb_dequeue(&fwlog->fwlog_queue); + kfree_skb(skb); + } + + spin_unlock(&fwlog->fwlog_queue.lock); + + return true; +} +#endif /* WLAN_OPEN_SOURCE */ + +/** + * nl_srv_bcast_fw_logs() - Wrapper func to send bcast msgs to FW logs mcast grp + * @skb: sk buffer pointer + * + * Sends the bcast message to FW logs multicast group with generic nl socket + * if CNSS_GENL is enabled. Else, use the legacy netlink socket to send. + * + * Return: zero on success, error code otherwise + */ +static int nl_srv_bcast_fw_logs(struct sk_buff *skb) +{ +#ifdef CNSS_GENL + return nl_srv_bcast(skb, CLD80211_MCGRP_FW_LOGS, WLAN_NL_MSG_CNSS_DIAG); +#else + return nl_srv_bcast(skb); +#endif +} + +/** + * send_fw_diag_nl_data - pack the data from fw diag event handler + * @buffer: buffer of diag event + * @len: length of the diag event + * @event: the even type + * + * return: 0 if sent successfully, otherwise error code + */ +static int send_fw_diag_nl_data(const uint8_t *buffer, uint32_t len, + uint32_t event_type) +{ + struct sk_buff *skb_out; + struct nlmsghdr *nlh; + int res = 0; + tAniNlHdr *wnl; + int radio; + int msg_len; + + if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE)) + return -ENODEV; + + if (nl_srv_is_initialized() != 0) + return -EIO; + + radio = cds_get_radio_index(); + if (radio == -EINVAL) + return -EIO; + + if (cds_is_multicast_logging()) { + msg_len = len + sizeof(radio); + skb_out = nlmsg_new(msg_len, GFP_KERNEL); + if (!skb_out) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Failed to allocate new skb\n")); + return -ENOMEM; + } + nlh = nlmsg_put(skb_out, 0, 0, WLAN_NL_MSG_CNSS_DIAG, msg_len, + 0); + if (!nlh) { + kfree_skb(skb_out); + return -EMSGSIZE; + } + wnl = (tAniNlHdr *)nlh; + wnl->radio = radio; + + /* data buffer offset from nlmsg_hdr + sizeof(int) radio */ + memcpy(nlmsg_data(nlh) + sizeof(radio), buffer, len); + + res = nl_srv_bcast_fw_logs(skb_out); + if ((res < 0) && (res != -ESRCH)) { + AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1, + ("%s: nl_srv_bcast_fw_logs failed 0x%x\n", + __func__, res)); + return res; + } + } + return res; +} + +/** + * process_fw_diag_event_data() - process diag events and fw messages + * @datap: data to be processed + * @num_data: number of data chunks + * + * return: success + */ +static int +process_fw_diag_event_data(uint8_t *datap, uint32_t num_data) +{ + uint32_t diag_type; + uint32_t nl_data_len; /* diag hdr + payload */ + uint32_t diag_data_len; /* each fw diag payload */ + struct wlan_diag_data *diag_data; + + while (num_data >= sizeof(struct wlan_diag_data)) { + diag_data = (struct wlan_diag_data *)datap; + diag_type = WLAN_DIAG_0_TYPE_GET(diag_data->word0); + diag_data_len = WLAN_DIAG_0_LEN_GET(diag_data->word0); + /* Length of diag struct and len of payload */ + nl_data_len = sizeof(struct wlan_diag_data) + diag_data_len; + if (nl_data_len > num_data) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("processed all the messages\n")); + return 0; + } + + switch (diag_type) { + case DIAG_TYPE_FW_EVENT: + return send_fw_diag_nl_data(datap, nl_data_len, + diag_type); + break; + case DIAG_TYPE_FW_LOG: + return send_fw_diag_nl_data(datap, nl_data_len, + diag_type); + break; + } + /* Move to the next event and send to cnss-diag */ + datap += nl_data_len; + num_data -= nl_data_len; + } + + return 0; +} + +static int +send_diag_netlink_data(const uint8_t *buffer, uint32_t len, uint32_t cmd) +{ + struct sk_buff *skb_out; + struct nlmsghdr *nlh; + int res = 0; + struct dbglog_slot *slot; + size_t slot_len; + tAniNlHdr *wnl; + int radio; + + if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE)) + return -ENODEV; + + if (nl_srv_is_initialized() != 0) + return -EIO; + + radio = cds_get_radio_index(); + if (radio == -EINVAL) + return -EIO; + + if (cds_is_multicast_logging()) { + slot_len = sizeof(*slot) + ATH6KL_FWLOG_PAYLOAD_SIZE + + sizeof(radio); + + skb_out = nlmsg_new(slot_len, GFP_ATOMIC); + if (!skb_out) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Failed to allocate new skb\n")); + return A_ERROR; + } + + nlh = nlmsg_put(skb_out, 0, 0, WLAN_NL_MSG_CNSS_DIAG, + slot_len, 0); + if (!nlh) { + kfree_skb(skb_out); + return -EMSGSIZE; + } + wnl = (tAniNlHdr *)nlh; + wnl->radio = radio; + /* data buffer offset from: nlmsg_hdr + sizeof(int) radio */ + slot = (struct dbglog_slot *) (nlmsg_data(nlh) + sizeof(radio)); + slot->diag_type = cmd; + slot->timestamp = cpu_to_le32(jiffies); + slot->length = cpu_to_le32(len); + /* Version mapped to get_version here */ + slot->dropped = get_version; + memcpy(slot->payload, buffer, len); + + /* + * Need to pad each record to fixed length + * ATH6KL_FWLOG_PAYLOAD_SIZE + */ + memset(slot->payload + len, 0, ATH6KL_FWLOG_PAYLOAD_SIZE - len); + + res = nl_srv_bcast_fw_logs(skb_out); + if ((res < 0) && (res != -ESRCH)) { + AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1, + ("%s: nl_srv_bcast_fw_logs failed 0x%x\n", + __func__, res)); + return res; + } + } + return res; +} + +static int +dbglog_process_netlink_data(wmi_unified_t wmi_handle, const uint8_t *buffer, + uint32_t len, uint32_t dropped) +{ + struct sk_buff *skb_out; + struct nlmsghdr *nlh; + int res = 0; + struct dbglog_slot *slot; + size_t slot_len; + tAniNlHdr *wnl; + int radio; + + if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE)) + return -ENODEV; + + if (nl_srv_is_initialized() != 0) + return -EIO; + + radio = cds_get_radio_index(); + if (radio == -EINVAL) + return -EIO; + + if (cds_is_multicast_logging()) { + slot_len = sizeof(*slot) + ATH6KL_FWLOG_PAYLOAD_SIZE + + sizeof(radio); + + skb_out = nlmsg_new(slot_len, GFP_KERNEL); + if (!skb_out) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Failed to allocate new skb\n")); + return A_ERROR; + } + + nlh = nlmsg_put(skb_out, 0, 0, WLAN_NL_MSG_CNSS_DIAG, + slot_len, 0); + if (!nlh) { + kfree_skb(skb_out); + return -EMSGSIZE; + } + wnl = (tAniNlHdr *)nlh; + wnl->radio = radio; + /* data buffer offset from: nlmsg_hdr + sizeof(int) radio */ + slot = (struct dbglog_slot *) (nlmsg_data(nlh) + sizeof(radio)); + slot->diag_type = (uint32_t) DIAG_TYPE_FW_DEBUG_MSG; + slot->timestamp = cpu_to_le32(jiffies); + slot->length = cpu_to_le32(len); + slot->dropped = cpu_to_le32(dropped); + memcpy(slot->payload, buffer, len); + + /* + * Need to pad each record to fixed length + * ATH6KL_FWLOG_PAYLOAD_SIZE + */ + memset(slot->payload + len, 0, ATH6KL_FWLOG_PAYLOAD_SIZE - len); + + res = nl_srv_bcast_fw_logs(skb_out); + if ((res < 0) && (res != -ESRCH)) { + AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1, + ("%s: nl_srv_bcast_fw_logs failed 0x%x\n", + __func__, res)); + return res; + } + } + return res; +} + +/* + * WMI diag data event handler, this function invoked as a CB + * when there DIAG_EVENT, DIAG_MSG, DIAG_DBG to be + * forwarded from the FW. This is the new implementation for + * replacement of fw_dbg and dbg messages + */ + +static int diag_fw_handler(ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + + tp_wma_handle wma = (tp_wma_handle) scn; + WMI_DIAG_EVENTID_param_tlvs *param_buf; + uint8_t *datap; + uint32_t len = 0; + uint32_t *buffer; + + if (!wma) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("NULL Pointer assigned\n")); + return A_ERROR; + } + /* when fw asser occurs,host can't use TLV format. */ + if (wma->is_fw_assert) { + datap = data; + len = datalen; + wma->is_fw_assert = 0; + } else { + param_buf = (WMI_DIAG_EVENTID_param_tlvs *) data; + if (!param_buf) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Get NULL point message from FW\n")); + return A_ERROR; + } + + datap = param_buf->bufp; + len = param_buf->num_bufp; + + if (!get_version) { + if (len < 2*(sizeof(uint32_t))) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("len is less than expected\n")); + return A_ERROR; + } + buffer = (uint32_t *) datap; + buffer++; /* skip offset */ + if (WLAN_DIAG_TYPE_CONFIG == DIAG_GET_TYPE(*buffer)) { + if (len < 3*(sizeof(uint32_t))) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("len is less than expected\n")); + return A_ERROR; + } + buffer++; /* skip */ + if (DIAG_VERSION_INFO == DIAG_GET_ID(*buffer)) { + if (len < 4*(sizeof(uint32_t))) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("len is less than expected\n")); + return A_ERROR; + } + buffer++; /* skip */ + /* get payload */ + get_version = *buffer; + } + } + } + } + if (dbglog_process_type == DBGLOG_PROCESS_PRINT_RAW) { + if (!gprint_limiter) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("NOT Supported only supports net link socket\n")); + gprint_limiter = true; + } + return 0; + } + + if (dbglog_process_type == DBGLOG_PROCESS_NET_RAW) { + return send_diag_netlink_data((uint8_t *) datap, + len, DIAG_TYPE_FW_MSG); + } +#ifdef WLAN_OPEN_SOURCE + if (dbglog_process_type == DBGLOG_PROCESS_POOL_RAW) { + if (!gprint_limiter) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("NOT Supported only supports net link socket\n")); + gprint_limiter = true; + } + return 0; + } +#endif /* WLAN_OPEN_SOURCE */ + if (!gprint_limiter) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("NOT Supported only supports net link socket\n")); + gprint_limiter = true; + } + /* Always returns zero */ + return 0; +} + +/* + * WMI diag data event handler, this function invoked as a CB + * when there DIAG_DATA to be forwarded from the FW. + */ +static int +fw_diag_data_event_handler(ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + + WMI_DIAG_DATA_CONTAINER_EVENTID_param_tlvs *param_buf; + uint8_t *datap; + uint32_t num_data; /* Total events */ + + param_buf = (WMI_DIAG_DATA_CONTAINER_EVENTID_param_tlvs *) data; + if (!param_buf) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Got NULL point message from FW\n")); + return A_ERROR; + } + + num_data = param_buf->num_bufp; + + datap = (uint8_t *) param_buf->bufp; + + return process_fw_diag_event_data(datap, num_data); +} + +int dbglog_parse_debug_logs(ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + tp_wma_handle wma = (tp_wma_handle) scn; + uint32_t count; + uint32_t *buffer; + uint32_t timestamp; + uint32_t debugid; + uint32_t moduleid; + uint16_t vapid; + uint16_t numargs; + qdf_size_t length; + uint32_t dropped; + WMI_DEBUG_MESG_EVENTID_param_tlvs *param_buf; + uint8_t *datap; + uint32_t len; + + if (!wma) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("NULL Pointer assigned\n")); + return A_ERROR; + } + /*when fw asser occurs,host can't use TLV format. */ + if (wma->is_fw_assert) { + datap = data; + len = datalen; + wma->is_fw_assert = 0; + } else { + param_buf = (WMI_DEBUG_MESG_EVENTID_param_tlvs *) data; + if (!param_buf) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Get NULL point message from FW\n")); + return A_ERROR; + } + + datap = param_buf->bufp; + len = param_buf->num_bufp; + } + + if (len < sizeof(dropped)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid length\n")); + return A_ERROR; + } + + dropped = *((uint32_t *) datap); + if (dropped > 0) { + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("%d log buffers are dropped\n", dropped)); + } + datap += sizeof(dropped); + len -= sizeof(dropped); + + count = 0; + buffer = (uint32_t *) datap; + length = (len >> 2); + + if (dbglog_process_type == DBGLOG_PROCESS_PRINT_RAW) + return dbglog_print_raw_data(buffer, length); + + if (dbglog_process_type == DBGLOG_PROCESS_NET_RAW) { + return dbglog_process_netlink_data((wmi_unified_t) wma-> + wmi_handle, + (uint8_t *) buffer, + len, dropped); + } +#ifdef WLAN_OPEN_SOURCE + if (dbglog_process_type == DBGLOG_PROCESS_POOL_RAW) { + return dbglog_debugfs_raw_data((wmi_unified_t) wma->wmi_handle, + (uint8_t *) buffer, len, + dropped); + } +#endif /* WLAN_OPEN_SOURCE */ + + while ((count + 2) < length) { + timestamp = DBGLOG_GET_TIME_STAMP(buffer[count]); + debugid = DBGLOG_GET_DBGID(buffer[count + 1]); + moduleid = DBGLOG_GET_MODULEID(buffer[count + 1]); + vapid = DBGLOG_GET_VDEVID(buffer[count + 1]); + numargs = DBGLOG_GET_NUMARGS(buffer[count + 1]); + + if ((count + 2 + numargs) > length) + return A_OK; + + if (moduleid >= WLAN_MODULE_ID_MAX) + return A_OK; + + if (!mod_print[moduleid]) { + /* + * No module specific log registered + * use the default handler + */ + dbglog_default_print_handler(moduleid, vapid, debugid, + timestamp, numargs, + (((uint32_t *) buffer) + + 2 + count)); + } else { + if (!(mod_print[moduleid](moduleid, vapid, debugid, + timestamp, numargs, + (((uint32_t *) buffer) + + 2 + count)))) { + /* + * The message is not handled + * by the module specific handler + */ + dbglog_default_print_handler(moduleid, vapid, + debugid, timestamp, + numargs, + (((uint32_t *) + buffer) + 2 + + count)); + + } + } + + /* 32 bit Time stamp + 32 bit Dbg header */ + count += numargs + 2; + } + /* Always returns zero */ + return A_OK; +} + +void dbglog_reg_modprint(uint32_t mod_id, module_dbg_print printfn) +{ + if (!mod_print[mod_id]) { + mod_print[mod_id] = printfn; + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("module print is already registered for this module %d\n", + mod_id)); + } +} + +static void +dbglog_sm_print(uint32_t timestamp, + uint16_t vap_id, + uint16_t numargs, + uint32_t *args, + const char *module_prefix, + const char *const states[], uint32_t num_states, + const char *const events[], uint32_t num_events) +{ + uint8_t type, arg1, arg2, arg3; + uint32_t extra, extra2, extra3; + + if (numargs != 4) + return; + + type = (args[0] >> 24) & 0xff; + arg1 = (args[0] >> 16) & 0xff; + arg2 = (args[0] >> 8) & 0xff; + arg3 = (args[0] >> 0) & 0xff; + + extra = args[1]; + extra2 = args[2]; + extra3 = args[3]; + + switch (type) { + case 0: /* state transition */ + if (arg1 < num_states && arg2 < num_states) { + dbglog_printf(timestamp, vap_id, + "%s: %s => %s (%#x, %#x, %#x)", + module_prefix, states[arg1], states[arg2], + extra, extra2, extra3); + } else { + dbglog_printf(timestamp, vap_id, + "%s: %u => %u (%#x, %#x, %#x)", + module_prefix, arg1, arg2, extra, extra2, + extra3); + } + break; + case 1: /* dispatch event */ + if (arg1 < num_states && arg2 < num_events) { + dbglog_printf(timestamp, vap_id, + "%s: %s < %s (%#x, %#x, %#x)", + module_prefix, states[arg1], events[arg2], + extra, extra2, extra3); + } else { + dbglog_printf(timestamp, vap_id, + "%s: %u < %u (%#x, %#x, %#x)", + module_prefix, arg1, arg2, extra, extra2, + extra3); + } + break; + case 2: /* warning */ + switch (arg1) { + case 0: /* unhandled event */ + if (arg2 < num_states && arg3 < num_events) { + dbglog_printf(timestamp, vap_id, + "%s: unhandled event %s in state %s (%#x, %#x, %#x)", + module_prefix, events[arg3], + states[arg2], extra, extra2, + extra3); + } else { + dbglog_printf(timestamp, vap_id, + "%s: unhandled event %u in state %u (%#x, %#x, %#x)", + module_prefix, arg3, arg2, extra, + extra2, extra3); + } + break; + default: + break; + + } + break; + } +} + +static A_BOOL +dbglog_sta_powersave_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + static const char *const states[] = { + "IDLE", + "ACTIVE", + "SLEEP_TXQ_FLUSH", + "SLEEP_TX_SENT", + "PAUSE", + "SLEEP_DOZE", + "SLEEP_AWAKE", + "ACTIVE_TXQ_FLUSH", + "ACTIVE_TX_SENT", + "PAUSE_TXQ_FLUSH", + "PAUSE_TX_SENT", + "IDLE_TXQ_FLUSH", + "IDLE_TX_SENT", + }; + + static const char *const events[] = { + "START", + "STOP", + "PAUSE", + "UNPAUSE", + "TIM", + "DTIM", + "SEND_COMPLETE", + "PRE_SEND", + "RX", + "HWQ_EMPTY", + "PAUSE_TIMEOUT", + "TXRX_INACTIVITY_TIMEOUT", + "PSPOLL_TIMEOUT", + "UAPSD_TIMEOUT", + "DELAYED_SLEEP_TIMEOUT", + "SEND_N_COMPLETE", + "TIDQ_PAUSE_COMPLETE", + "SEND_PSPOLL", + "SEND_SPEC_PSPOLL", + }; + + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + dbglog_sm_print(timestamp, vap_id, numargs, args, "STA PS", + states, QDF_ARRAY_SIZE(states), events, + QDF_ARRAY_SIZE(events)); + break; + case PS_STA_PM_ARB_REQUEST: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "PM ARB request flags=%x, last_time=%x %s: %s", + args[1], args[2], + dbglog_get_module_str(args[0]), + args[3] ? "SLEEP" : "WAKE"); + } + break; + case PS_STA_DELIVER_EVENT: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, "STA PS: %s %s", + (args[0] == 0 ? "PAUSE_COMPLETE" : + (args[0] == 1 ? "UNPAUSE_COMPLETE" : + (args[0] == 2 ? "SLEEP" : + (args[0] == + 3 ? "AWAKE" : "UNKNOWN")))), + (args[1] == + 0 ? "SUCCESS" : (args[1] == + 1 ? "TXQ_FLUSH_TIMEOUT" + : (args[1] == + 2 ? "NO_ACK" + : (args[1] == + 3 ? + "RX_LEAK_TIMEOUT" + : (args[1] == + 4 ? + "PSPOLL_UAPSD_BUSY_TIMEOUT" + : + "UNKNOWN")))))); + } + break; + case PS_STA_PSPOLL_SEQ_DONE: + if (numargs == 5) { + dbglog_printf(timestamp, vap_id, + "STA PS poll: queue=%u comp=%u rsp=%u rsp_dur=%u fc=%x qos=%x %s", + args[0], args[1], args[2], args[3], + (args[4] >> 16) & 0xffff, + (args[4] >> 8) & 0xff, + (args[4] & 0xff) == + 0 ? "SUCCESS" : (args[4] & 0xff) == + 1 ? "NO_ACK" : (args[4] & 0xff) == + 2 ? "DROPPED" : (args[4] & 0xff) == + 3 ? "FILTERED" : (args[4] & 0xff) == + 4 ? "RSP_TIMEOUT" : "UNKNOWN"); + } + break; + case PS_STA_COEX_MODE: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, "STA PS COEX MODE %s", + args[0] ? "ENABLED" : "DISABLED"); + } + break; + case PS_STA_PSPOLL_ALLOW: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "STA PS-Poll %s flags=%x time=%u", + args[0] ? "ALLOW" : "DISALLOW", args[1], + args[2]); + } + break; + case PS_STA_SET_PARAM: + if (numargs == 2) { + struct { + char *name; + int is_time_param; + } params[] = { + { + "MAX_SLEEP_ATTEMPTS", 0 + }, { + "DELAYED_SLEEP", 1 + }, { + "TXRX_INACTIVITY", 1 + }, { + "MAX_TX_BEFORE_WAKE", 0 + }, { + "UAPSD_TIMEOUT", 1 + }, { + "UAPSD_CONFIG", 0 + }, { + "PSPOLL_RESPONSE_TIMEOUT", 1 + }, { + "MAX_PSPOLL_BEFORE_WAKE", 0 + }, { + "RX_WAKE_POLICY", 0 + }, { + "DELAYED_PAUSE_RX_LEAK", 1 + }, { + "TXRX_INACTIVITY_BLOCKED_RETRY", 1 + }, { + "SPEC_WAKE_INTERVAL", 1 + }, { + "MAX_SPEC_NODATA_PSPOLL", 0 + }, { + "ESTIMATED_PSPOLL_RESP_TIME", 1 + }, { + "QPOWER_MAX_PSPOLL_BEFORE_WAKE", 0 + }, { + "QPOWER_ENABLE", 0 + }, + }; + uint32_t param = args[0]; + uint32_t value = args[1]; + + if (param < QDF_ARRAY_SIZE(params)) { + if (params[param].is_time_param) { + dbglog_printf(timestamp, vap_id, + "STA PS SET_PARAM %s => %u (us)", + params[param].name, + value); + } else { + dbglog_printf(timestamp, vap_id, + "STA PS SET_PARAM %s => %#x", + params[param].name, + value); + } + } else { + dbglog_printf(timestamp, vap_id, + "STA PS SET_PARAM %x => %#x", + param, value); + } + } + break; + case PS_STA_SPECPOLL_TIMER_STARTED: + dbglog_printf(timestamp, vap_id, + "SPEC Poll Timer Started: Beacon time Remaining:%d wakeup interval:%d", + args[0], args[1]); + break; + case PS_STA_SPECPOLL_TIMER_STOPPED: + dbglog_printf(timestamp, vap_id, "SPEC Poll Timer Stopped"); + break; + default: + return false; + } + + return true; +} + +/* IBSS PS sub modules */ +enum wlan_ibss_ps_sub_module { + WLAN_IBSS_PS_SUB_MODULE_IBSS_NW_SM = 0, + WLAN_IBSS_PS_SUB_MODULE_IBSS_SELF_PS = 1, + WLAN_IBSS_PS_SUB_MODULE_IBSS_PEER_PS = 2, + WLAN_IBSS_PS_SUB_MODULE_MAX = 3, +}; + +#define WLAN_IBSS_PS_SUB_MODULE_OFFSET 0x1E + +static A_BOOL +dbglog_ibss_powersave_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + static const char *const nw_states[] = { + "WAIT_FOR_TBTT", + "ATIM_WINDOW_PRE_BCN", + "ATIM_WINDOW_POST_BCN", + "OUT_OF_ATIM_WINDOW", + "PAUSE_PENDING", + "PAUSED", + }; + + static const char *const ps_states[] = { + "ACTIVE", + "SLEEP_TX_SEND", + "SLEEP_DOZE_PAUSE_PENDING", + "SLEEP_DOZE", + "SLEEP_AWAKE", + "ACTIVE_TX_SEND", + "PAUSE_TX_SEND", + "PAUSED", + }; + + static const char *const peer_ps_states[] = { + "ACTIVE", + "SLEEP_AWAKE", + "SLEEP_DOZE", + "PS_UNKNOWN", + }; + + static const char *const events[] = { + "START", + "STOP", + "SWBA", + "TBTT", + "TX_BCN_CMP", + "SEND_COMPLETE", + "SEND_N_COMPLETE", + "PRE_SEND", + "RX", + "UC_INACTIVITY_TIMEOUT", + "BC_INACTIVITY_TIMEOUT", + "ATIM_WINDOW_BEGIN", + "ATIM_WINDOW_END", + "HWQ_EMPTY", + "UC_ATIM_RCVD", + "TRAFFIC_EXCHANGE_DONE", + "POWER_SAVE_STATE_CHANGE", + "NEW_PEER_JOIN", + "IBSS_VDEV_PAUSE_REQUEST", + "IBSS_VDEV_PAUSE_RESPONSE", + "IBSS_VDEV_PAUSE_TIMEOUT", + "IBSS_VDEV_UNPAUSE_REQUEST", + "PS_STATE_CHANGE", + }; + + enum wlan_ibss_ps_sub_module sub_module; + + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + sub_module = (args[1] >> WLAN_IBSS_PS_SUB_MODULE_OFFSET) & 0x3; + switch (sub_module) { + case WLAN_IBSS_PS_SUB_MODULE_IBSS_NW_SM: + dbglog_sm_print(timestamp, vap_id, numargs, args, + "IBSS PS NW", nw_states, + QDF_ARRAY_SIZE(nw_states), events, + QDF_ARRAY_SIZE(events)); + break; + case WLAN_IBSS_PS_SUB_MODULE_IBSS_SELF_PS: + dbglog_sm_print(timestamp, vap_id, numargs, args, + "IBSS PS Self", ps_states, + QDF_ARRAY_SIZE(ps_states), events, + QDF_ARRAY_SIZE(events)); + break; + case WLAN_IBSS_PS_SUB_MODULE_IBSS_PEER_PS: + dbglog_sm_print(timestamp, vap_id, numargs, args, + "IBSS PS Peer", peer_ps_states, + QDF_ARRAY_SIZE(peer_ps_states), events, + QDF_ARRAY_SIZE(events)); + break; + default: + break; + } + break; + case IBSS_PS_DBGID_PEER_CREATE: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: peer alloc failed for peer ID:%u", + args[0]); + } else if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: create peer ID=%u", args[0]); + } + break; + case IBSS_PS_DBGID_PEER_DELETE: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: delete peer ID=%u num_peers:%d num_sleeping_peers:%d ps_enabled_for_this_peer:%d", + args[0], args[1], args[2], args[3]); + } + break; + case IBSS_PS_DBGID_VDEV_CREATE: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: vdev alloc failed", args[0]); + } else if (numargs == 0) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: vdev created"); + } + break; + case IBSS_PS_DBGID_VDEV_DELETE: + dbglog_printf(timestamp, vap_id, "IBSS PS: vdev deleted"); + break; + + case IBSS_PS_DBGID_VDEV_EVENT: + if (numargs == 1) { + if (args[0] == 5) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: vdev event for peer add"); + } else if (args[0] == 7) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: vdev event for peer delete"); + } else { + dbglog_printf(timestamp, vap_id, + "IBSS PS: vdev event %u", + args[0]); + } + } + break; + + case IBSS_PS_DBGID_PEER_EVENT: + if (numargs == 4) { + if (args[0] == 0xFFFF) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: pre_send for peer:%u peer_type:%u sm_event_mask:%0x", + args[1], args[3], args[2]); + } else if (args[0] == 0x20000) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: send_complete for peer:%u peer_type:%u sm_event_mask:%0x", + args[1], args[3], args[2]); + } else if (args[0] == 0x10) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: send_n_complete for peer:%u peer_type:%u sm_event_mask:%0x", + args[1], args[3], args[2]); + } else if (args[0] == 0x40) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: rx event for peer:%u peer_type:%u sm_event_mask:%0x", + args[1], args[3], args[2]); + } else if (args[0] == 0x4) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: hw_q_empty for peer:%u peer_type:%u sm_event_mask:%0x", + args[1], args[3], args[2]); + } + } + break; + + case IBSS_PS_DBGID_DELIVER_CAB: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Deliver CAB n_mpdu:%d send_flags:%0x tid_cur:%d q_depth_for_other_tid:%d", + args[0], args[1], args[2], args[3]); + } + break; + + case IBSS_PS_DBGID_DELIVER_UC_DATA: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Deliver UC data peer:%d tid:%d n_mpdu:%d send_flags:%0x", + args[0], args[1], args[2], args[3]); + } + break; + + case IBSS_PS_DBGID_DELIVER_UC_DATA_ERROR: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Deliver UC data error peer:%d tid:%d allowed_tidmask:%0x, pending_tidmap:%0x", + args[0], args[1], args[2], args[3]); + } + break; + + case IBSS_PS_DBGID_UC_INACTIVITY_TMR_RESTART: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: UC timer restart peer:%d timer_val:%0x", + args[0], args[1]); + } + break; + + case IBSS_PS_DBGID_MC_INACTIVITY_TMR_RESTART: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: MC timer restart timer_val:%0x", + args[0]); + } + break; + + case IBSS_PS_DBGID_NULL_TX_COMPLETION: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: null tx completion peer:%d tx_completion_status:%d flags:%0x", + args[0], args[1], args[2]); + } + break; + + case IBSS_PS_DBGID_ATIM_TIMER_START: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: ATIM timer start tsf:%0x %0x tbtt:%0x %0x", + args[0], args[1], args[2], args[3]); + } + break; + + case IBSS_PS_DBGID_UC_ATIM_SEND: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Send ATIM to peer:%d", args[1]); + } else if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: no peers to send UC ATIM", + args[1]); + } + break; + + case IBSS_PS_DBGID_BC_ATIM_SEND: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: MC Data, num_of_peers:%d bc_atim_sent:%d", + args[1], args[0]); + } + break; + + case IBSS_PS_DBGID_UC_TIMEOUT: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: UC timeout for peer:%d send_null:%d", + args[0], args[1]); + } + break; + + case IBSS_PS_DBGID_PWR_COLLAPSE_ALLOWED: + dbglog_printf(timestamp, vap_id, + "IBSS PS: allow power collapse"); + break; + + case IBSS_PS_DBGID_PWR_COLLAPSE_NOT_ALLOWED: + if (numargs == 0) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: power collapse not allowed by INI"); + } else if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: power collapse not allowed since peer id:%d is not PS capable", + args[0]); + } else if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: power collapse not allowed - no peers in NW"); + } else if (numargs == 3) { + if (args[0] == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: power collapse not allowed, non-zero qdepth %d %d", + args[1], args[2]); + } else if (args[0] == 3) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: power collapse not allowed by peer:%d peer_flags:%0x", + args[1], args[2]); + } + } else if (numargs == 5) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: power collapse not allowed by state m/c nw_cur_state:%d nw_next_state:%d ps_cur_state:%d flags:%0x", + args[1], args[2], args[3], args[4]); + } + break; + + case IBSS_PS_DBGID_SET_PARAM: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Set Param ID:%0x Value:%0x", + args[0], args[1]); + } + break; + + case IBSS_PS_DBGID_HOST_TX_PAUSE: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Pausing host, vdev_map:%0x", + args[0]); + } + break; + + case IBSS_PS_DBGID_HOST_TX_UNPAUSE: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Unpausing host, vdev_map:%0x", + args[0]); + } + break; + case IBSS_PS_DBGID_PS_DESC_BIN_LWM: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: LWM, vdev_map:%0x", args[0]); + } + break; + + case IBSS_PS_DBGID_PS_DESC_BIN_HWM: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: HWM, vdev_map:%0x", args[0]); + } + break; + + case IBSS_PS_DBGID_PS_KICKOUT_PEER: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Kickout peer id:%d atim_fail_cnt:%d status:%d", + args[0], args[1], args[2]); + } + break; + + case IBSS_PS_DBGID_SET_PEER_PARAM: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Set Peer Id:%d Param ID:%0x Value:%0x", + args[0], args[1], args[2]); + } + break; + + case IBSS_PS_DBGID_BCN_ATIM_WIN_MISMATCH: + if (numargs == 4) { + if (args[0] == 0xDEAD) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: ATIM window length mismatch, our's:%d, peer id:%d, peer's:%d", + args[1], args[2], args[3]); + } else if (args[0] == 0xBEEF) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Peer ATIM window length changed, peer id:%d, peer recorded atim window:%d new atim window:%d", + args[1], args[2], args[3]); + } + } + break; + + case IBSS_PS_DBGID_RX_CHAINMASK_CHANGE: + if (numargs == 2) { + if (args[1] == 0x1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Voting for low power chainmask from :%d", + args[0]); + } else { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Voting for high power chainmask from :%d", + args[0]); + } + } + break; + + default: + return false; + } + + return true; +} + +static +A_BOOL dbglog_ratectrl_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + switch (dbg_id) { + case RATECTRL_DBGID_ASSOC: + dbglog_printf(timestamp, vap_id, + "RATE: ChainMask %d, phymode %d, ni_flags 0x%08x, vht_mcs_set 0x%04x, ht_mcs_set 0x%04x", + args[0], args[1], args[2], args[3], args[4]); + break; + case RATECTRL_DBGID_NSS_CHANGE: + dbglog_printf(timestamp, vap_id, "RATE: NEW NSS %d\n", args[0]); + break; + case RATECTRL_DBGID_CHAINMASK_ERR: + dbglog_printf(timestamp, vap_id, + "RATE: Chainmask ERR %d %d %d\n", args[0], + args[1], args[2]); + break; + case RATECTRL_DBGID_UNEXPECTED_FRAME: + dbglog_printf(timestamp, vap_id, + "RATE: WARN1: rate %d flags 0x%08x\n", args[0], + args[1]); + break; + case RATECTRL_DBGID_WAL_RCQUERY: + dbglog_printf(timestamp, vap_id, + "ratectrl_dbgid_wal_rcquery [rix1 %d rix2 %d rix3 %d proberix %d ppduflag 0x%x] ", + args[0], args[1], args[2], args[3], args[4]); + break; + case RATECTRL_DBGID_WAL_RCUPDATE: + dbglog_printf(timestamp, vap_id, + "ratectrl_dbgid_wal_rcupdate [numelems %d ppduflag 0x%x] ", + args[0], args[1]); + break; + case RATECTRL_DBGID_GTX_UPDATE: + { + switch (args[0]) { + case 255: + dbglog_printf(timestamp, vap_id, + "GtxInitPwrCfg [bw[last %d|cur %d] rtcode 0x%x tpc %d tpc_init_pwr_cfg %d] ", + args[1] >> 8, args[1] & 0xff, + args[2], args[3], args[4]); + break; + case 254: + dbglog_printf(timestamp, vap_id, + "gtx_cfg_addr [RTMask0@0x%x PERThreshold@0x%x gtxTPCMin@0x%x userGtxMask@0x%x] ", + args[1], args[2], args[3], + args[4]); + break; + default: + dbglog_printf(timestamp, vap_id, + "gtx_update [act %d bw %d rix 0x%x tpc %d per %d lastrssi %d] ", + args[0], args[1], args[2], + args[3], args[4], args[5]); + } + } + break; + } + return true; +} + +static +A_BOOL dbglog_ani_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + switch (dbg_id) { + case ANI_DBGID_ENABLE: + dbglog_printf(timestamp, vap_id, "ANI Enable: %d", args[0]); + break; + case ANI_DBGID_POLL: + dbglog_printf(timestamp, vap_id, + "ANI POLLING: AccumListenTime %d ListenTime %d ofdmphyerr %d cckphyerr %d", + args[0], args[1], args[2], args[3]); + break; + case ANI_DBGID_RESTART: + dbglog_printf(timestamp, vap_id, "ANI Restart"); + break; + case ANI_DBGID_CURRENT_LEVEL: + dbglog_printf(timestamp, vap_id, + "ANI CURRENT LEVEL ofdm level %d cck level %d", + args[0], args[1]); + break; + case ANI_DBGID_OFDM_LEVEL: + dbglog_printf(timestamp, vap_id, + "ANI UPDATE ofdm level %d firstep %d firstep_low %d cycpwr_thr %d self_corr_low %d", + args[0], args[1], args[2], args[3], args[4]); + break; + case ANI_DBGID_CCK_LEVEL: + dbglog_printf(timestamp, vap_id, + "ANI UPDATE cck level %d firstep %d firstep_low %d mrc_cck %d", + args[0], args[1], args[2], args[3]); + break; + case ANI_DBGID_CONTROL: + dbglog_printf(timestamp, vap_id, + "ANI CONTROL ofdmlevel %d ccklevel %d\n", + args[0]); + + break; + case ANI_DBGID_OFDM_PARAMS: + dbglog_printf(timestamp, vap_id, + "ANI ofdm_control firstep %d cycpwr %d\n", + args[0], args[1]); + break; + case ANI_DBGID_CCK_PARAMS: + dbglog_printf(timestamp, vap_id, + "ANI cck_control mrc_cck %d barker_threshold %d\n", + args[0], args[1]); + break; + case ANI_DBGID_RESET: + dbglog_printf(timestamp, vap_id, + "ANI resetting resetflag %d resetCause %8x channel index %d", + args[0], args[1], args[2]); + break; + case ANI_DBGID_SELF_CORR_LOW: + dbglog_printf(timestamp, vap_id, "ANI self_corr_low %d", + args[0]); + break; + case ANI_DBGID_FIRSTEP: + dbglog_printf(timestamp, vap_id, + "ANI firstep %d firstep_low %d", args[0], + args[1]); + break; + case ANI_DBGID_MRC_CCK: + dbglog_printf(timestamp, vap_id, "ANI mrc_cck %d", args[0]); + break; + case ANI_DBGID_CYCPWR: + dbglog_printf(timestamp, vap_id, "ANI cypwr_thresh %d", + args[0]); + break; + case ANI_DBGID_POLL_PERIOD: + dbglog_printf(timestamp, vap_id, + "ANI Configure poll period to %d", args[0]); + break; + case ANI_DBGID_LISTEN_PERIOD: + dbglog_printf(timestamp, vap_id, + "ANI Configure listen period to %d", args[0]); + break; + case ANI_DBGID_OFDM_LEVEL_CFG: + dbglog_printf(timestamp, vap_id, + "ANI Configure ofdm level to %d", args[0]); + break; + case ANI_DBGID_CCK_LEVEL_CFG: + dbglog_printf(timestamp, vap_id, + "ANI Configure cck level to %d", args[0]); + break; + default: + dbglog_printf(timestamp, vap_id, "ANI arg1 %d arg2 %d arg3 %d", + args[0], args[1], args[2]); + break; + } + return true; +} + +static A_BOOL +dbglog_ap_powersave_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + switch (dbg_id) { + case AP_PS_DBGID_UPDATE_TIM: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "AP PS: TIM update AID=%u %s", + args[0], args[1] ? "set" : "clear"); + } + break; + case AP_PS_DBGID_PEER_STATE_CHANGE: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u power save %s", + args[0], + args[1] ? "enabled" : "disabled"); + } + break; + case AP_PS_DBGID_PSPOLL: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u pspoll response tid=%u flags=%x", + args[0], args[1], args[2]); + } + break; + case AP_PS_DBGID_PEER_CREATE: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "AP PS: create peer AID=%u", args[0]); + } + break; + case AP_PS_DBGID_PEER_DELETE: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "AP PS: delete peer AID=%u", args[0]); + } + break; + case AP_PS_DBGID_VDEV_CREATE: + dbglog_printf(timestamp, vap_id, "AP PS: vdev create"); + break; + case AP_PS_DBGID_VDEV_DELETE: + dbglog_printf(timestamp, vap_id, "AP PS: vdev delete"); + break; + case AP_PS_DBGID_SYNC_TIM: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u advertised=%#x buffered=%#x", + args[0], args[1], args[2]); + } + break; + case AP_PS_DBGID_NEXT_RESPONSE: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u select next response %s%s%s", + args[0], args[1] ? "(usp active) " : "", + args[2] ? "(pending usp) " : "", + args[3] ? "(pending poll response)" : ""); + } + break; + case AP_PS_DBGID_START_SP: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u START SP tsf=%#x (%u)", + args[0], args[1], args[2]); + } + break; + case AP_PS_DBGID_COMPLETED_EOSP: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u EOSP eosp_tsf=%#x trigger_tsf=%#x", + args[0], args[1], args[2]); + } + break; + case AP_PS_DBGID_TRIGGER: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u TRIGGER tsf=%#x %s%s", + args[0], args[1], + args[2] ? "(usp active) " : "", + args[3] ? "(send_n in progress)" : ""); + } + break; + case AP_PS_DBGID_DUPLICATE_TRIGGER: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u DUP TRIGGER tsf=%#x seq=%u ac=%u", + args[0], args[1], args[2], args[3]); + } + break; + case AP_PS_DBGID_UAPSD_RESPONSE: + if (numargs == 5) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u UAPSD response tid=%u, n_mpdu=%u flags=%#x max_sp=%u current_sp=%u", + args[0], args[1], args[2], args[3], + (args[4] >> 16) & 0xffff, + args[4] & 0xffff); + } + break; + case AP_PS_DBGID_SEND_COMPLETE: + if (numargs == 5) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u SEND_COMPLETE fc=%#x qos=%#x %s%s", + args[0], args[1], args[2], + args[3] ? "(usp active) " : "", + args[4] ? "(pending poll response)" : ""); + } + break; + case AP_PS_DBGID_SEND_N_COMPLETE: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u SEND_N_COMPLETE %s%s", + args[0], + args[1] ? "(usp active) " : "", + args[2] ? "(pending poll response)" : ""); + } + break; + case AP_PS_DBGID_DETECT_OUT_OF_SYNC_STA: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u detected out-of-sync now=%u tx_waiting=%u txq_depth=%u", + args[0], args[1], args[2], args[3]); + } + break; + case AP_PS_DBGID_DELIVER_CAB: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "AP PS: CAB %s n_mpdus=%u, flags=%x, extra=%u", + (args[0] == 17) ? "MGMT" : "DATA", + args[1], args[2], args[3]); + } + break; + default: + return false; + } + + return true; +} + +static A_BOOL +dbglog_wal_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, uint16_t numargs, uint32_t *args) +{ + static const char *const states[] = { + "ACTIVE", + "WAIT", + "WAIT_FILTER", + "PAUSE", + "PAUSE_SEND_N", + "BLOCK", + }; + + static const char *const events[] = { + "PAUSE", + "PAUSE_FILTER", + "UNPAUSE", + + "BLOCK", + "BLOCK_FILTER", + "UNBLOCK", + + "HWQ_EMPTY", + "ALLOW_N", + }; + +#define WAL_VDEV_TYPE(type) \ + (type == 0 ? "AP" : \ + (type == 1 ? "STA" : \ + (type == 2 ? "IBSS" : \ + (type == 2 ? "MONITOR" : \ + "UNKNOWN")))) + +#define WAL_SLEEP_STATE(state) \ + (state == 1 ? "NETWORK SLEEP" : \ + (state == 2 ? "AWAKE" : \ + (state == 3 ? "SYSTEM SLEEP" : \ + "UNKNOWN"))) + + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + dbglog_sm_print(timestamp, vap_id, numargs, args, "TID PAUSE", + states, QDF_ARRAY_SIZE(states), events, + QDF_ARRAY_SIZE(events)); + break; + case WAL_DBGID_SET_POWER_STATE: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "WAL %s => %s, req_count=%u", + WAL_SLEEP_STATE(args[0]), + WAL_SLEEP_STATE(args[1]), args[2]); + } + break; + case WAL_DBGID_CHANNEL_CHANGE_FORCE_RESET: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "WAL channel change (force reset) freq=%u, flags=%u mode=%u rx_ok=%u tx_ok=%u", + args[0] & 0x0000ffff, + (args[0] & 0xffff0000) >> 16, args[1], + args[2], args[3]); + } + break; + case WAL_DBGID_CHANNEL_CHANGE: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "WAL channel change freq=%u, mode=%u flags=%u rx_ok=1 tx_ok=1", + args[0] & 0x0000ffff, + (args[0] & 0xffff0000) >> 16, args[1]); + } + break; + case WAL_DBGID_VDEV_START: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, "WAL %s vdev started", + WAL_VDEV_TYPE(args[0])); + } + break; + case WAL_DBGID_VDEV_STOP: + dbglog_printf(timestamp, vap_id, "WAL %s vdev stopped", + WAL_VDEV_TYPE(args[0])); + break; + case WAL_DBGID_VDEV_UP: + dbglog_printf(timestamp, vap_id, "WAL %s vdev up, count=%u", + WAL_VDEV_TYPE(args[0]), args[1]); + break; + case WAL_DBGID_VDEV_DOWN: + dbglog_printf(timestamp, vap_id, "WAL %s vdev down, count=%u", + WAL_VDEV_TYPE(args[0]), args[1]); + break; + case WAL_DBGID_TX_MGMT_DESCID_SEQ_TYPE_LEN: + dbglog_printf(timestamp, vap_id, + "WAL Tx Mgmt frame desc_id=0x%x, seq=0x%x, type=0x%x, len=0x%x islocal=0x%x", + args[0], args[1], args[2], + (args[3] & 0xffff0000) >> 16, + args[3] & 0x0000ffff); + break; + case WAL_DBGID_TX_MGMT_COMP_DESCID_STATUS: + dbglog_printf(timestamp, vap_id, + "WAL Tx Mgmt frame completion desc_id=0x%x, status=0x%x, islocal=0x%x", + args[0], args[1], args[2]); + break; + case WAL_DBGID_TX_DATA_MSDUID_SEQ_TYPE_LEN: + dbglog_printf(timestamp, vap_id, + "WAL Tx Data frame msdu_id=0x%x, seq=0x%x, type=0x%x, len=0x%x", + args[0], args[1], args[2], args[3]); + break; + case WAL_DBGID_TX_DATA_COMP_MSDUID_STATUS: + dbglog_printf(timestamp, vap_id, + "WAL Tx Data frame completion desc_id=0x%x, status=0x%x, seq=0x%x", + args[0], args[1], args[2]); + break; + case WAL_DBGID_RESET_PCU_CYCLE_CNT: + dbglog_printf(timestamp, vap_id, + "WAL PCU cycle counter value at reset:%x", + args[0]); + break; + case WAL_DBGID_TX_DISCARD: + dbglog_printf(timestamp, vap_id, + "WAL Tx enqueue discard msdu_id=0x%x", args[0]); + break; + case WAL_DBGID_SET_HW_CHAINMASK: + dbglog_printf(timestamp, vap_id, + "WAL_DBGID_SET_HW_CHAINMASK pdev=%d, txchain=0x%x, rxchain=0x%x", + args[0], args[1], args[2]); + break; + case WAL_DBGID_SET_HW_CHAINMASK_TXRX_STOP_FAIL: + dbglog_printf(timestamp, vap_id, + "WAL_DBGID_SET_HW_CHAINMASK_TXRX_STOP_FAIL rxstop=%d, txstop=%d", + args[0], args[1]); + break; + case WAL_DBGID_GET_HW_CHAINMASK: + dbglog_printf(timestamp, vap_id, "WAL_DBGID_GET_HW_CHAINMASK " + "txchain=0x%x, rxchain=0x%x", args[0], args[1]); + break; + case WAL_DBGID_SMPS_DISABLE: + dbglog_printf(timestamp, vap_id, "WAL_DBGID_SMPS_DISABLE"); + break; + case WAL_DBGID_SMPS_ENABLE_HW_CNTRL: + dbglog_printf(timestamp, vap_id, + "WAL_DBGID_SMPS_ENABLE_HW_CNTRL low_pwr_mask=0x%x, high_pwr_mask=0x%x", + args[0], args[1]); + break; + case WAL_DBGID_SMPS_SWSEL_CHAINMASK: + dbglog_printf(timestamp, vap_id, + "WAL_DBGID_SMPS_SWSEL_CHAINMASK low_pwr=0x%x, chain_mask=0x%x", + args[0], args[1]); + break; + default: + return false; + } + + return true; +} + +static A_BOOL +dbglog_scan_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, uint16_t numargs, uint32_t *args) +{ + static const char *const states[] = { + "IDLE", + "BSSCHAN", + "WAIT_FOREIGN_CHAN", + "FOREIGN_CHANNEL", + "TERMINATING" + }; + + static const char *const events[] = { + "REQ", + "STOP", + "BSSCHAN", + "FOREIGN_CHAN", + "CHECK_ACTIVITY", + "REST_TIME_EXPIRE", + "DWELL_TIME_EXPIRE", + "PROBE_TIME_EXPIRE", + }; + + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + dbglog_sm_print(timestamp, vap_id, numargs, args, "SCAN", + states, QDF_ARRAY_SIZE(states), events, + QDF_ARRAY_SIZE(events)); + break; + default: + return false; + } + + return true; +} + +static +A_BOOL dbglog_coex_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + uint8_t i; + char *dbg_id_str; + + static const char *const wlan_rx_xput_status[] = { + "WLAN_XPUT_NORMAL", + "WLAN_XPUT_UNDER_THRESH", + "WLAN_XPUT_CRITICAL", + "WLAN_XPUT_RECOVERY_TIMEOUT", + }; + + static const char *const coex_sched_req[] = { + "SCHED_REQ_NEXT", + "SCHED_REQ_BT", + "SCHED_REQ_WLAN", + "SCHED_REQ_POSTPAUSE", + "SCHED_REQ_UNPAUSE", + }; + + static const char *const coex_sched_type[] = { + "SCHED_NONE", + "SCHED_WLAN", + "SCHED_BT", + "SCHED_WLAN_PAUSE", + "SCHED_WLAN_POSTPAUSE", + "SCHED_WLAN_UNPAUSE", + "COEX_SCHED_MWS", + }; + + static const char *const coex_trf_mgmt_type[] = { + "TRF_MGMT_FREERUN", + "TRF_MGMT_SHAPE_PM", + "TRF_MGMT_SHAPE_PSP", + "TRF_MGMT_SHAPE_S_CTS", + "TRF_MGMT_SHAPE_OCS", + "TRF_MGMT_SHAPE_FIXED_TIME", + "TRF_MGMT_SHAPE_NOA", + "TRF_MGMT_SHAPE_OCS_CRITICAL", + "TRF_MGMT_NONE", + }; + + static const char *const coex_system_status[] = { + "ALL_OFF", + "BTCOEX_NOT_REQD", + "WLAN_IS_IDLE", + "EXECUTE_SCHEME", + "BT_FULL_CONCURRENCY", + "WLAN_SLEEPING", + "WLAN_IS_PAUSED", + "WAIT_FOR_NEXT_ACTION", + "SOC_WAKE", + }; + + static const char *const wlan_rssi_type[] = { + "LOW_RSSI", + "MID_RSSI", + "HI_RSSI", + "INVALID_RSSI", + }; + + static const char *const coex_bt_scheme[] = { + "IDLE_CTRL", + "ACTIVE_ASYNC_CTRL", + "PASSIVE_SYNC_CTRL", + "ACTIVE_SYNC_CTRL", + "DEFAULT_CTRL", + "CONCURRENCY_CTRL", + }; + + static const char *const wal_peer_rx_rate_stats_event_sent[] = { + "PR_RX_EVT_SENT_NONE", + "PR_RX_EVT_SENT_LOWER", + "PR_RX_EVT_SENT_UPPER", + }; + + static const char *const wlan_psp_stimulus[] = { + "ENTRY", + "EXIT", + "PS_READY", + "PS_NOT_READY", + "RX_MORE_DATA_RCVD", + "RX_NO_MORE_DATA_RCVD", + "TX_DATA_COMPLT", + "TX_COMPLT", + "TIM_SET", + "REQ", + "DONE_SUCCESS", + "DONE_NO_PS_POLL_ACK", + "DONE_RESPONSE_TMO", + "DONE_DROPPED", + "DONE_FILTERED", + "WLAN_START", + "NONWLAN_START", + "NONWLAN_INTVL_UPDATE", + "NULL_TX", + "NULL_TX_COMPLT", + "BMISS_FIRST", + "NULL_TX_FAIL", + "RX_NO_MORE_DATA_DATAFRM", + }; + + static const char *const coex_pspoll_state[] = { + "STATE_DISABLED", + "STATE_NOT_READY", + "STATE_ENABLED", + "STATE_READY", + "STATE_TX_STATUS", + "STATE_RX_STATUS", + }; + + static const char *const coex_scheduler_interval[] = { + "COEX_SCHED_NONWLAN_INT", + "COEX_SCHED_WLAN_INT", + }; + + static const char *const wlan_weight[] = { + "BT_COEX_BASE", + "BT_COEX_LOW", + "BT_COEX_MID", + "BT_COEX_MID_NONSYNC", + "BT_COEX_HI_NONVOICE", + "BT_COEX_HI", + "BT_COEX_CRITICAL", + }; + + static const char *const wlan_power_state[] = { + "SLEEP", + "AWAKE", + "FULL_SLEEP", + }; + + static const char *const coex_psp_error_type[] = { + "DISABLED_STATE", + "VDEV_NULL", + "COEX_PSP_ENTRY", + "ZERO_INTERVAL", + "COEX_PSP_EXIT", + "READY_DISABLED", + "READY_NOT_DISABLED", + "POLL_PKT_DROPPED", + "SET_TIMER_PARAM", + }; + + static const char *const wlan_phymode[] = { + "A", + "G", + "B", + "G_ONLY", + "NA_HT20", + "NG_HT20", + "NA_HT40", + "NG_HT40", + "AC_VHT20", + "AC_VHT40", + "AC_VHT80", + "AC_VHT20_2G", + "AC_VHT40_2G", + "AC_VHT80_2G", + "UNKNOWN", + }; + + static const char *const wlan_curr_band[] = { + "2G", + "5G", + }; + + dbg_id_str = dbglog_get_msg(mod_id, dbg_id); + + switch (dbg_id) { + case COEX_SYSTEM_UPDATE: + if (numargs == 1 && args[0] < 9) { + dbglog_printf(timestamp, vap_id, "%s: %s", dbg_id_str, + coex_system_status[args[0]]); + } else if (numargs >= 5 && args[0] < 9 && args[2] < 9) { + dbglog_printf(timestamp, vap_id, + "%s: %s, WlanSysState(0x%x), %s, NumChains(%u), AggrLimit(%u)", + dbg_id_str, coex_system_status[args[0]], + args[1], coex_trf_mgmt_type[args[2]], + args[3], args[4]); + } else { + return false; + } + break; + case COEX_SCHED_START: + if (numargs >= 5 && args[0] < 5 && args[2] < 9 && args[3] < 4 + && args[4] < 4) { + if (args[1] == 0xffffffff) { + dbglog_printf(timestamp, vap_id, + "%s: %s, DETERMINE_DURATION, %s, %s, %s", + dbg_id_str, + coex_sched_req[args[0]], + coex_trf_mgmt_type[args[2]], + wlan_rx_xput_status[args[3]], + wlan_rssi_type[args[4]]); + } else { + dbglog_printf(timestamp, vap_id, + "%s: %s, IntvlDur(%u), %s, %s, %s", + dbg_id_str, + coex_sched_req[args[0]], args[1], + coex_trf_mgmt_type[args[2]], + wlan_rx_xput_status[args[3]], + wlan_rssi_type[args[4]]); + } + } else { + return false; + } + break; + case COEX_SCHED_RESULT: + if (numargs >= 5 && args[0] < 5 && args[1] < 9 && args[2] < 9) { + dbglog_printf(timestamp, vap_id, + "%s: %s, %s, %s, CoexMgrPolicy(%u), IdleOverride(%u)", + dbg_id_str, coex_sched_req[args[0]], + coex_trf_mgmt_type[args[1]], + coex_trf_mgmt_type[args[2]], args[3], + args[4]); + } else { + return false; + } + break; + case COEX_BT_SCHEME: + if (numargs >= 1 && args[0] < 6) { + dbglog_printf(timestamp, vap_id, "%s: %s", dbg_id_str, + coex_bt_scheme[args[0]]); + } else { + return false; + } + break; + case COEX_TRF_FREERUN: + if (numargs >= 5 && args[0] < 7) { + dbglog_printf(timestamp, vap_id, + "%s: %s, AllocatedBtIntvls(%u), BtIntvlCnt(%u), AllocatedWlanIntvls(%u), WlanIntvlCnt(%u)", + dbg_id_str, coex_sched_type[args[0]], + args[1], args[2], args[3], args[4]); + } else { + return false; + } + break; + case COEX_TRF_SHAPE_PM: /* used by ocs now */ + if (numargs >= 3) { + dbglog_printf(timestamp, vap_id, + "%s: IntvlLength(%u), BtDuration(%u), WlanDuration(%u)", + dbg_id_str, args[0], args[1], args[2]); + } else { + return false; + } + break; + case COEX_SYSTEM_MONITOR: + if (numargs >= 5 && args[1] < 4 && args[4] < 4) { + dbglog_printf(timestamp, vap_id, + "%s: WlanRxCritical(%u), %s, MinDirectRxRate(%u), MonitorActiveNum(%u), %s", + dbg_id_str, args[0], + wlan_rx_xput_status[args[1]], args[2], + args[3], wlan_rssi_type[args[4]]); + } else { + return false; + } + break; + case COEX_RX_RATE: + if (numargs >= 5 && args[4] < 3) { + dbglog_printf(timestamp, vap_id, + "%s: NumUnderThreshPeers(%u), MinDirectRate(%u), LastRateSample(%u), DeltaT(%u), %s", + dbg_id_str, args[0], args[1], args[2], + args[3], + wal_peer_rx_rate_stats_event_sent[args + [4]]); + } else { + return false; + } + break; + case COEX_WLAN_INTERVAL_START: + if (numargs >= 5) { + dbglog_printf(timestamp, vap_id, + "%s: WlanIntvlCnt(%u), Duration(%u), Weight(%u), BaseIdleOverride(%u), WeightMat[0](0x%x)", + dbg_id_str, args[0], args[1], args[2], + args[3], args[4]); + } else { + return false; + } + break; + case COEX_WLAN_POSTPAUSE_INTERVAL_START: + if (numargs >= 4) { + dbglog_printf(timestamp, vap_id, + "%s: WlanPostPauseIntvlCnt(%u), XputMonitorActiveNum(%u), Duration(%u), Weight(%u)", + dbg_id_str, args[0], args[1], args[2], + args[3]); + } else { + return false; + } + break; + case COEX_BT_INTERVAL_START: + if (numargs >= 5) { + dbglog_printf(timestamp, vap_id, + "%s: BtIntvlCnt(%u), Duration(%u), Weight(%u), BaseIdleOverride(%u), WeightMat[0](0x%x), ", + dbg_id_str, args[0], args[1], args[2], + args[3], args[4]); + } else { + return false; + } + break; + case COEX_POWER_CHANGE: + if (numargs >= 3 && args[1] < 3 && args[2] < 3) { + dbglog_printf(timestamp, vap_id, + "%s: Event(0x%x) %s->%s", dbg_id_str, + args[0], wlan_power_state[args[1]], + wlan_power_state[args[2]]); + } else { + return false; + } + break; + case COEX_CHANNEL_CHANGE: + if (numargs >= 5 && args[3] < 2 && args[4] < 15) { + dbglog_printf(timestamp, vap_id, + "%s: %uMhz->%uMhz, WlanSysState(0x%x), CurrBand(%s), PhyMode(%s)", + dbg_id_str, args[0], args[1], args[2], + wlan_curr_band[args[3]], + wlan_phymode[args[4]]); + } else { + return false; + } + break; + case COEX_PSP_MGR_ENTER: + if (numargs >= 5 && args[0] < 23 && + args[1] < 6 && args[3] < 2) { + dbglog_printf(timestamp, vap_id, + "%s: %s, %s, PsPollAvg(%u), %s, CurrT(%u)", + dbg_id_str, wlan_psp_stimulus[args[0]], + coex_pspoll_state[args[1]], args[2], + coex_scheduler_interval[args[3]], + args[4]); + } else { + return false; + } + break; + /* Translate following into decimal */ + case COEX_SINGLECHAIN_DBG_1: + case COEX_SINGLECHAIN_DBG_2: + case COEX_SINGLECHAIN_DBG_3: + case COEX_MULTICHAIN_DBG_1: + case COEX_MULTICHAIN_DBG_2: + case COEX_MULTICHAIN_DBG_3: + case BTCOEX_DBG_MCI_1: + case BTCOEX_DBG_MCI_2: + case BTCOEX_DBG_MCI_3: + case BTCOEX_DBG_MCI_4: + case BTCOEX_DBG_MCI_5: + case BTCOEX_DBG_MCI_6: + case BTCOEX_DBG_MCI_7: + case BTCOEX_DBG_MCI_8: + case BTCOEX_DBG_MCI_9: + case BTCOEX_DBG_MCI_10: + + if (numargs > 0) { + dbglog_printf_no_line_break(timestamp, vap_id, "%s: %u", + dbg_id_str, args[0]); + for (i = 1; i < numargs; i++) + dbglog_printf_no_line_break(timestamp, vap_id, + "%u", args[i]); + dbglog_printf_no_line_break(timestamp, vap_id, "\n"); + } else { + return false; + } + break; + case COEX_LinkID: + if (numargs >= 4) { + if (args[0]) { /* Add profile */ + dbglog_printf(timestamp, vap_id, + "%s Alloc: LocalID(%u), RemoteID(%u), MinFreeLocalID(%u)", + dbg_id_str, args[1], args[2], + args[3]); + } else { /* Remove profile */ + dbglog_printf(timestamp, vap_id, + "%s Dealloc: LocalID(%u), RemoteID(%u), MinFreeLocalID(%u)", + dbg_id_str, args[1], args[2], + args[3]); + } + } else { + return false; + } + break; + case COEX_PSP_MGR_RESULT: + if (numargs >= 5 && args[0] < 6) { + dbglog_printf(timestamp, vap_id, + "%s: %s, PsPollAvg(%u), EstimationOverrun(%u), EstimationUnderun(%u), NotReadyErr(%u)", + dbg_id_str, coex_pspoll_state[args[0]], + args[1], args[2], args[3], args[4]); + } else { + return false; + } + break; + case COEX_TRF_SHAPE_PSP: + if (numargs >= 5 && args[0] < 7 && args[1] < 7) { + dbglog_printf(timestamp, vap_id, + "%s: %s, %s, Dur(%u), BtTriggerRecvd(%u), PspWlanCritical(%u)", + dbg_id_str, coex_sched_type[args[0]], + wlan_weight[args[1]], args[2], args[3], + args[4]); + } else { + return false; + } + break; + case COEX_PSP_SPEC_POLL: + if (numargs >= 5) { + dbglog_printf(timestamp, vap_id, + "%s: PsPollSpecEna(%u), Count(%u), NextTS(%u), AllowSpecPsPollTx(%u), Intvl(%u)", + dbg_id_str, args[0], args[1], args[2], + args[3], args[4]); + } else { + return false; + } + break; + case COEX_PSP_READY_STATE: + if (numargs >= 5) { + dbglog_printf(timestamp, vap_id, + "%s: T2NonWlan(%u), CoexSchedulerEndTS(%u), MoreData(%u), PSPRespExpectedTS(%u), NonWlanIdleT(%u)", + dbg_id_str, args[0], args[1], args[2], + args[3], args[4]); + } else { + return false; + } + break; + case COEX_PSP_NONWLAN_INTERVAL: + if (numargs >= 4) { + dbglog_printf(timestamp, vap_id, + "%s: NonWlanBaseIntvl(%u), NonWlanIdleT(%u), PSPSpecIntvl(%u), ApRespTimeout(%u)", + dbg_id_str, args[0], args[1], args[2], + args[3]); + } else { + return false; + } + break; + case COEX_PSP_ERROR: + if (numargs >= 1 && args[0] < 9) { + dbglog_printf_no_line_break(timestamp, vap_id, "%s: %s", + dbg_id_str, + coex_psp_error_type[args + [0]]); + for (i = 1; i < numargs; i++) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + (", %u", args[i])); + } + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("\n")); + } else { + return false; + } + break; + case COEX_PSP_STAT_1: + if (numargs >= 5) { + dbglog_printf(timestamp, vap_id, + "%s: ApResp0(%u), ApResp1(%u), ApResp2(%u), ApResp3(%u), ApResp4(%u)", + dbg_id_str, args[0], args[1], args[2], + args[3], args[4]); + } else { + return false; + } + break; + case COEX_PSP_STAT_2: + if (numargs >= 5) { + dbglog_printf(timestamp, vap_id, + "%s: DataPt(%u), Max(%u), NextApRespIndex(%u), NumOfValidDataPts(%u), PsPollAvg(%u)", + dbg_id_str, args[0], args[1], args[2], + args[3], args[4]); + } else { + return false; + } + break; + case COEX_PSP_RX_STATUS_STATE_1: + if (numargs >= 5) { + if (args[2]) { + dbglog_printf(timestamp, vap_id, + "%s: RsExpectedTS(%u), RespActualTS(%u), Overrun, RsOverrunT(%u), RsRxDur(%u)", + dbg_id_str, args[0], args[1], + args[3], args[4]); + } else { + dbglog_printf(timestamp, vap_id, + "%s: RsExpectedTS(%u), RespActualTS(%u), Underrun, RsUnderrunT(%u), RsRxDur(%u)", + dbg_id_str, args[0], args[1], + args[3], args[4]); + } + } else { + return false; + } + break; + default: + return false; + } + + return true; +} + +static A_BOOL +dbglog_beacon_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + static const char *const states[] = { + "INIT", + "ADJUST_START", + "ADJUSTING", + "ADJUST_HOLD", + }; + + static const char *const events[] = { + "ADJUST_START", + "ADJUST_RESTART", + "ADJUST_STOP", + "ADJUST_PAUSE", + "ADJUST_UNPAUSE", + "ADJUST_INC_SLOP_STEP", + "ADJUST_HOLD", + "ADJUST_HOLD_TIME_OUT", + }; + + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + dbglog_sm_print(timestamp, vap_id, numargs, args, "EARLY_RX", + states, QDF_ARRAY_SIZE(states), events, + QDF_ARRAY_SIZE(events)); + break; + case BEACON_EVENT_EARLY_RX_BMISS_STATUS: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "early_rx bmiss status:rcv=%d total=%d miss=%d", + args[0], args[1], args[2]); + } + break; + case BEACON_EVENT_EARLY_RX_SLEEP_SLOP: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "early_rx update sleep_slop:%d", args[0]); + } + break; + case BEACON_EVENT_EARLY_RX_CONT_BMISS_TIMEOUT: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "early_rx cont bmiss timeout,update sleep_slop:%d", + args[0]); + } + break; + case BEACON_EVENT_EARLY_RX_PAUSE_SKIP_BCN_NUM: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "early_rx skip bcn num:%d", args[0]); + } + break; + case BEACON_EVENT_EARLY_RX_CLK_DRIFT: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "early_rx clk drift:%d", args[0]); + } + break; + case BEACON_EVENT_EARLY_RX_AP_DRIFT: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "early_rx ap drift:%d", args[0]); + } + break; + case BEACON_EVENT_EARLY_RX_BCN_TYPE: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "early_rx bcn type:%d", args[0]); + } + break; + default: + return false; + } + + return true; +} + +static A_BOOL +dbglog_data_txrx_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + switch (dbg_id) { + case DATA_TXRX_DBGID_RX_DATA_SEQ_LEN_INFO: + dbglog_printf(timestamp, vap_id, + "DATA RX seq=0x%x, len=0x%x, stored=0x%x, duperr=0x%x", + args[0], args[1], (args[2] & 0xffff0000) >> 16, + args[2] & 0x0000ffff); + break; + default: + return false; + } + + return true; +} + +static +A_BOOL dbglog_smps_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + static const char *const states[] = { + "S_INACTIVE", + "S_STATIC", + "S_DYNAMIC", + "S_STALLED", + "S_INACTIVE_WAIT", + "S_STATIC_WAIT", + "S_DYNAMIC_WAIT", + }; + + static const char *const events[] = { + "E_STOP", + "E_STOP_COMPL", + "E_START", + "E_STATIC", + "E_STATIC_COMPL", + "E_DYNAMIC", + "E_DYNAMIC_COMPL", + "E_STALL", + "E_RSSI_ABOVE_THRESH", + "E_RSSI_BELOW_THRESH", + "E_FORCED_NONE", + }; + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + dbglog_sm_print(timestamp, vap_id, numargs, args, "STA_SMPS SM", + states, QDF_ARRAY_SIZE(states), events, + QDF_ARRAY_SIZE(events)); + break; + case STA_SMPS_DBGID_CREATE_PDEV_INSTANCE: + dbglog_printf(timestamp, vap_id, "STA_SMPS Create PDEV ctx %#x", + args[0]); + break; + case STA_SMPS_DBGID_CREATE_VIRTUAL_CHAN_INSTANCE: + dbglog_printf(timestamp, vap_id, + "STA_SMPS Create Virtual Chan ctx %#x", args[0]); + break; + case STA_SMPS_DBGID_DELETE_VIRTUAL_CHAN_INSTANCE: + dbglog_printf(timestamp, vap_id, + "STA_SMPS Delete Virtual Chan ctx %#x", args[0]); + break; + case STA_SMPS_DBGID_CREATE_STA_INSTANCE: + dbglog_printf(timestamp, vap_id, "STA_SMPS Create STA ctx %#x", + args[0]); + break; + case STA_SMPS_DBGID_DELETE_STA_INSTANCE: + dbglog_printf(timestamp, vap_id, "STA_SMPS Delete STA ctx %#x", + args[0]); + break; + case STA_SMPS_DBGID_VIRTUAL_CHAN_SMPS_START: + break; + case STA_SMPS_DBGID_VIRTUAL_CHAN_SMPS_STOP: + break; + case STA_SMPS_DBGID_SEND_SMPS_ACTION_FRAME: + dbglog_printf(timestamp, vap_id, + "STA_SMPS STA %#x Signal SMPS mode as %s; cb_flags %#x", + args[0], + (args[1] == + 0 ? "DISABLED" : (args[1] == + 0x1 ? "STATIC" : (args[1] == + 0x3 ? + "DYNAMIC" : + "UNKNOWN"))), + args[2]); + break; + case STA_SMPS_DBGID_DTIM_EBT_EVENT_CHMASK_UPDATE: + dbglog_printf(timestamp, vap_id, + "STA_SMPS_DBGID_DTIM_EBT_EVENT_CHMASK_UPDATE"); + break; + case STA_SMPS_DBGID_DTIM_CHMASK_UPDATE: + dbglog_printf(timestamp, vap_id, + "STA_SMPS_DBGID_DTIM_CHMASK_UPDATE tx_mask %#x rx_mask %#x arb_dtim_mask %#x", + args[0], args[1], args[2]); + break; + case STA_SMPS_DBGID_DTIM_BEACON_EVENT_CHMASK_UPDATE: + dbglog_printf(timestamp, vap_id, + "STA_SMPS_DBGID_DTIM_BEACON_EVENT_CHMASK_UPDATE"); + break; + case STA_SMPS_DBGID_DTIM_POWER_STATE_CHANGE: + dbglog_printf(timestamp, vap_id, + "STA_SMPS_DBGID_DTIM_POWER_STATE_CHANGE cur_pwr_state %s new_pwr_state %s", + (args[0] == + 0x1 ? "SLEEP" : (args[0] == + 0x2 ? "AWAKE" : (args[0] == + 0x3 ? + "FULL_SLEEP" : + "UNKNOWN"))), + (args[1] == + 0x1 ? "SLEEP" : (args[1] == + 0x2 ? "AWAKE" : (args[1] == + 0x3 ? + "FULL_SLEEP" : + "UNKNOWN")))); + break; + case STA_SMPS_DBGID_DTIM_CHMASK_UPDATE_SLEEP: + dbglog_printf(timestamp, vap_id, + "STA_SMPS_DBGID_DTIM_CHMASK_UPDATE_SLEEP tx_mask %#x rx_mask %#x orig_rx %#x dtim_rx %#x", + args[0], args[1], args[2], args[3]); + break; + case STA_SMPS_DBGID_DTIM_CHMASK_UPDATE_AWAKE: + dbglog_printf(timestamp, vap_id, + "STA_SMPS_DBGID_DTIM_CHMASK_UPDATE_AWAKE tx_mask %#x rx_mask %#x orig_rx %#x", + args[0], args[1], args[2]); + break; + default: + dbglog_printf(timestamp, vap_id, "STA_SMPS: UNKNOWN DBGID!"); + return false; + } + + return true; +} + +static A_BOOL +dbglog_p2p_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, uint16_t numargs, uint32_t *args) +{ + static const char *const states[] = { + "ACTIVE", + "DOZE", + "TX_BCN", + "CTWIN", + "OPPPS", + }; + + static const char *const events[] = { + "ONESHOT_NOA", + "CTWINDOW", + "PERIODIC_NOA", + "IDLE", + "NOA_CHANGED", + "TBTT", + "TX_BCN_CMP", + "OPPPS_OK", + "OPPPS_CHANGED", + }; + + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + dbglog_sm_print(timestamp, vap_id, numargs, args, "P2P GO PS", + states, QDF_ARRAY_SIZE(states), events, + QDF_ARRAY_SIZE(events)); + break; + default: + return false; + } + + return true; +} + +static A_BOOL +dbglog_pcielp_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + static const char *const states[] = { + "STOP", + "TX", + "RX", + "SLEEP", + "SUSPEND", + }; + + static const char *const events[] = { + "VDEV_UP", + "ALL_VDEV_DOWN", + "AWAKE", + "SLEEP", + "TX_ACTIVITY", + "TX_INACTIVITY", + "TX_AC_CHANGE", + "SUSPEND", + "RESUME", + }; + + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + dbglog_sm_print(timestamp, vap_id, numargs, args, "PCIELP", + states, QDF_ARRAY_SIZE(states), events, + QDF_ARRAY_SIZE(events)); + break; + default: + return false; + } + + return true; +} + +#ifdef WLAN_OPEN_SOURCE +static int dbglog_block_open(struct inode *inode, struct file *file) +{ + struct fwdebug *fwlog = inode->i_private; + + if (fwlog->fwlog_open) + return -EBUSY; + + fwlog->fwlog_open = true; + + file->private_data = inode->i_private; + return 0; +} + +static int dbglog_block_release(struct inode *inode, struct file *file) +{ + struct fwdebug *fwlog = inode->i_private; + + fwlog->fwlog_open = false; + + return 0; +} + +static ssize_t dbglog_block_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct fwdebug *fwlog = file->private_data; + struct sk_buff *skb; + ssize_t ret_cnt; + size_t len = 0, not_copied; + char *buf; + int ret; + + buf = vzalloc(count); + if (!buf) + return -ENOMEM; + + spin_lock_bh(&fwlog->fwlog_queue.lock); + + if (skb_queue_len(&fwlog->fwlog_queue) == 0) { + /* we must init under queue lock */ + init_completion(&fwlog->fwlog_completion); + + spin_unlock_bh(&fwlog->fwlog_queue.lock); + + ret = + wait_for_completion_interruptible(&fwlog->fwlog_completion); + if (ret == -ERESTARTSYS) { + vfree(buf); + return ret; + } + + spin_lock_bh(&fwlog->fwlog_queue.lock); + } + + while ((skb = __skb_dequeue(&fwlog->fwlog_queue))) { + if (skb->len > count - len) { + /* not enough space, put skb back and leave */ + __skb_queue_head(&fwlog->fwlog_queue, skb); + break; + } + + memcpy(buf + len, skb->data, skb->len); + len += skb->len; + + kfree_skb(skb); + } + + spin_unlock_bh(&fwlog->fwlog_queue.lock); + + /* FIXME: what to do if len == 0? */ + not_copied = copy_to_user(user_buf, buf, len); + if (not_copied != 0) { + ret_cnt = -EFAULT; + goto out; + } + + *ppos = *ppos + len; + + ret_cnt = len; + +out: + vfree(buf); + + return ret_cnt; +} + +static const struct file_operations fops_dbglog_block = { + .open = dbglog_block_open, + .release = dbglog_block_release, + .read = dbglog_block_read, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +#ifdef WLAN_DEBUGFS + +static void dbglog_debugfs_init(wmi_unified_t wmi_handle) +{ + + wmi_handle->debugfs_phy = debugfs_create_dir(CLD_DEBUGFS_DIR, NULL); + if (!wmi_handle->debugfs_phy) { + qdf_print("Failed to create WMI debug fs"); + return; + } + + debugfs_create_file(DEBUGFS_BLOCK_NAME, 0400, + wmi_handle->debugfs_phy, &wmi_handle->dbglog, + &fops_dbglog_block); + + return; +} + +static void dbglog_debugfs_remove(wmi_unified_t wmi_handle) +{ + debugfs_remove_recursive(wmi_handle->debugfs_phy); +} + +#else + +static void dbglog_debugfs_init(wmi_unified_t wmi_handle) +{ +} + +static void dbglog_debugfs_remove(wmi_unified_t wmi_handle) +{ +} + +#endif /* End of WLAN_DEBUGFS */ + +#endif /* WLAN_OPEN_SOURCE */ + +/** + * cnss_diag_handle_crash_inject() - API to handle crash inject command + * @slot: pointer to struct dbglog_slot + * + * API to handle CNSS diag crash inject command + * + * Return: None + */ +static void cnss_diag_handle_crash_inject(struct dbglog_slot *slot) +{ + switch (slot->diag_type) { + case DIAG_TYPE_CRASH_INJECT: + if (slot->length != 2) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("crash_inject cmd error\n")); + return; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("%s : DIAG_TYPE_CRASH_INJECT: %d %d\n", + __func__, slot->payload[0], + slot->payload[1])); + if (!tgt_assert_enable) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("%s: tgt Assert Disabled\n", + __func__)); + return; + } + wma_cli_set2_command(0, (int)GEN_PARAM_CRASH_INJECT, + slot->payload[0], + slot->payload[1], GEN_CMD); + break; + default: + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown cmd[%d] error\n", + slot->diag_type)); + break; + } +} + +#ifdef CNSS_GENL +/** + * cnss_diag_cmd_handler() - API to handle CNSS diag command + * @data: Data received + * @data_len: length of the data received + * @ctx: Pointer to stored context + * @pid: Process ID + * + * API to handle CNSS diag commands from user space + * + * Return: None + */ +static void cnss_diag_cmd_handler(const void *data, int data_len, + void *ctx, int pid) +{ + struct dbglog_slot *slot = NULL; + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_MAX + 1]; + int len; + + /* + * audit note: it is ok to pass a NULL policy here since a + * length check on the data is added later already + */ + if (wlan_cfg80211_nla_parse(tb, CLD80211_ATTR_MAX, + data, data_len, NULL)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: nla parse fails\n", + __func__)); + return; + } + + if (!tb[CLD80211_ATTR_DATA]) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: attr VENDOR_DATA fails\n", + __func__)); + return; + } + + len = nla_len(tb[CLD80211_ATTR_DATA]); + if (len < sizeof(struct dbglog_slot)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: attr length less than sizeof(struct dbglog_slot)\n", + __func__)); + return; + } + + slot = (struct dbglog_slot *)nla_data(tb[CLD80211_ATTR_DATA]); + if (len != (sizeof(struct dbglog_slot) + (uint64_t) slot->length)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: attr length check fails\n", + __func__)); + return; + } + + cnss_diag_handle_crash_inject(slot); + return; +} + +int cnss_diag_activate_service(void) +{ + register_cld_cmd_cb(WLAN_NL_MSG_CNSS_DIAG, cnss_diag_cmd_handler, NULL); + return 0; +} + +int cnss_diag_deactivate_service(void) +{ + deregister_cld_cmd_cb(WLAN_NL_MSG_CNSS_DIAG); + return 0; +} + +#else + +/** + * brief cnss_diag_msg_callback() - Call back invoked by netlink service + * + * This function gets invoked by netlink service when a message is recevied + * from the cnss-diag application in user-space. + * + * param - + * - skb - skb with netlink message + * + * return - 0 for success, non zero for failure + */ +static int cnss_diag_msg_callback(struct sk_buff *skb) +{ + struct nlmsghdr *nlh; + uint8_t *msg; + + nlh = (struct nlmsghdr *)skb->data; + if (!nlh) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: Netlink header null\n", __func__)); + return A_ERROR; + } + + msg = NLMSG_DATA(nlh); + cnss_diag_handle_crash_inject((struct dbglog_slot *)msg); + + return 0; +} + +int cnss_diag_activate_service(void) +{ + int ret; + + /* Register the msg handler for msgs addressed to WLAN_NL_MSG_OEM */ + ret = nl_srv_register(WLAN_NL_MSG_CNSS_DIAG, cnss_diag_msg_callback); + if (ret) + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("CNSS-DIAG Registration failed")); + + return ret; +} + +int cnss_diag_deactivate_service(void) +{ + int ret; + + /* + * Deregister the msg handler for msgs addressed to + * WLAN_NL_MSG_CNSS_DIAG + */ + ret = nl_srv_unregister(WLAN_NL_MSG_CNSS_DIAG, cnss_diag_msg_callback); + if (ret) + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("CNSS-DIAG Registration failed")); + + return ret; +} +#endif + +static A_BOOL +dbglog_wow_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, uint16_t numargs, uint32_t *args) +{ + + switch (dbg_id) { + case WOW_NS_OFLD_ENABLE: + if (4 == numargs) { + dbglog_printf(timestamp, vap_id, + "Enable NS offload, for sender %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", + *(uint8_t *) &args[0], + *((uint8_t *) &args[0] + 1), + *((uint8_t *) &args[0] + 2), + *((uint8_t *) &args[0] + 3), + *(uint8_t *) &args[1], + *((uint8_t *) &args[1] + 1), + *((uint8_t *) &args[1] + 2), + *((uint8_t *) &args[1] + 3), + *(uint8_t *) &args[2], + *((uint8_t *) &args[2] + 1), + *((uint8_t *) &args[2] + 2), + *((uint8_t *) &args[2] + 3), + *(uint8_t *) &args[3], + *((uint8_t *) &args[3] + 1), + *((uint8_t *) &args[3] + 2), + *((uint8_t *) &args[3] + 3)); + } else { + return false; + } + break; + case WOW_ARP_OFLD_ENABLE: + if (1 == numargs) { + dbglog_printf(timestamp, vap_id, + "Enable ARP offload, for sender %d.%d.%d.%d", + *(uint8_t *) args, + *((uint8_t *) args + 1), + *((uint8_t *) args + 2), + *((uint8_t *) args + 3)); + } else { + return false; + } + break; + case WOW_NS_ARP_OFLD_DISABLE: + if (0 == numargs) { + dbglog_printf(timestamp, vap_id, + "disable NS/ARP offload"); + } else { + return false; + } + break; + case WOW_NS_RECEIVED: + if (4 == numargs) { + dbglog_printf(timestamp, vap_id, + "NS requested from %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", + *(uint8_t *) &args[0], + *((uint8_t *) &args[0] + 1), + *((uint8_t *) &args[0] + 2), + *((uint8_t *) &args[0] + 3), + *(uint8_t *) &args[1], + *((uint8_t *) &args[1] + 1), + *((uint8_t *) &args[1] + 2), + *((uint8_t *) &args[1] + 3), + *(uint8_t *) &args[2], + *((uint8_t *) &args[2] + 1), + *((uint8_t *) &args[2] + 2), + *((uint8_t *) &args[2] + 3), + *(uint8_t *) &args[3], + *((uint8_t *) &args[3] + 1), + *((uint8_t *) &args[3] + 2), + *((uint8_t *) &args[3] + 3)); + } else { + return false; + } + break; + case WOW_NS_REPLIED: + if (4 == numargs) { + dbglog_printf(timestamp, vap_id, + "NS replied to %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", + *(uint8_t *) &args[0], + *((uint8_t *) &args[0] + 1), + *((uint8_t *) &args[0] + 2), + *((uint8_t *) &args[0] + 3), + *(uint8_t *) &args[1], + *((uint8_t *) &args[1] + 1), + *((uint8_t *) &args[1] + 2), + *((uint8_t *) &args[1] + 3), + *(uint8_t *) &args[2], + *((uint8_t *) &args[2] + 1), + *((uint8_t *) &args[2] + 2), + *((uint8_t *) &args[2] + 3), + *(uint8_t *) &args[3], + *((uint8_t *) &args[3] + 1), + *((uint8_t *) &args[3] + 2), + *((uint8_t *) &args[3] + 3)); + } else { + return false; + } + break; + case WOW_ARP_RECEIVED: + if (1 == numargs) { + dbglog_printf(timestamp, vap_id, + "ARP requested from %d.%d.%d.%d", + *(uint8_t *) args, + *((uint8_t *) args + 1), + *((uint8_t *) args + 2), + *((uint8_t *) args + 3)); + } else { + return false; + } + break; + break; + case WOW_ARP_REPLIED: + if (1 == numargs) { + dbglog_printf(timestamp, vap_id, + "ARP replied to %d.%d.%d.%d", + *(uint8_t *) args, + *((uint8_t *) args + 1), + *((uint8_t *) args + 2), + *((uint8_t *) args + 3)); + } else { + return false; + } + break; + default: + return false; + } + + return true; +} + +int dbglog_parser_type_init(wmi_unified_t wmi_handle, int type) +{ + if (type >= DBGLOG_PROCESS_MAX) + return A_ERROR; + + dbglog_process_type = type; + gprint_limiter = false; + + return A_OK; +} + +int dbglog_init(wmi_unified_t wmi_handle) +{ + int res = 0; + + OS_MEMSET(mod_print, 0, sizeof(mod_print)); + + dbglog_reg_modprint(WLAN_MODULE_STA_PWRSAVE, + dbglog_sta_powersave_print_handler); + dbglog_reg_modprint(WLAN_MODULE_AP_PWRSAVE, + dbglog_ap_powersave_print_handler); + dbglog_reg_modprint(WLAN_MODULE_WAL, dbglog_wal_print_handler); + dbglog_reg_modprint(WLAN_MODULE_SCAN, dbglog_scan_print_handler); + dbglog_reg_modprint(WLAN_MODULE_RATECTRL, + dbglog_ratectrl_print_handler); + dbglog_reg_modprint(WLAN_MODULE_ANI, dbglog_ani_print_handler); + dbglog_reg_modprint(WLAN_MODULE_COEX, dbglog_coex_print_handler); + dbglog_reg_modprint(WLAN_MODULE_BEACON, dbglog_beacon_print_handler); + dbglog_reg_modprint(WLAN_MODULE_WOW, dbglog_wow_print_handler); + dbglog_reg_modprint(WLAN_MODULE_DATA_TXRX, + dbglog_data_txrx_print_handler); + dbglog_reg_modprint(WLAN_MODULE_STA_SMPS, dbglog_smps_print_handler); + dbglog_reg_modprint(WLAN_MODULE_P2P, dbglog_p2p_print_handler); + dbglog_reg_modprint(WLAN_MODULE_PCIELP, dbglog_pcielp_print_handler); + dbglog_reg_modprint(WLAN_MODULE_IBSS_PWRSAVE, + dbglog_ibss_powersave_print_handler); + tgt_assert_enable = wmi_handle->tgt_force_assert_enable; + + /* Register handler for F3 or debug messages */ + res = + wmi_unified_register_event_handler(wmi_handle, + wmi_dbg_msg_event_id, + dbglog_parse_debug_logs, + WMA_RX_WORK_CTX); + if (res != 0) + return res; + + /* Register handler for FW diag events */ + res = wmi_unified_register_event_handler(wmi_handle, + wmi_diag_container_event_id, + fw_diag_data_event_handler, + WMA_RX_WORK_CTX); + if (res != 0) + return res; + + /* Register handler for new FW diag Event, LOG, MSG combined */ + res = wmi_unified_register_event_handler(wmi_handle, wmi_diag_event_id, + diag_fw_handler, + WMA_RX_WORK_CTX); + if (res != 0) + return res; + +#ifdef WLAN_OPEN_SOURCE + /* Initialize the fw debug log queue */ + skb_queue_head_init(&wmi_handle->dbglog.fwlog_queue); + init_completion(&wmi_handle->dbglog.fwlog_completion); + + /* Initialize debugfs */ + dbglog_debugfs_init(wmi_handle); +#endif /* WLAN_OPEN_SOURCE */ + + return res; +} + +int dbglog_deinit(wmi_unified_t wmi_handle) +{ + int res = 0; + +#ifdef WLAN_OPEN_SOURCE + /* DeInitialize the fw debug log queue */ + skb_queue_purge(&wmi_handle->dbglog.fwlog_queue); + complete(&wmi_handle->dbglog.fwlog_completion); + + /* Deinitialize the debugfs */ + dbglog_debugfs_remove(wmi_handle); +#endif /* WLAN_OPEN_SOURCE */ + tgt_assert_enable = 0; + res = + wmi_unified_unregister_event_handler(wmi_handle, + wmi_dbg_msg_event_id); + if (res != 0) + return res; + + return res; +} diff --git a/drivers/staging/qca-wifi-host-cmn/utils/fwlog/dbglog_host.h b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/dbglog_host.h new file mode 100644 index 0000000000000000000000000000000000000000..93b21efea03a7a42c01d51dcf02a14d24ced32f4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/dbglog_host.h @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2011, 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DBGLOG_HOST_H_ +#define _DBGLOG_HOST_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "dbglog_common.h" +#include "wmi_unified_param.h" + +#define DIAG_FWID_OFFSET 24 +#define DIAG_FWID_MASK 0xFF000000 /* Bit 24-31 */ + +#define DIAG_TIMESTAMP_OFFSET 0 +#define DIAG_TIMESTAMP_MASK 0x00FFFFFF /* Bit 0-23 */ + +#define DIAG_ID_OFFSET 16 +#define DIAG_ID_MASK 0xFFFF0000 /* Bit 16-31 */ + +#define DIAG_VDEVID_OFFSET 11 +#define DIAG_VDEVID_MASK 0x0000F800 /* Bit 11-15 */ +#define DIAG_VDEVID_NUM_MAX 16 + +#define DIAG_VDEVLEVEL_OFFSET 8 +#define DIAG_VDEVLEVEL_MASK 0x00000700 /* Bit 8-10 */ + +#define DIAG_PAYLEN_OFFSET 0 +#define DIAG_PAYLEN_MASK 0x000000FF /* Bit 0-7 */ + +#define DIAG_PAYLEN_OFFSET16 0 +#define DIAG_PAYLEN_MASK16 0x0000FFFF /* Bit 0-16 */ + +#define DIAG_GET_TYPE(arg) \ + ((arg & DIAG_FWID_MASK) >> DIAG_FWID_OFFSET) + +#define DIAG_GET_TIME_STAMP(arg) \ + ((arg & DIAG_TIMESTAMP_MASK) >> DIAG_TIMESTAMP_OFFSET) + +#define DIAG_GET_ID(arg) \ + ((arg & DIAG_ID_MASK) >> DIAG_ID_OFFSET) + +#define DIAG_GET_VDEVID(arg) \ + ((arg & DIAG_VDEVID_MASK) >> DIAG_VDEVID_OFFSET) + +#define DIAG_GET_VDEVLEVEL(arg) \ + ((arg & DIAG_VDEVLEVEL_MASK) >> DIAG_VDEVLEVEL_OFFSET) + +#define DIAG_GET_PAYLEN(arg) \ + ((arg & DIAG_PAYLEN_MASK) >> DIAG_PAYLEN_OFFSET) + +#define DIAG_GET_PAYLEN16(arg) \ + ((arg & DIAG_PAYLEN_MASK16) >> DIAG_PAYLEN_OFFSET16) + +#ifdef FEATURE_FW_LOG_PARSING +/* + * set the dbglog parser type + */int +dbglog_parser_type_init(wmi_unified_t wmi_handle, int type); + +/** dbglog_int - Registers a WMI event handle for WMI_DBGMSG_EVENT + * @brief wmi_handle - handle to wmi module + */ +int +dbglog_init(wmi_unified_t wmi_handle); + +/** dbglog_deinit - UnRegisters a WMI event handle for WMI_DBGMSG_EVENT + * @brief wmi_handle - handle to wmi module + */ +int +dbglog_deinit(wmi_unified_t wmi_handle); + +/** set the size of the report size + * @brief wmi_handle - handle to Wmi module + * @brief size - Report size + */ +int +dbglog_set_report_size(wmi_unified_t wmi_handle, uint16_t size); + +/** Set the resolution for time stamp + * @brief wmi_handle - handle to Wmi module + * @ brief tsr - time stamp resolution + */ +int +dbglog_set_timestamp_resolution(wmi_unified_t wmi_handle, + uint16_t tsr); + +/** Enable reporting. If it is set to false then Target wont deliver + * any debug information + */ +int +dbglog_report_enable(wmi_unified_t wmi_handle, A_BOOL isenable); + +/** Set the log level + * @brief DBGLOG_INFO - Information lowest log level + * @brief DBGLOG_WARNING + * @brief DBGLOG_ERROR - default log level + */ +int +dbglog_set_log_lvl(wmi_unified_t wmi_handle, DBGLOG_LOG_LVL log_lvl); + +/* + * set the debug log level for a given module + * mod_id_lvl : the format is more user friendly. + * module_id = mod_id_lvl/10; + * log_level = mod_id_lvl%10; + * example : mod_id_lvl is 153. then module id is 15 and log level is 3. + * this format allows user to pass a sinlge value + * (which is the most convenient way for most of the OSs) + * to be passed from user to the driver. + */ +int +dbglog_set_mod_log_lvl(wmi_unified_t wmi_handle, uint32_t mod_id_lvl); + +/* + * set the debug log level for wow module + * mod_id_lvl : the format is more user friendly. + * module_id = mod_id_lvl/10; + * log_level = mod_id_lvl%10; + * example : mod_id_lvl is 153. then module id is 15 and log level is 3. + * this format allows user to pass a sinlge value + * (which is the most convenient way for most of the OSs) + * to be passed from user to the driver. + */ +int +dbglog_set_mod_wow_log_lvl(wmi_unified_t wmi_handle, uint32_t mod_id_lvl); + +/** Enable/Disable the logging for VAP */ +int +dbglog_vap_log_enable(wmi_unified_t wmi_handle, uint16_t vap_id, + A_BOOL isenable); +/** Enable/Disable logging for Module */ +int +dbglog_module_log_enable(wmi_unified_t wmi_handle, uint32_t mod_id, + A_BOOL isenable); + +/** set vap enablie bitmap */ +void +dbglog_set_vap_enable_bitmap(wmi_unified_t wmi_handle, + uint32_t vap_enable_bitmap); + +/** set log level for all the modules specified in the bitmap. + * for all other modules with 0 in the bitmap (or) outside the bitmap, + * the log level be reset to DBGLOG_ERR. + */ +void +dbglog_set_mod_enable_bitmap(wmi_unified_t wmi_handle, + uint32_t log_level, + uint32_t *mod_enable_bitmap, + uint32_t bitmap_len); + +int +dbglog_parse_debug_logs(ol_scn_t scn, u_int8_t *datap, + u_int32_t len); + +/** + * cnss_diag_activate_service() - API to register CNSS diag cmd handler + * + * API to register the handler for the NL message received from cnss_diag + * application. + * + * Return: 0 + */ +int cnss_diag_activate_service(void); + +/** + * cnss_diag_deactivate_service() - API to deregister CNSS diag cmd handler + * + * API to deregister the handler for the NL message received from cnss_diag + * application. + * + * Return: 0 + */ +int cnss_diag_deactivate_service(void); + +#else +static inline int +dbglog_parser_type_init(wmi_unified_t wmi_handle, int type) +{ + return A_OK; +} + +static inline int +dbglog_init(wmi_unified_t wmi_handle) +{ + return A_OK; +} + +static inline int +dbglog_deinit(wmi_unified_t wmi_handle) +{ + return A_OK; +} + +static inline int +dbglog_report_enable(wmi_unified_t wmi_handle, A_BOOL isenable) +{ + return A_OK; +} + +static inline int +dbglog_set_log_lvl(wmi_unified_t wmi_handle, DBGLOG_LOG_LVL log_lvl) +{ + return A_OK; +} + +static inline int cnss_diag_activate_service(void) +{ + return A_OK; +} + +static inline int cnss_diag_deactivate_service(void) +{ + return A_OK; +} + +static inline int +dbglog_module_log_enable(wmi_unified_t wmi_handle, uint32_t mod_id, + A_BOOL isenable) +{ + return A_OK; +} + +static inline int +dbglog_vap_log_enable(wmi_unified_t wmi_handle, uint16_t vap_id, + A_BOOL isenable) +{ + return A_OK; +} + +static inline int +dbglog_set_mod_log_lvl(wmi_unified_t wmi_handle, uint32_t mod_id_lvl) +{ + return A_OK; +} + +static inline int +dbglog_set_mod_wow_log_lvl(wmi_unified_t wmi_handle, uint32_t mod_id_lvl) +{ + return A_OK; +} +#endif /* FEATURE_FW_LOG_PARSING */ + +#ifdef __cplusplus +} +#endif + +#endif /* _DBGLOG_HOST_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/fwlog/fw_dbglog_api.c b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/fw_dbglog_api.c new file mode 100644 index 0000000000000000000000000000000000000000..2f22ad9fd77a6d3cd510026763b90cecc4f3015f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/fw_dbglog_api.c @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "fw_dbglog_api.h" +#include "fw_dbglog_priv.h" + +static inline struct dbglog_info *handle2info( + struct common_dbglog_handle *dbg_handle) +{ + return (struct dbglog_info *)dbg_handle; +} + +void fwdbg_set_log_lvl(struct common_dbglog_handle *dbg_handle, ol_scn_t scn, + uint32_t log_lvl) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_set_log_lvl) + dbg_info->ops->dbglog_set_log_lvl(scn, log_lvl); + +} + +int fwdbg_fw_handler(struct common_dbglog_handle *dbg_handle, ol_scn_t soc, + uint8_t *data, uint32_t datalen) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_fw_handler) + return dbg_info->ops->dbglog_fw_handler(soc, data, datalen); + + return 0; +} + +int fwdbg_parse_debug_logs(struct common_dbglog_handle *dbg_handle, + ol_scn_t soc, uint8_t *datap, + uint16_t len, void *context) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_parse_debug_logs) + return dbg_info->ops->dbglog_parse_debug_logs(soc, + datap, len, context); + + return 0; +} +qdf_export_symbol(fwdbg_parse_debug_logs); + +void fwdbg_ratelimit_set(struct common_dbglog_handle *dbg_handle, + uint32_t burst_limit) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_ratelimit_set) + dbg_info->ops->dbglog_ratelimit_set(burst_limit); + +} + +void fwdbg_vap_log_enable(struct common_dbglog_handle *dbg_handle, ol_scn_t scn, + uint16_t vap_id, bool isenable) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_vap_log_enable) + dbg_info->ops->dbglog_vap_log_enable(scn, vap_id, + isenable); + +} + +void fwdbg_set_timestamp_resolution(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, uint16_t tsr) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_set_timestamp_resolution) + dbg_info->ops->dbglog_set_timestamp_resolution(scn, tsr); + +} + +void fwdbg_reporting_enable(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, bool isenable) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_reporting_enable) + dbg_info->ops->dbglog_reporting_enable(scn, isenable); + +} + +void fwdbg_module_log_enable(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, uint32_t mod_id, bool isenable) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_module_log_enable) + dbg_info->ops->dbglog_module_log_enable(scn, mod_id, + isenable); + +} + +void fwdbg_init(struct common_dbglog_handle *dbg_handle, void *soc) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_init) + dbg_info->ops->dbglog_init(soc); + +} + +void fwdbg_free(struct common_dbglog_handle *dbg_handle, void *soc) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_free) + dbg_info->ops->dbglog_free(soc); + +} + +void fwdbg_set_report_size(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, uint16_t size) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_set_report_size) + dbg_info->ops->dbglog_set_report_size(scn, size); + +} + +int fwdbg_smartlog_init(struct common_dbglog_handle *dbg_handle, void *icp) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->smartlog_init) + return dbg_info->ops->smartlog_init(icp); + + return 0; +} + +void fwdbg_smartlog_deinit(struct common_dbglog_handle *dbg_handle, void *sc) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->smartlog_deinit) + dbg_info->ops->smartlog_deinit(sc); +} + +ssize_t fwdbg_smartlog_dump(struct common_dbglog_handle *dbg_handle, + struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->smartlog_dump) + return dbg_info->ops->smartlog_dump(dev, attr, buf); + + return 0; +} diff --git a/drivers/staging/qca-wifi-host-cmn/utils/fwlog/inc/fw_dbglog_api.h b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/inc/fw_dbglog_api.h new file mode 100644 index 0000000000000000000000000000000000000000..6499e14c4452e111a0533b319e2d8504cfd5ce93 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/inc/fw_dbglog_api.h @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _FW_DBGLOG_API_H_ +#define _FW_DBGLOG_API_H_ + +#include "target_if.h" + +/** + * fwdbg_set_log_lvl() - API to set debug log level + * @dbg_handle: Debug module handle + * @scn: scn handle + * @log_lvl: value of log level + * + * Send wmi configuration command to set debug log level. + * + * Return: None + */ +void fwdbg_set_log_lvl(struct common_dbglog_handle *dbg_handle, ol_scn_t scn, + uint32_t log_lvl); + +/** + * fwdbg_fw_handler() - Firmware handler interface + * @dbg_handle: Debug module handle + * @sc: soc handle + * @data: Reference to command data + * @datalen: length of data + * + * Return: 0 success + */ +int fwdbg_fw_handler(struct common_dbglog_handle *dbg_handle, ol_scn_t sc, + uint8_t *data, uint32_t datalen); + +/** + * fwdbg_parse_debug_logs() - API to parse firmware debug logs + * @dbg_handle: Debug module handle + * @soc: soc handle + * @datap: Reference to log data + * @len: length of data + * @context: log context + * + * API parse firmware debug log messages and prints to console. + * + * Return: 0 success + */ +int fwdbg_parse_debug_logs(struct common_dbglog_handle *dbg_handle, + ol_scn_t soc, uint8_t *datap, + uint16_t len, void *context); + +/** + * fwdbg_ratelimit_set() - API to set rate limit + * @dbg_handle: Debug module handle + * @burst_limit: burst limit + * + * Return: None + */ +void fwdbg_ratelimit_set(struct common_dbglog_handle *dbg_handle, + uint32_t burst_limit); + +/** + * fwdbg_vap_log_enable() - API to Enable/Disable the logging for VAP + * @dbg_handle: Debug module handle + * @scn: scn handle + * @vap_id: VAP id + * @isenable: Enable/disable + * + * API allows to enable or disable debuglogs at VAP level. It encodes wmi + * config command based on VAP id and sends wmi command to firmware to + * enable/disable debuglog. + * + * Return: None + */ +void fwdbg_vap_log_enable(struct common_dbglog_handle *dbg_handle, ol_scn_t scn, + uint16_t vap_id, bool isenable); + +/** + * fwdbg_set_timestamp_resolution - Set the resolution for time stamp + * @dbg_handle: Debug module handle + * @scn: scn handle + * @tsr: time stamp resolution + * + * Set the resolution for time stamp in debug logs. It encodes wmi + * config command to desired timestamp resolution and sends wmi command to + * firmware. + * + * Return: None + */ +void fwdbg_set_timestamp_resolution(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, uint16_t tsr); + +/** + * fwdbg_reporting_enable() - Enable reporting. + * @dbg_handle: Debug module handle + * @scn: scn handle + * @isenable: Enable/disable + * + * API to enable debug information reporting. It encodes wmi config command + * to enable reporting. If set to false then Target wont deliver any debug + * information. + * + * Return: None + */ +void fwdbg_reporting_enable(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, bool isenable); + +/** + * fwdbg_module_log_enable() - Enable/Disable logging for Module. + * @dbg_handle: Debug module handle + * @scn: scn handle + * @mod_id: Module id + * @isenable: Enable/disable + * + * API allows to enable or disable debuglogs per module. It encodes wmi + * config command based on module id and sends wmi command to firmware to + * enable/disable debuglog for that module. + * + * Return: None + */ +void fwdbg_module_log_enable(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, uint32_t mod_id, bool isenable); + +/** + * fwdbg_init() - Initialize debuglog. + * @dbg_handle: Debug module handle + * @soc: soc handle + * + * It initializes debuglog print function for set of modules and + * initializes WMI event handler for debuglog message event. + * + * Return: None + */ +void fwdbg_init(struct common_dbglog_handle *dbg_handle, void *soc); + +/** + * fwdbg_free() - Free debug handler. + * @dbg_handle: Debug module handle + * @soc: soc handle + * + * Return: None + */ +void fwdbg_free(struct common_dbglog_handle *dbg_handle, void *soc); + +/** + * fwdbg_set_report_size() - set the size of the report size + * @dbg_handle: Debug module handle + * @scn: soc handler + * @size: Report size + * + * Set the debug log report size. It encodes wmi config command to + * desired report size and sends wmi command to firmware. + * + * Return: None + */ +void fwdbg_set_report_size(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, uint16_t size); + +/** + * fwdbg_smartlog_init() - initialize smart logging feature + * @dbg_handle: Debug module handle + * @ic: ic handler + * + * Return: 0 Success + */ +int fwdbg_smartlog_init(struct common_dbglog_handle *dbg_handle, void *icp); + +/** + * fwdbg_smartlog_deinit() - uninitializes smart logging feature + * @dbg_handle: Debug module handle + * @sc: sc handler + * + * Return: None + */ +void fwdbg_smartlog_deinit(struct common_dbglog_handle *dbg_handle, void *sc); + +/** + * fwdbg_smartlog_dump() - dumps smart logs + * @dev: dev handler + * @dbg_handle: Debug module handle + * @attr: dev handler attributes + * @buf: destination buffer to dump smart logs + * + * Return: 0 success + */ +ssize_t fwdbg_smartlog_dump(struct common_dbglog_handle *dbg_handle, + struct device *dev, + struct device_attribute *attr, char *buf); +#endif /* _FW_DBGLOG_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/fwlog/inc/fw_dbglog_priv.h b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/inc/fw_dbglog_priv.h new file mode 100644 index 0000000000000000000000000000000000000000..e7c5abdaeb33e2dc1a10fb6eaebee119c1c9ceaa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/inc/fw_dbglog_priv.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API definitions for the Unified Wireless + * Module Interface (WMI). + */ +#ifndef _FW_DBGLOG_PRIV_H_ +#define _FW_DBGLOG_PRIV_H_ + +#include + +struct dbglog_ops { + +void (*dbglog_set_log_lvl)(ol_scn_t scn, uint32_t log_lvl); +int (*dbglog_fw_handler)(ol_scn_t soc, uint8_t *data, uint32_t datalen); +int (*dbglog_parse_debug_logs)(ol_scn_t scn, + u_int8_t *datap, uint16_t len, void *context); +void (*dbglog_ratelimit_set)(uint32_t burst_limit); +void (*dbglog_vap_log_enable)(ol_scn_t soc, uint16_t vap_id, + bool isenable); +void (*dbglog_set_timestamp_resolution)(ol_scn_t soc, uint16_t tsr); +void (*dbglog_reporting_enable)(ol_scn_t soc, bool isenable); +void (*dbglog_module_log_enable)(ol_scn_t scn, + uint32_t mod_id, bool isenable); +void (*dbglog_init)(void *scn); +void (*dbglog_set_report_size)(ol_scn_t scn, uint16_t size); +void (*dbglog_free)(void *soc); +int (*smartlog_init)(void *icp); +void (*smartlog_deinit)(void *sc); +ssize_t (*smartlog_dump)(struct device *dev, + struct device_attribute *attr, char *buf); + +}; + +struct dbglog_info { + struct dbglog_ops *ops; +}; +#endif /*_FW_DBGLOG_PRIV_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_core_event.h b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_core_event.h new file mode 100644 index 0000000000000000000000000000000000000000..6a4ea97c72939fd6fe5d4786a058136b3929ed3e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_core_event.h @@ -0,0 +1,1021 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(__HOST_DIAG_CORE_EVENT_H) +#define __HOST_DIAG_CORE_EVENT_H + +/**========================================================================= + + \file host_diag_core_event.h + + \brief WLAN UTIL host DIAG Events + + Definitions for DIAG Events + + ========================================================================*/ + +/* $Header$ */ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include "qdf_types.h" +#include "i_host_diag_core_event.h" + +/*-------------------------------------------------------------------------- + Preprocessor definitions and constants + ------------------------------------------------------------------------*/ +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#define WAKE_LOCK_NAME_LEN 80 +#define RSN_OUI_SIZE 4 + +/** + * enum wifi_frm_type: type of frame + * + * @MGMT: Indicates management frames + * @CTRL: Indicates control frames + * @DATA: Inidcates data frames + */ +enum wifi_frm_type { + MGMT = 0x00, + CTRL = 0x01, + DATA = 0x02, +}; + +/* + * enum mgmt_frm_subtype: sub types of mgmt frames + * + * @ASSOC_REQ: association request frame + * @ASSOC_RESP: association response frame + * @REASSOC_REQ: reassociation request frame + * @REASSOC_RESP: reassociation response frame + * @PROBE_REQ: probe request frame + * @PROBE_RESP: probe response frame + * @BEACON: beacon frame + * @ATIM: ATIM frame + * @DISASSOC: disassociation frame + * @AUTH: authentication frame + * @DEAUTH: deauthentication frame + * @ACTION: action frame + * @ACTION_NO_ACK: action no ack frame + */ +enum mgmt_frm_subtype { + ASSOC_REQ = 0x00, + ASSOC_RESP = 0x01, + REASSOC_REQ = 0x02, + REASSOC_RESP = 0x03, + PROBE_REQ = 0x04, + PROBE_RESP = 0x05, + BEACON = 0x08, + ATIM = 0x09, + DISASSOC = 0x0a, + AUTH = 0x0b, + DEAUTH = 0x0c, + ACTION = 0x0d, + ACTION_NO_ACK = 0x0e, +}; + +/** + * enum mgmt_auth_type: type of authentication + * + * @AUTH_OPEN: no security applied + * @AUTH_SHARED: WEP type of auth + * @AUTH_WPA_EAP: WPA1 EAP based auth + * @AUTH_WPA_PSK: WPA1 PSK based auth + * @AUTH_WPA2_EAP: WPA2 EAP based auth + * @AUTH_WPA2_PSK: WPA2 PSK based auth + * @AUTH_WAPI_CERT: WAPI CERT based auth + * @AUTH_WAPI_PSK: WAPI PSK based auth + */ +enum mgmt_auth_type { + AUTH_OPEN = 0x00, + AUTH_SHARED = 0x01, + AUTH_WPA_EAP = 0x02, + AUTH_WPA_PSK = 0x03, + AUTH_WPA2_EAP = 0x04, + AUTH_WPA2_PSK = 0x05, + AUTH_WAPI_CERT = 0x06, + AUTH_WAPI_PSK = 0x07, + AUTH_MAX = 0xff, +}; + +/** + * enum mgmt_encrypt_type: type of encryption + * + * @ENC_MODE_OPEN: no encryption applied + * @ENC_MODE_WEP40: WEP 40 bits encryption + * @ENC_MODE_WEP104: WEP 104 bits encryption + * @ENC_MODE_TKIP: TKIP based encryption + * @ENC_MODE_AES: AES based encryption + * @ENC_MODE_AES_GCMP: AES with GCMP encryption + * @ENC_MODE_AES_GCMP_256: AES with 256 bit GCMP encryption + * @ENC_MODE_SMS4: WAPI based SMS4 encryption + */ +enum mgmt_encrypt_type { + ENC_MODE_OPEN = 0x00, + ENC_MODE_WEP40 = 0x01, + ENC_MODE_WEP104 = 0x02, + ENC_MODE_TKIP = 0x03, + ENC_MODE_AES = 0x04, + ENC_MODE_AES_GCMP = 0x05, + ENC_MODE_AES_GCMP_256 = 0x06, + ENC_MODE_SMS4 = 0x07, + ENC_MODE_MAX = 0x0f, +}; + +/** + * enum mgmt_ch_width: channel width of connection + * + * @BW_20MHZ: 20 MHz of channel bonding + * @BW_40MHZ: 40 MHz of channel bonding + * @BW_80MHZ: 80 MHz of channel bonding + * @BW_160MHZ: 160 MHz of channel bonding + * @BW_80P80MHZ: 80 + 80 MHz of channel bonding + * @BW_5MHZ: 5 MHz of channel bonding + * @BW_10MHZ: 10 MHz of channel bonding + */ +enum mgmt_ch_width { + BW_20MHZ = 0x00, + BW_40MHZ = 0x01, + BW_80MHZ = 0x02, + BW_160MHZ = 0x03, + BW_80P80MHZ = 0x04, + BW_5MHZ = 0x05, + BW_10MHZ = 0x06, + BW_MAX = 0xff, +}; + +/** + * enum mgmt_dot11_mode: 80211 mode of operation + * + * @DOT11_MODE_ABG: 802.11-ABG mix mode + * @DOT11_MODE_11A: 802.11-A mode + * @DOT11_MODE_11B: 802.11-B mode + * @DOT11_MODE_11G: 802.11-G mode + * @DOT11_MODE_11N: 802.11-N mode + * @DOT11_MODE_11AC: 802.11-AC mode + * @DOT11_MODE_11G_ONLY: 802.11-G only mode + * @DOT11_MODE_11N_ONLY: 802.11-N only mode + * @DOT11_MODE_11AC_ONLY: 802.11-AC only mode + * @DOT11_MODE_AUTO: 802.11 auto mode + * @DOT11_MODE_11AX: 802.11-AX mode + * @DOT11_MODE_11AX_ONLY: 802.11-AX only mode + */ +enum mgmt_dot11_mode { + DOT11_MODE_ABG = 0x00, + DOT11_MODE_11A = 0x01, + DOT11_MODE_11B = 0x02, + DOT11_MODE_11G = 0x03, + DOT11_MODE_11N = 0x04, + DOT11_MODE_11AC = 0x05, + DOT11_MODE_11G_ONLY = 0x06, + DOT11_MODE_11N_ONLY = 0x07, + DOT11_MODE_11AC_ONLY = 0x08, + DOT11_MODE_AUTO = 0x09, + DOT11_MODE_11AX = 0x0a, + DOT11_MODE_11AX_ONLY = 0x0b, + DOT11_MODE_MAX = 0xff, +}; + +/** + * enum mgmt_bss_type: persona type + * + * @STA_PERSONA: STA mode + * @SAP_PERSONA: SAP mode + * @P2P_CLIENT_PERSONA: P2P cli mode + * @P2P_GO_PERSONA: P2P go mode + * @FTM_PERSONA: FTM mode + * @IBSS_PERSONA: IBSS mode + * @MONITOR_PERSONA: monitor mode + * @P2P_DEVICE_PERSONA: P2P device mode + * @OCB_PERSONA: OCB mode + * @EPPING_PERSONA: epping mode + * @QVIT_PERSONA: QVIT mode + * @NDI_PERSONA: NDI mode + * @WDS_PERSONA: WDS mode + * @BTAMP_PERSONA: BT amp mode + * @AHDEMO_PERSONA: AH demo mode + */ +enum mgmt_bss_type { + STA_PERSONA = 0x00, + SAP_PERSONA = 0x01, + P2P_CLIENT_PERSONA = 0x02, + P2P_GO_PERSONA = 0x03, + FTM_PERSONA = 0x04, + IBSS_PERSONA = 0x05, + MONITOR_PERSONA = 0x06, + P2P_DEVICE_PERSONA = 0x07, + OCB_PERSONA = 0x08, + EPPING_PERSONA = 0x09, + QVIT_PERSONA = 0x0a, + NDI_PERSONA = 0x0b, + WDS_PERSONA = 0x0c, + BTAMP_PERSONA = 0x0d, + AHDEMO_PERSONA = 0x0e, + MAX_PERSONA = 0xff, +}; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_SECURITY + ------------------------------------------------------------------------*/ +typedef struct { + uint8_t eventId; + uint8_t authMode; + uint8_t encryptionModeUnicast; + uint8_t encryptionModeMulticast; + uint8_t pmkIDMatch; + uint8_t bssid[6]; + uint8_t keyId; + uint8_t status; +} host_event_wlan_security_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_STATUS_V2 + ------------------------------------------------------------------------*/ +typedef struct { + uint8_t eventId; + uint8_t ssid[32]; + uint8_t bssType; + uint8_t rssi; + uint8_t channel; + uint8_t qosCapability; + uint8_t authType; + uint8_t encryptionType; + uint8_t reason; + uint8_t reasonDisconnect; +} host_event_wlan_status_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_HANDOFF + ------------------------------------------------------------------------*/ +typedef struct { + uint8_t eventId; + uint8_t currentApBssid[6]; + uint8_t currentApRssi; + uint8_t candidateApBssid[6]; + uint8_t candidateApRssi; +} host_event_wlan_handoff_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_VCC + ------------------------------------------------------------------------*/ +typedef struct { + uint8_t eventId; + uint8_t rssi; + uint8_t txPer; + uint8_t rxPer; + int linkQuality; +} host_event_wlan_vcc_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_QOS + ------------------------------------------------------------------------*/ +typedef struct { + uint8_t eventId; + uint8_t reasonCode; +} host_event_wlan_qos_payload_type; + +/** + * host_event_wlan_connection_stats: to capture connection details + * + * @rssi: RSSI signal strength of connected AP, units in dbM + * @ssid_len: length of SSID + * @ssid: SSID of AP where STA is connected + * @bssid: bssid of AP where STA is connected + * @operating_channel: channel on which AP is connected + * @qos_capability: QoS is enabled or no + * @chnl_bw: channel BW of connection, units in MHz + * Range: enum mgmt_ch_width + * @dot11mode: 802.11 mode of current connection + * Range: enum mgmt_dot11_mode + * @bss_type: type of the BSS whether AP/IBSS/P2PGO + * Range: enum mgmt_bss_type bss_type + * @auth_type: type of authentication for connected AP + * Range: enum mgmt_auth_type + * @encryption_type: type of encryption applied + * Range: enum mgmt_encrypt_type + * @reserved1: reserved for future use + * @est_link_speed: link speed of connection, units in Mbps + * @result_code: result code of connection success or failure + * @reason_code: if failed then what is the reason + * @op_freq: channel frequency in MHz on which AP is connected + */ +struct host_event_wlan_connection_stats { + int8_t rssi; + uint8_t ssid_len; + char ssid[32]; + uint8_t bssid[6]; + uint8_t operating_channel; + uint8_t qos_capability; + uint8_t chnl_bw; + uint8_t dot11mode; + uint8_t bss_type; + uint8_t auth_type; + uint8_t encryption_type; + uint8_t reserved1; + uint32_t est_link_speed; + uint16_t result_code; + uint16_t reason_code; + uint32_t op_freq; +} qdf_packed; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_PE + ------------------------------------------------------------------------*/ +typedef struct { + char bssid[6]; + uint16_t event_type; + uint16_t sme_state; + uint16_t mlm_state; + uint16_t status; + uint16_t reason_code; +} host_event_wlan_pe_payload_type; + +/** + * host_event_wlan_mgmt_payload_type: To capture TX/RX mgmt frames' payload + * + * @mgmt_type: type of frames, value: enum wifi_frm_type + * @mgmt_subtype: subtype of mgmt frame, value: enum mgmt_frm_subtype + * @operating_channel: operating channel of AP + * @ssid_len: length of SSID, max 32 bytes long as per standard + * @ssid: SSID of connected AP + * @self_mac_addr: mac address of self interface + * @bssid: BSSID for which frame is received + * @result_code: result code TX/RX OTA delivery + * @reason_code: reason code given in TX/RX frame + */ +struct host_event_wlan_mgmt_payload_type { + uint8_t mgmt_type; + uint8_t mgmt_subtype; + uint8_t operating_channel; + uint8_t ssid_len; + char ssid[32]; + char self_mac_addr[6]; + char bssid[6]; + uint16_t result_code; + uint16_t reason_code; +} qdf_packed; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_ADD_BLOCK_ACK_SUCCESS + ------------------------------------------------------------------------*/ +typedef struct { + char ucBaPeerMac[6]; + uint8_t ucBaTid; + uint8_t ucBaBufferSize; + uint16_t usBaSSN; + uint8_t fInitiator; +} host_event_wlan_add_block_ack_success_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_ADD_BLOCK_ACK_FAILED + ------------------------------------------------------------------------*/ +typedef struct { + char ucBaPeerMac[6]; + uint8_t ucBaTid; + uint8_t ucReasonCode; + uint8_t fInitiator; +} host_event_wlan_add_block_ack_failed_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_DELETE_BLOCK_ACK_SUCCESS + ------------------------------------------------------------------------*/ +typedef struct { + char ucBaPeerMac[6]; + uint8_t ucBaTid; + uint8_t ucDeleteReasonCode; +} host_event_wlan_add_block_ack_deleted_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_DELETE_BLOCK_ACK_FAILED + ------------------------------------------------------------------------*/ +typedef struct { + char ucBaPeerMac[6]; + uint8_t ucBaTid; + uint8_t ucDeleteReasonCode; + uint8_t ucFailReasonCode; +} host_event_wlan_add_block_ack_delete_failed_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_BSS_PROTECTION + ------------------------------------------------------------------------*/ +typedef struct { + uint8_t event_type; + uint8_t prot_type; +} host_event_wlan_bss_prot_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_BRINGUP_STATUS + ------------------------------------------------------------------------*/ +typedef struct { + uint16_t wlanStatus; + char driverVersion[10]; +} host_event_wlan_bringup_status_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_POWERSAVE_WOW + ------------------------------------------------------------------------*/ +typedef struct { + uint8_t event_subtype; + uint8_t wow_type; + uint8_t wow_magic_pattern[6]; + uint8_t wow_del_ptrn_id; + uint8_t wow_wakeup_cause; + uint8_t wow_wakeup_cause_pbm_ptrn_id; +} host_event_wlan_powersave_wow_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_POWERSAVE_WOW_STATS + ------------------------------------------------------------------------*/ +/** + * host_event_wlan_powersave_wow_stats - Structure holding wow stats information + * @wow_ucast_wake_up_count: wow unicast packet wakeup count + * @wow_bcast_wake_up_count: wow broadcast packet wakeup count + * @wow_ipv4_mcast_wake_up_count: wow ipv4 multicast packet wakeup count + * @wow_ipv6_mcast_wake_up_count: wow ipv6 multicast packet wakeup count + * @wow_ipv6_mcast_ra_stats: wow ipv6 multicast router advertisement + * packet wakeup count + * @wow_ipv6_mcast_ns_stats: wow ipv6 multicast Neighbor Solicitation + * packet wakeup count + * @wow_ipv6_mcast_na_stats: wow ipv6 multicast address space + * packet wakeup count + * @wow_pno_match_wake_up_count: wow preferred network offload match + * packet wakeup count + * @wow_pno_complete_wake_up_count: wow preferred network offload complete + * packet wakeup count + * @wow_gscan_wake_up_count: wow external scan packet wakeup count + * @wow_low_rssi_wake_up_count: wow low rssi packet wakeup count + * @wow_rssi_breach_wake_up_count: wow rssi breach packet wakeup count + * @wow_icmpv4_count: wow icmpv4 packet count + * @wow_icmpv6_count: wow icmpv6 packet count + * @wow_oem_response_wake_up_count: wow oem response packet wakeup count + * + * This structure contains the wow stats information related to diag event + */ +struct host_event_wlan_powersave_wow_stats { + uint32_t wow_ucast_wake_up_count; + uint32_t wow_bcast_wake_up_count; + uint32_t wow_ipv4_mcast_wake_up_count; + uint32_t wow_ipv6_mcast_wake_up_count; + uint32_t wow_ipv6_mcast_ra_stats; + uint32_t wow_ipv6_mcast_ns_stats; + uint32_t wow_ipv6_mcast_na_stats; + uint32_t wow_pno_match_wake_up_count; + uint32_t wow_pno_complete_wake_up_count; + uint32_t wow_gscan_wake_up_count; + uint32_t wow_low_rssi_wake_up_count; + uint32_t wow_rssi_breach_wake_up_count; + uint32_t wow_icmpv4_count; + uint32_t wow_icmpv6_count; + uint32_t wow_oem_response_wake_up_count; + uint32_t Reserved_1; + uint32_t Reserved_2; + uint32_t Reserved_3; + uint32_t Reserved_4; +}; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_BTC + ------------------------------------------------------------------------*/ +typedef struct { + uint8_t eventId; + uint8_t btAddr[6]; + uint16_t connHandle; + uint8_t connStatus; + uint8_t linkType; + uint8_t scoInterval; + uint8_t scoWindow; + uint8_t retransWindow; + uint8_t mode; +} host_event_wlan_btc_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_EAPOL + ------------------------------------------------------------------------*/ +/** + * struct host_event_wlan_eapol - Structure holding the eapol information + * @event_sub_type: 0-Transmitted, 1-Received + * @eapol_packet_type: 0 - EAP Start, 1 - EAPOL Start, 2 - EAPOL Logoff + 3 - EAPOL Key, 4 - EAPOL Encapsulated Alert + * @eapol_key_info: This field from the driver is in big endian format. + * So, the masks .0x8013. can be used to extract the + * message type. After masking, the values corresponding + * to messages 1/2/3/4 are given below: + * Msg. 1 0x8000 + * Msg. 2 0x0001 + * Msg. 3 0x8013 + * Msg. 4 0x0003 + * @eapol_rate: Rate at which the frame is received + * @dest_addr: Destination address + * @src_addr: Source address + * + * This structure contains the EAPOL information related to logging + */ +struct host_event_wlan_eapol { + uint8_t event_sub_type; + uint8_t eapol_packet_type; + uint16_t eapol_key_info; + uint16_t eapol_rate; + uint8_t dest_addr[6]; + uint8_t src_addr[6]; +}; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_LOW_RESOURCE_FAILURE + ------------------------------------------------------------------------*/ +/** + * struct host_event_wlan_low_resource_failure - Structure holding the + * low resource failure information + * @event_sub_type: Gives further information about reason for + * low resource condition + * + * This structure will hold the low resource failure information + */ +struct host_event_wlan_low_resource_failure { + uint8_t event_sub_type; +}; + +/** + * enum resource_failure_type - Reason for low resource failure condition + * @WIFI_EVENT_MEMORY_FAILURE: Memory failure + * + * This enum has the reason codes why the low resource situation is observed + */ +enum resource_failure_type { + WIFI_EVENT_MEMORY_FAILURE, +}; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_RSN_INFO + ------------------------------------------------------------------------- + */ +/** + * struct event_wlan_csr_rsn_info - Structure holding the + * RSN information for assoc request + * @akm_suite: Gives information about akm suites used in assoc request + * @ucast_cipher: Unicast cipher used in assoc request + * @mcast_cipher: Multi cast cipher used in assoc request + * @group_mgmt: Requested group mgmt cipher suite + * + * This structure will hold the RSN information for assoc request + */ +struct event_wlan_csr_rsn_info { + uint8_t akm_suite[RSN_OUI_SIZE]; + uint8_t ucast_cipher[RSN_OUI_SIZE]; + uint8_t mcast_cipher[RSN_OUI_SIZE]; + uint8_t group_mgmt[RSN_OUI_SIZE]; +}; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_AUTH_INFO + ------------------------------------------------------------------------- + */ +/** + * struct event_wlan_lim_auth_info - Structure holding the + * algo num, seq num and status code for auth request + * @auth_algo_num: Gives information about algo num used in auth request + * @auth_transaction_seq_num: seq num of auth request + * @auth_status_code: status code of auth request + * + * This structure will hold the algo num, seq num and status code + * for auth request + */ +struct event_wlan_lim_auth_info { + uint16_t auth_algo_num; + uint16_t auth_transaction_seq_num; + uint16_t auth_status_code; +}; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_WAKE_LOCK + ------------------------------------------------------------------------*/ +/** + * struct host_event_wlan_wake_lock - Structure holding the wakelock information + * @status: Whether the wakelock is taken/released + * @reason: Reason for taking this wakelock + * @timeout: Timeout value in case of timed wakelocks + * @name_len: Length of the name of the wakelock that will follow + * @name: Name of the wakelock + * + * This structure will hold the wakelock information + */ +struct host_event_wlan_wake_lock { + uint32_t status; + uint32_t reason; + uint32_t timeout; + uint32_t name_len; + char name[WAKE_LOCK_NAME_LEN]; +}; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_LOG_COMPLETE + ------------------------------------------------------------------------*/ +/** + * struct host_event_wlan_log_complete - Holds log completion details + * @is_fatal: Indicates if the event is fatal or not + * @indicator: Source of the bug report - Framework/Host/Firmware + * @reason_code: Reason for triggering bug report + * @reserved: Reserved field + * + * This structure holds the log completion related information + */ +struct host_event_wlan_log_complete { + uint32_t is_fatal; + uint32_t indicator; + uint32_t reason_code; + uint32_t reserved; +}; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_STA_KICKOUT + ------------------------------------------------------------------------*/ +/** + * struct host_event_wlan_kickout - Holds diag event details + * @reasoncode: Indicates the reasoncode of event + * @peer_macaddr: Indicates the peer macaddr + * @vdev_id: Indicate unique id for identifying the VDEV + * + * This structure holds the diag event related information + */ + +struct host_event_wlan_kickout { + uint32_t reasoncode; + uint8_t peer_mac[QDF_MAC_ADDR_SIZE]; + uint8_t vdev_id; +}; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_SOFTAP_DATASTALL/EVENT_WLAN_STA_DATASTALL + ------------------------------------------------------------------------*/ +/** + * struct host_event_wlan_softap_datastall - Holds diag event details + * @reason: Indicates the reason of event + * + *This structure holds the host diag event related information + */ + +struct host_event_wlan_datastall { + uint32_t reason; +}; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_SSR_REINIT_SUBSYSTEM + ------------------------------------------------------------------------*/ +/** + * struct host_event_wlan_ssr_reinit - Holds diag event details + * @status: Indicates the status of event + * + *This structure holds the host diag event related information + */ + +struct host_event_wlan_ssr_reinit { + uint32_t status; +}; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_SSR_SHUTDOWN_SUBSYSTEM + ------------------------------------------------------------------------*/ +/** + * struct host_event_wlan_ssr_shutdown - Holds diag event details + * @status: Indicates the status of event + * + *This structure holds the host diag event related information + */ + +struct host_event_wlan_ssr_shutdown { + uint32_t status; +}; + + +/*------------------------------------------------------------------------- + Function declarations and documenation + ------------------------------------------------------------------------*/ +/** + * enum host_sta_kickout_events - Enum containing sta kickout subtype + * @HOST_STA_KICKOUT_REASON_BMISS: Indicate sta got disconnected reason + * beacon miss + * @HOST_STA_KICKOUT_REASON_XRETRY: Indicate sta got disconnected reason xretry + * @HOST_STA_KICKOUT_REASON_UNSPECIFIED: Indicate sta disconnection + * reason unspecified + * @HOST_STA_KICKOUT_REASON_KEEP_ALIVE: Indicate sta is disconnected + * because of keep alive + * @HOST_STA_KICKOUT_REASON_BTM: BTM request from AP with disassoc imminent + * reason + * + * This enum contains the event subtype + */ +enum host_sta_kickout_events { + HOST_STA_KICKOUT_REASON_BMISS, + HOST_STA_KICKOUT_REASON_XRETRY, + HOST_STA_KICKOUT_REASON_UNSPECIFIED, + HOST_STA_KICKOUT_REASON_KEEP_ALIVE, + HOST_STA_KICKOUT_REASON_BTM, +}; + +/*------------------------------------------------------------------------- + Function declarations and documenation + ------------------------------------------------------------------------*/ +/** + * enum host_datastall_events - Enum containing datastall subtype + * @DATA_STALL_NONE: Indicate no data stall + * @FW_VDEV_PAUSE: Indicate FW vdev Pause + * @HWSCHED_CMD_FILTER:Indicate HW sched command filter + * @HWSCHED_CMD_FLUSH: Indicate HW sched command flush + * @FW_RX_REFILL_FAILED:Indicate FW rx refill failed + * @FW_RX_FCS_LEN_ERROR:Indicate FW fcs len error + * @FW_WDOG_ERRORS:Indicate watchdog error + * @FW_BB_WDOG_ERROR:Indicate BB watchdog error + * @STA_TX_TIMEOUT: Indicate sta tx timeout + * @SOFTAP_TX_TIMEOUT:Indicate softap tx timeout + * @NUD_FAILURE: Indicare NUD Failure + * + * This enum contains the event subtype + */ +enum host_datastall_events { + DATA_STALL_NONE, + FW_VDEV_PAUSE, + HWSCHED_CMD_FILTER, + HWSCHED_CMD_FLUSH, + FW_RX_REFILL_FAILED, + FW_RX_FCS_LEN_ERROR, + FW_WDOG_ERRORS, + FW_BB_WDOG_ERROR, + STA_TX_TIMEOUT, + SOFTAP_TX_TIMEOUT, + NUD_FAILURE, +}; + +/*------------------------------------------------------------------------- + Function declarations and documenation + ------------------------------------------------------------------------*/ +/** + * enum host_ssr_events - Enum containing ssr subtype + * @SSR_SUB_SYSTEM_REINIT: Indicate ssr reinit state + * @SSR_SUB_SYSTEM_SHUTDOWN: Indicate ssr shutdown state + * + * This enum contains the event subtype + */ +enum host_ssr_events { + SSR_SUB_SYSTEM_REINIT, + SSR_SUB_SYSTEM_SHUTDOWN, +}; + +/** + * struct host_event_tdls_teardown - tdls teardown diag event + * @reason: reason for tear down + * @peer_mac: peer mac + * + * This structure contains tdls teardown diag event info + */ +struct host_event_tdls_teardown { + uint32_t reason; + uint8_t peer_mac[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct host_event_tdls_enable_link - tdls enable link event + * @peer_mac: peer mac + * @is_off_chan_supported: if off channel supported + * @is_off_chan_configured: if off channel configured + * @is_off_chan_established: if off channel established + * + * This structure contain tdls enable link diag event info + */ +struct host_event_tdls_enable_link { + uint8_t peer_mac[QDF_MAC_ADDR_SIZE]; + uint8_t is_off_chan_supported; + uint8_t is_off_chan_configured; + uint8_t is_off_chan_established; +}; + +/** + * struct host_event_suspend - suspend/resume state + * @state: suspend/resume state + * + * This structure contains suspend resume diag event info + */ +struct host_event_suspend { + uint8_t state; +}; + +/** + * struct host_event_offload_req - offload state + * @offload_type: offload type + * @state: enabled or disabled state + * + * This structure contains offload diag event info + */ +struct host_event_offload_req { + uint8_t offload_type; + uint8_t state; +}; + +/** + * struct host_event_tdls_scan_rejected - scan + * rejected due to tdls + * @status: rejected status + * + * This structure contains scan rejected due to + * tdls event info + */ +struct host_event_tdls_scan_rejected { + uint8_t status; +}; + +/** + * struct host_event_tdls_tx_rx_mgmt - for TX RX management frame + * @event_id: event ID + * @tx_rx: tx or rx + * @type: type of frame + * @action_sub_type: action frame type + * @peer_mac: peer mac + * + * This structure contains tdls TX RX management frame info + */ +struct host_event_tdls_tx_rx_mgmt { + uint8_t event_id; + uint8_t tx_rx; + uint8_t type; + uint8_t action_sub_type; + uint8_t peer_mac[QDF_MAC_ADDR_SIZE]; +}; + +/*------------------------------------------------------------------------- + Function declarations and documenation + ------------------------------------------------------------------------*/ +/** + * enum wifi_connectivity_events - Enum containing EAPOL sub type + * @WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED: EAPOL transmitted + * @WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED: EAPOL received + * + * This enum contains the EAPOL subtype + */ +enum wifi_connectivity_events { + WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED, + WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED, +}; + +/** + * enum wake_lock_reason - Reason for taking/releasing wakelock + * @WIFI_POWER_EVENT_WAKELOCK_DRIVER_INIT: Driver initialization + * @WIFI_POWER_EVENT_WAKELOCK_DRIVER_REINIT: Driver re-initialization + * @WIFI_POWER_EVENT_WAKELOCK_DRIVER_EXIT: Driver shutdown + * @WIFI_POWER_EVENT_WAKELOCK_SCAN: Scan request/response handling + * @WIFI_POWER_EVENT_WAKELOCK_EXT_SCAN: Extended scan request/response handling + * @WIFI_POWER_EVENT_WAKELOCK_RESUME_WLAN: Driver resume + * @WIFI_POWER_EVENT_WAKELOCK_ROC: Remain on channel request/response handling + * @WIFI_POWER_EVENT_WAKELOCK_AUTO_SUSPEND: Auto suspend related handling + * @WIFI_POWER_EVENT_WAKELOCK_IPA: IPA related handling + * @WIFI_POWER_EVENT_WAKELOCK_ADD_STA: Addition of STA + * @WIFI_POWER_EVENT_WAKELOCK_HOLD_RX: Wakelocks taken for receive + * @WIFI_POWER_EVENT_WAKELOCK_SAP: SoftAP related wakelocks + * @WIFI_POWER_EVENT_WAKELOCK_WOW: WoW feature related + * @WIFI_POWER_EVENT_WAKELOCK_PNO: PNO feature related + * @WIFI_POWER_EVENT_WAKELOCK_DEL_STA: Deletion of a station + * @WIFI_POWER_EVENT_WAKELOCK_DFS: DFS related wakelocks + * @WIFI_POWER_EVENT_WAKELOCK_WMI_CMD_RSP: Firmware response + * @WIFI_POWER_EVENT_WAKELOCK_MISC: Miscellaneous wakelocks + * @WIFI_POWER_EVENT_WAKELOCK_DHCP: DHCP negotiation under way + * @WIFI_POWER_EVENT_WAKELOCK_CONNECT: connection in progress + * @WIFI_POWER_EVENT_WAKELOCK_IFACE_CHANGE_TIMER: iface change timer running + * @WIFI_POWER_EVENT_WAKELOCK_MONITOR_MODE: Montitor mode wakelock + * @WIFI_POWER_EVENT_WAKELOCK_DRIVER_IDLE_RESTART: Wakelock for Idle Restart + * @WIFI_POWER_EVENT_WAKELOCK_TDLS: Wakelock for TDLS + * + * Indicates the reason for which the wakelock was taken/released + */ +enum wake_lock_reason { + WIFI_POWER_EVENT_WAKELOCK_DRIVER_INIT, + WIFI_POWER_EVENT_WAKELOCK_DRIVER_REINIT, + WIFI_POWER_EVENT_WAKELOCK_DRIVER_EXIT, + WIFI_POWER_EVENT_WAKELOCK_SCAN, + WIFI_POWER_EVENT_WAKELOCK_EXT_SCAN, + WIFI_POWER_EVENT_WAKELOCK_RESUME_WLAN, + WIFI_POWER_EVENT_WAKELOCK_ROC, + WIFI_POWER_EVENT_WAKELOCK_AUTO_SUSPEND, + WIFI_POWER_EVENT_WAKELOCK_IPA, + WIFI_POWER_EVENT_WAKELOCK_ADD_STA, + WIFI_POWER_EVENT_WAKELOCK_HOLD_RX, + WIFI_POWER_EVENT_WAKELOCK_SAP, + WIFI_POWER_EVENT_WAKELOCK_WOW, + WIFI_POWER_EVENT_WAKELOCK_PNO, + WIFI_POWER_EVENT_WAKELOCK_DEL_STA, + WIFI_POWER_EVENT_WAKELOCK_DFS, + WIFI_POWER_EVENT_WAKELOCK_WMI_CMD_RSP, + WIFI_POWER_EVENT_WAKELOCK_MISC, + WIFI_POWER_EVENT_WAKELOCK_DHCP, + WIFI_POWER_EVENT_WAKELOCK_CONNECT, + WIFI_POWER_EVENT_WAKELOCK_IFACE_CHANGE_TIMER, + WIFI_POWER_EVENT_WAKELOCK_MONITOR_MODE, + WIFI_POWER_EVENT_WAKELOCK_DRIVER_IDLE_RESTART, + WIFI_POWER_EVENT_WAKELOCK_TDLS, +}; + +/* The length of interface name should >= IFNAMSIZ */ +#define HOST_EVENT_INTF_STR_LEN 16 +#define HOST_EVENT_HW_MODE_STR_LEN 12 + +/** + * struct host_event_wlan_acs_req - payload for ACS diag event + * @intf: network interface name for WLAN + * @hw_mode: hw mode configured by hostapd + * @bw: channel bandwidth(MHz) + * @ht: a flag indicating whether HT phy mode is enabled + * @vht: a flag indicating whether VHT phy mode is enabled + * @chan_start: starting channel number for ACS scan + * @chan_end: ending channel number for ACS scan + * + * This structure includes all the payload related to ACS request parameters + */ +struct host_event_wlan_acs_req { + uint8_t intf[HOST_EVENT_INTF_STR_LEN]; + uint8_t hw_mode[HOST_EVENT_HW_MODE_STR_LEN]; + uint16_t bw; + uint8_t ht; + uint8_t vht; + uint16_t chan_start; + uint16_t chan_end; +}; + +/** + * struct host_event_wlan_acs_scan_start - payload for ACS scan request + * @scan_id: scan request ID + * @vdev_id: vdev/session ID + * + * This structure includes all the payload related to ACS scan request + * parameters + */ +struct host_event_wlan_acs_scan_start { + uint32_t scan_id; + uint8_t vdev_id; +}; + +#define HOST_EVENT_STATUS_STR_LEN 24 + +/** + * struct host_event_wlan_acs_scan_done - payload for ACS scan done event + * @status: indicating whether ACS scan is successful + * @vdev_id: vdev/session ID + * @scan_id: scan request ID + * + * This structure includes all the payload related to ACS scan done event + */ +struct host_event_wlan_acs_scan_done { + uint8_t status[HOST_EVENT_STATUS_STR_LEN]; + uint32_t scan_id; + uint8_t vdev_id; +}; + +/** + * struct host_event_wlan_acs_chan_spectral_weight - payload for spectral + * weight event indication + * @chan: channel number + * @weight: channel weight + * @rssi: RSSI value obtained after scanning + * @bss_count: number of BSS detected on this channel + * + * This structure includes all the payload related to a channel's weight + * evaluation result + */ +struct host_event_wlan_acs_chan_spectral_weight { + uint16_t chan; + uint16_t weight; + int32_t rssi; + uint16_t bss_count; +}; + +/** + * struct host_event_wlan_acs_best_chan - payload for ACS best channel event + * @chan: channel number + * @weight: channel weight + * + * This structure includes all the payload related to the best channel + * selected after ACS procedure + */ +struct host_event_wlan_acs_best_chan { + uint16_t chan; + uint16_t weight; +}; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __HOST_DIAG_CORE_EVENT_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_core_log.h b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_core_log.h new file mode 100644 index 0000000000000000000000000000000000000000..a83a21446dd9b58fa0e0e68c34e2303dc39234a5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_core_log.h @@ -0,0 +1,588 @@ +/* + * Copyright (c) 2014-2017, 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(__HOST_DIAG_CORE_LOG_H) +#define __HOST_DIAG_CORE_LOG_H + +/**========================================================================= + + \file host_diag_core_log.h + + \brief WLAN UTIL host DIAG logs + + Definitions for WLAN UTIL host diag events + + ========================================================================*/ + +/* $Header$ */ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include "qdf_types.h" +#include "i_host_diag_core_log.h" + +/*-------------------------------------------------------------------------- + Preprocessor definitions and constants + ------------------------------------------------------------------------*/ +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/*-------------------------------------------------------------------------- + Preprocessor definitions and constants + ------------------------------------------------------------------------*/ +#define HOST_LOG_MAX_NUM_SSID (21) +#define HOST_LOG_MAX_NUM_BSSID (21) +#define HOST_LOG_MAX_SSID_SIZE (32) +#define HOST_LOG_MAX_BSSID_SIZE (6) +#define HOST_LOG_MAX_NUM_CHANNEL (64) +#define HOST_LOG_MAX_NUM_HO_CANDIDATE_APS (20) +#define HOST_LOG_MAX_WOW_PTRN_SIZE (128) +#define HOST_LOG_MAX_WOW_PTRN_MASK_SIZE (16) +#define VOS_LOG_PKT_LOG_SIZE (2048) +#define HOST_LOG_PKT_LOG_THRESHOLD 40960 +#define HOST_LOG_MAX_COLD_BOOT_CAL_DATA_SIZE (2048) + +/* Version to be updated whenever format of vos_log_pktlog_info changes */ +#define VERSION_LOG_WLAN_PKT_LOG_INFO_C 1 +/* Version to be updated whenever format of host_log_cold_boot_cal_data_type + * changes + */ +#define VERSION_LOG_WLAN_COLD_BOOT_CAL_DATA_C 1 + +/*--------------------------------------------------------------------------- + This packet contains the scan results of the recent scan operation + LOG_WLAN_SCAN_C 0x1496 + ---------------------------------------------------------------------------*/ +typedef struct { + log_hdr_type hdr; + uint8_t eventId; + uint8_t numSsid; + uint8_t ssid[HOST_LOG_MAX_NUM_SSID][HOST_LOG_MAX_SSID_SIZE]; + uint8_t bssid[HOST_LOG_MAX_NUM_BSSID][HOST_LOG_MAX_BSSID_SIZE]; + uint8_t totalSsid; + uint8_t minChnTime; + uint8_t maxChnTime; + uint16_t timeBetweenBgScan; + uint8_t BSSMode; + uint8_t numChannel; + uint8_t channels[HOST_LOG_MAX_NUM_CHANNEL]; + uint16_t status; +} host_log_scan_pkt_type; + +/*--------------------------------------------------------------------------- + This packet contains the information related to IBSS connection setup + LOG_WLAN_IBSS_C 0x1497 + ---------------------------------------------------------------------------*/ +typedef struct { + log_hdr_type hdr; + uint8_t eventId; + uint8_t channelSetting; + struct qdf_mac_addr bssid; + struct qdf_mac_addr peer_macaddr; + uint8_t ssid[HOST_LOG_MAX_SSID_SIZE]; + uint8_t operatingChannel; + uint8_t beaconInterval; + uint8_t status; + uint32_t op_freq; +} host_log_ibss_pkt_type; + +/*--------------------------------------------------------------------------- + This packet contains the information related to 802.11D + LOG_WLAN_80211D_C 0x1498 + ---------------------------------------------------------------------------*/ +typedef struct { + log_hdr_type hdr; + uint8_t eventId; + uint8_t numChannel; + uint8_t Channels[HOST_LOG_MAX_NUM_CHANNEL]; + uint8_t TxPwr[HOST_LOG_MAX_NUM_CHANNEL]; + uint8_t countryCode[3]; + uint8_t supportMultipleDomain; +} host_log_802_11d_pkt_type; + +/*--------------------------------------------------------------------------- + This is a log packet which contains below handoff information: + - Current AP + RSSI (if already associated) + - Candidate AP + RSSI (before association and when the list is updated) + - For each BSSID in candidate list, provide RSSI, QoS and security compatibility + LOG_WLAN_HANDOFF_C 0x1499 + ---------------------------------------------------------------------------*/ +typedef struct { + uint8_t ssid[9]; + uint8_t bssid[HOST_LOG_MAX_BSSID_SIZE]; + uint8_t channel_id; + uint32_t qos_score; + uint32_t sec_score; + uint32_t rssi_score; + uint32_t overall_score; + uint32_t tx_per; /* represented as a % */ + uint32_t rx_per; /* represented as a % */ + +} host_log_ho_ap_info; + +typedef struct { + log_hdr_type hdr; + uint32_t num_aps; + host_log_ho_ap_info current_ap_info; + host_log_ho_ap_info + candidate_ap_info[HOST_LOG_MAX_NUM_HO_CANDIDATE_APS]; +} host_log_ho_pkt_type; + +/*--------------------------------------------------------------------------- + This packet contains the information related to the EDCA parameters + advertised by the AP + LOG_WLAN_QOS_EDCA_C 0x149A + ---------------------------------------------------------------------------*/ +typedef struct { + log_hdr_type hdr; + uint8_t aci_be; + uint8_t cw_be; + uint16_t txoplimit_be; + uint8_t aci_bk; + uint8_t cw_bk; + uint16_t txoplimit_bk; + uint8_t aci_vi; + uint8_t cw_vi; + uint16_t txoplimit_vi; + uint8_t aci_vo; + uint8_t cw_vo; + uint16_t txoplimit_vo; +} host_log_qos_edca_pkt_type; + +/*--------------------------------------------------------------------------- + This packet contains the total number of beacon received value + LOG_WLAN_BEACON_UPDATE_C 0x149B + ---------------------------------------------------------------------------*/ +typedef struct { + log_hdr_type hdr; + uint32_t bcn_rx_cnt; +} host_log_beacon_update_pkt_type; + +/*--------------------------------------------------------------------------- + This packet contains the information related to a WoW patern value when set + LOG_WLAN_POWERSAVE_WOW_ADD_PTRN_C 0x149C + ---------------------------------------------------------------------------*/ +typedef struct { + log_hdr_type hdr; + uint8_t pattern_id; + uint8_t pattern_byte_offset; + uint8_t pattern_size; + uint8_t pattern[HOST_LOG_MAX_WOW_PTRN_SIZE]; + uint8_t pattern_mask_size; + uint8_t pattern_mask[HOST_LOG_MAX_WOW_PTRN_MASK_SIZE]; +} host_log_powersave_wow_add_ptrn_pkt_type; + +/*--------------------------------------------------------------------------- + This packet contains the Tspec info negotiated with the AP for the + specific AC + LOG_WLAN_QOS_TSPEC_C 0x14A2 + ---------------------------------------------------------------------------*/ +typedef struct { + log_hdr_type hdr; + uint8_t tsinfo[3]; + uint16_t nominal_msdu_size; + uint16_t maximum_msdu_size; + uint32_t min_service_interval; + uint32_t max_service_interval; + uint32_t inactivity_interval; + uint32_t suspension_interval; + uint32_t svc_start_time; + uint32_t min_data_rate; + uint32_t mean_data_rate; + uint32_t peak_data_rate; + uint32_t max_burst_size; + uint32_t delay_bound; + uint32_t min_phy_rate; + uint16_t surplus_bw_allowance; + uint16_t medium_time; +} host_log_qos_tspec_pkt_type; + +/*--------------------------------------------------------------------------- + This packet contains data information when stall detected + LOG_TRSP_DATA_STALL_C 0x1801 + ---------------------------------------------------------------------------*/ + +typedef struct { + char channelName[4]; + uint32_t numDesc; + uint32_t numFreeDesc; + uint32_t numRsvdDesc; + uint32_t headDescOrder; + uint32_t tailDescOrder; + uint32_t ctrlRegVal; + uint32_t statRegVal; + uint32_t numValDesc; + uint32_t numInvalDesc; +} host_log_data_stall_channel_type; + +typedef struct { + log_hdr_type hdr; + uint32_t PowerState; + uint32_t numFreeBd; + host_log_data_stall_channel_type dxeChannelInfo[4]; +} host_log_data_stall_type; + +/*--------------------------------------------------------------------------- + This packet contains the rssi value from BSS descriptor + LOG_WLAN_RSSI_UPDATE_C 0x1354 + ---------------------------------------------------------------------------*/ +typedef struct { + log_hdr_type hdr; + int8_t rssi; +} host_log_rssi_pkt_type; + +/** + * struct host_log_pktlog_info - Packet log info + * @log_hdr: Log header + * @buf_len: Length of the buffer that follows + * @buf: Buffer containing the packet log info + * + * Structure containing the packet log information + * LOG_WLAN_PKT_LOG_INFO_C 0x18E0 + */ +struct host_log_pktlog_info { + log_hdr_type log_hdr; + uint32_t version; + uint32_t seq_no; + uint32_t buf_len; + uint8_t buf[]; +}; + +/** + * struct host_log_cold_boot_cal_data_type - Cold boot cal log info + * @hdr: Log header + * @version: version + * @flags: Flag to indicate if more data follows + * @cb_cal_data_len: Length of the cal data + * @cb_cal_data: Cold boot cal data + * + * Structure containing the cold boot calibration data + * log information + * LOG_WLAN_COLD_BOOT_CAL_DATA_C 0x1A18 + */ +struct host_log_cold_boot_cal_data_type { + log_hdr_type hdr; + uint32_t version; + uint32_t flags; + uint32_t cb_cal_data_len; + uint8_t cb_cal_data[HOST_LOG_MAX_COLD_BOOT_CAL_DATA_SIZE]; +}; + +#define WLAN_MAX_ROAM_CANDIDATE_AP 9 +#define WLAN_MAX_ROAM_SCAN_CHAN 38 +#define WLAN_MAX_SSID_SIZE 32 + +/** + * host_log_wlan_mgmt_tx_rx_info: To capture TX/RX mgmt frames' payload + * @hdr: Log header + * @version: Version number of the payload + * @vdev_id: Vdev id + * @is_tx: 1 - TX frame, 0 - RX frame + * @mgmt_type: type of frames, value: enum wifi_frm_type + * @mgmt_subtype: subtype of mgmt frame, value: enum mgmt_frm_subtype + * @mgmt_frame_seq_num: Frame sequence number in 802.11 header + * @operating_freq: operating frequency of AP + * @ssid_len: length of SSID, max 32 bytes long as per standard + * @ssid: SSID of connected AP + * @self_mac_addr: mac address of self interface + * @bssid: BSSID for which frame is received + * @mac_failure_reason: Internal driver failure reason + * @mgmt_status_code: 802.11 management frame response status code from + * section 9.4.1.9 IEEE 802.11 - 2016 + * @auth_algo: Authentication algorithm number + * @auth_transaction_num: Authentication transaction sequence number + * @is_retry: Is retry frame + * @rssi: RSSI for the received frame + * @origin: 1- Sent by host. 2- sent by firmware + */ +struct host_log_wlan_mgmt_tx_rx_info { + log_hdr_type hdr; + uint8_t version; + uint8_t vdev_id; + bool is_tx; + uint8_t mgmt_type; + uint8_t mgmt_subtype; + uint16_t mgmt_frame_seq_num; + uint8_t operating_freq; + uint8_t ssid_len; + char ssid[WLAN_MAX_SSID_SIZE]; + uint8_t self_mac_addr[QDF_MAC_ADDR_SIZE]; + uint8_t bssid[QDF_MAC_ADDR_SIZE]; + uint16_t mac_failure_reason; + uint16_t mgmt_status_code; + uint8_t auth_algo; + uint8_t auth_transaction_num; + uint8_t is_retry; + uint32_t rssi; + uint8_t origin; +} qdf_packed; + +/** + * struct wlan_roam_btm_trigger_data - BTM roam trigger related information + * @btm_request_mode: BTM request mode - solicited/unsolicited + * @disassoc_timer: Number of TBTT before AP disassociates the STA in ms + * @validity_interval: Preferred candidate list validity interval in ms + * @candidate_list_count: Number of candidates in BTM request. + * @btm_resp_status: Status code of the BTM response. + */ +struct wlan_roam_btm_trigger_data { + uint8_t btm_request_mode; + uint32_t disassoc_timer; + uint32_t validity_interval; + uint16_t candidate_list_count; + uint16_t btm_resp_status; +} qdf_packed; + +/** + * struct wlan_roam_cu_trigger_data - BSS Load roam trigger parameters + * @cu_load: Connected AP CU load percentage + */ +struct wlan_roam_cu_trigger_data { + uint16_t cu_load; +} qdf_packed; + +/** + * Struct wlan_roam_rssi_trigger_data - RSSI roam trigger related + * parameters + * @threshold: RSSI threshold value in dBm for LOW rssi roam trigger + */ +struct wlan_roam_rssi_trigger_data { + uint32_t threshold; +} qdf_packed; + +/** + * struct wlan_roam_deauth_trigger_data - Deauth roaming trigger related + * parameters + * @type: 1- Deauthentication 2- Disassociation + * @reason: Status code of the Deauth/Disassoc received + */ +struct wlan_roam_deauth_trigger_data { + uint8_t type; + uint32_t reason; +} qdf_packed; + +/** + * struct host_log_wlan_roam_trigger_info - Roam trigger + * related info + * @hdr: Log header + * @version: Version number of the payload + * @vdev_id: Vdev id + * @trigger_reason: Roaming trigger reason + * @trigger_sub_reason: Roaming trigger sub reason + * @current_rssi: Current connected AP RSSI + * @timestamp: Host driver timestamp in msecs + * @btm_trig_data: BTM trigger related data + * @cu_load_data: CU load trigger related data + * @rssi_trig_data: RSSI roam trigger related data + * @deauth_trig_data: Deauth Roam trigger related data + */ +struct host_log_wlan_roam_trigger_info { + log_hdr_type hdr; + uint8_t version; + uint8_t vdev_id; + uint32_t trigger_reason; + uint32_t trigger_sub_reason; + uint32_t current_rssi; + uint32_t timestamp; + union { + struct wlan_roam_btm_trigger_data btm_trig_data; + struct wlan_roam_cu_trigger_data cu_load_data; + struct wlan_roam_rssi_trigger_data rssi_trig_data; + struct wlan_roam_deauth_trigger_data deauth_trig_data; + }; +} qdf_packed; + +/** + * struct host_log_wlan_roam_candidate_info - Roam scan candidate APs related + * info + * @version: Payload structure version + * @timestamp: Host timestamp in millisecs + * @type: 0 - Candidate AP; 1 - Current connected AP. + * @bssid: AP bssid. + * @freq: Channel frquency + * @cu_load: Channel utilization load of the AP. + * @cu_score: Channel Utilization score. + * @rssi: Candidate AP rssi + * @rssi_score: AP RSSI score + * @total_score: Total score of the candidate AP. + * @etp: Estimated throughput value of the AP in Mbps + */ +struct host_log_wlan_roam_candidate_info { + uint8_t version; + uint32_t timestamp; + uint8_t type; + uint8_t bssid[QDF_MAC_ADDR_SIZE]; + uint16_t freq; + uint32_t cu_load; + uint32_t cu_score; + uint32_t rssi; + uint32_t rssi_score; + uint32_t total_score; + uint32_t etp; +} qdf_packed; + +/** + * struct host_log_wlan_roam_scan_data - Roam scan event details + * @hdr: Log header + * @version: Version number of the diag log payload + * @vdev_id: Vdev ID + * @type: 0 - Partial roam scan; 1 - Full roam scan + * @num_ap: Number of candidate APs. + * @num_chan: Number of channels. + * @timestamp: Time of day in milliseconds at which scan was triggered + * @trigger_reason: Roam scan trigger reason + * @next_rssi_threshold: Next roam can trigger rssi threshold + * @chan_freq: List of frequencies scanned as part of roam scan + * @ap: List of candidate AP info + */ +struct host_log_wlan_roam_scan_data { + log_hdr_type hdr; + uint8_t version; + uint8_t vdev_id; + uint16_t type; + uint8_t num_ap; + uint8_t num_chan; + uint32_t timestamp; + uint32_t trigger_reason; + uint32_t next_rssi_threshold; + uint16_t chan_freq[WLAN_MAX_ROAM_SCAN_CHAN]; + struct host_log_wlan_roam_candidate_info ap[WLAN_MAX_ROAM_CANDIDATE_AP]; +} qdf_packed; + +/** + * struct host_log_wlan_roam_result_info - Roam result related info. + * @hdr: Log header + * @version: Payload strcuture version + * @vdev_id: Vdev Id + * @status: 0 - Roaming is success ; 1 - Roaming failed + * @timestamp: Host timestamp in millisecs + * @fail_reason: One of WMI_ROAM_FAIL_REASON_ID + */ +struct host_log_wlan_roam_result_info { + log_hdr_type hdr; + uint8_t version; + uint8_t vdev_id; + bool status; + uint32_t timestamp; + uint32_t fail_reason; +} qdf_packed; + +/** + * struct wlan_rrm_beacon_report - RRM beacon report related + * parameters + * @req_bssid: beacon report requestor BSSID + * @req_ssid: Requested SSID for beacon report + * @is_wildcard_bssid: Is the BSSID FF:FF:FF:FF:FF:FF + * @req_reg_class: Regulatory class mentioned in the request + * @req_measurement_mode: Measurement mode. Active/Passive/Beacon report Table + * @req_measurement_duration: Measurement duration requested. + * @num_reports_in_frame: Number of BSS scanned + * @is_last_frame_in_req: True if this frame is the last frame sent for the + * request + */ +struct wlan_rrm_beacon_report { + uint8_t req_bssid[QDF_MAC_ADDR_SIZE]; + uint8_t req_ssid[WLAN_MAX_SSID_SIZE]; + bool is_wildcard_bssid; + uint8_t req_reg_class; + uint16_t req_measurement_mode; + uint16_t req_measurement_duration; + uint8_t num_reports_in_frame; + bool is_last_frame_in_req; +} qdf_packed; + +/** + * struct host_log_wlan_rrm_tx_rx_info - RRM frame related details + * @hdr: Log header + * @version: Version of the payload struture + * @vdev_id: Vdev id + * @orgin: Sent by host or firmware + * @is_tx: Is Tx frame or RX frame + * @roam_result: Roaming result + * @timestamp: Time of the day in milliseconds + * @mgmt_frame_seq_num: Frame sequence number + * @received_chan_freq: Frame received channel frequency + * @action_category: Action frame category + * @rrm_action_code: Radio measurement/Noise measurement + * @radio_measurement_type: Neighbor report/Beacon report + * @bssid: BSSID field in frame + * @req_num_freq: Number of frequencies provided in request + * @req_freq: Frequencies requested + * @fail_reason_code: response TX failure status code + * @rssi: Rx frame rssi + * @bcn_rpt: Beacon report related parameters + */ +struct host_log_wlan_rrm_tx_rx_info { + log_hdr_type hdr; + uint8_t version; + uint8_t vdev_id; + uint8_t origin; + bool is_tx; + bool roam_result; + uint32_t timestamp; + uint16_t mgmt_frame_seq_num; + uint16_t received_chan_freq; + uint8_t action_category; + uint8_t rrm_action_code; + uint8_t radio_measurement_type; + uint8_t bssid[QDF_MAC_ADDR_SIZE]; + uint8_t req_num_freq; + uint16_t req_freq[WLAN_MAX_ROAM_SCAN_CHAN]; + uint8_t fail_reason_code; + uint32_t rssi; + struct wlan_rrm_beacon_report bcn_rpt; +} qdf_packed; + +/** + * struct host_event_proto_pkt_info - DP protocol pkt info + * @hdr: Log header + * @version: version + * @type: data pkt type + * @subtype: data pkt subtype + * @dir: tx or rx + * @sa: source MAC address + * @da: destination MAC address + * @msdu_id: MSDU id + * @status: status + * + * Structure containing the protocol data pkt info + * + * LOG_WLAN_DP_PROTO_PKT_INFO_C 0x1A1E + */ +struct host_event_proto_pkt_info { + log_hdr_type hdr; + uint32_t version; + uint8_t type; + uint8_t subtype; + uint8_t dir; + uint8_t sa[QDF_MAC_ADDR_SIZE]; + uint8_t da[QDF_MAC_ADDR_SIZE]; + uint16_t msdu_id; + uint8_t status; +}; + +/*------------------------------------------------------------------------- + Function declarations and documenation + ------------------------------------------------------------------------*/ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __HOST_DIAG_CORE_LOG_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_event_defs.h b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_event_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..76cbcfb04d9bec24b0b768404dc6db348f374563 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_event_defs.h @@ -0,0 +1,936 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef EVENT_DEFS_H +#define EVENT_DEFS_H + +typedef enum { + EVENT_DROP_ID = 0, + + /* Events between 0x1 to 0x674 are not used */ + + /* + * + * EVENT_WLAN_SECURITY + * @ eventId: Event id + * @ authMode: Shows the auth mode + * @ encryptionModeUnicast: Encryption Mode Unicast + * @ encryptionModeMulticast: Encryption Mode Multicast + * @ pmkIDMatch: PMK ID Match + * @ bssid: BSSID < 0 to 5 > + * @ keyId: Key ID + * @ status: Shows the status 0 is Success and 1 is failure + * + * This event is used in SECURITY to send various wlan security modes + * Values for parameters are defined below: + * Event ID: offset: 0 length: 1 + * 5 - Remove Key Req + * 6 - Remove Key Rsp + * 7 - PMKID Candidate Found + * 8 - PMKID Update + * 9 - Mic Error + * 10 - Set UniCast Key Req + * 11 - Set UniCast Key Rsp + * 12 - Set BCast Key Req + * 13 - Set BCast Key Rsp + * + * Auth Mode: offset: 1 length: 1 + * 0 - Open + * 1 - Shared + * 2 - WPA EAP + * 3 - WPA PSK + * 4 - WPA2 EAP + * 5 - WPA2 PSK + * + * Encryption Mode Unicast: offset: 2 length: 1 + * 0 - Open + * 1 - WEP40 + * 2 - WEP104 + * 3 - TKIP + * 4 - AES + * + * Encryption Mode Multicast: offset: 3 length: 1 + * 0 - Open + * 1 - WEP40 + * 2 - WEP104 + * 3 - TKIP + * 4 - AES + * + * ENC_MODE_SMS4: offset: 4 length:1 + * + * PMK ID Match: offset: 5 length: 1 + * 0 - No Match + * 1 - Match + * + * BSSID[0]: offset: 6 length: 1 + * BSSID[1]: offset: 7 length: 1 + * BSSID[2]: offset: 8 length: 1 + * BSSID[3]: offset: 9 length: 1 + * BSSID[4]: offset: 10 length: 1 + * BSSID[5]: offset: 11 length: 1 + * + * Key ID: offset: 12 length: 1 + * Status: offset: 13 length: 1 + * 0 - Success + * 1 - Failure + * + * Supported Feature: wlan security + * + * + */ + + EVENT_WLAN_SECURITY = 0x675, /* 13 byte payload */ + + /* + * + * EVENT_WLAN_STATUS + * @ eventId: Event id + * @ ssid: SSID + * @ bssType: BSS Type + * @ rssi: RSSI + * @ channel: Channel + * @ qosCapability: QoS Capability + * @ authmode: Auth Mode + * @ encryptionType: Encryption Type + * @ reason: Reason + * @ reasonDisconnect: Reason Disconnect + * + * This event is used to send wlan status + * Values for parameters are defined below: + * Event ID: offset: 0 length: 1 + * 0 - Connect + * 1 - Disconnect + * + * SSID: offset: 1 length: 32 + * BSS Type: offset: 33 + SSID length, length: 1 + * 0 - None + * 1 - BSS_TYPE_INFRASTRUCTURE + * 2 - BSS_TYPE_INFRA_AP + * 3 - BSS_TYPE_IBSS + * 4 - BSS_TYPE_START_IBSS + * 5 - BSS_TYPE_NDI + * 6 - BSS_TYPE_ANY + * + * RSSI: offset: 34 length: 1 + * Channel: offset: 35 length: 1 + * QoS Capability: offset: 36 length: 1 + * Auth Mode: offset: 37 length: 1 + * 0 - Open + * 1 - Shared + * 2 - WPA EAP + * 3 - WPA PSK + * 4 - WPA2 EAP + * 5 - WPA2 PSK + * 6 - WAPI CERT + * 7 - WAPI PSK + * + * Encryption Type: offset: 38 length: 1 + * 0 - Open + * 1 - WEP40 + * 2 - WEP104 + * 3 - TKIP + * 4 - AES + * + * ENC_MODE_SMS4: offset: 39 length: 1 + * + * Reason: offset: 40 length: 1 + * 0 - Unspecified + * 1 - User Requested + * 2 - Mic Error + * 3 - Diassoc + * 4 - Deauth + * 5 - Handoff + * + * Reason Disconnect: offset: 41 length: 1 + * + * Supported Feature: wlan status + * + * + */ + + EVENT_WLAN_STATUS, /* 15 byte payload */ + + /* Events 0x677 and 0x678 are not used */ + + /* + * + * EVENT_WLAN_QOS + * @ eventId: event id + * @ reasonCode: Reason for event + * + * This event is used to send quality set services + * Values for parameters are defined below: + * Event ID: offset: 0 length: 1 + * 0 - Add TS Req + * 1 - Add TS Rsp + * 2 - Delts + * + * Reason Code: offset: 1 length: 1 + * 0 - Admission Accepted + * 1 - Invalid Params + * 2 - Reserved + * 3 - Refused + * 4 - User Requested + * 5 - Ind From AP + * + * Supported Feature: Qos wlan + * + * + */ + + EVENT_WLAN_QOS = 0x679, /* 2 byte payload */ + + /* + * + * EVENT_WLAN_PE + * @bssid: BSSID + * @ event_type: Event type + * @ sme_state: SME state + * @ mlm_state: MLM state + * @ status: 0 - Success, 1 - Failure < majority 0 is success > + * @reason_code: reason for event report + * + * This event is used in PE to send different diag events. + * Values for parameters are defined below: + * + * bssid[0]: offset: 0 length: 1 + * bssid[1]: offset: 1 length: 1 + * bssid[2]: offset: 2 length: 1 + * bssid[3]: offset: 3 length: 1 + * bssid[4]: offset: 4 length: 1 + * bssid[5]: offset: 5 length: 1 + * + * Event Type: offset: 6 length: 2 + * 0 - SCAN REQ EVENT + * 1 - SCAN ABORT IND EVENT + * 2 - SCAN_RSP_EVENT + * 3 - JOIN_REQ_EVENT + * 4 - JOIN_RSP_EVENT + * 5 - SETCONTEXT_REQ_EVENT + * 6 - SETCONTEXT_RSP_EVENT + * 7 - REASSOC_REQ_EVENT + * 8 - REASSOC_RSP_EVENT + * 9 - AUTH_REQ_EVENT + * 10 - AUTH_RSP_EVENT + * 11 - DISASSOC_REQ_EVENT + * 12 - DISASSOC_RSP_EVENT + * 13 - DISASSOC_IND_EVENT + * 14 - DISASSOC_CNF_EVENT + * 15 - DEAUTH_REQ_EVENT + * 16 - DEAUTH_RSP_EVENT + * 17 - DEAUTH_IND_EVENT + * 18 - START_BSS_REQ_EVENT + * 19 - START_BSS_RSP_EVENT + * 20 - AUTH_IND_EVENT + * 21 - ASSOC_IND_EVENT + * 22 - ASSOC_CNF_EVENT + * 23 - REASSOC_IND_EVENT + * 24 - SWITCH_CHL_IND_EVENT + * 25 - SWITCH_CHL_RSP_EVENT + * 26 - STOP_BSS_REQ_EVENT + * 27 - STOP_BSS_RSP_EVENT + * 28 - DEAUTH_CNF_EVENT + * 29 - ADDTS_REQ_EVENT + * 30 - ADDTS_RSP_EVENT + * 31 - DELTS_REQ_EVENT + * 32 - DELTS_RSP_EVENT + * 33 - DELTS_IND_EVENT + * 34 - ENTER_BMPS_REQ_EVENT + * 35 - ENTER_BMPS_RSP_EVENT + * 36 - EXIT_BMPS_REQ_EVENT + * 37 - BMPS_RSP_EVENT + * 38 - EXIT_BMPS_IND_EVENT + * 39 - ENTER_IMPS_REQ_EVENT + * 40 - ENTER_IMPS_RSP_EVENT + * 41 - EXIT_IMPS_REQ_EVENT + * 42 - EXIT_IMPS_RSP_EVENT + * 43 - ENTER_UAPSD_REQ_EVENT + * 44 - ENTER_UAPSD_RSP_EVENT + * 45 - EXIT_UAPSD_REQ_EVENT + * 46 - EXIT_UAPSD_RSP_EVENT + * 47 - WOWL_ADD_BCAST_PTRN_EVENT + * 48 - WOWL_DEL_BCAST_PTRN_EVENT + * 49 - ENTER_WOWL_REQ_EVENT + * 50 - ENTER_WOWL_RSP_EVENT + * 51 - EXIT_WOWL_REQ_EVENT + * 52 - EXIT_WOWL_RSP_EVENT + * 53 - HAL_ADDBA_REQ_EVENT + * 54 - HAL_ADDBA_RSP_EVENT + * 55 - HAL_DELBA_IND_EVENT + * 56 - HB_FAILURE_TIMEOUT + * 57 - PRE_AUTH_REQ_EVENT + * 58 - PRE_AUTH_RSP_EVENT + * 59 - PREAUTH_DONE + * 60 - REASSOCIATING + * 61 - CONNECTED + * 62 - ASSOC_REQ_EVENT + * 63 - AUTH_COMP_EVENT + * 64 - ASSOC_COMP_EVENT + * 65 - AUTH_START_EVENT + * 66 - ASSOC_START_EVENT + * 67 - REASSOC_START_EVENT + * 68 - ROAM_AUTH_START_EVENT + * 69 - ROAM_AUTH_COMP_EVENT + * 70 - ROAM_ASSOC_START_EVENT + * 71 - ROAM_ASSOC_COMP_EVENT + * 72 - SCAN_COMPLETE_EVENT + * 73 - SCAN_RESULT_FOUND_EVENT + * 74 - ASSOC_TIMEOUT + * 75 - AUTH_TIMEOUT + * 76 - DEAUTH_FRAME_EVENT + * 77 - DISASSOC_FRAME_EVENT + * + * SME State: offset: 8 length: 2 + * 0 - OFFLINE + * 1 - IDLE + * 2 - SUSPEND + * 3 - WT SCAN + * 4 - WT JOIN + * 5 - WT AUTH + * 6 - WT ASSOC + * 7 - WT REASSOC + * 8 - WT REASSOC LINK FAIL + * 9 - JOIN FAILURE + * 10 - ASSOCIATED + * 11 - REASSOCIATED + * 12 - LINK EST + * 13 - LINK EST WT SCAN + * 14 - WT PRE AUTH + * 15 - WT DISASSOC + * 16 - WT DEAUTH + * 17 - WT START BSS + * 18 - WT STOP BSS + * 19 - NORMAL + * 20 - CHANNEL SCAN + * 21 - NORMAL CHANNEL SCAN + * + * MLM State: offset: 10 legth: 2 + * 0 - MLM OFFLINE + * 1 - MLM IDLE + * 2 - MLM WT PROBE RESP + * 3 - MLM PASSIVE SCAN + * 4 - MLM WT JOIN BEACON + * 5 - MLM JOINED + * 6 - MLM BSS STARTED + * 7 - MLM WT AUTH FRAME + * 8 - MLM WT AUTH FRAME + * 9 - MLM WT AUTH FRAME + * 10 - MLM AUTH RSP TIMEOUT + * 11 - MLM AUTHENTICATED + * 12 - MLM WT ASSOC RSP + * 13 - MLM WT REASSOC RSP + * 14 - MLM ASSOCIATED + * 15 - MLM REASSOCIATED + * 16 - MLM LINK ESTABLISHED + * 17 - MLM WT ASSOC CNF + * 18 - MLM LEARN + * 19 - MLM WT ADD BSS RSP + * 20 - MLM WT DEL BSS RSP + * 21 - MLM WT ADD BSS RSP ASSOC + * 22 - MLM WT ADD BSS RSP REASSOC + * 23 - MLM WT ADD BSS RSP PREASSOC + * 24 - MLM WT ADD STA RSP + * 25 - MLM WT DEL STA RSP + * 26 - MLM WT ASSOC DEL STA RSP + * 27 - MLM WT SET BSS KEY + * 28 - MLM WT SET STA KEY + * 29 - MLM WT SET STA BCASTKEY + * 30 - MLM WT ADDBA RSP + * 31 - MLM WT REMOVE BSS KEY + * 32 - MLM WT REMOVE STA KEY + * 33 - MLM WT SET MIMOPS + * + * Status: offset: 12 length: 2 + * Reason Code: offset: 14 length: 2 + * + * Supported Feature: STA + * + * + */ + + EVENT_WLAN_PE, /* 16 byte payload */ + + /* Events between 0x67b to 0x67f are not used */ + + /* + * + * EVENT_WLAN_BRINGUP_STATUS + * @ wlanStatus: Describe wlan status + * @ driverVersion: Driver version between 0 to 9 + * + * This event is used in BRINGUP to send wlan status + * Values for parameters are defined below: + * WLAN Status: offset: 0 length: 2 + * 0 - WLAN Disabled + * 1 - WLAN Enabled + * 2 - Reset Fail + * 3 - Reset Success + * 4 - Device Removed + * 5 - Devide Inserted + * 6 - Driver Unloaded + * 7 - Driver Loaded + * + * driverVersion: offset: 2 length: 10 + * + * Supported Feature: Bringup + * + * + */ + + EVENT_WLAN_BRINGUP_STATUS = 0x680, /* 12 byte payload */ + + /* + * + * EVENT_WLAN_POWERSAVE_GENERIC + * @ event_subtype: Event subtype + * @ full_power_request_reason: Full power request reason + * @ pmc_current_state: Pmc current state + * @ enable_disable_powersave_mode: Enable disable powersave mode + * @ winmob_d_power_state: winmob d power state + * @ dtim_period: DTIM period + * @ final_listen_intv: Final listen int + * @ bmps_auto_timer_duration: BMPS auto timer duration + * @ bmps_period: BMPS period + * + * This event is used in POWERSAVE to send wlan status + * Values for parameters are defined below: + * Event Sub Type: offset: 0 length: 1 + * Full Power Req Reason: offset: 1 length: 1 + * PMC Current State: offset: 2 length: 1 + * Enable disable powersave mode: 3 length: 1 + * Winmob D Power State: offset: 4 length: 1 + * DTIM Period: offset:5 length: 1 + * Final Listen INTV: offset:6 length: 2 + * BMPS Auto Timer Duration: 8 length: 2 + * BMPS Period: offset: 10 length:2 + * + * Supported Feature: POWERSAVE GENERIC + * + * + */ + + EVENT_WLAN_POWERSAVE_GENERIC, /* 16 byte payload */ + + /* + * + * EVENT_WLAN_POWERSAVE_WOW + * @ event_subtype: Event subtype + * @ wow_type: Wow type + * @ wow_magic_pattern: It will use pattern from 0 to 5 + * @ wow_del_ptrn_id: Wow delete pattern id + * @ wow_wakeup_cause: Wow wakeup cause + * @ wow_wakeup_cause_pbm_ptrn_id: Wow wakeup cause pbm pattern id + * + * This event is used in POWERSAVE WOW to send the wow wakeup pattern, + * cause etc + * Values for parameters are defined below: + * Event Sub Type: offset: 0 length: 1 + * 0 - Enter + * 1 - Exit + * 2 - Del Pattern + * 3 - Wakup + * + * WOW Type: offset: 1 length: 1 + * 0 - None + * 1 - Magic Pkt Only + * 2 - Byte Match Only + * 3 - Magic Pkt Byte Match + * + * WOW Magic Pattern: offset:2 length: 6 + * WOW Del Pattern ID: offset:8 length: 1 + * WOW Wakeup Cause: offset: 9 length: 1 + * 0 - Magic Pkt Match + * 1 - Ptrn Byte Match + * WOW Wakeup Cause PBM Ptrn ID: offset: 10 length: 1 + * + * Supported Feature: Powersave wow + * + * + */ + + EVENT_WLAN_POWERSAVE_WOW, /* 11 byte payload */ + + /* Events between 0x683 to 0x690 are not used */ + + /* + * + * EVENT_WLAN_BTC + * @ eventId: Event id + * @ btAddr: BT address + * @ connHandle: Connection handle + * @ connStatus: Connection status + * @ linkType: Link Type + * @ scoInterval: Synchronous Connection Oriented interval + * @ scoWindow: Synchronous Connection Oriented window + * @ retransWindow: Retransmisson window + * @ mode: Mode + * + * This event is used in Bluetooth to send the btc status + * Values for parameters are defined below: + * Event ID: offset: 0 length: 1 + * 0 - DEVICE SWITCHED ON + * 1 - DEVICE SWITCHED OFF + * 2 - INQUIRY STARTED + * 3 - INQUIRY STOPPED + * 4 - PAGE STARTED + * 5 - PAGE STOPPED + * 6 - CREATE ACL CONNECTION + * 7 - ACL CONNECTION COMPLETE + * 8 - CREATE SYNC CONNECTION + * 9 - SYNC CONNECTION COMPLETE + * 10 - SYNC CONNECTION UPDATED + * 11 - DISCONNECTION COMPLETE + * 12 - MODE CHANGED + * 13 - A2DP STREAM START + * 14 - A2DP STREAM STOP + * + * BT Addr[0]: offset: 1 length: 1 + * BT Addr[1]: offset: 2 length: 1 + * BT Addr[2]: offset: 3 length: 1 + * BT Addr[3]: offset: 4 length: 1 + * BT Addr[4]: offset: 5 length: 1 + * BT Addr[5]: offset: 6 length: 1 + * + * Conn Handle: offset: 7 length: 2 + * 65535 - Invalid + * + * Conn Status: offset:9 length: 1 + * 0 - Fail + * 1 - success + * + * Link Type: offset: 10 length: 1 + * 0 - SCO + * 1 - ACL + * 2 - ESCO + * + * Sco Interval: offset: 11 length: 1 + * Sco Window: offset: 12 length: 1 + * Retrans Window: offset: 13 length: 1 + * + * Mode: offset: 14 length: 1 + * 0 - Active + * 1 - Hold + * 2 - Sniff + * 3 - Park + * + * Supported Feature: Bluetooth + * + * + */ + + EVENT_WLAN_BTC = 0x691, /* 15 byte payload */ + + /* + * + * EVENT_WLAN_EAPOL + * @ event_sub_type: 0-Transmitted, 1-Received + * @ eapol_packet_type: 0 - EAP Start, 1 - EAPOL Start, 2 - EAPOL + * Logoff, 3 - EAPOL Key, 4 - EAPOL Encapsulated Alert + * @ eapol_key_info: This field from the driver is in big endian format + * @ eapol_rate: Rate at which the frame is received + * @ dest_addr: Destination address + * * @ src_addr: Source address + * + * This event is used to send Extensible Authentication Protocol + * information + * Values for parameters are defined below: + * event_sub_type: offset: 0 length: 1 + * eapol_packet_type: offset: 1 length: 1 + * eapol_key_info: offset:2 length: 2 + * eapol_rate: offset: 4 length: 2 + * dest_addr[0]: offset: 6 length: 1 + * dest_addr[1]: offset: 7 length: 1 + * dest_addr[2]: offset: 8 length: 1 + * dest_addr[3]: offset: 9 length: 1 + * dest_addr[4]: offset: 10 length: 1 + * dest_addr[5]: offset: 11 length: 1 + * src_addr[0]: offset: 12 length: 1 + * src_addr[1]: offset: 13 length: 1 + * src_addr[2]: offset: 14 length: 1 + * src_addr[3]: offset: 15 length: 1 + * src_addr[4]: offset: 16 length: 1 + * src_addr[5]: offset: 17 length: 1 + * + * Supported Feature: Extensible Authentication Protocol + * + * + */ + + EVENT_WLAN_EAPOL = 0xA8D,/* 18 bytes payload */ + + /* + * + * EVENT_WLAN_WAKE_LOCK + * @ status: Whether the wakelock is taken/released + * @ reason: Reason for taking this wakelock + * @ timeout: Timeout value in case of timed wakelocks + * @ name_len: Length of the name of the wakelock that will follow + * @ name: Name of the wakelock + * + * This event is used to send wakelock information + * Values for parameters are defined below: + * status: offset: 0 length: 4 + * reason: offset: 4 length: 4 + * timeout: offset: 8 length: 4 + * name_len: offset: 12 length: 4 + * + * Supported Feature: wlan wakelock + * + * + */ + + EVENT_WLAN_WAKE_LOCK = 0xAA2, /* 96 bytes payload */ + EVENT_WLAN_BEACON_RECEIVED = 0xAA6, /* FW event: 2726 */ + + /* + * + * EVENT_WLAN_LOG_COMPLETE + * @ is_fatal: Indicates if the event is fatal or not + * @ indicator: Source of the bug report - Framework/Host/Firmware + * @ reason_code: Reason for triggering bug report + * @ reserved: Reserved field + * + * This event is used to send log completion related information + * Values for parameters are defined below: + * is_fatal: offset: 0 length: 4 + * indicator: offset: 4 length: 4 + * reason_code: offset: 8 length: 4 + * reserved: offset: 12 length: 4 + * + * Supported Feature: Logging + * + * + */ + + EVENT_WLAN_LOG_COMPLETE = 0xAA7, /* 16 bytes payload */ + + /* + * + * EVENT_WLAN_STATUS_V2 + * @ event_id: Event id + * @ ssid: Network SSID + * @ bssType: BSS Type + * @ rssi: RSSI + * @ channel: Channel Numbers + * @ qosCapability: quality of service capability + * @ authType: Authentication type + * @ encryptionType: Encryption type + * @ reason: Reason for triggering status + * @ reasonDisconnect:Reason for disconnection + * + * This event is used to send varius wlan status + * Values for parameters are defined below: + * eventId: offset: 0 length: 1 + * ssid[0] - ssid[31]: offset: 1 to 32, length: 1 + * bssType: offset: 33 length: 1 + * rssi: offset: 34 length: 1 + * channel: offset: 35 length: 1 + * qosCapability: offset: 36 length: 1 + * authType: offset: 37 length: 1 + * encryptionType: offset: 38 length: 1 + * reason: offset: 39 length: 1 + * reasonDisconnect: offset: 40 length: 1 + * + * Supported Feature: Wlan status + * + * + */ + + EVENT_WLAN_STATUS_V2 = 0xAB3, + + /* + * + * EVENT_WLAN_TDLS_TEARDOWN + * @ reason: reason for tear down. + * @peer_mac: Peer mac address + * + * + * This event is sent when TDLS tear down happens. + * + * Supported Feature: TDLS + * + * + */ + EVENT_WLAN_TDLS_TEARDOWN = 0xAB5, + + /* + * + * EVENT_WLAN_TDLS_ENABLE_LINK + * @peer_mac: peer mac + * @is_off_chan_supported: If peer supports off channel + * @is_off_chan_configured: If off channel is configured + * @is_off_chan_established: If off channel is established + * + * + * This event is sent when TDLS enable link happens. + * + * Supported Feature: TDLS + * + * + */ + EVENT_WLAN_TDLS_ENABLE_LINK = 0XAB6, + + /* + * + * EVENT_WLAN_SUSPEND_RESUME + * @ state: suspend/resume state + * + * This event is used to send suspend resume info + * Values for parameters are defined below: + * suspend: offset: 0 length: 1 + * 0 - HDD_WLAN_EARLY_SUSPEND + * 1 - HDD_WLAN_SUSPEND + * 2 - HDD_WLAN_EARLY_RESUME + * 3 - HDD_WLAN_RESUME + * + * Supported Feature: suspend/resume + * + * + */ + + EVENT_WLAN_SUSPEND_RESUME = 0xAB7, + + /* + * + * EVENT_WLAN_OFFLOAD_REQ + * @ offload_type: offload type + * @ state: enabled or disabled state + * + * This event is used to send offload info + * Values for parameters are defined below: + * offloadType: offset: 0 length: 1 + * 0 - SIR_IPV4_ARP_REPLY_OFFLOAD + * 1 - SIR_IPV6_NEIGHBOR_DISCOVERY_OFFLOAD + * 2 - SIR_IPV6_NS_OFFLOAD + * + * enableOrDisable: offset: 1 length: 1 + * 0 - SIR_OFFLOAD_DISABLE + * 1 - SIR_OFFLOAD_ENABLE + * + * Supported Feature: offload + * + * + */ + + EVENT_WLAN_OFFLOAD_REQ = 0xAB8, + + /* + * + * EVENT_TDLS_SCAN_BLOCK + * @status: rejected status + * + * + * This event is sent when scan is rejected due to TDLS. + * + * Supported Feature: TDLS + * + * + */ + EVENT_TDLS_SCAN_BLOCK = 0xAB9, + + /* + * + * EVENT_WLAN_TDLS_TX_RX_MGMT + * @event_id: event id + * @tx_rx: tx or rx + * @type: type of frame + * @action_sub_type: action frame type + * @peer_mac: peer mac + * + * + * This event is sent when TDLS mgmt rx tx happens. + * + * Supported Feature: TDLS + * + * + */ + EVENT_WLAN_TDLS_TX_RX_MGMT = 0xABA, + + /* + * + * EVENT_WLAN_LOW_RESOURCE_FAILURE + * @ WIFI_EVENT_MEMORY_FAILURE: Memory failure + * + * This event is used to send reason why low resource situation + * is observed + * + * Supported Feature: Memory + * + * + */ + + EVENT_WLAN_LOW_RESOURCE_FAILURE = 0xABB, + + /* + * + * EVENT_WLAN_POWERSAVE_WOW_STATS + * @ wow_ucast_wake_up_count: send unicast packet count + * @ wow_bcast_wake_up_count: send broadcast packet count + * @ wow_ipv4_mcast_wake_up_coun: send ipv4 multicast packet count + * @ wow_ipv6_mcast_wake_up_count: send ipv6 multicast packet count + * @ wow_ipv6_mcast_ra_stats: send ipv6 multicast ra packet count + * @ wow_ipv6_mcast_ns_stats: send ipv6 multicast ns packet count + * @ wow_ipv6_mcast_na_stats: send ipv6 multicast na packet count + * @ wow_pno_match_wake_up_count: preferred network offload match count + * @ wow_pno_complete_wake_up_count: preferred network offload complete + * @ wow_gscan_wake_up_count:Reason: send external scan packet count + * @ wow_low_rssi_wake_up_count: send low rssi packet count + * @ wow_rssi_breach_wake_up_count: send rssi breach packet count + * @ wow_icmpv4_count: Send icmpv4 packet count + * @ wow_icmpv6_count: send icmpv6 packet count + * @ wow_oem_response_wake_up_count: Send oem response packet count + * + * This event is used to send wow wakeup stats information + * + * Supported Feature: Wlan powersave wow + * + * + */ + EVENT_WLAN_POWERSAVE_WOW_STATS = 0xB33, + + /* + * + * EVENT_WLAN_STA_KICKOUT + * @reasoncode: Indicates the reasoncode of event + * @peer_macaddr: Indicates the peer macaddr + * @vdev_id: Indicate unique id for identifying the VDEV + * + * This event is used to send sta kickout information + * Values for parameters are defined below: + * Reasoncode: offset: 0 length: 4 + * Peer macaddr: offset: 4 length: 6 + * VDEV ID: offset: 10 length 1 + * + * Supported Feature: STA + * + * + */ + + EVENT_WLAN_STA_KICKOUT = 0xB39, + + /* + * + * EVENT_WLAN_STA_DATASTALL + * @reason: Indicates the reason of event + * + * This event is used to send sta datastall information + * Values for parameters are defined below: + * Reason: offset:0 length: 4 + * + * Supported Feature: STA + * + * + */ + + EVENT_WLAN_STA_DATASTALL = 0xB3A, + + /* + * + * EVENT_WLAN_SOFTAP_DATASTALL + * @reason: Indicates the reason of event + * + * This event is used to send SAP datastall information + * Values for parameters are defined below: + * Reason: offset:0 length: 4 + * + * Supported Feature: SAP + * + * + */ + + EVENT_WLAN_SOFTAP_DATASTALL = 0xB3B, + + /* + * + * EVENT_WLAN_SSR_REINIT_SUBSYSTEM + * @status: Indicates the status of event + * + * This event is used to send ssr reinit status + * Values for parameters are defined below: + * Status: offset: 0 length: 4 + * + * Supported Feature: SSR + * + * + */ + + EVENT_WLAN_SSR_REINIT_SUBSYSTEM = 0xB3C, + + /* + * + * EVENT_WLAN_SSR_SHUTDOWN_SUBSYSTEM + * @status: Indicates the status of event + * + * This event is used to send ssr shutdown status + * Values for parameters are defined below: + * Status: offset: 0 length: 4 + * + * Supported Feature: SSR + * + * + */ + + EVENT_WLAN_SSR_SHUTDOWN_SUBSYSTEM = 0xB3D, + EVENT_WLAN_ACS_REQ = 0xC4A, + EVENT_WLAN_ACS_SCAN_START = 0xC4B, + EVENT_WLAN_ACS_SCAN_DONE = 0xC4C, + EVENT_WLAN_ACS_CHANNEL_SPECTRAL_WEIGHT = 0xC4D, + EVENT_WLAN_ACS_BEST_CHANNEL = 0xC4E, + EVENT_WLAN_HOST_MGMT_TX_V2 = 0xC52, + EVENT_WLAN_HOST_MGMT_RX_V2 = 0xC53, + EVENT_WLAN_CONN_STATS_V2 = 0xC56, + + /* + * + * EVENT_WLAN_RSN_INFO + * @akm_suite: Gives information about akm suites used in assoc request + * @ucast_cipher: Unicast cipher used in assoc request + * @mcast_cipher: Multi cast cipher used in assoc request + * @group_mgmt: Requested group mgmt cipher suite + * + * This event is used to send RSN information used + * in assoc request. + * + * Supported Feature: STA + * + * + */ + + EVENT_WLAN_RSN_INFO = 0xC5B, + + /* + * + * EVENT_WLAN_AUTH_INFO + * @auth_algo_num: Gives information about algo num used in auth request + * @auth_transaction_seq_num: seq num of auth request + * @auth_status_code: status code of auth request + * + * This event is used to send algo num, seq num and status code + * for auth request + * + * Supported Feature: STA + * + * + */ + + EVENT_WLAN_AUTH_INFO = 0xC92, + + EVENT_MAX_ID = 0x0FFF +} event_id_enum_type; + +#endif /* EVENT_DEFS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/log_codes.h b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/log_codes.h new file mode 100644 index 0000000000000000000000000000000000000000..ddc8301f8976b9d66feda905fed89564a098c92e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/log_codes.h @@ -0,0 +1,2076 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef LOG_CODES_H +#define LOG_CODES_H + +/*=========================================================================== + + Log Code Definitions + + General Description + This file contains log code definitions and is shared with the tools. + + ===========================================================================*/ + +/* DO NOT MODIFY THIS FILE WITHOUT PRIOR APPROVAL +** +** Log codes, by design, are a tightly controlled set of values. +** Developers may not create log codes at will. +** +** Request new logs using the following process: +** +** 1. Send email to asw.diag.request requesting log codassignments. +** 2. Identify the log needed by name. +** 3. Provide a brief description for the log. +** +*/ + +/*=========================================================================== + + Edit History + + $Header: //source/qcom/qct/core/services/diag/api/inc/main/latest/log_codes.h#9 $ + + when who what, where, why + -------- --- ---------------------------------------------------------- + 07/30/09 dhao Consolidate log_codes_apps.h + 07/30/09 dhao Add Last log code definition for Equip ID 11 + 06/26/09 dhao Update format the macro + 06/24/09 sar Reverted last change. + 06/24/09 sar Added log code for LOG_MC_STM_C. + 11/02/01 sfh Featurize common NAS log codes for UMTS. + 10/30/01 sfh Added log code for LOG_GPS_FATPATH_INFO_C. + 10/24/01 sfh Added updates for UMTS equipment ID and log codes. + 06/27/01 lad Added multiple equipment ID support. + 05/22/01 sfh Reserved log codes 158 - 168. + 05/21/01 sfh Keep confusing XXX_BASE_C names for backwards compatibility. + 05/16/01 sfh Reserved log code 155. + 05/08/01 sfh Reserved log codes 150 - 154. + 04/06/01 lad Added definitions of base IDs (such as LOG_WCDMA_BASE_C). + This is currently using temporary ID values in the 0x1000 + range. + 02/23/01 lad Created file from DMSS log.h. Log codes only + + ===========================================================================*/ +#include + +/* ------------------------------------------------------------------------- + * Data Declarations + * ------------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------------- + * Log equipment IDs. + * The most significant 4 bits of the 16 bit log code is the equipment ID. + * Orignally, the mobile was to have an ID, and base stations and other + * IDs. As QCT technology diversifies, new equipment IDs are assigned to new + * technology areas. 0x2000 and 0x3000 are reserved for legacy reasons, so + * the first + * addition starts at 0x4000. + * ------------------------------------------------------------------------- */ + +#define LOG_1X_BASE_C ((uint16_t) 0x1000) +#define LOG_WCDMA_BASE_C ((uint16_t) 0x4000) +#define LOG_GSM_BASE_C ((uint16_t) 0x5000) +#define LOG_LBS_BASE_C ((uint16_t) 0x6000) +#define LOG_UMTS_BASE_C ((uint16_t) 0x7000) +#define LOG_TDMA_BASE_C ((uint16_t) 0x8000) +#define LOG_DTV_BASE_C ((uint16_t) 0xA000) +#define LOG_APPS_BASE_C ((uint16_t) 0xB000) +#define LOG_LTE_BASE_C ((uint16_t) (0xB000 + 0x0010)) +#define LOG_LTE_LAST_C ((uint16_t) 0xB1FF) +#define LOG_WIMAX_BASE_C ((uint16_t) 0xB400) +#define LOG_DSP_BASE_C ((uint16_t) 0xC000) + +#define LOG_TOOLS_BASE_C ((uint16_t) 0xF000) + +/* LOG_BASE_C is what was used before expanding the use of the equipment ID. + * TODO: Once all targets are using the "core" diag system, this should be + * omitted. */ +#define LOG_BASE_C LOG_1X_BASE_C + +/* ------------------------------------------------------------------------- + * Log Codes + * These codes identify the kind of information contained in a log entry. + * They are used in conjunction with the 'code' field of the log entry + * header. The data types associated with each code are defined below. + * ------------------------------------------------------------------------- */ + +/* The upper 4 bits of the 16 bit log entry code specify which type + * of equipment created the log entry. */ + +/* 0 Mobile Station temporal analyzer entry */ +#define LOG_TA_C (0x0 + LOG_1X_BASE_C) + +/* 1 AGC values and closed loop power control entry */ +#define LOG_AGC_PCTL_C (0x1 + LOG_1X_BASE_C) + +/* 2 Forward link frame rates and types entry */ +#define LOG_F_MUX1_C (0x2 + LOG_1X_BASE_C) + +/* 3 Reverse link frame rates and types entry */ +#define LOG_R_MUX1_C (0x3 + LOG_1X_BASE_C) + +/* 4 Access channel message entry */ +#define LOG_AC_MSG_C (0x4 + LOG_1X_BASE_C) + +/* 5 Reverse link traffic channel message entry */ +#define LOG_R_TC_MSG_C (0x5 + LOG_1X_BASE_C) + +/* 6 Sync channel message entry */ +#define LOG_SC_MSG_C (0x6 + LOG_1X_BASE_C) + +/* 7 Paging channel message entry */ +#define LOG_PC_MSG_C (0x7 + LOG_1X_BASE_C) + +/* 8 Forward link traffic channel message entry */ +#define LOG_F_TC_MSG_C (0x8 + LOG_1X_BASE_C) + +/* 9 Forward link vocoder packet entry */ +#define LOG_VOC_FOR_C (0x9 + LOG_1X_BASE_C) + +/* 10 Reverse link vocoder packet entry */ +#define LOG_VOC_REV_C (0xA + LOG_1X_BASE_C) + +/* 11 Temporal analyzer finger info only */ +#define LOG_FING_C (0xB + LOG_1X_BASE_C) + +/* 12 Searcher pathlog info (Reused old SRCH logtype) */ +#define LOG_SRCH_C (0xC + LOG_1X_BASE_C) + +/* 13 Position and speed information read from ETAK */ +#define LOG_ETAK_C (0xD + LOG_1X_BASE_C) + +/* 14 Markov frame statistics */ +#define LOG_MAR_C (0xE + LOG_1X_BASE_C) + +/* 15 New and improved temporal analyzer searcher info */ +#define LOG_SRCH2_C (0xF + LOG_1X_BASE_C) + +/* 16 The Fujitsu handset information */ +#define LOG_HANDSET_C (0x10 + LOG_1X_BASE_C) + +/* 17 Vocoder bit error rate mask */ +#define LOG_ERRMASK_C (0x11 + LOG_1X_BASE_C) + +/* 18 Analog voice channel information */ +#define LOG_ANALOG_INFO_C (0x12 + LOG_1X_BASE_C) + +/* 19 Access probe information */ +#define LOG_ACC_INFO_C (0x13 + LOG_1X_BASE_C) + +/* 20 Position & speed info read from GPS receiver */ +#define LOG_GPS_C (0x14 + LOG_1X_BASE_C) + +/* 21 Test Command information */ +#define LOG_TEST_CMD_C (0x15 + LOG_1X_BASE_C) + +/* 22 Sparse (20ms) AGC / closed loop power control entry */ +#define LOG_S_AGC_PCTL_C (0x16 + LOG_1X_BASE_C) + +/* 23 Notification of a band class change */ +#define LOG_BAND_CHANGE_C (0x17 + LOG_1X_BASE_C) + +/* 24 DM debug messages, if being logged via log services */ +#define LOG_DBG_MSG_C (0x18 + LOG_1X_BASE_C) + +/* 25 General temporal analyzer entry */ +#define LOG_GENRL_TA_C (0x19 + LOG_1X_BASE_C) + +/* 26 General temporal analyzer w/supplemental channels */ +#define LOG_GENRL_TA_SUP_CH_C (0x1A + LOG_1X_BASE_C) + +/* Featurization Removal requested by CMI + #ifdef FEATURE_PLT + */ + +/* 27 Decoder raw bits logging */ +#define LOG_PLT_C (0x1B + LOG_1X_BASE_C) + +/* Featurization Removal requested by CMI + #else + 27 EFS Usage Info - No implementation as yet + #define LOG_EFS_INFO_C (0x1B + LOG_1X_BASE_C) + #endif + */ + +/* 28 Analog Forward Channel */ +#define LOG_ANALOG_FORW_C (0x1C + LOG_1X_BASE_C) + +/* 29 Analog Reverse Channel */ +#define LOG_ANALOG_REVS_C (0x1D + LOG_1X_BASE_C) + +/* 30 Analog Handoff Entry */ +#define LOG_ANALOG_HANDOFF_C (0x1E + LOG_1X_BASE_C) + +/* 31 FM Slot Statistis entry */ +#define LOG_ANALOG_FMSLOT_C (0x1F + LOG_1X_BASE_C) + +/* 32 FOCC Word Sync Count entry */ +#define LOG_ANALOG_WS_COUNT_C (0x20 + LOG_1X_BASE_C) + +/* 33 */ +#define LOG_RLP_PACKET_C (0x21 + LOG_1X_BASE_C) + +/* 34 */ +#define LOG_ASYNC_TCP_SEG_C (0x22 + LOG_1X_BASE_C) + +/* 35 */ +#define LOG_PACKET_DATA_IP_PACKETS_C (0x23 + LOG_1X_BASE_C) + +/* 36 */ +#define LOG_FNBDT_MESSAGE_LOG_C (0x24 + LOG_1X_BASE_C) + +/* Begin IS-2000 LOG features */ + +/* 37 RLP RX Frames logging */ +#define LOG_RLP_RX_FRAMES_C (0x25 + LOG_1X_BASE_C) + +/* 38 RLP TX Frames logging */ +#define LOG_RLP_TX_FRAMES_C (0x26 + LOG_1X_BASE_C) + +/* 39 Reserved for additions to RLP frames */ +#define LOG_RLP_RSVD1_C (0x27 + LOG_1X_BASE_C) + +/* 40 Reserved for additions to RLP frames */ +#define LOG_RLP_RSVD2_C (0x28 + LOG_1X_BASE_C) + +/* 41 Forward Link Frame Types logging */ +#define LOG_FWD_FRAME_TYPES_C (0x29 + LOG_1X_BASE_C) + +/* 42 Reverse Link Frame Types logging */ +#define LOG_REV_FRAME_TYPES_C (0x2A + LOG_1X_BASE_C) + +/* 43 Fast Forward Power Control Parameters logging */ +#define LOG_FFWD_PCTRL_C (0x2B + LOG_1X_BASE_C) + +/* 44 Reverse Power Control Parameters logging */ +#define LOG_REV_PCTRL_C (0x2C + LOG_1X_BASE_C) + +/* 45 Searcher and Finger Information logging */ +#define LOG_SRCH_FING_INFO_C (0x2D + LOG_1X_BASE_C) + +/* 46 Service Configuration logging */ +#define LOG_SVC_CONFIG_C (0x2E + LOG_1X_BASE_C) + +/* 47 Active Set Configuration logging */ +#define LOG_ASET_CONFIG_C (0x2F + LOG_1X_BASE_C) + +/* 48 Quick Paging Channel logging */ +#define LOG_QPCH_C (0x30 + LOG_1X_BASE_C) + +/* 49 RLP Statistics logging */ +#define LOG_RLP_STAT_C (0x31 + LOG_1X_BASE_C) + +/* 50 Simple Test Data Service Option logging */ +#define LOG_STDSO_C (0x32 + LOG_1X_BASE_C) + +/* 51 Pilot Phase Measurement results logging */ +#define LOG_SRCH_PPM_RES_C (0x33 + LOG_1X_BASE_C) + +/* 52 Pilot Phase Measurement Data Base logging */ +#define LOG_SRCH_PPM_DB_C (0x34 + LOG_1X_BASE_C) + +/* 53 Pilot Phase Measurement search results logging */ +#define LOG_SRCH_PPM_C (0x35 + LOG_1X_BASE_C) + +/* 54 IS-801 forward link message */ +#define LOG_GPS_FWD_MSG_C (0x36 + LOG_1X_BASE_C) + +/* 55 IS-801 reverse link message */ +#define LOG_GPS_REV_MSG_C (0x37 + LOG_1X_BASE_C) + +/* 56 GPS search session statistics */ +#define LOG_GPS_STATS_MSG_C (0x38 + LOG_1X_BASE_C) + +/* 57 GPS search results */ +#define LOG_GPS_SRCH_PEAKS_MSG_C (0x39 + LOG_1X_BASE_C) + +/* 58 Factory Testmode logging */ +#define LOG_FTM_C (0x3A + LOG_1X_BASE_C) + +/* 59 Multiple Peak Logging */ +#define LOG_SRCH_GPS_MULTI_PEAKS_INFO_C (0x3B + LOG_1X_BASE_C) + +/* 60 Post processed search results logs */ +#define LOG_SRCH_GPS_POST_PROC_C (0x3C + LOG_1X_BASE_C) + +/* 61 FULL Test Data Service Option logging */ +#define LOG_FTDSO_C (0x3D + LOG_1X_BASE_C) + +/* 62 Bluetooth logging */ +#define LOG_BT_RESERVED_CODES_BASE_C (0x3E + LOG_1X_BASE_C) +/* Keep confusing name for backwards compatibility. */ +#define LOG_BT_BASE_C LOG_BT_RESERVED_CODES_BASE_C + +/* 92 Bluetooth's last log code */ +#define LOG_BT_LAST_C (30 + LOG_BT_RESERVED_CODES_BASE_C) + +/* 93 HDR log codes */ +#define LOG_HDR_RESERVED_CODES_BASE_C (0x5D + LOG_1X_BASE_C) +/* Keep confusing name for backwards compatibility. */ +#define LOG_HDR_BASE_C LOG_HDR_RESERVED_CODES_BASE_C + +/* 143 is HDR's last log code */ +#define LOG_HDR_LAST_C (50 + LOG_HDR_RESERVED_CODES_BASE_C) + +/* 144 IS2000 DCCH Forward link channel */ +#define LOG_FOR_DCCH_MSG_C (0x90 + LOG_1X_BASE_C) +#define LOG_DCCH_FWD_C LOG_FOR_DCCH_MSG_C + +/* 145 IS2000 DCCH Forward link channel */ +#define LOG_REV_DCCH_MSG_C (0x91 + LOG_1X_BASE_C) +#define LOG_DCCH_REV_C LOG_REV_DCCH_MSG_C + +/* 146 IS2000 DCCH Forward link channel */ +#define LOG_ZREX_C (0x92 + LOG_1X_BASE_C) + +/* 147 Active set info logging, similar to ASET_CONFIG, but simpler. */ +#define LOG_ASET_INFO_C (0x93 + LOG_1X_BASE_C) + +/* 148 Pilot Phase Measurement four-shoulder-search resutlts logging */ +#define LOG_SRCH_PPM_4SHOULDER_RES_C (0x94 + LOG_1X_BASE_C) + +/* 149 Extended Pilot Phase Measurement Data Base logging */ +#define LOG_SRCH_EXT_PPM_DB_C (0x95 + LOG_1X_BASE_C) + +/* 150 GPS Visit Parameters */ +#define LOG_GPS_VISIT_PARAMETERS_C (0x96 + LOG_1X_BASE_C) + +/* 151 GPS Measurement */ +#define LOG_GPS_MEASUREMENT_C (0x97 + LOG_1X_BASE_C) + +/* 152 UIM Data */ +#define LOG_UIM_DATA_C (0x98 + LOG_1X_BASE_C) + +/* 153 STDSO plus P2 */ +#define LOG_STDSO_P2_C (0x99 + LOG_1X_BASE_C) + +/* 154 FTDSO plus P2 */ +#define LOG_FTDSO_P2_C (0x9A + LOG_1X_BASE_C) + +/* 155 Search PPM Statistics */ +#define LOG_SRCH_PPM_STATS_C (0x9B + LOG_1X_BASE_C) + +/* 156 PPP Tx Frames */ +#define LOG_PPP_TX_FRAMES_C (0x9C + LOG_1X_BASE_C) + +/* 157 PPP Rx Frames */ +#define LOG_PPP_RX_FRAMES_C (0x9D + LOG_1X_BASE_C) + +/* 158-187 SSL reserved log codes */ +#define LOG_SSL_RESERVED_CODES_BASE_C (0x9E + LOG_1X_BASE_C) +#define LOG_SSL_LAST_C (29 + LOG_SSL_RESERVED_CODES_BASE_C) + +/* 188-199 Puma reserved log codes */ +/* 188 QPCH, version 2 */ +#define LOG_QPCH_VER_2_C (0xBC + LOG_1X_BASE_C) + +/* 189 Enhanced Access Probe */ +#define LOG_EA_PROBE_C (0xBD + LOG_1X_BASE_C) + +/* 190 BCCH Frame Information */ +#define LOG_BCCH_FRAME_INFO_C (0xBE + LOG_1X_BASE_C) + +/* 191 FCCCH Frame Information */ +#define LOG_FCCCH_FRAME_INFO_C (0xBF + LOG_1X_BASE_C) + +/* 192 FDCH Frame Information */ +#define LOG_FDCH_FRAME_INFO_C (0xC0 + LOG_1X_BASE_C) + +/* 193 RDCH Frame Information */ +#define LOG_RDCH_FRAME_INFO_C (0xC1 + LOG_1X_BASE_C) + +/* 194 FFPC Information */ +#define LOG_FFPC_INFO_C (0xC2 + LOG_1X_BASE_C) + +/* 195 RPC Information */ +#define LOG_RPC_INFO_C (0xC3 + LOG_1X_BASE_C) + +/* 196 Searcher and Finger Information */ +#define LOG_SRCH_FING_INFO_VER_2_C (0xC4 + LOG_1X_BASE_C) + +/* 197 Service Configuration, version 2 */ +#define LOG_SRV_CONFIG_VER_2_C (0xC5 + LOG_1X_BASE_C) + +/* 198 Active Set Information, version 2 */ +#define LOG_ASET_INFO_VER_2_C (0xC6 + LOG_1X_BASE_C) + +/* 199 Reduced Active Set */ +#define LOG_REDUCED_ASET_INFO_C (0xC7 + LOG_1X_BASE_C) + +/* 200 Search Triage Info */ +#define LOG_SRCH_TRIAGE_INFO_C (0xC8 + LOG_1X_BASE_C) + +/* 201 RDA Frame Information */ +#define LOG_RDA_FRAME_INFO_C (0xC9 + LOG_1X_BASE_C) + +/* 202 gpsOne fatpath information */ +#define LOG_GPS_FATPATH_INFO_C (0xCA + LOG_1X_BASE_C) + +/* 203 Extended AGC */ +#define LOG_EXTENDED_AGC_C (0xCB + LOG_1X_BASE_C) + +/* 204 Transmit AGC */ +#define LOG_TRANSMIT_AGC_C (0xCC + LOG_1X_BASE_C) + +/* 205 I/Q Offset registers */ +#define LOG_IQ_OFFSET_REGISTERS_C (0xCD + LOG_1X_BASE_C) + +/* 206 DACC I/Q Accumulator registers */ +#define LOG_DACC_IQ_ACCUMULATOR_C (0xCE + LOG_1X_BASE_C) + +/* 207 Register polling results */ +#define LOG_REGISTER_POLLING_RESULTS_C (0xCF + LOG_1X_BASE_C) + +/* 208 System arbitration module */ +#define LOG_AT_SAM_C (0xD0 + LOG_1X_BASE_C) + +/* 209 Diablo searcher finger log */ +#define LOG_DIABLO_SRCH_FING_INFO_C (0xD1 + LOG_1X_BASE_C) + +/* 210 log reserved for dandrus */ +#define LOG_SD20_LAST_ACTION_C (0xD2 + LOG_1X_BASE_C) + +/* 211 log reserved for dandrus */ +#define LOG_SD20_LAST_ACTION_HYBRID_C (0xD3 + LOG_1X_BASE_C) + +/* 212 log reserved for dandrus */ +#define LOG_SD20_SS_OBJECT_C (0xD4 + LOG_1X_BASE_C) + +/* 213 log reserved for dandrus */ +#define LOG_SD20_SS_OBJECT_HYBRID_C (0xD5 + LOG_1X_BASE_C) + +/* 214 log reserved for jpinos */ +#define LOG_BCCH_SIGNALING_C (0xD6 + LOG_1X_BASE_C) + +/* 215 log reserved for jpinos */ +#define LOG_REACH_SIGNALING_C (0xD7 + LOG_1X_BASE_C) + +/* 216 log reserved for jpinos */ +#define LOG_FCCCH_SIGNALING_C (0xD8 + LOG_1X_BASE_C) + +/* 217 RDA Frame Information 2 */ +#define LOG_RDA_FRAME_INFO_2_C (0xD9 + LOG_1X_BASE_C) + +/* 218 */ +#define LOG_GPS_BIT_EDGE_RESULTS_C (0xDA + LOG_1X_BASE_C) + +/* 219 */ +#define LOG_PE_DATA_C (0xDB + LOG_1X_BASE_C) + +/* 220 */ +#define LOG_PE_PARTIAL_DATA_C (0xDC + LOG_1X_BASE_C) + +/* 221 */ +#define LOG_GPS_SINGLE_PEAK_SRCH_RESULTS_C (0xDD + LOG_1X_BASE_C) + +/* 222 */ +#define LOG_SRCH4_SAMPRAM_C (0xDE + LOG_1X_BASE_C) + +/* 223 */ +#define HDR_AN_PPP_TX_FRAMES (0xDF + LOG_1X_BASE_C) + +/* 224 */ +#define HDR_AN_PPP_RX_FRAMES (0xE0 + LOG_1X_BASE_C) + +/* 225 */ +#define LOG_GPS_SCHEDULER_TRACE_C (0xE1 + LOG_1X_BASE_C) + +/* 226 */ +#define LOG_MPEG4_YUV_FRAME_C (0xE2 + LOG_1X_BASE_C) + +/* 227 */ +#define LOG_MPEG4_CLIP_STATS_C (0xE3 + LOG_1X_BASE_C) + +/* 228 */ +#define LOG_MPEG4_CLIP_STATS_VER2_C (0xE4 + LOG_1X_BASE_C) + +/* 226-241 MMEG reserved. */ +#define LOG_MPEG_RESERVED_CODES_BASE_C (0xF1 + LOG_1X_BASE_C) + +/* 242-274 BREW reserved log range */ +#define LOG_BREW_RESERVED_CODES_BASE_C (0xF2 + LOG_1X_BASE_C) +#define LOG_BREW_LAST_C (32 + LOG_BREW_RESERVED_CODES_BASE_C) + +/* 275-339 PPP Extended Frames */ +#define LOG_PPP_FRAMES_RESERVED_CODES_BASE_C (0x113 + LOG_1X_BASE_C) +#define LOG_PPP_FRAMES_LAST_C (64 + LOG_PPP_FRAMES_RESERVED_CODES_BASE_C) + +#define LOG_PPP_EXT_FRAMED_RX_UM_C (0x113 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_FRAMED_RX_RM_C (0x114 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_FRAMED_RX_AN_C (0x115 + LOG_1X_BASE_C) + +#define LOG_PPP_EXT_FRAMED_TX_UM_C (0x123 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_FRAMED_TX_RM_C (0x124 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_FRAMED_TX_AN_C (0x125 + LOG_1X_BASE_C) + +#define LOG_PPP_EXT_UNFRAMED_RX_UM_C (0x133 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_UNFRAMED_RX_RM_C (0x134 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_UNFRAMED_RX_AN_C (0x135 + LOG_1X_BASE_C) + +#define LOG_PPP_EXT_UNFRAMED_TX_UM_C (0x143 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_UNFRAMED_TX_RM_C (0x144 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_UNFRAMED_TX_AN_C (0x145 + LOG_1X_BASE_C) + +/* 340 LOG_PE_DATA_EXT_C */ +#define LOG_PE_DATA_EXT_C (0x154 + LOG_1X_BASE_C) + +/* REX Subsystem logs */ +#define LOG_MEMDEBUG_C (0x155 + LOG_1X_BASE_C) +#define LOG_SYSPROFILE_C (0x156 + LOG_1X_BASE_C) +#define LOG_TASKPROFILE_C (0x157 + LOG_1X_BASE_C) +#define LOG_COREDUMP_C (0x158 + LOG_1X_BASE_C) + +/* 341-349 REX subsystem logs */ +#define LOG_REX_RESERVED_CODES_BASE_C (0x155 + LOG_1X_BASE_C) +#define LOG_REX_LAST_C (8 + LOG_REX_RESERVED_CODES_BASE_C) + +/* 350 LOG_PE_PARTIAL_DATA_EXT_C */ +#define LOG_PE_PARTIAL_DATA_EXT_C (0x15E + LOG_1X_BASE_C) + +/* 351 LOG_DIAG_STRESS_TEST_C */ +#define LOG_DIAG_STRESS_TEST_C (0x15F + LOG_1X_BASE_C) + +/* 352 LOG_WMS_READ_C */ +#define LOG_WMS_READ_C (0x160 + LOG_1X_BASE_C) + +/* 353 Search Triage Info Version 2 */ +#define LOG_SRCH_TRIAGE_INFO2_C (0x161 + LOG_1X_BASE_C) + +/* 354 RLP Rx FDCH Frames */ +#define LOG_RLP_RX_FDCH_FRAMES_C (0x162 + LOG_1X_BASE_C) + +/* 355 RLP Tx FDCH Frames */ +#define LOG_RLP_TX_FDCH_FRAMES_C (0x163 + LOG_1X_BASE_C) + +/* 356-371 QTV subsystem logs */ +#define LOG_QTV_RESERVED_CODES_BASE_C (0x164 + LOG_1X_BASE_C) +#define LOG_QTV_LAST_C (15 + LOG_QTV_RESERVED_CODES_BASE_C) + +/* 372 Searcher 4 1X */ +#define LOG_SRCH4_1X_C (0x174 + LOG_1X_BASE_C) + +/* 373 Searcher sleep statistics */ +#define LOG_SRCH_SLEEP_STATS_C (0x175 + LOG_1X_BASE_C) + +/* 374 Service Configuration, version 3 */ +#define LOG_SRV_CONFIG_VER_3_C (0x176 + LOG_1X_BASE_C) + +/* 375 Searcher 4 HDR */ +#define LOG_SRCH4_HDR_C (0x177 + LOG_1X_BASE_C) + +/* 376 Searcher 4 AFLT */ +#define LOG_SRCH4_AFLT_C (0x178 + LOG_1X_BASE_C) + +/* 377 Enhanced Finger Information */ +#define LOG_ENH_FING_INFO_C (0x179 + LOG_1X_BASE_C) + +/* 378 DV Information */ +#define LOG_DV_INFO_C (0x17A + LOG_1X_BASE_C) + +/* 379 WMS set routes information */ +#define LOG_WMS_SET_ROUTES_C (0x17B + LOG_1X_BASE_C) + +/* 380 FTM Version 2 Logs */ +#define LOG_FTM_VER_2_C (0x17C + LOG_1X_BASE_C) + +/* 381 GPS Multipeak logging */ +#define LOG_SRCH_GPS_MULTI_PEAKS_SIMPLIFIED_INFO_C (0x17D + LOG_1X_BASE_C) + +/* 382 GPS Multipeak logging */ +#define LOG_SRCH_GPS_MULTI_PEAKS_VERBOSE_INFO_C (0x17E + LOG_1X_BASE_C) + +/* 383-403 HDR reserved logs */ +#define LOG_HDR_RESERVED_CODES_BASE_2_C (0x17F + LOG_1X_BASE_C) +#define LOG_HDR_LAST_2_C (20 + LOG_HDR_RESERVED_CODES_BASE_2_C) + +/* RLP Rx - PDCH partial MuxPDU5 frames */ +#define LOG_RLP_RX_PDCH_PARTIAL_MUXPDU5_FRAMES_C (0x194 + LOG_1X_BASE_C) + +/* RLP Tx - PDCH partial MuxPDU5 frames */ +#define LOG_RLP_TX_PDCH_PARTIAL_MUXPDU5_FRAMES_C (0x195 + LOG_1X_BASE_C) + +/* RLP Rx internal details */ +#define LOG_RLP_RX_INTERNAL_DETAILS_C (0x196 + LOG_1X_BASE_C) + +/* RLP Tx internal details */ +#define LOG_RLP_TX_INTERNAL_DETAILS_C (0x197 + LOG_1X_BASE_C) + +/* MPEG4 Clip Statistics version 3 */ +#define LOG_MPEG4_CLIP_STATS_VER3_C (0x198 + LOG_1X_BASE_C) + +/* Mobile IP Performance */ +#define LOG_MOBILE_IP_PERFORMANCE_C (0x199 + LOG_1X_BASE_C) + +/* 410-430 Searcher reserved logs */ +#define LOG_SEARCHER_RESERVED_CODES_BASE_C (0x19A + LOG_1X_BASE_C) +#define LOG_SEARCHER_LAST_2_C (21 + LOG_SEARCHER_RESERVED_CODES_BASE_C) + +/* 432-480 QTV reserved logs */ +#define LOG_QTV2_RESERVED_CODES_BASE_C (0x1B0 + LOG_1X_BASE_C) +#define LOG_QTV2_LAST_C (48 + LOG_QTV2_RESERVED_CODES_BASE_C) + +#define LOG_QTV_PDS2_STATS (0x1B6 + LOG_1X_BASE_C) +#define LOG_QTV_PDS2_GET_REQUEST (0x1B7 + LOG_1X_BASE_C) +#define LOG_QTV_PDS2_GET_RESP_HEADER (0x1B8 + LOG_1X_BASE_C) +#define LOG_QTV_PDS2_GET_RESP_PCKT (0x1B9 + LOG_1X_BASE_C) +#define LOG_QTV_CMX_AUDIO_INPUT_DATA_C (0x1BA + LOG_1X_BASE_C) +#define LOG_QTV_RTSP_OPTIONS_C (0x1BB + LOG_1X_BASE_C) +#define LOG_QTV_RTSP_GET_PARAMETER_C (0x1BC + LOG_1X_BASE_C) +#define LOG_QTV_RTSP_SET_PARAMETER_C (0x1BD + LOG_1X_BASE_C) +#define LOG_QTV_VIDEO_BITSTREAM (0x1BE + LOG_1X_BASE_C) +#define LOG_ARM_VIDEO_DECODE_STATS (0x1BF + LOG_1X_BASE_C) +#define LOG_QTV_DSP_SLICE_BUFFER_C (0x1C0 + LOG_1X_BASE_C) +#define LOG_QTV_CMD_LOGGING_C (0x1C1 + LOG_1X_BASE_C) +#define LOG_QTV_AUDIO_FRAME_PTS_INFO_C (0x1C2 + LOG_1X_BASE_C) +#define LOG_QTV_VIDEO_FRAME_DECODE_INFO_C (0x1C3 + LOG_1X_BASE_C) +#define LOG_QTV_RTCP_COMPOUND_RR_C (0x1C4 + LOG_1X_BASE_C) +#define LOG_QTV_FRAME_BUFFER_RELEASE_REASON_C (0x1C5 + LOG_1X_BASE_C) +#define LOG_QTV_AUDIO_CHANNEL_SWITCH_FRAME_C (0x1C6 + LOG_1X_BASE_C) +#define LOG_QTV_RTP_DECRYPTED_PKT_C (0x1C7 + LOG_1X_BASE_C) +#define LOG_QTV_PCR_DRIFT_RATE_C (0x1C8 + LOG_1X_BASE_C) + +/* GPS PDSM logs */ +#define LOG_PDSM_POSITION_REPORT_CALLBACK_C (0x1E1 + LOG_1X_BASE_C) +#define LOG_PDSM_PD_EVENT_CALLBACK_C (0x1E2 + LOG_1X_BASE_C) +#define LOG_PDSM_PA_EVENT_CALLBACK_C (0x1E3 + LOG_1X_BASE_C) +#define LOG_PDSM_NOTIFY_VERIFY_REQUEST_C (0x1E4 + LOG_1X_BASE_C) +#define LOG_PDSM_RESERVED1_C (0x1E5 + LOG_1X_BASE_C) +#define LOG_PDSM_RESERVED2_C (0x1E6 + LOG_1X_BASE_C) + +/* Searcher Demodulation Status log */ +#define LOG_SRCH_DEMOD_STATUS_C (0x1E7 + LOG_1X_BASE_C) + +/* Searcher Call Statistics log */ +#define LOG_SRCH_CALL_STATISTICS_C (0x1E8 + LOG_1X_BASE_C) + +/* GPS MS-MPC Forward link */ +#define LOG_MS_MPC_FWD_LINK_C (0x1E9 + LOG_1X_BASE_C) + +/* GPS MS-MPC Reverse link */ +#define LOG_MS_MPC_REV_LINK_C (0x1EA + LOG_1X_BASE_C) + +/* Protocol Services Data */ +#define LOG_DATA_PROTOCOL_LOGGING_C (0x1EB + LOG_1X_BASE_C) + +/* MediaFLO reserved log codes */ +#define LOG_MFLO_RESERVED_CODES_BASE_C (0x1EC + LOG_1X_BASE_C) +#define LOG_MFLO_LAST_C (99 + LOG_MFLO_RESERVED_CODES_BASE_C) + +/* GPS demodulation tracking header info */ +#define LOG_GPS_DEMOD_TRACKING_HEADER_C (0x250 + LOG_1X_BASE_C) + +/* GPS demodulation tracking results */ +#define LOG_GPS_DEMOD_TRACKING_C (0x251 + LOG_1X_BASE_C) + +/* GPS bit edge logs from demod tracking */ +#define LOG_GPS_DEMOD_BIT_EDGE_C (0x252 + LOG_1X_BASE_C) + +/* GPS demodulation soft decisions */ +#define LOG_GPS_DEMOD_SOFT_DECISIONS_C (0x253 + LOG_1X_BASE_C) + +/* GPS post-processed demod tracking results */ +#define LOG_GPS_DEMOD_TRACKING_POST_PROC_C (0x254 + LOG_1X_BASE_C) + +/* GPS subframe log */ +#define LOG_GPS_DEMOD_SUBFRAME_C (0x255 + LOG_1X_BASE_C) + +/* F-CPCCH Quality Information */ +#define LOG_F_CPCCH_QUALITY_INFO_C (0x256 + LOG_1X_BASE_C) + +/* Reverse PDCCH/PDCH Frame Information */ +#define LOG_R_PDCCH_R_PDCH_FRAME_INFO_C (0x257 + LOG_1X_BASE_C) + +/* Forward G Channel Information */ +#define LOG_F_GCH_INFO_C (0x258 + LOG_1X_BASE_C) + +/* Forward G Channel Frame Information */ +#define LOG_F_GCH_FRAME_INFO_C (0x259 + LOG_1X_BASE_C) + +/* Forward RC Channel Information */ +#define LOG_F_RCCH_INFO_C (0x25A + LOG_1X_BASE_C) + +/* Forward ACK Channel Information */ +#define LOG_F_ACKCH_INFO_C (0x25B + LOG_1X_BASE_C) + +/* Forward ACK Channel ACKDA Information */ +#define LOG_F_ACKCH_ACKDA_C (0x25C + LOG_1X_BASE_C) + +/* Reverse REQ Channel Information */ +#define LOG_R_REQCH_INFO_C (0x25D + LOG_1X_BASE_C) + +/* Sleep Task Statistics */ +#define LOG_SLEEP_STATS_C (0x25E + LOG_1X_BASE_C) + +/* Sleep controller statistics 1X */ +#define LOG_1X_SLEEP_CONTROLLER_STATS_C (0x25F + LOG_1X_BASE_C) + +/* Sleep controller statistics HDR */ +#define LOG_HDR_SLEEP_CONTROLLER_STATS_C (0x260 + LOG_1X_BASE_C) + +/* Sleep controller statistics GSM */ +#define LOG_GSM_SLEEP_CONTROLLER_STATS_C (0x261 + LOG_1X_BASE_C) + +/* Sleep controller statistics WCDMA */ +#define LOG_WCDMA_SLEEP_CONTROLLER_STATS_C (0x262 + LOG_1X_BASE_C) + +/* Sleep task and controller reserved logs */ +#define LOG_SLEEP_APPS_STATS_C (0x263 + LOG_1X_BASE_C) +#define LOG_SLEEP_STATS_RESERVED2_C (0x264 + LOG_1X_BASE_C) +#define LOG_SLEEP_STATS_RESERVED3_C (0x265 + LOG_1X_BASE_C) + +/* DV Information placeholder channel logs */ +#define LOG_PDCCH_LO_SELECTED_C (0x266 + LOG_1X_BASE_C) +#define LOG_PDCCH_HI_SELECTED_C (0x267 + LOG_1X_BASE_C) +#define LOG_WALSH_SELECTED_C (0x268 + LOG_1X_BASE_C) +#define LOG_PDCH_BE_SELECTED_C (0x269 + LOG_1X_BASE_C) +#define LOG_PDCCH_LLR_SELECTED_C (0x26A + LOG_1X_BASE_C) +#define LOG_CQI_ACK_LO_SELECTED_C (0x26B + LOG_1X_BASE_C) +#define LOG_CQI_ACK_HI_SELECTED_C (0x26C + LOG_1X_BASE_C) +#define LOG_RL_GAIN_SELECTED_C (0x26D + LOG_1X_BASE_C) +#define LOG_PDCCH0_SNDA_SELECTED_C (0x26E + LOG_1X_BASE_C) +#define LOG_PDCCH1_SNDA_SELECTED_C (0x26F + LOG_1X_BASE_C) + +/* 624 WMS Message List */ +#define LOG_WMS_MESSAGE_LIST_C (0x270 + LOG_1X_BASE_C) + +/* 625 Multimode Generic SIM Driver Interface */ +#define LOG_MM_GENERIC_SIM_DRIVER_C (0x271 + LOG_1X_BASE_C) + +/* 626 Generic SIM Toolkit Task */ +#define LOG_GENERIC_SIM_TOOLKIT_TASK_C (0x272 + LOG_1X_BASE_C) + +/* 627 Call Manager Phone events log */ +#define LOG_CM_PH_EVENT_C (0x273 + LOG_1X_BASE_C) + +/* 628 WMS Set Message List */ +#define LOG_WMS_SET_MESSAGE_LIST_C (0x274 + LOG_1X_BASE_C) + +/* 629-704 HDR reserved logs */ +#define LOG_HDR_RESERVED_CODES_BASE_3_C (0x275 + LOG_1X_BASE_C) +#define LOG_HDR_LAST_3_C (75 + LOG_HDR_RESERVED_CODES_BASE_3_C) + +/* 705 Call Manager call event log */ +#define LOG_CM_CALL_EVENT_C (0x2C1 + LOG_1X_BASE_C) + +/* 706-738 QVP reserved logs */ +#define LOG_QVP_RESERVED_CODES_BASE_C (0x2C2 + LOG_1X_BASE_C) +#define LOG_QVP_LAST_C (32 + LOG_QVP_RESERVED_CODES_BASE_C) + +/* 739 GPS PE Position Report log */ +#define LOG_GPS_PE_POSITION_REPORT_C (0x2E3 + LOG_1X_BASE_C) + +/* 740 GPS PE Position Report Extended log */ +#define LOG_GPS_PE_POSITION_REPORT_EXT_C (0x2E4 + LOG_1X_BASE_C) + +/* 741 log */ +#define LOG_MDDI_HOST_STATS_C (0x2E5 + LOG_1X_BASE_C) + +/* GPS Decoded Ephemeris */ +#define LOG_GPS_DECODED_EPHEMERIS_C (0x2E6 + LOG_1X_BASE_C) + +/* GPS Decoded Almanac */ +#define LOG_GPS_DECODED_ALMANAC_C (0x2E7 + LOG_1X_BASE_C) + +/* Transceiver Resource Manager */ +#define LOG_TRANSCEIVER_RESOURCE_MGR_C (0x2E8 + LOG_1X_BASE_C) + +/* GPS Position Engine Info */ +#define LOG_GPS_POSITION_ENGINE_INFO_C (0x2E9 + LOG_1X_BASE_C) + +/* 746-810 RAPTOR reserved log range */ +#define LOG_RAPTOR_RESERVED_CODES_BASE_C (0x2EA + LOG_1X_BASE_C) +#define LOG_RAPTOR_LAST_C (64 + LOG_RAPTOR_RESERVED_CODES_BASE_C) + +/* QOS Specification Logging */ + +/* QOS Requested Log */ +#define LOG_QOS_REQUESTED_C (0x32B + LOG_1X_BASE_C) + +/* QOS Granted Log */ +#define LOG_QOS_GRANTED_C (0x32C + LOG_1X_BASE_C) + +/* QOS State Log */ +#define LOG_QOS_STATE_C (0x32D + LOG_1X_BASE_C) + +#define LOG_QOS_MODIFIED_C (0x32E + LOG_1X_BASE_C) + +#define LOG_QDJ_ENQUEUE_C (0x32F + LOG_1X_BASE_C) +#define LOG_QDJ_DEQUEUE_C (0x330 + LOG_1X_BASE_C) +#define LOG_QDJ_UPDATE_C (0x331 + LOG_1X_BASE_C) +#define LOG_QDTX_ENCODER_C (0x332 + LOG_1X_BASE_C) +#define LOG_QDTX_DECODER_C (0x333 + LOG_1X_BASE_C) + +#define LOG_PORT_ASSIGNMENT_STATUS_C (0x334 + LOG_1X_BASE_C) + +/* Protocol Services reserved log codes */ +#define LOG_PS_RESERVED_CODES_BASE_C (0x335 + LOG_1X_BASE_C) +#define LOG_PS_LAST_C (25 + LOG_PS_RESERVED_C) + +#define LOG_PS_STAT_IP_C (0x335 + LOG_1X_BASE_C) +#define LOG_PS_STAT_GLOBAL_IPV4_C (0x335 + LOG_1X_BASE_C) +#define LOG_PS_STAT_GLOBAL_IPV6_C (0x336 + LOG_1X_BASE_C) +#define LOG_PS_STAT_GLOBAL_ICMPV4_C (0x337 + LOG_1X_BASE_C) +#define LOG_PS_STAT_GLOBAL_ICMPV6_C (0x338 + LOG_1X_BASE_C) +#define LOG_PS_STAT_GLOBAL_TCP_C (0x339 + LOG_1X_BASE_C) +#define LOG_PS_STAT_GLOBAL_UDP_C (0x33A + LOG_1X_BASE_C) + +/* Protocol Services describe all TCP instances */ +#define LOG_PS_STAT_DESC_ALL_TCP_INST_C (0x33B + LOG_1X_BASE_C) + +/* Protocol Services describe all memory pool instances */ +#define LOG_PS_STAT_DESC_ALL_MEM_POOL_INST_C (0x33C + LOG_1X_BASE_C) + +/* Protocol Services describe all IFACE instances */ +#define LOG_PS_STAT_DESC_ALL_IFACE_INST_C (0x33D + LOG_1X_BASE_C) + +/* Protocol Services describe all PPP instances */ +#define LOG_PS_STAT_DESC_ALL_PPP_INST_C (0x33E + LOG_1X_BASE_C) + +/* Protocol Services describe all ARP instances */ +#define LOG_PS_STAT_DESC_ALL_ARP_INST_C (0x33F + LOG_1X_BASE_C) + +/* Protocol Services describe delta instance */ +#define LOG_PS_STAT_DESC_DELTA_INST_C (0x340 + LOG_1X_BASE_C) + +/* Protocol Services instance TCP statistics */ +#define LOG_PS_STAT_TCP_INST_C (0x341 + LOG_1X_BASE_C) + +/* Protocol Services instance UDP statistics */ +#define LOG_PS_STAT_UDP_INST_C (0x342 + LOG_1X_BASE_C) + +/* Protocol Services instance PPP statistics */ +#define LOG_PS_STAT_PPP_INST_C (0x343 + LOG_1X_BASE_C) + +/* Protocol Services instance IFACE statistics */ +#define LOG_PS_STAT_IFACE_INST_C (0x344 + LOG_1X_BASE_C) + +/* Protocol Services instance memory statistics */ +#define LOG_PS_STAT_MEM_INST_C (0x345 + LOG_1X_BASE_C) + +/* Protocol Services instance flow statistics */ +#define LOG_PS_STAT_FLOW_INST_C (0x346 + LOG_1X_BASE_C) + +/* Protocol Services instance physical link statistics */ +#define LOG_PS_STAT_PHYS_LINK_INST_C (0x347 + LOG_1X_BASE_C) + +/* Protocol Services instance ARP statistics */ +#define LOG_PS_STAT_ARP_INST_C (0x348 + LOG_1X_BASE_C) + +/* Protocol Services instance LLC statistics */ +#define LOG_PS_STAT_LLC_INST_C (0x349 + LOG_1X_BASE_C) + +/* Protocol Services instance IPHC statistics */ +#define LOG_PS_STAT_IPHC_INST_C (0x34A + LOG_1X_BASE_C) + +/* Protocol Services instance ROHC statistics */ +#define LOG_PS_STAT_ROHC_INST_C (0x34B + LOG_1X_BASE_C) + +/* Protocol Services instance RSVP statistics */ +#define LOG_PS_STAT_RSVP_INST_C (0x34C + LOG_1X_BASE_C) + +/* Protocol Services describe all LLC instances */ +#define LOG_PS_STAT_DESC_ALL_LLC_INST_C (0x34D + LOG_1X_BASE_C) + +/* Protocol Services describe all RSVP instances */ +#define LOG_PS_STAT_DESC_ALL_RSVP_INST_C (0x34E + LOG_1X_BASE_C) + +/* Call Manager Serving System event log */ +#define LOG_CM_SS_EVENT_C (0x34F + LOG_1X_BASE_C) + +/* VcTcxo manager’s automatic frequency control log */ +#define LOG_TCXOMGR_AFC_DATA_C (0x350 + LOG_1X_BASE_C) + +/* Clock transactions and general clocks status log */ +#define LOG_CLOCK_C (0x351 + LOG_1X_BASE_C) + +/* GPS search processed peak results and their associated search parameters */ +#define LOG_GPS_PROCESSED_PEAK_C (0x352 + LOG_1X_BASE_C) + +#define LOG_MDSP_LOG_CHUNKS_C (0x353 + LOG_1X_BASE_C) + +/* Periodic RSSI update log */ +#define LOG_WLAN_RSSI_UPDATE_C (0x354 + LOG_1X_BASE_C) + +/* Periodic Link Layer statistics log */ +#define LOG_WLAN_LL_STAT_C (0x355 + LOG_1X_BASE_C) + +/* QOS Extended State Log */ +#define LOG_QOS_STATE_EX_C (0x356 + LOG_1X_BASE_C) + +/* Bluetooth host HCI transmitted data */ +#define LOG_BT_HOST_HCI_TX_C (0x357 + LOG_1X_BASE_C) + +/* Bluetooth host HCI received data */ +#define LOG_BT_HOST_HCI_RX_C (0x358 + LOG_1X_BASE_C) + +/* Internal - GPS PE Position Report Part 3 */ +#define LOG_GPS_PE_POSITION_REPORT_PART3_C (0x359 + LOG_1X_BASE_C) + +/* Extended log code which logs requested QoS */ +#define LOG_QOS_REQUESTED_EX_C (0x35A + LOG_1X_BASE_C) + +/* Extended log code which logs granted QoS */ +#define LOG_QOS_GRANTED_EX_C (0x35B + LOG_1X_BASE_C) + +/* Extended log code which logs modified QoS */ +#define LOG_QOS_MODIFIED_EX_C (0x35C + LOG_1X_BASE_C) + +/* Bus Monitor Profiling Info */ +#define LOG_BUS_MON_PROF_INFO_C (0x35D + LOG_1X_BASE_C) + +/* Pilot Phase Measurement Search results */ +#define LOG_SRCH_PPM_RES_VER_2_C (0x35E + LOG_1X_BASE_C) + +/* Pilot Phase Measurement Data Base */ +#define LOG_SRCH_PPM_DB_VER_2_C (0x35F + LOG_1X_BASE_C) + +/* Pilot Phase Measurement state machine */ +#define LOG_PPM_SM_C (0x360 + LOG_1X_BASE_C) + +/* Robust Header Compression - Compressor */ +#define LOG_ROHC_COMPRESSOR_C (0x361 + LOG_1X_BASE_C) + +/* Robust Header Compression - Decompressor */ +#define LOG_ROHC_DECOMPRESSOR_C (0x362 + LOG_1X_BASE_C) + +/* Robust Header Compression - Feedback Compressor */ +#define LOG_ROHC_FEEDBACK_COMPRESSOR_C (0x363 + LOG_1X_BASE_C) + +/* Robust Header Compression - Feedback Decompressor */ +#define LOG_ROHC_FEEDBACK_DECOMPRESSOR_C (0x364 + LOG_1X_BASE_C) + +/* Bluetooth HCI commands */ +#define LOG_BT_HCI_CMD_C (0x365 + LOG_1X_BASE_C) + +/* Bluetooth HCI events */ +#define LOG_BT_HCI_EV_C (0x366 + LOG_1X_BASE_C) + +/* Bluetooth HCI Transmitted ACL data */ +#define LOG_BT_HCI_TX_ACL_C (0x367 + LOG_1X_BASE_C) + +/* Bluetooth HCI Received ACL data */ +#define LOG_BT_HCI_RX_ACL_C (0x368 + LOG_1X_BASE_C) + +/* Bluetooth SOC H4 Deep Sleep */ +#define LOG_BT_SOC_H4DS_C (0x369 + LOG_1X_BASE_C) + +/* UMTS to CDMA Handover Message */ +#define LOG_UMTS_TO_CDMA_HANDOVER_MSG_C (0x36A + LOG_1X_BASE_C) + +/* Graphic Event Data */ +#define LOG_PROFILER_GRAPHIC_DATA_C (0x36B + LOG_1X_BASE_C) + +/* Audio Event Data */ +#define LOG_PROFILER_AUDIO_DATA_C (0x36C + LOG_1X_BASE_C) + +/* GPS Spectral Information */ +#define LOG_GPS_SPECTRAL_INFO_C (0x36D + LOG_1X_BASE_C) + +/* AHB Performance Monitor LOG data */ +#define LOG_APM_C (0x36E + LOG_1X_BASE_C) + +/* GPS Clock Report */ +#define LOG_CONVERGED_GPS_CLOCK_REPORT_C (0x36F + LOG_1X_BASE_C) + +/* GPS Position Report */ +#define LOG_CONVERGED_GPS_POSITION_REPORT_C (0x370 + LOG_1X_BASE_C) + +/* GPS Measurement Report */ +#define LOG_CONVERGED_GPS_MEASUREMENT_REPORT_C (0x371 + LOG_1X_BASE_C) + +/* GPS RF Status Report */ +#define LOG_CONVERGED_GPS_RF_STATUS_REPORT_C (0x372 + LOG_1X_BASE_C) + +/* VOIP To CDMA Handover Message - Obsoleted by 0x138B - 0x138D */ +#define LOG_VOIP_TO_CDMA_HANDOVER_MSG_C (0x373 + LOG_1X_BASE_C) + +/* GPS Prescribed Dwell Result */ +#define LOG_GPS_PRESCRIBED_DWELL_RESULT_C (0x374 + LOG_1X_BASE_C) + +/* CGPS IPC Data */ +#define LOG_CGPS_IPC_DATA_C (0x375 + LOG_1X_BASE_C) + +/* CGPS Non IPC Data */ +#define LOG_CGPS_NON_IPC_DATA_C (0x376 + LOG_1X_BASE_C) + +/* CGPS Session Report */ +#define LOG_CGPS_REP_EVT_LOG_PACKET_C (0x377 + LOG_1X_BASE_C) + +/* CGPS PDSM Get Position */ +#define LOG_CGPS_PDSM_GET_POSITION_C (0x378 + LOG_1X_BASE_C) + +/* CGPS PDSM Set Parameters */ +#define LOG_CGPS_PDSM_SET_PARAMETERS_C (0x379 + LOG_1X_BASE_C) + +/* CGPS PDSM End Session */ +#define LOG_CGPS_PDSM_END_SESSION_C (0x37A + LOG_1X_BASE_C) + +/* CGPS PDSM notify Verify Response */ +#define LOG_CGPS_PDSM_NOTIFY_VERIFY_RESP_C (0x37B + LOG_1X_BASE_C) + +/* CGPS PDSM Position Report Callback */ +#define LOG_CGPS_PDSM_POSITION_REPORT_CALLBACK_C (0x37C + LOG_1X_BASE_C) + +/* CGPS PDSM PD Event Callback */ +#define LOG_CGPS_PDSM_PD_EVENT_CALLBACK_C (0x37D + LOG_1X_BASE_C) + +/* CGPS PDSM PA Event Callback */ +#define LOG_CGPS_PDSM_PA_EVENT_CALLBACK_C (0x37E + LOG_1X_BASE_C) + +/* CGPS PDSM notify Verify Request Callback */ +#define LOG_CGPS_PDSM_NOTIFY_VERIFY_REQUEST_C (0x37F + LOG_1X_BASE_C) + +/* CGPS PDSM PD Command Error Callback */ +#define LOG_CGPS_PDSM_PD_CMD_ERR_CALLBACK_C (0x380 + LOG_1X_BASE_C) + +/* CGPS PDSM PA Command Error Callback */ +#define LOG_CGPS_PDSM_PA_CMD_ERR_CALLBACK_C (0x381 + LOG_1X_BASE_C) + +/* CGPS PDSM Position Error */ +#define LOG_CGPS_PDSM_POS_ERROR_C (0x382 + LOG_1X_BASE_C) + +/* CGPS PDSM Extended Status Position Report */ +#define LOG_CGPS_PDSM_EXT_STATUS_POS_REPORT_C (0x383 + LOG_1X_BASE_C) + +/* CGPS PDSM Extended Status NMEA Report */ +#define LOG_CGPS_PDSM_EXT_STATUS_NMEA_REPORT_C (0x384 + LOG_1X_BASE_C) + +/* CGPS PDSM Extended Status Measurement Report */ +#define LOG_CGPS_PDSM_EXT_STATUS_MEAS_REPORT_C (0x385 + LOG_1X_BASE_C) + +/* CGPS Report Server TX Packet */ +#define LOG_CGPS_REP_SVR_TX_LOG_PACKET_C (0x386 + LOG_1X_BASE_C) + +/* CGPS Report Server RX Packet */ +#define LOG_CGPS_REP_SVR_RX_LOG_PACKET_C (0x387 + LOG_1X_BASE_C) + +/* UMTS To CDMA Handover Paging Channel Message */ +#define LOG_UMTS_TO_CDMA_HANDOVER_PCH_MSG_C (0x388 + LOG_1X_BASE_C) + +/* UMTS To CDMA Handover Traffic Channel Message */ +#define LOG_UMTS_TO_CDMA_HANDOVER_TCH_MSG_C (0x389 + LOG_1X_BASE_C) + +/* Converged GPS IQ Report */ +#define LOG_CONVERGED_GPS_IQ_REPORT_C (0x38A + LOG_1X_BASE_C) + +/* VOIP To CDMA Paging Channel Handover Message */ +#define LOG_VOIP_TO_CDMA_PCH_HANDOVER_MSG_C (0x38B + LOG_1X_BASE_C) + +/* VOIP To CDMA Access Channel Handover Message */ +#define LOG_VOIP_TO_CDMA_ACH_HANDOVER_MSG_C (0x38C + LOG_1X_BASE_C) + +/* VOIP To CDMA Forward Traffic Channel Handover Message */ +#define LOG_VOIP_TO_CDMA_FTC_HANDOVER_MSG_C (0x38D + LOG_1X_BASE_C) + +/* QMI reserved logs */ +#define LOG_QMI_RESERVED_CODES_BASE_C (0x38E + LOG_1X_BASE_C) +#define LOG_QMI_LAST_C (32 + LOG_QMI_RESERVED_CODES_BASE_C) + +/* QOS Info Code Update Log */ +#define LOG_QOS_INFO_CODE_UPDATE_C (0x3AF + LOG_1X_BASE_C) + +/* Transmit(Uplink) Vocoder PCM Packet Log */ +#define LOG_TX_PCM_PACKET_C (0x3B0 + LOG_1X_BASE_C) + +/* Audio Vocoder Data Paths */ +#define LOG_AUDVOC_DATA_PATHS_PACKET_C (0x3B0 + LOG_1X_BASE_C) + +/* Receive(Downlink) Vocoder PCM Packet Log */ +#define LOG_RX_PCM_PACKET_C (0x3B1 + LOG_1X_BASE_C) + +/* CRC of YUV frame log */ +#define LOG_DEC_CRC_FRAME_C (0x3B2 + LOG_1X_BASE_C) + +/* FLUTE Session Information */ +#define LOG_FLUTE_SESSION_INFO_C (0x3B3 + LOG_1X_BASE_C) + +/* FLUTE ADP File Information */ +#define LOG_FLUTE_ADP_FILE_INFO_C (0x3B4 + LOG_1X_BASE_C) + +/* FLUTE File Request Information */ +#define LOG_FLUTE_FILE_REQ_INFO_C (0x3B5 + LOG_1X_BASE_C) + +/* FLUTE FDT Instance Information */ +#define LOG_FLUTE_FDT_INST_C (0x3B6 + LOG_1X_BASE_C) + +/* FLUTE FDT Information */ +#define LOG_FLUTE_FDT_INFO_C (0x3B7 + LOG_1X_BASE_C) + +/* FLUTE File Log Packet Information */ +#define LOG_FLUTE_FILE_INFO_C (0x3B8 + LOG_1X_BASE_C) + +/* 3G 1X Parameter Overhead Information */ +#define LOG_VOIP_TO_CDMA_3G1X_PARAMETERS_C (0x3B9 + LOG_1X_BASE_C) + +/* CGPS ME Job Info */ +#define LOG_CGPS_ME_JOB_INFO_C (0x3BA + LOG_1X_BASE_C) + +/* CGPS ME SV Lists */ +#define LOG_CPGS_ME_SV_LISTS_C (0x3BB + LOG_1X_BASE_C) + +/* Flexible Profiling Status */ +#define LOG_PROFDIAG_GEN_STATUS_C (0x3BC + LOG_1X_BASE_C) + +/* Flexible Profiling Results */ +#define LOG_PROFDIAG_GEN_PROF_C (0x3BD + LOG_1X_BASE_C) + +/* FLUTE ADP File Content Log Packet Information */ +#define LOG_FLUTE_ADP_FILE_C (0x3BE + LOG_1X_BASE_C) + +/* FLUTE FDT Instance File Content Log Packet Information */ +#define LOG_FLUTE_FDT_INST_FILE_C (0x3BF + LOG_1X_BASE_C) + +/* FLUTE FDT Entries Information */ +#define LOG_FLUTE_FDT_ENTRIES_INFO_C (0x3C0 + LOG_1X_BASE_C) + +/* FLUTE File Contents Log Packet Information */ +#define LOG_FLUTE_FILE_C (0x3C1 + LOG_1X_BASE_C) + +/* CGPS ME Time-Transfer Info */ +#define LOG_CGPS_ME_TIME_TRANSFER_INFO_C (0x3C2 + LOG_1X_BASE_C) + +/* CGPS ME UMTS Time-Tagging Info */ +#define LOG_CGPS_ME_UMTS_TIME_TAGGING_INFO_C (0x3C3 + LOG_1X_BASE_C) + +/* CGPS ME Generic Time Estimate Put lnfo */ +#define LOG_CGPS_ME_TIME_EST_PUT_INFO_C (0x3C4 + LOG_1X_BASE_C) + +/* CGPS ME Generic Freq Estimate Put lnfo */ +#define LOG_CGPS_ME_FREQ_EST_PUT_INFO_C (0x3C5 + LOG_1X_BASE_C) + +/* CGPS Slow Clock Report */ +#define LOG_CGPS_SLOW_CLOCK_REPORT_C (0x3C6 + LOG_1X_BASE_C) + +/* Converged GPS Medium Grid */ +#define LOG_CONVERGED_GPS_MEDIUM_GRID_C (0x3C7 + LOG_1X_BASE_C) + +/* Static information about the driver or device */ +#define LOG_SNSD_INFO_C (0x3C8 + LOG_1X_BASE_C) + +/* Dynamic state information about the device or driver */ +#define LOG_SNSD_STATE_C (0x3C9 + LOG_1X_BASE_C) + +/* Data from a driver */ +#define LOG_SNSD_DATA (0x3CA + LOG_1X_BASE_C) +#define LOG_SNSD_DATA_C (0x3CA + LOG_1X_BASE_C) + +/* CGPS Cell DB Cell Change Info */ +#define LOG_CGPS_CELLDB_CELL_CHANGE_INFO_C (0x3CB + LOG_1X_BASE_C) + +/* xScalar YUV frame log */ +#define LOG_DEC_XSCALE_YUV_FRAME_C (0x3CC + LOG_1X_BASE_C) + +/* CRC of xScaled YUV frame log */ +#define LOG_DEC_XSCALE_CRC_FRAME_C (0x3CD + LOG_1X_BASE_C) + +/* CGPS Frequency Estimate Report */ +#define LOG_CGPS_FREQ_EST_REPORT_C (0x3CE + LOG_1X_BASE_C) + +/* GPS DCME Srch Job Completed */ +#define LOG_GPS_DCME_SRCH_JOB_COMPLETED_C (0x3CF + LOG_1X_BASE_C) + +/* CGPS ME Fastscan results */ +#define LOG_CGPS_ME_FASTSCAN_RESULTS_C (0x3D0 + LOG_1X_BASE_C) + +/* XO frequency Estimation log */ +#define LOG_XO_FREQ_EST_C (0x3D1 + LOG_1X_BASE_C) + +/* Tcxomgr field calibration data */ +#define LOG_TCXOMGR_FIELD_CAL_C (0x3D2 + LOG_1X_BASE_C) + +/* UMB Call Processing Connection Attempt */ +#define LOG_UMBCP_CONNECTION_ATTEMPT_C (0x3D3 + LOG_1X_BASE_C) + +/* UMB Call Processing Connection Release */ +#define LOG_UMBCP_CONNECTION_RELEASE_C (0x3D4 + LOG_1X_BASE_C) + +/* UMB Call Processing Page Message */ +#define LOG_UMBCP_PAGE_MESSAGE_C (0x3D5 + LOG_1X_BASE_C) + +/* UMB Call Processing OVHD Information */ +#define LOG_UMBCP_OVHD_INFO_C (0x3D6 + LOG_1X_BASE_C) + +/* UMB Call Processing Session Attempt */ +#define LOG_UMBCP_SESSION_ATTEMPT_C (0x3D7 + LOG_1X_BASE_C) + +/* UMB Call Processing Route Information */ +#define LOG_UMBCP_ROUTE_INFO_C (0x3D8 + LOG_1X_BASE_C) + +/* UMB Call Processing State Information */ +#define LOG_UMBCP_STATE_INFO_C (0x3D9 + LOG_1X_BASE_C) + +/* UMB Call Processing SNP */ +#define LOG_UMBCP_SNP_C (0x3DA + LOG_1X_BASE_C) + +/* CGPS Session Early Exit Decision */ +#define LOG_CGPS_SESSION_EARLY_EXIT_DECISION_C (0x3DB + LOG_1X_BASE_C) + +/* GPS RF Linearity Status */ +#define LOG_CGPS_ME_RF_LINEARITY_INFO_C (0x3DC + LOG_1X_BASE_C) + +/* CGPS ME 5ms IQ Sums */ +#define LOG_CGPS_ME_5MS_IQ_SUMS_C (0x3DD + LOG_1X_BASE_C) + +/* CGPS ME 20ms IQ Sums */ +#define LOG_CPGS_ME_20MS_IQ_SUMS_C (0x3DE + LOG_1X_BASE_C) + +/* ROHC Compressor Statistics */ +#define LOG_ROHC_COMPRESSOR_STATS_C (0x3DF + LOG_1X_BASE_C) + +/* ROHC Decompressor Statistics */ +#define LOG_ROHC_DECOMPRESSOR_STATS_C (0x3E0 + LOG_1X_BASE_C) + +/* Sensors - Kalman filter information */ +#define LOG_SENSOR_KF_INFO_C (0x3E1 + LOG_1X_BASE_C) + +/* Sensors - Integrated measurements */ +#define LOG_SENSOR_INT_MEAS_C (0x3E2 + LOG_1X_BASE_C) + +/* Sensors - Bias calibration values */ +#define LOG_SENSOR_BIAS_CALIBRATION_C (0x3E3 + LOG_1X_BASE_C) + +/* Log codes 0x13E4-0x13E7 are not following standard log naming convention */ + +/* DTV ISDB-T Transport Stream Packets */ +#define LOG_DTV_ISDB_TS_PACKETS (0x3E4 + LOG_1X_BASE_C) + +/* DTV ISDB-T PES Packets */ +#define LOG_DTV_ISDB_PES_PACKETS (0x3E5 + LOG_1X_BASE_C) + +/* DTV ISDB-T Sections */ +#define LOG_DTV_ISDB_SECTIONS (0x3E6 + LOG_1X_BASE_C) + +/* DTV ISDB-T Buffering */ +#define LOG_DTV_ISDB_BUFFERING (0x3E7 + LOG_1X_BASE_C) + +/* WLAN System Acquisition and Handoff */ +#define LOG_WLAN_SYS_ACQ_HO_C (0x3E8 + LOG_1X_BASE_C) + +/* WLAN General Configurable Parameters */ +#define LOG_WLAN_GEN_CONFIG_PARAMS_C (0x3E9 + LOG_1X_BASE_C) + +/* UMB Physical Layer Channel and Interference Estimation */ +#define LOG_UMB_PHY_RX_DPICH_CIE_C (0x3EA + LOG_1X_BASE_C) + +/* UMB Physical Layer MMSE/MRC Demodulated Data Symbols (Low) */ +#define LOG_UMB_PHY_RX_DATA_DEMOD_LOW_C (0x3EB + LOG_1X_BASE_C) + +/* UMB Physical Layer MMSE/MRC Demodulated Data Symbols (High) */ +#define LOG_UMB_PHY_RX_DATA_DEMOD_HIGH_C (0x3EC + LOG_1X_BASE_C) + +/* UMB Physical Layer DCH Decoder */ +#define LOG_UMB_PHY_RX_DCH_DECODER_C (0x3ED + LOG_1X_BASE_C) + +/* UMB Physical Layer DCH Statistics */ +#define LOG_UMB_PHY_DCH_STATISTICS_C (0x3EE + LOG_1X_BASE_C) + +/* UMB Physical Layer CqiPich Processing */ +#define LOG_UMB_PHY_RX_CQIPICH_C (0x3EF + LOG_1X_BASE_C) + +/* UMB Physical Layer MIMO/SIMO in CqiPich (High) */ +#define LOG_UMB_PHY_RX_CQIPICH_CHANTAPS_HIGH_C (0x3F0 + LOG_1X_BASE_C) + +/* UMB Physical Layer MIMO/SIMO in CquiPich (Low) */ +#define LOG_UMB_PHY_RX_CQIPICH_CHANTAPS_LOW_C (0x3F1 + LOG_1X_BASE_C) + +/* UMB Physical Layer Time-Domain Channel Taps (High) */ +#define LOG_UMB_PHY_RX_PPICH_CHAN_EST_HIGH_C (0x3F2 + LOG_1X_BASE_C) + +/* UMB Physical Layer Time-Domain Channel Taps (Low) */ +#define LOG_UMB_PHY_RX_PPICH_CHAN_EST_LOW_C (0x3F3 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator */ +#define LOG_UMB_PHY_TX_PICH_CONFIG_C (0x3F4 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-ACK (High) */ +#define LOG_UMB_PHY_TX_ACK_HIGH_C (0x3F5 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-ACK (Low) */ +#define LOG_UMB_PHY_TX_ACK_LOW_C (0x3F6 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-PICH */ +#define LOG_UMB_PHY_TX_PICH_C (0x3F7 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-ACH (Access) */ +#define LOG_UMB_PHY_TX_ACH_C (0x3F8 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-ODDCCH (High) */ +#define LOG_UMB_PHY_TX_ODCCH_HIGH_C (0x3F9 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-ODDCCH (Low) */ +#define LOG_UMB_PHY_TX_ODCCH_LOW_C (0x3FA + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-CDCCH */ +#define LOG_UMB_PHY_TX_RCDCCH_CONFIG_C (0x3FB + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for CQI sent on RCDCCH */ +#define LOG_UMB_PHY_TX_NONFLSS_CQICH_C (0x3FC + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for CQI sent on RCDCCH */ +#define LOG_UMB_PHY_TX_FLSS_CQICH_C (0x3FD + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for PACH sent on RCDCCH */ +#define LOG_UMB_PHY_TX_PAHCH_C (0x3FE + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for REQ sent on RCDCCH */ +#define LOG_UMB_PHY_TX_REQCH_C (0x3FF + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for PSD sent on RCDCCH */ +#define LOG_UMB_PHY_TX_PSDCH_C (0x400 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-DCH */ +#define LOG_UMB_PHY_TX_DCH_C (0x401 + LOG_1X_BASE_C) + +/* UMB Physical Layer Time/Frequency/RxPower Estimate */ +#define LOG_UMB_PHY_RX_TIME_FREQ_POWER_ESTIMATE_C (0x402 + LOG_1X_BASE_C) + +/* UMB Physical Layer FLCS Processing */ +#define LOG_UMB_PHY_RX_FLCS_PROCESSING_C (0x403 + LOG_1X_BASE_C) + +/* UMB Physical Layer PBCCH Processing */ +#define LOG_UMB_PHY_RX_PBCCH_PROCESSING_C (0x404 + LOG_1X_BASE_C) + +/* UMB Physical Layer SBCCH Processing */ +#define LOG_UMB_PHY_RX_SBCCH_PROCESSING_C (0x405 + LOG_1X_BASE_C) + +/* UMB Physical Layer QPCH Processing */ +#define LOG_UMB_PHY_RX_QPCH_PROCESSING_C (0x406 + LOG_1X_BASE_C) + +/* UMB Physical Layer MRC Demodulated Data Symbols (Preamble SBCCH/QPCH) */ +#define LOG_UMB_PHY_RX_SBCCH_DEMOD_C (0x407 + LOG_1X_BASE_C) + +/* UMB Physical Layer MRC Demodulated Data Symbols (Preamble PBCCH) */ +#define LOG_UMB_PHY_RX_PBCCH_DEMOD_C (0x408 + LOG_1X_BASE_C) + +/* UMB Physical Layer VCQI */ +#define LOG_UMB_PHY_RX_VCQI_C (0x409 + LOG_1X_BASE_C) + +/* UMB Physical Layer Acquisition Algorithm */ +#define LOG_UMB_PHY_RX_INITIAL_ACQUISITION_C (0x40A + LOG_1X_BASE_C) + +/* UMB Physical Layer Handoff Search Algorithm */ +#define LOG_UMB_PHY_RX_HANDOFF_SEARCH_C (0x40B + LOG_1X_BASE_C) + +/* UMB RF RFFE Configuration Info */ +#define LOG_UMB_AT_RFFE_CONFG_C (0x40C + LOG_1X_BASE_C) + +/* UMB RF Calibrated Values After Powerup */ +#define LOG_UMB_AT_RFFE_RX_CALIB_C (0x40D + LOG_1X_BASE_C) + +/* UMB RF AGC Block in Acquisition Mode */ +#define LOG_UMB_AT_RFFE_RX_ACQ_C (0x40E + LOG_1X_BASE_C) + +/* UMB RF AGC Block in Idle Mode */ +#define LOG_UMB_AT_RFFE_RX_IDLE_C (0x40F + LOG_1X_BASE_C) + +/* UMB RF AGC Block in Connected Mode */ +#define LOG_UMB_AT_RFFE_RX_CONNECTED_C (0x410 + LOG_1X_BASE_C) + +/* UMB RF AGC Block in Connected Mode (FTM) */ +#define LOG_UMB_AT_RFFE_RX_CONNECTED_FTM_C (0x411 + LOG_1X_BASE_C) + +/* UMB RF Jammer Detector Functionality */ +#define LOG_UMB_AT_RFFE_RX_JAMMER_DETECTOR_FUNCTIONALITY_C (0x412 + LOG_1X_BASE_C) + +/* UMB RF Jammer Detector Response */ +#define LOG_UMB_AT_RFFE_RX_JAMMER_DETECTOR_RESPONSE_C (0x413 + LOG_1X_BASE_C) + +/* UMB RF RFFE TX Power Control */ +#define LOG_UMB_AT_RFFE_TX_BETA_SCALING_C (0x414 + LOG_1X_BASE_C) + +/* UMB Searcher Dump */ +#define LOG_UMB_SEARCHER_DUMP_C (0x415 + LOG_1X_BASE_C) + +/* UMB System Acquire */ +#define LOG_UMB_SYSTEM_ACQUIRE_C (0x416 + LOG_1X_BASE_C) + +/* UMB Set Maintenance */ +#define LOG_UMB_SET_MAINTENANCE_C (0x417 + LOG_1X_BASE_C) + +/* UMB QPCH */ +#define LOG_UMB_QPCH_C (0x418 + LOG_1X_BASE_C) + +/* UMB RLL Forward Partial RP Packet */ +#define LOG_UMB_RLL_FORWARD_PARTIAL_RP_C (0x419 + LOG_1X_BASE_C) + +/* UMB RLL Reverse Partial RP Packet */ +#define LOG_UMB_RLL_REVERSE_PARTIAL_RP_C (0x41A + LOG_1X_BASE_C) + +/* UMB RLL Forward Signal Packet */ +#define LOG_UMB_RLL_FORWARD_SIGNAL_C (0x41B + LOG_1X_BASE_C) + +/* UMB RLL Reverse Signal Packet */ +#define LOG_UMB_RLL_REVERSE_SIGNAL_C (0x41C + LOG_1X_BASE_C) + +/* UMB RLL Forward Statistics */ +#define LOG_UMB_RLL_FORWARD_STATS_C (0x41D + LOG_1X_BASE_C) + +/* UMB RLL Reverse Statistics */ +#define LOG_UMB_RLL_REVERSE_STATS_C (0x41E + LOG_1X_BASE_C) + +/* UMB RLL IRTP */ +#define LOG_UMB_RLL_IRTP_C (0x41F + LOG_1X_BASE_C) + +/* UMB AP Forward Link MAC Packets */ +#define LOG_UMB_AP_FL_MAC_PACKET_C (0x420 + LOG_1X_BASE_C) + +/* UMB AP Reverse Link MAC Packets */ +#define LOG_UMB_AP_RL_MAC_PACKET_C (0x421 + LOG_1X_BASE_C) + +/* GPS Performance Statistics log */ +#define LOG_CGPS_PERFORMANCE_STATS_C (0x422 + LOG_1X_BASE_C) + +/* UMB Searcher General Status */ +#define LOG_UMB_SRCH_GENERAL_STATUS_C (0x423 + LOG_1X_BASE_C) + +/* UMB Superframe Scheduler */ +#define LOG_UMB_SUPERFRAME_SCHEDULER_C (0x424 + LOG_1X_BASE_C) + +/* UMB Sector List */ +#define LOG_UMB_SECTOR_LIST_C (0x425 + LOG_1X_BASE_C) + +/* UMB MAC Access Attempt Command */ +#define LOG_UMB_MAC_ACCESS_ATTEMPT_CMD_C (0x426 + LOG_1X_BASE_C) + +/* UMB MAC Access Probe Information */ +#define LOG_UMB_MAC_ACCESS_PROBE_INFO_C (0x427 + LOG_1X_BASE_C) + +/* UMB MAC RTCMAC Package Information */ +#define LOG_UMB_MAC_RTCMAC_PKG_INFO_C (0x428 + LOG_1X_BASE_C) + +/* UMB MAC Super Frame Information */ +#define LOG_UMB_MAC_SI_INFO_C (0x429 + LOG_1X_BASE_C) + +/* UMB MAC Quick Channel Information */ +#define LOG_UMB_MAC_QCI_INFO_C (0x42A + LOG_1X_BASE_C) + +/* UMB MAC Paging Id List */ +#define LOG_UMB_MAC_PAGING_ID_LIST_C (0x42B + LOG_1X_BASE_C) + +/* UMB MAC Quick Paging Channel Information */ +#define LOG_UMB_MAC_QPCH_INFO_C (0x42C + LOG_1X_BASE_C) + +/* UMB MAC FTCMAC Information */ +#define LOG_UMB_MAC_FTCMAC_PKG_INFO_C (0x42D + LOG_1X_BASE_C) + +/* UMB MAC Access Grant Receiving */ +#define LOG_UMB_MAC_ACCESS_GRANT_C (0x42E + LOG_1X_BASE_C) + +/* UMB MAC Generic Debug Log */ +#define LOG_UMB_MAC_GEN_DEBUG_LOG_PKG_C (0x42F + LOG_1X_BASE_C) + +/* CGPS Frequency Bias Estimate */ +#define LOG_CGPS_MC_FREQ_BIAS_EST_C (0x430 + LOG_1X_BASE_C) + +/* UMB MAC Request Report Information Log */ +#define LOG_UMB_MAC_REQCH_REPORT_INFO_C (0x431 + LOG_1X_BASE_C) + +/* UMB MAC Reverse Link QoS Token Bucket Information Log */ +#define LOG_UMB_MAC_RLQOS_TOKEN_BUCKET_INFO_C (0x432 + LOG_1X_BASE_C) + +/* UMB MAC Reverse Link QoS Stream Information Log */ +#define LOG_UMB_MAC_RLQOS_STREAM_INFO_C (0x433 + LOG_1X_BASE_C) + +/* UMB MAC Reverse Link QoS Allotment Information Log */ +#define LOG_UMB_MAC_RLQOS_ALLOTMENT_INFO_C (0x434 + LOG_1X_BASE_C) + +/* UMB Searcher Recent State Machine Transactions */ +#define LOG_UMB_SRCH_STM_ACTIVITY_C (0x435 + LOG_1X_BASE_C) + +/* Performance Counters on ARM11 Profiling Information */ +#define LOG_ARM11_PERF_CNT_INFO_C (0x436 + LOG_1X_BASE_C) + +/* Protocol Services describe all flow instances */ +#define LOG_PS_STAT_DESC_ALL_FLOW_INST_C (0x437 + LOG_1X_BASE_C) + +/* Protocol Services describe all physical link instances */ +#define LOG_PS_STAT_DESC_ALL_PHYS_LINK_INST_C (0x438 + LOG_1X_BASE_C) + +/* Protocol Services describe all UDP instances */ +#define LOG_PS_STAT_DESC_ALL_UDP_INST_C (0x439 + LOG_1X_BASE_C) + +/* Searcher 4 Multi-Carrier HDR */ +#define LOG_SRCH4_MC_HDR_C (0x43A + LOG_1X_BASE_C) + +/* Protocol Services describe all IPHC instances */ +#define LOG_PS_STAT_DESC_ALL_IPHC_INST_C (0x43B + LOG_1X_BASE_C) + +/* Protocol Services describe all ROHC instances */ +#define LOG_PS_STAT_DESC_ALL_ROHC_INST_C (0x43C + LOG_1X_BASE_C) + +/* BCast security add program information */ +#define LOG_BCAST_SEC_ADD_PROGRAM_INFO_C (0x43D + LOG_1X_BASE_C) + +/* BCast security add program complete */ +#define LOG_BCAST_SEC_ADD_PROGRAM_COMPLETE_C (0x43E + LOG_1X_BASE_C) + +/* BCast security SDP parse */ +#define LOG_BCAST_SEC_SDP_PARSE_C (0x43F + LOG_1X_BASE_C) + +/* CGPS ME dynamic power optimization status */ +#define LOG_CGPS_ME_DPO_STATUS_C (0x440 + LOG_1X_BASE_C) + +/* CGPS PDSM on demand session start */ +#define LOG_CGPS_PDSM_ON_DEMAND_SESSION_START_C (0x441 + LOG_1X_BASE_C) + +/* CGPS PDSM on demand session stop */ +#define LOG_CGPS_PDSM_ON_DEMAND_SESSION_STOP_C (0x442 + LOG_1X_BASE_C) + +/* CGPS PDSM on demand session not started */ +#define LOG_CGPS_PDSM_ON_DEMAND_SESSION_NOT_STARTED_C (0x443 + LOG_1X_BASE_C) + +/* CGPS PDSM extern coarse position inject start */ +#define LOG_CGPS_PDSM_EXTERN_COARSE_POS_INJ_START_C (0x444 + LOG_1X_BASE_C) + +/* DTV ISDB-T TMCC information */ +#define LOG_DTV_ISDB_TMCC_C (0x445 + LOG_1X_BASE_C) + +/* RF development */ +#define LOG_RF_DEV_C (0x446 + LOG_1X_BASE_C) + +/* RF RFM API */ +#define LOG_RF_RFM_API_C (0x447 + LOG_1X_BASE_C) + +/* RF RFM state */ +#define LOG_RF_RFM_STATE_C (0x448 + LOG_1X_BASE_C) + +/* 1X RF Warmup */ +#define LOG_1X_RF_WARMUP_C (0x449 + LOG_1X_BASE_C) + +/* 1X RF power limiting */ +#define LOG_1X_RF_PWR_LMT_C (0x44A + LOG_1X_BASE_C) + +/* 1X RF state */ +#define LOG_1X_RF_STATE_C (0x44B + LOG_1X_BASE_C) + +/* 1X RF sleep */ +#define LOG_1X_RF_SLEEP_C (0x44C + LOG_1X_BASE_C) + +/* 1X RF TX state */ +#define LOG_1X_RF_TX_STATE_C (0x44D + LOG_1X_BASE_C) + +/* 1X RF IntelliCeiver state */ +#define LOG_1X_RF_INT_STATE_C (0x44E + LOG_1X_BASE_C) + +/* 1X RF RX ADC clock */ +#define LOG_1X_RF_RX_ADC_CLK_C (0x44F + LOG_1X_BASE_C) + +/* 1X RF LNA switch point */ +#define LOG_1X_RF_LNA_SWITCHP_C (0x450 + LOG_1X_BASE_C) + +/* 1X RF RX calibration */ +#define LOG_1X_RF_RX_CAL_C (0x451 + LOG_1X_BASE_C) + +/* 1X RF API */ +#define LOG_1X_RF_API_C (0x452 + LOG_1X_BASE_C) + +/* 1X RF RX PLL locking status */ +#define LOG_1X_RF_RX_PLL_LOCK_C (0x453 + LOG_1X_BASE_C) + +/* 1X RF voltage regulator */ +#define LOG_1X_RF_VREG_C (0x454 + LOG_1X_BASE_C) + +/* CGPS DIAG successful fix count */ +#define LOG_CGPS_DIAG_SUCCESSFUL_FIX_COUNT_C (0x455 + LOG_1X_BASE_C) + +/* CGPS MC track dynamic power optimization status */ +#define LOG_CGPS_MC_TRACK_DPO_STATUS_C (0x456 + LOG_1X_BASE_C) + +/* CGPS MC SBAS demodulated bits */ +#define LOG_CGPS_MC_SBAS_DEMOD_BITS_C (0x457 + LOG_1X_BASE_C) + +/* CGPS MC SBAS demodulated soft symbols */ +#define LOG_CGPS_MC_SBAS_DEMOD_SOFT_SYMBOLS_C (0x458 + LOG_1X_BASE_C) + +/* Data Services PPP configuration */ +#define LOG_DS_PPP_CONFIG_PARAMS_C (0x459 + LOG_1X_BASE_C) + +/* Data Services physical link configuration */ +#define LOG_DS_PHYS_LINK_CONFIG_PARAMS_C (0x45A + LOG_1X_BASE_C) + +/* Data Services PPP device configuration */ +#define LOG_PS_PPP_DEV_CONFIG_PARAMS_C (0x45B + LOG_1X_BASE_C) + +/* CGPS PDSM GPS state information */ +#define LOG_CGPS_PDSM_GPS_STATE_INFO_C (0x45C + LOG_1X_BASE_C) + +/* CGPS PDSM EXT status GPS state information */ +#define LOG_CGPS_PDSM_EXT_STATUS_GPS_STATE_INFO_C (0x45D + LOG_1X_BASE_C) + +/* CGPS ME Rapid Search Report */ +#define LOG_CGPS_ME_RAPID_SEARCH_REPORT_C (0x45E + LOG_1X_BASE_C) + +/* CGPS PDSM XTRA-T session */ +#define LOG_CGPS_PDSM_XTRA_T_SESSION_C (0x45F + LOG_1X_BASE_C) + +/* CGPS PDSM XTRA-T upload */ +#define LOG_CGPS_PDSM_XTRA_T_UPLOAD_C (0x460 + LOG_1X_BASE_C) + +/* CGPS Wiper Position Report */ +#define LOG_CGPS_WIPER_POSITION_REPORT_C (0x461 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard HTTP Digest Request Info */ +#define LOG_DTV_DVBH_SEC_SC_HTTP_DIGEST_REQ_C (0x462 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard HTTP Digest Response Info */ +#define LOG_DTV_DVBH_SEC_SC_HTTP_DIGEST_RSP_C (0x463 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard Services Registration Request Info */ +#define LOG_DTV_DVBH_SEC_SC_SVC_REG_REQ_C (0x464 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard Services Registration Complete Info */ +#define LOG_DTV_DVBH_SEC_SC_SVC_REG_COMPLETE_C (0x465 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard Services Deregistration Request Info */ +#define LOG_DTV_DVBH_SEC_SC_SVC_DEREG_REQ_C (0x466 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard Services Deregistration Complete Info */ +#define LOG_DTV_DVBH_SEC_SC_SVC_DEREG_COMPLETE_C (0x467 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard LTKM Request Info */ +#define LOG_DTV_DVBH_SEC_SC_LTKM_REQ_C (0x468 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard LTKM Request Complete Info */ +#define LOG_DTV_DVBH_SEC_SC_LTKM_REQ_COMPLETE_C (0x469 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard Program Selection Info */ +#define LOG_DTV_DVBH_SEC_SC_PROG_SEL_C (0x46A + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard Program Selection Complete Info */ +#define LOG_DTV_DVBH_SEC_SC_PROG_SEL_COMPLETE_C (0x46B + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard LTKM */ +#define LOG_DTV_DVBH_SEC_SC_LTKM_C (0x46C + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard LTKM Verification Message */ +#define LOG_DTV_DVBH_SEC_SC_LTKM_VERIFICATION_C (0x46D + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard Parental Control Message */ +#define LOG_DTV_DVBH_SEC_SC_PARENTAL_CTRL_C (0x46E + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard STKM */ +#define LOG_DTV_DVBH_SEC_SC_STKM_C (0x46F + LOG_1X_BASE_C) + +/* Protocol Services Statistics Global Socket */ +#define LOG_PS_STAT_GLOBAL_SOCK_C (0x470 + LOG_1X_BASE_C) + +/* MCS Application Manager */ +#define LOG_MCS_APPMGR_C (0x471 + LOG_1X_BASE_C) + +/* MCS MSGR */ +#define LOG_MCS_MSGR_C (0x472 + LOG_1X_BASE_C) + +/* MCS QTF */ +#define LOG_MCS_QTF_C (0x473 + LOG_1X_BASE_C) + +/* Sensors Stationary Detector Output */ +#define LOG_STATIONARY_DETECTOR_OUTPUT_C (0x474 + LOG_1X_BASE_C) + +/* Print out the ppm data portion */ +#define LOG_CGPS_PDSM_EXT_STATUS_MEAS_REPORT_PPM_C (0x475 + LOG_1X_BASE_C) + +/* GNSS Position Report */ +#define LOG_GNSS_POSITION_REPORT_C (0x476 + LOG_1X_BASE_C) + +/* GNSS GPS Measurement Report */ +#define LOG_GNSS_GPS_MEASUREMENT_REPORT_C (0x477 + LOG_1X_BASE_C) + +/* GNSS Clock Report */ +#define LOG_GNSS_CLOCK_REPORT_C (0x478 + LOG_1X_BASE_C) + +/* GNSS Demod Soft Decision */ +#define LOG_GNSS_DEMOD_SOFT_DECISIONS_C (0x479 + LOG_1X_BASE_C) + +/* GNSS ME 5MS IQ sum */ +#define LOG_GNSS_ME_5MS_IQ_SUMS_C (0x47A + LOG_1X_BASE_C) + +/* GNSS CD DB report */ +#define LOG_GNSS_CD_DB_REPORT_C (0x47B + LOG_1X_BASE_C) + +/* GNSS PE WLS position report */ +#define LOG_GNSS_PE_WLS_POSITION_REPORT_C (0x47C + LOG_1X_BASE_C) + +/* GNSS PE KF position report */ +#define LOG_GNSS_PE_KF_POSITION_REPORT_C (0x47D + LOG_1X_BASE_C) + +/* GNSS PRX RF HW status report */ +#define LOG_GNSS_PRX_RF_HW_STATUS_REPORT_C (0x47E + LOG_1X_BASE_C) + +/* GNSS DRX RF HW status report */ +#define LOG_GNSS_DRX_RF_HW_STATUS_REPORT_C (0x47F + LOG_1X_BASE_C) + +/* GNSS Glonass Measurement report */ +#define LOG_GNSS_GLONASS_MEASUREMENT_REPORT_C (0x480 + LOG_1X_BASE_C) + +/* GNSS GPS HBW RXD measurement */ +#define LOG_GNSS_GPS_HBW_RXD_MEASUREMENT_C (0x481 + LOG_1X_BASE_C) + +/* GNSS PDSM position report callback */ +#define LOG_GNSS_PDSM_POSITION_REPORT_CALLBACK_C (0x482 + LOG_1X_BASE_C) + +/* ISense Request String */ +#define LOG_ISENSE_REQUEST_STR_C (0x483 + LOG_1X_BASE_C) + +/* ISense Response String */ +#define LOG_ISENSE_RESPONSE_STR_C (0x484 + LOG_1X_BASE_C) + +/* Bluetooth SOC General Log Packet*/ +#define LOG_BT_SOC_GENERAL_C (0x485 + LOG_1X_BASE_C) + +/* QCRil Call Flow */ +#define LOG_QCRIL_CALL_FLOW_C (0x486 + LOG_1X_BASE_C) + +/* CGPS Wideband FFT stats */ +#define LOG_CGPS_WB_FFT_STATS_C (0x487 + LOG_1X_BASE_C) + +/* CGPS Slow Clock Calibration Report*/ +#define LOG_CGPS_SLOW_CLOCK_CALIB_REPORT_C (0x488 + LOG_1X_BASE_C) + +/* SNS GPS TIMESTAMP */ +#define LOG_SNS_GPS_TIMESTAMP_C (0x489 + LOG_1X_BASE_C) + +/* GNSS Search Strategy Task Allocation */ +#define LOG_GNSS_SEARCH_STRATEGY_TASK_ALLOCATION_C (0x48A + LOG_1X_BASE_C) + +/* RF MC STM state */ +#define LOG_1XHDR_MC_STATE_C (0x48B + LOG_1X_BASE_C) + +/* Record in the Sparse Network DB */ +#define LOG_CGPS_SNDB_RECORD_C (0x48C + LOG_1X_BASE_C) + +/* Record removed from the DB */ +#define LOG_CGPS_SNDB_REMOVE_C (0x48D + LOG_1X_BASE_C) + +/* CGPS Reserved */ +#define LOG_GNSS_CC_PERFORMANCE_STATS_C (0x48E + LOG_1X_BASE_C) + +/* GNSS PDSM Set Paramerters */ +#define LOG_GNSS_PDSM_SET_PARAMETERS_C (0x48F + LOG_1X_BASE_C) + +/* GNSS PDSM PD Event Callback */ +#define LOG_GNSS_PDSM_PD_EVENT_CALLBACK_C (0x490 + LOG_1X_BASE_C) + +/* GNSS PDSM PA Event Callback */ +#define LOG_GNSS_PDSM_PA_EVENT_CALLBACK_C (0x491 + LOG_1X_BASE_C) + +/* CGPS Reserved */ +#define LOG_CGPS_RESERVED2_C (0x492 + LOG_1X_BASE_C) + +/* CGPS Reserved */ +#define LOG_CGPS_RESERVED3_C (0x493 + LOG_1X_BASE_C) + +/* GNSS PDSM EXT Status MEAS Report */ +#define LOG_GNSS_PDSM_EXT_STATUS_MEAS_REPORT_C (0x494 + LOG_1X_BASE_C) + +/* GNSS SM Error */ +#define LOG_GNSS_SM_ERROR_C (0x495 + LOG_1X_BASE_C) + +/* WLAN Scan */ +#define LOG_WLAN_SCAN_C (0x496 + LOG_1X_BASE_C) + +/* WLAN IBSS */ +#define LOG_WLAN_IBSS_C (0x497 + LOG_1X_BASE_C) + +/* WLAN 802.11d*/ +#define LOG_WLAN_80211D_C (0x498 + LOG_1X_BASE_C) + +/* WLAN Handoff */ +#define LOG_WLAN_HANDOFF_C (0x499 + LOG_1X_BASE_C) + +/* WLAN QoS EDCA */ +#define LOG_WLAN_QOS_EDCA_C (0x49A + LOG_1X_BASE_C) + +/* WLAN Beacon Update */ +#define LOG_WLAN_BEACON_UPDATE_C (0x49B + LOG_1X_BASE_C) + +/* WLAN Power save wow add pattern */ +#define LOG_WLAN_POWERSAVE_WOW_ADD_PTRN_C (0x49C + LOG_1X_BASE_C) + +/* WLAN WCM link metrics */ +#define LOG_WLAN_WCM_LINKMETRICS_C (0x49D + LOG_1X_BASE_C) + +/* WLAN wps scan complete*/ +#define LOG_WLAN_WPS_SCAN_COMPLETE_C (0x49E + LOG_1X_BASE_C) + +/* WLAN WPS WSC Message */ +#define LOG_WLAN_WPS_WSC_MESSAGE_C (0x49F + LOG_1X_BASE_C) + +/* WLAN WPS credentials */ +#define LOG_WLAN_WPS_CREDENTIALS_C (0x4A0 + LOG_1X_BASE_C) + +/* WLAN Qos TSpec*/ +#define LOG_WLAN_QOS_TSPEC_C (0x4A2 + LOG_1X_BASE_C) + +/* PMIC Vreg Control */ +#define LOG_PM_VREG_CONTROL_C (0x4A3 + LOG_1X_BASE_C) + +/* PMIC Vreg Level */ +#define LOG_PM_VREG_LEVEL_C (0x4A4 + LOG_1X_BASE_C) + +/* PMIC Vreg State */ +#define LOG_PM_VREG_STATE_C (0x4A5 + LOG_1X_BASE_C) + +/* CGPS SM EPH Randomization info */ +#define LOG_CGPS_SM_EPH_RANDOMIZATION_INFO_C (0x4A6 + LOG_1X_BASE_C) + +/* Audio calibration data */ +#define LOG_QACT_DATA_C (0x4A7 + LOG_1X_BASE_C) + +/* Compass 2D Tracked Calibration Set */ +#define LOG_SNS_VCPS_2D_TRACKED_CAL_SET (0x4A8 + LOG_1X_BASE_C) + +/* Compass 3D Tracked Calibration Set */ +#define LOG_SNS_VCPS_3D_TRACKED_CAL_SET (0x4A9 + LOG_1X_BASE_C) + +/* Calibration metric */ +#define LOG_SNS_VCPS_CAL_METRIC (0x4AA + LOG_1X_BASE_C) + +/* Accelerometer distance */ +#define LOG_SNS_VCPS_ACCEL_DIST (0x4AB + LOG_1X_BASE_C) + +/* Plane update */ +#define LOG_SNS_VCPS_PLANE_UPDATE (0x4AC + LOG_1X_BASE_C) + +/* Location report */ +#define LOG_SNS_VCPS_LOC_REPORT (0x4AD + LOG_1X_BASE_C) + +/* CM Active subscription */ +#define LOG_CM_PH_EVENT_SUBSCRIPTION_PREF_INFO_C (0x4AE + LOG_1X_BASE_C) + +/* DSDS version of CM call event */ +#define LOG_CM_DS_CALL_EVENT_C (0x4AF + LOG_1X_BASE_C) + +/* Sensors ?MobiSens Output */ +#define LOG_MOBISENS_OUTPUT_C (0x4B0 + LOG_1X_BASE_C) + +/* Accelerometer Data */ +#define LOG_ACCEL_DATA_C (0x4B1 + LOG_1X_BASE_C) + +/* Accelerometer Compensated Data */ +#define LOG_ACCEL_COMP_DATA_C (0x4B2 + LOG_1X_BASE_C) + +/* Motion State Data */ +#define LOG_MOTION_STATE_DATA_C (0x4B3 + LOG_1X_BASE_C) + +/* Stationary Position Indicator */ +#define LOG_STAT_POS_IND_C (0x4B4 + LOG_1X_BASE_C) + +/* Motion State Features */ +#define LOG_MOTION_STATE_FEATURES_C (0x4B5 + LOG_1X_BASE_C) + +/* Motion State Hard Decision */ +#define LOG_MOTION_STATE_HARD_DECISION_C (0x4B6 + LOG_1X_BASE_C) + +/* Motion State Soft Decision */ +#define LOG_MOTION_STATE_SOFT_DECISION_C (0x4B7 + LOG_1X_BASE_C) + +/* Sensors Software Version */ +#define LOG_SENSORS_SOFTWARE_VERSION_C (0x4B8 + LOG_1X_BASE_C) + +/* MobiSens Stationary Position Indicator Log Packet */ +#define LOG_MOBISENS_SPI_C (0x4B9 + LOG_1X_BASE_C) + +/* XO calibration raw IQ data */ +#define LOG_XO_IQ_DATA_C (0x4BA + LOG_1X_BASE_C) + +/*DTV CMMB Control Tabl Updated*/ +#define LOG_DTV_CMMB_CONTROL_TABLE_UPDATE ((0x4BB) + LOG_1X_BASE_C) + +/*DTV CMMB Media API Buffering Status*/ +#define LOG_DTV_CMMB_MEDIA_BUFFERING_STATUS ((0x4BC) + LOG_1X_BASE_C) + +/*DTV CMMB *Emergency Broadcast Data*/ +#define LOG_DTV_CMMB_CONTROL_EMERGENCY_BCAST ((0x4BD) + LOG_1X_BASE_C) + +/*DTV CMMB EMM/ECM Data*/ +#define LOG_DTV_CMMB_CAS_EMM_ECM ((0x4BE) + LOG_1X_BASE_C) + +/*DTV CMMB HW Status*/ +#define LOG_DTV_CMMB_HW_PERFORMANCE ((0x4BF) + LOG_1X_BASE_C) + +/*DTV CMMB ESSG Program Indication Information*/ +#define LOG_DTV_CMMB_ESG_PROGRAM_INDICATION_INFORMATION ((0x4C0) + LOG_1X_BASE_C) + +/* Sensors ¨C binary output of converted sensor data */ +#define LOG_CONVERTED_SENSOR_DATA_C ((0x4C1) + LOG_1X_BASE_C) + +/* CM Subscription event */ +#define LOG_CM_SUBSCRIPTION_EVENT_C ((0x4C2) + LOG_1X_BASE_C) + +/* Sensor Ambient Light Data */ +#define LOG_SNS_ALS_DATA_C ((0x4C3) + LOG_1X_BASE_C) + +/*Sensor Ambient Light Adaptive Data */ +#define LOG_SNS_ALS_DATA_ADAPTIVE_C ((0x4C4) + LOG_1X_BASE_C) + +/*Sensor Proximity Distance Data */ +#define LOG_SNS_PRX_DIST_DATA_C ((0x4C5) + LOG_1X_BASE_C) + +/*Sensor Proximity Data */ +#define LOG_SNS_PRX_DATA_C ((0x4C6) + LOG_1X_BASE_C) + +#define LOG_GNSS_SBAS_REPORT_C ((0x4C7) + LOG_1X_BASE_C) + +#define LOG_CPU_MONITOR_MODEM_C ((0x4C8) + LOG_1X_BASE_C) + +#define LOG_CPU_MONITOR_APPS_C ((0x4C9) + LOG_1X_BASE_C) + +#define LOG_BLAST_TASKPROFILE_C ((0x4CA) + LOG_1X_BASE_C) + +#define LOG_BLAST_SYSPROFILE_C ((0x4CB) + LOG_1X_BASE_C) + +#define LOG_FM_RADIO_FTM_C ((0x4CC) + LOG_1X_BASE_C) + +#define LOG_FM_RADIO_C ((0x4CD) + LOG_1X_BASE_C) + +#define LOG_UIM_DS_DATA_C ((0x4CE) + LOG_1X_BASE_C) + +#define LOG_QMI_CALL_FLOW_C ((0x4CF) + LOG_1X_BASE_C) + +#define LOG_APR_MODEM_C ((0x4D0) + LOG_1X_BASE_C) + +#define LOG_APR_APPS_C ((0x4D1) + LOG_1X_BASE_C) + +#define LOG_APR_ADSP_C ((0x4D2) + LOG_1X_BASE_C) + +#define LOG_DATA_MUX_RX_RAW_PACKET_C ((0x4D3) + LOG_1X_BASE_C) + +#define LOG_DATA_MUX_TX_RAW_PACKET_C ((0x4D4) + LOG_1X_BASE_C) + +#define LOG_DATA_MUX_RX_FRAME_PACKET_C ((0x4D5) + LOG_1X_BASE_C) + +#define LOG_DATA_MUX_TX_FRAME_PACKET_C ((0x4D6) + LOG_1X_BASE_C) + +#define LOG_CGPS_PDSM_EXT_STATUS_POS_INJ_REQ_INFO_C ((0x4D7) + LOG_1X_BASE_C) + +#define LOG_TEMPERATURE_MONITOR_C ((0x4D8) + LOG_1X_BASE_C) + +#define LOG_SNS_GESTURES_REST_DETECT_C ((0x4D9) + LOG_1X_BASE_C) + +#define LOG_SNS_GESTURES_ORIENTATION_C ((0x4DA) + LOG_1X_BASE_C) + +#define LOG_SNS_GESTURES_FACING_C ((0x4DB) + LOG_1X_BASE_C) + +#define LOG_SNS_GESTURES_BASIC_C ((0x4DC) + LOG_1X_BASE_C) + +#define LOG_SNS_GESTURES_HINBYE_C ((0x4DD) + LOG_1X_BASE_C) + +#define LOG_GNSS_OEMDRE_MEASUREMENT_REPORT_C ((0x4DE) + LOG_1X_BASE_C) + +#define LOG_GNSS_OEMDRE_POSITION_REPORT_C ((0x4E0) + LOG_1X_BASE_C) + +#define LOG_GNSS_OEMDRE_SVPOLY_REPORT_C ((0x4E1) + LOG_1X_BASE_C) + +#define LOG_GNSS_OEMDRSYNC_C ((0x4E2) + LOG_1X_BASE_C) + +#define LOG_SNS_MGR_EVENT_NOTIFY_C ((0x4E3) + LOG_1X_BASE_C) + +#define LOG_SNS_MGR_EVENT_REGISTER_C ((0x4E4) + LOG_1X_BASE_C) + +#define LOG_GNSS_PDSM_PPM_SESSION_BEGIN_C ((0x4E5) + LOG_1X_BASE_C) + +#define LOG_GNSS_PDSM_PPM_SESSION_PPM_SUSPEND_C ((0x4E6) + LOG_1X_BASE_C) + +#define LOG_GNSS_PDSM_PPM_REPORT_THROTTLED_C ((0x4E7) + LOG_1X_BASE_C) + +#define LOG_GNSS_PDSM_PPM_REPORT_FIRED_C ((0x4E8) + LOG_1X_BASE_C) + +#define LOG_GNSS_PDSM_PPM_SESSION_END_C ((0x4E9) + LOG_1X_BASE_C) + +#define LOG_TRSP_DATA_STALL_C ((0x801) + LOG_1X_BASE_C) + +#define LOG_WLAN_PKT_LOG_INFO_C ((0x8E0) + LOG_1X_BASE_C) + +/* The last defined DMSS log code */ +#define LOG_1X_LAST_C ((0x8E0) + LOG_1X_BASE_C) + +#define LOG_WLAN_COLD_BOOT_CAL_DATA_C ((0xA18) + LOG_1X_BASE_C) + +#define LOG_WLAN_AUTH_ASSOC_TX_RX_INFO_C ((0xA19) + LOG_1X_BASE_C) +#define LOG_WLAN_ROAM_TRIGGER_INFO_C ((0xA1A) + LOG_1X_BASE_C) +#define LOG_WLAN_ROAM_SCAN_INFO_C ((0xA1B) + LOG_1X_BASE_C) +#define LOG_WLAN_ROAM_RESULT_INFO_C ((0xA1C) + LOG_1X_BASE_C) +#define LOG_WLAN_RRM_TX_RX_INFO_C ((0xA1D) + LOG_1X_BASE_C) + +#define LOG_WLAN_DP_PROTO_PKT_INFO_C ((0xA1E) + LOG_1X_BASE_C) + +/* This is only here for old (pre equipment ID update) logging code */ +#define LOG_LAST_C (LOG_1X_LAST_C & 0xFFF) + +/* ------------------------------------------------------------------------- + * APPS LOG definition: + * The max number of 16 log codes is assigned for Apps. + * The last apps log code could be 0xB00F. + * Below definition is consolidated from log_codes_apps.h + * ------------------------------------------------------------------------- */ + +/* ======================== APPS Profiling ======================== */ +#define LOG_APPS_SYSPROFILE_C (0x01 + LOG_APPS_BASE_C) +#define LOG_APPS_TASKPROFILE_C (0x02 + LOG_APPS_BASE_C) + +/* The last defined APPS log code */ +/* Change it to (0x02 + LOG_LTE_LAST_C) to allow LTE log codes */ +#define LOG_APPS_LAST_C (0x02 + LOG_LTE_LAST_C) + +/* ------------------------------------------------------------------------- + * Log Equipment IDs. + * The number is represented by 4 bits. + * ------------------------------------------------------------------------- */ +typedef enum { + LOG_EQUIP_ID_OEM = 0, /* 3rd party OEM (licensee) use */ + LOG_EQUIP_ID_1X = 1, /* Traditional 1X line of products */ + LOG_EQUIP_ID_RSVD2 = 2, + LOG_EQUIP_ID_RSVD3 = 3, + LOG_EQUIP_ID_WCDMA = 4, + LOG_EQUIP_ID_GSM = 5, + LOG_EQUIP_ID_LBS = 6, + LOG_EQUIP_ID_UMTS = 7, + LOG_EQUIP_ID_TDMA = 8, + LOG_EQUIP_ID_BOA = 9, + LOG_EQUIP_ID_DTV = 10, + LOG_EQUIP_ID_APPS = 11, + LOG_EQUIP_ID_DSP = 12, + + LOG_EQUIP_ID_LAST_DEFAULT = LOG_EQUIP_ID_DSP +} log_equip_id_enum_type; + +#define LOG_EQUIP_ID_MAX 0xF /* The equipment ID is 4 bits */ + +/* Note that these are the official values and are used by default in + diagtune.h. + */ +#define LOG_EQUIP_ID_0_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_1_LAST_CODE_DEFAULT LOG_1X_LAST_C +#define LOG_EQUIP_ID_2_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_3_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_4_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_5_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_6_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_7_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_8_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_9_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_10_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_11_LAST_CODE_DEFAULT LOG_LTE_LAST_C +#define LOG_EQUIP_ID_12_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_13_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_14_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_15_LAST_CODE_DEFAULT 0 + +#endif /* LOG_CODES_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/host_diag_log.c b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/host_diag_log.c new file mode 100644 index 0000000000000000000000000000000000000000..f42743e9c8ac4fab5459113da3bf0d656e1b1414 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/host_diag_log.c @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/*============================================================================ + FILE: host_diag_log.c + + OVERVIEW: This source file contains definitions for WLAN UTIL diag APIs + + DEPENDENCIES: + ============================================================================*/ + +#include "qdf_types.h" +#include "i_host_diag_core_log.h" +#include "host_diag_core_event.h" +#include "wlan_nlink_common.h" +#include "cds_sched.h" +#include "wlan_ptt_sock_svc.h" +#include "wlan_nlink_srv.h" +#include "cds_api.h" +#include "wlan_ps_wow_diag.h" +#include "qdf_str.h" + +#define PTT_MSG_DIAG_CMDS_TYPE (0x5050) + +#define DIAG_TYPE_LOGS (1) +#define DIAG_TYPE_EVENTS (2) + +#define DIAG_SWAP16(A) ((((uint16_t)(A) & 0xff00) >> 8) | (((uint16_t)(A) & 0x00ff) << 8)) + +typedef struct event_report_s { + uint32_t diag_type; + uint16_t event_id; + uint16_t length; +} event_report_t; + +/**--------------------------------------------------------------------------- + + \brief host_diag_log_set_code() - + + This function sets the logging code in the given log record. + + \param - ptr - Pointer to the log header type. + - code - log code. + \return - None + + --------------------------------------------------------------------------*/ + +void host_diag_log_set_code(void *ptr, uint16_t code) +{ + if (ptr) { + /* All log packets are required to start with 'log_header_type' */ + ((log_hdr_type *) ptr)->code = code; + } +} + +/**--------------------------------------------------------------------------- + + \brief host_diag_log_set_length() - + + This function sets the length field in the given log record. + + \param - ptr - Pointer to the log header type. + - length - log length. + + \return - None + + --------------------------------------------------------------------------*/ + +void host_diag_log_set_length(void *ptr, uint16_t length) +{ + if (ptr) { + /* All log packets are required to start with 'log_header_type' */ + ((log_hdr_type *) ptr)->len = (uint16_t) length; + } +} + +/**--------------------------------------------------------------------------- + + \brief host_diag_log_submit() - + + This function sends the log data to the ptt socket app only if it is registered with the driver. + + \param - ptr - Pointer to the log header type. + + \return - None + + --------------------------------------------------------------------------*/ + +void host_diag_log_submit(void *plog_hdr_ptr) +{ + log_hdr_type *pHdr = (log_hdr_type *) plog_hdr_ptr; + tAniHdr *wmsg = NULL; + uint8_t *pBuf; + uint16_t data_len; + uint16_t total_len; + + if (cds_is_load_or_unload_in_progress()) + return; + + if (nl_srv_is_initialized() != 0) + return; + + if (cds_is_multicast_logging()) { + data_len = pHdr->len; + + total_len = sizeof(tAniHdr) + sizeof(uint32_t) + data_len; + + pBuf = (uint8_t *) qdf_mem_malloc(total_len); + + if (!pBuf) + return; + + wmsg = (tAniHdr *) pBuf; + wmsg->type = PTT_MSG_DIAG_CMDS_TYPE; + wmsg->length = total_len; + wmsg->length = DIAG_SWAP16(wmsg->length); + pBuf += sizeof(tAniHdr); + + /* Diag Type events or log */ + *(uint32_t *) pBuf = DIAG_TYPE_LOGS; + pBuf += sizeof(uint32_t); + + memcpy(pBuf, pHdr, data_len); + ptt_sock_send_msg_to_app (wmsg, 0, ANI_NL_MSG_PUMAC, + INVALID_PID); + qdf_mem_free((void *)wmsg); + } + return; +} + +/** + * host_diag_log_wlock() - This function is used to send wake lock diag events + * @reason: Reason why the wakelock was taken or released + * @wake_lock_name: Function in which the wakelock was taken or released + * @timeout: Timeout value in case of timed wakelocks + * @status: Status field indicating whether the wake lock was taken/released + * + * This function is used to send wake lock diag events to user space + * + * Return: None + * + */ +void host_diag_log_wlock(uint32_t reason, const char *wake_lock_name, + uint32_t timeout, uint32_t status) +{ + WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, + struct host_event_wlan_wake_lock); + + if ((nl_srv_is_initialized() != 0) || + (cds_is_wakelock_enabled() == false)) + return; + + wlan_diag_event.status = status; + wlan_diag_event.reason = reason; + wlan_diag_event.timeout = timeout; + wlan_diag_event.name_len = strlen(wake_lock_name); + strlcpy(&wlan_diag_event.name[0], + wake_lock_name, + wlan_diag_event.name_len+1); + + WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, EVENT_WLAN_WAKE_LOCK); +} + +/**--------------------------------------------------------------------------- + + \brief host_diag_event_report_payload() - + + This function sends the event data to the ptt socket app only if it is + registered with the driver. + + \param - ptr - Pointer to the log header type. + + \return - None + + --------------------------------------------------------------------------*/ + +void host_diag_event_report_payload(uint16_t event_Id, uint16_t length, + void *pPayload) +{ + tAniHdr *wmsg = NULL; + uint8_t *pBuf; + event_report_t *pEvent_report; + uint16_t total_len; + + if (cds_is_load_or_unload_in_progress()) + return; + + if (nl_srv_is_initialized() != 0) + return; + + if (cds_is_multicast_logging()) { + total_len = sizeof(tAniHdr) + sizeof(event_report_t) + length; + + pBuf = (uint8_t *) qdf_mem_malloc(total_len); + + if (!pBuf) + return; + + wmsg = (tAniHdr *) pBuf; + wmsg->type = PTT_MSG_DIAG_CMDS_TYPE; + wmsg->length = total_len; + wmsg->length = DIAG_SWAP16(wmsg->length); + pBuf += sizeof(tAniHdr); + + pEvent_report = (event_report_t *) pBuf; + pEvent_report->diag_type = DIAG_TYPE_EVENTS; + pEvent_report->event_id = event_Id; + pEvent_report->length = length; + + pBuf += sizeof(event_report_t); + + memcpy(pBuf, pPayload, length); + + if (ptt_sock_send_msg_to_app + (wmsg, 0, ANI_NL_MSG_PUMAC, INVALID_PID) < 0) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "Ptt Socket error sending message to the app!!"); + qdf_mem_free((void *)wmsg); + return; + } + + qdf_mem_free((void *)wmsg); + } + + return; + +} + +/** + * host_log_low_resource_failure() - This function is used to send low + * resource failure event + * @event_sub_type: Reason why the failure was observed + * + * This function is used to send low resource failure events to user space + * + * Return: None + * + */ +void host_log_low_resource_failure(uint8_t event_sub_type) +{ + WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, + struct host_event_wlan_low_resource_failure); + + wlan_diag_event.event_sub_type = event_sub_type; + + WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, + EVENT_WLAN_LOW_RESOURCE_FAILURE); +} + +void host_log_rsn_info(uint8_t *ucast_cipher, uint8_t *mcast_cipher, + uint8_t *akm_suite, uint8_t *group_mgmt) +{ + WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, + struct event_wlan_csr_rsn_info); + + qdf_mem_copy(wlan_diag_event.ucast_cipher, ucast_cipher, + RSN_OUI_SIZE); + qdf_mem_copy(wlan_diag_event.mcast_cipher, mcast_cipher, + RSN_OUI_SIZE); + qdf_mem_copy(wlan_diag_event.akm_suite, akm_suite, + RSN_OUI_SIZE); + qdf_mem_copy(wlan_diag_event.group_mgmt, group_mgmt, + RSN_OUI_SIZE); + + WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, + EVENT_WLAN_RSN_INFO); +} + +void +host_log_wlan_auth_info(uint16_t auth_algo_num, uint16_t auth_tx_seq_num, + uint16_t auth_status_code) +{ + WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, + struct event_wlan_lim_auth_info); + + wlan_diag_event.auth_algo_num = auth_algo_num; + wlan_diag_event.auth_transaction_seq_num = auth_tx_seq_num; + wlan_diag_event.auth_status_code = auth_status_code; + + WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, + EVENT_WLAN_AUTH_INFO); +} + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +/** + * qdf_wow_wakeup_host_event()- send wow wakeup event + * @wow_wakeup_cause: WOW wakeup reason code + * + * This function sends wow wakeup reason code diag event + * + * Return: void. + */ +void qdf_wow_wakeup_host_event(uint8_t wow_wakeup_cause) +{ + WLAN_HOST_DIAG_EVENT_DEF(wowRequest, + host_event_wlan_powersave_wow_payload_type); + qdf_mem_zero(&wowRequest, sizeof(wowRequest)); + + wowRequest.event_subtype = WLAN_WOW_WAKEUP; + wowRequest.wow_wakeup_cause = wow_wakeup_cause; + WLAN_HOST_DIAG_EVENT_REPORT(&wowRequest, + EVENT_WLAN_POWERSAVE_WOW); +} + +void host_log_acs_req_event(uint8_t *intf, const uint8_t *hw_mode, uint16_t bw, + uint8_t ht, uint8_t vht, uint16_t chan_start, + uint16_t chan_end) +{ + WLAN_HOST_DIAG_EVENT_DEF(acs_req, struct host_event_wlan_acs_req); + + qdf_str_lcopy(acs_req.intf, intf, HOST_EVENT_INTF_STR_LEN); + qdf_str_lcopy(acs_req.hw_mode, hw_mode, HOST_EVENT_HW_MODE_STR_LEN); + acs_req.bw = bw; + acs_req.ht = ht; + acs_req.vht = vht; + acs_req.chan_start = chan_start; + acs_req.chan_end = chan_end; + + WLAN_HOST_DIAG_EVENT_REPORT(&acs_req, EVENT_WLAN_ACS_REQ); +} + +void host_log_acs_scan_start(uint32_t scan_id, uint8_t vdev_id) +{ + WLAN_HOST_DIAG_EVENT_DEF(acs_scan_start, + struct host_event_wlan_acs_scan_start); + + acs_scan_start.scan_id = scan_id; + acs_scan_start.vdev_id = vdev_id; + + WLAN_HOST_DIAG_EVENT_REPORT(&acs_scan_start, + EVENT_WLAN_ACS_SCAN_START); +} + +void host_log_acs_scan_done(const uint8_t *status, + uint8_t vdev_id, uint32_t scan_id) +{ + WLAN_HOST_DIAG_EVENT_DEF(acs_scan_done, + struct host_event_wlan_acs_scan_done); + + qdf_str_lcopy(acs_scan_done.status, status, HOST_EVENT_STATUS_STR_LEN); + acs_scan_done.vdev_id = vdev_id; + acs_scan_done.scan_id = scan_id; + + WLAN_HOST_DIAG_EVENT_REPORT(&acs_scan_done, EVENT_WLAN_ACS_SCAN_DONE); +} + +void host_log_acs_chan_spect_weight(uint16_t chan, uint16_t weight, + int32_t rssi, uint16_t bss_count) +{ + WLAN_HOST_DIAG_EVENT_DEF( + acs_chan_spect_weight, + struct host_event_wlan_acs_chan_spectral_weight); + + acs_chan_spect_weight.chan = chan; + acs_chan_spect_weight.weight = weight; + acs_chan_spect_weight.rssi = rssi; + acs_chan_spect_weight.bss_count = bss_count; + + WLAN_HOST_DIAG_EVENT_REPORT(&acs_chan_spect_weight, + EVENT_WLAN_ACS_CHANNEL_SPECTRAL_WEIGHT); +} + +void host_log_acs_best_chan(uint16_t chan, uint16_t weight) +{ + WLAN_HOST_DIAG_EVENT_DEF(acs_best_chan, + struct host_event_wlan_acs_best_chan); + + acs_best_chan.chan = chan; + acs_best_chan.weight = weight; + + WLAN_HOST_DIAG_EVENT_REPORT(&acs_best_chan, + EVENT_WLAN_ACS_BEST_CHANNEL); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/i_host_diag_core_event.h b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/i_host_diag_core_event.h new file mode 100644 index 0000000000000000000000000000000000000000..e20d9f1407bed8154a255dde7b4dd15e4a1d580a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/i_host_diag_core_event.h @@ -0,0 +1,271 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(__I_HOST_DIAG_CORE_EVENT_H) +#define __I_HOST_DIAG_CORE_EVENT_H + +/**========================================================================= + + \file i_host_diag_core_event.h + + \brief Android specific definitions for WLAN UTIL DIAG events + + ========================================================================*/ + +/* $Header$ */ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include +#ifdef FEATURE_WLAN_DIAG_SUPPORT +#include +#endif + +/*-------------------------------------------------------------------------- + Preprocessor definitions and constants + ------------------------------------------------------------------------*/ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#ifdef FEATURE_WLAN_DIAG_SUPPORT + +void host_diag_event_report_payload(uint16_t event_Id, uint16_t length, + void *pPayload); +/*--------------------------------------------------------------------------- + Allocate an event payload holder + ---------------------------------------------------------------------------*/ +#define WLAN_HOST_DIAG_EVENT_DEF(payload_name, payload_type) \ + payload_type(payload_name) + +/*--------------------------------------------------------------------------- + Report the event + ---------------------------------------------------------------------------*/ +#define WLAN_HOST_DIAG_EVENT_REPORT(payload_ptr, ev_id) \ + do { \ + host_diag_event_report_payload(ev_id, \ + sizeof(*(payload_ptr)), \ + (void *)(payload_ptr)); \ + } while (0) + +#else /* FEATURE_WLAN_DIAG_SUPPORT */ + +#define WLAN_HOST_DIAG_EVENT_DEF(payload_name, payload_type) +#define WLAN_HOST_DIAG_EVENT_REPORT(payload_ptr, ev_id) + +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ + +/** + * enum auth_timeout_type - authentication timeout type + * @AUTH_FAILURE_TIMEOUT: auth failure timeout + * @AUTH_RESPONSE_TIMEOUT: auth response timeout + */ +enum auth_timeout_type { + AUTH_FAILURE_TIMEOUT, + AUTH_RESPONSE_TIMEOUT, +}; + +/*------------------------------------------------------------------------- + Function declarations and documenation + ------------------------------------------------------------------------*/ +#ifdef FEATURE_WLAN_DIAG_SUPPORT +void host_diag_log_wlock(uint32_t reason, const char *wake_lock_name, + uint32_t timeout, uint32_t status); +#else +static inline void host_diag_log_wlock(uint32_t reason, + const char *wake_lock_name, + uint32_t timeout, uint32_t status) +{ + +} +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +void host_log_low_resource_failure(uint8_t event_sub_type); +#else +static inline void host_log_low_resource_failure(uint8_t event_sub_type) +{ + +} +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +/** + * host_log_rsn_info() - This function is used to send + * requested rsn info in assoc request + * @ucast_cipher: Unicast ciphers used in assoc request + * @mcast_cipher: Group ciphers used in assoc request + * @akm_suite: Gives information about akm suites used in assoc request + * @group_mgmt: Requested group mgmt cipher suite + * + * This function is used to send RSN info used in assoc req to user space + * + * Return: None + * + */ +void host_log_rsn_info(uint8_t *ucast_cipher, uint8_t *mcast_cipher, + uint8_t *auth_suite, uint8_t *gp_mgmt_cipher); + +#else +static inline void host_log_rsn_info(uint8_t *ucast_cipher, + uint8_t *mcast_cipher, + uint8_t *auth_suite, + uint8_t *gp_mgmt_cipher) +{ + +} + +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +/** + * host_log_wlan_auth_info() - This function is used to send + * algo num, seq num and status code for auth request + * @auth_algo_num: Gives information about algo num used in auth request + * @auth_tx_seq_num: seq num of auth request + * @auth_status_code: status code of auth request + * + * This function is used to send send algo num, seq num and status code + * for auth request + * + * Return: None + * + */ +void +host_log_wlan_auth_info(uint16_t auth_algo_num, uint16_t auth_tx_seq_num, + uint16_t auth_status_code); + +#else +static inline void +host_log_wlan_auth_info(uint16_t auth_algo_num, uint16_t auth_tx_seq_num, + uint16_t auth_status_code) +{ +} + +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +void qdf_wow_wakeup_host_event(uint8_t wow_wakeup_cause); + +/** + * host_log_acs_req_event() - ACS request event indication + * @intf: network interface name for WLAN + * @hw_mode: hw mode configured by hostapd + * @bw: channel bandwidth (MHz) + * @ht: a flag indicating whether HT phy mode is enabled + * @vht: a flag indicating whether VHT phy mode is enabled + * @chan_start: starting channel number for ACS scan + * @chan_end: ending channel number for ACS scan + * + * Indicates the diag event for ACS request with payload related + * to parameters populated by hostapd + * + * Return: None + */ +void host_log_acs_req_event(uint8_t *intf, const uint8_t *hw_mode, + uint16_t bw, uint8_t ht, uint8_t vht, + uint16_t chan_start, uint16_t chan_end); + +/** + * host_log_acs_scan_start() - ACS scan start event indication + * @scan_id: scan request ID + * @vdev_id: vdev/session ID + * + * Indicates the diag event for ACS scan start request + * + * Return: None + */ +void host_log_acs_scan_start(uint32_t scan_id, uint8_t vdev_id); + +/** + * host_log_acs_scan_done() - ACS scan done event indication + * @status: indicating whether ACS scan is successful + * @vdev_id: vdev/session ID + * @scan_id: scan request ID + * + * Indicates the diag event for ACS scan done + * + * Return: None + */ +void host_log_acs_scan_done(const uint8_t *status, uint8_t vdev_id, + uint32_t scan_id); + +/** + * host_log_acs_chan_spect_weight() - ACS channel spectral weight indication + * weight event indication + * @chan: channel number + * @weight: channel weight + * @rssi: RSSI value obtained after scanning + * @bss_count: number of BSS detected on this channel + * + * Indicates a diag event for ACS channel weight evaluation result + * + * Return: None + */ +void host_log_acs_chan_spect_weight(uint16_t chan, uint16_t weight, + int32_t rssi, uint16_t bss_count); + +/** + * host_log_acs_best_chan() - ACS best channel event indication + * @chan: channel number + * @weight: channel weight + * + * Indicates the best channel has been selected after ACS + * + * Return: None + */ +void host_log_acs_best_chan(uint16_t chan, uint16_t weight); + +#else +static inline void qdf_wow_wakeup_host_event(uint8_t wow_wakeup_cause) +{ + return; +} + +static inline void host_log_acs_req_event(uint8_t *intf, const uint8_t *hw_mode, + uint16_t bw, uint8_t ht, uint8_t vht, + uint16_t chan_start, + uint16_t chan_end) +{ +} + +static inline void host_log_acs_scan_start(uint32_t scan_id, uint8_t vdev_id) +{ +} + +static inline void host_log_acs_scan_done(const uint8_t *status, + uint8_t vdev_id, uint32_t scan_id) +{ +} + +static inline void host_log_acs_chan_spect_weight(uint16_t chan, + uint16_t weight, int32_t rssi, + uint16_t bss_count) +{ +} + +static inline void host_log_acs_best_chan(uint16_t chan, uint32_t weight) +{ +} +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __I_HOST_DIAG_CORE_EVENT_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/i_host_diag_core_log.h b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/i_host_diag_core_log.h new file mode 100644 index 0000000000000000000000000000000000000000..0d0f770bf1e0e3bdcfd497fcfe9ba8696a18287f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/i_host_diag_core_log.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(__I_HOST_DIAG_CORE_LOG_H) +#define __I_HOST_DIAG_CORE_LOG_H + +#include + +/**========================================================================= + + \file i_host_diag_core_event.h + + \brief android-specific definitions for WLAN UTIL DIAG logs + + ========================================================================*/ + +/* $Header$ */ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include +#include + +/*-------------------------------------------------------------------------- + Preprocessor definitions and constants + ------------------------------------------------------------------------*/ +/* FIXME To be removed when DIAG support is added. This definiton should be */ +/* picked from log.h file above. */ +typedef struct { + /* Specifies the length, in bytes of the entry, including this header. */ + uint16_t len; + + /* Specifies the log code for the entry */ + uint16_t code; + + /*Time Stamp lo */ + uint32_t ts_lo; + + /*Time Stamp hi */ + uint32_t ts_hi; +} __packed log_hdr_type; + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +void host_diag_log_set_code(void *ptr, uint16_t code); +void host_diag_log_set_length(void *ptr, uint16_t length); +void host_diag_log_set_timestamp(void *plog_hdr_ptr); +void host_diag_log_submit(void *plog_hdr_ptr); + +/*--------------------------------------------------------------------------- + Allocate an event payload holder + ---------------------------------------------------------------------------*/ + +#define WLAN_HOST_DIAG_LOG_ALLOC(payload_ptr, payload_type, log_code) \ + do { \ + payload_ptr = (payload_type *)qdf_mem_malloc(sizeof(payload_type)); \ + if (payload_ptr) { \ + host_diag_log_set_code(payload_ptr, log_code); \ + host_diag_log_set_length(payload_ptr, sizeof(payload_type)); \ + } \ + } while (0) + +/*--------------------------------------------------------------------------- + Report the event + ---------------------------------------------------------------------------*/ +#define WLAN_HOST_DIAG_LOG_REPORT(payload_ptr) \ + do { \ + if (payload_ptr) { \ + host_diag_log_submit(payload_ptr); \ + qdf_mem_free(payload_ptr); \ + } \ + } while (0) + +/*--------------------------------------------------------------------------- + Free the payload + ---------------------------------------------------------------------------*/ +#define WLAN_HOST_DIAG_LOG_FREE(payload_ptr) \ + do { \ + if (payload_ptr) { \ + qdf_mem_free(payload_ptr); \ + } \ + } while (0) + +#else /* FEATURE_WLAN_DIAG_SUPPORT */ + +#define WLAN_HOST_DIAG_LOG_ALLOC(payload_ptr, payload_type, log_code) +#define WLAN_HOST_DIAG_LOG_REPORT(payload_ptr) +#define WLAN_HOST_DIAG_LOG_FREE(payload_ptr) + +static inline void host_diag_log_set_code(void *ptr, uint16_t code) +{ +} + +static inline void host_diag_log_set_length(void *ptr, uint16_t length) +{ +} +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ + +/*------------------------------------------------------------------------- + Function declarations and documenation + ------------------------------------------------------------------------*/ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __I_HOST_DIAG_CORE_LOG_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/logging/inc/wlan_logging_sock_svc.h b/drivers/staging/qca-wifi-host-cmn/utils/logging/inc/wlan_logging_sock_svc.h new file mode 100644 index 0000000000000000000000000000000000000000..7db67a26664318c06a090d54d532d9f4e52d7775 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/logging/inc/wlan_logging_sock_svc.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/****************************************************************************** +* wlan_logging_sock_svc.h +* +******************************************************************************/ + +#ifndef WLAN_LOGGING_SOCK_SVC_H +#define WLAN_LOGGING_SOCK_SVC_H + +#include +#include +#include +#include + +int wlan_logging_sock_init_svc(void); +int wlan_logging_sock_deinit_svc(void); +int wlan_log_to_user(QDF_TRACE_LEVEL log_level, char *to_be_sent, int length); + +/** + * wlan_logging_set_flush_timer() - Sets the time period for log flush timer + * @milliseconds: Time period in milliseconds + * + * This function sets the time period interval during which the log buffers + * will be flushed out to user space. Setting this interval can set an + * approximate maximum delay after which any message logged through QDF_TRACE + * will appear at user-space + * + * Return: void + */ +int wlan_logging_set_flush_timer(uint32_t milliseconds); + +#ifdef WLAN_LOGGING_SOCK_SVC_ENABLE +void wlan_logging_set_per_pkt_stats(void); +void wlan_logging_set_fw_flush_complete(void); +void wlan_flush_host_logs_for_fatal(void); +void wlan_logging_set_active(bool active); +void wlan_logging_set_log_to_console(bool log_to_console); +#else +static inline void wlan_flush_host_logs_for_fatal(void) {} +static inline void wlan_logging_set_per_pkt_stats(void) {} +static inline void wlan_logging_set_fw_flush_complete(void) {} +static inline void wlan_logging_set_active(bool active) {} +static inline void wlan_logging_set_log_to_console(bool log_to_console) {} +#endif /* WLAN_LOGGING_SOCK_SVC_ENABLE */ + +#if defined(WLAN_LOGGING_SOCK_SVC_ENABLE) && \ + defined(FEATURE_PKTLOG) && !defined(REMOVE_PKT_LOG) +/** + * wlan_deregister_txrx_packetdump() - tx/rx packet dump + * deregistration + * @pdev_id: id of the datapath pdev handle + * + * This function is used to deregister tx/rx packet dump callbacks + * with ol, pe and htt layers + * + * Return: None + * + */ +void wlan_deregister_txrx_packetdump(uint8_t pdev_id); + +/** + * wlan_register_txrx_packetdump() - tx/rx packet dump + * registration + * @pdev_id: id of the datapath pdev handle + * + * This function is used to register tx/rx packet dump callbacks + * with ol, pe and htt layers + * + * Return: None + * + */ +void wlan_register_txrx_packetdump(uint8_t pdev_id); +#else +static inline void wlan_deregister_txrx_packetdump(uint8_t pdev_id) {} +static inline void wlan_register_txrx_packetdump(uint8_t pdev_id) {} +#endif + +#if defined(WLAN_LOGGING_SOCK_SVC_ENABLE) && defined(FEATURE_WLAN_DIAG_SUPPORT) +void wlan_report_log_completion(uint32_t is_fatal, + uint32_t indicator, + uint32_t reason_code, + uint8_t ring_id); +#else +static inline void wlan_report_log_completion(uint32_t is_fatal, + uint32_t indicator, + uint32_t reason_code, + uint8_t ring_id) +{ + return; +} + +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ + +#if defined(WLAN_LOGGING_SOCK_SVC_ENABLE) && \ + defined(FEATURE_PKTLOG) && !defined(REMOVE_PKT_LOG) +void wlan_pkt_stats_to_logger_thread(void *pl_hdr, void *pkt_dump, void *data); +#else +static inline +void wlan_pkt_stats_to_logger_thread(void *pl_hdr, void *pkt_dump, void *data) +{ +} +#endif + +/** + * enum tx_status - tx status + * @tx_status_ok: successfully sent + acked + * @tx_status_discard: discard - not sent (congestion control) + * @tx_status_no_ack: no_ack - sent, but no ack + * @tx_status_download_fail: download_fail - + * the host could not deliver the tx frame to the target + * @tx_status_peer_del: peer_del - tx completion for + * already deleted peer used for HL case + * + * This enum has tx status types + */ +enum tx_status { + tx_status_ok, + tx_status_discard, + tx_status_no_ack, + tx_status_download_fail, + tx_status_peer_del, +}; + +#endif /* WLAN_LOGGING_SOCK_SVC_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/logging/inc/wlan_roam_debug.h b/drivers/staging/qca-wifi-host-cmn/utils/logging/inc/wlan_roam_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..9b66b843d7a31ad47e4f92c598987a0c3b217805 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/logging/inc/wlan_roam_debug.h @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/* + * DOC: Roaming debug log operations declarations + */ +#ifndef _WLAN_ROAM_DEBUG_H_ +#define _WLAN_ROAM_DEBUG_H_ + + +#define roam_debug(args ...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_ROAM_DEBUG, ## args) + +/** + * struct wlan_roam_debug_rec - roam debug information record definition + * @time: timestamp when record was added + * @operation: identifier for operation, command, event, etc. + * @vdev_id: vdev identifier + * @peer_id: peer_id. Range 0 - 255, 0xffff is invalid peer_id. + * @mac_addr: mac address of peer + * @peer_obj: pointer to peer object + * @arg1: Optional argument #1 + * @arg2: Opttional argument #2 + */ +struct wlan_roam_debug_rec { + uint64_t time; + uint8_t operation; + uint8_t vdev_id; + uint16_t peer_id; + struct qdf_mac_addr mac_addr; + void *peer_obj; + uint32_t arg1; + uint32_t arg2; +}; + +#ifndef WLAN_ROAM_DEBUG_MAX_REC +#define WLAN_ROAM_DEBUG_MAX_REC 256 +#endif + +/** + * struct wlan_roam_debug_info - Buffer to store the wma debug records + * @index: index of the most recent entry in the circular buffer + * @num_max_rec: maximum records stored in the records array + * @rec: array to store wma debug records, used in circular fashion + */ +struct wlan_roam_debug_info { + qdf_atomic_t index; + uint32_t num_max_rec; + struct wlan_roam_debug_rec rec[WLAN_ROAM_DEBUG_MAX_REC]; +}; + +/** + * @DEBUG_PEER_CREATE_SEND: sent peer_create command to firmware + * @DEBUG_PEER_CREATE_RESP: received peer create response + * @DEBUG_PEER_DELETE_SEND: sent peer delete command to firmware + * @DEBUG_PEER_DELETE_RESP: received peer delete response + * @DEBUG_PEER_MAP_EVENT: received peer map event + * @DEBUG_PEER_UNMAP_EVENT: received peer unmap event + * @DEBUG_PEER_UNREF_DELETE: peer reference is decremented + * @DEBUG_DELETING_PEER_OBJ: peer object is deleted + * @DEBUG_ROAM_SYNCH_IND: received roam offload sync indication + * @DEBUG_ROAM_SYNCH_CNF: sent roam offload sync confirmation + * @DEBUG_ROAM_SYNCH_FAIL: received roam sync failure indication + * @DEBUG_ROAM_EVENT: received roam event + * @DEBUG_BUS_SUSPEND: host going into suspend mode + * @DEBUG_BUS_RESUME: host operation resumed + */ + +enum peer_debug_op { + DEBUG_PEER_CREATE_SEND = 0, + DEBUG_PEER_CREATE_RESP, + DEBUG_PEER_DELETE_SEND, + DEBUG_PEER_DELETE_RESP, + DEBUG_PEER_MAP_EVENT, + DEBUG_PEER_UNMAP_EVENT, + DEBUG_PEER_UNREF_DELETE, + DEBUG_DELETING_PEER_OBJ, + DEBUG_ROAM_SYNCH_IND, + DEBUG_ROAM_SYNCH_CNF, + DEBUG_ROAM_SYNCH_FAIL, + DEBUG_ROAM_EVENT, + DEBUG_WOW_ROAM_EVENT, + DEBUG_BUS_SUSPEND, + DEBUG_BUS_RESUME, + DEBUG_WOW_REASON, +}; + +#define DEBUG_INVALID_PEER_ID 0xffff +#define DEBUG_INVALID_VDEV_ID 0xff + +#ifdef FEATURE_ROAM_DEBUG +/** + * wlan_roam_debug_log() - Add a debug log entry to wlan roam debug records + * @vdev_id: vdev identifier + * @op: operation identifier + * @peer_id: peer id + * @mac_addr: mac address of peer, can be NULL + * @peer_obj: peer object address, can be NULL + * @arg1: extra argument #1 + * @arg2: extra argument #2 + * + * Return: none + */ +void wlan_roam_debug_log(uint8_t vdev_id, uint8_t op, + uint16_t peer_id, void *mac_addr, + void *peer_obj, uint32_t arg1, uint32_t arg2); + +/** + * wlan_roam_debug_dump_table() - Print the roam debug log records + * print all the valid debug records in the order of timestamp + * + * Return: none + */ +void wlan_roam_debug_dump_table(void); + +#ifdef WLAN_LOGGING_BUFFERS_DYNAMICALLY +/** + * wlan_roam_debug_init() - Allocate log buffer dynamically + * + * Return: none + */ +void wlan_roam_debug_init(void); +/** + * wlan_roam_debug_deinit() - Free log buffer allocated dynamically + * + * Return: none + */ +void wlan_roam_debug_deinit(void); +#else /* WLAN_LOGGING_BUFFERS_DYNAMICALLY */ +static inline void wlan_roam_debug_init(void) +{ +} + +static inline void wlan_roam_debug_deinit(void) +{ +} +#endif /* WLAN_LOGGING_BUFFERS_DYNAMICALLY */ + +#else /* FEATURE_ROAM_DEBUG */ +static inline void +wlan_roam_debug_log(uint8_t vdev_id, uint8_t op, + uint16_t peer_id, void *mac_addr, + void *peer_obj, uint32_t arg1, uint32_t arg2) +{ +} + +static inline void wlan_roam_debug_dump_table(void) +{ +} + +static inline void wlan_roam_debug_init(void) +{ +} + +static inline void wlan_roam_debug_deinit(void) +{ +} +#endif /* FEATURE_ROAM_DEBUG */ + +#endif /* _WLAN_ROAM_DEBUG_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/logging/src/wlan_logging_sock_svc.c b/drivers/staging/qca-wifi-host-cmn/utils/logging/src/wlan_logging_sock_svc.c new file mode 100644 index 0000000000000000000000000000000000000000..724ae687a4c6096fc868722f2acd626ef4b955db --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/logging/src/wlan_logging_sock_svc.c @@ -0,0 +1,1525 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/****************************************************************************** +* wlan_logging_sock_svc.c +* +******************************************************************************/ + +#ifdef WLAN_LOGGING_SOCK_SVC_ENABLE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "host_diag_core_log.h" +#include + +#ifdef CNSS_GENL +#include +#endif + +#if defined(FEATURE_FW_LOG_PARSING) || defined(FEATURE_WLAN_DIAG_SUPPORT) || \ + defined(FEATURE_PKTLOG) +#include +#include "ani_global.h" +#endif + +#ifdef FEATURE_PKTLOG +#ifndef REMOVE_PKT_LOG +#include "wma.h" +#include "pktlog_ac.h" +#include +#endif +#endif + +#define MAX_NUM_PKT_LOG 32 + +#define LOGGING_TRACE(level, args ...) \ + QDF_TRACE(QDF_MODULE_ID_HDD, level, ## args) + +/* Global variables */ + +#define ANI_NL_MSG_LOG_TYPE 89 +#define ANI_NL_MSG_READY_IND_TYPE 90 +#ifndef MAX_LOGMSG_COUNT +#define MAX_LOGMSG_COUNT 256 +#endif +#define MAX_LOGMSG_LENGTH 2048 +#define MAX_SKBMSG_LENGTH 4096 + +#define WLAN_LOG_BUFFER_SIZE 2048 +#if defined(FEATURE_PKTLOG) && !defined(REMOVE_PKT_LOG) +/** + * Buffer to accommodate - + * pktlog buffer (2048 bytes) + * ath_pktlog_hdr (16 bytes) + * pkt_dump (8 bytes) + * extra padding (40 bytes) + * + * Note: pktlog buffer size is dependent on RX_BUFFER_SIZE and + * HTT_T2H_MAX_MSG_SIZE. Adjust WLAN_LOG_BUFFER_SIZE + * based on the above mentioned macros. + */ +#define ATH_PKTLOG_HDR_SIZE (sizeof(struct ath_pktlog_hdr)) +#define PKT_DUMP_HDR_SIZE (sizeof(struct packet_dump)) +#define EXTRA_PADDING 40 + +#define MAX_PKTSTATS_LENGTH \ + ((WLAN_LOG_BUFFER_SIZE) + (ATH_PKTLOG_HDR_SIZE) + \ + (PKT_DUMP_HDR_SIZE) + (EXTRA_PADDING)) +#else +#define MAX_PKTSTATS_LENGTH WLAN_LOG_BUFFER_SIZE +#endif /* FEATURE_PKTLOG */ + +#define MAX_PKTSTATS_BUFF 16 +#define HOST_LOG_DRIVER_MSG 0x001 +#define HOST_LOG_PER_PKT_STATS 0x002 +#define HOST_LOG_FW_FLUSH_COMPLETE 0x003 +#define DIAG_TYPE_LOGS 1 +#define PTT_MSG_DIAG_CMDS_TYPE 0x5050 + +struct log_msg { + struct list_head node; + unsigned int radio; + unsigned int index; + /* indicates the current filled log length in logbuf */ + unsigned int filled_length; + /* + * Buf to hold the log msg + * tAniHdr + log + */ + char logbuf[MAX_LOGMSG_LENGTH]; +}; + +/** + * struct packet_dump - This data structure contains the + * Tx/Rx packet stats + * @status: Status + * @type: Type + * @driver_ts: driver timestamp + * @fw_ts: fw timestamp + */ +struct packet_dump { + unsigned char status; + unsigned char type; + uint32_t driver_ts; + uint16_t fw_ts; +} __attribute__((__packed__)); + +/** + * struct pkt_stats_msg - This data structure contains the + * pkt stats node for link list + * @node: LinkList node + * @node: Pointer to skb + */ +struct pkt_stats_msg { + struct list_head node; + struct sk_buff *skb; +}; + +#define MAX_FLUSH_TIMER_PERIOD_VALUE 3600000 /* maximum of 1 hour (in ms) */ +struct wlan_logging { + /* Log Fatal and ERROR to console */ + bool log_to_console; + /* Number of buffers to be used for logging */ + uint32_t num_buf; + uint32_t buffer_length; + /* Lock to synchronize access to shared logging resource */ + spinlock_t spin_lock; + /* Holds the free node which can be used for filling logs */ + struct list_head free_list; + /* Holds the filled nodes which needs to be indicated to APP */ + struct list_head filled_list; + /* Wait queue for Logger thread */ + wait_queue_head_t wait_queue; + /* Logger thread */ + struct task_struct *thread; + /* Logging thread sets this variable on exit */ + struct completion shutdown_comp; + /* Indicates to logger thread to exit */ + bool exit; + /* Holds number of dropped logs */ + unsigned int drop_count; + /* current logbuf to which the log will be filled to */ + struct log_msg *pcur_node; + /* Event flag used for wakeup and post indication*/ + unsigned long eventFlag; + /* Indicates logger thread is activated */ + bool is_active; + /* Flush completion check */ + bool is_flush_complete; + /* paramaters for pkt stats */ + struct list_head pkt_stat_free_list; + struct list_head pkt_stat_filled_list; + struct pkt_stats_msg *pkt_stats_pcur_node; + unsigned int pkt_stat_drop_cnt; + spinlock_t pkt_stats_lock; + unsigned int pkt_stats_msg_idx; + qdf_timer_t flush_timer; + bool is_flush_timer_initialized; + uint32_t flush_timer_period; + qdf_spinlock_t flush_timer_lock; +}; + +static struct wlan_logging gwlan_logging; +static struct pkt_stats_msg *gpkt_stats_buffers; + +#ifdef WLAN_LOGGING_BUFFERS_DYNAMICALLY + +static struct log_msg *gplog_msg; + +static inline QDF_STATUS allocate_log_msg_buffer(void) +{ + gplog_msg = vzalloc(MAX_LOGMSG_COUNT * sizeof(*gplog_msg)); + + return gplog_msg ? QDF_STATUS_SUCCESS : QDF_STATUS_E_NOMEM; +} + +static inline void free_log_msg_buffer(void) +{ + vfree(gplog_msg); + gplog_msg = NULL; +} + +#else +static struct log_msg gplog_msg[MAX_LOGMSG_COUNT]; + +static inline QDF_STATUS allocate_log_msg_buffer(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void free_log_msg_buffer(void) +{ } +#endif + +/* Need to call this with spin_lock acquired */ +static int wlan_queue_logmsg_for_app(void) +{ + char *ptr; + int ret = 0; + ptr = &gwlan_logging.pcur_node->logbuf[sizeof(tAniHdr)]; + ptr[gwlan_logging.pcur_node->filled_length] = '\0'; + + *(unsigned short *)(gwlan_logging.pcur_node->logbuf) = + ANI_NL_MSG_LOG_TYPE; + *(unsigned short *)(gwlan_logging.pcur_node->logbuf + 2) = + gwlan_logging.pcur_node->filled_length; + list_add_tail(&gwlan_logging.pcur_node->node, + &gwlan_logging.filled_list); + + if (!list_empty(&gwlan_logging.free_list)) { + /* Get buffer from free list */ + gwlan_logging.pcur_node = + (struct log_msg *)(gwlan_logging.free_list.next); + list_del_init(gwlan_logging.free_list.next); + } else if (!list_empty(&gwlan_logging.filled_list)) { + /* Get buffer from filled list */ + /* This condition will drop the packet from being + * indicated to app + */ + gwlan_logging.pcur_node = + (struct log_msg *)(gwlan_logging.filled_list.next); + ++gwlan_logging.drop_count; + list_del_init(gwlan_logging.filled_list.next); + ret = 1; + } + + /* Reset the current node values */ + gwlan_logging.pcur_node->filled_length = 0; + return ret; +} + +static const char *current_process_name(void) +{ + if (in_irq()) + return "irq"; + + if (in_softirq()) + return "soft_irq"; + + return current->comm; +} + +/** + * wlan_add_user_log_time_stamp() - populate firmware and kernel timestamps + * @tbuf: Pointer to time stamp buffer + * @tbuf_sz: Time buffer size + * @ts: Time stamp value + * + * For adrastea time stamp is QTIMER raw tick which will be used by cnss_diag + * to convert it into user visible time stamp. In adrstea FW also uses QTIMER + * raw ticks which is needed to synchronize host and fw log time stamps + * + * Also add logcat timestamp so that driver logs and + * logcat logs can be co-related + * + * For discrete solution e.g rome use system tick and convert it into + * seconds.milli seconds + * + * Return: number of characters written in target buffer not including + * trailing '/0' + */ +static int wlan_add_user_log_time_stamp(char *tbuf, size_t tbuf_sz, uint64_t ts) +{ + char time_buf[20]; + + qdf_get_time_of_the_day_in_hr_min_sec_usec(time_buf, sizeof(time_buf)); + + return scnprintf(tbuf, tbuf_sz, "[%.6s][0x%llx]%s", + current_process_name(), (unsigned long long)ts, + time_buf); +} + +#ifdef WLAN_MAX_LOGS_PER_SEC +static qdf_time_t __log_window_end_ticks; +static qdf_atomic_t __log_window_count; + +/** + * assert_on_excessive_logging() - Check for and panic on excessive logging + * + * Track logging count using a quasi-tumbling window, 1 second long. If the max + * logging count for a given window is exceeded, panic. + * + * Return: None + */ +static void assert_on_excessive_logging(void) +{ + qdf_time_t now = qdf_system_ticks(); + + /* + * If 'now' is more recent than the end of the window, reset. + * + * Note: This is not thread safe, and can result in more than one reset. + * For our purposes, this is fine. + */ + if (!qdf_atomic_read(&__log_window_count)) { + __log_window_end_ticks = now + qdf_system_ticks_per_sec; + } else if (qdf_system_time_after(now, __log_window_end_ticks)) { + __log_window_end_ticks = now + qdf_system_ticks_per_sec; + qdf_atomic_set(&__log_window_count, 0); + } + + /* this _is_ thread safe, and results in at most one panic */ + if (qdf_atomic_inc_return(&__log_window_count) == WLAN_MAX_LOGS_PER_SEC) + QDF_DEBUG_PANIC("Exceeded %d logs per second", + WLAN_MAX_LOGS_PER_SEC); +} +#else +static inline void assert_on_excessive_logging(void) { } +#endif /* WLAN_MAX_LOGS_PER_SEC */ + +static inline void +log_to_console(QDF_TRACE_LEVEL level, const char *timestamp, const char *msg) +{ + switch (level) { + case QDF_TRACE_LEVEL_FATAL: + pr_alert("%s %s\n", timestamp, msg); + assert_on_excessive_logging(); + break; + case QDF_TRACE_LEVEL_ERROR: + pr_err("%s %s\n", timestamp, msg); + assert_on_excessive_logging(); + break; + case QDF_TRACE_LEVEL_WARN: + pr_warn("%s %s\n", timestamp, msg); + assert_on_excessive_logging(); + break; + case QDF_TRACE_LEVEL_INFO: + pr_info("%s %s\n", timestamp, msg); + assert_on_excessive_logging(); + break; + case QDF_TRACE_LEVEL_INFO_HIGH: + case QDF_TRACE_LEVEL_INFO_MED: + case QDF_TRACE_LEVEL_INFO_LOW: + case QDF_TRACE_LEVEL_DEBUG: + default: + /* these levels should not be logged to console */ + break; + } +} + +int wlan_log_to_user(QDF_TRACE_LEVEL log_level, char *to_be_sent, int length) +{ + char *ptr; + char tbuf[60]; + int tlen; + int total_log_len; + unsigned int *pfilled_length; + bool wake_up_thread = false; + unsigned long flags; + uint64_t ts; + + /* Add the current time stamp */ + ts = qdf_get_log_timestamp(); + tlen = wlan_add_user_log_time_stamp(tbuf, sizeof(tbuf), ts); + + /* if logging isn't up yet, just dump to dmesg */ + if (!gwlan_logging.is_active) { + log_to_console(log_level, tbuf, to_be_sent); + return 0; + } + + /* 1+1 indicate '\n'+'\0' */ + total_log_len = length + tlen + 1 + 1; + + spin_lock_irqsave(&gwlan_logging.spin_lock, flags); + /* wlan logging svc resources are not yet initialized */ + if (!gwlan_logging.pcur_node) { + spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags); + return -EIO; + } + + pfilled_length = &gwlan_logging.pcur_node->filled_length; + + /* Check if we can accommodate more log into current node/buffer */ + if ((MAX_LOGMSG_LENGTH - (*pfilled_length + + sizeof(tAniNlHdr))) < total_log_len) { + wake_up_thread = true; + wlan_queue_logmsg_for_app(); + pfilled_length = &gwlan_logging.pcur_node->filled_length; + } + + ptr = &gwlan_logging.pcur_node->logbuf[sizeof(tAniHdr)]; + + if (unlikely(MAX_LOGMSG_LENGTH < (sizeof(tAniNlHdr) + total_log_len))) { + /* + * Assumption here is that we receive logs which is less than + * MAX_LOGMSG_LENGTH, where we can accommodate the + * tAniNlHdr + [context][timestamp] + log + * If log length is over MAX_LOGMSG_LENGTH, + * the overflow part will be discarded. + */ + length = MAX_LOGMSG_LENGTH - sizeof(tAniNlHdr) - tlen - 2; + /* + * QDF_ASSERT if complete log was not accommodated into + * the available buffer. + */ + QDF_ASSERT(0); + } + + memcpy(&ptr[*pfilled_length], tbuf, tlen); + memcpy(&ptr[*pfilled_length + tlen], to_be_sent, length); + *pfilled_length += tlen + length; + ptr[*pfilled_length] = '\n'; + *pfilled_length += 1; + + spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags); + + /* Wakeup logger thread */ + if (wake_up_thread) { + set_bit(HOST_LOG_DRIVER_MSG, &gwlan_logging.eventFlag); + wake_up_interruptible(&gwlan_logging.wait_queue); + } + + if (gwlan_logging.log_to_console) + log_to_console(log_level, tbuf, to_be_sent); + + return 0; +} + +/** + * nl_srv_bcast_host_logs() - Wrapper to send bcast msgs to host logs mcast grp + * @skb: sk buffer pointer + * + * Sends the bcast message to host logs multicast group with generic nl socket + * if CNSS_GENL is enabled. Else, use the legacy netlink socket to send. + * + * Return: zero on success, error code otherwise + */ +#ifdef CNSS_GENL +static int nl_srv_bcast_host_logs(struct sk_buff *skb) +{ + return nl_srv_bcast(skb, CLD80211_MCGRP_HOST_LOGS, ANI_NL_MSG_LOG); +} +#else +static int nl_srv_bcast_host_logs(struct sk_buff *skb) +{ + return nl_srv_bcast(skb); +} +#endif + +#ifndef REMOVE_PKT_LOG +/** + * pkt_stats_fill_headers() - This function adds headers to skb + * @skb: skb to which headers need to be added + * + * Return: 0 on success or Errno on failure + */ +static int pkt_stats_fill_headers(struct sk_buff *skb) +{ + struct host_log_pktlog_info cds_pktlog; + int cds_pkt_size = sizeof(struct host_log_pktlog_info); + tAniNlHdr msg_header; + int extra_header_len, nl_payload_len; + static int nlmsg_seq; + int diag_type; + + qdf_mem_zero(&cds_pktlog, cds_pkt_size); + cds_pktlog.version = VERSION_LOG_WLAN_PKT_LOG_INFO_C; + cds_pktlog.buf_len = skb->len; + cds_pktlog.seq_no = gwlan_logging.pkt_stats_msg_idx++; + host_diag_log_set_code(&cds_pktlog, LOG_WLAN_PKT_LOG_INFO_C); + host_diag_log_set_length(&cds_pktlog.log_hdr, skb->len + + cds_pkt_size); + + if (unlikely(skb_headroom(skb) < cds_pkt_size)) { + qdf_nofl_err("VPKT [%d]: Insufficient headroom, head[%pK], data[%pK], req[%zu]", + __LINE__, skb->head, skb->data, + sizeof(msg_header)); + return -EIO; + } + + qdf_mem_copy(skb_push(skb, cds_pkt_size), + &cds_pktlog, cds_pkt_size); + + if (unlikely(skb_headroom(skb) < sizeof(int))) { + qdf_nofl_err("VPKT [%d]: Insufficient headroom, head[%pK], data[%pK], req[%zu]", + __LINE__, skb->head, skb->data, + sizeof(int)); + return -EIO; + } + + diag_type = DIAG_TYPE_LOGS; + qdf_mem_copy(skb_push(skb, sizeof(int)), &diag_type, sizeof(int)); + + extra_header_len = sizeof(msg_header.radio) + sizeof(tAniHdr) + + sizeof(struct nlmsghdr); + nl_payload_len = extra_header_len + skb->len; + + msg_header.nlh.nlmsg_type = ANI_NL_MSG_PUMAC; + msg_header.nlh.nlmsg_len = nl_payload_len; + msg_header.nlh.nlmsg_flags = NLM_F_REQUEST; + msg_header.nlh.nlmsg_pid = 0; + msg_header.nlh.nlmsg_seq = nlmsg_seq++; + msg_header.radio = 0; + msg_header.wmsg.type = PTT_MSG_DIAG_CMDS_TYPE; + msg_header.wmsg.length = cpu_to_be16(skb->len); + + if (unlikely(skb_headroom(skb) < sizeof(msg_header))) { + qdf_nofl_err("VPKT [%d]: Insufficient headroom, head[%pK], data[%pK], req[%zu]", + __LINE__, skb->head, skb->data, + sizeof(msg_header)); + return -EIO; + } + + qdf_mem_copy(skb_push(skb, sizeof(msg_header)), &msg_header, + sizeof(msg_header)); + + return 0; +} + +/** + * nl_srv_bcast_diag() - Wrapper to send bcast msgs to diag events mcast grp + * @skb: sk buffer pointer + * + * Sends the bcast message to diag events multicast group with generic nl socket + * if CNSS_GENL is enabled. Else, use the legacy netlink socket to send. + * + * Return: zero on success, error code otherwise + */ +static int nl_srv_bcast_diag(struct sk_buff *skb) +{ +#ifdef CNSS_GENL + return nl_srv_bcast(skb, CLD80211_MCGRP_DIAG_EVENTS, ANI_NL_MSG_PUMAC); +#else + return nl_srv_bcast(skb); +#endif +} + +/** + * pktlog_send_per_pkt_stats_to_user() - This function is used to send the per + * packet statistics to the user + * + * This function is used to send the per packet statistics to the user + * + * Return: Success if the message is posted to user + */ +static int pktlog_send_per_pkt_stats_to_user(void) +{ + int ret = -1; + struct pkt_stats_msg *pstats_msg; + unsigned long flags; + struct sk_buff *skb_new = NULL; + static int rate_limit; + bool free_old_skb = false; + + while (!list_empty(&gwlan_logging.pkt_stat_filled_list) + && !gwlan_logging.exit) { + skb_new = dev_alloc_skb(MAX_SKBMSG_LENGTH); + if (!skb_new) { + if (!rate_limit) { + qdf_nofl_err("%s: dev_alloc_skb() failed for msg size[%d] drop count = %u", + __func__, MAX_SKBMSG_LENGTH, + gwlan_logging.drop_count); + } + rate_limit = 1; + ret = -ENOMEM; + break; + } + + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, flags); + + pstats_msg = (struct pkt_stats_msg *) + (gwlan_logging.pkt_stat_filled_list.next); + list_del_init(gwlan_logging.pkt_stat_filled_list.next); + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, flags); + + ret = pkt_stats_fill_headers(pstats_msg->skb); + if (ret < 0) { + qdf_nofl_err("%s failed to fill headers %d", + __func__, ret); + free_old_skb = true; + goto err; + } + ret = nl_srv_bcast_diag(pstats_msg->skb); + if (ret < 0) { + qdf_nofl_info("%s: Send Failed %d drop_count = %u", + __func__, ret, + ++gwlan_logging.pkt_stat_drop_cnt); + } else { + ret = 0; + } +err: + /* + * Free old skb in case or error before assigning new skb + * to the free list. + */ + if (free_old_skb) + dev_kfree_skb(pstats_msg->skb); + + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, flags); + pstats_msg->skb = skb_new; + list_add_tail(&pstats_msg->node, + &gwlan_logging.pkt_stat_free_list); + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, flags); + ret = 0; + } + + return ret; + +} +#else +static inline +int pktlog_send_per_pkt_stats_to_user(void) +{ + return 0; +} +#endif + +static int send_filled_buffers_to_user(void) +{ + int ret = -1; + struct log_msg *plog_msg; + int payload_len; + int tot_msg_len; + tAniNlHdr *wnl; + struct sk_buff *skb = NULL; + struct nlmsghdr *nlh; + static int nlmsg_seq; + unsigned long flags; + static int rate_limit; + + while (!list_empty(&gwlan_logging.filled_list) + && !gwlan_logging.exit) { + + skb = dev_alloc_skb(MAX_LOGMSG_LENGTH); + if (!skb) { + if (!rate_limit) { + qdf_nofl_err("%s: dev_alloc_skb() failed for msg size[%d] drop count = %u", + __func__, MAX_LOGMSG_LENGTH, + gwlan_logging.drop_count); + } + rate_limit = 1; + ret = -ENOMEM; + break; + } + rate_limit = 0; + + spin_lock_irqsave(&gwlan_logging.spin_lock, flags); + + plog_msg = (struct log_msg *) + (gwlan_logging.filled_list.next); + list_del_init(gwlan_logging.filled_list.next); + spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags); + /* 4 extra bytes for the radio idx */ + payload_len = plog_msg->filled_length + + sizeof(wnl->radio) + sizeof(tAniHdr); + + tot_msg_len = NLMSG_SPACE(payload_len); + nlh = nlmsg_put(skb, 0, nlmsg_seq++, + ANI_NL_MSG_LOG, payload_len, NLM_F_REQUEST); + if (!nlh) { + spin_lock_irqsave(&gwlan_logging.spin_lock, flags); + list_add_tail(&plog_msg->node, + &gwlan_logging.free_list); + spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags); + qdf_nofl_err("%s: drop_count = %u", __func__, + ++gwlan_logging.drop_count); + qdf_nofl_err("%s: nlmsg_put() failed for msg size[%d]", + __func__, tot_msg_len); + dev_kfree_skb(skb); + skb = NULL; + ret = -EINVAL; + continue; + } + + wnl = (tAniNlHdr *) nlh; + wnl->radio = plog_msg->radio; + memcpy(&wnl->wmsg, plog_msg->logbuf, + plog_msg->filled_length + sizeof(tAniHdr)); + + spin_lock_irqsave(&gwlan_logging.spin_lock, flags); + list_add_tail(&plog_msg->node, &gwlan_logging.free_list); + spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags); + + ret = nl_srv_bcast_host_logs(skb); + /* print every 64th drop count */ + if (ret < 0 && (!(gwlan_logging.drop_count % 0x40))) { + qdf_nofl_err("%s: Send Failed %d drop_count = %u", + __func__, ret, ++gwlan_logging.drop_count); + } + } + + return ret; +} + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +/** + * wlan_report_log_completion() - Report bug report completion to userspace + * @is_fatal: Type of event, fatal or not + * @indicator: Source of bug report, framework/host/firmware + * @reason_code: Reason for triggering bug report + * @ring_id: Ring id of logging entities + * + * This function is used to report the bug report completion to userspace + * + * Return: None + */ +void wlan_report_log_completion(uint32_t is_fatal, + uint32_t indicator, + uint32_t reason_code, + uint8_t ring_id) +{ + WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, + struct host_event_wlan_log_complete); + + wlan_diag_event.is_fatal = is_fatal; + wlan_diag_event.indicator = indicator; + wlan_diag_event.reason_code = reason_code; + wlan_diag_event.reserved = ring_id; + + WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, EVENT_WLAN_LOG_COMPLETE); +} +#endif + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +/** + * send_flush_completion_to_user() - Indicate flush completion to the user + * @ring_id: Ring id of logging entities + * + * This function is used to send the flush completion message to user space + * + * Return: None + */ +static void send_flush_completion_to_user(uint8_t ring_id) +{ + uint32_t is_fatal, indicator, reason_code; + bool recovery_needed; + + cds_get_and_reset_log_completion(&is_fatal, + &indicator, &reason_code, &recovery_needed); + + /* Error on purpose, so that it will get logged in the kmsg */ + LOGGING_TRACE(QDF_TRACE_LEVEL_DEBUG, + "%s: Sending flush done to userspace reason code %d", + __func__, reason_code); + + wlan_report_log_completion(is_fatal, indicator, reason_code, ring_id); + + if (recovery_needed) + cds_trigger_recovery(QDF_REASON_UNSPECIFIED); +} +#endif + +static void setup_flush_timer(void) +{ + qdf_spin_lock(&gwlan_logging.flush_timer_lock); + if (!gwlan_logging.is_flush_timer_initialized || + (gwlan_logging.flush_timer_period == 0)) { + qdf_spin_unlock(&gwlan_logging.flush_timer_lock); + return; + } + qdf_timer_mod(&gwlan_logging.flush_timer, + gwlan_logging.flush_timer_period); + qdf_spin_unlock(&gwlan_logging.flush_timer_lock); +} + +/** + * wlan_logging_thread() - The WLAN Logger thread + * @Arg - pointer to the HDD context + * + * This thread logs log message to App registered for the logs. + */ +static int wlan_logging_thread(void *Arg) +{ + int ret_wait_status = 0; + int ret = 0; + unsigned long flags; + + while (!gwlan_logging.exit) { + setup_flush_timer(); + ret_wait_status = + wait_event_interruptible(gwlan_logging.wait_queue, + (!list_empty + (&gwlan_logging.filled_list) + || test_bit( + HOST_LOG_DRIVER_MSG, + &gwlan_logging.eventFlag) + || test_bit( + HOST_LOG_PER_PKT_STATS, + &gwlan_logging.eventFlag) + || test_bit( + HOST_LOG_FW_FLUSH_COMPLETE, + &gwlan_logging.eventFlag) + || gwlan_logging.exit)); + + if (ret_wait_status == -ERESTARTSYS) { + qdf_nofl_err("%s: wait_event_interruptible returned -ERESTARTSYS", + __func__); + break; + } + + if (gwlan_logging.exit) + break; + + + if (test_and_clear_bit(HOST_LOG_DRIVER_MSG, + &gwlan_logging.eventFlag)) { + ret = send_filled_buffers_to_user(); + if (-ENOMEM == ret) + msleep(200); +#ifdef FEATURE_WLAN_DIAG_SUPPORT + if (WLAN_LOG_INDICATOR_HOST_ONLY == + cds_get_log_indicator()) { + send_flush_completion_to_user( + RING_ID_DRIVER_DEBUG); + } +#endif + } + + if (test_and_clear_bit(HOST_LOG_PER_PKT_STATS, + &gwlan_logging.eventFlag)) { + ret = pktlog_send_per_pkt_stats_to_user(); + if (-ENOMEM == ret) + msleep(200); + } + + if (test_and_clear_bit(HOST_LOG_FW_FLUSH_COMPLETE, + &gwlan_logging.eventFlag)) { + /* Flush bit could have been set while we were mid + * way in the logging thread. So, need to check other + * buffers like log messages, per packet stats again + * to flush any residual data in them + */ + if (gwlan_logging.is_flush_complete == true) { + gwlan_logging.is_flush_complete = false; +#ifdef FEATURE_WLAN_DIAG_SUPPORT + send_flush_completion_to_user( + RING_ID_DRIVER_DEBUG); +#endif + } else { + gwlan_logging.is_flush_complete = true; + /* Flush all current host logs*/ + spin_lock_irqsave(&gwlan_logging.spin_lock, + flags); + wlan_queue_logmsg_for_app(); + spin_unlock_irqrestore(&gwlan_logging.spin_lock, + flags); + set_bit(HOST_LOG_DRIVER_MSG, + &gwlan_logging.eventFlag); + set_bit(HOST_LOG_PER_PKT_STATS, + &gwlan_logging.eventFlag); + set_bit(HOST_LOG_FW_FLUSH_COMPLETE, + &gwlan_logging.eventFlag); + wake_up_interruptible( + &gwlan_logging.wait_queue); + } + } + } + + complete_and_exit(&gwlan_logging.shutdown_comp, 0); + + return 0; +} + +void wlan_logging_set_active(bool active) +{ + gwlan_logging.is_active = active; +} + +void wlan_logging_set_log_to_console(bool log_to_console) +{ + gwlan_logging.log_to_console = log_to_console; +} + +static void flush_log_buffers_timer(void *dummy) +{ + wlan_flush_host_logs_for_fatal(); +} + +int wlan_logging_set_flush_timer(uint32_t milliseconds) +{ + if (milliseconds > MAX_FLUSH_TIMER_PERIOD_VALUE) { + QDF_TRACE_ERROR(QDF_MODULE_ID_QDF, + "ERROR! value should be (0 - %d)\n", + MAX_FLUSH_TIMER_PERIOD_VALUE); + return -EINVAL; + } + if (!gwlan_logging.is_active) { + QDF_TRACE_ERROR(QDF_MODULE_ID_QDF, + "WLAN-Logging not active"); + return -EINVAL; + } + qdf_spin_lock(&gwlan_logging.flush_timer_lock); + if (!gwlan_logging.is_flush_timer_initialized) { + qdf_spin_unlock(&gwlan_logging.flush_timer_lock); + return -EINVAL; + } + gwlan_logging.flush_timer_period = milliseconds; + if (milliseconds) { + qdf_timer_mod(&gwlan_logging.flush_timer, + gwlan_logging.flush_timer_period); + } + qdf_spin_unlock(&gwlan_logging.flush_timer_lock); + return 0; +} + +static void flush_timer_init(void) +{ + qdf_spinlock_create(&gwlan_logging.flush_timer_lock); + qdf_timer_init(NULL, &gwlan_logging.flush_timer, + flush_log_buffers_timer, NULL, + QDF_TIMER_TYPE_SW); + gwlan_logging.is_flush_timer_initialized = true; + gwlan_logging.flush_timer_period = 0; +} + +int wlan_logging_sock_init_svc(void) +{ + int i = 0, j, pkt_stats_size; + unsigned long irq_flag; + + flush_timer_init(); + spin_lock_init(&gwlan_logging.spin_lock); + spin_lock_init(&gwlan_logging.pkt_stats_lock); + + gwlan_logging.log_to_console = false; + gwlan_logging.num_buf = MAX_LOGMSG_COUNT; + gwlan_logging.buffer_length = MAX_LOGMSG_LENGTH; + + if (allocate_log_msg_buffer() != QDF_STATUS_SUCCESS) { + qdf_nofl_err("%s: Could not allocate memory for log_msg", + __func__); + return -ENOMEM; + } + + spin_lock_irqsave(&gwlan_logging.spin_lock, irq_flag); + INIT_LIST_HEAD(&gwlan_logging.free_list); + INIT_LIST_HEAD(&gwlan_logging.filled_list); + + for (i = 0; i < gwlan_logging.num_buf; i++) { + list_add(&gplog_msg[i].node, &gwlan_logging.free_list); + gplog_msg[i].index = i; + } + gwlan_logging.pcur_node = (struct log_msg *) + (gwlan_logging.free_list.next); + list_del_init(gwlan_logging.free_list.next); + spin_unlock_irqrestore(&gwlan_logging.spin_lock, irq_flag); + + /* Initialize the pktStats data structure here */ + pkt_stats_size = sizeof(struct pkt_stats_msg); + gpkt_stats_buffers = vmalloc(MAX_PKTSTATS_BUFF * pkt_stats_size); + if (!gpkt_stats_buffers) { + qdf_nofl_err("%s: Could not allocate memory for Pkt stats", + __func__); + goto err1; + } + qdf_mem_zero(gpkt_stats_buffers, + MAX_PKTSTATS_BUFF * pkt_stats_size); + + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, irq_flag); + gwlan_logging.pkt_stats_msg_idx = 0; + INIT_LIST_HEAD(&gwlan_logging.pkt_stat_free_list); + INIT_LIST_HEAD(&gwlan_logging.pkt_stat_filled_list); + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, irq_flag); + + + for (i = 0; i < MAX_PKTSTATS_BUFF; i++) { + gpkt_stats_buffers[i].skb = dev_alloc_skb(MAX_PKTSTATS_LENGTH); + if (!gpkt_stats_buffers[i].skb) { + qdf_nofl_err("%s: Memory alloc failed for skb", + __func__); + /* free previously allocated skb and return */ + for (j = 0; j < i ; j++) + dev_kfree_skb(gpkt_stats_buffers[j].skb); + goto err2; + } + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, irq_flag); + list_add(&gpkt_stats_buffers[i].node, + &gwlan_logging.pkt_stat_free_list); + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, irq_flag); + } + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, irq_flag); + gwlan_logging.pkt_stats_pcur_node = (struct pkt_stats_msg *) + (gwlan_logging.pkt_stat_free_list.next); + list_del_init(gwlan_logging.pkt_stat_free_list.next); + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, irq_flag); + /* Pkt Stats intialization done */ + + init_waitqueue_head(&gwlan_logging.wait_queue); + gwlan_logging.exit = false; + clear_bit(HOST_LOG_DRIVER_MSG, &gwlan_logging.eventFlag); + clear_bit(HOST_LOG_PER_PKT_STATS, &gwlan_logging.eventFlag); + clear_bit(HOST_LOG_FW_FLUSH_COMPLETE, &gwlan_logging.eventFlag); + init_completion(&gwlan_logging.shutdown_comp); + gwlan_logging.thread = kthread_create(wlan_logging_thread, NULL, + "wlan_logging_thread"); + if (IS_ERR(gwlan_logging.thread)) { + qdf_nofl_err("%s: Could not Create LogMsg Thread Controller", + __func__); + goto err3; + } + wake_up_process(gwlan_logging.thread); + gwlan_logging.is_active = true; + gwlan_logging.is_flush_complete = false; + + return 0; + +err3: + for (i = 0; i < MAX_PKTSTATS_BUFF; i++) { + if (gpkt_stats_buffers[i].skb) + dev_kfree_skb(gpkt_stats_buffers[i].skb); + } +err2: + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, irq_flag); + gwlan_logging.pkt_stats_pcur_node = NULL; + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, irq_flag); + vfree(gpkt_stats_buffers); + gpkt_stats_buffers = NULL; +err1: + spin_lock_irqsave(&gwlan_logging.spin_lock, irq_flag); + gwlan_logging.pcur_node = NULL; + spin_unlock_irqrestore(&gwlan_logging.spin_lock, irq_flag); + free_log_msg_buffer(); + + return -ENOMEM; +} + +static void flush_timer_deinit(void) +{ + gwlan_logging.is_flush_timer_initialized = false; + qdf_spin_lock(&gwlan_logging.flush_timer_lock); + qdf_timer_stop(&gwlan_logging.flush_timer); + qdf_timer_free(&gwlan_logging.flush_timer); + qdf_spin_unlock(&gwlan_logging.flush_timer_lock); + qdf_spinlock_destroy(&gwlan_logging.flush_timer_lock); +} + +int wlan_logging_sock_deinit_svc(void) +{ + unsigned long irq_flag; + int i; + + if (!gwlan_logging.pcur_node) + return 0; + + INIT_COMPLETION(gwlan_logging.shutdown_comp); + gwlan_logging.exit = true; + gwlan_logging.is_active = false; +#if defined(FEATURE_FW_LOG_PARSING) || defined(FEATURE_WLAN_DIAG_SUPPORT) + cds_set_multicast_logging(0); +#endif + gwlan_logging.is_flush_complete = false; + clear_bit(HOST_LOG_DRIVER_MSG, &gwlan_logging.eventFlag); + clear_bit(HOST_LOG_PER_PKT_STATS, &gwlan_logging.eventFlag); + clear_bit(HOST_LOG_FW_FLUSH_COMPLETE, &gwlan_logging.eventFlag); + wake_up_interruptible(&gwlan_logging.wait_queue); + wait_for_completion(&gwlan_logging.shutdown_comp); + + spin_lock_irqsave(&gwlan_logging.spin_lock, irq_flag); + gwlan_logging.pcur_node = NULL; + spin_unlock_irqrestore(&gwlan_logging.spin_lock, irq_flag); + + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, irq_flag); + gwlan_logging.pkt_stats_pcur_node = NULL; + gwlan_logging.pkt_stats_msg_idx = 0; + gwlan_logging.pkt_stat_drop_cnt = 0; + for (i = 0; i < MAX_PKTSTATS_BUFF; i++) { + if (gpkt_stats_buffers[i].skb) + dev_kfree_skb(gpkt_stats_buffers[i].skb); + } + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, irq_flag); + + vfree(gpkt_stats_buffers); + gpkt_stats_buffers = NULL; + free_log_msg_buffer(); + flush_timer_deinit(); + + return 0; +} + +/** + * wlan_logging_set_per_pkt_stats() - This function triggers per packet logging + * + * This function is used to send signal to the logger thread for logging per + * packet stats + * + * Return: None + * + */ +void wlan_logging_set_per_pkt_stats(void) +{ + if (gwlan_logging.is_active == false) + return; + + set_bit(HOST_LOG_PER_PKT_STATS, &gwlan_logging.eventFlag); + wake_up_interruptible(&gwlan_logging.wait_queue); +} + +/* + * wlan_logging_set_fw_flush_complete() - FW log flush completion + * + * This function is used to send signal to the logger thread to indicate + * that the flushing of FW logs is complete by the FW + * + * Return: None + * + */ +void wlan_logging_set_fw_flush_complete(void) +{ + if (!gwlan_logging.is_active) + return; + + set_bit(HOST_LOG_FW_FLUSH_COMPLETE, &gwlan_logging.eventFlag); + wake_up_interruptible(&gwlan_logging.wait_queue); +} + +/** + * wlan_flush_host_logs_for_fatal() - Flush host logs + * + * This function is used to send signal to the logger thread to + * Flush the host logs + * + * Return: None + */ +void wlan_flush_host_logs_for_fatal(void) +{ + unsigned long flags; + + if (gwlan_logging.flush_timer_period == 0) + qdf_nofl_info("%s:flush all host logs Setting HOST_LOG_POST_MAS", + __func__); + spin_lock_irqsave(&gwlan_logging.spin_lock, flags); + wlan_queue_logmsg_for_app(); + spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags); + set_bit(HOST_LOG_DRIVER_MSG, &gwlan_logging.eventFlag); + wake_up_interruptible(&gwlan_logging.wait_queue); +} + +#ifdef FEATURE_PKTLOG +#ifndef REMOVE_PKT_LOG + +static uint8_t gtx_count; +static uint8_t grx_count; + +/** + * wlan_get_pkt_stats_free_node() - Get the free node for pkt stats + * + * This function is used to get the free node for pkt stats from + * free list/filles list + * + * Return: int + * + */ +static int wlan_get_pkt_stats_free_node(void) +{ + int ret = 0; + + list_add_tail(&gwlan_logging.pkt_stats_pcur_node->node, + &gwlan_logging.pkt_stat_filled_list); + + if (!list_empty(&gwlan_logging.pkt_stat_free_list)) { + /* Get buffer from free list */ + gwlan_logging.pkt_stats_pcur_node = + (struct pkt_stats_msg *)(gwlan_logging.pkt_stat_free_list.next); + list_del_init(gwlan_logging.pkt_stat_free_list.next); + } else if (!list_empty(&gwlan_logging.pkt_stat_filled_list)) { + /* Get buffer from filled list. This condition will drop the + * packet from being indicated to app + */ + gwlan_logging.pkt_stats_pcur_node = + (struct pkt_stats_msg *) + (gwlan_logging.pkt_stat_filled_list.next); + ++gwlan_logging.pkt_stat_drop_cnt; + /* print every 64th drop count */ + if ( + cds_is_multicast_logging() && + (!(gwlan_logging.pkt_stat_drop_cnt % 0x40))) { + qdf_nofl_err("%s: drop_count = %u", + __func__, gwlan_logging.pkt_stat_drop_cnt); + } + list_del_init(gwlan_logging.pkt_stat_filled_list.next); + ret = 1; + } + + /* Reset the skb values, essential if dequeued from filled list */ + skb_trim(gwlan_logging.pkt_stats_pcur_node->skb, 0); + return ret; +} + +/** + * wlan_pkt_stats_to_logger_thread() - Add the pkt stats to SKB + * @pl_hdr: Pointer to pl_hdr + * @pkt_dump: Pointer to pkt_dump + * @data: Pointer to data + * + * This function adds the pktstats hdr and data to current + * skb node of free list. + * + * Return: None + */ +void wlan_pkt_stats_to_logger_thread(void *pl_hdr, void *pkt_dump, void *data) +{ + struct ath_pktlog_hdr *pktlog_hdr; + struct packet_dump *pkt_stats_dump; + int total_stats_len = 0; + bool wake_up_thread = false; + unsigned long flags; + struct sk_buff *ptr; + int hdr_size; + + pktlog_hdr = (struct ath_pktlog_hdr *)pl_hdr; + + if (!pktlog_hdr) { + qdf_nofl_err("%s : Invalid pkt_stats_header", __func__); + return; + } + + pkt_stats_dump = (struct packet_dump *)pkt_dump; + total_stats_len = sizeof(struct ath_pktlog_hdr) + + pktlog_hdr->size; + + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, flags); + + if (!gwlan_logging.pkt_stats_pcur_node) { + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, flags); + return; + } + + /* Check if we can accommodate more log into current node/buffer */ + hdr_size = sizeof(struct host_log_pktlog_info) + + sizeof(tAniNlHdr); + if ((total_stats_len + hdr_size) >= + skb_tailroom(gwlan_logging.pkt_stats_pcur_node->skb)) { + wake_up_thread = true; + wlan_get_pkt_stats_free_node(); + } + + ptr = gwlan_logging.pkt_stats_pcur_node->skb; + qdf_mem_copy(skb_put(ptr, + sizeof(struct ath_pktlog_hdr)), + pktlog_hdr, + sizeof(struct ath_pktlog_hdr)); + + if (pkt_stats_dump) { + qdf_mem_copy(skb_put(ptr, + sizeof(struct packet_dump)), + pkt_stats_dump, + sizeof(struct packet_dump)); + pktlog_hdr->size -= sizeof(struct packet_dump); + } + + if (data) + qdf_mem_copy(skb_put(ptr, + pktlog_hdr->size), + data, pktlog_hdr->size); + + if (pkt_stats_dump && pkt_stats_dump->type == STOP_MONITOR) { + wake_up_thread = true; + wlan_get_pkt_stats_free_node(); + } + + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, flags); + + /* Wakeup logger thread */ + if (true == wake_up_thread) { + set_bit(HOST_LOG_PER_PKT_STATS, &gwlan_logging.eventFlag); + wake_up_interruptible(&gwlan_logging.wait_queue); + } +} + +/** + * driver_hal_status_map() - maps driver to hal + * status + * @status: status to be mapped + * + * This function is used to map driver to hal status + * + * Return: None + * + */ +static void driver_hal_status_map(uint8_t *status) +{ + switch (*status) { + case tx_status_ok: + *status = TX_PKT_FATE_ACKED; + break; + case tx_status_discard: + *status = TX_PKT_FATE_DRV_DROP_OTHER; + break; + case tx_status_no_ack: + *status = TX_PKT_FATE_SENT; + break; + case tx_status_download_fail: + *status = TX_PKT_FATE_FW_QUEUED; + break; + default: + *status = TX_PKT_FATE_DRV_DROP_OTHER; + break; + } +} + +/* + * send_packetdump() - send packet dump + * @soc: soc handle + * @vdev_id: ID of the virtual device handle + * @netbuf: netbuf + * @status: status of tx packet + * @type: type of packet + * + * This function is used to send packet dump to HAL layer + * using wlan_pkt_stats_to_logger_thread + * + * Return: None + * + */ +static void send_packetdump(ol_txrx_soc_handle soc, + uint8_t vdev_id, qdf_nbuf_t netbuf, + uint8_t status, uint8_t type) +{ + struct ath_pktlog_hdr pktlog_hdr = {0}; + struct packet_dump pd_hdr = {0}; + + if (!netbuf) { + qdf_nofl_err("%s: Invalid netbuf.", __func__); + return; + } + + /* Send packet dump only for STA interface */ + if (wlan_op_mode_sta != cdp_get_opmode(soc, vdev_id)) + return; + +#if defined(HELIUMPLUS) + pktlog_hdr.flags |= PKTLOG_HDR_SIZE_16; +#endif + + pktlog_hdr.log_type = PKTLOG_TYPE_PKT_DUMP; + pktlog_hdr.size = sizeof(pd_hdr) + netbuf->len; + + pd_hdr.status = status; + pd_hdr.type = type; + pd_hdr.driver_ts = qdf_get_monotonic_boottime(); + + if ((type == TX_MGMT_PKT) || (type == TX_DATA_PKT)) + gtx_count++; + else if ((type == RX_MGMT_PKT) || (type == RX_DATA_PKT)) + grx_count++; + + wlan_pkt_stats_to_logger_thread(&pktlog_hdr, &pd_hdr, netbuf->data); +} + + +/* + * send_packetdump_monitor() - sends start/stop packet dump indication + * @type: type of packet + * + * This function is used to indicate HAL layer to start/stop monitoring + * of packets + * + * Return: None + * + */ +static void send_packetdump_monitor(uint8_t type) +{ + struct ath_pktlog_hdr pktlog_hdr = {0}; + struct packet_dump pd_hdr = {0}; + +#if defined(HELIUMPLUS) + pktlog_hdr.flags |= PKTLOG_HDR_SIZE_16; +#endif + + pktlog_hdr.log_type = PKTLOG_TYPE_PKT_DUMP; + pktlog_hdr.size = sizeof(pd_hdr); + + pd_hdr.type = type; + + LOGGING_TRACE(QDF_TRACE_LEVEL_DEBUG, + "fate Tx-Rx %s: type: %d", __func__, type); + + wlan_pkt_stats_to_logger_thread(&pktlog_hdr, &pd_hdr, NULL); +} + +void wlan_deregister_txrx_packetdump(uint8_t pdev_id) +{ + void *soc = cds_get_context(QDF_MODULE_ID_SOC); + + if (!soc) + return; + + if (gtx_count || grx_count) { + cdp_deregister_packetdump_cb(soc, pdev_id); + wma_deregister_packetdump_callback(); + send_packetdump_monitor(STOP_MONITOR); + csr_packetdump_timer_stop(); + + gtx_count = 0; + grx_count = 0; + } else + LOGGING_TRACE(QDF_TRACE_LEVEL_DEBUG, + "%s: deregistered packetdump already", __func__); +} + +/* + * check_txrx_packetdump_count() - function to check + * tx/rx packet dump global counts + * @pdev_id: datapath pdev identifier + * + * This function is used to check global counts of tx/rx + * packet dump functionality. + * + * Return: 1 if either gtx_count or grx_count reached 32 + * 0 otherwise + * + */ +static bool check_txrx_packetdump_count(uint8_t pdev_id) +{ + if (gtx_count == MAX_NUM_PKT_LOG || + grx_count == MAX_NUM_PKT_LOG) { + LOGGING_TRACE(QDF_TRACE_LEVEL_DEBUG, + "%s gtx_count: %d grx_count: %d deregister packetdump", + __func__, gtx_count, grx_count); + wlan_deregister_txrx_packetdump(pdev_id); + return 1; + } + return 0; +} + +/* + * tx_packetdump_cb() - tx packet dump callback + * @soc: soc handle + * @pdev_id: datapath pdev id + * @vdev_id: vdev id + * @netbuf: netbuf + * @status: status of tx packet + * @type: packet type + * + * This function is used to send tx packet dump to HAL layer + * and deregister packet dump callbacks + * + * Return: None + * + */ +static void tx_packetdump_cb(ol_txrx_soc_handle soc, + uint8_t pdev_id, uint8_t vdev_id, + qdf_nbuf_t netbuf, + uint8_t status, uint8_t type) +{ + bool temp; + + if (!soc) + return; + + temp = check_txrx_packetdump_count(pdev_id); + if (temp) + return; + + driver_hal_status_map(&status); + send_packetdump(soc, vdev_id, netbuf, status, type); +} + + +/* + * rx_packetdump_cb() - rx packet dump callback + * @soc: soc handle + * @pdev_id: datapath pdev id + * @vdev_id: vdev id + * @netbuf: netbuf + * @status: status of rx packet + * @type: packet type + * + * This function is used to send rx packet dump to HAL layer + * and deregister packet dump callbacks + * + * Return: None + * + */ +static void rx_packetdump_cb(ol_txrx_soc_handle soc, + uint8_t pdev_id, uint8_t vdev_id, + qdf_nbuf_t netbuf, + uint8_t status, uint8_t type) +{ + bool temp; + + if (!soc) + return; + + temp = check_txrx_packetdump_count(pdev_id); + if (temp) + return; + + send_packetdump(soc, vdev_id, netbuf, status, type); +} + +void wlan_register_txrx_packetdump(uint8_t pdev_id) +{ + void *soc = cds_get_context(QDF_MODULE_ID_SOC); + + if (!soc) + return; + + cdp_register_packetdump_cb(soc, pdev_id, + tx_packetdump_cb, rx_packetdump_cb); + wma_register_packetdump_callback(tx_packetdump_cb, + rx_packetdump_cb); + send_packetdump_monitor(START_MONITOR); + + gtx_count = 0; + grx_count = 0; + + csr_packetdump_timer_start(); +} +#endif /* REMOVE_PKT_LOG */ +#endif /* FEATURE_PKTLOG */ +#endif /* WLAN_LOGGING_SOCK_SVC_ENABLE */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/logging/src/wlan_roam_debug.c b/drivers/staging/qca-wifi-host-cmn/utils/logging/src/wlan_roam_debug.c new file mode 100644 index 0000000000000000000000000000000000000000..c6ced135050f351f689425338093af6a3854abb8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/logging/src/wlan_roam_debug.c @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: Roaming debug log operations routines and global data + */ + +#include +#include +#include +#include +#include +#include +#include +#include "wlan_roam_debug.h" + +#ifdef FEATURE_ROAM_DEBUG +#ifdef WLAN_LOGGING_BUFFERS_DYNAMICALLY +static struct wlan_roam_debug_info *global_wlan_roam_debug_table; + +/** + * wlan_roam_debug_init() - Allocate log buffer dynamically + * + * Return: none + */ +void wlan_roam_debug_init(void) +{ + global_wlan_roam_debug_table = vzalloc( + sizeof(*global_wlan_roam_debug_table)); + + QDF_BUG(global_wlan_roam_debug_table); + + if (global_wlan_roam_debug_table) { + qdf_atomic_init(&global_wlan_roam_debug_table->index); + global_wlan_roam_debug_table->num_max_rec = + WLAN_ROAM_DEBUG_MAX_REC; + } +} + +qdf_export_symbol(wlan_roam_debug_init); + +static inline struct wlan_roam_debug_info *wlan_roam_debug_get_table(void) +{ + return global_wlan_roam_debug_table; +} + +/** + * wlan_roam_debug_deinit() - Free log buffer allocated dynamically + * + * Return: none + */ +void wlan_roam_debug_deinit(void) +{ + vfree(global_wlan_roam_debug_table); + global_wlan_roam_debug_table = NULL; +} + +qdf_export_symbol(wlan_roam_debug_deinit); +#else /* WLAN_LOGGING_BUFFERS_DYNAMICALLY */ +/* + * wlan roam debug log is stored in this global structure. It can be accessed + * without requiring any psoc or vdev context. It will be accessible in + * the crash dump without having to dereference complex stack traces. + */ +static struct wlan_roam_debug_info global_wlan_roam_debug_table = { + { 0 }, + WLAN_ROAM_DEBUG_MAX_REC, +}; + +static inline struct wlan_roam_debug_info *wlan_roam_debug_get_table(void) +{ + return &global_wlan_roam_debug_table; +} +#endif /* WLAN_LOGGING_BUFFERS_DYNAMICALLY */ + +/** + * wlan_roam_next_debug_log_index() - atomically increment and wrap around index + * @index: address of index to increment + * @size: wrap around this value + * + * Return: new value of index + */ +static int wlan_roam_next_debug_log_index(qdf_atomic_t *index, int size) +{ + int i = qdf_atomic_inc_return(index); + + if (i == WLAN_ROAM_DEBUG_MAX_REC) + qdf_atomic_sub(WLAN_ROAM_DEBUG_MAX_REC, index); + while (i >= size) + i -= WLAN_ROAM_DEBUG_MAX_REC; + + return i; +} + +/** + * wlan_roam_debug_log() - Add a debug log entry to wlan roam debug records + * @vdev_id: vdev identifier + * @op: operation identifier + * @peer_id: peer id + * @mac_addr: mac address of peer, can be NULL + * @peer_obj: peer object address, can be NULL + * @arg1: extra argument #1 + * @arg2: extra argument #2 + * + * Return: none + */ +void wlan_roam_debug_log(uint8_t vdev_id, uint8_t op, + uint16_t peer_id, void *mac_addr, + void *peer_obj, uint32_t arg1, uint32_t arg2) +{ + uint32_t i; + struct wlan_roam_debug_info *dbg_tbl; + struct wlan_roam_debug_rec *rec; + + dbg_tbl = wlan_roam_debug_get_table(); + if (!dbg_tbl) + return; + + i = wlan_roam_next_debug_log_index( + &dbg_tbl->index, + WLAN_ROAM_DEBUG_MAX_REC); + rec = &dbg_tbl->rec[i]; + rec->time = qdf_get_log_timestamp(); + rec->operation = op; + rec->vdev_id = vdev_id; + rec->peer_id = peer_id; + if (mac_addr) + qdf_mem_copy(rec->mac_addr.bytes, mac_addr, + QDF_MAC_ADDR_SIZE); + else + qdf_mem_zero(rec->mac_addr.bytes, + QDF_MAC_ADDR_SIZE); + rec->peer_obj = peer_obj; + rec->arg1 = arg1; + rec->arg2 = arg2; +} +qdf_export_symbol(wlan_roam_debug_log); + +/** + * wlan_roam_debug_string() - convert operation value to printable string + * @op: operation identifier + * + * Return: printable string for the operation + */ +static char *wlan_roam_debug_string(uint32_t op) +{ + switch (op) { + case DEBUG_PEER_CREATE_SEND: + return "peer create send"; + case DEBUG_PEER_CREATE_RESP: + return "peer create resp_event"; + case DEBUG_PEER_DELETE_SEND: + return "peer delete send"; + case DEBUG_PEER_DELETE_RESP: + return "peer delete resp_event"; + case DEBUG_PEER_MAP_EVENT: + return "peer map event"; + case DEBUG_PEER_UNMAP_EVENT: + return "peer unmap event"; + case DEBUG_PEER_UNREF_DELETE: + return "peer unref delete"; + case DEBUG_DELETING_PEER_OBJ: + return "peer obj deleted"; + case DEBUG_ROAM_SYNCH_IND: + return "roam synch ind event"; + case DEBUG_ROAM_SYNCH_CNF: + return "roam sync conf sent"; + case DEBUG_ROAM_SYNCH_FAIL: + return "roam sync fail event"; + case DEBUG_ROAM_EVENT: + return "roam event"; + case DEBUG_WOW_ROAM_EVENT: + return "wow wakeup roam event"; + case DEBUG_BUS_SUSPEND: + return "host suspend"; + case DEBUG_BUS_RESUME: + return "host wakeup"; + case DEBUG_WOW_REASON: + return "wow wakeup reason"; + default: + return "unknown"; + } +} + +/** + * wlan_roam_debug_dump_table() - Print the wlan roam debug log records + * print all the valid debug records in the order of timestamp + * + * Return: none + */ +void wlan_roam_debug_dump_table(void) +{ + uint32_t i; + int32_t current_index; + struct wlan_roam_debug_info *dbg_tbl; + struct wlan_roam_debug_rec *dbg_rec; + uint64_t startt = 0; + uint32_t delta; + +#define DEBUG_CLOCK_TICKS_PER_MSEC 19200 + + dbg_tbl = wlan_roam_debug_get_table(); + if (!dbg_tbl) + return; + + current_index = qdf_atomic_read(&dbg_tbl->index); + if (current_index < 0) { + roam_debug("No records to dump"); + return; + } + roam_debug("Dumping all records. current index %d", current_index); + + i = current_index; + do { + /* wrap around */ + i = (i + 1) % WLAN_ROAM_DEBUG_MAX_REC; + dbg_rec = &dbg_tbl->rec[i]; + /* skip unused entry */ + if (dbg_rec->time == 0) + continue; + if (startt == 0) + startt = dbg_rec->time; + + /* + * Divide by 19200 == right shift 8 bits, then divide by 75 + * 32 bit computation keeps both 32 and 64 bit compilers happy. + * The value will roll over after approx. 33554 seconds. + */ + delta = (uint32_t) (((dbg_rec->time - startt) >> 8) & + 0xffffffff); + delta = delta / (DEBUG_CLOCK_TICKS_PER_MSEC >> 8); + + roam_debug("index = %5d timestamp = 0x%016llx delta ms = %-12u", + i, dbg_rec->time, delta); + roam_debug("info = %-24s vdev_id = %-3d mac addr = "QDF_MAC_ADDR_FMT, + wlan_roam_debug_string(dbg_rec->operation), + (int8_t)dbg_rec->vdev_id, + QDF_MAC_ADDR_REF(dbg_rec->mac_addr.bytes)); + roam_debug("peer obj = 0x%pK peer_id = %-4d", dbg_rec->peer_obj, + (int8_t)dbg_rec->peer_id); + roam_debug("arg1 = 0x%-8x arg2 = 0x%-8x", dbg_rec->arg1, + dbg_rec->arg2); + } while (i != current_index); +} +qdf_export_symbol(wlan_roam_debug_dump_table); + +#endif /* FEATURE_ROAM_DEBUG */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/nlink/inc/wlan_nlink_common.h b/drivers/staging/qca-wifi-host-cmn/utils/nlink/inc/wlan_nlink_common.h new file mode 100644 index 0000000000000000000000000000000000000000..2ca6f2b0fb9a39c830962357736e75a40e7a383d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/nlink/inc/wlan_nlink_common.h @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/*=========================================================================== + \file wlan_nlink_common.h + + Exports and types for the Netlink Service interface. This header file contains + message types and definitions that is shared between the user space service + (e.g. logging service) and WLAN kernel module. + + ===========================================================================*/ + +#ifndef WLAN_NLINK_COMMON_H__ +#define WLAN_NLINK_COMMON_H__ + +#include + +#ifdef __KERNEL__ +#include +#else +#include +#endif + +/*--------------------------------------------------------------------------- + * External Functions + *-------------------------------------------------------------------------*/ + +/*--------------------------------------------------------------------------- + * Preprocessor Definitions and Constants + *-------------------------------------------------------------------------*/ +#define WLAN_NL_MAX_PAYLOAD 5120 /* maximum size for netlink message */ +#define WLAN_NLINK_PROTO_FAMILY NETLINK_USERSOCK +#define WLAN_NLINK_MCAST_GRP_ID 0x01 + +/*--------------------------------------------------------------------------- + * Type Declarations + *-------------------------------------------------------------------------*/ + +/* + * The following enum defines the target service within WLAN driver for which the + * message is intended for. Each service along with its counterpart + * in the user space, define a set of messages they recognize. + * Each of this message will have an header of type tAniMsgHdr defined below. + * Each Netlink message to/from a kernel module will contain only one + * message which is preceded by a tAniMsgHdr. The maximun size (in bytes) of + * a netlink message is assumed to be MAX_PAYLOAD bytes. + * + * +------------+-------+----------+----------+ + * |Netlink hdr | Align |tAniMsgHdr| msg body | + * +------------+-------+----------|----------+ + */ + +/* Message Types */ +#define WLAN_SVC_FW_CRASHED_IND 0x100 +#define WLAN_SVC_LTE_COEX_IND 0x101 +#define WLAN_SVC_WLAN_AUTO_SHUTDOWN_IND 0x102 +#define WLAN_SVC_DFS_CAC_START_IND 0x103 +#define WLAN_SVC_DFS_CAC_END_IND 0x104 +#define WLAN_SVC_DFS_RADAR_DETECT_IND 0x105 +#define WLAN_SVC_WLAN_STATUS_IND 0x106 +#define WLAN_SVC_WLAN_VERSION_IND 0x107 +#define WLAN_SVC_DFS_ALL_CHANNEL_UNAVAIL_IND 0x108 +#define WLAN_SVC_WLAN_TP_IND 0x109 +#define WLAN_SVC_RPS_ENABLE_IND 0x10A +#define WLAN_SVC_WLAN_TP_TX_IND 0x10B +#define WLAN_SVC_WLAN_AUTO_SHUTDOWN_CANCEL_IND 0x10C +#define WLAN_SVC_WLAN_RADIO_INDEX 0x10D +#define WLAN_SVC_FW_SHUTDOWN_IND 0x10E +#define WLAN_SVC_CORE_MINFREQ 0x10F +#define WLAN_SVC_MAX_SSID_LEN 32 +#define WLAN_SVC_MAX_BSSID_LEN 6 +#define WLAN_SVC_MAX_STR_LEN 16 +#define WLAN_SVC_MAX_NUM_CHAN 128 +#define WLAN_SVC_COUNTRY_CODE_LEN 3 + +#define ANI_NL_MSG_BASE 0x10 /* Some arbitrary base */ + +typedef enum eAniNlModuleTypes { + ANI_NL_MSG_PUMAC = ANI_NL_MSG_BASE + 0x01, /* PTT Socket App */ + ANI_NL_MSG_PTT = ANI_NL_MSG_BASE + 0x07, /* Quarky GUI */ + WLAN_NL_MSG_OEM = ANI_NL_MSG_BASE + 0x09, + WLAN_NL_MSG_SVC = ANI_NL_MSG_BASE + 0x0a, + WLAN_NL_MSG_CNSS_DIAG = ANI_NL_MSG_BASE + 0x0B, /* Value needs to be 27 */ + ANI_NL_MSG_LOG, + WLAN_NL_MSG_SPECTRAL_SCAN, + ANI_NL_MSG_MAX +} tAniNlModTypes, tWlanNlModTypes; + +#define WLAN_NL_MSG_BASE ANI_NL_MSG_BASE +#define WLAN_NL_MSG_MAX ANI_NL_MSG_MAX + +/* All Netlink messages must contain this header */ +typedef struct sAniHdr { + unsigned short type; + unsigned short length; +} tAniHdr, tAniMsgHdr; + +typedef struct sAniNlMsg { + struct nlmsghdr nlh; /* Netlink Header */ + int radio; /* unit number of the radio */ + tAniHdr wmsg; /* Airgo Message Header */ +} tAniNlHdr; + +struct radio_index_tlv { + unsigned short type; + unsigned short length; + int radio; +}; + +/** + * struct svc_channel_info - Channel information + * @chan_id: Channel ID + * @reserved0: Reserved for padding and future use + * @mhz: Primary 20 MHz channel frequency in MHz + * @band_center_freq1: Center frequency 1 in MHz + * @band_center_freq2: Center frequency 2 in MHz + * @info: Channel info + * @reg_info_1: Regulatory information field 1 which contains + * MIN power, MAX power, reg power and reg class ID + * @reg_info_2: Regulatory information field 2 which contains antennamax + */ +struct svc_channel_info { + uint32_t chan_id; + uint32_t reserved0; + uint32_t mhz; + uint32_t band_center_freq1; + uint32_t band_center_freq2; + uint32_t info; + uint32_t reg_info_1; + uint32_t reg_info_2; +}; + +struct wlan_status_data { + uint8_t lpss_support; + uint8_t is_on; + uint8_t vdev_id; + uint8_t is_connected; + int8_t rssi; + uint8_t ssid_len; + uint8_t country_code[WLAN_SVC_COUNTRY_CODE_LEN]; + uint32_t vdev_mode; + uint32_t freq; + uint32_t numChannels; + uint8_t channel_list[WLAN_SVC_MAX_NUM_CHAN]; + uint8_t ssid[WLAN_SVC_MAX_SSID_LEN]; + uint8_t bssid[WLAN_SVC_MAX_BSSID_LEN]; + struct svc_channel_info channel_info[WLAN_SVC_MAX_NUM_CHAN]; +}; + +struct wlan_version_data { + uint32_t chip_id; + char chip_name[WLAN_SVC_MAX_STR_LEN]; + char chip_from[WLAN_SVC_MAX_STR_LEN]; + char host_version[WLAN_SVC_MAX_STR_LEN]; + char fw_version[WLAN_SVC_MAX_STR_LEN]; +}; + +struct wlan_dfs_info { + uint16_t channel; + uint8_t country_code[WLAN_SVC_COUNTRY_CODE_LEN]; +}; + +/* + * Maximim number of queues supported by WLAN driver. Setting an upper + * limit. Actual number of queues may be smaller than this value. + */ +#define WLAN_SVC_IFACE_NUM_QUEUES 6 + +/** + * struct wlan_rps_data - structure to send RPS info to cnss-daemon + * @ifname: interface name for which the RPS data belongs to + * @num_queues: number of rx queues for which RPS data is being sent + * @cpu_map_list: array of cpu maps for different rx queues supported by + * the wlan driver + * + * The structure specifies the format of data exchanged between wlan + * driver and cnss-daemon. On receipt of the data, cnss-daemon is expected + * to apply the 'cpu_map' for each rx queue belonging to the interface 'ifname' + */ +struct wlan_rps_data { + char ifname[IFNAMSIZ]; + uint16_t num_queues; + uint16_t cpu_map_list[WLAN_SVC_IFACE_NUM_QUEUES]; +}; + +/** + * enum wlan_tp_level - indicates wlan throughput level + * @WLAN_SVC_TP_NONE: used for initialization + * @WLAN_SVC_TP_LOW: used to identify low throughput level + * @WLAN_SVC_TP_MEDIUM: used to identify medium throughput level + * @WLAN_SVC_TP_HIGH: used to identify high throughput level + * + * The different throughput levels are determined on the basis of # of tx and + * rx packets and other threshold values. For example, if the # of total + * packets sent or received by the driver is greater than 500 in the last 100ms + * , the driver has a high throughput requirement. The driver may tweak certain + * system parameters based on the throughput level. + */ +enum wlan_tp_level { + WLAN_SVC_TP_NONE, + WLAN_SVC_TP_LOW, + WLAN_SVC_TP_MEDIUM, + WLAN_SVC_TP_HIGH, +}; + +/** + * struct wlan_core_minfreq - msg to [re]set the min freq of a set of cores + * @magic: signature token: 0xBABA + * @reserved: unused for now + * @coremask: bitmap of cores (16 bits) bit0=CORE0, bit1=CORE1, ... + * coremask is ONLY valid for set command + * valid values: 0xf0, or 0x0f + * @freq: frequency in KH + * > 0: "set to the given frequency" + * == 0: "free; remove the lock" + * + * Msg structure passed by the driver to cnss-daemon. + * + * Semantical Alert: + * There can be only one outstanding lock, even for different masks. + */ +#define WLAN_CORE_MINFREQ_MAGIC 0xBABA +struct wlan_core_minfreq { + uint16_t magic; + uint16_t reserved; + uint16_t coremask; + uint16_t freq; +}; + +/* Indication to enable TCP delayed ack in TPUT indication */ +#define TCP_DEL_ACK_IND (1 << 0) +#define TCP_DEL_ACK_IND_MASK 0x1 +/* Indication to enable TCP advance window scaling in TPUT indication */ +#define TCP_ADV_WIN_SCL (1 << 1) +#define TCP_ADV_WIN_SCL_MASK 0x2 + +/* TCP limit output bytes for low and high TPUT */ +#define TCP_LIMIT_OUTPUT_BYTES_LOW 506072 +#define TCP_LIMIT_OUTPUT_BYTES_HI 4048579 + +/* TCP window scale for low and high TPUT */ +#define WIN_SCALE_LOW 2 +#define WIN_SCALE_HI 1 + +/* TCP DEL ACK value for low and high TPUT */ +#define TCP_DEL_ACK_LOW 0 +#define TCP_DEL_ACK_HI 20 + +/** + * struct wlan_rx_tp_data - msg to TCP delayed ack and advance window scaling + * @level: Throughput level. + * @rx_tp_flags: Bit map of flags, for which this indcation will take + * effect, bit map for TCP_ADV_WIN_SCL and TCP_DEL_ACK_IND. + */ +struct wlan_rx_tp_data { + enum wlan_tp_level level; + uint16_t rx_tp_flags; +}; + +/** + * struct wlan_tx_tp_data - msg to TCP for Tx Dir + * @level: Throughput level. + * @tcp_limit_output: Tcp limit output flag. + * + */ +struct wlan_tx_tp_data { + enum wlan_tp_level level; + bool tcp_limit_output; +}; + +#endif /* WLAN_NLINK_COMMON_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/nlink/inc/wlan_nlink_srv.h b/drivers/staging/qca-wifi-host-cmn/utils/nlink/inc/wlan_nlink_srv.h new file mode 100644 index 0000000000000000000000000000000000000000..c66b06b534566a422c908e4b7a214a5150b945b1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/nlink/inc/wlan_nlink_srv.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2012-2017, 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/****************************************************************************** +* wlan_nlink_srv.h +* +* wlan_nlink_srv is used to RX/TX Netlink messages from user space to kernel +* modules and vice versa. Kernel modules must register a message handler for a +* message type so that the wlan_nlink_srv can invoke the corresponding msg handler +* whenever a Netlink message of a particular type has been received from an +* application. In the opposite direction, wlan_nlink_srv provides a mechanism +* which kernel modules can use to send Netlink messages to applications. +* +******************************************************************************/ + +#ifndef WLAN_NLINK_SRV_H +#define WLAN_NLINK_SRV_H + +#include +#include +#include + +#define INVALID_PID -1 +#define NLINK_MAX_CALLBACKS (WLAN_NL_MSG_MAX - WLAN_NL_MSG_BASE) + +typedef int (*nl_srv_msg_callback)(struct sk_buff *skb); + +/** + * cld80211_oem_send_reply() - API to send cld80211 msg + * @skb: Sk buffer + * @hdr: nl80211hdr pointer + * @nest: pointer of vendor nested attribute + * @flags: Flags + * + * API to send cld80211 msg to applications + * + * Return: None + */ +void cld80211_oem_send_reply(struct sk_buff *msg, void *hdr, + struct nlattr *nest, int flags); + +/** + * nl80211hdr_put() - API to allocate skb for cld80211 msg + * @hdr: nl80211hdr pointer + * @portid: Port ID + * @nest: pointer of vendor nested attribute + * @flags: Flags + * + * API to allocate skb for cld80211 msg + * + * Return: Pointer to skbuff + */ + +struct sk_buff * +cld80211_oem_rsp_alloc_skb(uint32_t portid, void **hdr, struct nlattr **nest, + int *flags); +int nl_srv_init(void *wiphy, int proto); +void nl_srv_exit(void); +int nl_srv_register(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler); +int nl_srv_unregister(tWlanNlModTypes msg_type, + nl_srv_msg_callback msg_handler); + +#ifdef CNSS_GENL +int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag, + int app_id, int mcgroup_id); +int nl_srv_bcast(struct sk_buff *skb, int mcgroup_id, int app_id); + +/** + * nl80211hdr_put() - API to fill genlmsg header + * @skb: Sk buffer + * @portid: Port ID + * @seq: Sequence number + * @flags: Flags + * @cmd: Command id + * + * API to fill genl message header for brodcast events to user space + * + * Return: Pointer to user specific header/payload + */ +void *nl80211hdr_put(struct sk_buff *skb, uint32_t portid, + uint32_t seq, int flags, uint8_t cmd); +#else +int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag); +int nl_srv_bcast(struct sk_buff *skb); +#endif + +int nl_srv_is_initialized(void); +void nl_srv_ucast_oem(struct sk_buff *skb, int dst_pid, int flag); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/utils/nlink/src/wlan_nlink_srv.c b/drivers/staging/qca-wifi-host-cmn/utils/nlink/src/wlan_nlink_srv.c new file mode 100644 index 0000000000000000000000000000000000000000..d0b79feb060bba5320ff8c7efcb88018b4296ba0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/nlink/src/wlan_nlink_srv.c @@ -0,0 +1,840 @@ +/* + * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/****************************************************************************** +* wlan_nlink_srv.c +* +* This file contains the definitions specific to the wlan_nlink_srv +* +******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define WLAN_CLD80211_MAX_SIZE (SKB_WITH_OVERHEAD(8192UL) - NLMSG_HDRLEN) + +#if defined(CONFIG_CNSS_LOGGER) + +#include + +static int radio_idx = -EINVAL; +static void *wiphy_ptr; +static bool logger_initialized; + +/** + * nl_srv_init() - wrapper function to register to cnss_logger + * @wiphy: the pointer to the wiphy structure + * @proto: the host log netlink protocol + * + * The netlink socket is no longer initialized in the driver itself, instead + * will be initialized in the cnss_logger module, the driver should register + * itself to cnss_logger module to get the radio_index for all the netlink + * operation. (cfg80211 vendor command is using different netlink socket). + * + * The cnss_logger_device_register() use to register the driver with the + * wiphy structure and the module name (debug purpose) and then return the + * radio_index depending on the availibility. + * + * Return: radio index for success and -EINVAL for failure + */ +int nl_srv_init(void *wiphy, int proto) +{ + if (logger_initialized) + goto initialized; + + wiphy_ptr = wiphy; + radio_idx = cnss_logger_device_register(wiphy, THIS_MODULE->name); + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "%s: radio_index: %d, wiphy_ptr: %pK", + __func__, radio_idx, wiphy_ptr); + + if (radio_idx >= 0) + logger_initialized = true; + +initialized: + return radio_idx; +} + +/** + * nl_srv_exit() - wrapper function to unregister from cnss_logger + * + * The cnss_logger_device_unregister() use to unregister the driver with + * the radio_index assigned and wiphy structure from cnss_logger. + * + * Return: None + */ +void nl_srv_exit(void) +{ + if (logger_initialized) { + cnss_logger_device_unregister(radio_idx, wiphy_ptr); + radio_idx = -EINVAL; + wiphy_ptr = NULL; + logger_initialized = false; + } +} + +/** + * nl_srv_ucast() - wrapper function to do unicast tx through cnss_logger + * @skb: the socket buffer to send + * @dst_pid: the port id + * @flag: the blocking or nonblocking flag + * + * The nl_srv_is_initialized() is used to do sanity check if the netlink + * service is ready, e.g if the radio_index is assigned properly, if not + * the driver should take the responsibility to free the skb. + * + * The cnss_logger_nl_ucast() use the same parameters to send the socket + * buffers. + * + * Return: the error of the transmission status + */ +int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag) +{ + int err = -EINVAL; + + /* sender's pid */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + NETLINK_CB(skb).pid = 0; +#else + NETLINK_CB(skb).portid = 0; +#endif + /* not multicast */ + NETLINK_CB(skb).dst_group = 0; + + if (nl_srv_is_initialized() == 0) { + err = cnss_logger_nl_ucast(skb, dst_pid, flag); + if (err < 0) + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: netlink_unicast to pid[%d] failed, ret[%d]", + dst_pid, err); + } else { + dev_kfree_skb(skb); + } + + return err; +} + +/** + * nl_srv_bcast() - wrapper function to do broadcast tx through cnss_logger + * @skb: the socket buffer to send + * + * The cnss_logger_nl_bcast() is used to transmit the socket buffer. + * + * Return: status of transmission + */ +int nl_srv_bcast(struct sk_buff *skb) +{ + int err = -EINVAL; + int flags = GFP_KERNEL; + + if (in_interrupt() || irqs_disabled() || in_atomic()) + flags = GFP_ATOMIC; + + /* sender's pid */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + NETLINK_CB(skb).pid = 0; +#else + NETLINK_CB(skb).portid = 0; +#endif + /* destination group */ + NETLINK_CB(skb).dst_group = WLAN_NLINK_MCAST_GRP_ID; + + if (nl_srv_is_initialized() == 0) { + err = cnss_logger_nl_bcast(skb, WLAN_NLINK_MCAST_GRP_ID, flags); + if ((err < 0) && (err != -ESRCH)) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: netlink_broadcast failed err = %d", + err); + dev_kfree_skb(skb); + } + } + else + dev_kfree_skb(skb); + return err; +} +qdf_export_symbol(nl_srv_bcast); + +/** + * nl_srv_unregister() - wrapper function to unregister event to cnss_logger + * @msg_type: the message to unregister + * @msg_handler: the message handler + * + * The cnss_logger_event_unregister() is used to unregister the message and + * message handler. + * + * Return: 0 if successfully unregister, otherwise proper error code + */ +int nl_srv_unregister(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler) +{ + int ret = -EINVAL; + + if (nl_srv_is_initialized() != 0) + return ret; + + if ((msg_type >= WLAN_NL_MSG_BASE) && (msg_type < WLAN_NL_MSG_MAX) && + msg_handler) { + ret = cnss_logger_event_unregister(radio_idx, msg_type, + msg_handler); + } else { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "NLINK: nl_srv_unregister failed for msg_type %d", + msg_type); + ret = -EINVAL; + } + + return ret; +} + +/** + * nl_srv_register() - wrapper function to register event to cnss_logger + * @msg_type: the message to register + * @msg_handler: the message handler + * + * The cnss_logger_event_register() is used to register the message and + * message handler. + * + * Return: 0 if successfully register, otherwise proper error code + */ +int nl_srv_register(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler) +{ + int ret = -EINVAL; + + if (nl_srv_is_initialized() != 0) + return ret; + + if ((msg_type >= WLAN_NL_MSG_BASE) && (msg_type < WLAN_NL_MSG_MAX) && + msg_handler) { + ret = cnss_logger_event_register(radio_idx, msg_type, + msg_handler); + } else { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "NLINK: nl_srv_register failed for msg_type %d", + msg_type); + ret = -EINVAL; + } + + return ret; +} + +/** + * nl_srv_is_initialized() - check if netlink service is initialized + * + * Return: 0 if it is initialized, otherwise error code + */ +inline int nl_srv_is_initialized(void) +{ + if (logger_initialized) + return 0; + else + return -EPERM; +} +qdf_export_symbol(nl_srv_is_initialized); + +/* + * If MULTI_IF_NAME is not defined, then this is the primary instance of the + * driver and the diagnostics netlink socket will be available. If + * MULTI_IF_NAME is defined then this is not the primary instance of the driver + * and the diagnotics netlink socket will not be available since this + * diagnostics netlink socket can only be exposed by one instance of the driver. + */ +#elif defined(CNSS_GENL) +#include +#include +#include +#include + +void cld80211_oem_send_reply(struct sk_buff *msg, void *hdr, + struct nlattr *nest, int flags) +{ + struct genl_family *cld80211_fam = cld80211_get_genl_family(); + + nla_nest_end(msg, nest); + genlmsg_end(msg, hdr); + + genlmsg_multicast_netns(cld80211_fam, &init_net, msg, 0, + CLD80211_MCGRP_OEM_MSGS, flags); +} + +struct sk_buff * +cld80211_oem_rsp_alloc_skb(uint32_t portid, void **hdr, struct nlattr **nest, + int *flags) +{ + struct sk_buff *msg; + + if (in_interrupt() || irqs_disabled() || in_atomic()) + *flags = GFP_ATOMIC; + + msg = nlmsg_new(WLAN_CLD80211_MAX_SIZE, *flags); + if (!msg) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "nlmsg malloc fails"); + return NULL; + } + + *hdr = nl80211hdr_put(msg, portid, 0, *flags, WLAN_NL_MSG_OEM); + if (*hdr == NULL) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "nl80211 hdr put failed"); + goto nla_put_failure; + } + + *nest = nla_nest_start(msg, CLD80211_ATTR_VENDOR_DATA); + if (*nest == NULL) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "nla_nest_start failed"); + goto nla_put_failure; + } + return msg; +nla_put_failure: + genlmsg_cancel(msg, *hdr); + nlmsg_free(msg); + return NULL; +} + +/* For CNSS_GENL netlink sockets will be initialized by CNSS Kernel Module */ +int nl_srv_init(void *wiphy, int proto) +{ + return 0; +} + +void nl_srv_exit(void) +{ +} + +int nl_srv_is_initialized(void) +{ + return 0; +} + +/* Not implemented by CNSS kernel module */ +int nl_srv_register(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler) +{ + return 0; +} + +int nl_srv_unregister(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler) +{ + return 0; +} + +void *nl80211hdr_put(struct sk_buff *skb, uint32_t portid, + uint32_t seq, int flags, uint8_t cmd) +{ + struct genl_family *cld80211_fam = cld80211_get_genl_family(); + + return genlmsg_put(skb, portid, seq, cld80211_fam, flags, cmd); +} + +/** + * cld80211_fill_data() - API to fill payload to nl message + * @msg: Sk buffer + * @portid: Port ID + * @seq: Sequence number + * @flags: Flags + * @cmd: Command ID + * @buf: data buffer/payload to be filled + * @len: length of the payload ie. @buf + * + * API to fill the payload/data of the nl message to be sent + * + * Return: zero on success + */ +static int cld80211_fill_data(struct sk_buff *msg, uint32_t portid, + uint32_t seq, int flags, uint8_t cmd, + uint8_t *buf, int len) +{ + void *hdr; + struct nlattr *nest; + + hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); + if (!hdr) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "nl80211 hdr put failed"); + return -EPERM; + } + + nest = nla_nest_start(msg, CLD80211_ATTR_VENDOR_DATA); + if (!nest) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "nla_nest_start failed"); + goto nla_put_failure; + } + + if (nla_put(msg, CLD80211_ATTR_DATA, len, buf)) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "nla_put failed"); + goto nla_put_failure; + } + + nla_nest_end(msg, nest); + genlmsg_end(msg, hdr); + + return 0; +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EPERM; +} + +/** + * send_msg_to_cld80211() - API to send message to user space Application + * @mcgroup_id: Multicast group ID + * @pid: Port ID + * @app_id: Application ID + * @buf: Data/payload buffer to be sent + * @len: Length of the data ie. @buf + * + * API to send the nl message to user space application. + * + * Return: zero on success + */ +static int send_msg_to_cld80211(int mcgroup_id, int pid, int app_id, + uint8_t *buf, int len) +{ + struct sk_buff *msg; + struct genl_family *cld80211_fam = cld80211_get_genl_family(); + int status; + int flags = GFP_KERNEL; + + if (in_interrupt() || irqs_disabled() || in_atomic()) + flags = GFP_ATOMIC; + + if (len > NLMSG_DEFAULT_SIZE) { + if (len > WLAN_CLD80211_MAX_SIZE) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "buf size:%d if more than max size: %d", + len, (int) WLAN_CLD80211_MAX_SIZE); + return -ENOMEM; + } + msg = nlmsg_new(WLAN_CLD80211_MAX_SIZE, flags); + } else { + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, flags); + } + if (!msg) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "nlmsg malloc fails"); + return -EPERM; + } + + status = cld80211_fill_data(msg, pid, 0, 0, app_id, buf, len); + if (status) { + nlmsg_free(msg); + return -EPERM; + } + + genlmsg_multicast_netns(cld80211_fam, &init_net, msg, 0, + mcgroup_id, flags); + return 0; +} + +/** + * nl_srv_bcast() - wrapper function to do broadcast events to user space apps + * @skb: the socket buffer to send + * @mcgroup_id: multicast group id + * @app_id: application id + * + * This function is common wrapper to send broadcast events to different + * user space applications. + * + * return: none + */ +int nl_srv_bcast(struct sk_buff *skb, int mcgroup_id, int app_id) +{ + struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data; + void *msg = NLMSG_DATA(nlh); + uint32_t msg_len = nlmsg_len(nlh); + int status; + + status = send_msg_to_cld80211(mcgroup_id, 0, app_id, msg, msg_len); + if (status) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "send msg to cld80211 fails for app id %d", app_id); + dev_kfree_skb(skb); + return -EPERM; + } + + dev_kfree_skb(skb); + return 0; +} +qdf_export_symbol(nl_srv_bcast); + +/** + * nl_srv_ucast() - wrapper function to do unicast events to user space apps + * @skb: the socket buffer to send + * @dst_pid: destination process IF + * @flag: flags + * @app_id: application id + * @mcgroup_id: Multicast group ID + * + * This function is common wrapper to send unicast events to different + * user space applications. This internally used broadcast API with multicast + * group mcgrp_id. This wrapper serves as a common API in both + * new generic netlink infra and legacy implementation. + * + * return: zero on success, error code otherwise + */ +int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag, + int app_id, int mcgroup_id) +{ + struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data; + void *msg = NLMSG_DATA(nlh); + uint32_t msg_len = nlmsg_len(nlh); + int status; + + status = send_msg_to_cld80211(mcgroup_id, dst_pid, app_id, + msg, msg_len); + if (status) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "send msg to cld80211 fails for app id %d", app_id); + dev_kfree_skb(skb); + return -EPERM; + } + + dev_kfree_skb(skb); + return 0; +} + +#elif !defined(MULTI_IF_NAME) || defined(MULTI_IF_LOG) + +/* Global variables */ +static DEFINE_MUTEX(nl_srv_sem); +static struct sock *nl_srv_sock; +static nl_srv_msg_callback nl_srv_msg_handler[NLINK_MAX_CALLBACKS]; + +/* Forward declaration */ +static void nl_srv_rcv(struct sk_buff *sk); +static void nl_srv_rcv_skb(struct sk_buff *skb); +static void nl_srv_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh); + +/* + * Initialize the netlink service. + * Netlink service is usable after this. + */ +int nl_srv_init(void *wiphy, int proto) +{ + int retcode = 0; + struct netlink_kernel_cfg cfg = { + .groups = WLAN_NLINK_MCAST_GRP_ID, + .input = nl_srv_rcv + }; + + nl_srv_sock = netlink_kernel_create(&init_net, proto, + &cfg); + + if (nl_srv_sock) { + memset(nl_srv_msg_handler, 0, sizeof(nl_srv_msg_handler)); + } else { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "NLINK: netlink_kernel_create failed"); + retcode = -ECONNREFUSED; + } + return retcode; +} + +/* + * Deinit the netlink service. + * Netlink service is unusable after this. + */ +void nl_srv_exit(void) +{ + if (nl_srv_is_initialized() == 0) + netlink_kernel_release(nl_srv_sock); + + nl_srv_sock = NULL; +} + +/* + * Register a message handler for a specified module. + * Each module (e.g. WLAN_NL_MSG_BTC )will register a + * handler to handle messages addressed to it. + */ +int nl_srv_register(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler) +{ + int retcode = 0; + + if ((msg_type >= WLAN_NL_MSG_BASE) && (msg_type < WLAN_NL_MSG_MAX) && + msg_handler) { + nl_srv_msg_handler[msg_type - WLAN_NL_MSG_BASE] = msg_handler; + } else { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: nl_srv_register failed for msg_type %d", + msg_type); + retcode = -EINVAL; + } + + return retcode; +} + +qdf_export_symbol(nl_srv_register); + +/* + * Unregister the message handler for a specified module. + */ +int nl_srv_unregister(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler) +{ + int retcode = 0; + + if ((msg_type >= WLAN_NL_MSG_BASE) && (msg_type < WLAN_NL_MSG_MAX) && + (nl_srv_msg_handler[msg_type - WLAN_NL_MSG_BASE] == msg_handler)) { + nl_srv_msg_handler[msg_type - WLAN_NL_MSG_BASE] = NULL; + } else { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: nl_srv_unregister failed for msg_type %d", + msg_type); + retcode = -EINVAL; + } + + return retcode; +} + +/* + * Unicast the message to the process in user space identfied + * by the dst-pid + */ +int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag) +{ + int err = -EINVAL; + + NETLINK_CB(skb).portid = 0; /* sender's pid */ + NETLINK_CB(skb).dst_group = 0; /* not multicast */ + + if (nl_srv_sock) { + err = netlink_unicast(nl_srv_sock, skb, dst_pid, flag); + if (err < 0) + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: netlink_unicast to pid[%d] failed, ret[%d]", + dst_pid, err); + } else { + dev_kfree_skb(skb); + } + + return err; +} + +/* + * Broadcast the message. Broadcast will return an error if + * there are no listeners + */ +int nl_srv_bcast(struct sk_buff *skb) +{ + int err = -EINVAL; + int flags = GFP_KERNEL; + + if (in_interrupt() || irqs_disabled() || in_atomic()) + flags = GFP_ATOMIC; + + NETLINK_CB(skb).portid = 0; /* sender's pid */ + NETLINK_CB(skb).dst_group = WLAN_NLINK_MCAST_GRP_ID; /* destination group */ + + if (nl_srv_sock) { + err = netlink_broadcast(nl_srv_sock, skb, 0, + WLAN_NLINK_MCAST_GRP_ID, flags); + if ((err < 0) && (err != -ESRCH)) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: netlink_broadcast failed err = %d", + err); + dev_kfree_skb(skb); + } + } else + dev_kfree_skb(skb); + return err; +} +qdf_export_symbol(nl_srv_bcast); + +/* + * Processes the Netlink socket input queue. + * Dequeue skb's from the socket input queue and process + * all the netlink messages in that skb, before moving + * to the next skb. + */ +static void nl_srv_rcv(struct sk_buff *sk) +{ + mutex_lock(&nl_srv_sem); + nl_srv_rcv_skb(sk); + mutex_unlock(&nl_srv_sem); +} + +/* + * Each skb could contain multiple Netlink messages. Process all the + * messages in one skb and discard malformed skb's silently. + */ +static void nl_srv_rcv_skb(struct sk_buff *skb) +{ + struct nlmsghdr *nlh; + + while (skb->len >= NLMSG_SPACE(0)) { + u32 rlen; + + nlh = (struct nlmsghdr *)skb->data; + + if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: Invalid " + "Netlink message: skb[%pK], len[%d], nlhdr[%pK], nlmsg_len[%d]", + skb, skb->len, nlh, nlh->nlmsg_len); + return; + } + + rlen = NLMSG_ALIGN(nlh->nlmsg_len); + if (rlen > skb->len) + rlen = skb->len; + nl_srv_rcv_msg(skb, nlh); + skb_pull(skb, rlen); + } +} + +/* + * Process a netlink message. + * Each netlink message will have a message of type tAniMsgHdr inside. + */ +static void nl_srv_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) +{ + int type; + + /* Only requests are handled by kernel now */ + if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: Received Invalid NL Req type [%x]", + nlh->nlmsg_flags); + return; + } + + type = nlh->nlmsg_type; + + /* Unknown message */ + if (type < WLAN_NL_MSG_BASE || type >= WLAN_NL_MSG_MAX) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: Received Invalid NL Msg type [%x]", type); + return; + } + + /* + * All the messages must at least carry the tAniMsgHdr + * Drop any message with invalid length + */ + if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(tAniMsgHdr))) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: Received NL Msg with invalid len[%x]", + nlh->nlmsg_len); + return; + } + + /* turn type into dispatch table offset */ + type -= WLAN_NL_MSG_BASE; + + /* dispatch to handler */ + if (nl_srv_msg_handler[type]) { + (nl_srv_msg_handler[type])(skb); + } else { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: No handler for Netlink Msg [0x%X]", type); + } +} + +/** + * nl_srv_is_initialized() - This function is used check if the netlink + * service is initialized + * + * This function is used check if the netlink service is initialized + * + * Return: Return -EPERM if the service is not initialized + * + */ +int nl_srv_is_initialized(void) +{ + if (nl_srv_sock) + return 0; + + return -EPERM; +} +qdf_export_symbol(nl_srv_is_initialized); + +#else + +int nl_srv_init(void *wiphy, int proto) +{ + return 0; +} + +void nl_srv_exit(void) +{ +} + +int nl_srv_register(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler) +{ + return 0; +} + +int nl_srv_unregister(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler) +{ + return 0; +} + +int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag) +{ + dev_kfree_skb(skb); + return 0; +} + +int nl_srv_bcast(struct sk_buff *skb) +{ + dev_kfree_skb(skb); + return 0; +} +qdf_export_symbol(nl_srv_bcast); + +int nl_srv_is_initialized(void) +{ + return -EPERM; +} +qdf_export_symbol(nl_srv_is_initialized); +#endif + +/** + * nl_srv_ucast_oem() - Wrapper function to send ucast msgs to OEM + * @skb: sk buffer pointer + * @dst_pid: Destination PID + * @flag: flags + * + * Sends the ucast message to OEM with generic nl socket if CNSS_GENL + * is enabled. Else, use the legacy netlink socket to send. + * + * Return: None + */ +#ifdef CNSS_GENL +void nl_srv_ucast_oem(struct sk_buff *skb, int dst_pid, int flag) +{ + nl_srv_ucast(skb, dst_pid, flag, WLAN_NL_MSG_OEM, + CLD80211_MCGRP_OEM_MSGS); +} +#else +void nl_srv_ucast_oem(struct sk_buff *skb, int dst_pid, int flag) +{ + nl_srv_ucast(skb, dst_pid, flag); +} + +qdf_export_symbol(nl_srv_ucast_oem); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog.h b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog.h new file mode 100644 index 0000000000000000000000000000000000000000..ef370ee0863aa0b1468e79668e6d709e38dd671a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2013-2014, 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _PKTLOG_ +#define _PKTLOG_ +#ifndef REMOVE_PKT_LOG + +/** + * @typedef ol_pktlog_dev_handle + * @brief opaque handle for pktlog device object + */ +struct ol_pktlog_dev_t; +typedef struct ol_pktlog_dev_t *ol_pktlog_dev_handle; +#endif /* #ifndef REMOVE_PKT_LOG */ +#endif /* _PKTLOG_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac.h b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac.h new file mode 100644 index 0000000000000000000000000000000000000000..a7d46b728aec7507602c72adb9b084736b0df442 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac.h @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _PKTLOG_AC_H_ +#define _PKTLOG_AC_H_ + +#include "hif.h" +#ifndef REMOVE_PKT_LOG +#include "ol_if_athvar.h" +#include "osdep.h" +#include +#include +#include +#include +#include +#include + +#define NO_REG_FUNCS 4 + +/* Locking interface for pktlog */ +#define PKTLOG_LOCK_INIT(_pl_info) qdf_spinlock_create(&(_pl_info)->log_lock) +#define PKTLOG_LOCK_DESTROY(_pl_info) \ + qdf_spinlock_destroy(&(_pl_info)->log_lock) +#define PKTLOG_LOCK(_pl_info) qdf_spin_lock_bh(&(_pl_info)->log_lock) +#define PKTLOG_UNLOCK(_pl_info) qdf_spin_unlock_bh(&(_pl_info)->log_lock) + +#define PKTLOG_MODE_SYSTEM 1 +#define PKTLOG_MODE_ADAPTER 2 + +/* + * The proc entry starts with magic number and version field which will be + * used by post processing scripts. These fields are not needed by applications + * that do not use these scripts. This is skipped using the offset value. + */ +#define PKTLOG_READ_OFFSET 8 + +/* forward declaration for cdp_pdev */ +struct cdp_pdev; + +/* Opaque softc */ +struct ol_ath_generic_softc_t; +typedef struct ol_ath_generic_softc_t *ol_ath_generic_softc_handle; +extern void pktlog_disable_adapter_logging(struct hif_opaque_softc *scn); +extern int pktlog_alloc_buf(struct hif_opaque_softc *scn); +extern void pktlog_release_buf(struct hif_opaque_softc *scn); + +ssize_t pktlog_read_proc_entry(char *buf, size_t nbytes, loff_t *ppos, + struct ath_pktlog_info *pl_info, bool *read_complete); + +/** + * wdi_pktlog_unsubscribe() - Unsubscribe pktlog callbacks + * @pdev_id: pdev id + * @log_state: Pktlog registration + * + * Return: zero on success, non-zero on failure + */ +A_STATUS wdi_pktlog_unsubscribe(uint8_t pdev_id, uint32_t log_state); + +struct ol_pl_arch_dep_funcs { + void (*pktlog_init)(struct hif_opaque_softc *scn); + int (*pktlog_enable)(struct hif_opaque_softc *scn, int32_t log_state, + bool ini, uint8_t user, + uint32_t is_iwpriv_command); + int (*pktlog_setsize)(struct hif_opaque_softc *scn, int32_t log_state); + int (*pktlog_disable)(struct hif_opaque_softc *scn); +}; + +struct ol_pl_os_dep_funcs { + int (*pktlog_attach)(struct hif_opaque_softc *scn); + void (*pktlog_detach)(struct hif_opaque_softc *scn); + +}; + +struct ath_pktlog_wmi_params { + WMI_PKTLOG_EVENT pktlog_event; + WMI_CMD_ID cmd_id; + bool ini_triggered; + uint8_t user_triggered; +}; + +extern struct ol_pl_arch_dep_funcs ol_pl_funcs; +extern struct ol_pl_os_dep_funcs *g_ol_pl_os_dep_funcs; + +/* Pktlog handler to save the state of the pktlogs */ +struct pktlog_dev_t { + struct ol_pl_arch_dep_funcs *pl_funcs; + struct ath_pktlog_info *pl_info; + ol_ath_generic_softc_handle scn; + uint8_t pdev_id; + char *name; + bool tgt_pktlog_alloced; + bool is_pktlog_cb_subscribed; + bool mt_pktlog_enabled; + uint32_t htc_err_cnt; + uint8_t htc_endpoint; + void *htc_pdev; + bool vendor_cmd_send; + uint8_t callback_type; + uint32_t invalid_packets; +}; + +#define PKTLOG_SYSCTL_SIZE 14 +#define PKTLOG_MAX_SEND_QUEUE_DEPTH 64 + +/* + * Linux specific pktlog state information + */ +struct ath_pktlog_info_lnx { + struct ath_pktlog_info info; + struct ctl_table sysctls[PKTLOG_SYSCTL_SIZE]; + struct proc_dir_entry *proc_entry; + struct ctl_table_header *sysctl_header; +}; + +#define PL_INFO_LNX(_pl_info) ((struct ath_pktlog_info_lnx *)(_pl_info)) + +extern struct ol_pktlog_dev_t ol_pl_dev; + +/* + * WDI related data and functions + * Callback function to the WDI events + */ +void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data, + u_int16_t peer_id, uint32_t status); + +void pktlog_init(struct hif_opaque_softc *scn); +int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state, + bool, uint8_t, uint32_t); +int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state, + bool ini_triggered, uint8_t user_triggered, + uint32_t is_iwpriv_command); +int pktlog_setsize(struct hif_opaque_softc *scn, int32_t log_state); +int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff); +int pktlog_disable(struct hif_opaque_softc *scn); +int pktlogmod_init(void *context); +void pktlogmod_exit(void *context); +int pktlog_htc_attach(void); + +/** + * pktlog_process_fw_msg() - process packetlog message + * @pdev_id: physical device instance id + * @msg_word: message buffer + * @msg_len: message length + * + * Return: None + */ +void pktlog_process_fw_msg(uint8_t pdev_id, uint32_t *msg_word, + uint32_t msg_len); +void lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data, + u_int16_t peer_id, uint32_t status); + +#define ol_pktlog_attach(_scn) \ + do { \ + if (g_ol_pl_os_dep_funcs) { \ + g_ol_pl_os_dep_funcs->pktlog_attach(_scn); \ + } \ + } while (0) + +#define ol_pktlog_detach(_scn) \ + do { \ + if (g_ol_pl_os_dep_funcs) { \ + g_ol_pl_os_dep_funcs->pktlog_detach(_scn); \ + } \ + } while (0) + +#else /* REMOVE_PKT_LOG */ +#define ol_pktlog_attach(_scn) ({ (void)_scn; }) +#define ol_pktlog_detach(_scn) ({ (void)_scn; }) +static inline void pktlog_init(struct hif_opaque_softc *scn) +{ + return; +} + +static inline int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state, + bool ini, uint8_t user, + uint32_t is_iwpriv_command) +{ + return 0; +} + +static inline +int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state, + bool ini_triggered, uint8_t user_triggered, + uint32_t is_iwpriv_command) +{ + return 0; +} + +static inline int pktlog_setsize(struct hif_opaque_softc *scn, + int32_t log_state) +{ + return 0; +} + +static inline int pktlog_clearbuff(struct hif_opaque_softc *scn, + bool clear_buff) +{ + return 0; +} + +static inline int pktlog_disable(struct hif_opaque_softc *scn) +{ + return 0; +} + +static inline int pktlog_htc_attach(void) +{ + return 0; +} + +static inline void pktlog_process_fw_msg(uint8_t pdev_id, uint32_t *msg_word, + uint32_t msg_len) +{ } +#endif /* REMOVE_PKT_LOG */ +#endif /* _PKTLOG_AC_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac_api.h b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac_api.h new file mode 100644 index 0000000000000000000000000000000000000000..bc75425b93fbb2c2df8653c82fd8f2210c4f451c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac_api.h @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2012-2014, 2016-2018, 2020 The Linux Foundation. + * All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * The file is used to define structures that are shared between + * kernel space and user space pktlog application. + */ + +#ifndef _PKTLOG_AC_API_ +#define _PKTLOG_AC_API_ +#ifndef REMOVE_PKT_LOG + +/** + * @typedef ol_pktlog_dev_handle + * @brief opaque handle for pktlog device object + */ +struct ol_pktlog_dev_t; + +/** + * @typedef hif_opaque_softc_handle + * @brief opaque handle for hif_opaque_softc + */ +struct hif_opaque_softc; +typedef struct hif_opaque_softc *hif_opaque_softc_handle; + +enum pktlog_callback_regtype { + PKTLOG_DEFAULT_CALLBACK_REGISTRATION, + PKTLOG_LITE_CALLBACK_REGISTRATION +}; + +/** + * @typedef net_device_handle + * @brief opaque handle linux phy device object + */ +struct net_device; +typedef struct net_device *net_device_handle; + +struct pktlog_dev_t; + +void pktlog_sethandle(struct pktlog_dev_t **pl_handle, + hif_opaque_softc_handle scn); +void pktlog_set_pdev_id(struct pktlog_dev_t *pl_dev, uint8_t pdev_id); + +void *get_txrx_context(void); + +struct pktlog_dev_t *get_pktlog_handle(void); +void pktlog_set_callback_regtype(enum pktlog_callback_regtype callback_type); + +/* Packet log state information */ +#ifndef _PKTLOG_INFO +#define _PKTLOG_INFO + +/** + * enum ath_pktlog_state - pktlog status + * @PKTLOG_OPR_IN_PROGRESS : pktlog command in progress + * @PKTLOG_OPR_IN_PROGRESS_READ_START: pktlog read is issued + * @PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED: + * as part of pktlog read, pktlog is disabled + * @PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE: + * as part of read, till pktlog read is complete + * @PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE: + * as part of read, pktlog clear buffer is done + * @PKTLOG_OPR_NOT_IN_PROGRESS: no pktlog command in progress + */ +enum ath_pktlog_state { + PKTLOG_OPR_IN_PROGRESS = 0, + PKTLOG_OPR_IN_PROGRESS_READ_START, + PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED, + PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE, + PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE, + PKTLOG_OPR_NOT_IN_PROGRESS +}; + +struct ath_pktlog_info { + struct ath_pktlog_buf *buf; + uint32_t log_state; + uint32_t saved_state; + uint32_t options; + /* Initial saved state: It will save the log state in pktlog + * open and used in pktlog release after + * pktlog read is complete. + */ + uint32_t init_saved_state; + enum ath_pktlog_state curr_pkt_state; + + /* Size of buffer in bytes */ + int32_t buf_size; + qdf_spinlock_t log_lock; + struct mutex pktlog_mutex; + + /* Threshold of TCP SACK packets for triggered stop */ + int sack_thr; + + /* # of tail packets to log after triggered stop */ + int tail_length; + + /* throuput threshold in bytes for triggered stop */ + uint32_t thruput_thresh; + + /* (aggregated or single) packet size in bytes */ + uint32_t pktlen; + + /* a temporary variable for counting TX throughput only */ + /* PER threshold for triggered stop, 10 for 10%, range [1, 99] */ + uint32_t per_thresh; + + /* Phyerr threshold for triggered stop */ + uint32_t phyerr_thresh; + + /* time period for counting trigger parameters, in milisecond */ + uint32_t trigger_interval; + uint32_t start_time_thruput; + uint32_t start_time_per; +}; +#endif /* _PKTLOG_INFO */ +#else /* REMOVE_PKT_LOG */ +typedef void *pktlog_dev_handle; +#define pktlog_set_pdev_id(pl_dev, pdev_id) \ + do { \ + (void)pl_dev; \ + (void)pdev_id; \ + } while (0) + +#define pktlog_sethandle(pl_handle, scn) \ + do { \ + (void)pl_handle; \ + (void)scn; \ + } while (0) + +#define ol_pl_set_name(dev) \ + do { \ + (void)scn; \ + (void)dev; \ + } while (0) + +#endif /* REMOVE_PKT_LOG */ +#endif /* _PKTLOG_AC_API_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac_i.h b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac_i.h new file mode 100644 index 0000000000000000000000000000000000000000..5f7d9beba2ff32c6abc0b25d305f0ee4c0780334 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac_i.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _PKTLOG_AC_I_ +#define _PKTLOG_AC_I_ + +#ifndef REMOVE_PKT_LOG + +#include +#include + + +#define PKTLOG_TAG "ATH_PKTLOG" +#define PKTLOG_DEFAULT_BUFSIZE (10 * 1024 * 1024) /* 10MB */ +#define PKTLOG_DEFAULT_SACK_THR 3 +#define PKTLOG_DEFAULT_TAIL_LENGTH 100 +#define PKTLOG_DEFAULT_THRUPUT_THRESH (64 * 1024) +#define PKTLOG_DEFAULT_PER_THRESH 30 +#define PKTLOG_DEFAULT_PHYERR_THRESH 300 +#define PKTLOG_DEFAULT_TRIGGER_INTERVAL 500 + +/* Max Pktlog buffer size received from fw/hw */ +#define MAX_PKTLOG_RECV_BUF_SIZE 2048 + +struct ath_pktlog_arg { + struct ath_pktlog_info *pl_info; + uint32_t flags; + uint16_t missed_cnt; +#ifdef HELIUMPLUS + uint8_t log_type; + uint8_t macId; +#else + uint16_t log_type; +#endif + size_t log_size; + uint16_t timestamp; +#ifdef PKTLOG_HAS_SPECIFIC_DATA + uint32_t type_specific_data; +#endif + char *buf; +}; + +void pktlog_getbuf_intsafe(struct ath_pktlog_arg *plarg); +char *pktlog_getbuf(struct pktlog_dev_t *pl_dev, + struct ath_pktlog_info *pl_info, + size_t log_size, struct ath_pktlog_hdr *pl_hdr); + +#ifdef PKTLOG_HAS_SPECIFIC_DATA +/** + * pktlog_hdr_set_specific_data() - set type specific data + * @log_hdr: pktlog header + * @type_specific_data: type specific data + * + * Return: None + */ +void +pktlog_hdr_set_specific_data(struct ath_pktlog_hdr *log_hdr, + uint32_t type_specific_data); + +/** + * pktlog_hdr_get_specific_data() - get type specific data + * @log_hdr: pktlog header + * @type_specific_data: type specific data + * + * Return: pktlog subtype + */ +uint32_t +pktlog_hdr_get_specific_data(struct ath_pktlog_hdr *log_hdr); + +/** + * pktlog_arg_set_specific_data() - set type specific data + * @log_hdr: pktlog arg + * @type_specific_data: type specific data + * + * Return: None + */ +void +pktlog_arg_set_specific_data(struct ath_pktlog_arg *plarg, + uint32_t type_specific_data); + +/** + * pktlog_arg_get_specific_data() - set type specific data + * @log_hdr: pktlog arg + * @type_specific_data: type specific data + * + * Return: pktlog subtype + */ +uint32_t +pktlog_arg_get_specific_data(struct ath_pktlog_arg *plarg); +#else +static inline void +pktlog_hdr_set_specific_data(struct ath_pktlog_hdr *log_hdr, + uint32_t type_specific_data) +{ +} + +static inline uint32_t +pktlog_hdr_get_specific_data(struct ath_pktlog_hdr *log_hdr) +{ + return 0; +} + +static inline void +pktlog_arg_set_specific_data(struct ath_pktlog_arg *plarg, + uint32_t type_specific_data) +{ +} + +static inline uint32_t +pktlog_arg_get_specific_data(struct ath_pktlog_arg *plarg) +{ + return 0; +} +#endif /* PKTLOG_HAS_SPECIFIC_DATA */ +#endif /* REMOVE_PKT_LOG */ +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_wifi2.h b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_wifi2.h new file mode 100644 index 0000000000000000000000000000000000000000..d24d52709539c0f2101361f1e72d3692a5911ad1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_wifi2.h @@ -0,0 +1,164 @@ +/** + * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "ol_txrx_types.h" +#include "ol_htt_tx_api.h" +#include "ol_tx_desc.h" +#include "qdf_mem.h" +#include "htt.h" +#include "htt_internal.h" +#include "pktlog_ac_i.h" +#include "wma_api.h" +#include "wlan_logging_sock_svc.h" + +#define TX_DESC_ID_LOW_MASK 0xffff +#define TX_DESC_ID_LOW_SHIFT 0 +#define TX_DESC_ID_HIGH_MASK 0xffff0000 +#define TX_DESC_ID_HIGH_SHIFT 16 + +#ifndef REMOVE_PKT_LOG +/** + * process_tx_info() - process tx pktlog buffers + * @txrx_pdev: ol pdev handle + * @data: pktlog buffer + * + * Return: 0 - success/non-zero - failure + */ +A_STATUS process_tx_info(struct cdp_pdev *txrx_pdev, void *data); + +/** + * process_rx_info_remote() - process rx pktlog buffers + * @txrx_pdev: ol pdev handle + * @data: pktlog buffer + * + * Return: 0 - success/non-zero - failure + */ +A_STATUS process_rx_info_remote(void *pdev, void *data); + +/** + * process_rx_info() - process rx pktlog buffers + * @txrx_pdev: ol pdev handle + * @data: pktlog buffer + * + * Return: 0 - success/non-zero - failure + */ +A_STATUS process_rx_info(void *pdev, void *data); + +/** + * process_rate_find() - process rate event pktlog buffers + * @txrx_pdev: ol pdev handle + * @data: pktlog buffer + * + * Return: 0 - success/non-zero - failure + */ +A_STATUS process_rate_find(void *pdev, void *data); + +/** + * process_rate_update() - process rate event pktlog buffers + * @txrx_pdev: ol pdev handle + * @data: pktlog buffer + * + * Return: 0 - success/non-zero - failure + */ +A_STATUS process_rate_update(void *pdev, void *data); + +/** + * process_sw_event() - process sw event pktlog buffers + * @txrx_pdev: ol pdev handle + * @data: pktlog buffer + * + * Return: 0 - success/non-zero - failure + */ +A_STATUS process_sw_event(void *pdev, void *data); +#else +static inline +A_STATUS process_tx_info(struct cdp_pdev *txrx_pdev, void *data) +{ + return 0; +} + +static inline +A_STATUS process_rx_info_remote(void *pdev, void *data) +{ + return 0; +} + +static inline +A_STATUS process_rx_info(void *pdev, void *data) +{ + return 0; +} + +static inline +A_STATUS process_rate_find(void *pdev, void *data) +{ + return 0; +} + +static inline +A_STATUS process_rate_update(void *pdev, void *data) +{ + return 0; +} + +static inline +A_STATUS process_sw_event(void *pdev, void *data) +{ + return 0; +} +#endif /* REMOVE_PKT_LOG */ + +/** + * process_offload_pktlog_wifi3() - Process full pktlog events + * pdev: abstract pdev handle + * data: pktlog buffer + * + * Return: zero on success, non-zero on failure + */ +static inline A_STATUS +process_offload_pktlog_wifi3(struct cdp_pdev *pdev, void *data) +{ + return 0; +} + +/** + * process_rx_desc_remote_wifi3() - Process pktlog buffers received + * from monitor status ring + * @pdev: pdev handle + * @data: pktlog buffer pointer + * + * Return: 0 - success/non-zero - failure + */ +static inline int +process_rx_desc_remote_wifi3(void *pdev, void *data) +{ + return 0; +} + +/** + * process_pktlog_lite_wifi3() - Process pktlog buffers received + * from monitor status ring + * @pdev: pdev handle + * @data: pktlog buffer pointer + * + * Return: 0 - success/non-zero - failure + */ +static inline int +process_pktlog_lite_wifi3(void *context, void *log_data, + uint16_t log_type) +{ + return 0; +} diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_wifi3.h b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_wifi3.h new file mode 100644 index 0000000000000000000000000000000000000000..5ffe4f85f9c4e30f276ad3c4255a69c67556cb19 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_wifi3.h @@ -0,0 +1,148 @@ +/** + * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "pktlog_ac_i.h" +#include "wlan_logging_sock_svc.h" + +#ifndef REMOVE_PKT_LOG +/** + * process_offload_pktlog_wifi3() - Process full pktlog events + * pdev: abstract pdev handle + * data: pktlog buffer + * + * Return: zero on success, non-zero on failure + */ +A_STATUS +process_offload_pktlog_wifi3(struct cdp_pdev *pdev, void *data); + +/** + * process_rx_desc_remote_wifi3() - Process pktlog buffers received + * from monitor status ring + * @pdev: pdev handle + * @data: pktlog buffer pointer + * + * Return: 0 - success/non-zero - failure + */ +int process_rx_desc_remote_wifi3(void *pdev, void *data); + +/** + * process_pktlog_lite_wifi3() - Process pktlog buffers received + * from monitor status ring + * @pdev: pdev handle + * @data: pktlog buffer pointer + * + * Return: 0 - success/non-zero - failure + */ +int process_pktlog_lite_wifi3(void *context, void *log_data, + uint16_t log_type); +#else +static inline A_STATUS +process_offload_pktlog_wifi3(struct cdp_pdev *pdev, void *data) +{ + return 0; +} + +static inline +int process_rx_desc_remote_wifi3(void *pdev, void *data) +{ + return 0; +} + +static inline int +process_pktlog_lite_wifi3(void *context, void *log_data, + uint16_t log_type) +{ + return 0; +} +#endif /* REMOVE_PKT_LOG */ + +/** + * process_tx_info() - process tx pktlog buffers + * @txrx_pdev: ol pdev handle + * @data: pktlog buffer + * + * Return: 0 - success/non-zero - failure + */ +static inline +A_STATUS process_tx_info(struct cdp_pdev *txrx_pdev, void *data) +{ + return 0; +} + +/** + * process_rx_info_remote() - process rx pktlog buffers + * @txrx_pdev: ol pdev handle + * @data: pktlog buffer + * + * Return: 0 - success/non-zero - failure + */ +static inline +A_STATUS process_rx_info_remote(void *pdev, void *data) +{ + return 0; +} + +/** + * process_rx_remote() - process rx pktlog buffers + * @txrx_pdev: ol pdev handle + * @data: pktlog buffer + * + * Return: 0 - success/non-zero - failure + */ +static inline +A_STATUS process_rx_info(void *pdev, void *data) +{ + return 0; +} + +/** + * process_rate_find() - process rate event pktlog buffers + * @txrx_pdev: ol pdev handle + * @data: pktlog buffer + * + * Return: 0 - success/non-zero - failure + */ +static inline +A_STATUS process_rate_find(void *pdev, void *data) +{ + return 0; +} + +/** + * process_rate_update() - process rate event pktlog buffers + * @txrx_pdev: ol pdev handle + * @data: pktlog buffer + * + * Return: 0 - success/non-zero - failure + */ +static inline +A_STATUS process_rate_update(void *pdev, void *data) +{ + return 0; +} + +/** + * process_sw_event() - process sw event pktlog buffers + * @txrx_pdev: ol pdev handle + * @data: pktlog buffer + * + * Return: 0 - success/non-zero - failure + */ +static inline +A_STATUS process_sw_event(void *pdev, void *data) +{ + return 0; +} diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/linux_ac.c b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/linux_ac.c new file mode 100644 index 0000000000000000000000000000000000000000..5fe1391612753f13fb9e6eb4452d70cb38489087 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/linux_ac.c @@ -0,0 +1,1072 @@ +/* + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef REMOVE_PKT_LOG +#ifndef EXPORT_SYMTAB +#define EXPORT_SYMTAB +#endif +#ifndef __KERNEL__ +#define __KERNEL__ +#endif +/* + * Linux specific implementation of Pktlogs for 802.11ac + */ +#include +#include +#include +#include +#include +#include +#include +#include "i_host_diag_core_log.h" +#include "host_diag_core_log.h" +#include "ani_global.h" + +#define PKTLOG_DEVNAME_SIZE 32 +#define MAX_WLANDEV 1 + +#ifdef MULTI_IF_NAME +#define PKTLOG_PROC_DIR "ath_pktlog" MULTI_IF_NAME +#else +#define PKTLOG_PROC_DIR "ath_pktlog" +#endif + +/* Permissions for creating proc entries */ +#define PKTLOG_PROC_PERM 0444 +#define PKTLOG_PROCSYS_DIR_PERM 0555 +#define PKTLOG_PROCSYS_PERM 0644 + +#ifndef __MOD_INC_USE_COUNT +#define PKTLOG_MOD_INC_USE_COUNT do { \ + if (!try_module_get(THIS_MODULE)) { \ + qdf_nofl_info("try_module_get failed"); \ + } } while (0) + +#define PKTLOG_MOD_DEC_USE_COUNT module_put(THIS_MODULE) +#else +#define PKTLOG_MOD_INC_USE_COUNT MOD_INC_USE_COUNT +#define PKTLOG_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT +#endif + +static struct ath_pktlog_info *g_pktlog_info; + +static struct proc_dir_entry *g_pktlog_pde; + +static DEFINE_MUTEX(proc_mutex); + +static int pktlog_attach(struct hif_opaque_softc *scn); +static void pktlog_detach(struct hif_opaque_softc *scn); +static int pktlog_open(struct inode *i, struct file *f); +static int pktlog_release(struct inode *i, struct file *f); +static ssize_t pktlog_read(struct file *file, char *buf, size_t nbytes, + loff_t *ppos); + +static struct file_operations pktlog_fops = { + open: pktlog_open, + release:pktlog_release, + read : pktlog_read, +}; + +void pktlog_disable_adapter_logging(struct hif_opaque_softc *scn) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + if (pl_dev) + pl_dev->pl_info->log_state = 0; +} + +int pktlog_alloc_buf(struct hif_opaque_softc *scn) +{ + uint32_t page_cnt; + unsigned long vaddr; + struct page *vpg; + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + struct ath_pktlog_buf *buffer; + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_nofl_info(PKTLOG_TAG + "%s: pdev_txrx_handle->pl_dev is null", __func__); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + + page_cnt = (sizeof(*(pl_info->buf)) + pl_info->buf_size) / PAGE_SIZE; + + qdf_spin_lock_bh(&pl_info->log_lock); + if (pl_info->buf) { + qdf_spin_unlock_bh(&pl_info->log_lock); + qdf_nofl_info(PKTLOG_TAG "Buffer is already in use"); + return -EINVAL; + } + qdf_spin_unlock_bh(&pl_info->log_lock); + + buffer = vmalloc((page_cnt + 2) * PAGE_SIZE); + if (!buffer) { + return -ENOMEM; + } + + buffer = (struct ath_pktlog_buf *) + (((unsigned long)(buffer) + PAGE_SIZE - 1) + & PAGE_MASK); + + for (vaddr = (unsigned long)(buffer); + vaddr < ((unsigned long)(buffer) + (page_cnt * PAGE_SIZE)); + vaddr += PAGE_SIZE) { + vpg = vmalloc_to_page((const void *)vaddr); + SetPageReserved(vpg); + } + + qdf_spin_lock_bh(&pl_info->log_lock); + if (pl_info->buf) + pktlog_release_buf(scn); + + pl_info->buf = buffer; + qdf_spin_unlock_bh(&pl_info->log_lock); + return 0; +} + +void pktlog_release_buf(struct hif_opaque_softc *scn) +{ + unsigned long page_cnt; + unsigned long vaddr; + struct page *vpg; + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("%s: invalid pl_dev handle", __func__); + return; + } + + if (!pl_dev->pl_info) { + qdf_print("%s: invalid pl_dev handle", __func__); + return; + } + + pl_info = pl_dev->pl_info; + + page_cnt = ((sizeof(*(pl_info->buf)) + pl_info->buf_size) / + PAGE_SIZE) + 1; + + for (vaddr = (unsigned long)(pl_info->buf); + vaddr < (unsigned long)(pl_info->buf) + (page_cnt * PAGE_SIZE); + vaddr += PAGE_SIZE) { + vpg = vmalloc_to_page((const void *)vaddr); + ClearPageReserved(vpg); + } + + vfree(pl_info->buf); + pl_info->buf = NULL; +} + +static void pktlog_cleanup(struct ath_pktlog_info *pl_info) +{ + pl_info->log_state = 0; + PKTLOG_LOCK_DESTROY(pl_info); + mutex_destroy(&pl_info->pktlog_mutex); +} + +/* sysctl procfs handler to enable pktlog */ +static int +qdf_sysctl_decl(ath_sysctl_pktlog_enable, ctl, write, filp, buffer, lenp, ppos) +{ + int ret, enable; + ol_ath_generic_softc_handle scn; + struct pktlog_dev_t *pl_dev; + + mutex_lock(&proc_mutex); + scn = (ol_ath_generic_softc_handle) ctl->extra1; + + if (!scn) { + mutex_unlock(&proc_mutex); + qdf_nofl_info("%s: Invalid scn context", __func__); + ASSERT(0); + return -EINVAL; + } + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + mutex_unlock(&proc_mutex); + qdf_nofl_info("%s: Invalid pktlog context", __func__); + ASSERT(0); + return -ENODEV; + } + + ctl->data = &enable; + ctl->maxlen = sizeof(enable); + + if (write) { + ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, + lenp, ppos); + if (ret == 0) { + ret = pl_dev->pl_funcs->pktlog_enable( + (struct hif_opaque_softc *)scn, enable, + cds_is_packet_log_enabled(), 0, 1); + } + else + QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG, + "Line:%d %s:proc_dointvec failed reason %d", + __LINE__, __func__, ret); + } else { + ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, + lenp, ppos); + if (ret) + QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG, + "Line:%d %s:proc_dointvec failed reason %d", + __LINE__, __func__, ret); + } + + ctl->data = NULL; + ctl->maxlen = 0; + mutex_unlock(&proc_mutex); + + return ret; +} + +static int get_pktlog_bufsize(struct pktlog_dev_t *pl_dev) +{ + return pl_dev->pl_info->buf_size; +} + +/* sysctl procfs handler to set/get pktlog size */ +static int +qdf_sysctl_decl(ath_sysctl_pktlog_size, ctl, write, filp, buffer, lenp, ppos) +{ + int ret, size; + ol_ath_generic_softc_handle scn; + struct pktlog_dev_t *pl_dev; + + mutex_lock(&proc_mutex); + scn = (ol_ath_generic_softc_handle) ctl->extra1; + + if (!scn) { + mutex_unlock(&proc_mutex); + qdf_nofl_info("%s: Invalid scn context", __func__); + ASSERT(0); + return -EINVAL; + } + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + mutex_unlock(&proc_mutex); + qdf_nofl_info("%s: Invalid pktlog handle", __func__); + ASSERT(0); + return -ENODEV; + } + + ctl->data = &size; + ctl->maxlen = sizeof(size); + + if (write) { + ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, + lenp, ppos); + if (ret == 0) + ret = pl_dev->pl_funcs->pktlog_setsize( + (struct hif_opaque_softc *)scn, size); + } else { + size = get_pktlog_bufsize(pl_dev); + ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, + lenp, ppos); + } + + ctl->data = NULL; + ctl->maxlen = 0; + mutex_unlock(&proc_mutex); + + return ret; +} + +/* Register sysctl table */ +static int pktlog_sysctl_register(struct hif_opaque_softc *scn) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_info_lnx *pl_info_lnx; + char *proc_name; + + if (pl_dev) { + pl_info_lnx = PL_INFO_LNX(pl_dev->pl_info); + proc_name = pl_dev->name; + } else { + pl_info_lnx = PL_INFO_LNX(g_pktlog_info); + proc_name = PKTLOG_PROC_SYSTEM; + } + + /* + * Setup the sysctl table for creating the following sysctl entries: + * /proc/sys/PKTLOG_PROC_DIR//enable for enabling/disabling + * pktlog + * /proc/sys/PKTLOG_PROC_DIR//size for changing the buffer size + */ + memset(pl_info_lnx->sysctls, 0, sizeof(pl_info_lnx->sysctls)); + pl_info_lnx->sysctls[0].procname = PKTLOG_PROC_DIR; + pl_info_lnx->sysctls[0].mode = PKTLOG_PROCSYS_DIR_PERM; + pl_info_lnx->sysctls[0].child = &pl_info_lnx->sysctls[2]; + + /* [1] is NULL terminator */ + pl_info_lnx->sysctls[2].procname = proc_name; + pl_info_lnx->sysctls[2].mode = PKTLOG_PROCSYS_DIR_PERM; + pl_info_lnx->sysctls[2].child = &pl_info_lnx->sysctls[4]; + + /* [3] is NULL terminator */ + pl_info_lnx->sysctls[4].procname = "enable"; + pl_info_lnx->sysctls[4].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[4].proc_handler = ath_sysctl_pktlog_enable; + pl_info_lnx->sysctls[4].extra1 = scn; + + pl_info_lnx->sysctls[5].procname = "size"; + pl_info_lnx->sysctls[5].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[5].proc_handler = ath_sysctl_pktlog_size; + pl_info_lnx->sysctls[5].extra1 = scn; + + pl_info_lnx->sysctls[6].procname = "options"; + pl_info_lnx->sysctls[6].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[6].proc_handler = proc_dointvec; + pl_info_lnx->sysctls[6].data = &pl_info_lnx->info.options; + pl_info_lnx->sysctls[6].maxlen = sizeof(pl_info_lnx->info.options); + + pl_info_lnx->sysctls[7].procname = "sack_thr"; + pl_info_lnx->sysctls[7].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[7].proc_handler = proc_dointvec; + pl_info_lnx->sysctls[7].data = &pl_info_lnx->info.sack_thr; + pl_info_lnx->sysctls[7].maxlen = sizeof(pl_info_lnx->info.sack_thr); + + pl_info_lnx->sysctls[8].procname = "tail_length"; + pl_info_lnx->sysctls[8].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[8].proc_handler = proc_dointvec; + pl_info_lnx->sysctls[8].data = &pl_info_lnx->info.tail_length; + pl_info_lnx->sysctls[8].maxlen = sizeof(pl_info_lnx->info.tail_length); + + pl_info_lnx->sysctls[9].procname = "thruput_thresh"; + pl_info_lnx->sysctls[9].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[9].proc_handler = proc_dointvec; + pl_info_lnx->sysctls[9].data = &pl_info_lnx->info.thruput_thresh; + pl_info_lnx->sysctls[9].maxlen = + sizeof(pl_info_lnx->info.thruput_thresh); + + pl_info_lnx->sysctls[10].procname = "phyerr_thresh"; + pl_info_lnx->sysctls[10].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[10].proc_handler = proc_dointvec; + pl_info_lnx->sysctls[10].data = &pl_info_lnx->info.phyerr_thresh; + pl_info_lnx->sysctls[10].maxlen = + sizeof(pl_info_lnx->info.phyerr_thresh); + + pl_info_lnx->sysctls[11].procname = "per_thresh"; + pl_info_lnx->sysctls[11].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[11].proc_handler = proc_dointvec; + pl_info_lnx->sysctls[11].data = &pl_info_lnx->info.per_thresh; + pl_info_lnx->sysctls[11].maxlen = sizeof(pl_info_lnx->info.per_thresh); + + pl_info_lnx->sysctls[12].procname = "trigger_interval"; + pl_info_lnx->sysctls[12].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[12].proc_handler = proc_dointvec; + pl_info_lnx->sysctls[12].data = &pl_info_lnx->info.trigger_interval; + pl_info_lnx->sysctls[12].maxlen = + sizeof(pl_info_lnx->info.trigger_interval); + /* [13] is NULL terminator */ + + /* and register everything */ + /* register_sysctl_table changed from 2.6.21 onwards */ + pl_info_lnx->sysctl_header = + register_sysctl_table(pl_info_lnx->sysctls); + + if (!pl_info_lnx->sysctl_header) { + qdf_nofl_info("%s: failed to register sysctls!", proc_name); + return -EINVAL; + } + + return 0; +} + +/* + * Initialize logging for system or adapter + * Parameter scn should be NULL for system wide logging + */ +static int pktlog_attach(struct hif_opaque_softc *scn) +{ + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info_lnx *pl_info_lnx; + char *proc_name; + struct proc_dir_entry *proc_entry; + + /* Allocate pktlog dev for later use */ + pl_dev = get_pktlog_handle(); + + if (pl_dev) { + pl_info_lnx = kmalloc(sizeof(*pl_info_lnx), GFP_KERNEL); + if (!pl_info_lnx) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Allocation failed for pl_info", + __func__); + goto attach_fail1; + } + + pl_dev->pl_info = &pl_info_lnx->info; + pl_dev->name = WLANDEV_BASENAME; + proc_name = pl_dev->name; + + if (!pl_dev->pl_funcs) + pl_dev->pl_funcs = &ol_pl_funcs; + + /* + * Valid for both direct attach and offload architecture + */ + pl_dev->pl_funcs->pktlog_init(scn); + } else { + return -EINVAL; + } + + /* + * initialize log info + * might be good to move to pktlog_init + */ + /* pl_dev->tgt_pktlog_alloced = false; */ + pl_info_lnx->proc_entry = NULL; + pl_info_lnx->sysctl_header = NULL; + + proc_entry = proc_create_data(proc_name, PKTLOG_PROC_PERM, + g_pktlog_pde, &pktlog_fops, + &pl_info_lnx->info); + + if (!proc_entry) { + qdf_nofl_info(PKTLOG_TAG "%s: create_proc_entry failed for %s", + __func__, proc_name); + goto attach_fail1; + } + + pl_info_lnx->proc_entry = proc_entry; + + if (pktlog_sysctl_register(scn)) { + qdf_nofl_info(PKTLOG_TAG "%s: sysctl register failed for %s", + __func__, proc_name); + goto attach_fail2; + } + + return 0; + +attach_fail2: + remove_proc_entry(proc_name, g_pktlog_pde); + +attach_fail1: + if (pl_dev) + kfree(pl_dev->pl_info); + + return -EINVAL; +} + +static void pktlog_sysctl_unregister(struct pktlog_dev_t *pl_dev) +{ + struct ath_pktlog_info_lnx *pl_info_lnx; + + if (!pl_dev) { + qdf_nofl_info("%s: Invalid pktlog context", __func__); + ASSERT(0); + return; + } + + pl_info_lnx = (pl_dev) ? PL_INFO_LNX(pl_dev->pl_info) : + PL_INFO_LNX(g_pktlog_info); + + if (pl_info_lnx->sysctl_header) { + unregister_sysctl_table(pl_info_lnx->sysctl_header); + pl_info_lnx->sysctl_header = NULL; + } +} + +static void pktlog_detach(struct hif_opaque_softc *scn) +{ + struct ath_pktlog_info *pl_info; + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_nofl_info("%s: Invalid pktlog context", __func__); + ASSERT(0); + return; + } + + pl_info = pl_dev->pl_info; + if (!pl_info) { + qdf_print("%s: Invalid pktlog handle", __func__); + ASSERT(0); + return; + } + mutex_lock(&pl_info->pktlog_mutex); + remove_proc_entry(WLANDEV_BASENAME, g_pktlog_pde); + pktlog_sysctl_unregister(pl_dev); + + qdf_spin_lock_bh(&pl_info->log_lock); + + if (pl_info->buf) { + pktlog_release_buf(scn); + pl_dev->tgt_pktlog_alloced = false; + } + qdf_spin_unlock_bh(&pl_info->log_lock); + mutex_unlock(&pl_info->pktlog_mutex); + pktlog_cleanup(pl_info); + + if (pl_dev) { + kfree(pl_info); + pl_dev->pl_info = NULL; + } +} + +static int __pktlog_open(struct inode *i, struct file *f) +{ + struct hif_opaque_softc *scn; + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + struct ath_pktlog_info_lnx *pl_info_lnx; + int ret = 0; + + PKTLOG_MOD_INC_USE_COUNT; + scn = cds_get_context(QDF_MODULE_ID_HIF); + if (!scn) { + qdf_print("%s: Invalid scn context", __func__); + ASSERT(0); + return -EINVAL; + } + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("%s: Invalid pktlog handle", __func__); + ASSERT(0); + return -ENODEV; + } + + pl_info = pl_dev->pl_info; + + if (!pl_info) { + qdf_nofl_err("%s: pl_info NULL", __func__); + return -EINVAL; + } + + mutex_lock(&pl_info->pktlog_mutex); + pl_info_lnx = (pl_dev) ? PL_INFO_LNX(pl_dev->pl_info) : + PL_INFO_LNX(g_pktlog_info); + + if (!pl_info_lnx->sysctl_header) { + mutex_unlock(&pl_info->pktlog_mutex); + qdf_print("%s: pktlog sysctl is unergistered.", __func__); + ASSERT(0); + return -EINVAL; + } + + if (pl_info->curr_pkt_state != PKTLOG_OPR_NOT_IN_PROGRESS) { + mutex_unlock(&pl_info->pktlog_mutex); + qdf_print("%s: plinfo state (%d) != PKTLOG_OPR_NOT_IN_PROGRESS", + __func__, pl_info->curr_pkt_state); + return -EBUSY; + } + + pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_START; + + pl_info->init_saved_state = pl_info->log_state; + if (!pl_info->log_state) { + /* Pktlog is already disabled. + * Proceed to read directly. + */ + pl_info->curr_pkt_state = + PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED; + mutex_unlock(&pl_info->pktlog_mutex); + return ret; + } + /* Disbable the pktlog internally. */ + ret = pl_dev->pl_funcs->pktlog_disable(scn); + pl_info->log_state = 0; + pl_info->curr_pkt_state = + PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED; + mutex_unlock(&pl_info->pktlog_mutex); + return ret; +} + +static int pktlog_open(struct inode *i, struct file *f) +{ + struct qdf_op_sync *op_sync; + int errno; + + errno = qdf_op_protect(&op_sync); + if (errno) + return errno; + + errno = __pktlog_open(i, f); + + qdf_op_unprotect(op_sync); + + return errno; +} + +static int __pktlog_release(struct inode *i, struct file *f) +{ + struct hif_opaque_softc *scn; + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + struct ath_pktlog_info_lnx *pl_info_lnx; + int ret = 0; + + PKTLOG_MOD_DEC_USE_COUNT; + scn = cds_get_context(QDF_MODULE_ID_HIF); + if (!scn) { + qdf_print("%s: Invalid scn context", __func__); + ASSERT(0); + return -EINVAL; + } + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("%s: Invalid pktlog handle", __func__); + ASSERT(0); + return -ENODEV; + } + + pl_info = pl_dev->pl_info; + + if (!pl_info) { + qdf_print("%s: Invalid pktlog info", __func__); + ASSERT(0); + return -EINVAL; + } + + mutex_lock(&pl_info->pktlog_mutex); + pl_info_lnx = (pl_dev) ? PL_INFO_LNX(pl_dev->pl_info) : + PL_INFO_LNX(g_pktlog_info); + + if (!pl_info_lnx->sysctl_header) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + mutex_unlock(&pl_info->pktlog_mutex); + qdf_print("%s: pktlog sysctl is unergistered.", __func__); + ASSERT(0); + return -EINVAL; + } + pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE; + /*clear pktlog buffer.*/ + pktlog_clearbuff(scn, true); + pl_info->log_state = pl_info->init_saved_state; + pl_info->init_saved_state = 0; + + /*Enable pktlog again*/ + ret = __pktlog_enable( + (struct hif_opaque_softc *)scn, pl_info->log_state, + cds_is_packet_log_enabled(), 0, 1); + + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + mutex_unlock(&pl_info->pktlog_mutex); + if (ret != 0) + qdf_print("%s: pktlog cannot be enabled. ret value %d", + __func__, ret); + + return ret; +} + +static int pktlog_release(struct inode *i, struct file *f) +{ + struct qdf_op_sync *op_sync; + int errno; + + errno = qdf_op_protect(&op_sync); + if (errno) + return errno; + + errno = __pktlog_release(i, f); + + qdf_op_unprotect(op_sync); + + return errno; +} + +#ifndef MIN +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif + +/** + * pktlog_read_proc_entry() - This function is used to read data from the + * proc entry into the readers buffer + * @buf: Readers buffer + * @nbytes: Number of bytes to read + * @ppos: Offset within the drivers buffer + * @pl_info: Packet log information pointer + * @read_complete: Boolean value indication whether read is complete + * + * This function is used to read data from the proc entry into the readers + * buffer. Its functionality is similar to 'pktlog_read' which does + * copy to user to the user space buffer + * + * Return: Number of bytes read from the buffer + * + */ + ssize_t +pktlog_read_proc_entry(char *buf, size_t nbytes, loff_t *ppos, + struct ath_pktlog_info *pl_info, bool *read_complete) +{ + size_t bufhdr_size; + size_t count = 0, ret_val = 0; + int rem_len; + int start_offset, end_offset; + int fold_offset, ppos_data, cur_rd_offset, cur_wr_offset; + struct ath_pktlog_buf *log_buf; + + qdf_spin_lock_bh(&pl_info->log_lock); + log_buf = pl_info->buf; + + *read_complete = false; + + if (!log_buf) { + *read_complete = true; + qdf_spin_unlock_bh(&pl_info->log_lock); + return 0; + } + + if (*ppos == 0 && pl_info->log_state) { + pl_info->saved_state = pl_info->log_state; + pl_info->log_state = 0; + } + + bufhdr_size = sizeof(log_buf->bufhdr); + + /* copy valid log entries from circular buffer into user space */ + rem_len = nbytes; + count = 0; + + if (*ppos < bufhdr_size) { + count = MIN((bufhdr_size - *ppos), rem_len); + qdf_mem_copy(buf, ((char *)&log_buf->bufhdr) + *ppos, + count); + rem_len -= count; + ret_val += count; + } + + start_offset = log_buf->rd_offset; + cur_wr_offset = log_buf->wr_offset; + + if ((rem_len == 0) || (start_offset < 0)) + goto rd_done; + + fold_offset = -1; + cur_rd_offset = start_offset; + + /* Find the last offset and fold-offset if the buffer is folded */ + do { + struct ath_pktlog_hdr *log_hdr; + int log_data_offset; + + log_hdr = (struct ath_pktlog_hdr *) (log_buf->log_data + + cur_rd_offset); + + log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr); + + if ((fold_offset == -1) + && ((pl_info->buf_size - log_data_offset) + <= log_hdr->size)) + fold_offset = log_data_offset - 1; + + PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size); + + if ((fold_offset == -1) && (cur_rd_offset == 0) + && (cur_rd_offset != cur_wr_offset)) + fold_offset = log_data_offset + log_hdr->size - 1; + + end_offset = log_data_offset + log_hdr->size - 1; + } while (cur_rd_offset != cur_wr_offset); + + ppos_data = *ppos + ret_val - bufhdr_size + start_offset; + + if (fold_offset == -1) { + if (ppos_data > end_offset) + goto rd_done; + + count = MIN(rem_len, (end_offset - ppos_data + 1)); + qdf_mem_copy(buf + ret_val, + log_buf->log_data + ppos_data, + count); + ret_val += count; + rem_len -= count; + } else { + if (ppos_data <= fold_offset) { + count = MIN(rem_len, (fold_offset - ppos_data + 1)); + qdf_mem_copy(buf + ret_val, + log_buf->log_data + ppos_data, + count); + ret_val += count; + rem_len -= count; + } + + if (rem_len == 0) + goto rd_done; + + ppos_data = + *ppos + ret_val - (bufhdr_size + + (fold_offset - start_offset + 1)); + + if (ppos_data <= end_offset) { + count = MIN(rem_len, (end_offset - ppos_data + 1)); + qdf_mem_copy(buf + ret_val, + log_buf->log_data + ppos_data, + count); + ret_val += count; + rem_len -= count; + } + } + +rd_done: + if ((ret_val < nbytes) && pl_info->saved_state) { + pl_info->log_state = pl_info->saved_state; + pl_info->saved_state = 0; + } + *ppos += ret_val; + + if (ret_val == 0) { + /* Write pointer might have been updated during the read. + * So, if some data is written into, lets not reset the pointers + * We can continue to read from the offset position + */ + if (cur_wr_offset != log_buf->wr_offset) { + *read_complete = false; + } else { + pl_info->buf->rd_offset = -1; + pl_info->buf->wr_offset = 0; + pl_info->buf->bytes_written = 0; + pl_info->buf->offset = PKTLOG_READ_OFFSET; + *read_complete = true; + } + } + qdf_spin_unlock_bh(&pl_info->log_lock); + return ret_val; +} + +static ssize_t +__pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos) +{ + size_t bufhdr_size; + size_t count = 0, ret_val = 0; + int rem_len; + int start_offset, end_offset; + int fold_offset, ppos_data, cur_rd_offset; + struct ath_pktlog_info *pl_info; + struct ath_pktlog_buf *log_buf; + + pl_info = PDE_DATA(file->f_path.dentry->d_inode); + if (!pl_info) + return 0; + + qdf_spin_lock_bh(&pl_info->log_lock); + log_buf = pl_info->buf; + + if (!log_buf) { + qdf_spin_unlock_bh(&pl_info->log_lock); + return 0; + } + + if (pl_info->log_state) { + /* Read is not allowed when write is going on + * When issuing cat command, ensure to send + * pktlog disable command first. + */ + qdf_spin_unlock_bh(&pl_info->log_lock); + return -EINVAL; + } + + if (*ppos == 0 && pl_info->log_state) { + pl_info->saved_state = pl_info->log_state; + pl_info->log_state = 0; + } + + bufhdr_size = sizeof(log_buf->bufhdr); + + /* copy valid log entries from circular buffer into user space */ + + rem_len = nbytes; + count = 0; + + if (*ppos < bufhdr_size) { + count = QDF_MIN((bufhdr_size - *ppos), rem_len); + qdf_spin_unlock_bh(&pl_info->log_lock); + if (copy_to_user(buf, ((char *)&log_buf->bufhdr) + *ppos, + count)) { + return -EFAULT; + } + rem_len -= count; + ret_val += count; + qdf_spin_lock_bh(&pl_info->log_lock); + } + + start_offset = log_buf->rd_offset; + + if ((rem_len == 0) || (start_offset < 0)) + goto rd_done; + + fold_offset = -1; + cur_rd_offset = start_offset; + + /* Find the last offset and fold-offset if the buffer is folded */ + do { + struct ath_pktlog_hdr *log_hdr; + int log_data_offset; + + log_hdr = (struct ath_pktlog_hdr *)(log_buf->log_data + + cur_rd_offset); + + log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr); + + if ((fold_offset == -1) + && ((pl_info->buf_size - log_data_offset) + <= log_hdr->size)) + fold_offset = log_data_offset - 1; + + PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size); + + if ((fold_offset == -1) && (cur_rd_offset == 0) + && (cur_rd_offset != log_buf->wr_offset)) + fold_offset = log_data_offset + log_hdr->size - 1; + + end_offset = log_data_offset + log_hdr->size - 1; + } while (cur_rd_offset != log_buf->wr_offset); + + ppos_data = *ppos + ret_val - bufhdr_size + start_offset; + + if (fold_offset == -1) { + if (ppos_data > end_offset) + goto rd_done; + + count = QDF_MIN(rem_len, (end_offset - ppos_data + 1)); + qdf_spin_unlock_bh(&pl_info->log_lock); + + if (copy_to_user(buf + ret_val, + log_buf->log_data + ppos_data, count)) { + return -EFAULT; + } + + ret_val += count; + rem_len -= count; + qdf_spin_lock_bh(&pl_info->log_lock); + } else { + if (ppos_data <= fold_offset) { + count = QDF_MIN(rem_len, (fold_offset - ppos_data + 1)); + qdf_spin_unlock_bh(&pl_info->log_lock); + if (copy_to_user(buf + ret_val, + log_buf->log_data + ppos_data, + count)) { + return -EFAULT; + } + ret_val += count; + rem_len -= count; + qdf_spin_lock_bh(&pl_info->log_lock); + } + + if (rem_len == 0) + goto rd_done; + + ppos_data = + *ppos + ret_val - (bufhdr_size + + (fold_offset - start_offset + 1)); + + if (ppos_data <= end_offset) { + count = QDF_MIN(rem_len, (end_offset - ppos_data + 1)); + qdf_spin_unlock_bh(&pl_info->log_lock); + if (copy_to_user(buf + ret_val, + log_buf->log_data + ppos_data, + count)) { + return -EFAULT; + } + ret_val += count; + rem_len -= count; + qdf_spin_lock_bh(&pl_info->log_lock); + } + } + +rd_done: + if ((ret_val < nbytes) && pl_info->saved_state) { + pl_info->log_state = pl_info->saved_state; + pl_info->saved_state = 0; + } + *ppos += ret_val; + + qdf_spin_unlock_bh(&pl_info->log_lock); + return ret_val; +} + +static ssize_t +pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos) +{ + struct ath_pktlog_info *info = PDE_DATA(file->f_path.dentry->d_inode); + struct qdf_op_sync *op_sync; + ssize_t err_size; + + if (!info) + return 0; + + err_size = qdf_op_protect(&op_sync); + if (err_size) + return err_size; + + mutex_lock(&info->pktlog_mutex); + err_size = __pktlog_read(file, buf, nbytes, ppos); + mutex_unlock(&info->pktlog_mutex); + + qdf_op_unprotect(op_sync); + + return err_size; +} + +int pktlogmod_init(void *context) +{ + int ret; + + /* create the proc directory entry */ + g_pktlog_pde = proc_mkdir(PKTLOG_PROC_DIR, NULL); + + if (!g_pktlog_pde) { + qdf_nofl_info(PKTLOG_TAG "%s: proc_mkdir failed", __func__); + return -EPERM; + } + + /* Attach packet log */ + ret = pktlog_attach((struct hif_opaque_softc *)context); + + /* If packet log init failed */ + if (ret) + goto attach_fail; + + return ret; + +attach_fail: + remove_proc_entry(PKTLOG_PROC_DIR, NULL); + g_pktlog_pde = NULL; + + return ret; +} + +void pktlogmod_exit(void *context) +{ + if (!g_pktlog_pde) + return; + + pktlog_detach((struct hif_opaque_softc *)context); + + /* + * pdev kill needs to be implemented + */ + remove_proc_entry(PKTLOG_PROC_DIR, NULL); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c new file mode 100644 index 0000000000000000000000000000000000000000..a7a0583e2217e71271e7f7321de7f2f6384ba6be --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c @@ -0,0 +1,1183 @@ +/* + * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef REMOVE_PKT_LOG + +#include "qdf_mem.h" +#include "athdefs.h" +#include "pktlog_ac_i.h" +#include "cds_api.h" +#include "wma_types.h" +#include "htc.h" +#include +#include +#ifdef PKTLOG_LEGACY +#include "pktlog_wifi2.h" +#else +#include "pktlog_wifi3.h" +#endif /* PKTLOG_LEGACY */ + +wdi_event_subscribe PKTLOG_TX_SUBSCRIBER; +wdi_event_subscribe PKTLOG_RX_SUBSCRIBER; +wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER; +wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER; +wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER; +wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER; +wdi_event_subscribe PKTLOG_LITE_T2H_SUBSCRIBER; +wdi_event_subscribe PKTLOG_LITE_RX_SUBSCRIBER; +wdi_event_subscribe PKTLOG_OFFLOAD_SUBSCRIBER; + +struct ol_pl_arch_dep_funcs ol_pl_funcs = { + .pktlog_init = pktlog_init, + .pktlog_enable = pktlog_enable, + .pktlog_setsize = pktlog_setsize, + .pktlog_disable = pktlog_disable, /* valid for f/w disable */ +}; + +struct pktlog_dev_t pl_dev = { + .pl_funcs = &ol_pl_funcs, +}; + +void pktlog_sethandle(struct pktlog_dev_t **pl_handle, + struct hif_opaque_softc *scn) +{ + pl_dev.scn = (ol_ath_generic_softc_handle) scn; + *pl_handle = &pl_dev; +} + +void pktlog_set_pdev_id(struct pktlog_dev_t *pl_dev, uint8_t pdev_id) +{ + pl_dev->pdev_id = pdev_id; +} + +void pktlog_set_callback_regtype( + enum pktlog_callback_regtype callback_type) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("Invalid pl_dev"); + return; + } + + pl_dev->callback_type = callback_type; +} + +struct pktlog_dev_t *get_pktlog_handle(void) +{ + uint8_t pdev_id = WMI_PDEV_ID_SOC; + void *soc = cds_get_context(QDF_MODULE_ID_SOC); + + return cdp_get_pldev(soc, pdev_id); +} + +static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types, + WMI_CMD_ID cmd_id, bool ini_triggered, + uint8_t user_triggered) +{ + struct scheduler_msg msg = { 0 }; + QDF_STATUS status; + struct ath_pktlog_wmi_params *param; + + param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params)); + + if (!param) + return A_NO_MEMORY; + + param->cmd_id = cmd_id; + param->pktlog_event = event_types; + param->ini_triggered = ini_triggered; + param->user_triggered = user_triggered; + + msg.type = WMA_PKTLOG_ENABLE_REQ; + msg.bodyptr = param; + msg.bodyval = 0; + + status = scheduler_post_message(QDF_MODULE_ID_WMA, + QDF_MODULE_ID_WMA, + QDF_MODULE_ID_WMA, &msg); + + if (status != QDF_STATUS_SUCCESS) { + qdf_mem_free(param); + return A_ERROR; + } + + return A_OK; +} + +static inline A_STATUS +pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state, + bool ini_triggered, uint8_t user_triggered) +{ + uint32_t types = 0; + + if (log_state & ATH_PKTLOG_TX) + types |= WMI_PKTLOG_EVENT_TX; + + if (log_state & ATH_PKTLOG_RX) + types |= WMI_PKTLOG_EVENT_RX; + + if (log_state & ATH_PKTLOG_RCFIND) + types |= WMI_PKTLOG_EVENT_RCF; + + if (log_state & ATH_PKTLOG_RCUPDATE) + types |= WMI_PKTLOG_EVENT_RCU; + + if (log_state & ATH_PKTLOG_SW_EVENT) + types |= WMI_PKTLOG_EVENT_SW; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s: Pktlog events: %d", __func__, types); + + return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID, + ini_triggered, user_triggered); +} + +#ifdef PKTLOG_LEGACY +/** + * wdi_pktlog_subscribe() - Subscribe pktlog callbacks + * @pdev_id: pdev id + * @log_state: Pktlog registration + * + * Return: zero on success, non-zero on failure + */ +static inline A_STATUS +wdi_pktlog_subscribe(uint8_t pdev_id, int32_t log_state) +{ + void *soc = cds_get_context(QDF_MODULE_ID_SOC); + + if (pdev_id < 0) { + qdf_print("Invalid pdev in %s", __func__); + return A_ERROR; + } + + if (log_state & ATH_PKTLOG_TX) { + if (cdp_wdi_event_sub(soc, pdev_id, &PKTLOG_TX_SUBSCRIBER, + WDI_EVENT_TX_STATUS)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_RX) { + if (cdp_wdi_event_sub(soc, pdev_id, &PKTLOG_RX_SUBSCRIBER, + WDI_EVENT_RX_DESC)) { + return A_ERROR; + } + if (cdp_wdi_event_sub(soc, pdev_id, + &PKTLOG_RX_REMOTE_SUBSCRIBER, + WDI_EVENT_RX_DESC_REMOTE)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_RCFIND) { + if (cdp_wdi_event_sub(soc, pdev_id, + &PKTLOG_RCFIND_SUBSCRIBER, + WDI_EVENT_RATE_FIND)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_RCUPDATE) { + if (cdp_wdi_event_sub(soc, pdev_id, + &PKTLOG_RCUPDATE_SUBSCRIBER, + WDI_EVENT_RATE_UPDATE)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_SW_EVENT) { + if (cdp_wdi_event_sub(soc, pdev_id, + &PKTLOG_SW_EVENT_SUBSCRIBER, + WDI_EVENT_SW_EVENT)) { + return A_ERROR; + } + } + + return A_OK; +} +#else +static inline A_STATUS +wdi_pktlog_subscribe(uint8_t pdev_id, int32_t log_state) +{ + void *soc = cds_get_context(QDF_MODULE_ID_SOC); + + if (pdev_id < 0) { + qdf_print("Invalid pdev in %s", __func__); + return A_ERROR; + } + + if ((log_state & ATH_PKTLOG_TX) || + (log_state & ATH_PKTLOG_RCFIND) || + (log_state & ATH_PKTLOG_RCUPDATE) || + (log_state & ATH_PKTLOG_SW_EVENT)) { + if (cdp_wdi_event_sub(soc, + pdev_id, + &PKTLOG_OFFLOAD_SUBSCRIBER, + WDI_EVENT_OFFLOAD_ALL)) { + return A_ERROR; + } + } + + if (log_state & ATH_PKTLOG_RX) { + if (cdp_wdi_event_sub(soc, pdev_id, + &PKTLOG_RX_SUBSCRIBER, + WDI_EVENT_RX_DESC)) { + return A_ERROR; + } + } + + if (log_state & ATH_PKTLOG_SW_EVENT) { + if (cdp_wdi_event_sub(soc, pdev_id, + &PKTLOG_SW_EVENT_SUBSCRIBER, + WDI_EVENT_SW_EVENT)) { + return A_ERROR; + } + } + + if (log_state & ATH_PKTLOG_LITE_T2H) { + if (cdp_wdi_event_sub(soc, pdev_id, + &PKTLOG_LITE_T2H_SUBSCRIBER, + WDI_EVENT_LITE_T2H)) { + return A_ERROR; + } + } + + if (log_state & ATH_PKTLOG_LITE_RX) { + if (cdp_wdi_event_sub(soc, pdev_id, + &PKTLOG_LITE_RX_SUBSCRIBER, + WDI_EVENT_LITE_RX)) { + return A_ERROR; + } + } + + return A_OK; +} +#endif + +void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data, + u_int16_t peer_id, uint32_t status) +{ + switch (event) { + case WDI_EVENT_OFFLOAD_ALL: + { + if (process_offload_pktlog_wifi3(pdev, log_data)) { + qdf_print("Unable to process offload info"); + return; + } + break; + } + case WDI_EVENT_TX_STATUS: + { + /* + * process TX message + */ + if (process_tx_info(pdev, log_data)) { + qdf_print("Unable to process TX info"); + return; + } + break; + } + case WDI_EVENT_RX_DESC: + { + /* + * process RX message for local frames + */ + if (process_rx_info(pdev, log_data)) { + qdf_print("Unable to process RX info"); + return; + } + break; + } + case WDI_EVENT_RX_DESC_REMOTE: + { + /* + * process RX message for remote frames + */ + if (process_rx_info_remote(pdev, log_data)) { + qdf_print("Unable to process RX info"); + return; + } + break; + } + case WDI_EVENT_RATE_FIND: + { + /* + * process RATE_FIND message + */ + if (process_rate_find(pdev, log_data)) { + qdf_print("Unable to process RC_FIND info"); + return; + } + break; + } + case WDI_EVENT_RATE_UPDATE: + { + /* + * process RATE_UPDATE message + */ + if (process_rate_update(pdev, log_data)) { + qdf_print("Unable to process RC_UPDATE"); + return; + } + break; + } + case WDI_EVENT_SW_EVENT: + { + /* + * process SW EVENT message + */ + if (process_sw_event(pdev, log_data)) { + qdf_print("Unable to process SW_EVENT"); + return; + } + break; + } + default: + break; + } +} + +void +lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data, + u_int16_t peer_id, uint32_t status) +{ + switch (event) { + case WDI_EVENT_RX_DESC: + { + if (process_rx_desc_remote_wifi3(context, log_data)) { + qdf_print("Unable to process RX info"); + return; + } + break; + } + case WDI_EVENT_LITE_T2H: + { + if (process_pktlog_lite_wifi3(context, log_data, + PKTLOG_TYPE_LITE_T2H)) { + qdf_print("Unable to process lite_t2h"); + return; + } + break; + } + case WDI_EVENT_LITE_RX: + { + if (process_pktlog_lite_wifi3(context, log_data, + PKTLOG_TYPE_LITE_RX)) { + qdf_print("Unable to process lite_rx"); + return; + } + break; + } + default: + break; + } +} + +#ifdef PKTLOG_LEGACY +A_STATUS +wdi_pktlog_unsubscribe(uint8_t pdev_id, uint32_t log_state) +{ + void *soc = cds_get_context(QDF_MODULE_ID_SOC); + /* TODO: WIN implementation to get soc */ + + if (log_state & ATH_PKTLOG_TX) { + if (cdp_wdi_event_unsub(soc, pdev_id, + &PKTLOG_TX_SUBSCRIBER, + WDI_EVENT_TX_STATUS)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_RX) { + if (cdp_wdi_event_unsub(soc, pdev_id, + &PKTLOG_RX_SUBSCRIBER, + WDI_EVENT_RX_DESC)) { + return A_ERROR; + } + if (cdp_wdi_event_unsub(soc, pdev_id, + &PKTLOG_RX_REMOTE_SUBSCRIBER, + WDI_EVENT_RX_DESC_REMOTE)) { + return A_ERROR; + } + } + + if (log_state & ATH_PKTLOG_RCFIND) { + if (cdp_wdi_event_unsub(soc, pdev_id, + &PKTLOG_RCFIND_SUBSCRIBER, + WDI_EVENT_RATE_FIND)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_RCUPDATE) { + if (cdp_wdi_event_unsub(soc, pdev_id, + &PKTLOG_RCUPDATE_SUBSCRIBER, + WDI_EVENT_RATE_UPDATE)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_RCUPDATE) { + if (cdp_wdi_event_unsub(soc, pdev_id, + &PKTLOG_SW_EVENT_SUBSCRIBER, + WDI_EVENT_SW_EVENT)) { + return A_ERROR; + } + } + + return A_OK; +} +#else +A_STATUS +wdi_pktlog_unsubscribe(uint8_t pdev_id, uint32_t log_state) +{ + void *soc = cds_get_context(QDF_MODULE_ID_SOC); + + if ((log_state & ATH_PKTLOG_TX) || + (log_state & ATH_PKTLOG_RCFIND) || + (log_state & ATH_PKTLOG_RCUPDATE) || + (log_state & ATH_PKTLOG_SW_EVENT)) { + if (cdp_wdi_event_unsub(soc, + pdev_id, + &PKTLOG_OFFLOAD_SUBSCRIBER, + WDI_EVENT_OFFLOAD_ALL)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_RX) { + if (cdp_wdi_event_unsub(soc, pdev_id, + &PKTLOG_RX_SUBSCRIBER, + WDI_EVENT_RX_DESC)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_LITE_T2H) { + if (cdp_wdi_event_unsub(soc, pdev_id, + &PKTLOG_LITE_T2H_SUBSCRIBER, + WDI_EVENT_LITE_T2H)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_LITE_RX) { + if (cdp_wdi_event_unsub(soc, pdev_id, + &PKTLOG_LITE_RX_SUBSCRIBER, + WDI_EVENT_LITE_RX)) { + return A_ERROR; + } + } + + return A_OK; +} +#endif + +int pktlog_disable(struct hif_opaque_softc *scn) +{ + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + uint8_t save_pktlog_state; + uint8_t pdev_id = WMI_PDEV_ID_SOC; + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("Invalid pl_dev"); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + + if (!pl_dev->pl_info) { + qdf_print("Invalid pl_info"); + return -EINVAL; + } + + if (pdev_id < 0) { + qdf_print("Invalid pdev"); + return -EINVAL; + } + + if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS || + pl_info->curr_pkt_state == + PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED || + pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE || + pl_info->curr_pkt_state == + PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE) + return -EBUSY; + + save_pktlog_state = pl_info->curr_pkt_state; + pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS; + + if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("Failed to disable pktlog in target"); + return -EINVAL; + } + + if (pl_dev->is_pktlog_cb_subscribed && + wdi_pktlog_unsubscribe(pdev_id, pl_info->log_state)) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("Cannot unsubscribe pktlog from the WDI"); + return -EINVAL; + } + pl_dev->is_pktlog_cb_subscribed = false; + if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START) + pl_info->curr_pkt_state = + PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED; + else + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + return 0; +} + +#ifdef PKTLOG_LEGACY +/** + * pktlog_callback_registration() - Register pktlog handlers based on + * on callback type + * @callback_type: pktlog full or lite registration + * + * Return: None + */ +static void pktlog_callback_registration(uint8_t callback_type) +{ + if (callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) { + PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback; + PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback; + PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback; + PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback; + PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback; + PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback; + } +} +#else +static void pktlog_callback_registration(uint8_t callback_type) +{ + if (callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) { + PKTLOG_RX_SUBSCRIBER.callback = lit_pktlog_callback; + PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback; + PKTLOG_OFFLOAD_SUBSCRIBER.callback = pktlog_callback; + } else if (callback_type == PKTLOG_LITE_CALLBACK_REGISTRATION) { + PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback; + PKTLOG_LITE_RX_SUBSCRIBER.callback = lit_pktlog_callback; + } +} +#endif + +#define ONE_MEGABYTE (1024 * 1024) + +void pktlog_init(struct hif_opaque_softc *scn) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_info *pl_info; + void *soc = cds_get_context(QDF_MODULE_ID_SOC); + uint32_t buff_size; + + if (!pl_dev || !pl_dev->pl_info) { + qdf_print("pl_dev or pl_info is invalid"); + return; + } + + pl_info = pl_dev->pl_info; + + OS_MEMZERO(pl_info, sizeof(*pl_info)); + PKTLOG_LOCK_INIT(pl_info); + mutex_init(&pl_info->pktlog_mutex); + + buff_size = cdp_cfg_get(soc, cfg_dp_pktlog_buffer_size) * ONE_MEGABYTE; + + pl_info->buf_size = (buff_size ? buff_size : ONE_MEGABYTE); + pl_info->buf = NULL; + pl_info->log_state = 0; + pl_info->init_saved_state = 0; + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR; + pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH; + pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH; + pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH; + pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH; + pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL; + pl_info->pktlen = 0; + pl_info->start_time_thruput = 0; + pl_info->start_time_per = 0; + pl_dev->vendor_cmd_send = false; + + pktlog_callback_registration(pl_dev->callback_type); +} + +int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state, + bool ini_triggered, uint8_t user_triggered, + uint32_t is_iwpriv_command) +{ + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + uint8_t pdev_id; + int error; + + if (!scn) { + qdf_print("%s: Invalid scn context", __func__); + ASSERT(0); + return -EINVAL; + } + + pl_dev = get_pktlog_handle(); + if (!pl_dev) { + qdf_print("%s: Invalid pktlog context", __func__); + ASSERT(0); + return -EINVAL; + } + + pdev_id = WMI_PDEV_ID_SOC; + if (pdev_id < 0) { + qdf_print("%s: Invalid txrx context", __func__); + ASSERT(0); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + if (!pl_info) { + qdf_print("%s: Invalid pl_info context", __func__); + ASSERT(0); + return -EINVAL; + } + + if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE) + return -EBUSY; + + pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS; + /* is_iwpriv_command : 0 indicates its a vendor command + * log_state: 0 indicates pktlog disable command + * vendor_cmd_send flag; false means no vendor pktlog enable + * command was sent previously + */ + if (is_iwpriv_command == 0 && log_state == 0 && + pl_dev->vendor_cmd_send == false) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: pktlog operation not in progress", __func__); + return 0; + } + + if (!pl_dev->tgt_pktlog_alloced) { + if (!pl_info->buf) { + error = pktlog_alloc_buf(scn); + + if (error != 0) { + pl_info->curr_pkt_state = + PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: pktlog buff alloc failed", + __func__); + return -ENOMEM; + } + + if (!pl_info->buf) { + pl_info->curr_pkt_state = + PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: pktlog buf alloc failed", + __func__); + ASSERT(0); + return -ENOMEM; + } + + } + + qdf_spin_lock_bh(&pl_info->log_lock); + pl_info->buf->bufhdr.version = CUR_PKTLOG_VER; + pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM; + pl_info->buf->wr_offset = 0; + pl_info->buf->rd_offset = -1; + /* These below variables are used by per packet stats*/ + pl_info->buf->bytes_written = 0; + pl_info->buf->msg_index = 1; + pl_info->buf->offset = PKTLOG_READ_OFFSET; + qdf_spin_unlock_bh(&pl_info->log_lock); + + pl_info->start_time_thruput = os_get_timestamp(); + pl_info->start_time_per = pl_info->start_time_thruput; + + pl_dev->tgt_pktlog_alloced = true; + } + if (log_state != 0) { + /* WDI subscribe */ + if (!pl_dev->is_pktlog_cb_subscribed) { + error = wdi_pktlog_subscribe(pdev_id, log_state); + if (error) { + pl_info->curr_pkt_state = + PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("Unable to subscribe to the WDI %s", + __func__); + return -EINVAL; + } + } else { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("Unable to subscribe %d to the WDI %s", + log_state, __func__); + return -EINVAL; + } + /* WMI command to enable pktlog on the firmware */ + if (pktlog_enable_tgt(scn, log_state, ini_triggered, + user_triggered)) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("Device cannot be enabled, %s", __func__); + return -EINVAL; + } + pl_dev->is_pktlog_cb_subscribed = true; + + if (is_iwpriv_command == 0) + pl_dev->vendor_cmd_send = true; + } else { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + pl_dev->pl_funcs->pktlog_disable(scn); + if (is_iwpriv_command == 0) + pl_dev->vendor_cmd_send = false; + } + + pl_info->log_state = log_state; + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + return 0; +} + +int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state, + bool ini_triggered, uint8_t user_triggered, + uint32_t is_iwpriv_command) +{ + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + int err; + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("%s: invalid pl_dev handle", __func__); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + + if (!pl_info) { + qdf_print("%s: invalid pl_info handle", __func__); + return -EINVAL; + } + + mutex_lock(&pl_info->pktlog_mutex); + err = __pktlog_enable(scn, log_state, ini_triggered, + user_triggered, is_iwpriv_command); + mutex_unlock(&pl_info->pktlog_mutex); + return err; +} + +static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size) +{ + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + uint8_t pdev_id = WMI_PDEV_ID_SOC; + void *soc = cds_get_context(QDF_MODULE_ID_SOC); + uint32_t buff_size; + uint32_t max_allowed_buff_size; + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("%s: invalid pl_dev handle", __func__); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + + if (!pl_info) { + qdf_print("%s: invalid pl_dev handle", __func__); + return -EINVAL; + } + + if (pdev_id < 0) { + qdf_print("%s: invalid pdev", __func__); + return -EINVAL; + } + + if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS) { + qdf_print("%s: pktlog is not configured", __func__); + return -EBUSY; + } + + pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS; + + buff_size = cdp_cfg_get(soc, cfg_dp_pktlog_buffer_size) * ONE_MEGABYTE; + max_allowed_buff_size = (buff_size ? buff_size : ONE_MEGABYTE); + + if (size < ONE_MEGABYTE || size > max_allowed_buff_size) { + qdf_print("%s: Cannot Set Pktlog Buffer size of %d bytes.Min required is %d MB and Max allowed is %d MB.", + __func__, size, (ONE_MEGABYTE / ONE_MEGABYTE), + (max_allowed_buff_size / ONE_MEGABYTE)); + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: Invalid requested buff size", __func__); + return -EINVAL; + } + + if (size == pl_info->buf_size) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: Pktlog Buff Size is already of same size.", + __func__); + return 0; + } + + if (pl_info->log_state) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: Logging should be disabled before changing" + "buffer size.", __func__); + return -EINVAL; + } + + qdf_spin_lock_bh(&pl_info->log_lock); + if (pl_info->buf) { + if (pl_dev->is_pktlog_cb_subscribed && + wdi_pktlog_unsubscribe(pdev_id, pl_info->log_state)) { + pl_info->curr_pkt_state = + PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_spin_unlock_bh(&pl_info->log_lock); + qdf_print("Cannot unsubscribe pktlog from the WDI"); + return -EFAULT; + } + pktlog_release_buf(scn); + pl_dev->is_pktlog_cb_subscribed = false; + pl_dev->tgt_pktlog_alloced = false; + } + + if (size != 0) { + qdf_print("%s: New Pktlog Buff Size is %d", __func__, size); + pl_info->buf_size = size; + } + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_spin_unlock_bh(&pl_info->log_lock); + return 0; +} + +int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size) +{ + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + int status; + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("%s: invalid pl_dev handle", __func__); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + + if (!pl_info) { + qdf_print("%s: invalid pl_dev handle", __func__); + return -EINVAL; + } + + mutex_lock(&pl_info->pktlog_mutex); + status = __pktlog_setsize(scn, size); + mutex_unlock(&pl_info->pktlog_mutex); + + return status; +} + +int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff) +{ + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + uint8_t save_pktlog_state; + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("%s: invalid pl_dev handle", __func__); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + + if (!pl_info) { + qdf_print("%s: invalid pl_dev handle", __func__); + return -EINVAL; + } + + if (!clear_buff) + return -EINVAL; + + if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE || + pl_info->curr_pkt_state == + PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE) + return -EBUSY; + + save_pktlog_state = pl_info->curr_pkt_state; + pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS; + + if (pl_info->log_state) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: Logging should be disabled before clearing " + "pktlog buffer.", __func__); + return -EINVAL; + } + + if (pl_info->buf) { + if (pl_info->buf_size > 0) { + qdf_debug("pktlog buffer is cleared"); + memset(pl_info->buf, 0, pl_info->buf_size); + pl_dev->is_pktlog_cb_subscribed = false; + pl_dev->tgt_pktlog_alloced = false; + pl_info->buf->rd_offset = -1; + } else { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: pktlog buffer size is not proper. " + "Existing Buf size %d", __func__, + pl_info->buf_size); + return -EFAULT; + } + } else { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: pktlog buff is NULL", __func__); + return -EFAULT; + } + + if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE) + pl_info->curr_pkt_state = + PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE; + else + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + + return 0; +} + +void pktlog_process_fw_msg(uint8_t pdev_id, uint32_t *buff, uint32_t len) +{ + uint32_t *pl_hdr; + uint32_t log_type; + struct ol_fw_data pl_fw_data; + + if (pdev_id == OL_TXRX_INVALID_PDEV_ID) { + qdf_print("%s: txrx pdev_id is invalid", __func__); + return; + } + pl_hdr = buff; + pl_fw_data.data = pl_hdr; + pl_fw_data.len = len; + + log_type = + (*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + + if ((log_type == PKTLOG_TYPE_TX_CTRL) + || (log_type == PKTLOG_TYPE_TX_STAT) + || (log_type == PKTLOG_TYPE_TX_MSDU_ID) + || (log_type == PKTLOG_TYPE_TX_FRM_HDR) + || (log_type == PKTLOG_TYPE_TX_VIRT_ADDR)) + wdi_event_handler(WDI_EVENT_TX_STATUS, + pdev_id, &pl_fw_data); + else if (log_type == PKTLOG_TYPE_RC_FIND) + wdi_event_handler(WDI_EVENT_RATE_FIND, + pdev_id, &pl_fw_data); + else if (log_type == PKTLOG_TYPE_RC_UPDATE) + wdi_event_handler(WDI_EVENT_RATE_UPDATE, + pdev_id, &pl_fw_data); + else if (log_type == PKTLOG_TYPE_RX_STAT) + wdi_event_handler(WDI_EVENT_RX_DESC, + pdev_id, &pl_fw_data); + else if (log_type == PKTLOG_TYPE_SW_EVENT) + wdi_event_handler(WDI_EVENT_SW_EVENT, + pdev_id, &pl_fw_data); +} + +#if defined(QCA_WIFI_3_0_ADRASTEA) +static inline int pktlog_nbuf_check_sanity(qdf_nbuf_t nbuf) +{ + int rc = 0; /* sane */ + + if ((!nbuf) || + (nbuf->data < nbuf->head) || + ((nbuf->data + skb_headlen(nbuf)) > skb_end_pointer(nbuf))) + rc = -EINVAL; + + return rc; +} +/** + * pktlog_t2h_msg_handler() - Target to host message handler + * @context: pdev context + * @pkt: HTC packet + * + * Return: None + */ +static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt) +{ + struct pktlog_dev_t *pdev = (struct pktlog_dev_t *)context; + qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext; + uint32_t *msg_word; + uint32_t msg_len; + + /* check for sanity of the packet, have seen corrupted pkts */ + if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) { + qdf_print("%s: packet 0x%pK corrupted? Leaking...", + __func__, pktlog_t2h_msg); + /* do not free; may crash! */ + QDF_ASSERT(0); + return; + } + + /* check for successful message reception */ + if (pkt->Status != QDF_STATUS_SUCCESS) { + if (pkt->Status != QDF_STATUS_E_CANCELED) + pdev->htc_err_cnt++; + qdf_nbuf_free(pktlog_t2h_msg); + return; + } + + /* confirm alignment */ + qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0); + + msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg); + msg_len = qdf_nbuf_len(pktlog_t2h_msg); + pktlog_process_fw_msg(pdev->pdev_id, msg_word, msg_len); + + qdf_nbuf_free(pktlog_t2h_msg); +} + +/** + * pktlog_tx_resume_handler() - resume callback + * @context: pdev context + * + * Return: None + */ +static void pktlog_tx_resume_handler(void *context) +{ + qdf_print("%s: Not expected", __func__); + qdf_assert(0); +} + +/** + * pktlog_h2t_send_complete() - send complete indication + * @context: pdev context + * @htc_pkt: HTC packet + * + * Return: None + */ +static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt) +{ + qdf_print("%s: Not expected", __func__); + qdf_assert(0); +} + +/** + * pktlog_h2t_full() - queue full indication + * @context: pdev context + * @pkt: HTC packet + * + * Return: HTC action + */ +static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt) +{ + return HTC_SEND_FULL_KEEP; +} + +/** + * pktlog_htc_connect_service() - create new endpoint for packetlog + * @pdev - pktlog pdev + * + * Return: 0 for success/failure + */ +static int pktlog_htc_connect_service(struct pktlog_dev_t *pdev) +{ + struct htc_service_connect_req connect; + struct htc_service_connect_resp response; + QDF_STATUS status; + + qdf_mem_zero(&connect, sizeof(connect)); + qdf_mem_zero(&response, sizeof(response)); + + connect.pMetaData = NULL; + connect.MetaDataLength = 0; + connect.EpCallbacks.pContext = pdev; + connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete; + connect.EpCallbacks.EpTxCompleteMultiple = NULL; + connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler; + connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler; + + /* rx buffers currently are provided by HIF, not by EpRecvRefill */ + connect.EpCallbacks.EpRecvRefill = NULL; + connect.EpCallbacks.RecvRefillWaterMark = 1; + /* N/A, fill is done by HIF */ + + connect.EpCallbacks.EpSendFull = pktlog_h2t_full; + /* + * Specify how deep to let a queue get before htc_send_pkt will + * call the EpSendFull function due to excessive send queue depth. + */ + connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH; + + /* disable flow control for HTT data message service */ + connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL; + + /* connect to control service */ + connect.service_id = PACKET_LOG_SVC; + + status = htc_connect_service(pdev->htc_pdev, &connect, &response); + + if (status != QDF_STATUS_SUCCESS) { + pdev->mt_pktlog_enabled = false; + return -EIO; /* failure */ + } + + pdev->htc_endpoint = response.Endpoint; + pdev->mt_pktlog_enabled = true; + + return 0; /* success */ +} + +/** + * pktlog_htc_attach() - attach pktlog HTC service + * + * Return: 0 for success/failure + */ +int pktlog_htc_attach(void) +{ + struct pktlog_dev_t *pl_pdev = get_pktlog_handle(); + void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC); + + if ((!pl_pdev) || (!htc_pdev)) { + qdf_print("Invalid pl_dev or htc_pdev handle"); + return -EINVAL; + } + + pl_pdev->htc_pdev = htc_pdev; + return pktlog_htc_connect_service(pl_pdev); +} +#else +int pktlog_htc_attach(void) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("Invalid pl_dev handle"); + return -EINVAL; + } + + pl_dev->mt_pktlog_enabled = false; + return 0; +} +#endif +#endif /* REMOVE_PKT_LOG */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_internal.c b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_internal.c new file mode 100644 index 0000000000000000000000000000000000000000..d028931f0145ff8b84f129c86ac864063a5b7096 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_internal.c @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef REMOVE_PKT_LOG +#include "ol_txrx_types.h" +#include "ol_htt_tx_api.h" +#include "ol_tx_desc.h" +#include "qdf_mem.h" +#include "htt.h" +#include "htt_internal.h" +#include "pktlog_ac_i.h" +#include "wma_api.h" +#include "wlan_logging_sock_svc.h" + +#ifdef PKTLOG_HAS_SPECIFIC_DATA +void +pktlog_hdr_set_specific_data(struct ath_pktlog_hdr *log_hdr, + uint32_t type_specific_data) +{ + log_hdr->type_specific_data = type_specific_data; +} + +uint32_t +pktlog_hdr_get_specific_data(struct ath_pktlog_hdr *log_hdr) +{ + return log_hdr->type_specific_data; +} + +void +pktlog_arg_set_specific_data(struct ath_pktlog_arg *plarg, + uint32_t type_specific_data) +{ + plarg->type_specific_data = type_specific_data; +} + +uint32_t +pktlog_arg_get_specific_data(struct ath_pktlog_arg *plarg) +{ + return plarg->type_specific_data; +} +#endif /* PKTLOG_HAS_SPECIFIC_DATA */ + +void pktlog_getbuf_intsafe(struct ath_pktlog_arg *plarg) +{ + struct ath_pktlog_buf *log_buf; + int32_t buf_size; + struct ath_pktlog_hdr *log_hdr; + int32_t cur_wr_offset; + char *log_ptr; + struct ath_pktlog_info *pl_info; + uint16_t log_type; + size_t log_size; + uint32_t flags; +#ifdef HELIUMPLUS + uint8_t mac_id; +#endif + + if (!plarg) { + qdf_nofl_info("Invalid parg in %s", __func__); + return; + } + + pl_info = plarg->pl_info; +#ifdef HELIUMPLUS + mac_id = plarg->macId; + log_type = plarg->log_type; +#else + log_type = plarg->log_type; +#endif + log_size = plarg->log_size; + log_buf = pl_info->buf; + flags = plarg->flags; + + if (!log_buf) { + qdf_nofl_info("Invalid log_buf in %s", __func__); + return; + } + + + buf_size = pl_info->buf_size; + cur_wr_offset = log_buf->wr_offset; + /* Move read offset to the next entry if there is a buffer overlap */ + if (log_buf->rd_offset >= 0) { + if ((cur_wr_offset <= log_buf->rd_offset) + && (cur_wr_offset + sizeof(struct ath_pktlog_hdr)) > + log_buf->rd_offset) { + PKTLOG_MOV_RD_IDX(log_buf->rd_offset, log_buf, + buf_size); + } + } else { + log_buf->rd_offset = cur_wr_offset; + } + + log_hdr = (struct ath_pktlog_hdr *)(log_buf->log_data + cur_wr_offset); + + log_hdr->flags = flags; +#ifdef HELIUMPLUS + log_hdr->macId = mac_id; + log_hdr->log_type = log_type; +#else + log_hdr->log_type = log_type; +#endif + log_hdr->size = (uint16_t) log_size; + log_hdr->missed_cnt = plarg->missed_cnt; + log_hdr->timestamp = plarg->timestamp; + pktlog_hdr_set_specific_data(log_hdr, + pktlog_arg_get_specific_data(plarg)); + cur_wr_offset += sizeof(*log_hdr); + + if ((buf_size - cur_wr_offset) < log_size) { + while ((cur_wr_offset <= log_buf->rd_offset) + && (log_buf->rd_offset < buf_size)) { + PKTLOG_MOV_RD_IDX(log_buf->rd_offset, log_buf, + buf_size); + } + cur_wr_offset = 0; + } + + while ((cur_wr_offset <= log_buf->rd_offset) + && (cur_wr_offset + log_size) > log_buf->rd_offset) { + PKTLOG_MOV_RD_IDX(log_buf->rd_offset, log_buf, buf_size); + } + + log_ptr = &(log_buf->log_data[cur_wr_offset]); + cur_wr_offset += log_hdr->size; + + log_buf->wr_offset = ((buf_size - cur_wr_offset) >= + sizeof(struct ath_pktlog_hdr)) ? cur_wr_offset : + 0; + + plarg->buf = log_ptr; +} + +char *pktlog_getbuf(struct pktlog_dev_t *pl_dev, + struct ath_pktlog_info *pl_info, + size_t log_size, struct ath_pktlog_hdr *pl_hdr) +{ + struct ath_pktlog_arg plarg = { 0, }; + uint8_t flags = 0; + + plarg.pl_info = pl_info; +#ifdef HELIUMPLUS + plarg.macId = pl_hdr->macId; + plarg.log_type = pl_hdr->log_type; +#else + plarg.log_type = pl_hdr->log_type; +#endif + plarg.log_size = log_size; + plarg.flags = pl_hdr->flags; + plarg.missed_cnt = pl_hdr->missed_cnt; + plarg.timestamp = pl_hdr->timestamp; + pktlog_arg_set_specific_data(&plarg, + pktlog_hdr_get_specific_data(pl_hdr)); + + if (flags & PHFLAGS_INTERRUPT_CONTEXT) { + /* + * We are already in interrupt context, no need to make it + * intsafe. call the function directly. + */ + pktlog_getbuf_intsafe(&plarg); + } else { + PKTLOG_LOCK(pl_info); + pktlog_getbuf_intsafe(&plarg); + PKTLOG_UNLOCK(pl_info); + } + + return plarg.buf; +} +#endif /*REMOVE_PKT_LOG */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_wifi2.c b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_wifi2.c new file mode 100644 index 0000000000000000000000000000000000000000..2cc8ae7866af19e12da69534be7cda7dffb48f3b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_wifi2.c @@ -0,0 +1,1196 @@ +/** + * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* WIFI2 - Refers to legacy platforms */ +#include "pktlog_wifi2.h" + +#ifndef REMOVE_PKT_LOG +static struct txctl_frm_hdr frm_hdr; + +#ifndef HELIUMPLUS +/** + * process_ieee_hdr(): Process ieee header from the pktlog buffer + * @data: pktlog buffer + * + * Return: None + */ +static void process_ieee_hdr(void *data) +{ + uint8_t dir; + struct ieee80211_frame *wh = (struct ieee80211_frame *)(data); + + frm_hdr.framectrl = *(uint16_t *)(wh->i_fc); + frm_hdr.seqctrl = *(uint16_t *)(wh->i_seq); + dir = (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK); + + if (dir == IEEE80211_FC1_DIR_TODS) { + frm_hdr.bssid_tail = + (wh->i_addr1[QDF_MAC_ADDR_SIZE - 2] << 8) | (wh-> + i_addr1 + [QDF_MAC_ADDR_SIZE + - 1]); + frm_hdr.sa_tail = + (wh->i_addr2[QDF_MAC_ADDR_SIZE - 2] << 8) | (wh-> + i_addr2 + [QDF_MAC_ADDR_SIZE + - 1]); + frm_hdr.da_tail = + (wh->i_addr3[QDF_MAC_ADDR_SIZE - 2] << 8) | (wh-> + i_addr3 + [QDF_MAC_ADDR_SIZE + - 1]); + } else if (dir == IEEE80211_FC1_DIR_FROMDS) { + frm_hdr.bssid_tail = + (wh->i_addr2[QDF_MAC_ADDR_SIZE - 2] << 8) | (wh-> + i_addr2 + [QDF_MAC_ADDR_SIZE + - 1]); + frm_hdr.sa_tail = + (wh->i_addr3[QDF_MAC_ADDR_SIZE - 2] << 8) | (wh-> + i_addr3 + [QDF_MAC_ADDR_SIZE + - 1]); + frm_hdr.da_tail = + (wh->i_addr1[QDF_MAC_ADDR_SIZE - 2] << 8) | (wh-> + i_addr1 + [QDF_MAC_ADDR_SIZE + - 1]); + } else { + frm_hdr.bssid_tail = + (wh->i_addr3[QDF_MAC_ADDR_SIZE - 2] << 8) | (wh-> + i_addr3 + [QDF_MAC_ADDR_SIZE + - 1]); + frm_hdr.sa_tail = + (wh->i_addr2[QDF_MAC_ADDR_SIZE - 2] << 8) | (wh-> + i_addr2 + [QDF_MAC_ADDR_SIZE + - 1]); + frm_hdr.da_tail = + (wh->i_addr1[QDF_MAC_ADDR_SIZE - 2] << 8) | (wh-> + i_addr1 + [QDF_MAC_ADDR_SIZE + - 1]); + } +} + +/** + * fill_ieee80211_hdr_data() - fill ieee802.11 data header + * @txrx_pdev: txrx pdev + * @pl_msdu_info: msdu info + * @data: data received from event + * + * Return: none + */ +/* TODO: Platform specific function */ +static void +fill_ieee80211_hdr_data(struct cdp_pdev *pdev, + struct ath_pktlog_msdu_info *pl_msdu_info, + void *data) +{ + uint32_t i; + uint32_t *htt_tx_desc; + struct ol_tx_desc_t *tx_desc; + uint8_t msdu_id_offset = MSDU_ID_INFO_ID_OFFSET; + uint16_t tx_desc_id; + uint32_t *msdu_id_info = (uint32_t *) + ((void *)data + sizeof(struct ath_pktlog_hdr)); + uint32_t *msdu_id = (uint32_t *)((char *)msdu_id_info + + msdu_id_offset); + uint8_t *addr, *vap_addr; + uint8_t vdev_id; + qdf_nbuf_t netbuf; + uint32_t len; + struct ol_txrx_pdev_t *txrx_pdev = (struct ol_txrx_pdev_t *)pdev; + + pl_msdu_info->num_msdu = *msdu_id_info; + pl_msdu_info->priv_size = sizeof(uint32_t) * + pl_msdu_info->num_msdu + sizeof(uint32_t); + + if (pl_msdu_info->num_msdu > MAX_PKT_INFO_MSDU_ID) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Invalid num_msdu count", + __func__); + qdf_assert(0); + return; + } + for (i = 0; i < pl_msdu_info->num_msdu; i++) { + /* + * Handle big endianness + * Increment msdu_id once after retrieving + * lower 16 bits and uppper 16 bits + */ + if (!(i % 2)) { + tx_desc_id = ((*msdu_id & TX_DESC_ID_LOW_MASK) + >> TX_DESC_ID_LOW_SHIFT); + } else { + tx_desc_id = ((*msdu_id & TX_DESC_ID_HIGH_MASK) + >> TX_DESC_ID_HIGH_SHIFT); + msdu_id += 1; + } + if (tx_desc_id >= txrx_pdev->tx_desc.pool_size) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: drop due to invalid msdu id = %x", + __func__, tx_desc_id); + return; + } + tx_desc = ol_tx_desc_find(txrx_pdev, tx_desc_id); + qdf_assert(tx_desc); + netbuf = tx_desc->netbuf; + htt_tx_desc = (uint32_t *)tx_desc->htt_tx_desc; + qdf_assert(htt_tx_desc); + + qdf_nbuf_peek_header(netbuf, &addr, &len); + + if (len < (2 * QDF_MAC_ADDR_SIZE)) { + qdf_print("TX frame does not have a valid address"); + return; + } + /* Adding header information for the TX data frames */ + vdev_id = (uint8_t)(*(htt_tx_desc + + HTT_TX_VDEV_ID_WORD) >> + HTT_TX_VDEV_ID_SHIFT) & + HTT_TX_VDEV_ID_MASK; + + vap_addr = wma_get_vdev_address_by_vdev_id(vdev_id); + + frm_hdr.da_tail = (addr[QDF_MAC_ADDR_SIZE - 2] << 8) | + (addr[QDF_MAC_ADDR_SIZE - 1]); + frm_hdr.sa_tail = + (addr[2 * QDF_MAC_ADDR_SIZE - 2] << 8) | + (addr[2 * QDF_MAC_ADDR_SIZE - 1]); + if (vap_addr) { + frm_hdr.bssid_tail = + (vap_addr[QDF_MAC_ADDR_SIZE - 2] << 8) | + (vap_addr[QDF_MAC_ADDR_SIZE - 1]); + } else { + frm_hdr.bssid_tail = 0x0000; + } + pl_msdu_info->priv.msdu_len[i] = *(htt_tx_desc + + HTT_TX_MSDU_LEN_DWORD) + & HTT_TX_MSDU_LEN_MASK; + /* + * Add more information per MSDU + * e.g., protocol information + */ + } +} +#endif /* HELIUMPLUS */ + +#ifdef HELIUMPLUS +A_STATUS process_tx_info(struct cdp_pdev *txrx_pdev, void *data) +{ + /* + * Must include to process different types + * TX_CTL, TX_STATUS, TX_MSDU_ID, TX_FRM_HDR + */ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_info *pl_info; + uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; + + if (!txrx_pdev) { + qdf_nofl_info("Invalid pdev in %s", __func__); + return A_ERROR; + } + + if (!pl_dev) { + qdf_nofl_err("Invalid pktlog handle in %s", __func__); + qdf_assert(pl_dev); + return A_ERROR; + } + + qdf_assert(data); + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.flags |= PKTLOG_HDR_SIZE_16; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.macId = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MAC_ID_OFFSET) & + ATH_PKTLOG_HDR_MAC_ID_MASK) >> + ATH_PKTLOG_HDR_MAC_ID_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + pl_hdr.type_specific_data = + *(pl_tgt_hdr + ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET); + pl_info = pl_dev->pl_info; + + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + + if (pl_hdr.log_type == PKTLOG_TYPE_TX_CTRL) { + size_t log_size = sizeof(frm_hdr) + pl_hdr.size; + void *txdesc_hdr_ctl = (void *) + pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + + qdf_assert(txdesc_hdr_ctl); + qdf_assert(pl_hdr.size < (370 * sizeof(u_int32_t))); + + qdf_mem_copy(txdesc_hdr_ctl, &frm_hdr, sizeof(frm_hdr)); + qdf_mem_copy((char *)txdesc_hdr_ctl + sizeof(frm_hdr), + ((void *)fw_data->data + + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + pl_hdr.size = log_size; + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, + txdesc_hdr_ctl); + } + + if (pl_hdr.log_type == PKTLOG_TYPE_TX_STAT) { + struct ath_pktlog_tx_status txstat_log; + size_t log_size = pl_hdr.size; + + txstat_log.ds_status = (void *) + pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + qdf_assert(txstat_log.ds_status); + qdf_mem_copy(txstat_log.ds_status, + ((void *)fw_data->data + + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + /* TODO: MCL specific API */ + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, + txstat_log.ds_status); + } + return A_OK; +} +#else +A_STATUS process_tx_info(struct cdp_pdev *txrx_pdev, void *data) +{ + /* + * Must include to process different types + * TX_CTL, TX_STATUS, TX_MSDU_ID, TX_FRM_HDR + */ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_info *pl_info; + uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; + + if (!txrx_pdev) { + qdf_print("Invalid pdev in %s", __func__); + return A_ERROR; + } + + if (!pl_dev) { + qdf_nofl_err("Invalid pktlog handle in %s", __func__); + qdf_assert(pl_dev); + return A_ERROR; + } + + qdf_assert(data); + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + + pktlog_hdr_set_specific_data(&pl_hdr, + *(pl_tgt_hdr + + ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET)); + + pl_info = pl_dev->pl_info; + + if (pl_hdr.log_type == PKTLOG_TYPE_TX_FRM_HDR) { + /* Valid only for the TX CTL */ + process_ieee_hdr(fw_data->data + sizeof(pl_hdr)); + } + + if (pl_hdr.log_type == PKTLOG_TYPE_TX_VIRT_ADDR) { + uint32_t desc_id = (uint32_t)*((uint32_t *)(fw_data->data + + sizeof(pl_hdr))); + uint32_t vdev_id = desc_id; + + /* if the pkt log msg is for the bcn frame the vdev id + * is piggybacked in desc_id and the MSB of the desc ID + * would be set to FF + */ +#define BCN_DESC_ID 0xFF + if ((desc_id >> 24) == BCN_DESC_ID) { + void *data; + uint32_t buf_size; + + vdev_id &= 0x00FFFFFF; + /* TODO: MCL specific API */ + data = wma_get_beacon_buffer_by_vdev_id(vdev_id, + &buf_size); + if (data) { + /* TODO: platform specific API */ + process_ieee_hdr(data); + qdf_mem_free(data); + } + } else { + /* + * TODO: get the hdr content for mgmt frames from + * Tx mgmt desc pool + */ + } + } + + if (pl_hdr.log_type == PKTLOG_TYPE_TX_CTRL) { + struct ath_pktlog_txctl txctl_log; + size_t log_size = sizeof(txctl_log.priv); + + txctl_log.txdesc_hdr_ctl = (void *)pktlog_getbuf(pl_dev, + pl_info, + log_size, + &pl_hdr); + + if (!txctl_log.txdesc_hdr_ctl) { + qdf_nofl_info + ("failed to get txctl_log.txdesc_hdr_ctl buf"); + return A_ERROR; + } + + /* + * frm hdr is currently Valid only for local frames + * Add capability to include the fmr hdr for remote frames + */ + txctl_log.priv.frm_hdr = frm_hdr; + qdf_assert(txctl_log.priv.txdesc_ctl); + qdf_assert(pl_hdr.size < sizeof(txctl_log.priv.txdesc_ctl)); + pl_hdr.size = (pl_hdr.size > sizeof(txctl_log.priv.txdesc_ctl)) + ? sizeof(txctl_log.priv.txdesc_ctl) : + pl_hdr.size; + + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + qdf_mem_copy((void *)&txctl_log.priv.txdesc_ctl, + ((void *)fw_data->data + + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + qdf_assert(txctl_log.txdesc_hdr_ctl); + qdf_mem_copy(txctl_log.txdesc_hdr_ctl, &txctl_log.priv, + sizeof(txctl_log.priv)); + pl_hdr.size = log_size; + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, + txctl_log.txdesc_hdr_ctl); + /* Add Protocol information and HT specific information */ + } + + if (pl_hdr.log_type == PKTLOG_TYPE_TX_STAT) { + struct ath_pktlog_tx_status txstat_log; + size_t log_size = pl_hdr.size; + + txstat_log.ds_status = (void *) + pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + qdf_assert(txstat_log.ds_status); + qdf_mem_copy(txstat_log.ds_status, + ((void *)fw_data->data + + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, + txstat_log.ds_status); + } + + if (pl_hdr.log_type == PKTLOG_TYPE_TX_MSDU_ID) { + struct ath_pktlog_msdu_info pl_msdu_info; + size_t log_size; + + qdf_mem_zero(&pl_msdu_info, sizeof(pl_msdu_info)); + log_size = sizeof(pl_msdu_info.priv); + + if (pl_dev->mt_pktlog_enabled == false) + fill_ieee80211_hdr_data(txrx_pdev, + &pl_msdu_info, fw_data->data); + + pl_msdu_info.ath_msdu_info = pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + qdf_mem_copy((void *)&pl_msdu_info.priv.msdu_id_info, + ((void *)fw_data->data + + sizeof(struct ath_pktlog_hdr)), + sizeof(pl_msdu_info.priv.msdu_id_info)); + qdf_mem_copy(pl_msdu_info.ath_msdu_info, &pl_msdu_info.priv, + sizeof(pl_msdu_info.priv)); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, + pl_msdu_info.ath_msdu_info); + } + + return A_OK; +} +#endif /* HELIUMPLUS */ + +A_STATUS process_rx_info_remote(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_info *pl_info; + struct htt_host_rx_desc_base *rx_desc; + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_rx_info rxstat_log; + size_t log_size; + struct ol_rx_remote_data *r_data = (struct ol_rx_remote_data *)data; + qdf_nbuf_t msdu; + + if (!pdev || !r_data || !pl_dev) { + qdf_print("%s: Invalid handle", __func__); + return A_ERROR; + } + + pl_info = pl_dev->pl_info; + msdu = r_data->msdu; + + while (msdu) { + rx_desc = + (struct htt_host_rx_desc_base *)(qdf_nbuf_data(msdu)) - 1; + log_size = + sizeof(*rx_desc) - sizeof(struct htt_host_fw_desc_base); + + /* + * Construct the pktlog header pl_hdr + * Because desc is DMA'd to the host memory + */ + pl_hdr.flags = (1 << PKTLOG_FLG_FRM_TYPE_REMOTE_S); + pl_hdr.missed_cnt = 0; +#if defined(HELIUMPLUS) + pl_hdr.macId = r_data->mac_id; + pl_hdr.log_type = PKTLOG_TYPE_RX_STAT; + pl_hdr.flags |= PKTLOG_HDR_SIZE_16; +#else + pl_hdr.log_type = PKTLOG_TYPE_RX_STAT; +#endif + pl_hdr.size = sizeof(*rx_desc) - + sizeof(struct htt_host_fw_desc_base); +#if defined(HELIUMPLUS) + pl_hdr.timestamp = + rx_desc->ppdu_end.rx_pkt_end.phy_timestamp_1_lower_32; + pl_hdr.type_specific_data = 0xDEADAA; +#else + pl_hdr.timestamp = rx_desc->ppdu_end.tsf_timestamp; +#endif /* !defined(HELIUMPLUS) */ + + pktlog_hdr_set_specific_data(&pl_hdr, 0xDEADAA); + + rxstat_log.rx_desc = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + qdf_mem_copy(rxstat_log.rx_desc, (void *)rx_desc + + sizeof(struct htt_host_fw_desc_base), pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, + rxstat_log.rx_desc); + msdu = qdf_nbuf_next(msdu); + } + return A_OK; +} + +#ifdef HELIUMPLUS +A_STATUS process_rx_info(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_info *pl_info; + struct ath_pktlog_rx_info rxstat_log; + struct ath_pktlog_hdr pl_hdr; + size_t log_size; + uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; + + if (!pdev) { + qdf_nofl_info("Invalid pdev in %s", __func__); + return A_ERROR; + } + + pl_dev = ((struct ol_txrx_pdev_t *)pdev)->pl_dev; + if (!pl_dev) { + qdf_nofl_info("Invalid pl_dev in %s", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_info = pl_dev->pl_info; + pl_tgt_hdr = (uint32_t *)fw_data->data; + + qdf_mem_zero(&pl_hdr, sizeof(pl_hdr)); + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.macId = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MAC_ID_OFFSET) & + ATH_PKTLOG_HDR_MAC_ID_MASK) >> + ATH_PKTLOG_HDR_MAC_ID_SHIFT; + pl_hdr.flags |= PKTLOG_HDR_SIZE_16; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + + log_size = pl_hdr.size; + rxstat_log.rx_desc = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + qdf_mem_copy(rxstat_log.rx_desc, + (void *)fw_data->data + sizeof(struct ath_pktlog_hdr), + pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rxstat_log.rx_desc); + + return A_OK; +} +#else +A_STATUS process_rx_info(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_info *pl_info; + struct ath_pktlog_rx_info rxstat_log; + struct ath_pktlog_hdr pl_hdr; + size_t log_size; + uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; + + if (!pdev) { + qdf_nofl_info("Invalid pdev in %s", __func__); + return A_ERROR; + } + + pl_dev = ((struct ol_txrx_pdev_t *)pdev)->pl_dev; + if (!pl_dev) { + qdf_nofl_info("Invalid pl_dev in %s", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_info = pl_dev->pl_info; + pl_tgt_hdr = (uint32_t *)fw_data->data; + qdf_mem_zero(&pl_hdr, sizeof(pl_hdr)); + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + + log_size = pl_hdr.size; + rxstat_log.rx_desc = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + qdf_mem_copy(rxstat_log.rx_desc, + (void *)fw_data->data + sizeof(struct ath_pktlog_hdr), + pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rxstat_log.rx_desc); + + return A_OK; +} +#endif /* HELIUMPLUS */ + +#ifdef HELIUMPLUS +A_STATUS process_rate_find(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_info *pl_info; + size_t log_size; + uint32_t len; + struct ol_fw_data *fw_data; + + /* + * Will be uncommented when the rate control find + * for pktlog is implemented in the firmware. + * Currently derived from the TX PPDU status + */ + struct ath_pktlog_rc_find rcf_log; + uint32_t *pl_tgt_hdr; + + if (!pdev || !data || !pl_dev) { + qdf_print("%s: Invalid handle", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + + qdf_mem_zero(&pl_hdr, sizeof(pl_hdr)); + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.macId = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MAC_ID_OFFSET) & + ATH_PKTLOG_HDR_MAC_ID_MASK) >> + ATH_PKTLOG_HDR_MAC_ID_SHIFT; + pl_hdr.flags |= PKTLOG_HDR_SIZE_16; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + pl_info = pl_dev->pl_info; + log_size = pl_hdr.size; + rcf_log.rcFind = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + qdf_mem_copy(rcf_log.rcFind, + ((char *)fw_data->data + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rcf_log.rcFind); + + return A_OK; +} + +#else +A_STATUS process_rate_find(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_info *pl_info; + size_t log_size; + uint32_t len; + struct ol_fw_data *fw_data; + + /* + * Will be uncommented when the rate control find + * for pktlog is implemented in the firmware. + * Currently derived from the TX PPDU status + */ + struct ath_pktlog_rc_find rcf_log; + uint32_t *pl_tgt_hdr; + + if (!pdev || !data || !pl_dev) { + qdf_print("%s: Invalid handle", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + + qdf_mem_zero(&pl_hdr, sizeof(pl_hdr)); + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + pl_info = pl_dev->pl_info; + log_size = pl_hdr.size; + rcf_log.rcFind = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + qdf_mem_copy(rcf_log.rcFind, + ((char *)fw_data->data + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rcf_log.rcFind); + + return A_OK; +} +#endif + +#ifdef HELIUMPLUS +A_STATUS process_rate_update(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + size_t log_size; + struct ath_pktlog_info *pl_info; + struct ath_pktlog_rc_update rcu_log; + uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; + + if (!pdev || !data || !pl_dev) { + qdf_print("%s: Invalid handle", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + qdf_mem_zero(&pl_hdr, sizeof(pl_hdr)); + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.macId = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MAC_ID_OFFSET) & + ATH_PKTLOG_HDR_MAC_ID_MASK) >> + ATH_PKTLOG_HDR_MAC_ID_SHIFT; + pl_hdr.flags |= PKTLOG_HDR_SIZE_16; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + log_size = pl_hdr.size; + pl_info = pl_dev->pl_info; + + /* + * Will be uncommented when the rate control update + * for pktlog is implemented in the firmware. + * Currently derived from the TX PPDU status + */ + rcu_log.txRateCtrl = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + qdf_mem_copy(rcu_log.txRateCtrl, + ((char *)fw_data->data + + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rcu_log.txRateCtrl); + return A_OK; +} +#else +A_STATUS process_rate_update(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + size_t log_size; + struct ath_pktlog_info *pl_info; + struct ath_pktlog_rc_update rcu_log; + uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; + + if (!pdev || !data || !pl_dev) { + qdf_print("%s: Invalid handle", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + qdf_mem_zero(&pl_hdr, sizeof(pl_hdr)); + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + log_size = pl_hdr.size; + pl_info = pl_dev->pl_info; + + /* + * Will be uncommented when the rate control update + * for pktlog is implemented in the firmware. + * Currently derived from the TX PPDU status + */ + rcu_log.txRateCtrl = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + qdf_mem_copy(rcu_log.txRateCtrl, + ((char *)fw_data->data + + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rcu_log.txRateCtrl); + return A_OK; +} +#endif /* HELIUMPLUS */ + +#ifdef HELIUMPLUS +A_STATUS process_sw_event(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_info *pl_info; + size_t log_size; + uint32_t len; + struct ol_fw_data *fw_data; + + /* + * Will be uncommented when the rate control find + * for pktlog is implemented in the firmware. + * Currently derived from the TX PPDU status + */ + struct ath_pktlog_sw_event sw_event; + uint32_t *pl_tgt_hdr; + + if (!pdev) { + qdf_print("Invalid pdev in %s", __func__); + return A_ERROR; + } + if (!data) { + qdf_print("Invalid data in %s", __func__); + return A_ERROR; + } + if (!pl_dev) { + qdf_print("Invalid pl_dev in %s", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.macId = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MAC_ID_OFFSET) & + ATH_PKTLOG_HDR_MAC_ID_MASK) >> + ATH_PKTLOG_HDR_MAC_ID_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + + pl_hdr.type_specific_data = + *(pl_tgt_hdr + ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET); + pl_info = pl_dev->pl_info; + log_size = pl_hdr.size; + sw_event.sw_event = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + qdf_mem_copy(sw_event.sw_event, + ((char *)fw_data->data + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, sw_event.sw_event); + + return A_OK; +} +#else +A_STATUS process_sw_event(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_info *pl_info; + size_t log_size; + uint32_t len; + struct ol_fw_data *fw_data; + + /* + * Will be uncommented when the rate control find + * for pktlog is implemented in the firmware. + * Currently derived from the TX PPDU status + */ + struct ath_pktlog_sw_event sw_event; + uint32_t *pl_tgt_hdr; + + if (!pdev) { + qdf_print("Invalid pdev in %s", __func__); + return A_ERROR; + } + if (!data) { + qdf_print("Invalid data in %s", __func__); + return A_ERROR; + } + if (!pl_dev) { + qdf_print("Invalid pl_dev in %s", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + + pktlog_hdr_set_specific_data(&pl_hdr, + *(pl_tgt_hdr + + ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET)); + + pl_info = pl_dev->pl_info; + log_size = pl_hdr.size; + sw_event.sw_event = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + qdf_mem_copy(sw_event.sw_event, + ((char *)fw_data->data + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, sw_event.sw_event); + + return A_OK; +} +#endif /* HELIUMPLUS */ +#endif /* REMOVE_PKT_LOG */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_wifi3.c b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_wifi3.c new file mode 100644 index 0000000000000000000000000000000000000000..c74c7374aa6b85c9044cdd652c4b7535d7fd54be --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_wifi3.c @@ -0,0 +1,163 @@ +/** + * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* WIFI3 - Refers to platforms - 6290/6390/6490 */ +#include "pktlog_wifi3.h" + +#ifndef REMOVE_PKT_LOG +A_STATUS +process_offload_pktlog_wifi3(struct cdp_pdev *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_info *pl_info; + struct ath_pktlog_hdr pl_hdr; + uint32_t *pl_tgt_hdr; + void *txdesc_hdr_ctl = NULL; + size_t log_size = 0; + + if (!pl_dev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid context in %s\n", __func__); + return A_ERROR; + } + + if (!data) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid data in %s\n", __func__); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)data; + + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + + pl_hdr.type_specific_data = *(pl_tgt_hdr + + ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET); + + if (pl_hdr.size > MAX_PKTLOG_RECV_BUF_SIZE) { + pl_dev->invalid_packets++; + return A_ERROR; + } + + /* + * Must include to process different types + * TX_CTL, TX_STATUS, TX_MSDU_ID, TX_FRM_HDR + */ + pl_info = pl_dev->pl_info; + log_size = pl_hdr.size; + txdesc_hdr_ctl = + (void *)pktlog_getbuf(pl_dev, pl_info, log_size, &pl_hdr); + if (!txdesc_hdr_ctl) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Failed to allocate pktlog descriptor"); + return A_NO_MEMORY; + } + qdf_assert(txdesc_hdr_ctl); + qdf_assert(pl_hdr->size < PKTLOG_MAX_TX_WORDS * sizeof(u_int32_t)); + qdf_mem_copy(txdesc_hdr_ctl, + ((void *)data + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, txdesc_hdr_ctl); + + return A_OK; +} + +int process_rx_desc_remote_wifi3(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_rx_info rxstat_log; + size_t log_size; + struct ath_pktlog_info *pl_info; + qdf_nbuf_t log_nbuf = (qdf_nbuf_t)data; + + if (!pl_dev) { + qdf_err("Pktlog handle is NULL"); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + qdf_mem_zero(&pl_hdr, sizeof(pl_hdr)); + pl_hdr.flags = (1 << PKTLOG_FLG_FRM_TYPE_REMOTE_S); + pl_hdr.missed_cnt = 0; + pl_hdr.log_type = PKTLOG_TYPE_RX_STATBUF; + pl_hdr.size = qdf_nbuf_len(log_nbuf); + pl_hdr.timestamp = 0; + log_size = pl_hdr.size; + rxstat_log.rx_desc = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + + if (!rxstat_log.rx_desc) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, + "%s: Rx descriptor is NULL", __func__); + return -EINVAL; + } + + qdf_mem_copy(rxstat_log.rx_desc, qdf_nbuf_data(log_nbuf), pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, + rxstat_log.rx_desc); + return 0; +} + +int +process_pktlog_lite_wifi3(void *context, void *log_data, + uint16_t log_type) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_info *pl_info; + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_rx_info rxstat_log; + size_t log_size; + qdf_nbuf_t log_nbuf = (qdf_nbuf_t)log_data; + + if (!pl_dev) { + qdf_err("Pktlog handle is NULL"); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + qdf_mem_zero(&pl_hdr, sizeof(pl_hdr)); + pl_hdr.flags = (1 << PKTLOG_FLG_FRM_TYPE_REMOTE_S); + pl_hdr.missed_cnt = 0; + pl_hdr.log_type = log_type; + pl_hdr.size = qdf_nbuf_len(log_nbuf); + pl_hdr.timestamp = 0; + log_size = pl_hdr.size; + rxstat_log.rx_desc = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + if (!rxstat_log.rx_desc) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, + "%s: Rx descriptor is NULL", __func__); + return -EINVAL; + } + + qdf_mem_copy(rxstat_log.rx_desc, qdf_nbuf_data(log_nbuf), pl_hdr.size); + + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rxstat_log.rx_desc); + return 0; +} +#endif /* REMOVE_PKT_LOG */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/ptt/inc/wlan_ptt_sock_svc.h b/drivers/staging/qca-wifi-host-cmn/utils/ptt/inc/wlan_ptt_sock_svc.h new file mode 100644 index 0000000000000000000000000000000000000000..4aded38fef354d1bf3c397493ae22bf75064ce0f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/ptt/inc/wlan_ptt_sock_svc.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2012-2018,2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/****************************************************************************** +* wlan_ptt_sock_svc.c +* +******************************************************************************/ +#ifndef PTT_SOCK_SVC_H +#define PTT_SOCK_SVC_H +#include +#include +#include +#include + +/* + * Quarky Message Format: + * The following is the messaging protocol between Quarky and PTT Socket App. + * The totalMsgLen is the length from Radio till msgBody. The value of Radio + * is always defaulted to 0. The MsgLen is the length from msgId till msgBody. + * The length of the msgBody varies with respect to the MsgId. Buffer space + * for MsgBody is already allocated in the received buffer. So in case of READ + * we just need to populate the values in the received message and send it + * back + * +------------+-------+-------+--------+-------+---------+ + * |TotalMsgLen | Radio | MsgId | MsgLen |Status |MsgBody | + * +------------+-------+-------|--------+-------+---------+ + * <------4----><--4---><---2--><---2---><---4--><---------> + */ +/* PTT Socket App Message Ids */ +#define PTT_MSG_READ_REGISTER 0x3040 +#define PTT_MSG_WRITE_REGISTER 0x3041 +#define PTT_MSG_READ_MEMORY 0x3044 +#define PTT_MSG_WRITE_MEMORY 0x3045 +#define PTT_MSG_LOG_DUMP_DBG 0x32A1 +#define PTT_MSG_FTM_CMDS_TYPE 0x4040 +#define ANI_DRIVER_MSG_START 0x0001 +#define ANI_MSG_APP_REG_REQ (ANI_DRIVER_MSG_START + 0) +#define ANI_MSG_APP_REG_RSP (ANI_DRIVER_MSG_START + 1) +#define ANI_MSG_OEM_DATA_REQ (ANI_DRIVER_MSG_START + 2) +#define ANI_MSG_OEM_DATA_RSP (ANI_DRIVER_MSG_START + 3) +#define ANI_MSG_CHANNEL_INFO_REQ (ANI_DRIVER_MSG_START + 4) +#define ANI_MSG_CHANNEL_INFO_RSP (ANI_DRIVER_MSG_START + 5) +#define ANI_MSG_OEM_ERROR (ANI_DRIVER_MSG_START + 6) +#define ANI_MSG_PEER_STATUS_IND (ANI_DRIVER_MSG_START + 7) +#define ANI_MSG_SET_OEM_CAP_REQ (ANI_DRIVER_MSG_START + 8) +#define ANI_MSG_SET_OEM_CAP_RSP (ANI_DRIVER_MSG_START + 9) +#define ANI_MSG_GET_OEM_CAP_REQ (ANI_DRIVER_MSG_START + 10) +#define ANI_MSG_GET_OEM_CAP_RSP (ANI_DRIVER_MSG_START + 11) + +#define ANI_MAX_RADIOS 3 +#define ANI_NL_MSG_OK 0 +#define ANI_NL_MSG_ERROR -1 +#define ANI_NL_MSG_OVERHEAD (NLMSG_SPACE(tAniHdr + 4)) +/* + * Packet Format for READ_REGISTER & WRITE_REGISTER: + * TotalMsgLen : 4 bytes [value=20 bytes] + * Radio : 4 bytes + * MsgId : 2 bytes + * MsgLen : 2 bytes + * Status : 4 bytes + * Address : 4 bytes + * Payload : 4 bytes + */ +/* + * Packet Format for READ_MEMORY & WRITE_MEMORY : + * TotalMsgLen : 4 bytes [value= 20+LEN_PAYLOAD bytes] + * Radio : 4 bytes + * MsgId : 2 bytes + * MsgLen : 2 bytes + * Status : 4 bytes + * Address : 4 bytes + * Length : 4 bytes [LEN_PAYLOAD] + * Payload : LEN_PAYLOAD bytes + */ +#if defined(PTT_SOCK_SVC_ENABLE) && defined(CNSS_GENL) +/** + * ptt_sock_activate_svc() - API to register PTT/PUMAC command handlers + * + * API to register the handler for PTT/PUMAC NL messages. + * + * Return: None + */ +void ptt_sock_activate_svc(void); + +/** + * ptt_sock_deactivate_svc() - API to deregister PTT/PUMAC command handlers + * + * API to deregister the handler for PTT/PUMAC NL messages. + * + * Return: None + */ +void ptt_sock_deactivate_svc(void); + +#else +static inline void ptt_sock_activate_svc(void) +{ +} +static inline void ptt_sock_deactivate_svc(void) +{ +} +#endif + +int ptt_sock_send_msg_to_app(tAniHdr *wmsg, int radio, int src_mod, int pid); +/* + * Format of message exchanged between the PTT Socket App in userspace and the + * WLAN Driver, in either direction. Each msg will begin with this header and + * will followed by the Quarky message + */ +struct sAniAppRegReq { + tAniNlModTypes type; /* module id */ + int pid; /* process id */ +}; + +/** + * struct sptt_app_reg_req - PTT register request structure + * @radio: Radio ID + * @wmsg: ANI header + * + * payload structure received as nl data from PTT app/user space + */ +struct sptt_app_reg_req { + int radio; + tAniHdr wmsg; +}; + +struct sAniNlAppRegRsp { + tAniHdr wniHdr; /* Generic WNI msg header */ + struct sAniAppRegReq regReq; /* The original request msg */ + int ret; /* Return code */ +}; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/utils/ptt/src/wlan_ptt_sock_svc.c b/drivers/staging/qca-wifi-host-cmn/utils/ptt/src/wlan_ptt_sock_svc.c new file mode 100644 index 0000000000000000000000000000000000000000..be58621e178cfbfd47025687ed616488f486236c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/ptt/src/wlan_ptt_sock_svc.c @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/****************************************************************************** +* wlan_ptt_sock_svc.c +* +******************************************************************************/ +#ifdef PTT_SOCK_SVC_ENABLE +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CNSS_GENL +#include +#include +#endif + +#define PTT_SOCK_DEBUG +#ifdef PTT_SOCK_DEBUG +#define PTT_TRACE(level, args ...) QDF_TRACE(QDF_MODULE_ID_QDF, level, ## args) +#else +#define PTT_TRACE(level, args ...) +#endif + +#ifdef PTT_SOCK_DEBUG_VERBOSE +/* Utility function to perform a hex dump */ +static void ptt_sock_dump_buf(const unsigned char *pbuf, int cnt) +{ + int i; + + for (i = 0; i < cnt; i++) { + if ((i % 16) == 0) + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, + "\n%pK:", pbuf); + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, " %02X", + *pbuf); + pbuf++; + } + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, "\n"); +} +#endif + +/** + * nl_srv_ucast_ptt() - Wrapper function to send ucast msgs to PTT + * @skb: sk buffer pointer + * @dst_pid: Destination PID + * @flag: flags + * + * Sends the ucast message to PTT with generic nl socket if CNSS_GENL + * is enabled. Else, use the legacy netlink socket to send. + * + * Return: zero on success, error code otherwise + */ +static int nl_srv_ucast_ptt(struct sk_buff *skb, int dst_pid, int flag) +{ +#ifdef CNSS_GENL + return nl_srv_ucast(skb, dst_pid, flag, ANI_NL_MSG_PUMAC, + CLD80211_MCGRP_DIAG_EVENTS); +#else + return nl_srv_ucast(skb, dst_pid, flag); +#endif +} + +/** + * nl_srv_bcast_ptt() - Wrapper function to send bcast msgs to DIAG mcast group + * @skb: sk buffer pointer + * + * Sends the bcast message to DIAG multicast group with generic nl socket + * if CNSS_GENL is enabled. Else, use the legacy netlink socket to send. + * + * Return: zero on success, error code otherwise + */ +static int nl_srv_bcast_ptt(struct sk_buff *skb) +{ +#ifdef CNSS_GENL + return nl_srv_bcast(skb, CLD80211_MCGRP_DIAG_EVENTS, ANI_NL_MSG_PUMAC); +#else + return nl_srv_bcast(skb); +#endif +} + +/** + * ptt_sock_send_msg_to_app() - Send nl message to user space + * wmsg: Message header + * radio: Unit number of the radio + * src_mod: Message type + * pid: Process ID to which message will be unicast. Message + * will be broadcast when PID is INVALID_PID + * + * Utility function to send a netlink message to an application in user space + * + * Return: 0 on success and negative value on failure + */ +int ptt_sock_send_msg_to_app(tAniHdr *wmsg, int radio, int src_mod, int pid) +{ + int err = -1; + int payload_len; + int tot_msg_len; + tAniNlHdr *wnl; + struct sk_buff *skb; + struct nlmsghdr *nlh; + int wmsg_length = be16_to_cpu(wmsg->length); + static int nlmsg_seq; + + if (radio < 0 || radio > ANI_MAX_RADIOS) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, "%s: invalid radio id [%d]\n", + __func__, radio); + return -EINVAL; + } + payload_len = wmsg_length + sizeof(wnl->radio) + sizeof(*wmsg); + tot_msg_len = NLMSG_SPACE(payload_len); + skb = dev_alloc_skb(tot_msg_len); + if (!skb) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, + "%s: dev_alloc_skb() failed for msg size[%d]\n", + __func__, tot_msg_len); + return -ENOMEM; + } + nlh = + nlmsg_put(skb, pid, nlmsg_seq++, src_mod, payload_len, + NLM_F_REQUEST); + if (!nlh) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, + "%s: nlmsg_put() failed for msg size[%d]\n", __func__, + tot_msg_len); + kfree_skb(skb); + return -ENOMEM; + } + wnl = (tAniNlHdr *) nlh; + wnl->radio = radio; + memcpy(&wnl->wmsg, wmsg, wmsg_length); +#ifdef PTT_SOCK_DEBUG_VERBOSE + ptt_sock_dump_buf((const unsigned char *)skb->data, skb->len); +#endif + + if (pid != INVALID_PID) + err = nl_srv_ucast_ptt(skb, pid, MSG_DONTWAIT); + else + err = nl_srv_bcast_ptt(skb); + + if (err) + PTT_TRACE(QDF_TRACE_LEVEL_INFO, + "%s:Failed sending Msg Type [0x%X] to pid[%d]\n", + __func__, be16_to_cpu(wmsg->type), pid); + return err; +} + +#ifdef CNSS_GENL +/** + * ptt_cmd_handler() - Handler function for PTT commands + * @data: Data to be parsed + * @data_len: Length of the data received + * @ctx: Registered context reference + * @pid: Process id of the user space application + * + * This function handles the command from PTT user space application + * + * Return: None + */ +static void ptt_cmd_handler(const void *data, int data_len, void *ctx, int pid) +{ + uint16_t length; + struct sptt_app_reg_req *payload; + struct nlattr *tb[CLD80211_ATTR_MAX + 1]; + + /* + * audit note: it is ok to pass a NULL policy here since a + * length check on the data is added later already + */ + if (wlan_cfg80211_nla_parse(tb, CLD80211_ATTR_MAX, + data, data_len, NULL)) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, "Invalid ATTR"); + return; + } + + if (!tb[CLD80211_ATTR_DATA]) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, "attr ATTR_DATA failed"); + return; + } + + if (nla_len(tb[CLD80211_ATTR_DATA]) < sizeof(struct sptt_app_reg_req)) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, "%s:attr length check fails\n", + __func__); + return; + } + + payload = (struct sptt_app_reg_req *)(nla_data(tb[CLD80211_ATTR_DATA])); + length = be16_to_cpu(payload->wmsg.length); + if ((USHRT_MAX - length) < (sizeof(payload->radio) + sizeof(tAniHdr))) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, + "u16 overflow length %d %zu %zu", + length, + sizeof(payload->radio), + sizeof(tAniHdr)); + return; + } + + if (nla_len(tb[CLD80211_ATTR_DATA]) < (length + + sizeof(payload->radio) + + sizeof(tAniHdr))) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, "ATTR_DATA len check failed"); + return; + } + + switch (payload->wmsg.type) { + case ANI_MSG_APP_REG_REQ: + ptt_sock_send_msg_to_app(&payload->wmsg, payload->radio, + ANI_NL_MSG_PUMAC, pid); + break; + default: + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, "Unknown msg type %d", + payload->wmsg.type); + break; + } +} + +void ptt_sock_activate_svc(void) +{ + register_cld_cmd_cb(ANI_NL_MSG_PUMAC, ptt_cmd_handler, NULL); + register_cld_cmd_cb(ANI_NL_MSG_PTT, ptt_cmd_handler, NULL); +} + +void ptt_sock_deactivate_svc(void) +{ + deregister_cld_cmd_cb(ANI_NL_MSG_PTT); + deregister_cld_cmd_cb(ANI_NL_MSG_PUMAC); +} +#endif +#endif /* PTT_SOCK_SVC_ENABLE */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/qld/inc/qld_api.h b/drivers/staging/qca-wifi-host-cmn/utils/qld/inc/qld_api.h new file mode 100644 index 0000000000000000000000000000000000000000..9afdf28ebe274887c0c2625bfb5f92619ca7a3c4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/qld/inc/qld_api.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qld_api.h + * QLD: This file provides public exposed functions + */ + +#ifndef _QLD_API_H_ +#define _QLD_API_H_ + +#define QLD_MAX_NAME 48 + +/** + * struct qld_entry - Individual entry in qld_event + * @addr: Start address of object to dump + * @size: Size of memory dump + * @name: Name of memory dump + */ +struct qld_entry { + uint64_t addr; + size_t size; + char name[QLD_MAX_NAME]; +}; + +/** + * typedef qld_iter_func - qld callback function + * @req: opaque pointer + * @qld_entry: qld_entry + * + * Return: 0 - OK -EINVAL - On failure + */ +typedef int (*qld_iter_func)(void *req, struct qld_entry *entry); + +/** + * qld_iterate_list() - qld list iteration routine + * @gen_table: callback function to genrate table + * @req: opaque request + * + * Return: 0 - OK -EINVAL - On failure + */ +int qld_iterate_list(qld_iter_func gen_table, void *req); + +/** + * qld_register() - Register qld for the given address + * @addr: starting address the dump + * @size: size of memory to dump + * @name: name identifier of dump + * + * Return: 0 - OK -EINVAL -ENOMEM - On failure + */ +int qld_register(void *addr, size_t size, char *name); + +/** + * qld_unregister() - Un-register qld for the given address + * @addr: starting address the dump + * + * Return: 0 - OK -EINVAL - On failure + */ +int qld_unregister(void *addr); + +/** + * qld_list_init() - Initialize qld list + * @max_list: maximum size list supports + * + * Return: 0 - OK -EINVAL -ENOMEM - On failure + */ +int qld_list_init(uint32_t max_list); + +/** + * qld_list_delete() - empty qld list + * + * Return: 0 - OK -EINVAL - On failure + */ +int qld_list_delete(void); + +/** + * qld_list_deinit() - De-initialize qld list + * + * Return: 0 - OK -EINVAL - On failure + */ +int qld_list_deinit(void); + +/** + * qld_get_list_count () - get size of qld list + * @list_count: list_count to set + * + * Return: 0 - OK -EINVAL - On failure + */ +int qld_get_list_count(uint32_t *list_count); + +/** + * is_qld_enable() - check if qld feature is set + * + * Return: true on success, false on failure + */ +bool is_qld_enable(void); + +#endif /* _QLD_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/qld/inc/qld_priv.h b/drivers/staging/qca-wifi-host-cmn/utils/qld/inc/qld_priv.h new file mode 100644 index 0000000000000000000000000000000000000000..ebf7b56931a8157f1b268e0928998ff1c8204107 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/qld/inc/qld_priv.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qld_priv.h + * QLD: This file provies Private functions for qld + */ + +#ifndef _QLD_PRIV_H_ +#define _QLD_PRIV_H_ + +#include +#include +#include + +#define qld_alert(format, args...) \ + QDF_TRACE_FATAL(QDF_MODULE_ID_QLD, format, ## args) + +#define qld_err(format, args...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_QLD, format, ## args) + +#define qld_warn(format, args...) \ + QDF_TRACE_WARN(QDF_MODULE_ID_QLD, format, ## args) + +#define qld_info(format, args...) \ + QDF_TRACE_INFO(QDF_MODULE_ID_QLD, format, ## args) + +#define qld_debug(format, args...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_QLD, format, ## args) + +/** + * struct qld_list_handle - Top level qld structure + * @qld_lock: Spinlock for structure + * @qld_list: linked list for linking + * @qld_max_list: maximum list size + */ +struct qld_list_handle { + qdf_spinlock_t qld_lock; + qdf_list_t qld_list; + uint32_t qld_max_list; +}; + +/** + * struct qld_node - qld node + * @node: single node of linked list + * @entry: single qld_entry in list + */ +struct qld_node { + qdf_list_node_t node; + struct qld_entry entry; +}; + +#endif /*_QLD_PRIV_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/qld/src/qld.c b/drivers/staging/qca-wifi-host-cmn/utils/qld/src/qld.c new file mode 100644 index 0000000000000000000000000000000000000000..d377d59a89dc00d2de518a58870efed46cb03e2a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/qld/src/qld.c @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qld + * QLD: main file of QCA Live Dump (QLD) + */ + +#include "qld_priv.h" +#include "qld_api.h" +#include "qdf_module.h" + +/* Handle for qld structure */ +static struct qld_list_handle *qld_handle; + +bool is_qld_enable(void) +{ + if (!qld_handle) + return false; + + return true; +} + +qdf_export_symbol(is_qld_enable); + +int qld_list_init(uint32_t max_list) +{ + if (!max_list) + return -EINVAL; + + qld_handle = qdf_mem_malloc(sizeof(*qld_handle)); + + if (!qld_handle) + return -ENOMEM; + + qdf_spinlock_create(&qld_handle->qld_lock); + qld_handle->qld_max_list = max_list; + qdf_list_create(&qld_handle->qld_list, qld_handle->qld_max_list); + qld_debug("LIST init with max size of %u", qld_handle->qld_max_list); + return 0; +} + +qdf_export_symbol(qld_list_init); + +int qld_list_deinit(void) +{ + if (!qld_handle) { + qld_err("Handle NULL"); + return -EINVAL; + } + /* Delete the list */ + qld_list_delete(); + qdf_list_destroy(&qld_handle->qld_list); + qdf_spinlock_destroy(&qld_handle->qld_lock); + qdf_mem_free(qld_handle); + qld_handle = NULL; + qld_debug("LIST De-initialized"); + return 0; +} + +qdf_export_symbol(qld_list_deinit); + +int qld_list_delete(void) +{ + struct qld_node *qld; + qdf_list_node_t *node = NULL; + qdf_list_t *list; + + if (!qld_handle) { + qld_err("Handle NULL"); + return -EINVAL; + } + list = &qld_handle->qld_list; + qdf_spinlock_acquire(&qld_handle->qld_lock); + /* Check and remove the elements of list */ + while (qdf_list_remove_front(list, &node) == QDF_STATUS_SUCCESS) { + qld = qdf_container_of(node, struct qld_node, node); + qdf_mem_free(qld); + } + qdf_spinlock_release(&qld_handle->qld_lock); + qld_debug("LIST Emptied"); + return 0; +} + +qdf_export_symbol(qld_list_delete); + +int qld_register(void *addr, size_t size, char *name) +{ + struct qld_node *qld; + uint32_t list_count = 0; + + if (!qld_handle || !addr) { + qld_err("Handle or address is NULL"); + return -EINVAL; + } + + if ((qld_get_list_count(&list_count) != 0)) { + qdf_err("QLD: Invalid list count"); + return -EINVAL; + } + if (list_count >= qld_handle->qld_max_list) { + qld_err("List full,reg failed.Increase list size"); + return -EINVAL; + } + /* Check if data is already registered */ + qdf_spinlock_acquire(&qld_handle->qld_lock); + qdf_list_for_each(&qld_handle->qld_list, qld, node) { + if (qld->entry.addr == (uintptr_t)addr) { + qld_err("%s already registered", qld->entry.name); + qdf_spinlock_release(&qld_handle->qld_lock); + return -EINVAL; + } + } + qdf_spinlock_release(&qld_handle->qld_lock); + qld = qdf_mem_malloc(sizeof(*qld)); + if (!qld) + return -ENOMEM; + + qld_debug("Insert addr=%pK size=%zu name=%s", (void *)addr, size, name); + qdf_spinlock_acquire(&qld_handle->qld_lock); + qld->entry.addr = (uintptr_t)addr; + qld->entry.size = size; + qdf_snprintf(qld->entry.name, sizeof(qld->entry.name), "%s", name); + qdf_list_insert_front(&qld_handle->qld_list, &qld->node); + qdf_spinlock_release(&qld_handle->qld_lock); + return 0; +} + +qdf_export_symbol(qld_register); + +int qld_unregister(void *addr) +{ + struct qld_node *qld = NULL; + + if (!qld_handle || !addr) { + qld_err("Handle or address is NULL"); + return -EINVAL; + } + + qdf_spinlock_acquire(&qld_handle->qld_lock); + qdf_list_for_each(&qld_handle->qld_list, qld, node) { + if (qld->entry.addr == (uintptr_t)addr) + break; + } + qdf_list_remove_node(&qld_handle->qld_list, &qld->node); + qld_debug("Delete name=%s, size=%zu", qld->entry.name, qld->entry.size); + qdf_mem_free(qld); + qdf_spinlock_release(&qld_handle->qld_lock); + return 0; +} + +qdf_export_symbol(qld_unregister); + +int qld_iterate_list(qld_iter_func gen_table, void *qld_req) +{ + struct qld_node *qld = NULL; + + if (!qld_handle) + return -EINVAL; + + if (!qld_req || !gen_table) { + qld_err("req buffer or func is NULL %s", __func__); + return -EINVAL; + } + qdf_spinlock_acquire(&qld_handle->qld_lock); + qdf_list_for_each(&qld_handle->qld_list, qld, node) { + (gen_table)(qld_req, &qld->entry); + } + qdf_spinlock_release(&qld_handle->qld_lock); + return 0; +} + +qdf_export_symbol(qld_iterate_list); + +int qld_get_list_count(uint32_t *list_count) +{ + if (!qld_handle) { + qld_err("Handle NULL"); + return -EINVAL; + } + *list_count = qld_handle->qld_list.count; + return 0; +} + +qdf_export_symbol(qld_get_list_count); diff --git a/drivers/staging/qca-wifi-host-cmn/utils/sys/queue.h b/drivers/staging/qca-wifi-host-cmn/utils/sys/queue.h new file mode 100644 index 0000000000000000000000000000000000000000..23b184385378d8be99dcf8a5291cec870fef10fd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/sys/queue.h @@ -0,0 +1,592 @@ +/* +* Copyright (c) 1991, 1993 +* The Regents of the University of California. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* 1. Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* 2. Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in the +* documentation and/or other materials provided with the distribution. +* 4. Neither the name of the University nor the names of its contributors +* may be used to endorse or promote products derived from this software +* without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +* SUCH DAMAGE. +* +* @(#)queue.h 8.5 (Berkeley) 8/20/94 +* $FreeBSD: src/sys/sys/queue.h,v 1.58 2004/04/07 04:19:49 imp Exp $ +*/ + +#ifndef _QUEUE_H_ +#define _QUEUE_H_ + +/* + * This file defines four types of data structures: singly-linked lists, + * singly-linked tail queues, lists and tail queues. + * + * A singly-linked list is headed by a single forward pointer. The elements + * are singly linked for minimum space and pointer manipulation overhead at + * the expense of O(n) removal for arbitrary elements. New elements can be + * added to the list after an existing element or at the head of the list. + * Elements being removed from the head of the list should use the explicit + * macro for this purpose for optimum efficiency. A singly-linked list may + * only be traversed in the forward direction. Singly-linked lists are ideal + * for applications with large datasets and few or no removals or for + * implementing a LIFO queue. + * + * A singly-linked tail queue is headed by a pair of pointers, one to the + * head of the list and the other to the tail of the list. The elements are + * singly linked for minimum space and pointer manipulation overhead at the + * expense of O(n) removal for arbitrary elements. New elements can be added + * to the list after an existing element, at the head of the list, or at the + * end of the list. Elements being removed from the head of the tail queue + * should use the explicit macro for this purpose for optimum efficiency. + * A singly-linked tail queue may only be traversed in the forward direction. + * Singly-linked tail queues are ideal for applications with large datasets + * and few or no removals or for implementing a FIFO queue. + * + * A list is headed by a single forward pointer (or an array of forward + * pointers for a hash table header). The elements are doubly linked + * so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before + * or after an existing element or at the head of the list. A list + * may only be traversed in the forward direction. + * + * A tail queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or + * after an existing element, at the head of the list, or at the end of + * the list. A tail queue may be traversed in either direction. + * + * For details on the use of these macros, see the queue(3) manual page. + * + * + * SLIST LIST STAILQ TAILQ + * _HEAD + + + + + * _HEAD_INITIALIZER + + + + + * _ENTRY + + + + + * _INIT + + + + + * _EMPTY + + + + + * _FIRST + + + + + * _NEXT + + + + + * _PREV - - - + + * _LAST - - + + + * _FOREACH + + + + + * _FOREACH_SAFE + + + + + * _FOREACH_REVERSE - - - + + * _FOREACH_REVERSE_SAFE - - - + + * _INSERT_HEAD + + + + + * _INSERT_BEFORE - + - + + * _INSERT_AFTER + + + + + * _INSERT_TAIL - - + + + * _CONCAT - - + + + * _REMOVE_HEAD + - + - + * _REMOVE + + + + + * + */ +#define QUEUE_MACRO_DEBUG 0 +#if QUEUE_MACRO_DEBUG +/* + * Store the last 2 places the queue element or head was altered + */ +struct qm_trace { + char *lastfile; + int lastline; + char *prevfile; + int prevline; +}; + +#define TRACEBUF struct qm_trace trace; +#define TRASHIT(x) do {(x) = (void *)NULL; } while (0) + +#define QMD_TRACE_HEAD(head) do { \ + (head)->trace.prevline = (head)->trace.lastline; \ + (head)->trace.prevfile = (head)->trace.lastfile; \ + (head)->trace.lastline = __LINE__; \ + (head)->trace.lastfile = __FILE__; \ +} while (0) + +#define QMD_TRACE_ELEM(elem) do { \ + (elem)->trace.prevline = (elem)->trace.lastline; \ + (elem)->trace.prevfile = (elem)->trace.lastfile; \ + (elem)->trace.lastline = __LINE__; \ + (elem)->trace.lastfile = __FILE__; \ +} while (0) + +#else +#define QMD_TRACE_ELEM(elem) +#define QMD_TRACE_HEAD(head) +#define TRACEBUF +#define TRASHIT(x) do {(x) = (void *)0; } while (0) +#endif /* QUEUE_MACRO_DEBUG */ + +#ifdef ATHR_RNWF +/* + * NDIS contains a defn for SLIST_ENTRY and SINGLE_LIST_ENTRY + */ +#endif + +/* + * Singly-linked List declarations. + */ +#define SLIST_HEAD(name, type) \ + struct name { \ + struct type *slh_first; /* first element */ \ + } + +#define SLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define SING_LIST_ENTRY(type) \ + struct { \ + struct type *sle_next; /* next element */ \ + } + +/* + * Singly-linked List functions. + */ +#define SLIST_EMPTY(head) ((head)->slh_first == NULL) + +#define SLIST_FIRST(head) ((head)->slh_first) + +#define SLIST_FOREACH(var, head, field) \ + for ((var) = SLIST_FIRST((head)); \ + (var); \ + (var) = SLIST_NEXT((var), field)) + +#define SLIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = SLIST_FIRST((head)); \ + (var) && ((tvar) = SLIST_NEXT((var), field), 1); \ + (var) = (tvar)) + +#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \ + for ((varp) = &SLIST_FIRST((head)); \ + ((var) = *(varp)) != NULL; \ + (varp) = &SLIST_NEXT((var), field)) + +#define SLIST_INIT(head) do { \ + SLIST_FIRST((head)) = NULL; \ +} while (0) + +#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ + SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \ + SLIST_NEXT((slistelm), field) = (elm); \ +} while (0) + +#define SLIST_INSERT_HEAD(head, elm, field) do { \ + SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ + SLIST_FIRST((head)) = (elm); \ +} while (0) + +#define SLIST_NEXT(elm, field) ((elm)->field.sle_next) + +#define SLIST_REMOVE(head, elm, type, field) do { \ + if (SLIST_FIRST((head)) == (elm)) { \ + SLIST_REMOVE_HEAD((head), field); \ + } \ + else { \ + struct type *curelm = SLIST_FIRST((head)); \ + while (SLIST_NEXT(curelm, field) != (elm)) \ + curelm = SLIST_NEXT(curelm, field); \ + SLIST_NEXT(curelm, field) = \ + SLIST_NEXT(SLIST_NEXT(curelm, field), field);\ + } \ +} while (0) + +#define SLIST_REMOVE_HEAD(head, field) do { \ + SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), \ + field); \ +} while (0) + +/* + * Singly-linked Tail queue declarations. + */ +#define STAILQ_HEAD(name, type) \ + struct name { \ + struct type *stqh_first; \ + struct type **stqh_last; \ + } + +#define STAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).stqh_first } + +#define STAILQ_ENTRY(type) \ + struct { \ + struct type *stqe_next; /* next element */ \ + } + +/* + * Singly-linked Tail queue functions. + */ +#define STAILQ_CONCAT(head1, head2) do { \ + if (!STAILQ_EMPTY((head2))) { \ + *(head1)->stqh_last = (head2)->stqh_first; \ + (head1)->stqh_last = (head2)->stqh_last; \ + STAILQ_INIT((head2)); \ + } \ +} while (0) + +#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) + +#define STAILQ_FIRST(head) ((head)->stqh_first) + +#define STAILQ_FOREACH(var, head, field) \ + for ((var) = STAILQ_FIRST((head)); \ + (var); \ + (var) = STAILQ_NEXT((var), field)) + +#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = STAILQ_FIRST((head)); \ + (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \ + (var) = (tvar)) + +#define STAILQ_INIT(head) do { \ + STAILQ_FIRST((head)) = NULL; \ + (head)->stqh_last = &STAILQ_FIRST((head)); \ +} while (0) + +#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ + if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), \ + field)) == NULL) \ + (head)->stqh_last = &STAILQ_NEXT((elm), field); \ + STAILQ_NEXT((tqelm), field) = (elm); \ +} while (0) + +#define STAILQ_INSERT_HEAD(head, elm, field) do { \ + if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == \ + NULL) \ + (head)->stqh_last = &STAILQ_NEXT((elm), field); \ + STAILQ_FIRST((head)) = (elm); \ +} while (0) + +#define STAILQ_INSERT_TAIL(head, elm, field) do { \ + STAILQ_NEXT((elm), field) = NULL; \ + *(head)->stqh_last = (elm); \ + (head)->stqh_last = &STAILQ_NEXT((elm), field); \ +} while (0) + +#define STAILQ_LAST(head, type, field) \ + (STAILQ_EMPTY((head)) ? \ + NULL : \ + ((struct type *) \ + ((char *)((head)->stqh_last) - __offsetof(struct type, field)))) + +#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) + +#define STAILQ_REMOVE(head, elm, type, field) do { \ + if (STAILQ_FIRST((head)) == (elm)) { \ + STAILQ_REMOVE_HEAD((head), field); \ + } \ + else { \ + struct type *curelm = STAILQ_FIRST((head)); \ + while (STAILQ_NEXT(curelm, field) != (elm)) \ + curelm = STAILQ_NEXT(curelm, field); \ + if ((STAILQ_NEXT(curelm, field) = \ + STAILQ_NEXT(STAILQ_NEXT(curelm, field), \ + field)) == NULL) \ + (head)->stqh_last = &STAILQ_NEXT((curelm),\ + field); \ + } \ +} while (0) + +#define STAILQ_REMOVE_AFTER(head, elm, field) do { \ + if (STAILQ_NEXT(elm, field)) { \ + if ((STAILQ_NEXT(elm, field) = \ + STAILQ_NEXT(STAILQ_NEXT(elm, field), \ + field)) == NULL) \ + (head)->stqh_last = \ + &STAILQ_NEXT((elm), field); \ + } \ +} while (0) + +#define STAILQ_REMOVE_HEAD(head, field) do { \ + if ((STAILQ_FIRST((head)) = \ + STAILQ_NEXT(STAILQ_FIRST((head)), field)) == \ + NULL)\ + (head)->stqh_last = &STAILQ_FIRST((head)); \ +} while (0) + +#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \ + if ((STAILQ_FIRST((head)) = \ + STAILQ_NEXT((elm), field)) == NULL) \ + (head)->stqh_last = &STAILQ_FIRST((head)); \ +} while (0) + +/* + * List declarations. + */ +#define ATH_LIST_HEAD(name, type) \ + struct name { \ + struct type *lh_first; \ + } + +#ifndef LIST_HEAD +#define LIST_HEAD ATH_LIST_HEAD +#endif + +#define LIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define LIST_ENTRY(type) \ + struct { \ + struct type *le_next; \ + struct type **le_prev; \ + } + +/* + * List functions. + */ + +#define LIST_EMPTY(head) ((head)->lh_first == NULL) + +#define LIST_FIRST(head) ((head)->lh_first) + +#define LIST_FOREACH(var, head, field) \ + for ((var) = LIST_FIRST((head)); \ + (var); \ + (var) = LIST_NEXT((var), field)) + +#define LIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = LIST_FIRST((head)); \ + (var) && ((tvar) = LIST_NEXT((var), field), 1); \ + (var) = (tvar)) + +#define LIST_INIT(head) do { \ + LIST_FIRST((head)) = NULL; \ +} while (0) + +#define LIST_INSERT_AFTER(listelm, elm, field) do { \ + if ((LIST_NEXT((elm), field) = \ + LIST_NEXT((listelm), field)) != NULL) \ + LIST_NEXT((listelm), field)->field.le_prev = \ + &LIST_NEXT((elm), field); \ + LIST_NEXT((listelm), field) = (elm); \ + (elm)->field.le_prev = &LIST_NEXT((listelm), field); \ +} while (0) + +#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + LIST_NEXT((elm), field) = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &LIST_NEXT((elm), field); \ +} while (0) + +#define LIST_INSERT_HEAD(head, elm, field) do { \ + if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ + LIST_FIRST((head))->field.le_prev = \ + &LIST_NEXT((elm), field); \ + LIST_FIRST((head)) = (elm); \ + (elm)->field.le_prev = &LIST_FIRST((head)); \ +} while (0) + +#define LIST_NEXT(elm, field) ((elm)->field.le_next) + +#define LIST_REMOVE(elm, field) do { \ + if (LIST_NEXT((elm), field) != NULL) \ + LIST_NEXT((elm), field)->field.le_prev = \ + (elm)->field.le_prev; \ + *(elm)->field.le_prev = LIST_NEXT((elm), field); \ +} while (0) + +/* + * Tail queue declarations. + */ +#ifndef TRACE_TX_LEAK +#define TRACE_TX_LEAK 0 +#endif + +#if TRACE_TX_LEAK +#define HEADNAME char headname[64]; +#define COPY_HEADNAME(head) OS_MEMCPY((head)->headname, #head, sizeof(#head)) +#else +#define HEADNAME +#define COPY_HEADNAME(head) +#endif + +#define TAILQ_HEAD(name, type) \ + struct name { \ + struct type *tqh_first; \ + struct type **tqh_last; \ + HEADNAME \ + TRACEBUF \ + } + +#define TAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).tqh_first } + +#define TAILQ_ENTRY(type) \ + struct { \ + struct type *tqe_next; \ + struct type **tqe_prev; \ + TRACEBUF \ + } + +/* + * Tail queue functions. + */ + +#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) + +#define TAILQ_FIRST(head) ((head)->tqh_first) + +#define TAILQ_FOREACH(var, head, field) \ + for ((var) = TAILQ_FIRST((head)); \ + (var); \ + (var) = TAILQ_NEXT((var), field)) + +#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = TAILQ_FIRST((head)); \ + (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ + (var) = (tvar)) + +#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for ((var) = TAILQ_LAST((head), headname); \ + (var); \ + (var) = TAILQ_PREV((var), headname, field)) + +#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \ + for ((var) = TAILQ_LAST((head), headname); \ + (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \ + (var) = (tvar)) + +#define TAILQ_INIT(head) do { \ + TAILQ_FIRST((head)) = NULL; \ + (head)->tqh_last = &TAILQ_FIRST((head)); \ + COPY_HEADNAME(head); \ + QMD_TRACE_HEAD(head); \ +} while (0) + +#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), \ + field)) != NULL) \ + TAILQ_NEXT((elm), field)->field.tqe_prev = \ + &TAILQ_NEXT((elm), field); \ + else { \ + (head)->tqh_last = &TAILQ_NEXT((elm), field); \ + QMD_TRACE_HEAD(head); \ + } \ + TAILQ_NEXT((listelm), field) = (elm); \ + (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \ + QMD_TRACE_ELEM(&(elm)->field); \ + QMD_TRACE_ELEM(&listelm->field); \ +} while (0) + +#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + TAILQ_NEXT((elm), field) = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \ + QMD_TRACE_ELEM(&(elm)->field); \ + QMD_TRACE_ELEM(&listelm->field); \ +} while (0) + +#define TAILQ_INSERT_HEAD(head, elm, field) do { \ + if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL)\ + TAILQ_FIRST((head))->field.tqe_prev = \ + &TAILQ_NEXT((elm), field); \ + else \ + (head)->tqh_last = &TAILQ_NEXT((elm), field); \ + TAILQ_FIRST((head)) = (elm); \ + (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ + QMD_TRACE_HEAD(head); \ + QMD_TRACE_ELEM(&(elm)->field); \ +} while (0) + +#define TAILQ_INSERT_TAIL(head, elm, field) do { \ + TAILQ_NEXT((elm), field) = NULL; \ + (elm)->field.tqe_prev = (head)->tqh_last; \ + *(head)->tqh_last = (elm); \ + (head)->tqh_last = &TAILQ_NEXT((elm), field); \ + QMD_TRACE_HEAD(head); \ + QMD_TRACE_ELEM(&(elm)->field); \ +} while (0) + +#define TAILQ_LAST(head, headname) \ + (*(((struct headname *)((head)->tqh_last))->tqh_last)) + +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) + +#define TAILQ_PREV(elm, headname, field) \ + (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) + +#define TAILQ_REMOVE(head, elm, field) do { \ + if ((TAILQ_NEXT((elm), field)) != NULL) \ + TAILQ_NEXT((elm), field)->field.tqe_prev = \ + (elm)->field.tqe_prev; \ + else { \ + (head)->tqh_last = (elm)->field.tqe_prev; \ + QMD_TRACE_HEAD(head); \ + } \ + *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \ + TRASHIT((elm)->field.tqe_next); \ + TRASHIT((elm)->field.tqe_prev); \ + QMD_TRACE_ELEM(&(elm)->field); \ +} while (0) + +#define TAILQ_CONCAT(head1, head2, field) do { \ + if (!TAILQ_EMPTY(head2)) { \ + *(head1)->tqh_last = (head2)->tqh_first; \ + (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;\ + (head1)->tqh_last = (head2)->tqh_last; \ + TAILQ_INIT((head2)); \ + } \ +} while (0) + +#ifdef _KERNEL + +/* + * XXX insque() and remque() are an old way of handling certain queues. + * They bogusly assumes that all queue heads look alike. + */ + +struct quehead { + struct quehead *qh_link; + struct quehead *qh_rlink; +}; + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) + +static inline void insque(void *a, void *b) +{ + struct quehead *element = (struct quehead *)a, + *head = (struct quehead *)b; + + element->qh_link = head->qh_link; + element->qh_rlink = head; + head->qh_link = element; + element->qh_link->qh_rlink = element; +} + +static inline void remque(void *a) +{ + struct quehead *element = (struct quehead *)a; + + element->qh_link->qh_rlink = element->qh_rlink; + element->qh_rlink->qh_link = element->qh_link; + element->qh_rlink = 0; +} + +#else /* !(__GNUC__ || __INTEL_COMPILER) */ + +void insque(void *a, void *b); +void remque(void *a); + +#endif /* __GNUC__ || __INTEL_COMPILER */ + +#endif /* _KERNEL */ + +#endif /* _QUEUE_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wbuff/inc/wbuff.h b/drivers/staging/qca-wifi-host-cmn/wbuff/inc/wbuff.h new file mode 100644 index 0000000000000000000000000000000000000000..5afe0d195283d9aa407b7178e70d4f64919ff860 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wbuff/inc/wbuff.h @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wbuff.h + * wbuff buffer management APIs + */ + +#ifndef _WBUFF_H +#define _WBUFF_H + +#include +#include + +/* wbuff available pools */ +/* Pool of nbuf size 256 bytes */ +#define WBUFF_POOL_0 0 +/* Pool of nbuf size 512 bytes */ +#define WBUFF_POOL_1 1 +/* Pool of nbuf size 1024 bytes */ +#define WBUFF_POOL_2 2 +/* Pool of nbuf 2048 bytes */ +#define WBUFF_POOL_3 3 + +/** + * struct wbuff_alloc_request - allocation structure for registering each + * pool for wbuff module. + * @slot: pool_slot identifier + * @size: number of buffers for @pool_slot + */ +struct wbuff_alloc_request { + uint8_t slot; + uint16_t size; +}; + +/* Opaque handle for wbuff */ +struct wbuff_mod_handle; + +#ifdef WLAN_FEATURE_WBUFF +/** + * wbuff_module_init() - Initializes the wbuff module + * + * Return: QDF_STATUS_SUCCESS - init success + * QDF_STATUS_E_NOSUPPORT - init failure + */ +QDF_STATUS wbuff_module_init(void); + +/** + * wbuff_module_deinit() - De-initializes the wbuff module + * + * Return: QDF_STATUS_SUCCESS - de-init success + * QDF_STATUS_E_INVAL - de-init failure (wbuff not initialized) + */ +QDF_STATUS wbuff_module_deinit(void); + +/** + * wbuff_module_register() - Registers a module with wbuff + * @req: allocation request from registered module + * @num: number of pools required + * @reserve: nbuf headroom to start with + * @align: alignment for the nbuf + * + * Return: Handle if registration success + * NULL if registration failure + */ +struct wbuff_mod_handle * +wbuff_module_register(struct wbuff_alloc_request *req, uint8_t num, + int reserve, int align); + +/** + * wbuff_module_deregister() - De-registers a module with wbuff + * @hdl: wbuff_handle corresponding to the module + * + * Return: QDF_STATUS_SUCCESS - deregistration success + * QDF_STATUS_E_INVAL - deregistration failure + */ +QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl); + +/** + * wbuff_buff_get() - return buffer to the requester + * @handle: wbuff_handle corresponding to the module + * @len: length of buffer requested + * @func_name: function from which buffer is requested + * @line_num: line number in the file + * + * Return: Network buffer if success + * NULL if failure + */ +qdf_nbuf_t wbuff_buff_get(struct wbuff_mod_handle *hdl, uint32_t len, + const char *func_name, uint32_t line_num); + +/** + * wbuff_buff_put() - put the buffer back to wbuff pool + * @hdl: wbuff_handle corresponding to the module + * @buf: pointer to network buffer + * + * Return: NULL if success (buffer consumed) + * @buf if failure (buffer not consumed) + */ +qdf_nbuf_t wbuff_buff_put(qdf_nbuf_t buf); + +#else + +static inline QDF_STATUS wbuff_module_init(void) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static inline QDF_STATUS wbuff_module_deinit(void) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static inline struct wbuff_mod_handle * +wbuff_module_register(struct wbuff_alloc_request *req, uint8_t num, + int reserve, int align) +{ + return NULL; +} + +static inline QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static inline qdf_nbuf_t +wbuff_buff_get(struct wbuff_mod_handle *hdl, uint32_t len, const char *func_name, + uint32_t line_num) +{ + return NULL; +} + +static inline qdf_nbuf_t +wbuff_buff_put(qdf_nbuf_t buf) +{ + return buf; +} + +#endif +#endif /* _WBUFF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/wbuff/src/i_wbuff.h b/drivers/staging/qca-wifi-host-cmn/wbuff/src/i_wbuff.h new file mode 100644 index 0000000000000000000000000000000000000000..ce7b69ec718416ac4bd0c97359affc541668e5be --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wbuff/src/i_wbuff.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_wbuff.h + * wbuff private + */ + +#ifndef _I_WBUFF_H +#define _I_WBUFF_H + +#include + +/* Number of modules supported by wbuff */ +#define WBUFF_MAX_MODULES 4 + +/* Number of pools supported per module */ +#define WBUFF_MAX_POOLS 4 + +/* Max buffer size supported by wbuff in bytes */ +#define WBUFF_MAX_BUFFER_SIZE 2048 + +/* wbuff pool buffer lengths in bytes*/ +#define WBUFF_LEN_POOL0 256 +#define WBUFF_LEN_POOL1 512 +#define WBUFF_LEN_POOL2 1024 +#define WBUFF_LEN_POOL3 2048 + +/* wbuff max pool sizes */ +/* Allocation of size 256 bytes */ +#define WBUFF_POOL_0_MAX 256 +/* Allocation of size 512 bytes */ +#define WBUFF_POOL_1_MAX 128 +/* Allocation of size 1024 bytes */ +#define WBUFF_POOL_2_MAX 64 +/* Allocation of size 2048 bytes */ +#define WBUFF_POOL_3_MAX 32 + +#define WBUFF_MSLOT_SHIFT 4 +#define WBUFF_MSLOT_BITMASK 0xF0 + +#define WBUFF_PSLOT_SHIFT 1 +#define WBUFF_PSLOT_BITMASK 0xE + +/* Comparison array for maximum allocation per pool*/ +uint16_t wbuff_alloc_max[WBUFF_MAX_POOLS] = {WBUFF_POOL_0_MAX, + WBUFF_POOL_1_MAX, + WBUFF_POOL_2_MAX, + WBUFF_POOL_3_MAX}; + +/** + * struct wbuff_handle - wbuff handle to the registered module + * @id: the identifier for the registered module. + */ +struct wbuff_handle { + uint8_t id; +}; + +/** + * struct wbuff_module - allocation holder for wbuff registered module + * @registered: To identify whether module is registered + * @pending_returns: Number of buffers pending to be returned to + * wbuff by the module + * @lock: Lock for accessing per module buffer slots + * @handle: wbuff handle for the registered module + * @reserve: nbuf headroom to start with + * @align: alignment for the nbuf + * @pool[]: pools for all available buffers for the module + */ +struct wbuff_module { + bool registered; + uint16_t pending_returns; + qdf_spinlock_t lock; + struct wbuff_handle handle; + int reserve; + int align; + qdf_nbuf_t pool[WBUFF_MAX_POOLS]; +}; + +/** + * struct wbuff_holder - allocation holder for wbuff + * @initialized: to identified whether module is initialized + */ +struct wbuff_holder { + bool initialized; + struct wbuff_module mod[WBUFF_MAX_MODULES]; +}; +#endif /* _WBUFF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/wbuff/src/wbuff.c b/drivers/staging/qca-wifi-host-cmn/wbuff/src/wbuff.c new file mode 100644 index 0000000000000000000000000000000000000000..65455bfb4d09f8662cf8fb0285bb56aba5071fed --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wbuff/src/wbuff.c @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wbuff.c + * wbuff buffer management APIs + */ + +#include +#include "i_wbuff.h" + +/** + * Allocation holder array for all wbuff registered modules + */ +struct wbuff_holder wbuff; + +/** + * wbuff_get_pool_slot_from_len() - get pool_slot from length + * @len: length of the buffer + * + * Return: pool slot + */ +static uint8_t wbuff_get_pool_slot_from_len(uint16_t len) +{ + if ((len > 0) && (len <= WBUFF_LEN_POOL0)) + return WBUFF_POOL_0; + else if ((len > WBUFF_LEN_POOL0) && (len <= WBUFF_LEN_POOL1)) + return WBUFF_POOL_1; + else if ((len > WBUFF_LEN_POOL1) && (len <= WBUFF_LEN_POOL2)) + return WBUFF_POOL_2; + else + return WBUFF_POOL_3; +} + +/** + * wbuff_get_len_from_pool_slot() - get len from pool slot + * @pool_slot: wbuff pool_slot + * + * Return: nbuf length from pool slot + */ +static uint32_t wbuff_get_len_from_pool_slot(uint16_t pool_slot) +{ + uint32_t len = 0; + + switch (pool_slot) { + case 0: + len = WBUFF_LEN_POOL0; + break; + case 1: + len = WBUFF_LEN_POOL1; + break; + case 2: + len = WBUFF_LEN_POOL2; + break; + case 3: + len = WBUFF_LEN_POOL3; + break; + default: + len = 0; + } + + return len; +} + +/** + * wbuff_get_free_mod_slot() - get free module slot + * + * Return: module slot + */ +static uint8_t wbuff_get_free_mod_slot(void) +{ + uint8_t mslot = 0; + + for (mslot = 0; mslot < WBUFF_MAX_MODULES; mslot++) { + qdf_spin_lock_bh(&wbuff.mod[mslot].lock); + if (!wbuff.mod[mslot].registered) { + wbuff.mod[mslot].registered = true; + qdf_spin_unlock_bh(&wbuff.mod[mslot].lock); + break; + } + qdf_spin_unlock_bh(&wbuff.mod[mslot].lock); + } + + return mslot; +} + +/** + * wbuff_is_valid_alloc_req() - validate alloc request + * @req: allocation request from registered module + * @num: number of pools required + * + * Return: true if valid wbuff_alloc_request + * false if invalid wbuff_alloc_request + */ +static bool wbuff_is_valid_alloc_req(struct wbuff_alloc_request *req, + uint8_t num) +{ + uint16_t psize = 0; + uint8_t alloc = 0, pslot = 0; + + for (alloc = 0; alloc < num; alloc++) { + pslot = req[alloc].slot; + psize = req[alloc].size; + if ((pslot > WBUFF_MAX_POOLS - 1) || + (psize > wbuff_alloc_max[pslot])) + return false; + } + + return true; +} + +/** + * wbuff_prepare_nbuf() - allocate nbuf + * @mslot: module slot + * @pslot: pool slot + * @len: length of the buffer + * @reserve: nbuf headroom to start with + * @align: alignment for the nbuf + * + * Return: nbuf if success + * NULL if failure + */ +static qdf_nbuf_t wbuff_prepare_nbuf(uint8_t mslot, uint8_t pslot, + uint32_t len, int reserve, int align) +{ + qdf_nbuf_t buf; + unsigned long dev_scratch = 0; + + buf = qdf_nbuf_alloc(NULL, roundup(len + reserve, align), reserve, + align, false); + if (!buf) + return NULL; + dev_scratch = mslot; + dev_scratch <<= WBUFF_MSLOT_SHIFT; + dev_scratch |= ((pslot << WBUFF_PSLOT_SHIFT) | 1); + qdf_nbuf_set_dev_scratch(buf, dev_scratch); + + return buf; +} + +/** + * wbuff_is_valid_handle() - validate wbuff handle + * @handle: wbuff handle passed by module + * + * Return: true - valid wbuff_handle + * false - invalid wbuff_handle + */ +static bool wbuff_is_valid_handle(struct wbuff_handle *handle) +{ + if ((handle) && (handle->id < WBUFF_MAX_MODULES) && + (wbuff.mod[handle->id].registered)) + return true; + + return false; +} + +QDF_STATUS wbuff_module_init(void) +{ + struct wbuff_module *mod = NULL; + uint8_t mslot = 0, pslot = 0; + + if (!qdf_nbuf_is_dev_scratch_supported()) { + wbuff.initialized = false; + return QDF_STATUS_E_NOSUPPORT; + } + + for (mslot = 0; mslot < WBUFF_MAX_MODULES; mslot++) { + mod = &wbuff.mod[mslot]; + qdf_spinlock_create(&mod->lock); + for (pslot = 0; pslot < WBUFF_MAX_POOLS; pslot++) + mod->pool[pslot] = NULL; + mod->registered = false; + } + wbuff.initialized = true; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wbuff_module_deinit(void) +{ + struct wbuff_module *mod = NULL; + uint8_t mslot = 0; + + if (!wbuff.initialized) + return QDF_STATUS_E_INVAL; + + wbuff.initialized = false; + for (mslot = 0; mslot < WBUFF_MAX_MODULES; mslot++) { + mod = &wbuff.mod[mslot]; + if (mod->registered) + wbuff_module_deregister((struct wbuff_mod_handle *) + &mod->handle); + qdf_spinlock_destroy(&mod->lock); + } + + return QDF_STATUS_SUCCESS; +} + +struct wbuff_mod_handle * +wbuff_module_register(struct wbuff_alloc_request *req, uint8_t num, + int reserve, int align) +{ + struct wbuff_module *mod = NULL; + qdf_nbuf_t buf = NULL; + uint32_t len = 0; + uint16_t idx = 0, psize = 0; + uint8_t alloc = 0, mslot = 0, pslot = 0; + + if (!wbuff.initialized) + return NULL; + + if ((num == 0) || (num > WBUFF_MAX_POOLS)) + return NULL; + + if (!wbuff_is_valid_alloc_req(req, num)) + return NULL; + + mslot = wbuff_get_free_mod_slot(); + if (mslot == WBUFF_MAX_MODULES) + return NULL; + + mod = &wbuff.mod[mslot]; + + mod->handle.id = mslot; + + for (alloc = 0; alloc < num; alloc++) { + pslot = req[alloc].slot; + psize = req[alloc].size; + len = wbuff_get_len_from_pool_slot(pslot); + /** + * Allocate pool_cnt number of buffers for + * the pool given by pslot + */ + for (idx = 0; idx < psize; idx++) { + buf = wbuff_prepare_nbuf(mslot, pslot, len, reserve, + align); + if (!buf) + continue; + if (!mod->pool[pslot]) { + qdf_nbuf_set_next(buf, NULL); + mod->pool[pslot] = buf; + } else { + qdf_nbuf_set_next(buf, mod->pool[pslot]); + mod->pool[pslot] = buf; + } + } + } + mod->reserve = reserve; + mod->align = align; + + return (struct wbuff_mod_handle *)&mod->handle; +} + +QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl) +{ + struct wbuff_handle *handle; + struct wbuff_module *mod = NULL; + uint8_t mslot = 0, pslot = 0; + qdf_nbuf_t first = NULL, buf = NULL; + + handle = (struct wbuff_handle *)hdl; + + if ((!wbuff.initialized) || (!wbuff_is_valid_handle(handle))) + return QDF_STATUS_E_INVAL; + + mslot = handle->id; + mod = &wbuff.mod[mslot]; + + qdf_spin_lock_bh(&mod->lock); + for (pslot = 0; pslot < WBUFF_MAX_POOLS; pslot++) { + first = mod->pool[pslot]; + while (first) { + buf = first; + first = qdf_nbuf_next(buf); + qdf_nbuf_free(buf); + } + } + mod->registered = false; + qdf_spin_unlock_bh(&mod->lock); + + return QDF_STATUS_SUCCESS; +} + +qdf_nbuf_t wbuff_buff_get(struct wbuff_mod_handle *hdl, uint32_t len, + const char *func_name, uint32_t line_num) +{ + struct wbuff_handle *handle; + struct wbuff_module *mod = NULL; + uint8_t mslot = 0; + uint8_t pslot = 0; + qdf_nbuf_t buf = NULL; + + handle = (struct wbuff_handle *)hdl; + + if ((!wbuff.initialized) || (!wbuff_is_valid_handle(handle)) || !len || + (len > WBUFF_MAX_BUFFER_SIZE)) + return NULL; + + mslot = handle->id; + pslot = wbuff_get_pool_slot_from_len(len); + mod = &wbuff.mod[mslot]; + + qdf_spin_lock_bh(&mod->lock); + if (mod->pool[pslot]) { + buf = mod->pool[pslot]; + mod->pool[pslot] = qdf_nbuf_next(buf); + mod->pending_returns++; + } + qdf_spin_unlock_bh(&mod->lock); + if (buf) { + qdf_nbuf_set_next(buf, NULL); + qdf_net_buf_debug_update_node(buf, func_name, line_num); + } + + return buf; +} + +qdf_nbuf_t wbuff_buff_put(qdf_nbuf_t buf) +{ + qdf_nbuf_t buffer = buf; + unsigned long slot_info = 0; + uint8_t mslot = 0, pslot = 0; + + if (!wbuff.initialized) + return buffer; + + slot_info = qdf_nbuf_get_dev_scratch(buf); + if (!slot_info) + return buffer; + + mslot = (slot_info & WBUFF_MSLOT_BITMASK) >> WBUFF_MSLOT_SHIFT; + pslot = (slot_info & WBUFF_PSLOT_BITMASK) >> WBUFF_PSLOT_SHIFT; + qdf_nbuf_reset(buffer, wbuff.mod[mslot].reserve, wbuff.mod[mslot]. + align); + qdf_spin_lock_bh(&wbuff.mod[mslot].lock); + if (wbuff.mod[mslot].registered) { + qdf_nbuf_set_next(buffer, wbuff.mod[mslot].pool[pslot]); + wbuff.mod[mslot].pool[pslot] = buffer; + wbuff.mod[mslot].pending_returns--; + buffer = NULL; + } + qdf_spin_unlock_bh(&wbuff.mod[mslot].lock); + + return buffer; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wlan_cfg/cfg_dp.h b/drivers/staging/qca-wifi-host-cmn/wlan_cfg/cfg_dp.h new file mode 100644 index 0000000000000000000000000000000000000000..f9f61732bc988cb84df14d0046bf490f6865b8a7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wlan_cfg/cfg_dp.h @@ -0,0 +1,1101 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains definitions of Data Path configuration. + */ + +#ifndef _CFG_DP_H_ +#define _CFG_DP_H_ + +#include "cfg_define.h" + +#define WLAN_CFG_MAX_CLIENTS 64 +#define WLAN_CFG_MAX_CLIENTS_MIN 8 +#define WLAN_CFG_MAX_CLIENTS_MAX 64 + +/* Change this to a lower value to enforce scattered idle list mode */ +#define WLAN_CFG_MAX_ALLOC_SIZE 0x200000 +#define WLAN_CFG_MAX_ALLOC_SIZE_MIN 0x80000 +#define WLAN_CFG_MAX_ALLOC_SIZE_MAX 0x200000 + +#define WLAN_CFG_NUM_TCL_DATA_RINGS 3 +#define WLAN_CFG_NUM_TCL_DATA_RINGS_MIN 3 +#define WLAN_CFG_NUM_TCL_DATA_RINGS_MAX 3 + +#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || \ + defined(QCA_LL_PDEV_TX_FLOW_CONTROL) +#define WLAN_CFG_TX_FLOW_START_QUEUE_OFFSET 10 +#define WLAN_CFG_TX_FLOW_STOP_QUEUE_TH 15 +#else +#define WLAN_CFG_TX_FLOW_START_QUEUE_OFFSET 0 +#define WLAN_CFG_TX_FLOW_STOP_QUEUE_TH 0 +#endif + +#define WLAN_CFG_PER_PDEV_TX_RING_MIN 0 +#define WLAN_CFG_PER_PDEV_TX_RING_MAX 1 + +#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1) +#define WLAN_CFG_PER_PDEV_RX_RING 0 +#define WLAN_CFG_PER_PDEV_LMAC_RING 0 +#define WLAN_LRO_ENABLE 0 +#define WLAN_CFG_MAC_PER_TARGET 2 +#ifdef IPA_OFFLOAD +/* Using TCL data ring 2 for IPA Tx And + * WBM2SW ring 2 for Tx completion + */ +#define WLAN_CFG_IPA_TX_N_TXCMPL_RING 2 +/* Size of TCL TX Ring */ +#if defined(TX_TO_NPEERS_INC_TX_DESCS) +#define WLAN_CFG_TX_RING_SIZE 2048 +#else +#define WLAN_CFG_TX_RING_SIZE 1024 +#endif +#define WLAN_CFG_IPA_TX_RING_SIZE 1024 +#define WLAN_CFG_IPA_TX_COMP_RING_SIZE 1024 + +#define WLAN_CFG_PER_PDEV_TX_RING 0 +#define WLAN_CFG_IPA_UC_TX_BUF_SIZE 2048 +#define WLAN_CFG_IPA_UC_TX_PARTITION_BASE 3000 +#define WLAN_CFG_IPA_UC_RX_IND_RING_COUNT 1024 +#else +#define WLAN_CFG_TX_RING_SIZE 512 +#define WLAN_CFG_PER_PDEV_TX_RING 1 +#define WLAN_CFG_IPA_UC_TX_BUF_SIZE 0 +#define WLAN_CFG_IPA_UC_TX_PARTITION_BASE 0 +#define WLAN_CFG_IPA_UC_RX_IND_RING_COUNT 0 +#endif + +#if defined(TX_TO_NPEERS_INC_TX_DESCS) +#define WLAN_CFG_TX_COMP_RING_SIZE 4096 + +/* Tx Descriptor and Tx Extension Descriptor pool sizes */ +#define WLAN_CFG_NUM_TX_DESC 4096 +#define WLAN_CFG_NUM_TX_EXT_DESC 4096 +#else +#define WLAN_CFG_TX_COMP_RING_SIZE 1024 + +/* Tx Descriptor and Tx Extension Descriptor pool sizes */ +#define WLAN_CFG_NUM_TX_DESC 1024 +#define WLAN_CFG_NUM_TX_EXT_DESC 1024 +#endif + +/* Interrupt Mitigation - Batch threshold in terms of number of frames */ +#define WLAN_CFG_INT_BATCH_THRESHOLD_TX 1 +#define WLAN_CFG_INT_BATCH_THRESHOLD_RX 1 +#define WLAN_CFG_INT_BATCH_THRESHOLD_OTHER 1 + +/* Interrupt Mitigation - Timer threshold in us */ +#define WLAN_CFG_INT_TIMER_THRESHOLD_TX 8 +#define WLAN_CFG_INT_TIMER_THRESHOLD_RX 8 +#define WLAN_CFG_INT_TIMER_THRESHOLD_OTHER 8 +#endif + +#define WLAN_CFG_RX_PENDING_HL_THRESHOLD 0x60000 +#define WLAN_CFG_RX_PENDING_HL_THRESHOLD_MIN 0 +#define WLAN_CFG_RX_PENDING_HL_THRESHOLD_MAX 0x80000 + +#define WLAN_CFG_RX_PENDING_LO_THRESHOLD 0x60000 +#define WLAN_CFG_RX_PENDING_LO_THRESHOLD_MIN 100 +#define WLAN_CFG_RX_PENDING_LO_THRESHOLD_MAX 0x80000 + +#define WLAN_CFG_INT_TIMER_THRESHOLD_WBM_RELEASE_RING 256 +#define WLAN_CFG_INT_TIMER_THRESHOLD_REO_RING 512 + +#define WLAN_CFG_PER_PDEV_RX_RING_MIN 0 +#define WLAN_CFG_PER_PDEV_RX_RING_MAX 0 + +#define WLAN_CFG_PER_PDEV_LMAC_RING_MIN 0 +#define WLAN_CFG_PER_PDEV_LMAC_RING_MAX 1 + +#define WLAN_CFG_TX_RING_SIZE_MIN 512 +#define WLAN_CFG_TX_RING_SIZE_MAX 0x80000 + +#define WLAN_CFG_TX_COMP_RING_SIZE_MIN 512 +#define WLAN_CFG_TX_COMP_RING_SIZE_MAX 0x80000 + +#define WLAN_CFG_NUM_TX_DESC_MIN 1024 +#define WLAN_CFG_NUM_TX_DESC_MAX 32768 + +#define WLAN_CFG_NUM_TX_EXT_DESC_MIN 1024 +#define WLAN_CFG_NUM_TX_EXT_DESC_MAX 0x80000 + +#define WLAN_CFG_INT_BATCH_THRESHOLD_TX_MIN 1 +#define WLAN_CFG_INT_BATCH_THRESHOLD_TX_MAX 256 + +#define WLAN_CFG_INT_BATCH_THRESHOLD_RX_MIN 1 +#define WLAN_CFG_INT_BATCH_THRESHOLD_RX_MAX 128 + +#define WLAN_CFG_INT_BATCH_THRESHOLD_REO_RING_MIN 1 +#define WLAN_CFG_INT_BATCH_THRESHOLD_REO_RING_MAX 128 + +#define WLAN_CFG_INT_BATCH_THRESHOLD_WBM_RELEASE_RING_MIN 1 +#define WLAN_CFG_INT_BATCH_THRESHOLD_WBM_RELEASE_RING_MAX 128 + +#define WLAN_CFG_INT_BATCH_THRESHOLD_OTHER_MIN 1 +#define WLAN_CFG_INT_BATCH_THRESHOLD_OTHER_MAX 1 + +#define WLAN_CFG_INT_TIMER_THRESHOLD_TX_MIN 8 +#define WLAN_CFG_INT_TIMER_THRESHOLD_TX_MAX 100 + +#define WLAN_CFG_INT_TIMER_THRESHOLD_RX_MIN 8 +#define WLAN_CFG_INT_TIMER_THRESHOLD_RX_MAX 500 + +#define WLAN_CFG_INT_TIMER_THRESHOLD_OTHER_MIN 8 +#define WLAN_CFG_INT_TIMER_THRESHOLD_OTHER_MAX 1000 + +#define WLAN_CFG_INT_TIMER_THRESHOLD_REO_RING_MIN 8 +#define WLAN_CFG_INT_TIMER_THRESHOLD_REO_RING_MAX 512 + +#define WLAN_CFG_INT_TIMER_THRESHOLD_WBM_RELEASE_RING_MIN 8 +#define WLAN_CFG_INT_TIMER_THRESHOLD_WBM_RELEASE_RING_MAX 500 + +#define WLAN_CFG_NSS_TX_COMP_RING_SIZE 0x2000 +#define WLAN_CFG_NSS_TX_COMP_RING_SIZE_MIN 0x2000 +#define WLAN_CFG_NSS_TX_COMP_RING_SIZE_MAX 0xc000 + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 + +/* Per vdev pools */ +#define WLAN_CFG_NUM_TX_DESC_POOL 3 +#define WLAN_CFG_NUM_TXEXT_DESC_POOL 3 + +#else /* QCA_LL_TX_FLOW_CONTROL_V2 */ + +#ifdef TX_PER_PDEV_DESC_POOL +#define WLAN_CFG_NUM_TX_DESC_POOL MAX_PDEV_CNT +#define WLAN_CFG_NUM_TXEXT_DESC_POOL MAX_PDEV_CNT + +#else /* TX_PER_PDEV_DESC_POOL */ + +#define WLAN_CFG_NUM_TX_DESC_POOL 3 +#define WLAN_CFG_NUM_TXEXT_DESC_POOL 3 + +#endif /* TX_PER_PDEV_DESC_POOL */ +#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ + +#define WLAN_CFG_NUM_TXEXT_DESC_POOL_MIN 1 +#define WLAN_CFG_NUM_TXEXT_DESC_POOL_MAX 4 + +#define WLAN_CFG_HTT_PKT_TYPE 2 +#define WLAN_CFG_HTT_PKT_TYPE_MIN 2 +#define WLAN_CFG_HTT_PKT_TYPE_MAX 2 + +#define WLAN_CFG_MAX_PEER_ID 64 +#define WLAN_CFG_MAX_PEER_ID_MIN 64 +#define WLAN_CFG_MAX_PEER_ID_MAX 64 + +#define WLAN_CFG_RX_DEFRAG_TIMEOUT 100 +#define WLAN_CFG_RX_DEFRAG_TIMEOUT_MIN 100 +#define WLAN_CFG_RX_DEFRAG_TIMEOUT_MAX 100 + +#define WLAN_CFG_NUM_TCL_DATA_RINGS 3 +#define WLAN_CFG_NUM_TCL_DATA_RINGS_MIN 3 +#define WLAN_CFG_NUM_TCL_DATA_RINGS_MAX 3 + +#define WLAN_CFG_NUM_REO_DEST_RING 4 +#define WLAN_CFG_NUM_REO_DEST_RING_MIN 4 +#define WLAN_CFG_NUM_REO_DEST_RING_MAX 4 + +#define WLAN_CFG_WBM_RELEASE_RING_SIZE 1024 +#define WLAN_CFG_WBM_RELEASE_RING_SIZE_MIN 64 +#define WLAN_CFG_WBM_RELEASE_RING_SIZE_MAX 1024 + +#define WLAN_CFG_TCL_CMD_RING_SIZE 32 +#define WLAN_CFG_TCL_CMD_RING_SIZE_MIN 32 +#define WLAN_CFG_TCL_CMD_RING_SIZE_MAX 32 + +#define WLAN_CFG_TCL_STATUS_RING_SIZE 32 +#define WLAN_CFG_TCL_STATUS_RING_SIZE_MIN 32 +#define WLAN_CFG_TCL_STATUS_RING_SIZE_MAX 32 + +#if defined(QCA_WIFI_QCA6290) +#define WLAN_CFG_REO_DST_RING_SIZE 1024 +#else +#define WLAN_CFG_REO_DST_RING_SIZE 2048 +#endif + +#define WLAN_CFG_REO_DST_RING_SIZE_MIN 1024 +#define WLAN_CFG_REO_DST_RING_SIZE_MAX 2048 + +#define WLAN_CFG_REO_REINJECT_RING_SIZE 128 +#define WLAN_CFG_REO_REINJECT_RING_SIZE_MIN 32 +#define WLAN_CFG_REO_REINJECT_RING_SIZE_MAX 128 + +#define WLAN_CFG_RX_RELEASE_RING_SIZE 1024 +#define WLAN_CFG_RX_RELEASE_RING_SIZE_MIN 8 +#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \ + defined(QCA_WIFI_QCA6750) +#define WLAN_CFG_RX_RELEASE_RING_SIZE_MAX 1024 +#else +#define WLAN_CFG_RX_RELEASE_RING_SIZE_MAX 8192 +#endif + +#define WLAN_CFG_REO_EXCEPTION_RING_SIZE 128 +#define WLAN_CFG_REO_EXCEPTION_RING_SIZE_MIN 128 +#define WLAN_CFG_REO_EXCEPTION_RING_SIZE_MAX 128 + +#define WLAN_CFG_REO_CMD_RING_SIZE 128 +#define WLAN_CFG_REO_CMD_RING_SIZE_MIN 64 +#define WLAN_CFG_REO_CMD_RING_SIZE_MAX 128 + +#define WLAN_CFG_REO_STATUS_RING_SIZE 128 +#define WLAN_CFG_REO_STATUS_RING_SIZE_MIN 128 +#define WLAN_CFG_REO_STATUS_RING_SIZE_MAX 2048 + +#define WLAN_CFG_RXDMA_BUF_RING_SIZE 1024 +#define WLAN_CFG_RXDMA_BUF_RING_SIZE_MIN 1024 +#define WLAN_CFG_RXDMA_BUF_RING_SIZE_MAX 1024 + +#define WLAN_CFG_RXDMA_REFILL_RING_SIZE 4096 +#define WLAN_CFG_RXDMA_REFILL_RING_SIZE_MIN 16 +#define WLAN_CFG_RXDMA_REFILL_RING_SIZE_MAX 4096 + +#define WLAN_CFG_TX_DESC_LIMIT_0 0 +#define WLAN_CFG_TX_DESC_LIMIT_0_MIN 4096 +#define WLAN_CFG_TX_DESC_LIMIT_0_MAX 32768 + +#define WLAN_CFG_TX_DESC_LIMIT_1 0 +#define WLAN_CFG_TX_DESC_LIMIT_1_MIN 4096 +#define WLAN_CFG_TX_DESC_LIMIT_1_MAX 32768 + +#define WLAN_CFG_TX_DESC_LIMIT_2 0 +#define WLAN_CFG_TX_DESC_LIMIT_2_MIN 4096 +#define WLAN_CFG_TX_DESC_LIMIT_2_MAX 32768 + +#define WLAN_CFG_TX_DEVICE_LIMIT 65536 +#define WLAN_CFG_TX_DEVICE_LIMIT_MIN 16384 +#define WLAN_CFG_TX_DEVICE_LIMIT_MAX 65536 + +#define WLAN_CFG_TX_SW_INTERNODE_QUEUE 1024 +#define WLAN_CFG_TX_SW_INTERNODE_QUEUE_MIN 128 +#define WLAN_CFG_TX_SW_INTERNODE_QUEUE_MAX 1024 + +#define WLAN_CFG_RXDMA_MONITOR_BUF_RING_SIZE 4096 +#define WLAN_CFG_RXDMA_MONITOR_BUF_RING_SIZE_MIN 16 +#define WLAN_CFG_RXDMA_MONITOR_BUF_RING_SIZE_MAX 8192 + +#define WLAN_CFG_RXDMA_MONITOR_DST_RING_SIZE 2048 +#define WLAN_CFG_RXDMA_MONITOR_DST_RING_SIZE_MIN 48 +#define WLAN_CFG_RXDMA_MONITOR_DST_RING_SIZE_MAX 8192 + +#define WLAN_CFG_RXDMA_MONITOR_STATUS_RING_SIZE 1024 +#define WLAN_CFG_RXDMA_MONITOR_STATUS_RING_SIZE_MIN 16 +#define WLAN_CFG_RXDMA_MONITOR_STATUS_RING_SIZE_MAX 8192 + +#define WLAN_CFG_RXDMA_MONITOR_DESC_RING_SIZE 4096 +#define WLAN_CFG_RXDMA_MONITOR_DESC_RING_SIZE_MIN 4096 +#define WLAN_CFG_RXDMA_MONITOR_DESC_RING_SIZE_MAX 16384 + +#define WLAN_CFG_RXDMA_ERR_DST_RING_SIZE 1024 +#define WLAN_CFG_RXDMA_ERR_DST_RING_SIZE_MIN 1024 +#define WLAN_CFG_RXDMA_ERR_DST_RING_SIZE_MAX 8192 + +#define WLAN_CFG_RXDMA_MONITOR_RX_DROP_THRESH_SIZE 32 +#define WLAN_CFG_RXDMA_MONITOR_RX_DROP_THRESH_SIZE_MIN 0 +#define WLAN_CFG_RXDMA_MONITOR_RX_DROP_THRESH_SIZE_MAX 256 + +/** + * Allocate as many RX descriptors as buffers in the SW2RXDMA + * ring. This value may need to be tuned later. + */ +#if defined(QCA_HOST2FW_RXBUF_RING) +#define WLAN_CFG_RX_SW_DESC_WEIGHT_SIZE 1 +#define WLAN_CFG_RX_SW_DESC_WEIGHT_SIZE_MIN 1 +#define WLAN_CFG_RX_SW_DESC_WEIGHT_SIZE_MAX 1 + +/** + * For low memory AP cases using 1 will reduce the rx descriptors memory req + */ +#elif defined(QCA_LOWMEM_CONFIG) || defined(QCA_512M_CONFIG) +#define WLAN_CFG_RX_SW_DESC_WEIGHT_SIZE 1 +#define WLAN_CFG_RX_SW_DESC_WEIGHT_SIZE_MIN 1 +#define WLAN_CFG_RX_SW_DESC_WEIGHT_SIZE_MAX 3 + +/** + * AP use cases need to allocate more RX Descriptors than the number of + * entries avaialable in the SW2RXDMA buffer replenish ring. This is to account + * for frames sitting in REO queues, HW-HW DMA rings etc. Hence using a + * multiplication factor of 3, to allocate three times as many RX descriptors + * as RX buffers. + */ +#else +#define WLAN_CFG_RX_SW_DESC_WEIGHT_SIZE 3 +#define WLAN_CFG_RX_SW_DESC_WEIGHT_SIZE_MIN 1 +#define WLAN_CFG_RX_SW_DESC_WEIGHT_SIZE_MAX 3 +#endif //QCA_HOST2FW_RXBUF_RING + +#define WLAN_CFG_RX_FLOW_SEARCH_TABLE_SIZE 16384 +#define WLAN_CFG_RX_FLOW_SEARCH_TABLE_SIZE_MIN 1 +#define WLAN_CFG_RX_FLOW_SEARCH_TABLE_SIZE_MAX 16384 + +#define WLAN_CFG_PKTLOG_BUFFER_SIZE 10 +#define WLAN_CFG_PKTLOG_MIN_BUFFER_SIZE 1 +#define WLAN_CFG_PKTLOG_MAX_BUFFER_SIZE 10 + +/* DP INI Declerations */ +#define CFG_DP_HTT_PACKET_TYPE \ + CFG_INI_UINT("dp_htt_packet_type", \ + WLAN_CFG_HTT_PKT_TYPE_MIN, \ + WLAN_CFG_HTT_PKT_TYPE_MAX, \ + WLAN_CFG_HTT_PKT_TYPE, \ + CFG_VALUE_OR_DEFAULT, "DP HTT packet type") + +#define CFG_DP_INT_BATCH_THRESHOLD_OTHER \ + CFG_INI_UINT("dp_int_batch_threshold_other", \ + WLAN_CFG_INT_BATCH_THRESHOLD_OTHER_MIN, \ + WLAN_CFG_INT_BATCH_THRESHOLD_OTHER_MAX, \ + WLAN_CFG_INT_BATCH_THRESHOLD_OTHER, \ + CFG_VALUE_OR_DEFAULT, "DP INT batch threshold Other") + +#define CFG_DP_INT_BATCH_THRESHOLD_RX \ + CFG_INI_UINT("dp_int_batch_threshold_rx", \ + WLAN_CFG_INT_BATCH_THRESHOLD_RX_MIN, \ + WLAN_CFG_INT_BATCH_THRESHOLD_RX_MAX, \ + WLAN_CFG_INT_BATCH_THRESHOLD_RX, \ + CFG_VALUE_OR_DEFAULT, "DP INT batch threshold Rx") + +#define CFG_DP_INT_BATCH_THRESHOLD_TX \ + CFG_INI_UINT("dp_int_batch_threshold_tx", \ + WLAN_CFG_INT_BATCH_THRESHOLD_TX_MIN, \ + WLAN_CFG_INT_BATCH_THRESHOLD_TX_MAX, \ + WLAN_CFG_INT_BATCH_THRESHOLD_TX, \ + CFG_VALUE_OR_DEFAULT, "DP INT threshold Tx") + +#define CFG_DP_INT_TIMER_THRESHOLD_OTHER \ + CFG_INI_UINT("dp_int_timer_threshold_other", \ + WLAN_CFG_INT_TIMER_THRESHOLD_OTHER_MIN, \ + WLAN_CFG_INT_TIMER_THRESHOLD_OTHER_MAX, \ + WLAN_CFG_INT_TIMER_THRESHOLD_OTHER, \ + CFG_VALUE_OR_DEFAULT, "DP INT timer threshold Other") + +#define CFG_DP_INT_TIMER_THRESHOLD_RX \ + CFG_INI_UINT("dp_int_timer_threshold_rx", \ + WLAN_CFG_INT_TIMER_THRESHOLD_RX_MIN, \ + WLAN_CFG_INT_TIMER_THRESHOLD_RX_MAX, \ + WLAN_CFG_INT_TIMER_THRESHOLD_RX, \ + CFG_VALUE_OR_DEFAULT, "DP INT timer threshold Rx") + +#define CFG_DP_INT_TIMER_THRESHOLD_REO_RING \ + CFG_INI_UINT("dp_int_timer_threshold_reo_ring", \ + WLAN_CFG_INT_TIMER_THRESHOLD_REO_RING_MIN, \ + WLAN_CFG_INT_TIMER_THRESHOLD_REO_RING_MAX, \ + WLAN_CFG_INT_TIMER_THRESHOLD_REO_RING, \ + CFG_VALUE_OR_DEFAULT, "DP INT timer threshold Reo ring") + +#define CFG_DP_INT_TIMER_THRESHOLD_WBM_RELEASE_RING \ + CFG_INI_UINT("dp_int_timer_threshold_wbm_release_ring", \ + WLAN_CFG_INT_TIMER_THRESHOLD_WBM_RELEASE_RING_MIN, \ + WLAN_CFG_INT_TIMER_THRESHOLD_WBM_RELEASE_RING_MAX, \ + WLAN_CFG_INT_TIMER_THRESHOLD_WBM_RELEASE_RING, \ + CFG_VALUE_OR_DEFAULT, "DP INT timer threshold wbm release ring") + +#define CFG_DP_INT_TIMER_THRESHOLD_TX \ + CFG_INI_UINT("dp_int_timer_threshold_tx", \ + WLAN_CFG_INT_TIMER_THRESHOLD_TX_MIN, \ + WLAN_CFG_INT_TIMER_THRESHOLD_TX_MAX, \ + WLAN_CFG_INT_TIMER_THRESHOLD_TX, \ + CFG_VALUE_OR_DEFAULT, "DP INT timer threshold Tx") + +#define CFG_DP_MAX_ALLOC_SIZE \ + CFG_INI_UINT("dp_max_alloc_size", \ + WLAN_CFG_MAX_ALLOC_SIZE_MIN, \ + WLAN_CFG_MAX_ALLOC_SIZE_MAX, \ + WLAN_CFG_MAX_ALLOC_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP Max Alloc Size") + +#define CFG_DP_MAX_CLIENTS \ + CFG_INI_UINT("dp_max_clients", \ + WLAN_CFG_MAX_CLIENTS_MIN, \ + WLAN_CFG_MAX_CLIENTS_MAX, \ + WLAN_CFG_MAX_CLIENTS, \ + CFG_VALUE_OR_DEFAULT, "DP Max Clients") + +#define CFG_DP_MAX_PEER_ID \ + CFG_INI_UINT("dp_max_peer_id", \ + WLAN_CFG_MAX_PEER_ID_MIN, \ + WLAN_CFG_MAX_PEER_ID_MAX, \ + WLAN_CFG_MAX_PEER_ID, \ + CFG_VALUE_OR_DEFAULT, "DP Max Peer ID") + +#define CFG_DP_REO_DEST_RINGS \ + CFG_INI_UINT("dp_reo_dest_rings", \ + WLAN_CFG_NUM_REO_DEST_RING_MIN, \ + WLAN_CFG_NUM_REO_DEST_RING_MAX, \ + WLAN_CFG_NUM_REO_DEST_RING, \ + CFG_VALUE_OR_DEFAULT, "DP REO Destination Rings") + +#define CFG_DP_TCL_DATA_RINGS \ + CFG_INI_UINT("dp_tcl_data_rings", \ + WLAN_CFG_NUM_TCL_DATA_RINGS_MIN, \ + WLAN_CFG_NUM_TCL_DATA_RINGS_MAX, \ + WLAN_CFG_NUM_TCL_DATA_RINGS, \ + CFG_VALUE_OR_DEFAULT, "DP TCL Data Rings") + +#define CFG_DP_TX_DESC \ + CFG_INI_UINT("dp_tx_desc", \ + WLAN_CFG_NUM_TX_DESC_MIN, \ + WLAN_CFG_NUM_TX_DESC_MAX, \ + WLAN_CFG_NUM_TX_DESC, \ + CFG_VALUE_OR_DEFAULT, "DP Tx Descriptors") + +#define CFG_DP_TX_EXT_DESC \ + CFG_INI_UINT("dp_tx_ext_desc", \ + WLAN_CFG_NUM_TX_EXT_DESC_MIN, \ + WLAN_CFG_NUM_TX_EXT_DESC_MAX, \ + WLAN_CFG_NUM_TX_EXT_DESC, \ + CFG_VALUE_OR_DEFAULT, "DP Tx Ext Descriptors") + +#define CFG_DP_TX_EXT_DESC_POOLS \ + CFG_INI_UINT("dp_tx_ext_desc_pool", \ + WLAN_CFG_NUM_TXEXT_DESC_POOL_MIN, \ + WLAN_CFG_NUM_TXEXT_DESC_POOL_MAX, \ + WLAN_CFG_NUM_TXEXT_DESC_POOL, \ + CFG_VALUE_OR_DEFAULT, "DP Tx Ext Descriptors Pool") + +#define CFG_DP_PDEV_RX_RING \ + CFG_INI_UINT("dp_pdev_rx_ring", \ + WLAN_CFG_PER_PDEV_RX_RING_MIN, \ + WLAN_CFG_PER_PDEV_RX_RING_MAX, \ + WLAN_CFG_PER_PDEV_RX_RING, \ + CFG_VALUE_OR_DEFAULT, "DP PDEV Rx Ring") + +#define CFG_DP_PDEV_TX_RING \ + CFG_INI_UINT("dp_pdev_tx_ring", \ + WLAN_CFG_PER_PDEV_TX_RING_MIN, \ + WLAN_CFG_PER_PDEV_TX_RING_MAX, \ + WLAN_CFG_PER_PDEV_TX_RING, \ + CFG_VALUE_OR_DEFAULT, \ + "DP PDEV Tx Ring") + +#define CFG_DP_RX_DEFRAG_TIMEOUT \ + CFG_INI_UINT("dp_rx_defrag_timeout", \ + WLAN_CFG_RX_DEFRAG_TIMEOUT_MIN, \ + WLAN_CFG_RX_DEFRAG_TIMEOUT_MAX, \ + WLAN_CFG_RX_DEFRAG_TIMEOUT, \ + CFG_VALUE_OR_DEFAULT, "DP Rx Defrag Timeout") + +#define CFG_DP_TX_COMPL_RING_SIZE \ + CFG_INI_UINT("dp_tx_compl_ring_size", \ + WLAN_CFG_TX_COMP_RING_SIZE_MIN, \ + WLAN_CFG_TX_COMP_RING_SIZE_MAX, \ + WLAN_CFG_TX_COMP_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP Tx Completion Ring Size") + +#define CFG_DP_TX_RING_SIZE \ + CFG_INI_UINT("dp_tx_ring_size", \ + WLAN_CFG_TX_RING_SIZE_MIN,\ + WLAN_CFG_TX_RING_SIZE_MAX,\ + WLAN_CFG_TX_RING_SIZE,\ + CFG_VALUE_OR_DEFAULT, "DP Tx Ring Size") + +#define CFG_DP_NSS_COMP_RING_SIZE \ + CFG_INI_UINT("dp_nss_comp_ring_size", \ + WLAN_CFG_NSS_TX_COMP_RING_SIZE_MIN, \ + WLAN_CFG_NSS_TX_COMP_RING_SIZE_MAX, \ + WLAN_CFG_NSS_TX_COMP_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP NSS completion Ring Size") + +#define CFG_DP_PDEV_LMAC_RING \ + CFG_INI_UINT("dp_pdev_lmac_ring", \ + WLAN_CFG_PER_PDEV_LMAC_RING_MIN, \ + WLAN_CFG_PER_PDEV_LMAC_RING_MAX, \ + WLAN_CFG_PER_PDEV_LMAC_RING, \ + CFG_VALUE_OR_DEFAULT, "DP pdev LMAC ring") +/* + * + * dp_rx_pending_hl_threshold - High threshold of frame number to start + * frame dropping scheme + * @Min: 0 + * @Max: 524288 + * @Default: 393216 + * + * This ini entry is used to set a high limit threshold to start frame + * dropping scheme + * + * Usage: External + * + * + */ +#define CFG_DP_RX_PENDING_HL_THRESHOLD \ + CFG_INI_UINT("dp_rx_pending_hl_threshold", \ + WLAN_CFG_RX_PENDING_HL_THRESHOLD_MIN, \ + WLAN_CFG_RX_PENDING_HL_THRESHOLD_MAX, \ + WLAN_CFG_RX_PENDING_HL_THRESHOLD, \ + CFG_VALUE_OR_DEFAULT, "DP rx pending hl threshold") + +/* + * + * dp_rx_pending_lo_threshold - Low threshold of frame number to stop + * frame dropping scheme + * @Min: 100 + * @Max: 524288 + * @Default: 393216 + * + * This ini entry is used to set a low limit threshold to stop frame + * dropping scheme + * + * Usage: External + * + * + */ +#define CFG_DP_RX_PENDING_LO_THRESHOLD \ + CFG_INI_UINT("dp_rx_pending_lo_threshold", \ + WLAN_CFG_RX_PENDING_LO_THRESHOLD_MIN, \ + WLAN_CFG_RX_PENDING_LO_THRESHOLD_MAX, \ + WLAN_CFG_RX_PENDING_LO_THRESHOLD, \ + CFG_VALUE_OR_DEFAULT, "DP rx pending lo threshold") + +#define CFG_DP_BASE_HW_MAC_ID \ + CFG_INI_UINT("dp_base_hw_macid", \ + 0, 1, 1, \ + CFG_VALUE_OR_DEFAULT, "DP Base HW Mac ID") + +#define CFG_DP_RX_HASH \ + CFG_INI_BOOL("dp_rx_hash", true, \ + "DP Rx Hash") + +#define CFG_DP_TSO \ + CFG_INI_BOOL("TSOEnable", false, \ + "DP TSO Enabled") + +#define CFG_DP_LRO \ + CFG_INI_BOOL("LROEnable", WLAN_LRO_ENABLE, \ + "DP LRO Enable") + +/* + * + * CFG_DP_SG - Enable the SG feature standalonely + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * This ini entry is used to enable/disable SG feature standalonely. + * Also does Rome support SG on TX, lithium does not. + * For example the lithium does not support SG on UDP frames. + * Which is able to handle SG only for TSO frames(in case TSO is enabled). + * + * Usage: External + * + * + */ +#define CFG_DP_SG \ + CFG_INI_BOOL("dp_sg_support", false, \ + "DP SG Enable") + +#define WLAN_CFG_GRO_ENABLE_MIN 0 +#define WLAN_CFG_GRO_ENABLE_MAX 3 +#define WLAN_CFG_GRO_ENABLE_DEFAULT 0 +#define DP_GRO_ENABLE_BIT_SET BIT(0) +#define DP_TC_BASED_DYNAMIC_GRO BIT(1) + +/* + * + * CFG_DP_GRO - Enable the GRO feature standalonely + * @Min: 0 + * @Max: 3 + * @Default: 0 + * + * This ini entry is used to enable/disable GRO feature standalonely. + * Value 0: Disable GRO feature + * Value 1: Enable GRO feature always + * Value 3: Enable GRO dynamic feature where TC rule can control GRO + * behavior + * + * Usage: External + * + * + */ +#define CFG_DP_GRO \ + CFG_INI_UINT("GROEnable", \ + WLAN_CFG_GRO_ENABLE_MIN, \ + WLAN_CFG_GRO_ENABLE_MAX, \ + WLAN_CFG_GRO_ENABLE_DEFAULT, \ + CFG_VALUE_OR_DEFAULT, "DP GRO Enable") + +#define WLAN_CFG_TC_INGRESS_PRIO_MIN 0 +#define WLAN_CFG_TC_INGRESS_PRIO_MAX 0xFFFF +#define WLAN_CFG_TC_INGRESS_PRIO_DEFAULT 0 + +#define CFG_DP_TC_INGRESS_PRIO \ + CFG_INI_UINT("tc_ingress_prio", \ + WLAN_CFG_TC_INGRESS_PRIO_MIN, \ + WLAN_CFG_TC_INGRESS_PRIO_MAX, \ + WLAN_CFG_TC_INGRESS_PRIO_DEFAULT, \ + CFG_VALUE_OR_DEFAULT, "DP tc ingress prio") + +#define CFG_DP_OL_TX_CSUM \ + CFG_INI_BOOL("dp_offload_tx_csum_support", false, \ + "DP tx csum Enable") + +#define CFG_DP_OL_RX_CSUM \ + CFG_INI_BOOL("dp_offload_rx_csum_support", false, \ + "DP rx csum Enable") + +#define CFG_DP_RAWMODE \ + CFG_INI_BOOL("dp_rawmode_support", false, \ + "DP rawmode Enable") + +#define CFG_DP_PEER_FLOW_CTRL \ + CFG_INI_BOOL("dp_peer_flow_control_support", false, \ + "DP peer flow ctrl Enable") + +#define CFG_DP_NAPI \ + CFG_INI_BOOL("dp_napi_enabled", PLATFORM_VALUE(true, false), \ + "DP Napi Enabled") +/* + * + * gEnableP2pIpTcpUdpChecksumOffload - Enable checksum offload for P2P mode + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * This ini entry is used to enable/disable TX checksum(UDP/TCP) for P2P modes. + * This includes P2P device mode, P2P client mode and P2P GO mode. + * The feature is enabled by default. To disable TX checksum for P2P, add the + * following entry in ini file: + * gEnableP2pIpTcpUdpChecksumOffload=0 + * + * Usage: External + * + * + */ +#define CFG_DP_P2P_TCP_UDP_CKSUM_OFFLOAD \ + CFG_INI_BOOL("gEnableP2pIpTcpUdpChecksumOffload", true, \ + "DP TCP UDP Checksum Offload for P2P mode (device/cli/go)") + +/* + * + * gEnableNanIpTcpUdpChecksumOffload - Enable NAN checksum offload + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * Usage: External + * + * + */ +#define CFG_DP_NAN_TCP_UDP_CKSUM_OFFLOAD \ + CFG_INI_BOOL("gEnableNanIpTcpUdpChecksumOffload", true, \ + "DP TCP UDP Checksum Offload for NAN mode") + +/* + * + * gEnableIpTcpUdpChecksumOffload - Enable checksum offload + * @Min: 0 + * @Max: 1 + * @Default: 1 + * + * Usage: External + * + * + */ +#define CFG_DP_TCP_UDP_CKSUM_OFFLOAD \ + CFG_INI_BOOL("gEnableIpTcpUdpChecksumOffload", true, \ + "DP TCP UDP Checksum Offload") + +#define CFG_DP_DEFRAG_TIMEOUT_CHECK \ + CFG_INI_BOOL("dp_defrag_timeout_check", true, \ + "DP Defrag Timeout Check") + +#define CFG_DP_WBM_RELEASE_RING \ + CFG_INI_UINT("dp_wbm_release_ring", \ + WLAN_CFG_WBM_RELEASE_RING_SIZE_MIN, \ + WLAN_CFG_WBM_RELEASE_RING_SIZE_MAX, \ + WLAN_CFG_WBM_RELEASE_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP WBM Release Ring") + +#define CFG_DP_TCL_CMD_RING \ + CFG_INI_UINT("dp_tcl_cmd_ring", \ + WLAN_CFG_TCL_CMD_RING_SIZE_MIN, \ + WLAN_CFG_TCL_CMD_RING_SIZE_MAX, \ + WLAN_CFG_TCL_CMD_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP TCL command ring") + +#define CFG_DP_TCL_STATUS_RING \ + CFG_INI_UINT("dp_tcl_status_ring",\ + WLAN_CFG_TCL_STATUS_RING_SIZE_MIN, \ + WLAN_CFG_TCL_STATUS_RING_SIZE_MAX, \ + WLAN_CFG_TCL_STATUS_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP TCL status ring") + +#define CFG_DP_REO_REINJECT_RING \ + CFG_INI_UINT("dp_reo_reinject_ring", \ + WLAN_CFG_REO_REINJECT_RING_SIZE_MIN, \ + WLAN_CFG_REO_REINJECT_RING_SIZE_MAX, \ + WLAN_CFG_REO_REINJECT_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP REO reinject ring") + +#define CFG_DP_RX_RELEASE_RING \ + CFG_INI_UINT("dp_rx_release_ring", \ + WLAN_CFG_RX_RELEASE_RING_SIZE_MIN, \ + WLAN_CFG_RX_RELEASE_RING_SIZE_MAX, \ + WLAN_CFG_RX_RELEASE_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP Rx release ring") + +#define CFG_DP_REO_EXCEPTION_RING \ + CFG_INI_UINT("dp_reo_exception_ring", \ + WLAN_CFG_REO_EXCEPTION_RING_SIZE_MIN, \ + WLAN_CFG_REO_EXCEPTION_RING_SIZE_MAX, \ + WLAN_CFG_REO_EXCEPTION_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP REO exception ring") + +#define CFG_DP_REO_CMD_RING \ + CFG_INI_UINT("dp_reo_cmd_ring", \ + WLAN_CFG_REO_CMD_RING_SIZE_MIN, \ + WLAN_CFG_REO_CMD_RING_SIZE_MAX, \ + WLAN_CFG_REO_CMD_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP REO command ring") + +#define CFG_DP_REO_STATUS_RING \ + CFG_INI_UINT("dp_reo_status_ring", \ + WLAN_CFG_REO_STATUS_RING_SIZE_MIN, \ + WLAN_CFG_REO_STATUS_RING_SIZE_MAX, \ + WLAN_CFG_REO_STATUS_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP REO status ring") + +#define CFG_DP_RXDMA_BUF_RING \ + CFG_INI_UINT("dp_rxdma_buf_ring", \ + WLAN_CFG_RXDMA_BUF_RING_SIZE_MIN, \ + WLAN_CFG_RXDMA_BUF_RING_SIZE_MAX, \ + WLAN_CFG_RXDMA_BUF_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP RXDMA buffer ring") + +#define CFG_DP_RXDMA_REFILL_RING \ + CFG_INI_UINT("dp_rxdma_refill_ring", \ + WLAN_CFG_RXDMA_REFILL_RING_SIZE_MIN, \ + WLAN_CFG_RXDMA_REFILL_RING_SIZE_MAX, \ + WLAN_CFG_RXDMA_REFILL_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP RXDMA refilll ring") + +#define CFG_DP_TX_DESC_LIMIT_0 \ + CFG_INI_UINT("dp_tx_desc_limit_0", \ + WLAN_CFG_TX_DESC_LIMIT_0_MIN, \ + WLAN_CFG_TX_DESC_LIMIT_0_MAX, \ + WLAN_CFG_TX_DESC_LIMIT_0, \ + CFG_VALUE_OR_DEFAULT, "DP TX DESC limit 0") + +#define CFG_DP_TX_DESC_LIMIT_1 \ + CFG_INI_UINT("dp_tx_desc_limit_1", \ + WLAN_CFG_TX_DESC_LIMIT_1_MIN, \ + WLAN_CFG_TX_DESC_LIMIT_1_MAX, \ + WLAN_CFG_TX_DESC_LIMIT_1, \ + CFG_VALUE_OR_DEFAULT, "DP TX DESC limit 1") + +#define CFG_DP_TX_DESC_LIMIT_2 \ + CFG_INI_UINT("dp_tx_desc_limit_2", \ + WLAN_CFG_TX_DESC_LIMIT_2_MIN, \ + WLAN_CFG_TX_DESC_LIMIT_2_MAX, \ + WLAN_CFG_TX_DESC_LIMIT_2, \ + CFG_VALUE_OR_DEFAULT, "DP TX DESC limit 2") + +#define CFG_DP_TX_DEVICE_LIMIT \ + CFG_INI_UINT("dp_tx_device_limit", \ + WLAN_CFG_TX_DEVICE_LIMIT_MIN, \ + WLAN_CFG_TX_DEVICE_LIMIT_MAX, \ + WLAN_CFG_TX_DEVICE_LIMIT, \ + CFG_VALUE_OR_DEFAULT, "DP TX DEVICE limit") + +#define CFG_DP_TX_SW_INTERNODE_QUEUE \ + CFG_INI_UINT("dp_tx_sw_internode_queue", \ + WLAN_CFG_TX_SW_INTERNODE_QUEUE_MIN, \ + WLAN_CFG_TX_SW_INTERNODE_QUEUE_MAX, \ + WLAN_CFG_TX_SW_INTERNODE_QUEUE, \ + CFG_VALUE_OR_DEFAULT, "DP TX SW internode queue") + +#define CFG_DP_RXDMA_MONITOR_BUF_RING \ + CFG_INI_UINT("dp_rxdma_monitor_buf_ring", \ + WLAN_CFG_RXDMA_MONITOR_BUF_RING_SIZE_MIN, \ + WLAN_CFG_RXDMA_MONITOR_BUF_RING_SIZE_MAX, \ + WLAN_CFG_RXDMA_MONITOR_BUF_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP RXDMA monitor buffer ring") + +#define CFG_DP_RXDMA_MONITOR_DST_RING \ + CFG_INI_UINT("dp_rxdma_monitor_dst_ring", \ + WLAN_CFG_RXDMA_MONITOR_DST_RING_SIZE_MIN, \ + WLAN_CFG_RXDMA_MONITOR_DST_RING_SIZE_MAX, \ + WLAN_CFG_RXDMA_MONITOR_DST_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP RXDMA monitor destination ring") + +#define CFG_DP_RXDMA_MONITOR_STATUS_RING \ + CFG_INI_UINT("dp_rxdma_monitor_status_ring", \ + WLAN_CFG_RXDMA_MONITOR_STATUS_RING_SIZE_MIN, \ + WLAN_CFG_RXDMA_MONITOR_STATUS_RING_SIZE_MAX, \ + WLAN_CFG_RXDMA_MONITOR_STATUS_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP RXDMA monitor status ring") + +#define CFG_DP_RXDMA_MONITOR_DESC_RING \ + CFG_INI_UINT("dp_rxdma_monitor_desc_ring", \ + WLAN_CFG_RXDMA_MONITOR_DESC_RING_SIZE_MIN, \ + WLAN_CFG_RXDMA_MONITOR_DESC_RING_SIZE_MAX, \ + WLAN_CFG_RXDMA_MONITOR_DESC_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP RXDMA monitor destination ring") + +#define CFG_DP_RXDMA_ERR_DST_RING \ + CFG_INI_UINT("dp_rxdma_err_dst_ring", \ + WLAN_CFG_RXDMA_ERR_DST_RING_SIZE_MIN, \ + WLAN_CFG_RXDMA_ERR_DST_RING_SIZE_MAX, \ + WLAN_CFG_RXDMA_ERR_DST_RING_SIZE, \ + CFG_VALUE_OR_DEFAULT, "RXDMA err destination ring") + +#define CFG_DP_PER_PKT_LOGGING \ + CFG_INI_UINT("enable_verbose_debug", \ + 0, 0xffff, 0, \ + CFG_VALUE_OR_DEFAULT, "Enable excessive per packet logging") + +#define CFG_DP_TX_FLOW_START_QUEUE_OFFSET \ + CFG_INI_UINT("TxFlowStartQueueOffset", \ + 0, 30, WLAN_CFG_TX_FLOW_START_QUEUE_OFFSET, \ + CFG_VALUE_OR_DEFAULT, "Start queue offset") + +#define CFG_DP_TX_FLOW_STOP_QUEUE_TH \ + CFG_INI_UINT("TxFlowStopQueueThreshold", \ + 0, 50, 15, \ + CFG_VALUE_OR_DEFAULT, "Stop queue Threshold") + +#define CFG_DP_IPA_UC_TX_BUF_SIZE \ + CFG_INI_UINT("IpaUcTxBufSize", \ + 0, 4096, WLAN_CFG_IPA_UC_TX_BUF_SIZE, \ + CFG_VALUE_OR_DEFAULT, "IPA tx buffer size") + +#define CFG_DP_IPA_UC_TX_PARTITION_BASE \ + CFG_INI_UINT("IpaUcTxPartitionBase", \ + 0, 9000, WLAN_CFG_IPA_UC_TX_PARTITION_BASE, \ + CFG_VALUE_OR_DEFAULT, "IPA tx partition base") + +#define CFG_DP_IPA_UC_RX_IND_RING_COUNT \ + CFG_INI_UINT("IpaUcRxIndRingCount", \ + 0, 2048, WLAN_CFG_IPA_UC_RX_IND_RING_COUNT, \ + CFG_VALUE_OR_DEFAULT, "IPA rx indication ring count") + +#define CFG_DP_REORDER_OFFLOAD_SUPPORT \ + CFG_INI_UINT("gReorderOffloadSupported", \ + 0, 1, 1, \ + CFG_VALUE_OR_DEFAULT, "Packet reordering offload to firmware") + +#define CFG_DP_AP_STA_SECURITY_SEPERATION \ + CFG_INI_BOOL("gDisableIntraBssFwd", \ + false, "Disable intrs BSS Rx packets") + +#define CFG_DP_ENABLE_DATA_STALL_DETECTION \ + CFG_INI_BOOL("gEnableDataStallDetection", \ + true, "Enable/Disable Data stall detection") + +#define CFG_DP_RX_SW_DESC_WEIGHT \ + CFG_INI_UINT("dp_rx_sw_desc_weight", \ + WLAN_CFG_RX_SW_DESC_WEIGHT_SIZE_MIN, \ + WLAN_CFG_RX_SW_DESC_WEIGHT_SIZE_MAX, \ + WLAN_CFG_RX_SW_DESC_WEIGHT_SIZE, \ + CFG_VALUE_OR_DEFAULT, "DP RX SW DESC weight") + +#define CFG_DP_RX_FLOW_SEARCH_TABLE_SIZE \ + CFG_INI_UINT("dp_rx_flow_search_table_size", \ + WLAN_CFG_RX_FLOW_SEARCH_TABLE_SIZE_MIN, \ + WLAN_CFG_RX_FLOW_SEARCH_TABLE_SIZE_MAX, \ + WLAN_CFG_RX_FLOW_SEARCH_TABLE_SIZE, \ + CFG_VALUE_OR_DEFAULT, \ + "DP Rx Flow Search Table Size in number of entries") + +#define CFG_DP_RX_FLOW_TAG_ENABLE \ + CFG_INI_BOOL("dp_rx_flow_tag_enable", false, \ + "Enable/Disable DP Rx Flow Tag") + +#define CFG_DP_RX_FLOW_SEARCH_TABLE_PER_PDEV \ + CFG_INI_BOOL("dp_rx_per_pdev_flow_search", false, \ + "DP Rx Flow Search Table Is Per PDev") + +#define CFG_DP_RX_MON_PROTOCOL_FLOW_TAG_ENABLE \ + CFG_INI_BOOL("dp_rx_monitor_protocol_flow_tag_enable", true, \ + "Enable/Disable Rx Protocol & Flow tags in Monitor mode") + +/* + * + * dp_rx_fisa_enable - Control Rx datapath FISA + * @Min: 0 + * @Max: 1 + * @Default: 0 + * + * This ini is used to enable DP Rx FISA feature + * + * Related: dp_rx_flow_search_table_size + * + * Supported Feature: STA,P2P and SAP IPA disabled terminating + * + * Usage: Internal/External + * + * + */ +#define CFG_DP_RX_FISA_ENABLE \ + CFG_INI_BOOL("dp_rx_fisa_enable", false, \ + "Enable/Disable DP Rx FISA") + +#define CFG_DP_RXDMA_MONITOR_RX_DROP_THRESHOLD \ + CFG_INI_UINT("mon_drop_thresh", \ + WLAN_CFG_RXDMA_MONITOR_RX_DROP_THRESH_SIZE_MIN, \ + WLAN_CFG_RXDMA_MONITOR_RX_DROP_THRESH_SIZE_MAX, \ + WLAN_CFG_RXDMA_MONITOR_RX_DROP_THRESH_SIZE, \ + CFG_VALUE_OR_DEFAULT, "RXDMA monitor rx drop theshold") + +#define CFG_DP_PKTLOG_BUFFER_SIZE \ + CFG_INI_UINT("PktlogBufSize", \ + WLAN_CFG_PKTLOG_MIN_BUFFER_SIZE, \ + WLAN_CFG_PKTLOG_MAX_BUFFER_SIZE, \ + WLAN_CFG_PKTLOG_BUFFER_SIZE, \ + CFG_VALUE_OR_DEFAULT, "Packet Log buffer size") + +/* + * + * legacy_mode_csum_disable - Disable csum offload for legacy 802.11abg modes + * @Min: 0 + * @Max: 1 + * @Default: 0 + * + * This ini is used to disable HW checksum offload capability for legacy + * connections + * + * Related: gEnableIpTcpUdpChecksumOffload should be enabled + * + * Usage: Internal + * + * + */ + +#define CFG_DP_LEGACY_MODE_CSUM_DISABLE \ + CFG_INI_BOOL("legacy_mode_csum_disable", false, \ + "Enable/Disable legacy mode checksum") + +/* + * + * wow_check_rx_pending_enable - control to check RX frames pending in Wow + * @Min: 0 + * @Max: 1 + * @Default: 0 + * + * This ini is used to control DP Software to perform RX pending check + * before entering WoW mode + * + * Usage: Internal + * + * + */ +#define CFG_DP_WOW_CHECK_RX_PENDING \ + CFG_INI_BOOL("wow_check_rx_pending_enable", \ + false, \ + "enable rx frame pending check in WoW mode") + +/* + * + * gForceRX64BA - enable force 64 blockack mode for RX + * @Min: 0 + * @Max: 1 + * @Default: 0 + * + * This ini is used to control DP Software to use 64 blockack + * for RX direction forcibly + * + * Usage: Internal + * + * + */ +#define CFG_FORCE_RX_64_BA \ + CFG_INI_BOOL("gForceRX64BA", \ + false, "Enable/Disable force 64 blockack in RX side") + +#define CFG_DP \ + CFG(CFG_DP_HTT_PACKET_TYPE) \ + CFG(CFG_DP_INT_BATCH_THRESHOLD_OTHER) \ + CFG(CFG_DP_INT_BATCH_THRESHOLD_RX) \ + CFG(CFG_DP_INT_BATCH_THRESHOLD_TX) \ + CFG(CFG_DP_INT_TIMER_THRESHOLD_OTHER) \ + CFG(CFG_DP_INT_TIMER_THRESHOLD_RX) \ + CFG(CFG_DP_INT_TIMER_THRESHOLD_TX) \ + CFG(CFG_DP_MAX_ALLOC_SIZE) \ + CFG(CFG_DP_MAX_CLIENTS) \ + CFG(CFG_DP_MAX_PEER_ID) \ + CFG(CFG_DP_REO_DEST_RINGS) \ + CFG(CFG_DP_TCL_DATA_RINGS) \ + CFG(CFG_DP_TX_DESC) \ + CFG(CFG_DP_TX_EXT_DESC) \ + CFG(CFG_DP_TX_EXT_DESC_POOLS) \ + CFG(CFG_DP_PDEV_RX_RING) \ + CFG(CFG_DP_PDEV_TX_RING) \ + CFG(CFG_DP_RX_DEFRAG_TIMEOUT) \ + CFG(CFG_DP_TX_COMPL_RING_SIZE) \ + CFG(CFG_DP_TX_RING_SIZE) \ + CFG(CFG_DP_NSS_COMP_RING_SIZE) \ + CFG(CFG_DP_PDEV_LMAC_RING) \ + CFG(CFG_DP_BASE_HW_MAC_ID) \ + CFG(CFG_DP_RX_HASH) \ + CFG(CFG_DP_TSO) \ + CFG(CFG_DP_LRO) \ + CFG(CFG_DP_SG) \ + CFG(CFG_DP_GRO) \ + CFG(CFG_DP_TC_INGRESS_PRIO) \ + CFG(CFG_DP_OL_TX_CSUM) \ + CFG(CFG_DP_OL_RX_CSUM) \ + CFG(CFG_DP_RAWMODE) \ + CFG(CFG_DP_PEER_FLOW_CTRL) \ + CFG(CFG_DP_NAPI) \ + CFG(CFG_DP_TCP_UDP_CKSUM_OFFLOAD) \ + CFG(CFG_DP_NAN_TCP_UDP_CKSUM_OFFLOAD) \ + CFG(CFG_DP_P2P_TCP_UDP_CKSUM_OFFLOAD) \ + CFG(CFG_DP_DEFRAG_TIMEOUT_CHECK) \ + CFG(CFG_DP_WBM_RELEASE_RING) \ + CFG(CFG_DP_TCL_CMD_RING) \ + CFG(CFG_DP_TCL_STATUS_RING) \ + CFG(CFG_DP_REO_REINJECT_RING) \ + CFG(CFG_DP_RX_RELEASE_RING) \ + CFG(CFG_DP_REO_EXCEPTION_RING) \ + CFG(CFG_DP_REO_CMD_RING) \ + CFG(CFG_DP_REO_STATUS_RING) \ + CFG(CFG_DP_RXDMA_BUF_RING) \ + CFG(CFG_DP_RXDMA_REFILL_RING) \ + CFG(CFG_DP_TX_DESC_LIMIT_0) \ + CFG(CFG_DP_TX_DESC_LIMIT_1) \ + CFG(CFG_DP_TX_DESC_LIMIT_2) \ + CFG(CFG_DP_TX_DEVICE_LIMIT) \ + CFG(CFG_DP_TX_SW_INTERNODE_QUEUE) \ + CFG(CFG_DP_RXDMA_MONITOR_BUF_RING) \ + CFG(CFG_DP_RXDMA_MONITOR_DST_RING) \ + CFG(CFG_DP_RXDMA_MONITOR_STATUS_RING) \ + CFG(CFG_DP_RXDMA_MONITOR_DESC_RING) \ + CFG(CFG_DP_RXDMA_ERR_DST_RING) \ + CFG(CFG_DP_PER_PKT_LOGGING) \ + CFG(CFG_DP_TX_FLOW_START_QUEUE_OFFSET) \ + CFG(CFG_DP_TX_FLOW_STOP_QUEUE_TH) \ + CFG(CFG_DP_IPA_UC_TX_BUF_SIZE) \ + CFG(CFG_DP_IPA_UC_TX_PARTITION_BASE) \ + CFG(CFG_DP_IPA_UC_RX_IND_RING_COUNT) \ + CFG(CFG_DP_REORDER_OFFLOAD_SUPPORT) \ + CFG(CFG_DP_AP_STA_SECURITY_SEPERATION) \ + CFG(CFG_DP_ENABLE_DATA_STALL_DETECTION) \ + CFG(CFG_DP_RX_SW_DESC_WEIGHT) \ + CFG(CFG_DP_RX_FLOW_SEARCH_TABLE_SIZE) \ + CFG(CFG_DP_RX_FLOW_TAG_ENABLE) \ + CFG(CFG_DP_RX_FLOW_SEARCH_TABLE_PER_PDEV) \ + CFG(CFG_DP_RX_MON_PROTOCOL_FLOW_TAG_ENABLE) \ + CFG(CFG_DP_RXDMA_MONITOR_RX_DROP_THRESHOLD) \ + CFG(CFG_DP_PKTLOG_BUFFER_SIZE) \ + CFG(CFG_DP_RX_FISA_ENABLE) \ + CFG(CFG_DP_LEGACY_MODE_CSUM_DISABLE) \ + CFG(CFG_DP_RX_PENDING_HL_THRESHOLD) \ + CFG(CFG_DP_RX_PENDING_LO_THRESHOLD) \ + CFG(CFG_DP_WOW_CHECK_RX_PENDING) \ + CFG(CFG_FORCE_RX_64_BA) +#endif /* _CFG_DP_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wlan_cfg/wlan_cfg.c b/drivers/staging/qca-wifi-host-cmn/wlan_cfg/wlan_cfg.c new file mode 100644 index 0000000000000000000000000000000000000000..e760e8fe8938372610c39448bd3217bf86003fa8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wlan_cfg/wlan_cfg.c @@ -0,0 +1,1328 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if defined(CONFIG_HL_SUPPORT) +#include "wlan_tgt_def_config_hl.h" +#else +#include "wlan_tgt_def_config.h" +#endif + +#include "qdf_trace.h" +#include "qdf_mem.h" +#include +#include "wlan_cfg.h" +#include "cfg_ucfg_api.h" +#include "hal_api.h" +#include "dp_types.h" + +/* + * FIX THIS - + * For now, all these configuration parameters are hardcoded. + * Many of these should actually be coming from dts file/ini file + */ + +/* + * The max allowed size for tx comp ring is 8191. + * This is limitted by h/w ring max size. + * As this is not a power of 2 it does not work with nss offload so the + * nearest available size which is power of 2 is 4096 chosen for nss + */ + +#define WLAN_CFG_TX_RING_MASK_0 0x1 +#define WLAN_CFG_TX_RING_MASK_1 0x2 +#define WLAN_CFG_TX_RING_MASK_2 0x4 +#define WLAN_CFG_TX_RING_MASK_3 0x0 + +#define WLAN_CFG_RX_RING_MASK_0 0x1 +#define WLAN_CFG_RX_RING_MASK_1 0x2 +#define WLAN_CFG_RX_RING_MASK_2 0x4 +#define WLAN_CFG_RX_RING_MASK_3 0x8 + +#define WLAN_CFG_RX_MON_RING_MASK_0 0x1 +#define WLAN_CFG_RX_MON_RING_MASK_1 0x2 +#define WLAN_CFG_RX_MON_RING_MASK_2 0x4 +#define WLAN_CFG_RX_MON_RING_MASK_3 0x0 + +#define WLAN_CFG_HOST2RXDMA_MON_RING_MASK_0 0x1 +#define WLAN_CFG_HOST2RXDMA_MON_RING_MASK_1 0x2 +#define WLAN_CFG_HOST2RXDMA_MON_RING_MASK_2 0x4 + +#define WLAN_CFG_RXDMA2HOST_MON_RING_MASK_0 0x1 +#define WLAN_CFG_RXDMA2HOST_MON_RING_MASK_1 0x2 +#define WLAN_CFG_RXDMA2HOST_MON_RING_MASK_2 0x4 + +#define WLAN_CFG_RX_ERR_RING_MASK_0 0x1 +#define WLAN_CFG_RX_ERR_RING_MASK_1 0x0 +#define WLAN_CFG_RX_ERR_RING_MASK_2 0x0 +#define WLAN_CFG_RX_ERR_RING_MASK_3 0x0 + +#define WLAN_CFG_RX_WBM_REL_RING_MASK_0 0x1 +#define WLAN_CFG_RX_WBM_REL_RING_MASK_1 0x0 +#define WLAN_CFG_RX_WBM_REL_RING_MASK_2 0x0 +#define WLAN_CFG_RX_WBM_REL_RING_MASK_3 0x0 + +#define WLAN_CFG_REO_STATUS_RING_MASK_0 0x1 +#define WLAN_CFG_REO_STATUS_RING_MASK_1 0x0 +#define WLAN_CFG_REO_STATUS_RING_MASK_2 0x0 +#define WLAN_CFG_REO_STATUS_RING_MASK_3 0x0 + +#define WLAN_CFG_RXDMA2HOST_RING_MASK_0 0x1 +#define WLAN_CFG_RXDMA2HOST_RING_MASK_1 0x2 +#define WLAN_CFG_RXDMA2HOST_RING_MASK_2 0x4 +#define WLAN_CFG_RXDMA2HOST_RING_MASK_3 0x0 + +#define WLAN_CFG_HOST2RXDMA_RING_MASK_0 0x1 +#define WLAN_CFG_HOST2RXDMA_RING_MASK_1 0x2 +#define WLAN_CFG_HOST2RXDMA_RING_MASK_2 0x4 +#define WLAN_CFG_HOST2RXDMA_RING_MASK_3 0x0 + +#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1) +static const int tx_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_TX_RING_MASK_0, 0, 0, 0, 0, 0, 0}; + +#ifndef IPA_OFFLOAD +static const int rx_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, WLAN_CFG_RX_RING_MASK_0, WLAN_CFG_RX_RING_MASK_1, WLAN_CFG_RX_RING_MASK_2, WLAN_CFG_RX_RING_MASK_3, 0, 0}; +#else +static const int rx_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, WLAN_CFG_RX_RING_MASK_0, WLAN_CFG_RX_RING_MASK_1, WLAN_CFG_RX_RING_MASK_2, 0, 0, 0}; +#endif + +static const int rx_mon_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, WLAN_CFG_RX_MON_RING_MASK_0, WLAN_CFG_RX_MON_RING_MASK_1, 0, 0, 0, 0}; + +static const int host2rxdma_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, 0}; + +static const int rxdma2host_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, WLAN_CFG_RXDMA2HOST_RING_MASK_0, WLAN_CFG_RXDMA2HOST_RING_MASK_1}; + +static const int host2rxdma_mon_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, 0}; + +static const int rxdma2host_mon_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, 0}; + +static const int rx_err_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, WLAN_CFG_RX_ERR_RING_MASK_0}; + +static const int rx_wbm_rel_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, WLAN_CFG_RX_WBM_REL_RING_MASK_0}; + +static const int reo_status_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, WLAN_CFG_REO_STATUS_RING_MASK_0}; + +static const int tx_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, 0}; + +static const int rx_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, 0}; + +static const int rx_mon_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, 0}; + +static const int host2rxdma_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, 0}; + +static const int rxdma2host_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, 0}; + +static const int host2rxdma_mon_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, 0}; + +static const int rxdma2host_mon_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, 0}; + +static const int rx_err_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, 0}; + +static const int rx_wbm_rel_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, 0}; + +static const int reo_status_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0, 0, 0, 0}; + +#else + +static const int tx_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_TX_RING_MASK_0, + WLAN_CFG_TX_RING_MASK_1, + WLAN_CFG_TX_RING_MASK_2, + WLAN_CFG_TX_RING_MASK_3}; + +static const int rx_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_RX_RING_MASK_0, + WLAN_CFG_RX_RING_MASK_1, + WLAN_CFG_RX_RING_MASK_2, + WLAN_CFG_RX_RING_MASK_3}; + +static const int rx_mon_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0}; + +static const int host2rxdma_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0}; + +static const int rxdma2host_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, 0, 0, 0}; + +static const int host2rxdma_mon_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_HOST2RXDMA_MON_RING_MASK_0, + WLAN_CFG_HOST2RXDMA_MON_RING_MASK_1, + WLAN_CFG_HOST2RXDMA_MON_RING_MASK_2}; + +static const int rxdma2host_mon_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_RXDMA2HOST_MON_RING_MASK_0, + WLAN_CFG_RXDMA2HOST_MON_RING_MASK_1, + WLAN_CFG_RXDMA2HOST_MON_RING_MASK_2}; + +static const int rx_err_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_RX_ERR_RING_MASK_0, + WLAN_CFG_RX_ERR_RING_MASK_1, + WLAN_CFG_RX_ERR_RING_MASK_2, + WLAN_CFG_RX_ERR_RING_MASK_3}; + +static const int rx_wbm_rel_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_RX_WBM_REL_RING_MASK_0, + WLAN_CFG_RX_WBM_REL_RING_MASK_1, + WLAN_CFG_RX_WBM_REL_RING_MASK_2, + WLAN_CFG_RX_WBM_REL_RING_MASK_3}; + +static const int reo_status_ring_mask_msi[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_REO_STATUS_RING_MASK_0, + WLAN_CFG_REO_STATUS_RING_MASK_1, + WLAN_CFG_REO_STATUS_RING_MASK_2, + WLAN_CFG_REO_STATUS_RING_MASK_3}; + +static const int tx_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_TX_RING_MASK_0, + WLAN_CFG_TX_RING_MASK_1, + WLAN_CFG_TX_RING_MASK_2, + WLAN_CFG_TX_RING_MASK_3}; + +static const int rx_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + WLAN_CFG_RX_RING_MASK_0, + WLAN_CFG_RX_RING_MASK_1, + WLAN_CFG_RX_RING_MASK_2, + WLAN_CFG_RX_RING_MASK_3}; + +static const int rx_mon_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, + 0, + 0, + 0, + WLAN_CFG_RX_MON_RING_MASK_0, + WLAN_CFG_RX_MON_RING_MASK_1, + WLAN_CFG_RX_MON_RING_MASK_2}; + +static const int host2rxdma_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_HOST2RXDMA_RING_MASK_0, + WLAN_CFG_HOST2RXDMA_RING_MASK_1, + WLAN_CFG_HOST2RXDMA_RING_MASK_2, + WLAN_CFG_HOST2RXDMA_RING_MASK_3}; + +static const int rxdma2host_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_RXDMA2HOST_RING_MASK_0, + WLAN_CFG_RXDMA2HOST_RING_MASK_1, + WLAN_CFG_RXDMA2HOST_RING_MASK_2, + WLAN_CFG_RXDMA2HOST_RING_MASK_3}; + +static const int host2rxdma_mon_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, + 0, + 0, + 0, + WLAN_CFG_HOST2RXDMA_MON_RING_MASK_0, + WLAN_CFG_HOST2RXDMA_MON_RING_MASK_1, + WLAN_CFG_HOST2RXDMA_MON_RING_MASK_2}; + +static const int rxdma2host_mon_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, + 0, + 0, + 0, + WLAN_CFG_RXDMA2HOST_MON_RING_MASK_0, + WLAN_CFG_RXDMA2HOST_MON_RING_MASK_1, + WLAN_CFG_RXDMA2HOST_MON_RING_MASK_2}; + +static const int rx_err_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_RX_ERR_RING_MASK_0, + WLAN_CFG_RX_ERR_RING_MASK_1, + WLAN_CFG_RX_ERR_RING_MASK_2, + WLAN_CFG_RX_ERR_RING_MASK_3}; + +static const int rx_wbm_rel_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_RX_WBM_REL_RING_MASK_0, + WLAN_CFG_RX_WBM_REL_RING_MASK_1, + WLAN_CFG_RX_WBM_REL_RING_MASK_2, + WLAN_CFG_RX_WBM_REL_RING_MASK_3}; + +static const int reo_status_ring_mask_integrated[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_REO_STATUS_RING_MASK_0, + WLAN_CFG_REO_STATUS_RING_MASK_1, + WLAN_CFG_REO_STATUS_RING_MASK_2, + WLAN_CFG_REO_STATUS_RING_MASK_3}; +#endif /* MAX_PDEV_CNT == 1 */ + +/** + * g_wlan_srng_cfg[] - Per ring_type specific configuration + * + */ +struct wlan_srng_cfg g_wlan_srng_cfg[MAX_RING_TYPES]; + +/* REO_DST ring configuration */ +struct wlan_srng_cfg wlan_srng_reo_cfg = { + .timer_threshold = WLAN_CFG_INT_TIMER_THRESHOLD_REO_RING, + .batch_count_threshold = 0, + .low_threshold = 0, +}; + +/* WBM2SW_RELEASE ring configuration */ +struct wlan_srng_cfg wlan_srng_wbm_release_cfg = { + .timer_threshold = WLAN_CFG_INT_TIMER_THRESHOLD_WBM_RELEASE_RING, + .batch_count_threshold = 0, + .low_threshold = 0, +}; + +/* RXDMA_BUF ring configuration */ +struct wlan_srng_cfg wlan_srng_rxdma_buf_cfg = { + .timer_threshold = WLAN_CFG_INT_TIMER_THRESHOLD_RX, + .batch_count_threshold = 0, + .low_threshold = WLAN_CFG_RXDMA_REFILL_RING_SIZE >> 3, +}; + +/* RXDMA_MONITOR_BUF ring configuration */ +struct wlan_srng_cfg wlan_srng_rxdma_monitor_buf_cfg = { + .timer_threshold = WLAN_CFG_INT_TIMER_THRESHOLD_RX, + .batch_count_threshold = 0, + .low_threshold = WLAN_CFG_RXDMA_MONITOR_BUF_RING_SIZE >> 3, +}; + +/* RXDMA_MONITOR_STATUS ring configuration */ +struct wlan_srng_cfg wlan_srng_rxdma_monitor_status_cfg = { + .timer_threshold = WLAN_CFG_INT_TIMER_THRESHOLD_RX, + .batch_count_threshold = 0, + .low_threshold = WLAN_CFG_RXDMA_MONITOR_STATUS_RING_SIZE >> 3, +}; + +/* DEFAULT_CONFIG ring configuration */ +struct wlan_srng_cfg wlan_srng_default_cfg = { + .timer_threshold = WLAN_CFG_INT_TIMER_THRESHOLD_OTHER, + .batch_count_threshold = WLAN_CFG_INT_BATCH_THRESHOLD_OTHER, + .low_threshold = 0, +}; + +void wlan_set_srng_cfg(struct wlan_srng_cfg **wlan_cfg) +{ + g_wlan_srng_cfg[REO_DST] = wlan_srng_reo_cfg; + g_wlan_srng_cfg[WBM2SW_RELEASE] = wlan_srng_wbm_release_cfg; + g_wlan_srng_cfg[REO_EXCEPTION] = wlan_srng_default_cfg; + g_wlan_srng_cfg[REO_REINJECT] = wlan_srng_default_cfg; + g_wlan_srng_cfg[REO_CMD] = wlan_srng_default_cfg; + g_wlan_srng_cfg[REO_STATUS] = wlan_srng_default_cfg; + g_wlan_srng_cfg[TCL_DATA] = wlan_srng_default_cfg; + g_wlan_srng_cfg[TCL_CMD] = wlan_srng_default_cfg; + g_wlan_srng_cfg[TCL_STATUS] = wlan_srng_default_cfg; + g_wlan_srng_cfg[WBM_IDLE_LINK] = wlan_srng_default_cfg; + g_wlan_srng_cfg[SW2WBM_RELEASE] = wlan_srng_default_cfg; + g_wlan_srng_cfg[RXDMA_BUF] = wlan_srng_rxdma_buf_cfg; + g_wlan_srng_cfg[RXDMA_DST] = wlan_srng_default_cfg; + g_wlan_srng_cfg[RXDMA_MONITOR_BUF] = + wlan_srng_rxdma_monitor_buf_cfg; + g_wlan_srng_cfg[RXDMA_MONITOR_STATUS] = + wlan_srng_rxdma_monitor_status_cfg; + g_wlan_srng_cfg[RXDMA_MONITOR_DST] = wlan_srng_default_cfg; + g_wlan_srng_cfg[RXDMA_MONITOR_DESC] = wlan_srng_default_cfg; + g_wlan_srng_cfg[DIR_BUF_RX_DMA_SRC] = wlan_srng_default_cfg; +#ifdef WLAN_FEATURE_CIF_CFR + g_wlan_srng_cfg[WIFI_POS_SRC] = wlan_srng_default_cfg; +#endif + *wlan_cfg = g_wlan_srng_cfg; +} + +static const uint8_t rx_fst_toeplitz_key[WLAN_CFG_RX_FST_TOEPLITZ_KEYLEN] = { + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, + 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, + 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, + 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa +}; + +void wlan_cfg_fill_interrupt_mask(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx, + int interrupt_mode, + bool is_monitor_mode) { + int i = 0; + + if (interrupt_mode == DP_INTR_INTEGRATED) { + for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) { + wlan_cfg_ctx->int_tx_ring_mask[i] = + tx_ring_mask_integrated[i]; + wlan_cfg_ctx->int_rx_ring_mask[i] = + rx_ring_mask_integrated[i]; + wlan_cfg_ctx->int_rx_mon_ring_mask[i] = + rx_mon_ring_mask_integrated[i]; + wlan_cfg_ctx->int_rx_err_ring_mask[i] = + rx_err_ring_mask_integrated[i]; + wlan_cfg_ctx->int_rx_wbm_rel_ring_mask[i] = + rx_wbm_rel_ring_mask_integrated[i]; + wlan_cfg_ctx->int_reo_status_ring_mask[i] = + reo_status_ring_mask_integrated[i]; + wlan_cfg_ctx->int_rxdma2host_ring_mask[i] = + rxdma2host_ring_mask_integrated[i]; + wlan_cfg_ctx->int_host2rxdma_ring_mask[i] = + host2rxdma_ring_mask_integrated[i]; + wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[i] = + host2rxdma_mon_ring_mask_integrated[i]; + wlan_cfg_ctx->int_rxdma2host_mon_ring_mask[i] = + rxdma2host_mon_ring_mask_integrated[i]; + } + } else if (interrupt_mode == DP_INTR_MSI || interrupt_mode == + DP_INTR_POLL) { + for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) { + wlan_cfg_ctx->int_tx_ring_mask[i] = tx_ring_mask_msi[i]; + wlan_cfg_ctx->int_rx_mon_ring_mask[i] = + rx_mon_ring_mask_msi[i]; + wlan_cfg_ctx->int_rx_err_ring_mask[i] = + rx_err_ring_mask_msi[i]; + wlan_cfg_ctx->int_rx_wbm_rel_ring_mask[i] = + rx_wbm_rel_ring_mask_msi[i]; + wlan_cfg_ctx->int_reo_status_ring_mask[i] = + reo_status_ring_mask_msi[i]; + if (is_monitor_mode) { + wlan_cfg_ctx->int_rx_ring_mask[i] = 0; + wlan_cfg_ctx->int_rxdma2host_ring_mask[i] = 0; + } else { + wlan_cfg_ctx->int_rx_ring_mask[i] = + rx_ring_mask_msi[i]; + wlan_cfg_ctx->int_rxdma2host_ring_mask[i] = + rxdma2host_ring_mask_msi[i]; + } + wlan_cfg_ctx->int_host2rxdma_ring_mask[i] = + host2rxdma_ring_mask_msi[i]; + wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[i] = + host2rxdma_mon_ring_mask_msi[i]; + wlan_cfg_ctx->int_rxdma2host_mon_ring_mask[i] = + rxdma2host_mon_ring_mask_msi[i]; + } + } else { + qdf_err("Interrupt mode %d", interrupt_mode); + } +} + +/** + * wlan_cfg_soc_attach() - Allocate and prepare SoC configuration + * @psoc - Object manager psoc + * Return: wlan_cfg_ctx - Handle to Configuration context + */ +struct wlan_cfg_dp_soc_ctxt * +wlan_cfg_soc_attach(struct cdp_ctrl_objmgr_psoc *psoc) +{ + struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = + qdf_mem_malloc(sizeof(struct wlan_cfg_dp_soc_ctxt)); + uint32_t gro_bit_set; + + if (!wlan_cfg_ctx) + return NULL; + + wlan_cfg_ctx->rxdma1_enable = WLAN_CFG_RXDMA1_ENABLE; + wlan_cfg_ctx->num_int_ctxts = WLAN_CFG_INT_NUM_CONTEXTS; + wlan_cfg_ctx->max_clients = cfg_get(psoc, CFG_DP_MAX_CLIENTS); + wlan_cfg_ctx->max_alloc_size = cfg_get(psoc, CFG_DP_MAX_ALLOC_SIZE); + wlan_cfg_ctx->per_pdev_tx_ring = cfg_get(psoc, CFG_DP_PDEV_TX_RING); + wlan_cfg_ctx->num_tcl_data_rings = cfg_get(psoc, CFG_DP_TCL_DATA_RINGS); + wlan_cfg_ctx->per_pdev_rx_ring = cfg_get(psoc, CFG_DP_PDEV_RX_RING); + wlan_cfg_ctx->per_pdev_lmac_ring = cfg_get(psoc, CFG_DP_PDEV_LMAC_RING); + wlan_cfg_ctx->num_reo_dest_rings = cfg_get(psoc, CFG_DP_REO_DEST_RINGS); + wlan_cfg_ctx->num_tx_desc_pool = MAX_TXDESC_POOLS; + wlan_cfg_ctx->num_tx_ext_desc_pool = cfg_get(psoc, + CFG_DP_TX_EXT_DESC_POOLS); + wlan_cfg_ctx->num_tx_desc = cfg_get(psoc, CFG_DP_TX_DESC); + wlan_cfg_ctx->min_tx_desc = WLAN_CFG_NUM_TX_DESC_MIN; + wlan_cfg_ctx->num_tx_ext_desc = cfg_get(psoc, CFG_DP_TX_EXT_DESC); + wlan_cfg_ctx->htt_packet_type = cfg_get(psoc, CFG_DP_HTT_PACKET_TYPE); + wlan_cfg_ctx->max_peer_id = cfg_get(psoc, CFG_DP_MAX_PEER_ID); + + wlan_cfg_ctx->tx_ring_size = cfg_get(psoc, CFG_DP_TX_RING_SIZE); + wlan_cfg_ctx->tx_comp_ring_size = cfg_get(psoc, + CFG_DP_TX_COMPL_RING_SIZE); + + wlan_cfg_ctx->tx_comp_ring_size_nss = + cfg_get(psoc, CFG_DP_NSS_COMP_RING_SIZE); + + wlan_cfg_ctx->int_batch_threshold_tx = + cfg_get(psoc, CFG_DP_INT_BATCH_THRESHOLD_TX); + wlan_cfg_ctx->int_timer_threshold_tx = + cfg_get(psoc, CFG_DP_INT_TIMER_THRESHOLD_TX); + wlan_cfg_ctx->int_batch_threshold_rx = + cfg_get(psoc, CFG_DP_INT_BATCH_THRESHOLD_RX); + wlan_cfg_ctx->int_timer_threshold_rx = + cfg_get(psoc, CFG_DP_INT_TIMER_THRESHOLD_RX); + wlan_cfg_ctx->int_batch_threshold_other = + cfg_get(psoc, CFG_DP_INT_BATCH_THRESHOLD_OTHER); + wlan_cfg_ctx->int_timer_threshold_other = + cfg_get(psoc, CFG_DP_INT_TIMER_THRESHOLD_OTHER); + wlan_cfg_ctx->pktlog_buffer_size = + cfg_get(psoc, CFG_DP_PKTLOG_BUFFER_SIZE); + + /* This is default mapping and can be overridden by HW config + * received from FW */ + wlan_cfg_set_hw_mac_idx(wlan_cfg_ctx, 0, 0); + if (MAX_PDEV_CNT > 1) + wlan_cfg_set_hw_mac_idx(wlan_cfg_ctx, 1, 2); + if (MAX_PDEV_CNT > 2) + wlan_cfg_set_hw_mac_idx(wlan_cfg_ctx, 2, 1); + + wlan_cfg_ctx->base_hw_macid = cfg_get(psoc, CFG_DP_BASE_HW_MAC_ID); + + wlan_cfg_ctx->rx_hash = cfg_get(psoc, CFG_DP_RX_HASH); + wlan_cfg_ctx->tso_enabled = cfg_get(psoc, CFG_DP_TSO); + wlan_cfg_ctx->lro_enabled = cfg_get(psoc, CFG_DP_LRO); + wlan_cfg_ctx->sg_enabled = cfg_get(psoc, CFG_DP_SG); + gro_bit_set = cfg_get(psoc, CFG_DP_GRO); + if (gro_bit_set & DP_GRO_ENABLE_BIT_SET) { + wlan_cfg_ctx->gro_enabled = true; + if (gro_bit_set & DP_TC_BASED_DYNAMIC_GRO) + wlan_cfg_ctx->tc_based_dynamic_gro = true; + } + wlan_cfg_ctx->tc_ingress_prio = cfg_get(psoc, CFG_DP_TC_INGRESS_PRIO); + wlan_cfg_ctx->ol_tx_csum_enabled = cfg_get(psoc, CFG_DP_OL_TX_CSUM); + wlan_cfg_ctx->ol_rx_csum_enabled = cfg_get(psoc, CFG_DP_OL_RX_CSUM); + wlan_cfg_ctx->rawmode_enabled = cfg_get(psoc, CFG_DP_RAWMODE); + wlan_cfg_ctx->peer_flow_ctrl_enabled = + cfg_get(psoc, CFG_DP_PEER_FLOW_CTRL); + wlan_cfg_ctx->napi_enabled = cfg_get(psoc, CFG_DP_NAPI); + wlan_cfg_ctx->p2p_tcp_udp_checksumoffload = + cfg_get(psoc, CFG_DP_P2P_TCP_UDP_CKSUM_OFFLOAD); + wlan_cfg_ctx->nan_tcp_udp_checksumoffload = + cfg_get(psoc, CFG_DP_NAN_TCP_UDP_CKSUM_OFFLOAD); + wlan_cfg_ctx->tcp_udp_checksumoffload = + cfg_get(psoc, CFG_DP_TCP_UDP_CKSUM_OFFLOAD); + wlan_cfg_ctx->legacy_mode_checksumoffload_disable = + cfg_get(psoc, CFG_DP_LEGACY_MODE_CSUM_DISABLE); + wlan_cfg_ctx->per_pkt_trace = cfg_get(psoc, CFG_DP_PER_PKT_LOGGING); + wlan_cfg_ctx->defrag_timeout_check = + cfg_get(psoc, CFG_DP_DEFRAG_TIMEOUT_CHECK); + wlan_cfg_ctx->rx_defrag_min_timeout = + cfg_get(psoc, CFG_DP_RX_DEFRAG_TIMEOUT); + + wlan_cfg_ctx->wbm_release_ring = cfg_get(psoc, + CFG_DP_WBM_RELEASE_RING); + wlan_cfg_ctx->tcl_cmd_ring = cfg_get(psoc, + CFG_DP_TCL_CMD_RING); + wlan_cfg_ctx->tcl_status_ring = cfg_get(psoc, + CFG_DP_TCL_STATUS_RING); + wlan_cfg_ctx->reo_reinject_ring = cfg_get(psoc, + CFG_DP_REO_REINJECT_RING); + wlan_cfg_ctx->rx_release_ring = cfg_get(psoc, + CFG_DP_RX_RELEASE_RING); + wlan_cfg_ctx->reo_exception_ring = cfg_get(psoc, + CFG_DP_REO_EXCEPTION_RING); + wlan_cfg_ctx->reo_cmd_ring = cfg_get(psoc, + CFG_DP_REO_CMD_RING); + wlan_cfg_ctx->reo_status_ring = cfg_get(psoc, + CFG_DP_REO_STATUS_RING); + wlan_cfg_ctx->rxdma_refill_ring = cfg_get(psoc, + CFG_DP_RXDMA_REFILL_RING); + wlan_cfg_ctx->tx_desc_limit_0 = cfg_get(psoc, + CFG_DP_TX_DESC_LIMIT_0); + wlan_cfg_ctx->tx_desc_limit_1 = cfg_get(psoc, + CFG_DP_TX_DESC_LIMIT_1); + wlan_cfg_ctx->tx_desc_limit_2 = cfg_get(psoc, + CFG_DP_TX_DESC_LIMIT_2); + wlan_cfg_ctx->tx_device_limit = cfg_get(psoc, + CFG_DP_TX_DEVICE_LIMIT); + wlan_cfg_ctx->tx_sw_internode_queue = cfg_get(psoc, + CFG_DP_TX_SW_INTERNODE_QUEUE); + wlan_cfg_ctx->rxdma_err_dst_ring = cfg_get(psoc, + CFG_DP_RXDMA_ERR_DST_RING); + wlan_cfg_ctx->enable_data_stall_detection = + cfg_get(psoc, CFG_DP_ENABLE_DATA_STALL_DETECTION); + wlan_cfg_ctx->enable_force_rx_64_ba = + cfg_get(psoc, CFG_FORCE_RX_64_BA); + wlan_cfg_ctx->tx_flow_start_queue_offset = + cfg_get(psoc, CFG_DP_TX_FLOW_START_QUEUE_OFFSET); + wlan_cfg_ctx->tx_flow_stop_queue_threshold = + cfg_get(psoc, CFG_DP_TX_FLOW_STOP_QUEUE_TH); + wlan_cfg_ctx->disable_intra_bss_fwd = + cfg_get(psoc, CFG_DP_AP_STA_SECURITY_SEPERATION); + wlan_cfg_ctx->rx_sw_desc_weight = cfg_get(psoc, + CFG_DP_RX_SW_DESC_WEIGHT); + wlan_cfg_ctx->rx_toeplitz_hash_key = (uint8_t *)rx_fst_toeplitz_key; + wlan_cfg_ctx->rx_flow_max_search = WLAN_CFG_RX_FST_MAX_SEARCH; + wlan_cfg_ctx->is_rx_flow_tag_enabled = + cfg_get(psoc, CFG_DP_RX_FLOW_TAG_ENABLE); + wlan_cfg_ctx->is_rx_flow_search_table_per_pdev = + cfg_get(psoc, CFG_DP_RX_FLOW_SEARCH_TABLE_PER_PDEV); + wlan_cfg_ctx->rx_flow_search_table_size = + cfg_get(psoc, CFG_DP_RX_FLOW_SEARCH_TABLE_SIZE); + wlan_cfg_ctx->is_rx_mon_protocol_flow_tag_enabled = + cfg_get(psoc, CFG_DP_RX_MON_PROTOCOL_FLOW_TAG_ENABLE); + wlan_cfg_ctx->mon_drop_thresh = + cfg_get(psoc, CFG_DP_RXDMA_MONITOR_RX_DROP_THRESHOLD); + wlan_cfg_ctx->is_rx_fisa_enabled = cfg_get(psoc, CFG_DP_RX_FISA_ENABLE); + wlan_cfg_ctx->rx_pending_high_threshold = + cfg_get(psoc, CFG_DP_RX_PENDING_HL_THRESHOLD); + wlan_cfg_ctx->rx_pending_low_threshold = + cfg_get(psoc, CFG_DP_RX_PENDING_LO_THRESHOLD); + wlan_cfg_ctx->wow_check_rx_pending_enable = + cfg_get(psoc, CFG_DP_WOW_CHECK_RX_PENDING); + + return wlan_cfg_ctx; +} + +void wlan_cfg_soc_detach(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx) +{ + qdf_mem_free(wlan_cfg_ctx); +} + +struct wlan_cfg_dp_pdev_ctxt * +wlan_cfg_pdev_attach(struct cdp_ctrl_objmgr_psoc *psoc) +{ + struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx = + qdf_mem_malloc(sizeof(struct wlan_cfg_dp_pdev_ctxt)); + + if (!wlan_cfg_ctx) + return NULL; + + wlan_cfg_ctx->rx_dma_buf_ring_size = cfg_get(psoc, + CFG_DP_RXDMA_BUF_RING); + wlan_cfg_ctx->dma_mon_buf_ring_size = cfg_get(psoc, + CFG_DP_RXDMA_MONITOR_BUF_RING); + wlan_cfg_ctx->dma_mon_dest_ring_size = cfg_get(psoc, + CFG_DP_RXDMA_MONITOR_DST_RING); + wlan_cfg_ctx->dma_mon_status_ring_size = cfg_get(psoc, + CFG_DP_RXDMA_MONITOR_STATUS_RING); + wlan_cfg_ctx->rxdma_monitor_desc_ring = cfg_get(psoc, + CFG_DP_RXDMA_MONITOR_DESC_RING); + wlan_cfg_ctx->num_mac_rings = NUM_RXDMA_RINGS_PER_PDEV; + + return wlan_cfg_ctx; +} + +void wlan_cfg_pdev_detach(struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx) +{ + if (wlan_cfg_ctx) + qdf_mem_free(wlan_cfg_ctx); +} + +int wlan_cfg_get_mon_drop_thresh(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->mon_drop_thresh; +} + +void wlan_cfg_set_num_contexts(struct wlan_cfg_dp_soc_ctxt *cfg, int num) +{ + cfg->num_int_ctxts = num; +} + +void wlan_cfg_set_max_peer_id(struct wlan_cfg_dp_soc_ctxt *cfg, uint32_t val) +{ + cfg->max_peer_id = val; +} + +void wlan_cfg_set_max_ast_idx(struct wlan_cfg_dp_soc_ctxt *cfg, uint32_t val) +{ + cfg->max_ast_idx = val; +} + +int wlan_cfg_get_max_ast_idx(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->max_ast_idx; +} + +void wlan_cfg_set_tx_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + cfg->int_tx_ring_mask[context] = mask; +} + +void wlan_cfg_set_rx_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + cfg->int_rx_ring_mask[context] = mask; +} + +void wlan_cfg_set_rx_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + cfg->int_rx_mon_ring_mask[context] = mask; +} + +int wlan_cfg_get_host2rxdma_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context) +{ + return cfg->int_host2rxdma_mon_ring_mask[context]; +} + +void wlan_cfg_set_host2rxdma_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + cfg->int_host2rxdma_mon_ring_mask[context] = mask; +} + +int wlan_cfg_get_rxdma2host_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context) +{ + return cfg->int_rxdma2host_mon_ring_mask[context]; +} + +void wlan_cfg_set_rxdma2host_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + cfg->int_rxdma2host_mon_ring_mask[context] = mask; +} + +void wlan_cfg_set_rxdma2host_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + cfg->int_rxdma2host_ring_mask[context] = mask; +} + +int wlan_cfg_get_rxdma2host_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context) +{ + return cfg->int_rxdma2host_ring_mask[context]; +} + +void wlan_cfg_set_host2rxdma_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + cfg->int_host2rxdma_ring_mask[context] = mask; +} + +int wlan_cfg_get_host2rxdma_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context) +{ + return cfg->int_host2rxdma_ring_mask[context]; +} + +void wlan_cfg_set_hw_mac_idx(struct wlan_cfg_dp_soc_ctxt *cfg, int pdev_idx, + int hw_macid) +{ + qdf_assert_always(pdev_idx < MAX_PDEV_CNT); + cfg->hw_macid[pdev_idx] = hw_macid; +} + +int wlan_cfg_get_hw_mac_idx(struct wlan_cfg_dp_soc_ctxt *cfg, int pdev_idx) +{ + qdf_assert_always(pdev_idx < MAX_PDEV_CNT); + return cfg->hw_macid[pdev_idx]; +} + +int wlan_cfg_get_target_pdev_id(struct wlan_cfg_dp_soc_ctxt *cfg, + int hw_macid) +{ + int idx; + + for (idx = 0; idx < MAX_PDEV_CNT; idx++) { + if (cfg->hw_macid[idx] == hw_macid) + return (idx + 1); + } + qdf_assert_always(idx < MAX_PDEV_CNT); + return WLAN_INVALID_PDEV_ID; +} + +void wlan_cfg_set_pdev_idx(struct wlan_cfg_dp_soc_ctxt *cfg, int pdev_idx, + int hw_macid) +{ + qdf_assert_always(pdev_idx < MAX_PDEV_CNT); + qdf_assert_always(hw_macid < MAX_NUM_LMAC_HW); + + cfg->hw_macid_pdev_id_map[hw_macid] = pdev_idx; +} + +int wlan_cfg_get_pdev_idx(struct wlan_cfg_dp_soc_ctxt *cfg, int hw_macid) +{ + qdf_assert_always(hw_macid < MAX_NUM_LMAC_HW); + return cfg->hw_macid_pdev_id_map[hw_macid]; +} + +void wlan_cfg_set_ce_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + cfg->int_ce_ring_mask[context] = mask; +} + +void wlan_cfg_set_rxbuf_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int context, + int mask) +{ + cfg->int_rx_ring_mask[context] = mask; +} + +int wlan_cfg_set_rx_err_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + return cfg->int_rx_err_ring_mask[context] = mask; +} + +int wlan_cfg_set_rx_wbm_rel_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + return cfg->int_rx_wbm_rel_ring_mask[context] = mask; +} + +int wlan_cfg_set_reo_status_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + return cfg->int_reo_status_ring_mask[context] = mask; +} + +int wlan_cfg_get_num_contexts(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->num_int_ctxts; +} + +int wlan_cfg_get_tx_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int context) +{ + return cfg->int_tx_ring_mask[context]; +} + +int wlan_cfg_get_rx_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int context) +{ + return cfg->int_rx_ring_mask[context]; +} + +int wlan_cfg_get_rx_err_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context) +{ + return cfg->int_rx_err_ring_mask[context]; +} + +int wlan_cfg_get_rx_wbm_rel_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context) +{ + return cfg->int_rx_wbm_rel_ring_mask[context]; +} + +int wlan_cfg_get_reo_status_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context) +{ + return cfg->int_reo_status_ring_mask[context]; +} + +int wlan_cfg_get_rx_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int context) +{ + return cfg->int_rx_mon_ring_mask[context]; +} + +int wlan_cfg_get_ce_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int context) +{ + return cfg->int_ce_ring_mask[context]; +} + +uint32_t wlan_cfg_get_max_clients(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->max_clients; +} + +uint32_t wlan_cfg_max_alloc_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->max_alloc_size; +} + +int wlan_cfg_per_pdev_tx_ring(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->per_pdev_tx_ring; +} + +uint32_t +wlan_cfg_rx_pending_hl_threshold(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->rx_pending_high_threshold; +} + +uint32_t +wlan_cfg_rx_pending_lo_threshold(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->rx_pending_low_threshold; +} + +int wlan_cfg_per_pdev_lmac_ring(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->per_pdev_lmac_ring; +} + +int wlan_cfg_num_tcl_data_rings(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->num_tcl_data_rings; +} + +int wlan_cfg_tx_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tx_ring_size; +} + +int wlan_cfg_tx_comp_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tx_comp_ring_size; +} + +int wlan_cfg_per_pdev_rx_ring(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->per_pdev_rx_ring; +} + +int wlan_cfg_num_reo_dest_rings(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->num_reo_dest_rings; +} + +int wlan_cfg_pkt_type(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->htt_packet_type; /*htt_pkt_type_ethernet*/ +} + +int wlan_cfg_get_num_tx_desc_pool(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->num_tx_desc_pool; +} + +void wlan_cfg_set_num_tx_desc_pool(struct wlan_cfg_dp_soc_ctxt *cfg, int num_pool) +{ + cfg->num_tx_desc_pool = num_pool; +} + +int wlan_cfg_get_num_tx_ext_desc_pool(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->num_tx_ext_desc_pool; +} + +void wlan_cfg_set_num_tx_ext_desc_pool(struct wlan_cfg_dp_soc_ctxt *cfg, int num_pool) +{ + cfg->num_tx_ext_desc_pool = num_pool; +} + +int wlan_cfg_get_reo_dst_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->reo_dst_ring_size; +} + +void wlan_cfg_set_reo_dst_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg, + int reo_dst_ring_size) +{ + cfg->reo_dst_ring_size = reo_dst_ring_size; +} + +void wlan_cfg_set_raw_mode_war(struct wlan_cfg_dp_soc_ctxt *cfg, + bool raw_mode_war) +{ + cfg->raw_mode_war = raw_mode_war; +} + +bool wlan_cfg_get_raw_mode_war(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->raw_mode_war; +} + +int wlan_cfg_get_num_tx_desc(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->num_tx_desc; +} + +void wlan_cfg_set_num_tx_desc(struct wlan_cfg_dp_soc_ctxt *cfg, int num_desc) +{ + cfg->num_tx_desc = num_desc; +} + +int wlan_cfg_get_min_tx_desc(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->min_tx_desc; +} + +int wlan_cfg_get_num_tx_ext_desc(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->num_tx_ext_desc; +} + +void wlan_cfg_set_num_tx_ext_desc(struct wlan_cfg_dp_soc_ctxt *cfg, int num_ext_desc) +{ + cfg->num_tx_ext_desc = num_ext_desc; +} + +uint32_t wlan_cfg_max_peer_id(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + /* TODO: This should be calculated based on target capabilities */ + return cfg->max_peer_id; +} + +int wlan_cfg_get_dma_mon_buf_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg) +{ + return cfg->dma_mon_buf_ring_size; +} + +int wlan_cfg_get_dma_mon_dest_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg) +{ + return cfg->dma_mon_dest_ring_size; +} + +int wlan_cfg_get_dma_mon_stat_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg) +{ + return cfg->dma_mon_status_ring_size; +} + +int +wlan_cfg_get_dma_mon_desc_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg) +{ + return cfg->rxdma_monitor_desc_ring; +} + +int wlan_cfg_get_rx_dma_buf_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg) +{ + return cfg->rx_dma_buf_ring_size; +} + +int wlan_cfg_get_num_mac_rings(struct wlan_cfg_dp_pdev_ctxt *cfg) +{ + return cfg->num_mac_rings; +} + +bool wlan_cfg_is_gro_enabled(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->gro_enabled; +} + +bool wlan_cfg_is_lro_enabled(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->lro_enabled; +} + +bool wlan_cfg_is_ipa_enabled(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->ipa_enabled; +} + +void wlan_cfg_set_rx_hash(struct wlan_cfg_dp_soc_ctxt *cfg, bool val) +{ + cfg->rx_hash = val; +} + +bool wlan_cfg_is_rx_hash_enabled(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->rx_hash; +} + +int wlan_cfg_get_dp_pdev_nss_enabled(struct wlan_cfg_dp_pdev_ctxt *cfg) +{ + return cfg->nss_enabled; +} + +void wlan_cfg_set_dp_pdev_nss_enabled(struct wlan_cfg_dp_pdev_ctxt *cfg, int nss_enabled) +{ + cfg->nss_enabled = nss_enabled; +} + +int wlan_cfg_get_dp_soc_nss_cfg(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->nss_cfg; +} + +void wlan_cfg_set_dp_soc_nss_cfg(struct wlan_cfg_dp_soc_ctxt *cfg, int nss_cfg) +{ + cfg->nss_cfg = nss_cfg; + if (cfg->nss_cfg) + cfg->tx_comp_ring_size = cfg->tx_comp_ring_size_nss; +} + +int wlan_cfg_get_int_batch_threshold_tx(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->int_batch_threshold_tx; +} + +int wlan_cfg_get_int_timer_threshold_tx(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->int_timer_threshold_tx; +} + +int wlan_cfg_get_int_batch_threshold_rx(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->int_batch_threshold_rx; +} + +int wlan_cfg_get_int_timer_threshold_rx(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->int_timer_threshold_rx; +} + +int wlan_cfg_get_int_batch_threshold_other(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->int_batch_threshold_other; +} + +int wlan_cfg_get_int_timer_threshold_other(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->int_timer_threshold_other; +} + +int wlan_cfg_get_int_timer_threshold_mon(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->int_timer_threshold_mon; +} + +int wlan_cfg_get_p2p_checksum_offload(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->p2p_tcp_udp_checksumoffload; +} + +int wlan_cfg_get_nan_checksum_offload(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->nan_tcp_udp_checksumoffload; +} + +int wlan_cfg_get_checksum_offload(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tcp_udp_checksumoffload; +} + +int wlan_cfg_get_rx_defrag_min_timeout(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->rx_defrag_min_timeout; +} + +int wlan_cfg_get_defrag_timeout_check(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->defrag_timeout_check; +} + +int +wlan_cfg_get_dp_soc_wbm_release_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->wbm_release_ring; +} + +int +wlan_cfg_get_dp_soc_tcl_cmd_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tcl_cmd_ring; +} + +int +wlan_cfg_get_dp_soc_tcl_status_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tcl_status_ring; +} + +int +wlan_cfg_get_dp_soc_reo_reinject_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->reo_reinject_ring; +} + +int +wlan_cfg_get_dp_soc_rx_release_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->rx_release_ring; +} + +int +wlan_cfg_get_dp_soc_reo_exception_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->reo_exception_ring; +} + +int +wlan_cfg_get_dp_soc_reo_cmd_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->reo_cmd_ring; +} + +int +wlan_cfg_get_dp_soc_reo_status_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->reo_status_ring; +} + +int +wlan_cfg_get_dp_soc_rxdma_refill_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->rxdma_refill_ring; +} + +int +wlan_cfg_get_dp_soc_tx_desc_limit_0(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tx_desc_limit_0; +} + +int +wlan_cfg_get_dp_soc_tx_desc_limit_1(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tx_desc_limit_1; +} + +int +wlan_cfg_get_dp_soc_tx_desc_limit_2(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tx_desc_limit_2; +} + +int +wlan_cfg_get_dp_soc_tx_device_limit(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tx_device_limit; +} + +int +wlan_cfg_get_dp_soc_tx_sw_internode_queue(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tx_sw_internode_queue; +} + +int +wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->rxdma_err_dst_ring; +} + +int +wlan_cfg_get_dp_soc_rx_sw_desc_weight(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->rx_sw_desc_weight; +} + +bool +wlan_cfg_get_dp_caps(struct wlan_cfg_dp_soc_ctxt *cfg, + enum cdp_capabilities dp_caps) +{ + switch (dp_caps) { + case CDP_CFG_DP_TSO: + return cfg->tso_enabled; + case CDP_CFG_DP_LRO: + return cfg->lro_enabled; + case CDP_CFG_DP_SG: + return cfg->sg_enabled; + case CDP_CFG_DP_GRO: + return cfg->gro_enabled; + case CDP_CFG_DP_OL_TX_CSUM: + return cfg->ol_tx_csum_enabled; + case CDP_CFG_DP_OL_RX_CSUM: + return cfg->ol_rx_csum_enabled; + case CDP_CFG_DP_RAWMODE: + return cfg->rawmode_enabled; + case CDP_CFG_DP_PEER_FLOW_CTRL: + return cfg->peer_flow_ctrl_enabled; + default: + return false; + } +} + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +/** + * wlan_cfg_get_tx_flow_stop_queue_th() - Get flow control stop threshold + * @cfg: config context + * + * Return: stop threshold + */ +int wlan_cfg_get_tx_flow_stop_queue_th(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tx_flow_stop_queue_threshold; +} + +/** + * wlan_cfg_get_tx_flow_start_queue_offset() - Get flow control start offset + * for TX to resume + * @cfg: config context + * + * Return: stop threshold + */ +int wlan_cfg_get_tx_flow_start_queue_offset(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tx_flow_start_queue_offset; +} +#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ + +void wlan_cfg_set_rx_flow_tag_enabled(struct wlan_cfg_dp_soc_ctxt *cfg, + bool val) +{ + cfg->is_rx_flow_tag_enabled = val; +} + +uint8_t *wlan_cfg_rx_fst_get_hash_key(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->rx_toeplitz_hash_key; +} + +uint8_t wlan_cfg_rx_fst_get_max_search(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->rx_flow_max_search; +} + +bool wlan_cfg_is_rx_flow_tag_enabled(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->is_rx_flow_tag_enabled; +} + +#ifdef WLAN_SUPPORT_RX_FISA +bool wlan_cfg_is_rx_fisa_enabled(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return (bool)(cfg->is_rx_fisa_enabled); +} +#else +bool wlan_cfg_is_rx_fisa_enabled(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return false; +} +#endif + +void +wlan_cfg_set_rx_flow_search_table_per_pdev(struct wlan_cfg_dp_soc_ctxt *cfg, + bool val) +{ + cfg->is_rx_flow_search_table_per_pdev = val; +} + +bool wlan_cfg_is_rx_flow_search_table_per_pdev(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->is_rx_flow_search_table_per_pdev; +} + +void wlan_cfg_set_rx_flow_search_table_size(struct wlan_cfg_dp_soc_ctxt *cfg, + uint16_t val) +{ + cfg->rx_flow_search_table_size = val; +} + +uint16_t +wlan_cfg_get_rx_flow_search_table_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->rx_flow_search_table_size; +} + +void +wlan_cfg_set_rx_mon_protocol_flow_tag_enabled(struct wlan_cfg_dp_soc_ctxt *cfg, + bool val) +{ + cfg->is_rx_mon_protocol_flow_tag_enabled = val; +} + +bool +wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->is_rx_mon_protocol_flow_tag_enabled; +} + +bool wlan_cfg_is_dp_force_rx_64_ba(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->enable_force_rx_64_ba; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wlan_cfg/wlan_cfg.h b/drivers/staging/qca-wifi-host-cmn/wlan_cfg/wlan_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..7fd657610af65d07597eca7acd4fe8169f86d686 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wlan_cfg/wlan_cfg.h @@ -0,0 +1,1350 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __WLAN_CFG_H +#define __WLAN_CFG_H + +/* + * Temporary place holders. These should come either from target config + * or platform configuration + */ +#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1) +#define WLAN_CFG_DST_RING_CACHED_DESC 0 +#define MAX_PDEV_CNT 1 +#define WLAN_CFG_INT_NUM_CONTEXTS 7 +#define WLAN_CFG_RXDMA1_ENABLE 1 +/* + * This mask defines how many transmit frames account for 1 NAPI work unit + * 0 means each tx completion is 1 unit + */ +#define DP_TX_NAPI_BUDGET_DIV_MASK 0 + +/* PPDU Stats Configuration - Configure bitmask for enabling tx ppdu tlv's */ +#define DP_PPDU_TXLITE_STATS_BITMASK_CFG 0x3FFF + +#define NUM_RXDMA_RINGS_PER_PDEV 2 + +/*Maximum Number of LMAC instances*/ +#define MAX_NUM_LMAC_HW 2 +#else +#define WLAN_CFG_DST_RING_CACHED_DESC 1 +#define MAX_PDEV_CNT 3 +#define WLAN_CFG_INT_NUM_CONTEXTS 11 +#define NUM_RXDMA_RINGS_PER_PDEV 1 +#define MAX_NUM_LMAC_HW 3 + +#endif + +#define WLAN_CFG_INT_NUM_CONTEXTS_MAX 11 + +/* Tx configuration */ +#define MAX_LINK_DESC_BANKS 8 +#define MAX_TXDESC_POOLS 4 +#define MAX_TCL_DATA_RINGS 4 + +/* Rx configuration */ +#define MAX_RXDESC_POOLS 4 +#define MAX_REO_DEST_RINGS 4 +#define MAX_RX_MAC_RINGS 2 + +/* DP process status */ +#if defined(MAX_PDEV_CNT) && (MAX_PDEV_CNT == 1) +#define CONFIG_PROCESS_RX_STATUS 1 +#define CONFIG_PROCESS_TX_STATUS 1 +#else +#define CONFIG_PROCESS_RX_STATUS 0 +#define CONFIG_PROCESS_TX_STATUS 0 +#endif + +/* Miscellaneous configuration */ +#define MAX_IDLE_SCATTER_BUFS 16 +#define DP_MAX_IRQ_PER_CONTEXT 12 +#define MAX_HTT_METADATA_LEN 32 +#define MAX_NUM_PEER_ID_PER_PEER 8 +#define DP_MAX_TIDS 17 +#define DP_NON_QOS_TID 16 +#define DP_NULL_DATA_TID 17 + +#define WLAN_CFG_RX_FST_MAX_SEARCH 2 +#define WLAN_CFG_RX_FST_TOEPLITZ_KEYLEN 40 + +struct wlan_cfg_dp_pdev_ctxt; + +/** + * struct wlan_srng_cfg - Per ring configuration parameters + * @timer_threshold: Config to control interrupts based on timer duration + * @batch_count_threshold: Config to control interrupts based on + * number of packets in the ring + * @low_threshold: Config to control low threshold interrupts for SRC rings + */ +struct wlan_srng_cfg { + uint32_t timer_threshold; + uint32_t batch_count_threshold; + uint32_t low_threshold; +}; + +/** + * struct wlan_cfg_dp_soc_ctxt - Configuration parameters for SoC (core TxRx) + * @num_int_ctxts: Number of NAPI/Interrupt contexts to be registered for DP + * @max_clients: Maximum number of peers/stations supported by device + * @max_alloc_size: Maximum allocation size for any dynamic memory + * allocation request for this device + * @per_pdev_tx_ring: 0: TCL ring is not mapped per radio + * 1: Each TCL ring is mapped to one radio/pdev + * @num_tcl_data_rings: Number of TCL Data rings supported by device + * @per_pdev_rx_ring: 0: REO ring is not mapped per radio + * 1: Each REO ring is mapped to one radio/pdev + * @num_tx_desc_pool: Number of Tx Descriptor pools + * @num_tx_ext_desc_pool: Number of Tx MSDU extension Descriptor pools + * @num_tx_desc: Number of Tx Descriptors per pool + * @min_tx_desc: Minimum number of Tx Descriptors per pool + * @num_tx_ext_desc: Number of Tx MSDU extension Descriptors per pool + * @max_peer_id: Maximum value of peer id that FW can assign for a client + * @htt_packet_type: Default 802.11 encapsulation type for any VAP created + * @int_tx_ring_mask: Bitmap of Tx interrupts mapped to each NAPI/Intr context + * @int_rx_ring_mask: Bitmap of Rx interrupts mapped to each NAPI/Intr context + * @int_rx_mon_ring_mask: Bitmap of Rx monitor ring interrupts mapped to each + * NAPI/Intr context + * @int_rx_err_ring_mask: Bitmap of Rx err ring interrupts mapped to each + * NAPI/Intr context + * @int_wbm_rel_ring_mask: Bitmap of wbm rel ring interrupts mapped to each + * NAPI/Intr context + * @int_reo_status_ring_mask: Bitmap of reo status ring interrupts mapped to + * each NAPI/Intr context + * @int_ce_ring_mask: Bitmap of CE interrupts mapped to each NAPI/Intr context + * @lro_enabled: enable/disable lro feature + * @rx_hash: Enable hash based steering of rx packets + * @tso_enabled: enable/disable tso feature + * @lro_enabled: enable/disable LRO feature + * @sg_enabled: enable disable scatter gather feature + * @gro_enabled: enable disable GRO feature + * @tc_based_dynamic_gro: enable/disable tc based dynamic gro + * @tc_ingress_prio: ingress prio to be checked for dynamic gro + * @ipa_enabled: Flag indicating if IPA is enabled + * @ol_tx_csum_enabled: Flag indicating if TX csum is enabled + * @ol_rx_csum_enabled: Flag indicating if Rx csum is enabled + * @rawmode_enabled: Flag indicating if RAW mode is enabled + * @peer_flow_ctrl_enabled: Flag indicating if peer flow control is enabled + * @napi_enabled: enable/disable interrupt mode for reaping tx and rx packets + * @p2p_tcp_udp_checksumoffload: enable/disable checksum offload for P2P mode + * @nan_tcp_udp_checksumoffload: enable/disable checksum offload for NAN mode + * @tcp_udp_checksumoffload: enable/disable checksum offload + * @nss_cfg: nss configuration + * @rx_defrag_min_timeout: rx defrag minimum timeout + * @wbm_release_ring: wbm release ring size + * @tcl_cmd_ring: tcl cmd ring size + * @tcl_status_ring: tcl status ring size + * @reo_reinject_ring: reo reinject ring + * @rx_release_ring: rx release ring size + * @reo_exception_ring: reo exception ring size + * @reo_cmd_ring: reo cmd ring size + * @reo_status_ring: reo status ting size + * @rxdma_refill_ring: rxdma refill ring size + * @rxdma_err_dst_ring: rxdma error detination ring size + * @raw_mode_war: enable/disable raw mode war + * @enable_data_stall_detection: flag to enable data stall detection + * @enable_force_rx_64_ba: flag to enable force 64 blockack in RX + * @disable_intra_bss_fwd: flag to disable intra bss forwarding + * @rxdma1_enable: flag to indicate if rxdma1 is enabled + * @tx_desc_limit_0: tx_desc limit for 5G H + * @tx_desc_limit_1: tx_desc limit for 2G + * @tx_desc_limit_2: tx_desc limit for 5G L + * @tx_device_limit: tx device limit + * @tx_sw_internode_queue: tx sw internode queue + * @tx_comp_loop_pkt_limit: Max # of packets to be processed in 1 tx comp loop + * @rx_reap_loop_pkt_limit: Max # of packets to be processed in 1 rx reap loop + * @rx_hp_oos_update_limit: Max # of HP OOS (out of sync) updates + * @rx_enable_eol_data_check: flag to enable check for more ring data at end of + * dp_rx_process loop + * @tx_comp_enable_eol_data_check: flag to enable/disable checking for more data + * at end of tx_comp_handler loop. + * @rx_sw_desc_weight: rx sw descriptor weight configuration + * @is_rx_mon_protocol_flow_tag_enabled: flag to enable/disable RX protocol or + * flow tagging in monitor/mon-lite mode + * @is_rx_flow_tag_enabled: flag to enable/disable RX flow tagging using FSE + * @is_rx_flow_search_table_per_pdev: flag to indicate if a per-SOC or per-pdev + * table should be used + * @rx_flow_search_table_size: indicates the number of flows in the flow search + * table + * @rx_flow_max_search: max skid length for each hash entry + * @rx_toeplitz_hash_key: toeplitz key pointer used for hash computation over + * 5 tuple flow entry + * @pktlog_buffer_size: packet log buffer size + * @is_rx_fisa_enabled: flag to enable/disable FISA Rx + * @rx_pending_high_threshold: threshold of starting pkt drop + * @rx_pending_low_threshold: threshold of stopping pkt drop + * @wow_check_rx_pending_enable: Enable RX frame pending check in WoW + */ +struct wlan_cfg_dp_soc_ctxt { + int num_int_ctxts; + int max_clients; + int max_alloc_size; + int per_pdev_tx_ring; + int num_tcl_data_rings; + int per_pdev_rx_ring; + int per_pdev_lmac_ring; + int num_reo_dest_rings; + int num_tx_desc_pool; + int num_tx_ext_desc_pool; + int num_tx_desc; + int min_tx_desc; + int num_tx_ext_desc; + int max_peer_id; + int htt_packet_type; + int int_batch_threshold_tx; + int int_timer_threshold_tx; + int int_batch_threshold_rx; + int int_timer_threshold_rx; + int int_batch_threshold_other; + int int_timer_threshold_other; + int int_timer_threshold_mon; + int tx_ring_size; + int tx_comp_ring_size; + int tx_comp_ring_size_nss; + int int_tx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_rx_mon_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_host2rxdma_mon_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_rxdma2host_mon_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_ce_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_rx_wbm_rel_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_reo_status_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_rxdma2host_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_host2rxdma_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int hw_macid[MAX_PDEV_CNT]; + int hw_macid_pdev_id_map[MAX_NUM_LMAC_HW]; + int base_hw_macid; + bool rx_hash; + bool tso_enabled; + bool lro_enabled; + bool sg_enabled; + bool gro_enabled; + bool tc_based_dynamic_gro; + uint32_t tc_ingress_prio; + bool ipa_enabled; + bool ol_tx_csum_enabled; + bool ol_rx_csum_enabled; + bool rawmode_enabled; + bool peer_flow_ctrl_enabled; + bool napi_enabled; + bool p2p_tcp_udp_checksumoffload; + bool nan_tcp_udp_checksumoffload; + bool tcp_udp_checksumoffload; + bool legacy_mode_checksumoffload_disable; + bool defrag_timeout_check; + int nss_cfg; + uint32_t tx_flow_stop_queue_threshold; + uint32_t tx_flow_start_queue_offset; + int rx_defrag_min_timeout; + int reo_dst_ring_size; + int wbm_release_ring; + int tcl_cmd_ring; + int tcl_status_ring; + int reo_reinject_ring; + int rx_release_ring; + int reo_exception_ring; + int reo_cmd_ring; + int reo_status_ring; + int rxdma_refill_ring; + int rxdma_err_dst_ring; + uint32_t per_pkt_trace; + bool raw_mode_war; + bool enable_data_stall_detection; + bool enable_force_rx_64_ba; + bool disable_intra_bss_fwd; + bool rxdma1_enable; + int max_ast_idx; + int tx_desc_limit_0; + int tx_desc_limit_1; + int tx_desc_limit_2; + int tx_device_limit; + int tx_sw_internode_queue; + int mon_drop_thresh; +#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT + uint32_t tx_comp_loop_pkt_limit; + uint32_t rx_reap_loop_pkt_limit; + uint32_t rx_hp_oos_update_limit; + bool rx_enable_eol_data_check; + bool tx_comp_enable_eol_data_check; +#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */ + int rx_sw_desc_weight; + bool is_rx_mon_protocol_flow_tag_enabled; + bool is_rx_flow_tag_enabled; + bool is_rx_flow_search_table_per_pdev; + uint16_t rx_flow_search_table_size; + uint16_t rx_flow_max_search; + uint8_t *rx_toeplitz_hash_key; + uint8_t pktlog_buffer_size; + uint8_t is_rx_fisa_enabled; + uint32_t rx_pending_high_threshold; + uint32_t rx_pending_low_threshold; + bool wow_check_rx_pending_enable; +}; + +/** + * struct wlan_cfg_dp_pdev_ctxt - Configuration parameters for pdev (radio) + * @rx_dma_buf_ring_size - Size of RxDMA buffer ring + * @dma_mon_buf_ring_size - Size of RxDMA Monitor buffer ring + * @dma_mon_dest_ring_size - Size of RxDMA Monitor Destination ring + * @dma_mon_status_ring_size - Size of RxDMA Monitor Status ring + * @rxdma_monitor_desc_ring - rxdma monitor desc ring size + */ +struct wlan_cfg_dp_pdev_ctxt { + int rx_dma_buf_ring_size; + int dma_mon_buf_ring_size; + int dma_mon_dest_ring_size; + int dma_mon_status_ring_size; + int rxdma_monitor_desc_ring; + int num_mac_rings; + int nss_enabled; +}; + +/** + * wlan_cfg_soc_attach() - Attach configuration interface for SoC + * @ctrl_obj - PSOC object + * + * Allocates context for Soc configuration parameters, + * Read configuration information from device tree/ini file and + * returns back handle + * + * Return: Handle to configuration context + */ +struct wlan_cfg_dp_soc_ctxt * +wlan_cfg_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_obj); + +/** + * wlan_cfg_soc_detach() - Detach soc configuration handle + * @wlan_cfg_ctx: soc configuration handle + * + * De-allocates memory allocated for SoC configuration + * + * Return:none + */ +void wlan_cfg_soc_detach(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/** + * wlan_cfg_pdev_attach() Attach configuration interface for pdev + * @ctrl_obj - PSOC object + * + * Allocates context for pdev configuration parameters, + * Read configuration information from device tree/ini file and + * returns back handle + * + * Return: Handle to configuration context + */ +struct wlan_cfg_dp_pdev_ctxt * +wlan_cfg_pdev_attach(struct cdp_ctrl_objmgr_psoc *ctrl_obj); + +/** + * wlan_cfg_pdev_detach() Detach and free pdev configuration handle + * @wlan_cfg_pdev_ctx - PDEV Configuration Handle + * + * Return: void + */ +void wlan_cfg_pdev_detach(struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_pdev_ctx); + +void wlan_cfg_set_num_contexts(struct wlan_cfg_dp_soc_ctxt *cfg, int num); +void wlan_cfg_set_tx_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); +void wlan_cfg_set_rx_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); +void wlan_cfg_set_rx_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); +void wlan_cfg_set_ce_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); +void wlan_cfg_set_rxbuf_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int context, + int mask); +void wlan_cfg_set_max_peer_id(struct wlan_cfg_dp_soc_ctxt *cfg, uint32_t val); +void wlan_cfg_set_max_ast_idx(struct wlan_cfg_dp_soc_ctxt *cfg, uint32_t val); +int wlan_cfg_get_max_ast_idx(struct wlan_cfg_dp_soc_ctxt *cfg); +int wlan_cfg_get_mon_drop_thresh(struct wlan_cfg_dp_soc_ctxt *cfg); +int wlan_cfg_set_rx_err_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); +int wlan_cfg_set_rx_wbm_rel_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); +int wlan_cfg_set_reo_status_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); +/** + * wlan_cfg_get_num_contexts() - Number of interrupt contexts to be registered + * @wlan_cfg_ctx - Configuration Handle + * + * For WIN, DP_NUM_INTERRUPT_CONTEXTS will be equal to number of CPU cores. + * Each context (for linux it is a NAPI context) will have a tx_ring_mask, + * rx_ring_mask ,and rx_monitor_ring mask to indicate the rings + * that are processed by the handler. + * + * Return: num_contexts + */ +int wlan_cfg_get_num_contexts(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/** + * wlan_cfg_get_tx_ring_mask() - Return Tx interrupt mask mapped to an + * interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_tx_ring_mask[context] + */ +int wlan_cfg_get_tx_ring_mask(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx, + int context); + +/** + * wlan_cfg_get_rx_ring_mask() - Return Rx interrupt mask mapped to an + * interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_rx_ring_mask[context] + */ +int wlan_cfg_get_rx_ring_mask(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx, + int context); + +/** + * wlan_cfg_get_rx_mon_ring_mask() - Return Rx monitor ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_rx_mon_ring_mask[context] + */ +int wlan_cfg_get_rx_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx, + int context); + +/** + * wlan_cfg_set_rxdma2host_ring_mask() - Set rxdma2host ring interrupt mask + * for the given interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + */ +void wlan_cfg_set_rxdma2host_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); + +/** + * wlan_cfg_get_rxdma2host_ring_mask() - Return rxdma2host ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_rxdma2host_ring_mask[context] + */ +int wlan_cfg_get_rxdma2host_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context); + +/** + * wlan_cfg_set_host2rxdma_ring_mask() - Set host2rxdma ring interrupt mask + * for the given interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + */ +void wlan_cfg_set_host2rxdma_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); + +/** + * wlan_cfg_get_host2rxdma_ring_mask() - Return host2rxdma ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_host2rxdma_ring_mask[context] + */ +int wlan_cfg_get_host2rxdma_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context); + +/** + * wlan_cfg_set_host2rxdma_mon_ring_mask() - Set host2rxdma monitor ring + * interrupt mask for the given interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + */ +void wlan_cfg_set_host2rxdma_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); + +/** + * wlan_cfg_get_host2rxdma_mon_ring_mask() - Return host2rxdma monitoe ring + * interrupt mask mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_host2rxdma_mon_ring_mask[context] + */ +int wlan_cfg_get_host2rxdma_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context); + +/** + * wlan_cfg_set_rxdma2host_mon_ring_mask() - Set rxdma2host monitor + * destination ring interrupt mask + * for the given interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + */ +void wlan_cfg_set_rxdma2host_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); + +/** + * wlan_cfg_get_rxdma2host_mon_ring_mask() - Return rxdma2host monitor + * destination ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_rxdma2host_mon_ring_mask[context] + */ +int wlan_cfg_get_rxdma2host_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context); + +/** + * wlan_cfg_set_hw_macidx() - Set HW MAC Idx for the given PDEV index + * + * @wlan_cfg_ctx - Configuration Handle + * @pdev_idx - Index of SW PDEV + * @hw_macid - HW MAC Id + * + */ +void wlan_cfg_set_hw_mac_idx + (struct wlan_cfg_dp_soc_ctxt *cfg, int pdev_idx, int hw_macid); + +/** + * wlan_cfg_get_hw_mac_idx() - Get 0 based HW MAC index for the given + * PDEV index + * + * @wlan_cfg_ctx - Configuration Handle + * @pdev_idx - Index of SW PDEV + * + * Return: HW MAC index + */ +int wlan_cfg_get_hw_mac_idx(struct wlan_cfg_dp_soc_ctxt *cfg, int pdev_idx); + +/** + * wlan_cfg_get_target_pdev_id() - Get target PDEV ID for HW MAC ID + * + * @wlan_cfg_ctx - Configuration Handle + * @hw_macid - Index of hw mac + * + * Return: PDEV ID + */ +int +wlan_cfg_get_target_pdev_id(struct wlan_cfg_dp_soc_ctxt *cfg, int hw_macid); + +/** + * wlan_cfg_set_pdev_idx() - Set 0 based host PDEV index for the given + * hw mac index + * + * @wlan_cfg_ctx - Configuration Handle + * @pdev_idx - Index of SW PDEV + * @hw_macid - Index of hw mac + * + * Return: PDEV index + */ +void wlan_cfg_set_pdev_idx + (struct wlan_cfg_dp_soc_ctxt *cfg, int pdev_idx, int hw_macid); + +/** + * wlan_cfg_get_pdev_idx() - Get 0 based PDEV index for the given + * hw mac index + * + * @wlan_cfg_ctx - Configuration Handle + * @hw_macid - Index of hw mac + * + * Return: PDEV index + */ +int wlan_cfg_get_pdev_idx(struct wlan_cfg_dp_soc_ctxt *cfg, int hw_macid); + +/** + * wlan_cfg_get_rx_err_ring_mask() - Return Rx monitor ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_rx_err_ring_mask[context] + */ +int wlan_cfg_get_rx_err_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int + context); + +/** + * wlan_cfg_get_rx_wbm_rel_ring_mask() - Return Rx monitor ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_wbm_rel_ring_mask[context] + */ +int wlan_cfg_get_rx_wbm_rel_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int + context); + +/** + * wlan_cfg_get_reo_status_ring_mask() - Return Rx monitor ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_reo_status_ring_mask[context] + */ +int wlan_cfg_get_reo_status_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int + context); + +/** + * wlan_cfg_get_ce_ring_mask() - Return CE ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_ce_ring_mask[context] + */ +int wlan_cfg_get_ce_ring_mask(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx, + int context); + +/** + * wlan_cfg_get_max_clients() - Return maximum number of peers/stations + * supported by device + * @wlan_cfg_ctx - Configuration Handle + * + * Return: max_clients + */ +uint32_t wlan_cfg_get_max_clients(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/** + * wlan_cfg_max_alloc_size() - Return Maximum allocation size for any dynamic + * memory allocation request for this device + * @wlan_cfg_ctx - Configuration Handle + * + * Return: max_alloc_size + */ +uint32_t wlan_cfg_max_alloc_size(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_per_pdev_tx_ring() - Return true if Tx rings are mapped as + * one per radio + * @wlan_cfg_ctx - Configuration Handle + * + * Return: per_pdev_tx_ring + */ +int wlan_cfg_per_pdev_tx_ring(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_num_tcl_data_rings() - Number of TCL Data rings supported by device + * @wlan_cfg_ctx + * + * Return: num_tcl_data_rings + */ +int wlan_cfg_num_tcl_data_rings(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_per_pdev_rx_ring() - Return true if Rx rings are mapped as + * one per radio + * @wlan_cfg_ctx + * + * Return: per_pdev_rx_ring + */ +int wlan_cfg_per_pdev_rx_ring(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_per_pdev_lmac_ring() - Return true if error rings are mapped as + * one per radio + * @wlan_cfg_ctx + * + * Return: return 1 if per pdev error ring else 0 + */ +int wlan_cfg_per_pdev_lmac_ring(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_num_reo_dest_rings() - Number of REO Data rings supported by device + * @wlan_cfg_ctx - Configuration Handle + * + * Return: num_reo_dest_rings + */ +int wlan_cfg_num_reo_dest_rings(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_pkt_type() - Default 802.11 encapsulation type + * @wlan_cfg_ctx - Configuration Handle + * + * Return: htt_pkt_type_ethernet + */ +int wlan_cfg_pkt_type(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_get_num_tx_desc_pool() - Number of Tx Descriptor pools for the + * device + * @wlan_cfg_ctx - Configuration Handle + * + * Return: num_tx_desc_pool + */ +int wlan_cfg_get_num_tx_desc_pool(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_set_num_tx_desc_pool() - Set the number of Tx Descriptor pools for the + * device + * @wlan_cfg_ctx - Configuration Handle + * @num_pool - Number of pool + */ +void wlan_cfg_set_num_tx_desc_pool(struct wlan_cfg_dp_soc_ctxt *cfg, int num_pool); + +/* + * wlan_cfg_get_num_tx_ext_desc_pool() - Number of Tx MSDU ext Descriptor + * pools + * @wlan_cfg_ctx - Configuration Handle + * + * Return: num_tx_ext_desc_pool + */ +int wlan_cfg_get_num_tx_ext_desc_pool( + struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_get_reo_dst_ring_size() - Get REO destination ring size + * + * @wlan_cfg_ctx - Configuration Handle + * + * Return: reo_dst_ring_size + */ +int wlan_cfg_get_reo_dst_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_set_num_tx_desc_pool() - Set the REO Destination ring size + * + * @wlan_cfg_ctx - Configuration Handle + * @reo_dst_ring_size - REO Destination ring size + */ +void wlan_cfg_set_reo_dst_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg, + int reo_dst_ring_size); + +/* + * wlan_cfg_set_raw_mode_war() - Set raw mode war configuration + * + * @wlan_cfg_ctx - Configuration Handle + * @raw_mode_war - raw mode war configuration + */ +void wlan_cfg_set_raw_mode_war(struct wlan_cfg_dp_soc_ctxt *cfg, + bool raw_mode_war); + +/* + * wlan_cfg_get_raw_mode_war() - Get raw mode war configuration + * + * @wlan_cfg_ctx - Configuration Handle + * + * Return: reo_dst_ring_size + */ +bool wlan_cfg_get_raw_mode_war(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_set_num_tx_ext_desc_pool() - Set the number of Tx MSDU ext Descriptor + * pools + * @wlan_cfg_ctx - Configuration Handle + * @num_pool - Number of pool + */ +void wlan_cfg_set_num_tx_ext_desc_pool(struct wlan_cfg_dp_soc_ctxt *cfg, int num_pool); + +/* + * wlan_cfg_get_num_tx_desc() - Number of Tx Descriptors per pool + * @wlan_cfg_ctx - Configuration Handle + * + * Return: num_tx_desc + */ +int wlan_cfg_get_num_tx_desc(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_get_min_tx_desc() - Minimum number of Tx Descriptors per pool + * @wlan_cfg_ctx - Configuration Handle + * + * Return: num_tx_desc + */ +int wlan_cfg_get_min_tx_desc(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_set_num_tx_desc() - Set the number of Tx Descriptors per pool + * + * @wlan_cfg_ctx - Configuration Handle + * @num_desc: Number of descriptor + */ +void wlan_cfg_set_num_tx_desc(struct wlan_cfg_dp_soc_ctxt *cfg, int num_desc); + +/* + * wlan_cfg_get_num_tx_ext_desc() - Number of Tx MSDU extension Descriptors + * per pool + * @wlan_cfg_ctx - Configuration Handle + * + * Return: num_tx_ext_desc + */ +int wlan_cfg_get_num_tx_ext_desc(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_set_num_tx_ext_desc() - Set the number of Tx MSDU extension Descriptors + * per pool + * @wlan_cfg_ctx - Configuration Handle + * @num_desc: Number of descriptor + */ +void wlan_cfg_set_num_tx_ext_desc(struct wlan_cfg_dp_soc_ctxt *cfg, int num_ext_desc); + +/* + * wlan_cfg_max_peer_id() - Get maximum peer ID + * @cfg: Configuration Handle + * + * Return: maximum peer ID + */ +uint32_t wlan_cfg_max_peer_id(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dma_mon_buf_ring_size() - Return Size of monitor buffer ring + * @wlan_cfg_pdev_ctx + * + * Return: dma_mon_buf_ring_size + */ +int wlan_cfg_get_dma_mon_buf_ring_size( + struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_pdev_ctx); + +/* + * wlan_cfg_get_dma_mon_dest_ring_size() - Return Size of RxDMA Monitor + * Destination ring + * @wlan_cfg_pdev_ctx + * + * Return: dma_mon_dest_size + */ +int wlan_cfg_get_dma_mon_dest_ring_size( + struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_pdev_ctx); + +/* + * wlan_cfg_get_dma_mon_stat_ring_size() - Return size of Monitor Status ring + * @wlan_cfg_pdev_ctx + * + * Return: dma_mon_stat_ring_size + */ +int wlan_cfg_get_dma_mon_stat_ring_size( + struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_pdev_ctx); + +/* + * wlan_cfg_get_dma_mon_desc_ring_size - Get rxdma monitor size + * @wlan_cfg_soc_ctx + * + * Return: rxdma monitor desc ring size + */ +int +wlan_cfg_get_dma_mon_desc_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg); + +/* + * wlan_cfg_get_rx_dma_buf_ring_size() - Return Size of RxDMA buffer ring + * @wlan_cfg_pdev_ctx + * + * Return: rx_dma_buf_ring_size + */ +int wlan_cfg_get_rx_dma_buf_ring_size( + struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_pdev_ctx); + +/* + * wlan_cfg_rx_pending_hl_threshold() - Return high threshold of rx pending + * @wlan_cfg_pdev_ctx + * + * Return: rx_pending_high_threshold + */ +uint32_t +wlan_cfg_rx_pending_hl_threshold(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_rx_pending_lo_threshold() - Return low threshold of rx pending + * @wlan_cfg_pdev_ctx + * + * Return: rx_pending_low_threshold + */ +uint32_t +wlan_cfg_rx_pending_lo_threshold(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_num_mac_rings() - Return the number of MAC RX DMA rings + * per pdev + * @wlan_cfg_pdev_ctx + * + * Return: number of mac DMA rings per pdev + */ +int wlan_cfg_get_num_mac_rings(struct wlan_cfg_dp_pdev_ctxt *cfg); + +/* + * wlan_cfg_is_lro_enabled - Return LRO enabled/disabled + * @wlan_cfg_dp_soc_ctxt + * + * Return: true - LRO enabled false - LRO disabled + */ +bool wlan_cfg_is_lro_enabled(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_is_gro_enabled - Return GRO enabled/disabled + * @wlan_cfg_dp_soc_ctxt + * + * Return: true - GRO enabled false - GRO disabled + */ +bool wlan_cfg_is_gro_enabled(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_is_rx_hash_enabled - Return RX hash enabled/disabled + * @wlan_cfg_dp_soc_ctxt + * + * Return: true - enabled false - disabled + */ +bool wlan_cfg_is_rx_hash_enabled(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_is_ipa_enabled - Return IPA enabled/disabled + * @wlan_cfg_dp_soc_ctxt + * + * Return: true - enabled false - disabled + */ +bool wlan_cfg_is_ipa_enabled(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_set_rx_hash - set rx hash enabled/disabled + * @wlan_cfg_soc_ctx + * @rx_hash + */ +void wlan_cfg_set_rx_hash(struct wlan_cfg_dp_soc_ctxt *cfg, bool rx_hash); + +/* + * wlan_cfg_get_dp_pdev_nss_enabled - Return pdev nss enabled/disabled + * @wlan_cfg_pdev_ctx + * + * Return: 1 - enabled 0 - disabled + */ +int wlan_cfg_get_dp_pdev_nss_enabled(struct wlan_cfg_dp_pdev_ctxt *cfg); + +/* + * wlan_cfg_set_dp_pdev_nss_enabled - set pdev nss enabled/disabled + * @wlan_cfg_pdev_ctx + */ +void wlan_cfg_set_dp_pdev_nss_enabled(struct wlan_cfg_dp_pdev_ctxt *cfg, int nss_enabled); + +/* + * wlan_cfg_get_dp_soc_nss_cfg - Return soc nss config + * @wlan_cfg_pdev_ctx + * + * Return: nss_cfg + */ +int wlan_cfg_get_dp_soc_nss_cfg(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_set_dp_soc_nss_cfg - set soc nss config + * @wlan_cfg_pdev_ctx + * + */ +void wlan_cfg_set_dp_soc_nss_cfg(struct wlan_cfg_dp_soc_ctxt *cfg, int nss_cfg); + +/* + * wlan_cfg_get_int_batch_threshold_tx - Get interrupt mitigation cfg for Tx + * @wlan_cfg_soc_ctx + * + * Return: Batch threshold + */ +int wlan_cfg_get_int_batch_threshold_tx(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_int_timer_threshold_tx - Get interrupt mitigation cfg for Tx + * @wlan_cfg_soc_ctx + * + * Return: Timer threshold + */ +int wlan_cfg_get_int_timer_threshold_tx(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_int_batch_threshold_rx - Get interrupt mitigation cfg for Rx + * @wlan_cfg_soc_ctx + * + * Return: Batch threshold + */ +int wlan_cfg_get_int_batch_threshold_rx(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_int_batch_threshold_rx - Get interrupt mitigation cfg for Rx + * @wlan_cfg_soc_ctx + * + * Return: Timer threshold + */ +int wlan_cfg_get_int_timer_threshold_rx(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_int_batch_threshold_tx - Get interrupt mitigation cfg for other srngs + * @wlan_cfg_soc_ctx + * + * Return: Batch threshold + */ +int wlan_cfg_get_int_batch_threshold_other(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_int_batch_threshold_tx - Get interrupt mitigation cfg for other srngs + * @wlan_cfg_soc_ctx + * + * Return: Timer threshold + */ +int wlan_cfg_get_int_timer_threshold_other(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_int_timer_threshold_mon - Get int mitigation cfg for mon srngs + * @wlan_cfg_soc_ctx + * + * Return: Timer threshold + */ +int wlan_cfg_get_int_timer_threshold_mon(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_checksum_offload - Get checksum offload enable or disable status + * @wlan_cfg_soc_ctx + * + * Return: Checksum offload enable or disable + */ +int wlan_cfg_get_checksum_offload(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_nan_checksum_offload - Get checksum offload enable/disable val + * @wlan_cfg_soc_ctx + * + * Return: Checksum offload enable or disable value for NAN mode + */ +int wlan_cfg_get_nan_checksum_offload(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_p2p_checksum_offload - Get checksum offload enable/disable val + * @wlan_cfg_soc_ctx + * + * Return: Checksum offload enable or disable value for P2P mode + */ +int wlan_cfg_get_p2p_checksum_offload(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_tx_ring_size - Get Tx DMA ring size (TCL Data Ring) + * @wlan_cfg_soc_ctx + * + * Return: Tx Ring Size + */ +int wlan_cfg_tx_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_tx_comp_ring_size - Get Tx completion ring size (WBM Ring) + * @wlan_cfg_soc_ctx + * + * Return: Tx Completion ring size + */ +int wlan_cfg_tx_comp_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_wbm_release_ring_size - Get wbm_release_ring size + * @wlan_cfg_soc_ctx + * + * Return: wbm_release_ring size + */ +int +wlan_cfg_get_dp_soc_wbm_release_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_tcl_cmd_ring_size - Get tcl_cmd_ring size + * @wlan_cfg_soc_ctx + * + * Return: tcl_cmd_ring size + */ +int +wlan_cfg_get_dp_soc_tcl_cmd_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_tcl_status_ring_size - Get tcl_status_ring size + * @wlan_cfg_soc_ctx + * + * Return: tcl_status_ring size + */ +int +wlan_cfg_get_dp_soc_tcl_status_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_reo_reinject_ring_size - Get reo_reinject_ring size + * @wlan_cfg_soc_ctx + * + * Return: reo_reinject_ring size + */ +int +wlan_cfg_get_dp_soc_reo_reinject_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_rx_release_ring_size - Get rx_release_ring size + * @wlan_cfg_soc_ctx + * + * Return: rx_release_ring size + */ +int +wlan_cfg_get_dp_soc_rx_release_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_reo_exception_ring_size - Get reo_exception_ring size + * @wlan_cfg_soc_ctx + * + * Return: reo_exception_ring size + */ +int +wlan_cfg_get_dp_soc_reo_exception_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_reo_cmd_ring_size - Get reo_cmd_ring size + * @wlan_cfg_soc_ctx + * + * Return: reo_cmd_ring size + */ +int +wlan_cfg_get_dp_soc_reo_cmd_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_reo_status_ring_size - Get reo_status_ring size + * @wlan_cfg_soc_ctx + * + * Return: reo_status_ring size + */ +int +wlan_cfg_get_dp_soc_reo_status_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_tx_desc_limit_0 - Get tx desc limit for 5G H + * @wlan_cfg_soc_ctx + * + * Return: tx desc limit for 5G H + */ +int +wlan_cfg_get_dp_soc_tx_desc_limit_0(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_tx_desc_limit_1 - Get tx desc limit for 2G + * @wlan_cfg_soc_ctx + * + * Return: tx desc limit for 2G + */ +int +wlan_cfg_get_dp_soc_tx_desc_limit_1(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_tx_desc_limit_2 - Get tx desc limit for 5G L + * @wlan_cfg_soc_ctx + * + * Return: tx desc limit for 5G L + */ +int +wlan_cfg_get_dp_soc_tx_desc_limit_2(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_tx_device_limit - Get tx device limit + * @wlan_cfg_soc_ctx + * + * Return: tx device limit + */ +int +wlan_cfg_get_dp_soc_tx_device_limit(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_tx_sw_internode_queue - Get tx sw internode queue + * @wlan_cfg_soc_ctx + * + * Return: tx sw internode queue + */ +int +wlan_cfg_get_dp_soc_tx_sw_internode_queue(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_rxdma_refill_ring_size - Get rxdma refill ring size + * @wlan_cfg_soc_ctx + * + * Return: rxdma refill ring size + */ +int +wlan_cfg_get_dp_soc_rxdma_refill_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size - Get rxdma dst ring size + * @wlan_cfg_soc_ctx + * + * Return: rxdma error dst ring size + */ +int +wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_soc_rx_sw_desc_weight - Get rx sw desc weight + * @wlan_cfg_soc_ctx + * + * Return: rx_sw_desc_weight + */ +int +wlan_cfg_get_dp_soc_rx_sw_desc_weight(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dp_caps - Get dp capablities + * @wlan_cfg_soc_ctx + * @dp_caps: enum for dp capablities + * + * Return: bool if a dp capabilities is enabled + */ +bool +wlan_cfg_get_dp_caps(struct wlan_cfg_dp_soc_ctxt *cfg, + enum cdp_capabilities dp_caps); + +/** + * wlan_set_srng_cfg() - Fill per ring specific + * configuration parameters + * @wlan_cfg: global srng configuration table + * + * Return: None + */ +void wlan_set_srng_cfg(struct wlan_srng_cfg **wlan_cfg); + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +int wlan_cfg_get_tx_flow_stop_queue_th(struct wlan_cfg_dp_soc_ctxt *cfg); + +int wlan_cfg_get_tx_flow_start_queue_offset(struct wlan_cfg_dp_soc_ctxt *cfg); +#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ +int wlan_cfg_get_rx_defrag_min_timeout(struct wlan_cfg_dp_soc_ctxt *cfg); + +int wlan_cfg_get_defrag_timeout_check(struct wlan_cfg_dp_soc_ctxt *cfg); + +/** + * wlan_cfg_get_rx_flow_search_table_size() - Return the size of Rx FST + * in number of entries + * + * @wlan_cfg_dp_soc_ctxt: soc configuration context + * + * Return: rx_fst_size + */ +uint16_t +wlan_cfg_get_rx_flow_search_table_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/** + * wlan_cfg_rx_fst_get_max_search() - Return the max skid length for FST search + * + * @wlan_cfg_dp_soc_ctxt: soc configuration context + * + * Return: max_search + */ +uint8_t wlan_cfg_rx_fst_get_max_search(struct wlan_cfg_dp_soc_ctxt *cfg); + +/** + * wlan_cfg_rx_fst_get_hash_key() - Return Toeplitz Hash Key used for FST + * search + * + * @wlan_cfg_dp_soc_ctxt: soc configuration context + * + * Return: 320-bit Hash Key + */ +uint8_t *wlan_cfg_rx_fst_get_hash_key(struct wlan_cfg_dp_soc_ctxt *cfg); + +/** + * wlan_cfg_set_rx_flow_tag_enabled() - set rx flow tag enabled flag in + * DP soc context + * @wlan_cfg_dp_soc_ctxt: soc configuration context + * @val: Rx flow tag feature flag value + * + * Return: None + */ +void wlan_cfg_set_rx_flow_tag_enabled(struct wlan_cfg_dp_soc_ctxt *cfg, + bool val); + +/** + * wlan_cfg_is_rx_flow_tag_enabled() - get rx flow tag enabled flag from + * DP soc context + * @wlan_cfg_dp_soc_ctxt: soc configuration context + * + * Return: true if feature is enabled, else false + */ +bool wlan_cfg_is_rx_flow_tag_enabled(struct wlan_cfg_dp_soc_ctxt *cfg); + +/** + * wlan_cfg_set_rx_flow_search_table_per_pdev() - Set flag to indicate that + * Rx FST is per pdev + * @wlan_cfg_dp_soc_ctxt: soc configuration context + * @val: boolean flag indicating Rx FST per pdev or per SOC + * + * Return: None + */ +void +wlan_cfg_set_rx_flow_search_table_per_pdev(struct wlan_cfg_dp_soc_ctxt *cfg, + bool val); + +/** + * wlan_cfg_is_rx_flow_search_table_per_pdev() - get RX FST flag for per pdev + * @wlan_cfg_dp_soc_ctxt: soc configuration context + * + * Return: true if Rx FST is per pdev, else false + */ +bool +wlan_cfg_is_rx_flow_search_table_per_pdev(struct wlan_cfg_dp_soc_ctxt *cfg); + +/** + * wlan_cfg_set_rx_flow_search_table_size() - set RX FST size in DP SoC context + * @wlan_cfg_dp_soc_ctxt: soc configuration context + * @val: Rx FST size in number of entries + * + * Return: None + */ +void +wlan_cfg_set_rx_flow_search_table_size(struct wlan_cfg_dp_soc_ctxt *cfg, + uint16_t val); + +/** + * wlan_cfg_set_rx_mon_protocol_flow_tag_enabled() - set mon rx tag enabled flag + * in DP soc context + * @wlan_cfg_dp_soc_ctxt: soc configuration context + * @val: Rx protocol or flow tag feature flag value in monitor mode from INI + * + * Return: None + */ +void +wlan_cfg_set_rx_mon_protocol_flow_tag_enabled(struct wlan_cfg_dp_soc_ctxt *cfg, + bool val); + +/** + * wlan_cfg_is_rx_mon_protocol_flow_tag_enabled() - get mon rx tag enabled flag + * from DP soc context + * @wlan_cfg_dp_soc_ctxt: soc configuration context + * + * Return: true if feature is enabled in monitor mode for protocol or flow + * tagging in INI, false otherwise + */ +bool +wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(struct wlan_cfg_dp_soc_ctxt *cfg); + +/** + * wlan_cfg_fill_interrupt_mask() - set interrupt mask + * + * @wlan_cfg_dp_soc_ctxt: soc configuration context + * @interrupt_mode: interrupt_mode: MSI/LEGACY + * @is_monitor_mode: is monitor mode enabled + * + * Return: void + */ +void wlan_cfg_fill_interrupt_mask(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx, + int interrupt_mode, bool is_monitor_mode); + +/** + * wlan_cfg_is_rx_fisa_enabled() - Get Rx FISA enabled flag + * + * + * @cfg: soc configuration context + * + * Return: true if enabled, false otherwise. + */ +bool wlan_cfg_is_rx_fisa_enabled(struct wlan_cfg_dp_soc_ctxt *cfg); + +/** + * wlan_cfg_is_dp_force_rx_64_ba() - Get force use 64 BA flag + * @cfg: config context + * + * Return: force use 64 BA flag + */ +bool wlan_cfg_is_dp_force_rx_64_ba(struct wlan_cfg_dp_soc_ctxt *cfg); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_filtered_logging.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_filtered_logging.h new file mode 100644 index 0000000000000000000000000000000000000000..08700275701235b94697193e0ab032de271fd7c7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_filtered_logging.h @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef WMI_FILTERED_LOGGING_H +#define WMI_FILTERED_LOGGING_H + +#include +#include "wmi_unified_priv.h" + +#ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING +/** + * wmi_specific_cmd_record() - Record user specified command + * @wmi_handle: handle to WMI + * @id: cmd id + * @buf: buf containing cmd details + * + * Check if the command id is in target list, + * if found, record it. + * + * Context: the function will not sleep, caller is expected to hold + * proper locking. + * + * Return: none + */ +void wmi_specific_cmd_record(wmi_unified_t wmi_handle, + uint32_t id, uint8_t *buf); + +/** + * wmi_specific_evt_record() - Record user specified event + * @wmi_handle: handle to WMI + * @id: cmd id + * @buf: buf containing event details + * + * Check if the event id is in target list, + * if found, record it. + * + * Context: the function will not sleep, caller is expected to hold + * proper locking. + * + * Return: none + */ +void wmi_specific_evt_record(wmi_unified_t wmi_handle, + uint32_t id, uint8_t *buf); + +/** + * wmi_filtered_logging_init() - initialize filtered logging + * @wmi_handle: handle to WMI + * + * Context: the function will not sleep, no lock needed + * + * Return: none + */ +void wmi_filtered_logging_init(wmi_unified_t wmi_handle); + +/** + * wmi_filtered_logging_free() - free the buffers for filtered logging + * @wmi_handle: handle to WMI + * + * Context: the function will not sleep, no lock needed + * + * Return: none + */ +void wmi_filtered_logging_free(wmi_unified_t wmi_handle); + +/* + * Debugfs read/write functions + */ +/** + * debug_filtered_wmi_cmds_show() - debugfs read function for filtered_wmi_cmds + * @m: seq_file handle + * @v: not used, offset of read + * Return: number of bytes read + */ +int debug_filtered_wmi_cmds_show(qdf_debugfs_file_t m, void *v); + +/** + * debug_filtered_wmi_evts_show() - debugfs read function for filtered_wmi_evts + * @m: seq_file handle + * @v: not used, offset of read + * Return: number of bytes read + */ +int debug_filtered_wmi_evts_show(qdf_debugfs_file_t m, void *v); + +/** + * debug_wmi_filtered_command_log_show() - debugfs read function for + * wmi_filtered_command_log + * @m: seq_file handle + * @v: not used, offset of read + * Return: number of bytes read + */ +int debug_wmi_filtered_command_log_show(qdf_debugfs_file_t m, void *v); + +/** + * debug_wmi_filtered_event_log_show() - debugfs read function for + * wmi_filtered_event_log + * @m: seq_file handle + * @v: not used, offset of read + * Return: number of bytes read + */ +int debug_wmi_filtered_event_log_show(qdf_debugfs_file_t m, void *v); + +/** + * debug_wmi_filtered_wmi_cmds_write() - debugfs write for filtered_wmi_cmds + * + * @file: file handler to access wmi_handle + * @buf: received data buffer + * @count: length of received buffer + * @ppos: Not used + * + * Return: count + */ +ssize_t debug_filtered_wmi_cmds_write(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos); + +/** + * debug_wmi_filtered_wmi_evts_write() - debugfs write for filtered_wmi_evts + * + * @file: file handler to access wmi_handle + * @buf: received data buffer + * @count: length of received buffer + * @ppos: Not used + * + * Return: count + */ +ssize_t debug_filtered_wmi_evts_write(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos); + +/** + * debug_wmi_filtered_command_log_write() - debugfs write for + * filtered_command_log + * + * @file: file handler to access wmi_handle + * @buf: received data buffer + * @count: length of received buffer + * @ppos: Not used + * + * Return: count + */ +ssize_t debug_wmi_filtered_command_log_write(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos); + +/** + * debug_wmi_filtered_event_log_write() - debugfs write for filtered_event_log + * + * @file: file handler to access wmi_handle + * @buf: received data buffer + * @count: length of received buffer + * @ppos: Not used + * + * Return: count + */ +ssize_t debug_wmi_filtered_event_log_write(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos); + +#else /* WMI_INTERFACE_FILTERED_EVENT_LOGGING */ + +static inline void wmi_specific_cmd_record(wmi_unified_t wmi_handle, + uint32_t id, uint8_t *buf) +{ + /* do nothing */ +} + +static inline void wmi_specific_evt_record(wmi_unified_t wmi_handle, + uint32_t id, uint8_t *buf) +{ + /* do nothing */ +} + +static inline void wmi_filtered_logging_init(wmi_unified_t wmi_handle) +{ + /* do nothing */ +} + +static inline void wmi_filtered_logging_free(wmi_unified_t wmi_handle) +{ + /* do nothing */ +} +#endif /* end of WMI_INTERFACE_FILTERED_EVENT_LOGGING */ + +#endif /*WMI_FILTERED_LOGGING_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_hang_event.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_hang_event.h new file mode 100644 index 0000000000000000000000000000000000000000..96127ea6f622c03e7716c7b1ee8900ed8b5754b7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_hang_event.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef WMI_HANG_EVENT_H +#define WMI_HANG_EVENT_H + +#include +#ifdef WLAN_HANG_EVENT + +/** + * wmi_hang_event_notifier_register() - wmi hang event notifier register + * @wmi_hdl: WMI Handle + * + * This function registers wmi layer notifier for the hang event notifier chain. + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_hang_event_notifier_register(struct wmi_unified *wmi_hdl); + +/** + * wmi_hang_event_notifier_unregister() - wmi hang event notifier unregister + * @wmi_hdl: WMI Handle + * + * This function unregisters wmi layer notifier for the hang event notifier + * chain. + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_hang_event_notifier_unregister(void); +#else +static inline +QDF_STATUS wmi_hang_event_notifier_register(struct wmi_unified *wmi_hdl) +{ + return 0; +} + +static inline QDF_STATUS wmi_hang_event_notifier_unregister(void) +{ + return 0; +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_action_oui_tlv.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_action_oui_tlv.h new file mode 100644 index 0000000000000000000000000000000000000000..2ea52db8202c2f34f2e0a335dd30dc443bb7114a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_action_oui_tlv.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WMI_UNIFIED_ACTION_OUI_TLV_H_ +#define _WMI_UNIFIED_ACTION_OUI_TLV_H_ + +#ifdef WLAN_FEATURE_ACTION_OUI + +#include "wmi.h" +#include "wmi_unified.h" +#include "wmi_unified_api.h" +#include "wmi_unified_param.h" + +/** + * wmi_get_action_oui_info_mask() - convert info mask to firmware specific + * @info_mask: host specific info mask + * + * Return: firmware specific information mask + */ +uint32_t wmi_get_action_oui_info_mask(uint32_t info_mask); + +/** + * wmi_get_action_oui_id() - convert action id to firmware specific + * @action_id: host specific action id + * @id: output pointer to hold converted fw specific action id + * + * Return: true on conversion else failure + */ +bool wmi_get_action_oui_id(enum action_oui_id action_id, + wmi_vendor_oui_action_id *id); + + +/** + * wmi_fill_oui_extensions() - populates wmi_vendor_oui_ext array + * @extension: pointer to user supplied action oui extensions + * @no_oui_extns: number of action oui extensions + * @cmd_ext: output pointer to TLV + * + * This function parses the user supplied input data and populates the + * array of variable structures TLV in WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID + * + * Return: None + */ +void wmi_fill_oui_extensions(struct action_oui_extension *extension, + uint32_t no_oui_extns, + wmi_vendor_oui_ext *cmd_ext); + +/** + * wmi_fill_oui_extensions_buffer() - populates data buffer in action oui cmd + * @extension: pointer to user supplied action oui extensions + * @cmd_ext: pointer to vendor_oui_ext TLV in action oui cmd + * @no_oui_extns: number of action oui extensions + * @rem_var_buf_len: remaining length of buffer to be populated + * @var_buf: output pointer to hold variable length data + * + * This function parses the user supplied input data and populates the variable + * buffer of type array byte TLV in WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID + * + * Return: QDF_STATUS_SUCCESS for successful fill else QDF_STATUS_E_INVAL + */ +QDF_STATUS +wmi_fill_oui_extensions_buffer(struct action_oui_extension *extension, + wmi_vendor_oui_ext *cmd_ext, + uint32_t no_oui_extns, uint32_t rem_var_buf_len, + uint8_t *var_buf); + +/** + * send_action_oui_cmd_tlv() - send action oui cmd to firmware + * @wmi_handle: wmi handler + * @req: pointer to action oui info + * + * Return: QDF_STATUS_SUCCESS on successful transmission else + * QDF_STATUS_E_INVAL or QDF_STATUS_E_NOMEM + */ +QDF_STATUS +send_action_oui_cmd_tlv(wmi_unified_t wmi_handle, + struct action_oui_request *req); + +#endif /* WLAN_FEATURE_ACTION_OUI */ + +#endif /* _WMI_UNIFIED_ACTION_OUI_TLV_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_apf_tlv.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_apf_tlv.h new file mode 100644 index 0000000000000000000000000000000000000000..cbeba3e2d1d3344d50ffcab8730154911ba47281 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_apf_tlv.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WMI_UNIFIED_APF_TLV_H_ +#define _WMI_UNIFIED_APF_TLV_H_ + +#ifdef FEATURE_WLAN_APF + +#include "wmi_unified.h" +#include "wmi_unified_api.h" +#include "wmi_unified_param.h" + +/** + * wmi_send_set_active_apf_mode_cmd_tlv() - configure active APF mode in FW + * @wmi_handle: the WMI handle + * @vdev_id: the Id of the vdev to apply the configuration to + * @ucast_mode: the active APF mode to configure for unicast packets + * @mcast_bcast_mode: the active APF mode to configure for multicast/broadcast + * packets + * + * Return: QDF status + */ +QDF_STATUS +wmi_send_set_active_apf_mode_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, + enum wmi_host_active_apf_mode ucast_mode, + enum wmi_host_active_apf_mode + mcast_bcast_mode); + +/** + * wmi_send_apf_enable_cmd_tlv() - send cmd to enable/disable APF interpreter + * @wmi_handle: the WMI handle + * @vdev_id: VDEV on which APF interpreter is to be enabled/disabled + * @enable: true: enable, false: disable + * + * Return: QDF status + */ +QDF_STATUS +wmi_send_apf_enable_cmd_tlv(wmi_unified_t wmi_handle, uint32_t vdev_id, + bool enable); + +/** + * wmi_send_apf_write_work_memory_cmd_tlv() - send cmd to write into the APF + * work + * memory + * @wmi_handle: the WMI handle + * @apf_write_params: parameters and buffer pointer for the write + * + * Return: QDF status + */ +QDF_STATUS +wmi_send_apf_write_work_memory_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_apf_write_memory_params + *apf_write_params); + +/** + * wmi_send_apf_read_work_memory_cmd_tlv() - send cmd to read part of APF + * work memory + * @wmi_handle: the WMI handle + * @apf_read_params: contains relative address and length to read from + * + * Return: QDF status + */ +QDF_STATUS +wmi_send_apf_read_work_memory_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_apf_read_memory_params + *apf_read_params); + +/** + * wmi_extract_apf_read_memory_resp_event_tlv() - extract read memory response + * event into the given structure pointer + * @wmi_handle: the WMI handle + * @evt_buf: Pointer to the event buffer + * @resp: pointer to memory to extract event parameters into + * + * Return: QDF status + */ +QDF_STATUS +wmi_extract_apf_read_memory_resp_event_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_apf_read_memory_resp_event_params + *resp); +#endif /* FEATURE_WLAN_APF */ + +#endif /* _WMI_UNIFIED_APF_TLV_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h new file mode 100644 index 0000000000000000000000000000000000000000..7cbc50e104cdcc659009d1219b6df1857181872d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h @@ -0,0 +1,3925 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API definitions for the Unified Wireless Module + * Interface (WMI). + */ + +#ifndef _WMI_UNIFIED_API_H_ +#define _WMI_UNIFIED_API_H_ + +#include +#include "htc_api.h" +#include "wmi_unified_param.h" +#include "service_ready_param.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_mgmt_txrx_utils_api.h" +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +#include "wmi_unified_pmo_api.h" +#endif +#include "wlan_scan_public_structs.h" +#ifdef WLAN_FEATURE_ACTION_OUI +#include "wlan_action_oui_public_struct.h" +#endif +#ifdef WLAN_SUPPORT_GREEN_AP +#include "wlan_green_ap_api.h" +#endif +#ifdef WLAN_FEATURE_DSRC +#include "wlan_ocb_public_structs.h" +#endif +#ifdef WLAN_SUPPORT_TWT +#include "wmi_unified_twt_param.h" +#include "wmi_unified_twt_api.h" +#endif + +#ifdef FEATURE_WLAN_EXTSCAN +#include "wmi_unified_extscan_api.h" +#endif + +#ifdef IPA_OFFLOAD +#include "wlan_ipa_public_struct.h" +#endif + +#ifdef WMI_SMART_ANT_SUPPORT +#include "wmi_unified_smart_ant_api.h" +#endif + +#ifdef WMI_DBR_SUPPORT +#include "wmi_unified_dbr_api.h" +#endif + +#ifdef WMI_ATF_SUPPORT +#include "wmi_unified_atf_api.h" +#endif + +#ifdef WMI_AP_SUPPORT +#include "wmi_unified_ap_api.h" +#endif + +#ifdef WLAN_FEATURE_DSRC +#include "wmi_unified_ocb_api.h" +#endif + +#ifdef WLAN_FEATURE_NAN +#include "wmi_unified_nan_api.h" +#endif + +#ifdef CONVERGED_P2P_ENABLE +#include +#endif + +#ifdef WMI_ROAM_SUPPORT +#include "wmi_unified_roam_api.h" +#endif + +#ifdef WMI_CONCURRENCY_SUPPORT +#include "wmi_unified_concurrency_api.h" +#endif + +#ifdef WMI_STA_SUPPORT +#include "wmi_unified_sta_api.h" +#endif + +#ifdef WLAN_FW_OFFLOAD +#include "wmi_unified_fwol_api.h" +#endif + +typedef qdf_nbuf_t wmi_buf_t; +#define wmi_buf_data(_buf) qdf_nbuf_data(_buf) + +#define WMI_LOGD(args ...) \ + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, ## args) +#define WMI_LOGI(args ...) \ + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO, ## args) +#define WMI_LOGW(args ...) \ + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_WARN, ## args) +#define WMI_LOGE(args ...) \ + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, ## args) +#define WMI_LOGP(args ...) \ + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_FATAL, ## args) + +/* Number of bits to shift to combine 32 bit integer to 64 bit */ +#define WMI_LOWER_BITS_SHIFT_32 0x20 + +#define PHYERROR_MAX_BUFFER_LENGTH 0x7F000000 + +struct wmi_soc; +struct policy_mgr_dual_mac_config; + +/** + * enum wmi_target_type - type of supported wmi command + * @WMI_TLV_TARGET: tlv based target + * @WMI_NON_TLV_TARGET: non-tlv based target + * + */ +enum wmi_target_type { + WMI_TLV_TARGET, + WMI_NON_TLV_TARGET, + WMI_MAX_TARGET_TYPE +}; + +/** + * enum wmi_rx_exec_ctx - wmi rx execution context + * @WMI_RX_WORK_CTX: work queue context execution provided by WMI layer + * @WMI_RX_UMAC_CTX: execution context provided by umac layer + * @WMI_RX_SERIALIZER_CTX: Execution context is serialized thread context + * + */ +enum wmi_rx_exec_ctx { + WMI_RX_WORK_CTX, + WMI_RX_UMAC_CTX, + WMI_RX_TASKLET_CTX = WMI_RX_UMAC_CTX, + WMI_RX_SERIALIZER_CTX = 2 +}; + +/** + * enum wmi_fw_mem_prio - defines FW Memory requirement type + * @WMI_FW_MEM_HIGH_PRIORITY: Memory requires contiguous memory allocation + * @WMI_FW_MEM_LOW_PRIORITY: Memory can be fragmented + * @WMI_FW_PRIORITY_MAX: Invalid type + */ +enum wmi_fw_mem_prio { + WMI_FW_MEM_HIGH_PRIORITY = 0, + WMI_FW_MEM_LOW_PRIORITY, + WMI_FW_PRIORITY_MAX +}; + +/** + * struct wmi_unified_attach_params - wmi init parameters + * @osdev: NIC device + * @target_type: type of supported wmi command + * @use_cookie: flag to indicate cookie based allocation + * @is_async_ep: queueing interrupt or non-interrupt endpoint + * @rx_ops: handle to wmi ops + * @psoc: objmgr psoc + * @max_commands: max commands + * @soc_id: SoC device instance id + */ +struct wmi_unified_attach_params { + osdev_t osdev; + enum wmi_target_type target_type; + bool use_cookie; + bool is_async_ep; + struct wlan_objmgr_psoc *psoc; + uint16_t max_commands; + uint32_t soc_id; +}; + +/** + * attach for unified WMI + * + * @param scn_handle : handle to SCN. + * @param params : attach params for WMI + * + */ +void *wmi_unified_attach(void *scn_handle, + struct wmi_unified_attach_params *params); + + + +/** + * wmi_mgmt_cmd_record() - Wrapper function for mgmt command logging macro + * + * @wmi_handle: wmi handle + * @cmd: mgmt command + * @header: pointer to 802.11 header + * @vdev_id: vdev id + * @chanfreq: channel frequency + * + * Return: none + */ +void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd, + void *header, uint32_t vdev_id, uint32_t chanfreq); + +/** + * detach for unified WMI + * + * @param wmi_handle : handle to WMI. + * @return void. + */ +void wmi_unified_detach(struct wmi_unified *wmi_handle); + +/** + * API to sync time between host and firmware + * + * @wmi_handle: handle to WMI. + * + * Return: none + */ +void wmi_send_time_stamp_sync_cmd_tlv(wmi_unified_t wmi_handle); + +void +wmi_unified_remove_work(struct wmi_unified *wmi_handle); + +/** + * generic function to allocate WMI buffer + * + * @param wmi_handle : handle to WMI. + * @param len : length of the buffer + * @return wmi_buf_t. + */ +#ifdef NBUF_MEMORY_DEBUG +#define wmi_buf_alloc(h, l) wmi_buf_alloc_debug(h, l, __func__, __LINE__) +wmi_buf_t +wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len, + const char *func_name, uint32_t line_num); +#else +/** + * wmi_buf_alloc() - generic function to allocate WMI buffer + * @wmi_handle: handle to WMI. + * @len: length of the buffer + * + * Return: return wmi_buf_t or null if memory alloc fails + */ +#define wmi_buf_alloc(wmi_handle, len) \ + wmi_buf_alloc_fl(wmi_handle, len, __func__, __LINE__) + +wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len, + const char *func, uint32_t line); +#endif + +/** + * generic function frees WMI net buffer + * + * @param net_buf : Pointer ot net_buf to be freed + */ +void wmi_buf_free(wmi_buf_t net_buf); + +/** + * wmi_unified_cmd_send() - generic function to send unified WMI command + * @wmi_handle: handle to WMI. + * @buf: wmi command buffer + * @buflen: wmi command buffer length + * @cmd_id: WMI cmd id + * + * Note, it is NOT safe to access buf after calling this function! + * + * Return: QDF_STATUS + */ +#define wmi_unified_cmd_send(wmi_handle, buf, buflen, cmd_id) \ + wmi_unified_cmd_send_fl(wmi_handle, buf, buflen, \ + cmd_id, __func__, __LINE__) + +QDF_STATUS +wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf, + uint32_t buflen, uint32_t cmd_id, + const char *func, uint32_t line); + +#ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI +/** + * wmi_unified_cmd_send_over_qmi() - generic function to send unified WMI command + * over QMI + * @wmi_handle: handle to WMI. + * @buf: wmi command buffer + * @buflen: wmi command buffer length + * @cmd_id: WMI cmd id + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle, + wmi_buf_t buf, uint32_t buflen, + uint32_t cmd_id); + +/** + * wmi_process_qmi_fw_event() - Process WMI event received over QMI + * @wmi_cb_ctx: WMI handle received as call back context + * @buf: Pointer to WMI event buffer + * @len: Len of WMI buffer received + * + * Return: None + */ +int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len); +#else +static inline +QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle, + wmi_buf_t buf, uint32_t buflen, + uint32_t cmd_id) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static inline int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len) +{ + return -EINVAL; +} +#endif + +/** + * wmi_unified_register_event() - WMI event handler + * registration function for converged components + * + * @wmi_handle: handle to WMI. + * @event_id: WMI event ID + * @handler_func: Event handler call back function + * + * @return 0 on success and -ve on failure. + */ +int +wmi_unified_register_event(wmi_unified_t wmi_handle, + uint32_t event_id, + wmi_unified_event_handler handler_func); + +/** + * wmi_unified_register_event_handler() - WMI event handler + * registration function + * + * @wmi_handle: handle to WMI. + * @event_id: WMI event ID + * @handler_func: Event handler call back function + * @rx_ctx: rx event processing context + * + * @return 0 on success and -ve on failure. + */ +int +wmi_unified_register_event_handler(wmi_unified_t wmi_handle, + wmi_conv_event_id event_id, + wmi_unified_event_handler handler_func, + uint8_t rx_ctx); + +/** + * WMI event handler unregister function for converged componets + * + * @param wmi_handle : handle to WMI. + * @param event_id : WMI event ID + * @return 0 on success and -ve on failure. + */ +int +wmi_unified_unregister_event(wmi_unified_t wmi_handle, + uint32_t event_id); + +/** + * WMI event handler unregister function + * + * @param wmi_handle : handle to WMI. + * @param event_id : WMI event ID + * @return 0 on success and -ve on failure. + */ +int +wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle, + wmi_conv_event_id event_id); + +/** + * wmi_unified_connect_htc_service() - WMI API to get connect to HTC service + * @wmi_handle: handle to WMI. + * @htc_handle: handle to HTC. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAULT for failure + */ +QDF_STATUS +wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle, + HTC_HANDLE htc_handle); + +/* + * WMI API to verify the host has enough credits to suspend + * @param wmi_handle : handle to WMI. + */ + +int wmi_is_suspend_ready(wmi_unified_t wmi_handle); + +/** + * WMI API to get updated host_credits + * @param wmi_handle : handle to WMI. + */ + +int wmi_get_host_credits(wmi_unified_t wmi_handle); + +/** + * WMI API to get WMI Pending Commands in the HTC queue + * @param wmi_handle : handle to WMI. + */ + +int wmi_get_pending_cmds(wmi_unified_t wmi_handle); + +/** + * WMI API to set target suspend state + * @param wmi_handle : handle to WMI. + * @param val : suspend state boolean + */ +void wmi_set_target_suspend(wmi_unified_t wmi_handle, bool val); + +/** + * wmi_is_target_suspended() - WMI API to check target suspend state + * @wmi_handle: handle to WMI. + * + * WMI API to check target suspend state + * + * Return: true if target is suspended, else false. + */ +bool wmi_is_target_suspended(struct wmi_unified *wmi_handle); + +#ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI +/** + * wmi_set_qmi_stats() - WMI API to set qmi stats enabled/disabled + * @wmi_handle: handle to WMI. + * @val: suspend state boolean + */ +void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val); + +/** + * wmi_is_qmi_stats_enabled() - WMI API to check if periodic stats + * over qmi is enableid + * @wmi_handle: handle to WMI. + * + * WMI API to check if periodic stats over qmi is enabled + * + * Return: true if qmi stats is enabled, else false. + */ +bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle); +#else +static inline +void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val) +{} + +static inline +bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle) +{ + return false; +} +#endif /* end if of WLAN_FEATURE_WMI_SEND_RECV_QMI */ + +/** + * WMI API to set bus suspend state + * @param wmi_handle: handle to WMI. + * @param val: suspend state boolean + */ +void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val); + +/** + * WMI API to set crash injection state + * @param wmi_handle: handle to WMI. + * @param val: crash injection state boolean + */ +void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag); + +/** + * WMI API to set target assert + * @param wmi_handle: handle to WMI. + * @param val: target assert config value. + * + * Return: none. + */ +void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val); + +/** + * generic function to block unified WMI command + * @param wmi_handle : handle to WMI. + * @return 0 on success and -ve on failure. + */ +int +wmi_stop(wmi_unified_t wmi_handle); + +/** + * generic function to start unified WMI command + * @param wmi_handle : handle to WMI. + * @return 0 on success and -ve on failure. + */ +int +wmi_start(wmi_unified_t wmi_handle); + +/** + * API to flush all the previous packets associated with the wmi endpoint + * + * @param wmi_handle : handle to WMI. + */ +void +wmi_flush_endpoint(wmi_unified_t wmi_handle); + +/** + * wmi_pdev_id_conversion_enable() - API to enable pdev_id and phy_id + * conversion in WMI. By default pdev_id and + * phyid conversion is not done in WMI. + * This API can be used enable conversion in WMI. + * @param wmi_handle : handle to WMI + * @param *pdev_id_map : pdev conversion map + * @param size : size of pdev_id_map + * Return none + */ +void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle, + uint32_t *pdev_id_map, + uint8_t size); + +/** + * API to handle wmi rx event after UMAC has taken care of execution + * context + * + * @param wmi_handle : handle to WMI. + * @param evt_buf : wmi event buffer + */ +void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf); +#ifdef FEATURE_RUNTIME_PM +void +wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, bool val); +bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle); +#else +static inline void +wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, bool val) +{ + return; +} +static inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle) +{ + return false; +} +#endif + +void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle); + +void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx); + +/** + * UMAC Callback to process fw event. + * @param wmi_handle : handle to WMI. + * @param evt_buf : wmi event buffer + */ +void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf); +uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle); + +/** + * wmi_unified_soc_set_hw_mode_cmd() - Send WMI_SOC_SET_HW_MODE_CMDID to FW + * @wmi_handle: wmi handle + * @hw_mode_index: The HW_Mode field is a enumerated type that is selected + * from the HW_Mode table, which is returned in the WMI_SERVICE_READY_EVENTID. + * + * Request HardWare (HW) Mode change to WLAN firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_soc_set_hw_mode_cmd(wmi_unified_t wmi_handle, + uint32_t hw_mode_index); + +/** + * wmi_extract_hw_mode_resp() - function to extract HW mode change response + * @wmi_hdl: WMI handle + * @evt_buf: Buffer holding event data + * @cmd_status: command status + * + * Return: QDF_STATUS_SUCCESS if success, else returns proper error code. + */ +QDF_STATUS +wmi_unified_extract_hw_mode_resp(wmi_unified_t wmi, + void *evt_buf, + uint32_t *cmd_status); + +/** + * wmi_unified_extract_roam_trigger_stats() - Extract roam trigger related + * stats + * @wmi: wmi handle + * @evt_buf: Pointer to the event buffer + * @trig: Pointer to destination structure to fill data + * @idx: TLV id + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_unified_extract_roam_trigger_stats(wmi_unified_t wmi, void *evt_buf, + struct wmi_roam_trigger_info *trig, + uint8_t idx); + +/** + * wmi_unified_extract_roam_scan_stats() - Extract roam scan stats from + * firmware + * @wmi: wmi handle + * @evt_buf: Pointer to the event buffer + * @dst: Pointer to destination structure to fill data + * @idx: TLV id + * @chan_idx: Index of the channel frequency for this roam trigger + * @ap_idx: Index of the candidate AP for this roam trigger + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_unified_extract_roam_scan_stats(wmi_unified_t wmi, void *evt_buf, + struct wmi_roam_scan_data *dst, uint8_t idx, + uint8_t chan_idx, uint8_t ap_idx); + +/** + * wmi_unified_extract_roam_result_stats() - Extract roam result related stats + * @wmi: wmi handle + * @evt_buf: Pointer to the event buffer + * @dst: Pointer to destination structure to fill data + * @idx: TLV id + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_unified_extract_roam_result_stats(wmi_unified_t wmi, void *evt_buf, + struct wmi_roam_result *dst, + uint8_t idx); + +/** + * wmi_unified_extract_roam_11kv_stats() - Extract BTM/Neigh report stats + * @wmi: wmi handle + * @evt_buf: Pointer to the event buffer + * @dst: Pointer to destination structure to fill data + * @idx: TLV id + * @rpt_idx: index of the current channel + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_unified_extract_roam_11kv_stats(wmi_unified_t wmi, void *evt_buf, + struct wmi_neighbor_report_data *dst, + uint8_t idx, uint8_t rpt_idx); +/** + * wmi_unified_extract_roam_msg_info() - Extract Roam msg stats + * @wmi: wmi handle + * @evt_buf: Pointer to the event buffer + * @dst: Pointer to destination structure to fill data + * @idx: TLV id + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_unified_extract_roam_msg_info(wmi_unified_t wmi, void *evt_buf, + struct wmi_roam_msg_info *dst, uint8_t idx); + +/** + * wmi_unified_vdev_create_send() - send VDEV create command to fw + * @wmi_handle: wmi handle + * @param: pointer to hold vdev create parameter + * @macaddr: vdev mac address + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_create_send(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct vdev_create_params *param); + +/** + * wmi_unified_vdev_delete_send() - send VDEV delete command to fw + * @wmi_handle: wmi handle + * @if_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_delete_send(wmi_unified_t wmi_handle, + uint8_t if_id); + +/** + * wmi_unified_vdev_nss_chain_params_send() - send VDEV nss chain params to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @nss_chains_user_cfg: user configured params to send + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_vdev_nss_chain_params_send( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct vdev_nss_chains *nss_chains_user_cfg); + +/** + * wmi_unified_vdev_stop_send() - send vdev stop command to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_stop_send(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +/** + * wmi_unified_vdev_up_send() - send vdev up command in fw + * @wmi_handle: wmi handle + * @bssid: bssid + * @params: pointer to hold vdev up parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_up_send(wmi_unified_t wmi_handle, + uint8_t bssid[QDF_MAC_ADDR_SIZE], + struct vdev_up_params *params); + +/** + * wmi_unified_vdev_down_send() - send vdev down command to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_down_send(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +/** + * wmi_unified_vdev_start_send() - send vdev start command to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_start_send(wmi_unified_t wmi_handle, + struct vdev_start_params *req); + +/** + * wmi_unified_vdev_set_nac_rssi_send() - send NAC_RSSI command to fw + * @wmi_handle: handle to WMI + * @req: pointer to hold nac rssi request data + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_vdev_set_nac_rssi_send(wmi_unified_t wmi_handle, + struct vdev_scan_nac_rssi_params *req); + +/** + * wmi_unified_vdev_set_param_send() - WMI vdev set parameter function + * @wmi_handle: handle to WMI. + * @macaddr: MAC address + * @param: pointer to hold vdev set parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_vdev_set_param_send(wmi_unified_t wmi_handle, + struct vdev_set_params *param); + +/** + * wmi_unified_sifs_trigger_send() - WMI vdev sifs trigger parameter function + * @wmi_handle: handle to WMI. + * @param: pointer to hold sifs trigger parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_sifs_trigger_send(wmi_unified_t wmi_handle, + struct sifs_trigger_param *param); + +/** + * wmi_unified_peer_delete_send() - send PEER delete command to fw + * @wmi_handle: wmi handle + * @peer_addr: peer mac addr + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_peer_delete_send(wmi_unified_t wmi_handle, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + uint8_t vdev_id); + +/** + * wmi_unified_peer_flush_tids_send() - flush peer tids packets in fw + * @wmi_handle: wmi handle + * @peer_addr: peer mac address + * @param: pointer to hold peer flush tid parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_peer_flush_tids_send(wmi_unified_t wmi_handle, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + struct peer_flush_params *param); + +/** + * wmi_unified_peer_delete_all_send() - send PEER delete all command to fw + * @wmi_hdl: wmi handle + * @param: pointer to hold peer delete all parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_peer_delete_all_send( + wmi_unified_t wmi_hdl, + struct peer_delete_all_params *param); + +/** + * wmi_set_peer_param() - set peer parameter in fw + * @wmi_handle: wmi handle + * @peer_addr: peer mac address + * @param: pointer to hold peer set parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_set_peer_param_send(wmi_unified_t wmi_handle, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + struct peer_set_params *param); + +/** + * wmi_unified_peer_create_send() - send peer create command to fw + * @wmi_handle: wmi handle + * @peer_addr: peer mac address + * @peer_type: peer type + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_peer_create_send(wmi_unified_t wmi_handle, + struct peer_create_params *param); + +QDF_STATUS wmi_unified_stats_request_send(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct stats_request_params *param); + +/** + * wmi_unified_green_ap_ps_send() - enable green ap powersave command + * @wmi_handle: wmi handle + * @value: value + * @pdev_id: pdev id to have radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_green_ap_ps_send(wmi_unified_t wmi_handle, + uint32_t value, uint8_t pdev_id); + +/** + * wmi_unified_wow_enable_send() - WMI wow enable function + * @wmi_handle: handle to WMI. + * @param: pointer to hold wow enable parameter + * @mac_id: radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_wow_enable_send(wmi_unified_t wmi_handle, + struct wow_cmd_params *param, + uint8_t mac_id); + +/** + * wmi_unified_wow_wakeup_send() - WMI wow wakeup function + * @wmi_handle: handle to WMI. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_wow_wakeup_send(wmi_unified_t wmi_handle); + +/** + * wmi_unified_wow_add_wakeup_event_send() - WMI wow wakeup function + * @wmi_handle: handle to WMI. + * @param: pointer to wow wakeup event parameter structure + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_wow_add_wakeup_event_send(wmi_unified_t wmi_handle, + struct wow_add_wakeup_params *param); + +/** + * wmi_unified_wow_add_wakeup_pattern_send() - WMI wow wakeup pattern function + * @wmi_handle: handle to WMI. + * @param: pointer to wow wakeup pattern parameter structure + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_wow_add_wakeup_pattern_send( + wmi_unified_t wmi_handle, + struct wow_add_wakeup_pattern_params *param); + +/** + * wmi_unified_wow_remove_wakeup_pattern_send() - wow wakeup pattern function + * @wmi_handle: handle to WMI. + * @param: pointer to wow wakeup pattern parameter structure + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_wow_remove_wakeup_pattern_send( + wmi_unified_t wmi_handle, + struct wow_remove_wakeup_pattern_params *param); + +/** + * wmi_unified_packet_log_enable_send() - WMI request stats function + * @wmi_handle : handle to WMI. + * @PKTLOG_EVENT : PKTLOG Event + * @mac_id : MAC id corresponds to pdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_packet_log_enable_send(wmi_unified_t wmi_handle, + WMI_HOST_PKTLOG_EVENT PKTLOG_EVENT, + uint8_t mac_id); + +/** + * wmi_unified_peer_based_pktlog_send() - WMI request enable peer + * based filtering + * @wmi_handle: handle to WMI. + * @macaddr: PEER mac address to be filtered + * @mac_id: Mac id + * @enb_dsb: Enable or Disable peer based pktlog + * filtering + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_peer_based_pktlog_send(wmi_unified_t wmi_handle, + uint8_t *macaddr, + uint8_t mac_id, + uint8_t enb_dsb); + +/** + * wmi_unified_packet_log_disable__send() - WMI pktlog disable function + * @wmi_handle: handle to WMI. + * @PKTLOG_EVENT: packet log event + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_packet_log_disable_send(wmi_unified_t wmi_handle, + uint8_t mac_id); + +/** + * wmi_unified_suspend_send() - WMI suspend function + * @wmi_handle: handle to WMI. + * @param: pointer to hold suspend parameter + * @mac_id: radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_suspend_send(wmi_unified_t wmi_handle, + struct suspend_params *param, + uint8_t mac_id); + +/** + * wmi_unified_resume_send - WMI resume function + * @wmi_handle : handle to WMI. + * @mac_id: radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_resume_send(wmi_unified_t wmi_handle, + uint8_t mac_id); + +/** + * wmi_unified_pdev_param_send() - set pdev parameters + * @wmi_handle: wmi handle + * @param: pointer to pdev parameter + * @mac_id: radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures, + * errno on failure + */ +QDF_STATUS +wmi_unified_pdev_param_send(wmi_unified_t wmi_handle, + struct pdev_params *param, + uint8_t mac_id); + +/** + * wmi_unified_fd_tmpl_send_cmd() - WMI FILS Discovery send function + * @wmi_handle: handle to WMI. + * @param: pointer to hold FILS Discovery send cmd parameter + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_fd_tmpl_send_cmd(wmi_unified_t wmi_handle, + struct fils_discovery_tmpl_params *param); + +/** + * wmi_unified_beacon_tmpl_send_cmd() - WMI beacon send function + * @wmi_handle: handle to WMI. + * @macaddr: MAC address + * @param: pointer to hold beacon send cmd parameter + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_beacon_tmpl_send_cmd(wmi_unified_t wmi_handle, + struct beacon_tmpl_params *param); + +/** + * wmi_unified_peer_assoc_send() - WMI peer assoc function + * @wmi_handle: handle to WMI. + * @macaddr: MAC address + * @param: pointer to peer assoc parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_peer_assoc_send(wmi_unified_t wmi_handle, + struct peer_assoc_params *param); + +/** + * wmi_unified_sta_ps_cmd_send() - set sta powersave parameters + * @wmi_handle: wmi handle + * @peer_addr: peer mac address + * @param: pointer to sta_ps parameter structure + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_sta_ps_cmd_send(wmi_unified_t wmi_handle, + struct sta_ps_params *param); + +/** + * wmi_unified_ap_ps_cmd_send() - set ap powersave parameters + * @wmi_handle: wmi handle + * @peer_addr: peer mac address + * @param: pointer to ap_ps parameter structure + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_ap_ps_cmd_send(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct ap_ps_params *param); + +/** + * wmi_unified_scan_start_cmd_send() - WMI scan start function + * @wmi_handle: handle to WMI. + * @macaddr: MAC address + * @param: pointer to hold scan start cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_scan_start_cmd_send(wmi_unified_t wmi_handle, + struct scan_req_params *param); + +/** + * wmi_unified_scan_stop_cmd_send() - WMI scan start function + * @wmi_handle: handle to WMI. + * @macaddr: MAC address + * @param: pointer to hold scan start cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_scan_stop_cmd_send(wmi_unified_t wmi_handle, + struct scan_cancel_param *param); + +/** + * wmi_unified_scan_chan_list_cmd_send() - WMI scan channel list function + * @wmi_handle: handle to WMI. + * @macaddr: MAC address + * @param: pointer to hold scan channel list parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_scan_chan_list_cmd_send(wmi_unified_t wmi_handle, + struct scan_chan_list_params *param); + + +/** + * wmi_crash_inject() - inject fw crash + * @wmi_handle: wmi handle + * @param: ponirt to crash inject parameter structure + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_crash_inject(wmi_unified_t wmi_handle, + struct crash_inject *param); + +/** + * wmi_unified_pdev_utf_cmd() - send utf command to fw + * @wmi_handle: wmi handle + * @param: pointer to pdev_utf_params + * @mac_id: mac id to have radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_pdev_utf_cmd_send(wmi_unified_t wmi_handle, + struct pdev_utf_params *param, + uint8_t mac_id); + +#ifdef FEATURE_FW_LOG_PARSING +/** + * wmi_unified_dbglog_cmd_send() - set debug log level + * @wmi_handle: handle to WMI. + * @param: pointer to hold dbglog level parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_dbglog_cmd_send(wmi_unified_t wmi_handle, + struct dbglog_params *param); +#else +static inline QDF_STATUS +wmi_unified_dbglog_cmd_send(wmi_unified_t wmi_handle, + struct dbglog_params *param) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * wmi_mgmt_unified_cmd_send() - management cmd over wmi layer + * @wmi_handle: handle to WMI. + * @param: pointer to hold mgmt cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_mgmt_unified_cmd_send(wmi_unified_t wmi_handle, + struct wmi_mgmt_params *param); + +/** + * wmi_offchan_data_tx_cmd_send() - Send offchan data tx cmd over wmi layer + * @wmi_handle: handle to WMI. + * @param: pointer to hold offchan data cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_offchan_data_tx_cmd_send( + wmi_unified_t wmi_handle, + struct wmi_offchan_data_tx_params *param); + +/** + * wmi_unified_modem_power_state() - set modem power state to fw + * @wmi_handle: wmi handle + * @param_value: parameter value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_modem_power_state(wmi_unified_t wmi_handle, + uint32_t param_value); + +/** + * wmi_unified_set_sta_ps_mode() - set sta powersave params in fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @val: value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure. + */ +QDF_STATUS wmi_unified_set_sta_ps_mode(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint8_t val); + +/** + * wmi_unified_set_sta_uapsd_auto_trig_cmd() - set uapsd auto trigger command + * @wmi_handle: wmi handle + * @param: uapsd cmd parameter strcture + * + * This function sets the trigger + * uapsd params such as service interval, delay interval + * and suspend interval which will be used by the firmware + * to send trigger frames periodically when there is no + * traffic on the transmit side. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure. + */ +QDF_STATUS +wmi_unified_set_sta_uapsd_auto_trig_cmd(wmi_unified_t wmi_handle, + struct sta_uapsd_trig_params *param); + +/** + * wmi_get_temperature() - get pdev temperature req + * @wmi_handle: wmi handle + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure. + */ +QDF_STATUS wmi_unified_get_temperature(wmi_unified_t wmi_handle); + +/** + * wmi_set_smps_params() - set smps params + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @value: value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure. + */ +QDF_STATUS wmi_unified_set_smps_params(wmi_unified_t wmi_handle, + uint8_t vdev_id, + int value); + +/** + * wmi_set_mimops() - set MIMO powersave + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @value: value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure. + */ +QDF_STATUS wmi_unified_set_mimops(wmi_unified_t wmi_handle, + uint8_t vdev_id, int value); + +/** + * wmi_unified_lro_config_cmd() - process the LRO config command + * @wmi_handle: Pointer to wmi handle + * @wmi_lro_cmd: Pointer to LRO configuration parameters + * + * This function sends down the LRO configuration parameters to + * the firmware to enable LRO, sets the TCP flags and sets the + * seed values for the toeplitz hash generation + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_lro_config_cmd(wmi_unified_t wmi_handle, + struct wmi_lro_config_cmd_t *wmi_lro_cmd); + +/** + * wmi_unified_set_thermal_mgmt_cmd() - set thermal mgmt command to fw + * @wmi_handle: Pointer to wmi handle + * @thermal_info: Thermal command information + * + * This function sends the thermal management command + * to the firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_set_thermal_mgmt_cmd(wmi_unified_t wmi_handle, + struct thermal_cmd_params *thermal_info); + +/** + * wmi_unified_peer_rate_report_cmd() - process the peer rate report command + * @wmi_handle: Pointer to wmi handle + * @rate_report_params: Pointer to peer rate report parameters + * + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + */ +QDF_STATUS +wmi_unified_peer_rate_report_cmd( + wmi_unified_t wmi_handle, + struct wmi_peer_rate_report_params *rate_report_params); + +/** + * wmi_unified_process_update_edca_param() - update EDCA params + * @wmi_handle: wmi handle + * @vdev_id: vdev id. + * @mu_edca_param: mu_edca_param. + * @wmm_vparams: edca parameters + * + * This function updates EDCA parameters to the target + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_process_update_edca_param( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + bool mu_edca_param, + struct wmi_host_wme_vparams wmm_vparams[WMI_MAX_NUM_AC]); + +/** + * wmi_unified_probe_rsp_tmpl_send_cmd() - send probe response template to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @probe_rsp_info: probe response info + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_probe_rsp_tmpl_send_cmd( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct wmi_probe_resp_params *probe_rsp_info); + +/** + * wmi_unified_setup_install_key_cmd - send key to install to fw + * @wmi_handle: wmi handle + * @key_params: key parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_setup_install_key_cmd(wmi_unified_t wmi_handle, + struct set_key_params *key_params); + +/** + * wmi_unified_get_pn_send_cmd() - send command to fw get PN for peer + * @wmi_handle: wmi handle + * @pn_params: PN parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_get_pn_send_cmd(wmi_unified_t wmi_hdl, + struct peer_request_pn_param *pn_params); + +/** + * wmi_unified_p2p_go_set_beacon_ie_cmd() - set beacon IE for p2p go + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @p2p_ie: p2p IE + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_p2p_go_set_beacon_ie_cmd(wmi_unified_t wmi_hdl, + uint32_t vdev_id, + uint8_t *p2p_ie); + +/** + * wmi_unified_scan_probe_setoui_cmd() - set scan probe OUI + * @wmi_handle: wmi handle + * @psetoui: OUI parameters + * + * set scan probe OUI parameters in firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_scan_probe_setoui_cmd(wmi_unified_t wmi_handle, + struct scan_mac_oui *psetoui); + +#ifdef IPA_OFFLOAD +/** wmi_unified_ipa_offload_control_cmd() - ipa offload control parameter + * @wmi_handle: wmi handle + * @ipa_offload: ipa offload control parameter + * + * Returns: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures, + * error number otherwise + */ +QDF_STATUS +wmi_unified_ipa_offload_control_cmd( + wmi_unified_t wmi_handle, + struct ipa_uc_offload_control_params *ipa_offload); +#endif + +/** + * wmi_unified_pno_stop_cmd() - PNO stop request + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * This function request FW to stop ongoing PNO operation. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_pno_stop_cmd(wmi_unified_t wmi_handle, uint8_t vdev_id); + +#ifdef FEATURE_WLAN_SCAN_PNO +/** + * wmi_unified_pno_start_cmd() - PNO start request + * @wmi_handle: wmi handle + * @pno: PNO request + * + * This function request FW to start PNO request. + * Request: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_pno_start_cmd(wmi_unified_t wmi_handle, + struct pno_scan_req_params *pno); +#endif + +/** + * wmi_unified_nlo_mawc_cmd() - NLO MAWC cmd configuration + * @wmi_handle: wmi handle + * @params: Configuration parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_nlo_mawc_cmd(wmi_unified_t wmi_handle, + struct nlo_mawc_params *params); + +#ifdef WLAN_FEATURE_LINK_LAYER_STATS +/** + * wmi_unified_process_ll_stats_clear_cmd() - clear link layer stats + * @wmi_handle: wmi handle + * @clear_req: ll stats clear request command params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_ll_stats_clear_cmd(wmi_unified_t wmi_handle, + const struct ll_stats_clear_params *clear_req); + +/** + * wmi_unified_process_ll_stats_set_cmd() - link layer stats set request + * @wmi_handle: wmi handle + * @set_req: ll stats set request command params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_ll_stats_set_cmd(wmi_unified_t wmi_handle, + const struct ll_stats_set_params *set_req); + +/** + * wmi_unified_process_ll_stats_get_cmd() - link layer stats get request + * @wmi_handle: wmi handle + * @get_req: ll stats get request command params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_ll_stats_get_cmd(wmi_unified_t wmi_handle, + const struct ll_stats_get_params *get_req); +#endif /* WLAN_FEATURE_LINK_LAYER_STATS */ + +/** + * wmi_unified_congestion_request_cmd() - send request to fw to get CCA + * @wmi_handle: wma handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_congestion_request_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +/** + * wmi_unified_snr_request_cmd() - send request to fw to get RSSI stats + * @wmi_handle: wmi handle + * @rssi_req: get RSSI request + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_snr_request_cmd(wmi_unified_t wmi_handle); + +/** + * wmi_unified_snr_cmd() - get RSSI from fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_snr_cmd(wmi_unified_t wmi_handle, uint8_t vdev_id); + +/** + * wmi_unified_link_status_req_cmd() - process link status request from UMAC + * @wmi_handle: wmi handle + * @params: get link status params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_link_status_req_cmd(wmi_unified_t wmi_handle, + struct link_status_params *params); + +#ifdef WLAN_SUPPORT_GREEN_AP +/** + * wmi_unified_egap_conf_params_cmd() - send wmi cmd of egap config params + * @wmi_handle: wmi handler + * @egap_params: pointer to egap_params + * + * Return: 0 for success, otherwise appropriate error code + */ +QDF_STATUS +wmi_unified_egap_conf_params_cmd( + wmi_unified_t wmi_handle, + struct wlan_green_ap_egap_params *egap_params); +#endif + +/** + * wmi_unified_csa_offload_enable() - send CSA offload enable command + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_csa_offload_enable(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +#ifdef WLAN_FEATURE_CIF_CFR +/** + * wmi_unified_oem_dma_ring_cfg() - configure OEM DMA rings + * @wmi_handle: wmi handle + * @data_len: len of dma cfg req + * @data: dma cfg req + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_oem_dma_ring_cfg(wmi_unified_t wmi_handle, + wmi_oem_dma_ring_cfg_req_fixed_param *cfg); +#endif + +/** + * wmi_unified_start_oem_data_cmd() - start oem data request to target + * @wmi_handle: wmi handle + * @data_len: the length of @data + * @data: the pointer to data buf + * + * This is legacy api for oem data request, using wmi command + * WMI_OEM_REQ_CMDID. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_start_oem_data_cmd(wmi_unified_t wmi_handle, + uint32_t data_len, + uint8_t *data); + +#ifdef FEATURE_OEM_DATA +/** + * wmi_unified_start_oemv2_data_cmd() - start oem data cmd to target + * @wmi_handle: wmi handle + * @params: oem data params + * + * This is common api for oem data, using wmi command WMI_OEM_DATA_CMDID. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_start_oemv2_data_cmd(wmi_unified_t wmi_handle, + struct oem_data *params); +#endif + +/** + * wmi_unified_dfs_phyerr_filter_offload_en_cmd() - enable dfs phyerr filter + * @wmi_handle: wmi handle + * @dfs_phyerr_filter_offload: is dfs phyerr filter offload + * + * Send WMI_DFS_PHYERR_FILTER_ENA_CMDID or + * WMI_DFS_PHYERR_FILTER_DIS_CMDID command + * to firmware based on phyerr filtering + * offload status. + * + * Return: 1 success, 0 failure + */ +QDF_STATUS +wmi_unified_dfs_phyerr_filter_offload_en_cmd(wmi_unified_t wmi_handle, + bool dfs_phyerr_filter_offload); + +#if !defined(REMOVE_PKT_LOG) && defined(FEATURE_PKTLOG) +/** + * wmi_unified_pktlog_wmi_send_cmd() - send pktlog event command to target + * @wmi_handle: wmi handle + * @pktlog_event: pktlog event + * @cmd_id: pktlog cmd id + * @user_triggered: user triggered input for PKTLOG enable mode + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_pktlog_wmi_send_cmd(wmi_unified_t wmi_handle, + WMI_PKTLOG_EVENT pktlog_event, + uint32_t cmd_id, + uint8_t user_triggered); +#endif + +/** + * wmi_unified_stats_ext_req_cmd() - request ext stats from fw + * @wmi_handle: wmi handle + * @preq: stats ext params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_stats_ext_req_cmd(wmi_unified_t wmi_handle, + struct stats_ext_params *preq); + +/** + * wmi_unified_process_dhcpserver_offload_cmd() - enable DHCP server offload + * @wmi_handle: wmi handle + * @pDhcpSrvOffloadInfo: DHCP server offload info + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_dhcpserver_offload_cmd( + wmi_unified_t wmi_handle, + struct dhcp_offload_info_params *params); + +/** + * wmi_unified_send_regdomain_info_to_fw_cmd() - send regdomain info to fw + * @wmi_handle: wmi handle + * @reg_dmn: reg domain + * @regdmn2G: 2G reg domain + * @regdmn5G: 5G reg domain + * @ctl2G: 2G test limit + * @ctl5G: 5G test limit + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_regdomain_info_to_fw_cmd(wmi_unified_t wmi_handle, + uint32_t reg_dmn, + uint16_t regdmn2G, + uint16_t regdmn5G, + uint8_t ctl2G, + uint8_t ctl5G); + +QDF_STATUS +wmi_unified_process_fw_mem_dump_cmd(wmi_unified_t wmi_hdl, + struct fw_dump_req_param *mem_dump_req); + +/** + * wmi_unified_cfg_action_frm_tb_ppdu_cmd()-send action frame TB PPDU cfg to FW + * @wmi_handle: Pointer to WMi handle + * @cfg_info: Pointer to cfg msg + * + * This function sends action frame TB PPDU cfg to firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + * + */ +QDF_STATUS +wmi_unified_cfg_action_frm_tb_ppdu_cmd( + wmi_unified_t wmi_handle, + struct cfg_action_frm_tb_ppdu_param *cfg_info); + +/** + * wmi_unified_save_fw_version_cmd() - save fw version + * @wmi_handle: pointer to wmi handle + * @evt_buf: Event buffer + * + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + * + */ +QDF_STATUS wmi_unified_save_fw_version_cmd(wmi_unified_t wmi_handle, + void *evt_buf); + +/** + * wmi_unified_log_supported_evt_cmd() - Enable/Disable FW diag/log events + * @wmi_handle: wmi handle + * @event: Event received from FW + * @len: Length of the event + * + * Enables the low frequency events and disables the high frequency + * events. Bit 17 indicates if the event if low/high frequency. + * 1 - high frequency, 0 - low frequency + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures + */ +QDF_STATUS wmi_unified_log_supported_evt_cmd(wmi_unified_t wmi_handle, + uint8_t *event, + uint32_t len); + +/** + * wmi_unified_enable_specific_fw_logs_cmd() - Start/Stop logging of diag log id + * @wmi_handle: wmi handle + * @start_log: Start logging related parameters + * + * Send the command to the FW based on which specific logging of diag + * event/log id can be started/stopped + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_enable_specific_fw_logs_cmd(wmi_unified_t wmi_handle, + struct wmi_wifi_start_log *start_log); + +/** + * wmi_unified_flush_logs_to_fw_cmd() - Send log flush command to FW + * @wmi_handle: WMI handle + * + * This function is used to send the flush command to the FW, + * that will flush the fw logs that are residue in the FW + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_flush_logs_to_fw_cmd(wmi_unified_t wmi_handle); + +/** + * wmi_unified_unit_test_cmd() - send unit test command to fw. + * @wmi_handle: wmi handle + * @wmi_utest: unit test command + * + * This function send unit test command to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_unit_test_cmd(wmi_unified_t wmi_handle, + struct wmi_unit_test_cmd *wmi_utest); + +#ifdef FEATURE_WLAN_APF +/** + * wmi_unified_set_active_apf_mode_cmd() - config active APF mode in FW + * @wmi: the WMI handle + * @vdev_id: the Id of the vdev to apply the configuration to + * @ucast_mode: the active APF mode to configure for unicast packets + * @mcast_bcast_mode: the active APF mode to configure for multicast/broadcast + * packets + */ +QDF_STATUS +wmi_unified_set_active_apf_mode_cmd(wmi_unified_t wmi, uint8_t vdev_id, + enum wmi_host_active_apf_mode ucast_mode, + enum wmi_host_active_apf_mode + mcast_bcast_mode); + +/** + * wmi_unified_send_apf_enable_cmd() - send apf enable/disable cmd + * @wmi: wmi handle + * @vdev_id: VDEV id + * @enable: true: enable, false: disable + * + * This function passes the apf enable command to fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_apf_enable_cmd(wmi_unified_t wmi, + uint32_t vdev_id, bool enable); + +/** + * wmi_unified_send_apf_write_work_memory_cmd() - send cmd to write into the APF + * work memory. + * @wmi: wmi handle + * @write_params: parameters and buffer pointer for the write + * + * This function passes the write apf work mem command to fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_apf_write_work_memory_cmd(wmi_unified_t wmi, + struct wmi_apf_write_memory_params *write_params); + +/** + * wmi_unified_send_apf_read_work_memory_cmd() - send cmd to read part of APF + * work memory + * @wmi: wmi handle + * @read_params: contains relative address and length to read from + * + * This function passes the read apf work mem command to fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_apf_read_work_memory_cmd(wmi_unified_t wmi, + struct wmi_apf_read_memory_params *read_params); + +/** + * wmi_extract_apf_read_memory_resp_event() - exctract read mem resp event + * @wmi: wmi handle + * @evt_buf: Pointer to the event buffer + * @resp: pointer to memory to extract event parameters into + * + * This function exctracts read mem response event into the given structure ptr + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_apf_read_memory_resp_event(wmi_unified_t wmi, void *evt_buf, + struct wmi_apf_read_memory_resp_event_params + *read_mem_evt); +#endif /* FEATURE_WLAN_APF */ + +/** + * wmi_send_get_user_position_cmd() - send get user position command to fw + * @wmi_handle: wmi handle + * @value: user pos value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_send_get_user_position_cmd(wmi_unified_t wmi_handle, uint32_t value); + +/** + * wmi_send_get_peer_mumimo_tx_count_cmd() - send get mumio tx count + * command to fw + * @wmi_handle: wmi handle + * @value: user pos value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_send_get_peer_mumimo_tx_count_cmd(wmi_unified_t wmi_handle, + uint32_t value); + +/** + * wmi_send_reset_peer_mumimo_tx_count_cmd() - send reset peer mumimo + * tx count to fw + * @wmi_handle: wmi handle + * @value: reset tx count value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_send_reset_peer_mumimo_tx_count_cmd(wmi_unified_t wmi_handle, + uint32_t value); + +/* + * wmi_unified_send_btcoex_wlan_priority_cmd() - send btcoex priority commands + * @wmi_handle: wmi handle + * @param: wmi btcoex cfg params + * + * Send WMI_BTCOEX_CFG_CMDID parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +wmi_unified_send_btcoex_wlan_priority_cmd(wmi_unified_t wmi_handle, + struct btcoex_cfg_params *param); + +/** + * wmi_unified_send_btcoex_duty_cycle_cmd() - send btcoex duty cycle commands + * @wmi_handle: wmi handle + * @param: wmi btcoex cfg params + * + * Send WMI_BTCOEX_CFG_CMDID parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +wmi_unified_send_btcoex_duty_cycle_cmd(wmi_unified_t wmi_handle, + struct btcoex_cfg_params *param); + +/** + * wmi_unified_send_coex_ver_cfg_cmd() - send coex ver cfg command + * @wmi_handle: wmi handle + * @param: wmi coex ver cfg params + * + * Send WMI_COEX_VERSION_CFG_CMID parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +wmi_unified_send_coex_ver_cfg_cmd(wmi_unified_t wmi_handle, + coex_ver_cfg_t *param); + +/** + * wmi_unified_send_coex_config_cmd() - send coex ver cfg command + * @wmi_handle: wmi handle + * @param: wmi coex cfg cmd params + * + * Send WMI_COEX_CFG_CMD parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +wmi_unified_send_coex_config_cmd(wmi_unified_t wmi_handle, + struct coex_config_params *param); + +/** + * wmi_unified_pdev_fips_cmd_send() - WMI pdev fips cmd function + * @wmi_handle: handle to WMI. + * @param: pointer to hold pdev fips param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_pdev_fips_cmd_send(wmi_unified_t wmi_handle, + struct fips_params *param); + +#ifdef WLAN_FEATURE_DISA +/** + * wmi_unified_encrypt_decrypt_send_cmd() - send encryptdecrypt cmd to fw + * @wmi_handle: wmi handle + * @params: encrypt/decrypt params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_encrypt_decrypt_send_cmd(void *wmi_hdl, + struct disa_encrypt_decrypt_req_params + *params); +#endif /* WLAN_FEATURE_DISA */ + +/** + * wmi_unified_wlan_profile_enable_cmd_send() - WMI wlan profile enable + * cmd function + * @wmi_handle: handle to WMI. + * @param: pointer to hold wlan profile param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_wlan_profile_enable_cmd_send(wmi_unified_t wmi_handle, + struct wlan_profile_params *param); + +/** + * wmi_unified_wlan_profile_trigger_cmd_send() - WMI wlan profile trigger + * cmd function + * @wmi_handle: handle to WMI. + * @param: pointer to hold wlan profile param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_wlan_profile_trigger_cmd_send(wmi_unified_t wmi_handle, + struct wlan_profile_params *param); + +/** + * wmi_unified_set_chan_cmd_send() - WMI set channel cmd function + * @wmi_handle: handle to WMI. + * @param: pointer to hold channel param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_set_chan_cmd_send(wmi_unified_t wmi_handle, + struct channel_param *param); + +/** + * wmi_unified_set_ratepwr_table_cmd_send() - WMI ratepwr table cmd function + * @wmi_handle: handle to WMI. + * @param: pointer to hold ratepwr table param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_set_ratepwr_table_cmd_send(wmi_unified_t wmi_handle, + struct ratepwr_table_params *param); + +/** + * wmi_unified_get_ratepwr_table_cmd_send() - WMI ratepwr table cmd function + * @wmi_handle: handle to WMI. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_get_ratepwr_table_cmd_send(wmi_unified_t wmi_handle); + +/** + * wmi_unified_set_ratepwr_chainmsk_cmd_send() - WMI ratepwr + * chainmsk cmd function + * @wmi_handle: handle to WMI. + * @param: pointer to hold ratepwr chainmsk param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_set_ratepwr_chainmsk_cmd_send(wmi_unified_t wmi_handle, + struct ratepwr_chainmsk_params + *param); + +/** + * wmi_unified_set_macaddr_cmd_send() - WMI set macaddr cmd function + * @wmi_handle: handle to WMI. + * @param: pointer to hold macaddr param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_macaddr_cmd_send(wmi_unified_t wmi_handle, + struct macaddr_params *param); + +/** + * wmi_unified_pdev_scan_start_cmd_send() - WMI pdev scan start cmd function + * @wmi_handle: handle to WMI. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_pdev_scan_start_cmd_send(wmi_unified_t wmi_handle); + +/** + * wmi_unified_pdev_scan_end_cmd_send() - WMI pdev scan end cmd function + * @wmi_handle: handle to WMI. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_pdev_scan_end_cmd_send(wmi_unified_t wmi_handle); + +/** + * wmi_unified_set_acparams_cmd_send() - WMI set acparams cmd function + * @wmi_handle: handle to WMI. + * @param: pointer to hold acparams param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_set_acparams_cmd_send(wmi_unified_t wmi_handle, + struct acparams_params *param); + +/** + * wmi_unified_set_vap_dscp_tid_map_cmd_send() - WMI set vap dscp + * tid map cmd function + * @wmi_handle: handle to WMI. + * @param: pointer to hold dscp param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_set_vap_dscp_tid_map_cmd_send( + wmi_unified_t wmi_handle, + struct vap_dscp_tid_map_params *param); + +/** + * wmi_unified_proxy_ast_reserve_cmd_send() - WMI proxy ast + * reserve cmd function + * @wmi_handle: handle to WMI. + * @param: pointer to hold ast param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_proxy_ast_reserve_cmd_send(wmi_unified_t wmi_handle, + struct proxy_ast_reserve_params *param); + +/** + * wmi_unified_set_bridge_mac_addr_cmd_send() - WMI set bridge mac + * addr cmd function + * @wmi_handle: handle to WMI. + * @param: pointer to hold bridge mac addr param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_bridge_mac_addr_cmd_send( + wmi_unified_t wmi_handle, + struct set_bridge_mac_addr_params *param); + +/** + * wmi_unified_phyerr_enable_cmd_send() - WMI phyerr enable cmd function + * @wmi_handle: handle to WMI. + * @param: pointer to hold phyerr enable param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_phyerr_enable_cmd_send(wmi_unified_t wmi_handle); + +/** + * wmi_unified_phyerr_disable_cmd_send() - WMI phyerr disable cmd function + * @wmi_handle: handle to WMI. + * @param: pointer to hold phyerr disable param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_phyerr_disable_cmd_send(wmi_unified_t wmi_handle); + +/** + * wmi_unified_smart_ant_enable_tx_feedback_cmd_send() - + * WMI set tx antenna function + * @wmi_handle: handle to WMI. + * @param: pointer to hold antenna param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_smart_ant_enable_tx_feedback_cmd_send( + wmi_unified_t wmi_handle, + struct smart_ant_enable_tx_feedback_params *param); + +/** + * wmi_unified_vdev_spectral_configure_cmd_send() - + * WMI set spectral config function + * @wmi_handle: handle to WMI. + * @param: pointer to hold spectral config param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_spectral_configure_cmd_send( + wmi_unified_t wmi_handle, + struct vdev_spectral_configure_params *param); + +/** + * wmi_unified_vdev_spectral_enable_cmd_send() - WMI enable spectral function + * @wmi_handle: handle to WMI. + * @param: pointer to hold enable spectral param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_spectral_enable_cmd_send( + wmi_unified_t wmi_handle, + struct vdev_spectral_enable_params *param); + +#if defined(WLAN_SUPPORT_FILS) || defined(CONFIG_BAND_6GHZ) +/** + * wmi_unified_vdev_fils_enable_cmd_send() - WMI send fils enable command + * @param wmi_handle: handle to WMI. + * @param config_fils_params: fils enable parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_vdev_fils_enable_cmd_send(struct wmi_unified *wmi_handle, + struct config_fils_params *param); +#endif + +/** + * wmi_unified_bss_chan_info_request_cmd_send() - WMI bss chan info + * request function + * @wmi_handle: handle to WMI. + * @param: pointer to hold chan info param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_bss_chan_info_request_cmd_send( + wmi_unified_t wmi_handle, + struct bss_chan_info_request_params *param); + +/** + * wmi_unified_thermal_mitigation_param_cmd_send() - + * WMI thermal mitigation function + * @wmi_handle: handle to WMI. + * @param: pointer to hold thermal mitigation param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_thermal_mitigation_param_cmd_send( + wmi_unified_t wmi_handle, + struct thermal_mitigation_params *param); + +/** + * wmi_unified_vdev_set_fwtest_param_cmd_send() - WMI set fwtest function + * @wmi_handle: handle to WMI. + * @param: pointer to hold fwtest param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_set_fwtest_param_cmd_send( + wmi_unified_t wmi_handle, + struct set_fwtest_params *param); + +/** + * wmi_unified_vdev_set_custom_aggr_size_cmd_send() - WMI set custom aggr + * size command + * @wmi_handle: handle to WMI. + * @param: pointer to hold custom aggr size param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_set_custom_aggr_size_cmd_send( + wmi_unified_t wmi_handle, + struct set_custom_aggr_size_params *param); + +/** + * wmi_unified_vdev_set_qdepth_thresh_cmd_send() - WMI set qdepth threshold + * @wmi_handle: handle to WMI. + * @param: pointer to hold set qdepth thresh param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_set_qdepth_thresh_cmd_send( + wmi_unified_t wmi_handle, + struct set_qdepth_thresh_params *param); + +/** + * wmi_unified_pdev_set_regdomain_params_cmd_send() - WMI set regdomain + * function + * @wmi_handle: handle to WMI. + * @param: pointer to hold regdomain param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_pdev_set_regdomain_cmd_send( + wmi_unified_t wmi_handle, + struct pdev_set_regdomain_params *param); + +/** + * wmi_unified_set_beacon_filter_cmd_send() - WMI set beacon filter function + * @wmi_handle: handle to WMI. + * @param: pointer to hold beacon filter param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_beacon_filter_cmd_send( + wmi_unified_t wmi_handle, + struct set_beacon_filter_params *param); + +/** + * wmi_unified_remove_beacon_filter_cmd_send() - WMI set beacon filter function + * @wmi_handle: handle to WMI. + * @param: pointer to hold beacon filter param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_remove_beacon_filter_cmd_send( + wmi_unified_t wmi_handle, + struct remove_beacon_filter_params *param); + +/** + * wmi_unified_addba_clearresponse_cmd_send() - WMI addba resp cmd function + * @wmi_handle: handle to WMI. + * @macaddr: MAC address + * @param: pointer to hold addba resp parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_addba_clearresponse_cmd_send( + wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct addba_clearresponse_params *param); + +/** + * wmi_unified_addba_send_cmd_send() - WMI addba send function + * @wmi_handle: handle to WMI. + * @macaddr: MAC address + * @param: pointer to hold addba parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_addba_send_cmd_send(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct addba_send_params *param); + +/** + * wmi_unified_delba_send_cmd_send() - WMI delba cmd function + * @wmi_handle: handle to WMI. + * @macaddr: MAC address + * @param: pointer to hold delba parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_delba_send_cmd_send(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct delba_send_params *param); + +/** + * wmi_unified_addba_setresponse_cmd_send() - WMI addba set resp cmd function + * @wmi_handle: handle to WMI. + * @macaddr: MAC address + * @param: pointer to hold addba set resp parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_addba_setresponse_cmd_send(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct addba_setresponse_params *param); + +/** + * wmi_unified_singleamsdu_cmd_send() - WMI singleamsdu function + * @wmi_handle: handle to WMI. + * @macaddr: MAC address + * @param: pointer to hold singleamsdu parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_singleamsdu_cmd_send(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct singleamsdu_params *param); + +/** + * wmi_unified_mu_scan_cmd_send() - WMI set mu scan function + * @wmi_handle: handle to WMI. + * @param: pointer to hold mu scan param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_mu_scan_cmd_send(wmi_unified_t wmi_handle, + struct mu_scan_params *param); + +/** + * wmi_unified_lteu_config_cmd_send() - WMI set mu scan function + * @wmi_handle: handle to WMI. + * @param: pointer to hold mu scan param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_lteu_config_cmd_send(wmi_unified_t wmi_handle, + struct lteu_config_params *param); + +/** + * wmi_unified_set_psmode_cmd_send() - WMI set mu scan function + * @wmi_handle: handle to WMI. + * @param: pointer to hold mu scan param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_set_psmode_cmd_send(wmi_unified_t wmi_handle, + struct set_ps_mode_params *param); + +/** + * wmi_unified_init_cmd_send() - send initialization cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to wmi init param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_init_cmd_send(wmi_unified_t wmi_handle, + struct wmi_init_cmd_param *param); + +/** + * wmi_service_enabled() - Check if service enabled + * @wmi_handle: wmi handle + * @service_id: service identifier + * + * Return: 1 enabled, 0 disabled + */ +bool wmi_service_enabled(wmi_unified_t wmi_handle, uint32_t service_id); + +/** + * wmi_save_service_bitmap() - save service bitmap + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @bitmap_buf: bitmap buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS failure code + */ +QDF_STATUS wmi_save_service_bitmap(wmi_unified_t wmi_handle, void *evt_buf, + void *bitmap_buf); + +/** + * wmi_save_ext_service_bitmap() - save extended service bitmap + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS failure code + */ +QDF_STATUS wmi_save_ext_service_bitmap(wmi_unified_t wmi_handle, void *evt_buf, + void *bitmap_buf); + +/** + * wmi_save_fw_version() - Save fw version + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_save_fw_version(wmi_unified_t wmi_handle, void *evt_buf); + +/** + * wmi_get_target_cap_from_service_ready() - extract service ready event + * @wmi_handle: wmi handle + * @evt_buf: pointer to received event buffer + * @ev: pointer to hold target capability information extracted from even + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_get_target_cap_from_service_ready( + wmi_unified_t wmi_handle, void *evt_buf, + struct wlan_psoc_target_capability_info *ev); + +/** + * wmi_extract_hal_reg_cap() - extract HAL registered capabilities + * @wmi_handle: wmi handle + * @evt_buf: Pointer to event buffer + * @hal_reg_cap: pointer to hold HAL reg capabilities + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_hal_reg_cap(wmi_unified_t wmi_handle, void *evt_buf, + struct wlan_psoc_hal_reg_capability *hal_reg_cap); + +/** + * wmi_extract_num_mem_reqs_from_service_ready() - Extract number of memory + * entries requested + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * + * Return: Number of entries requested + */ +uint32_t wmi_extract_num_mem_reqs_from_service_ready( + wmi_unified_t wmi_handle, + void *evt_buf); + +/** + * wmi_extract_host_mem_req_from_service_ready() - Extract host memory + * request event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @mem_reqs: pointer to host memory request structure + * @num_active_peers: number of active peers for peer cache + * @num_peers: number of peers + * @fw_prio: FW priority + * @idx: Index for memory request + * + * Return: Host memory request parameters requested by target + */ +QDF_STATUS wmi_extract_host_mem_req_from_service_ready( + wmi_unified_t wmi_handle, void *evt_buf, host_mem_req *mem_reqs, + uint32_t num_active_peers, uint32_t num_peers, + enum wmi_fw_mem_prio fw_prio, uint16_t idx); + +/** + * wmi_ready_extract_init_status() - Extract init status from ready event + * @wmi_handle: wmi handle + * @ev: Pointer to event buffer + * + * Return: ready status + */ +uint32_t wmi_ready_extract_init_status(wmi_unified_t wmi_handle, void *ev); + +/** + * wmi_ready_extract_mac_addr() - extract mac address from ready event + * @wmi_handle: wmi handle + * @ev: pointer to event buffer + * @macaddr: Pointer to hold MAC address + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_ready_extract_mac_addr(wmi_unified_t wmi_handle, + void *ev, uint8_t *macaddr); + +/** + * wmi_ready_extract_mac_addr() - extract MAC address list from ready event + * @wmi_handle: wmi handle + * @ev: pointer to event buffer + * @num_mac_addr: Pointer to number of entries + * + * Return: address to start of mac addr list + */ +wmi_host_mac_addr +*wmi_ready_extract_mac_addr_list(wmi_unified_t wmi_handle, void *ev, + uint8_t *num_mac_addr); + +/** + * wmi_extract_ready_params() - Extract data from ready event apart from + * status, macaddr and version. + * @wmi_handle: Pointer to WMI handle. + * @evt_buf: Pointer to Ready event buffer. + * @ev_param: Pointer to host defined struct to copy the data from event. + * + * Return: QDF_STATUS_SUCCESS on success. + */ +QDF_STATUS wmi_extract_ready_event_params( + wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_ready_ev_param *ev_param); + +/** + * wmi_extract_fw_version() - extract fw version + * @wmi_handle: wmi handle + * @ev: pointer to event buffer + * @fw_ver: Pointer to hold fw version + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_fw_version(wmi_unified_t wmi_handle, void *ev, + struct wmi_host_fw_ver *fw_ver); + +/** + * wmi_extract_fw_abi_version() - extract fw abi version + * @wmi_handle: wmi handle + * @ev: Pointer to event buffer + * @fw_ver: Pointer to hold fw abi version + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_fw_abi_version(wmi_unified_t wmi_handle, void *ev, + struct wmi_host_fw_abi_ver *fw_ver); + +/** + * wmi_check_and_update_fw_version() - Ready and fw version check + * @wmi_handle: wmi handle + * @ev: pointer to event buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_check_and_update_fw_version(wmi_unified_t wmi_handle, void *ev); + +/** + * wmi_extract_dbglog_data_len() - extract debuglog data length + * @wmi_handle: wmi handle + * @evt_b: pointer to event buffer + * @len: length of buffer + * + * Return: length + */ +uint8_t *wmi_extract_dbglog_data_len(wmi_unified_t wmi_handle, + void *evt_b, uint32_t *len); + +/** + * wmi_send_ext_resource_config() - send extended resource configuration + * @wmi_handle: wmi handle + * @ext_cfg: pointer to extended resource configuration + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_send_ext_resource_config(wmi_unified_t wmi_handle, + wmi_host_ext_resource_config *ext_cfg); + +/** + * wmi_unified_rtt_meas_req_test_cmd_send() - WMI rtt meas req test function + * @wmi_handle: handle to WMI. + * @param: pointer to hold rtt meas req test param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_rtt_meas_req_test_cmd_send(wmi_unified_t wmi_handle, + struct rtt_meas_req_test_params *param); + +/** + * wmi_unified_rtt_meas_req_cmd_send() - WMI rtt meas req function + * @wmi_handle: handle to WMI. + * @param: pointer to hold rtt meas req param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_rtt_meas_req_cmd_send(wmi_unified_t wmi_handle, + struct rtt_meas_req_params *param); + +/** + * wmi_unified_rtt_keepalive_req_cmd_send() - WMI rtt meas req test function + * @wmi_handle: handle to WMI. + * @param: pointer to hold rtt meas req test param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_rtt_keepalive_req_cmd_send(wmi_unified_t wmi_handle, + struct rtt_keepalive_req_params *param); + +/** + * wmi_unified_lci_set_cmd_send() - WMI lci set function + * @wmi_handle: handle to WMI. + * @param: pointer to hold lci param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_lci_set_cmd_send(wmi_unified_t wmi_handle, + struct lci_set_params *param); + +/** + * wmi_unified_lcr_set_cmd_send() - WMI lcr set function + * @wmi_handle: handle to WMI. + * @param: pointer to hold lcr param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_lcr_set_cmd_send(wmi_unified_t wmi_handle, + struct lcr_set_params *param); + +/** + * wmi_unified_extract_pn() - extract pn event data + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: pointer to get pn event param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_extract_pn(wmi_unified_t wmi_hdl, void *evt_buf, + struct wmi_host_get_pn_event *param); + +/** + * wmi_unified_send_periodic_chan_stats_config_cmd() - send periodic chan + * stats cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold periodic chan stats param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_periodic_chan_stats_config_cmd( + wmi_unified_t wmi_handle, + struct periodic_chan_stats_params *param); + +/* Extract APIs */ + +/** + * wmi_extract_fips_event_data() - extract fips event data + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: pointer to FIPS event param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_fips_event_data(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_fips_event_param *param); + +#ifdef WLAN_FEATURE_DISA +/** + * wmi_extract_encrypt_decrypt_resp_params() - + * extract encrypt decrypt resp params from event buffer + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @resp: encrypt decrypt resp params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +wmi_extract_encrypt_decrypt_resp_params(void *wmi_hdl, void *evt_buf, + struct disa_encrypt_decrypt_resp_params + *param); +#endif /* WLAN_FEATURE_DISA */ + +/** + * wmi_extract_mgmt_rx_params() - extract management rx params from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @hdr: Pointer to hold header + * @bufp: Pointer to hold pointer to rx param buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_mgmt_rx_params(wmi_unified_t wmi_handle, void *evt_buf, + struct mgmt_rx_event_params *hdr, uint8_t **bufp); + +/** + * wmi_extract_vdev_roam_param() - extract vdev roam param from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @ev: Pointer to hold roam param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_vdev_roam_param(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_roam_event *ev); + +/** + * wmi_extract_vdev_scan_ev_param() - extract vdev scan param from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to hold vdev scan param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_vdev_scan_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + struct scan_event *param); + +#ifdef FEATURE_WLAN_SCAN_PNO +/** + * wmi_extract_nlo_match_ev_param() - extract NLO match param from event + * @wmi_handle: pointer to WMI handle + * @evt_buf: pointer to WMI event buffer + * @param: pointer to scan event param for NLO match + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +wmi_extract_nlo_match_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + struct scan_event *param); + +/** + * wmi_extract_nlo_complete_ev_param() - extract NLO complete param from event + * @wmi_handle: pointer to WMI handle + * @evt_buf: pointer to WMI event buffer + * @param: pointer to scan event param for NLO complete + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +wmi_extract_nlo_complete_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + struct scan_event *param); +#endif + +/** + * wmi_extract_mu_ev_param() - extract mu param from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to hold mu report + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_mu_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_mu_report_event *param); + +/** + * wmi_extract_mu_db_entry() - extract mu db entry from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @idx: index + * @param: Pointer to hold mu db entry + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_mu_db_entry(wmi_unified_t wmi_handle, void *evt_buf, + uint8_t idx, wmi_host_mu_db_entry *param); + +/** + * wmi_extract_mumimo_tx_count_ev_param() - extract mumimo tx count from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to hold mumimo tx count + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_mumimo_tx_count_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_peer_txmu_cnt_event *param); + +/** + * wmi_extract_peer_gid_userpos_list_ev_param() - extract peer userpos list + * from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to hold peer gid userposition list + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_peer_gid_userpos_list_ev_param( + wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_peer_gid_userpos_list_event *param); + +/** + * wmi_extract_esp_estimate_ev_param() - extract air time from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to hold esp event + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_esp_estimate_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + struct esp_estimation_event *param); + +/** + * wmi_extract_gpio_input_ev_param() - extract gpio input param from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @gpio_num: Pointer to hold gpio number + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_gpio_input_ev_param(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *gpio_num); + +/** + * wmi_extract_pdev_reserve_ast_ev_param() - extract reserve ast entry + * param from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to hold reserve ast entry param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_pdev_reserve_ast_ev_param( + wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_proxy_ast_reserve_param *param); +/** + * wmi_extract_pdev_generic_buffer_ev_param() - extract pdev generic buffer + * from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to generic buffer param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_pdev_generic_buffer_ev_param( + wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_pdev_generic_buffer_event *param); + +/** + * wmi_extract_peer_ratecode_list_ev() - extract peer ratecode from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @peer_mac: Pointer to hold peer mac address + * @pdev_id: Pointer to hold pdev_id + * @rate_cap: Pointer to hold ratecode + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_peer_ratecode_list_ev( + wmi_unified_t wmi_handle, void *evt_buf, + uint8_t *peer_mac, uint32_t *pdev_id, + wmi_sa_rate_cap *rate_cap); + +/** + * wmi_extract_bcnflt_stats() - extract bcn fault stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @index: Index into bcn fault stats + * @bcnflt_stats: Pointer to hold bcn fault stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_bcnflt_stats( + wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_bcnflt_stats *bcnflt_stats); + +/** + * wmi_extract_rtt_hdr() - extract rtt header from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @ev: Pointer to hold rtt header + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_rtt_hdr(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_rtt_event_hdr *ev); + +/** + * wmi_extract_rtt_ev() - extract rtt event + * @wmi_handle: wmi handle + * @evt_buf: Pointer to event buffer + * @ev: Pointer to hold rtt event + * @hdump: Pointer to hold hex dump + * @hdump_len: hex dump length + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_rtt_ev(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_rtt_meas_event *ev, + uint8_t *hdump, uint16_t hdump_len); + +/** + * wmi_extract_rtt_error_report_ev() - extract rtt error report from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @ev: Pointer to hold rtt error report + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_rtt_error_report_ev(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_rtt_error_report_event *ev); + +/** + * wmi_extract_chan_stats() - extract chan stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @index: Index into chan stats + * @chan_stats: Pointer to hold chan stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_chan_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_chan_stats *chan_stats); + +/** + * wmi_extract_thermal_stats() - extract thermal stats from event + * @wmi_handle: wmi handle + * @evt_buf: Pointer to event buffer + * @temp: Pointer to hold extracted temperature + * @level: Pointer to hold extracted level + * @pdev_id: Pointer to hold extracted pdev_id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_thermal_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *temp, uint32_t *level, + uint32_t *pdev_id); + +/** + * wmi_extract_thermal_level_stats() - extract thermal level stats from + * event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @idx: Index to level stats + * @levelcount: Pointer to hold levelcount + * @dccount: Pointer to hold dccount + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_thermal_level_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint8_t idx, uint32_t *levelcount, + uint32_t *dccount); + +/** + * wmi_extract_comb_phyerr() - extract comb phy error from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @datalen: data length of event buffer + * @buf_offset: Pointer to hold value of current event buffer offset + * post extraction + * @phyerr: Pointer to hold phyerr + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_comb_phyerr(wmi_unified_t wmi_handle, void *evt_buf, + uint16_t datalen, uint16_t *buf_offset, + wmi_host_phyerr_t *phyerr); + +/** + * wmi_extract_single_phyerr() - extract single phy error from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @datalen: data length of event buffer + * @buf_offset: Pointer to hold value of current event buffer offset + * post extraction + * @phyerr: Pointer to hold phyerr + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_single_phyerr(wmi_unified_t wmi_handle, void *evt_buf, + uint16_t datalen, uint16_t *buf_offset, + wmi_host_phyerr_t *phyerr); + +/** + * wmi_extract_composite_phyerr() - extract composite phy error from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @datalen: Length of event buffer + * @phyerr: Pointer to hold phy error + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_composite_phyerr(wmi_unified_t wmi_handle, void *evt_buf, + uint16_t datalen, wmi_host_phyerr_t *phyerr); + +/** + * wmi_extract_profile_ctx() - extract profile context from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @profile_ctx: Pointer to hold profile context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_profile_ctx(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_wlan_profile_ctx_t *profile_ctx); + +/** + * wmi_extract_profile_data() - extract profile data from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @idx: index of profile data + * @profile_data: Pointer to hold profile data + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_profile_data(wmi_unified_t wmi_handle, void *evt_buf, uint8_t idx, + wmi_host_wlan_profile_t *profile_data); + +/** + * wmi_extract_stats_param() - extract all stats count from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @stats_param: Pointer to hold stats count + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_stats_param(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_stats_event *stats_param); + +/** + * wmi_extract_pdev_stats() - extract pdev stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @index: Index into pdev stats + * @pdev_stats: Pointer to hold pdev stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_pdev_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_pdev_stats *pdev_stats); + +/** + * extract_unit_test() - extract unit test from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @unit_test: Pointer to hold unit-test header + * @maxspace: The amount of space in evt_buf + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_unit_test(wmi_unified_t wmi_handle, void *evt_buf, + wmi_unit_test_event *unit_test, uint32_t maxspace); + +/** + * wmi_extract_pdev_ext_stats() - extract extended pdev stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @index: Index into extended pdev stats + * @pdev_ext_stats: Pointer to hold extended pdev stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_pdev_ext_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, + wmi_host_pdev_ext_stats *pdev_ext_stats); + +/** + * wmi_extract_peer_extd_stats() - extract extended peer stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @index: Index into extended peer stats + * @peer_extd_stats: Pointer to hold extended peer stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_peer_extd_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, + wmi_host_peer_extd_stats *peer_extd_stats); + +/** + * wmi_extract_peer_adv_stats() - extract advance (extd2) peer stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @peer_adv_stats: Pointer to hold extended peer stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_peer_adv_stats( + wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_peer_adv_stats *peer_adv_stats); + +/** + * wmi_extract_bss_chan_info_event() - extract bss channel information + * from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @bss_chan_info: Pointer to hold bss channel information + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_bss_chan_info_event( + wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_pdev_bss_chan_info_event *bss_chan_info); + +/** + * wmi_extract_peer_stats() - extract peer stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @index: Index into peer stats + * @peer_stats: Pointer to hold peer stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_peer_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_peer_stats *peer_stats); + +/** + * wmi_extract_tx_data_traffic_ctrl_ev() - extract tx data traffic control + * from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @ev: Pointer to hold data traffic control + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_tx_data_traffic_ctrl_ev(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_tx_data_traffic_ctrl_event *ev); + +/** + * wmi_extract_vdev_stats() - extract vdev stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @index: Index into vdev stats + * @vdev_stats: Pointer to hold vdev stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_vdev_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_vdev_stats *vdev_stats); + +/** + * wmi_extract_per_chain_rssi_stats() - extract rssi stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @index: Index into rssi stats + * @rssi_stats: Pointer to hold rssi stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_per_chain_rssi_stats( + wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, + struct wmi_host_per_chain_rssi_stats *rssi_stats); + +#ifdef WLAN_FEATURE_MIB_STATS +/** + * wmi_extract_mib_stats() - extract mib stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @mib_stats: pointer to hold mib stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_mib_stats(wmi_unified_t wmi_handle, void *evt_buf, + struct mib_stats_metrics *mib_stats); +#endif + +/** + * wmi_extract_vdev_extd_stats() - extract extended vdev stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @index: Index into extended vdev stats + * @vdev_extd_stats: Pointer to hold extended vdev stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_vdev_extd_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, + wmi_host_vdev_extd_stats *vdev_extd_stats); + +/** + * wmi_extract_bcn_stats() - extract beacon stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @index: Index into beacon stats + * @vdev_bcn_stats: Pointer to hold beacon stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_bcn_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_bcn_stats *vdev_bcn_stats); + +/** + * wmi_extract_vdev_nac_rssi_stats() - extract NAC_RSSI stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @vdev_extd_stats: Pointer to hold nac rssi stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_vdev_nac_rssi_stats( + wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_vdev_nac_rssi_event *vdev_nac_rssi_stats); + +/** + * wmi_extract_peer_retry_stats() - extract peer retry stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @index: Index into peer retry stats + * @peer_retry_stats: Pointer to hold peer retry stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_peer_retry_stats( + wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, struct wmi_host_peer_retry_stats *peer_retry_stats); + +/** + * wmi_unified_send_power_dbg_cmd() - send power debug commands + * @wmi_handle: wmi handle + * @param: wmi power debug parameter + * + * Send WMI_POWER_DEBUG_CMDID parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_power_dbg_cmd(wmi_unified_t wmi_handle, + struct wmi_power_dbg_params *param); + +/** + * wmi_extract_sar_cap_service_ready_ext() - extract SAR cap from + * FW service ready event + * @wmi_handle: wmi handle + * @evt_buf: event buffer received from firmware + * @ext_param: extended target info + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_extract_sar_cap_service_ready_ext( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wlan_psoc_host_service_ext_param *ext_param); + +/** + * wmi_unified_fw_test_cmd() - send fw test command to fw. + * @wmi_handle: wmi handle + * @wmi_fwtest: fw test command + * + * This function sends fw test command to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_fw_test_cmd(wmi_unified_t wmi_handle, + struct set_fwtest_params *wmi_fwtest); + +/** + * wmi_unified_peer_rx_reorder_queue_setup_send() - send rx reorder queue + * setup command to fw + * @wmi_handle: wmi handle + * @param: Rx reorder queue setup parameters + * + * Return: QDF_STATUS for success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_peer_rx_reorder_queue_setup_send( + wmi_unified_t wmi_handle, + struct rx_reorder_queue_setup_params *param); + +/** + * wmi_unified_peer_rx_reorder_queue_remove_send() - send rx reorder queue + * remove command to fw + * @wmi_handle: wmi handle + * @param: Rx reorder queue remove parameters + * + * Return: QDF_STATUS for success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_peer_rx_reorder_queue_remove_send( + wmi_unified_t wmi_handle, + struct rx_reorder_queue_remove_params *param); + +/* + * wmi_extract_service_ready_ext() - extract extended service ready + * @wmi_handle: wmi handle + * @param: wmi power debug parameter + * + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_extract_service_ready_ext( + wmi_unified_t wmi_handle, uint8_t *evt_buf, + struct wlan_psoc_host_service_ext_param *param); + +/* + * wmi_extract_service_ready_ext2() - extract extended2 service ready + * @wmi_handle: wmi handle + * @evt_buff: pointer to event buffer + * @param: wmi ext2 base parameters + * + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_extract_service_ready_ext2( + struct wmi_unified *wmi_handle, uint8_t *evt_buf, + struct wlan_psoc_host_service_ext2_param *param); + +/** + * wmi_extract_hw_mode_cap_service_ready_ext() - + * extract HW mode cap from service ready event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @hw_mode_idx: hw mode idx should be less than num_mode + * @param: Pointer to hold evt buf + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_extract_hw_mode_cap_service_ready_ext( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t hw_mode_idx, + struct wlan_psoc_host_hw_mode_caps *param); + +/** + * wmi_extract_mac_phy_cap_service_ready_ext() - + * extract MAC phy cap from service ready event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @hw_mode_id: hw mode id of hw_mode_caps + * @phy_id: phy_id within hw_mode_cap + * @param: pointer to mac phy caps structure to hold the values from event + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_extract_mac_phy_cap_service_ready_ext( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + uint8_t hw_mode_id, + uint8_t phy_id, + struct wlan_psoc_host_mac_phy_caps *param); + +/** + * wmi_extract_reg_cap_service_ready_ext() - + * extract REG cap from service ready event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @phy_idx: phy idx should be less than num_mode + * @param: Pointer to hold evt buf + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +wmi_extract_reg_cap_service_ready_ext( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t phy_idx, + struct wlan_psoc_host_hal_reg_capabilities_ext *param); + +/** + * wmi_extract_dbr_ring_cap_service_ready_ext: Extract direct buffer rx + * capability received through + * extended service ready event + * @wmi_handle: WMI handle + * @evt_buf: Event buffer + * @idx: Index of the module for which capability is received + * @param: Pointer to direct buffer rx ring cap struct + * + * Return: QDF status of operation + */ +QDF_STATUS wmi_extract_dbr_ring_cap_service_ready_ext( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct wlan_psoc_host_dbr_ring_caps *param); + +/** + * wmi_extract_dbr_ring_cap_service_ready_ext2: Extract direct buffer rx + * capability received through + * extended service ready2 event + * @wmi_handle: WMI handle + * @evt_buf: Event buffer + * @idx: Index of the module for which capability is received + * @param: Pointer to direct buffer rx ring cap struct + * + * Return: QDF status of operation + */ +QDF_STATUS wmi_extract_dbr_ring_cap_service_ready_ext2( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct wlan_psoc_host_dbr_ring_caps *param); + +/** + * wmi_extract_spectral_scaling_params_service_ready_ext: Extract Spectral + * scaling params received through + * extended service ready event + * @wmi_handle: WMI handle + * @evt_buf: Event buffer + * @idx: Index + * @param: Pointer to Spectral scaling params + * + * Return: QDF status of operation + */ +QDF_STATUS wmi_extract_spectral_scaling_params_service_ready_ext( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct wlan_psoc_host_spectral_scaling_params *param); + +/** + * wmi_extract_pdev_utf_event() - + * extract UTF data from pdev utf event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to hold evt buf + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_extract_pdev_utf_event(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_host_pdev_utf_event *param); + +/** + * wmi_extract_pdev_qvit_event() - + * extract UTF data from pdev qvit event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to hold evt buf + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_extract_pdev_qvit_event(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_host_pdev_qvit_event *param); + +#ifdef WLAN_SUPPORT_RF_CHARACTERIZATION +/** + * wmi_extract_num_rf_characterziation_entries - Extract number of RF + * characterization metrics received from the RF characterization event. + * @wmi_hdl: WMI handle + * @evt_buf: Event buffer + * @num_rf_characterization_entries: Number of RF characterization metrics + * + * Return: QDF status of operation + */ +QDF_STATUS wmi_extract_num_rf_characterization_entries(wmi_unified_t wmi_hdl, + uint8_t *evt_buf, + uint32_t *num_rf_characterization_entries); + +/** + * wmi_extract_rf_characterziation_entries - Extract RF characterization metrics + * received from the RF characterization event. + * @wmi_hdl: WMI handle + * @evt_buf: Event buffer + * @num_rf_characterization_entries: Number of RF characterization metrics + * @rf_characterization_entries: Pointer to RF characterization metrics + * + * Return: QDF status of operation + */ +QDF_STATUS wmi_extract_rf_characterization_entries(wmi_unified_t wmi_hdl, + uint8_t *evt_buf, + uint32_t num_rf_characterization_entries, + struct wmi_host_rf_characterization_event_param *rf_characterization_entries); +#endif + +/* + * wmi_extract_chainmask_tables_tlv() - extract chain mask tables + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer. + * @chainmask_table: pointer to struct wlan_psoc_host_chainmask_table + * + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_extract_chainmask_tables( + wmi_unified_t wmi_handle, uint8_t *evt_buf, + struct wlan_psoc_host_chainmask_table *chainmask_table); + +/** + * wmi_unified_dfs_phyerr_offload_en_cmd() - enable dfs phyerr offload + * @wmi_handle: wmi handle + * @pdev_id: pdev id + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_unified_dfs_phyerr_offload_en_cmd(wmi_unified_t wmi_handle, + uint32_t pdev_id); + +/** + * wmi_unified_dfs_phyerr_offload_dis_cmd() - disable dfs phyerr offload + * @wmi_handle: wmi handle + * @pdev_id: pdev id + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_unified_dfs_phyerr_offload_dis_cmd(wmi_unified_t wmi_handle, + uint32_t pdev_id); + +#ifdef QCA_SUPPORT_AGILE_DFS +/** + * wmi_unified_send_vdev_adfs_ch_cfg_cmd() - send adfs channel config command + * @wmi_handle: wmi handle + * @param: adfs channel config params + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_unified_send_vdev_adfs_ch_cfg_cmd(wmi_unified_t wmi_handle, + struct vdev_adfs_ch_cfg_params *param); + +/** + * wmi_unified_send_vdev_adfs_ocac_abort_cmd() - send adfs o-cac abort command + * @wmi_handle: wmi handle + * @param: adfs channel o-cac abort params + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_unified_send_vdev_adfs_ocac_abort_cmd(wmi_unified_t wmi_handle, + struct vdev_adfs_abort_params *param); +#endif + +/** + * wmi_unified_set_country_cmd_send() - WMI set country function + * @wmi_handle : handle to WMI. + * @param : pointer to hold set country cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_country_cmd_send(wmi_unified_t wmi_handle, + struct set_country *param); + +#ifdef WLAN_FEATURE_ACTION_OUI +/** + * wmi_unified_send_action_oui_cmd() - send action oui cmd to fw + * @wmi_handle: wma handle + * @req: wmi action oui message to be send + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_send_action_oui_cmd(wmi_unified_t wmi_handle, + struct action_oui_request *req); +#endif /* WLAN_FEATURE_ACTION_OUI */ + +/** + * wmi_unified_send_request_get_rcpi_cmd() - command to request rcpi value + * @wmi_handle: wma handle + * @get_rcpi_param: rcpi params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_send_request_get_rcpi_cmd(wmi_unified_t wmi_handle, + struct rcpi_req *get_rcpi_param); + +/** + * wmi_extract_rcpi_response_event - api to extract RCPI event params + * @wmi_handle: wma handle + * @evt_buf: pointer to event buffer + * @res: pointer to hold rcpi response from firmware + * + * Return: QDF_STATUS_SUCCESS for successful event parse + * else QDF_STATUS_E_INVAL or QDF_STATUS_E_FAILURE + */ +QDF_STATUS +wmi_extract_rcpi_response_event(wmi_unified_t wmi_handle, void *evt_buf, + struct rcpi_res *res); + +#ifdef WMI_INTERFACE_EVENT_LOGGING +void wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv); + +void wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv); + +void wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv); + +void wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv); + +void wmi_print_event_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv); + +void wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv); + +void wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv); + +#endif /* WMI_INTERFACE_EVENT_LOGGING */ + +/** + * wmi_unified_send_wds_entry_list_cmd() - WMI function to get list of + * wds entries from FW + * @wmi_handle: wmi handle + * + * Send WMI_PDEV_WDS_ENTRY_LIST_CMDID parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_dump_wds_table_cmd(wmi_unified_t wmi_handle); + +/** + * wmi_extract_wds_entry - api to extract wds entry + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @wds_entry: wds entry + * @idx: index to point wds entry in event buffer + * + * Return: QDF_STATUS_SUCCESS for successful event parse + * else QDF_STATUS_E_INVAL or QDF_STATUS_E_FAILURE + */ +QDF_STATUS +wmi_extract_wds_entry(wmi_unified_t wmi_handle, uint8_t *evt_buf, + struct wdsentry *wds_entry, u_int32_t idx); + +/** + * wmi_unified_send_obss_detection_cfg_cmd() - WMI function to send obss + * detection configuration to FW. + * @wmi_handle: wmi handle + * @cfg: obss detection configuration + * + * Send WMI_SAP_OBSS_DETECTION_CFG_CMDID parameters to fw. + * + * Return: QDF_STATUS + */ + +QDF_STATUS wmi_unified_send_obss_detection_cfg_cmd( + wmi_unified_t wmi_handle, + struct wmi_obss_detection_cfg_param *cfg); + +/** + * wmi_unified_extract_obss_detection_info() - WMI function to extract obss + * detection info from FW. + * @wmi_handle: wmi handle + * @data: event data from firmware + * @info: Pointer to hold obss detection info + * + * This function is used to extract obss info from firmware. + * + * Return: QDF_STATUS + */ + +QDF_STATUS wmi_unified_extract_obss_detection_info( + wmi_unified_t wmi_handle, + uint8_t *data, + struct wmi_obss_detect_info *info); + +#ifdef WLAN_SUPPORT_GREEN_AP +QDF_STATUS wmi_extract_green_ap_egap_status_info( + wmi_unified_t wmi_hdl, uint8_t *evt_buf, + struct wlan_green_ap_egap_status_info *egap_status_info_params); +#endif + +/** + * wmi_unified_send_roam_scan_stats_cmd() - Wrapper to request roam scan stats + * @wmi_handle: wmi handle + * @params: request params + * + * This function is used to send the roam scan stats request command to + * firmware. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_send_roam_scan_stats_cmd(wmi_unified_t wmi_handle, + struct wmi_roam_scan_stats_req *params); + +/** + * wmi_extract_roam_scan_stats_res_evt() - API to extract roam scan stats res + * @wmi: wmi handle + * @evt_buf: pointer to the event buffer + * @vdev_id: output pointer to hold vdev id + * @res_param: output pointer to hold extracted memory + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_extract_roam_scan_stats_res_evt(wmi_unified_t wmi, void *evt_buf, + uint32_t *vdev_id, + struct wmi_roam_scan_stats_res **res_param); + +/** + * wmi_extract_offload_bcn_tx_status_evt() - API to extract bcn tx status event + * @wmi_handle: wmi handle + * @evt_buf: pointer to the event buffer + * @vdev_id: output pointer to hold vdev id + * @tx_status: output pointer to hold bcn tx status + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_extract_offload_bcn_tx_status_evt(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *vdev_id, uint32_t *tx_status); + +/* wmi_get_ch_width_from_phy_mode() - convert phy mode to channel width + * @wmi_handle: wmi handle + * @phymode: phy mode + * + * Return: wmi channel width + */ +wmi_host_channel_width wmi_get_ch_width_from_phy_mode( + wmi_unified_t wmi_handle, WMI_HOST_WLAN_PHY_MODE phymode); + +#ifdef QCA_SUPPORT_CP_STATS +/** + * wmi_extract_cca_stats() - api to extract congestion stats from event buffer + * @wmi_handle: wma handle + * @evt_buf: event buffer + * @datalen: length of buffer + * @stats: buffer to populated after stats extraction + * + * Return: status of operation + */ +QDF_STATUS wmi_extract_cca_stats(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_congestion_stats *stats); +#endif /* QCA_SUPPORT_CP_STATS */ + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +/** + * wmi_unified_dfs_send_avg_params_cmd() - send average radar parameters cmd. + * @wmi_handle: wmi handle + * @params: radar found params + * + * This function passes the average radar parameters to fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_dfs_send_avg_params_cmd(wmi_unified_t wmi_handle, + struct dfs_radar_found_params *params); + +/** + * wmi_extract_dfs_status_from_fw() - extract host dfs status from fw. + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @dfs_status_check: pointer to the host dfs status + * + * This function extracts the result of host dfs from fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_dfs_status_from_fw(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t *dfs_status_check); +#endif +#ifdef OL_ATH_SMART_LOGGING +/** + * wmi_unified_send_smart_logging_enable_cmd() - send smart logging enable cmd + * @wmi_handle: wmi handle + * @param: enable/disable + * + * This function enables/disable the smart logging feature + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_smart_logging_enable_cmd(wmi_unified_t wmi_handle, + uint32_t param); + +/** + * wmi_unified_send_smart_logging_fatal_cmd() - send smart logging fatal cmd + * @wmi_handle: wmi handle + * @param: Fatal event + * + * This function sends the smart log fatal events to the FW + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_send_smart_logging_fatal_cmd(wmi_unified_t wmi_handle, + struct wmi_debug_fatal_events *param); + +/** + * wmi_extract_smartlog_ev() - extract smartlog event info from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @ev: Pointer to hold fatal events + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_smartlog_ev(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_debug_fatal_events *ev); + +#endif /* OL_ATH_SMART_LOGGING */ + +/** + * wmi_process_fw_event_worker_thread_ctx() - process in worker thread context + * @wmi_handle: handle to wmi + * @evt_buf: pointer to event buffer + * + * Event process by below function will be in worker thread context. + * Use this method for events which are not critical and not + * handled in protocol stack. + * + * Return: none + */ +void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle, + void *evt_buf); + +/** + * wmi_extract_ctl_failsafe_check_ev_param() - extract ctl failsafe + * status from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @ev: Pointer to hold ctl status + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_ctl_failsafe_check_ev_param( + wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_host_pdev_ctl_failsafe_event *param); + +#ifdef OBSS_PD +/** + * wmi_unified_send_obss_spatial_reuse_set_cmd() - send obss pd offset + * @wmi_handle: wmi handle + * @oobss_spatial_reuse_param: Pointer to obsspd min max offset + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_obss_spatial_reuse_set_cmd( + wmi_unified_t wmi_handle, + struct wmi_host_obss_spatial_reuse_set_param *obss_spatial_reuse_param); + +/** + * wmi_unified_send_obss_spatial_reuse_set_def_thresh_cmd() - send def thresh + * @wmi_handle: wmi handle + * @thresh: Pointer to def thresh + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_obss_spatial_reuse_set_def_thresh_cmd( + wmi_unified_t wmi_handle, + struct wmi_host_obss_spatial_reuse_set_def_thresh *thresh); + +#endif /* OBSS_PD */ + +/** + * wmi_convert_pdev_id_host_to_target() - Convert pdev_id from host to target + * defines. For legacy there is not conversion required. Just return pdev_id as + * it is. + * @wmi_handle: wmi handle + * @host_pdev_id: host pdev_id to be converted. + * @target_pdev_id: Output target pdev id. + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_convert_pdev_id_host_to_target(wmi_unified_t wmi_handle, + uint32_t host_pdev_id, + uint32_t *target_pdev_id); + +/** + * wmi_unified_send_bss_color_change_enable_cmd() - WMI function to send bss + * color change enable to FW. + * @wmi_handle: wmi handle + * @vdev_id: vdev ID + * @enable: enable or disable color change handeling within firmware + * + * Send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID parameters to fw, + * thereby firmware updates bss color when AP announces bss color change. + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_unified_send_bss_color_change_enable_cmd(wmi_unified_t wmi_handle, + uint32_t vdev_id, + bool enable); + +/** + * wmi_unified_send_obss_color_collision_cfg_cmd() - WMI function to send bss + * color collision detection configuration to FW. + * @wmi_handle: wmi handle + * @cfg: obss color collision detection configuration + * + * Send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID parameters to fw. + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_unified_send_obss_color_collision_cfg_cmd( + wmi_unified_t wmi_handle, + struct wmi_obss_color_collision_cfg_param *cfg); + +/** + * wmi_unified_extract_obss_color_collision_info() - WMI function to extract + * obss color collision info from FW. + * @wmi_handle: wmi handle + * @data: event data from firmware + * @info: Pointer to hold bss color collision info + * + * This function is used to extract bss collision info from firmware. + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_unified_extract_obss_color_collision_info( + wmi_unified_t wmi_handle, + uint8_t *data, struct wmi_obss_color_collision_info *info); + +#ifdef CRYPTO_SET_KEY_CONVERGED +/** + * wlan_crypto_cipher_to_wmi_cipher() - Convert crypto cipher to WMI cipher + * @crypto_cipher: cipher type in crypto format + * + * Return: cipher type in WMI cipher type + */ +uint8_t wlan_crypto_cipher_to_wmi_cipher( + enum wlan_crypto_cipher_type crypto_cipher); + +/** + * wlan_crypto_cipher_to_cdp_sec_type() - Convert crypto cipher to CDP type + * @crypto_cipher: cipher type in crypto format + * + * Return: security type in cdp_sec_type data format type + */ +enum cdp_sec_type wlan_crypto_cipher_to_cdp_sec_type( + enum wlan_crypto_cipher_type crypto_cipher); + +#endif + +/** + * wmi_unified_send_mws_coex_req_cmd() - WMI function to send coex req cmd + * @wmi_hdl: wmi handle + * @vdev_id: Vdev Id + * @cmd_id: Coex cmd for which info is required + * + * Send wmi coex command to fw. + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_unified_send_mws_coex_req_cmd(struct wmi_unified *wmi_handle, + uint32_t vdev_id, uint32_t cmd_id); + +/** + * wmi_unified_send_idle_trigger_monitor() - send idle trigger monitor command + * @wmi_handle: WMI handle + * @val: idle trigger monitor value - 1 for idle monitor on, 0 for idle monitor + * off + * + * Return: QDF_STATUS_SUCCESS if success, else returns proper error code. + */ +QDF_STATUS +wmi_unified_send_idle_trigger_monitor(wmi_unified_t wmi_handle, uint8_t val); + + +#ifdef WIFI_POS_CONVERGED +/** + * wmi_extract_oem_response_param() - WMI function to extract OEM response param + * @wmi_hdl: WMI handle + * @resp_buf: Buffer holding response data + * @oem_resp_param: zero-filled structure pointer to hold oem response data + * + * Return: QDF_STATUS_SUCCESS if success, else returns proper error code. + */ +QDF_STATUS +wmi_extract_oem_response_param(wmi_unified_t wmi_hdl, void *resp_buf, + struct wmi_oem_response_param *oem_resp_param); +#endif /* WIFI_POS_CONVERGED */ +/** + * wmi_critical_events_in_flight() - get the number of critical events in flight + * + * @wmi_hdl: WMI handle + * + * Return: the number of critical events in flight. + */ +uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi); + + +#ifdef FEATURE_ANI_LEVEL_REQUEST +/** + * wmi_unified_ani_level_cmd_send() - WMI function to send get ani level cmd + * @wmi_hdl: WMI handle + * @freqs: pointer to list of freqs for which ANI levels are to be fetched + * @num_freqs: number of freqs in the above parameter + * + * Return: QDF_STATUS_SUCCESS if success, else returns proper error code. + */ +QDF_STATUS wmi_unified_ani_level_cmd_send(wmi_unified_t wmi_handle, + uint32_t *freqs, + uint8_t num_freqs); + +/** + * wmi_unified_extract_ani_level() - WMI function to receive ani level cmd + * @wmi_hdl: WMI handle + * @info: pointer to ANI data received from the FW and stored in HOST + * @num_freqs: number of freqs in the above parameter + * + * Return: QDF_STATUS_SUCCESS if success, else returns proper error code. + */ +QDF_STATUS wmi_unified_extract_ani_level(wmi_unified_t wmi_handle, + uint8_t *data, + struct wmi_host_ani_level_event **info, + uint32_t *num_freqs); +#endif /* FEATURE_ANI_LEVEL_REQUEST */ + +#ifdef FEATURE_WLAN_TIME_SYNC_FTM +/** + * wmi_unified_send_wlan_time_sync_ftm_trigger() - send ftm timesync trigger cmd + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @burst_mode: mode reg getting time sync relation from FW + * + * This function indicates the FW to trigger wlan time sync using FTM + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_send_wlan_time_sync_ftm_trigger(wmi_unified_t wmi_handle, + uint32_t vdev_id, + bool burst_mode); + +/** + * wmi_unified_send_wlan_time_sync_qtime() - send ftm time sync qtime cmd. + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @lpass_ts: audio qtime + * + * This function sends the wmi cmd to FW having audio qtime + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_send_wlan_time_sync_qtime(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint64_t lpass_ts); + +/** + * wmi_unified_extract_time_sync_ftm_start_stop_params() - extract FTM time sync + * params + * @wmi_handle: wmi handle + * @evt_buf: event buffer + * @param: params received in start stop ftm timesync event + * + * This function extracts the params from ftm timesync start stop event + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_extract_time_sync_ftm_start_stop_params( + wmi_unified_t wmi_handle, void *evt_buf, + struct ftm_time_sync_start_stop_params *param); + +/** + * wmi_unified_extract_time_sync_ftm_offset() - extract timesync FTM offset + * @wmi_handle: wmi handle + * @evt_buf: event buffer + * @param: params received in ftm timesync offset event + * + * This function extracts the params from ftm timesync offset event + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_extract_time_sync_ftm_offset(wmi_unified_t wmi_handle, + void *evt_buf, + struct ftm_time_sync_offset *param); +#endif /* FEATURE_WLAN_TIME_SYNC_FTM */ + +#endif /* _WMI_UNIFIED_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_bcn_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_bcn_api.h new file mode 100644 index 0000000000000000000000000000000000000000..52cc48722f5d06bc3ecb39f6d2470d02de917ed6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_bcn_api.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API definitions for the Unified Wireless Module + * Interface (WMI) specific to beacon component + */ + +#ifndef _WMI_UNIFIED_BCN_API_H_ +#define _WMI_UNIFIED_BCN_API_H_ + +#include "wmi_unified_param.h" +#include "wmi_unified.h" +#include "wmi_unified_priv.h" + +/** + * wmi_unified_bcn_buf_ll_cmd() - prepare and send beacon buffer to fw for LL + * @wmi_handle: wmi handle + * @param: bcn ll cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ + +QDF_STATUS +wmi_unified_bcn_buf_ll_cmd(wmi_unified_t wmi_handle, + wmi_bcn_send_from_host_cmd_fixed_param * param); +#endif + diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_cfr_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_cfr_api.h new file mode 100644 index 0000000000000000000000000000000000000000..aa457b905d47b8e81518a9fb7e9b6877a4ba180f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_cfr_api.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WMI_UNIFIED_CFR_API_H_ +#define _WMI_UNIFIED_CFR_API_H_ + +#include "wmi_unified_param.h" +#include "wmi_unified_cfr_param.h" + +#ifdef WLAN_CFR_ENABLE +/** + * wmi_unified_send_peer_cfr_capture_cmd() - WMI function to start CFR capture + * for a peer + * @wmi_handle: WMI handle + * @param: configuration params for capture + * + * Return: QDF_STATUS_SUCCESS if success, else returns proper error code. + */ +QDF_STATUS +wmi_unified_send_peer_cfr_capture_cmd(wmi_unified_t wmi_handle, + struct peer_cfr_params *param); +/** + * wmi_extract_cfr_peer_tx_event_param() - WMI function to extract cfr tx event + * for a peer + * @wmi_handle: WMI handle + * @evt_buf: Buffer holding event data + * @peer_tx_event: pointer to hold tx event data + * + * Return: QDF_STATUS_SUCCESS if success, else returns proper error code. + */ +QDF_STATUS +wmi_extract_cfr_peer_tx_event_param(wmi_unified_t wmi_handle, void *evt_buf, + wmi_cfr_peer_tx_event_param *peer_tx_event); + +#ifdef WLAN_ENH_CFR_ENABLE +/** + * wmi_unified_send_cfr_rcc_cmd() - WMI function to send CFR RCC param + * @wmi_handle: WMI handle + * @cfg: pointer to RCC param + * + * Return: QDF_STATUS_SUCCESS if success, else returns proper error code. + */ +QDF_STATUS wmi_unified_send_cfr_rcc_cmd(wmi_unified_t wmi_handle, + struct cfr_rcc_param *cfg); +#endif +#endif /* WLAN_CFR_ENABLE */ +#endif /* _WMI_UNIFIED_CFR_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_cfr_param.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_cfr_param.h new file mode 100644 index 0000000000000000000000000000000000000000..151d9f60b0e8a4ee9f4471269f93cff9c51c6087 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_cfr_param.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WMI_UNIFIED_CFR_PARAM_H_ +#define _WMI_UNIFIED_CFR_PARAM_H_ + +#ifdef WLAN_CFR_ENABLE + +#define WMI_HOST_PEER_CFR_TIMER_ENABLE 1 +#define WMI_HOST_PEER_CFR_TIMER_DISABLE 0 + + +/** + * struct peer_cfr_params - peer cfr capture cmd parameter + * @request: enable/disable cfr capture + * @macaddr: macaddr of the client + * @vdev_id: vdev id + * @periodicity: cfr capture period + * @bandwidth: bandwidth of cfr capture + * @capture_method: cfr capture method/type + */ +struct peer_cfr_params { + uint32_t request; + uint8_t *macaddr; + uint32_t vdev_id; + uint32_t periodicity; + uint32_t bandwidth; + uint32_t capture_method; +}; + + +#endif /* WLAN_CFR_ENABLE */ +#endif /* _WMI_UNIFIED_CFR_PARAM_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_concurrency_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_concurrency_api.h new file mode 100644 index 0000000000000000000000000000000000000000..2f2e968ed427bff5c086f2f2926b20afbdeb484a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_concurrency_api.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to concurrency component. + */ + +#ifndef _WMI_UNIFIED_CONCURRENCY_API_H_ +#define _WMI_UNIFIED_CONCURRENCY_API_H_ + +/** + * wmi_unified_set_mcc_channel_time_quota_cmd() - set MCC channel time quota + * @wmi_handle: wmi handle + * @adapter_1_chan_freq: adapter 1 channel number + * @adapter_1_quota: adapter 1 quota + * @adapter_2_chan_freq: adapter 2 channel number + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_mcc_channel_time_quota_cmd( + wmi_unified_t wmi_handle, + uint32_t adapter_1_chan_freq, + uint32_t adapter_1_quota, uint32_t adapter_2_chan_freq); + +/** + * wmi_unified_set_mcc_channel_time_latency_cmd() - set MCC channel time latency + * @wmi_handle: wmi handle + * @mcc_channel_freq: mcc channel freq + * @mcc_channel_time_latency: MCC channel time latency. + * + * Currently used to set time latency for an MCC vdev/adapter using operating + * channel of it and channel number. The info is provided run time using + * iwpriv command: iwpriv setMccLatency . + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_mcc_channel_time_latency_cmd( + wmi_unified_t wmi_handle, + uint32_t mcc_channel_freq, uint32_t mcc_channel_time_latency); + +/** + * wmi_unified_set_enable_disable_mcc_adaptive_scheduler_cmd() - control mcc + * scheduler + * @wmi_handle: wmi handle + * @mcc_adaptive_scheduler: enable/disable + * @pdev_id: pdev id + * + * This function enable/disable mcc adaptive scheduler in fw. + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_unified_set_enable_disable_mcc_adaptive_scheduler_cmd( + wmi_unified_t wmi_handle, uint32_t mcc_adaptive_scheduler, + uint32_t pdev_id); + +#endif /* _WMI_UNIFIED_CONCURRENCY_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_crypto_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_crypto_api.h new file mode 100644 index 0000000000000000000000000000000000000000..3dce977c0bc2b38498e42ce030fd62d91e3ee7e1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_crypto_api.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2019, 2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API definitions for the Unified Wireless Module + * Interface (WMI) specific to crypto component. + */ + +#ifndef _WMI_UNIFIED_CRYPTO_API_H_ +#define _WMI_UNIFIED_CRYPTO_API_H_ + +/* + * WMI_ADD_CIPHER_KEY_CMDID + */ +typedef enum { + PAIRWISE_USAGE = 0x00, + GROUP_USAGE = 0x01, + TX_USAGE = 0x02, /* default Tx Key - Static WEP only */ + PMK_USAGE = 0x04, /* PMK cache */ +} KEY_USAGE; + +/** + * wmi_extract_install_key_comp_event() - extract params of install key complete + * from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @len: length of the event buffer + * @params: Pointer to hold params of install key complete + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_install_key_comp_event(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t len, + struct wmi_install_key_comp_event *param); +#endif + diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_dbr_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_dbr_api.h new file mode 100644 index 0000000000000000000000000000000000000000..de378415c55378d99779542e4d491ba3d1da3898 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_dbr_api.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to DBR component. + */ + +#ifndef _WMI_UNIFIED_DBR_API_H_ +#define _WMI_UNIFIED_DBR_API_H_ + +#include "wmi_unified_dbr_param.h" + +/** + * wmi_unified_dbr_ring_cfg: Configure direct buffer rx rings + * @wmi_handle: WMI handle + * @cfg: pointer to direct buffer rx config request + * + * Return: QDF status of operation + */ +QDF_STATUS wmi_unified_dbr_ring_cfg(wmi_unified_t wmi_handle, + struct direct_buf_rx_cfg_req *cfg); + +/** + * wmi_extract_dbr_buf_release_fixed : Extract direct buffer rx fixed param + * from buffer release event + * @wmi_handle: WMI handle + * @evt_buf: Event buffer + * @param: Pointer to direct buffer rx response struct + * + * Return: QDF status of operation + */ +QDF_STATUS wmi_extract_dbr_buf_release_fixed( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct direct_buf_rx_rsp *param); + +/** + * wmi_extract_dbr_buf_release_entry: Extract direct buffer rx buffer tlv + * + * @wmi_handle: WMI handle + * @evt_buf: Event buffer + * @idx: Index of the module for which capability is received + * @param: Pointer to direct buffer rx entry + * + * Return: QDF status of operation + */ +QDF_STATUS wmi_extract_dbr_buf_release_entry( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct direct_buf_rx_entry *param); + +/** + * wmi_extract_dbr_buf_metadata: Extract direct buffer metadata + * + * @wmi_handle: WMI handle + * @evt_buf: Event buffer + * @idx: Index of the module for which capability is received + * @param: Pointer to direct buffer metadata + * + * Return: QDF status of operation + */ +QDF_STATUS wmi_extract_dbr_buf_metadata( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct direct_buf_rx_metadata *param); + +#endif /* _WMI_UNIFIED_DBR_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_dbr_param.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_dbr_param.h new file mode 100644 index 0000000000000000000000000000000000000000..ed7226b69801c02a1aa4092b2112de6e2a86abb4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_dbr_param.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2016-2018, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WMI_UNIFIED_DBR_PARAM_H_ +#define _WMI_UNIFIED_DBR_PARAM_H_ + +#define WMI_HOST_DBR_RING_ADDR_LO_S 0 +#define WMI_HOST_DBR_RING_ADDR_LO_M 0xffffffff +#define WMI_HOST_DBR_RING_ADDR_LO \ + (WMI_HOST_DBR_RING_ADDR_LO_M << WMI_HOST_DBR_RING_ADDR_LO_S) + +#define WMI_HOST_DBR_RING_ADDR_LO_GET(dword) \ + WMI_HOST_F_MS(dword, WMI_HOST_DBR_RING_ADDR_LO) +#define WMI_HOST_DBR_RING_ADDR_LO_SET(dword, val) \ + WMI_HOST_F_RMW(dword, val, WMI_HOST_DBR_RING_ADDR_LO) + +#define WMI_HOST_DBR_RING_ADDR_HI_S 0 +#define WMI_HOST_DBR_RING_ADDR_HI_M 0xf +#define WMI_HOST_DBR_RING_ADDR_HI \ + (WMI_HOST_DBR_RING_ADDR_HI_M << WMI_HOST_DBR_RING_ADDR_HI_S) + +#define WMI_HOST_DBR_RING_ADDR_HI_GET(dword) \ + WMI_HOST_F_MS(dword, WMI_HOST_DBR_RING_ADDR_HI) +#define WMI_HOST_DBR_RING_ADDR_HI_SET(dword, val) \ + WMI_HOST_F_RMW(dword, val, WMI_HOST_DBR_RING_ADDR_HI) + +#define WMI_HOST_DBR_DATA_ADDR_LO_S 0 +#define WMI_HOST_DBR_DATA_ADDR_LO_M 0xffffffff +#define WMI_HOST_DBR_DATA_ADDR_LO \ + (WMI_HOST_DBR_DATA_ADDR_LO_M << WMI_HOST_DBR_DATA_ADDR_LO_S) + +#define WMI_HOST_DBR_DATA_ADDR_LO_GET(dword) \ + WMI_HOST_F_MS(dword, WMI_HOST_DBR_DATA_ADDR_LO) +#define WMI_HOST_DBR_DATA_ADDR_LO_SET(dword, val) \ + WMI_HOST_F_RMW(dword, val, WMI_HOST_DBR_DATA_ADDR_LO) + +#define WMI_HOST_DBR_DATA_ADDR_HI_S 0 +#define WMI_HOST_DBR_DATA_ADDR_HI_M 0xf +#define WMI_HOST_DBR_DATA_ADDR_HI \ + (WMI_HOST_DBR_DATA_ADDR_HI_M << WMI_HOST_DBR_DATA_ADDR_HI_S) + +#define WMI_HOST_DBR_DATA_ADDR_HI_GET(dword) \ + WMI_HOST_F_MS(dword, WMI_HOST_DBR_DATA_ADDR_HI) +#define WMI_HOST_DBR_DATA_ADDR_HI_SET(dword, val) \ + WMI_HOST_F_RMW(dword, val, WMI_HOST_DBR_DATA_ADDR_HI) + +#define WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA_S 12 +#define WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA_M 0x7ffff +#define WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA \ + (WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA_M << \ + WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA_S) + +#define WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA_GET(dword) \ + WMI_HOST_F_MS(dword, WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA) +#define WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA_SET(dword, val) \ + WMI_HOST_F_RMW(dword, val, WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA) + +#define WMI_HOST_MAX_NUM_CHAINS 8 + +/** + * struct direct_buf_rx_rsp: direct buffer rx response structure + * + * @pdev_id: Index of the pdev for which response is received + * @mod_mod: Index of the module for which respone is received + * @num_buf_release_entry: Number of buffers released through event + * @dbr_entries: Pointer to direct buffer rx entry struct + */ +struct direct_buf_rx_rsp { + uint32_t pdev_id; + uint32_t mod_id; + uint32_t num_buf_release_entry; + uint32_t num_meta_data_entry; + struct direct_buf_rx_entry *dbr_entries; +}; + +/** + * struct direct_buf_rx_cfg_req: direct buffer rx config request structure + * + * @pdev_id: Index of the pdev for which response is received + * @mod_id: Index of the module for which respone is received + * @base_paddr_lo: Lower 32bits of ring base address + * @base_paddr_hi: Higher 32bits of ring base address + * @head_idx_paddr_lo: Lower 32bits of head idx register address + * @head_idx_paddr_hi: Higher 32bits of head idx register address + * @tail_idx_paddr_lo: Lower 32bits of tail idx register address + * @tail_idx_paddr_hi: Higher 32bits of tail idx register address + * @buf_size: Size of the buffer for each pointer in the ring + * @num_elems: Number of pointers allocated and part of the source ring + */ +struct direct_buf_rx_cfg_req { + uint32_t pdev_id; + uint32_t mod_id; + uint32_t base_paddr_lo; + uint32_t base_paddr_hi; + uint32_t head_idx_paddr_lo; + uint32_t head_idx_paddr_hi; + uint32_t tail_idx_paddr_hi; + uint32_t tail_idx_paddr_lo; + uint32_t buf_size; + uint32_t num_elems; + uint32_t event_timeout_ms; + uint32_t num_resp_per_event; +}; + +/** + * struct direct_buf_rx_metadata: direct buffer metadata + * + * @noisefloor: noisefloor + * @reset_delay: reset delay + */ +struct direct_buf_rx_metadata { + int32_t noisefloor[WMI_HOST_MAX_NUM_CHAINS]; + uint32_t reset_delay; +}; + +/** + * struct direct_buf_rx_entry: direct buffer rx release entry structure + * + * @addr_lo: LSB 32-bits of the buffer + * @addr_hi: MSB 32-bits of the buffer + * @len: Length of the buffer + */ +struct direct_buf_rx_entry { + uint32_t paddr_lo; + uint32_t paddr_hi; + uint32_t len; +}; + +#endif /* _WMI_UNIFIED_DBR_PARAM_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_dfs_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_dfs_api.h new file mode 100644 index 0000000000000000000000000000000000000000..cf8ccca9eeb665cd1f1a9e0fd052e6d73f1a7bc3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_dfs_api.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: This file contains the API definitions for the Unified Wireless Module + * Interface (WMI) which are specific to DFS module. + */ + +#ifndef _WMI_UNIFIED_DFS_API_H_ +#define _WMI_UNIFIED_DFS_API_H_ + +#include +#include + +/** + * wmi_extract_dfs_cac_complete_event() - function to handle cac complete event + * @wmi_handle: wmi handle + * @event_buf: event buffer + * @vdev_id: vdev id + * @len: length of buffer + * + * Return: 0 for success or error code + */ +QDF_STATUS wmi_extract_dfs_cac_complete_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + uint32_t *vdev_id, + uint32_t len); + +/** + * wmi_extract_dfs_ocac_complete_event() - function to handle off channel + * CAC complete event + * @wmi_handle: wmi handle + * @event_buf: event buffer + * @param: off channel cac complete params + * + * Return: 0 for success or error code + */ +QDF_STATUS +wmi_extract_dfs_ocac_complete_event(wmi_unified_t wmi_handle, uint8_t *evt_buf, + struct vdev_adfs_complete_status *param); + +/** + * wmi_extract_dfs_radar_detection_event() - function to handle radar event + * @wmi_handle: wmi handle + * @event_buf: event buffer + * @radar_found: radar found event info + * @len: length of buffer + * + * Return: 0 for success or error code + */ +QDF_STATUS wmi_extract_dfs_radar_detection_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct radar_found_info *radar_found, + uint32_t len); + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * wmi_extract_wlan_radar_event_info() - function to handle radar pulse event. + * @wmi_handle: wmi handle + * @evt_buf: event buffer + * @wlan_radar_event: pointer to radar event info structure + * @len: length of buffer + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_extract_wlan_radar_event_info( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct radar_event_info *wlan_radar_event, + uint32_t len); +#endif + +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) +/** + * wmi_send_usenol_pdev_param() - function to send usenol pdev param. + * @wmi_handle: wmi handle + * @usenol: value of usenol + * @pdev: pointer to objmgr_pdev structure + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_send_usenol_pdev_param(wmi_unified_t wmi_handle, bool usenol, + struct wlan_objmgr_pdev *pdev); + +/** + * wmi_send_subchan_marking_pdev_param() - Function to send subchannel + * marking pdev param. + * @wmi_handle: WMI handle. + * @subchanmark: Value of use subchannel marking. + * @pdev: Pointer to objmgr_pdev structure. + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_send_subchan_marking_pdev_param(wmi_unified_t wmi_handle, + bool subchanmark, + struct wlan_objmgr_pdev *pdev); +#else +static inline QDF_STATUS +wmi_send_usenol_pdev_param(wmi_unified_t wmi_hdl, bool usenol, + struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +wmi_send_subchan_marking_pdev_param(wmi_unified_t wmi_handle, + bool subchanmark, + struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif /* _WMI_UNIFIED_DFS_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_extscan_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_extscan_api.h new file mode 100644 index 0000000000000000000000000000000000000000..102efbceb4573e8fd2f1c5f2b13d7fd64d072e17 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_extscan_api.h @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WMI_UNIFIED_EXTSCAN_API_H_ +#define _WMI_UNIFIED_EXTSCAN_API_H_ + +/** + * wmi_unified_reset_passpoint_network_list_cmd() - reset passpoint network list + * @wmi_handle: wmi handle + * @req: passpoint network request structure + * + * This function sends down WMI command with network id set to wildcard id. + * firmware shall clear all the config entries + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_reset_passpoint_network_list_cmd( + wmi_unified_t wmi_handle, + struct wifi_passpoint_req_param *req); + +/** + * wmi_unified_set_passpoint_network_list_cmd() - set passpoint network list + * @wmi_handle: wmi handle + * @req: passpoint network request structure + * + * This function reads the incoming @req and fill in the destination + * WMI structure and send down the passpoint configs down to the firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ + +QDF_STATUS wmi_unified_set_passpoint_network_list_cmd( + wmi_unified_t wmi_handle, + struct wifi_passpoint_req_param *req); + +/** wmi_unified_set_epno_network_list_cmd() - set epno network list + * @wmi_handle: wmi handle + * @req: epno config params request structure + * + * This function reads the incoming epno config request structure + * and constructs the WMI message to the firmware. + * + * Returns: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures, + * error number otherwise + */ +QDF_STATUS wmi_unified_set_epno_network_list_cmd( + wmi_unified_t wmi_handle, + struct wifi_enhanced_pno_params *req); + +/** + * wmi_unified_extscan_get_capabilities_cmd() - extscan get capabilities + * @wmi_handle: wmi handle + * @pgetcapab: get capabilities params + * + * This function send request to fw to get extscan capabilities. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_extscan_get_capabilities_cmd( + wmi_unified_t wmi_handle, + struct extscan_capabilities_params *pgetcapab); + +/** + * wmi_unified_extscan_get_cached_results_cmd() - extscan get cached results + * @wmi_handle: wmi handle + * @pcached_results: cached results parameters + * + * This function send request to fw to get cached results. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_extscan_get_cached_results_cmd( + wmi_unified_t wmi_handle, + struct extscan_cached_result_params *pcached_results); + +/** + * wmi_unified_extscan_stop_change_monitor_cmd() - send stop change monitor cmd + * @wmi_handle: wmi handle + * @reset_req: Reset change request params + * + * This function sends stop change monitor request to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_extscan_stop_change_monitor_cmd( + wmi_unified_t wmi_handle, + struct extscan_capabilities_reset_params *reset_req); + +/** + * wmi_unified_extscan_start_change_monitor_cmd() - start change monitor cmd + * @wmi_handle: wmi handle + * @psigchange: change monitor request params + * + * This function sends start change monitor request to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_extscan_start_change_monitor_cmd( + wmi_unified_t wmi_handle, + struct extscan_set_sig_changereq_params *psigchange); + +/** + * wmi_unified_extscan_stop_hotlist_monitor_cmd() - stop hotlist monitor + * @wmi_handle: wmi handle + * @photlist_reset: hotlist reset params + * + * This function configures hotlist monitor to stop in fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_extscan_stop_hotlist_monitor_cmd( + wmi_unified_t wmi_handle, + struct extscan_bssid_hotlist_reset_params *photlist_reset); + +/** + * wmi_unified_extscan_start_hotlist_monitor_cmd() - start hotlist monitor + * @wmi_handle: wmi handle + * @params: hotlist params + * + * This function configures hotlist monitor to start in fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_extscan_start_hotlist_monitor_cmd( + wmi_unified_t wmi_handle, + struct extscan_bssid_hotlist_set_params *params); + +/** + * wmi_unified_stop_extscan_cmd() - stop extscan command to fw. + * @wmi_handle: wmi handle + * @pstopcmd: stop scan command request params + * + * This function sends stop extscan request to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure. + */ +QDF_STATUS wmi_unified_stop_extscan_cmd( + wmi_unified_t wmi_handle, + struct extscan_stop_req_params *pstopcmd); + +/** + * wmi_unified_start_extscan_cmd() - start extscan command to fw. + * @wmi_handle: wmi handle + * @pstart: scan command request params + * + * This function sends start extscan request to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure. + */ +QDF_STATUS wmi_unified_start_extscan_cmd( + wmi_unified_t wmi_handle, + struct wifi_scan_cmd_req_params *pstart); + +#endif /* _WMI_UNIFIED_EXTSCAN_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_fwol_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_fwol_api.h new file mode 100644 index 0000000000000000000000000000000000000000..478246ca3e649fbdd63a99b9e16c97c571e71abe --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_fwol_api.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to fw offload component. + */ + +#ifndef _WMI_UNIFIED_FWOL_API_H_ +#define _WMI_UNIFIED_FWOL_API_H_ +#include "wlan_fwol_public_structs.h" + +#ifdef WLAN_FEATURE_ELNA +/** + * wmi_unified_send_set_elna_bypass_cmd() - Send WMI set eLNA bypass cmd + * @wmi_handle: wmi handle + * @req: set eLNA bypass request + * + * Send WMI set eLNA bypass command to firmware. + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_unified_send_set_elna_bypass_cmd(struct wmi_unified *wmi_handle, + struct set_elna_bypass_request *req); + +/** + * wmi_unified_send_get_elna_bypass_cmd() - Send WMI get eLNA bypass cmd + * @wmi_handle: wmi handle + * @req: get eLNA bypass request + * + * Send WMI get eLNA bypass command to firmware. + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_unified_send_get_elna_bypass_cmd(struct wmi_unified *wmi_handle, + struct get_elna_bypass_request *req); + +/** + * wmi_extract_get_elna_bypass_resp() - Extract WMI get eLNA bypass response + * @wmi_handle: wmi handle + * @resp_buf: response buffer + * @resp: get eLNA bypass response + * + * Extract WMI get eLNA bypass response from firmware. + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_extract_get_elna_bypass_resp(struct wmi_unified *wmi_handle, void *resp_buf, + struct get_elna_bypass_response *resp); +#endif /* WLAN_FEATURE_ELNA */ + +#ifdef WLAN_SEND_DSCP_UP_MAP_TO_FW +/** + * wmi_unified_send_dscp_tip_map_cmd() - Send dscp-to-tid map values cmd + * @wmi_handle: wmi handle + * @dscp_to_tid_map: array of dscp_tid map values + * + * Send dscp-to-tid map values to FW. + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_unified_send_dscp_tip_map_cmd(struct wmi_unified *wmi_handle, + uint32_t *dscp_to_tid_map); +#else +static inline QDF_STATUS +wmi_unified_send_dscp_tip_map_cmd(struct wmi_unified *wmi_handle, + uint32_t *dscp_to_tid_map) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SEND_DSCP_UP_MAP_TO_FW */ + +#endif /* _WMI_UNIFIED_FWOL_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_gpio_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_gpio_api.h new file mode 100644 index 0000000000000000000000000000000000000000..15f9bebf4cee0f3268a6be809e8a13a6b4b08477 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_gpio_api.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to gpio component. + */ + +#ifndef _WMI_UNIFIED_GPIO_API_H_ +#define _WMI_UNIFIED_GPIO_API_H_ + +#include + +/** + * wmi_unified_gpio_config_cmd_send() - WMI gpio config function + * @wmi_handle: handle to WMI. + * @param: pointer to hold gpio config param + * + * Send WMI set gpio configuration to firmware. + * + * Return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_gpio_config_cmd_send(wmi_unified_t wmi_handle, + struct gpio_config_params *param); + +/** + * wmi_unified_gpio_output_cmd_send() - WMI gpio output function + * @wmi_handle: handle to WMI. + * @param: pointer to hold gpio output param + * + * Send WMI set gpio output value to firmware. + * + * Return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_gpio_output_cmd_send(wmi_unified_t wmi_handle, + struct gpio_output_params *param); + +#endif /* _WMI_UNIFIED_GPIO_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_interop_issues_ap_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_interop_issues_ap_api.h new file mode 100644 index 0000000000000000000000000000000000000000..a1d7198c0603892d5c2288da8b9a2ca6243e5dcc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_interop_issues_ap_api.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to interop issues ap component. + */ + +#ifndef _WMI_UNIFIED_INTEROP_ISSUES_AP_API_H_ +#define _WMI_UNIFIED_INTEROP_ISSUES_AP_API_H_ + +#include +#include + +/** + * wmi_extract_interop_issues_ap_ev_param() - extract info from event + * @wmi_handle: wmi handle + * @evt_buf: event buffer + * @param: pointer to interop issues ap event structure + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_interop_issues_ap_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + struct wlan_interop_issues_ap_event *param); +/** + * wmi_unified_set_rap_ps_cmd() - set interop issues ap for ps in fw + * @wmi_handle: wmi handle + * @rap: interop issues ap info + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_rap_ps_cmd(wmi_unified_t wmi_handle, + struct wlan_interop_issues_ap_info *rap); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_nan_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_nan_api.h new file mode 100644 index 0000000000000000000000000000000000000000..ee0fafd9999e39bf068d42fbf862d0521632b52c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_nan_api.h @@ -0,0 +1,205 @@ + +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to NAN component. + */ + +#ifndef _WMI_UNIFIED_NAN_API_H_ +#define _WMI_UNIFIED_NAN_API_H_ + +#include + +/** + * wmi_unified_nan_req_cmd() - to send nan request to target + * @wmi_handle: wmi handle + * @nan_req: request data which will be non-null + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_nan_req_cmd(wmi_unified_t wmi_handle, + struct nan_msg_params *nan_req); + +/** + * wmi_unified_nan_disable_req_cmd() - to send nan disable request to target + * @wmi_handle: wmi handle + * @nan_req: pointer to NAN Disable request structure + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_nan_disable_req_cmd(wmi_unified_t wmi_handle, + struct nan_disable_req *nan_req); + +/** + * wmi_unified_ndp_initiator_req_cmd_send - api to send initiator request to FW + * @wmi_handle: wmi handle + * @req: pointer to request buffer + * + * Return: status of operation + */ +QDF_STATUS +wmi_unified_ndp_initiator_req_cmd_send(wmi_unified_t wmi_handle, + struct nan_datapath_initiator_req *req); + +/** + * wmi_unified_ndp_responder_req_cmd_send - api to send responder request to FW + * @wmi_handle: wmi handle + * @req: pointer to request buffer + * + * Return: status of operation + */ +QDF_STATUS +wmi_unified_ndp_responder_req_cmd_send(wmi_unified_t wmi_handle, + struct nan_datapath_responder_req *req); + +/** + * wmi_unified_ndp_end_req_cmd_send - api to send end request to FW + * @wmi_handle: wmi handle + * @req: pointer to request buffer + * + * Return: status of operation + */ +QDF_STATUS wmi_unified_ndp_end_req_cmd_send(wmi_unified_t wmi_handle, + struct nan_datapath_end_req *req); + +/** + * wmi_unified_terminate_all_ndps_req_cmd - api to request Firmware for + * termination of all NDP's associated with the given vdev id. + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: status of operation + */ +QDF_STATUS wmi_unified_terminate_all_ndps_req_cmd(wmi_unified_t wmi_handle, + uint32_t vdev_id); + +/** + * wmi_extract_ndp_initiator_rsp - api to extract initiator rsp from even buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @rsp: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS +wmi_extract_ndp_initiator_rsp(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_initiator_rsp *rsp); + +/** + * wmi_extract_ndp_ind - api to extract ndp indication struct from even buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @ind: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS wmi_extract_ndp_ind(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_indication_event *ind); + +/** + * wmi_extract_nan_msg - api to extract ndp dmesg buffer to print logs + * @data: event buffer + * @msg: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS wmi_extract_nan_msg(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_dump_msg *msg); + +/** + * wmi_extract_ndp_confirm - api to extract ndp confim struct from even buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @ev: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS wmi_extract_ndp_confirm(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_confirm_event *ev); + +/** + * wmi_extract_ndp_responder_rsp - api to extract responder rsp from even buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @rsp: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS +wmi_extract_ndp_responder_rsp(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_responder_rsp *rsp); + +/** + * wmi_extract_ndp_end_rsp - api to extract ndp end rsp from even buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @rsp: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS wmi_extract_ndp_end_rsp(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_end_rsp_event *rsp); + +/** + * wmi_extract_ndp_end_ind - api to extract ndp end indication from even buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @ind: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS +wmi_extract_ndp_end_ind(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_end_indication_event **ind); + +/** + * wmi_extract_ndp_sch_update - api to extract ndp sch update from event buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @ind: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS +wmi_extract_ndp_sch_update(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_sch_update_event *ind); + +/** + * wmi_extract_nan_event_rsp - api to extract nan event into event parameters + * @wmi_hdl: wmi handle + * @wvt_buf: pointer to the event buffer + * @temp_evt_params: Pointer to a temporary parameters structure to populate + * @nan_msg_buf: Pointer to the NAN Message buffer encapsulated in the event + * + * Return: status of operation + */ +QDF_STATUS wmi_extract_nan_event_rsp(wmi_unified_t wmi_handle, void *evt_buf, + struct nan_event_params *temp_evt_params, + uint8_t **nan_msg_buf); + +/** + * wmi_extract_ndp_host_event - api to extract ndp event from event buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @evt: event buffer to populate + * + * Return: status of operation + */ +QDF_STATUS wmi_extract_ndp_host_event(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_host_event *evt); +#endif /* _WMI_UNIFIED_NAN_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_ocb_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_ocb_api.h new file mode 100644 index 0000000000000000000000000000000000000000..883bc8d24b66b464a399ca68a23803b61022702d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_ocb_api.h @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to OCB component. + */ + +#ifndef _WMI_UNIFIED_DSRC_API_H_ +#define _WMI_UNIFIED_DSRC_API_H_ +#include + +/** + * wmi_unified_ocb_start_timing_advert() - start sending the timing + * advertisement frames on a channel + * @wmi_handle: pointer to the wmi handle + * @timing_advert: pointer to the timing advertisement struct + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_ocb_start_timing_advert(struct wmi_unified *wmi_handle, + struct ocb_timing_advert_param *timing_advert); + +/** + * wmi_unified_ocb_stop_timing_advert() - stop sending the timing + * advertisement frames on a channel + * @wmi_handle: pointer to the wmi handle + * @timing_advert: pointer to the timing advertisement struct + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_ocb_stop_timing_advert(struct wmi_unified *wmi_handle, + struct ocb_timing_advert_param *timing_advert); + +/** + * wmi_unified_ocb_set_config() - send the OCB config to the FW + * @wmi_handle: pointer to the wmi handle + * @config: the OCB configuration + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures + */ +QDF_STATUS wmi_unified_ocb_set_config(struct wmi_unified *wmi_handle, + struct ocb_config *config); + +/** + * wmi_unified_ocb_get_tsf_timer() - get ocb tsf timer val + * @wmi_handle: pointer to the wmi handle + * @req: request for tsf timer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_ocb_get_tsf_timer(struct wmi_unified *wmi_handle, + struct ocb_get_tsf_timer_param *req); + +/** + * wmi_unified_ocb_set_utc_time_cmd() - get ocb tsf timer val + * @wmi_handle: pointer to the wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_ocb_set_utc_time_cmd(struct wmi_unified *wmi_handle, + struct ocb_utc_param *utc); + +/** + * wmi_unified_dcc_get_stats_cmd() - get the DCC channel stats + * @wmi_handle: pointer to the wmi handle + * @get_stats_param: pointer to the dcc stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_dcc_get_stats_cmd(struct wmi_unified *wmi_handle, + struct ocb_dcc_get_stats_param *get_stats_param); + +/** + * wmi_unified_dcc_clear_stats() - command to clear the DCC stats + * @wmi_handle: pointer to the wmi handle + * @clear_stats_param: parameters to the command + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_dcc_clear_stats(struct wmi_unified *wmi_handle, + struct ocb_dcc_clear_stats_param *clear_stats_param); + +/** + * wmi_unified_dcc_update_ndl() - command to update the NDL data + * @wmi_handle: pointer to the wmi handle + * @update_ndl_param: pointer to the request parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures + */ +QDF_STATUS wmi_unified_dcc_update_ndl(struct wmi_unified *wmi_handle, + struct ocb_dcc_update_ndl_param *update_ndl_param); + +/** + * wmi_extract_ocb_set_channel_config_resp() - extract status from wmi event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @status: status buffer + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +wmi_extract_ocb_set_channel_config_resp(struct wmi_unified *wmi_handle, + void *evt_buf, + uint32_t *status); + +/** + * wmi_extract_ocb_tsf_timer() - extract tsf timer from wmi event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @resp: tsf timer + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS wmi_extract_ocb_tsf_timer(struct wmi_unified *wmi_handle, + void *evt_buf, + struct ocb_get_tsf_timer_response *resp); + +/** + * wmi_extract_dcc_update_ndl_resp() - extract NDL update from wmi event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @resp: ndl update status + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS wmi_extract_dcc_update_ndl_resp(struct wmi_unified *wmi_handle, + void *evt_buf, struct ocb_dcc_update_ndl_response *resp); + +/** + * wmi_extract_dcc_stats() - extract DCC stats from wmi event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @resp: DCC stats + * + * Since length of the response is variable, response buffer will be allocated. + * The caller must free the response buffer. + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS wmi_extract_dcc_stats(struct wmi_unified *wmi_handle, + void *evt_buf, + struct ocb_dcc_get_stats_response **response); + +#endif /* _WMI_UNIFIED_DSRC_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_p2p_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_p2p_api.h new file mode 100644 index 0000000000000000000000000000000000000000..e5a5db3f8695b8cab595518d2ab222eed7911fe8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_p2p_api.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to P2P component. + */ + +#ifndef _WMI_UNIFIED_P2P_API_H_ +#define _WMI_UNIFIED_P2P_API_H_ + +#include + +/** + * wmi_unified_set_p2pgo_oppps_req() - send p2p go opp power save request to fw + * @wmi_handle: wmi handle + * @oppps: p2p opp power save parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_p2pgo_oppps_req(wmi_unified_t wmi_handle, + struct p2p_ps_params *oppps); + +/** + * wmi_unified_set_p2pgo_noa_req_cmd() - send p2p go noa request to fw + * @wmi_handle: wmi handle + * @noa: p2p power save parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_p2pgo_noa_req_cmd(wmi_unified_t wmi_handle, + struct p2p_ps_params *noa); + +/** + * wmi_extract_p2p_noa_ev_param() - extract p2p noa param from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to hold p2p noa param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_p2p_noa_ev_param(wmi_unified_t wmi_handle, + void *evt_buf, + struct p2p_noa_info *param); + +/** + * wmi_send_set_mac_addr_rx_filter_cmd() - set mac addr rx filter cmd + * @wmi_handle: wmi handle + * @param: Pointer to set mac filter struct + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_send_set_mac_addr_rx_filter_cmd(wmi_unified_t wmi_handle, + struct p2p_set_mac_filter *param); + +/** + * wmi_extract_mac_addr_rx_filter_evt_param() - extract mac addr rx filter evt + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to extracted evt info + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_mac_addr_rx_filter_evt_param(wmi_unified_t wmi_handle, + void *evt_buf, + struct p2p_set_mac_filter_evt *param); + +#ifdef FEATURE_P2P_LISTEN_OFFLOAD +/** + * wmi_unified_p2p_lo_start_cmd() - send p2p lo start request to fw + * @wmi_handle: wmi handle + * @param: p2p listen offload start parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_p2p_lo_start_cmd(wmi_unified_t wmi_handle, + struct p2p_lo_start *param); + +/** + * wmi_unified_p2p_lo_stop_cmd() - send p2p lo stop request to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_p2p_lo_stop_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +/** + * wmi_extract_p2p_lo_stop_ev_param() - extract p2p lo stop param from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to hold listen offload stop param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_p2p_lo_stop_ev_param(wmi_unified_t wmi_handle, + void *evt_buf, + struct p2p_lo_event *param); +#endif /* FEATURE_P2P_LISTEN_OFFLOAD */ + +#endif /* _WMI_UNIFIED_P2P_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h new file mode 100644 index 0000000000000000000000000000000000000000..6384d3d58656d05a782bdce92266cdd64fafb170 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h @@ -0,0 +1,8348 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API definitions for the Unified Wireless Module + * Interface (WMI). + */ + +#ifndef _WMI_UNIFIED_PARAM_H_ +#define _WMI_UNIFIED_PARAM_H_ + +#include +#ifdef FEATURE_WLAN_TDLS +#include +#endif + +#define MAC_MAX_KEY_LENGTH 32 +#define MAC_PN_LENGTH 8 +#define MAX_MAC_HEADER_LEN 32 +#define MIN_MAC_HEADER_LEN 24 +#define QOS_CONTROL_LEN 2 + +#define WMI_MAC_MAX_SSID_LENGTH 32 +#ifndef CONFIG_HL_SUPPORT +#define mgmt_tx_dl_frm_len 64 +#else +#define mgmt_tx_dl_frm_len 1532 +#endif +#define WMI_SMPS_MASK_LOWER_16BITS 0xFF +#define WMI_SMPS_MASK_UPPER_3BITS 0x7 +#define WMI_SMPS_PARAM_VALUE_S 29 +#define WMI_UNIT_TEST_MAX_NUM_ARGS 100 +/* The size of the utc time in bytes. */ +#define WMI_SIZE_UTC_TIME (10) +/* The size of the utc time error in bytes. */ +#define WMI_SIZE_UTC_TIME_ERROR (5) +#define WMI_MCC_MIN_CHANNEL_QUOTA 20 +#define WMI_MCC_MAX_CHANNEL_QUOTA 80 +#define WMI_MCC_MIN_NON_ZERO_CHANNEL_LATENCY 30 +#define WMI_BEACON_TX_BUFFER_SIZE (512) +#define WMI_WIFI_SCANNING_MAC_OUI_LENGTH 3 +#define WMI_EXTSCAN_MAX_SIGNIFICANT_CHANGE_APS 64 +#define WMI_RSSI_THOLD_DEFAULT -300 +#define WMI_NLO_FREQ_THRESH 1000 +#define WMI_SEC_TO_MSEC(sec) (sec * 1000) +#define WMI_MSEC_TO_USEC(msec) (msec * 1000) +#define WMI_ETH_LEN 64 +#define WMI_QOS_NUM_TSPEC_MAX 2 +#define WMI_IPV4_ADDR_LEN 4 +#define WMI_KEEP_ALIVE_NULL_PKT 1 +#define WMI_KEEP_ALIVE_UNSOLICIT_ARP_RSP 2 +#define WMI_MAC_MAX_KEY_LENGTH 32 +#define WMI_KRK_KEY_LEN 16 +#ifdef WLAN_FEATURE_ROAM_OFFLOAD +#define WMI_BTK_KEY_LEN 32 +#define WMI_ROAM_R0KH_ID_MAX_LEN 48 +#define WMI_ROAM_SCAN_PSK_SIZE 48 +#endif +#define WMI_NOISE_FLOOR_DBM_DEFAULT (-96) +#define WMI_EXTSCAN_MAX_HOTLIST_SSIDS 8 +#define WMI_ROAM_MAX_CHANNELS 80 +#ifdef FEATURE_WLAN_EXTSCAN +#define WMI_MAX_EXTSCAN_MSG_SIZE 1536 +#define WMI_EXTSCAN_REST_TIME 100 +#define WMI_EXTSCAN_MAX_SCAN_TIME 50000 +#define WMI_EXTSCAN_BURST_DURATION 150 +#endif +#define WMI_SCAN_NPROBES_DEFAULT (2) +#define WMI_SEC_TO_MSEC(sec) (sec * 1000) /* sec to msec */ +#define WMI_MSEC_TO_USEC(msec) (msec * 1000) /* msec to usec */ +#define WMI_NLO_FREQ_THRESH 1000 /* in MHz */ + +#define WMI_SVC_MSG_MAX_SIZE 1536 +#define MAX_UTF_EVENT_LENGTH 2048 +#define MAX_WMI_UTF_LEN 252 +#define MAX_WMI_QVIT_LEN 252 +#define THERMAL_LEVELS 4 +#define WMI_HOST_BCN_FLT_MAX_SUPPORTED_IES 256 +#define WMI_HOST_BCN_FLT_MAX_ELEMS_IE_LIST \ + (WMI_HOST_BCN_FLT_MAX_SUPPORTED_IES/32) +#define LTEU_MAX_BINS 10 +#define ATF_ACTIVED_MAX_CLIENTS 50 +#define ATF_ACTIVED_MAX_ATFGROUPS 16 +#define CTL_5G_SIZE 1536 +#define CTL_2G_SIZE 684 +#define MAX_CTL_SIZE (CTL_5G_SIZE > CTL_2G_SIZE ? CTL_5G_SIZE : CTL_2G_SIZE) +#define IEEE80211_MICBUF_SIZE (8+8) +#define IEEE80211_TID_SIZE 17 +#define WME_NUM_AC 4 +#define SMART_ANT_MODE_SERIAL 0 +#define SMART_ANT_MODE_PARALLEL 1 +#define IEEE80211_WEP_NKID 4 /* number of key ids */ +#define WPI_IV_LEN 16 +#define WMI_SCAN_MAX_NUM_BSSID 10 +#define MAX_CHANS 1023 +#define TARGET_OEM_CONFIGURE_LCI 0x0A +#define RTT_LCI_ALTITUDE_MASK 0x3FFFFFFF +#define TARGET_OEM_CONFIGURE_LCR 0x09 +#define RTT_TIMEOUT_MS 180 +#define MAX_SUPPORTED_RATES 128 +#define WMI_HOST_MAX_BUFFER_SIZE 1712 +#define WMI_HAL_MAX_SANTENNA 4 +#define WMI_HOST_PDEV_VI_PRIORITY_BIT (1<<2) +#define WMI_HOST_PDEV_BEACON_PRIORITY_BIT (1<<4) +#define WMI_HOST_PDEV_MGMT_PRIORITY_BIT (1<<5) +#define WMI_MAX_CMDS 1024 + +#define FIPS_ALIGN 4 +#define FIPS_ALIGNTO(__addr, __to) \ + ((((unsigned long int)(__addr)) + (__to) - 1) & ~((__to) - 1)) +#define FIPS_IS_ALIGNED(__addr, __to) \ + (!(((unsigned long int)(__addr)) & ((__to)-1))) + +#define WMI_HOST_MAX_SERIAL_ANTENNA 2 +#define WMI_SMART_ANT_MAX_RATE_SERIES 2 + +#define WMI_HOST_F_MS(_v, _f) \ + (((_v) & (_f)) >> (_f##_S)) + +#define WMI_HOST_F_RMW(_var, _v, _f) \ + do { \ + (_var) &= ~(_f); \ + (_var) |= (((_v) << (_f##_S)) & (_f)); \ + } while (0) + +/* vdev capabilities bit mask */ +#define WMI_HOST_VDEV_BEACON_SUPPORT 0x1 +#define WMI_HOST_VDEV_WDS_LRN_ENABLED 0x2 +#define WMI_HOST_VDEV_VOW_ENABLED 0x4 +#define WMI_HOST_VDEV_IS_BEACON_SUPPORTED(param) \ + ((param) & WMI_HOST_VDEV_BEACON_SUPPORT) +#define WMI_HOST_VDEV_IS_WDS_LRN_ENABLED(param) \ + ((param) & WMI_HOST_VDEV_WDS_LRN_ENABLED) +#define WMI_HOST_VDEV_IS_VOW_ENABLED(param) \ + ((param) & WMI_HOST_VDEV_VOW_ENABLED) + +/* TXBF capabilities masks */ +#define WMI_HOST_TXBF_CONF_SU_TX_BFEE_S 0 +#define WMI_HOST_TXBF_CONF_SU_TX_BFEE_M 0x1 +#define WMI_HOST_TXBF_CONF_SU_TX_BFEE \ + (WMI_HOST_TXBF_CONF_SU_TX_BFEE_M << WMI_HOST_TXBF_CONF_SU_TX_BFEE_S) +#define WMI_HOST_TXBF_CONF_SU_TX_BFEE_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TXBF_CONF_SU_TX_BFEE) +#define WMI_HOST_TXBF_CONF_SU_TX_BFEE_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TXBF_CONF_SU_TX_BFEE) + +#define WMI_HOST_TXBF_CONF_MU_TX_BFEE_S 1 +#define WMI_HOST_TXBF_CONF_MU_TX_BFEE_M 0x1 +#define WMI_HOST_TXBF_CONF_MU_TX_BFEE \ + (WMI_HOST_TXBF_CONF_MU_TX_BFEE_M << WMI_HOST_TXBF_CONF_MU_TX_BFEE_S) +#define WMI_HOST_TXBF_CONF_MU_TX_BFEE_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TXBF_CONF_MU_TX_BFEE) +#define WMI_HOST_TXBF_CONF_MU_TX_BFEE_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TXBF_CONF_MU_TX_BFEE) + +#define WMI_HOST_TXBF_CONF_SU_TX_BFER_S 2 +#define WMI_HOST_TXBF_CONF_SU_TX_BFER_M 0x1 +#define WMI_HOST_TXBF_CONF_SU_TX_BFER \ + (WMI_HOST_TXBF_CONF_SU_TX_BFER_M << WMI_HOST_TXBF_CONF_SU_TX_BFER_S) +#define WMI_HOST_TXBF_CONF_SU_TX_BFER_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TXBF_CONF_SU_TX_BFER) +#define WMI_HOST_TXBF_CONF_SU_TX_BFER_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TXBF_CONF_SU_TX_BFER) + +#define WMI_HOST_TXBF_CONF_MU_TX_BFER_S 3 +#define WMI_HOST_TXBF_CONF_MU_TX_BFER_M 0x1 +#define WMI_HOST_TXBF_CONF_MU_TX_BFER \ + (WMI_HOST_TXBF_CONF_MU_TX_BFER_M << WMI_HOST_TXBF_CONF_MU_TX_BFER_S) +#define WMI_HOST_TXBF_CONF_MU_TX_BFER_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TXBF_CONF_MU_TX_BFER) +#define WMI_HOST_TXBF_CONF_MU_TX_BFER_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TXBF_CONF_MU_TX_BFER) + +#define WMI_HOST_TXBF_CONF_STS_CAP_S 4 +#define WMI_HOST_TXBF_CONF_STS_CAP_M 0x7 +#define WMI_HOST_TXBF_CONF_STS_CAP \ + (WMI_HOST_TXBF_CONF_STS_CAP_M << WMI_HOST_TXBF_CONF_STS_CAP_S) +#define WMI_HOST_TXBF_CONF_STS_CAP_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TXBF_CONF_STS_CAP); +#define WMI_HOST_TXBF_CONF_STS_CAP_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TXBF_CONF_STS_CAP) + +#define WMI_HOST_TXBF_CONF_IMPLICIT_BF_S 7 +#define WMI_HOST_TXBF_CONF_IMPLICIT_BF_M 0x1 +#define WMI_HOST_TXBF_CONF_IMPLICIT_BF \ + (WMI_HOST_TXBF_CONF_IMPLICIT_BF_M << WMI_HOST_TXBF_CONF_IMPLICIT_BF_S) +#define WMI_HOST_TXBF_CONF_IMPLICIT_BF_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TXBF_CONF_IMPLICIT_BF) +#define WMI_HOST_TXBF_CONF_IMPLICIT_BF_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TXBF_CONF_IMPLICIT_BF) + +#define WMI_HOST_TXBF_CONF_BF_SND_DIM_S 8 +#define WMI_HOST_TXBF_CONF_BF_SND_DIM_M 0x7 +#define WMI_HOST_TXBF_CONF_BF_SND_DIM \ + (WMI_HOST_TXBF_CONF_BF_SND_DIM_M << WMI_HOST_TXBF_CONF_BF_SND_DIM_S) +#define WMI_HOST_TXBF_CONF_BF_SND_DIM_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TXBF_CONF_BF_SND_DIM) +#define WMI_HOST_TXBF_CONF_BF_SND_DIM_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TXBF_CONF_BF_SND_DIM) + +/* The following WMI_HOST_HEOPS_BSSCOLOR_XXX macros correspond to the + * WMI_HEOPS_COLOR_XXX macros in the FW wmi_unified.h */ +#define WMI_HOST_HEOPS_BSSCOLOR_S 0 +#define WMI_HOST_HEOPS_BSSCOLOR_M 0x3f +#define WMI_HOST_HEOPS_BSSCOLOR \ + (WMI_HOST_HEOPS_BSSCOLOR_M << WMI_HOST_HEOPS_BSSCOLOR_S) +#define WMI_HOST_HEOPS_BSSCOLOR_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HEOPS_BSSCOLOR) +#define WMI_HOST_HEOPS_BSSCOLOR_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HEOPS_BSSCOLOR) + +/* The following WMI_HOST_HEOPS_BSSCOLOR_DISABLE_XXX macros correspond to the + * WMI_HEOPS_BSSCOLORDISABLE_XXX macros in the FW wmi_unified.h */ +#define WMI_HOST_HEOPS_BSSCOLOR_DISABLE_S 30 +#define WMI_HOST_HEOPS_BSSCOLOR_DISABLE_M 0x1 +#define WMI_HOST_HEOPS_BSSCOLOR_DISABLE \ + (WMI_HOST_HEOPS_BSSCOLOR_DISABLE_M << WMI_HOST_HEOPS_BSSCOLOR_DISABLE_S) +#define WMI_HOST_HEOPS_BSSCOLOR_DISABLE_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HEOPS_BSSCOLOR_DISABLE) +#define WMI_HOST_HEOPS_BSSCOLOR_DISABLE_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HEOPS_BSSCOLOR_DISABLE) + +/* HE BF capabilities mask */ +#define WMI_HOST_HE_BF_CONF_SU_BFEE_S 0 +#define WMI_HOST_HE_BF_CONF_SU_BFEE_M 0x1 +#define WMI_HOST_HE_BF_CONF_SU_BFEE \ + (WMI_HOST_HE_BF_CONF_SU_BFEE_M << WMI_HOST_HE_BF_CONF_SU_BFEE_S) +#define WMI_HOST_HE_BF_CONF_SU_BFEE_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_BF_CONF_SU_BFEE) +#define WMI_HOST_HE_BF_CONF_SU_BFEE_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_BF_CONF_SU_BFEE) + +#define WMI_HOST_HE_BF_CONF_SU_BFER_S 1 +#define WMI_HOST_HE_BF_CONF_SU_BFER_M 0x1 +#define WMI_HOST_HE_BF_CONF_SU_BFER \ + (WMI_HOST_HE_BF_CONF_SU_BFER_M << WMI_HOST_HE_BF_CONF_SU_BFER_S) +#define WMI_HOST_HE_BF_CONF_SU_BFER_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_BF_CONF_SU_BFER) +#define WMI_HOST_HE_BF_CONF_SU_BFER_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_BF_CONF_SU_BFER) + +#define WMI_HOST_HE_BF_CONF_MU_BFEE_S 2 +#define WMI_HOST_HE_BF_CONF_MU_BFEE_M 0x1 +#define WMI_HOST_HE_BF_CONF_MU_BFEE \ + (WMI_HOST_HE_BF_CONF_MU_BFEE_M << WMI_HOST_HE_BF_CONF_MU_BFEE_S) +#define WMI_HOST_HE_BF_CONF_MU_BFEE_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_BF_CONF_MU_BFEE) +#define WMI_HOST_HE_BF_CONF_MU_BFEE_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_BF_CONF_MU_BFEE) + +#define WMI_HOST_HE_BF_CONF_MU_BFER_S 3 +#define WMI_HOST_HE_BF_CONF_MU_BFER_M 0x1 +#define WMI_HOST_HE_BF_CONF_MU_BFER \ + (WMI_HOST_HE_BF_CONF_MU_BFER_M << WMI_HOST_HE_BF_CONF_MU_BFER_S) +#define WMI_HOST_HE_BF_CONF_MU_BFER_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_BF_CONF_MU_BFER) +#define WMI_HOST_HE_BF_CONF_MU_BFER_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_BF_CONF_MU_BFER) + +#define WMI_HOST_HE_BF_CONF_DL_OFDMA_S 4 +#define WMI_HOST_HE_BF_CONF_DL_OFDMA_M 0x1 +#define WMI_HOST_HE_BF_CONF_DL_OFDMA \ + (WMI_HOST_HE_BF_CONF_DL_OFDMA_M << WMI_HOST_HE_BF_CONF_DL_OFDMA_S) +#define WMI_HOST_HE_BF_CONF_DL_OFDMA_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_BF_CONF_DL_OFDMA) +#define WMI_HOST_HE_BF_CONF_DL_OFDMA_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_BF_CONF_DL_OFDMA) + +#define WMI_HOST_HE_BF_CONF_UL_OFDMA_S 5 +#define WMI_HOST_HE_BF_CONF_UL_OFDMA_M 0x1 +#define WMI_HOST_HE_BF_CONF_UL_OFDMA \ + (WMI_HOST_HE_BF_CONF_UL_OFDMA_M << WMI_HOST_HE_BF_CONF_UL_OFDMA_S) +#define WMI_HOST_HE_BF_CONF_UL_OFDMA_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_BF_CONF_UL_OFDMA) +#define WMI_HOST_HE_BF_CONF_UL_OFDMA_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_BF_CONF_UL_OFDMA) + +#define WMI_HOST_HE_BF_CONF_UL_MUMIMO_S 6 +#define WMI_HOST_HE_BF_CONF_UL_MUMIMO_M 0x1 +#define WMI_HOST_HE_BF_CONF_UL_MUMIMO \ + (WMI_HOST_HE_BF_CONF_UL_MUMIMO_M << WMI_HOST_HE_BF_CONF_UL_MUMIMO_S) +#define WMI_HOST_HE_BF_CONF_UL_MUMIMO_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_BF_CONF_UL_MUMIMO) +#define WMI_HOST_HE_BF_CONF_UL_MUMIMO_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_BF_CONF_UL_MUMIMO) + +/* HE or VHT Sounding */ +#define WMI_HOST_HE_VHT_SOUNDING_MODE_S 0 +#define WMI_HOST_HE_VHT_SOUNDING_MODE_M 0x1 +#define WMI_HOST_HE_VHT_SOUNDING_MODE \ + (WMI_HOST_HE_VHT_SOUNDING_MODE_M << WMI_HOST_HE_VHT_SOUNDING_MODE_S) +#define WMI_HOST_HE_VHT_SOUNDING_MODE_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_VHT_SOUNDING_MODE) +#define WMI_HOST_HE_VHT_SOUNDING_MODE_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_VHT_SOUNDING_MODE) + +/* SU or MU Sounding */ +#define WMI_HOST_SU_MU_SOUNDING_MODE_S 2 +#define WMI_HOST_SU_MU_SOUNDING_MODE_M 0x1 +#define WMI_HOST_SU_MU_SOUNDING_MODE \ + (WMI_HOST_SU_MU_SOUNDING_MODE_M << \ + WMI_HOST_SU_MU_SOUNDING_MODE_S) +#define WMI_HOST_SU_MU_SOUNDING_MODE_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_SU_MU_SOUNDING_MODE) +#define WMI_HOST_SU_MU_SOUNDING_MODE_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_SU_MU_SOUNDING_MODE) + +/* Trig or Non-Trig Sounding */ +#define WMI_HOST_TRIG_NONTRIG_SOUNDING_MODE_S 3 +#define WMI_HOST_TRIG_NONTRIG_SOUNDING_MODE_M 0x1 +#define WMI_HOST_TRIG_NONTRIG_SOUNDING_MODE \ + (WMI_HOST_TRIG_NONTRIG_SOUNDING_MODE_M << \ + WMI_HOST_TRIG_NONTRIG_SOUNDING_MODE_S) +#define WMI_HOST_TRIG_NONTRIG_SOUNDING_MODE_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TRIG_NONTRIG_SOUNDING_MODE) +#define WMI_HOST_HE_VHT_SU_MU_SOUNDING_MODE_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TRIG_NONTRIG_SOUNDING_MODE) + +#define WMI_HOST_TPC_RATE_MAX 160 +#define WMI_HOST_TPC_TX_NUM_CHAIN 4 +#define WMI_HOST_RXG_CAL_CHAN_MAX 8 +#define WMI_HOST_MAX_NUM_CHAINS 8 +#define WMI_MAX_NUM_OF_RATE_THRESH 4 + +#define WMI_HOST_PDEV_MAX_VDEVS 17 + +/* for QC98XX only */ +/*6 modes (A, HT20, HT40, VHT20, VHT40, VHT80) * 3 reg dommains + */ +#define WMI_HOST_NUM_CTLS_5G 18 +/*6 modes (B, G, HT20, HT40, VHT20, VHT40) * 3 reg domains */ +#define WMI_HOST_NUM_CTLS_2G 18 +#define WMI_HOST_NUM_BAND_EDGES_5G 8 +#define WMI_HOST_NUM_BAND_EDGES_2G 4 + +/*Beelinier 5G*/ +#define WMI_HOST_NUM_CTLS_5G_11A 9 +#define WMI_HOST_NUM_BAND_EDGES_5G_11A 25 +#define WMI_HOST_NUM_CTLS_5G_HT20 24 +#define WMI_HOST_NUM_BAND_EDGES_5G_HT20 25 +#define WMI_HOST_NUM_CTLS_5G_HT40 18 +#define WMI_HOST_NUM_BAND_EDGES_5G_HT40 12 +#define WMI_HOST_NUM_CTLS_5G_HT80 18 +#define WMI_HOST_NUM_BAND_EDGES_5G_HT80 6 +#define WMI_HOST_NUM_CTLS_5G_HT160 9 +#define WMI_HOST_NUM_BAND_EDGES_5G_HT160 2 + +/* Beeliner 2G */ +#define WMI_HOST_NUM_CTLS_2G_11B 6 +#define WMI_HOST_NUM_BAND_EDGES_2G_11B 9 +#define WMI_HOST_NUM_CTLS_2G_20MHZ 30 +#define WMI_HOST_NUM_BAND_EDGES_2G_20MHZ 11 +#define WMI_HOST_NUM_CTLS_2G_40MHZ 18 +#define WMI_HOST_NUM_BAND_EDGES_2G_40MHZ 6 + +/* for QC98XX only */ +#define WMI_HOST_TX_NUM_CHAIN 0x3 +#define WMI_HOST_TPC_REGINDEX_MAX 4 +#define WMI_HOST_ARRAY_GAIN_NUM_STREAMS 2 + +/* AST Index for flow override */ +#define WMI_CONFIG_MSDU_AST_INDEX_0 0x0 +#define WMI_CONFIG_MSDU_AST_INDEX_1 0x1 +#define WMI_CONFIG_MSDU_AST_INDEX_2 0x2 +#define WMI_CONFIG_MSDU_AST_INDEX_3 0x3 + +#include "qdf_atomic.h" + +#ifdef BIG_ENDIAN_HOST + /* This API is used in copying in elements to WMI message, + since WMI message uses multilpes of 4 bytes, This API + converts length into multiples of 4 bytes, and performs copy + */ +#define WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(destp, srcp, len) do { \ + int j; \ + u_int32_t *src, *dest; \ + src = (u_int32_t *)srcp; \ + dest = (u_int32_t *)destp; \ + for (j = 0; j < roundup(len, sizeof(u_int32_t))/4; j++) { \ + *(dest+j) = qdf_le32_to_cpu(*(src+j)); \ + } \ +} while (0) +#else + +#define WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(destp, srcp, len) OS_MEMCPY(destp,\ + srcp, len) + +#endif + +/** macro to convert MAC address from WMI word format to char array */ +#define WMI_HOST_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \ + (c_macaddr)[0] = ((pwmi_mac_addr)->mac_addr31to0) & 0xff; \ + (c_macaddr)[1] = (((pwmi_mac_addr)->mac_addr31to0) >> 8) & 0xff; \ + (c_macaddr)[2] = (((pwmi_mac_addr)->mac_addr31to0) >> 16) & 0xff; \ + (c_macaddr)[3] = (((pwmi_mac_addr)->mac_addr31to0) >> 24) & 0xff; \ + (c_macaddr)[4] = ((pwmi_mac_addr)->mac_addr47to32) & 0xff; \ + (c_macaddr)[5] = (((pwmi_mac_addr)->mac_addr47to32) >> 8) & 0xff; \ + } while (0) + +#define TARGET_INIT_STATUS_SUCCESS 0x0 +#define TARGET_INIT_STATUS_GEN_FAILED 0x1 +#define TARGET_GET_INIT_STATUS_REASON(status) ((status) & 0xffff) +#define TARGET_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff) + +#define MAX_ASSOC_IE_LENGTH 1024 +typedef uint32_t TARGET_INIT_STATUS; + +/** + * @brief Opaque handle of wmi structure + */ +struct wmi_unified; +typedef struct wmi_unified *wmi_unified_t; + +typedef void *ol_scn_t; +/** + * @wmi_event_handler function prototype + */ +typedef int (*wmi_unified_event_handler)(ol_scn_t scn_handle, + uint8_t *event_buf, uint32_t len); + +/** + * @WMI_HOST_WLAN_PHY_MODE: Host based enum ID for corresponding in + * WLAN_PHY_MODE. This should be consistent with WLAN_PHY_MODE always to avoid + * breaking the WMI + */ +typedef enum { + WMI_HOST_MODE_11A = 0, /* 11a Mode */ + WMI_HOST_MODE_11G = 1, /* 11b/g Mode */ + WMI_HOST_MODE_11B = 2, /* 11b Mode */ + WMI_HOST_MODE_11GONLY = 3, /* 11g only Mode */ + WMI_HOST_MODE_11NA_HT20 = 4, /* 11a HT20 mode */ + WMI_HOST_MODE_11NG_HT20 = 5, /* 11g HT20 mode */ + WMI_HOST_MODE_11NA_HT40 = 6, /* 11a HT40 mode */ + WMI_HOST_MODE_11NG_HT40 = 7, /* 11g HT40 mode */ + WMI_HOST_MODE_11AC_VHT20 = 8, + WMI_HOST_MODE_11AC_VHT40 = 9, + WMI_HOST_MODE_11AC_VHT80 = 10, + WMI_HOST_MODE_11AC_VHT20_2G = 11, + WMI_HOST_MODE_11AC_VHT40_2G = 12, + WMI_HOST_MODE_11AC_VHT80_2G = 13, + WMI_HOST_MODE_11AC_VHT80_80 = 14, + WMI_HOST_MODE_11AC_VHT160 = 15, + WMI_HOST_MODE_11AX_HE20 = 16, + WMI_HOST_MODE_11AX_HE40 = 17, + WMI_HOST_MODE_11AX_HE80 = 18, + WMI_HOST_MODE_11AX_HE80_80 = 19, + WMI_HOST_MODE_11AX_HE160 = 20, + WMI_HOST_MODE_11AX_HE20_2G = 21, + WMI_HOST_MODE_11AX_HE40_2G = 22, + WMI_HOST_MODE_11AX_HE80_2G = 23, + WMI_HOST_MODE_UNKNOWN = 24, + WMI_HOST_MODE_MAX = 24 +} WMI_HOST_WLAN_PHY_MODE; + +/* + * Needs to be removed and use channel_param based + * on how it is processed + */ +typedef struct { + /** primary 20 MHz channel frequency in mhz */ + uint32_t mhz; + /** Center frequency 1 in MHz*/ + uint32_t band_center_freq1; + /** Center frequency 2 in MHz - valid only for 11acvht 80plus80 mode*/ + uint32_t band_center_freq2; + /** channel info described below */ + uint32_t info; + /** contains min power, max power, reg power and reg class id. */ + uint32_t reg_info_1; + /** contains antennamax */ + uint32_t reg_info_2; +} wmi_host_channel; + +/** + * enum WMI_HOST_REGDMN_MODE: + * @WMI_HOST_REGDMN_MODE_11A: 11a channels + * @WMI_HOST_REGDMN_MODE_TURBO: 11a turbo-only channels + * @WMI_HOST_REGDMN_MODE_11B: 11b channels + * @WMI_HOST_REGDMN_MODE_PUREG: 11g channels (OFDM only) + * @WMI_HOST_REGDMN_MODE_11G: historical + * @WMI_HOST_REGDMN_MODE_108G: 11g+Turbo channels + * @WMI_HOST_REGDMN_MODE_108A: 11a+Turbo channels + * @WMI_HOST_REGDMN_MODE_11AC_VHT20_2G: 2GHz, VHT20 + * @WMI_HOST_REGDMN_MODE_XR: XR channels + * @WMI_HOST_REGDMN_MODE_11A_HALF_RATE: 11a half rate channels + * @WMI_HOST_REGDMN_MODE_11A_QUARTER_RATE: 11a quarter rate channels + * @WMI_HOST_REGDMN_MODE_11NG_HT20: 11ng HT20 channels + * @WMI_HOST_REGDMN_MODE_11NA_HT20: 11na HT20 channels + * @WMI_HOST_REGDMN_MODE_11NG_HT40PLUS: 11ng HT40+ channels + * @WMI_HOST_REGDMN_MODE_11NG_HT40MINUS: 11ng HT40- channels + * @WMI_HOST_REGDMN_MODE_11NA_HT40PLUS: 11na HT40+ channels + * @WMI_HOST_REGDMN_MODE_11NA_HT40MINUS: 11na HT40- channels + * @WMI_HOST_REGDMN_MODE_11AC_VHT20: 5GHz, VHT20 + * @WMI_HOST_REGDMN_MODE_11AC_VHT40PLUS: 5GHz, VHT40+ channels + * @WMI_HOST_REGDMN_MODE_11AC_VHT40MINUS: 5GHz, VHT40- channels + * @WMI_HOST_REGDMN_MODE_11AC_VHT80: 5GHz, VHT80 channels + * @WMI_HOST_REGDMN_MODE_11AC_VHT160: 5GHz, VHT160 channels + * @WMI_HOST_REGDMN_MODE_11AC_VHT80_80: 5GHz, VHT80+80 channels + * @WMI_HOST_REGDMN_MODE_11AXG_HE20: 11ax 2.4GHz, HE20 channels + * @WMI_HOST_REGDMN_MODE_11AXA_HE20: 11ax 5GHz, HE20 channels + * @WMI_HOST_REGDMN_MODE_11AXG_HE40PLUS: 11ax 2.4GHz, HE40+ channels + * @WMI_HOST_REGDMN_MODE_11AXG_HE40MINUS: 11ax 2.4GHz, HE40- channels + * @WMI_HOST_REGDMN_MODE_11AXA_HE40PLUS: 11ax 5GHz, HE40+ channels + * @WMI_HOST_REGDMN_MODE_11AXA_HE40MINUS: 11ax 5GHz, HE40- channels + * @WMI_HOST_REGDMN_MODE_11AXA_HE80: 11ax 5GHz, HE80 channels + * @WMI_HOST_REGDMN_MODE_11AXA_HE160: 11ax 5GHz, HE160 channels + * @WMI_HOST_REGDMN_MODE_11AXA_HE80_80: 11ax 5GHz, HE80+80 channels + */ +typedef enum { + WMI_HOST_REGDMN_MODE_11A = 0x00000001, + WMI_HOST_REGDMN_MODE_TURBO = 0x00000002, + WMI_HOST_REGDMN_MODE_11B = 0x00000004, + WMI_HOST_REGDMN_MODE_PUREG = 0x00000008, + WMI_HOST_REGDMN_MODE_11G = 0x00000008, + WMI_HOST_REGDMN_MODE_108G = 0x00000020, + WMI_HOST_REGDMN_MODE_108A = 0x00000040, + WMI_HOST_REGDMN_MODE_11AC_VHT20_2G = 0x00000080, + WMI_HOST_REGDMN_MODE_XR = 0x00000100, + WMI_HOST_REGDMN_MODE_11A_HALF_RATE = 0x00000200, + WMI_HOST_REGDMN_MODE_11A_QUARTER_RATE = 0x00000400, + WMI_HOST_REGDMN_MODE_11NG_HT20 = 0x00000800, + WMI_HOST_REGDMN_MODE_11NA_HT20 = 0x00001000, + WMI_HOST_REGDMN_MODE_11NG_HT40PLUS = 0x00002000, + WMI_HOST_REGDMN_MODE_11NG_HT40MINUS = 0x00004000, + WMI_HOST_REGDMN_MODE_11NA_HT40PLUS = 0x00008000, + WMI_HOST_REGDMN_MODE_11NA_HT40MINUS = 0x00010000, + WMI_HOST_REGDMN_MODE_11AC_VHT20 = 0x00020000, + WMI_HOST_REGDMN_MODE_11AC_VHT40PLUS = 0x00040000, + WMI_HOST_REGDMN_MODE_11AC_VHT40MINUS = 0x00080000, + WMI_HOST_REGDMN_MODE_11AC_VHT80 = 0x00100000, + WMI_HOST_REGDMN_MODE_11AC_VHT160 = 0x00200000, + WMI_HOST_REGDMN_MODE_11AC_VHT80_80 = 0x00400000, + WMI_HOST_REGDMN_MODE_11AXG_HE20 = 0x00800000, + WMI_HOST_REGDMN_MODE_11AXA_HE20 = 0x01000000, + WMI_HOST_REGDMN_MODE_11AXG_HE40PLUS = 0x02000000, + WMI_HOST_REGDMN_MODE_11AXG_HE40MINUS = 0x04000000, + WMI_HOST_REGDMN_MODE_11AXA_HE40PLUS = 0x08000000, + WMI_HOST_REGDMN_MODE_11AXA_HE40MINUS = 0x10000000, + WMI_HOST_REGDMN_MODE_11AXA_HE80 = 0x20000000, + WMI_HOST_REGDMN_MODE_11AXA_HE160 = 0x40000000, + WMI_HOST_REGDMN_MODE_11AXA_HE80_80 = 0x80000000, + WMI_HOST_REGDMN_MODE_ALL = 0xffffffff +} WMI_HOST_REGDMN_MODE; + +/** + * enum WMI_HOST_WLAN_BAND_CAPABILITY: Band capability (2.4 GHz, 5 GHz). Maps to + * WLAN_BAND_CAPABILITY used in firmware header file(s). + * @WMI_HOST_WLAN_2G_CAPABILITY: 2.4 GHz capable + * @WMI_HOST_WLAN_5G_CAPABILITY: 5 GHz capable + */ +typedef enum { + WMI_HOST_WLAN_2G_CAPABILITY = 0x1, + WMI_HOST_WLAN_5G_CAPABILITY = 0x2, +} WMI_HOST_WLAN_BAND_CAPABILITY; + +/** + * enum wmi_host_channel_width: Channel operating width. Maps to + * wmi_channel_width used in firmware header file(s). + * @WMI_HOST_CHAN_WIDTH_20: 20 MHz channel operating width + * @WMI_HOST_CHAN_WIDTH_40: 40 MHz channel operating width + * @WMI_HOST_CHAN_WIDTH_80: 80 MHz channel operating width + * @WMI_HOST_CHAN_WIDTH_160: 160 MHz channel operating width + * @WMI_HOST_CHAN_WIDTH_80P80: 80+80 MHz channel operating width + * @WMI_HOST_CHAN_WIDTH_5: 5 MHz channel operating width + * @WMI_HOST_CHAN_WIDTH_10: 10 MHz channel operating width + * @WMI_HOST_CHAN_WIDTH_165: 165 MHz channel operating width + */ +typedef enum { + WMI_HOST_CHAN_WIDTH_20 = 0, + WMI_HOST_CHAN_WIDTH_40 = 1, + WMI_HOST_CHAN_WIDTH_80 = 2, + WMI_HOST_CHAN_WIDTH_160 = 3, + WMI_HOST_CHAN_WIDTH_80P80 = 4, + WMI_HOST_CHAN_WIDTH_5 = 5, + WMI_HOST_CHAN_WIDTH_10 = 6, + WMI_HOST_CHAN_WIDTH_165 = 7, +} wmi_host_channel_width; + +#define ATH_EXPONENT_TO_VALUE(v) ((1< CCK 1 Mbps rate is allowed + * bit 1 -> CCK 2 Mbps rate is allowed + * bit 2 -> CCK 5.5 Mbps rate is allowed + * bit 3 -> CCK 11 Mbps rate is allowed + * bit 4 -> OFDM BPSK modulation, 1/2 coding rate is allowed + * bit 5 -> OFDM BPSK modulation, 3/4 coding rate is allowed + * bit 6 -> OFDM QPSK modulation, 1/2 coding rate is allowed + * bit 7 -> OFDM QPSK modulation, 3/4 coding rate is allowed + * bit 8 -> OFDM 16-QAM modulation, 1/2 coding rate is allowed + * bit 9 -> OFDM 16-QAM modulation, 3/4 coding rate is allowed + * bit 10 -> OFDM 64-QAM modulation, 2/3 coding rate is allowed + * bit 11 -> OFDM 64-QAM modulation, 3/4 coding rate is allowed + * @nss_mask: Spatial streams permitted + * bit 0: if set, Nss = 1 (non-MIMO) is permitted + * bit 1: if set, Nss = 2 (2x2 MIMO) is permitted + * bit 2: if set, Nss = 3 (3x3 MIMO) is permitted + * bit 3: if set, Nss = 4 (4x4 MIMO) is permitted + * bit 4: if set, Nss = 5 (5x5 MIMO) is permitted + * bit 5: if set, Nss = 6 (6x6 MIMO) is permitted + * bit 6: if set, Nss = 7 (7x7 MIMO) is permitted + * bit 7: if set, Nss = 8 (8x8 MIMO) is permitted + * If no bits are set, target will choose what NSS type to use + * @retry_limit: Maximum number of retries before ACK + * @chain_mask: Chains to be used for transmission + * @bw_mask: Bandwidth to be used for transmission + * bit 0 -> 5MHz + * bit 1 -> 10MHz + * bit 2 -> 20MHz + * bit 3 -> 40MHz + * bit 4 -> 80MHz + * bit 5 -> 160MHz + * bit 6 -> 80_80MHz + * @preamble_type: Preamble types for transmission + * bit 0: if set, OFDM + * bit 1: if set, CCK + * bit 2: if set, HT + * bit 3: if set, VHT + * bit 4: if set, HE + * @frame_type: Data or Management frame + * Data:1 Mgmt:0 + * @cfr_enable: flag to enable CFR capture + * 0:disable 1:enable + */ +struct tx_send_params { + uint32_t pwr:8, + mcs_mask:12, + nss_mask:8, + retry_limit:4; + uint32_t chain_mask:8, + bw_mask:7, + preamble_type:5, + frame_type:1, + cfr_enable:1, + reserved:10; +}; + +/** + * struct wmi_mgmt_params - wmi mgmt cmd parameters + * @tx_frame: management tx frame + * @frm_len: frame length + * @vdev_id: vdev id + * @tx_type: type of management frame (determines what callback to use) + * @chanfreq: channel frequency + * @desc_id: descriptor id relyaed back by target + * @pdata: frame data + * @macaddr: macaddr of peer + * @qdf_ctx: qdf context for qdf_nbuf_map + * @tx_param: TX send parameters + * @tx_params_valid: Flag that indicates if TX params are valid + * @use_6mbps: specify whether management frame to transmit should + * use 6 Mbps rather than 1 Mbps min rate(for 5GHz band or P2P) + */ +struct wmi_mgmt_params { + void *tx_frame; + uint16_t frm_len; + uint8_t vdev_id; + uint8_t tx_type; + uint16_t chanfreq; + uint16_t desc_id; + void *pdata; + uint8_t *macaddr; + void *qdf_ctx; + struct tx_send_params tx_param; + bool tx_params_valid; + uint8_t use_6mbps; +}; + +/** + * struct wmi_offchan_data_tx_params - wmi offchan data tx cmd parameters + * @tx_frame: management tx frame + * @frm_len: frame length + * @vdev_id: vdev id + * @tx_params_valid: Flag that indicates if TX params are valid + * @chanfreq: channel frequency + * @desc_id: descriptor id relyaed back by target + * @pdata: frame data + * @macaddr: macaddr of peer + * @qdf_ctx: qdf context for qdf_nbuf_map + * @tx_param: TX send parameters + */ +struct wmi_offchan_data_tx_params { + void *tx_frame; + uint16_t frm_len; + uint8_t vdev_id; + bool tx_params_valid; + uint16_t chanfreq; + uint16_t desc_id; + void *pdata; + uint8_t *macaddr; + void *qdf_ctx; + struct tx_send_params tx_param; +}; + +/** + * struct sta_uapsd_params - uapsd auto trig params + * @wmm_ac: WMM access category from 0 to 3 + * @user_priority: User priority to use in trigger frames + * @service_interval: service interval + * @suspend_interval: suspend interval + * @delay_interval: delay interval + */ +struct sta_uapsd_params { + uint32_t wmm_ac; + uint32_t user_priority; + uint32_t service_interval; + uint32_t suspend_interval; + uint32_t delay_interval; +}; + +/** + * struct ta_uapsd_trig_params - uapsd trigger parameter + * @vdevid: vdev id + * @peer_addr: peer address + * @num_ac: no of access category + * @auto_triggerparam: trigger parameters + */ +struct sta_uapsd_trig_params { + uint32_t vdevid; + uint8_t peer_addr[QDF_MAC_ADDR_SIZE]; + uint32_t num_ac; + struct sta_uapsd_params *auto_triggerparam; +}; + +#define WMI_NUM_AC (4) +#define WMI_MAX_NUM_AC 4 + + +enum wmi_peer_rate_report_cond_phy_type { + WMI_PEER_RATE_REPORT_COND_11B = 0, + WMI_PEER_RATE_REPORT_COND_11A_G, + WMI_PEER_RATE_REPORT_COND_11N, + WMI_PEER_RATE_REPORT_COND_11AC, + WMI_PEER_RATE_REPORT_COND_MAX_NUM +}; + +/** + * struct report_rate_delta - peer specific parameters + * @percent: percentage + * @delta_min: rate min delta + */ +struct report_rate_delta { + uint32_t percent; /* in unit of 12.5% */ + uint32_t delta_min; /* in unit of Mbps */ +}; + +/** + * struct report_rate_per_phy - per phy report parameters + * @cond_flags: condition flag val + * @delta: rate delta + * @report_rate_threshold: rate threshold + */ +struct report_rate_per_phy { + /* + * PEER_RATE_REPORT_COND_FLAG_DELTA, + * PEER_RATE_REPORT_COND_FLAG_THRESHOLD + * Any of these two conditions or both of + * them can be set. + */ + uint32_t cond_flags; + struct report_rate_delta delta; + /* + * In unit of Mbps. There are at most 4 thresholds + * If the threshold count is less than 4, set zero to + * the one following the last threshold + */ + uint32_t report_rate_threshold[WMI_MAX_NUM_OF_RATE_THRESH]; +}; + +/** + * struct peer_rate_report_params - peer rate report parameters + * @rate_report_enable: enable rate report param + * @backoff_time: backoff time + * @timer_period: timer + * @report_per_phy: report per phy type + */ +struct wmi_peer_rate_report_params { + uint32_t rate_report_enable; + uint32_t backoff_time; /* in unit of msecond */ + uint32_t timer_period; /* in unit of msecond */ + /* + *In the following field, the array index means the phy type, + * please see enum wmi_peer_rate_report_cond_phy_type for detail + */ + struct report_rate_per_phy report_per_phy[ + WMI_PEER_RATE_REPORT_COND_MAX_NUM]; + +}; + +/** + * struct t_thermal_cmd_params - thermal command parameters + * @min_temp: minimum temprature + * @max_temp: maximum temprature + * @thermal_enable: thermal enable + */ +struct thermal_cmd_params { + uint16_t min_temp; + uint16_t max_temp; + uint8_t thermal_enable; +}; + +#define WMI_LRO_IPV4_SEED_ARR_SZ 5 +#define WMI_LRO_IPV6_SEED_ARR_SZ 11 + +/** + * struct wmi_lro_config_cmd_t - set LRO init parameters + * @lro_enable: indicates whether lro is enabled + * @tcp_flag: If the TCP flags from the packet do not match + * the values in this field after masking with TCP flags mask + * below, packet is not LRO eligible + * @tcp_flag_mask: field for comparing the TCP values provided + * above with the TCP flags field in the received packet + * @toeplitz_hash_ipv4: contains seed needed to compute the flow id + * 5-tuple toeplitz hash for ipv4 packets + * @toeplitz_hash_ipv6: contains seed needed to compute the flow id + * 5-tuple toeplitz hash for ipv6 packets + * @pdev_id: radio on which lro hash is configured + */ +struct wmi_lro_config_cmd_t { + uint32_t lro_enable; + uint32_t tcp_flag:9, + tcp_flag_mask:9; + uint32_t toeplitz_hash_ipv4[WMI_LRO_IPV4_SEED_ARR_SZ]; + uint32_t toeplitz_hash_ipv6[WMI_LRO_IPV6_SEED_ARR_SZ]; + uint32_t pdev_id; +}; + +/** + * struct wmi_probe_resp_params - send probe response parameters + * @prb_rsp_template_frm: pointer to template probe response template + * @prb_rsp_template_len: length of probe response template + */ +struct wmi_probe_resp_params { + uint8_t *prb_rsp_template_frm; + uint32_t prb_rsp_template_len; +}; + +/* struct set_key_params: structure containing + * installation key parameters + * @vdev_id: vdev id + * @key_len: key length + * @key_idx: key index + * @group_key_idx: group key index for VLAN + * @peer_mac: peer mac address + * @key_flags: key flags, 0:pairwise key, 1:group key, 2:static key + * @key_cipher: key cipher based on security mode + * @key_txmic_len: tx mic length + * @key_rxmic_len: rx mic length + * @key_tsc_counter: key tx sc counter + * @key_rsc_counter: key rx sc counter + * @key_rsc_ctr: key rx sc counter (stack variable, unnecessary heap alloc for + * key_rsc_counter should be cleaned up eventually) + * @rx_iv: receive IV, applicable only in case of WAPI + * @tx_iv: transmit IV, applicable only in case of WAPI + * @key_data: key data + */ +struct set_key_params { + uint8_t vdev_id; + uint16_t key_len; + uint32_t key_idx; + uint32_t group_key_idx; + uint8_t peer_mac[QDF_MAC_ADDR_SIZE]; + uint32_t key_flags; + uint32_t key_cipher; + uint32_t key_txmic_len; + uint32_t key_rxmic_len; + uint64_t key_tsc_counter; + uint64_t *key_rsc_counter; + uint64_t key_rsc_ctr; +#if defined(ATH_SUPPORT_WAPI) || defined(FEATURE_WLAN_WAPI) + uint8_t rx_iv[16]; + uint8_t tx_iv[16]; +#endif + uint8_t key_data[WMI_MAC_MAX_KEY_LENGTH]; +}; + +/** + * struct scan_mac_oui - oui parameters + * @oui: oui parameters + * @vdev_id: interface id + * @enb_probe_req_sno_randomization: control probe req sequence no randomization + * @ie_whitelist: probe req IE whitelist attrs + */ +struct scan_mac_oui { + uint8_t oui[WMI_WIFI_SCANNING_MAC_OUI_LENGTH]; + uint32_t vdev_id; + bool enb_probe_req_sno_randomization; + struct probe_req_whitelist_attr ie_whitelist; +}; + +#define WMI_PASSPOINT_REALM_LEN 256 +#define WMI_PASSPOINT_ROAMING_CONSORTIUM_ID_NUM 16 +#define WMI_PASSPOINT_PLMN_LEN 3 +/** + * struct wifi_passpoint_network_param - passpoint network block + * @id: identifier of this network block + * @realm: null terminated UTF8 encoded realm, 0 if unspecified + * @plmn: mcc/mnc combination as per rules, 0s if unspecified + * @roaming_consortium_ids: roaming consortium ids to match, 0s if unspecified + */ +struct wifi_passpoint_network_param { + uint32_t id; + uint8_t realm[WMI_PASSPOINT_REALM_LEN]; + uint8_t plmn[WMI_PASSPOINT_PLMN_LEN]; + int64_t roaming_consortium_ids[ + WMI_PASSPOINT_ROAMING_CONSORTIUM_ID_NUM]; +}; + +/** + * struct wifi_passpoint_req_param - passpoint request + * @request_id: request identifier + * @vdev_id: vdev that is the target of the request + * @num_networks: number of valid entries in @networks + * @networks: passpoint networks + */ +struct wifi_passpoint_req_param { + uint32_t request_id; + uint32_t vdev_id; + uint32_t num_networks; + struct wifi_passpoint_network_param networks[]; +}; + +/* struct mobility_domain_info - structure containing + * mobility domain info + * @mdie_present: mobility domain present or not + * @mobility_domain: mobility domain + */ +struct mobility_domain_info { + uint8_t mdie_present; + uint16_t mobility_domain; +}; + +#define WMI_HOST_ROAM_OFFLOAD_NUM_MCS_SET (16) + +/* This TLV will be filled only in case roam offload + * for wpa2-psk/pmkid/ese/11r is enabled */ +typedef struct { + /* + * TLV tag and len; tag equals + * WMITLV_TAG_STRUC_wmi_roam_offload_fixed_param + */ + uint32_t tlv_header; + uint32_t rssi_cat_gap; /* gap for every category bucket */ + uint32_t prefer_5g; /* prefer select 5G candidate */ + uint32_t select_5g_margin; + uint32_t reassoc_failure_timeout; /* reassoc failure timeout */ + uint32_t capability; + uint32_t ht_caps_info; + uint32_t ampdu_param; + uint32_t ht_ext_cap; + uint32_t ht_txbf; + uint32_t asel_cap; + uint32_t qos_enabled; + uint32_t qos_caps; + uint32_t wmm_caps; + /* since this is 4 byte aligned, we don't declare it as tlv array */ + uint32_t mcsset[WMI_HOST_ROAM_OFFLOAD_NUM_MCS_SET >> 2]; + uint32_t ho_delay_for_rx; + uint32_t roam_preauth_retry_count; + uint32_t roam_preauth_no_ack_timeout; +} roam_offload_param; + +#define WMI_FILS_MAX_RRK_LENGTH 64 +#define WMI_FILS_MAX_RIK_LENGTH WMI_FILS_MAX_RRK_LENGTH +#define WMI_FILS_MAX_REALM_LENGTH 256 +#define WMI_FILS_MAX_USERNAME_LENGTH 16 +#define WMI_FILS_FT_MAX_LEN 48 + +/** + * struct roam_fils_params - Roam FILS params + * @username: username + * @username_length: username length + * @next_erp_seq_num: next ERP sequence number + * @rrk: RRK + * @rrk_length: length of @rrk + * @rik: RIK + * @rik_length: length of @rik + * @realm: realm + * @realm_len: length of @realm + * @fils_ft: xx_key for FT-FILS connection + * @fils_ft_len: length of FT-FILS + */ +struct roam_fils_params { + uint8_t username[WMI_FILS_MAX_USERNAME_LENGTH]; + uint32_t username_length; + uint32_t next_erp_seq_num; + uint8_t rrk[WMI_FILS_MAX_RRK_LENGTH]; + uint32_t rrk_length; + uint8_t rik[WMI_FILS_MAX_RIK_LENGTH]; + uint32_t rik_length; + uint8_t realm[WMI_FILS_MAX_REALM_LENGTH]; + uint32_t realm_len; + uint8_t fils_ft[WMI_FILS_FT_MAX_LEN]; + uint8_t fils_ft_len; +}; + +/* struct roam_offload_scan_params - structure + * containing roaming offload scan parameters + * @is_roam_req_valid: flag to tell whether roam req + * is valid or NULL + * @mode: stores flags for scan + * @vdev_id: vdev id + * @roam_offload_enabled: flag for offload enable + * @disable_self_roam: disable roaming to self BSSID + * @psk_pmk: pre shared key/pairwise master key + * @pmk_len: length of PMK + * @prefer_5ghz: prefer select 5G candidate + * @roam_rssi_cat_gap: gap for every category bucket + * @select_5ghz_margin: select 5 Ghz margin + * @krk: KRK + * @btk: BTK + * @reassoc_failure_timeout: reassoc failure timeout + * @rokh_id_length: r0kh id length + * @rokh_id: r0kh id + * @roam_key_mgmt_offload_enabled: roam offload flag + * @auth_mode: authentication mode + * @fw_okc: use OKC in firmware + * @fw_pmksa_cache: use PMKSA cache in firmware + * @is_ese_assoc: flag to determine ese assoc + * @mdid: mobility domain info + * @roam_offload_params: roam offload tlv params + * @min_delay_btw_roam_scans: Delay btw two scans + * @roam_trigger_reason_bitmask: Roam reason bitmark + * @roam_offload_params: roam offload tlv params, unused + * in non tlv target, only for roam offload feature + * @assoc_ie_length: Assoc IE length + * @assoc_ie: Assoc IE buffer + * @add_fils_tlv: add FILS TLV boolean + * @roam_fils_params: roam fils params + * @rct_validity_timer: duration value for which the entries in + * roam candidate table are valid + * @roam_scan_inactivity_time: inactivity monitoring time in ms for which the + * device is considered to be inactive + * @is_sae_same_pmk: Flag to indicate fw whether WLAN_SAE_SINGLE_PMK feature is + * enable or not + * @enable_ft_im_roaming: Flag to enable/disable FT-IM roaming upon receiving + * deauth + * @roam_inactive_data_packet_count: Maximum allowed data packets count during + * roam_scan_inactivity_time. + * @roam_scan_period_after_inactivity: Roam scan period in ms after device is + * in inactive state. + */ +struct roam_offload_scan_params { + uint8_t is_roam_req_valid; + uint32_t mode; + uint32_t vdev_id; +#ifdef WLAN_FEATURE_ROAM_OFFLOAD + uint8_t roam_offload_enabled; + bool disable_self_roam; + uint8_t psk_pmk[WMI_ROAM_SCAN_PSK_SIZE]; + uint32_t pmk_len; + uint8_t prefer_5ghz; + uint8_t roam_rssi_cat_gap; + uint8_t select_5ghz_margin; + uint8_t krk[WMI_KRK_KEY_LEN]; + uint8_t btk[WMI_BTK_KEY_LEN]; + uint32_t reassoc_failure_timeout; + uint32_t rokh_id_length; + uint8_t rokh_id[WMI_ROAM_R0KH_ID_MAX_LEN]; + uint8_t roam_key_mgmt_offload_enabled; + int auth_mode; + bool fw_okc; + bool fw_pmksa_cache; + uint32_t rct_validity_timer; + bool is_adaptive_11r; + bool is_sae_same_pmk; + bool enable_ft_im_roaming; +#endif + uint32_t min_delay_btw_roam_scans; + uint32_t roam_trigger_reason_bitmask; + bool is_ese_assoc; + bool is_11r_assoc; + struct mobility_domain_info mdid; +#ifdef WLAN_FEATURE_ROAM_OFFLOAD + roam_offload_param roam_offload_params; +#endif + uint32_t assoc_ie_length; + uint8_t assoc_ie[MAX_ASSOC_IE_LENGTH]; + bool add_fils_tlv; + uint32_t roam_scan_inactivity_time; + uint32_t roam_inactive_data_packet_count; + uint32_t roam_scan_period_after_inactivity; +#ifdef WLAN_FEATURE_FILS_SK + struct roam_fils_params roam_fils_params; +#endif +}; + +/** + * struct wifi_epno_network - enhanced pno network block + * @ssid: ssid + * @rssi_threshold: threshold for considering this SSID as found, required + * granularity for this threshold is 4dBm to 8dBm + * @flags: WIFI_PNO_FLAG_XXX + * @auth_bit_field: auth bit field for matching WPA IE + */ +struct wifi_epno_network_params { + struct mac_ssid ssid; + int8_t rssi_threshold; + uint8_t flags; + uint8_t auth_bit_field; +}; + +/** + * struct wifi_enhanced_pno_params - enhanced pno network params + * @request_id: request id number + * @vdev_id: vdev id + * @min_5ghz_rssi: minimum 5GHz RSSI for a BSSID to be considered + * @min_24ghz_rssi: minimum 2.4GHz RSSI for a BSSID to be considered + * @initial_score_max: maximum score that a network can have before bonuses + * @current_connection_bonus: only report when there is a network's score this + * much higher than the current connection + * @same_network_bonus: score bonus for all n/w with the same network flag + * @secure_bonus: score bonus for networks that are not open + * @band_5ghz_bonus: 5GHz RSSI score bonus (applied to all 5GHz networks) + * @num_networks: number of ssids + * @networks: EPNO networks + */ +struct wifi_enhanced_pno_params { + uint32_t request_id; + uint32_t vdev_id; + uint32_t min_5ghz_rssi; + uint32_t min_24ghz_rssi; + uint32_t initial_score_max; + uint32_t current_connection_bonus; + uint32_t same_network_bonus; + uint32_t secure_bonus; + uint32_t band_5ghz_bonus; + uint32_t num_networks; + struct wifi_epno_network_params networks[]; +}; + +enum { + WMI_AP_RX_DATA_OFFLOAD = 0x00, + WMI_STA_RX_DATA_OFFLOAD = 0x01, +}; + +/** + * enum extscan_configuration_flags - extscan config flags + * @WMI_EXTSCAN_LP_EXTENDED_BATCHING: extended batching + */ +enum wmi_extscan_configuration_flags { + WMI_EXTSCAN_LP_EXTENDED_BATCHING = 0x00000001, +}; + +/** + * enum extscan_report_events_type - extscan report events type + * @EXTSCAN_REPORT_EVENTS_BUFFER_FULL: report only when scan history is % full + * @EXTSCAN_REPORT_EVENTS_EACH_SCAN: report a scan completion event after scan + * @EXTSCAN_REPORT_EVENTS_FULL_RESULTS: forward scan results + * (beacons/probe responses + IEs) + * in real time to HAL, in addition to completion events. + * Note: To keep backward compatibility, + * fire completion events regardless of REPORT_EVENTS_EACH_SCAN. + * @EXTSCAN_REPORT_EVENTS_NO_BATCH: controls batching, + * 0 => batching, 1 => no batching + */ +enum wmi_extscan_report_events_type { + WMI_EXTSCAN_REPORT_EVENTS_BUFFER_FULL = 0x00, + WMI_EXTSCAN_REPORT_EVENTS_EACH_SCAN = 0x01, + WMI_EXTSCAN_REPORT_EVENTS_FULL_RESULTS = 0x02, + WMI_EXTSCAN_REPORT_EVENTS_NO_BATCH = 0x04, +}; + +/** + * struct extscan_capabilities_params - ext scan capablities + * @request_id: request_id + * @vdev_id: vdev id + */ +struct extscan_capabilities_params { + uint32_t request_id; + uint8_t vdev_id; +}; + +/** + * struct extscan_capabilities_reset_params - ext scan capablities reset + * parameter + * @request_id: request_id + * @vdev_id: vdev id + */ +struct extscan_capabilities_reset_params { + uint32_t request_id; + uint8_t vdev_id; +}; + +/** + * struct extscan_bssid_hotlist_reset_params - ext scan hotlist reset parameter + * @request_id: request_id + * @vdev_id: vdev id + */ +struct extscan_bssid_hotlist_reset_params { + uint32_t request_id; + uint8_t vdev_id; +}; + +/** + * struct extscan_stop_req_params - ext scan stop parameter + * @request_id: request_id + * @vdev_id: vdev id + */ +struct extscan_stop_req_params { + uint32_t request_id; + uint8_t vdev_id; +}; + +/** + * struct ap_threshold_params - ap threshold parameter + * @bssid: mac address + * @low: low threshold + * @high: high threshold + */ +struct ap_threshold_params { + struct qdf_mac_addr bssid; + int32_t low; + int32_t high; +}; + +/** + * struct extscan_set_sig_changereq_params - ext scan channel parameter + * @request_id: request_id + * @vdev_id: vdev id + * @rssi_sample_size: Number of samples for averaging RSSI + * @lostap_sample_size: Number of missed samples to confirm AP loss + * @min_breaching: Number of APs breaching threshold required for firmware + * @num_ap: no of scanned ap + * @ap: ap threshold parameter + */ +struct extscan_set_sig_changereq_params { + uint32_t request_id; + uint8_t vdev_id; + uint32_t rssi_sample_size; + uint32_t lostap_sample_size; + uint32_t min_breaching; + uint32_t num_ap; + struct ap_threshold_params ap[WMI_EXTSCAN_MAX_SIGNIFICANT_CHANGE_APS]; +}; + +/** + * struct extscan_cached_result_params - ext scan cached parameter + * @request_id: request_id + * @vdev_id: vdev id + * @flush: cached results flush + */ +struct extscan_cached_result_params { + uint32_t request_id; + uint8_t vdev_id; + bool flush; +}; + +#define WMI_WLAN_EXTSCAN_MAX_CHANNELS 36 +#define WMI_WLAN_EXTSCAN_MAX_BUCKETS 16 +#define WMI_WLAN_EXTSCAN_MAX_HOTLIST_APS 128 +#define WMI_WLAN_EXTSCAN_MAX_SIGNIFICANT_CHANGE_APS 64 +#define WMI_EXTSCAN_MAX_HOTLIST_SSIDS 8 + +/** + * struct wifi_scan_channelspec_params - wifi scan channel parameter + * @channel: Frequency in MHz + * @dwell_time_ms: dwell time in milliseconds + * @passive: passive scan + * @channel_class: channel class + */ +struct wifi_scan_channelspec_params { + uint32_t channel; + uint32_t dwell_time_ms; + bool passive; + uint8_t channel_class; +}; + +/** + * enum wmi_wifi_band - wifi band + * @WMI_WIFI_BAND_UNSPECIFIED: unspecified band + * @WMI_WIFI_BAND_BG: 2.4 GHz + * @WMI_WIFI_BAND_A: 5 GHz without DFS + * @WMI_WIFI_BAND_ABG: 2.4 GHz + 5 GHz; no DFS + * @WMI_WIFI_BAND_A_DFS_ONLY: 5 GHz DFS only + * @WMI_WIFI_BAND_A_WITH_DFS: 5 GHz with DFS + * @WMI_WIFI_BAND_ABG_WITH_DFS: 2.4 GHz + 5 GHz with DFS + * @WMI_WIFI_BAND_MAX: max range + */ +enum wmi_wifi_band { + WMI_WIFI_BAND_UNSPECIFIED, + WMI_WIFI_BAND_BG = 1, + WMI_WIFI_BAND_A = 2, + WMI_WIFI_BAND_ABG = 3, + WMI_WIFI_BAND_A_DFS_ONLY = 4, + /* 5 is reserved */ + WMI_WIFI_BAND_A_WITH_DFS = 6, + WMI_WIFI_BAND_ABG_WITH_DFS = 7, + /* Keep it last */ + WMI_WIFI_BAND_MAX +}; + +/** + * struct wifi_scan_bucket_params - wifi scan bucket spec + * @bucket: bucket identifier + * @band: wifi band + * @period: Desired period, in millisecond; if this is too + * low, the firmware should choose to generate results as fast as + * it can instead of failing the command byte + * for exponential backoff bucket this is the min_period + * @report_events: 0 => normal reporting (reporting rssi history + * only, when rssi history buffer is % full) + * 1 => same as 0 + report a scan completion event after scanning + * this bucket + * 2 => same as 1 + forward scan results + * (beacons/probe responses + IEs) in real time to HAL + * @max_period: if max_period is non zero or different than period, + * then this bucket is an exponential backoff bucket and + * the scan period will grow exponentially as per formula: + * actual_period(N) = period ^ (N/(step_count+1)) to a + * maximum period of max_period + * @exponent: for exponential back off bucket: multiplier: + * new_period = old_period * exponent + * @step_count: for exponential back off bucket, number of scans performed + * at a given period and until the exponent is applied + * @num_channels: channels to scan; these may include DFS channels + * Note that a given channel may appear in multiple buckets + * @min_dwell_time_active: per bucket minimum active dwell time + * @max_dwell_time_active: per bucket maximum active dwell time + * @min_dwell_time_passive: per bucket minimum passive dwell time + * @max_dwell_time_passive: per bucket maximum passive dwell time + * @channels: Channel list + */ +struct wifi_scan_bucket_params { + uint8_t bucket; + enum wmi_wifi_band band; + uint32_t period; + uint32_t report_events; + uint32_t max_period; + uint32_t exponent; + uint32_t step_count; + uint32_t num_channels; + uint32_t min_dwell_time_active; + uint32_t max_dwell_time_active; + uint32_t min_dwell_time_passive; + uint32_t max_dwell_time_passive; + struct wifi_scan_channelspec_params + channels[WMI_WLAN_EXTSCAN_MAX_CHANNELS]; +}; + +/** + * struct wifi_scan_cmd_req_params - wifi scan command request params + * @base_period: base timer period + * @max_ap_per_scan: max ap per scan + * @report_threshold_percent: report threshold + * in %, when buffer is this much full, wake up host + * @report_threshold_num_scans: report threshold number of scans + * in number of scans, wake up host after these many scans + * @request_id: request id + * @vdev_id: vdev that is the target of the request + * @num_buckets: number of buckets + * @min_dwell_time_active: per bucket minimum active dwell time + * @max_dwell_time_active: per bucket maximum active dwell time + * @min_dwell_time_passive: per bucket minimum passive dwell time + * @max_dwell_time_passive: per bucket maximum passive dwell time + * @configuration_flags: configuration flags + * @extscan_adaptive_dwell_mode: adaptive dwelltime mode for extscan + * @buckets: buckets array + */ +struct wifi_scan_cmd_req_params { + uint32_t base_period; + uint32_t max_ap_per_scan; + uint32_t report_threshold_percent; + uint32_t report_threshold_num_scans; + uint32_t request_id; + uint8_t vdev_id; + uint32_t num_buckets; + uint32_t min_dwell_time_active; + uint32_t max_dwell_time_active; + uint32_t min_dwell_time_passive; + uint32_t max_dwell_time_passive; + uint32_t configuration_flags; + enum scan_dwelltime_adaptive_mode extscan_adaptive_dwell_mode; + struct wifi_scan_bucket_params buckets[WMI_WLAN_EXTSCAN_MAX_BUCKETS]; +}; + +/** + * struct mac_ts_info_tfc - mac ts info parameters + * @burstSizeDefn: burst size + * @reserved: reserved + * @ackPolicy: ack policy + * @psb: psb + * @aggregation: aggregation + * @accessPolicy: access policy + * @direction: direction + * @tsid: direction + * @trafficType: traffic type + */ +struct mac_ts_info_tfc { +#ifndef ANI_LITTLE_BIT_ENDIAN + uint8_t burstSizeDefn:1; + uint8_t reserved:7; +#else + uint8_t reserved:7; + uint8_t burstSizeDefn:1; +#endif + +#ifndef ANI_LITTLE_BIT_ENDIAN + uint16_t ackPolicy:2; + uint16_t userPrio:3; + uint16_t psb:1; + uint16_t aggregation:1; + uint16_t accessPolicy:2; + uint16_t direction:2; + uint16_t tsid:4; + uint16_t trafficType:1; +#else + uint16_t trafficType:1; + uint16_t tsid:4; + uint16_t direction:2; + uint16_t accessPolicy:2; + uint16_t aggregation:1; + uint16_t psb:1; + uint16_t userPrio:3; + uint16_t ackPolicy:2; +#endif +} qdf_packed; + +/** + * struct mac_ts_info_sch - mac ts info schedule parameters + * @rsvd: reserved + * @schedule: schedule bit + */ +struct mac_ts_info_sch { +#ifndef ANI_LITTLE_BIT_ENDIAN + uint8_t rsvd:7; + uint8_t schedule:1; +#else + uint8_t schedule:1; + uint8_t rsvd:7; +#endif +} qdf_packed; + +/** + * struct mac_ts_info_sch - mac ts info schedule parameters + * @traffic: mac tfc parameter + * @schedule: mac schedule parameters + */ +struct mac_ts_info { + struct mac_ts_info_tfc traffic; + struct mac_ts_info_sch schedule; +} qdf_packed; + +/** + * struct mac_tspec_ie - mac ts spec + * @type: type + * @length: length + * @tsinfo: tsinfo + * @nomMsduSz: nomMsduSz + * @maxMsduSz: maxMsduSz + * @minSvcInterval: minSvcInterval + * @maxSvcInterval: maxSvcInterval + * @inactInterval: inactInterval + * @suspendInterval: suspendInterval + * @svcStartTime: svcStartTime + * @minDataRate: minDataRate + * @meanDataRate: meanDataRate + * @peakDataRate: peakDataRate + * @maxBurstSz: maxBurstSz + * @delayBound: delayBound + * @minPhyRate: minPhyRate + * @surplusBw: surplusBw + * @mediumTime: mediumTime + */ +struct mac_tspec_ie { + uint8_t type; + uint8_t length; + struct mac_ts_info tsinfo; + uint16_t nomMsduSz; + uint16_t maxMsduSz; + uint32_t minSvcInterval; + uint32_t maxSvcInterval; + uint32_t inactInterval; + uint32_t suspendInterval; + uint32_t svcStartTime; + uint32_t minDataRate; + uint32_t meanDataRate; + uint32_t peakDataRate; + uint32_t maxBurstSz; + uint32_t delayBound; + uint32_t minPhyRate; + uint16_t surplusBw; + uint16_t mediumTime; +} qdf_packed; + +/** + * struct add_ts_param - ADDTS related parameters + * @vdev_id: vdev id + * @tspec_idx: TSPEC handle uniquely identifying a TSPEC for a STA in a BSS + * @tspec: tspec value + * @status: QDF status + * @pe_session_id: protocol engine session id + * @tsm_interval: TSM interval period passed from UMAC to WMI + * @set_ric_params: Should RIC parameters be set? + */ +struct add_ts_param { + uint8_t vdev_id; + uint16_t tspec_idx; + struct mac_tspec_ie tspec; + QDF_STATUS status; + uint8_t pe_session_id; +#ifdef FEATURE_WLAN_ESE + uint16_t tsm_interval; +#endif /* FEATURE_WLAN_ESE */ +#ifdef WLAN_FEATURE_ROAM_OFFLOAD + bool set_ric_params; +#endif /* WLAN_FEATURE_ROAM_OFFLOAD */ +}; + +/** + * struct delts_req_info - DELTS request parameter + * @tsinfo: ts info + * @tspec: ts spec + * @wmeTspecPresent: wme ts spec flag + * @wsmTspecPresent: wsm ts spec flag + * @lleTspecPresent: lle ts spec flag + */ +struct delts_req_info { + struct mac_ts_info tsinfo; + struct mac_tspec_ie tspec; + uint8_t wmeTspecPresent:1; + uint8_t wsmTspecPresent:1; + uint8_t lleTspecPresent:1; +}; + +/** + * struct del_ts_params - DELTS related parameters + * @tspecIdx: TSPEC identifier uniquely identifying a TSPEC for a STA in a BSS + * @bssId: BSSID + * @sessionId: session id + * @userPrio: user priority + * @delTsInfo: DELTS info + * @setRICparams: RIC parameters + */ +struct del_ts_params { + uint16_t tspecIdx; + uint8_t bssId[QDF_MAC_ADDR_SIZE]; + uint8_t sessionId; + uint8_t userPrio; +#ifdef WLAN_FEATURE_ROAM_OFFLOAD + struct delts_req_info delTsInfo; + uint8_t setRICparams; +#endif /* WLAN_FEATURE_ROAM_OFFLOAD */ +}; + +/** + * struct ll_stats_clear_params - ll stats clear parameter + * @req_id: request id + * @vdev_id: vdev id + * @stats_clear_mask: stats clear mask + * @stop_req: stop request + * @peer_macaddr: MAC address of the peer for which stats are to be cleared + */ +struct ll_stats_clear_params { + uint32_t req_id; + uint8_t vdev_id; + uint32_t stats_clear_mask; + uint8_t stop_req; + struct qdf_mac_addr peer_macaddr; +}; + +/** + * struct ll_stats_set_params - ll stats get parameter + * @req_id: request id + * @mpdu_size_threshold: mpdu size threshold + * @aggressive_statistics_gathering: aggressive_statistics_gathering + */ +struct ll_stats_set_params { + uint32_t req_id; + uint32_t mpdu_size_threshold; + uint32_t aggressive_statistics_gathering; +}; + +/** + * struct ll_stats_get_params - ll stats parameter + * @req_id: request id + * @vdev_id: vdev id + * @param_id_mask: param is mask + * @peer_macaddr: MAC address of the peer for which stats are desired + */ +struct ll_stats_get_params { + uint32_t req_id; + uint8_t vdev_id; + uint32_t param_id_mask; + struct qdf_mac_addr peer_macaddr; +}; + + +/** + * struct link_status_params - link stats parameter + * @vdev_id: ID of the vdev for which link status is desired + */ +struct link_status_params { + uint8_t vdev_id; +}; + +/** + * struct dhcp_stop_ind_params - DHCP Stop indication message + * @msgtype: message type is same as the request type + * @msglen: length of the entire request + * @device_mode: Mode of the device(ex:STA, AP) + * @adapter_macaddr: MAC address of the adapter + * @peer_macaddr: MAC address of the connected peer + */ +struct dhcp_stop_ind_params { + uint16_t msgtype; + uint16_t msglen; + uint8_t device_mode; + struct qdf_mac_addr adapter_macaddr; + struct qdf_mac_addr peer_macaddr; +}; + +#define WMI_MAX_FILTER_TEST_DATA_LEN 8 +#define WMI_MAX_NUM_MULTICAST_ADDRESS 240 +#define WMI_MAX_NUM_FILTERS 20 +#define WMI_MAX_NUM_TESTS_PER_FILTER 10 + +/** + * enum packet_filter_type - packet filter type + * @WMI_RCV_FILTER_TYPE_INVALID: invalid type + * @WMI_RCV_FILTER_TYPE_FILTER_PKT: filter packet type + * @WMI_RCV_FILTER_TYPE_BUFFER_PKT: buffer packet type + * @WMI_RCV_FILTER_TYPE_MAX_ENUM_SIZE: max enum size + */ +enum packet_filter_type { + WMI_RCV_FILTER_TYPE_INVALID, + WMI_RCV_FILTER_TYPE_FILTER_PKT, + WMI_RCV_FILTER_TYPE_BUFFER_PKT, + WMI_RCV_FILTER_TYPE_MAX_ENUM_SIZE +}; + +/** + * enum packet_protocol_type - packet protocol type + * @WMI_FILTER_HDR_TYPE_INVALID: invalid type + * @WMI_FILTER_HDR_TYPE_MAC: mac type + * @WMI_FILTER_HDR_TYPE_ARP: trp type + * @WMI_FILTER_HDR_TYPE_IPV4: ipv4 type + * @WMI_FILTER_HDR_TYPE_IPV6: ipv6 type + * @WMI_FILTER_HDR_TYPE_UDP: udp type + * @WMI_FILTER_HDR_TYPE_MAX: max type + */ +enum packet_protocol_type { + WMI_FILTER_HDR_TYPE_INVALID, + WMI_FILTER_HDR_TYPE_MAC, + WMI_FILTER_HDR_TYPE_ARP, + WMI_FILTER_HDR_TYPE_IPV4, + WMI_FILTER_HDR_TYPE_IPV6, + WMI_FILTER_HDR_TYPE_UDP, + WMI_FILTER_HDR_TYPE_MAX +}; + +/** + * enum packet_filter_comp_type - packet filter comparison type + * @WMI_FILTER_CMP_TYPE_INVALID: invalid type + * @WMI_FILTER_CMP_TYPE_EQUAL: type equal + * @WMI_FILTER_CMP_TYPE_MASK_EQUAL: mask equal + * @WMI_FILTER_CMP_TYPE_NOT_EQUAL: type not equal + * @WMI_FILTER_CMP_TYPE_MASK_NOT_EQUAL: mask not equal + * @WMI_FILTER_CMP_TYPE_MAX: max type + */ +enum packet_filter_comp_type { + WMI_FILTER_CMP_TYPE_INVALID, + WMI_FILTER_CMP_TYPE_EQUAL, + WMI_FILTER_CMP_TYPE_MASK_EQUAL, + WMI_FILTER_CMP_TYPE_NOT_EQUAL, + WMI_FILTER_CMP_TYPE_MASK_NOT_EQUAL, + WMI_FILTER_CMP_TYPE_MAX +}; + +/** + * struct rcv_pkt_filter_params - receive packet filter parameters + * @protocolLayer - protocol layer + * @cmpFlag - comparison flag + * @dataLength - data length + * @dataOffset - data offset + * @reserved - resserved + * @compareData - compare data + * @dataMask - data mask + */ +struct rcv_pkt_filter_params { + enum packet_protocol_type protocolLayer; + enum packet_filter_comp_type cmpFlag; + uint16_t dataLength; + uint8_t dataOffset; + uint8_t reserved; + uint8_t compareData[WMI_MAX_FILTER_TEST_DATA_LEN]; + uint8_t dataMask[WMI_MAX_FILTER_TEST_DATA_LEN]; +}; + +/** + * struct rcv_pkt_filter_config - receive packet filter info + * @filterId - filter id + * @filterType - filter type + * @numFieldParams - no of fields + * @coalesceTime - reserved parameter + * @self_macaddr - self mac address + * @bssid - Bssid of the connected AP + * @paramsData - data parameter + */ +struct rcv_pkt_filter_config { + uint8_t filterId; + enum packet_filter_type filterType; + uint32_t numFieldParams; + uint32_t coalesceTime; + struct qdf_mac_addr self_macaddr; + struct qdf_mac_addr bssid; + struct rcv_pkt_filter_params paramsData[WMI_MAX_NUM_TESTS_PER_FILTER]; +}; + +/** + * struct cfg_action_frm_tb_ppdu_param - action frm in TB PPDU cfg + * @cfg - enable/disable + * @frm_len - length of the frame + * @data - data pointer + */ +struct cfg_action_frm_tb_ppdu_param { + uint32_t cfg; + uint32_t frm_len; + uint8_t *data; +}; + +#define WMI_MAX_NUM_FW_SEGMENTS 4 + +/** + * struct fw_dump_seg_req_param - individual segment details + * @seg_id - segment id. + * @seg_start_addr_lo - lower address of the segment. + * @seg_start_addr_hi - higher address of the segment. + * @seg_length - length of the segment. + * @dst_addr_lo - lower address of the destination buffer. + * @dst_addr_hi - higher address of the destination buffer. + * + * This structure carries the information to firmware about the + * individual segments. This structure is part of firmware memory + * dump request. + */ +struct fw_dump_seg_req_param { + uint8_t seg_id; + uint32_t seg_start_addr_lo; + uint32_t seg_start_addr_hi; + uint32_t seg_length; + uint32_t dst_addr_lo; + uint32_t dst_addr_hi; +}; + +/** + * struct fw_dump_req_param - firmware memory dump request details. + * @request_id - request id. + * @num_seg - requested number of segments. + * @fw_dump_seg_req - individual segment information. + * + * This structure carries information about the firmware + * memory dump request. + */ +struct fw_dump_req_param { + uint32_t request_id; + uint32_t num_seg; + struct fw_dump_seg_req_param segment[WMI_MAX_NUM_FW_SEGMENTS]; +}; + +/** + * struct dhcp_offload_info_params - dhcp offload parameters + * @vdev_id: request data length + * @dhcp_offload_enabled: dhcp offload enabled + * @dhcp_client_num: dhcp client no + * @dhcp_srv_addr: dhcp server ip + */ +struct dhcp_offload_info_params { + uint32_t vdev_id; + bool dhcp_offload_enabled; + uint32_t dhcp_client_num; + uint32_t dhcp_srv_addr; +}; + +/** + * struct app_type2_params - app type2parameter + * @vdev_id: vdev id + * @rc4_key: rc4 key + * @rc4_key_len: rc4 key length + * @ip_id: NC id + * @ip_device_ip: NC IP address + * @ip_server_ip: Push server IP address + * @tcp_src_port: NC TCP port + * @tcp_dst_port: Push server TCP port + * @tcp_seq: tcp sequence + * @tcp_ack_seq: tcp ack sequence + * @keepalive_init: Initial ping interval + * @keepalive_min: Minimum ping interval + * @keepalive_max: Maximum ping interval + * @keepalive_inc: Increment of ping interval + * @gateway_mac: gateway mac address + * @tcp_tx_timeout_val: tcp tx timeout value + * @tcp_rx_timeout_val: tcp rx timeout value + */ +struct app_type2_params { + uint8_t vdev_id; + uint8_t rc4_key[16]; + uint32_t rc4_key_len; + /** ip header parameter */ + uint32_t ip_id; + uint32_t ip_device_ip; + uint32_t ip_server_ip; + /** tcp header parameter */ + uint16_t tcp_src_port; + uint16_t tcp_dst_port; + uint32_t tcp_seq; + uint32_t tcp_ack_seq; + uint32_t keepalive_init; + uint32_t keepalive_min; + uint32_t keepalive_max; + uint32_t keepalive_inc; + struct qdf_mac_addr gateway_mac; + uint32_t tcp_tx_timeout_val; + uint32_t tcp_rx_timeout_val; +}; + +/** + * struct app_type1_params - app type1 parameter + * @vdev_id: vdev id + * @wakee_mac_addr: mac address + * @identification_id: identification id + * @password: password + * @id_length: id length + * @pass_length: password length + */ +struct app_type1_params { + uint8_t vdev_id; + struct qdf_mac_addr wakee_mac_addr; + uint8_t identification_id[8]; + uint8_t password[16]; + uint32_t id_length; + uint32_t pass_length; +}; + +/** + * enum wmi_ext_wow_type - wow type + * @WMI_EXT_WOW_TYPE_APP_TYPE1: only enable wakeup for app type1 + * @WMI_EXT_WOW_TYPE_APP_TYPE2: only enable wakeup for app type2 + * @WMI_EXT_WOW_TYPE_APP_TYPE1_2: enable wakeup for app type1&2 + */ +enum wmi_ext_wow_type { + WMI_EXT_WOW_TYPE_APP_TYPE1, + WMI_EXT_WOW_TYPE_APP_TYPE2, + WMI_EXT_WOW_TYPE_APP_TYPE1_2, +}; + +/** + * struct ext_wow_params - ext wow parameters + * @vdev_id: vdev id + * @type: wow type + * @wakeup_pin_num: wake up gpio no + */ +struct ext_wow_params { + uint8_t vdev_id; + enum wmi_ext_wow_type type; + uint32_t wakeup_pin_num; +}; + +/** + * struct stats_ext_params - ext stats request + * @vdev_id: vdev id + * @request_data_len: request data length + * @request_data: request data + */ +struct stats_ext_params { + uint32_t vdev_id; + uint32_t request_data_len; + uint8_t request_data[]; +}; + +#define WMI_GTK_OFFLOAD_KEK_BYTES 64 +#define WMI_GTK_OFFLOAD_KCK_BYTES 16 +#define WMI_GTK_OFFLOAD_ENABLE 0 +#define WMI_GTK_OFFLOAD_DISABLE 1 + +#define MAX_MEM_CHUNKS 32 +/** + * struct wmi_host_mem_chunk - host memory chunk structure + * @vaddr: Pointer to virtual address + * @paddr: Physical address + * @memctx: qdf memory context for mapped address. + * @len: length of chunk + * @req_id: request id from target + */ +struct wmi_host_mem_chunk { + uint32_t *vaddr; + uint32_t paddr; + qdf_dma_mem_context(memctx); + uint32_t len; + uint32_t req_id; +}; + +/** + * struct wmi_wifi_start_log - Structure to store the params sent to start/ + * stop logging + * @name: Attribute which indicates the type of logging like per packet + * statistics, connectivity etc. + * @verbose_level: Verbose level which can be 0,1,2,3 + * @flag: Flag field for future use + */ +struct wmi_wifi_start_log { + uint32_t ring_id; + uint32_t verbose_level; + uint32_t flag; +}; + +/** + * struct wmi_pcl_list - Format of PCL + * @pcl_list: List of preferred channels + * @weight_list: Weights of the PCL + * @pcl_len: Number of channels in the PCL + */ +struct wmi_pcl_list { + uint8_t pcl_list[NUM_CHANNELS]; + uint8_t weight_list[NUM_CHANNELS]; + uint32_t pcl_len; +}; + +/** + * struct wmi_hw_mode_params - HW mode params + * @mac0_tx_ss: MAC0 Tx spatial stream + * @mac0_rx_ss: MAC0 Rx spatial stream + * @mac1_tx_ss: MAC1 Tx spatial stream + * @mac1_rx_ss: MAC1 Rx spatial stream + * @mac0_bw: MAC0 bandwidth + * @mac1_bw: MAC1 bandwidth + * @dbs_cap: DBS capabality + * @agile_dfs_cap: Agile DFS capabality + */ +struct wmi_hw_mode_params { + uint8_t mac0_tx_ss; + uint8_t mac0_rx_ss; + uint8_t mac1_tx_ss; + uint8_t mac1_rx_ss; + uint8_t mac0_bw; + uint8_t mac1_bw; + uint8_t dbs_cap; + uint8_t agile_dfs_cap; +}; + +/** + * struct wmi_unit_test_cmd - unit test command parameters + * @vdev_id: vdev id + * @module_id: module id + * @num_args: number of arguments + * @diag_token: dialog token, which identifies the transaction. + * this number is generated by wifitool and may be used to + * identify the transaction in the event path + * @args: arguments + */ +struct wmi_unit_test_cmd { + uint32_t vdev_id; + uint32_t module_id; + uint32_t num_args; + uint32_t diag_token; + uint32_t args[WMI_UNIT_TEST_MAX_NUM_ARGS]; +}; + +/** + * struct extscan_bssid_hotlist_set_params - set hotlist request + * @request_id: request_id + * @vdev_id: vdev id + * @lost_ap_sample_size: number of samples to confirm AP loss + * @num_ap: Number of hotlist APs + * @ap: hotlist APs + */ +struct extscan_bssid_hotlist_set_params { + uint32_t request_id; + uint8_t vdev_id; + uint32_t lost_ap_sample_size; + uint32_t num_ap; + struct ap_threshold_params ap[WMI_WLAN_EXTSCAN_MAX_HOTLIST_APS]; +}; + +/** + * struct host_mem_req - Host memory request paramseters request by target + * @req_id: Request id to identify the request. + * @unit_size: Size of single unit requested. + * @num_unit_info: Memory chunk info + * @num_units: number of units requested. + * @tgt_num_units: number of units request by target. + */ +typedef struct { + uint32_t req_id; + uint32_t unit_size; + uint32_t num_unit_info; + uint32_t num_units; + uint32_t tgt_num_units; +} host_mem_req; + +#define WMI_HOST_DSCP_MAP_MAX (64) + +/** + * struct wmi_host_ext_resource_config - Extended resource config + * @host_platform_config: Host plaform configuration. + * @fw_featuew_bitmap: FW feature requested bitmap. + */ +typedef struct { + uint32_t host_platform_config; + +#define WMI_HOST_FW_FEATURE_LTEU_SUPPORT 0x0001 +#define WMI_HOST_FW_FEATURE_COEX_GPIO_SUPPORT 0x0002 +#define WMI_HOST_FW_FEATURE_AUX_RADIO_SPECTRAL_INTF 0x0004 +#define WMI_HOST_FW_FEATURE_AUX_RADIO_CHAN_LOAD_INTF 0x0008 +#define WMI_HOST_FW_FEATURE_BSS_CHANNEL_INFO_64 0x0010 +#define WMI_HOST_FW_FEATURE_PEER_STATS 0x0020 +#define WMI_HOST_FW_FEATURE_VDEV_STATS 0x0040 +#define WMI_HOST_FW_FEATURE_VOW_FEATURES 0x00004000 +#define WMI_HOST_FW_FEATURE_VOW_STATS 0x00008000 + /** + * @brief fw_feature_bitmask - Enable/Disable features in FW + * @details + * The bits in fw_feature_bitmask are used as shown by the masks below: + * 0x0001 - LTEU Config enable/disable + * 0x0002 - COEX GPIO Config enable/disable + * 0x0004 - Aux Radio enhancement for spectral scan enable/disable + * 0x0008 - Aux Radio enhancement for chan load scan enable/disable + * 0x0010 - BSS channel info stats enable/disable + * The features in question are enabled by setting + * the feature's bit to 1, + * or disabled by setting the feature's bit to 0. + */ + uint32_t fw_feature_bitmap; + + /* WLAN priority GPIO number + * The target uses a GPIO pin to indicate when it is transmitting + * high-priority traffic (e.g. beacon, management, or AC_VI) or + * low-priority traffic (e.g. AC_BE, AC_BK). The HW uses this + * WLAN GPIO pin to determine whether to abort WLAN tx in favor of + * BT activity. + * Which GPIO is used for this WLAN tx traffic priority specification + * varies between platforms, so the host needs to indicate to the + * target which GPIO to use. + */ + uint32_t wlan_priority_gpio; + + /* Host will notify target which coex algorithm has to be + * enabled based on HW, FW capability and device tree config. + * Till now the coex algorithms were target specific. Now the + * same target can choose between multiple coex algorithms + * depending on device tree config on host. For backward + * compatibility, version support will have option 0 and will + * rely on FW compile time flags to decide the coex version + * between VERSION_1, VERSION_2 and VERSION_3. Version info is + * mandatory from VERSION_4 onwards for any new coex algorithms. + * + * 0 = no version support + * 1 = COEX_VERSION_1 (3 wire coex) + * 2 = COEX_VERSION_2 (2.5 wire coex) + * 3 = COEX_VERSION_3 (2.5 wire coex+duty cycle) + * 4 = COEX_VERSION_4 (4 wire coex) + */ + uint32_t coex_version; + + /* There are multiple coex implementations on FW to support different + * hardwares. Since the coex algos are mutually exclusive, host will + * use below fields to send GPIO info to FW and these GPIO pins will + * have different usages depending on the feature enabled. This is to + * avoid adding multiple GPIO fields here for different features. + * + * COEX VERSION_4 (4 wire coex) : + * 4 wire coex feature uses 1 common input request line from BT/ZB/ + * Thread which interrupts the WLAN target processor directly, 1 input + * priority line from BT and ZB each, 1 output line to grant access to + * requesting IOT subsystem. WLAN uses the input priority line to + * identify the requesting IOT subsystem. Request is granted based on + * IOT interface priority and WLAN traffic. GPIO pin usage is as below: + * coex_gpio_pin_1 = BT PRIORITY INPUT GPIO + * coex_gpio_pin_2 = ZIGBEE PRIORITY INPUT GPIO + * coex_gpio_pin_3 = GRANT OUTPUT GPIO + * when a BT active interrupt is raised, WLAN reads + * BT and ZB priority input GPIO pins to compare against the coex + * priority table and accordingly sets the grant output GPIO to give + * access to requesting IOT subsystem. + */ + uint32_t coex_gpio_pin_1; + uint32_t coex_gpio_pin_2; + uint32_t coex_gpio_pin_3; + + /* add new members here */ +} wmi_host_ext_resource_config; + +/** + * struct set_fwtest_params - FW test params + * @arg: FW param id + * @value: value + */ +struct set_fwtest_params { + uint32_t arg; + uint32_t value; +}; + +/* + * msduq_update_params - MSDUQ update param structure + * @tid_num: TID number + * @msduq_update_mask: update bit mask + * @qdepth_thresh_value: threshold value for the queue depth + */ + +#define QDEPTH_THRESH_MAX_UPDATES 1 + +typedef struct { + uint32_t tid_num; + uint32_t msduq_update_mask; + uint32_t qdepth_thresh_value; +} msduq_update_params; + +/** + * struct set_qdepth_thresh_params - MSDU Queue Depth Threshold Params + * @vdev_id: vdev id + * @pdev_id: pdev id + * @mac_addr: MAC address + * @num_of_msduq_updates: holds the number of tid updates + */ + +struct set_qdepth_thresh_params { + uint32_t pdev_id; + uint32_t vdev_id; + uint8_t mac_addr[QDF_MAC_ADDR_SIZE]; + uint32_t num_of_msduq_updates; + msduq_update_params update_params[QDEPTH_THRESH_MAX_UPDATES]; +}; + +/** + * struct peer_chan_width_switch_info - Peer channel width capability params + * @mac_addr: MAC address of peer + * @chan_width: Max supported channel width of peer + * (enum ieee80211_cwm_width) + */ + +struct peer_chan_width_switch_info { + uint8_t mac_addr[QDF_MAC_ADDR_SIZE]; + uint32_t chan_width; +}; + +/** + * struct peer_chan_width_switch_params - Peer channel width capability wrapper + * @num_peers: Total number of peers connected to AP + * @chan_width_peer_list: List of capabilities for all connected peers + */ + +struct peer_chan_width_switch_params { + uint32_t num_peers; + struct peer_chan_width_switch_info *chan_width_peer_list; +}; + +/** + * struct peer_add_wds_entry_params - WDS peer entry add params + * @dest_addr: Pointer to destination macaddr + * @peer_addr: Pointer to peer mac addr + * @flags: flags + * @vdev_id: Vdev id + */ +struct peer_add_wds_entry_params { + const uint8_t *dest_addr; + uint8_t *peer_addr; + uint32_t flags; + uint32_t vdev_id; +}; + +/** + * struct peer_del_wds_entry_params - WDS peer entry del params + * @dest_addr: Pointer to destination macaddr + * @vdev_id: Vdev id + */ +struct peer_del_wds_entry_params { + uint8_t *dest_addr; + uint32_t vdev_id; +}; + +/** + * struct set_bridge_mac_addr_params - set bridge MAC addr params + * @dest_addr: Pointer to bridge macaddr + */ +struct set_bridge_mac_addr_params { + uint8_t *bridge_addr; +}; + +/** + * struct peer_updatewds_entry_params - WDS peer entry update params + * @wds_macaddr: Pointer to destination macaddr + * @peer_add: Pointer to peer mac addr + * @flags: flags + * @vdev_id: Vdev id + */ +struct peer_update_wds_entry_params { + uint8_t *wds_macaddr; + uint8_t *peer_macaddr; + uint32_t flags; + uint32_t vdev_id; +}; + +/** + * struct peer_del_all_wds_entries_params - WDS peer entries del params + * @wds_macaddr: Pointer to destination macaddr + * @peer_macaddr: Pointer to peer mac addr + * @flags: flags + */ +struct peer_del_all_wds_entries_params { + uint8_t *wds_macaddr; + uint8_t *peer_macaddr; + uint32_t flags; +}; + +/** + * struct set_ps_mode_params - PS mode params + * @vdev_id: vdev id + * @psmode: PS mode + */ +struct set_ps_mode_params { + uint8_t vdev_id; + uint8_t psmode; +}; + +/** + * @struct tt_level_config - Set Thermal throttlling config + * @tmplwm: Temperature low water mark + * @tmphwm: Temperature high water mark + * @dcoffpercent: dc off percentage + * @priority: priority + */ +typedef struct { + uint32_t tmplwm; + uint32_t tmphwm; + uint32_t dcoffpercent; + uint32_t priority; +} tt_level_config; + +/** + * struct thermal_mitigation_params - Thermal mitigation params + * @enable: Enable/Disable Thermal mitigation + * @dc: DC + * @dc_per_event: DC per event + * @num_thermal_conf: Number of thermal configurations to be sent + * @tt_level_config: TT level config params + */ +struct thermal_mitigation_params { + uint32_t pdev_id; + uint32_t enable; + uint32_t dc; + uint32_t dc_per_event; + uint8_t num_thermal_conf; + tt_level_config levelconf[THERMAL_LEVELS]; +}; + +/** + * struct smart_ant_enable_tx_feedback_params - SA tx feeback params + * @enable: Enable TX feedback for SA + */ +struct smart_ant_enable_tx_feedback_params { + int enable; +}; + +/** + * struct vdev_spectral_configure_params - SPectral config params + * @vdev_id: VDEV id + * @count: count + * @period: period + * @spectral_pri: Spectral priority + * @fft_size: FFT size + * @gc_enable: GC enable + * @restart_enable: restart enabled + * @noise_floor_ref: Noise floor reference + * @init_delay: Init delays + * @nb_tone_thr: NB tone threshold + * @str_bin_thr: STR BIN threshold + * @wb_rpt_mode: WB BIN threshold + * @rssi_rpt_mode: RSSI report mode + * @rssi_thr: RSSI threshold + * @pwr_format: Power format + * @rpt_mode: Report mdoe + * @bin_scale: BIN scale + * @dbm_adj: DBM adjust + * @chn_mask: chain mask + * @mode: Mode + * @center_freq: Center frequency + * @chan_freq: Primary channel frequency + * @chan_width: Channel width + */ +struct vdev_spectral_configure_params { + uint8_t vdev_id; + uint16_t count; + uint16_t period; + uint16_t spectral_pri; + uint16_t fft_size; + uint16_t gc_enable; + uint16_t restart_enable; + uint16_t noise_floor_ref; + uint16_t init_delay; + uint16_t nb_tone_thr; + uint16_t str_bin_thr; + uint16_t wb_rpt_mode; + uint16_t rssi_rpt_mode; + uint16_t rssi_thr; + uint16_t pwr_format; + uint16_t rpt_mode; + uint16_t bin_scale; + uint16_t dbm_adj; + uint16_t chn_mask; + uint16_t mode; + uint16_t center_freq; + uint16_t chan_freq; + uint16_t chan_width; +}; + +/** + * struct vdev_spectral_enable_params - Spectral enabled params + * @vdev_id: VDEV id + * @active_valid: Active valid + * @active: active + * @enabled_valid: Enabled valid + * @enabled: enabled + * @mode: Mode + */ +struct vdev_spectral_enable_params { + uint8_t vdev_id; + uint8_t active_valid; + uint8_t active; + uint8_t enabled_valid; + uint8_t enabled; + uint8_t mode; +}; + +/** + * struct pdev_set_regdomain_params - PDEV set reg domain params + * @currentRDinuse: Current Reg domain + * @currentRD2G: Current Reg domain 2G + * @currentRD5G: Current Reg domain 5G + * @ctl_2G: CTL 2G + * @ctl_5G: CTL 5G + * @dfsDomain: DFS domain + * @pdev_id: pdev_id + */ +struct pdev_set_regdomain_params { + uint16_t currentRDinuse; + uint16_t currentRD2G; + uint16_t currentRD5G; + uint32_t ctl_2G; + uint32_t ctl_5G; + uint8_t dfsDomain; + uint32_t pdev_id; +}; + +/** + * struct set_quiet_mode_params - Set quiet mode params + * @enabled: Enabled + * @period: Quite period + * @intval: Quite interval + * @duration: Quite duration + * @offset: offset + */ +struct set_quiet_mode_params { + uint8_t enabled; + uint8_t period; + uint16_t intval; + uint16_t duration; + uint16_t offset; +}; + +/** + * struct set_bcn_offload_quiet_mode_params - Set quiet mode params + * @vdev_id: Vdev ID + * @period: Quite period + * @duration: Quite duration + * @next_start: Next quiet start + * @flag: 0 - disable, 1 - enable and continuous, 3 - enable and single shot + */ +struct set_bcn_offload_quiet_mode_params { + uint32_t vdev_id; + uint32_t period; + uint32_t duration; + uint32_t next_start; + uint32_t flag; +}; + +/** + * struct set_beacon_filter_params - Set beacon filter params + * @vdev_id: VDEV id + * @ie: Pointer to IE fields + */ +struct set_beacon_filter_params { + uint8_t vdev_id; + uint32_t *ie; +}; + +/** + * struct remove_beacon_filter_params - Remove beacon filter params + * @vdev_id: VDEV id + */ +struct remove_beacon_filter_params { + uint8_t vdev_id; +}; + +/** + * struct mgmt_params - Mgmt params + * @vdev_id: vdev id + * @buf_len: length of frame buffer + * @wbuf: frame buffer + */ +struct mgmt_params { + int vdev_id; + uint32_t buf_len; + qdf_nbuf_t wbuf; +}; + +/** + * struct addba_clearresponse_params - Addba clear response params + * @vdev_id: VDEV id + */ +struct addba_clearresponse_params { + uint8_t vdev_id; +}; + +/** + * struct addba_send_params - ADDBA send params + * @vdev_id: vdev id + * @tidno: TID + * @buffersize: buffer size + */ +struct addba_send_params { + uint8_t vdev_id; + uint8_t tidno; + uint16_t buffersize; +}; + +/** + * struct delba_send_params - DELBA send params + * @vdev_id: vdev id + * @tidno: TID + * @initiator: initiator + * @reasoncode: reason code + */ +struct delba_send_params { + uint8_t vdev_id; + uint8_t tidno; + uint8_t initiator; + uint16_t reasoncode; +}; +/** + * struct addba_setresponse_arams - Set ADDBA response params + * @vdev_id: vdev id + * @tidno: TID + * @statuscode: status code in response + */ +struct addba_setresponse_params { + uint8_t vdev_id; + uint8_t tidno; + uint16_t statuscode; +}; + +/** + * struct singleamsdu_params - Single AMSDU params + * @vdev_id: vdev is + * @tidno: TID + */ +struct singleamsdu_params { + uint8_t vdev_id; + uint8_t tidno; +}; + +/** + * struct set_qbosst_params - Set QBOOST params + * @vdev_id: vdev id + * @value: value + */ +struct set_qboost_params { + uint8_t vdev_id; + uint32_t value; +}; + +/** + * struct mu_scan_params - MU scan params + * @id: id + * @type: type + * @duration: Duration + * @lteu_tx_power: LTEU tx power + */ +struct mu_scan_params { + uint8_t id; + uint8_t type; + uint32_t duration; + uint32_t lteu_tx_power; + uint32_t rssi_thr_bssid; + uint32_t rssi_thr_sta; + uint32_t rssi_thr_sc; + uint32_t plmn_id; + uint32_t alpha_num_bssid; +}; + +/** + * struct lteu_config_params - LTEU config params + * @lteu_gpio_start: start MU/AP scan after GPIO toggle + * @lteu_num_bins: no. of elements in the following arrays + * @use_actual_nf: whether to use the actual NF obtained or a hardcoded one + * @lteu_weight: weights for MU algo + * @lteu_thresh: thresholds for MU algo + * @lteu_gamma: gamma's for MU algo + * @lteu_scan_timeout: timeout in ms to gpio toggle + * @alpha_num_ssid: alpha for num active bssid calculation + * @wifi_tx_power: Wifi Tx power + */ +struct lteu_config_params { + uint8_t lteu_gpio_start; + uint8_t lteu_num_bins; + uint8_t use_actual_nf; + uint32_t lteu_weight[LTEU_MAX_BINS]; + uint32_t lteu_thresh[LTEU_MAX_BINS]; + uint32_t lteu_gamma[LTEU_MAX_BINS]; + uint32_t lteu_scan_timeout; + uint32_t alpha_num_bssid; + uint32_t wifi_tx_power; + uint32_t allow_err_packets; +}; + +struct wmi_macaddr_t { + /** upper 4 bytes of MAC address */ + uint32_t mac_addr31to0; + /** lower 2 bytes of MAC address */ + uint32_t mac_addr47to32; +}; + +/** + * struct wlan_profile_params - WLAN profile params + * @param_id: param id + * @profile_id: profile id + * @enable: enable + */ +struct wlan_profile_params { + uint32_t param_id; + uint32_t profile_id; + uint32_t enable; +}; + +/* struct ht_ie_params - HT IE params + * @ie_len: IE length + * @ie_data: pointer to IE data + * @tx_streams: Tx streams supported for this HT IE + * @rx_streams: Rx streams supported for this HT IE + */ +struct ht_ie_params { + uint32_t ie_len; + uint8_t *ie_data; + uint32_t tx_streams; + uint32_t rx_streams; +}; + +/* struct vht_ie_params - VHT IE params + * @ie_len: IE length + * @ie_data: pointer to IE data + * @tx_streams: Tx streams supported for this VHT IE + * @rx_streams: Rx streams supported for this VHT IE + */ +struct vht_ie_params { + uint32_t ie_len; + uint8_t *ie_data; + uint32_t tx_streams; + uint32_t rx_streams; +}; + +/** + * struct wmi_host_wmeParams - WME params + * @wmep_acm: ACM paramete + * @wmep_aifsn: AIFSN parameters + * @wmep_logcwmin: cwmin in exponential form + * @wmep_logcwmax: cwmax in exponential form + * @wmep_txopLimit: txopLimit + * @wmep_noackPolicy: No-Ack Policy: 0=ack, 1=no-ack + */ +struct wmi_host_wmeParams { + u_int8_t wmep_acm; + u_int8_t wmep_aifsn; + u_int8_t wmep_logcwmin; + u_int8_t wmep_logcwmax; + u_int16_t wmep_txopLimit; + u_int8_t wmep_noackPolicy; +}; + +/** + * struct wmm_update_params - WMM update params + * @wmep_array: WME params for each AC + */ +struct wmm_update_params { + struct wmi_host_wmeParams *wmep_array; +}; + +/** + * struct wmi_host_wmevParams - WME params + * @wmep_acm: ACM paramete + * @wmep_aifsn: AIFSN parameters + * @wmep_logcwmin: cwmin in exponential form + * @wmep_logcwmax: cwmax in exponential form + * @wmep_txopLimit: txopLimit + * @wmep_noackPolicy: No-Ack Policy: 0=ack, 1=no-ack + */ +struct wmi_host_wme_vparams { + u_int32_t acm; + u_int32_t aifs; + u_int32_t cwmin; + u_int32_t cwmax; + union { + u_int32_t txoplimit; + u_int32_t mu_edca_timer; + }; + u_int32_t noackpolicy; +}; + +/** + * struct ratepwr_table_params - Rate power table params + * @ratepwr_tbl: pointer to rate power table + * @ratepwr_len: rate power table len + */ +struct ratepwr_table_params { + uint8_t *ratepwr_tbl; + uint16_t ratepwr_len; +}; + +/** + * struct ctl_table_params - Ctl table params + * @ctl_array: pointer to ctl array + * @ctl_cmd_len: ctl command length + * @is_2g: is 2G + * @target_type: target type + * @ctl_band: ctl band + * @pdev_id: pdev id + */ +struct ctl_table_params { + uint8_t *ctl_array; + uint16_t ctl_cmd_len; + bool is_2g; + uint32_t target_type; + uint32_t ctl_band; + uint32_t pdev_id; +}; + +/** + * struct mimogain_table_params - MIMO gain table params + * @array_gain: pointer to array gain table + * @tbl_len: table length + * @multichain_gain_bypass: bypass multichain gain + */ +struct mimogain_table_params { + uint8_t *array_gain; + uint16_t tbl_len; + bool multichain_gain_bypass; + uint32_t pdev_id; +}; + +/** + * struct ratepwr_chainmask_params - Rate power chainmask params + * @ratepwr_chain_tbl: pointer to ratepwr chain table + * @num_rate: number of rate in table + * @pream_type: preamble type + * @ops: ops + */ +struct ratepwr_chainmsk_params { + uint32_t *ratepwr_chain_tbl; + uint16_t num_rate; + uint8_t pream_type; + uint8_t ops; +}; + +struct macaddr_params { + uint8_t *macaddr; +}; + +/** + * struct acparams_params - acparams config structure + * @ac: AC to configure + * @use_rts: Use rts for this AC + * @aggrsize_scaling: Aggregrate size scaling for the AC + * @min_kbps: min kbps req + */ +struct acparams_params { + uint8_t ac; + uint8_t use_rts; + uint8_t aggrsize_scaling; + uint32_t min_kbps; +}; + +/** + * struct vap_dscp_tid_map_params - DSCP tid map params + * @vdev_id: vdev id + * @dscp_to_tid_map: pointer to arry of tid to dscp map table + */ +struct vap_dscp_tid_map_params { + uint8_t vdev_id; + uint32_t *dscp_to_tid_map; +}; + +/** + * struct proxy_ast_reserve_params - Proxy AST reserve params + * @macaddr: macaddr for proxy ast entry + */ +struct proxy_ast_reserve_params { + uint8_t *macaddr; +}; + +/** + * struct fips_params - FIPS params config + * @key: pointer to key + * @key_len: length of key + * @data: pointer data buf + * @data_len: length of data buf + * @mode: mode + * @op: operation + * @pdev_id: pdev_id for identifying the MAC + */ +struct fips_params { + uint8_t *key; + uint32_t key_len; + uint8_t *data; + uint32_t data_len; + uint32_t mode; + uint32_t op; + uint32_t pdev_id; +}; + +#ifdef WLAN_FEATURE_DISA_FIPS +/** + * struct disa_encrypt_decrypt_req_params - disa encrypt request + * @vdev_id: virtual device id + * @key_flag: This indicates firmware to encrypt/decrypt payload + * see ENCRYPT_DECRYPT_FLAG + * @key_idx: Index used in storing key + * @key_cipher: cipher used for encryption/decryption + * Eg: see WMI_CIPHER_AES_CCM for CCMP + * @key_len: length of key data + * @key_txmic_len: length of Tx MIC + * @key_rxmic_len: length of Rx MIC + * @key_data: Key + * @pn: packet number + * @mac_header: MAC header + * @data_len: length of data + * @data: pointer to payload + */ +struct disa_encrypt_decrypt_req_params { + uint32_t vdev_id; + uint8_t key_flag; + uint32_t key_idx; + uint32_t key_cipher; + uint32_t key_len; + uint32_t key_txmic_len; + uint32_t key_rxmic_len; + uint8_t key_data[MAC_MAX_KEY_LENGTH]; + uint8_t pn[MAC_PN_LENGTH]; + uint8_t mac_header[MAX_MAC_HEADER_LEN]; + uint32_t data_len; + uint8_t *data; +}; +#endif + +/** + * struct mcast_group_update_param - Mcast group table update to target + * @action: Addition/deletion + * @wildcard: iwldcard table entry? + * @mcast_ip_addr: mcast ip address to be updated + * @mcast_ip_addr_bytes: mcast ip addr bytes + * @nsrcs: number of entries in source list + * @filter_mode: filter mode + * @is_action_delete: is delete + * @is_filter_mode_snoop: is filter mode snoop + * @ucast_mac_addr: ucast peer mac subscribed to mcast ip + * @srcs: source mac accpted + * @mask: mask + * @vap_id: vdev id + * @is_mcast_addr_len: is mcast address length + */ +struct mcast_group_update_params { + int action; + int wildcard; + uint8_t *mcast_ip_addr; + int mcast_ip_addr_bytes; + uint8_t nsrcs; + uint8_t filter_mode; + bool is_action_delete; + bool is_filter_mode_snoop; + uint8_t *ucast_mac_addr; + uint8_t *srcs; + uint8_t *mask; + uint8_t vap_id; + bool is_mcast_addr_len; +}; + +/** + * struct periodic_chan_stats_param - periodic channel stats req param + * @stats_period: stats period update + * @enable: enable/disable + */ +struct periodic_chan_stats_params { + uint32_t stats_period; + bool enable; + uint32_t pdev_id; +}; + +/** + * enum wmi_host_packet_power_rate_flags: packer power rate flags + * @WMI_HOST_FLAG_RTSENA: RTS enabled + * @WMI_HOST_FLAG_CTSENA: CTS enabled + * @WMI_HOST_FLAG_STBC: STBC is set + * @WMI_HOST_FLAG_LDPC: LDPC is set + * @WMI_HOST_FLAG_TXBF: Tx Bf enabled + * @WMI_HOST_FLAG_MU2: MU2 data + * @WMI_HOST_FLAG_MU3: MU3 data + * @WMI_HOST_FLAG_SERIES1: Rate series 1 + * @WMI_HOST_FLAG_SGI: Short gaurd interval + */ +enum wmi_host_packet_power_rate_flags { + WMI_HOST_FLAG_RTSENA = 0x0001, + WMI_HOST_FLAG_CTSENA = 0x0002, + WMI_HOST_FLAG_STBC = 0x0004, + WMI_HOST_FLAG_LDPC = 0x0008, + WMI_HOST_FLAG_TXBF = 0x0010, + WMI_HOST_FLAG_MU2 = 0x0020, + WMI_HOST_FLAG_MU3 = 0x0040, + WMI_HOST_FLAG_SERIES1 = 0x0080, + WMI_HOST_FLAG_SGI = 0x0100, +}; + +/** + * enum wmi_host_su_mu_ofdma_flags: packer power su mu ofdma flags + * @WMI_HOST_FLAG_SU: SU Data + * @WMI_HOST_FLAG_DL_MU_MIMO_AC: DL AC MU data + * @WMI_HOST_FLAG_DL_MU_MIMO_AX: DL AX MU data + * @WMI_HOST_FLAG_DL_OFDMA: DL OFDMA data + * @WMI_HOST_FLAG_UL_OFDMA: UL OFDMA data + * @WMI_HOST_FLAG_UL_MU_MIMO: UL MU data + */ +enum wmi_host_su_mu_ofdma_flags { + WMI_HOST_FLAG_SU = 0x0001, + WMI_HOST_FLAG_DL_MU_MIMO_AC = 0x0002, + WMI_HOST_FLAG_DL_MU_MIMO_AX = 0x0003, + WMI_HOST_FLAG_DL_OFDMA = 0x0004, + WMI_HOST_FLAG_UL_OFDMA = 0x0005, + WMI_HOST_FLAG_UL_MU_MIMO = 0x0006, +}; + +/** + * enum wmi_host_preamble_type: preamble type + * @WMI_HOST_PREAMBLE_OFDM: ofdm rate + * @WMI_HOST_PREAMBLE_CCK: cck rate + * @WMI_HOST_PREAMBLE_HT: ht rate + * @WMI_HOST_PREAMBLE_VHT: vht rate + * @WMI_HOST_PREAMBLE_HE: 11ax he rate + */ +enum wmi_host_preamble_type { + WMI_HOST_PREAMBLE_OFDM = 0, + WMI_HOST_PREAMBLE_CCK = 1, + WMI_HOST_PREAMBLE_HT = 2, + WMI_HOST_PREAMBLE_VHT = 3, + WMI_HOST_PREAMBLE_HE = 4, +}; + +/** + * enum wmi_ratemask_type: ratemask type + * @WMI_RATEMASK_TYPE_CCK: CCK rate mask type + * @WMI_RATEMASK_TYPE_HT: HT rate mask type + * @WMI_RATEMASK_TYPE_VHT: VHT rate mask type + * @WMI_RATEMASK_TYPE_HE: HE rate mask type + * + * This is used for 'type' in WMI_VDEV_RATEMASK_CMDID + */ +enum wmi_ratemask_type { + WMI_RATEMASK_TYPE_CCK = 0, + WMI_RATEMASK_TYPE_HT = 1, + WMI_RATEMASK_TYPE_VHT = 2, + WMI_RATEMASK_TYPE_HE = 3, +}; + +/** + * struct packet_power_info_params - packet power info params + * @chainmask: chain mask + * @chan_width: channel bandwidth + * @rate_flags: rate flags + * @su_mu_ofdma: su/mu/ofdma flags + * @nss: number of spatial streams + * @preamble: preamble + * @hw_rate: + */ +struct packet_power_info_params { + uint16_t chainmask; + uint16_t chan_width; + uint16_t rate_flags; + uint16_t su_mu_ofdma; + uint16_t nss; + uint16_t preamble; + uint16_t hw_rate; + uint32_t pdev_id; +}; + +/** + * enum gpio_pull_type - GPIO PULL TYPE + * @WMI_HOST_GPIO_PULL_NONE: set gpio pull type to none + * @WMI_HOST_GPIO_PULL_UP: set gpio to pull up + * @WMI_HOST_GPIO_PULL_DOWN: set gpio to pull down + * @WMI_HOST_GPIO_PULL_MAX: invalid pull type + */ +enum gpio_pull_type { + WMI_HOST_GPIO_PULL_NONE = 0, + WMI_HOST_GPIO_PULL_UP = 1, + WMI_HOST_GPIO_PULL_DOWN = 2, + WMI_HOST_GPIO_PULL_MAX, +}; + +/** + * enum gpio_interrupt_mode - GPIO INTERRUPT MODE + * @WMI_HOST_GPIO_INTMODE_DISABLE: disable interrupt mode + * @WMI_HOST_GPIO_INTMODE_RISING_EDGE: interrupt with rising edge trigger + * @WMI_HOST_GPIO_INTMODE_FALLING_EDGE: interrupt with falling edge trigger + * @WMI_HOST_GPIO_INTMODE_BOTH_EDGE: interrupt with both edge trigger + * @WMI_HOST_GPIO_INTMODE_LEVEL_LOW: interrupt with gpio level low trigger + * @WMI_HOST_GPIO_INTMODE_LEVEL_HIGH: interrupt with gpio level high trigger + * @WMI_HOST_GPIO_INTMODE_MAX: invalid interrupt mode + */ +enum gpio_interrupt_mode { + WMI_HOST_GPIO_INTMODE_DISABLE = 0, + WMI_HOST_GPIO_INTMODE_RISING_EDGE = 1, + WMI_HOST_GPIO_INTMODE_FALLING_EDGE = 2, + WMI_HOST_GPIO_INTMODE_BOTH_EDGE = 3, + WMI_HOST_GPIO_INTMODE_LEVEL_LOW = 4, + WMI_HOST_GPIO_INTMODE_LEVEL_HIGH = 5, + WMI_HOST_GPIO_INTMODE_MAX, +}; + +/** + * enum qca_gpio_direction - GPIO Direction + * @WLAN_GPIO_INPUT: set gpio as input mode + * @WLAN_GPIO_OUTPUT: set gpio as output mode + * @WLAN_GPIO_VALUE_MAX: invalid gpio direction + */ +enum gpio_direction { + WMI_HOST_GPIO_INPUT = 0, + WMI_HOST_GPIO_OUTPUT = 1, + WMI_HOST_GPIO_DIR_MAX, +}; + +/** + * enum fw_gpio_direction - GPIO Direction + * @WMI_FW_GPIO_OUTPUT: set gpio as output mode + * @WMI_FW_GPIO_INPUT: set gpio as input mode + */ +enum fw_gpio_direction { + WMI_FW_GPIO_OUTPUT = 0, + WMI_FW_GPIO_INPUT = 1, +}; + +/** + * enum qca_gpio_value - GPIO Value + * @WLAN_GPIO_LEVEL_LOW: set gpio output level low + * @WLAN_GPIO_LEVEL_HIGH: set gpio output level high + * @WLAN_GPIO_LEVEL_MAX: invalid gpio value + */ +enum gpio_value { + WMI_HOST_GPIO_LEVEL_LOW = 0, + WMI_HOST_GPIO_LEVEL_HIGH = 1, + WMI_HOST_GPIO_LEVEL_MAX, +}; + +/** + * enum gpio_init_enable - GPIO init enable + * @WMI_HOST_GPIO_INIT_DISABLE: Disable INIT + * @WMI_HOST_GPIO_INIT_ENABLE: Enable INIT + * @WMI_HOST_GPIO_INIT_MAX: invalid gpio init_enable + */ +enum gpio_init_enable { + WMI_HOST_GPIO_INIT_DISABLE = 0, + WMI_HOST_GPIO_INIT_ENABLE = 1, + WMI_HOST_GPIO_INIT_MAX, +}; + +/** + * enum gpio_drive - GPIO drive + * @WMI_HOST_GPIO_DRIVE_2MA: drive 2MA + * @WMI_HOST_GPIO_DRIVE_4MA: drive 4MA + * @WMI_HOST_GPIO_DRIVE_6MA: drive 6MA + * @WMI_HOST_GPIO_DRIVE_8MA: drive 8MA + * @WMI_HOST_GPIO_DRIVE_10MA: drive 10MA + * @WMI_HOST_GPIO_DRIVE_12MA: drive 12MA + * @WMI_HOST_GPIO_DRIVE_14MA: drive 14MA + * @WMI_HOST_GPIO_DRIVE_16MA: drive 16MA + * @WMI_HOST_GPIO_DRIVE_MAX: invalid gpio drive + */ +enum gpio_drive { + WMI_HOST_GPIO_DRIVE_2MA = 0, + WMI_HOST_GPIO_DRIVE_4MA = 1, + WMI_HOST_GPIO_DRIVE_6MA = 2, + WMI_HOST_GPIO_DRIVE_8MA = 3, + WMI_HOST_GPIO_DRIVE_10MA = 4, + WMI_HOST_GPIO_DRIVE_12MA = 5, + WMI_HOST_GPIO_DRIVE_14MA = 6, + WMI_HOST_GPIO_DRIVE_16MA = 7, + WMI_HOST_GPIO_DRIVE_MAX, +}; + +/** + * enum gpio_mux_config - GPIO mux_config + * @WMI_HOST_GPIO_MUX_DEFAULT: Default mux value + * @WMI_HOST_GPIO_MUX_MAX: maximum allowed gpio mux_config + */ +enum gpio_mux_config { + WMI_HOST_GPIO_MUX_DEFAULT = 0, + WMI_HOST_GPIO_MUX_MAX = 15, +}; + +/** + * struct wmi_host_gpio_input_event - GPIO input event structure + * @gpio_num: GPIO number which changed state + */ +typedef struct { + uint32_t gpio_num; /* GPIO number which changed state */ +} wmi_host_gpio_input_event; + +/** + * struct gpio_config_params - GPIO config params + * @pin_num: GPIO number to config + * @pin_dir: gpio direction, 1-input/0-output + * @pin_pull_type: pull type define in gpio_pull_type + * @pin_intr_mode: interrupt mode define in gpio_interrupt_mode + */ +struct gpio_config_params { + uint32_t pin_num; + enum gpio_direction pin_dir; + enum gpio_pull_type pin_pull_type; + enum gpio_interrupt_mode pin_intr_mode; + enum gpio_mux_config mux_config_val; + enum gpio_drive drive; + enum gpio_init_enable init_enable; +}; + +/** + * struct gpio_output_params - GPIO output params + * @pin_num: GPIO number to configure + * @pinset: 1 mean gpio output high level, 0 mean gpio output low level + */ +struct gpio_output_params { + uint32_t pin_num; + enum gpio_value pin_set; +}; + +/* flags bit 0: to configure wlan priority bitmap */ +#define WMI_HOST_BTCOEX_PARAM_FLAGS_WLAN_PRIORITY_BITMAP_BIT (1<<0) +/* flags bit 1: to configure both period and wlan duration */ +#define WMI_HOST_BTCOEX_PARAM_FLAGS_DUTY_CYCLE_BIT (1<<1) +struct btcoex_cfg_params { + /* WLAN priority bitmask for different frame types */ + uint32_t btcoex_wlan_priority_bitmap; + /* This command is used to configure different btcoex params + * in different situations.The host sets the appropriate bit(s) + * in btcoex_param_flags to indicate which configuration parameters + * are valid within a particular BT coex config message, so that one + * BT configuration parameter can be configured without affecting + * other BT configuration parameters.E.g. if the host wants to + * configure only btcoex_wlan_priority_bitmap it sets only + * WMI_BTCOEX_PARAM_FLAGS_WLAN_PRIORITY_BITMAP_BIT in + * btcoex_param_flags so that firmware will not overwrite + * other params with default value passed in the command. + * Host can also set multiple bits in btcoex_param_flags + * to configure more than one param in single message. + */ + uint32_t btcoex_param_flags; + /* period denotes the total time in milliseconds which WLAN and BT share + * configured percentage for transmission and reception. + */ + uint32_t period; + /* wlan duration is the time in milliseconds given for wlan + * in above period. + */ + uint32_t wlan_duration; +}; + +#define WMI_HOST_COEX_CONFIG_BUF_MAX_LEN 32 /* 128 bytes */ +/** + * coex_ver_cfg_t + * @coex_version: Version for 4 wire coex + * @length: Length of payload buffer based on version + * @config_buf: Payload Buffer + */ +typedef struct { + /* VERSION_4 (4 wire coex) */ + uint32_t coex_version; + + /* No. of uint32_t elements in payload buffer. Will depend on the coex + * version + */ + uint32_t length; + + /* Payload buffer */ + uint32_t config_buf[WMI_HOST_COEX_CONFIG_BUF_MAX_LEN]; +} coex_ver_cfg_t; + +#define WMI_HOST_RTT_REPORT_CFR 0 +#define WMI_HOST_RTT_NO_REPORT_CFR 1 +#define WMI_HOST_RTT_AGGREGATE_REPORT_NON_CFR 2 +/** + * struct rtt_meas_req_test_params + * @peer: peer mac address + * @req_frame_type: RTT request frame type + * @req_bw: requested bandwidth + * @req_preamble: Preamble + * @req_num_req: num of requests + * @req_report_type: report type + * @num_measurements: number of measurements + * @asap_mode: priority + * @lci_requested: LCI requested + * @loc_civ_requested: + * @channel_param: channel param + * @req_id: requested id + */ +struct rtt_meas_req_test_params { + uint8_t peer[QDF_MAC_ADDR_SIZE]; + int req_frame_type; + int req_bw; + int req_preamble; + int req_num_req; + int req_report_type; + uint32_t num_measurements; + uint32_t asap_mode; + uint32_t lci_requested; + uint32_t loc_civ_requested; + struct channel_param channel; + uint8_t req_id; +}; + +/** + * struct peer_request_pn_param - PN request params + * @vdev_id: vdev id + * @peer_macaddr: Peer mac address + * @key_type: key type + */ +struct peer_request_pn_param { + uint32_t vdev_id; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t key_type; +}; + +/** + * struct rtt_meas_req_params - RTT measurement request params + * @req_id: Request id + * @vdev_id: vdev id + * @is_mode_na: 11NA + * @is_mode_ac: AC + * @is_bw_20: 20 + * @is_bw_40: 40 + * @is_bw_80: 80 + * @sta_mac_addr: pointer to station mac address + * @spoof_mac_addr: pointer to spoof mac address + * @num_probe_rqst: number of probe request + * @channel_param: channel param + */ +struct rtt_meas_req_params { + uint8_t req_id; + uint8_t vdev_id; + bool is_mode_na; + bool is_mode_ac; + bool is_bw_20; + bool is_bw_40; + bool is_bw_80; + uint8_t *sta_mac_addr; + uint8_t *spoof_mac_addr; + uint32_t num_probe_rqst; + struct channel_param channel; +}; + +/** + * struct lci_set_params - LCI params + * @lci_data: pointer to LCI data + * @latitude_unc: latitude + * @latitude_0_12: bits 0 to 1 of latitude + * @latitude_2_33: bits 2 to 33 of latitude + * @longitude_unc: longitude + * @longitude_0_1: bits 0 to 1 of longitude + * @longitude_2_33: bits 2 to 33 of longitude + * @altitude_type: altitude type + * @altitude_unc_0_3: altitude bits 0 - 3 + * @altitude_unc_4_5: altitude bits 4 - 5 + * @altitude: altitude + * @datum: dataum + * @reg_loc_agmt: + * @reg_loc_dse: + * @dep_sta: + * @version: version + */ +struct lci_set_params { + void *lci_data; + uint8_t latitude_unc:6, + latitude_0_1:2; + uint32_t latitude_2_33; + uint8_t longitude_unc:6, + longitude_0_1:2; + uint32_t longitude_2_33; + uint8_t altitude_type:4, + altitude_unc_0_3:4; + uint32_t altitude_unc_4_5:2, + altitude:30; + uint8_t datum:3, + reg_loc_agmt:1, + reg_loc_dse:1, + dep_sta:1, + version:2; + uint8_t *colocated_bss; + int msg_len; +}; + +/** + * struct lcr_set_params - LCR params + * @lcr_data: pointer to lcr data + */ +struct lcr_set_params { + void *lcr_data; + int msg_len; +}; + +/** + * struct rtt_keepalive_req_params - RTT keepalive params + * @macaddr: pointer to macaddress + * @req_id: Request id + * @vdev_id: vdev id + * @stop: start/stop + */ +struct rtt_keepalive_req_params { + uint8_t *macaddr; + uint8_t req_id; + uint8_t vdev_id; + bool stop; +}; + +/** + * struct rx_reorder_queue_setup_params - Reorder queue setup params + * @peer_mac_addr: Peer mac address + * @tid: TID + * @vdev_id: vdev id + * @hw_qdesc_paddr_lo: lower 32 bits of queue desc adddress + * @hw_qdesc_paddr_hi: upper 32 bits of queue desc adddress + * @queue_no: 16-bit number assigned by host for queue + * @ba_window_size_valid: BA window size validity flag + * @ba_window_size: BA window size + */ +struct rx_reorder_queue_setup_params { + uint8_t *peer_macaddr; + uint16_t tid; + uint16_t vdev_id; + uint32_t hw_qdesc_paddr_lo; + uint32_t hw_qdesc_paddr_hi; + uint16_t queue_no; + uint8_t ba_window_size_valid; + uint16_t ba_window_size; + +}; + +/** + * struct rx_reorder_queue_remove_params - Reorder queue setup params + * @peer_mac_addr: Peer mac address + * @vdev_id: vdev id + * @peer_tid_bitmap: peer tid bitmap + */ +struct rx_reorder_queue_remove_params { + uint8_t *peer_macaddr; + uint16_t vdev_id; + uint32_t peer_tid_bitmap; +}; + +/** + * struct wmi_host_stats_event - Stats event params + * @stats_id: stats id of type wmi_host_stats_event + * @num_pdev_stats: number of pdev stats event structures 0 or 1 + * @num_pdev_ext_stats: number of pdev ext stats event structures + * @num_vdev_stats: number of vdev stats + * @num_peer_stats: number of peer stats event structures 0 or max peers + * @num_peer_extd_stats: number of peer extended stats event structures 0 + * or max peers + * @num_bcnflt_stats: number of beacon filter stats + * @num_chan_stats: number of channel stats + * @pdev_id: device id for the radio + * @num_bcn_stats: number of beacon stats + * @num_rssi_stats: number of rssi stats + * @num_peer_adv_stats: number of peer adv stats + * @num_mib_stats: number of mib stats + * @num_mib_extd_stats: number of extended mib stats + * @last_event: specify if the current event is the last event + */ +typedef struct { + wmi_host_stats_id stats_id; + uint32_t num_pdev_stats; + uint32_t num_pdev_ext_stats; + uint32_t num_vdev_stats; + uint32_t num_peer_stats; + uint32_t num_peer_extd_stats; + uint32_t num_bcnflt_stats; + uint32_t num_chan_stats; + uint32_t pdev_id; + uint32_t num_bcn_stats; + uint32_t num_rssi_stats; + uint32_t num_peer_adv_stats; + uint32_t num_mib_stats; + uint32_t num_mib_extd_stats; + uint32_t last_event; +} wmi_host_stats_event; + +/** + * struct wmi_host_peer_extd_stats - peer extd stats event structure + * @peer_macaddr: Peer mac address + * @inactive_time: inactive time in secs + * @peer_chain_rssi: peer rssi + * @rx_duration: RX duration + * @peer_tx_bytes: TX bytes + * @last_tx_rate_code: Tx rate code of last frame + * @last_tx_power: Tx power latest + * @atf_tokens_allocated: atf tokens allocated + * @atf_tokens_utilized: atf tokens utilized + * @num_mu_tx_blacklisted: Blacklisted MU Tx count + * @sgi_count: sgi count of the peer + * @rx_mc_bc_cnt: Total number of received multicast & broadcast data frames + * corresponding to this peer, 1 in the MSB of rx_mc_bc_cnt represents a + * valid data + * @rx_retry_cnt: Number of rx retries received from current station + */ +typedef struct { + wmi_host_mac_addr peer_macaddr; + uint32_t inactive_time; + uint32_t peer_chain_rssi; + uint32_t rx_duration; + uint32_t peer_tx_bytes; + uint32_t last_tx_rate_code; + uint32_t last_tx_power; + uint32_t atf_tokens_allocated; + uint32_t atf_tokens_utilized; + uint32_t num_mu_tx_blacklisted; + uint32_t sgi_count; + uint32_t rx_mc_bc_cnt; + uint32_t rx_retry_cnt; +} wmi_host_peer_extd_stats; + +/** + * struct wmi_host_peer_adv_stats - peer adv stats event structure + * @peer_macaddr: mac address + * @fcs_count: fcs count + * @rx_count: rx count + * @rx_bytes: rx bytes + */ +struct wmi_host_peer_adv_stats { + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t fcs_count; + uint32_t rx_count; + uint64_t rx_bytes; +}; + +/** + * struct wmi_host_pdev_ext_stats - peer ext stats structure + * @rx_rssi_comb: RX rssi + * @rx_rssi_chain0: RX rssi chain 0 + * @rx_rssi_chain1: RX rssi chain 1 + * @rx_rssi_chain2: RX rssi chain 2 + * @rx_rssi_chain3: RX rssi chain 3 + * @rx_mcs: RX MCS array + * @tx_mcs: TX MCS array + * @ack_rssi: Ack rssi + */ +typedef struct { + uint32_t rx_rssi_comb; + uint32_t rx_rssi_chain0; + uint32_t rx_rssi_chain1; + uint32_t rx_rssi_chain2; + uint32_t rx_rssi_chain3; + uint32_t rx_mcs[10]; + uint32_t tx_mcs[10]; + uint32_t ack_rssi; +} wmi_host_pdev_ext_stats; + +/** + * struct wmi_host_dbg_tx_stats - Debug stats + * @comp_queued: Num HTT cookies queued to dispatch list + * @comp_delivered: Num HTT cookies dispatched + * @msdu_enqued: Num MSDU queued to WAL + * @mpdu_enqued: Num MPDU queue to WAL + * @wmm_drop: Num MSDUs dropped by WMM limit + * @local_enqued: Num Local frames queued + * @local_freed: Num Local frames done + * @hw_queued: Num queued to HW + * @hw_reaped: Num PPDU reaped from HW + * @underrun: Num underruns + * @hw_paused: HW Paused. + * @tx_abort: Num PPDUs cleaned up in TX abort + * @mpdus_requed: Num MPDUs requed by SW + * @tx_ko: excessive retries + * @tx_xretry: + * @data_rc: data hw rate code + * @self_triggers: Scheduler self triggers + * @sw_retry_failure: frames dropped due to excessive sw retries + * @illgl_rate_phy_err: illegal rate phy errors + * @pdev_cont_xretry: wal pdev continuous xretry + * @pdev_tx_timeout: wal pdev continuous xretry + * @pdev_resets: wal pdev resets + * @stateless_tid_alloc_failure: frames dropped due to non-availability of + * stateless TIDs + * @phy_underrun: PhY/BB underrun + * @txop_ovf: MPDU is more than txop limit + * @seq_posted: Number of Sequences posted + * @seq_failed_queueing: Number of Sequences failed queueing + * @seq_completed: Number of Sequences completed + * @seq_restarted: Number of Sequences restarted + * @mu_seq_posted: Number of MU Sequences posted + * @mpdus_sw_flush: Num MPDUs flushed by SW, HWPAUSED, SW TXABORT + * (Reset,channel change) + * @mpdus_hw_filter: Num MPDUs filtered by HW, all filter condition + * (TTL expired) + * @mpdus_truncated: Num MPDUs truncated by PDG (TXOP, TBTT, + * PPDU_duration based on rate, dyn_bw) + * @mpdus_ack_failed: Num MPDUs that was tried but didn't receive ACK or BA + * @mpdus_expired: Num MPDUs that was dropped du to expiry. + * @mc_dropr: Num mc drops + */ +typedef struct { + int32_t comp_queued; + int32_t comp_delivered; + int32_t msdu_enqued; + int32_t mpdu_enqued; + int32_t wmm_drop; + int32_t local_enqued; + int32_t local_freed; + int32_t hw_queued; + int32_t hw_reaped; + int32_t underrun; + uint32_t hw_paused; + int32_t tx_abort; + int32_t mpdus_requed; + uint32_t tx_ko; + uint32_t tx_xretry; + uint32_t data_rc; + uint32_t self_triggers; + uint32_t sw_retry_failure; + uint32_t illgl_rate_phy_err; + uint32_t pdev_cont_xretry; + uint32_t pdev_tx_timeout; + uint32_t pdev_resets; + uint32_t stateless_tid_alloc_failure; + uint32_t phy_underrun; + uint32_t txop_ovf; + uint32_t seq_posted; + uint32_t seq_failed_queueing; + uint32_t seq_completed; + uint32_t seq_restarted; + uint32_t mu_seq_posted; + int32_t mpdus_sw_flush; + int32_t mpdus_hw_filter; + int32_t mpdus_truncated; + int32_t mpdus_ack_failed; + int32_t mpdus_expired; + uint32_t mc_drop; +} wmi_host_dbg_tx_stats; + +/** + * struct wmi_host_dbg_rx_stats - RX Debug stats + * @mid_ppdu_route_change: Cnts any change in ring routing mid-ppdu + * @status_rcvd: Total number of statuses processed + * @r0_frags: Extra frags on rings 0 + * @r1_frags: Extra frags on rings 1 + * @r2_frags: Extra frags on rings 2 + * @r3_frags: Extra frags on rings 3 + * @htt_msdus: MSDUs delivered to HTT + * @htt_mpdus: MPDUs delivered to HTT + * @loc_msdus: MSDUs delivered to local stack + * @loc_mpdus: MPDUS delivered to local stack + * @oversize_amsdu: AMSDUs that have more MSDUs than the status ring size + * @phy_errs: Number of PHY errors + * @phy_err_drop: Number of PHY errors drops + * @mpdu_errs: Number of mpdu errors - FCS, MIC, ENC etc. + * @pdev_rx_timeout: Number of rx inactivity timeouts + * @rx_ovfl_errs: Number of rx overflow errors. + */ +typedef struct { + int32_t mid_ppdu_route_change; + int32_t status_rcvd; + int32_t r0_frags; + int32_t r1_frags; + int32_t r2_frags; + int32_t r3_frags; + int32_t htt_msdus; + int32_t htt_mpdus; + int32_t loc_msdus; + int32_t loc_mpdus; + int32_t oversize_amsdu; + int32_t phy_errs; + int32_t phy_err_drop; + int32_t mpdu_errs; + uint32_t pdev_rx_timeout; + int32_t rx_ovfl_errs; +} wmi_host_dbg_rx_stats; + +/** struct wmi_host_dbg_mem_stats - memory stats + * @iram_free_size: IRAM free size on target + * @dram_free_size: DRAM free size on target + * @sram_free_size: SRAM free size on target + */ +typedef struct { + uint32_t iram_free_size; + uint32_t dram_free_size; + /* Only Non-TLV */ + uint32_t sram_free_size; +} wmi_host_dbg_mem_stats; + +typedef struct { + /* Only TLV */ + int32_t dummy;/* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */ +} wmi_host_dbg_peer_stats; + +/** + * struct wmi_host_dbg_stats - host debug stats + * @tx: TX stats of type wmi_host_dbg_tx_stats + * @rx: RX stats of type wmi_host_dbg_rx_stats + * @mem: Memory stats of type wmi_host_dbg_mem_stats + * @peer: peer stats of type wmi_host_dbg_peer_stats + */ +typedef struct { + wmi_host_dbg_tx_stats tx; + wmi_host_dbg_rx_stats rx; + wmi_host_dbg_mem_stats mem; + wmi_host_dbg_peer_stats peer; +} wmi_host_dbg_stats; + +/** + * struct wmi_host_pdev_stats - PDEV stats + * @chan_nf: Channel noise floor + * @tx_frame_count: TX frame count + * @rx_frame_count: RX frame count + * @rx_clear_count: rx clear count + * @cycle_count: cycle count + * @phy_err_count: Phy error count + * @chan_tx_pwr: Channel Tx Power + * @pdev_stats: WAL dbg stats + * @ackRcvBad: + * @rtsBad: + * @rtsGood: + * @fcsBad: + * @noBeacons: + * @mib_int_count: + */ +typedef struct { + int32_t chan_nf; + uint32_t tx_frame_count; + uint32_t rx_frame_count; + uint32_t rx_clear_count; + uint32_t cycle_count; + uint32_t phy_err_count; + uint32_t chan_tx_pwr; + wmi_host_dbg_stats pdev_stats; + uint32_t ackRcvBad; + uint32_t rtsBad; + uint32_t rtsGood; + uint32_t fcsBad; + uint32_t noBeacons; + uint32_t mib_int_count; +} wmi_host_pdev_stats; + + +/** + * struct wmi_unit_test_event - Structure corresponding to WMI Unit test event + * @vdev_id: VDEV ID + * @module_id: MODULE ID + * @diag_token: Diag Token (the number that was generated in the unit-test cmd) + * @flag: flag has 2 bits 0x1 indicates status, and 0x2 indicates done-bit + * @payload_len: payload_len (blindly copied from payload_len field in WMI) + * @buffer_len: actual number of data bytes in the variable data size TLV + * buffer_len is likely to be the nearest multiple of 4 (from + * payload_len). both buffer_len and payload_len need to be + * passed to wifitool so that the driver can be agnostic + * regarding these differences. + * @buffer: data buffer + */ +typedef struct { + uint32_t vdev_id; + uint32_t module_id; + uint32_t diag_token; + uint32_t flag; + uint32_t payload_len; + uint32_t buffer_len; + uint8_t buffer[1]; +} wmi_unit_test_event; + + +/** + * struct wmi_host_snr_info - WMI host Signal to noise ration info + * @bcn_snr: beacon SNR + * @dat_snr: Data frames SNR + */ +typedef struct { + int32_t bcn_snr; + int32_t dat_snr; +} wmi_host_snr_info; + +#define WMI_HOST_MAX_TX_RATE_VALUES 10 /*Max Tx Rates */ +#define WMI_HOST_MAX_RSSI_VALUES 10 /*Max Rssi values */ + +/* The WLAN_MAX_AC macro cannot be changed without breaking + * * WMI compatibility. + * * The maximum value of access category + * */ +#define WMI_HOST_WLAN_MAX_AC 4 + +/* The WMI_HOST_MAX_CHAINS macro cannot be changed without breaking WMI + * compatibility. + * The maximum value of number of chains + */ +#define WMI_HOST_MAX_CHAINS 8 + +/** + * struct wmi_host_vdev_stats - vdev stats structure + * @vdev_id: unique id identifying the VDEV, generated by the caller + * Rest all Only TLV + * @vdev_snr: wmi_host_snr_info + * @tx_frm_cnt: Total number of packets(per AC) that were successfully + * transmitted (with and without retries, + * including multi-cast, broadcast) + * @rx_frm_cnt: Total number of packets that were successfully received + * (after appropriate filter rules including multi-cast, broadcast) + * @multiple_retry_cnt: The number of MSDU packets and MMPDU frames per AC + * that the 802.11 station successfully transmitted after + * more than one retransmission attempt + * @fail_cnt: Total number packets(per AC) failed to transmit + * @rts_fail_cnt: Total number of RTS/CTS sequence failures for transmission + * of a packet + * @rts_succ_cnt: Total number of RTS/CTS sequence success for transmission + * of a packet + * @rx_err_cnt: The receive error count. HAL will provide the + * RxP FCS error global + * @rx_discard_cnt: The sum of the receive error count and + * dropped-receive-buffer error count (FCS error) + * @ack_fail_cnt: Total number packets failed transmit because of no + * ACK from the remote entity + * @tx_rate_history:History of last ten transmit rate, in units of 500 kbit/sec + * @bcn_rssi_history: History of last ten Beacon rssi of the connected Bss + */ +typedef struct { + uint32_t vdev_id; + /* Rest all Only TLV */ + wmi_host_snr_info vdev_snr; + uint32_t tx_frm_cnt[WMI_HOST_WLAN_MAX_AC]; + uint32_t rx_frm_cnt; + uint32_t multiple_retry_cnt[WMI_HOST_WLAN_MAX_AC]; + uint32_t fail_cnt[WMI_HOST_WLAN_MAX_AC]; + uint32_t rts_fail_cnt; + uint32_t rts_succ_cnt; + uint32_t rx_err_cnt; + uint32_t rx_discard_cnt; + uint32_t ack_fail_cnt; + uint32_t tx_rate_history[WMI_HOST_MAX_TX_RATE_VALUES]; + uint32_t bcn_rssi_history[WMI_HOST_MAX_RSSI_VALUES]; +} wmi_host_vdev_stats; + +/** + * struct wmi_host_vdev_stats - vdev stats structure + * @vdev_id: unique id identifying the VDEV, generated by the caller + * @tx_bcn_succ_cnt: Total number of beacon frame transmitted successfully + * @tx_bcn_outage_cnt: Total number of failed beacons + */ +typedef struct { + uint32_t vdev_id; + uint32_t tx_bcn_succ_cnt; + uint32_t tx_bcn_outage_cnt; +} wmi_host_bcn_stats; + +/** + * struct wmi_host_vdev_extd_stats - VDEV extended stats + * @vdev_id: unique id identifying the VDEV, generated by the caller + * @ppdu_aggr_cnt: No of Aggrs Queued to HW + * @ppdu_noack: No of PPDU's not Acked includes both aggr and nonaggr's + * @mpdu_queued: No of MPDU/Subframes's queued to HW in Aggregates + * @ppdu_nonaggr_cnt: No of NonAggr/MPDU/Subframes's queued to HW + * in Legacy NonAggregates + * @mpdu_sw_requed: No of MPDU/Subframes's SW requeued includes + * both Aggr and NonAggr + * @mpdu_suc_retry: No of MPDU/Subframes's transmitted Successfully + * after Single/mul HW retry + * @mpdu_suc_multitry: No of MPDU/Subframes's transmitted Success + * after Multiple HW retry + * @mpdu_fail_retry: No of MPDU/Subframes's failed transmission + * after Multiple HW retry + * @reserved[13]: for future extensions set to 0x0 + */ +typedef struct { + uint32_t vdev_id; + uint32_t ppdu_aggr_cnt; + uint32_t ppdu_noack; + uint32_t mpdu_queued; + uint32_t ppdu_nonaggr_cnt; + uint32_t mpdu_sw_requed; + uint32_t mpdu_suc_retry; + uint32_t mpdu_suc_multitry; + uint32_t mpdu_fail_retry; + uint32_t reserved[13]; +} wmi_host_vdev_extd_stats; + +/** + * struct wmi_host_vdev_nac_rssi_event - VDEV nac rssi stats + * @vdev_id: unique id identifying the VDEV, generated by the caller + * @last_rssi: rssi + * @avg_rssi: averge rssi + * @rssi_seq_num: rssi sequence number + */ +struct wmi_host_vdev_nac_rssi_event { + uint32_t vdev_id; + uint32_t last_rssi; + uint32_t avg_rssi; + uint32_t rssi_seq_num; +}; + +/** + * structure wmi_host_peer_retry_stats - peer retry stats + * @peer_macaddr: peer macaddr + * @retry_counter_wraparnd_ind: wraparound counter indication + * @msdu_success: successfully transmitted msdus + * @msdu_retried: Retried msdus + * @msdu_mul_retried: msdus retried for more than once + * @msdu_failed: msdus failed + * @reserved: for furure extensions + */ +struct wmi_host_peer_retry_stats { + wmi_host_mac_addr peer_macaddr; + uint32_t retry_counter_wraparnd_ind; + uint32_t msdus_success; + uint32_t msdus_retried; + uint32_t msdus_mul_retried; + uint32_t msdus_failed; + uint32_t reserved[4]; +}; + +/** + * struct wmi_host_per_chain_rssi_stats - VDEV nac rssi stats + * @vdev_id: unique id identifying the VDEV, generated by the caller + * @rssi_avg_beacon: per chain avg rssi for beacon + * @rssi_avg_data: per chain avg rssi for data + * @peer_macaddr: peer macaddr + */ +struct wmi_host_per_chain_rssi_stats { + uint32_t vdev_id; + int32_t rssi_avg_beacon[WMI_HOST_MAX_CHAINS]; + int32_t rssi_avg_data[WMI_HOST_MAX_CHAINS]; + wmi_host_mac_addr peer_macaddr; +}; + +/** + * struct wmi_host_peer_stats - peer stats + * @peer_macaddr: peer MAC address + * @peer_rssi: rssi + * @peer_rssi_seq_num: rssi sequence number + * @peer_tx_rate: last tx data rate used for peer + * @peer_rx_rate: last rx data rate used for peer + * @currentper: Current PER + * @retries: Retries happened during transmission + * @txratecount: Maximum Aggregation Size + * @max4msframelen: Max4msframelen of tx rates used + * @totalsubframes: Total no of subframes + * @txbytes: No of bytes transmitted to the client + * @nobuffs[4]: Packet Loss due to buffer overflows + * @excretries[4]: Packet Loss due to excessive retries + * @peer_rssi_changed: how many times peer's RSSI changed by a + * non-negligible amount + */ +typedef struct { + wmi_host_mac_addr peer_macaddr; + int8_t peer_rssi; + uint32_t peer_rssi_seq_num; + uint32_t peer_tx_rate; + uint32_t peer_rx_rate; + uint32_t currentper; + uint32_t retries; + uint32_t txratecount; + uint32_t max4msframelen; + uint32_t totalsubframes; + uint32_t txbytes; + uint32_t nobuffs[4]; + uint32_t excretries[4]; + uint32_t peer_rssi_changed; +} wmi_host_peer_stats; + +typedef struct { + uint32_t dummy; +} wmi_host_bcnflt_stats; + +/** + * struct wmi_host_chan_stats - WMI chan stats + * @chan_mhz: Primary channel freq of the channel for which stats are sent + * @sampling_period_us: Time spent on the channel + * @rx_clear_count: Aggregate duration over a sampling period for + * which channel activity was observed + * @tx_duration_us: Accumalation of the TX PPDU duration over a sampling period + * @rx_duration_us: Accumalation of the RX PPDU duration over a sampling period + */ +typedef struct { + uint32_t chan_mhz; + uint32_t sampling_period_us; + uint32_t rx_clear_count; + uint32_t tx_duration_us; + uint32_t rx_duration_us; +} wmi_host_chan_stats; + +#ifdef FEATURE_WLAN_TIME_SYNC_FTM + +#define FTM_TIME_SYNC_QTIME_PAIR_MAX 32 + +/** + * struct ftm_time_sync_start_stop_param- Get wlan time sync ftm info + * @vdev_id: vdev id + * @timer_interval: periodicity to trigger wlan time sync strobe + * @num_reads: Number of times to trigger wlabn time sync strobe + * @qtime: ref Qtimer value + * @mac_time: ref Mac timer value + */ +struct ftm_time_sync_start_stop_params { + uint32_t vdev_id; + uint32_t timer_interval; + uint32_t num_reads; + uint64_t qtime; + uint64_t mac_time; +}; + +/** + * struct wlan_time_sync_qtime_pair- Get wlan time sync qtime pair value + * @vdev_id: vdev id + * @qtime_master: qtimer value of master + * @qtime_slave: qtimer value of slave + */ +struct wlan_time_sync_qtime_pair { + uint64_t qtime_master; + uint64_t qtime_slave; +}; + +/** + * struct ftm_time_sync_offset- Get ftm time sync offset + * @vdev_id: vdev id + * @num_qtime: number of qtime values received + * @pairs: array of qtime pairs + */ +struct ftm_time_sync_offset { + uint32_t vdev_id; + uint32_t num_qtime; + struct wlan_time_sync_qtime_pair pairs[FTM_TIME_SYNC_QTIME_PAIR_MAX]; +}; +#endif + +#define WMI_EVENT_ID_INVALID 0 +/** + * Host based ENUM IDs for events to abstract target enums for event_id + */ +typedef enum { + wmi_service_ready_event_id = 0, + wmi_ready_event_id, + wmi_dbg_msg_event_id, + wmi_scan_event_id, + wmi_echo_event_id, + wmi_update_stats_event_id, + wmi_inst_rssi_stats_event_id, + wmi_vdev_start_resp_event_id, + wmi_vdev_standby_req_event_id, + wmi_vdev_resume_req_event_id, + wmi_vdev_stopped_event_id, + wmi_peer_sta_kickout_event_id, + wmi_host_swba_event_id, + wmi_tbttoffset_update_event_id, + wmi_mgmt_rx_event_id, + wmi_chan_info_event_id, + wmi_phyerr_event_id, + wmi_roam_event_id, + wmi_profile_match, + wmi_debug_print_event_id, + wmi_pdev_qvit_event_id, + wmi_wlan_profile_data_event_id, + wmi_rtt_meas_report_event_id, + wmi_tsf_meas_report_event_id, + wmi_rtt_error_report_event_id, + wmi_rtt_keepalive_event_id, + wmi_oem_cap_event_id, + wmi_oem_meas_report_event_id, + wmi_oem_report_event_id, + wmi_nan_event_id, + wmi_wow_wakeup_host_event_id, + wmi_gtk_offload_status_event_id, + wmi_gtk_rekey_fail_event_id, + wmi_dcs_interference_event_id, + wmi_pdev_tpc_config_event_id, + wmi_csa_handling_event_id, + wmi_gpio_input_event_id, + wmi_peer_ratecode_list_event_id, + wmi_generic_buffer_event_id, + wmi_mcast_buf_release_event_id, + wmi_mcast_list_ageout_event_id, + wmi_vdev_get_keepalive_event_id, + wmi_wds_peer_event_id, + wmi_peer_sta_ps_statechg_event_id, + wmi_pdev_fips_event_id, + wmi_tt_stats_event_id, + wmi_pdev_channel_hopping_event_id, + wmi_pdev_ani_cck_level_event_id, + wmi_pdev_ani_ofdm_level_event_id, + wmi_pdev_reserve_ast_entry_event_id, + wmi_pdev_nfcal_power_event_id, + wmi_pdev_tpc_event_id, + wmi_pdev_get_ast_info_event_id, + wmi_pdev_temperature_event_id, + wmi_pdev_nfcal_power_all_channels_event_id, + wmi_pdev_bss_chan_info_event_id, + wmi_mu_report_event_id, + wmi_pdev_utf_event_id, + wmi_pdev_dump_event_id, + wmi_tx_pause_event_id, + wmi_dfs_radar_event_id, + wmi_pdev_l1ss_track_event_id, + wmi_service_ready_ext_event_id, + wmi_vdev_install_key_complete_event_id, + wmi_vdev_mcc_bcn_intvl_change_req_event_id, + wmi_vdev_tsf_report_event_id, + wmi_peer_info_event_id, + wmi_peer_tx_fail_cnt_thr_event_id, + wmi_peer_estimated_linkspeed_event_id, + wmi_peer_state_event_id, + wmi_offload_bcn_tx_status_event_id, + wmi_offload_prob_resp_tx_status_event_id, + wmi_mgmt_tx_completion_event_id, + wmi_tx_delba_complete_event_id, + wmi_tx_addba_complete_event_id, + wmi_ba_rsp_ssn_event_id, + wmi_aggr_state_trig_event_id, + wmi_roam_synch_event_id, + wmi_roam_synch_frame_event_id, + wmi_p2p_disc_event_id, + wmi_p2p_noa_event_id, + wmi_p2p_lo_stop_event_id, + wmi_vdev_add_macaddr_rx_filter_event_id, + wmi_pdev_resume_event_id, + wmi_d0_wow_disable_ack_event_id, + wmi_wow_initial_wakeup_event_id, + wmi_stats_ext_event_id, + wmi_iface_link_stats_event_id, + wmi_peer_link_stats_event_id, + wmi_radio_link_stats_link, + wmi_update_fw_mem_dump_event_id, + wmi_diag_event_id_log_supported_event_id, + wmi_nlo_match_event_id, + wmi_nlo_scan_complete_event_id, + wmi_apfind_event_id, + wmi_passpoint_match_event_id, + wmi_chatter_pc_query_event_id, + wmi_pdev_ftm_intg_event_id, + wmi_wlan_freq_avoid_event_id, + wmi_thermal_mgmt_event_id, + wmi_diag_container_event_id, + wmi_host_auto_shutdown_event_id, + wmi_update_whal_mib_stats_event_id, + wmi_update_vdev_rate_stats_event_id, + wmi_diag_event_id, + wmi_unit_test_event_id, + wmi_ocb_set_sched_event_id, + wmi_dbg_mesg_flush_complete_event_id, + wmi_rssi_breach_event_id, + wmi_uploadh_event_id, + wmi_captureh_event_id, + wmi_rfkill_state_change_event_id, + wmi_tdls_peer_event_id, + wmi_batch_scan_enabled_event_id, + wmi_batch_scan_result_event_id, + wmi_lpi_result_event_id, + wmi_lpi_status_event_id, + wmi_lpi_handoff_event_id, + wmi_extscan_start_stop_event_id, + wmi_extscan_operation_event_id, + wmi_extscan_table_usage_event_id, + wmi_extscan_cached_results_event_id, + wmi_extscan_wlan_change_results_event_id, + wmi_extscan_hotlist_match_event_id, + wmi_extscan_capabilities_event_id, + wmi_extscan_hotlist_ssid_match_event_id, + wmi_mdns_stats_event_id, + wmi_sap_ofl_add_sta_event_id, + wmi_sap_ofl_del_sta_event_id, + wmi_ocb_set_config_resp_event_id, + wmi_ocb_get_tsf_timer_resp_event_id, + wmi_dcc_get_stats_resp_event_id, + wmi_dcc_update_ndl_resp_event_id, + wmi_dcc_stats_event_id, + wmi_soc_set_hw_mode_resp_event_id, + wmi_soc_hw_mode_transition_event_id, + wmi_soc_set_dual_mac_config_resp_event_id, + wmi_tx_data_traffic_ctrl_event_id, + wmi_peer_tx_mu_txmit_count_event_id, + wmi_peer_gid_userpos_list_event_id, + wmi_pdev_check_cal_version_event_id, + wmi_atf_peer_stats_event_id, + wmi_peer_delete_response_event_id, + wmi_peer_delete_all_response_event_id, + wmi_pdev_csa_switch_count_status_event_id, + wmi_reg_chan_list_cc_event_id, + wmi_offchan_data_tx_completion_event, + wmi_dfs_cac_complete_id, + wmi_dfs_radar_detection_event_id, + wmi_ext_tbttoffset_update_event_id, + wmi_11d_new_country_event_id, + wmi_get_arp_stats_req_id, + wmi_service_available_event_id, + wmi_update_rcpi_event_id, + wmi_pdev_wds_entry_list_event_id, + wmi_ndp_initiator_rsp_event_id, + wmi_ndp_indication_event_id, + wmi_ndp_confirm_event_id, + wmi_ndp_responder_rsp_event_id, + wmi_ndp_end_indication_event_id, + wmi_ndp_end_rsp_event_id, + wmi_nan_dmesg_event_id, + wmi_ndl_schedule_update_event_id, + wmi_ndp_event_id, + wmi_oem_response_event_id, + wmi_peer_stats_info_event_id, + wmi_pdev_chip_power_stats_event_id, + wmi_ap_ps_egap_info_event_id, + wmi_peer_assoc_conf_event_id, + wmi_vdev_delete_resp_event_id, + wmi_apf_capability_info_event_id, + wmi_vdev_encrypt_decrypt_data_rsp_event_id, + wmi_report_rx_aggr_failure_event_id, + wmi_pdev_chip_pwr_save_failure_detect_event_id, + wmi_peer_antdiv_info_event_id, + wmi_pdev_set_hw_mode_rsp_event_id, + wmi_pdev_hw_mode_transition_event_id, + wmi_pdev_set_mac_config_resp_event_id, + wmi_coex_bt_activity_event_id, + wmi_mgmt_tx_bundle_completion_event_id, + wmi_radio_tx_power_level_stats_event_id, + wmi_report_stats_event_id, + wmi_dma_buf_release_event_id, + wmi_sap_obss_detection_report_event_id, + wmi_obss_color_collision_report_event_id, + wmi_host_swfda_event_id, + wmi_sar_get_limits_event_id, + wmi_pdev_div_rssi_antid_event_id, +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) + wmi_host_dfs_status_check_event_id, +#endif + wmi_twt_enable_complete_event_id, + wmi_twt_disable_complete_event_id, + wmi_apf_get_vdev_work_memory_resp_event_id, + wmi_roam_scan_stats_event_id, + wmi_vdev_ocac_complete_event_id, + +#ifdef OL_ATH_SMART_LOGGING + wmi_debug_fatal_condition_eventid, +#endif /* OL_ATH_SMART_LOGGING */ + wmi_wlan_sar2_result_event_id, + wmi_esp_estimate_event_id, + wmi_pdev_ctl_failsafe_check_event_id, + wmi_vdev_bcn_reception_stats_event_id, + wmi_roam_blacklist_event_id, + wmi_wlm_stats_event_id, + wmi_peer_cfr_capture_event_id, + wmi_pdev_cold_boot_cal_event_id, + wmi_vdev_get_mws_coex_state_eventid, + wmi_vdev_get_mws_coex_dpwb_state_eventid, + wmi_vdev_get_mws_coex_tdm_state_eventid, + wmi_vdev_get_mws_coex_idrx_state_eventid, + wmi_vdev_get_mws_coex_antenna_sharing_state_eventid, +#ifdef WLAN_FEATURE_INTEROP_ISSUES_AP + wmi_pdev_interop_issues_ap_event_id, +#endif + wmi_coex_report_antenna_isolation_event_id, + wmi_chan_rf_characterization_info_event_id, + wmi_roam_auth_offload_event_id, + wmi_service_ready_ext2_event_id, + wmi_get_elna_bypass_event_id, + wmi_motion_det_host_eventid, + wmi_motion_det_base_line_host_eventid, + wmi_get_ani_level_event_id, + wmi_peer_tx_pn_response_event_id, + wmi_roam_stats_event_id, + wmi_oem_data_event_id, + wmi_mgmt_offload_data_event_id, + wmi_pdev_multi_vdev_restart_response_event_id, + wmi_roam_pmkid_request_event_id, +#ifdef FEATURE_WLAN_TIME_SYNC_FTM + wmi_wlan_time_sync_ftm_start_stop_event_id, + wmi_wlan_time_sync_q_master_slave_offset_eventid, +#endif + wmi_roam_scan_chan_list_id, + wmi_events_max, +} wmi_conv_event_id; + +#define WMI_UNAVAILABLE_PARAM 0 +/** + * Host based ENUM IDs for PDEV params to abstract target enums + */ +typedef enum { + wmi_pdev_param_tx_chain_mask = 0, + wmi_pdev_param_rx_chain_mask, + wmi_pdev_param_txpower_limit2g, + wmi_pdev_param_txpower_limit5g, + wmi_pdev_param_txpower_scale, + wmi_pdev_param_beacon_gen_mode, + wmi_pdev_param_beacon_tx_mode, + wmi_pdev_param_resmgr_offchan_mode, + wmi_pdev_param_protection_mode, + wmi_pdev_param_dynamic_bw, + wmi_pdev_param_non_agg_sw_retry_th, + wmi_pdev_param_agg_sw_retry_th, + wmi_pdev_param_sta_kickout_th, + wmi_pdev_param_ac_aggrsize_scaling, + wmi_pdev_param_ltr_enable, + wmi_pdev_param_ltr_ac_latency_be, + wmi_pdev_param_ltr_ac_latency_bk, + wmi_pdev_param_ltr_ac_latency_vi, + wmi_pdev_param_ltr_ac_latency_vo, + wmi_pdev_param_ltr_ac_latency_timeout, + wmi_pdev_param_ltr_sleep_override, + wmi_pdev_param_ltr_rx_override, + wmi_pdev_param_ltr_tx_activity_timeout, + wmi_pdev_param_l1ss_enable, + wmi_pdev_param_dsleep_enable, + wmi_pdev_param_pcielp_txbuf_flush, + wmi_pdev_param_pcielp_txbuf_watermark, + wmi_pdev_param_pcielp_txbuf_tmo_en, + wmi_pdev_param_pcielp_txbuf_tmo_value, + wmi_pdev_param_pdev_stats_update_period, + wmi_pdev_param_vdev_stats_update_period, + wmi_pdev_param_peer_stats_update_period, + wmi_pdev_param_bcnflt_stats_update_period, + wmi_pdev_param_pmf_qos, + wmi_pdev_param_arp_ac_override, + wmi_pdev_param_dcs, + wmi_pdev_param_ani_enable, + wmi_pdev_param_ani_poll_period, + wmi_pdev_param_ani_listen_period, + wmi_pdev_param_ani_ofdm_level, + wmi_pdev_param_ani_cck_level, + wmi_pdev_param_dyntxchain, + wmi_pdev_param_proxy_sta, + wmi_pdev_param_idle_ps_config, + wmi_pdev_param_power_gating_sleep, + wmi_pdev_param_aggr_burst, + wmi_pdev_param_rx_decap_mode, + wmi_pdev_param_fast_channel_reset, + wmi_pdev_param_burst_dur, + wmi_pdev_param_burst_enable, + wmi_pdev_param_smart_antenna_default_antenna, + wmi_pdev_param_igmpmld_override, + wmi_pdev_param_igmpmld_tid, + wmi_pdev_param_antenna_gain, + wmi_pdev_param_rx_filter, + wmi_pdev_set_mcast_to_ucast_tid, + wmi_pdev_param_proxy_sta_mode, + wmi_pdev_param_set_mcast2ucast_mode, + wmi_pdev_param_set_mcast2ucast_buffer, + wmi_pdev_param_remove_mcast2ucast_buffer, + wmi_pdev_peer_sta_ps_statechg_enable, + wmi_pdev_param_block_interbss, + wmi_pdev_param_set_disable_reset_cmdid, + wmi_pdev_param_set_msdu_ttl_cmdid, + wmi_pdev_param_set_ppdu_duration_cmdid, + wmi_pdev_param_txbf_sound_period_cmdid, + wmi_pdev_param_set_promisc_mode_cmdid, + wmi_pdev_param_set_burst_mode_cmdid, + wmi_pdev_param_en_stats, + wmi_pdev_param_mu_group_policy, + wmi_pdev_param_noise_detection, + wmi_pdev_param_noise_threshold, + wmi_pdev_param_dpd_enable, + wmi_pdev_param_set_mcast_bcast_echo, + wmi_pdev_param_atf_strict_sch, + wmi_pdev_param_atf_sched_duration, + wmi_pdev_param_ant_plzn, + wmi_pdev_param_mgmt_retry_limit, + wmi_pdev_param_sensitivity_level, + wmi_pdev_param_signed_txpower_2g, + wmi_pdev_param_signed_txpower_5g, + wmi_pdev_param_enable_per_tid_amsdu, + wmi_pdev_param_enable_per_tid_ampdu, + wmi_pdev_param_cca_threshold, + wmi_pdev_param_rts_fixed_rate, + wmi_pdev_param_cal_period, + wmi_pdev_param_pdev_reset, + wmi_pdev_param_wapi_mbssid_offset, + wmi_pdev_param_arp_srcaddr, + wmi_pdev_param_arp_dstaddr, + wmi_pdev_param_txpower_decr_db, + wmi_pdev_param_rx_batchmode, + wmi_pdev_param_packet_aggr_delay, + wmi_pdev_param_atf_obss_noise_sch, + wmi_pdev_param_atf_obss_noise_scaling_factor, + wmi_pdev_param_cust_txpower_scale, + wmi_pdev_param_atf_dynamic_enable, + wmi_pdev_param_atf_ssid_group_policy, + wmi_pdev_param_rfkill_enable, + wmi_pdev_param_hw_rfkill_config, + wmi_pdev_param_low_power_rf_enable, + wmi_pdev_param_l1ss_track, + wmi_pdev_param_hyst_en, + wmi_pdev_param_power_collapse_enable, + wmi_pdev_param_led_sys_state, + wmi_pdev_param_led_enable, + wmi_pdev_param_audio_over_wlan_latency, + wmi_pdev_param_audio_over_wlan_enable, + wmi_pdev_param_whal_mib_stats_update_enable, + wmi_pdev_param_vdev_rate_stats_update_period, + wmi_pdev_param_cts_cbw, + wmi_pdev_param_wnts_config, + wmi_pdev_param_adaptive_early_rx_enable, + wmi_pdev_param_adaptive_early_rx_min_sleep_slop, + wmi_pdev_param_adaptive_early_rx_inc_dec_step, + wmi_pdev_param_early_rx_fix_sleep_slop, + wmi_pdev_param_bmiss_based_adaptive_bto_enable, + wmi_pdev_param_bmiss_bto_min_bcn_timeout, + wmi_pdev_param_bmiss_bto_inc_dec_step, + wmi_pdev_param_bto_fix_bcn_timeout, + wmi_pdev_param_ce_based_adaptive_bto_enable, + wmi_pdev_param_ce_bto_combo_ce_value, + wmi_pdev_param_tx_chain_mask_2g, + wmi_pdev_param_rx_chain_mask_2g, + wmi_pdev_param_tx_chain_mask_5g, + wmi_pdev_param_rx_chain_mask_5g, + wmi_pdev_param_tx_chain_mask_cck, + wmi_pdev_param_tx_chain_mask_1ss, + wmi_pdev_param_enable_btcoex, + wmi_pdev_param_atf_peer_stats, + wmi_pdev_param_btcoex_cfg, + wmi_pdev_param_mesh_mcast_enable, + wmi_pdev_param_tx_ack_timeout, + wmi_pdev_param_soft_tx_chain_mask, + wmi_pdev_param_cck_tx_enable, + wmi_pdev_param_esp_indication_period, + wmi_pdev_param_antenna_gain_half_db, + wmi_pdev_param_ru26_allowed, + wmi_pdev_param_esp_ba_window, + wmi_pdev_param_esp_airtime_fraction, + wmi_pdev_param_esp_ppdu_duration, + wmi_pdev_param_use_nol, + wmi_pdev_param_enable_peer_retry_stats, + wmi_pdev_param_ul_trig_int, + wmi_pdev_param_sub_channel_marking, + wmi_pdev_param_ul_ppdu_duration, + wmi_pdev_param_equal_ru_allocation_enable, + wmi_pdev_param_per_peer_prd_cfr_enable, + wmi_pdev_param_nav_override_config, + wmi_pdev_param_set_mgmt_ttl, + wmi_pdev_param_set_prb_rsp_ttl, + wmi_pdev_param_set_mu_ppdu_duration, + wmi_pdev_param_set_tbtt_ctrl, + wmi_pdev_param_set_cmd_obss_pd_threshold, + wmi_pdev_param_set_cmd_obss_pd_per_ac, + wmi_pdev_param_set_cong_ctrl_max_msdus, + wmi_pdev_param_enable_fw_dynamic_he_edca, + wmi_pdev_param_max, +} wmi_conv_pdev_params_id; + + +/** + * Host based ENUM IDs for VDEV params to abstract target enums + */ +typedef enum { + wmi_vdev_param_rts_threshold = 0, + wmi_vdev_param_fragmentation_threshold, + wmi_vdev_param_beacon_interval, + wmi_vdev_param_listen_interval, + wmi_vdev_param_multicast_rate, + wmi_vdev_param_mgmt_tx_rate, + wmi_vdev_param_slot_time, + wmi_vdev_param_preamble, + wmi_vdev_param_swba_time, + wmi_vdev_stats_update_period, + wmi_vdev_pwrsave_ageout_time, + wmi_vdev_host_swba_interval, + wmi_vdev_param_dtim_period, + wmi_vdev_oc_scheduler_air_time_limit, + wmi_vdev_param_wds, + wmi_vdev_param_atim_window, + wmi_vdev_param_bmiss_count_max, + wmi_vdev_param_bmiss_first_bcnt, + wmi_vdev_param_bmiss_final_bcnt, + wmi_vdev_param_feature_wmm, + wmi_vdev_param_chwidth, + wmi_vdev_param_chextoffset, + wmi_vdev_param_disable_htprotection, + wmi_vdev_param_sta_quickkickout, + wmi_vdev_param_mgmt_rate, + wmi_vdev_param_protection_mode, + wmi_vdev_param_fixed_rate, + wmi_vdev_param_sgi, + wmi_vdev_param_ldpc, + wmi_vdev_param_tx_stbc, + wmi_vdev_param_rx_stbc, + wmi_vdev_param_intra_bss_fwd, + wmi_vdev_param_def_keyid, + wmi_vdev_param_nss, + wmi_vdev_param_bcast_data_rate, + wmi_vdev_param_mcast_data_rate, + wmi_vdev_param_mcast_indicate, + wmi_vdev_param_dhcp_indicate, + wmi_vdev_param_unknown_dest_indicate, + wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs, + wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs, + wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs, + wmi_vdev_param_ap_enable_nawds, + wmi_vdev_param_mcast2ucast_set, + wmi_vdev_param_enable_rtscts, + wmi_vdev_param_rc_num_retries, + wmi_vdev_param_txbf, + wmi_vdev_param_packet_powersave, + wmi_vdev_param_drop_unencry, + wmi_vdev_param_tx_encap_type, + wmi_vdev_param_ap_detect_out_of_sync_sleeping_sta_time_secs, + wmi_vdev_param_cabq_maxdur, + wmi_vdev_param_mfptest_set, + wmi_vdev_param_rts_fixed_rate, + wmi_vdev_param_vht_sgimask, + wmi_vdev_param_vht80_ratemask, + wmi_vdev_param_early_rx_adjust_enable, + wmi_vdev_param_early_rx_tgt_bmiss_num, + wmi_vdev_param_early_rx_bmiss_sample_cycle, + wmi_vdev_param_early_rx_slop_step, + wmi_vdev_param_early_rx_init_slop, + wmi_vdev_param_early_rx_adjust_pause, + wmi_vdev_param_proxy_sta, + wmi_vdev_param_meru_vc, + wmi_vdev_param_rx_decap_type, + wmi_vdev_param_bw_nss_ratemask, + wmi_vdev_param_sensor_ap, + wmi_vdev_param_beacon_rate, + wmi_vdev_param_dtim_enable_cts, + wmi_vdev_param_sta_kickout, + wmi_vdev_param_tx_pwrlimit, + wmi_vdev_param_snr_num_for_cal, + wmi_vdev_param_roam_fw_offload, + wmi_vdev_param_enable_rmc, + wmi_vdev_param_ibss_max_bcn_lost_ms, + wmi_vdev_param_max_rate, + wmi_vdev_param_early_rx_drift_sample, + wmi_vdev_param_set_ibss_tx_fail_cnt_thr, + wmi_vdev_param_ebt_resync_timeout, + wmi_vdev_param_aggr_trig_event_enable, + wmi_vdev_param_is_ibss_power_save_allowed, + wmi_vdev_param_is_power_collapse_allowed, + wmi_vdev_param_is_awake_on_txrx_enabled, + wmi_vdev_param_inactivity_cnt, + wmi_vdev_param_txsp_end_inactivity_time_ms, + wmi_vdev_param_dtim_policy, + wmi_vdev_param_ibss_ps_warmup_time_secs, + wmi_vdev_param_ibss_ps_1rx_chain_in_atim_window_enable, + wmi_vdev_param_rx_leak_window, + wmi_vdev_param_stats_avg_factor, + wmi_vdev_param_disconnect_th, + wmi_vdev_param_rtscts_rate, + wmi_vdev_param_mcc_rtscts_protection_enable, + wmi_vdev_param_mcc_broadcast_probe_enable, + wmi_vdev_param_capabilities, + wmi_vdev_param_mgmt_tx_power, + wmi_vdev_param_atf_ssid_sched_policy, + wmi_vdev_param_disable_dyn_bw_rts, + wmi_vdev_param_ampdu_subframe_size_per_ac, + wmi_vdev_param_he_dcm_enable, + wmi_vdev_param_he_bss_color, + wmi_vdev_param_he_range_ext_enable, + wmi_vdev_param_set_hemu_mode, + wmi_vdev_param_set_he_ltf, + wmi_vdev_param_set_heop, + wmi_vdev_param_disable_cabq, + wmi_vdev_param_rate_dropdown_bmap, + wmi_vdev_param_tx_power, + wmi_vdev_param_set_ba_mode, + wmi_vdev_param_autorate_misc_cfg, + wmi_vdev_param_amsdu_subframe_size_per_ac, + wmi_vdev_param_set_he_sounding_mode, + wmi_vdev_param_sifs_trigger_rate, + wmi_vdev_param_ul_shortgi, + wmi_vdev_param_ul_he_ltf, + wmi_vdev_param_ul_nss, + wmi_vdev_param_ul_ppdu_bw, + wmi_vdev_param_ul_ldpc, + wmi_vdev_param_ul_stbc, + wmi_vdev_param_ul_fixed_rate, + wmi_vdev_param_rawmode_open_war, + wmi_vdev_param_max_mtu_size, + wmi_vdev_param_mcast_rc_stale_period, + wmi_vdev_param_enable_multi_group_key, + wmi_vdev_param_max_group_keys, + wmi_vdev_param_enable_mcast_rc, + wmi_vdev_param_6ghz_params, + wmi_vdev_param_enable_disable_roam_reason_vsie, + wmi_vdev_param_nan_config_features, +} wmi_conv_vdev_param_id; + +/** + * Host based ENUM IDs for service bits to abstract target enums + */ +typedef enum { + wmi_service_beacon_offload = 0, + wmi_service_scan_offload, + wmi_service_roam_offload, + wmi_service_bcn_miss_offload, + wmi_service_sta_pwrsave, + wmi_service_sta_advanced_pwrsave, + wmi_service_ap_uapsd, + wmi_service_ap_dfs, + wmi_service_11ac, + wmi_service_blockack, + wmi_service_phyerr, + wmi_service_bcn_filter, + wmi_service_rtt, + wmi_service_ratectrl, + wmi_service_wow, + wmi_service_ratectrl_cache, + wmi_service_iram_tids, + wmi_service_burst, + wmi_service_smart_antenna_sw_support, + wmi_service_gtk_offload, + wmi_service_scan_sch, + wmi_service_csa_offload, + wmi_service_chatter, + wmi_service_coex_freqavoid, + wmi_service_packet_power_save, + wmi_service_force_fw_hang, + wmi_service_smart_antenna_hw_support, + wmi_service_gpio, + wmi_sta_uapsd_basic_auto_trig, + wmi_sta_uapsd_var_auto_trig, + wmi_service_sta_keep_alive, + wmi_service_tx_encap, + wmi_service_ap_ps_detect_out_of_sync, + wmi_service_early_rx, + wmi_service_enhanced_proxy_sta, + wmi_service_tt, + wmi_service_atf, + wmi_service_peer_caching, + wmi_service_coex_gpio, + wmi_service_aux_spectral_intf, + wmi_service_aux_chan_load_intf, + wmi_service_bss_channel_info_64, + wmi_service_ext_res_cfg_support, + wmi_service_mesh, + wmi_service_restrt_chnl_support, + wmi_service_roam_scan_offload, + wmi_service_arpns_offload, + wmi_service_nlo, + wmi_service_sta_dtim_ps_modulated_dtim, + wmi_service_sta_smps, + wmi_service_fwtest, + wmi_service_sta_wmmac, + wmi_service_tdls, + wmi_service_mcc_bcn_interval_change, + wmi_service_adaptive_ocs, + wmi_service_ba_ssn_support, + wmi_service_filter_ipsec_natkeepalive, + wmi_service_wlan_hb, + wmi_service_lte_ant_share_support, + wmi_service_batch_scan, + wmi_service_qpower, + wmi_service_plmreq, + wmi_service_thermal_mgmt, + wmi_service_rmc, + wmi_service_mhf_offload, + wmi_service_coex_sar, + wmi_service_bcn_txrate_override, + wmi_service_nan, + wmi_service_l1ss_stat, + wmi_service_estimate_linkspeed, + wmi_service_obss_scan, + wmi_service_tdls_offchan, + wmi_service_tdls_uapsd_buffer_sta, + wmi_service_tdls_uapsd_sleep_sta, + wmi_service_ibss_pwrsave, + wmi_service_lpass, + wmi_service_extscan, + wmi_service_d0wow, + wmi_service_hsoffload, + wmi_service_roam_ho_offload, + wmi_service_rx_full_reorder, + wmi_service_dhcp_offload, + wmi_service_sta_rx_ipa_offload_support, + wmi_service_mdns_offload, + wmi_service_sap_auth_offload, + wmi_service_dual_band_simultaneous_support, + wmi_service_ocb, + wmi_service_ap_arpns_offload, + wmi_service_per_band_chainmask_support, + wmi_service_packet_filter_offload, + wmi_service_mgmt_tx_htt, + wmi_service_mgmt_tx_wmi, + wmi_service_ext_msg, + wmi_service_mawc, + + wmi_service_peer_stats, + wmi_service_mesh_11s, + wmi_service_periodic_chan_stat_support, + wmi_service_tx_mode_push_only, + wmi_service_tx_mode_push_pull, + wmi_service_tx_mode_dynamic, + wmi_service_check_cal_version, + wmi_service_btcoex_duty_cycle, + wmi_service_4_wire_coex_support, + wmi_service_multiple_vdev_restart, + wmi_service_peer_assoc_conf, + wmi_service_egap, + wmi_service_sta_pmf_offload, + wmi_service_unified_wow_capability, + wmi_service_enterprise_mesh, + wmi_service_apf_offload, + wmi_service_sync_delete_cmds, + wmi_service_ratectrl_limit_max_min_rates, + wmi_service_nan_data, + wmi_service_nan_rtt, + wmi_service_11ax, + wmi_service_deprecated_replace, + wmi_service_tdls_conn_tracker_in_host_mode, + wmi_service_enhanced_mcast_filter, + wmi_service_half_rate_quarter_rate_support, + wmi_service_vdev_rx_filter, + wmi_service_p2p_listen_offload_support, + wmi_service_mark_first_wakeup_packet, + wmi_service_multiple_mcast_filter_set, + wmi_service_host_managed_rx_reorder, + wmi_service_flash_rdwr_support, + wmi_service_wlan_stats_report, + wmi_service_tx_msdu_id_new_partition_support, + wmi_service_dfs_phyerr_offload, + wmi_service_rcpi_support, + wmi_service_fw_mem_dump_support, + wmi_service_peer_stats_info, + wmi_service_regulatory_db, + wmi_service_11d_offload, + wmi_service_hw_data_filtering, + wmi_service_pkt_routing, + wmi_service_offchan_tx_wmi, + wmi_service_chan_load_info, + wmi_service_extended_nss_support, + wmi_service_ack_timeout, + wmi_service_widebw_scan, + wmi_service_bcn_offload_start_stop_support, + wmi_service_offchan_data_tid_support, + wmi_service_support_dma, + wmi_service_8ss_tx_bfee, + wmi_service_fils_support, + wmi_service_mawc_support, + wmi_service_wow_wakeup_by_timer_pattern, + wmi_service_11k_neighbour_report_support, + wmi_service_ap_obss_detection_offload, + wmi_service_bss_color_offload, + wmi_service_gmac_offload_support, + wmi_service_host_dfs_check_support, + wmi_service_dual_beacon_on_single_mac_scc_support, + wmi_service_dual_beacon_on_single_mac_mcc_support, + wmi_service_twt_requestor, + wmi_service_twt_responder, + wmi_service_listen_interval_offload_support, + +#ifdef OL_ATH_SMART_LOGGING + wmi_service_smart_logging_support, +#endif + wmi_service_infra_mbssid, + wmi_service_esp_support, + wmi_service_obss_spatial_reuse, + wmi_service_per_vdev_chain_support, + wmi_service_new_htt_msg_format, + wmi_service_peer_unmap_cnf_support, + wmi_service_beacon_reception_stats, + wmi_service_vdev_latency_config, + wmi_service_nan_dbs_support, + wmi_service_ndi_dbs_support, + wmi_service_nan_sap_support, + wmi_service_ndi_sap_support, + wmi_service_nan_disable_support, + wmi_service_sta_plus_sta_support, + wmi_service_hw_db2dbm_support, + wmi_service_wlm_stats_support, + wmi_service_ul_ru26_allowed, + wmi_service_cfr_capture_support, + wmi_service_bcast_twt_support, + wmi_service_wpa3_ft_sae_support, + wmi_service_wpa3_ft_suite_b_support, + wmi_service_ft_fils, + wmi_service_adaptive_11r_support, + wmi_service_data_stall_recovery_support, + wmi_service_tx_compl_tsf64, + wmi_service_vdev_delete_all_peer, + wmi_service_three_way_coex_config_legacy, + wmi_service_rx_fse_support, + wmi_service_dynamic_hw_mode, + wmi_service_sae_roam_support, + wmi_service_owe_roam_support, + wmi_service_ext2_msg, + wmi_service_6ghz_support, + wmi_service_bw_165mhz_support, + wmi_service_packet_capture_support, + wmi_service_nan_vdev, + wmi_service_multiple_vdev_restart_ext, + wmi_service_peer_delete_no_peer_flush_tids_cmd, + wmi_service_time_sync_ftm, + wmi_service_nss_ratio_to_host_support, + wmi_roam_scan_chan_list_to_host_support, + wmi_service_sta_nan_ndi_four_port, + wmi_service_host_scan_stop_vdev_all, + wmi_services_max, +} wmi_conv_service_ids; +#define WMI_SERVICE_UNAVAILABLE 0xFFFF + +/** + * enum WMI_DBG_PARAM - Debug params + * @WMI_DBGLOG_LOG_LEVEL: Set the loglevel + * @WMI_DBGLOG_VAP_ENABLE: Enable VAP level debug + * @WMI_DBGLOG_VAP_DISABLE: Disable VAP level debug + * @WMI_DBGLOG_MODULE_ENABLE: Enable MODULE level debug + * @WMI_DBGLOG_MODULE_DISABLE: Disable MODULE level debug + * @WMI_DBGLOG_MOD_LOG_LEVEL: Enable MODULE level debug + * @WMI_DBGLOG_TYPE: set type of the debug output + * @WMI_DBGLOG_REPORT_ENABLE: Enable Disable debug + * @WMI_DBGLOG_MOD_WOW_LOG_LEVEL: set the WOW MODULE debug loglevel + */ +typedef enum { + WMI_DBGLOG_LOG_LEVEL = 0x1, + WMI_DBGLOG_VAP_ENABLE, + WMI_DBGLOG_VAP_DISABLE, + WMI_DBGLOG_MODULE_ENABLE, + WMI_DBGLOG_MODULE_DISABLE, + WMI_DBGLOG_MOD_LOG_LEVEL, + WMI_DBGLOG_TYPE, + WMI_DBGLOG_REPORT_ENABLE, + WMI_DBGLOG_MOD_WOW_LOG_LEVEL +} WMI_DBG_PARAM; + +/** + * struct wmi_host_fw_ver - FW version in non-tlv target + * @sw_version: Versin info + * @sw_version_1: Second dword of version + */ +struct wmi_host_fw_ver { + uint32_t sw_version; + uint32_t sw_version_1; +}; + +/** + * struct wmi_host_fw_abi_ver - FW version in non-tlv target + * @sw_version: Versin info + * @abi_version: ABI version + */ +struct wmi_host_fw_abi_ver { + uint32_t sw_version; + uint32_t abi_version; +}; + +/** + * struct target_resource_config - Resource config sent from host to target + * abstracted out to include union of both configs + * @num_vdevs: Number vdevs configured + * @num_peers: Number of peers + * @num_active_peers: Number of active peers for peer cache + * @num_offload_peers: Number of offload peers + * @num_offload_reorder_buffs: number of offload reorder buffs + * @num_peer_keys: number of peer keys + * @num_tids: number of tids + * @ast_skid_limit: AST skid limit + * @tx_chain_mask: TX chain mask + * @rx_chain_mask: RX chain mask + * @rx_timeout_pri: RX reorder timeout per AC + * @rx_decap_mode: RX decap mode + * @scan_max_pending_req: Scan mac pending req + * @bmiss_offload_max_vdev: Beacom miss offload max vdevs + * @roam_offload_max_vdev: Roam offload max vdevs + * @roam_offload_max_ap_profiles: roam offload max ap profiles + * @num_mcast_groups: num mcast groups + * @num_mcast_table_elems: number of macst table elems + * @mcast2ucast_mode: mcast enhance mode + * @tx_dbg_log_size: DBG log buf size + * @num_wds_entries: number of WDS entries + * @dma_burst_size: DMA burst size. + * @mac_aggr_delim: Mac aggr delim + * @rx_skip_defrag_timeout_dup_detection_check: Defrag dup check in host? + * @vow_config: vow configuration + * @gtk_offload_max_vdev: Max vdevs for GTK offload + * @num_msdu_desc: Number of msdu desc + * @max_frag_entries: Max frag entries + * @agile_capability: Target Agile Capability + * End common + * @max_peer_ext_stats: Max peer EXT stats + * @smart_ant_cap: Smart antenna capabilities + * @BK_Minfree: BIN configuration for BK traffic + * @BE_Minfree: BIN configuration for BE traffic + * @VI_Minfree: BIN configuration for VI traffic + * @VO_Minfree: BIN configuration for VO traffic + * @rx_batchmode: RX batch mode + * @tt_support: Thermal throttling support + * @atf_config: ATF config + * @mgmt_comp_evt_bundle_support: bundle support required for mgmt complete evt + * @tx_msdu_new_partition_id_support: new partiition id support for tx msdu + * @peer_unmap_conf_support: peer unmap conf support in fw + * @iphdr_pad_config: ipheader pad config + * @qwrap_config: Qwrap configuration + * @alloc_frag_desc_for_data_pkt: Frag desc for data + * Added in MCL + * @num_tdls_vdevs: + * @num_tdls_conn_table_entries: + * @beacon_tx_offload_max_vdev: + * @num_multicast_filter_entries: + * @num_wow_filters: + * @num_keep_alive_pattern: + * @keep_alive_pattern_size: + * @max_tdls_concurrent_sleep_sta: + * @max_tdls_concurrent_buffer_sta: + * @wmi_send_separate: + * @num_ocb_vdevs: + * @num_ocb_channels: + * @num_ocb_schedules: + * @num_packet_filters: maximum number of packet filter rules to support + * @num_max_sta_vdevs: maximum number of concurrent station vdevs to support + * @num_ns_ext_tuples_cfg: + * @apf_instruction_size: + * @max_bssid_rx_filters: + * @use_pdev_id: + * @max_num_dbs_scan_duty_cycle: max dbs can duty cycle value + * @cce_disable: disable cce component + * @peer_map_unmap_v2: enable peer map/unmap version 2 messaging + * @twt_ap_pdev_count: Number of MAC on which AP TWT feature is supported + * @twt_ap_sta_count: Max no of STA with which TWT sessions can be formed + * by the AP + * @max_bssid_indicator: max number of MBSS VAPs + * @three_way_coex_config_legacy_en: enable three way coex legacy feature + * @max_num_group_keys: max number of group keys supported for VLAN + * @re_ul_resp: enable 11ax UL response feature (UL-OFDMA) for repeater + * @ipa_disable: disable IPA feature + * @ast_1_valid_mask_enable: mask to enable ast index 1 + * @ast_2_valid_mask_enable: mask to enable ast index 2 + * @ast_3_valid_mask_enable: mask to enable ast index 3 + * @ast_0_flow_mask_enable: mask to enable flow support for ast index 0 + * @ast_1_flow_mask_enable: mask to enable flow support for ast index 1 + * @ast_2_flow_mask_enable: mask to enable flow support for ast index 2 + * @ast_3_flow_mask_enable: mask to enable flow support for ast index 3 + * @ast_tid_high_mask_enable: enable tid valid mask for high priority flow + * @ast_tid_low_mask_enable: enable tid valid mask for low priority flow + * @nan_separate_iface_support: Separate iface creation for NAN + * @time_sync_ftm: enable ftm based time sync + * @max_ndp_sessions: Max ndp sessions support + * @max_ndi: max number of ndi host supports + */ +typedef struct { + uint32_t num_vdevs; + uint32_t num_peers; + uint32_t num_active_peers; + uint32_t num_offload_peers; + uint32_t num_offload_reorder_buffs; + uint32_t num_peer_keys; + uint32_t num_tids; + uint32_t ast_skid_limit; + uint32_t tx_chain_mask; + uint32_t rx_chain_mask; + uint32_t rx_timeout_pri[4]; + uint32_t rx_decap_mode; + uint32_t scan_max_pending_req; + uint32_t bmiss_offload_max_vdev; + uint32_t roam_offload_max_vdev; + uint32_t roam_offload_max_ap_profiles; + uint32_t num_mcast_groups; + uint32_t num_mcast_table_elems; + uint32_t mcast2ucast_mode; + uint32_t tx_dbg_log_size; + uint32_t num_wds_entries; + uint32_t dma_burst_size; + uint32_t mac_aggr_delim; + uint32_t rx_skip_defrag_timeout_dup_detection_check; + uint32_t vow_config; + uint32_t gtk_offload_max_vdev; + uint32_t num_msdu_desc; /* Number of msdu desc */ + uint32_t max_frag_entries; + uint32_t scheduler_params; + uint32_t agile_capability; + /* End common */ + + /* Added for Beeliner */ + uint32_t max_peer_ext_stats; + uint32_t smart_ant_cap; + uint32_t BK_Minfree; + uint32_t BE_Minfree; + uint32_t VI_Minfree; + uint32_t VO_Minfree; + uint32_t rx_batchmode; + uint32_t tt_support; + uint32_t atf_config:1, + mgmt_comp_evt_bundle_support:1, + tx_msdu_new_partition_id_support:1, + new_htt_msg_format:1, + peer_unmap_conf_support:1, + pktcapture_support:1; + uint32_t iphdr_pad_config; + uint32_t + qwrap_config:16, + alloc_frag_desc_for_data_pkt:16; + + /* Added in MCL */ + uint32_t num_tdls_vdevs; + uint32_t num_tdls_conn_table_entries; + uint32_t beacon_tx_offload_max_vdev; + uint32_t num_multicast_filter_entries; + uint32_t num_wow_filters; + uint32_t num_keep_alive_pattern; + uint32_t keep_alive_pattern_size; + uint32_t max_tdls_concurrent_sleep_sta; + uint32_t max_tdls_concurrent_buffer_sta; + uint32_t wmi_send_separate; + uint32_t num_ocb_vdevs; + uint32_t num_ocb_channels; + uint32_t num_ocb_schedules; + uint32_t num_packet_filters; + uint32_t num_max_sta_vdevs; + uint32_t num_ns_ext_tuples_cfg; + uint32_t apf_instruction_size; + uint32_t max_bssid_rx_filters; + uint32_t use_pdev_id; + uint32_t max_num_dbs_scan_duty_cycle; + bool cce_disable; + bool peer_map_unmap_v2; + uint32_t twt_ap_pdev_count; + uint32_t twt_ap_sta_count; + uint32_t max_bssid_indicator; + uint32_t eapol_minrate_set:1, + eapol_minrate_ac_set:2; + bool tstamp64_en; + bool three_way_coex_config_legacy_en; + uint32_t max_num_group_keys; + uint32_t re_ul_resp; + bool ipa_disable; + uint32_t ast_1_valid_mask_enable:1, + ast_2_valid_mask_enable:1, + ast_3_valid_mask_enable:1; + uint32_t ast_0_flow_mask_enable:4, + ast_1_flow_mask_enable:4, + ast_2_flow_mask_enable:4, + ast_3_flow_mask_enable:4, + ast_tid_high_mask_enable:8, + ast_tid_low_mask_enable:8; + bool nan_separate_iface_support; + bool time_sync_ftm; + uint32_t max_ndp_sessions; + uint32_t max_ndi; +} target_resource_config; + +/** + * struct wds_addr_event - WDS addr event structure + * @event_type: event type add/delete + * @peer_mac: peer mac + * @dest_mac: destination mac address + * @vdev_id: vdev id + */ +typedef struct { + uint32_t event_type[4]; + u_int8_t peer_mac[QDF_MAC_ADDR_SIZE]; + u_int8_t dest_mac[QDF_MAC_ADDR_SIZE]; + uint32_t vdev_id; +} wds_addr_event_t; + +/** + * Enum replicated for host abstraction with FW + */ +typedef enum { + /* Event respose of START CMD */ + WMI_HOST_VDEV_START_RESP_EVENT = 0, + /* Event respose of RESTART CMD */ + WMI_HOST_VDEV_RESTART_RESP_EVENT, +} WMI_HOST_START_EVENT_PARAM; + +/** + * struct wmi_host_vdev_start_resp - VDEV start response + * @vdev_id: vdev id + * @requestor_id: requestor id that requested the VDEV start request + * @resp_type: Respose of Event type START/RESTART + * @status: status of the response + * @chain_mask: Vdev chain mask + * @smps_mode: Vdev mimo power save mode + * @mac_id: mac_id field contains the MAC identifier that the + * VDEV is bound to. The valid range is 0 to (num_macs-1). + * @cfgd_tx_streams: Configured Transmit Streams + * @cfgd_rx_streams: Configured Receive Streams + * @max_allowed_tx_power: max tx power allowed + */ +typedef struct { + uint32_t vdev_id; + uint32_t requestor_id; + WMI_HOST_START_EVENT_PARAM resp_type; + uint32_t status; + uint32_t chain_mask; + uint32_t smps_mode; + uint32_t mac_id; + uint32_t cfgd_tx_streams; + uint32_t cfgd_rx_streams; + uint32_t max_allowed_tx_power; +} wmi_host_vdev_start_resp; + +/** + * struct wmi_host_vdev_delete_resp - VDEV delete response + * @vdev_id: vdev id + */ +struct wmi_host_vdev_delete_resp { + uint32_t vdev_id; +}; + +/** + * struct wmi_host_roam_event - host roam event param + * @vdev_id: vdev id + * @reason: roam reason + * @rssi: RSSI + */ +typedef struct { + uint32_t vdev_id; + uint32_t reason; + uint32_t rssi; +} wmi_host_roam_event; + +/** + * ENUM wmi_host_scan_event_type - Scan event type + */ +enum wmi_host_scan_event_type { + WMI_HOST_SCAN_EVENT_STARTED = 0x1, + WMI_HOST_SCAN_EVENT_COMPLETED = 0x2, + WMI_HOST_SCAN_EVENT_BSS_CHANNEL = 0x4, + WMI_HOST_SCAN_EVENT_FOREIGN_CHANNEL = 0x8, + WMI_HOST_SCAN_EVENT_DEQUEUED = 0x10, + WMI_HOST_SCAN_EVENT_PREEMPTED = 0x20, + WMI_HOST_SCAN_EVENT_START_FAILED = 0x40, + WMI_HOST_SCAN_EVENT_RESTARTED = 0x80, + WMI_HOST_SCAN_EVENT_FOREIGN_CHANNEL_EXIT = 0x100, + WMI_HOST_SCAN_EVENT_INVALID = 0x200, + WMI_HOST_SCAN_EVENT_GPIO_TIMEOUT = 0x400, + WMI_HOST_SCAN_EVENT_MAX = 0x8000 +}; + +/** + * ENUM wmi_host_scan_completion_reason - Scan completion event type + */ +enum wmi_host_scan_completion_reason { + /** scan related events */ + WMI_HOST_SCAN_REASON_NONE = 0xFF, + WMI_HOST_SCAN_REASON_COMPLETED = 0, + WMI_HOST_SCAN_REASON_CANCELLED = 1, + WMI_HOST_SCAN_REASON_PREEMPTED = 2, + WMI_HOST_SCAN_REASON_TIMEDOUT = 3, + WMI_HOST_SCAN_REASON_INTERNAL_FAILURE = 4, + WMI_HOST_SCAN_REASON_MAX, +}; + +/** + * struct wmi_host_scan_event - Scan event response from target + * @event: event type + * @reason: Reason for event + * @channel_freq: channel frequency + * @requestor: requestor id + * @scan_id: scan id + * @vdev_id: vdev id + */ +typedef struct { + uint32_t event; + uint32_t reason; + uint32_t channel_freq; + uint32_t requestor; + uint32_t scan_id; + uint32_t vdev_id; +} wmi_host_scan_event; + +/** + * struct wmi_host_pdev_reserve_ast_entry_event - Reserve AST entry + * @result: result + */ +typedef struct { + uint32_t result; +} wmi_host_pdev_reserve_ast_entry_event; + +/** + * struct wmi_host_mcast_ageout_entry - mcast aged-out entry + * @grp_addr: IPv4/6 mcast group addr + * @vdev_id: vdev id + */ +typedef struct { + uint8_t grp_addr[16]; + uint32_t vdev_id; +} wmi_host_mcast_ageout_entry; + +/** + * struct wmi_host_mcast_list_ageout_event - List of mcast entry aged-out + * @num_entry: Number of mcast entries timed-out + * @entry: List of wmi_host_mcast_ageout_entry + */ +typedef struct { + uint32_t num_entry; + wmi_host_mcast_ageout_entry entry[1]; +} wmi_host_mcast_list_ageout_event; + +/** + * struct wmi_host_pdev_nfcal_power_all_channels_event - NF cal event data + * @nfdbr: + * chan[0 ~ 7]: {NFCalPower_chain0, NFCalPower_chain1, + * NFCalPower_chain2, NFCalPower_chain3, + * NFCalPower_chain4, NFCalPower_chain5, + * NFCalPower_chain6, NFCalPower_chain7}, + * @nfdbm: + * chan[0 ~ 7]: {NFCalPower_chain0, NFCalPower_chain1, + * NFCalPower_chain2, NFCalPower_chain3, + * NFCalPower_chain4, NFCalPower_chain5, + * NFCalPower_chain6, NFCalPower_chain7}, + * @freqnum: + * chan[0 ~ 7]: frequency number + * @pdev_id: pdev_id + */ +typedef struct { + int8_t nfdbr[WMI_HOST_RXG_CAL_CHAN_MAX * WMI_HOST_MAX_NUM_CHAINS]; + int8_t nfdbm[WMI_HOST_RXG_CAL_CHAN_MAX * WMI_HOST_MAX_NUM_CHAINS]; + uint32_t freqnum[WMI_HOST_RXG_CAL_CHAN_MAX]; + uint32_t pdev_id; +} wmi_host_pdev_nfcal_power_all_channels_event; + +/** + * enum wmi_host_pdev_tpc_event_offset: offsets of TPC events + * @WMI_HOST_TX_POWER_MAX: offset of max tx power + * @WMI_HOST_TX_POWER_MIN: offset of min tx power + * @WMI_HOST_TX_POWER_LEN: size of tpc values + */ +enum wmi_host_pdev_tpc_event_offset { + WMI_HOST_TX_POWER_MAX, + WMI_HOST_TX_POWER_MIN, + WMI_HOST_TX_POWER_LEN, +}; + +/** + * struct wmi_host_pdev_tpc_event - WMI host pdev TPC event + * @pdev_id: pdev_id + * @tpc: + */ +typedef struct { + uint32_t pdev_id; + int32_t tpc[WMI_HOST_TX_POWER_LEN]; +} wmi_host_pdev_tpc_event; + +/** + * struct wmi_host_pdev_generic_buffer_event + * @buf_type: Buffer type + * @frag_id: Frag id + * @more_frag: more frags pending + * @buf_len: buffer length + * @buf_info: variable length buffer + */ +typedef struct { + uint32_t buf_type; + uint32_t frag_id; + uint32_t more_frag; + uint32_t buf_len; + uint32_t buf_info[1]; +} wmi_host_pdev_generic_buffer_event; +/** + * Enum for host buffer event + */ +enum { + WMI_HOST_BUFFER_TYPE_RATEPWR_TABLE, + WMI_HOST_BUFFER_TYPE_CTL_TABLE, +}; + +/** + * struct wmi_host_pdev_tpc_config_event - host pdev tpc config event + * @pdev_id: pdev_id + * @regDomain: + * @chanFreq: + * @phyMode: + * @twiceAntennaReduction: + * @twiceMaxRDPower: + * @twiceAntennaGain: + * @powerLimit: + * @rateMax: + * @numTxChain: + * @ctl: + * @flags: + * @maxRegAllowedPower: + * @maxRegAllowedPowerAGCDD: + * @maxRegAllowedPowerAGSTBC: + * @maxRegAllowedPowerAGTXBF: + * @ratesArray: + */ +typedef struct { + uint32_t pdev_id; + uint32_t regDomain; + uint32_t chanFreq; + uint32_t phyMode; + uint32_t twiceAntennaReduction; + uint32_t twiceMaxRDPower; + int32_t twiceAntennaGain; + uint32_t powerLimit; + uint32_t rateMax; + uint32_t numTxChain; + uint32_t ctl; + uint32_t flags; + int8_t maxRegAllowedPower[WMI_HOST_TPC_TX_NUM_CHAIN]; + int8_t maxRegAllowedPowerAGCDD[WMI_HOST_TPC_TX_NUM_CHAIN][WMI_HOST_TPC_TX_NUM_CHAIN]; + int8_t maxRegAllowedPowerAGSTBC[WMI_HOST_TPC_TX_NUM_CHAIN][WMI_HOST_TPC_TX_NUM_CHAIN]; + int8_t maxRegAllowedPowerAGTXBF[WMI_HOST_TPC_TX_NUM_CHAIN][WMI_HOST_TPC_TX_NUM_CHAIN]; + uint8_t ratesArray[WMI_HOST_TPC_RATE_MAX]; +} wmi_host_pdev_tpc_config_event; +/** + * Enums for TPC event + */ +typedef enum { + WMI_HOST_TPC_CONFIG_EVENT_FLAG_TABLE_CDD = 0x1, + WMI_HOST_TPC_CONFIG_EVENT_FLAG_TABLE_STBC = 0x2, + WMI_HOST_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF = 0x4, +} WMI_HOST_TPC_CONFIG_EVENT_FLAG; + +/** + * Medium Utilization evaluation algorithms + * These algorithms can be complementary rather than exclusive. + */ +typedef enum { + WMI_HOST_MU_BASIC_ALGO = 0x1, + WMI_HOST_MU_PER_BSSID_ALGO = 0x2, + WMI_HOST_MU_HIDDEN_NODE_ALGO = 0x4, +} WMI_HOST_MU_ALGO_TYPE; +/* max MU alg combinations supported by target */ +#define WMI_HOST_MU_MAX_ALGO_TYPE 3 + +/** + * struct wmi_host_mu_db_entry + * @event_type: 0=AP, 1=STA, 2=Small Cell(SC) + * @bssid_mac_addr: Transmitter MAC if entry is WiFi node. PLMNID if SC + * @tx_addr: Transmitter MAC if entry is WiFi node. PLMNID if SC + * @avg_duration_us: Avg. duration for which node was transmitting + * @avg_rssi: Avg. RSSI of all TX packets by node. Unit dBm + * @mu_percent: % medium utilization by node + */ +typedef struct { + uint32_t entry_type; + wmi_host_mac_addr bssid_mac_addr; + wmi_host_mac_addr tx_addr; + uint32_t avg_duration_us; + uint32_t avg_rssi; + uint32_t mu_percent; +} wmi_host_mu_db_entry; + +/** + * struct wmi_host_mu_report_event - WMI_MU_REPORT_EVENTID + * @mu_request_id: request id + * @status_reason: MU_STATUS_REASON + * @total_mu: MU_ALG_TYPE combinations + * @num_active_bssid: number of active bssid + * @hidden_node_mu : hidden node algo MU per bin + * @num_TA_entries : No. of entries found in MU db report + */ +typedef struct { + uint32_t mu_request_id; + uint32_t status_reason; + uint32_t total_mu[WMI_HOST_MU_MAX_ALGO_TYPE]; + uint32_t num_active_bssid; + uint32_t hidden_node_mu[LTEU_MAX_BINS]; + uint32_t num_TA_entries; +} wmi_host_mu_report_event; + +/** + * struct wmi_host_mgmt_tx_compl_event - TX completion event + * @desc_id: from tx_send_cmd + * @status: WMI_MGMT_TX_COMP_STATUS_TYPE + * @pdev_id: pdev_id + * @ppdu_id: ppdu_id + * @retries_count: retries count + * @tx_tsf: 64 bits completion timestamp + */ +typedef struct { + uint32_t desc_id; + uint32_t status; + uint32_t pdev_id; + uint32_t ppdu_id; + uint32_t retries_count; + uint64_t tx_tsf; +} wmi_host_mgmt_tx_compl_event; + +/** + * struct wmi_host_offchan_data_tx_compl_event - TX completion event + * @desc_id: from tx_send_cmd + * @status: VWMI_MGMT_TX_COMP_STATUS_TYPE + * @pdev_id: pdev_id + */ +struct wmi_host_offchan_data_tx_compl_event { + uint32_t desc_id; + uint32_t status; + uint32_t pdev_id; +}; + +#define WMI_HOST_TIM_BITMAP_ARRAY_SIZE 17 + +/** + * struct wmi_host_tim_info - TIM info in SWBA event + * @tim_len: TIM length + * @tim_mcast: + * @tim_bitmap: TIM bitmap + * @tim_changed: TIM changed + * @tim_num_ps_pending: TIM num PS sta pending + * @vdev_id: Vdev id + */ +typedef struct { + uint32_t tim_len; + uint32_t tim_mcast; + uint32_t tim_bitmap[WMI_HOST_TIM_BITMAP_ARRAY_SIZE]; + uint32_t tim_changed; + uint32_t tim_num_ps_pending; + uint32_t vdev_id; +} wmi_host_tim_info; + +/** + * struct wmi_host_quiet_info - Quiet info in SWBA event + * @vdev_id: vdev_id for quiet info structure + * @tbttcount: quiet start tbtt count + * @period: Beacon interval between quiets + * @duration: TUs of each quiet + * @offset: TUs from TBTT to quiet start + */ +typedef struct { + uint32_t vdev_id; + uint32_t tbttcount; + uint32_t period; + uint32_t duration; + uint32_t offset; +} wmi_host_quiet_info; + +/** + * struct wmi_host_p2p_noa_descriptor - NoA desc in SWBA event + * @type_count: Absence count + * @duration: NoA duration + * @interval: NoA interval + * @start_time: start time + */ +typedef struct { + uint32_t type_count; + uint32_t duration; + uint32_t interval; + uint32_t start_time; +} wmi_host_p2p_noa_descriptor; +/* Maximum number of NOA Descriptors supported */ +#define WMI_HOST_P2P_MAX_NOA_DESCRIPTORS 4 +/** + * struct wmi_host_p2p_noa_info - p2p noa information + * @modified: NoA modified + * @index: Index + * @oppPS: Oppurtunstic ps + * @ctwindow: CT window + * @num_descriptors: number of descriptors + * @noa_descriptors: noa descriptors + * @vdev_id: Vdev id + */ +typedef struct { + uint8_t modified; + uint8_t index; + uint8_t oppPS; + uint8_t ctwindow; + uint8_t num_descriptors; + wmi_host_p2p_noa_descriptor + noa_descriptors[WMI_HOST_P2P_MAX_NOA_DESCRIPTORS]; + uint32_t vdev_id; +} wmi_host_p2p_noa_info; + +/** + * struct wmi_host_peer_sta_kickout_event + * @peer_macaddr: peer mac address + * @reason: kickout reason + * @rssi: rssi + * @pdev_id: pdev_id + */ +typedef struct { + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t reason; + uint32_t rssi; +} wmi_host_peer_sta_kickout_event; + +/** + * struct wmi_host_peer_sta_ps_statechange_event - ST ps state change event + * @peer_macaddr: peer mac address + * @peer_ps_stats: peer PS state + * @pdev_id: pdev_id + */ +typedef struct { + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t peer_ps_state; +} wmi_host_peer_sta_ps_statechange_event; + +/* Maximum CCK, OFDM rates supported */ +#define WMI_SA_MAX_CCK_OFDM_RATES 12 +/* Maximum MCS rates supported; 4 rates in each dword */ +/* Maximum MCS ratecodes with 11ax */ +#define WMI_SA_MAX_MCS_RATES 96 +#define WMI_SA_MAX_RATE_COUNTERS 4 +/* Maximum rate series used for transmission */ +#define SA_MAX_RATE_SERIES 2 + +#define SA_MAX_LEGACY_RATE_DWORDS 3 +#define SA_MAX_HT_RATE_DWORDS 10 +#define SA_BYTES_IN_DWORD 4 +#define SA_MASK_BYTE 0xff +#define SA_MASK_BYTE3 0xff0000 + +/* Support 11ax 11bit ratecode */ +#define SA_MASK_RCODE 0x7ff +#define SA_WORD_BITS_LEN 16 +#define SA_WORDS_IN_DWORD 2 +#define SA_MAX_LEGACY_RATE_WORDS 6 +#define SA_MAX_HT_RATE_WORDS 48 + +/* TODO: ratecode_160 needs to add for future chips */ +/** + * struct wmi_sa_rate_cap - smart antenna rat capabilities + * @pdev_id: pdev_id + * @ratecode_legacy: Rate code array for CCK OFDM + * @ratecode_20: Rate code array for 20MHz BW + * @ratecode_40: Rate code array for 40MHz BW + * @ratecode_80: Rate code array for 80MHz BW + * @ratecount: Max Rate count for each mode + */ +typedef struct { + uint16_t ratecode_legacy[WMI_SA_MAX_CCK_OFDM_RATES]; + uint16_t ratecode_20[WMI_SA_MAX_MCS_RATES]; + uint16_t ratecode_40[WMI_SA_MAX_MCS_RATES]; + uint16_t ratecode_80[WMI_SA_MAX_MCS_RATES]; + uint8_t ratecount[WMI_SA_MAX_RATE_COUNTERS]; +} wmi_sa_rate_cap; + +/** Preamble types to be used with VDEV fixed rate configuration */ +typedef enum { + WMI_HOST_RATE_PREAMBLE_OFDM, + WMI_HOST_RATE_PREAMBLE_CCK, + WMI_HOST_RATE_PREAMBLE_HT, + WMI_HOST_RATE_PREAMBLE_VHT, + WMI_HOST_RATE_PREAMBLE_HE, +} WMI_HOST_RATE_PREAMBLE; + +#define WMI_HOST_FIXED_RATE_NONE (0xff) + +/** preablbe long */ +#define WMI_HOST_VDEV_PREAMBLE_LONG 0x1 +/** preablbe short */ +#define WMI_HOST_VDEV_PREAMBLE_SHORT 0x2 +/** found a better AP */ +#define WMI_HOST_ROAM_REASON_BETTER_AP 0x1 +/** beacon miss detected */ +#define WMI_HOST_ROAM_REASON_BMISS 0x2 +/** deauth/disassoc received */ +#define WMI_HOST_ROAM_REASON_DEAUTH 0x2 +/** connected AP's low rssi condition detected */ +#define WMI_HOST_ROAM_REASON_LOW_RSSI 0x3 +/** found another AP that matches SSID and Security profile in + * WMI_ROAM_AP_PROFILE, found during scan triggered upon FINAL_BMISS + */ +#define WMI_HOST_ROAM_REASON_SUITABLE_AP 0x4 +/** LFR3.0 roaming failed, indicate the disconnection to host */ +#define WMI_HOST_ROAM_REASON_HO_FAILED 0x5 + +/** values for vdev_type */ +#define WMI_HOST_VDEV_TYPE_AP 0x1 +#define WMI_HOST_VDEV_TYPE_STA 0x2 +#define WMI_HOST_VDEV_TYPE_IBSS 0x3 +#define WMI_HOST_VDEV_TYPE_MONITOR 0x4 + +/** values for vdev_subtype */ +#define WMI_HOST_VDEV_SUBTYPE_P2P_DEVICE 0x1 +#define WMI_HOST_VDEV_SUBTYPE_P2P_CLIENT 0x2 +#define WMI_HOST_VDEV_SUBTYPE_P2P_GO 0x3 +#define WMI_HOST_VDEV_SUBTYPE_PROXY_STA 0x4 +#define WMI_HOST_VDEV_SUBTYPE_MESH 0x5 + +#define WMI_HOST_MGMT_TID 17 +/* Disable aging & learning */ +#define WMI_HOST_WDS_FLAG_STATIC 0x1 + +/** + * Peer param enum abstracted from target + * @WMI_HOST_PEER_MIMO_PS_STATE: mimo powersave state + * @WMI_HOST_PEER_AMPDU: enable/disable AMPDU . initial value (enabled) + * @WMI_HOST_PEER_AUTHORIZE: authorize/unauthorize peer. + * initial value is unauthorized (0) + * @WMI_HOST_PEER_CHWIDTH: Peer channel bandwidth + * @WMI_HOST_PEER_NSS: peer NSS + * @WMI_HOST_PEER_USE_4ADDR: USE 4 ADDR + * @WMI_HOST_PEER_EXT_STATS_ENABLE: Enable extended peer stats + * @WMI_HOST_PEER_USE_FIXED_PWR: Use FIXED Pwr, + * @WMI_HOST_PEER_PARAM_FIXED_RATE: Set peer fixed rate + * @WMI_HOST_PEER_SET_MU_WHITELIST: Whitelist peer TIDs + * @WMI_HOST_PEER_MEMBERSHIP: set group membership status + * @WMI_HOST_PEER_USERPOS: User POS + * @WMI_HOST_PEER_CRIT_PROTO_HINT_ENABLED: Critical Protocol Hint enabled + * @WMI_HOST_PEER_TX_FAIL_CNT_THR: Tx Fail count threshold + * @WMI_HOST_PEER_SET_HW_RETRY_CTS2S: Set hardware retry CTS to self + * @WMI_HOST_PEER_IBSS_ATIM_WINDOW_LENGTH: IBSS ATIM window length + * @WMI_HOST_PEER_PHYMODE: Peer Phymode + * @WMI_HOST_PEER_SET_MAC_TX_RATE: Set MAC Tx rate + * @WMI_HOST_PEER_SET_DEFAULT_ROUTING: Set default Rx routing + * @WMI_HOST_PEER_SET_MIN_TX_RATE: Set Minimum T rate + * @WMI_HOST_PEER_NSS_VHT160: peer NSS for 160Mhz + * @WMI_HOST_PEER_NSS_VHT80_80: peer NSS for 80+80MHz + * @WMI_HOST_PEER_PARAM_SU_TXBF_SOUNDING_INTERVAL: Set SU sounding interval + * @WMI_HOST_PEER_PARAM_MU_TXBF_SOUNDING_INTERVAL: Set MU sounding interval + * @WMI_HOST_PEER_PARAM_TXBF_SOUNDING_ENABLE: Enable sounding interval set + * @WMI_HOST_PEER_PARAM_MU_ENABLE: Enable MU support + * @WMI_HOST_PEER_PARAM_OFDMA_ENABLE: Enable OFDMA support + * @WMI_HOST_PEER_PARAM_ENABLE_FT: Notify FT roam + */ +enum { + WMI_HOST_PEER_MIMO_PS_STATE = 0x1, + WMI_HOST_PEER_AMPDU, + WMI_HOST_PEER_AUTHORIZE, + WMI_HOST_PEER_CHWIDTH, + WMI_HOST_PEER_NSS, + WMI_HOST_PEER_USE_4ADDR, + WMI_HOST_PEER_EXT_STATS_ENABLE, + WMI_HOST_PEER_USE_FIXED_PWR, + WMI_HOST_PEER_PARAM_FIXED_RATE, + WMI_HOST_PEER_SET_MU_WHITELIST, + WMI_HOST_PEER_MEMBERSHIP, + WMI_HOST_PEER_USERPOS, + WMI_HOST_PEER_CRIT_PROTO_HINT_ENABLED, + WMI_HOST_PEER_TX_FAIL_CNT_THR, + WMI_HOST_PEER_SET_HW_RETRY_CTS2S, + WMI_HOST_PEER_IBSS_ATIM_WINDOW_LENGTH, + WMI_HOST_PEER_PHYMODE, + WMI_HOST_PEER_SET_MAC_TX_RATE, + WMI_HOST_PEER_SET_DEFAULT_ROUTING, + WMI_HOST_PEER_SET_MIN_TX_RATE, + WMI_HOST_PEER_NSS_VHT160, + WMI_HOST_PEER_NSS_VHT80_80, + WMI_HOST_PEER_PARAM_SU_TXBF_SOUNDING_INTERVAL, + WMI_HOST_PEER_PARAM_MU_TXBF_SOUNDING_INTERVAL, + WMI_HOST_PEER_PARAM_TXBF_SOUNDING_ENABLE, + WMI_HOST_PEER_PARAM_MU_ENABLE, + WMI_HOST_PEER_PARAM_OFDMA_ENABLE, + WMI_HOST_PEER_PARAM_ENABLE_FT, +}; +#define WMI_HOST_PEER_MIMO_PS_NONE 0x0 +#define WMI_HOST_PEER_MIMO_PS_STATIC 0x1 +#define WMI_HOST_PEER_MIMO_PS_DYNAMIC 0x2 +typedef enum { + HOST_PLATFORM_HIGH_PERF, + HOST_PLATFORM_LOW_PERF, + HOST_PLATFORM_LOW_PERF_NO_FETCH, +} HOST_PLATFORM_TYPE; + +enum wmi_host_sta_ps_mode { + /** enable power save for the given STA VDEV */ + WMI_HOST_STA_PS_MODE_DISABLED = 0, + /** disable power save for a given STA VDEV */ + WMI_HOST_STA_PS_MODE_ENABLED = 1, +}; +enum wmi_host_sta_powersave_param { + /** + * Controls how frames are retrievd from AP while STA is sleeping + * + * (see enum wmi_sta_ps_param_rx_wake_policy) + */ + WMI_HOST_STA_PS_PARAM_RX_WAKE_POLICY = 0, + + /** + * The STA will go active after this many TX + * + * (see enum wmi_sta_ps_param_tx_wake_threshold) + */ + WMI_HOST_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1, + + /** + * Number of PS-Poll to send before STA wakes up + * + * (see enum wmi_sta_ps_param_pspoll_count) + * + */ + WMI_HOST_STA_PS_PARAM_PSPOLL_COUNT = 2, + + /** + * TX/RX inactivity time in msec before going to sleep. + * + * The power save SM will monitor tx/rx activity on the VDEV, if no + * activity for the specified msec of the parameter + * the Power save SM will go to sleep. + */ + WMI_HOST_STA_PS_PARAM_INACTIVITY_TIME = 3, + + /** + * Set uapsd configuration. + * + * (see enum wmi_sta_ps_param_uapsd) + */ + WMI_HOST_STA_PS_PARAM_UAPSD = 4, +}; +/* prefix used by scan requestor ids on the host + * replicated here form wmi_unified.h*/ +#define WMI_HOST_P_SCAN_REQUESTOR_ID_PREFIX 0xA000 +/* prefix used by scan request ids generated on the host */ +/* host cycles through the lower 12 bits to generate ids */ +#define WMI_HOST_P_SCAN_REQ_ID_PREFIX 0xA000 + +#define WMI_HOST_RC_DS_FLAG 0x01 /* Dual stream flag */ +#define WMI_HOST_RC_CW40_FLAG 0x02 /* CW 40 */ +#define WMI_HOST_RC_SGI_FLAG 0x04 /* Short Guard Interval */ +#define WMI_HOST_RC_HT_FLAG 0x08 /* HT */ +#define WMI_HOST_RC_RTSCTS_FLAG 0x10 /* RTS-CTS */ +#define WMI_HOST_RC_TX_STBC_FLAG 0x20 /* TX STBC */ +#define WMI_HOST_RC_RX_STBC_FLAG 0xC0 /* RX STBC ,2 bits */ +#define WMI_HOST_RC_RX_STBC_FLAG_S 6 /* RX STBC ,2 bits */ +#define WMI_HOST_RC_WEP_TKIP_FLAG 0x100 /* WEP/TKIP encryption */ +#define WMI_HOST_RC_TS_FLAG 0x200 /* Three stream flag */ +#define WMI_HOST_RC_UAPSD_FLAG 0x400 /* UAPSD Rate Control */ + +/** HT Capabilities*/ +#define WMI_HOST_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */ +/* Short Guard Interval with HT20 */ +#define WMI_HOST_HT_CAP_HT20_SGI 0x0002 +#define WMI_HOST_HT_CAP_DYNAMIC_SMPS 0x0004 /* Dynamic MIMO powersave */ +#define WMI_HOST_HT_CAP_TX_STBC 0x0008 /* B3 TX STBC */ +#define WMI_HOST_HT_CAP_TX_STBC_MASK_SHIFT 3 +#define WMI_HOST_HT_CAP_RX_STBC 0x0030 /* B4-B5 RX STBC */ +#define WMI_HOST_HT_CAP_RX_STBC_MASK_SHIFT 4 +#define WMI_HOST_HT_CAP_LDPC 0x0040 /* LDPC supported */ +#define WMI_HOST_HT_CAP_L_SIG_TXOP_PROT 0x0080 /* L-SIG TXOP Protection */ +#define WMI_HOST_HT_CAP_MPDU_DENSITY 0x0700 /* MPDU Density */ +#define WMI_HOST_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8 +#define WMI_HOST_HT_CAP_HT40_SGI 0x0800 +#define WMI_HOST_HT_CAP_RX_LDPC 0x1000 +#define WMI_HOST_HT_CAP_TX_LDPC 0x2000 +#define WMI_HOST_HT_CAP_IBF_BFER 0x4000 + +/* These macros should be used when we wish to advertise STBC support for + * only 1SS or 2SS or 3SS. */ +#define WMI_HOST_HT_CAP_RX_STBC_1SS 0x0010 /* B4-B5 RX STBC */ +#define WMI_HOST_HT_CAP_RX_STBC_2SS 0x0020 /* B4-B5 RX STBC */ +#define WMI_HOST_HT_CAP_RX_STBC_3SS 0x0030 /* B4-B5 RX STBC */ + + +#define WMI_HOST_HT_CAP_DEFAULT_ALL (WMI_HOST_HT_CAP_ENABLED | \ + WMI_HOST_HT_CAP_HT20_SGI | \ + WMI_HOST_HT_CAP_HT40_SGI | \ + WMI_HOST_HT_CAP_TX_STBC | \ + WMI_HOST_HT_CAP_RX_STBC | \ + WMI_HOST_HT_CAP_LDPC) + +/* WMI_HOST_VHT_CAP_* these maps to ieee 802.11ac vht capability information + field. The fields not defined here are not supported, or reserved. + Do not change these masks and if you have to add new one follow the + bitmask as specified by 802.11ac draft. +*/ + +#define WMI_HOST_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003 +#define WMI_HOST_VHT_CAP_RX_LDPC 0x00000010 +#define WMI_HOST_VHT_CAP_SGI_80MHZ 0x00000020 +#define WMI_HOST_VHT_CAP_SGI_160MHZ 0x00000040 +#define WMI_HOST_VHT_CAP_TX_STBC 0x00000080 +#define WMI_HOST_VHT_CAP_RX_STBC_MASK 0x00000300 +#define WMI_HOST_VHT_CAP_RX_STBC_MASK_SHIFT 8 +#define WMI_HOST_VHT_CAP_SU_BFER 0x00000800 +#define WMI_HOST_VHT_CAP_SU_BFEE 0x00001000 +#define WMI_HOST_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000 +#define WMI_HOST_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13 +#define WMI_HOST_VHT_CAP_MAX_SND_DIM_MASK 0x00070000 +#define WMI_HOST_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16 +#define WMI_HOST_VHT_CAP_MU_BFER 0x00080000 +#define WMI_HOST_VHT_CAP_MU_BFEE 0x00100000 +#define WMI_HOST_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000 +#define WMI_HOST_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIT 23 +#define WMI_HOST_VHT_CAP_RX_FIXED_ANT 0x10000000 +#define WMI_HOST_VHT_CAP_TX_FIXED_ANT 0x20000000 + +#define WMI_HOST_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002 + +/* These macros should be used when we wish to advertise STBC support for + * only 1SS or 2SS or 3SS. */ +#define WMI_HOST_VHT_CAP_RX_STBC_1SS 0x00000100 +#define WMI_HOST_VHT_CAP_RX_STBC_2SS 0x00000200 +#define WMI_HOST_VHT_CAP_RX_STBC_3SS 0x00000300 + +#define WMI_HOST_VHT_CAP_DEFAULT_ALL (WMI_HOST_VHT_CAP_MAX_MPDU_LEN_11454 | \ + WMI_HOST_VHT_CAP_SGI_80MHZ | \ + WMI_HOST_VHT_CAP_TX_STBC | \ + WMI_HOST_VHT_CAP_RX_STBC_MASK | \ + WMI_HOST_VHT_CAP_RX_LDPC | \ + WMI_HOST_VHT_CAP_MAX_AMPDU_LEN_EXP | \ + WMI_HOST_VHT_CAP_RX_FIXED_ANT | \ + WMI_HOST_VHT_CAP_TX_FIXED_ANT) + +/* Interested readers refer to Rx/Tx MCS Map definition as defined in + 802.11ac +*/ +#define WMI_HOST_VHT_MAX_MCS_4_SS_MASK(r, ss) ((3 & (r)) << (((ss) - 1) << 1)) +#define WMI_HOST_VHT_MAX_SUPP_RATE_MASK 0x1fff0000 +#define WMI_HOST_VHT_MAX_SUPP_RATE_MASK_SHIFT 16 + +/** U-APSD configuration of peer station from (re)assoc request and TSPECs */ +enum wmi_host_ap_ps_param_uapsd { + WMI_HOST_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0), + WMI_HOST_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1), + WMI_HOST_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2), + WMI_HOST_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3), + WMI_HOST_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4), + WMI_HOST_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5), + WMI_HOST_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6), + WMI_HOST_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7), +}; +/** U-APSD maximum service period of peer station */ +enum wmi_host_ap_ps_peer_param_max_sp { + WMI_HOST_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0, + WMI_HOST_AP_PS_PEER_PARAM_MAX_SP_2 = 1, + WMI_HOST_AP_PS_PEER_PARAM_MAX_SP_4 = 2, + WMI_HOST_AP_PS_PEER_PARAM_MAX_SP_6 = 3, + + /* keep last! */ + MAX_HOST_WMI_AP_PS_PEER_PARAM_MAX_SP, +}; + +#define WMI_HOST_UAPSD_AC_TYPE_DELI 0 +#define WMI_HOST_UAPSD_AC_TYPE_TRIG 1 + +#define WMI_HOST_UAPSD_AC_BIT_MASK(ac, type) \ + ((type == WMI_HOST_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) :\ + (1<<((ac<<1)+1))) + +enum wmi_host_ap_ps_peer_param_wnm_sleep { + WMI_HOST_AP_PS_PEER_PARAM_WNM_SLEEP_ENABLE, + WMI_HOST_AP_PS_PEER_PARAM_WNM_SLEEP_DISABLE, +}; + +enum wmi_host_ap_ps_peer_param { + /** Set uapsd configuration for a given peer. + * + * This will include the delivery and trigger enabled state for every AC. + * The host MLME needs to set this based on AP capability and stations + * request Set in the association request received from the station. + * + * Lower 8 bits of the value specify the UAPSD configuration. + * + * (see enum wmi_ap_ps_param_uapsd) + * The default value is 0. + */ + WMI_HOST_AP_PS_PEER_PARAM_UAPSD = 0, + + /** + * Set the service period for a UAPSD capable station + * + * The service period from wme ie in the (re)assoc request frame. + * + * (see enum wmi_ap_ps_peer_param_max_sp) + */ + WMI_HOST_AP_PS_PEER_PARAM_MAX_SP = 1, + + /** Time in seconds for aging out buffered frames + * for STA in power save */ + WMI_HOST_AP_PS_PEER_PARAM_AGEOUT_TIME = 2, + + /** Specify frame types that are considered SIFS + * RESP trigger frame */ + WMI_HOST_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE = 3, + + /** Specifies the trigger state of TID. + * Valid only for UAPSD frame type */ + WMI_HOST_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD = 4, + + /** Specifies the WNM sleep state of a STA */ + WMI_HOST_AP_PS_PEER_PARAM_WNM_SLEEP = 5, +}; +#define WMI_HOST_RXERR_CRC 0x01 /* CRC error on frame */ +#define WMI_HOST_RXERR_DECRYPT 0x08 /* non-Michael decrypt error */ +#define WMI_HOST_RXERR_MIC 0x10 /* Michael MIC decrypt error */ +#define WMI_HOST_RXERR_KEY_CACHE_MISS 0x20 /* No/incorrect key matter in h/w */ + +enum wmi_host_sta_ps_param_rx_wake_policy { + /* Wake up when ever there is an RX activity on the VDEV. In this mode + * the Power save SM(state machine) will come out of sleep by either + * sending null frame (or) a data frame (with PS==0) in response to TIM + * bit set in the received beacon frame from AP. + */ + WMI_HOST_STA_PS_RX_WAKE_POLICY_WAKE = 0, + + /* Here the power save state machine will not wakeup in response to TIM + * bit, instead it will send a PSPOLL (or) UASPD trigger based on UAPSD + * configuration setup by WMISET_PS_SET_UAPSD WMI command. When all + * access categories are delivery-enabled, the station will send a UAPSD + * trigger frame, otherwise it will send a PS-Poll. + */ + WMI_HOST_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1, +}; +enum wmi_host_sta_ps_param_pspoll_count { + WMI_HOST_STA_PS_PSPOLL_COUNT_NO_MAX = 0, + /* Values greater than 0 indicate the maximum numer of PS-Poll frames FW + * will send before waking up. + */ +}; +/** Number of tx frames/beacon that cause the power save SM to wake up. + * + * Value 1 causes the SM to wake up for every TX. Value 0 has a special + * meaning, It will cause the SM to never wake up. This is useful if you want + * to keep the system to sleep all the time for some kind of test mode . host + * can change this parameter any time. It will affect at the next tx frame. + */ +enum wmi_host_sta_ps_param_tx_wake_threshold { + WMI_HOST_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0, + WMI_HOST_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1, + + /* Values greater than one indicate that many TX attempts per beacon + * interval before the STA will wake up + */ +}; +/* + * Transmit power scale factor. + * + */ +typedef enum { + WMI_HOST_TP_SCALE_MAX = 0, /* no scaling (default) */ + WMI_HOST_TP_SCALE_50 = 1, /* 50% of max (-3 dBm) */ + WMI_HOST_TP_SCALE_25 = 2, /* 25% of max (-6 dBm) */ + WMI_HOST_TP_SCALE_12 = 3, /* 12% of max (-9 dBm) */ + WMI_HOST_TP_SCALE_MIN = 4, /* min, but still on */ + WMI_HOST_TP_SCALE_SIZE = 5, /* max num of enum */ +} WMI_HOST_TP_SCALE; +enum { + WMI_HOST_RATEPWR_TABLE_OPS_SET, + WMI_HOST_RATEPWR_TABLE_OPS_GET, +}; +/* reserved up through 0xF */ +/** + * struct wmi_host_dcs_mib_stats - WLAN IM stats from target to host + * Below statistics are sent from target to host periodically. + * These are collected at target as long as target is running + * and target chip is not in sleep. + * @listen_time: + * @reg_tx_frame_cnt: + * @reg_rx_frame_cnt: + * @reg_rxclr_cnt: + * @reg_cycle_cnt: delta cycle count + * @reg_rxclr_ext_cnt: + * @reg_ofdm_phyerr_cnt: + * @reg_cck_phyerr_cnt: CCK err count since last reset, read from register + */ +typedef struct _hp_dcs_mib_stats { + int32_t listen_time; + uint32_t reg_tx_frame_cnt; + uint32_t reg_rx_frame_cnt; + uint32_t reg_rxclr_cnt; + uint32_t reg_cycle_cnt; + uint32_t reg_rxclr_ext_cnt; + uint32_t reg_ofdm_phyerr_cnt; + uint32_t reg_cck_phyerr_cnt; +} wmi_host_dcs_mib_stats_t; + +/** + * struct wmi_host_dcs_im_tgt_stats - DCS IM target stats + * @reg_tsf32: current running TSF from the TSF-1 + * @last_ack_rssi: Known last frame rssi, in case of multiple stations, if + * and at different ranges, this would not gaurantee that + * this is the least rssi. + * @tx_waste_time: Sum of all the failed durations in the last + * one second interval. + * @rx_time: count how many times the hal_rxerr_phy is marked, in this + * time period + * @phyerr_cnt: + * @mib_stats: wmi_host_dcs_mib_stats_t - collected mib stats as explained + * in mib structure + * @chan_nf: Channel noise floor (Units are in dBm) + * @my_bss_rx_cycle_count: BSS rx cycle count + * @reg_rxclr_ext40_cnt: extension channel 40Mhz rxclear count + * @reg_rxclr_ext80_cnt: extension channel 80Mhz rxclear count + */ +typedef struct _wmi_host_dcs_im_tgt_stats { + uint32_t reg_tsf32; + uint32_t last_ack_rssi; + uint32_t tx_waste_time; + uint32_t rx_time; + uint32_t phyerr_cnt; + wmi_host_dcs_mib_stats_t mib_stats; + uint32_t chan_nf; + uint32_t my_bss_rx_cycle_count; + /* these fields are added here for backward compatibility instead of + * wmi_host_dcs_mib_stats_t + */ + uint32_t reg_rxclr_ext40_cnt; + uint32_t reg_rxclr_ext80_cnt; +} wmi_host_dcs_im_tgt_stats_t; + +/** + * Enum for pktlog req + */ +typedef enum { + WMI_HOST_PKTLOG_EVENT_RX = 0x1, + WMI_HOST_PKTLOG_EVENT_TX = 0x2, + WMI_HOST_PKTLOG_EVENT_RCF = 0x4, /* Rate Control Find */ + WMI_HOST_PKTLOG_EVENT_RCU = 0x8, /* Rate Control Update */ + WMI_HOST_PKTLOG_EVENT_DBG_PRINT = 0x10, /* DEBUG prints */ + /* To support Smart Antenna */ + WMI_HOST_PKTLOG_EVENT_SMART_ANTENNA = 0x20, + WMI_HOST_PKTLOG_EVENT_H_INFO = 0x40, + WMI_HOST_PKTLOG_EVENT_STEERING = 0x80, + /* To support Tx data Capture */ + WMI_HOST_PKTLOG_EVENT_TX_DATA_CAPTURE = 0x100, +} WMI_HOST_PKTLOG_EVENT; + +/** + * wmi_host_phyerr + * + */ +#define WMI_HOST_PHY_ERROR_SPECTRAL_SCAN 0x26 +#define WMI_HOST_PHY_ERROR_FALSE_RADAR_EXT 0x24 + +#define WMI_HOST_AR900B_DFS_PHYERR_MASK 0x4 +#define WMI_HOST_AR900B_SPECTRAL_PHYERR_MASK 0x4000000 + +/** + * struct wmi_host_perchain_rssi_info - per chain RSSI info + * @rssi_pri20: RSSI on primary 20 + * @rssi_sec20: RSSI on secomdary 20 + * @rssi_sec40: RSSI secondary 40 + * @rssi_sec80: RSSI secondary 80 + */ +typedef struct wmi_host_perchain_rssi_info { + int8_t rssi_pri20; + int8_t rssi_sec20; + int8_t rssi_sec40; + int8_t rssi_sec80; +} wmi_host_perchain_rssi_info_t; + +/** + * struct _wmi_host_rf_info - RF measurement information + * @rssi_comb: RSSI Information + * @pc_rssi_info[4]: For now, we know we are getting information + * for only 4 chains at max. For future extensions + * use a define + * @noise_floor: Noise floor information + */ +typedef struct _wmi_host_rf_info { + int8_t rssi_comb; + wmi_host_perchain_rssi_info_t pc_rssi_info[4]; + int16_t noise_floor[4]; +} wmi_host_rf_info_t; + +/** + * struct _wmi_host_chan_info + * @center_freq1: center frequency 1 in MHz + * @center_freq2: center frequency 2 in MHz -valid only for + * 11ACVHT 80PLUS80 mode + * @chan_width: channel width in MHz + */ +typedef struct _wmi_host_chan_info { + u_int16_t center_freq1; + u_int16_t center_freq2; + u_int8_t chan_width; +} wmi_host_chan_info_t; + +/** + * struct wmi_host_phyerr + * @rf_info: + * @chan_info: + * @tsf64: + * @phy_err_code: + * @tsf_timestamp: + * @bufp: + * @buf_len: + * @phy_err_mask0: + * @phy_err_mask1: + * @pdev_id: pdev_id + */ +typedef struct _wmi_host_phyerr { + wmi_host_rf_info_t rf_info; + wmi_host_chan_info_t chan_info; + uint64_t tsf64; + int32_t phy_err_code; + uint32_t tsf_timestamp; + uint8_t *bufp; + uint32_t buf_len; + uint32_t phy_err_mask0; + uint32_t phy_err_mask1; + uint32_t pdev_id; +} wmi_host_phyerr_t; + +/** + * struct wmi_host_rtt_event_hdr + * @req_id: request id + * @status: status + * @meas_done: measurement done flag + * @meas_type: measurement type + * @report_type: report type + * @v3_status: v2 status + * @v3_finish: + * @v3_tm_start: + * @num_ap: number of AP + * @result: resuult + * @dest_mac: destination mac + */ +typedef struct { + uint16_t req_id; + uint16_t status:1, + meas_done:1, + meas_type:3, + report_type:3, + v3_status:2, + v3_finish:1, + v3_tm_start:1, + num_ap:4; + uint16_t result; + uint8_t dest_mac[QDF_MAC_ADDR_SIZE]; +} wmi_host_rtt_event_hdr; + +/** + * struct wmi_host_rtt_meas_event - RTT measurement event + * @chain_mask: + * @bw: + * @rsvd: + * @txrxchain_mask: Bit:0-3:chain mask + * Bit 4-5: band width info + * 00 --Legacy 20, 01 --HT/VHT20 + * 10 --HT/VHT40, 11 -- VHT80 + * @tod: resolution of 0.1ns + * @toa: resolution of 0.1ns + * @t3: + * @t4: + * @rssi0: + * @rssi1: + * @rssi2: + * @rssi3: + */ +typedef struct { + uint32_t chain_mask:3, + bw:2, + rsvd:27; + uint32_t txrxchain_mask; + uint64_t tod; + uint64_t toa; + uint64_t t3; + uint64_t t4; + uint32_t rssi0; + uint32_t rssi1; + uint32_t rssi2; + uint32_t rssi3; +} wmi_host_rtt_meas_event; + +/*----RTT Report event definition ----*/ +typedef enum { + /* rtt cmd header parsing error --terminate */ + WMI_HOST_RTT_COMMAND_HEADER_ERROR = 0, + /* rtt body parsing error -- skip current STA REQ */ + WMI_HOST_RTT_COMMAND_ERROR, + /* rtt no resource -- terminate */ + WMI_HOST_RTT_MODULE_BUSY, + /* STA exceed the support limit -- only server the first n STA */ + WMI_HOST_RTT_TOO_MANY_STA, + /* any allocate failure */ + WMI_HOST_RTT_NO_RESOURCE, + /* can not find vdev with vdev ID - skip current STA REQ */ + WMI_HOST_RTT_VDEV_ERROR, + /* Tx failure -- continiue and measure number */ + WMI_HOST_RTT_TRANSIMISSION_ERROR, + /* wait for first TM timer expire-terminate current STA measurement */ + WMI_HOST_RTT_TM_TIMER_EXPIRE, + /* we do not support RTT measurement with this type of frame */ + WMI_HOST_RTT_FRAME_TYPE_NOSUPPORT, + /* whole RTT measurement timer expire-terminate + ** current STA measurement */ + WMI_HOST_RTT_TIMER_EXPIRE, + /* channel swicth failed */ + WMI_HOST_RTT_CHAN_SWITCH_ERROR, + /* TMR trans error, this dest peer will be skipped */ + WMI_HOST_RTT_TMR_TRANS_ERROR, + /* V3 only. If both CFR and Token mismatch, do not report */ + WMI_HOST_RTT_NO_REPORT_BAD_CFR_TOKEN, + /* For First TM, if CFR is bad, then do not report */ + WMI_HOST_RTT_NO_REPORT_FIRST_TM_BAD_CFR, + /* do not allow report type2 mix with type 0, 1 */ + WMI_HOST_RTT_REPORT_TYPE2_MIX, + /* LCI Configuration OK. - Responder only */ + WMI_HOST_RTT_LCI_CFG_OK, + /* LCR configuration OK. - Responder only */ + WMI_HOST_RTT_LCR_CFG_OK, + /* Bad configuration LCI (or) LCR request - Responder only */ + WMI_HOST_RTT_CFG_ERROR, + WMI_HOST_WMI_RTT_REJECT_MAX, +} WMI_HOST_RTT_ERROR_INDICATOR; +typedef struct { + wmi_host_rtt_event_hdr hdr; + WMI_HOST_RTT_ERROR_INDICATOR reject_reason; +} wmi_host_rtt_error_report_event; + +#if defined(AR9888) +typedef enum { + WMI_HOST_PROF_CPU_IDLE, + WMI_HOST_PROF_PPDU_PROC, + WMI_HOST_PROF_PPDU_POST, + WMI_HOST_PROF_HTT_TX_INPUT, + WMI_HOST_PROF_MSDU_ENQ, + WMI_HOST_PROF_PPDU_POST_HAL, + WMI_HOST_PROF_COMPUTE_TX_TIME, + + /* Add new ID's above this. */ + WMI_HOST_PROF_MAX_ID, +} wmi_host_profile_id_t; +#endif + +#define WMI_HOST_WLAN_PROFILE_MAX_HIST 3 +#define WMI_HOST_WLAN_PROFILE_MAX_BIN_CNT 32 + +#if defined(AR9888) +#define WMI_HOST_MAX_PROFILE WMI_HOST_PROF_MAX_ID +#else +#define WMI_HOST_MAX_PROFILE WMI_HOST_WLAN_PROFILE_MAX_BIN_CNT +#endif + +/** + * struct wmi_host_wlan_profile - Host profile param + * @id: profile id + * @cnt: Count + * @tot: + * @min: minimum + * @max: Mac + * @hist_intvl: history interval + * @hist: profile data history + */ +typedef struct { + uint32_t id; + uint32_t cnt; + uint32_t tot; + uint32_t min; + uint32_t max; + uint32_t hist_intvl; + uint32_t hist[WMI_HOST_WLAN_PROFILE_MAX_HIST]; +} wmi_host_wlan_profile_t; + +/** + * struct wmi_host_wlan_profile_ctx_t - profile context + * @tot: time in us + * @tx_msdu_cnt: MSDU TX count + * @tx_mpdu_cnt: MPDU tx count + * @tx_ppdu_cnt: PPDU tx count + * @rx_msdu_cnt: MSDU RX count + * @rx_mpdu_cnt: MPDU RXcount + * @bin_count: Bin count + */ +typedef struct { + uint32_t tot; + uint32_t tx_msdu_cnt; + uint32_t tx_mpdu_cnt; + uint32_t tx_ppdu_cnt; + uint32_t rx_msdu_cnt; + uint32_t rx_mpdu_cnt; + uint32_t bin_count; +} wmi_host_wlan_profile_ctx_t; + +/** + * struct wmi_host_chan_info_event - Channel info WMI event + * @pdev_id: pdev_id + * @err_code: Error code + * @freq: Channel freq + * @cmd_flags: Read flags + * @noise_floor: Noise Floor value + * @rx_clear_count: rx clear count + * @cycle_count: cycle count + * @chan_tx_pwr_range: channel tx power per range + * @chan_tx_pwr_tp: channel tx power per throughput + * @rx_frame_count: rx frame count + * @rx_11b_mode_data_duration: 11b mode data duration + * @my_bss_rx_cycle_count: self BSS rx cycle count + * @tx_frame_cnt: tx frame count + * @mac_clk_mhz: mac clock + * @vdev_id: unique id identifying the VDEV + * @tx_frame_count: tx frame count + * @rx_clear_ext20_count: ext20 frame count + * @rx_clear_ext40_count: ext40 frame count + * @rx_clear_ext80_count: ext80 frame count + * @per_chain_noise_floor: Per chain NF value in dBm + */ +typedef struct { + uint32_t pdev_id; + uint32_t err_code; + uint32_t freq; + uint32_t cmd_flags; + uint32_t noise_floor; + uint32_t rx_clear_count; + uint32_t cycle_count; + uint32_t chan_tx_pwr_range; + uint32_t chan_tx_pwr_tp; + uint32_t rx_frame_count; + uint32_t rx_11b_mode_data_duration; + uint32_t my_bss_rx_cycle_count; + uint32_t tx_frame_cnt; + uint32_t mac_clk_mhz; + uint32_t vdev_id; + uint32_t tx_frame_count; + uint32_t rx_clear_ext20_count; + uint32_t rx_clear_ext40_count; + uint32_t rx_clear_ext80_count; + uint32_t per_chain_noise_floor[WMI_HOST_MAX_CHAINS]; +} wmi_host_chan_info_event; + +/** + * struct wmi_host_pdev_channel_hopping_event + * @pdev_id: pdev_id + * @noise_floor_report_iter: Noise threshold iterations with high values + * @noise_floor_total_iter: Total noise threshold iterations + */ +typedef struct { + uint32_t pdev_id; + uint32_t noise_floor_report_iter; + uint32_t noise_floor_total_iter; +} wmi_host_pdev_channel_hopping_event; + +/** + * struct wmi_host_pdev_bss_chan_info_event + * @pdev_id: pdev_id + * @freq: Units in MHz + * @noise_floor: units are dBm + * @rx_clear_count_low: + * @rx_clear_count_high: + * @cycle_count_low: + * @cycle_count_high: + * @tx_cycle_count_low: + * @tx_cycle_count_high: + * @rx_cycle_count_low: + * @rx_cycle_count_high: + * @rx_bss_cycle_count_low: + * @rx_bss_cycle_count_high: + * @reserved: + */ +typedef struct { + uint32_t pdev_id; + uint32_t freq; + uint32_t noise_floor; + uint32_t rx_clear_count_low; + uint32_t rx_clear_count_high; + uint32_t cycle_count_low; + uint32_t cycle_count_high; + uint32_t tx_cycle_count_low; + uint32_t tx_cycle_count_high; + uint32_t rx_cycle_count_low; + uint32_t rx_cycle_count_high; + uint32_t rx_bss_cycle_count_low; + uint32_t rx_bss_cycle_count_high; + uint32_t reserved; +} wmi_host_pdev_bss_chan_info_event; + +/** + * struct wmi_host_pdev_ctl_failsafe_event + * @ctl_failsafe_status: Indicate if Failsafe value is imposed on CTL + */ +struct wmi_host_pdev_ctl_failsafe_event { + uint32_t ctl_failsafe_status; +}; + +#define WMI_HOST_INST_STATS_INVALID_RSSI 0 +/** + * struct wmi_host_inst_stats_resp + * @iRSSI: Instantaneous RSSI + * @peer_macaddr: peer mac address + * @pdev_id: pdev_id + */ +typedef struct { + uint32_t iRSSI; + wmi_host_mac_addr peer_macaddr; + uint32_t pdev_id; +} wmi_host_inst_stats_resp; + +/* Event definition and new structure addition to send event + * to host to block/unblock tx data traffic based on peer_ast_idx or vdev id + */ +#define WMI_HOST_INVALID_PEER_AST_INDEX 0xffff +#define WMI_HOST_TX_DATA_TRAFFIC_CTRL_BLOCK 0x1 +#define WMI_HOST_TX_DATA_TRAFFIC_CTRL_UNBLOCK 0x2 +/** + * struct wmi_host_tx_data_traffic_ctrl_event + * @peer_ast_idx: For vdev based control, peer_ast_idx will be + * WMI_INVALID_PEER_AST_INDEX + * @vdev_id: only applies if peer_ast_idx == INVALID + * @ctrl_cmd: WMI_TX_DATA_TRAFFIC_CTRL_BLOCK or + * WMI_TX_DATA_TRAFFIC_CTRL_UNBLOCK + * @wmm_ac: Indicates AC to be blocked or unblocked + * Bits 4-31 : Reserved (Shall be zero) + * Bits 0-3 : WMM AC NO [ BE (1), BK (2), VI (3), VO (4)] + * Started numbering from 1 to preserve backward compatibility + */ +typedef struct { + uint32_t peer_ast_idx; + uint32_t vdev_id; + uint32_t ctrl_cmd; + uint32_t wmm_ac; +} wmi_host_tx_data_traffic_ctrl_event; + +/** + * struct wmi_host_ath_dcs_cw_int + * @channel: either number or freq in mhz + */ +typedef struct { + uint32_t channel; +} wmi_host_ath_dcs_cw_int; + +#define WMI_MAX_POWER_DBG_ARGS 8 + +/** + * struct wmi_power_dbg_params - power debug command parameter + * @pdev_id: subsystem identifier + * @module_id: parameter id + * @num_arg: no of arguments + * @args: arguments + */ +struct wmi_power_dbg_params { + uint32_t pdev_id; + uint32_t module_id; + uint32_t num_args; + uint32_t args[WMI_MAX_POWER_DBG_ARGS]; +}; + +/** + * struct wmi_fw_dump_seg_req - individual segment details + * @seg_id - segment id. + * @seg_start_addr_lo - lower address of the segment. + * @seg_start_addr_hi - higher address of the segment. + * @seg_length - length of the segment. + * @dst_addr_lo - lower address of the destination buffer. + * @dst_addr_hi - higher address of the destination buffer. + * + * This structure carries the information to firmware about the + * individual segments. This structure is part of firmware memory + * dump request. + */ +struct wmi_fw_dump_seg_req { + uint8_t seg_id; + uint32_t seg_start_addr_lo; + uint32_t seg_start_addr_hi; + uint32_t seg_length; + uint32_t dst_addr_lo; + uint32_t dst_addr_hi; +}; + +/** + * enum wmi_userspace_log_level - Log level at userspace + * @WMI_LOG_LEVEL_NO_COLLECTION: verbose_level 0 corresponds to no collection + * @WMI_LOG_LEVEL_NORMAL_COLLECT: verbose_level 1 correspond to normal log + * level with minimal user impact. This is the default value. + * @WMI_LOG_LEVEL_ISSUE_REPRO: verbose_level 2 are enabled when user is lazily + * trying to reproduce a problem, wifi performances and power can be impacted + * but device should not otherwise be significantly impacted + * @WMI_LOG_LEVEL_ACTIVE: verbose_level 3+ are used when trying to + * actively debug a problem + * + * Various log levels defined in the userspace for logging applications + */ +enum wmi_userspace_log_level { + WMI_LOG_LEVEL_NO_COLLECTION, + WMI_LOG_LEVEL_NORMAL_COLLECT, + WMI_LOG_LEVEL_ISSUE_REPRO, + WMI_LOG_LEVEL_ACTIVE, +}; + +/** + * HW mode config type replicated from FW header + * @WMI_HOST_HW_MODE_SINGLE: Only one PHY is active. + * @WMI_HOST_HW_MODE_DBS: Both PHYs are active in different bands, + * one in 2G and another in 5G. + * @WMI_HOST_HW_MODE_SBS_PASSIVE: Both PHYs are in passive mode (only rx) in + * same band; no tx allowed. + * @WMI_HOST_HW_MODE_SBS: Both PHYs are active in the same band. + * Support for both PHYs within one band is planned + * for 5G only(as indicated in WMI_MAC_PHY_CAPABILITIES), + * but could be extended to other bands in the future. + * The separation of the band between the two PHYs needs + * to be communicated separately. + * @WMI_HOST_HW_MODE_DBS_SBS: 3 PHYs, with 2 on the same band doing SBS + * as in WMI_HW_MODE_SBS, and 3rd on the other band + * @WMI_HOST_HW_MODE_DBS_OR_SBS: Two PHY with one PHY capabale of both 2G and + * 5G. It can support SBS (5G + 5G) OR DBS (5G + 2G). + * @WMI_HOST_HW_MODE_MAX: Max hw_mode_id. Used to indicate invalid mode. + * @WMI_HOST_HW_MODE_DETECT: Mode id used by host to choose mode from target + * supported modes. + */ +enum wmi_host_hw_mode_config_type { + WMI_HOST_HW_MODE_SINGLE = 0, + WMI_HOST_HW_MODE_DBS = 1, + WMI_HOST_HW_MODE_SBS_PASSIVE = 2, + WMI_HOST_HW_MODE_SBS = 3, + WMI_HOST_HW_MODE_DBS_SBS = 4, + WMI_HOST_HW_MODE_DBS_OR_SBS = 5, + WMI_HOST_HW_MODE_MAX, + WMI_HOST_HW_MODE_DETECT, +}; + +/** + * enum wmi_host_dynamic_hw_mode_config_type - Host defined enum for + * dynamic mode switch + * @WMI_HOST_DYNAMIC_HW_MODE_DISABLED: hw mode switch is disabled + * @WMI_HOST_DYNAMIC_HW_MODE_SLOW: hw mode switch with interface down/up + * @WMI_HOST_DYNAMIC_HW_MODE_FAST: hw mode switch without interface down/up + * @WMI_HOST_DYNAMIC_HW_MODE_MAX: Max value to indicate invalid mode + */ +enum wmi_host_dynamic_hw_mode_config_type { + WMI_HOST_DYNAMIC_HW_MODE_DISABLED = 0, + WMI_HOST_DYNAMIC_HW_MODE_SLOW = 1, + WMI_HOST_DYNAMIC_HW_MODE_FAST = 2, + WMI_HOST_DYNAMIC_HW_MODE_MAX, +}; + +/* + * struct wmi_host_peer_txmu_cnt_event + * @tx_mu_transmitted - MU-MIMO tx count + */ +typedef struct { + uint32_t tx_mu_transmitted; +} wmi_host_peer_txmu_cnt_event; + +/** + * struct esp_estimation_event - esp airtime estimation event + * @ac_airtime_percentage: Estimated Airtime + * @pdev_id: PDEV_ID of Data + */ +struct esp_estimation_event { + uint32_t ac_airtime_percentage; + uint32_t pdev_id; +}; + +/* + * struct wmi_peer_gid_userpos_list_event + * @usr_list - User list + */ +#define GID_OVERLOAD_GROUP_COUNT 15 +typedef struct { + uint32_t usr_list[GID_OVERLOAD_GROUP_COUNT]; +} wmi_host_peer_gid_userpos_list_event; + +/** + * enum rcpi_measurement_type - for identifying type of rcpi measurement + * @RCPI_MEASUREMENT_TYPE_AVG_MGMT: avg rcpi of mgmt frames + * @RCPI_MEASUREMENT_TYPE_AVG_DATA: avg rcpi of data frames + * @RCPI_MEASUREMENT_TYPE_LAST_MGMT: rcpi of last mgmt frame + * @RCPI_MEASUREMENT_TYPE_LAST_DATA: rcpi of last data frame + * @RCPI_MEASUREMENT_TYPE_INVALID: invalid rcpi measurement type + */ +enum rcpi_measurement_type { + RCPI_MEASUREMENT_TYPE_AVG_MGMT = 0x1, + RCPI_MEASUREMENT_TYPE_AVG_DATA = 0x2, + RCPI_MEASUREMENT_TYPE_LAST_MGMT = 0x3, + RCPI_MEASUREMENT_TYPE_LAST_DATA = 0x4, + RCPI_MEASUREMENT_TYPE_INVALID = 0x5, +}; + +/** + * struct rcpi_req - RCPI req parameter + * @vdev_id: virtual device id + * @measurement_type: type of rcpi from enum wmi_rcpi_measurement_type + * @mac_addr: peer mac addr for which measurement is required + */ +struct rcpi_req { + uint32_t vdev_id; + enum rcpi_measurement_type measurement_type; + uint8_t mac_addr[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct rcpi_res - RCPI response parameter + * @vdev_id: virtual device id + * @measurement_type: type of rcpi from enum wmi_rcpi_measurement_type + * @mac_addr: peer mac addr for which measurement is required + * @rcpi_value: value of RCPI computed by firmware + */ +struct rcpi_res { + uint32_t vdev_id; + enum rcpi_measurement_type measurement_type; + uint8_t mac_addr[QDF_MAC_ADDR_SIZE]; + int32_t rcpi_value; +}; + +#define WMI_HOST_BOARD_MCN_STRING_MAX_SIZE 19 +#define WMI_HOST_BOARD_MCN_STRING_BUF_SIZE \ + (WMI_HOST_BOARD_MCN_STRING_MAX_SIZE+1) /* null-terminator */ + +typedef struct { + uint32_t software_cal_version; + uint32_t board_cal_version; + /* board_mcn_detail: + * Provide a calibration message string for the host to display. + * Note: on a big-endian host, the 4 bytes within each uint32_t portion + * of a WMI message will be automatically byteswapped by the copy engine + * as the messages are transferred between host and target, to convert + * between the target's little-endianness and the host's big-endianness. + * Consequently, a big-endian host should manually unswap the bytes + * within the board_mcn_detail string buffer to get the bytes back into + * the desired natural order. + */ + uint8_t board_mcn_detail[WMI_HOST_BOARD_MCN_STRING_BUF_SIZE]; + uint32_t cal_ok; /* filled with CALIBRATION_STATUS enum value */ +} wmi_host_pdev_check_cal_version_event; + +/** + * enum WMI_HOST_CALIBRATION_STATUS - Host defined Enums for cal status + * @WMI_HOST_NO_FEATURE: The board was calibrated with a meta + * which did not have this feature + * @WMI_HOST_CALIBRATION_OK: The calibration status is OK + * @WMI_HOST_CALIBRATION_NOT_OK: The calibration status is NOT OK + */ +enum WMI_HOST_CALIBRATION_STATUS { + WMI_HOST_NO_FEATURE = 0, + WMI_HOST_CALIBRATION_OK, + WMI_HOST_CALIBRATION_NOT_OK, +}; + +/** + * struct wmi_host_pdev_utf_event - Host defined struct to hold utf event data + * @data: Pointer to data + * @datalen: Data length + * @pdev_id: Pdev_id of data + * + */ +struct wmi_host_pdev_utf_event { + uint8_t *data; + uint16_t datalen; + uint32_t pdev_id; +}; + +/** + * struct wmi_host_utf_seg_header_info - Host defined struct to map seg info in + * UTF event + * @len: segment length + * @msgref: message reference + * @segment_info: segment info + * @pdev_id: pdev_id + * + */ +struct wmi_host_utf_seg_header_info { + uint32_t len; + uint32_t msgref; + uint32_t segment_info; + uint32_t pdev_id; +}; + +/** + * struct wmi_host_pdev_qvit_event - Host defined struct to hold qvit event data + * @data: Pointer to data + * @datalen: Data length + * + */ +struct wmi_host_pdev_qvit_event { + uint8_t *data; + uint16_t datalen; + uint32_t pdev_id; +}; + +/** + * struct wmi_host_peer_delete_response_event - Peer Delete response event param + * @vdev_id: vdev id + * @mac_address: Peer Mac Address + * + */ +struct wmi_host_peer_delete_response_event { + uint32_t vdev_id; + struct qdf_mac_addr mac_address; +}; + +/** + * struct wmi_host_vdev_peer_delete_all_response_event - + * VDEV peer delete all response + * @vdev_id: vdev id + * @status: status of request + * 0 - OK; command successful + * 1 - EINVAL; Requested invalid vdev_id + * 2 - EFAILED; Delete all peer failed + */ +struct wmi_host_vdev_peer_delete_all_response_event { + uint32_t vdev_id; + uint32_t status; +}; + +/** + * @struct wmi_host_dcs_interference_param + * @interference_type: Type of DCS Interference + * @uint32_t pdev_id: pdev id + */ +struct wmi_host_dcs_interference_param { + uint32_t interference_type; + uint32_t pdev_id; +}; + +/** + * struct wmi_host_rf_characterization_event_param - rf characterization table + * @freq: center frequency of primary channel (in MHz) + * @chan_metric: primary channel-specific metric + * @bw: bandwidth of primary channel (in MHz) + */ +struct wmi_host_rf_characterization_event_param { + uint16_t freq; + uint8_t chan_metric; + wmi_host_channel_width bw; +}; + +/* + * struct wmi_host_fips_event_param: FIPS event param + * @pdev_id: pdev id + * @error_status: Error status: 0 (no err), 1, or OPER_TIMEOUR + * @data_len: FIPS data length + * @data: pointer to data + */ +struct wmi_host_fips_event_param { + uint32_t pdev_id; + uint32_t error_status; + uint32_t data_len; + uint32_t *data; +}; + +#ifdef WLAN_FEATURE_DISA_FIPS +/** + * struct disa_encrypt_decrypt_resp_params - disa encrypt response + * @vdev_id: vdev id + * @status: status + * @data_length: data length + * @data: data pointer + */ +struct disa_encrypt_decrypt_resp_params { + uint32_t vdev_id; + int32_t status; + uint32_t data_len; + uint8_t *data; +}; +#endif + +/** + * struct wmi_host_proxy_ast_reserve_param + * @pdev_id: pdev id + * @result: result + */ +struct wmi_host_proxy_ast_reserve_param { + uint32_t pdev_id; + uint32_t result; +}; + +/** + * struct wmi_host_pdev_band_to_mac - freq range for mac + * @pdev_id: PDEV ID to identifiy mac + * @start_freq: start frequency value + * @end_freq: end frequency value + */ +struct wmi_host_pdev_band_to_mac { + uint32_t pdev_id; + uint32_t start_freq; + uint32_t end_freq; +}; +#define WMI_HOST_MAX_PDEV 3 + +#ifdef OL_ATH_SMART_LOGGING + +#define WMI_HOST_SMART_LOG_SCENARIO_SET(flag, scenario) ((flag) |= (scenario)) +#define WMI_HOST_SMART_LOG_SCENARIO_GET(flag, scenario) ((flag) & (scenario)) + +/** + * enum wmi_host_smart_log_scenario - Smart log scenarios to be enabled/disabled + * @WMI_HOST_SMART_LOG_ALL: All smart logging features. + * @WMI_HOST_SMART_LOG_CE_FULL_DETECT_BY_FW: CE with full detect by FW. + * @WMI_HOST_SMART_LOG_TX_RX_TIMEOUT: Tx/Rx timeout. + * @WMI_HOST_SMART_LOG_STA_KICKOUT: STA Kickout. + * @WMI_HOST_SMART_LOG_BCN_CMD_FAILURE: Beacon command failure. + * @WMI_HOST_SMART_LOG_P1_PING_FAILURE: P1 ping failure. Ping failure detection + * is done by host entities. So, host should be able to control the + * enable/disable of this feature. Yet, this is provided in case the + * corresponding FW specific debugs alone have to be enabled/disabled. + * @WMI_HOST_SMART_LOG_CONNECTION_FAILURE: Connection failure. Connection + * failure detection is done by host entities. So, host should be able to + * control the enable/disable of this feature. Yet, this is provided in case the + * corresponding FW specific debugs alone have to be enabled/disabled. + * @WMI_HOST_SMART_LOG_FW_INITIATED_PKT_LOG: FW Initiated packetlog. + * @WMI_HOST_SMART_LOG_EXTENSION_1: If WMI_HOST_SMART_LOG_EXTENSION_1 is set, + * then the 'log_case_ext_1' field in 'wmi_smart_logging' is used; else + * log_case_ext_1 is ignored. + */ +enum wmi_host_smart_log_scenario { + WMI_HOST_SMART_LOG_ALL = 0x0, + WMI_HOST_SMART_LOG_CE_FULL_DETECT_BY_FW = 0x1, + WMI_HOST_SMART_LOG_TX_RX_TIMEOUT = 0x2, + WMI_HOST_SMART_LOG_STA_KICKOUT = 0x4, + WMI_HOST_SMART_LOG_BCN_CMD_FAILURE = 0x8, + WMI_HOST_SMART_LOG_P1_PING_FAILURE = 0x10, + WMI_HOST_SMART_LOG_CONNECTION_FAILURE = 0x20, + WMI_HOST_SMART_LOG_FW_INITIATED_PKT_LOG = 0x40, + + /* New scenarios to be added here */ + + WMI_HOST_SMART_LOG_EXTENSION_1 = 0x80000000, +}; + +/** + * struct wmi_fatal_condition_event - Fatal condition event param + * @type: Type of event + * @subtype: Subtype of event + * @type_subtype_specific_data: Data specific to combination of type and + * subtype. This is held in a union with the original "reserved0" for backward + * compatibility with any code that might refer to it. The previously-reserved + * field now holds data under some conditions. The kind of data depends on the + * above "type" and "subtype" fields. The interpretation of the + * type_subtype_specific_data field based on different type + subtype values is + * shown below: + * 1. type == WMI_HOST_FATAL_CONDITION_PACKET_LOG_CONFIG + subtype + * == WMI_HOST_FATAL_SUBTYPE_PACKET_LOG_CONFIG_START -> + * type_subtype_specific_data = WMI_HOST_PKTLOG_EVENT + * For any type+subtype combinations not listed above, the recipient is expected + * to ignore the type_subtype_specific_data field. + */ +struct wmi_fatal_condition_event { + uint32_t type; + uint32_t subtype; + union { + uint32_t reserved0; + uint32_t type_subtype_specific_data; + }; +}; + +/*Currently the Max fatal events is 3 */ +#define FATAL_EVENTS_MAX 3 + +/** + * struct wmi_debug_fatal_events: Fatal events list + * @num_events:Number of events + * @event[FATAL_EVENTS_MAX]: Each event data + */ +struct wmi_debug_fatal_events { + uint32_t num_events; + struct wmi_fatal_condition_event event[FATAL_EVENTS_MAX]; +}; + +/** + * enum wmi_host_fatal_condition_type - Values that 'type' can take in + * wmi_fatal_condition_event + * @WMI_HOST_FATAL_CONDITION_EVENT_COMPLETION: Fatal condition event completion + * @WMI_HOST_FATAL_CONDITION_CE_FAILURE: CE failure + * @WMI_HOST_FATAL_CONDITION_TIMEOUTS: Communication timeouts + * @WMI_HOST_FATAL_CONDITION_CONNECTION_ISSUE: Connection issue + * @WMI_HOST_FATAL_CONDITION_PACKET_LOG_CONFIG: Configuration for FW initiated + * packetlog + */ +enum wmi_host_fatal_condition_type { + WMI_HOST_FATAL_CONDITION_EVENT_COMPLETION, + WMI_HOST_FATAL_CONDITION_CE_FAILURE, + WMI_HOST_FATAL_CONDITION_TIMEOUTS, + WMI_HOST_FATAL_CONDITION_CONNECTION_ISSUE, + WMI_HOST_FATAL_CONDITION_PACKET_LOG_CONFIG, +}; + +/** + * enum wmi_host_fatal_condition_subtype_timeouts - Possible subtypes for + * WMI_HOST_FATAL_CONDITION_TIMEOUTS + * @WMI_HOST_FATAL_SUBTYPE_TX_TIMEOUT: Tx timeout + * @WMI_HOST_FATAL_SUBTYPE_RX_TIMEOUT: Rx timeout + */ +enum wmi_host_fatal_condition_subtype_timeouts { + WMI_HOST_FATAL_SUBTYPE_TX_TIMEOUT, + WMI_HOST_FATAL_SUBTYPE_RX_TIMEOUT, +}; + +/** + * enum wmi_host_fatal_condition_subtype_connection_issue - Possible subtypes + * for WMI_HOST_FATAL_CONDITION_CONNECTION_ISSUE + * @WMI_HOST_FATAL_SUBTYPE_STA_KICKOUT: STA Kickout + * @WMI_HOST_FATAL_SUBTYPE_P1_PING_FAILURE_START_DEBUG: Start debugging for P1 + * ping failure + * @WMI_HOST_FATAL_SUBTYPE_P1_PING_FAILURE_STOP_DEBUG: Stop debugging for P1 + * ping failure + * @WMI_HOST_FATAL_SUBTYPE_CONNECTION_FAILURE_START_DEBUG: Start debugging for + * connection failure + * @WMI_HOST_FATAL_SUBTYPE_CONNECTION_FAILURE_STOP_DEBUG: Stop debugging for + * connection failure + */ +enum wmi_host_fatal_condition_subtype_connection_issue { + WMI_HOST_FATAL_SUBTYPE_STA_KICKOUT, + WMI_HOST_FATAL_SUBTYPE_P1_PING_FAILURE_START_DEBUG, + WMI_HOST_FATAL_SUBTYPE_P1_PING_FAILURE_STOP_DEBUG, + WMI_HOST_FATAL_SUBTYPE_CONNECTION_FAILURE_START_DEBUG, + WMI_HOST_FATAL_SUBTYPE_CONNECTION_FAILURE_STOP_DEBUG, +}; + +/** + * enum wmi_host_fatal_condition_subtype_packet_log_config - Possible subtypes + * for WMI_HOST_FATAL_CONDITION_PACKET_LOG_CONFIG + * @WMI_HOST_FATAL_SUBTYPE_PACKET_LOG_CONFIG_START: Start FW initiated packetlog + * @WMI_HOST_FATAL_SUBTYPE_PACKET_LOG_CONFIG_STOP: Stop FW initiated packetlog + */ +enum wmi_host_fatal_condition_subtype_packet_log_config { + WMI_HOST_FATAL_SUBTYPE_PACKET_LOG_CONFIG_START, + WMI_HOST_FATAL_SUBTYPE_PACKET_LOG_CONFIG_STOP, +}; + +#endif /* OL_ATH_SMART_LOGGING */ + +#define GET_PN_MAX_LEN 16 + +/** + * struct wmi_host_get_pn_event - PN event params + * @vdev_id: vdev id + * @peer_macaddr: Peer mac address + * @key_type: key type + * @pn : pn value + */ +struct wmi_host_get_pn_event { + uint32_t vdev_id; + uint8_t mac_addr[QDF_MAC_ADDR_SIZE]; + uint32_t key_type; + uint8_t pn[GET_PN_MAX_LEN]; +}; + +/** + * struct wmi_init_cmd_param - INIT command params + * @target_resource_config: pointer to resource config + * @num_mem_chunks: number of memory chunks + * @struct wmi_host_mem_chunk: pointer to memory chunks + * @hw_mode_index: HW mode index chosen + * @num_band_to_mac: Number of band to mac setting + * @struct wmi_host_pdev_band_to_mac: band to mac setting + */ +struct wmi_init_cmd_param { + target_resource_config *res_cfg; + uint8_t num_mem_chunks; + struct wmi_host_mem_chunk *mem_chunks; + uint32_t hw_mode_id; + uint32_t num_band_to_mac; + struct wmi_host_pdev_band_to_mac band_to_mac[WMI_HOST_MAX_PDEV]; +}; + +/** + * struct pdev_csa_switch_count_status - CSA switch count status event param + * @pdev_id: Physical device identifier + * @current_switch_count: Current CSA switch count + * @num_vdevs: Number of vdevs that need restart + * @vdev_ids: Array containing the vdev ids that need restart + */ +struct pdev_csa_switch_count_status { + uint32_t pdev_id; + uint32_t current_switch_count; + uint32_t num_vdevs; + uint32_t *vdev_ids; +}; + +/** + * enum wmi_host_active-apf_mode - FW_ACTIVE_APF_MODE, replicated from FW header + * @WMI_HOST_ACTIVE_APF_DISABLED: APF is disabled for all packets in active mode + * @WMI_HOST_ACTIVE_APF_ENABLED: APF is enabled for all packets in active mode + * @WMI_HOST_ACTIVE_APF_ADAPTIVE: APF is enabled for packets up to some + * threshold in active mode + */ +enum wmi_host_active_apf_mode { + WMI_HOST_ACTIVE_APF_DISABLED = (1 << 1), + WMI_HOST_ACTIVE_APF_ENABLED = (1 << 2), + WMI_HOST_ACTIVE_APF_ADAPTIVE = (1 << 3) +}; + +/** + * struct coex_config_params - Coex config command params + * @vdev_id: Virtual AP device identifier + * @config_type: Configuration type - wmi_coex_config_type enum + * @config_arg1: Configuration argument based on config type + * @config_arg2: Configuration argument based on config type + * @config_arg3: Configuration argument based on config type + * @config_arg4: Configuration argument based on config type + * @config_arg5: Configuration argument based on config type + * @config_arg6: Configuration argument based on config type + */ +struct coex_config_params { + uint32_t vdev_id; + uint32_t config_type; + uint32_t config_arg1; + uint32_t config_arg2; + uint32_t config_arg3; + uint32_t config_arg4; + uint32_t config_arg5; + uint32_t config_arg6; +}; + +#define WMI_HOST_PDEV_ID_SOC 0xFF +#define WMI_HOST_PDEV_ID_0 0 +#define WMI_HOST_PDEV_ID_1 1 +#define WMI_HOST_PDEV_ID_2 2 +#define WMI_HOST_PDEV_ID_INVALID 0xFFFFFFFF + +/** + * struct wmi_host_ready_ev_param - Data revieved in ready event + * @status: FW init status. Success or Failure. + * @num_dscp_table: Number of DSCP table supported in FW + * @num_extra_mac_addr: Extra mac address present in ready event. Used + * in DBDC mode to provide multiple mac per pdev. + * @num_total_peer: Total number of peers FW could allocate. Zero means + * FW could allocate num peers requested by host in init. + * Otherwise, host need update it max_peer to this value. + * @num_extra_peer: Number of extra peers created and used within FW. Host + * should expect peer_id can be num_total_peer + num_extra_peer + * but it can create only upto num_total_peer. + * @agile_capability: Boolean specification of whether the target supports + * agile DFS, by means of using one 80 MHz radio chain for + * radar detection, concurrently with using another radio + * chain for non-160 MHz regular operation. + * @max_ast_index: Max number of AST entries that FW could allocate. + * @pktlog_defs_checksum: checksum computed from the definitions of the enums + * and structs used within pktlog traces. This is sent + * from the FW as part of FW ready event and needs + * to be embedded in the pktlog buffer header as version. + */ +struct wmi_host_ready_ev_param { + uint32_t status; + uint32_t num_dscp_table; + uint32_t num_extra_mac_addr; + uint32_t num_total_peer; + uint32_t num_extra_peer; + bool agile_capability; + uint32_t max_ast_index; + uint32_t pktlog_defs_checksum; +}; + +enum bcn_offload_control_param { + BCN_OFFLD_CTRL_TX_DISABLE = 0, + BCN_OFFLD_CTRL_TX_ENABLE, + BCN_OFFLD_CTRL_SWBA_DISABLE, + BCN_OFFLD_CTRL_SWBA_ENABLE, +}; + +/** + * struct bcn_offload_control - Beacon offload control params + * @vdev_id: vdev identifer of VAP to control beacon tx + * @bcn_ctrl_op: values from enum bcn_offload_control_param + */ +struct bcn_offload_control { + uint32_t vdev_id; + enum bcn_offload_control_param bcn_ctrl_op; +}; + +#ifdef OBSS_PD +/** + * struct wmi_host_obss_spatial_reuse_set_param - OBSS_PD_SPATIAL Reuse + * structure + * @enable: Enable/Disable Spatial Reuse + * @obss_min: Minimum OBSS level to use + * @obss_max: Maximum OBSS level to use + * @pdev_id: Pdev id + */ +struct wmi_host_obss_spatial_reuse_set_param { + uint32_t enable; + int32_t obss_min; + int32_t obss_max; + uint32_t vdev_id; +}; + +/** + * struct wmi_host_obss_spatial_reuse_set_def_thresh - default obsspd offsets + * @obss_min: Minimum OBSS level to use + * @obss_max: Maximum OBSS level to use + * @vdev_type: vdev_type should be one of WMI_VDEV_TYPE_STA or WMI_VDEV_TYPE_AP + */ +struct wmi_host_obss_spatial_reuse_set_def_thresh { + int32_t obss_min; + int32_t obss_max; + uint32_t vdev_type; +}; +#endif + +/** + * struct wds_entry - WDS entry structure + * @peer_mac: peer mac + * @wds_mac: wds mac address + * @flags: flags + */ +struct wdsentry { + u_int8_t peer_mac[QDF_MAC_ADDR_SIZE]; + u_int8_t wds_mac[QDF_MAC_ADDR_SIZE]; + uint32_t flags; +}; + +/** + * struct wmi_obss_detection_cfg_param - obss detection cfg + * @vdev_id: vdev id + * @obss_detect_period_ms: detection period in ms + * @obss_11b_ap_detect_mode: detect whether there is 11b ap/ibss + * @obss_11b_sta_detect_mode: detect whether there is 11b sta + * connected with other APs + * @obss_11g_ap_detect_mode: detect whether there is 11g AP + * @obss_11a_detect_mode: detect whether there is legacy 11a traffic + * @obss_ht_legacy_detect_mode: detect whether there is ap which is + * ht legacy mode + * @obss_ht_mixed_detect_mode: detect whether there is ap which is ht mixed mode + * @obss_ht_20mhz_detect_mode: detect whether there is ap which has 20M only + * station + */ +struct wmi_obss_detection_cfg_param { + uint32_t vdev_id; + uint32_t obss_detect_period_ms; + uint32_t obss_11b_ap_detect_mode; + uint32_t obss_11b_sta_detect_mode; + uint32_t obss_11g_ap_detect_mode; + uint32_t obss_11a_detect_mode; + uint32_t obss_ht_legacy_detect_mode; + uint32_t obss_ht_mixed_detect_mode; + uint32_t obss_ht_20mhz_detect_mode; +}; + +/** + * enum wmi_obss_detection_reason - obss detection event reasons + * @OBSS_OFFLOAD_DETECTION_DISABLED: OBSS detection disabled + * @OBSS_OFFLOAD_DETECTION_PRESENT: OBSS present detection + * @OBSS_OFFLOAD_DETECTION_ABSENT: OBSS absent detection + * + * Defines different types of reasons for obss detection event from firmware. + */ +enum wmi_obss_detection_reason { + OBSS_OFFLOAD_DETECTION_DISABLED = 0, + OBSS_OFFLOAD_DETECTION_PRESENT = 1, + OBSS_OFFLOAD_DETECTION_ABSENT = 2, +}; + +/** + * struct wmi_obss_detect_info - OBSS detection info from firmware + * @vdev_id: ID of the vdev to which this info belongs. + * @reason: Indicate if present or Absent detection, + * also if not supported offload for this vdev. + * @matched_detection_masks: Detection bit map. + * @matched_bssid_addr: MAC address valid for only if info is present detection. + */ +struct wmi_obss_detect_info { + uint32_t vdev_id; + enum wmi_obss_detection_reason reason; + uint32_t matched_detection_masks; + uint8_t matched_bssid_addr[QDF_MAC_ADDR_SIZE]; +}; + +#ifdef QCA_SUPPORT_CP_STATS +/** + * struct wmi_host_congestion_stats - host definition of congestion stats + * @vdev_id: ID of the vdev to which this info belongs. + * @congestion: This field holds the congestion percentage = + * (busy_time/total_time)*100 + * for the interval from when the vdev was started to the current time + * (or the time at which the vdev was stopped). + */ +struct wmi_host_congestion_stats { + uint32_t vdev_id; + uint32_t congestion; +}; +#endif + +#ifdef FEATURE_WLAN_APF +/** + * struct wmi_apf_write_memory_params - Android Packet Filter write memory + * params + * @vdev_id: VDEV on which APF memory is to be written + * @apf_version: APF version number + * @program_len: Length reserved for program in the APF work memory + * @addr_offset: Relative address in APF work memory to start writing + * @length: Size of the write + * @buf: Pointer to the buffer + */ +struct wmi_apf_write_memory_params { + uint8_t vdev_id; + uint32_t apf_version; + uint32_t program_len; + uint32_t addr_offset; + uint32_t length; + uint8_t *buf; +}; + +/** + * struct wmi_apf_read_memory_params - Android Packet Filter read memory params + * @vdev_id: vdev id + * @addr_offset: Relative address in APF work memory to read from + * @length: Size of the memory fetch + */ +struct wmi_apf_read_memory_params { + uint8_t vdev_id; + uint32_t addr_offset; + uint32_t length; +}; + +/** + * struct wmi_apf_read_memory_resp_event_params - Event containing read Android + * Packet Filter memory response + * @vdev_id: vdev id + * @offset: Read memory offset + * @length: Read memory length + * @more_data: Indicates more data to come + * @data: Pointer to the data + */ +struct wmi_apf_read_memory_resp_event_params { + uint32_t vdev_id; + uint32_t offset; + uint32_t length; + bool more_data; + uint8_t *data; +}; +#endif /* FEATURE_WLAN_APF */ + +/* vdev control flags (per bits) */ +#define WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP 0x00000001 +#define WMI_HOST_VDEV_FLAGS_TRANSMIT_AP 0x00000002 +#define WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP 0x00000004 + +/* Begin of roam scan stats definitions */ + +#define WMI_ROAM_SCAN_STATS_MAX 5 +#define WMI_ROAM_SCAN_STATS_CANDIDATES_MAX 4 +#define WMI_ROAM_SCAN_STATS_CHANNELS_MAX 50 + +/** + * struct wmi_roam_scan_stats_req - Structure to hold roam scan stats request + * @vdev_id: interface id + */ +struct wmi_roam_scan_stats_req { + uint32_t vdev_id; +}; + +/** + * struct wmi_roam_scan_cand - Roam scan candidates + * @score: score of AP + * @rssi: rssi of the AP + * @freq: center frequency + * @bssid: bssid of AP + */ +struct wmi_roam_scan_cand { + uint32_t score; + uint32_t rssi; + uint32_t freq; + uint8_t bssid[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct wmi_roam_scan_stats_params - Roam scan details + * @time_stamp: time at which this roam scan happened + * @client_id: id of client which triggered this scan + * @num_scan_chans: number of channels that were scanned as part of this scan + * @scan_freqs: frequencies of the channels that were scanned + * @is_roam_successful: whether a successful roaming happened after this scan + * @old_bssid: bssid to which STA is connected just before this scan + * @new_bssid: bssid to which STA is roamed to in case of successful roaming + * @num_roam_candidates: no.of roam candidates that are being reported + * @roam_candidate: roam scan candidate details + * @trigger_id: reason for triggering this roam or roam scan + * @trigger_value: threshold value related to trigger_id + */ +struct wmi_roam_scan_stats_params { + uint64_t time_stamp; + uint32_t client_id; + uint32_t num_scan_chans; + uint32_t scan_freqs[WMI_ROAM_SCAN_STATS_CHANNELS_MAX]; + uint32_t is_roam_successful; + + /* Bssid to which STA is connected when the roam scan is triggered */ + uint8_t old_bssid[QDF_MAC_ADDR_SIZE]; + + /* + * Bssid to which STA is connected after roaming. Will be valid only + * if is_roam_successful is true. + */ + uint8_t new_bssid[QDF_MAC_ADDR_SIZE]; + + /* Number of roam candidates that are being reported in the stats */ + uint32_t num_roam_candidates; + struct wmi_roam_scan_cand cand[WMI_ROAM_SCAN_STATS_CANDIDATES_MAX]; + uint32_t trigger_id; + uint32_t trigger_value; +}; + +/** + * struct wmi_roam_scan_stats_res - Roam scan stats response from firmware + * @num_roam_scan: number of roam scans triggered + * @roam_scan: place holder to indicate the array of + * wmi_roam_scan_stats_params followed by this structure + */ +struct wmi_roam_scan_stats_res { + uint32_t num_roam_scans; + struct wmi_roam_scan_stats_params roam_scan[0]; +}; + +#define MAX_ROAM_CANDIDATE_AP 9 +#define MAX_ROAM_SCAN_CHAN 38 +#define MAX_ROAM_SCAN_STATS_TLV 5 + +/** + * struct wmi_roam_btm_trigger_data - BTM roam trigger related information + * @btm_request_mode: BTM request mode - solicited/unsolicited + * @disassoc_timer: Number of TBTT before AP disassociates the STA in ms + * @validity_interval: Preferred candidate list validity interval in ms + * @candidate_list_count: Number of candidates in BTM request. + * @btm_resp_status: Status code of the BTM response. + * @btm_bss_termination_timeout: BTM BSS termination timeout value + * in milli seconds + * @btm_mbo_assoc_retry_timeout: BTM MBO assoc retry timeout value in + * milli seconds + */ +struct wmi_roam_btm_trigger_data { + uint32_t btm_request_mode; + uint32_t disassoc_timer; + uint32_t validity_interval; + uint32_t candidate_list_count; + uint32_t btm_resp_status; + uint32_t btm_bss_termination_timeout; + uint32_t btm_mbo_assoc_retry_timeout; +}; + +/** + * struct wmi_roam_cu_trigger_data - BSS Load roam trigger parameters + * @cu_load: Connected AP CU load percentage + */ +struct wmi_roam_cu_trigger_data { + uint32_t cu_load; + /* TODO: Add threshold value */ +}; + +/** + * Struct wmi_roam_rssi_trigger_data - RSSI roam trigger related + * parameters + * @threshold: RSSI threshold value in dBm for LOW rssi roam trigger + */ +struct wmi_roam_rssi_trigger_data { + uint32_t threshold; +}; + +/** + * struct wmi_roam_deauth_trigger_data - Deauth roaming trigger related + * parameters + * @type: 1- Deauthentication 2- Disassociation + * @reason: Status code of the Deauth/Disassoc received + */ +struct wmi_roam_deauth_trigger_data { + uint32_t type; + uint32_t reason; +}; + +/** + * struct wmi_roam_candidate_info - Roam scan candidate APs related info + * @timestamp: Host timestamp in millisecs + * @type: 0 - Candidate AP; 1 - Current connected AP. + * @bssid: AP bssid. + * @freq: Channel frquency + * @cu_load: Channel utilization load of the AP. + * @cu_score: Channel Utilization score. + * @rssi: Candidate AP rssi + * @rssi_score: AP RSSI score + * @total_score: Total score of the candidate AP. + * @etp: Estimated throughput value of the AP in Mbps + * @bl_reason: Blacklist reason + * @bl_source: Source of adding AP to BL + * @bl_timestamp:This timestamp indicates the time when AP added + * to blacklist. + * @bl_original_timeout: Original timeout value in milli seconds + * when AP added to BL + */ +struct wmi_roam_candidate_info { + uint32_t timestamp; + uint8_t type; + struct qdf_mac_addr bssid; + uint16_t freq; + uint32_t cu_load; + uint32_t cu_score; + uint32_t rssi; + uint32_t rssi_score; + uint32_t total_score; + uint32_t etp; + uint32_t bl_reason; + uint32_t bl_source; + uint32_t bl_timestamp; + uint32_t bl_original_timeout; +}; + +/** + * struct wmi_roam_scan_data - Roam scan event details + * @present: Flag to check if the roam scan tlv is present + * @type: 0 - Partial roam scan; 1 - Full roam scan + * @num_ap: Number of candidate APs. + * @num_chan: Number of channels. + * @next_rssi_threshold: Next roam can trigger rssi threshold + * @chan_freq: List of frequencies scanned as part of roam scan + * @ap: List of candidate AP info + */ +struct wmi_roam_scan_data { + bool present; + uint16_t type; + uint16_t num_ap; + uint16_t num_chan; + uint32_t next_rssi_threshold; + uint16_t chan_freq[MAX_ROAM_SCAN_CHAN]; + struct wmi_roam_candidate_info ap[MAX_ROAM_CANDIDATE_AP]; +}; + +/** + * struct wmi_roam_result - Roam result related info. + * @present: Flag to check if the roam result tlv is present + * @timestamp: Host timestamp in millisecs + * @status: 0 - Roaming is success ; 1 - Roaming failed + * @fail_reason: One of WMI_ROAM_FAIL_REASON_ID + */ +struct wmi_roam_result { + bool present; + uint32_t timestamp; + bool status; + uint32_t fail_reason; +}; + +/** + * struct wmi_neighbor_report_data - Neighbor report/BTM request related + * data. + * @present: Flag to check if the roam 11kv tlv is present + * @timestamp: Host timestamp in millisecs + * @req_type: 1 - BTM query ; 2 - 11K neighbor report request + * @req_time: Request timestamp in ms + * @resp_time: Response timestamp in ms + * @freq: Channel frequency in Mhz + */ +struct wmi_neighbor_report_data { + bool present; + uint32_t timestamp; + uint8_t req_type; + uint32_t req_time; + uint32_t resp_time; + uint8_t num_freq; + uint32_t freq[MAX_ROAM_SCAN_CHAN]; +}; + +/** + * struct wmi_roam_msg_info - Roam message related information + * @present: Flag to check if the roam msg info tlv is present + * @timestamp: Timestamp is the absolute time w.r.t host timer which is + * synchronized between the host and target + * @msg_id: Message ID from WMI_ROAM_MSG_ID + * @msg_param1: msg_param1, values is based on the host & FW + * understanding and depend on the msg ID + * @msg_param2: msg_param2 value is based on the host & FW understanding + * and depend on the msg ID + */ +struct wmi_roam_msg_info { + bool present; + uint32_t timestamp; + uint32_t msg_id; + uint32_t msg_param1; + uint32_t msg_param2; +}; + +/** + * struct wmi_roam_trigger_info() - Roam trigger related details + * @present: Flag to check if the roam_trigger_info tlv is present + * @trigger_reason: Roam trigger reason(enum WMI_ROAM_TRIGGER_REASON_ID) + * @trigger_sub_reason: Sub reason for roam trigger if multiple roam scans + * @current_rssi: Connected AP RSSI + * @timestamp: Host timestamp in millisecs when roam scan was triggered + * @btm_trig_data: BTM roam trigger parameters. + * @cu_trig_data: BSS Load roam trigger parameters. + * @rssi_trig_data: RSSI trigger related info. + * @deauth_trig_data: Deauth roam trigger related info + */ +struct wmi_roam_trigger_info { + bool present; + uint32_t trigger_reason; + uint32_t trigger_sub_reason; + uint32_t current_rssi; + uint32_t timestamp; + union { + struct wmi_roam_btm_trigger_data btm_trig_data; + struct wmi_roam_cu_trigger_data cu_trig_data; + struct wmi_roam_rssi_trigger_data rssi_trig_data; + struct wmi_roam_deauth_trigger_data deauth_trig_data; + }; +}; + +/* End of roam scan stats definitions */ + +/** + * enum wmi_obss_color_collision_evt_type - bss color collision event type + * @OBSS_COLOR_COLLISION_DETECTION_DISABLE: OBSS color detection disabled + * @OBSS_COLOR_COLLISION_DETECTION: OBSS color collision detection + * @OBSS_COLOR_FREE_SLOT_TIMER_EXPIRY: OBSS free slot detection with + * within expiry period + * @OBSS_COLOR_FREE_SLOT_AVAILABLE: OBSS free slot detection + * + * Defines different types of type for obss color collision event type. + */ +enum wmi_obss_color_collision_evt_type { + OBSS_COLOR_COLLISION_DETECTION_DISABLE = 0, + OBSS_COLOR_COLLISION_DETECTION = 1, + OBSS_COLOR_FREE_SLOT_TIMER_EXPIRY = 2, + OBSS_COLOR_FREE_SLOT_AVAILABLE = 3, +}; + +/** + * struct wmi_obss_color_collision_cfg_param - obss color collision cfg + * @vdev_id: vdev id + * @flags: proposed for future use cases, currently not used. + * @evt_type: bss color collision event. + * @current_bss_color: current bss color. + * @detection_period_ms: scan interval for both AP and STA mode. + * @scan_period_ms: scan period for passive scan to detect collision. + * @free_slot_expiry_time_ms: FW to notify host at timer expiry after + * which Host will disable the bss color. + */ +struct wmi_obss_color_collision_cfg_param { + uint32_t vdev_id; + uint32_t flags; + enum wmi_obss_color_collision_evt_type evt_type; + uint32_t current_bss_color; + uint32_t detection_period_ms; + uint32_t scan_period_ms; + uint32_t free_slot_expiry_time_ms; +}; + +/** + * struct wmi_obss_color_collision_info - bss color detection info from firmware + * @vdev_id: ID of the vdev to which this info belongs. + * @evt_type: bss color collision event. + * @obss_color_bitmap_bit0to31: Bit set indicating BSS color present. + * @obss_color_bitmap_bit32to63: Bit set indicating BSS color present. + */ +struct wmi_obss_color_collision_info { + uint32_t vdev_id; + enum wmi_obss_color_collision_evt_type evt_type; + uint32_t obss_color_bitmap_bit0to31; + uint32_t obss_color_bitmap_bit32to63; +}; + +#ifdef WMI_AP_SUPPORT +/** + * struct vap_pcp_tid_map_params - PCP tid map params + * @vdev_id: vdev id + * @pcp_to_tid_map: pointer to arry of pcp to tid map table + */ +struct vap_pcp_tid_map_params { + uint32_t vdev_id; + uint32_t *pcp_to_tid_map; +}; + +/** + * struct vap_tidmap_prec_params - tidmap precedence params + * @vdev_id: vdev id + * @map_precedence: precedence of tid mapping + */ +struct vap_tidmap_prec_params { + uint32_t vdev_id; + uint32_t map_precedence; +}; + +/** + * struct peer_vlan_config_param - peer vlan config command + * @tx_cmd: Tx command + * @rx_cmd: Rx command + * @tx_strip_insert: Strip or Insert vlan in Tx[0:Strip, 1: Insert] + * @tx_strip_insert_inner: Enable tx_strip_insert operation for inner vlan tag. + * @tx_strip_insert_outer: Enable tx_strip_insert operation for outer vlan tag. + * @rx_strip_c_tag: Strip c_tag + * @rx_strip_s_tag: Strip s_tag + * @rx_insert_c_tag: Insert c_tag + * @rx_insert_s_tag: Insert s_tag + * + * @insert_vlan_inner_tci: Vlan inner tci + * @insert_vlan_inner_tci: Vlan outer tci + * + * @vdev_id: vdev id corresponding to peer. + */ +struct peer_vlan_config_param { + uint16_t tx_cmd:1, + rx_cmd:1, + tx_strip_insert:1, + tx_strip_insert_inner:1, + tx_strip_insert_outer:1, + rx_strip_c_tag:1, + rx_strip_s_tag:1, + rx_insert_c_tag:1, + rx_insert_s_tag:1; + uint16_t insert_vlan_inner_tci; + uint16_t insert_vlan_outer_tci; + uint8_t vdev_id; +}; +#endif + +/** + * struct wmi_cfr_peer_tx_event_param - CFR peer tx_event params + * @capture_method: CFR data capture method + * @vdev_id: ID of vdev to which this info belongs + * @mac_addr: Peer MAC address. In AP mode, it is the address of connected + * peer for which CFR capture is needed. In STA mode, this is the + * address of AP it is connected to. + * @primary_20mhz_chan: Primary 20 MHz channel frequency in MHz + * @bandwidth: BW of measurement + * 0 - 20MHz, 1 - 40MHz, 2 - 80MHz, 3 - 160MHz, 4 - 80+80MHz + * @phy_mode: Phy mode of channel, type - WMI_HOST_WLAN_PHY_MODE + * @band_center_freq1: Center frequency 1 in MHz + * @band_center_freq2: Center frequency 2 in MHz + * @spatial_streams: Number of spatial streams + * @correlation_info_1: Address of data from wmi_dma_buf_release_entry [31:0] + * @correlation_info_2: + * Bits [3:0] - Address of data from wmi_dma_buf_release_entry [35:32] + * Bits [15:4] - Reserved + * Bits [31:16] - Hardware PPDU ID [15:0] + * @status: + * Bits [1:0] - TX status, if any. 0-OK, 1-XRETRY, 2-DROP, 3-FILTERED. + * Bits [30:2] - Reserved + * Bit [31] - Status of the CFR capture of the peer + * 1 - Successful, 0 - Unsuccessful. + * @timestamp_us: Timestamp in microseconds at which the CFR was captured + * in the hardware. The clock used for this timestamp is private + * to the target and not visible to the host. So, Host can + * interpret only the relative timestamp deltas from one message + * to the next, but can't interpret the absolute timestamp + * from a single message + * @counter: Count of the current CFR capture from FW. + * This is helpful to identify any drops in FW + * @chain_rssi: Per chain RSSI of the peer, for upto WMI_HOST_MAX_CHAINS. + * Each chain's entry reports the RSSI for different bandwidths. + * Bits [7:0] - Primary 20 MHz + * Bits [15:8] - Secondary 20 MHz of 40 MHz channel (if applicable) + * Bits [23:16] - Secondary 40 MHz of 80 MHz channel (if applicable) + * Bits [31:24] - Secondary 80 MHz of 160 MHz channel (if applicable) + * Each of these 8-bit RSSI reports is in dBm units. 0x80 means invalid. + * Unused bytes within used chain_rssi indices will be 0x80. + * Unused rssi_chain indices will be set to 0x80808080. + * @chain_phase: Per chain phase of peer for upto WMI_HOST_MAX_CHAINS. + */ +typedef struct { + uint32_t capture_method; + uint32_t vdev_id; + struct qdf_mac_addr peer_mac_addr; + uint32_t primary_20mhz_chan; + uint32_t bandwidth; + uint32_t phy_mode; + uint32_t band_center_freq1; + uint32_t band_center_freq2; + uint32_t spatial_streams; + uint32_t correlation_info_1; + uint32_t correlation_info_2; + uint32_t status; + uint32_t timestamp_us; + uint32_t counter; + uint32_t chain_rssi[WMI_HOST_MAX_CHAINS]; + uint16_t chain_phase[WMI_HOST_MAX_CHAINS]; +} wmi_cfr_peer_tx_event_param; + +/** + * struct wmi_host_oem_indirect_data - Indirect OEM data + * @pdev_id: pdev id + * @len: length of data in bytes + * @addr: 36 bit address + */ +struct wmi_host_oem_indirect_data { + uint32_t pdev_id; + uint32_t len; + uint64_t addr; +}; + +/** + * struct wmi_oem_response_param - OEM response info + * @num_data1: First data response length + * @num_data2: Second data response length + * @data_1: First data + * @data_2: Second data + * @indirect_data: Indirect data + */ +struct wmi_oem_response_param { + uint32_t num_data1; + uint32_t num_data2; + uint8_t *data_1; + uint8_t *data_2; + struct wmi_host_oem_indirect_data indirect_data; +}; + +/** + * struct mws_coex_state - Modem Wireless Subsystem(MWS) coex info + * @vdev_id : vdev id + * @coex_scheme_bitmap: LTE-WLAN coexistence scheme bitmap + * Indicates the final schemes applied for the currrent Coex scenario. + * Bit 0 - TDM policy + * Bit 1 - Forced TDM policy + * Bit 2 - Dynamic Power Back-off policy + * Bit 3 - Channel Avoidance policy + * Bit 4 - Static Power Back-off policy. + * @active_conflict_count : active conflict count + * @potential_conflict_count: Potential conflict count + * @chavd_group0_bitmap : Indicates the WLAN channels to be avoided in + * b/w WLAN CH-1 and WLAN CH-14 + * @chavd_group1_bitmap : Indicates the WLAN channels to be avoided in + * WLAN CH-36 and WLAN CH-64 + * @chavd_group2_bitmap : Indicates the WLAN channels to be avoided in + * b/w WLAN CH-100 and WLAN CH-140 + * @chavd_group2_bitmap : Indicates the WLAN channels to be avoided in + * b/w WLAN CH-149 and WLAN CH-165 + */ +struct mws_coex_state { + uint32_t vdev_id; + uint32_t coex_scheme_bitmap; + uint32_t active_conflict_count; + uint32_t potential_conflict_count; + uint32_t chavd_group0_bitmap; + uint32_t chavd_group1_bitmap; + uint32_t chavd_group2_bitmap; + uint32_t chavd_group3_bitmap; +}; + +/** + * struct hdd_mws_coex_dpwb_state - Modem Wireless Subsystem(MWS) coex DPWB info + * @vdev_id : vdev id + * @current_dpwb_state: Current state of the Dynamic Power Back-off SM + * @pnp1_value: Tx power to be applied in next Dynamic Power Back-off cycle + * @lte_dutycycle: Indicates the duty cycle of current LTE frame + * @sinr_wlan_on: LTE SINR value in dB, when WLAN is ON + * @sinr_wlan_off: LTE SINR value in dB, when WLAN is OFF + * @bler_count: LTE blocks with error for the current block err report. + * @block_count: Number of LTE blocks considered for bler count report. + * @wlan_rssi_level: WLAN RSSI level + * @wlan_rssi: WLAN RSSI value in dBm considered in DP backoff algo + * @is_tdm_running: Indicates whether any TDM policy triggered + */ +struct mws_coex_dpwb_state { + uint32_t vdev_id; + int32_t current_dpwb_state; + int32_t pnp1_value; + uint32_t lte_dutycycle; + int32_t sinr_wlan_on; + int32_t sinr_wlan_off; + uint32_t bler_count; + uint32_t block_count; + uint32_t wlan_rssi_level; + int32_t wlan_rssi; + uint32_t is_tdm_running; +}; + +/** + * struct mws_coex_tdm_state - Modem Wireless Subsystem(MWS) coex TDM state info + * @vdev_id: vdev id + * @tdm_policy_bitmap: Time Division Multiplexing (TDM) LTE-Coex Policy type. + * @tdm_sf_bitmap: TDM LTE/WLAN sub-frame bitmap. + */ +struct mws_coex_tdm_state { + uint32_t vdev_id; + uint32_t tdm_policy_bitmap; + uint32_t tdm_sf_bitmap; +}; + +/** + * struct mws_coex_idrx_state - Modem Wireless Subsystem(MWS) coex IDRX state + * @vdev_id: vdev id + * @sub0_techid: SUB0 LTE-coex tech. + * @sub0_policy: SUB0 mitigation policy. + * @sub0_is_link_critical: Set if SUB0 is in link critical state. + * @sub0_static_power: LTE SUB0 imposed static power applied + * to WLAN due to LTE-WLAN coex. + * @sub0_rssi: LTE SUB0 RSSI value in dBm. + * @sub1_techid: SUB1 LTE-coex tech. + * @sub1_policy: SUB1 mitigation policy. + * @sub1_is_link_critical: Set if SUB1 is in link critical state. + * @sub1_static_power: LTE SUB1 imposed static power applied + * to WLAN due to LTE-WLAN coex. + * @sub1_rssi: LTE SUB1 RSSI value in dBm. + */ +struct mws_coex_idrx_state { + uint32_t vdev_id; + uint32_t sub0_techid; + uint32_t sub0_policy; + uint32_t sub0_is_link_critical; + int32_t sub0_static_power; + int32_t sub0_rssi; + uint32_t sub1_techid; + uint32_t sub1_policy; + uint32_t sub1_is_link_critical; + int32_t sub1_static_power; + int32_t sub1_rssi; +}; + +/** + * struct mws_antenna_sharing_info - MWS Antenna sharing Info + * @vdev_id: vdev id + * @coex_flags: BDF values of Coex flags + * @coex_config: BDF values of Coex Antenna sharing config + * @tx_chain_mask: Tx Chain mask value + * @rx_chain_mask: Rx Chain mask value + * @rx_nss: Currently active Rx Spatial streams + * @force_mrc: Forced MRC policy type + * @rssi_type: RSSI value considered for MRC + * @chain0_rssi: RSSI value measured at Chain-0 in dBm + * @chain1_rssi: RSSI value measured at Chain-1 in dBm + * @combined_rssi: RSSI value of two chains combined in dBm + * @imbalance: Absolute imbalance between two Rx chains in dB + * @mrc_threshold: RSSI threshold defined for the above imbalance value in dBm + * @grant_duration: Antenna grant duration to WLAN, in milliseconds + */ +struct mws_antenna_sharing_info { + uint32_t vdev_id; + uint32_t coex_flags; + uint32_t coex_config; + uint32_t tx_chain_mask; + uint32_t rx_chain_mask; + uint32_t rx_nss; + uint32_t force_mrc; + uint32_t rssi_type; + int32_t chain0_rssi; + int32_t chain1_rssi; + int32_t combined_rssi; + uint32_t imbalance; + int32_t mrc_threshold; + uint32_t grant_duration; +}; + +#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG +/** + * enum wmi_pdev_pkt_routing_op_code_type - packet routing supported opcodes + * @ADD_PKT_ROUTING: Add packet routing command + * @DEL_PKT_ROUTING: Delete packet routing command + * + * Defines supported opcodes for packet routing/tagging + */ +enum wmi_pdev_pkt_routing_op_code_type { + ADD_PKT_ROUTING, + DEL_PKT_ROUTING, +}; + +/** + * enum wmi_pdev_pkt_routing_pkt_type - supported packet types for + * routing & tagging + * @PDEV_PKT_TYPE_ARP_IPV4: Route/Tag for packet type ARP IPv4 (L3) + * @PDEV_PKT_TYPE_NS_IPV6: Route/Tag for packet type NS IPv6 (L3) + * @PDEV_PKT_TYPE_IGMP_IPV4: Route/Tag for packet type IGMP IPv4 (L3) + * @PDEV_PKT_TYPE_MLD_IPV6: Route/Tag for packet type MLD IPv6 (L3) + * @PDEV_PKT_TYPE_DHCP_IPV4: Route/Tag for packet type DHCP IPv4 (APP) + * @PDEV_PKT_TYPE_DHCP_IPV6: Route/Tag for packet type DHCP IPv6 (APP) + * @PDEV_PKT_TYPE_DNS_TCP_IPV4: Route/Tag for packet type TCP DNS IPv4 (APP) + * @PDEV_PKT_TYPE_DNS_TCP_IPV6: Route/Tag for packet type TCP DNS IPv6 (APP) + * @PDEV_PKT_TYPE_DNS_UDP_IPV4: Route/Tag for packet type UDP DNS IPv4 (APP) + * @PDEV_PKT_TYPE_DNS_UDP_IPV6: Route/Tag for packet type UDP DNS IPv6 (APP) + * @PDEV_PKT_TYPE_ICMP_IPV4: Route/Tag for packet type ICMP IPv4 (L3) + * @PDEV_PKT_TYPE_ICMP_IPV6: Route/Tag for packet type ICMP IPv6 (L3) + * @PDEV_PKT_TYPE_TCP_IPV4: Route/Tag for packet type TCP IPv4 (L4) + * @PDEV_PKT_TYPE_TCP_IPV6: Route/Tag for packet type TCP IPv6 (L4) + * @PDEV_PKT_TYPE_UDP_IPV4: Route/Tag for packet type UDP IPv4 (L4) + * @PDEV_PKT_TYPE_UDP_IPV6: Route/Tag for packet type UDP IPv6 (L4) + * @PDEV_PKT_TYPE_IPV4: Route/Tag for packet type IPv4 (L3) + * @PDEV_PKT_TYPE_IPV6: Route/Tag for packet type IPv6 (L3) + * @PDEV_PKT_TYPE_EAP: Route/Tag for packet type EAP (L2) + * + * Defines supported protocol types for routing/tagging + */ +enum wmi_pdev_pkt_routing_pkt_type { + PDEV_PKT_TYPE_ARP_IPV4, + PDEV_PKT_TYPE_NS_IPV6, + PDEV_PKT_TYPE_IGMP_IPV4, + PDEV_PKT_TYPE_MLD_IPV6, + PDEV_PKT_TYPE_DHCP_IPV4, + PDEV_PKT_TYPE_DHCP_IPV6, + PDEV_PKT_TYPE_DNS_TCP_IPV4, + PDEV_PKT_TYPE_DNS_TCP_IPV6, + PDEV_PKT_TYPE_DNS_UDP_IPV4, + PDEV_PKT_TYPE_DNS_UDP_IPV6, + PDEV_PKT_TYPE_ICMP_IPV4, + PDEV_PKT_TYPE_ICMP_IPV6, + PDEV_PKT_TYPE_TCP_IPV4, + PDEV_PKT_TYPE_TCP_IPV6, + PDEV_PKT_TYPE_UDP_IPV4, + PDEV_PKT_TYPE_UDP_IPV6, + PDEV_PKT_TYPE_IPV4, + PDEV_PKT_TYPE_IPV6, + PDEV_PKT_TYPE_EAP, + PDEV_PKT_TYPE_MAX +}; + +/** + * enum wmi_pdev_dest_ring_handler_type - packet routing options post CCE + * tagging + * @PDEV_WIFIRXCCE_USE_CCE_E: Use REO destination ring from CCE + * @PDEV_WIFIRXCCE_USE_ASPT_E: Use REO destination ring from ASPT + * @PDEV_WIFIRXCCE_USE_FT_E: Use REO destination ring from FSE + * @PDEV_WIFIRXCCE_USE_CCE2_E: Use REO destination ring from CCE2 + * + * Defines various options for routing policy + */ +enum wmi_pdev_dest_ring_handler_type { + PDEV_WIFIRXCCE_USE_CCE_E = 0, + PDEV_WIFIRXCCE_USE_ASPT_E = 1, + PDEV_WIFIRXCCE_USE_FT_E = 2, + PDEV_WIFIRXCCE_USE_CCE2_E = 3, +}; + +/** + * struct wmi_rx_pkt_protocol_routing_info - RX packet routing/tagging params + * @pdev_id: pdev id + * @op_code: Opcode option from wmi_pdev_pkt_routing_op_code_type enum + * @routing_type_bitmap: Bitmap of protocol that is being configured. Only + * one protocol can be configured in one command. Supported protocol list + * from enum wmi_pdev_pkt_routing_pkt_type + * @dest_ring_handler: Destination ring selection from enum + * wmi_pdev_dest_ring_handler_type + * @dest_ring: Destination ring number to use if dest ring handler is CCE + * @meta_data: Metadata to tag with for given protocol + */ +struct wmi_rx_pkt_protocol_routing_info { + uint32_t pdev_id; + enum wmi_pdev_pkt_routing_op_code_type op_code; + uint32_t routing_type_bitmap; + uint32_t dest_ring_handler; + uint32_t dest_ring; + uint32_t meta_data; +}; +#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */ + +#ifdef FEATURE_ANI_LEVEL_REQUEST +/* Maximum number of freqs for which ANI level can be requested */ +#define MAX_NUM_FREQS_FOR_ANI_LEVEL 20 + +/* A valid ANI level lies between 0 to 9 */ +#define MAX_ANI_LEVEL 9 + +struct wmi_host_ani_level_event { + uint32_t chan_freq; + uint32_t ani_level; +}; +#endif /* FEATURE_ANI_LEVEL_REQUEST */ + +/** + * struct wmi_install_key_comp_event - params of install key complete event + * @vdev_id: unique id identifying the VDEV, generated by the caller + * @key_ix: key index + * @key_flags: key flags + * @status: Event status + * @peer_macaddr: MAC address used for installing + */ +struct wmi_install_key_comp_event { + uint32_t vdev_id; + uint32_t key_ix; + uint32_t key_flags; + uint32_t status; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; +}; + +#endif /* _WMI_UNIFIED_PARAM_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_pmo_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_pmo_api.h new file mode 100644 index 0000000000000000000000000000000000000000..724eeaa915b147762e77fd2b6eca2b8514727e56 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_pmo_api.h @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: This file contains the API definitions for the Unified Wireless Module + * Interface (WMI) which are specific to Power management offloads (PMO). + */ + +#ifndef _WMI_UNIFIED_PMO_API_H_ +#define _WMI_UNIFIED_PMO_API_H_ + +#include "wlan_pmo_tgt_api.h" +#include "wlan_pmo_arp_public_struct.h" +#include "wlan_pmo_ns_public_struct.h" +#include "wlan_pmo_gtk_public_struct.h" +#include "wlan_pmo_wow_public_struct.h" +#include "wlan_pmo_pkt_filter_public_struct.h" + +#ifdef FEATURE_WLAN_D0WOW +/** + * wmi_unified_d0wow_enable_send() - WMI d0 wow enable function + * @wmi_handle: handle to WMI. + * @mac_id: radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_d0wow_enable_send(wmi_unified_t wmi_handle, + uint8_t mac_id); + +/** + * wmi_unified_d0wow_disable_send() - WMI d0 wow disable function + * @wmi_handle: handle to WMI. + * @mac_id: radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_d0wow_disable_send(wmi_unified_t wmi_handle, + uint8_t mac_id); +#endif /* FEATURE_WLAN_D0WOW */ + +/** + * wmi_unified_add_wow_wakeup_event_cmd() - Configures wow wakeup events. + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @bitmap: Event bitmap + * @enable: enable/disable + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_add_wow_wakeup_event_cmd(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint32_t *bitmap, + bool enable); + +/** + * wmi_unified_wow_patterns_to_fw_cmd() - Sends WOW patterns to FW. + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @ptrn_id: pattern id + * @ptrn: pattern + * @ptrn_len: pattern length + * @ptrn_offset: pattern offset + * @mask: mask + * @mask_len: mask length + * @user: true for user configured pattern and false for default pattern + * @default_patterns: default patterns + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_wow_patterns_to_fw_cmd( + wmi_unified_t wmi_handle, + uint8_t vdev_id, uint8_t ptrn_id, + const uint8_t *ptrn, uint8_t ptrn_len, + uint8_t ptrn_offset, const uint8_t *mask, + uint8_t mask_len, bool user, + uint8_t default_patterns); + +/** + * wmi_unified_add_clear_mcbc_filter_cmd() - set mcast filter command to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @multicastAddr: mcast address + * @clear_list: clear list flag + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_add_clear_mcbc_filter_cmd( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct qdf_mac_addr multicast_addr, + bool clear_list); + +/** + * wmi_unified_multiple_add_clear_mcbc_filter_cmd() - send multiple mcast + * filter command to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @filter_params: mcast filter params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_multiple_add_clear_mcbc_filter_cmd( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct pmo_mcast_filter_params *filter_param); + +#ifdef FEATURE_WLAN_RA_FILTERING +/** + * wmi_unified_wow_sta_ra_filter_cmd() - set RA filter pattern in fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @default_pattern: default pattern id + * @rate_limit_interval: rate limit interval + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_wow_sta_ra_filter_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint8_t default_pattern, + uint16_t rate_limit_interval); +#endif + +/** + * wmi_unified_enable_enhance_multicast_offload() - enhance multicast offload + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @action: true for enable else false + * + * To configure enhance multicast offload in to firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_enable_enhance_multicast_offload_cmd( + wmi_unified_t wmi_handle, uint8_t vdev_id, bool action); + +/** + * wmi_extract_gtk_rsp_event() - extract gtk rsp params from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @gtk_rsp_param: Pointer to gtk rsp parameters + * @ len: len of gtk rsp event + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_gtk_rsp_event( + wmi_unified_t wmi_hdl, void *evt_buf, + struct pmo_gtk_rsp_params *gtk_rsp_param, uint32_t len); + +/** + * wmi_unified_process_gtk_offload_getinfo_cmd() - send GTK offload cmd to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @offload_req_opcode: gtk offload flag + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_gtk_offload_getinfo_cmd( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint64_t offload_req_opcode); + +/** + * wmi_unified_action_frame_patterns_cmd() - send action filter wmi cmd + * @wmi_handle: wmi handler + * @action_params: pointer to action_params + * + * Return: 0 for success, otherwise appropriate error code + */ +QDF_STATUS wmi_unified_action_frame_patterns_cmd( + wmi_unified_t wmi_handle, + struct pmo_action_wakeup_set_params *action_params); + +/** + * wmi_unified_send_gtk_offload_cmd() - send GTK offload command to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @params: GTK offload parameters + * @enable_offload: flag to enable offload + * @gtk_offload_opcode: gtk offload flag + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_gtk_offload_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct pmo_gtk_req *params, + bool enable_offload, + uint32_t gtk_offload_opcode); + +/** + * wmi_unified_enable_arp_ns_offload_cmd() - enable ARP NS offload + * @wmi_hdl: wmi handle + * @arp_offload_req: arp offload request + * @ns_offload_req: ns offload request + * @vdev_id: vdev id + * + * To configure ARP NS off load data to firmware + * when target goes to wow mode. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_enable_arp_ns_offload_cmd( + wmi_unified_t wmi_handle, + struct pmo_arp_offload_params *arp_offload_req, + struct pmo_ns_offload_params *ns_offload_req, + uint8_t vdev_id); + +/** + * wmi_unified_conf_hw_filter_cmd() - Configure hardware filter in DTIM mode + * @wmi_handle: wmi handle + * @req: request parameters to configure to firmware + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_unified_conf_hw_filter_cmd(wmi_unified_t wmi_handle, + struct pmo_hw_filter_params *req); + +#ifdef FEATURE_WLAN_LPHB +/** + * wmi_unified_lphb_config_hbenable_cmd() - enable command of LPHB configuration + * @wmi_handle: wmi handle + * @params: configuration info + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_lphb_config_hbenable_cmd( + wmi_unified_t wmi_handle, + wmi_hb_set_enable_cmd_fixed_param *params); + +/** + * wmi_unified_lphb_config_tcp_params_cmd() - set tcp params of LPHB config req + * @wmi_handle: wmi handle + * @lphb_conf_req: lphb config request + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_lphb_config_tcp_params_cmd( + wmi_unified_t wmi_handle, + wmi_hb_set_tcp_params_cmd_fixed_param *lphb_conf_req); + +/** + * wmi_unified_lphb_config_tcp_pkt_filter_cmd() - config LPHB tcp packet filter + * @wmi_handle: wmi handle + * @g_hb_tcp_filter_fp: lphb config request + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_lphb_config_tcp_pkt_filter_cmd( + wmi_unified_t wmi_handle, + wmi_hb_set_tcp_pkt_filter_cmd_fixed_param *g_hb_tcp_filter_fp); + +/** + * wmi_unified_lphb_config_udp_params_cmd() - configure LPHB udp param command + * @wmi_handle: wmi handle + * @lphb_conf_req: lphb config request + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_lphb_config_udp_params_cmd( + wmi_unified_t wmi_handle, + wmi_hb_set_udp_params_cmd_fixed_param *lphb_conf_req); + +/** + * wmi_unified_lphb_config_udp_pkt_filter_cmd() - configure LPHB udp pkt filter + * @wmi_handle: wmi handle + * @lphb_conf_req: lphb config request + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_lphb_config_udp_pkt_filter_cmd( + wmi_unified_t wmi_handle, + wmi_hb_set_udp_pkt_filter_cmd_fixed_param *lphb_conf_req); +#endif /* FEATURE_WLAN_LPHB */ + +#ifdef WLAN_FEATURE_PACKET_FILTERING +/** + * wmi_unified_enable_disable_packet_filter_cmd() - enable/disable packet filter + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @enable: Flag to enable/disable packet filter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_enable_disable_packet_filter_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id, + bool enable); + +/** + * wmi_unified_config_packet_filter_cmd() - configure packet filter in target + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @rcv_filter_param: Packet filter parameters + * @filter_id: Filter id + * @enable: Flag to add/delete packet filter configuration + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_config_packet_filter_cmd( + wmi_unified_t wmi_handle, + uint8_t vdev_id, struct pmo_rcv_pkt_fltr_cfg *rcv_filter_param, + uint8_t filter_id, bool enable); +#endif /* WLAN_FEATURE_PACKET_FILTERING */ + +/** + * wmi_unified_wow_delete_pattern_cmd() - delete wow pattern in target + * @wmi_handle: wmi handle + * @ptrn_id: pattern id + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_wow_delete_pattern_cmd(wmi_unified_t wmi_handle, + uint8_t ptrn_id, + uint8_t vdev_id); + +/** + * wmi_unified_host_wakeup_ind_to_fw_cmd() - send wakeup ind to fw + * @wmi_handle: wmi handle + * + * Sends host wakeup indication to FW. On receiving this indication, + * FW will come out of WOW. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_host_wakeup_ind_to_fw_cmd(wmi_unified_t wmi_handle); + +/** + * wmi_unified_wow_timer_pattern_cmd() - set timer pattern tlv, so that + * firmware will wake up host after + * specified time is elapsed + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @cookie: value to identify reason why host set up wake call. + * @time: time in ms + * + * Return: QDF status + */ +QDF_STATUS wmi_unified_wow_timer_pattern_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint32_t cookie, uint32_t time); + +#ifdef WLAN_FEATURE_EXTWOW_SUPPORT +/** + * wmi_unified_enable_ext_wow_cmd() - enable ext wow in fw + * @wmi_handle: wmi handle + * @params: ext wow params + * + * Return:QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_enable_ext_wow_cmd(wmi_unified_t wmi_handle, + struct ext_wow_params *params); + +/** + * wmi_unified_set_app_type2_params_in_fw_cmd() - set app type2 params in fw + * @wmi_handle: wmi handle + * @appType2Params: app type2 params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_app_type2_params_in_fw_cmd( + wmi_unified_t wmi_handle, + struct app_type2_params *appType2Params); + +/** + * wmi_unified_app_type1_params_in_fw_cmd() - set app type1 params in fw + * @wmi_hdl: wmi handle + * @app_type1_params: app type1 params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_app_type1_params_in_fw_cmd( + wmi_unified_t wmi_handle, + struct app_type1_params *app_type1_params); +#endif /* WLAN_FEATURE_EXTWOW_SUPPORT */ + +#endif /* _WMI_UNIFIED_PMO_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h new file mode 100644 index 0000000000000000000000000000000000000000..10bf9a8fdeb24a57a204ef7bddd9d40c6e4b8198 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h @@ -0,0 +1,2797 @@ +/* + * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API definitions for the Unified Wireless + * Module Interface (WMI). + */ +#ifndef _WMI_UNIFIED_PRIV_H_ +#define _WMI_UNIFIED_PRIV_H_ +#include +#include "wmi_unified_api.h" +#include "wmi_unified_param.h" +#include "wlan_scan_ucfg_api.h" +#include "qdf_atomic.h" +#include + +#ifdef WLAN_FW_OFFLOAD +#include "wlan_fwol_public_structs.h" +#endif + +#ifdef DFS_COMPONENT_ENABLE +#include +#endif +#include +#ifdef WLAN_SUPPORT_GREEN_AP +#include "wlan_green_ap_api.h" +#endif + +#ifdef WLAN_FEATURE_NAN +#include "nan_public_structs.h" +#endif + +#ifdef WLAN_SUPPORT_TWT +#include "wmi_unified_twt_param.h" +#endif + +#ifdef WMI_SMART_ANT_SUPPORT +#include "wmi_unified_smart_ant_param.h" +#endif + +#ifdef WMI_DBR_SUPPORT +#include "wmi_unified_dbr_param.h" +#endif + +#ifdef WMI_ATF_SUPPORT +#include "wmi_unified_atf_param.h" +#endif + +#ifdef WLAN_FEATURE_INTEROP_ISSUES_AP +#include +#endif + +#ifdef WLAN_CFR_ENABLE +#include +#endif + +#define WMI_UNIFIED_MAX_EVENT 0x100 + +#ifdef WMI_EXT_DBG + +#define WMI_EXT_DBG_DIR "WMI_EXT_DBG" +#define WMI_EXT_DBG_FILE "wmi_log" +#define WMI_EXT_DBG_FILE_PERM (QDF_FILE_USR_READ | \ + QDF_FILE_GRP_READ | \ + QDF_FILE_OTH_READ) +#define WMI_EXT_DBG_QUEUE_SIZE 1024 +#define WMI_EXT_DBG_DUMP_ROW_SIZE 16 +#define WMI_EXT_DBG_DUMP_GROUP_SIZE 1 + + +/** + * enum WMI_MSG_TYPE - WMI message types + * @ WMI_MSG_TYPE_CMD - Message is of type WMI command + * @ WMI_MSG_TYPE_EVENT - Message is of type WMI event + */ +enum WMI_MSG_TYPE { + WMI_MSG_TYPE_CMD = 0, + WMI_MSG_TYPE_EVENT, +}; + +/** + * struct wmi_ext_dbg_msg - WMI command/event msg details + * @ node - qdf list node of wmi messages + * @ len - command/event message length + * @ ts - Time of WMI command/event handling + * @ WMI_MSG_TYPE - message type + * @ bug - command/event buffer + */ +struct wmi_ext_dbg_msg { + qdf_list_node_t node; + uint32_t len; + uint64_t ts; + enum WMI_MSG_TYPE type; + uint8_t buf[0]; +}; +#endif /*WMI_EXT_DBG */ + +#ifdef WMI_INTERFACE_EVENT_LOGGING + +#ifndef WMI_EVENT_DEBUG_MAX_ENTRY +#define WMI_EVENT_DEBUG_MAX_ENTRY (1024) +#endif + +#ifndef WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH +#define WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH (16) +#endif + +/* wmi_mgmt commands */ +#ifndef WMI_MGMT_EVENT_DEBUG_MAX_ENTRY +#define WMI_MGMT_EVENT_DEBUG_MAX_ENTRY (256) +#endif +/* wmi diag rx events max buffer */ +#ifndef WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY +#define WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY (256) +#endif + +#ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING +#ifndef WMI_FILTERED_CMD_EVT_SUPPORTED +#define WMI_FILTERED_CMD_EVT_SUPPORTED (10) +#endif + +#ifndef WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY +#define WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY (1024) +#endif +#endif /* WMI_INTERFACE_FILTERED_EVENT_LOGGING */ + +#define wmi_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_WMI, ## params) +#define wmi_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_WMI, ## params) +#define wmi_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_WMI, ## params) +#define wmi_info(params...) QDF_TRACE_INFO(QDF_MODULE_ID_WMI, ## params) +#define wmi_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_WMI, ## params) + +#define wmi_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_WMI, ## params) +#define wmi_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_WMI, ## params) +#define wmi_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_WMI, ## params) +#define wmi_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_WMI, ## params) +#define wmi_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_WMI, ## params) + +#define wmi_alert_rl(params...) QDF_TRACE_FATAL_RL(QDF_MODULE_ID_WMI, params) +#define wmi_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_WMI, params) +#define wmi_warn_rl(params...) QDF_TRACE_WARN_RL(QDF_MODULE_ID_WMI, params) +#define wmi_info_rl(params...) QDF_TRACE_INFO_RL(QDF_MODULE_ID_WMI, params) +#define wmi_debug_rl(params...) QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_WMI, params) + +/** + * struct wmi_command_debug - WMI command log buffer data type + * @ command - Store WMI Command id + * @ data - Stores WMI command data + * @ time - Time of WMI command handling + */ +struct wmi_command_debug { + uint32_t command; + /*16 bytes of WMI cmd excluding TLV and WMI headers */ + uint32_t data[WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH/sizeof(uint32_t)]; + uint64_t time; +}; + +/** + * struct wmi_event_debug - WMI event log buffer data type + * @ command - Store WMI Event id + * @ data - Stores WMI Event data + * @ time - Time of WMI Event handling + */ +struct wmi_event_debug { + uint32_t event; + /*16 bytes of WMI event data excluding TLV header */ + uint32_t data[WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH/sizeof(uint32_t)]; + uint64_t time; +}; + +/** + * struct wmi_command_header - Type for accessing frame data + * @ type - 802.11 Frame type + * @ subType - 802.11 Frame subtype + * @ protVer - 802.11 Version + */ +struct wmi_command_header { +#ifndef ANI_LITTLE_BIT_ENDIAN + + uint32_t sub_type:4; + uint32_t type:2; + uint32_t prot_ver:2; + +#else + + uint32_t prot_ver:2; + uint32_t type:2; + uint32_t sub_type:4; + +#endif + +}; + +/** + * struct wmi_log_buf_t - WMI log buffer information type + * @buf - Refernce to WMI log buffer + * @ length - length of buffer + * @ buf_tail_idx - Tail index of buffer + * @ p_buf_tail_idx - refernce to buffer tail index. It is added to accommodate + * unified design since MCL uses global variable for buffer tail index + * @ size - the size of the buffer in number of entries + */ +struct wmi_log_buf_t { + void *buf; + uint32_t length; + uint32_t buf_tail_idx; + uint32_t *p_buf_tail_idx; + uint32_t size; +}; + +/** + * struct wmi_debug_log_info - Meta data to hold information of all buffers + * used for WMI logging + * @wmi_command_log_buf_info - Buffer info for WMI Command log + * @wmi_command_tx_cmp_log_buf_info - Buffer info for WMI Command Tx completion + * log + * @wmi_event_log_buf_info - Buffer info for WMI Event log + * @wmi_rx_event_log_buf_info - Buffer info for WMI event received log + * @wmi_mgmt_command_log_buf_info - Buffer info for WMI Management Command log + * @wmi_mgmt_command_tx_cmp_log_buf_info - Buffer info for WMI Management + * Command Tx completion log + * @wmi_mgmt_event_log_buf_info - Buffer info for WMI Management event log + * @wmi_diag_event_log_buf_info - Buffer info for WMI diag event log + * @wmi_record_lock - Lock WMI recording + * @wmi_logging_enable - Enable/Disable state for WMI logging + * @wmi_id_to_name - Function refernce to API to convert Command id to + * string name + * @wmi_log_debugfs_dir - refernce to debugfs directory + * @filtered_wmi_cmds - Buffer to save inputs from user on + * which WMI commands to record + * @filtered_wmi_cmds_idx - target cmd index + * @filtered_wmi_evts - Buffer to save inputs from user on + * which WMI event to record + * @filtered_wmi_evts_idx - target evt index + * @wmi_filtered_command_log - buffer to record user specified WMI commands + * @wmi_filtered_event_log - buffer to record user specified WMI events + */ +struct wmi_debug_log_info { + struct wmi_log_buf_t wmi_command_log_buf_info; + struct wmi_log_buf_t wmi_command_tx_cmp_log_buf_info; + + struct wmi_log_buf_t wmi_event_log_buf_info; + struct wmi_log_buf_t wmi_rx_event_log_buf_info; + + struct wmi_log_buf_t wmi_mgmt_command_log_buf_info; + struct wmi_log_buf_t wmi_mgmt_command_tx_cmp_log_buf_info; + struct wmi_log_buf_t wmi_mgmt_event_log_buf_info; + struct wmi_log_buf_t wmi_diag_event_log_buf_info; + + qdf_spinlock_t wmi_record_lock; + bool wmi_logging_enable; + struct dentry *wmi_log_debugfs_dir; + +#ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING + uint32_t *filtered_wmi_cmds; + uint32_t filtered_wmi_cmds_idx; + uint32_t *filtered_wmi_evts; + uint32_t filtered_wmi_evts_idx; + struct wmi_log_buf_t *wmi_filtered_command_log; + struct wmi_log_buf_t *wmi_filtered_event_log; +#endif +}; + +/** + * enum WMI_RECORD_TYPE - User specified WMI logging types + * @ WMI_CMD - wmi command id + * @ WMI_EVT - wmi event id + */ +enum WMI_RECORD_TYPE { + WMI_CMD = 1, + WMI_EVT = 2, +}; + +#endif /*WMI_INTERFACE_EVENT_LOGGING */ + +#ifdef WLAN_OPEN_SOURCE +struct fwdebug { + struct sk_buff_head fwlog_queue; + struct completion fwlog_completion; + A_BOOL fwlog_open; +}; +#endif /* WLAN_OPEN_SOURCE */ + +/** + * struct wmi_wq_dbg_info - WMI WQ debug info + * @ wd_msg_type_id - wmi event id + * @ wmi_wq - WMI workqueue struct + * @ task - WMI workqueue task struct + */ +struct wmi_wq_dbg_info { + uint32_t wd_msg_type_id; + qdf_workqueue_t *wmi_wq; + qdf_thread_t *task; +}; + +struct wmi_ops { +QDF_STATUS (*send_vdev_create_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct vdev_create_params *param); + +QDF_STATUS (*send_vdev_delete_cmd)(wmi_unified_t wmi_handle, + uint8_t if_id); + +QDF_STATUS (*send_vdev_nss_chain_params_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct vdev_nss_chains *user_cfg); + +QDF_STATUS (*send_vdev_stop_cmd)(wmi_unified_t wmi, + uint8_t vdev_id); + +QDF_STATUS (*send_vdev_down_cmd)(wmi_unified_t wmi, + uint8_t vdev_id); + +QDF_STATUS (*send_vdev_start_cmd)(wmi_unified_t wmi, + struct vdev_start_params *req); + +QDF_STATUS (*send_vdev_set_nac_rssi_cmd)(wmi_unified_t wmi, + struct vdev_scan_nac_rssi_params *req); + +QDF_STATUS (*send_peer_flush_tids_cmd)(wmi_unified_t wmi, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + struct peer_flush_params *param); + +QDF_STATUS (*send_peer_delete_cmd)(wmi_unified_t wmi, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + uint8_t vdev_id); + +QDF_STATUS (*send_peer_delete_all_cmd)( + wmi_unified_t wmi, + struct peer_delete_all_params *param); + +QDF_STATUS +(*send_peer_unmap_conf_cmd)(wmi_unified_t wmi, + uint8_t vdev_id, + uint32_t peer_id_cnt, + uint16_t *peer_id_list); + +QDF_STATUS (*send_peer_param_cmd)(wmi_unified_t wmi, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + struct peer_set_params *param); + +QDF_STATUS (*send_vdev_up_cmd)(wmi_unified_t wmi, + uint8_t bssid[QDF_MAC_ADDR_SIZE], + struct vdev_up_params *params); + +QDF_STATUS (*send_peer_create_cmd)(wmi_unified_t wmi, + struct peer_create_params *param); + +#ifdef WLAN_SUPPORT_GREEN_AP +QDF_STATUS (*send_green_ap_ps_cmd)(wmi_unified_t wmi_handle, + uint32_t value, uint8_t pdev_id); + +QDF_STATUS (*extract_green_ap_egap_status_info)( + uint8_t *evt_buf, + struct wlan_green_ap_egap_status_info *egap_status_info_params); +#endif + +QDF_STATUS +(*send_pdev_utf_cmd)(wmi_unified_t wmi_handle, + struct pdev_utf_params *param, + uint8_t mac_id); +QDF_STATUS +(*send_pdev_param_cmd)(wmi_unified_t wmi_handle, + struct pdev_params *param, + uint8_t mac_id); + +QDF_STATUS +(*send_pdev_set_hw_mode_cmd)(wmi_unified_t wmi_handle, + uint32_t hw_mode_index); + +QDF_STATUS (*send_suspend_cmd)(wmi_unified_t wmi_handle, + struct suspend_params *param, + uint8_t mac_id); + +QDF_STATUS (*send_resume_cmd)(wmi_unified_t wmi_handle, + uint8_t mac_id); + +#ifdef FEATURE_WLAN_D0WOW +QDF_STATUS (*send_d0wow_enable_cmd)(wmi_unified_t wmi_handle, + uint8_t mac_id); +QDF_STATUS (*send_d0wow_disable_cmd)(wmi_unified_t wmi_handle, + uint8_t mac_id); +#endif + +#ifdef FEATURE_BLACKLIST_MGR +QDF_STATUS +(*send_reject_ap_list_cmd)(struct wmi_unified *wmi_handle, + struct reject_ap_params *reject_params); +#endif + +QDF_STATUS (*send_wow_enable_cmd)(wmi_unified_t wmi_handle, + struct wow_cmd_params *param, + uint8_t mac_id); + +QDF_STATUS (*send_set_ap_ps_param_cmd)(wmi_unified_t wmi_handle, + uint8_t *peer_addr, + struct ap_ps_params *param); + +QDF_STATUS (*send_set_sta_ps_param_cmd)(wmi_unified_t wmi_handle, + struct sta_ps_params *param); + +QDF_STATUS (*send_crash_inject_cmd)(wmi_unified_t wmi_handle, + struct crash_inject *param); + +QDF_STATUS +(*send_dbglog_cmd)(wmi_unified_t wmi_handle, + struct dbglog_params *dbglog_param); + +QDF_STATUS (*send_vdev_set_param_cmd)(wmi_unified_t wmi_handle, + struct vdev_set_params *param); + +QDF_STATUS (*send_vdev_sifs_trigger_cmd)(wmi_unified_t wmi_handle, + struct sifs_trigger_param *param); + +QDF_STATUS (*send_stats_request_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct stats_request_params *param); + +QDF_STATUS (*send_packet_log_enable_cmd)(wmi_unified_t wmi_handle, + WMI_HOST_PKTLOG_EVENT PKTLOG_EVENT, uint8_t mac_id); + +QDF_STATUS (*send_packet_log_disable_cmd)(wmi_unified_t wmi_handle, + uint8_t mac_id); + +QDF_STATUS (*send_fd_tmpl_cmd)(wmi_unified_t wmi_handle, + struct fils_discovery_tmpl_params *param); + +QDF_STATUS (*send_beacon_send_cmd)(wmi_unified_t wmi_handle, + struct beacon_params *param); + +QDF_STATUS (*send_beacon_tmpl_send_cmd)(wmi_unified_t wmi_handle, + struct beacon_tmpl_params *param); + +QDF_STATUS (*send_peer_assoc_cmd)(wmi_unified_t wmi_handle, + struct peer_assoc_params *param); + +QDF_STATUS (*send_scan_start_cmd)(wmi_unified_t wmi_handle, + struct scan_req_params *param); + +QDF_STATUS (*send_scan_stop_cmd)(wmi_unified_t wmi_handle, + struct scan_cancel_param *param); + +QDF_STATUS (*send_scan_chan_list_cmd)(wmi_unified_t wmi_handle, + struct scan_chan_list_params *param); + +QDF_STATUS (*send_mgmt_cmd)(wmi_unified_t wmi_handle, + struct wmi_mgmt_params *param); + +QDF_STATUS (*send_offchan_data_tx_cmd)(wmi_unified_t wmi_handle, + struct wmi_offchan_data_tx_params *param); + +QDF_STATUS (*send_modem_power_state_cmd)(wmi_unified_t wmi_handle, + uint32_t param_value); + +QDF_STATUS (*send_set_sta_ps_mode_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint8_t val); + +QDF_STATUS (*send_idle_roam_monitor_cmd)(wmi_unified_t wmi_handle, + uint8_t val); + +QDF_STATUS (*send_get_temperature_cmd)(wmi_unified_t wmi_handle); + +#ifdef CONVERGED_P2P_ENABLE +QDF_STATUS (*send_set_p2pgo_oppps_req_cmd)(wmi_unified_t wmi_handle, + struct p2p_ps_params *oppps); + +QDF_STATUS (*send_set_p2pgo_noa_req_cmd)(wmi_unified_t wmi_handle, + struct p2p_ps_params *noa); + +#ifdef FEATURE_P2P_LISTEN_OFFLOAD +QDF_STATUS (*send_p2p_lo_start_cmd)(wmi_unified_t wmi_handle, + struct p2p_lo_start *param); + +QDF_STATUS (*send_p2p_lo_stop_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id); +#endif /* FEATURE_P2P_LISTEN_OFFLOAD */ +#endif /* CONVERGED_P2P_ENABLE */ + +QDF_STATUS (*send_set_smps_params_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, + int value); + +QDF_STATUS (*send_set_mimops_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, int value); + +QDF_STATUS (*send_set_sta_uapsd_auto_trig_cmd)(wmi_unified_t wmi_handle, + struct sta_uapsd_trig_params *param); + +#ifdef WLAN_FEATURE_DSRC +QDF_STATUS (*send_ocb_set_utc_time_cmd)(wmi_unified_t wmi_handle, + struct ocb_utc_param *utc); + +QDF_STATUS (*send_ocb_get_tsf_timer_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +QDF_STATUS (*send_ocb_start_timing_advert_cmd)(wmi_unified_t wmi_handle, + struct ocb_timing_advert_param *timing_advert); + +QDF_STATUS (*send_ocb_stop_timing_advert_cmd)(wmi_unified_t wmi_handle, + struct ocb_timing_advert_param *timing_advert); + +QDF_STATUS (*send_dcc_get_stats_cmd)(wmi_unified_t wmi_handle, + struct ocb_dcc_get_stats_param *get_stats_param); + +QDF_STATUS (*send_dcc_clear_stats_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint32_t dcc_stats_bitmap); + +QDF_STATUS (*send_dcc_update_ndl_cmd)(wmi_unified_t wmi_handle, + struct ocb_dcc_update_ndl_param *update_ndl_param); + +QDF_STATUS (*send_ocb_set_config_cmd)(wmi_unified_t wmi_handle, + struct ocb_config *config); +QDF_STATUS (*extract_ocb_chan_config_resp)(wmi_unified_t wmi_hdl, + void *evt_buf, + uint32_t *status); +QDF_STATUS (*extract_ocb_tsf_timer)(wmi_unified_t wmi_hdl, + void *evt_buf, + struct ocb_get_tsf_timer_response *resp); +QDF_STATUS (*extract_dcc_update_ndl_resp)(wmi_unified_t wmi_hdl, + void *evt_buf, struct ocb_dcc_update_ndl_response *resp); +QDF_STATUS (*extract_dcc_stats)(wmi_unified_t wmi_hdl, + void *evt_buf, + struct ocb_dcc_get_stats_response **response); +#endif /* WLAN_FEATURE_DSRC */ +QDF_STATUS (*send_lro_config_cmd)(wmi_unified_t wmi_handle, + struct wmi_lro_config_cmd_t *wmi_lro_cmd); + +QDF_STATUS (*send_set_thermal_mgmt_cmd)(wmi_unified_t wmi_handle, + struct thermal_cmd_params *thermal_info); + +QDF_STATUS (*send_peer_rate_report_cmd)(wmi_unified_t wmi_handle, + struct wmi_peer_rate_report_params *rate_report_params); + +#ifdef WMI_CONCURRENCY_SUPPORT +QDF_STATUS (*send_set_mcc_channel_time_quota_cmd) + (wmi_unified_t wmi_handle, + uint32_t adapter_1_chan_freq, + uint32_t adapter_1_quota, uint32_t adapter_2_chan_freq); + +QDF_STATUS (*send_set_mcc_channel_time_latency_cmd) + (wmi_unified_t wmi_handle, + uint32_t mcc_channel_freq, uint32_t mcc_channel_time_latency); + +QDF_STATUS (*send_set_enable_disable_mcc_adaptive_scheduler_cmd)( + wmi_unified_t wmi_handle, uint32_t mcc_adaptive_scheduler, + uint32_t pdev_id); +#endif /* WMI_CONCURRENCY_SUPPORT */ + +QDF_STATUS (*send_p2p_go_set_beacon_ie_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint8_t *p2p_ie); + +QDF_STATUS (*send_probe_rsp_tmpl_send_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct wmi_probe_resp_params *probe_rsp_info); + +QDF_STATUS (*send_setup_install_key_cmd)(wmi_unified_t wmi_handle, + struct set_key_params *key_params); + +QDF_STATUS (*send_reset_passpoint_network_list_cmd)(wmi_unified_t wmi_handle, + struct wifi_passpoint_req_param *req); + +#ifdef WMI_ROAM_SUPPORT +#ifdef FEATURE_LFR_SUBNET_DETECTION +QDF_STATUS (*send_set_gateway_params_cmd)(wmi_unified_t wmi_handle, + struct gateway_update_req_param *req); +#endif /* FEATURE_LFR_SUBNET_DETECTION */ + +#ifdef FEATURE_RSSI_MONITOR +QDF_STATUS (*send_set_rssi_monitoring_cmd)(wmi_unified_t wmi_handle, + struct rssi_monitor_param *req); +#endif /* FEATURE_RSSI_MONITOR */ + +QDF_STATUS (*send_roam_scan_offload_rssi_thresh_cmd)(wmi_unified_t wmi_handle, + struct roam_offload_scan_rssi_params *roam_req); + +QDF_STATUS (*send_roam_mawc_params_cmd)(wmi_unified_t wmi_handle, + struct wmi_mawc_roam_params *params); + +QDF_STATUS (*send_roam_scan_filter_cmd)(wmi_unified_t wmi_handle, + struct roam_scan_filter_params *roam_req); + +QDF_STATUS (*send_roam_scan_offload_mode_cmd)(wmi_unified_t wmi_handle, + wmi_start_scan_cmd_fixed_param *scan_cmd_fp, + struct roam_offload_scan_params *roam_req); + +QDF_STATUS (*send_roam_scan_offload_ap_profile_cmd)(wmi_unified_t wmi_handle, + struct ap_profile_params *ap_profile); + +QDF_STATUS (*send_roam_scan_offload_cmd)(wmi_unified_t wmi_handle, + uint32_t command, uint32_t vdev_id); + +QDF_STATUS (*send_roam_scan_offload_scan_period_cmd)(wmi_unified_t wmi_handle, + struct roam_scan_period_params *params); + +QDF_STATUS (*send_roam_scan_offload_chan_list_cmd)(wmi_unified_t wmi_handle, + uint8_t chan_count, + uint32_t *chan_list, + uint8_t list_type, uint32_t vdev_id); + +QDF_STATUS (*send_roam_scan_offload_rssi_change_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, + int32_t rssi_change_thresh, + uint32_t bcn_rssi_weight, + uint32_t hirssi_delay_btw_scans); + +QDF_STATUS (*send_per_roam_config_cmd)(wmi_unified_t wmi_handle, + struct wmi_per_roam_config_req *req_buf); + +QDF_STATUS (*send_offload_11k_cmd)(wmi_unified_t wmi_handle, + struct wmi_11k_offload_params *params); + +QDF_STATUS (*send_invoke_neighbor_report_cmd)(wmi_unified_t wmi_handle, + struct wmi_invoke_neighbor_report_params *params); + +QDF_STATUS (*send_roam_bss_load_config)(wmi_unified_t wmi_handle, + struct wmi_bss_load_config *params); + +QDF_STATUS (*send_disconnect_roam_params)( + wmi_unified_t wmi_handle, + struct wmi_disconnect_roam_params *req); + +QDF_STATUS (*send_idle_roam_params)(wmi_unified_t wmi_handle, + struct wmi_idle_roam_params *req); + +QDF_STATUS (*send_roam_preauth_status)(wmi_unified_t wmi_handle, + struct wmi_roam_auth_status_params *params); + +QDF_STATUS (*send_btm_config)(wmi_unified_t wmi_handle, + struct wmi_btm_config *params); + +QDF_STATUS (*send_limit_off_chan_cmd)(wmi_unified_t wmi_handle, + struct wmi_limit_off_chan_param *limit_off_chan_param); + +#ifdef WLAN_FEATURE_FILS_SK +QDF_STATUS (*send_roam_scan_hlp_cmd) (wmi_unified_t wmi_handle, + struct hlp_params *params); +#endif /* WLAN_FEATURE_FILS_SK */ + +#ifdef FEATURE_WLAN_ESE +QDF_STATUS (*send_plm_stop_cmd)(wmi_unified_t wmi_handle, + const struct plm_req_params *plm); + +QDF_STATUS (*send_plm_start_cmd)(wmi_unified_t wmi_handle, + const struct plm_req_params *plm); +#endif /* FEATURE_WLAN_ESE */ + +#ifdef WLAN_FEATURE_ROAM_OFFLOAD +QDF_STATUS (*send_set_ric_req_cmd)(wmi_unified_t wmi_handle, void *msg, + uint8_t is_add_ts); + +QDF_STATUS (*send_process_roam_synch_complete_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +QDF_STATUS (*send_roam_invoke_cmd)(wmi_unified_t wmi_handle, + struct wmi_roam_invoke_cmd *roaminvoke, + uint32_t ch_hz); +#endif /* WLAN_FEATURE_ROAM_OFFLOAD */ +#endif /* WMI_ROAM_SUPPORT */ + +QDF_STATUS (*send_scan_probe_setoui_cmd)(wmi_unified_t wmi_handle, + struct scan_mac_oui *psetoui); + +QDF_STATUS (*send_set_passpoint_network_list_cmd)(wmi_unified_t wmi_handle, + struct wifi_passpoint_req_param *req); + +QDF_STATUS (*send_set_epno_network_list_cmd)(wmi_unified_t wmi_handle, + struct wifi_enhanced_pno_params *req); + +QDF_STATUS (*send_extscan_get_capabilities_cmd)(wmi_unified_t wmi_handle, + struct extscan_capabilities_params *pgetcapab); + +QDF_STATUS (*send_extscan_get_cached_results_cmd)(wmi_unified_t wmi_handle, + struct extscan_cached_result_params *pcached_results); + +QDF_STATUS (*send_extscan_stop_change_monitor_cmd)(wmi_unified_t wmi_handle, + struct extscan_capabilities_reset_params *reset_req); + +QDF_STATUS (*send_extscan_start_change_monitor_cmd)(wmi_unified_t wmi_handle, + struct extscan_set_sig_changereq_params * + psigchange); + +QDF_STATUS (*send_extscan_stop_hotlist_monitor_cmd)(wmi_unified_t wmi_handle, + struct extscan_bssid_hotlist_reset_params *photlist_reset); + +QDF_STATUS (*send_extscan_start_hotlist_monitor_cmd)(wmi_unified_t wmi_handle, + struct extscan_bssid_hotlist_set_params *params); + +QDF_STATUS (*send_stop_extscan_cmd)(wmi_unified_t wmi_handle, + struct extscan_stop_req_params *pstopcmd); + +QDF_STATUS (*send_start_extscan_cmd)(wmi_unified_t wmi_handle, + struct wifi_scan_cmd_req_params *pstart); + +QDF_STATUS (*send_csa_offload_enable_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +QDF_STATUS (*send_pno_stop_cmd)(wmi_unified_t wmi_handle, uint8_t vdev_id); + +QDF_STATUS (*send_pno_start_cmd)(wmi_unified_t wmi_handle, + struct pno_scan_req_params *pno); + +QDF_STATUS (*send_nlo_mawc_cmd)(wmi_unified_t wmi_handle, + struct nlo_mawc_params *params); + +#ifdef IPA_OFFLOAD +QDF_STATUS (*send_ipa_offload_control_cmd)(wmi_unified_t wmi_handle, + struct ipa_uc_offload_control_params *ipa_offload); +#endif + +#ifdef WLAN_FEATURE_LINK_LAYER_STATS +QDF_STATUS (*send_process_ll_stats_clear_cmd)(wmi_unified_t wmi_handle, + const struct ll_stats_clear_params *clear_req); + +QDF_STATUS (*send_process_ll_stats_set_cmd)(wmi_unified_t wmi_handle, + const struct ll_stats_set_params *set_req); + +QDF_STATUS (*send_process_ll_stats_get_cmd)(wmi_unified_t wmi_handle, + const struct ll_stats_get_params *get_req); +#endif + +QDF_STATUS (*send_congestion_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +QDF_STATUS (*send_snr_request_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_snr_cmd)(wmi_unified_t wmi_handle, uint8_t vdev_id); + +QDF_STATUS (*send_link_status_req_cmd)(wmi_unified_t wmi_handle, + struct link_status_params *link_status); +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +QDF_STATUS (*send_add_wow_wakeup_event_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint32_t *bitmap, + bool enable); + +QDF_STATUS (*send_wow_patterns_to_fw_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, uint8_t ptrn_id, + const uint8_t *ptrn, uint8_t ptrn_len, + uint8_t ptrn_offset, const uint8_t *mask, + uint8_t mask_len, bool user, + uint8_t default_patterns); + +QDF_STATUS (*send_enable_arp_ns_offload_cmd)(wmi_unified_t wmi_handle, + struct pmo_arp_offload_params *arp_offload_req, + struct pmo_ns_offload_params *ns_offload_req, + uint8_t vdev_id); + +QDF_STATUS (*send_conf_hw_filter_cmd)(wmi_unified_t wmi, + struct pmo_hw_filter_params *req); + +QDF_STATUS (*send_enable_enhance_multicast_offload_cmd)( + wmi_unified_t wmi_handle, + uint8_t vdev_id, bool action); + +QDF_STATUS (*send_add_clear_mcbc_filter_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct qdf_mac_addr multicast_addr, + bool clearList); + +QDF_STATUS (*send_multiple_add_clear_mcbc_filter_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct pmo_mcast_filter_params *filter_param); + +QDF_STATUS (*send_gtk_offload_cmd)(wmi_unified_t wmi_handle, uint8_t vdev_id, + struct pmo_gtk_req *params, + bool enable_offload, + uint32_t gtk_offload_opcode); + +QDF_STATUS (*send_process_gtk_offload_getinfo_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint64_t offload_req_opcode); + +QDF_STATUS (*send_wow_sta_ra_filter_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, uint8_t default_pattern, + uint16_t rate_limit_interval); + +QDF_STATUS (*send_action_frame_patterns_cmd)(wmi_unified_t wmi_handle, + struct pmo_action_wakeup_set_params *action_params); + +QDF_STATUS (*extract_gtk_rsp_event)(wmi_unified_t wmi_handle, + void *evt_buf, + struct pmo_gtk_rsp_params *gtk_rsp_param, uint32_t len); + +QDF_STATUS (*send_wow_delete_pattern_cmd)(wmi_unified_t wmi_handle, + uint8_t ptrn_id, + uint8_t vdev_id); + +QDF_STATUS (*send_host_wakeup_ind_to_fw_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_wow_timer_pattern_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, uint32_t cookie, uint32_t time); +#ifdef FEATURE_WLAN_LPHB +QDF_STATUS (*send_lphb_config_hbenable_cmd)(wmi_unified_t wmi_handle, + wmi_hb_set_enable_cmd_fixed_param *params); + +QDF_STATUS (*send_lphb_config_tcp_params_cmd)(wmi_unified_t wmi_handle, + wmi_hb_set_tcp_params_cmd_fixed_param *lphb_conf_req); + +QDF_STATUS (*send_lphb_config_tcp_pkt_filter_cmd)(wmi_unified_t wmi_handle, + wmi_hb_set_tcp_pkt_filter_cmd_fixed_param *g_hb_tcp_filter_fp); + +QDF_STATUS (*send_lphb_config_udp_params_cmd)(wmi_unified_t wmi_handle, + wmi_hb_set_udp_params_cmd_fixed_param *lphb_conf_req); + +QDF_STATUS (*send_lphb_config_udp_pkt_filter_cmd)(wmi_unified_t wmi_handle, + wmi_hb_set_udp_pkt_filter_cmd_fixed_param *lphb_conf_req); +#endif /* FEATURE_WLAN_LPHB */ +#ifdef WLAN_FEATURE_PACKET_FILTERING +QDF_STATUS (*send_enable_disable_packet_filter_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, bool enable); + +QDF_STATUS (*send_config_packet_filter_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, struct pmo_rcv_pkt_fltr_cfg *rcv_filter_param, + uint8_t filter_id, bool enable); +#endif +#endif /* end of WLAN_POWER_MANAGEMENT_OFFLOAD */ +#ifdef WLAN_WMI_BCN +QDF_STATUS (*send_bcn_buf_ll_cmd)(wmi_unified_t wmi_handle, + wmi_bcn_send_from_host_cmd_fixed_param * param); +#endif + +#if !defined(REMOVE_PKT_LOG) && defined(FEATURE_PKTLOG) +QDF_STATUS (*send_pktlog_wmi_send_cmd)(wmi_unified_t wmi_handle, + WMI_PKTLOG_EVENT pktlog_event, + WMI_CMD_ID cmd_id, uint8_t user_triggered); +#endif + +#ifdef WLAN_SUPPORT_GREEN_AP +QDF_STATUS (*send_egap_conf_params_cmd)(wmi_unified_t wmi_handle, + struct wlan_green_ap_egap_params *egap_params); +#endif + +#ifdef WLAN_FEATURE_CIF_CFR +QDF_STATUS (*send_oem_dma_cfg_cmd)(wmi_unified_t wmi_handle, + wmi_oem_dma_ring_cfg_req_fixed_param *cfg); +#endif + +QDF_STATUS (*send_start_oem_data_cmd)(wmi_unified_t wmi_handle, + uint32_t data_len, + uint8_t *data); + +#ifdef FEATURE_OEM_DATA +QDF_STATUS (*send_start_oemv2_data_cmd)(wmi_unified_t wmi_handle, + struct oem_data *params); +#endif + +QDF_STATUS +(*send_dfs_phyerr_filter_offload_en_cmd)(wmi_unified_t wmi_handle, + bool dfs_phyerr_filter_offload); + +QDF_STATUS (*send_bss_color_change_enable_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, + bool enable); + +QDF_STATUS (*send_obss_color_collision_cfg_cmd)(wmi_unified_t wmi_handle, + struct wmi_obss_color_collision_cfg_param *cfg); + +QDF_STATUS (*extract_obss_color_collision_info)(uint8_t *evt_buf, + struct wmi_obss_color_collision_info *info); + +QDF_STATUS (*send_peer_based_pktlog_cmd)(wmi_unified_t wmi_handle, + uint8_t *macaddr, + uint8_t mac_id, + uint8_t enb_dsb); + +#ifdef WMI_STA_SUPPORT +QDF_STATUS (*send_del_ts_cmd)(wmi_unified_t wmi_handle, uint8_t vdev_id, + uint8_t ac); + +QDF_STATUS (*send_aggr_qos_cmd)(wmi_unified_t wmi_handle, + struct aggr_add_ts_param *aggr_qos_rsp_msg); + +QDF_STATUS (*send_add_ts_cmd)(wmi_unified_t wmi_handle, + struct add_ts_param *msg); + +QDF_STATUS (*send_process_add_periodic_tx_ptrn_cmd)( + wmi_unified_t wmi_handle, + struct periodic_tx_pattern *pattern, + uint8_t vdev_id); + +QDF_STATUS (*send_process_del_periodic_tx_ptrn_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint8_t pattern_id); + +QDF_STATUS (*send_set_auto_shutdown_timer_cmd)(wmi_unified_t wmi_handle, + uint32_t timer_val); + +#ifdef WLAN_FEATURE_NAN +QDF_STATUS (*send_nan_req_cmd)(wmi_unified_t wmi_handle, + struct nan_msg_params *nan_req); + +QDF_STATUS (*send_nan_disable_req_cmd)(wmi_unified_t wmi_handle, + struct nan_disable_req *nan_msg); + +QDF_STATUS (*extract_nan_event_rsp)(wmi_unified_t wmi_handle, void *evt_buf, + struct nan_event_params *evt_params, + uint8_t **msg_buf); +#endif + +QDF_STATUS (*send_process_ch_avoid_update_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_process_set_ie_info_cmd)(wmi_unified_t wmi_handle, + struct vdev_ie_info_param *ie_info); + +QDF_STATUS (*send_set_base_macaddr_indicate_cmd)(wmi_unified_t wmi_handle, + uint8_t *custom_addr); + +QDF_STATUS (*send_pdev_set_pcl_cmd)(wmi_unified_t wmi_handle, + struct wmi_pcl_chan_weights *msg); + +#ifdef WLAN_POLICY_MGR_ENABLE +QDF_STATUS (*send_pdev_set_dual_mac_config_cmd)(wmi_unified_t wmi_handle, + struct policy_mgr_dual_mac_config *msg); +#endif + +QDF_STATUS (*send_set_led_flashing_cmd)(wmi_unified_t wmi_handle, + struct flashing_req_params *flashing); + +QDF_STATUS (*send_set_arp_stats_req_cmd)(wmi_unified_t wmi_handle, + struct set_arp_stats *req_buf); + +QDF_STATUS (*send_get_arp_stats_req_cmd)(wmi_unified_t wmi_handle, + struct get_arp_stats *req_buf); + +QDF_STATUS (*send_set_del_pmkid_cache_cmd) (wmi_unified_t wmi_handle, + struct wmi_unified_pmk_cache *req_buf); + +QDF_STATUS (*send_adapt_dwelltime_params_cmd)(wmi_unified_t wmi_handle, + struct wmi_adaptive_dwelltime_params *dwelltime_params); + +QDF_STATUS (*send_dbs_scan_sel_params_cmd)(wmi_unified_t wmi_handle, + struct wmi_dbs_scan_sel_params *dbs_scan_params); + +QDF_STATUS (*send_vdev_set_gtx_cfg_cmd)(wmi_unified_t wmi_handle, + uint32_t if_id, + struct wmi_gtx_config *gtx_info); + +QDF_STATUS (*send_set_sta_keep_alive_cmd)(wmi_unified_t wmi_handle, + struct sta_keep_alive_params *params); + +QDF_STATUS (*send_set_sta_sa_query_param_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, uint32_t max_retries, + uint32_t retry_interval); + +QDF_STATUS (*send_fw_profiling_cmd)(wmi_unified_t wmi_handle, + uint32_t cmd, uint32_t value1, uint32_t value2); + +QDF_STATUS (*send_nat_keepalive_en_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +QDF_STATUS (*send_process_dhcp_ind_cmd)(wmi_unified_t wmi_handle, + wmi_peer_set_param_cmd_fixed_param *ta_dhcp_ind); + +QDF_STATUS (*send_get_link_speed_cmd)(wmi_unified_t wmi_handle, + wmi_mac_addr peer_macaddr); + +QDF_STATUS (*send_wlm_latency_level_cmd)(wmi_unified_t wmi_handle, + struct wlm_latency_level_param *param); + +QDF_STATUS (*send_sar_limit_cmd)(wmi_unified_t wmi_handle, + struct sar_limit_cmd_params *params); + +QDF_STATUS (*get_sar_limit_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*extract_sar_limit_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct sar_limit_event *event); + +QDF_STATUS (*extract_sar2_result_event)(void *handle, + uint8_t *event, + uint32_t len); + +#ifdef FEATURE_WLAN_TDLS +QDF_STATUS (*send_set_tdls_offchan_mode_cmd)(wmi_unified_t wmi_handle, + struct tdls_channel_switch_params *chan_switch_params); + +QDF_STATUS (*send_update_fw_tdls_state_cmd)(wmi_unified_t wmi_handle, + struct tdls_info *tdls_param, + enum wmi_tdls_state tdls_state); + +QDF_STATUS (*send_update_tdls_peer_state_cmd)(wmi_unified_t wmi_handle, + struct tdls_peer_update_state *peer_state, + uint32_t *ch_mhz); + +QDF_STATUS (*extract_vdev_tdls_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, struct tdls_event_info *param); +#endif /* FEATURE_WLAN_TDLS */ +#endif /* WMI_STA_SUPPORT */ + +QDF_STATUS (*send_stats_ext_req_cmd)(wmi_unified_t wmi_handle, + struct stats_ext_params *preq); + +#ifdef WLAN_FEATURE_EXTWOW_SUPPORT +QDF_STATUS (*send_enable_ext_wow_cmd)(wmi_unified_t wmi_handle, + struct ext_wow_params *params); + +QDF_STATUS (*send_set_app_type2_params_in_fw_cmd)(wmi_unified_t wmi_handle, + struct app_type2_params *appType2Params); + +QDF_STATUS (*send_app_type1_params_in_fw_cmd)(wmi_unified_t wmi_handle, + struct app_type1_params *app_type1_params); +#endif /* WLAN_FEATURE_EXTWOW_SUPPORT */ + +QDF_STATUS (*send_process_dhcpserver_offload_cmd)(wmi_unified_t wmi_handle, + struct dhcp_offload_info_params *params); + +QDF_STATUS (*send_regdomain_info_to_fw_cmd)(wmi_unified_t wmi_handle, + uint32_t reg_dmn, uint16_t regdmn2G, + uint16_t regdmn5G, uint8_t ctl2G, + uint8_t ctl5G); + +QDF_STATUS (*send_process_fw_mem_dump_cmd)(wmi_unified_t wmi_handle, + struct fw_dump_req_param *mem_dump_req); + +QDF_STATUS (*send_cfg_action_frm_tb_ppdu_cmd)(wmi_unified_t wmi_handle, + struct cfg_action_frm_tb_ppdu_param *cfg_info); + +QDF_STATUS (*save_fw_version_cmd)(wmi_unified_t wmi_handle, void *evt_buf); + +QDF_STATUS (*check_and_update_fw_version_cmd)(wmi_unified_t wmi_hdl, void *ev); + +QDF_STATUS (*send_log_supported_evt_cmd)(wmi_unified_t wmi_handle, + uint8_t *event, + uint32_t len); + +QDF_STATUS (*send_enable_specific_fw_logs_cmd)(wmi_unified_t wmi_handle, + struct wmi_wifi_start_log *start_log); + +QDF_STATUS (*send_flush_logs_to_fw_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_unit_test_cmd)(wmi_unified_t wmi_handle, + struct wmi_unit_test_cmd *wmi_utest); + +#ifdef FEATURE_WLAN_APF +QDF_STATUS +(*send_set_active_apf_mode_cmd)(wmi_unified_t wmi_handle, uint8_t vdev_id, + enum wmi_host_active_apf_mode ucast_mode, + enum wmi_host_active_apf_mode mcast_bcast_mode); + +QDF_STATUS (*send_apf_enable_cmd)(wmi_unified_t wmi_handle, uint32_t vdev_id, + bool enable); + +QDF_STATUS (*send_apf_write_work_memory_cmd)(wmi_unified_t wmi_handle, + struct wmi_apf_write_memory_params *apf_write_params); + +QDF_STATUS (*send_apf_read_work_memory_cmd)(wmi_unified_t wmi_handle, + struct wmi_apf_read_memory_params *apf_read_params); + +QDF_STATUS (*extract_apf_read_memory_resp_event)(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_apf_read_memory_resp_event_params *resp); +#endif /* FEATURE_WLAN_APF */ + +QDF_STATUS (*send_pdev_get_tpc_config_cmd)(wmi_unified_t wmi_handle, + uint32_t param); + +#ifdef WMI_ATF_SUPPORT +QDF_STATUS (*send_set_bwf_cmd)(wmi_unified_t wmi_handle, + struct set_bwf_params *param); +#endif + +QDF_STATUS (*send_pdev_fips_cmd)(wmi_unified_t wmi_handle, + struct fips_params *param); + +QDF_STATUS (*send_wlan_profile_enable_cmd)(wmi_unified_t wmi_handle, + struct wlan_profile_params *param); + +#ifdef WLAN_FEATURE_DISA +QDF_STATUS +(*send_encrypt_decrypt_send_cmd)(wmi_unified_t wmi_handle, + struct disa_encrypt_decrypt_req_params + *params); +#endif + +QDF_STATUS (*send_wlan_profile_trigger_cmd)(wmi_unified_t wmi_handle, + struct wlan_profile_params *param); + +QDF_STATUS (*send_pdev_set_chan_cmd)(wmi_unified_t wmi_handle, + struct channel_param *param); + +QDF_STATUS (*send_set_ht_ie_cmd)(wmi_unified_t wmi_handle, + struct ht_ie_params *param); + +QDF_STATUS (*send_set_vht_ie_cmd)(wmi_unified_t wmi_handle, + struct vht_ie_params *param); + +QDF_STATUS (*send_wmm_update_cmd)(wmi_unified_t wmi_handle, + struct wmm_update_params *param); + +QDF_STATUS (*send_process_update_edca_param_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, bool mu_edca_param, + struct wmi_host_wme_vparams wmm_vparams[WMI_MAX_NUM_AC]); + +QDF_STATUS (*send_set_ratepwr_table_cmd)(wmi_unified_t wmi_handle, + struct ratepwr_table_params *param); + +QDF_STATUS (*send_get_ratepwr_table_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_set_ctl_table_cmd)(wmi_unified_t wmi_handle, + struct ctl_table_params *param); + +QDF_STATUS (*send_set_mimogain_table_cmd)(wmi_unified_t wmi_handle, + struct mimogain_table_params *param); + +QDF_STATUS (*send_set_ratepwr_chainmsk_cmd)(wmi_unified_t wmi_handle, + struct ratepwr_chainmsk_params *param); + +QDF_STATUS (*send_set_macaddr_cmd)(wmi_unified_t wmi_handle, + struct macaddr_params *param); + +QDF_STATUS (*send_pdev_scan_start_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_pdev_scan_end_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_set_acparams_cmd)(wmi_unified_t wmi_handle, + struct acparams_params *param); + +QDF_STATUS (*send_set_vap_dscp_tid_map_cmd)(wmi_unified_t wmi_handle, + struct vap_dscp_tid_map_params *param); + +QDF_STATUS (*send_proxy_ast_reserve_cmd)(wmi_unified_t wmi_handle, + struct proxy_ast_reserve_params *param); + +QDF_STATUS (*send_pdev_qvit_cmd)(wmi_unified_t wmi_handle, + struct pdev_qvit_params *param); + +QDF_STATUS (*send_mcast_group_update_cmd)(wmi_unified_t wmi_handle, + struct mcast_group_update_params *param); + +QDF_STATUS (*send_peer_add_wds_entry_cmd)(wmi_unified_t wmi_handle, + struct peer_add_wds_entry_params *param); + +QDF_STATUS (*send_peer_del_wds_entry_cmd)(wmi_unified_t wmi_handle, + struct peer_del_wds_entry_params *param); + +QDF_STATUS (*send_set_bridge_mac_addr_cmd)(wmi_unified_t wmi_handle, + struct set_bridge_mac_addr_params *param); + +QDF_STATUS (*send_peer_update_wds_entry_cmd)(wmi_unified_t wmi_handle, + struct peer_update_wds_entry_params *param); + +QDF_STATUS (*send_phyerr_enable_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_phyerr_disable_cmd)(wmi_unified_t wmi_handle); + +#ifdef WMI_SMART_ANT_SUPPORT +QDF_STATUS (*send_set_ant_switch_tbl_cmd)(wmi_unified_t wmi_handle, + struct ant_switch_tbl_params *param); + +QDF_STATUS (*send_smart_ant_enable_cmd)(wmi_unified_t wmi_handle, + struct smart_ant_enable_params *param); + +QDF_STATUS (*send_smart_ant_set_rx_ant_cmd)(wmi_unified_t wmi_handle, + struct smart_ant_rx_ant_params *param); + +QDF_STATUS (*send_smart_ant_set_tx_ant_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct smart_ant_tx_ant_params *param); + +QDF_STATUS (*send_smart_ant_set_training_info_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct smart_ant_training_info_params *param); + +QDF_STATUS (*send_smart_ant_set_node_config_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct smart_ant_node_config_params *param); +#endif + +QDF_STATUS (*send_smart_ant_enable_tx_feedback_cmd)(wmi_unified_t wmi_handle, + struct smart_ant_enable_tx_feedback_params *param); + +QDF_STATUS (*send_vdev_spectral_configure_cmd)(wmi_unified_t wmi_handle, + struct vdev_spectral_configure_params *param); + +QDF_STATUS (*send_vdev_spectral_enable_cmd)(wmi_unified_t wmi_handle, + struct vdev_spectral_enable_params *param); + +QDF_STATUS (*send_bss_chan_info_request_cmd)(wmi_unified_t wmi_handle, + struct bss_chan_info_request_params *param); + +QDF_STATUS (*send_thermal_mitigation_param_cmd)(wmi_unified_t wmi_handle, + struct thermal_mitigation_params *param); + +QDF_STATUS (*send_vdev_set_neighbour_rx_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct set_neighbour_rx_params *param); + +QDF_STATUS (*send_vdev_set_fwtest_param_cmd)(wmi_unified_t wmi_handle, + struct set_fwtest_params *param); + +QDF_STATUS (*send_vdev_config_ratemask_cmd)(wmi_unified_t wmi_handle, + struct config_ratemask_params *param); + +QDF_STATUS (*send_vdev_set_custom_aggr_size_cmd)(wmi_unified_t wmi_handle, + struct set_custom_aggr_size_params *param); + +QDF_STATUS (*send_vdev_set_qdepth_thresh_cmd)(wmi_unified_t wmi_handle, + struct set_qdepth_thresh_params *param); + +QDF_STATUS (*send_peer_chan_width_switch_cmd)(wmi_unified_t wmi_handle, + struct peer_chan_width_switch_params *param); + +QDF_STATUS (*send_wow_wakeup_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_wow_add_wakeup_event_cmd)(wmi_unified_t wmi_handle, + struct wow_add_wakeup_params *param); + +QDF_STATUS (*send_wow_add_wakeup_pattern_cmd)(wmi_unified_t wmi_handle, + struct wow_add_wakeup_pattern_params *param); + +QDF_STATUS (*send_wow_remove_wakeup_pattern_cmd)(wmi_unified_t wmi_handle, + struct wow_remove_wakeup_pattern_params *param); + +QDF_STATUS (*send_pdev_set_regdomain_cmd)(wmi_unified_t wmi_handle, + struct pdev_set_regdomain_params *param); + +QDF_STATUS (*send_set_quiet_mode_cmd)(wmi_unified_t wmi_handle, + struct set_quiet_mode_params *param); + +QDF_STATUS (*send_set_bcn_offload_quiet_mode_cmd)(wmi_unified_t wmi_handle, + struct set_bcn_offload_quiet_mode_params *param); + +QDF_STATUS (*send_set_beacon_filter_cmd)(wmi_unified_t wmi_handle, + struct set_beacon_filter_params *param); + +QDF_STATUS (*send_remove_beacon_filter_cmd)(wmi_unified_t wmi_handle, + struct remove_beacon_filter_params *param); +/* +QDF_STATUS (*send_mgmt_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct mgmt_params *param); + */ + +QDF_STATUS (*send_addba_clearresponse_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct addba_clearresponse_params *param); + +QDF_STATUS (*send_addba_send_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct addba_send_params *param); + +QDF_STATUS (*send_delba_send_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct delba_send_params *param); + +QDF_STATUS (*send_addba_setresponse_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct addba_setresponse_params *param); + +QDF_STATUS (*send_singleamsdu_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct singleamsdu_params *param); + +QDF_STATUS (*send_set_qboost_param_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct set_qboost_params *param); + +QDF_STATUS (*send_mu_scan_cmd)(wmi_unified_t wmi_handle, + struct mu_scan_params *param); + +QDF_STATUS (*send_lteu_config_cmd)(wmi_unified_t wmi_handle, + struct lteu_config_params *param); + +QDF_STATUS (*send_set_ps_mode_cmd)(wmi_unified_t wmi_handle, + struct set_ps_mode_params *param); +QDF_STATUS (*save_service_bitmap)(wmi_unified_t wmi_handle, + void *evt_buf, void *bitmap_buf); +QDF_STATUS (*save_ext_service_bitmap)(wmi_unified_t wmi_handle, + void *evt_buf, void *bitmap_buf); +bool (*is_service_enabled)(wmi_unified_t wmi_handle, + uint32_t service_id); +QDF_STATUS (*get_target_cap_from_service_ready)(wmi_unified_t wmi_handle, + void *evt_buf, struct wlan_psoc_target_capability_info *ev); + +QDF_STATUS (*extract_fw_version)(wmi_unified_t wmi_handle, + void *ev, struct wmi_host_fw_ver *fw_ver); + +QDF_STATUS (*extract_fw_abi_version)(wmi_unified_t wmi_handle, + void *ev, struct wmi_host_fw_abi_ver *fw_ver); + +QDF_STATUS (*extract_hal_reg_cap)(wmi_unified_t wmi_handle, void *evt_buf, + struct wlan_psoc_hal_reg_capability *hal_reg_cap); + +uint32_t (*extract_num_mem_reqs)(wmi_unified_t wmi_handle, + void *evt_buf); + +QDF_STATUS (*extract_host_mem_req)(wmi_unified_t wmi_handle, + void *evt_buf, host_mem_req *mem_reqs, + uint32_t num_active_peers, + uint32_t num_peers, + enum wmi_fw_mem_prio fw_prio, uint16_t idx); + +QDF_STATUS (*init_cmd_send)(wmi_unified_t wmi_handle, + struct wmi_init_cmd_param *param); + +QDF_STATUS (*save_fw_version)(wmi_unified_t wmi_handle, void *evt_buf); +uint32_t (*ready_extract_init_status)(wmi_unified_t wmi_hdl, void *ev); +QDF_STATUS (*ready_extract_mac_addr)(wmi_unified_t wmi_hdl, void *ev, + uint8_t *macaddr); +wmi_host_mac_addr * (*ready_extract_mac_addr_list)(wmi_unified_t wmi_hdl, + void *ev, uint8_t *num_mac_addr); +QDF_STATUS (*extract_ready_event_params)(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_ready_ev_param *ev_param); + +QDF_STATUS (*check_and_update_fw_version)(wmi_unified_t wmi_hdl, void *ev); +uint8_t* (*extract_dbglog_data_len)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *len); +QDF_STATUS (*send_ext_resource_config)(wmi_unified_t wmi_handle, + wmi_host_ext_resource_config *ext_cfg); + +QDF_STATUS (*send_nf_dbr_dbm_info_get_cmd)(wmi_unified_t wmi_handle, + uint8_t mac_id); + +QDF_STATUS (*send_packet_power_info_get_cmd)(wmi_unified_t wmi_handle, + struct packet_power_info_params *param); + +#ifdef WLAN_FEATURE_GPIO_CFG +QDF_STATUS (*send_gpio_config_cmd)(wmi_unified_t wmi_handle, + struct gpio_config_params *param); + +QDF_STATUS (*send_gpio_output_cmd)(wmi_unified_t wmi_handle, + struct gpio_output_params *param); +#endif + +QDF_STATUS (*send_rtt_meas_req_test_cmd)(wmi_unified_t wmi_handle, + struct rtt_meas_req_test_params *param); + +QDF_STATUS (*send_rtt_meas_req_cmd)(wmi_unified_t wmi_handle, + struct rtt_meas_req_params *param); + +QDF_STATUS (*send_rtt_keepalive_req_cmd)(wmi_unified_t wmi_handle, + struct rtt_keepalive_req_params *param); + +QDF_STATUS (*send_lci_set_cmd)(wmi_unified_t wmi_handle, + struct lci_set_params *param); + +QDF_STATUS (*send_lcr_set_cmd)(wmi_unified_t wmi_handle, + struct lcr_set_params *param); + +QDF_STATUS (*send_periodic_chan_stats_config_cmd)(wmi_unified_t wmi_handle, + struct periodic_chan_stats_params *param); + +#ifdef WLAN_ATF_ENABLE +QDF_STATUS (*send_set_atf_cmd)(wmi_unified_t wmi_handle, + struct set_atf_params *param); + +QDF_STATUS +(*send_atf_peer_request_cmd)(wmi_unified_t wmi_handle, + struct atf_peer_request_params *param); + +QDF_STATUS +(*send_set_atf_grouping_cmd)(wmi_unified_t wmi_handle, + struct atf_grouping_params *param); + +QDF_STATUS +(*send_set_atf_group_ac_cmd)(wmi_unified_t wmi_handle, + struct atf_group_ac_params *param); + +QDF_STATUS (*extract_atf_peer_stats_ev)(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_atf_peer_stats_event *ev); + +QDF_STATUS (*extract_atf_token_info_ev)(wmi_unified_t wmi_handle, + void *evt_buf, uint8_t idx, + wmi_host_atf_peer_stats_info *atf_info); +#endif + +QDF_STATUS (*send_get_user_position_cmd)(wmi_unified_t wmi_handle, + uint32_t value); + +QDF_STATUS +(*send_reset_peer_mumimo_tx_count_cmd)(wmi_unified_t wmi_handle, + uint32_t value); + +QDF_STATUS (*send_get_peer_mumimo_tx_count_cmd)(wmi_unified_t wmi_handle, + uint32_t value); + +QDF_STATUS +(*send_pdev_caldata_version_check_cmd)(wmi_unified_t wmi_handle, + uint32_t value); + +QDF_STATUS +(*send_btcoex_wlan_priority_cmd)(wmi_unified_t wmi_handle, + struct btcoex_cfg_params *param); + +QDF_STATUS +(*send_start_11d_scan_cmd)(wmi_unified_t wmi_handle, + struct reg_start_11d_scan_req *param); + +QDF_STATUS +(*send_stop_11d_scan_cmd)(wmi_unified_t wmi_handle, + struct reg_stop_11d_scan_req *param); + +QDF_STATUS +(*send_btcoex_duty_cycle_cmd)(wmi_unified_t wmi_handle, + struct btcoex_cfg_params *param); + +QDF_STATUS +(*send_coex_ver_cfg_cmd)(wmi_unified_t wmi_handle, coex_ver_cfg_t *param); + +QDF_STATUS +(*send_coex_config_cmd)(wmi_unified_t wmi_handle, + struct coex_config_params *param); + +QDF_STATUS (*send_bcn_offload_control_cmd)(wmi_unified_t wmi_handle, + struct bcn_offload_control *bcn_ctrl_param); +#ifdef OL_ATH_SMART_LOGGING +QDF_STATUS +(*send_smart_logging_enable_cmd)(wmi_unified_t wmi_handle, uint32_t param); + +QDF_STATUS +(*send_smart_logging_fatal_cmd)(wmi_unified_t wmi_handle, + struct wmi_debug_fatal_events *param); +#endif /* OL_ATH_SMART_LOGGING */ +QDF_STATUS (*extract_wds_addr_event)(wmi_unified_t wmi_handle, + void *evt_buf, uint16_t len, wds_addr_event_t *wds_ev); + +QDF_STATUS (*extract_dcs_interference_type)(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_dcs_interference_param *param); + +QDF_STATUS (*extract_dcs_cw_int)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_ath_dcs_cw_int *cw_int); + +QDF_STATUS (*extract_dcs_im_tgt_stats)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_dcs_im_tgt_stats_t *wlan_stat); + +QDF_STATUS (*extract_fips_event_data)(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_fips_event_param *param); + +#ifdef WLAN_FEATURE_DISA +QDF_STATUS +(*extract_encrypt_decrypt_resp_event)(wmi_unified_t wmi_handle, + void *evt_buf, + struct disa_encrypt_decrypt_resp_params + *resp); +#endif + +QDF_STATUS (*extract_vdev_start_resp)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_vdev_start_resp *vdev_rsp); + +QDF_STATUS (*extract_vdev_delete_resp)(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_vdev_delete_resp *delete_rsp); + +QDF_STATUS (*extract_tbttoffset_update_params)(wmi_unified_t wmi_hdl, + void *evt_buf, uint8_t idx, + struct tbttoffset_params *tbtt_param); + +QDF_STATUS (*extract_ext_tbttoffset_update_params)(wmi_unified_t wmi_hdl, + void *evt_buf, uint8_t idx, + struct tbttoffset_params *tbtt_param); + +QDF_STATUS (*extract_tbttoffset_num_vdevs)(wmi_unified_t wmi_hdl, void *evt_buf, + uint32_t *num_vdevs); + +QDF_STATUS (*extract_ext_tbttoffset_num_vdevs)(wmi_unified_t wmi_hdl, + void *evt_buf, + uint32_t *num_vdevs); + +QDF_STATUS (*extract_mgmt_rx_params)(wmi_unified_t wmi_handle, void *evt_buf, + struct mgmt_rx_event_params *hdr, uint8_t **bufp); + +QDF_STATUS (*extract_vdev_stopped_param)(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *vdev_id); + +QDF_STATUS (*extract_vdev_roam_param)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_roam_event *param); + +QDF_STATUS (*extract_vdev_scan_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, struct scan_event *param); + +#ifdef FEATURE_WLAN_SCAN_PNO +QDF_STATUS (*extract_nlo_match_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, + struct scan_event *param); + +QDF_STATUS (*extract_nlo_complete_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, + struct scan_event *param); +#endif + +QDF_STATUS (*extract_mu_ev_param)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_mu_report_event *param); + +QDF_STATUS (*extract_mu_db_entry)(wmi_unified_t wmi_hdl, void *evt_buf, + uint8_t idx, wmi_host_mu_db_entry *param); + +QDF_STATUS (*extract_mumimo_tx_count_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_peer_txmu_cnt_event *param); + +QDF_STATUS (*extract_peer_gid_userpos_list_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_peer_gid_userpos_list_event *param); + +QDF_STATUS +(*extract_esp_estimation_ev_param)(wmi_unified_t wmi_handle, void *evt_buf, + struct esp_estimation_event *param); + +QDF_STATUS (*extract_pdev_caldata_version_check_ev_param)( + wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_pdev_check_cal_version_event *param); + +QDF_STATUS (*extract_pdev_tpc_config_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_pdev_tpc_config_event *param); + +QDF_STATUS (*extract_gpio_input_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *gpio_num); + +QDF_STATUS (*extract_pdev_reserve_ast_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_proxy_ast_reserve_param *param); + +QDF_STATUS (*extract_nfcal_power_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_pdev_nfcal_power_all_channels_event *param); + +QDF_STATUS (*extract_pdev_tpc_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_pdev_tpc_event *param); + +QDF_STATUS (*extract_pdev_generic_buffer_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_pdev_generic_buffer_event *param); + +QDF_STATUS (*extract_mgmt_tx_compl_param)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_mgmt_tx_compl_event *param); + +QDF_STATUS (*extract_offchan_data_tx_compl_param)(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_host_offchan_data_tx_compl_event *param); + +QDF_STATUS (*extract_pdev_csa_switch_count_status)(wmi_unified_t wmi_handle, + void *evt_buf, struct pdev_csa_switch_count_status *param); + +QDF_STATUS (*extract_swba_num_vdevs)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *num_vdevs); + +QDF_STATUS (*extract_swba_tim_info)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t idx, wmi_host_tim_info *tim_info); + +QDF_STATUS (*extract_swba_noa_info)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t idx, wmi_host_p2p_noa_info *p2p_desc); + +QDF_STATUS (*extract_swba_quiet_info)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t idx, + wmi_host_quiet_info *quiet_info); + +#ifdef CONVERGED_P2P_ENABLE +#ifdef FEATURE_P2P_LISTEN_OFFLOAD +QDF_STATUS (*extract_p2p_lo_stop_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, struct p2p_lo_event *param); +#endif + +QDF_STATUS (*extract_p2p_noa_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, struct p2p_noa_info *param); + +QDF_STATUS (*set_mac_addr_rx_filter)(wmi_unified_t wmi_handle, + struct p2p_set_mac_filter *param); +QDF_STATUS +(*extract_mac_addr_rx_filter_evt_param)(wmi_unified_t wmi_handle, + void *evt_buf, + struct p2p_set_mac_filter_evt *param); +#endif + +#ifdef WLAN_FEATURE_INTEROP_ISSUES_AP +QDF_STATUS +(*extract_interop_issues_ap_ev_param)(wmi_unified_t wmi_handle, void *evt_buf, + struct wlan_interop_issues_ap_event *param); +QDF_STATUS +(*send_set_rap_ps_cmd)(wmi_unified_t wmi_handle, + struct wlan_interop_issues_ap_info *interop_issues_ap); +#endif + +QDF_STATUS (*extract_peer_sta_ps_statechange_ev)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_peer_sta_ps_statechange_event *ev); + +QDF_STATUS (*extract_peer_sta_kickout_ev)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_peer_sta_kickout_event *ev); + +QDF_STATUS (*extract_peer_ratecode_list_ev)(wmi_unified_t wmi_handle, + void *evt_buf, uint8_t *peer_mac, + uint32_t *pdev_id, + wmi_sa_rate_cap *rate_cap); + +QDF_STATUS (*extract_comb_phyerr)(wmi_unified_t wmi_handle, void *evt_buf, + uint16_t datalen, uint16_t *buf_offset, wmi_host_phyerr_t *phyerr); + +QDF_STATUS (*extract_single_phyerr)(wmi_unified_t wmi_handle, void *evt_buf, + uint16_t datalen, uint16_t *buf_offset, wmi_host_phyerr_t *phyerr); + +QDF_STATUS (*extract_composite_phyerr)(wmi_unified_t wmi_handle, void *evt_buf, + uint16_t datalen, wmi_host_phyerr_t *phyerr); + +QDF_STATUS (*extract_rtt_hdr)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_rtt_event_hdr *ev); + +QDF_STATUS (*extract_rtt_ev)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_rtt_meas_event *ev, uint8_t *hdump, uint16_t hdump_len); + +QDF_STATUS (*extract_rtt_error_report_ev)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_rtt_error_report_event *ev); + +QDF_STATUS (*extract_all_stats_count)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_stats_event *stats_param); + +QDF_STATUS (*extract_pdev_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_pdev_stats *pdev_stats); + +QDF_STATUS (*extract_unit_test)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_unit_test_event *unit_test, uint32_t maxspace); + +QDF_STATUS (*extract_pdev_ext_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_pdev_ext_stats *pdev_ext_stats); + +QDF_STATUS (*extract_vdev_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_vdev_stats *vdev_stats); + +QDF_STATUS (*extract_per_chain_rssi_stats)(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, + struct wmi_host_per_chain_rssi_stats *rssi_stats); + +QDF_STATUS (*extract_peer_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_peer_stats *peer_stats); + +QDF_STATUS (*extract_bcnflt_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_bcnflt_stats *bcnflt_stats); + +QDF_STATUS (*extract_peer_extd_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_peer_extd_stats *peer_extd_stats); + +QDF_STATUS (*extract_peer_retry_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, + struct wmi_host_peer_retry_stats *peer_retry_stats); + +QDF_STATUS (*extract_peer_adv_stats)(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_peer_adv_stats + *peer_adv_stats); + +QDF_STATUS (*extract_chan_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_chan_stats *chan_stats); + +#ifdef WLAN_FEATURE_MIB_STATS +QDF_STATUS (*extract_mib_stats)(wmi_unified_t wmi_handle, void *evt_buf, + struct mib_stats_metrics *mib_stats); +#endif + +QDF_STATUS (*extract_thermal_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *temp, uint32_t *level, uint32_t *pdev_id); + +QDF_STATUS (*extract_thermal_level_stats)(wmi_unified_t wmi_handle, + void *evt_buf, uint8_t idx, uint32_t *levelcount, + uint32_t *dccount); + +QDF_STATUS (*extract_profile_ctx)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_wlan_profile_ctx_t *profile_ctx); + +QDF_STATUS (*extract_profile_data)(wmi_unified_t wmi_handle, void *evt_buf, + uint8_t idx, + wmi_host_wlan_profile_t *profile_data); + +QDF_STATUS (*extract_chan_info_event)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_chan_info_event *chan_info); + +QDF_STATUS (*extract_channel_hopping_event)(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_pdev_channel_hopping_event *ch_hopping); + +QDF_STATUS (*extract_bss_chan_info_event)(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_pdev_bss_chan_info_event *bss_chan_info); + +QDF_STATUS (*extract_inst_rssi_stats_event)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_inst_stats_resp *inst_rssi_resp); + +QDF_STATUS (*extract_tx_data_traffic_ctrl_ev)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_tx_data_traffic_ctrl_event *ev); + +QDF_STATUS (*extract_vdev_extd_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_vdev_extd_stats *vdev_extd_stats); + +QDF_STATUS (*extract_vdev_nac_rssi_stats)(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_vdev_nac_rssi_event *vdev_nac_rssi_stats); + +QDF_STATUS (*extract_bcn_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_bcn_stats *bcn_stats); + +#ifdef OL_ATH_SMART_LOGGING +QDF_STATUS (*extract_smartlog_event)(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_debug_fatal_events *event); +#endif /* OL_ATH_SMART_LOGGING */ +QDF_STATUS (*send_power_dbg_cmd)(wmi_unified_t wmi_handle, + struct wmi_power_dbg_params *param); + +QDF_STATUS (*send_multiple_vdev_restart_req_cmd)(wmi_unified_t wmi_handle, + struct multiple_vdev_restart_params *param); +#ifdef QCA_SUPPORT_AGILE_DFS +QDF_STATUS +(*send_adfs_ocac_abort_cmd)(wmi_unified_t wmi_handle, + struct vdev_adfs_abort_params *param); + +QDF_STATUS (*send_adfs_ch_cfg_cmd)(wmi_unified_t wmi_handle, + struct vdev_adfs_ch_cfg_params *param); +#endif +QDF_STATUS (*send_fw_test_cmd)(wmi_unified_t wmi_handle, + struct set_fwtest_params *wmi_fwtest); + +#ifdef WLAN_FEATURE_ACTION_OUI +QDF_STATUS (*send_action_oui_cmd)(wmi_unified_t wmi_handle, + struct action_oui_request *req); +#endif /* WLAN_FEATURE_ACTION_OUI */ + +QDF_STATUS (*send_peer_rx_reorder_queue_setup_cmd)(wmi_unified_t wmi_handle, + struct rx_reorder_queue_setup_params *param); + +QDF_STATUS (*send_peer_rx_reorder_queue_remove_cmd)(wmi_unified_t wmi_handle, + struct rx_reorder_queue_remove_params *param); + +QDF_STATUS (*extract_service_ready_ext)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wlan_psoc_host_service_ext_param *param); + +QDF_STATUS (*extract_service_ready_ext2)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wlan_psoc_host_service_ext2_param *param); + +QDF_STATUS (*extract_hw_mode_cap_service_ready_ext)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t hw_mode_idx, + struct wlan_psoc_host_hw_mode_caps *param); + +QDF_STATUS (*extract_mac_phy_cap_service_ready_ext)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + uint8_t hw_mode_id, + uint8_t phy_id, + struct wlan_psoc_host_mac_phy_caps *param); + +QDF_STATUS (*extract_reg_cap_service_ready_ext)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t phy_idx, + struct wlan_psoc_host_hal_reg_capabilities_ext *param); + +QDF_STATUS (*extract_dbr_ring_cap_service_ready_ext)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct wlan_psoc_host_dbr_ring_caps *param); + +QDF_STATUS (*extract_dbr_ring_cap_service_ready_ext2)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct wlan_psoc_host_dbr_ring_caps *param); + +QDF_STATUS (*extract_scaling_params_service_ready_ext)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct wlan_psoc_host_spectral_scaling_params *param); + +QDF_STATUS (*extract_sar_cap_service_ready_ext)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wlan_psoc_host_service_ext_param *ext_param); + +#ifdef WMI_DBR_SUPPORT +QDF_STATUS (*send_dbr_cfg_cmd)(wmi_unified_t wmi_handle, + struct direct_buf_rx_cfg_req *cfg); + +QDF_STATUS (*extract_dbr_buf_release_fixed)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct direct_buf_rx_rsp *param); + +QDF_STATUS (*extract_dbr_buf_release_entry)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct direct_buf_rx_entry *param); + +QDF_STATUS (*extract_dbr_buf_metadata)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct direct_buf_rx_metadata *param); +#endif + +QDF_STATUS (*extract_pdev_utf_event)(wmi_unified_t wmi_hdl, + uint8_t *evt_buf, + struct wmi_host_pdev_utf_event *param); + +QDF_STATUS (*extract_pdev_qvit_event)(wmi_unified_t wmi_hdl, + uint8_t *evt_buf, + struct wmi_host_pdev_qvit_event *param); + +uint16_t (*wmi_set_htc_tx_tag)(wmi_unified_t wmi_handle, + wmi_buf_t buf, uint32_t cmd_id); + +QDF_STATUS (*extract_peer_delete_response_event)( + wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_host_peer_delete_response_event *param); + +QDF_STATUS (*extract_vdev_peer_delete_all_resp)( + wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_host_vdev_peer_delete_all_response_event + *peer_delete_all_rsp); + +QDF_STATUS (*extract_vdev_peer_delete_all_response_event)( + wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_host_vdev_peer_delete_all_response_event *param); + +bool (*is_management_record)(uint32_t cmd_id); +bool (*is_diag_event)(uint32_t event_id); +uint8_t *(*wmi_id_to_name)(uint32_t cmd_id); +QDF_STATUS (*send_dfs_phyerr_offload_en_cmd)(wmi_unified_t wmi_handle, + uint32_t pdev_id); +QDF_STATUS (*send_dfs_phyerr_offload_dis_cmd)(wmi_unified_t wmi_handle, + uint32_t pdev_id); +QDF_STATUS (*extract_reg_chan_list_update_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct cur_regulatory_info + *reg_info, + uint32_t len); + +QDF_STATUS (*extract_reg_11d_new_country_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct reg_11d_new_country *reg_11d_country, + uint32_t len); + +QDF_STATUS (*extract_reg_ch_avoid_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct ch_avoid_ind_type *ch_avoid_event, + uint32_t len); + +#ifdef WLAN_SUPPORT_RF_CHARACTERIZATION +QDF_STATUS (*extract_num_rf_characterization_entries)(wmi_unified_t wmi_hdl, + uint8_t *evt_buf, + uint32_t *num_rf_characterization_entries); + + +QDF_STATUS (*extract_rf_characterization_entries)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + uint32_t num_rf_characterization_entries, + struct wmi_host_rf_characterization_event_param *rf_characterization_entries); +#endif + +QDF_STATUS (*extract_chainmask_tables)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wlan_psoc_host_chainmask_table *chainmask_table); + +QDF_STATUS (*send_get_rcpi_cmd)(wmi_unified_t wmi_handle, + struct rcpi_req *get_rcpi_param); + +QDF_STATUS (*extract_rcpi_response_event)(wmi_unified_t wmi_handle, + void *evt_buf, + struct rcpi_res *res); + +QDF_STATUS (*extract_dfs_cac_complete_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + uint32_t *vdev_id, + uint32_t len); +QDF_STATUS +(*extract_dfs_ocac_complete_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct vdev_adfs_complete_status *oca_stats); + +QDF_STATUS (*extract_dfs_radar_detection_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct radar_found_info *radar_found, + uint32_t len); +QDF_STATUS (*extract_wlan_radar_event_info)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct radar_event_info *wlan_radar_event, + uint32_t len); + +QDF_STATUS (*send_set_country_cmd)(wmi_unified_t wmi_handle, + struct set_country *param); + +uint32_t (*convert_pdev_id_host_to_target)(wmi_unified_t wmi_handle, + uint32_t pdev_id); +uint32_t (*convert_pdev_id_target_to_host)(wmi_unified_t wmi_handle, + uint32_t pdev_id); +uint32_t (*convert_phy_id_host_to_target)(wmi_unified_t wmi_handle, + uint32_t phy_id); +uint32_t (*convert_phy_id_target_to_host)(wmi_unified_t wmi_handle, + uint32_t phy_id); + +/* + * For MCL, convert_pdev_id_host_to_target returns legacy pdev id value. + * But in converged firmware, WMI_SET_CURRENT_COUNTRY_CMDID expects target + * mapping of pdev_id to give only one WMI_REG_CHAN_LIST_CC_EVENTID. + * wmi_pdev_id_conversion_enable cannot be used since it overwrites + * convert_pdev_id_host_to_target which effects legacy cases. + * Below two commands: convert_host_pdev_id_to_target and + * convert_target_pdev_id_to_host should be used for any WMI + * command/event where FW expects target/host mapping of pdev_id respectively. + */ +uint32_t (*convert_host_pdev_id_to_target)(wmi_unified_t wmi_handle, + uint32_t pdev_id); +uint32_t (*convert_target_pdev_id_to_host)(wmi_unified_t wmi_handle, + uint32_t pdev_id); + +uint32_t (*convert_host_phy_id_to_target)(wmi_unified_t wmi_handle, + uint32_t phy_id); +uint32_t (*convert_target_phy_id_to_host)(wmi_unified_t wmi_handle, + uint32_t phy_id); + +QDF_STATUS (*send_user_country_code_cmd)(wmi_unified_t wmi_handle, + uint8_t pdev_id, struct cc_regdmn_s *rd); + +QDF_STATUS (*send_wds_entry_list_cmd)(wmi_unified_t wmi_handle); +QDF_STATUS (*extract_wds_entry)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wdsentry *wds_entry, + u_int32_t idx); + +#ifdef WLAN_FEATURE_NAN +QDF_STATUS (*send_ndp_initiator_req_cmd)(wmi_unified_t wmi_handle, + struct nan_datapath_initiator_req *req); +QDF_STATUS (*send_ndp_responder_req_cmd)(wmi_unified_t wmi_handle, + struct nan_datapath_responder_req *req); +QDF_STATUS (*send_ndp_end_req_cmd)(wmi_unified_t wmi_handle, + struct nan_datapath_end_req *req); +QDF_STATUS (*send_terminate_all_ndps_req_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id); + +QDF_STATUS (*extract_ndp_initiator_rsp)(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_initiator_rsp *rsp); +QDF_STATUS (*extract_ndp_ind)(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_indication_event *ind); +QDF_STATUS (*extract_nan_msg)(uint8_t *data, + struct nan_dump_msg *msg); +QDF_STATUS (*extract_ndp_confirm)(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_confirm_event *ev); +QDF_STATUS (*extract_ndp_responder_rsp)(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_responder_rsp *rsp); +QDF_STATUS (*extract_ndp_end_rsp)(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_end_rsp_event *rsp); +QDF_STATUS (*extract_ndp_end_ind)(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_end_indication_event **ind); +QDF_STATUS (*extract_ndp_sch_update)(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_sch_update_event *ind); +QDF_STATUS (*extract_ndp_host_event)(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_host_event *evt); +#endif /* WLAN_FEATURE_NAN */ + +QDF_STATUS (*send_obss_detection_cfg_cmd)(wmi_unified_t wmi_handle, + struct wmi_obss_detection_cfg_param *obss_cfg_param); +QDF_STATUS (*extract_obss_detection_info)(uint8_t *evt_buf, + struct wmi_obss_detect_info *info); +#if defined(WLAN_SUPPORT_FILS) || defined(CONFIG_BAND_6GHZ) +QDF_STATUS (*send_vdev_fils_enable_cmd)(wmi_unified_t wmi_handle, + struct config_fils_params *param); +#endif +#ifdef WLAN_SUPPORT_FILS +QDF_STATUS (*extract_swfda_vdev_id)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *vdev_id); +QDF_STATUS (*send_fils_discovery_send_cmd)(wmi_unified_t wmi_handle, + struct fd_params *param); +#endif /* WLAN_SUPPORT_FILS */ + +QDF_STATUS +(*send_roam_scan_stats_cmd)(wmi_unified_t wmi_handle, + struct wmi_roam_scan_stats_req *params); + +QDF_STATUS +(*extract_roam_scan_stats_res_evt)(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t *vdev_id, + struct wmi_roam_scan_stats_res **res_param); +QDF_STATUS +(*extract_offload_bcn_tx_status_evt)(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *vdev_id, + uint32_t *tx_status); + +QDF_STATUS +(*extract_roam_trigger_stats)(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_roam_trigger_info *trig, + uint8_t idx); + +QDF_STATUS +(*extract_roam_scan_stats)(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_roam_scan_data *dst, uint8_t idx, + uint8_t chan_idx, uint8_t ap_idx); + +QDF_STATUS +(*extract_roam_result_stats)(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_roam_result *dst, + uint8_t idx); + +QDF_STATUS +(*extract_roam_11kv_stats)(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_neighbor_report_data *dst, + uint8_t idx, uint8_t rpt_idx); +QDF_STATUS +(*extract_roam_msg_info)(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_roam_msg_info *dst, uint8_t idx); + +void (*wmi_pdev_id_conversion_enable)(wmi_unified_t wmi_handle, + uint32_t *pdev_map, + uint8_t size); +void (*send_time_stamp_sync_cmd)(wmi_unified_t wmi_handle); +void (*wmi_free_allocated_event)(uint32_t cmd_event_id, + void **wmi_cmd_struct_ptr); +int (*wmi_check_and_pad_event)(void *os_handle, void *param_struc_ptr, + uint32_t param_buf_len, + uint32_t wmi_cmd_event_id, + void **wmi_cmd_struct_ptr); +int (*wmi_check_command_params)(void *os_handle, void *param_struc_ptr, + uint32_t param_buf_len, + uint32_t wmi_cmd_event_id); + +#ifdef WLAN_SUPPORT_TWT +QDF_STATUS (*send_twt_enable_cmd)(wmi_unified_t wmi_handle, + struct wmi_twt_enable_param *params); + +QDF_STATUS (*send_twt_disable_cmd)(wmi_unified_t wmi_handle, + struct wmi_twt_disable_param *params); + +QDF_STATUS (*send_twt_add_dialog_cmd)(wmi_unified_t wmi_handle, + struct wmi_twt_add_dialog_param *params); + +QDF_STATUS (*send_twt_del_dialog_cmd)(wmi_unified_t wmi_handle, + struct wmi_twt_del_dialog_param *params); + +QDF_STATUS (*send_twt_pause_dialog_cmd)(wmi_unified_t wmi_handle, + struct wmi_twt_pause_dialog_cmd_param *params); + +QDF_STATUS (*send_twt_resume_dialog_cmd)(wmi_unified_t wmi_handle, + struct wmi_twt_resume_dialog_cmd_param *params); + +#ifdef WLAN_SUPPORT_BCAST_TWT +QDF_STATUS (*send_twt_btwt_invite_sta_cmd)(wmi_unified_t wmi_handle, + struct wmi_twt_btwt_invite_sta_cmd_param *params); + +QDF_STATUS (*send_twt_btwt_remove_sta_cmd)(wmi_unified_t wmi_handle, + struct wmi_twt_btwt_remove_sta_cmd_param *params); +#endif + +QDF_STATUS (*extract_twt_enable_comp_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_enable_complete_event_param *params); + +QDF_STATUS (*extract_twt_disable_comp_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_disable_complete_event *params); + +QDF_STATUS (*extract_twt_add_dialog_comp_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_add_dialog_complete_event_param *params); + +QDF_STATUS (*extract_twt_del_dialog_comp_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_del_dialog_complete_event_param *params); + +QDF_STATUS (*extract_twt_pause_dialog_comp_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_pause_dialog_complete_event_param *params); + +QDF_STATUS (*extract_twt_resume_dialog_comp_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_resume_dialog_complete_event_param *params); + +#ifdef WLAN_SUPPORT_BCAST_TWT +QDF_STATUS (*extract_twt_btwt_invite_sta_comp_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_btwt_invite_sta_complete_event_param *params); + +QDF_STATUS (*extract_twt_btwt_remove_sta_comp_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_btwt_remove_sta_complete_event_param *params); +#endif + +#endif + +#ifdef QCA_SUPPORT_CP_STATS +QDF_STATUS (*extract_cca_stats)(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_congestion_stats *stats); +#endif /* QCA_SUPPORT_CP_STATS */ + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS (*send_dfs_average_radar_params_cmd)( + wmi_unified_t wmi_handle, + struct dfs_radar_found_params *params); + +QDF_STATUS (*extract_dfs_status_from_fw)(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t *dfs_status_check); +#endif + + +#ifdef OBSS_PD +QDF_STATUS (*send_obss_spatial_reuse_set)(wmi_unified_t wmi_handle, + struct wmi_host_obss_spatial_reuse_set_param + *obss_spatial_reuse_param); + +QDF_STATUS (*send_obss_spatial_reuse_set_def_thresh)(wmi_unified_t wmi_handle, + struct wmi_host_obss_spatial_reuse_set_def_thresh + *obss_spatial_reuse_param); +#endif + +QDF_STATUS +(*extract_ctl_failsafe_check_ev_param)( + wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_host_pdev_ctl_failsafe_event *param); + +QDF_STATUS (*send_peer_del_all_wds_entries_cmd)(wmi_unified_t wmi_handle, + struct peer_del_all_wds_entries_params *param); + +#ifdef WLAN_CFR_ENABLE +QDF_STATUS +(*extract_cfr_peer_tx_event_param)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_cfr_peer_tx_event_param *peer_tx_event); + +QDF_STATUS (*send_peer_cfr_capture_cmd)(wmi_unified_t wmi_handle, + struct peer_cfr_params *param); +#ifdef WLAN_ENH_CFR_ENABLE +QDF_STATUS (*send_cfr_rcc_cmd)(wmi_unified_t wmi_handle, + struct cfr_rcc_param *cfg); +#endif +#endif + +#ifdef WMI_AP_SUPPORT +QDF_STATUS (*send_vdev_pcp_tid_map_cmd)(wmi_unified_t wmi_handle, + struct vap_pcp_tid_map_params *param); +QDF_STATUS (*send_vdev_tidmap_prec_cmd)(wmi_unified_t wmi_handle, + struct vap_tidmap_prec_params *param); +QDF_STATUS (*send_peer_ft_roam_cmd)(wmi_unified_t wmi_handle, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + uint8_t vdev_id); +QDF_STATUS (*send_peer_vlan_config_cmd)(wmi_unified_t wmi, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + struct peer_vlan_config_param *param); + +#endif +QDF_STATUS (*send_mws_coex_status_req_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint32_t cmd_id); + +#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG +QDF_STATUS (*set_rx_pkt_type_routing_tag_cmd)( + wmi_unified_t wmi_hdl, struct wmi_rx_pkt_protocol_routing_info *param); +#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */ +#ifdef WIFI_POS_CONVERGED +QDF_STATUS (*extract_oem_response_param) + (wmi_unified_t wmi_hdl, void *resp_buf, + struct wmi_oem_response_param *oem_resp_param); +#endif /* WIFI_POS_CONVERGED */ + +QDF_STATUS (*extract_hw_mode_resp_event)(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *cmd_status); + +QDF_STATUS (*send_set_roam_trigger_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint32_t trigger_bitmap); + +#ifdef WLAN_FEATURE_ELNA +QDF_STATUS (*send_set_elna_bypass_cmd)(wmi_unified_t wmi_handle, + struct set_elna_bypass_request *req); +QDF_STATUS (*send_get_elna_bypass_cmd)(wmi_unified_t wmi_handle, + struct get_elna_bypass_request *req); +QDF_STATUS (*extract_get_elna_bypass_resp)(wmi_unified_t wmi_handle, + void *resp_buf, + struct get_elna_bypass_response *resp); +#endif /* WLAN_FEATURE_ELNA */ + +#ifdef WLAN_SEND_DSCP_UP_MAP_TO_FW +QDF_STATUS (*send_dscp_tid_map_cmd)(wmi_unified_t wmi_handle, + uint32_t *dscp_to_tid_map); +#endif + +QDF_STATUS (*send_pdev_get_pn_cmd)(wmi_unified_t wmi_handle, + struct peer_request_pn_param *pn_params); +QDF_STATUS (*extract_get_pn_data)(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_host_get_pn_event *param); +#ifdef FEATURE_ANI_LEVEL_REQUEST +QDF_STATUS (*send_ani_level_cmd)(wmi_unified_t wmi_handle, uint32_t *freqs, + uint8_t num_freqs); + +QDF_STATUS (*extract_ani_level)(uint8_t *evt_buf, + struct wmi_host_ani_level_event **info, + uint32_t *num_freqs); +#endif /* FEATURE_ANI_LEVEL_REQUEST */ + +QDF_STATUS (*extract_multi_vdev_restart_resp_event)( + wmi_unified_t wmi_handle, void *evt_buf, + struct multi_vdev_restart_resp *restart_rsp); + +#ifdef FEATURE_WLAN_TIME_SYNC_FTM +QDF_STATUS (*send_wlan_time_sync_ftm_trigger_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, + bool burst_mode); + +QDF_STATUS (*send_wlan_ts_qtime_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint64_t lpass_ts); + +QDF_STATUS (*extract_time_sync_ftm_start_stop_event)( + wmi_unified_t wmi_hdl, void *evt_buf, + struct ftm_time_sync_start_stop_params *param); + +QDF_STATUS (*extract_time_sync_ftm_offset_event)( + wmi_unified_t wmi_hdl, void *evt_buf, + struct ftm_time_sync_offset *param); +#endif /* FEATURE_WLAN_TIME_SYNC_FTM */ +QDF_STATUS (*send_roam_scan_ch_list_req_cmd)(wmi_unified_t wmi_hdl, + uint32_t vdev_id); + +QDF_STATUS +(*extract_install_key_comp_event)(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t len, + struct wmi_install_key_comp_event *param); +}; + +/* Forward declartion for psoc*/ +struct wlan_objmgr_psoc; + +/** + * struct wmi_init_cmd - Saved wmi INIT command + * @buf: Buffer containing the wmi INIT command + * @buf_len: Length of the buffer + */ +struct wmi_cmd_init { + wmi_buf_t buf; + uint32_t buf_len; +}; + +/** + * @abi_version_0: WMI Major and Minor versions + * @abi_version_1: WMI change revision + * @abi_version_ns_0: ABI version namespace first four dwords + * @abi_version_ns_1: ABI version namespace second four dwords + * @abi_version_ns_2: ABI version namespace third four dwords + * @abi_version_ns_3: ABI version namespace fourth four dwords + */ +struct wmi_host_abi_version { + uint32_t abi_version_0; + uint32_t abi_version_1; + uint32_t abi_version_ns_0; + uint32_t abi_version_ns_1; + uint32_t abi_version_ns_2; + uint32_t abi_version_ns_3; +}; + +/* number of debugfs entries used */ +#ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING +/* filtered logging added 4 more entries */ +#define NUM_DEBUG_INFOS 13 +#else +#define NUM_DEBUG_INFOS 9 +#endif + +struct wmi_unified { + void *scn_handle; /* handle to device */ + osdev_t osdev; /* handle to use OS-independent services */ + struct wbuff_mod_handle *wbuff_handle; /* handle to wbuff */ + qdf_atomic_t pending_cmds; + HTC_ENDPOINT_ID wmi_endpoint_id; + uint16_t max_msg_len; + uint32_t *event_id; + wmi_unified_event_handler *event_handler; + enum wmi_rx_exec_ctx *ctx; + HTC_HANDLE htc_handle; + qdf_spinlock_t eventq_lock; + qdf_nbuf_queue_t event_queue; + qdf_work_t rx_event_work; + qdf_workqueue_t *wmi_rx_work_queue; + int wmi_stop_in_progress; + struct wmi_host_abi_version fw_abi_version; + struct wmi_host_abi_version final_abi_vers; + uint32_t num_of_diag_events_logs; + uint32_t *events_logs_list; +#ifdef WLAN_OPEN_SOURCE + struct fwdebug dbglog; + struct dentry *debugfs_phy; +#endif /* WLAN_OPEN_SOURCE */ + +#ifdef WMI_INTERFACE_EVENT_LOGGING + struct wmi_debug_log_info log_info; +#endif /*WMI_INTERFACE_EVENT_LOGGING */ + + qdf_atomic_t is_target_suspended; +#ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI + bool is_qmi_stats_enabled; +#endif + +#ifdef FEATURE_RUNTIME_PM + qdf_atomic_t runtime_pm_inprogress; +#endif + qdf_atomic_t is_wow_bus_suspended; + bool tag_crash_inject; + bool tgt_force_assert_enable; + enum wmi_target_type target_type; + struct wmi_ops *ops; + bool use_cookie; + bool wmi_stopinprogress; + uint32_t *wmi_events; + uint32_t *services; + struct wmi_soc *soc; + uint16_t wmi_max_cmds; + struct dentry *debugfs_de[NUM_DEBUG_INFOS]; + qdf_atomic_t critical_events_in_flight; +#ifdef WMI_EXT_DBG + int wmi_ext_dbg_msg_queue_size; + qdf_list_t wmi_ext_dbg_msg_queue; + qdf_spinlock_t wmi_ext_dbg_msg_queue_lock; + qdf_dentry_t wmi_ext_dbg_dentry; +#endif /*WMI_EXT_DBG*/ + uint32_t *cmd_pdev_id_map; + uint32_t *evt_pdev_id_map; + uint32_t *cmd_phy_id_map; + uint32_t *evt_phy_id_map; + qdf_atomic_t num_stats_over_qmi; +}; + +#define WMI_MAX_RADIOS 3 +struct wmi_soc { + struct wlan_objmgr_psoc *wmi_psoc; + void *scn_handle; /* handle to device */ + qdf_atomic_t num_pdevs; + enum wmi_target_type target_type; + bool is_async_ep; + HTC_HANDLE htc_handle; + uint32_t event_id[WMI_UNIFIED_MAX_EVENT]; + wmi_unified_event_handler event_handler[WMI_UNIFIED_MAX_EVENT]; + uint32_t max_event_idx; + enum wmi_rx_exec_ctx ctx[WMI_UNIFIED_MAX_EVENT]; + qdf_spinlock_t ctx_lock; + struct wmi_unified *wmi_pdev[WMI_MAX_RADIOS]; + HTC_ENDPOINT_ID wmi_endpoint_id[WMI_MAX_RADIOS]; + uint16_t max_msg_len[WMI_MAX_RADIOS]; + struct wmi_ops *ops; + const uint32_t *svc_ids; + uint32_t wmi_events[wmi_events_max]; + /* WMI service bitmap received from target */ + uint32_t *wmi_service_bitmap; + uint32_t *wmi_ext_service_bitmap; + uint32_t services[wmi_services_max]; + uint16_t wmi_max_cmds; + uint32_t soc_idx; + uint32_t cmd_pdev_id_map[WMI_MAX_RADIOS]; + uint32_t evt_pdev_id_map[WMI_MAX_RADIOS]; + uint32_t cmd_phy_id_map[WMI_MAX_RADIOS]; + uint32_t evt_phy_id_map[WMI_MAX_RADIOS]; + bool is_pdev_is_map_enable; + bool is_phy_id_map_enable; +#ifdef WMI_INTERFACE_EVENT_LOGGING + uint32_t buf_offset_command; + uint32_t buf_offset_event; +#endif /*WMI_INTERFACE_EVENT_LOGGING */ +}; + +/** + * struct wmi_process_fw_event_params - fw event parameters + * @wmi_handle: wmi handle + * @evt_buf: event buffer + */ +struct wmi_process_fw_event_params { + void *wmi_handle; + void *evt_buf; +}; + +/** + * wmi_mtrace() - Wrappper function for qdf_mtrace api + * @message_id: 32-Bit Wmi message ID + * @vdev_id: Vdev ID + * @data: Actual message contents + * + * This function converts the 32-bit WMI message ID in 15-bit message ID + * format for qdf_mtrace as in qdf_mtrace message there are only 15 + * bits reserved for message ID. + * out of these 15-bits, 8-bits (From MSB) specifies the WMI_GRP_ID + * and remaining 7-bits specifies the actual WMI command. With this + * notation there can be maximum 256 groups and each group can have + * max 128 commands can be supported. + * + * Return: None + */ +void wmi_mtrace(uint32_t message_id, uint16_t vdev_id, uint32_t data); + +void wmi_unified_register_module(enum wmi_target_type target_type, + void (*wmi_attach)(wmi_unified_t wmi_handle)); +void wmi_tlv_init(void); +void wmi_non_tlv_init(void); +#ifdef WMI_NON_TLV_SUPPORT +/* ONLY_NON_TLV_TARGET:TLV attach dummy function definition for case when + * driver supports only NON-TLV target (WIN mainline) */ +#define wmi_tlv_attach(x) qdf_print("TLV Unavailable") +#else +void wmi_tlv_attach(wmi_unified_t wmi_handle); +#endif +void wmi_non_tlv_attach(wmi_unified_t wmi_handle); + +#ifdef FEATURE_WLAN_EXTSCAN +void wmi_extscan_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_extscan_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WMI_SMART_ANT_SUPPORT +void wmi_smart_ant_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_smart_ant_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WMI_DBR_SUPPORT +void wmi_dbr_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_dbr_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WMI_ATF_SUPPORT +void wmi_atf_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_atf_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WMI_AP_SUPPORT +void wmi_ap_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_ap_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WLAN_FEATURE_DSRC +void wmi_ocb_attach_tlv(wmi_unified_t wmi_handle); +#else +static inline void wmi_ocb_attach_tlv(wmi_unified_t wmi_handle) +{ +} +#endif + +#ifdef WLAN_FEATURE_NAN +void wmi_nan_attach_tlv(wmi_unified_t wmi_handle); +#else +static inline void wmi_nan_attach_tlv(wmi_unified_t wmi_handle) +{ +} +#endif + +#ifdef FEATURE_P2P_LISTEN_OFFLOAD +void wmi_p2p_listen_offload_attach_tlv(wmi_unified_t wmi_handle); +#else +static inline +void wmi_p2p_listen_offload_attach_tlv(wmi_unified_t wmi_handle) +{ +} +#endif + +#ifdef CONVERGED_P2P_ENABLE +void wmi_p2p_attach_tlv(wmi_unified_t wmi_handle); +#else +static inline void wmi_p2p_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WLAN_FEATURE_INTEROP_ISSUES_AP +void wmi_interop_issues_ap_attach_tlv(wmi_unified_t wmi_handle); +#else +static inline void +wmi_interop_issues_ap_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef FEATURE_LFR_SUBNET_DETECTION +void wmi_lfr_subnet_detection_attach_tlv(wmi_unified_t wmi_handle); +#else +static inline +void wmi_lfr_subnet_detection_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef FEATURE_RSSI_MONITOR +void wmi_rssi_monitor_attach_tlv(wmi_unified_t wmi_handle); +#else +static inline +void wmi_rssi_monitor_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef FEATURE_WLAN_ESE +void wmi_ese_attach_tlv(wmi_unified_t wmi_handle); +#else +static inline void wmi_ese_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WLAN_FEATURE_ROAM_OFFLOAD +void wmi_roam_offload_attach_tlv(wmi_unified_t wmi_handle); +#else +static inline +void wmi_roam_offload_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WLAN_FEATURE_FILS_SK +void wmi_fils_sk_attach_tlv(wmi_unified_t wmi_handle); +#else +static inline void wmi_fils_sk_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WMI_ROAM_SUPPORT +void wmi_roam_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_roam_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WMI_CONCURRENCY_SUPPORT +void wmi_concurrency_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_concurrency_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef FEATURE_WLAN_D0WOW +void wmi_d0wow_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_d0wow_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef FEATURE_WLAN_RA_FILTERING +void wmi_ra_filtering_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline +void wmi_ra_filtering_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef FEATURE_WLAN_LPHB +void wmi_lphb_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_lphb_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WLAN_FEATURE_PACKET_FILTERING +void wmi_packet_filtering_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline +void wmi_packet_filtering_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WLAN_FEATURE_EXTWOW_SUPPORT +void wmi_extwow_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_extwow_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +void wmi_pmo_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_pmo_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef FEATURE_WLAN_TDLS +void wmi_tdls_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_tdls_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WLAN_POLICY_MGR_ENABLE +void wmi_policy_mgr_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline +void wmi_policy_mgr_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef FEATURE_BLACKLIST_MGR +void wmi_blacklist_mgr_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline +void wmi_blacklist_mgr_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WMI_STA_SUPPORT +void wmi_sta_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_sta_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +#ifdef WLAN_WMI_BCN +void wmi_bcn_attach_tlv(wmi_unified_t wmi_handle); +#else +static inline void wmi_bcn_attach_tlv(wmi_unified_t wmi_handle) +{ +} +#endif + +/** + * wmi_fwol_attach_tlv() - attach fw offload tlv handlers + * @wmi_handle: wmi handle + * + * Return: void + */ +#ifdef WLAN_FW_OFFLOAD +void wmi_fwol_attach_tlv(wmi_unified_t wmi_handle); +#else +static inline void wmi_fwol_attach_tlv(wmi_unified_t wmi_handle) +{ +} +#endif + +/** + * wmi_gpio_attach_tlv() - attach gpio tlv handlers + * @wmi_handle: wmi handle + * + * Return: void + */ +#ifdef WLAN_FEATURE_GPIO_CFG +void wmi_gpio_attach_tlv(wmi_unified_t wmi_handle); +#else +static inline void +wmi_gpio_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +/** + * wmi_align() - provides word aligned parameter + * @param: parameter to be aligned + * + * Return: word aligned parameter + */ +static inline uint32_t wmi_align(uint32_t param) +{ + return roundup(param, sizeof(uint32_t)); +} + +/** + * wmi_vdev_map_to_vdev_id() - Provides vdev id corresponding to idx + * from vdev map + * @vdev_map: Bitmask containing information of active vdev ids + * @idx: Index referring to the i'th bit set from LSB in vdev map + * + * This API returns the vdev id for the i'th bit set from LSB in vdev map. + * Index runs through 1 from maximum number of vdevs set in the vdev map + * + * Return: vdev id of the vdev object + */ +static inline uint32_t wmi_vdev_map_to_vdev_id(uint32_t vdev_map, + uint32_t idx) +{ + uint32_t vdev_count = 0, vdev_set = 0, vdev_id = WLAN_INVALID_VDEV_ID; + + while (vdev_map) { + vdev_set += (vdev_map & 0x1); + if (vdev_set == (idx+1)) { + vdev_id = vdev_count; + break; + } + vdev_map >>= 1; + vdev_count++; + } + + return vdev_id; +} + +/** + * wmi_vdev_map_to_num_vdevs() - Provides number of vdevs active based on the + * vdev map received from FW + * @vdev_map: Bitmask containing information of active vdev ids + * + * Return: Number of vdevs set in the vdev bit mask + */ +static inline uint32_t wmi_vdev_map_to_num_vdevs(uint32_t vdev_map) +{ + uint32_t num_vdevs = 0; + + while (vdev_map) { + num_vdevs += (vdev_map & 0x1); + vdev_map >>= 1; + } + + return num_vdevs; +} + +#ifdef WMI_EXT_DBG + +/** + * wmi_ext_dbg_msg_get() - Allocate memory for wmi debug msg + * + * @buflen: Length of WMI message buffer + * + * Return: Allocated msg buffer else NULL on failure. + */ +static inline struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_get(uint32_t buflen) +{ + return qdf_mem_malloc(sizeof(struct wmi_ext_dbg_msg) + buflen); +} + +/** + * wmi_ext_dbg_msg_put() - Free wmi debug msg buffer + * + * @msg: wmi message buffer to be freed + * + * Return: none + */ +static inline void wmi_ext_dbg_msg_put(struct wmi_ext_dbg_msg *msg) +{ + qdf_mem_free(msg); +} + +#else + +static inline QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified + *wmi_handle, + uint8_t *buf, uint32_t len) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified + *wmi_handle, + uint8_t *buf, + uint32_t len) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle) +{ + return QDF_STATUS_SUCCESS; +} + +#endif /*WMI_EXT_DBG */ + +#ifdef WLAN_CFR_ENABLE +void wmi_cfr_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_cfr_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_reg_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_reg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..9d74d448cc51212692ca491837836d665f85ba0c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_reg_api.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: This file contains the API definitions for the Unified Wireless Module + * Interface (WMI) which are specific to Regulatory module. + */ + +#ifndef _WMI_UNIFIED_REG_API_H_ +#define _WMI_UNIFIED_REG_API_H_ + +#include "reg_services_public_struct.h" +/** + * reg_chan_list_update_handler() - function to update channel list + * @wmi_handle: wmi handle + * @event_buf: event buffer + * @reg_info regulatory info + * @len: length of buffer + * + * Return: 0 for success or error code + */ +QDF_STATUS wmi_extract_reg_chan_list_update_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct cur_regulatory_info *reg_info, + uint32_t len); + +/* + * wmi_unified_send_stop_11d_scan_cmd() - stop 11d scan + * @wmi_handle: wmi handle + * @stop_11d_scan: pointer to 11d scan stop req. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_stop_11d_scan_cmd(wmi_unified_t wmi_handle, + struct reg_stop_11d_scan_req *stop_11d_scan); + +/* + * wmi_unified_send_start_11d_scan_cmd() - start 11d scan + * @wmi_handle: wmi handle + * @start_11d_scan: pointer to 11d scan start req. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_start_11d_scan_cmd(wmi_unified_t wmi_handle, + struct reg_start_11d_scan_req *start_11d_scan); + +/** + * wmi_extract_reg_11d_new_cc_event() - function to extract the 11d new country + * @wmi_handle: wmi handle + * @evt_buf: event buffer + * @reg_11d_new_cc: pointer to new 11d country info + * @len: length of buffer + * + * Return: 0 for success or error code + */ +QDF_STATUS wmi_extract_reg_11d_new_cc_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct reg_11d_new_country *reg_11d_new_cc, + uint32_t len); + +/** + * wmi_unified_set_user_country_code_cmd_send() - WMI set country function + * @wmi_handle: wmi handle. + * @pdev_id: Pdev id + * @rd: User country code or regdomain + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_user_country_code_cmd_send( + wmi_unified_t wmi_handle, + uint8_t pdev_id, struct cc_regdmn_s *rd); + +/** + * wmi_extract_reg_ch_avoid_event() - process freq avoid event + * @wmi_handle: wmi handle. + * @evt_buf: event buffer + * @ch_avoid_ind: buffer pointer to save the event processed data + * @len: length of buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_reg_ch_avoid_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct ch_avoid_ind_type *ch_avoid_ind, + uint32_t len); + +#endif /* _WMI_UNIFIED_REG_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_roam_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_roam_api.h new file mode 100644 index 0000000000000000000000000000000000000000..8cad035b4cb17b7f0a781d7f7ee1c2bde4dba683 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_roam_api.h @@ -0,0 +1,407 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to ROAMING component. + */ + +#ifndef _WMI_UNIFIED_ROAM_API_H_ +#define _WMI_UNIFIED_ROAM_API_H_ + +#include + +#ifdef FEATURE_LFR_SUBNET_DETECTION +/** + * wmi_unified_set_gateway_params_cmd() - set gateway parameters + * @wmi_handle: wmi handle + * @req: gateway parameter update request structure + * + * This function reads the incoming @req and fill in the destination + * WMI structure and sends down the gateway configs down to the firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures; + * error number otherwise + */ +QDF_STATUS +wmi_unified_set_gateway_params_cmd(wmi_unified_t wmi_handle, + struct gateway_update_req_param *req); +#endif + +#ifdef FEATURE_RSSI_MONITOR +/** + * wmi_unified_set_rssi_monitoring_cmd() - set rssi monitoring + * @wmi_handle: wmi handle + * @req: rssi monitoring request structure + * + * This function reads the incoming @req and fill in the destination + * WMI structure and send down the rssi monitoring configs down to the firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures; + * error number otherwise + */ +QDF_STATUS +wmi_unified_set_rssi_monitoring_cmd(wmi_unified_t wmi_handle, + struct rssi_monitor_param *req); +#endif + +/** + * wmi_unified_roam_scan_offload_rssi_thresh_cmd() - set roam scan rssi + * parameters + * @wmi_handle: wmi handle + * @roam_req: roam rssi related parameters + * + * This function reads the incoming @roam_req and fill in the destination + * WMI structure and send down the roam scan rssi configs down to the firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_scan_offload_rssi_thresh_cmd( + wmi_unified_t wmi_handle, + struct roam_offload_scan_rssi_params *roam_req); + +/** + * wmi_unified_roam_mawc_params_cmd() - configure roaming MAWC parameters + * @wmi_handle: wmi handle + * @params: Parameters to be configured + * + * Pass the MAWC(Motion Aided wireless connectivity) related roaming + * parameters from the host to the target + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_roam_mawc_params_cmd(wmi_unified_t wmi_handle, + struct wmi_mawc_roam_params *params); + +/** + * wmi_unified_roam_scan_filter_cmd() - send roam scan whitelist, + * blacklist and preferred list + * @wmi_handle: wmi handle + * @roam_req: roam scan lists related parameters + * + * This function reads the incoming @roam_req and fill in the destination + * WMI structure and send down the different roam scan lists down to the fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_roam_scan_filter_cmd(wmi_unified_t wmi_handle, + struct roam_scan_filter_params *roam_req); + +#ifdef FEATURE_WLAN_ESE +/** + * wmi_unified_plm_stop_cmd() - plm stop request + * @wmi_handle: wmi handle + * @plm: plm request parameters + * + * This function request FW to stop PLM. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_plm_stop_cmd(wmi_unified_t wmi_handle, + const struct plm_req_params *plm); + +/** + * wmi_unified_plm_start_cmd() - plm start request + * @wmi_handle: wmi handle + * @plm: plm request parameters + * + * This function request FW to start PLM. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_plm_start_cmd(wmi_unified_t wmi_handle, + const struct plm_req_params *plm); +#endif /* FEATURE_WLAN_ESE */ + +#ifdef WLAN_FEATURE_ROAM_OFFLOAD +/* wmi_unified_set_ric_req_cmd() - set ric request element + * @wmi_handle: wmi handle + * @msg: message + * @is_add_ts: is addts required + * + * This function sets ric request element for 11r roaming. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_ric_req_cmd(wmi_unified_t wmi_handle, void *msg, + uint8_t is_add_ts); + +/** + * wmi_unified_roam_synch_complete_cmd() - roam synch complete command to fw. + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * This function sends roam synch complete event to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_synch_complete_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +/** + * wmi_unified__roam_invoke_cmd() - send roam invoke command to fw. + * @wmi_handle: wmi handle + * @roaminvoke: roam invoke command + * @ch_hz: channel + * + * Send roam invoke command to fw for fastreassoc. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_roam_invoke_cmd(wmi_unified_t wmi_handle, + struct wmi_roam_invoke_cmd *roaminvoke, + uint32_t ch_hz); +#endif /* WLAN_FEATURE_ROAM_OFFLOAD */ + +/** + * wmi_unified_roam_scan_offload_mode_cmd() - set roam scan parameters + * @wmi_handle: wmi handle + * @scan_cmd_fp: scan related parameters + * @roam_req: roam related parameters + * + * This function reads the incoming @roam_req and fill in the destination + * WMI structure and send down the roam scan configs down to the firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_scan_offload_mode_cmd( + wmi_unified_t wmi_handle, + wmi_start_scan_cmd_fixed_param *scan_cmd_fp, + struct roam_offload_scan_params *roam_req); + +/** + * wmi_unified_send_roam_scan_offload_ap_cmd() - set roam ap profile in fw + * @wmi_handle: wmi handle + * @ap_profile: ap profile params + * + * Send WMI_ROAM_AP_PROFILE to firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_roam_scan_offload_ap_cmd( + wmi_unified_t wmi_handle, + struct ap_profile_params *ap_profile); + +/** + * wmi_unified_roam_scan_offload_cmd() - set roam offload command + * @wmi_handle: wmi handle + * @command: command + * @vdev_id: vdev id + * + * This function set roam offload command to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_scan_offload_cmd(wmi_unified_t wmi_handle, + uint32_t command, + uint32_t vdev_id); + +/** + * wmi_unified_roam_scan_offload_scan_period() - set roam offload scan period + * @wmi_handle: wmi handle + * @param: pointer to roam scan period params to be sent to fw + * + * Send WMI_ROAM_SCAN_PERIOD parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_roam_scan_offload_scan_period(wmi_unified_t wmi_handle, + struct roam_scan_period_params *param); + +/** + * wmi_unified_roam_scan_offload_chan_list_cmd() - set roam offload channel list + * @wmi_handle: wmi handle + * @chan_count: channel count + * @chan_list: channel list + * @list_type: list type + * @vdev_id: vdev id + * + * Set roam offload channel list. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_roam_scan_offload_chan_list_cmd(wmi_unified_t wmi_handle, + uint8_t chan_count, + uint32_t *chan_list, + uint8_t list_type, + uint32_t vdev_id); + +/** + * wmi_unified_roam_scan_offload_rssi_change_cmd() - set roam offload RSSI th + * @wmi_handle: wmi handle + * @rssi_change_thresh: RSSI Change threshold + * @bcn_rssi_weight: beacon RSSI weight + * @vdev_id: vdev id + * + * Send WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_roam_scan_offload_rssi_change_cmd(wmi_unified_t wmi_handle, + uint32_t vdev_id, + int32_t rssi_change_thresh, + uint32_t bcn_rssi_weight, + uint32_t hirssi_delay_btw_scans); + +/** + * wmi_unified_set_per_roam_config() - set PER roam config in FW + * @wmi_handle: wmi handle + * @req_buf: per roam config request buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_set_per_roam_config(wmi_unified_t wmi_handle, + struct wmi_per_roam_config_req *req_buf); + +/** + * wmi_unified_send_limit_off_chan_cmd() - send wmi cmd of limit off channel + * configuration params + * @wmi_handle: wmi handler + * @wmi_param: pointer to wmi_limit_off_chan_param + * + * Return: QDF_STATUS_SUCCESS on success and QDF failure reason code on failure + */ +QDF_STATUS wmi_unified_send_limit_off_chan_cmd( + wmi_unified_t wmi_handle, + struct wmi_limit_off_chan_param *wmi_param); + +#ifdef WLAN_FEATURE_FILS_SK +/* + * wmi_unified_roam_send_hlp_cmd() -send HLP command info + * @wmi_handle: wma handle + * @req_buf: Pointer to HLP params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_send_hlp_cmd(wmi_unified_t wmi_handle, + struct hlp_params *req_buf); +#endif /* WLAN_FEATURE_FILS_SK */ + +/** + * wmi_unified_send_btm_config() - Send BTM config to fw + * @wmi_handle: wmi handle + * @params: pointer to wmi_btm_config + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_unified_send_btm_config(wmi_unified_t wmi_handle, + struct wmi_btm_config *params); + +/** + * wmi_unified_send_bss_load_config() - Send bss load trigger params to fw + * @wmi_handle: wmi handle + * @params: pointer to wmi_bss_load_config + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_unified_send_bss_load_config(wmi_unified_t wmi_handle, + struct wmi_bss_load_config *params); + +/** + * wmi_unified_send_disconnect_roam_params() - Send disconnect roam trigger + * parameters to firmware + * @wmi_hdl: wmi handle + * @params: pointer to wmi_disconnect_roam_params + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_unified_send_disconnect_roam_params(wmi_unified_t wmi_handle, + struct wmi_disconnect_roam_params *req); + +/** + * wmi_unified_send_idle_roam_params() - Send idle roam trigger params to fw + * @wmi_hdl: wmi handle + * @params: pointer to wmi_idle_roam_params + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_unified_send_idle_roam_params(wmi_unified_t wmi_handle, + struct wmi_idle_roam_params *req); + +/** + * wmi_unified_send_roam_preauth_status() - Send roam preauthentication status + * to target. + * @wmi_handle: wmi handle + * @param: Roam auth status params + * + * This function passes preauth status of WPA3 SAE auth to firmware. It is + * called when external_auth_status event is received from userspace. + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_unified_send_roam_preauth_status(wmi_unified_t wmi_handle, + struct wmi_roam_auth_status_params *param); + +/** + * wmi_unified_offload_11k_cmd() - send 11k offload command + * @wmi_handle: wmi handle + * @params: 11k offload params + * + * This function passes the 11k offload command params to FW + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_offload_11k_cmd(wmi_unified_t wmi_handle, + struct wmi_11k_offload_params *params); +/** + * wmi_unified_invoke_neighbor_report_cmd() - send invoke neighbor report cmd + * @wmi_handle: wmi handle + * @params: invoke neighbor report params + * + * This function passes the invoke neighbor report command to fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_invoke_neighbor_report_cmd( + wmi_unified_t wmi_handle, + struct wmi_invoke_neighbor_report_params *params); + +/** + * wmi_unified_get_roam_scan_ch_list() - send roam scan channel list get cmd + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * This function sends roam scan channel list get command to firmware. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_get_roam_scan_ch_list(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +#ifdef WLAN_FEATURE_ROAM_OFFLOAD +/** + * wmi_unified_set_roam_triggers() - send roam trigger bitmap + * @wmi_handle: wmi handle + * @triggers: Roam trigger bitmap params as defined @roam_control_trigger_reason + * + * This function passes the roam trigger bitmap to fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_roam_triggers(wmi_unified_t wmi_handle, + struct roam_triggers *triggers); +#endif +#endif /* _WMI_UNIFIED_ROAM_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_roam_param.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_roam_param.h new file mode 100644 index 0000000000000000000000000000000000000000..56ff819c8468e0c8cd71dc1e56c9f5656df3c260 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_roam_param.h @@ -0,0 +1,794 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API definitions for the ROAMING WMI APIs. + */ + +#ifndef _WMI_UNIFIED_ROAM_PARAM_H_ +#define _WMI_UNIFIED_ROAM_PARAM_H_ + +#include + +/** + * struct gateway_update_req_param - gateway parameter update request + * @request_id: request id + * @vdev_id: vdev id + * @max_retries: Max ARP/NS retry attempts + * @timeout: Retry interval + * @ipv4_addr_type: on ipv4 network + * @ipv6_addr_type: on ipv6 network + * @gw_mac_addr: gateway mac addr + * @ipv4_addr: ipv4 addr + * @ipv6_addr: ipv6 addr + */ +struct gateway_update_req_param { + uint32_t request_id; + uint32_t vdev_id; + uint32_t max_retries; + uint32_t timeout; + uint32_t ipv4_addr_type; + uint32_t ipv6_addr_type; + struct qdf_mac_addr gw_mac_addr; + uint8_t ipv4_addr[QDF_IPV4_ADDR_SIZE]; + uint8_t ipv6_addr[QDF_IPV6_ADDR_SIZE]; +}; + +/** + * struct rssi_monitor_param - rssi monitoring + * @request_id: request id + * @vdev_id: vdev id + * @min_rssi: minimum rssi + * @max_rssi: maximum rssi + * @control: flag to indicate start or stop + */ +struct rssi_monitor_param { + uint32_t request_id; + uint32_t vdev_id; + int8_t min_rssi; + int8_t max_rssi; + bool control; +}; + +/** + * struct roam_offload_scan_rssi_params - structure containing + * parameters for roam offload scan based on RSSI + * @rssi_thresh: rssi threshold + * @rssi_thresh_diff: difference in rssi threshold + * @hi_rssi_scan_max_count: 5G scan max count + * @hi_rssi_scan_rssi_delta: 5G scan rssi change threshold value + * @hi_rssi_scan_rssi_ub: 5G scan upper bound + * @raise_rssi_thresh_5g: flag to determine penalty and boost thresholds + * @vdev_id: vdev id + * @penalty_threshold_5g: RSSI threshold below which 5GHz RSSI is penalized + * @boost_threshold_5g: RSSI threshold above which 5GHz RSSI is favored + * @raise_factor_5g: factor by which 5GHz RSSI is boosted + * @drop_factor_5g: factor by which 5GHz RSSI is penalized + * @max_raise_rssi_5g: maximum boost that can be applied to a 5GHz RSSI + * @max_drop_rssi_5g: maximum penalty that can be applied to a 5GHz RSSI + * @good_rssi_threshold: RSSI below which roam is kicked in by background + * scan although rssi is still good + * @roam_earlystop_thres_min: Minimum RSSI threshold value for early stop, + * unit is dB above NF + * @roam_earlystop_thres_max: Maximum RSSI threshold value for early stop, + * unit is dB above NF + * @dense_rssi_thresh_offset: dense roam RSSI threshold difference + * @dense_min_aps_cnt: dense roam minimum APs + * @initial_dense_status: dense status detected by host + * @traffic_threshold: dense roam RSSI threshold + * @bg_scan_bad_rssi_thresh: Bad RSSI threshold to perform bg scan + * @roam_bad_rssi_thresh_offset_2g: Offset from Bad RSSI threshold for 2G + * to 5G Roam + * @bg_scan_client_bitmap: Bitmap used to identify the client scans to snoop + * @roam_data_rssi_threshold_triggers: triggers of bad data RSSI threshold to + * roam + * @roam_data_rssi_threshold: Bad data RSSI threshold to roam + * @rx_data_inactivity_time: Rx duration to check data RSSI + * @flags: Flags for Background Roaming + * Bit 0 : BG roaming enabled when we connect to 2G AP only and roaming + * to 5G AP only. + * Bit 1-31: Reserved + */ +struct roam_offload_scan_rssi_params { + int8_t rssi_thresh; + uint8_t rssi_thresh_diff; + uint32_t hi_rssi_scan_max_count; + uint32_t hi_rssi_scan_rssi_delta; + int32_t hi_rssi_scan_rssi_ub; + int raise_rssi_thresh_5g; + uint8_t vdev_id; + uint32_t penalty_threshold_5g; + uint32_t boost_threshold_5g; + uint8_t raise_factor_5g; + uint8_t drop_factor_5g; + int max_raise_rssi_5g; + int max_drop_rssi_5g; + uint32_t good_rssi_threshold; + uint32_t roam_earlystop_thres_min; + uint32_t roam_earlystop_thres_max; + int dense_rssi_thresh_offset; + int dense_min_aps_cnt; + int initial_dense_status; + int traffic_threshold; + int32_t rssi_thresh_offset_5g; + int8_t bg_scan_bad_rssi_thresh; + uint8_t roam_bad_rssi_thresh_offset_2g; + uint32_t bg_scan_client_bitmap; + uint32_t roam_data_rssi_threshold_triggers; + int32_t roam_data_rssi_threshold; + uint32_t rx_data_inactivity_time; + uint32_t flags; +}; + +/** + * struct roam_scan_period_params - Roam scan period parameters + * @vdev_id: Vdev for which the scan period parameters are sent + * @scan_period: Opportunistic scan runs on a timer for scan_period + * @scan_age: Duration after which the scan entries are to be aged out + * @roam_scan_inactivity_time: inactivity monitoring time in ms for which the + * device is considered to be inactive + * @roam_inactive_data_packet_count: Maximum allowed data packets count during + * roam_scan_inactivity_time. + * @roam_scan_period_after_inactivity: Roam scan period in ms after device is + * in inactive state. + * @full_scan_period: Full scan period is the idle period in seconds + * between two successive full channel roam scans. + */ +struct roam_scan_period_params { + uint32_t vdev_id; + uint32_t scan_period; + uint32_t scan_age; + uint32_t roam_scan_inactivity_time; + uint32_t roam_inactive_data_packet_count; + uint32_t roam_scan_period_after_inactivity; + uint32_t full_scan_period; +}; + +/** + * struct wmi_mawc_roam_params - Motion Aided wireless connectivity params + * @vdev_id: VDEV on which the parameters should be applied + * @enable: MAWC roaming feature enable/disable + * @traffic_load_threshold: Traffic threshold in kBps for MAWC roaming + * @best_ap_rssi_threshold: AP RSSI Threshold for MAWC roaming + * @rssi_stationary_high_adjust: High RSSI adjustment value to suppress scan + * @rssi_stationary_low_adjust: Low RSSI adjustment value to suppress scan + */ +struct wmi_mawc_roam_params { + uint8_t vdev_id; + bool enable; + uint32_t traffic_load_threshold; + uint32_t best_ap_rssi_threshold; + uint8_t rssi_stationary_high_adjust; + uint8_t rssi_stationary_low_adjust; +}; + +#define MAX_SSID_ALLOWED_LIST 4 +#define MAX_BSSID_AVOID_LIST 16 +#define MAX_BSSID_FAVORED 16 + +/** + * struct roam_scan_filter_params - Structure holding roaming scan + * parameters + * @op_bitmap: bitmap to determine reason of roaming + * @vdev_id: vdev id + * @num_bssid_black_list: The number of BSSID's that we should avoid + * connecting to. It is like a blacklist of BSSID's. + * @num_ssid_white_list: The number of SSID profiles that are in the + * Whitelist. When roaming, we consider the BSSID's with + * this SSID also for roaming apart from the connected + * one's + * @num_bssid_preferred_list: Number of BSSID's which have a preference over + * others + * @bssid_avoid_list: Blacklist SSID's + * @ssid_allowed_list: Whitelist SSID's + * @bssid_favored: Favorable BSSID's + * @bssid_favored_factor: RSSI to be added to this BSSID to prefer it + * @lca_disallow_config_present: LCA [Last Connected AP] disallow config + * present + * @disallow_duration: How long LCA AP will be disallowed before it can be a + * roaming candidate again, in seconds + * @rssi_channel_penalization: How much RSSI will be penalized if candidate(s) + * are found in the same channel as disallowed + * AP's, in units of db + * @num_disallowed_aps: How many APs the target should maintain in its LCA + * list + * @delta_rssi: (dB units) when AB in RSSI blacklist improved by at least + * delta_rssi,it will be removed from blacklist + * + * This structure holds all the key parameters related to + * initial connection and roaming connections. + */ + +struct roam_scan_filter_params { + uint32_t op_bitmap; + uint8_t vdev_id; + uint32_t num_bssid_black_list; + uint32_t num_ssid_white_list; + uint32_t num_bssid_preferred_list; + struct qdf_mac_addr bssid_avoid_list[MAX_BSSID_AVOID_LIST]; + struct mac_ssid ssid_allowed_list[MAX_SSID_ALLOWED_LIST]; + struct qdf_mac_addr bssid_favored[MAX_BSSID_FAVORED]; + uint8_t bssid_favored_factor[MAX_BSSID_FAVORED]; + uint8_t lca_disallow_config_present; + uint32_t disallow_duration; + uint32_t rssi_channel_penalization; + uint32_t num_disallowed_aps; + uint32_t num_rssi_rejection_ap; + struct reject_ap_config_params + rssi_rejection_ap[MAX_RSSI_AVOID_BSSID_LIST]; + uint32_t delta_rssi; +}; + +#define WMI_CFG_VALID_CHANNEL_LIST_LEN 100 +/* Occupied channel list remains static */ +#define WMI_CHANNEL_LIST_STATIC 1 +/* Occupied channel list can be learnt after init */ +#define WMI_CHANNEL_LIST_DYNAMIC_INIT 2 +/* Occupied channel list can be learnt after flush */ +#define WMI_CHANNEL_LIST_DYNAMIC_FLUSH 3 +/* Occupied channel list can be learnt after update */ +#define WMI_CHANNEL_LIST_DYNAMIC_UPDATE 4 + +/** + * struct plm_req_params - plm req parameter + * @diag_token: Dialog token + * @meas_token: measurement token + * @num_bursts: total number of bursts + * @burst_int: burst interval in seconds + * @meas_duration:in TU's,STA goes off-ch + * @burst_len: no of times the STA should cycle through PLM ch list + * @desired_tx_pwr: desired tx power + * @mac_addr: MC dest addr + * @plm_num_ch: channel numbers + * @plm_ch_freq_list: channel frequency list + * @vdev_id: vdev id + * @enable: enable/disable + */ +struct plm_req_params { + uint16_t diag_token; + uint16_t meas_token; + uint16_t num_bursts; + uint16_t burst_int; + uint16_t meas_duration; + /* no of times the STA should cycle through PLM ch list */ + uint8_t burst_len; + int8_t desired_tx_pwr; + struct qdf_mac_addr mac_addr; + /* no of channels */ + uint8_t plm_num_ch; + /* channel frequency list */ + uint32_t plm_ch_freq_list[WMI_CFG_VALID_CHANNEL_LIST_LEN]; + uint8_t vdev_id; + bool enable; +}; + +/** + * struct ap_profile - Structure ap profile to match candidate + * @flags: flags + * @rssi_threshold: the value of the the candidate AP should higher by this + * threshold than the rssi of the currrently associated AP + * @ssid: ssid vlaue to be matched + * @rsn_authmode: security params to be matched + * @rsn_ucastcipherset: unicast cipher set + * @rsn_mcastcipherset: mcast/group cipher set + * @rsn_mcastmgmtcipherset: mcast/group management frames cipher set + * @rssi_abs_thresh: the value of the candidate AP should higher than this + * absolute RSSI threshold. Zero means no absolute minimum + * RSSI is required. units are the offset from the noise + * floor in dB + */ +struct ap_profile { + uint32_t flags; + uint32_t rssi_threshold; + struct mac_ssid ssid; + uint32_t rsn_authmode; + uint32_t rsn_ucastcipherset; + uint32_t rsn_mcastcipherset; + uint32_t rsn_mcastmgmtcipherset; + uint32_t rssi_abs_thresh; +}; + +/** + * struct rssi_scoring - rssi scoring param to sortlist selected AP + * @best_rssi_threshold: Roamable AP RSSI equal or better than this threshold, + * full rssi score 100. Units in dBm. + * @good_rssi_threshold: Below threshold, scoring linear percentage between + * rssi_good_pnt and 100. Units in dBm. + * @bad_rssi_threshold: Between good and bad rssi threshold, scoring linear + * % between rssi_bad_pcnt and rssi_good_pct in dBm. + * @good_rssi_pcnt: Used to assigned scoring percentage of each slot between + * best to good rssi threshold. Units in percentage. + * @bad_rssi_pcnt: Used to assigned scoring percentage of each slot between good + * to bad rssi threshold. Unites in percentage. + * @good_bucket_size : bucket size of slot in good zone + * @bad_bucket_size : bucket size of slot in bad zone + * @rssi_pref_5g_rssi_thresh: Below rssi threshold, 5G AP have given preference + * of band percentage. Units in dBm. + */ +struct rssi_scoring { + int32_t best_rssi_threshold; + int32_t good_rssi_threshold; + int32_t bad_rssi_threshold; + uint32_t good_rssi_pcnt; + uint32_t bad_rssi_pcnt; + uint32_t good_bucket_size; + uint32_t bad_bucket_size; + int32_t rssi_pref_5g_rssi_thresh; +}; + +/** + * struct param_slot_scoring - define % score for differents slots for a + * scoring param. + * @num_slot: number of slots in which the param will be divided. + * Max 15. index 0 is used for 'not_present. Num_slot will + * equally divide 100. e.g, if num_slot = 4 slot 0 = 0-25%, slot + * 1 = 26-50% slot 2 = 51-75%, slot 3 = 76-100% + * @score_pcnt3_to_0: Conatins score percentage for slot 0-3 + * BITS 0-7 :- the scoring pcnt when not present + * BITS 8-15 :- SLOT_1 + * BITS 16-23 :- SLOT_2 + * BITS 24-31 :- SLOT_3 + * @score_pcnt7_to_4: Conatins score percentage for slot 4-7 + * BITS 0-7 :- SLOT_4 + * BITS 8-15 :- SLOT_5 + * BITS 16-23 :- SLOT_6 + * BITS 24-31 :- SLOT_7 + * @score_pcnt11_to_8: Conatins score percentage for slot 8-11 + * BITS 0-7 :- SLOT_8 + * BITS 8-15 :- SLOT_9 + * BITS 16-23 :- SLOT_10 + * BITS 24-31 :- SLOT_11 + * @score_pcnt15_to_12: Conatins score percentage for slot 12-15 + * BITS 0-7 :- SLOT_12 + * BITS 8-15 :- SLOT_13 + * BITS 16-23 :- SLOT_14 + * BITS 24-31 :- SLOT_15 + */ +struct param_slot_scoring { + uint32_t num_slot; + uint32_t score_pcnt3_to_0; + uint32_t score_pcnt7_to_4; + uint32_t score_pcnt11_to_8; + uint32_t score_pcnt15_to_12; +}; + +/** + * struct scoring_param - scoring param to sortlist selected AP + * @disable_bitmap: Each bit will be either allow(0)/disallow(1) to + * considered the roam score param. + * @rssi_weightage: RSSI weightage out of total score in % + * @ht_weightage: HT weightage out of total score in %. + * @vht_weightage: VHT weightage out of total score in %. + * @he_weightaget: 11ax weightage out of total score in %. + * @bw_weightage: Bandwidth weightage out of total score in %. + * @band_weightage: Band(2G/5G) weightage out of total score in %. + * @nss_weightage: NSS(1x1 / 2x2)weightage out of total score in %. + * @esp_qbss_weightage: ESP/QBSS weightage out of total score in %. + * @beamforming_weightage: Beamforming weightage out of total score in %. + * @pcl_weightage: PCL weightage out of total score in %. + * @oce_wan_weightage OCE WAN metrics weightage out of total score in %. + * @bw_index_score: channel BW scoring percentage information. + * BITS 0-7 :- It contains scoring percentage of 20MHz BW + * BITS 8-15 :- It contains scoring percentage of 40MHz BW + * BITS 16-23 :- It contains scoring percentage of 80MHz BW + * BITS 24-31 :- It contains scoring percentage of 1600MHz BW + * The value of each index must be 0-100 + * @band_index_score: band scording percentage information. + * BITS 0-7 :- It contains scoring percentage of 2G + * BITS 8-15 :- It contains scoring percentage of 5G + * BITS 16-23 :- reserved + * BITS 24-31 :- reserved + * The value of each index must be 0-100 + * @nss_index_score: NSS scoring percentage information. + * BITS 0-7 :- It contains scoring percentage of 1x1 + * BITS 8-15 :- It contains scoring percentage of 2x2 + * BITS 16-23 :- It contains scoring percentage of 3x3 + * BITS 24-31 :- It contains scoring percentage of 4x4 + * The value of each index must be 0-100 + * @roam_score_delta: delta value expected over the roam score of the candidate + * ap over the roam score of the current ap + * @roam_trigger_bitmap: bitmap of roam triggers on which roam_score_delta + * will be applied + * @vendor_roam_score_algorithm: Prefered algorithm for roam candidate selection + * @cand_min_roam_score_delta: candidate min roam score delta value + * @rssi_scoring: RSSI scoring information. + * @esp_qbss_scoring: ESP/QBSS scoring percentage information + * @oce_wan_scoring: OCE WAN metrics percentage information + */ +struct scoring_param { + uint32_t disable_bitmap; + int32_t rssi_weightage; + int32_t ht_weightage; + int32_t vht_weightage; + int32_t he_weightage; + int32_t bw_weightage; + int32_t band_weightage; + int32_t nss_weightage; + int32_t esp_qbss_weightage; + int32_t beamforming_weightage; + int32_t pcl_weightage; + int32_t oce_wan_weightage; + uint32_t bw_index_score; + uint32_t band_index_score; + uint32_t nss_index_score; + uint32_t roam_score_delta; + uint32_t roam_trigger_bitmap; + uint32_t vendor_roam_score_algorithm; + uint32_t cand_min_roam_score_delta; + struct rssi_scoring rssi_scoring; + struct param_slot_scoring esp_qbss_scoring; + struct param_slot_scoring oce_wan_scoring; +}; + +/* + * Currently roam score delta value and min rssi values are sent + * for 2 triggers + */ +#define NUM_OF_ROAM_TRIGGERS 2 +#define IDLE_ROAM_TRIGGER 0 +#define BTM_ROAM_TRIGGER 1 + +#define NUM_OF_ROAM_MIN_RSSI 3 + +#define DEAUTH_MIN_RSSI 0 +#define BMISS_MIN_RSSI 1 +#define MIN_RSSI_2G_TO_5G_ROAM 2 + +/** + * enum roam_trigger_reason - Reason for triggering roam + * ROAM_TRIGGER_REASON_NONE: Roam trigger reason none + * ROAM_TRIGGER_REASON_PER: Roam triggered due to packet error + * ROAM_TRIGGER_REASON_BMISS: Roam triggered due to beacon miss + * ROAM_TRIGGER_REASON_LOW_RSSI: Roam triggered due to low RSSI of current + * connected AP. + * ROAM_TRIGGER_REASON_HIGH_RSSI: Roam triggered because sta is connected to + * a AP in 2.4GHz band and a better 5GHz AP is available + * ROAM_TRIGGER_REASON_PERIODIC: Roam triggered as better AP was found during + * periodic roam scan. + * ROAM_TRIGGER_REASON_MAWC: Motion Aided WiFi Connectivity triggered roam. + * ROAM_TRIGGER_REASON_DENSE: Roaming triggered due to dense environment + * detected. + * ROAM_TRIGGER_REASON_BACKGROUND: Roam triggered due to current AP having + * poor rssi and scan candidate found in scan results provided by other + * scan clients. + * ROAM_TRIGGER_REASON_FORCED: Forced roam trigger. + * ROAM_TRIGGER_REASON_BTM: Roam triggered due to AP sent BTM query with + * Disassoc imminent bit set. + * ROAM_TRIGGER_REASON_UNIT_TEST: Roam triggered due to unit test command. + * ROAM_TRIGGER_REASON_BSS_LOAD: Roam triggered due to high channel utilization + * in the current connected channel + * ROAM_TRIGGER_REASON_DEAUTH: Roam triggered due to deauth received from the + * current connected AP. + * ROAM_TRIGGER_REASON_IDLE: Roam triggered due to inactivity of the device. + * ROAM_TRIGGER_REASON_STA_KICKOUT: Roam triggered due to sta kickout event. + * ROAM_TRIGGER_REASON_MAX: Maximum number of roam triggers + */ +enum roam_trigger_reason { + ROAM_TRIGGER_REASON_NONE = 0, + ROAM_TRIGGER_REASON_PER, + ROAM_TRIGGER_REASON_BMISS, + ROAM_TRIGGER_REASON_LOW_RSSI, + ROAM_TRIGGER_REASON_HIGH_RSSI, + ROAM_TRIGGER_REASON_PERIODIC, + ROAM_TRIGGER_REASON_MAWC, + ROAM_TRIGGER_REASON_DENSE, + ROAM_TRIGGER_REASON_BACKGROUND, + ROAM_TRIGGER_REASON_FORCED, + ROAM_TRIGGER_REASON_BTM, + ROAM_TRIGGER_REASON_UNIT_TEST, + ROAM_TRIGGER_REASON_BSS_LOAD, + ROAM_TRIGGER_REASON_DEAUTH, + ROAM_TRIGGER_REASON_IDLE, + ROAM_TRIGGER_REASON_STA_KICKOUT, + ROAM_TRIGGER_REASON_MAX, +}; + +/** + * struct roam_trigger_min_rssi - structure to hold minimum rssi value of + * candidate APs for each roam trigger + * @min_rssi: minimum RSSI of candidate AP for the trigger reason specified in + * trigger_id + * @trigger_reason: Roam trigger reason + */ +struct roam_trigger_min_rssi { + int32_t min_rssi; + enum roam_trigger_reason trigger_reason; +}; + +/** + * struct roam_trigger_score_delta - structure to hold roam score delta value of + * candidate APs for each roam trigger + * @roam_score_delta: delta value in score of the candidate AP for the roam + * trigger mentioned in the trigger_id. + * @trigger_reason: Roam trigger reason + */ +struct roam_trigger_score_delta { + uint32_t roam_score_delta; + enum roam_trigger_reason trigger_reason; +}; + +/** + * struct ap_profile_params - ap profile params + * @vdev_id: vdev id + * @profile: ap profile to match candidate + * @param: scoring params to short candidate + * @min_rssi_params: Min RSSI values for different roam triggers + * @score_delta_params: Roam score delta values for different triggers + */ +struct ap_profile_params { + uint8_t vdev_id; + struct ap_profile profile; + struct scoring_param param; + struct roam_trigger_min_rssi min_rssi_params[NUM_OF_ROAM_MIN_RSSI]; + struct roam_trigger_score_delta score_delta_param[NUM_OF_ROAM_TRIGGERS]; +}; + +/** + * struct wmi_roam_invoke_cmd - roam invoke command + * @vdev_id: vdev id + * @bssid: mac address + * @channel: channel + * @frame_len: frame length, includs mac header, fixed params and ies + * @frame_buf: buffer contaning probe response or beacon + * @is_same_bssid: flag to indicate if roaming is requested for same bssid + * @forced_roaming: Roam to any bssid in any ch (here bssid & ch is not given) + */ +struct wmi_roam_invoke_cmd { + uint32_t vdev_id; + uint8_t bssid[QDF_MAC_ADDR_SIZE]; + uint32_t channel; + uint32_t frame_len; + uint8_t *frame_buf; + uint8_t is_same_bssid; + bool forced_roaming; +}; + +/** + * struct wmi_per_roam_config - per based roaming parameters + * @enable: if PER based roaming is enabled/disabled + * @tx_high_rate_thresh: high rate threshold at which PER based + * roam will stop in tx path + * @rx_high_rate_thresh: high rate threshold at which PER based + * roam will stop in rx path + * @tx_low_rate_thresh: rate below which traffic will be considered + * for PER based roaming in Tx path + * @rx_low_rate_thresh: rate below which traffic will be considered + * for PER based roaming in Tx path + * @tx_rate_thresh_percnt: % above which when traffic is below low_rate_thresh + * will be considered for PER based scan in tx path + * @rx_rate_thresh_percnt: % above which when traffic is below low_rate_thresh + * will be considered for PER based scan in rx path + * @per_rest_time: time for which PER based roam will wait once it + * issues a roam scan. + * @tx_per_mon_time: Minimum time required to be considered as valid scenario + * for PER based roam in tx path + * @rx_per_mon_time: Minimum time required to be considered as valid scenario + * for PER based roam in rx path + * @min_candidate_rssi: Minimum RSSI threshold for candidate AP to be used for + * PER based roaming + */ +struct wmi_per_roam_config { + uint32_t enable; + uint32_t tx_high_rate_thresh; + uint32_t rx_high_rate_thresh; + uint32_t tx_low_rate_thresh; + uint32_t rx_low_rate_thresh; + uint32_t tx_rate_thresh_percnt; + uint32_t rx_rate_thresh_percnt; + uint32_t per_rest_time; + uint32_t tx_per_mon_time; + uint32_t rx_per_mon_time; + uint32_t min_candidate_rssi; +}; + +/** + * struct wmi_per_roam_config_req: PER based roaming config request + * @vdev_id: vdev id on which config needs to be set + * @per_config: PER config + */ +struct wmi_per_roam_config_req { + uint8_t vdev_id; + struct wmi_per_roam_config per_config; +}; + +/** + * struct wmi_limit_off_chan_param - limit off channel parameters + * @vdev_id: vdev id + * @status: status of the command (enable/disable) + * @max_offchan_time: max off channel time + * @rest_time: home channel time + * @skip_dfs_chans: skip dfs channels during scan + */ +struct wmi_limit_off_chan_param { + uint32_t vdev_id; + bool status; + uint32_t max_offchan_time; + uint32_t rest_time; + bool skip_dfs_chans; +}; + +#define WMI_MAX_HLP_IE_LEN 2048 +/** + * struct hlp_params - HLP info params + * @vdev_id: vdev id + * @hlp_ie_len: HLP IE length + * @hlp_ie: HLP IE + */ +struct hlp_params { + uint8_t vdev_id; + uint32_t hlp_ie_len; + uint8_t hlp_ie[WMI_MAX_HLP_IE_LEN]; +}; + +/** + * struct wmi_btm_config - BSS Transition Management offload params + * @vdev_id: VDEV on which the parameters should be applied + * @btm_offload_config: BTM config + * @btm_solicited_timeout: Timeout value for waiting BTM request + * @btm_max_attempt_cnt: Maximum attempt for sending BTM query to ESS + * @btm_sticky_time: Stick time after roaming to new AP by BTM + * @disassoc_timer_threshold: threshold value till which the firmware can + * wait before triggering the roam scan after receiving the disassoc iminent + * @btm_query_bitmask: bitmask to btm query with candidate list + * @btm_candidate_min_score: Minimum score of the AP to consider it as a + * candidate if the roam trigger is BTM kickout. + */ +struct wmi_btm_config { + uint8_t vdev_id; + uint32_t btm_offload_config; + uint32_t btm_solicited_timeout; + uint32_t btm_max_attempt_cnt; + uint32_t btm_sticky_time; + uint32_t disassoc_timer_threshold; + uint32_t btm_query_bitmask; + uint32_t btm_candidate_min_score; +}; + +/** + * struct wmi_bss_load_config - BSS load trigger parameters + * @vdev_id: VDEV on which the parameters should be applied + * @bss_load_threshold: BSS load threshold after which roam scan should trigger + * @bss_load_sample_time: Time duration in milliseconds for which the bss load + * trigger needs to be enabled + * @rssi_threshold_5ghz: RSSI threshold of the current connected AP below which + * roam should be triggered if bss load threshold exceeds the configured value. + * This value is applicable only when we are connected in 5GHz band. + * @rssi_threshold_24ghz: RSSI threshold of the current connected AP below which + * roam should be triggered if bss load threshold exceeds the configured value. + * This value is applicable only when we are connected in 2.4GHz band. + */ +struct wmi_bss_load_config { + uint32_t vdev_id; + uint32_t bss_load_threshold; + uint32_t bss_load_sample_time; + int32_t rssi_threshold_5ghz; + int32_t rssi_threshold_24ghz; +}; + +/** + * struct wmi_idle_roam_params - Idle roam trigger parameters + * @vdev_id: VDEV on which the parameters should be applied + * @enable: Enable/Disable Idle roaming + * @band: Connected AP band + * @conn_ap_rssi_delta: Rssi change of connected AP in dBm + * @conn_ap_min_rssi: If connected AP rssi is less than min rssi trigger roam + * @inactive_time: Connected AP idle time + * @data_pkt_count: Data packet count allowed during idle time + */ +struct wmi_idle_roam_params { + uint32_t vdev_id; + bool enable; + uint32_t band; + uint32_t conn_ap_rssi_delta; + int32_t conn_ap_min_rssi; + uint32_t inactive_time; + uint32_t data_pkt_count; +}; + +/** + * struct wmi_disconnect_roam_params - Emergency deauth/disconnect roam params + * @vdev_id: VDEV on which the parameters should be applied + * @enable: Enable or disable disconnect roaming. + */ +struct wmi_disconnect_roam_params { + uint32_t vdev_id; + bool enable; +}; + +/** + * struct wmi_roam_auth_status_params - WPA3 roam auth response status + * parameters + * @vdev_id: Vdev on which roam preauth is happening + * @preauth_status: Status of the Auth response. + * IEEE80211_STATUS_SUCCESS(0) for success. Corresponding + * IEEE80211 failure status code for failure. + * + * @bssid: Candidate BSSID + * @pmkid: PMKID derived for the auth + */ +struct wmi_roam_auth_status_params { + uint32_t vdev_id; + uint32_t preauth_status; + struct qdf_mac_addr bssid; + uint8_t pmkid[PMKID_LEN]; +}; + +/** + * @time_offset: time offset after 11k offload command to trigger a neighbor + * report request (in seconds) + * @low_rssi_offset: Offset from rssi threshold to trigger a neighbor + * report request (in dBm) + * @bmiss_count_trigger: Number of beacon miss events to trigger neighbor + * report request + * @per_threshold_offset: offset from PER threshold to trigger neighbor + * report request (in %) + * @neighbor_report_cache_timeout: timeout after which new trigger can enable + * sending of a neighbor report request (in seconds) + * @max_neighbor_report_req_cap: max number of neighbor report requests that + * can be sent to the peer in the current session + * @ssid: Current connect SSID info + */ +struct wmi_11k_offload_neighbor_report_params { + uint32_t time_offset; + uint32_t low_rssi_offset; + uint32_t bmiss_count_trigger; + uint32_t per_threshold_offset; + uint32_t neighbor_report_cache_timeout; + uint32_t max_neighbor_report_req_cap; + struct mac_ssid ssid; +}; + +/** + * struct wmi_11k_offload_params - offload 11k features to FW + * @vdev_id: vdev id + * @offload_11k_bitmask: bitmask to specify offloaded features + * B0: Neighbor Report Request offload + * B1-B31: Reserved + * @neighbor_report_params: neighbor report offload params + */ +struct wmi_11k_offload_params { + uint32_t vdev_id; + uint32_t offload_11k_bitmask; + struct wmi_11k_offload_neighbor_report_params neighbor_report_params; +}; + +/** + * struct wmi_invoke_neighbor_report_params - Invoke neighbor report request + * from IW to FW + * @vdev_id: vdev id + * @send_resp_to_host: bool to send response to host or not + * @ssid: ssid given from the IW command + */ +struct wmi_invoke_neighbor_report_params { + uint32_t vdev_id; + uint32_t send_resp_to_host; + struct mac_ssid ssid; +}; + +/** + * struct roam_triggers - vendor configured roam triggers + * @vdev_id: vdev id + * @trigger_bitmap: vendor configured roam trigger bitmap as + * defined @enum roam_control_trigger_reason + */ +struct roam_triggers { + uint32_t vdev_id; + uint32_t trigger_bitmap; +}; + +#endif /* _WMI_UNIFIED_ROAM_PARAM_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_sta_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_sta_api.h new file mode 100644 index 0000000000000000000000000000000000000000..b0dd4cc11e2b14a367633559a071dbb735cb1f5f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_sta_api.h @@ -0,0 +1,455 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implement API's specific to STA component. + */ + +#ifndef _WMI_UNIFIED_STA_API_H_ +#define _WMI_UNIFIED_STA_API_H_ + +#include "wlan_disa_public_struct.h" +#include "wlan_tdls_public_structs.h" +#include "wlan_policy_mgr_public_struct.h" +#include "wmi_unified_sta_param.h" + +struct policy_mgr_dual_mac_config; + +/** + * wmi_unified_set_sta_sa_query_param_cmd() - set sta sa query parameters + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @max_retries: max retries + * @retry_interval: retry interval + * This function sets sta query related parameters in fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_sta_sa_query_param_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint32_t max_retries, + uint32_t retry_interval); + +/** + * wmi_unified_set_sta_keep_alive_cmd() - set sta keep alive parameters + * @wmi_handle: wmi handle + * @params: sta keep alive parameter + * + * This function sets keep alive related parameters in fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_set_sta_keep_alive_cmd(wmi_unified_t wmi_handle, + struct sta_keep_alive_params *params); + +/** + * wmi_unified_vdev_set_gtx_cfg_cmd() - set GTX params + * @wmi_handle: wmi handle + * @if_id: vdev id + * @gtx_info: GTX config params + * + * This function set GTX related params in firmware. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_vdev_set_gtx_cfg_cmd(wmi_unified_t wmi_handle, uint32_t if_id, + struct wmi_gtx_config *gtx_info); + +#ifdef FEATURE_BLACKLIST_MGR +/** + * wmi_unified_send_reject_ap_list() - send the reject ap list maintained by + * BLM to FW for roaming cases. + * @wmi_handle: wmi handle + * @reject_params: This contains the reject ap list, and the num of BSSIDs. + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +wmi_unified_send_reject_ap_list(struct wmi_unified *wmi_handle, + struct reject_ap_params *reject_params); +#endif + +/** + * wmi_unified_process_dhcp_ind() - process dhcp indication from SME + * @wmi_handle: wmi handle + * @ta_dhcp_ind: DHCP indication parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_dhcp_ind( + wmi_unified_t wmi_handle, + wmi_peer_set_param_cmd_fixed_param *ta_dhcp_ind); + +/** + * wmi_unified_get_link_speed_cmd() -send command to get linkspeed + * @wmi_handle: wmi handle + * @peer_macaddr: peer MAC address + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_get_link_speed_cmd(wmi_unified_t wmi_handle, + wmi_mac_addr peer_macaddr); + +/** + * wmi_unified_fw_profiling_data_cmd() - send FW profiling cmd to WLAN FW + * @wmi_handle: wmi handle + * @cmd: Profiling command index + * @value1: parameter1 value + * @value2: parameter2 value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_fw_profiling_data_cmd(wmi_unified_t wmi_handle, + uint32_t cmd, + uint32_t value1, + uint32_t value2); + +/** + * wmi_unified_nat_keepalive_en_cmd() - enable NAT keepalive filter + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_nat_keepalive_en_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +/** + * wmi_unified_wlm_latency_level_cmd() - set latency level + * @wmi_handle: wmi handle + * @param: WLM parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_wlm_latency_level_cmd(wmi_unified_t wmi_handle, + struct wlm_latency_level_param *param); + +/** + * wmi_unified_process_set_ie_info_cmd() - Function to send IE info to firmware + * @wmi_handle: Pointer to WMi handle + * @ie_data: Pointer for ie data + * + * This function sends IE information to firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_process_set_ie_info_cmd(wmi_unified_t wmi_handle, + struct vdev_ie_info_param *ie_info); + +/** + * wmi_unified_set_base_macaddr_indicate_cmd() - set base mac address in fw + * @wmi_handle: wmi handle + * @custom_addr: base mac address + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_base_macaddr_indicate_cmd(wmi_unified_t wmi_handle, + uint8_t *custom_addr); + +#ifdef FEATURE_WLAN_TDLS +/** + * wmi_unified_set_tdls_offchan_mode_cmd() - set tdls off channel mode + * @wmi_handle: wmi handle + * @chan_switch_params: Pointer to tdls channel switch parameter structure + * + * This function sets tdls off channel mode + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures; + * Negative errno otherwise + */ +QDF_STATUS wmi_unified_set_tdls_offchan_mode_cmd( + wmi_unified_t wmi_handle, + struct tdls_channel_switch_params *chan_switch_params); + +/** + * wmi_unified_update_fw_tdls_state_cmd() - send enable/disable tdls for a vdev + * @wmi_handle: wmi handle + * @tdls_param: TDLS params + * @tdls_state: TDLS state + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_update_fw_tdls_state_cmd(wmi_unified_t wmi_handle, + struct tdls_info *tdls_param, + enum wmi_tdls_state tdls_state); + +/** + * wmi_unified_update_tdls_peer_state_cmd() - update TDLS peer state + * @wmi_handle: wmi handle + * @peer_state: TDLS peer state params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_update_tdls_peer_state_cmd(wmi_unified_t wmi_handle, + struct tdls_peer_update_state *peer_state, + uint32_t *ch_mhz); + +/** + * wmi_extract_vdev_tdls_ev_param - extract vdev tdls param from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to hold vdev tdls param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_vdev_tdls_ev_param(wmi_unified_t wmi_handle, + void *evt_buf, + struct tdls_event_info *param); +#endif /* FEATURE_WLAN_TDLS */ + +/** + * wmi_unified_send_sar_limit_cmd() - send sar limit cmd to fw + * @wmi_handle: wmi handle + * @params: sar limit command params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_sar_limit_cmd(wmi_unified_t wmi_handle, + struct sar_limit_cmd_params *params); + +/** + * wmi_unified_get_sar_limit_cmd() - request current SAR limits from FW + * @wmi_handle: wmi handle + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_unified_get_sar_limit_cmd(wmi_unified_t wmi_handle); + +/** + * wmi_unified_extract_sar_limit_event() - extract SAR limits from FW event + * @wmi_handle: wmi handle + * @evt_buf: event buffer received from firmware + * @event: SAR limit event which is to be populated by data extracted from + * the @evt_buf buffer + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_unified_extract_sar_limit_event(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct sar_limit_event *event); + +/** + * wmi_unified_extract_sar2_result_event() - extract SAR limits from FW event + * @handle: wmi handle + * @event: event buffer received from firmware + * @len: length of the event buffer + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_unified_extract_sar2_result_event(void *handle, + uint8_t *event, uint32_t len); + +/* + * wmi_unified_set_del_pmkid_cache() - set delete PMKID + * @wmi_handle: wma handle + * @pmksa: pointer to pmk cache entry + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_set_del_pmkid_cache(wmi_unified_t wmi_handle, + struct wmi_unified_pmk_cache *pmksa); + +/** + * wmi_unified_del_ts_cmd() - send DELTS request to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @ac: ac param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_del_ts_cmd(wmi_unified_t wmi_handle, uint8_t vdev_id, + uint8_t ac); + +/** + * wmi_unified_aggr_qos_cmd() - send aggr qos request to fw + * @wmi_handle: handle to wmi + * @aggr_qos_rsp_msg: combined struct for all ADD_TS requests. + * + * A function to handle WMI_AGGR_QOS_REQ. This will send out + * ADD_TS requestes to firmware in loop for all the ACs with + * active flow. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_aggr_qos_cmd( + wmi_unified_t wmi_handle, + struct aggr_add_ts_param *aggr_qos_rsp_msg); + +/** + * wmi_unified_add_ts_cmd() - send ADDTS request to fw + * @wmi_handle: wmi handle + * @msg: ADDTS params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_add_ts_cmd(wmi_unified_t wmi_handle, + struct add_ts_param *msg); + +/** + * wmi_unified_process_add_periodic_tx_ptrn_cmd() - add periodic tx pattern + * @wmi_handle: wmi handle + * @pattern: tx pattern parameters + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_add_periodic_tx_ptrn_cmd( + wmi_unified_t wmi_handle, + struct periodic_tx_pattern *pattern, + uint8_t vdev_id); + +/** + * wmi_unified_process_del_periodic_tx_ptrn_cmd() - del periodic tx ptrn + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @pattern_id: pattern id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_process_del_periodic_tx_ptrn_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint8_t pattern_id); + +/** + * wmi_unified_set_auto_shutdown_timer_cmd() - sets auto shutdown + * timer in firmware + * @wmi_handle: wmi handle + * @timer_val: auto shutdown timer value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_auto_shutdown_timer_cmd(wmi_unified_t wmi_handle, + uint32_t timer_val); + +/** + * wmi_unified_set_led_flashing_cmd() - set led flashing in fw + * @wmi_handle: wmi handle + * @flashing: flashing request + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_set_led_flashing_cmd(wmi_unified_t wmi_handle, + struct flashing_req_params *flashing); + +/** + * wmi_unified_process_ch_avoid_update_cmd() - handles channel avoid + * update request + * @wmi_handle: wmi handle + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_ch_avoid_update_cmd(wmi_unified_t wmi_handle); + +/** + * wmi_unified_pdev_set_pcl_cmd() - Send WMI_SOC_SET_PCL_CMDID to FW + * @wmi_handle: wmi handle + * @msg: PCL structure containing the PCL and the number of channels + * + * WMI_SOC_SET_PCL_CMDID provides a Preferred Channel List (PCL) to the WLAN + * firmware. The DBS Manager is the consumer of this information in the WLAN + * firmware. The channel list will be used when a Virtual DEVice (VDEV) needs + * to migrate to a new channel without host driver involvement. An example of + * this behavior is Legacy Fast Roaming (LFR 3.0). Generally, the host will + * manage the channel selection without firmware involvement. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_pdev_set_pcl_cmd(wmi_unified_t wmi_handle, + struct wmi_pcl_chan_weights *msg); + +#ifdef WLAN_POLICY_MGR_ENABLE +/** + * wmi_unified_pdev_set_dual_mac_config_cmd() - Set dual mac config to FW + * @wmi_handle: wmi handle + * @msg: Dual MAC config parameters + * + * Configures WLAN firmware with the dual MAC features + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures. + */ +QDF_STATUS wmi_unified_pdev_set_dual_mac_config_cmd( + wmi_unified_t wmi_handle, + struct policy_mgr_dual_mac_config *msg); +#endif /* WLAN_POLICY_MGR_ENABLE */ + +/** + * wmi_unified_send_adapt_dwelltime_params_cmd() - send wmi cmd of + * adaptive dwelltime configuration params + * @wma_handle: wma handler + * @wmi_param: pointer to dwelltime_params + * + * Return: QDF_STATUS_SUCCESS on success and QDF failure reason code for failure + */ +QDF_STATUS wmi_unified_send_adapt_dwelltime_params_cmd( + wmi_unified_t wmi_handle, + struct wmi_adaptive_dwelltime_params *wmi_param); + +/** + * wmi_unified_send_dbs_scan_sel_params_cmd() - send wmi cmd of + * DBS scan selection configuration params + * @wma_handle: wma handler + * @wmi_param: pointer to wmi_dbs_scan_sel_params + * + * Return: QDF_STATUS_SUCCESS on success and QDF failure reason code for failure + */ +QDF_STATUS wmi_unified_send_dbs_scan_sel_params_cmd( + wmi_unified_t wmi_handle, + struct wmi_dbs_scan_sel_params *wmi_param); + +/** + * wmi_unified_set_arp_stats_req() - set arp stats request + * @wmi_handle: wmi handle + * @req_buf: pointer to set_arp_stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_arp_stats_req(wmi_unified_t wmi_handle, + struct set_arp_stats *req_buf); + +/** + * wmi_unified_get_arp_stats_req() - get arp stats request + * @wmi_handle: wmi handle + * @req_buf: pointer to get_arp_stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_get_arp_stats_req(wmi_unified_t wmi_handle, + struct get_arp_stats *req_buf); + +/** + * wmi_unified_peer_unmap_conf_send() - send PEER unmap conf command to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @peer_id_cnt: number of peer id + * @peer_id_list: list of peer ids + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_peer_unmap_conf_send(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint32_t peer_id_cnt, + uint16_t *peer_id_list); + +#endif /* _WMI_UNIFIED_STA_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_sta_param.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_sta_param.h new file mode 100644 index 0000000000000000000000000000000000000000..3febde54e7bce80532139c212217cfdf90631a6f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_sta_param.h @@ -0,0 +1,366 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API definitions for the STA WMI APIs. + */ + +#ifndef _WMI_UNIFIED_STA_PARAM_H_ +#define _WMI_UNIFIED_STA_PARAM_H_ + +/** + * struct sta_keep_alive_params - sta keep alive parameters + * @vdev_id: vdev id + * @method: keep alive method + * @timeperiod: time to keep alive + * @hostv4addr: host ipv4 address + * @destv4addr: destination ipv4 address + * @destmac: destination mac address + */ +struct sta_keep_alive_params { + uint8_t vdev_id; + uint32_t method; + uint32_t timeperiod; + uint8_t hostv4addr[QDF_IPV4_ADDR_SIZE]; + uint8_t destv4addr[QDF_IPV4_ADDR_SIZE]; + uint8_t destmac[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct gtx_config_t - GTX config + * @gtx_rt_mask: for HT and VHT rate masks + * @gtx_usrcfg: host request for GTX mask + * @gtx_threshold: PER Threshold (default: 10%) + * @gtx_margin: PER margin (default: 2%) + * @gtx_tcpstep: TCP step (default: 1) + * @gtx_tpcMin: TCP min (default: 5) + * @gtx_bwmask: BW mask (20/40/80/160 Mhz) + */ +struct wmi_gtx_config { + uint32_t gtx_rt_mask[2]; + uint32_t gtx_usrcfg; + uint32_t gtx_threshold; + uint32_t gtx_margin; + uint32_t gtx_tpcstep; + uint32_t gtx_tpcmin; + uint32_t gtx_bwmask; +}; + +/** + * struct wlm_latency_level_param - WLM parameters + * @wlm_latency_level: wlm latency level to set + * 0 - normal, 1 - moderate, 2 - low, 3 - ultralow + * @wlm_latency_flags: wlm latency flags to set + * |31 12| 11 | 10 |9 8|7 6|5 4|3 2| 1 | 0 | + * +------+------+------+------+------+------+------+-----+-----+ + * | RSVD | SSLP | CSLP | RSVD | Roam | RSVD | DWLT | DFS | SUP | + * +------+-------------+-------------+-------------------------+ + * | WAL | PS | Roam | Scan | + * + * bit 0: Avoid scan request from HLOS if setting + * bit 1: Skip DFS channel SCAN if setting + * bit 2-3: Define policy of dwell time/duration for each foreign channel + * (b2 b3) + * (0 0 ): Default scan dwell time + * (0 1 ): Reserve + * (1 0 ): Shrink off channel dwell time + * (1 1 ): Reserve + * bit 4-5: Reserve for scan + * bit 6-7: Define roaming policy + * (b6 b7) + * (0 0 ): Default roaming behavior, allow roaming in all scenarios + * (0 1 ): Disallow all roaming + * (1 0 ): Allow roaming when final bmissed + * (1 1 ): Reserve + * bit 8-9: Reserve for roaming + * bit 10: Disable css power collapse if setting + * bit 11: Disable sys sleep if setting + * bit 12-31: Reserve for future useage + * @vdev_id: vdev id + */ +struct wlm_latency_level_param { + uint16_t wlm_latency_level; + uint32_t wlm_latency_flags; + uint16_t vdev_id; +}; + +#define WMI_2_4_GHZ_MAX_FREQ 3000 + +/** + * struct vdev_ie_info_param - IE info + * @vdev_id - vdev for which the IE is being sent + * @ie_id - ID of the IE + * @length - length of the IE data + * @data - IE data + * + * This structure is used to store the IE information. + */ +struct vdev_ie_info_param { + uint32_t vdev_id; + uint32_t ie_id; + uint32_t length; + uint32_t ie_source; + uint32_t band; + uint8_t *data; +}; + +#define MAX_SAR_LIMIT_ROWS_SUPPORTED 64 +/** + * struct sar_limit_cmd_row - sar limits row + * @band_id: Optional param for frequency band + * See %enum wmi_sar_band_id_flags for possible values + * @chain_id: Optional param for antenna chain id + * @mod_id: Optional param for modulation scheme + * See %enum wmi_sar_mod_id_flags for possible values + * @limit_value: Mandatory param providing power limits in steps of 0.5 dbm + * @validity_bitmap: bitmap of valid optional params in sar_limit_cmd_row struct + * See WMI_SAR_*_VALID_MASK for possible values + */ +struct sar_limit_cmd_row { + uint32_t band_id; + uint32_t chain_id; + uint32_t mod_id; + uint32_t limit_value; + uint32_t validity_bitmap; +}; + +/** + * struct sar_limit_cmd_params - sar limits params + * @sar_enable: flag to enable SAR + * See %enum wmi_sar_feature_state_flags for possible values + * @num_limit_rows: number of items in sar_limits + * @commit_limits: indicates firmware to start apply new SAR values + * @sar_limit_row_list: pointer to array of sar limit rows + */ +struct sar_limit_cmd_params { + uint32_t sar_enable; + uint32_t num_limit_rows; + uint32_t commit_limits; + struct sar_limit_cmd_row *sar_limit_row_list; +}; + +/** + * struct sar_limit_event_row - sar limits row + * @band_id: Frequency band. + * See %enum wmi_sar_band_id_flags for possible values + * @chain_id: Chain id + * @mod_id: Modulation scheme + * See %enum wmi_sar_mod_id_flags for possible values + * @limit_value: Power limits in steps of 0.5 dbm that is currently active for + * the given @band_id, @chain_id, and @mod_id + */ +struct sar_limit_event_row { + uint32_t band_id; + uint32_t chain_id; + uint32_t mod_id; + uint32_t limit_value; +}; + +/** + * struct sar_limit_event - sar limits params + * @sar_enable: Current status of SAR enablement. + * See %enum wmi_sar_feature_state_flags for possible values + * @num_limit_rows: number of items in sar_limits + * @sar_limit_row: array of sar limit rows. Only @num_limit_rows + * should be considered valid. + */ +struct sar_limit_event { + uint32_t sar_enable; + uint32_t num_limit_rows; + struct sar_limit_event_row + sar_limit_row[MAX_SAR_LIMIT_ROWS_SUPPORTED]; +}; + +#define WMI_UNIFIED_MAX_PMKID_LEN 16 +#define WMI_UNIFIED_MAX_PMK_LEN 64 + +/** + * struct wmi_unified_pmk_cache - used to set del pmkid cache + * @vdev_id: ID of the vdev being configured + * @pmk_len: PMK len + * for big-endian hosts, manual endian conversion will be needed to keep + * the array values in their original order in spite of the automatic + * byte-swap applied to WMI messages during download + * @pmk: PMK array + * @pmkid_len: PMK ID Len + * @pmkid: PMK ID Array + * @bssid: BSSID + * @ssid: SSID + * @cache_id: PMK Cache ID + * @cat_flag: whether (bssid) or (ssid,cache_id) is valid + * @action_flag: add/delete the entry + * @is_flush_all: FLAG to indicate PMKSA flush. True if PMKSA cache flush is + * needed. + */ +struct wmi_unified_pmk_cache { + uint8_t vdev_id; + uint32_t pmk_len; + uint8_t pmk[WMI_UNIFIED_MAX_PMK_LEN]; + uint32_t pmkid_len; + uint8_t pmkid[WMI_UNIFIED_MAX_PMKID_LEN]; + wmi_host_mac_addr bssid; + struct mac_ssid ssid; + uint32_t cache_id; + uint32_t cat_flag; + uint32_t action_flag; + bool is_flush_all; +}; + +#define WMI_QOS_NUM_AC_MAX 4 + +/** + * struct aggr_add_ts_param - ADDTS parameters + * @tspecIdx: TSPEC handler uniquely identifying a TSPEC for a STA in a BSS + * @tspec: tspec value + * @status: CDF status + * @sessionId: session id + * @vdev_id: vdev id + */ +struct aggr_add_ts_param { + uint16_t tspecIdx; + struct mac_tspec_ie tspec[WMI_QOS_NUM_AC_MAX]; + QDF_STATUS status[WMI_QOS_NUM_AC_MAX]; + uint8_t sessionId; + uint8_t vdev_id; +}; + +#define WMI_PERIODIC_TX_PTRN_MAX_SIZE 1536 +/** + * struct periodic_tx_pattern - periodic tx pattern + * @mac_address: MAC Address for the adapter + * @ucPtrnId: Pattern ID + * @ucPtrnSize: Pattern size + * @usPtrnIntervalMs: in ms + * @ucPattern: Pattern buffer + */ +struct periodic_tx_pattern { + struct qdf_mac_addr mac_address; + uint8_t ucPtrnId; + uint16_t ucPtrnSize; + uint32_t usPtrnIntervalMs; + uint8_t ucPattern[WMI_PERIODIC_TX_PTRN_MAX_SIZE]; +}; + +/** + * struct flashing_req_params - led flashing parameter + * @reqId: request id + * @pattern_id: pattern identifier. 0: disconnected 1: connected + * @led_x0: led flashing parameter0 + * @led_x1: led flashing parameter1 + */ +struct flashing_req_params { + uint32_t req_id; + uint32_t pattern_id; + uint32_t led_x0; + uint32_t led_x1; +}; + +/** + * struct wmi_pcl_chan_weights - Params to get the valid weighed list + * @pcl_list: channel freq list sorted in preferred order + * @pcl_len: Length of the PCL + * @saved_chan_list: Valid channel freq list updated as part of + * WMA_UPDATE_CHAN_LIST_REQ + * @saved_num_chan: Length of the valid channel list + * @weighed_valid_list: Weights of the valid channel list. This will have one + * to one mapping with valid_chan_list. FW expects channel order and size to be + * as per the list provided in WMI_SCAN_CHAN_LIST_CMDID. + * @weight_list: Weights assigned by policy manager + */ +struct wmi_pcl_chan_weights { + uint32_t pcl_list[NUM_CHANNELS]; + uint32_t pcl_len; + uint32_t saved_chan_list[NUM_CHANNELS]; + uint32_t saved_num_chan; + uint8_t weighed_valid_list[NUM_CHANNELS]; + uint8_t weight_list[NUM_CHANNELS]; +}; + +/** + * struct wmi_adaptive_dwelltime_params - the adaptive dwelltime params + * @vdev_id: vdev id + * @is_enabled: Adaptive dwell time is enabled/disabled + * @dwelltime_mode: global default adaptive dwell mode + * @lpf_weight: weight to calculate the average low pass + * filter for channel congestion + * @passive_mon_intval: intval to monitor wifi activity in passive scan in msec + * @wifi_act_threshold: % of wifi activity used in passive scan 0-100 + * + */ +struct wmi_adaptive_dwelltime_params { + uint32_t vdev_id; + bool is_enabled; + enum scan_dwelltime_adaptive_mode dwelltime_mode; + uint8_t lpf_weight; + uint8_t passive_mon_intval; + uint8_t wifi_act_threshold; +}; + +#define WMI_SCAN_CLIENT_MAX 7 + +/** + * struct wmi_dbs_scan_sel_params - DBS scan selection params + * @num_clients: Number of scan clients dutycycle + * @pdev_id: pdev_id for identifying the MAC + * @module_id: scan client module id + * @num_dbs_scans: number of DBS scans + * @num_non_dbs_scans: number of non-DBS scans + */ +struct wmi_dbs_scan_sel_params { + uint32_t num_clients; + uint32_t pdev_id; + uint32_t module_id[WMI_SCAN_CLIENT_MAX]; + uint32_t num_dbs_scans[WMI_SCAN_CLIENT_MAX]; + uint32_t num_non_dbs_scans[WMI_SCAN_CLIENT_MAX]; +}; + +/** + * struct set_arp_stats - set/reset arp stats + * @vdev_id: session id + * @flag: enable/disable stats + * @pkt_type: type of packet(1 - arp) + * @ip_addr: subnet ipv4 address in case of encrypted packets + * @pkt_type_bitmap: pkt bitmap + * @tcp_src_port: tcp src port for pkt tracking + * @tcp_dst_port: tcp dst port for pkt tracking + * @icmp_ipv4: target ipv4 address to track ping packets + * @reserved: reserved + */ +struct set_arp_stats { + uint32_t vdev_id; + uint8_t flag; + uint8_t pkt_type; + uint32_t ip_addr; + uint32_t pkt_type_bitmap; + uint32_t tcp_src_port; + uint32_t tcp_dst_port; + uint32_t icmp_ipv4; + uint32_t reserved; +}; + +/** + * struct get_arp_stats - get arp stats from firmware + * @pkt_type: packet type(1 - ARP) + * @vdev_id: session id + */ +struct get_arp_stats { + uint8_t pkt_type; + uint32_t vdev_id; +}; + +#endif /* _WMI_UNIFIED_STA_PARAM_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_twt_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_twt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..00bcc263974d4952910d99de46397e783694bf0b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_twt_api.h @@ -0,0 +1,243 @@ + +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to TWT component. + */ + +#ifndef _WMI_UNIFIED_TWT_API_H_ +#define _WMI_UNIFIED_TWT_API_H_ + +#include "wmi_unified_twt_param.h" + + +/** + * wmi_unified_twt_enable_cmd() - Send WMI command to Enable TWT + * @wmi_handle: wmi handle + * @params: Parameters to be configured + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_twt_enable_cmd(wmi_unified_t wmi_handle, + struct wmi_twt_enable_param *params); + +/** + * wmi_unified_twt_disable_cmd() - Send WMI command to disable TWT + * @wmi_handle: wmi handle + * @params: Parameters to be configured + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_twt_disable_cmd(wmi_unified_t wmi_handle, + struct wmi_twt_disable_param *params); + +/** + * wmi_unified_twt_add_dialog_cmd() - Send WMI command to add TWT dialog + * @wmi_handle: wmi handle + * @params: Parameters to be configured + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_twt_add_dialog_cmd(wmi_unified_t wmi_handle, + struct wmi_twt_add_dialog_param *params); + +/** + * wmi_unified_twt_del_dialog_cmd() - Send WMI command to delete TWT dialog + * @wmi_handle: wmi handle + * @params: Parameters to be configured + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_twt_del_dialog_cmd(wmi_unified_t wmi_handle, + struct wmi_twt_del_dialog_param *params); + +/** + * wmi_unified_twt_pause_dialog_cmd() - Send WMI command to pause TWT dialog + * @wmi_handle: wmi handle + * @params: Parameters to be configured + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_twt_pause_dialog_cmd(wmi_unified_t wmi_handle, + struct wmi_twt_pause_dialog_cmd_param *params); + +/** + * wmi_unified_twt_resume_dialog_cmd() - Send WMI command to resume TWT dialog + * @wmi_handle: wmi handle + * @params: Parameters to be configured + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_twt_resume_dialog_cmd( + wmi_unified_t wmi_handle, + struct wmi_twt_resume_dialog_cmd_param *params); + +#ifdef WLAN_SUPPORT_BCAST_TWT +/** + * wmi_unified_twt_btwt_invite_sta_cmd() - Send WMI command for bTWT sta + * invitation + * @wmi_handle: wmi handle + * @params: Parameters to be configured + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_twt_btwt_invite_sta_cmd( + wmi_unified_t wmi_handle, + struct wmi_twt_btwt_invite_sta_cmd_param *params); + +/** + * wmi_unified_twt_btwt_remove_sta_cmd() - Send WMI command for bTWT sta kickoff + * @wmi_handle: wmi handle + * @params: Parameters to be configured + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_twt_btwt_remove_sta_cmd( + wmi_unified_t wmi_handle, + struct wmi_twt_btwt_remove_sta_cmd_param *params); +#endif + +/** + * wmi_extract_twt_enable_comp_event() - Extract WMI event params for TWT enable + * completion event + * @wmi_handle: wmi handle + * @evt_buf: Pointer event buffer + * @params: Parameters to extract + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_twt_enable_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_enable_complete_event_param *params); + +/** + * wmi_extract_twt_disable_comp_event() - Extract WMI event params for TWT + * disable completion event + * @wmi_handle: wmi handle + * @evt_buf: Pointer event buffer + * @params: Parameters to extract + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_twt_disable_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_disable_complete_event *params); + +/** + * wmi_extract_twt_add_dialog_comp_event() - Extract WMI event params for TWT + * add dialog completion event + * @wmi_hdl: wmi handle + * @evt_buf: Pointer event buffer + * @params: Parameters to extract + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_twt_add_dialog_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_add_dialog_complete_event_param *params); + +/** + * wmi_extract_twt_del_dialog_comp_event() - Extract WMI event params for TWT + * delete dialog completion event + * @wmi_hdl: wmi handle + * @evt_buf: Pointer event buffer + * @params: Parameters to extract + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_twt_del_dialog_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_del_dialog_complete_event_param *params); + +/** + * wmi_extract_twt_pause_dialog_comp_event() - Extract WMI event params for TWT + * pause dialog completion event + * @wmi_handle: wmi handle + * @evt_buf: Pointer event buffer + * @params: Parameters to extract + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_twt_pause_dialog_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_pause_dialog_complete_event_param *params); + +/** + * wmi_extract_twt_resume_dialog_comp_event() - Extract WMI event params for TWT + * resume dialog completion event + * @wmi_handle: wmi handle + * @evt_buf: Pointer event buffer + * @params: Parameters to extract + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_twt_resume_dialog_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_resume_dialog_complete_event_param *params); + +#ifdef WLAN_SUPPORT_BCAST_TWT +/** + * wmi_extract_twt_btwt_invite_sta_comp_event() - Extract WMI event params for + * BTWT sta invitation completion event + * @wmi_handle: wmi handle + * @evt_buf: Pointer event buffer + * @params: Parameters to extract + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_twt_btwt_invite_sta_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_btwt_invite_sta_complete_event_param *params); + +/** + * wmi_extract_twt_btwt_remove_sta_comp_event() - Extract WMI event params for + * BTWT sta kickoff completion event + * @wmi_handle: wmi handle + * @evt_buf: Pointer event buffer + * @params: Parameters to extract + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_twt_btwt_remove_sta_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_btwt_remove_sta_complete_event_param *params); +#endif + +#ifdef WLAN_SUPPORT_TWT +void wmi_twt_attach_tlv(struct wmi_unified *wmi_handle); +#else +static void wmi_twt_attach_tlv(struct wmi_unified *wmi_handle) +{ + return; +} +#endif + +#endif /* _WMI_UNIFIED_TWT_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_twt_param.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_twt_param.h new file mode 100644 index 0000000000000000000000000000000000000000..151f61f5e43a8bb46c9614b974a2ff3672a55f49 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_twt_param.h @@ -0,0 +1,484 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API definitions for the TWT WMI APIs. + */ + +#ifndef _WMI_UNIFIED_TWT_PARAM_H_ +#define _WMI_UNIFIED_TWT_PARAM_H_ + +/** + * @pdev_id: pdev_id for identifying the MAC. + * @sta_cong_timer_ms: STA TWT congestion timer TO value in terms of ms + * @mbss_support: Flag indicating if AP TWT feature supported in + * MBSS mode or not. + * @default_slot_size: This is the default value for the TWT slot setup + * by AP (units = microseconds) + * @congestion_thresh_setup: Minimum congestion required to start setting + * up TWT sessions + * @congestion_thresh_teardown: Minimum congestion below which TWT will be + * torn down (in percent of occupied airtime) + * @congestion_thresh_critical: Threshold above which TWT will not be active + * (in percent of occupied airtime) + * @interference_thresh_teardown: Minimum interference above that TWT + * will not be active. The interference parameters use an + * abstract method of evaluating interference. + * The parameters are in percent, ranging from 0 for no + * interference, to 100 for interference extreme enough + * to completely block the signal of interest. + * @interference_thresh_setup: Minimum interference below that TWT session + * can be setup. The interference parameters use an + * abstract method of evaluating interference. + * The parameters are in percent, ranging from 0 for no + * interference, to 100 for interference extreme enough + * to completely block the signal of interest. + * @min_no_sta_setup: Minimum no of STA required to start TWT setup + * @min_no_sta_teardown: Minimum no of STA below which TWT will be torn down + * @no_of_bcast_mcast_slots: Number of default slot sizes reserved for + * BCAST/MCAST delivery + * @min_no_twt_slots: Minimum no of available slots for TWT to be operational + * @max_no_sta_twt: Max no of STA with which TWT is possible + * (must be <= the wmi_resource_config's twt_ap_sta_count value) + * * The below interval parameters have units of milliseconds. + * @mode_check_interval: Interval between two successive check to decide the + * mode of TWT. (units = milliseconds) + * @add_sta_slot_interval: Interval between decisions making to create + * TWT slots for STAs. (units = milliseconds) + * @remove_sta_slot_interval: Inrerval between decisions making to remove TWT + * slot of STAs. (units = milliseconds) + * @flags: Flag to enable or disable capabilities, example bcast twt. + */ +struct wmi_twt_enable_param { + uint32_t pdev_id; + uint32_t sta_cong_timer_ms; + uint32_t mbss_support; + uint32_t default_slot_size; + uint32_t congestion_thresh_setup; + uint32_t congestion_thresh_teardown; + uint32_t congestion_thresh_critical; + uint32_t interference_thresh_teardown; + uint32_t interference_thresh_setup; + uint32_t min_no_sta_setup; + uint32_t min_no_sta_teardown; + uint32_t no_of_bcast_mcast_slots; + uint32_t min_no_twt_slots; + uint32_t max_no_sta_twt; + uint32_t mode_check_interval; + uint32_t add_sta_slot_interval; + uint32_t remove_sta_slot_interval; + uint32_t flags; +}; + +/* status code of enabling TWT + * WMI_ENABLE_TWT_STATUS_OK: enabling TWT successfully completed + * WMI_ENABLE_TWT_STATUS_ALREADY_ENABLED: TWT already enabled + * WMI_ENABLE_TWT_STATUS_NOT_READY: FW not ready for enabling TWT + * WMI_ENABLE_TWT_INVALID_PARAM: invalid parameters + * WMI_ENABLE_TWT_STATUS_UNKNOWN_ERROR: enabling TWT failed with an + * unknown reason + */ +enum WMI_HOST_ENABLE_TWT_STATUS { + WMI_HOST_ENABLE_TWT_STATUS_OK, + WMI_HOST_ENABLE_TWT_STATUS_ALREADY_ENABLED, + WMI_HOST_ENABLE_TWT_STATUS_NOT_READY, + WMI_HOST_ENABLE_TWT_INVALID_PARAM, + WMI_HOST_ENABLE_TWT_STATUS_UNKNOWN_ERROR, +}; + +/** struct wmi_twt_enable_complete_event_param: + * @pdev_is: pdev_id for identifying the MAC. + * @status: From enum WMI_HOST_ENABLE_TWT_STATUS + */ +struct wmi_twt_enable_complete_event_param { + uint32_t pdev_id; + uint32_t status; +}; + +/** struct wmi_twt_disable_param: + * @pdev_id: pdev_id for identifying the MAC. + */ +struct wmi_twt_disable_param { + uint32_t pdev_id; +}; + +/** struct wmi_twt_disable_complete_event: + * @pdev_id: pdev_id for identifying the MAC. + */ +struct wmi_twt_disable_complete_event { + uint32_t pdev_id; +}; + +/* from IEEE 802.11ah section 9.4.2.200 */ +enum WMI_HOST_TWT_COMMAND { + WMI_HOST_TWT_COMMAND_REQUEST_TWT = 0, + WMI_HOST_TWT_COMMAND_SUGGEST_TWT = 1, + WMI_HOST_TWT_COMMAND_DEMAND_TWT = 2, + WMI_HOST_TWT_COMMAND_TWT_GROUPING = 3, + WMI_HOST_TWT_COMMAND_ACCEPT_TWT = 4, + WMI_HOST_TWT_COMMAND_ALTERNATE_TWT = 5, + WMI_HOST_TWT_COMMAND_DICTATE_TWT = 6, + WMI_HOST_TWT_COMMAND_REJECT_TWT = 7, +}; + +/** struct wmi_twt_add_dialog_param - + * @vdev_id: VDEV identifier + * @peer_macaddr: peer MAC address when vdev is AP VDEV + * @dialog_id: diaglog_id (TWT dialog ID) + * This dialog ID must be unique within its vdev. + * @wake_intvl_us: TWT Wake Interval in units of us + * @wake_intvl_mantis: TWT Wake Interval Mantissa + * - wake_intvl_mantis must be <= 0xFFFF + * - wake_intvl_us must be divided evenly by wake_intvl_mantis, + * i.e., wake_intvl_us % wake_intvl_mantis == 0 + * - the quotient of wake_intvl_us/wake_intvl_mantis must be + * 2 to N-th(0<=N<=31) power, + * i.e., wake_intvl_us/wake_intvl_mantis == 2^N, 0<=N<=31 + * @wake_dura_us: TWT Wake Duration in units of us, must be <= 0xFFFF + * wake_dura_us must be divided evenly by 256, + * i.e., wake_dura_us % 256 == 0 + * @sp_offset_us: this long time after TWT setup the 1st SP will start. + * @twt_cmd: cmd from enum WMI_HOST_TWT_COMMAND + * @flag_bcast: 0 means Individual TWT, + * 1 means Broadcast TWT + * @flag_trigger: 0 means non-Trigger-enabled TWT, + * 1 means means Trigger-enabled TWT + * @flag_flow_type: 0 means announced TWT, + * 1 means un-announced TWT + * @flag_protection: 0 means TWT protection is required, + * 1 means TWT protection is not required + * @b_twt_id0: 0 means BTWT recommendation will not be used + * 1 means BTWT recommendation will be used + * @flag_reserved: unused bits + * @b_twt_recommendation: defines types of frames tx during bTWT SP + * @b_twt_persistence: Countdown VAL frames to param update/teardown + */ +struct wmi_twt_add_dialog_param { + uint32_t vdev_id; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t dialog_id; + uint32_t wake_intvl_us; + uint32_t wake_intvl_mantis; + uint32_t wake_dura_us; + uint32_t sp_offset_us; + enum WMI_HOST_TWT_COMMAND twt_cmd; + uint32_t + flag_bcast:1, + flag_trigger:1, + flag_flow_type:1, + flag_protection:1, + flag_b_twt_id0:1, + flag_reserved:11, + b_twt_persistence:8, + b_twt_recommendation:3; +}; + +/* enum - status code of adding TWT dialog + * WMI_HOST_ADD_TWT_STATUS_OK: adding TWT dialog successfully completed + * WMI_HOST_ADD_TWT_STATUS_TWT_NOT_ENABLED: TWT not enabled + * WMI_HOST_ADD_TWT_STATUS_USED_DIALOG_ID: TWT dialog ID is already used + * WMI_HOST_ADD_TWT_STATUS_INVALID_PARAM: invalid parameters + * WMI_HOST_ADD_TWT_STATUS_NOT_READY: FW not ready + * WMI_HOST_ADD_TWT_STATUS_NO_RESOURCE: FW resource exhausted + * WMI_HOST_ADD_TWT_STATUS_NO_ACK: peer AP/STA did not ACK the + * request/response frame + * WMI_HOST_ADD_TWT_STATUS_NO_RESPONSE: peer AP did not send the response frame + * WMI_HOST_ADD_TWT_STATUS_DENIED: AP did not accept the request + * WMI_HOST_ADD_TWT_STATUS_UNKNOWN_ERROR: adding TWT dialog failed with + * an unknown reason + */ +enum WMI_HOST_ADD_TWT_STATUS { + WMI_HOST_ADD_TWT_STATUS_OK, + WMI_HOST_ADD_TWT_STATUS_TWT_NOT_ENABLED, + WMI_HOST_ADD_TWT_STATUS_USED_DIALOG_ID, + WMI_HOST_ADD_TWT_STATUS_INVALID_PARAM, + WMI_HOST_ADD_TWT_STATUS_NOT_READY, + WMI_HOST_ADD_TWT_STATUS_NO_RESOURCE, + WMI_HOST_ADD_TWT_STATUS_NO_ACK, + WMI_HOST_ADD_TWT_STATUS_NO_RESPONSE, + WMI_HOST_ADD_TWT_STATUS_DENIED, + WMI_HOST_ADD_TWT_STATUS_UNKNOWN_ERROR, +}; + +/** struct wmi_twt_add_dialog_complete_param - + * @vdev_id: VDEV identifier + * @peer_macaddr: Peer mac address + * @dialog_id: TWT dialog ID + * @status: refer to WMI_HOST_ADD_TWT_STATUS enum + */ +struct wmi_twt_add_dialog_complete_event_param { + uint32_t vdev_id; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t dialog_id; + uint32_t status; +}; + +/** struct wmi_twt_del_dialog_param - + * @vdev_id: VDEV identifier + * @peer_macaddr: Peer mac address + * @dialog_id: TWT dialog ID + * @b_twt_persistence: persistence val for b-twt + */ +struct wmi_twt_del_dialog_param { + uint32_t vdev_id; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t dialog_id; +#ifdef WLAN_SUPPORT_BCAST_TWT + uint32_t b_twt_persistence; +#endif +}; + +/* status code of deleting TWT dialog + * WMI_HOST_DEL_TWT_STATUS_OK: deleting TWT dialog successfully completed + * WMI_HOST_DEL_TWT_STATUS_DIALOG_ID_NOT_EXIST: TWT dialog ID not exists + * WMI_HOST_DEL_TWT_STATUS_INVALID_PARAM: invalid parameters + * WMI_HOST_DEL_TWT_STATUS_DIALOG_ID_BUSY: FW is in the process of handling + * this dialog + * WMI_HOST_DEL_TWT_STATUS_NO_RESOURCE: FW resource exhausted + * WMI_HOST_DEL_TWT_STATUS_NO_ACK: peer AP/STA did not ACK the request/response + * frame + * WMI_HOST_DEL_TWT_STATUS_UNKNOWN_ERROR: deleting TWT dialog failed with an + * unknown reason + */ +enum WMI_HOST_DEL_TWT_STATUS { + WMI_HOST_DEL_TWT_STATUS_OK, + WMI_HOST_DEL_TWT_STATUS_DIALOG_ID_NOT_EXIST, + WMI_HOST_DEL_TWT_STATUS_INVALID_PARAM, + WMI_HOST_DEL_TWT_STATUS_DIALOG_ID_BUSY, + WMI_HOST_DEL_TWT_STATUS_NO_RESOURCE, + WMI_HOST_DEL_TWT_STATUS_NO_ACK, + WMI_HOST_DEL_TWT_STATUS_UNKNOWN_ERROR, +}; + +/** struct wmi_twt_del_dialog_complete_event_param - + * @vdev_id: VDEV identifier + * @peer_macaddr: Peer mac address + * @dialog_id: TWT dialog ID + * @b_twt_persistence: persistence val for b-twt + * @status: refer to WMI_HOST_DEL_TWT_STATUS enum + */ +struct wmi_twt_del_dialog_complete_event_param { + uint32_t vdev_id; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t dialog_id; +#ifdef WLAN_SUPPORT_BCAST_TWT + uint32_t b_twt_persistence; +#endif + uint32_t status; +}; + +/** struct wmi_twt_pause_dialog_cmd_param - + * @vdev_id: VDEV identifier + * @peer_macaddr: Peer mac address + * @dialog_id: TWT dialog ID + */ +struct wmi_twt_pause_dialog_cmd_param { + uint32_t vdev_id; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t dialog_id; +}; + +/* enum WMI_HOST_PAUSE_TWT_STATUS - status code of pausing TWT dialog + * WMI_HOST_PAUSE_TWT_STATUS_OK: pausing TWT dialog successfully completed + * WMI_HOST_PAUSE_TWT_STATUS_DIALOG_ID_NOT_EXIST: TWT dialog ID not exists + * WMI_HOST_PAUSE_TWT_STATUS_INVALID_PARAM: invalid parameters + * WMI_HOST_PAUSE_TWT_STATUS_DIALOG_ID_BUSY: FW is in the process of handling + * this dialog + * WMI_HOST_PAUSE_TWT_STATUS_NO_RESOURCE: FW resource exhausted + * WMI_HOST_PAUSE_TWT_STATUS_NO_ACK: peer AP/STA did not ACK the + * request/response frame + * WMI_HOST_PAUSE_TWT_STATUS_UNKNOWN_ERROR: pausing TWT dialog failed with an + * unknown reason + */ +enum WMI_HOST_PAUSE_TWT_STATUS { + WMI_HOST_PAUSE_TWT_STATUS_OK, + WMI_HOST_PAUSE_TWT_STATUS_DIALOG_ID_NOT_EXIST, + WMI_HOST_PAUSE_TWT_STATUS_INVALID_PARAM, + WMI_HOST_PAUSE_TWT_STATUS_DIALOG_ID_BUSY, + WMI_HOST_PAUSE_TWT_STATUS_NO_RESOURCE, + WMI_HOST_PAUSE_TWT_STATUS_NO_ACK, + WMI_HOST_PAUSE_TWT_STATUS_UNKNOWN_ERROR, +}; + +/** struct wmi_twt_pause_dialog_complete_event_param - + * @vdev_id: VDEV identifier + * @peer_macaddr: Peer mac address + * @dialog_id: TWT dialog ID + * @status: refer to WMI_HOST_PAUSE_TWT_STATUS + */ +struct wmi_twt_pause_dialog_complete_event_param { + uint32_t vdev_id; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t dialog_id; + uint32_t status; +}; + +/** struct wmi_twt_resume_dialog_cmd_param - + * @vdev_id: VDEV identifier + * @peer_macaddr: Peer mac address + * @dialog_id: TWT dialog ID + * @sp_offset_us: this long time after TWT resumed the 1st SP will start + * @next_twt_size: Next TWT subfield Size. + * Refer IEEE 802.11ax section "9.4.1.60 TWT Information field" + */ +struct wmi_twt_resume_dialog_cmd_param { + uint32_t vdev_id; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t dialog_id; + uint32_t sp_offset_us; + uint32_t next_twt_size; +}; + +/* enum WMI_HOST_RESUME_TWT_STATUS - status code of resuming TWT dialog + * WMI_HOST_RESUME_TWT_STATUS_OK: resuming TWT dialog successfully completed + * WMI_HOST_RESUME_TWT_STATUS_DIALOG_ID_NOT_EXIST: TWT dialog ID not exists + * WMI_HOST_RESUME_TWT_STATUS_INVALID_PARAM: invalid parameters + * WMI_HOST_RESUME_TWT_STATUS_DIALOG_ID_BUSY: FW is in the process of handling + * this dialog + * WMI_HOST_RESUME_TWT_STATUS_NOT_PAUSED: dialog not paused currently + * WMI_HOST_RESUME_TWT_STATUS_NO_RESOURCE: FW resource exhausted + * WMI_HOST_RESUME_TWT_STATUS_NO_ACK: peer AP/STA did not ACK the + * request/response frame + * WMI_HOST_RESUME_TWT_STATUS_UNKNOWN_ERROR: resuming TWT dialog failed with an + * unknown reason + */ +enum WMI_HOST_RESUME_TWT_STATUS { + WMI_HOST_RESUME_TWT_STATUS_OK, + WMI_HOST_RESUME_TWT_STATUS_DIALOG_ID_NOT_EXIST, + WMI_HOST_RESUME_TWT_STATUS_INVALID_PARAM, + WMI_HOST_RESUME_TWT_STATUS_DIALOG_ID_BUSY, + WMI_HOST_RESUME_TWT_STATUS_NOT_PAUSED, + WMI_HOST_RESUME_TWT_STATUS_NO_RESOURCE, + WMI_HOST_RESUME_TWT_STATUS_NO_ACK, + WMI_HOST_RESUME_TWT_STATUS_UNKNOWN_ERROR, +}; + +/** struct wmi_twt_resume_dialog_complete_event_param - + * @vdev_id: VDEV identifier + * @peer_macaddr: Peer mac address + * @dialog_id: TWT dialog ID + * @status: refer to WMI_HOST_RESUME_TWT_STATUS + */ +struct wmi_twt_resume_dialog_complete_event_param { + uint32_t vdev_id; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t dialog_id; + uint32_t status; +}; + +#ifdef WLAN_SUPPORT_BCAST_TWT +/** struct wmi_twt_btwt_invite_sta_cmd_param - + * @vdev_id: VDEV identifier + * @peer_macaddr: Peer mac address + * @dialog_id: TWT dialog ID + */ +struct wmi_twt_btwt_invite_sta_cmd_param { + uint32_t vdev_id; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t dialog_id; +}; + +/* enum WMI_HOST_INVITATION_TWT_BTWT_STATUS - status code of TWT Invitation + * dialog + * WMI_HOST_INVITATION_TWT_BTWT_STATUS_OK: BTWT invitation successfully + * completed + * WMI_HOST_INVITATION_TWT_TWT_STATUS_DIALOG_ID_NOT_EXIST: BTWT dialog ID not + * exists + * WMI_HOST_INVITATION_TWT_BTWT_STATUS_INVALID_PARAM: invalid parameters + * WMI_HOST_INVITATION_TWT_BTWT_STATUS_DIALOG_ID_BUSY: FW is in the process of + * handling this dialog + * WMI_HOST_INVITATION_TWT_BTWT_STATUS_NO_RESOURCE: FW resource exhausted + * WMI_HOST_INVITATION_TWT_BTWT_STATUS_NO_ACK: peer AP/STA did not ACK the + * request/response frame + * WMI_HOST_INVITATION_TWT_BTWT_STATUS_UNKNOWN_ERROR: BTWT invitation failed + * with an unknown reason + */ +enum WMI_HOST_INVITATION_TWT_BTWT_STATUS { + WMI_HOST_INVITATION_TWT_BTWT_STATUS_OK, + WMI_HOST_INVITATION_TWT_BTWT_STATUS_DIALOG_ID_NOT_EXIST, + WMI_HOST_INVITATION_TWT_BTWT_STATUS_INVALID_PARAM, + WMI_HOST_INVITATION_TWT_BTWT_STATUS_DIALOG_ID_BUSY, + WMI_HOST_INVITATION_TWT_BTWT_STATUS_NO_RESOURCE, + WMI_HOST_INVITATION_TWT_BTWT_STATUS_NO_ACK, + WMI_HOST_INVITATION_TWT_BTWT_STATUS_UNKNOWN_ERROR, +}; + +/** struct wmi_twt_btwt_invite_sta_complete_event_param - + * @vdev_id: VDEV identifier + * @peer_macaddr: Peer mac address + * @dialog_id: BTWT dialog ID + * @status: refer to WMI_HOST_INVITATION_TWT_BTWT_STATUS + */ +struct wmi_twt_btwt_invite_sta_complete_event_param { + uint32_t vdev_id; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t dialog_id; + uint32_t status; +}; + +/** struct wmi_twt_btwt_remove_sta_cmd_param - + * @vdev_id: VDEV identifier + * @peer_macaddr: Peer mac address + * @dialog_id: BTWT dialog ID + */ +struct wmi_twt_btwt_remove_sta_cmd_param { + uint32_t vdev_id; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t dialog_id; +}; + +/* enum WMI_HOST_KICKOFF_TWT_BTWT_STATUS - status code of resuming TWT dialog + * WMI_HOST_KICKOFF_TWT_BTWT_STATUS_OK: TWT kickoff successfully completed + * WMI_HOST_KICKOFF_TWT_BTWT_STATUS_DIALOG_ID_NOT_EXIST: BTWT dialog ID not + * exists + * WMI_HOST_KICKOFF_TWT_BTWT_STATUS_INVALID_PARAM: invalid parameters + * WMI_HOST_KICKOFF_TWT_BTWT_STATUS_DIALOG_ID_BUSY: FW is in the process of + * handling this dialog + * WMI_HOST_KICKOFF_TWT_BTWT_STATUS_NOT_PAUSED: Dialog not currently paused + * WMI_HOST_KICKOFF_TWT_BTWT_STATUS_NO_RESOURCE: FW resource exhausted + * WMI_HOST_KICKOFF_TWT_BTWT_STATUS_NO_ACK: peer AP/STA did not ACK the + * request/response frame + * WMI_HOST_KICKOFF_TWT_BTWT_STATUS_UNKNOWN_ERROR: BTWT kickoff failed with an + * unknown reason + */ +enum WMI_HOST_KICKOFF_TWT_BTWT_STATUS { + WMI_HOST_KICKOFF_TWT_BTWT_STATUS_OK, + WMI_HOST_KICKOFF_TWT_BTWT_STATUS_DIALOG_ID_NOT_EXIST, + WMI_HOST_KICKOFF_TWT_BTWT_STATUS_INVALID_PARAM, + WMI_HOST_KICKOFF_TWT_BTWT_STATUS_DIALOG_ID_BUSY, + WMI_HOST_KICKOFF_TWT_BTWT_STATUS_NOT_PAUSED, + WMI_HOST_KICKOFF_TWT_BTWT_STATUS_NO_RESOURCE, + WMI_HOST_KICKOFF_TWT_BTWT_STATUS_NO_ACK, + WMI_HOST_KICKOFF_TWT_BTWT_STATUS_UNKNOWN_ERROR, +}; + +/** struct wmi_twt_btwt_remove_sta_complete_event_param - + * @vdev_id: VDEV identifier + * @peer_macaddr: Peer mac address + * @dialog_id: BTWT dialog ID + * @status: refer to WMI_HOST_KICKOFF_TWT_BTWT_STATUS + */ +struct wmi_twt_btwt_remove_sta_complete_event_param { + uint32_t vdev_id; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t dialog_id; + uint32_t status; +}; +#endif + +#endif /* _WMI_UNIFIED_TWT_PARAM_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_vdev_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_vdev_api.h new file mode 100644 index 0000000000000000000000000000000000000000..e8c5c8ece81bdb46468b9e5a6610e478b4a1565f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_vdev_api.h @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API declarations for the Unified Wireless Module + * Interface (WMI). + */ + +#ifndef _WMI_UNIFIED_VDEV_API_H_ +#define _WMI_UNIFIED_VDEV_API_H_ + +#include + +/** + * wmi_unified_vdev_set_neighbour_rx_cmd_send() - WMI set neighbour rx function + * @param wmi_handle: handle to WMI. + * @param macaddr: MAC address + * @param param: pointer to hold neighbour rx parameter + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_set_neighbour_rx_cmd_send( + struct wmi_unified *wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct set_neighbour_rx_params *param); + +/** + * wmi_unified_vdev_config_ratemask_cmd_send() - WMI config ratemask function + * @param wmi_handle: handle to WMI. + * @param param: pointer to hold config ratemask param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_vdev_config_ratemask_cmd_send(struct wmi_unified *wmi_handle, + struct config_ratemask_params *param); + +/** + * wmi_unified_send_multiple_vdev_restart_req_cmd() - send multiple vdev restart + * @wmi_handle: wmi handle + * @param: multiple vdev restart parameter + * + * Send WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_multiple_vdev_restart_req_cmd( + struct wmi_unified *wmi_handle, + struct multiple_vdev_restart_params *param); + +/** + * wmi_unified_beacon_send_cmd() - WMI beacon send function + * @param wmi_handle: handle to WMI. + * @param macaddr: MAC address + * @param param: pointer to hold beacon send cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_beacon_send_cmd(struct wmi_unified *wmi_handle, + struct beacon_params *param); + +/** + * wmi_extract_vdev_start_resp() - extract vdev start response + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param vdev_rsp: Pointer to hold vdev response + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_vdev_start_resp(struct wmi_unified *wmi_handle, void *evt_buf, + wmi_host_vdev_start_resp *vdev_start_resp); + +/** + * wmi_extract_vdev_stopped_param() - extract vdev stop param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param vdev_id: Pointer to hold vdev identifier + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_vdev_stopped_param(struct wmi_unified *wmi_handle, void *evt_buf, + uint32_t *vdev_id); + +/** + * wmi_extract_vdev_delete_resp() - extract vdev delete response + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param delete_rsp: Pointer to hold vdev delete response + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_vdev_delete_resp(struct wmi_unified *wmi_handle, void *evt_buf, + struct wmi_host_vdev_delete_resp *vdev_del_resp); + +/** + * wmi_extract_vdev_peer_delete_all_response_event() - extract peer delete all + * response + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param delete_rsp: Pointer to hold peer delete al response + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_vdev_peer_delete_all_response_event( + struct wmi_unified *wmi_handle, + void *evt_buf, + struct wmi_host_vdev_peer_delete_all_response_event *delete_rsp); + +/** + * wmi_extract_ext_tbttoffset_num_vdevs() - extract ext tbtt offset num vdev + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param vdev_map: Pointer to hold num vdev + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_ext_tbttoffset_num_vdevs(struct wmi_unified *wmi_handle, + void *evt_buf, uint32_t *num_vdevs); + +/** + * wmi_extract_tbttoffset_num_vdevs() - extract tbtt offset num vdev + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param vdev_map: Pointer to hold num vdev + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_tbttoffset_num_vdevs(struct wmi_unified *wmi_handle, void *evt_buf, + uint32_t *num_vdevs); + +/** + * wmi_extract_multi_vdev_restart_resp_event() - extract multi vdev restart + * response + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @restart_rsp: Pointer to hold multi vdev restart response + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_multi_vdev_restart_resp_event( + struct wmi_unified *wmi_handle, + void *evt_buf, + struct multi_vdev_restart_resp *restart_rsp); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_vdev_tlv.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_vdev_tlv.h new file mode 100644 index 0000000000000000000000000000000000000000..546d54f231b9b38239f124c9ba48a6ca29d31601 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_vdev_tlv.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains TLV API declarations for the VDEV Unified Wireless Module + * Interface (WMI). + */ + +#ifndef __WMI_UNIFIED_VDEV_TLV_H__ +#define __WMI_UNIFIED_VDEV_TLV_H__ + +#include +#include +#include "wmi_unified.h" + +/** + * wmi_vdev_attach_tlv: API to init vdev tlv ops + * + * @wmi_handle: pointer to wmi_unified structure. + * + * Return: none + */ +void wmi_vdev_attach_tlv(struct wmi_unified *wmi_handle); + +#ifdef WLAN_BCN_RATECODE_ENABLE +static inline void wmi_enable_bcn_ratecode(uint32_t cmd_flag) +{ + cmd_flag |= WMI_UNIFIED_VDEV_START_BCN_TX_RATE_PRESENT; +} +#else +static inline void wmi_enable_bcn_ratecode(uint32_t cmd_flag) +{ +} +#endif + +#endif /* __WMI_UNIFIED_VDEV_TLV_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_version_whitelist.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_version_whitelist.h new file mode 100644 index 0000000000000000000000000000000000000000..a114cf40ebb0e807d590d6d2b969b40eacbcff03 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_version_whitelist.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Every Product Line or chipset or team can have its own Whitelist table. + * The following is a list of versions that the present software can support + * even though its versions are incompatible. Any entry here means that the + * indicated version does not break WMI compatibility even though it has + * a minor version change. + */ +#ifndef _WMI_VERSION_WHITELIST_H_ +#define _WMI_VERSION_WHITELIST_H_ +static wmi_whitelist_version_info version_whitelist[] = { + {0, 0, 0x5F414351, 0x00004C4D, 0, 0} + /* Placeholder: Major=0, Minor=0, Namespace="QCA_ML" (Dummy entry) */ +}; +#endif /* _WMI_VERSION_WHITELIST_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_filtered_logging.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_filtered_logging.c new file mode 100644 index 0000000000000000000000000000000000000000..46bfe651bc409e9952b05a44aef87efc52f350fc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_filtered_logging.c @@ -0,0 +1,513 @@ +/* + * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wmi_filtered_logging.h" + +static struct wmi_log_buf_t *wmi_log_buf_allocate(void) +{ + struct wmi_log_buf_t *cmd_log_buf; + int buf_size = WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY * + sizeof(struct wmi_command_debug); + + cmd_log_buf = qdf_mem_malloc(sizeof(struct wmi_log_buf_t)); + if (!cmd_log_buf) + return NULL; + + cmd_log_buf->buf = qdf_mem_malloc(buf_size); + if (!cmd_log_buf->buf) { + qdf_mem_free(cmd_log_buf); + return NULL; + } + cmd_log_buf->length = 0; + cmd_log_buf->buf_tail_idx = 0; + cmd_log_buf->size = WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY; + cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx; + + return cmd_log_buf; +} + +void wmi_filtered_logging_init(wmi_unified_t wmi_handle) +{ + int buf_size = WMI_FILTERED_CMD_EVT_SUPPORTED * sizeof(int); + + /* alloc buffer to save user inputs, for WMI_CMD */ + wmi_handle->log_info.filtered_wmi_cmds = + qdf_mem_malloc(buf_size); + if (!wmi_handle->log_info.filtered_wmi_cmds) + return; + + wmi_handle->log_info.filtered_wmi_cmds_idx = 0; + + /* alloc buffer to save user interested WMI commands */ + wmi_handle->log_info.wmi_filtered_command_log = wmi_log_buf_allocate(); + if (!wmi_handle->log_info.wmi_filtered_command_log) + goto fail1; + + /* alloc buffer to save user inputs, for WMI_EVT */ + wmi_handle->log_info.filtered_wmi_evts = + qdf_mem_malloc(buf_size); + if (!wmi_handle->log_info.filtered_wmi_evts) + goto fail2; + + wmi_handle->log_info.filtered_wmi_evts_idx = 0; + + /* alloc buffer to save user interested WMI events */ + wmi_handle->log_info.wmi_filtered_event_log = wmi_log_buf_allocate(); + if (!wmi_handle->log_info.wmi_filtered_event_log) + goto fail3; + + return; + +fail3: + qdf_mem_free(wmi_handle->log_info.filtered_wmi_evts); + wmi_handle->log_info.filtered_wmi_evts = NULL; +fail2: + qdf_mem_free(wmi_handle->log_info.wmi_filtered_command_log); + wmi_handle->log_info.wmi_filtered_command_log = NULL; +fail1: + qdf_mem_free(wmi_handle->log_info.filtered_wmi_cmds); + wmi_handle->log_info.filtered_wmi_cmds = NULL; +} + +void wmi_filtered_logging_free(wmi_unified_t wmi_handle) +{ + if (!wmi_handle) + return; + + qdf_mem_free(wmi_handle->log_info.filtered_wmi_cmds); + wmi_handle->log_info.filtered_wmi_cmds = NULL; + qdf_mem_free(wmi_handle->log_info.filtered_wmi_evts); + wmi_handle->log_info.filtered_wmi_evts = NULL; + + if (wmi_handle->log_info.wmi_filtered_command_log) { + qdf_mem_free(wmi_handle->log_info. + wmi_filtered_command_log->buf); + wmi_handle->log_info.wmi_filtered_command_log->buf = NULL; + qdf_mem_free(wmi_handle->log_info.wmi_filtered_command_log); + wmi_handle->log_info.wmi_filtered_command_log = NULL; + } + if (wmi_handle->log_info.wmi_filtered_event_log) { + qdf_mem_free(wmi_handle->log_info. + wmi_filtered_event_log->buf); + wmi_handle->log_info.wmi_filtered_event_log->buf = NULL; + qdf_mem_free(wmi_handle->log_info.wmi_filtered_event_log); + wmi_handle->log_info.wmi_filtered_event_log = NULL; + } +} + +/* + * Reset the buffer which saves user interested cmds/evts + */ +static int wmi_reset_filtered_buffers(wmi_unified_t wmi_handle, + struct wmi_log_buf_t *cmd_log_buf) +{ + int buf_size = WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY * + sizeof(struct wmi_command_debug); + + if (!cmd_log_buf) + return 0; + + cmd_log_buf->length = 0; + cmd_log_buf->buf_tail_idx = 0; + cmd_log_buf->size = WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY; + cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx; + qdf_mem_zero(cmd_log_buf->buf, buf_size); + return 0; +} + +/* + * Check if id is in id list, + * return true if found. + */ +static bool wmi_id_in_list(uint32_t *id_list, uint32_t id) +{ + int i; + + if (!id_list) + return false; + + for (i = 0; i < WMI_FILTERED_CMD_EVT_SUPPORTED; i++) { + if (id == id_list[i]) { + /* id already in target list */ + return true; + } + } + return false; +} + +/* + * Add command or event ids to list to be recorded + */ +static int wmi_add_to_record_list(wmi_unified_t wmi_handle, + uint32_t id, + enum WMI_RECORD_TYPE record_type) +{ + uint32_t *target_list; + + if (record_type == WMI_CMD) { + target_list = wmi_handle->log_info.filtered_wmi_cmds; + /* check if id already in target list */ + if (wmi_id_in_list(target_list, id)) + return 0; + if (wmi_handle->log_info.filtered_wmi_cmds_idx >= + WMI_FILTERED_CMD_EVT_SUPPORTED) { + wmi_handle->log_info.filtered_wmi_cmds_idx = 0; + } + target_list[wmi_handle->log_info.filtered_wmi_cmds_idx] = id; + wmi_handle->log_info.filtered_wmi_cmds_idx++; + } else if (record_type == WMI_EVT) { + target_list = wmi_handle->log_info.filtered_wmi_evts; + /* check if id already in target list */ + if (wmi_id_in_list(target_list, id)) + return 0; + if (wmi_handle->log_info.filtered_wmi_evts_idx >= + WMI_FILTERED_CMD_EVT_SUPPORTED) { + wmi_handle->log_info.filtered_wmi_evts_idx = 0; + } + target_list[wmi_handle->log_info.filtered_wmi_evts_idx] = id; + wmi_handle->log_info.filtered_wmi_evts_idx++; + } else { + return -EINVAL; + } + return 0; +} + +static void wmi_specific_cmd_evt_record(uint32_t id, uint8_t *buf, + struct wmi_log_buf_t *log_buffer) +{ + int idx; + struct wmi_command_debug *tmpbuf = + (struct wmi_command_debug *)log_buffer->buf; + + if (*log_buffer->p_buf_tail_idx >= WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY) + *log_buffer->p_buf_tail_idx = 0; + + idx = *log_buffer->p_buf_tail_idx; + tmpbuf[idx].command = id; + qdf_mem_copy(tmpbuf[idx].data, buf, + WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH); + tmpbuf[idx].time = qdf_get_log_timestamp(); + (*log_buffer->p_buf_tail_idx)++; + log_buffer->length++; +} + +void wmi_specific_cmd_record(wmi_unified_t wmi_handle, + uint32_t id, uint8_t *buf) +{ + uint32_t *target_list; + struct wmi_log_buf_t *log_buffer; + + target_list = wmi_handle->log_info.filtered_wmi_cmds; + if (!target_list) + return; + + log_buffer = wmi_handle->log_info.wmi_filtered_command_log; + if (!log_buffer) + return; + + if (wmi_id_in_list(target_list, id)) { + /* id in target list, need to be recorded */ + wmi_specific_cmd_evt_record(id, buf, log_buffer); + } +} + +void wmi_specific_evt_record(wmi_unified_t wmi_handle, + uint32_t id, uint8_t *buf) +{ + uint32_t *target_list; + struct wmi_log_buf_t *log_buffer; + + target_list = wmi_handle->log_info.filtered_wmi_evts; + if (!target_list) + return; + + log_buffer = wmi_handle->log_info.wmi_filtered_event_log; + if (!log_buffer) + return; + + if (wmi_id_in_list(target_list, id)) { + /* id in target list, need to be recorded */ + wmi_specific_cmd_evt_record(id, buf, log_buffer); + } +} + +/* + * Debugfs read/write functions + */ +static int wmi_filtered_seq_printf(qdf_debugfs_file_t m, const char *f, ...) +{ + va_list args; + + va_start(args, f); + seq_vprintf(m, f, args); + va_end(args); + + return 0; +} + +/* + * debugfs show/read for filtered_wmi_cmds + */ +int debug_filtered_wmi_cmds_show(qdf_debugfs_file_t m, void *v) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)m->private; + int i; + int *target_list; + + target_list = wmi_handle->log_info.filtered_wmi_cmds; + if (!target_list) + return 0; + + for (i = 0; i < WMI_FILTERED_CMD_EVT_SUPPORTED; i++) { + if (target_list[i] != 0) { + wmi_filtered_seq_printf(m, "0x%x ", + target_list[i]); + } + } + wmi_filtered_seq_printf(m, "\n"); + + return 0; +} + +int debug_filtered_wmi_evts_show(qdf_debugfs_file_t m, void *v) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)m->private; + int i; + int *target_list; + + target_list = wmi_handle->log_info.filtered_wmi_evts; + if (!target_list) + return 0; + for (i = 0; i < WMI_FILTERED_CMD_EVT_SUPPORTED; i++) { + if (target_list[i] != 0) { + wmi_filtered_seq_printf(m, "0x%x ", + target_list[i]); + } + } + wmi_filtered_seq_printf(m, "\n"); + + return 0; +} + +static int wmi_log_show(wmi_unified_t wmi_handle, void *buf, + qdf_debugfs_file_t m) +{ + struct wmi_log_buf_t *wmi_log = (struct wmi_log_buf_t *)buf; + int pos, nread, outlen; + int i; + uint64_t secs, usecs; + int wmi_ring_size = 100; + + qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); + if (!wmi_log->length) { + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); + return wmi_filtered_seq_printf(m, + "Nothing to read!\n"); + } + if (wmi_log->length <= wmi_ring_size) + nread = wmi_log->length; + else + nread = wmi_ring_size; + + if (*wmi_log->p_buf_tail_idx == 0) + /* tail can be 0 after wrap-around */ + pos = wmi_ring_size - 1; + else + pos = *wmi_log->p_buf_tail_idx - 1; + + outlen = wmi_filtered_seq_printf(m, "Length = %d\n", wmi_log->length); + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); + while (nread--) { + struct wmi_event_debug *wmi_record; + + wmi_record = &(((struct wmi_event_debug *)wmi_log->buf)[pos]); + qdf_log_timestamp_to_secs(wmi_record->time, &secs, + &usecs); + outlen += wmi_filtered_seq_printf(m, "Event ID = %x\n", + (wmi_record->event)); + outlen += + wmi_filtered_seq_printf(m, + "Event TIME = [%llu.%06llu]\n", + secs, usecs); + outlen += wmi_filtered_seq_printf(m, "CMD = "); + for (i = 0; i < (WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH / + sizeof(uint32_t)); i++) + outlen += wmi_filtered_seq_printf(m, "%x ", + wmi_record->data[i]); + outlen += wmi_filtered_seq_printf(m, "\n"); + if (pos == 0) + pos = wmi_ring_size - 1; + else + pos--; + } + return outlen; +} + +int debug_wmi_filtered_command_log_show(qdf_debugfs_file_t m, void *v) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)m->private; + struct wmi_log_buf_t *wmi_log = + wmi_handle->log_info.wmi_filtered_command_log; + + if (!wmi_log) + return 0; + return wmi_log_show(wmi_handle, wmi_log, m); +} + +int debug_wmi_filtered_event_log_show(qdf_debugfs_file_t m, void *v) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)m->private; + struct wmi_log_buf_t *wmi_log = + wmi_handle->log_info.wmi_filtered_event_log; + + if (!wmi_log) + return 0; + return wmi_log_show(wmi_handle, wmi_log, m); +} + +ssize_t debug_filtered_wmi_cmds_write(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos) +{ + wmi_unified_t wmi_handle = + ((struct seq_file *)file->private_data)->private; + int k, ret; + char locbuf[12] = {0}; + int buf_size = WMI_FILTERED_CMD_EVT_SUPPORTED * sizeof(int); + + if ((!buf) || (count > 8 || count <= 0)) + return -EFAULT; + + if (!wmi_handle->log_info.filtered_wmi_cmds) + return -EFAULT; + + if (copy_from_user(locbuf, buf, count)) + return -EFAULT; + + ret = qdf_kstrtoint(locbuf, 16, &k); + if (ret) + return -EINVAL; + + if (k == 0xffff) { + qdf_mem_zero(wmi_handle->log_info.filtered_wmi_cmds, buf_size); + wmi_handle->log_info.filtered_wmi_cmds_idx = 0; + return count; + } + + if (wmi_add_to_record_list(wmi_handle, k, WMI_CMD)) { + WMI_LOGE("Add cmd %d to WMI_CMD list failed"); + return 0; + } + + return count; +} + +ssize_t debug_filtered_wmi_evts_write(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos) +{ + wmi_unified_t wmi_handle = + ((struct seq_file *)file->private_data)->private; + int k, ret; + char locbuf[12] = {0}; + int buf_size = WMI_FILTERED_CMD_EVT_SUPPORTED * sizeof(int); + + if ((!buf) || (count > 8 || count <= 0)) + return -EFAULT; + + if (!wmi_handle->log_info.filtered_wmi_evts) + return -EFAULT; + + if (copy_from_user(locbuf, buf, count)) + return -EFAULT; + + ret = qdf_kstrtoint(locbuf, 16, &k); + if (ret) + return -EINVAL; + + if (k == 0xffff) { + qdf_mem_zero(wmi_handle->log_info.filtered_wmi_evts, buf_size); + wmi_handle->log_info.filtered_wmi_evts_idx = 0; + return count; + } + + if (wmi_add_to_record_list(wmi_handle, k, WMI_EVT)) { + WMI_LOGE("Add cmd %d to WMI_EVT list failed"); + return 0; + } + + return count; +} + +ssize_t debug_wmi_filtered_command_log_write(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos) +{ + wmi_unified_t wmi_handle = + ((struct seq_file *)file->private_data)->private; + int k, ret; + char locbuf[12] = {0}; + struct wmi_log_buf_t *cmd_log_buf; + + if ((!buf) || (count > 8 || count <= 0)) + return -EFAULT; + + if (copy_from_user(locbuf, buf, count)) + return -EFAULT; + + ret = qdf_kstrtoint(locbuf, 16, &k); + if (ret) + return -EINVAL; + + if (k != 0xffff) + return -EINVAL; + + cmd_log_buf = wmi_handle->log_info.wmi_filtered_command_log; + if (wmi_reset_filtered_buffers(wmi_handle, cmd_log_buf)) + WMI_LOGE("reset WMI CMD filtered_buffers failed"); + return count; +} + +ssize_t debug_wmi_filtered_event_log_write(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos) +{ + wmi_unified_t wmi_handle = + ((struct seq_file *)file->private_data)->private; + int k, ret; + char locbuf[12] = {0}; + struct wmi_log_buf_t *cmd_log_buf; + + if ((!buf) || (count > 8 || count <= 0)) + return -EFAULT; + + if (copy_from_user(locbuf, buf, count)) + return -EFAULT; + + ret = qdf_kstrtoint(locbuf, 16, &k); + if (ret) + return -EINVAL; + + if (k != 0xffff) + return -EINVAL; + + cmd_log_buf = wmi_handle->log_info.wmi_filtered_event_log; + if (wmi_reset_filtered_buffers(wmi_handle, cmd_log_buf)) + WMI_LOGE("reset WMI EVT filtered_buffers failed"); + return count; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_hang_event.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_hang_event.c new file mode 100644 index 0000000000000000000000000000000000000000..f69ceebb7db8dc0a35ec7987b90bfafa01554c73 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_hang_event.c @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include +#include +#include +#include +#include + +struct wmi_hang_data_fixed_param { + uint16_t tlv_header; /* tlv tag and length */ + uint32_t event; + uint32_t data; + uint64_t time; +} qdf_packed; + +#define WMI_EVT_HIST 0 +#define WMI_CMD_HIST 1 + +static void wmi_log_history(struct notifier_block *block, void *data, + uint8_t wmi_history) +{ + qdf_notif_block *notif_block = qdf_container_of(block, qdf_notif_block, + notif_block); + struct qdf_notifer_data *wmi_hang_data = data; + int nread, pos, total_len; + unsigned int wmi_ring_size = 1; + uint64_t secs, usecs; + struct wmi_event_debug *wmi_evt; + struct wmi_unified *wmi_handle; + struct wmi_log_buf_t *wmi_log; + struct wmi_hang_data_fixed_param *cmd; + struct wmi_command_debug *wmi_cmd; + uint8_t *wmi_buf_ptr; + + if (!wmi_hang_data) + return; + + wmi_handle = notif_block->priv_data; + if (!wmi_handle) + return; + + if (wmi_history) + wmi_log = &wmi_handle->log_info.wmi_event_log_buf_info; + else + wmi_log = &wmi_handle->log_info.wmi_command_log_buf_info; + + total_len = sizeof(struct wmi_hang_data_fixed_param); + + if (wmi_log->length <= wmi_ring_size) + nread = wmi_log->length; + else + nread = wmi_ring_size; + + if (*wmi_log->p_buf_tail_idx == 0) + /* tail can be 0 after wrap-around */ + pos = wmi_ring_size - 1; + else + pos = *wmi_log->p_buf_tail_idx - 1; + + while (nread--) { + if (wmi_hang_data->offset + total_len > QDF_WLAN_HANG_FW_OFFSET) + return; + + switch (wmi_history) { + case WMI_EVT_HIST: + wmi_buf_ptr = (wmi_hang_data->hang_data + + wmi_hang_data->offset); + cmd = ((struct wmi_hang_data_fixed_param *)wmi_buf_ptr); + QDF_HANG_EVT_SET_HDR(&cmd->tlv_header, + HANG_EVT_TAG_WMI_EVT_HIST, + QDF_HANG_GET_STRUCT_TLVLEN(struct wmi_hang_data_fixed_param)); + wmi_evt = &(((struct wmi_event_debug *)wmi_log->buf)[pos]); + cmd->event = wmi_evt->event; + qdf_log_timestamp_to_secs(wmi_evt->time, &secs, &usecs); + cmd->time = secs; + cmd->data = wmi_evt->data[0]; + break; + case WMI_CMD_HIST: + wmi_buf_ptr = (wmi_hang_data->hang_data + + wmi_hang_data->offset); + cmd = ((struct wmi_hang_data_fixed_param *)wmi_buf_ptr); + QDF_HANG_EVT_SET_HDR(&cmd->tlv_header, + HANG_EVT_TAG_WMI_CMD_HIST, + QDF_HANG_GET_STRUCT_TLVLEN(struct wmi_hang_data_fixed_param)); + wmi_cmd = &(((struct wmi_command_debug *)wmi_log->buf)[pos]); + cmd->event = wmi_cmd->command; + qdf_log_timestamp_to_secs(wmi_cmd->time, &secs, &usecs); + cmd->time = secs; + cmd->data = wmi_cmd->data[0]; + break; + } + if (pos == 0) + pos = wmi_ring_size - 1; + else + pos--; + wmi_hang_data->offset += total_len; + } +} + +static int wmi_recovery_notifier_call(struct notifier_block *block, + unsigned long state, + void *data) +{ + wmi_log_history(block, data, WMI_EVT_HIST); + wmi_log_history(block, data, WMI_CMD_HIST); + + return NOTIFY_OK; +} + +static qdf_notif_block wmi_recovery_notifier = { + .notif_block.notifier_call = wmi_recovery_notifier_call, +}; + +QDF_STATUS wmi_hang_event_notifier_register(struct wmi_unified *wmi_hdl) +{ + wmi_recovery_notifier.priv_data = wmi_hdl; + return qdf_hang_event_register_notifier(&wmi_recovery_notifier); +} + +QDF_STATUS wmi_hang_event_notifier_unregister(void) +{ + return qdf_hang_event_unregister_notifier(&wmi_recovery_notifier); +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_helper.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_helper.c new file mode 100644 index 0000000000000000000000000000000000000000..5414eb1681143b734f7cb6bb181d8c8204fba70e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_helper.c @@ -0,0 +1,1350 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wmi_tlv_platform.c" +#include "wmi_tlv_defs.h" +#include "wmi_version.h" +#include "qdf_module.h" + +#define WMITLV_GET_ATTRIB_NUM_TLVS 0xFFFFFFFF + +#define WMITLV_GET_CMDID(val) (val & 0x00FFFFFF) +#define WMITLV_GET_NUM_TLVS(val) ((val >> 24) & 0xFF) + +#define WMITLV_GET_TAGID(val) (val & 0x00000FFF) +#define WMITLV_GET_TAG_STRUCT_SIZE(val) ((val >> 12) & 0x000001FF) +#define WMITLV_GET_TAG_ARRAY_SIZE(val) ((val >> 21) & 0x000001FF) +#define WMITLV_GET_TAG_VARIED(val) ((val >> 30) & 0x00000001) + +#define WMITLV_SET_ATTRB0(id) ((WMITLV_GET_TAG_NUM_TLV_ATTRIB(id) << 24) | \ + (id & 0x00FFFFFF)) +#define WMITLV_SET_ATTRB1(tagID, tagStructSize, tagArraySize, tagVaried) \ + (((tagVaried&0x1)<<30) | ((tagArraySize&0x1FF)<<21) | \ + ((tagStructSize&0x1FF)<<12) | (tagID&0xFFF)) + +#define WMITLV_OP_SET_TLV_ATTRIB_macro(param_ptr, param_len, wmi_cmd_event_id, \ + elem_tlv_tag, elem_struc_type, elem_name, var_len, arr_size) \ + WMITLV_SET_ATTRB1(elem_tlv_tag, sizeof(elem_struc_type), arr_size, var_len), + +#define WMITLV_GET_CMD_EVT_ATTRB_LIST(id) \ + WMITLV_SET_ATTRB0(id), \ + WMITLV_TABLE(id,SET_TLV_ATTRIB, NULL, 0) + +uint32_t cmd_attr_list[] = { + WMITLV_ALL_CMD_LIST(WMITLV_GET_CMD_EVT_ATTRB_LIST) +}; + +uint32_t evt_attr_list[] = { + WMITLV_ALL_EVT_LIST(WMITLV_GET_CMD_EVT_ATTRB_LIST) +}; + +#ifdef NO_DYNAMIC_MEM_ALLOC +static wmitlv_cmd_param_info *g_wmi_static_cmd_param_info_buf; +uint32_t g_wmi_static_max_cmd_param_tlvs; +#endif + + +/** + * wmitlv_set_static_param_tlv_buf() - tlv helper function + * @param_tlv_buf: tlv buffer parameter + * @max_tlvs_accommodated: max no of tlv entries + * + * + * WMI TLV Helper function to set the static cmd_param_tlv structure + * and number of TLVs that can be accommodated in the structure. + * This function should be used when dynamic memory allocation is not + * supported. When dynamic memory allocation is not supported by any + * component then NO_DYNAMIC_MEMALLOC macro has to be defined in respective + * tlv_platform.c file. And respective component has to allocate + * cmd_param_tlv structure buffer to accommodate whatever number of TLV's. + * Both the buffer address and number of TLV's that can be accommodated in + * the buffer should be sent as arguments to this function. + * + * Return None + */ +void +wmitlv_set_static_param_tlv_buf(void *param_tlv_buf, + uint32_t max_tlvs_accommodated) +{ +#ifdef NO_DYNAMIC_MEM_ALLOC + g_wmi_static_cmd_param_info_buf = param_tlv_buf; + g_wmi_static_max_cmd_param_tlvs = max_tlvs_accommodated; +#endif +} + +/** + * wmitlv_get_attributes() - tlv helper function + * @is_cmd_id: boolean for command attribute + * @cmd_event_id: command event id + * @curr_tlv_order: tlv order + * @tlv_attr_ptr: pointer to tlv attribute + * + * + * WMI TLV Helper functions to find the attributes of the + * Command/Event TLVs. + * + * Return: 0 if success. Return >=1 if failure. + */ +static +uint32_t wmitlv_get_attributes(uint32_t is_cmd_id, uint32_t cmd_event_id, + uint32_t curr_tlv_order, + wmitlv_attributes_struc *tlv_attr_ptr) +{ + uint32_t i, base_index, num_tlvs, num_entries; + uint32_t *pAttrArrayList; + + if (is_cmd_id) { + pAttrArrayList = &cmd_attr_list[0]; + num_entries = QDF_ARRAY_SIZE(cmd_attr_list); + } else { + pAttrArrayList = &evt_attr_list[0]; + num_entries = QDF_ARRAY_SIZE(evt_attr_list); + } + + for (i = 0; i < num_entries; i++) { + num_tlvs = WMITLV_GET_NUM_TLVS(pAttrArrayList[i]); + if (WMITLV_GET_CMDID(cmd_event_id) == + WMITLV_GET_CMDID(pAttrArrayList[i])) { + tlv_attr_ptr->cmd_num_tlv = num_tlvs; + /* Return success from here when only number of TLVS for + * this command/event is required */ + if (curr_tlv_order == WMITLV_GET_ATTRIB_NUM_TLVS) { + wmi_tlv_print_verbose + ("%s: WMI TLV attribute definitions for %s:0x%x found; num_of_tlvs:%d\n", + __func__, (is_cmd_id ? "Cmd" : "Evt"), + cmd_event_id, num_tlvs); + return 0; + } + + /* Return failure if tlv_order is more than the expected + * number of TLVs */ + if (curr_tlv_order >= num_tlvs) { + wmi_tlv_print_error + ("%s: ERROR: TLV order %d greater than num_of_tlvs:%d for %s:0x%x\n", + __func__, curr_tlv_order, num_tlvs, + (is_cmd_id ? "Cmd" : "Evt"), cmd_event_id); + return 1; + } + + base_index = i + 1; /* index to first TLV attributes */ + wmi_tlv_print_verbose + ("%s: WMI TLV attributes for %s:0x%x tlv[%d]:0x%x\n", + __func__, (is_cmd_id ? "Cmd" : "Evt"), + cmd_event_id, curr_tlv_order, + pAttrArrayList[(base_index + curr_tlv_order)]); + tlv_attr_ptr->tag_order = curr_tlv_order; + tlv_attr_ptr->tag_id = + WMITLV_GET_TAGID(pAttrArrayList + [(base_index + curr_tlv_order)]); + tlv_attr_ptr->tag_struct_size = + WMITLV_GET_TAG_STRUCT_SIZE(pAttrArrayList + [(base_index + + curr_tlv_order)]); + tlv_attr_ptr->tag_varied_size = + WMITLV_GET_TAG_VARIED(pAttrArrayList + [(base_index + + curr_tlv_order)]); + tlv_attr_ptr->tag_array_size = + WMITLV_GET_TAG_ARRAY_SIZE(pAttrArrayList + [(base_index + + curr_tlv_order)]); + return 0; + } + i += num_tlvs; + } + + wmi_tlv_print_error + ("%s: ERROR: Didn't found WMI TLV attribute definitions for %s:0x%x\n", + __func__, (is_cmd_id ? "Cmd" : "Evt"), cmd_event_id); + return 1; +} + +/** + * wmitlv_check_tlv_params() - tlv helper function + * @os_handle: os context handle + * @param_struc_ptr: pointer to tlv structure + * @is_cmd_id: boolean for command attribute + * @wmi_cmd_event_id: command event id + * + * + * Helper Function to vaidate the prepared TLV's for + * an WMI event/command to be sent. + * + * Return: 0 if success. Return < 0 if failure. + */ +static int +wmitlv_check_tlv_params(void *os_handle, void *param_struc_ptr, + uint32_t param_buf_len, uint32_t is_cmd_id, + uint32_t wmi_cmd_event_id) +{ + wmitlv_attributes_struc attr_struct_ptr; + uint32_t buf_idx = 0; + uint32_t tlv_index = 0; + uint8_t *buf_ptr = (unsigned char *)param_struc_ptr; + uint32_t expected_num_tlvs, expected_tlv_len; + int32_t error = -1; + + /* Get the number of TLVs for this command/event */ + if (wmitlv_get_attributes + (is_cmd_id, wmi_cmd_event_id, WMITLV_GET_ATTRIB_NUM_TLVS, + &attr_struct_ptr) != 0) { + wmi_tlv_print_error + ("%s: ERROR: Couldn't get expected number of TLVs for Cmd=%d\n", + __func__, wmi_cmd_event_id); + goto Error_wmitlv_check_tlv_params; + } + + /* NOTE: the returned number of TLVs is in "attr_struct_ptr.cmd_num_tlv" */ + + expected_num_tlvs = attr_struct_ptr.cmd_num_tlv; + + while ((buf_idx + WMI_TLV_HDR_SIZE) <= param_buf_len) { + uint32_t curr_tlv_tag = + WMITLV_GET_TLVTAG(WMITLV_GET_HDR(buf_ptr)); + uint32_t curr_tlv_len = + WMITLV_GET_TLVLEN(WMITLV_GET_HDR(buf_ptr)); + + if ((buf_idx + WMI_TLV_HDR_SIZE + curr_tlv_len) > param_buf_len) { + wmi_tlv_print_error + ("%s: ERROR: Invalid TLV length for Cmd=%d Tag_order=%d buf_idx=%d Tag:%d Len:%d TotalLen:%d\n", + __func__, wmi_cmd_event_id, tlv_index, buf_idx, + curr_tlv_tag, curr_tlv_len, param_buf_len); + goto Error_wmitlv_check_tlv_params; + } + + /* Get the attributes of the TLV with the given order in "tlv_index" */ + wmi_tlv_OS_MEMZERO(&attr_struct_ptr, + sizeof(wmitlv_attributes_struc)); + if (wmitlv_get_attributes + (is_cmd_id, wmi_cmd_event_id, tlv_index, + &attr_struct_ptr) != 0) { + wmi_tlv_print_error + ("%s: ERROR: No TLV attributes found for Cmd=%d Tag_order=%d\n", + __func__, wmi_cmd_event_id, tlv_index); + goto Error_wmitlv_check_tlv_params; + } + + /* Found the TLV that we wanted */ + wmi_tlv_print_verbose("%s: [tlv %d]: tag=%d, len=%d\n", + __func__, tlv_index, curr_tlv_tag, + curr_tlv_len); + + /* Validating Tag ID order */ + if (curr_tlv_tag != attr_struct_ptr.tag_id) { + wmi_tlv_print_error + ("%s: ERROR: TLV has wrong tag in order for Cmd=0x%x. Given=%d, Expected=%d.\n", + __func__, wmi_cmd_event_id, curr_tlv_tag, + attr_struct_ptr.tag_id); + goto Error_wmitlv_check_tlv_params; + } + + /* Validate Tag length */ + /* Array TLVs length checking needs special handling */ + if ((curr_tlv_tag >= WMITLV_TAG_FIRST_ARRAY_ENUM) + && (curr_tlv_tag <= WMITLV_TAG_LAST_ARRAY_ENUM)) { + if (attr_struct_ptr.tag_varied_size == WMITLV_SIZE_FIX) { + /* Array size can't be invalid for fixed size Array TLV */ + if (WMITLV_ARR_SIZE_INVALID == + attr_struct_ptr.tag_array_size) { + wmi_tlv_print_error + ("%s: ERROR: array_size can't be invalid for Array TLV Cmd=0x%x Tag=%d\n", + __func__, wmi_cmd_event_id, + curr_tlv_tag); + goto Error_wmitlv_check_tlv_params; + } + + expected_tlv_len = + attr_struct_ptr.tag_array_size * + attr_struct_ptr.tag_struct_size; + /* Paddding is only required for Byte array Tlvs all other + * array tlv's should be aligned to 4 bytes during their + * definition */ + if (WMITLV_TAG_ARRAY_BYTE == + attr_struct_ptr.tag_id) { + expected_tlv_len = + roundup(expected_tlv_len, + sizeof(uint32_t)); + } + + if (curr_tlv_len != expected_tlv_len) { + wmi_tlv_print_error + ("%s: ERROR: TLV has wrong length for Cmd=0x%x. Tag_order=%d Tag=%d, Given_Len:%d Expected_Len=%d.\n", + __func__, wmi_cmd_event_id, + tlv_index, curr_tlv_tag, + curr_tlv_len, expected_tlv_len); + goto Error_wmitlv_check_tlv_params; + } + } else { + /* Array size should be invalid for variable size Array TLV */ + if (WMITLV_ARR_SIZE_INVALID != + attr_struct_ptr.tag_array_size) { + wmi_tlv_print_error + ("%s: ERROR: array_size should be invalid for Array TLV Cmd=0x%x Tag=%d\n", + __func__, wmi_cmd_event_id, + curr_tlv_tag); + goto Error_wmitlv_check_tlv_params; + } + + /* Incase of variable length TLV's, there is no expectation + * on the length field so do whatever checking you can + * depending on the TLV tag if TLV length is non-zero */ + if (curr_tlv_len != 0) { + /* Verify TLV length is aligned to the size of structure */ + if ((curr_tlv_len % + attr_struct_ptr.tag_struct_size) != + 0) { + wmi_tlv_print_error + ("%s: ERROR: TLV length %d for Cmd=0x%x is not aligned to size of structure(%d bytes)\n", + __func__, curr_tlv_len, + wmi_cmd_event_id, + attr_struct_ptr. + tag_struct_size); + goto Error_wmitlv_check_tlv_params; + } + + if (curr_tlv_tag == + WMITLV_TAG_ARRAY_STRUC) { + uint8_t *tlv_buf_ptr = NULL; + uint32_t in_tlv_len; + uint32_t idx; + uint32_t num_of_elems; + + /* Verify length of inner TLVs */ + + num_of_elems = + curr_tlv_len / + attr_struct_ptr. + tag_struct_size; + /* Set tlv_buf_ptr to the first inner TLV address */ + tlv_buf_ptr = + buf_ptr + WMI_TLV_HDR_SIZE; + for (idx = 0; + idx < num_of_elems; + idx++) { + in_tlv_len = + WMITLV_GET_TLVLEN + (WMITLV_GET_HDR + (tlv_buf_ptr)); + if ((in_tlv_len + + WMI_TLV_HDR_SIZE) + != + attr_struct_ptr. + tag_struct_size) { + wmi_tlv_print_error + ("%s: ERROR: TLV has wrong length for Cmd=0x%x. Tag_order=%d Tag=%d, Given_Len:%zu Expected_Len=%d.\n", + __func__, + wmi_cmd_event_id, + tlv_index, + curr_tlv_tag, + (in_tlv_len + + + WMI_TLV_HDR_SIZE), + attr_struct_ptr. + tag_struct_size); + goto Error_wmitlv_check_tlv_params; + } + tlv_buf_ptr += + in_tlv_len + + WMI_TLV_HDR_SIZE; + } + } else + if ((curr_tlv_tag == + WMITLV_TAG_ARRAY_UINT32) + || (curr_tlv_tag == + WMITLV_TAG_ARRAY_BYTE) + || (curr_tlv_tag == + WMITLV_TAG_ARRAY_FIXED_STRUC)) { + /* Nothing to verify here */ + } else { + wmi_tlv_print_error + ("%s ERROR Need to handle the Array tlv %d for variable length for Cmd=0x%x\n", + __func__, + attr_struct_ptr.tag_id, + wmi_cmd_event_id); + goto Error_wmitlv_check_tlv_params; + } + } + } + } else { + /* Non-array TLV. */ + + if ((curr_tlv_len + WMI_TLV_HDR_SIZE) != + attr_struct_ptr.tag_struct_size) { + wmi_tlv_print_error + ("%s: ERROR: TLV has wrong length for Cmd=0x%x. Given=%zu, Expected=%d.\n", + __func__, wmi_cmd_event_id, + (curr_tlv_len + WMI_TLV_HDR_SIZE), + attr_struct_ptr.tag_struct_size); + goto Error_wmitlv_check_tlv_params; + } + } + + /* Check TLV length is aligned to 4 bytes or not */ + if ((curr_tlv_len % sizeof(uint32_t)) != 0) { + wmi_tlv_print_error + ("%s: ERROR: TLV length %d for Cmd=0x%x is not aligned to %zu bytes\n", + __func__, curr_tlv_len, wmi_cmd_event_id, + sizeof(uint32_t)); + goto Error_wmitlv_check_tlv_params; + } + + tlv_index++; + buf_ptr += curr_tlv_len + WMI_TLV_HDR_SIZE; + buf_idx += curr_tlv_len + WMI_TLV_HDR_SIZE; + } + + if (tlv_index != expected_num_tlvs) { + wmi_tlv_print_verbose + ("%s: INFO: Less number of TLVs filled for Cmd=0x%x Filled %d Expected=%d\n", + __func__, wmi_cmd_event_id, tlv_index, expected_num_tlvs); + } + + return 0; +Error_wmitlv_check_tlv_params: + return error; +} + +/** + * wmitlv_check_event_tlv_params() - tlv helper function + * @os_handle: os context handle + * @param_struc_ptr: pointer to tlv structure + * @is_cmd_id: boolean for command attribute + * @wmi_cmd_event_id: command event id + * + * + * Helper Function to vaidate the prepared TLV's for + * an WMI event/command to be sent. + * + * Return: 0 if success. Return < 0 if failure. + */ +int +wmitlv_check_event_tlv_params(void *os_handle, void *param_struc_ptr, + uint32_t param_buf_len, uint32_t wmi_cmd_event_id) +{ + uint32_t is_cmd_id = 0; + + return wmitlv_check_tlv_params + (os_handle, param_struc_ptr, param_buf_len, is_cmd_id, + wmi_cmd_event_id); +} + +/** + * wmitlv_check_command_tlv_params() - tlv helper function + * @os_handle: os context handle + * @param_struc_ptr: pointer to tlv structure + * @is_cmd_id: boolean for command attribute + * @wmi_cmd_event_id: command event id + * + * + * Helper Function to vaidate the prepared TLV's for + * an WMI event/command to be sent. + * + * Return: 0 if success. Return < 0 if failure. + */ +int +wmitlv_check_command_tlv_params(void *os_handle, void *param_struc_ptr, + uint32_t param_buf_len, + uint32_t wmi_cmd_event_id) +{ + uint32_t is_cmd_id = 1; + + return wmitlv_check_tlv_params + (os_handle, param_struc_ptr, param_buf_len, is_cmd_id, + wmi_cmd_event_id); +} +qdf_export_symbol(wmitlv_check_command_tlv_params); + +/** + * wmitlv_check_and_pad_tlvs() - tlv helper function + * @os_handle: os context handle + * @param_buf_len: length of tlv parameter + * @param_struc_ptr: pointer to tlv structure + * @is_cmd_id: boolean for command attribute + * @wmi_cmd_event_id: command event id + * @wmi_cmd_struct_ptr: wmi command structure + * + * + * vaidate the TLV's coming for an event/command and + * also pads data to TLV's if necessary + * + * Return: 0 if success. Return < 0 if failure. + */ +static int +wmitlv_check_and_pad_tlvs(void *os_handle, void *param_struc_ptr, + uint32_t param_buf_len, uint32_t is_cmd_id, + uint32_t wmi_cmd_event_id, void **wmi_cmd_struct_ptr) +{ + wmitlv_attributes_struc attr_struct_ptr; + uint32_t buf_idx = 0; + uint32_t tlv_index = 0; + uint32_t num_of_elems = 0; + int tlv_size_diff = 0; + uint8_t *buf_ptr = (unsigned char *)param_struc_ptr; + wmitlv_cmd_param_info *cmd_param_tlvs_ptr = NULL; + uint32_t remaining_expected_tlvs = 0xFFFFFFFF; + uint32_t len_wmi_cmd_struct_buf; + uint32_t free_buf_len; + int32_t error = -1; + + /* Get the number of TLVs for this command/event */ + if (wmitlv_get_attributes + (is_cmd_id, wmi_cmd_event_id, WMITLV_GET_ATTRIB_NUM_TLVS, + &attr_struct_ptr) != 0) { + wmi_tlv_print_error + ("%s: ERROR: Couldn't get expected number of TLVs for Cmd=%d\n", + __func__, wmi_cmd_event_id); + return error; + } + /* NOTE: the returned number of TLVs is in "attr_struct_ptr.cmd_num_tlv" */ + + if (param_buf_len < WMI_TLV_HDR_SIZE) { + wmi_tlv_print_error + ("%s: ERROR: Incorrect param buf length passed\n", + __func__); + return error; + } + + /* Create base structure of format wmi_cmd_event_id##_param_tlvs */ + len_wmi_cmd_struct_buf = + attr_struct_ptr.cmd_num_tlv * sizeof(wmitlv_cmd_param_info); +#ifndef NO_DYNAMIC_MEM_ALLOC + /* Dynamic memory allocation supported */ + wmi_tlv_os_mem_alloc(os_handle, *wmi_cmd_struct_ptr, + len_wmi_cmd_struct_buf); +#else + /* Dynamic memory allocation is not supported. Use the buffer + * g_wmi_static_cmd_param_info_buf, which should be set using + * wmi_tlv_set_static_param_tlv_buf(), + * for base structure of format wmi_cmd_event_id##_param_tlvs */ + *wmi_cmd_struct_ptr = g_wmi_static_cmd_param_info_buf; + if (attr_struct_ptr.cmd_num_tlv > g_wmi_static_max_cmd_param_tlvs) { + /* Error: Expecting more TLVs that accommodated for static structure */ + wmi_tlv_print_error + ("%s: Error: Expecting more TLVs that accommodated for static structure. Expected:%d Accomodated:%d\n", + __func__, attr_struct_ptr.cmd_num_tlv, + g_wmi_static_max_cmd_param_tlvs); + return error; + } +#endif + if (!*wmi_cmd_struct_ptr) { + /* Error: unable to alloc memory */ + wmi_tlv_print_error + ("%s: Error: unable to alloc memory (size=%d) for TLV\n", + __func__, len_wmi_cmd_struct_buf); + return error; + } + + cmd_param_tlvs_ptr = (wmitlv_cmd_param_info *) *wmi_cmd_struct_ptr; + wmi_tlv_OS_MEMZERO(cmd_param_tlvs_ptr, len_wmi_cmd_struct_buf); + remaining_expected_tlvs = attr_struct_ptr.cmd_num_tlv; + + while (((buf_idx + WMI_TLV_HDR_SIZE) <= param_buf_len) + && (remaining_expected_tlvs)) { + uint32_t curr_tlv_tag = + WMITLV_GET_TLVTAG(WMITLV_GET_HDR(buf_ptr)); + uint32_t curr_tlv_len = + WMITLV_GET_TLVLEN(WMITLV_GET_HDR(buf_ptr)); + int num_padding_bytes = 0; + + free_buf_len = param_buf_len - (buf_idx + WMI_TLV_HDR_SIZE); + if (curr_tlv_len > free_buf_len) { + wmi_tlv_print_error("%s: TLV length overflow", + __func__); + goto Error_wmitlv_check_and_pad_tlvs; + } + + /* Get the attributes of the TLV with the given order in "tlv_index" */ + wmi_tlv_OS_MEMZERO(&attr_struct_ptr, + sizeof(wmitlv_attributes_struc)); + if (wmitlv_get_attributes + (is_cmd_id, wmi_cmd_event_id, tlv_index, + &attr_struct_ptr) != 0) { + wmi_tlv_print_error + ("%s: ERROR: No TLV attributes found for Cmd=%d Tag_order=%d\n", + __func__, wmi_cmd_event_id, tlv_index); + goto Error_wmitlv_check_and_pad_tlvs; + } + + /* Found the TLV that we wanted */ + wmi_tlv_print_verbose("%s: [tlv %d]: tag=%d, len=%d\n", + __func__, tlv_index, curr_tlv_tag, + curr_tlv_len); + + /* Validating Tag order */ + if (curr_tlv_tag != attr_struct_ptr.tag_id) { + wmi_tlv_print_error + ("%s: ERROR: TLV has wrong tag in order for Cmd=0x%x. Given=%d, Expected=%d.\n", + __func__, wmi_cmd_event_id, curr_tlv_tag, + attr_struct_ptr.tag_id); + goto Error_wmitlv_check_and_pad_tlvs; + } + + if ((curr_tlv_tag >= WMITLV_TAG_FIRST_ARRAY_ENUM) + && (curr_tlv_tag <= WMITLV_TAG_LAST_ARRAY_ENUM)) { + /* Current Tag is an array of some kind. */ + /* Skip the TLV header of this array */ + buf_ptr += WMI_TLV_HDR_SIZE; + buf_idx += WMI_TLV_HDR_SIZE; + } else { + /* Non-array TLV. */ + curr_tlv_len += WMI_TLV_HDR_SIZE; + } + + if (attr_struct_ptr.tag_varied_size == WMITLV_SIZE_FIX) { + /* This TLV is fixed length */ + if (WMITLV_ARR_SIZE_INVALID == + attr_struct_ptr.tag_array_size) { + tlv_size_diff = + curr_tlv_len - + attr_struct_ptr.tag_struct_size; + num_of_elems = + (curr_tlv_len > WMI_TLV_HDR_SIZE) ? 1 : 0; + } else { + tlv_size_diff = + curr_tlv_len - + (attr_struct_ptr.tag_struct_size * + attr_struct_ptr.tag_array_size); + num_of_elems = attr_struct_ptr.tag_array_size; + } + } else { + /* This TLV has a variable number of elements */ + if (WMITLV_TAG_ARRAY_STRUC == attr_struct_ptr.tag_id) { + uint32_t in_tlv_len = 0; + + if (curr_tlv_len != 0) { + in_tlv_len = + WMITLV_GET_TLVLEN(WMITLV_GET_HDR + (buf_ptr)); + in_tlv_len += WMI_TLV_HDR_SIZE; + if (in_tlv_len > curr_tlv_len) { + wmi_tlv_print_error("%s: Invalid in_tlv_len=%d", + __func__, + in_tlv_len); + goto + Error_wmitlv_check_and_pad_tlvs; + } + tlv_size_diff = + in_tlv_len - + attr_struct_ptr.tag_struct_size; + num_of_elems = + curr_tlv_len / in_tlv_len; + wmi_tlv_print_verbose + ("%s: WARN: TLV array of structures in_tlv_len=%d struct_size:%d diff:%d num_of_elems=%d \n", + __func__, in_tlv_len, + attr_struct_ptr.tag_struct_size, + tlv_size_diff, num_of_elems); + } else { + tlv_size_diff = 0; + num_of_elems = 0; + } + } else + if ((WMITLV_TAG_ARRAY_UINT32 == + attr_struct_ptr.tag_id) + || (WMITLV_TAG_ARRAY_BYTE == + attr_struct_ptr.tag_id) + || (WMITLV_TAG_ARRAY_FIXED_STRUC == + attr_struct_ptr.tag_id)) { + tlv_size_diff = 0; + num_of_elems = + curr_tlv_len / + attr_struct_ptr.tag_struct_size; + } else { + wmi_tlv_print_error + ("%s ERROR Need to handle this tag ID for variable length %d\n", + __func__, attr_struct_ptr.tag_id); + goto Error_wmitlv_check_and_pad_tlvs; + } + } + + if ((WMITLV_TAG_ARRAY_STRUC == attr_struct_ptr.tag_id) && + (tlv_size_diff != 0)) { + void *new_tlv_buf = NULL; + uint8_t *tlv_buf_ptr = NULL; + uint32_t in_tlv_len; + uint32_t i; + + if (attr_struct_ptr.tag_varied_size == WMITLV_SIZE_FIX) { + /* This is not allowed. The tag WMITLV_TAG_ARRAY_STRUC can + * only be used with variable-length structure array + * should not have a fixed number of elements (contradicting). + * Use WMITLV_TAG_ARRAY_FIXED_STRUC tag for fixed size + * structure array(where structure never change without + * breaking compatibility) */ + wmi_tlv_print_error + ("%s: ERROR: TLV (tag=%d) should be variable-length and not fixed length\n", + __func__, curr_tlv_tag); + goto Error_wmitlv_check_and_pad_tlvs; + } + + /* Warning: Needs to allocate a larger structure and pad with zeros */ + wmi_tlv_print_verbose + ("%s: WARN: TLV array of structures needs padding. tlv_size_diff=%d\n", + __func__, tlv_size_diff); + + /* incoming structure length */ + in_tlv_len = + WMITLV_GET_TLVLEN(WMITLV_GET_HDR(buf_ptr)) + + WMI_TLV_HDR_SIZE; +#ifndef NO_DYNAMIC_MEM_ALLOC + wmi_tlv_os_mem_alloc(os_handle, new_tlv_buf, + (num_of_elems * + attr_struct_ptr.tag_struct_size)); + if (!new_tlv_buf) { + /* Error: unable to alloc memory */ + wmi_tlv_print_error + ("%s: Error: unable to alloc memory (size=%d) for padding the TLV array %d\n", + __func__, + (num_of_elems * + attr_struct_ptr.tag_struct_size), + curr_tlv_tag); + goto Error_wmitlv_check_and_pad_tlvs; + } + + wmi_tlv_OS_MEMZERO(new_tlv_buf, + (num_of_elems * + attr_struct_ptr.tag_struct_size)); + tlv_buf_ptr = (uint8_t *) new_tlv_buf; + for (i = 0; i < num_of_elems; i++) { + if (tlv_size_diff > 0) { + /* Incoming structure size is greater than expected + * structure size. so copy the number of bytes equal + * to expected structure size */ + wmi_tlv_OS_MEMCPY(tlv_buf_ptr, + (void *)(buf_ptr + + i * + in_tlv_len), + attr_struct_ptr. + tag_struct_size); + } else { + /* Incoming structure size is smaller than expected + * structure size. so copy the number of bytes equal + * to incoming structure size */ + wmi_tlv_OS_MEMCPY(tlv_buf_ptr, + (void *)(buf_ptr + + i * + in_tlv_len), + in_tlv_len); + } + tlv_buf_ptr += attr_struct_ptr.tag_struct_size; + } +#else + { + uint8_t *src_addr; + uint8_t *dst_addr; + uint32_t buf_mov_len; + + if (tlv_size_diff < 0) { + /* Incoming structure size is smaller than expected size + * then this needs padding for each element in the array */ + + /* Find amount of bytes to be padded for one element */ + num_padding_bytes = tlv_size_diff * -1; + + /* Move subsequent TLVs by number of bytes to be padded + * for all elements */ + if ((free_buf_len < + attr_struct_ptr.tag_struct_size * + num_of_elems) || + (param_buf_len < + buf_idx + curr_tlv_len + + num_padding_bytes * num_of_elems)) { + wmi_tlv_print_error("%s: Insufficent buffer\n", + __func__); + goto + Error_wmitlv_check_and_pad_tlvs; + } else { + src_addr = + buf_ptr + curr_tlv_len; + dst_addr = + buf_ptr + curr_tlv_len + + (num_padding_bytes * + num_of_elems); + buf_mov_len = + param_buf_len - (buf_idx + + curr_tlv_len); + + wmi_tlv_OS_MEMMOVE(dst_addr, + src_addr, + buf_mov_len); + } + + /* Move subsequent elements of array down by number of + * bytes to be padded for one element and alse set + * padding bytes to zero */ + tlv_buf_ptr = buf_ptr; + for (i = 0; i < num_of_elems - 1; i++) { + src_addr = + tlv_buf_ptr + in_tlv_len; + if (i != (num_of_elems - 1)) { + dst_addr = + tlv_buf_ptr + + in_tlv_len + + num_padding_bytes; + buf_mov_len = + curr_tlv_len - + ((i + + 1) * in_tlv_len); + + wmi_tlv_OS_MEMMOVE + (dst_addr, src_addr, + buf_mov_len); + } + + /* Set the padding bytes to zeroes */ + wmi_tlv_OS_MEMZERO(src_addr, + num_padding_bytes); + + tlv_buf_ptr += + attr_struct_ptr. + tag_struct_size; + } + src_addr = tlv_buf_ptr + in_tlv_len; + wmi_tlv_OS_MEMZERO(src_addr, + num_padding_bytes); + + /* Update the number of padding bytes to total number + * of bytes padded for all elements in the array */ + num_padding_bytes = + num_padding_bytes * num_of_elems; + + new_tlv_buf = buf_ptr; + } else { + /* Incoming structure size is greater than expected size + * then this needs shrinking for each element in the array */ + + /* Find amount of bytes to be shrunk for one element */ + num_padding_bytes = tlv_size_diff * -1; + + /* Move subsequent elements of array up by number of bytes + * to be shrunk for one element */ + tlv_buf_ptr = buf_ptr; + for (i = 0; i < (num_of_elems - 1); i++) { + src_addr = + tlv_buf_ptr + in_tlv_len; + dst_addr = + tlv_buf_ptr + in_tlv_len + + num_padding_bytes; + buf_mov_len = + curr_tlv_len - + ((i + 1) * in_tlv_len); + + wmi_tlv_OS_MEMMOVE(dst_addr, + src_addr, + buf_mov_len); + + tlv_buf_ptr += + attr_struct_ptr. + tag_struct_size; + } + + /* Move subsequent TLVs by number of bytes to be shrunk + * for all elements */ + if (param_buf_len > + (buf_idx + curr_tlv_len)) { + src_addr = + buf_ptr + curr_tlv_len; + dst_addr = + buf_ptr + curr_tlv_len + + (num_padding_bytes * + num_of_elems); + buf_mov_len = + param_buf_len - (buf_idx + + curr_tlv_len); + + wmi_tlv_OS_MEMMOVE(dst_addr, + src_addr, + buf_mov_len); + } + + /* Update the number of padding bytes to total number of + * bytes shrunk for all elements in the array */ + num_padding_bytes = + num_padding_bytes * num_of_elems; + + new_tlv_buf = buf_ptr; + } + } +#endif + cmd_param_tlvs_ptr[tlv_index].tlv_ptr = new_tlv_buf; + cmd_param_tlvs_ptr[tlv_index].num_elements = + num_of_elems; + cmd_param_tlvs_ptr[tlv_index].buf_is_allocated = 1; /* Indicates that buffer is allocated */ + + } else if (tlv_size_diff >= 0) { + /* Warning: some parameter truncation */ + if (tlv_size_diff > 0) { + wmi_tlv_print_verbose + ("%s: WARN: TLV truncated. tlv_size_diff=%d, curr_tlv_len=%d\n", + __func__, tlv_size_diff, curr_tlv_len); + } + /* TODO: this next line needs more comments and explanation */ + cmd_param_tlvs_ptr[tlv_index].tlv_ptr = + (attr_struct_ptr.tag_varied_size + && !curr_tlv_len) ? NULL : (void *)buf_ptr; + cmd_param_tlvs_ptr[tlv_index].num_elements = + num_of_elems; + cmd_param_tlvs_ptr[tlv_index].buf_is_allocated = 0; /* Indicates that buffer is not allocated */ + } else { + void *new_tlv_buf = NULL; + + /* Warning: Needs to allocate a larger structure and pad with zeros */ + wmi_tlv_print_verbose + ("%s: WARN: TLV needs padding. tlv_size_diff=%d\n", + __func__, tlv_size_diff); +#ifndef NO_DYNAMIC_MEM_ALLOC + /* Dynamic memory allocation is supported */ + wmi_tlv_os_mem_alloc(os_handle, new_tlv_buf, + (curr_tlv_len - tlv_size_diff)); + if (!new_tlv_buf) { + /* Error: unable to alloc memory */ + wmi_tlv_print_error + ("%s: Error: unable to alloc memory (size=%d) for padding the TLV %d\n", + __func__, (curr_tlv_len - tlv_size_diff), + curr_tlv_tag); + goto Error_wmitlv_check_and_pad_tlvs; + } + + wmi_tlv_OS_MEMZERO(new_tlv_buf, + (curr_tlv_len - tlv_size_diff)); + wmi_tlv_OS_MEMCPY(new_tlv_buf, (void *)buf_ptr, + curr_tlv_len); +#else + /* Dynamic memory allocation is not supported. Padding has + * to be done with in the existing buffer assuming we have + * enough space to grow */ + { + /* Note: tlv_size_diff is a value less than zero */ + /* Move the Subsequent TLVs by amount of bytes needs to be padded */ + uint8_t *src_addr; + uint8_t *dst_addr; + uint32_t src_len; + + num_padding_bytes = (tlv_size_diff * -1); + + src_addr = buf_ptr + curr_tlv_len; + dst_addr = + buf_ptr + curr_tlv_len + num_padding_bytes; + src_len = + param_buf_len - (buf_idx + curr_tlv_len); + + wmi_tlv_OS_MEMMOVE(dst_addr, src_addr, src_len); + + /* Set the padding bytes to zeroes */ + wmi_tlv_OS_MEMZERO(src_addr, num_padding_bytes); + + new_tlv_buf = buf_ptr; + } +#endif + cmd_param_tlvs_ptr[tlv_index].tlv_ptr = new_tlv_buf; + cmd_param_tlvs_ptr[tlv_index].num_elements = + num_of_elems; + cmd_param_tlvs_ptr[tlv_index].buf_is_allocated = 1; /* Indicates that buffer is allocated */ + } + + tlv_index++; + remaining_expected_tlvs--; + buf_ptr += curr_tlv_len + num_padding_bytes; + buf_idx += curr_tlv_len + num_padding_bytes; + } + + return 0; +Error_wmitlv_check_and_pad_tlvs: + if (is_cmd_id) { + wmitlv_free_allocated_command_tlvs(wmi_cmd_event_id, + wmi_cmd_struct_ptr); + } else { + wmitlv_free_allocated_event_tlvs(wmi_cmd_event_id, + wmi_cmd_struct_ptr); + } + *wmi_cmd_struct_ptr = NULL; + return error; +} + +/** + * wmitlv_check_and_pad_event_tlvs() - tlv helper function + * @os_handle: os context handle + * @param_struc_ptr: pointer to tlv structure + * @param_buf_len: length of tlv parameter + * @wmi_cmd_event_id: command event id + * @wmi_cmd_struct_ptr: wmi command structure + * + * + * validate and pad(if necessary) for incoming WMI Event TLVs + * + * Return: 0 if success. Return < 0 if failure. + */ +int +wmitlv_check_and_pad_event_tlvs(void *os_handle, void *param_struc_ptr, + uint32_t param_buf_len, + uint32_t wmi_cmd_event_id, + void **wmi_cmd_struct_ptr) +{ + uint32_t is_cmd_id = 0; + return wmitlv_check_and_pad_tlvs + (os_handle, param_struc_ptr, param_buf_len, is_cmd_id, + wmi_cmd_event_id, wmi_cmd_struct_ptr); +} +qdf_export_symbol(wmitlv_check_and_pad_event_tlvs); + +/** + * wmitlv_check_and_pad_command_tlvs() - tlv helper function + * @os_handle: os context handle + * @param_struc_ptr: pointer to tlv structure + * @param_buf_len: length of tlv parameter + * @wmi_cmd_event_id: command event id + * @wmi_cmd_struct_ptr: wmi command structure + * + * + * validate and pad(if necessary) for incoming WMI Command TLVs + * + * Return: 0 if success. Return < 0 if failure. + */ +int +wmitlv_check_and_pad_command_tlvs(void *os_handle, void *param_struc_ptr, + uint32_t param_buf_len, + uint32_t wmi_cmd_event_id, + void **wmi_cmd_struct_ptr) +{ + uint32_t is_cmd_id = 1; + return wmitlv_check_and_pad_tlvs + (os_handle, param_struc_ptr, param_buf_len, is_cmd_id, + wmi_cmd_event_id, wmi_cmd_struct_ptr); +} + +/** + * wmitlv_free_allocated_tlvs() - tlv helper function + * @is_cmd_id: bollean to check if cmd or event tlv + * @cmd_event_id: command or event id + * @wmi_cmd_struct_ptr: wmi command structure + * + * + * free any allocated buffers for WMI Event/Command TLV processing + * + * Return: none + */ +static void wmitlv_free_allocated_tlvs(uint32_t is_cmd_id, + uint32_t cmd_event_id, + void **wmi_cmd_struct_ptr) +{ + void *ptr = *wmi_cmd_struct_ptr; + + if (!ptr) { + wmi_tlv_print_error("%s: Nothing to free for CMD/Event 0x%x\n", + __func__, cmd_event_id); + return; + } +#ifndef NO_DYNAMIC_MEM_ALLOC + +/* macro to free that previously allocated memory for this TLV. When (op==FREE_TLV_ELEM). */ +#define WMITLV_OP_FREE_TLV_ELEM_macro(param_ptr, param_len, wmi_cmd_event_id, elem_tlv_tag, elem_struc_type, elem_name, var_len, arr_size) \ + if ((((WMITLV_TYPEDEF_STRUCT_PARAMS_TLVS(wmi_cmd_event_id) *)ptr)->WMITLV_FIELD_BUF_IS_ALLOCATED(elem_name)) && \ + (((WMITLV_TYPEDEF_STRUCT_PARAMS_TLVS(wmi_cmd_event_id) *)ptr)->elem_name)) \ + { \ + wmi_tlv_os_mem_free(((WMITLV_TYPEDEF_STRUCT_PARAMS_TLVS(wmi_cmd_event_id) *)ptr)->elem_name); \ + } + +#define WMITLV_FREE_TLV_ELEMS(id) \ +case id: \ +{ \ + WMITLV_TABLE(id, FREE_TLV_ELEM, NULL, 0) \ +} \ +break; + + if (is_cmd_id) { + switch (cmd_event_id) { + WMITLV_ALL_CMD_LIST(WMITLV_FREE_TLV_ELEMS); + default: + wmi_tlv_print_error + ("%s: ERROR: Cannot find the TLVs attributes for Cmd=0x%x, %d\n", + __func__, cmd_event_id, cmd_event_id); + } + } else { + switch (cmd_event_id) { + WMITLV_ALL_EVT_LIST(WMITLV_FREE_TLV_ELEMS); + default: + wmi_tlv_print_error + ("%s: ERROR: Cannot find the TLVs attributes for Cmd=0x%x, %d\n", + __func__, cmd_event_id, cmd_event_id); + } + } + + wmi_tlv_os_mem_free(*wmi_cmd_struct_ptr); + *wmi_cmd_struct_ptr = NULL; +#endif + + return; +} + +/** + * wmitlv_free_allocated_command_tlvs() - tlv helper function + * @cmd_event_id: command or event id + * @wmi_cmd_struct_ptr: wmi command structure + * + * + * free any allocated buffers for WMI Event/Command TLV processing + * + * Return: none + */ +void wmitlv_free_allocated_command_tlvs(uint32_t cmd_event_id, + void **wmi_cmd_struct_ptr) +{ + wmitlv_free_allocated_tlvs(1, cmd_event_id, wmi_cmd_struct_ptr); +} + +/** + * wmitlv_free_allocated_event_tlvs() - tlv helper function + * @cmd_event_id: command or event id + * @wmi_cmd_struct_ptr: wmi command structure + * + * + * free any allocated buffers for WMI Event/Command TLV processing + * + * Return: none + */ +void wmitlv_free_allocated_event_tlvs(uint32_t cmd_event_id, + void **wmi_cmd_struct_ptr) +{ + wmitlv_free_allocated_tlvs(0, cmd_event_id, wmi_cmd_struct_ptr); +} +qdf_export_symbol(wmitlv_free_allocated_event_tlvs); + +/** + * wmi_versions_are_compatible() - tlv helper function + * @vers1: host wmi version + * @vers2: target wmi version + * + * + * check if two given wmi versions are compatible + * + * Return: none + */ +int +wmi_versions_are_compatible(wmi_abi_version *vers1, wmi_abi_version *vers2) +{ + if ((vers1->abi_version_ns_0 != vers2->abi_version_ns_0) || + (vers1->abi_version_ns_1 != vers2->abi_version_ns_1) || + (vers1->abi_version_ns_2 != vers2->abi_version_ns_2) || + (vers1->abi_version_ns_3 != vers2->abi_version_ns_3)) { + /* The namespaces are different. Incompatible. */ + return 0; + } + + if (vers1->abi_version_0 != vers2->abi_version_0) { + /* The major or minor versions are different. Incompatible */ + return 0; + } + /* We ignore the build version */ + return 1; +} + +/** + * wmi_versions_can_downgrade() - tlv helper function + * @version_whitelist_table: version table + * @my_vers: host version + * @opp_vers: target version + * @out_vers: downgraded version + * + * + * check if target wmi version can be downgraded + * + * Return: 0 if success. Return < 0 if failure. + */ +static int +wmi_versions_can_downgrade(int num_whitelist, + wmi_whitelist_version_info *version_whitelist_table, + wmi_abi_version *my_vers, + wmi_abi_version *opp_vers, + wmi_abi_version *out_vers) +{ + uint8_t can_try_to_downgrade; + uint32_t my_major_vers = WMI_VER_GET_MAJOR(my_vers->abi_version_0); + uint32_t my_minor_vers = WMI_VER_GET_MINOR(my_vers->abi_version_0); + uint32_t opp_major_vers = WMI_VER_GET_MAJOR(opp_vers->abi_version_0); + uint32_t opp_minor_vers = WMI_VER_GET_MINOR(opp_vers->abi_version_0); + uint32_t downgraded_minor_vers; + + if ((my_vers->abi_version_ns_0 != opp_vers->abi_version_ns_0) || + (my_vers->abi_version_ns_1 != opp_vers->abi_version_ns_1) || + (my_vers->abi_version_ns_2 != opp_vers->abi_version_ns_2) || + (my_vers->abi_version_ns_3 != opp_vers->abi_version_ns_3)) { + /* The namespaces are different. Incompatible. */ + can_try_to_downgrade = false; + } else if (my_major_vers != opp_major_vers) { + /* Major version is different. Incompatible and cannot downgrade. */ + can_try_to_downgrade = false; + } else { + /* Same major version. */ + + if (my_minor_vers < opp_minor_vers) { + /* Opposite party is newer. Incompatible and cannot downgrade. */ + can_try_to_downgrade = false; + } else if (my_minor_vers > opp_minor_vers) { + /* Opposite party is older. Check whitelist if we can downgrade */ + can_try_to_downgrade = true; + } else { + /* Same version */ + wmi_tlv_OS_MEMCPY(out_vers, my_vers, + sizeof(wmi_abi_version)); + return 1; + } + } + + if (!can_try_to_downgrade) { + wmi_tlv_print_error("%s: Warning: incompatible WMI version.\n", + __func__); + wmi_tlv_OS_MEMCPY(out_vers, my_vers, sizeof(wmi_abi_version)); + return 0; + } + /* Try to see we can downgrade the supported version */ + downgraded_minor_vers = my_minor_vers; + while (downgraded_minor_vers > opp_minor_vers) { + uint8_t downgraded = false; + int i; + + for (i = 0; i < num_whitelist; i++) { + if (version_whitelist_table[i].major != my_major_vers) { + continue; /* skip */ + } + if ((version_whitelist_table[i].namespace_0 != + my_vers->abi_version_ns_0) + || (version_whitelist_table[i].namespace_1 != + my_vers->abi_version_ns_1) + || (version_whitelist_table[i].namespace_2 != + my_vers->abi_version_ns_2) + || (version_whitelist_table[i].namespace_3 != + my_vers->abi_version_ns_3)) { + continue; /* skip */ + } + if (version_whitelist_table[i].minor == + downgraded_minor_vers) { + /* Found the next version that I can downgrade */ + wmi_tlv_print_error + ("%s: Note: found a whitelist entry to downgrade. wh. list ver: %d,%d,0x%x 0x%x 0x%x 0x%x\n", + __func__, version_whitelist_table[i].major, + version_whitelist_table[i].minor, + version_whitelist_table[i].namespace_0, + version_whitelist_table[i].namespace_1, + version_whitelist_table[i].namespace_2, + version_whitelist_table[i].namespace_3); + downgraded_minor_vers--; + downgraded = true; + break; + } + } + if (!downgraded) { + break; /* Done since we did not find any whitelist to downgrade version */ + } + } + wmi_tlv_OS_MEMCPY(out_vers, my_vers, sizeof(wmi_abi_version)); + out_vers->abi_version_0 = + WMI_VER_GET_VERSION_0(my_major_vers, downgraded_minor_vers); + if (downgraded_minor_vers != opp_minor_vers) { + wmi_tlv_print_error + ("%s: Warning: incompatible WMI version and cannot downgrade.\n", + __func__); + return 0; /* Incompatible */ + } else { + return 1; /* Compatible */ + } +} + +/** + * wmi_cmp_and_set_abi_version() - tlv helper function + * @version_whitelist_table: version table + * @my_vers: host version + * @opp_vers: target version + * @out_vers: downgraded version + * + * This routine will compare and set the WMI ABI version. + * First, compare my version with the opposite side's version. + * If incompatible, then check the whitelist to see if our side can downgrade. + * Finally, fill in the final ABI version into the output, out_vers. + * Return 0 if the output version is compatible + * Else return 1 if the output version is incompatible + * + * Return: 0 if the output version is compatible else < 0. + */ +int +wmi_cmp_and_set_abi_version(int num_whitelist, + wmi_whitelist_version_info * + version_whitelist_table, + struct _wmi_abi_version *my_vers, + struct _wmi_abi_version *opp_vers, + struct _wmi_abi_version *out_vers) +{ + wmi_tlv_print_verbose + ("%s: Our WMI Version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n", + __func__, WMI_VER_GET_MAJOR(my_vers->abi_version_0), + WMI_VER_GET_MINOR(my_vers->abi_version_0), my_vers->abi_version_1, + my_vers->abi_version_ns_0, my_vers->abi_version_ns_1, + my_vers->abi_version_ns_2, my_vers->abi_version_ns_3); + + wmi_tlv_print_verbose + ("%s: Opposite side WMI Version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n", + __func__, WMI_VER_GET_MAJOR(opp_vers->abi_version_0), + WMI_VER_GET_MINOR(opp_vers->abi_version_0), + opp_vers->abi_version_1, opp_vers->abi_version_ns_0, + opp_vers->abi_version_ns_1, opp_vers->abi_version_ns_2, + opp_vers->abi_version_ns_3); + + /* By default, the output version is our version. */ + wmi_tlv_OS_MEMCPY(out_vers, my_vers, sizeof(wmi_abi_version)); + if (!wmi_versions_are_compatible(my_vers, opp_vers)) { + /* Our host version and the given firmware version are incompatible. */ + if (wmi_versions_can_downgrade + (num_whitelist, version_whitelist_table, my_vers, opp_vers, + out_vers)) { + /* We can downgrade our host versions to match firmware. */ + wmi_tlv_print_error + ("%s: Host downgraded WMI Versions to match fw. Ret version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n", + __func__, + WMI_VER_GET_MAJOR(out_vers->abi_version_0), + WMI_VER_GET_MINOR(out_vers->abi_version_0), + out_vers->abi_version_1, + out_vers->abi_version_ns_0, + out_vers->abi_version_ns_1, + out_vers->abi_version_ns_2, + out_vers->abi_version_ns_3); + return 0; /* Compatible */ + } else { + /* Warn: We cannot downgrade our host versions to match firmware. */ + wmi_tlv_print_error + ("%s: WARN: Host WMI Versions mismatch with fw. Ret version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n", + __func__, + WMI_VER_GET_MAJOR(out_vers->abi_version_0), + WMI_VER_GET_MINOR(out_vers->abi_version_0), + out_vers->abi_version_1, + out_vers->abi_version_ns_0, + out_vers->abi_version_ns_1, + out_vers->abi_version_ns_2, + out_vers->abi_version_ns_3); + + return 1; /* Incompatible */ + } + } else { + /* We are compatible. Our host version is the output version */ + wmi_tlv_print_verbose + ("%s: Host and FW Compatible WMI Versions. Ret version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n", + __func__, WMI_VER_GET_MAJOR(out_vers->abi_version_0), + WMI_VER_GET_MINOR(out_vers->abi_version_0), + out_vers->abi_version_1, out_vers->abi_version_ns_0, + out_vers->abi_version_ns_1, out_vers->abi_version_ns_2, + out_vers->abi_version_ns_3); + return 0; /* Compatible */ + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_platform.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_platform.c new file mode 100644 index 0000000000000000000000000000000000000000..8afe92ef916cb3d10eadd90570e963596c9f0f22 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_platform.c @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * LMAC offload interface functions for WMI TLV Interface + */ + +#include /* qdf_mem_malloc,free, etc. */ +#include +#include "htc_api.h" +#include "wmi.h" + + +/* Following macro definitions use OS or platform specific functions */ +#define dummy_print(fmt, ...) {} +#define wmi_tlv_print_verbose dummy_print +#define wmi_tlv_print_error qdf_print +#define wmi_tlv_OS_MEMCPY OS_MEMCPY +#define wmi_tlv_OS_MEMZERO OS_MEMZERO +#define wmi_tlv_OS_MEMMOVE OS_MEMMOVE + +#ifndef NO_DYNAMIC_MEM_ALLOC +#define wmi_tlv_os_mem_alloc(scn, ptr, numBytes) \ + { \ + (ptr) = qdf_mem_malloc(numBytes); \ + } +#define wmi_tlv_os_mem_free qdf_mem_free +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c new file mode 100644 index 0000000000000000000000000000000000000000..f0a5b32ad4923b16d606ca27fa66416ae77a5758 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c @@ -0,0 +1,3257 @@ +/* + * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Host WMI unified implementation + */ +#include "htc_api.h" +#include "htc_api.h" +#include "wmi_unified_priv.h" +#include "wmi_unified_api.h" +#include "qdf_module.h" +#include "qdf_platform.h" +#ifdef WMI_EXT_DBG +#include "qdf_list.h" +#include "qdf_atomic.h" +#endif + +#ifndef WMI_NON_TLV_SUPPORT +#include "wmi_tlv_helper.h" +#endif + +#include +#include +#include +#include "wmi_filtered_logging.h" +#include + +/* This check for CONFIG_WIN temporary added due to redeclaration compilation +error in MCL. Error is caused due to inclusion of wmi.h in wmi_unified_api.h +which gets included here through ol_if_athvar.h. Eventually it is expected that +wmi.h will be removed from wmi_unified_api.h after cleanup, which will need +WMI_CMD_HDR to be defined here. */ +/* Copied from wmi.h */ +#undef MS +#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) +#undef SM +#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) +#undef WO +#define WO(_f) ((_f##_OFFSET) >> 2) + +#undef GET_FIELD +#define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f) +#undef SET_FIELD +#define SET_FIELD(_addr, _f, _val) \ + (*((uint32_t *)(_addr) + WO(_f)) = \ + (*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f)) + +#define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \ + GET_FIELD(_msg_buf, _msg_type ## _ ## _f) + +#define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \ + SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val) + +#define WMI_EP_APASS 0x0 +#define WMI_EP_LPASS 0x1 +#define WMI_EP_SENSOR 0x2 + +/* + * * Control Path + * */ +typedef PREPACK struct { + uint32_t commandId:24, + reserved:2, /* used for WMI endpoint ID */ + plt_priv:6; /* platform private */ +} POSTPACK WMI_CMD_HDR; /* used for commands and events */ + +#define WMI_CMD_HDR_COMMANDID_LSB 0 +#define WMI_CMD_HDR_COMMANDID_MASK 0x00ffffff +#define WMI_CMD_HDR_COMMANDID_OFFSET 0x00000000 +#define WMI_CMD_HDR_WMI_ENDPOINTID_MASK 0x03000000 +#define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET 24 +#define WMI_CMD_HDR_PLT_PRIV_LSB 24 +#define WMI_CMD_HDR_PLT_PRIV_MASK 0xff000000 +#define WMI_CMD_HDR_PLT_PRIV_OFFSET 0x00000000 +/* end of copy wmi.h */ + +#define WMI_MIN_HEAD_ROOM 64 + +/* WBUFF pool sizes for WMI */ +/* Allocation of size 256 bytes */ +#define WMI_WBUFF_POOL_0_SIZE 128 +/* Allocation of size 512 bytes */ +#define WMI_WBUFF_POOL_1_SIZE 16 +/* Allocation of size 1024 bytes */ +#define WMI_WBUFF_POOL_2_SIZE 8 +/* Allocation of size 2048 bytes */ +#define WMI_WBUFF_POOL_3_SIZE 8 + +#ifdef WMI_INTERFACE_EVENT_LOGGING +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)) +/* TODO Cleanup this backported function */ +static int wmi_bp_seq_printf(qdf_debugfs_file_t m, const char *f, ...) +{ + va_list args; + + va_start(args, f); + seq_vprintf(m, f, args); + va_end(args); + + return 0; +} +#else +#define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__) +#endif + +#ifndef MAX_WMI_INSTANCES +#define CUSTOM_MGMT_CMD_DATA_SIZE 4 +#endif + +#ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC +/* WMI commands */ +uint32_t g_wmi_command_buf_idx = 0; +struct wmi_command_debug wmi_command_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; + +/* WMI commands TX completed */ +uint32_t g_wmi_command_tx_cmp_buf_idx = 0; +struct wmi_command_debug + wmi_command_tx_cmp_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; + +/* WMI events when processed */ +uint32_t g_wmi_event_buf_idx = 0; +struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; + +/* WMI events when queued */ +uint32_t g_wmi_rx_event_buf_idx = 0; +struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; +#endif + +#define WMI_COMMAND_RECORD(h, a, b) { \ + if (wmi_log_max_entry <= \ + *(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)) \ + *(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\ + ((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\ + [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\ + .command = a; \ + qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ + wmi_command_log_buf_info.buf) \ + [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\ + b, wmi_record_max_length); \ + ((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\ + [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\ + time = qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++; \ + h->log_info.wmi_command_log_buf_info.length++; \ +} + +#define WMI_COMMAND_TX_CMP_RECORD(h, a, b) { \ + if (wmi_log_max_entry <= \ + *(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\ + *(h->log_info.wmi_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx) = 0; \ + ((struct wmi_command_debug *)h->log_info. \ + wmi_command_tx_cmp_log_buf_info.buf) \ + [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx)]. \ + command = a; \ + qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ + wmi_command_tx_cmp_log_buf_info.buf) \ + [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx)]. \ + data, b, wmi_record_max_length); \ + ((struct wmi_command_debug *)h->log_info. \ + wmi_command_tx_cmp_log_buf_info.buf) \ + [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx)]. \ + time = qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\ + h->log_info.wmi_command_tx_cmp_log_buf_info.length++; \ +} + +#define WMI_EVENT_RECORD(h, a, b) { \ + if (wmi_log_max_entry <= \ + *(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)) \ + *(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\ + ((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\ + [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)]. \ + event = a; \ + qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \ + wmi_event_log_buf_info.buf) \ + [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\ + wmi_record_max_length); \ + ((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\ + [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\ + qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++; \ + h->log_info.wmi_event_log_buf_info.length++; \ +} + +#define WMI_RX_EVENT_RECORD(h, a, b) { \ + if (wmi_log_max_entry <= \ + *(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\ + *(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\ + ((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\ + [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ + event = a; \ + qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \ + wmi_rx_event_log_buf_info.buf) \ + [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ + data, b, wmi_record_max_length); \ + ((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\ + [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ + time = qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++; \ + h->log_info.wmi_rx_event_log_buf_info.length++; \ +} + +#ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC +uint32_t g_wmi_mgmt_command_buf_idx = 0; +struct +wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; + +/* wmi_mgmt commands TX completed */ +uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0; +struct wmi_command_debug +wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; + +/* wmi_mgmt events when received */ +uint32_t g_wmi_mgmt_rx_event_buf_idx = 0; +struct wmi_event_debug +wmi_mgmt_rx_event_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; + +/* wmi_diag events when received */ +uint32_t g_wmi_diag_rx_event_buf_idx = 0; +struct wmi_event_debug +wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY]; +#endif + +#define WMI_MGMT_COMMAND_RECORD(h, a, b) { \ + if (wmi_mgmt_log_max_entry <= \ + *(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \ + *(h->log_info.wmi_mgmt_command_log_buf_info. \ + p_buf_tail_idx) = 0; \ + ((struct wmi_command_debug *)h->log_info. \ + wmi_mgmt_command_log_buf_info.buf) \ + [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ + command = a; \ + qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ + wmi_mgmt_command_log_buf_info.buf) \ + [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ + data, b, \ + wmi_record_max_length); \ + ((struct wmi_command_debug *)h->log_info. \ + wmi_mgmt_command_log_buf_info.buf) \ + [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ + time = qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\ + h->log_info.wmi_mgmt_command_log_buf_info.length++; \ +} + +#define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) { \ + if (wmi_mgmt_log_max_entry <= \ + *(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx)) \ + *(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx) = 0; \ + ((struct wmi_command_debug *)h->log_info. \ + wmi_mgmt_command_tx_cmp_log_buf_info.buf) \ + [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx)].command = a; \ + qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ + wmi_mgmt_command_tx_cmp_log_buf_info.buf)\ + [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx)].data, b, \ + wmi_record_max_length); \ + ((struct wmi_command_debug *)h->log_info. \ + wmi_mgmt_command_tx_cmp_log_buf_info.buf) \ + [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx)].time = \ + qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx))++; \ + h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++; \ +} + +#define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do { \ + if (wmi_mgmt_log_max_entry <= \ + *(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\ + *(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\ + ((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\ + [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\ + .event = a; \ + qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \ + wmi_mgmt_event_log_buf_info.buf) \ + [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\ + data, b, wmi_record_max_length); \ + ((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\ + [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\ + time = qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++; \ + h->log_info.wmi_mgmt_event_log_buf_info.length++; \ +} while (0); + +#define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do { \ + if (wmi_diag_log_max_entry <= \ + *(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\ + *(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\ + ((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\ + [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\ + .event = a; \ + qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \ + wmi_diag_event_log_buf_info.buf) \ + [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\ + data, b, wmi_record_max_length); \ + ((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\ + [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\ + time = qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++; \ + h->log_info.wmi_diag_event_log_buf_info.length++; \ +} while (0); + +/* These are defined to made it as module param, which can be configured */ +uint32_t wmi_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY; +uint32_t wmi_mgmt_log_max_entry = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; +uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY; +uint32_t wmi_record_max_length = WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH; +uint32_t wmi_display_size = 100; + +#ifdef WMI_EXT_DBG + +/** + * wmi_ext_dbg_msg_enqueue() - enqueue wmi message + * + * @wmi_handle: wmi handler + * + * Return: size of wmi message queue after enqueue + */ +static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle, + struct wmi_ext_dbg_msg *msg) +{ + uint32_t list_size; + + qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock); + qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue, + &msg->node, &list_size); + qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock); + + return list_size; +} + +/** + * wmi_ext_dbg_msg_dequeue() - dequeue wmi message + * + * @wmi_handle: wmi handler + * + * Return: wmi msg on success else NULL + */ +static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct wmi_unified + *wmi_handle) +{ + qdf_list_node_t *list_node = NULL; + + qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock); + qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node); + qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock); + + if (!list_node) + return NULL; + + return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node); +} + +/** + * wmi_ext_dbg_msg_record() - record wmi messages + * + * @wmi_handle: wmi handler + * @buf: wmi message buffer + * @len: wmi message length + * @type: wmi message type + * + * Return: QDF_STATUS_SUCCESS on successful recording else failure. + */ +static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle, + uint8_t *buf, uint32_t len, + enum WMI_MSG_TYPE type) +{ + struct wmi_ext_dbg_msg *msg; + uint32_t list_size; + + msg = wmi_ext_dbg_msg_get(len); + if (!msg) + return QDF_STATUS_E_NOMEM; + + msg->len = len; + msg->type = type; + qdf_mem_copy(msg->buf, buf, len); + msg->ts = qdf_get_log_timestamp(); + list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg); + + if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) { + msg = wmi_ext_dbg_msg_dequeue(wmi_handle); + wmi_ext_dbg_msg_put(msg); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_ext_dbg_msg_cmd_record() - record wmi command messages + * + * @wmi_handle: wmi handler + * @buf: wmi command buffer + * @len: wmi command message length + * + * Return: QDF_STATUS_SUCCESS on successful recording else failure. + */ +static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle, + uint8_t *buf, uint32_t len) +{ + return wmi_ext_dbg_msg_record(wmi_handle, buf, len, + WMI_MSG_TYPE_CMD); +} + +/** + * wmi_ext_dbg_msg_event_record() - record wmi event messages + * + * @wmi_handle: wmi handler + * @buf: wmi event buffer + * @len: wmi event message length + * + * Return: QDF_STATUS_SUCCESS on successful recording else failure. + */ +static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle, + uint8_t *buf, uint32_t len) +{ + uint32_t id; + + id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID); + if (id != wmi_handle->wmi_events[wmi_diag_event_id]) + return wmi_ext_dbg_msg_record(wmi_handle, buf, len, + WMI_MSG_TYPE_EVENT); + + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_ext_dbg_msg_queue_init() - create debugfs queue and associated lock + * + * @wmi_handle: wmi handler + * + * Return: none + */ +static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle) +{ + qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue, + wmi_handle->wmi_ext_dbg_msg_queue_size); + qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock); +} + +/** + * wmi_ext_dbg_msg_queue_deinit() - destroy debugfs queue and associated lock + * + * @wmi_handle: wmi handler + * + * Return: none + */ +static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle) +{ + qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue); + qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock); +} + +/** + * wmi_ext_dbg_msg_show() - debugfs function to display whole content of + * wmi command/event messages including headers. + * + * @file: qdf debugfs file handler + * @arg: pointer to wmi handler + * + * Return: QDF_STATUS_SUCCESS if all the messages are shown successfully, + * else QDF_STATUS_E_AGAIN if more data to show. + */ +static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)arg; + struct wmi_ext_dbg_msg *msg; + uint64_t secs, usecs; + + msg = wmi_ext_dbg_msg_dequeue(wmi_handle); + if (!msg) + return QDF_STATUS_SUCCESS; + + qdf_debugfs_printf(file, "%s: 0x%x\n", + msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" : + "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR, + COMMANDID)); + qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs); + qdf_debugfs_printf(file, "Time: %llu.%llu\n", secs, usecs); + qdf_debugfs_printf(file, "Length:%d\n", msg->len); + qdf_debugfs_hexdump(file, msg->buf, msg->len, + WMI_EXT_DBG_DUMP_ROW_SIZE, + WMI_EXT_DBG_DUMP_GROUP_SIZE); + qdf_debugfs_printf(file, "\n"); + + if (qdf_debugfs_overflow(file)) { + qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock); + qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue, + &msg->node); + qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock); + + } else { + wmi_ext_dbg_msg_put(msg); + } + + return QDF_STATUS_E_AGAIN; +} + +/** + * wmi_ext_dbg_msg_write() - debugfs write not supported + * + * @priv: private data + * @buf: received data buffer + * @len: length of received buffer + * + * Return: QDF_STATUS_E_NOSUPPORT. + */ +static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf, + qdf_size_t len) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static struct qdf_debugfs_fops wmi_ext_dbgfs_ops = { + .show = wmi_ext_dbg_msg_show, + .write = wmi_ext_dbg_msg_write, + .priv = NULL, +}; + +/** + * wmi_ext_debugfs_init() - init debugfs items for extended wmi dump. + * + * @wmi_handle: wmi handler + * + * Return: QDF_STATUS_SUCCESS if debugfs is initialized else + * QDF_STATUS_E_FAILURE + */ +static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle) +{ + qdf_dentry_t dentry; + + dentry = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL); + if (!dentry) { + WMI_LOGE("error while creating extended wmi debugfs dir"); + return QDF_STATUS_E_FAILURE; + } + + wmi_ext_dbgfs_ops.priv = wmi_handle; + if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM, + dentry, &wmi_ext_dbgfs_ops)) { + qdf_debugfs_remove_dir(dentry); + WMI_LOGE("error while creating extended wmi debugfs file"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle->wmi_ext_dbg_dentry = dentry; + wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE; + wmi_ext_dbg_msg_queue_init(wmi_handle); + + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_ext_debugfs_deinit() - cleanup/deinit debugfs items of extended wmi dump. + * + * @wmi_handle: wmi handler + * + * Return: QDF_STATUS_SUCCESS if cleanup is successful + */ +static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle) +{ + struct wmi_ext_dbg_msg *msg; + + while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle))) + wmi_ext_dbg_msg_put(msg); + + wmi_ext_dbg_msg_queue_deinit(wmi_handle); + qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry); + + return QDF_STATUS_SUCCESS; +} + +#endif /*WMI_EXT_DBG */ + +/** + * wmi_log_init() - Initialize WMI event logging + * @wmi_handle: WMI handle. + * + * Return: Initialization status + */ +#ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC +static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle) +{ + struct wmi_log_buf_t *cmd_log_buf = + &wmi_handle->log_info.wmi_command_log_buf_info; + struct wmi_log_buf_t *cmd_tx_cmpl_log_buf = + &wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info; + + struct wmi_log_buf_t *event_log_buf = + &wmi_handle->log_info.wmi_event_log_buf_info; + struct wmi_log_buf_t *rx_event_log_buf = + &wmi_handle->log_info.wmi_rx_event_log_buf_info; + + struct wmi_log_buf_t *mgmt_cmd_log_buf = + &wmi_handle->log_info.wmi_mgmt_command_log_buf_info; + struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf = + &wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info; + struct wmi_log_buf_t *mgmt_event_log_buf = + &wmi_handle->log_info.wmi_mgmt_event_log_buf_info; + struct wmi_log_buf_t *diag_event_log_buf = + &wmi_handle->log_info.wmi_diag_event_log_buf_info; + + /* WMI commands */ + cmd_log_buf->length = 0; + cmd_log_buf->buf_tail_idx = 0; + cmd_log_buf->buf = wmi_command_log_buffer; + cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx; + cmd_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY; + + /* WMI commands TX completed */ + cmd_tx_cmpl_log_buf->length = 0; + cmd_tx_cmpl_log_buf->buf_tail_idx = 0; + cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer; + cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx; + cmd_tx_cmpl_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY; + + /* WMI events when processed */ + event_log_buf->length = 0; + event_log_buf->buf_tail_idx = 0; + event_log_buf->buf = wmi_event_log_buffer; + event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx; + event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY; + + /* WMI events when queued */ + rx_event_log_buf->length = 0; + rx_event_log_buf->buf_tail_idx = 0; + rx_event_log_buf->buf = wmi_rx_event_log_buffer; + rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx; + rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY; + + /* WMI Management commands */ + mgmt_cmd_log_buf->length = 0; + mgmt_cmd_log_buf->buf_tail_idx = 0; + mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer; + mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx; + mgmt_cmd_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; + + /* WMI Management commands Tx completed*/ + mgmt_cmd_tx_cmp_log_buf->length = 0; + mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0; + mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer; + mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx = + &g_wmi_mgmt_command_tx_cmp_buf_idx; + mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; + + /* WMI Management events when received */ + mgmt_event_log_buf->length = 0; + mgmt_event_log_buf->buf_tail_idx = 0; + mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer; + mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx; + mgmt_event_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; + + /* WMI diag events when received */ + diag_event_log_buf->length = 0; + diag_event_log_buf->buf_tail_idx = 0; + diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer; + diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx; + diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY; + + qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock); + wmi_handle->log_info.wmi_logging_enable = 1; + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle) +{ + struct wmi_log_buf_t *cmd_log_buf = + &wmi_handle->log_info.wmi_command_log_buf_info; + struct wmi_log_buf_t *cmd_tx_cmpl_log_buf = + &wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info; + + struct wmi_log_buf_t *event_log_buf = + &wmi_handle->log_info.wmi_event_log_buf_info; + struct wmi_log_buf_t *rx_event_log_buf = + &wmi_handle->log_info.wmi_rx_event_log_buf_info; + + struct wmi_log_buf_t *mgmt_cmd_log_buf = + &wmi_handle->log_info.wmi_mgmt_command_log_buf_info; + struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf = + &wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info; + struct wmi_log_buf_t *mgmt_event_log_buf = + &wmi_handle->log_info.wmi_mgmt_event_log_buf_info; + struct wmi_log_buf_t *diag_event_log_buf = + &wmi_handle->log_info.wmi_diag_event_log_buf_info; + + wmi_handle->log_info.wmi_logging_enable = 0; + + /* WMI commands */ + cmd_log_buf->length = 0; + cmd_log_buf->buf_tail_idx = 0; + cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc( + wmi_log_max_entry * sizeof(struct wmi_command_debug)); + cmd_log_buf->size = wmi_log_max_entry; + + if (!cmd_log_buf->buf) + return QDF_STATUS_E_NOMEM; + + cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx; + + /* WMI commands TX completed */ + cmd_tx_cmpl_log_buf->length = 0; + cmd_tx_cmpl_log_buf->buf_tail_idx = 0; + cmd_tx_cmpl_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc( + wmi_log_max_entry * sizeof(struct wmi_command_debug)); + cmd_tx_cmpl_log_buf->size = wmi_log_max_entry; + + if (!cmd_tx_cmpl_log_buf->buf) + return QDF_STATUS_E_NOMEM; + + cmd_tx_cmpl_log_buf->p_buf_tail_idx = + &cmd_tx_cmpl_log_buf->buf_tail_idx; + + /* WMI events when processed */ + event_log_buf->length = 0; + event_log_buf->buf_tail_idx = 0; + event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc( + wmi_log_max_entry * sizeof(struct wmi_event_debug)); + event_log_buf->size = wmi_log_max_entry; + + if (!event_log_buf->buf) + return QDF_STATUS_E_NOMEM; + + event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx; + + /* WMI events when queued */ + rx_event_log_buf->length = 0; + rx_event_log_buf->buf_tail_idx = 0; + rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc( + wmi_log_max_entry * sizeof(struct wmi_event_debug)); + rx_event_log_buf->size = wmi_log_max_entry; + + if (!rx_event_log_buf->buf) + return QDF_STATUS_E_NOMEM; + + rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx; + + /* WMI Management commands */ + mgmt_cmd_log_buf->length = 0; + mgmt_cmd_log_buf->buf_tail_idx = 0; + mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc( + wmi_mgmt_log_max_entry * sizeof(struct wmi_command_debug)); + mgmt_cmd_log_buf->size = wmi_mgmt_log_max_entry; + + if (!mgmt_cmd_log_buf->buf) + return QDF_STATUS_E_NOMEM; + + mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx; + + /* WMI Management commands Tx completed*/ + mgmt_cmd_tx_cmp_log_buf->length = 0; + mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0; + mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *) + qdf_mem_malloc( + wmi_mgmt_log_max_entry * + sizeof(struct wmi_command_debug)); + mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_log_max_entry; + + if (!mgmt_cmd_tx_cmp_log_buf->buf) + return QDF_STATUS_E_NOMEM; + + mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx = + &mgmt_cmd_tx_cmp_log_buf->buf_tail_idx; + + /* WMI Management events when received */ + mgmt_event_log_buf->length = 0; + mgmt_event_log_buf->buf_tail_idx = 0; + + mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc( + wmi_mgmt_log_max_entry * + sizeof(struct wmi_event_debug)); + mgmt_event_log_buf->size = wmi_mgmt_log_max_entry; + + if (!mgmt_event_log_buf->buf) + return QDF_STATUS_E_NOMEM; + + mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx; + + /* WMI diag events when received */ + diag_event_log_buf->length = 0; + diag_event_log_buf->buf_tail_idx = 0; + + diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc( + wmi_diag_log_max_entry * + sizeof(struct wmi_event_debug)); + diag_event_log_buf->size = wmi_diag_log_max_entry; + + if (!diag_event_log_buf->buf) + return QDF_STATUS_E_NOMEM; + + diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx; + + qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock); + wmi_handle->log_info.wmi_logging_enable = 1; + + wmi_filtered_logging_init(wmi_handle); + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * wmi_log_buffer_free() - Free all dynamic allocated buffer memory for + * event logging + * @wmi_handle: WMI handle. + * + * Return: None + */ +#ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC +static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) +{ + wmi_filtered_logging_free(wmi_handle); + + if (wmi_handle->log_info.wmi_command_log_buf_info.buf) + qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf); + if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf) + qdf_mem_free( + wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf); + if (wmi_handle->log_info.wmi_event_log_buf_info.buf) + qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf); + if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf) + qdf_mem_free( + wmi_handle->log_info.wmi_rx_event_log_buf_info.buf); + if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf) + qdf_mem_free( + wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf); + if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf) + qdf_mem_free( + wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf); + if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf) + qdf_mem_free( + wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf); + if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf) + qdf_mem_free( + wmi_handle->log_info.wmi_diag_event_log_buf_info.buf); + wmi_handle->log_info.wmi_logging_enable = 0; + + qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock); +} +#else +static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) +{ + /* Do Nothing */ +} +#endif + +/** + * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer + * @log_buffer: the command log buffer metadata of the buffer to print + * @count: the maximum number of entries to print + * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper + * @print_priv: any data required by the print method, e.g. a file handle + * + * Return: None + */ +static void +wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + static const int data_len = + WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t); + char str[128]; + uint32_t idx; + + if (count > log_buffer->size) + count = log_buffer->size; + if (count > log_buffer->length) + count = log_buffer->length; + + /* subtract count from index, and wrap if necessary */ + idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count; + idx %= log_buffer->size; + + print(print_priv, "Time (seconds) Cmd Id Payload"); + while (count) { + struct wmi_command_debug *cmd_log = (struct wmi_command_debug *) + &((struct wmi_command_debug *)log_buffer->buf)[idx]; + uint64_t secs, usecs; + int len = 0; + int i; + + qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs); + len += scnprintf(str + len, sizeof(str) - len, + "% 8lld.%06lld %6u (0x%06x) ", + secs, usecs, + cmd_log->command, cmd_log->command); + for (i = 0; i < data_len; ++i) { + len += scnprintf(str + len, sizeof(str) - len, + "0x%08x ", cmd_log->data[i]); + } + + print(print_priv, str); + + --count; + ++idx; + if (idx >= log_buffer->size) + idx = 0; + } +} + +/** + * wmi_print_event_log_buffer() - an output agnostic wmi event log printer + * @log_buffer: the event log buffer metadata of the buffer to print + * @count: the maximum number of entries to print + * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper + * @print_priv: any data required by the print method, e.g. a file handle + * + * Return: None + */ +static void +wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + static const int data_len = + WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t); + char str[128]; + uint32_t idx; + + if (count > log_buffer->size) + count = log_buffer->size; + if (count > log_buffer->length) + count = log_buffer->length; + + /* subtract count from index, and wrap if necessary */ + idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count; + idx %= log_buffer->size; + + print(print_priv, "Time (seconds) Event Id Payload"); + while (count) { + struct wmi_event_debug *event_log = (struct wmi_event_debug *) + &((struct wmi_event_debug *)log_buffer->buf)[idx]; + uint64_t secs, usecs; + int len = 0; + int i; + + qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs); + len += scnprintf(str + len, sizeof(str) - len, + "% 8lld.%06lld %6u (0x%06x) ", + secs, usecs, + event_log->event, event_log->event); + for (i = 0; i < data_len; ++i) { + len += scnprintf(str + len, sizeof(str) - len, + "0x%08x ", event_log->data[i]); + } + + print(print_priv, str); + + --count; + ++idx; + if (idx >= log_buffer->size) + idx = 0; + } +} + +inline void +wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + wmi_print_cmd_log_buffer( + &wmi->log_info.wmi_command_log_buf_info, + count, print, print_priv); +} + +inline void +wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + wmi_print_cmd_log_buffer( + &wmi->log_info.wmi_command_tx_cmp_log_buf_info, + count, print, print_priv); +} + +inline void +wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + wmi_print_cmd_log_buffer( + &wmi->log_info.wmi_mgmt_command_log_buf_info, + count, print, print_priv); +} + +inline void +wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + wmi_print_cmd_log_buffer( + &wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info, + count, print, print_priv); +} + +inline void +wmi_print_event_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + wmi_print_event_log_buffer( + &wmi->log_info.wmi_event_log_buf_info, + count, print, print_priv); +} + +inline void +wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + wmi_print_event_log_buffer( + &wmi->log_info.wmi_rx_event_log_buf_info, + count, print, print_priv); +} + +inline void +wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + wmi_print_event_log_buffer( + &wmi->log_info.wmi_mgmt_event_log_buf_info, + count, print, print_priv); +} + + +/* debugfs routines*/ + +/** + * debug_wmi_##func_base##_show() - debugfs functions to display content of + * command and event buffers. Macro uses max buffer length to display + * buffer when it is wraparound. + * + * @m: debugfs handler to access wmi_handle + * @v: Variable arguments (not used) + * + * Return: Length of characters printed + */ +#define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size) \ + static int debug_wmi_##func_base##_show(struct seq_file *m, \ + void *v) \ + { \ + wmi_unified_t wmi_handle = (wmi_unified_t) m->private; \ + struct wmi_log_buf_t *wmi_log = \ + &wmi_handle->log_info.wmi_##func_base##_buf_info;\ + int pos, nread, outlen; \ + int i; \ + uint64_t secs, usecs; \ + \ + qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\ + if (!wmi_log->length) { \ + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\ + return wmi_bp_seq_printf(m, \ + "no elements to read from ring buffer!\n"); \ + } \ + \ + if (wmi_log->length <= wmi_ring_size) \ + nread = wmi_log->length; \ + else \ + nread = wmi_ring_size; \ + \ + if (*(wmi_log->p_buf_tail_idx) == 0) \ + /* tail can be 0 after wrap-around */ \ + pos = wmi_ring_size - 1; \ + else \ + pos = *(wmi_log->p_buf_tail_idx) - 1; \ + \ + outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\ + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\ + while (nread--) { \ + struct wmi_command_debug *wmi_record; \ + \ + wmi_record = (struct wmi_command_debug *) \ + &(((struct wmi_command_debug *)wmi_log->buf)[pos]);\ + outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n", \ + (wmi_record->command)); \ + qdf_log_timestamp_to_secs(wmi_record->time, &secs,\ + &usecs); \ + outlen += \ + wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\ + secs, usecs); \ + outlen += wmi_bp_seq_printf(m, "CMD = "); \ + for (i = 0; i < (wmi_record_max_length/ \ + sizeof(uint32_t)); i++) \ + outlen += wmi_bp_seq_printf(m, "%x ", \ + wmi_record->data[i]); \ + outlen += wmi_bp_seq_printf(m, "\n"); \ + \ + if (pos == 0) \ + pos = wmi_ring_size - 1; \ + else \ + pos--; \ + } \ + return outlen; \ + } \ + +#define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size) \ + static int debug_wmi_##func_base##_show(struct seq_file *m, \ + void *v) \ + { \ + wmi_unified_t wmi_handle = (wmi_unified_t) m->private; \ + struct wmi_log_buf_t *wmi_log = \ + &wmi_handle->log_info.wmi_##func_base##_buf_info;\ + int pos, nread, outlen; \ + int i; \ + uint64_t secs, usecs; \ + \ + qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\ + if (!wmi_log->length) { \ + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\ + return wmi_bp_seq_printf(m, \ + "no elements to read from ring buffer!\n"); \ + } \ + \ + if (wmi_log->length <= wmi_ring_size) \ + nread = wmi_log->length; \ + else \ + nread = wmi_ring_size; \ + \ + if (*(wmi_log->p_buf_tail_idx) == 0) \ + /* tail can be 0 after wrap-around */ \ + pos = wmi_ring_size - 1; \ + else \ + pos = *(wmi_log->p_buf_tail_idx) - 1; \ + \ + outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\ + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\ + while (nread--) { \ + struct wmi_event_debug *wmi_record; \ + \ + wmi_record = (struct wmi_event_debug *) \ + &(((struct wmi_event_debug *)wmi_log->buf)[pos]);\ + qdf_log_timestamp_to_secs(wmi_record->time, &secs,\ + &usecs); \ + outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\ + (wmi_record->event)); \ + outlen += \ + wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\ + secs, usecs); \ + outlen += wmi_bp_seq_printf(m, "CMD = "); \ + for (i = 0; i < (wmi_record_max_length/ \ + sizeof(uint32_t)); i++) \ + outlen += wmi_bp_seq_printf(m, "%x ", \ + wmi_record->data[i]); \ + outlen += wmi_bp_seq_printf(m, "\n"); \ + \ + if (pos == 0) \ + pos = wmi_ring_size - 1; \ + else \ + pos--; \ + } \ + return outlen; \ + } + +GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size); +GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size); +GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size); +GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size); +GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size); +GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log, + wmi_display_size); +GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size); + +/** + * debug_wmi_enable_show() - debugfs functions to display enable state of + * wmi logging feature. + * + * @m: debugfs handler to access wmi_handle + * @v: Variable arguments (not used) + * + * Return: always 1 + */ +static int debug_wmi_enable_show(struct seq_file *m, void *v) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) m->private; + + return wmi_bp_seq_printf(m, "%d\n", + wmi_handle->log_info.wmi_logging_enable); +} + +/** + * debug_wmi_log_size_show() - debugfs functions to display configured size of + * wmi logging command/event buffer and management command/event buffer. + * + * @m: debugfs handler to access wmi_handle + * @v: Variable arguments (not used) + * + * Return: Length of characters printed + */ +static int debug_wmi_log_size_show(struct seq_file *m, void *v) +{ + + wmi_bp_seq_printf(m, "WMI command/event log max size:%d\n", + wmi_log_max_entry); + return wmi_bp_seq_printf(m, + "WMI management command/events log max size:%d\n", + wmi_mgmt_log_max_entry); +} + +/** + * debug_wmi_##func_base##_write() - debugfs functions to clear + * wmi logging command/event buffer and management command/event buffer. + * + * @file: file handler to access wmi_handle + * @buf: received data buffer + * @count: length of received buffer + * @ppos: Not used + * + * Return: count + */ +#define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\ + static ssize_t debug_wmi_##func_base##_write(struct file *file, \ + const char __user *buf, \ + size_t count, loff_t *ppos) \ + { \ + int k, ret; \ + wmi_unified_t wmi_handle = \ + ((struct seq_file *)file->private_data)->private;\ + struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info. \ + wmi_##func_base##_buf_info; \ + char locbuf[50]; \ + \ + if ((!buf) || (count > 50)) \ + return -EFAULT; \ + \ + if (copy_from_user(locbuf, buf, count)) \ + return -EFAULT; \ + \ + ret = sscanf(locbuf, "%d", &k); \ + if ((ret != 1) || (k != 0)) { \ + WMI_LOGE("Wrong input, echo 0 to clear the wmi buffer");\ + return -EINVAL; \ + } \ + \ + qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\ + qdf_mem_zero(wmi_log->buf, wmi_ring_size * \ + sizeof(struct wmi_record_type)); \ + wmi_log->length = 0; \ + *(wmi_log->p_buf_tail_idx) = 0; \ + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\ + \ + return count; \ + } + +GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_log_max_entry, + wmi_command_debug); +GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_log_max_entry, + wmi_command_debug); +GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_log_max_entry, + wmi_event_debug); +GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_log_max_entry, + wmi_event_debug); +GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_log_max_entry, + wmi_command_debug); +GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log, + wmi_mgmt_log_max_entry, wmi_command_debug); +GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_log_max_entry, + wmi_event_debug); + +/** + * debug_wmi_enable_write() - debugfs functions to enable/disable + * wmi logging feature. + * + * @file: file handler to access wmi_handle + * @buf: received data buffer + * @count: length of received buffer + * @ppos: Not used + * + * Return: count + */ +static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + wmi_unified_t wmi_handle = + ((struct seq_file *)file->private_data)->private; + int k, ret; + char locbuf[50]; + + if ((!buf) || (count > 50)) + return -EFAULT; + + if (copy_from_user(locbuf, buf, count)) + return -EFAULT; + + ret = sscanf(locbuf, "%d", &k); + if ((ret != 1) || ((k != 0) && (k != 1))) + return -EINVAL; + + wmi_handle->log_info.wmi_logging_enable = k; + return count; +} + +/** + * debug_wmi_log_size_write() - reserved. + * + * @file: file handler to access wmi_handle + * @buf: received data buffer + * @count: length of received buffer + * @ppos: Not used + * + * Return: count + */ +static ssize_t debug_wmi_log_size_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + return -EINVAL; +} + +/* Structure to maintain debug information */ +struct wmi_debugfs_info { + const char *name; + const struct file_operations *ops; +}; + +#define DEBUG_FOO(func_base) { .name = #func_base, \ + .ops = &debug_##func_base##_ops } + +/** + * debug_##func_base##_open() - Open debugfs entry for respective command + * and event buffer. + * + * @inode: node for debug dir entry + * @file: file handler + * + * Return: open status + */ +#define GENERATE_DEBUG_STRUCTS(func_base) \ + static int debug_##func_base##_open(struct inode *inode, \ + struct file *file) \ + { \ + return single_open(file, debug_##func_base##_show, \ + inode->i_private); \ + } \ + \ + \ + static struct file_operations debug_##func_base##_ops = { \ + .open = debug_##func_base##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .write = debug_##func_base##_write, \ + .release = single_release, \ + }; + +GENERATE_DEBUG_STRUCTS(wmi_command_log); +GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log); +GENERATE_DEBUG_STRUCTS(wmi_event_log); +GENERATE_DEBUG_STRUCTS(wmi_rx_event_log); +GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log); +GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log); +GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log); +GENERATE_DEBUG_STRUCTS(wmi_enable); +GENERATE_DEBUG_STRUCTS(wmi_log_size); +#ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING +GENERATE_DEBUG_STRUCTS(filtered_wmi_cmds); +GENERATE_DEBUG_STRUCTS(filtered_wmi_evts); +GENERATE_DEBUG_STRUCTS(wmi_filtered_command_log); +GENERATE_DEBUG_STRUCTS(wmi_filtered_event_log); +#endif + +struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = { + DEBUG_FOO(wmi_command_log), + DEBUG_FOO(wmi_command_tx_cmp_log), + DEBUG_FOO(wmi_event_log), + DEBUG_FOO(wmi_rx_event_log), + DEBUG_FOO(wmi_mgmt_command_log), + DEBUG_FOO(wmi_mgmt_command_tx_cmp_log), + DEBUG_FOO(wmi_mgmt_event_log), + DEBUG_FOO(wmi_enable), + DEBUG_FOO(wmi_log_size), +#ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING + DEBUG_FOO(filtered_wmi_cmds), + DEBUG_FOO(filtered_wmi_evts), + DEBUG_FOO(wmi_filtered_command_log), + DEBUG_FOO(wmi_filtered_event_log), +#endif +}; + + +/** + * wmi_debugfs_create() - Create debug_fs entry for wmi logging. + * + * @wmi_handle: wmi handle + * @par_entry: debug directory entry + * @id: Index to debug info data array + * + * Return: none + */ +static void wmi_debugfs_create(wmi_unified_t wmi_handle, + struct dentry *par_entry) +{ + int i; + + if (!par_entry) + goto out; + + for (i = 0; i < NUM_DEBUG_INFOS; ++i) { + wmi_handle->debugfs_de[i] = debugfs_create_file( + wmi_debugfs_infos[i].name, 0644, par_entry, + wmi_handle, wmi_debugfs_infos[i].ops); + + if (!wmi_handle->debugfs_de[i]) { + WMI_LOGE("debug Entry creation failed!"); + goto out; + } + } + + return; + +out: + WMI_LOGE("debug Entry creation failed!"); + wmi_log_buffer_free(wmi_handle); + return; +} + +/** + * wmi_debugfs_remove() - Remove debugfs entry for wmi logging. + * @wmi_handle: wmi handle + * @dentry: debugfs directory entry + * @id: Index to debug info data array + * + * Return: none + */ +static void wmi_debugfs_remove(wmi_unified_t wmi_handle) +{ + int i; + struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir; + + if (dentry) { + for (i = 0; i < NUM_DEBUG_INFOS; ++i) { + if (wmi_handle->debugfs_de[i]) + wmi_handle->debugfs_de[i] = NULL; + } + } + + if (dentry) + debugfs_remove_recursive(dentry); +} + +/** + * wmi_debugfs_init() - debugfs functions to create debugfs directory and to + * create debugfs enteries. + * + * @h: wmi handler + * + * Return: init status + */ +static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx) +{ + char buf[32]; + + snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u", + wmi_handle->soc->soc_idx, pdev_idx); + + wmi_handle->log_info.wmi_log_debugfs_dir = + debugfs_create_dir(buf, NULL); + + if (!wmi_handle->log_info.wmi_log_debugfs_dir) { + WMI_LOGE("error while creating debugfs dir for %s", buf); + return QDF_STATUS_E_FAILURE; + } + wmi_debugfs_create(wmi_handle, + wmi_handle->log_info.wmi_log_debugfs_dir); + + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_mgmt_cmd_record() - Wrapper function for mgmt command logging macro + * + * @wmi_handle: wmi handle + * @cmd: mgmt command + * @header: pointer to 802.11 header + * @vdev_id: vdev id + * @chanfreq: channel frequency + * + * Return: none + */ +void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd, + void *header, uint32_t vdev_id, uint32_t chanfreq) +{ + + uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE]; + + data[0] = ((struct wmi_command_header *)header)->type; + data[1] = ((struct wmi_command_header *)header)->sub_type; + data[2] = vdev_id; + data[3] = chanfreq; + + qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); + + WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data); + wmi_specific_cmd_record(wmi_handle, cmd, (uint8_t *)data); + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); +} +#else +/** + * wmi_debugfs_remove() - Remove debugfs entry for wmi logging. + * @wmi_handle: wmi handle + * @dentry: debugfs directory entry + * @id: Index to debug info data array + * + * Return: none + */ +static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { } +void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd, + void *header, uint32_t vdev_id, uint32_t chanfreq) { } +static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { } +#endif /*WMI_INTERFACE_EVENT_LOGGING */ +qdf_export_symbol(wmi_mgmt_cmd_record); + +int wmi_get_host_credits(wmi_unified_t wmi_handle); +/* WMI buffer APIs */ + +#ifdef NBUF_MEMORY_DEBUG +wmi_buf_t +wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len, + const char *func_name, + uint32_t line_num) +{ + wmi_buf_t wmi_buf; + + if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) { + QDF_ASSERT(0); + return NULL; + } + + wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func_name, + line_num); + if (!wmi_buf) + wmi_buf = qdf_nbuf_alloc_debug(NULL, + roundup(len + WMI_MIN_HEAD_ROOM, + 4), + WMI_MIN_HEAD_ROOM, 4, false, + func_name, line_num); + if (!wmi_buf) + return NULL; + + /* Clear the wmi buffer */ + OS_MEMZERO(qdf_nbuf_data(wmi_buf), len); + + /* + * Set the length of the buffer to match the allocation size. + */ + qdf_nbuf_set_pktlen(wmi_buf, len); + + return wmi_buf; +} +qdf_export_symbol(wmi_buf_alloc_debug); + +void wmi_buf_free(wmi_buf_t net_buf) +{ + net_buf = wbuff_buff_put(net_buf); + if (net_buf) + qdf_nbuf_free(net_buf); +} +qdf_export_symbol(wmi_buf_free); +#else +wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len, + const char *func, uint32_t line) +{ + wmi_buf_t wmi_buf; + + if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) { + QDF_DEBUG_PANIC("Invalid length %u (via %s:%u)", + len, func, line); + return NULL; + } + + wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, __func__, + __LINE__); + if (!wmi_buf) + wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len + + WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4, + false, func, line); + + if (!wmi_buf) { + wmi_nofl_err("%s:%d, failed to alloc len:%d", func, line, len); + return NULL; + } + + /* Clear the wmi buffer */ + OS_MEMZERO(qdf_nbuf_data(wmi_buf), len); + + /* + * Set the length of the buffer to match the allocation size. + */ + qdf_nbuf_set_pktlen(wmi_buf, len); + + return wmi_buf; +} +qdf_export_symbol(wmi_buf_alloc_fl); + +void wmi_buf_free(wmi_buf_t net_buf) +{ + net_buf = wbuff_buff_put(net_buf); + if (net_buf) + qdf_nbuf_free(net_buf); +} +qdf_export_symbol(wmi_buf_free); +#endif + +/** + * wmi_get_max_msg_len() - get maximum WMI message length + * @wmi_handle: WMI handle. + * + * This function returns the maximum WMI message length + * + * Return: maximum WMI message length + */ +uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle) +{ + return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM; +} +qdf_export_symbol(wmi_get_max_msg_len); + +#ifndef WMI_CMD_STRINGS +static uint8_t *wmi_id_to_name(uint32_t wmi_command) +{ + return "Invalid WMI cmd"; +} +#endif + +static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag) +{ + WMI_LOGD("Send WMI command:%s command_id:%d htc_tag:%d", + wmi_id_to_name(cmd_id), cmd_id, tag); +} + +/** + * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence + * @cmd_id: command to check + * + * Return: true if the command is part of the resume sequence. + */ +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +static bool wmi_is_pm_resume_cmd(uint32_t cmd_id) +{ + switch (cmd_id) { + case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID: + case WMI_PDEV_RESUME_CMDID: + return true; + + default: + return false; + } +} + +#else +static bool wmi_is_pm_resume_cmd(uint32_t cmd_id) +{ + return false; +} + +#endif + +#ifdef FEATURE_WLAN_D0WOW +static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id) +{ + wmi_d0_wow_enable_disable_cmd_fixed_param *cmd; + + if (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID) { + cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *) + wmi_buf_data(buf); + if (!cmd->enable) + return true; + else + return false; + } + + return false; +} +#else +static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id) +{ + return false; +} + +#endif + +static inline void wmi_unified_debug_dump(wmi_unified_t wmi_handle) +{ + wmi_nofl_err("Endpoint ID = %d, Tx Queue Depth = %d, soc_id = %u, target type = %s", + wmi_handle->wmi_endpoint_id, + htc_get_tx_queue_depth(wmi_handle->htc_handle, + wmi_handle->wmi_endpoint_id), + wmi_handle->soc->soc_idx, + (wmi_handle->target_type == + WMI_TLV_TARGET ? "WMI_TLV_TARGET" : + "WMI_NON_TLV_TARGET")); +} + +#ifdef SYSTEM_PM_CHECK +/** + * wmi_set_system_pm_pkt_tag() - API to set tag for system pm packets + * @htc_tag: HTC tag + * @buf: wmi cmd buffer + * @cmd_id: cmd id + * + * Return: None + */ +static void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf, + uint32_t cmd_id) +{ + switch (cmd_id) { + case WMI_WOW_ENABLE_CMDID: + case WMI_PDEV_SUSPEND_CMDID: + *htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND; + break; + case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID: + case WMI_PDEV_RESUME_CMDID: + *htc_tag = HTC_TX_PACKET_SYSTEM_RESUME; + break; + case WMI_D0_WOW_ENABLE_DISABLE_CMDID: + if (wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) + *htc_tag = HTC_TX_PACKET_SYSTEM_RESUME; + else + *htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND; + break; + default: + break; + } +} +#else +static inline void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf, + uint32_t cmd_id) +{ +} +#endif + +QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf, + uint32_t len, uint32_t cmd_id, + const char *func, uint32_t line) +{ + HTC_PACKET *pkt; + QDF_STATUS status; + uint16_t htc_tag = 0; + bool rtpm_inprogress; + + rtpm_inprogress = wmi_get_runtime_pm_inprogress(wmi_handle); + if (rtpm_inprogress) { + htc_tag = wmi_handle->ops->wmi_set_htc_tx_tag(wmi_handle, buf, + cmd_id); + } else if (qdf_atomic_read(&wmi_handle->is_target_suspended) && + !wmi_is_pm_resume_cmd(cmd_id) && + !wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) { + wmi_nofl_err("Target is suspended (via %s:%u)", + func, line); + return QDF_STATUS_E_BUSY; + } + + if (wmi_handle->wmi_stopinprogress) { + wmi_nofl_err("%s:%d, WMI stop in progress, wmi_handle:%pK", + func, line, wmi_handle); + return QDF_STATUS_E_INVAL; + } + +#ifndef WMI_NON_TLV_SUPPORT + /* Do sanity check on the TLV parameter structure */ + if (wmi_handle->target_type == WMI_TLV_TARGET) { + void *buf_ptr = (void *)qdf_nbuf_data(buf); + + if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id) + != 0) { + wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d", + func, line, cmd_id); + return QDF_STATUS_E_INVAL; + } + } +#endif + + if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) { + wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory", + func, line, cmd_id); + return QDF_STATUS_E_NOMEM; + } + + qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR)); + WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id); + + qdf_atomic_inc(&wmi_handle->pending_cmds); + if (qdf_atomic_read(&wmi_handle->pending_cmds) >= + wmi_handle->wmi_max_cmds) { + wmi_nofl_err("hostcredits = %d", + wmi_get_host_credits(wmi_handle)); + htc_dump_counter_info(wmi_handle->htc_handle); + qdf_atomic_dec(&wmi_handle->pending_cmds); + wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached", + func, line, wmi_handle->wmi_max_cmds); + wmi_unified_debug_dump(wmi_handle); + htc_ce_tasklet_debug_dump(wmi_handle->htc_handle); + qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc, + QDF_WMI_EXCEED_MAX_PENDING_CMDS); + return QDF_STATUS_E_BUSY; + } + + pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line); + if (!pkt) { + qdf_atomic_dec(&wmi_handle->pending_cmds); + return QDF_STATUS_E_NOMEM; + } + + if (!rtpm_inprogress) + wmi_set_system_pm_pkt_tag(&htc_tag, buf, cmd_id); + + SET_HTC_PACKET_INFO_TX(pkt, + NULL, + qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR), + wmi_handle->wmi_endpoint_id, htc_tag); + + SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf); + wmi_log_cmd_id(cmd_id, htc_tag); + wmi_ext_dbg_msg_cmd_record(wmi_handle, + qdf_nbuf_data(buf), qdf_nbuf_len(buf)); +#ifdef WMI_INTERFACE_EVENT_LOGGING + if (wmi_handle->log_info.wmi_logging_enable) { + qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); + /* + * Record 16 bytes of WMI cmd data - + * exclude TLV and WMI headers + * + * WMI mgmt command already recorded in wmi_mgmt_cmd_record + */ + if (wmi_handle->ops->is_management_record(cmd_id) == false) { + uint8_t *tmpbuf = (uint8_t *)qdf_nbuf_data(buf) + + wmi_handle->soc->buf_offset_command; + + WMI_COMMAND_RECORD(wmi_handle, cmd_id, tmpbuf); + wmi_specific_cmd_record(wmi_handle, cmd_id, tmpbuf); + } + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); + } +#endif + + status = htc_send_pkt(wmi_handle->htc_handle, pkt); + + if (QDF_STATUS_SUCCESS != status) { + qdf_atomic_dec(&wmi_handle->pending_cmds); + wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d", + func, line, status); + qdf_mem_free(pkt); + return status; + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wmi_unified_cmd_send_fl); + +/** + * wmi_unified_get_event_handler_ix() - gives event handler's index + * @wmi_handle: handle to wmi + * @event_id: wmi event id + * + * Return: event handler's index + */ +static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle, + uint32_t event_id) +{ + uint32_t idx = 0; + int32_t invalid_idx = -1; + struct wmi_soc *soc = wmi_handle->soc; + + for (idx = 0; (idx < soc->max_event_idx && + idx < WMI_UNIFIED_MAX_EVENT); ++idx) { + if (wmi_handle->event_id[idx] == event_id && + wmi_handle->event_handler[idx]) { + return idx; + } + } + + return invalid_idx; +} + +/** + * wmi_unified_register_event() - register wmi event handler + * @wmi_handle: handle to wmi + * @event_id: wmi event id + * @handler_func: wmi event handler function + * + * Return: 0 on success + */ +int wmi_unified_register_event(wmi_unified_t wmi_handle, + uint32_t event_id, + wmi_unified_event_handler handler_func) +{ + uint32_t idx = 0; + uint32_t evt_id; + struct wmi_soc *soc; + + if (!wmi_handle) { + WMI_LOGE("WMI handle is NULL"); + return QDF_STATUS_E_FAILURE; + } + + soc = wmi_handle->soc; + + if (event_id >= wmi_events_max || + wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO, + "%s: Event id %d is unavailable", + __func__, event_id); + return QDF_STATUS_E_FAILURE; + } + evt_id = wmi_handle->wmi_events[event_id]; + if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) { + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO, + "%s : event handler already registered 0x%x", + __func__, evt_id); + return QDF_STATUS_E_FAILURE; + } + if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) { + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, + "%s : no more event handlers 0x%x", + __func__, evt_id); + return QDF_STATUS_E_FAILURE; + } + idx = soc->max_event_idx; + wmi_handle->event_handler[idx] = handler_func; + wmi_handle->event_id[idx] = evt_id; + qdf_spin_lock_bh(&soc->ctx_lock); + wmi_handle->ctx[idx] = WMI_RX_UMAC_CTX; + qdf_spin_unlock_bh(&soc->ctx_lock); + soc->max_event_idx++; + + return 0; +} + +/** + * wmi_unified_register_event_handler() - register wmi event handler + * @wmi_handle: handle to wmi + * @event_id: wmi event id + * @handler_func: wmi event handler function + * @rx_ctx: rx execution context for wmi rx events + * + * This API is to support legacy requirements. Will be deprecated in future. + * Return: 0 on success + */ +int wmi_unified_register_event_handler(wmi_unified_t wmi_handle, + wmi_conv_event_id event_id, + wmi_unified_event_handler handler_func, + uint8_t rx_ctx) +{ + uint32_t idx = 0; + uint32_t evt_id; + struct wmi_soc *soc; + + if (!wmi_handle) { + WMI_LOGE("WMI handle is NULL"); + return QDF_STATUS_E_FAILURE; + } + + soc = wmi_handle->soc; + + if (event_id >= wmi_events_max || + wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO, + "%s: Event id %d is unavailable", + __func__, event_id); + return QDF_STATUS_E_FAILURE; + } + evt_id = wmi_handle->wmi_events[event_id]; + + if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) { + wmi_info("event handler already registered 0x%x", evt_id); + return QDF_STATUS_E_FAILURE; + } + if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) { + WMI_LOGE("no more event handlers 0x%x", + evt_id); + return QDF_STATUS_E_FAILURE; + } + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + "Registered event handler for event 0x%8x", evt_id); + idx = soc->max_event_idx; + wmi_handle->event_handler[idx] = handler_func; + wmi_handle->event_id[idx] = evt_id; + qdf_spin_lock_bh(&soc->ctx_lock); + wmi_handle->ctx[idx] = rx_ctx; + qdf_spin_unlock_bh(&soc->ctx_lock); + soc->max_event_idx++; + + return 0; +} +qdf_export_symbol(wmi_unified_register_event_handler); + +/** + * wmi_unified_unregister_event() - unregister wmi event handler + * @wmi_handle: handle to wmi + * @event_id: wmi event id + * + * Return: 0 on success + */ +int wmi_unified_unregister_event(wmi_unified_t wmi_handle, + uint32_t event_id) +{ + uint32_t idx = 0; + uint32_t evt_id; + struct wmi_soc *soc = wmi_handle->soc; + + if (event_id >= wmi_events_max || + wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO, + "%s: Event id %d is unavailable", + __func__, event_id); + return QDF_STATUS_E_FAILURE; + } + evt_id = wmi_handle->wmi_events[event_id]; + + idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id); + if (idx == -1) { + wmi_warn("event handler is not registered: evt id 0x%x", + evt_id); + return QDF_STATUS_E_FAILURE; + } + wmi_handle->event_handler[idx] = NULL; + wmi_handle->event_id[idx] = 0; + --soc->max_event_idx; + wmi_handle->event_handler[idx] = + wmi_handle->event_handler[soc->max_event_idx]; + wmi_handle->event_id[idx] = + wmi_handle->event_id[soc->max_event_idx]; + + return 0; +} + +/** + * wmi_unified_unregister_event_handler() - unregister wmi event handler + * @wmi_handle: handle to wmi + * @event_id: wmi event id + * + * Return: 0 on success + */ +int wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle, + wmi_conv_event_id event_id) +{ + uint32_t idx = 0; + uint32_t evt_id; + struct wmi_soc *soc; + + if (!wmi_handle) { + WMI_LOGE("WMI handle is NULL"); + return QDF_STATUS_E_FAILURE; + } + + soc = wmi_handle->soc; + + if (event_id >= wmi_events_max || + wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { + wmi_err("Event id %d is unavailable", event_id); + return QDF_STATUS_E_FAILURE; + } + evt_id = wmi_handle->wmi_events[event_id]; + + idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id); + if (idx == -1) { + wmi_err("event handler is not registered: evt id 0x%x", + evt_id); + return QDF_STATUS_E_FAILURE; + } + wmi_handle->event_handler[idx] = NULL; + wmi_handle->event_id[idx] = 0; + --soc->max_event_idx; + wmi_handle->event_handler[idx] = + wmi_handle->event_handler[soc->max_event_idx]; + wmi_handle->event_id[idx] = + wmi_handle->event_id[soc->max_event_idx]; + + return 0; +} +qdf_export_symbol(wmi_unified_unregister_event_handler); + +void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle, + void *evt_buf) +{ + + qdf_spin_lock_bh(&wmi_handle->eventq_lock); + qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf); + qdf_spin_unlock_bh(&wmi_handle->eventq_lock); + qdf_queue_work(0, wmi_handle->wmi_rx_work_queue, + &wmi_handle->rx_event_work); + + return; +} + +qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx); + +uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi) +{ + return qdf_atomic_read(&wmi->critical_events_in_flight); +} + +static bool +wmi_is_event_critical(struct wmi_unified *wmi_handle, uint32_t event_id) +{ + if (wmi_handle->wmi_events[wmi_roam_synch_event_id] == event_id) + return true; + + return false; +} + +static void wmi_discard_fw_event(struct scheduler_msg *msg) +{ + struct wmi_process_fw_event_params *event_param; + + if (!msg->bodyptr) + return; + + event_param = (struct wmi_process_fw_event_params *)msg->bodyptr; + qdf_nbuf_free(event_param->evt_buf); + qdf_mem_free(msg->bodyptr); + msg->bodyptr = NULL; + msg->bodyval = 0; + msg->type = 0; +} + +static QDF_STATUS wmi_process_fw_event_handler(struct scheduler_msg *msg) +{ + struct wmi_process_fw_event_params *params = + (struct wmi_process_fw_event_params *)msg->bodyptr; + struct wmi_unified *wmi_handle; + uint32_t event_id; + + wmi_handle = (struct wmi_unified *)params->wmi_handle; + event_id = WMI_GET_FIELD(qdf_nbuf_data(params->evt_buf), + WMI_CMD_HDR, COMMANDID); + wmi_process_fw_event(wmi_handle, params->evt_buf); + + if (wmi_is_event_critical(wmi_handle, event_id)) + qdf_atomic_dec(&wmi_handle->critical_events_in_flight); + + qdf_mem_free(msg->bodyptr); + + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_process_fw_event_sched_thread_ctx() - common event handler to serialize + * event processing through scheduler thread + * @ctx: wmi context + * @ev: event buffer + * @rx_ctx: rx execution context + * + * Return: 0 on success, errno on failure + */ +static QDF_STATUS +wmi_process_fw_event_sched_thread_ctx(struct wmi_unified *wmi, + void *ev) +{ + struct wmi_process_fw_event_params *params_buf; + struct scheduler_msg msg = { 0 }; + uint32_t event_id; + + params_buf = qdf_mem_malloc(sizeof(struct wmi_process_fw_event_params)); + if (!params_buf) { + wmi_err("malloc failed"); + qdf_nbuf_free(ev); + return QDF_STATUS_E_NOMEM; + } + + params_buf->wmi_handle = wmi; + params_buf->evt_buf = ev; + + event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf), + WMI_CMD_HDR, COMMANDID); + if (wmi_is_event_critical(wmi, event_id)) + qdf_atomic_inc(&wmi->critical_events_in_flight); + + msg.bodyptr = params_buf; + msg.bodyval = 0; + msg.callback = wmi_process_fw_event_handler; + msg.flush_callback = wmi_discard_fw_event; + + if (QDF_STATUS_SUCCESS != + scheduler_post_message(QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, &msg)) { + qdf_nbuf_free(ev); + qdf_mem_free(params_buf); + return QDF_STATUS_E_FAULT; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_get_pdev_ep: Get wmi handle based on endpoint + * @soc: handle to wmi soc + * @ep: endpoint id + * + * Return: none + */ +static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc, + HTC_ENDPOINT_ID ep) +{ + uint32_t i; + + for (i = 0; i < WMI_MAX_RADIOS; i++) + if (soc->wmi_endpoint_id[i] == ep) + break; + + if (i == WMI_MAX_RADIOS) + return NULL; + + return soc->wmi_pdev[i]; +} + +/** + * wmi_mtrace_rx() - Wrappper function for qdf_mtrace api + * @message_id: 32-Bit Wmi message ID + * @vdev_id: Vdev ID + * @data: Actual message contents + * + * This function converts the 32-bit WMI message ID in 15-bit message ID + * format for qdf_mtrace as in qdf_mtrace message there are only 15 + * bits reserved for message ID. + * out of these 15-bits, 8-bits (From LSB) specifies the WMI_GRP_ID + * and remaining 7-bits specifies the actual WMI command. With this + * notation there can be maximum 256 groups and each group can have + * max 128 commands can be supported. + * + * Return: None + */ +static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data) +{ + uint16_t mtrace_message_id; + + mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) | + (QDF_WMI_MTRACE_GRP_ID(message_id) << + QDF_WMI_MTRACE_CMD_NUM_BITS); + qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA, + mtrace_message_id, vdev_id, data); +} + +/** + * wmi_process_control_rx() - process fw events callbacks + * @wmi_handle: handle to wmi_unified + * @evt_buf: handle to wmi_buf_t + * + * Return: none + */ +static void wmi_process_control_rx(struct wmi_unified *wmi_handle, + wmi_buf_t evt_buf) +{ + struct wmi_soc *soc = wmi_handle->soc; + uint32_t id; + uint32_t idx; + enum wmi_rx_exec_ctx exec_ctx; + + id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID); + idx = wmi_unified_get_event_handler_ix(wmi_handle, id); + if (qdf_unlikely(idx == A_ERROR)) { + wmi_debug("no handler registered for event id 0x%x", id); + qdf_nbuf_free(evt_buf); + return; + } + wmi_mtrace_rx(id, 0xFF, idx); + qdf_spin_lock_bh(&soc->ctx_lock); + exec_ctx = wmi_handle->ctx[idx]; + qdf_spin_unlock_bh(&soc->ctx_lock); + +#ifdef WMI_INTERFACE_EVENT_LOGGING + if (wmi_handle->log_info.wmi_logging_enable) { + uint8_t *data; + data = qdf_nbuf_data(evt_buf); + + qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); + /* Exclude 4 bytes of TLV header */ + if (wmi_handle->ops->is_diag_event(id)) { + WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id, + ((uint8_t *) data + + wmi_handle->soc->buf_offset_event)); + } else if (wmi_handle->ops->is_management_record(id)) { + WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id, + ((uint8_t *) data + + wmi_handle->soc->buf_offset_event)); + } else { + WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data + + wmi_handle->soc->buf_offset_event)); + } + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); + } +#endif + + if (exec_ctx == WMI_RX_WORK_CTX) { + wmi_process_fw_event_worker_thread_ctx + (wmi_handle, evt_buf); + } else if (exec_ctx == WMI_RX_TASKLET_CTX) { + wmi_process_fw_event(wmi_handle, evt_buf); + } else if (exec_ctx == WMI_RX_SERIALIZER_CTX) { + wmi_process_fw_event_sched_thread_ctx(wmi_handle, evt_buf); + } else { + WMI_LOGE("Invalid event context %d", exec_ctx); + qdf_nbuf_free(evt_buf); + } + +} + +/** + * wmi_control_rx() - process fw events callbacks + * @ctx: handle to wmi + * @htc_packet: pointer to htc packet + * + * Return: none + */ +static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet) +{ + struct wmi_soc *soc = (struct wmi_soc *)ctx; + struct wmi_unified *wmi_handle; + wmi_buf_t evt_buf; + + evt_buf = (wmi_buf_t)htc_packet->pPktContext; + + wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint); + if (!wmi_handle) { + WMI_LOGE + ("unable to get wmi_handle to Endpoint %d\n", + htc_packet->Endpoint); + qdf_nbuf_free(evt_buf); + return; + } + + wmi_process_control_rx(wmi_handle, evt_buf); +} + +#ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI +QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle, + wmi_buf_t buf, uint32_t buflen, + uint32_t cmd_id) +{ + QDF_STATUS status; + int32_t ret; + + if (!qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR))) { + wmi_err("Failed to send cmd %x, no memory", cmd_id); + return QDF_STATUS_E_NOMEM; + } + + qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR)); + WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id); + wmi_debug("Sending WMI_CMD_ID: 0x%x over qmi", cmd_id); + status = qdf_wmi_send_recv_qmi(qdf_nbuf_data(buf), + buflen + sizeof(WMI_CMD_HDR), + wmi_handle, + wmi_process_qmi_fw_event); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_nbuf_pull_head(buf, sizeof(WMI_CMD_HDR)); + wmi_warn("WMI send on QMI failed. Retrying WMI on HTC"); + } else { + ret = qdf_atomic_inc_return(&wmi_handle->num_stats_over_qmi); + wmi_debug("num stats over qmi: %d", ret); + wmi_buf_free(buf); + } + + return status; +} + +static int __wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len) +{ + struct wmi_unified *wmi_handle = wmi_cb_ctx; + wmi_buf_t evt_buf; + uint32_t evt_id; + + if (!wmi_handle || !buf) + return -EINVAL; + + evt_buf = wmi_buf_alloc(wmi_handle, len); + if (!evt_buf) + return -ENOMEM; + + qdf_mem_copy(qdf_nbuf_data(evt_buf), buf, len); + evt_id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID); + wmi_debug("Received WMI_EVT_ID: %d over qmi", evt_id); + wmi_process_control_rx(wmi_handle, evt_buf); + + return 0; +} + +int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len) +{ + struct qdf_op_sync *op_sync; + int ret; + + if (qdf_op_protect(&op_sync)) + return -EINVAL; + ret = __wmi_process_qmi_fw_event(wmi_cb_ctx, buf, len); + qdf_op_unprotect(op_sync); + + return ret; +} +#endif + +/** + * wmi_process_fw_event() - process any fw event + * @wmi_handle: wmi handle + * @evt_buf: fw event buffer + * + * This function process fw event in caller context + * + * Return: none + */ +void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf) +{ + __wmi_control_rx(wmi_handle, evt_buf); +} + +/** + * __wmi_control_rx() - process serialize wmi event callback + * @wmi_handle: wmi handle + * @evt_buf: fw event buffer + * + * Return: none + */ +void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf) +{ + uint32_t id; + uint8_t *data; + uint32_t len; + void *wmi_cmd_struct_ptr = NULL; +#ifndef WMI_NON_TLV_SUPPORT + int tlv_ok_status = 0; +#endif + uint32_t idx = 0; + + id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID); + + wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf), + qdf_nbuf_len(evt_buf)); + + if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL) + goto end; + + data = qdf_nbuf_data(evt_buf); + len = qdf_nbuf_len(evt_buf); + +#ifndef WMI_NON_TLV_SUPPORT + if (wmi_handle->target_type == WMI_TLV_TARGET) { + /* Validate and pad(if necessary) the TLVs */ + tlv_ok_status = + wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle, + data, len, id, + &wmi_cmd_struct_ptr); + if (tlv_ok_status != 0) { + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, + "%s: Error: id=0x%x, wmitlv check status=%d", + __func__, id, tlv_ok_status); + goto end; + } + } +#endif + + idx = wmi_unified_get_event_handler_ix(wmi_handle, id); + if (idx == A_ERROR) { + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, + "%s : event handler is not registered: event id 0x%x", + __func__, id); + goto end; + } +#ifdef WMI_INTERFACE_EVENT_LOGGING + if (wmi_handle->log_info.wmi_logging_enable) { + qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); + /* Exclude 4 bytes of TLV header */ + if (wmi_handle->ops->is_diag_event(id)) { + /* + * skip diag event logging in WMI event buffer + * as its already logged in WMI RX event buffer + */ + } else if (wmi_handle->ops->is_management_record(id)) { + /* + * skip wmi mgmt event logging in WMI event buffer + * as its already logged in WMI RX event buffer + */ + } else { + uint8_t *tmpbuf = (uint8_t *)data + + wmi_handle->soc->buf_offset_event; + + WMI_EVENT_RECORD(wmi_handle, id, tmpbuf); + wmi_specific_evt_record(wmi_handle, id, tmpbuf); + } + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); + } +#endif + /* Call the WMI registered event handler */ + if (wmi_handle->target_type == WMI_TLV_TARGET) + wmi_handle->event_handler[idx] (wmi_handle->scn_handle, + wmi_cmd_struct_ptr, len); + else + wmi_handle->event_handler[idx] (wmi_handle->scn_handle, + data, len); + +end: + /* Free event buffer and allocated event tlv */ +#ifndef WMI_NON_TLV_SUPPORT + if (wmi_handle->target_type == WMI_TLV_TARGET) + wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr); +#endif + + qdf_nbuf_free(evt_buf); + +} + +#define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */ + +static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id) +{ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: WLAN_BUG_RCA: Message type %x has exceeded its alloted time of %ds", + __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000); +} + +#ifdef CONFIG_SLUB_DEBUG_ON +static void wmi_workqueue_watchdog_bite(void *arg) +{ + struct wmi_wq_dbg_info *info = arg; + + wmi_workqueue_watchdog_warn(info->wd_msg_type_id); + qdf_print_thread_trace(info->task); + + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Going down for WMI WQ Watchdog Bite!", __func__); + QDF_BUG(0); +} +#else +static inline void wmi_workqueue_watchdog_bite(void *arg) +{ + struct wmi_wq_dbg_info *info = arg; + + wmi_workqueue_watchdog_warn(info->wd_msg_type_id); +} +#endif + +/** + * wmi_rx_event_work() - process rx event in rx work queue context + * @arg: opaque pointer to wmi handle + * + * This function process any fw event to serialize it through rx worker thread. + * + * Return: none + */ +static void wmi_rx_event_work(void *arg) +{ + wmi_buf_t buf; + struct wmi_unified *wmi = arg; + qdf_timer_t wd_timer; + struct wmi_wq_dbg_info info; + + /* initialize WMI workqueue watchdog timer */ + qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite, + &info, QDF_TIMER_TYPE_SW); + qdf_spin_lock_bh(&wmi->eventq_lock); + buf = qdf_nbuf_queue_remove(&wmi->event_queue); + qdf_spin_unlock_bh(&wmi->eventq_lock); + while (buf) { + qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT); + info.wd_msg_type_id = + WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID); + info.wmi_wq = wmi->wmi_rx_work_queue; + info.task = qdf_get_current_task(); + __wmi_control_rx(wmi, buf); + qdf_timer_stop(&wd_timer); + qdf_spin_lock_bh(&wmi->eventq_lock); + buf = qdf_nbuf_queue_remove(&wmi->event_queue); + qdf_spin_unlock_bh(&wmi->eventq_lock); + } + qdf_timer_free(&wd_timer); +} + +#ifdef FEATURE_RUNTIME_PM +/** + * wmi_runtime_pm_init() - initialize runtime pm wmi variables + * @wmi_handle: wmi context + */ +static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle) +{ + qdf_atomic_init(&wmi_handle->runtime_pm_inprogress); +} + +/** + * wmi_set_runtime_pm_inprogress() - set runtime pm progress flag + * @wmi_handle: wmi context + * @val: runtime pm progress flag + */ +void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val) +{ + qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val); +} + +/** + * wmi_get_runtime_pm_inprogress() - get runtime pm progress flag + * @wmi_handle: wmi context + */ +inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle) +{ + return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress); +} +#else +static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle) +{ +} +#endif + +/** + * wmi_unified_get_soc_handle: Get WMI SoC handle + * @param wmi_handle: WMI context got from wmi_attach + * + * return: Pointer to Soc handle + */ +void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle) +{ + return wmi_handle->soc; +} + +/** + * wmi_interface_logging_init: Interface looging init + * @param wmi_handle: Pointer to wmi handle object + * + * return: None + */ +#ifdef WMI_INTERFACE_EVENT_LOGGING +static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle, + uint32_t pdev_idx) +{ + if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) { + qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock); + wmi_debugfs_init(wmi_handle, pdev_idx); + } +} +#else +static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle, + uint32_t pdev_idx) +{ +} +#endif + +/** + * wmi_unified_get_pdev_handle: Get WMI SoC handle + * @param wmi_soc: Pointer to wmi soc object + * @param pdev_idx: pdev index + * + * return: Pointer to wmi handle or NULL on failure + */ +void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx) +{ + struct wmi_unified *wmi_handle; + + if (pdev_idx >= WMI_MAX_RADIOS) + return NULL; + + if (!soc->wmi_pdev[pdev_idx]) { + wmi_handle = + (struct wmi_unified *) qdf_mem_malloc( + sizeof(struct wmi_unified)); + if (!wmi_handle) + return NULL; + + wmi_handle->scn_handle = soc->scn_handle; + wmi_handle->event_id = soc->event_id; + wmi_handle->event_handler = soc->event_handler; + wmi_handle->ctx = soc->ctx; + wmi_handle->ops = soc->ops; + qdf_spinlock_create(&wmi_handle->eventq_lock); + qdf_nbuf_queue_init(&wmi_handle->event_queue); + + qdf_create_work(0, &wmi_handle->rx_event_work, + wmi_rx_event_work, wmi_handle); + wmi_handle->wmi_rx_work_queue = + qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue"); + if (!wmi_handle->wmi_rx_work_queue) { + WMI_LOGE("failed to create wmi_rx_event_work_queue"); + goto error; + } + wmi_handle->wmi_events = soc->wmi_events; + wmi_handle->services = soc->services; + wmi_handle->soc = soc; + wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map; + wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map; + wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map; + wmi_handle->evt_phy_id_map = soc->evt_phy_id_map; + wmi_interface_logging_init(wmi_handle, pdev_idx); + qdf_atomic_init(&wmi_handle->pending_cmds); + qdf_atomic_init(&wmi_handle->is_target_suspended); + wmi_handle->target_type = soc->target_type; + wmi_handle->wmi_max_cmds = soc->wmi_max_cmds; + + soc->wmi_pdev[pdev_idx] = wmi_handle; + } else + wmi_handle = soc->wmi_pdev[pdev_idx]; + + wmi_handle->wmi_stopinprogress = 0; + wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx]; + wmi_handle->htc_handle = soc->htc_handle; + wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx]; + + return wmi_handle; + +error: + qdf_mem_free(wmi_handle); + + return NULL; +} +qdf_export_symbol(wmi_unified_get_pdev_handle); + +static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t); + +void wmi_unified_register_module(enum wmi_target_type target_type, + void (*wmi_attach)(wmi_unified_t wmi_handle)) +{ + if (target_type < WMI_MAX_TARGET_TYPE) + wmi_attach_register[target_type] = wmi_attach; + + return; +} +qdf_export_symbol(wmi_unified_register_module); + +/** + * wmi_wbuff_register() - register wmi with wbuff + * @wmi_handle: handle to wmi + * + * @Return: void + */ +static void wmi_wbuff_register(struct wmi_unified *wmi_handle) +{ + struct wbuff_alloc_request wbuff_alloc[4]; + + wbuff_alloc[0].slot = WBUFF_POOL_0; + wbuff_alloc[0].size = WMI_WBUFF_POOL_0_SIZE; + wbuff_alloc[1].slot = WBUFF_POOL_1; + wbuff_alloc[1].size = WMI_WBUFF_POOL_1_SIZE; + wbuff_alloc[2].slot = WBUFF_POOL_2; + wbuff_alloc[2].size = WMI_WBUFF_POOL_2_SIZE; + wbuff_alloc[3].slot = WBUFF_POOL_3; + wbuff_alloc[3].size = WMI_WBUFF_POOL_3_SIZE; + + wmi_handle->wbuff_handle = wbuff_module_register(wbuff_alloc, 4, + WMI_MIN_HEAD_ROOM, 4); +} + +/** + * wmi_wbuff_deregister() - deregister wmi with wbuff + * @wmi_handle: handle to wmi + * + * @Return: void + */ +static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle) +{ + wbuff_module_deregister(wmi_handle->wbuff_handle); + wmi_handle->wbuff_handle = NULL; +} + +/** + * wmi_unified_attach() - attach for unified WMI + * @scn_handle: handle to SCN + * @osdev: OS device context + * @target_type: TLV or not-TLV based target + * @use_cookie: cookie based allocation enabled/disabled + * @ops: umac rx callbacks + * @psoc: objmgr psoc + * + * @Return: wmi handle. + */ +void *wmi_unified_attach(void *scn_handle, + struct wmi_unified_attach_params *param) +{ + struct wmi_unified *wmi_handle; + struct wmi_soc *soc; + + soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc)); + if (!soc) + return NULL; + + wmi_handle = + (struct wmi_unified *) qdf_mem_malloc( + sizeof(struct wmi_unified)); + if (!wmi_handle) { + qdf_mem_free(soc); + return NULL; + } + wmi_handle->soc = soc; + wmi_handle->soc->soc_idx = param->soc_id; + wmi_handle->soc->is_async_ep = param->is_async_ep; + wmi_handle->event_id = soc->event_id; + wmi_handle->event_handler = soc->event_handler; + wmi_handle->ctx = soc->ctx; + wmi_handle->wmi_events = soc->wmi_events; + wmi_handle->services = soc->services; + wmi_handle->scn_handle = scn_handle; + wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map; + wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map; + wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map; + wmi_handle->evt_phy_id_map = soc->evt_phy_id_map; + soc->scn_handle = scn_handle; + qdf_atomic_init(&wmi_handle->pending_cmds); + qdf_atomic_init(&wmi_handle->is_target_suspended); + qdf_atomic_init(&wmi_handle->num_stats_over_qmi); + wmi_runtime_pm_init(wmi_handle); + qdf_spinlock_create(&wmi_handle->eventq_lock); + qdf_nbuf_queue_init(&wmi_handle->event_queue); + qdf_create_work(0, &wmi_handle->rx_event_work, + wmi_rx_event_work, wmi_handle); + wmi_handle->wmi_rx_work_queue = + qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue"); + if (!wmi_handle->wmi_rx_work_queue) { + WMI_LOGE("failed to create wmi_rx_event_work_queue"); + goto error; + } + wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0); + wmi_handle->target_type = param->target_type; + soc->target_type = param->target_type; + + if (param->target_type >= WMI_MAX_TARGET_TYPE) + goto error; + + if (wmi_attach_register[param->target_type]) { + wmi_attach_register[param->target_type](wmi_handle); + } else { + WMI_LOGE("wmi attach is not registered"); + goto error; + } + /* Assign target cookie capablity */ + wmi_handle->use_cookie = param->use_cookie; + wmi_handle->osdev = param->osdev; + wmi_handle->wmi_stopinprogress = 0; + wmi_handle->wmi_max_cmds = param->max_commands; + soc->wmi_max_cmds = param->max_commands; + /* Increase the ref count once refcount infra is present */ + soc->wmi_psoc = param->psoc; + qdf_spinlock_create(&soc->ctx_lock); + + soc->ops = wmi_handle->ops; + soc->wmi_pdev[0] = wmi_handle; + if (wmi_ext_dbgfs_init(wmi_handle) != QDF_STATUS_SUCCESS) + WMI_LOGE("failed to initialize wmi extended debugfs"); + + wmi_wbuff_register(wmi_handle); + + wmi_hang_event_notifier_register(wmi_handle); + + return wmi_handle; + +error: + qdf_mem_free(soc); + qdf_mem_free(wmi_handle); + + return NULL; +} + +/** + * wmi_unified_detach() - detach for unified WMI + * + * @wmi_handle : handle to wmi. + * + * @Return: none. + */ +void wmi_unified_detach(struct wmi_unified *wmi_handle) +{ + wmi_buf_t buf; + struct wmi_soc *soc; + uint8_t i; + + wmi_hang_event_notifier_unregister(); + + wmi_wbuff_deregister(wmi_handle); + + wmi_ext_dbgfs_deinit(wmi_handle); + + soc = wmi_handle->soc; + for (i = 0; i < WMI_MAX_RADIOS; i++) { + if (soc->wmi_pdev[i]) { + qdf_flush_workqueue(0, + soc->wmi_pdev[i]->wmi_rx_work_queue); + qdf_destroy_workqueue(0, + soc->wmi_pdev[i]->wmi_rx_work_queue); + wmi_debugfs_remove(soc->wmi_pdev[i]); + buf = qdf_nbuf_queue_remove( + &soc->wmi_pdev[i]->event_queue); + while (buf) { + qdf_nbuf_free(buf); + buf = qdf_nbuf_queue_remove( + &soc->wmi_pdev[i]->event_queue); + } + + wmi_log_buffer_free(soc->wmi_pdev[i]); + + /* Free events logs list */ + if (soc->wmi_pdev[i]->events_logs_list) + qdf_mem_free( + soc->wmi_pdev[i]->events_logs_list); + + qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock); + qdf_mem_free(soc->wmi_pdev[i]); + } + } + qdf_spinlock_destroy(&soc->ctx_lock); + + if (soc->wmi_service_bitmap) { + qdf_mem_free(soc->wmi_service_bitmap); + soc->wmi_service_bitmap = NULL; + } + + if (soc->wmi_ext_service_bitmap) { + qdf_mem_free(soc->wmi_ext_service_bitmap); + soc->wmi_ext_service_bitmap = NULL; + } + + /* Decrease the ref count once refcount infra is present */ + soc->wmi_psoc = NULL; + qdf_mem_free(soc); +} + +/** + * wmi_unified_remove_work() - detach for WMI work + * @wmi_handle: handle to WMI + * + * A function that does not fully detach WMI, but just remove work + * queue items associated with it. This is used to make sure that + * before any other processing code that may destroy related contexts + * (HTC, etc), work queue processing on WMI has already been stopped. + * + * Return: None + */ +void +wmi_unified_remove_work(struct wmi_unified *wmi_handle) +{ + wmi_buf_t buf; + + qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue); + qdf_spin_lock_bh(&wmi_handle->eventq_lock); + buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue); + while (buf) { + qdf_nbuf_free(buf); + buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue); + } + qdf_spin_unlock_bh(&wmi_handle->eventq_lock); +} + +/** + * wmi_htc_tx_complete() - Process htc tx completion + * + * @ctx: handle to wmi + * @htc_packet: pointer to htc packet + * + * @Return: none. + */ +static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt) +{ + struct wmi_soc *soc = (struct wmi_soc *) ctx; + wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt); + u_int8_t *buf_ptr; + u_int32_t len; + struct wmi_unified *wmi_handle; +#ifdef WMI_INTERFACE_EVENT_LOGGING + uint32_t cmd_id; +#endif + + ASSERT(wmi_cmd_buf); + wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint); + if (!wmi_handle) { + WMI_LOGE("%s: Unable to get wmi handle\n", __func__); + QDF_ASSERT(0); + return; + } +#ifdef WMI_INTERFACE_EVENT_LOGGING + if (wmi_handle && wmi_handle->log_info.wmi_logging_enable) { + cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), + WMI_CMD_HDR, COMMANDID); + + qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); + /* Record 16 bytes of WMI cmd tx complete data + - exclude TLV and WMI headers */ + if (wmi_handle->ops->is_management_record(cmd_id)) { + WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id, + qdf_nbuf_data(wmi_cmd_buf) + + wmi_handle->soc->buf_offset_command); + } else { + WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id, + qdf_nbuf_data(wmi_cmd_buf) + + wmi_handle->soc->buf_offset_command); + } + + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); + } +#endif + buf_ptr = (u_int8_t *) wmi_buf_data(wmi_cmd_buf); + len = qdf_nbuf_len(wmi_cmd_buf); + qdf_mem_zero(buf_ptr, len); + wmi_buf_free(wmi_cmd_buf); + qdf_mem_free(htc_pkt); + qdf_atomic_dec(&wmi_handle->pending_cmds); +} + +#ifdef FEATURE_RUNTIME_PM +/** + * wmi_htc_log_pkt() - Print information of WMI command from HTC packet + * + * @ctx: handle of WMI context + * @htc_pkt: handle of HTC packet + * + * @Return: none + */ +static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt) +{ + wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt); + uint32_t cmd_id; + + ASSERT(wmi_cmd_buf); + cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), WMI_CMD_HDR, + COMMANDID); + + WMI_LOGD("WMI command from HTC packet: %s, ID: %d\n", + wmi_id_to_name(cmd_id), cmd_id); +} +#else +static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt) +{ +} +#endif + +/** + * wmi_connect_pdev_htc_service() - WMI API to get connect to HTC service + * + * @wmi_handle: handle to WMI. + * @pdev_idx: Pdev index + * + * @Return: QDF_STATUS + */ +static QDF_STATUS wmi_connect_pdev_htc_service(struct wmi_soc *soc, + uint32_t pdev_idx) +{ + QDF_STATUS status; + struct htc_service_connect_resp response; + struct htc_service_connect_req connect; + + OS_MEMZERO(&connect, sizeof(connect)); + OS_MEMZERO(&response, sizeof(response)); + + /* meta data is unused for now */ + connect.pMetaData = NULL; + connect.MetaDataLength = 0; + /* these fields are the same for all service endpoints */ + connect.EpCallbacks.pContext = soc; + connect.EpCallbacks.EpTxCompleteMultiple = + NULL /* Control path completion ar6000_tx_complete */; + connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */; + connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */; + connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */; + connect.EpCallbacks.EpTxComplete = + wmi_htc_tx_complete /* ar6000_tx_queue_full */; + connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt; + + /* connect to control service */ + connect.service_id = soc->svc_ids[pdev_idx]; + status = htc_connect_service(soc->htc_handle, &connect, &response); + + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to connect to WMI CONTROL service status:%d\n", + status); + return status; + } + + if (soc->is_async_ep) + htc_set_async_ep(soc->htc_handle, response.Endpoint, true); + + soc->wmi_endpoint_id[pdev_idx] = response.Endpoint; + soc->max_msg_len[pdev_idx] = response.MaxMsgLength; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle, + HTC_HANDLE htc_handle) +{ + uint32_t i; + uint8_t wmi_ep_count; + + wmi_handle->soc->htc_handle = htc_handle; + + wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle); + if (wmi_ep_count > WMI_MAX_RADIOS) + return QDF_STATUS_E_FAULT; + + for (i = 0; i < wmi_ep_count; i++) + wmi_connect_pdev_htc_service(wmi_handle->soc, i); + + wmi_handle->htc_handle = htc_handle; + wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0]; + wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0]; + + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_get_host_credits() - WMI API to get updated host_credits + * + * @wmi_handle: handle to WMI. + * + * @Return: updated host_credits. + */ +int wmi_get_host_credits(wmi_unified_t wmi_handle) +{ + int host_credits = 0; + + htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle, + &host_credits); + return host_credits; +} + +/** + * wmi_get_pending_cmds() - WMI API to get WMI Pending Commands in the HTC + * queue + * + * @wmi_handle: handle to WMI. + * + * @Return: Pending Commands in the HTC queue. + */ +int wmi_get_pending_cmds(wmi_unified_t wmi_handle) +{ + return qdf_atomic_read(&wmi_handle->pending_cmds); +} + +/** + * wmi_set_target_suspend() - WMI API to set target suspend state + * + * @wmi_handle: handle to WMI. + * @val: suspend state boolean. + * + * @Return: none. + */ +void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val) +{ + qdf_atomic_set(&wmi_handle->is_target_suspended, val); +} + +/** + * wmi_is_target_suspended() - WMI API to check target suspend state + * @wmi_handle: handle to WMI. + * + * WMI API to check target suspend state + * + * Return: true if target is suspended, else false. + */ +bool wmi_is_target_suspended(struct wmi_unified *wmi_handle) +{ + return qdf_atomic_read(&wmi_handle->is_target_suspended); +} + +#ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI +void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val) +{ + wmi_handle->is_qmi_stats_enabled = val; +} + +bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle) +{ + return wmi_handle->is_qmi_stats_enabled; +} +#endif + +/** + * WMI API to set crash injection state + * @param wmi_handle: handle to WMI. + * @param val: crash injection state boolean. + */ +void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag) +{ + wmi_handle->tag_crash_inject = flag; +} + +/** + * WMI API to set bus suspend state + * @param wmi_handle: handle to WMI. + * @param val: suspend state boolean. + */ +void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val) +{ + qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val); +} + +void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val) +{ + wmi_handle->tgt_force_assert_enable = val; +} + +/** + * wmi_stop() - generic function to block unified WMI command + * @wmi_handle: handle to WMI. + * + * @Return: success always. + */ +int +wmi_stop(wmi_unified_t wmi_handle) +{ + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO, + "WMI Stop"); + wmi_handle->wmi_stopinprogress = 1; + return 0; +} + +/** + * wmi_start() - generic function to allow unified WMI command + * @wmi_handle: handle to WMI. + * + * @Return: success always. + */ +int +wmi_start(wmi_unified_t wmi_handle) +{ + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO, + "WMI Start"); + wmi_handle->wmi_stopinprogress = 0; + return 0; +} + +/** + * API to flush all the previous packets associated with the wmi endpoint + * + * @param wmi_handle : handle to WMI. + */ +void +wmi_flush_endpoint(wmi_unified_t wmi_handle) +{ + htc_flush_endpoint(wmi_handle->htc_handle, + wmi_handle->wmi_endpoint_id, 0); +} +qdf_export_symbol(wmi_flush_endpoint); + +/** + * wmi_pdev_id_conversion_enable() - API to enable pdev_id/phy_id conversion + * in WMI. By default pdev_id conversion is not done in WMI. + * This API can be used enable conversion in WMI. + * @param wmi_handle : handle to WMI + * @param pdev_map : pointer to pdev_map + * @size : size of pdev_id_map + * Return none + */ +void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle, + uint32_t *pdev_id_map, + uint8_t size) +{ + if (wmi_handle->target_type == WMI_TLV_TARGET) + wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle, + pdev_id_map, + size); +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_action_oui_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_action_oui_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..7c587ae35784fc6d12db49962299d5d8ec196877 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_action_oui_tlv.c @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wmi_unified_action_oui_tlv.h" + +bool wmi_get_action_oui_id(enum action_oui_id action_id, + wmi_vendor_oui_action_id *id) +{ + switch (action_id) { + + case ACTION_OUI_CONNECT_1X1: + *id = WMI_VENDOR_OUI_ACTION_CONNECTION_1X1; + return true; + + case ACTION_OUI_ITO_EXTENSION: + *id = WMI_VENDOR_OUI_ACTION_ITO_EXTENSION; + return true; + + case ACTION_OUI_CCKM_1X1: + *id = WMI_VENDOR_OUI_ACTION_CCKM_1X1; + return true; + + case ACTION_OUI_ITO_ALTERNATE: + *id = WMI_VENDOR_OUI_ACTION_ALT_ITO; + return true; + + case ACTION_OUI_SWITCH_TO_11N_MODE: + *id = WMI_VENDOR_OUI_ACTION_SWITCH_TO_11N_MODE; + return true; + + case ACTION_OUI_CONNECT_1X1_WITH_1_CHAIN: + *id = WMI_VENDOR_OUI_ACTION_CONNECTION_1X1_NUM_TX_RX_CHAINS_1; + return true; + + case ACTION_OUI_DISABLE_AGGRESSIVE_TX: + *id = WMI_VENDOR_OUI_ACTION_DISABLE_AGGRESSIVE_TX; + return true; + + case ACTION_OUI_DISABLE_TWT: + *id = WMI_VENDOR_OUI_ACTION_DISABLE_FW_TRIGGERED_TWT; + return true; + + default: + return false; + } +} + +uint32_t wmi_get_action_oui_info_mask(uint32_t info_mask) +{ + uint32_t info_presence = 0; + + if (info_mask & ACTION_OUI_INFO_OUI) + info_presence |= WMI_BEACON_INFO_PRESENCE_OUI_EXT; + + if (info_mask & ACTION_OUI_INFO_MAC_ADDRESS) + info_presence |= WMI_BEACON_INFO_PRESENCE_MAC_ADDRESS; + + if (info_mask & ACTION_OUI_INFO_AP_CAPABILITY_NSS) + info_presence |= WMI_BEACON_INFO_PRESENCE_AP_CAPABILITY_NSS; + + if (info_mask & ACTION_OUI_INFO_AP_CAPABILITY_HT) + info_presence |= WMI_BEACON_INFO_PRESENCE_AP_CAPABILITY_HT; + + if (info_mask & ACTION_OUI_INFO_AP_CAPABILITY_VHT) + info_presence |= WMI_BEACON_INFO_PRESENCE_AP_CAPABILITY_VHT; + + if (info_mask & ACTION_OUI_INFO_AP_CAPABILITY_BAND) + info_presence |= WMI_BEACON_INFO_PRESENCE_AP_CAPABILITY_BAND; + + return info_presence; +} + +void wmi_fill_oui_extensions(struct action_oui_extension *extension, + uint32_t no_oui_extns, + wmi_vendor_oui_ext *cmd_ext) +{ + uint32_t i; + uint32_t buffer_length; + + for (i = 0; i < no_oui_extns; i++) { + WMITLV_SET_HDR(&cmd_ext->tlv_header, + WMITLV_TAG_STRUC_wmi_vendor_oui_ext, + WMITLV_GET_STRUCT_TLVLEN(wmi_vendor_oui_ext)); + cmd_ext->info_presence_bit_mask = + wmi_get_action_oui_info_mask(extension->info_mask); + + cmd_ext->oui_header_length = extension->oui_length; + cmd_ext->oui_data_length = extension->data_length; + cmd_ext->mac_address_length = extension->mac_addr_length; + cmd_ext->capability_data_length = + extension->capability_length; + + buffer_length = extension->oui_length + + extension->data_length + + extension->data_mask_length + + extension->mac_addr_length + + extension->mac_mask_length + + extension->capability_length; + + cmd_ext->buf_data_length = buffer_length + 1; + + cmd_ext++; + extension++; + } + +} + +QDF_STATUS +wmi_fill_oui_extensions_buffer(struct action_oui_extension *extension, + wmi_vendor_oui_ext *cmd_ext, + uint32_t no_oui_extns, uint32_t rem_var_buf_len, + uint8_t *var_buf) +{ + uint8_t i; + + for (i = 0; i < (uint8_t)no_oui_extns; i++) { + if ((rem_var_buf_len - cmd_ext->buf_data_length) < 0) { + WMI_LOGE(FL("Invalid action oui command length")); + return QDF_STATUS_E_INVAL; + } + + var_buf[0] = i; + var_buf++; + + if (extension->oui_length) { + qdf_mem_copy(var_buf, extension->oui, + extension->oui_length); + var_buf += extension->oui_length; + } + + if (extension->data_length) { + qdf_mem_copy(var_buf, extension->data, + extension->data_length); + var_buf += extension->data_length; + } + + if (extension->data_mask_length) { + qdf_mem_copy(var_buf, extension->data_mask, + extension->data_mask_length); + var_buf += extension->data_mask_length; + } + + if (extension->mac_addr_length) { + qdf_mem_copy(var_buf, extension->mac_addr, + extension->mac_addr_length); + var_buf += extension->mac_addr_length; + } + + if (extension->mac_mask_length) { + qdf_mem_copy(var_buf, extension->mac_mask, + extension->mac_mask_length); + var_buf += extension->mac_mask_length; + } + + if (extension->capability_length) { + qdf_mem_copy(var_buf, extension->capability, + extension->capability_length); + var_buf += extension->capability_length; + } + + rem_var_buf_len -= cmd_ext->buf_data_length; + cmd_ext++; + extension++; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +send_action_oui_cmd_tlv(wmi_unified_t wmi_handle, + struct action_oui_request *req) +{ + wmi_pdev_config_vendor_oui_action_fixed_param *cmd; + wmi_vendor_oui_ext *cmd_ext; + wmi_buf_t wmi_buf; + struct action_oui_extension *extension; + uint32_t len; + uint32_t i; + uint8_t *buf_ptr; + uint32_t no_oui_extns; + uint32_t total_no_oui_extns; + uint32_t var_buf_len = 0; + wmi_vendor_oui_action_id action_id; + bool valid; + uint32_t rem_var_buf_len; + QDF_STATUS status; + + if (!req) { + WMI_LOGE(FL("action oui is empty")); + return QDF_STATUS_E_INVAL; + } + + no_oui_extns = req->no_oui_extensions; + total_no_oui_extns = req->total_no_oui_extensions; + + len = sizeof(*cmd); + len += WMI_TLV_HDR_SIZE; /* Array of wmi_vendor_oui_ext structures */ + + if (!no_oui_extns || + no_oui_extns > WMI_MAX_VENDOR_OUI_ACTION_SUPPORTED_PER_ACTION || + (total_no_oui_extns > WMI_VENDOR_OUI_ACTION_MAX_ACTION_ID * + WMI_MAX_VENDOR_OUI_ACTION_SUPPORTED_PER_ACTION)) { + WMI_LOGE(FL("Invalid number of action oui extensions")); + return QDF_STATUS_E_INVAL; + } + + valid = wmi_get_action_oui_id(req->action_id, &action_id); + if (!valid) { + WMI_LOGE(FL("Invalid action id")); + return QDF_STATUS_E_INVAL; + } + + len += no_oui_extns * sizeof(*cmd_ext); + len += WMI_TLV_HDR_SIZE; /* Variable length buffer */ + + extension = req->extension; + for (i = 0; i < no_oui_extns; i++) { + var_buf_len += extension->oui_length + + extension->data_length + + extension->data_mask_length + + extension->mac_addr_length + + extension->mac_mask_length + + extension->capability_length; + extension++; + } + + var_buf_len += no_oui_extns; /* to store indexes */ + rem_var_buf_len = var_buf_len; + var_buf_len = (var_buf_len + 3) & ~0x3; + len += var_buf_len; + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE(FL("Failed to allocate wmi buffer")); + return QDF_STATUS_E_FAILURE; + } + + buf_ptr = (uint8_t *)wmi_buf_data(wmi_buf); + cmd = (wmi_pdev_config_vendor_oui_action_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_config_vendor_oui_action_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_config_vendor_oui_action_fixed_param)); + + cmd->action_id = action_id; + cmd->total_num_vendor_oui = total_no_oui_extns; + cmd->num_vendor_oui_ext = no_oui_extns; + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + no_oui_extns * sizeof(*cmd_ext)); + buf_ptr += WMI_TLV_HDR_SIZE; + cmd_ext = (wmi_vendor_oui_ext *)buf_ptr; + wmi_fill_oui_extensions(req->extension, no_oui_extns, cmd_ext); + + buf_ptr += no_oui_extns * sizeof(*cmd_ext); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, var_buf_len); + buf_ptr += WMI_TLV_HDR_SIZE; + status = wmi_fill_oui_extensions_buffer(req->extension, + cmd_ext, no_oui_extns, + rem_var_buf_len, buf_ptr); + if (!QDF_IS_STATUS_SUCCESS(status)) { + wmi_buf_free(wmi_buf); + wmi_buf = NULL; + return QDF_STATUS_E_INVAL; + } + + buf_ptr += var_buf_len; + + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID)) { + WMI_LOGE(FL("WMI_PDEV_CONFIG_VENDOR_OUI_ACTION send fail")); + wmi_buf_free(wmi_buf); + wmi_buf = NULL; + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_apf_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_apf_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..4c235aeaa331e206563a95112e4892e7407afb43 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_apf_tlv.c @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wmi_unified_apf_tlv.h" +#include "wmi.h" + +QDF_STATUS wmi_send_set_active_apf_mode_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, + enum wmi_host_active_apf_mode + ucast_mode, + enum wmi_host_active_apf_mode + mcast_bcast_mode) +{ + const WMITLV_TAG_ID tag_id = + WMITLV_TAG_STRUC_wmi_bpf_set_vdev_active_mode_cmd_fixed_param; + const uint32_t tlv_len = WMITLV_GET_STRUCT_TLVLEN( + wmi_bpf_set_vdev_active_mode_cmd_fixed_param); + QDF_STATUS status; + wmi_bpf_set_vdev_active_mode_cmd_fixed_param *cmd; + wmi_buf_t buf; + + WMI_LOGD("Sending WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID(%u, %d, %d)", + vdev_id, ucast_mode, mcast_bcast_mode); + + /* allocate command buffer */ + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + /* set TLV header */ + cmd = (wmi_bpf_set_vdev_active_mode_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, tag_id, tlv_len); + + /* populate data */ + cmd->vdev_id = vdev_id; + cmd->uc_mode = ucast_mode; + cmd->mcbc_mode = mcast_bcast_mode; + + /* send to FW */ + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID:%d", + status); + wmi_buf_free(buf); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wmi_send_apf_enable_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, + bool enable) +{ + wmi_bpf_set_vdev_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_bpf_set_vdev_enable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bpf_set_vdev_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_bpf_set_vdev_enable_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->is_enabled = enable; + + if (wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_BPF_SET_VDEV_ENABLE_CMDID)) { + WMI_LOGE("%s: Failed to enable/disable APF interpreter", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wmi_send_apf_write_work_memory_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_apf_write_memory_params + *apf_write_params) +{ + wmi_bpf_set_vdev_work_memory_cmd_fixed_param *cmd; + uint32_t wmi_buf_len; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t aligned_len = 0; + + wmi_buf_len = sizeof(*cmd); + if (apf_write_params->length) { + aligned_len = roundup(apf_write_params->length, + sizeof(A_UINT32)); + + wmi_buf_len += WMI_TLV_HDR_SIZE + aligned_len; + + } + + buf = wmi_buf_alloc(wmi_handle, wmi_buf_len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = wmi_buf_data(buf); + cmd = (wmi_bpf_set_vdev_work_memory_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bpf_set_vdev_work_memory_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_bpf_set_vdev_work_memory_cmd_fixed_param)); + cmd->vdev_id = apf_write_params->vdev_id; + cmd->bpf_version = apf_write_params->apf_version; + cmd->program_len = apf_write_params->program_len; + cmd->addr_offset = apf_write_params->addr_offset; + cmd->length = apf_write_params->length; + + if (apf_write_params->length) { + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + aligned_len); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, apf_write_params->buf, + apf_write_params->length); + } + + if (wmi_unified_cmd_send(wmi_handle, buf, wmi_buf_len, + WMI_BPF_SET_VDEV_WORK_MEMORY_CMDID)) { + WMI_LOGE("%s: Failed to write APF work memory", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wmi_send_apf_read_work_memory_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_apf_read_memory_params + *apf_read_params) +{ + wmi_bpf_get_vdev_work_memory_cmd_fixed_param *cmd; + wmi_buf_t buf; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_bpf_get_vdev_work_memory_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bpf_get_vdev_work_memory_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_bpf_get_vdev_work_memory_cmd_fixed_param)); + cmd->vdev_id = apf_read_params->vdev_id; + cmd->addr_offset = apf_read_params->addr_offset; + cmd->length = apf_read_params->length; + + if (wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_BPF_GET_VDEV_WORK_MEMORY_CMDID)) { + WMI_LOGE("%s: Failed to get APF work memory", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wmi_extract_apf_read_memory_resp_event_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_apf_read_memory_resp_event_params + *resp) +{ + WMI_BPF_GET_VDEV_WORK_MEMORY_RESP_EVENTID_param_tlvs *param_buf; + wmi_bpf_get_vdev_work_memory_resp_evt_fixed_param *data_event; + + param_buf = evt_buf; + if (!param_buf) { + WMI_LOGE("encrypt decrypt resp evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + data_event = param_buf->fixed_param; + + resp->vdev_id = data_event->vdev_id; + resp->offset = data_event->offset; + resp->more_data = data_event->fragment; + + if (data_event->length > param_buf->num_data) { + WMI_LOGE("FW msg data_len %d more than TLV hdr %d", + data_event->length, + param_buf->num_data); + return QDF_STATUS_E_INVAL; + } + + if (data_event->length && param_buf->data) { + resp->length = data_event->length; + resp->data = (uint8_t *)param_buf->data; + } + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c new file mode 100644 index 0000000000000000000000000000000000000000..ceeccd673f1e73cb23a95af27fe907e107702d7a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c @@ -0,0 +1,3183 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wmi_unified_priv.h" +#include "wmi_unified_param.h" +#include "qdf_module.h" +#include "cdp_txrx_cmn_struct.h" +#include + +static const wmi_host_channel_width mode_to_width[WMI_HOST_MODE_MAX] = { + [WMI_HOST_MODE_11A] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11G] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11B] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11GONLY] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11NA_HT20] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11NG_HT20] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11AC_VHT20] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11AC_VHT20_2G] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11NA_HT40] = WMI_HOST_CHAN_WIDTH_40, + [WMI_HOST_MODE_11NG_HT40] = WMI_HOST_CHAN_WIDTH_40, + [WMI_HOST_MODE_11AC_VHT40] = WMI_HOST_CHAN_WIDTH_40, + [WMI_HOST_MODE_11AC_VHT40_2G] = WMI_HOST_CHAN_WIDTH_40, + [WMI_HOST_MODE_11AC_VHT80] = WMI_HOST_CHAN_WIDTH_80, + [WMI_HOST_MODE_11AC_VHT80_2G] = WMI_HOST_CHAN_WIDTH_80, +#if CONFIG_160MHZ_SUPPORT + [WMI_HOST_MODE_11AC_VHT80_80] = WMI_HOST_CHAN_WIDTH_80P80, + [WMI_HOST_MODE_11AC_VHT160] = WMI_HOST_CHAN_WIDTH_160, +#endif + +#if SUPPORT_11AX + [WMI_HOST_MODE_11AX_HE20] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11AX_HE40] = WMI_HOST_CHAN_WIDTH_40, + [WMI_HOST_MODE_11AX_HE80] = WMI_HOST_CHAN_WIDTH_80, + [WMI_HOST_MODE_11AX_HE80_80] = WMI_HOST_CHAN_WIDTH_80P80, + [WMI_HOST_MODE_11AX_HE160] = WMI_HOST_CHAN_WIDTH_160, + [WMI_HOST_MODE_11AX_HE20_2G] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11AX_HE40_2G] = WMI_HOST_CHAN_WIDTH_40, + [WMI_HOST_MODE_11AX_HE80_2G] = WMI_HOST_CHAN_WIDTH_80, +#endif +}; + +QDF_STATUS wmi_unified_soc_set_hw_mode_cmd(wmi_unified_t wmi_handle, + uint32_t hw_mode_index) +{ + if (wmi_handle->ops->send_pdev_set_hw_mode_cmd) + return wmi_handle->ops->send_pdev_set_hw_mode_cmd( + wmi_handle, + hw_mode_index); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_vdev_create_send(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct vdev_create_params *param) +{ + if (wmi_handle->ops->send_vdev_create_cmd) + return wmi_handle->ops->send_vdev_create_cmd(wmi_handle, + macaddr, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_vdev_delete_send(wmi_unified_t wmi_handle, + uint8_t if_id) +{ + if (wmi_handle->ops->send_vdev_delete_cmd) + return wmi_handle->ops->send_vdev_delete_cmd(wmi_handle, + if_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_vdev_nss_chain_params_send(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct vdev_nss_chains *user_cfg) +{ + if (wmi_handle->ops->send_vdev_nss_chain_params_cmd) + return wmi_handle->ops->send_vdev_nss_chain_params_cmd( + wmi_handle, + vdev_id, + user_cfg); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_vdev_stop_send(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + if (wmi_handle->ops->send_vdev_stop_cmd) + return wmi_handle->ops->send_vdev_stop_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_vdev_down_send(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + if (wmi_handle->ops->send_vdev_down_cmd) + return wmi_handle->ops->send_vdev_down_cmd(wmi_handle, vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_vdev_start_send(wmi_unified_t wmi_handle, + struct vdev_start_params *req) +{ + if (wmi_handle->ops->send_vdev_start_cmd) + return wmi_handle->ops->send_vdev_start_cmd(wmi_handle, req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_vdev_set_nac_rssi_send(wmi_unified_t wmi_handle, + struct vdev_scan_nac_rssi_params *req) +{ + if (wmi_handle->ops->send_vdev_set_nac_rssi_cmd) + return wmi_handle->ops->send_vdev_set_nac_rssi_cmd(wmi_handle, req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_peer_flush_tids_send(wmi_unified_t wmi_handle, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + struct peer_flush_params *param) +{ + if (wmi_handle->ops->send_peer_flush_tids_cmd) + return wmi_handle->ops->send_peer_flush_tids_cmd(wmi_handle, + peer_addr, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_peer_delete_send(wmi_unified_t wmi_handle, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + uint8_t vdev_id) +{ + if (wmi_handle->ops->send_peer_delete_cmd) + return wmi_handle->ops->send_peer_delete_cmd(wmi_handle, + peer_addr, vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_peer_delete_all_send( + wmi_unified_t wmi_hdl, + struct peer_delete_all_params *param) +{ + if (wmi_hdl->ops->send_peer_delete_all_cmd) + return wmi_hdl->ops->send_peer_delete_all_cmd(wmi_hdl, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_set_peer_param_send(wmi_unified_t wmi_handle, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + struct peer_set_params *param) +{ + if (wmi_handle->ops->send_peer_param_cmd) + return wmi_handle->ops->send_peer_param_cmd(wmi_handle, + peer_addr, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_vdev_up_send(wmi_unified_t wmi_handle, + uint8_t bssid[QDF_MAC_ADDR_SIZE], + struct vdev_up_params *params) +{ + if (wmi_handle->ops->send_vdev_up_cmd) + return wmi_handle->ops->send_vdev_up_cmd(wmi_handle, bssid, + params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_peer_create_send(wmi_unified_t wmi_handle, + struct peer_create_params *param) +{ + if (wmi_handle->ops->send_peer_create_cmd) + return wmi_handle->ops->send_peer_create_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_peer_rx_reorder_queue_setup_send( + wmi_unified_t wmi_handle, + struct rx_reorder_queue_setup_params *param) +{ + if (wmi_handle->ops->send_peer_rx_reorder_queue_setup_cmd) + return wmi_handle->ops->send_peer_rx_reorder_queue_setup_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_peer_rx_reorder_queue_remove_send( + wmi_unified_t wmi_handle, + struct rx_reorder_queue_remove_params *param) +{ + if (wmi_handle->ops->send_peer_rx_reorder_queue_remove_cmd) + return wmi_handle->ops->send_peer_rx_reorder_queue_remove_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_SUPPORT_GREEN_AP +QDF_STATUS wmi_unified_green_ap_ps_send(wmi_unified_t wmi_handle, + uint32_t value, uint8_t pdev_id) +{ + if (wmi_handle->ops->send_green_ap_ps_cmd) + return wmi_handle->ops->send_green_ap_ps_cmd(wmi_handle, value, + pdev_id); + + return QDF_STATUS_E_FAILURE; +} +#else +QDF_STATUS wmi_unified_green_ap_ps_send(wmi_unified_t wmi_handle, + uint32_t value, uint8_t pdev_id) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUPPORT_GREEN_AP */ + +QDF_STATUS +wmi_unified_pdev_utf_cmd_send(wmi_unified_t wmi_handle, + struct pdev_utf_params *param, + uint8_t mac_id) +{ + if (wmi_handle->ops->send_pdev_utf_cmd) + return wmi_handle->ops->send_pdev_utf_cmd(wmi_handle, param, + mac_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_pdev_param_send(wmi_unified_t wmi_handle, + struct pdev_params *param, + uint8_t mac_id) +{ + if (wmi_handle->ops->send_pdev_param_cmd) + return wmi_handle->ops->send_pdev_param_cmd(wmi_handle, param, + mac_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_suspend_send(wmi_unified_t wmi_handle, + struct suspend_params *param, + uint8_t mac_id) +{ + if (wmi_handle->ops->send_suspend_cmd) + return wmi_handle->ops->send_suspend_cmd(wmi_handle, param, + mac_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_resume_send(wmi_unified_t wmi_handle, + uint8_t mac_id) +{ + if (wmi_handle->ops->send_resume_cmd) + return wmi_handle->ops->send_resume_cmd(wmi_handle, + mac_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_wow_enable_send(wmi_unified_t wmi_handle, + struct wow_cmd_params *param, + uint8_t mac_id) +{ + if (wmi_handle->ops->send_wow_enable_cmd) + return wmi_handle->ops->send_wow_enable_cmd(wmi_handle, param, + mac_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_wow_wakeup_send(wmi_unified_t wmi_handle) +{ + if (wmi_handle->ops->send_wow_wakeup_cmd) + return wmi_handle->ops->send_wow_wakeup_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_wow_add_wakeup_event_send(wmi_unified_t wmi_handle, + struct wow_add_wakeup_params *param) +{ + if (wmi_handle->ops->send_wow_add_wakeup_event_cmd) + return wmi_handle->ops->send_wow_add_wakeup_event_cmd( + wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_wow_add_wakeup_pattern_send( + wmi_unified_t wmi_handle, + struct wow_add_wakeup_pattern_params *param) +{ + if (wmi_handle->ops->send_wow_add_wakeup_pattern_cmd) + return wmi_handle->ops->send_wow_add_wakeup_pattern_cmd( + wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_wow_remove_wakeup_pattern_send( + wmi_unified_t wmi_handle, + struct wow_remove_wakeup_pattern_params *param) +{ + if (wmi_handle->ops->send_wow_remove_wakeup_pattern_cmd) + return wmi_handle->ops->send_wow_remove_wakeup_pattern_cmd( + wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_ap_ps_cmd_send(wmi_unified_t wmi_handle, + uint8_t *peer_addr, + struct ap_ps_params *param) +{ + if (wmi_handle->ops->send_set_ap_ps_param_cmd) + return wmi_handle->ops->send_set_ap_ps_param_cmd(wmi_handle, + peer_addr, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_sta_ps_cmd_send(wmi_unified_t wmi_handle, + struct sta_ps_params *param) +{ + if (wmi_handle->ops->send_set_sta_ps_param_cmd) + return wmi_handle->ops->send_set_sta_ps_param_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_crash_inject(wmi_unified_t wmi_handle, + struct crash_inject *param) +{ + if (wmi_handle->ops->send_crash_inject_cmd) + return wmi_handle->ops->send_crash_inject_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_FW_LOG_PARSING +QDF_STATUS +wmi_unified_dbglog_cmd_send(wmi_unified_t wmi_handle, + struct dbglog_params *dbglog_param) +{ + if (wmi_handle->ops->send_dbglog_cmd) + return wmi_handle->ops->send_dbglog_cmd(wmi_handle, + dbglog_param); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_unified_dbglog_cmd_send); +#endif + +QDF_STATUS +wmi_unified_vdev_set_param_send(wmi_unified_t wmi_handle, + struct vdev_set_params *param) +{ + if (wmi_handle->ops->send_vdev_set_param_cmd) + return wmi_handle->ops->send_vdev_set_param_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_sifs_trigger_send(wmi_unified_t wmi_handle, + struct sifs_trigger_param *param) +{ + if (wmi_handle->ops->send_vdev_sifs_trigger_cmd) + return wmi_handle->ops->send_vdev_sifs_trigger_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_stats_request_send() - WMI request stats function + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold stats request parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_stats_request_send(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct stats_request_params *param) +{ + if (wmi_handle->ops->send_stats_request_cmd) + return wmi_handle->ops->send_stats_request_cmd(wmi_handle, + macaddr, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_packet_log_enable_send(wmi_unified_t wmi_handle, + WMI_HOST_PKTLOG_EVENT PKTLOG_EVENT, + uint8_t mac_id) +{ + if (wmi_handle->ops->send_packet_log_enable_cmd) + return wmi_handle->ops->send_packet_log_enable_cmd(wmi_handle, + PKTLOG_EVENT, mac_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_peer_based_pktlog_send(wmi_unified_t wmi_handle, + uint8_t *macaddr, + uint8_t mac_id, + uint8_t enb_dsb) +{ + if (wmi_handle->ops->send_peer_based_pktlog_cmd) + return wmi_handle->ops->send_peer_based_pktlog_cmd + (wmi_handle, macaddr, mac_id, enb_dsb); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_packet_log_disable_send(wmi_unified_t wmi_handle, + uint8_t mac_id) +{ + if (wmi_handle->ops->send_packet_log_disable_cmd) + return wmi_handle->ops->send_packet_log_disable_cmd(wmi_handle, + mac_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_fd_tmpl_send_cmd(wmi_unified_t wmi_handle, + struct fils_discovery_tmpl_params *param) +{ + if (wmi_handle->ops->send_fd_tmpl_cmd) + return wmi_handle->ops->send_fd_tmpl_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_beacon_tmpl_send_cmd(wmi_unified_t wmi_handle, + struct beacon_tmpl_params *param) +{ + if (wmi_handle->ops->send_beacon_tmpl_send_cmd) + return wmi_handle->ops->send_beacon_tmpl_send_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_peer_assoc_send(wmi_unified_t wmi_handle, + struct peer_assoc_params *param) +{ + if (wmi_handle->ops->send_peer_assoc_cmd) + return wmi_handle->ops->send_peer_assoc_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_scan_start_cmd_send(wmi_unified_t wmi_handle, + struct scan_req_params *param) +{ + if (wmi_handle->ops->send_scan_start_cmd) + return wmi_handle->ops->send_scan_start_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_scan_stop_cmd_send(wmi_unified_t wmi_handle, + struct scan_cancel_param *param) +{ + if (wmi_handle->ops->send_scan_stop_cmd) + return wmi_handle->ops->send_scan_stop_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_scan_chan_list_cmd_send(wmi_unified_t wmi_handle, + struct scan_chan_list_params *param) +{ + if (wmi_handle->ops->send_scan_chan_list_cmd) + return wmi_handle->ops->send_scan_chan_list_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_mgmt_unified_cmd_send(wmi_unified_t wmi_handle, + struct wmi_mgmt_params *param) +{ + if (wmi_handle->ops->send_mgmt_cmd) + return wmi_handle->ops->send_mgmt_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_offchan_data_tx_cmd_send(wmi_unified_t wmi_handle, + struct wmi_offchan_data_tx_params *param) +{ + if (wmi_handle->ops->send_offchan_data_tx_cmd) + return wmi_handle->ops->send_offchan_data_tx_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_modem_power_state(wmi_unified_t wmi_handle, + uint32_t param_value) +{ + if (wmi_handle->ops->send_modem_power_state_cmd) + return wmi_handle->ops->send_modem_power_state_cmd(wmi_handle, + param_value); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_sta_ps_mode(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint8_t val) +{ + if (wmi_handle->ops->send_set_sta_ps_mode_cmd) + return wmi_handle->ops->send_set_sta_ps_mode_cmd(wmi_handle, + vdev_id, val); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_idle_trigger_monitor(wmi_unified_t wmi_handle, uint8_t val) +{ + if (wmi_handle->ops->send_idle_roam_monitor_cmd) + return wmi_handle->ops->send_idle_roam_monitor_cmd(wmi_handle, + val); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_mimops(wmi_unified_t wmi_handle, uint8_t vdev_id, + int value) +{ + if (wmi_handle->ops->send_set_mimops_cmd) + return wmi_handle->ops->send_set_mimops_cmd(wmi_handle, + vdev_id, value); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_smps_params(wmi_unified_t wmi_handle, + uint8_t vdev_id, + int value) +{ + if (wmi_handle->ops->send_set_smps_params_cmd) + return wmi_handle->ops->send_set_smps_params_cmd(wmi_handle, + vdev_id, value); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_get_temperature(wmi_unified_t wmi_handle) +{ + if (wmi_handle->ops->send_get_temperature_cmd) + return wmi_handle->ops->send_get_temperature_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_set_sta_uapsd_auto_trig_cmd(wmi_unified_t wmi_handle, + struct sta_uapsd_trig_params *param) +{ + if (wmi_handle->ops->send_set_sta_uapsd_auto_trig_cmd) + return wmi_handle->ops->send_set_sta_uapsd_auto_trig_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_set_thermal_mgmt_cmd(wmi_unified_t wmi_handle, + struct thermal_cmd_params *thermal_info) +{ + if (wmi_handle->ops->send_set_thermal_mgmt_cmd) + return wmi_handle->ops->send_set_thermal_mgmt_cmd(wmi_handle, + thermal_info); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_lro_config_cmd(wmi_unified_t wmi_handle, + struct wmi_lro_config_cmd_t *wmi_lro_cmd) +{ + if (wmi_handle->ops->send_lro_config_cmd) + return wmi_handle->ops->send_lro_config_cmd(wmi_handle, + wmi_lro_cmd); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_peer_rate_report_cmd( + wmi_unified_t wmi_handle, + struct wmi_peer_rate_report_params *rate_report_params) +{ + if (wmi_handle->ops->send_peer_rate_report_cmd) + return wmi_handle->ops->send_peer_rate_report_cmd(wmi_handle, + rate_report_params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_process_update_edca_param( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + bool mu_edca_param, + struct wmi_host_wme_vparams wmm_vparams[WMI_MAX_NUM_AC]) +{ + if (wmi_handle->ops->send_process_update_edca_param_cmd) + return wmi_handle->ops->send_process_update_edca_param_cmd(wmi_handle, + vdev_id, mu_edca_param, wmm_vparams); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_probe_rsp_tmpl_send_cmd( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct wmi_probe_resp_params *probe_rsp_info) +{ + if (wmi_handle->ops->send_probe_rsp_tmpl_send_cmd) + return wmi_handle->ops->send_probe_rsp_tmpl_send_cmd(wmi_handle, + vdev_id, probe_rsp_info); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_setup_install_key_cmd(wmi_unified_t wmi_handle, + struct set_key_params *key_params) +{ + if (wmi_handle->ops->send_setup_install_key_cmd) + return wmi_handle->ops->send_setup_install_key_cmd(wmi_handle, + key_params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_p2p_go_set_beacon_ie_cmd(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint8_t *p2p_ie) +{ + if (wmi_handle->ops->send_p2p_go_set_beacon_ie_cmd) + return wmi_handle->ops->send_p2p_go_set_beacon_ie_cmd(wmi_handle, + vdev_id, p2p_ie); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_scan_probe_setoui_cmd(wmi_unified_t wmi_handle, + struct scan_mac_oui *psetoui) +{ + if (wmi_handle->ops->send_scan_probe_setoui_cmd) + return wmi_handle->ops->send_scan_probe_setoui_cmd(wmi_handle, + psetoui); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef IPA_OFFLOAD +QDF_STATUS +wmi_unified_ipa_offload_control_cmd( + wmi_unified_t wmi_handle, + struct ipa_uc_offload_control_params *ipa_offload) +{ + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + if (wmi_handle->ops->send_ipa_offload_control_cmd) + return wmi_handle->ops->send_ipa_offload_control_cmd(wmi_handle, + ipa_offload); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS wmi_unified_pno_stop_cmd(wmi_unified_t wmi_handle, uint8_t vdev_id) +{ + if (wmi_handle->ops->send_pno_stop_cmd) + return wmi_handle->ops->send_pno_stop_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_WLAN_SCAN_PNO +QDF_STATUS wmi_unified_pno_start_cmd(wmi_unified_t wmi_handle, + struct pno_scan_req_params *pno) +{ + if (wmi_handle->ops->send_pno_start_cmd) + return wmi_handle->ops->send_pno_start_cmd(wmi_handle, + pno); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS wmi_unified_nlo_mawc_cmd(wmi_unified_t wmi_handle, + struct nlo_mawc_params *params) +{ + if (wmi_handle->ops->send_nlo_mawc_cmd) + return wmi_handle->ops->send_nlo_mawc_cmd(wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_FEATURE_LINK_LAYER_STATS +QDF_STATUS wmi_unified_process_ll_stats_clear_cmd(wmi_unified_t wmi_handle, + const struct ll_stats_clear_params *clear_req) +{ + if (wmi_handle->ops->send_process_ll_stats_clear_cmd) + return wmi_handle->ops->send_process_ll_stats_clear_cmd(wmi_handle, + clear_req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_process_ll_stats_set_cmd(wmi_unified_t wmi_handle, + const struct ll_stats_set_params *set_req) +{ + if (wmi_handle->ops->send_process_ll_stats_set_cmd) + return wmi_handle->ops->send_process_ll_stats_set_cmd(wmi_handle, + set_req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_process_ll_stats_get_cmd(wmi_unified_t wmi_handle, + const struct ll_stats_get_params *get_req) +{ + if (wmi_handle->ops->send_process_ll_stats_get_cmd) + return wmi_handle->ops->send_process_ll_stats_get_cmd(wmi_handle, + get_req); + + return QDF_STATUS_E_FAILURE; +} +#endif /* WLAN_FEATURE_LINK_LAYER_STATS */ + +QDF_STATUS wmi_unified_congestion_request_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + if (wmi_handle->ops->send_congestion_cmd) + return wmi_handle->ops->send_congestion_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_snr_request_cmd(wmi_unified_t wmi_handle) +{ + if (wmi_handle->ops->send_snr_request_cmd) + return wmi_handle->ops->send_snr_request_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_snr_cmd(wmi_unified_t wmi_handle, uint8_t vdev_id) +{ + if (wmi_handle->ops->send_snr_cmd) + return wmi_handle->ops->send_snr_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_link_status_req_cmd() - process link status request from UMAC + * @wmi_handle: wmi handle + * @params: get link status params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_link_status_req_cmd(wmi_unified_t wmi_handle, + struct link_status_params *params) +{ + if (wmi_handle->ops->send_link_status_req_cmd) + return wmi_handle->ops->send_link_status_req_cmd(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_SUPPORT_GREEN_AP +QDF_STATUS +wmi_unified_egap_conf_params_cmd(wmi_unified_t wmi_handle, + struct wlan_green_ap_egap_params *egap_params) +{ + if (wmi_handle->ops->send_egap_conf_params_cmd) + return wmi_handle->ops->send_egap_conf_params_cmd(wmi_handle, + egap_params); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS wmi_unified_csa_offload_enable(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + if (wmi_handle->ops->send_csa_offload_enable_cmd) + return wmi_handle->ops->send_csa_offload_enable_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_FEATURE_CIF_CFR +QDF_STATUS +wmi_unified_oem_dma_ring_cfg(wmi_unified_t wmi_handle, + wmi_oem_dma_ring_cfg_req_fixed_param *cfg) +{ + if (wmi_handle->ops->send_oem_dma_cfg_cmd) + return wmi_handle->ops->send_oem_dma_cfg_cmd(wmi_handle, cfg); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS wmi_unified_start_oem_data_cmd(wmi_unified_t wmi_handle, + uint32_t data_len, + uint8_t *data) +{ + if (wmi_handle->ops->send_start_oem_data_cmd) + return wmi_handle->ops->send_start_oem_data_cmd(wmi_handle, + data_len, + data); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_OEM_DATA +QDF_STATUS wmi_unified_start_oemv2_data_cmd(wmi_unified_t wmi_handle, + struct oem_data *params) +{ + if (wmi_handle->ops->send_start_oemv2_data_cmd) + return wmi_handle->ops->send_start_oemv2_data_cmd(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS +wmi_unified_dfs_phyerr_filter_offload_en_cmd(wmi_unified_t wmi_handle, + bool dfs_phyerr_filter_offload) +{ + if (wmi_handle->ops->send_dfs_phyerr_filter_offload_en_cmd) + return wmi_handle->ops->send_dfs_phyerr_filter_offload_en_cmd(wmi_handle, + dfs_phyerr_filter_offload); + + return QDF_STATUS_E_FAILURE; +} + +#if !defined(REMOVE_PKT_LOG) && defined(FEATURE_PKTLOG) +QDF_STATUS wmi_unified_pktlog_wmi_send_cmd(wmi_unified_t wmi_handle, + WMI_PKTLOG_EVENT pktlog_event, + uint32_t cmd_id, + uint8_t user_triggered) +{ + if (wmi_handle->ops->send_pktlog_wmi_send_cmd) + return wmi_handle->ops->send_pktlog_wmi_send_cmd(wmi_handle, + pktlog_event, cmd_id, user_triggered); + + return QDF_STATUS_E_FAILURE; +} +#endif /* !REMOVE_PKT_LOG && FEATURE_PKTLOG */ + +QDF_STATUS wmi_unified_stats_ext_req_cmd(wmi_unified_t wmi_handle, + struct stats_ext_params *preq) +{ + if (wmi_handle->ops->send_stats_ext_req_cmd) + return wmi_handle->ops->send_stats_ext_req_cmd(wmi_handle, + preq); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_process_dhcpserver_offload_cmd( + wmi_unified_t wmi_handle, + struct dhcp_offload_info_params *params) +{ + if (wmi_handle->ops->send_process_dhcpserver_offload_cmd) + return wmi_handle->ops->send_process_dhcpserver_offload_cmd(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_regdomain_info_to_fw_cmd(wmi_unified_t wmi_handle, + uint32_t reg_dmn, + uint16_t regdmn2G, + uint16_t regdmn5G, + uint8_t ctl2G, + uint8_t ctl5G) +{ + if (wmi_handle->ops->send_regdomain_info_to_fw_cmd) + return wmi_handle->ops->send_regdomain_info_to_fw_cmd(wmi_handle, + reg_dmn, regdmn2G, + regdmn5G, ctl2G, + ctl5G); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_cfg_action_frm_tb_ppdu_cmd( + wmi_unified_t wmi_handle, + struct cfg_action_frm_tb_ppdu_param *cfg_info) +{ + if (wmi_handle->ops->send_cfg_action_frm_tb_ppdu_cmd) + return wmi_handle->ops->send_cfg_action_frm_tb_ppdu_cmd( + wmi_handle, cfg_info); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_save_fw_version_cmd(wmi_unified_t wmi_handle, + void *evt_buf) +{ + if (wmi_handle->ops->save_fw_version_cmd) + return wmi_handle->ops->save_fw_version_cmd(wmi_handle, + evt_buf); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_log_supported_evt_cmd(wmi_unified_t wmi_handle, + uint8_t *event, + uint32_t len) +{ + if (wmi_handle->ops->send_log_supported_evt_cmd) + return wmi_handle->ops->send_log_supported_evt_cmd(wmi_handle, + event, len); + + return QDF_STATUS_E_FAILURE; +} + +void wmi_send_time_stamp_sync_cmd_tlv(wmi_unified_t wmi_handle) +{ + if (wmi_handle->ops->send_time_stamp_sync_cmd) + wmi_handle->ops->send_time_stamp_sync_cmd(wmi_handle); + +} + +QDF_STATUS +wmi_unified_enable_specific_fw_logs_cmd(wmi_unified_t wmi_handle, + struct wmi_wifi_start_log *start_log) +{ + if (wmi_handle->ops->send_enable_specific_fw_logs_cmd) + return wmi_handle->ops->send_enable_specific_fw_logs_cmd(wmi_handle, + start_log); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_flush_logs_to_fw_cmd(wmi_unified_t wmi_handle) +{ + if (wmi_handle->ops->send_flush_logs_to_fw_cmd) + return wmi_handle->ops->send_flush_logs_to_fw_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_fw_test_cmd(wmi_unified_t wmi_handle, + struct set_fwtest_params *wmi_fwtest) +{ + if (wmi_handle->ops->send_fw_test_cmd) + return wmi_handle->ops->send_fw_test_cmd(wmi_handle, + wmi_fwtest); + + return QDF_STATUS_E_FAILURE; + +} + +QDF_STATUS wmi_unified_unit_test_cmd(wmi_unified_t wmi_handle, + struct wmi_unit_test_cmd *wmi_utest) +{ + if (wmi_handle->ops->send_unit_test_cmd) + return wmi_handle->ops->send_unit_test_cmd(wmi_handle, + wmi_utest); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_WLAN_APF +QDF_STATUS +wmi_unified_set_active_apf_mode_cmd(wmi_unified_t wmi, uint8_t vdev_id, + enum wmi_host_active_apf_mode ucast_mode, + enum wmi_host_active_apf_mode + mcast_bcast_mode) +{ + if (wmi->ops->send_set_active_apf_mode_cmd) + return wmi->ops->send_set_active_apf_mode_cmd(wmi, vdev_id, + ucast_mode, + mcast_bcast_mode); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_apf_enable_cmd(wmi_unified_t wmi, + uint32_t vdev_id, bool enable) +{ + if (wmi->ops->send_apf_enable_cmd) + return wmi->ops->send_apf_enable_cmd(wmi, vdev_id, enable); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_apf_write_work_memory_cmd(wmi_unified_t wmi, + struct wmi_apf_write_memory_params + *write_params) +{ + if (wmi->ops->send_apf_write_work_memory_cmd) + return wmi->ops->send_apf_write_work_memory_cmd(wmi, + write_params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_apf_read_work_memory_cmd(wmi_unified_t wmi, + struct wmi_apf_read_memory_params + *read_params) +{ + if (wmi->ops->send_apf_read_work_memory_cmd) + return wmi->ops->send_apf_read_work_memory_cmd(wmi, + read_params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_apf_read_memory_resp_event(wmi_unified_t wmi, void *evt_buf, + struct wmi_apf_read_memory_resp_event_params + *read_mem_evt) +{ + if (wmi->ops->extract_apf_read_memory_resp_event) + return wmi->ops->extract_apf_read_memory_resp_event(wmi, + evt_buf, + read_mem_evt); + + return QDF_STATUS_E_FAILURE; +} +#endif /* FEATURE_WLAN_APF */ + +QDF_STATUS +wmi_unified_pdev_fips_cmd_send(wmi_unified_t wmi_handle, + struct fips_params *param) +{ + if (wmi_handle->ops->send_pdev_fips_cmd) + return wmi_handle->ops->send_pdev_fips_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_FEATURE_DISA +QDF_STATUS +wmi_unified_encrypt_decrypt_send_cmd(void *wmi_hdl, + struct disa_encrypt_decrypt_req_params + *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->send_encrypt_decrypt_send_cmd) + return wmi_handle->ops->send_encrypt_decrypt_send_cmd(wmi_handle + , params); + + return QDF_STATUS_E_FAILURE; +} +#endif /* WLAN_FEATURE_DISA */ + +QDF_STATUS +wmi_unified_wlan_profile_enable_cmd_send(wmi_unified_t wmi_handle, + struct wlan_profile_params *param) +{ + if (wmi_handle->ops->send_wlan_profile_enable_cmd) + return wmi_handle->ops->send_wlan_profile_enable_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_wlan_profile_trigger_cmd_send(wmi_unified_t wmi_handle, + struct wlan_profile_params *param) +{ + if (wmi_handle->ops->send_wlan_profile_trigger_cmd) + return wmi_handle->ops->send_wlan_profile_trigger_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_set_chan_cmd_send(wmi_unified_t wmi_handle, + struct channel_param *param) +{ + if (wmi_handle->ops->send_pdev_set_chan_cmd) + return wmi_handle->ops->send_pdev_set_chan_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_set_ratepwr_table_cmd_send(wmi_unified_t wmi_handle, + struct ratepwr_table_params *param) +{ + if (wmi_handle->ops->send_set_ratepwr_table_cmd) + return wmi_handle->ops->send_set_ratepwr_table_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_get_ratepwr_table_cmd_send(wmi_unified_t wmi_handle) +{ + if (wmi_handle->ops->send_get_ratepwr_table_cmd) + return wmi_handle->ops->send_get_ratepwr_table_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_ratepwr_chainmsk_cmd_send( + wmi_unified_t wmi_handle, + struct ratepwr_chainmsk_params *param) +{ + if (wmi_handle->ops->send_set_ratepwr_chainmsk_cmd) + return wmi_handle->ops->send_set_ratepwr_chainmsk_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_macaddr_cmd_send(wmi_unified_t wmi_handle, + struct macaddr_params *param) +{ + if (wmi_handle->ops->send_set_macaddr_cmd) + return wmi_handle->ops->send_set_macaddr_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_pdev_scan_start_cmd_send(wmi_unified_t wmi_handle) +{ + if (wmi_handle->ops->send_pdev_scan_start_cmd) + return wmi_handle->ops->send_pdev_scan_start_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_pdev_scan_end_cmd_send(wmi_unified_t wmi_handle) +{ + if (wmi_handle->ops->send_pdev_scan_end_cmd) + return wmi_handle->ops->send_pdev_scan_end_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_set_acparams_cmd_send(wmi_unified_t wmi_handle, + struct acparams_params *param) +{ + if (wmi_handle->ops->send_set_acparams_cmd) + return wmi_handle->ops->send_set_acparams_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_vap_dscp_tid_map_cmd_send( + wmi_unified_t wmi_handle, + struct vap_dscp_tid_map_params *param) +{ + if (wmi_handle->ops->send_set_vap_dscp_tid_map_cmd) + return wmi_handle->ops->send_set_vap_dscp_tid_map_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_proxy_ast_reserve_cmd_send(wmi_unified_t wmi_handle, + struct proxy_ast_reserve_params *param) +{ + if (wmi_handle->ops->send_proxy_ast_reserve_cmd) + return wmi_handle->ops->send_proxy_ast_reserve_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_bridge_mac_addr_cmd_send( + wmi_unified_t wmi_handle, + struct set_bridge_mac_addr_params *param) +{ + if (wmi_handle->ops->send_set_bridge_mac_addr_cmd) + return wmi_handle->ops->send_set_bridge_mac_addr_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_phyerr_enable_cmd_send(wmi_unified_t wmi_handle) +{ + if (wmi_handle->ops->send_phyerr_enable_cmd) + return wmi_handle->ops->send_phyerr_enable_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_phyerr_disable_cmd_send(wmi_unified_t wmi_handle) +{ + if (wmi_handle->ops->send_phyerr_disable_cmd) + return wmi_handle->ops->send_phyerr_disable_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_smart_ant_enable_tx_feedback_cmd_send( + wmi_unified_t wmi_handle, + struct smart_ant_enable_tx_feedback_params *param) +{ + if (wmi_handle->ops->send_smart_ant_enable_tx_feedback_cmd) + return wmi_handle->ops->send_smart_ant_enable_tx_feedback_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_unified_smart_ant_enable_tx_feedback_cmd_send); + +QDF_STATUS +wmi_unified_vdev_spectral_configure_cmd_send( + wmi_unified_t wmi_handle, + struct vdev_spectral_configure_params *param) +{ + if (wmi_handle->ops->send_vdev_spectral_configure_cmd) + return wmi_handle->ops->send_vdev_spectral_configure_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_vdev_spectral_enable_cmd_send( + wmi_unified_t wmi_handle, + struct vdev_spectral_enable_params *param) +{ + if (wmi_handle->ops->send_vdev_spectral_enable_cmd) + return wmi_handle->ops->send_vdev_spectral_enable_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_bss_chan_info_request_cmd_send( + wmi_unified_t wmi_handle, + struct bss_chan_info_request_params *param) +{ + if (wmi_handle->ops->send_bss_chan_info_request_cmd) + return wmi_handle->ops->send_bss_chan_info_request_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_thermal_mitigation_param_cmd_send( + wmi_unified_t wmi_handle, + struct thermal_mitigation_params *param) +{ + if (wmi_handle->ops->send_thermal_mitigation_param_cmd) + return wmi_handle->ops->send_thermal_mitigation_param_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_vdev_set_fwtest_param_cmd_send(wmi_unified_t wmi_handle, + struct set_fwtest_params *param) +{ + if (wmi_handle->ops->send_vdev_set_fwtest_param_cmd) + return wmi_handle->ops->send_vdev_set_fwtest_param_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_vdev_set_custom_aggr_size_cmd_send( + wmi_unified_t wmi_handle, + struct set_custom_aggr_size_params *param) +{ + if (wmi_handle->ops->send_vdev_set_custom_aggr_size_cmd) + return wmi_handle->ops->send_vdev_set_custom_aggr_size_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_vdev_set_qdepth_thresh_cmd_send( + wmi_unified_t wmi_handle, + struct set_qdepth_thresh_params *param) +{ + if (wmi_handle->ops->send_vdev_set_qdepth_thresh_cmd) + return wmi_handle->ops->send_vdev_set_qdepth_thresh_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_pdev_set_regdomain_cmd_send( + wmi_unified_t wmi_handle, + struct pdev_set_regdomain_params *param) +{ + if (wmi_handle->ops->send_pdev_set_regdomain_cmd) + return wmi_handle->ops->send_pdev_set_regdomain_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_set_beacon_filter_cmd_send( + wmi_unified_t wmi_handle, + struct set_beacon_filter_params *param) +{ + if (wmi_handle->ops->send_set_beacon_filter_cmd) + return wmi_handle->ops->send_set_beacon_filter_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_remove_beacon_filter_cmd_send( + wmi_unified_t wmi_handle, + struct remove_beacon_filter_params *param) +{ + if (wmi_handle->ops->send_remove_beacon_filter_cmd) + return wmi_handle->ops->send_remove_beacon_filter_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_get_pn_send_cmd() - send command to get PN for peer + * @wmi_hdl: wmi handle + * @wmi_peer_tx_pn_request_cmd_fixed_param: pn request params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_get_pn_send_cmd(wmi_unified_t wmi_hdl, + struct peer_request_pn_param *pn_params) +{ + if (wmi_hdl->ops->send_pdev_get_pn_cmd) + return wmi_hdl->ops->send_pdev_get_pn_cmd(wmi_hdl, + pn_params); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_mgmt_cmd_send() - WMI mgmt cmd function + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold mgmt parameter + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +#if 0 +QDF_STATUS wmi_unified_mgmt_cmd_send(void *wmi_hdl, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct mgmt_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_mgmt_cmd) + return wmi_handle->ops->send_mgmt_cmd(wmi_handle, + macaddr, param); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS wmi_unified_addba_clearresponse_cmd_send( + wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct addba_clearresponse_params *param) +{ + if (wmi_handle->ops->send_addba_clearresponse_cmd) + return wmi_handle->ops->send_addba_clearresponse_cmd(wmi_handle, + macaddr, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_addba_send_cmd_send(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct addba_send_params *param) +{ + if (wmi_handle->ops->send_addba_send_cmd) + return wmi_handle->ops->send_addba_send_cmd(wmi_handle, + macaddr, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_delba_send_cmd_send(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct delba_send_params *param) +{ + if (wmi_handle->ops->send_delba_send_cmd) + return wmi_handle->ops->send_delba_send_cmd(wmi_handle, + macaddr, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_addba_setresponse_cmd_send(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct addba_setresponse_params *param) +{ + if (wmi_handle->ops->send_addba_setresponse_cmd) + return wmi_handle->ops->send_addba_setresponse_cmd(wmi_handle, + macaddr, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_singleamsdu_cmd_send(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct singleamsdu_params *param) +{ + if (wmi_handle->ops->send_singleamsdu_cmd) + return wmi_handle->ops->send_singleamsdu_cmd(wmi_handle, + macaddr, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_mu_scan_cmd_send(wmi_unified_t wmi_handle, + struct mu_scan_params *param) +{ + if (wmi_handle->ops->send_mu_scan_cmd) + return wmi_handle->ops->send_mu_scan_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_lteu_config_cmd_send(wmi_unified_t wmi_handle, + struct lteu_config_params *param) +{ + if (wmi_handle->ops->send_lteu_config_cmd) + return wmi_handle->ops->send_lteu_config_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_set_psmode_cmd_send(wmi_unified_t wmi_handle, + struct set_ps_mode_params *param) +{ + if (wmi_handle->ops->send_set_ps_mode_cmd) + return wmi_handle->ops->send_set_ps_mode_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_init_cmd_send(wmi_unified_t wmi_handle, + struct wmi_init_cmd_param *param) +{ + if (wmi_handle->ops->init_cmd_send) + return wmi_handle->ops->init_cmd_send(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_save_service_bitmap(wmi_unified_t wmi_handle, void *evt_buf, + void *bitmap_buf) +{ + if (wmi_handle->ops->save_service_bitmap) { + return wmi_handle->ops->save_service_bitmap(wmi_handle, evt_buf, + bitmap_buf); + } + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_save_ext_service_bitmap(wmi_unified_t wmi_handle, void *evt_buf, + void *bitmap_buf) +{ + if (wmi_handle->ops->save_ext_service_bitmap) { + return wmi_handle->ops->save_ext_service_bitmap(wmi_handle, + evt_buf, bitmap_buf); + } + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_save_fw_version(wmi_unified_t wmi_handle, void *evt_buf) +{ + if (wmi_handle->ops->save_fw_version) { + wmi_handle->ops->save_fw_version(wmi_handle, evt_buf); + return 0; + } + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_check_and_update_fw_version(wmi_unified_t wmi_handle, void *evt_buf) +{ + if (wmi_handle->ops->check_and_update_fw_version) + return wmi_handle->ops->check_and_update_fw_version(wmi_handle, + evt_buf); + + return QDF_STATUS_E_FAILURE; +} + +bool wmi_service_enabled(wmi_unified_t wmi_handle, uint32_t service_id) +{ + if ((service_id < wmi_services_max) && + (wmi_handle->services[service_id] != WMI_SERVICE_UNAVAILABLE)) { + if (wmi_handle->ops->is_service_enabled) { + return wmi_handle->ops->is_service_enabled(wmi_handle, + wmi_handle->services[service_id]); + } + } else { + wmi_info("Service %d not supported", service_id); + } + + return false; +} + +QDF_STATUS +wmi_get_target_cap_from_service_ready( + wmi_unified_t wmi_handle, void *evt_buf, + struct wlan_psoc_target_capability_info *ev) +{ + if (wmi_handle->ops->get_target_cap_from_service_ready) + return wmi_handle->ops->get_target_cap_from_service_ready( + wmi_handle, evt_buf, ev); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_fw_version(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_fw_ver *fw_ver) +{ + if (wmi_handle->ops->extract_fw_version) + return wmi_handle->ops->extract_fw_version(wmi_handle, + evt_buf, fw_ver); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_fw_abi_version(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_fw_abi_ver *fw_ver) +{ + if (wmi_handle->ops->extract_fw_abi_version) + return wmi_handle->ops->extract_fw_abi_version(wmi_handle, + evt_buf, fw_ver); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_hal_reg_cap(wmi_unified_t wmi_handle, void *evt_buf, + struct wlan_psoc_hal_reg_capability *hal_reg_cap) +{ + if (wmi_handle->ops->extract_hal_reg_cap) + return wmi_handle->ops->extract_hal_reg_cap(wmi_handle, + evt_buf, hal_reg_cap); + + return QDF_STATUS_E_FAILURE; +} + +uint32_t +wmi_extract_num_mem_reqs_from_service_ready( + wmi_unified_t wmi_handle, + void *evt_buf) +{ + if (wmi_handle->ops->extract_num_mem_reqs) + return wmi_handle->ops->extract_num_mem_reqs(wmi_handle, + evt_buf); + + return 0; +} + +QDF_STATUS +wmi_extract_host_mem_req_from_service_ready(wmi_unified_t wmi_handle, + void *evt_buf, + host_mem_req *mem_reqs, + uint32_t num_active_peers, + uint32_t num_peers, + enum wmi_fw_mem_prio fw_prio, + uint16_t idx) +{ + if (wmi_handle->ops->extract_host_mem_req) + return wmi_handle->ops->extract_host_mem_req(wmi_handle, + evt_buf, mem_reqs, num_active_peers, + num_peers, fw_prio, idx); + + return QDF_STATUS_E_FAILURE; +} + +uint32_t wmi_ready_extract_init_status(wmi_unified_t wmi_handle, void *ev) +{ + if (wmi_handle->ops->ready_extract_init_status) + return wmi_handle->ops->ready_extract_init_status(wmi_handle, + ev); + + + return 1; + +} + +QDF_STATUS wmi_ready_extract_mac_addr(wmi_unified_t wmi_handle, + void *ev, uint8_t *macaddr) +{ + if (wmi_handle->ops->ready_extract_mac_addr) + return wmi_handle->ops->ready_extract_mac_addr(wmi_handle, + ev, macaddr); + + + return QDF_STATUS_E_FAILURE; +} + +wmi_host_mac_addr +*wmi_ready_extract_mac_addr_list(wmi_unified_t wmi_handle, void *ev, + uint8_t *num_mac_addr) +{ + if (wmi_handle->ops->ready_extract_mac_addr_list) + return wmi_handle->ops->ready_extract_mac_addr_list(wmi_handle, + ev, num_mac_addr); + + *num_mac_addr = 0; + + return NULL; +} + +QDF_STATUS wmi_extract_ready_event_params( + wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_ready_ev_param *ev_param) +{ + if (wmi_handle->ops->extract_ready_event_params) + return wmi_handle->ops->extract_ready_event_params(wmi_handle, + evt_buf, ev_param); + + return QDF_STATUS_E_FAILURE; +} + +uint8_t *wmi_extract_dbglog_data_len(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *len) +{ + if (wmi_handle->ops->extract_dbglog_data_len) + return wmi_handle->ops->extract_dbglog_data_len(wmi_handle, + evt_buf, len); + + + return NULL; +} +qdf_export_symbol(wmi_extract_dbglog_data_len); + +QDF_STATUS wmi_send_ext_resource_config(wmi_unified_t wmi_handle, + wmi_host_ext_resource_config *ext_cfg) +{ + if (wmi_handle->ops->send_ext_resource_config) + return wmi_handle->ops->send_ext_resource_config(wmi_handle, + ext_cfg); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_rtt_meas_req_test_cmd_send(wmi_unified_t wmi_handle, + struct rtt_meas_req_test_params *param) +{ + if (wmi_handle->ops->send_rtt_meas_req_test_cmd) + return wmi_handle->ops->send_rtt_meas_req_test_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_rtt_meas_req_cmd_send(wmi_unified_t wmi_handle, + struct rtt_meas_req_params *param) +{ + if (wmi_handle->ops->send_rtt_meas_req_cmd) + return wmi_handle->ops->send_rtt_meas_req_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_lci_set_cmd_send(wmi_unified_t wmi_handle, + struct lci_set_params *param) +{ + if (wmi_handle->ops->send_lci_set_cmd) + return wmi_handle->ops->send_lci_set_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_lcr_set_cmd_send(wmi_unified_t wmi_handle, + struct lcr_set_params *param) +{ + if (wmi_handle->ops->send_lcr_set_cmd) + return wmi_handle->ops->send_lcr_set_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_rtt_keepalive_req_cmd_send(wmi_unified_t wmi_handle, + struct rtt_keepalive_req_params *param) +{ + if (wmi_handle->ops->send_rtt_keepalive_req_cmd) + return wmi_handle->ops->send_rtt_keepalive_req_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_periodic_chan_stats_config_cmd( + wmi_unified_t wmi_handle, + struct periodic_chan_stats_params *param) +{ + if (wmi_handle->ops->send_periodic_chan_stats_config_cmd) + return wmi_handle->ops->send_periodic_chan_stats_config_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_send_get_user_position_cmd(wmi_unified_t wmi_handle, uint32_t value) +{ + if (wmi_handle->ops->send_get_user_position_cmd) + return wmi_handle->ops->send_get_user_position_cmd(wmi_handle, + value); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_send_get_peer_mumimo_tx_count_cmd(wmi_unified_t wmi_handle, uint32_t value) +{ + if (wmi_handle->ops->send_get_peer_mumimo_tx_count_cmd) + return wmi_handle->ops->send_get_peer_mumimo_tx_count_cmd( + wmi_handle, value); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_send_reset_peer_mumimo_tx_count_cmd(wmi_unified_t wmi_handle, + uint32_t value) +{ + if (wmi_handle->ops->send_reset_peer_mumimo_tx_count_cmd) + return wmi_handle->ops->send_reset_peer_mumimo_tx_count_cmd( + wmi_handle, value); + + return QDF_STATUS_E_FAILURE; +} + +/* Extract - APIs */ + +QDF_STATUS wmi_extract_ctl_failsafe_check_ev_param( + wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_host_pdev_ctl_failsafe_event *param) +{ + if (wmi_handle->ops->extract_ctl_failsafe_check_ev_param) + return wmi_handle->ops->extract_ctl_failsafe_check_ev_param( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_fips_event_data(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_fips_event_param *param) +{ + if (wmi_handle->ops->extract_fips_event_data) { + return wmi_handle->ops->extract_fips_event_data(wmi_handle, + evt_buf, param); + } + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_extract_pn() - extract pn event data + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: pointer to get pn event param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_extract_pn(wmi_unified_t wmi_hdl, void *evt_buf, + struct wmi_host_get_pn_event *param) +{ + if (wmi_hdl->ops->extract_get_pn_data) + return wmi_hdl->ops->extract_get_pn_data(wmi_hdl, + evt_buf, param); + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_FEATURE_DISA +QDF_STATUS +wmi_extract_encrypt_decrypt_resp_params(void *wmi_hdl, void *evt_buf, + struct disa_encrypt_decrypt_resp_params + *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->extract_encrypt_decrypt_resp_event) + return + wmi_handle->ops->extract_encrypt_decrypt_resp_event(wmi_handle, + evt_buf, + param); + + return QDF_STATUS_E_FAILURE; +} +#endif /* WLAN_FEATURE_DISA */ + +QDF_STATUS +wmi_extract_mgmt_rx_params(wmi_unified_t wmi_handle, void *evt_buf, + struct mgmt_rx_event_params *hdr, uint8_t **bufp) +{ + if (wmi_handle->ops->extract_mgmt_rx_params) + return wmi_handle->ops->extract_mgmt_rx_params(wmi_handle, + evt_buf, hdr, bufp); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_vdev_roam_param(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_roam_event *param) +{ + if (wmi_handle->ops->extract_vdev_roam_param) + return wmi_handle->ops->extract_vdev_roam_param(wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_vdev_scan_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + struct scan_event *param) +{ + if (wmi_handle->ops->extract_vdev_scan_ev_param) + return wmi_handle->ops->extract_vdev_scan_ev_param(wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_WLAN_SCAN_PNO +QDF_STATUS +wmi_extract_nlo_match_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + struct scan_event *param) +{ + if (wmi_handle->ops->extract_nlo_match_ev_param) + return wmi_handle->ops->extract_nlo_match_ev_param(wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_nlo_complete_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + struct scan_event *param) +{ + if (wmi_handle->ops->extract_nlo_complete_ev_param) + return wmi_handle->ops->extract_nlo_complete_ev_param( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS +wmi_extract_mu_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_mu_report_event *param) +{ + if (wmi_handle->ops->extract_mu_ev_param) + return wmi_handle->ops->extract_mu_ev_param(wmi_handle, evt_buf, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_mu_db_entry(wmi_unified_t wmi_handle, void *evt_buf, + uint8_t idx, wmi_host_mu_db_entry *param) +{ + if (wmi_handle->ops->extract_mu_db_entry) + return wmi_handle->ops->extract_mu_db_entry(wmi_handle, evt_buf, + idx, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_mumimo_tx_count_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_peer_txmu_cnt_event *param) +{ + if (wmi_handle->ops->extract_mumimo_tx_count_ev_param) + return wmi_handle->ops->extract_mumimo_tx_count_ev_param( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_peer_gid_userpos_list_ev_param( + wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_peer_gid_userpos_list_event *param) +{ + if (wmi_handle->ops->extract_peer_gid_userpos_list_ev_param) + return wmi_handle->ops->extract_peer_gid_userpos_list_ev_param( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_esp_estimate_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + struct esp_estimation_event *param) +{ + if (wmi_handle->ops->extract_esp_estimation_ev_param) + return wmi_handle->ops->extract_esp_estimation_ev_param( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_gpio_input_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *gpio_num) +{ + if (wmi_handle->ops->extract_gpio_input_ev_param) + return wmi_handle->ops->extract_gpio_input_ev_param(wmi_handle, + evt_buf, gpio_num); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_pdev_reserve_ast_ev_param( + wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_proxy_ast_reserve_param *param) +{ + if (wmi_handle->ops->extract_pdev_reserve_ast_ev_param) + return wmi_handle->ops->extract_pdev_reserve_ast_ev_param( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_pdev_generic_buffer_ev_param( + wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_pdev_generic_buffer_event *param) +{ + if (wmi_handle->ops->extract_pdev_generic_buffer_ev_param) + return wmi_handle->ops->extract_pdev_generic_buffer_ev_param( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; + +} + +QDF_STATUS wmi_extract_peer_ratecode_list_ev( + wmi_unified_t wmi_handle, void *evt_buf, + uint8_t *peer_mac, uint32_t *pdev_id, wmi_sa_rate_cap *rate_cap) +{ + if (wmi_handle->ops->extract_peer_ratecode_list_ev) + return wmi_handle->ops->extract_peer_ratecode_list_ev( + wmi_handle, evt_buf, + peer_mac, pdev_id, rate_cap); + + return QDF_STATUS_E_FAILURE; + +} + +QDF_STATUS +wmi_extract_comb_phyerr(wmi_unified_t wmi_handle, void *evt_buf, + uint16_t datalen, uint16_t *buf_offset, + wmi_host_phyerr_t *phyerr) +{ + if (wmi_handle->ops->extract_comb_phyerr) + return wmi_handle->ops->extract_comb_phyerr(wmi_handle, + evt_buf, datalen, buf_offset, phyerr); + + return QDF_STATUS_E_FAILURE; + +} + +QDF_STATUS +wmi_extract_single_phyerr(wmi_unified_t wmi_handle, void *evt_buf, + uint16_t datalen, uint16_t *buf_offset, + wmi_host_phyerr_t *phyerr) +{ + if (wmi_handle->ops->extract_single_phyerr) + return wmi_handle->ops->extract_single_phyerr(wmi_handle, + evt_buf, datalen, buf_offset, phyerr); + + return QDF_STATUS_E_FAILURE; + +} + +QDF_STATUS +wmi_extract_composite_phyerr(wmi_unified_t wmi_handle, void *evt_buf, + uint16_t datalen, wmi_host_phyerr_t *phyerr) +{ + if (wmi_handle->ops->extract_composite_phyerr) + return wmi_handle->ops->extract_composite_phyerr(wmi_handle, + evt_buf, datalen, phyerr); + + return QDF_STATUS_E_FAILURE; + +} + +QDF_STATUS +wmi_extract_stats_param(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_stats_event *stats_param) +{ + if (wmi_handle->ops->extract_all_stats_count) + return wmi_handle->ops->extract_all_stats_count(wmi_handle, + evt_buf, stats_param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_pdev_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_pdev_stats *pdev_stats) +{ + if (wmi_handle->ops->extract_pdev_stats) + return wmi_handle->ops->extract_pdev_stats(wmi_handle, + evt_buf, index, pdev_stats); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_unit_test(wmi_unified_t wmi_handle, void *evt_buf, + wmi_unit_test_event *unit_test, uint32_t maxspace) +{ + if (wmi_handle->ops->extract_unit_test) + return wmi_handle->ops->extract_unit_test(wmi_handle, + evt_buf, unit_test, maxspace); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_pdev_ext_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, + wmi_host_pdev_ext_stats *pdev_ext_stats) +{ + if (wmi_handle->ops->extract_pdev_ext_stats) + return wmi_handle->ops->extract_pdev_ext_stats(wmi_handle, + evt_buf, index, pdev_ext_stats); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_peer_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_peer_stats *peer_stats) +{ + if (wmi_handle->ops->extract_peer_stats) + return wmi_handle->ops->extract_peer_stats(wmi_handle, + evt_buf, index, peer_stats); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_vdev_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_vdev_stats *vdev_stats) +{ + if (wmi_handle->ops->extract_vdev_stats) + return wmi_handle->ops->extract_vdev_stats(wmi_handle, + evt_buf, index, vdev_stats); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_per_chain_rssi_stats( + wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, + struct wmi_host_per_chain_rssi_stats *rssi_stats) +{ + if (wmi_handle->ops->extract_per_chain_rssi_stats) + return wmi_handle->ops->extract_per_chain_rssi_stats(wmi_handle, + evt_buf, index, rssi_stats); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_rtt_hdr(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_rtt_event_hdr *ev) +{ + if (wmi_handle->ops->extract_rtt_hdr) + return wmi_handle->ops->extract_rtt_hdr(wmi_handle, + evt_buf, ev); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_bcnflt_stats( + wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_bcnflt_stats *bcnflt_stats) +{ + if (wmi_handle->ops->extract_bcnflt_stats) + return wmi_handle->ops->extract_bcnflt_stats(wmi_handle, + evt_buf, index, bcnflt_stats); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_rtt_ev(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_rtt_meas_event *ev, + uint8_t *hdump, uint16_t hdump_len) +{ + if (wmi_handle->ops->extract_rtt_ev) + return wmi_handle->ops->extract_rtt_ev(wmi_handle, + evt_buf, ev, hdump, hdump_len); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_peer_extd_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, + wmi_host_peer_extd_stats *peer_extd_stats) +{ + if (wmi_handle->ops->extract_peer_extd_stats) + return wmi_handle->ops->extract_peer_extd_stats(wmi_handle, + evt_buf, index, peer_extd_stats); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_peer_retry_stats( + wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, struct wmi_host_peer_retry_stats *peer_retry_stats) +{ + if (wmi_handle->ops->extract_peer_retry_stats) + return wmi_handle->ops->extract_peer_retry_stats(wmi_handle, + evt_buf, index, peer_retry_stats); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_peer_adv_stats( + wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_peer_adv_stats *peer_adv_stats) +{ + if (wmi_handle->ops->extract_peer_adv_stats) + return wmi_handle->ops->extract_peer_adv_stats(wmi_handle, + evt_buf, peer_adv_stats); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_rtt_error_report_ev(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_rtt_error_report_event *ev) +{ + if (wmi_handle->ops->extract_rtt_error_report_ev) + return wmi_handle->ops->extract_rtt_error_report_ev(wmi_handle, + evt_buf, ev); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_chan_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_chan_stats *chan_stats) +{ + if (wmi_handle->ops->extract_chan_stats) + return wmi_handle->ops->extract_chan_stats(wmi_handle, + evt_buf, index, chan_stats); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_FEATURE_MIB_STATS +QDF_STATUS wmi_extract_mib_stats(wmi_unified_t wmi_handle, void *evt_buf, + struct mib_stats_metrics *mib_stats) +{ + if (wmi_handle->ops->extract_mib_stats) + return wmi_handle->ops->extract_mib_stats(wmi_handle, + evt_buf, + mib_stats); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS wmi_extract_thermal_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *temp, uint32_t *level, + uint32_t *pdev_id) +{ + if (wmi_handle->ops->extract_thermal_stats) + return wmi_handle->ops->extract_thermal_stats(wmi_handle, + evt_buf, temp, level, pdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_profile_ctx(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_wlan_profile_ctx_t *profile_ctx) +{ + if (wmi_handle->ops->extract_profile_ctx) + return wmi_handle->ops->extract_profile_ctx(wmi_handle, + evt_buf, profile_ctx); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_thermal_level_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint8_t idx, uint32_t *levelcount, + uint32_t *dccount) +{ + if (wmi_handle->ops->extract_thermal_level_stats) + return wmi_handle->ops->extract_thermal_level_stats(wmi_handle, + evt_buf, idx, levelcount, dccount); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_profile_data(wmi_unified_t wmi_handle, void *evt_buf, uint8_t idx, + wmi_host_wlan_profile_t *profile_data) +{ + if (wmi_handle->ops->extract_profile_data) + return wmi_handle->ops->extract_profile_data(wmi_handle, + evt_buf, idx, profile_data); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_bss_chan_info_event( + wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_pdev_bss_chan_info_event *bss_chan_info) +{ + if (wmi_handle->ops->extract_bss_chan_info_event) + return wmi_handle->ops->extract_bss_chan_info_event(wmi_handle, + evt_buf, bss_chan_info); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_tx_data_traffic_ctrl_ev(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_tx_data_traffic_ctrl_event *ev) +{ + if (wmi_handle->ops->extract_tx_data_traffic_ctrl_ev) + return wmi_handle->ops->extract_tx_data_traffic_ctrl_ev( + wmi_handle, evt_buf, ev); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_vdev_extd_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, + wmi_host_vdev_extd_stats *vdev_extd_stats) +{ + if (wmi_handle->ops->extract_vdev_extd_stats) + return wmi_handle->ops->extract_vdev_extd_stats(wmi_handle, + evt_buf, index, vdev_extd_stats); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_bcn_stats(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_bcn_stats *vdev_bcn_stats) +{ + if (wmi_handle->ops->extract_bcn_stats) + return wmi_handle->ops->extract_bcn_stats(wmi_handle, + evt_buf, index, vdev_bcn_stats); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_vdev_nac_rssi_stats( + wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_vdev_nac_rssi_event *vdev_nac_rssi_stats) +{ + if (wmi_handle->ops->extract_vdev_nac_rssi_stats) + return wmi_handle->ops->extract_vdev_nac_rssi_stats(wmi_handle, + evt_buf, vdev_nac_rssi_stats); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_power_dbg_cmd(wmi_unified_t wmi_handle, + struct wmi_power_dbg_params *param) +{ + if (wmi_handle->ops->send_power_dbg_cmd) + return wmi_handle->ops->send_power_dbg_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_btcoex_wlan_priority_cmd(wmi_unified_t wmi_handle, + struct btcoex_cfg_params *param) +{ + if (wmi_handle->ops->send_btcoex_wlan_priority_cmd) + return wmi_handle->ops->send_btcoex_wlan_priority_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_btcoex_duty_cycle_cmd(wmi_unified_t wmi_handle, + struct btcoex_cfg_params *param) +{ + if (wmi_handle->ops->send_btcoex_duty_cycle_cmd) + return wmi_handle->ops->send_btcoex_duty_cycle_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_service_ready_ext( + wmi_unified_t wmi_handle, uint8_t *evt_buf, + struct wlan_psoc_host_service_ext_param *param) +{ + if (wmi_handle->ops->extract_service_ready_ext) + return wmi_handle->ops->extract_service_ready_ext(wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_service_ready_ext2( + struct wmi_unified *wmi_handle, uint8_t *evt_buf, + struct wlan_psoc_host_service_ext2_param *param) +{ + if (wmi_handle->ops->extract_service_ready_ext2) + return wmi_handle->ops->extract_service_ready_ext2(wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_sar_cap_service_ready_ext( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wlan_psoc_host_service_ext_param *ext_param) +{ + if (wmi_handle->ops->extract_sar_cap_service_ready_ext) + return wmi_handle->ops->extract_sar_cap_service_ready_ext( + wmi_handle, + evt_buf, ext_param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_hw_mode_cap_service_ready_ext( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t hw_mode_idx, + struct wlan_psoc_host_hw_mode_caps *param) +{ + if (wmi_handle->ops->extract_hw_mode_cap_service_ready_ext) + return wmi_handle->ops->extract_hw_mode_cap_service_ready_ext( + wmi_handle, + evt_buf, hw_mode_idx, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_mac_phy_cap_service_ready_ext( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + uint8_t hw_mode_id, + uint8_t phy_id, + struct wlan_psoc_host_mac_phy_caps *param) +{ + if (wmi_handle->ops->extract_mac_phy_cap_service_ready_ext) + return wmi_handle->ops->extract_mac_phy_cap_service_ready_ext( + wmi_handle, + evt_buf, hw_mode_id, phy_id, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_reg_cap_service_ready_ext( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t phy_idx, + struct wlan_psoc_host_hal_reg_capabilities_ext *param) +{ + if (wmi_handle->ops->extract_reg_cap_service_ready_ext) + return wmi_handle->ops->extract_reg_cap_service_ready_ext( + wmi_handle, + evt_buf, phy_idx, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_dbr_ring_cap_service_ready_ext( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct wlan_psoc_host_dbr_ring_caps *param) +{ + if (wmi_handle->ops->extract_dbr_ring_cap_service_ready_ext) + return wmi_handle->ops->extract_dbr_ring_cap_service_ready_ext( + wmi_handle, + evt_buf, idx, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_dbr_ring_cap_service_ready_ext2( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct wlan_psoc_host_dbr_ring_caps *param) +{ + if (wmi_handle->ops->extract_dbr_ring_cap_service_ready_ext2) + return wmi_handle->ops->extract_dbr_ring_cap_service_ready_ext2( + wmi_handle, + evt_buf, idx, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_spectral_scaling_params_service_ready_ext( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct wlan_psoc_host_spectral_scaling_params *param) +{ + if (wmi_handle->ops->extract_scaling_params_service_ready_ext) + return wmi_handle->ops->extract_scaling_params_service_ready_ext + (wmi_handle, evt_buf, idx, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_pdev_utf_event(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_host_pdev_utf_event *param) +{ + if (wmi_handle->ops->extract_pdev_utf_event) + return wmi_handle->ops->extract_pdev_utf_event( + wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_pdev_qvit_event(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_host_pdev_qvit_event *param) +{ + if (wmi_handle->ops->extract_pdev_qvit_event) + return wmi_handle->ops->extract_pdev_qvit_event( + wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_coex_ver_cfg_cmd(wmi_unified_t wmi_handle, + coex_ver_cfg_t *param) +{ + if (wmi_handle->ops->send_coex_ver_cfg_cmd) + return wmi_handle->ops->send_coex_ver_cfg_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_coex_config_cmd(wmi_unified_t wmi_handle, + struct coex_config_params *param) +{ + if (wmi_handle->ops->send_coex_config_cmd) + return wmi_handle->ops->send_coex_config_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_request_get_rcpi_cmd(wmi_unified_t wmi_handle, + struct rcpi_req *get_rcpi_param) +{ + if (wmi_handle->ops->send_get_rcpi_cmd) + return wmi_handle->ops->send_get_rcpi_cmd(wmi_handle, + get_rcpi_param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_rcpi_response_event(wmi_unified_t wmi_handle, void *evt_buf, + struct rcpi_res *res) +{ + struct wmi_ops *ops = wmi_handle->ops; + + if (ops->extract_rcpi_response_event) + return ops->extract_rcpi_response_event(wmi_handle, evt_buf, + res); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_dfs_phyerr_offload_en_cmd(wmi_unified_t wmi_handle, + uint32_t pdev_id) +{ + if (wmi_handle->ops->send_dfs_phyerr_offload_en_cmd) + return wmi_handle->ops->send_dfs_phyerr_offload_en_cmd( + wmi_handle, pdev_id); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef QCA_SUPPORT_AGILE_DFS +QDF_STATUS +wmi_unified_send_vdev_adfs_ch_cfg_cmd(wmi_unified_t wmi_handle, + struct vdev_adfs_ch_cfg_params *param) +{ + if (wmi_handle->ops->send_adfs_ch_cfg_cmd) + return wmi_handle->ops->send_adfs_ch_cfg_cmd( + wmi_handle, + param); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_vdev_adfs_ocac_abort_cmd(wmi_unified_t wmi_handle, + struct vdev_adfs_abort_params *param) +{ + if (wmi_handle->ops->send_adfs_ocac_abort_cmd) + return wmi_handle->ops->send_adfs_ocac_abort_cmd( + wmi_handle, + param); + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS +wmi_unified_dfs_phyerr_offload_dis_cmd(wmi_unified_t wmi_handle, + uint32_t pdev_id) +{ + if (wmi_handle->ops->send_dfs_phyerr_offload_dis_cmd) + return wmi_handle->ops->send_dfs_phyerr_offload_dis_cmd( + wmi_handle, pdev_id); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_SUPPORT_RF_CHARACTERIZATION +QDF_STATUS wmi_extract_num_rf_characterization_entries(wmi_unified_t wmi_hdl, + uint8_t *evt_buf, + uint32_t *num_rf_characterization_entries) +{ + if (wmi_hdl->ops->extract_num_rf_characterization_entries) + return wmi_hdl->ops->extract_num_rf_characterization_entries(wmi_hdl, + evt_buf, num_rf_characterization_entries); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_rf_characterization_entries(wmi_unified_t wmi_hdl, + uint8_t *evt_buf, + uint32_t num_rf_characterization_entries, + struct wmi_host_rf_characterization_event_param *rf_characterization_entries) +{ + if (wmi_hdl->ops->extract_rf_characterization_entries) + return wmi_hdl->ops->extract_rf_characterization_entries(wmi_hdl, + evt_buf, num_rf_characterization_entries, + rf_characterization_entries); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS wmi_extract_chainmask_tables( + wmi_unified_t wmi_handle, uint8_t *evt_buf, + struct wlan_psoc_host_chainmask_table *chainmask_table) +{ + if (wmi_handle->ops->extract_chainmask_tables) + return wmi_handle->ops->extract_chainmask_tables(wmi_handle, + evt_buf, chainmask_table); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_country_cmd_send(wmi_unified_t wmi_handle, + struct set_country *param) +{ + if (wmi_handle->ops->send_set_country_cmd) + return wmi_handle->ops->send_set_country_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_FEATURE_ACTION_OUI +QDF_STATUS +wmi_unified_send_action_oui_cmd(wmi_unified_t wmi_handle, + struct action_oui_request *req) +{ + if (wmi_handle->ops->send_action_oui_cmd) + return wmi_handle->ops->send_action_oui_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS wmi_unified_send_dump_wds_table_cmd(wmi_unified_t wmi_handle) +{ + if (wmi_handle->ops->send_wds_entry_list_cmd) + return wmi_handle->ops->send_wds_entry_list_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_wds_entry(wmi_unified_t wmi_handle, uint8_t *evt_buf, + struct wdsentry *wds_entry, + u_int32_t idx) +{ + if (wmi_handle->ops->extract_wds_entry) + return wmi_handle->ops->extract_wds_entry(wmi_handle, + evt_buf, wds_entry, idx); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_extract_wds_entry); + +QDF_STATUS wmi_unified_send_obss_detection_cfg_cmd( + wmi_unified_t wmi_handle, + struct wmi_obss_detection_cfg_param *obss_cfg_param) +{ + if (wmi_handle->ops->send_obss_detection_cfg_cmd) + return wmi_handle->ops->send_obss_detection_cfg_cmd(wmi_handle, + obss_cfg_param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extract_obss_detection_info( + wmi_unified_t wmi_handle, + uint8_t *data, + struct wmi_obss_detect_info *info) +{ + if (wmi_handle->ops->extract_obss_detection_info) + return wmi_handle->ops->extract_obss_detection_info(data, info); + + return QDF_STATUS_E_FAILURE; +} + +#if defined(WLAN_SUPPORT_FILS) || defined(CONFIG_BAND_6GHZ) +QDF_STATUS +wmi_unified_vdev_fils_enable_cmd_send(struct wmi_unified *wmi_handle, + struct config_fils_params *param) +{ + if (wmi_handle->ops->send_vdev_fils_enable_cmd) + return wmi_handle->ops->send_vdev_fils_enable_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} +#endif + +#ifdef WLAN_SUPPORT_GREEN_AP +QDF_STATUS wmi_extract_green_ap_egap_status_info( + wmi_unified_t wmi_handle, uint8_t *evt_buf, + struct wlan_green_ap_egap_status_info *egap_status_info_params) +{ + if (wmi_handle->ops->extract_green_ap_egap_status_info) + return wmi_handle->ops->extract_green_ap_egap_status_info( + evt_buf, egap_status_info_params); + + return QDF_STATUS_E_FAILURE; +} +#endif + +wmi_host_channel_width wmi_get_ch_width_from_phy_mode( + wmi_unified_t wmi_handle, WMI_HOST_WLAN_PHY_MODE phymode) +{ + /* + * this API does translation between host only strcutres, hence + * does not need separate TLV, non-TLV definitions + */ + + if (phymode >= WMI_HOST_MODE_11A && phymode < WMI_HOST_MODE_MAX) + return mode_to_width[phymode]; + else + return WMI_HOST_CHAN_WIDTH_20; +} + +#ifdef QCA_SUPPORT_CP_STATS +QDF_STATUS wmi_extract_cca_stats(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_congestion_stats *stats) +{ + if (wmi_handle->ops->extract_cca_stats) + return wmi_handle->ops->extract_cca_stats(wmi_handle, evt_buf, + stats); + + return QDF_STATUS_E_FAILURE; +} +#endif /* QCA_SUPPORT_CP_STATS */ + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS +wmi_unified_dfs_send_avg_params_cmd(wmi_unified_t wmi_handle, + struct dfs_radar_found_params *params) +{ + if (wmi_handle->ops->send_dfs_average_radar_params_cmd) + return wmi_handle->ops->send_dfs_average_radar_params_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_dfs_status_from_fw(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *dfs_status_check) +{ + if (wmi_handle->ops->extract_dfs_status_from_fw) + return wmi_handle->ops->extract_dfs_status_from_fw(wmi_handle, + evt_buf, dfs_status_check); + + return QDF_STATUS_E_FAILURE; +} +#endif + +#ifdef OL_ATH_SMART_LOGGING +QDF_STATUS wmi_unified_send_smart_logging_enable_cmd(wmi_unified_t wmi_handle, + uint32_t param) +{ + if (wmi_handle->ops->send_smart_logging_enable_cmd) + return wmi_handle->ops->send_smart_logging_enable_cmd( + wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_smart_logging_fatal_cmd(wmi_unified_t wmi_handle, + struct wmi_debug_fatal_events *param) +{ + if (wmi_handle->ops->send_smart_logging_fatal_cmd) + return wmi_handle->ops->send_smart_logging_fatal_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_smartlog_ev(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_debug_fatal_events *ev) +{ + if (wmi_handle->ops->extract_smartlog_event) + return wmi_handle->ops->extract_smartlog_event( + wmi_handle, evt_buf, ev); + + return QDF_STATUS_E_FAILURE; +} + +qdf_export_symbol(wmi_extract_smartlog_ev); +#endif /* OL_ATH_SMART_LOGGING */ + +QDF_STATUS +wmi_unified_send_roam_scan_stats_cmd(wmi_unified_t wmi_handle, + struct wmi_roam_scan_stats_req *params) +{ + if (wmi_handle->ops->send_roam_scan_stats_cmd) + return wmi_handle->ops->send_roam_scan_stats_cmd(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef CRYPTO_SET_KEY_CONVERGED +uint8_t wlan_crypto_cipher_to_wmi_cipher( + enum wlan_crypto_cipher_type crypto_cipher) +{ + switch (crypto_cipher) { + case WLAN_CRYPTO_CIPHER_NONE: + return WMI_CIPHER_NONE; + case WLAN_CRYPTO_CIPHER_WEP: + case WLAN_CRYPTO_CIPHER_WEP_40: + case WLAN_CRYPTO_CIPHER_WEP_104: + return WMI_CIPHER_WEP; + case WLAN_CRYPTO_CIPHER_TKIP: + return WMI_CIPHER_TKIP; + case WLAN_CRYPTO_CIPHER_WAPI_SMS4: + case WLAN_CRYPTO_CIPHER_WAPI_GCM4: + return WMI_CIPHER_WAPI; + case WLAN_CRYPTO_CIPHER_AES_CCM: + case WLAN_CRYPTO_CIPHER_AES_CCM_256: + return WMI_CIPHER_AES_CCM; + case WLAN_CRYPTO_CIPHER_AES_CMAC: + return WMI_CIPHER_AES_CMAC; + case WLAN_CRYPTO_CIPHER_AES_GMAC: + case WLAN_CRYPTO_CIPHER_AES_GMAC_256: + return WMI_CIPHER_AES_GMAC; + case WLAN_CRYPTO_CIPHER_AES_GCM: + case WLAN_CRYPTO_CIPHER_AES_GCM_256: + return WMI_CIPHER_AES_GCM; + default: + return 0; + } +} + +enum cdp_sec_type wlan_crypto_cipher_to_cdp_sec_type( + enum wlan_crypto_cipher_type crypto_cipher) +{ + switch (crypto_cipher) { + case WLAN_CRYPTO_CIPHER_NONE: + return cdp_sec_type_none; + case WLAN_CRYPTO_CIPHER_WEP: + case WLAN_CRYPTO_CIPHER_WEP_40: + case WLAN_CRYPTO_CIPHER_WEP_104: + return cdp_sec_type_wep104; + case WLAN_CRYPTO_CIPHER_TKIP: + return cdp_sec_type_tkip; + case WLAN_CRYPTO_CIPHER_WAPI_SMS4: + case WLAN_CRYPTO_CIPHER_WAPI_GCM4: + return cdp_sec_type_wapi; + case WLAN_CRYPTO_CIPHER_AES_CCM: + return cdp_sec_type_aes_ccmp; + case WLAN_CRYPTO_CIPHER_AES_CCM_256: + return cdp_sec_type_aes_ccmp_256; + case WLAN_CRYPTO_CIPHER_AES_GCM: + return cdp_sec_type_aes_gcmp; + case WLAN_CRYPTO_CIPHER_AES_GCM_256: + return cdp_sec_type_aes_gcmp_256; + default: + return cdp_sec_type_none; + } +} +#endif /* CRYPTO_SET_KEY_CONVERGED */ + +QDF_STATUS +wmi_extract_roam_scan_stats_res_evt(wmi_unified_t wmi, void *evt_buf, + uint32_t *vdev_id, + struct wmi_roam_scan_stats_res **res_param) +{ + if (wmi->ops->extract_roam_scan_stats_res_evt) + return wmi->ops->extract_roam_scan_stats_res_evt(wmi, + evt_buf, + vdev_id, res_param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_offload_bcn_tx_status_evt(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *vdev_id, uint32_t *tx_status) +{ + if (wmi_handle->ops->extract_offload_bcn_tx_status_evt) + return wmi_handle->ops->extract_offload_bcn_tx_status_evt( + wmi_handle, evt_buf, + vdev_id, tx_status); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef OBSS_PD +QDF_STATUS wmi_unified_send_obss_spatial_reuse_set_cmd( + wmi_unified_t wmi_handle, + struct wmi_host_obss_spatial_reuse_set_param *obss_spatial_reuse_param) +{ + if (wmi_handle->ops->send_obss_spatial_reuse_set) + return wmi_handle->ops->send_obss_spatial_reuse_set(wmi_handle, + obss_spatial_reuse_param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_obss_spatial_reuse_set_def_thresh_cmd( + wmi_unified_t wmi_handle, + struct wmi_host_obss_spatial_reuse_set_def_thresh *thresh) +{ + if (wmi_handle->ops->send_obss_spatial_reuse_set_def_thresh) + return wmi_handle->ops->send_obss_spatial_reuse_set_def_thresh( + wmi_handle, thresh); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS wmi_convert_pdev_id_host_to_target(wmi_unified_t wmi_handle, + uint32_t host_pdev_id, + uint32_t *target_pdev_id) +{ + if (wmi_handle->ops->convert_pdev_id_host_to_target) { + *target_pdev_id = + wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + host_pdev_id); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_bss_color_change_enable_cmd(wmi_unified_t wmi_handle, + uint32_t vdev_id, + bool enable) +{ + if (wmi_handle->ops->send_bss_color_change_enable_cmd) + return wmi_handle->ops->send_bss_color_change_enable_cmd( + wmi_handle, vdev_id, enable); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_obss_color_collision_cfg_cmd( + wmi_unified_t wmi_handle, + struct wmi_obss_color_collision_cfg_param *cfg) +{ + if (wmi_handle->ops->send_obss_color_collision_cfg_cmd) + return wmi_handle->ops->send_obss_color_collision_cfg_cmd( + wmi_handle, cfg); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extract_obss_color_collision_info( + wmi_unified_t wmi_handle, + uint8_t *data, struct wmi_obss_color_collision_info *info) +{ + if (wmi_handle->ops->extract_obss_color_collision_info) + return wmi_handle->ops->extract_obss_color_collision_info(data, + info); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_mws_coex_req_cmd(struct wmi_unified *wmi_handle, + uint32_t vdev_id, + uint32_t cmd_id) +{ + + if (wmi_handle->ops->send_mws_coex_status_req_cmd) + return wmi_handle->ops->send_mws_coex_status_req_cmd(wmi_handle, + vdev_id, cmd_id); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WIFI_POS_CONVERGED +QDF_STATUS +wmi_extract_oem_response_param(wmi_unified_t wmi_hdl, void *resp_buf, + struct wmi_oem_response_param *oem_resp_param) +{ + if (wmi_hdl->ops->extract_oem_response_param) + return wmi_hdl->ops->extract_oem_response_param(wmi_hdl, + resp_buf, + oem_resp_param); + + return QDF_STATUS_E_FAILURE; +} +#endif /* WIFI_POS_CONVERGED */ + +QDF_STATUS wmi_unified_extract_hw_mode_resp(wmi_unified_t wmi, + void *evt_buf, + uint32_t *cmd_status) +{ + if (wmi->ops->extract_hw_mode_resp_event) + return wmi->ops->extract_hw_mode_resp_event(wmi, + evt_buf, + cmd_status); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_ANI_LEVEL_REQUEST +QDF_STATUS wmi_unified_ani_level_cmd_send(wmi_unified_t wmi_handle, + uint32_t *freqs, + uint8_t num_freqs) +{ + if (wmi_handle->ops->send_ani_level_cmd) + return wmi_handle->ops->send_ani_level_cmd(wmi_handle, freqs, + num_freqs); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extract_ani_level(wmi_unified_t wmi_handle, + uint8_t *data, + struct wmi_host_ani_level_event **info, + uint32_t *num_channels) +{ + if (wmi_handle->ops->extract_ani_level) + return wmi_handle->ops->extract_ani_level(data, info, + num_channels); + + return QDF_STATUS_E_FAILURE; +} +#endif /* FEATURE_ANI_LEVEL_REQUEST */ + +QDF_STATUS +wmi_unified_extract_roam_trigger_stats(wmi_unified_t wmi, + void *evt_buf, + struct wmi_roam_trigger_info *trig, + uint8_t idx) +{ + if (wmi->ops->extract_roam_trigger_stats) + return wmi->ops->extract_roam_trigger_stats(wmi, evt_buf, trig, + idx); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_extract_roam_scan_stats(wmi_unified_t wmi, void *evt_buf, + struct wmi_roam_scan_data *dst, uint8_t idx, + uint8_t chan_idx, uint8_t ap_idx) +{ + if (wmi->ops->extract_roam_scan_stats) + return wmi->ops->extract_roam_scan_stats(wmi, evt_buf, dst, + idx, chan_idx, ap_idx); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_extract_roam_result_stats(wmi_unified_t wmi, void *buf, + struct wmi_roam_result *dst, + uint8_t idx) +{ + if (wmi->ops->extract_roam_result_stats) + return wmi->ops->extract_roam_result_stats(wmi, buf, dst, idx); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_extract_roam_11kv_stats(wmi_unified_t wmi, void *evt_buf, + struct wmi_neighbor_report_data *dst, + uint8_t idx, uint8_t rpt_idx) +{ + if (wmi->ops->extract_roam_11kv_stats) + return wmi->ops->extract_roam_11kv_stats(wmi, evt_buf, dst, idx, + rpt_idx); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_extract_roam_msg_info(wmi_unified_t wmi, void *evt_buf, + struct wmi_roam_msg_info *dst, uint8_t idx) +{ + if (wmi->ops->extract_roam_msg_info) + return wmi->ops->extract_roam_msg_info(wmi, evt_buf, dst, idx); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_WLAN_TIME_SYNC_FTM +QDF_STATUS wmi_unified_send_wlan_time_sync_ftm_trigger(wmi_unified_t wmi_handle, + uint32_t vdev_id, + bool burst_mode) +{ + if (wmi_handle->ops->send_wlan_time_sync_ftm_trigger_cmd) + return wmi_handle->ops->send_wlan_time_sync_ftm_trigger_cmd + (wmi_handle, vdev_id, burst_mode); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_wlan_time_sync_qtime(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint64_t lpass_ts) +{ + if (wmi_handle->ops->send_wlan_ts_qtime_cmd) + return wmi_handle->ops->send_wlan_ts_qtime_cmd(wmi_handle, + vdev_id, lpass_ts); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extract_time_sync_ftm_start_stop_params( + wmi_unified_t wmi_handle, void *evt_buf, + struct ftm_time_sync_start_stop_params *param) +{ + if (wmi_handle->ops->extract_time_sync_ftm_start_stop_event) + return + wmi_handle->ops->extract_time_sync_ftm_start_stop_event( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extract_time_sync_ftm_offset( + wmi_unified_t wmi_handle, void *evt_buf, + struct ftm_time_sync_offset *param) +{ + if (wmi_handle->ops->extract_time_sync_ftm_offset_event) + return + wmi_handle->ops->extract_time_sync_ftm_offset_event( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} +#endif /* FEATURE_WLAN_TIME_SYNC_FTM */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_bcn_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_bcn_api.c new file mode 100644 index 0000000000000000000000000000000000000000..4b56a5f5e3fe1e1cd3c551e85fcfeb08e402836d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_bcn_api.c @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to concurrency component. + */ + +#include "wmi_unified_bcn_api.h" + +QDF_STATUS +wmi_unified_bcn_buf_ll_cmd(wmi_unified_t wmi_handle, + wmi_bcn_send_from_host_cmd_fixed_param *param) +{ + if (wmi_handle->ops->send_bcn_buf_ll_cmd) + return wmi_handle->ops->send_bcn_buf_ll_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_bcn_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_bcn_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..714c9d236f417f52b34e51fe17c50d7e2582c52d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_bcn_tlv.c @@ -0,0 +1,71 @@ + +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include +#include +#include +#include "wmi_unified_bcn_api.h" + +/** + * send_bcn_buf_ll_cmd_tlv() - prepare and send beacon buffer to fw for LL + * @wmi_handle: wmi handle + * @param: bcn ll cmd parameter + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + */ +static QDF_STATUS send_bcn_buf_ll_cmd_tlv( + wmi_unified_t wmi_handle, + wmi_bcn_send_from_host_cmd_fixed_param *param) +{ + wmi_bcn_send_from_host_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + QDF_STATUS ret; + + wmi_buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!wmi_buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_bcn_send_from_host_cmd_fixed_param *)wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bcn_send_from_host_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_bcn_send_from_host_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->data_len = param->data_len; + cmd->frame_ctrl = param->frame_ctrl; + cmd->frag_ptr = param->frag_ptr; + cmd->dtim_flag = param->dtim_flag; + + wmi_mtrace(WMI_PDEV_SEND_BCN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, wmi_buf, sizeof(*cmd), + WMI_PDEV_SEND_BCN_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_PDEV_SEND_BCN_CMDID command"); + wmi_buf_free(wmi_buf); + } + + return ret; +} + +void wmi_bcn_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_bcn_buf_ll_cmd = send_bcn_buf_ll_cmd_tlv; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_cfr_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_cfr_api.c new file mode 100644 index 0000000000000000000000000000000000000000..8d727d7c481f6d77e0c53177e8782cb32e149e69 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_cfr_api.c @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wmi_unified_priv.h" +#include "wmi_unified_cfr_api.h" + +#ifdef WLAN_CFR_ENABLE + +QDF_STATUS wmi_unified_send_peer_cfr_capture_cmd(wmi_unified_t wmi_handle, + struct peer_cfr_params *param) +{ + if (wmi_handle->ops->send_peer_cfr_capture_cmd) + return wmi_handle->ops->send_peer_cfr_capture_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_cfr_peer_tx_event_param() - extract tx event params from event + */ +QDF_STATUS +wmi_extract_cfr_peer_tx_event_param(wmi_unified_t wmi_handle, void *evt_buf, + wmi_cfr_peer_tx_event_param *peer_tx_event) +{ + if (wmi_handle->ops->extract_cfr_peer_tx_event_param) + return wmi_handle->ops->extract_cfr_peer_tx_event_param( + wmi_handle, + evt_buf, + peer_tx_event); + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_ENH_CFR_ENABLE +QDF_STATUS wmi_unified_send_cfr_rcc_cmd(wmi_unified_t wmi_handle, + struct cfr_rcc_param *cfg) +{ + if (wmi_handle->ops->send_cfr_rcc_cmd) + return wmi_handle->ops->send_cfr_rcc_cmd(wmi_handle, cfg); + return QDF_STATUS_E_FAILURE; +} +#endif /* WLAN_ENH_CFR_ENABLE */ +#endif /* WLAN_CFR_ENABLE */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_cfr_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_cfr_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..7efd2d08b4492f0cd81d3aaacd1b4c246f2f48ea --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_cfr_tlv.c @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "wmi.h" +#include "wmi_unified_priv.h" +#include "wmi_unified_cfr_param.h" +#include "wmi_unified_cfr_api.h" + +#ifdef WLAN_CFR_ENABLE +static QDF_STATUS +extract_cfr_peer_tx_event_param_tlv(wmi_unified_t wmi_handle, void *evt_buf, + wmi_cfr_peer_tx_event_param *peer_tx_event) +{ + int idx; + WMI_PEER_CFR_CAPTURE_EVENTID_param_tlvs *param_buf; + wmi_peer_cfr_capture_event_fixed_param *peer_tx_event_ev; + wmi_peer_cfr_capture_event_phase_fixed_param *chain_phase_ev; + + param_buf = (WMI_PEER_CFR_CAPTURE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid cfr capture buffer"); + return QDF_STATUS_E_INVAL; + } + + peer_tx_event_ev = param_buf->fixed_param; + if (!peer_tx_event_ev) { + WMI_LOGE("peer cfr capture buffer is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + peer_tx_event->capture_method = peer_tx_event_ev->capture_method; + peer_tx_event->vdev_id = peer_tx_event_ev->vdev_id; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_tx_event_ev->mac_addr, + &peer_tx_event->peer_mac_addr.bytes[0]); + peer_tx_event->primary_20mhz_chan = + peer_tx_event_ev->chan_mhz; + peer_tx_event->bandwidth = peer_tx_event_ev->bandwidth; + peer_tx_event->phy_mode = peer_tx_event_ev->phy_mode; + peer_tx_event->band_center_freq1 = peer_tx_event_ev->band_center_freq1; + peer_tx_event->band_center_freq2 = peer_tx_event_ev->band_center_freq2; + peer_tx_event->spatial_streams = peer_tx_event_ev->sts_count; + peer_tx_event->correlation_info_1 = + peer_tx_event_ev->correlation_info_1; + peer_tx_event->correlation_info_2 = + peer_tx_event_ev->correlation_info_2; + peer_tx_event->status = peer_tx_event_ev->status; + peer_tx_event->timestamp_us = peer_tx_event_ev->timestamp_us; + peer_tx_event->counter = peer_tx_event_ev->counter; + qdf_mem_copy(peer_tx_event->chain_rssi, peer_tx_event_ev->chain_rssi, + sizeof(peer_tx_event->chain_rssi)); + + chain_phase_ev = param_buf->phase_param; + if (chain_phase_ev) { + for (idx = 0; idx < WMI_HOST_MAX_CHAINS; idx++) { + /* Due to FW's alignment rules, phase information being + * passed is 32-bit, out of which only 16 bits is valid. + * Remaining bits are all zeroed. So direct mem copy + * will not work as it will copy extra zeroes into host + * structures. + */ + peer_tx_event->chain_phase[idx] = + (0xffff & chain_phase_ev->chain_phase[idx]); + } + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_ENH_CFR_ENABLE +static void populate_wmi_cfr_param(uint8_t grp_id, struct cfr_rcc_param *rcc, + wmi_cfr_filter_group_config *param) +{ + struct ta_ra_cfr_cfg *tgt_cfg = NULL; + + WMITLV_SET_HDR(¶m->tlv_header, + WMITLV_TAG_STRUC_wmi_cfr_filter_group_config, + WMITLV_GET_STRUCT_TLVLEN + (wmi_cfr_filter_group_config)); + tgt_cfg = &rcc->curr[grp_id]; + + param->filter_group_id = grp_id; + WMI_CFR_GROUP_TA_ADDR_VALID_SET(param->filter_set_valid_mask, + tgt_cfg->valid_ta); + WMI_CFR_GROUP_TA_ADDR_MASK_VALID_SET(param->filter_set_valid_mask, + tgt_cfg->valid_ta_mask); + WMI_CFR_GROUP_RA_ADDR_VALID_SET(param->filter_set_valid_mask, + tgt_cfg->valid_ra); + WMI_CFR_GROUP_RA_ADDR_MASK_VALID_SET(param->filter_set_valid_mask, + tgt_cfg->valid_ra_mask); + WMI_CFR_GROUP_BW_VALID_SET(param->filter_set_valid_mask, + tgt_cfg->valid_bw_mask); + WMI_CFR_GROUP_NSS_VALID_SET(param->filter_set_valid_mask, + tgt_cfg->valid_nss_mask); + WMI_CFR_GROUP_MGMT_SUBTYPE_VALID_SET(param->filter_set_valid_mask, + tgt_cfg->valid_mgmt_subtype); + WMI_CFR_GROUP_CTRL_SUBTYPE_VALID_SET(param->filter_set_valid_mask, + tgt_cfg->valid_ctrl_subtype); + WMI_CFR_GROUP_DATA_SUBTYPE_VALID_SET(param->filter_set_valid_mask, + tgt_cfg->valid_data_subtype); + WMI_CHAR_ARRAY_TO_MAC_ADDR(tgt_cfg->tx_addr, + ¶m->ta_addr); + WMI_CHAR_ARRAY_TO_MAC_ADDR(tgt_cfg->tx_addr_mask, + ¶m->ta_addr_mask); + WMI_CHAR_ARRAY_TO_MAC_ADDR(tgt_cfg->rx_addr, + ¶m->ra_addr); + WMI_CHAR_ARRAY_TO_MAC_ADDR(tgt_cfg->rx_addr_mask, + ¶m->ra_addr_mask); + WMI_CFR_GROUP_BW_SET(param->bw_nss_filter, + tgt_cfg->bw); + WMI_CFR_GROUP_NSS_SET(param->bw_nss_filter, + tgt_cfg->nss); + param->mgmt_subtype_filter = tgt_cfg->mgmt_subtype_filter; + param->ctrl_subtype_filter = tgt_cfg->ctrl_subtype_filter; + param->data_subtype_filter = tgt_cfg->data_subtype_filter; +} + +static QDF_STATUS send_cfr_rcc_cmd_tlv(wmi_unified_t wmi_handle, + struct cfr_rcc_param *rcc) +{ + wmi_cfr_capture_filter_cmd_fixed_param *cmd; + wmi_cfr_filter_group_config *param; + uint8_t *buf_ptr, grp_id; + wmi_buf_t buf; + uint32_t len; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct wmi_ops *ops = wmi_handle->ops; + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + len += rcc->num_grp_tlvs * sizeof(wmi_cfr_filter_group_config); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = wmi_buf_data(buf); + cmd = (wmi_cfr_capture_filter_cmd_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_cfr_capture_filter_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_cfr_capture_filter_cmd_fixed_param)); + cmd->pdev_id = ops->convert_host_pdev_id_to_target(wmi_handle, + rcc->pdev_id); + WMI_CFR_CAPTURE_INTERVAL_SET(cmd->capture_interval, + rcc->capture_interval); + WMI_CFR_CAPTURE_DURATION_SET(cmd->capture_duration, + rcc->capture_duration); + WMI_CFR_FILTER_GROUP_BITMAP_SET(cmd->filter_group_bitmap, + rcc->filter_group_bitmap); + WMI_CFR_UL_MU_USER_UPPER_SET(cmd->ul_mu_user_mask_upper, + rcc->ul_mu_user_mask_upper); + cmd->ul_mu_user_mask_lower = rcc->ul_mu_user_mask_lower; + WMI_CFR_FREEZE_DELAY_CNT_EN_SET(cmd->freeze_tlv_delay_cnt, + rcc->freeze_tlv_delay_cnt_en); + WMI_CFR_FREEZE_DELAY_CNT_THR_SET(cmd->freeze_tlv_delay_cnt, + rcc->freeze_tlv_delay_cnt_thr); + WMI_CFR_DIRECTED_FTM_ACK_EN_SET(cmd->filter_type, + rcc->m_directed_ftm); + WMI_CFR_ALL_FTM_ACK_EN_SET(cmd->filter_type, + rcc->m_all_ftm_ack); + WMI_CFR_NDPA_NDP_DIRECTED_EN_SET(cmd->filter_type, + rcc->m_ndpa_ndp_directed); + WMI_CFR_NDPA_NDP_ALL_EN_SET(cmd->filter_type, + rcc->m_ndpa_ndp_all); + WMI_CFR_TA_RA_TYPE_FILTER_EN_SET(cmd->filter_type, + rcc->m_ta_ra_filter); + WMI_CFR_ALL_PACKET_EN_SET(cmd->filter_type, + rcc->m_all_packet); + + /* TLV indicating array of structures to follow */ + buf_ptr += sizeof(wmi_cfr_capture_filter_cmd_fixed_param); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + rcc->num_grp_tlvs * sizeof(wmi_cfr_filter_group_config)); + + if (rcc->num_grp_tlvs) { + buf_ptr += WMI_TLV_HDR_SIZE; + param = (wmi_cfr_filter_group_config *)buf_ptr; + + for (grp_id = 0; grp_id < MAX_TA_RA_ENTRIES; grp_id++) { + if (qdf_test_bit(grp_id, + (unsigned long *) + &rcc->modified_in_curr_session)) { + populate_wmi_cfr_param(grp_id, rcc, param); + param++; + } + } + } + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_CFR_CAPTURE_FILTER_CMDID); + if (status) + wmi_buf_free(buf); + + return status; +} +#endif + +static QDF_STATUS send_peer_cfr_capture_cmd_tlv(wmi_unified_t wmi_handle, + struct peer_cfr_params *param) +{ + wmi_peer_cfr_capture_cmd_fixed_param *cmd; + wmi_buf_t buf; + int len = sizeof(*cmd); + int ret; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_peer_cfr_capture_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_cfr_capture_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_cfr_capture_cmd_fixed_param)); + + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->macaddr, &cmd->mac_addr); + cmd->request = param->request; + cmd->vdev_id = param->vdev_id; + cmd->periodicity = param->periodicity; + cmd->bandwidth = param->bandwidth; + cmd->capture_method = param->capture_method; + + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_CFR_CAPTURE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_PEER_CFR_CAPTURE_CMDID"); + wmi_buf_free(buf); + } + + return ret; +} + +#ifdef WLAN_ENH_CFR_ENABLE +static inline void wmi_enh_cfr_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_cfr_rcc_cmd = send_cfr_rcc_cmd_tlv; +} +#else +static inline void wmi_enh_cfr_attach_tlv(wmi_unified_t wmi_handle) +{ +} +#endif + +void wmi_cfr_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_peer_cfr_capture_cmd = send_peer_cfr_capture_cmd_tlv; + ops->extract_cfr_peer_tx_event_param = + extract_cfr_peer_tx_event_param_tlv; + wmi_enh_cfr_attach_tlv(wmi_handle); +} +#endif /* WLAN_CFR_ENABLE */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_concurrency_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_concurrency_api.c new file mode 100644 index 0000000000000000000000000000000000000000..b8dde1163bca13525c587db48c92bfb7cc7af848 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_concurrency_api.c @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to concurrency component. + */ + +#include +#include + +QDF_STATUS wmi_unified_set_enable_disable_mcc_adaptive_scheduler_cmd( + wmi_unified_t wmi_handle, uint32_t mcc_adaptive_scheduler, + uint32_t pdev_id) +{ + if (wmi_handle->ops->send_set_enable_disable_mcc_adaptive_scheduler_cmd) + return wmi_handle->ops->send_set_enable_disable_mcc_adaptive_scheduler_cmd(wmi_handle, + mcc_adaptive_scheduler, pdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_mcc_channel_time_latency_cmd( + wmi_unified_t wmi_handle, + uint32_t mcc_channel_freq, uint32_t mcc_channel_time_latency) +{ + if (wmi_handle->ops->send_set_mcc_channel_time_latency_cmd) + return wmi_handle->ops->send_set_mcc_channel_time_latency_cmd(wmi_handle, + mcc_channel_freq, + mcc_channel_time_latency); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_mcc_channel_time_quota_cmd( + wmi_unified_t wmi_handle, + uint32_t adapter_1_chan_freq, + uint32_t adapter_1_quota, uint32_t adapter_2_chan_freq) +{ + if (wmi_handle->ops->send_set_mcc_channel_time_quota_cmd) + return wmi_handle->ops->send_set_mcc_channel_time_quota_cmd(wmi_handle, + adapter_1_chan_freq, + adapter_1_quota, + adapter_2_chan_freq); + + return QDF_STATUS_E_FAILURE; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_concurrency_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_concurrency_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..805522252afc4278ebcda380096d39cc237a6280 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_concurrency_tlv.c @@ -0,0 +1,244 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include + +/** + * send_set_enable_disable_mcc_adaptive_scheduler_cmd_tlv() -enable/disable + * mcc scheduler + * @wmi_handle: wmi handle + * @mcc_adaptive_scheduler: enable/disable + * + * This function enable/disable mcc adaptive scheduler in fw. + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_set_enable_disable_mcc_adaptive_scheduler_cmd_tlv( + wmi_unified_t wmi_handle, uint32_t mcc_adaptive_scheduler, + uint32_t pdev_id) +{ + QDF_STATUS ret; + wmi_buf_t buf = 0; + wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param *cmd = NULL; + uint16_t len = + sizeof(wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param)); + cmd->enable = mcc_adaptive_scheduler; + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + pdev_id); + + wmi_mtrace(WMI_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGP("%s: Failed to send enable/disable MCC" + " adaptive scheduler command", __func__); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_set_mcc_channel_time_latency_cmd_tlv() -set MCC channel time latency + * @wmi: wmi handle + * @mcc_channel: mcc channel + * @mcc_channel_time_latency: MCC channel time latency. + * + * Currently used to set time latency for an MCC vdev/adapter using operating + * channel of it and channel number. The info is provided run time using + * iwpriv command: iwpriv setMccLatency . + * + * Return: CDF status + */ +static QDF_STATUS send_set_mcc_channel_time_latency_cmd_tlv( + wmi_unified_t wmi_handle, + uint32_t mcc_channel_freq, + uint32_t mcc_channel_time_latency) +{ + QDF_STATUS ret; + wmi_buf_t buf = 0; + wmi_resmgr_set_chan_latency_cmd_fixed_param *cmdTL = NULL; + uint16_t len = 0; + uint8_t *buf_ptr = NULL; + wmi_resmgr_chan_latency chan_latency; + /* Note: we only support MCC time latency for a single channel */ + uint32_t num_channels = 1; + uint32_t chan1_freq = mcc_channel_freq; + uint32_t latency_chan1 = mcc_channel_time_latency; + + /* If 0ms latency is provided, then FW will set to a default. + * Otherwise, latency must be at least 30ms. + */ + if ((latency_chan1 > 0) && + (latency_chan1 < WMI_MCC_MIN_NON_ZERO_CHANNEL_LATENCY)) { + WMI_LOGE("%s: Invalid time latency for Channel #1 = %dms " + "Minimum is 30ms (or 0 to use default value by " + "firmware)", __func__, latency_chan1); + return QDF_STATUS_E_INVAL; + } + + /* Set WMI CMD for channel time latency here */ + len = sizeof(wmi_resmgr_set_chan_latency_cmd_fixed_param) + + WMI_TLV_HDR_SIZE + /*Place holder for chan_time_latency array */ + num_channels * sizeof(wmi_resmgr_chan_latency); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmdTL = (wmi_resmgr_set_chan_latency_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmdTL->tlv_header, + WMITLV_TAG_STRUC_wmi_resmgr_set_chan_latency_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_resmgr_set_chan_latency_cmd_fixed_param)); + cmdTL->num_chans = num_channels; + /* Update channel time latency information for home channel(s) */ + buf_ptr += sizeof(*cmdTL); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + num_channels * sizeof(wmi_resmgr_chan_latency)); + buf_ptr += WMI_TLV_HDR_SIZE; + chan_latency.chan_mhz = chan1_freq; + chan_latency.latency = latency_chan1; + qdf_mem_copy(buf_ptr, &chan_latency, sizeof(chan_latency)); + wmi_mtrace(WMI_RESMGR_SET_CHAN_LATENCY_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_RESMGR_SET_CHAN_LATENCY_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send MCC Channel Time Latency command", + __func__); + wmi_buf_free(buf); + QDF_ASSERT(0); + } + + return ret; +} + +/** + * send_set_mcc_channel_time_quota_cmd_tlv() -set MCC channel time quota + * @wmi: wmi handle + * @adapter_1_chan_number: adapter 1 channel number + * @adapter_1_quota: adapter 1 quota + * @adapter_2_chan_number: adapter 2 channel number + * + * Return: CDF status + */ +static QDF_STATUS send_set_mcc_channel_time_quota_cmd_tlv( + wmi_unified_t wmi_handle, + uint32_t adapter_1_chan_freq, + uint32_t adapter_1_quota, + uint32_t adapter_2_chan_freq) +{ + QDF_STATUS ret; + wmi_buf_t buf = 0; + uint16_t len = 0; + uint8_t *buf_ptr = NULL; + wmi_resmgr_set_chan_time_quota_cmd_fixed_param *cmdTQ = NULL; + wmi_resmgr_chan_time_quota chan_quota; + uint32_t quota_chan1 = adapter_1_quota; + /* Knowing quota of 1st chan., derive quota for 2nd chan. */ + uint32_t quota_chan2 = 100 - quota_chan1; + /* Note: setting time quota for MCC requires info for 2 channels */ + uint32_t num_channels = 2; + uint32_t chan1_freq = adapter_1_chan_freq; + uint32_t chan2_freq = adapter_2_chan_freq; + + WMI_LOGD("%s: freq1:%dMHz, Quota1:%dms, " + "freq2:%dMHz, Quota2:%dms", __func__, + chan1_freq, quota_chan1, chan2_freq, + quota_chan2); + + /* + * Perform sanity check on time quota values provided. + */ + if (quota_chan1 < WMI_MCC_MIN_CHANNEL_QUOTA || + quota_chan1 > WMI_MCC_MAX_CHANNEL_QUOTA) { + WMI_LOGE("%s: Invalid time quota for Channel #1=%dms. Minimum " + "is 20ms & maximum is 80ms", __func__, quota_chan1); + return QDF_STATUS_E_INVAL; + } + /* Set WMI CMD for channel time quota here */ + len = sizeof(wmi_resmgr_set_chan_time_quota_cmd_fixed_param) + + WMI_TLV_HDR_SIZE + /* Place holder for chan_time_quota array */ + num_channels * sizeof(wmi_resmgr_chan_time_quota); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmdTQ = (wmi_resmgr_set_chan_time_quota_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmdTQ->tlv_header, + WMITLV_TAG_STRUC_wmi_resmgr_set_chan_time_quota_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_resmgr_set_chan_time_quota_cmd_fixed_param)); + cmdTQ->num_chans = num_channels; + + /* Update channel time quota information for home channel(s) */ + buf_ptr += sizeof(*cmdTQ); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + num_channels * sizeof(wmi_resmgr_chan_time_quota)); + buf_ptr += WMI_TLV_HDR_SIZE; + chan_quota.chan_mhz = chan1_freq; + chan_quota.channel_time_quota = quota_chan1; + qdf_mem_copy(buf_ptr, &chan_quota, sizeof(chan_quota)); + /* Construct channel and quota record for the 2nd MCC mode. */ + buf_ptr += sizeof(chan_quota); + chan_quota.chan_mhz = chan2_freq; + chan_quota.channel_time_quota = quota_chan2; + qdf_mem_copy(buf_ptr, &chan_quota, sizeof(chan_quota)); + + wmi_mtrace(WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send MCC Channel Time Quota command"); + wmi_buf_free(buf); + QDF_ASSERT(0); + } + + return ret; +} + +void wmi_concurrency_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_set_enable_disable_mcc_adaptive_scheduler_cmd = + send_set_enable_disable_mcc_adaptive_scheduler_cmd_tlv; + ops->send_set_mcc_channel_time_latency_cmd = + send_set_mcc_channel_time_latency_cmd_tlv; + ops->send_set_mcc_channel_time_quota_cmd = + send_set_mcc_channel_time_quota_cmd_tlv; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_crypto_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_crypto_api.c new file mode 100644 index 0000000000000000000000000000000000000000..ac63bd8a8fd73c920220c6b0407c94b2904dd4e6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_crypto_api.c @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to crypto component. + */ + +#include "wmi_unified_priv.h" +#include "wmi_unified_param.h" +#include "wmi_unified_crypto_api.h" + +QDF_STATUS +wmi_extract_install_key_comp_event(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t len, + struct wmi_install_key_comp_event *param) +{ + if (wmi_handle->ops->extract_install_key_comp_event) + return wmi_handle->ops->extract_install_key_comp_event( + wmi_handle, evt_buf, len, param); + + return QDF_STATUS_E_FAILURE; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_dbr_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_dbr_api.c new file mode 100644 index 0000000000000000000000000000000000000000..c59633ed53e6d2a60634d0583d5379f5cbc4b4f9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_dbr_api.c @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wmi_unified_priv.h" +#include "qdf_module.h" + +QDF_STATUS wmi_unified_dbr_ring_cfg(wmi_unified_t wmi_handle, + struct direct_buf_rx_cfg_req *cfg) +{ + if (wmi_handle->ops->send_dbr_cfg_cmd) + return wmi_handle->ops->send_dbr_cfg_cmd(wmi_handle, cfg); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_dbr_buf_release_fixed( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct direct_buf_rx_rsp *param) +{ + if (wmi_handle->ops->extract_dbr_buf_release_fixed) + return wmi_handle->ops->extract_dbr_buf_release_fixed( + wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_dbr_buf_release_entry( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct direct_buf_rx_entry *param) +{ + if (wmi_handle->ops->extract_dbr_buf_release_entry) + return wmi_handle->ops->extract_dbr_buf_release_entry( + wmi_handle, + evt_buf, idx, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_dbr_buf_metadata( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct direct_buf_rx_metadata *param) +{ + if (wmi_handle->ops->extract_dbr_buf_metadata) + return wmi_handle->ops->extract_dbr_buf_metadata( + wmi_handle, + evt_buf, idx, param); + + return QDF_STATUS_E_FAILURE; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_dbr_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_dbr_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..d006932a6c15b27277cc76ea422462113456f26d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_dbr_tlv.c @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "wmi.h" +#include "wmi_unified_priv.h" +#include "wmi_unified_dbr_param.h" +#include "wmi_unified_dbr_api.h" + +/** + * send_dbr_cfg_cmd_tlv() - configure DMA rings for Direct Buf RX + * @wmi_handle: wmi handle + * @data_len: len of dma cfg req + * @data: dma cfg req + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +static QDF_STATUS send_dbr_cfg_cmd_tlv(wmi_unified_t wmi_handle, + struct direct_buf_rx_cfg_req *cfg) +{ + wmi_buf_t buf; + wmi_dma_ring_cfg_req_fixed_param *cmd; + QDF_STATUS ret; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE(FL("wmi_buf_alloc failed")); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_dma_ring_cfg_req_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_dma_ring_cfg_req_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_dma_ring_cfg_req_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_host_pdev_id_to_target( + wmi_handle, + cfg->pdev_id); + cmd->mod_id = cfg->mod_id; + cmd->base_paddr_lo = cfg->base_paddr_lo; + cmd->base_paddr_hi = cfg->base_paddr_hi; + cmd->head_idx_paddr_lo = cfg->head_idx_paddr_lo; + cmd->head_idx_paddr_hi = cfg->head_idx_paddr_hi; + cmd->tail_idx_paddr_lo = cfg->tail_idx_paddr_lo; + cmd->tail_idx_paddr_hi = cfg->tail_idx_paddr_hi; + cmd->num_elems = cfg->num_elems; + cmd->buf_size = cfg->buf_size; + cmd->num_resp_per_event = cfg->num_resp_per_event; + cmd->event_timeout_ms = cfg->event_timeout_ms; + + WMI_LOGD("%s: wmi_dma_ring_cfg_req_fixed_param pdev id %d mod id %d" + "base paddr lo %x base paddr hi %x head idx paddr lo %x" + "head idx paddr hi %x tail idx paddr lo %x" + "tail idx addr hi %x num elems %d buf size %d num resp %d" + "event timeout %d", __func__, cmd->pdev_id, + cmd->mod_id, cmd->base_paddr_lo, cmd->base_paddr_hi, + cmd->head_idx_paddr_lo, cmd->head_idx_paddr_hi, + cmd->tail_idx_paddr_lo, cmd->tail_idx_paddr_hi, + cmd->num_elems, cmd->buf_size, cmd->num_resp_per_event, + cmd->event_timeout_ms); + wmi_mtrace(WMI_PDEV_DMA_RING_CFG_REQ_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_DMA_RING_CFG_REQ_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL(":wmi cmd send failed")); + wmi_buf_free(buf); + } + + return ret; +} + +static QDF_STATUS extract_scaling_params_service_ready_ext_tlv( + wmi_unified_t wmi_handle, + uint8_t *event, uint8_t idx, + struct wlan_psoc_host_spectral_scaling_params *param) +{ + WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf; + wmi_spectral_bin_scaling_params *spectral_bin_scaling_params; + + param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *)event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + spectral_bin_scaling_params = ¶m_buf->wmi_bin_scaling_params[idx]; + + param->pdev_id = wmi_handle->ops->convert_target_pdev_id_to_host( + wmi_handle, + spectral_bin_scaling_params->pdev_id); + param->low_level_offset = spectral_bin_scaling_params->low_level_offset; + param->formula_id = spectral_bin_scaling_params->formula_id; + param->high_level_offset = + spectral_bin_scaling_params->high_level_offset; + param->rssi_thr = spectral_bin_scaling_params->rssi_thr; + param->default_agc_max_gain = + spectral_bin_scaling_params->default_agc_max_gain; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_dbr_buf_release_fixed_tlv(wmi_unified_t wmi_handle, + uint8_t *event, struct direct_buf_rx_rsp *param) +{ + WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID_param_tlvs *param_buf; + wmi_dma_buf_release_fixed_param *ev; + + param_buf = (WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID_param_tlvs *)event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + ev = param_buf->fixed_param; + if (!ev) + return QDF_STATUS_E_INVAL; + + param->pdev_id = wmi_handle->ops->convert_target_pdev_id_to_host( + wmi_handle, + ev->pdev_id); + param->mod_id = ev->mod_id; + if ((!param_buf->num_entries) || + param_buf->num_entries < ev->num_buf_release_entry){ + wmi_err("actual num of buf release entries less than provided entries"); + return QDF_STATUS_E_INVAL; + } + param->num_buf_release_entry = ev->num_buf_release_entry; + if ((!param_buf->num_meta_data) || + param_buf->num_meta_data < ev->num_meta_data_entry) { + wmi_err("actual num of meta data entries less than provided entries"); + return QDF_STATUS_E_INVAL; + } + param->num_meta_data_entry = ev->num_meta_data_entry; + WMI_LOGD("%s:pdev id %d mod id %d num buf release entry %d", __func__, + param->pdev_id, param->mod_id, param->num_buf_release_entry); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_dbr_buf_release_entry_tlv(wmi_unified_t wmi_handle, + uint8_t *event, uint8_t idx, struct direct_buf_rx_entry *param) +{ + WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID_param_tlvs *param_buf; + wmi_dma_buf_release_entry *entry; + + param_buf = (WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID_param_tlvs *)event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + entry = ¶m_buf->entries[idx]; + + if (!entry) { + WMI_LOGE("%s: Entry is NULL", __func__); + return QDF_STATUS_E_FAILURE; + } + + WMI_LOGD("%s: paddr_lo[%d] = %x", __func__, idx, entry->paddr_lo); + + param->paddr_lo = entry->paddr_lo; + param->paddr_hi = entry->paddr_hi; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_dbr_buf_metadata_tlv( + wmi_unified_t wmi_handle, uint8_t *event, + uint8_t idx, struct direct_buf_rx_metadata *param) +{ + WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID_param_tlvs *param_buf; + wmi_dma_buf_release_spectral_meta_data *entry; + + param_buf = (WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID_param_tlvs *)event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + entry = ¶m_buf->meta_data[idx]; + + if (!entry) { + WMI_LOGE("%s: Entry is NULL", __func__); + return QDF_STATUS_E_FAILURE; + } + + qdf_mem_copy(param->noisefloor, entry->noise_floor, + qdf_min(sizeof(entry->noise_floor), + sizeof(param->noisefloor))); + param->reset_delay = entry->reset_delay; + + return QDF_STATUS_SUCCESS; +} + +void wmi_dbr_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_dbr_cfg_cmd = send_dbr_cfg_cmd_tlv; + ops->extract_dbr_buf_release_entry = extract_dbr_buf_release_entry_tlv; + ops->extract_dbr_buf_metadata = extract_dbr_buf_metadata_tlv; + ops->extract_dbr_buf_release_fixed = extract_dbr_buf_release_fixed_tlv; + ops->extract_scaling_params_service_ready_ext = + extract_scaling_params_service_ready_ext_tlv; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_dfs_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_dfs_api.c new file mode 100644 index 0000000000000000000000000000000000000000..b7568f98ad691127cd27fc7eed6bfcc53535a5e4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_dfs_api.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implement API's specific to DFS component. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +QDF_STATUS wmi_extract_dfs_cac_complete_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + uint32_t *vdev_id, + uint32_t len) +{ + if (wmi_handle && wmi_handle->ops->extract_dfs_cac_complete_event) + return wmi_handle->ops->extract_dfs_cac_complete_event( + wmi_handle, evt_buf, vdev_id, len); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_extract_dfs_cac_complete_event); + +QDF_STATUS +wmi_extract_dfs_ocac_complete_event(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct vdev_adfs_complete_status *param) +{ + if (wmi_handle && wmi_handle->ops->extract_dfs_ocac_complete_event) + return wmi_handle->ops->extract_dfs_ocac_complete_event( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +qdf_export_symbol(wmi_extract_dfs_ocac_complete_event); + +QDF_STATUS wmi_extract_dfs_radar_detection_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct radar_found_info *radar_found, + uint32_t len) +{ + if (wmi_handle && wmi_handle->ops->extract_dfs_radar_detection_event) + return wmi_handle->ops->extract_dfs_radar_detection_event( + wmi_handle, evt_buf, radar_found, len); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef QCA_MCL_DFS_SUPPORT +QDF_STATUS wmi_extract_wlan_radar_event_info( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct radar_event_info *wlan_radar_event, + uint32_t len) +{ + if (wmi_handle->ops->extract_wlan_radar_event_info) + return wmi_handle->ops->extract_wlan_radar_event_info( + wmi_handle, evt_buf, wlan_radar_event, len); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_extract_dfs_radar_detection_event); +#endif + +#if defined(WLAN_DFS_FULL_OFFLOAD) && defined(QCA_DFS_NOL_OFFLOAD) +QDF_STATUS wmi_send_usenol_pdev_param(wmi_unified_t wmi_handle, bool usenol, + struct wlan_objmgr_pdev *pdev) +{ + struct pdev_params pparam; + int pdev_idx; + + pdev_idx = lmac_get_pdev_idx(pdev); + if (pdev_idx < 0) + return QDF_STATUS_E_FAILURE; + + qdf_mem_zero(&pparam, sizeof(pparam)); + pparam.param_id = wmi_pdev_param_use_nol; + pparam.param_value = usenol; + + return wmi_unified_pdev_param_send(wmi_handle, &pparam, pdev_idx); +} + +QDF_STATUS +wmi_send_subchan_marking_pdev_param(wmi_unified_t wmi_handle, + bool subchanmark, + struct wlan_objmgr_pdev *pdev) +{ + struct pdev_params pparam; + int pdev_idx; + + pdev_idx = lmac_get_pdev_idx(pdev); + if (pdev_idx < 0) + return QDF_STATUS_E_FAILURE; + + qdf_mem_zero(&pparam, sizeof(pparam)); + pparam.param_id = wmi_pdev_param_sub_channel_marking; + pparam.param_value = subchanmark; + + return wmi_unified_pdev_param_send(wmi_handle, &pparam, pdev_idx); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_extscan_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_extscan_api.c new file mode 100644 index 0000000000000000000000000000000000000000..f0e86f13612b0b57e34763e4d1cdcde51782b2d8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_extscan_api.c @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wmi_unified_priv.h" + +QDF_STATUS wmi_unified_reset_passpoint_network_list_cmd( + wmi_unified_t wmi_handle, + struct wifi_passpoint_req_param *req) +{ + if (wmi_handle->ops->send_reset_passpoint_network_list_cmd) + return wmi_handle->ops->send_reset_passpoint_network_list_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_passpoint_network_list_cmd( + wmi_unified_t wmi_handle, + struct wifi_passpoint_req_param *req) +{ + if (wmi_handle->ops->send_set_passpoint_network_list_cmd) + return wmi_handle->ops->send_set_passpoint_network_list_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_epno_network_list_cmd( + wmi_unified_t wmi_handle, + struct wifi_enhanced_pno_params *req) +{ + if (wmi_handle->ops->send_set_epno_network_list_cmd) + return wmi_handle->ops->send_set_epno_network_list_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extscan_get_capabilities_cmd( + wmi_unified_t wmi_handle, + struct extscan_capabilities_params *pgetcapab) +{ + if (wmi_handle->ops->send_extscan_get_capabilities_cmd) + return wmi_handle->ops->send_extscan_get_capabilities_cmd(wmi_handle, + pgetcapab); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extscan_get_cached_results_cmd( + wmi_unified_t wmi_handle, + struct extscan_cached_result_params *pcached_results) +{ + if (wmi_handle->ops->send_extscan_get_cached_results_cmd) + return wmi_handle->ops->send_extscan_get_cached_results_cmd(wmi_handle, + pcached_results); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extscan_stop_change_monitor_cmd( + wmi_unified_t wmi_handle, + struct extscan_capabilities_reset_params *reset_req) +{ + if (wmi_handle->ops->send_extscan_stop_change_monitor_cmd) + return wmi_handle->ops->send_extscan_stop_change_monitor_cmd(wmi_handle, + reset_req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extscan_start_change_monitor_cmd( + wmi_unified_t wmi_handle, + struct extscan_set_sig_changereq_params *psigchange) +{ + if (wmi_handle->ops->send_extscan_start_change_monitor_cmd) + return wmi_handle->ops->send_extscan_start_change_monitor_cmd(wmi_handle, + psigchange); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extscan_stop_hotlist_monitor_cmd( + wmi_unified_t wmi_handle, + struct extscan_bssid_hotlist_reset_params *photlist_reset) +{ + if (wmi_handle->ops->send_extscan_stop_hotlist_monitor_cmd) + return wmi_handle->ops->send_extscan_stop_hotlist_monitor_cmd(wmi_handle, + photlist_reset); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extscan_start_hotlist_monitor_cmd( + wmi_unified_t wmi_handle, + struct extscan_bssid_hotlist_set_params *params) +{ + if (wmi_handle->ops->send_extscan_start_hotlist_monitor_cmd) + return wmi_handle->ops->send_extscan_start_hotlist_monitor_cmd(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_stop_extscan_cmd( + wmi_unified_t wmi_handle, + struct extscan_stop_req_params *pstopcmd) +{ + if (wmi_handle->ops->send_stop_extscan_cmd) + return wmi_handle->ops->send_stop_extscan_cmd(wmi_handle, + pstopcmd); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_start_extscan_cmd( + wmi_unified_t wmi_handle, + struct wifi_scan_cmd_req_params *pstart) +{ + if (wmi_handle->ops->send_start_extscan_cmd) + return wmi_handle->ops->send_start_extscan_cmd(wmi_handle, + pstart); + + return QDF_STATUS_E_FAILURE; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_extscan_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_extscan_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..03113430711a24c19c8e6d11665d46b313205b90 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_extscan_tlv.c @@ -0,0 +1,1109 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "wmi.h" +#include "wmi_unified_priv.h" + +/** + * send_reset_passpoint_network_list_cmd_tlv() - reset passpoint network list + * @wmi_handle: wmi handle + * @req: passpoint network request structure + * + * This function sends down WMI command with network id set to wildcard id. + * firmware shall clear all the config entries + * + * Return: QDF_STATUS enumeration + */ +static QDF_STATUS send_reset_passpoint_network_list_cmd_tlv + (wmi_unified_t wmi_handle, + struct wifi_passpoint_req_param *req) +{ + wmi_passpoint_config_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len; + int ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_passpoint_config_cmd_fixed_param *) wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_passpoint_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_passpoint_config_cmd_fixed_param)); + cmd->id = WMI_PASSPOINT_NETWORK_ID_WILDCARD; + + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PASSPOINT_LIST_CONFIG_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send reset passpoint network list wmi cmd", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_passpoint_network_list_cmd_tlv() - set passpoint network list + * @wmi_handle: wmi handle + * @req: passpoint network request structure + * + * This function reads the incoming @req and fill in the destination + * WMI structure and send down the passpoint configs down to the firmware + * + * Return: QDF_STATUS enumeration + */ +static QDF_STATUS send_set_passpoint_network_list_cmd_tlv + (wmi_unified_t wmi_handle, + struct wifi_passpoint_req_param *req) +{ + wmi_passpoint_config_cmd_fixed_param *cmd; + u_int8_t i, j, *bytes; + wmi_buf_t buf; + uint32_t len; + int ret; + + len = sizeof(*cmd); + for (i = 0; i < req->num_networks; i++) { + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_passpoint_config_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_passpoint_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_passpoint_config_cmd_fixed_param)); + cmd->id = req->networks[i].id; + WMI_LOGD("%s: network id: %u", __func__, cmd->id); + qdf_mem_copy(cmd->realm, req->networks[i].realm, + strlen(req->networks[i].realm) + 1); + WMI_LOGD("%s: realm: %s", __func__, cmd->realm); + for (j = 0; j < PASSPOINT_ROAMING_CONSORTIUM_ID_NUM; j++) { + bytes = (uint8_t *) &req->networks[i].roaming_consortium_ids[j]; + WMI_LOGD("index: %d rcids: %02x %02x %02x %02x %02x %02x %02x %02x", + j, bytes[0], bytes[1], bytes[2], bytes[3], + bytes[4], bytes[5], bytes[6], bytes[7]); + + qdf_mem_copy(&cmd->roaming_consortium_ids[j], + &req->networks[i].roaming_consortium_ids[j], + PASSPOINT_ROAMING_CONSORTIUM_ID_LEN); + } + qdf_mem_copy(cmd->plmn, req->networks[i].plmn, + PASSPOINT_PLMN_ID_LEN); + WMI_LOGD("%s: plmn: %02x:%02x:%02x", __func__, + cmd->plmn[0], cmd->plmn[1], cmd->plmn[2]); + + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PASSPOINT_LIST_CONFIG_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send set passpoint network list wmi cmd", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + } + + return QDF_STATUS_SUCCESS; +} + +/** send_set_epno_network_list_cmd_tlv() - set epno network list + * @wmi_handle: wmi handle + * @req: epno config params request structure + * + * This function reads the incoming epno config request structure + * and constructs the WMI message to the firmware. + * + * Returns: 0 on success, error number otherwise + */ +static QDF_STATUS send_set_epno_network_list_cmd_tlv(wmi_unified_t wmi_handle, + struct wifi_enhanced_pno_params *req) +{ + wmi_nlo_config_cmd_fixed_param *cmd; + nlo_configured_parameters *nlo_list; + enlo_candidate_score_params *cand_score_params; + u_int8_t i, *buf_ptr; + wmi_buf_t buf; + uint32_t len; + QDF_STATUS ret; + + /* Fixed Params */ + len = sizeof(*cmd); + if (req->num_networks) { + /* TLV place holder for array of structures + * then each nlo_configured_parameters(nlo_list) TLV. + */ + len += WMI_TLV_HDR_SIZE; + len += (sizeof(nlo_configured_parameters) + * QDF_MIN(req->num_networks, WMI_NLO_MAX_SSIDS)); + /* TLV for array of uint32 channel_list */ + len += WMI_TLV_HDR_SIZE; + /* TLV for nlo_channel_prediction_cfg */ + len += WMI_TLV_HDR_SIZE; + /* TLV for candidate score params */ + len += sizeof(enlo_candidate_score_params); + } + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_nlo_config_cmd_fixed_param *) wmi_buf_data(buf); + + buf_ptr = (u_int8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_nlo_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_nlo_config_cmd_fixed_param)); + cmd->vdev_id = req->vdev_id; + + /* set flag to reset if num of networks are 0 */ + cmd->flags = (req->num_networks == 0 ? + WMI_NLO_CONFIG_ENLO_RESET : WMI_NLO_CONFIG_ENLO); + + buf_ptr += sizeof(wmi_nlo_config_cmd_fixed_param); + + cmd->no_of_ssids = QDF_MIN(req->num_networks, WMI_NLO_MAX_SSIDS); + WMI_LOGD("SSID count: %d flags: %d", + cmd->no_of_ssids, cmd->flags); + + /* Fill nlo_config only when num_networks are non zero */ + if (cmd->no_of_ssids) { + /* Fill networks */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + cmd->no_of_ssids * sizeof(nlo_configured_parameters)); + buf_ptr += WMI_TLV_HDR_SIZE; + + nlo_list = (nlo_configured_parameters *) buf_ptr; + for (i = 0; i < cmd->no_of_ssids; i++) { + WMITLV_SET_HDR(&nlo_list[i].tlv_header, + WMITLV_TAG_ARRAY_BYTE, + WMITLV_GET_STRUCT_TLVLEN( + nlo_configured_parameters)); + /* Copy ssid and it's length */ + nlo_list[i].ssid.valid = true; + nlo_list[i].ssid.ssid.ssid_len = + req->networks[i].ssid.length; + qdf_mem_copy(nlo_list[i].ssid.ssid.ssid, + req->networks[i].ssid.mac_ssid, + nlo_list[i].ssid.ssid.ssid_len); + WMI_LOGD("index: %d ssid: %.*s len: %d", i, + nlo_list[i].ssid.ssid.ssid_len, + (char *) nlo_list[i].ssid.ssid.ssid, + nlo_list[i].ssid.ssid.ssid_len); + + /* Copy pno flags */ + nlo_list[i].bcast_nw_type.valid = true; + nlo_list[i].bcast_nw_type.bcast_nw_type = + req->networks[i].flags; + WMI_LOGD("PNO flags (%u)", + nlo_list[i].bcast_nw_type.bcast_nw_type); + + /* Copy auth bit field */ + nlo_list[i].auth_type.valid = true; + nlo_list[i].auth_type.auth_type = + req->networks[i].auth_bit_field; + WMI_LOGD("Auth bit field (%u)", + nlo_list[i].auth_type.auth_type); + } + + buf_ptr += cmd->no_of_ssids * sizeof(nlo_configured_parameters); + /* Fill the channel list */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill prediction_param */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill epno candidate score params */ + cand_score_params = (enlo_candidate_score_params *) buf_ptr; + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_STRUC_enlo_candidate_score_param, + WMITLV_GET_STRUCT_TLVLEN(enlo_candidate_score_params)); + cand_score_params->min5GHz_rssi = + req->min_5ghz_rssi; + cand_score_params->min24GHz_rssi = + req->min_24ghz_rssi; + cand_score_params->initial_score_max = + req->initial_score_max; + cand_score_params->current_connection_bonus = + req->current_connection_bonus; + cand_score_params->same_network_bonus = + req->same_network_bonus; + cand_score_params->secure_bonus = + req->secure_bonus; + cand_score_params->band5GHz_bonus = + req->band_5ghz_bonus; + buf_ptr += sizeof(enlo_candidate_score_params); + } + + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send nlo wmi cmd", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_INVAL; + } + + WMI_LOGD("set ePNO list request sent successfully for vdev %d", + req->vdev_id); + + return ret; +} + +/** + * send_extscan_get_capabilities_cmd_tlv() - extscan get capabilities + * @wmi_handle: wmi handle + * @pgetcapab: get capabilities params + * + * This function send request to fw to get extscan capabilities. + * + * Return: CDF status + */ +static QDF_STATUS send_extscan_get_capabilities_cmd_tlv(wmi_unified_t wmi_handle, + struct extscan_capabilities_params *pgetcapab) +{ + wmi_extscan_get_capabilities_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + + len = sizeof(*cmd); + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (wmi_extscan_get_capabilities_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extscan_get_capabilities_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_get_capabilities_cmd_fixed_param)); + + cmd->request_id = pgetcapab->request_id; + + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_EXTSCAN_GET_CAPABILITIES_CMDID)) { + WMI_LOGE("%s: failed to command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_extscan_get_cached_results_cmd_tlv() - extscan get cached results + * @wmi_handle: wmi handle + * @pcached_results: cached results parameters + * + * This function send request to fw to get cached results. + * + * Return: CDF status + */ +static QDF_STATUS send_extscan_get_cached_results_cmd_tlv(wmi_unified_t wmi_handle, + struct extscan_cached_result_params *pcached_results) +{ + wmi_extscan_get_cached_results_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + + len = sizeof(*cmd); + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (wmi_extscan_get_cached_results_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extscan_get_cached_results_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_get_cached_results_cmd_fixed_param)); + + cmd->request_id = pcached_results->request_id; + cmd->vdev_id = pcached_results->vdev_id; + cmd->control_flags = pcached_results->flush; + + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_EXTSCAN_GET_CACHED_RESULTS_CMDID)) { + WMI_LOGE("%s: failed to command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_extscan_stop_change_monitor_cmd_tlv() - send stop change monitor cmd + * @wmi_handle: wmi handle + * @reset_req: Reset change request params + * + * This function sends stop change monitor request to fw. + * + * Return: CDF status + */ +static QDF_STATUS send_extscan_stop_change_monitor_cmd_tlv + (wmi_unified_t wmi_handle, + struct extscan_capabilities_reset_params *reset_req) +{ + wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + int change_list = 0; + + len = sizeof(*cmd); + + /* reset significant change tlv is set to 0 */ + len += WMI_TLV_HDR_SIZE; + len += change_list * sizeof(wmi_extscan_wlan_change_bssid_param); + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param *) + buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param)); + + cmd->request_id = reset_req->request_id; + cmd->vdev_id = reset_req->vdev_id; + cmd->mode = 0; + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + change_list * + sizeof(wmi_extscan_wlan_change_bssid_param)); + buf_ptr += WMI_TLV_HDR_SIZE + (change_list * + sizeof + (wmi_extscan_wlan_change_bssid_param)); + + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID)) { + WMI_LOGE("%s: failed to command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_get_buf_extscan_change_monitor_cmd() - fill change monitor request + * @wmi_handle: wmi handle + * @psigchange: change monitor request params + * @buf: wmi buffer + * @buf_len: buffer length + * + * This function fills elements of change monitor request buffer. + * + * Return: QDF status + */ +static QDF_STATUS wmi_get_buf_extscan_change_monitor_cmd + (wmi_unified_t wmi_handle, + struct extscan_set_sig_changereq_params + *psigchange, wmi_buf_t *buf, int *buf_len) +{ + wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param *cmd; + wmi_extscan_wlan_change_bssid_param *dest_chglist; + uint8_t *buf_ptr; + int j; + int len = sizeof(*cmd); + uint32_t numap = psigchange->num_ap; + struct ap_threshold_params *src_ap = psigchange->ap; + + if (!numap || (numap > WMI_WLAN_EXTSCAN_MAX_SIGNIFICANT_CHANGE_APS)) { + WMI_LOGE("%s: Invalid number of bssid's", __func__); + return QDF_STATUS_E_INVAL; + } + len += WMI_TLV_HDR_SIZE; + len += numap * sizeof(wmi_extscan_wlan_change_bssid_param); + + *buf = wmi_buf_alloc(wmi_handle, len); + if (!*buf) { + WMI_LOGP("%s: failed to allocate memory for change monitor cmd", + __func__); + return QDF_STATUS_E_FAILURE; + } + buf_ptr = (uint8_t *) wmi_buf_data(*buf); + cmd = + (wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param *) + buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param)); + + cmd->request_id = psigchange->request_id; + cmd->vdev_id = psigchange->vdev_id; + cmd->total_entries = numap; + cmd->mode = 1; + cmd->num_entries_in_page = numap; + cmd->lost_ap_scan_count = psigchange->lostap_sample_size; + cmd->max_rssi_samples = psigchange->rssi_sample_size; + cmd->rssi_averaging_samples = psigchange->rssi_sample_size; + cmd->max_out_of_range_count = psigchange->min_breaching; + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + numap * sizeof(wmi_extscan_wlan_change_bssid_param)); + dest_chglist = (wmi_extscan_wlan_change_bssid_param *) + (buf_ptr + WMI_TLV_HDR_SIZE); + + for (j = 0; j < numap; j++) { + WMITLV_SET_HDR(dest_chglist, + WMITLV_TAG_STRUC_wmi_extscan_bucket_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_wlan_change_bssid_param)); + + dest_chglist->lower_rssi_limit = src_ap->low; + dest_chglist->upper_rssi_limit = src_ap->high; + WMI_CHAR_ARRAY_TO_MAC_ADDR(src_ap->bssid.bytes, + &dest_chglist->bssid); + + WMI_LOGD("%s: min_rssi %d", __func__, + dest_chglist->lower_rssi_limit); + dest_chglist++; + src_ap++; + } + buf_ptr += WMI_TLV_HDR_SIZE + + (numap * sizeof(wmi_extscan_wlan_change_bssid_param)); + *buf_len = len; + return QDF_STATUS_SUCCESS; +} + +/** + * send_extscan_start_change_monitor_cmd_tlv() - send start change monitor cmd + * @wmi_handle: wmi handle + * @psigchange: change monitor request params + * + * This function sends start change monitor request to fw. + * + * Return: CDF status + */ +static QDF_STATUS send_extscan_start_change_monitor_cmd_tlv + (wmi_unified_t wmi_handle, + struct extscan_set_sig_changereq_params * + psigchange) +{ + QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; + wmi_buf_t buf; + int len; + + + qdf_status = wmi_get_buf_extscan_change_monitor_cmd(wmi_handle, + psigchange, &buf, + &len); + if (qdf_status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Failed to get buffer for change monitor cmd", + __func__); + return QDF_STATUS_E_FAILURE; + } + if (!buf) { + WMI_LOGE("%s: Failed to get buffer", __func__); + return QDF_STATUS_E_FAILURE; + } + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID)) { + WMI_LOGE("%s: failed to send command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_extscan_stop_hotlist_monitor_cmd_tlv() - stop hotlist monitor + * @wmi_handle: wmi handle + * @photlist_reset: hotlist reset params + * + * This function configures hotlist monitor to stop in fw. + * + * Return: CDF status + */ +static QDF_STATUS send_extscan_stop_hotlist_monitor_cmd_tlv + (wmi_unified_t wmi_handle, + struct extscan_bssid_hotlist_reset_params *photlist_reset) +{ + wmi_extscan_configure_hotlist_monitor_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + int hotlist_entries = 0; + + len = sizeof(*cmd); + + /* reset bssid hotlist with tlv set to 0 */ + len += WMI_TLV_HDR_SIZE; + len += hotlist_entries * sizeof(wmi_extscan_hotlist_entry); + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + cmd = (wmi_extscan_configure_hotlist_monitor_cmd_fixed_param *) + buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extscan_configure_hotlist_monitor_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_configure_hotlist_monitor_cmd_fixed_param)); + + cmd->request_id = photlist_reset->request_id; + cmd->vdev_id = photlist_reset->vdev_id; + cmd->mode = 0; + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + hotlist_entries * sizeof(wmi_extscan_hotlist_entry)); + buf_ptr += WMI_TLV_HDR_SIZE + + (hotlist_entries * sizeof(wmi_extscan_hotlist_entry)); + + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID)) { + WMI_LOGE("%s: failed to command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_stop_extscan_cmd_tlv() - stop extscan command to fw. + * @wmi_handle: wmi handle + * @pstopcmd: stop scan command request params + * + * This function sends stop extscan request to fw. + * + * Return: CDF Status. + */ +static QDF_STATUS send_stop_extscan_cmd_tlv(wmi_unified_t wmi_handle, + struct extscan_stop_req_params *pstopcmd) +{ + wmi_extscan_stop_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + + len = sizeof(*cmd); + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + cmd = (wmi_extscan_stop_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extscan_stop_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_stop_cmd_fixed_param)); + + cmd->request_id = pstopcmd->request_id; + cmd->vdev_id = pstopcmd->vdev_id; + + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_EXTSCAN_STOP_CMDID)) { + WMI_LOGE("%s: failed to command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_get_buf_extscan_start_cmd() - Fill extscan start request + * @wmi_handle: wmi handle + * @pstart: scan command request params + * @buf: event buffer + * @buf_len: length of buffer + * + * This function fills individual elements of extscan request and + * TLV for buckets, channel list. + * + * Return: CDF Status. + */ +static +QDF_STATUS wmi_get_buf_extscan_start_cmd(wmi_unified_t wmi_handle, + struct wifi_scan_cmd_req_params *pstart, + wmi_buf_t *buf, int *buf_len) +{ + wmi_extscan_start_cmd_fixed_param *cmd; + wmi_extscan_bucket *dest_blist; + wmi_extscan_bucket_channel *dest_clist; + struct wifi_scan_bucket_params *src_bucket = pstart->buckets; + struct wifi_scan_channelspec_params *src_channel = src_bucket->channels; + struct wifi_scan_channelspec_params save_channel[WMI_WLAN_EXTSCAN_MAX_CHANNELS]; + + uint8_t *buf_ptr; + int i, k, count = 0; + int len = sizeof(*cmd); + int nbuckets = pstart->num_buckets; + int nchannels = 0; + + /* These TLV's are are NULL by default */ + uint32_t ie_len_with_pad = 0; + int num_ssid = 0; + int num_bssid = 0; + int ie_len = 0; + + uint32_t base_period = pstart->base_period; + + /* TLV placeholder for ssid_list (NULL) */ + len += WMI_TLV_HDR_SIZE; + len += num_ssid * sizeof(wmi_ssid); + + /* TLV placeholder for bssid_list (NULL) */ + len += WMI_TLV_HDR_SIZE; + len += num_bssid * sizeof(wmi_mac_addr); + + /* TLV placeholder for ie_data (NULL) */ + len += WMI_TLV_HDR_SIZE; + len += ie_len * sizeof(uint32_t); + + /* TLV placeholder for bucket */ + len += WMI_TLV_HDR_SIZE; + len += nbuckets * sizeof(wmi_extscan_bucket); + + /* TLV channel placeholder */ + len += WMI_TLV_HDR_SIZE; + for (i = 0; i < nbuckets; i++) { + nchannels += src_bucket->num_channels; + src_bucket++; + } + + WMI_LOGD("%s: Total buckets: %d total #of channels is %d", + __func__, nbuckets, nchannels); + len += nchannels * sizeof(wmi_extscan_bucket_channel); + /* Allocate the memory */ + *buf = wmi_buf_alloc(wmi_handle, len); + if (!*buf) { + WMI_LOGP("%s: failed to allocate memory for start extscan cmd", + __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(*buf); + cmd = (wmi_extscan_start_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extscan_start_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_start_cmd_fixed_param)); + + cmd->request_id = pstart->request_id; + cmd->vdev_id = pstart->vdev_id; + cmd->base_period = pstart->base_period; + cmd->num_buckets = nbuckets; + cmd->configuration_flags = 0; + if (pstart->configuration_flags & WMI_EXTSCAN_LP_EXTENDED_BATCHING) + cmd->configuration_flags |= WMI_EXTSCAN_EXTENDED_BATCHING_EN; + wmi_debug("configuration_flags: 0x%x", cmd->configuration_flags); +#ifdef FEATURE_WLAN_EXTSCAN + cmd->min_rest_time = WMI_EXTSCAN_REST_TIME; + cmd->max_rest_time = WMI_EXTSCAN_REST_TIME; + cmd->max_scan_time = WMI_EXTSCAN_MAX_SCAN_TIME; + cmd->burst_duration = WMI_EXTSCAN_BURST_DURATION; +#endif + + /* The max dwell time is retrieved from the first channel + * of the first bucket and kept common for all channels. + */ + cmd->min_dwell_time_active = pstart->min_dwell_time_active; + cmd->max_dwell_time_active = pstart->max_dwell_time_active; + cmd->min_dwell_time_passive = pstart->min_dwell_time_passive; + cmd->max_dwell_time_passive = pstart->max_dwell_time_passive; + cmd->max_bssids_per_scan_cycle = pstart->max_ap_per_scan; + cmd->max_table_usage = pstart->report_threshold_percent; + cmd->report_threshold_num_scans = pstart->report_threshold_num_scans; + + cmd->repeat_probe_time = cmd->max_dwell_time_active / + WMI_SCAN_NPROBES_DEFAULT; + cmd->probe_delay = 0; + cmd->probe_spacing_time = 0; + cmd->idle_time = 0; + cmd->scan_ctrl_flags = WMI_SCAN_ADD_BCAST_PROBE_REQ | + WMI_SCAN_ADD_CCK_RATES | + WMI_SCAN_ADD_OFDM_RATES | + WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ | + WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ; + WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags, + pstart->extscan_adaptive_dwell_mode); + cmd->scan_priority = WMI_SCAN_PRIORITY_VERY_LOW; + cmd->num_ssids = 0; + cmd->num_bssid = 0; + cmd->ie_len = 0; + cmd->n_probes = (cmd->repeat_probe_time > 0) ? + cmd->max_dwell_time_active / cmd->repeat_probe_time : 0; + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_FIXED_STRUC, + num_ssid * sizeof(wmi_ssid)); + buf_ptr += WMI_TLV_HDR_SIZE + (num_ssid * sizeof(wmi_ssid)); + + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_FIXED_STRUC, + num_bssid * sizeof(wmi_mac_addr)); + buf_ptr += WMI_TLV_HDR_SIZE + (num_bssid * sizeof(wmi_mac_addr)); + + ie_len_with_pad = 0; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + ie_len_with_pad); + buf_ptr += WMI_TLV_HDR_SIZE + ie_len_with_pad; + + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + nbuckets * sizeof(wmi_extscan_bucket)); + dest_blist = (wmi_extscan_bucket *) + (buf_ptr + WMI_TLV_HDR_SIZE); + src_bucket = pstart->buckets; + + /* Retrieve scanning information from each bucket and + * channels and send it to the target + */ + for (i = 0; i < nbuckets; i++) { + WMITLV_SET_HDR(dest_blist, + WMITLV_TAG_STRUC_wmi_extscan_bucket_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_extscan_bucket)); + + dest_blist->bucket_id = src_bucket->bucket; + dest_blist->base_period_multiplier = + src_bucket->period / base_period; + dest_blist->min_period = src_bucket->period; + dest_blist->max_period = src_bucket->max_period; + dest_blist->exp_backoff = src_bucket->exponent; + dest_blist->exp_max_step_count = src_bucket->step_count; + dest_blist->channel_band = src_bucket->band; + dest_blist->num_channels = src_bucket->num_channels; + dest_blist->notify_extscan_events = 0; + + if (src_bucket->report_events & + WMI_EXTSCAN_REPORT_EVENTS_EACH_SCAN) + dest_blist->notify_extscan_events = + WMI_EXTSCAN_CYCLE_COMPLETED_EVENT | + WMI_EXTSCAN_CYCLE_STARTED_EVENT; + + if (src_bucket->report_events & + WMI_EXTSCAN_REPORT_EVENTS_FULL_RESULTS) { + dest_blist->forwarding_flags = + WMI_EXTSCAN_FORWARD_FRAME_TO_HOST; + dest_blist->notify_extscan_events |= + WMI_EXTSCAN_BUCKET_COMPLETED_EVENT | + WMI_EXTSCAN_CYCLE_STARTED_EVENT | + WMI_EXTSCAN_CYCLE_COMPLETED_EVENT; + } else { + dest_blist->forwarding_flags = + WMI_EXTSCAN_NO_FORWARDING; + } + + if (src_bucket->report_events & + WMI_EXTSCAN_REPORT_EVENTS_NO_BATCH) + dest_blist->configuration_flags = 0; + else + dest_blist->configuration_flags = + WMI_EXTSCAN_BUCKET_CACHE_RESULTS; + + wmi_debug("ntfy_extscan_events:%u cfg_flags:%u fwd_flags:%u", + dest_blist->notify_extscan_events, + dest_blist->configuration_flags, + dest_blist->forwarding_flags); + + dest_blist->min_dwell_time_active = + src_bucket->min_dwell_time_active; + dest_blist->max_dwell_time_active = + src_bucket->max_dwell_time_active; + dest_blist->min_dwell_time_passive = + src_bucket->min_dwell_time_passive; + dest_blist->max_dwell_time_passive = + src_bucket->max_dwell_time_passive; + src_channel = src_bucket->channels; + + /* save the channel info to later populate + * the channel TLV + */ + for (k = 0; k < src_bucket->num_channels; k++) { + save_channel[count++].channel = src_channel->channel; + src_channel++; + } + dest_blist++; + src_bucket++; + } + buf_ptr += WMI_TLV_HDR_SIZE + (nbuckets * sizeof(wmi_extscan_bucket)); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + nchannels * sizeof(wmi_extscan_bucket_channel)); + dest_clist = (wmi_extscan_bucket_channel *) + (buf_ptr + WMI_TLV_HDR_SIZE); + + /* Active or passive scan is based on the bucket dwell time + * and channel specific active,passive scans are not + * supported yet + */ + for (i = 0; i < nchannels; i++) { + WMITLV_SET_HDR(dest_clist, + WMITLV_TAG_STRUC_wmi_extscan_bucket_channel_event_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_bucket_channel)); + dest_clist->channel = save_channel[i].channel; + dest_clist++; + } + buf_ptr += WMI_TLV_HDR_SIZE + + (nchannels * sizeof(wmi_extscan_bucket_channel)); + *buf_len = len; + return QDF_STATUS_SUCCESS; +} + +/** + * send_start_extscan_cmd_tlv() - start extscan command to fw. + * @wmi_handle: wmi handle + * @pstart: scan command request params + * + * This function sends start extscan request to fw. + * + * Return: CDF Status. + */ +static QDF_STATUS send_start_extscan_cmd_tlv(wmi_unified_t wmi_handle, + struct wifi_scan_cmd_req_params *pstart) +{ + QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; + wmi_buf_t buf; + int len; + + /* Fill individual elements of extscan request and + * TLV for buckets, channel list. + */ + qdf_status = wmi_get_buf_extscan_start_cmd(wmi_handle, + pstart, &buf, &len); + if (qdf_status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Failed to get buffer for ext scan cmd", __func__); + return QDF_STATUS_E_FAILURE; + } + if (!buf) { + WMI_LOGE("%s:Failed to get buffer for current extscan info", + __func__); + return QDF_STATUS_E_FAILURE; + } + if (wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_EXTSCAN_START_CMDID)) { + WMI_LOGE("%s: failed to send command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** wmi_get_hotlist_entries_per_page() - hotlist entries per page + * @wmi_handle: wmi handle. + * @cmd: size of command structure. + * @per_entry_size: per entry size. + * + * This utility function calculates how many hotlist entries can + * fit in one page. + * + * Return: number of entries + */ +static inline int wmi_get_hotlist_entries_per_page + (wmi_unified_t wmi_handle, + size_t cmd_size, + size_t per_entry_size) +{ + uint32_t avail_space = 0; + int num_entries = 0; + uint16_t max_msg_len = wmi_get_max_msg_len(wmi_handle); + + /* Calculate number of hotlist entries that can + * be passed in wma message request. + */ + avail_space = max_msg_len - cmd_size; + num_entries = avail_space / per_entry_size; + return num_entries; +} + +/** + * send_extscan_start_hotlist_monitor_cmd_tlv() - start hotlist monitor + * @wmi_handle: wmi handle + * @params: hotlist params + * + * This function configures hotlist monitor to start in fw. + * + * Return: QDF status + */ +static QDF_STATUS send_extscan_start_hotlist_monitor_cmd_tlv + (wmi_unified_t wmi_handle, + struct extscan_bssid_hotlist_set_params *params) +{ + wmi_extscan_configure_hotlist_monitor_cmd_fixed_param *cmd = NULL; + wmi_extscan_hotlist_entry *dest_hotlist; + struct ap_threshold_params *src_ap = params->ap; + wmi_buf_t buf; + uint8_t *buf_ptr; + + int j, index = 0; + int cmd_len = 0; + int num_entries; + int min_entries = 0; + uint32_t numap = params->num_ap; + int len = sizeof(*cmd); + + len += WMI_TLV_HDR_SIZE; + cmd_len = len; + + num_entries = wmi_get_hotlist_entries_per_page(wmi_handle, + cmd_len, + sizeof(*dest_hotlist)); + /* setbssid hotlist expects the bssid list + * to be non zero value + */ + if (!numap || (numap > WMI_WLAN_EXTSCAN_MAX_HOTLIST_APS)) { + WMI_LOGE("Invalid number of APs: %d", numap); + return QDF_STATUS_E_INVAL; + } + + /* Split the hot list entry pages and send multiple command + * requests if the buffer reaches the maximum request size + */ + while (index < numap) { + min_entries = QDF_MIN(num_entries, numap); + len += min_entries * sizeof(wmi_extscan_hotlist_entry); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_FAILURE; + } + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_extscan_configure_hotlist_monitor_cmd_fixed_param *) + buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extscan_configure_hotlist_monitor_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_configure_hotlist_monitor_cmd_fixed_param)); + + /* Multiple requests are sent until the num_entries_in_page + * matches the total_entries + */ + cmd->request_id = params->request_id; + cmd->vdev_id = params->vdev_id; + cmd->total_entries = numap; + cmd->mode = 1; + cmd->num_entries_in_page = min_entries; + cmd->lost_ap_scan_count = params->lost_ap_sample_size; + cmd->first_entry_index = index; + + WMI_LOGD("%s: vdev id:%d total_entries: %d num_entries: %d lost_ap_sample_size: %d", + __func__, cmd->vdev_id, cmd->total_entries, + cmd->num_entries_in_page, + cmd->lost_ap_scan_count); + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + min_entries * sizeof(wmi_extscan_hotlist_entry)); + dest_hotlist = (wmi_extscan_hotlist_entry *) + (buf_ptr + WMI_TLV_HDR_SIZE); + + /* Populate bssid, channel info and rssi + * for the bssid's that are sent as hotlists. + */ + for (j = 0; j < min_entries; j++) { + WMITLV_SET_HDR(dest_hotlist, + WMITLV_TAG_STRUC_wmi_extscan_bucket_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_hotlist_entry)); + + dest_hotlist->min_rssi = src_ap->low; + WMI_CHAR_ARRAY_TO_MAC_ADDR(src_ap->bssid.bytes, + &dest_hotlist->bssid); + + WMI_LOGD("%s:channel:%d min_rssi %d", + __func__, dest_hotlist->channel, + dest_hotlist->min_rssi); + WMI_LOGD + ("%s: bssid mac_addr31to0: 0x%x, mac_addr47to32: 0x%x", + __func__, dest_hotlist->bssid.mac_addr31to0, + dest_hotlist->bssid.mac_addr47to32); + dest_hotlist++; + src_ap++; + } + buf_ptr += WMI_TLV_HDR_SIZE + + (min_entries * sizeof(wmi_extscan_hotlist_entry)); + + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID)) { + WMI_LOGE("%s: failed to send command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + index = index + min_entries; + num_entries = numap - min_entries; + len = cmd_len; + } + return QDF_STATUS_SUCCESS; +} + +void wmi_extscan_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + ops->send_reset_passpoint_network_list_cmd = + send_reset_passpoint_network_list_cmd_tlv; + ops->send_set_passpoint_network_list_cmd = + send_set_passpoint_network_list_cmd_tlv; + ops->send_set_epno_network_list_cmd = + send_set_epno_network_list_cmd_tlv; + ops->send_extscan_get_capabilities_cmd = + send_extscan_get_capabilities_cmd_tlv; + ops->send_extscan_get_cached_results_cmd = + send_extscan_get_cached_results_cmd_tlv; + ops->send_extscan_stop_change_monitor_cmd = + send_extscan_stop_change_monitor_cmd_tlv; + ops->send_extscan_start_change_monitor_cmd = + send_extscan_start_change_monitor_cmd_tlv; + ops->send_extscan_stop_hotlist_monitor_cmd = + send_extscan_stop_hotlist_monitor_cmd_tlv; + ops->send_extscan_start_hotlist_monitor_cmd = + send_extscan_start_hotlist_monitor_cmd_tlv; + ops->send_stop_extscan_cmd = send_stop_extscan_cmd_tlv; + ops->send_start_extscan_cmd = send_start_extscan_cmd_tlv; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_fwol_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_fwol_api.c new file mode 100644 index 0000000000000000000000000000000000000000..7bca133af9f45c1596305dd3e50e0914d73811be --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_fwol_api.c @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to fw offload component. + */ + +#include "wmi_unified_priv.h" +#include "wlan_fwol_public_structs.h" +#include "wmi_unified_fwol_api.h" + +#ifdef WLAN_FEATURE_ELNA +QDF_STATUS +wmi_unified_send_set_elna_bypass_cmd(struct wmi_unified *wmi_handle, + struct set_elna_bypass_request *req) +{ + if (wmi_handle->ops->send_set_elna_bypass_cmd) + return wmi_handle->ops->send_set_elna_bypass_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_get_elna_bypass_cmd(struct wmi_unified *wmi_handle, + struct get_elna_bypass_request *req) +{ + if (wmi_handle->ops->send_get_elna_bypass_cmd) + return wmi_handle->ops->send_get_elna_bypass_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_get_elna_bypass_resp(struct wmi_unified *wmi_handle, void *resp_buf, + struct get_elna_bypass_response *resp) +{ + if (wmi_handle->ops->extract_get_elna_bypass_resp) + return wmi_handle->ops->extract_get_elna_bypass_resp(wmi_handle, + resp_buf, + resp); + + return QDF_STATUS_E_FAILURE; +} +#endif /* WLAN_FEATURE_ELNA */ + +#ifdef WLAN_SEND_DSCP_UP_MAP_TO_FW +QDF_STATUS +wmi_unified_send_dscp_tip_map_cmd(struct wmi_unified *wmi_handle, + uint32_t *dscp_to_tid_map) +{ + if (wmi_handle->ops->send_dscp_tid_map_cmd) + return wmi_handle->ops->send_dscp_tid_map_cmd(wmi_handle, + dscp_to_tid_map); + + return QDF_STATUS_E_FAILURE; +} +#endif /* WLAN_SEND_DSCP_UP_MAP_TO_FW */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_fwol_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_fwol_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..c9b20f86d0501669fc0c85d3c6999aa1723ed0b7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_fwol_tlv.c @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "osdep.h" +#include "wmi.h" +#include "wmi_unified_priv.h" +#include "wlan_fwol_public_structs.h" +#include "wmi_unified_fwol_api.h" + +#ifdef WLAN_FEATURE_ELNA +/** + * send_set_elna_bypass_cmd_tlv() - send set elna bypass cmd to fw + * @wmi_handle: wmi handle + * @req: set eLNA bypass request + * + * Send WMI_SET_ELNA_BYPASS_CMDID to fw. + * + * Return: QDF_STATUS + */ +static QDF_STATUS +send_set_elna_bypass_cmd_tlv(wmi_unified_t wmi_handle, + struct set_elna_bypass_request *req) +{ + wmi_buf_t buf; + wmi_set_elna_bypass_cmd_fixed_param *cmd; + uint16_t len = sizeof(*cmd); + QDF_STATUS ret; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed to allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_set_elna_bypass_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_set_elna_bypass_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_set_elna_bypass_cmd_fixed_param)); + cmd->vdev_id = req->vdev_id; + cmd->en_dis = req->en_dis; + wmi_mtrace(WMI_SET_ELNA_BYPASS_CMDID, req->vdev_id, req->en_dis); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_SET_ELNA_BYPASS_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send set param command ret = %d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_get_elna_bypass_cmd_tlv() - send get elna bypass cmd to fw + * @wmi_handle: wmi handle + * @req: get eLNA bypass request + * + * Send WMI_GET_ELNA_BYPASS_CMDID to fw. + * + * Return: QDF_STATUS + */ +static QDF_STATUS +send_get_elna_bypass_cmd_tlv(wmi_unified_t wmi_handle, + struct get_elna_bypass_request *req) +{ + wmi_buf_t buf; + wmi_get_elna_bypass_cmd_fixed_param *cmd; + uint16_t len = sizeof(*cmd); + QDF_STATUS ret; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed to allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_get_elna_bypass_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_get_elna_bypass_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_get_elna_bypass_cmd_fixed_param)); + cmd->vdev_id = req->vdev_id; + wmi_mtrace(WMI_GET_ELNA_BYPASS_CMDID, req->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_GET_ELNA_BYPASS_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send set param command ret = %d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * extract_get_elna_bypass_resp_tlv() - Extract WMI get eLNA bypass response + * @wmi_handle: wmi handle + * @resp_buf: response buffer + * @resp: get eLNA bypass response + * + * Extract WMI get eLNA bypass response from firmware. + * + * Return: QDF_STATUS + */ +static QDF_STATUS +extract_get_elna_bypass_resp_tlv(struct wmi_unified *wmi_handle, void *resp_buf, + struct get_elna_bypass_response *resp) +{ + WMI_GET_ELNA_BYPASS_EVENTID_param_tlvs *param_buf; + wmi_get_elna_bypass_event_fixed_param *evt; + + param_buf = resp_buf; + evt = param_buf->fixed_param; + if (!evt) { + WMI_LOGE("Invalid get elna bypass event"); + return QDF_STATUS_E_INVAL; + } + + WMI_LOGD("Get elna bypass %d from vdev %d", evt->en_dis, evt->vdev_id); + + resp->vdev_id = evt->vdev_id; + resp->en_dis = evt->en_dis; + + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_FEATURE_ELNA */ + +#ifdef WLAN_FEATURE_ELNA +static void wmi_fwol_attach_elna_tlv(struct wmi_ops *ops) +{ + ops->send_set_elna_bypass_cmd = send_set_elna_bypass_cmd_tlv; + ops->send_get_elna_bypass_cmd = send_get_elna_bypass_cmd_tlv; + ops->extract_get_elna_bypass_resp = extract_get_elna_bypass_resp_tlv; +} +#else +static void wmi_fwol_attach_elna_tlv(struct wmi_ops *ops) +{ +} +#endif /* WLAN_FEATURE_ELNA */ + +#ifdef WLAN_SEND_DSCP_UP_MAP_TO_FW +/** + * send_dscp_tid_map_cmd_tlv() - send dscp to tid map cmd to fw + * @wmi_handle: wmi handle + * @dscp_to_tid_map: array of dscp to tid map values + * + * Send WMI_PDEV_SET_DSCP_TID_MAP_CMDID to fw. + * + * Return: QDF_STATUS + */ +static QDF_STATUS +send_dscp_tid_map_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t *dscp_to_tid_map) +{ + QDF_STATUS status; + wmi_pdev_set_dscp_tid_map_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed to allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_pdev_set_dscp_tid_map_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR( + &cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_dscp_tid_map_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_set_dscp_tid_map_cmd_fixed_param)); + cmd->reserved0 = WMI_PDEV_ID_SOC; + qdf_mem_copy(&cmd->dscp_to_tid_map, dscp_to_tid_map, + sizeof(uint32_t) * WMI_DSCP_MAP_MAX); + + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_DSCP_TID_MAP_CMDID); + if (status) { + WMI_LOGE("Failed to send dscp_up_map_to_fw %d", status); + wmi_buf_free(buf); + } + + return status; +} + +static void wmi_fwol_attach_dscp_tid_tlv(struct wmi_ops *ops) +{ + ops->send_dscp_tid_map_cmd = send_dscp_tid_map_cmd_tlv; +} +#else +static void wmi_fwol_attach_dscp_tid_tlv(struct wmi_ops *ops) +{ +} +#endif /* WLAN_SEND_DSCP_UP_MAP_TO_FW */ + +void wmi_fwol_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + wmi_fwol_attach_elna_tlv(ops); + wmi_fwol_attach_dscp_tid_tlv(ops); + +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_gpio_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_gpio_api.c new file mode 100644 index 0000000000000000000000000000000000000000..167bc53259f22af99c59c0435ee258e633e8574f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_gpio_api.c @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to gpio component. + */ + +#include +#include + +QDF_STATUS wmi_unified_gpio_config_cmd_send(wmi_unified_t wmi_handle, + struct gpio_config_params *param) +{ + if (wmi_handle->ops->send_gpio_config_cmd) + return wmi_handle->ops->send_gpio_config_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_gpio_output_cmd_send(wmi_unified_t wmi_handle, + struct gpio_output_params *param) +{ + if (wmi_handle->ops->send_gpio_output_cmd) + return wmi_handle->ops->send_gpio_output_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_gpio_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_gpio_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..f4ee5c905c9f11d939d451c3dac957dcbc3d4c26 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_gpio_tlv.c @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include + +/** + * convert_gpio_dir() - Function to convert unified gpio direction + * @dir: pointer to enum gpio_direction + * + * Convert the wmi unified gpio direction to FW TLV WMI gpio direction + * + * Return: + * 0 - Output + * 1 - Input + */ +static uint32_t +convert_gpio_direction(enum gpio_direction dir) +{ + switch (dir) { + case WMI_HOST_GPIO_INPUT: + return WMI_FW_GPIO_INPUT; + case WMI_HOST_GPIO_OUTPUT: + return WMI_FW_GPIO_OUTPUT; + default: + return WMI_FW_GPIO_OUTPUT; + } +} + +/** + * convert_gpio_pull_type() - Function to convert unified pull type + * @pull_type: pointer to enum gpio_pull_type + * + * Convert the wmi unified pull type to FW TLV WMI gpio pull type + * + * Return: FW TLV WMI gpio pull type + */ +static uint32_t +convert_gpio_pull_type(enum gpio_pull_type pull_type) +{ + switch (pull_type) { + case WMI_HOST_GPIO_PULL_NONE: + return WMI_GPIO_PULL_NONE; + case WMI_HOST_GPIO_PULL_UP: + return WMI_GPIO_PULL_UP; + case WMI_HOST_GPIO_PULL_DOWN: + return WMI_GPIO_PULL_DOWN; + default: + return WMI_GPIO_PULL_NONE; + } +} + +/** + * convert_gpio_interrupt_mode() - Function to convert unified interrupt mode + * @intr_mode: pointer to enum gpio_interrupt_mode + * + * Convert the wmi unified interrupt mode to FW TLV WMI gpio interrupt mode + * + * Return: FW TLV WMI gpio interrupt mode + */ +static uint32_t +convert_gpio_interrupt_mode(enum gpio_interrupt_mode intr_mode) +{ + switch (intr_mode) { + case WMI_HOST_GPIO_INTMODE_DISABLE: + return WMI_GPIO_INTTYPE_DISABLE; + case WMI_HOST_GPIO_INTMODE_RISING_EDGE: + return WMI_GPIO_INTTYPE_RISING_EDGE; + case WMI_HOST_GPIO_INTMODE_FALLING_EDGE: + return WMI_GPIO_INTTYPE_FALLING_EDGE; + case WMI_HOST_GPIO_INTMODE_BOTH_EDGE: + return WMI_GPIO_INTTYPE_BOTH_EDGE; + case WMI_HOST_GPIO_INTMODE_LEVEL_LOW: + return WMI_GPIO_INTTYPE_LEVEL_LOW; + case WMI_HOST_GPIO_INTMODE_LEVEL_HIGH: + return WMI_GPIO_INTTYPE_LEVEL_HIGH; + default: + return WMI_GPIO_INTTYPE_DISABLE; + } +} + +/** + * convert_gpio_output_value() - Function to convert unified gpio output value + * @value: pointer to enum gpio_value + * + * Convert the wmi unified gpio output value to FW TLV WMI gpio output value + * + * Return: + * 0 - Output low level + * 1 - Output high level + */ +static uint32_t +convert_gpio_output_value(enum gpio_value value) +{ + switch (value) { + case WMI_HOST_GPIO_LEVEL_LOW: + return 0; + case WMI_HOST_GPIO_LEVEL_HIGH: + return 1; + default: + return 0; + } +} + +/** + * send_gpio_config_cmd_tlv() - send gpio config to fw + * @wmi_handle: wmi handle + * @param: pointer to hold gpio config params + * + * Send gpio configuration to firmware. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +static QDF_STATUS +send_gpio_config_cmd_tlv(wmi_unified_t wmi_handle, + struct gpio_config_params *param) +{ + wmi_gpio_config_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len; + QDF_STATUS ret; + + len = sizeof(*cmd); + + /* Sanity Checks */ + if (param->pin_pull_type >= WMI_HOST_GPIO_PULL_MAX || + param->pin_intr_mode >= WMI_HOST_GPIO_INTMODE_MAX || + param->pin_dir >= WMI_HOST_GPIO_DIR_MAX) { + return QDF_STATUS_E_FAILURE; + } + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_gpio_config_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_gpio_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_gpio_config_cmd_fixed_param)); + cmd->gpio_num = param->pin_num; + cmd->input = convert_gpio_direction(param->pin_dir); + cmd->pull_type = convert_gpio_pull_type(param->pin_pull_type); + cmd->intr_mode = convert_gpio_interrupt_mode(param->pin_intr_mode); + cmd->mux_config_val = param->mux_config_val; + cmd->drive = param->drive; + cmd->init_enable = param->init_enable; + + wmi_debug("GPIO num %d, input-dir %d, pull_type %d, intr_mode %d" + " mux_config_val %d drive %d init_enable %d", + cmd->gpio_num, cmd->input, cmd->pull_type, cmd->intr_mode, + cmd->mux_config_val, cmd->drive, cmd->init_enable); + + wmi_mtrace(WMI_GPIO_CONFIG_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_GPIO_CONFIG_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Sending GPIO config cmd failed"); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_gpio_output_cmd_tlv() - send gpio output to fw + * @wmi_handle: wmi handle + * @param: pointer to hold gpio output param + * + * Send gpio output value to firmware. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +static QDF_STATUS +send_gpio_output_cmd_tlv(wmi_unified_t wmi_handle, + struct gpio_output_params *param) +{ + wmi_gpio_output_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len; + QDF_STATUS ret; + + len = sizeof(*cmd); + + /* Sanity Checks */ + if (param->pin_set >= WMI_HOST_GPIO_LEVEL_MAX) + return QDF_STATUS_E_FAILURE; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_gpio_output_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_gpio_output_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_gpio_output_cmd_fixed_param)); + cmd->gpio_num = param->pin_num; + cmd->set = convert_gpio_output_value(param->pin_set); + + wmi_debug("GPIO num %d, set %d", cmd->gpio_num, cmd->set); + wmi_mtrace(WMI_GPIO_OUTPUT_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_GPIO_OUTPUT_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Sending GPIO output cmd failed"); + wmi_buf_free(buf); + } + + return ret; +} + +void wmi_gpio_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_gpio_config_cmd = send_gpio_config_cmd_tlv; + ops->send_gpio_output_cmd = send_gpio_output_cmd_tlv; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_interop_issues_ap_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_interop_issues_ap_api.c new file mode 100644 index 0000000000000000000000000000000000000000..f29bf00fb8b72ea182dab2d0006b498aeda1edc4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_interop_issues_ap_api.c @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to interop issues ap component. + */ + +#include +#include + +QDF_STATUS +wmi_extract_interop_issues_ap_ev_param(wmi_unified_t wmi_handle, void *evt_buf, + struct wlan_interop_issues_ap_event *param) +{ + if (wmi_handle->ops->extract_interop_issues_ap_ev_param) + return wmi_handle->ops->extract_interop_issues_ap_ev_param( + wmi_handle, evt_buf, param); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_rap_ps_cmd(wmi_unified_t wmi_handle, + struct wlan_interop_issues_ap_info *rap) +{ + if (wmi_handle->ops->send_set_rap_ps_cmd) + return wmi_handle->ops->send_set_rap_ps_cmd(wmi_handle, rap); + return QDF_STATUS_E_FAILURE; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_interop_issues_ap_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_interop_issues_ap_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..c60ffda4954bca947046e8b6fe7a51e6e26382b4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_interop_issues_ap_tlv.c @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include + +/** + * extract_interop_issues_ap_ev_param_tlv() - extract info from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold interop issues ap info + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +extract_interop_issues_ap_ev_param_tlv(wmi_unified_t wmi_handle, void *evt_buf, + struct wlan_interop_issues_ap_event *data) +{ + wmi_pdev_rap_info_event_fixed_param *fixed_param; + WMI_PDEV_RAP_INFO_EVENTID_param_tlvs *param_buf = + (WMI_PDEV_RAP_INFO_EVENTID_param_tlvs *)evt_buf; + + if (!param_buf) { + wmi_err_rl("Invalid param_buf"); + return -EINVAL; + } + + fixed_param = param_buf->fixed_param; + if (!fixed_param) { + wmi_err_rl("Invalid fixed_praram"); + return -EINVAL; + } + + if (fixed_param->type != WMI_ROGUE_AP_ON_STA_PS) { + wmi_err_rl("Invalid type"); + return -EINVAL; + } + + data->pdev_id = fixed_param->pdev_id; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_param->bssid, data->rap_addr.bytes); + + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_rap_ps_cmd_tlv() - set interop issues ap mac address in fw + * @wmi_handle: wmi handle + * @rap: interop issues ap info + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +send_set_rap_ps_cmd_tlv(wmi_unified_t wmi_handle, + struct wlan_interop_issues_ap_info *rap) +{ + wmi_pdev_set_rap_config_fixed_param *cmd; + wmi_pdev_set_rap_config_on_sta_ps_tlv_param *param; + uint8_t *buf_ptr; + wmi_buf_t buf; + uint32_t ret; + uint32_t len, count; + qdf_size_t i; + + count = rap->count; + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + sizeof(*param) * count; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + buf_ptr = wmi_buf_data(buf); + cmd = (wmi_pdev_set_rap_config_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_rap_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_set_rap_config_fixed_param)); + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + WMI_HOST_PDEV_ID_SOC); + + cmd->type = WMI_ROGUE_AP_ON_STA_PS; + if (rap->detect_enable) + cmd->sta_ps_detection_enabled = 1; + else + cmd->sta_ps_detection_enabled = 0; + + buf_ptr += sizeof(*cmd); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_pdev_set_rap_config_on_sta_ps_tlv_param) * count); + buf_ptr += WMI_TLV_HDR_SIZE; + + for (i = 0; i < count; i++) { + param = (wmi_pdev_set_rap_config_on_sta_ps_tlv_param *)buf_ptr; + WMITLV_SET_HDR(¶m->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_rap_config_on_sta_ps_tlv_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_set_rap_config_on_sta_ps_tlv_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR(rap->rap_items[i].bytes, + ¶m->bssid); + buf_ptr += sizeof(*param); + } + + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_RAP_CONFIG_CMDID); + if (ret) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +void wmi_interop_issues_ap_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->extract_interop_issues_ap_ev_param = + extract_interop_issues_ap_ev_param_tlv; + ops->send_set_rap_ps_cmd = send_set_rap_ps_cmd_tlv; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_nan_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_nan_api.c new file mode 100644 index 0000000000000000000000000000000000000000..91953f8a12311217efb44028f1c97494251f0958 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_nan_api.c @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to NAN component. + */ + +#include +#include + +QDF_STATUS wmi_unified_nan_req_cmd(wmi_unified_t wmi_handle, + struct nan_msg_params *nan_msg) +{ + if (wmi_handle->ops->send_nan_req_cmd) + return wmi_handle->ops->send_nan_req_cmd(wmi_handle, + nan_msg); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_nan_disable_req_cmd(wmi_unified_t wmi_handle, + struct nan_disable_req *nan_msg) +{ + if (wmi_handle->ops->send_nan_disable_req_cmd) + return wmi_handle->ops->send_nan_disable_req_cmd(wmi_handle, + nan_msg); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_nan_event_rsp(wmi_unified_t wmi_handle, void *evt_buf, + struct nan_event_params *nan_evt_params, + uint8_t **nan_msg_buf) +{ + if (wmi_handle->ops->extract_nan_event_rsp) + return wmi_handle->ops->extract_nan_event_rsp(wmi_handle, + evt_buf, + nan_evt_params, + nan_msg_buf); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_ndp_initiator_req_cmd_send(wmi_unified_t wmi_handle, + struct nan_datapath_initiator_req *req) +{ + if (wmi_handle->ops->send_ndp_initiator_req_cmd) + return wmi_handle->ops->send_ndp_initiator_req_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_ndp_responder_req_cmd_send(wmi_unified_t wmi_handle, + struct nan_datapath_responder_req *req) +{ + if (wmi_handle->ops->send_ndp_responder_req_cmd) + return wmi_handle->ops->send_ndp_responder_req_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_terminate_all_ndps_req_cmd(wmi_unified_t wmi_handle, + uint32_t vdev_id) +{ + if (wmi_handle->ops->send_terminate_all_ndps_req_cmd) + return wmi_handle->ops->send_terminate_all_ndps_req_cmd( + wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_ndp_end_req_cmd_send(wmi_unified_t wmi_handle, + struct nan_datapath_end_req *req) +{ + if (wmi_handle->ops->send_ndp_end_req_cmd) + return wmi_handle->ops->send_ndp_end_req_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_nan_msg(wmi_unified_t wmi_handle, + uint8_t *data, + struct nan_dump_msg *msg) +{ + if (wmi_handle->ops->extract_nan_msg) + return wmi_handle->ops->extract_nan_msg(data, msg); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_initiator_rsp(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_initiator_rsp *rsp) +{ + if (wmi_handle->ops->extract_ndp_initiator_rsp) + return wmi_handle->ops->extract_ndp_initiator_rsp(wmi_handle, + data, rsp); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_ind(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_indication_event *ind) +{ + if (wmi_handle->ops->extract_ndp_ind) + return wmi_handle->ops->extract_ndp_ind(wmi_handle, + data, ind); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_confirm(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_confirm_event *ev) +{ + if (wmi_handle->ops->extract_ndp_confirm) + return wmi_handle->ops->extract_ndp_confirm(wmi_handle, + data, ev); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_responder_rsp(wmi_unified_t wmi_handle, + uint8_t *data, + struct nan_datapath_responder_rsp *rsp) +{ + if (wmi_handle->ops->extract_ndp_responder_rsp) + return wmi_handle->ops->extract_ndp_responder_rsp(wmi_handle, + data, rsp); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_end_rsp(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_end_rsp_event *rsp) +{ + if (wmi_handle->ops->extract_ndp_end_rsp) + return wmi_handle->ops->extract_ndp_end_rsp(wmi_handle, + data, rsp); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_end_ind(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_end_indication_event **ind) +{ + if (wmi_handle->ops->extract_ndp_end_ind) + return wmi_handle->ops->extract_ndp_end_ind(wmi_handle, + data, ind); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_sch_update(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_sch_update_event *ind) +{ + if (wmi_handle->ops->extract_ndp_sch_update) + return wmi_handle->ops->extract_ndp_sch_update(wmi_handle, + data, ind); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_host_event(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_host_event *evt) +{ + if (wmi_handle->ops->extract_ndp_host_event) + return wmi_handle->ops->extract_ndp_host_event(wmi_handle, + data, evt); + + return QDF_STATUS_E_FAILURE; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_nan_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_nan_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..181117688c47487b835dcfecb3db25190cf6b743 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_nan_tlv.c @@ -0,0 +1,1218 @@ + +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include + +static QDF_STATUS +extract_nan_event_rsp_tlv(wmi_unified_t wmi_handle, void *evt_buf, + struct nan_event_params *evt_params, + uint8_t **msg_buf) +{ + WMI_NAN_EVENTID_param_tlvs *event; + wmi_nan_event_hdr *nan_rsp_event_hdr; + nan_msg_header_t *nan_msg_hdr; + wmi_nan_event_info *nan_evt_info; + + /* + * This is how received evt looks like + * + * <-------------------- evt_buf -----------------------------------> + * + * <--wmi_nan_event_hdr--><---WMI_TLV_HDR_SIZE---><----- data --------> + * + * +-----------+---------+-----------------------+--------------------+- + * | tlv_header| data_len| WMITLV_TAG_ARRAY_BYTE | nan_rsp_event_data | + * +-----------+---------+-----------------------+--------------------+- + * + * (Only for NAN Enable Resp) + * <--wmi_nan_event_info--> + * +-----------+-----------+ + * | tlv_header| event_info| + * +-----------+-----------+ + * + */ + + event = (WMI_NAN_EVENTID_param_tlvs *)evt_buf; + nan_rsp_event_hdr = event->fixed_param; + + /* Actual data may include some padding, so data_len <= num_data */ + if (nan_rsp_event_hdr->data_len > event->num_data) { + WMI_LOGE("%s: Provided NAN event length(%d) exceeding actual length(%d)!", + __func__, nan_rsp_event_hdr->data_len, + event->num_data); + return QDF_STATUS_E_INVAL; + } + evt_params->buf_len = nan_rsp_event_hdr->data_len; + *msg_buf = event->data; + + if (nan_rsp_event_hdr->data_len < sizeof(nan_msg_header_t) || + nan_rsp_event_hdr->data_len > (WMI_SVC_MSG_MAX_SIZE - + WMI_TLV_HDR_SIZE)) { + WMI_LOGE("%s: Invalid NAN event data length(%d)!", __func__, + nan_rsp_event_hdr->data_len); + return QDF_STATUS_E_INVAL; + } + nan_msg_hdr = (nan_msg_header_t *)event->data; + + if (!wmi_service_enabled(wmi_handle, wmi_service_nan_dbs_support)) { + evt_params->evt_type = nan_event_id_generic_rsp; + return QDF_STATUS_SUCCESS; + } + + switch (nan_msg_hdr->msg_id) { + case NAN_MSG_ID_ENABLE_RSP: + nan_evt_info = event->event_info; + if (!nan_evt_info) { + WMI_LOGE(FL("Fail: NAN enable rsp event info Null")); + return QDF_STATUS_E_INVAL; + } + evt_params->evt_type = nan_event_id_enable_rsp; + evt_params->mac_id = nan_evt_info->mac_id; + evt_params->is_nan_enable_success = (nan_evt_info->status == 0); + evt_params->vdev_id = nan_evt_info->vdev_id; + break; + case NAN_MSG_ID_DISABLE_IND: + evt_params->evt_type = nan_event_id_disable_ind; + break; + case NAN_MSG_ID_ERROR_RSP: + evt_params->evt_type = nan_event_id_error_rsp; + break; + default: + evt_params->evt_type = nan_event_id_generic_rsp; + break; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_nan_disable_req_cmd_tlv() - to send nan disable request to target + * @wmi_handle: wmi handle + * @nan_msg: request data which will be non-null + * + * Return: CDF status + */ +static QDF_STATUS send_nan_disable_req_cmd_tlv(wmi_unified_t wmi_handle, + struct nan_disable_req *nan_msg) +{ + QDF_STATUS ret; + wmi_nan_cmd_param *cmd; + wmi_nan_host_config_param *cfg; + wmi_buf_t buf; + /* Initialize with minimum length required, which is Scenario 2*/ + uint16_t len = sizeof(*cmd) + sizeof(*cfg) + 2 * WMI_TLV_HDR_SIZE; + uint16_t nan_data_len, nan_data_len_aligned = 0; + uint8_t *buf_ptr; + + /* + * Scenario 1: NAN Disable with NAN msg data from upper layers + * + * <-----nan cmd param-----><-- WMI_TLV_HDR_SIZE --><--- data ----> + * +------------+----------+-----------------------+--------------+ + * | tlv_header | data_len | WMITLV_TAG_ARRAY_BYTE | nan_msg_data | + * +------------+----------+-----------------------+--------------+ + * + * <-- WMI_TLV_HDR_SIZE --><------nan host config params-----> + * -+-----------------------+---------------------------------+ + * | WMITLV_TAG_ARRAY_STRUC| tlv_header | 2g/5g disable flags| + * -+-----------------------+---------------------------------+ + * + * Scenario 2: NAN Disable without any NAN msg data from upper layers + * + * <------nan cmd param------><--WMI_TLV_HDR_SIZE--><--WMI_TLV_HDR_SI + * +------------+------------+----------------------+---------------- + * | tlv_header | data_len=0 | WMITLV_TAG_ARRAY_BYTE| WMITLV_TAG_ARRA + * +------------+------------+----------------------+---------------- + * + * ZE----><------nan host config params-----> + * -------+---------------------------------+ + * Y_STRUC| tlv_header | 2g/5g disable flags| + * -------+---------------------------------+ + */ + + if (!nan_msg) { + WMI_LOGE("%s:nan req is not valid", __func__); + return QDF_STATUS_E_FAILURE; + } + + nan_data_len = nan_msg->params.request_data_len; + + if (nan_data_len) { + nan_data_len_aligned = roundup(nan_data_len, sizeof(uint32_t)); + if (nan_data_len_aligned < nan_data_len) { + WMI_LOGE("%s: Int overflow while rounding up data_len", + __func__); + return QDF_STATUS_E_FAILURE; + } + + if (nan_data_len_aligned > WMI_SVC_MSG_MAX_SIZE + - WMI_TLV_HDR_SIZE) { + WMI_LOGE("%s: nan_data_len exceeding wmi_max_msg_size", + __func__); + return QDF_STATUS_E_FAILURE; + } + + len += nan_data_len_aligned; + } + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_nan_cmd_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_nan_cmd_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_nan_cmd_param)); + + cmd->data_len = nan_data_len; + WMI_LOGD("%s: nan data len value is %u", __func__, nan_data_len); + buf_ptr += sizeof(wmi_nan_cmd_param); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, nan_data_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + + if (nan_data_len) { + qdf_mem_copy(buf_ptr, nan_msg->params.request_data, + cmd->data_len); + buf_ptr += nan_data_len_aligned; + } + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_nan_host_config_param)); + buf_ptr += WMI_TLV_HDR_SIZE; + + cfg = (wmi_nan_host_config_param *)buf_ptr; + WMITLV_SET_HDR(&cfg->tlv_header, + WMITLV_TAG_STRUC_wmi_nan_host_config_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_nan_host_config_param)); + cfg->nan_2g_disc_disable = nan_msg->disable_2g_discovery; + cfg->nan_5g_disc_disable = nan_msg->disable_5g_discovery; + + wmi_mtrace(WMI_NAN_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_NAN_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s Failed to send set param command ret = %d", + __func__, ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_nan_req_cmd_tlv() - to send nan request to target + * @wmi_handle: wmi handle + * @nan_msg: request data which will be non-null + * + * Return: CDF status + */ +static QDF_STATUS send_nan_req_cmd_tlv(wmi_unified_t wmi_handle, + struct nan_msg_params *nan_msg) +{ + QDF_STATUS ret; + wmi_nan_cmd_param *cmd; + wmi_buf_t buf; + wmi_nan_host_config_param *cfg; + uint16_t len = sizeof(*cmd) + sizeof(*cfg) + 2 * WMI_TLV_HDR_SIZE; + uint16_t nan_data_len, nan_data_len_aligned; + uint8_t *buf_ptr; + + /* + * <----- cmd ------------><-- WMI_TLV_HDR_SIZE --><--- data ----> + * +------------+----------+-----------------------+--------------+ + * | tlv_header | data_len | WMITLV_TAG_ARRAY_BYTE | nan_msg_data | + * +------------+----------+-----------------------+--------------+ + * + * <-- WMI_TLV_HDR_SIZE --><------nan host config params--------> + * +-----------------------+------------------------------------+ + * | WMITLV_TAG_ARRAY_STRUC| tlv_header | disable flags | flags | + * +-----------------------+------------------------------------+ + */ + if (!nan_msg) { + WMI_LOGE("%s:nan req is not valid", __func__); + return QDF_STATUS_E_FAILURE; + } + nan_data_len = nan_msg->request_data_len; + nan_data_len_aligned = roundup(nan_msg->request_data_len, + sizeof(uint32_t)); + if (nan_data_len_aligned < nan_msg->request_data_len) { + WMI_LOGE("%s: integer overflow while rounding up data_len", + __func__); + return QDF_STATUS_E_FAILURE; + } + + if (nan_data_len_aligned > WMI_SVC_MSG_MAX_SIZE - WMI_TLV_HDR_SIZE) { + WMI_LOGE("%s: wmi_max_msg_size overflow for given datalen", + __func__); + return QDF_STATUS_E_FAILURE; + } + + len += nan_data_len_aligned; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_nan_cmd_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_nan_cmd_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_nan_cmd_param)); + cmd->data_len = nan_msg->request_data_len; + buf_ptr += sizeof(wmi_nan_cmd_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, nan_data_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, nan_msg->request_data, cmd->data_len); + buf_ptr += nan_data_len_aligned; + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_nan_host_config_param)); + buf_ptr += WMI_TLV_HDR_SIZE; + + cfg = (wmi_nan_host_config_param *)buf_ptr; + WMITLV_SET_HDR(&cfg->tlv_header, + WMITLV_TAG_STRUC_wmi_nan_host_config_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_nan_host_config_param)); + + WMI_NAN_SET_RANGING_INITIATOR_ROLE(cfg->flags, !!(nan_msg->rtt_cap & + WMI_FW_NAN_RTT_INITR)); + WMI_NAN_SET_RANGING_RESPONDER_ROLE(cfg->flags, !!(nan_msg->rtt_cap & + WMI_FW_NAN_RTT_RESPR)); + + wmi_mtrace(WMI_NAN_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, WMI_NAN_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s Failed to send NAN req command ret = %d", + __func__, ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_terminate_all_ndps_cmd_tlv() - send NDP Terminate for all NDP's + * associated with the given vdev id + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: QDF status + */ +static QDF_STATUS send_terminate_all_ndps_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id) +{ + wmi_ndp_cmd_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + QDF_STATUS status; + + WMI_LOGD(FL("Enter")); + + len = sizeof(*cmd); + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_ndp_cmd_param *)wmi_buf_data(wmi_buf); + + WMITLV_SET_HDR(&cmd->tlv_header, WMITLV_TAG_STRUC_wmi_ndp_cmd_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_ndp_cmd_param)); + + cmd->vdev_id = vdev_id; + cmd->ndp_disable = 1; + + wmi_mtrace(WMI_NDP_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, wmi_buf, len, WMI_NDP_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send NDP Terminate cmd: %d", status); + wmi_buf_free(wmi_buf); + } + + return status; +} + +static QDF_STATUS nan_ndp_initiator_req_tlv(wmi_unified_t wmi_handle, + struct nan_datapath_initiator_req *ndp_req) +{ + uint16_t len; + wmi_buf_t buf; + uint8_t *tlv_ptr; + QDF_STATUS status; + wmi_channel *ch_tlv; + wmi_ndp_initiator_req_fixed_param *cmd; + uint32_t passphrase_len, service_name_len; + uint32_t ndp_cfg_len, ndp_app_info_len, pmk_len; + wmi_ndp_transport_ip_param *tcp_ip_param; + + /* + * WMI command expects 4 byte alligned len: + * round up ndp_cfg_len and ndp_app_info_len to 4 bytes + */ + ndp_cfg_len = qdf_roundup(ndp_req->ndp_config.ndp_cfg_len, 4); + ndp_app_info_len = qdf_roundup(ndp_req->ndp_info.ndp_app_info_len, 4); + pmk_len = qdf_roundup(ndp_req->pmk.pmk_len, 4); + passphrase_len = qdf_roundup(ndp_req->passphrase.passphrase_len, 4); + service_name_len = + qdf_roundup(ndp_req->service_name.service_name_len, 4); + /* allocated memory for fixed params as well as variable size data */ + len = sizeof(*cmd) + sizeof(*ch_tlv) + (5 * WMI_TLV_HDR_SIZE) + + ndp_cfg_len + ndp_app_info_len + pmk_len + + passphrase_len + service_name_len; + + if (ndp_req->is_ipv6_addr_present) + len += sizeof(*tcp_ip_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_ndp_initiator_req_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ndp_initiator_req_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_ndp_initiator_req_fixed_param)); + cmd->vdev_id = wlan_vdev_get_id(ndp_req->vdev); + cmd->transaction_id = ndp_req->transaction_id; + cmd->service_instance_id = ndp_req->service_instance_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(ndp_req->peer_discovery_mac_addr.bytes, + &cmd->peer_discovery_mac_addr); + + cmd->ndp_cfg_len = ndp_req->ndp_config.ndp_cfg_len; + cmd->ndp_app_info_len = ndp_req->ndp_info.ndp_app_info_len; + cmd->ndp_channel_cfg = ndp_req->channel_cfg; + cmd->nan_pmk_len = ndp_req->pmk.pmk_len; + cmd->nan_csid = ndp_req->ncs_sk_type; + cmd->nan_passphrase_len = ndp_req->passphrase.passphrase_len; + cmd->nan_servicename_len = ndp_req->service_name.service_name_len; + + ch_tlv = (wmi_channel *)&cmd[1]; + WMITLV_SET_HDR(ch_tlv, WMITLV_TAG_STRUC_wmi_channel, + WMITLV_GET_STRUCT_TLVLEN(wmi_channel)); + ch_tlv->mhz = ndp_req->channel; + tlv_ptr = (uint8_t *)&ch_tlv[1]; + + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, ndp_cfg_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], + ndp_req->ndp_config.ndp_cfg, cmd->ndp_cfg_len); + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + ndp_cfg_len; + + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, ndp_app_info_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], + ndp_req->ndp_info.ndp_app_info, cmd->ndp_app_info_len); + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + ndp_app_info_len; + + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, pmk_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], ndp_req->pmk.pmk, + cmd->nan_pmk_len); + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + pmk_len; + + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, passphrase_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], ndp_req->passphrase.passphrase, + cmd->nan_passphrase_len); + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + passphrase_len; + + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, service_name_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], + ndp_req->service_name.service_name, + cmd->nan_servicename_len); + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + service_name_len; + + if (ndp_req->is_ipv6_addr_present) { + tcp_ip_param = (wmi_ndp_transport_ip_param *)tlv_ptr; + WMITLV_SET_HDR(tcp_ip_param, + WMITLV_TAG_STRUC_wmi_ndp_transport_ip_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_ndp_transport_ip_param)); + tcp_ip_param->ipv6_addr_present = true; + qdf_mem_copy(tcp_ip_param->ipv6_intf_addr, + ndp_req->ipv6_addr, WMI_NDP_IPV6_INTF_ADDR_LEN); + } + WMI_LOGD("IPv6 addr present: %d, addr: %pI6", + ndp_req->is_ipv6_addr_present, ndp_req->ipv6_addr); + + WMI_LOGD("vdev_id = %d, transaction_id: %d, service_instance_id: %d, ch: %d, ch_cfg: %d, csid: %d peer mac addr: mac_addr31to0: 0x%x, mac_addr47to32: 0x%x", + cmd->vdev_id, cmd->transaction_id, cmd->service_instance_id, + ch_tlv->mhz, cmd->ndp_channel_cfg, cmd->nan_csid, + cmd->peer_discovery_mac_addr.mac_addr31to0, + cmd->peer_discovery_mac_addr.mac_addr47to32); + + WMI_LOGD("ndp_config len: %d ndp_app_info len: %d pmk len: %d pass phrase len: %d service name len: %d", + cmd->ndp_cfg_len, cmd->ndp_app_info_len, cmd->nan_pmk_len, + cmd->nan_passphrase_len, cmd->nan_servicename_len); + + wmi_mtrace(WMI_NDP_INITIATOR_REQ_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_NDP_INITIATOR_REQ_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_NDP_INITIATOR_REQ_CMDID failed, ret: %d", status); + wmi_buf_free(buf); + } + + return status; +} + +static QDF_STATUS nan_ndp_responder_req_tlv(wmi_unified_t wmi_handle, + struct nan_datapath_responder_req *req) +{ + uint16_t len; + wmi_buf_t buf; + uint8_t *tlv_ptr; + QDF_STATUS status; + wmi_ndp_responder_req_fixed_param *cmd; + wmi_ndp_transport_ip_param *tcp_ip_param; + uint32_t passphrase_len, service_name_len; + uint32_t vdev_id = 0, ndp_cfg_len, ndp_app_info_len, pmk_len; + + vdev_id = wlan_vdev_get_id(req->vdev); + WMI_LOGD("vdev_id: %d, transaction_id: %d, ndp_rsp %d, ndp_instance_id: %d, ndp_app_info_len: %d", + vdev_id, req->transaction_id, + req->ndp_rsp, + req->ndp_instance_id, + req->ndp_info.ndp_app_info_len); + + /* + * WMI command expects 4 byte alligned len: + * round up ndp_cfg_len and ndp_app_info_len to 4 bytes + */ + ndp_cfg_len = qdf_roundup(req->ndp_config.ndp_cfg_len, 4); + ndp_app_info_len = qdf_roundup(req->ndp_info.ndp_app_info_len, 4); + pmk_len = qdf_roundup(req->pmk.pmk_len, 4); + passphrase_len = qdf_roundup(req->passphrase.passphrase_len, 4); + service_name_len = + qdf_roundup(req->service_name.service_name_len, 4); + + /* allocated memory for fixed params as well as variable size data */ + len = sizeof(*cmd) + 5*WMI_TLV_HDR_SIZE + ndp_cfg_len + ndp_app_info_len + + pmk_len + passphrase_len + service_name_len; + + if (req->is_ipv6_addr_present || req->is_port_present || + req->is_protocol_present) + len += sizeof(*tcp_ip_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_ndp_responder_req_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ndp_responder_req_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_ndp_responder_req_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->transaction_id = req->transaction_id; + cmd->ndp_instance_id = req->ndp_instance_id; + cmd->rsp_code = req->ndp_rsp; + cmd->ndp_cfg_len = req->ndp_config.ndp_cfg_len; + cmd->ndp_app_info_len = req->ndp_info.ndp_app_info_len; + cmd->nan_pmk_len = req->pmk.pmk_len; + cmd->nan_csid = req->ncs_sk_type; + cmd->nan_passphrase_len = req->passphrase.passphrase_len; + cmd->nan_servicename_len = req->service_name.service_name_len; + + tlv_ptr = (uint8_t *)&cmd[1]; + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, ndp_cfg_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], + req->ndp_config.ndp_cfg, cmd->ndp_cfg_len); + + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + ndp_cfg_len; + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, ndp_app_info_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], + req->ndp_info.ndp_app_info, + req->ndp_info.ndp_app_info_len); + + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + ndp_app_info_len; + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, pmk_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], req->pmk.pmk, + cmd->nan_pmk_len); + + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + pmk_len; + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, passphrase_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], + req->passphrase.passphrase, + cmd->nan_passphrase_len); + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + passphrase_len; + + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, service_name_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], + req->service_name.service_name, + cmd->nan_servicename_len); + + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + service_name_len; + + if (req->is_ipv6_addr_present || req->is_port_present || + req->is_protocol_present) { + tcp_ip_param = (wmi_ndp_transport_ip_param *)tlv_ptr; + WMITLV_SET_HDR(tcp_ip_param, + WMITLV_TAG_STRUC_wmi_ndp_transport_ip_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_ndp_transport_ip_param)); + tcp_ip_param->ipv6_addr_present = req->is_ipv6_addr_present; + qdf_mem_copy(tcp_ip_param->ipv6_intf_addr, + req->ipv6_addr, WMI_NDP_IPV6_INTF_ADDR_LEN); + + tcp_ip_param->trans_port_present = req->is_port_present; + tcp_ip_param->transport_port = req->port; + + tcp_ip_param->trans_proto_present = req->is_protocol_present; + tcp_ip_param->transport_protocol = req->protocol; + } + + WMI_LOGD("ndp_config len: %d ndp_app_info len: %d pmk len: %d pass phrase len: %d service name len: %d", + req->ndp_config.ndp_cfg_len, req->ndp_info.ndp_app_info_len, + cmd->nan_pmk_len, cmd->nan_passphrase_len, + cmd->nan_servicename_len); + + wmi_mtrace(WMI_NDP_RESPONDER_REQ_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_NDP_RESPONDER_REQ_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_NDP_RESPONDER_REQ_CMDID failed, ret: %d", status); + wmi_buf_free(buf); + } + return status; +} + +static QDF_STATUS nan_ndp_end_req_tlv(wmi_unified_t wmi_handle, + struct nan_datapath_end_req *req) +{ + uint16_t len; + wmi_buf_t buf; + QDF_STATUS status; + uint32_t ndp_end_req_len, i; + wmi_ndp_end_req *ndp_end_req_lst; + wmi_ndp_end_req_fixed_param *cmd; + + /* len of tlv following fixed param */ + ndp_end_req_len = sizeof(wmi_ndp_end_req) * req->num_ndp_instances; + /* above comes out to 4 byte alligned already, no need of padding */ + len = sizeof(*cmd) + ndp_end_req_len + WMI_TLV_HDR_SIZE; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_ndp_end_req_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ndp_end_req_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_ndp_end_req_fixed_param)); + + cmd->transaction_id = req->transaction_id; + + /* set tlv pointer to end of fixed param */ + WMITLV_SET_HDR((uint8_t *)&cmd[1], WMITLV_TAG_ARRAY_STRUC, + ndp_end_req_len); + + ndp_end_req_lst = (wmi_ndp_end_req *)((uint8_t *)&cmd[1] + + WMI_TLV_HDR_SIZE); + for (i = 0; i < req->num_ndp_instances; i++) { + WMITLV_SET_HDR(&ndp_end_req_lst[i], + WMITLV_TAG_ARRAY_FIXED_STRUC, + (sizeof(*ndp_end_req_lst) - WMI_TLV_HDR_SIZE)); + + ndp_end_req_lst[i].ndp_instance_id = req->ndp_ids[i]; + } + + wmi_mtrace(WMI_NDP_END_REQ_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_NDP_END_REQ_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_NDP_END_REQ_CMDID failed, ret: %d", status); + wmi_buf_free(buf); + } + + return status; +} + +static QDF_STATUS +extract_ndp_host_event_tlv(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_host_event *evt) +{ + WMI_NDP_EVENTID_param_tlvs *event; + wmi_ndp_event_param *fixed_params; + + event = (WMI_NDP_EVENTID_param_tlvs *)data; + fixed_params = event->fixed_param; + + evt->vdev = + wlan_objmgr_get_vdev_by_id_from_psoc(wmi_handle->soc->wmi_psoc, + fixed_params->vdev_id, + WLAN_NAN_ID); + if (!evt->vdev) { + WMI_LOGE("vdev is null"); + return QDF_STATUS_E_INVAL; + } + + evt->ndp_termination_in_progress = + fixed_params->ndp_termination_in_progress ? true : false; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ndp_initiator_rsp_tlv(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_initiator_rsp *rsp) +{ + WMI_NDP_INITIATOR_RSP_EVENTID_param_tlvs *event; + wmi_ndp_initiator_rsp_event_fixed_param *fixed_params; + + event = (WMI_NDP_INITIATOR_RSP_EVENTID_param_tlvs *)data; + fixed_params = event->fixed_param; + + rsp->vdev = + wlan_objmgr_get_vdev_by_id_from_psoc(wmi_handle->soc->wmi_psoc, + fixed_params->vdev_id, + WLAN_NAN_ID); + if (!rsp->vdev) { + WMI_LOGE("vdev is null"); + return QDF_STATUS_E_INVAL; + } + + rsp->transaction_id = fixed_params->transaction_id; + rsp->ndp_instance_id = fixed_params->ndp_instance_id; + rsp->status = fixed_params->rsp_status; + rsp->reason = fixed_params->reason_code; + + return QDF_STATUS_SUCCESS; +} + +#define MAX_NAN_MSG_LEN 200 + +static QDF_STATUS extract_nan_msg_tlv(uint8_t *data, + struct nan_dump_msg *msg) +{ + WMI_NAN_DMESG_EVENTID_param_tlvs *event; + wmi_nan_dmesg_event_fixed_param *fixed_params; + + event = (WMI_NAN_DMESG_EVENTID_param_tlvs *)data; + fixed_params = (wmi_nan_dmesg_event_fixed_param *)event->fixed_param; + if (!fixed_params->msg_len || + fixed_params->msg_len > MAX_NAN_MSG_LEN || + fixed_params->msg_len > event->num_msg) + return QDF_STATUS_E_FAILURE; + + msg->data_len = fixed_params->msg_len; + msg->msg = event->msg; + + msg->msg[fixed_params->msg_len - 1] = (uint8_t)'\0'; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ndp_ind_tlv(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_indication_event *rsp) +{ + WMI_NDP_INDICATION_EVENTID_param_tlvs *event; + wmi_ndp_indication_event_fixed_param *fixed_params; + size_t total_array_len; + + event = (WMI_NDP_INDICATION_EVENTID_param_tlvs *)data; + fixed_params = + (wmi_ndp_indication_event_fixed_param *)event->fixed_param; + + if (fixed_params->ndp_cfg_len > event->num_ndp_cfg) { + WMI_LOGE("FW message ndp cfg length %d larger than TLV hdr %d", + fixed_params->ndp_cfg_len, event->num_ndp_cfg); + return QDF_STATUS_E_INVAL; + } + + if (fixed_params->ndp_app_info_len > event->num_ndp_app_info) { + WMI_LOGE("FW message ndp app info length %d more than TLV hdr %d", + fixed_params->ndp_app_info_len, + event->num_ndp_app_info); + return QDF_STATUS_E_INVAL; + } + + if (fixed_params->nan_scid_len > event->num_ndp_scid) { + WMI_LOGE("FW msg ndp scid info len %d more than TLV hdr %d", + fixed_params->nan_scid_len, + event->num_ndp_scid); + return QDF_STATUS_E_INVAL; + } + + if (fixed_params->ndp_cfg_len > + (WMI_SVC_MSG_MAX_SIZE - sizeof(*fixed_params))) { + WMI_LOGE("%s: excess wmi buffer: ndp_cfg_len %d", + __func__, fixed_params->ndp_cfg_len); + return QDF_STATUS_E_INVAL; + } + + total_array_len = fixed_params->ndp_cfg_len + + sizeof(*fixed_params); + + if (fixed_params->ndp_app_info_len > + (WMI_SVC_MSG_MAX_SIZE - total_array_len)) { + WMI_LOGE("%s: excess wmi buffer: ndp_cfg_len %d", + __func__, fixed_params->ndp_app_info_len); + return QDF_STATUS_E_INVAL; + } + total_array_len += fixed_params->ndp_app_info_len; + + if (fixed_params->nan_scid_len > + (WMI_SVC_MSG_MAX_SIZE - total_array_len)) { + WMI_LOGE("%s: excess wmi buffer: ndp_cfg_len %d", + __func__, fixed_params->nan_scid_len); + return QDF_STATUS_E_INVAL; + } + + rsp->vdev = + wlan_objmgr_get_vdev_by_id_from_psoc(wmi_handle->soc->wmi_psoc, + fixed_params->vdev_id, + WLAN_NAN_ID); + if (!rsp->vdev) { + WMI_LOGE("vdev is null"); + return QDF_STATUS_E_INVAL; + } + rsp->service_instance_id = fixed_params->service_instance_id; + rsp->ndp_instance_id = fixed_params->ndp_instance_id; + rsp->role = fixed_params->self_ndp_role; + rsp->policy = fixed_params->accept_policy; + + WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_ndi_mac_addr, + rsp->peer_mac_addr.bytes); + WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_discovery_mac_addr, + rsp->peer_discovery_mac_addr.bytes); + + WMI_LOGD("WMI_NDP_INDICATION_EVENTID(0x%X) received. vdev %d service_instance %d, ndp_instance %d, role %d, policy %d csid: %d, scid_len: %d, peer_addr: "QDF_MAC_ADDR_FMT", peer_disc_addr: "QDF_MAC_ADDR_FMT" ndp_cfg - %d bytes ndp_app_info - %d bytes", + WMI_NDP_INDICATION_EVENTID, fixed_params->vdev_id, + fixed_params->service_instance_id, + fixed_params->ndp_instance_id, fixed_params->self_ndp_role, + fixed_params->accept_policy, fixed_params->nan_csid, + fixed_params->nan_scid_len, + QDF_MAC_ADDR_REF(rsp->peer_mac_addr.bytes), + QDF_MAC_ADDR_REF(rsp->peer_discovery_mac_addr.bytes), + fixed_params->ndp_cfg_len, + fixed_params->ndp_app_info_len); + + rsp->ncs_sk_type = fixed_params->nan_csid; + if (event->ndp_cfg) { + rsp->ndp_config.ndp_cfg_len = fixed_params->ndp_cfg_len; + if (rsp->ndp_config.ndp_cfg_len > NDP_QOS_INFO_LEN) + rsp->ndp_config.ndp_cfg_len = NDP_QOS_INFO_LEN; + qdf_mem_copy(rsp->ndp_config.ndp_cfg, event->ndp_cfg, + rsp->ndp_config.ndp_cfg_len); + } + + if (event->ndp_app_info) { + rsp->ndp_info.ndp_app_info_len = fixed_params->ndp_app_info_len; + if (rsp->ndp_info.ndp_app_info_len > NDP_APP_INFO_LEN) + rsp->ndp_info.ndp_app_info_len = NDP_APP_INFO_LEN; + qdf_mem_copy(rsp->ndp_info.ndp_app_info, event->ndp_app_info, + rsp->ndp_info.ndp_app_info_len); + } + + if (event->ndp_scid) { + rsp->scid.scid_len = fixed_params->nan_scid_len; + if (rsp->scid.scid_len > NDP_SCID_BUF_LEN) + rsp->scid.scid_len = NDP_SCID_BUF_LEN; + qdf_mem_copy(rsp->scid.scid, event->ndp_scid, + rsp->scid.scid_len); + } + + if (event->ndp_transport_ip_param && + event->num_ndp_transport_ip_param) { + if (event->ndp_transport_ip_param->ipv6_addr_present) { + rsp->is_ipv6_addr_present = true; + qdf_mem_copy(rsp->ipv6_addr, + event->ndp_transport_ip_param->ipv6_intf_addr, + WMI_NDP_IPV6_INTF_ADDR_LEN); + } + } + WMI_LOGD(FL("IPv6 addr present: %d, addr: %pI6"), + rsp->is_ipv6_addr_present, rsp->ipv6_addr); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ndp_confirm_tlv(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_confirm_event *rsp) +{ + uint8_t i; + WMI_HOST_WLAN_PHY_MODE ch_mode; + WMI_NDP_CONFIRM_EVENTID_param_tlvs *event; + wmi_ndp_confirm_event_fixed_param *fixed_params; + size_t total_array_len; + bool ndi_dbs = wmi_service_enabled(wmi_handle, + wmi_service_ndi_dbs_support); + + event = (WMI_NDP_CONFIRM_EVENTID_param_tlvs *) data; + fixed_params = (wmi_ndp_confirm_event_fixed_param *)event->fixed_param; + WMI_LOGD("WMI_NDP_CONFIRM_EVENTID(0x%X) received. vdev %d, ndp_instance %d, rsp_code %d, reason_code: %d, num_active_ndps_on_peer: %d num_ch: %d", + WMI_NDP_CONFIRM_EVENTID, fixed_params->vdev_id, + fixed_params->ndp_instance_id, fixed_params->rsp_code, + fixed_params->reason_code, + fixed_params->num_active_ndps_on_peer, + fixed_params->num_ndp_channels); + + if (fixed_params->ndp_cfg_len > event->num_ndp_cfg) { + WMI_LOGE("FW message ndp cfg length %d larger than TLV hdr %d", + fixed_params->ndp_cfg_len, event->num_ndp_cfg); + return QDF_STATUS_E_INVAL; + } + + if (fixed_params->ndp_app_info_len > event->num_ndp_app_info) { + WMI_LOGE("FW message ndp app info length %d more than TLV hdr %d", + fixed_params->ndp_app_info_len, + event->num_ndp_app_info); + return QDF_STATUS_E_INVAL; + } + + WMI_LOGD("ndp_cfg - %d bytes, ndp_app_info - %d bytes", + fixed_params->ndp_cfg_len, fixed_params->ndp_app_info_len); + + if (fixed_params->ndp_cfg_len > + (WMI_SVC_MSG_MAX_SIZE - sizeof(*fixed_params))) { + WMI_LOGE("%s: excess wmi buffer: ndp_cfg_len %d", + __func__, fixed_params->ndp_cfg_len); + return QDF_STATUS_E_INVAL; + } + + total_array_len = fixed_params->ndp_cfg_len + + sizeof(*fixed_params); + + if (fixed_params->ndp_app_info_len > + (WMI_SVC_MSG_MAX_SIZE - total_array_len)) { + WMI_LOGE("%s: excess wmi buffer: ndp_cfg_len %d", + __func__, fixed_params->ndp_app_info_len); + return QDF_STATUS_E_INVAL; + } + + if (fixed_params->num_ndp_channels > event->num_ndp_channel_list || + fixed_params->num_ndp_channels > event->num_nss_list) { + WMI_LOGE(FL("NDP Ch count %d greater than NDP Ch TLV len(%d) or NSS TLV len(%d)"), + fixed_params->num_ndp_channels, + event->num_ndp_channel_list, + event->num_nss_list); + return QDF_STATUS_E_INVAL; + } + + if (ndi_dbs && + fixed_params->num_ndp_channels > event->num_ndp_channel_info) { + WMI_LOGE(FL("NDP Ch count %d greater than NDP Ch info(%d)"), + fixed_params->num_ndp_channels, + event->num_ndp_channel_info); + return QDF_STATUS_E_INVAL; + } + + rsp->vdev = + wlan_objmgr_get_vdev_by_id_from_psoc(wmi_handle->soc->wmi_psoc, + fixed_params->vdev_id, + WLAN_NAN_ID); + if (!rsp->vdev) { + WMI_LOGE("vdev is null"); + return QDF_STATUS_E_INVAL; + } + rsp->ndp_instance_id = fixed_params->ndp_instance_id; + rsp->rsp_code = fixed_params->rsp_code; + rsp->reason_code = fixed_params->reason_code; + rsp->num_active_ndps_on_peer = fixed_params->num_active_ndps_on_peer; + rsp->num_channels = fixed_params->num_ndp_channels; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_ndi_mac_addr, + rsp->peer_ndi_mac_addr.bytes); + rsp->ndp_info.ndp_app_info_len = fixed_params->ndp_app_info_len; + + if (rsp->ndp_info.ndp_app_info_len > NDP_APP_INFO_LEN) + rsp->ndp_info.ndp_app_info_len = NDP_APP_INFO_LEN; + + qdf_mem_copy(rsp->ndp_info.ndp_app_info, event->ndp_app_info, + rsp->ndp_info.ndp_app_info_len); + + if (rsp->num_channels > NAN_CH_INFO_MAX_CHANNELS) { + WMI_LOGE(FL("too many channels")); + rsp->num_channels = NAN_CH_INFO_MAX_CHANNELS; + } + + for (i = 0; i < rsp->num_channels; i++) { + rsp->ch[i].freq = event->ndp_channel_list[i].mhz; + rsp->ch[i].nss = event->nss_list[i]; + ch_mode = WMI_GET_CHANNEL_MODE(&event->ndp_channel_list[i]); + rsp->ch[i].ch_width = wmi_get_ch_width_from_phy_mode(wmi_handle, + ch_mode); + if (ndi_dbs) { + rsp->ch[i].mac_id = event->ndp_channel_info[i].mac_id; + WMI_LOGD("Freq: %d, ch_mode: %d, nss: %d mac_id: %d", + rsp->ch[i].freq, rsp->ch[i].ch_width, + rsp->ch[i].nss, rsp->ch[i].mac_id); + } else { + WMI_LOGD("Freq: %d, ch_mode: %d, nss: %d", + rsp->ch[i].freq, rsp->ch[i].ch_width, + rsp->ch[i].nss); + } + } + + if (event->ndp_transport_ip_param && + event->num_ndp_transport_ip_param) { + if (event->ndp_transport_ip_param->ipv6_addr_present) { + rsp->is_ipv6_addr_present = true; + qdf_mem_copy(rsp->ipv6_addr, + event->ndp_transport_ip_param->ipv6_intf_addr, + WMI_NDP_IPV6_INTF_ADDR_LEN); + } + + if (event->ndp_transport_ip_param->trans_port_present) { + rsp->is_port_present = true; + rsp->port = + event->ndp_transport_ip_param->transport_port; + } + + if (event->ndp_transport_ip_param->trans_proto_present) { + rsp->is_protocol_present = true; + rsp->protocol = + event->ndp_transport_ip_param->transport_protocol; + } + } + WMI_LOGD("IPv6 addr present: %d, addr: %pI6 port: %d present: %d protocol: %d present: %d", + rsp->is_ipv6_addr_present, rsp->ipv6_addr, rsp->port, + rsp->is_port_present, rsp->protocol, rsp->is_protocol_present); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ndp_responder_rsp_tlv(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_responder_rsp *rsp) +{ + WMI_NDP_RESPONDER_RSP_EVENTID_param_tlvs *event; + wmi_ndp_responder_rsp_event_fixed_param *fixed_params; + + event = (WMI_NDP_RESPONDER_RSP_EVENTID_param_tlvs *)data; + fixed_params = event->fixed_param; + + rsp->vdev = + wlan_objmgr_get_vdev_by_id_from_psoc(wmi_handle->soc->wmi_psoc, + fixed_params->vdev_id, + WLAN_NAN_ID); + if (!rsp->vdev) { + WMI_LOGE("vdev is null"); + return QDF_STATUS_E_INVAL; + } + rsp->transaction_id = fixed_params->transaction_id; + rsp->reason = fixed_params->reason_code; + rsp->status = fixed_params->rsp_status; + rsp->create_peer = fixed_params->create_peer; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_ndi_mac_addr, + rsp->peer_mac_addr.bytes); + WMI_LOGD("WMI_NDP_RESPONDER_RSP_EVENTID(0x%X) received. vdev_id: %d, peer_mac_addr: "QDF_MAC_ADDR_FMT",transaction_id: %d, status_code %d, reason_code: %d, create_peer: %d", + WMI_NDP_RESPONDER_RSP_EVENTID, fixed_params->vdev_id, + QDF_MAC_ADDR_REF(rsp->peer_mac_addr.bytes), + rsp->transaction_id, + rsp->status, rsp->reason, rsp->create_peer); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ndp_end_rsp_tlv(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_end_rsp_event *rsp) +{ + WMI_NDP_END_RSP_EVENTID_param_tlvs *event; + wmi_ndp_end_rsp_event_fixed_param *fixed_params = NULL; + + event = (WMI_NDP_END_RSP_EVENTID_param_tlvs *) data; + fixed_params = (wmi_ndp_end_rsp_event_fixed_param *)event->fixed_param; + WMI_LOGD("WMI_NDP_END_RSP_EVENTID(0x%X) received. transaction_id: %d, rsp_status: %d, reason_code: %d", + WMI_NDP_END_RSP_EVENTID, fixed_params->transaction_id, + fixed_params->rsp_status, fixed_params->reason_code); + + rsp->vdev = wlan_objmgr_get_vdev_by_opmode_from_psoc( + wmi_handle->soc->wmi_psoc, QDF_NDI_MODE, WLAN_NAN_ID); + if (!rsp->vdev) { + WMI_LOGE("vdev is null"); + return QDF_STATUS_E_INVAL; + } + rsp->transaction_id = fixed_params->transaction_id; + rsp->reason = fixed_params->reason_code; + rsp->status = fixed_params->rsp_status; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ndp_end_ind_tlv(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_end_indication_event **rsp) +{ + uint32_t i, buf_size; + wmi_ndp_end_indication *ind; + struct qdf_mac_addr peer_addr; + WMI_NDP_END_INDICATION_EVENTID_param_tlvs *event; + + event = (WMI_NDP_END_INDICATION_EVENTID_param_tlvs *) data; + ind = event->ndp_end_indication_list; + + if (event->num_ndp_end_indication_list == 0) { + WMI_LOGE("Error: Event ignored, 0 ndp instances"); + return QDF_STATUS_E_INVAL; + } + + WMI_LOGD("number of ndp instances = %d", + event->num_ndp_end_indication_list); + + if (event->num_ndp_end_indication_list > ((UINT_MAX - sizeof(**rsp))/ + sizeof((*rsp)->ndp_map[0]))) { + WMI_LOGE("num_ndp_end_ind_list %d too large", + event->num_ndp_end_indication_list); + return QDF_STATUS_E_INVAL; + } + + buf_size = sizeof(**rsp) + event->num_ndp_end_indication_list * + sizeof((*rsp)->ndp_map[0]); + *rsp = qdf_mem_malloc(buf_size); + if (!(*rsp)) + return QDF_STATUS_E_NOMEM; + + (*rsp)->num_ndp_ids = event->num_ndp_end_indication_list; + for (i = 0; i < (*rsp)->num_ndp_ids; i++) { + WMI_MAC_ADDR_TO_CHAR_ARRAY(&ind[i].peer_ndi_mac_addr, + peer_addr.bytes); + WMI_LOGD("ind[%d]: type %d, reason_code %d, instance_id %d num_active %d ", + i, ind[i].type, ind[i].reason_code, + ind[i].ndp_instance_id, + ind[i].num_active_ndps_on_peer); + /* Add each instance entry to the list */ + (*rsp)->ndp_map[i].ndp_instance_id = ind[i].ndp_instance_id; + (*rsp)->ndp_map[i].vdev_id = ind[i].vdev_id; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&ind[i].peer_ndi_mac_addr, + (*rsp)->ndp_map[i].peer_ndi_mac_addr.bytes); + (*rsp)->ndp_map[i].num_active_ndp_sessions = + ind[i].num_active_ndps_on_peer; + (*rsp)->ndp_map[i].type = ind[i].type; + (*rsp)->ndp_map[i].reason_code = ind[i].reason_code; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ndp_sch_update_tlv(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_sch_update_event *ind) +{ + uint8_t i; + WMI_HOST_WLAN_PHY_MODE ch_mode; + WMI_NDL_SCHEDULE_UPDATE_EVENTID_param_tlvs *event; + wmi_ndl_schedule_update_fixed_param *fixed_params; + bool ndi_dbs = wmi_service_enabled(wmi_handle, + wmi_service_ndi_dbs_support); + + event = (WMI_NDL_SCHEDULE_UPDATE_EVENTID_param_tlvs *)data; + fixed_params = event->fixed_param; + + WMI_LOGD(FL("flags: %d, num_ch: %d, num_ndp_instances: %d"), + fixed_params->flags, fixed_params->num_channels, + fixed_params->num_ndp_instances); + + if (fixed_params->num_channels > event->num_ndl_channel_list || + fixed_params->num_channels > event->num_nss_list) { + WMI_LOGE(FL("Channel count %d greater than NDP Ch list TLV len(%d) or NSS list TLV len(%d)"), + fixed_params->num_channels, + event->num_ndl_channel_list, + event->num_nss_list); + return QDF_STATUS_E_INVAL; + } + + if (ndi_dbs && + fixed_params->num_channels > event->num_ndp_channel_info) { + WMI_LOGE(FL("Channel count %d greater than NDP Ch info(%d)"), + fixed_params->num_channels, + event->num_ndp_channel_info); + return QDF_STATUS_E_INVAL; + } + + if (fixed_params->num_ndp_instances > event->num_ndp_instance_list) { + WMI_LOGE(FL("NDP Instance count %d greater than NDP Instancei TLV len %d"), + fixed_params->num_ndp_instances, + event->num_ndp_instance_list); + return QDF_STATUS_E_INVAL; + } + + ind->vdev = + wlan_objmgr_get_vdev_by_id_from_psoc(wmi_handle->soc->wmi_psoc, + fixed_params->vdev_id, + WLAN_NAN_ID); + if (!ind->vdev) { + WMI_LOGE("vdev is null"); + return QDF_STATUS_E_INVAL; + } + + ind->flags = fixed_params->flags; + ind->num_channels = fixed_params->num_channels; + ind->num_ndp_instances = fixed_params->num_ndp_instances; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_macaddr, + ind->peer_addr.bytes); + + if (ind->num_ndp_instances > NDP_NUM_INSTANCE_ID) { + WMI_LOGE(FL("uint32 overflow")); + wlan_objmgr_vdev_release_ref(ind->vdev, WLAN_NAN_ID); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_copy(ind->ndp_instances, event->ndp_instance_list, + sizeof(uint32_t) * ind->num_ndp_instances); + + if (ind->num_channels > NAN_CH_INFO_MAX_CHANNELS) { + WMI_LOGE(FL("too many channels")); + ind->num_channels = NAN_CH_INFO_MAX_CHANNELS; + } + + for (i = 0; i < ind->num_channels; i++) { + ind->ch[i].freq = event->ndl_channel_list[i].mhz; + ind->ch[i].nss = event->nss_list[i]; + ch_mode = WMI_GET_CHANNEL_MODE(&event->ndl_channel_list[i]); + ind->ch[i].ch_width = wmi_get_ch_width_from_phy_mode(wmi_handle, + ch_mode); + if (ndi_dbs) { + ind->ch[i].mac_id = event->ndp_channel_info[i].mac_id; + WMI_LOGD(FL("Freq: %d, ch_mode: %d, nss: %d mac_id: %d"), + ind->ch[i].freq, ind->ch[i].ch_width, + ind->ch[i].nss, ind->ch[i].mac_id); + } else { + WMI_LOGD(FL("Freq: %d, ch_mode: %d, nss: %d"), + ind->ch[i].freq, ind->ch[i].ch_width, + ind->ch[i].nss); + } + } + + for (i = 0; i < fixed_params->num_ndp_instances; i++) + WMI_LOGD(FL("instance_id[%d]: %d"), + i, event->ndp_instance_list[i]); + + return QDF_STATUS_SUCCESS; +} + +void wmi_nan_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_nan_req_cmd = send_nan_req_cmd_tlv; + ops->send_nan_disable_req_cmd = send_nan_disable_req_cmd_tlv; + ops->extract_nan_event_rsp = extract_nan_event_rsp_tlv; + ops->send_terminate_all_ndps_req_cmd = send_terminate_all_ndps_cmd_tlv; + ops->send_ndp_initiator_req_cmd = nan_ndp_initiator_req_tlv; + ops->send_ndp_responder_req_cmd = nan_ndp_responder_req_tlv; + ops->send_ndp_end_req_cmd = nan_ndp_end_req_tlv; + ops->extract_ndp_initiator_rsp = extract_ndp_initiator_rsp_tlv; + ops->extract_ndp_ind = extract_ndp_ind_tlv; + ops->extract_nan_msg = extract_nan_msg_tlv, + ops->extract_ndp_confirm = extract_ndp_confirm_tlv; + ops->extract_ndp_responder_rsp = extract_ndp_responder_rsp_tlv; + ops->extract_ndp_end_rsp = extract_ndp_end_rsp_tlv; + ops->extract_ndp_end_ind = extract_ndp_end_ind_tlv; + ops->extract_ndp_sch_update = extract_ndp_sch_update_tlv; + ops->extract_ndp_host_event = extract_ndp_host_event_tlv; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_ocb_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_ocb_api.c new file mode 100644 index 0000000000000000000000000000000000000000..5b8a6378e4c8f5a96624cb6a439395904e3310bc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_ocb_api.c @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to DSRC component. + */ + +#include +#include +#include + +QDF_STATUS wmi_unified_ocb_start_timing_advert(struct wmi_unified *wmi_hdl, + struct ocb_timing_advert_param *timing_advert) +{ + if (wmi_hdl->ops->send_ocb_start_timing_advert_cmd) + return wmi_hdl->ops->send_ocb_start_timing_advert_cmd(wmi_hdl, + timing_advert); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_ocb_stop_timing_advert(struct wmi_unified *wmi_hdl, + struct ocb_timing_advert_param *timing_advert) +{ + if (wmi_hdl->ops->send_ocb_stop_timing_advert_cmd) + return wmi_hdl->ops->send_ocb_stop_timing_advert_cmd(wmi_hdl, + timing_advert); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_ocb_set_utc_time_cmd(struct wmi_unified *wmi_hdl, + struct ocb_utc_param *utc) +{ + if (wmi_hdl->ops->send_ocb_set_utc_time_cmd) + return wmi_hdl->ops->send_ocb_set_utc_time_cmd(wmi_hdl, utc); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_ocb_get_tsf_timer(struct wmi_unified *wmi_hdl, + struct ocb_get_tsf_timer_param *req) +{ + if (wmi_hdl->ops->send_ocb_get_tsf_timer_cmd) + return wmi_hdl->ops->send_ocb_get_tsf_timer_cmd(wmi_hdl, + req->vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_dcc_get_stats_cmd(struct wmi_unified *wmi_hdl, + struct ocb_dcc_get_stats_param *get_stats_param) +{ + if (wmi_hdl->ops->send_dcc_get_stats_cmd) + return wmi_hdl->ops->send_dcc_get_stats_cmd(wmi_hdl, + get_stats_param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_dcc_clear_stats(struct wmi_unified *wmi_hdl, + struct ocb_dcc_clear_stats_param *clear_stats_param) +{ + if (wmi_hdl->ops->send_dcc_clear_stats_cmd) + return wmi_hdl->ops->send_dcc_clear_stats_cmd(wmi_hdl, + clear_stats_param->vdev_id, + clear_stats_param->dcc_stats_bitmap); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_dcc_update_ndl(struct wmi_unified *wmi_hdl, + struct ocb_dcc_update_ndl_param *update_ndl_param) +{ + if (wmi_hdl->ops->send_dcc_update_ndl_cmd) + return wmi_hdl->ops->send_dcc_update_ndl_cmd(wmi_hdl, + update_ndl_param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_ocb_set_config(struct wmi_unified *wmi_hdl, + struct ocb_config *config) +{ + if (wmi_hdl->ops->send_ocb_set_config_cmd) + return wmi_hdl->ops->send_ocb_set_config_cmd(wmi_hdl, + config); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_ocb_set_channel_config_resp(struct wmi_unified *wmi_hdl, + void *evt_buf, + uint32_t *status) +{ + if (wmi_hdl->ops->extract_ocb_chan_config_resp) + return wmi_hdl->ops->extract_ocb_chan_config_resp(wmi_hdl, + evt_buf, + status); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ocb_tsf_timer(struct wmi_unified *wmi_hdl, + void *evt_buf, + struct ocb_get_tsf_timer_response *resp) +{ + if (wmi_hdl->ops->extract_ocb_tsf_timer) + return wmi_hdl->ops->extract_ocb_tsf_timer(wmi_hdl, + evt_buf, + resp); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_dcc_update_ndl_resp(struct wmi_unified *wmi_hdl, + void *evt_buf, struct ocb_dcc_update_ndl_response *resp) +{ + if (wmi_hdl->ops->extract_dcc_update_ndl_resp) + return wmi_hdl->ops->extract_dcc_update_ndl_resp(wmi_hdl, + evt_buf, + resp); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_dcc_stats(struct wmi_unified *wmi_hdl, + void *evt_buf, + struct ocb_dcc_get_stats_response **resp) +{ + if (wmi_hdl->ops->extract_dcc_stats) + return wmi_hdl->ops->extract_dcc_stats(wmi_hdl, + evt_buf, + resp); + + return QDF_STATUS_E_FAILURE; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_ocb_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_ocb_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..cd55899c163a9ad6a5c708cde9a57f9943854d9d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_ocb_tlv.c @@ -0,0 +1,792 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include + +/** + * send_ocb_set_utc_time_cmd() - send the UTC time to the firmware + * @wmi_handle: pointer to the wmi handle + * @utc: pointer to the UTC time struct + * + * Return: 0 on succes + */ +static QDF_STATUS send_ocb_set_utc_time_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_utc_param *utc) +{ + QDF_STATUS ret; + wmi_ocb_set_utc_time_cmd_fixed_param *cmd; + uint8_t *buf_ptr; + uint32_t len, i; + wmi_buf_t buf; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_ocb_set_utc_time_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ocb_set_utc_time_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_ocb_set_utc_time_cmd_fixed_param)); + cmd->vdev_id = utc->vdev_id; + + for (i = 0; i < SIZE_UTC_TIME; i++) + WMI_UTC_TIME_SET(cmd, i, utc->utc_time[i]); + + for (i = 0; i < SIZE_UTC_TIME_ERROR; i++) + WMI_TIME_ERROR_SET(cmd, i, utc->time_error[i]); + + wmi_mtrace(WMI_OCB_SET_UTC_TIME_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_OCB_SET_UTC_TIME_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL("Failed to set OCB UTC time")); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_ocb_start_timing_advert_cmd_tlv() - start sending the timing advertisement + * frames on a channel + * @wmi_handle: pointer to the wmi handle + * @timing_advert: pointer to the timing advertisement struct + * + * Return: 0 on succes + */ +static QDF_STATUS send_ocb_start_timing_advert_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_timing_advert_param *timing_advert) +{ + QDF_STATUS ret; + wmi_ocb_start_timing_advert_cmd_fixed_param *cmd; + uint8_t *buf_ptr; + uint32_t len, len_template; + wmi_buf_t buf; + + len = sizeof(*cmd) + + WMI_TLV_HDR_SIZE; + + len_template = timing_advert->template_length; + /* Add padding to the template if needed */ + if (len_template % 4 != 0) + len_template += 4 - (len_template % 4); + len += len_template; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_ocb_start_timing_advert_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ocb_start_timing_advert_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_ocb_start_timing_advert_cmd_fixed_param)); + cmd->vdev_id = timing_advert->vdev_id; + cmd->repeat_rate = timing_advert->repeat_rate; + cmd->channel_freq = timing_advert->chan_freq; + cmd->timestamp_offset = timing_advert->timestamp_offset; + cmd->time_value_offset = timing_advert->time_value_offset; + cmd->timing_advert_template_length = timing_advert->template_length; + buf_ptr += sizeof(*cmd); + + /* Add the timing advert template */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + len_template); + qdf_mem_copy(buf_ptr + WMI_TLV_HDR_SIZE, + (uint8_t *)timing_advert->template_value, + timing_advert->template_length); + + wmi_mtrace(WMI_OCB_START_TIMING_ADVERT_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_OCB_START_TIMING_ADVERT_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL("Failed to start OCB timing advert")); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_ocb_stop_timing_advert_cmd_tlv() - stop sending the timing advertisement frames + * on a channel + * @wmi_handle: pointer to the wmi handle + * @timing_advert: pointer to the timing advertisement struct + * + * Return: 0 on succes + */ +static QDF_STATUS send_ocb_stop_timing_advert_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_timing_advert_param *timing_advert) +{ + QDF_STATUS ret; + wmi_ocb_stop_timing_advert_cmd_fixed_param *cmd; + uint8_t *buf_ptr; + uint32_t len; + wmi_buf_t buf; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_ocb_stop_timing_advert_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ocb_stop_timing_advert_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_ocb_stop_timing_advert_cmd_fixed_param)); + cmd->vdev_id = timing_advert->vdev_id; + cmd->channel_freq = timing_advert->chan_freq; + + wmi_mtrace(WMI_OCB_STOP_TIMING_ADVERT_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_OCB_STOP_TIMING_ADVERT_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL("Failed to stop OCB timing advert")); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_ocb_get_tsf_timer_cmd_tlv() - get ocb tsf timer val + * @wmi_handle: pointer to the wmi handle + * @request: pointer to the request + * + * Return: 0 on succes + */ +static QDF_STATUS send_ocb_get_tsf_timer_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + QDF_STATUS ret; + wmi_ocb_get_tsf_timer_cmd_fixed_param *cmd; + uint8_t *buf_ptr; + wmi_buf_t buf; + int32_t len; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *)wmi_buf_data(buf); + + cmd = (wmi_ocb_get_tsf_timer_cmd_fixed_param *)buf_ptr; + qdf_mem_zero(cmd, len); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ocb_get_tsf_timer_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_ocb_get_tsf_timer_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + + /* Send the WMI command */ + wmi_mtrace(WMI_OCB_GET_TSF_TIMER_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_OCB_GET_TSF_TIMER_CMDID); + /* If there is an error, set the completion event */ + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL("Failed to send WMI message: %d"), ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_dcc_get_stats_cmd_tlv() - get the DCC channel stats + * @wmi_handle: pointer to the wmi handle + * @get_stats_param: pointer to the dcc stats + * + * Return: 0 on succes + */ +static QDF_STATUS send_dcc_get_stats_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_dcc_get_stats_param *get_stats_param) +{ + QDF_STATUS ret; + wmi_dcc_get_stats_cmd_fixed_param *cmd; + wmi_dcc_channel_stats_request *channel_stats_array; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t len; + uint32_t i; + + /* Validate the input */ + if (get_stats_param->request_array_len != + get_stats_param->channel_count * sizeof(*channel_stats_array)) { + WMI_LOGE(FL("Invalid parameter")); + return QDF_STATUS_E_INVAL; + } + + /* Allocate memory for the WMI command */ + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + + get_stats_param->request_array_len; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + + /* Populate the WMI command */ + cmd = (wmi_dcc_get_stats_cmd_fixed_param *)buf_ptr; + buf_ptr += sizeof(*cmd); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_dcc_get_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_dcc_get_stats_cmd_fixed_param)); + cmd->vdev_id = get_stats_param->vdev_id; + cmd->num_channels = get_stats_param->channel_count; + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + get_stats_param->request_array_len); + buf_ptr += WMI_TLV_HDR_SIZE; + + channel_stats_array = (wmi_dcc_channel_stats_request *)buf_ptr; + qdf_mem_copy(channel_stats_array, get_stats_param->request_array, + get_stats_param->request_array_len); + for (i = 0; i < cmd->num_channels; i++) + WMITLV_SET_HDR(&channel_stats_array[i].tlv_header, + WMITLV_TAG_STRUC_wmi_dcc_channel_stats_request, + WMITLV_GET_STRUCT_TLVLEN( + wmi_dcc_channel_stats_request)); + + /* Send the WMI command */ + wmi_mtrace(WMI_DCC_GET_STATS_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_DCC_GET_STATS_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL("Failed to send WMI message: %d"), ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_dcc_clear_stats_cmd_tlv() - command to clear the DCC stats + * @wmi_handle: pointer to the wmi handle + * @vdev_id: vdev id + * @dcc_stats_bitmap: dcc status bitmap + * + * Return: 0 on succes + */ +static QDF_STATUS send_dcc_clear_stats_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint32_t dcc_stats_bitmap) +{ + QDF_STATUS ret; + wmi_dcc_clear_stats_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t len; + + /* Allocate memory for the WMI command */ + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + + /* Populate the WMI command */ + cmd = (wmi_dcc_clear_stats_cmd_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_dcc_clear_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_dcc_clear_stats_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->dcc_stats_bitmap = dcc_stats_bitmap; + + /* Send the WMI command */ + wmi_mtrace(WMI_DCC_CLEAR_STATS_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_DCC_CLEAR_STATS_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL("Failed to send the WMI command")); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_dcc_update_ndl_cmd_tlv() - command to update the NDL data + * @wmi_handle: pointer to the wmi handle + * @update_ndl_param: pointer to the request parameters + * + * Return: 0 on success + */ +static QDF_STATUS send_dcc_update_ndl_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_dcc_update_ndl_param *update_ndl_param) +{ + QDF_STATUS qdf_status; + wmi_dcc_update_ndl_cmd_fixed_param *cmd; + wmi_dcc_ndl_chan *ndl_chan_array; + wmi_dcc_ndl_active_state_config *ndl_active_state_array; + uint32_t active_state_count; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t len; + uint32_t i; + + /* validate the input */ + if (update_ndl_param->dcc_ndl_chan_list_len != + update_ndl_param->channel_count * sizeof(*ndl_chan_array)) { + WMI_LOGE(FL("Invalid parameter")); + return QDF_STATUS_E_INVAL; + } + active_state_count = 0; + ndl_chan_array = update_ndl_param->dcc_ndl_chan_list; + for (i = 0; i < update_ndl_param->channel_count; i++) + active_state_count += + WMI_NDL_NUM_ACTIVE_STATE_GET(&ndl_chan_array[i]); + if (update_ndl_param->dcc_ndl_active_state_list_len != + active_state_count * sizeof(*ndl_active_state_array)) { + WMI_LOGE(FL("Invalid parameter")); + return QDF_STATUS_E_INVAL; + } + + /* Allocate memory for the WMI command */ + len = sizeof(*cmd) + + WMI_TLV_HDR_SIZE + update_ndl_param->dcc_ndl_chan_list_len + + WMI_TLV_HDR_SIZE + + update_ndl_param->dcc_ndl_active_state_list_len; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + + /* Populate the WMI command */ + cmd = (wmi_dcc_update_ndl_cmd_fixed_param *)buf_ptr; + buf_ptr += sizeof(*cmd); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_dcc_update_ndl_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_dcc_update_ndl_cmd_fixed_param)); + cmd->vdev_id = update_ndl_param->vdev_id; + cmd->num_channel = update_ndl_param->channel_count; + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + update_ndl_param->dcc_ndl_chan_list_len); + buf_ptr += WMI_TLV_HDR_SIZE; + + ndl_chan_array = (wmi_dcc_ndl_chan *)buf_ptr; + qdf_mem_copy(ndl_chan_array, update_ndl_param->dcc_ndl_chan_list, + update_ndl_param->dcc_ndl_chan_list_len); + for (i = 0; i < cmd->num_channel; i++) + WMITLV_SET_HDR(&ndl_chan_array[i].tlv_header, + WMITLV_TAG_STRUC_wmi_dcc_ndl_chan, + WMITLV_GET_STRUCT_TLVLEN( + wmi_dcc_ndl_chan)); + buf_ptr += update_ndl_param->dcc_ndl_chan_list_len; + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + update_ndl_param->dcc_ndl_active_state_list_len); + buf_ptr += WMI_TLV_HDR_SIZE; + + ndl_active_state_array = (wmi_dcc_ndl_active_state_config *)buf_ptr; + qdf_mem_copy(ndl_active_state_array, + update_ndl_param->dcc_ndl_active_state_list, + update_ndl_param->dcc_ndl_active_state_list_len); + for (i = 0; i < active_state_count; i++) { + WMITLV_SET_HDR(&ndl_active_state_array[i].tlv_header, + WMITLV_TAG_STRUC_wmi_dcc_ndl_active_state_config, + WMITLV_GET_STRUCT_TLVLEN( + wmi_dcc_ndl_active_state_config)); + } + buf_ptr += update_ndl_param->dcc_ndl_active_state_list_len; + + /* Send the WMI command */ + wmi_mtrace(WMI_DCC_UPDATE_NDL_CMDID, cmd->vdev_id, 0); + qdf_status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_DCC_UPDATE_NDL_CMDID); + /* If there is an error, set the completion event */ + if (QDF_IS_STATUS_ERROR(qdf_status)) { + WMI_LOGE(FL("Failed to send WMI message: %d"), qdf_status); + wmi_buf_free(buf); + } + + return qdf_status; +} + +/** + * send_ocb_set_config_cmd_tlv() - send the OCB config to the FW + * @wmi_handle: pointer to the wmi handle + * @config: the OCB configuration + * + * Return: 0 on success + */ +static QDF_STATUS send_ocb_set_config_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_config *config) +{ + QDF_STATUS ret; + wmi_ocb_set_config_cmd_fixed_param *cmd; + wmi_channel *chan; + wmi_ocb_channel *ocb_chan; + wmi_qos_parameter *qos_param; + wmi_dcc_ndl_chan *ndl_chan; + wmi_dcc_ndl_active_state_config *ndl_active_config; + wmi_ocb_schedule_element *sched_elem; + uint8_t *buf_ptr; + wmi_buf_t buf; + int32_t len; + int32_t i, j, active_state_count; + + /* + * Validate the dcc_ndl_chan_list_len and count the number of active + * states. Validate dcc_ndl_active_state_list_len. + */ + active_state_count = 0; + if (config->dcc_ndl_chan_list_len) { + if (!config->dcc_ndl_chan_list || + config->dcc_ndl_chan_list_len != + config->channel_count * sizeof(wmi_dcc_ndl_chan)) { + WMI_LOGE(FL("NDL channel is invalid. List len: %d"), + config->dcc_ndl_chan_list_len); + return QDF_STATUS_E_INVAL; + } + + for (i = 0, ndl_chan = config->dcc_ndl_chan_list; + i < config->channel_count; ++i, ++ndl_chan) + active_state_count += + WMI_NDL_NUM_ACTIVE_STATE_GET(ndl_chan); + + if (active_state_count) { + if (!config->dcc_ndl_active_state_list || + config->dcc_ndl_active_state_list_len != + active_state_count * + sizeof(wmi_dcc_ndl_active_state_config)) { + WMI_LOGE(FL("NDL active state is invalid.")); + return QDF_STATUS_E_INVAL; + } + } + } + + len = sizeof(*cmd) + + WMI_TLV_HDR_SIZE + config->channel_count * + sizeof(wmi_channel) + + WMI_TLV_HDR_SIZE + config->channel_count * + sizeof(wmi_ocb_channel) + + WMI_TLV_HDR_SIZE + config->channel_count * + sizeof(wmi_qos_parameter) * WMI_MAX_NUM_AC + + WMI_TLV_HDR_SIZE + config->dcc_ndl_chan_list_len + + WMI_TLV_HDR_SIZE + active_state_count * + sizeof(wmi_dcc_ndl_active_state_config) + + WMI_TLV_HDR_SIZE + config->schedule_size * + sizeof(wmi_ocb_schedule_element); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_ocb_set_config_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ocb_set_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_ocb_set_config_cmd_fixed_param)); + cmd->vdev_id = config->vdev_id; + cmd->channel_count = config->channel_count; + cmd->schedule_size = config->schedule_size; + cmd->flags = config->flags; + buf_ptr += sizeof(*cmd); + + /* Add the wmi_channel info */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + config->channel_count * sizeof(wmi_channel)); + buf_ptr += WMI_TLV_HDR_SIZE; + for (i = 0; i < config->channel_count; i++) { + chan = (wmi_channel *)buf_ptr; + WMITLV_SET_HDR(&chan->tlv_header, + WMITLV_TAG_STRUC_wmi_channel, + WMITLV_GET_STRUCT_TLVLEN(wmi_channel)); + chan->mhz = config->channels[i].chan_freq; + chan->band_center_freq1 = config->channels[i].chan_freq; + chan->band_center_freq2 = 0; + chan->info = 0; + + WMI_SET_CHANNEL_MODE(chan, config->channels[i].ch_mode); + WMI_SET_CHANNEL_MAX_POWER(chan, config->channels[i].max_pwr); + WMI_SET_CHANNEL_MIN_POWER(chan, config->channels[i].min_pwr); + WMI_SET_CHANNEL_MAX_TX_POWER(chan, config->channels[i].max_pwr); + WMI_SET_CHANNEL_REG_POWER(chan, config->channels[i].reg_pwr); + WMI_SET_CHANNEL_ANTENNA_MAX(chan, + config->channels[i].antenna_max); + + if (config->channels[i].bandwidth < 10) + WMI_SET_CHANNEL_FLAG(chan, WMI_CHAN_FLAG_QUARTER_RATE); + else if (config->channels[i].bandwidth < 20) + WMI_SET_CHANNEL_FLAG(chan, WMI_CHAN_FLAG_HALF_RATE); + buf_ptr += sizeof(*chan); + } + + /* Add the wmi_ocb_channel info */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + config->channel_count * sizeof(wmi_ocb_channel)); + buf_ptr += WMI_TLV_HDR_SIZE; + for (i = 0; i < config->channel_count; i++) { + ocb_chan = (wmi_ocb_channel *)buf_ptr; + WMITLV_SET_HDR(&ocb_chan->tlv_header, + WMITLV_TAG_STRUC_wmi_ocb_channel, + WMITLV_GET_STRUCT_TLVLEN(wmi_ocb_channel)); + ocb_chan->bandwidth = config->channels[i].bandwidth; + WMI_CHAR_ARRAY_TO_MAC_ADDR( + config->channels[i].mac_address.bytes, + &ocb_chan->mac_address); + buf_ptr += sizeof(*ocb_chan); + } + + /* Add the wmi_qos_parameter info */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + config->channel_count * sizeof(wmi_qos_parameter)*WMI_MAX_NUM_AC); + buf_ptr += WMI_TLV_HDR_SIZE; + /* WMI_MAX_NUM_AC parameters for each channel */ + for (i = 0; i < config->channel_count; i++) { + for (j = 0; j < WMI_MAX_NUM_AC; j++) { + qos_param = (wmi_qos_parameter *)buf_ptr; + WMITLV_SET_HDR(&qos_param->tlv_header, + WMITLV_TAG_STRUC_wmi_qos_parameter, + WMITLV_GET_STRUCT_TLVLEN(wmi_qos_parameter)); + qos_param->aifsn = + config->channels[i].qos_params[j].aifsn; + qos_param->cwmin = + config->channels[i].qos_params[j].cwmin; + qos_param->cwmax = + config->channels[i].qos_params[j].cwmax; + buf_ptr += sizeof(*qos_param); + } + } + + /* Add the wmi_dcc_ndl_chan (per channel) */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + config->dcc_ndl_chan_list_len); + buf_ptr += WMI_TLV_HDR_SIZE; + if (config->dcc_ndl_chan_list_len) { + ndl_chan = (wmi_dcc_ndl_chan *)buf_ptr; + qdf_mem_copy(ndl_chan, config->dcc_ndl_chan_list, + config->dcc_ndl_chan_list_len); + for (i = 0; i < config->channel_count; i++) + WMITLV_SET_HDR(&(ndl_chan[i].tlv_header), + WMITLV_TAG_STRUC_wmi_dcc_ndl_chan, + WMITLV_GET_STRUCT_TLVLEN(wmi_dcc_ndl_chan)); + buf_ptr += config->dcc_ndl_chan_list_len; + } + + /* Add the wmi_dcc_ndl_active_state_config */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, active_state_count * + sizeof(wmi_dcc_ndl_active_state_config)); + buf_ptr += WMI_TLV_HDR_SIZE; + if (active_state_count) { + ndl_active_config = (wmi_dcc_ndl_active_state_config *)buf_ptr; + qdf_mem_copy(ndl_active_config, + config->dcc_ndl_active_state_list, + active_state_count * sizeof(*ndl_active_config)); + for (i = 0; i < active_state_count; ++i) + WMITLV_SET_HDR(&(ndl_active_config[i].tlv_header), + WMITLV_TAG_STRUC_wmi_dcc_ndl_active_state_config, + WMITLV_GET_STRUCT_TLVLEN( + wmi_dcc_ndl_active_state_config)); + buf_ptr += active_state_count * + sizeof(*ndl_active_config); + } + + /* Add the wmi_ocb_schedule_element info */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + config->schedule_size * sizeof(wmi_ocb_schedule_element)); + buf_ptr += WMI_TLV_HDR_SIZE; + for (i = 0; i < config->schedule_size; i++) { + sched_elem = (wmi_ocb_schedule_element *)buf_ptr; + WMITLV_SET_HDR(&sched_elem->tlv_header, + WMITLV_TAG_STRUC_wmi_ocb_schedule_element, + WMITLV_GET_STRUCT_TLVLEN(wmi_ocb_schedule_element)); + sched_elem->channel_freq = config->schedule[i].chan_freq; + sched_elem->total_duration = config->schedule[i].total_duration; + sched_elem->guard_interval = config->schedule[i].guard_interval; + buf_ptr += sizeof(*sched_elem); + } + + wmi_mtrace(WMI_OCB_SET_CONFIG_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_OCB_SET_CONFIG_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to set OCB config"); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * extract_ocb_channel_config_resp_tlv() - extract ocb channel config resp + * @wmi_handle: wmi handle + * @evt_buf: wmi event buffer + * @status: status buffer + * + * Return: QDF_STATUS_SUCCESS on success + */ +static QDF_STATUS extract_ocb_channel_config_resp_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t *status) +{ + WMI_OCB_SET_CONFIG_RESP_EVENTID_param_tlvs *param_tlvs; + wmi_ocb_set_config_resp_event_fixed_param *fix_param; + + param_tlvs = evt_buf; + fix_param = param_tlvs->fixed_param; + + *status = fix_param->status; + return QDF_STATUS_SUCCESS; +} + +/** + * extract_ocb_tsf_timer_tlv() - extract TSF timer from event buffer + * @wmi_handle: wmi handle + * @evt_buf: wmi event buffer + * @resp: response buffer + * + * Return: QDF_STATUS_SUCCESS on success + */ +static QDF_STATUS extract_ocb_tsf_timer_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct ocb_get_tsf_timer_response *resp) +{ + WMI_OCB_GET_TSF_TIMER_RESP_EVENTID_param_tlvs *param_tlvs; + wmi_ocb_get_tsf_timer_resp_event_fixed_param *fix_param; + + param_tlvs = evt_buf; + fix_param = param_tlvs->fixed_param; + resp->vdev_id = fix_param->vdev_id; + resp->timer_high = fix_param->tsf_timer_high; + resp->timer_low = fix_param->tsf_timer_low; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_ocb_ndl_resp_tlv() - extract TSF timer from event buffer + * @wmi_handle: wmi handle + * @evt_buf: wmi event buffer + * @resp: response buffer + * + * Return: QDF_STATUS_SUCCESS on success + */ +static QDF_STATUS extract_ocb_ndl_resp_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct ocb_dcc_update_ndl_response *resp) +{ + WMI_DCC_UPDATE_NDL_RESP_EVENTID_param_tlvs *param_tlvs; + wmi_dcc_update_ndl_resp_event_fixed_param *fix_param; + + param_tlvs = evt_buf; + fix_param = param_tlvs->fixed_param; + resp->vdev_id = fix_param->vdev_id; + resp->status = fix_param->status; + return QDF_STATUS_SUCCESS; +} + +/** + * extract_ocb_dcc_stats_tlv() - extract DCC stats from event buffer + * @wmi_handle: wmi handle + * @evt_buf: wmi event buffer + * @resp: response buffer + * + * Since length of stats is variable, buffer for DCC stats will be allocated + * in this function. The caller must free the buffer. + * + * Return: QDF_STATUS_SUCCESS on success + */ +static QDF_STATUS extract_ocb_dcc_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct ocb_dcc_get_stats_response **resp) +{ + struct ocb_dcc_get_stats_response *response; + WMI_DCC_GET_STATS_RESP_EVENTID_param_tlvs *param_tlvs; + wmi_dcc_get_stats_resp_event_fixed_param *fix_param; + + param_tlvs = (WMI_DCC_GET_STATS_RESP_EVENTID_param_tlvs *)evt_buf; + fix_param = param_tlvs->fixed_param; + + /* Allocate and populate the response */ + if (fix_param->num_channels > ((WMI_SVC_MSG_MAX_SIZE - + sizeof(*fix_param)) / sizeof(wmi_dcc_ndl_stats_per_channel)) || + fix_param->num_channels > param_tlvs->num_stats_per_channel_list) { + WMI_LOGW("%s: too many channels:%d actual:%d", __func__, + fix_param->num_channels, + param_tlvs->num_stats_per_channel_list); + *resp = NULL; + return QDF_STATUS_E_INVAL; + } + response = qdf_mem_malloc(sizeof(*response) + fix_param->num_channels * + sizeof(wmi_dcc_ndl_stats_per_channel)); + *resp = response; + if (!response) + return QDF_STATUS_E_NOMEM; + + response->vdev_id = fix_param->vdev_id; + response->num_channels = fix_param->num_channels; + response->channel_stats_array_len = + fix_param->num_channels * + sizeof(wmi_dcc_ndl_stats_per_channel); + response->channel_stats_array = ((uint8_t *)response) + + sizeof(*response); + qdf_mem_copy(response->channel_stats_array, + param_tlvs->stats_per_channel_list, + response->channel_stats_array_len); + + return QDF_STATUS_SUCCESS; +} + +void wmi_ocb_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_ocb_set_utc_time_cmd = send_ocb_set_utc_time_cmd_tlv; + ops->send_ocb_get_tsf_timer_cmd = send_ocb_get_tsf_timer_cmd_tlv; + ops->send_dcc_clear_stats_cmd = send_dcc_clear_stats_cmd_tlv; + ops->send_dcc_get_stats_cmd = send_dcc_get_stats_cmd_tlv; + ops->send_dcc_update_ndl_cmd = send_dcc_update_ndl_cmd_tlv; + ops->send_ocb_set_config_cmd = send_ocb_set_config_cmd_tlv; + ops->send_ocb_stop_timing_advert_cmd = + send_ocb_stop_timing_advert_cmd_tlv; + ops->send_ocb_start_timing_advert_cmd = + send_ocb_start_timing_advert_cmd_tlv; + ops->extract_ocb_chan_config_resp = + extract_ocb_channel_config_resp_tlv; + ops->extract_ocb_tsf_timer = extract_ocb_tsf_timer_tlv; + ops->extract_dcc_update_ndl_resp = extract_ocb_ndl_resp_tlv; + ops->extract_dcc_stats = extract_ocb_dcc_stats_tlv; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_ocb_ut.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_ocb_ut.c new file mode 100644 index 0000000000000000000000000000000000000000..b97712418eec702b479f743defc12b1fb4d715fa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_ocb_ut.c @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wmi_unified_api.h" +#include "wmi.h" +#include "wmi_version.h" +#include "wmi_unified_priv.h" +#include +#include "target_if.h" +#include "wma.h" +#include "wlan_ocb_ucfg_api.h" +#include "wlan_ocb_main.h" + +void wmi_ocb_ut_attach(struct wmi_unified *wmi_handle); + +static inline struct wlan_ocb_rx_ops * +target_if_ocb_get_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_pdev *pdev; + struct ocb_pdev_obj *pdev_obj; + + pdev = wlan_objmgr_get_pdev_by_id(psoc, 0, + WLAN_OCB_SB_ID); + pdev_obj = (struct ocb_pdev_obj *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_OCB); + return &pdev_obj->ocb_rxops; +} + +/** + * fake_vdev_create_cmd_tlv() - send VDEV create command to fw + * @wmi_handle: wmi handle + * @param: pointer to hold vdev create parameter + * @macaddr: vdev mac address + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS fake_vdev_create_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct vdev_create_params *param) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_vdev_delete_cmd_tlv() - send VDEV delete command to fw + * @wmi_handle: wmi handle + * @if_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS fake_vdev_delete_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t if_id) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_ocb_set_utc_time_cmd_tlv() - send the UTC time to the firmware + * @wmi_handle: pointer to the wmi handle + * @utc: pointer to the UTC time struct + * + * Return: 0 on succes + */ +static QDF_STATUS fake_ocb_set_utc_time_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_utc_param *utc) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_ocb_get_tsf_timer_cmd_tlv() - get ocb tsf timer val + * @wmi_handle: pointer to the wmi handle + * @request: pointer to the request + * + * Return: 0 on succes + */ +static QDF_STATUS fake_ocb_get_tsf_timer_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wlan_ocb_rx_ops *ocb_rx_ops; + struct ocb_get_tsf_timer_response response; + ol_scn_t scn = (ol_scn_t) wmi_handle->scn_handle; + + WMI_LOGP("%s : called", __func__); + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + WMI_LOGP("null psoc"); + return -EINVAL; + } + response.vdev_id = vdev_id; + response.timer_high = 0x1234; + response.timer_low = 0x5678; + + ocb_rx_ops = target_if_ocb_get_rx_ops(psoc); + if (ocb_rx_ops->ocb_tsf_timer) { + status = ocb_rx_ops->ocb_tsf_timer(psoc, &response); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGP("ocb_tsf_timer failed."); + return -EINVAL; + } + } else { + WMI_LOGP("No ocb_tsf_timer callback"); + return -EINVAL; + } + return QDF_STATUS_SUCCESS; +} + +/** + * fake_dcc_clear_stats_cmd_tlv() - command to clear the DCC stats + * @wmi_handle: pointer to the wmi handle + * @vdev_id: vdev id + * @dcc_stats_bitmap: dcc status bitmap + * + * Return: 0 on succes + */ +static QDF_STATUS fake_dcc_clear_stats_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint32_t dcc_stats_bitmap) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +wmi_dcc_ndl_stats_per_channel chan1_info[2] = { + [0] = {.chan_info = 5860, + .tx_power_datarate = 23 | (10 << 8), + .carrier_sense_est_comm_range = 107 | (198 << 13), + .dcc_stats = 78 | (56 << 8) | (345 << 16), + .packet_stats = 1278 | (789 << 14), + .channel_busy_time = 1389, + }, + [1] = {.chan_info = 5880, + .tx_power_datarate = 53 | (17 << 8), + .carrier_sense_est_comm_range = 137 | (198 << 13), + .dcc_stats = 78 | (66 << 8) | (245 << 16), + .packet_stats = 1278 | (889 << 14), + .channel_busy_time = 2389, + }, +}; + +/** + * fake_dcc_get_stats_cmd_tlv() - get the DCC channel stats + * @wmi_handle: pointer to the wmi handle + * @get_stats_param: pointer to the dcc stats + * + * Return: 0 on succes + */ +static QDF_STATUS fake_dcc_get_stats_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_dcc_get_stats_param *get_stats_param) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wlan_ocb_rx_ops *ocb_rx_ops; + ol_scn_t scn = (ol_scn_t) wmi_handle->scn_handle; + struct ocb_dcc_get_stats_response *response; + WMI_LOGP("%s : called", __func__); + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + WMI_LOGP("null psoc"); + return -EINVAL; + } + response = qdf_mem_malloc(sizeof(*response) + 2 * + sizeof(wmi_dcc_ndl_stats_per_channel)); + response->num_channels = 2; + response->channel_stats_array_len = 2 * + sizeof(wmi_dcc_ndl_stats_per_channel); + response->vdev_id = get_stats_param->vdev_id; + response->channel_stats_array = (uint8_t *)response + sizeof(*response); + qdf_mem_copy(response->channel_stats_array, + &chan1_info, + 2 * sizeof(wmi_dcc_ndl_stats_per_channel)); + WMI_LOGP("channel1 freq %d, channel2 freq %d", chan1_info[0].chan_info, + chan1_info[1].chan_info); + ocb_rx_ops = target_if_ocb_get_rx_ops(psoc); + if (ocb_rx_ops->ocb_dcc_stats_indicate) { + status = ocb_rx_ops->ocb_dcc_stats_indicate(psoc, + response, true); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGP("dcc_stats_indicate failed."); + status = -EINVAL; + } else { + status = 0; + } + } else { + WMI_LOGP("No dcc_stats_indicate callback"); + status = -EINVAL; + } + + qdf_mem_free(response); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_dcc_update_ndl_cmd_tlv() - command to update the NDL data + * @wmi_handle: pointer to the wmi handle + * @update_ndl_param: pointer to the request parameters + * + * Return: 0 on success + */ +static QDF_STATUS fake_dcc_update_ndl_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_dcc_update_ndl_param *update_ndl_param) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wlan_ocb_rx_ops *ocb_rx_ops; + ol_scn_t scn = (ol_scn_t) wmi_handle->scn_handle; + struct ocb_dcc_update_ndl_response *resp; + WMI_LOGP("%s : called", __func__); + /* Allocate and populate the response */ + resp = qdf_mem_malloc(sizeof(*resp)); + if (!resp) + return -ENOMEM; + + resp->vdev_id = update_ndl_param->vdev_id; + resp->status = 0; + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + WMI_LOGP("null psoc"); + return -EINVAL; + } + + ocb_rx_ops = target_if_ocb_get_rx_ops(psoc); + if (ocb_rx_ops->ocb_dcc_ndl_update) { + status = ocb_rx_ops->ocb_dcc_ndl_update(psoc, resp); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGP("dcc_ndl_update failed."); + status = -EINVAL; + } else { + status = 0; + } + } else { + WMI_LOGP("No dcc_ndl_update callback"); + status = -EINVAL; + } + + qdf_mem_free(resp); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_ocb_set_config_cmd_tlv() - send the OCB config to the FW + * @wmi_handle: pointer to the wmi handle + * @config: the OCB configuration + * + * Return: 0 on success + */ +static QDF_STATUS fake_ocb_set_config_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_config *config) +{ + u32 i; + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wlan_ocb_rx_ops *ocb_rx_ops; + ol_scn_t scn = (ol_scn_t) wmi_handle->scn_handle; + + wmi_debug("vdev_id=%d, channel_count=%d, schedule_size=%d, flag=%x", + config->vdev_id, config->channel_count, + config->schedule_size, config->flags); + + for (i = 0; i < config->channel_count; i++) { + wmi_debug("channel info for channel %d" + " chan_freq=%d, bandwidth=%d, " QDF_MAC_ADDRESS_STR + " max_pwr=%d, min_pwr=%d, reg_pwr=%d, antenna_max=%d, " + "flags=%d", i, config->channels[i].chan_freq, + config->channels[i].bandwidth, + QDF_MAC_ADDR_REF( + config->channels[i].mac_address.bytes), + config->channels[i].max_pwr, + config->channels[i].min_pwr, + config->channels[i].reg_pwr, + config->channels[i].antenna_max, + config->channels[i].flags); + } + + for (i = 0; i < config->schedule_size; i++) { + wmi_debug("schedule info for channel %d: " + "chan_fre=%d, total_duration=%d, guard_intreval=%d", + i, config->schedule[i].chan_freq, + config->schedule[i].total_duration, + config->schedule[i].guard_interval); + } + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + WMI_LOGP("null psoc"); + return -EINVAL; + } + + ocb_rx_ops = target_if_ocb_get_rx_ops(psoc); + if (ocb_rx_ops->ocb_set_config_status) { + status = ocb_rx_ops->ocb_set_config_status(psoc, 0); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGP("ocb_set_config_status failed."); + return -EINVAL; + } + } else { + WMI_LOGP("No ocb_set_config_status callback"); + return -EINVAL; + } + return QDF_STATUS_SUCCESS; +} + +/** + * fake_ocb_stop_timing_advert_cmd_tlv() - stop sending the + * timing advertisement frames on a channel + * @wmi_handle: pointer to the wmi handle + * @timing_advert: pointer to the timing advertisement struct + * + * Return: 0 on succes + */ +static QDF_STATUS fake_ocb_stop_timing_advert_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_timing_advert_param *timing_advert) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_ocb_start_timing_advert_cmd_tlv() - start sending the + * timing advertisement frames on a channel + * @wmi_handle: pointer to the wmi handle + * @timing_advert: pointer to the timing advertisement struct + * + * Return: 0 on succes + */ +static QDF_STATUS +fake_ocb_start_timing_advert_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_timing_advert_param *timing_advert) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_peer_create_cmd_tlv() - send peer create command to fw + * @wmi: wmi handle + * @peer_addr: peer mac address + * @peer_type: peer type + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS fake_peer_create_cmd_tlv(wmi_unified_t wmi, + struct peer_create_params *param) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_peer_delete_cmd_tlv() - send PEER delete command to fw + * @wmi: wmi handle + * @peer_addr: peer mac addr + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS fake_peer_delete_cmd_tlv(wmi_unified_t wmi, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + uint8_t vdev_id) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_vdev_start_cmd_tlv() - send vdev start request to fw + * @wmi_handle: wmi handle + * @req: vdev start params + * + * Return: QDF status + */ +static QDF_STATUS fake_vdev_start_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_start_params *req) +{ + tp_wma_handle wma = (tp_wma_handle) wmi_handle->scn_handle; + + wmi_debug("vdev_id %d freq %d chanmode %d ch_info is_dfs %d " + "beacon interval %d dtim %d center_chan %d center_freq2 %d " + "max_txpow: 0x%x " + "Tx SS %d, Rx SS %d, ldpc_rx: %d, cac %d, regd %d, HE ops: %d", + (int)req->vdev_id, req->channel.mhz, + req->channel.phy_mode, + (int)req->channel.dfs_set, req->beacon_intval, req->dtim_period, + req->channel.cfreq1, req->channel.cfreq2, + req->channel.maxregpower, + req->preferred_tx_streams, req->preferred_rx_streams, + (int)req->ldpc_rx_enabled, req->cac_duration_ms, + req->regdomain, req->he_ops); + ucfg_ocb_config_channel(wma->pdev); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_vdev_down_cmd_tlv() - send vdev down command to fw + * @wmi: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS fake_vdev_down_cmd_tlv(wmi_unified_t wmi, uint8_t vdev_id) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_vdev_set_param_cmd_tlv() - WMI vdev set parameter function + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold vdev set parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS fake_vdev_set_param_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_set_params *param) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_set_enable_disable_mcc_adaptive_scheduler_cmd_tlv() - + * faked API to enable/disable mcc scheduler + * @wmi_handle: wmi handle + * @mcc_adaptive_scheduler: enable/disable + * + * This function enable/disable mcc adaptive scheduler in fw. + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS fake_set_enable_disable_mcc_adaptive_scheduler_cmd_tlv( + wmi_unified_t wmi_handle, uint32_t mcc_adaptive_scheduler, + uint32_t pdev_id) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/* + * fake_process_set_ie_info_cmd_tlv() - Function to send IE info to firmware + * @wmi_handle: Pointer to WMi handle + * @ie_data: Pointer for ie data + * + * This function sends IE information to firmware + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + * + */ +static QDF_STATUS fake_process_set_ie_info_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_ie_info_param *ie_info) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +void wmi_ocb_ut_attach(struct wmi_unified *wmi_handle) +{ + struct wmi_ops *wmi_ops; + + if (!wmi_handle) { + WMI_LOGP("%s: null wmi handle", __func__); + return; + } + + wmi_ops = wmi_handle->ops; + wmi_ops->send_vdev_create_cmd = fake_vdev_create_cmd_tlv; + wmi_ops->send_vdev_delete_cmd = fake_vdev_delete_cmd_tlv; + wmi_ops->send_vdev_down_cmd = fake_vdev_down_cmd_tlv; + wmi_ops->send_vdev_start_cmd = fake_vdev_start_cmd_tlv; + wmi_ops->send_peer_create_cmd = fake_peer_create_cmd_tlv; + wmi_ops->send_peer_delete_cmd = fake_peer_delete_cmd_tlv; + wmi_ops->send_vdev_set_param_cmd = fake_vdev_set_param_cmd_tlv; + wmi_ops->send_ocb_set_utc_time_cmd = fake_ocb_set_utc_time_cmd_tlv; + wmi_ops->send_ocb_get_tsf_timer_cmd = fake_ocb_get_tsf_timer_cmd_tlv; + wmi_ops->send_dcc_clear_stats_cmd = fake_dcc_clear_stats_cmd_tlv; + wmi_ops->send_dcc_get_stats_cmd = fake_dcc_get_stats_cmd_tlv; + wmi_ops->send_dcc_update_ndl_cmd = fake_dcc_update_ndl_cmd_tlv; + wmi_ops->send_ocb_set_config_cmd = fake_ocb_set_config_cmd_tlv; + wmi_ops->send_ocb_stop_timing_advert_cmd = + fake_ocb_stop_timing_advert_cmd_tlv; + wmi_ops->send_ocb_start_timing_advert_cmd = + fake_ocb_start_timing_advert_cmd_tlv; + wmi_ops->send_set_enable_disable_mcc_adaptive_scheduler_cmd = + fake_set_enable_disable_mcc_adaptive_scheduler_cmd_tlv; + wmi_ops->send_process_set_ie_info_cmd = + fake_process_set_ie_info_cmd_tlv; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_p2p_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_p2p_api.c new file mode 100644 index 0000000000000000000000000000000000000000..d5ddb9b3a88ad5c83879e5aa58e9b58d8559c1f1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_p2p_api.c @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to P2P component. + */ + +#include +#include + +QDF_STATUS wmi_unified_set_p2pgo_oppps_req(wmi_unified_t wmi_handle, + struct p2p_ps_params *oppps) +{ + if (wmi_handle->ops->send_set_p2pgo_oppps_req_cmd) + return wmi_handle->ops->send_set_p2pgo_oppps_req_cmd(wmi_handle, + oppps); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_p2pgo_noa_req_cmd(wmi_unified_t wmi_handle, + struct p2p_ps_params *noa) +{ + if (wmi_handle->ops->send_set_p2pgo_noa_req_cmd) + return wmi_handle->ops->send_set_p2pgo_noa_req_cmd(wmi_handle, + noa); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_p2p_noa_ev_param(wmi_unified_t wmi_handle, + void *evt_buf, + struct p2p_noa_info *param) +{ + if (!wmi_handle) { + WMI_LOGE("wmi handle is null"); + return QDF_STATUS_E_INVAL; + } + + if (wmi_handle->ops->extract_p2p_noa_ev_param) + return wmi_handle->ops->extract_p2p_noa_ev_param( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_send_set_mac_addr_rx_filter_cmd(wmi_unified_t wmi_handle, + struct p2p_set_mac_filter *param) +{ + if (!wmi_handle) { + WMI_LOGE("wmi handle is null"); + return QDF_STATUS_E_INVAL; + } + + if (wmi_handle->ops->set_mac_addr_rx_filter) + return wmi_handle->ops->set_mac_addr_rx_filter( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_mac_addr_rx_filter_evt_param(wmi_unified_t wmi_handle, + void *evt_buf, + struct p2p_set_mac_filter_evt *param) +{ + if (!wmi_handle) { + WMI_LOGE("wmi handle is null"); + return QDF_STATUS_E_INVAL; + } + + if (wmi_handle->ops->extract_mac_addr_rx_filter_evt_param) + return wmi_handle->ops->extract_mac_addr_rx_filter_evt_param( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_P2P_LISTEN_OFFLOAD +QDF_STATUS wmi_unified_p2p_lo_start_cmd(wmi_unified_t wmi_handle, + struct p2p_lo_start *param) +{ + if (!wmi_handle) { + WMI_LOGE("wmi handle is null"); + return QDF_STATUS_E_INVAL; + } + + if (wmi_handle->ops->send_p2p_lo_start_cmd) + return wmi_handle->ops->send_p2p_lo_start_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_p2p_lo_stop_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + if (!wmi_handle) { + WMI_LOGE("wmi handle is null"); + return QDF_STATUS_E_INVAL; + } + + if (wmi_handle->ops->send_p2p_lo_stop_cmd) + return wmi_handle->ops->send_p2p_lo_stop_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_p2p_lo_stop_ev_param(wmi_unified_t wmi_handle, + void *evt_buf, + struct p2p_lo_event *param) +{ + if (!wmi_handle) { + WMI_LOGE("wmi handle is null"); + return QDF_STATUS_E_INVAL; + } + + if (wmi_handle->ops->extract_p2p_lo_stop_ev_param) + return wmi_handle->ops->extract_p2p_lo_stop_ev_param( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} +#endif /* End of FEATURE_P2P_LISTEN_OFFLOAD*/ + diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_p2p_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_p2p_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..c7c86e14d7118d90ecc27254a59e9d38c2e62b02 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_p2p_tlv.c @@ -0,0 +1,490 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include + +/** + * send_set_p2pgo_noa_req_cmd_tlv() - send p2p go noa request to fw + * @wmi_handle: wmi handle + * @noa: p2p power save parameters + * + * Return: CDF status + */ +static QDF_STATUS send_set_p2pgo_noa_req_cmd_tlv(wmi_unified_t wmi_handle, + struct p2p_ps_params *noa) +{ + wmi_p2p_set_noa_cmd_fixed_param *cmd; + wmi_p2p_noa_descriptor *noa_discriptor; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint16_t len; + QDF_STATUS status; + uint32_t duration; + + WMI_LOGD("%s: Enter", __func__); + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + sizeof(*noa_discriptor); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + status = QDF_STATUS_E_FAILURE; + goto end; + } + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_p2p_set_noa_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_p2p_set_noa_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_p2p_set_noa_cmd_fixed_param)); + duration = (noa->count == 1) ? noa->single_noa_duration : noa->duration; + cmd->vdev_id = noa->session_id; + cmd->enable = (duration) ? true : false; + cmd->num_noa = 1; + + WMITLV_SET_HDR((buf_ptr + sizeof(wmi_p2p_set_noa_cmd_fixed_param)), + WMITLV_TAG_ARRAY_STRUC, sizeof(wmi_p2p_noa_descriptor)); + noa_discriptor = (wmi_p2p_noa_descriptor *)(buf_ptr + + sizeof + (wmi_p2p_set_noa_cmd_fixed_param) + + WMI_TLV_HDR_SIZE); + WMITLV_SET_HDR(&noa_discriptor->tlv_header, + WMITLV_TAG_STRUC_wmi_p2p_noa_descriptor, + WMITLV_GET_STRUCT_TLVLEN(wmi_p2p_noa_descriptor)); + noa_discriptor->type_count = noa->count; + noa_discriptor->duration = duration; + noa_discriptor->interval = noa->interval; + noa_discriptor->start_time = 0; + + wmi_debug("SET P2P GO NOA:vdev_id:%d count:%d duration:%d interval:%d", + cmd->vdev_id, noa->count, noa_discriptor->duration, + noa->interval); + wmi_mtrace(WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID"); + wmi_buf_free(buf); + } + +end: + WMI_LOGD("%s: Exit", __func__); + return status; +} + +/** + * send_set_p2pgo_oppps_req_cmd_tlv() - send p2p go opp power save request to fw + * @wmi_handle: wmi handle + * @noa: p2p opp power save parameters + * + * Return: CDF status + */ +static QDF_STATUS send_set_p2pgo_oppps_req_cmd_tlv(wmi_unified_t wmi_handle, + struct p2p_ps_params *oppps) +{ + wmi_p2p_set_oppps_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + WMI_LOGD("%s: Enter", __func__); + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + status = QDF_STATUS_E_FAILURE; + goto end; + } + + cmd = (wmi_p2p_set_oppps_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_p2p_set_oppps_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_p2p_set_oppps_cmd_fixed_param)); + cmd->vdev_id = oppps->session_id; + if (oppps->ctwindow) + WMI_UNIFIED_OPPPS_ATTR_ENABLED_SET(cmd); + + WMI_UNIFIED_OPPPS_ATTR_CTWIN_SET(cmd, oppps->ctwindow); + wmi_debug("SET P2P GO OPPPS:vdev_id:%d ctwindow:%d", + cmd->vdev_id, oppps->ctwindow); + wmi_mtrace(WMI_P2P_SET_OPPPS_PARAM_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_P2P_SET_OPPPS_PARAM_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_P2P_SET_OPPPS_PARAM_CMDID"); + wmi_buf_free(buf); + } + +end: + WMI_LOGD("%s: Exit", __func__); + return status; +} + +/** + * extract_p2p_noa_ev_param_tlv() - extract p2p noa information from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold p2p noa info + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_p2p_noa_ev_param_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + struct p2p_noa_info *param) +{ + WMI_P2P_NOA_EVENTID_param_tlvs *param_tlvs; + wmi_p2p_noa_event_fixed_param *fixed_param; + uint8_t i; + wmi_p2p_noa_info *wmi_noa_info; + uint8_t *buf_ptr; + uint32_t descriptors; + + param_tlvs = (WMI_P2P_NOA_EVENTID_param_tlvs *)evt_buf; + if (!param_tlvs) { + WMI_LOGE("%s: Invalid P2P NoA event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + + if (!param) { + WMI_LOGE("noa information param is null"); + return QDF_STATUS_E_INVAL; + } + + fixed_param = param_tlvs->fixed_param; + buf_ptr = (uint8_t *) fixed_param; + buf_ptr += sizeof(wmi_p2p_noa_event_fixed_param); + wmi_noa_info = (wmi_p2p_noa_info *) (buf_ptr); + + if (!WMI_UNIFIED_NOA_ATTR_IS_MODIFIED(wmi_noa_info)) { + WMI_LOGE("%s: noa attr is not modified", __func__); + return QDF_STATUS_E_INVAL; + } + + param->vdev_id = fixed_param->vdev_id; + param->index = + (uint8_t)WMI_UNIFIED_NOA_ATTR_INDEX_GET(wmi_noa_info); + param->opps_ps = + (uint8_t)WMI_UNIFIED_NOA_ATTR_OPP_PS_GET(wmi_noa_info); + param->ct_window = + (uint8_t)WMI_UNIFIED_NOA_ATTR_CTWIN_GET(wmi_noa_info); + descriptors = WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(wmi_noa_info); + param->num_desc = (uint8_t)descriptors; + if (param->num_desc > WMI_P2P_MAX_NOA_DESCRIPTORS) { + WMI_LOGE("%s: invalid num desc:%d", __func__, + param->num_desc); + return QDF_STATUS_E_INVAL; + } + + WMI_LOGD("%s:index %u, opps_ps %u, ct_window %u, num_descriptors = %u", + __func__, + param->index, param->opps_ps, param->ct_window, + param->num_desc); + for (i = 0; i < param->num_desc; i++) { + param->noa_desc[i].type_count = + (uint8_t)wmi_noa_info->noa_descriptors[i]. + type_count; + param->noa_desc[i].duration = + wmi_noa_info->noa_descriptors[i].duration; + param->noa_desc[i].interval = + wmi_noa_info->noa_descriptors[i].interval; + param->noa_desc[i].start_time = + wmi_noa_info->noa_descriptors[i].start_time; + WMI_LOGD("%s:NoA descriptor[%d] type_count %u, duration %u, interval %u, start_time = %u", + __func__, i, param->noa_desc[i].type_count, + param->noa_desc[i].duration, + param->noa_desc[i].interval, + param->noa_desc[i].start_time); + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +send_set_mac_addr_rx_filter_cmd_tlv(wmi_unified_t wmi_handle, + struct p2p_set_mac_filter *param) +{ + wmi_vdev_add_mac_addr_to_rx_filter_cmd_fixed_param *cmd; + uint32_t len; + wmi_buf_t buf; + int ret; + + if (!wmi_handle) { + WMI_LOGE("WMA context is invald!"); + return QDF_STATUS_E_INVAL; + } + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("Failed allocate wmi buffer"); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_vdev_add_mac_addr_to_rx_filter_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR( + &cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_add_mac_addr_to_rx_filter_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_add_mac_addr_to_rx_filter_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + cmd->freq = param->freq; + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->mac, &cmd->mac_addr); + if (param->set) + cmd->enable = 1; + else + cmd->enable = 0; + WMI_LOGD("set random mac rx vdev %d freq %d set %d "QDF_MAC_ADDR_FMT, + param->vdev_id, param->freq, param->set, + QDF_MAC_ADDR_REF(param->mac)); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID); + if (ret) { + WMI_LOGE("Failed to send action frame random mac cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_mac_addr_rx_filter_evt_param_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + struct p2p_set_mac_filter_evt *param) +{ + WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID_param_tlvs *param_buf; + wmi_vdev_add_mac_addr_to_rx_filter_status_event_fixed_param *event; + + param_buf = + (WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID_param_tlvs *) + evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid action frame filter mac event"); + return QDF_STATUS_E_INVAL; + } + event = param_buf->fixed_param; + if (!event) { + WMI_LOGE("Invalid fixed param"); + return QDF_STATUS_E_INVAL; + } + param->vdev_id = event->vdev_id; + param->status = event->status; + + return QDF_STATUS_SUCCESS; +} + +#ifdef FEATURE_P2P_LISTEN_OFFLOAD +/** + * send_p2p_lo_start_cmd_tlv() - send p2p lo start request to fw + * @wmi_handle: wmi handle + * @param: p2p listen offload start parameters + * + * Return: QDF status + */ +static QDF_STATUS send_p2p_lo_start_cmd_tlv(wmi_unified_t wmi_handle, + struct p2p_lo_start *param) +{ + wmi_buf_t buf; + wmi_p2p_lo_start_cmd_fixed_param *cmd; + int32_t len = sizeof(*cmd); + uint8_t *buf_ptr; + QDF_STATUS status; + int device_types_len_aligned; + int probe_resp_len_aligned; + + if (!param) { + WMI_LOGE("lo start param is null"); + return QDF_STATUS_E_INVAL; + } + + WMI_LOGD("%s: vdev_id:%d", __func__, param->vdev_id); + + device_types_len_aligned = + qdf_roundup(param->dev_types_len, + sizeof(uint32_t)); + probe_resp_len_aligned = + qdf_roundup(param->probe_resp_len, + sizeof(uint32_t)); + + len += 2 * WMI_TLV_HDR_SIZE + device_types_len_aligned + + probe_resp_len_aligned; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_p2p_lo_start_cmd_fixed_param *)wmi_buf_data(buf); + buf_ptr = (uint8_t *) wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_p2p_lo_start_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_p2p_lo_start_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + cmd->ctl_flags = param->ctl_flags; + cmd->channel = param->freq; + cmd->period = param->period; + cmd->interval = param->interval; + cmd->count = param->count; + cmd->device_types_len = param->dev_types_len; + cmd->prob_resp_len = param->probe_resp_len; + + buf_ptr += sizeof(wmi_p2p_lo_start_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + device_types_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, param->device_types, + param->dev_types_len); + + buf_ptr += device_types_len_aligned; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + probe_resp_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, param->probe_resp_tmplt, + param->probe_resp_len); + + WMI_LOGD("%s: Sending WMI_P2P_LO_START command, channel=%d, period=%d, interval=%d, count=%d", __func__, + cmd->channel, cmd->period, cmd->interval, cmd->count); + + wmi_mtrace(WMI_P2P_LISTEN_OFFLOAD_START_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, + buf, len, + WMI_P2P_LISTEN_OFFLOAD_START_CMDID); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Failed to send p2p lo start: %d", + __func__, status); + wmi_buf_free(buf); + return status; + } + + WMI_LOGD("%s: Successfully sent WMI_P2P_LO_START", __func__); + + return QDF_STATUS_SUCCESS; +} + +/** + * send_p2p_lo_stop_cmd_tlv() - send p2p lo stop request to fw + * @wmi_handle: wmi handle + * @param: p2p listen offload stop parameters + * + * Return: QDF status + */ +static QDF_STATUS send_p2p_lo_stop_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + wmi_buf_t buf; + wmi_p2p_lo_stop_cmd_fixed_param *cmd; + int32_t len; + QDF_STATUS status; + + WMI_LOGD("%s: vdev_id:%d", __func__, vdev_id); + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_p2p_lo_stop_cmd_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_p2p_lo_stop_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_p2p_lo_stop_cmd_fixed_param)); + + cmd->vdev_id = vdev_id; + + WMI_LOGD("%s: Sending WMI_P2P_LO_STOP command", __func__); + + wmi_mtrace(WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, + buf, len, + WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Failed to send p2p lo stop: %d", + __func__, status); + wmi_buf_free(buf); + return status; + } + + WMI_LOGD("%s: Successfully sent WMI_P2P_LO_STOP", __func__); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_p2p_lo_stop_ev_param_tlv() - extract p2p lo stop + * information from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold p2p lo stop event information + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_p2p_lo_stop_ev_param_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + struct p2p_lo_event *param) +{ + WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID_param_tlvs *param_tlvs; + wmi_p2p_lo_stopped_event_fixed_param *lo_param; + + param_tlvs = (WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID_param_tlvs *) + evt_buf; + if (!param_tlvs) { + WMI_LOGE("%s: Invalid P2P lo stop event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + + if (!param) { + WMI_LOGE("lo stop event param is null"); + return QDF_STATUS_E_INVAL; + } + + lo_param = param_tlvs->fixed_param; + param->vdev_id = lo_param->vdev_id; + param->reason_code = lo_param->reason; + WMI_LOGD("%s: vdev_id:%d, reason:%d", __func__, + param->vdev_id, param->reason_code); + + return QDF_STATUS_SUCCESS; +} + +void wmi_p2p_listen_offload_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_p2p_lo_start_cmd = send_p2p_lo_start_cmd_tlv; + ops->send_p2p_lo_stop_cmd = send_p2p_lo_stop_cmd_tlv; + ops->extract_p2p_lo_stop_ev_param = + extract_p2p_lo_stop_ev_param_tlv; +} +#endif /* FEATURE_P2P_LISTEN_OFFLOAD */ + +void wmi_p2p_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_set_p2pgo_oppps_req_cmd = send_set_p2pgo_oppps_req_cmd_tlv; + ops->send_set_p2pgo_noa_req_cmd = send_set_p2pgo_noa_req_cmd_tlv; + ops->extract_p2p_noa_ev_param = extract_p2p_noa_ev_param_tlv; + ops->set_mac_addr_rx_filter = send_set_mac_addr_rx_filter_cmd_tlv, + ops->extract_mac_addr_rx_filter_evt_param = + extract_mac_addr_rx_filter_evt_param_tlv, + wmi_p2p_listen_offload_attach_tlv(wmi_handle); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_pmo_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_pmo_api.c new file mode 100644 index 0000000000000000000000000000000000000000..00b2fc503b30128832f8e01e74c198599547866a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_pmo_api.c @@ -0,0 +1,350 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to PMO component. + */ + +#include "ol_if_athvar.h" +#include "ol_defines.h" +#include "wmi_unified_priv.h" +#include "wmi_unified_pmo_api.h" +#include "wlan_pmo_hw_filter_public_struct.h" + +#ifdef FEATURE_WLAN_D0WOW +QDF_STATUS wmi_unified_d0wow_enable_send(wmi_unified_t wmi_handle, + uint8_t mac_id) +{ + if (wmi_handle->ops->send_d0wow_enable_cmd) + return wmi_handle->ops->send_d0wow_enable_cmd(wmi_handle, + mac_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_d0wow_disable_send(wmi_unified_t wmi_handle, + uint8_t mac_id) +{ + if (wmi_handle->ops->send_d0wow_disable_cmd) + return wmi_handle->ops->send_d0wow_disable_cmd(wmi_handle, + mac_id); + + return QDF_STATUS_E_FAILURE; +} +#endif /* FEATURE_WLAN_D0WOW */ + +QDF_STATUS wmi_unified_add_wow_wakeup_event_cmd(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint32_t *bitmap, + bool enable) +{ + if (wmi_handle->ops->send_add_wow_wakeup_event_cmd) + return wmi_handle->ops->send_add_wow_wakeup_event_cmd( + wmi_handle, vdev_id, bitmap, enable); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_wow_patterns_to_fw_cmd( + wmi_unified_t wmi_handle, + uint8_t vdev_id, uint8_t ptrn_id, + const uint8_t *ptrn, uint8_t ptrn_len, + uint8_t ptrn_offset, const uint8_t *mask, + uint8_t mask_len, bool user, + uint8_t default_patterns) +{ + if (wmi_handle->ops->send_wow_patterns_to_fw_cmd) + return wmi_handle->ops->send_wow_patterns_to_fw_cmd(wmi_handle, + vdev_id, ptrn_id, ptrn, + ptrn_len, ptrn_offset, mask, + mask_len, user, default_patterns); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_enable_arp_ns_offload_cmd( + wmi_unified_t wmi_handle, + struct pmo_arp_offload_params *arp_offload_req, + struct pmo_ns_offload_params *ns_offload_req, + uint8_t vdev_id) +{ + if (wmi_handle->ops->send_enable_arp_ns_offload_cmd) + return wmi_handle->ops->send_enable_arp_ns_offload_cmd( + wmi_handle, + arp_offload_req, ns_offload_req, vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_add_clear_mcbc_filter_cmd( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct qdf_mac_addr multicast_addr, + bool clear_list) +{ + if (wmi_handle->ops->send_add_clear_mcbc_filter_cmd) + return wmi_handle->ops->send_add_clear_mcbc_filter_cmd( + wmi_handle, vdev_id, multicast_addr, clear_list); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_multiple_add_clear_mcbc_filter_cmd( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct pmo_mcast_filter_params *filter_param) +{ + if (wmi_handle->ops->send_multiple_add_clear_mcbc_filter_cmd) + return wmi_handle->ops->send_multiple_add_clear_mcbc_filter_cmd( + wmi_handle, vdev_id, filter_param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_conf_hw_filter_cmd(wmi_unified_t wmi_handle, + struct pmo_hw_filter_params *req) +{ + if (!wmi_handle->ops->send_conf_hw_filter_cmd) + return QDF_STATUS_E_NOSUPPORT; + + return wmi_handle->ops->send_conf_hw_filter_cmd(wmi_handle, req); +} + +QDF_STATUS wmi_unified_send_gtk_offload_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct pmo_gtk_req *params, + bool enable_offload, + uint32_t gtk_offload_opcode) +{ + if (wmi_handle->ops->send_gtk_offload_cmd) + return wmi_handle->ops->send_gtk_offload_cmd(wmi_handle, + vdev_id, params, enable_offload, + gtk_offload_opcode); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_process_gtk_offload_getinfo_cmd( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint64_t offload_req_opcode) +{ + if (wmi_handle->ops->send_process_gtk_offload_getinfo_cmd) + return wmi_handle->ops->send_process_gtk_offload_getinfo_cmd( + wmi_handle, vdev_id, offload_req_opcode); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_enable_enhance_multicast_offload_cmd( + wmi_unified_t wmi_handle, uint8_t vdev_id, bool action) +{ + struct wmi_ops *ops; + + ops = wmi_handle->ops; + if (ops && ops->send_enable_enhance_multicast_offload_cmd) + return ops->send_enable_enhance_multicast_offload_cmd( + wmi_handle, vdev_id, action); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_gtk_rsp_event( + wmi_unified_t wmi_handle, void *evt_buf, + struct pmo_gtk_rsp_params *gtk_rsp_param, uint32_t len) +{ + if (wmi_handle->ops->extract_gtk_rsp_event) + return wmi_handle->ops->extract_gtk_rsp_event(wmi_handle, + evt_buf, gtk_rsp_param, len); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_WLAN_RA_FILTERING +QDF_STATUS wmi_unified_wow_sta_ra_filter_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint8_t default_pattern, + uint16_t rate_limit_interval) +{ + + if (wmi_handle->ops->send_wow_sta_ra_filter_cmd) + return wmi_handle->ops->send_wow_sta_ra_filter_cmd(wmi_handle, + vdev_id, default_pattern, rate_limit_interval); + + return QDF_STATUS_E_FAILURE; + +} +#endif /* FEATURE_WLAN_RA_FILTERING */ + +QDF_STATUS wmi_unified_action_frame_patterns_cmd( + wmi_unified_t wmi_handle, + struct pmo_action_wakeup_set_params *action_params) +{ + if (wmi_handle->ops->send_action_frame_patterns_cmd) + return wmi_handle->ops->send_action_frame_patterns_cmd( + wmi_handle, action_params); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_WLAN_LPHB +QDF_STATUS wmi_unified_lphb_config_hbenable_cmd( + wmi_unified_t wmi_handle, + wmi_hb_set_enable_cmd_fixed_param *params) +{ + if (wmi_handle->ops->send_lphb_config_hbenable_cmd) + return wmi_handle->ops->send_lphb_config_hbenable_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_lphb_config_tcp_params_cmd( + wmi_unified_t wmi_handle, + wmi_hb_set_tcp_params_cmd_fixed_param *lphb_conf_req) +{ + if (wmi_handle->ops->send_lphb_config_tcp_params_cmd) + return wmi_handle->ops->send_lphb_config_tcp_params_cmd( + wmi_handle, lphb_conf_req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_lphb_config_tcp_pkt_filter_cmd( + wmi_unified_t wmi_handle, + wmi_hb_set_tcp_pkt_filter_cmd_fixed_param *g_hb_tcp_filter_fp) +{ + if (wmi_handle->ops->send_lphb_config_tcp_pkt_filter_cmd) + return wmi_handle->ops->send_lphb_config_tcp_pkt_filter_cmd( + wmi_handle, g_hb_tcp_filter_fp); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_lphb_config_udp_params_cmd( + wmi_unified_t wmi_handle, + wmi_hb_set_udp_params_cmd_fixed_param *lphb_conf_req) +{ + if (wmi_handle->ops->send_lphb_config_udp_params_cmd) + return wmi_handle->ops->send_lphb_config_udp_params_cmd( + wmi_handle, lphb_conf_req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_lphb_config_udp_pkt_filter_cmd( + wmi_unified_t wmi_handle, + wmi_hb_set_udp_pkt_filter_cmd_fixed_param *lphb_conf_req) +{ + if (wmi_handle->ops->send_lphb_config_udp_pkt_filter_cmd) + return wmi_handle->ops->send_lphb_config_udp_pkt_filter_cmd( + wmi_handle, lphb_conf_req); + + return QDF_STATUS_E_FAILURE; +} +#endif /* FEATURE_WLAN_LPHB */ + +#ifdef WLAN_FEATURE_PACKET_FILTERING +QDF_STATUS +wmi_unified_enable_disable_packet_filter_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id, bool enable) +{ + if (wmi_handle->ops->send_enable_disable_packet_filter_cmd) + return wmi_handle->ops->send_enable_disable_packet_filter_cmd( + wmi_handle, vdev_id, enable); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_config_packet_filter_cmd( + wmi_unified_t wmi_handle, + uint8_t vdev_id, struct pmo_rcv_pkt_fltr_cfg *rcv_filter_param, + uint8_t filter_id, bool enable) +{ + if (wmi_handle->ops->send_config_packet_filter_cmd) + return wmi_handle->ops->send_config_packet_filter_cmd( + wmi_handle, vdev_id, rcv_filter_param, filter_id, enable); + + return QDF_STATUS_E_FAILURE; +} +#endif /* WLAN_FEATURE_PACKET_FILTERING */ + +QDF_STATUS wmi_unified_wow_delete_pattern_cmd(wmi_unified_t wmi_handle, + uint8_t ptrn_id, + uint8_t vdev_id) +{ + if (wmi_handle->ops->send_wow_delete_pattern_cmd) + return wmi_handle->ops->send_wow_delete_pattern_cmd(wmi_handle, + ptrn_id, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_host_wakeup_ind_to_fw_cmd(wmi_unified_t wmi_handle) +{ + if (wmi_handle->ops->send_host_wakeup_ind_to_fw_cmd) + return wmi_handle->ops->send_host_wakeup_ind_to_fw_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_wow_timer_pattern_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint32_t cookie, uint32_t time) +{ + if (wmi_handle->ops->send_wow_timer_pattern_cmd) + return wmi_handle->ops->send_wow_timer_pattern_cmd(wmi_handle, + vdev_id, cookie, time); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_FEATURE_EXTWOW_SUPPORT +QDF_STATUS wmi_unified_enable_ext_wow_cmd(wmi_unified_t wmi_handle, + struct ext_wow_params *params) +{ + if (wmi_handle->ops->send_enable_ext_wow_cmd) + return wmi_handle->ops->send_enable_ext_wow_cmd(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_app_type2_params_in_fw_cmd( + wmi_unified_t wmi_handle, + struct app_type2_params *appType2Params) +{ + if (wmi_handle->ops->send_set_app_type2_params_in_fw_cmd) + return wmi_handle->ops->send_set_app_type2_params_in_fw_cmd( + wmi_handle, appType2Params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_app_type1_params_in_fw_cmd( + wmi_unified_t wmi_handle, + struct app_type1_params *app_type1_params) +{ + if (wmi_handle->ops->send_app_type1_params_in_fw_cmd) + return wmi_handle->ops->send_app_type1_params_in_fw_cmd( + wmi_handle, app_type1_params); + + return QDF_STATUS_E_FAILURE; +} +#endif /* WLAN_FEATURE_EXTWOW_SUPPORT */ + diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_pmo_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_pmo_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..3f2c2762e67482d8caecd98034222259563cbc7f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_pmo_tlv.c @@ -0,0 +1,1957 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "wmi.h" +#include "wmi_unified_priv.h" +#include "wmi_unified_pmo_api.h" + +#ifdef FEATURE_WLAN_D0WOW +/** + * send_d0wow_enable_cmd_tlv() - WMI d0 wow enable function + * @param wmi_handle: handle to WMI. + * @mac_id: radio context + * + * Return: 0 on success and error code on failure. + */ +static QDF_STATUS send_d0wow_enable_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t mac_id) +{ + wmi_d0_wow_enable_disable_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len; + QDF_STATUS status; + + len = sizeof(wmi_d0_wow_enable_disable_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_d0_wow_enable_disable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_d0_wow_enable_disable_cmd_fixed_param)); + + cmd->enable = true; + + wmi_mtrace(WMI_D0_WOW_ENABLE_DISABLE_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_D0_WOW_ENABLE_DISABLE_CMDID); + if (QDF_IS_STATUS_ERROR(status)) + wmi_buf_free(buf); + + return status; +} + +/** + * send_d0wow_disable_cmd_tlv() - WMI d0 wow disable function + * @param wmi_handle: handle to WMI. + * @mac_id: radio context + * + * Return: 0 on success and error code on failure. + */ +static QDF_STATUS send_d0wow_disable_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t mac_id) +{ + wmi_d0_wow_enable_disable_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len; + QDF_STATUS status; + + len = sizeof(wmi_d0_wow_enable_disable_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_d0_wow_enable_disable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_d0_wow_enable_disable_cmd_fixed_param)); + + cmd->enable = false; + + wmi_mtrace(WMI_D0_WOW_ENABLE_DISABLE_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_D0_WOW_ENABLE_DISABLE_CMDID); + if (QDF_IS_STATUS_ERROR(status)) + wmi_buf_free(buf); + + return status; +} + +void wmi_d0wow_attach_tlv(struct wmi_unified *wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_d0wow_enable_cmd = send_d0wow_enable_cmd_tlv; + ops->send_d0wow_disable_cmd = send_d0wow_disable_cmd_tlv; +} +#endif /* FEATURE_WLAN_D0WOW */ + +/** + * send_add_wow_wakeup_event_cmd_tlv() - Configures wow wakeup events. + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @bitmap: Event bitmap + * @enable: enable/disable + * + * Return: CDF status + */ +static QDF_STATUS send_add_wow_wakeup_event_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint32_t *bitmap, + bool enable) +{ + WMI_WOW_ADD_DEL_EVT_CMD_fixed_param *cmd; + uint16_t len; + wmi_buf_t buf; + int ret; + + len = sizeof(WMI_WOW_ADD_DEL_EVT_CMD_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (WMI_WOW_ADD_DEL_EVT_CMD_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_WOW_ADD_DEL_EVT_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_WOW_ADD_DEL_EVT_CMD_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->is_add = enable; + qdf_mem_copy(&(cmd->event_bitmaps[0]), bitmap, sizeof(uint32_t) * + WMI_WOW_MAX_EVENT_BM_LEN); + + WMI_LOGD("Wakeup pattern 0x%x%x%x%x %s in fw", cmd->event_bitmaps[0], + cmd->event_bitmaps[1], cmd->event_bitmaps[2], + cmd->event_bitmaps[3], enable ? "enabled" : "disabled"); + + wmi_mtrace(WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID); + if (ret) { + WMI_LOGE("Failed to config wow wakeup event"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_wow_patterns_to_fw_cmd_tlv() - Sends WOW patterns to FW. + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @ptrn_id: pattern id + * @ptrn: pattern + * @ptrn_len: pattern length + * @ptrn_offset: pattern offset + * @mask: mask + * @mask_len: mask length + * @user: true for user configured pattern and false for default pattern + * @default_patterns: default patterns + * + * Return: CDF status + */ +static QDF_STATUS send_wow_patterns_to_fw_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, uint8_t ptrn_id, + const uint8_t *ptrn, uint8_t ptrn_len, + uint8_t ptrn_offset, const uint8_t *mask, + uint8_t mask_len, bool user, + uint8_t default_patterns) +{ + WMI_WOW_ADD_PATTERN_CMD_fixed_param *cmd; + WOW_BITMAP_PATTERN_T *bitmap_pattern; + wmi_buf_t buf; + uint8_t *buf_ptr; + int32_t len; + int ret; + + len = sizeof(WMI_WOW_ADD_PATTERN_CMD_fixed_param) + + WMI_TLV_HDR_SIZE + + 1 * sizeof(WOW_BITMAP_PATTERN_T) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(WOW_IPV4_SYNC_PATTERN_T) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(WOW_IPV6_SYNC_PATTERN_T) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(WOW_MAGIC_PATTERN_CMD) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(uint32_t) + WMI_TLV_HDR_SIZE + 1 * sizeof(uint32_t); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_WOW_ADD_PATTERN_CMD_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *) cmd; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_WOW_ADD_PATTERN_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_WOW_ADD_PATTERN_CMD_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->pattern_id = ptrn_id; + + cmd->pattern_type = WOW_BITMAP_PATTERN; + buf_ptr += sizeof(WMI_WOW_ADD_PATTERN_CMD_fixed_param); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(WOW_BITMAP_PATTERN_T)); + buf_ptr += WMI_TLV_HDR_SIZE; + bitmap_pattern = (WOW_BITMAP_PATTERN_T *) buf_ptr; + + WMITLV_SET_HDR(&bitmap_pattern->tlv_header, + WMITLV_TAG_STRUC_WOW_BITMAP_PATTERN_T, + WMITLV_GET_STRUCT_TLVLEN(WOW_BITMAP_PATTERN_T)); + + qdf_mem_copy(&bitmap_pattern->patternbuf[0], ptrn, ptrn_len); + qdf_mem_copy(&bitmap_pattern->bitmaskbuf[0], mask, mask_len); + + bitmap_pattern->pattern_offset = ptrn_offset; + bitmap_pattern->pattern_len = ptrn_len; + + if (bitmap_pattern->pattern_len > WOW_DEFAULT_BITMAP_PATTERN_SIZE) + bitmap_pattern->pattern_len = WOW_DEFAULT_BITMAP_PATTERN_SIZE; + + if (bitmap_pattern->pattern_len > WOW_DEFAULT_BITMASK_SIZE) + bitmap_pattern->pattern_len = WOW_DEFAULT_BITMASK_SIZE; + + bitmap_pattern->bitmask_len = bitmap_pattern->pattern_len; + bitmap_pattern->pattern_id = ptrn_id; + + WMI_LOGD("vdev: %d, ptrn id: %d, ptrn len: %d, ptrn offset: %d user %d", + cmd->vdev_id, cmd->pattern_id, bitmap_pattern->pattern_len, + bitmap_pattern->pattern_offset, user); + WMI_LOGD("Pattern : "); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + &bitmap_pattern->patternbuf[0], + bitmap_pattern->pattern_len); + + WMI_LOGD("Mask : "); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + &bitmap_pattern->bitmaskbuf[0], + bitmap_pattern->pattern_len); + + buf_ptr += sizeof(WOW_BITMAP_PATTERN_T); + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_IPV4_SYNC_PATTERN_T but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_IPV6_SYNC_PATTERN_T but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_MAGIC_PATTERN_CMD but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for pattern_info_timeout but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for ratelimit_interval with dummy data as this fix elem */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, 1 * sizeof(uint32_t)); + buf_ptr += WMI_TLV_HDR_SIZE; + *(uint32_t *) buf_ptr = 0; + + wmi_mtrace(WMI_WOW_ADD_WAKE_PATTERN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_ADD_WAKE_PATTERN_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send wow ptrn to fw", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * fill_arp_offload_params_tlv() - Fill ARP offload data + * @wmi_handle: wmi handle + * @offload_req: offload request + * @buf_ptr: buffer pointer + * + * To fill ARP offload data to firmware + * when target goes to wow mode. + * + * Return: None + */ +static void fill_arp_offload_params_tlv(wmi_unified_t wmi_handle, + struct pmo_arp_offload_params *offload_req, uint8_t **buf_ptr) +{ + + int i; + WMI_ARP_OFFLOAD_TUPLE *arp_tuple; + bool enable_or_disable = offload_req->enable; + + WMITLV_SET_HDR(*buf_ptr, WMITLV_TAG_ARRAY_STRUC, + (WMI_MAX_ARP_OFFLOADS*sizeof(WMI_ARP_OFFLOAD_TUPLE))); + *buf_ptr += WMI_TLV_HDR_SIZE; + for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) { + arp_tuple = (WMI_ARP_OFFLOAD_TUPLE *)*buf_ptr; + WMITLV_SET_HDR(&arp_tuple->tlv_header, + WMITLV_TAG_STRUC_WMI_ARP_OFFLOAD_TUPLE, + WMITLV_GET_STRUCT_TLVLEN(WMI_ARP_OFFLOAD_TUPLE)); + + /* Fill data for ARP and NS in the first tupple for LA */ + if ((enable_or_disable & PMO_OFFLOAD_ENABLE) && (i == 0)) { + /* Copy the target ip addr and flags */ + arp_tuple->flags = WMI_ARPOFF_FLAGS_VALID; + qdf_mem_copy(&arp_tuple->target_ipaddr, + offload_req->host_ipv4_addr, + WMI_IPV4_ADDR_LEN); + WMI_LOGD("ARPOffload IP4 address: %pI4", + offload_req->host_ipv4_addr); + } + *buf_ptr += sizeof(WMI_ARP_OFFLOAD_TUPLE); + } +} + +#ifdef WLAN_NS_OFFLOAD +/** + * fill_ns_offload_params_tlv() - Fill NS offload data + * @wmi|_handle: wmi handle + * @offload_req: offload request + * @buf_ptr: buffer pointer + * + * To fill NS offload data to firmware + * when target goes to wow mode. + * + * Return: None + */ +static void fill_ns_offload_params_tlv(wmi_unified_t wmi_handle, + struct pmo_ns_offload_params *ns_req, uint8_t **buf_ptr) +{ + + int i; + WMI_NS_OFFLOAD_TUPLE *ns_tuple; + + WMITLV_SET_HDR(*buf_ptr, WMITLV_TAG_ARRAY_STRUC, + (WMI_MAX_NS_OFFLOADS * sizeof(WMI_NS_OFFLOAD_TUPLE))); + *buf_ptr += WMI_TLV_HDR_SIZE; + for (i = 0; i < WMI_MAX_NS_OFFLOADS; i++) { + ns_tuple = (WMI_NS_OFFLOAD_TUPLE *)*buf_ptr; + WMITLV_SET_HDR(&ns_tuple->tlv_header, + WMITLV_TAG_STRUC_WMI_NS_OFFLOAD_TUPLE, + (sizeof(WMI_NS_OFFLOAD_TUPLE) - WMI_TLV_HDR_SIZE)); + + /* + * Fill data only for NS offload in the first ARP tuple for LA + */ + if ((ns_req->enable & PMO_OFFLOAD_ENABLE)) { + ns_tuple->flags |= WMI_NSOFF_FLAGS_VALID; + /* Copy the target/solicitation/remote ip addr */ + if (ns_req->target_ipv6_addr_valid[i]) + qdf_mem_copy(&ns_tuple->target_ipaddr[0], + &ns_req->target_ipv6_addr[i], + sizeof(WMI_IPV6_ADDR)); + qdf_mem_copy(&ns_tuple->solicitation_ipaddr, + &ns_req->self_ipv6_addr[i], + sizeof(WMI_IPV6_ADDR)); + if (ns_req->target_ipv6_addr_ac_type[i]) { + ns_tuple->flags |= + WMI_NSOFF_FLAGS_IS_IPV6_ANYCAST; + } + WMI_LOGD("Index %d NS solicitedIp %pI6, targetIp %pI6", + i, &ns_req->self_ipv6_addr[i], + &ns_req->target_ipv6_addr[i]); + + /* target MAC is optional, check if it is valid, + * if this is not valid, the target will use the known + * local MAC address rather than the tuple + */ + WMI_CHAR_ARRAY_TO_MAC_ADDR( + ns_req->self_macaddr.bytes, + &ns_tuple->target_mac); + if ((ns_tuple->target_mac.mac_addr31to0 != 0) || + (ns_tuple->target_mac.mac_addr47to32 != 0)) { + ns_tuple->flags |= WMI_NSOFF_FLAGS_MAC_VALID; + } + } + *buf_ptr += sizeof(WMI_NS_OFFLOAD_TUPLE); + } +} + +/** + * fill_nsoffload_ext_tlv() - Fill NS offload ext data + * @wmi: wmi handle + * @offload_req: offload request + * @buf_ptr: buffer pointer + * + * To fill extended NS offload extended data to firmware + * when target goes to wow mode. + * + * Return: None + */ +static void fill_nsoffload_ext_tlv(wmi_unified_t wmi_handle, + struct pmo_ns_offload_params *ns_req, uint8_t **buf_ptr) +{ + int i; + WMI_NS_OFFLOAD_TUPLE *ns_tuple; + uint32_t count, num_ns_ext_tuples; + + count = ns_req->num_ns_offload_count; + num_ns_ext_tuples = ns_req->num_ns_offload_count - + WMI_MAX_NS_OFFLOADS; + + /* Populate extended NS offload tuples */ + WMITLV_SET_HDR(*buf_ptr, WMITLV_TAG_ARRAY_STRUC, + (num_ns_ext_tuples * sizeof(WMI_NS_OFFLOAD_TUPLE))); + *buf_ptr += WMI_TLV_HDR_SIZE; + for (i = WMI_MAX_NS_OFFLOADS; i < count; i++) { + ns_tuple = (WMI_NS_OFFLOAD_TUPLE *)*buf_ptr; + WMITLV_SET_HDR(&ns_tuple->tlv_header, + WMITLV_TAG_STRUC_WMI_NS_OFFLOAD_TUPLE, + (sizeof(WMI_NS_OFFLOAD_TUPLE)-WMI_TLV_HDR_SIZE)); + + /* + * Fill data only for NS offload in the first ARP tuple for LA + */ + if ((ns_req->enable & PMO_OFFLOAD_ENABLE)) { + ns_tuple->flags |= WMI_NSOFF_FLAGS_VALID; + /* Copy the target/solicitation/remote ip addr */ + if (ns_req->target_ipv6_addr_valid[i]) + qdf_mem_copy(&ns_tuple->target_ipaddr[0], + &ns_req->target_ipv6_addr[i], + sizeof(WMI_IPV6_ADDR)); + qdf_mem_copy(&ns_tuple->solicitation_ipaddr, + &ns_req->self_ipv6_addr[i], + sizeof(WMI_IPV6_ADDR)); + if (ns_req->target_ipv6_addr_ac_type[i]) { + ns_tuple->flags |= + WMI_NSOFF_FLAGS_IS_IPV6_ANYCAST; + } + WMI_LOGD("Index %d NS solicitedIp %pI6, targetIp %pI6", + i, &ns_req->self_ipv6_addr[i], + &ns_req->target_ipv6_addr[i]); + + /* target MAC is optional, check if it is valid, + * if this is not valid, the target will use the + * known local MAC address rather than the tuple + */ + WMI_CHAR_ARRAY_TO_MAC_ADDR( + ns_req->self_macaddr.bytes, + &ns_tuple->target_mac); + if ((ns_tuple->target_mac.mac_addr31to0 != 0) || + (ns_tuple->target_mac.mac_addr47to32 != 0)) { + ns_tuple->flags |= WMI_NSOFF_FLAGS_MAC_VALID; + } + } + *buf_ptr += sizeof(WMI_NS_OFFLOAD_TUPLE); + } +} +#else +static void fill_ns_offload_params_tlv(wmi_unified_t wmi_handle, + struct pmo_ns_offload_params *ns_req, uint8_t **buf_ptr) +{ +} + +static void fill_nsoffload_ext_tlv(wmi_unified_t wmi_handle, + struct pmo_ns_offload_params *ns_req, uint8_t **buf_ptr) +{ +} +#endif + +/** + * send_enable_arp_ns_offload_cmd_tlv() - enable ARP NS offload + * @wma: wmi handle + * @arp_offload_req: arp offload request + * @ns_offload_req: ns offload request + * @arp_only: flag + * + * To configure ARP NS off load data to firmware + * when target goes to wow mode. + * + * Return: QDF Status + */ +static QDF_STATUS send_enable_arp_ns_offload_cmd_tlv(wmi_unified_t wmi_handle, + struct pmo_arp_offload_params *arp_offload_req, + struct pmo_ns_offload_params *ns_offload_req, + uint8_t vdev_id) +{ + int32_t res; + WMI_SET_ARP_NS_OFFLOAD_CMD_fixed_param *cmd; + uint8_t *buf_ptr; + wmi_buf_t buf; + int32_t len; + uint32_t count = 0, num_ns_ext_tuples = 0; + + count = ns_offload_req->num_ns_offload_count; + + /* + * TLV place holder size for array of NS tuples + * TLV place holder size for array of ARP tuples + */ + len = sizeof(WMI_SET_ARP_NS_OFFLOAD_CMD_fixed_param) + + WMI_TLV_HDR_SIZE + + WMI_MAX_NS_OFFLOADS * sizeof(WMI_NS_OFFLOAD_TUPLE) + + WMI_TLV_HDR_SIZE + + WMI_MAX_ARP_OFFLOADS * sizeof(WMI_ARP_OFFLOAD_TUPLE); + + /* + * If there are more than WMI_MAX_NS_OFFLOADS addresses then allocate + * extra length for extended NS offload tuples which follows ARP offload + * tuples. Host needs to fill this structure in following format: + * 2 NS ofload tuples + * 2 ARP offload tuples + * N numbers of extended NS offload tuples if HDD has given more than + * 2 NS offload addresses + */ + if (count > WMI_MAX_NS_OFFLOADS) { + num_ns_ext_tuples = count - WMI_MAX_NS_OFFLOADS; + len += WMI_TLV_HDR_SIZE + num_ns_ext_tuples + * sizeof(WMI_NS_OFFLOAD_TUPLE); + } + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (WMI_SET_ARP_NS_OFFLOAD_CMD_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_SET_ARP_NS_OFFLOAD_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_SET_ARP_NS_OFFLOAD_CMD_fixed_param)); + cmd->flags = 0; + cmd->vdev_id = vdev_id; + cmd->num_ns_ext_tuples = num_ns_ext_tuples; + + WMI_LOGD("ARP NS Offload vdev_id: %d", cmd->vdev_id); + + buf_ptr += sizeof(WMI_SET_ARP_NS_OFFLOAD_CMD_fixed_param); + fill_ns_offload_params_tlv(wmi_handle, ns_offload_req, &buf_ptr); + fill_arp_offload_params_tlv(wmi_handle, arp_offload_req, &buf_ptr); + if (num_ns_ext_tuples) + fill_nsoffload_ext_tlv(wmi_handle, ns_offload_req, &buf_ptr); + + wmi_mtrace(WMI_SET_ARP_NS_OFFLOAD_CMDID, cmd->vdev_id, 0); + res = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_SET_ARP_NS_OFFLOAD_CMDID); + if (res) { + WMI_LOGE("Failed to enable ARP NDP/NSffload"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_add_clear_mcbc_filter_cmd_tlv() - set mcast filter command to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @multicastAddr: mcast address + * @clearList: clear list flag + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_add_clear_mcbc_filter_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct qdf_mac_addr multicast_addr, + bool clearList) +{ + WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param *cmd; + wmi_buf_t buf; + int err; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param *) wmi_buf_data(buf); + qdf_mem_zero(cmd, sizeof(*cmd)); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param)); + cmd->action = + (clearList ? WMI_MCAST_FILTER_DELETE : WMI_MCAST_FILTER_SET); + cmd->vdev_id = vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(multicast_addr.bytes, &cmd->mcastbdcastaddr); + + WMI_LOGD("Action:%d; vdev_id:%d; clearList:%d; MCBC MAC Addr: "QDF_MAC_ADDR_FMT, + cmd->action, vdev_id, clearList, + QDF_MAC_ADDR_REF(multicast_addr.bytes)); + + wmi_mtrace(WMI_SET_MCASTBCAST_FILTER_CMDID, cmd->vdev_id, 0); + err = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(*cmd), + WMI_SET_MCASTBCAST_FILTER_CMDID); + if (err) { + WMI_LOGE("Failed to send set_param cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_multiple_add_clear_mcbc_filter_cmd_tlv() - send multiple mcast filter + * command to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @mcast_filter_params: mcast filter params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_multiple_add_clear_mcbc_filter_cmd_tlv( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct pmo_mcast_filter_params *filter_param) + +{ + WMI_SET_MULTIPLE_MCAST_FILTER_CMD_fixed_param *cmd; + uint8_t *buf_ptr; + wmi_buf_t buf; + int err; + int i; + uint8_t *mac_addr_src_ptr = NULL; + wmi_mac_addr *mac_addr_dst_ptr; + uint32_t len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + + sizeof(wmi_mac_addr) * filter_param->multicast_addr_cnt; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (WMI_SET_MULTIPLE_MCAST_FILTER_CMD_fixed_param *) + wmi_buf_data(buf); + qdf_mem_zero(cmd, sizeof(*cmd)); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_set_multiple_mcast_filter_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_SET_MULTIPLE_MCAST_FILTER_CMD_fixed_param)); + cmd->operation = + ((filter_param->action == 0) ? WMI_MULTIPLE_MCAST_FILTER_DELETE + : WMI_MULTIPLE_MCAST_FILTER_ADD); + cmd->vdev_id = vdev_id; + cmd->num_mcastaddrs = filter_param->multicast_addr_cnt; + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_FIXED_STRUC, + sizeof(wmi_mac_addr) * + filter_param->multicast_addr_cnt); + + if (filter_param->multicast_addr_cnt == 0) + goto send_cmd; + + mac_addr_src_ptr = (uint8_t *)&filter_param->multicast_addr; + mac_addr_dst_ptr = (wmi_mac_addr *) + (buf_ptr + WMI_TLV_HDR_SIZE); + + for (i = 0; i < filter_param->multicast_addr_cnt; i++) { + WMI_CHAR_ARRAY_TO_MAC_ADDR(mac_addr_src_ptr, mac_addr_dst_ptr); + mac_addr_src_ptr += ATH_MAC_LEN; + mac_addr_dst_ptr++; + } + +send_cmd: + wmi_mtrace(WMI_SET_MULTIPLE_MCAST_FILTER_CMDID, cmd->vdev_id, 0); + err = wmi_unified_cmd_send(wmi_handle, buf, + len, + WMI_SET_MULTIPLE_MCAST_FILTER_CMDID); + if (err) { + WMI_LOGE("Failed to send set_param cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS send_conf_hw_filter_cmd_tlv(wmi_unified_t wmi, + struct pmo_hw_filter_params *req) +{ + QDF_STATUS status; + wmi_hw_data_filter_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + + if (!req) { + WMI_LOGE("req is null"); + return QDF_STATUS_E_INVAL; + } + + wmi_buf = wmi_buf_alloc(wmi, sizeof(*cmd)); + if (!wmi_buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_hw_data_filter_cmd_fixed_param *)wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_hw_data_filter_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_hw_data_filter_cmd_fixed_param)); + cmd->vdev_id = req->vdev_id; + cmd->enable = req->enable; + /* Set all modes in case of disable */ + if (!cmd->enable) + cmd->hw_filter_bitmap = ((uint32_t)~0U); + else + cmd->hw_filter_bitmap = req->mode_bitmap; + + WMI_LOGD("Send %s hw filter mode: 0x%X for vdev id %d", + req->enable ? "enable" : "disable", req->mode_bitmap, + req->vdev_id); + + wmi_mtrace(WMI_HW_DATA_FILTER_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi, wmi_buf, sizeof(*cmd), + WMI_HW_DATA_FILTER_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to configure hw filter"); + wmi_buf_free(wmi_buf); + } + + return status; +} + +static void +fill_fils_tlv_params(WMI_GTK_OFFLOAD_CMD_fixed_param *cmd, + uint8_t vdev_id, + struct pmo_gtk_req *params) +{ + uint8_t *buf_ptr; + wmi_gtk_offload_fils_tlv_param *ext_param; + + buf_ptr = (uint8_t *) cmd + sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(*ext_param)); + buf_ptr += WMI_TLV_HDR_SIZE; + + ext_param = (wmi_gtk_offload_fils_tlv_param *)buf_ptr; + WMITLV_SET_HDR(&ext_param->tlv_header, + WMITLV_TAG_STRUC_wmi_gtk_offload_extended_tlv_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_gtk_offload_fils_tlv_param)); + ext_param->vdev_id = vdev_id; + ext_param->flags = cmd->flags; + ext_param->kek_len = params->kek_len; + qdf_mem_copy(ext_param->KEK, params->kek, params->kek_len); + qdf_mem_copy(ext_param->KCK, params->kck, + WMI_GTK_OFFLOAD_KCK_BYTES); + qdf_mem_copy(ext_param->replay_counter, ¶ms->replay_counter, + GTK_REPLAY_COUNTER_BYTES); +} + +/** + * send_gtk_offload_cmd_tlv() - send GTK offload command to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @params: GTK offload parameters + * + * Return: CDF status + */ +static +QDF_STATUS send_gtk_offload_cmd_tlv(wmi_unified_t wmi_handle, uint8_t vdev_id, + struct pmo_gtk_req *params, + bool enable_offload, + uint32_t gtk_offload_opcode) +{ + int len; + wmi_buf_t buf; + WMI_GTK_OFFLOAD_CMD_fixed_param *cmd; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + WMI_LOGD("%s Enter", __func__); + + len = sizeof(*cmd); + + if (params->is_fils_connection) + len += WMI_TLV_HDR_SIZE + + sizeof(wmi_gtk_offload_fils_tlv_param); + + /* alloc wmi buffer */ + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + status = QDF_STATUS_E_NOMEM; + goto out; + } + + cmd = (WMI_GTK_OFFLOAD_CMD_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_GTK_OFFLOAD_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_GTK_OFFLOAD_CMD_fixed_param)); + + cmd->vdev_id = vdev_id; + + /* Request target to enable GTK offload */ + if (enable_offload == PMO_GTK_OFFLOAD_ENABLE) { + cmd->flags = gtk_offload_opcode; + + /* Copy the keys and replay counter */ + qdf_mem_copy(cmd->KCK, params->kck, sizeof(cmd->KCK)); + qdf_mem_copy(cmd->KEK, params->kek, sizeof(cmd->KEK)); + qdf_mem_copy(cmd->replay_counter, ¶ms->replay_counter, + GTK_REPLAY_COUNTER_BYTES); + } else { + cmd->flags = gtk_offload_opcode; + } + if (params->is_fils_connection) + fill_fils_tlv_params(cmd, vdev_id, params); + + WMI_LOGD("VDEVID: %d, GTK_FLAGS: x%x kek len %d", vdev_id, cmd->flags, params->kek_len); + /* send the wmi command */ + wmi_mtrace(WMI_GTK_OFFLOAD_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_GTK_OFFLOAD_CMDID)) { + WMI_LOGE("Failed to send WMI_GTK_OFFLOAD_CMDID"); + wmi_buf_free(buf); + status = QDF_STATUS_E_FAILURE; + } + +out: + WMI_LOGD("%s Exit", __func__); + return status; +} + +/** + * send_process_gtk_offload_getinfo_cmd_tlv() - send GTK offload cmd to fw + * @wmi_handle: wmi handle + * @params: GTK offload params + * + * Return: CDF status + */ +static QDF_STATUS send_process_gtk_offload_getinfo_cmd_tlv( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint64_t offload_req_opcode) +{ + int len; + wmi_buf_t buf; + WMI_GTK_OFFLOAD_CMD_fixed_param *cmd; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + len = sizeof(*cmd); + + /* alloc wmi buffer */ + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + status = QDF_STATUS_E_NOMEM; + goto out; + } + + cmd = (WMI_GTK_OFFLOAD_CMD_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_GTK_OFFLOAD_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_GTK_OFFLOAD_CMD_fixed_param)); + + /* Request for GTK offload status */ + cmd->flags = offload_req_opcode; + cmd->vdev_id = vdev_id; + + /* send the wmi command */ + wmi_mtrace(WMI_GTK_OFFLOAD_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_GTK_OFFLOAD_CMDID)) { + WMI_LOGE("Failed to send WMI_GTK_OFFLOAD_CMDID for req info"); + wmi_buf_free(buf); + status = QDF_STATUS_E_FAILURE; + } + +out: + return status; +} + +/** + * send_enable_enhance_multicast_offload_tlv() - send enhance multicast offload + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @action: true for enable else false + * + * To enable enhance multicast offload to firmware + * when target goes to wow mode. + * + * Return: QDF Status + */ + +static +QDF_STATUS send_enable_enhance_multicast_offload_tlv( + wmi_unified_t wmi_handle, + uint8_t vdev_id, bool action) +{ + QDF_STATUS status; + wmi_buf_t buf; + wmi_config_enhanced_mcast_filter_cmd_fixed_param *cmd; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_config_enhanced_mcast_filter_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_config_enhanced_mcast_filter_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_config_enhanced_mcast_filter_cmd_fixed_param)); + + cmd->vdev_id = vdev_id; + cmd->enable = ((action == 0) ? ENHANCED_MCAST_FILTER_DISABLED : + ENHANCED_MCAST_FILTER_ENABLED); + WMI_LOGD("%s: config enhance multicast offload action %d for vdev %d", + __func__, action, vdev_id); + wmi_mtrace(WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(*cmd), WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID); + if (status != QDF_STATUS_SUCCESS) { + wmi_buf_free(buf); + WMI_LOGE("%s:Failed to send ENHANCED_MCAST_FILTER_CMDID", + __func__); + } + + return status; +} + +/** + * extract_gtk_rsp_event_tlv() - extract gtk rsp params from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param hdr: Pointer to hold header + * @param bufp: Pointer to hold pointer to rx param buffer + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_gtk_rsp_event_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct pmo_gtk_rsp_params *gtk_rsp_param, uint32_t len) +{ + WMI_GTK_OFFLOAD_STATUS_EVENT_fixed_param *fixed_param; + WMI_GTK_OFFLOAD_STATUS_EVENTID_param_tlvs *param_buf; + + param_buf = (WMI_GTK_OFFLOAD_STATUS_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("gtk param_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (len < sizeof(WMI_GTK_OFFLOAD_STATUS_EVENT_fixed_param)) { + WMI_LOGE("Invalid length for GTK status"); + return QDF_STATUS_E_INVAL; + } + + fixed_param = (WMI_GTK_OFFLOAD_STATUS_EVENT_fixed_param *) + param_buf->fixed_param; + + if (fixed_param->vdev_id >= WLAN_UMAC_PSOC_MAX_VDEVS) { + wmi_err_rl("Invalid vdev_id %u", fixed_param->vdev_id); + return QDF_STATUS_E_INVAL; + } + + gtk_rsp_param->vdev_id = fixed_param->vdev_id; + gtk_rsp_param->status_flag = QDF_STATUS_SUCCESS; + gtk_rsp_param->refresh_cnt = fixed_param->refresh_cnt; + qdf_mem_copy(>k_rsp_param->replay_counter, + &fixed_param->replay_counter, + GTK_REPLAY_COUNTER_BYTES); + + return QDF_STATUS_SUCCESS; + +} + +#ifdef FEATURE_WLAN_RA_FILTERING +/** + * send_wow_sta_ra_filter_cmd_tlv() - set RA filter pattern in fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: CDF status + */ +static QDF_STATUS send_wow_sta_ra_filter_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint8_t default_pattern, + uint16_t rate_limit_interval) +{ + + WMI_WOW_ADD_PATTERN_CMD_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + int32_t len; + int ret; + + len = sizeof(WMI_WOW_ADD_PATTERN_CMD_fixed_param) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(WOW_BITMAP_PATTERN_T) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(WOW_IPV4_SYNC_PATTERN_T) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(WOW_IPV6_SYNC_PATTERN_T) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(WOW_MAGIC_PATTERN_CMD) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(uint32_t) + WMI_TLV_HDR_SIZE + 1 * sizeof(uint32_t); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_WOW_ADD_PATTERN_CMD_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *) cmd; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_WOW_ADD_PATTERN_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_WOW_ADD_PATTERN_CMD_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->pattern_id = default_pattern, + cmd->pattern_type = WOW_IPV6_RA_PATTERN; + buf_ptr += sizeof(WMI_WOW_ADD_PATTERN_CMD_fixed_param); + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_BITMAP_PATTERN_T but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_IPV4_SYNC_PATTERN_T but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_IPV6_SYNC_PATTERN_T but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_MAGIC_PATTERN_CMD but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for pattern_info_timeout but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for ra_ratelimit_interval. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, sizeof(uint32_t)); + buf_ptr += WMI_TLV_HDR_SIZE; + + *((uint32_t *) buf_ptr) = rate_limit_interval; + + WMI_LOGD("%s: send RA rate limit [%d] to fw vdev = %d", __func__, + rate_limit_interval, vdev_id); + + wmi_mtrace(WMI_WOW_ADD_WAKE_PATTERN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_ADD_WAKE_PATTERN_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send RA rate limit to fw", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +void wmi_ra_filtering_attach_tlv(struct wmi_unified *wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_wow_sta_ra_filter_cmd = send_wow_sta_ra_filter_cmd_tlv; +} +#endif /* FEATURE_WLAN_RA_FILTERING */ + +/** + * send_action_frame_patterns_cmd_tlv() - send wmi cmd of action filter params + * @wmi_handle: wmi handler + * @action_params: pointer to action_params + * + * Return: 0 for success, otherwise appropriate error code + */ +static QDF_STATUS send_action_frame_patterns_cmd_tlv(wmi_unified_t wmi_handle, + struct pmo_action_wakeup_set_params *action_params) +{ + WMI_WOW_SET_ACTION_WAKE_UP_CMD_fixed_param *cmd; + wmi_buf_t buf; + int i; + int32_t err; + uint32_t len = 0, *cmd_args; + uint8_t *buf_ptr; + + len = (PMO_SUPPORTED_ACTION_CATE * sizeof(uint32_t)) + + WMI_TLV_HDR_SIZE + sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (WMI_WOW_SET_ACTION_WAKE_UP_CMD_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *)cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wow_set_action_wake_up_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + WMI_WOW_SET_ACTION_WAKE_UP_CMD_fixed_param)); + + cmd->vdev_id = action_params->vdev_id; + cmd->operation = action_params->operation; + + for (i = 0; i < MAX_SUPPORTED_ACTION_CATEGORY_ELE_LIST; i++) + cmd->action_category_map[i] = + action_params->action_category_map[i]; + + buf_ptr += sizeof(WMI_WOW_SET_ACTION_WAKE_UP_CMD_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (PMO_SUPPORTED_ACTION_CATE * sizeof(uint32_t))); + buf_ptr += WMI_TLV_HDR_SIZE; + cmd_args = (uint32_t *) buf_ptr; + for (i = 0; i < PMO_SUPPORTED_ACTION_CATE; i++) + cmd_args[i] = action_params->action_per_category[i]; + + wmi_mtrace(WMI_WOW_SET_ACTION_WAKE_UP_CMDID, cmd->vdev_id, 0); + err = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_WOW_SET_ACTION_WAKE_UP_CMDID); + if (err) { + WMI_LOGE("Failed to send ap_ps_egap cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef FEATURE_WLAN_LPHB +/** + * send_lphb_config_hbenable_cmd_tlv() - enable command of LPHB configuration + * @wmi_handle: wmi handle + * @lphb_conf_req: configuration info + * + * Return: CDF status + */ +static QDF_STATUS send_lphb_config_hbenable_cmd_tlv(wmi_unified_t wmi_handle, + wmi_hb_set_enable_cmd_fixed_param *params) +{ + QDF_STATUS status; + wmi_buf_t buf = NULL; + uint8_t *buf_ptr; + wmi_hb_set_enable_cmd_fixed_param *hb_enable_fp; + int len = sizeof(wmi_hb_set_enable_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + hb_enable_fp = (wmi_hb_set_enable_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&hb_enable_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_hb_set_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_hb_set_enable_cmd_fixed_param)); + + /* fill in values */ + hb_enable_fp->vdev_id = params->session; + hb_enable_fp->enable = params->enable; + hb_enable_fp->item = params->item; + hb_enable_fp->session = params->session; + + wmi_mtrace(WMI_HB_SET_ENABLE_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_HB_SET_ENABLE_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("cmd_send WMI_HB_SET_ENABLE returned Error %d", + status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_lphb_config_tcp_params_cmd_tlv() - set tcp params of LPHB configuration + * @wmi_handle: wmi handle + * @lphb_conf_req: lphb config request + * + * Return: CDF status + */ +static QDF_STATUS send_lphb_config_tcp_params_cmd_tlv(wmi_unified_t wmi_handle, + wmi_hb_set_tcp_params_cmd_fixed_param *lphb_conf_req) +{ + QDF_STATUS status; + wmi_buf_t buf = NULL; + uint8_t *buf_ptr; + wmi_hb_set_tcp_params_cmd_fixed_param *hb_tcp_params_fp; + int len = sizeof(wmi_hb_set_tcp_params_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + hb_tcp_params_fp = (wmi_hb_set_tcp_params_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&hb_tcp_params_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_hb_set_tcp_params_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_hb_set_tcp_params_cmd_fixed_param)); + + /* fill in values */ + hb_tcp_params_fp->vdev_id = lphb_conf_req->vdev_id; + hb_tcp_params_fp->srv_ip = lphb_conf_req->srv_ip; + hb_tcp_params_fp->dev_ip = lphb_conf_req->dev_ip; + hb_tcp_params_fp->seq = lphb_conf_req->seq; + hb_tcp_params_fp->src_port = lphb_conf_req->src_port; + hb_tcp_params_fp->dst_port = lphb_conf_req->dst_port; + hb_tcp_params_fp->interval = lphb_conf_req->interval; + hb_tcp_params_fp->timeout = lphb_conf_req->timeout; + hb_tcp_params_fp->session = lphb_conf_req->session; + qdf_mem_copy(&hb_tcp_params_fp->gateway_mac, + &lphb_conf_req->gateway_mac, + sizeof(hb_tcp_params_fp->gateway_mac)); + + wmi_mtrace(WMI_HB_SET_TCP_PARAMS_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_HB_SET_TCP_PARAMS_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("cmd_send WMI_HB_SET_TCP_PARAMS returned Error %d", + status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_lphb_config_tcp_pkt_filter_cmd_tlv() - configure tcp packet filter cmd + * @wmi_handle: wmi handle + * @lphb_conf_req: lphb config request + * + * Return: CDF status + */ +static +QDF_STATUS send_lphb_config_tcp_pkt_filter_cmd_tlv(wmi_unified_t wmi_handle, + wmi_hb_set_tcp_pkt_filter_cmd_fixed_param *g_hb_tcp_filter_fp) +{ + QDF_STATUS status; + wmi_buf_t buf = NULL; + uint8_t *buf_ptr; + wmi_hb_set_tcp_pkt_filter_cmd_fixed_param *hb_tcp_filter_fp; + int len = sizeof(wmi_hb_set_tcp_pkt_filter_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + hb_tcp_filter_fp = + (wmi_hb_set_tcp_pkt_filter_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&hb_tcp_filter_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_hb_set_tcp_pkt_filter_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_hb_set_tcp_pkt_filter_cmd_fixed_param)); + + /* fill in values */ + hb_tcp_filter_fp->vdev_id = g_hb_tcp_filter_fp->vdev_id; + hb_tcp_filter_fp->length = g_hb_tcp_filter_fp->length; + hb_tcp_filter_fp->offset = g_hb_tcp_filter_fp->offset; + hb_tcp_filter_fp->session = g_hb_tcp_filter_fp->session; + memcpy((void *)&hb_tcp_filter_fp->filter, + (void *)&g_hb_tcp_filter_fp->filter, + WMI_WLAN_HB_MAX_FILTER_SIZE); + + wmi_mtrace(WMI_HB_SET_TCP_PKT_FILTER_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_HB_SET_TCP_PKT_FILTER_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("cmd_send WMI_HB_SET_TCP_PKT_FILTER returned Error %d", + status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_lphb_config_udp_params_cmd_tlv() - configure udp param command of LPHB + * @wmi_handle: wmi handle + * @lphb_conf_req: lphb config request + * + * Return: CDF status + */ +static QDF_STATUS send_lphb_config_udp_params_cmd_tlv(wmi_unified_t wmi_handle, + wmi_hb_set_udp_params_cmd_fixed_param *lphb_conf_req) +{ + QDF_STATUS status; + wmi_buf_t buf = NULL; + uint8_t *buf_ptr; + wmi_hb_set_udp_params_cmd_fixed_param *hb_udp_params_fp; + int len = sizeof(wmi_hb_set_udp_params_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + hb_udp_params_fp = (wmi_hb_set_udp_params_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&hb_udp_params_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_hb_set_udp_params_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_hb_set_udp_params_cmd_fixed_param)); + + /* fill in values */ + hb_udp_params_fp->vdev_id = lphb_conf_req->vdev_id; + hb_udp_params_fp->srv_ip = lphb_conf_req->srv_ip; + hb_udp_params_fp->dev_ip = lphb_conf_req->dev_ip; + hb_udp_params_fp->src_port = lphb_conf_req->src_port; + hb_udp_params_fp->dst_port = lphb_conf_req->dst_port; + hb_udp_params_fp->interval = lphb_conf_req->interval; + hb_udp_params_fp->timeout = lphb_conf_req->timeout; + hb_udp_params_fp->session = lphb_conf_req->session; + qdf_mem_copy(&hb_udp_params_fp->gateway_mac, + &lphb_conf_req->gateway_mac, + sizeof(lphb_conf_req->gateway_mac)); + + wmi_mtrace(WMI_HB_SET_UDP_PARAMS_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_HB_SET_UDP_PARAMS_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("cmd_send WMI_HB_SET_UDP_PARAMS returned Error %d", + status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_lphb_config_udp_pkt_filter_cmd_tlv() - configure udp pkt filter command + * @wmi_handle: wmi handle + * @lphb_conf_req: lphb config request + * + * Return: CDF status + */ +static +QDF_STATUS send_lphb_config_udp_pkt_filter_cmd_tlv(wmi_unified_t wmi_handle, + wmi_hb_set_udp_pkt_filter_cmd_fixed_param *lphb_conf_req) +{ + QDF_STATUS status; + wmi_buf_t buf = NULL; + uint8_t *buf_ptr; + wmi_hb_set_udp_pkt_filter_cmd_fixed_param *hb_udp_filter_fp; + int len = sizeof(wmi_hb_set_udp_pkt_filter_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + hb_udp_filter_fp = + (wmi_hb_set_udp_pkt_filter_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&hb_udp_filter_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_hb_set_udp_pkt_filter_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_hb_set_udp_pkt_filter_cmd_fixed_param)); + + /* fill in values */ + hb_udp_filter_fp->vdev_id = lphb_conf_req->vdev_id; + hb_udp_filter_fp->length = lphb_conf_req->length; + hb_udp_filter_fp->offset = lphb_conf_req->offset; + hb_udp_filter_fp->session = lphb_conf_req->session; + memcpy((void *)&hb_udp_filter_fp->filter, + (void *)&lphb_conf_req->filter, + WMI_WLAN_HB_MAX_FILTER_SIZE); + + wmi_mtrace(WMI_HB_SET_UDP_PKT_FILTER_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_HB_SET_UDP_PKT_FILTER_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("cmd_send WMI_HB_SET_UDP_PKT_FILTER returned Error %d", + status); + wmi_buf_free(buf); + } + + return status; +} + +void wmi_lphb_attach_tlv(struct wmi_unified *wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_lphb_config_hbenable_cmd = + send_lphb_config_hbenable_cmd_tlv; + ops->send_lphb_config_tcp_params_cmd = + send_lphb_config_tcp_params_cmd_tlv; + ops->send_lphb_config_tcp_pkt_filter_cmd = + send_lphb_config_tcp_pkt_filter_cmd_tlv; + ops->send_lphb_config_udp_params_cmd = + send_lphb_config_udp_params_cmd_tlv; + ops->send_lphb_config_udp_pkt_filter_cmd = + send_lphb_config_udp_pkt_filter_cmd_tlv; +} +#endif /* FEATURE_WLAN_LPHB */ + +#ifdef WLAN_FEATURE_PACKET_FILTERING +/** + * send_enable_disable_packet_filter_cmd_tlv() - enable/disable packet filter + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @enable: Flag to enable/disable packet filter + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_enable_disable_packet_filter_cmd_tlv( + wmi_unified_t wmi_handle, uint8_t vdev_id, bool enable) +{ + int32_t len; + int ret = 0; + wmi_buf_t buf; + WMI_PACKET_FILTER_ENABLE_CMD_fixed_param *cmd; + + len = sizeof(WMI_PACKET_FILTER_ENABLE_CMD_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_PACKET_FILTER_ENABLE_CMD_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_packet_filter_enable_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + WMI_PACKET_FILTER_ENABLE_CMD_fixed_param)); + + cmd->vdev_id = vdev_id; + if (enable) + cmd->enable = PACKET_FILTER_SET_ENABLE; + else + cmd->enable = PACKET_FILTER_SET_DISABLE; + + WMI_LOGE("%s: Packet filter enable %d for vdev_id %d", + __func__, cmd->enable, vdev_id); + + wmi_mtrace(WMI_PACKET_FILTER_ENABLE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PACKET_FILTER_ENABLE_CMDID); + if (ret) { + WMI_LOGE("Failed to send packet filter wmi cmd to fw"); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_config_packet_filter_cmd_tlv() - configure packet filter in target + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @rcv_filter_param: Packet filter parameters + * @filter_id: Filter id + * @enable: Flag to add/delete packet filter configuration + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_config_packet_filter_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, struct pmo_rcv_pkt_fltr_cfg *rcv_filter_param, + uint8_t filter_id, bool enable) +{ + int len, i; + int err = 0; + wmi_buf_t buf; + WMI_PACKET_FILTER_CONFIG_CMD_fixed_param *cmd; + + /* allocate the memory */ + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_PACKET_FILTER_CONFIG_CMD_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_packet_filter_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_PACKET_FILTER_CONFIG_CMD_fixed_param)); + + cmd->vdev_id = vdev_id; + cmd->filter_id = filter_id; + if (enable) + cmd->filter_action = PACKET_FILTER_SET_ACTIVE; + else + cmd->filter_action = PACKET_FILTER_SET_INACTIVE; + + if (enable) { + cmd->num_params = QDF_MIN( + WMI_PACKET_FILTER_MAX_CMP_PER_PACKET_FILTER, + rcv_filter_param->num_params); + cmd->filter_type = rcv_filter_param->filter_type; + cmd->coalesce_time = rcv_filter_param->coalesce_time; + + for (i = 0; i < cmd->num_params; i++) { + cmd->paramsData[i].proto_type = + rcv_filter_param->params_data[i].protocol_layer; + cmd->paramsData[i].cmp_type = + rcv_filter_param->params_data[i].compare_flag; + cmd->paramsData[i].data_length = + rcv_filter_param->params_data[i].data_length; + cmd->paramsData[i].data_offset = + rcv_filter_param->params_data[i].data_offset; + memcpy(&cmd->paramsData[i].compareData, + rcv_filter_param->params_data[i].compare_data, + sizeof(cmd->paramsData[i].compareData)); + memcpy(&cmd->paramsData[i].dataMask, + rcv_filter_param->params_data[i].data_mask, + sizeof(cmd->paramsData[i].dataMask)); + } + } + + WMI_LOGE("Packet filter action %d filter with id: %d, num_params=%d", + cmd->filter_action, cmd->filter_id, cmd->num_params); + /* send the command along with data */ + wmi_mtrace(WMI_PACKET_FILTER_CONFIG_CMDID, cmd->vdev_id, 0); + err = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PACKET_FILTER_CONFIG_CMDID); + if (err) { + WMI_LOGE("Failed to send pkt_filter cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +void wmi_packet_filtering_attach_tlv(struct wmi_unified *wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_enable_disable_packet_filter_cmd = + send_enable_disable_packet_filter_cmd_tlv; + ops->send_config_packet_filter_cmd = + send_config_packet_filter_cmd_tlv; +} +#endif /* WLAN_FEATURE_PACKET_FILTERING */ + +/** + * send_wow_delete_pattern_cmd_tlv() - delete wow pattern in target + * @wmi_handle: wmi handle + * @ptrn_id: pattern id + * @vdev_id: vdev id + * + * Return: CDF status + */ +static QDF_STATUS send_wow_delete_pattern_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t ptrn_id, + uint8_t vdev_id) +{ + WMI_WOW_DEL_PATTERN_CMD_fixed_param *cmd; + wmi_buf_t buf; + int32_t len; + int ret; + + len = sizeof(WMI_WOW_DEL_PATTERN_CMD_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_WOW_DEL_PATTERN_CMD_fixed_param *) wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_WOW_DEL_PATTERN_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + WMI_WOW_DEL_PATTERN_CMD_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->pattern_id = ptrn_id; + cmd->pattern_type = WOW_BITMAP_PATTERN; + + wmi_mtrace(WMI_WOW_DEL_WAKE_PATTERN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_DEL_WAKE_PATTERN_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to delete wow ptrn from fw", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_host_wakeup_ind_to_fw_cmd_tlv() - send wakeup ind to fw + * @wmi_handle: wmi handle + * + * Sends host wakeup indication to FW. On receiving this indication, + * FW will come out of WOW. + * + * Return: CDF status + */ +static QDF_STATUS send_host_wakeup_ind_to_fw_cmd_tlv(wmi_unified_t wmi_handle) +{ + wmi_wow_hostwakeup_from_sleep_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; + int32_t len; + int ret; + + len = sizeof(wmi_wow_hostwakeup_from_sleep_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_wow_hostwakeup_from_sleep_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wow_hostwakeup_from_sleep_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_wow_hostwakeup_from_sleep_cmd_fixed_param)); + + wmi_mtrace(WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID); + if (ret) { + WMI_LOGE("Failed to send host wakeup indication to fw"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return qdf_status; +} + +/** + * send_wow_timer_pattern_cmd_tlv() - set timer pattern tlv, so that firmware + * will wake up host after specified time is elapsed + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @cookie: value to identify reason why host set up wake call. + * @time: time in ms + * + * Return: QDF status + */ +static QDF_STATUS send_wow_timer_pattern_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, uint32_t cookie, uint32_t time) +{ + WMI_WOW_ADD_PATTERN_CMD_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + int32_t len; + int ret; + + len = sizeof(WMI_WOW_ADD_PATTERN_CMD_fixed_param) + + WMI_TLV_HDR_SIZE + 0 * sizeof(WOW_BITMAP_PATTERN_T) + + WMI_TLV_HDR_SIZE + 0 * sizeof(WOW_IPV4_SYNC_PATTERN_T) + + WMI_TLV_HDR_SIZE + 0 * sizeof(WOW_IPV6_SYNC_PATTERN_T) + + WMI_TLV_HDR_SIZE + 0 * sizeof(WOW_MAGIC_PATTERN_CMD) + + WMI_TLV_HDR_SIZE + 1 * sizeof(uint32_t) + + WMI_TLV_HDR_SIZE + 1 * sizeof(uint32_t); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_WOW_ADD_PATTERN_CMD_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *) cmd; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_WOW_ADD_PATTERN_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_WOW_ADD_PATTERN_CMD_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->pattern_id = cookie, + cmd->pattern_type = WOW_TIMER_PATTERN; + buf_ptr += sizeof(WMI_WOW_ADD_PATTERN_CMD_fixed_param); + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_BITMAP_PATTERN_T but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_IPV4_SYNC_PATTERN_T but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_IPV6_SYNC_PATTERN_T but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_MAGIC_PATTERN_CMD but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for pattern_info_timeout, and time value */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, sizeof(uint32_t)); + buf_ptr += WMI_TLV_HDR_SIZE; + *((uint32_t *) buf_ptr) = time; + buf_ptr += sizeof(uint32_t); + + /* Fill TLV for ra_ratelimit_interval. with dummy 0 value */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, sizeof(uint32_t)); + buf_ptr += WMI_TLV_HDR_SIZE; + *((uint32_t *) buf_ptr) = 0; + + WMI_LOGD("%s: send wake timer pattern with time[%d] to fw vdev = %d", + __func__, time, vdev_id); + + wmi_mtrace(WMI_WOW_ADD_WAKE_PATTERN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_ADD_WAKE_PATTERN_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send wake timer pattern to fw", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_EXTWOW_SUPPORT +/** + * send_enable_ext_wow_cmd_tlv() - enable ext wow in fw + * @wmi_handle: wmi handle + * @params: ext wow params + * + * Return:0 for success or error code + */ +static QDF_STATUS send_enable_ext_wow_cmd_tlv(wmi_unified_t wmi_handle, + struct ext_wow_params *params) +{ + wmi_extwow_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len; + int ret; + + len = sizeof(wmi_extwow_enable_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_extwow_enable_cmd_fixed_param *) wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extwow_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extwow_enable_cmd_fixed_param)); + + cmd->vdev_id = params->vdev_id; + cmd->type = params->type; + cmd->wakeup_pin_num = params->wakeup_pin_num; + + WMI_LOGD("%s: vdev_id %d type %d Wakeup_pin_num %x", + __func__, cmd->vdev_id, cmd->type, cmd->wakeup_pin_num); + + wmi_mtrace(WMI_EXTWOW_ENABLE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_EXTWOW_ENABLE_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to set EXTWOW Enable", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; + +} + +/** + * send_set_app_type2_params_in_fw_cmd_tlv() - set app type2 params in fw + * @wmi_handle: wmi handle + * @appType2Params: app type2 params + * + * Return: CDF status + */ +static QDF_STATUS send_set_app_type2_params_in_fw_cmd_tlv(wmi_unified_t wmi_handle, + struct app_type2_params *appType2Params) +{ + wmi_extwow_set_app_type2_params_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len; + int ret; + + len = sizeof(wmi_extwow_set_app_type2_params_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_extwow_set_app_type2_params_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extwow_set_app_type2_params_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extwow_set_app_type2_params_cmd_fixed_param)); + + cmd->vdev_id = appType2Params->vdev_id; + + qdf_mem_copy(cmd->rc4_key, appType2Params->rc4_key, 16); + cmd->rc4_key_len = appType2Params->rc4_key_len; + + cmd->ip_id = appType2Params->ip_id; + cmd->ip_device_ip = appType2Params->ip_device_ip; + cmd->ip_server_ip = appType2Params->ip_server_ip; + + cmd->tcp_src_port = appType2Params->tcp_src_port; + cmd->tcp_dst_port = appType2Params->tcp_dst_port; + cmd->tcp_seq = appType2Params->tcp_seq; + cmd->tcp_ack_seq = appType2Params->tcp_ack_seq; + + cmd->keepalive_init = appType2Params->keepalive_init; + cmd->keepalive_min = appType2Params->keepalive_min; + cmd->keepalive_max = appType2Params->keepalive_max; + cmd->keepalive_inc = appType2Params->keepalive_inc; + + WMI_CHAR_ARRAY_TO_MAC_ADDR(appType2Params->gateway_mac.bytes, + &cmd->gateway_mac); + cmd->tcp_tx_timeout_val = appType2Params->tcp_tx_timeout_val; + cmd->tcp_rx_timeout_val = appType2Params->tcp_rx_timeout_val; + + WMI_LOGD("%s: vdev_id %d gateway_mac "QDF_MAC_ADDR_FMT" " + "rc4_key %.16s rc4_key_len %u " + "ip_id %x ip_device_ip %x ip_server_ip %x " + "tcp_src_port %u tcp_dst_port %u tcp_seq %u " + "tcp_ack_seq %u keepalive_init %u keepalive_min %u " + "keepalive_max %u keepalive_inc %u " + "tcp_tx_timeout_val %u tcp_rx_timeout_val %u", + __func__, cmd->vdev_id, + QDF_MAC_ADDR_REF(appType2Params->gateway_mac.bytes), + cmd->rc4_key, cmd->rc4_key_len, + cmd->ip_id, cmd->ip_device_ip, cmd->ip_server_ip, + cmd->tcp_src_port, cmd->tcp_dst_port, cmd->tcp_seq, + cmd->tcp_ack_seq, cmd->keepalive_init, cmd->keepalive_min, + cmd->keepalive_max, cmd->keepalive_inc, + cmd->tcp_tx_timeout_val, cmd->tcp_rx_timeout_val); + + wmi_mtrace(WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to set APP TYPE2 PARAMS", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; + +} + +/** + * send_app_type1_params_in_fw_cmd_tlv() - set app type1 params in fw + * @wmi_handle: wmi handle + * @app_type1_params: app type1 params + * + * Return: CDF status + */ +static QDF_STATUS send_app_type1_params_in_fw_cmd_tlv(wmi_unified_t wmi_handle, + struct app_type1_params *app_type1_params) +{ + wmi_extwow_set_app_type1_params_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len; + int ret; + + len = sizeof(wmi_extwow_set_app_type1_params_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_extwow_set_app_type1_params_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extwow_set_app_type1_params_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extwow_set_app_type1_params_cmd_fixed_param)); + + cmd->vdev_id = app_type1_params->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(app_type1_params->wakee_mac_addr.bytes, + &cmd->wakee_mac); + qdf_mem_copy(cmd->ident, app_type1_params->identification_id, 8); + cmd->ident_len = app_type1_params->id_length; + qdf_mem_copy(cmd->passwd, app_type1_params->password, 16); + cmd->passwd_len = app_type1_params->pass_length; + + WMI_LOGD("%s: vdev_id %d wakee_mac_addr "QDF_MAC_ADDR_FMT" " + "identification_id %.8s id_length %u " + "password %.16s pass_length %u", + __func__, cmd->vdev_id, + QDF_MAC_ADDR_REF(app_type1_params->wakee_mac_addr.bytes), + cmd->ident, cmd->ident_len, cmd->passwd, cmd->passwd_len); + + wmi_mtrace(WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to set APP TYPE1 PARAMS", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +void wmi_extwow_attach_tlv(struct wmi_unified *wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_enable_ext_wow_cmd = send_enable_ext_wow_cmd_tlv; + ops->send_set_app_type2_params_in_fw_cmd = + send_set_app_type2_params_in_fw_cmd_tlv; + ops->send_app_type1_params_in_fw_cmd = + send_app_type1_params_in_fw_cmd_tlv; +} +#endif /* WLAN_FEATURE_EXTWOW_SUPPORT */ + +void wmi_pmo_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_add_wow_wakeup_event_cmd = + send_add_wow_wakeup_event_cmd_tlv; + ops->send_wow_patterns_to_fw_cmd = send_wow_patterns_to_fw_cmd_tlv; + ops->send_enable_arp_ns_offload_cmd = + send_enable_arp_ns_offload_cmd_tlv; + ops->send_add_clear_mcbc_filter_cmd = + send_add_clear_mcbc_filter_cmd_tlv; + ops->send_multiple_add_clear_mcbc_filter_cmd = + send_multiple_add_clear_mcbc_filter_cmd_tlv; + ops->send_conf_hw_filter_cmd = send_conf_hw_filter_cmd_tlv; + ops->send_gtk_offload_cmd = send_gtk_offload_cmd_tlv; + ops->send_process_gtk_offload_getinfo_cmd = + send_process_gtk_offload_getinfo_cmd_tlv; + ops->send_enable_enhance_multicast_offload_cmd = + send_enable_enhance_multicast_offload_tlv; + ops->extract_gtk_rsp_event = extract_gtk_rsp_event_tlv; + ops->send_action_frame_patterns_cmd = + send_action_frame_patterns_cmd_tlv; + ops->send_wow_delete_pattern_cmd = send_wow_delete_pattern_cmd_tlv; + ops->send_host_wakeup_ind_to_fw_cmd = + send_host_wakeup_ind_to_fw_cmd_tlv; + ops->send_wow_timer_pattern_cmd = send_wow_timer_pattern_cmd_tlv; + + wmi_d0wow_attach_tlv(wmi_handle); + wmi_ra_filtering_attach_tlv(wmi_handle); + wmi_lphb_attach_tlv(wmi_handle); + wmi_packet_filtering_attach_tlv(wmi_handle); + wmi_extwow_attach_tlv(wmi_handle); +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_reg_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_reg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..4cfa3ae155c324a008eae5cde5e7b2774093b8bc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_reg_api.c @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implement API's specific to Regulatory component. + */ + +#include +#include +#include +#include +#include + +QDF_STATUS wmi_extract_reg_chan_list_update_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct cur_regulatory_info *reg_info, + uint32_t len) +{ + if (wmi_handle && wmi_handle->ops->extract_reg_chan_list_update_event) + return wmi_handle->ops->extract_reg_chan_list_update_event + (wmi_handle, + evt_buf, reg_info, len); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_extract_reg_chan_list_update_event); + +/* + * wmi_unified_send_start_11d_scan_cmd() - start 11d scan + * @wmi_handle: wmi handle + * @start_11d_scan: pointer to 11d scan start req. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_start_11d_scan_cmd(wmi_unified_t wmi_handle, + struct reg_start_11d_scan_req *start_11d_scan) +{ + if (wmi_handle->ops->send_start_11d_scan_cmd) + return wmi_handle->ops->send_start_11d_scan_cmd(wmi_handle, + start_11d_scan); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_unified_send_start_11d_scan_cmd); + +/* + * wmi_unified_send_stop_11d_scan_cmd() - stop 11d scan + * @wmi_handle: wmi handle + * @stop_11d_scan: pointer to 11d scan stop req. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_stop_11d_scan_cmd(wmi_unified_t wmi_handle, + struct reg_stop_11d_scan_req *stop_11d_scan) +{ + if (wmi_handle->ops->send_stop_11d_scan_cmd) + return wmi_handle->ops->send_stop_11d_scan_cmd(wmi_handle, + stop_11d_scan); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_unified_send_stop_11d_scan_cmd); + +QDF_STATUS wmi_extract_reg_11d_new_cc_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct reg_11d_new_country *reg_11d_new_cc, + uint32_t len) +{ + if (wmi_handle && wmi_handle->ops->extract_reg_11d_new_country_event) + return wmi_handle->ops->extract_reg_11d_new_country_event( + wmi_handle, evt_buf, reg_11d_new_cc, len); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_extract_reg_11d_new_cc_event); + +QDF_STATUS wmi_unified_set_user_country_code_cmd_send( + wmi_unified_t wmi_handle, + uint8_t pdev_id, struct cc_regdmn_s *rd) +{ + if (wmi_handle->ops->send_user_country_code_cmd) + return wmi_handle->ops->send_user_country_code_cmd( + wmi_handle, pdev_id, rd); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_unified_set_user_country_code_cmd_send); + +QDF_STATUS wmi_extract_reg_ch_avoid_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct ch_avoid_ind_type *ch_avoid_ind, + uint32_t len) +{ + if (wmi_handle && wmi_handle->ops->extract_reg_ch_avoid_event) + return wmi_handle->ops->extract_reg_ch_avoid_event( + wmi_handle, evt_buf, ch_avoid_ind, len); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_extract_reg_ch_avoid_event); diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_roam_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_roam_api.c new file mode 100644 index 0000000000000000000000000000000000000000..90794794bc8a78922b780c7d94bf1587f9a2b52d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_roam_api.c @@ -0,0 +1,340 @@ + +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#ifdef FEATURE_LFR_SUBNET_DETECTION +QDF_STATUS +wmi_unified_set_gateway_params_cmd(wmi_unified_t wmi_handle, + struct gateway_update_req_param *req) +{ + if (wmi_handle->ops->send_set_gateway_params_cmd) + return wmi_handle->ops->send_set_gateway_params_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} +#endif /* FEATURE_LFR_SUBNET_DETECTION */ + +#ifdef FEATURE_RSSI_MONITOR +QDF_STATUS +wmi_unified_set_rssi_monitoring_cmd(wmi_unified_t wmi_handle, + struct rssi_monitor_param *req) +{ + if (wmi_handle->ops->send_set_rssi_monitoring_cmd) + return wmi_handle->ops->send_set_rssi_monitoring_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} +#endif /* FEATURE_RSSI_MONITOR */ + +QDF_STATUS wmi_unified_roam_scan_offload_rssi_thresh_cmd( + wmi_unified_t wmi_handle, + struct roam_offload_scan_rssi_params *roam_req) +{ + if (wmi_handle->ops->send_roam_scan_offload_rssi_thresh_cmd) + return wmi_handle->ops->send_roam_scan_offload_rssi_thresh_cmd( + wmi_handle, roam_req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_roam_mawc_params_cmd( + wmi_unified_t wmi_handle, + struct wmi_mawc_roam_params *params) +{ + if (wmi_handle->ops->send_roam_mawc_params_cmd) + return wmi_handle->ops->send_roam_mawc_params_cmd(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_roam_scan_filter_cmd(wmi_unified_t wmi_handle, + struct roam_scan_filter_params *roam_req) +{ + if (wmi_handle->ops->send_roam_scan_filter_cmd) + return wmi_handle->ops->send_roam_scan_filter_cmd(wmi_handle, + roam_req); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_WLAN_ESE +QDF_STATUS wmi_unified_plm_stop_cmd(wmi_unified_t wmi_handle, + const struct plm_req_params *plm) +{ + if (wmi_handle->ops->send_plm_stop_cmd) + return wmi_handle->ops->send_plm_stop_cmd(wmi_handle, plm); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_plm_start_cmd(wmi_unified_t wmi_handle, + const struct plm_req_params *plm) +{ + if (wmi_handle->ops->send_plm_start_cmd) + return wmi_handle->ops->send_plm_start_cmd(wmi_handle, plm); + + return QDF_STATUS_E_FAILURE; +} +#endif /* FEATURE_WLAN_ESE */ + +#ifdef WLAN_FEATURE_ROAM_OFFLOAD +QDF_STATUS wmi_unified_set_ric_req_cmd(wmi_unified_t wmi_handle, void *msg, + uint8_t is_add_ts) +{ + if (wmi_handle->ops->send_set_ric_req_cmd) + return wmi_handle->ops->send_set_ric_req_cmd(wmi_handle, msg, + is_add_ts); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_roam_synch_complete_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + if (wmi_handle->ops->send_process_roam_synch_complete_cmd) + return wmi_handle->ops->send_process_roam_synch_complete_cmd( + wmi_handle, vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_roam_invoke_cmd(wmi_unified_t wmi_handle, + struct wmi_roam_invoke_cmd *roaminvoke, + uint32_t ch_hz) +{ + if (wmi_handle->ops->send_roam_invoke_cmd) + return wmi_handle->ops->send_roam_invoke_cmd(wmi_handle, + roaminvoke, + ch_hz); + + return QDF_STATUS_E_FAILURE; +} +#endif /* WLAN_FEATURE_ROAM_OFFLOAD */ + +QDF_STATUS wmi_unified_roam_scan_offload_mode_cmd( + wmi_unified_t wmi_handle, + wmi_start_scan_cmd_fixed_param *scan_cmd_fp, + struct roam_offload_scan_params *roam_req) +{ + if (wmi_handle->ops->send_roam_scan_offload_mode_cmd) + return wmi_handle->ops->send_roam_scan_offload_mode_cmd( + wmi_handle, scan_cmd_fp, roam_req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_roam_scan_offload_ap_cmd( + wmi_unified_t wmi_handle, + struct ap_profile_params *ap_profile) +{ + if (wmi_handle->ops->send_roam_scan_offload_ap_profile_cmd) + return wmi_handle->ops->send_roam_scan_offload_ap_profile_cmd( + wmi_handle, ap_profile); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_roam_scan_offload_cmd(wmi_unified_t wmi_handle, + uint32_t command, + uint32_t vdev_id) +{ + if (wmi_handle->ops->send_roam_scan_offload_cmd) + return wmi_handle->ops->send_roam_scan_offload_cmd(wmi_handle, + command, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_roam_scan_offload_scan_period(wmi_unified_t wmi_handle, + struct roam_scan_period_params *param) +{ + if (wmi_handle->ops->send_roam_scan_offload_scan_period_cmd) + return wmi_handle->ops->send_roam_scan_offload_scan_period_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_roam_scan_offload_chan_list_cmd(wmi_unified_t wmi_handle, + uint8_t chan_count, + uint32_t *chan_list, + uint8_t list_type, + uint32_t vdev_id) +{ + if (wmi_handle->ops->send_roam_scan_offload_chan_list_cmd) + return wmi_handle->ops->send_roam_scan_offload_chan_list_cmd(wmi_handle, + chan_count, chan_list, + list_type, vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_roam_scan_offload_rssi_change_cmd(wmi_unified_t wmi_handle, + uint32_t vdev_id, + int32_t rssi_change_thresh, + uint32_t bcn_rssi_weight, + uint32_t hirssi_delay_btw_scans) +{ + if (wmi_handle->ops->send_roam_scan_offload_rssi_change_cmd) + return wmi_handle->ops->send_roam_scan_offload_rssi_change_cmd(wmi_handle, + vdev_id, rssi_change_thresh, + bcn_rssi_weight, hirssi_delay_btw_scans); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_set_per_roam_config(wmi_unified_t wmi_handle, + struct wmi_per_roam_config_req *req_buf) +{ + if (wmi_handle->ops->send_per_roam_config_cmd) + return wmi_handle->ops->send_per_roam_config_cmd(wmi_handle, + req_buf); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_limit_off_chan_cmd( + wmi_unified_t wmi_handle, + struct wmi_limit_off_chan_param *limit_off_chan_param) +{ + if (wmi_handle->ops->send_limit_off_chan_cmd) + return wmi_handle->ops->send_limit_off_chan_cmd(wmi_handle, + limit_off_chan_param); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_FEATURE_FILS_SK +QDF_STATUS wmi_unified_roam_send_hlp_cmd(wmi_unified_t wmi_handle, + struct hlp_params *req_buf) +{ + if (wmi_handle->ops->send_roam_scan_hlp_cmd) + return wmi_handle->ops->send_roam_scan_hlp_cmd(wmi_handle, + req_buf); + + return QDF_STATUS_E_FAILURE; +} +#endif /* WLAN_FEATURE_FILS_SK */ + +QDF_STATUS wmi_unified_send_btm_config(wmi_unified_t wmi_handle, + struct wmi_btm_config *params) +{ + if (wmi_handle->ops->send_btm_config) + return wmi_handle->ops->send_btm_config(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_bss_load_config(wmi_unified_t wmi_handle, + struct wmi_bss_load_config *params) +{ + if (wmi_handle->ops->send_roam_bss_load_config) + return wmi_handle->ops->send_roam_bss_load_config(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_disconnect_roam_params(wmi_unified_t wmi_handle, + struct wmi_disconnect_roam_params *req) +{ + if (wmi_handle->ops->send_disconnect_roam_params) + return wmi_handle->ops->send_disconnect_roam_params(wmi_handle, + req); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_idle_roam_params(wmi_unified_t wmi_handle, + struct wmi_idle_roam_params *req) +{ + if (wmi_handle->ops->send_idle_roam_params) + return wmi_handle->ops->send_idle_roam_params(wmi_handle, + req); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_roam_preauth_status(wmi_unified_t wmi_handle, + struct wmi_roam_auth_status_params *params) +{ + if (wmi_handle->ops->send_roam_preauth_status) + return wmi_handle->ops->send_roam_preauth_status(wmi_handle, + params); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_offload_11k_cmd(wmi_unified_t wmi_handle, + struct wmi_11k_offload_params *params) +{ + if (wmi_handle->ops->send_offload_11k_cmd) + return wmi_handle->ops->send_offload_11k_cmd(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_invoke_neighbor_report_cmd( + wmi_unified_t wmi_handle, + struct wmi_invoke_neighbor_report_params *params) +{ + if (wmi_handle->ops->send_invoke_neighbor_report_cmd) + return wmi_handle->ops->send_invoke_neighbor_report_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_get_roam_scan_ch_list(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + if (wmi_handle->ops->send_roam_scan_ch_list_req_cmd) + return wmi_handle->ops->send_roam_scan_ch_list_req_cmd( + wmi_handle, vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_FEATURE_ROAM_OFFLOAD +QDF_STATUS wmi_unified_set_roam_triggers(wmi_unified_t wmi_handle, + struct roam_triggers *triggers) +{ + if (wmi_handle->ops->send_set_roam_trigger_cmd) + return wmi_handle->ops->send_set_roam_trigger_cmd(wmi_handle, + triggers->vdev_id, triggers->trigger_bitmap); + + return QDF_STATUS_E_FAILURE; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_roam_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_roam_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..322b71d358c1cad10e79d43cb03a2e3936da3ff2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_roam_tlv.c @@ -0,0 +1,2850 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to roaming component. + */ + +#include +#include +#include "wmi.h" + +#ifdef FEATURE_LFR_SUBNET_DETECTION +/** + * send_set_gateway_params_cmd_tlv() - set gateway parameters + * @wmi_handle: wmi handle + * @req: gateway parameter update request structure + * + * This function reads the incoming @req and fill in the destination + * WMI structure and sends down the gateway configs down to the firmware + * + * Return: QDF_STATUS + */ +static QDF_STATUS send_set_gateway_params_cmd_tlv(wmi_unified_t wmi_handle, + struct gateway_update_req_param *req) +{ + wmi_roam_subnet_change_config_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + int len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_roam_subnet_change_config_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_subnet_change_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_roam_subnet_change_config_fixed_param)); + + cmd->vdev_id = req->vdev_id; + qdf_mem_copy(&cmd->inet_gw_ip_v4_addr, req->ipv4_addr, + QDF_IPV4_ADDR_SIZE); + qdf_mem_copy(&cmd->inet_gw_ip_v6_addr, req->ipv6_addr, + QDF_IPV6_ADDR_SIZE); + WMI_CHAR_ARRAY_TO_MAC_ADDR(req->gw_mac_addr.bytes, + &cmd->inet_gw_mac_addr); + cmd->max_retries = req->max_retries; + cmd->timeout = req->timeout; + cmd->num_skip_subnet_change_detection_bssid_list = 0; + cmd->flag = 0; + if (req->ipv4_addr_type) + WMI_SET_ROAM_SUBNET_CHANGE_FLAG_IP4_ENABLED(cmd->flag); + + if (req->ipv6_addr_type) + WMI_SET_ROAM_SUBNET_CHANGE_FLAG_IP6_ENABLED(cmd->flag); + + wmi_mtrace(WMI_ROAM_SUBNET_CHANGE_CONFIG_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ROAM_SUBNET_CHANGE_CONFIG_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send gw config parameter to fw, ret: %d", + ret); + wmi_buf_free(buf); + } + + return ret; +} + +void wmi_lfr_subnet_detection_attach_tlv(struct wmi_unified *wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_set_gateway_params_cmd = send_set_gateway_params_cmd_tlv; +} +#endif /* FEATURE_LFR_SUBNET_DETECTION */ + +#ifdef FEATURE_RSSI_MONITOR +/** + * send_set_rssi_monitoring_cmd_tlv() - set rssi monitoring + * @wmi_handle: wmi handle + * @req: rssi monitoring request structure + * + * This function reads the incoming @req and fill in the destination + * WMI structure and send down the rssi monitoring configs down to the firmware + * + * Return: 0 on success; error number otherwise + */ +static QDF_STATUS send_set_rssi_monitoring_cmd_tlv(wmi_unified_t wmi_handle, + struct rssi_monitor_param *req) +{ + wmi_rssi_breach_monitor_config_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + uint32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_rssi_breach_monitor_config_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_rssi_breach_monitor_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_rssi_breach_monitor_config_fixed_param)); + + cmd->vdev_id = req->vdev_id; + cmd->request_id = req->request_id; + cmd->lo_rssi_reenable_hysteresis = 0; + cmd->hi_rssi_reenable_histeresis = 0; + cmd->min_report_interval = 0; + cmd->max_num_report = 1; + if (req->control) { + /* enable one threshold for each min/max */ + cmd->enabled_bitmap = 0x09; + cmd->low_rssi_breach_threshold[0] = req->min_rssi; + cmd->hi_rssi_breach_threshold[0] = req->max_rssi; + } else { + cmd->enabled_bitmap = 0; + cmd->low_rssi_breach_threshold[0] = 0; + cmd->hi_rssi_breach_threshold[0] = 0; + } + + wmi_mtrace(WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID"); + wmi_buf_free(buf); + } + + WMI_LOGD("Sent WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID to FW"); + + return ret; +} + +void wmi_rssi_monitor_attach_tlv(struct wmi_unified *wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_set_rssi_monitoring_cmd = send_set_rssi_monitoring_cmd_tlv; +} +#endif /* FEATURE_RSSI_MONITOR */ + +/** + * send_roam_scan_offload_rssi_thresh_cmd_tlv() - set scan offload + * rssi threashold + * @wmi_handle: wmi handle + * @roam_req: Roaming request buffer + * + * Send WMI_ROAM_SCAN_RSSI_THRESHOLD TLV to firmware + * + * Return: QDF status + */ +static QDF_STATUS send_roam_scan_offload_rssi_thresh_cmd_tlv(wmi_unified_t wmi_handle, + struct roam_offload_scan_rssi_params *roam_req) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len; + uint8_t *buf_ptr; + wmi_roam_scan_rssi_threshold_fixed_param *rssi_threshold_fp; + wmi_roam_scan_extended_threshold_param *ext_thresholds = NULL; + wmi_roam_earlystop_rssi_thres_param *early_stop_thresholds = NULL; + wmi_roam_dense_thres_param *dense_thresholds = NULL; + wmi_roam_bg_scan_roaming_param *bg_scan_params = NULL; + wmi_roam_data_rssi_roaming_param *data_rssi_param = NULL; + + len = sizeof(wmi_roam_scan_rssi_threshold_fixed_param); + len += WMI_TLV_HDR_SIZE; /* TLV for ext_thresholds*/ + len += sizeof(wmi_roam_scan_extended_threshold_param); + len += WMI_TLV_HDR_SIZE; + len += sizeof(wmi_roam_earlystop_rssi_thres_param); + len += WMI_TLV_HDR_SIZE; /* TLV for dense thresholds*/ + len += sizeof(wmi_roam_dense_thres_param); + len += WMI_TLV_HDR_SIZE; /* TLV for BG Scan*/ + len += sizeof(wmi_roam_bg_scan_roaming_param); + len += WMI_TLV_HDR_SIZE; /* TLV for data RSSI*/ + len += sizeof(wmi_roam_data_rssi_roaming_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + rssi_threshold_fp = + (wmi_roam_scan_rssi_threshold_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&rssi_threshold_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_scan_rssi_threshold_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_scan_rssi_threshold_fixed_param)); + /* fill in threshold values */ + rssi_threshold_fp->vdev_id = roam_req->vdev_id; + rssi_threshold_fp->roam_scan_rssi_thresh = roam_req->rssi_thresh; + rssi_threshold_fp->roam_rssi_thresh_diff = roam_req->rssi_thresh_diff; + rssi_threshold_fp->hirssi_scan_max_count = + roam_req->hi_rssi_scan_max_count; + rssi_threshold_fp->hirssi_scan_delta = + roam_req->hi_rssi_scan_rssi_delta; + rssi_threshold_fp->hirssi_upper_bound = roam_req->hi_rssi_scan_rssi_ub; + rssi_threshold_fp->rssi_thresh_offset_5g = + roam_req->rssi_thresh_offset_5g; + + buf_ptr += sizeof(wmi_roam_scan_rssi_threshold_fixed_param); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_roam_scan_extended_threshold_param)); + buf_ptr += WMI_TLV_HDR_SIZE; + ext_thresholds = (wmi_roam_scan_extended_threshold_param *) buf_ptr; + + ext_thresholds->penalty_threshold_5g = roam_req->penalty_threshold_5g; + if (roam_req->raise_rssi_thresh_5g >= WMI_NOISE_FLOOR_DBM_DEFAULT) + ext_thresholds->boost_threshold_5g = + roam_req->boost_threshold_5g; + + ext_thresholds->boost_algorithm_5g = + WMI_ROAM_5G_BOOST_PENALIZE_ALGO_LINEAR; + ext_thresholds->boost_factor_5g = roam_req->raise_factor_5g; + ext_thresholds->penalty_algorithm_5g = + WMI_ROAM_5G_BOOST_PENALIZE_ALGO_LINEAR; + ext_thresholds->penalty_factor_5g = roam_req->drop_factor_5g; + ext_thresholds->max_boost_5g = roam_req->max_raise_rssi_5g; + ext_thresholds->max_penalty_5g = roam_req->max_drop_rssi_5g; + ext_thresholds->good_rssi_threshold = roam_req->good_rssi_threshold; + + WMITLV_SET_HDR(&ext_thresholds->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_scan_extended_threshold_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_scan_extended_threshold_param)); + buf_ptr += sizeof(wmi_roam_scan_extended_threshold_param); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_roam_earlystop_rssi_thres_param)); + buf_ptr += WMI_TLV_HDR_SIZE; + early_stop_thresholds = (wmi_roam_earlystop_rssi_thres_param *) buf_ptr; + early_stop_thresholds->roam_earlystop_thres_min = + roam_req->roam_earlystop_thres_min; + early_stop_thresholds->roam_earlystop_thres_max = + roam_req->roam_earlystop_thres_max; + WMITLV_SET_HDR(&early_stop_thresholds->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_earlystop_rssi_thres_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_earlystop_rssi_thres_param)); + + buf_ptr += sizeof(wmi_roam_earlystop_rssi_thres_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_roam_dense_thres_param)); + buf_ptr += WMI_TLV_HDR_SIZE; + dense_thresholds = (wmi_roam_dense_thres_param *) buf_ptr; + dense_thresholds->roam_dense_rssi_thres_offset = + roam_req->dense_rssi_thresh_offset; + dense_thresholds->roam_dense_min_aps = roam_req->dense_min_aps_cnt; + dense_thresholds->roam_dense_traffic_thres = + roam_req->traffic_threshold; + dense_thresholds->roam_dense_status = roam_req->initial_dense_status; + WMITLV_SET_HDR(&dense_thresholds->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_dense_thres_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_dense_thres_param)); + + buf_ptr += sizeof(wmi_roam_dense_thres_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_roam_bg_scan_roaming_param)); + buf_ptr += WMI_TLV_HDR_SIZE; + bg_scan_params = (wmi_roam_bg_scan_roaming_param *) buf_ptr; + bg_scan_params->roam_bg_scan_bad_rssi_thresh = + roam_req->bg_scan_bad_rssi_thresh; + bg_scan_params->roam_bg_scan_client_bitmap = + roam_req->bg_scan_client_bitmap; + bg_scan_params->bad_rssi_thresh_offset_2g = + roam_req->roam_bad_rssi_thresh_offset_2g; + bg_scan_params->flags = roam_req->flags; + WMITLV_SET_HDR(&bg_scan_params->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_bg_scan_roaming_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_bg_scan_roaming_param)); + + buf_ptr += sizeof(wmi_roam_bg_scan_roaming_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_roam_data_rssi_roaming_param)); + buf_ptr += WMI_TLV_HDR_SIZE; + data_rssi_param = (wmi_roam_data_rssi_roaming_param *)buf_ptr; + data_rssi_param->flags = + roam_req->roam_data_rssi_threshold_triggers; + data_rssi_param->roam_data_rssi_thres = + roam_req->roam_data_rssi_threshold; + data_rssi_param->rx_inactivity_ms = + roam_req->rx_data_inactivity_time; + WMITLV_SET_HDR(&data_rssi_param->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_data_rssi_roaming_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_data_rssi_roaming_param)); + WMI_LOGD("Data rssi threshold: %d, triggers: 0x%x, rx time: %d", + data_rssi_param->roam_data_rssi_thres, + data_rssi_param->flags, + data_rssi_param->rx_inactivity_ms); + + wmi_mtrace(WMI_ROAM_SCAN_RSSI_THRESHOLD, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_SCAN_RSSI_THRESHOLD); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("cmd WMI_ROAM_SCAN_RSSI_THRESHOLD returned Error %d", + status); + wmi_buf_free(buf); + } + + return status; +} + +static QDF_STATUS send_roam_mawc_params_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_mawc_roam_params *params) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len; + uint8_t *buf_ptr; + wmi_roam_configure_mawc_cmd_fixed_param *wmi_roam_mawc_params; + + len = sizeof(*wmi_roam_mawc_params); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + wmi_roam_mawc_params = + (wmi_roam_configure_mawc_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&wmi_roam_mawc_params->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_configure_mawc_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_configure_mawc_cmd_fixed_param)); + wmi_roam_mawc_params->vdev_id = params->vdev_id; + if (params->enable) + wmi_roam_mawc_params->enable = 1; + else + wmi_roam_mawc_params->enable = 0; + wmi_roam_mawc_params->traffic_load_threshold = + params->traffic_load_threshold; + wmi_roam_mawc_params->best_ap_rssi_threshold = + params->best_ap_rssi_threshold; + wmi_roam_mawc_params->rssi_stationary_high_adjust = + params->rssi_stationary_high_adjust; + wmi_roam_mawc_params->rssi_stationary_low_adjust = + params->rssi_stationary_low_adjust; + WMI_LOGD(FL("MAWC roam en=%d, vdev=%d, tr=%d, ap=%d, high=%d, low=%d"), + wmi_roam_mawc_params->enable, wmi_roam_mawc_params->vdev_id, + wmi_roam_mawc_params->traffic_load_threshold, + wmi_roam_mawc_params->best_ap_rssi_threshold, + wmi_roam_mawc_params->rssi_stationary_high_adjust, + wmi_roam_mawc_params->rssi_stationary_low_adjust); + + wmi_mtrace(WMI_ROAM_CONFIGURE_MAWC_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_CONFIGURE_MAWC_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_ROAM_CONFIGURE_MAWC_CMDID failed, Error %d", + status); + wmi_buf_free(buf); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_roam_scan_filter_cmd_tlv() - Filter to be applied while roaming + * @wmi_handle: wmi handle + * @roam_req: Request which contains the filters + * + * There are filters such as whitelist, blacklist and preferred + * list that need to be applied to the scan results to form the + * probable candidates for roaming. + * + * Return: Return success upon successfully passing the + * parameters to the firmware, otherwise failure. + */ +static QDF_STATUS send_roam_scan_filter_cmd_tlv(wmi_unified_t wmi_handle, + struct roam_scan_filter_params *roam_req) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + uint32_t i; + uint32_t len, blist_len = 0; + uint8_t *buf_ptr; + wmi_roam_filter_fixed_param *roam_filter; + uint8_t *bssid_src_ptr = NULL; + wmi_mac_addr *bssid_dst_ptr = NULL; + wmi_ssid *ssid_ptr = NULL; + uint32_t *bssid_preferred_factor_ptr = NULL; + wmi_roam_lca_disallow_config_tlv_param *blist_param; + wmi_roam_rssi_rejection_oce_config_param *rssi_rej; + + len = sizeof(wmi_roam_filter_fixed_param); + + len += WMI_TLV_HDR_SIZE; + if (roam_req->num_bssid_black_list) + len += roam_req->num_bssid_black_list * sizeof(wmi_mac_addr); + len += WMI_TLV_HDR_SIZE; + if (roam_req->num_ssid_white_list) + len += roam_req->num_ssid_white_list * sizeof(wmi_ssid); + len += 2 * WMI_TLV_HDR_SIZE; + if (roam_req->num_bssid_preferred_list) { + len += roam_req->num_bssid_preferred_list * sizeof(wmi_mac_addr); + len += roam_req->num_bssid_preferred_list * sizeof(uint32_t); + } + len += WMI_TLV_HDR_SIZE; + if (roam_req->lca_disallow_config_present) { + len += sizeof(*blist_param); + blist_len = sizeof(*blist_param); + } + + len += WMI_TLV_HDR_SIZE; + if (roam_req->num_rssi_rejection_ap) + len += roam_req->num_rssi_rejection_ap * sizeof(*rssi_rej); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (u_int8_t *) wmi_buf_data(buf); + roam_filter = (wmi_roam_filter_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&roam_filter->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_filter_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_filter_fixed_param)); + /* fill in fixed values */ + roam_filter->vdev_id = roam_req->vdev_id; + roam_filter->flags = 0; + roam_filter->op_bitmap = roam_req->op_bitmap; + roam_filter->num_bssid_black_list = roam_req->num_bssid_black_list; + roam_filter->num_ssid_white_list = roam_req->num_ssid_white_list; + roam_filter->num_bssid_preferred_list = + roam_req->num_bssid_preferred_list; + roam_filter->num_rssi_rejection_ap = + roam_req->num_rssi_rejection_ap; + roam_filter->delta_rssi = roam_req->delta_rssi; + buf_ptr += sizeof(wmi_roam_filter_fixed_param); + + WMITLV_SET_HDR((buf_ptr), + WMITLV_TAG_ARRAY_FIXED_STRUC, + (roam_req->num_bssid_black_list * sizeof(wmi_mac_addr))); + bssid_src_ptr = (uint8_t *)&roam_req->bssid_avoid_list; + bssid_dst_ptr = (wmi_mac_addr *)(buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < roam_req->num_bssid_black_list; i++) { + WMI_CHAR_ARRAY_TO_MAC_ADDR(bssid_src_ptr, bssid_dst_ptr); + bssid_src_ptr += ATH_MAC_LEN; + bssid_dst_ptr++; + } + buf_ptr += WMI_TLV_HDR_SIZE + + (roam_req->num_bssid_black_list * sizeof(wmi_mac_addr)); + WMITLV_SET_HDR((buf_ptr), + WMITLV_TAG_ARRAY_FIXED_STRUC, + (roam_req->num_ssid_white_list * sizeof(wmi_ssid))); + ssid_ptr = (wmi_ssid *)(buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < roam_req->num_ssid_white_list; i++) { + qdf_mem_copy(&ssid_ptr->ssid, + &roam_req->ssid_allowed_list[i].mac_ssid, + roam_req->ssid_allowed_list[i].length); + ssid_ptr->ssid_len = roam_req->ssid_allowed_list[i].length; + ssid_ptr++; + } + buf_ptr += WMI_TLV_HDR_SIZE + (roam_req->num_ssid_white_list * + sizeof(wmi_ssid)); + WMITLV_SET_HDR((buf_ptr), + WMITLV_TAG_ARRAY_FIXED_STRUC, + (roam_req->num_bssid_preferred_list * sizeof(wmi_mac_addr))); + bssid_src_ptr = (uint8_t *)&roam_req->bssid_favored; + bssid_dst_ptr = (wmi_mac_addr *)(buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < roam_req->num_bssid_preferred_list; i++) { + WMI_CHAR_ARRAY_TO_MAC_ADDR(bssid_src_ptr, + (wmi_mac_addr *)bssid_dst_ptr); + bssid_src_ptr += ATH_MAC_LEN; + bssid_dst_ptr++; + } + buf_ptr += WMI_TLV_HDR_SIZE + + (roam_req->num_bssid_preferred_list * sizeof(wmi_mac_addr)); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (roam_req->num_bssid_preferred_list * sizeof(uint32_t))); + bssid_preferred_factor_ptr = (uint32_t *)(buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < roam_req->num_bssid_preferred_list; i++) { + *bssid_preferred_factor_ptr = + roam_req->bssid_favored_factor[i]; + bssid_preferred_factor_ptr++; + } + buf_ptr += WMI_TLV_HDR_SIZE + + (roam_req->num_bssid_preferred_list * sizeof(uint32_t)); + + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, blist_len); + buf_ptr += WMI_TLV_HDR_SIZE; + if (roam_req->lca_disallow_config_present) { + blist_param = + (wmi_roam_lca_disallow_config_tlv_param *) buf_ptr; + WMITLV_SET_HDR(&blist_param->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_lca_disallow_config_tlv_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_roam_lca_disallow_config_tlv_param)); + + blist_param->disallow_duration = roam_req->disallow_duration; + blist_param->rssi_channel_penalization = + roam_req->rssi_channel_penalization; + blist_param->num_disallowed_aps = roam_req->num_disallowed_aps; + blist_param->disallow_lca_enable_source_bitmap = + (WMI_ROAM_LCA_DISALLOW_SOURCE_PER | + WMI_ROAM_LCA_DISALLOW_SOURCE_BACKGROUND); + buf_ptr += (sizeof(wmi_roam_lca_disallow_config_tlv_param)); + } + + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + (roam_req->num_rssi_rejection_ap * sizeof(*rssi_rej))); + buf_ptr += WMI_TLV_HDR_SIZE; + for (i = 0; i < roam_req->num_rssi_rejection_ap; i++) { + rssi_rej = + (wmi_roam_rssi_rejection_oce_config_param *) buf_ptr; + WMITLV_SET_HDR(&rssi_rej->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_rssi_rejection_oce_config_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_roam_rssi_rejection_oce_config_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR( + roam_req->rssi_rejection_ap[i].bssid.bytes, + &rssi_rej->bssid); + rssi_rej->remaining_disallow_duration = + roam_req->rssi_rejection_ap[i].reject_duration; + rssi_rej->requested_rssi = + (int32_t)roam_req->rssi_rejection_ap[i].expected_rssi; + buf_ptr += + (sizeof(wmi_roam_rssi_rejection_oce_config_param)); + } + + wmi_mtrace(WMI_ROAM_FILTER_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_FILTER_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("cmd WMI_ROAM_FILTER_CMDID returned Error %d", + status); + wmi_buf_free(buf); + } + + return status; +} + +#ifdef FEATURE_WLAN_ESE +/** + * send_plm_stop_cmd_tlv() - plm stop request + * @wmi_handle: wmi handle + * @plm: plm request parameters + * + * This function request FW to stop PLM. + * + * Return: CDF status + */ +static QDF_STATUS send_plm_stop_cmd_tlv(wmi_unified_t wmi_handle, + const struct plm_req_params *plm) +{ + wmi_vdev_plmreq_stop_cmd_fixed_param *cmd; + int32_t len; + wmi_buf_t buf; + uint8_t *buf_ptr; + int ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_vdev_plmreq_stop_cmd_fixed_param *) wmi_buf_data(buf); + + buf_ptr = (uint8_t *) cmd; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_plmreq_stop_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_plmreq_stop_cmd_fixed_param)); + + cmd->vdev_id = plm->vdev_id; + + cmd->meas_token = plm->meas_token; + WMI_LOGD("vdev %d meas token %d", cmd->vdev_id, cmd->meas_token); + + wmi_mtrace(WMI_VDEV_PLMREQ_STOP_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_PLMREQ_STOP_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send plm stop wmi cmd", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_plm_start_cmd_tlv() - plm start request + * @wmi_handle: wmi handle + * @plm: plm request parameters + * + * This function request FW to start PLM. + * + * Return: CDF status + */ +static QDF_STATUS send_plm_start_cmd_tlv(wmi_unified_t wmi_handle, + const struct plm_req_params *plm) +{ + wmi_vdev_plmreq_start_cmd_fixed_param *cmd; + uint32_t *channel_list; + int32_t len; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint8_t count; + int ret; + + /* TLV place holder for channel_list */ + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + len += sizeof(uint32_t) * plm->plm_num_ch; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_plmreq_start_cmd_fixed_param *) wmi_buf_data(buf); + + buf_ptr = (uint8_t *) cmd; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_plmreq_start_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_plmreq_start_cmd_fixed_param)); + + cmd->vdev_id = plm->vdev_id; + + cmd->meas_token = plm->meas_token; + cmd->dialog_token = plm->diag_token; + cmd->number_bursts = plm->num_bursts; + cmd->burst_interval = WMI_SEC_TO_MSEC(plm->burst_int); + cmd->off_duration = plm->meas_duration; + cmd->burst_cycle = plm->burst_len; + cmd->tx_power = plm->desired_tx_pwr; + WMI_CHAR_ARRAY_TO_MAC_ADDR(plm->mac_addr.bytes, &cmd->dest_mac); + cmd->num_chans = plm->plm_num_ch; + + buf_ptr += sizeof(wmi_vdev_plmreq_start_cmd_fixed_param); + + WMI_LOGD("vdev : %d measu token : %d", cmd->vdev_id, cmd->meas_token); + WMI_LOGD("dialog_token: %d", cmd->dialog_token); + WMI_LOGD("number_bursts: %d", cmd->number_bursts); + WMI_LOGD("burst_interval: %d", cmd->burst_interval); + WMI_LOGD("off_duration: %d", cmd->off_duration); + WMI_LOGD("burst_cycle: %d", cmd->burst_cycle); + WMI_LOGD("tx_power: %d", cmd->tx_power); + WMI_LOGD("Number of channels : %d", cmd->num_chans); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (cmd->num_chans * sizeof(uint32_t))); + + buf_ptr += WMI_TLV_HDR_SIZE; + if (cmd->num_chans) { + channel_list = (uint32_t *) buf_ptr; + for (count = 0; count < cmd->num_chans; count++) { + channel_list[count] = plm->plm_ch_freq_list[count]; + WMI_LOGD("Ch[%d]: %d MHz", count, channel_list[count]); + } + buf_ptr += cmd->num_chans * sizeof(uint32_t); + } + + wmi_mtrace(WMI_VDEV_PLMREQ_START_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_PLMREQ_START_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send plm start wmi cmd", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +void wmi_ese_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_plm_stop_cmd = send_plm_stop_cmd_tlv; + ops->send_plm_start_cmd = send_plm_start_cmd_tlv; +} +#endif /* FEATURE_WLAN_ESE */ + +#ifdef WLAN_FEATURE_ROAM_OFFLOAD +/* send_set_ric_req_cmd_tlv() - set ric request element + * @wmi_handle: wmi handle + * @msg: message + * @is_add_ts: is addts required + * + * This function sets ric request element for 11r roaming. + * + * Return: CDF status + */ +static QDF_STATUS send_set_ric_req_cmd_tlv(wmi_unified_t wmi_handle, + void *msg, uint8_t is_add_ts) +{ + wmi_ric_request_fixed_param *cmd; + wmi_ric_tspec *tspec_param; + wmi_buf_t buf; + uint8_t *buf_ptr; + struct mac_tspec_ie *tspec_ie = NULL; + int32_t len = sizeof(wmi_ric_request_fixed_param) + + WMI_TLV_HDR_SIZE + sizeof(wmi_ric_tspec); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + + cmd = (wmi_ric_request_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ric_request_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_ric_request_fixed_param)); + if (is_add_ts) + cmd->vdev_id = ((struct add_ts_param *)msg)->vdev_id; + else + cmd->vdev_id = ((struct del_ts_params *)msg)->sessionId; + cmd->num_ric_request = 1; + cmd->is_add_ric = is_add_ts; + + buf_ptr += sizeof(wmi_ric_request_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, sizeof(wmi_ric_tspec)); + + buf_ptr += WMI_TLV_HDR_SIZE; + tspec_param = (wmi_ric_tspec *) buf_ptr; + WMITLV_SET_HDR(&tspec_param->tlv_header, + WMITLV_TAG_STRUC_wmi_ric_tspec, + WMITLV_GET_STRUCT_TLVLEN(wmi_ric_tspec)); + + if (is_add_ts) + tspec_ie = &(((struct add_ts_param *) msg)->tspec); + else + tspec_ie = &(((struct del_ts_params *) msg)->delTsInfo.tspec); + if (tspec_ie) { + /* Fill the tsinfo in the format expected by firmware */ +#ifndef ANI_LITTLE_BIT_ENDIAN + qdf_mem_copy(((uint8_t *) &tspec_param->ts_info) + 1, + ((uint8_t *) &tspec_ie->tsinfo) + 1, 2); +#else + qdf_mem_copy(((uint8_t *) &tspec_param->ts_info), + ((uint8_t *) &tspec_ie->tsinfo) + 1, 2); +#endif /* ANI_LITTLE_BIT_ENDIAN */ + + tspec_param->nominal_msdu_size = tspec_ie->nomMsduSz; + tspec_param->maximum_msdu_size = tspec_ie->maxMsduSz; + tspec_param->min_service_interval = tspec_ie->minSvcInterval; + tspec_param->max_service_interval = tspec_ie->maxSvcInterval; + tspec_param->inactivity_interval = tspec_ie->inactInterval; + tspec_param->suspension_interval = tspec_ie->suspendInterval; + tspec_param->svc_start_time = tspec_ie->svcStartTime; + tspec_param->min_data_rate = tspec_ie->minDataRate; + tspec_param->mean_data_rate = tspec_ie->meanDataRate; + tspec_param->peak_data_rate = tspec_ie->peakDataRate; + tspec_param->max_burst_size = tspec_ie->maxBurstSz; + tspec_param->delay_bound = tspec_ie->delayBound; + tspec_param->min_phy_rate = tspec_ie->minPhyRate; + tspec_param->surplus_bw_allowance = tspec_ie->surplusBw; + tspec_param->medium_time = 0; + } + WMI_LOGI("%s: Set RIC Req is_add_ts:%d", __func__, is_add_ts); + + wmi_mtrace(WMI_ROAM_SET_RIC_REQUEST_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ROAM_SET_RIC_REQUEST_CMDID)) { + WMI_LOGP("%s: Failed to send vdev Set RIC Req command", + __func__); + if (is_add_ts) + ((struct add_ts_param *) msg)->status = + QDF_STATUS_E_FAILURE; + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_process_roam_synch_complete_cmd_tlv() - roam synch complete command to fw. + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * This function sends roam synch complete event to fw. + * + * Return: CDF STATUS + */ +static QDF_STATUS send_process_roam_synch_complete_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + wmi_roam_synch_complete_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint8_t *buf_ptr; + uint16_t len; + len = sizeof(wmi_roam_synch_complete_fixed_param); + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_roam_synch_complete_fixed_param *) wmi_buf_data(wmi_buf); + buf_ptr = (uint8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_synch_complete_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_synch_complete_fixed_param)); + cmd->vdev_id = vdev_id; + wmi_mtrace(WMI_ROAM_SYNCH_COMPLETE, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_ROAM_SYNCH_COMPLETE)) { + WMI_LOGP("%s: failed to send roam synch confirmation", + __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_roam_invoke_cmd_tlv() - send roam invoke command to fw. + * @wmi_handle: wma handle + * @roaminvoke: roam invoke command + * + * Send roam invoke command to fw for fastreassoc. + * + * Return: CDF STATUS + */ +static QDF_STATUS send_roam_invoke_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_roam_invoke_cmd *roaminvoke, + uint32_t ch_hz) +{ + wmi_roam_invoke_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + u_int8_t *buf_ptr; + u_int16_t len, args_tlv_len; + uint32_t *channel_list; + wmi_mac_addr *bssid_list; + wmi_tlv_buf_len_param *buf_len_tlv; + + args_tlv_len = (4 * WMI_TLV_HDR_SIZE) + sizeof(uint32_t) + + sizeof(wmi_mac_addr) + sizeof(wmi_tlv_buf_len_param) + + roundup(roaminvoke->frame_len, sizeof(uint32_t)); + len = sizeof(wmi_roam_invoke_cmd_fixed_param) + args_tlv_len; + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_roam_invoke_cmd_fixed_param *)wmi_buf_data(wmi_buf); + buf_ptr = (u_int8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_invoke_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_invoke_cmd_fixed_param)); + cmd->vdev_id = roaminvoke->vdev_id; + cmd->flags |= (1 << WMI_ROAM_INVOKE_FLAG_REPORT_FAILURE); + if (roaminvoke->is_same_bssid) + cmd->flags |= (1 << WMI_ROAM_INVOKE_FLAG_NO_NULL_FRAME_TO_AP); + + if (roaminvoke->frame_len) { + cmd->roam_scan_mode = WMI_ROAM_INVOKE_SCAN_MODE_SKIP; + /* packing 1 beacon/probe_rsp frame with WMI cmd */ + cmd->num_buf = 1; + } else { + cmd->roam_scan_mode = WMI_ROAM_INVOKE_SCAN_MODE_FIXED_CH; + cmd->num_buf = 0; + } + + cmd->roam_ap_sel_mode = 0; + cmd->roam_delay = 0; + cmd->num_chan = 1; + cmd->num_bssid = 1; + + if (roaminvoke->forced_roaming) { + cmd->num_chan = 0; + cmd->num_bssid = 0; + cmd->roam_scan_mode = WMI_ROAM_INVOKE_SCAN_MODE_CACHE_MAP; + cmd->flags |= (1 << WMI_ROAM_INVOKE_FLAG_FULL_SCAN_IF_NO_CANDIDATE); + cmd->reason = ROAM_INVOKE_REASON_NUD_FAILURE; + } else { + cmd->reason = ROAM_INVOKE_REASON_USER_SPACE; + } + + buf_ptr += sizeof(wmi_roam_invoke_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (sizeof(u_int32_t))); + channel_list = (uint32_t *)(buf_ptr + WMI_TLV_HDR_SIZE); + *channel_list = ch_hz; + buf_ptr += sizeof(uint32_t) + WMI_TLV_HDR_SIZE; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_FIXED_STRUC, + (sizeof(wmi_mac_addr))); + bssid_list = (wmi_mac_addr *)(buf_ptr + WMI_TLV_HDR_SIZE); + WMI_CHAR_ARRAY_TO_MAC_ADDR(roaminvoke->bssid, bssid_list); + + /* move to next tlv i.e. bcn_prb_buf_list */ + buf_ptr += WMI_TLV_HDR_SIZE + sizeof(wmi_mac_addr); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_FIXED_STRUC, + sizeof(wmi_tlv_buf_len_param)); + + buf_len_tlv = (wmi_tlv_buf_len_param *)(buf_ptr + WMI_TLV_HDR_SIZE); + buf_len_tlv->buf_len = roaminvoke->frame_len; + + /* move to next tlv i.e. bcn_prb_frm */ + buf_ptr += WMI_TLV_HDR_SIZE + sizeof(wmi_tlv_buf_len_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + roundup(roaminvoke->frame_len, sizeof(uint32_t))); + + /* copy frame after the header */ + qdf_mem_copy(buf_ptr + WMI_TLV_HDR_SIZE, + roaminvoke->frame_buf, + roaminvoke->frame_len); + + WMI_LOGD(FL("flag:%d, MODE scn:%d, ap:%d, dly:%d, n_ch:%d, n_bssid:%d, channel:%d, is_same_bssid:%d"), + cmd->flags, cmd->roam_scan_mode, + cmd->roam_ap_sel_mode, cmd->roam_delay, + cmd->num_chan, cmd->num_bssid, ch_hz, + roaminvoke->is_same_bssid); + + wmi_mtrace(WMI_ROAM_INVOKE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_ROAM_INVOKE_CMDID)) { + WMI_LOGP("%s: failed to send roam invoke command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +void wmi_roam_offload_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_set_ric_req_cmd = send_set_ric_req_cmd_tlv; + ops->send_process_roam_synch_complete_cmd = + send_process_roam_synch_complete_cmd_tlv; + ops->send_roam_invoke_cmd = send_roam_invoke_cmd_tlv; +} +#endif /* WLAN_FEATURE_ROAM_OFFLOAD */ + +#if defined(WLAN_FEATURE_FILS_SK) && defined(WLAN_FEATURE_ROAM_OFFLOAD) +/** + * wmi_add_fils_tlv() - Add FILS TLV to roam scan offload command + * @wmi_handle: wmi handle + * @roam_req: Roam scan offload params + * @buf_ptr: command buffer to send + * @fils_tlv_len: fils tlv length + * + * Return: Updated buffer pointer + */ +static uint8_t *wmi_add_fils_tlv(wmi_unified_t wmi_handle, + struct roam_offload_scan_params *roam_req, + uint8_t *buf_ptr, uint32_t fils_tlv_len) +{ + wmi_roam_fils_offload_tlv_param *fils_tlv; + wmi_erp_info *erp_info; + struct roam_fils_params *roam_fils_params; + + if (!roam_req->add_fils_tlv) + return buf_ptr; + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(*fils_tlv)); + buf_ptr += WMI_TLV_HDR_SIZE; + + fils_tlv = (wmi_roam_fils_offload_tlv_param *)buf_ptr; + WMITLV_SET_HDR(&fils_tlv->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_fils_offload_tlv_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_fils_offload_tlv_param)); + + roam_fils_params = &roam_req->roam_fils_params; + erp_info = (wmi_erp_info *)(&fils_tlv->vdev_erp_info); + + erp_info->username_length = roam_fils_params->username_length; + qdf_mem_copy(erp_info->username, roam_fils_params->username, + erp_info->username_length); + + erp_info->next_erp_seq_num = roam_fils_params->next_erp_seq_num; + + erp_info->rRk_length = roam_fils_params->rrk_length; + qdf_mem_copy(erp_info->rRk, roam_fils_params->rrk, + erp_info->rRk_length); + + erp_info->rIk_length = roam_fils_params->rik_length; + qdf_mem_copy(erp_info->rIk, roam_fils_params->rik, + erp_info->rIk_length); + + erp_info->realm_len = roam_fils_params->realm_len; + qdf_mem_copy(erp_info->realm, roam_fils_params->realm, + erp_info->realm_len); + + buf_ptr += sizeof(*fils_tlv); + return buf_ptr; +} +#else +static inline uint8_t *wmi_add_fils_tlv(wmi_unified_t wmi_handle, + struct roam_offload_scan_params *roam_req, + uint8_t *buf_ptr, uint32_t fils_tlv_len) +{ + return buf_ptr; +} +#endif + +#ifdef WLAN_FEATURE_ROAM_OFFLOAD +/** + * fill_roam_offload_11r_params() - Fill roam scan params to send it to fw + * @auth_mode: Authentication mode + * @roam_offload_11r: TLV to be filled with 11r params + * @roam_req: roam request param + */ +static void +fill_roam_offload_11r_params(uint32_t auth_mode, + wmi_roam_11r_offload_tlv_param *roam_offload_11r, + struct roam_offload_scan_params *roam_req) +{ + uint8_t *psk_msk, len; + + if (auth_mode == WMI_AUTH_FT_RSNA_FILS_SHA256 || + auth_mode == WMI_AUTH_FT_RSNA_FILS_SHA384) { + psk_msk = roam_req->roam_fils_params.fils_ft; + len = roam_req->roam_fils_params.fils_ft_len; + } else { + psk_msk = roam_req->psk_pmk; + len = roam_req->pmk_len; + } + + /* + * For SHA384 based akm, the pmk length is 48 bytes. So fill + * first 32 bytes in roam_offload_11r->psk_msk and the remaining + * bytes in roam_offload_11r->psk_msk_ext buffer + */ + roam_offload_11r->psk_msk_len = len > ROAM_OFFLOAD_PSK_MSK_BYTES ? + ROAM_OFFLOAD_PSK_MSK_BYTES : len; + qdf_mem_copy(roam_offload_11r->psk_msk, psk_msk, + roam_offload_11r->psk_msk_len); + roam_offload_11r->psk_msk_ext_len = 0; + + if (len > ROAM_OFFLOAD_PSK_MSK_BYTES) { + roam_offload_11r->psk_msk_ext_len = + len - roam_offload_11r->psk_msk_len; + qdf_mem_copy(roam_offload_11r->psk_msk_ext, + &psk_msk[roam_offload_11r->psk_msk_len], + roam_offload_11r->psk_msk_ext_len); + } +} + +/** + * wmi_fill_sae_single_pmk_param() - Fill sae single pmk flag to indicate fw to + * use same PMKID for WPA3 SAE roaming. + * @params: roam request param + * @roam_offload_11i: pointer to 11i params + * + * Return: None + */ +static inline void +wmi_fill_sae_single_pmk_param(struct roam_offload_scan_params *params, + wmi_roam_11i_offload_tlv_param *roam_offload_11i) +{ + if (params->is_sae_same_pmk) + roam_offload_11i->flags |= + 1 << WMI_ROAM_OFFLOAD_FLAG_SAE_SAME_PMKID; +} +#else +static inline void +wmi_fill_sae_single_pmk_param(struct roam_offload_scan_params *params, + wmi_roam_11i_offload_tlv_param *roam_offload_11i) +{ +} +#endif + +/** + * send_roam_scan_offload_mode_cmd_tlv() - send roam scan mode request to fw + * @wmi_handle: wmi handle + * @scan_cmd_fp: start scan command ptr + * @roam_req: roam request param + * + * send WMI_ROAM_SCAN_MODE TLV to firmware. It has a piggyback + * of WMI_ROAM_SCAN_MODE. + * + * Return: QDF status + */ +static QDF_STATUS +send_roam_scan_offload_mode_cmd_tlv(wmi_unified_t wmi_handle, + wmi_start_scan_cmd_fixed_param *scan_cmd_fp, + struct roam_offload_scan_params *roam_req) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len; + uint8_t *buf_ptr; + wmi_roam_scan_mode_fixed_param *roam_scan_mode_fp; + +#ifdef WLAN_FEATURE_ROAM_OFFLOAD + int auth_mode = roam_req->auth_mode; + roam_offload_param *req_offload_params = + &roam_req->roam_offload_params; + wmi_roam_offload_tlv_param *roam_offload_params; + wmi_roam_11i_offload_tlv_param *roam_offload_11i; + wmi_roam_11r_offload_tlv_param *roam_offload_11r; + wmi_roam_ese_offload_tlv_param *roam_offload_ese; + wmi_tlv_buf_len_param *assoc_ies; + uint32_t fils_tlv_len = 0; +#endif /* WLAN_FEATURE_ROAM_OFFLOAD */ + /* Need to create a buf with roam_scan command at + * front and piggyback with scan command */ + len = sizeof(wmi_roam_scan_mode_fixed_param) + +#ifdef WLAN_FEATURE_ROAM_OFFLOAD + (2 * WMI_TLV_HDR_SIZE) + +#endif /* WLAN_FEATURE_ROAM_OFFLOAD */ + sizeof(wmi_start_scan_cmd_fixed_param); +#ifdef WLAN_FEATURE_ROAM_OFFLOAD + wmi_debug("auth_mode = %d", auth_mode); + if (roam_req->is_roam_req_valid && + roam_req->roam_offload_enabled) { + len += sizeof(wmi_roam_offload_tlv_param); + len += WMI_TLV_HDR_SIZE; + if ((auth_mode != WMI_AUTH_NONE) && + ((auth_mode != WMI_AUTH_OPEN) || + (auth_mode == WMI_AUTH_OPEN && + roam_req->mdid.mdie_present && + roam_req->is_11r_assoc) || + roam_req->is_ese_assoc)) { + len += WMI_TLV_HDR_SIZE; + if (roam_req->is_ese_assoc) + len += sizeof(wmi_roam_ese_offload_tlv_param); + else if ((auth_mode == WMI_AUTH_FT_RSNA) || + (auth_mode == WMI_AUTH_FT_RSNA_PSK) || + (auth_mode == WMI_AUTH_FT_RSNA_SAE) || + (auth_mode == + WMI_AUTH_FT_RSNA_SUITE_B_8021X_SHA384) || + (auth_mode == + WMI_AUTH_FT_RSNA_FILS_SHA256) || + (auth_mode == + WMI_AUTH_FT_RSNA_FILS_SHA384) || + (auth_mode == WMI_AUTH_OPEN && + roam_req->mdid.mdie_present && + roam_req->is_11r_assoc)) + len += sizeof(wmi_roam_11r_offload_tlv_param); + else + len += sizeof(wmi_roam_11i_offload_tlv_param); + } else { + len += WMI_TLV_HDR_SIZE; + } + + len += (sizeof(*assoc_ies) + (2*WMI_TLV_HDR_SIZE) + + roundup(roam_req->assoc_ie_length, sizeof(uint32_t))); + + if (roam_req->add_fils_tlv) { + fils_tlv_len = sizeof(wmi_roam_fils_offload_tlv_param); + len += WMI_TLV_HDR_SIZE + fils_tlv_len; + } + } else { + if (roam_req->is_roam_req_valid) + WMI_LOGD("%s : roam offload = %d", __func__, + roam_req->roam_offload_enabled); + + len += (4 * WMI_TLV_HDR_SIZE); + } + + if (roam_req->is_roam_req_valid && roam_req->roam_offload_enabled) + roam_req->mode |= WMI_ROAM_SCAN_MODE_ROAMOFFLOAD; +#endif /* WLAN_FEATURE_ROAM_OFFLOAD */ + + if (roam_req->mode == + (WMI_ROAM_SCAN_MODE_NONE | WMI_ROAM_SCAN_MODE_ROAMOFFLOAD)) + len = sizeof(wmi_roam_scan_mode_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + roam_scan_mode_fp = (wmi_roam_scan_mode_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&roam_scan_mode_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_scan_mode_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_scan_mode_fixed_param)); + + roam_scan_mode_fp->min_delay_roam_trigger_reason_bitmask = + roam_req->roam_trigger_reason_bitmask; + roam_scan_mode_fp->min_delay_btw_scans = + WMI_SEC_TO_MSEC(roam_req->min_delay_btw_roam_scans); + roam_scan_mode_fp->roam_scan_mode = roam_req->mode; + roam_scan_mode_fp->vdev_id = roam_req->vdev_id; + if (roam_req->mode == + (WMI_ROAM_SCAN_MODE_NONE | WMI_ROAM_SCAN_MODE_ROAMOFFLOAD)) { + roam_scan_mode_fp->flags |= + WMI_ROAM_SCAN_MODE_FLAG_REPORT_STATUS; + goto send_roam_scan_mode_cmd; + } + + /* Fill in scan parameters suitable for roaming scan */ + buf_ptr += sizeof(wmi_roam_scan_mode_fixed_param); + + qdf_mem_copy(buf_ptr, scan_cmd_fp, + sizeof(wmi_start_scan_cmd_fixed_param)); + /* Ensure there is no additional IEs */ + scan_cmd_fp->ie_len = 0; + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_STRUC_wmi_start_scan_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_start_scan_cmd_fixed_param)); +#ifdef WLAN_FEATURE_ROAM_OFFLOAD + buf_ptr += sizeof(wmi_start_scan_cmd_fixed_param); + if (roam_req->is_roam_req_valid && roam_req->roam_offload_enabled) { + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_roam_offload_tlv_param)); + buf_ptr += WMI_TLV_HDR_SIZE; + roam_offload_params = (wmi_roam_offload_tlv_param *) buf_ptr; + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_STRUC_wmi_roam_offload_tlv_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_offload_tlv_param)); + roam_offload_params->prefer_5g = roam_req->prefer_5ghz; + roam_offload_params->rssi_cat_gap = roam_req->roam_rssi_cat_gap; + roam_offload_params->select_5g_margin = + roam_req->select_5ghz_margin; + roam_offload_params->handoff_delay_for_rx = + req_offload_params->ho_delay_for_rx; + roam_offload_params->max_mlme_sw_retries = + req_offload_params->roam_preauth_retry_count; + roam_offload_params->no_ack_timeout = + req_offload_params->roam_preauth_no_ack_timeout; + roam_offload_params->reassoc_failure_timeout = + roam_req->reassoc_failure_timeout; + roam_offload_params->roam_candidate_validity_time = + roam_req->rct_validity_timer; + roam_offload_params->roam_to_current_bss_disable = + roam_req->disable_self_roam; + /* Fill the capabilities */ + roam_offload_params->capability = + req_offload_params->capability; + roam_offload_params->ht_caps_info = + req_offload_params->ht_caps_info; + roam_offload_params->ampdu_param = + req_offload_params->ampdu_param; + roam_offload_params->ht_ext_cap = + req_offload_params->ht_ext_cap; + roam_offload_params->ht_txbf = req_offload_params->ht_txbf; + roam_offload_params->asel_cap = req_offload_params->asel_cap; + roam_offload_params->qos_caps = req_offload_params->qos_caps; + roam_offload_params->qos_enabled = + req_offload_params->qos_enabled; + roam_offload_params->wmm_caps = req_offload_params->wmm_caps; + qdf_mem_copy((uint8_t *)roam_offload_params->mcsset, + (uint8_t *)req_offload_params->mcsset, + ROAM_OFFLOAD_NUM_MCS_SET); + + buf_ptr += sizeof(wmi_roam_offload_tlv_param); + /* The TLV's are in the order of 11i, 11R, ESE. Hence, + * they are filled in the same order.Depending on the + * authentication type, the other mode TLV's are nullified + * and only headers are filled.*/ + if ((auth_mode != WMI_AUTH_NONE) && + ((auth_mode != WMI_AUTH_OPEN) || + (auth_mode == WMI_AUTH_OPEN + && roam_req->mdid.mdie_present && + roam_req->is_11r_assoc) || + roam_req->is_ese_assoc)) { + if (roam_req->is_ese_assoc) { + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + WMITLV_GET_STRUCT_TLVLEN(0)); + buf_ptr += WMI_TLV_HDR_SIZE; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + WMITLV_GET_STRUCT_TLVLEN(0)); + buf_ptr += WMI_TLV_HDR_SIZE; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_roam_ese_offload_tlv_param)); + buf_ptr += WMI_TLV_HDR_SIZE; + roam_offload_ese = + (wmi_roam_ese_offload_tlv_param *) buf_ptr; + qdf_mem_copy(roam_offload_ese->krk, + roam_req->krk, + sizeof(roam_req->krk)); + qdf_mem_copy(roam_offload_ese->btk, + roam_req->btk, + sizeof(roam_req->btk)); + WMITLV_SET_HDR(&roam_offload_ese->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_ese_offload_tlv_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_ese_offload_tlv_param)); + buf_ptr += + sizeof(wmi_roam_ese_offload_tlv_param); + } else if (auth_mode == WMI_AUTH_FT_RSNA || + auth_mode == WMI_AUTH_FT_RSNA_PSK || + auth_mode == WMI_AUTH_FT_RSNA_SAE || + (auth_mode == + WMI_AUTH_FT_RSNA_SUITE_B_8021X_SHA384) || + (auth_mode == + WMI_AUTH_FT_RSNA_FILS_SHA256) || + (auth_mode == + WMI_AUTH_FT_RSNA_FILS_SHA384) || + (auth_mode == WMI_AUTH_OPEN + && roam_req->mdid.mdie_present && + roam_req->is_11r_assoc)) { + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + 0); + buf_ptr += WMI_TLV_HDR_SIZE; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_roam_11r_offload_tlv_param)); + buf_ptr += WMI_TLV_HDR_SIZE; + roam_offload_11r = + (wmi_roam_11r_offload_tlv_param *) buf_ptr; + roam_offload_11r->r0kh_id_len = + roam_req->rokh_id_length; + qdf_mem_copy(roam_offload_11r->r0kh_id, + roam_req->rokh_id, + roam_offload_11r->r0kh_id_len); + fill_roam_offload_11r_params(auth_mode, + roam_offload_11r, + roam_req); + roam_offload_11r->mdie_present = + roam_req->mdid.mdie_present; + roam_offload_11r->mdid = + roam_req->mdid.mobility_domain; + roam_offload_11r->adaptive_11r = + roam_req->is_adaptive_11r; + roam_offload_11r->ft_im_for_deauth = + roam_req->enable_ft_im_roaming; + + if (auth_mode == WMI_AUTH_OPEN) { + /* If FT-Open ensure pmk length + and r0khid len are zero */ + roam_offload_11r->r0kh_id_len = 0; + roam_offload_11r->psk_msk_len = 0; + } + WMITLV_SET_HDR(&roam_offload_11r->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_11r_offload_tlv_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_11r_offload_tlv_param)); + buf_ptr += + sizeof(wmi_roam_11r_offload_tlv_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + WMITLV_GET_STRUCT_TLVLEN(0)); + buf_ptr += WMI_TLV_HDR_SIZE; + WMI_LOGD("psk_msk_len = %d psk_msk_ext:%d", + roam_offload_11r->psk_msk_len, + roam_offload_11r->psk_msk_ext_len); + if (roam_offload_11r->psk_msk_len) + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, + QDF_TRACE_LEVEL_DEBUG, + roam_offload_11r->psk_msk, + roam_offload_11r->psk_msk_len); + } else { + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_roam_11i_offload_tlv_param)); + buf_ptr += WMI_TLV_HDR_SIZE; + roam_offload_11i = + (wmi_roam_11i_offload_tlv_param *) buf_ptr; + + if (roam_req->fw_okc) { + WMI_SET_ROAM_OFFLOAD_OKC_ENABLED + (roam_offload_11i->flags); + WMI_LOGI("LFR3:OKC enabled"); + } else { + WMI_SET_ROAM_OFFLOAD_OKC_DISABLED + (roam_offload_11i->flags); + WMI_LOGI("LFR3:OKC disabled"); + } + + if (roam_req->fw_pmksa_cache) { + WMI_SET_ROAM_OFFLOAD_PMK_CACHE_ENABLED + (roam_offload_11i->flags); + WMI_LOGI("LFR3:PMKSA caching enabled"); + } else { + WMI_SET_ROAM_OFFLOAD_PMK_CACHE_DISABLED + (roam_offload_11i->flags); + WMI_LOGI("LFR3:PMKSA caching disabled"); + } + + wmi_fill_sae_single_pmk_param(roam_req, + roam_offload_11i); + + roam_offload_11i->pmk_len = roam_req->pmk_len > + ROAM_OFFLOAD_PMK_BYTES ? + ROAM_OFFLOAD_PMK_BYTES : + roam_req->pmk_len; + + qdf_mem_copy(roam_offload_11i->pmk, + roam_req->psk_pmk, + roam_offload_11i->pmk_len); + + WMITLV_SET_HDR(&roam_offload_11i->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_11i_offload_tlv_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_11i_offload_tlv_param)); + buf_ptr += + sizeof(wmi_roam_11i_offload_tlv_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + 0); + buf_ptr += WMI_TLV_HDR_SIZE; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + 0); + buf_ptr += WMI_TLV_HDR_SIZE; + WMI_LOGD("pmk_len = %d", + roam_offload_11i->pmk_len); + if (roam_offload_11i->pmk_len) + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, + QDF_TRACE_LEVEL_DEBUG, + roam_offload_11i->pmk, + roam_offload_11i->pmk_len); + } + } else { + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + WMITLV_GET_STRUCT_TLVLEN(0)); + buf_ptr += WMI_TLV_HDR_SIZE; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + WMITLV_GET_STRUCT_TLVLEN(0)); + buf_ptr += WMI_TLV_HDR_SIZE; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + WMITLV_GET_STRUCT_TLVLEN(0)); + buf_ptr += WMI_TLV_HDR_SIZE; + } + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(*assoc_ies)); + buf_ptr += WMI_TLV_HDR_SIZE; + + assoc_ies = (wmi_tlv_buf_len_param *) buf_ptr; + WMITLV_SET_HDR(&assoc_ies->tlv_header, + WMITLV_TAG_STRUC_wmi_tlv_buf_len_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_tlv_buf_len_param)); + assoc_ies->buf_len = roam_req->assoc_ie_length; + + buf_ptr += sizeof(*assoc_ies); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + roundup(assoc_ies->buf_len, sizeof(uint32_t))); + buf_ptr += WMI_TLV_HDR_SIZE; + + if (assoc_ies->buf_len != 0) { + qdf_mem_copy(buf_ptr, roam_req->assoc_ie, + assoc_ies->buf_len); + } + buf_ptr += qdf_roundup(assoc_ies->buf_len, sizeof(uint32_t)); + buf_ptr = wmi_add_fils_tlv(wmi_handle, roam_req, + buf_ptr, fils_tlv_len); + } else { + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + WMITLV_GET_STRUCT_TLVLEN(0)); + buf_ptr += WMI_TLV_HDR_SIZE; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + WMITLV_GET_STRUCT_TLVLEN(0)); + buf_ptr += WMI_TLV_HDR_SIZE; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + WMITLV_GET_STRUCT_TLVLEN(0)); + buf_ptr += WMI_TLV_HDR_SIZE; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + WMITLV_GET_STRUCT_TLVLEN(0)); + buf_ptr += WMI_TLV_HDR_SIZE; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + WMITLV_GET_STRUCT_TLVLEN(0)); + buf_ptr += WMI_TLV_HDR_SIZE; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + WMITLV_GET_STRUCT_TLVLEN(0)); + } +#endif /* WLAN_FEATURE_ROAM_OFFLOAD */ + +send_roam_scan_mode_cmd: + wmi_mtrace(WMI_ROAM_SCAN_MODE, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_SCAN_MODE); + if (QDF_IS_STATUS_ERROR(status)) + wmi_buf_free(buf); + + return status; +} + +/** + * convert_roam_trigger_reason() - Function to convert unified Roam trigger + * enum to TLV specific WMI_ROAM_TRIGGER_REASON_ID + * @reason: Roam trigger reason + * + * Return: WMI_ROAM_TRIGGER_REASON_ID + */ +static WMI_ROAM_TRIGGER_REASON_ID +convert_roam_trigger_reason(enum roam_trigger_reason trigger_reason) { + + switch (trigger_reason) { + case ROAM_TRIGGER_REASON_NONE: + return WMI_ROAM_TRIGGER_REASON_NONE; + case ROAM_TRIGGER_REASON_PER: + return WMI_ROAM_TRIGGER_REASON_PER; + case ROAM_TRIGGER_REASON_BMISS: + return WMI_ROAM_TRIGGER_REASON_BMISS; + case ROAM_TRIGGER_REASON_LOW_RSSI: + return WMI_ROAM_TRIGGER_REASON_LOW_RSSI; + case ROAM_TRIGGER_REASON_HIGH_RSSI: + return WMI_ROAM_TRIGGER_REASON_HIGH_RSSI; + case ROAM_TRIGGER_REASON_PERIODIC: + return WMI_ROAM_TRIGGER_REASON_PERIODIC; + case ROAM_TRIGGER_REASON_MAWC: + return WMI_ROAM_TRIGGER_REASON_MAWC; + case ROAM_TRIGGER_REASON_DENSE: + return WMI_ROAM_TRIGGER_REASON_DENSE; + case ROAM_TRIGGER_REASON_BACKGROUND: + return WMI_ROAM_TRIGGER_REASON_BACKGROUND; + case ROAM_TRIGGER_REASON_FORCED: + return WMI_ROAM_TRIGGER_REASON_FORCED; + case ROAM_TRIGGER_REASON_BTM: + return WMI_ROAM_TRIGGER_REASON_BTM; + case ROAM_TRIGGER_REASON_UNIT_TEST: + return WMI_ROAM_TRIGGER_REASON_UNIT_TEST; + case ROAM_TRIGGER_REASON_BSS_LOAD: + return WMI_ROAM_TRIGGER_REASON_BSS_LOAD; + case ROAM_TRIGGER_REASON_DEAUTH: + return WMI_ROAM_TRIGGER_REASON_DEAUTH; + case ROAM_TRIGGER_REASON_IDLE: + return WMI_ROAM_TRIGGER_REASON_IDLE; + case ROAM_TRIGGER_REASON_MAX: + return WMI_ROAM_TRIGGER_REASON_MAX; + default: + return WMI_ROAM_TRIGGER_REASON_NONE; + } +} + +/** + * send_roam_scan_offload_ap_profile_cmd_tlv() - set roam ap profile in fw + * @wmi_handle: wmi handle + * @ap_profile_p: ap profile + * + * Send WMI_ROAM_AP_PROFILE to firmware + * + * Return: CDF status + */ +static QDF_STATUS +send_roam_scan_offload_ap_profile_cmd_tlv(wmi_unified_t wmi_handle, + struct ap_profile_params *ap_profile) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + size_t len; + uint8_t *buf_ptr; + wmi_roam_ap_profile_fixed_param *roam_ap_profile_fp; + wmi_roam_cnd_scoring_param *score_param; + wmi_ap_profile *profile; + wmi_roam_score_delta_param *score_delta_param; + wmi_roam_cnd_min_rssi_param *min_rssi_param; + enum roam_trigger_reason trig_reason; + + len = sizeof(wmi_roam_ap_profile_fixed_param) + sizeof(wmi_ap_profile); + len += sizeof(*score_param); + len += WMI_TLV_HDR_SIZE; + len += NUM_OF_ROAM_TRIGGERS * sizeof(*score_delta_param); + len += WMI_TLV_HDR_SIZE; + len += NUM_OF_ROAM_MIN_RSSI * sizeof(*min_rssi_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + roam_ap_profile_fp = (wmi_roam_ap_profile_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&roam_ap_profile_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_ap_profile_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_ap_profile_fixed_param)); + /* fill in threshold values */ + roam_ap_profile_fp->vdev_id = ap_profile->vdev_id; + roam_ap_profile_fp->id = 0; + buf_ptr += sizeof(wmi_roam_ap_profile_fixed_param); + + profile = (wmi_ap_profile *)buf_ptr; + WMITLV_SET_HDR(&profile->tlv_header, + WMITLV_TAG_STRUC_wmi_ap_profile, + WMITLV_GET_STRUCT_TLVLEN(wmi_ap_profile)); + profile->flags = ap_profile->profile.flags; + profile->rssi_threshold = ap_profile->profile.rssi_threshold; + profile->ssid.ssid_len = ap_profile->profile.ssid.length; + qdf_mem_copy(profile->ssid.ssid, ap_profile->profile.ssid.mac_ssid, + profile->ssid.ssid_len); + profile->rsn_authmode = ap_profile->profile.rsn_authmode; + profile->rsn_ucastcipherset = ap_profile->profile.rsn_ucastcipherset; + profile->rsn_mcastcipherset = ap_profile->profile.rsn_mcastcipherset; + profile->rsn_mcastmgmtcipherset = + ap_profile->profile.rsn_mcastmgmtcipherset; + profile->rssi_abs_thresh = ap_profile->profile.rssi_abs_thresh; + + WMI_LOGD("AP PROFILE: flags %x rssi_threshold %d ssid:%.*s authmode %d uc cipher %d mc cipher %d mc mgmt cipher %d rssi abs thresh %d", + profile->flags, profile->rssi_threshold, + profile->ssid.ssid_len, ap_profile->profile.ssid.mac_ssid, + profile->rsn_authmode, profile->rsn_ucastcipherset, + profile->rsn_mcastcipherset, profile->rsn_mcastmgmtcipherset, + profile->rssi_abs_thresh); + + buf_ptr += sizeof(wmi_ap_profile); + + score_param = (wmi_roam_cnd_scoring_param *)buf_ptr; + WMITLV_SET_HDR(&score_param->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_cnd_scoring_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_cnd_scoring_param)); + score_param->disable_bitmap = ap_profile->param.disable_bitmap; + score_param->rssi_weightage_pcnt = + ap_profile->param.rssi_weightage; + score_param->ht_weightage_pcnt = ap_profile->param.ht_weightage; + score_param->vht_weightage_pcnt = ap_profile->param.vht_weightage; + score_param->he_weightage_pcnt = ap_profile->param.he_weightage; + score_param->bw_weightage_pcnt = ap_profile->param.bw_weightage; + score_param->band_weightage_pcnt = ap_profile->param.band_weightage; + score_param->nss_weightage_pcnt = ap_profile->param.nss_weightage; + score_param->esp_qbss_weightage_pcnt = + ap_profile->param.esp_qbss_weightage; + score_param->beamforming_weightage_pcnt = + ap_profile->param.beamforming_weightage; + score_param->pcl_weightage_pcnt = ap_profile->param.pcl_weightage; + score_param->oce_wan_weightage_pcnt = + ap_profile->param.oce_wan_weightage; + score_param->vendor_roam_score_algorithm_id = + ap_profile->param.vendor_roam_score_algorithm; + + WMI_LOGD("Score params weightage: disable_bitmap %x rssi %d ht %d vht %d he %d BW %d band %d NSS %d ESP %d BF %d PCL %d OCE WAN %d roam score algo %d", + score_param->disable_bitmap, score_param->rssi_weightage_pcnt, + score_param->ht_weightage_pcnt, + score_param->vht_weightage_pcnt, + score_param->he_weightage_pcnt, score_param->bw_weightage_pcnt, + score_param->band_weightage_pcnt, + score_param->nss_weightage_pcnt, + score_param->esp_qbss_weightage_pcnt, + score_param->beamforming_weightage_pcnt, + score_param->pcl_weightage_pcnt, + score_param->oce_wan_weightage_pcnt, + score_param->vendor_roam_score_algorithm_id); + + score_param->bw_scoring.score_pcnt = ap_profile->param.bw_index_score; + score_param->band_scoring.score_pcnt = + ap_profile->param.band_index_score; + score_param->nss_scoring.score_pcnt = + ap_profile->param.nss_index_score; + + WMI_LOGD("Params index score bitmask: bw_index_score %x band_index_score %x nss_index_score %x", + score_param->bw_scoring.score_pcnt, + score_param->band_scoring.score_pcnt, + score_param->nss_scoring.score_pcnt); + + score_param->rssi_scoring.best_rssi_threshold = + (-1) * ap_profile->param.rssi_scoring.best_rssi_threshold; + score_param->rssi_scoring.good_rssi_threshold = + (-1) * ap_profile->param.rssi_scoring.good_rssi_threshold; + score_param->rssi_scoring.bad_rssi_threshold = + (-1) * ap_profile->param.rssi_scoring.bad_rssi_threshold; + score_param->rssi_scoring.good_rssi_pcnt = + ap_profile->param.rssi_scoring.good_rssi_pcnt; + score_param->rssi_scoring.bad_rssi_pcnt = + ap_profile->param.rssi_scoring.bad_rssi_pcnt; + score_param->rssi_scoring.good_bucket_size = + ap_profile->param.rssi_scoring.good_bucket_size; + score_param->rssi_scoring.bad_bucket_size = + ap_profile->param.rssi_scoring.bad_bucket_size; + score_param->rssi_scoring.rssi_pref_5g_rssi_thresh = + (-1) * ap_profile->param.rssi_scoring.rssi_pref_5g_rssi_thresh; + + WMI_LOGD("Rssi scoring threshold: best RSSI %d good RSSI %d bad RSSI %d prefer 5g threshold %d", + score_param->rssi_scoring.best_rssi_threshold, + score_param->rssi_scoring.good_rssi_threshold, + score_param->rssi_scoring.bad_rssi_threshold, + score_param->rssi_scoring.rssi_pref_5g_rssi_thresh); + WMI_LOGD("Good RSSI score for each slot %d bad RSSI score for each slot %d good bucket %d bad bucket %d", + score_param->rssi_scoring.good_rssi_pcnt, + score_param->rssi_scoring.bad_rssi_pcnt, + score_param->rssi_scoring.good_bucket_size, + score_param->rssi_scoring.bad_bucket_size); + + score_param->esp_qbss_scoring.num_slot = + ap_profile->param.esp_qbss_scoring.num_slot; + score_param->esp_qbss_scoring.score_pcnt3_to_0 = + ap_profile->param.esp_qbss_scoring.score_pcnt3_to_0; + score_param->esp_qbss_scoring.score_pcnt7_to_4 = + ap_profile->param.esp_qbss_scoring.score_pcnt7_to_4; + score_param->esp_qbss_scoring.score_pcnt11_to_8 = + ap_profile->param.esp_qbss_scoring.score_pcnt11_to_8; + score_param->esp_qbss_scoring.score_pcnt15_to_12 = + ap_profile->param.esp_qbss_scoring.score_pcnt15_to_12; + + WMI_LOGD("ESP QBSS index weight: slots %d weight 0to3 %x weight 4to7 %x weight 8to11 %x weight 12to15 %x", + score_param->esp_qbss_scoring.num_slot, + score_param->esp_qbss_scoring.score_pcnt3_to_0, + score_param->esp_qbss_scoring.score_pcnt7_to_4, + score_param->esp_qbss_scoring.score_pcnt11_to_8, + score_param->esp_qbss_scoring.score_pcnt15_to_12); + + score_param->oce_wan_scoring.num_slot = + ap_profile->param.oce_wan_scoring.num_slot; + score_param->oce_wan_scoring.score_pcnt3_to_0 = + ap_profile->param.oce_wan_scoring.score_pcnt3_to_0; + score_param->oce_wan_scoring.score_pcnt7_to_4 = + ap_profile->param.oce_wan_scoring.score_pcnt7_to_4; + score_param->oce_wan_scoring.score_pcnt11_to_8 = + ap_profile->param.oce_wan_scoring.score_pcnt11_to_8; + score_param->oce_wan_scoring.score_pcnt15_to_12 = + ap_profile->param.oce_wan_scoring.score_pcnt15_to_12; + + WMI_LOGD("OCE WAN index weight: slots %d weight 0to3 %x weight 4to7 %x weight 8to11 %x weight 12to15 %x", + score_param->oce_wan_scoring.num_slot, + score_param->oce_wan_scoring.score_pcnt3_to_0, + score_param->oce_wan_scoring.score_pcnt7_to_4, + score_param->oce_wan_scoring.score_pcnt11_to_8, + score_param->oce_wan_scoring.score_pcnt15_to_12); + + score_param->roam_score_delta_pcnt = ap_profile->param.roam_score_delta; + score_param->roam_score_delta_mask = + ap_profile->param.roam_trigger_bitmap; + score_param->candidate_min_roam_score_delta = + ap_profile->param.cand_min_roam_score_delta; + WMI_LOGD("Roam score delta:%d Roam_trigger_bitmap:%x cand min score delta = %d", + score_param->roam_score_delta_pcnt, + score_param->roam_score_delta_mask, + score_param->candidate_min_roam_score_delta); + + buf_ptr += sizeof(*score_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + (NUM_OF_ROAM_TRIGGERS * sizeof(*score_delta_param))); + buf_ptr += WMI_TLV_HDR_SIZE; + + score_delta_param = (wmi_roam_score_delta_param *)buf_ptr; + WMITLV_SET_HDR(&score_delta_param->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_score_delta_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_score_delta_param)); + trig_reason = + ap_profile->score_delta_param[IDLE_ROAM_TRIGGER].trigger_reason; + score_delta_param->roam_trigger_reason = + convert_roam_trigger_reason(trig_reason); + score_delta_param->roam_score_delta = + ap_profile->score_delta_param[IDLE_ROAM_TRIGGER].roam_score_delta; + + buf_ptr += sizeof(*score_delta_param); + score_delta_param = (wmi_roam_score_delta_param *)buf_ptr; + WMITLV_SET_HDR(&score_delta_param->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_score_delta_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_score_delta_param)); + trig_reason = + ap_profile->score_delta_param[BTM_ROAM_TRIGGER].trigger_reason; + score_delta_param->roam_trigger_reason = + convert_roam_trigger_reason(trig_reason); + score_delta_param->roam_score_delta = + ap_profile->score_delta_param[BTM_ROAM_TRIGGER].roam_score_delta; + + buf_ptr += sizeof(*score_delta_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + (NUM_OF_ROAM_MIN_RSSI * sizeof(*min_rssi_param))); + buf_ptr += WMI_TLV_HDR_SIZE; + + min_rssi_param = (wmi_roam_cnd_min_rssi_param *)buf_ptr; + WMITLV_SET_HDR(&min_rssi_param->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_cnd_min_rssi_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_cnd_min_rssi_param)); + trig_reason = + ap_profile->min_rssi_params[DEAUTH_MIN_RSSI].trigger_reason; + min_rssi_param->roam_trigger_reason = + convert_roam_trigger_reason(trig_reason); + min_rssi_param->candidate_min_rssi = + ap_profile->min_rssi_params[DEAUTH_MIN_RSSI].min_rssi; + + buf_ptr += sizeof(*min_rssi_param); + min_rssi_param = (wmi_roam_cnd_min_rssi_param *)buf_ptr; + WMITLV_SET_HDR(&min_rssi_param->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_cnd_min_rssi_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_cnd_min_rssi_param)); + trig_reason = + ap_profile->min_rssi_params[BMISS_MIN_RSSI].trigger_reason; + min_rssi_param->roam_trigger_reason = + convert_roam_trigger_reason(trig_reason); + min_rssi_param->candidate_min_rssi = + ap_profile->min_rssi_params[BMISS_MIN_RSSI].min_rssi; + + buf_ptr += sizeof(*min_rssi_param); + min_rssi_param = (wmi_roam_cnd_min_rssi_param *)buf_ptr; + WMITLV_SET_HDR(&min_rssi_param->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_cnd_min_rssi_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_cnd_min_rssi_param)); + trig_reason = + ap_profile->min_rssi_params[MIN_RSSI_2G_TO_5G_ROAM].trigger_reason; + min_rssi_param->roam_trigger_reason = + convert_roam_trigger_reason(trig_reason); + min_rssi_param->candidate_min_rssi = + ap_profile->min_rssi_params[MIN_RSSI_2G_TO_5G_ROAM].min_rssi; + + wmi_mtrace(WMI_ROAM_AP_PROFILE, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_AP_PROFILE); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("wmi_unified_cmd_send WMI_ROAM_AP_PROFILE returned Error %d", + status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_roam_scan_offload_cmd_tlv() - set roam offload command + * @wmi_handle: wmi handle + * @command: command + * @vdev_id: vdev id + * + * This function set roam offload command to fw. + * + * Return: CDF status + */ +static QDF_STATUS send_roam_scan_offload_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t command, uint32_t vdev_id) +{ + QDF_STATUS status; + wmi_roam_scan_cmd_fixed_param *cmd_fp; + wmi_buf_t buf = NULL; + int len; + uint8_t *buf_ptr; + + len = sizeof(wmi_roam_scan_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + + cmd_fp = (wmi_roam_scan_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_scan_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_scan_cmd_fixed_param)); + cmd_fp->vdev_id = vdev_id; + cmd_fp->command_arg = command; + + wmi_mtrace(WMI_ROAM_SCAN_CMD, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_SCAN_CMD); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("wmi_unified_cmd_send WMI_ROAM_SCAN_CMD returned Error %d", + status); + goto error; + } + + WMI_LOGI("%s: WMI --> WMI_ROAM_SCAN_CMD", __func__); + return QDF_STATUS_SUCCESS; + +error: + wmi_buf_free(buf); + + return status; +} + +/** + * send_roam_scan_offload_scan_period_cmd_tlv() - set roam offload scan period + * @wmi_handle: wmi handle + * @param: roam scan parameters to be sent to firmware + * + * Send WMI_ROAM_SCAN_PERIOD parameters to fw. + * + * Return: QDF status + */ +static QDF_STATUS +send_roam_scan_offload_scan_period_cmd_tlv( + wmi_unified_t wmi_handle, + struct roam_scan_period_params *param) +{ + QDF_STATUS status; + wmi_buf_t buf = NULL; + int len; + uint8_t *buf_ptr; + wmi_roam_scan_period_fixed_param *scan_period_fp; + + /* Send scan period values */ + len = sizeof(wmi_roam_scan_period_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + scan_period_fp = (wmi_roam_scan_period_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&scan_period_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_scan_period_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_scan_period_fixed_param)); + /* fill in scan period values */ + scan_period_fp->vdev_id = param->vdev_id; + scan_period_fp->roam_scan_period = param->scan_period; + scan_period_fp->roam_scan_age = param->scan_age; + scan_period_fp->inactivity_time_period = + param->roam_scan_inactivity_time; + scan_period_fp->roam_inactive_count = + param->roam_inactive_data_packet_count; + scan_period_fp->roam_scan_period_after_inactivity = + param->roam_scan_period_after_inactivity; + /* Firmware expects the full scan preriod in msec whereas host + * provides the same in seconds. + * Convert it to msec and send to firmware + */ + scan_period_fp->roam_full_scan_period = param->full_scan_period * 1000; + + WMI_LOGD("%s: roam_scan_period=%d, roam_scan_age=%d, full_scan_period= %u", + __func__, scan_period_fp->roam_scan_period, + scan_period_fp->roam_scan_age, + scan_period_fp->roam_full_scan_period); + WMI_LOGD("%s: inactiviy period:%d inactive count:%d period after inactivity:%d", + __func__, scan_period_fp->inactivity_time_period, + scan_period_fp->roam_inactive_count, + scan_period_fp->roam_scan_period_after_inactivity); + + wmi_mtrace(WMI_ROAM_SCAN_PERIOD, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ROAM_SCAN_PERIOD); + if (QDF_IS_STATUS_ERROR(status)) { + wmi_buf_free(buf); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_roam_scan_offload_chan_list_cmd_tlv() - set roam offload channel list + * @wmi_handle: wmi handle + * @chan_count: channel count + * @chan_list: channel list + * @list_type: list type + * @vdev_id: vdev id + * + * Set roam offload channel list. + * + * Return: CDF status + */ +static QDF_STATUS send_roam_scan_offload_chan_list_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t chan_count, + uint32_t *chan_list, + uint8_t list_type, uint32_t vdev_id) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len, list_tlv_len; + int i; + uint8_t *buf_ptr; + wmi_roam_chan_list_fixed_param *chan_list_fp; + uint32_t *roam_chan_list_array; + + /* Channel list is a table of 2 TLV's */ + list_tlv_len = WMI_TLV_HDR_SIZE + chan_count * sizeof(uint32_t); + len = sizeof(wmi_roam_chan_list_fixed_param) + list_tlv_len; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + chan_list_fp = (wmi_roam_chan_list_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&chan_list_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_chan_list_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_chan_list_fixed_param)); + chan_list_fp->vdev_id = vdev_id; + chan_list_fp->num_chan = chan_count; + if (list_type == WMI_CHANNEL_LIST_STATIC) { + /* external app is controlling channel list */ + chan_list_fp->chan_list_type = + WMI_ROAM_SCAN_CHAN_LIST_TYPE_STATIC; + } else { + /* umac supplied occupied channel list in LFR */ + chan_list_fp->chan_list_type = + WMI_ROAM_SCAN_CHAN_LIST_TYPE_DYNAMIC; + } + + buf_ptr += sizeof(wmi_roam_chan_list_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (chan_list_fp->num_chan * sizeof(uint32_t))); + roam_chan_list_array = (uint32_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; ((i < chan_list_fp->num_chan) && + (i < WMI_ROAM_MAX_CHANNELS)); i++) + roam_chan_list_array[i] = chan_list[i]; + + wmi_mtrace(WMI_ROAM_CHAN_LIST, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_CHAN_LIST); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("wmi_unified_cmd_send WMI_ROAM_CHAN_LIST returned Error %d", + status); + goto error; + } + + return QDF_STATUS_SUCCESS; +error: + wmi_buf_free(buf); + + return status; +} + +/** + * send_roam_scan_offload_rssi_change_cmd_tlv() - set roam offload RSSI th + * @wmi_handle: wmi handle + * @rssi_change_thresh: RSSI Change threshold + * @bcn_rssi_weight: beacon RSSI weight + * @vdev_id: vdev id + * + * Send WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD parameters to fw. + * + * Return: CDF status + */ +static QDF_STATUS send_roam_scan_offload_rssi_change_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, + int32_t rssi_change_thresh, + uint32_t bcn_rssi_weight, + uint32_t hirssi_delay_btw_scans) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len; + uint8_t *buf_ptr; + wmi_roam_scan_rssi_change_threshold_fixed_param *rssi_change_fp; + + /* Send rssi change parameters */ + len = sizeof(wmi_roam_scan_rssi_change_threshold_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + rssi_change_fp = + (wmi_roam_scan_rssi_change_threshold_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&rssi_change_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_scan_rssi_change_threshold_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_scan_rssi_change_threshold_fixed_param)); + /* fill in rssi change threshold (hysteresis) values */ + rssi_change_fp->vdev_id = vdev_id; + rssi_change_fp->roam_scan_rssi_change_thresh = rssi_change_thresh; + rssi_change_fp->bcn_rssi_weight = bcn_rssi_weight; + rssi_change_fp->hirssi_delay_btw_scans = hirssi_delay_btw_scans; + + wmi_mtrace(WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("wmi_unified_cmd_send WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD returned Error %d", + status); + goto error; + } + + wmi_nofl_debug("roam_scan_rssi_change_thresh %d bcn_rssi_weight %d hirssi_delay_btw_scans %d", + rssi_change_thresh, bcn_rssi_weight, + hirssi_delay_btw_scans); + + return QDF_STATUS_SUCCESS; +error: + wmi_buf_free(buf); + + return status; +} + +/** + * send_per_roam_config_cmd_tlv() - set per roaming config to FW + * @wmi_handle: wmi handle + * @req_buf: per roam config buffer + * + * Return: QDF status + */ +static QDF_STATUS send_per_roam_config_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_per_roam_config_req *req_buf) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len; + uint8_t *buf_ptr; + wmi_roam_per_config_fixed_param *wmi_per_config; + + len = sizeof(wmi_roam_per_config_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + wmi_per_config = + (wmi_roam_per_config_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&wmi_per_config->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_per_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_per_config_fixed_param)); + + /* fill in per roam config values */ + wmi_per_config->vdev_id = req_buf->vdev_id; + + wmi_per_config->enable = req_buf->per_config.enable; + wmi_per_config->high_rate_thresh = + (req_buf->per_config.tx_high_rate_thresh << 16) | + (req_buf->per_config.rx_high_rate_thresh & 0x0000ffff); + wmi_per_config->low_rate_thresh = + (req_buf->per_config.tx_low_rate_thresh << 16) | + (req_buf->per_config.rx_low_rate_thresh & 0x0000ffff); + wmi_per_config->pkt_err_rate_thresh_pct = + (req_buf->per_config.tx_rate_thresh_percnt << 16) | + (req_buf->per_config.rx_rate_thresh_percnt & 0x0000ffff); + wmi_per_config->per_rest_time = req_buf->per_config.per_rest_time; + wmi_per_config->pkt_err_rate_mon_time = + (req_buf->per_config.tx_per_mon_time << 16) | + (req_buf->per_config.rx_per_mon_time & 0x0000ffff); + wmi_per_config->min_candidate_rssi = + req_buf->per_config.min_candidate_rssi; + + /* Send per roam config parameters */ + wmi_mtrace(WMI_ROAM_PER_CONFIG_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_PER_CONFIG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_ROAM_PER_CONFIG_CMDID failed, Error %d", + status); + wmi_buf_free(buf); + return status; + } + WMI_LOGD(FL("per roam enable=%d, vdev=%d"), + req_buf->per_config.enable, req_buf->vdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * send_limit_off_chan_cmd_tlv() - send wmi cmd of limit off chan + * configuration params + * @wmi_handle: wmi handler + * @limit_off_chan_param: pointer to wmi_off_chan_param + * + * Return: 0 for success and non zero for failure + */ +static +QDF_STATUS send_limit_off_chan_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_limit_off_chan_param *limit_off_chan_param) +{ + wmi_vdev_limit_offchan_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len = sizeof(*cmd); + int err; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_vdev_limit_offchan_cmd_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_limit_offchan_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_limit_offchan_cmd_fixed_param)); + + cmd->vdev_id = limit_off_chan_param->vdev_id; + + cmd->flags &= 0; + if (limit_off_chan_param->status) + cmd->flags |= WMI_VDEV_LIMIT_OFFCHAN_ENABLE; + if (limit_off_chan_param->skip_dfs_chans) + cmd->flags |= WMI_VDEV_LIMIT_OFFCHAN_SKIP_DFS; + + cmd->max_offchan_time = limit_off_chan_param->max_offchan_time; + cmd->rest_time = limit_off_chan_param->rest_time; + + WMI_LOGE("%s: vdev_id=%d, flags =%x, max_offchan_time=%d, rest_time=%d", + __func__, cmd->vdev_id, cmd->flags, cmd->max_offchan_time, + cmd->rest_time); + + wmi_mtrace(WMI_VDEV_LIMIT_OFFCHAN_CMDID, cmd->vdev_id, 0); + err = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_VDEV_LIMIT_OFFCHAN_CMDID); + if (QDF_IS_STATUS_ERROR(err)) { + WMI_LOGE("Failed to send limit off chan cmd err=%d", err); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_FILS_SK +static QDF_STATUS send_roam_scan_send_hlp_cmd_tlv(wmi_unified_t wmi_handle, + struct hlp_params *params) +{ + uint32_t len; + uint8_t *buf_ptr; + wmi_buf_t buf = NULL; + wmi_pdev_update_fils_hlp_pkt_cmd_fixed_param *hlp_params; + + len = sizeof(wmi_pdev_update_fils_hlp_pkt_cmd_fixed_param); + len += WMI_TLV_HDR_SIZE; + len += qdf_roundup(params->hlp_ie_len, sizeof(uint32_t)); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + hlp_params = (wmi_pdev_update_fils_hlp_pkt_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&hlp_params->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_update_fils_hlp_pkt_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_update_fils_hlp_pkt_cmd_fixed_param)); + + hlp_params->vdev_id = params->vdev_id; + hlp_params->size = params->hlp_ie_len; + hlp_params->pkt_type = WMI_FILS_HLP_PKT_TYPE_DHCP_DISCOVER; + + buf_ptr += sizeof(*hlp_params); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + round_up(params->hlp_ie_len, + sizeof(uint32_t))); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, params->hlp_ie, params->hlp_ie_len); + + WMI_LOGD(FL("send FILS HLP pkt vdev %d len %d"), + hlp_params->vdev_id, hlp_params->size); + wmi_mtrace(WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID)) { + WMI_LOGE(FL("Failed to send FILS HLP pkt cmd")); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +void wmi_fils_sk_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_roam_scan_hlp_cmd = send_roam_scan_send_hlp_cmd_tlv; +} +#endif /* WLAN_FEATURE_FILS_SK */ + +/* + * send_btm_config_cmd_tlv() - Send wmi cmd for BTM config + * @wmi_handle: wmi handle + * @params: pointer to wmi_btm_config + * + * Return: QDF_STATUS + */ +static QDF_STATUS send_btm_config_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_btm_config *params) +{ + + wmi_btm_config_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_btm_config_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_btm_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_btm_config_fixed_param)); + cmd->vdev_id = params->vdev_id; + cmd->flags = params->btm_offload_config; + cmd->max_attempt_cnt = params->btm_max_attempt_cnt; + cmd->solicited_timeout_ms = params->btm_solicited_timeout; + cmd->stick_time_seconds = params->btm_sticky_time; + cmd->disassoc_timer_threshold = params->disassoc_timer_threshold; + cmd->btm_bitmap = params->btm_query_bitmask; + cmd->btm_candidate_min_score = params->btm_candidate_min_score; + + wmi_mtrace(WMI_ROAM_BTM_CONFIG_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ROAM_BTM_CONFIG_CMDID)) { + WMI_LOGE("%s: failed to send WMI_ROAM_BTM_CONFIG_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_roam_bss_load_config_tlv() - send roam load bss trigger configuration + * @wmi_handle: wmi handle + * @parms: pointer to wmi_bss_load_config + * + * This function sends the roam load bss trigger configuration to fw. + * the bss_load_threshold parameter is used to configure the maximum + * bss load percentage, above which the firmware should trigger roaming + * + * Return: QDF status + */ +static QDF_STATUS +send_roam_bss_load_config_tlv(wmi_unified_t wmi_handle, + struct wmi_bss_load_config *params) +{ + wmi_roam_bss_load_config_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_roam_bss_load_config_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR( + &cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_bss_load_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_bss_load_config_cmd_fixed_param)); + cmd->vdev_id = params->vdev_id; + cmd->bss_load_threshold = params->bss_load_threshold; + cmd->monitor_time_window = params->bss_load_sample_time; + cmd->rssi_2g_threshold = params->rssi_threshold_24ghz; + cmd->rssi_5g_threshold = params->rssi_threshold_5ghz; + + WMI_LOGD("%s: vdev:%d bss_load_thres:%d monitor_time:%d rssi_2g:%d rssi_5g:%d", + __func__, cmd->vdev_id, cmd->bss_load_threshold, + cmd->monitor_time_window, cmd->rssi_2g_threshold, + cmd->rssi_5g_threshold); + + wmi_mtrace(WMI_ROAM_BSS_LOAD_CONFIG_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ROAM_BSS_LOAD_CONFIG_CMDID)) { + WMI_LOGE("%s: failed to send WMI_ROAM_BSS_LOAD_CONFIG_CMDID ", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_ROAM_OFFLOAD +/** + * send_disconnect_roam_params_tlv() - send disconnect roam trigger parameters + * @wmi_handle: wmi handle + * @disconnect_roam: pointer to wmi_disconnect_roam_params which carries the + * disconnect_roam_trigger parameters from CSR + * + * This function sends the disconnect roam trigger parameters to fw. + * + * Return: QDF status + */ +static QDF_STATUS +send_disconnect_roam_params_tlv(wmi_unified_t wmi_handle, + struct wmi_disconnect_roam_params *req) +{ + wmi_roam_deauth_config_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_roam_deauth_config_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR( + &cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_deauth_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_deauth_config_cmd_fixed_param)); + + cmd->vdev_id = req->vdev_id; + cmd->enable = req->enable; + WMI_LOGD("%s: Send WMI_ROAM_DEAUTH_CONFIG vdev_id:%d enable:%d", + __func__, cmd->vdev_id, cmd->enable); + + wmi_mtrace(WMI_ROAM_DEAUTH_CONFIG_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ROAM_DEAUTH_CONFIG_CMDID)) { + WMI_LOGE("%s: failed to send WMI_ROAM_DEAUTH_CONFIG_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_idle_roam_params_tlv() - send idle roam trigger parameters + * @wmi_handle: wmi handle + * @idle_roam_params: pointer to wmi_idle_roam_params which carries the + * idle roam parameters from CSR + * + * This function sends the idle roam trigger parameters to fw. + * + * Return: QDF status + */ +static QDF_STATUS +send_idle_roam_params_tlv(wmi_unified_t wmi_handle, + struct wmi_idle_roam_params *idle_roam_params) +{ + wmi_roam_idle_config_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_roam_idle_config_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR( + &cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_idle_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_idle_config_cmd_fixed_param)); + + cmd->vdev_id = idle_roam_params->vdev_id; + cmd->enable = idle_roam_params->enable; + cmd->band = idle_roam_params->band; + cmd->rssi_delta = idle_roam_params->conn_ap_rssi_delta; + cmd->min_rssi = idle_roam_params->conn_ap_min_rssi; + cmd->idle_time = idle_roam_params->inactive_time; + cmd->data_packet_count = idle_roam_params->data_pkt_count; + WMI_LOGD("%s: Send WMI_ROAM_IDLE_CONFIG_CMDID vdev_id:%d enable:%d", + __func__, cmd->vdev_id, cmd->enable); + WMI_LOGD("%s: band:%d rssi_delta:%d min_rssi:%d idle_time:%d data_pkt:%d", + __func__, cmd->band, cmd->rssi_delta, cmd->min_rssi, + cmd->idle_time, cmd->data_packet_count); + + wmi_mtrace(WMI_ROAM_IDLE_CONFIG_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ROAM_IDLE_CONFIG_CMDID)) { + WMI_LOGE("%s: failed to send WMI_ROAM_IDLE_CONFIG_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_roam_preauth_status_tlv() - send roam pre-authentication status + * @wmi_handle: wmi handle + * @params: pre-auth status params + * + * This function sends the roam pre-authentication status for WPA3 SAE + * pre-auth to target. + * + * Return: QDF status + */ +static QDF_STATUS +send_roam_preauth_status_tlv(wmi_unified_t wmi_handle, + struct wmi_roam_auth_status_params *params) +{ + wmi_roam_preauth_status_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len; + uint8_t *buf_ptr; + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + PMKID_LEN; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_roam_preauth_status_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR( + &cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_preauth_status_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_preauth_status_cmd_fixed_param)); + + cmd->vdev_id = params->vdev_id; + cmd->preauth_status = params->preauth_status; + WMI_CHAR_ARRAY_TO_MAC_ADDR(params->bssid.bytes, + &cmd->candidate_ap_bssid); + + buf_ptr += sizeof(wmi_roam_preauth_status_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, PMKID_LEN); + buf_ptr += WMI_TLV_HDR_SIZE; + + qdf_mem_copy(buf_ptr, params->pmkid, PMKID_LEN); + WMI_LOGD("%s: vdev_id:%d status:%d bssid:"QDF_MAC_ADDR_FMT, __func__, cmd->vdev_id, + cmd->preauth_status, + QDF_MAC_ADDR_REF(params->bssid.bytes)); + + wmi_mtrace(WMI_ROAM_PREAUTH_STATUS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ROAM_PREAUTH_STATUS_CMDID)) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * convert_control_roam_trigger_reason_bitmap() - Convert roam trigger bitmap + * + * @trigger_reason_bitmap: Roam trigger reason bitmap received from upper layers + * + * Converts the controlled roam trigger reason bitmap of + * type @roam_control_trigger_reason to firmware trigger + * reason bitmap as defined in + * trigger_reason_bitmask @wmi_roam_enable_disable_trigger_reason_fixed_param + * + * Return: trigger_reason_bitmask as defined in + * wmi_roam_enable_disable_trigger_reason_fixed_param + */ +static uint32_t +convert_control_roam_trigger_reason_bitmap(uint32_t trigger_reason_bitmap) +{ + uint32_t fw_trigger_bitmap = 0, all_bitmap; + + /* Enable the complete trigger bitmap when all bits are set in + * the control config bitmap + */ + all_bitmap = BIT(ROAM_TRIGGER_REASON_MAX) - 1; + if (trigger_reason_bitmap == all_bitmap) + return BIT(WMI_ROAM_TRIGGER_EXT_REASON_MAX) - 1; + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_NONE)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_NONE); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_PER)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_PER); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_BMISS)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_BMISS); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_LOW_RSSI)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_LOW_RSSI); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_HIGH_RSSI)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_HIGH_RSSI); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_PERIODIC)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_PERIODIC); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_MAWC)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_MAWC); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_DENSE)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_DENSE); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_BACKGROUND)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_BACKGROUND); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_FORCED)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_FORCED); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_BTM)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_BTM); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_UNIT_TEST)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_UNIT_TEST); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_BSS_LOAD)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_BSS_LOAD); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_DEAUTH)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_DEAUTH); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_IDLE)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_IDLE); + + if (trigger_reason_bitmap & BIT(ROAM_TRIGGER_REASON_STA_KICKOUT)) + fw_trigger_bitmap |= BIT(WMI_ROAM_TRIGGER_REASON_STA_KICKOUT); + + return fw_trigger_bitmap; +} + +/** + * get_internal_mandatory_roam_triggers() - Internal triggers to be added + * + * Return: the bitmap of mandatory triggers to be sent to firmware but not given + * by user. + */ +static uint32_t +get_internal_mandatory_roam_triggers(void) +{ + return BIT(WMI_ROAM_TRIGGER_REASON_FORCED); +} + +/** + * send_set_roam_trigger_cmd_tlv() - send set roam triggers to fw + * + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @trigger_bitmap: roam trigger bitmap to be enabled + * + * Send WMI_ROAM_ENABLE_DISABLE_TRIGGER_REASON_CMDID to fw. + * + * Return: QDF_STATUS + */ +static QDF_STATUS send_set_roam_trigger_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint32_t trigger_bitmap) +{ + wmi_buf_t buf; + wmi_roam_enable_disable_trigger_reason_fixed_param *cmd; + uint16_t len = sizeof(*cmd); + int ret; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed to allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_roam_enable_disable_trigger_reason_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_enable_disable_trigger_reason_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_enable_disable_trigger_reason_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->trigger_reason_bitmask = + convert_control_roam_trigger_reason_bitmap(trigger_bitmap); + WMI_LOGD("Received trigger bitmap: 0x%x converted trigger_bitmap: 0x%x", + trigger_bitmap, cmd->trigger_reason_bitmask); + cmd->trigger_reason_bitmask |= get_internal_mandatory_roam_triggers(); + WMI_LOGD("WMI_ROAM_ENABLE_DISABLE_TRIGGER_REASON_CMDID vdev id: %d final trigger_bitmap: 0x%x", + cmd->vdev_id, cmd->trigger_reason_bitmask); + wmi_mtrace(WMI_ROAM_ENABLE_DISABLE_TRIGGER_REASON_CMDID, vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ROAM_ENABLE_DISABLE_TRIGGER_REASON_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send set roam triggers command ret = %d", + ret); + wmi_buf_free(buf); + } + return ret; +} +#else +static inline QDF_STATUS +send_disconnect_roam_params_tlv(wmi_unified_t wmi_handle, + struct wmi_disconnect_roam_params *req) +{ + return QDF_STATUS_E_FAILURE; +} + +static inline QDF_STATUS +send_idle_roam_params_tlv(wmi_unified_t wmi_handle, + struct wmi_idle_roam_params *idle_roam_params) +{ + return QDF_STATUS_E_FAILURE; +} + +static inline QDF_STATUS +send_roam_preauth_status_tlv(wmi_unified_t wmi_handle, + struct wmi_roam_auth_status_params *params) +{ + return QDF_STATUS_E_FAILURE; +} + +static QDF_STATUS +send_set_roam_trigger_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint32_t trigger_bitmap) +{ + return QDF_STATUS_E_FAILURE; +} +#endif + +/** + * send_offload_11k_cmd_tlv() - send wmi cmd with 11k offload params + * @wmi_handle: wmi handler + * @params: pointer to 11k offload params + * + * Return: 0 for success and non zero for failure + */ +static QDF_STATUS send_offload_11k_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_11k_offload_params *params) +{ + wmi_11k_offload_report_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + uint8_t *buf_ptr; + wmi_neighbor_report_11k_offload_tlv_param + *neighbor_report_offload_params; + wmi_neighbor_report_offload *neighbor_report_offload; + + uint32_t len = sizeof(*cmd); + + if (params->offload_11k_bitmask & + WMI_11K_OFFLOAD_BITMAP_NEIGHBOR_REPORT_REQ) + len += WMI_TLV_HDR_SIZE + + sizeof(wmi_neighbor_report_11k_offload_tlv_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_11k_offload_report_fixed_param *) buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_offload_11k_report_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_11k_offload_report_fixed_param)); + + cmd->vdev_id = params->vdev_id; + cmd->offload_11k = params->offload_11k_bitmask; + + if (params->offload_11k_bitmask & + WMI_11K_OFFLOAD_BITMAP_NEIGHBOR_REPORT_REQ) { + buf_ptr += sizeof(wmi_11k_offload_report_fixed_param); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_neighbor_report_11k_offload_tlv_param)); + buf_ptr += WMI_TLV_HDR_SIZE; + + neighbor_report_offload_params = + (wmi_neighbor_report_11k_offload_tlv_param *)buf_ptr; + WMITLV_SET_HDR(&neighbor_report_offload_params->tlv_header, + WMITLV_TAG_STRUC_wmi_neighbor_report_offload_tlv_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_neighbor_report_11k_offload_tlv_param)); + + neighbor_report_offload = &neighbor_report_offload_params-> + neighbor_rep_ofld_params; + + neighbor_report_offload->time_offset = + params->neighbor_report_params.time_offset; + neighbor_report_offload->low_rssi_offset = + params->neighbor_report_params.low_rssi_offset; + neighbor_report_offload->bmiss_count_trigger = + params->neighbor_report_params.bmiss_count_trigger; + neighbor_report_offload->per_threshold_offset = + params->neighbor_report_params.per_threshold_offset; + neighbor_report_offload->neighbor_report_cache_timeout = + params->neighbor_report_params. + neighbor_report_cache_timeout; + neighbor_report_offload->max_neighbor_report_req_cap = + params->neighbor_report_params. + max_neighbor_report_req_cap; + neighbor_report_offload->ssid.ssid_len = + params->neighbor_report_params.ssid.length; + qdf_mem_copy(neighbor_report_offload->ssid.ssid, + ¶ms->neighbor_report_params.ssid.mac_ssid, + neighbor_report_offload->ssid.ssid_len); + } + + wmi_mtrace(WMI_11K_OFFLOAD_REPORT_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_11K_OFFLOAD_REPORT_CMDID); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: failed to send 11k offload command %d", + __func__, status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_invoke_neighbor_report_cmd_tlv() - send invoke 11k neighbor report + * command + * @wmi_handle: wmi handler + * @params: pointer to neighbor report invoke params + * + * Return: 0 for success and non zero for failure + */ +static QDF_STATUS send_invoke_neighbor_report_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_invoke_neighbor_report_params *params) +{ + wmi_11k_offload_invoke_neighbor_report_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + uint8_t *buf_ptr; + uint32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_11k_offload_invoke_neighbor_report_fixed_param *) buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_invoke_neighbor_report_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_11k_offload_invoke_neighbor_report_fixed_param)); + + cmd->vdev_id = params->vdev_id; + cmd->flags = params->send_resp_to_host; + + cmd->ssid.ssid_len = params->ssid.length; + qdf_mem_copy(cmd->ssid.ssid, + ¶ms->ssid.mac_ssid, + cmd->ssid.ssid_len); + + wmi_mtrace(WMI_11K_INVOKE_NEIGHBOR_REPORT_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_11K_INVOKE_NEIGHBOR_REPORT_CMDID); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: failed to send invoke neighbor report command %d", + __func__, status); + wmi_buf_free(buf); + } + + return status; +} + +void wmi_roam_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_roam_scan_offload_rssi_thresh_cmd = + send_roam_scan_offload_rssi_thresh_cmd_tlv; + ops->send_roam_mawc_params_cmd = send_roam_mawc_params_cmd_tlv; + ops->send_roam_scan_filter_cmd = + send_roam_scan_filter_cmd_tlv; + ops->send_roam_scan_offload_mode_cmd = + send_roam_scan_offload_mode_cmd_tlv; + ops->send_roam_scan_offload_ap_profile_cmd = + send_roam_scan_offload_ap_profile_cmd_tlv; + ops->send_roam_scan_offload_cmd = send_roam_scan_offload_cmd_tlv; + ops->send_roam_scan_offload_scan_period_cmd = + send_roam_scan_offload_scan_period_cmd_tlv; + ops->send_roam_scan_offload_chan_list_cmd = + send_roam_scan_offload_chan_list_cmd_tlv; + ops->send_roam_scan_offload_rssi_change_cmd = + send_roam_scan_offload_rssi_change_cmd_tlv; + ops->send_per_roam_config_cmd = send_per_roam_config_cmd_tlv; + ops->send_limit_off_chan_cmd = send_limit_off_chan_cmd_tlv; + ops->send_btm_config = send_btm_config_cmd_tlv; + ops->send_offload_11k_cmd = send_offload_11k_cmd_tlv; + ops->send_invoke_neighbor_report_cmd = + send_invoke_neighbor_report_cmd_tlv; + ops->send_roam_bss_load_config = send_roam_bss_load_config_tlv; + ops->send_idle_roam_params = send_idle_roam_params_tlv; + ops->send_disconnect_roam_params = send_disconnect_roam_params_tlv; + ops->send_roam_preauth_status = send_roam_preauth_status_tlv; + ops->send_set_roam_trigger_cmd = send_set_roam_trigger_cmd_tlv, + + wmi_lfr_subnet_detection_attach_tlv(wmi_handle); + wmi_rssi_monitor_attach_tlv(wmi_handle); + wmi_ese_attach_tlv(wmi_handle); + wmi_roam_offload_attach_tlv(wmi_handle); + wmi_fils_sk_attach_tlv(wmi_handle); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_sta_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_sta_api.c new file mode 100644 index 0000000000000000000000000000000000000000..78ee7aca0487eeca8a4044461d0b7aaf11fcf199 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_sta_api.c @@ -0,0 +1,417 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to STA component. + */ +#include +#include "wmi_unified_priv.h" +#include "wmi_unified_sta_api.h" + +QDF_STATUS wmi_unified_set_sta_sa_query_param_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint32_t max_retries, + uint32_t retry_interval) +{ + if (wmi_handle->ops->send_set_sta_sa_query_param_cmd) + return wmi_handle->ops->send_set_sta_sa_query_param_cmd(wmi_handle, + vdev_id, max_retries, + retry_interval); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_set_sta_keep_alive_cmd(wmi_unified_t wmi_handle, + struct sta_keep_alive_params *params) +{ + if (wmi_handle->ops->send_set_sta_keep_alive_cmd) + return wmi_handle->ops->send_set_sta_keep_alive_cmd(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_vdev_set_gtx_cfg_cmd(wmi_unified_t wmi_handle, uint32_t if_id, + struct wmi_gtx_config *gtx_info) +{ + if (wmi_handle->ops->send_vdev_set_gtx_cfg_cmd) + return wmi_handle->ops->send_vdev_set_gtx_cfg_cmd(wmi_handle, + if_id, + gtx_info); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_process_dhcp_ind( + wmi_unified_t wmi_handle, + wmi_peer_set_param_cmd_fixed_param *ta_dhcp_ind) +{ + if (wmi_handle->ops->send_process_dhcp_ind_cmd) + return wmi_handle->ops->send_process_dhcp_ind_cmd(wmi_handle, + ta_dhcp_ind); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_get_link_speed_cmd(wmi_unified_t wmi_handle, + wmi_mac_addr peer_macaddr) +{ + if (wmi_handle->ops->send_get_link_speed_cmd) + return wmi_handle->ops->send_get_link_speed_cmd(wmi_handle, + peer_macaddr); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_fw_profiling_data_cmd(wmi_unified_t wmi_handle, + uint32_t cmd, + uint32_t value1, + uint32_t value2) +{ + if (wmi_handle->ops->send_fw_profiling_cmd) + return wmi_handle->ops->send_fw_profiling_cmd(wmi_handle, + cmd, + value1, + value2); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_nat_keepalive_en_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + if (wmi_handle->ops->send_nat_keepalive_en_cmd) + return wmi_handle->ops->send_nat_keepalive_en_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_wlm_latency_level_cmd(wmi_unified_t wmi_handle, + struct wlm_latency_level_param *param) +{ + if (wmi_handle->ops->send_wlm_latency_level_cmd) + return wmi_handle->ops->send_wlm_latency_level_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_process_set_ie_info_cmd(wmi_unified_t wmi_handle, + struct vdev_ie_info_param *ie_info) +{ + if (wmi_handle->ops->send_process_set_ie_info_cmd) + return wmi_handle->ops->send_process_set_ie_info_cmd(wmi_handle, + ie_info); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_base_macaddr_indicate_cmd(wmi_unified_t wmi_handle, + uint8_t *custom_addr) +{ + if (wmi_handle->ops->send_set_base_macaddr_indicate_cmd) + return wmi_handle->ops->send_set_base_macaddr_indicate_cmd( + wmi_handle, custom_addr); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_WLAN_TDLS +QDF_STATUS wmi_unified_set_tdls_offchan_mode_cmd( + wmi_unified_t wmi_handle, + struct tdls_channel_switch_params *chan_switch_params) +{ + if (wmi_handle->ops->send_set_tdls_offchan_mode_cmd) + return wmi_handle->ops->send_set_tdls_offchan_mode_cmd( + wmi_handle, chan_switch_params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_update_fw_tdls_state_cmd(wmi_unified_t wmi_handle, + struct tdls_info *tdls_param, + enum wmi_tdls_state tdls_state) +{ + if (wmi_handle->ops->send_update_fw_tdls_state_cmd) + return wmi_handle->ops->send_update_fw_tdls_state_cmd( + wmi_handle, tdls_param, tdls_state); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_update_tdls_peer_state_cmd(wmi_unified_t wmi_handle, + struct tdls_peer_update_state *peer_state, + uint32_t *ch_mhz) +{ + if (wmi_handle->ops->send_update_tdls_peer_state_cmd) + return wmi_handle->ops->send_update_tdls_peer_state_cmd(wmi_handle, + peer_state, ch_mhz); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_vdev_tdls_ev_param(wmi_unified_t wmi_handle, + void *evt_buf, + struct tdls_event_info *param) +{ + if (wmi_handle->ops->extract_vdev_tdls_ev_param) + return wmi_handle->ops->extract_vdev_tdls_ev_param(wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} +#endif /* FEATURE_WLAN_TDLS */ + +#ifdef FEATURE_BLACKLIST_MGR +QDF_STATUS +wmi_unified_send_reject_ap_list(struct wmi_unified *wmi_handle, + struct reject_ap_params *reject_params) +{ + if (wmi_handle->ops->send_reject_ap_list_cmd) + return wmi_handle->ops->send_reject_ap_list_cmd(wmi_handle, + reject_params); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS wmi_unified_send_sar_limit_cmd(wmi_unified_t wmi_handle, + struct sar_limit_cmd_params *params) +{ + if (wmi_handle->ops->send_sar_limit_cmd) + return wmi_handle->ops->send_sar_limit_cmd( + wmi_handle, + params); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_get_sar_limit_cmd(wmi_unified_t wmi_handle) +{ + if (wmi_handle->ops->get_sar_limit_cmd) + return wmi_handle->ops->get_sar_limit_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extract_sar_limit_event(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct sar_limit_event *event) +{ + if (wmi_handle->ops->extract_sar_limit_event) + return wmi_handle->ops->extract_sar_limit_event(wmi_handle, + evt_buf, + event); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extract_sar2_result_event(void *handle, + uint8_t *event, uint32_t len) +{ + wmi_unified_t wmi_handle = handle; + + if (wmi_handle->ops->extract_sar2_result_event) + return wmi_handle->ops->extract_sar2_result_event(wmi_handle, + event, + len); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_set_del_pmkid_cache(wmi_unified_t wmi_handle, + struct wmi_unified_pmk_cache *req_buf) +{ + if (wmi_handle->ops->send_set_del_pmkid_cache_cmd) + return wmi_handle->ops->send_set_del_pmkid_cache_cmd(wmi_handle, + req_buf); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_del_ts_cmd(wmi_unified_t wmi_handle, uint8_t vdev_id, + uint8_t ac) +{ + if (wmi_handle->ops->send_del_ts_cmd) + return wmi_handle->ops->send_del_ts_cmd(wmi_handle, + vdev_id, ac); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_aggr_qos_cmd( + wmi_unified_t wmi_handle, + struct aggr_add_ts_param *aggr_qos_rsp_msg) +{ + if (wmi_handle->ops->send_aggr_qos_cmd) + return wmi_handle->ops->send_aggr_qos_cmd(wmi_handle, + aggr_qos_rsp_msg); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_add_ts_cmd(wmi_unified_t wmi_handle, + struct add_ts_param *msg) +{ + if (wmi_handle->ops->send_add_ts_cmd) + return wmi_handle->ops->send_add_ts_cmd(wmi_handle, + msg); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_process_add_periodic_tx_ptrn_cmd( + wmi_unified_t wmi_handle, + struct periodic_tx_pattern *pattern, + uint8_t vdev_id) +{ + if (wmi_handle->ops->send_process_add_periodic_tx_ptrn_cmd) + return wmi_handle->ops->send_process_add_periodic_tx_ptrn_cmd( + wmi_handle, pattern, vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_process_del_periodic_tx_ptrn_cmd(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint8_t pattern_id) +{ + if (wmi_handle->ops->send_process_del_periodic_tx_ptrn_cmd) + return wmi_handle->ops->send_process_del_periodic_tx_ptrn_cmd( + wmi_handle, + vdev_id, + pattern_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_auto_shutdown_timer_cmd(wmi_unified_t wmi_handle, + uint32_t timer_val) +{ + if (wmi_handle->ops->send_set_auto_shutdown_timer_cmd) + return wmi_handle->ops->send_set_auto_shutdown_timer_cmd( + wmi_handle, + timer_val); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_set_led_flashing_cmd(wmi_unified_t wmi_handle, + struct flashing_req_params *flashing) +{ + if (wmi_handle->ops->send_set_led_flashing_cmd) + return wmi_handle->ops->send_set_led_flashing_cmd(wmi_handle, + flashing); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_process_ch_avoid_update_cmd(wmi_unified_t wmi_handle) +{ + if (wmi_handle->ops->send_process_ch_avoid_update_cmd) + return wmi_handle->ops->send_process_ch_avoid_update_cmd( + wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_pdev_set_pcl_cmd(wmi_unified_t wmi_handle, + struct wmi_pcl_chan_weights *msg) +{ + if (wmi_handle->ops->send_pdev_set_pcl_cmd) + return wmi_handle->ops->send_pdev_set_pcl_cmd(wmi_handle, msg); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_POLICY_MGR_ENABLE +QDF_STATUS wmi_unified_pdev_set_dual_mac_config_cmd( + wmi_unified_t wmi_handle, + struct policy_mgr_dual_mac_config *msg) +{ + if (wmi_handle->ops->send_pdev_set_dual_mac_config_cmd) + return wmi_handle->ops->send_pdev_set_dual_mac_config_cmd( + wmi_handle, + msg); + + return QDF_STATUS_E_FAILURE; +} +#endif /* WLAN_POLICY_MGR_ENABLE */ + +QDF_STATUS wmi_unified_send_adapt_dwelltime_params_cmd( + wmi_unified_t wmi_handle, + struct wmi_adaptive_dwelltime_params *dwelltime_params) +{ + if (wmi_handle->ops->send_adapt_dwelltime_params_cmd) + return wmi_handle->ops-> + send_adapt_dwelltime_params_cmd(wmi_handle, + dwelltime_params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_dbs_scan_sel_params_cmd( + wmi_unified_t wmi_handle, + struct wmi_dbs_scan_sel_params *dbs_scan_params) +{ + if (wmi_handle->ops->send_dbs_scan_sel_params_cmd) + return wmi_handle->ops-> + send_dbs_scan_sel_params_cmd(wmi_handle, + dbs_scan_params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_arp_stats_req(wmi_unified_t wmi_handle, + struct set_arp_stats *req_buf) +{ + if (wmi_handle->ops->send_set_arp_stats_req_cmd) + return wmi_handle->ops->send_set_arp_stats_req_cmd(wmi_handle, + req_buf); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_get_arp_stats_req(wmi_unified_t wmi_handle, + struct get_arp_stats *req_buf) +{ + if (wmi_handle->ops->send_get_arp_stats_req_cmd) + return wmi_handle->ops->send_get_arp_stats_req_cmd(wmi_handle, + req_buf); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_peer_unmap_conf_send(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint32_t peer_id_cnt, + uint16_t *peer_id_list) +{ + if (wmi_handle->ops->send_peer_unmap_conf_cmd) + return wmi_handle->ops->send_peer_unmap_conf_cmd(wmi_handle, + vdev_id, peer_id_cnt, peer_id_list); + + return QDF_STATUS_E_FAILURE; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_sta_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_sta_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..0d99a0ae18521b36ccdc96c44b82a1512ee8260f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_sta_tlv.c @@ -0,0 +1,2459 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "wmi.h" +#include "wmi_version.h" +#include "wmi_unified_priv.h" +#include "wmi_unified_sta_param.h" +#include "wmi_unified_sta_api.h" +#ifdef FEATURE_WLAN_TDLS +#include +#endif + +/** + * send_set_sta_sa_query_param_cmd_tlv() - set sta sa query parameters + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @max_retries: max retries + * @retry_interval: retry interval + * This function sets sta query related parameters in fw. + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + */ +static QDF_STATUS send_set_sta_sa_query_param_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint32_t max_retries, + uint32_t retry_interval) +{ + wmi_buf_t buf; + WMI_PMF_OFFLOAD_SET_SA_QUERY_CMD_fixed_param *cmd; + int len; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_FAILURE; + } + + cmd = (WMI_PMF_OFFLOAD_SET_SA_QUERY_CMD_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_PMF_OFFLOAD_SET_SA_QUERY_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_PMF_OFFLOAD_SET_SA_QUERY_CMD_fixed_param)); + + cmd->vdev_id = vdev_id; + cmd->sa_query_max_retry_count = max_retries; + cmd->sa_query_retry_interval = retry_interval; + + WMI_LOGD(FL("STA sa query: vdev_id:%d interval:%u retry count:%d"), + vdev_id, retry_interval, max_retries); + + wmi_mtrace(WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID)) { + WMI_LOGE(FL("Failed to offload STA SA Query")); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + WMI_LOGD(FL("Exit :")); + return 0; +} + +/** + * send_set_sta_keep_alive_cmd_tlv() - set sta keep alive parameters + * @wmi_handle: wmi handle + * @params: sta keep alive parameter + * + * This function sets keep alive related parameters in fw. + * + * Return: CDF status + */ +static QDF_STATUS +send_set_sta_keep_alive_cmd_tlv(wmi_unified_t wmi_handle, + struct sta_keep_alive_params *params) +{ + wmi_buf_t buf; + WMI_STA_KEEPALIVE_CMD_fixed_param *cmd; + WMI_STA_KEEPALVE_ARP_RESPONSE *arp_rsp; + uint8_t *buf_ptr; + int len; + QDF_STATUS ret; + + WMI_LOGD("%s: Enter", __func__); + + len = sizeof(*cmd) + sizeof(*arp_rsp); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_FAILURE; + } + + cmd = (WMI_STA_KEEPALIVE_CMD_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_STA_KEEPALIVE_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_STA_KEEPALIVE_CMD_fixed_param)); + cmd->interval = params->timeperiod; + cmd->enable = (params->timeperiod) ? 1 : 0; + cmd->vdev_id = params->vdev_id; + WMI_LOGD("Keep Alive: vdev_id:%d interval:%u method:%d", + params->vdev_id, params->timeperiod, params->method); + arp_rsp = (WMI_STA_KEEPALVE_ARP_RESPONSE *) (buf_ptr + sizeof(*cmd)); + WMITLV_SET_HDR(&arp_rsp->tlv_header, + WMITLV_TAG_STRUC_WMI_STA_KEEPALVE_ARP_RESPONSE, + WMITLV_GET_STRUCT_TLVLEN(WMI_STA_KEEPALVE_ARP_RESPONSE)); + + if ((params->method == WMI_KEEP_ALIVE_UNSOLICIT_ARP_RSP) || + (params->method == + WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST)) { + cmd->method = params->method; + qdf_mem_copy(&arp_rsp->sender_prot_addr, params->hostv4addr, + QDF_IPV4_ADDR_SIZE); + qdf_mem_copy(&arp_rsp->target_prot_addr, params->destv4addr, + QDF_IPV4_ADDR_SIZE); + WMI_CHAR_ARRAY_TO_MAC_ADDR(params->destmac, + &arp_rsp->dest_mac_addr); + } else { + cmd->method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME; + } + + wmi_mtrace(WMI_STA_KEEPALIVE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_STA_KEEPALIVE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to set KeepAlive"); + wmi_buf_free(buf); + } + + WMI_LOGD("%s: Exit", __func__); + return ret; +} + +/** + * send_vdev_set_gtx_cfg_cmd_tlv() - set GTX params + * @wmi_handle: wmi handle + * @if_id: vdev id + * @gtx_info: GTX config params + * + * This function set GTX related params in firmware. + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_vdev_set_gtx_cfg_cmd_tlv(wmi_unified_t wmi_handle, uint32_t if_id, + struct wmi_gtx_config *gtx_info) +{ + wmi_vdev_set_gtx_params_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + int len = sizeof(wmi_vdev_set_gtx_params_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_set_gtx_params_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_gtx_params_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_set_gtx_params_cmd_fixed_param)); + cmd->vdev_id = if_id; + + cmd->gtxRTMask[0] = gtx_info->gtx_rt_mask[0]; + cmd->gtxRTMask[1] = gtx_info->gtx_rt_mask[1]; + cmd->userGtxMask = gtx_info->gtx_usrcfg; + cmd->gtxPERThreshold = gtx_info->gtx_threshold; + cmd->gtxPERMargin = gtx_info->gtx_margin; + cmd->gtxTPCstep = gtx_info->gtx_tpcstep; + cmd->gtxTPCMin = gtx_info->gtx_tpcmin; + cmd->gtxBWMask = gtx_info->gtx_bwmask; + + WMI_LOGD("Setting vdev%d GTX values:htmcs 0x%x, vhtmcs 0x%x, usermask 0x%x, \ + gtxPERThreshold %d, gtxPERMargin %d, gtxTPCstep %d, gtxTPCMin %d, \ + gtxBWMask 0x%x.", if_id, cmd->gtxRTMask[0], cmd->gtxRTMask[1], + cmd->userGtxMask, cmd->gtxPERThreshold, cmd->gtxPERMargin, + cmd->gtxTPCstep, cmd->gtxTPCMin, cmd->gtxBWMask); + + wmi_mtrace(WMI_VDEV_SET_GTX_PARAMS_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SET_GTX_PARAMS_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to set GTX PARAMS"); + wmi_buf_free(buf); + } + return ret; +} + +/** + * send_process_dhcp_ind_cmd_tlv() - process dhcp indication from SME + * @wmi_handle: wmi handle + * @ta_dhcp_ind: DHCP indication parameter + * + * Return: CDF Status + */ +static QDF_STATUS send_process_dhcp_ind_cmd_tlv(wmi_unified_t wmi_handle, + wmi_peer_set_param_cmd_fixed_param *ta_dhcp_ind) +{ + QDF_STATUS status; + wmi_buf_t buf = NULL; + uint8_t *buf_ptr; + wmi_peer_set_param_cmd_fixed_param *peer_set_param_fp; + int len = sizeof(wmi_peer_set_param_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + peer_set_param_fp = (wmi_peer_set_param_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&peer_set_param_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_set_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_set_param_cmd_fixed_param)); + + /* fill in values */ + peer_set_param_fp->vdev_id = ta_dhcp_ind->vdev_id; + peer_set_param_fp->param_id = ta_dhcp_ind->param_id; + peer_set_param_fp->param_value = ta_dhcp_ind->param_value; + qdf_mem_copy(&peer_set_param_fp->peer_macaddr, + &ta_dhcp_ind->peer_macaddr, + sizeof(ta_dhcp_ind->peer_macaddr)); + + wmi_mtrace(WMI_PEER_SET_PARAM_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_PEER_SET_PARAM_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("%s: wmi_unified_cmd_send WMI_PEER_SET_PARAM_CMD" + " returned Error %d", __func__, status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_get_link_speed_cmd_tlv() -send command to get linkspeed + * @wmi_handle: wmi handle + * @pLinkSpeed: link speed info + * + * Return: CDF status + */ +static QDF_STATUS send_get_link_speed_cmd_tlv(wmi_unified_t wmi_handle, + wmi_mac_addr peer_macaddr) +{ + wmi_peer_get_estimated_linkspeed_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + + len = sizeof(wmi_peer_get_estimated_linkspeed_cmd_fixed_param); + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (wmi_peer_get_estimated_linkspeed_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_get_estimated_linkspeed_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_get_estimated_linkspeed_cmd_fixed_param)); + + /* Copy the peer macaddress to the wma buffer */ + qdf_mem_copy(&cmd->peer_macaddr, + &peer_macaddr, + sizeof(peer_macaddr)); + + wmi_mtrace(WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID)) { + WMI_LOGE("%s: failed to send link speed command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_fw_profiling_cmd_tlv() - send FW profiling cmd to WLAN FW + * @wmi_handl: wmi handle + * @cmd: Profiling command index + * @value1: parameter1 value + * @value2: parameter2 value + * + * Return: QDF_STATUS_SUCCESS for success else error code + */ +static QDF_STATUS send_fw_profiling_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t cmd, uint32_t value1, uint32_t value2) +{ + wmi_buf_t buf; + int32_t len = 0; + int ret; + wmi_wlan_profile_trigger_cmd_fixed_param *prof_trig_cmd; + wmi_wlan_profile_set_hist_intvl_cmd_fixed_param *hist_intvl_cmd; + wmi_wlan_profile_enable_profile_id_cmd_fixed_param *profile_enable_cmd; + wmi_wlan_profile_get_prof_data_cmd_fixed_param *profile_getdata_cmd; + + switch (cmd) { + case WMI_WLAN_PROFILE_TRIGGER_CMDID: + len = sizeof(wmi_wlan_profile_trigger_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + prof_trig_cmd = + (wmi_wlan_profile_trigger_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&prof_trig_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wlan_profile_trigger_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_wlan_profile_trigger_cmd_fixed_param)); + prof_trig_cmd->enable = value1; + wmi_mtrace(WMI_WLAN_PROFILE_TRIGGER_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WLAN_PROFILE_TRIGGER_CMDID); + if (ret) { + WMI_LOGE("PROFILE_TRIGGER cmd Failed with value %d", + value1); + wmi_buf_free(buf); + return ret; + } + break; + + case WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID: + len = sizeof(wmi_wlan_profile_get_prof_data_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + profile_getdata_cmd = + (wmi_wlan_profile_get_prof_data_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&profile_getdata_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wlan_profile_get_prof_data_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_wlan_profile_get_prof_data_cmd_fixed_param)); + wmi_mtrace(WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID); + if (ret) { + WMI_LOGE("PROFILE_DATA cmd Failed for id %d value %d", + value1, value2); + wmi_buf_free(buf); + return ret; + } + break; + + case WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID: + len = sizeof(wmi_wlan_profile_set_hist_intvl_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + hist_intvl_cmd = + (wmi_wlan_profile_set_hist_intvl_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&hist_intvl_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wlan_profile_set_hist_intvl_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_wlan_profile_set_hist_intvl_cmd_fixed_param)); + hist_intvl_cmd->profile_id = value1; + hist_intvl_cmd->value = value2; + wmi_mtrace(WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID); + if (ret) { + WMI_LOGE("HIST_INTVL cmd Failed for id %d value %d", + value1, value2); + wmi_buf_free(buf); + return ret; + } + break; + + case WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID: + len = + sizeof(wmi_wlan_profile_enable_profile_id_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + profile_enable_cmd = + (wmi_wlan_profile_enable_profile_id_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&profile_enable_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wlan_profile_enable_profile_id_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_wlan_profile_enable_profile_id_cmd_fixed_param)); + profile_enable_cmd->profile_id = value1; + profile_enable_cmd->enable = value2; + wmi_mtrace(WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID); + if (ret) { + WMI_LOGE("enable cmd Failed for id %d value %d", + value1, value2); + wmi_buf_free(buf); + return ret; + } + break; + + default: + WMI_LOGD("%s: invalid profiling command", __func__); + break; + } + + return 0; +} + +/** + * send_nat_keepalive_en_cmd_tlv() - enable NAT keepalive filter + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_nat_keepalive_en_cmd_tlv(wmi_unified_t wmi_handle, uint8_t vdev_id) +{ + WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + WMI_LOGD("%s: vdev_id %d", __func__, vdev_id); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->action = IPSEC_NATKEEPALIVE_FILTER_ENABLE; + wmi_mtrace(WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID)) { + WMI_LOGP("%s: Failed to send NAT keepalive enable command", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +static QDF_STATUS send_wlm_latency_level_cmd_tlv(wmi_unified_t wmi_handle, + struct wlm_latency_level_param *params) +{ + wmi_wlm_config_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len = sizeof(*cmd); + static uint32_t ll[4] = {100, 60, 40, 20}; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_wlm_config_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wlm_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_wlm_config_cmd_fixed_param)); + cmd->vdev_id = params->vdev_id; + cmd->latency_level = params->wlm_latency_level; + cmd->ul_latency = ll[params->wlm_latency_level]; + cmd->dl_latency = ll[params->wlm_latency_level]; + cmd->flags = params->wlm_latency_flags; + wmi_mtrace(WMI_WLM_CONFIG_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WLM_CONFIG_CMDID)) { + WMI_LOGE("%s: Failed to send setting latency config command", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +#ifdef FEATURE_WLAN_TDLS +/** + * tdls_get_wmi_offchannel_mode - Get WMI tdls off channel mode + * @tdls_sw_mode: tdls_sw_mode + * + * This function returns wmi tdls offchannel mode + * + * Return: enum value of wmi tdls offchannel mode + */ +static uint8_t tdls_get_wmi_offchannel_mode(uint8_t tdls_sw_mode) +{ + uint8_t off_chan_mode; + + switch (tdls_sw_mode) { + case ENABLE_CHANSWITCH: + off_chan_mode = WMI_TDLS_ENABLE_OFFCHANNEL; + break; + + case DISABLE_CHANSWITCH: + off_chan_mode = WMI_TDLS_DISABLE_OFFCHANNEL; + break; + + default: + WMI_LOGD(FL("unknown tdls_sw_mode %d"), tdls_sw_mode); + off_chan_mode = WMI_TDLS_DISABLE_OFFCHANNEL; + } + return off_chan_mode; +} + +/** + * tdls_get_wmi_offchannel_bw - Get WMI tdls off channel Bandwidth + * @tdls_sw_mode: tdls_sw_mode + * + * This function returns wmi tdls offchannel bandwidth + * + * Return: TDLS offchannel bandwidth + */ +static uint8_t tdls_get_wmi_offchannel_bw(uint16_t tdls_off_ch_bw_offset) +{ + uint8_t off_chan_bw; + + switch (tdls_off_ch_bw_offset) { + case BW20: + off_chan_bw = WMI_TDLS_OFFCHAN_20MHZ; + break; + case BW40_LOW_PRIMARY: + case BW40_HIGH_PRIMARY: + off_chan_bw = WMI_TDLS_OFFCHAN_40MHZ; + break; + case BW80: + off_chan_bw = WMI_TDLS_OFFCHAN_80MHZ; + break; + case BWALL: + off_chan_bw = WMI_TDLS_OFFCHAN_160MHZ; + break; + default: + WMI_LOGD(FL("unknown tdls offchannel bw offset %d"), + tdls_off_ch_bw_offset); + off_chan_bw = WMI_TDLS_OFFCHAN_20MHZ; + } + return off_chan_bw; +} + +/** + * send_set_tdls_offchan_mode_cmd_tlv() - set tdls off channel mode + * @wmi_handle: wmi handle + * @chan_switch_params: Pointer to tdls channel switch parameter structure + * + * This function sets tdls off channel mode + * + * Return: 0 on success; Negative errno otherwise + */ +static QDF_STATUS send_set_tdls_offchan_mode_cmd_tlv(wmi_unified_t wmi_handle, + struct tdls_channel_switch_params *chan_switch_params) +{ + wmi_tdls_set_offchan_mode_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + u_int16_t len = sizeof(wmi_tdls_set_offchan_mode_cmd_fixed_param); + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_tdls_set_offchan_mode_cmd_fixed_param *) + wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_tdls_set_offchan_mode_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_tdls_set_offchan_mode_cmd_fixed_param)); + + WMI_CHAR_ARRAY_TO_MAC_ADDR(chan_switch_params->peer_mac_addr, + &cmd->peer_macaddr); + cmd->vdev_id = chan_switch_params->vdev_id; + cmd->offchan_mode = + tdls_get_wmi_offchannel_mode(chan_switch_params->tdls_sw_mode); + cmd->is_peer_responder = chan_switch_params->is_responder; + cmd->offchan_freq = chan_switch_params->tdls_off_chan_freq; + cmd->offchan_num = chan_switch_params->tdls_off_ch; + cmd->offchan_bw_bitmap = + tdls_get_wmi_offchannel_bw( + chan_switch_params->tdls_off_ch_bw_offset); + cmd->offchan_oper_class = chan_switch_params->oper_class; + + WMI_LOGD(FL("Peer MAC Addr mac_addr31to0: 0x%x, mac_addr47to32: 0x%x"), + cmd->peer_macaddr.mac_addr31to0, + cmd->peer_macaddr.mac_addr47to32); + + WMI_LOGD(FL( + "vdev_id: %d, off channel mode: %d, off channel Num: %d, " + "off channel frequency: %u off channel offset: 0x%x, " + " is_peer_responder: %d, operating class: %d"), + cmd->vdev_id, + cmd->offchan_mode, + cmd->offchan_num, + cmd->offchan_freq, + cmd->offchan_bw_bitmap, + cmd->is_peer_responder, + cmd->offchan_oper_class); + + wmi_mtrace(WMI_TDLS_SET_OFFCHAN_MODE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_TDLS_SET_OFFCHAN_MODE_CMDID)) { + WMI_LOGP(FL("failed to send tdls off chan command")); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_update_fw_tdls_state_cmd_tlv() - send enable/disable tdls for a vdev + * @wmi_handle: wmi handle + * @pwmaTdlsparams: TDLS params + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_update_fw_tdls_state_cmd_tlv(wmi_unified_t wmi_handle, + struct tdls_info *tdls_param, + enum wmi_tdls_state tdls_state) +{ + wmi_tdls_set_state_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + + uint16_t len = sizeof(wmi_tdls_set_state_cmd_fixed_param); + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_tdls_set_state_cmd_fixed_param *) wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_tdls_set_state_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_tdls_set_state_cmd_fixed_param)); + cmd->vdev_id = tdls_param->vdev_id; + cmd->state = (A_UINT32)tdls_state; + cmd->notification_interval_ms = tdls_param->notification_interval_ms; + cmd->tx_discovery_threshold = tdls_param->tx_discovery_threshold; + cmd->tx_teardown_threshold = tdls_param->tx_teardown_threshold; + cmd->rssi_teardown_threshold = tdls_param->rssi_teardown_threshold; + cmd->rssi_delta = tdls_param->rssi_delta; + cmd->tdls_options = tdls_param->tdls_options; + cmd->tdls_peer_traffic_ind_window = tdls_param->peer_traffic_ind_window; + cmd->tdls_peer_traffic_response_timeout_ms = + tdls_param->peer_traffic_response_timeout; + cmd->tdls_puapsd_mask = tdls_param->puapsd_mask; + cmd->tdls_puapsd_inactivity_time_ms = + tdls_param->puapsd_inactivity_time; + cmd->tdls_puapsd_rx_frame_threshold = + tdls_param->puapsd_rx_frame_threshold; + cmd->teardown_notification_ms = + tdls_param->teardown_notification_ms; + cmd->tdls_peer_kickout_threshold = + tdls_param->tdls_peer_kickout_threshold; + cmd->tdls_discovery_wake_timeout = + tdls_param->tdls_discovery_wake_timeout; + + WMI_LOGD("%s: vdev %d tdls_state: %d, state: %d, " + "notification_interval_ms: %d, " + "tx_discovery_threshold: %d, " + "tx_teardown_threshold: %d, " + "rssi_teardown_threshold: %d, " + "rssi_delta: %d, " + "tdls_options: 0x%x, " + "tdls_peer_traffic_ind_window: %d, " + "tdls_peer_traffic_response_timeout: %d, " + "tdls_puapsd_mask: 0x%x, " + "tdls_puapsd_inactivity_time: %d, " + "tdls_puapsd_rx_frame_threshold: %d, " + "teardown_notification_ms: %d, " + "tdls_peer_kickout_threshold: %d, " + "tdls_discovery_wake_timeout: %d", + __func__,tdls_param->vdev_id, tdls_state, cmd->state, + cmd->notification_interval_ms, + cmd->tx_discovery_threshold, + cmd->tx_teardown_threshold, + cmd->rssi_teardown_threshold, + cmd->rssi_delta, + cmd->tdls_options, + cmd->tdls_peer_traffic_ind_window, + cmd->tdls_peer_traffic_response_timeout_ms, + cmd->tdls_puapsd_mask, + cmd->tdls_puapsd_inactivity_time_ms, + cmd->tdls_puapsd_rx_frame_threshold, + cmd->teardown_notification_ms, + cmd->tdls_peer_kickout_threshold, + cmd->tdls_discovery_wake_timeout); + + wmi_mtrace(WMI_TDLS_SET_STATE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_TDLS_SET_STATE_CMDID)) { + WMI_LOGP("%s: failed to send tdls set state command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_update_tdls_peer_state_cmd_tlv() - update TDLS peer state + * @wmi_handle: wmi handle + * @peer_state: TDLS peer state params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +send_update_tdls_peer_state_cmd_tlv(wmi_unified_t wmi_handle, + struct tdls_peer_update_state *peer_state, + uint32_t *ch_mhz) +{ + struct tdls_peer_params *in_peer_cap; + struct tdls_ch_params *in_chan_info; + wmi_tdls_peer_update_cmd_fixed_param *cmd; + wmi_tdls_peer_capabilities *peer_cap; + wmi_channel *chan_info; + wmi_buf_t wmi_buf; + uint8_t *buf_ptr; + uint32_t i; + int32_t len = sizeof(wmi_tdls_peer_update_cmd_fixed_param) + + sizeof(wmi_tdls_peer_capabilities); + + in_peer_cap = &peer_state->peer_cap; + len += WMI_TLV_HDR_SIZE + + sizeof(wmi_channel) * in_peer_cap->peer_chanlen; + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + return QDF_STATUS_E_FAILURE; + } + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + cmd = (wmi_tdls_peer_update_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_tdls_peer_update_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_tdls_peer_update_cmd_fixed_param)); + + cmd->vdev_id = peer_state->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_state->peer_macaddr, + &cmd->peer_macaddr); + + cmd->peer_state = peer_state->peer_state; + + WMI_LOGD("%s: vdev_id: %d, peermac: "QDF_MAC_ADDR_FMT", " + "peer_macaddr.mac_addr31to0: 0x%x, " + "peer_macaddr.mac_addr47to32: 0x%x, peer_state: %d", + __func__, cmd->vdev_id, + QDF_MAC_ADDR_REF(peer_state->peer_macaddr), + cmd->peer_macaddr.mac_addr31to0, + cmd->peer_macaddr.mac_addr47to32, cmd->peer_state); + + buf_ptr += sizeof(wmi_tdls_peer_update_cmd_fixed_param); + peer_cap = (wmi_tdls_peer_capabilities *) buf_ptr; + WMITLV_SET_HDR(&peer_cap->tlv_header, + WMITLV_TAG_STRUC_wmi_tdls_peer_capabilities, + WMITLV_GET_STRUCT_TLVLEN(wmi_tdls_peer_capabilities)); + + if ((in_peer_cap->peer_uapsd_queue & 0x08) >> 3) + WMI_SET_TDLS_PEER_VO_UAPSD(peer_cap); + if ((in_peer_cap->peer_uapsd_queue & 0x04) >> 2) + WMI_SET_TDLS_PEER_VI_UAPSD(peer_cap); + if ((in_peer_cap->peer_uapsd_queue & 0x02) >> 1) + WMI_SET_TDLS_PEER_BK_UAPSD(peer_cap); + if (in_peer_cap->peer_uapsd_queue & 0x01) + WMI_SET_TDLS_PEER_BE_UAPSD(peer_cap); + + /* Ack and More Data Ack are sent as 0, so no need to set + * but fill SP + */ + WMI_SET_TDLS_PEER_SP_UAPSD(peer_cap, in_peer_cap->peer_max_sp); + + peer_cap->buff_sta_support = in_peer_cap->peer_buff_sta_support; + peer_cap->off_chan_support = in_peer_cap->peer_off_chan_support; + peer_cap->peer_curr_operclass = in_peer_cap->peer_curr_operclass; + /* self curr operclass is not being used and so pass op class for + * preferred off chan in it. + */ + peer_cap->self_curr_operclass = in_peer_cap->opclass_for_prefoffchan; + peer_cap->peer_chan_len = in_peer_cap->peer_chanlen; + peer_cap->peer_operclass_len = in_peer_cap->peer_oper_classlen; + + WMI_LOGD("peer_operclass_len: %d", peer_cap->peer_operclass_len); + for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++) { + peer_cap->peer_operclass[i] = in_peer_cap->peer_oper_class[i]; + } + qdf_trace_hex_dump(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + (uint8_t *)peer_cap->peer_operclass, + WMI_TDLS_MAX_SUPP_OPER_CLASSES); + + peer_cap->is_peer_responder = in_peer_cap->is_peer_responder; + peer_cap->pref_offchan_freq = in_peer_cap->pref_offchan_freq; + peer_cap->pref_offchan_num = in_peer_cap->pref_off_channum; + peer_cap->pref_offchan_bw = in_peer_cap->pref_off_chan_bandwidth; + + WMI_LOGD + ("%s: peer_qos: 0x%x, buff_sta_support: %d, off_chan_support: %d, " + "peer_curr_operclass: %d, self_curr_operclass: %d, peer_chan_len: " + "%d, peer_operclass_len: %d, is_peer_responder: %d, pref_offchan_num:" + " %d, pref_offchan_bw: %d, pref_offchan_freq: %u", + __func__, peer_cap->peer_qos, peer_cap->buff_sta_support, + peer_cap->off_chan_support, peer_cap->peer_curr_operclass, + peer_cap->self_curr_operclass, peer_cap->peer_chan_len, + peer_cap->peer_operclass_len, peer_cap->is_peer_responder, + peer_cap->pref_offchan_num, peer_cap->pref_offchan_bw, + peer_cap->pref_offchan_freq); + + /* next fill variable size array of peer chan info */ + buf_ptr += sizeof(wmi_tdls_peer_capabilities); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_channel) * + in_peer_cap->peer_chanlen); + + chan_info = (wmi_channel *) (buf_ptr + WMI_TLV_HDR_SIZE); + in_chan_info = in_peer_cap->peer_chan; + + for (i = 0; i < in_peer_cap->peer_chanlen; ++i) { + WMITLV_SET_HDR(&chan_info->tlv_header, + WMITLV_TAG_STRUC_wmi_channel, + WMITLV_GET_STRUCT_TLVLEN(wmi_channel)); + chan_info->mhz = ch_mhz[i]; + chan_info->band_center_freq1 = chan_info->mhz; + chan_info->band_center_freq2 = 0; + + WMI_LOGD("%s: chan[%d] = %u", __func__, i, chan_info->mhz); + + if (in_chan_info->dfs_set) { + WMI_SET_CHANNEL_FLAG(chan_info, WMI_CHAN_FLAG_PASSIVE); + wmi_debug("chan[%d] DFS[%d]", + in_chan_info->chan_id, + in_chan_info->dfs_set); + } + + if (chan_info->mhz < WMI_2_4_GHZ_MAX_FREQ) + WMI_SET_CHANNEL_MODE(chan_info, MODE_11G); + else + WMI_SET_CHANNEL_MODE(chan_info, MODE_11A); + + WMI_SET_CHANNEL_MAX_TX_POWER(chan_info, in_chan_info->pwr); + WMI_SET_CHANNEL_REG_POWER(chan_info, in_chan_info->pwr); + WMI_LOGD("Channel TX power[%d] = %u: %d", i, chan_info->mhz, + in_chan_info->pwr); + + chan_info++; + in_chan_info++; + } + + wmi_mtrace(WMI_TDLS_PEER_UPDATE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_TDLS_PEER_UPDATE_CMDID)) { + WMI_LOGE("%s: failed to send tdls peer update state command", + __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_vdev_tdls_ev_param_tlv() - extract vdev tdls param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold vdev tdls param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_vdev_tdls_ev_param_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct tdls_event_info *param) +{ + WMI_TDLS_PEER_EVENTID_param_tlvs *param_buf; + wmi_tdls_peer_event_fixed_param *evt; + + param_buf = (WMI_TDLS_PEER_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("%s: NULL param_buf", __func__); + return QDF_STATUS_E_NULL_VALUE; + } + + evt = param_buf->fixed_param; + + qdf_mem_zero(param, sizeof(*param)); + + param->vdev_id = evt->vdev_id; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&evt->peer_macaddr, + param->peermac.bytes); + switch (evt->peer_status) { + case WMI_TDLS_SHOULD_DISCOVER: + param->message_type = TDLS_SHOULD_DISCOVER; + break; + case WMI_TDLS_SHOULD_TEARDOWN: + param->message_type = TDLS_SHOULD_TEARDOWN; + break; + case WMI_TDLS_PEER_DISCONNECTED: + param->message_type = TDLS_PEER_DISCONNECTED; + break; + case WMI_TDLS_CONNECTION_TRACKER_NOTIFICATION: + param->message_type = TDLS_CONNECTION_TRACKER_NOTIFY; + break; + default: + WMI_LOGE("%s: Discarding unknown tdls event %d from target", + __func__, evt->peer_status); + return QDF_STATUS_E_INVAL; + }; + + switch (evt->peer_reason) { + case WMI_TDLS_TEARDOWN_REASON_TX: + param->peer_reason = TDLS_TEARDOWN_TX; + break; + case WMI_TDLS_TEARDOWN_REASON_RSSI: + param->peer_reason = TDLS_TEARDOWN_RSSI; + break; + case WMI_TDLS_TEARDOWN_REASON_SCAN: + param->peer_reason = TDLS_TEARDOWN_SCAN; + break; + case WMI_TDLS_DISCONNECTED_REASON_PEER_DELETE: + param->peer_reason = TDLS_DISCONNECTED_PEER_DELETE; + break; + case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT: + param->peer_reason = TDLS_TEARDOWN_PTR_TIMEOUT; + break; + case WMI_TDLS_TEARDOWN_REASON_BAD_PTR: + param->peer_reason = TDLS_TEARDOWN_BAD_PTR; + break; + case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE: + param->peer_reason = TDLS_TEARDOWN_NO_RSP; + break; + case WMI_TDLS_ENTER_BUF_STA: + param->peer_reason = TDLS_PEER_ENTER_BUF_STA; + break; + case WMI_TDLS_EXIT_BUF_STA: + param->peer_reason = TDLS_PEER_EXIT_BUF_STA; + break; + case WMI_TDLS_ENTER_BT_BUSY_MODE: + param->peer_reason = TDLS_ENTER_BT_BUSY; + break; + case WMI_TDLS_EXIT_BT_BUSY_MODE: + param->peer_reason = TDLS_EXIT_BT_BUSY; + break; + case WMI_TDLS_SCAN_STARTED_EVENT: + param->peer_reason = TDLS_SCAN_STARTED; + break; + case WMI_TDLS_SCAN_COMPLETED_EVENT: + param->peer_reason = TDLS_SCAN_COMPLETED; + break; + + default: + WMI_LOGE("%s: unknown reason %d in tdls event %d from target", + __func__, evt->peer_reason, evt->peer_status); + return QDF_STATUS_E_INVAL; + }; + + WMI_LOGD("%s: tdls event, peer: "QDF_MAC_ADDR_FMT", type: 0x%x, reason: %d, vdev: %d", + __func__, QDF_MAC_ADDR_REF(param->peermac.bytes), + param->message_type, + param->peer_reason, param->vdev_id); + + return QDF_STATUS_SUCCESS; +} + +void wmi_tdls_attach_tlv(struct wmi_unified *wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_set_tdls_offchan_mode_cmd = + send_set_tdls_offchan_mode_cmd_tlv; + ops->send_update_fw_tdls_state_cmd = + send_update_fw_tdls_state_cmd_tlv; + ops->send_update_tdls_peer_state_cmd = + send_update_tdls_peer_state_cmd_tlv; + ops->extract_vdev_tdls_ev_param = extract_vdev_tdls_ev_param_tlv; +} +#endif /* FEATURE_WLAN_TDLS */ + +/* + * send_process_set_ie_info_cmd_tlv() - Function to send IE info to firmware + * @wmi_handle: Pointer to WMi handle + * @ie_data: Pointer for ie data + * + * This function sends IE information to firmware + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + * + */ +static QDF_STATUS send_process_set_ie_info_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_ie_info_param *ie_info) +{ + wmi_vdev_set_ie_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t len, ie_len_aligned; + QDF_STATUS ret; + + ie_len_aligned = roundup(ie_info->length, sizeof(uint32_t)); + /* Allocate memory for the WMI command */ + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + ie_len_aligned; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + + /* Populate the WMI command */ + cmd = (wmi_vdev_set_ie_cmd_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_ie_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_set_ie_cmd_fixed_param)); + cmd->vdev_id = ie_info->vdev_id; + cmd->ie_id = ie_info->ie_id; + cmd->ie_len = ie_info->length; + cmd->band = ie_info->band; + + WMI_LOGD(FL("IE:%d of size:%d sent for vdev:%d"), ie_info->ie_id, + ie_info->length, ie_info->vdev_id); + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, ie_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + + qdf_mem_copy(buf_ptr, ie_info->data, cmd->ie_len); + + wmi_mtrace(WMI_VDEV_SET_IE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SET_IE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL("Failed to send set IE command ret = %d"), ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_set_base_macaddr_indicate_cmd_tlv() - set base mac address in fw + * @wmi_handle: wmi handle + * @custom_addr: base mac address + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_set_base_macaddr_indicate_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t *custom_addr) +{ + wmi_pdev_set_base_macaddr_cmd_fixed_param *cmd; + wmi_buf_t buf; + int err; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_pdev_set_base_macaddr_cmd_fixed_param *) wmi_buf_data(buf); + qdf_mem_zero(cmd, sizeof(*cmd)); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_base_macaddr_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_set_base_macaddr_cmd_fixed_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR(custom_addr, &cmd->base_macaddr); + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + WMI_HOST_PDEV_ID_SOC); + wmi_mtrace(WMI_PDEV_SET_BASE_MACADDR_CMDID, NO_SESSION, 0); + err = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(*cmd), + WMI_PDEV_SET_BASE_MACADDR_CMDID); + if (err) { + WMI_LOGE("Failed to send set_base_macaddr cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +#ifdef FEATURE_BLACKLIST_MGR + +static WMI_BSSID_DISALLOW_LIST_TYPE +wmi_get_wmi_reject_ap_type(enum blm_reject_ap_type reject_ap_type) +{ + switch (reject_ap_type) { + case USERSPACE_AVOID_TYPE: + return WMI_BSSID_DISALLOW_USER_SPACE_AVOID_LIST; + case DRIVER_AVOID_TYPE: + return WMI_BSSID_DISALLOW_DRIVER_AVOID_LIST; + case USERSPACE_BLACKLIST_TYPE: + return WMI_BSSID_DISALLOW_USER_SPACE_BLACK_LIST; + case DRIVER_BLACKLIST_TYPE: + return WMI_BSSID_DISALLOW_DRIVER_BLACK_LIST; + case DRIVER_RSSI_REJECT_TYPE: + return WMI_BSSID_DISALLOW_RSSI_REJECT_LIST; + default: + return WMI_BSSID_DISALLOW_DRIVER_AVOID_LIST; + } +} + +static WMI_BLACKLIST_REASON_ID +wmi_get_reject_reason(enum blm_reject_ap_reason reject_reason) +{ + switch(reject_reason) { + case REASON_NUD_FAILURE: + return WMI_BL_REASON_NUD_FAILURE; + case REASON_STA_KICKOUT: + return WMI_BL_REASON_STA_KICKOUT; + case REASON_ROAM_HO_FAILURE: + return WMI_BL_REASON_ROAM_HO_FAILURE; + case REASON_ASSOC_REJECT_POOR_RSSI: + return WMI_BL_REASON_ASSOC_REJECT_POOR_RSSI; + case REASON_ASSOC_REJECT_OCE: + return WMI_BL_REASON_ASSOC_REJECT_OCE; + case REASON_USERSPACE_BL: + return WMI_BL_REASON_USERSPACE_BL; + case REASON_USERSPACE_AVOID_LIST: + return WMI_BL_REASON_USERSPACE_AVOID_LIST; + case REASON_BTM_DISASSOC_IMMINENT: + return WMI_BL_REASON_BTM_DIASSOC_IMMINENT; + case REASON_BTM_BSS_TERMINATION: + return WMI_BL_REASON_BTM_BSS_TERMINATION; + case REASON_BTM_MBO_RETRY: + return WMI_BL_REASON_BTM_MBO_RETRY; + case REASON_REASSOC_RSSI_REJECT: + return WMI_BL_REASON_REASSOC_RSSI_REJECT; + case REASON_REASSOC_NO_MORE_STAS: + return WMI_BL_REASON_REASSOC_NO_MORE_STAS; + default: + return 0; + } +} + +static QDF_STATUS +send_reject_ap_list_cmd_tlv(wmi_unified_t wmi_handle, + struct reject_ap_params *reject_params) +{ + wmi_buf_t buf; + QDF_STATUS status; + uint32_t len, list_tlv_len; + int i; + uint8_t *buf_ptr; + wmi_pdev_dsm_filter_fixed_param *chan_list_fp; + wmi_pdev_bssid_disallow_list_config_param *chan_list; + struct reject_ap_config_params *reject_list = reject_params->bssid_list; + uint8_t num_of_reject_bssid = reject_params->num_of_reject_bssid; + + list_tlv_len = sizeof(*chan_list) * num_of_reject_bssid; + + len = sizeof(*chan_list_fp) + list_tlv_len + WMI_TLV_HDR_SIZE; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + WMI_LOGD("num of reject BSSIDs %d", num_of_reject_bssid); + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + chan_list_fp = (wmi_pdev_dsm_filter_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&chan_list_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_dsm_filter_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_dsm_filter_fixed_param)); + + buf_ptr += sizeof(wmi_pdev_dsm_filter_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, list_tlv_len); + + buf_ptr += WMI_TLV_HDR_SIZE; + chan_list = (wmi_pdev_bssid_disallow_list_config_param *)buf_ptr; + for (i = 0; i < num_of_reject_bssid; i++) { + + WMITLV_SET_HDR(&chan_list->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_bssid_disallow_list_config_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_bssid_disallow_list_config_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR(reject_list[i].bssid.bytes, + &chan_list->bssid); + chan_list->bssid_type = + wmi_get_wmi_reject_ap_type(reject_list[i].reject_ap_type); + chan_list->expected_rssi = reject_list[i].expected_rssi; + chan_list->remaining_disallow_duration = + reject_list[i].reject_duration; + chan_list->reason = + wmi_get_reject_reason(reject_list[i].reject_reason); + chan_list->original_timeout = reject_list[i].original_timeout; + chan_list->timestamp = reject_list[i].received_time; + chan_list->source = reject_list[i].source; + chan_list++; + } + + wmi_mtrace(WMI_PDEV_DSM_FILTER_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_PDEV_DSM_FILTER_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("wmi_unified_cmd_send WMI_PDEV_DSM_FILTER_CMDID returned Error %d", + status); + goto error; + } + + return QDF_STATUS_SUCCESS; +error: + wmi_buf_free(buf); + return status; +} + +void wmi_blacklist_mgr_attach_tlv(struct wmi_unified *wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_reject_ap_list_cmd = send_reject_ap_list_cmd_tlv; +} +#endif + +/** + * send_sar_limit_cmd_tlv() - send sar limit cmd to fw + * @wmi_handle: wmi handle + * @params: sar limit params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_sar_limit_cmd_tlv(wmi_unified_t wmi_handle, + struct sar_limit_cmd_params *sar_limit_params) +{ + wmi_buf_t buf; + QDF_STATUS qdf_status; + wmi_sar_limits_cmd_fixed_param *cmd; + int i; + uint8_t *buf_ptr; + wmi_sar_limit_cmd_row *wmi_sar_rows_list; + struct sar_limit_cmd_row *sar_rows_list; + uint32_t len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + + len += sizeof(wmi_sar_limit_cmd_row) * sar_limit_params->num_limit_rows; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_status = QDF_STATUS_E_NOMEM; + goto end; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_sar_limits_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sar_limits_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sar_limits_cmd_fixed_param)); + cmd->sar_enable = sar_limit_params->sar_enable; + cmd->commit_limits = sar_limit_params->commit_limits; + cmd->num_limit_rows = sar_limit_params->num_limit_rows; + + WMI_LOGD("no of sar rows = %d, len = %d", + sar_limit_params->num_limit_rows, len); + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_sar_limit_cmd_row) * + sar_limit_params->num_limit_rows); + if (cmd->num_limit_rows == 0) + goto send_sar_limits; + + wmi_sar_rows_list = (wmi_sar_limit_cmd_row *) + (buf_ptr + WMI_TLV_HDR_SIZE); + sar_rows_list = sar_limit_params->sar_limit_row_list; + + for (i = 0; i < sar_limit_params->num_limit_rows; i++) { + WMITLV_SET_HDR(&wmi_sar_rows_list->tlv_header, + WMITLV_TAG_STRUC_wmi_sar_limit_cmd_row, + WMITLV_GET_STRUCT_TLVLEN(wmi_sar_limit_cmd_row)); + wmi_sar_rows_list->band_id = sar_rows_list->band_id; + wmi_sar_rows_list->chain_id = sar_rows_list->chain_id; + wmi_sar_rows_list->mod_id = sar_rows_list->mod_id; + wmi_sar_rows_list->limit_value = sar_rows_list->limit_value; + wmi_sar_rows_list->validity_bitmap = + sar_rows_list->validity_bitmap; + WMI_LOGD("row %d, band_id = %d, chain_id = %d, mod_id = %d, limit_value = %d, validity_bitmap = %d", + i, wmi_sar_rows_list->band_id, + wmi_sar_rows_list->chain_id, + wmi_sar_rows_list->mod_id, + wmi_sar_rows_list->limit_value, + wmi_sar_rows_list->validity_bitmap); + sar_rows_list++; + wmi_sar_rows_list++; + } +send_sar_limits: + wmi_mtrace(WMI_SAR_LIMITS_CMDID, NO_SESSION, 0); + qdf_status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_SAR_LIMITS_CMDID); + + if (QDF_IS_STATUS_ERROR(qdf_status)) { + WMI_LOGE("Failed to send WMI_SAR_LIMITS_CMDID"); + wmi_buf_free(buf); + } + +end: + return qdf_status; +} + +static QDF_STATUS get_sar_limit_cmd_tlv(wmi_unified_t wmi_handle) +{ + wmi_sar_get_limits_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + QDF_STATUS status; + + WMI_LOGD(FL("Enter")); + + len = sizeof(*cmd); + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_sar_get_limits_cmd_fixed_param *)wmi_buf_data(wmi_buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sar_get_limits_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sar_get_limits_cmd_fixed_param)); + + cmd->reserved = 0; + + wmi_mtrace(WMI_SAR_GET_LIMITS_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_SAR_GET_LIMITS_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE(FL("Failed to send get SAR limit cmd: %d"), status); + wmi_buf_free(wmi_buf); + } + + WMI_LOGD(FL("Exit")); + + return status; +} + +/** + * wmi_sar2_result_string() - return string conversion of sar2 result + * @result: sar2 result value + * + * This utility function helps log string conversion of sar2 result. + * + * Return: string conversion of sar 2 result, if match found; + * "Unknown response" otherwise. + */ +static const char *wmi_sar2_result_string(uint32_t result) +{ + switch (result) { + CASE_RETURN_STRING(WMI_SAR2_SUCCESS); + CASE_RETURN_STRING(WMI_SAR2_INVALID_ANTENNA_INDEX); + CASE_RETURN_STRING(WMI_SAR2_INVALID_TABLE_INDEX); + CASE_RETURN_STRING(WMI_SAR2_STATE_ERROR); + CASE_RETURN_STRING(WMI_SAR2_BDF_NO_TABLE); + default: + return "Unknown response"; + } +} + +/** + * extract_sar2_result_event_tlv() - process sar response event from FW. + * @handle: wma handle + * @event: event buffer + * @len: buffer length + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_sar2_result_event_tlv(void *handle, + uint8_t *event, + uint32_t len) +{ + wmi_sar2_result_event_fixed_param *sar2_fixed_param; + + WMI_SAR2_RESULT_EVENTID_param_tlvs *param_buf = + (WMI_SAR2_RESULT_EVENTID_param_tlvs *)event; + + if (!param_buf) { + wmi_err("Invalid sar2 result event buffer"); + return QDF_STATUS_E_INVAL; + } + + sar2_fixed_param = param_buf->fixed_param; + if (!sar2_fixed_param) { + wmi_err("Invalid sar2 result event fixed param buffer"); + return QDF_STATUS_E_INVAL; + } + + wmi_debug("SAR2 result: %s", + wmi_sar2_result_string(sar2_fixed_param->result)); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_sar_limit_event_tlv(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct sar_limit_event *event) +{ + wmi_sar_get_limits_event_fixed_param *fixed_param; + WMI_SAR_GET_LIMITS_EVENTID_param_tlvs *param_buf; + wmi_sar_get_limit_event_row *row_in; + struct sar_limit_event_row *row_out; + uint32_t row; + + if (!evt_buf) { + WMI_LOGE(FL("input event is NULL")); + return QDF_STATUS_E_INVAL; + } + if (!event) { + WMI_LOGE(FL("output event is NULL")); + return QDF_STATUS_E_INVAL; + } + + param_buf = (WMI_SAR_GET_LIMITS_EVENTID_param_tlvs *)evt_buf; + + fixed_param = param_buf->fixed_param; + if (!fixed_param) { + WMI_LOGE(FL("Invalid fixed param")); + return QDF_STATUS_E_INVAL; + } + + event->sar_enable = fixed_param->sar_enable; + event->num_limit_rows = fixed_param->num_limit_rows; + + if (event->num_limit_rows > param_buf->num_sar_get_limits) { + WMI_LOGE(FL("Num rows %d exceeds sar_get_limits rows len %d"), + event->num_limit_rows, param_buf->num_sar_get_limits); + return QDF_STATUS_E_INVAL; + } + + if (event->num_limit_rows > MAX_SAR_LIMIT_ROWS_SUPPORTED) { + QDF_ASSERT(0); + WMI_LOGE(FL("Num rows %d exceeds max of %d"), + event->num_limit_rows, + MAX_SAR_LIMIT_ROWS_SUPPORTED); + event->num_limit_rows = MAX_SAR_LIMIT_ROWS_SUPPORTED; + } + + row_in = param_buf->sar_get_limits; + if (!row_in) { + WMI_LOGD("sar_get_limits is NULL"); + } else { + row_out = &event->sar_limit_row[0]; + for (row = 0; row < event->num_limit_rows; row++) { + row_out->band_id = row_in->band_id; + row_out->chain_id = row_in->chain_id; + row_out->mod_id = row_in->mod_id; + row_out->limit_value = row_in->limit_value; + row_out++; + row_in++; + } + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_del_pmkid_cache_cmd_tlv() - send wmi cmd of set del pmkid + * @wmi_handle: wmi handler + * @pmk_info: pointer to PMK cache entry + * @vdev_id: vdev id + * + * Return: 0 for success and non zero for failure + */ +static QDF_STATUS send_set_del_pmkid_cache_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_unified_pmk_cache *pmk_info) +{ + wmi_pdev_update_pmk_cache_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + uint8_t *buf_ptr; + wmi_pmk_cache *pmksa; + uint32_t len = sizeof(*cmd); + + if (!pmk_info) + return QDF_STATUS_E_INVAL; + + if (!pmk_info->is_flush_all) + len += WMI_TLV_HDR_SIZE + sizeof(*pmksa); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_pdev_update_pmk_cache_cmd_fixed_param *) buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_update_pmk_cache_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_update_pmk_cache_cmd_fixed_param)); + + cmd->vdev_id = pmk_info->vdev_id; + + /* If pmk_info->is_flush_all is true, this is a flush request */ + if (pmk_info->is_flush_all) { + cmd->op_flag = WMI_PMK_CACHE_OP_FLAG_FLUSH_ALL; + cmd->num_cache = 0; + goto send_cmd; + } + + cmd->num_cache = 1; + buf_ptr += sizeof(*cmd); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(*pmksa)); + buf_ptr += WMI_TLV_HDR_SIZE; + + pmksa = (wmi_pmk_cache *)buf_ptr; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_STRUC_wmi_pmk_cache, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pmk_cache)); + pmksa->pmk_len = pmk_info->pmk_len; + qdf_mem_copy(pmksa->pmk, pmk_info->pmk, pmksa->pmk_len); + pmksa->pmkid_len = pmk_info->pmkid_len; + qdf_mem_copy(pmksa->pmkid, pmk_info->pmkid, pmksa->pmkid_len); + qdf_mem_copy(&(pmksa->bssid), &(pmk_info->bssid), sizeof(wmi_mac_addr)); + pmksa->ssid.ssid_len = pmk_info->ssid.length; + qdf_mem_copy(&(pmksa->ssid.ssid), &(pmk_info->ssid.mac_ssid), + pmksa->ssid.ssid_len); + pmksa->cache_id = pmk_info->cache_id; + pmksa->cat_flag = pmk_info->cat_flag; + pmksa->action_flag = pmk_info->action_flag; + +send_cmd: + wmi_mtrace(WMI_PDEV_UPDATE_PMK_CACHE_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_UPDATE_PMK_CACHE_CMDID); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: failed to send set del pmkid cache command %d", + __func__, status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_del_ts_cmd_tlv() - send DELTS request to fw + * @wmi_handle: wmi handle + * @msg: delts params + * + * Return: CDF status + */ +static QDF_STATUS send_del_ts_cmd_tlv(wmi_unified_t wmi_handle, uint8_t vdev_id, + uint8_t ac) +{ + wmi_vdev_wmm_delts_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_wmm_delts_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_wmm_delts_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_wmm_delts_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->ac = ac; + + WMI_LOGD("Delts vdev:%d, ac:%d, %s:%d", + cmd->vdev_id, cmd->ac, __func__, __LINE__); + wmi_mtrace(WMI_VDEV_WMM_DELTS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_WMM_DELTS_CMDID)) { + WMI_LOGP("%s: Failed to send vdev DELTS command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_aggr_qos_cmd_tlv() - send aggr qos request to fw + * @wmi_handle: handle to wmi + * @aggr_qos_rsp_msg - combined struct for all ADD_TS requests. + * + * A function to handle WMI_AGGR_QOS_REQ. This will send out + * ADD_TS requests to firmware in loop for all the ACs with + * active flow. + * + * Return: CDF status + */ +static QDF_STATUS send_aggr_qos_cmd_tlv(wmi_unified_t wmi_handle, + struct aggr_add_ts_param *aggr_qos_rsp_msg) +{ + int i = 0; + wmi_vdev_wmm_addts_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + for (i = 0; i < WMI_QOS_NUM_AC_MAX; i++) { + /* if flow in this AC is active */ + if (((1 << i) & aggr_qos_rsp_msg->tspecIdx)) { + /* + * as per implementation of wma_add_ts_req() we + * are not waiting any response from firmware so + * apart from sending ADDTS to firmware just send + * success to upper layers + */ + aggr_qos_rsp_msg->status[i] = QDF_STATUS_SUCCESS; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_wmm_addts_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_wmm_addts_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_wmm_addts_cmd_fixed_param)); + cmd->vdev_id = aggr_qos_rsp_msg->vdev_id; + cmd->ac = + WMI_TID_TO_AC(aggr_qos_rsp_msg->tspec[i].tsinfo. + traffic.userPrio); + cmd->medium_time_us = + aggr_qos_rsp_msg->tspec[i].mediumTime * 32; + cmd->downgrade_type = WMM_AC_DOWNGRADE_DEPRIO; + WMI_LOGD("%s:%d: Addts vdev:%d, ac:%d, mediumTime:%d downgrade_type:%d", + __func__, __LINE__, cmd->vdev_id, cmd->ac, + cmd->medium_time_us, cmd->downgrade_type); + wmi_mtrace(WMI_VDEV_WMM_ADDTS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_WMM_ADDTS_CMDID)) { + WMI_LOGP("%s: Failed to send vdev ADDTS command", + __func__); + aggr_qos_rsp_msg->status[i] = + QDF_STATUS_E_FAILURE; + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + } + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_add_ts_cmd_tlv() - send ADDTS request to fw + * @wmi_handle: wmi handle + * @msg: ADDTS params + * + * Return: CDF status + */ +static QDF_STATUS send_add_ts_cmd_tlv(wmi_unified_t wmi_handle, + struct add_ts_param *msg) +{ + wmi_vdev_wmm_addts_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + msg->status = QDF_STATUS_SUCCESS; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_wmm_addts_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_wmm_addts_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_wmm_addts_cmd_fixed_param)); + cmd->vdev_id = msg->vdev_id; + cmd->ac = msg->tspec.tsinfo.traffic.userPrio; + cmd->medium_time_us = msg->tspec.mediumTime * 32; + cmd->downgrade_type = WMM_AC_DOWNGRADE_DROP; + WMI_LOGD("Addts vdev:%d, ac:%d, mediumTime:%d, downgrade_type:%d %s:%d", + cmd->vdev_id, cmd->ac, cmd->medium_time_us, + cmd->downgrade_type, __func__, __LINE__); + wmi_mtrace(WMI_VDEV_WMM_ADDTS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_WMM_ADDTS_CMDID)) { + WMI_LOGP("%s: Failed to send vdev ADDTS command", __func__); + msg->status = QDF_STATUS_E_FAILURE; + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_process_add_periodic_tx_ptrn_cmd_tlv() - add periodic tx pattern + * @wmi_handle: wmi handle + * @pattern: tx pattern params + * @vdev_id: vdev id + * + * Return: QDF status + */ +static QDF_STATUS send_process_add_periodic_tx_ptrn_cmd_tlv( + wmi_unified_t wmi_handle, + struct periodic_tx_pattern *pattern, + uint8_t vdev_id) +{ + WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + uint32_t ptrn_len, ptrn_len_aligned; + int j; + + ptrn_len = pattern->ucPtrnSize; + ptrn_len_aligned = roundup(ptrn_len, sizeof(uint32_t)); + len = sizeof(WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param) + + WMI_TLV_HDR_SIZE + ptrn_len_aligned; + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param)); + + /* Pass the pattern id to delete for the corresponding vdev id */ + cmd->vdev_id = vdev_id; + cmd->pattern_id = pattern->ucPtrnId; + cmd->timeout = pattern->usPtrnIntervalMs; + cmd->length = pattern->ucPtrnSize; + + /* Pattern info */ + buf_ptr += sizeof(WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, ptrn_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, pattern->ucPattern, ptrn_len); + for (j = 0; j < pattern->ucPtrnSize; j++) + WMI_LOGD("%s: Add Ptrn: %02x", __func__, buf_ptr[j] & 0xff); + + WMI_LOGD("%s: Add ptrn id: %d vdev_id: %d", + __func__, cmd->pattern_id, cmd->vdev_id); + + wmi_mtrace(WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID)) { + WMI_LOGE("%s: failed to add pattern set state command", + __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_process_del_periodic_tx_ptrn_cmd_tlv() - del periodic tx pattern + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @pattern_id: pattern id + * + * Return: QDF status + */ +static QDF_STATUS send_process_del_periodic_tx_ptrn_cmd_tlv( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint8_t pattern_id) +{ + WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len = + sizeof(WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param); + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param *) + wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param)); + + /* Pass the pattern id to delete for the corresponding vdev id */ + cmd->vdev_id = vdev_id; + cmd->pattern_id = pattern_id; + WMI_LOGD("%s: Del ptrn id: %d vdev_id: %d", + __func__, cmd->pattern_id, cmd->vdev_id); + + wmi_mtrace(WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID)) { + WMI_LOGE("%s: failed to send del pattern command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_auto_shutdown_timer_cmd_tlv() - sets auto shutdown timer in firmware + * @wmi_handle: wmi handle + * @timer_val: auto shutdown timer value + * + * Return: CDF status + */ +static QDF_STATUS send_set_auto_shutdown_timer_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t timer_val) +{ + QDF_STATUS status; + wmi_buf_t buf = NULL; + uint8_t *buf_ptr; + wmi_host_auto_shutdown_cfg_cmd_fixed_param *wmi_auto_sh_cmd; + int len = sizeof(wmi_host_auto_shutdown_cfg_cmd_fixed_param); + + WMI_LOGD("%s: Set WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID:TIMER_VAL=%d", + __func__, timer_val); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + wmi_auto_sh_cmd = + (wmi_host_auto_shutdown_cfg_cmd_fixed_param *) buf_ptr; + wmi_auto_sh_cmd->timer_value = timer_val; + + WMITLV_SET_HDR(&wmi_auto_sh_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_host_auto_shutdown_cfg_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_host_auto_shutdown_cfg_cmd_fixed_param)); + + wmi_mtrace(WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("%s: WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID Err %d", + __func__, status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_set_led_flashing_cmd_tlv() - set led flashing in fw + * @wmi_handle: wmi handle + * @flashing: flashing request + * + * Return: CDF status + */ +static QDF_STATUS send_set_led_flashing_cmd_tlv(wmi_unified_t wmi_handle, + struct flashing_req_params *flashing) +{ + wmi_set_led_flashing_cmd_fixed_param *cmd; + QDF_STATUS status; + wmi_buf_t buf; + uint8_t *buf_ptr; + int32_t len = sizeof(wmi_set_led_flashing_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_set_led_flashing_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_set_led_flashing_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_set_led_flashing_cmd_fixed_param)); + cmd->pattern_id = flashing->pattern_id; + cmd->led_x0 = flashing->led_x0; + cmd->led_x1 = flashing->led_x1; + + wmi_mtrace(WMI_PDEV_SET_LED_FLASHING_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_LED_FLASHING_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("%s: wmi_unified_cmd_send WMI_PEER_SET_PARAM_CMD" + " returned Error %d", __func__, status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_process_ch_avoid_update_cmd_tlv() - handles channel avoid update request + * @wmi_handle: wmi handle + * @ch_avoid_update_req: channel avoid update params + * + * Return: CDF status + */ +static QDF_STATUS send_process_ch_avoid_update_cmd_tlv(wmi_unified_t wmi_handle) +{ + QDF_STATUS status; + wmi_buf_t buf = NULL; + uint8_t *buf_ptr; + wmi_chan_avoid_update_cmd_param *ch_avoid_update_fp; + int len = sizeof(wmi_chan_avoid_update_cmd_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + ch_avoid_update_fp = (wmi_chan_avoid_update_cmd_param *) buf_ptr; + WMITLV_SET_HDR(&ch_avoid_update_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_chan_avoid_update_cmd_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_chan_avoid_update_cmd_param)); + + wmi_mtrace(WMI_CHAN_AVOID_UPDATE_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_CHAN_AVOID_UPDATE_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("wmi_unified_cmd_send" + " WMITLV_TABLE_WMI_CHAN_AVOID_UPDATE" + " returned Error %d", status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_pdev_set_pcl_cmd_tlv() - Send WMI_SOC_SET_PCL_CMDID to FW + * @wmi_handle: wmi handle + * @msg: PCL structure containing the PCL and the number of channels + * + * WMI_PDEV_SET_PCL_CMDID provides a Preferred Channel List (PCL) to the WLAN + * firmware. The DBS Manager is the consumer of this information in the WLAN + * firmware. The channel list will be used when a Virtual DEVice (VDEV) needs + * to migrate to a new channel without host driver involvement. An example of + * this behavior is Legacy Fast Roaming (LFR 3.0). Generally, the host will + * manage the channel selection without firmware involvement. + * + * WMI_PDEV_SET_PCL_CMDID will carry only the weight list and not the actual + * channel list. The weights corresponds to the channels sent in + * WMI_SCAN_CHAN_LIST_CMDID. The channels from PCL would be having a higher + * weightage compared to the non PCL channels. + * + * Return: Success if the cmd is sent successfully to the firmware + */ +static QDF_STATUS send_pdev_set_pcl_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_pcl_chan_weights *msg) +{ + wmi_pdev_set_pcl_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t *cmd_args, i, len; + uint32_t chan_len; + + chan_len = msg->saved_num_chan; + + len = sizeof(*cmd) + + WMI_TLV_HDR_SIZE + (chan_len * sizeof(uint32_t)); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_pdev_set_pcl_cmd_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_pcl_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_pdev_set_pcl_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + WMI_HOST_PDEV_ID_SOC); + cmd->num_chan = chan_len; + buf_ptr += sizeof(wmi_pdev_set_pcl_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (chan_len * sizeof(uint32_t))); + cmd_args = (uint32_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < chan_len ; i++) + cmd_args[i] = msg->weighed_valid_list[i]; + wmi_mtrace(WMI_PDEV_SET_PCL_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_PCL_CMDID)) { + WMI_LOGE("%s: Failed to send WMI_PDEV_SET_PCL_CMDID", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_POLICY_MGR_ENABLE +/** + * send_pdev_set_dual_mac_config_cmd_tlv() - Set dual mac config to FW + * @wmi_handle: wmi handle + * @msg: Dual MAC config parameters + * + * Configures WLAN firmware with the dual MAC features + * + * Return: QDF_STATUS. 0 on success. + */ +static +QDF_STATUS send_pdev_set_dual_mac_config_cmd_tlv(wmi_unified_t wmi_handle, + struct policy_mgr_dual_mac_config *msg) +{ + wmi_pdev_set_mac_config_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_pdev_set_mac_config_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_mac_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_set_mac_config_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + WMI_HOST_PDEV_ID_SOC); + cmd->concurrent_scan_config_bits = msg->scan_config; + cmd->fw_mode_config_bits = msg->fw_mode_config; + WMI_LOGD("%s: scan_config:%x fw_mode_config:%x", + __func__, msg->scan_config, msg->fw_mode_config); + + wmi_mtrace(WMI_PDEV_SET_MAC_CONFIG_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_MAC_CONFIG_CMDID)) { + WMI_LOGE("%s: Failed to send WMI_PDEV_SET_MAC_CONFIG_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +void wmi_policy_mgr_attach_tlv(struct wmi_unified *wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_pdev_set_dual_mac_config_cmd = + send_pdev_set_dual_mac_config_cmd_tlv; +} +#endif /* WLAN_POLICY_MGR_ENABLE */ + +/** + * send_adapt_dwelltime_params_cmd_tlv() - send wmi cmd of adaptive dwelltime + * configuration params + * @wma_handle: wma handler + * @dwelltime_params: pointer to dwelltime_params + * + * Return: QDF_STATUS_SUCCESS on success and QDF failure reason code for failure + */ +static +QDF_STATUS send_adapt_dwelltime_params_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_adaptive_dwelltime_params *dwelltime_params) +{ + wmi_scan_adaptive_dwell_config_fixed_param *dwell_param; + wmi_scan_adaptive_dwell_parameters_tlv *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + int32_t err; + int len; + + len = sizeof(wmi_scan_adaptive_dwell_config_fixed_param); + len += WMI_TLV_HDR_SIZE; /* TLV for ext_thresholds*/ + len += sizeof(wmi_scan_adaptive_dwell_parameters_tlv); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(buf); + dwell_param = (wmi_scan_adaptive_dwell_config_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&dwell_param->tlv_header, + WMITLV_TAG_STRUC_wmi_scan_adaptive_dwell_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_scan_adaptive_dwell_config_fixed_param)); + + dwell_param->enable = dwelltime_params->is_enabled; + buf_ptr += sizeof(wmi_scan_adaptive_dwell_config_fixed_param); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_scan_adaptive_dwell_parameters_tlv)); + buf_ptr += WMI_TLV_HDR_SIZE; + + cmd = (wmi_scan_adaptive_dwell_parameters_tlv *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_scan_adaptive_dwell_parameters_tlv, + WMITLV_GET_STRUCT_TLVLEN( + wmi_scan_adaptive_dwell_parameters_tlv)); + + cmd->default_adaptive_dwell_mode = dwelltime_params->dwelltime_mode; + cmd->adapative_lpf_weight = dwelltime_params->lpf_weight; + cmd->passive_monitor_interval_ms = dwelltime_params->passive_mon_intval; + cmd->wifi_activity_threshold_pct = dwelltime_params->wifi_act_threshold; + wmi_mtrace(WMI_SCAN_ADAPTIVE_DWELL_CONFIG_CMDID, NO_SESSION, 0); + err = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_SCAN_ADAPTIVE_DWELL_CONFIG_CMDID); + if (err) { + WMI_LOGE("Failed to send adapt dwelltime cmd err=%d", err); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_dbs_scan_sel_params_cmd_tlv() - send wmi cmd of DBS scan selection + * configuration params + * @wmi_handle: wmi handler + * @dbs_scan_params: pointer to wmi_dbs_scan_sel_params + * + * Return: QDF_STATUS_SUCCESS on success and QDF failure reason code for failure + */ +static QDF_STATUS send_dbs_scan_sel_params_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_dbs_scan_sel_params *dbs_scan_params) +{ + wmi_scan_dbs_duty_cycle_fixed_param *dbs_scan_param; + wmi_scan_dbs_duty_cycle_tlv_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + QDF_STATUS err; + uint32_t i; + int len; + + len = sizeof(*dbs_scan_param); + len += WMI_TLV_HDR_SIZE; + len += dbs_scan_params->num_clients * sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + dbs_scan_param = (wmi_scan_dbs_duty_cycle_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&dbs_scan_param->tlv_header, + WMITLV_TAG_STRUC_wmi_scan_dbs_duty_cycle_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_scan_dbs_duty_cycle_fixed_param)); + + dbs_scan_param->num_clients = dbs_scan_params->num_clients; + dbs_scan_param->pdev_id = dbs_scan_params->pdev_id; + buf_ptr += sizeof(*dbs_scan_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + (sizeof(*cmd) * dbs_scan_params->num_clients)); + buf_ptr = buf_ptr + (uint8_t) WMI_TLV_HDR_SIZE; + + for (i = 0; i < dbs_scan_params->num_clients; i++) { + cmd = (wmi_scan_dbs_duty_cycle_tlv_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_scan_dbs_duty_cycle_param_tlv, + WMITLV_GET_STRUCT_TLVLEN( + wmi_scan_dbs_duty_cycle_tlv_param)); + cmd->module_id = dbs_scan_params->module_id[i]; + cmd->num_dbs_scans = dbs_scan_params->num_dbs_scans[i]; + cmd->num_non_dbs_scans = dbs_scan_params->num_non_dbs_scans[i]; + buf_ptr = buf_ptr + (uint8_t) sizeof(*cmd); + } + + wmi_mtrace(WMI_SET_SCAN_DBS_DUTY_CYCLE_CMDID, NO_SESSION, 0); + err = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_SET_SCAN_DBS_DUTY_CYCLE_CMDID); + if (QDF_IS_STATUS_ERROR(err)) { + WMI_LOGE("Failed to send dbs scan selection cmd err=%d", err); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_arp_stats_req_cmd_tlv() - send wmi cmd to set arp stats request + * @wmi_handle: wmi handler + * @req_buf: set arp stats request buffer + * + * Return: 0 for success and non zero for failure + */ +static QDF_STATUS send_set_arp_stats_req_cmd_tlv(wmi_unified_t wmi_handle, + struct set_arp_stats *req_buf) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len; + uint8_t *buf_ptr; + wmi_vdev_set_arp_stats_cmd_fixed_param *wmi_set_arp; + + len = sizeof(wmi_vdev_set_arp_stats_cmd_fixed_param); + if (req_buf->pkt_type_bitmap) { + len += WMI_TLV_HDR_SIZE; + len += sizeof(wmi_vdev_set_connectivity_check_stats); + } + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + wmi_set_arp = + (wmi_vdev_set_arp_stats_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&wmi_set_arp->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_arp_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_set_arp_stats_cmd_fixed_param)); + + /* fill in per roam config values */ + wmi_set_arp->vdev_id = req_buf->vdev_id; + + wmi_set_arp->set_clr = req_buf->flag; + wmi_set_arp->pkt_type = req_buf->pkt_type; + wmi_set_arp->ipv4 = req_buf->ip_addr; + + WMI_LOGD("NUD Stats: vdev_id %u set_clr %u pkt_type:%u ipv4 %u", + wmi_set_arp->vdev_id, wmi_set_arp->set_clr, + wmi_set_arp->pkt_type, wmi_set_arp->ipv4); + + /* + * pkt_type_bitmap should be non-zero to ensure + * presence of additional stats. + */ + if (req_buf->pkt_type_bitmap) { + wmi_vdev_set_connectivity_check_stats *wmi_set_connect_stats; + + buf_ptr += sizeof(wmi_vdev_set_arp_stats_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_vdev_set_connectivity_check_stats)); + buf_ptr += WMI_TLV_HDR_SIZE; + wmi_set_connect_stats = + (wmi_vdev_set_connectivity_check_stats *)buf_ptr; + WMITLV_SET_HDR(&wmi_set_connect_stats->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_connectivity_check_stats, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_set_connectivity_check_stats)); + wmi_set_connect_stats->pkt_type_bitmap = + req_buf->pkt_type_bitmap; + wmi_set_connect_stats->tcp_src_port = req_buf->tcp_src_port; + wmi_set_connect_stats->tcp_dst_port = req_buf->tcp_dst_port; + wmi_set_connect_stats->icmp_ipv4 = req_buf->icmp_ipv4; + + WMI_LOGD("Connectivity Stats: pkt_type_bitmap %u tcp_src_port:%u tcp_dst_port %u icmp_ipv4 %u", + wmi_set_connect_stats->pkt_type_bitmap, + wmi_set_connect_stats->tcp_src_port, + wmi_set_connect_stats->tcp_dst_port, + wmi_set_connect_stats->icmp_ipv4); + } + + /* Send per roam config parameters */ + wmi_mtrace(WMI_VDEV_SET_ARP_STAT_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_VDEV_SET_ARP_STAT_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_SET_ARP_STATS_CMDID failed, Error %d", + status); + goto error; + } + + WMI_LOGD(FL("set arp stats flag=%d, vdev=%d"), + req_buf->flag, req_buf->vdev_id); + return QDF_STATUS_SUCCESS; +error: + wmi_buf_free(buf); + + return status; +} + +/** + * send_get_arp_stats_req_cmd_tlv() - send wmi cmd to get arp stats request + * @wmi_handle: wmi handler + * @req_buf: get arp stats request buffer + * + * Return: 0 for success and non zero for failure + */ +static QDF_STATUS send_get_arp_stats_req_cmd_tlv(wmi_unified_t wmi_handle, + struct get_arp_stats *req_buf) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len; + uint8_t *buf_ptr; + wmi_vdev_get_arp_stats_cmd_fixed_param *get_arp_stats; + + len = sizeof(wmi_vdev_get_arp_stats_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + get_arp_stats = + (wmi_vdev_get_arp_stats_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&get_arp_stats->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_get_arp_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_get_arp_stats_cmd_fixed_param)); + + /* fill in arp stats req cmd values */ + get_arp_stats->vdev_id = req_buf->vdev_id; + + wmi_debug("vdev=%d", req_buf->vdev_id); + /* Send per roam config parameters */ + wmi_mtrace(WMI_VDEV_GET_ARP_STAT_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_VDEV_GET_ARP_STAT_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_GET_ARP_STATS_CMDID failed, Error %d", + status); + goto error; + } + + return QDF_STATUS_SUCCESS; +error: + wmi_buf_free(buf); + + return status; +} + +/** + * send_peer_unmap_conf_cmd_tlv() - send PEER UNMAP conf command to fw + * @wmi: wmi handle + * @vdev_id: vdev id + * @peer_id_cnt: no. of peer ids + * @peer_id_list: list of peer ids + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_peer_unmap_conf_cmd_tlv(wmi_unified_t wmi, + uint8_t vdev_id, + uint32_t peer_id_cnt, + uint16_t *peer_id_list) +{ + int i; + wmi_buf_t buf; + uint8_t *buf_ptr; + A_UINT32 *peer_ids; + wmi_peer_unmap_response_cmd_fixed_param *cmd; + uint32_t peer_id_list_len; + uint32_t len = sizeof(*cmd); + QDF_STATUS status; + + if (!peer_id_cnt || !peer_id_list) + return QDF_STATUS_E_FAILURE; + + len += WMI_TLV_HDR_SIZE; + + peer_id_list_len = peer_id_cnt * sizeof(A_UINT32); + + len += peer_id_list_len; + + buf = wmi_buf_alloc(wmi, len); + + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_peer_unmap_response_cmd_fixed_param *)wmi_buf_data(buf); + buf_ptr = (uint8_t *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_unmap_response_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_unmap_response_cmd_fixed_param)); + + buf_ptr += sizeof(wmi_peer_unmap_response_cmd_fixed_param); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + peer_id_list_len); + + peer_ids = (A_UINT32 *)(buf_ptr + WMI_TLV_HDR_SIZE); + + for (i = 0; i < peer_id_cnt; i++) + peer_ids[i] = peer_id_list[i]; + + WMI_LOGD("%s: vdev_id %d peer_id_cnt %d", __func__, + vdev_id, peer_id_cnt); + wmi_mtrace(WMI_PEER_UNMAP_RESPONSE_CMDID, vdev_id, 0); + status = wmi_unified_cmd_send(wmi, buf, len, + WMI_PEER_UNMAP_RESPONSE_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("%s: Failed to send peer unmap conf command: Err[%d]", + __func__, status); + wmi_buf_free(buf); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +void wmi_sta_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_set_sta_sa_query_param_cmd = + send_set_sta_sa_query_param_cmd_tlv; + ops->send_set_sta_keep_alive_cmd = send_set_sta_keep_alive_cmd_tlv; + ops->send_vdev_set_gtx_cfg_cmd = send_vdev_set_gtx_cfg_cmd_tlv; + ops->send_process_dhcp_ind_cmd = send_process_dhcp_ind_cmd_tlv; + ops->send_get_link_speed_cmd = send_get_link_speed_cmd_tlv; + ops->send_fw_profiling_cmd = send_fw_profiling_cmd_tlv; + ops->send_nat_keepalive_en_cmd = send_nat_keepalive_en_cmd_tlv; + ops->send_wlm_latency_level_cmd = send_wlm_latency_level_cmd_tlv; + ops->send_process_set_ie_info_cmd = send_process_set_ie_info_cmd_tlv; + ops->send_set_base_macaddr_indicate_cmd = + send_set_base_macaddr_indicate_cmd_tlv; + ops->send_sar_limit_cmd = send_sar_limit_cmd_tlv; + ops->get_sar_limit_cmd = get_sar_limit_cmd_tlv; + ops->extract_sar_limit_event = extract_sar_limit_event_tlv; + ops->extract_sar2_result_event = extract_sar2_result_event_tlv; + ops->send_set_del_pmkid_cache_cmd = send_set_del_pmkid_cache_cmd_tlv; + ops->send_del_ts_cmd = send_del_ts_cmd_tlv; + ops->send_aggr_qos_cmd = send_aggr_qos_cmd_tlv; + ops->send_add_ts_cmd = send_add_ts_cmd_tlv; + ops->send_process_add_periodic_tx_ptrn_cmd = + send_process_add_periodic_tx_ptrn_cmd_tlv; + ops->send_process_del_periodic_tx_ptrn_cmd = + send_process_del_periodic_tx_ptrn_cmd_tlv; + ops->send_set_auto_shutdown_timer_cmd = + send_set_auto_shutdown_timer_cmd_tlv; + ops->send_set_led_flashing_cmd = send_set_led_flashing_cmd_tlv; + ops->send_process_ch_avoid_update_cmd = + send_process_ch_avoid_update_cmd_tlv; + ops->send_pdev_set_pcl_cmd = send_pdev_set_pcl_cmd_tlv; + ops->send_adapt_dwelltime_params_cmd = + send_adapt_dwelltime_params_cmd_tlv; + ops->send_dbs_scan_sel_params_cmd = + send_dbs_scan_sel_params_cmd_tlv; + ops->send_set_arp_stats_req_cmd = send_set_arp_stats_req_cmd_tlv; + ops->send_get_arp_stats_req_cmd = send_get_arp_stats_req_cmd_tlv; + ops->send_peer_unmap_conf_cmd = send_peer_unmap_conf_cmd_tlv; + + wmi_tdls_attach_tlv(wmi_handle); + wmi_policy_mgr_attach_tlv(wmi_handle); + wmi_blacklist_mgr_attach_tlv(wmi_handle); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..9c40df0491d4b5775d285c050fd32214e9c33767 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c @@ -0,0 +1,14399 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wmi_unified_api.h" +#include "wmi.h" +#include "wmi_version.h" +#include "wmi_unified_priv.h" +#include "wmi_version_whitelist.h" +#include +#include +#include +#include +#ifdef FEATURE_WLAN_APF +#include "wmi_unified_apf_tlv.h" +#endif +#ifdef WLAN_FEATURE_ACTION_OUI +#include "wmi_unified_action_oui_tlv.h" +#endif +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +#include "wlan_pmo_hw_filter_public_struct.h" +#endif +#include +#ifdef WLAN_SUPPORT_GREEN_AP +#include "wlan_green_ap_api.h" +#endif + +#include "wmi_unified_twt_api.h" + +#ifdef WLAN_POLICY_MGR_ENABLE +#include "wlan_policy_mgr_public_struct.h" +#endif + +#ifdef WMI_SMART_ANT_SUPPORT +#include "wmi_unified_smart_ant_api.h" +#endif + +#ifdef WMI_DBR_SUPPORT +#include "wmi_unified_dbr_api.h" +#endif + +#ifdef WMI_ATF_SUPPORT +#include "wmi_unified_atf_api.h" +#endif + +#ifdef WMI_AP_SUPPORT +#include "wmi_unified_ap_api.h" +#endif + +#include +#include + +/* HTC service ids for WMI for multi-radio */ +static const uint32_t multi_svc_ids[] = {WMI_CONTROL_SVC, + WMI_CONTROL_SVC_WMAC1, + WMI_CONTROL_SVC_WMAC2}; + +#ifdef ENABLE_HOST_TO_TARGET_CONVERSION +/*Populate peer_param array whose index as host id and + *value as target id + */ +static const uint32_t peer_param_tlv[] = { + [WMI_HOST_PEER_MIMO_PS_STATE] = WMI_PEER_MIMO_PS_STATE, + [WMI_HOST_PEER_AMPDU] = WMI_PEER_AMPDU, + [WMI_HOST_PEER_AUTHORIZE] = WMI_PEER_AUTHORIZE, + [WMI_HOST_PEER_CHWIDTH] = WMI_PEER_CHWIDTH, + [WMI_HOST_PEER_NSS] = WMI_PEER_NSS, + [WMI_HOST_PEER_USE_4ADDR] = WMI_PEER_USE_4ADDR, + [WMI_HOST_PEER_MEMBERSHIP] = WMI_PEER_MEMBERSHIP, + [WMI_HOST_PEER_USERPOS] = WMI_PEER_USERPOS, + [WMI_HOST_PEER_CRIT_PROTO_HINT_ENABLED] = + WMI_PEER_CRIT_PROTO_HINT_ENABLED, + [WMI_HOST_PEER_TX_FAIL_CNT_THR] = WMI_PEER_TX_FAIL_CNT_THR, + [WMI_HOST_PEER_SET_HW_RETRY_CTS2S] = WMI_PEER_SET_HW_RETRY_CTS2S, + [WMI_HOST_PEER_IBSS_ATIM_WINDOW_LENGTH] = + WMI_PEER_IBSS_ATIM_WINDOW_LENGTH, + [WMI_HOST_PEER_PHYMODE] = WMI_PEER_PHYMODE, + [WMI_HOST_PEER_USE_FIXED_PWR] = WMI_PEER_USE_FIXED_PWR, + [WMI_HOST_PEER_PARAM_FIXED_RATE] = WMI_PEER_PARAM_FIXED_RATE, + [WMI_HOST_PEER_SET_MU_WHITELIST] = WMI_PEER_SET_MU_WHITELIST, + [WMI_HOST_PEER_SET_MAC_TX_RATE] = WMI_PEER_SET_MAX_TX_RATE, + [WMI_HOST_PEER_SET_MIN_TX_RATE] = WMI_PEER_SET_MIN_TX_RATE, + [WMI_HOST_PEER_SET_DEFAULT_ROUTING] = WMI_PEER_SET_DEFAULT_ROUTING, + [WMI_HOST_PEER_NSS_VHT160] = WMI_PEER_NSS_VHT160, + [WMI_HOST_PEER_NSS_VHT80_80] = WMI_PEER_NSS_VHT80_80, + [WMI_HOST_PEER_PARAM_SU_TXBF_SOUNDING_INTERVAL] = + WMI_PEER_PARAM_SU_TXBF_SOUNDING_INTERVAL, + [WMI_HOST_PEER_PARAM_MU_TXBF_SOUNDING_INTERVAL] = + WMI_PEER_PARAM_MU_TXBF_SOUNDING_INTERVAL, + [WMI_HOST_PEER_PARAM_TXBF_SOUNDING_ENABLE] = + WMI_PEER_PARAM_TXBF_SOUNDING_ENABLE, + [WMI_HOST_PEER_PARAM_MU_ENABLE] = WMI_PEER_PARAM_MU_ENABLE, + [WMI_HOST_PEER_PARAM_OFDMA_ENABLE] = WMI_PEER_PARAM_OFDMA_ENABLE, + [WMI_HOST_PEER_PARAM_ENABLE_FT] = WMI_PEER_PARAM_ENABLE_FT, +}; + +/** + * Populate pdev_param_value whose index is host param and value is target + * param + */ +static const uint32_t pdev_param_tlv[] = { + [wmi_pdev_param_tx_chain_mask] = WMI_PDEV_PARAM_TX_CHAIN_MASK, + [wmi_pdev_param_rx_chain_mask] = WMI_PDEV_PARAM_RX_CHAIN_MASK, + [wmi_pdev_param_txpower_limit2g] = WMI_PDEV_PARAM_TXPOWER_LIMIT2G, + [wmi_pdev_param_txpower_limit5g] = WMI_PDEV_PARAM_TXPOWER_LIMIT5G, + [wmi_pdev_param_txpower_scale] = WMI_PDEV_PARAM_TXPOWER_SCALE, + [wmi_pdev_param_beacon_gen_mode] = WMI_PDEV_PARAM_BEACON_GEN_MODE, + [wmi_pdev_param_beacon_tx_mode] = WMI_PDEV_PARAM_BEACON_TX_MODE, + [wmi_pdev_param_resmgr_offchan_mode] = + WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE, + [wmi_pdev_param_protection_mode] = WMI_PDEV_PARAM_PROTECTION_MODE, + [wmi_pdev_param_dynamic_bw] = WMI_PDEV_PARAM_DYNAMIC_BW, + [wmi_pdev_param_non_agg_sw_retry_th] = + WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + [wmi_pdev_param_agg_sw_retry_th] = WMI_PDEV_PARAM_AGG_SW_RETRY_TH, + [wmi_pdev_param_sta_kickout_th] = WMI_PDEV_PARAM_STA_KICKOUT_TH, + [wmi_pdev_param_ac_aggrsize_scaling] = + WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING, + [wmi_pdev_param_ltr_enable] = WMI_PDEV_PARAM_LTR_ENABLE, + [wmi_pdev_param_ltr_ac_latency_be] = + WMI_PDEV_PARAM_LTR_AC_LATENCY_BE, + [wmi_pdev_param_ltr_ac_latency_bk] = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK, + [wmi_pdev_param_ltr_ac_latency_vi] = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI, + [wmi_pdev_param_ltr_ac_latency_vo] = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO, + [wmi_pdev_param_ltr_ac_latency_timeout] = + WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + [wmi_pdev_param_ltr_sleep_override] = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + [wmi_pdev_param_ltr_rx_override] = WMI_PDEV_PARAM_LTR_RX_OVERRIDE, + [wmi_pdev_param_ltr_tx_activity_timeout] = + WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + [wmi_pdev_param_l1ss_enable] = WMI_PDEV_PARAM_L1SS_ENABLE, + [wmi_pdev_param_dsleep_enable] = WMI_PDEV_PARAM_DSLEEP_ENABLE, + [wmi_pdev_param_pcielp_txbuf_flush] = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH, + [wmi_pdev_param_pcielp_txbuf_watermark] = + WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK, + [wmi_pdev_param_pcielp_txbuf_tmo_en] = + WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, + [wmi_pdev_param_pcielp_txbuf_tmo_value] = + WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, + [wmi_pdev_param_pdev_stats_update_period] = + WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + [wmi_pdev_param_vdev_stats_update_period] = + WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + [wmi_pdev_param_peer_stats_update_period] = + WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + [wmi_pdev_param_bcnflt_stats_update_period] = + WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + [wmi_pdev_param_pmf_qos] = WMI_PDEV_PARAM_PMF_QOS, + [wmi_pdev_param_arp_ac_override] = WMI_PDEV_PARAM_ARP_AC_OVERRIDE, + [wmi_pdev_param_dcs] = WMI_PDEV_PARAM_DCS, + [wmi_pdev_param_ani_enable] = WMI_PDEV_PARAM_ANI_ENABLE, + [wmi_pdev_param_ani_poll_period] = WMI_PDEV_PARAM_ANI_POLL_PERIOD, + [wmi_pdev_param_ani_listen_period] = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD, + [wmi_pdev_param_ani_ofdm_level] = WMI_PDEV_PARAM_ANI_OFDM_LEVEL, + [wmi_pdev_param_ani_cck_level] = WMI_PDEV_PARAM_ANI_CCK_LEVEL, + [wmi_pdev_param_dyntxchain] = WMI_PDEV_PARAM_DYNTXCHAIN, + [wmi_pdev_param_proxy_sta] = WMI_PDEV_PARAM_PROXY_STA, + [wmi_pdev_param_idle_ps_config] = WMI_PDEV_PARAM_IDLE_PS_CONFIG, + [wmi_pdev_param_power_gating_sleep] = WMI_PDEV_PARAM_POWER_GATING_SLEEP, + [wmi_pdev_param_rfkill_enable] = WMI_PDEV_PARAM_RFKILL_ENABLE, + [wmi_pdev_param_burst_dur] = WMI_PDEV_PARAM_BURST_DUR, + [wmi_pdev_param_burst_enable] = WMI_PDEV_PARAM_BURST_ENABLE, + [wmi_pdev_param_hw_rfkill_config] = WMI_PDEV_PARAM_HW_RFKILL_CONFIG, + [wmi_pdev_param_low_power_rf_enable] = + WMI_PDEV_PARAM_LOW_POWER_RF_ENABLE, + [wmi_pdev_param_l1ss_track] = WMI_PDEV_PARAM_L1SS_TRACK, + [wmi_pdev_param_hyst_en] = WMI_PDEV_PARAM_HYST_EN, + [wmi_pdev_param_power_collapse_enable] = + WMI_PDEV_PARAM_POWER_COLLAPSE_ENABLE, + [wmi_pdev_param_led_sys_state] = WMI_PDEV_PARAM_LED_SYS_STATE, + [wmi_pdev_param_led_enable] = WMI_PDEV_PARAM_LED_ENABLE, + [wmi_pdev_param_audio_over_wlan_latency] = + WMI_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY, + [wmi_pdev_param_audio_over_wlan_enable] = + WMI_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE, + [wmi_pdev_param_whal_mib_stats_update_enable] = + WMI_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE, + [wmi_pdev_param_vdev_rate_stats_update_period] = + WMI_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD, + [wmi_pdev_param_cts_cbw] = WMI_PDEV_PARAM_CTS_CBW, + [wmi_pdev_param_wnts_config] = WMI_PDEV_PARAM_WNTS_CONFIG, + [wmi_pdev_param_adaptive_early_rx_enable] = + WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_ENABLE, + [wmi_pdev_param_adaptive_early_rx_min_sleep_slop] = + WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_MIN_SLEEP_SLOP, + [wmi_pdev_param_adaptive_early_rx_inc_dec_step] = + WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_INC_DEC_STEP, + [wmi_pdev_param_early_rx_fix_sleep_slop] = + WMI_PDEV_PARAM_EARLY_RX_FIX_SLEEP_SLOP, + [wmi_pdev_param_bmiss_based_adaptive_bto_enable] = + WMI_PDEV_PARAM_BMISS_BASED_ADAPTIVE_BTO_ENABLE, + [wmi_pdev_param_bmiss_bto_min_bcn_timeout] = + WMI_PDEV_PARAM_BMISS_BTO_MIN_BCN_TIMEOUT, + [wmi_pdev_param_bmiss_bto_inc_dec_step] = + WMI_PDEV_PARAM_BMISS_BTO_INC_DEC_STEP, + [wmi_pdev_param_bto_fix_bcn_timeout] = + WMI_PDEV_PARAM_BTO_FIX_BCN_TIMEOUT, + [wmi_pdev_param_ce_based_adaptive_bto_enable] = + WMI_PDEV_PARAM_CE_BASED_ADAPTIVE_BTO_ENABLE, + [wmi_pdev_param_ce_bto_combo_ce_value] = + WMI_PDEV_PARAM_CE_BTO_COMBO_CE_VALUE, + [wmi_pdev_param_tx_chain_mask_2g] = WMI_PDEV_PARAM_TX_CHAIN_MASK_2G, + [wmi_pdev_param_rx_chain_mask_2g] = WMI_PDEV_PARAM_RX_CHAIN_MASK_2G, + [wmi_pdev_param_tx_chain_mask_5g] = WMI_PDEV_PARAM_TX_CHAIN_MASK_5G, + [wmi_pdev_param_rx_chain_mask_5g] = WMI_PDEV_PARAM_RX_CHAIN_MASK_5G, + [wmi_pdev_param_tx_chain_mask_cck] = WMI_PDEV_PARAM_TX_CHAIN_MASK_CCK, + [wmi_pdev_param_tx_chain_mask_1ss] = WMI_PDEV_PARAM_TX_CHAIN_MASK_1SS, + [wmi_pdev_param_rx_filter] = WMI_PDEV_PARAM_RX_FILTER, + [wmi_pdev_set_mcast_to_ucast_tid] = WMI_PDEV_SET_MCAST_TO_UCAST_TID, + [wmi_pdev_param_mgmt_retry_limit] = WMI_PDEV_PARAM_MGMT_RETRY_LIMIT, + [wmi_pdev_param_aggr_burst] = WMI_PDEV_PARAM_AGGR_BURST, + [wmi_pdev_peer_sta_ps_statechg_enable] = + WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE, + [wmi_pdev_param_proxy_sta_mode] = WMI_PDEV_PARAM_PROXY_STA_MODE, + [wmi_pdev_param_mu_group_policy] = WMI_PDEV_PARAM_MU_GROUP_POLICY, + [wmi_pdev_param_noise_detection] = WMI_PDEV_PARAM_NOISE_DETECTION, + [wmi_pdev_param_noise_threshold] = WMI_PDEV_PARAM_NOISE_THRESHOLD, + [wmi_pdev_param_dpd_enable] = WMI_PDEV_PARAM_DPD_ENABLE, + [wmi_pdev_param_set_mcast_bcast_echo] = + WMI_PDEV_PARAM_SET_MCAST_BCAST_ECHO, + [wmi_pdev_param_atf_strict_sch] = WMI_PDEV_PARAM_ATF_STRICT_SCH, + [wmi_pdev_param_atf_sched_duration] = WMI_PDEV_PARAM_ATF_SCHED_DURATION, + [wmi_pdev_param_ant_plzn] = WMI_PDEV_PARAM_ANT_PLZN, + [wmi_pdev_param_sensitivity_level] = WMI_PDEV_PARAM_SENSITIVITY_LEVEL, + [wmi_pdev_param_signed_txpower_2g] = WMI_PDEV_PARAM_SIGNED_TXPOWER_2G, + [wmi_pdev_param_signed_txpower_5g] = WMI_PDEV_PARAM_SIGNED_TXPOWER_5G, + [wmi_pdev_param_enable_per_tid_amsdu] = + WMI_PDEV_PARAM_ENABLE_PER_TID_AMSDU, + [wmi_pdev_param_enable_per_tid_ampdu] = + WMI_PDEV_PARAM_ENABLE_PER_TID_AMPDU, + [wmi_pdev_param_cca_threshold] = WMI_PDEV_PARAM_CCA_THRESHOLD, + [wmi_pdev_param_rts_fixed_rate] = WMI_PDEV_PARAM_RTS_FIXED_RATE, + [wmi_pdev_param_cal_period] = WMI_UNAVAILABLE_PARAM, + [wmi_pdev_param_pdev_reset] = WMI_PDEV_PARAM_PDEV_RESET, + [wmi_pdev_param_wapi_mbssid_offset] = WMI_PDEV_PARAM_WAPI_MBSSID_OFFSET, + [wmi_pdev_param_arp_srcaddr] = WMI_PDEV_PARAM_ARP_DBG_SRCADDR, + [wmi_pdev_param_arp_dstaddr] = WMI_PDEV_PARAM_ARP_DBG_DSTADDR, + [wmi_pdev_param_txpower_decr_db] = WMI_PDEV_PARAM_TXPOWER_DECR_DB, + [wmi_pdev_param_rx_batchmode] = WMI_UNAVAILABLE_PARAM, + [wmi_pdev_param_packet_aggr_delay] = WMI_UNAVAILABLE_PARAM, + [wmi_pdev_param_atf_obss_noise_sch] = + WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCH, + [wmi_pdev_param_atf_obss_noise_scaling_factor] = + WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR, + [wmi_pdev_param_cust_txpower_scale] = WMI_PDEV_PARAM_CUST_TXPOWER_SCALE, + [wmi_pdev_param_atf_dynamic_enable] = WMI_PDEV_PARAM_ATF_DYNAMIC_ENABLE, + [wmi_pdev_param_atf_ssid_group_policy] = WMI_UNAVAILABLE_PARAM, + [wmi_pdev_param_igmpmld_override] = WMI_PDEV_PARAM_IGMPMLD_AC_OVERRIDE, + [wmi_pdev_param_igmpmld_tid] = WMI_PDEV_PARAM_IGMPMLD_AC_OVERRIDE, + [wmi_pdev_param_antenna_gain] = WMI_PDEV_PARAM_ANTENNA_GAIN, + [wmi_pdev_param_block_interbss] = WMI_PDEV_PARAM_BLOCK_INTERBSS, + [wmi_pdev_param_set_disable_reset_cmdid] = + WMI_PDEV_PARAM_SET_DISABLE_RESET_CMDID, + [wmi_pdev_param_set_msdu_ttl_cmdid] = WMI_PDEV_PARAM_SET_MSDU_TTL_CMDID, + [wmi_pdev_param_txbf_sound_period_cmdid] = + WMI_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID, + [wmi_pdev_param_set_burst_mode_cmdid] = + WMI_PDEV_PARAM_SET_BURST_MODE_CMDID, + [wmi_pdev_param_en_stats] = WMI_PDEV_PARAM_EN_STATS, + [wmi_pdev_param_mesh_mcast_enable] = WMI_PDEV_PARAM_MESH_MCAST_ENABLE, + [wmi_pdev_param_set_promisc_mode_cmdid] = + WMI_PDEV_PARAM_SET_PROMISC_MODE_CMDID, + [wmi_pdev_param_set_ppdu_duration_cmdid] = + WMI_PDEV_PARAM_SET_PPDU_DURATION_CMDID, + [wmi_pdev_param_remove_mcast2ucast_buffer] = + WMI_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER, + [wmi_pdev_param_set_mcast2ucast_buffer] = + WMI_PDEV_PARAM_SET_MCAST2UCAST_BUFFER, + [wmi_pdev_param_set_mcast2ucast_mode] = + WMI_PDEV_PARAM_SET_MCAST2UCAST_MODE, + [wmi_pdev_param_smart_antenna_default_antenna] = + WMI_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA, + [wmi_pdev_param_fast_channel_reset] = + WMI_PDEV_PARAM_FAST_CHANNEL_RESET, + [wmi_pdev_param_rx_decap_mode] = WMI_PDEV_PARAM_RX_DECAP_MODE, + [wmi_pdev_param_tx_ack_timeout] = WMI_PDEV_PARAM_ACK_TIMEOUT, + [wmi_pdev_param_cck_tx_enable] = WMI_PDEV_PARAM_CCK_TX_ENABLE, + [wmi_pdev_param_antenna_gain_half_db] = + WMI_PDEV_PARAM_ANTENNA_GAIN_HALF_DB, + [wmi_pdev_param_esp_indication_period] = + WMI_PDEV_PARAM_ESP_INDICATION_PERIOD, + [wmi_pdev_param_esp_ba_window] = WMI_PDEV_PARAM_ESP_BA_WINDOW, + [wmi_pdev_param_esp_airtime_fraction] = + WMI_PDEV_PARAM_ESP_AIRTIME_FRACTION, + [wmi_pdev_param_esp_ppdu_duration] = WMI_PDEV_PARAM_ESP_PPDU_DURATION, + [wmi_pdev_param_ru26_allowed] = WMI_PDEV_PARAM_UL_RU26_ALLOWED, + [wmi_pdev_param_use_nol] = WMI_PDEV_PARAM_USE_NOL, + /* Trigger interval for all trigger types. */ + [wmi_pdev_param_ul_trig_int] = WMI_PDEV_PARAM_SET_UL_BSR_TRIG_INTERVAL, + [wmi_pdev_param_sub_channel_marking] = + WMI_PDEV_PARAM_SUB_CHANNEL_MARKING, + [wmi_pdev_param_ul_ppdu_duration] = WMI_PDEV_PARAM_SET_UL_PPDU_DURATION, + [wmi_pdev_param_equal_ru_allocation_enable] = + WMI_PDEV_PARAM_EQUAL_RU_ALLOCATION_ENABLE, + [wmi_pdev_param_per_peer_prd_cfr_enable] = + WMI_PDEV_PARAM_PER_PEER_PERIODIC_CFR_ENABLE, + [wmi_pdev_param_nav_override_config] = + WMI_PDEV_PARAM_NAV_OVERRIDE_CONFIG, + [wmi_pdev_param_set_mgmt_ttl] = WMI_PDEV_PARAM_SET_MGMT_TTL, + [wmi_pdev_param_set_prb_rsp_ttl] = + WMI_PDEV_PARAM_SET_PROBE_RESP_TTL, + [wmi_pdev_param_set_mu_ppdu_duration] = + WMI_PDEV_PARAM_SET_MU_PPDU_DURATION, + [wmi_pdev_param_set_tbtt_ctrl] = + WMI_PDEV_PARAM_SET_TBTT_CTRL, + [wmi_pdev_param_set_cmd_obss_pd_threshold] = + WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD, + [wmi_pdev_param_set_cmd_obss_pd_per_ac] = + WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC, + [wmi_pdev_param_set_cong_ctrl_max_msdus] = + WMI_PDEV_PARAM_SET_CONG_CTRL_MAX_MSDUS, + [wmi_pdev_param_enable_fw_dynamic_he_edca] = + WMI_PDEV_PARAM_ENABLE_FW_DYNAMIC_HE_EDCA, +}; + +/** + * Populate vdev_param_value_tlv array whose index is host param + * and value is target param + */ +static const uint32_t vdev_param_tlv[] = { + [wmi_vdev_param_rts_threshold] = WMI_VDEV_PARAM_RTS_THRESHOLD, + [wmi_vdev_param_fragmentation_threshold] = + WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + [wmi_vdev_param_beacon_interval] = WMI_VDEV_PARAM_BEACON_INTERVAL, + [wmi_vdev_param_listen_interval] = WMI_VDEV_PARAM_LISTEN_INTERVAL, + [wmi_vdev_param_multicast_rate] = WMI_VDEV_PARAM_MULTICAST_RATE, + [wmi_vdev_param_mgmt_tx_rate] = WMI_VDEV_PARAM_MGMT_TX_RATE, + [wmi_vdev_param_slot_time] = WMI_VDEV_PARAM_SLOT_TIME, + [wmi_vdev_param_preamble] = WMI_VDEV_PARAM_PREAMBLE, + [wmi_vdev_param_swba_time] = WMI_VDEV_PARAM_SWBA_TIME, + [wmi_vdev_stats_update_period] = WMI_VDEV_STATS_UPDATE_PERIOD, + [wmi_vdev_pwrsave_ageout_time] = WMI_VDEV_PWRSAVE_AGEOUT_TIME, + [wmi_vdev_host_swba_interval] = WMI_VDEV_HOST_SWBA_INTERVAL, + [wmi_vdev_param_dtim_period] = WMI_VDEV_PARAM_DTIM_PERIOD, + [wmi_vdev_oc_scheduler_air_time_limit] = + WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + [wmi_vdev_param_wds] = WMI_VDEV_PARAM_WDS, + [wmi_vdev_param_atim_window] = WMI_VDEV_PARAM_ATIM_WINDOW, + [wmi_vdev_param_bmiss_count_max] = WMI_VDEV_PARAM_BMISS_COUNT_MAX, + [wmi_vdev_param_bmiss_first_bcnt] = WMI_VDEV_PARAM_BMISS_FIRST_BCNT, + [wmi_vdev_param_bmiss_final_bcnt] = WMI_VDEV_PARAM_BMISS_FINAL_BCNT, + [wmi_vdev_param_feature_wmm] = WMI_VDEV_PARAM_FEATURE_WMM, + [wmi_vdev_param_chwidth] = WMI_VDEV_PARAM_CHWIDTH, + [wmi_vdev_param_chextoffset] = WMI_VDEV_PARAM_CHEXTOFFSET, + [wmi_vdev_param_disable_htprotection] = + WMI_VDEV_PARAM_DISABLE_HTPROTECTION, + [wmi_vdev_param_sta_quickkickout] = WMI_VDEV_PARAM_STA_QUICKKICKOUT, + [wmi_vdev_param_mgmt_rate] = WMI_VDEV_PARAM_MGMT_RATE, + [wmi_vdev_param_protection_mode] = WMI_VDEV_PARAM_PROTECTION_MODE, + [wmi_vdev_param_fixed_rate] = WMI_VDEV_PARAM_FIXED_RATE, + [wmi_vdev_param_sgi] = WMI_VDEV_PARAM_SGI, + [wmi_vdev_param_ldpc] = WMI_VDEV_PARAM_LDPC, + [wmi_vdev_param_tx_stbc] = WMI_VDEV_PARAM_TX_STBC, + [wmi_vdev_param_rx_stbc] = WMI_VDEV_PARAM_RX_STBC, + [wmi_vdev_param_intra_bss_fwd] = WMI_VDEV_PARAM_INTRA_BSS_FWD, + [wmi_vdev_param_def_keyid] = WMI_VDEV_PARAM_DEF_KEYID, + [wmi_vdev_param_nss] = WMI_VDEV_PARAM_NSS, + [wmi_vdev_param_bcast_data_rate] = WMI_VDEV_PARAM_BCAST_DATA_RATE, + [wmi_vdev_param_mcast_data_rate] = WMI_VDEV_PARAM_MCAST_DATA_RATE, + [wmi_vdev_param_mcast_indicate] = WMI_VDEV_PARAM_MCAST_INDICATE, + [wmi_vdev_param_dhcp_indicate] = WMI_VDEV_PARAM_DHCP_INDICATE, + [wmi_vdev_param_unknown_dest_indicate] = + WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + [wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs] = + WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + [wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs] = + WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + [wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs] = + WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + [wmi_vdev_param_ap_enable_nawds] = WMI_VDEV_PARAM_AP_ENABLE_NAWDS, + [wmi_vdev_param_enable_rtscts] = WMI_VDEV_PARAM_ENABLE_RTSCTS, + [wmi_vdev_param_txbf] = WMI_VDEV_PARAM_TXBF, + [wmi_vdev_param_packet_powersave] = WMI_VDEV_PARAM_PACKET_POWERSAVE, + [wmi_vdev_param_drop_unencry] = WMI_VDEV_PARAM_DROP_UNENCRY, + [wmi_vdev_param_tx_encap_type] = WMI_VDEV_PARAM_TX_ENCAP_TYPE, + [wmi_vdev_param_ap_detect_out_of_sync_sleeping_sta_time_secs] = + WMI_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + [wmi_vdev_param_early_rx_adjust_enable] = + WMI_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE, + [wmi_vdev_param_early_rx_tgt_bmiss_num] = + WMI_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM, + [wmi_vdev_param_early_rx_bmiss_sample_cycle] = + WMI_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE, + [wmi_vdev_param_early_rx_slop_step] = WMI_VDEV_PARAM_EARLY_RX_SLOP_STEP, + [wmi_vdev_param_early_rx_init_slop] = WMI_VDEV_PARAM_EARLY_RX_INIT_SLOP, + [wmi_vdev_param_early_rx_adjust_pause] = + WMI_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE, + [wmi_vdev_param_tx_pwrlimit] = WMI_VDEV_PARAM_TX_PWRLIMIT, + [wmi_vdev_param_snr_num_for_cal] = WMI_VDEV_PARAM_SNR_NUM_FOR_CAL, + [wmi_vdev_param_roam_fw_offload] = WMI_VDEV_PARAM_ROAM_FW_OFFLOAD, + [wmi_vdev_param_enable_rmc] = WMI_VDEV_PARAM_ENABLE_RMC, + [wmi_vdev_param_ibss_max_bcn_lost_ms] = + WMI_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS, + [wmi_vdev_param_max_rate] = WMI_VDEV_PARAM_MAX_RATE, + [wmi_vdev_param_early_rx_drift_sample] = + WMI_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE, + [wmi_vdev_param_set_ibss_tx_fail_cnt_thr] = + WMI_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR, + [wmi_vdev_param_ebt_resync_timeout] = + WMI_VDEV_PARAM_EBT_RESYNC_TIMEOUT, + [wmi_vdev_param_aggr_trig_event_enable] = + WMI_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE, + [wmi_vdev_param_is_ibss_power_save_allowed] = + WMI_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED, + [wmi_vdev_param_is_power_collapse_allowed] = + WMI_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED, + [wmi_vdev_param_is_awake_on_txrx_enabled] = + WMI_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED, + [wmi_vdev_param_inactivity_cnt] = WMI_VDEV_PARAM_INACTIVITY_CNT, + [wmi_vdev_param_txsp_end_inactivity_time_ms] = + WMI_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS, + [wmi_vdev_param_dtim_policy] = WMI_VDEV_PARAM_DTIM_POLICY, + [wmi_vdev_param_ibss_ps_warmup_time_secs] = + WMI_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS, + [wmi_vdev_param_ibss_ps_1rx_chain_in_atim_window_enable] = + WMI_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE, + [wmi_vdev_param_rx_leak_window] = WMI_VDEV_PARAM_RX_LEAK_WINDOW, + [wmi_vdev_param_stats_avg_factor] = + WMI_VDEV_PARAM_STATS_AVG_FACTOR, + [wmi_vdev_param_disconnect_th] = WMI_VDEV_PARAM_DISCONNECT_TH, + [wmi_vdev_param_rtscts_rate] = WMI_VDEV_PARAM_RTSCTS_RATE, + [wmi_vdev_param_mcc_rtscts_protection_enable] = + WMI_VDEV_PARAM_MCC_RTSCTS_PROTECTION_ENABLE, + [wmi_vdev_param_mcc_broadcast_probe_enable] = + WMI_VDEV_PARAM_MCC_BROADCAST_PROBE_ENABLE, + [wmi_vdev_param_mgmt_tx_power] = WMI_VDEV_PARAM_MGMT_TX_POWER, + [wmi_vdev_param_beacon_rate] = WMI_VDEV_PARAM_BEACON_RATE, + [wmi_vdev_param_rx_decap_type] = WMI_VDEV_PARAM_RX_DECAP_TYPE, + [wmi_vdev_param_he_dcm_enable] = WMI_VDEV_PARAM_HE_DCM, + [wmi_vdev_param_he_range_ext_enable] = WMI_VDEV_PARAM_HE_RANGE_EXT, + [wmi_vdev_param_he_bss_color] = WMI_VDEV_PARAM_BSS_COLOR, + [wmi_vdev_param_set_hemu_mode] = WMI_VDEV_PARAM_SET_HEMU_MODE, + [wmi_vdev_param_set_he_sounding_mode] = + WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE, + [wmi_vdev_param_set_heop] = WMI_VDEV_PARAM_HEOPS_0_31, + [wmi_vdev_param_sensor_ap] = WMI_VDEV_PARAM_SENSOR_AP, + [wmi_vdev_param_dtim_enable_cts] = WMI_VDEV_PARAM_DTIM_ENABLE_CTS, + [wmi_vdev_param_atf_ssid_sched_policy] = + WMI_VDEV_PARAM_ATF_SSID_SCHED_POLICY, + [wmi_vdev_param_disable_dyn_bw_rts] = WMI_VDEV_PARAM_DISABLE_DYN_BW_RTS, + [wmi_vdev_param_mcast2ucast_set] = WMI_VDEV_PARAM_MCAST2UCAST_SET, + [wmi_vdev_param_rc_num_retries] = WMI_VDEV_PARAM_RC_NUM_RETRIES, + [wmi_vdev_param_cabq_maxdur] = WMI_VDEV_PARAM_CABQ_MAXDUR, + [wmi_vdev_param_mfptest_set] = WMI_VDEV_PARAM_MFPTEST_SET, + [wmi_vdev_param_rts_fixed_rate] = WMI_VDEV_PARAM_RTS_FIXED_RATE, + [wmi_vdev_param_vht_sgimask] = WMI_VDEV_PARAM_VHT_SGIMASK, + [wmi_vdev_param_vht80_ratemask] = WMI_VDEV_PARAM_VHT80_RATEMASK, + [wmi_vdev_param_proxy_sta] = WMI_VDEV_PARAM_PROXY_STA, + [wmi_vdev_param_bw_nss_ratemask] = WMI_VDEV_PARAM_BW_NSS_RATEMASK, + [wmi_vdev_param_set_he_ltf] = WMI_VDEV_PARAM_HE_LTF, + [wmi_vdev_param_disable_cabq] = WMI_VDEV_PARAM_DISABLE_CABQ, + [wmi_vdev_param_rate_dropdown_bmap] = WMI_VDEV_PARAM_RATE_DROPDOWN_BMAP, + [wmi_vdev_param_set_ba_mode] = WMI_VDEV_PARAM_BA_MODE, + [wmi_vdev_param_capabilities] = WMI_VDEV_PARAM_CAPABILITIES, + [wmi_vdev_param_autorate_misc_cfg] = WMI_VDEV_PARAM_AUTORATE_MISC_CFG, + [wmi_vdev_param_ul_shortgi] = WMI_VDEV_PARAM_UL_GI, + [wmi_vdev_param_ul_he_ltf] = WMI_VDEV_PARAM_UL_HE_LTF, + [wmi_vdev_param_ul_nss] = WMI_VDEV_PARAM_UL_NSS, + [wmi_vdev_param_ul_ppdu_bw] = WMI_VDEV_PARAM_UL_PPDU_BW, + [wmi_vdev_param_ul_ldpc] = WMI_VDEV_PARAM_UL_LDPC, + [wmi_vdev_param_ul_stbc] = WMI_VDEV_PARAM_UL_STBC, + [wmi_vdev_param_ul_fixed_rate] = WMI_VDEV_PARAM_UL_FIXED_RATE, + [wmi_vdev_param_rawmode_open_war] = WMI_VDEV_PARAM_RAW_IS_ENCRYPTED, + [wmi_vdev_param_max_mtu_size] = WMI_VDEV_PARAM_MAX_MTU_SIZE, + [wmi_vdev_param_mcast_rc_stale_period] = + WMI_VDEV_PARAM_MCAST_RC_STALE_PERIOD, + [wmi_vdev_param_enable_multi_group_key] = + WMI_VDEV_PARAM_ENABLE_MULTI_GROUP_KEY, + [wmi_vdev_param_max_group_keys] = WMI_VDEV_PARAM_NUM_GROUP_KEYS, + [wmi_vdev_param_enable_mcast_rc] = WMI_VDEV_PARAM_ENABLE_MCAST_RC, + [wmi_vdev_param_6ghz_params] = WMI_VDEV_PARAM_6GHZ_PARAMS, + [wmi_vdev_param_enable_disable_roam_reason_vsie] = + WMI_VDEV_PARAM_ENABLE_DISABLE_ROAM_REASON_VSIE, + [wmi_vdev_param_nan_config_features] = + WMI_VDEV_PARAM_ENABLE_DISABLE_NAN_CONFIG_FEATURES, +}; +#endif + +/** + * convert_host_pdev_id_to_target_pdev_id() - Convert pdev_id from + * host to target defines. + * @wmi_handle: pointer to wmi_handle + * @param pdev_id: host pdev_id to be converted. + * Return: target pdev_id after conversion. + */ +static uint32_t convert_host_pdev_id_to_target_pdev_id(wmi_unified_t wmi_handle, + uint32_t pdev_id) +{ + if (pdev_id <= WMI_HOST_PDEV_ID_2 && pdev_id >= WMI_HOST_PDEV_ID_0) { + if (!wmi_handle->soc->is_pdev_is_map_enable) { + switch (pdev_id) { + case WMI_HOST_PDEV_ID_0: + return WMI_PDEV_ID_1ST; + case WMI_HOST_PDEV_ID_1: + return WMI_PDEV_ID_2ND; + case WMI_HOST_PDEV_ID_2: + return WMI_PDEV_ID_3RD; + } + } else { + return wmi_handle->cmd_pdev_id_map[pdev_id]; + } + } else { + return WMI_PDEV_ID_SOC; + } + + QDF_ASSERT(0); + + return WMI_PDEV_ID_SOC; +} + +/** + * convert_target_pdev_id_to_host_pdev_id() - Convert pdev_id from + * target to host defines. + * @wmi_handle: pointer to wmi_handle + * @param pdev_id: target pdev_id to be converted. + * Return: host pdev_id after conversion. + */ +static uint32_t convert_target_pdev_id_to_host_pdev_id(wmi_unified_t wmi_handle, + uint32_t pdev_id) +{ + + if (pdev_id <= WMI_PDEV_ID_3RD && pdev_id >= WMI_PDEV_ID_1ST) { + if (!wmi_handle->soc->is_pdev_is_map_enable) { + switch (pdev_id) { + case WMI_PDEV_ID_1ST: + return WMI_HOST_PDEV_ID_0; + case WMI_PDEV_ID_2ND: + return WMI_HOST_PDEV_ID_1; + case WMI_PDEV_ID_3RD: + return WMI_HOST_PDEV_ID_2; + } + } else { + return wmi_handle->evt_pdev_id_map[pdev_id - 1]; + } + } else if (pdev_id == WMI_PDEV_ID_SOC) { + return WMI_HOST_PDEV_ID_SOC; + } else { + WMI_LOGE("Invalid pdev_id"); + } + + return WMI_HOST_PDEV_ID_INVALID; +} + +/** + * convert_host_phy_id_to_target_phy_id() - Convert phy_id from + * host to target defines. + * @wmi_handle: pointer to wmi_handle + * @param phy_id: host pdev_id to be converted. + * Return: target phy_id after conversion. + */ +static uint32_t convert_host_phy_id_to_target_phy_id(wmi_unified_t wmi_handle, + uint32_t phy_id) +{ + if (!wmi_handle->soc->is_phy_id_map_enable || + phy_id >= WMI_MAX_RADIOS) { + return phy_id; + } + + return wmi_handle->cmd_phy_id_map[phy_id]; +} + +/** + * convert_target_phy_id_to_host_phy_id() - Convert phy_id from + * target to host defines. + * @wmi_handle: pointer to wmi_handle + * @param phy_id: target phy_id to be converted. + * Return: host phy_id after conversion. + */ +static uint32_t convert_target_phy_id_to_host_phy_id(wmi_unified_t wmi_handle, + uint32_t phy_id) +{ + if (!wmi_handle->soc->is_phy_id_map_enable || + phy_id >= WMI_MAX_RADIOS) { + return phy_id; + } + + return wmi_handle->evt_phy_id_map[phy_id]; +} + +/** + * wmi_tlv_pdev_id_conversion_enable() - Enable pdev_id conversion + * + * Return None. + */ +static void wmi_tlv_pdev_id_conversion_enable(wmi_unified_t wmi_handle, + uint32_t *pdev_id_map, + uint8_t size) +{ + int i = 0; + + if (pdev_id_map && (size <= WMI_MAX_RADIOS)) { + for (i = 0; i < size; i++) { + wmi_handle->cmd_pdev_id_map[i] = pdev_id_map[i]; + wmi_handle->evt_pdev_id_map[i] = + WMI_HOST_PDEV_ID_INVALID; + wmi_handle->cmd_phy_id_map[i] = pdev_id_map[i] - 1; + wmi_handle->evt_phy_id_map[i] = + WMI_HOST_PDEV_ID_INVALID; + } + + for (i = 0; i < size; i++) { + if (wmi_handle->cmd_pdev_id_map[i] != + WMI_HOST_PDEV_ID_INVALID) { + wmi_handle->evt_pdev_id_map + [wmi_handle->cmd_pdev_id_map[i] - 1] = i; + } + if (wmi_handle->cmd_phy_id_map[i] != + WMI_HOST_PDEV_ID_INVALID) { + wmi_handle->evt_phy_id_map + [wmi_handle->cmd_phy_id_map[i]] = i; + } + } + wmi_handle->soc->is_pdev_is_map_enable = true; + wmi_handle->soc->is_phy_id_map_enable = true; + } else { + wmi_handle->soc->is_pdev_is_map_enable = false; + wmi_handle->soc->is_phy_id_map_enable = false; + } + + wmi_handle->ops->convert_pdev_id_host_to_target = + convert_host_pdev_id_to_target_pdev_id; + wmi_handle->ops->convert_pdev_id_target_to_host = + convert_target_pdev_id_to_host_pdev_id; + + /* phy_id convert function assignments */ + wmi_handle->ops->convert_phy_id_host_to_target = + convert_host_phy_id_to_target_phy_id; + wmi_handle->ops->convert_phy_id_target_to_host = + convert_target_phy_id_to_host_phy_id; +} + +/* copy_vdev_create_pdev_id() - copy pdev from host params to target command + * buffer. + * @wmi_handle: pointer to wmi_handle + * @cmd: pointer target vdev create command buffer + * @param: pointer host params for vdev create + * + * Return: None + */ +static inline void copy_vdev_create_pdev_id( + struct wmi_unified *wmi_handle, + wmi_vdev_create_cmd_fixed_param * cmd, + struct vdev_create_params *param) +{ + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + param->pdev_id); +} + +void wmi_mtrace(uint32_t message_id, uint16_t vdev_id, uint32_t data) +{ + uint16_t mtrace_message_id; + + mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) | + (QDF_WMI_MTRACE_GRP_ID(message_id) << + QDF_WMI_MTRACE_CMD_NUM_BITS); + qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_TARGET, + mtrace_message_id, vdev_id, data); +} +qdf_export_symbol(wmi_mtrace); + +#ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI +static QDF_STATUS wmi_unified_cmd_send_pm_chk(struct wmi_unified *wmi_handle, + wmi_buf_t buf, + uint32_t buflen, uint32_t cmd_id) +{ + if (!wmi_is_qmi_stats_enabled(wmi_handle)) + goto send_over_wmi; + + if (wmi_is_target_suspended(wmi_handle)) { + if (QDF_IS_STATUS_SUCCESS( + wmi_unified_cmd_send_over_qmi(wmi_handle, buf, + buflen, cmd_id))) + return QDF_STATUS_SUCCESS; + } + +send_over_wmi: + qdf_atomic_set(&wmi_handle->num_stats_over_qmi, 0); + + return wmi_unified_cmd_send(wmi_handle, buf, buflen, cmd_id); +} +#else +static inline +QDF_STATUS wmi_unified_cmd_send_pm_chk(struct wmi_unified *wmi_handle, + wmi_buf_t buf, + uint32_t buflen, uint32_t cmd_id) +{ + return wmi_unified_cmd_send(wmi_handle, buf, buflen, cmd_id); +} +#endif + +/** + * send_vdev_create_cmd_tlv() - send VDEV create command to fw + * @wmi_handle: wmi handle + * @param: pointer to hold vdev create parameter + * @macaddr: vdev mac address + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_vdev_create_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct vdev_create_params *param) +{ + wmi_vdev_create_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + QDF_STATUS ret; + int num_bands = 2; + uint8_t *buf_ptr; + wmi_vdev_txrx_streams *txrx_streams; + + len += (num_bands * sizeof(*txrx_streams) + WMI_TLV_HDR_SIZE); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_vdev_create_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_create_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_create_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->vdev_type = param->type; + cmd->vdev_subtype = param->subtype; + cmd->flags = param->mbssid_flags; + cmd->vdevid_trans = param->vdevid_trans; + cmd->num_cfg_txrx_streams = num_bands; + copy_vdev_create_pdev_id(wmi_handle, cmd, param); + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->vdev_macaddr); + WMI_LOGD("%s: ID = %d[pdev:%d] VAP Addr = "QDF_MAC_ADDR_FMT, + __func__, param->vdev_id, cmd->pdev_id, + QDF_MAC_ADDR_REF(macaddr)); + buf_ptr = (uint8_t *)cmd + sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + (num_bands * sizeof(wmi_vdev_txrx_streams))); + buf_ptr += WMI_TLV_HDR_SIZE; + + WMI_LOGD("%s: type %d, subtype %d, nss_2g %d, nss_5g %d", __func__, + param->type, param->subtype, + param->nss_2g, param->nss_5g); + txrx_streams = (wmi_vdev_txrx_streams *)buf_ptr; + txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G; + txrx_streams->supported_tx_streams = param->nss_2g; + txrx_streams->supported_rx_streams = param->nss_2g; + WMITLV_SET_HDR(&txrx_streams->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_txrx_streams, + WMITLV_GET_STRUCT_TLVLEN(wmi_vdev_txrx_streams)); + + txrx_streams++; + txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G; + txrx_streams->supported_tx_streams = param->nss_5g; + txrx_streams->supported_rx_streams = param->nss_5g; + WMITLV_SET_HDR(&txrx_streams->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_txrx_streams, + WMITLV_GET_STRUCT_TLVLEN(wmi_vdev_txrx_streams)); + wmi_mtrace(WMI_VDEV_CREATE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, WMI_VDEV_CREATE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_VDEV_CREATE_CMDID"); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_vdev_delete_cmd_tlv() - send VDEV delete command to fw + * @wmi_handle: wmi handle + * @if_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_vdev_delete_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t if_id) +{ + wmi_vdev_delete_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_vdev_delete_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_delete_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_delete_cmd_fixed_param)); + cmd->vdev_id = if_id; + wmi_mtrace(WMI_VDEV_DELETE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(wmi_vdev_delete_cmd_fixed_param), + WMI_VDEV_DELETE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_VDEV_DELETE_CMDID"); + wmi_buf_free(buf); + } + WMI_LOGD("%s:vdev id = %d", __func__, if_id); + + return ret; +} + +/** + * send_vdev_nss_chain_params_cmd_tlv() - send VDEV nss chain params to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @nss_chains_user_cfg: user configured nss chain params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +send_vdev_nss_chain_params_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct vdev_nss_chains *user_cfg) +{ + wmi_vdev_chainmask_config_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_vdev_chainmask_config_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_chainmask_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_chainmask_config_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->disable_rx_mrc_2g = user_cfg->disable_rx_mrc[NSS_CHAINS_BAND_2GHZ]; + cmd->disable_tx_mrc_2g = user_cfg->disable_tx_mrc[NSS_CHAINS_BAND_2GHZ]; + cmd->disable_rx_mrc_5g = user_cfg->disable_rx_mrc[NSS_CHAINS_BAND_5GHZ]; + cmd->disable_tx_mrc_5g = user_cfg->disable_tx_mrc[NSS_CHAINS_BAND_5GHZ]; + cmd->num_rx_chains_2g = user_cfg->num_rx_chains[NSS_CHAINS_BAND_2GHZ]; + cmd->num_tx_chains_2g = user_cfg->num_tx_chains[NSS_CHAINS_BAND_2GHZ]; + cmd->num_rx_chains_5g = user_cfg->num_rx_chains[NSS_CHAINS_BAND_5GHZ]; + cmd->num_tx_chains_5g = user_cfg->num_tx_chains[NSS_CHAINS_BAND_5GHZ]; + cmd->rx_nss_2g = user_cfg->rx_nss[NSS_CHAINS_BAND_2GHZ]; + cmd->tx_nss_2g = user_cfg->tx_nss[NSS_CHAINS_BAND_2GHZ]; + cmd->rx_nss_5g = user_cfg->rx_nss[NSS_CHAINS_BAND_5GHZ]; + cmd->tx_nss_5g = user_cfg->tx_nss[NSS_CHAINS_BAND_5GHZ]; + cmd->num_tx_chains_a = user_cfg->num_tx_chains_11a; + cmd->num_tx_chains_b = user_cfg->num_tx_chains_11b; + cmd->num_tx_chains_g = user_cfg->num_tx_chains_11g; + + wmi_mtrace(WMI_VDEV_CHAINMASK_CONFIG_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(wmi_vdev_chainmask_config_cmd_fixed_param), + WMI_VDEV_CHAINMASK_CONFIG_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_VDEV_CHAINMASK_CONFIG_CMDID"); + wmi_buf_free(buf); + } + WMI_LOGD("%s: vdev_id %d", __func__, vdev_id); + + return ret; +} + +/** + * send_vdev_stop_cmd_tlv() - send vdev stop command to fw + * @wmi: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or erro code + */ +static QDF_STATUS send_vdev_stop_cmd_tlv(wmi_unified_t wmi, + uint8_t vdev_id) +{ + wmi_vdev_stop_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_vdev_stop_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_stop_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_vdev_stop_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + wmi_mtrace(WMI_VDEV_STOP_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, WMI_VDEV_STOP_CMDID)) { + WMI_LOGP("%s: Failed to send vdev stop command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s:vdev id = %d", __func__, vdev_id); + + return 0; +} + +/** + * send_vdev_down_cmd_tlv() - send vdev down command to fw + * @wmi: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_vdev_down_cmd_tlv(wmi_unified_t wmi, uint8_t vdev_id) +{ + wmi_vdev_down_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_vdev_down_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_down_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_vdev_down_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + wmi_mtrace(WMI_VDEV_DOWN_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, WMI_VDEV_DOWN_CMDID)) { + WMI_LOGP("%s: Failed to send vdev down", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s: vdev_id %d", __func__, vdev_id); + + return 0; +} + +static inline void copy_channel_info( + wmi_vdev_start_request_cmd_fixed_param * cmd, + wmi_channel *chan, + struct vdev_start_params *req) +{ + chan->mhz = req->channel.mhz; + + WMI_SET_CHANNEL_MODE(chan, req->channel.phy_mode); + + chan->band_center_freq1 = req->channel.cfreq1; + chan->band_center_freq2 = req->channel.cfreq2; + + if (req->channel.half_rate) + WMI_SET_CHANNEL_FLAG(chan, WMI_CHAN_FLAG_HALF_RATE); + else if (req->channel.quarter_rate) + WMI_SET_CHANNEL_FLAG(chan, WMI_CHAN_FLAG_QUARTER_RATE); + + if (req->channel.dfs_set) { + WMI_SET_CHANNEL_FLAG(chan, WMI_CHAN_FLAG_DFS); + cmd->disable_hw_ack = req->disable_hw_ack; + } + + if (req->channel.dfs_set_cfreq2) + WMI_SET_CHANNEL_FLAG(chan, WMI_CHAN_FLAG_DFS_CFREQ2); + + /* According to firmware both reg power and max tx power + * on set channel power is used and set it to max reg + * power from regulatory. + */ + WMI_SET_CHANNEL_MIN_POWER(chan, req->channel.minpower); + WMI_SET_CHANNEL_MAX_POWER(chan, req->channel.maxpower); + WMI_SET_CHANNEL_REG_POWER(chan, req->channel.maxregpower); + WMI_SET_CHANNEL_ANTENNA_MAX(chan, req->channel.antennamax); + WMI_SET_CHANNEL_REG_CLASSID(chan, req->channel.reg_class_id); + WMI_SET_CHANNEL_MAX_TX_POWER(chan, req->channel.maxregpower); + +} + +/** + * send_vdev_start_cmd_tlv() - send vdev start request to fw + * @wmi_handle: wmi handle + * @req: vdev start params + * + * Return: QDF status + */ +static QDF_STATUS send_vdev_start_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_start_params *req) +{ + wmi_vdev_start_request_cmd_fixed_param *cmd; + wmi_buf_t buf; + wmi_channel *chan; + int32_t len, ret; + uint8_t *buf_ptr; + + len = sizeof(*cmd) + sizeof(wmi_channel) + WMI_TLV_HDR_SIZE; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_vdev_start_request_cmd_fixed_param *) buf_ptr; + chan = (wmi_channel *) (buf_ptr + sizeof(*cmd)); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_start_request_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_start_request_cmd_fixed_param)); + WMITLV_SET_HDR(&chan->tlv_header, WMITLV_TAG_STRUC_wmi_channel, + WMITLV_GET_STRUCT_TLVLEN(wmi_channel)); + cmd->vdev_id = req->vdev_id; + + /* Fill channel info */ + copy_channel_info(cmd, chan, req); + cmd->beacon_interval = req->beacon_interval; + cmd->dtim_period = req->dtim_period; + + cmd->bcn_tx_rate = req->bcn_tx_rate_code; + if (req->bcn_tx_rate_code) + wmi_enable_bcn_ratecode(cmd->flags); + + if (!req->is_restart) { + if (req->pmf_enabled) + cmd->flags |= WMI_UNIFIED_VDEV_START_PMF_ENABLED; + } + + /* Copy the SSID */ + if (req->ssid.length) { + if (req->ssid.length < sizeof(cmd->ssid.ssid)) + cmd->ssid.ssid_len = req->ssid.length; + else + cmd->ssid.ssid_len = sizeof(cmd->ssid.ssid); + qdf_mem_copy(cmd->ssid.ssid, req->ssid.mac_ssid, + cmd->ssid.ssid_len); + } + + if (req->hidden_ssid) + cmd->flags |= WMI_UNIFIED_VDEV_START_HIDDEN_SSID; + + cmd->flags |= WMI_UNIFIED_VDEV_START_LDPC_RX_ENABLED; + cmd->num_noa_descriptors = req->num_noa_descriptors; + cmd->preferred_rx_streams = req->preferred_rx_streams; + cmd->preferred_tx_streams = req->preferred_tx_streams; + cmd->cac_duration_ms = req->cac_duration_ms; + cmd->regdomain = req->regdomain; + cmd->he_ops = req->he_ops; + + buf_ptr = (uint8_t *) (((uintptr_t) cmd) + sizeof(*cmd) + + sizeof(wmi_channel)); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + cmd->num_noa_descriptors * + sizeof(wmi_p2p_noa_descriptor)); + wmi_info("%s: vdev_id %d freq %d chanmode %d ch_info: 0x%x is_dfs %d " + "beacon interval %d dtim %d center_chan %d center_freq2 %d " + "reg_info_1: 0x%x reg_info_2: 0x%x, req->max_txpow: 0x%x " + "Tx SS %d, Rx SS %d, ldpc_rx: %d, cac %d, regd %d, HE ops: %d" + "req->dis_hw_ack: %d ", __func__, req->vdev_id, + chan->mhz, req->channel.phy_mode, chan->info, + req->channel.dfs_set, req->beacon_interval, cmd->dtim_period, + chan->band_center_freq1, chan->band_center_freq2, + chan->reg_info_1, chan->reg_info_2, req->channel.maxregpower, + req->preferred_tx_streams, req->preferred_rx_streams, + req->ldpc_rx_enabled, req->cac_duration_ms, + req->regdomain, req->he_ops, + req->disable_hw_ack); + + if (req->is_restart) { + wmi_mtrace(WMI_VDEV_RESTART_REQUEST_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_RESTART_REQUEST_CMDID); + } else { + wmi_mtrace(WMI_VDEV_START_REQUEST_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_START_REQUEST_CMDID); + } + if (ret) { + WMI_LOGP("%s: Failed to send vdev start command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_peer_flush_tids_cmd_tlv() - flush peer tids packets in fw + * @wmi: wmi handle + * @peer_addr: peer mac address + * @param: pointer to hold peer flush tid parameter + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_peer_flush_tids_cmd_tlv(wmi_unified_t wmi, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + struct peer_flush_params *param) +{ + wmi_peer_flush_tids_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_peer_flush_tids_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_flush_tids_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_flush_tids_cmd_fixed_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_addr, &cmd->peer_macaddr); + cmd->peer_tid_bitmap = param->peer_tid_bitmap; + cmd->vdev_id = param->vdev_id; + WMI_LOGD("%s: peer_addr "QDF_MAC_ADDR_FMT" vdev_id %d and peer bitmap %d", + __func__, QDF_MAC_ADDR_REF(peer_addr), param->vdev_id, + param->peer_tid_bitmap); + wmi_mtrace(WMI_PEER_FLUSH_TIDS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, WMI_PEER_FLUSH_TIDS_CMDID)) { + WMI_LOGP("%s: Failed to send flush tid command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +/** + * send_peer_delete_cmd_tlv() - send PEER delete command to fw + * @wmi: wmi handle + * @peer_addr: peer mac addr + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_peer_delete_cmd_tlv(wmi_unified_t wmi, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + uint8_t vdev_id) +{ + wmi_peer_delete_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_peer_delete_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_delete_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_delete_cmd_fixed_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_addr, &cmd->peer_macaddr); + cmd->vdev_id = vdev_id; + + WMI_LOGD("%s: peer_addr "QDF_MAC_ADDR_FMT" vdev_id %d", + __func__, QDF_MAC_ADDR_REF(peer_addr), vdev_id); + wmi_mtrace(WMI_PEER_DELETE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, WMI_PEER_DELETE_CMDID)) { + WMI_LOGP("%s: Failed to send peer delete command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +/** + * send_peer_delete_all_cmd_tlv() - send PEER delete all command to fw + * @wmi: wmi handle + * @param: pointer to hold peer delete all parameter + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_peer_delete_all_cmd_tlv( + wmi_unified_t wmi, + struct peer_delete_all_params *param) +{ + wmi_vdev_delete_all_peer_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_vdev_delete_all_peer_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR( + &cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_delete_all_peer_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_delete_all_peer_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + + WMI_LOGD("%s: vdev_id %d", __func__, cmd->vdev_id); + wmi_mtrace(WMI_VDEV_DELETE_ALL_PEER_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, + WMI_VDEV_DELETE_ALL_PEER_CMDID)) { + WMI_LOGP("%s: Failed to send peer del all command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * convert_host_peer_param_id_to_target_id_tlv - convert host peer param_id + * to target id. + * @peer_param_id: host param id. + * + * Return: Target param id. + */ +#ifdef ENABLE_HOST_TO_TARGET_CONVERSION +static inline uint32_t convert_host_peer_param_id_to_target_id_tlv( + uint32_t peer_param_id) +{ + if (peer_param_id < QDF_ARRAY_SIZE(peer_param_tlv)) + return peer_param_tlv[peer_param_id]; + return WMI_UNAVAILABLE_PARAM; +} +#else +static inline uint32_t convert_host_peer_param_id_to_target_id_tlv( + uint32_t peer_param_id) +{ + return peer_param_id; +} +#endif + +/** + * send_peer_param_cmd_tlv() - set peer parameter in fw + * @wmi: wmi handle + * @peer_addr: peer mac address + * @param : pointer to hold peer set parameter + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_peer_param_cmd_tlv(wmi_unified_t wmi, + uint8_t peer_addr[QDF_MAC_ADDR_SIZE], + struct peer_set_params *param) +{ + wmi_peer_set_param_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t err; + uint32_t param_id; + + param_id = convert_host_peer_param_id_to_target_id_tlv(param->param_id); + if (param_id == WMI_UNAVAILABLE_PARAM) { + WMI_LOGW("%s: Unavailable param %d", __func__, param->param_id); + return QDF_STATUS_E_NOSUPPORT; + } + + buf = wmi_buf_alloc(wmi, sizeof(*cmd)); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_peer_set_param_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_set_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_set_param_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_addr, &cmd->peer_macaddr); + cmd->param_id = param_id; + cmd->param_value = param->param_value; + wmi_mtrace(WMI_PEER_SET_PARAM_CMDID, cmd->vdev_id, 0); + err = wmi_unified_cmd_send(wmi, buf, + sizeof(wmi_peer_set_param_cmd_fixed_param), + WMI_PEER_SET_PARAM_CMDID); + if (err) { + WMI_LOGE("Failed to send set_param cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +/** + * send_vdev_up_cmd_tlv() - send vdev up command in fw + * @wmi: wmi handle + * @bssid: bssid + * @vdev_up_params: pointer to hold vdev up parameter + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_vdev_up_cmd_tlv(wmi_unified_t wmi, + uint8_t bssid[QDF_MAC_ADDR_SIZE], + struct vdev_up_params *params) +{ + wmi_vdev_up_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + WMI_LOGD("%s: VDEV_UP", __func__); + WMI_LOGD("%s: vdev_id %d aid %d bssid "QDF_MAC_ADDR_FMT, + __func__, + params->vdev_id, params->assoc_id, QDF_MAC_ADDR_REF(bssid)); + buf = wmi_buf_alloc(wmi, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_vdev_up_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_up_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_vdev_up_cmd_fixed_param)); + cmd->vdev_id = params->vdev_id; + cmd->vdev_assoc_id = params->assoc_id; + cmd->profile_idx = params->profile_idx; + cmd->profile_num = params->profile_num; + WMI_CHAR_ARRAY_TO_MAC_ADDR(params->trans_bssid, &cmd->trans_bssid); + WMI_CHAR_ARRAY_TO_MAC_ADDR(bssid, &cmd->vdev_bssid); + wmi_mtrace(WMI_VDEV_UP_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, WMI_VDEV_UP_CMDID)) { + WMI_LOGP("%s: Failed to send vdev up command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +/** + * send_peer_create_cmd_tlv() - send peer create command to fw + * @wmi: wmi handle + * @peer_addr: peer mac address + * @peer_type: peer type + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_peer_create_cmd_tlv(wmi_unified_t wmi, + struct peer_create_params *param) +{ + wmi_peer_create_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_peer_create_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_create_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_create_cmd_fixed_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_addr, &cmd->peer_macaddr); + cmd->peer_type = param->peer_type; + cmd->vdev_id = param->vdev_id; + + wmi_mtrace(WMI_PEER_CREATE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, WMI_PEER_CREATE_CMDID)) { + WMI_LOGP("%s: failed to send WMI_PEER_CREATE_CMDID", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s: peer_addr "QDF_MAC_ADDR_FMT" vdev_id %d", + __func__, QDF_MAC_ADDR_REF(param->peer_addr), + param->vdev_id); + + return 0; +} + +/** + * send_peer_rx_reorder_queue_setup_cmd_tlv() - send rx reorder setup + * command to fw + * @wmi: wmi handle + * @rx_reorder_queue_setup_params: Rx reorder queue setup parameters + * + * Return: 0 for success or error code + */ +static +QDF_STATUS send_peer_rx_reorder_queue_setup_cmd_tlv(wmi_unified_t wmi, + struct rx_reorder_queue_setup_params *param) +{ + wmi_peer_reorder_queue_setup_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_peer_reorder_queue_setup_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_reorder_queue_setup_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_reorder_queue_setup_cmd_fixed_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_macaddr, &cmd->peer_macaddr); + cmd->vdev_id = param->vdev_id; + cmd->tid = param->tid; + cmd->queue_ptr_lo = param->hw_qdesc_paddr_lo; + cmd->queue_ptr_hi = param->hw_qdesc_paddr_hi; + cmd->queue_no = param->queue_no; + cmd->ba_window_size_valid = param->ba_window_size_valid; + cmd->ba_window_size = param->ba_window_size; + + + wmi_mtrace(WMI_PEER_REORDER_QUEUE_SETUP_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, + WMI_PEER_REORDER_QUEUE_SETUP_CMDID)) { + WMI_LOGP("%s: fail to send WMI_PEER_REORDER_QUEUE_SETUP_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s: peer_macaddr "QDF_MAC_ADDR_FMT" vdev_id %d, tid %d", + __func__, + QDF_MAC_ADDR_REF(param->peer_macaddr), + param->vdev_id, param->tid); + + return QDF_STATUS_SUCCESS; +} + +/** + * send_peer_rx_reorder_queue_remove_cmd_tlv() - send rx reorder remove + * command to fw + * @wmi: wmi handle + * @rx_reorder_queue_remove_params: Rx reorder queue remove parameters + * + * Return: 0 for success or error code + */ +static +QDF_STATUS send_peer_rx_reorder_queue_remove_cmd_tlv(wmi_unified_t wmi, + struct rx_reorder_queue_remove_params *param) +{ + wmi_peer_reorder_queue_remove_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_peer_reorder_queue_remove_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_reorder_queue_remove_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_reorder_queue_remove_cmd_fixed_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_macaddr, &cmd->peer_macaddr); + cmd->vdev_id = param->vdev_id; + cmd->tid_mask = param->peer_tid_bitmap; + + wmi_mtrace(WMI_PEER_REORDER_QUEUE_REMOVE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, + WMI_PEER_REORDER_QUEUE_REMOVE_CMDID)) { + WMI_LOGP("%s: fail to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s: peer_macaddr "QDF_MAC_ADDR_FMT" vdev_id %d, tid_map %d", + __func__, + QDF_MAC_ADDR_REF(param->peer_macaddr), + param->vdev_id, param->peer_tid_bitmap); + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_SUPPORT_GREEN_AP +/** + * send_green_ap_ps_cmd_tlv() - enable green ap powersave command + * @wmi_handle: wmi handle + * @value: value + * @pdev_id: pdev id to have radio context + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_green_ap_ps_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t value, uint8_t pdev_id) +{ + wmi_pdev_green_ap_ps_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + WMI_LOGD("Set Green AP PS val %d", value); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_pdev_green_ap_ps_enable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_green_ap_ps_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_green_ap_ps_enable_cmd_fixed_param)); + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + pdev_id); + cmd->enable = value; + + wmi_mtrace(WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID)) { + WMI_LOGE("Set Green AP PS param Failed val %d", value); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} +#endif + +/** + * send_pdev_utf_cmd_tlv() - send utf command to fw + * @wmi_handle: wmi handle + * @param: pointer to pdev_utf_params + * @mac_id: mac id to have radio context + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +send_pdev_utf_cmd_tlv(wmi_unified_t wmi_handle, + struct pdev_utf_params *param, + uint8_t mac_id) +{ + wmi_buf_t buf; + uint8_t *cmd; + /* if param->len is 0 no data is sent, return error */ + QDF_STATUS ret = QDF_STATUS_E_INVAL; + static uint8_t msgref = 1; + uint8_t segNumber = 0, segInfo, numSegments; + uint16_t chunk_len, total_bytes; + uint8_t *bufpos; + struct seg_hdr_info segHdrInfo; + + bufpos = param->utf_payload; + total_bytes = param->len; + ASSERT(total_bytes / MAX_WMI_UTF_LEN == + (uint8_t) (total_bytes / MAX_WMI_UTF_LEN)); + numSegments = (uint8_t) (total_bytes / MAX_WMI_UTF_LEN); + + if (param->len - (numSegments * MAX_WMI_UTF_LEN)) + numSegments++; + + while (param->len) { + if (param->len > MAX_WMI_UTF_LEN) + chunk_len = MAX_WMI_UTF_LEN; /* MAX message */ + else + chunk_len = param->len; + + buf = wmi_buf_alloc(wmi_handle, + (chunk_len + sizeof(segHdrInfo) + + WMI_TLV_HDR_SIZE)); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (uint8_t *) wmi_buf_data(buf); + + segHdrInfo.len = total_bytes; + segHdrInfo.msgref = msgref; + segInfo = ((numSegments << 4) & 0xF0) | (segNumber & 0xF); + segHdrInfo.segmentInfo = segInfo; + segHdrInfo.pad = 0; + + WMI_LOGD("%s:segHdrInfo.len = %d, segHdrInfo.msgref = %d," + " segHdrInfo.segmentInfo = %d", + __func__, segHdrInfo.len, segHdrInfo.msgref, + segHdrInfo.segmentInfo); + + WMI_LOGD("%s:total_bytes %d segNumber %d totalSegments %d" + "chunk len %d", __func__, total_bytes, segNumber, + numSegments, chunk_len); + + segNumber++; + + WMITLV_SET_HDR(cmd, WMITLV_TAG_ARRAY_BYTE, + (chunk_len + sizeof(segHdrInfo))); + cmd += WMI_TLV_HDR_SIZE; + memcpy(cmd, &segHdrInfo, sizeof(segHdrInfo)); /* 4 bytes */ + memcpy(&cmd[sizeof(segHdrInfo)], bufpos, chunk_len); + + wmi_mtrace(WMI_PDEV_UTF_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, + (chunk_len + sizeof(segHdrInfo) + + WMI_TLV_HDR_SIZE), + WMI_PDEV_UTF_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_PDEV_UTF_CMDID command"); + wmi_buf_free(buf); + break; + } + + param->len -= chunk_len; + bufpos += chunk_len; + } + + msgref++; + + return ret; +} + +#ifdef ENABLE_HOST_TO_TARGET_CONVERSION +static inline uint32_t convert_host_pdev_param_tlv(uint32_t host_param) +{ + if (host_param < QDF_ARRAY_SIZE(pdev_param_tlv)) + return pdev_param_tlv[host_param]; + return WMI_UNAVAILABLE_PARAM; +} +#else +static inline uint32_t convert_host_pdev_param_tlv(uint32_t host_param) +{ + return host_param; +} +#endif + +/** + * send_pdev_param_cmd_tlv() - set pdev parameters + * @wmi_handle: wmi handle + * @param: pointer to pdev parameter + * @mac_id: radio context + * + * Return: 0 on success, errno on failure + */ +static QDF_STATUS +send_pdev_param_cmd_tlv(wmi_unified_t wmi_handle, + struct pdev_params *param, + uint8_t mac_id) +{ + QDF_STATUS ret; + wmi_pdev_set_param_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(*cmd); + uint32_t pdev_param; + + pdev_param = convert_host_pdev_param_tlv(param->param_id); + if (pdev_param == WMI_UNAVAILABLE_PARAM) { + WMI_LOGW("%s: Unavailable param %d", + __func__, param->param_id); + return QDF_STATUS_E_INVAL; + } + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_pdev_set_param_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_set_param_cmd_fixed_param)); + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + mac_id); + cmd->param_id = pdev_param; + cmd->param_value = param->param_value; + WMI_LOGD("Setting pdev param = %x, value = %u", param->param_id, + param->param_value); + wmi_mtrace(WMI_PDEV_SET_PARAM_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_PARAM_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send set param command ret = %d", ret); + wmi_buf_free(buf); + } + return ret; +} + +/** + * send_pdev_set_hw_mode_cmd_tlv() - Send WMI_PDEV_SET_HW_MODE_CMDID to FW + * @wmi_handle: wmi handle + * @msg: Structure containing the following parameters + * @hw_mode_index: The HW_Mode field is a enumerated type that is selected + * from the HW_Mode table, which is returned in the WMI_SERVICE_READY_EVENTID. + * + * Provides notification to the WLAN firmware that host driver is requesting a + * HardWare (HW) Mode change. This command is needed to support iHelium in the + * configurations that include the Dual Band Simultaneous (DBS) feature. + * + * Return: Success if the cmd is sent successfully to the firmware + */ +static QDF_STATUS send_pdev_set_hw_mode_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t hw_mode_index) +{ + wmi_pdev_set_hw_mode_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_pdev_set_hw_mode_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_hw_mode_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_set_hw_mode_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + WMI_HOST_PDEV_ID_SOC); + cmd->hw_mode_index = hw_mode_index; + WMI_LOGD("%s: HW mode index:%d", __func__, cmd->hw_mode_index); + + wmi_mtrace(WMI_PDEV_SET_HW_MODE_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_HW_MODE_CMDID)) { + WMI_LOGE("%s: Failed to send WMI_PDEV_SET_HW_MODE_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_suspend_cmd_tlv() - WMI suspend function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold suspend parameter + * @mac_id: radio context + * + * Return 0 on success and -ve on failure. + */ +static QDF_STATUS send_suspend_cmd_tlv(wmi_unified_t wmi_handle, + struct suspend_params *param, + uint8_t mac_id) +{ + wmi_pdev_suspend_cmd_fixed_param *cmd; + wmi_buf_t wmibuf; + uint32_t len = sizeof(*cmd); + int32_t ret; + + /* + * send the command to Target to ignore the + * PCIE reset so as to ensure that Host and target + * states are in sync + */ + wmibuf = wmi_buf_alloc(wmi_handle, len); + if (!wmibuf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_pdev_suspend_cmd_fixed_param *) wmi_buf_data(wmibuf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_suspend_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_suspend_cmd_fixed_param)); + if (param->disable_target_intr) + cmd->suspend_opt = WMI_PDEV_SUSPEND_AND_DISABLE_INTR; + else + cmd->suspend_opt = WMI_PDEV_SUSPEND; + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + mac_id); + + wmi_mtrace(WMI_PDEV_SUSPEND_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, wmibuf, len, + WMI_PDEV_SUSPEND_CMDID); + if (ret) { + wmi_buf_free(wmibuf); + WMI_LOGE("Failed to send WMI_PDEV_SUSPEND_CMDID command"); + } + + return ret; +} + +/** + * send_resume_cmd_tlv() - WMI resume function + * @param wmi_handle : handle to WMI. + * @mac_id: radio context + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_resume_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t mac_id) +{ + wmi_buf_t wmibuf; + wmi_pdev_resume_cmd_fixed_param *cmd; + QDF_STATUS ret; + + wmibuf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!wmibuf) + return QDF_STATUS_E_NOMEM; + cmd = (wmi_pdev_resume_cmd_fixed_param *) wmi_buf_data(wmibuf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_resume_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_resume_cmd_fixed_param)); + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + mac_id); + wmi_mtrace(WMI_PDEV_RESUME_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, wmibuf, sizeof(*cmd), + WMI_PDEV_RESUME_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_PDEV_RESUME_CMDID command"); + wmi_buf_free(wmibuf); + } + + return ret; +} + +/** + * send_wow_enable_cmd_tlv() - WMI wow enable function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold wow enable parameter + * @mac_id: radio context + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_wow_enable_cmd_tlv(wmi_unified_t wmi_handle, + struct wow_cmd_params *param, + uint8_t mac_id) +{ + wmi_wow_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len; + int32_t ret; + + len = sizeof(wmi_wow_enable_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_wow_enable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wow_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_wow_enable_cmd_fixed_param)); + cmd->enable = param->enable; + if (param->can_suspend_link) + cmd->pause_iface_config = WOW_IFACE_PAUSE_ENABLED; + else + cmd->pause_iface_config = WOW_IFACE_PAUSE_DISABLED; + cmd->flags = param->flags; + + wmi_info("suspend type: %s flag is 0x%x", + cmd->pause_iface_config == WOW_IFACE_PAUSE_ENABLED ? + "WOW_IFACE_PAUSE_ENABLED" : "WOW_IFACE_PAUSE_DISABLED", + cmd->flags); + + wmi_mtrace(WMI_WOW_ENABLE_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_ENABLE_CMDID); + if (ret) + wmi_buf_free(buf); + + return ret; +} + +/** + * send_set_ap_ps_param_cmd_tlv() - set ap powersave parameters + * @wmi_handle: wmi handle + * @peer_addr: peer mac address + * @param: pointer to ap_ps parameter structure + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_set_ap_ps_param_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t *peer_addr, + struct ap_ps_params *param) +{ + wmi_ap_ps_peer_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t err; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_ap_ps_peer_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ap_ps_peer_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_ap_ps_peer_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_addr, &cmd->peer_macaddr); + cmd->param = param->param; + cmd->value = param->value; + wmi_mtrace(WMI_AP_PS_PEER_PARAM_CMDID, cmd->vdev_id, 0); + err = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(*cmd), WMI_AP_PS_PEER_PARAM_CMDID); + if (err) { + WMI_LOGE("Failed to send set_ap_ps_param cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +/** + * send_set_sta_ps_param_cmd_tlv() - set sta powersave parameters + * @wmi_handle: wmi handle + * @peer_addr: peer mac address + * @param: pointer to sta_ps parameter structure + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_set_sta_ps_param_cmd_tlv(wmi_unified_t wmi_handle, + struct sta_ps_params *param) +{ + wmi_sta_powersave_param_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_sta_powersave_param_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sta_powersave_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sta_powersave_param_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->param = param->param_id; + cmd->value = param->value; + + wmi_mtrace(WMI_STA_POWERSAVE_PARAM_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_STA_POWERSAVE_PARAM_CMDID)) { + WMI_LOGE("Set Sta Ps param Failed vdevId %d Param %d val %d", + param->vdev_id, param->param_id, param->value); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +/** + * send_crash_inject_cmd_tlv() - inject fw crash + * @wmi_handle: wmi handle + * @param: ponirt to crash inject parameter structure + * + * Return: QDF_STATUS_SUCCESS for success or return error + */ +static QDF_STATUS send_crash_inject_cmd_tlv(wmi_unified_t wmi_handle, + struct crash_inject *param) +{ + int32_t ret = 0; + WMI_FORCE_FW_HANG_CMD_fixed_param *cmd; + uint16_t len = sizeof(*cmd); + wmi_buf_t buf; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (WMI_FORCE_FW_HANG_CMD_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_FORCE_FW_HANG_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_FORCE_FW_HANG_CMD_fixed_param)); + cmd->type = param->type; + cmd->delay_time_ms = param->delay_time_ms; + + wmi_mtrace(WMI_FORCE_FW_HANG_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_FORCE_FW_HANG_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send set param command, ret = %d", + __func__, ret); + wmi_buf_free(buf); + } + + return ret; +} + +#ifdef FEATURE_FW_LOG_PARSING +/** + * send_dbglog_cmd_tlv() - set debug log level + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold dbglog level parameter + * + * Return: 0 on success and -ve on failure. + */ + static QDF_STATUS +send_dbglog_cmd_tlv(wmi_unified_t wmi_handle, + struct dbglog_params *dbglog_param) +{ + wmi_buf_t buf; + wmi_debug_log_config_cmd_fixed_param *configmsg; + QDF_STATUS status; + int32_t i; + int32_t len; + int8_t *buf_ptr; + int32_t *module_id_bitmap_array; /* Used to fomr the second tlv */ + + ASSERT(dbglog_param->bitmap_len < MAX_MODULE_ID_BITMAP_WORDS); + + /* Allocate size for 2 tlvs - including tlv hdr space for second tlv */ + len = sizeof(wmi_debug_log_config_cmd_fixed_param) + WMI_TLV_HDR_SIZE + + (sizeof(int32_t) * MAX_MODULE_ID_BITMAP_WORDS); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + configmsg = + (wmi_debug_log_config_cmd_fixed_param *) (wmi_buf_data(buf)); + buf_ptr = (int8_t *) configmsg; + WMITLV_SET_HDR(&configmsg->tlv_header, + WMITLV_TAG_STRUC_wmi_debug_log_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_debug_log_config_cmd_fixed_param)); + configmsg->dbg_log_param = dbglog_param->param; + configmsg->value = dbglog_param->val; + /* Filling in the data part of second tlv -- should + * follow first tlv _ WMI_TLV_HDR_SIZE */ + module_id_bitmap_array = (uint32_t *) (buf_ptr + + sizeof + (wmi_debug_log_config_cmd_fixed_param) + + WMI_TLV_HDR_SIZE); + WMITLV_SET_HDR(buf_ptr + sizeof(wmi_debug_log_config_cmd_fixed_param), + WMITLV_TAG_ARRAY_UINT32, + sizeof(uint32_t) * MAX_MODULE_ID_BITMAP_WORDS); + if (dbglog_param->module_id_bitmap) { + for (i = 0; i < dbglog_param->bitmap_len; ++i) { + module_id_bitmap_array[i] = + dbglog_param->module_id_bitmap[i]; + } + } + + wmi_mtrace(WMI_DBGLOG_CFG_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_DBGLOG_CFG_CMDID); + + if (status != QDF_STATUS_SUCCESS) + wmi_buf_free(buf); + + return status; +} +#endif + +#ifdef ENABLE_HOST_TO_TARGET_CONVERSION +static inline uint32_t convert_host_vdev_param_tlv(uint32_t host_param) +{ + if (host_param < QDF_ARRAY_SIZE(vdev_param_tlv)) + return vdev_param_tlv[host_param]; + return WMI_UNAVAILABLE_PARAM; +} +#else +static inline uint32_t convert_host_vdev_param_tlv(uint32_t host_param) +{ + return host_param; +} +#endif + +/** + * send_vdev_set_param_cmd_tlv() - WMI vdev set parameter function + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold vdev set parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_vdev_set_param_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_set_params *param) +{ + QDF_STATUS ret; + wmi_vdev_set_param_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(*cmd); + uint32_t vdev_param; + + vdev_param = convert_host_vdev_param_tlv(param->param_id); + if (vdev_param == WMI_UNAVAILABLE_PARAM) { + WMI_LOGW("%s:Vdev param %d not available", __func__, + param->param_id); + return QDF_STATUS_E_INVAL; + + } + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_vdev_set_param_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_set_param_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->param_id = vdev_param; + cmd->param_value = param->param_value; + WMI_LOGD("Setting vdev %d param = %x, value = %u", + cmd->vdev_id, cmd->param_id, cmd->param_value); + wmi_mtrace(WMI_VDEV_SET_PARAM_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SET_PARAM_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send set param command ret = %d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_stats_request_cmd_tlv() - WMI request stats function + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold stats request parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_stats_request_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct stats_request_params *param) +{ + int32_t ret; + wmi_request_stats_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(wmi_request_stats_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return -QDF_STATUS_E_NOMEM; + + cmd = (wmi_request_stats_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_request_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_request_stats_cmd_fixed_param)); + cmd->stats_id = param->stats_id; + cmd->vdev_id = param->vdev_id; + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + param->pdev_id); + + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + + WMI_LOGD("STATS REQ STATS_ID:%d VDEV_ID:%d PDEV_ID:%d-->", + cmd->stats_id, cmd->vdev_id, cmd->pdev_id); + + wmi_mtrace(WMI_REQUEST_STATS_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send_pm_chk(wmi_handle, buf, len, + WMI_REQUEST_STATS_CMDID); + + if (ret) { + WMI_LOGE("Failed to send status request to fw =%d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_peer_based_pktlog_cmd() - Send WMI command to enable packet-log + * @wmi_handle: handle to WMI. + * @macaddr: Peer mac address to be filter + * @mac_id: mac id to have radio context + * @enb_dsb: Enable MAC based filtering or Disable + * + * Return: QDF_STATUS + */ +static QDF_STATUS send_peer_based_pktlog_cmd(wmi_unified_t wmi_handle, + uint8_t *macaddr, + uint8_t mac_id, + uint8_t enb_dsb) +{ + int32_t ret; + wmi_pdev_pktlog_filter_cmd_fixed_param *cmd; + wmi_pdev_pktlog_filter_info *mac_info; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint16_t len = sizeof(wmi_pdev_pktlog_filter_cmd_fixed_param) + + sizeof(wmi_pdev_pktlog_filter_info) + WMI_TLV_HDR_SIZE; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_pdev_pktlog_filter_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_pktlog_filter_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_pktlog_filter_cmd_fixed_param)); + cmd->pdev_id = mac_id; + cmd->enable = enb_dsb; + cmd->num_of_mac_addresses = 1; + wmi_mtrace(WMI_PDEV_PKTLOG_FILTER_CMDID, cmd->pdev_id, 0); + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_pdev_pktlog_filter_info)); + buf_ptr += WMI_TLV_HDR_SIZE; + + mac_info = (wmi_pdev_pktlog_filter_info *)(buf_ptr); + + WMITLV_SET_HDR(&mac_info->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_pktlog_filter_info, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_pktlog_filter_info)); + + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &mac_info->peer_mac_address); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_PKTLOG_FILTER_CMDID); + if (ret) { + WMI_LOGE("Failed to send peer based pktlog command to FW =%d" + , ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_packet_log_enable_cmd_tlv() - Send WMI command to enable packet-log + * @param wmi_handle : handle to WMI. + * @param PKTLOG_EVENT : packet log event + * @mac_id: mac id to have radio context + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_packet_log_enable_cmd_tlv(wmi_unified_t wmi_handle, + WMI_HOST_PKTLOG_EVENT PKTLOG_EVENT, uint8_t mac_id) +{ + int32_t ret; + wmi_pdev_pktlog_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(wmi_pdev_pktlog_enable_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return -QDF_STATUS_E_NOMEM; + + cmd = (wmi_pdev_pktlog_enable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_pktlog_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_pktlog_enable_cmd_fixed_param)); + cmd->evlist = PKTLOG_EVENT; + cmd->pdev_id = mac_id; + wmi_mtrace(WMI_PDEV_PKTLOG_ENABLE_CMDID, cmd->pdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_PKTLOG_ENABLE_CMDID); + if (ret) { + WMI_LOGE("Failed to send pktlog enable cmd to FW =%d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_packet_log_disable_cmd_tlv() - Send WMI command to disable packet-log + * @param wmi_handle : handle to WMI. + * @mac_id: mac id to have radio context + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_packet_log_disable_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t mac_id) +{ + int32_t ret; + wmi_pdev_pktlog_disable_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(wmi_pdev_pktlog_disable_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return -QDF_STATUS_E_NOMEM; + + cmd = (wmi_pdev_pktlog_disable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_pktlog_disable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_pktlog_disable_cmd_fixed_param)); + cmd->pdev_id = mac_id; + wmi_mtrace(WMI_PDEV_PKTLOG_DISABLE_CMDID, cmd->pdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_PKTLOG_DISABLE_CMDID); + if (ret) { + WMI_LOGE("Failed to send pktlog disable cmd to FW =%d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +#define WMI_FW_TIME_STAMP_LOW_MASK 0xffffffff +/** + * send_time_stamp_sync_cmd_tlv() - Send WMI command to + * sync time between bwtween host and firmware + * @param wmi_handle : handle to WMI. + * + * Return: None + */ +static void send_time_stamp_sync_cmd_tlv(wmi_unified_t wmi_handle) +{ + wmi_buf_t buf; + QDF_STATUS status = QDF_STATUS_SUCCESS; + WMI_DBGLOG_TIME_STAMP_SYNC_CMD_fixed_param *time_stamp; + int32_t len; + qdf_time_t time_ms; + + len = sizeof(*time_stamp); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) + return; + + time_stamp = + (WMI_DBGLOG_TIME_STAMP_SYNC_CMD_fixed_param *) + (wmi_buf_data(buf)); + WMITLV_SET_HDR(&time_stamp->tlv_header, + WMITLV_TAG_STRUC_wmi_dbglog_time_stamp_sync_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + WMI_DBGLOG_TIME_STAMP_SYNC_CMD_fixed_param)); + + time_ms = qdf_get_time_of_the_day_ms(); + time_stamp->mode = WMI_TIME_STAMP_SYNC_MODE_MS; + time_stamp->time_stamp_low = time_ms & + WMI_FW_TIME_STAMP_LOW_MASK; + /* + * Send time_stamp_high 0 as the time converted from HR:MIN:SEC:MS to ms + * wont exceed 27 bit + */ + time_stamp->time_stamp_high = 0; + WMI_LOGD(FL("WMA --> DBGLOG_TIME_STAMP_SYNC_CMDID mode %d time_stamp low %d high %d"), + time_stamp->mode, time_stamp->time_stamp_low, + time_stamp->time_stamp_high); + + wmi_mtrace(WMI_DBGLOG_TIME_STAMP_SYNC_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_DBGLOG_TIME_STAMP_SYNC_CMDID); + if (status) { + WMI_LOGE("Failed to send WMI_DBGLOG_TIME_STAMP_SYNC_CMDID command"); + wmi_buf_free(buf); + } + +} + +/** + * send_fd_tmpl_cmd_tlv() - WMI FILS Discovery send function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold FILS Discovery send cmd parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_fd_tmpl_cmd_tlv(wmi_unified_t wmi_handle, + struct fils_discovery_tmpl_params *param) +{ + int32_t ret; + wmi_fd_tmpl_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint8_t *buf_ptr; + uint32_t wmi_buf_len; + + wmi_buf_len = sizeof(wmi_fd_tmpl_cmd_fixed_param) + + WMI_TLV_HDR_SIZE + param->tmpl_len_aligned; + wmi_buf = wmi_buf_alloc(wmi_handle, wmi_buf_len); + if (!wmi_buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + cmd = (wmi_fd_tmpl_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_fd_tmpl_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_fd_tmpl_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->buf_len = param->tmpl_len; + buf_ptr += sizeof(wmi_fd_tmpl_cmd_fixed_param); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, param->tmpl_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, param->frm, param->tmpl_len); + + wmi_mtrace(WMI_FD_TMPL_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, + wmi_buf, wmi_buf_len, WMI_FD_TMPL_CMDID); + + if (ret) { + WMI_LOGE("%s: Failed to send fd tmpl: %d", __func__, ret); + wmi_buf_free(wmi_buf); + return ret; + } + + return 0; +} + +/** + * send_beacon_send_tmpl_cmd_tlv() - WMI beacon send function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold beacon send cmd parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_beacon_tmpl_send_cmd_tlv(wmi_unified_t wmi_handle, + struct beacon_tmpl_params *param) +{ + int32_t ret; + wmi_bcn_tmpl_cmd_fixed_param *cmd; + wmi_bcn_prb_info *bcn_prb_info; + wmi_buf_t wmi_buf; + uint8_t *buf_ptr; + uint32_t wmi_buf_len; + + wmi_buf_len = sizeof(wmi_bcn_tmpl_cmd_fixed_param) + + sizeof(wmi_bcn_prb_info) + WMI_TLV_HDR_SIZE + + param->tmpl_len_aligned; + wmi_buf = wmi_buf_alloc(wmi_handle, wmi_buf_len); + if (!wmi_buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + cmd = (wmi_bcn_tmpl_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bcn_tmpl_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_bcn_tmpl_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->tim_ie_offset = param->tim_ie_offset; + cmd->mbssid_ie_offset = param->mbssid_ie_offset; + cmd->csa_switch_count_offset = param->csa_switch_count_offset; + cmd->ext_csa_switch_count_offset = param->ext_csa_switch_count_offset; + cmd->esp_ie_offset = param->esp_ie_offset; + cmd->mu_edca_ie_offset = param->mu_edca_ie_offset; + cmd->buf_len = param->tmpl_len; + buf_ptr += sizeof(wmi_bcn_tmpl_cmd_fixed_param); + + bcn_prb_info = (wmi_bcn_prb_info *) buf_ptr; + WMITLV_SET_HDR(&bcn_prb_info->tlv_header, + WMITLV_TAG_STRUC_wmi_bcn_prb_info, + WMITLV_GET_STRUCT_TLVLEN(wmi_bcn_prb_info)); + bcn_prb_info->caps = 0; + bcn_prb_info->erp = 0; + buf_ptr += sizeof(wmi_bcn_prb_info); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, param->tmpl_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, param->frm, param->tmpl_len); + + wmi_mtrace(WMI_BCN_TMPL_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, + wmi_buf, wmi_buf_len, WMI_BCN_TMPL_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send bcn tmpl: %d", __func__, ret); + wmi_buf_free(wmi_buf); + } + + return 0; +} + +static inline void copy_peer_flags_tlv( + wmi_peer_assoc_complete_cmd_fixed_param * cmd, + struct peer_assoc_params *param) +{ + /* + * The target only needs a subset of the flags maintained in the host. + * Just populate those flags and send it down + */ + cmd->peer_flags = 0; + + /* + * Do not enable HT/VHT if WMM/wme is disabled for vap. + */ + if (param->is_wme_set) { + + if (param->qos_flag) + cmd->peer_flags |= WMI_PEER_QOS; + if (param->apsd_flag) + cmd->peer_flags |= WMI_PEER_APSD; + if (param->ht_flag) + cmd->peer_flags |= WMI_PEER_HT; + if (param->bw_40) + cmd->peer_flags |= WMI_PEER_40MHZ; + if (param->bw_80) + cmd->peer_flags |= WMI_PEER_80MHZ; + if (param->bw_160) + cmd->peer_flags |= WMI_PEER_160MHZ; + + /* Typically if STBC is enabled for VHT it should be enabled + * for HT as well + **/ + if (param->stbc_flag) + cmd->peer_flags |= WMI_PEER_STBC; + + /* Typically if LDPC is enabled for VHT it should be enabled + * for HT as well + **/ + if (param->ldpc_flag) + cmd->peer_flags |= WMI_PEER_LDPC; + + if (param->static_mimops_flag) + cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS; + if (param->dynamic_mimops_flag) + cmd->peer_flags |= WMI_PEER_DYN_MIMOPS; + if (param->spatial_mux_flag) + cmd->peer_flags |= WMI_PEER_SPATIAL_MUX; + if (param->vht_flag) + cmd->peer_flags |= WMI_PEER_VHT; + if (param->he_flag) + cmd->peer_flags |= WMI_PEER_HE; + if (param->p2p_capable_sta) + cmd->peer_flags |= WMI_PEER_IS_P2P_CAPABLE; + } + + if (param->is_pmf_enabled) + cmd->peer_flags |= WMI_PEER_PMF; + /* + * Suppress authorization for all AUTH modes that need 4-way handshake + * (during re-association). + * Authorization will be done for these modes on key installation. + */ + if (param->auth_flag) + cmd->peer_flags |= WMI_PEER_AUTH; + if (param->need_ptk_4_way) + cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; + else + cmd->peer_flags &= ~WMI_PEER_NEED_PTK_4_WAY; + if (param->need_gtk_2_way) + cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; + /* safe mode bypass the 4-way handshake */ + if (param->safe_mode_enabled) + cmd->peer_flags &= + ~(WMI_PEER_NEED_PTK_4_WAY | WMI_PEER_NEED_GTK_2_WAY); + /* inter BSS peer */ + if (param->inter_bss_peer) + cmd->peer_flags |= WMI_PEER_INTER_BSS_PEER; + /* Disable AMSDU for station transmit, if user configures it */ + /* Disable AMSDU for AP transmit to 11n Stations, if user configures + * it + * if (param->amsdu_disable) Add after FW support + **/ + + /* Target asserts if node is marked HT and all MCS is set to 0. + * Mark the node as non-HT if all the mcs rates are disabled through + * iwpriv + **/ + if (param->peer_ht_rates.num_rates == 0) + cmd->peer_flags &= ~WMI_PEER_HT; + + if (param->twt_requester) + cmd->peer_flags |= WMI_PEER_TWT_REQ; + + if (param->twt_responder) + cmd->peer_flags |= WMI_PEER_TWT_RESP; +} + +static inline void copy_peer_mac_addr_tlv( + wmi_peer_assoc_complete_cmd_fixed_param * cmd, + struct peer_assoc_params *param) +{ + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_mac, &cmd->peer_macaddr); +} + +/** + * send_peer_assoc_cmd_tlv() - WMI peer assoc function + * @param wmi_handle : handle to WMI. + * @param param : pointer to peer assoc parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_peer_assoc_cmd_tlv(wmi_unified_t wmi_handle, + struct peer_assoc_params *param) +{ + wmi_peer_assoc_complete_cmd_fixed_param *cmd; + wmi_vht_rate_set *mcs; + wmi_he_rate_set *he_mcs; + wmi_buf_t buf; + int32_t len; + uint8_t *buf_ptr; + QDF_STATUS ret; + uint32_t peer_legacy_rates_align; + uint32_t peer_ht_rates_align; + int32_t i; + + + peer_legacy_rates_align = wmi_align(param->peer_legacy_rates.num_rates); + peer_ht_rates_align = wmi_align(param->peer_ht_rates.num_rates); + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + + (peer_legacy_rates_align * sizeof(uint8_t)) + + WMI_TLV_HDR_SIZE + + (peer_ht_rates_align * sizeof(uint8_t)) + + sizeof(wmi_vht_rate_set) + + (sizeof(wmi_he_rate_set) * param->peer_he_mcs_count + + WMI_TLV_HDR_SIZE); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_peer_assoc_complete_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_assoc_complete_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_assoc_complete_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + + cmd->peer_new_assoc = param->peer_new_assoc; + cmd->peer_associd = param->peer_associd; + + copy_peer_flags_tlv(cmd, param); + copy_peer_mac_addr_tlv(cmd, param); + + cmd->peer_rate_caps = param->peer_rate_caps; + cmd->peer_caps = param->peer_caps; + cmd->peer_listen_intval = param->peer_listen_intval; + cmd->peer_ht_caps = param->peer_ht_caps; + cmd->peer_max_mpdu = param->peer_max_mpdu; + cmd->peer_mpdu_density = param->peer_mpdu_density; + cmd->peer_vht_caps = param->peer_vht_caps; + cmd->peer_phymode = param->peer_phymode; + + /* Update 11ax capabilities */ + cmd->peer_he_cap_info = + param->peer_he_cap_macinfo[WMI_HOST_HECAP_MAC_WORD1]; + cmd->peer_he_cap_info_ext = + param->peer_he_cap_macinfo[WMI_HOST_HECAP_MAC_WORD2]; + cmd->peer_he_cap_info_internal = param->peer_he_cap_info_internal; + cmd->peer_he_ops = param->peer_he_ops; + qdf_mem_copy(&cmd->peer_he_cap_phy, ¶m->peer_he_cap_phyinfo, + sizeof(param->peer_he_cap_phyinfo)); + qdf_mem_copy(&cmd->peer_ppet, ¶m->peer_ppet, + sizeof(param->peer_ppet)); + cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz; + + /* Update peer legacy rate information */ + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + peer_legacy_rates_align); + buf_ptr += WMI_TLV_HDR_SIZE; + cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates; + qdf_mem_copy(buf_ptr, param->peer_legacy_rates.rates, + param->peer_legacy_rates.num_rates); + + /* Update peer HT rate information */ + buf_ptr += peer_legacy_rates_align; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + peer_ht_rates_align); + buf_ptr += WMI_TLV_HDR_SIZE; + cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates; + qdf_mem_copy(buf_ptr, param->peer_ht_rates.rates, + param->peer_ht_rates.num_rates); + + /* VHT Rates */ + buf_ptr += peer_ht_rates_align; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_STRUC_wmi_vht_rate_set, + WMITLV_GET_STRUCT_TLVLEN(wmi_vht_rate_set)); + + cmd->peer_nss = param->peer_nss; + + /* Update bandwidth-NSS mapping */ + cmd->peer_bw_rxnss_override = 0; + cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override; + + mcs = (wmi_vht_rate_set *) buf_ptr; + if (param->vht_capable) { + mcs->rx_max_rate = param->rx_max_rate; + mcs->rx_mcs_set = param->rx_mcs_set; + mcs->tx_max_rate = param->tx_max_rate; + mcs->tx_mcs_set = param->tx_mcs_set; + } + + /* HE Rates */ + cmd->min_data_rate = param->min_data_rate; + cmd->peer_he_mcs = param->peer_he_mcs_count; + buf_ptr += sizeof(wmi_vht_rate_set); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + (param->peer_he_mcs_count * sizeof(wmi_he_rate_set))); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Loop through the HE rate set */ + for (i = 0; i < param->peer_he_mcs_count; i++) { + he_mcs = (wmi_he_rate_set *) buf_ptr; + WMITLV_SET_HDR(he_mcs, WMITLV_TAG_STRUC_wmi_he_rate_set, + WMITLV_GET_STRUCT_TLVLEN(wmi_he_rate_set)); + + he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i]; + he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i]; + WMI_LOGD("%s:HE idx %d RxMCSmap %x TxMCSmap %x ", __func__, + i, he_mcs->rx_mcs_set, he_mcs->tx_mcs_set); + buf_ptr += sizeof(wmi_he_rate_set); + } + + if ((param->he_flag) && (param->peer_he_mcs_count > 1) && + (param->peer_he_rx_mcs_set[WMI_HOST_HE_TXRX_MCS_NSS_IDX_160] + == WMI_HOST_HE_INVALID_MCSNSSMAP || + param->peer_he_tx_mcs_set[WMI_HOST_HE_TXRX_MCS_NSS_IDX_160] + == WMI_HOST_HE_INVALID_MCSNSSMAP)) { + WMI_LOGD("param->peer_he_tx_mcs_set[160MHz]=%x", + param->peer_he_tx_mcs_set[WMI_HOST_HE_TXRX_MCS_NSS_IDX_160]); + WMI_LOGD("param->peer_he_rx_mcs_set[160MHz]=%x", + param->peer_he_rx_mcs_set[WMI_HOST_HE_TXRX_MCS_NSS_IDX_160]); + WMI_LOGD("peer_mac="QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(param->peer_mac)); + } + + WMI_LOGD("%s: vdev_id %d associd %d peer_flags %x rate_caps %x " + "peer_caps %x listen_intval %d ht_caps %x max_mpdu %d " + "nss %d phymode %d peer_mpdu_density %d " + "cmd->peer_vht_caps %x " + "HE cap_info %x ops %x " + "HE cap_info_ext %x " + "HE phy %x %x %x " + "peer_bw_rxnss_override %x", __func__, + cmd->vdev_id, cmd->peer_associd, cmd->peer_flags, + cmd->peer_rate_caps, cmd->peer_caps, + cmd->peer_listen_intval, cmd->peer_ht_caps, + cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode, + cmd->peer_mpdu_density, + cmd->peer_vht_caps, cmd->peer_he_cap_info, + cmd->peer_he_ops, cmd->peer_he_cap_info_ext, + cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1], + cmd->peer_he_cap_phy[2], + cmd->peer_bw_rxnss_override); + + wmi_mtrace(WMI_PEER_ASSOC_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_ASSOC_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGP("%s: Failed to send peer assoc command ret = %d", + __func__, ret); + wmi_buf_free(buf); + } + + return ret; +} + +/* copy_scan_notify_events() - Helper routine to copy scan notify events + */ +static inline void copy_scan_event_cntrl_flags( + wmi_start_scan_cmd_fixed_param * cmd, + struct scan_req_params *param) +{ + + /* Scan events subscription */ + if (param->scan_ev_started) + cmd->notify_scan_events |= WMI_SCAN_EVENT_STARTED; + if (param->scan_ev_completed) + cmd->notify_scan_events |= WMI_SCAN_EVENT_COMPLETED; + if (param->scan_ev_bss_chan) + cmd->notify_scan_events |= WMI_SCAN_EVENT_BSS_CHANNEL; + if (param->scan_ev_foreign_chan) + cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHANNEL; + if (param->scan_ev_dequeued) + cmd->notify_scan_events |= WMI_SCAN_EVENT_DEQUEUED; + if (param->scan_ev_preempted) + cmd->notify_scan_events |= WMI_SCAN_EVENT_PREEMPTED; + if (param->scan_ev_start_failed) + cmd->notify_scan_events |= WMI_SCAN_EVENT_START_FAILED; + if (param->scan_ev_restarted) + cmd->notify_scan_events |= WMI_SCAN_EVENT_RESTARTED; + if (param->scan_ev_foreign_chn_exit) + cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT; + if (param->scan_ev_suspended) + cmd->notify_scan_events |= WMI_SCAN_EVENT_SUSPENDED; + if (param->scan_ev_resumed) + cmd->notify_scan_events |= WMI_SCAN_EVENT_RESUMED; + + /** Set scan control flags */ + cmd->scan_ctrl_flags = 0; + if (param->scan_f_passive) + cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; + if (param->scan_f_strict_passive_pch) + cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN; + if (param->scan_f_promisc_mode) + cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROMISCOUS; + if (param->scan_f_capture_phy_err) + cmd->scan_ctrl_flags |= WMI_SCAN_CAPTURE_PHY_ERROR; + if (param->scan_f_half_rate) + cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_HALF_RATE_SUPPORT; + if (param->scan_f_quarter_rate) + cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT; + if (param->scan_f_cck_rates) + cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES; + if (param->scan_f_ofdm_rates) + cmd->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES; + if (param->scan_f_chan_stat_evnt) + cmd->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT; + if (param->scan_f_filter_prb_req) + cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; + if (param->scan_f_bcast_probe) + cmd->scan_ctrl_flags |= WMI_SCAN_ADD_BCAST_PROBE_REQ; + if (param->scan_f_offchan_mgmt_tx) + cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_MGMT_TX; + if (param->scan_f_offchan_data_tx) + cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_DATA_TX; + if (param->scan_f_force_active_dfs_chn) + cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS; + if (param->scan_f_add_tpc_ie_in_probe) + cmd->scan_ctrl_flags |= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ; + if (param->scan_f_add_ds_ie_in_probe) + cmd->scan_ctrl_flags |= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ; + if (param->scan_f_add_spoofed_mac_in_probe) + cmd->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ; + if (param->scan_f_add_rand_seq_in_probe) + cmd->scan_ctrl_flags |= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ; + if (param->scan_f_en_ie_whitelist_in_probe) + cmd->scan_ctrl_flags |= + WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ; + + /* for adaptive scan mode using 3 bits (21 - 23 bits) */ + WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags, + param->adaptive_dwell_time_mode); +} + +/* scan_copy_ie_buffer() - Copy scan ie_data */ +static inline void scan_copy_ie_buffer(uint8_t *buf_ptr, + struct scan_req_params *params) +{ + qdf_mem_copy(buf_ptr, params->extraie.ptr, params->extraie.len); +} + +/** + * wmi_copy_scan_random_mac() - To copy scan randomization attrs to wmi buffer + * @mac: random mac addr + * @mask: random mac mask + * @mac_addr: wmi random mac + * @mac_mask: wmi random mac mask + * + * Return None. + */ +static inline +void wmi_copy_scan_random_mac(uint8_t *mac, uint8_t *mask, + wmi_mac_addr *mac_addr, wmi_mac_addr *mac_mask) +{ + WMI_CHAR_ARRAY_TO_MAC_ADDR(mac, mac_addr); + WMI_CHAR_ARRAY_TO_MAC_ADDR(mask, mac_mask); +} + +/* + * wmi_fill_vendor_oui() - fill vendor OUIs + * @buf_ptr: pointer to wmi tlv buffer + * @num_vendor_oui: number of vendor OUIs to be filled + * @param_voui: pointer to OUI buffer + * + * This function populates the wmi tlv buffer when vendor specific OUIs are + * present. + * + * Return: None + */ +static inline +void wmi_fill_vendor_oui(uint8_t *buf_ptr, uint32_t num_vendor_oui, + uint32_t *pvoui) +{ + wmi_vendor_oui *voui = NULL; + uint32_t i; + + voui = (wmi_vendor_oui *)buf_ptr; + + for (i = 0; i < num_vendor_oui; i++) { + WMITLV_SET_HDR(&voui[i].tlv_header, + WMITLV_TAG_STRUC_wmi_vendor_oui, + WMITLV_GET_STRUCT_TLVLEN(wmi_vendor_oui)); + voui[i].oui_type_subtype = pvoui[i]; + } +} + +/* + * wmi_fill_ie_whitelist_attrs() - fill IE whitelist attrs + * @ie_bitmap: output pointer to ie bit map in cmd + * @num_vendor_oui: output pointer to num vendor OUIs + * @ie_whitelist: input parameter + * + * This function populates the IE whitelist attrs of scan, pno and + * scan oui commands for ie_whitelist parameter. + * + * Return: None + */ +static inline +void wmi_fill_ie_whitelist_attrs(uint32_t *ie_bitmap, + uint32_t *num_vendor_oui, + struct probe_req_whitelist_attr *ie_whitelist) +{ + uint32_t i = 0; + + for (i = 0; i < PROBE_REQ_BITMAP_LEN; i++) + ie_bitmap[i] = ie_whitelist->ie_bitmap[i]; + + *num_vendor_oui = ie_whitelist->num_vendor_oui; +} + +/** + * send_scan_start_cmd_tlv() - WMI scan start function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold scan start cmd parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_scan_start_cmd_tlv(wmi_unified_t wmi_handle, + struct scan_req_params *params) +{ + int32_t ret = 0; + int32_t i; + wmi_buf_t wmi_buf; + wmi_start_scan_cmd_fixed_param *cmd; + uint8_t *buf_ptr; + uint32_t *tmp_ptr; + wmi_ssid *ssid = NULL; + wmi_mac_addr *bssid; + size_t len = sizeof(*cmd); + uint16_t extraie_len_with_pad = 0; + uint8_t phymode_roundup = 0; + struct probe_req_whitelist_attr *ie_whitelist = ¶ms->ie_whitelist; + wmi_hint_freq_short_ssid *s_ssid = NULL; + wmi_hint_freq_bssid *hint_bssid = NULL; + + /* Length TLV placeholder for array of uint32_t */ + len += WMI_TLV_HDR_SIZE; + /* calculate the length of buffer required */ + if (params->chan_list.num_chan) + len += params->chan_list.num_chan * sizeof(uint32_t); + + /* Length TLV placeholder for array of wmi_ssid structures */ + len += WMI_TLV_HDR_SIZE; + if (params->num_ssids) + len += params->num_ssids * sizeof(wmi_ssid); + + /* Length TLV placeholder for array of wmi_mac_addr structures */ + len += WMI_TLV_HDR_SIZE; + if (params->num_bssid) + len += sizeof(wmi_mac_addr) * params->num_bssid; + + /* Length TLV placeholder for array of bytes */ + len += WMI_TLV_HDR_SIZE; + if (params->extraie.len) + extraie_len_with_pad = + roundup(params->extraie.len, sizeof(uint32_t)); + len += extraie_len_with_pad; + + len += WMI_TLV_HDR_SIZE; /* Length of TLV for array of wmi_vendor_oui */ + if (ie_whitelist->num_vendor_oui) + len += ie_whitelist->num_vendor_oui * sizeof(wmi_vendor_oui); + + len += WMI_TLV_HDR_SIZE; /* Length of TLV for array of scan phymode */ + if (params->scan_f_wide_band) + phymode_roundup = + qdf_roundup(params->chan_list.num_chan * sizeof(uint8_t), + sizeof(uint32_t)); + len += phymode_roundup; + + len += WMI_TLV_HDR_SIZE; + if (params->num_hint_bssid) + len += params->num_hint_bssid * sizeof(wmi_hint_freq_bssid); + + len += WMI_TLV_HDR_SIZE; + if (params->num_hint_s_ssid) + len += params->num_hint_s_ssid * sizeof(wmi_hint_freq_short_ssid); + + /* Allocate the memory */ + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) + return QDF_STATUS_E_FAILURE; + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + cmd = (wmi_start_scan_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_start_scan_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_start_scan_cmd_fixed_param)); + + cmd->scan_id = params->scan_id; + cmd->scan_req_id = params->scan_req_id; + cmd->vdev_id = params->vdev_id; + cmd->scan_priority = params->scan_priority; + + copy_scan_event_cntrl_flags(cmd, params); + + cmd->dwell_time_active = params->dwell_time_active; + cmd->dwell_time_active_2g = params->dwell_time_active_2g; + cmd->dwell_time_passive = params->dwell_time_passive; + cmd->dwell_time_active_6ghz = params->dwell_time_active_6g; + cmd->dwell_time_passive_6ghz = params->dwell_time_passive_6g; + cmd->scan_start_offset = params->scan_offset_time; + cmd->min_rest_time = params->min_rest_time; + cmd->max_rest_time = params->max_rest_time; + cmd->repeat_probe_time = params->repeat_probe_time; + cmd->probe_spacing_time = params->probe_spacing_time; + cmd->idle_time = params->idle_time; + cmd->max_scan_time = params->max_scan_time; + cmd->probe_delay = params->probe_delay; + cmd->burst_duration = params->burst_duration; + cmd->num_chan = params->chan_list.num_chan; + cmd->num_bssid = params->num_bssid; + cmd->num_ssids = params->num_ssids; + cmd->ie_len = params->extraie.len; + cmd->n_probes = params->n_probes; + cmd->scan_ctrl_flags_ext = params->scan_ctrl_flags_ext; + + if (params->scan_random.randomize) + wmi_copy_scan_random_mac(params->scan_random.mac_addr, + params->scan_random.mac_mask, + &cmd->mac_addr, + &cmd->mac_mask); + + if (ie_whitelist->white_list) + wmi_fill_ie_whitelist_attrs(cmd->ie_bitmap, + &cmd->num_vendor_oui, + ie_whitelist); + + buf_ptr += sizeof(*cmd); + tmp_ptr = (uint32_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < params->chan_list.num_chan; ++i) + tmp_ptr[i] = params->chan_list.chan[i].freq; + + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_UINT32, + (params->chan_list.num_chan * sizeof(uint32_t))); + buf_ptr += WMI_TLV_HDR_SIZE + + (params->chan_list.num_chan * sizeof(uint32_t)); + + if (params->num_ssids > WLAN_SCAN_MAX_NUM_SSID) { + WMI_LOGE("Invalid value for num_ssids %d", params->num_ssids); + goto error; + } + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_FIXED_STRUC, + (params->num_ssids * sizeof(wmi_ssid))); + + if (params->num_ssids) { + ssid = (wmi_ssid *) (buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < params->num_ssids; ++i) { + ssid->ssid_len = params->ssid[i].length; + qdf_mem_copy(ssid->ssid, params->ssid[i].ssid, + params->ssid[i].length); + ssid++; + } + } + buf_ptr += WMI_TLV_HDR_SIZE + (params->num_ssids * sizeof(wmi_ssid)); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_FIXED_STRUC, + (params->num_bssid * sizeof(wmi_mac_addr))); + bssid = (wmi_mac_addr *) (buf_ptr + WMI_TLV_HDR_SIZE); + + if (params->num_bssid) { + for (i = 0; i < params->num_bssid; ++i) { + WMI_CHAR_ARRAY_TO_MAC_ADDR( + ¶ms->bssid_list[i].bytes[0], bssid); + bssid++; + } + } + + buf_ptr += WMI_TLV_HDR_SIZE + + (params->num_bssid * sizeof(wmi_mac_addr)); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, extraie_len_with_pad); + if (params->extraie.len) + scan_copy_ie_buffer(buf_ptr + WMI_TLV_HDR_SIZE, + params); + + buf_ptr += WMI_TLV_HDR_SIZE + extraie_len_with_pad; + + /* probe req ie whitelisting */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + ie_whitelist->num_vendor_oui * sizeof(wmi_vendor_oui)); + + buf_ptr += WMI_TLV_HDR_SIZE; + + if (cmd->num_vendor_oui) { + wmi_fill_vendor_oui(buf_ptr, cmd->num_vendor_oui, + ie_whitelist->voui); + buf_ptr += cmd->num_vendor_oui * sizeof(wmi_vendor_oui); + } + + /* Add phy mode TLV if it's a wide band scan */ + if (params->scan_f_wide_band) { + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, phymode_roundup); + buf_ptr = (uint8_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < params->chan_list.num_chan; ++i) + buf_ptr[i] = + WMI_SCAN_CHAN_SET_MODE(params->chan_list.chan[i].phymode); + buf_ptr += phymode_roundup; + } else { + /* Add ZERO legth phy mode TLV */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + } + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_FIXED_STRUC, + (params->num_hint_s_ssid * sizeof(wmi_hint_freq_short_ssid))); + if (params->num_hint_s_ssid) { + s_ssid = (wmi_hint_freq_short_ssid *)(buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < params->num_hint_s_ssid; ++i) { + s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags; + s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid; + s_ssid++; + } + } + buf_ptr += WMI_TLV_HDR_SIZE + + (params->num_hint_s_ssid * sizeof(wmi_hint_freq_short_ssid)); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_FIXED_STRUC, + (params->num_hint_bssid * sizeof(wmi_hint_freq_bssid))); + if (params->num_hint_bssid) { + hint_bssid = (wmi_hint_freq_bssid *)(buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < params->num_hint_bssid; ++i) { + hint_bssid->freq_flags = + params->hint_bssid[i].freq_flags; + WMI_CHAR_ARRAY_TO_MAC_ADDR(¶ms->hint_bssid[i].bssid.bytes[0], + &hint_bssid->bssid); + hint_bssid++; + } + } + + wmi_mtrace(WMI_START_SCAN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, wmi_buf, + len, WMI_START_SCAN_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to start scan: %d", __func__, ret); + wmi_buf_free(wmi_buf); + } + return ret; +error: + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; +} + +/** + * send_scan_stop_cmd_tlv() - WMI scan start function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold scan cancel cmd parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_scan_stop_cmd_tlv(wmi_unified_t wmi_handle, + struct scan_cancel_param *param) +{ + wmi_stop_scan_cmd_fixed_param *cmd; + int ret; + int len = sizeof(*cmd); + wmi_buf_t wmi_buf; + + /* Allocate the memory */ + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + ret = QDF_STATUS_E_NOMEM; + goto error; + } + + cmd = (wmi_stop_scan_cmd_fixed_param *) wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_stop_scan_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_stop_scan_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->requestor = param->requester; + cmd->scan_id = param->scan_id; + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + param->pdev_id); + /* stop the scan with the corresponding scan_id */ + if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) { + /* Cancelling all scans */ + cmd->req_type = WMI_SCAN_STOP_ALL; + } else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) { + /* Cancelling VAP scans */ + cmd->req_type = WMI_SCN_STOP_VAP_ALL; + } else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) { + /* Cancelling specific scan */ + cmd->req_type = WMI_SCAN_STOP_ONE; + } else if (param->req_type == WLAN_SCAN_CANCEL_HOST_VDEV_ALL) { + cmd->req_type = WMI_SCN_STOP_HOST_VAP_ALL; + } else { + WMI_LOGE("%s: Invalid Command : ", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_INVAL; + } + + wmi_mtrace(WMI_STOP_SCAN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, wmi_buf, + len, WMI_STOP_SCAN_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send stop scan: %d", __func__, ret); + wmi_buf_free(wmi_buf); + } + +error: + return ret; +} + +#define WMI_MAX_CHAN_INFO_LOG 192 + +/** + * wmi_scan_chanlist_dump() - Dump scan channel list info + * @scan_chan_list: scan channel list + * + * Return: void + */ +static void wmi_scan_chanlist_dump(struct scan_chan_list_params *scan_chan_list) +{ + uint32_t i; + uint8_t info[WMI_MAX_CHAN_INFO_LOG]; + uint32_t len = 0; + struct channel_param *chan; + int ret; + + wmi_debug("Total chan %d", scan_chan_list->nallchans); + for (i = 0; i < scan_chan_list->nallchans; i++) { + chan = &scan_chan_list->ch_param[i]; + ret = qdf_scnprintf(info + len, sizeof(info) - len, + " %d[%d][%d]", chan->mhz, chan->maxregpower, + chan->dfs_set); + if (ret <= 0) + break; + len += ret; + if (len >= (sizeof(info) - 20)) { + wmi_nofl_debug("Chan[TXPwr][DFS]:%s", info); + len = 0; + } + } + if (len) + wmi_nofl_debug("Chan[TXPwr][DFS]:%s", info); +} + +static QDF_STATUS send_scan_chan_list_cmd_tlv(wmi_unified_t wmi_handle, + struct scan_chan_list_params *chan_list) +{ + wmi_buf_t buf; + QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; + wmi_scan_chan_list_cmd_fixed_param *cmd; + int i; + uint8_t *buf_ptr; + wmi_channel *chan_info; + struct channel_param *tchan_info; + uint16_t len; + uint16_t num_send_chans, num_sends = 0; + + wmi_scan_chanlist_dump(chan_list); + tchan_info = &chan_list->ch_param[0]; + while (chan_list->nallchans) { + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + if (chan_list->nallchans > MAX_NUM_CHAN_PER_WMI_CMD) + num_send_chans = MAX_NUM_CHAN_PER_WMI_CMD; + else + num_send_chans = chan_list->nallchans; + + chan_list->nallchans -= num_send_chans; + len += sizeof(wmi_channel) * num_send_chans; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_status = QDF_STATUS_E_NOMEM; + goto end; + } + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_scan_chan_list_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_scan_chan_list_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_scan_chan_list_cmd_fixed_param)); + + WMI_LOGD("no of channels = %d, len = %d", num_send_chans, len); + + if (num_sends) + cmd->flags |= APPEND_TO_EXISTING_CHAN_LIST; + + if (chan_list->max_bw_support_present) + cmd->flags |= CHANNEL_MAX_BANDWIDTH_VALID; + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + chan_list->pdev_id); + + wmi_mtrace(WMI_SCAN_CHAN_LIST_CMDID, cmd->pdev_id, 0); + + cmd->num_scan_chans = num_send_chans; + WMITLV_SET_HDR((buf_ptr + + sizeof(wmi_scan_chan_list_cmd_fixed_param)), + WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_channel) * num_send_chans); + chan_info = (wmi_channel *)(buf_ptr + sizeof(*cmd) + + WMI_TLV_HDR_SIZE); + + for (i = 0; i < num_send_chans; ++i) { + WMITLV_SET_HDR(&chan_info->tlv_header, + WMITLV_TAG_STRUC_wmi_channel, + WMITLV_GET_STRUCT_TLVLEN(wmi_channel)); + chan_info->mhz = tchan_info->mhz; + chan_info->band_center_freq1 = + tchan_info->cfreq1; + chan_info->band_center_freq2 = + tchan_info->cfreq2; + + if (tchan_info->is_chan_passive) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_PASSIVE); + if (tchan_info->dfs_set) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_DFS); + + if (tchan_info->dfs_set_cfreq2) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_DFS_CFREQ2); + + if (tchan_info->allow_he) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_ALLOW_HE); + + if (tchan_info->allow_vht) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_ALLOW_VHT); + + if (tchan_info->allow_ht) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_ALLOW_HT); + WMI_SET_CHANNEL_MODE(chan_info, + tchan_info->phy_mode); + + if (tchan_info->half_rate) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_HALF_RATE); + + if (tchan_info->quarter_rate) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_QUARTER_RATE); + + if (tchan_info->psc_channel) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_PSC); + + if (tchan_info->nan_disabled) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_NAN_DISABLED); + + /* also fill in power information */ + WMI_SET_CHANNEL_MIN_POWER(chan_info, + tchan_info->minpower); + WMI_SET_CHANNEL_MAX_POWER(chan_info, + tchan_info->maxpower); + WMI_SET_CHANNEL_REG_POWER(chan_info, + tchan_info->maxregpower); + WMI_SET_CHANNEL_ANTENNA_MAX(chan_info, + tchan_info->antennamax); + WMI_SET_CHANNEL_REG_CLASSID(chan_info, + tchan_info->reg_class_id); + WMI_SET_CHANNEL_MAX_TX_POWER(chan_info, + tchan_info->maxregpower); + WMI_SET_CHANNEL_MAX_BANDWIDTH(chan_info, + tchan_info->max_bw_supported); + + tchan_info++; + chan_info++; + } + + qdf_status = wmi_unified_cmd_send( + wmi_handle, + buf, len, WMI_SCAN_CHAN_LIST_CMDID); + + if (QDF_IS_STATUS_ERROR(qdf_status)) { + WMI_LOGE("Failed to send WMI_SCAN_CHAN_LIST_CMDID"); + wmi_buf_free(buf); + goto end; + } + num_sends++; + } + +end: + return qdf_status; +} + +/** + * populate_tx_send_params - Populate TX param TLV for mgmt and offchan tx + * + * @bufp: Pointer to buffer + * @param: Pointer to tx param + * + * Return: QDF_STATUS_SUCCESS for success and QDF_STATUS_E_FAILURE for failure + */ +static inline QDF_STATUS populate_tx_send_params(uint8_t *bufp, + struct tx_send_params param) +{ + wmi_tx_send_params *tx_param; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!bufp) { + status = QDF_STATUS_E_FAILURE; + return status; + } + tx_param = (wmi_tx_send_params *)bufp; + WMITLV_SET_HDR(&tx_param->tlv_header, + WMITLV_TAG_STRUC_wmi_tx_send_params, + WMITLV_GET_STRUCT_TLVLEN(wmi_tx_send_params)); + WMI_TX_SEND_PARAM_PWR_SET(tx_param->tx_param_dword0, param.pwr); + WMI_TX_SEND_PARAM_MCS_MASK_SET(tx_param->tx_param_dword0, + param.mcs_mask); + WMI_TX_SEND_PARAM_NSS_MASK_SET(tx_param->tx_param_dword0, + param.nss_mask); + WMI_TX_SEND_PARAM_RETRY_LIMIT_SET(tx_param->tx_param_dword0, + param.retry_limit); + WMI_TX_SEND_PARAM_CHAIN_MASK_SET(tx_param->tx_param_dword1, + param.chain_mask); + WMI_TX_SEND_PARAM_BW_MASK_SET(tx_param->tx_param_dword1, + param.bw_mask); + WMI_TX_SEND_PARAM_PREAMBLE_SET(tx_param->tx_param_dword1, + param.preamble_type); + WMI_TX_SEND_PARAM_FRAME_TYPE_SET(tx_param->tx_param_dword1, + param.frame_type); + WMI_TX_SEND_PARAM_CFR_CAPTURE_SET(tx_param->tx_param_dword1, + param.cfr_enable); + + return status; +} + +#ifdef CONFIG_HL_SUPPORT +/** + * send_mgmt_cmd_tlv() - WMI scan start function + * @wmi_handle : handle to WMI. + * @param : pointer to hold mgmt cmd parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_mgmt_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_mgmt_params *param) +{ + wmi_buf_t buf; + uint8_t *bufp; + int32_t cmd_len; + wmi_mgmt_tx_send_cmd_fixed_param *cmd; + int32_t bufp_len = (param->frm_len < mgmt_tx_dl_frm_len) ? param->frm_len : + mgmt_tx_dl_frm_len; + + if (param->frm_len > mgmt_tx_dl_frm_len) { + WMI_LOGE("%s:mgmt frame len %u exceeds %u", + __func__, param->frm_len, mgmt_tx_dl_frm_len); + return QDF_STATUS_E_INVAL; + } + + cmd_len = sizeof(wmi_mgmt_tx_send_cmd_fixed_param) + + WMI_TLV_HDR_SIZE + + roundup(bufp_len, sizeof(uint32_t)); + + buf = wmi_buf_alloc(wmi_handle, sizeof(wmi_tx_send_params) + cmd_len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_mgmt_tx_send_cmd_fixed_param *)wmi_buf_data(buf); + bufp = (uint8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_mgmt_tx_send_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_mgmt_tx_send_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + + cmd->desc_id = param->desc_id; + cmd->chanfreq = param->chanfreq; + bufp += sizeof(wmi_mgmt_tx_send_cmd_fixed_param); + WMITLV_SET_HDR(bufp, WMITLV_TAG_ARRAY_BYTE, roundup(bufp_len, + sizeof(uint32_t))); + bufp += WMI_TLV_HDR_SIZE; + qdf_mem_copy(bufp, param->pdata, bufp_len); + + cmd->frame_len = param->frm_len; + cmd->buf_len = bufp_len; + cmd->tx_params_valid = param->tx_params_valid; + + wmi_mgmt_cmd_record(wmi_handle, WMI_MGMT_TX_SEND_CMDID, + bufp, cmd->vdev_id, cmd->chanfreq); + + bufp += roundup(bufp_len, sizeof(uint32_t)); + if (param->tx_params_valid) { + if (populate_tx_send_params(bufp, param->tx_param) != + QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Populate TX send params failed", + __func__); + goto free_buf; + } + cmd_len += sizeof(wmi_tx_send_params); + } + + wmi_mtrace(WMI_MGMT_TX_SEND_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, cmd_len, + WMI_MGMT_TX_SEND_CMDID)) { + WMI_LOGE("%s: Failed to send mgmt Tx", __func__); + goto free_buf; + } + return QDF_STATUS_SUCCESS; + +free_buf: + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; +} +#else +/** + * send_mgmt_cmd_tlv() - WMI scan start function + * @wmi_handle : handle to WMI. + * @param : pointer to hold mgmt cmd parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_mgmt_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_mgmt_params *param) +{ + wmi_buf_t buf; + wmi_mgmt_tx_send_cmd_fixed_param *cmd; + int32_t cmd_len; + uint64_t dma_addr; + void *qdf_ctx = param->qdf_ctx; + uint8_t *bufp; + QDF_STATUS status = QDF_STATUS_SUCCESS; + int32_t bufp_len = (param->frm_len < mgmt_tx_dl_frm_len) ? param->frm_len : + mgmt_tx_dl_frm_len; + + cmd_len = sizeof(wmi_mgmt_tx_send_cmd_fixed_param) + + WMI_TLV_HDR_SIZE + + roundup(bufp_len, sizeof(uint32_t)); + + buf = wmi_buf_alloc(wmi_handle, sizeof(wmi_tx_send_params) + cmd_len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_mgmt_tx_send_cmd_fixed_param *)wmi_buf_data(buf); + bufp = (uint8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_mgmt_tx_send_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_mgmt_tx_send_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + + cmd->desc_id = param->desc_id; + cmd->chanfreq = param->chanfreq; + bufp += sizeof(wmi_mgmt_tx_send_cmd_fixed_param); + WMITLV_SET_HDR(bufp, WMITLV_TAG_ARRAY_BYTE, roundup(bufp_len, + sizeof(uint32_t))); + bufp += WMI_TLV_HDR_SIZE; + qdf_mem_copy(bufp, param->pdata, bufp_len); + + status = qdf_nbuf_map_single(qdf_ctx, param->tx_frame, + QDF_DMA_TO_DEVICE); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: wmi buf map failed", __func__); + goto free_buf; + } + + dma_addr = qdf_nbuf_get_frag_paddr(param->tx_frame, 0); + cmd->paddr_lo = (uint32_t)(dma_addr & 0xffffffff); +#if defined(HTT_PADDR64) + cmd->paddr_hi = (uint32_t)((dma_addr >> 32) & 0x1F); +#endif + cmd->frame_len = param->frm_len; + cmd->buf_len = bufp_len; + cmd->tx_params_valid = param->tx_params_valid; + + wmi_mgmt_cmd_record(wmi_handle, WMI_MGMT_TX_SEND_CMDID, + bufp, cmd->vdev_id, cmd->chanfreq); + + bufp += roundup(bufp_len, sizeof(uint32_t)); + if (param->tx_params_valid) { + status = populate_tx_send_params(bufp, param->tx_param); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Populate TX send params failed", + __func__); + goto unmap_tx_frame; + } + cmd_len += sizeof(wmi_tx_send_params); + } + + wmi_mtrace(WMI_MGMT_TX_SEND_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, cmd_len, + WMI_MGMT_TX_SEND_CMDID)) { + WMI_LOGE("%s: Failed to send mgmt Tx", __func__); + goto unmap_tx_frame; + } + return QDF_STATUS_SUCCESS; + +unmap_tx_frame: + qdf_nbuf_unmap_single(qdf_ctx, param->tx_frame, + QDF_DMA_TO_DEVICE); +free_buf: + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; +} +#endif /* CONFIG_HL_SUPPORT */ + +/** + * send_offchan_data_tx_send_cmd_tlv() - Send off-chan tx data + * @wmi_handle : handle to WMI. + * @param : pointer to offchan data tx cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and error on failure. + */ +static QDF_STATUS send_offchan_data_tx_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_offchan_data_tx_params *param) +{ + wmi_buf_t buf; + wmi_offchan_data_tx_send_cmd_fixed_param *cmd; + int32_t cmd_len; + uint64_t dma_addr; + void *qdf_ctx = param->qdf_ctx; + uint8_t *bufp; + int32_t bufp_len = (param->frm_len < mgmt_tx_dl_frm_len) ? + param->frm_len : mgmt_tx_dl_frm_len; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + cmd_len = sizeof(wmi_offchan_data_tx_send_cmd_fixed_param) + + WMI_TLV_HDR_SIZE + + roundup(bufp_len, sizeof(uint32_t)); + + buf = wmi_buf_alloc(wmi_handle, sizeof(wmi_tx_send_params) + cmd_len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_offchan_data_tx_send_cmd_fixed_param *) wmi_buf_data(buf); + bufp = (uint8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_offchan_data_tx_send_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_offchan_data_tx_send_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + + cmd->desc_id = param->desc_id; + cmd->chanfreq = param->chanfreq; + bufp += sizeof(wmi_offchan_data_tx_send_cmd_fixed_param); + WMITLV_SET_HDR(bufp, WMITLV_TAG_ARRAY_BYTE, roundup(bufp_len, + sizeof(uint32_t))); + bufp += WMI_TLV_HDR_SIZE; + qdf_mem_copy(bufp, param->pdata, bufp_len); + qdf_nbuf_map_single(qdf_ctx, param->tx_frame, QDF_DMA_TO_DEVICE); + dma_addr = qdf_nbuf_get_frag_paddr(param->tx_frame, 0); + cmd->paddr_lo = (uint32_t)(dma_addr & 0xffffffff); +#if defined(HTT_PADDR64) + cmd->paddr_hi = (uint32_t)((dma_addr >> 32) & 0x1F); +#endif + cmd->frame_len = param->frm_len; + cmd->buf_len = bufp_len; + cmd->tx_params_valid = param->tx_params_valid; + + wmi_mgmt_cmd_record(wmi_handle, WMI_OFFCHAN_DATA_TX_SEND_CMDID, + bufp, cmd->vdev_id, cmd->chanfreq); + + bufp += roundup(bufp_len, sizeof(uint32_t)); + if (param->tx_params_valid) { + status = populate_tx_send_params(bufp, param->tx_param); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Populate TX send params failed", + __func__); + goto err1; + } + cmd_len += sizeof(wmi_tx_send_params); + } + + wmi_mtrace(WMI_OFFCHAN_DATA_TX_SEND_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, cmd_len, + WMI_OFFCHAN_DATA_TX_SEND_CMDID)) { + WMI_LOGE("%s: Failed to offchan data Tx", __func__); + goto err1; + } + + return QDF_STATUS_SUCCESS; + +err1: + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; +} + +/** + * send_modem_power_state_cmd_tlv() - set modem power state to fw + * @wmi_handle: wmi handle + * @param_value: parameter value + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_modem_power_state_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t param_value) +{ + QDF_STATUS ret; + wmi_modem_power_state_cmd_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_modem_power_state_cmd_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_modem_power_state_cmd_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_modem_power_state_cmd_param)); + cmd->modem_power_state = param_value; + WMI_LOGD("%s: Setting cmd->modem_power_state = %u", __func__, + param_value); + wmi_mtrace(WMI_MODEM_POWER_STATE_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_MODEM_POWER_STATE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send notify cmd ret = %d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_set_sta_ps_mode_cmd_tlv() - set sta powersave mode in fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @val: value + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +static QDF_STATUS send_set_sta_ps_mode_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint8_t val) +{ + wmi_sta_powersave_mode_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + WMI_LOGD("Set Sta Mode Ps vdevId %d val %d", vdev_id, val); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_sta_powersave_mode_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sta_powersave_mode_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sta_powersave_mode_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + if (val) + cmd->sta_ps_mode = WMI_STA_PS_MODE_ENABLED; + else + cmd->sta_ps_mode = WMI_STA_PS_MODE_DISABLED; + + wmi_mtrace(WMI_STA_POWERSAVE_MODE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_STA_POWERSAVE_MODE_CMDID)) { + WMI_LOGE("Set Sta Mode Ps Failed vdevId %d val %d", + vdev_id, val); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_idle_roam_monitor_cmd_tlv() - send idle monitor command to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +static QDF_STATUS send_idle_roam_monitor_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t val) +{ + wmi_idle_trigger_monitor_cmd_fixed_param *cmd; + wmi_buf_t buf; + size_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_idle_trigger_monitor_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_idle_trigger_monitor_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_idle_trigger_monitor_cmd_fixed_param)); + + cmd->idle_trigger_monitor = (val ? WMI_IDLE_TRIGGER_MONITOR_ON : + WMI_IDLE_TRIGGER_MONITOR_OFF); + + WMI_LOGD("val:%d", cmd->idle_trigger_monitor); + + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_IDLE_TRIGGER_MONITOR_CMDID)) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_mimops_cmd_tlv() - set MIMO powersave + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @value: value + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +static QDF_STATUS send_set_mimops_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, int value) +{ + QDF_STATUS ret; + wmi_sta_smps_force_mode_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_sta_smps_force_mode_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sta_smps_force_mode_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sta_smps_force_mode_cmd_fixed_param)); + + cmd->vdev_id = vdev_id; + + /* WMI_SMPS_FORCED_MODE values do not directly map + * to SM power save values defined in the specification. + * Make sure to send the right mapping. + */ + switch (value) { + case 0: + cmd->forced_mode = WMI_SMPS_FORCED_MODE_NONE; + break; + case 1: + cmd->forced_mode = WMI_SMPS_FORCED_MODE_DISABLED; + break; + case 2: + cmd->forced_mode = WMI_SMPS_FORCED_MODE_STATIC; + break; + case 3: + cmd->forced_mode = WMI_SMPS_FORCED_MODE_DYNAMIC; + break; + default: + WMI_LOGE("%s:INVALID Mimo PS CONFIG", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + WMI_LOGD("Setting vdev %d value = %u", vdev_id, value); + + wmi_mtrace(WMI_STA_SMPS_FORCE_MODE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_STA_SMPS_FORCE_MODE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send set Mimo PS ret = %d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_set_smps_params_cmd_tlv() - set smps params + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @value: value + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +static QDF_STATUS send_set_smps_params_cmd_tlv(wmi_unified_t wmi_handle, uint8_t vdev_id, + int value) +{ + QDF_STATUS ret; + wmi_sta_smps_param_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_sta_smps_param_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sta_smps_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sta_smps_param_cmd_fixed_param)); + + cmd->vdev_id = vdev_id; + cmd->value = value & WMI_SMPS_MASK_LOWER_16BITS; + cmd->param = + (value >> WMI_SMPS_PARAM_VALUE_S) & WMI_SMPS_MASK_UPPER_3BITS; + + WMI_LOGD("Setting vdev %d value = %x param %x", vdev_id, cmd->value, + cmd->param); + + wmi_mtrace(WMI_STA_SMPS_PARAM_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_STA_SMPS_PARAM_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send set Mimo PS ret = %d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_get_temperature_cmd_tlv() - get pdev temperature req + * @wmi_handle: wmi handle + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +static QDF_STATUS send_get_temperature_cmd_tlv(wmi_unified_t wmi_handle) +{ + wmi_pdev_get_temperature_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len = sizeof(wmi_pdev_get_temperature_cmd_fixed_param); + uint8_t *buf_ptr; + + if (!wmi_handle) { + WMI_LOGE(FL("WMI is closed, can not issue cmd")); + return QDF_STATUS_E_INVAL; + } + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (wmi_pdev_get_temperature_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_get_temperature_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_get_temperature_cmd_fixed_param)); + + wmi_mtrace(WMI_PDEV_GET_TEMPERATURE_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_PDEV_GET_TEMPERATURE_CMDID)) { + WMI_LOGE(FL("failed to send get temperature command")); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_sta_uapsd_auto_trig_cmd_tlv() - set uapsd auto trigger command + * @wmi_handle: wmi handle + * @vdevid: vdev id + * @peer_addr: peer mac address + * @auto_triggerparam: auto trigger parameters + * @num_ac: number of access category + * + * This function sets the trigger + * uapsd params such as service interval, delay interval + * and suspend interval which will be used by the firmware + * to send trigger frames periodically when there is no + * traffic on the transmit side. + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +static QDF_STATUS send_set_sta_uapsd_auto_trig_cmd_tlv(wmi_unified_t wmi_handle, + struct sta_uapsd_trig_params *param) +{ + wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd; + QDF_STATUS ret; + uint32_t param_len = param->num_ac * sizeof(wmi_sta_uapsd_auto_trig_param); + uint32_t cmd_len = sizeof(*cmd) + param_len + WMI_TLV_HDR_SIZE; + uint32_t i; + wmi_buf_t buf; + uint8_t *buf_ptr; + struct sta_uapsd_params *uapsd_param; + wmi_sta_uapsd_auto_trig_param *trig_param; + + buf = wmi_buf_alloc(wmi_handle, cmd_len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_sta_uapsd_auto_trig_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sta_uapsd_auto_trig_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sta_uapsd_auto_trig_cmd_fixed_param)); + cmd->vdev_id = param->vdevid; + cmd->num_ac = param->num_ac; + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_addr, &cmd->peer_macaddr); + + /* TLV indicating array of structures to follow */ + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, param_len); + + buf_ptr += WMI_TLV_HDR_SIZE; + + /* + * Update tag and length for uapsd auto trigger params (this will take + * care of updating tag and length if it is not pre-filled by caller). + */ + uapsd_param = (struct sta_uapsd_params *)param->auto_triggerparam; + trig_param = (wmi_sta_uapsd_auto_trig_param *)buf_ptr; + for (i = 0; i < param->num_ac; i++) { + WMITLV_SET_HDR((buf_ptr + + (i * sizeof(wmi_sta_uapsd_auto_trig_param))), + WMITLV_TAG_STRUC_wmi_sta_uapsd_auto_trig_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sta_uapsd_auto_trig_param)); + trig_param->wmm_ac = uapsd_param->wmm_ac; + trig_param->user_priority = uapsd_param->user_priority; + trig_param->service_interval = uapsd_param->service_interval; + trig_param->suspend_interval = uapsd_param->suspend_interval; + trig_param->delay_interval = uapsd_param->delay_interval; + trig_param++; + uapsd_param++; + } + + wmi_mtrace(WMI_STA_UAPSD_AUTO_TRIG_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, cmd_len, + WMI_STA_UAPSD_AUTO_TRIG_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send set uapsd param ret = %d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_set_thermal_mgmt_cmd_tlv() - set thermal mgmt command to fw + * @wmi_handle: Pointer to wmi handle + * @thermal_info: Thermal command information + * + * This function sends the thermal management command + * to the firmware + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + */ +static QDF_STATUS send_set_thermal_mgmt_cmd_tlv(wmi_unified_t wmi_handle, + struct thermal_cmd_params *thermal_info) +{ + wmi_thermal_mgmt_cmd_fixed_param *cmd = NULL; + wmi_buf_t buf = NULL; + QDF_STATUS status; + uint32_t len = 0; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_thermal_mgmt_cmd_fixed_param *) wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_thermal_mgmt_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_thermal_mgmt_cmd_fixed_param)); + + cmd->lower_thresh_degreeC = thermal_info->min_temp; + cmd->upper_thresh_degreeC = thermal_info->max_temp; + cmd->enable = thermal_info->thermal_enable; + + WMI_LOGE("TM Sending thermal mgmt cmd: low temp %d, upper temp %d, enabled %d", + cmd->lower_thresh_degreeC, cmd->upper_thresh_degreeC, cmd->enable); + + wmi_mtrace(WMI_THERMAL_MGMT_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_THERMAL_MGMT_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + wmi_buf_free(buf); + WMI_LOGE("%s:Failed to send thermal mgmt command", __func__); + } + + return status; +} + +/** + * send_lro_config_cmd_tlv() - process the LRO config command + * @wmi_handle: Pointer to WMI handle + * @wmi_lro_cmd: Pointer to LRO configuration parameters + * + * This function sends down the LRO configuration parameters to + * the firmware to enable LRO, sets the TCP flags and sets the + * seed values for the toeplitz hash generation + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + */ +static QDF_STATUS send_lro_config_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_lro_config_cmd_t *wmi_lro_cmd) +{ + wmi_lro_info_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + uint8_t pdev_id = wmi_lro_cmd->pdev_id; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_lro_info_cmd_fixed_param *) wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_lro_info_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_lro_info_cmd_fixed_param)); + + cmd->lro_enable = wmi_lro_cmd->lro_enable; + WMI_LRO_INFO_TCP_FLAG_VALS_SET(cmd->tcp_flag_u32, + wmi_lro_cmd->tcp_flag); + WMI_LRO_INFO_TCP_FLAGS_MASK_SET(cmd->tcp_flag_u32, + wmi_lro_cmd->tcp_flag_mask); + cmd->toeplitz_hash_ipv4_0_3 = + wmi_lro_cmd->toeplitz_hash_ipv4[0]; + cmd->toeplitz_hash_ipv4_4_7 = + wmi_lro_cmd->toeplitz_hash_ipv4[1]; + cmd->toeplitz_hash_ipv4_8_11 = + wmi_lro_cmd->toeplitz_hash_ipv4[2]; + cmd->toeplitz_hash_ipv4_12_15 = + wmi_lro_cmd->toeplitz_hash_ipv4[3]; + cmd->toeplitz_hash_ipv4_16 = + wmi_lro_cmd->toeplitz_hash_ipv4[4]; + + cmd->toeplitz_hash_ipv6_0_3 = + wmi_lro_cmd->toeplitz_hash_ipv6[0]; + cmd->toeplitz_hash_ipv6_4_7 = + wmi_lro_cmd->toeplitz_hash_ipv6[1]; + cmd->toeplitz_hash_ipv6_8_11 = + wmi_lro_cmd->toeplitz_hash_ipv6[2]; + cmd->toeplitz_hash_ipv6_12_15 = + wmi_lro_cmd->toeplitz_hash_ipv6[3]; + cmd->toeplitz_hash_ipv6_16_19 = + wmi_lro_cmd->toeplitz_hash_ipv6[4]; + cmd->toeplitz_hash_ipv6_20_23 = + wmi_lro_cmd->toeplitz_hash_ipv6[5]; + cmd->toeplitz_hash_ipv6_24_27 = + wmi_lro_cmd->toeplitz_hash_ipv6[6]; + cmd->toeplitz_hash_ipv6_28_31 = + wmi_lro_cmd->toeplitz_hash_ipv6[7]; + cmd->toeplitz_hash_ipv6_32_35 = + wmi_lro_cmd->toeplitz_hash_ipv6[8]; + cmd->toeplitz_hash_ipv6_36_39 = + wmi_lro_cmd->toeplitz_hash_ipv6[9]; + cmd->toeplitz_hash_ipv6_40 = + wmi_lro_cmd->toeplitz_hash_ipv6[10]; + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + pdev_id); + WMI_LOGD("WMI_LRO_CONFIG: lro_enable %d, tcp_flag 0x%x, pdev_id: %d", + cmd->lro_enable, cmd->tcp_flag_u32, cmd->pdev_id); + + wmi_mtrace(WMI_LRO_CONFIG_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(*cmd), WMI_LRO_CONFIG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + wmi_buf_free(buf); + WMI_LOGE("%s:Failed to send WMI_LRO_CONFIG_CMDID", __func__); + } + + return status; +} + +/** + * send_peer_rate_report_cmd_tlv() - process the peer rate report command + * @wmi_handle: Pointer to wmi handle + * @rate_report_params: Pointer to peer rate report parameters + * + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + */ +static QDF_STATUS send_peer_rate_report_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_peer_rate_report_params *rate_report_params) +{ + wmi_peer_set_rate_report_condition_fixed_param *cmd = NULL; + wmi_buf_t buf = NULL; + QDF_STATUS status = 0; + uint32_t len = 0; + uint32_t i, j; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_peer_set_rate_report_condition_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR( + &cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_set_rate_report_condition_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_peer_set_rate_report_condition_fixed_param)); + + cmd->enable_rate_report = rate_report_params->rate_report_enable; + cmd->report_backoff_time = rate_report_params->backoff_time; + cmd->report_timer_period = rate_report_params->timer_period; + for (i = 0; i < PEER_RATE_REPORT_COND_MAX_NUM; i++) { + cmd->cond_per_phy[i].val_cond_flags = + rate_report_params->report_per_phy[i].cond_flags; + cmd->cond_per_phy[i].rate_delta.min_delta = + rate_report_params->report_per_phy[i].delta.delta_min; + cmd->cond_per_phy[i].rate_delta.percentage = + rate_report_params->report_per_phy[i].delta.percent; + for (j = 0; j < MAX_NUM_OF_RATE_THRESH; j++) { + cmd->cond_per_phy[i].rate_threshold[j] = + rate_report_params->report_per_phy[i]. + report_rate_threshold[j]; + } + } + + WMI_LOGE("%s enable %d backoff_time %d period %d", __func__, + cmd->enable_rate_report, + cmd->report_backoff_time, cmd->report_timer_period); + + wmi_mtrace(WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + wmi_buf_free(buf); + WMI_LOGE("%s:Failed to send peer_set_report_cond command", + __func__); + } + return status; +} + +/** + * send_process_update_edca_param_cmd_tlv() - update EDCA params + * @wmi_handle: wmi handle + * @vdev_id: vdev id. + * @wmm_vparams: edca parameters + * + * This function updates EDCA parameters to the target + * + * Return: CDF Status + */ +static QDF_STATUS send_process_update_edca_param_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, bool mu_edca_param, + struct wmi_host_wme_vparams wmm_vparams[WMI_MAX_NUM_AC]) +{ + uint8_t *buf_ptr; + wmi_buf_t buf; + wmi_vdev_set_wmm_params_cmd_fixed_param *cmd; + wmi_wmm_vparams *wmm_param; + struct wmi_host_wme_vparams *twmm_param; + int len = sizeof(*cmd); + int ac; + + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_vdev_set_wmm_params_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_wmm_params_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_set_wmm_params_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->wmm_param_type = mu_edca_param; + + for (ac = 0; ac < WMI_MAX_NUM_AC; ac++) { + wmm_param = (wmi_wmm_vparams *) (&cmd->wmm_params[ac]); + twmm_param = (struct wmi_host_wme_vparams *) (&wmm_vparams[ac]); + WMITLV_SET_HDR(&wmm_param->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_wmm_params_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_wmm_vparams)); + wmm_param->cwmin = twmm_param->cwmin; + wmm_param->cwmax = twmm_param->cwmax; + wmm_param->aifs = twmm_param->aifs; + if (mu_edca_param) + wmm_param->mu_edca_timer = twmm_param->mu_edca_timer; + else + wmm_param->txoplimit = twmm_param->txoplimit; + wmm_param->acm = twmm_param->acm; + wmm_param->no_ack = twmm_param->noackpolicy; + } + + wmi_mtrace(WMI_VDEV_SET_WMM_PARAMS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SET_WMM_PARAMS_CMDID)) + goto fail; + + return QDF_STATUS_SUCCESS; + +fail: + wmi_buf_free(buf); + WMI_LOGE("%s: Failed to set WMM Paremeters", __func__); + return QDF_STATUS_E_FAILURE; +} + +/** + * send_probe_rsp_tmpl_send_cmd_tlv() - send probe response template to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @probe_rsp_info: probe response info + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_probe_rsp_tmpl_send_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct wmi_probe_resp_params *probe_rsp_info) +{ + wmi_prb_tmpl_cmd_fixed_param *cmd; + wmi_bcn_prb_info *bcn_prb_info; + wmi_buf_t wmi_buf; + uint32_t tmpl_len, tmpl_len_aligned, wmi_buf_len; + uint8_t *buf_ptr; + QDF_STATUS ret; + + WMI_LOGD(FL("Send probe response template for vdev %d"), vdev_id); + + tmpl_len = probe_rsp_info->prb_rsp_template_len; + tmpl_len_aligned = roundup(tmpl_len, sizeof(uint32_t)); + + wmi_buf_len = sizeof(wmi_prb_tmpl_cmd_fixed_param) + + sizeof(wmi_bcn_prb_info) + WMI_TLV_HDR_SIZE + + tmpl_len_aligned; + + if (wmi_buf_len > WMI_BEACON_TX_BUFFER_SIZE) { + WMI_LOGE(FL("wmi_buf_len: %d > %d. Can't send wmi cmd"), + wmi_buf_len, WMI_BEACON_TX_BUFFER_SIZE); + return QDF_STATUS_E_INVAL; + } + + wmi_buf = wmi_buf_alloc(wmi_handle, wmi_buf_len); + if (!wmi_buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (wmi_prb_tmpl_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_prb_tmpl_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_prb_tmpl_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->buf_len = tmpl_len; + buf_ptr += sizeof(wmi_prb_tmpl_cmd_fixed_param); + + bcn_prb_info = (wmi_bcn_prb_info *) buf_ptr; + WMITLV_SET_HDR(&bcn_prb_info->tlv_header, + WMITLV_TAG_STRUC_wmi_bcn_prb_info, + WMITLV_GET_STRUCT_TLVLEN(wmi_bcn_prb_info)); + bcn_prb_info->caps = 0; + bcn_prb_info->erp = 0; + buf_ptr += sizeof(wmi_bcn_prb_info); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, tmpl_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, probe_rsp_info->prb_rsp_template_frm, tmpl_len); + + wmi_mtrace(WMI_PRB_TMPL_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, + wmi_buf, wmi_buf_len, WMI_PRB_TMPL_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL("Failed to send PRB RSP tmpl: %d"), ret); + wmi_buf_free(wmi_buf); + } + + return ret; +} + +#if defined(ATH_SUPPORT_WAPI) || defined(FEATURE_WLAN_WAPI) +#define WPI_IV_LEN 16 + +/** + * wmi_update_wpi_key_counter() - update WAPI tsc and rsc key counters + * + * @dest_tx: destination address of tsc key counter + * @src_tx: source address of tsc key counter + * @dest_rx: destination address of rsc key counter + * @src_rx: source address of rsc key counter + * + * This function copies WAPI tsc and rsc key counters in the wmi buffer. + * + * Return: None + * + */ +static void wmi_update_wpi_key_counter(uint8_t *dest_tx, uint8_t *src_tx, + uint8_t *dest_rx, uint8_t *src_rx) +{ + qdf_mem_copy(dest_tx, src_tx, WPI_IV_LEN); + qdf_mem_copy(dest_rx, src_rx, WPI_IV_LEN); +} +#else +static void wmi_update_wpi_key_counter(uint8_t *dest_tx, uint8_t *src_tx, + uint8_t *dest_rx, uint8_t *src_rx) +{ + return; +} +#endif + +/** + * send_setup_install_key_cmd_tlv() - set key parameters + * @wmi_handle: wmi handle + * @key_params: key parameters + * + * This function fills structure from information + * passed in key_params. + * + * Return: QDF_STATUS_SUCCESS - success + * QDF_STATUS_E_FAILURE - failure + * QDF_STATUS_E_NOMEM - not able to allocate buffer + */ +static QDF_STATUS send_setup_install_key_cmd_tlv(wmi_unified_t wmi_handle, + struct set_key_params *key_params) +{ + wmi_vdev_install_key_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t len; + uint8_t *key_data; + QDF_STATUS status; + + len = sizeof(*cmd) + roundup(key_params->key_len, sizeof(uint32_t)) + + WMI_TLV_HDR_SIZE; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_vdev_install_key_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_install_key_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_install_key_cmd_fixed_param)); + cmd->vdev_id = key_params->vdev_id; + cmd->key_ix = key_params->key_idx; + if (key_params->group_key_idx) { + cmd->is_group_key_ix_valid = 1; + cmd->group_key_ix = key_params->group_key_idx; + } + + + WMI_CHAR_ARRAY_TO_MAC_ADDR(key_params->peer_mac, &cmd->peer_macaddr); + cmd->key_flags |= key_params->key_flags; + cmd->key_cipher = key_params->key_cipher; + if ((key_params->key_txmic_len) && + (key_params->key_rxmic_len)) { + cmd->key_txmic_len = key_params->key_txmic_len; + cmd->key_rxmic_len = key_params->key_rxmic_len; + } +#if defined(ATH_SUPPORT_WAPI) || defined(FEATURE_WLAN_WAPI) + wmi_update_wpi_key_counter(cmd->wpi_key_tsc_counter, + key_params->tx_iv, + cmd->wpi_key_rsc_counter, + key_params->rx_iv); +#endif + buf_ptr += sizeof(wmi_vdev_install_key_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + roundup(key_params->key_len, sizeof(uint32_t))); + key_data = (uint8_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + qdf_mem_copy((void *)key_data, + (const void *)key_params->key_data, key_params->key_len); + qdf_mem_copy(&cmd->key_rsc_counter, &key_params->key_rsc_ctr, + sizeof(wmi_key_seq_counter)); + cmd->key_len = key_params->key_len; + + qdf_mem_copy(&cmd->key_tsc_counter, &key_params->key_tsc_counter, + sizeof(wmi_key_seq_counter)); + wmi_mtrace(WMI_VDEV_INSTALL_KEY_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_INSTALL_KEY_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_zero(wmi_buf_data(buf), len); + wmi_buf_free(buf); + } + return status; +} + +/** + * send_p2p_go_set_beacon_ie_cmd_tlv() - set beacon IE for p2p go + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @p2p_ie: p2p IE + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_p2p_go_set_beacon_ie_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint8_t *p2p_ie) +{ + QDF_STATUS ret; + wmi_p2p_go_set_beacon_ie_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t ie_len, ie_len_aligned, wmi_buf_len; + uint8_t *buf_ptr; + + ie_len = (uint32_t) (p2p_ie[1] + 2); + + /* More than one P2P IE may be included in a single frame. + If multiple P2P IEs are present, the complete P2P attribute + data consists of the concatenation of the P2P Attribute + fields of the P2P IEs. The P2P Attributes field of each + P2P IE may be any length up to the maximum (251 octets). + In this case host sends one P2P IE to firmware so the length + should not exceed more than 251 bytes + */ + if (ie_len > 251) { + WMI_LOGE("%s : invalid p2p ie length %u", __func__, ie_len); + return QDF_STATUS_E_INVAL; + } + + ie_len_aligned = roundup(ie_len, sizeof(uint32_t)); + + wmi_buf_len = + sizeof(wmi_p2p_go_set_beacon_ie_fixed_param) + ie_len_aligned + + WMI_TLV_HDR_SIZE; + + wmi_buf = wmi_buf_alloc(wmi_handle, wmi_buf_len); + if (!wmi_buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (wmi_p2p_go_set_beacon_ie_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_p2p_go_set_beacon_ie_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_p2p_go_set_beacon_ie_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->ie_buf_len = ie_len; + + buf_ptr += sizeof(wmi_p2p_go_set_beacon_ie_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, ie_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, p2p_ie, ie_len); + + WMI_LOGD("%s: Sending WMI_P2P_GO_SET_BEACON_IE", __func__); + + wmi_mtrace(WMI_P2P_GO_SET_BEACON_IE, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, + wmi_buf, wmi_buf_len, + WMI_P2P_GO_SET_BEACON_IE); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send bcn tmpl: %d", ret); + wmi_buf_free(wmi_buf); + } + + WMI_LOGD("%s: Successfully sent WMI_P2P_GO_SET_BEACON_IE", __func__); + return ret; +} + +/** + * send_scan_probe_setoui_cmd_tlv() - set scan probe OUI + * @wmi_handle: wmi handle + * @psetoui: OUI parameters + * + * set scan probe OUI parameters in firmware + * + * Return: CDF status + */ +static QDF_STATUS send_scan_probe_setoui_cmd_tlv(wmi_unified_t wmi_handle, + struct scan_mac_oui *psetoui) +{ + wmi_scan_prob_req_oui_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + uint32_t *oui_buf; + struct probe_req_whitelist_attr *ie_whitelist = &psetoui->ie_whitelist; + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + + ie_whitelist->num_vendor_oui * sizeof(wmi_vendor_oui); + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + cmd = (wmi_scan_prob_req_oui_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_scan_prob_req_oui_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_scan_prob_req_oui_cmd_fixed_param)); + + oui_buf = &cmd->prob_req_oui; + qdf_mem_zero(oui_buf, sizeof(cmd->prob_req_oui)); + *oui_buf = psetoui->oui[0] << 16 | psetoui->oui[1] << 8 + | psetoui->oui[2]; + WMI_LOGD("%s: wmi:oui received from hdd %08x", __func__, + cmd->prob_req_oui); + + cmd->vdev_id = psetoui->vdev_id; + cmd->flags = WMI_SCAN_PROBE_OUI_SPOOFED_MAC_IN_PROBE_REQ; + if (psetoui->enb_probe_req_sno_randomization) + cmd->flags |= WMI_SCAN_PROBE_OUI_RANDOM_SEQ_NO_IN_PROBE_REQ; + + if (ie_whitelist->white_list) { + wmi_fill_ie_whitelist_attrs(cmd->ie_bitmap, + &cmd->num_vendor_oui, + ie_whitelist); + cmd->flags |= + WMI_SCAN_PROBE_OUI_ENABLE_IE_WHITELIST_IN_PROBE_REQ; + } + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + ie_whitelist->num_vendor_oui * sizeof(wmi_vendor_oui)); + buf_ptr += WMI_TLV_HDR_SIZE; + + if (cmd->num_vendor_oui != 0) { + wmi_fill_vendor_oui(buf_ptr, cmd->num_vendor_oui, + ie_whitelist->voui); + buf_ptr += cmd->num_vendor_oui * sizeof(wmi_vendor_oui); + } + + wmi_mtrace(WMI_SCAN_PROB_REQ_OUI_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_SCAN_PROB_REQ_OUI_CMDID)) { + WMI_LOGE("%s: failed to send command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +#ifdef IPA_OFFLOAD +/** send_ipa_offload_control_cmd_tlv() - ipa offload control parameter + * @wmi_handle: wmi handle + * @ipa_offload: ipa offload control parameter + * + * Returns: 0 on success, error number otherwise + */ +static QDF_STATUS send_ipa_offload_control_cmd_tlv(wmi_unified_t wmi_handle, + struct ipa_uc_offload_control_params *ipa_offload) +{ + wmi_ipa_offload_enable_disable_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + u_int8_t *buf_ptr; + + len = sizeof(*cmd); + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) + return QDF_STATUS_E_NOMEM; + + WMI_LOGD("%s: offload_type=%d, enable=%d", __func__, + ipa_offload->offload_type, ipa_offload->enable); + + buf_ptr = (u_int8_t *)wmi_buf_data(wmi_buf); + + cmd = (wmi_ipa_offload_enable_disable_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUCT_wmi_ipa_offload_enable_disable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_ipa_offload_enable_disable_cmd_fixed_param)); + + cmd->offload_type = ipa_offload->offload_type; + cmd->vdev_id = ipa_offload->vdev_id; + cmd->enable = ipa_offload->enable; + + wmi_mtrace(WMI_IPA_OFFLOAD_ENABLE_DISABLE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_IPA_OFFLOAD_ENABLE_DISABLE_CMDID)) { + WMI_LOGE("%s: failed to command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * send_pno_stop_cmd_tlv() - PNO stop request + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * This function request FW to stop ongoing PNO operation. + * + * Return: CDF status + */ +static QDF_STATUS send_pno_stop_cmd_tlv(wmi_unified_t wmi_handle, uint8_t vdev_id) +{ + wmi_nlo_config_cmd_fixed_param *cmd; + int32_t len = sizeof(*cmd); + wmi_buf_t buf; + uint8_t *buf_ptr; + int ret; + + /* + * TLV place holder for array of structures nlo_configured_parameters + * TLV place holder for array of uint32_t channel_list + * TLV place holder for chnl prediction cfg + */ + len += WMI_TLV_HDR_SIZE + WMI_TLV_HDR_SIZE + WMI_TLV_HDR_SIZE; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_nlo_config_cmd_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *) cmd; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_nlo_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_nlo_config_cmd_fixed_param)); + + cmd->vdev_id = vdev_id; + cmd->flags = WMI_NLO_CONFIG_STOP; + buf_ptr += sizeof(*cmd); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + wmi_mtrace(WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send nlo wmi cmd", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_set_pno_channel_prediction() - Set PNO channel prediction + * @buf_ptr: Buffer passed by upper layers + * @pno: Buffer to be sent to the firmware + * + * Copy the PNO Channel prediction configuration parameters + * passed by the upper layers to a WMI format TLV and send it + * down to the firmware. + * + * Return: None + */ +static void wmi_set_pno_channel_prediction(uint8_t *buf_ptr, + struct pno_scan_req_params *pno) +{ + nlo_channel_prediction_cfg *channel_prediction_cfg = + (nlo_channel_prediction_cfg *) buf_ptr; + WMITLV_SET_HDR(&channel_prediction_cfg->tlv_header, + WMITLV_TAG_ARRAY_BYTE, + WMITLV_GET_STRUCT_TLVLEN(nlo_channel_prediction_cfg)); +#ifdef FEATURE_WLAN_SCAN_PNO + channel_prediction_cfg->enable = pno->pno_channel_prediction; + channel_prediction_cfg->top_k_num = pno->top_k_num_of_channels; + channel_prediction_cfg->stationary_threshold = pno->stationary_thresh; + channel_prediction_cfg->full_scan_period_ms = + pno->channel_prediction_full_scan; +#endif + buf_ptr += sizeof(nlo_channel_prediction_cfg); + WMI_LOGD("enable: %d, top_k_num: %d, stat_thresh: %d, full_scan: %d", + channel_prediction_cfg->enable, + channel_prediction_cfg->top_k_num, + channel_prediction_cfg->stationary_threshold, + channel_prediction_cfg->full_scan_period_ms); +} + +/** + * send_nlo_mawc_cmd_tlv() - Send MAWC NLO configuration + * @wmi_handle: wmi handle + * @params: configuration parameters + * + * Return: QDF_STATUS + */ +static QDF_STATUS send_nlo_mawc_cmd_tlv(wmi_unified_t wmi_handle, + struct nlo_mawc_params *params) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len; + uint8_t *buf_ptr; + wmi_nlo_configure_mawc_cmd_fixed_param *wmi_nlo_mawc_params; + + len = sizeof(*wmi_nlo_mawc_params); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + wmi_nlo_mawc_params = + (wmi_nlo_configure_mawc_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&wmi_nlo_mawc_params->tlv_header, + WMITLV_TAG_STRUC_wmi_nlo_configure_mawc_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_nlo_configure_mawc_cmd_fixed_param)); + wmi_nlo_mawc_params->vdev_id = params->vdev_id; + if (params->enable) + wmi_nlo_mawc_params->enable = 1; + else + wmi_nlo_mawc_params->enable = 0; + wmi_nlo_mawc_params->exp_backoff_ratio = params->exp_backoff_ratio; + wmi_nlo_mawc_params->init_scan_interval = params->init_scan_interval; + wmi_nlo_mawc_params->max_scan_interval = params->max_scan_interval; + WMI_LOGD(FL("MAWC NLO en=%d, vdev=%d, ratio=%d, SCAN init=%d, max=%d"), + wmi_nlo_mawc_params->enable, wmi_nlo_mawc_params->vdev_id, + wmi_nlo_mawc_params->exp_backoff_ratio, + wmi_nlo_mawc_params->init_scan_interval, + wmi_nlo_mawc_params->max_scan_interval); + + wmi_mtrace(WMI_NLO_CONFIGURE_MAWC_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_NLO_CONFIGURE_MAWC_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_NLO_CONFIGURE_MAWC_CMDID failed, Error %d", + status); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_pno_start_cmd_tlv() - PNO start request + * @wmi_handle: wmi handle + * @pno: PNO request + * + * This function request FW to start PNO request. + * Request: CDF status + */ +static QDF_STATUS send_pno_start_cmd_tlv(wmi_unified_t wmi_handle, + struct pno_scan_req_params *pno) +{ + wmi_nlo_config_cmd_fixed_param *cmd; + nlo_configured_parameters *nlo_list; + uint32_t *channel_list; + int32_t len; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint8_t i; + int ret; + struct probe_req_whitelist_attr *ie_whitelist = &pno->ie_whitelist; + connected_nlo_rssi_params *nlo_relative_rssi; + connected_nlo_bss_band_rssi_pref *nlo_band_rssi; + + /* + * TLV place holder for array nlo_configured_parameters(nlo_list) + * TLV place holder for array of uint32_t channel_list + * TLV place holder for chnnl prediction cfg + * TLV place holder for array of wmi_vendor_oui + * TLV place holder for array of connected_nlo_bss_band_rssi_pref + */ + len = sizeof(*cmd) + + WMI_TLV_HDR_SIZE + WMI_TLV_HDR_SIZE + WMI_TLV_HDR_SIZE + + WMI_TLV_HDR_SIZE + WMI_TLV_HDR_SIZE; + + len += sizeof(uint32_t) * QDF_MIN(pno->networks_list[0].channel_cnt, + WMI_NLO_MAX_CHAN); + len += sizeof(nlo_configured_parameters) * + QDF_MIN(pno->networks_cnt, WMI_NLO_MAX_SSIDS); + len += sizeof(nlo_channel_prediction_cfg); + len += sizeof(enlo_candidate_score_params); + len += sizeof(wmi_vendor_oui) * ie_whitelist->num_vendor_oui; + len += sizeof(connected_nlo_rssi_params); + len += sizeof(connected_nlo_bss_band_rssi_pref); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_nlo_config_cmd_fixed_param *) wmi_buf_data(buf); + + buf_ptr = (uint8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_nlo_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_nlo_config_cmd_fixed_param)); + cmd->vdev_id = pno->vdev_id; + cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN; + +#ifdef FEATURE_WLAN_SCAN_PNO + WMI_SCAN_SET_DWELL_MODE(cmd->flags, + pno->adaptive_dwell_mode); +#endif + /* Current FW does not support min-max range for dwell time */ + cmd->active_dwell_time = pno->active_dwell_time; + cmd->passive_dwell_time = pno->passive_dwell_time; + + if (pno->do_passive_scan) + cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE; + /* Copy scan interval */ + cmd->fast_scan_period = pno->fast_scan_period; + cmd->slow_scan_period = pno->slow_scan_period; + cmd->delay_start_time = WMI_SEC_TO_MSEC(pno->delay_start_time); + cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles; + cmd->scan_backoff_multiplier = pno->scan_backoff_multiplier; + + /* mac randomization attributes */ + if (pno->scan_random.randomize) { + cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ | + WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ; + wmi_copy_scan_random_mac(pno->scan_random.mac_addr, + pno->scan_random.mac_mask, + &cmd->mac_addr, + &cmd->mac_mask); + } + + buf_ptr += sizeof(wmi_nlo_config_cmd_fixed_param); + + cmd->no_of_ssids = QDF_MIN(pno->networks_cnt, WMI_NLO_MAX_SSIDS); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + cmd->no_of_ssids * sizeof(nlo_configured_parameters)); + buf_ptr += WMI_TLV_HDR_SIZE; + + nlo_list = (nlo_configured_parameters *) buf_ptr; + for (i = 0; i < cmd->no_of_ssids; i++) { + WMITLV_SET_HDR(&nlo_list[i].tlv_header, + WMITLV_TAG_ARRAY_BYTE, + WMITLV_GET_STRUCT_TLVLEN + (nlo_configured_parameters)); + /* Copy ssid and it's length */ + nlo_list[i].ssid.valid = true; + nlo_list[i].ssid.ssid.ssid_len = + pno->networks_list[i].ssid.length; + qdf_mem_copy(nlo_list[i].ssid.ssid.ssid, + pno->networks_list[i].ssid.ssid, + nlo_list[i].ssid.ssid.ssid_len); + + /* Copy rssi threshold */ + if (pno->networks_list[i].rssi_thresh && + pno->networks_list[i].rssi_thresh > + WMI_RSSI_THOLD_DEFAULT) { + nlo_list[i].rssi_cond.valid = true; + nlo_list[i].rssi_cond.rssi = + pno->networks_list[i].rssi_thresh; + } + nlo_list[i].bcast_nw_type.valid = true; + nlo_list[i].bcast_nw_type.bcast_nw_type = + pno->networks_list[i].bc_new_type; + } + buf_ptr += cmd->no_of_ssids * sizeof(nlo_configured_parameters); + + /* Copy channel info */ + cmd->num_of_channels = QDF_MIN(pno->networks_list[0].channel_cnt, + WMI_NLO_MAX_CHAN); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (cmd->num_of_channels * sizeof(uint32_t))); + buf_ptr += WMI_TLV_HDR_SIZE; + + channel_list = (uint32_t *) buf_ptr; + for (i = 0; i < cmd->num_of_channels; i++) { + channel_list[i] = pno->networks_list[0].channels[i]; + + if (channel_list[i] < WMI_NLO_FREQ_THRESH) + channel_list[i] = + wlan_chan_to_freq(pno-> + networks_list[0].channels[i]); + } + buf_ptr += cmd->num_of_channels * sizeof(uint32_t); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(nlo_channel_prediction_cfg)); + buf_ptr += WMI_TLV_HDR_SIZE; + wmi_set_pno_channel_prediction(buf_ptr, pno); + buf_ptr += sizeof(nlo_channel_prediction_cfg); + /** TODO: Discrete firmware doesn't have command/option to configure + * App IE which comes from wpa_supplicant as of part PNO start request. + */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_STRUC_enlo_candidate_score_param, + WMITLV_GET_STRUCT_TLVLEN(enlo_candidate_score_params)); + buf_ptr += sizeof(enlo_candidate_score_params); + + if (ie_whitelist->white_list) { + cmd->flags |= WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ; + wmi_fill_ie_whitelist_attrs(cmd->ie_bitmap, + &cmd->num_vendor_oui, + ie_whitelist); + } + + /* ie white list */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + ie_whitelist->num_vendor_oui * sizeof(wmi_vendor_oui)); + buf_ptr += WMI_TLV_HDR_SIZE; + if (cmd->num_vendor_oui != 0) { + wmi_fill_vendor_oui(buf_ptr, cmd->num_vendor_oui, + ie_whitelist->voui); + buf_ptr += cmd->num_vendor_oui * sizeof(wmi_vendor_oui); + } + + if (pno->relative_rssi_set) + cmd->flags |= WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG; + + /* + * Firmware calculation using connected PNO params: + * New AP's RSSI >= (Connected AP's RSSI + relative_rssi +/- rssi_pref) + * deduction of rssi_pref for chosen band_pref and + * addition of rssi_pref for remaining bands (other than chosen band). + */ + nlo_relative_rssi = (connected_nlo_rssi_params *) buf_ptr; + WMITLV_SET_HDR(&nlo_relative_rssi->tlv_header, + WMITLV_TAG_STRUC_wmi_connected_nlo_rssi_params, + WMITLV_GET_STRUCT_TLVLEN(connected_nlo_rssi_params)); + nlo_relative_rssi->relative_rssi = pno->relative_rssi; + buf_ptr += sizeof(*nlo_relative_rssi); + + /* + * As of now Kernel and Host supports one band and rssi preference. + * Firmware supports array of band and rssi preferences + */ + cmd->num_cnlo_band_pref = 1; + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + cmd->num_cnlo_band_pref * + sizeof(connected_nlo_bss_band_rssi_pref)); + buf_ptr += WMI_TLV_HDR_SIZE; + + nlo_band_rssi = (connected_nlo_bss_band_rssi_pref *) buf_ptr; + for (i = 0; i < cmd->num_cnlo_band_pref; i++) { + WMITLV_SET_HDR(&nlo_band_rssi[i].tlv_header, + WMITLV_TAG_STRUC_wmi_connected_nlo_bss_band_rssi_pref, + WMITLV_GET_STRUCT_TLVLEN( + connected_nlo_bss_band_rssi_pref)); + nlo_band_rssi[i].band = pno->band_rssi_pref.band; + nlo_band_rssi[i].rssi_pref = pno->band_rssi_pref.rssi; + } + buf_ptr += cmd->num_cnlo_band_pref * sizeof(*nlo_band_rssi); + + wmi_mtrace(WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send nlo wmi cmd", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_LINK_LAYER_STATS +/** + * send_process_ll_stats_clear_cmd_tlv() - clear link layer stats + * @wmi_handle: wmi handle + * @clear_req: ll stats clear request command params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_process_ll_stats_clear_cmd_tlv(wmi_unified_t wmi_handle, + const struct ll_stats_clear_params *clear_req) +{ + wmi_clear_link_stats_cmd_fixed_param *cmd; + int32_t len; + wmi_buf_t buf; + uint8_t *buf_ptr; + int ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + cmd = (wmi_clear_link_stats_cmd_fixed_param *) buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_clear_link_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_clear_link_stats_cmd_fixed_param)); + + cmd->stop_stats_collection_req = clear_req->stop_req; + cmd->vdev_id = clear_req->vdev_id; + cmd->stats_clear_req_mask = clear_req->stats_clear_mask; + + WMI_CHAR_ARRAY_TO_MAC_ADDR(clear_req->peer_macaddr.bytes, + &cmd->peer_macaddr); + + WMI_LOGD("LINK_LAYER_STATS - Clear Request Params"); + WMI_LOGD("StopReq: %d", cmd->stop_stats_collection_req); + WMI_LOGD("Vdev Id: %d", cmd->vdev_id); + WMI_LOGD("Clear Stat Mask: %d", cmd->stats_clear_req_mask); + WMI_LOGD("Peer MAC Addr: "QDF_MAC_ADDR_FMT, + QDF_MAC_ADDR_REF(clear_req->peer_macaddr.bytes)); + + wmi_mtrace(WMI_CLEAR_LINK_STATS_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_CLEAR_LINK_STATS_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send clear link stats req", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + WMI_LOGD("Clear Link Layer Stats request sent successfully"); + return QDF_STATUS_SUCCESS; +} + +/** + * send_process_ll_stats_set_cmd_tlv() - link layer stats set request + * @wmi_handle: wmi handle + * @set_req: ll stats set request command params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_process_ll_stats_set_cmd_tlv(wmi_unified_t wmi_handle, + const struct ll_stats_set_params *set_req) +{ + wmi_start_link_stats_cmd_fixed_param *cmd; + int32_t len; + wmi_buf_t buf; + uint8_t *buf_ptr; + int ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + cmd = (wmi_start_link_stats_cmd_fixed_param *) buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_start_link_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_start_link_stats_cmd_fixed_param)); + + cmd->mpdu_size_threshold = set_req->mpdu_size_threshold; + cmd->aggressive_statistics_gathering = + set_req->aggressive_statistics_gathering; + + WMI_LOGD("LINK_LAYER_STATS - Start/Set Params MPDU Size Thresh : %d Aggressive Gather: %d", + cmd->mpdu_size_threshold, + cmd->aggressive_statistics_gathering); + + wmi_mtrace(WMI_START_LINK_STATS_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_START_LINK_STATS_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send set link stats request", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_process_ll_stats_get_cmd_tlv() - link layer stats get request + * @wmi_handle: wmi handle + * @get_req: ll stats get request command params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_process_ll_stats_get_cmd_tlv(wmi_unified_t wmi_handle, + const struct ll_stats_get_params *get_req) +{ + wmi_request_link_stats_cmd_fixed_param *cmd; + int32_t len; + wmi_buf_t buf; + uint8_t *buf_ptr; + int ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + cmd = (wmi_request_link_stats_cmd_fixed_param *) buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_request_link_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_request_link_stats_cmd_fixed_param)); + + cmd->request_id = get_req->req_id; + cmd->stats_type = get_req->param_id_mask; + cmd->vdev_id = get_req->vdev_id; + + WMI_CHAR_ARRAY_TO_MAC_ADDR(get_req->peer_macaddr.bytes, + &cmd->peer_macaddr); + + WMI_LOGD("LINK_LAYER_STATS - Get Request Params Request ID: %u Stats Type: %0x Vdev ID: %d Peer MAC Addr: "QDF_MAC_ADDR_FMT, + cmd->request_id, cmd->stats_type, cmd->vdev_id, + QDF_MAC_ADDR_REF(get_req->peer_macaddr.bytes)); + + wmi_mtrace(WMI_REQUEST_LINK_STATS_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send_pm_chk(wmi_handle, buf, len, + WMI_REQUEST_LINK_STATS_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send get link stats request", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_FEATURE_LINK_LAYER_STATS */ + +/** + * send_congestion_cmd_tlv() - send request to fw to get CCA + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: CDF status + */ +static QDF_STATUS send_congestion_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + wmi_buf_t buf; + wmi_request_stats_cmd_fixed_param *cmd; + uint8_t len; + uint8_t *buf_ptr; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + buf_ptr = wmi_buf_data(buf); + cmd = (wmi_request_stats_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_request_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_request_stats_cmd_fixed_param)); + + cmd->stats_id = WMI_REQUEST_CONGESTION_STAT; + cmd->vdev_id = vdev_id; + WMI_LOGD("STATS REQ VDEV_ID:%d stats_id %d -->", + cmd->vdev_id, cmd->stats_id); + + wmi_mtrace(WMI_REQUEST_STATS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_STATS_CMDID)) { + WMI_LOGE("%s: Failed to send WMI_REQUEST_STATS_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_snr_request_cmd_tlv() - send request to fw to get RSSI stats + * @wmi_handle: wmi handle + * @rssi_req: get RSSI request + * + * Return: CDF status + */ +static QDF_STATUS send_snr_request_cmd_tlv(wmi_unified_t wmi_handle) +{ + wmi_buf_t buf; + wmi_request_stats_cmd_fixed_param *cmd; + uint8_t len = sizeof(wmi_request_stats_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_request_stats_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_request_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_request_stats_cmd_fixed_param)); + cmd->stats_id = WMI_REQUEST_VDEV_STAT; + wmi_mtrace(WMI_REQUEST_STATS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_STATS_CMDID)) { + WMI_LOGE("Failed to send host stats request to fw"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_snr_cmd_tlv() - get RSSI from fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: CDF status + */ +static QDF_STATUS send_snr_cmd_tlv(wmi_unified_t wmi_handle, uint8_t vdev_id) +{ + wmi_buf_t buf; + wmi_request_stats_cmd_fixed_param *cmd; + uint8_t len = sizeof(wmi_request_stats_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_request_stats_cmd_fixed_param *) wmi_buf_data(buf); + cmd->vdev_id = vdev_id; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_request_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_request_stats_cmd_fixed_param)); + cmd->stats_id = WMI_REQUEST_VDEV_STAT; + wmi_mtrace(WMI_REQUEST_STATS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_STATS_CMDID)) { + WMI_LOGE("Failed to send host stats request to fw"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_link_status_req_cmd_tlv() - process link status request from UMAC + * @wmi_handle: wmi handle + * @link_status: get link params + * + * Return: CDF status + */ +static QDF_STATUS send_link_status_req_cmd_tlv(wmi_unified_t wmi_handle, + struct link_status_params *link_status) +{ + wmi_buf_t buf; + wmi_request_stats_cmd_fixed_param *cmd; + uint8_t len = sizeof(wmi_request_stats_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_request_stats_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_request_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_request_stats_cmd_fixed_param)); + cmd->stats_id = WMI_REQUEST_VDEV_RATE_STAT; + cmd->vdev_id = link_status->vdev_id; + wmi_mtrace(WMI_REQUEST_STATS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_STATS_CMDID)) { + WMI_LOGE("Failed to send WMI link status request to fw"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_SUPPORT_GREEN_AP +/** + * send_egap_conf_params_cmd_tlv() - send wmi cmd of egap configuration params + * @wmi_handle: wmi handler + * @egap_params: pointer to egap_params + * + * Return: 0 for success, otherwise appropriate error code + */ +static QDF_STATUS send_egap_conf_params_cmd_tlv(wmi_unified_t wmi_handle, + struct wlan_green_ap_egap_params *egap_params) +{ + wmi_ap_ps_egap_param_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t err; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_ap_ps_egap_param_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ap_ps_egap_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_ap_ps_egap_param_cmd_fixed_param)); + + cmd->enable = egap_params->host_enable_egap; + cmd->inactivity_time = egap_params->egap_inactivity_time; + cmd->wait_time = egap_params->egap_wait_time; + cmd->flags = egap_params->egap_feature_flags; + wmi_mtrace(WMI_AP_PS_EGAP_PARAM_CMDID, NO_SESSION, 0); + err = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(*cmd), WMI_AP_PS_EGAP_PARAM_CMDID); + if (err) { + WMI_LOGE("Failed to send ap_ps_egap cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * wmi_unified_csa_offload_enable() - sen CSA offload enable command + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_csa_offload_enable_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + wmi_csa_offload_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + WMI_LOGD("%s: vdev_id %d", __func__, vdev_id); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_csa_offload_enable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_csa_offload_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_csa_offload_enable_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->csa_offload_enable = WMI_CSA_OFFLOAD_ENABLE; + wmi_mtrace(WMI_CSA_OFFLOAD_ENABLE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_CSA_OFFLOAD_ENABLE_CMDID)) { + WMI_LOGP("%s: Failed to send CSA offload enable command", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +#ifdef WLAN_FEATURE_CIF_CFR +/** + * send_oem_dma_cfg_cmd_tlv() - configure OEM DMA rings + * @wmi_handle: wmi handle + * @data_len: len of dma cfg req + * @data: dma cfg req + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +static QDF_STATUS send_oem_dma_cfg_cmd_tlv(wmi_unified_t wmi_handle, + wmi_oem_dma_ring_cfg_req_fixed_param *cfg) +{ + wmi_buf_t buf; + uint8_t *cmd; + QDF_STATUS ret; + + WMITLV_SET_HDR(cfg, + WMITLV_TAG_STRUC_wmi_oem_dma_ring_cfg_req_fixed_param, + (sizeof(*cfg) - WMI_TLV_HDR_SIZE)); + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cfg)); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (uint8_t *) wmi_buf_data(buf); + qdf_mem_copy(cmd, cfg, sizeof(*cfg)); + wmi_debug("Sending OEM Data Request to target, data len %lu"), + sizeof(*cfg); + wmi_mtrace(WMI_OEM_DMA_RING_CFG_REQ_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cfg), + WMI_OEM_DMA_RING_CFG_REQ_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL(":wmi cmd send failed")); + wmi_buf_free(buf); + } + + return ret; +} +#endif + +/** + * send_start_11d_scan_cmd_tlv() - start 11d scan request + * @wmi_handle: wmi handle + * @start_11d_scan: 11d scan start request parameters + * + * This function request FW to start 11d scan. + * + * Return: QDF status + */ +static QDF_STATUS send_start_11d_scan_cmd_tlv(wmi_unified_t wmi_handle, + struct reg_start_11d_scan_req *start_11d_scan) +{ + wmi_11d_scan_start_cmd_fixed_param *cmd; + int32_t len; + wmi_buf_t buf; + int ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_11d_scan_start_cmd_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_11d_scan_start_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_11d_scan_start_cmd_fixed_param)); + + cmd->vdev_id = start_11d_scan->vdev_id; + cmd->scan_period_msec = start_11d_scan->scan_period_msec; + cmd->start_interval_msec = start_11d_scan->start_interval_msec; + + WMI_LOGD("vdev %d sending 11D scan start req", cmd->vdev_id); + + wmi_mtrace(WMI_11D_SCAN_START_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_11D_SCAN_START_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send start 11d scan wmi cmd", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_stop_11d_scan_cmd_tlv() - stop 11d scan request + * @wmi_handle: wmi handle + * @start_11d_scan: 11d scan stop request parameters + * + * This function request FW to stop 11d scan. + * + * Return: QDF status + */ +static QDF_STATUS send_stop_11d_scan_cmd_tlv(wmi_unified_t wmi_handle, + struct reg_stop_11d_scan_req *stop_11d_scan) +{ + wmi_11d_scan_stop_cmd_fixed_param *cmd; + int32_t len; + wmi_buf_t buf; + int ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_11d_scan_stop_cmd_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_11d_scan_stop_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_11d_scan_stop_cmd_fixed_param)); + + cmd->vdev_id = stop_11d_scan->vdev_id; + + WMI_LOGD("vdev %d sending 11D scan stop req", cmd->vdev_id); + + wmi_mtrace(WMI_11D_SCAN_STOP_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_11D_SCAN_STOP_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send stop 11d scan wmi cmd", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_start_oem_data_cmd_tlv() - start OEM data request to target + * @wmi_handle: wmi handle + * @data_len: the length of @data + * @data: the pointer to data buf + * + * Return: CDF status + */ +static QDF_STATUS send_start_oem_data_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t data_len, + uint8_t *data) +{ + wmi_buf_t buf; + uint8_t *cmd; + QDF_STATUS ret; + + buf = wmi_buf_alloc(wmi_handle, + (data_len + WMI_TLV_HDR_SIZE)); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (uint8_t *) wmi_buf_data(buf); + + WMITLV_SET_HDR(cmd, WMITLV_TAG_ARRAY_BYTE, data_len); + cmd += WMI_TLV_HDR_SIZE; + qdf_mem_copy(cmd, data, + data_len); + + WMI_LOGD(FL("Sending OEM Data Request to target, data len %d"), + data_len); + + wmi_mtrace(WMI_OEM_REQ_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, + (data_len + + WMI_TLV_HDR_SIZE), WMI_OEM_REQ_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL(":wmi cmd send failed")); + wmi_buf_free(buf); + } + + return ret; +} + +#ifdef FEATURE_OEM_DATA +/** + * send_start_oemv2_data_cmd_tlv() - start OEM data to target + * @wmi_handle: wmi handle + * @oem_data: the pointer to oem data + * + * Return: QDF status + */ +static QDF_STATUS send_start_oemv2_data_cmd_tlv(wmi_unified_t wmi_handle, + struct oem_data *oem_data) +{ + QDF_STATUS ret; + wmi_oem_data_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(*cmd); + uint16_t oem_data_len_aligned; + uint8_t *buf_ptr; + + if (!oem_data || !oem_data->data) { + wmi_err_rl("oem data is not valid"); + return QDF_STATUS_E_FAILURE; + } + oem_data_len_aligned = roundup(oem_data->data_len, sizeof(uint32_t)); + if (oem_data_len_aligned < oem_data->data_len) { + wmi_err_rl("integer overflow while rounding up data_len"); + return QDF_STATUS_E_FAILURE; + } + + if (oem_data_len_aligned > WMI_SVC_MSG_MAX_SIZE - WMI_TLV_HDR_SIZE) { + wmi_err_rl("wmi_max_msg_size overflow for given data_len"); + return QDF_STATUS_E_FAILURE; + } + + len += WMI_TLV_HDR_SIZE + oem_data_len_aligned; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_oem_data_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_oem_data_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_oem_data_cmd_fixed_param)); + + cmd->vdev_id = oem_data->vdev_id; + cmd->data_len = oem_data->data_len; + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, oem_data_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, oem_data->data, oem_data->data_len); + + wmi_mtrace(WMI_OEM_DATA_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, WMI_OEM_DATA_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + wmi_err_rl("Failed with ret = %d", ret); + wmi_buf_free(buf); + } + + return ret; +} +#endif + +/** + * send_dfs_phyerr_filter_offload_en_cmd_tlv() - enable dfs phyerr filter + * @wmi_handle: wmi handle + * @dfs_phyerr_filter_offload: is dfs phyerr filter offload + * + * Send WMI_DFS_PHYERR_FILTER_ENA_CMDID or + * WMI_DFS_PHYERR_FILTER_DIS_CMDID command + * to firmware based on phyerr filtering + * offload status. + * + * Return: 1 success, 0 failure + */ +static QDF_STATUS +send_dfs_phyerr_filter_offload_en_cmd_tlv(wmi_unified_t wmi_handle, + bool dfs_phyerr_filter_offload) +{ + wmi_dfs_phyerr_filter_ena_cmd_fixed_param *enable_phyerr_offload_cmd; + wmi_dfs_phyerr_filter_dis_cmd_fixed_param *disable_phyerr_offload_cmd; + wmi_buf_t buf; + uint16_t len; + QDF_STATUS ret; + + + if (false == dfs_phyerr_filter_offload) { + WMI_LOGD("%s:Phyerror Filtering offload is Disabled in ini", + __func__); + len = sizeof(*disable_phyerr_offload_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return 0; + + disable_phyerr_offload_cmd = + (wmi_dfs_phyerr_filter_dis_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&disable_phyerr_offload_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_dfs_phyerr_filter_dis_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_dfs_phyerr_filter_dis_cmd_fixed_param)); + + /* + * Send WMI_DFS_PHYERR_FILTER_DIS_CMDID + * to the firmware to disable the phyerror + * filtering offload. + */ + wmi_mtrace(WMI_DFS_PHYERR_FILTER_DIS_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_DFS_PHYERR_FILTER_DIS_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send WMI_DFS_PHYERR_FILTER_DIS_CMDID ret=%d", + __func__, ret); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s: WMI_DFS_PHYERR_FILTER_DIS_CMDID Send Success", + __func__); + } else { + WMI_LOGD("%s:Phyerror Filtering offload is Enabled in ini", + __func__); + + len = sizeof(*enable_phyerr_offload_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + enable_phyerr_offload_cmd = + (wmi_dfs_phyerr_filter_ena_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&enable_phyerr_offload_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_dfs_phyerr_filter_ena_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_dfs_phyerr_filter_ena_cmd_fixed_param)); + + /* + * Send a WMI_DFS_PHYERR_FILTER_ENA_CMDID + * to the firmware to enable the phyerror + * filtering offload. + */ + wmi_mtrace(WMI_DFS_PHYERR_FILTER_ENA_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_DFS_PHYERR_FILTER_ENA_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send DFS PHYERR CMD ret=%d", + __func__, ret); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s: WMI_DFS_PHYERR_FILTER_ENA_CMDID Send Success", + __func__); + } + + return QDF_STATUS_SUCCESS; +} + +#if !defined(REMOVE_PKT_LOG) && defined(FEATURE_PKTLOG) +/** + * send_pktlog_wmi_send_cmd_tlv() - send pktlog enable/disable command to target + * @wmi_handle: wmi handle + * @pktlog_event: pktlog event + * @cmd_id: pktlog cmd id + * @user_triggered: user triggered input for PKTLOG enable mode + * + * Return: CDF status + */ +static QDF_STATUS send_pktlog_wmi_send_cmd_tlv(wmi_unified_t wmi_handle, + WMI_PKTLOG_EVENT pktlog_event, + WMI_CMD_ID cmd_id, uint8_t user_triggered) +{ + WMI_PKTLOG_EVENT PKTLOG_EVENT; + WMI_CMD_ID CMD_ID; + wmi_pdev_pktlog_enable_cmd_fixed_param *cmd; + wmi_pdev_pktlog_disable_cmd_fixed_param *disable_cmd; + int len = 0; + wmi_buf_t buf; + + PKTLOG_EVENT = pktlog_event; + CMD_ID = cmd_id; + + switch (CMD_ID) { + case WMI_PDEV_PKTLOG_ENABLE_CMDID: + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_pdev_pktlog_enable_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_pktlog_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_pktlog_enable_cmd_fixed_param)); + cmd->evlist = PKTLOG_EVENT; + cmd->enable = user_triggered ? WMI_PKTLOG_ENABLE_FORCE + : WMI_PKTLOG_ENABLE_AUTO; + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + WMI_HOST_PDEV_ID_SOC); + wmi_mtrace(WMI_PDEV_PKTLOG_ENABLE_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_PKTLOG_ENABLE_CMDID)) { + WMI_LOGE("failed to send pktlog enable cmdid"); + goto wmi_send_failed; + } + break; + case WMI_PDEV_PKTLOG_DISABLE_CMDID: + len = sizeof(*disable_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + disable_cmd = (wmi_pdev_pktlog_disable_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&disable_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_pktlog_disable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_pktlog_disable_cmd_fixed_param)); + disable_cmd->pdev_id = + wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + WMI_HOST_PDEV_ID_SOC); + wmi_mtrace(WMI_PDEV_PKTLOG_DISABLE_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_PKTLOG_DISABLE_CMDID)) { + WMI_LOGE("failed to send pktlog disable cmdid"); + goto wmi_send_failed; + } + break; + default: + WMI_LOGD("%s: invalid PKTLOG command", __func__); + break; + } + + return QDF_STATUS_SUCCESS; + +wmi_send_failed: + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; +} +#endif /* !REMOVE_PKT_LOG && FEATURE_PKTLOG */ + +/** + * send_stats_ext_req_cmd_tlv() - request ext stats from fw + * @wmi_handle: wmi handle + * @preq: stats ext params + * + * Return: CDF status + */ +static QDF_STATUS send_stats_ext_req_cmd_tlv(wmi_unified_t wmi_handle, + struct stats_ext_params *preq) +{ + QDF_STATUS ret; + wmi_req_stats_ext_cmd_fixed_param *cmd; + wmi_buf_t buf; + size_t len; + uint8_t *buf_ptr; + uint16_t max_wmi_msg_size = wmi_get_max_msg_len(wmi_handle); + + if (preq->request_data_len > (max_wmi_msg_size - WMI_TLV_HDR_SIZE - + sizeof(*cmd))) { + wmi_err("Data length=%d is greater than max wmi msg size", + preq->request_data_len); + return QDF_STATUS_E_FAILURE; + } + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + preq->request_data_len; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_req_stats_ext_cmd_fixed_param *) buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_req_stats_ext_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_req_stats_ext_cmd_fixed_param)); + cmd->vdev_id = preq->vdev_id; + cmd->data_len = preq->request_data_len; + + WMI_LOGD("%s: The data len value is %u and vdev id set is %u ", + __func__, preq->request_data_len, preq->vdev_id); + + buf_ptr += sizeof(wmi_req_stats_ext_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, cmd->data_len); + + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, preq->request_data, cmd->data_len); + + wmi_mtrace(WMI_REQUEST_STATS_EXT_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_STATS_EXT_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send notify cmd ret = %d", __func__, + ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_process_dhcpserver_offload_cmd_tlv() - enable DHCP server offload + * @wmi_handle: wmi handle + * @params: DHCP server offload info + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +send_process_dhcpserver_offload_cmd_tlv(wmi_unified_t wmi_handle, + struct dhcp_offload_info_params *params) +{ + wmi_set_dhcp_server_offload_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_set_dhcp_server_offload_cmd_fixed_param *) wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_set_dhcp_server_offload_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_set_dhcp_server_offload_cmd_fixed_param)); + cmd->vdev_id = params->vdev_id; + cmd->enable = params->dhcp_offload_enabled; + cmd->num_client = params->dhcp_client_num; + cmd->srv_ipv4 = params->dhcp_srv_addr; + cmd->start_lsb = 0; + wmi_mtrace(WMI_SET_DHCP_SERVER_OFFLOAD_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(*cmd), + WMI_SET_DHCP_SERVER_OFFLOAD_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send set_dhcp_server_offload cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("Set dhcp server offload to vdevId %d", + params->vdev_id); + + return status; +} + +/** + * send_pdev_set_regdomain_cmd_tlv() - send set regdomain command to fw + * @wmi_handle: wmi handle + * @param: pointer to pdev regdomain params + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_pdev_set_regdomain_cmd_tlv(wmi_unified_t wmi_handle, + struct pdev_set_regdomain_params *param) +{ + wmi_buf_t buf; + wmi_pdev_set_regdomain_cmd_fixed_param *cmd; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_pdev_set_regdomain_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_regdomain_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_set_regdomain_cmd_fixed_param)); + + cmd->reg_domain = param->currentRDinuse; + cmd->reg_domain_2G = param->currentRD2G; + cmd->reg_domain_5G = param->currentRD5G; + cmd->conformance_test_limit_2G = param->ctl_2G; + cmd->conformance_test_limit_5G = param->ctl_5G; + cmd->dfs_domain = param->dfsDomain; + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + param->pdev_id); + + wmi_mtrace(WMI_PDEV_SET_REGDOMAIN_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_REGDOMAIN_CMDID)) { + WMI_LOGE("%s: Failed to send pdev set regdomain command", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_regdomain_info_to_fw_cmd_tlv() - send regdomain info to fw + * @wmi_handle: wmi handle + * @reg_dmn: reg domain + * @regdmn2G: 2G reg domain + * @regdmn5G: 5G reg domain + * @ctl2G: 2G test limit + * @ctl5G: 5G test limit + * + * Return: none + */ +static QDF_STATUS send_regdomain_info_to_fw_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t reg_dmn, uint16_t regdmn2G, + uint16_t regdmn5G, uint8_t ctl2G, + uint8_t ctl5G) +{ + wmi_buf_t buf; + wmi_pdev_set_regdomain_cmd_fixed_param *cmd; + int32_t len = sizeof(*cmd); + + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_pdev_set_regdomain_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_regdomain_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_set_regdomain_cmd_fixed_param)); + cmd->reg_domain = reg_dmn; + cmd->reg_domain_2G = regdmn2G; + cmd->reg_domain_5G = regdmn5G; + cmd->conformance_test_limit_2G = ctl2G; + cmd->conformance_test_limit_5G = ctl5G; + + wmi_debug("regd = %x, regd_2g = %x, regd_5g = %x, ctl_2g = %x, ctl_5g = %x", + cmd->reg_domain, cmd->reg_domain_2G, cmd->reg_domain_5G, + cmd->conformance_test_limit_2G, + cmd->conformance_test_limit_5G); + + wmi_mtrace(WMI_PDEV_SET_REGDOMAIN_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_REGDOMAIN_CMDID)) { + WMI_LOGP("%s: Failed to send pdev set regdomain command", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * copy_custom_aggr_bitmap() - copies host side bitmap using FW APIs + * @param: param sent from the host side + * @cmd: param to be sent to the fw side + */ +static inline void copy_custom_aggr_bitmap( + struct set_custom_aggr_size_params *param, + wmi_vdev_set_custom_aggr_size_cmd_fixed_param *cmd) +{ + WMI_VDEV_CUSTOM_AGGR_AC_SET(cmd->enable_bitmap, + param->ac); + WMI_VDEV_CUSTOM_AGGR_TYPE_SET(cmd->enable_bitmap, + param->aggr_type); + WMI_VDEV_CUSTOM_TX_AGGR_SZ_DIS_SET(cmd->enable_bitmap, + param->tx_aggr_size_disable); + WMI_VDEV_CUSTOM_RX_AGGR_SZ_DIS_SET(cmd->enable_bitmap, + param->rx_aggr_size_disable); + WMI_VDEV_CUSTOM_TX_AC_EN_SET(cmd->enable_bitmap, + param->tx_ac_enable); +} + +/** + * send_vdev_set_custom_aggr_size_cmd_tlv() - custom aggr size param in fw + * @wmi_handle: wmi handle + * @param: pointer to hold custom aggr size params + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_vdev_set_custom_aggr_size_cmd_tlv( + wmi_unified_t wmi_handle, + struct set_custom_aggr_size_params *param) +{ + wmi_vdev_set_custom_aggr_size_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_vdev_set_custom_aggr_size_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_custom_aggr_size_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_set_custom_aggr_size_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->tx_aggr_size = param->tx_aggr_size; + cmd->rx_aggr_size = param->rx_aggr_size; + copy_custom_aggr_bitmap(param, cmd); + + WMI_LOGD("Set custom aggr: vdev id=0x%X, tx aggr size=0x%X " + "rx_aggr_size=0x%X access category=0x%X, agg_type=0x%X " + "tx_aggr_size_disable=0x%X, rx_aggr_size_disable=0x%X " + "tx_ac_enable=0x%X", + param->vdev_id, param->tx_aggr_size, param->rx_aggr_size, + param->ac, param->aggr_type, param->tx_aggr_size_disable, + param->rx_aggr_size_disable, param->tx_ac_enable); + + wmi_mtrace(WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID)) { + WMI_LOGE("Seting custom aggregation size failed"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_vdev_set_qdepth_thresh_cmd_tlv() - WMI set qdepth threshold + * @param wmi_handle : handle to WMI. + * @param param : pointer to tx antenna param + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ + +static QDF_STATUS send_vdev_set_qdepth_thresh_cmd_tlv(wmi_unified_t wmi_handle, + struct set_qdepth_thresh_params *param) +{ + wmi_peer_tid_msduq_qdepth_thresh_update_cmd_fixed_param *cmd; + wmi_msduq_qdepth_thresh_update *cmd_update; + wmi_buf_t buf; + int32_t len = 0; + int i; + uint8_t *buf_ptr; + QDF_STATUS ret; + + if (param->num_of_msduq_updates > QDEPTH_THRESH_MAX_UPDATES) { + WMI_LOGE("%s: Invalid Update Count!", __func__); + return QDF_STATUS_E_INVAL; + } + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + len += (sizeof(wmi_msduq_qdepth_thresh_update) * + param->num_of_msduq_updates); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_peer_tid_msduq_qdepth_thresh_update_cmd_fixed_param *) + buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_tid_msduq_qdepth_thresh_update_cmd_fixed_param + , WMITLV_GET_STRUCT_TLVLEN( + wmi_peer_tid_msduq_qdepth_thresh_update_cmd_fixed_param)); + + cmd->pdev_id = + wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + param->pdev_id); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->mac_addr, &cmd->peer_mac_address); + cmd->num_of_msduq_updates = param->num_of_msduq_updates; + + buf_ptr += sizeof( + wmi_peer_tid_msduq_qdepth_thresh_update_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + param->num_of_msduq_updates * + sizeof(wmi_msduq_qdepth_thresh_update)); + buf_ptr += WMI_TLV_HDR_SIZE; + cmd_update = (wmi_msduq_qdepth_thresh_update *)buf_ptr; + + for (i = 0; i < cmd->num_of_msduq_updates; i++) { + WMITLV_SET_HDR(&cmd_update->tlv_header, + WMITLV_TAG_STRUC_wmi_msduq_qdepth_thresh_update, + WMITLV_GET_STRUCT_TLVLEN( + wmi_msduq_qdepth_thresh_update)); + cmd_update->tid_num = param->update_params[i].tid_num; + cmd_update->msduq_update_mask = + param->update_params[i].msduq_update_mask; + cmd_update->qdepth_thresh_value = + param->update_params[i].qdepth_thresh_value; + WMI_LOGD("Set QDepth Threshold: vdev=0x%X pdev=0x%X, tid=0x%X " + "mac_addr_upper4=%X, mac_addr_lower2:%X," + " update mask=0x%X thresh val=0x%X", + cmd->vdev_id, cmd->pdev_id, cmd_update->tid_num, + cmd->peer_mac_address.mac_addr31to0, + cmd->peer_mac_address.mac_addr47to32, + cmd_update->msduq_update_mask, + cmd_update->qdepth_thresh_value); + cmd_update++; + } + + wmi_mtrace(WMI_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMDID, + cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMDID); + + if (ret != 0) { + WMI_LOGE(" %s :WMI Failed", __func__); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_set_vap_dscp_tid_map_cmd_tlv() - send vap dscp tid map cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold vap dscp tid map param + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS +send_set_vap_dscp_tid_map_cmd_tlv(wmi_unified_t wmi_handle, + struct vap_dscp_tid_map_params *param) +{ + wmi_buf_t buf; + wmi_vdev_set_dscp_tid_map_cmd_fixed_param *cmd; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_vdev_set_dscp_tid_map_cmd_fixed_param *)wmi_buf_data(buf); + qdf_mem_copy(cmd->dscp_to_tid_map, param->dscp_to_tid_map, + sizeof(uint32_t) * WMI_DSCP_MAP_MAX); + + cmd->vdev_id = param->vdev_id; + cmd->enable_override = 0; + + wmi_debug("Setting dscp for vap id: %d", cmd->vdev_id); + wmi_mtrace(WMI_VDEV_SET_DSCP_TID_MAP_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SET_DSCP_TID_MAP_CMDID)) { + WMI_LOGE("Failed to set dscp cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_vdev_set_fwtest_param_cmd_tlv() - send fwtest param in fw + * @wmi_handle: wmi handle + * @param: pointer to hold fwtest param + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_vdev_set_fwtest_param_cmd_tlv(wmi_unified_t wmi_handle, + struct set_fwtest_params *param) +{ + wmi_fwtest_set_param_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_fwtest_set_param_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_fwtest_set_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_fwtest_set_param_cmd_fixed_param)); + cmd->param_id = param->arg; + cmd->param_value = param->value; + + wmi_mtrace(WMI_FWTEST_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, WMI_FWTEST_CMDID)) { + WMI_LOGE("Setting FW test param failed"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_phyerr_disable_cmd_tlv() - WMI phyerr disable function + * + * @param wmi_handle : handle to WMI. + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_phyerr_disable_cmd_tlv(wmi_unified_t wmi_handle) +{ + wmi_pdev_dfs_disable_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + int32_t len; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_pdev_dfs_disable_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_dfs_disable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_dfs_disable_cmd_fixed_param)); + /* Filling it with WMI_PDEV_ID_SOC for now */ + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + WMI_HOST_PDEV_ID_SOC); + + wmi_mtrace(WMI_PDEV_DFS_DISABLE_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_PDEV_DFS_DISABLE_CMDID); + + if (ret != 0) { + WMI_LOGE("Sending PDEV DFS disable cmd failed"); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_phyerr_enable_cmd_tlv() - WMI phyerr disable function + * + * @param wmi_handle : handle to WMI. + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_phyerr_enable_cmd_tlv(wmi_unified_t wmi_handle) +{ + wmi_pdev_dfs_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + int32_t len; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_pdev_dfs_enable_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_dfs_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_dfs_enable_cmd_fixed_param)); + /* Reserved for future use */ + cmd->reserved0 = 0; + + wmi_mtrace(WMI_PDEV_DFS_ENABLE_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_PDEV_DFS_ENABLE_CMDID); + + if (ret != 0) { + WMI_LOGE("Sending PDEV DFS enable cmd failed"); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_periodic_chan_stats_config_cmd_tlv() - send periodic chan stats cmd + * to fw + * @wmi_handle: wmi handle + * @param: pointer to hold periodic chan stats param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_periodic_chan_stats_config_cmd_tlv(wmi_unified_t wmi_handle, + struct periodic_chan_stats_params *param) +{ + wmi_set_periodic_channel_stats_config_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + int32_t len; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_set_periodic_channel_stats_config_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_set_periodic_channel_stats_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_set_periodic_channel_stats_config_fixed_param)); + cmd->enable = param->enable; + cmd->stats_period = param->stats_period; + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + param->pdev_id); + + wmi_mtrace(WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG_CMDID); + + if (ret != 0) { + WMI_LOGE("Sending periodic chan stats config failed"); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_vdev_spectral_configure_cmd_tlv() - send VDEV spectral configure + * command to fw + * @wmi_handle: wmi handle + * @param: pointer to hold spectral config parameter + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_vdev_spectral_configure_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_spectral_configure_params *param) +{ + wmi_vdev_spectral_configure_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + int32_t len; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_vdev_spectral_configure_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_spectral_configure_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_spectral_configure_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + cmd->spectral_scan_count = param->count; + cmd->spectral_scan_period = param->period; + cmd->spectral_scan_priority = param->spectral_pri; + cmd->spectral_scan_fft_size = param->fft_size; + cmd->spectral_scan_gc_ena = param->gc_enable; + cmd->spectral_scan_restart_ena = param->restart_enable; + cmd->spectral_scan_noise_floor_ref = param->noise_floor_ref; + cmd->spectral_scan_init_delay = param->init_delay; + cmd->spectral_scan_nb_tone_thr = param->nb_tone_thr; + cmd->spectral_scan_str_bin_thr = param->str_bin_thr; + cmd->spectral_scan_wb_rpt_mode = param->wb_rpt_mode; + cmd->spectral_scan_rssi_rpt_mode = param->rssi_rpt_mode; + cmd->spectral_scan_rssi_thr = param->rssi_thr; + cmd->spectral_scan_pwr_format = param->pwr_format; + cmd->spectral_scan_rpt_mode = param->rpt_mode; + cmd->spectral_scan_bin_scale = param->bin_scale; + cmd->spectral_scan_dBm_adj = param->dbm_adj; + cmd->spectral_scan_chn_mask = param->chn_mask; + cmd->spectral_scan_mode = param->mode; + cmd->spectral_scan_center_freq = param->center_freq; + /* Not used, fill with zeros */ + cmd->spectral_scan_chan_freq = 0; + cmd->spectral_scan_chan_width = 0; + + wmi_mtrace(WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID); + + if (ret != 0) { + WMI_LOGE("Sending set quiet cmd failed"); + wmi_buf_free(buf); + } + + wmi_debug("Sent WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID"); + wmi_debug("vdev_id: %u spectral_scan_count: %u", + param->vdev_id, param->count); + wmi_debug("spectral_scan_period: %u spectral_scan_priority: %u", + param->period, param->spectral_pri); + wmi_debug("spectral_scan_fft_size: %u spectral_scan_gc_ena: %u", + param->fft_size, param->gc_enable); + wmi_debug("spectral_scan_restart_ena: %u", param->restart_enable); + wmi_debug("spectral_scan_noise_floor_ref: %u", param->noise_floor_ref); + wmi_debug("spectral_scan_init_delay: %u", param->init_delay); + wmi_debug("spectral_scan_nb_tone_thr: %u", param->nb_tone_thr); + wmi_debug("spectral_scan_str_bin_thr: %u", param->str_bin_thr); + wmi_debug("spectral_scan_wb_rpt_mode: %u", param->wb_rpt_mode); + wmi_debug("spectral_scan_rssi_rpt_mode: %u", param->rssi_rpt_mode); + wmi_debug("spectral_scan_rssi_thr: %u spectral_scan_pwr_format: %u", + param->rssi_thr, param->pwr_format); + wmi_debug("spectral_scan_rpt_mode: %u spectral_scan_bin_scale: %u", + param->rpt_mode, param->bin_scale); + wmi_debug("spectral_scan_dBm_adj: %u spectral_scan_chn_mask: %u", + param->dbm_adj, param->chn_mask); + wmi_debug("spectral_scan_mode: %u spectral_scan_center_freq: %u", + param->mode, param->center_freq); + wmi_debug("spectral_scan_chan_freq: %u", param->chan_freq); + wmi_debug("spectral_scan_chan_width: %u Status: %d", + param->chan_width, ret); + + return ret; +} + +/** + * send_vdev_spectral_enable_cmd_tlv() - send VDEV spectral configure + * command to fw + * @wmi_handle: wmi handle + * @param: pointer to hold spectral enable parameter + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_vdev_spectral_enable_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_spectral_enable_params *param) +{ + wmi_vdev_spectral_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + int32_t len; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_vdev_spectral_enable_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_spectral_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_spectral_enable_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + + if (param->active_valid) { + cmd->trigger_cmd = param->active ? 1 : 2; + /* 1: Trigger, 2: Clear Trigger */ + } else { + cmd->trigger_cmd = 0; /* 0: Ignore */ + } + + if (param->enabled_valid) { + cmd->enable_cmd = param->enabled ? 1 : 2; + /* 1: Enable 2: Disable */ + } else { + cmd->enable_cmd = 0; /* 0: Ignore */ + } + cmd->spectral_scan_mode = param->mode; + + wmi_debug("vdev_id = %u trigger_cmd = %u enable_cmd = %u", + cmd->vdev_id, cmd->trigger_cmd, cmd->enable_cmd); + wmi_debug("spectral_scan_mode = %u", cmd->spectral_scan_mode); + + wmi_mtrace(WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID); + + if (ret != 0) { + WMI_LOGE("Sending scan enable CMD failed"); + wmi_buf_free(buf); + } + + wmi_debug("Sent WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, Status: %d", + ret); + + return ret; +} + +/** + * send_thermal_mitigation_param_cmd_tlv() - configure thermal mitigation params + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold thermal mitigation param + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_thermal_mitigation_param_cmd_tlv( + wmi_unified_t wmi_handle, + struct thermal_mitigation_params *param) +{ + wmi_therm_throt_config_request_fixed_param *tt_conf = NULL; + wmi_therm_throt_level_config_info *lvl_conf = NULL; + wmi_buf_t buf = NULL; + uint8_t *buf_ptr = NULL; + int error; + int32_t len; + int i; + + len = sizeof(*tt_conf) + WMI_TLV_HDR_SIZE + + param->num_thermal_conf * + sizeof(wmi_therm_throt_level_config_info); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + tt_conf = (wmi_therm_throt_config_request_fixed_param *) wmi_buf_data(buf); + + /* init fixed params */ + WMITLV_SET_HDR(tt_conf, + WMITLV_TAG_STRUC_wmi_therm_throt_config_request_fixed_param, + (WMITLV_GET_STRUCT_TLVLEN(wmi_therm_throt_config_request_fixed_param))); + + tt_conf->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + param->pdev_id); + tt_conf->enable = param->enable; + tt_conf->dc = param->dc; + tt_conf->dc_per_event = param->dc_per_event; + tt_conf->therm_throt_levels = param->num_thermal_conf; + + buf_ptr = (uint8_t *) ++tt_conf; + /* init TLV params */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + (param->num_thermal_conf * + sizeof(wmi_therm_throt_level_config_info))); + + lvl_conf = (wmi_therm_throt_level_config_info *) (buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < param->num_thermal_conf; i++) { + WMITLV_SET_HDR(&lvl_conf->tlv_header, + WMITLV_TAG_STRUC_wmi_therm_throt_level_config_info, + WMITLV_GET_STRUCT_TLVLEN(wmi_therm_throt_level_config_info)); + lvl_conf->temp_lwm = param->levelconf[i].tmplwm; + lvl_conf->temp_hwm = param->levelconf[i].tmphwm; + lvl_conf->dc_off_percent = param->levelconf[i].dcoffpercent; + lvl_conf->prio = param->levelconf[i].priority; + lvl_conf++; + } + + wmi_mtrace(WMI_THERM_THROT_SET_CONF_CMDID, NO_SESSION, 0); + error = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_THERM_THROT_SET_CONF_CMDID); + if (QDF_IS_STATUS_ERROR(error)) { + wmi_buf_free(buf); + WMI_LOGE("Failed to send WMI_THERM_THROT_SET_CONF_CMDID command"); + } + + return error; +} + +/** + * send_coex_config_cmd_tlv() - send coex config command to fw + * @wmi_handle: wmi handle + * @param: pointer to coex config param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_coex_config_cmd_tlv(wmi_unified_t wmi_handle, + struct coex_config_params *param) +{ + WMI_COEX_CONFIG_CMD_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + int32_t len; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (WMI_COEX_CONFIG_CMD_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_COEX_CONFIG_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + WMI_COEX_CONFIG_CMD_fixed_param)); + + cmd->vdev_id = param->vdev_id; + cmd->config_type = param->config_type; + cmd->config_arg1 = param->config_arg1; + cmd->config_arg2 = param->config_arg2; + cmd->config_arg3 = param->config_arg3; + cmd->config_arg4 = param->config_arg4; + cmd->config_arg5 = param->config_arg5; + cmd->config_arg6 = param->config_arg6; + + wmi_mtrace(WMI_COEX_CONFIG_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_COEX_CONFIG_CMDID); + + if (ret != 0) { + WMI_LOGE("Sending COEX CONFIG CMD failed"); + wmi_buf_free(buf); + } + + return ret; +} + +#ifdef WLAN_SUPPORT_TWT +static void wmi_copy_twt_resource_config(wmi_resource_config *resource_cfg, + target_resource_config *tgt_res_cfg) +{ + resource_cfg->twt_ap_pdev_count = tgt_res_cfg->twt_ap_pdev_count; + resource_cfg->twt_ap_sta_count = tgt_res_cfg->twt_ap_sta_count; +} +#else +static void wmi_copy_twt_resource_config(wmi_resource_config *resource_cfg, + target_resource_config *tgt_res_cfg) +{ + resource_cfg->twt_ap_pdev_count = 0; + resource_cfg->twt_ap_sta_count = 0; +} +#endif + +#ifdef WLAN_FEATURE_NAN +static void wmi_set_nan_channel_support(wmi_resource_config *resource_cfg) +{ + WMI_RSRC_CFG_HOST_SERVICE_FLAG_NAN_CHANNEL_SUPPORT_SET( + resource_cfg->host_service_flags, 1); +} +#else +static inline +void wmi_set_nan_channel_support(wmi_resource_config *resource_cfg) +{ +} +#endif + +static +void wmi_copy_resource_config(wmi_resource_config *resource_cfg, + target_resource_config *tgt_res_cfg) +{ + resource_cfg->num_vdevs = tgt_res_cfg->num_vdevs; + resource_cfg->num_peers = tgt_res_cfg->num_peers; + resource_cfg->num_offload_peers = tgt_res_cfg->num_offload_peers; + resource_cfg->num_offload_reorder_buffs = + tgt_res_cfg->num_offload_reorder_buffs; + resource_cfg->num_peer_keys = tgt_res_cfg->num_peer_keys; + resource_cfg->num_tids = tgt_res_cfg->num_tids; + resource_cfg->ast_skid_limit = tgt_res_cfg->ast_skid_limit; + resource_cfg->tx_chain_mask = tgt_res_cfg->tx_chain_mask; + resource_cfg->rx_chain_mask = tgt_res_cfg->rx_chain_mask; + resource_cfg->rx_timeout_pri[0] = tgt_res_cfg->rx_timeout_pri[0]; + resource_cfg->rx_timeout_pri[1] = tgt_res_cfg->rx_timeout_pri[1]; + resource_cfg->rx_timeout_pri[2] = tgt_res_cfg->rx_timeout_pri[2]; + resource_cfg->rx_timeout_pri[3] = tgt_res_cfg->rx_timeout_pri[3]; + resource_cfg->rx_decap_mode = tgt_res_cfg->rx_decap_mode; + resource_cfg->scan_max_pending_req = + tgt_res_cfg->scan_max_pending_req; + resource_cfg->bmiss_offload_max_vdev = + tgt_res_cfg->bmiss_offload_max_vdev; + resource_cfg->roam_offload_max_vdev = + tgt_res_cfg->roam_offload_max_vdev; + resource_cfg->roam_offload_max_ap_profiles = + tgt_res_cfg->roam_offload_max_ap_profiles; + resource_cfg->num_mcast_groups = tgt_res_cfg->num_mcast_groups; + resource_cfg->num_mcast_table_elems = + tgt_res_cfg->num_mcast_table_elems; + resource_cfg->mcast2ucast_mode = tgt_res_cfg->mcast2ucast_mode; + resource_cfg->tx_dbg_log_size = tgt_res_cfg->tx_dbg_log_size; + resource_cfg->num_wds_entries = tgt_res_cfg->num_wds_entries; + resource_cfg->dma_burst_size = tgt_res_cfg->dma_burst_size; + resource_cfg->mac_aggr_delim = tgt_res_cfg->mac_aggr_delim; + resource_cfg->rx_skip_defrag_timeout_dup_detection_check = + tgt_res_cfg->rx_skip_defrag_timeout_dup_detection_check; + resource_cfg->vow_config = tgt_res_cfg->vow_config; + resource_cfg->gtk_offload_max_vdev = tgt_res_cfg->gtk_offload_max_vdev; + resource_cfg->num_msdu_desc = tgt_res_cfg->num_msdu_desc; + resource_cfg->max_frag_entries = tgt_res_cfg->max_frag_entries; + resource_cfg->num_tdls_vdevs = tgt_res_cfg->num_tdls_vdevs; + resource_cfg->num_tdls_conn_table_entries = + tgt_res_cfg->num_tdls_conn_table_entries; + resource_cfg->beacon_tx_offload_max_vdev = + tgt_res_cfg->beacon_tx_offload_max_vdev; + resource_cfg->num_multicast_filter_entries = + tgt_res_cfg->num_multicast_filter_entries; + resource_cfg->num_wow_filters = + tgt_res_cfg->num_wow_filters; + resource_cfg->num_keep_alive_pattern = + tgt_res_cfg->num_keep_alive_pattern; + resource_cfg->keep_alive_pattern_size = + tgt_res_cfg->keep_alive_pattern_size; + resource_cfg->max_tdls_concurrent_sleep_sta = + tgt_res_cfg->max_tdls_concurrent_sleep_sta; + resource_cfg->max_tdls_concurrent_buffer_sta = + tgt_res_cfg->max_tdls_concurrent_buffer_sta; + resource_cfg->wmi_send_separate = + tgt_res_cfg->wmi_send_separate; + resource_cfg->num_ocb_vdevs = + tgt_res_cfg->num_ocb_vdevs; + resource_cfg->num_ocb_channels = + tgt_res_cfg->num_ocb_channels; + resource_cfg->num_ocb_schedules = + tgt_res_cfg->num_ocb_schedules; + resource_cfg->bpf_instruction_size = tgt_res_cfg->apf_instruction_size; + resource_cfg->max_bssid_rx_filters = tgt_res_cfg->max_bssid_rx_filters; + resource_cfg->use_pdev_id = tgt_res_cfg->use_pdev_id; + resource_cfg->max_num_dbs_scan_duty_cycle = + tgt_res_cfg->max_num_dbs_scan_duty_cycle; + resource_cfg->sched_params = tgt_res_cfg->scheduler_params; + resource_cfg->num_packet_filters = tgt_res_cfg->num_packet_filters; + resource_cfg->num_max_sta_vdevs = tgt_res_cfg->num_max_sta_vdevs; + resource_cfg->max_bssid_indicator = tgt_res_cfg->max_bssid_indicator; + resource_cfg->max_num_group_keys = tgt_res_cfg->max_num_group_keys; + + if (tgt_res_cfg->max_ndp_sessions) + resource_cfg->max_ndp_sessions = + tgt_res_cfg->max_ndp_sessions; + resource_cfg->max_ndi_interfaces = tgt_res_cfg->max_ndi; + + if (tgt_res_cfg->atf_config) + WMI_RSRC_CFG_FLAG_ATF_CONFIG_ENABLE_SET(resource_cfg->flag1, 1); + if (tgt_res_cfg->mgmt_comp_evt_bundle_support) + WMI_RSRC_CFG_FLAG_MGMT_COMP_EVT_BUNDLE_SUPPORT_SET( + resource_cfg->flag1, 1); + if (tgt_res_cfg->tx_msdu_new_partition_id_support) + WMI_RSRC_CFG_FLAG_TX_MSDU_ID_NEW_PARTITION_SUPPORT_SET( + resource_cfg->flag1, 1); + if (tgt_res_cfg->cce_disable) + WMI_RSRC_CFG_FLAG_TCL_CCE_DISABLE_SET(resource_cfg->flag1, 1); + if (tgt_res_cfg->eapol_minrate_set) { + WMI_RSRC_CFG_FLAG_EAPOL_REKEY_MINRATE_SUPPORT_ENABLE_SET( + resource_cfg->flag1, 1); + if (tgt_res_cfg->eapol_minrate_ac_set != 3) { + WMI_RSRC_CFG_FLAG_EAPOL_AC_OVERRIDE_VALID_SET( + resource_cfg->flag1, 1); + WMI_RSRC_CFG_FLAG_EAPOL_AC_OVERRIDE_SET( + resource_cfg->flag1, + tgt_res_cfg->eapol_minrate_ac_set); + } + } + if (tgt_res_cfg->new_htt_msg_format) { + WMI_RSRC_CFG_FLAG_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN_SET( + resource_cfg->flag1, 1); + } + + if (tgt_res_cfg->peer_unmap_conf_support) + WMI_RSRC_CFG_FLAG_PEER_UNMAP_RESPONSE_SUPPORT_SET( + resource_cfg->flag1, 1); + + if (tgt_res_cfg->tstamp64_en) + WMI_RSRC_CFG_FLAG_TX_COMPLETION_TX_TSF64_ENABLE_SET( + resource_cfg->flag1, 1); + + if (tgt_res_cfg->three_way_coex_config_legacy_en) + WMI_RSRC_CFG_FLAG_THREE_WAY_COEX_CONFIG_LEGACY_SUPPORT_SET( + resource_cfg->flag1, 1); + if (tgt_res_cfg->pktcapture_support) + WMI_RSRC_CFG_FLAG_PACKET_CAPTURE_SUPPORT_SET( + resource_cfg->flag1, 1); + + /* + * Control padding using config param/ini of iphdr_pad_config + */ + if (tgt_res_cfg->iphdr_pad_config) + WMI_RSRC_CFG_FLAG_IPHR_PAD_CONFIG_ENABLE_SET( + resource_cfg->flag1, 1); + + WMI_RSRC_CFG_FLAG_IPA_DISABLE_SET(resource_cfg->flag1, + tgt_res_cfg->ipa_disable); + + if (tgt_res_cfg->time_sync_ftm) + WMI_RSRC_CFG_FLAG_AUDIO_SYNC_SUPPORT_SET(resource_cfg->flag1, + 1); + + wmi_copy_twt_resource_config(resource_cfg, tgt_res_cfg); + resource_cfg->peer_map_unmap_v2_support = + tgt_res_cfg->peer_map_unmap_v2; + resource_cfg->smart_ant_cap = tgt_res_cfg->smart_ant_cap; + if (tgt_res_cfg->re_ul_resp) + WMI_SET_BITS(resource_cfg->flags2, 0, 4, + tgt_res_cfg->re_ul_resp); + + + /* + * Enable ast flow override per peer + */ + resource_cfg->msdu_flow_override_config0 = 0; + WMI_MSDU_FLOW_AST_ENABLE_SET( + resource_cfg->msdu_flow_override_config0, + WMI_CONFIG_MSDU_AST_INDEX_1, + tgt_res_cfg->ast_1_valid_mask_enable); + + WMI_MSDU_FLOW_AST_ENABLE_SET( + resource_cfg->msdu_flow_override_config0, + WMI_CONFIG_MSDU_AST_INDEX_2, + tgt_res_cfg->ast_2_valid_mask_enable); + + WMI_MSDU_FLOW_AST_ENABLE_SET( + resource_cfg->msdu_flow_override_config0, + WMI_CONFIG_MSDU_AST_INDEX_3, + tgt_res_cfg->ast_3_valid_mask_enable); + + /* + * Enable ast flow mask and TID valid mask configurations + */ + resource_cfg->msdu_flow_override_config1 = 0; + + /*Enable UDP flow for Ast index 0*/ + WMI_MSDU_FLOW_ASTX_MSDU_FLOW_MASKS_SET( + resource_cfg->msdu_flow_override_config1, + WMI_CONFIG_MSDU_AST_INDEX_0, + tgt_res_cfg->ast_0_flow_mask_enable); + + /*Enable Non UDP flow for Ast index 1*/ + WMI_MSDU_FLOW_ASTX_MSDU_FLOW_MASKS_SET( + resource_cfg->msdu_flow_override_config1, + WMI_CONFIG_MSDU_AST_INDEX_1, + tgt_res_cfg->ast_1_flow_mask_enable); + + /*Enable Hi-Priority flow for Ast index 2*/ + WMI_MSDU_FLOW_ASTX_MSDU_FLOW_MASKS_SET( + resource_cfg->msdu_flow_override_config1, + WMI_CONFIG_MSDU_AST_INDEX_2, + tgt_res_cfg->ast_2_flow_mask_enable); + + /*Enable Low-Priority flow for Ast index 3*/ + WMI_MSDU_FLOW_ASTX_MSDU_FLOW_MASKS_SET( + resource_cfg->msdu_flow_override_config1, + WMI_CONFIG_MSDU_AST_INDEX_3, + tgt_res_cfg->ast_3_flow_mask_enable); + + /*Enable all 8 tid for Hi-Pririty Flow Queue*/ + WMI_MSDU_FLOW_TID_VALID_HI_MASKS_SET( + resource_cfg->msdu_flow_override_config1, + tgt_res_cfg->ast_tid_high_mask_enable); + + /*Enable all 8 tid for Low-Pririty Flow Queue*/ + WMI_MSDU_FLOW_TID_VALID_LOW_MASKS_SET( + resource_cfg->msdu_flow_override_config1, + tgt_res_cfg->ast_tid_low_mask_enable); + WMI_RSRC_CFG_HOST_SERVICE_FLAG_NAN_IFACE_SUPPORT_SET( + resource_cfg->host_service_flags, + tgt_res_cfg->nan_separate_iface_support); + + wmi_set_nan_channel_support(resource_cfg); +} + +/* copy_hw_mode_id_in_init_cmd() - Helper routine to copy hw_mode in init cmd + * @wmi_handle: pointer to wmi handle + * @buf_ptr: pointer to current position in init command buffer + * @len: pointer to length. This will be updated with current length of cmd + * @param: point host parameters for init command + * + * Return: Updated pointer of buf_ptr. + */ +static inline uint8_t *copy_hw_mode_in_init_cmd(struct wmi_unified *wmi_handle, + uint8_t *buf_ptr, int *len, struct wmi_init_cmd_param *param) +{ + uint16_t idx; + + if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) { + wmi_pdev_set_hw_mode_cmd_fixed_param *hw_mode; + wmi_pdev_band_to_mac *band_to_mac; + + hw_mode = (wmi_pdev_set_hw_mode_cmd_fixed_param *) + (buf_ptr + sizeof(wmi_init_cmd_fixed_param) + + sizeof(wmi_resource_config) + + WMI_TLV_HDR_SIZE + (param->num_mem_chunks * + sizeof(wlan_host_memory_chunk))); + + WMITLV_SET_HDR(&hw_mode->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_hw_mode_cmd_fixed_param, + (WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_set_hw_mode_cmd_fixed_param))); + + hw_mode->hw_mode_index = param->hw_mode_id; + hw_mode->num_band_to_mac = param->num_band_to_mac; + + buf_ptr = (uint8_t *) (hw_mode + 1); + band_to_mac = (wmi_pdev_band_to_mac *) (buf_ptr + + WMI_TLV_HDR_SIZE); + for (idx = 0; idx < param->num_band_to_mac; idx++) { + WMITLV_SET_HDR(&band_to_mac[idx].tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_band_to_mac, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_band_to_mac)); + band_to_mac[idx].pdev_id = + wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + param->band_to_mac[idx].pdev_id); + band_to_mac[idx].start_freq = + param->band_to_mac[idx].start_freq; + band_to_mac[idx].end_freq = + param->band_to_mac[idx].end_freq; + } + *len += sizeof(wmi_pdev_set_hw_mode_cmd_fixed_param) + + (param->num_band_to_mac * + sizeof(wmi_pdev_band_to_mac)) + + WMI_TLV_HDR_SIZE; + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + (param->num_band_to_mac * + sizeof(wmi_pdev_band_to_mac))); + } + + return buf_ptr; +} + +static inline void copy_fw_abi_version_tlv(wmi_unified_t wmi_handle, + wmi_init_cmd_fixed_param *cmd) +{ + int num_whitelist; + wmi_abi_version my_vers; + + num_whitelist = sizeof(version_whitelist) / + sizeof(wmi_whitelist_version_info); + my_vers.abi_version_0 = WMI_ABI_VERSION_0; + my_vers.abi_version_1 = WMI_ABI_VERSION_1; + my_vers.abi_version_ns_0 = WMI_ABI_VERSION_NS_0; + my_vers.abi_version_ns_1 = WMI_ABI_VERSION_NS_1; + my_vers.abi_version_ns_2 = WMI_ABI_VERSION_NS_2; + my_vers.abi_version_ns_3 = WMI_ABI_VERSION_NS_3; + + wmi_cmp_and_set_abi_version(num_whitelist, version_whitelist, + &my_vers, + (struct _wmi_abi_version *)&wmi_handle->fw_abi_version, + &cmd->host_abi_vers); + + qdf_print("%s: INIT_CMD version: %d, %d, 0x%x, 0x%x, 0x%x, 0x%x", + __func__, + WMI_VER_GET_MAJOR(cmd->host_abi_vers.abi_version_0), + WMI_VER_GET_MINOR(cmd->host_abi_vers.abi_version_0), + cmd->host_abi_vers.abi_version_ns_0, + cmd->host_abi_vers.abi_version_ns_1, + cmd->host_abi_vers.abi_version_ns_2, + cmd->host_abi_vers.abi_version_ns_3); + + /* Save version sent from host - + * Will be used to check ready event + */ + qdf_mem_copy(&wmi_handle->final_abi_vers, &cmd->host_abi_vers, + sizeof(wmi_abi_version)); +} + +/* + * send_cfg_action_frm_tb_ppdu_cmd_tlv() - send action frame tb ppdu cfg to FW + * @wmi_handle: Pointer to WMi handle + * @ie_data: Pointer for ie data + * + * This function sends action frame tb ppdu cfg to FW + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + * + */ +static QDF_STATUS send_cfg_action_frm_tb_ppdu_cmd_tlv(wmi_unified_t wmi_handle, + struct cfg_action_frm_tb_ppdu_param *cfg_msg) +{ + wmi_pdev_he_tb_action_frm_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t len, frm_len_aligned; + QDF_STATUS ret; + + frm_len_aligned = roundup(cfg_msg->frm_len, sizeof(uint32_t)); + /* Allocate memory for the WMI command */ + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + frm_len_aligned; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + + /* Populate the WMI command */ + cmd = (wmi_pdev_he_tb_action_frm_cmd_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_he_tb_action_frm_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_he_tb_action_frm_cmd_fixed_param)); + cmd->enable = cfg_msg->cfg; + cmd->data_len = cfg_msg->frm_len; + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, frm_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + + qdf_mem_copy(buf_ptr, cfg_msg->data, cmd->data_len); + + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_HE_TB_ACTION_FRM_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL("HE TB action frame cmnd send fail, ret %d"), ret); + wmi_buf_free(buf); + } + + return ret; +} + +static QDF_STATUS save_fw_version_cmd_tlv(wmi_unified_t wmi_handle, void *evt_buf) +{ + WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf; + wmi_service_ready_event_fixed_param *ev; + + + param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *) evt_buf; + + ev = (wmi_service_ready_event_fixed_param *) param_buf->fixed_param; + if (!ev) + return QDF_STATUS_E_FAILURE; + + /*Save fw version from service ready message */ + /*This will be used while sending INIT message */ + qdf_mem_copy(&wmi_handle->fw_abi_version, &ev->fw_abi_vers, + sizeof(wmi_handle->fw_abi_version)); + + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_unified_save_fw_version_cmd() - save fw version + * @wmi_handle: pointer to wmi handle + * @res_cfg: resource config + * @num_mem_chunks: no of mem chunck + * @mem_chunk: pointer to mem chunck structure + * + * This function sends IE information to firmware + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + * + */ +static QDF_STATUS check_and_update_fw_version_cmd_tlv(wmi_unified_t wmi_handle, + void *evt_buf) +{ + WMI_READY_EVENTID_param_tlvs *param_buf = NULL; + wmi_ready_event_fixed_param *ev = NULL; + + param_buf = (WMI_READY_EVENTID_param_tlvs *) evt_buf; + ev = param_buf->fixed_param; + if (!wmi_versions_are_compatible((struct _wmi_abi_version *) + &wmi_handle->final_abi_vers, + &ev->fw_abi_vers)) { + /* + * Error: Our host version and the given firmware version + * are incompatible. + **/ + WMI_LOGD("%s: Error: Incompatible WMI version." + "Host: %d,%d,0x%x 0x%x 0x%x 0x%x, FW: %d,%d,0x%x 0x%x 0x%x 0x%x", + __func__, + WMI_VER_GET_MAJOR(wmi_handle->final_abi_vers. + abi_version_0), + WMI_VER_GET_MINOR(wmi_handle->final_abi_vers. + abi_version_0), + wmi_handle->final_abi_vers.abi_version_ns_0, + wmi_handle->final_abi_vers.abi_version_ns_1, + wmi_handle->final_abi_vers.abi_version_ns_2, + wmi_handle->final_abi_vers.abi_version_ns_3, + WMI_VER_GET_MAJOR(ev->fw_abi_vers.abi_version_0), + WMI_VER_GET_MINOR(ev->fw_abi_vers.abi_version_0), + ev->fw_abi_vers.abi_version_ns_0, + ev->fw_abi_vers.abi_version_ns_1, + ev->fw_abi_vers.abi_version_ns_2, + ev->fw_abi_vers.abi_version_ns_3); + + return QDF_STATUS_E_FAILURE; + } + qdf_mem_copy(&wmi_handle->final_abi_vers, &ev->fw_abi_vers, + sizeof(wmi_abi_version)); + qdf_mem_copy(&wmi_handle->fw_abi_version, &ev->fw_abi_vers, + sizeof(wmi_abi_version)); + + return QDF_STATUS_SUCCESS; +} + +/** + * send_log_supported_evt_cmd_tlv() - Enable/Disable FW diag/log events + * @handle: wmi handle + * @event: Event received from FW + * @len: Length of the event + * + * Enables the low frequency events and disables the high frequency + * events. Bit 17 indicates if the event if low/high frequency. + * 1 - high frequency, 0 - low frequency + * + * Return: 0 on successfully enabling/disabling the events + */ +static QDF_STATUS send_log_supported_evt_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t *event, + uint32_t len) +{ + uint32_t num_of_diag_events_logs; + wmi_diag_event_log_config_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t *cmd_args, *evt_args; + uint32_t buf_len, i; + + WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID_param_tlvs *param_buf; + wmi_diag_event_log_supported_event_fixed_params *wmi_event; + + wmi_debug("Received WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID"); + + param_buf = (WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID_param_tlvs *) event; + if (!param_buf) { + WMI_LOGE("Invalid log supported event buffer"); + return QDF_STATUS_E_INVAL; + } + wmi_event = param_buf->fixed_param; + num_of_diag_events_logs = wmi_event->num_of_diag_events_logs; + + if (num_of_diag_events_logs > + param_buf->num_diag_events_logs_list) { + WMI_LOGE("message number of events %d is more than tlv hdr content %d", + num_of_diag_events_logs, + param_buf->num_diag_events_logs_list); + return QDF_STATUS_E_INVAL; + } + + evt_args = param_buf->diag_events_logs_list; + if (!evt_args) { + WMI_LOGE("%s: Event list is empty, num_of_diag_events_logs=%d", + __func__, num_of_diag_events_logs); + return QDF_STATUS_E_INVAL; + } + + WMI_LOGD("%s: num_of_diag_events_logs=%d", + __func__, num_of_diag_events_logs); + + /* Free any previous allocation */ + if (wmi_handle->events_logs_list) { + qdf_mem_free(wmi_handle->events_logs_list); + wmi_handle->events_logs_list = NULL; + } + + if (num_of_diag_events_logs > + (WMI_SVC_MSG_MAX_SIZE / sizeof(uint32_t))) { + WMI_LOGE("%s: excess num of logs:%d", __func__, + num_of_diag_events_logs); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + /* Store the event list for run time enable/disable */ + wmi_handle->events_logs_list = qdf_mem_malloc(num_of_diag_events_logs * + sizeof(uint32_t)); + if (!wmi_handle->events_logs_list) + return QDF_STATUS_E_NOMEM; + + wmi_handle->num_of_diag_events_logs = num_of_diag_events_logs; + + /* Prepare the send buffer */ + buf_len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + + (num_of_diag_events_logs * sizeof(uint32_t)); + + buf = wmi_buf_alloc(wmi_handle, buf_len); + if (!buf) { + qdf_mem_free(wmi_handle->events_logs_list); + wmi_handle->events_logs_list = NULL; + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_diag_event_log_config_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *) cmd; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_diag_event_log_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_diag_event_log_config_fixed_param)); + + cmd->num_of_diag_events_logs = num_of_diag_events_logs; + + buf_ptr += sizeof(wmi_diag_event_log_config_fixed_param); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (num_of_diag_events_logs * sizeof(uint32_t))); + + cmd_args = (uint32_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + + /* Populate the events */ + for (i = 0; i < num_of_diag_events_logs; i++) { + /* Low freq (0) - Enable (1) the event + * High freq (1) - Disable (0) the event + */ + WMI_DIAG_ID_ENABLED_DISABLED_SET(cmd_args[i], + !(WMI_DIAG_FREQUENCY_GET(evt_args[i]))); + /* Set the event ID */ + WMI_DIAG_ID_SET(cmd_args[i], + WMI_DIAG_ID_GET(evt_args[i])); + /* Set the type */ + WMI_DIAG_TYPE_SET(cmd_args[i], + WMI_DIAG_TYPE_GET(evt_args[i])); + /* Storing the event/log list in WMI */ + wmi_handle->events_logs_list[i] = evt_args[i]; + } + + wmi_mtrace(WMI_DIAG_EVENT_LOG_CONFIG_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, buf_len, + WMI_DIAG_EVENT_LOG_CONFIG_CMDID)) { + WMI_LOGE("%s: WMI_DIAG_EVENT_LOG_CONFIG_CMDID failed", + __func__); + wmi_buf_free(buf); + /* Not clearing events_logs_list, though wmi cmd failed. + * Host can still have this list + */ + return QDF_STATUS_E_INVAL; + } + + return 0; +} + +/** + * send_enable_specific_fw_logs_cmd_tlv() - Start/Stop logging of diag log id + * @wmi_handle: wmi handle + * @start_log: Start logging related parameters + * + * Send the command to the FW based on which specific logging of diag + * event/log id can be started/stopped + * + * Return: None + */ +static QDF_STATUS send_enable_specific_fw_logs_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_wifi_start_log *start_log) +{ + wmi_diag_event_log_config_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t len, count, log_level, i; + uint32_t *cmd_args; + uint32_t total_len; + count = 0; + + if (!wmi_handle->events_logs_list) { + WMI_LOGD("%s: Not received event/log list from FW, yet", + __func__); + return QDF_STATUS_E_NOMEM; + } + /* total_len stores the number of events where BITS 17 and 18 are set. + * i.e., events of high frequency (17) and for extended debugging (18) + */ + total_len = 0; + for (i = 0; i < wmi_handle->num_of_diag_events_logs; i++) { + if ((WMI_DIAG_FREQUENCY_GET(wmi_handle->events_logs_list[i])) && + (WMI_DIAG_EXT_FEATURE_GET(wmi_handle->events_logs_list[i]))) + total_len++; + } + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + + (total_len * sizeof(uint32_t)); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_diag_event_log_config_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *) cmd; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_diag_event_log_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_diag_event_log_config_fixed_param)); + + cmd->num_of_diag_events_logs = total_len; + + buf_ptr += sizeof(wmi_diag_event_log_config_fixed_param); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (total_len * sizeof(uint32_t))); + + cmd_args = (uint32_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + + if (start_log->verbose_level >= WMI_LOG_LEVEL_ACTIVE) + log_level = 1; + else + log_level = 0; + + WMI_LOGD("%s: Length:%d, Log_level:%d", __func__, total_len, log_level); + for (i = 0; i < wmi_handle->num_of_diag_events_logs; i++) { + uint32_t val = wmi_handle->events_logs_list[i]; + if ((WMI_DIAG_FREQUENCY_GET(val)) && + (WMI_DIAG_EXT_FEATURE_GET(val))) { + + WMI_DIAG_ID_SET(cmd_args[count], + WMI_DIAG_ID_GET(val)); + WMI_DIAG_TYPE_SET(cmd_args[count], + WMI_DIAG_TYPE_GET(val)); + WMI_DIAG_ID_ENABLED_DISABLED_SET(cmd_args[count], + log_level); + WMI_LOGD("%s: Idx:%d, val:%x", __func__, i, val); + count++; + } + } + + wmi_mtrace(WMI_DIAG_EVENT_LOG_CONFIG_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_DIAG_EVENT_LOG_CONFIG_CMDID)) { + WMI_LOGE("%s: WMI_DIAG_EVENT_LOG_CONFIG_CMDID failed", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_flush_logs_to_fw_cmd_tlv() - Send log flush command to FW + * @wmi_handle: WMI handle + * + * This function is used to send the flush command to the FW, + * that will flush the fw logs that are residue in the FW + * + * Return: None + */ +static QDF_STATUS send_flush_logs_to_fw_cmd_tlv(wmi_unified_t wmi_handle) +{ + wmi_debug_mesg_flush_fixed_param *cmd; + wmi_buf_t buf; + int len = sizeof(*cmd); + QDF_STATUS ret; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_debug_mesg_flush_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_debug_mesg_flush_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_debug_mesg_flush_fixed_param)); + cmd->reserved0 = 0; + + wmi_mtrace(WMI_DEBUG_MESG_FLUSH_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, + buf, + len, + WMI_DEBUG_MESG_FLUSH_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_DEBUG_MESG_FLUSH_CMDID"); + wmi_buf_free(buf); + return QDF_STATUS_E_INVAL; + } + WMI_LOGD("Sent WMI_DEBUG_MESG_FLUSH_CMDID to FW"); + + return ret; +} + +#ifdef BIG_ENDIAN_HOST +/** +* fips_conv_data_be() - LE to BE conversion of FIPS ev data +* @param data_len - data length +* @param data - pointer to data +* +* Return: QDF_STATUS - success or error status +*/ +static QDF_STATUS fips_align_data_be(wmi_unified_t wmi_handle, + struct fips_params *param) +{ + unsigned char *key_unaligned, *data_unaligned; + int c; + u_int8_t *key_aligned = NULL; + u_int8_t *data_aligned = NULL; + + /* Assigning unaligned space to copy the key */ + key_unaligned = qdf_mem_malloc( + sizeof(u_int8_t)*param->key_len + FIPS_ALIGN); + data_unaligned = qdf_mem_malloc( + sizeof(u_int8_t)*param->data_len + FIPS_ALIGN); + + /* Checking if kmalloc is successful to allocate space */ + if (!key_unaligned) + return QDF_STATUS_SUCCESS; + /* Checking if space is aligned */ + if (!FIPS_IS_ALIGNED(key_unaligned, FIPS_ALIGN)) { + /* align to 4 */ + key_aligned = + (u_int8_t *)FIPS_ALIGNTO(key_unaligned, + FIPS_ALIGN); + } else { + key_aligned = (u_int8_t *)key_unaligned; + } + + /* memset and copy content from key to key aligned */ + OS_MEMSET(key_aligned, 0, param->key_len); + OS_MEMCPY(key_aligned, param->key, param->key_len); + + /* print a hexdump for host debug */ + print_hex_dump(KERN_DEBUG, + "\t Aligned and Copied Key:@@@@ ", + DUMP_PREFIX_NONE, + 16, 1, key_aligned, param->key_len, true); + + /* Checking if kmalloc is successful to allocate space */ + if (!data_unaligned) + return QDF_STATUS_SUCCESS; + /* Checking of space is aligned */ + if (!FIPS_IS_ALIGNED(data_unaligned, FIPS_ALIGN)) { + /* align to 4 */ + data_aligned = + (u_int8_t *)FIPS_ALIGNTO(data_unaligned, + FIPS_ALIGN); + } else { + data_aligned = (u_int8_t *)data_unaligned; + } + + /* memset and copy content from data to data aligned */ + OS_MEMSET(data_aligned, 0, param->data_len); + OS_MEMCPY(data_aligned, param->data, param->data_len); + + /* print a hexdump for host debug */ + print_hex_dump(KERN_DEBUG, + "\t Properly Aligned and Copied Data:@@@@ ", + DUMP_PREFIX_NONE, + 16, 1, data_aligned, param->data_len, true); + + /* converting to little Endian both key_aligned and + * data_aligned*/ + for (c = 0; c < param->key_len/4; c++) { + *((u_int32_t *)key_aligned+c) = + qdf_cpu_to_le32(*((u_int32_t *)key_aligned+c)); + } + for (c = 0; c < param->data_len/4; c++) { + *((u_int32_t *)data_aligned+c) = + qdf_cpu_to_le32(*((u_int32_t *)data_aligned+c)); + } + + /* update endian data to key and data vectors */ + OS_MEMCPY(param->key, key_aligned, param->key_len); + OS_MEMCPY(param->data, data_aligned, param->data_len); + + /* clean up allocated spaces */ + qdf_mem_free(key_unaligned); + key_unaligned = NULL; + key_aligned = NULL; + + qdf_mem_free(data_unaligned); + data_unaligned = NULL; + data_aligned = NULL; + + return QDF_STATUS_SUCCESS; +} +#else +/** +* fips_align_data_be() - DUMMY for LE platform +* +* Return: QDF_STATUS - success +*/ +static QDF_STATUS fips_align_data_be(wmi_unified_t wmi_handle, + struct fips_params *param) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef WLAN_FEATURE_DISA +/** + * send_encrypt_decrypt_send_cmd() - send encrypt/decrypt cmd to fw + * @wmi_handle: wmi handle + * @params: encrypt/decrypt params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +send_encrypt_decrypt_send_cmd_tlv(wmi_unified_t wmi_handle, + struct disa_encrypt_decrypt_req_params + *encrypt_decrypt_params) +{ + wmi_vdev_encrypt_decrypt_data_req_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint8_t *buf_ptr; + QDF_STATUS ret; + uint32_t len; + + WMI_LOGD(FL("Send encrypt decrypt cmd")); + + len = sizeof(*cmd) + + encrypt_decrypt_params->data_len + + WMI_TLV_HDR_SIZE; + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = wmi_buf_data(wmi_buf); + cmd = (wmi_vdev_encrypt_decrypt_data_req_cmd_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_encrypt_decrypt_data_req_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_encrypt_decrypt_data_req_cmd_fixed_param)); + + cmd->vdev_id = encrypt_decrypt_params->vdev_id; + cmd->key_flag = encrypt_decrypt_params->key_flag; + cmd->key_idx = encrypt_decrypt_params->key_idx; + cmd->key_cipher = encrypt_decrypt_params->key_cipher; + cmd->key_len = encrypt_decrypt_params->key_len; + cmd->key_txmic_len = encrypt_decrypt_params->key_txmic_len; + cmd->key_rxmic_len = encrypt_decrypt_params->key_rxmic_len; + + qdf_mem_copy(cmd->key_data, encrypt_decrypt_params->key_data, + encrypt_decrypt_params->key_len); + + qdf_mem_copy(cmd->mac_hdr, encrypt_decrypt_params->mac_header, + MAX_MAC_HEADER_LEN); + + cmd->data_len = encrypt_decrypt_params->data_len; + + if (cmd->data_len) { + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + roundup(encrypt_decrypt_params->data_len, + sizeof(uint32_t))); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, encrypt_decrypt_params->data, + encrypt_decrypt_params->data_len); + } + + /* This conversion is to facilitate data to FW in little endian */ + cmd->pn[5] = encrypt_decrypt_params->pn[0]; + cmd->pn[4] = encrypt_decrypt_params->pn[1]; + cmd->pn[3] = encrypt_decrypt_params->pn[2]; + cmd->pn[2] = encrypt_decrypt_params->pn[3]; + cmd->pn[1] = encrypt_decrypt_params->pn[4]; + cmd->pn[0] = encrypt_decrypt_params->pn[5]; + + wmi_mtrace(WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, + wmi_buf, len, + WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send ENCRYPT DECRYPT cmd: %d", ret); + wmi_buf_free(wmi_buf); + } + + return ret; +} +#endif /* WLAN_FEATURE_DISA */ + +/** + * send_pdev_fips_cmd_tlv() - send pdev fips cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold pdev fips param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_pdev_fips_cmd_tlv(wmi_unified_t wmi_handle, + struct fips_params *param) +{ + wmi_pdev_fips_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t len = sizeof(wmi_pdev_fips_cmd_fixed_param); + QDF_STATUS retval = QDF_STATUS_SUCCESS; + + /* Length TLV placeholder for array of bytes */ + len += WMI_TLV_HDR_SIZE; + if (param->data_len) + len += (param->data_len*sizeof(uint8_t)); + + /* + * Data length must be multiples of 16 bytes - checked against 0xF - + * and must be less than WMI_SVC_MSG_SIZE - static size of + * wmi_pdev_fips_cmd structure + */ + + /* do sanity on the input */ + if (!(((param->data_len & 0xF) == 0) && + ((param->data_len > 0) && + (param->data_len < (WMI_HOST_MAX_BUFFER_SIZE - + sizeof(wmi_pdev_fips_cmd_fixed_param)))))) { + return QDF_STATUS_E_INVAL; + } + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_pdev_fips_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_fips_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_fips_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + param->pdev_id); + if (param->key && param->data) { + cmd->key_len = param->key_len; + cmd->data_len = param->data_len; + cmd->fips_cmd = !!(param->op); + + if (fips_align_data_be(wmi_handle, param) != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + qdf_mem_copy(cmd->key, param->key, param->key_len); + + if (param->mode == FIPS_ENGINE_AES_CTR || + param->mode == FIPS_ENGINE_AES_MIC) { + cmd->mode = param->mode; + } else { + cmd->mode = FIPS_ENGINE_AES_CTR; + } + qdf_print("Key len = %d, Data len = %d", + cmd->key_len, cmd->data_len); + + print_hex_dump(KERN_DEBUG, "Key: ", DUMP_PREFIX_NONE, 16, 1, + cmd->key, cmd->key_len, true); + buf_ptr += sizeof(*cmd); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, param->data_len); + + buf_ptr += WMI_TLV_HDR_SIZE; + if (param->data_len) + qdf_mem_copy(buf_ptr, + (uint8_t *) param->data, param->data_len); + + print_hex_dump(KERN_DEBUG, "Plain text: ", DUMP_PREFIX_NONE, + 16, 1, buf_ptr, cmd->data_len, true); + + buf_ptr += param->data_len; + + wmi_mtrace(WMI_PDEV_FIPS_CMDID, NO_SESSION, 0); + retval = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_FIPS_CMDID); + qdf_print("%s return value %d", __func__, retval); + } else { + qdf_print("\n%s:%d Key or Data is NULL", __func__, __LINE__); + wmi_buf_free(buf); + retval = -QDF_STATUS_E_BADMSG; + } + + return retval; +} + +/** + * send_fw_test_cmd_tlv() - send fw test command to fw. + * @wmi_handle: wmi handle + * @wmi_fwtest: fw test command + * + * This function sends fw test command to fw. + * + * Return: CDF STATUS + */ +static +QDF_STATUS send_fw_test_cmd_tlv(wmi_unified_t wmi_handle, + struct set_fwtest_params *wmi_fwtest) +{ + wmi_fwtest_set_param_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint16_t len; + + len = sizeof(*cmd); + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_fwtest_set_param_cmd_fixed_param *) wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_fwtest_set_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_fwtest_set_param_cmd_fixed_param)); + cmd->param_id = wmi_fwtest->arg; + cmd->param_value = wmi_fwtest->value; + + wmi_mtrace(WMI_FWTEST_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_FWTEST_CMDID)) { + WMI_LOGP("%s: failed to send fw test command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_unit_test_cmd_tlv() - send unit test command to fw. + * @wmi_handle: wmi handle + * @wmi_utest: unit test command + * + * This function send unit test command to fw. + * + * Return: CDF STATUS + */ +static QDF_STATUS send_unit_test_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_unit_test_cmd *wmi_utest) +{ + wmi_unit_test_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint8_t *buf_ptr; + int i; + uint16_t len, args_tlv_len; + uint32_t *unit_test_cmd_args; + + args_tlv_len = + WMI_TLV_HDR_SIZE + wmi_utest->num_args * sizeof(uint32_t); + len = sizeof(wmi_unit_test_cmd_fixed_param) + args_tlv_len; + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_unit_test_cmd_fixed_param *) wmi_buf_data(wmi_buf); + buf_ptr = (uint8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_unit_test_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_unit_test_cmd_fixed_param)); + cmd->vdev_id = wmi_utest->vdev_id; + cmd->module_id = wmi_utest->module_id; + cmd->num_args = wmi_utest->num_args; + cmd->diag_token = wmi_utest->diag_token; + buf_ptr += sizeof(wmi_unit_test_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (wmi_utest->num_args * sizeof(uint32_t))); + unit_test_cmd_args = (uint32_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + wmi_debug("VDEV ID: %d MODULE ID: %d TOKEN: %d", + cmd->vdev_id, cmd->module_id, cmd->diag_token); + wmi_debug("%d num of args = ", wmi_utest->num_args); + for (i = 0; (i < wmi_utest->num_args && i < WMI_UNIT_TEST_MAX_NUM_ARGS); i++) { + unit_test_cmd_args[i] = wmi_utest->args[i]; + wmi_debug("%d,", wmi_utest->args[i]); + } + wmi_mtrace(WMI_UNIT_TEST_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_UNIT_TEST_CMDID)) { + WMI_LOGP("%s: failed to send unit test command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_power_dbg_cmd_tlv() - send power debug commands + * @wmi_handle: wmi handle + * @param: wmi power debug parameter + * + * Send WMI_POWER_DEBUG_CMDID parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +static QDF_STATUS send_power_dbg_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_power_dbg_params *param) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len, args_tlv_len; + uint8_t *buf_ptr; + uint8_t i; + wmi_pdev_wal_power_debug_cmd_fixed_param *cmd; + uint32_t *cmd_args; + + /* Prepare and send power debug cmd parameters */ + args_tlv_len = WMI_TLV_HDR_SIZE + param->num_args * sizeof(uint32_t); + len = sizeof(*cmd) + args_tlv_len; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_pdev_wal_power_debug_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_wal_power_debug_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_wal_power_debug_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + param->pdev_id); + cmd->module_id = param->module_id; + cmd->num_args = param->num_args; + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (param->num_args * sizeof(uint32_t))); + cmd_args = (uint32_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + wmi_debug("%d num of args = ", param->num_args); + for (i = 0; (i < param->num_args && i < WMI_MAX_POWER_DBG_ARGS); i++) { + cmd_args[i] = param->args[i]; + wmi_debug("%d,", param->args[i]); + } + + wmi_mtrace(WMI_PDEV_WAL_POWER_DEBUG_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_PDEV_WAL_POWER_DEBUG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("wmi_unified_cmd_send WMI_PDEV_WAL_POWER_DEBUG_CMDID returned Error %d", + status); + goto error; + } + + return QDF_STATUS_SUCCESS; +error: + wmi_buf_free(buf); + + return status; +} + +/** + * send_dfs_phyerr_offload_en_cmd_tlv() - send dfs phyerr offload enable cmd + * @wmi_handle: wmi handle + * @pdev_id: pdev id + * + * Send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID command to firmware. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +static QDF_STATUS send_dfs_phyerr_offload_en_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t pdev_id) +{ + wmi_pdev_dfs_phyerr_offload_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len; + QDF_STATUS ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + + wmi_debug("pdev_id=%d", pdev_id); + + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_pdev_dfs_phyerr_offload_enable_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_dfs_phyerr_offload_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_dfs_phyerr_offload_enable_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + pdev_id); + wmi_mtrace(WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send cmd to fw, ret=%d, pdev_id=%d", + __func__, ret, pdev_id); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_dfs_phyerr_offload_dis_cmd_tlv() - send dfs phyerr offload disable cmd + * @wmi_handle: wmi handle + * @pdev_id: pdev id + * + * Send WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID command to firmware. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +static QDF_STATUS send_dfs_phyerr_offload_dis_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t pdev_id) +{ + wmi_pdev_dfs_phyerr_offload_disable_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len; + QDF_STATUS ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + + wmi_debug("pdev_id=%d", pdev_id); + + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_pdev_dfs_phyerr_offload_disable_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_dfs_phyerr_offload_disable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_dfs_phyerr_offload_disable_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + pdev_id); + wmi_mtrace(WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send cmd to fw, ret=%d, pdev_id=%d", + __func__, ret, pdev_id); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef QCA_SUPPORT_AGILE_DFS +static +QDF_STATUS send_adfs_ch_cfg_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_adfs_ch_cfg_params *param) +{ + /* wmi_unified_cmd_send set request of agile ADFS channel*/ + wmi_vdev_adfs_ch_cfg_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + uint16_t len; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_vdev_adfs_ch_cfg_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_adfs_ch_cfg_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_adfs_ch_cfg_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + cmd->ocac_mode = param->ocac_mode; + cmd->center_freq = param->center_freq; + cmd->chan_freq = param->chan_freq; + cmd->chan_width = param->chan_width; + cmd->min_duration_ms = param->min_duration_ms; + cmd->max_duration_ms = param->max_duration_ms; + WMI_LOGD("%s:cmd->vdev_id: %d ,cmd->ocac_mode: %d cmd->center_freq: %d", + __func__, cmd->vdev_id, cmd->ocac_mode, + cmd->center_freq); + + wmi_mtrace(WMI_VDEV_ADFS_CH_CFG_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_ADFS_CH_CFG_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send cmd to fw, ret=%d", + __func__, ret); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static +QDF_STATUS send_adfs_ocac_abort_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_adfs_abort_params *param) +{ + /*wmi_unified_cmd_send with ocac abort on ADFS channel*/ + wmi_vdev_adfs_ocac_abort_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + uint16_t len; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_vdev_adfs_ocac_abort_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR + (&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_adfs_ocac_abort_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_adfs_ocac_abort_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + + wmi_mtrace(WMI_VDEV_ADFS_OCAC_ABORT_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_ADFS_OCAC_ABORT_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send cmd to fw, ret=%d", + __func__, ret); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * init_cmd_send_tlv() - send initialization cmd to fw + * @wmi_handle: wmi handle + * @param param: pointer to wmi init param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS init_cmd_send_tlv(wmi_unified_t wmi_handle, + struct wmi_init_cmd_param *param) +{ + wmi_buf_t buf; + wmi_init_cmd_fixed_param *cmd; + uint8_t *buf_ptr; + wmi_resource_config *resource_cfg; + wlan_host_memory_chunk *host_mem_chunks; + uint32_t mem_chunk_len = 0, hw_mode_len = 0; + uint16_t idx; + int len; + QDF_STATUS ret; + + len = sizeof(*cmd) + sizeof(wmi_resource_config) + + WMI_TLV_HDR_SIZE; + mem_chunk_len = (sizeof(wlan_host_memory_chunk) * MAX_MEM_CHUNKS); + + if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) + hw_mode_len = sizeof(wmi_pdev_set_hw_mode_cmd_fixed_param) + + WMI_TLV_HDR_SIZE + + (param->num_band_to_mac * sizeof(wmi_pdev_band_to_mac)); + + buf = wmi_buf_alloc(wmi_handle, len + mem_chunk_len + hw_mode_len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_init_cmd_fixed_param *) buf_ptr; + resource_cfg = (wmi_resource_config *) (buf_ptr + sizeof(*cmd)); + + host_mem_chunks = (wlan_host_memory_chunk *) + (buf_ptr + sizeof(*cmd) + sizeof(wmi_resource_config) + + WMI_TLV_HDR_SIZE); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_init_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_init_cmd_fixed_param)); + + wmi_copy_resource_config(resource_cfg, param->res_cfg); + WMITLV_SET_HDR(&resource_cfg->tlv_header, + WMITLV_TAG_STRUC_wmi_resource_config, + WMITLV_GET_STRUCT_TLVLEN(wmi_resource_config)); + + for (idx = 0; idx < param->num_mem_chunks; ++idx) { + WMITLV_SET_HDR(&(host_mem_chunks[idx].tlv_header), + WMITLV_TAG_STRUC_wlan_host_memory_chunk, + WMITLV_GET_STRUCT_TLVLEN + (wlan_host_memory_chunk)); + host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr; + host_mem_chunks[idx].size = param->mem_chunks[idx].len; + host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id; + QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG, + "chunk %d len %d requested ,ptr 0x%x ", + idx, host_mem_chunks[idx].size, + host_mem_chunks[idx].ptr); + } + cmd->num_host_mem_chunks = param->num_mem_chunks; + len += (param->num_mem_chunks * sizeof(wlan_host_memory_chunk)); + + WMITLV_SET_HDR((buf_ptr + sizeof(*cmd) + sizeof(wmi_resource_config)), + WMITLV_TAG_ARRAY_STRUC, + (sizeof(wlan_host_memory_chunk) * + param->num_mem_chunks)); + + /* Fill hw mode id config */ + buf_ptr = copy_hw_mode_in_init_cmd(wmi_handle, buf_ptr, &len, param); + + /* Fill fw_abi_vers */ + copy_fw_abi_version_tlv(wmi_handle, cmd); + + wmi_mtrace(WMI_INIT_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, WMI_INIT_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("wmi_unified_cmd_send WMI_INIT_CMDID returned Error %d", + ret); + wmi_buf_free(buf); + } + + return ret; + +} + +/** + * send_addba_send_cmd_tlv() - send addba send command to fw + * @wmi_handle: wmi handle + * @param: pointer to delba send params + * @macaddr: peer mac address + * + * Send WMI_ADDBA_SEND_CMDID command to firmware + * Return: QDF_STATUS_SUCCESS on success. QDF_STATUS_E** on error + */ +static QDF_STATUS +send_addba_send_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct addba_send_params *param) +{ + wmi_addba_send_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len; + QDF_STATUS ret; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_addba_send_cmd_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_addba_send_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_addba_send_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + cmd->tid = param->tidno; + cmd->buffersize = param->buffersize; + + wmi_mtrace(WMI_ADDBA_SEND_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, WMI_ADDBA_SEND_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send cmd to fw, ret=%d", __func__, ret); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_delba_send_cmd_tlv() - send delba send command to fw + * @wmi_handle: wmi handle + * @param: pointer to delba send params + * @macaddr: peer mac address + * + * Send WMI_DELBA_SEND_CMDID command to firmware + * Return: QDF_STATUS_SUCCESS on success. QDF_STATUS_E** on error + */ +static QDF_STATUS +send_delba_send_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct delba_send_params *param) +{ + wmi_delba_send_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len; + QDF_STATUS ret; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_delba_send_cmd_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_delba_send_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_delba_send_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + cmd->tid = param->tidno; + cmd->initiator = param->initiator; + cmd->reasoncode = param->reasoncode; + + wmi_mtrace(WMI_DELBA_SEND_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, WMI_DELBA_SEND_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send cmd to fw, ret=%d", __func__, ret); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_addba_clearresponse_cmd_tlv() - send addba clear response command + * to fw + * @wmi_handle: wmi handle + * @param: pointer to addba clearresp params + * @macaddr: peer mac address + * Return: 0 for success or error code + */ +static QDF_STATUS +send_addba_clearresponse_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct addba_clearresponse_params *param) +{ + wmi_addba_clear_resp_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len; + QDF_STATUS ret; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_addba_clear_resp_cmd_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_addba_clear_resp_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_addba_clear_resp_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + + wmi_mtrace(WMI_ADDBA_CLEAR_RESP_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, + buf, len, WMI_ADDBA_CLEAR_RESP_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send cmd to fw, ret=%d", __func__, ret); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef OBSS_PD +/** + * send_obss_spatial_reuse_set_def_thresh_cmd_tlv - send obss spatial reuse set + * def thresh to fw + * @wmi_handle: wmi handle + * @thresh: pointer to obss_spatial_reuse_def_thresh + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static +QDF_STATUS send_obss_spatial_reuse_set_def_thresh_cmd_tlv( + wmi_unified_t wmi_handle, + struct wmi_host_obss_spatial_reuse_set_def_thresh + *thresh) +{ + wmi_buf_t buf; + wmi_obss_spatial_reuse_set_def_obss_thresh_cmd_fixed_param *cmd; + QDF_STATUS ret; + uint32_t cmd_len; + uint32_t tlv_len; + + cmd_len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, cmd_len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_obss_spatial_reuse_set_def_obss_thresh_cmd_fixed_param *) + wmi_buf_data(buf); + + tlv_len = WMITLV_GET_STRUCT_TLVLEN( + wmi_obss_spatial_reuse_set_def_obss_thresh_cmd_fixed_param); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_obss_spatial_reuse_set_def_obss_thresh_cmd_fixed_param, + tlv_len); + + cmd->obss_min = thresh->obss_min; + cmd->obss_max = thresh->obss_max; + cmd->vdev_type = thresh->vdev_type; + ret = wmi_unified_cmd_send(wmi_handle, buf, cmd_len, + WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) + wmi_buf_free(buf); + + return ret; +} + +/** + * send_obss_spatial_reuse_set_cmd_tlv - send obss spatial reuse set cmd to fw + * @wmi_handle: wmi handle + * @obss_spatial_reuse_param: pointer to obss_spatial_reuse_param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static +QDF_STATUS send_obss_spatial_reuse_set_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_host_obss_spatial_reuse_set_param + *obss_spatial_reuse_param) +{ + wmi_buf_t buf; + wmi_obss_spatial_reuse_set_cmd_fixed_param *cmd; + QDF_STATUS ret; + uint32_t len; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_obss_spatial_reuse_set_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_obss_spatial_reuse_set_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_obss_spatial_reuse_set_cmd_fixed_param)); + + cmd->enable = obss_spatial_reuse_param->enable; + cmd->obss_min = obss_spatial_reuse_param->obss_min; + cmd->obss_max = obss_spatial_reuse_param->obss_max; + cmd->vdev_id = obss_spatial_reuse_param->vdev_id; + + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE( + "WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID send returned Error %d", + ret); + wmi_buf_free(buf); + } + + return ret; +} +#endif + +#ifdef QCA_SUPPORT_CP_STATS +/** + * extract_cca_stats_tlv - api to extract congestion stats from event buffer + * @wmi_handle: wma handle + * @evt_buf: event buffer + * @out_buff: buffer to populated after stats extraction + * + * Return: status of operation + */ +static QDF_STATUS extract_cca_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_congestion_stats *out_buff) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_congestion_stats *congestion_stats; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *)evt_buf; + congestion_stats = param_buf->congestion_stats; + if (!congestion_stats) + return QDF_STATUS_E_INVAL; + + out_buff->vdev_id = congestion_stats->vdev_id; + out_buff->congestion = congestion_stats->congestion; + + WMI_LOGD("%s: cca stats event processed", __func__); + return QDF_STATUS_SUCCESS; +} +#endif /* QCA_SUPPORT_CP_STATS */ + +/** + * extract_ctl_failsafe_check_ev_param_tlv() - extract ctl data from + * event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold peer ctl data + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_ctl_failsafe_check_ev_param_tlv( + wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_host_pdev_ctl_failsafe_event *param) +{ + WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID_param_tlvs *param_buf; + wmi_pdev_ctl_failsafe_check_fixed_param *fix_param; + + param_buf = (WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid ctl_failsafe event buffer"); + return QDF_STATUS_E_INVAL; + } + + fix_param = param_buf->fixed_param; + param->ctl_failsafe_status = fix_param->ctl_FailsafeStatus; + + return QDF_STATUS_SUCCESS; +} + +/** + * save_service_bitmap_tlv() - save service bitmap + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param bitmap_buf: bitmap buffer, for converged legacy support + * + * Return: QDF_STATUS + */ +static +QDF_STATUS save_service_bitmap_tlv(wmi_unified_t wmi_handle, void *evt_buf, + void *bitmap_buf) +{ + WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf; + struct wmi_soc *soc = wmi_handle->soc; + + param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *) evt_buf; + + /* If it is already allocated, use that buffer. This can happen + * during target stop/start scenarios where host allocation is skipped. + */ + if (!soc->wmi_service_bitmap) { + soc->wmi_service_bitmap = + qdf_mem_malloc(WMI_SERVICE_BM_SIZE * sizeof(uint32_t)); + if (!soc->wmi_service_bitmap) + return QDF_STATUS_E_NOMEM; + } + + qdf_mem_copy(soc->wmi_service_bitmap, + param_buf->wmi_service_bitmap, + (WMI_SERVICE_BM_SIZE * sizeof(uint32_t))); + + if (bitmap_buf) + qdf_mem_copy(bitmap_buf, + param_buf->wmi_service_bitmap, + (WMI_SERVICE_BM_SIZE * sizeof(uint32_t))); + + return QDF_STATUS_SUCCESS; +} + +/** + * save_ext_service_bitmap_tlv() - save extendend service bitmap + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param bitmap_buf: bitmap buffer, for converged legacy support + * + * Return: QDF_STATUS + */ +static +QDF_STATUS save_ext_service_bitmap_tlv(wmi_unified_t wmi_handle, void *evt_buf, + void *bitmap_buf) +{ + WMI_SERVICE_AVAILABLE_EVENTID_param_tlvs *param_buf; + wmi_service_available_event_fixed_param *ev; + struct wmi_soc *soc = wmi_handle->soc; + + param_buf = (WMI_SERVICE_AVAILABLE_EVENTID_param_tlvs *) evt_buf; + + ev = param_buf->fixed_param; + + /* If it is already allocated, use that buffer. This can happen + * during target stop/start scenarios where host allocation is skipped. + */ + if (!soc->wmi_ext_service_bitmap) { + soc->wmi_ext_service_bitmap = qdf_mem_malloc( + WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(uint32_t)); + if (!soc->wmi_ext_service_bitmap) + return QDF_STATUS_E_NOMEM; + } + + qdf_mem_copy(soc->wmi_ext_service_bitmap, + ev->wmi_service_segment_bitmap, + (WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(uint32_t))); + + WMI_LOGD("wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x", + soc->wmi_ext_service_bitmap[0], soc->wmi_ext_service_bitmap[1], + soc->wmi_ext_service_bitmap[2], soc->wmi_ext_service_bitmap[3]); + + if (bitmap_buf) + qdf_mem_copy(bitmap_buf, + soc->wmi_ext_service_bitmap, + (WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(uint32_t))); + + return QDF_STATUS_SUCCESS; +} +/** + * is_service_enabled_tlv() - Check if service enabled + * @param wmi_handle: wmi handle + * @param service_id: service identifier + * + * Return: 1 enabled, 0 disabled + */ +static bool is_service_enabled_tlv(wmi_unified_t wmi_handle, + uint32_t service_id) +{ + struct wmi_soc *soc = wmi_handle->soc; + + if (!soc->wmi_service_bitmap) { + WMI_LOGE("WMI service bit map is not saved yet"); + return false; + } + + /* if wmi_service_enabled was received with extended bitmap, + * use WMI_SERVICE_EXT_IS_ENABLED to check the services. + */ + if (soc->wmi_ext_service_bitmap) + return WMI_SERVICE_EXT_IS_ENABLED(soc->wmi_service_bitmap, + soc->wmi_ext_service_bitmap, + service_id); + + if (service_id >= WMI_MAX_SERVICE) { + WMI_LOGE("Service id %d but WMI ext service bitmap is NULL", + service_id); + return false; + } + + return WMI_SERVICE_IS_ENABLED(soc->wmi_service_bitmap, + service_id); +} + +static inline void copy_ht_cap_info(uint32_t ev_target_cap, + struct wlan_psoc_target_capability_info *cap) +{ + /* except LDPC all flags are common betwen legacy and here + * also IBFEER is not defined for TLV + */ + cap->ht_cap_info |= ev_target_cap & ( + WMI_HT_CAP_ENABLED + | WMI_HT_CAP_HT20_SGI + | WMI_HT_CAP_DYNAMIC_SMPS + | WMI_HT_CAP_TX_STBC + | WMI_HT_CAP_TX_STBC_MASK_SHIFT + | WMI_HT_CAP_RX_STBC + | WMI_HT_CAP_RX_STBC_MASK_SHIFT + | WMI_HT_CAP_LDPC + | WMI_HT_CAP_L_SIG_TXOP_PROT + | WMI_HT_CAP_MPDU_DENSITY + | WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT + | WMI_HT_CAP_HT40_SGI); + if (ev_target_cap & WMI_HT_CAP_LDPC) + cap->ht_cap_info |= WMI_HOST_HT_CAP_RX_LDPC | + WMI_HOST_HT_CAP_TX_LDPC; +} +/** + * extract_service_ready_tlv() - extract service ready event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to received event buffer + * @param cap: pointer to hold target capability information extracted from even + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_service_ready_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct wlan_psoc_target_capability_info *cap) +{ + WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf; + wmi_service_ready_event_fixed_param *ev; + + + param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *) evt_buf; + + ev = (wmi_service_ready_event_fixed_param *) param_buf->fixed_param; + if (!ev) { + qdf_print("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_FAILURE; + } + + cap->phy_capability = ev->phy_capability; + cap->max_frag_entry = ev->max_frag_entry; + cap->num_rf_chains = ev->num_rf_chains; + copy_ht_cap_info(ev->ht_cap_info, cap); + cap->vht_cap_info = ev->vht_cap_info; + cap->vht_supp_mcs = ev->vht_supp_mcs; + cap->hw_min_tx_power = ev->hw_min_tx_power; + cap->hw_max_tx_power = ev->hw_max_tx_power; + cap->sys_cap_info = ev->sys_cap_info; + cap->min_pkt_size_enable = ev->min_pkt_size_enable; + cap->max_bcn_ie_size = ev->max_bcn_ie_size; + cap->max_num_scan_channels = ev->max_num_scan_channels; + cap->max_supported_macs = ev->max_supported_macs; + cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps; + cap->txrx_chainmask = ev->txrx_chainmask; + cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index; + cap->num_msdu_desc = ev->num_msdu_desc; + cap->fw_version = ev->fw_build_vers; + /* fw_version_1 is not available in TLV. */ + cap->fw_version_1 = 0; + + return QDF_STATUS_SUCCESS; +} + +/* convert_wireless_modes_tlv() - Convert REGDMN_MODE values sent by target + * to host internal WMI_HOST_REGDMN_MODE values. + * REGULATORY TODO : REGDMN_MODE_11AC_VHT*_2G values are not used by the + * host currently. Add this in the future if required. + * 11AX (Phase II) : 11ax related values are not currently + * advertised separately by FW. As part of phase II regulatory bring-up, + * finalize the advertisement mechanism. + * @target_wireless_mode: target wireless mode received in message + * + * Return: returns the host internal wireless mode. + */ +static inline uint32_t convert_wireless_modes_tlv(uint32_t target_wireless_mode) +{ + + uint32_t wireless_modes = 0; + + WMI_LOGD("Target wireless mode: 0x%x", target_wireless_mode); + + if (target_wireless_mode & REGDMN_MODE_11A) + wireless_modes |= WMI_HOST_REGDMN_MODE_11A; + + if (target_wireless_mode & REGDMN_MODE_TURBO) + wireless_modes |= WMI_HOST_REGDMN_MODE_TURBO; + + if (target_wireless_mode & REGDMN_MODE_11B) + wireless_modes |= WMI_HOST_REGDMN_MODE_11B; + + if (target_wireless_mode & REGDMN_MODE_PUREG) + wireless_modes |= WMI_HOST_REGDMN_MODE_PUREG; + + if (target_wireless_mode & REGDMN_MODE_11G) + wireless_modes |= WMI_HOST_REGDMN_MODE_11G; + + if (target_wireless_mode & REGDMN_MODE_108G) + wireless_modes |= WMI_HOST_REGDMN_MODE_108G; + + if (target_wireless_mode & REGDMN_MODE_108A) + wireless_modes |= WMI_HOST_REGDMN_MODE_108A; + + if (target_wireless_mode & REGDMN_MODE_11AC_VHT20_2G) + wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT20_2G; + + if (target_wireless_mode & REGDMN_MODE_XR) + wireless_modes |= WMI_HOST_REGDMN_MODE_XR; + + if (target_wireless_mode & REGDMN_MODE_11A_HALF_RATE) + wireless_modes |= WMI_HOST_REGDMN_MODE_11A_HALF_RATE; + + if (target_wireless_mode & REGDMN_MODE_11A_QUARTER_RATE) + wireless_modes |= WMI_HOST_REGDMN_MODE_11A_QUARTER_RATE; + + if (target_wireless_mode & REGDMN_MODE_11NG_HT20) + wireless_modes |= WMI_HOST_REGDMN_MODE_11NG_HT20; + + if (target_wireless_mode & REGDMN_MODE_11NA_HT20) + wireless_modes |= WMI_HOST_REGDMN_MODE_11NA_HT20; + + if (target_wireless_mode & REGDMN_MODE_11NG_HT40PLUS) + wireless_modes |= WMI_HOST_REGDMN_MODE_11NG_HT40PLUS; + + if (target_wireless_mode & REGDMN_MODE_11NG_HT40MINUS) + wireless_modes |= WMI_HOST_REGDMN_MODE_11NG_HT40MINUS; + + if (target_wireless_mode & REGDMN_MODE_11NA_HT40PLUS) + wireless_modes |= WMI_HOST_REGDMN_MODE_11NA_HT40PLUS; + + if (target_wireless_mode & REGDMN_MODE_11NA_HT40MINUS) + wireless_modes |= WMI_HOST_REGDMN_MODE_11NA_HT40MINUS; + + if (target_wireless_mode & REGDMN_MODE_11AC_VHT20) + wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT20; + + if (target_wireless_mode & REGDMN_MODE_11AC_VHT40PLUS) + wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT40PLUS; + + if (target_wireless_mode & REGDMN_MODE_11AC_VHT40MINUS) + wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT40MINUS; + + if (target_wireless_mode & REGDMN_MODE_11AC_VHT80) + wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT80; + + if (target_wireless_mode & REGDMN_MODE_11AC_VHT160) + wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT160; + + if (target_wireless_mode & REGDMN_MODE_11AC_VHT80_80) + wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT80_80; + + return wireless_modes; +} + +/** + * extract_hal_reg_cap_tlv() - extract HAL registered capabilities + * @wmi_handle: wmi handle + * @param evt_buf: Pointer to event buffer + * @param cap: pointer to hold HAL reg capabilities + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_hal_reg_cap_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct wlan_psoc_hal_reg_capability *cap) +{ + WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf; + + param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *) evt_buf; + if (!param_buf || !param_buf->hal_reg_capabilities) { + WMI_LOGE("%s: Invalid arguments", __func__); + return QDF_STATUS_E_FAILURE; + } + qdf_mem_copy(cap, (((uint8_t *)param_buf->hal_reg_capabilities) + + sizeof(uint32_t)), + sizeof(struct wlan_psoc_hal_reg_capability)); + + cap->wireless_modes = convert_wireless_modes_tlv( + param_buf->hal_reg_capabilities->wireless_modes); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_num_mem_reqs_tlv() - Extract number of memory entries requested + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * + * Return: Number of entries requested + */ +static uint32_t extract_num_mem_reqs_tlv(wmi_unified_t wmi_handle, + void *evt_buf) +{ + WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf; + wmi_service_ready_event_fixed_param *ev; + + param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *) evt_buf; + + ev = (wmi_service_ready_event_fixed_param *) param_buf->fixed_param; + if (!ev) { + qdf_print("%s: wmi_buf_alloc failed", __func__); + return 0; + } + + if (ev->num_mem_reqs > param_buf->num_mem_reqs) { + WMI_LOGE("Invalid num_mem_reqs %d:%d", + ev->num_mem_reqs, param_buf->num_mem_reqs); + return 0; + } + + return ev->num_mem_reqs; +} + +/** + * extract_host_mem_req_tlv() - Extract host memory required from + * service ready event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @mem_reqs: pointer to host memory request structure + * @num_active_peers: number of active peers for peer cache + * @num_peers: number of peers + * @fw_prio: FW priority + * @idx: index for memory request + * + * Return: Host memory request parameters requested by target + */ +static QDF_STATUS extract_host_mem_req_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + host_mem_req *mem_reqs, + uint32_t num_active_peers, + uint32_t num_peers, + enum wmi_fw_mem_prio fw_prio, + uint16_t idx) +{ + WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf; + + param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *)evt_buf; + + mem_reqs->req_id = (uint32_t)param_buf->mem_reqs[idx].req_id; + mem_reqs->unit_size = (uint32_t)param_buf->mem_reqs[idx].unit_size; + mem_reqs->num_unit_info = + (uint32_t)param_buf->mem_reqs[idx].num_unit_info; + mem_reqs->num_units = (uint32_t)param_buf->mem_reqs[idx].num_units; + mem_reqs->tgt_num_units = 0; + + if (((fw_prio == WMI_FW_MEM_HIGH_PRIORITY) && + (mem_reqs->num_unit_info & + REQ_TO_HOST_FOR_CONT_MEMORY)) || + ((fw_prio == WMI_FW_MEM_LOW_PRIORITY) && + (!(mem_reqs->num_unit_info & + REQ_TO_HOST_FOR_CONT_MEMORY)))) { + /* First allocate the memory that requires contiguous memory */ + mem_reqs->tgt_num_units = mem_reqs->num_units; + if (mem_reqs->num_unit_info) { + if (mem_reqs->num_unit_info & + NUM_UNITS_IS_NUM_PEERS) { + /* + * number of units allocated is equal to number + * of peers, 1 extra for self peer on target. + * this needs to be fixed, host and target can + * get out of sync + */ + mem_reqs->tgt_num_units = num_peers + 1; + } + if (mem_reqs->num_unit_info & + NUM_UNITS_IS_NUM_ACTIVE_PEERS) { + /* + * Requesting allocation of memory using + * num_active_peers in qcache. if qcache is + * disabled in host, then it should allocate + * memory for num_peers instead of + * num_active_peers. + */ + if (num_active_peers) + mem_reqs->tgt_num_units = + num_active_peers + 1; + else + mem_reqs->tgt_num_units = + num_peers + 1; + } + } + + wmi_debug("idx %d req %d num_units %d num_unit_info %d" + "unit size %d actual units %d", + idx, mem_reqs->req_id, + mem_reqs->num_units, + mem_reqs->num_unit_info, + mem_reqs->unit_size, + mem_reqs->tgt_num_units); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * save_fw_version_in_service_ready_tlv() - Save fw version in service + * ready function + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +save_fw_version_in_service_ready_tlv(wmi_unified_t wmi_handle, void *evt_buf) +{ + WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf; + wmi_service_ready_event_fixed_param *ev; + + + param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *) evt_buf; + + ev = (wmi_service_ready_event_fixed_param *) param_buf->fixed_param; + if (!ev) { + qdf_print("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_FAILURE; + } + + /*Save fw version from service ready message */ + /*This will be used while sending INIT message */ + qdf_mem_copy(&wmi_handle->fw_abi_version, &ev->fw_abi_vers, + sizeof(wmi_handle->fw_abi_version)); + + return QDF_STATUS_SUCCESS; +} + +/** + * ready_extract_init_status_tlv() - Extract init status from ready event + * @wmi_handle: wmi handle + * @param evt_buf: Pointer to event buffer + * + * Return: ready status + */ +static uint32_t ready_extract_init_status_tlv(wmi_unified_t wmi_handle, + void *evt_buf) +{ + WMI_READY_EVENTID_param_tlvs *param_buf = NULL; + wmi_ready_event_fixed_param *ev = NULL; + + param_buf = (WMI_READY_EVENTID_param_tlvs *) evt_buf; + ev = param_buf->fixed_param; + + qdf_print("%s:%d", __func__, ev->status); + + return ev->status; +} + +/** + * ready_extract_mac_addr_tlv() - extract mac address from ready event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param macaddr: Pointer to hold MAC address + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS ready_extract_mac_addr_tlv(wmi_unified_t wmi_hamdle, + void *evt_buf, uint8_t *macaddr) +{ + WMI_READY_EVENTID_param_tlvs *param_buf = NULL; + wmi_ready_event_fixed_param *ev = NULL; + + + param_buf = (WMI_READY_EVENTID_param_tlvs *) evt_buf; + ev = param_buf->fixed_param; + + WMI_MAC_ADDR_TO_CHAR_ARRAY(&ev->mac_addr, macaddr); + + return QDF_STATUS_SUCCESS; +} + +/** + * ready_extract_mac_addr_list_tlv() - extract MAC address list from ready event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param macaddr: Pointer to hold number of MAC addresses + * + * Return: Pointer to addr list + */ +static wmi_host_mac_addr *ready_extract_mac_addr_list_tlv(wmi_unified_t wmi_hamdle, + void *evt_buf, uint8_t *num_mac) +{ + WMI_READY_EVENTID_param_tlvs *param_buf = NULL; + wmi_ready_event_fixed_param *ev = NULL; + + param_buf = (WMI_READY_EVENTID_param_tlvs *) evt_buf; + ev = param_buf->fixed_param; + + *num_mac = ev->num_extra_mac_addr; + + return (wmi_host_mac_addr *) param_buf->mac_addr_list; +} + +/** + * extract_ready_params_tlv() - Extract data from ready event apart from + * status, macaddr and version. + * @wmi_handle: Pointer to WMI handle. + * @evt_buf: Pointer to Ready event buffer. + * @ev_param: Pointer to host defined struct to copy the data from event. + * + * Return: QDF_STATUS_SUCCESS on success. + */ +static QDF_STATUS extract_ready_event_params_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_ready_ev_param *ev_param) +{ + WMI_READY_EVENTID_param_tlvs *param_buf = NULL; + wmi_ready_event_fixed_param *ev = NULL; + + param_buf = (WMI_READY_EVENTID_param_tlvs *) evt_buf; + ev = param_buf->fixed_param; + + ev_param->status = ev->status; + ev_param->num_dscp_table = ev->num_dscp_table; + ev_param->num_extra_mac_addr = ev->num_extra_mac_addr; + ev_param->num_total_peer = ev->num_total_peers; + ev_param->num_extra_peer = ev->num_extra_peers; + /* Agile_capability in ready event is supported in TLV target, + * as per aDFS FR + */ + ev_param->max_ast_index = ev->max_ast_index; + ev_param->pktlog_defs_checksum = ev->pktlog_defs_checksum; + ev_param->agile_capability = 1; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_dbglog_data_len_tlv() - extract debuglog data length + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * + * Return: length + */ +static uint8_t *extract_dbglog_data_len_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *len) +{ + WMI_DEBUG_MESG_EVENTID_param_tlvs *param_buf; + + param_buf = (WMI_DEBUG_MESG_EVENTID_param_tlvs *) evt_buf; + + *len = param_buf->num_bufp; + + return param_buf->bufp; +} + + +#ifdef MGMT_FRAME_RX_DECRYPT_ERROR +#define IS_WMI_RX_MGMT_FRAME_STATUS_INVALID(_status) false +#else +#define IS_WMI_RX_MGMT_FRAME_STATUS_INVALID(_status) \ + ((_status) & WMI_RXERR_DECRYPT) +#endif + +/** + * extract_mgmt_rx_params_tlv() - extract management rx params from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param hdr: Pointer to hold header + * @param bufp: Pointer to hold pointer to rx param buffer + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_mgmt_rx_params_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct mgmt_rx_event_params *hdr, + uint8_t **bufp) +{ + WMI_MGMT_RX_EVENTID_param_tlvs *param_tlvs = NULL; + wmi_mgmt_rx_hdr *ev_hdr = NULL; + int i; + + param_tlvs = (WMI_MGMT_RX_EVENTID_param_tlvs *) evt_buf; + if (!param_tlvs) { + WMI_LOGE("Get NULL point message from FW"); + return QDF_STATUS_E_INVAL; + } + + ev_hdr = param_tlvs->hdr; + if (!hdr) { + WMI_LOGE("Rx event is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (IS_WMI_RX_MGMT_FRAME_STATUS_INVALID(ev_hdr->status)) { + WMI_LOGE("%s: RX mgmt frame decrypt error, discard it", + __func__); + return QDF_STATUS_E_INVAL; + } + + if (ev_hdr->buf_len > param_tlvs->num_bufp) { + WMI_LOGE("Rx mgmt frame length mismatch, discard it"); + return QDF_STATUS_E_INVAL; + } + + hdr->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + wmi_handle, + ev_hdr->pdev_id); + hdr->chan_freq = ev_hdr->chan_freq; + hdr->channel = ev_hdr->channel; + hdr->snr = ev_hdr->snr; + hdr->rate = ev_hdr->rate; + hdr->phy_mode = ev_hdr->phy_mode; + hdr->buf_len = ev_hdr->buf_len; + hdr->status = ev_hdr->status; + hdr->flags = ev_hdr->flags; + hdr->rssi = ev_hdr->rssi; + hdr->tsf_delta = ev_hdr->tsf_delta; + for (i = 0; i < ATH_MAX_ANTENNA; i++) + hdr->rssi_ctl[i] = ev_hdr->rssi_ctl[i]; + + *bufp = param_tlvs->bufp; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_vdev_roam_param_tlv() - extract vdev roam param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold roam param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_vdev_roam_param_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_roam_event *param) +{ + WMI_ROAM_EVENTID_param_tlvs *param_buf; + wmi_roam_event_fixed_param *evt; + + param_buf = (WMI_ROAM_EVENTID_param_tlvs *) evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid roam event buffer"); + return QDF_STATUS_E_INVAL; + } + + evt = param_buf->fixed_param; + qdf_mem_zero(param, sizeof(*param)); + + param->vdev_id = evt->vdev_id; + param->reason = evt->reason; + param->rssi = evt->rssi; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_vdev_scan_ev_param_tlv() - extract vdev scan param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold vdev scan param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_vdev_scan_ev_param_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct scan_event *param) +{ + WMI_SCAN_EVENTID_param_tlvs *param_buf = NULL; + wmi_scan_event_fixed_param *evt = NULL; + + param_buf = (WMI_SCAN_EVENTID_param_tlvs *) evt_buf; + evt = param_buf->fixed_param; + + qdf_mem_zero(param, sizeof(*param)); + + switch (evt->event) { + case WMI_SCAN_EVENT_STARTED: + param->type = SCAN_EVENT_TYPE_STARTED; + break; + case WMI_SCAN_EVENT_COMPLETED: + param->type = SCAN_EVENT_TYPE_COMPLETED; + break; + case WMI_SCAN_EVENT_BSS_CHANNEL: + param->type = SCAN_EVENT_TYPE_BSS_CHANNEL; + break; + case WMI_SCAN_EVENT_FOREIGN_CHANNEL: + param->type = SCAN_EVENT_TYPE_FOREIGN_CHANNEL; + break; + case WMI_SCAN_EVENT_DEQUEUED: + param->type = SCAN_EVENT_TYPE_DEQUEUED; + break; + case WMI_SCAN_EVENT_PREEMPTED: + param->type = SCAN_EVENT_TYPE_PREEMPTED; + break; + case WMI_SCAN_EVENT_START_FAILED: + param->type = SCAN_EVENT_TYPE_START_FAILED; + break; + case WMI_SCAN_EVENT_RESTARTED: + param->type = SCAN_EVENT_TYPE_RESTARTED; + break; + case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT: + param->type = SCAN_EVENT_TYPE_FOREIGN_CHANNEL_EXIT; + break; + case WMI_SCAN_EVENT_MAX: + default: + param->type = SCAN_EVENT_TYPE_MAX; + break; + }; + + switch (evt->reason) { + case WMI_SCAN_REASON_NONE: + param->reason = SCAN_REASON_NONE; + break; + case WMI_SCAN_REASON_COMPLETED: + param->reason = SCAN_REASON_COMPLETED; + break; + case WMI_SCAN_REASON_CANCELLED: + param->reason = SCAN_REASON_CANCELLED; + break; + case WMI_SCAN_REASON_PREEMPTED: + param->reason = SCAN_REASON_PREEMPTED; + break; + case WMI_SCAN_REASON_TIMEDOUT: + param->reason = SCAN_REASON_TIMEDOUT; + break; + case WMI_SCAN_REASON_INTERNAL_FAILURE: + param->reason = SCAN_REASON_INTERNAL_FAILURE; + break; + case WMI_SCAN_REASON_SUSPENDED: + param->reason = SCAN_REASON_SUSPENDED; + break; + case WMI_SCAN_REASON_DFS_VIOLATION: + param->reason = SCAN_REASON_DFS_VIOLATION; + break; + case WMI_SCAN_REASON_MAX: + param->reason = SCAN_REASON_MAX; + break; + default: + param->reason = SCAN_REASON_MAX; + break; + }; + + param->chan_freq = evt->channel_freq; + param->requester = evt->requestor; + param->scan_id = evt->scan_id; + param->vdev_id = evt->vdev_id; + param->timestamp = evt->tsf_timestamp; + + return QDF_STATUS_SUCCESS; +} + +#ifdef FEATURE_WLAN_SCAN_PNO +/** + * extract_nlo_match_ev_param_tlv() - extract NLO match param from event + * @wmi_handle: pointer to WMI handle + * @evt_buf: pointer to WMI event buffer + * @param: pointer to scan event param for NLO match + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_nlo_match_ev_param_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + struct scan_event *param) +{ + WMI_NLO_MATCH_EVENTID_param_tlvs *param_buf = evt_buf; + wmi_nlo_event *evt = param_buf->fixed_param; + + qdf_mem_zero(param, sizeof(*param)); + + param->type = SCAN_EVENT_TYPE_NLO_MATCH; + param->vdev_id = evt->vdev_id; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_nlo_complete_ev_param_tlv() - extract NLO complete param from event + * @wmi_handle: pointer to WMI handle + * @evt_buf: pointer to WMI event buffer + * @param: pointer to scan event param for NLO complete + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_nlo_complete_ev_param_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + struct scan_event *param) +{ + WMI_NLO_SCAN_COMPLETE_EVENTID_param_tlvs *param_buf = evt_buf; + wmi_nlo_event *evt = param_buf->fixed_param; + + qdf_mem_zero(param, sizeof(*param)); + + param->type = SCAN_EVENT_TYPE_NLO_COMPLETE; + param->vdev_id = evt->vdev_id; + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * extract_all_stats_counts_tlv() - extract all stats count from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param stats_param: Pointer to hold stats count + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_all_stats_counts_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_stats_event *stats_param) +{ + wmi_stats_event_fixed_param *ev; + wmi_per_chain_rssi_stats *rssi_event; + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + uint64_t min_data_len; + + qdf_mem_zero(stats_param, sizeof(*stats_param)); + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) evt_buf; + ev = (wmi_stats_event_fixed_param *) param_buf->fixed_param; + rssi_event = param_buf->chain_stats; + if (!ev) { + WMI_LOGE("%s: event fixed param NULL", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (param_buf->num_data > WMI_SVC_MSG_MAX_SIZE - sizeof(*ev)) { + WMI_LOGE("num_data : %u is invalid", param_buf->num_data); + return QDF_STATUS_E_FAULT; + } + + switch (ev->stats_id) { + case WMI_REQUEST_PEER_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_PEER_STAT; + break; + + case WMI_REQUEST_AP_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_AP_STAT; + break; + + case WMI_REQUEST_PDEV_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_PDEV_STAT; + break; + + case WMI_REQUEST_VDEV_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_VDEV_STAT; + break; + + case WMI_REQUEST_BCNFLT_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_BCNFLT_STAT; + break; + + case WMI_REQUEST_VDEV_RATE_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_VDEV_RATE_STAT; + break; + + case WMI_REQUEST_BCN_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_BCN_STAT; + break; + case WMI_REQUEST_PEER_EXTD_STAT: + stats_param->stats_id |= WMI_REQUEST_PEER_EXTD_STAT; + break; + + case WMI_REQUEST_PEER_EXTD2_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_PEER_ADV_STATS; + break; + + default: + stats_param->stats_id = 0; + break; + + } + + /* ev->num_*_stats may cause uint32_t overflow, so use uint64_t + * to save total length calculated + */ + min_data_len = + (((uint64_t)ev->num_pdev_stats) * sizeof(wmi_pdev_stats)) + + (((uint64_t)ev->num_vdev_stats) * sizeof(wmi_vdev_stats)) + + (((uint64_t)ev->num_peer_stats) * sizeof(wmi_peer_stats)) + + (((uint64_t)ev->num_bcnflt_stats) * + sizeof(wmi_bcnfilter_stats_t)) + + (((uint64_t)ev->num_chan_stats) * sizeof(wmi_chan_stats)) + + (((uint64_t)ev->num_mib_stats) * sizeof(wmi_mib_stats)) + + (((uint64_t)ev->num_bcn_stats) * sizeof(wmi_bcn_stats)) + + (((uint64_t)ev->num_peer_extd_stats) * + sizeof(wmi_peer_extd_stats)) + + (((uint64_t)ev->num_mib_extd_stats) * + sizeof(wmi_mib_extd_stats)); + if (param_buf->num_data != min_data_len) { + WMI_LOGE("data len: %u isn't same as calculated: %llu", + param_buf->num_data, min_data_len); + return QDF_STATUS_E_FAULT; + } + + stats_param->last_event = ev->last_event; + stats_param->num_pdev_stats = ev->num_pdev_stats; + stats_param->num_pdev_ext_stats = 0; + stats_param->num_vdev_stats = ev->num_vdev_stats; + stats_param->num_peer_stats = ev->num_peer_stats; + stats_param->num_peer_extd_stats = ev->num_peer_extd_stats; + stats_param->num_bcnflt_stats = ev->num_bcnflt_stats; + stats_param->num_chan_stats = ev->num_chan_stats; + stats_param->num_mib_stats = ev->num_mib_stats; + stats_param->num_mib_extd_stats = ev->num_mib_extd_stats; + stats_param->num_bcn_stats = ev->num_bcn_stats; + stats_param->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + wmi_handle, + ev->pdev_id); + + /* if chain_stats is not populated */ + if (!param_buf->chain_stats || !param_buf->num_chain_stats) + return QDF_STATUS_SUCCESS; + + if (WMITLV_TAG_STRUC_wmi_per_chain_rssi_stats != + WMITLV_GET_TLVTAG(rssi_event->tlv_header)) + return QDF_STATUS_SUCCESS; + + if (WMITLV_GET_STRUCT_TLVLEN(wmi_per_chain_rssi_stats) != + WMITLV_GET_TLVLEN(rssi_event->tlv_header)) + return QDF_STATUS_SUCCESS; + + if (rssi_event->num_per_chain_rssi_stats >= + WMITLV_GET_TLVLEN(rssi_event->tlv_header)) { + WMI_LOGE("num_per_chain_rssi_stats:%u is out of bounds", + rssi_event->num_per_chain_rssi_stats); + return QDF_STATUS_E_INVAL; + } + stats_param->num_rssi_stats = rssi_event->num_per_chain_rssi_stats; + + /* if peer_adv_stats is not populated */ + if (!param_buf->num_peer_extd2_stats) + return QDF_STATUS_SUCCESS; + + stats_param->num_peer_adv_stats = param_buf->num_peer_extd2_stats; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_tx_stats() - extract pdev tx stats from event + */ +static void extract_pdev_tx_stats(wmi_host_dbg_tx_stats *tx, + struct wlan_dbg_tx_stats *tx_stats) +{ + /* Tx Stats */ + tx->comp_queued = tx_stats->comp_queued; + tx->comp_delivered = tx_stats->comp_delivered; + tx->msdu_enqued = tx_stats->msdu_enqued; + tx->mpdu_enqued = tx_stats->mpdu_enqued; + tx->wmm_drop = tx_stats->wmm_drop; + tx->local_enqued = tx_stats->local_enqued; + tx->local_freed = tx_stats->local_freed; + tx->hw_queued = tx_stats->hw_queued; + tx->hw_reaped = tx_stats->hw_reaped; + tx->underrun = tx_stats->underrun; + tx->tx_abort = tx_stats->tx_abort; + tx->mpdus_requed = tx_stats->mpdus_requed; + tx->data_rc = tx_stats->data_rc; + tx->self_triggers = tx_stats->self_triggers; + tx->sw_retry_failure = tx_stats->sw_retry_failure; + tx->illgl_rate_phy_err = tx_stats->illgl_rate_phy_err; + tx->pdev_cont_xretry = tx_stats->pdev_cont_xretry; + tx->pdev_tx_timeout = tx_stats->pdev_tx_timeout; + tx->pdev_resets = tx_stats->pdev_resets; + tx->stateless_tid_alloc_failure = tx_stats->stateless_tid_alloc_failure; + tx->phy_underrun = tx_stats->phy_underrun; + tx->txop_ovf = tx_stats->txop_ovf; + + return; +} + + +/** + * extract_pdev_rx_stats() - extract pdev rx stats from event + */ +static void extract_pdev_rx_stats(wmi_host_dbg_rx_stats *rx, + struct wlan_dbg_rx_stats *rx_stats) +{ + /* Rx Stats */ + rx->mid_ppdu_route_change = rx_stats->mid_ppdu_route_change; + rx->status_rcvd = rx_stats->status_rcvd; + rx->r0_frags = rx_stats->r0_frags; + rx->r1_frags = rx_stats->r1_frags; + rx->r2_frags = rx_stats->r2_frags; + /* Only TLV */ + rx->r3_frags = 0; + rx->htt_msdus = rx_stats->htt_msdus; + rx->htt_mpdus = rx_stats->htt_mpdus; + rx->loc_msdus = rx_stats->loc_msdus; + rx->loc_mpdus = rx_stats->loc_mpdus; + rx->oversize_amsdu = rx_stats->oversize_amsdu; + rx->phy_errs = rx_stats->phy_errs; + rx->phy_err_drop = rx_stats->phy_err_drop; + rx->mpdu_errs = rx_stats->mpdu_errs; + + return; +} + +/** + * extract_pdev_stats_tlv() - extract pdev stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into pdev stats + * @param pdev_stats: Pointer to hold pdev stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_pdev_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_pdev_stats *pdev_stats) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_stats_event_fixed_param *ev_param; + uint8_t *data; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) evt_buf; + ev_param = (wmi_stats_event_fixed_param *) param_buf->fixed_param; + + data = param_buf->data; + + if (index < ev_param->num_pdev_stats) { + wmi_pdev_stats *ev = (wmi_pdev_stats *) ((data) + + (index * sizeof(wmi_pdev_stats))); + + pdev_stats->chan_nf = ev->chan_nf; + pdev_stats->tx_frame_count = ev->tx_frame_count; + pdev_stats->rx_frame_count = ev->rx_frame_count; + pdev_stats->rx_clear_count = ev->rx_clear_count; + pdev_stats->cycle_count = ev->cycle_count; + pdev_stats->phy_err_count = ev->phy_err_count; + pdev_stats->chan_tx_pwr = ev->chan_tx_pwr; + + extract_pdev_tx_stats(&(pdev_stats->pdev_stats.tx), + &(ev->pdev_stats.tx)); + extract_pdev_rx_stats(&(pdev_stats->pdev_stats.rx), + &(ev->pdev_stats.rx)); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_unit_test_tlv() - extract unit test data + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param unit_test: pointer to hold unit test data + * @param maxspace: Amount of space in evt_buf + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_unit_test_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_unit_test_event *unit_test, uint32_t maxspace) +{ + WMI_UNIT_TEST_EVENTID_param_tlvs *param_buf; + wmi_unit_test_event_fixed_param *ev_param; + uint32_t num_bufp; + uint32_t copy_size; + uint8_t *bufp; + + param_buf = (WMI_UNIT_TEST_EVENTID_param_tlvs *) evt_buf; + ev_param = param_buf->fixed_param; + bufp = param_buf->bufp; + num_bufp = param_buf->num_bufp; + unit_test->vdev_id = ev_param->vdev_id; + unit_test->module_id = ev_param->module_id; + unit_test->diag_token = ev_param->diag_token; + unit_test->flag = ev_param->flag; + unit_test->payload_len = ev_param->payload_len; + wmi_debug("vdev_id:%d mod_id:%d diag_token:%d flag:%d", + ev_param->vdev_id, + ev_param->module_id, + ev_param->diag_token, + ev_param->flag); + wmi_debug("Unit-test data given below %d", num_bufp); + qdf_trace_hex_dump(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + bufp, num_bufp); + copy_size = (num_bufp < maxspace) ? num_bufp : maxspace; + qdf_mem_copy(unit_test->buffer, bufp, copy_size); + unit_test->buffer_len = copy_size; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_ext_stats_tlv() - extract extended pdev stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into extended pdev stats + * @param pdev_ext_stats: Pointer to hold extended pdev stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_pdev_ext_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_pdev_ext_stats *pdev_ext_stats) +{ + return QDF_STATUS_SUCCESS; +} + +/** + * extract_vdev_stats_tlv() - extract vdev stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into vdev stats + * @param vdev_stats: Pointer to hold vdev stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_vdev_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_vdev_stats *vdev_stats) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_stats_event_fixed_param *ev_param; + uint8_t *data; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) evt_buf; + ev_param = (wmi_stats_event_fixed_param *) param_buf->fixed_param; + data = (uint8_t *) param_buf->data; + + if (index < ev_param->num_vdev_stats) { + wmi_vdev_stats *ev = (wmi_vdev_stats *) ((data) + + ((ev_param->num_pdev_stats) * + sizeof(wmi_pdev_stats)) + + (index * sizeof(wmi_vdev_stats))); + + vdev_stats->vdev_id = ev->vdev_id; + vdev_stats->vdev_snr.bcn_snr = ev->vdev_snr.bcn_snr; + vdev_stats->vdev_snr.dat_snr = ev->vdev_snr.dat_snr; + + OS_MEMCPY(vdev_stats->tx_frm_cnt, ev->tx_frm_cnt, + sizeof(ev->tx_frm_cnt)); + vdev_stats->rx_frm_cnt = ev->rx_frm_cnt; + OS_MEMCPY(vdev_stats->multiple_retry_cnt, + ev->multiple_retry_cnt, + sizeof(ev->multiple_retry_cnt)); + OS_MEMCPY(vdev_stats->fail_cnt, ev->fail_cnt, + sizeof(ev->fail_cnt)); + vdev_stats->rts_fail_cnt = ev->rts_fail_cnt; + vdev_stats->rts_succ_cnt = ev->rts_succ_cnt; + vdev_stats->rx_err_cnt = ev->rx_err_cnt; + vdev_stats->rx_discard_cnt = ev->rx_discard_cnt; + vdev_stats->ack_fail_cnt = ev->ack_fail_cnt; + OS_MEMCPY(vdev_stats->tx_rate_history, ev->tx_rate_history, + sizeof(ev->tx_rate_history)); + OS_MEMCPY(vdev_stats->bcn_rssi_history, ev->bcn_rssi_history, + sizeof(ev->bcn_rssi_history)); + + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_per_chain_rssi_stats_tlv() - api to extract rssi stats from event + * buffer + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @index: Index into vdev stats + * @rssi_stats: Pointer to hold rssi stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_per_chain_rssi_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, + struct wmi_host_per_chain_rssi_stats *rssi_stats) +{ + uint8_t *data; + wmi_rssi_stats *fw_rssi_stats; + wmi_per_chain_rssi_stats *rssi_event; + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + + if (!evt_buf) { + WMI_LOGE("evt_buf is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) evt_buf; + rssi_event = param_buf->chain_stats; + + if (index >= rssi_event->num_per_chain_rssi_stats) { + WMI_LOGE("invalid index"); + return QDF_STATUS_E_INVAL; + } + + data = ((uint8_t *)(&rssi_event[1])) + WMI_TLV_HDR_SIZE; + fw_rssi_stats = &((wmi_rssi_stats *)data)[index]; + if (fw_rssi_stats->vdev_id >= WLAN_UMAC_PDEV_MAX_VDEVS) + return QDF_STATUS_E_INVAL; + + rssi_stats->vdev_id = fw_rssi_stats->vdev_id; + qdf_mem_copy(rssi_stats->rssi_avg_beacon, + fw_rssi_stats->rssi_avg_beacon, + sizeof(fw_rssi_stats->rssi_avg_beacon)); + qdf_mem_copy(rssi_stats->rssi_avg_data, + fw_rssi_stats->rssi_avg_data, + sizeof(fw_rssi_stats->rssi_avg_data)); + qdf_mem_copy(&rssi_stats->peer_macaddr, + &fw_rssi_stats->peer_macaddr, + sizeof(fw_rssi_stats->peer_macaddr)); + + return QDF_STATUS_SUCCESS; +} + + + +/** + * extract_bcn_stats_tlv() - extract bcn stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into vdev stats + * @param bcn_stats: Pointer to hold bcn stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_bcn_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_bcn_stats *bcn_stats) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_stats_event_fixed_param *ev_param; + uint8_t *data; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) evt_buf; + ev_param = (wmi_stats_event_fixed_param *) param_buf->fixed_param; + data = (uint8_t *) param_buf->data; + + if (index < ev_param->num_bcn_stats) { + wmi_bcn_stats *ev = (wmi_bcn_stats *) ((data) + + ((ev_param->num_pdev_stats) * sizeof(wmi_pdev_stats)) + + ((ev_param->num_vdev_stats) * sizeof(wmi_vdev_stats)) + + ((ev_param->num_peer_stats) * sizeof(wmi_peer_stats)) + + ((ev_param->num_chan_stats) * sizeof(wmi_chan_stats)) + + ((ev_param->num_mib_stats) * sizeof(wmi_mib_stats)) + + (index * sizeof(wmi_bcn_stats))); + + bcn_stats->vdev_id = ev->vdev_id; + bcn_stats->tx_bcn_succ_cnt = ev->tx_bcn_succ_cnt; + bcn_stats->tx_bcn_outage_cnt = ev->tx_bcn_outage_cnt; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_peer_stats_tlv() - extract peer stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into peer stats + * @param peer_stats: Pointer to hold peer stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_peer_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_peer_stats *peer_stats) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_stats_event_fixed_param *ev_param; + uint8_t *data; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) evt_buf; + ev_param = (wmi_stats_event_fixed_param *) param_buf->fixed_param; + data = (uint8_t *) param_buf->data; + + if (index < ev_param->num_peer_stats) { + wmi_peer_stats *ev = (wmi_peer_stats *) ((data) + + ((ev_param->num_pdev_stats) * sizeof(wmi_pdev_stats)) + + ((ev_param->num_vdev_stats) * sizeof(wmi_vdev_stats)) + + (index * sizeof(wmi_peer_stats))); + + OS_MEMSET(peer_stats, 0, sizeof(wmi_host_peer_stats)); + + OS_MEMCPY(&(peer_stats->peer_macaddr), + &(ev->peer_macaddr), sizeof(wmi_mac_addr)); + + peer_stats->peer_rssi = ev->peer_rssi; + peer_stats->peer_tx_rate = ev->peer_tx_rate; + peer_stats->peer_rx_rate = ev->peer_rx_rate; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_bcnflt_stats_tlv() - extract bcn fault stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into bcn fault stats + * @param bcnflt_stats: Pointer to hold bcn fault stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_bcnflt_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_bcnflt_stats *peer_stats) +{ + return QDF_STATUS_SUCCESS; +} + +/** + * extract_peer_adv_stats_tlv() - extract adv peer stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into extended peer stats + * @param peer_adv_stats: Pointer to hold adv peer stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_peer_adv_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_host_peer_adv_stats + *peer_adv_stats) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_peer_extd2_stats *adv_stats; + int i; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *)evt_buf; + + adv_stats = param_buf->peer_extd2_stats; + if (!adv_stats) { + WMI_LOGD("%s: no peer_adv stats in event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + + for (i = 0; i < param_buf->num_peer_extd2_stats; i++) { + WMI_MAC_ADDR_TO_CHAR_ARRAY(&adv_stats[i].peer_macaddr, + peer_adv_stats[i].peer_macaddr); + peer_adv_stats[i].fcs_count = adv_stats[i].rx_fcs_err; + peer_adv_stats[i].rx_bytes = + (uint64_t)adv_stats[i].rx_bytes_u32 << + WMI_LOWER_BITS_SHIFT_32 | + adv_stats[i].rx_bytes_l32; + peer_adv_stats[i].rx_count = adv_stats[i].rx_mpdus; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_peer_extd_stats_tlv() - extract extended peer stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into extended peer stats + * @param peer_extd_stats: Pointer to hold extended peer stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_peer_extd_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, + wmi_host_peer_extd_stats *peer_extd_stats) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_stats_event_fixed_param *ev_param; + uint8_t *data; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *)evt_buf; + ev_param = (wmi_stats_event_fixed_param *)param_buf->fixed_param; + data = (uint8_t *)param_buf->data; + if (!data) + return QDF_STATUS_E_FAILURE; + + if (index < ev_param->num_peer_extd_stats) { + wmi_peer_extd_stats *ev = (wmi_peer_extd_stats *) (data + + (ev_param->num_pdev_stats * sizeof(wmi_pdev_stats)) + + (ev_param->num_vdev_stats * sizeof(wmi_vdev_stats)) + + (ev_param->num_peer_stats * sizeof(wmi_peer_stats)) + + (ev_param->num_bcnflt_stats * + sizeof(wmi_bcnfilter_stats_t)) + + (ev_param->num_chan_stats * sizeof(wmi_chan_stats)) + + (ev_param->num_mib_stats * sizeof(wmi_mib_stats)) + + (ev_param->num_bcn_stats * sizeof(wmi_bcn_stats)) + + (index * sizeof(wmi_peer_extd_stats))); + + qdf_mem_zero(peer_extd_stats, sizeof(wmi_host_peer_extd_stats)); + qdf_mem_copy(&peer_extd_stats->peer_macaddr, &ev->peer_macaddr, + sizeof(wmi_mac_addr)); + + peer_extd_stats->rx_mc_bc_cnt = ev->rx_mc_bc_cnt; + } + + return QDF_STATUS_SUCCESS; + +} + +/** + * extract_chan_stats_tlv() - extract chan stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into chan stats + * @param vdev_extd_stats: Pointer to hold chan stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_chan_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_chan_stats *chan_stats) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_stats_event_fixed_param *ev_param; + uint8_t *data; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) evt_buf; + ev_param = (wmi_stats_event_fixed_param *) param_buf->fixed_param; + data = (uint8_t *) param_buf->data; + + if (index < ev_param->num_chan_stats) { + wmi_chan_stats *ev = (wmi_chan_stats *) ((data) + + ((ev_param->num_pdev_stats) * sizeof(wmi_pdev_stats)) + + ((ev_param->num_vdev_stats) * sizeof(wmi_vdev_stats)) + + ((ev_param->num_peer_stats) * sizeof(wmi_peer_stats)) + + (index * sizeof(wmi_chan_stats))); + + + /* Non-TLV doesn't have num_chan_stats */ + chan_stats->chan_mhz = ev->chan_mhz; + chan_stats->sampling_period_us = ev->sampling_period_us; + chan_stats->rx_clear_count = ev->rx_clear_count; + chan_stats->tx_duration_us = ev->tx_duration_us; + chan_stats->rx_duration_us = ev->rx_duration_us; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_MIB_STATS +/** + * extract_mib_stats_tlv() - extract mib stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param mib_stats: pointer to hold mib stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_mib_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + struct mib_stats_metrics + *mib_stats) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_stats_event_fixed_param *ev_param; + uint8_t *data; + wmi_mib_stats *ev; + wmi_mib_extd_stats *ev_extd; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *)evt_buf; + ev_param = (wmi_stats_event_fixed_param *)param_buf->fixed_param; + data = (uint8_t *)param_buf->data; + + ev = (wmi_mib_stats *)(data + + ev_param->num_pdev_stats * sizeof(wmi_pdev_stats) + + ev_param->num_vdev_stats * sizeof(wmi_vdev_stats) + + ev_param->num_peer_stats * sizeof(wmi_peer_stats) + + ev_param->num_bcnflt_stats * + sizeof(wmi_bcnfilter_stats_t) + + ev_param->num_chan_stats * sizeof(wmi_chan_stats)); + + qdf_mem_zero(mib_stats, sizeof(*mib_stats)); + + mib_stats->mib_counters.tx_frags = + ev->tx_mpdu_grp_frag_cnt; + mib_stats->mib_counters.group_tx_frames = + ev->tx_msdu_grp_frm_cnt; + mib_stats->mib_counters.failed_cnt = ev->tx_msdu_fail_cnt; + mib_stats->mib_counters.rx_frags = ev->rx_mpdu_frag_cnt; + mib_stats->mib_counters.group_rx_frames = + ev->rx_msdu_grp_frm_cnt; + mib_stats->mib_counters.fcs_error_cnt = + ev->rx_mpdu_fcs_err; + mib_stats->mib_counters.tx_frames = + ev->tx_msdu_frm_cnt; + mib_stats->mib_mac_statistics.retry_cnt = + ev->tx_msdu_retry_cnt; + mib_stats->mib_mac_statistics.frame_dup_cnt = + ev->rx_frm_dup_cnt; + mib_stats->mib_mac_statistics.rts_success_cnt = + ev->tx_rts_success_cnt; + mib_stats->mib_mac_statistics.rts_fail_cnt = + ev->tx_rts_fail_cnt; + + mib_stats->mib_qos_counters.qos_tx_frag_cnt = + ev->tx_Qos_mpdu_grp_frag_cnt; + mib_stats->mib_qos_counters.qos_retry_cnt = + ev->tx_Qos_msdu_retry_UP; + mib_stats->mib_qos_counters.qos_failed_cnt = + ev->tx_Qos_msdu_fail_UP; + mib_stats->mib_qos_counters.qos_frame_dup_cnt = + ev->rx_Qos_frm_dup_cnt_UP; + mib_stats->mib_qos_counters.qos_rts_success_cnt = + ev->tx_Qos_rts_success_cnt_UP; + mib_stats->mib_qos_counters.qos_rts_fail_cnt = + ev->tx_Qos_rts_fail_cnt_UP; + mib_stats->mib_qos_counters.qos_rx_frag_cnt = + ev->rx_Qos_mpdu_frag_cnt_UP; + mib_stats->mib_qos_counters.qos_tx_frame_cnt = + ev->tx_Qos_msdu_frm_cnt_UP; + mib_stats->mib_qos_counters.qos_discarded_frame_cnt = + ev->rx_Qos_msdu_discard_cnt_UP; + mib_stats->mib_qos_counters.qos_mpdu_rx_cnt = + ev->rx_Qos_mpdu_cnt; + mib_stats->mib_qos_counters.qos_retries_rx_cnt = + ev->rx_Qos_mpdu_retryBit_cnt; + + mib_stats->mib_rsna_stats.tkip_icv_err = + ev->rsna_TKIP_icv_err_cnt; + mib_stats->mib_rsna_stats.tkip_replays = + ev->rsna_TKIP_replay_err_cnt; + mib_stats->mib_rsna_stats.ccmp_decrypt_err = + ev->rsna_CCMP_decrypt_err_cnt; + mib_stats->mib_rsna_stats.ccmp_replays = + ev->rsna_CCMP_replay_err_cnt; + + mib_stats->mib_counters_group3.tx_ampdu_cnt = + ev->tx_ampdu_cnt; + mib_stats->mib_counters_group3.tx_mpdus_in_ampdu_cnt = + ev->tx_mpdu_cnt_in_ampdu; + mib_stats->mib_counters_group3.tx_octets_in_ampdu_cnt = + ev->tx_octets_in_ampdu.upload.high; + mib_stats->mib_counters_group3.tx_octets_in_ampdu_cnt = + mib_stats->mib_counters_group3.tx_octets_in_ampdu_cnt << 32; + mib_stats->mib_counters_group3.tx_octets_in_ampdu_cnt += + ev->tx_octets_in_ampdu.upload.low; + + mib_stats->mib_counters_group3.ampdu_rx_cnt = + ev->rx_ampdu_cnt; + mib_stats->mib_counters_group3.mpdu_in_rx_ampdu_cnt = + ev->rx_mpdu_cnt_in_ampdu; + mib_stats->mib_counters_group3.rx_octets_in_ampdu_cnt = + ev->rx_octets_in_ampdu.upload.rx_octets_in_ampdu_high; + mib_stats->mib_counters_group3.rx_octets_in_ampdu_cnt = + mib_stats->mib_counters_group3.rx_octets_in_ampdu_cnt << 32; + mib_stats->mib_counters_group3.rx_octets_in_ampdu_cnt += + ev->rx_octets_in_ampdu.upload.rx_octets_in_ampdu_low; + + if (ev_param->num_mib_extd_stats) { + ev_extd = (wmi_mib_extd_stats *)((uint8_t *)ev + + ev_param->num_mib_stats * sizeof(wmi_mib_stats) + + ev_param->num_bcn_stats * sizeof(wmi_bcn_stats) + + ev_param->num_peer_extd_stats * + sizeof(wmi_peer_extd_stats)); + mib_stats->mib_mac_statistics.multi_retry_cnt = + ev_extd->tx_msdu_multi_retry_cnt; + mib_stats->mib_mac_statistics.tx_ack_fail_cnt = + ev_extd->tx_ack_fail_cnt; + + mib_stats->mib_qos_counters.qos_multi_retry_cnt = + ev_extd->tx_qos_msdu_multi_retry_up; + mib_stats->mib_qos_counters.tx_qos_ack_fail_cnt_up = + ev_extd->tx_qos_ack_fail_cnt_up; + + mib_stats->mib_rsna_stats.cmac_icv_err = + ev_extd->rsna_cmac_icv_err_cnt; + mib_stats->mib_rsna_stats.cmac_replays = + ev_extd->rsna_cmac_replay_err_cnt; + + mib_stats->mib_counters_group3.rx_ampdu_deli_crc_err_cnt = + ev_extd->rx_ampdu_deli_crc_err_cnt; + } + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * extract_profile_ctx_tlv() - extract profile context from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @idx: profile stats index to extract + * @param profile_ctx: Pointer to hold profile context + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_profile_ctx_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_wlan_profile_ctx_t *profile_ctx) +{ + return QDF_STATUS_SUCCESS; +} + +/** + * extract_profile_data_tlv() - extract profile data from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param profile_data: Pointer to hold profile data + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_profile_data_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint8_t idx, wmi_host_wlan_profile_t *profile_data) +{ + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_utf_event_tlv() - extract UTF data info from event + * @wmi_handle: WMI handle + * @param evt_buf: Pointer to event buffer + * @param param: Pointer to hold data + * + * Return : QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_pdev_utf_event_tlv(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_host_pdev_utf_event *event) +{ + WMI_PDEV_UTF_EVENTID_param_tlvs *param_buf; + struct wmi_host_utf_seg_header_info *seg_hdr; + + param_buf = (WMI_PDEV_UTF_EVENTID_param_tlvs *)evt_buf; + event->data = param_buf->data; + event->datalen = param_buf->num_data; + + if (event->datalen < sizeof(struct wmi_host_utf_seg_header_info)) { + WMI_LOGE("%s: Invalid datalen: %d ", __func__, event->datalen); + return QDF_STATUS_E_INVAL; + } + seg_hdr = (struct wmi_host_utf_seg_header_info *)param_buf->data; + /* Set pdev_id=1 until FW adds support to include pdev_id */ + event->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + wmi_handle, + seg_hdr->pdev_id); + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_SUPPORT_RF_CHARACTERIZATION +static QDF_STATUS extract_num_rf_characterization_entries_tlv(wmi_unified_t wmi_handle, + uint8_t *event, + uint32_t *num_rf_characterization_entries) +{ + WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID_param_tlvs *param_buf; + + param_buf = (WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID_param_tlvs *)event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + *num_rf_characterization_entries = + param_buf->num_wmi_chan_rf_characterization_info; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_rf_characterization_entries_tlv(wmi_unified_t wmi_handle, + uint8_t *event, + uint32_t num_rf_characterization_entries, + struct wmi_host_rf_characterization_event_param *rf_characterization_entries) +{ + WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID_param_tlvs *param_buf; + WMI_CHAN_RF_CHARACTERIZATION_INFO *wmi_rf_characterization_entry; + uint8_t ix; + + param_buf = (WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID_param_tlvs *)event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + wmi_rf_characterization_entry = + param_buf->wmi_chan_rf_characterization_info; + if (!wmi_rf_characterization_entry) + return QDF_STATUS_E_INVAL; + + /* + * Using num_wmi_chan_rf_characterization instead of param_buf value + * since memory for rf_characterization_entries was allocated using + * the former. + */ + for (ix = 0; ix < num_rf_characterization_entries; ix++) { + rf_characterization_entries[ix].freq = + WMI_CHAN_RF_CHARACTERIZATION_FREQ_GET( + &wmi_rf_characterization_entry[ix]); + + rf_characterization_entries[ix].bw = + WMI_CHAN_RF_CHARACTERIZATION_BW_GET( + &wmi_rf_characterization_entry[ix]); + + rf_characterization_entries[ix].chan_metric = + WMI_CHAN_RF_CHARACTERIZATION_CHAN_METRIC_GET( + &wmi_rf_characterization_entry[ix]); + + wmi_nofl_debug("rf_characterization_entries[%u]: freq: %u, " + "bw: %u, chan_metric: %u", + ix, rf_characterization_entries[ix].freq, + rf_characterization_entries[ix].bw, + rf_characterization_entries[ix].chan_metric); + } + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * extract_chainmask_tables_tlv() - extract chain mask tables from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold evt buf + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_chainmask_tables_tlv(wmi_unified_t wmi_handle, + uint8_t *event, struct wlan_psoc_host_chainmask_table *chainmask_table) +{ + WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf; + WMI_MAC_PHY_CHAINMASK_CAPABILITY *chainmask_caps; + WMI_SOC_MAC_PHY_HW_MODE_CAPS *hw_caps; + uint8_t i = 0, j = 0; + uint32_t num_mac_phy_chainmask_caps = 0; + + param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *) event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + hw_caps = param_buf->soc_hw_mode_caps; + if (!hw_caps) + return QDF_STATUS_E_INVAL; + + if ((!hw_caps->num_chainmask_tables) || + (hw_caps->num_chainmask_tables > PSOC_MAX_CHAINMASK_TABLES) || + (hw_caps->num_chainmask_tables > + param_buf->num_mac_phy_chainmask_combo)) + return QDF_STATUS_E_INVAL; + + chainmask_caps = param_buf->mac_phy_chainmask_caps; + + if (!chainmask_caps) + return QDF_STATUS_E_INVAL; + + for (i = 0; i < hw_caps->num_chainmask_tables; i++) { + if (chainmask_table[i].num_valid_chainmasks > + (UINT_MAX - num_mac_phy_chainmask_caps)) { + wmi_err_rl("integer overflow, num_mac_phy_chainmask_caps:%d, i:%d, um_valid_chainmasks:%d", + num_mac_phy_chainmask_caps, i, + chainmask_table[i].num_valid_chainmasks); + return QDF_STATUS_E_INVAL; + } + num_mac_phy_chainmask_caps += + chainmask_table[i].num_valid_chainmasks; + } + + if (num_mac_phy_chainmask_caps > + param_buf->num_mac_phy_chainmask_caps) { + wmi_err_rl("invalid chainmask caps num, num_mac_phy_chainmask_caps:%d, param_buf->num_mac_phy_chainmask_caps:%d", + num_mac_phy_chainmask_caps, + param_buf->num_mac_phy_chainmask_caps); + return QDF_STATUS_E_INVAL; + } + + for (i = 0; i < hw_caps->num_chainmask_tables; i++) { + + wmi_nofl_debug("Dumping chain mask combo data for table : %d", + i); + for (j = 0; j < chainmask_table[i].num_valid_chainmasks; j++) { + + chainmask_table[i].cap_list[j].chainmask = + chainmask_caps->chainmask; + + chainmask_table[i].cap_list[j].supports_chan_width_20 = + WMI_SUPPORT_CHAN_WIDTH_20_GET(chainmask_caps->supported_flags); + + chainmask_table[i].cap_list[j].supports_chan_width_40 = + WMI_SUPPORT_CHAN_WIDTH_40_GET(chainmask_caps->supported_flags); + + chainmask_table[i].cap_list[j].supports_chan_width_80 = + WMI_SUPPORT_CHAN_WIDTH_80_GET(chainmask_caps->supported_flags); + + chainmask_table[i].cap_list[j].supports_chan_width_160 = + WMI_SUPPORT_CHAN_WIDTH_160_GET(chainmask_caps->supported_flags); + + chainmask_table[i].cap_list[j].supports_chan_width_80P80 = + WMI_SUPPORT_CHAN_WIDTH_80P80_GET(chainmask_caps->supported_flags); + + chainmask_table[i].cap_list[j].chain_mask_2G = + WMI_SUPPORT_CHAIN_MASK_2G_GET(chainmask_caps->supported_flags); + + chainmask_table[i].cap_list[j].chain_mask_5G = + WMI_SUPPORT_CHAIN_MASK_5G_GET(chainmask_caps->supported_flags); + + chainmask_table[i].cap_list[j].chain_mask_tx = + WMI_SUPPORT_CHAIN_MASK_TX_GET(chainmask_caps->supported_flags); + + chainmask_table[i].cap_list[j].chain_mask_rx = + WMI_SUPPORT_CHAIN_MASK_RX_GET(chainmask_caps->supported_flags); + + chainmask_table[i].cap_list[j].supports_aDFS = + WMI_SUPPORT_CHAIN_MASK_ADFS_GET(chainmask_caps->supported_flags); + + chainmask_table[i].cap_list[j].supports_aSpectral = + WMI_SUPPORT_AGILE_SPECTRAL_GET(chainmask_caps->supported_flags); + + chainmask_table[i].cap_list[j].supports_aSpectral_160 = + WMI_SUPPORT_AGILE_SPECTRAL_160_GET(chainmask_caps->supported_flags); + + chainmask_table[i].cap_list[j].supports_aDFS_160 = + WMI_SUPPORT_ADFS_160_GET(chainmask_caps->supported_flags); + + wmi_nofl_debug("supported_flags: 0x%08x chainmasks: 0x%08x", + chainmask_caps->supported_flags, + chainmask_caps->chainmask); + chainmask_caps++; + } + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_service_ready_ext_tlv() - extract basic extended service ready params + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold evt buf + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_service_ready_ext_tlv(wmi_unified_t wmi_handle, + uint8_t *event, struct wlan_psoc_host_service_ext_param *param) +{ + WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf; + wmi_service_ready_ext_event_fixed_param *ev; + WMI_SOC_MAC_PHY_HW_MODE_CAPS *hw_caps; + WMI_SOC_HAL_REG_CAPABILITIES *reg_caps; + WMI_MAC_PHY_CHAINMASK_COMBO *chain_mask_combo; + uint8_t i = 0; + + param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *) event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + ev = param_buf->fixed_param; + if (!ev) + return QDF_STATUS_E_INVAL; + + /* Move this to host based bitmap */ + param->default_conc_scan_config_bits = + ev->default_conc_scan_config_bits; + param->default_fw_config_bits = ev->default_fw_config_bits; + param->he_cap_info = ev->he_cap_info; + param->mpdu_density = ev->mpdu_density; + param->max_bssid_rx_filters = ev->max_bssid_rx_filters; + param->fw_build_vers_ext = ev->fw_build_vers_ext; + param->num_dbr_ring_caps = param_buf->num_dma_ring_caps; + param->num_bin_scaling_params = param_buf->num_wmi_bin_scaling_params; + param->max_bssid_indicator = ev->max_bssid_indicator; + qdf_mem_copy(¶m->ppet, &ev->ppet, sizeof(param->ppet)); + + hw_caps = param_buf->soc_hw_mode_caps; + if (hw_caps) + param->num_hw_modes = hw_caps->num_hw_modes; + else + param->num_hw_modes = 0; + + reg_caps = param_buf->soc_hal_reg_caps; + if (reg_caps) + param->num_phy = reg_caps->num_phy; + else + param->num_phy = 0; + + if (hw_caps) { + param->num_chainmask_tables = hw_caps->num_chainmask_tables; + wmi_nofl_debug("Num chain mask tables: %d", + hw_caps->num_chainmask_tables); + } else + param->num_chainmask_tables = 0; + + if (param->num_chainmask_tables > PSOC_MAX_CHAINMASK_TABLES || + param->num_chainmask_tables > + param_buf->num_mac_phy_chainmask_combo) { + wmi_err_rl("num_chainmask_tables is OOB: %u", + param->num_chainmask_tables); + return QDF_STATUS_E_INVAL; + } + chain_mask_combo = param_buf->mac_phy_chainmask_combo; + + if (!chain_mask_combo) + return QDF_STATUS_SUCCESS; + + wmi_nofl_debug("Dumping chain mask combo data"); + + for (i = 0; i < param->num_chainmask_tables; i++) { + + wmi_nofl_debug("table_id : %d Num valid chainmasks: %d", + chain_mask_combo->chainmask_table_id, + chain_mask_combo->num_valid_chainmask); + + param->chainmask_table[i].table_id = + chain_mask_combo->chainmask_table_id; + param->chainmask_table[i].num_valid_chainmasks = + chain_mask_combo->num_valid_chainmask; + chain_mask_combo++; + } + wmi_nofl_debug("chain mask combo end"); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_service_ready_ext2_tlv() - extract service ready ext2 params from + * event + * @wmi_handle: wmi handle + * @event: pointer to event buffer + * @param: Pointer to hold the params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +extract_service_ready_ext2_tlv(wmi_unified_t wmi_handle, uint8_t *event, + struct wlan_psoc_host_service_ext2_param *param) +{ + WMI_SERVICE_READY_EXT2_EVENTID_param_tlvs *param_buf; + wmi_service_ready_ext2_event_fixed_param *ev; + + param_buf = (WMI_SERVICE_READY_EXT2_EVENTID_param_tlvs *)event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + ev = param_buf->fixed_param; + if (!ev) + return QDF_STATUS_E_INVAL; + + param->reg_db_version_major = + WMI_REG_DB_VERSION_MAJOR_GET( + ev->reg_db_version); + param->reg_db_version_minor = + WMI_REG_DB_VERSION_MINOR_GET( + ev->reg_db_version); + param->bdf_reg_db_version_major = + WMI_BDF_REG_DB_VERSION_MAJOR_GET( + ev->reg_db_version); + param->bdf_reg_db_version_minor = + WMI_BDF_REG_DB_VERSION_MINOR_GET( + ev->reg_db_version); + + param->num_dbr_ring_caps = param_buf->num_dma_ring_caps; + + if (param_buf->nan_cap) + param->max_ndp_sessions = + param_buf->nan_cap->max_ndp_sessions; + else + param->max_ndp_sessions = 0; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_sar_cap_service_ready_ext_tlv() - + * extract SAR cap from service ready event + * @wmi_handle: wmi handle + * @event: pointer to event buffer + * @ext_param: extended target info + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_sar_cap_service_ready_ext_tlv( + wmi_unified_t wmi_handle, + uint8_t *event, + struct wlan_psoc_host_service_ext_param *ext_param) +{ + WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf; + WMI_SAR_CAPABILITIES *sar_caps; + + param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *)event; + + if (!param_buf) + return QDF_STATUS_E_INVAL; + + sar_caps = param_buf->sar_caps; + if (sar_caps) + ext_param->sar_version = sar_caps->active_version; + else + ext_param->sar_version = 0; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_hw_mode_cap_service_ready_ext_tlv() - + * extract HW mode cap from service ready event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold evt buf + * @param hw_mode_idx: hw mode idx should be less than num_mode + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_hw_mode_cap_service_ready_ext_tlv( + wmi_unified_t wmi_handle, + uint8_t *event, uint8_t hw_mode_idx, + struct wlan_psoc_host_hw_mode_caps *param) +{ + WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf; + WMI_SOC_MAC_PHY_HW_MODE_CAPS *hw_caps; + + param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *) event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + hw_caps = param_buf->soc_hw_mode_caps; + if (!hw_caps) + return QDF_STATUS_E_INVAL; + + if (!hw_caps->num_hw_modes || + !param_buf->hw_mode_caps || + hw_caps->num_hw_modes > PSOC_MAX_HW_MODE || + hw_caps->num_hw_modes > param_buf->num_hw_mode_caps) + return QDF_STATUS_E_INVAL; + + if (hw_mode_idx >= hw_caps->num_hw_modes) + return QDF_STATUS_E_INVAL; + + param->hw_mode_id = param_buf->hw_mode_caps[hw_mode_idx].hw_mode_id; + param->phy_id_map = param_buf->hw_mode_caps[hw_mode_idx].phy_id_map; + + param->hw_mode_config_type = + param_buf->hw_mode_caps[hw_mode_idx].hw_mode_config_type; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_mac_phy_cap_service_ready_ext_tlv() - + * extract MAC phy cap from service ready event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold evt buf + * @param hw_mode_idx: hw mode idx should be less than num_mode + * @param phy_id: phy id within hw_mode + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_mac_phy_cap_service_ready_ext_tlv( + wmi_unified_t wmi_handle, + uint8_t *event, uint8_t hw_mode_id, uint8_t phy_id, + struct wlan_psoc_host_mac_phy_caps *param) +{ + WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf; + WMI_MAC_PHY_CAPABILITIES *mac_phy_caps; + WMI_SOC_MAC_PHY_HW_MODE_CAPS *hw_caps; + uint32_t phy_map; + uint8_t hw_idx, phy_idx = 0; + + param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *) event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + hw_caps = param_buf->soc_hw_mode_caps; + if (!hw_caps) + return QDF_STATUS_E_INVAL; + if (hw_caps->num_hw_modes > PSOC_MAX_HW_MODE || + hw_caps->num_hw_modes > param_buf->num_hw_mode_caps) { + wmi_err_rl("invalid num_hw_modes %d, num_hw_mode_caps %d", + hw_caps->num_hw_modes, param_buf->num_hw_mode_caps); + return QDF_STATUS_E_INVAL; + } + + for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) { + if (hw_mode_id == param_buf->hw_mode_caps[hw_idx].hw_mode_id) + break; + + phy_map = param_buf->hw_mode_caps[hw_idx].phy_id_map; + while (phy_map) { + phy_map >>= 1; + phy_idx++; + } + } + + if (hw_idx == hw_caps->num_hw_modes) + return QDF_STATUS_E_INVAL; + + phy_idx += phy_id; + if (phy_idx >= param_buf->num_mac_phy_caps) + return QDF_STATUS_E_INVAL; + + mac_phy_caps = ¶m_buf->mac_phy_caps[phy_idx]; + + param->hw_mode_id = mac_phy_caps->hw_mode_id; + param->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + wmi_handle, + mac_phy_caps->pdev_id); + param->tgt_pdev_id = mac_phy_caps->pdev_id; + param->phy_id = mac_phy_caps->phy_id; + param->supports_11b = + WMI_SUPPORT_11B_GET(mac_phy_caps->supported_flags); + param->supports_11g = + WMI_SUPPORT_11G_GET(mac_phy_caps->supported_flags); + param->supports_11a = + WMI_SUPPORT_11A_GET(mac_phy_caps->supported_flags); + param->supports_11n = + WMI_SUPPORT_11N_GET(mac_phy_caps->supported_flags); + param->supports_11ac = + WMI_SUPPORT_11AC_GET(mac_phy_caps->supported_flags); + param->supports_11ax = + WMI_SUPPORT_11AX_GET(mac_phy_caps->supported_flags); + + param->supported_bands = mac_phy_caps->supported_bands; + param->ampdu_density = mac_phy_caps->ampdu_density; + param->max_bw_supported_2G = mac_phy_caps->max_bw_supported_2G; + param->ht_cap_info_2G = mac_phy_caps->ht_cap_info_2G; + param->vht_cap_info_2G = mac_phy_caps->vht_cap_info_2G; + param->vht_supp_mcs_2G = mac_phy_caps->vht_supp_mcs_2G; + param->he_cap_info_2G[WMI_HOST_HECAP_MAC_WORD1] = + mac_phy_caps->he_cap_info_2G; + param->he_cap_info_2G[WMI_HOST_HECAP_MAC_WORD2] = + mac_phy_caps->he_cap_info_2G_ext; + param->he_supp_mcs_2G = mac_phy_caps->he_supp_mcs_2G; + param->tx_chain_mask_2G = mac_phy_caps->tx_chain_mask_2G; + param->rx_chain_mask_2G = mac_phy_caps->rx_chain_mask_2G; + param->max_bw_supported_5G = mac_phy_caps->max_bw_supported_5G; + param->ht_cap_info_5G = mac_phy_caps->ht_cap_info_5G; + param->vht_cap_info_5G = mac_phy_caps->vht_cap_info_5G; + param->vht_supp_mcs_5G = mac_phy_caps->vht_supp_mcs_5G; + param->he_cap_info_5G[WMI_HOST_HECAP_MAC_WORD1] = + mac_phy_caps->he_cap_info_5G; + param->he_cap_info_5G[WMI_HOST_HECAP_MAC_WORD2] = + mac_phy_caps->he_cap_info_5G_ext; + param->he_supp_mcs_5G = mac_phy_caps->he_supp_mcs_5G; + param->he_cap_info_internal = mac_phy_caps->he_cap_info_internal; + param->tx_chain_mask_5G = mac_phy_caps->tx_chain_mask_5G; + param->rx_chain_mask_5G = mac_phy_caps->rx_chain_mask_5G; + qdf_mem_copy(¶m->he_cap_phy_info_2G, + &mac_phy_caps->he_cap_phy_info_2G, + sizeof(param->he_cap_phy_info_2G)); + qdf_mem_copy(¶m->he_cap_phy_info_5G, + &mac_phy_caps->he_cap_phy_info_5G, + sizeof(param->he_cap_phy_info_5G)); + qdf_mem_copy(¶m->he_ppet2G, &mac_phy_caps->he_ppet2G, + sizeof(param->he_ppet2G)); + qdf_mem_copy(¶m->he_ppet5G, &mac_phy_caps->he_ppet5G, + sizeof(param->he_ppet5G)); + param->chainmask_table_id = mac_phy_caps->chainmask_table_id; + param->lmac_id = mac_phy_caps->lmac_id; + param->reg_cap_ext.wireless_modes = convert_wireless_modes_tlv + (mac_phy_caps->wireless_modes); + param->reg_cap_ext.low_2ghz_chan = mac_phy_caps->low_2ghz_chan_freq; + param->reg_cap_ext.high_2ghz_chan = mac_phy_caps->high_2ghz_chan_freq; + param->reg_cap_ext.low_5ghz_chan = mac_phy_caps->low_5ghz_chan_freq; + param->reg_cap_ext.high_5ghz_chan = mac_phy_caps->high_5ghz_chan_freq; + param->nss_ratio_enabled = WMI_NSS_RATIO_ENABLE_DISABLE_GET( + mac_phy_caps->nss_ratio); + param->nss_ratio_info = WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_reg_cap_service_ready_ext_tlv() - + * extract REG cap from service ready event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold evt buf + * @param phy_idx: phy idx should be less than num_mode + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_reg_cap_service_ready_ext_tlv( + wmi_unified_t wmi_handle, + uint8_t *event, uint8_t phy_idx, + struct wlan_psoc_host_hal_reg_capabilities_ext *param) +{ + WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf; + WMI_SOC_HAL_REG_CAPABILITIES *reg_caps; + WMI_HAL_REG_CAPABILITIES_EXT *ext_reg_cap; + + param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *) event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + reg_caps = param_buf->soc_hal_reg_caps; + if (!reg_caps) + return QDF_STATUS_E_INVAL; + + if (reg_caps->num_phy > param_buf->num_hal_reg_caps) + return QDF_STATUS_E_INVAL; + + if (phy_idx >= reg_caps->num_phy) + return QDF_STATUS_E_INVAL; + + if (!param_buf->hal_reg_caps) + return QDF_STATUS_E_INVAL; + + ext_reg_cap = ¶m_buf->hal_reg_caps[phy_idx]; + + param->phy_id = ext_reg_cap->phy_id; + param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain; + param->eeprom_reg_domain_ext = ext_reg_cap->eeprom_reg_domain_ext; + param->regcap1 = ext_reg_cap->regcap1; + param->regcap2 = ext_reg_cap->regcap2; + param->wireless_modes = convert_wireless_modes_tlv( + ext_reg_cap->wireless_modes); + param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan; + param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan; + param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan; + param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS validate_dbr_ring_caps_idx(uint8_t idx, + uint8_t num_dma_ring_caps) +{ + /* If dma_ring_caps is populated, num_dbr_ring_caps is non-zero */ + if (!num_dma_ring_caps) { + wmi_err("dma_ring_caps %d", num_dma_ring_caps); + return QDF_STATUS_E_INVAL; + } + if (idx >= num_dma_ring_caps) { + WMI_LOGE("%s: Index %d exceeds range", __func__, idx); + return QDF_STATUS_E_INVAL; + } + return QDF_STATUS_SUCCESS; +} + +static void +populate_dbr_ring_cap_elems(wmi_unified_t wmi_handle, + struct wlan_psoc_host_dbr_ring_caps *param, + WMI_DMA_RING_CAPABILITIES *dbr_ring_caps) +{ + param->pdev_id = wmi_handle->ops->convert_target_pdev_id_to_host( + wmi_handle, + dbr_ring_caps->pdev_id); + param->mod_id = dbr_ring_caps->mod_id; + param->ring_elems_min = dbr_ring_caps->ring_elems_min; + param->min_buf_size = dbr_ring_caps->min_buf_size; + param->min_buf_align = dbr_ring_caps->min_buf_align; +} + +static QDF_STATUS extract_dbr_ring_cap_service_ready_ext_tlv( + wmi_unified_t wmi_handle, + uint8_t *event, uint8_t idx, + struct wlan_psoc_host_dbr_ring_caps *param) +{ + WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf; + QDF_STATUS status; + + param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *)event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + status = validate_dbr_ring_caps_idx(idx, param_buf->num_dma_ring_caps); + if (status != QDF_STATUS_SUCCESS) + return status; + + populate_dbr_ring_cap_elems(wmi_handle, param, + ¶m_buf->dma_ring_caps[idx]); + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_dbr_ring_cap_service_ready_ext2_tlv( + wmi_unified_t wmi_handle, + uint8_t *event, uint8_t idx, + struct wlan_psoc_host_dbr_ring_caps *param) +{ + WMI_SERVICE_READY_EXT2_EVENTID_param_tlvs *param_buf; + QDF_STATUS status; + + param_buf = (WMI_SERVICE_READY_EXT2_EVENTID_param_tlvs *)event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + status = validate_dbr_ring_caps_idx(idx, param_buf->num_dma_ring_caps); + if (status != QDF_STATUS_SUCCESS) + return status; + + populate_dbr_ring_cap_elems(wmi_handle, param, + ¶m_buf->dma_ring_caps[idx]); + return QDF_STATUS_SUCCESS; +} +/** + * extract_thermal_stats_tlv() - extract thermal stats from event + * @wmi_handle: wmi handle + * @param evt_buf: Pointer to event buffer + * @param temp: Pointer to hold extracted temperature + * @param level: Pointer to hold extracted level + * + * Return: 0 for success or error code + */ +static QDF_STATUS +extract_thermal_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *temp, + uint32_t *level, uint32_t *pdev_id) +{ + WMI_THERM_THROT_STATS_EVENTID_param_tlvs *param_buf; + wmi_therm_throt_stats_event_fixed_param *tt_stats_event; + + param_buf = + (WMI_THERM_THROT_STATS_EVENTID_param_tlvs *) evt_buf; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + tt_stats_event = param_buf->fixed_param; + + *pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + wmi_handle, + tt_stats_event->pdev_id); + *temp = tt_stats_event->temp; + *level = tt_stats_event->level; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_thermal_level_stats_tlv() - extract thermal level stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param idx: Index to level stats + * @param levelcount: Pointer to hold levelcount + * @param dccount: Pointer to hold dccount + * + * Return: 0 for success or error code + */ +static QDF_STATUS +extract_thermal_level_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint8_t idx, uint32_t *levelcount, + uint32_t *dccount) +{ + WMI_THERM_THROT_STATS_EVENTID_param_tlvs *param_buf; + wmi_therm_throt_level_stats_info *tt_level_info; + + param_buf = + (WMI_THERM_THROT_STATS_EVENTID_param_tlvs *) evt_buf; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + tt_level_info = param_buf->therm_throt_level_stats_info; + + if (idx < THERMAL_LEVELS) { + *levelcount = tt_level_info[idx].level_count; + *dccount = tt_level_info[idx].dc_count; + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} +#ifdef BIG_ENDIAN_HOST +/** + * fips_conv_data_be() - LE to BE conversion of FIPS ev data + * @param data_len - data length + * @param data - pointer to data + * + * Return: QDF_STATUS - success or error status + */ +static QDF_STATUS fips_conv_data_be(uint32_t data_len, uint8_t *data) +{ + uint8_t *data_aligned = NULL; + int c; + unsigned char *data_unaligned; + + data_unaligned = qdf_mem_malloc(((sizeof(uint8_t) * data_len) + + FIPS_ALIGN)); + /* Assigning unaligned space to copy the data */ + /* Checking if kmalloc does successful allocation */ + if (!data_unaligned) + return QDF_STATUS_E_FAILURE; + + /* Checking if space is alligned */ + if (!FIPS_IS_ALIGNED(data_unaligned, FIPS_ALIGN)) { + /* align the data space */ + data_aligned = + (uint8_t *)FIPS_ALIGNTO(data_unaligned, FIPS_ALIGN); + } else { + data_aligned = (u_int8_t *)data_unaligned; + } + + /* memset and copy content from data to data aligned */ + OS_MEMSET(data_aligned, 0, data_len); + OS_MEMCPY(data_aligned, data, data_len); + /* Endianness to LE */ + for (c = 0; c < data_len/4; c++) { + *((u_int32_t *)data_aligned + c) = + qdf_le32_to_cpu(*((u_int32_t *)data_aligned + c)); + } + + /* Copy content to event->data */ + OS_MEMCPY(data, data_aligned, data_len); + + /* clean up allocated space */ + qdf_mem_free(data_unaligned); + data_aligned = NULL; + data_unaligned = NULL; + + /*************************************************************/ + + return QDF_STATUS_SUCCESS; +} +#else +/** + * fips_conv_data_be() - DUMMY for LE platform + * + * Return: QDF_STATUS - success + */ +static QDF_STATUS fips_conv_data_be(uint32_t data_len, uint8_t *data) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** +* send_pdev_get_pn_cmd_tlv() - send get PN request params to fw +* @wmi_handle - wmi handle +* @params - PN request params for peer +* +* Return: QDF_STATUS - success or error status +*/ +static QDF_STATUS +send_pdev_get_pn_cmd_tlv(wmi_unified_t wmi_handle, + struct peer_request_pn_param *params) +{ + wmi_peer_tx_pn_request_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t len = sizeof(wmi_peer_tx_pn_request_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_peer_tx_pn_request_cmd_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_tx_pn_request_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_peer_tx_pn_request_cmd_fixed_param)); + + cmd->vdev_id = params->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(params->peer_macaddr, &cmd->peer_macaddr); + cmd->key_type = params->key_type; + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_TX_PN_REQUEST_CMDID)) { + WMI_LOGE("%s:Failed to send WMI command\n", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** +* extract_get_pn_data_tlv() - extract pn resp +* @wmi_handle - wmi handle +* @params - PN response params for peer +* +* Return: QDF_STATUS - success or error status +*/ +static QDF_STATUS +extract_get_pn_data_tlv(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_get_pn_event *param) +{ + WMI_PEER_TX_PN_RESPONSE_EVENTID_param_tlvs *param_buf; + wmi_peer_tx_pn_response_event_fixed_param *event = NULL; + + param_buf = (WMI_PEER_TX_PN_RESPONSE_EVENTID_param_tlvs *)evt_buf; + event = + (wmi_peer_tx_pn_response_event_fixed_param *)param_buf->fixed_param; + + param->vdev_id = event->vdev_id; + param->key_type = event->key_type; + qdf_mem_copy(param->pn, event->pn, sizeof(event->pn)); + WMI_MAC_ADDR_TO_CHAR_ARRAY(&event->peer_macaddr, param->mac_addr); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_fips_event_data_tlv() - extract fips event data + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: pointer FIPS event params + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_fips_event_data_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_fips_event_param *param) +{ + WMI_PDEV_FIPS_EVENTID_param_tlvs *param_buf; + wmi_pdev_fips_event_fixed_param *event; + + param_buf = (WMI_PDEV_FIPS_EVENTID_param_tlvs *) evt_buf; + event = (wmi_pdev_fips_event_fixed_param *) param_buf->fixed_param; + + if (event->data_len > param_buf->num_data) + return QDF_STATUS_E_FAILURE; + + if (fips_conv_data_be(event->data_len, param_buf->data) != + QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + param->data = (uint32_t *)param_buf->data; + param->data_len = event->data_len; + param->error_status = event->error_status; + param->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + wmi_handle, + event->pdev_id); + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_DISA +/** + * extract_encrypt_decrypt_resp_event_tlv() - extract encrypt decrypt resp + * params from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @resp: Pointer to hold resp parameters + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +extract_encrypt_decrypt_resp_event_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + struct disa_encrypt_decrypt_resp_params + *resp) +{ + WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID_param_tlvs *param_buf; + wmi_vdev_encrypt_decrypt_data_resp_event_fixed_param *data_event; + + param_buf = evt_buf; + if (!param_buf) { + WMI_LOGE("encrypt decrypt resp evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + data_event = param_buf->fixed_param; + + resp->vdev_id = data_event->vdev_id; + resp->status = data_event->status; + + if ((data_event->data_length > param_buf->num_enc80211_frame) || + (data_event->data_length > WMI_SVC_MSG_MAX_SIZE - + WMI_TLV_HDR_SIZE - sizeof(*data_event))) { + WMI_LOGE("FW msg data_len %d more than TLV hdr %d", + data_event->data_length, + param_buf->num_enc80211_frame); + return QDF_STATUS_E_INVAL; + } + + resp->data_len = data_event->data_length; + + if (resp->data_len) + resp->data = (uint8_t *)param_buf->enc80211_frame; + + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_FEATURE_DISA */ + +static bool is_management_record_tlv(uint32_t cmd_id) +{ + switch (cmd_id) { + case WMI_MGMT_TX_SEND_CMDID: + case WMI_MGMT_TX_COMPLETION_EVENTID: + case WMI_OFFCHAN_DATA_TX_SEND_CMDID: + case WMI_MGMT_RX_EVENTID: + return true; + default: + return false; + } +} + +static bool is_diag_event_tlv(uint32_t event_id) +{ + if (WMI_DIAG_EVENTID == event_id) + return true; + + return false; +} + +static uint16_t wmi_tag_fw_hang_cmd(wmi_unified_t wmi_handle) +{ + uint16_t tag = 0; + + if (qdf_atomic_read(&wmi_handle->is_target_suspended)) { + qdf_nofl_err("%s: Target is already suspended, Ignore FW Hang Command", + __func__); + return tag; + } + + if (wmi_handle->tag_crash_inject) + tag = HTC_TX_PACKET_TAG_AUTO_PM; + + wmi_handle->tag_crash_inject = false; + return tag; +} + +/** + * wmi_set_htc_tx_tag_tlv() - set HTC TX tag for WMI commands + * @wmi_handle: WMI handle + * @buf: WMI buffer + * @cmd_id: WMI command Id + * + * Return htc_tx_tag + */ +static uint16_t wmi_set_htc_tx_tag_tlv(wmi_unified_t wmi_handle, + wmi_buf_t buf, + uint32_t cmd_id) +{ + uint16_t htc_tx_tag = 0; + + switch (cmd_id) { + case WMI_WOW_ENABLE_CMDID: + case WMI_PDEV_SUSPEND_CMDID: + case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID: + case WMI_PDEV_RESUME_CMDID: + case WMI_HB_SET_ENABLE_CMDID: + case WMI_WOW_SET_ACTION_WAKE_UP_CMDID: +#ifdef FEATURE_WLAN_D0WOW + case WMI_D0_WOW_ENABLE_DISABLE_CMDID: +#endif + htc_tx_tag = HTC_TX_PACKET_TAG_AUTO_PM; + break; + case WMI_FORCE_FW_HANG_CMDID: + htc_tx_tag = wmi_tag_fw_hang_cmd(wmi_handle); + break; + default: + break; + } + + return htc_tx_tag; +} + +static struct cur_reg_rule +*create_reg_rules_from_wmi(uint32_t num_reg_rules, + wmi_regulatory_rule_struct *wmi_reg_rule) +{ + struct cur_reg_rule *reg_rule_ptr; + uint32_t count; + + reg_rule_ptr = qdf_mem_malloc(num_reg_rules * sizeof(*reg_rule_ptr)); + + if (!reg_rule_ptr) + return NULL; + + for (count = 0; count < num_reg_rules; count++) { + reg_rule_ptr[count].start_freq = + WMI_REG_RULE_START_FREQ_GET( + wmi_reg_rule[count].freq_info); + reg_rule_ptr[count].end_freq = + WMI_REG_RULE_END_FREQ_GET( + wmi_reg_rule[count].freq_info); + reg_rule_ptr[count].max_bw = + WMI_REG_RULE_MAX_BW_GET( + wmi_reg_rule[count].bw_pwr_info); + reg_rule_ptr[count].reg_power = + WMI_REG_RULE_REG_POWER_GET( + wmi_reg_rule[count].bw_pwr_info); + reg_rule_ptr[count].ant_gain = + WMI_REG_RULE_ANTENNA_GAIN_GET( + wmi_reg_rule[count].bw_pwr_info); + reg_rule_ptr[count].flags = + WMI_REG_RULE_FLAGS_GET( + wmi_reg_rule[count].flag_info); + } + + return reg_rule_ptr; +} + +static QDF_STATUS extract_reg_chan_list_update_event_tlv( + wmi_unified_t wmi_handle, uint8_t *evt_buf, + struct cur_regulatory_info *reg_info, uint32_t len) +{ + WMI_REG_CHAN_LIST_CC_EVENTID_param_tlvs *param_buf; + wmi_reg_chan_list_cc_event_fixed_param *chan_list_event_hdr; + wmi_regulatory_rule_struct *wmi_reg_rule; + uint32_t num_2g_reg_rules, num_5g_reg_rules; + + WMI_LOGD("processing regulatory channel list"); + + param_buf = (WMI_REG_CHAN_LIST_CC_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("invalid channel list event buf"); + return QDF_STATUS_E_FAILURE; + } + + chan_list_event_hdr = param_buf->fixed_param; + + reg_info->num_2g_reg_rules = chan_list_event_hdr->num_2g_reg_rules; + reg_info->num_5g_reg_rules = chan_list_event_hdr->num_5g_reg_rules; + num_2g_reg_rules = reg_info->num_2g_reg_rules; + num_5g_reg_rules = reg_info->num_5g_reg_rules; + if ((num_2g_reg_rules > MAX_REG_RULES) || + (num_5g_reg_rules > MAX_REG_RULES) || + (num_2g_reg_rules + num_5g_reg_rules > MAX_REG_RULES) || + (num_2g_reg_rules + num_5g_reg_rules != + param_buf->num_reg_rule_array)) { + wmi_err_rl("Invalid num_2g_reg_rules: %u, num_5g_reg_rules: %u", + num_2g_reg_rules, num_5g_reg_rules); + return QDF_STATUS_E_FAILURE; + } + if (param_buf->num_reg_rule_array > + (WMI_SVC_MSG_MAX_SIZE - sizeof(*chan_list_event_hdr)) / + sizeof(*wmi_reg_rule)) { + wmi_err_rl("Invalid num_reg_rule_array: %u", + param_buf->num_reg_rule_array); + return QDF_STATUS_E_FAILURE; + } + + qdf_mem_copy(reg_info->alpha2, &(chan_list_event_hdr->alpha2), + REG_ALPHA2_LEN); + reg_info->dfs_region = chan_list_event_hdr->dfs_region; + reg_info->phybitmap = chan_list_event_hdr->phybitmap; + reg_info->offload_enabled = true; + reg_info->num_phy = chan_list_event_hdr->num_phy; + reg_info->phy_id = wmi_handle->ops->convert_phy_id_target_to_host( + wmi_handle, chan_list_event_hdr->phy_id); + reg_info->ctry_code = chan_list_event_hdr->country_id; + reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code; + if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_PASS) + reg_info->status_code = REG_SET_CC_STATUS_PASS; + else if (chan_list_event_hdr->status_code == + WMI_REG_CURRENT_ALPHA2_NOT_FOUND) + reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND; + else if (chan_list_event_hdr->status_code == + WMI_REG_INIT_ALPHA2_NOT_FOUND) + reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND; + else if (chan_list_event_hdr->status_code == + WMI_REG_SET_CC_CHANGE_NOT_ALLOWED) + reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED; + else if (chan_list_event_hdr->status_code == + WMI_REG_SET_CC_STATUS_NO_MEMORY) + reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY; + else if (chan_list_event_hdr->status_code == + WMI_REG_SET_CC_STATUS_FAIL) + reg_info->status_code = REG_SET_CC_STATUS_FAIL; + + reg_info->min_bw_2g = chan_list_event_hdr->min_bw_2g; + reg_info->max_bw_2g = chan_list_event_hdr->max_bw_2g; + reg_info->min_bw_5g = chan_list_event_hdr->min_bw_5g; + reg_info->max_bw_5g = chan_list_event_hdr->max_bw_5g; + + WMI_LOGD(FL("num_phys = %u and phy_id = %u"), + reg_info->num_phy, reg_info->phy_id); + + WMI_LOGD("%s:cc %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d", + __func__, reg_info->alpha2, reg_info->dfs_region, + reg_info->min_bw_2g, reg_info->max_bw_2g, + reg_info->min_bw_5g, reg_info->max_bw_5g); + + WMI_LOGD("%s: num_2g_reg_rules %d num_5g_reg_rules %d", __func__, + num_2g_reg_rules, num_5g_reg_rules); + wmi_reg_rule = + (wmi_regulatory_rule_struct *)((uint8_t *)chan_list_event_hdr + + sizeof(wmi_reg_chan_list_cc_event_fixed_param) + + WMI_TLV_HDR_SIZE); + reg_info->reg_rules_2g_ptr = create_reg_rules_from_wmi(num_2g_reg_rules, + wmi_reg_rule); + wmi_reg_rule += num_2g_reg_rules; + + reg_info->reg_rules_5g_ptr = create_reg_rules_from_wmi(num_5g_reg_rules, + wmi_reg_rule); + + WMI_LOGD("processed regulatory channel list"); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_reg_11d_new_country_event_tlv( + wmi_unified_t wmi_handle, uint8_t *evt_buf, + struct reg_11d_new_country *reg_11d_country, uint32_t len) +{ + wmi_11d_new_country_event_fixed_param *reg_11d_country_event; + WMI_11D_NEW_COUNTRY_EVENTID_param_tlvs *param_buf; + + param_buf = (WMI_11D_NEW_COUNTRY_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("invalid 11d country event buf"); + return QDF_STATUS_E_FAILURE; + } + + reg_11d_country_event = param_buf->fixed_param; + + qdf_mem_copy(reg_11d_country->alpha2, + ®_11d_country_event->new_alpha2, REG_ALPHA2_LEN); + reg_11d_country->alpha2[REG_ALPHA2_LEN] = '\0'; + + WMI_LOGD("processed 11d country event, new cc %s", + reg_11d_country->alpha2); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_reg_ch_avoid_event_tlv( + wmi_unified_t wmi_handle, uint8_t *evt_buf, + struct ch_avoid_ind_type *ch_avoid_ind, uint32_t len) +{ + wmi_avoid_freq_ranges_event_fixed_param *afr_fixed_param; + wmi_avoid_freq_range_desc *afr_desc; + uint32_t num_freq_ranges, freq_range_idx; + WMI_WLAN_FREQ_AVOID_EVENTID_param_tlvs *param_buf = + (WMI_WLAN_FREQ_AVOID_EVENTID_param_tlvs *) evt_buf; + + if (!param_buf) { + WMI_LOGE("Invalid channel avoid event buffer"); + return QDF_STATUS_E_INVAL; + } + + afr_fixed_param = param_buf->fixed_param; + if (!afr_fixed_param) { + WMI_LOGE("Invalid channel avoid event fixed param buffer"); + return QDF_STATUS_E_INVAL; + } + + if (!ch_avoid_ind) { + WMI_LOGE("Invalid channel avoid indication buffer"); + return QDF_STATUS_E_INVAL; + } + if (param_buf->num_avd_freq_range < afr_fixed_param->num_freq_ranges) { + WMI_LOGE(FL("no.of freq ranges exceeded the limit")); + return QDF_STATUS_E_INVAL; + } + num_freq_ranges = (afr_fixed_param->num_freq_ranges > + CH_AVOID_MAX_RANGE) ? CH_AVOID_MAX_RANGE : + afr_fixed_param->num_freq_ranges; + + WMI_LOGD("Channel avoid event received with %d ranges", + num_freq_ranges); + + ch_avoid_ind->ch_avoid_range_cnt = num_freq_ranges; + afr_desc = (wmi_avoid_freq_range_desc *)(param_buf->avd_freq_range); + for (freq_range_idx = 0; freq_range_idx < num_freq_ranges; + freq_range_idx++) { + ch_avoid_ind->avoid_freq_range[freq_range_idx].start_freq = + afr_desc->start_freq; + ch_avoid_ind->avoid_freq_range[freq_range_idx].end_freq = + afr_desc->end_freq; + WMI_LOGD("range %d tlv id %u, start freq %u, end freq %u", + freq_range_idx, afr_desc->tlv_header, + afr_desc->start_freq, afr_desc->end_freq); + afr_desc++; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef DFS_COMPONENT_ENABLE +/** + * extract_dfs_cac_complete_event_tlv() - extract cac complete event + * @wmi_handle: wma handle + * @evt_buf: event buffer + * @vdev_id: vdev id + * @len: length of buffer + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_dfs_cac_complete_event_tlv(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + uint32_t *vdev_id, + uint32_t len) +{ + WMI_VDEV_DFS_CAC_COMPLETE_EVENTID_param_tlvs *param_tlvs; + wmi_vdev_dfs_cac_complete_event_fixed_param *cac_event; + + param_tlvs = (WMI_VDEV_DFS_CAC_COMPLETE_EVENTID_param_tlvs *) evt_buf; + if (!param_tlvs) { + WMI_LOGE("invalid cac complete event buf"); + return QDF_STATUS_E_FAILURE; + } + + cac_event = param_tlvs->fixed_param; + *vdev_id = cac_event->vdev_id; + WMI_LOGD("processed cac complete event vdev %d", *vdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_dfs_ocac_complete_event_tlv() - extract cac complete event + * @wmi_handle: wma handle + * @evt_buf: event buffer + * @vdev_id: vdev id + * @len: length of buffer + * + * Return: 0 for success or error code + */ +static QDF_STATUS +extract_dfs_ocac_complete_event_tlv(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct vdev_adfs_complete_status *param) +{ + WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID_param_tlvs *param_tlvs; + wmi_vdev_adfs_ocac_complete_event_fixed_param *ocac_complete_status; + + param_tlvs = (WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_tlvs) { + WMI_LOGE("invalid ocac complete event buf"); + return QDF_STATUS_E_FAILURE; + } + + if (!param_tlvs->fixed_param) { + WMI_LOGE("invalid param_tlvs->fixed_param"); + return QDF_STATUS_E_FAILURE; + } + + ocac_complete_status = param_tlvs->fixed_param; + param->vdev_id = ocac_complete_status->vdev_id; + param->chan_freq = ocac_complete_status->chan_freq; + param->center_freq = ocac_complete_status->center_freq; + param->ocac_status = ocac_complete_status->status; + param->chan_width = ocac_complete_status->chan_width; + WMI_LOGD("processed ocac complete event vdev %d agile chan %d", + param->vdev_id, param->center_freq); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_dfs_radar_detection_event_tlv() - extract radar found event + * @wmi_handle: wma handle + * @evt_buf: event buffer + * @radar_found: radar found event info + * @len: length of buffer + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_dfs_radar_detection_event_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct radar_found_info *radar_found, + uint32_t len) +{ + WMI_PDEV_DFS_RADAR_DETECTION_EVENTID_param_tlvs *param_tlv; + wmi_pdev_dfs_radar_detection_event_fixed_param *radar_event; + + param_tlv = (WMI_PDEV_DFS_RADAR_DETECTION_EVENTID_param_tlvs *) evt_buf; + if (!param_tlv) { + WMI_LOGE("invalid radar detection event buf"); + return QDF_STATUS_E_FAILURE; + } + + radar_event = param_tlv->fixed_param; + + radar_found->pdev_id = convert_target_pdev_id_to_host_pdev_id( + wmi_handle, + radar_event->pdev_id); + + if (radar_found->pdev_id == WMI_HOST_PDEV_ID_INVALID) + return QDF_STATUS_E_FAILURE; + + radar_found->detection_mode = radar_event->detection_mode; + radar_found->chan_freq = radar_event->chan_freq; + radar_found->chan_width = radar_event->chan_width; + radar_found->detector_id = radar_event->detector_id; + radar_found->segment_id = radar_event->segment_id; + radar_found->timestamp = radar_event->timestamp; + radar_found->is_chirp = radar_event->is_chirp; + radar_found->freq_offset = radar_event->freq_offset; + radar_found->sidx = radar_event->sidx; + + wmi_info("processed radar found event pdev %d," + "Radar Event Info:pdev_id %d,timestamp %d,chan_freq (dur) %d," + "chan_width (RSSI) %d,detector_id (false_radar) %d," + "freq_offset (radar_check) %d,segment_id %d,sidx %d," + "is_chirp %d,detection mode %d", + radar_event->pdev_id, radar_found->pdev_id, + radar_event->timestamp, radar_event->chan_freq, + radar_event->chan_width, radar_event->detector_id, + radar_event->freq_offset, radar_event->segment_id, + radar_event->sidx, radar_event->is_chirp, + radar_event->detection_mode); + + return QDF_STATUS_SUCCESS; +} + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * extract_wlan_radar_event_info_tlv() - extract radar pulse event + * @wmi_handle: wma handle + * @evt_buf: event buffer + * @wlan_radar_event: Pointer to struct radar_event_info + * @len: length of buffer + * + * Return: QDF_STATUS + */ +static QDF_STATUS extract_wlan_radar_event_info_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct radar_event_info *wlan_radar_event, + uint32_t len) +{ + WMI_DFS_RADAR_EVENTID_param_tlvs *param_tlv; + wmi_dfs_radar_event_fixed_param *radar_event; + + param_tlv = (WMI_DFS_RADAR_EVENTID_param_tlvs *)evt_buf; + if (!param_tlv) { + WMI_LOGE("invalid wlan radar event buf"); + return QDF_STATUS_E_FAILURE; + } + + radar_event = param_tlv->fixed_param; + wlan_radar_event->pulse_is_chirp = radar_event->pulse_is_chirp; + wlan_radar_event->pulse_center_freq = radar_event->pulse_center_freq; + wlan_radar_event->pulse_duration = radar_event->pulse_duration; + wlan_radar_event->rssi = radar_event->rssi; + wlan_radar_event->pulse_detect_ts = radar_event->pulse_detect_ts; + wlan_radar_event->upload_fullts_high = radar_event->upload_fullts_high; + wlan_radar_event->upload_fullts_low = radar_event->upload_fullts_low; + wlan_radar_event->peak_sidx = radar_event->peak_sidx; + wlan_radar_event->delta_peak = radar_event->pulse_delta_peak; + wlan_radar_event->delta_diff = radar_event->pulse_delta_diff; + if (radar_event->pulse_flags & + WMI_DFS_RADAR_PULSE_FLAG_MASK_PSIDX_DIFF_VALID) { + wlan_radar_event->is_psidx_diff_valid = true; + wlan_radar_event->psidx_diff = radar_event->psidx_diff; + } else { + wlan_radar_event->is_psidx_diff_valid = false; + } + + wlan_radar_event->pdev_id = radar_event->pdev_id; + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS extract_wlan_radar_event_info_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct radar_event_info *wlan_radar_event, + uint32_t len) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif + +/** + * send_get_rcpi_cmd_tlv() - send request for rcpi value + * @wmi_handle: wmi handle + * @get_rcpi_param: rcpi params + * + * Return: QDF status + */ +static QDF_STATUS send_get_rcpi_cmd_tlv(wmi_unified_t wmi_handle, + struct rcpi_req *get_rcpi_param) +{ + wmi_buf_t buf; + wmi_request_rcpi_cmd_fixed_param *cmd; + uint8_t len = sizeof(wmi_request_rcpi_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_request_rcpi_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_request_rcpi_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_request_rcpi_cmd_fixed_param)); + + cmd->vdev_id = get_rcpi_param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(get_rcpi_param->mac_addr, + &cmd->peer_macaddr); + + switch (get_rcpi_param->measurement_type) { + + case RCPI_MEASUREMENT_TYPE_AVG_MGMT: + cmd->measurement_type = WMI_RCPI_MEASUREMENT_TYPE_AVG_MGMT; + break; + + case RCPI_MEASUREMENT_TYPE_AVG_DATA: + cmd->measurement_type = WMI_RCPI_MEASUREMENT_TYPE_AVG_DATA; + break; + + case RCPI_MEASUREMENT_TYPE_LAST_MGMT: + cmd->measurement_type = WMI_RCPI_MEASUREMENT_TYPE_LAST_MGMT; + break; + + case RCPI_MEASUREMENT_TYPE_LAST_DATA: + cmd->measurement_type = WMI_RCPI_MEASUREMENT_TYPE_LAST_DATA; + break; + + default: + /* + * invalid rcpi measurement type, fall back to + * RCPI_MEASUREMENT_TYPE_AVG_MGMT + */ + cmd->measurement_type = WMI_RCPI_MEASUREMENT_TYPE_AVG_MGMT; + break; + } + WMI_LOGD("RCPI REQ VDEV_ID:%d-->", cmd->vdev_id); + wmi_mtrace(WMI_REQUEST_RCPI_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_RCPI_CMDID)) { + + WMI_LOGE("%s: Failed to send WMI_REQUEST_RCPI_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_rcpi_response_event_tlv() - Extract RCPI event params + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @res: pointer to hold rcpi response from firmware + * + * Return: QDF_STATUS_SUCCESS for successful event parse + * else QDF_STATUS_E_INVAL or QDF_STATUS_E_FAILURE + */ +static QDF_STATUS +extract_rcpi_response_event_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct rcpi_res *res) +{ + WMI_UPDATE_RCPI_EVENTID_param_tlvs *param_buf; + wmi_update_rcpi_event_fixed_param *event; + + param_buf = (WMI_UPDATE_RCPI_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE(FL("Invalid rcpi event")); + return QDF_STATUS_E_INVAL; + } + + event = param_buf->fixed_param; + res->vdev_id = event->vdev_id; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&event->peer_macaddr, res->mac_addr); + + switch (event->measurement_type) { + + case WMI_RCPI_MEASUREMENT_TYPE_AVG_MGMT: + res->measurement_type = RCPI_MEASUREMENT_TYPE_AVG_MGMT; + break; + + case WMI_RCPI_MEASUREMENT_TYPE_AVG_DATA: + res->measurement_type = RCPI_MEASUREMENT_TYPE_AVG_DATA; + break; + + case WMI_RCPI_MEASUREMENT_TYPE_LAST_MGMT: + res->measurement_type = RCPI_MEASUREMENT_TYPE_LAST_MGMT; + break; + + case WMI_RCPI_MEASUREMENT_TYPE_LAST_DATA: + res->measurement_type = RCPI_MEASUREMENT_TYPE_LAST_DATA; + break; + + default: + WMI_LOGE(FL("Invalid rcpi measurement type from firmware")); + res->measurement_type = RCPI_MEASUREMENT_TYPE_INVALID; + return QDF_STATUS_E_FAILURE; + } + + if (event->status) + return QDF_STATUS_E_FAILURE; + else + return QDF_STATUS_SUCCESS; +} + +/** + * convert_host_pdev_id_to_target_pdev_id_legacy() - Convert pdev_id from + * host to target defines. For legacy there is not conversion + * required. Just return pdev_id as it is. + * @param pdev_id: host pdev_id to be converted. + * Return: target pdev_id after conversion. + */ +static uint32_t convert_host_pdev_id_to_target_pdev_id_legacy( + wmi_unified_t wmi_handle, + uint32_t pdev_id) +{ + if (pdev_id == WMI_HOST_PDEV_ID_SOC) + return WMI_PDEV_ID_SOC; + + /*No conversion required*/ + return pdev_id; +} + +/** + * convert_target_pdev_id_to_host_pdev_id_legacy() - Convert pdev_id from + * target to host defines. For legacy there is not conversion + * required. Just return pdev_id as it is. + * @param pdev_id: target pdev_id to be converted. + * Return: host pdev_id after conversion. + */ +static uint32_t convert_target_pdev_id_to_host_pdev_id_legacy( + wmi_unified_t wmi_handle, + uint32_t pdev_id) +{ + /*No conversion required*/ + return pdev_id; +} + +/** + * convert_host_phy_id_to_target_phy_id_legacy() - Convert phy_id from + * host to target defines. For legacy there is not conversion + * required. Just return phy_id as it is. + * @param pdev_id: host phy_id to be converted. + * Return: target phy_id after conversion. + */ +static uint32_t convert_host_phy_id_to_target_phy_id_legacy( + wmi_unified_t wmi_handle, + uint32_t phy_id) +{ + /*No conversion required*/ + return phy_id; +} + +/** + * convert_target_phy_id_to_host_phy_id_legacy() - Convert phy_id from + * target to host defines. For legacy there is not conversion + * required. Just return phy_id as it is. + * @param pdev_id: target phy_id to be converted. + * Return: host phy_id after conversion. + */ +static uint32_t convert_target_phy_id_to_host_phy_id_legacy( + wmi_unified_t wmi_handle, + uint32_t phy_id) +{ + /*No conversion required*/ + return phy_id; +} + +/** + * send_set_country_cmd_tlv() - WMI scan channel list function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold scan channel list parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_set_country_cmd_tlv(wmi_unified_t wmi_handle, + struct set_country *params) +{ + wmi_buf_t buf; + QDF_STATUS qdf_status; + wmi_set_current_country_cmd_fixed_param *cmd; + uint16_t len = sizeof(*cmd); + uint8_t pdev_id = params->pdev_id; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_status = QDF_STATUS_E_NOMEM; + goto end; + } + + cmd = (wmi_set_current_country_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_set_current_country_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_set_current_country_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_host_pdev_id_to_target( + wmi_handle, + pdev_id); + WMI_LOGD("setting current country to %s and target pdev_id = %u", + params->country, cmd->pdev_id); + + qdf_mem_copy((uint8_t *)&cmd->new_alpha2, params->country, 3); + + wmi_mtrace(WMI_SET_CURRENT_COUNTRY_CMDID, NO_SESSION, 0); + qdf_status = wmi_unified_cmd_send(wmi_handle, + buf, len, WMI_SET_CURRENT_COUNTRY_CMDID); + + if (QDF_IS_STATUS_ERROR(qdf_status)) { + WMI_LOGE("Failed to send WMI_SET_CURRENT_COUNTRY_CMDID"); + wmi_buf_free(buf); + } + +end: + return qdf_status; +} + +#define WMI_REG_COUNTRY_ALPHA_SET(alpha, val0, val1, val2) do { \ + WMI_SET_BITS(alpha, 0, 8, val0); \ + WMI_SET_BITS(alpha, 8, 8, val1); \ + WMI_SET_BITS(alpha, 16, 8, val2); \ + } while (0) + +static QDF_STATUS send_user_country_code_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t pdev_id, struct cc_regdmn_s *rd) +{ + wmi_set_init_country_cmd_fixed_param *cmd; + uint16_t len; + wmi_buf_t buf; + int ret; + + len = sizeof(wmi_set_init_country_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_set_init_country_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_set_init_country_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_set_init_country_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + pdev_id); + + if (rd->flags == CC_IS_SET) { + cmd->countrycode_type = WMI_COUNTRYCODE_COUNTRY_ID; + cmd->country_code.country_id = rd->cc.country_code; + } else if (rd->flags == ALPHA_IS_SET) { + cmd->countrycode_type = WMI_COUNTRYCODE_ALPHA2; + WMI_REG_COUNTRY_ALPHA_SET(cmd->country_code.alpha2, + rd->cc.alpha[0], + rd->cc.alpha[1], + rd->cc.alpha[2]); + } else if (rd->flags == REGDMN_IS_SET) { + cmd->countrycode_type = WMI_COUNTRYCODE_DOMAIN_CODE; + cmd->country_code.domain_code = rd->cc.regdmn_id; + } + + wmi_mtrace(WMI_SET_INIT_COUNTRY_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_SET_INIT_COUNTRY_CMDID); + if (ret) { + WMI_LOGE("Failed to config wow wakeup event"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_obss_detection_cfg_cmd_tlv() - send obss detection + * configurations to firmware. + * @wmi_handle: wmi handle + * @obss_cfg_param: obss detection configurations + * + * Send WMI_SAP_OBSS_DETECTION_CFG_CMDID parameters to fw. + * + * Return: QDF_STATUS + */ +static QDF_STATUS send_obss_detection_cfg_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_obss_detection_cfg_param *obss_cfg_param) +{ + wmi_buf_t buf; + wmi_sap_obss_detection_cfg_cmd_fixed_param *cmd; + uint8_t len = sizeof(wmi_sap_obss_detection_cfg_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_sap_obss_detection_cfg_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sap_obss_detection_cfg_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sap_obss_detection_cfg_cmd_fixed_param)); + + cmd->vdev_id = obss_cfg_param->vdev_id; + cmd->detect_period_ms = obss_cfg_param->obss_detect_period_ms; + cmd->b_ap_detect_mode = obss_cfg_param->obss_11b_ap_detect_mode; + cmd->b_sta_detect_mode = obss_cfg_param->obss_11b_sta_detect_mode; + cmd->g_ap_detect_mode = obss_cfg_param->obss_11g_ap_detect_mode; + cmd->a_detect_mode = obss_cfg_param->obss_11a_detect_mode; + cmd->ht_legacy_detect_mode = obss_cfg_param->obss_ht_legacy_detect_mode; + cmd->ht_mixed_detect_mode = obss_cfg_param->obss_ht_mixed_detect_mode; + cmd->ht_20mhz_detect_mode = obss_cfg_param->obss_ht_20mhz_detect_mode; + + wmi_mtrace(WMI_SAP_OBSS_DETECTION_CFG_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_SAP_OBSS_DETECTION_CFG_CMDID)) { + WMI_LOGE("Failed to send WMI_SAP_OBSS_DETECTION_CFG_CMDID"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_obss_detection_info_tlv() - Extract obss detection info + * received from firmware. + * @evt_buf: pointer to event buffer + * @obss_detection: Pointer to hold obss detection info + * + * Return: QDF_STATUS + */ +static QDF_STATUS extract_obss_detection_info_tlv(uint8_t *evt_buf, + struct wmi_obss_detect_info + *obss_detection) +{ + WMI_SAP_OBSS_DETECTION_REPORT_EVENTID_param_tlvs *param_buf; + wmi_sap_obss_detection_info_evt_fixed_param *fix_param; + + if (!obss_detection) { + WMI_LOGE("%s: Invalid obss_detection event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + + param_buf = (WMI_SAP_OBSS_DETECTION_REPORT_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("%s: Invalid evt_buf", __func__); + return QDF_STATUS_E_INVAL; + } + + fix_param = param_buf->fixed_param; + obss_detection->vdev_id = fix_param->vdev_id; + obss_detection->matched_detection_masks = + fix_param->matched_detection_masks; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&fix_param->matched_bssid_addr, + &obss_detection->matched_bssid_addr[0]); + switch (fix_param->reason) { + case WMI_SAP_OBSS_DETECTION_EVENT_REASON_NOT_SUPPORT: + obss_detection->reason = OBSS_OFFLOAD_DETECTION_DISABLED; + break; + case WMI_SAP_OBSS_DETECTION_EVENT_REASON_PRESENT_NOTIFY: + obss_detection->reason = OBSS_OFFLOAD_DETECTION_PRESENT; + break; + case WMI_SAP_OBSS_DETECTION_EVENT_REASON_ABSENT_TIMEOUT: + obss_detection->reason = OBSS_OFFLOAD_DETECTION_ABSENT; + break; + default: + WMI_LOGE("%s: Invalid reason %d", __func__, fix_param->reason); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_roam_scan_stats_cmd_tlv() - Send roam scan stats req command to fw + * @wmi_handle: wmi handle + * @params: pointer to request structure + * + * Return: QDF_STATUS + */ +static QDF_STATUS +send_roam_scan_stats_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_roam_scan_stats_req *params) +{ + wmi_buf_t buf; + wmi_request_roam_scan_stats_cmd_fixed_param *cmd; + WMITLV_TAG_ID tag; + uint32_t size; + uint32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_request_roam_scan_stats_cmd_fixed_param *)wmi_buf_data(buf); + + tag = WMITLV_TAG_STRUC_wmi_request_roam_scan_stats_cmd_fixed_param; + size = WMITLV_GET_STRUCT_TLVLEN( + wmi_request_roam_scan_stats_cmd_fixed_param); + WMITLV_SET_HDR(&cmd->tlv_header, tag, size); + + cmd->vdev_id = params->vdev_id; + + WMI_LOGD(FL("Roam Scan Stats Req vdev_id: %u"), cmd->vdev_id); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_ROAM_SCAN_STATS_CMDID)) { + WMI_LOGE("%s: Failed to send WMI_REQUEST_ROAM_SCAN_STATS_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_roam_scan_ch_list_req_cmd_tlv() - send wmi cmd to get roam scan + * channel list from firmware + * @wmi_handle: wmi handler + * @vdev_id: vdev id + * + * Return: QDF_STATUS + */ +static QDF_STATUS send_roam_scan_ch_list_req_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id) +{ + wmi_buf_t buf; + wmi_roam_get_scan_channel_list_cmd_fixed_param *cmd; + uint16_t len = sizeof(*cmd); + int ret; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed to allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_roam_get_scan_channel_list_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_get_scan_channel_list_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_roam_get_scan_channel_list_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + wmi_mtrace(WMI_ROAM_GET_SCAN_CHANNEL_LIST_CMDID, vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ROAM_GET_SCAN_CHANNEL_LIST_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send get roam scan channels request = %d", + ret); + wmi_buf_free(buf); + } + return ret; +} + +/** + * extract_roam_scan_stats_res_evt_tlv() - Extract roam scan stats event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @vdev_id: output pointer to hold vdev id + * @res_param: output pointer to hold the allocated response + * + * Return: QDF_STATUS + */ +static QDF_STATUS +extract_roam_scan_stats_res_evt_tlv(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *vdev_id, + struct wmi_roam_scan_stats_res **res_param) +{ + WMI_ROAM_SCAN_STATS_EVENTID_param_tlvs *param_buf; + wmi_roam_scan_stats_event_fixed_param *fixed_param; + uint32_t *client_id = NULL; + wmi_roaming_timestamp *timestamp = NULL; + uint32_t *num_channels = NULL; + uint32_t *chan_info = NULL; + wmi_mac_addr *old_bssid = NULL; + uint32_t *is_roaming_success = NULL; + wmi_mac_addr *new_bssid = NULL; + uint32_t *num_roam_candidates = NULL; + wmi_roam_scan_trigger_reason *roam_reason = NULL; + wmi_mac_addr *bssid = NULL; + uint32_t *score = NULL; + uint32_t *channel = NULL; + uint32_t *rssi = NULL; + int chan_idx = 0, cand_idx = 0; + uint32_t total_len; + struct wmi_roam_scan_stats_res *res; + uint32_t i, j; + uint32_t num_scans, scan_param_size; + + *res_param = NULL; + *vdev_id = 0xFF; /* Initialize to invalid vdev id */ + param_buf = (WMI_ROAM_SCAN_STATS_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE(FL("Invalid roam scan stats event")); + return QDF_STATUS_E_INVAL; + } + + fixed_param = param_buf->fixed_param; + + num_scans = fixed_param->num_roam_scans; + scan_param_size = sizeof(struct wmi_roam_scan_stats_params); + *vdev_id = fixed_param->vdev_id; + if (num_scans > WMI_ROAM_SCAN_STATS_MAX) { + wmi_err_rl("%u exceeded maximum roam scan stats: %u", + num_scans, WMI_ROAM_SCAN_STATS_MAX); + return QDF_STATUS_E_INVAL; + } + + total_len = sizeof(*res) + num_scans * scan_param_size; + + res = qdf_mem_malloc(total_len); + if (!res) + return QDF_STATUS_E_NOMEM; + + if (!num_scans) { + *res_param = res; + return QDF_STATUS_SUCCESS; + } + + if (param_buf->client_id && + param_buf->num_client_id == num_scans) + client_id = param_buf->client_id; + + if (param_buf->timestamp && + param_buf->num_timestamp == num_scans) + timestamp = param_buf->timestamp; + + if (param_buf->old_bssid && + param_buf->num_old_bssid == num_scans) + old_bssid = param_buf->old_bssid; + + if (param_buf->new_bssid && + param_buf->num_new_bssid == num_scans) + new_bssid = param_buf->new_bssid; + + if (param_buf->is_roaming_success && + param_buf->num_is_roaming_success == num_scans) + is_roaming_success = param_buf->is_roaming_success; + + if (param_buf->roam_reason && + param_buf->num_roam_reason == num_scans) + roam_reason = param_buf->roam_reason; + + if (param_buf->num_channels && + param_buf->num_num_channels == num_scans) { + uint32_t count, chan_info_sum = 0; + + num_channels = param_buf->num_channels; + for (count = 0; count < param_buf->num_num_channels; count++) { + if (param_buf->num_channels[count] > + WMI_ROAM_SCAN_STATS_CHANNELS_MAX) { + wmi_err_rl("%u exceeded max scan channels %u", + param_buf->num_channels[count], + WMI_ROAM_SCAN_STATS_CHANNELS_MAX); + goto error; + } + chan_info_sum += param_buf->num_channels[count]; + } + + if (param_buf->chan_info && + param_buf->num_chan_info == chan_info_sum) + chan_info = param_buf->chan_info; + } + + if (param_buf->num_roam_candidates && + param_buf->num_num_roam_candidates == num_scans) { + uint32_t cnt, roam_cand_sum = 0; + + num_roam_candidates = param_buf->num_roam_candidates; + for (cnt = 0; cnt < param_buf->num_num_roam_candidates; cnt++) { + if (param_buf->num_roam_candidates[cnt] > + WMI_ROAM_SCAN_STATS_CANDIDATES_MAX) { + wmi_err_rl("%u exceeded max scan cand %u", + param_buf->num_roam_candidates[cnt], + WMI_ROAM_SCAN_STATS_CANDIDATES_MAX); + goto error; + } + roam_cand_sum += param_buf->num_roam_candidates[cnt]; + } + + if (param_buf->bssid && + param_buf->num_bssid == roam_cand_sum) + bssid = param_buf->bssid; + + if (param_buf->score && + param_buf->num_score == roam_cand_sum) + score = param_buf->score; + + if (param_buf->channel && + param_buf->num_channel == roam_cand_sum) + channel = param_buf->channel; + + if (param_buf->rssi && + param_buf->num_rssi == roam_cand_sum) + rssi = param_buf->rssi; + } + + res->num_roam_scans = num_scans; + for (i = 0; i < num_scans; i++) { + struct wmi_roam_scan_stats_params *roam = &res->roam_scan[i]; + + if (timestamp) + roam->time_stamp = timestamp[i].lower32bit | + (timestamp[i].upper32bit << 31); + + if (client_id) + roam->client_id = client_id[i]; + + if (num_channels) { + roam->num_scan_chans = num_channels[i]; + if (chan_info) { + for (j = 0; j < num_channels[i]; j++) + roam->scan_freqs[j] = + chan_info[chan_idx++]; + } + } + + if (is_roaming_success) + roam->is_roam_successful = is_roaming_success[i]; + + if (roam_reason) { + roam->trigger_id = roam_reason[i].trigger_id; + roam->trigger_value = roam_reason[i].trigger_value; + } + + if (num_roam_candidates) { + roam->num_roam_candidates = num_roam_candidates[i]; + + for (j = 0; j < num_roam_candidates[i]; j++) { + if (score) + roam->cand[j].score = score[cand_idx]; + if (rssi) + roam->cand[j].rssi = rssi[cand_idx]; + if (channel) + roam->cand[j].freq = + channel[cand_idx]; + + if (bssid) + WMI_MAC_ADDR_TO_CHAR_ARRAY( + &bssid[cand_idx], + roam->cand[j].bssid); + + cand_idx++; + } + } + + if (old_bssid) + WMI_MAC_ADDR_TO_CHAR_ARRAY(&old_bssid[i], + roam->old_bssid); + + if (new_bssid) + WMI_MAC_ADDR_TO_CHAR_ARRAY(&new_bssid[i], + roam->new_bssid); + } + + *res_param = res; + + return QDF_STATUS_SUCCESS; +error: + qdf_mem_free(res); + return QDF_STATUS_E_FAILURE; +} + +/** + * extract_offload_bcn_tx_status_evt() - Extract beacon-tx status event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @vdev_id: output pointer to hold vdev id + * @tx_status: output pointer to hold the tx_status + * + * Return: QDF_STATUS + */ +static QDF_STATUS extract_offload_bcn_tx_status_evt(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t *vdev_id, + uint32_t *tx_status) { + WMI_OFFLOAD_BCN_TX_STATUS_EVENTID_param_tlvs *param_buf; + wmi_offload_bcn_tx_status_event_fixed_param *bcn_tx_status_event; + + param_buf = (WMI_OFFLOAD_BCN_TX_STATUS_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid offload bcn tx status event buffer"); + return QDF_STATUS_E_INVAL; + } + + bcn_tx_status_event = param_buf->fixed_param; + *vdev_id = bcn_tx_status_event->vdev_id; + *tx_status = bcn_tx_status_event->tx_status; + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_SUPPORT_GREEN_AP +static QDF_STATUS extract_green_ap_egap_status_info_tlv( + uint8_t *evt_buf, + struct wlan_green_ap_egap_status_info *egap_status_info_params) +{ + WMI_AP_PS_EGAP_INFO_EVENTID_param_tlvs *param_buf; + wmi_ap_ps_egap_info_event_fixed_param *egap_info_event; + wmi_ap_ps_egap_info_chainmask_list *chainmask_event; + + param_buf = (WMI_AP_PS_EGAP_INFO_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid EGAP Info status event buffer"); + return QDF_STATUS_E_INVAL; + } + + egap_info_event = (wmi_ap_ps_egap_info_event_fixed_param *) + param_buf->fixed_param; + chainmask_event = (wmi_ap_ps_egap_info_chainmask_list *) + param_buf->chainmask_list; + + if (!egap_info_event || !chainmask_event) { + WMI_LOGE("Invalid EGAP Info event or chainmask event"); + return QDF_STATUS_E_INVAL; + } + + egap_status_info_params->status = egap_info_event->status; + egap_status_info_params->mac_id = chainmask_event->mac_id; + egap_status_info_params->tx_chainmask = chainmask_event->tx_chainmask; + egap_status_info_params->rx_chainmask = chainmask_event->rx_chainmask; + + return QDF_STATUS_SUCCESS; +} +#endif + +/* + * extract_comb_phyerr_tlv() - extract comb phy error from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @datalen: data length of event buffer + * @buf_offset: Pointer to hold value of current event buffer offset + * post extraction + * @phyerr: Pointer to hold phyerr + * + * Return: QDF_STATUS + */ +static QDF_STATUS extract_comb_phyerr_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint16_t datalen, + uint16_t *buf_offset, + wmi_host_phyerr_t *phyerr) +{ + WMI_PHYERR_EVENTID_param_tlvs *param_tlvs; + wmi_comb_phyerr_rx_hdr *pe_hdr; + + param_tlvs = (WMI_PHYERR_EVENTID_param_tlvs *)evt_buf; + if (!param_tlvs) { + WMI_LOGD("%s: Received null data from FW", __func__); + return QDF_STATUS_E_FAILURE; + } + + pe_hdr = param_tlvs->hdr; + if (!pe_hdr) { + WMI_LOGD("%s: Received Data PE Header is NULL", __func__); + return QDF_STATUS_E_FAILURE; + } + + /* Ensure it's at least the size of the header */ + if (datalen < sizeof(*pe_hdr)) { + WMI_LOGD("%s: Expected minimum size %zu, received %d", + __func__, sizeof(*pe_hdr), datalen); + return QDF_STATUS_E_FAILURE; + } + + phyerr->pdev_id = wmi_handle->ops-> + convert_pdev_id_target_to_host(wmi_handle, pe_hdr->pdev_id); + phyerr->tsf64 = pe_hdr->tsf_l32; + phyerr->tsf64 |= (((uint64_t)pe_hdr->tsf_u32) << 32); + phyerr->bufp = param_tlvs->bufp; + + if (pe_hdr->buf_len > param_tlvs->num_bufp) { + WMI_LOGD("Invalid buf_len %d, num_bufp %d", + pe_hdr->buf_len, param_tlvs->num_bufp); + return QDF_STATUS_E_FAILURE; + } + + phyerr->buf_len = pe_hdr->buf_len; + phyerr->phy_err_mask0 = pe_hdr->rsPhyErrMask0; + phyerr->phy_err_mask1 = pe_hdr->rsPhyErrMask1; + *buf_offset = sizeof(*pe_hdr) + sizeof(uint32_t); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_single_phyerr_tlv() - extract single phy error from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @datalen: data length of event buffer + * @buf_offset: Pointer to hold value of current event buffer offset + * post extraction + * @phyerr: Pointer to hold phyerr + * + * Return: QDF_STATUS + */ +static QDF_STATUS extract_single_phyerr_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint16_t datalen, + uint16_t *buf_offset, + wmi_host_phyerr_t *phyerr) +{ + wmi_single_phyerr_rx_event *ev; + uint16_t n = *buf_offset; + uint8_t *data = (uint8_t *)evt_buf; + + if (n < datalen) { + if ((datalen - n) < sizeof(ev->hdr)) { + WMI_LOGD("%s: Not enough space. len=%d, n=%d, hdr=%zu", + __func__, datalen, n, sizeof(ev->hdr)); + return QDF_STATUS_E_FAILURE; + } + + /* + * Obtain a pointer to the beginning of the current event. + * data[0] is the beginning of the WMI payload. + */ + ev = (wmi_single_phyerr_rx_event *)&data[n]; + + /* + * Sanity check the buffer length of the event against + * what we currently have. + * + * Since buf_len is 32 bits, we check if it overflows + * a large 32 bit value. It's not 0x7fffffff because + * we increase n by (buf_len + sizeof(hdr)), which would + * in itself cause n to overflow. + * + * If "int" is 64 bits then this becomes a moot point. + */ + if (ev->hdr.buf_len > PHYERROR_MAX_BUFFER_LENGTH) { + WMI_LOGD("%s: buf_len is garbage 0x%x", + __func__, ev->hdr.buf_len); + return QDF_STATUS_E_FAILURE; + } + + if ((n + ev->hdr.buf_len) > datalen) { + WMI_LOGD("%s: len exceeds n=%d, buf_len=%d, datalen=%d", + __func__, n, ev->hdr.buf_len, datalen); + return QDF_STATUS_E_FAILURE; + } + + phyerr->phy_err_code = WMI_UNIFIED_PHYERRCODE_GET(&ev->hdr); + phyerr->tsf_timestamp = ev->hdr.tsf_timestamp; + phyerr->bufp = &ev->bufp[0]; + phyerr->buf_len = ev->hdr.buf_len; + phyerr->rf_info.rssi_comb = WMI_UNIFIED_RSSI_COMB_GET(&ev->hdr); + + /* + * Advance the buffer pointer to the next PHY error. + * buflen is the length of this payload, so we need to + * advance past the current header _AND_ the payload. + */ + n += sizeof(*ev) + ev->hdr.buf_len; + } + *buf_offset = n; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_esp_estimation_ev_param_tlv() - extract air time from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to hold esp event + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_INVAL on failure + */ +static QDF_STATUS +extract_esp_estimation_ev_param_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + struct esp_estimation_event *param) +{ + WMI_ESP_ESTIMATE_EVENTID_param_tlvs *param_buf; + wmi_esp_estimate_event_fixed_param *esp_event; + + param_buf = (WMI_ESP_ESTIMATE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid ESP Estimate Event buffer"); + return QDF_STATUS_E_INVAL; + } + esp_event = param_buf->fixed_param; + param->ac_airtime_percentage = esp_event->ac_airtime_percentage; + + param->pdev_id = convert_target_pdev_id_to_host_pdev_id( + wmi_handle, + esp_event->pdev_id); + + if (param->pdev_id == WMI_HOST_PDEV_ID_INVALID) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +/* + * send_bss_color_change_enable_cmd_tlv() - Send command to enable or disable of + * updating bss color change within firmware when AP announces bss color change. + * @wmi_handle: wmi handle + * @vdev_id: vdev ID + * @enable: enable bss color change within firmware + * + * Send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID parameters to fw. + * + * Return: QDF_STATUS + */ +static QDF_STATUS send_bss_color_change_enable_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, + bool enable) +{ + wmi_buf_t buf; + wmi_bss_color_change_enable_fixed_param *cmd; + uint8_t len = sizeof(wmi_bss_color_change_enable_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_bss_color_change_enable_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bss_color_change_enable_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_bss_color_change_enable_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->enable = enable; + wmi_mtrace(WMI_BSS_COLOR_CHANGE_ENABLE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_BSS_COLOR_CHANGE_ENABLE_CMDID)) { + WMI_LOGE("Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_obss_color_collision_cfg_cmd_tlv() - send bss color detection + * configurations to firmware. + * @wmi_handle: wmi handle + * @cfg_param: obss detection configurations + * + * Send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID parameters to fw. + * + * Return: QDF_STATUS + */ +static QDF_STATUS send_obss_color_collision_cfg_cmd_tlv( + wmi_unified_t wmi_handle, + struct wmi_obss_color_collision_cfg_param *cfg_param) +{ + wmi_buf_t buf; + wmi_obss_color_collision_det_config_fixed_param *cmd; + uint8_t len = sizeof(wmi_obss_color_collision_det_config_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_obss_color_collision_det_config_fixed_param *)wmi_buf_data( + buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_obss_color_collision_det_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_obss_color_collision_det_config_fixed_param)); + cmd->vdev_id = cfg_param->vdev_id; + cmd->flags = cfg_param->flags; + cmd->current_bss_color = cfg_param->current_bss_color; + cmd->detection_period_ms = cfg_param->detection_period_ms; + cmd->scan_period_ms = cfg_param->scan_period_ms; + cmd->free_slot_expiry_time_ms = cfg_param->free_slot_expiry_time_ms; + + switch (cfg_param->evt_type) { + case OBSS_COLOR_COLLISION_DETECTION_DISABLE: + cmd->evt_type = WMI_BSS_COLOR_COLLISION_DISABLE; + break; + case OBSS_COLOR_COLLISION_DETECTION: + cmd->evt_type = WMI_BSS_COLOR_COLLISION_DETECTION; + break; + case OBSS_COLOR_FREE_SLOT_TIMER_EXPIRY: + cmd->evt_type = WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY; + break; + case OBSS_COLOR_FREE_SLOT_AVAILABLE: + cmd->evt_type = WMI_BSS_COLOR_FREE_SLOT_AVAILABLE; + break; + default: + WMI_LOGE("%s: invalid event type: %d", + __func__, cfg_param->evt_type); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + WMI_LOGD("%s: evt_type: %d vdev id: %d current_bss_color: %d\n" + "detection_period_ms: %d scan_period_ms: %d\n" + "free_slot_expiry_timer_ms: %d", + __func__, cmd->evt_type, cmd->vdev_id, cmd->current_bss_color, + cmd->detection_period_ms, cmd->scan_period_ms, + cmd->free_slot_expiry_time_ms); + + wmi_mtrace(WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID)) { + WMI_LOGE("%s: Sending OBSS color det cmd failed, vdev_id: %d", + __func__, cfg_param->vdev_id); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_obss_color_collision_info_tlv() - Extract bss color collision info + * received from firmware. + * @evt_buf: pointer to event buffer + * @info: Pointer to hold bss collision info + * + * Return: QDF_STATUS + */ +static QDF_STATUS extract_obss_color_collision_info_tlv(uint8_t *evt_buf, + struct wmi_obss_color_collision_info *info) +{ + WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID_param_tlvs *param_buf; + wmi_obss_color_collision_evt_fixed_param *fix_param; + + if (!info) { + WMI_LOGE("%s: Invalid obss color buffer", __func__); + return QDF_STATUS_E_INVAL; + } + + param_buf = (WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID_param_tlvs *) + evt_buf; + if (!param_buf) { + WMI_LOGE("%s: Invalid evt_buf", __func__); + return QDF_STATUS_E_INVAL; + } + + fix_param = param_buf->fixed_param; + info->vdev_id = fix_param->vdev_id; + info->obss_color_bitmap_bit0to31 = + fix_param->bss_color_bitmap_bit0to31; + info->obss_color_bitmap_bit32to63 = + fix_param->bss_color_bitmap_bit32to63; + + switch (fix_param->evt_type) { + case WMI_BSS_COLOR_COLLISION_DISABLE: + info->evt_type = OBSS_COLOR_COLLISION_DETECTION_DISABLE; + break; + case WMI_BSS_COLOR_COLLISION_DETECTION: + info->evt_type = OBSS_COLOR_COLLISION_DETECTION; + break; + case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY: + info->evt_type = OBSS_COLOR_FREE_SLOT_TIMER_EXPIRY; + break; + case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE: + info->evt_type = OBSS_COLOR_FREE_SLOT_AVAILABLE; + break; + default: + WMI_LOGE("%s: invalid event type: %d, vdev_id: %d", + __func__, fix_param->evt_type, fix_param->vdev_id); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static void wmi_11ax_bss_color_attach_tlv(struct wmi_unified *wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_obss_color_collision_cfg_cmd = + send_obss_color_collision_cfg_cmd_tlv; + ops->extract_obss_color_collision_info = + extract_obss_color_collision_info_tlv; +} + +#if defined(WLAN_SUPPORT_FILS) || defined(CONFIG_BAND_6GHZ) +static QDF_STATUS +send_vdev_fils_enable_cmd_send(struct wmi_unified *wmi_handle, + struct config_fils_params *param) +{ + wmi_buf_t buf; + wmi_enable_fils_cmd_fixed_param *cmd; + uint8_t len = sizeof(wmi_enable_fils_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_enable_fils_cmd_fixed_param *)wmi_buf_data( + buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_enable_fils_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_enable_fils_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->fd_period = param->fd_period; + if (param->send_prb_rsp_frame) + cmd->flags |= WMI_FILS_FLAGS_BITMAP_BCAST_PROBE_RSP; + WMI_LOGD("%s: vdev id: %d fd_period: %d cmd->Flags %d", + __func__, cmd->vdev_id, cmd->fd_period, cmd->flags); + wmi_mtrace(WMI_ENABLE_FILS_CMDID, cmd->vdev_id, cmd->fd_period); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ENABLE_FILS_CMDID)) { + WMI_LOGE("%s: Sending FILS cmd failed, vdev_id: %d", + __func__, param->vdev_id); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef WLAN_MWS_INFO_DEBUGFS +/** + * send_mws_coex_status_req_cmd_tlv() - send coex cmd to fw + * + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @cmd_id: Coex command id + * + * Send WMI_VDEV_GET_MWS_COEX_INFO_CMDID to fw. + * + * Return: QDF_STATUS + */ +static QDF_STATUS send_mws_coex_status_req_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint32_t cmd_id) +{ + wmi_buf_t buf; + wmi_vdev_get_mws_coex_info_cmd_fixed_param *cmd; + uint16_t len = sizeof(*cmd); + int ret; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed to allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_vdev_get_mws_coex_info_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_get_mws_coex_info_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_get_mws_coex_info_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->cmd_id = cmd_id; + wmi_mtrace(WMI_VDEV_GET_MWS_COEX_INFO_CMDID, vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_GET_MWS_COEX_INFO_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send set param command ret = %d", ret); + wmi_buf_free(buf); + } + return ret; +} +#endif + +#ifdef WIFI_POS_CONVERGED +/** + * extract_oem_response_param_tlv() - Extract oem response params + * @wmi_handle: wmi handle + * @resp_buf: response buffer + * @oem_resp_param: pointer to hold oem response params + * + * Return: QDF_STATUS_SUCCESS on success or proper error code. + */ +static QDF_STATUS +extract_oem_response_param_tlv(wmi_unified_t wmi_handle, void *resp_buf, + struct wmi_oem_response_param *oem_resp_param) +{ + uint64_t temp_addr; + WMI_OEM_RESPONSE_EVENTID_param_tlvs *param_buf = + (WMI_OEM_RESPONSE_EVENTID_param_tlvs *)resp_buf; + + if (!param_buf) { + WMI_LOGE("Invalid OEM response"); + return QDF_STATUS_E_INVAL; + } + + if (param_buf->num_data) { + oem_resp_param->num_data1 = param_buf->num_data; + oem_resp_param->data_1 = param_buf->data; + } + + if (param_buf->num_data2) { + oem_resp_param->num_data2 = param_buf->num_data2; + oem_resp_param->data_2 = param_buf->data2; + } + + if (param_buf->indirect_data) { + oem_resp_param->indirect_data.pdev_id = + param_buf->indirect_data->pdev_id; + temp_addr = (param_buf->indirect_data->addr_hi) & 0xf; + oem_resp_param->indirect_data.addr = + param_buf->indirect_data->addr_lo + + ((uint64_t)temp_addr << 32); + oem_resp_param->indirect_data.len = + param_buf->indirect_data->len; + } + + return QDF_STATUS_SUCCESS; +} +#endif /* WIFI_POS_CONVERGED */ + +/** + * extract_hw_mode_resp_event_status_tlv() - Extract HW mode change status + * @wmi_handle: wmi handle + * @event_buf: pointer to event buffer + * @cmd_status: status of HW mode change command + * + * Return QDF_STATUS_SUCCESS on success or proper error code. + */ +static QDF_STATUS +extract_hw_mode_resp_event_status_tlv(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *cmd_status) +{ + WMI_PDEV_SET_HW_MODE_RESP_EVENTID_param_tlvs *param_buf; + wmi_pdev_set_hw_mode_response_event_fixed_param *fixed_param; + + param_buf = (WMI_PDEV_SET_HW_MODE_RESP_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid mode change event buffer"); + return QDF_STATUS_E_INVAL; + } + + fixed_param = param_buf->fixed_param; + if (!fixed_param) { + WMI_LOGE("Invalid fixed param"); + return QDF_STATUS_E_INVAL; + } + + *cmd_status = fixed_param->status; + return QDF_STATUS_SUCCESS; +} + +#ifdef FEATURE_ANI_LEVEL_REQUEST +static QDF_STATUS send_ani_level_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t *freqs, + uint8_t num_freqs) +{ + wmi_buf_t buf; + wmi_get_channel_ani_cmd_fixed_param *cmd; + QDF_STATUS ret; + uint32_t len; + A_UINT32 *chan_list; + uint8_t i, *buf_ptr; + + len = sizeof(wmi_get_channel_ani_cmd_fixed_param) + + WMI_TLV_HDR_SIZE + + num_freqs * sizeof(A_UINT32); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_FAILURE; + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_get_channel_ani_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_get_channel_ani_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_get_channel_ani_cmd_fixed_param)); + + buf_ptr += sizeof(wmi_get_channel_ani_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (num_freqs * sizeof(A_UINT32))); + + chan_list = (A_UINT32 *)(buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < num_freqs; i++) { + chan_list[i] = freqs[i]; + WMI_LOGD("Requesting ANI for channel[%d]", chan_list[i]); + } + + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_GET_CHANNEL_ANI_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("WMI_GET_CHANNEL_ANI_CMDID send error %d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +static QDF_STATUS extract_ani_level_tlv(uint8_t *evt_buf, + struct wmi_host_ani_level_event **info, + uint32_t *num_freqs) +{ + WMI_GET_CHANNEL_ANI_EVENTID_param_tlvs *param_buf; + wmi_get_channel_ani_event_fixed_param *fixed_param; + wmi_channel_ani_info_tlv_param *tlv_params; + uint8_t *buf_ptr, i; + + param_buf = (WMI_GET_CHANNEL_ANI_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + wmi_err("Invalid ani level event buffer"); + return QDF_STATUS_E_INVAL; + } + + fixed_param = + (wmi_get_channel_ani_event_fixed_param *)param_buf->fixed_param; + if (!fixed_param) { + wmi_err("Invalid fixed param"); + return QDF_STATUS_E_INVAL; + } + + buf_ptr = (uint8_t *)fixed_param; + buf_ptr += sizeof(wmi_get_channel_ani_event_fixed_param); + buf_ptr += WMI_TLV_HDR_SIZE; + + *num_freqs = param_buf->num_ani_info; + if (*num_freqs > MAX_NUM_FREQS_FOR_ANI_LEVEL) { + wmi_err("Invalid number of freqs received"); + return QDF_STATUS_E_INVAL; + } + + *info = qdf_mem_malloc(*num_freqs * + sizeof(struct wmi_host_ani_level_event)); + if (!(*info)) + return QDF_STATUS_E_NOMEM; + + tlv_params = (wmi_channel_ani_info_tlv_param *)buf_ptr; + for (i = 0; i < param_buf->num_ani_info; i++) { + (*info)[i].ani_level = tlv_params->ani_level; + (*info)[i].chan_freq = tlv_params->chan_freq; + tlv_params++; + } + + return QDF_STATUS_SUCCESS; +} +#endif /* FEATURE_ANI_LEVEL_REQUEST */ + +#ifdef WLAN_FEATURE_ROAM_OFFLOAD +/** + * extract_roam_trigger_stats_tlv() - Extract the Roam trigger stats + * from the WMI_ROAM_STATS_EVENTID + * @wmi_handle: wmi handle + * @evt_buf: Pointer to the event buffer + * @trig: Pointer to destination structure to fill data + * @idx: TLV id + */ +static QDF_STATUS +extract_roam_trigger_stats_tlv(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_roam_trigger_info *trig, uint8_t idx) +{ + WMI_ROAM_STATS_EVENTID_param_tlvs *param_buf; + wmi_roam_trigger_reason *src_data = NULL; + + param_buf = (WMI_ROAM_STATS_EVENTID_param_tlvs *)evt_buf; + if (!param_buf || !param_buf->roam_trigger_reason) + return QDF_STATUS_E_FAILURE; + + src_data = ¶m_buf->roam_trigger_reason[idx]; + + trig->present = true; + trig->trigger_reason = src_data->trigger_reason; + trig->trigger_sub_reason = src_data->trigger_sub_reason; + trig->current_rssi = src_data->current_rssi; + trig->timestamp = src_data->timestamp; + + switch (trig->trigger_reason) { + case WMI_ROAM_TRIGGER_REASON_PER: + case WMI_ROAM_TRIGGER_REASON_BMISS: + case WMI_ROAM_TRIGGER_REASON_HIGH_RSSI: + case WMI_ROAM_TRIGGER_REASON_MAWC: + case WMI_ROAM_TRIGGER_REASON_DENSE: + case WMI_ROAM_TRIGGER_REASON_BACKGROUND: + case WMI_ROAM_TRIGGER_REASON_IDLE: + case WMI_ROAM_TRIGGER_REASON_FORCED: + case WMI_ROAM_TRIGGER_REASON_UNIT_TEST: + return QDF_STATUS_SUCCESS; + + case WMI_ROAM_TRIGGER_REASON_BTM: + trig->btm_trig_data.btm_request_mode = + src_data->btm_request_mode; + trig->btm_trig_data.disassoc_timer = + src_data->disassoc_imminent_timer; + trig->btm_trig_data.validity_interval = + src_data->validity_internal; + trig->btm_trig_data.candidate_list_count = + src_data->candidate_list_count; + trig->btm_trig_data.btm_resp_status = + src_data->btm_response_status_code; + trig->btm_trig_data.btm_bss_termination_timeout = + src_data->btm_bss_termination_timeout; + trig->btm_trig_data.btm_mbo_assoc_retry_timeout = + src_data->btm_mbo_assoc_retry_timeout; + return QDF_STATUS_SUCCESS; + + case WMI_ROAM_TRIGGER_REASON_BSS_LOAD: + trig->cu_trig_data.cu_load = src_data->cu_load; + return QDF_STATUS_SUCCESS; + + case WMI_ROAM_TRIGGER_REASON_DEAUTH: + trig->deauth_trig_data.type = src_data->deauth_type; + trig->deauth_trig_data.reason = src_data->deauth_reason; + return QDF_STATUS_SUCCESS; + + case WMI_ROAM_TRIGGER_REASON_PERIODIC: + case WMI_ROAM_TRIGGER_REASON_LOW_RSSI: + trig->rssi_trig_data.threshold = src_data->roam_rssi_threshold; + return QDF_STATUS_SUCCESS; + + default: + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_roam_scan_ap_stats_tlv() - Extract the Roam trigger stats + * from the WMI_ROAM_STATS_EVENTID + * @wmi_handle: wmi handle + * @evt_buf: Pointer to the event buffer + * @dst: Pointer to destination structure to fill data + * @ap_idx: TLV index for this roam scan + * @num_cand: number of candidates list in the roam scan + */ +static QDF_STATUS +extract_roam_scan_ap_stats_tlv(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_roam_candidate_info *dst, + uint8_t ap_idx, uint16_t num_cand) +{ + WMI_ROAM_STATS_EVENTID_param_tlvs *param_buf; + wmi_roam_ap_info *src = NULL; + uint8_t i; + + param_buf = (WMI_ROAM_STATS_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + wmi_err("Param buf is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (ap_idx >= param_buf->num_roam_ap_info) { + wmi_err("Invalid roam scan AP tlv ap_idx:%d total_ap:%d", + ap_idx, param_buf->num_roam_ap_info); + return QDF_STATUS_E_FAILURE; + } + + src = ¶m_buf->roam_ap_info[ap_idx]; + + for (i = 0; i < num_cand; i++) { + WMI_MAC_ADDR_TO_CHAR_ARRAY(&src->bssid, dst->bssid.bytes); + dst->type = src->candidate_type; + dst->freq = src->channel; + dst->etp = src->etp; + dst->rssi = src->rssi; + dst->rssi_score = src->rssi_score; + dst->cu_load = src->cu_load; + dst->cu_score = src->cu_score; + dst->total_score = src->total_score; + dst->timestamp = src->timestamp; + dst->bl_reason = src->bl_reason; + dst->bl_source = src->bl_source; + dst->bl_timestamp = src->bl_timestamp; + dst->bl_original_timeout = src->bl_original_timeout; + + src++; + dst++; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_roam_scan_stats_tlv() - Extract the Roam trigger stats + * from the WMI_ROAM_STATS_EVENTID + * @wmi_handle: wmi handle + * @evt_buf: Pointer to the event buffer + * @dst: Pointer to destination structure to fill data + * @idx: TLV id + * @chan_idx: Index of the channel tlv for the current roam trigger + * @ap_idx: Index of the candidate AP TLV for the current roam trigger + */ +static QDF_STATUS +extract_roam_scan_stats_tlv(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_roam_scan_data *dst, uint8_t idx, + uint8_t chan_idx, uint8_t ap_idx) +{ + WMI_ROAM_STATS_EVENTID_param_tlvs *param_buf; + wmi_roam_scan_info *src_data = NULL; + wmi_roam_scan_channel_info *src_chan = NULL; + QDF_STATUS status; + uint8_t i; + + param_buf = (WMI_ROAM_STATS_EVENTID_param_tlvs *)evt_buf; + if (!param_buf || !param_buf->roam_scan_info || + idx >= param_buf->num_roam_scan_info) + return QDF_STATUS_E_FAILURE; + + src_data = ¶m_buf->roam_scan_info[idx]; + + dst->present = true; + dst->type = src_data->roam_scan_type; + dst->num_chan = src_data->roam_scan_channel_count; + dst->next_rssi_threshold = src_data->next_rssi_trigger_threshold; + + /* Read the channel data only for dst->type is 0 (partial scan) */ + if (dst->num_chan && !dst->type && param_buf->num_roam_scan_chan_info && + chan_idx < param_buf->num_roam_scan_chan_info) { + if (dst->num_chan > MAX_ROAM_SCAN_CHAN) + dst->num_chan = MAX_ROAM_SCAN_CHAN; + + src_chan = ¶m_buf->roam_scan_chan_info[chan_idx]; + for (i = 0; i < dst->num_chan; i++) { + dst->chan_freq[i] = src_chan->channel; + src_chan++; + } + } + + if (!src_data->roam_ap_count || !param_buf->num_roam_ap_info) + return QDF_STATUS_SUCCESS; + + dst->num_ap = src_data->roam_ap_count; + if (dst->num_ap > MAX_ROAM_CANDIDATE_AP) + dst->num_ap = MAX_ROAM_CANDIDATE_AP; + + status = extract_roam_scan_ap_stats_tlv(wmi_handle, evt_buf, dst->ap, + ap_idx, dst->num_ap); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Extract candidate stats for tlv[%d] failed", idx); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_roam_scan_stats_tlv() - Extract the Roam trigger stats + * from the WMI_ROAM_STATS_EVENTID + * @wmi_handle: wmi handle + * @evt_buf: Pointer to the event buffer + * @dst: Pointer to destination structure to fill data + * @idx: TLV id + */ +static QDF_STATUS +extract_roam_result_stats_tlv(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_roam_result *dst, uint8_t idx) +{ + WMI_ROAM_STATS_EVENTID_param_tlvs *param_buf; + wmi_roam_result *src_data = NULL; + + param_buf = (WMI_ROAM_STATS_EVENTID_param_tlvs *)evt_buf; + if (!param_buf || !param_buf->roam_result || + idx >= param_buf->num_roam_result) + return QDF_STATUS_E_FAILURE; + + src_data = ¶m_buf->roam_result[idx]; + + dst->present = true; + dst->status = src_data->roam_status ? false : true; + dst->timestamp = src_data->timestamp; + dst->fail_reason = src_data->roam_fail_reason; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_roam_11kv_stats_tlv() - Extract the Roam trigger stats + * from the WMI_ROAM_STATS_EVENTID + * @wmi_handle: wmi handle + * @evt_buf: Pointer to the event buffer + * @dst: Pointer to destination structure to fill data + * @idx: TLV id + * @rpt_idx: Neighbor report Channel index + */ +static QDF_STATUS +extract_roam_11kv_stats_tlv(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_neighbor_report_data *dst, + uint8_t idx, uint8_t rpt_idx) +{ + WMI_ROAM_STATS_EVENTID_param_tlvs *param_buf; + wmi_roam_neighbor_report_info *src_data = NULL; + wmi_roam_neighbor_report_channel_info *src_freq = NULL; + uint8_t i; + + param_buf = (WMI_ROAM_STATS_EVENTID_param_tlvs *)evt_buf; + if (!param_buf || !param_buf->roam_neighbor_report_info || + !param_buf->num_roam_neighbor_report_info || + idx >= param_buf->num_roam_neighbor_report_info) { + WMI_LOGD("%s: Invalid 1kv param buf", __func__); + return QDF_STATUS_E_FAILURE; + } + + src_data = ¶m_buf->roam_neighbor_report_info[idx]; + + dst->present = true; + dst->req_type = src_data->request_type; + dst->num_freq = src_data->neighbor_report_channel_count; + dst->req_time = src_data->neighbor_report_request_timestamp; + dst->resp_time = src_data->neighbor_report_response_timestamp; + + if (!dst->num_freq || !param_buf->num_roam_neighbor_report_chan_info || + rpt_idx >= param_buf->num_roam_neighbor_report_chan_info) + return QDF_STATUS_SUCCESS; + + if (!param_buf->roam_neighbor_report_chan_info) { + WMI_LOGD("%s: 11kv channel present, but TLV is NULL num_freq:%d", + __func__, dst->num_freq); + dst->num_freq = 0; + /* return success as its optional tlv and we can print neighbor + * report received info + */ + return QDF_STATUS_SUCCESS; + } + + src_freq = ¶m_buf->roam_neighbor_report_chan_info[rpt_idx]; + + if (dst->num_freq > MAX_ROAM_SCAN_CHAN) + dst->num_freq = MAX_ROAM_SCAN_CHAN; + + for (i = 0; i < dst->num_freq; i++) { + dst->freq[i] = src_freq->channel; + src_freq++; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_roam_msg_info_tlv() - Extract the roam message info + * from the WMI_ROAM_STATS_EVENTID + * @wmi_handle: wmi handle + * @evt_buf: Pointer to the event buffer + * @dst: Pointer to destination structure to fill data + * @idx: TLV id + */ +static QDF_STATUS +extract_roam_msg_info_tlv(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_roam_msg_info *dst, uint8_t idx) +{ + WMI_ROAM_STATS_EVENTID_param_tlvs *param_buf; + wmi_roam_msg_info *src_data = NULL; + + param_buf = (WMI_ROAM_STATS_EVENTID_param_tlvs *)evt_buf; + + if (!param_buf || !param_buf->roam_msg_info || + !param_buf->num_roam_msg_info || + idx >= param_buf->num_roam_msg_info) { + wmi_debug("Empty roam_msg_info param buf"); + return QDF_STATUS_SUCCESS; + } + + src_data = ¶m_buf->roam_msg_info[idx]; + + dst->present = true; + dst->timestamp = src_data->timestamp; + dst->msg_id = src_data->msg_id; + dst->msg_param1 = src_data->msg_param1; + dst->msg_param2 = src_data->msg_param2; + + return QDF_STATUS_SUCCESS; +} + +#else +static inline QDF_STATUS +extract_roam_trigger_stats_tlv(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_roam_trigger_info *trig, uint8_t idx) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static inline QDF_STATUS +extract_roam_result_stats_tlv(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_roam_result *dst, uint8_t idx) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static QDF_STATUS +extract_roam_11kv_stats_tlv(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_neighbor_report_data *dst, + uint8_t idx, uint8_t rpt_idx) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static QDF_STATUS +extract_roam_scan_stats_tlv(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_roam_scan_data *dst, uint8_t idx, + uint8_t chan_idx, uint8_t ap_idx) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static inline QDF_STATUS +extract_roam_msg_info_tlv(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_roam_msg_info *dst, uint8_t idx) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +#endif + +#ifdef FEATURE_WLAN_TIME_SYNC_FTM +/** + * send_wlan_ts_ftm_trigger_cmd_tlv(): send wlan time sync cmd to FW + * + * @wmi: wmi handle + * @vdev_id: vdev id + * @burst_mode: Indicates whether relation derived using FTM is needed for + * each FTM frame or only aggregated result is required. + * + * Send WMI_AUDIO_SYNC_TRIGGER_CMDID to FW. + * + * Return: QDF_STATUS + */ +static QDF_STATUS send_wlan_ts_ftm_trigger_cmd_tlv(wmi_unified_t wmi, + uint32_t vdev_id, + bool burst_mode) +{ + wmi_audio_sync_trigger_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_audio_sync_trigger_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_audio_sync_trigger_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_audio_sync_trigger_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->agg_relation = burst_mode ? false : true; + if (wmi_unified_cmd_send(wmi, buf, len, WMI_VDEV_AUDIO_SYNC_TRIGGER_CMDID)) { + WMI_LOGE("%s: failed to send audio sync trigger cmd", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS send_wlan_ts_qtime_cmd_tlv(wmi_unified_t wmi, + uint32_t vdev_id, + uint64_t lpass_ts) +{ + wmi_audio_sync_qtimer_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_audio_sync_qtimer_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_audio_sync_qtimer_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_audio_sync_qtimer_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->qtimer_u32 = (uint32_t)((lpass_ts & 0xffffffff00000000LL) >> 32); + cmd->qtimer_l32 = (uint32_t)(lpass_ts & 0xffffffffLL); + + if (wmi_unified_cmd_send(wmi, buf, len, WMI_VDEV_AUDIO_SYNC_QTIMER_CMDID)) { + WMI_LOGP("%s: Failed to send audio qtime command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_time_sync_ftm_start_stop_event_tlv( + wmi_unified_t wmi, void *buf, + struct ftm_time_sync_start_stop_params *param) +{ + WMI_VDEV_AUDIO_SYNC_START_STOP_EVENTID_param_tlvs *param_buf; + wmi_audio_sync_start_stop_event_fixed_param *resp_event; + + param_buf = (WMI_VDEV_AUDIO_SYNC_START_STOP_EVENTID_param_tlvs *)buf; + if (!param_buf) { + WMI_LOGE("Invalid audio sync start stop event buffer"); + return QDF_STATUS_E_FAILURE; + } + + resp_event = param_buf->fixed_param; + if (!resp_event) { + WMI_LOGE("Invalid audio sync start stop fixed param buffer"); + return QDF_STATUS_E_FAILURE; + } + + param->vdev_id = resp_event->vdev_id; + param->timer_interval = resp_event->periodicity; + param->num_reads = resp_event->reads_needed; + param->qtime = ((uint64_t)resp_event->qtimer_u32 << 32) | + resp_event->qtimer_l32; + param->mac_time = ((uint64_t)resp_event->mac_timer_u32 << 32) | + resp_event->mac_timer_l32; + + wmi_debug("FTM time sync time_interval %d, num_reads %d", + param->timer_interval, param->num_reads); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +extract_time_sync_ftm_offset_event_tlv(wmi_unified_t wmi, void *buf, + struct ftm_time_sync_offset *param) +{ + WMI_VDEV_AUDIO_SYNC_Q_MASTER_SLAVE_OFFSET_EVENTID_param_tlvs *param_buf; + wmi_audio_sync_q_master_slave_offset_event_fixed_param *resp_event; + wmi_audio_sync_q_master_slave_times *q_pair; + int iter; + + param_buf = + (WMI_VDEV_AUDIO_SYNC_Q_MASTER_SLAVE_OFFSET_EVENTID_param_tlvs *)buf; + if (!param_buf) { + WMI_LOGE("Invalid timesync ftm offset event buffer"); + return QDF_STATUS_E_FAILURE; + } + + resp_event = param_buf->fixed_param; + if (!resp_event) { + WMI_LOGE("Invalid timesync ftm offset fixed param buffer"); + return QDF_STATUS_E_FAILURE; + } + + param->vdev_id = resp_event->vdev_id; + param->num_qtime = param_buf->num_audio_sync_q_master_slave_times; + if (param->num_qtime > FTM_TIME_SYNC_QTIME_PAIR_MAX) + param->num_qtime = FTM_TIME_SYNC_QTIME_PAIR_MAX; + + q_pair = param_buf->audio_sync_q_master_slave_times; + if (!q_pair) { + WMI_LOGE("Invalid q_master_slave_times buffer"); + return QDF_STATUS_E_FAILURE; + } + + for (iter = 0; iter < param->num_qtime; iter++) { + param->pairs[iter].qtime_master = ( + (uint64_t)q_pair[iter].qmaster_u32 << 32) | + q_pair[iter].qmaster_l32; + param->pairs[iter].qtime_slave = ( + (uint64_t)q_pair[iter].qslave_u32 << 32) | + q_pair[iter].qslave_l32; + } + return QDF_STATUS_SUCCESS; +} +#endif /* FEATURE_WLAN_TIME_SYNC_FTM */ + +/** + * extract_install_key_comp_event_tlv() - extract install key complete event tlv + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @len: length of the event buffer + * @param: Pointer to hold install key complete event param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +extract_install_key_comp_event_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t len, + struct wmi_install_key_comp_event *param) +{ + WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_vdev_install_key_complete_event_fixed_param *key_fp; + + if (len < sizeof(*param_buf)) { + wmi_err("invalid event buf len %d", len); + return QDF_STATUS_E_INVAL; + } + + param_buf = (WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + wmi_err("received null buf from target"); + return QDF_STATUS_E_INVAL; + } + + key_fp = param_buf->fixed_param; + if (!key_fp) { + wmi_err("received null event data from target"); + return QDF_STATUS_E_INVAL; + } + + param->vdev_id = key_fp->vdev_id; + param->key_ix = key_fp->key_ix; + param->key_flags = key_fp->key_flags; + param->status = key_fp->status; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&key_fp->peer_macaddr, + param->peer_macaddr); + + return QDF_STATUS_SUCCESS; +} + +struct wmi_ops tlv_ops = { + .send_vdev_create_cmd = send_vdev_create_cmd_tlv, + .send_vdev_delete_cmd = send_vdev_delete_cmd_tlv, + .send_vdev_nss_chain_params_cmd = send_vdev_nss_chain_params_cmd_tlv, + .send_vdev_down_cmd = send_vdev_down_cmd_tlv, + .send_vdev_start_cmd = send_vdev_start_cmd_tlv, + .send_peer_flush_tids_cmd = send_peer_flush_tids_cmd_tlv, + .send_peer_param_cmd = send_peer_param_cmd_tlv, + .send_vdev_up_cmd = send_vdev_up_cmd_tlv, + .send_vdev_stop_cmd = send_vdev_stop_cmd_tlv, + .send_peer_create_cmd = send_peer_create_cmd_tlv, + .send_peer_delete_cmd = send_peer_delete_cmd_tlv, + .send_peer_delete_all_cmd = send_peer_delete_all_cmd_tlv, + .send_peer_rx_reorder_queue_setup_cmd = + send_peer_rx_reorder_queue_setup_cmd_tlv, + .send_peer_rx_reorder_queue_remove_cmd = + send_peer_rx_reorder_queue_remove_cmd_tlv, + .send_pdev_utf_cmd = send_pdev_utf_cmd_tlv, + .send_pdev_param_cmd = send_pdev_param_cmd_tlv, + .send_pdev_set_hw_mode_cmd = send_pdev_set_hw_mode_cmd_tlv, + .send_suspend_cmd = send_suspend_cmd_tlv, + .send_resume_cmd = send_resume_cmd_tlv, + .send_wow_enable_cmd = send_wow_enable_cmd_tlv, + .send_set_ap_ps_param_cmd = send_set_ap_ps_param_cmd_tlv, + .send_set_sta_ps_param_cmd = send_set_sta_ps_param_cmd_tlv, + .send_crash_inject_cmd = send_crash_inject_cmd_tlv, +#ifdef FEATURE_FW_LOG_PARSING + .send_dbglog_cmd = send_dbglog_cmd_tlv, +#endif + .send_vdev_set_param_cmd = send_vdev_set_param_cmd_tlv, + .send_stats_request_cmd = send_stats_request_cmd_tlv, + .send_packet_log_enable_cmd = send_packet_log_enable_cmd_tlv, + .send_peer_based_pktlog_cmd = send_peer_based_pktlog_cmd, + .send_time_stamp_sync_cmd = send_time_stamp_sync_cmd_tlv, + .send_packet_log_disable_cmd = send_packet_log_disable_cmd_tlv, + .send_beacon_tmpl_send_cmd = send_beacon_tmpl_send_cmd_tlv, + .send_fd_tmpl_cmd = send_fd_tmpl_cmd_tlv, + .send_peer_assoc_cmd = send_peer_assoc_cmd_tlv, + .send_scan_start_cmd = send_scan_start_cmd_tlv, + .send_scan_stop_cmd = send_scan_stop_cmd_tlv, + .send_scan_chan_list_cmd = send_scan_chan_list_cmd_tlv, + .send_mgmt_cmd = send_mgmt_cmd_tlv, + .send_offchan_data_tx_cmd = send_offchan_data_tx_cmd_tlv, + .send_modem_power_state_cmd = send_modem_power_state_cmd_tlv, + .send_set_sta_ps_mode_cmd = send_set_sta_ps_mode_cmd_tlv, + .send_idle_roam_monitor_cmd = send_idle_roam_monitor_cmd_tlv, + .send_set_sta_uapsd_auto_trig_cmd = + send_set_sta_uapsd_auto_trig_cmd_tlv, + .send_get_temperature_cmd = send_get_temperature_cmd_tlv, + .send_set_smps_params_cmd = send_set_smps_params_cmd_tlv, + .send_set_mimops_cmd = send_set_mimops_cmd_tlv, + .send_set_thermal_mgmt_cmd = send_set_thermal_mgmt_cmd_tlv, + .send_lro_config_cmd = send_lro_config_cmd_tlv, + .send_peer_rate_report_cmd = send_peer_rate_report_cmd_tlv, + .send_probe_rsp_tmpl_send_cmd = + send_probe_rsp_tmpl_send_cmd_tlv, + .send_p2p_go_set_beacon_ie_cmd = + send_p2p_go_set_beacon_ie_cmd_tlv, + .send_setup_install_key_cmd = + send_setup_install_key_cmd_tlv, + .send_scan_probe_setoui_cmd = + send_scan_probe_setoui_cmd_tlv, +#ifdef IPA_OFFLOAD + .send_ipa_offload_control_cmd = + send_ipa_offload_control_cmd_tlv, +#endif + .send_pno_stop_cmd = send_pno_stop_cmd_tlv, + .send_pno_start_cmd = send_pno_start_cmd_tlv, + .send_nlo_mawc_cmd = send_nlo_mawc_cmd_tlv, +#ifdef WLAN_FEATURE_LINK_LAYER_STATS + .send_process_ll_stats_clear_cmd = send_process_ll_stats_clear_cmd_tlv, + .send_process_ll_stats_set_cmd = send_process_ll_stats_set_cmd_tlv, + .send_process_ll_stats_get_cmd = send_process_ll_stats_get_cmd_tlv, +#endif /* WLAN_FEATURE_LINK_LAYER_STATS*/ + .send_congestion_cmd = send_congestion_cmd_tlv, + .send_snr_request_cmd = send_snr_request_cmd_tlv, + .send_snr_cmd = send_snr_cmd_tlv, + .send_link_status_req_cmd = send_link_status_req_cmd_tlv, +#if !defined(REMOVE_PKT_LOG) && defined(FEATURE_PKTLOG) + .send_pktlog_wmi_send_cmd = send_pktlog_wmi_send_cmd_tlv, +#endif +#ifdef WLAN_SUPPORT_GREEN_AP + .send_egap_conf_params_cmd = send_egap_conf_params_cmd_tlv, + .send_green_ap_ps_cmd = send_green_ap_ps_cmd_tlv, + .extract_green_ap_egap_status_info = + extract_green_ap_egap_status_info_tlv, +#endif + .send_csa_offload_enable_cmd = send_csa_offload_enable_cmd_tlv, + .send_start_oem_data_cmd = send_start_oem_data_cmd_tlv, +#ifdef FEATURE_OEM_DATA + .send_start_oemv2_data_cmd = send_start_oemv2_data_cmd_tlv, +#endif +#ifdef WLAN_FEATURE_CIF_CFR + .send_oem_dma_cfg_cmd = send_oem_dma_cfg_cmd_tlv, +#endif + .send_dfs_phyerr_filter_offload_en_cmd = + send_dfs_phyerr_filter_offload_en_cmd_tlv, + .send_stats_ext_req_cmd = send_stats_ext_req_cmd_tlv, + .send_process_dhcpserver_offload_cmd = + send_process_dhcpserver_offload_cmd_tlv, + .send_pdev_set_regdomain_cmd = + send_pdev_set_regdomain_cmd_tlv, + .send_regdomain_info_to_fw_cmd = send_regdomain_info_to_fw_cmd_tlv, + .send_cfg_action_frm_tb_ppdu_cmd = send_cfg_action_frm_tb_ppdu_cmd_tlv, + .save_fw_version_cmd = save_fw_version_cmd_tlv, + .check_and_update_fw_version = + check_and_update_fw_version_cmd_tlv, + .send_log_supported_evt_cmd = send_log_supported_evt_cmd_tlv, + .send_enable_specific_fw_logs_cmd = + send_enable_specific_fw_logs_cmd_tlv, + .send_flush_logs_to_fw_cmd = send_flush_logs_to_fw_cmd_tlv, + .send_unit_test_cmd = send_unit_test_cmd_tlv, +#ifdef FEATURE_WLAN_APF + .send_set_active_apf_mode_cmd = wmi_send_set_active_apf_mode_cmd_tlv, + .send_apf_enable_cmd = wmi_send_apf_enable_cmd_tlv, + .send_apf_write_work_memory_cmd = + wmi_send_apf_write_work_memory_cmd_tlv, + .send_apf_read_work_memory_cmd = + wmi_send_apf_read_work_memory_cmd_tlv, + .extract_apf_read_memory_resp_event = + wmi_extract_apf_read_memory_resp_event_tlv, +#endif /* FEATURE_WLAN_APF */ + .init_cmd_send = init_cmd_send_tlv, + .send_vdev_set_custom_aggr_size_cmd = + send_vdev_set_custom_aggr_size_cmd_tlv, + .send_vdev_set_qdepth_thresh_cmd = + send_vdev_set_qdepth_thresh_cmd_tlv, + .send_set_vap_dscp_tid_map_cmd = send_set_vap_dscp_tid_map_cmd_tlv, + .send_vdev_set_fwtest_param_cmd = send_vdev_set_fwtest_param_cmd_tlv, + .send_phyerr_disable_cmd = send_phyerr_disable_cmd_tlv, + .send_phyerr_enable_cmd = send_phyerr_enable_cmd_tlv, + .send_periodic_chan_stats_config_cmd = + send_periodic_chan_stats_config_cmd_tlv, + .send_vdev_spectral_configure_cmd = + send_vdev_spectral_configure_cmd_tlv, + .send_vdev_spectral_enable_cmd = + send_vdev_spectral_enable_cmd_tlv, + .send_thermal_mitigation_param_cmd = + send_thermal_mitigation_param_cmd_tlv, + .send_process_update_edca_param_cmd = + send_process_update_edca_param_cmd_tlv, + .send_bss_color_change_enable_cmd = + send_bss_color_change_enable_cmd_tlv, + .send_coex_config_cmd = send_coex_config_cmd_tlv, + .send_set_country_cmd = send_set_country_cmd_tlv, + .send_addba_send_cmd = send_addba_send_cmd_tlv, + .send_delba_send_cmd = send_delba_send_cmd_tlv, + .send_addba_clearresponse_cmd = send_addba_clearresponse_cmd_tlv, + .get_target_cap_from_service_ready = extract_service_ready_tlv, + .extract_hal_reg_cap = extract_hal_reg_cap_tlv, + .extract_num_mem_reqs = extract_num_mem_reqs_tlv, + .extract_host_mem_req = extract_host_mem_req_tlv, + .save_service_bitmap = save_service_bitmap_tlv, + .save_ext_service_bitmap = save_ext_service_bitmap_tlv, + .is_service_enabled = is_service_enabled_tlv, + .save_fw_version = save_fw_version_in_service_ready_tlv, + .ready_extract_init_status = ready_extract_init_status_tlv, + .ready_extract_mac_addr = ready_extract_mac_addr_tlv, + .ready_extract_mac_addr_list = ready_extract_mac_addr_list_tlv, + .extract_ready_event_params = extract_ready_event_params_tlv, + .extract_dbglog_data_len = extract_dbglog_data_len_tlv, + .extract_mgmt_rx_params = extract_mgmt_rx_params_tlv, + .extract_vdev_roam_param = extract_vdev_roam_param_tlv, + .extract_vdev_scan_ev_param = extract_vdev_scan_ev_param_tlv, +#ifdef FEATURE_WLAN_SCAN_PNO + .extract_nlo_match_ev_param = extract_nlo_match_ev_param_tlv, + .extract_nlo_complete_ev_param = extract_nlo_complete_ev_param_tlv, +#endif + .extract_all_stats_count = extract_all_stats_counts_tlv, + .extract_pdev_stats = extract_pdev_stats_tlv, + .extract_unit_test = extract_unit_test_tlv, + .extract_pdev_ext_stats = extract_pdev_ext_stats_tlv, + .extract_vdev_stats = extract_vdev_stats_tlv, + .extract_per_chain_rssi_stats = extract_per_chain_rssi_stats_tlv, + .extract_peer_stats = extract_peer_stats_tlv, + .extract_bcn_stats = extract_bcn_stats_tlv, + .extract_bcnflt_stats = extract_bcnflt_stats_tlv, + .extract_peer_extd_stats = extract_peer_extd_stats_tlv, + .extract_peer_adv_stats = extract_peer_adv_stats_tlv, + .extract_chan_stats = extract_chan_stats_tlv, +#ifdef WLAN_FEATURE_MIB_STATS + .extract_mib_stats = extract_mib_stats_tlv, +#endif + .extract_profile_ctx = extract_profile_ctx_tlv, + .extract_profile_data = extract_profile_data_tlv, + .send_fw_test_cmd = send_fw_test_cmd_tlv, + .send_power_dbg_cmd = send_power_dbg_cmd_tlv, + .extract_service_ready_ext = extract_service_ready_ext_tlv, + .extract_service_ready_ext2 = extract_service_ready_ext2_tlv, + .extract_hw_mode_cap_service_ready_ext = + extract_hw_mode_cap_service_ready_ext_tlv, + .extract_mac_phy_cap_service_ready_ext = + extract_mac_phy_cap_service_ready_ext_tlv, + .extract_reg_cap_service_ready_ext = + extract_reg_cap_service_ready_ext_tlv, + .extract_dbr_ring_cap_service_ready_ext = + extract_dbr_ring_cap_service_ready_ext_tlv, + .extract_dbr_ring_cap_service_ready_ext2 = + extract_dbr_ring_cap_service_ready_ext2_tlv, + .extract_sar_cap_service_ready_ext = + extract_sar_cap_service_ready_ext_tlv, + .extract_pdev_utf_event = extract_pdev_utf_event_tlv, + .wmi_set_htc_tx_tag = wmi_set_htc_tx_tag_tlv, + .extract_fips_event_data = extract_fips_event_data_tlv, +#if defined(WLAN_SUPPORT_FILS) || defined(CONFIG_BAND_6GHZ) + .send_vdev_fils_enable_cmd = send_vdev_fils_enable_cmd_send, +#endif +#ifdef WLAN_FEATURE_DISA + .extract_encrypt_decrypt_resp_event = + extract_encrypt_decrypt_resp_event_tlv, +#endif + .send_pdev_fips_cmd = send_pdev_fips_cmd_tlv, + .extract_get_pn_data = extract_get_pn_data_tlv, + .send_pdev_get_pn_cmd = send_pdev_get_pn_cmd_tlv, +#ifdef WLAN_FEATURE_DISA + .send_encrypt_decrypt_send_cmd = send_encrypt_decrypt_send_cmd_tlv, +#endif + .is_management_record = is_management_record_tlv, + .is_diag_event = is_diag_event_tlv, +#ifdef WLAN_FEATURE_ACTION_OUI + .send_action_oui_cmd = send_action_oui_cmd_tlv, +#endif + .send_dfs_phyerr_offload_en_cmd = send_dfs_phyerr_offload_en_cmd_tlv, +#ifdef QCA_SUPPORT_AGILE_DFS + .send_adfs_ch_cfg_cmd = send_adfs_ch_cfg_cmd_tlv, + .send_adfs_ocac_abort_cmd = send_adfs_ocac_abort_cmd_tlv, +#endif + .send_dfs_phyerr_offload_dis_cmd = send_dfs_phyerr_offload_dis_cmd_tlv, + .extract_reg_chan_list_update_event = + extract_reg_chan_list_update_event_tlv, +#ifdef WLAN_SUPPORT_RF_CHARACTERIZATION + .extract_num_rf_characterization_entries = + extract_num_rf_characterization_entries_tlv, + .extract_rf_characterization_entries = + extract_rf_characterization_entries_tlv, +#endif + .extract_chainmask_tables = + extract_chainmask_tables_tlv, + .extract_thermal_stats = extract_thermal_stats_tlv, + .extract_thermal_level_stats = extract_thermal_level_stats_tlv, + .send_get_rcpi_cmd = send_get_rcpi_cmd_tlv, + .extract_rcpi_response_event = extract_rcpi_response_event_tlv, +#ifdef DFS_COMPONENT_ENABLE + .extract_dfs_cac_complete_event = extract_dfs_cac_complete_event_tlv, + .extract_dfs_ocac_complete_event = extract_dfs_ocac_complete_event_tlv, + .extract_dfs_radar_detection_event = + extract_dfs_radar_detection_event_tlv, + .extract_wlan_radar_event_info = extract_wlan_radar_event_info_tlv, +#endif + .convert_pdev_id_host_to_target = + convert_host_pdev_id_to_target_pdev_id_legacy, + .convert_pdev_id_target_to_host = + convert_target_pdev_id_to_host_pdev_id_legacy, + + .convert_host_pdev_id_to_target = + convert_host_pdev_id_to_target_pdev_id, + .convert_target_pdev_id_to_host = + convert_target_pdev_id_to_host_pdev_id, + + .convert_phy_id_host_to_target = + convert_host_phy_id_to_target_phy_id_legacy, + .convert_phy_id_target_to_host = + convert_target_phy_id_to_host_phy_id_legacy, + + .convert_host_phy_id_to_target = + convert_host_phy_id_to_target_phy_id, + .convert_target_phy_id_to_host = + convert_target_phy_id_to_host_phy_id, + + .send_start_11d_scan_cmd = send_start_11d_scan_cmd_tlv, + .send_stop_11d_scan_cmd = send_stop_11d_scan_cmd_tlv, + .extract_reg_11d_new_country_event = + extract_reg_11d_new_country_event_tlv, + .send_user_country_code_cmd = send_user_country_code_cmd_tlv, + .extract_reg_ch_avoid_event = + extract_reg_ch_avoid_event_tlv, + .send_obss_detection_cfg_cmd = send_obss_detection_cfg_cmd_tlv, + .extract_obss_detection_info = extract_obss_detection_info_tlv, + .wmi_pdev_id_conversion_enable = wmi_tlv_pdev_id_conversion_enable, + .wmi_free_allocated_event = wmitlv_free_allocated_event_tlvs, + .wmi_check_and_pad_event = wmitlv_check_and_pad_event_tlvs, + .wmi_check_command_params = wmitlv_check_command_tlv_params, + .extract_comb_phyerr = extract_comb_phyerr_tlv, + .extract_single_phyerr = extract_single_phyerr_tlv, +#ifdef QCA_SUPPORT_CP_STATS + .extract_cca_stats = extract_cca_stats_tlv, +#endif + .extract_esp_estimation_ev_param = + extract_esp_estimation_ev_param_tlv, + .send_roam_scan_stats_cmd = send_roam_scan_stats_cmd_tlv, + .extract_roam_scan_stats_res_evt = extract_roam_scan_stats_res_evt_tlv, +#ifdef OBSS_PD + .send_obss_spatial_reuse_set = send_obss_spatial_reuse_set_cmd_tlv, + .send_obss_spatial_reuse_set_def_thresh = + send_obss_spatial_reuse_set_def_thresh_cmd_tlv, +#endif + .extract_offload_bcn_tx_status_evt = extract_offload_bcn_tx_status_evt, + .extract_ctl_failsafe_check_ev_param = + extract_ctl_failsafe_check_ev_param_tlv, +#ifdef WIFI_POS_CONVERGED + .extract_oem_response_param = extract_oem_response_param_tlv, +#endif /* WIFI_POS_CONVERGED */ +#ifdef WLAN_MWS_INFO_DEBUGFS + .send_mws_coex_status_req_cmd = send_mws_coex_status_req_cmd_tlv, +#endif + .extract_hw_mode_resp_event = extract_hw_mode_resp_event_status_tlv, +#ifdef FEATURE_ANI_LEVEL_REQUEST + .send_ani_level_cmd = send_ani_level_cmd_tlv, + .extract_ani_level = extract_ani_level_tlv, +#endif /* FEATURE_ANI_LEVEL_REQUEST */ + .extract_roam_trigger_stats = extract_roam_trigger_stats_tlv, + .extract_roam_scan_stats = extract_roam_scan_stats_tlv, + .extract_roam_result_stats = extract_roam_result_stats_tlv, + .extract_roam_11kv_stats = extract_roam_11kv_stats_tlv, + .extract_roam_msg_info = extract_roam_msg_info_tlv, + +#ifdef FEATURE_WLAN_TIME_SYNC_FTM + .send_wlan_time_sync_ftm_trigger_cmd = send_wlan_ts_ftm_trigger_cmd_tlv, + .send_wlan_ts_qtime_cmd = send_wlan_ts_qtime_cmd_tlv, + .extract_time_sync_ftm_start_stop_event = + extract_time_sync_ftm_start_stop_event_tlv, + .extract_time_sync_ftm_offset_event = + extract_time_sync_ftm_offset_event_tlv, +#endif /* FEATURE_WLAN_TIME_SYNC_FTM */ + .send_roam_scan_ch_list_req_cmd = send_roam_scan_ch_list_req_cmd_tlv, + .extract_install_key_comp_event = extract_install_key_comp_event_tlv, +}; + +/** + * populate_tlv_event_id() - populates wmi event ids + * + * @param event_ids: Pointer to hold event ids + * Return: None + */ +static void populate_tlv_events_id(uint32_t *event_ids) +{ + event_ids[wmi_service_ready_event_id] = WMI_SERVICE_READY_EVENTID; + event_ids[wmi_ready_event_id] = WMI_READY_EVENTID; + event_ids[wmi_scan_event_id] = WMI_SCAN_EVENTID; + event_ids[wmi_pdev_tpc_config_event_id] = WMI_PDEV_TPC_CONFIG_EVENTID; + event_ids[wmi_chan_info_event_id] = WMI_CHAN_INFO_EVENTID; + event_ids[wmi_phyerr_event_id] = WMI_PHYERR_EVENTID; + event_ids[wmi_pdev_dump_event_id] = WMI_PDEV_DUMP_EVENTID; + event_ids[wmi_tx_pause_event_id] = WMI_TX_PAUSE_EVENTID; + event_ids[wmi_dfs_radar_event_id] = WMI_DFS_RADAR_EVENTID; + event_ids[wmi_pdev_l1ss_track_event_id] = WMI_PDEV_L1SS_TRACK_EVENTID; + event_ids[wmi_pdev_temperature_event_id] = WMI_PDEV_TEMPERATURE_EVENTID; + event_ids[wmi_service_ready_ext_event_id] = + WMI_SERVICE_READY_EXT_EVENTID; + event_ids[wmi_service_ready_ext2_event_id] = + WMI_SERVICE_READY_EXT2_EVENTID; + event_ids[wmi_vdev_start_resp_event_id] = WMI_VDEV_START_RESP_EVENTID; + event_ids[wmi_vdev_stopped_event_id] = WMI_VDEV_STOPPED_EVENTID; + event_ids[wmi_vdev_install_key_complete_event_id] = + WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID; + event_ids[wmi_vdev_mcc_bcn_intvl_change_req_event_id] = + WMI_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID; + + event_ids[wmi_vdev_tsf_report_event_id] = WMI_VDEV_TSF_REPORT_EVENTID; + event_ids[wmi_peer_sta_kickout_event_id] = WMI_PEER_STA_KICKOUT_EVENTID; + event_ids[wmi_peer_info_event_id] = WMI_PEER_INFO_EVENTID; + event_ids[wmi_peer_tx_fail_cnt_thr_event_id] = + WMI_PEER_TX_FAIL_CNT_THR_EVENTID; + event_ids[wmi_peer_estimated_linkspeed_event_id] = + WMI_PEER_ESTIMATED_LINKSPEED_EVENTID; + event_ids[wmi_peer_state_event_id] = WMI_PEER_STATE_EVENTID; + event_ids[wmi_peer_delete_response_event_id] = + WMI_PEER_DELETE_RESP_EVENTID; + event_ids[wmi_peer_delete_all_response_event_id] = + WMI_VDEV_DELETE_ALL_PEER_RESP_EVENTID; + event_ids[wmi_mgmt_rx_event_id] = WMI_MGMT_RX_EVENTID; + event_ids[wmi_host_swba_event_id] = WMI_HOST_SWBA_EVENTID; + event_ids[wmi_tbttoffset_update_event_id] = + WMI_TBTTOFFSET_UPDATE_EVENTID; + event_ids[wmi_ext_tbttoffset_update_event_id] = + WMI_TBTTOFFSET_EXT_UPDATE_EVENTID; + event_ids[wmi_offload_bcn_tx_status_event_id] = + WMI_OFFLOAD_BCN_TX_STATUS_EVENTID; + event_ids[wmi_offload_prob_resp_tx_status_event_id] = + WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID; + event_ids[wmi_mgmt_tx_completion_event_id] = + WMI_MGMT_TX_COMPLETION_EVENTID; + event_ids[wmi_pdev_nfcal_power_all_channels_event_id] = + WMI_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID; + event_ids[wmi_tx_delba_complete_event_id] = + WMI_TX_DELBA_COMPLETE_EVENTID; + event_ids[wmi_tx_addba_complete_event_id] = + WMI_TX_ADDBA_COMPLETE_EVENTID; + event_ids[wmi_ba_rsp_ssn_event_id] = WMI_BA_RSP_SSN_EVENTID; + + event_ids[wmi_aggr_state_trig_event_id] = WMI_AGGR_STATE_TRIG_EVENTID; + + event_ids[wmi_roam_event_id] = WMI_ROAM_EVENTID; + event_ids[wmi_profile_match] = WMI_PROFILE_MATCH; + + event_ids[wmi_roam_synch_event_id] = WMI_ROAM_SYNCH_EVENTID; + event_ids[wmi_roam_synch_frame_event_id] = WMI_ROAM_SYNCH_FRAME_EVENTID; + + event_ids[wmi_p2p_disc_event_id] = WMI_P2P_DISC_EVENTID; + + event_ids[wmi_p2p_noa_event_id] = WMI_P2P_NOA_EVENTID; + event_ids[wmi_p2p_lo_stop_event_id] = + WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID; + event_ids[wmi_vdev_add_macaddr_rx_filter_event_id] = + WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID; + event_ids[wmi_pdev_resume_event_id] = WMI_PDEV_RESUME_EVENTID; + event_ids[wmi_wow_wakeup_host_event_id] = WMI_WOW_WAKEUP_HOST_EVENTID; + event_ids[wmi_d0_wow_disable_ack_event_id] = + WMI_D0_WOW_DISABLE_ACK_EVENTID; + event_ids[wmi_wow_initial_wakeup_event_id] = + WMI_WOW_INITIAL_WAKEUP_EVENTID; + + event_ids[wmi_rtt_meas_report_event_id] = + WMI_RTT_MEASUREMENT_REPORT_EVENTID; + event_ids[wmi_tsf_meas_report_event_id] = + WMI_TSF_MEASUREMENT_REPORT_EVENTID; + event_ids[wmi_rtt_error_report_event_id] = WMI_RTT_ERROR_REPORT_EVENTID; + event_ids[wmi_stats_ext_event_id] = WMI_STATS_EXT_EVENTID; + event_ids[wmi_iface_link_stats_event_id] = WMI_IFACE_LINK_STATS_EVENTID; + event_ids[wmi_peer_link_stats_event_id] = WMI_PEER_LINK_STATS_EVENTID; + event_ids[wmi_radio_link_stats_link] = WMI_RADIO_LINK_STATS_EVENTID; + event_ids[wmi_diag_event_id_log_supported_event_id] = + WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID; + event_ids[wmi_nlo_match_event_id] = WMI_NLO_MATCH_EVENTID; + event_ids[wmi_nlo_scan_complete_event_id] = + WMI_NLO_SCAN_COMPLETE_EVENTID; + event_ids[wmi_apfind_event_id] = WMI_APFIND_EVENTID; + event_ids[wmi_passpoint_match_event_id] = WMI_PASSPOINT_MATCH_EVENTID; + + event_ids[wmi_gtk_offload_status_event_id] = + WMI_GTK_OFFLOAD_STATUS_EVENTID; + event_ids[wmi_gtk_rekey_fail_event_id] = WMI_GTK_REKEY_FAIL_EVENTID; + event_ids[wmi_csa_handling_event_id] = WMI_CSA_HANDLING_EVENTID; + event_ids[wmi_chatter_pc_query_event_id] = WMI_CHATTER_PC_QUERY_EVENTID; + + event_ids[wmi_echo_event_id] = WMI_ECHO_EVENTID; + + event_ids[wmi_pdev_utf_event_id] = WMI_PDEV_UTF_EVENTID; + + event_ids[wmi_dbg_msg_event_id] = WMI_DEBUG_MESG_EVENTID; + event_ids[wmi_update_stats_event_id] = WMI_UPDATE_STATS_EVENTID; + event_ids[wmi_debug_print_event_id] = WMI_DEBUG_PRINT_EVENTID; + event_ids[wmi_dcs_interference_event_id] = WMI_DCS_INTERFERENCE_EVENTID; + event_ids[wmi_pdev_qvit_event_id] = WMI_PDEV_QVIT_EVENTID; + event_ids[wmi_wlan_profile_data_event_id] = + WMI_WLAN_PROFILE_DATA_EVENTID; + event_ids[wmi_pdev_ftm_intg_event_id] = WMI_PDEV_FTM_INTG_EVENTID; + event_ids[wmi_wlan_freq_avoid_event_id] = WMI_WLAN_FREQ_AVOID_EVENTID; + event_ids[wmi_vdev_get_keepalive_event_id] = + WMI_VDEV_GET_KEEPALIVE_EVENTID; + event_ids[wmi_thermal_mgmt_event_id] = WMI_THERMAL_MGMT_EVENTID; + + event_ids[wmi_diag_container_event_id] = + WMI_DIAG_DATA_CONTAINER_EVENTID; + + event_ids[wmi_host_auto_shutdown_event_id] = + WMI_HOST_AUTO_SHUTDOWN_EVENTID; + + event_ids[wmi_update_whal_mib_stats_event_id] = + WMI_UPDATE_WHAL_MIB_STATS_EVENTID; + + /*update ht/vht info based on vdev (rx and tx NSS and preamble) */ + event_ids[wmi_update_vdev_rate_stats_event_id] = + WMI_UPDATE_VDEV_RATE_STATS_EVENTID; + + event_ids[wmi_diag_event_id] = WMI_DIAG_EVENTID; + event_ids[wmi_unit_test_event_id] = WMI_UNIT_TEST_EVENTID; + + /** Set OCB Sched Response, deprecated */ + event_ids[wmi_ocb_set_sched_event_id] = WMI_OCB_SET_SCHED_EVENTID; + + event_ids[wmi_dbg_mesg_flush_complete_event_id] = + WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID; + event_ids[wmi_rssi_breach_event_id] = WMI_RSSI_BREACH_EVENTID; + + /* GPIO Event */ + event_ids[wmi_gpio_input_event_id] = WMI_GPIO_INPUT_EVENTID; + event_ids[wmi_uploadh_event_id] = WMI_UPLOADH_EVENTID; + + event_ids[wmi_captureh_event_id] = WMI_CAPTUREH_EVENTID; + event_ids[wmi_rfkill_state_change_event_id] = + WMI_RFKILL_STATE_CHANGE_EVENTID; + + /* TDLS Event */ + event_ids[wmi_tdls_peer_event_id] = WMI_TDLS_PEER_EVENTID; + + event_ids[wmi_batch_scan_enabled_event_id] = + WMI_BATCH_SCAN_ENABLED_EVENTID; + event_ids[wmi_batch_scan_result_event_id] = + WMI_BATCH_SCAN_RESULT_EVENTID; + /* OEM Event */ + event_ids[wmi_oem_cap_event_id] = WMI_OEM_CAPABILITY_EVENTID; + event_ids[wmi_oem_meas_report_event_id] = + WMI_OEM_MEASUREMENT_REPORT_EVENTID; + event_ids[wmi_oem_report_event_id] = WMI_OEM_ERROR_REPORT_EVENTID; + + /* NAN Event */ + event_ids[wmi_nan_event_id] = WMI_NAN_EVENTID; + + /* LPI Event */ + event_ids[wmi_lpi_result_event_id] = WMI_LPI_RESULT_EVENTID; + event_ids[wmi_lpi_status_event_id] = WMI_LPI_STATUS_EVENTID; + event_ids[wmi_lpi_handoff_event_id] = WMI_LPI_HANDOFF_EVENTID; + + /* ExtScan events */ + event_ids[wmi_extscan_start_stop_event_id] = + WMI_EXTSCAN_START_STOP_EVENTID; + event_ids[wmi_extscan_operation_event_id] = + WMI_EXTSCAN_OPERATION_EVENTID; + event_ids[wmi_extscan_table_usage_event_id] = + WMI_EXTSCAN_TABLE_USAGE_EVENTID; + event_ids[wmi_extscan_cached_results_event_id] = + WMI_EXTSCAN_CACHED_RESULTS_EVENTID; + event_ids[wmi_extscan_wlan_change_results_event_id] = + WMI_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID; + event_ids[wmi_extscan_hotlist_match_event_id] = + WMI_EXTSCAN_HOTLIST_MATCH_EVENTID; + event_ids[wmi_extscan_capabilities_event_id] = + WMI_EXTSCAN_CAPABILITIES_EVENTID; + event_ids[wmi_extscan_hotlist_ssid_match_event_id] = + WMI_EXTSCAN_HOTLIST_SSID_MATCH_EVENTID; + + /* mDNS offload events */ + event_ids[wmi_mdns_stats_event_id] = WMI_MDNS_STATS_EVENTID; + + /* SAP Authentication offload events */ + event_ids[wmi_sap_ofl_add_sta_event_id] = WMI_SAP_OFL_ADD_STA_EVENTID; + event_ids[wmi_sap_ofl_del_sta_event_id] = WMI_SAP_OFL_DEL_STA_EVENTID; + + /** Out-of-context-of-bss (OCB) events */ + event_ids[wmi_ocb_set_config_resp_event_id] = + WMI_OCB_SET_CONFIG_RESP_EVENTID; + event_ids[wmi_ocb_get_tsf_timer_resp_event_id] = + WMI_OCB_GET_TSF_TIMER_RESP_EVENTID; + event_ids[wmi_dcc_get_stats_resp_event_id] = + WMI_DCC_GET_STATS_RESP_EVENTID; + event_ids[wmi_dcc_update_ndl_resp_event_id] = + WMI_DCC_UPDATE_NDL_RESP_EVENTID; + event_ids[wmi_dcc_stats_event_id] = WMI_DCC_STATS_EVENTID; + /* System-On-Chip events */ + event_ids[wmi_soc_set_hw_mode_resp_event_id] = + WMI_SOC_SET_HW_MODE_RESP_EVENTID; + event_ids[wmi_soc_hw_mode_transition_event_id] = + WMI_SOC_HW_MODE_TRANSITION_EVENTID; + event_ids[wmi_soc_set_dual_mac_config_resp_event_id] = + WMI_SOC_SET_DUAL_MAC_CONFIG_RESP_EVENTID; + event_ids[wmi_pdev_fips_event_id] = WMI_PDEV_FIPS_EVENTID; + event_ids[wmi_pdev_csa_switch_count_status_event_id] = + WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID; + event_ids[wmi_vdev_ocac_complete_event_id] = + WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID; + event_ids[wmi_reg_chan_list_cc_event_id] = WMI_REG_CHAN_LIST_CC_EVENTID; + event_ids[wmi_inst_rssi_stats_event_id] = WMI_INST_RSSI_STATS_EVENTID; + event_ids[wmi_pdev_tpc_config_event_id] = WMI_PDEV_TPC_CONFIG_EVENTID; + event_ids[wmi_peer_sta_ps_statechg_event_id] = + WMI_PEER_STA_PS_STATECHG_EVENTID; + event_ids[wmi_pdev_channel_hopping_event_id] = + WMI_PDEV_CHANNEL_HOPPING_EVENTID; + event_ids[wmi_offchan_data_tx_completion_event] = + WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID; + event_ids[wmi_dfs_cac_complete_id] = WMI_VDEV_DFS_CAC_COMPLETE_EVENTID; + event_ids[wmi_dfs_radar_detection_event_id] = + WMI_PDEV_DFS_RADAR_DETECTION_EVENTID; + event_ids[wmi_tt_stats_event_id] = WMI_THERM_THROT_STATS_EVENTID; + event_ids[wmi_11d_new_country_event_id] = WMI_11D_NEW_COUNTRY_EVENTID; + event_ids[wmi_pdev_tpc_event_id] = WMI_PDEV_TPC_EVENTID; + event_ids[wmi_get_arp_stats_req_id] = WMI_VDEV_GET_ARP_STAT_EVENTID; + event_ids[wmi_service_available_event_id] = + WMI_SERVICE_AVAILABLE_EVENTID; + event_ids[wmi_update_rcpi_event_id] = WMI_UPDATE_RCPI_EVENTID; + event_ids[wmi_pdev_check_cal_version_event_id] = WMI_PDEV_CHECK_CAL_VERSION_EVENTID; + /* NDP events */ + event_ids[wmi_ndp_initiator_rsp_event_id] = + WMI_NDP_INITIATOR_RSP_EVENTID; + event_ids[wmi_ndp_indication_event_id] = WMI_NDP_INDICATION_EVENTID; + event_ids[wmi_ndp_confirm_event_id] = WMI_NDP_CONFIRM_EVENTID; + event_ids[wmi_ndp_responder_rsp_event_id] = + WMI_NDP_RESPONDER_RSP_EVENTID; + event_ids[wmi_ndp_end_indication_event_id] = + WMI_NDP_END_INDICATION_EVENTID; + event_ids[wmi_ndp_end_rsp_event_id] = WMI_NDP_END_RSP_EVENTID; + event_ids[wmi_ndl_schedule_update_event_id] = + WMI_NDL_SCHEDULE_UPDATE_EVENTID; + event_ids[wmi_ndp_event_id] = WMI_NDP_EVENTID; + + event_ids[wmi_oem_response_event_id] = WMI_OEM_RESPONSE_EVENTID; + event_ids[wmi_peer_stats_info_event_id] = WMI_PEER_STATS_INFO_EVENTID; + event_ids[wmi_pdev_chip_power_stats_event_id] = + WMI_PDEV_CHIP_POWER_STATS_EVENTID; + event_ids[wmi_ap_ps_egap_info_event_id] = WMI_AP_PS_EGAP_INFO_EVENTID; + event_ids[wmi_peer_assoc_conf_event_id] = WMI_PEER_ASSOC_CONF_EVENTID; + event_ids[wmi_vdev_delete_resp_event_id] = WMI_VDEV_DELETE_RESP_EVENTID; + event_ids[wmi_apf_capability_info_event_id] = + WMI_BPF_CAPABILIY_INFO_EVENTID; + event_ids[wmi_vdev_encrypt_decrypt_data_rsp_event_id] = + WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID; + event_ids[wmi_report_rx_aggr_failure_event_id] = + WMI_REPORT_RX_AGGR_FAILURE_EVENTID; + event_ids[wmi_pdev_chip_pwr_save_failure_detect_event_id] = + WMI_PDEV_CHIP_POWER_SAVE_FAILURE_DETECTED_EVENTID; + event_ids[wmi_peer_antdiv_info_event_id] = WMI_PEER_ANTDIV_INFO_EVENTID; + event_ids[wmi_pdev_set_hw_mode_rsp_event_id] = + WMI_PDEV_SET_HW_MODE_RESP_EVENTID; + event_ids[wmi_pdev_hw_mode_transition_event_id] = + WMI_PDEV_HW_MODE_TRANSITION_EVENTID; + event_ids[wmi_pdev_set_mac_config_resp_event_id] = + WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID; + event_ids[wmi_coex_bt_activity_event_id] = + WMI_WLAN_COEX_BT_ACTIVITY_EVENTID; + event_ids[wmi_mgmt_tx_bundle_completion_event_id] = + WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID; + event_ids[wmi_radio_tx_power_level_stats_event_id] = + WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID; + event_ids[wmi_report_stats_event_id] = WMI_REPORT_STATS_EVENTID; + event_ids[wmi_dma_buf_release_event_id] = + WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID; + event_ids[wmi_sap_obss_detection_report_event_id] = + WMI_SAP_OBSS_DETECTION_REPORT_EVENTID; + event_ids[wmi_host_swfda_event_id] = WMI_HOST_SWFDA_EVENTID; + event_ids[wmi_sar_get_limits_event_id] = WMI_SAR_GET_LIMITS_EVENTID; + event_ids[wmi_obss_color_collision_report_event_id] = + WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID; + event_ids[wmi_pdev_div_rssi_antid_event_id] = + WMI_PDEV_DIV_RSSI_ANTID_EVENTID; + event_ids[wmi_twt_enable_complete_event_id] = + WMI_TWT_ENABLE_COMPLETE_EVENTID; + event_ids[wmi_twt_disable_complete_event_id] = + WMI_TWT_DISABLE_COMPLETE_EVENTID; + event_ids[wmi_apf_get_vdev_work_memory_resp_event_id] = + WMI_BPF_GET_VDEV_WORK_MEMORY_RESP_EVENTID; + event_ids[wmi_wlan_sar2_result_event_id] = WMI_SAR2_RESULT_EVENTID; + event_ids[wmi_esp_estimate_event_id] = WMI_ESP_ESTIMATE_EVENTID; + event_ids[wmi_roam_scan_stats_event_id] = WMI_ROAM_SCAN_STATS_EVENTID; +#ifdef WLAN_FEATURE_INTEROP_ISSUES_AP + event_ids[wmi_pdev_interop_issues_ap_event_id] = + WMI_PDEV_RAP_INFO_EVENTID; +#endif +#ifdef AST_HKV1_WORKAROUND + event_ids[wmi_wds_peer_event_id] = WMI_WDS_PEER_EVENTID; +#endif + event_ids[wmi_pdev_ctl_failsafe_check_event_id] = + WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID; + event_ids[wmi_vdev_bcn_reception_stats_event_id] = + WMI_VDEV_BCN_RECEPTION_STATS_EVENTID; + event_ids[wmi_roam_blacklist_event_id] = WMI_ROAM_BLACKLIST_EVENTID; + event_ids[wmi_wlm_stats_event_id] = WMI_WLM_STATS_EVENTID; + event_ids[wmi_peer_cfr_capture_event_id] = WMI_PEER_CFR_CAPTURE_EVENTID; + event_ids[wmi_pdev_cold_boot_cal_event_id] = + WMI_PDEV_COLD_BOOT_CAL_DATA_EVENTID; +#ifdef WLAN_MWS_INFO_DEBUGFS + event_ids[wmi_vdev_get_mws_coex_state_eventid] = + WMI_VDEV_GET_MWS_COEX_STATE_EVENTID; + event_ids[wmi_vdev_get_mws_coex_dpwb_state_eventid] = + WMI_VDEV_GET_MWS_COEX_DPWB_STATE_EVENTID; + event_ids[wmi_vdev_get_mws_coex_tdm_state_eventid] = + WMI_VDEV_GET_MWS_COEX_TDM_STATE_EVENTID; + event_ids[wmi_vdev_get_mws_coex_idrx_state_eventid] = + WMI_VDEV_GET_MWS_COEX_IDRX_STATE_EVENTID; + event_ids[wmi_vdev_get_mws_coex_antenna_sharing_state_eventid] = + WMI_VDEV_GET_MWS_COEX_ANTENNA_SHARING_STATE_EVENTID; +#endif + event_ids[wmi_coex_report_antenna_isolation_event_id] = + WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID; + event_ids[wmi_peer_ratecode_list_event_id] = + WMI_PEER_RATECODE_LIST_EVENTID; + event_ids[wmi_chan_rf_characterization_info_event_id] = + WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID; + event_ids[wmi_roam_auth_offload_event_id] = + WMI_ROAM_PREAUTH_START_EVENTID; + event_ids[wmi_get_elna_bypass_event_id] = WMI_GET_ELNA_BYPASS_EVENTID; + event_ids[wmi_motion_det_host_eventid] = WMI_MOTION_DET_HOST_EVENTID; + event_ids[wmi_motion_det_base_line_host_eventid] = + WMI_MOTION_DET_BASE_LINE_HOST_EVENTID; + event_ids[wmi_get_ani_level_event_id] = WMI_GET_CHANNEL_ANI_EVENTID; + event_ids[wmi_peer_tx_pn_response_event_id] = + WMI_PEER_TX_PN_RESPONSE_EVENTID; + event_ids[wmi_roam_stats_event_id] = WMI_ROAM_STATS_EVENTID; + event_ids[wmi_oem_data_event_id] = WMI_OEM_DATA_EVENTID; + event_ids[wmi_mgmt_offload_data_event_id] = + WMI_VDEV_MGMT_OFFLOAD_EVENTID; + event_ids[wmi_nan_dmesg_event_id] = + WMI_NAN_DMESG_EVENTID; + event_ids[wmi_pdev_multi_vdev_restart_response_event_id] = + WMI_PDEV_MULTIPLE_VDEV_RESTART_RESP_EVENTID; + event_ids[wmi_roam_pmkid_request_event_id] = + WMI_ROAM_PMKID_REQUEST_EVENTID; +#ifdef FEATURE_WLAN_TIME_SYNC_FTM + event_ids[wmi_wlan_time_sync_ftm_start_stop_event_id] = + WMI_VDEV_AUDIO_SYNC_START_STOP_EVENTID; + event_ids[wmi_wlan_time_sync_q_master_slave_offset_eventid] = + WMI_VDEV_AUDIO_SYNC_Q_MASTER_SLAVE_OFFSET_EVENTID; +#endif +event_ids[wmi_roam_scan_chan_list_id] = + WMI_ROAM_SCAN_CHANNEL_LIST_EVENTID; +} + +/** + * populate_tlv_service() - populates wmi services + * + * @param wmi_service: Pointer to hold wmi_service + * Return: None + */ +static void populate_tlv_service(uint32_t *wmi_service) +{ + wmi_service[wmi_service_beacon_offload] = WMI_SERVICE_BEACON_OFFLOAD; + wmi_service[wmi_service_ack_timeout] = WMI_SERVICE_ACK_TIMEOUT; + wmi_service[wmi_service_scan_offload] = WMI_SERVICE_SCAN_OFFLOAD; + wmi_service[wmi_service_roam_scan_offload] = + WMI_SERVICE_ROAM_SCAN_OFFLOAD; + wmi_service[wmi_service_bcn_miss_offload] = + WMI_SERVICE_BCN_MISS_OFFLOAD; + wmi_service[wmi_service_sta_pwrsave] = WMI_SERVICE_STA_PWRSAVE; + wmi_service[wmi_service_sta_advanced_pwrsave] = + WMI_SERVICE_STA_ADVANCED_PWRSAVE; + wmi_service[wmi_service_ap_uapsd] = WMI_SERVICE_AP_UAPSD; + wmi_service[wmi_service_ap_dfs] = WMI_SERVICE_AP_DFS; + wmi_service[wmi_service_11ac] = WMI_SERVICE_11AC; + wmi_service[wmi_service_blockack] = WMI_SERVICE_BLOCKACK; + wmi_service[wmi_service_phyerr] = WMI_SERVICE_PHYERR; + wmi_service[wmi_service_bcn_filter] = WMI_SERVICE_BCN_FILTER; + wmi_service[wmi_service_rtt] = WMI_SERVICE_RTT; + wmi_service[wmi_service_wow] = WMI_SERVICE_WOW; + wmi_service[wmi_service_ratectrl_cache] = WMI_SERVICE_RATECTRL_CACHE; + wmi_service[wmi_service_iram_tids] = WMI_SERVICE_IRAM_TIDS; + wmi_service[wmi_service_arpns_offload] = WMI_SERVICE_ARPNS_OFFLOAD; + wmi_service[wmi_service_nlo] = WMI_SERVICE_NLO; + wmi_service[wmi_service_gtk_offload] = WMI_SERVICE_GTK_OFFLOAD; + wmi_service[wmi_service_scan_sch] = WMI_SERVICE_SCAN_SCH; + wmi_service[wmi_service_csa_offload] = WMI_SERVICE_CSA_OFFLOAD; + wmi_service[wmi_service_chatter] = WMI_SERVICE_CHATTER; + wmi_service[wmi_service_coex_freqavoid] = WMI_SERVICE_COEX_FREQAVOID; + wmi_service[wmi_service_packet_power_save] = + WMI_SERVICE_PACKET_POWER_SAVE; + wmi_service[wmi_service_force_fw_hang] = WMI_SERVICE_FORCE_FW_HANG; + wmi_service[wmi_service_gpio] = WMI_SERVICE_GPIO; + wmi_service[wmi_service_sta_dtim_ps_modulated_dtim] = + WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM; + wmi_service[wmi_sta_uapsd_basic_auto_trig] = + WMI_STA_UAPSD_BASIC_AUTO_TRIG; + wmi_service[wmi_sta_uapsd_var_auto_trig] = WMI_STA_UAPSD_VAR_AUTO_TRIG; + wmi_service[wmi_service_sta_keep_alive] = WMI_SERVICE_STA_KEEP_ALIVE; + wmi_service[wmi_service_tx_encap] = WMI_SERVICE_TX_ENCAP; + wmi_service[wmi_service_ap_ps_detect_out_of_sync] = + WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC; + wmi_service[wmi_service_early_rx] = WMI_SERVICE_EARLY_RX; + wmi_service[wmi_service_sta_smps] = WMI_SERVICE_STA_SMPS; + wmi_service[wmi_service_fwtest] = WMI_SERVICE_FWTEST; + wmi_service[wmi_service_sta_wmmac] = WMI_SERVICE_STA_WMMAC; + wmi_service[wmi_service_tdls] = WMI_SERVICE_TDLS; + wmi_service[wmi_service_burst] = WMI_SERVICE_BURST; + wmi_service[wmi_service_mcc_bcn_interval_change] = + WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE; + wmi_service[wmi_service_adaptive_ocs] = WMI_SERVICE_ADAPTIVE_OCS; + wmi_service[wmi_service_ba_ssn_support] = WMI_SERVICE_BA_SSN_SUPPORT; + wmi_service[wmi_service_filter_ipsec_natkeepalive] = + WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE; + wmi_service[wmi_service_wlan_hb] = WMI_SERVICE_WLAN_HB; + wmi_service[wmi_service_lte_ant_share_support] = + WMI_SERVICE_LTE_ANT_SHARE_SUPPORT; + wmi_service[wmi_service_batch_scan] = WMI_SERVICE_BATCH_SCAN; + wmi_service[wmi_service_qpower] = WMI_SERVICE_QPOWER; + wmi_service[wmi_service_plmreq] = WMI_SERVICE_PLMREQ; + wmi_service[wmi_service_thermal_mgmt] = WMI_SERVICE_THERMAL_MGMT; + wmi_service[wmi_service_rmc] = WMI_SERVICE_RMC; + wmi_service[wmi_service_mhf_offload] = WMI_SERVICE_MHF_OFFLOAD; + wmi_service[wmi_service_coex_sar] = WMI_SERVICE_COEX_SAR; + wmi_service[wmi_service_bcn_txrate_override] = + WMI_SERVICE_BCN_TXRATE_OVERRIDE; + wmi_service[wmi_service_nan] = WMI_SERVICE_NAN; + wmi_service[wmi_service_l1ss_stat] = WMI_SERVICE_L1SS_STAT; + wmi_service[wmi_service_estimate_linkspeed] = + WMI_SERVICE_ESTIMATE_LINKSPEED; + wmi_service[wmi_service_obss_scan] = WMI_SERVICE_OBSS_SCAN; + wmi_service[wmi_service_tdls_offchan] = WMI_SERVICE_TDLS_OFFCHAN; + wmi_service[wmi_service_tdls_uapsd_buffer_sta] = + WMI_SERVICE_TDLS_UAPSD_BUFFER_STA; + wmi_service[wmi_service_tdls_uapsd_sleep_sta] = + WMI_SERVICE_TDLS_UAPSD_SLEEP_STA; + wmi_service[wmi_service_ibss_pwrsave] = WMI_SERVICE_IBSS_PWRSAVE; + wmi_service[wmi_service_lpass] = WMI_SERVICE_LPASS; + wmi_service[wmi_service_extscan] = WMI_SERVICE_EXTSCAN; + wmi_service[wmi_service_d0wow] = WMI_SERVICE_D0WOW; + wmi_service[wmi_service_hsoffload] = WMI_SERVICE_HSOFFLOAD; + wmi_service[wmi_service_roam_ho_offload] = WMI_SERVICE_ROAM_HO_OFFLOAD; + wmi_service[wmi_service_rx_full_reorder] = WMI_SERVICE_RX_FULL_REORDER; + wmi_service[wmi_service_dhcp_offload] = WMI_SERVICE_DHCP_OFFLOAD; + wmi_service[wmi_service_sta_rx_ipa_offload_support] = + WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT; + wmi_service[wmi_service_mdns_offload] = WMI_SERVICE_MDNS_OFFLOAD; + wmi_service[wmi_service_sap_auth_offload] = + WMI_SERVICE_SAP_AUTH_OFFLOAD; + wmi_service[wmi_service_dual_band_simultaneous_support] = + WMI_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT; + wmi_service[wmi_service_ocb] = WMI_SERVICE_OCB; + wmi_service[wmi_service_ap_arpns_offload] = + WMI_SERVICE_AP_ARPNS_OFFLOAD; + wmi_service[wmi_service_per_band_chainmask_support] = + WMI_SERVICE_PER_BAND_CHAINMASK_SUPPORT; + wmi_service[wmi_service_packet_filter_offload] = + WMI_SERVICE_PACKET_FILTER_OFFLOAD; + wmi_service[wmi_service_mgmt_tx_htt] = WMI_SERVICE_MGMT_TX_HTT; + wmi_service[wmi_service_mgmt_tx_wmi] = WMI_SERVICE_MGMT_TX_WMI; + wmi_service[wmi_service_ext_msg] = WMI_SERVICE_EXT_MSG; + wmi_service[wmi_service_ext2_msg] = WMI_SERVICE_EXT2_MSG; + wmi_service[wmi_service_mawc] = WMI_SERVICE_MAWC; + wmi_service[wmi_service_multiple_vdev_restart] = + WMI_SERVICE_MULTIPLE_VDEV_RESTART; + wmi_service[wmi_service_smart_antenna_sw_support] = + WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT; + wmi_service[wmi_service_smart_antenna_hw_support] = + WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT; + + wmi_service[wmi_service_roam_offload] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_ratectrl] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_enhanced_proxy_sta] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_tt] = WMI_SERVICE_THERM_THROT; + wmi_service[wmi_service_atf] = WMI_SERVICE_ATF; + wmi_service[wmi_service_peer_caching] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_coex_gpio] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_aux_spectral_intf] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_aux_chan_load_intf] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_bss_channel_info_64] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_ext_res_cfg_support] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_mesh] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_restrt_chnl_support] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_peer_stats] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_mesh_11s] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_periodic_chan_stat_support] = + WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT; + wmi_service[wmi_service_tx_mode_push_only] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_tx_mode_push_pull] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_tx_mode_dynamic] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_btcoex_duty_cycle] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_4_wire_coex_support] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_mesh] = WMI_SERVICE_ENTERPRISE_MESH; + wmi_service[wmi_service_peer_assoc_conf] = WMI_SERVICE_PEER_ASSOC_CONF; + wmi_service[wmi_service_egap] = WMI_SERVICE_EGAP; + wmi_service[wmi_service_sta_pmf_offload] = WMI_SERVICE_STA_PMF_OFFLOAD; + wmi_service[wmi_service_unified_wow_capability] = + WMI_SERVICE_UNIFIED_WOW_CAPABILITY; + wmi_service[wmi_service_enterprise_mesh] = WMI_SERVICE_ENTERPRISE_MESH; + wmi_service[wmi_service_apf_offload] = WMI_SERVICE_BPF_OFFLOAD; + wmi_service[wmi_service_sync_delete_cmds] = + WMI_SERVICE_SYNC_DELETE_CMDS; + wmi_service[wmi_service_ratectrl_limit_max_min_rates] = + WMI_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES; + wmi_service[wmi_service_nan_data] = WMI_SERVICE_NAN_DATA; + wmi_service[wmi_service_nan_rtt] = WMI_SERVICE_NAN_RTT; + wmi_service[wmi_service_11ax] = WMI_SERVICE_11AX; + wmi_service[wmi_service_deprecated_replace] = + WMI_SERVICE_DEPRECATED_REPLACE; + wmi_service[wmi_service_tdls_conn_tracker_in_host_mode] = + WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE; + wmi_service[wmi_service_enhanced_mcast_filter] = + WMI_SERVICE_ENHANCED_MCAST_FILTER; + wmi_service[wmi_service_half_rate_quarter_rate_support] = + WMI_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT; + wmi_service[wmi_service_vdev_rx_filter] = WMI_SERVICE_VDEV_RX_FILTER; + wmi_service[wmi_service_p2p_listen_offload_support] = + WMI_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT; + wmi_service[wmi_service_mark_first_wakeup_packet] = + WMI_SERVICE_MARK_FIRST_WAKEUP_PACKET; + wmi_service[wmi_service_multiple_mcast_filter_set] = + WMI_SERVICE_MULTIPLE_MCAST_FILTER_SET; + wmi_service[wmi_service_host_managed_rx_reorder] = + WMI_SERVICE_HOST_MANAGED_RX_REORDER; + wmi_service[wmi_service_flash_rdwr_support] = + WMI_SERVICE_FLASH_RDWR_SUPPORT; + wmi_service[wmi_service_wlan_stats_report] = + WMI_SERVICE_WLAN_STATS_REPORT; + wmi_service[wmi_service_tx_msdu_id_new_partition_support] = + WMI_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT; + wmi_service[wmi_service_dfs_phyerr_offload] = + WMI_SERVICE_DFS_PHYERR_OFFLOAD; + wmi_service[wmi_service_rcpi_support] = WMI_SERVICE_RCPI_SUPPORT; + wmi_service[wmi_service_fw_mem_dump_support] = + WMI_SERVICE_FW_MEM_DUMP_SUPPORT; + wmi_service[wmi_service_peer_stats_info] = WMI_SERVICE_PEER_STATS_INFO; + wmi_service[wmi_service_regulatory_db] = WMI_SERVICE_REGULATORY_DB; + wmi_service[wmi_service_11d_offload] = WMI_SERVICE_11D_OFFLOAD; + wmi_service[wmi_service_hw_data_filtering] = + WMI_SERVICE_HW_DATA_FILTERING; + wmi_service[wmi_service_pkt_routing] = WMI_SERVICE_PKT_ROUTING; + wmi_service[wmi_service_offchan_tx_wmi] = WMI_SERVICE_OFFCHAN_TX_WMI; + wmi_service[wmi_service_chan_load_info] = WMI_SERVICE_CHAN_LOAD_INFO; + wmi_service[wmi_service_extended_nss_support] = + WMI_SERVICE_EXTENDED_NSS_SUPPORT; + wmi_service[wmi_service_widebw_scan] = WMI_SERVICE_SCAN_PHYMODE_SUPPORT; + wmi_service[wmi_service_bcn_offload_start_stop_support] = + WMI_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT; + wmi_service[wmi_service_offchan_data_tid_support] = + WMI_SERVICE_OFFCHAN_DATA_TID_SUPPORT; + wmi_service[wmi_service_support_dma] = + WMI_SERVICE_SUPPORT_DIRECT_DMA; + wmi_service[wmi_service_8ss_tx_bfee] = WMI_SERVICE_8SS_TX_BFEE; + wmi_service[wmi_service_fils_support] = WMI_SERVICE_FILS_SUPPORT; + wmi_service[wmi_service_mawc_support] = WMI_SERVICE_MAWC_SUPPORT; + wmi_service[wmi_service_wow_wakeup_by_timer_pattern] = + WMI_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN; + wmi_service[wmi_service_11k_neighbour_report_support] = + WMI_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT; + wmi_service[wmi_service_ap_obss_detection_offload] = + WMI_SERVICE_AP_OBSS_DETECTION_OFFLOAD; + wmi_service[wmi_service_bss_color_offload] = + WMI_SERVICE_BSS_COLOR_OFFLOAD; + wmi_service[wmi_service_gmac_offload_support] = + WMI_SERVICE_GMAC_OFFLOAD_SUPPORT; + wmi_service[wmi_service_dual_beacon_on_single_mac_scc_support] = + WMI_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT; + wmi_service[wmi_service_dual_beacon_on_single_mac_mcc_support] = + WMI_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT; + wmi_service[wmi_service_twt_requestor] = WMI_SERVICE_STA_TWT; + wmi_service[wmi_service_twt_responder] = WMI_SERVICE_AP_TWT; + wmi_service[wmi_service_listen_interval_offload_support] = + WMI_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT; + wmi_service[wmi_service_esp_support] = WMI_SERVICE_ESP_SUPPORT; + wmi_service[wmi_service_obss_spatial_reuse] = + WMI_SERVICE_OBSS_SPATIAL_REUSE; + wmi_service[wmi_service_per_vdev_chain_support] = + WMI_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT; + wmi_service[wmi_service_new_htt_msg_format] = + WMI_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN; + wmi_service[wmi_service_peer_unmap_cnf_support] = + WMI_SERVICE_PEER_UNMAP_RESPONSE_SUPPORT; + wmi_service[wmi_service_beacon_reception_stats] = + WMI_SERVICE_BEACON_RECEPTION_STATS; + wmi_service[wmi_service_vdev_latency_config] = + WMI_SERVICE_VDEV_LATENCY_CONFIG; + wmi_service[wmi_service_nan_dbs_support] = WMI_SERVICE_NAN_DBS_SUPPORT; + wmi_service[wmi_service_ndi_dbs_support] = WMI_SERVICE_NDI_DBS_SUPPORT; + wmi_service[wmi_service_nan_sap_support] = WMI_SERVICE_NAN_SAP_SUPPORT; + wmi_service[wmi_service_ndi_sap_support] = WMI_SERVICE_NDI_SAP_SUPPORT; + wmi_service[wmi_service_nan_disable_support] = + WMI_SERVICE_NAN_DISABLE_SUPPORT; + wmi_service[wmi_service_sta_plus_sta_support] = + WMI_SERVICE_STA_PLUS_STA_SUPPORT; + wmi_service[wmi_service_hw_db2dbm_support] = + WMI_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT; + wmi_service[wmi_service_wlm_stats_support] = + WMI_SERVICE_WLM_STATS_REQUEST; + wmi_service[wmi_service_infra_mbssid] = WMI_SERVICE_INFRA_MBSSID; + wmi_service[wmi_service_ul_ru26_allowed] = WMI_SERVICE_UL_RU26_ALLOWED; + wmi_service[wmi_service_cfr_capture_support] = + WMI_SERVICE_CFR_CAPTURE_SUPPORT; + wmi_service[wmi_service_bcast_twt_support] = + WMI_SERVICE_BROADCAST_TWT; + wmi_service[wmi_service_wpa3_ft_sae_support] = + WMI_SERVICE_WPA3_FT_SAE_SUPPORT; + wmi_service[wmi_service_wpa3_ft_suite_b_support] = + WMI_SERVICE_WPA3_FT_SUITE_B_SUPPORT; + wmi_service[wmi_service_ft_fils] = + WMI_SERVICE_WPA3_FT_FILS; + wmi_service[wmi_service_adaptive_11r_support] = + WMI_SERVICE_ADAPTIVE_11R_ROAM; + wmi_service[wmi_service_tx_compl_tsf64] = + WMI_SERVICE_TX_COMPL_TSF64; + wmi_service[wmi_service_data_stall_recovery_support] = + WMI_SERVICE_DSM_ROAM_FILTER; + wmi_service[wmi_service_vdev_delete_all_peer] = + WMI_SERVICE_DELETE_ALL_PEER_SUPPORT; + wmi_service[wmi_service_three_way_coex_config_legacy] = + WMI_SERVICE_THREE_WAY_COEX_CONFIG_LEGACY; + wmi_service[wmi_service_rx_fse_support] = + WMI_SERVICE_RX_FSE_SUPPORT; + wmi_service[wmi_service_sae_roam_support] = + WMI_SERVICE_WPA3_SAE_ROAM_SUPPORT; + wmi_service[wmi_service_owe_roam_support] = + WMI_SERVICE_WPA3_OWE_ROAM_SUPPORT; + wmi_service[wmi_service_6ghz_support] = + WMI_SERVICE_6GHZ_SUPPORT; + wmi_service[wmi_service_bw_165mhz_support] = + WMI_SERVICE_BW_165MHZ_SUPPORT; + wmi_service[wmi_service_packet_capture_support] = + WMI_SERVICE_PACKET_CAPTURE_SUPPORT; + wmi_service[wmi_service_nan_vdev] = WMI_SERVICE_NAN_VDEV_SUPPORT; + wmi_service[wmi_service_peer_delete_no_peer_flush_tids_cmd] = + WMI_SERVICE_PEER_DELETE_NO_PEER_FLUSH_TIDS_CMD; + wmi_service[wmi_service_multiple_vdev_restart_ext] = + WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_time_sync_ftm] = + WMI_SERVICE_AUDIO_SYNC_SUPPORT; + wmi_service[wmi_service_nss_ratio_to_host_support] = + WMI_SERVICE_NSS_RATIO_TO_HOST_SUPPORT; + wmi_service[wmi_roam_scan_chan_list_to_host_support] = + WMI_SERVICE_ROAM_SCAN_CHANNEL_LIST_TO_HOST_SUPPORT; + wmi_service[wmi_service_sta_nan_ndi_four_port] = + WMI_SERVICE_NDI_NDI_STA_SUPPORT; + wmi_service[wmi_service_host_scan_stop_vdev_all] = + WMI_SERVICE_HOST_SCAN_STOP_VDEV_ALL_SUPPORT; +} + +/** + * wmi_ocb_ut_attach() - Attach OCB test framework + * @wmi_handle: wmi handle + * + * Return: None + */ +#ifdef WLAN_OCB_UT +void wmi_ocb_ut_attach(struct wmi_unified *wmi_handle); +#else +static inline void wmi_ocb_ut_attach(struct wmi_unified *wmi_handle) +{ + return; +} +#endif + +/** + * wmi_tlv_attach() - Attach TLV APIs + * + * Return: None + */ +void wmi_tlv_attach(wmi_unified_t wmi_handle) +{ + wmi_handle->ops = &tlv_ops; + wmi_ocb_ut_attach(wmi_handle); + wmi_handle->soc->svc_ids = &multi_svc_ids[0]; +#ifdef WMI_INTERFACE_EVENT_LOGGING + /* Skip saving WMI_CMD_HDR and TLV HDR */ + wmi_handle->soc->buf_offset_command = 8; + /* WMI_CMD_HDR is already stripped, skip saving TLV HDR */ + wmi_handle->soc->buf_offset_event = 4; +#endif + populate_tlv_events_id(wmi_handle->wmi_events); + populate_tlv_service(wmi_handle->services); + wmi_twt_attach_tlv(wmi_handle); + wmi_extscan_attach_tlv(wmi_handle); + wmi_smart_ant_attach_tlv(wmi_handle); + wmi_dbr_attach_tlv(wmi_handle); + wmi_atf_attach_tlv(wmi_handle); + wmi_ap_attach_tlv(wmi_handle); + wmi_bcn_attach_tlv(wmi_handle); + wmi_ocb_attach_tlv(wmi_handle); + wmi_nan_attach_tlv(wmi_handle); + wmi_p2p_attach_tlv(wmi_handle); + wmi_interop_issues_ap_attach_tlv(wmi_handle); + wmi_roam_attach_tlv(wmi_handle); + wmi_concurrency_attach_tlv(wmi_handle); + wmi_pmo_attach_tlv(wmi_handle); + wmi_sta_attach_tlv(wmi_handle); + wmi_11ax_bss_color_attach_tlv(wmi_handle); + wmi_fwol_attach_tlv(wmi_handle); + wmi_vdev_attach_tlv(wmi_handle); + wmi_cfr_attach_tlv(wmi_handle); + wmi_gpio_attach_tlv(wmi_handle); +} +qdf_export_symbol(wmi_tlv_attach); + +/** + * wmi_tlv_init() - Initialize WMI TLV module by registering TLV attach routine + * + * Return: None + */ +void wmi_tlv_init(void) +{ + wmi_unified_register_module(WMI_TLV_TARGET, &wmi_tlv_attach); +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_twt_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_twt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..bae359f5400e91aab22142b04f76446de6699a6d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_twt_api.c @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to TWT component. + */ + +#include "wmi_unified_priv.h" +#include "wmi_unified_twt_api.h" + + +QDF_STATUS +wmi_unified_twt_enable_cmd(wmi_unified_t wmi_handle, + struct wmi_twt_enable_param *params) +{ + if (wmi_handle->ops->send_twt_enable_cmd) + return wmi_handle->ops->send_twt_enable_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_twt_disable_cmd(wmi_unified_t wmi_handle, + struct wmi_twt_disable_param *params) +{ + if (wmi_handle->ops->send_twt_disable_cmd) + return wmi_handle->ops->send_twt_disable_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_twt_add_dialog_cmd(wmi_unified_t wmi_handle, + struct wmi_twt_add_dialog_param *params) +{ + if (wmi_handle->ops->send_twt_add_dialog_cmd) + return wmi_handle->ops->send_twt_add_dialog_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_twt_del_dialog_cmd(wmi_unified_t wmi_handle, + struct wmi_twt_del_dialog_param *params) +{ + if (wmi_handle->ops->send_twt_del_dialog_cmd) + return wmi_handle->ops->send_twt_del_dialog_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_twt_pause_dialog_cmd(wmi_unified_t wmi_handle, + struct wmi_twt_pause_dialog_cmd_param *params) +{ + if (wmi_handle->ops->send_twt_pause_dialog_cmd) + return wmi_handle->ops->send_twt_pause_dialog_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_twt_resume_dialog_cmd( + wmi_unified_t wmi_handle, + struct wmi_twt_resume_dialog_cmd_param *params) +{ + if (wmi_handle->ops->send_twt_resume_dialog_cmd) + return wmi_handle->ops->send_twt_resume_dialog_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_SUPPORT_BCAST_TWT +QDF_STATUS wmi_unified_twt_btwt_invite_sta_cmd( + wmi_unified_t wmi_handle, + struct wmi_twt_btwt_invite_sta_cmd_param *params) +{ + if (wmi_handle->ops->send_twt_btwt_invite_sta_cmd) + return wmi_handle->ops->send_twt_btwt_invite_sta_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_twt_btwt_remove_sta_cmd( + wmi_unified_t wmi_handle, + struct wmi_twt_btwt_remove_sta_cmd_param *params) +{ + if (wmi_handle->ops->send_twt_btwt_remove_sta_cmd) + return wmi_handle->ops->send_twt_btwt_remove_sta_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS wmi_extract_twt_enable_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_enable_complete_event_param *params) +{ + if (wmi_handle->ops->extract_twt_enable_comp_event) + return wmi_handle->ops->extract_twt_enable_comp_event( + wmi_handle, evt_buf, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_twt_disable_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_disable_complete_event *params) +{ + if (wmi_handle->ops->extract_twt_disable_comp_event) + return wmi_handle->ops->extract_twt_disable_comp_event( + wmi_handle, evt_buf, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_twt_add_dialog_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_add_dialog_complete_event_param *params) +{ + if (wmi_handle->ops->extract_twt_add_dialog_comp_event) + return wmi_handle->ops->extract_twt_add_dialog_comp_event( + wmi_handle, evt_buf, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_twt_del_dialog_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_del_dialog_complete_event_param *params) +{ + if (wmi_handle->ops->extract_twt_del_dialog_comp_event) + return wmi_handle->ops->extract_twt_del_dialog_comp_event( + wmi_handle, evt_buf, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_twt_pause_dialog_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_pause_dialog_complete_event_param *params) +{ + if (wmi_handle->ops->extract_twt_pause_dialog_comp_event) + return wmi_handle->ops->extract_twt_pause_dialog_comp_event( + wmi_handle, evt_buf, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_twt_resume_dialog_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_resume_dialog_complete_event_param *params) +{ + if (wmi_handle->ops->extract_twt_resume_dialog_comp_event) + return wmi_handle->ops->extract_twt_resume_dialog_comp_event( + wmi_handle, evt_buf, params); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_SUPPORT_BCAST_TWT +QDF_STATUS wmi_extract_twt_btwt_invite_sta_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_btwt_invite_sta_complete_event_param *params) +{ + if (wmi_handle->ops->extract_twt_btwt_invite_sta_comp_event) + return wmi_handle->ops->extract_twt_btwt_invite_sta_comp_event( + wmi_handle, evt_buf, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_twt_btwt_remove_sta_comp_event( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_btwt_remove_sta_complete_event_param *params) +{ + if (wmi_handle->ops->extract_twt_btwt_remove_sta_comp_event) + return wmi_handle->ops->extract_twt_btwt_remove_sta_comp_event( + wmi_handle, evt_buf, params); + + return QDF_STATUS_E_FAILURE; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_twt_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_twt_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..03d7c2580d03e4199293d392e6fdeba003d5a1ff --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_twt_tlv.c @@ -0,0 +1,614 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "wmi.h" +#include "wmi_unified_priv.h" +#include "wmi_unified_twt_param.h" +#include "wmi_unified_twt_api.h" + +static QDF_STATUS send_twt_enable_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_twt_enable_param *params) +{ + wmi_twt_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_twt_enable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_twt_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_twt_enable_cmd_fixed_param)); + + cmd->pdev_id = + wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + params->pdev_id); + cmd->sta_cong_timer_ms = params->sta_cong_timer_ms; + cmd->mbss_support = params->mbss_support; + cmd->default_slot_size = params->default_slot_size; + cmd->congestion_thresh_setup = params->congestion_thresh_setup; + cmd->congestion_thresh_teardown = params->congestion_thresh_teardown; + cmd->congestion_thresh_critical = params->congestion_thresh_critical; + cmd->interference_thresh_teardown = + params->interference_thresh_teardown; + cmd->interference_thresh_setup = params->interference_thresh_setup; + cmd->min_no_sta_setup = params->min_no_sta_setup; + cmd->min_no_sta_teardown = params->min_no_sta_teardown; + cmd->no_of_bcast_mcast_slots = params->no_of_bcast_mcast_slots; + cmd->min_no_twt_slots = params->min_no_twt_slots; + cmd->max_no_sta_twt = params->max_no_sta_twt; + cmd->mode_check_interval = params->mode_check_interval; + cmd->add_sta_slot_interval = params->add_sta_slot_interval; + cmd->remove_sta_slot_interval = params->remove_sta_slot_interval; + cmd->flags = params->flags; + + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_TWT_ENABLE_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_TWT_ENABLE_CMDID"); + wmi_buf_free(buf); + } + + return status; +} + + +static QDF_STATUS send_twt_disable_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_twt_disable_param *params) +{ + wmi_twt_disable_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_twt_disable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_twt_disable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_twt_disable_cmd_fixed_param)); + + cmd->pdev_id = + wmi_handle->ops->convert_pdev_id_host_to_target( + wmi_handle, + params->pdev_id); + + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_TWT_DISABLE_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_TWT_DISABLE_CMDID"); + wmi_buf_free(buf); + } + + return status; +} + +#ifdef WLAN_SUPPORT_BCAST_TWT +static void +twt_add_dialog_set_bcast_twt_params(struct wmi_twt_add_dialog_param *params, + wmi_twt_add_dialog_cmd_fixed_param *cmd) +{ + TWT_FLAGS_SET_BTWT_ID0(cmd->flags, params->flag_b_twt_id0); + cmd->b_twt_persistence = params->b_twt_persistence; + cmd->b_twt_recommendation = params->b_twt_recommendation; + + return; +} +#else +static void +twt_add_dialog_set_bcast_twt_params(struct wmi_twt_add_dialog_param *params, + wmi_twt_add_dialog_cmd_fixed_param *cmd) +{ + return; +} +#endif + +static QDF_STATUS +send_twt_add_dialog_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_twt_add_dialog_param *params) +{ + wmi_twt_add_dialog_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_twt_add_dialog_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_twt_add_dialog_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_twt_add_dialog_cmd_fixed_param)); + + cmd->vdev_id = params->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(params->peer_macaddr, &cmd->peer_macaddr); + cmd->dialog_id = params->dialog_id; + cmd->wake_intvl_us = params->wake_intvl_us; + cmd->wake_intvl_mantis = params->wake_intvl_mantis; + cmd->wake_dura_us = params->wake_dura_us; + cmd->sp_offset_us = params->sp_offset_us; + TWT_FLAGS_SET_CMD(cmd->flags, params->twt_cmd); + TWT_FLAGS_SET_BROADCAST(cmd->flags, params->flag_bcast); + TWT_FLAGS_SET_TRIGGER(cmd->flags, params->flag_trigger); + TWT_FLAGS_SET_FLOW_TYPE(cmd->flags, params->flag_flow_type); + TWT_FLAGS_SET_PROTECTION(cmd->flags, params->flag_protection); + + twt_add_dialog_set_bcast_twt_params(params, cmd); + + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_TWT_ADD_DIALOG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_TWT_ADD_DIALOG_CMDID"); + wmi_buf_free(buf); + } + + return status; +} + +#ifdef WLAN_SUPPORT_BCAST_TWT +static void +twt_del_dialog_set_bcast_twt_params(struct wmi_twt_del_dialog_param *params, + wmi_twt_del_dialog_cmd_fixed_param *cmd) +{ + cmd->b_twt_persistence = params->b_twt_persistence; + return; +} +#else +static void +twt_del_dialog_set_bcast_twt_params(struct wmi_twt_del_dialog_param *params, + wmi_twt_del_dialog_cmd_fixed_param *cmd) +{ + return; +} +#endif + +static QDF_STATUS +send_twt_del_dialog_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_twt_del_dialog_param *params) +{ + wmi_twt_del_dialog_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_twt_del_dialog_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_twt_del_dialog_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_twt_del_dialog_cmd_fixed_param)); + + cmd->vdev_id = params->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(params->peer_macaddr, &cmd->peer_macaddr); + cmd->dialog_id = params->dialog_id; + + twt_del_dialog_set_bcast_twt_params(params, cmd); + + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_TWT_DEL_DIALOG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_TWT_DEL_DIALOG_CMDID"); + wmi_buf_free(buf); + } + + return status; +} + +static QDF_STATUS +send_twt_pause_dialog_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_twt_pause_dialog_cmd_param *params) +{ + wmi_twt_pause_dialog_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_twt_pause_dialog_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_twt_pause_dialog_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_twt_pause_dialog_cmd_fixed_param)); + + cmd->vdev_id = params->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(params->peer_macaddr, &cmd->peer_macaddr); + cmd->dialog_id = params->dialog_id; + + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_TWT_PAUSE_DIALOG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_TWT_PAUSE_DIALOG_CMDID"); + wmi_buf_free(buf); + } + + return status; +} + +static QDF_STATUS send_twt_resume_dialog_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_twt_resume_dialog_cmd_param *params) +{ + wmi_twt_resume_dialog_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_twt_resume_dialog_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_twt_resume_dialog_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_twt_resume_dialog_cmd_fixed_param)); + + cmd->vdev_id = params->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(params->peer_macaddr, &cmd->peer_macaddr); + cmd->dialog_id = params->dialog_id; + cmd->sp_offset_us = params->sp_offset_us; + cmd->next_twt_size = params->next_twt_size; + + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_TWT_RESUME_DIALOG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_TWT_RESUME_DIALOG_CMDID"); + wmi_buf_free(buf); + } + + return status; +} + +#ifdef WLAN_SUPPORT_BCAST_TWT +static QDF_STATUS +send_twt_btwt_invite_sta_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_twt_btwt_invite_sta_cmd_param + *params) +{ + wmi_twt_btwt_invite_sta_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_twt_btwt_invite_sta_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_twt_btwt_invite_sta_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_twt_btwt_invite_sta_cmd_fixed_param)); + + cmd->vdev_id = params->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(params->peer_macaddr, &cmd->peer_macaddr); + cmd->dialog_id = params->dialog_id; + + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_TWT_BTWT_INVITE_STA_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + wmi_buf_free(buf); + } + + return status; +} + +static QDF_STATUS +send_twt_btwt_remove_sta_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_twt_btwt_remove_sta_cmd_param + *params) +{ + wmi_twt_btwt_remove_sta_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_twt_btwt_remove_sta_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_twt_btwt_remove_sta_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_twt_btwt_remove_sta_cmd_fixed_param)); + + cmd->vdev_id = params->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(params->peer_macaddr, &cmd->peer_macaddr); + cmd->dialog_id = params->dialog_id; + + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_TWT_BTWT_REMOVE_STA_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + wmi_buf_free(buf); + } + + return status; +} +#endif + +static QDF_STATUS extract_twt_enable_comp_event_tlv(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_enable_complete_event_param *params) +{ + WMI_TWT_ENABLE_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_twt_enable_complete_event_fixed_param *ev; + + param_buf = (WMI_TWT_ENABLE_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + + params->pdev_id = + wmi_handle->ops->convert_pdev_id_target_to_host(wmi_handle, + ev->pdev_id); + params->status = ev->status; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_twt_disable_comp_event_tlv(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_disable_complete_event *params) +{ + WMI_TWT_DISABLE_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_twt_disable_complete_event_fixed_param *ev; + + param_buf = (WMI_TWT_DISABLE_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + +#if 0 + params->pdev_id = + wmi_handle->ops->convert_pdev_id_target_to_host(ev->pdev_id); + params->status = ev->status; +#endif + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_twt_add_dialog_comp_event_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_add_dialog_complete_event_param *params) +{ + WMI_TWT_ADD_DIALOG_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_twt_add_dialog_complete_event_fixed_param *ev; + + param_buf = (WMI_TWT_ADD_DIALOG_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + + params->vdev_id = ev->vdev_id; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&ev->peer_macaddr, params->peer_macaddr); + params->status = ev->status; + params->dialog_id = ev->dialog_id; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_twt_del_dialog_comp_event_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_del_dialog_complete_event_param *params) +{ + WMI_TWT_DEL_DIALOG_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_twt_del_dialog_complete_event_fixed_param *ev; + + param_buf = (WMI_TWT_DEL_DIALOG_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + + params->vdev_id = ev->vdev_id; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&ev->peer_macaddr, params->peer_macaddr); + params->dialog_id = ev->dialog_id; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_twt_pause_dialog_comp_event_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_pause_dialog_complete_event_param *params) +{ + WMI_TWT_PAUSE_DIALOG_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_twt_pause_dialog_complete_event_fixed_param *ev; + + param_buf = (WMI_TWT_PAUSE_DIALOG_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + + params->vdev_id = ev->vdev_id; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&ev->peer_macaddr, params->peer_macaddr); + params->status = ev->status; + params->dialog_id = ev->dialog_id; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_twt_resume_dialog_comp_event_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_resume_dialog_complete_event_param *params) +{ + WMI_TWT_RESUME_DIALOG_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_twt_resume_dialog_complete_event_fixed_param *ev; + + param_buf = + (WMI_TWT_RESUME_DIALOG_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + + params->vdev_id = ev->vdev_id; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&ev->peer_macaddr, params->peer_macaddr); + params->status = ev->status; + params->dialog_id = ev->dialog_id; + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_SUPPORT_BCAST_TWT +static QDF_STATUS +extract_twt_btwt_invite_sta_comp_event_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct + wmi_twt_btwt_invite_sta_complete_event_param + *params) +{ + WMI_TWT_BTWT_INVITE_STA_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_twt_btwt_invite_sta_complete_event_fixed_param *ev; + + param_buf = + (WMI_TWT_BTWT_INVITE_STA_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + + params->vdev_id = ev->vdev_id; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&ev->peer_macaddr, params->peer_macaddr); + params->status = ev->status; + params->dialog_id = ev->dialog_id; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +extract_twt_btwt_remove_sta_comp_event_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct + wmi_twt_btwt_remove_sta_complete_event_param + *params) +{ + WMI_TWT_BTWT_REMOVE_STA_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_twt_btwt_remove_sta_complete_event_fixed_param *ev; + + param_buf = + (WMI_TWT_BTWT_REMOVE_STA_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + + params->vdev_id = ev->vdev_id; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&ev->peer_macaddr, params->peer_macaddr); + params->status = ev->status; + params->dialog_id = ev->dialog_id; + + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef WLAN_SUPPORT_BCAST_TWT +static void +wmi_twt_attach_bcast_twt_tlv(struct wmi_ops *ops) +{ + ops->send_twt_btwt_invite_sta_cmd = send_twt_btwt_invite_sta_cmd_tlv; + ops->send_twt_btwt_remove_sta_cmd = send_twt_btwt_remove_sta_cmd_tlv; + ops->extract_twt_btwt_invite_sta_comp_event = + extract_twt_btwt_invite_sta_comp_event_tlv; + ops->extract_twt_btwt_remove_sta_comp_event = + extract_twt_btwt_remove_sta_comp_event_tlv; + + return; +} +#else +static void +wmi_twt_attach_bcast_twt_tlv(struct wmi_ops *ops) +{ + return; +} +#endif + +void wmi_twt_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_twt_enable_cmd = send_twt_enable_cmd_tlv; + ops->send_twt_disable_cmd = send_twt_disable_cmd_tlv; + ops->send_twt_add_dialog_cmd = send_twt_add_dialog_cmd_tlv; + ops->send_twt_del_dialog_cmd = send_twt_del_dialog_cmd_tlv; + ops->send_twt_pause_dialog_cmd = send_twt_pause_dialog_cmd_tlv; + ops->send_twt_resume_dialog_cmd = send_twt_resume_dialog_cmd_tlv; + ops->extract_twt_enable_comp_event = extract_twt_enable_comp_event_tlv; + ops->extract_twt_disable_comp_event = + extract_twt_disable_comp_event_tlv; + ops->extract_twt_add_dialog_comp_event = + extract_twt_add_dialog_comp_event_tlv; + ops->extract_twt_del_dialog_comp_event = + extract_twt_del_dialog_comp_event_tlv; + ops->extract_twt_pause_dialog_comp_event = + extract_twt_pause_dialog_comp_event_tlv; + ops->extract_twt_resume_dialog_comp_event = + extract_twt_resume_dialog_comp_event_tlv; + + wmi_twt_attach_bcast_twt_tlv(ops); +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_vdev_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_vdev_api.c new file mode 100644 index 0000000000000000000000000000000000000000..1c8150fe0384c60a1fb9a07490e6ec16ace765e7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_vdev_api.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API definitions for the Unified Wireless Module + * Interface (WMI). + */ +#include +#include +#include +#include +#include +#include +#include +#ifdef WMI_EXT_DBG +#include +#endif + +#include +#include +#include +#include + +QDF_STATUS +wmi_extract_tbttoffset_num_vdevs(struct wmi_unified *wmi_handle, void *evt_buf, + uint32_t *num_vdevs) +{ + if (wmi_handle->ops->extract_tbttoffset_num_vdevs) + return wmi_handle->ops->extract_tbttoffset_num_vdevs(wmi_handle, + evt_buf, + num_vdevs); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_multiple_vdev_restart_req_cmd( + struct wmi_unified *wmi_handle, + struct multiple_vdev_restart_params *param) +{ + if (wmi_handle->ops->send_multiple_vdev_restart_req_cmd) + return wmi_handle->ops->send_multiple_vdev_restart_req_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_beacon_send_cmd(struct wmi_unified *wmi_handle, + struct beacon_params *param) +{ + if (wmi_handle->ops->send_beacon_send_cmd) + return wmi_handle->ops->send_beacon_send_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_vdev_start_resp(struct wmi_unified *wmi_handle, void *evt_buf, + wmi_host_vdev_start_resp *vdev_rsp) +{ + if (wmi_handle->ops->extract_vdev_start_resp) + return wmi_handle->ops->extract_vdev_start_resp(wmi_handle, + evt_buf, + vdev_rsp); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_vdev_delete_resp(struct wmi_unified *wmi_handle, void *evt_buf, + struct wmi_host_vdev_delete_resp *delete_rsp) +{ + if (wmi_handle->ops->extract_vdev_delete_resp) + return wmi_handle->ops->extract_vdev_delete_resp(wmi_handle, + evt_buf, + delete_rsp); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_vdev_peer_delete_all_response_event( + struct wmi_unified *wmi_handle, + void *evt_buf, + struct wmi_host_vdev_peer_delete_all_response_event *delete_rsp) +{ + if (wmi_handle->ops->extract_vdev_peer_delete_all_response_event) + return + wmi_handle->ops->extract_vdev_peer_delete_all_response_event( + wmi_handle, + evt_buf, + delete_rsp); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_vdev_stopped_param(struct wmi_unified *wmi_handle, void *evt_buf, + uint32_t *vdev_id) +{ + if (wmi_handle->ops->extract_vdev_stopped_param) + return wmi_handle->ops->extract_vdev_stopped_param(wmi_handle, + evt_buf, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_ext_tbttoffset_num_vdevs(struct wmi_unified *wmi_handle, + void *evt_buf, uint32_t *num_vdevs) +{ + if (wmi_handle->ops->extract_ext_tbttoffset_num_vdevs) + return wmi_handle->ops->extract_ext_tbttoffset_num_vdevs( + wmi_handle, + evt_buf, num_vdevs); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_vdev_config_ratemask_cmd_send(struct wmi_unified *wmi_handle, + struct config_ratemask_params *param) +{ + if (wmi_handle->ops->send_vdev_config_ratemask_cmd) + return wmi_handle->ops->send_vdev_config_ratemask_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_vdev_set_neighbour_rx_cmd_send( + struct wmi_unified *wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct set_neighbour_rx_params *param) +{ + if (wmi_handle->ops->send_vdev_set_neighbour_rx_cmd) + return wmi_handle->ops->send_vdev_set_neighbour_rx_cmd( + wmi_handle, + macaddr, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_multi_vdev_restart_resp_event( + struct wmi_unified *wmi_handle, + void *evt_buf, + struct multi_vdev_restart_resp *restart_rsp) +{ + if (wmi_handle->ops->extract_multi_vdev_restart_resp_event) + return wmi_handle->ops->extract_multi_vdev_restart_resp_event( + wmi_handle, evt_buf, restart_rsp); + + return QDF_STATUS_E_FAILURE; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_vdev_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_vdev_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..7e988e066c3e9101a516c5dce8716062c124e2db --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_vdev_tlv.c @@ -0,0 +1,383 @@ +/* + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include + +static QDF_STATUS +send_vdev_config_ratemask_cmd_tlv(struct wmi_unified *wmi_handle, + struct config_ratemask_params *param) +{ + wmi_vdev_config_ratemask_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_vdev_config_ratemask_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_config_ratemask_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_config_ratemask_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->type = param->type; + cmd->mask_lower32 = param->lower32; + cmd->mask_higher32 = param->higher32; + cmd->mask_lower32_2 = param->lower32_2; + cmd->mask_higher32_2 = param->higher32_2; + + wmi_mtrace(WMI_VDEV_RATEMASK_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_RATEMASK_CMDID)) { + WMI_LOGE("Seting vdev ratemask failed"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +send_beacon_send_cmd_tlv(struct wmi_unified *wmi_handle, + struct beacon_params *param) +{ + QDF_STATUS ret; + wmi_bcn_send_from_host_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + qdf_dma_addr_t dma_addr; + uint32_t dtim_flag = 0; + + wmi_buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!wmi_buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + if (param->is_dtim_count_zero) { + dtim_flag |= WMI_BCN_SEND_DTIM_ZERO; + if (param->is_bitctl_reqd) { + /* deliver CAB traffic in next DTIM beacon */ + dtim_flag |= WMI_BCN_SEND_DTIM_BITCTL_SET; + } + } + cmd = (wmi_bcn_send_from_host_cmd_fixed_param *)wmi_buf_data(wmi_buf); + WMITLV_SET_HDR( + &cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bcn_send_from_host_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_bcn_send_from_host_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->data_len = qdf_nbuf_len(param->wbuf); + cmd->frame_ctrl = param->frame_ctrl; + cmd->dtim_flag = dtim_flag; + dma_addr = qdf_nbuf_get_frag_paddr(param->wbuf, 0); + cmd->frag_ptr_lo = qdf_get_lower_32_bits(dma_addr); +#if defined(HTT_PADDR64) + cmd->frag_ptr_hi = qdf_get_upper_32_bits(dma_addr) & 0x1F; +#endif + cmd->bcn_antenna = param->bcn_txant; + + wmi_mtrace(WMI_PDEV_SEND_BCN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, wmi_buf, sizeof(*cmd), + WMI_PDEV_SEND_BCN_CMDID); + if (ret != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Failed to send bcn: %d", __func__, ret); + wmi_buf_free(wmi_buf); + } + + return ret; +} + +static QDF_STATUS +extract_tbttoffset_num_vdevs_tlv(struct wmi_unified *wmi_handle, void *evt_buf, + uint32_t *num_vdevs) +{ + WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *param_buf; + wmi_tbtt_offset_event_fixed_param *tbtt_offset_event; + uint32_t vdev_map; + + param_buf = (WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("%s: Invalid tbtt update ext event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + tbtt_offset_event = param_buf->fixed_param; + vdev_map = tbtt_offset_event->vdev_map; + *num_vdevs = wmi_vdev_map_to_num_vdevs(vdev_map); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +send_vdev_set_neighbour_rx_cmd_tlv(struct wmi_unified *wmi_handle, + uint8_t macaddr[QDF_MAC_ADDR_SIZE], + struct set_neighbour_rx_params *param) +{ + wmi_vdev_filter_nrp_config_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_vdev_filter_nrp_config_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_filter_nrp_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_filter_nrp_config_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->bssid_idx = param->idx; + cmd->action = param->action; + cmd->type = param->type; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->addr); + cmd->flag = 0; + + wmi_mtrace(WMI_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID)) { + WMI_LOGE("Failed to set neighbour rx param"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +extract_vdev_start_resp_tlv(struct wmi_unified *wmi_handle, void *evt_buf, + wmi_host_vdev_start_resp *vdev_rsp) +{ + WMI_VDEV_START_RESP_EVENTID_param_tlvs *param_buf; + wmi_vdev_start_response_event_fixed_param *ev; + + param_buf = (WMI_VDEV_START_RESP_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("%s: Invalid start response event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + if (!ev) { + WMI_LOGE("%s: Invalid start response event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_zero(vdev_rsp, sizeof(*vdev_rsp)); + + vdev_rsp->vdev_id = ev->vdev_id; + vdev_rsp->requestor_id = ev->requestor_id; + switch (ev->resp_type) { + case WMI_VDEV_START_RESP_EVENT: + vdev_rsp->resp_type = WMI_HOST_VDEV_START_RESP_EVENT; + break; + case WMI_VDEV_RESTART_RESP_EVENT: + vdev_rsp->resp_type = WMI_HOST_VDEV_RESTART_RESP_EVENT; + break; + default: + WMI_LOGE("Invalid start response event buffer"); + break; + }; + vdev_rsp->status = ev->status; + vdev_rsp->chain_mask = ev->chain_mask; + vdev_rsp->smps_mode = ev->smps_mode; + vdev_rsp->mac_id = ev->mac_id; + vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams; + vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams; + vdev_rsp->max_allowed_tx_power = ev->max_allowed_tx_power; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +extract_vdev_delete_resp_tlv(struct wmi_unified *wmi_handle, void *evt_buf, + struct wmi_host_vdev_delete_resp *delete_rsp) +{ + WMI_VDEV_DELETE_RESP_EVENTID_param_tlvs *param_buf; + wmi_vdev_delete_resp_event_fixed_param *ev; + + param_buf = (WMI_VDEV_DELETE_RESP_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid vdev delete response event buffer"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + if (!ev) { + WMI_LOGE("Invalid vdev delete response event"); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_zero(delete_rsp, sizeof(*delete_rsp)); + delete_rsp->vdev_id = ev->vdev_id; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_vdev_peer_delete_all_response_event_tlv( + wmi_unified_t wmi_hdl, + void *evt_buf, + struct wmi_host_vdev_peer_delete_all_response_event *param) +{ + WMI_VDEV_DELETE_ALL_PEER_RESP_EVENTID_param_tlvs *param_buf; + wmi_vdev_delete_all_peer_resp_event_fixed_param *ev; + + param_buf = (WMI_VDEV_DELETE_ALL_PEER_RESP_EVENTID_param_tlvs *)evt_buf; + + ev = (wmi_vdev_delete_all_peer_resp_event_fixed_param *) + param_buf->fixed_param; + if (!ev) { + WMI_LOGE("%s: Invalid peer_delete all response", __func__); + return QDF_STATUS_E_FAILURE; + } + + param->vdev_id = ev->vdev_id; + param->status = ev->status; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +extract_vdev_stopped_param_tlv(struct wmi_unified *wmi_handle, + void *evt_buf, uint32_t *vdev_id) +{ + WMI_VDEV_STOPPED_EVENTID_param_tlvs *param_buf; + wmi_vdev_stopped_event_fixed_param *resp_event; + + param_buf = (WMI_VDEV_STOPPED_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid event buffer"); + return QDF_STATUS_E_INVAL; + } + resp_event = param_buf->fixed_param; + *vdev_id = resp_event->vdev_id; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ext_tbttoffset_num_vdevs_tlv( + wmi_unified_t wmi_hdl, + void *evt_buf, + uint32_t *num_vdevs) +{ + WMI_TBTTOFFSET_EXT_UPDATE_EVENTID_param_tlvs *param_buf; + wmi_tbtt_offset_ext_event_fixed_param *tbtt_offset_ext_event; + + param_buf = (WMI_TBTTOFFSET_EXT_UPDATE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("%s Invalid tbtt update ext event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + tbtt_offset_ext_event = param_buf->fixed_param; + + *num_vdevs = tbtt_offset_ext_event->num_vdevs; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_tbttoffset_update_params_tlv( + wmi_unified_t wmi_hdl, + void *evt_buf, uint8_t idx, + struct tbttoffset_params *tbtt_param) +{ + WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *param_buf; + wmi_tbtt_offset_event_fixed_param *tbtt_offset_event; + uint32_t vdev_map; + + param_buf = (WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("%s: Invalid tbtt update event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + + tbtt_offset_event = param_buf->fixed_param; + vdev_map = tbtt_offset_event->vdev_map; + tbtt_param->vdev_id = wmi_vdev_map_to_vdev_id(vdev_map, idx); + if (tbtt_param->vdev_id == WLAN_INVALID_VDEV_ID) + return QDF_STATUS_E_INVAL; + tbtt_param->tbttoffset = + param_buf->tbttoffset_list[tbtt_param->vdev_id]; + if (param_buf->tbtt_qtime_low_us_list) + tbtt_param->vdev_tbtt_qtime_lo = + param_buf->tbtt_qtime_low_us_list[tbtt_param->vdev_id]; + if (param_buf->tbtt_qtime_high_us_list) + tbtt_param->vdev_tbtt_qtime_hi = + param_buf->tbtt_qtime_high_us_list[tbtt_param->vdev_id]; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ext_tbttoffset_update_params_tlv( + wmi_unified_t wmi_hdl, + void *evt_buf, uint8_t idx, + struct tbttoffset_params *tbtt_param) +{ + WMI_TBTTOFFSET_EXT_UPDATE_EVENTID_param_tlvs *param_buf; + wmi_tbtt_offset_info *tbtt_offset_info; + + param_buf = (WMI_TBTTOFFSET_EXT_UPDATE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("%s: Invalid tbtt update event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + tbtt_offset_info = ¶m_buf->tbtt_offset_info[idx]; + + tbtt_param->vdev_id = tbtt_offset_info->vdev_id; + tbtt_param->tbttoffset = tbtt_offset_info->tbttoffset; + tbtt_param->vdev_tbtt_qtime_lo = tbtt_offset_info->tbtt_qtime_low_us; + tbtt_param->vdev_tbtt_qtime_hi = tbtt_offset_info->tbtt_qtime_high_us; + + return QDF_STATUS_SUCCESS; +} + +void wmi_vdev_attach_tlv(struct wmi_unified *wmi_handle) +{ + struct wmi_ops *wmi_ops; + + if (!wmi_handle) { + WMI_LOGP("%s: null wmi handle", __func__); + return; + } + + wmi_ops = wmi_handle->ops; + wmi_ops->extract_vdev_delete_resp = extract_vdev_delete_resp_tlv; + wmi_ops->extract_vdev_stopped_param = extract_vdev_stopped_param_tlv; + wmi_ops->extract_vdev_start_resp = extract_vdev_start_resp_tlv; + wmi_ops->extract_vdev_peer_delete_all_response_event = + extract_vdev_peer_delete_all_response_event_tlv; + wmi_ops->extract_tbttoffset_num_vdevs = + extract_tbttoffset_num_vdevs_tlv; + wmi_ops->extract_tbttoffset_update_params = + extract_tbttoffset_update_params_tlv; + wmi_ops->extract_ext_tbttoffset_update_params = + extract_ext_tbttoffset_update_params_tlv; + wmi_ops->extract_ext_tbttoffset_num_vdevs = + extract_ext_tbttoffset_num_vdevs_tlv; + wmi_ops->send_vdev_set_neighbour_rx_cmd = + send_vdev_set_neighbour_rx_cmd_tlv; + wmi_ops->send_beacon_send_cmd = send_beacon_send_cmd_tlv; + wmi_ops->send_vdev_config_ratemask_cmd = + send_vdev_config_ratemask_cmd_tlv; +}